#! /bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Randy Dunlap, 2018
# Thorsten Leemhuis, 2018

usage() {
	cat <<EOF
Call without parameters to decode /proc/sys/kernel/tainted.

Call with a positive integer as parameter to decode a value you
retrieved from /proc/sys/kernel/tainted on another system.
EOF
}

if [ "$1"x != "x" ]; then
	if [ "$1"x = "--helpx" ] || [ "$1"x = "-hx" ] ; then
		usage
		exit 1
	elif [ $1 -ge 0 ] 2>/dev/null ; then
		taint=$1
	else
		echo "Error: Parameter '$1' not a positive integer. Aborting." >&2
		exit 1
	fi
else
	TAINTFILE="/proc/sys/kernel/tainted"
	if [ ! -r $TAINTFILE ]; then
		echo "No file: $TAINTFILE"
		exit
	fi

	taint=`cat $TAINTFILE`
fi

if [ $taint -eq 0 ]; then
	echo "Kernel not Tainted"
	exit
else
	echo "Kernel is \"tainted\" for the following reasons:"
fi

T=$taint
out=

addout() {
	out=$out$1
}

# Walk the taint bits from least to most significant: for each set bit,
# append its flag character to $out and print the reason; for each clear
# bit, append a space (bit 0 uses 'G' when no proprietary module is loaded).
if [ `expr $T % 2` -eq 0 ]; then
	addout "G"
else
	addout "P"
	echo " * proprietary module was loaded (#0)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "F"
	echo " * module was force loaded (#1)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "S"
	echo " * kernel running on an out of specification system (#2)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "R"
	echo " * module was force unloaded (#3)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "M"
	echo " * processor reported a Machine Check Exception (MCE) (#4)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "B"
	echo " * bad page referenced or some unexpected page flags (#5)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "U"
	echo " * taint requested by userspace application (#6)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "D"
	echo " * kernel died recently, i.e. there was an OOPS or BUG (#7)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "A"
	echo " * an ACPI table was overridden by user (#8)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "W"
	echo " * kernel issued warning (#9)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "C"
	echo " * staging driver was loaded (#10)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "I"
	echo " * workaround for bug in platform firmware applied (#11)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "O"
	echo " * externally-built ('out-of-tree') module was loaded (#12)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "E"
	echo " * unsigned module was loaded (#13)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "L"
	echo " * soft lockup occurred (#14)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "K"
	echo " * kernel has been live patched (#15)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "X"
	echo " * auxiliary taint, defined for and used by distros (#16)"
fi

T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
	addout " "
else
	addout "T"
	echo " * kernel was built with the struct randomization plugin (#17)"
fi

echo "For a more detailed explanation of the various taint flags see"
echo " Documentation/admin-guide/tainted-kernels.rst in the Linux kernel sources"
echo " or https://kernel.org/doc/html/latest/admin-guide/tainted-kernels.html"
echo "Raw taint value as int/string: $taint/'$out'"

#EOF#
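
# Worked example (illustrative values, not taken from a real system):
# assuming the script is saved as kernel-chktaint, decoding the value
# 4097 (= 2^0 + 2^12, i.e. bits 0 and 12 set) with
#
#     ./kernel-chktaint 4097
#
# would print " * proprietary module was loaded (#0)" and
# " * externally-built ('out-of-tree') module was loaded (#12)",
# with 'P' and 'O' in the corresponding positions of the flag string.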
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c116
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c199
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c210
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c500
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c176
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c187
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c233
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c291
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c181
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c75
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c153
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c201
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm118
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c114
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm153
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c156
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c88
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c208
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v12_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c75
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c75
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c199
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c88
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c708
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c251
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c197
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c414
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c448
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c345
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c365
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c125
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c267
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_14.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_14.h51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c315
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c314
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c116
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c242
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c152
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c1118
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c66
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h2556
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm202
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm1126
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm62
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c34
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.c17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c166
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c70
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c64
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c40
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c41
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c70
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c42
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c55
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c7
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c721
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h25
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c564
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h55
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c184
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c132
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c104
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c69
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c140
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c274
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c311
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c414
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c82
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h71
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c125
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h179
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_state.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h71
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c84
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c201
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c176
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c80
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c132
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h109
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c169
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c209
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h401
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c1429
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h135
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c226
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c438
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c307
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c1178
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h)19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c549
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c144
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c296
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c35
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c1134
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c61
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c179
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c120
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c46
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl.c381
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_debug.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h12
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h464
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c6
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c8
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h19
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h4
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c307
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h11
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c33
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c31
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c11
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h3
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h18
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h57
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h)4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h)4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h13
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h29
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h37
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h17
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h47
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h222
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h43
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h31
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c108
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c468
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h8
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c56
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c57
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c61
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c430
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h561
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c574
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h26
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c24
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c322
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h33
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h132
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c609
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c173
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c1463
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c68
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c61
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c16
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c68
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c312
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c441
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c267
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c40
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c76
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c337
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h1
-rw-r--r--drivers/gpu/drm/arm/Kconfig2
-rw-r--r--drivers/gpu/drm/arm/display/Kconfig1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c6
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c4
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c11
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c7
-rw-r--r--drivers/gpu/drm/armada/Kconfig1
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c2
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h11
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c11
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c113
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c2
-rw-r--r--drivers/gpu/drm/aspeed/Kconfig1
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c7
-rw-r--r--drivers/gpu/drm/ast/Kconfig1
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c141
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c111
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c13
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h20
-rw-r--r--drivers/gpu/drm/ast/ast_main.c67
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c34
-rw-r--r--drivers/gpu/drm/ast/ast_post.c36
-rw-r--r--drivers/gpu/drm/ast/ast_reg.h41
-rw-r--r--drivers/gpu/drm/ast/ast_sil164.c57
-rw-r--r--drivers/gpu/drm/ast/ast_vga.c57
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c8
-rw-r--r--drivers/gpu/drm/bridge/Kconfig20
-rw-r--r--drivers/gpu/drm/bridge/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c17
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c10
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c4
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c2
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c82
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c7
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c4
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c28
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h3
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c2
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c2
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c6
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c2
-rw-r--r--drivers/gpu/drm/bridge/imx/Kconfig10
-rw-r--r--drivers/gpu/drm/bridge/imx/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c88
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c2
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c22
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qm-ldb.c11
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c11
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c11
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c2
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c2
-rw-r--r--drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c2
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c907
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c348
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c8
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c4
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c2
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c334
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c7
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c2
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c8
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c2
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c4
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c10
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c35
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c4
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Kconfig14
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c10
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c628
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h834
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c3
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c1030
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c68
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c29
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c2
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c149
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c10
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c111
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c2
-rw-r--r--drivers/gpu/drm/ci/arm64.config7
-rw-r--r--drivers/gpu/drm/ci/build.sh1
-rw-r--r--drivers/gpu/drm/ci/gitlab-ci.yml14
-rw-r--r--drivers/gpu/drm/ci/image-tags.yml2
-rw-r--r--drivers/gpu/drm/ci/test.yml25
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-fails.txt10
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-glk-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt51
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt13
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt20
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt2
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt34
-rw-r--r--drivers/gpu/drm/ci/xfails/i915-whl-fails.txt9
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt11
-rw-r--r--drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt27
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt27
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt14
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt5
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt15
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt6
-rw-r--r--drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt211
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt1
-rw-r--r--drivers/gpu/drm/ci/xfails/requirements.txt17
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt22
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt28
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt7
-rw-r--r--drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt28
-rwxr-xr-xdrivers/gpu/drm/ci/xfails/update-xfails.py204
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-fails.txt21
-rw-r--r--drivers/gpu/drm/ci/xfails/vkms-none-skips.txt53
-rw-r--r--drivers/gpu/drm/clients/Kconfig123
-rw-r--r--drivers/gpu/drm/clients/Makefile8
-rw-r--r--drivers/gpu/drm/clients/drm_client_internal.h25
-rw-r--r--drivers/gpu/drm/clients/drm_client_setup.c91
-rw-r--r--drivers/gpu/drm/clients/drm_fbdev_client.c167
-rw-r--r--drivers/gpu/drm/clients/drm_log.c420
-rw-r--r--drivers/gpu/drm/display/Kconfig16
-rw-r--r--drivers/gpu/drm/display/Makefile7
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c176
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c14
-rw-r--r--drivers/gpu/drm/display/drm_dp_dual_mode_helper.c4
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c125
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c207
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c10
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_audio_helper.c190
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c91
-rw-r--r--drivers/gpu/drm/drm_aperture.c192
-rw-r--r--drivers/gpu/drm/drm_atomic.c2
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_bridge.c4
-rw-r--r--drivers/gpu/drm/drm_client.c121
-rw-r--r--drivers/gpu/drm/drm_client_event.c197
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c37
-rw-r--r--drivers/gpu/drm/drm_connector.c177
-rw-r--r--drivers/gpu/drm/drm_debugfs.c15
-rw-r--r--drivers/gpu/drm/drm_draw.c233
-rw-r--r--drivers/gpu/drm/drm_draw_internal.h56
-rw-r--r--drivers/gpu/drm/drm_drv.c34
-rw-r--r--drivers/gpu/drm/drm_edid.c6
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c118
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c173
-rw-r--r--drivers/gpu/drm/drm_fbdev_shmem.c170
-rw-r--r--drivers/gpu/drm/drm_fbdev_ttm.c225
-rw-r--r--drivers/gpu/drm/drm_file.c37
-rw-r--r--drivers/gpu/drm/drm_fourcc.c30
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c4
-rw-r--r--drivers/gpu/drm/drm_gem.c34
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c34
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c45
-rw-r--r--drivers/gpu/drm/drm_internal.h8
-rw-r--r--drivers/gpu/drm/drm_ioctl.c51
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c16
-rw-r--r--drivers/gpu/drm/drm_mm.c4
-rw-r--r--drivers/gpu/drm/drm_mode_config.c9
-rw-r--r--drivers/gpu/drm/drm_mode_object.c1
-rw-r--r--drivers/gpu/drm/drm_modes.c14
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c14
-rw-r--r--drivers/gpu/drm/drm_of.c86
-rw-r--r--drivers/gpu/drm/drm_panel.c3
-rw-r--r--drivers/gpu/drm/drm_panel_backlight_quirks.c94
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c19
-rw-r--r--drivers/gpu/drm/drm_panic.c259
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs26
-rw-r--r--drivers/gpu/drm/drm_prime.c2
-rw-r--r--drivers/gpu/drm/drm_print.c37
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c2
-rw-r--r--drivers/gpu/drm/drm_syncobj.c9
-rw-r--r--drivers/gpu/drm/drm_vblank_work.c2
-rw-r--r--drivers/gpu/drm/drm_writeback.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c42
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c107
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c62
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c19
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h23
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c124
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c99
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c31
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c2
-rw-r--r--drivers/gpu/drm/exynos/regs-decon7.h15
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c30
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_tcon.c2
-rw-r--r--drivers/gpu/drm/gma500/Kconfig3
-rw-r--r--drivers/gpu/drm/gma500/fbdev.c100
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h13
-rw-r--r--drivers/gpu/drm/gud/Kconfig1
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c5
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig3
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile3
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c164
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h63
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h19
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c220
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h28
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c332
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h76
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c118
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c23
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h19
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c41
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c20
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Kconfig1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c6
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c10
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c4
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/i915/Kconfig5
-rw-r--r--drivers/gpu/drm/i915/Makefile12
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c1
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c84
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.h11
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c45
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.h5
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c69
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.h6
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_display_sr.c97
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_display_sr.h14
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c23
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c205
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.h4
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm_regs.h257
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c451
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c231
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h19
-rw-r--r--drivers/hwmon/pmbus/fsp-3y.c2
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c2
-rw-r--r--drivers/hwmon/pmbus/inspur-ipsps.c2
-rw-r--r--drivers/hwmon/pmbus/ir35221.c2
-rw-r--r--drivers/hwmon/pmbus/ir36021.c2
-rw-r--r--drivers/hwmon/pmbus/ir38064.c2
-rw-r--r--drivers/hwmon/pmbus/irps5401.c2
-rw-r--r--drivers/hwmon/pmbus/isl68137.c212
-rw-r--r--drivers/hwmon/pmbus/lm25066.c2
-rw-r--r--drivers/hwmon/pmbus/lt7182s.c2
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c22
-rw-r--r--drivers/hwmon/pmbus/ltc3815.c2
-rw-r--r--drivers/hwmon/pmbus/max15301.c3
-rw-r--r--drivers/hwmon/pmbus/max16064.c2
-rw-r--r--drivers/hwmon/pmbus/max16601.c2
-rw-r--r--drivers/hwmon/pmbus/max20730.c2
-rw-r--r--drivers/hwmon/pmbus/max20751.c2
-rw-r--r--drivers/hwmon/pmbus/max31785.c2
-rw-r--r--drivers/hwmon/pmbus/max34440.c2
-rw-r--r--drivers/hwmon/pmbus/max8688.c2
-rw-r--r--drivers/hwmon/pmbus/mp2856.c2
-rw-r--r--drivers/hwmon/pmbus/mp2888.c2
-rw-r--r--drivers/hwmon/pmbus/mp2891.c6
-rw-r--r--drivers/hwmon/pmbus/mp2975.c2
-rw-r--r--drivers/hwmon/pmbus/mp2993.c6
-rw-r--r--drivers/hwmon/pmbus/mp5023.c2
-rw-r--r--drivers/hwmon/pmbus/mp5920.c2
-rw-r--r--drivers/hwmon/pmbus/mp5990.c2
-rw-r--r--drivers/hwmon/pmbus/mp9941.c6
-rw-r--r--drivers/hwmon/pmbus/mpq7932.c2
-rw-r--r--drivers/hwmon/pmbus/mpq8785.c4
-rw-r--r--drivers/hwmon/pmbus/pim4328.c2
-rw-r--r--drivers/hwmon/pmbus/pli1209bc.c2
-rw-r--r--drivers/hwmon/pmbus/pm6764tr.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus.h4
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c150
-rw-r--r--drivers/hwmon/pmbus/pxe1610.c2
-rw-r--r--drivers/hwmon/pmbus/q54sj108a2.c2
-rw-r--r--drivers/hwmon/pmbus/stpddc60.c2
-rw-r--r--drivers/hwmon/pmbus/tda38640.c2
-rw-r--r--drivers/hwmon/pmbus/tps25990.c436
-rw-r--r--drivers/hwmon/pmbus/tps40422.c2
-rw-r--r--drivers/hwmon/pmbus/tps53679.c2
-rw-r--r--drivers/hwmon/pmbus/tps546d24.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c2
-rw-r--r--drivers/hwmon/pmbus/xdp710.c2
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c2
-rw-r--r--drivers/hwmon/pmbus/xdpe152c4.c2
-rw-r--r--drivers/hwmon/pmbus/zl6100.c2
-rw-r--r--drivers/hwmon/powerz.c8
-rw-r--r--drivers/hwmon/pwm-fan.c49
-rw-r--r--drivers/hwmon/qnap-mcu-hwmon.c364
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c30
-rw-r--r--drivers/hwmon/sch5636.c2
-rw-r--r--drivers/hwmon/sg2042-mcu.c4
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/hwmon/sht4x.c184
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/sl28cpld-hwmon.c9
-rw-r--r--drivers/hwmon/smsc47m1.c2
-rw-r--r--drivers/hwmon/spd5118.c10
-rw-r--r--drivers/hwmon/surface_fan.c10
-rw-r--r--drivers/hwmon/tmp108.c81
-rw-r--r--drivers/hwmon/tmp513.c17
-rw-r--r--drivers/hwmon/tps23861.c2
-rw-r--r--drivers/hwmon/ultra45_env.c2
-rw-r--r--drivers/hwmon/via-cputemp.c2
-rw-r--r--drivers/hwmon/via686a.c2
-rw-r--r--drivers/hwmon/vt1211.c2
-rw-r--r--drivers/hwmon/vt8231.c4
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c2
-rw-r--r--drivers/hwmon/xgene-hwmon.c2
-rw-r--r--drivers/hwspinlock/u8500_hsem.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c113
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c83
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c57
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-self-hosted-trace.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c19
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.c43
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c17
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c2
-rw-r--r--drivers/hwtracing/intel_th/acpi.c2
-rw-r--r--drivers/hwtracing/intel_th/core.c3
-rw-r--r--drivers/hwtracing/intel_th/pci.c9
-rw-r--r--drivers/i2c/Makefile7
-rw-r--r--drivers/i2c/busses/Kconfig80
-rw-r--r--drivers/i2c/busses/Makefile17
-rw-r--r--drivers/i2c/busses/i2c-altera.c2
-rw-r--r--drivers/i2c/busses/i2c-amd-asf-plat.c369
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756-s4882.c245
-rw-r--r--drivers/i2c/busses/i2c-amd756.c8
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c2
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c2
-rw-r--r--drivers/i2c/busses/i2c-au1550.c2
-rw-r--r--drivers/i2c/busses/i2c-axxia.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c2
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c2
-rw-r--r--drivers/i2c/busses/i2c-cadence.c425
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-cgbc.c406
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c2
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c114
-rw-r--r--drivers/i2c/busses/i2c-designware-amdpsp.c10
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c84
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h19
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c47
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c43
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c60
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c13
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c2
-rw-r--r--drivers/i2c/busses/i2c-dln2.c2
-rw-r--r--drivers/i2c/busses/i2c-emev2.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c37
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-gxp.c2
-rw-r--r--drivers/i2c/busses/i2c-highlander.c2
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c137
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c2
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c2
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c813
-rw-r--r--drivers/i2c/busses/i2c-imx.c511
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c2
-rw-r--r--drivers/i2c/busses/i2c-isch.c317
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c2
-rw-r--r--drivers/i2c/busses/i2c-keba.c8
-rw-r--r--drivers/i2c/busses/i2c-kempld.c2
-rw-r--r--drivers/i2c/busses/i2c-ljca.c2
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c2
-rw-r--r--drivers/i2c/busses/i2c-meson.c2
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c124
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c2
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c2
-rw-r--r--drivers/i2c/busses/i2c-mpc.c2
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c2
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c2
-rw-r--r--drivers/i2c/busses/i2c-mxs.c2
-rw-r--r--drivers/i2c/busses/i2c-nforce2-s4985.c240
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c16
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c88
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c451
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-octeon-platdrv.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-opal.c2
-rw-r--r--drivers/i2c/busses/i2c-pasemi-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c51
-rw-r--r--drivers/i2c/busses/i2c-piix4.h44
-rw-r--r--drivers/i2c/busses/i2c-pnx.c6
-rw-r--r--drivers/i2c/busses/i2c-powermac.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c23
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c83
-rw-r--r--drivers/i2c/busses/i2c-qup.c6
-rw-r--r--drivers/i2c/busses/i2c-rcar.c22
-rw-r--r--drivers/i2c/busses/i2c-riic.c138
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/busses/i2c-rtl9300.c423
-rw-r--r--drivers/i2c/busses/i2c-rzv2m.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-scmi.c2
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/busses/i2c-simtec.c2
-rw-r--r--drivers/i2c/busses/i2c-sprd.c2
-rw-r--r--drivers/i2c/busses/i2c-st.c2
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c2
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c2
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c2
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c2
-rw-r--r--drivers/i2c/busses/i2c-versatile.c2
-rw-r--r--drivers/i2c/busses/i2c-viai2c-wmt.c2
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c2
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c283
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c2
-rw-r--r--drivers/i2c/busses/scx200_acb.c2
-rw-r--r--drivers/i2c/i2c-atr.c14
-rw-r--r--drivers/i2c/i2c-core-acpi.c22
-rw-r--r--drivers/i2c/i2c-core-base.c21
-rw-r--r--drivers/i2c/i2c-core-of-prober.c415
-rw-r--r--drivers/i2c/i2c-core-smbus.c11
-rw-r--r--drivers/i2c/i2c-dev.c17
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c8
-rw-r--r--drivers/i2c/i2c-slave-testunit.c26
-rw-r--r--drivers/i2c/i2c-smbus.c22
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c2
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c8
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-mule.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c2
-rw-r--r--drivers/i3c/master.c114
-rw-r--r--drivers/i3c/master/Kconfig11
-rw-r--r--drivers/i3c/master/ast2600-i3c-master.c2
-rw-r--r--drivers/i3c/master/dw-i3c-master.c51
-rw-r--r--drivers/i3c/master/dw-i3c-master.h1
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c5
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/Makefile1
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c21
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dat_v1.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c27
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h2
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c148
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/pio.c2
-rw-r--r--drivers/i3c/master/svc-i3c-master.c138
-rw-r--r--drivers/idle/intel_idle.c55
-rw-r--r--drivers/iio/accel/Kconfig2
-rw-r--r--drivers/iio/accel/adis16201.c2
-rw-r--r--drivers/iio/accel/adis16209.c2
-rw-r--r--drivers/iio/accel/adxl313_core.c16
-rw-r--r--drivers/iio/accel/adxl313_i2c.c2
-rw-r--r--drivers/iio/accel/adxl313_spi.c2
-rw-r--r--drivers/iio/accel/adxl345.h81
-rw-r--r--drivers/iio/accel/adxl345_core.c419
-rw-r--r--drivers/iio/accel/adxl345_i2c.c4
-rw-r--r--drivers/iio/accel/adxl345_spi.c9
-rw-r--r--drivers/iio/accel/adxl355_core.c10
-rw-r--r--drivers/iio/accel/adxl355_i2c.c2
-rw-r--r--drivers/iio/accel/adxl355_spi.c2
-rw-r--r--drivers/iio/accel/adxl367.c4
-rw-r--r--drivers/iio/accel/adxl367_i2c.c2
-rw-r--r--drivers/iio/accel/adxl367_spi.c2
-rw-r--r--drivers/iio/accel/adxl372.c6
-rw-r--r--drivers/iio/accel/adxl372_i2c.c2
-rw-r--r--drivers/iio/accel/adxl372_spi.c2
-rw-r--r--drivers/iio/accel/adxl380.c19
-rw-r--r--drivers/iio/accel/adxl380_i2c.c2
-rw-r--r--drivers/iio/accel/adxl380_spi.c2
-rw-r--r--drivers/iio/accel/bma180.c3
-rw-r--r--drivers/iio/accel/bma220_spi.c3
-rw-r--r--drivers/iio/accel/bma400_core.c11
-rw-r--r--drivers/iio/accel/bma400_i2c.c2
-rw-r--r--drivers/iio/accel/bma400_spi.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c10
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c2
-rw-r--r--drivers/iio/accel/bmc150-accel.h3
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c6
-rw-r--r--drivers/iio/accel/bmi088-accel-i2c.c2
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c28
-rw-r--r--drivers/iio/accel/fxls8962af-i2c.c4
-rw-r--r--drivers/iio/accel/fxls8962af-spi.c2
-rw-r--r--drivers/iio/accel/fxls8962af.h2
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c11
-rw-r--r--drivers/iio/accel/kionix-kx022a-i2c.c6
-rw-r--r--drivers/iio/accel/kionix-kx022a-spi.c6
-rw-r--r--drivers/iio/accel/kionix-kx022a.c182
-rw-r--r--drivers/iio/accel/kionix-kx022a.h14
-rw-r--r--drivers/iio/accel/kxcjk-1013.c442
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c2
-rw-r--r--drivers/iio/accel/kxsd9-spi.c2
-rw-r--r--drivers/iio/accel/kxsd9.c7
-rw-r--r--drivers/iio/accel/mma7455_core.c9
-rw-r--r--drivers/iio/accel/mma7455_i2c.c2
-rw-r--r--drivers/iio/accel/mma7455_spi.c2
-rw-r--r--drivers/iio/accel/mma8452.c5
-rw-r--r--drivers/iio/accel/mma9551.c29
-rw-r--r--drivers/iio/accel/mma9551_core.c36
-rw-r--r--drivers/iio/accel/mma9553.c48
-rw-r--r--drivers/iio/accel/msa311.c3
-rw-r--r--drivers/iio/accel/mxc4005.c3
-rw-r--r--drivers/iio/accel/sca3000.c6
-rw-r--r--drivers/iio/accel/ssp_accel_sensor.c2
-rw-r--r--drivers/iio/accel/st_accel_core.c6
-rw-r--r--drivers/iio/accel/st_accel_i2c.c2
-rw-r--r--drivers/iio/accel/st_accel_spi.c2
-rw-r--r--drivers/iio/accel/stk8312.c3
-rw-r--r--drivers/iio/accel/stk8ba50.c3
-rw-r--r--drivers/iio/adc/Kconfig54
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c2
-rw-r--r--drivers/iio/adc/ad4000.c319
-rw-r--r--drivers/iio/adc/ad4695.c102
-rw-r--r--drivers/iio/adc/ad7091r-base.c11
-rw-r--r--drivers/iio/adc/ad7091r-base.h2
-rw-r--r--drivers/iio/adc/ad7091r5.c2
-rw-r--r--drivers/iio/adc/ad7091r8.c2
-rw-r--r--drivers/iio/adc/ad7124.c224
-rw-r--r--drivers/iio/adc/ad7173.c169
-rw-r--r--drivers/iio/adc/ad7192.c9
-rw-r--r--drivers/iio/adc/ad7266.c2
-rw-r--r--drivers/iio/adc/ad7280a.c14
-rw-r--r--drivers/iio/adc/ad7291.c2
-rw-r--r--drivers/iio/adc/ad7380.c284
-rw-r--r--drivers/iio/adc/ad7606.c1070
-rw-r--r--drivers/iio/adc/ad7606.h134
-rw-r--r--drivers/iio/adc/ad7606_par.c135
-rw-r--r--drivers/iio/adc/ad7606_spi.c197
-rw-r--r--drivers/iio/adc/ad7625.c684
-rw-r--r--drivers/iio/adc/ad7779.c914
-rw-r--r--drivers/iio/adc/ad7780.c4
-rw-r--r--drivers/iio/adc/ad7791.c7
-rw-r--r--drivers/iio/adc/ad7793.c7
-rw-r--r--drivers/iio/adc/ad7887.c4
-rw-r--r--drivers/iio/adc/ad7923.c4
-rw-r--r--drivers/iio/adc/ad7944.c6
-rw-r--r--drivers/iio/adc/ad799x.c2
-rw-r--r--drivers/iio/adc/ad9467.c17
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c218
-rw-r--r--drivers/iio/adc/adi-axi-adc.c4
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c2
-rw-r--r--drivers/iio/adc/at91_adc.c4
-rw-r--r--drivers/iio/adc/axp20x_adc.c62
-rw-r--r--drivers/iio/adc/axp288_adc.c2
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c8
-rw-r--r--drivers/iio/adc/da9150-gpadc.c26
-rw-r--r--drivers/iio/adc/dln2-adc.c23
-rw-r--r--drivers/iio/adc/ep93xx_adc.c2
-rw-r--r--drivers/iio/adc/exynos_adc.c2
-rw-r--r--drivers/iio/adc/gehc-pmc-adc.c228
-rw-r--r--drivers/iio/adc/hi8435.c2
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c2
-rw-r--r--drivers/iio/adc/imx93_adc.c2
-rw-r--r--drivers/iio/adc/ina2xx-adc.c2
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c2
-rw-r--r--drivers/iio/adc/lp8788_adc.c18
-rw-r--r--drivers/iio/adc/ltc2497-core.c19
-rw-r--r--drivers/iio/adc/ltc2497.h2
-rw-r--r--drivers/iio/adc/max1118.c2
-rw-r--r--drivers/iio/adc/max11205.c2
-rw-r--r--drivers/iio/adc/max11410.c2
-rw-r--r--drivers/iio/adc/max1363.c35
-rw-r--r--drivers/iio/adc/max34408.c2
-rw-r--r--drivers/iio/adc/mcp3911.c2
-rw-r--r--drivers/iio/adc/men_z188_adc.c2
-rw-r--r--drivers/iio/adc/meson_saradc.c49
-rw-r--r--drivers/iio/adc/mp2629_adc.c4
-rw-r--r--drivers/iio/adc/mt6360-adc.c2
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c6
-rw-r--r--drivers/iio/adc/npcm_adc.c2
-rw-r--r--drivers/iio/adc/pac1921.c141
-rw-r--r--drivers/iio/adc/pac1934.c2
-rw-r--r--drivers/iio/adc/palmas_gpadc.c4
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c10
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c4
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c7
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c2
-rw-r--r--drivers/iio/adc/rn5t618-adc.c2
-rw-r--r--drivers/iio/adc/rockchip_saradc.c4
-rw-r--r--drivers/iio/adc/rtq6056.c2
-rw-r--r--drivers/iio/adc/rzg2l_adc.c429
-rw-r--r--drivers/iio/adc/sd_adc_modulator.c2
-rw-r--r--drivers/iio/adc/stm32-adc-core.c2
-rw-r--r--drivers/iio/adc/stm32-adc.c2
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c17
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c2
-rw-r--r--drivers/iio/adc/sun20i-gpadc-iio.c7
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c9
-rw-r--r--drivers/iio/adc/ti-adc081c.c2
-rw-r--r--drivers/iio/adc/ti-adc084s021.c2
-rw-r--r--drivers/iio/adc/ti-ads1015.c7
-rw-r--r--drivers/iio/adc/ti-ads1119.c8
-rw-r--r--drivers/iio/adc/ti-ads124s08.c4
-rw-r--r--drivers/iio/adc/ti-ads1298.c4
-rw-r--r--drivers/iio/adc/ti-ads131e08.c2
-rw-r--r--drivers/iio/adc/ti-ads8688.c2
-rw-r--r--drivers/iio/adc/ti-lmp92064.c2
-rw-r--r--drivers/iio/adc/ti-tsc2046.c2
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c10
-rw-r--r--drivers/iio/adc/twl4030-madc.c4
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c2
-rw-r--r--drivers/iio/adc/vf610_adc.c100
-rw-r--r--drivers/iio/adc/xilinx-ams.c2
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c4
-rw-r--r--drivers/iio/adc/xilinx-xadc.h2
-rw-r--r--drivers/iio/addac/ad74115.c18
-rw-r--r--drivers/iio/addac/ad74413r.c102
-rw-r--r--drivers/iio/addac/stx104.c2
-rw-r--r--drivers/iio/afe/iio-rescale.c4
-rw-r--r--drivers/iio/amplifiers/Kconfig1
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c36
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c27
-rw-r--r--drivers/iio/cdc/ad7150.c2
-rw-r--r--drivers/iio/chemical/Kconfig4
-rw-r--r--drivers/iio/chemical/bme680.h13
-rw-r--r--drivers/iio/chemical/bme680_core.c488
-rw-r--r--drivers/iio/chemical/bme680_i2c.c3
-rw-r--r--drivers/iio/chemical/bme680_spi.c3
-rw-r--r--drivers/iio/chemical/ccs811.c2
-rw-r--r--drivers/iio/chemical/ens160_core.c4
-rw-r--r--drivers/iio/chemical/ens160_i2c.c2
-rw-r--r--drivers/iio/chemical/ens160_spi.c2
-rw-r--r--drivers/iio/chemical/scd30_core.c4
-rw-r--r--drivers/iio/chemical/scd30_i2c.c2
-rw-r--r--drivers/iio/chemical/scd30_serial.c2
-rw-r--r--drivers/iio/chemical/scd4x.c2
-rw-r--r--drivers/iio/chemical/sps30.c2
-rw-r--r--drivers/iio/chemical/sps30_i2c.c2
-rw-r--r--drivers/iio/chemical/sps30_serial.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c26
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c12
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c16
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.c24
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c10
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_iio.c20
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c28
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c6
-rw-r--r--drivers/iio/dac/Kconfig51
-rw-r--r--drivers/iio/dac/Makefile4
-rw-r--r--drivers/iio/dac/ad3552r-common.c248
-rw-r--r--drivers/iio/dac/ad3552r-hs.c535
-rw-r--r--drivers/iio/dac/ad3552r-hs.h19
-rw-r--r--drivers/iio/dac/ad3552r.c557
-rw-r--r--drivers/iio/dac/ad3552r.h226
-rw-r--r--drivers/iio/dac/ad5380.c85
-rw-r--r--drivers/iio/dac/ad5421.c2
-rw-r--r--drivers/iio/dac/ad5446.c77
-rw-r--r--drivers/iio/dac/ad5504.c61
-rw-r--r--drivers/iio/dac/ad5592r-base.c4
-rw-r--r--drivers/iio/dac/ad5592r.c2
-rw-r--r--drivers/iio/dac/ad5593r.c2
-rw-r--r--drivers/iio/dac/ad5624r.h5
-rw-r--r--drivers/iio/dac/ad5624r_spi.c71
-rw-r--r--drivers/iio/dac/ad5686-spi.c8
-rw-r--r--drivers/iio/dac/ad5686.c64
-rw-r--r--drivers/iio/dac/ad5686.h6
-rw-r--r--drivers/iio/dac/ad5696-i2c.c8
-rw-r--r--drivers/iio/dac/ad5755.c11
-rw-r--r--drivers/iio/dac/ad5761.c109
-rw-r--r--drivers/iio/dac/ad5770r.c44
-rw-r--r--drivers/iio/dac/ad5791.c205
-rw-r--r--drivers/iio/dac/ad7293.c68
-rw-r--r--drivers/iio/dac/ad8460.c951
-rw-r--r--drivers/iio/dac/ad8801.c81
-rw-r--r--drivers/iio/dac/ad9739a.c2
-rw-r--r--drivers/iio/dac/adi-axi-dac.c453
-rw-r--r--drivers/iio/dac/dpot-dac.c2
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c6
-rw-r--r--drivers/iio/dac/ltc2632.c69
-rw-r--r--drivers/iio/dac/ltc2664.c17
-rw-r--r--drivers/iio/dac/ltc2688.c44
-rw-r--r--drivers/iio/dac/m62332.c2
-rw-r--r--drivers/iio/dac/max517.c2
-rw-r--r--drivers/iio/dac/max5821.c36
-rw-r--r--drivers/iio/dac/mcp4725.c2
-rw-r--r--drivers/iio/dac/rohm-bd79703.c162
-rw-r--r--drivers/iio/dac/stm32-dac-core.c2
-rw-r--r--drivers/iio/dac/stm32-dac.c2
-rw-r--r--drivers/iio/dac/vf610_dac.c2
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.h2
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_buffer.c2
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_events.c32
-rw-r--r--drivers/iio/frequency/Kconfig34
-rw-r--r--drivers/iio/frequency/ad9523.c2
-rw-r--r--drivers/iio/frequency/adf4350.c2
-rw-r--r--drivers/iio/frequency/adf4371.c63
-rw-r--r--drivers/iio/gyro/Kconfig2
-rw-r--r--drivers/iio/gyro/adis16136.c2
-rw-r--r--drivers/iio/gyro/adis16260.c2
-rw-r--r--drivers/iio/gyro/adxrs290.c2
-rw-r--r--drivers/iio/gyro/bmg160_core.c21
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c6
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c16
-rw-r--r--drivers/iio/gyro/fxas21002c_i2c.c2
-rw-r--r--drivers/iio/gyro/fxas21002c_spi.c2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c10
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c4
-rw-r--r--drivers/iio/gyro/ssp_gyro_sensor.c2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c6
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c2
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c2
-rw-r--r--drivers/iio/humidity/am2315.c2
-rw-r--r--drivers/iio/humidity/hdc100x.c2
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c6
-rw-r--r--drivers/iio/humidity/hts221.h2
-rw-r--r--drivers/iio/humidity/hts221_buffer.c3
-rw-r--r--drivers/iio/humidity/hts221_core.c2
-rw-r--r--drivers/iio/humidity/hts221_i2c.c2
-rw-r--r--drivers/iio/humidity/hts221_spi.c2
-rw-r--r--drivers/iio/humidity/htu21.c2
-rw-r--r--drivers/iio/imu/Kconfig15
-rw-r--r--drivers/iio/imu/Makefile3
-rw-r--r--drivers/iio/imu/adis.c20
-rw-r--r--drivers/iio/imu/adis16400.c2
-rw-r--r--drivers/iio/imu/adis16460.c2
-rw-r--r--drivers/iio/imu/adis16475.c2
-rw-r--r--drivers/iio/imu/adis16480.c77
-rw-r--r--drivers/iio/imu/adis_buffer.c4
-rw-r--r--drivers/iio/imu/adis_trigger.c2
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c17
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c2
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c2
-rw-r--r--drivers/iio/imu/bmi270/Kconfig33
-rw-r--r--drivers/iio/imu/bmi270/Makefile7
-rw-r--r--drivers/iio/imu/bmi270/bmi270.h38
-rw-r--r--drivers/iio/imu/bmi270/bmi270_core.c734
-rw-r--r--drivers/iio/imu/bmi270/bmi270_i2c.c66
-rw-r--r--drivers/iio/imu/bmi270/bmi270_spi.c92
-rw-r--r--drivers/iio/imu/bmi323/bmi323.h1
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c47
-rw-r--r--drivers/iio/imu/bmi323/bmi323_i2c.c2
-rw-r--r--drivers/iio/imu/bmi323/bmi323_spi.c2
-rw-r--r--drivers/iio/imu/bno055/bno055.c14
-rw-r--r--drivers/iio/imu/bno055/bno055_i2c.c2
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_core.c2
-rw-r--r--drivers/iio/imu/fxos8700_core.c1
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600.h1
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c4
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c38
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c5
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c19
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c22
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c11
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c67
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c18
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c17
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c1
-rw-r--r--drivers/iio/imu/kmx61.c29
-rw-r--r--drivers/iio/imu/smi240.c621
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig18
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c24
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i3c.c8
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c2
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c4
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c2
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c2
-rw-r--r--drivers/iio/industrialio-acpi.c48
-rw-r--r--drivers/iio/industrialio-backend.c130
-rw-r--r--drivers/iio/industrialio-buffer.c4
-rw-r--r--drivers/iio/industrialio-core.c3
-rw-r--r--drivers/iio/industrialio-gts-helper.c111
-rw-r--r--drivers/iio/inkern.c22
-rw-r--r--drivers/iio/light/Kconfig48
-rw-r--r--drivers/iio/light/Makefile3
-rw-r--r--drivers/iio/light/adjd_s311.c2
-rw-r--r--drivers/iio/light/adux1020.c12
-rw-r--r--drivers/iio/light/al3010.c11
-rw-r--r--drivers/iio/light/apds9300.c20
-rw-r--r--drivers/iio/light/apds9306.c9
-rw-r--r--drivers/iio/light/apds9960.c10
-rw-r--r--drivers/iio/light/as73211.c26
-rw-r--r--drivers/iio/light/bh1745.c56
-rw-r--r--drivers/iio/light/cm32181.c3
-rw-r--r--drivers/iio/light/cm3232.c18
-rw-r--r--drivers/iio/light/cm3605.c2
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/iio/light/gp2ap002.c2
-rw-r--r--drivers/iio/light/gp2ap020a00f.c2
-rw-r--r--drivers/iio/light/hid-sensor-als.c10
-rw-r--r--drivers/iio/light/hid-sensor-prox.c204
-rw-r--r--drivers/iio/light/iqs621-als.c2
-rw-r--r--drivers/iio/light/isl29018.c38
-rw-r--r--drivers/iio/light/isl29125.c2
-rw-r--r--drivers/iio/light/lm3533-als.c8
-rw-r--r--drivers/iio/light/ltr390.c362
-rw-r--r--drivers/iio/light/ltr501.c37
-rw-r--r--drivers/iio/light/ltrf216a.c1
-rw-r--r--drivers/iio/light/max44000.c2
-rw-r--r--drivers/iio/light/max44009.c2
-rw-r--r--drivers/iio/light/opt3001.c195
-rw-r--r--drivers/iio/light/opt4060.c1343
-rw-r--r--drivers/iio/light/rohm-bu27008.c1635
-rw-r--r--drivers/iio/light/rohm-bu27034.c77
-rw-r--r--drivers/iio/light/rpr0521.c16
-rw-r--r--drivers/iio/light/st_uvis25.h2
-rw-r--r--drivers/iio/light/st_uvis25_core.c5
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c2
-rw-r--r--drivers/iio/light/st_uvis25_spi.c2
-rw-r--r--drivers/iio/light/stk3310.c5
-rw-r--r--drivers/iio/light/tcs3414.c2
-rw-r--r--drivers/iio/light/tcs3472.c4
-rw-r--r--drivers/iio/light/tsl2563.c2
-rw-r--r--drivers/iio/light/tsl2591.c2
-rw-r--r--drivers/iio/light/tsl2772.c6
-rw-r--r--drivers/iio/light/us5182d.c2
-rw-r--r--drivers/iio/light/vcnl4000.c5
-rw-r--r--drivers/iio/light/vcnl4035.c2
-rw-r--r--drivers/iio/light/veml3235.c547
-rw-r--r--drivers/iio/light/veml6030.c620
-rw-r--r--drivers/iio/light/veml6070.c201
-rw-r--r--drivers/iio/light/vl6180.c255
-rw-r--r--drivers/iio/magnetometer/Kconfig15
-rw-r--r--drivers/iio/magnetometer/Makefile1
-rw-r--r--drivers/iio/magnetometer/af8133j.c5
-rw-r--r--drivers/iio/magnetometer/ak8974.c4
-rw-r--r--drivers/iio/magnetometer/ak8975.c2
-rw-r--r--drivers/iio/magnetometer/als31300.c494
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c25
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c11
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c11
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c8
-rw-r--r--drivers/iio/magnetometer/hmc5843.h2
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c4
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c2
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c2
-rw-r--r--drivers/iio/magnetometer/mag3110.c2
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c8
-rw-r--r--drivers/iio/magnetometer/rm3100-i2c.c2
-rw-r--r--drivers/iio/magnetometer/rm3100-spi.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c6
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c2
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c15
-rw-r--r--drivers/iio/multiplexer/iio-mux.c84
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c10
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c10
-rw-r--r--drivers/iio/position/hid-sensor-custom-intel-hinge.c10
-rw-r--r--drivers/iio/position/iqs624-pos.c2
-rw-r--r--drivers/iio/pressure/Kconfig4
-rw-r--r--drivers/iio/pressure/bmp280-core.c758
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c6
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c10
-rw-r--r--drivers/iio/pressure/bmp280-spi.c6
-rw-r--r--drivers/iio/pressure/bmp280.h64
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c10
-rw-r--r--drivers/iio/pressure/hsc030pa.c2
-rw-r--r--drivers/iio/pressure/hsc030pa.h2
-rw-r--r--drivers/iio/pressure/hsc030pa_i2c.c2
-rw-r--r--drivers/iio/pressure/hsc030pa_spi.c2
-rw-r--r--drivers/iio/pressure/mpl115.c2
-rw-r--r--drivers/iio/pressure/mpl115_i2c.c2
-rw-r--r--drivers/iio/pressure/mpl115_spi.c2
-rw-r--r--drivers/iio/pressure/mprls0025pa.c2
-rw-r--r--drivers/iio/pressure/mprls0025pa_i2c.c2
-rw-r--r--drivers/iio/pressure/mprls0025pa_spi.c2
-rw-r--r--drivers/iio/pressure/ms5611_core.c4
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c2
-rw-r--r--drivers/iio/pressure/ms5611_spi.c2
-rw-r--r--drivers/iio/pressure/ms5637.c2
-rw-r--r--drivers/iio/pressure/rohm-bm1390.c83
-rw-r--r--drivers/iio/pressure/st_pressure_core.c6
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c2
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c2
-rw-r--r--drivers/iio/pressure/zpa2326.c14
-rw-r--r--drivers/iio/pressure/zpa2326_i2c.c2
-rw-r--r--drivers/iio/pressure/zpa2326_spi.c2
-rw-r--r--drivers/iio/proximity/Kconfig2
-rw-r--r--drivers/iio/proximity/as3935.c2
-rw-r--r--drivers/iio/proximity/aw96103.c4
-rw-r--r--drivers/iio/proximity/cros_ec_mkbp_proximity.c4
-rw-r--r--drivers/iio/proximity/hx9023s.c99
-rw-r--r--drivers/iio/proximity/irsd200.c5
-rw-r--r--drivers/iio/proximity/mb1232.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c2
-rw-r--r--drivers/iio/proximity/srf04.c2
-rw-r--r--drivers/iio/proximity/srf08.c2
-rw-r--r--drivers/iio/proximity/sx9310.c2
-rw-r--r--drivers/iio/proximity/sx9324.c22
-rw-r--r--drivers/iio/proximity/sx9360.c3
-rw-r--r--drivers/iio/proximity/sx9500.c6
-rw-r--r--drivers/iio/proximity/sx_common.c33
-rw-r--r--drivers/iio/proximity/sx_common.h8
-rw-r--r--drivers/iio/proximity/vcnl3020.c2
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c174
-rw-r--r--drivers/iio/resolver/Kconfig3
-rw-r--r--drivers/iio/resolver/ad2s1210.c2
-rw-r--r--drivers/iio/temperature/Kconfig2
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c6
-rw-r--r--drivers/iio/temperature/mcp9600.c2
-rw-r--r--drivers/iio/temperature/tmp006.c136
-rw-r--r--drivers/iio/temperature/tmp007.c2
-rw-r--r--drivers/iio/temperature/tsys01.c2
-rw-r--r--drivers/iio/temperature/tsys02d.c2
-rw-r--r--drivers/iio/test/Kconfig2
-rw-r--r--drivers/iio/test/iio-test-gts.c2
-rw-r--r--drivers/iio/test/iio-test-rescale.c6
-rw-r--r--drivers/iio/trigger/iio-trig-interrupt.c2
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c71
-rw-r--r--drivers/infiniband/core/cache.c35
-rw-r--r--drivers/infiniband/core/cm.c170
-rw-r--r--drivers/infiniband/core/cma.c16
-rw-r--r--drivers/infiniband/core/device.c141
-rw-r--r--drivers/infiniband/core/nldev.c44
-rw-r--r--drivers/infiniband/core/rdma_core.c12
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c30
-rw-r--r--drivers/infiniband/core/ucma.c19
-rw-r--r--drivers/infiniband/core/ud_header.c83
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c2
-rw-r--r--drivers/infiniband/core/uverbs.h29
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c24
-rw-r--r--drivers/infiniband/core/uverbs_main.c43
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c42
-rw-r--r--drivers/infiniband/hw/Makefile2
-rw-r--r--drivers/infiniband/hw/bnxt_re/Makefile3
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h52
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.c138
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.h21
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c13
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c229
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h8
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c832
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c157
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h30
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c64
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c28
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h22
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c181
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h9
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h58
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c9
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c8
-rw-r--r--drivers/infiniband/hw/efa/efa.h8
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h63
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h4
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h6
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c6
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h4
-rw-r--r--drivers/infiniband/hw/efa/efa_io_defs.h106
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c28
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c51
-rw-r--r--drivers/infiniband/hw/erdma/Kconfig2
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h14
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c71
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c26
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c65
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c6
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h135
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c62
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c301
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c568
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h166
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c2
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h1
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h14
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c31
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h2
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c14
-rw-r--r--drivers/infiniband/hw/hns/Kconfig20
-rw-r--r--drivers/infiniband/hw/hns/Makefile9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_debugfs.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c91
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c281
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c77
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c4
-rw-r--r--drivers/infiniband/hw/irdma/cm.c2
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h4
-rw-r--r--drivers/infiniband/hw/irdma/protos.h4
-rw-r--r--drivers/infiniband/hw/irdma/utils.c71
-rw-r--r--drivers/infiniband/hw/mana/device.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c60
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c286
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c12
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c93
-rw-r--r--drivers/infiniband/hw/mlx5/devx.h4
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c37
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c8
-rw-r--r--drivers/infiniband/hw/mlx5/main.c90
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h9
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c17
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c70
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c55
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c18
-rw-r--r--drivers/infiniband/hw/usnic/usnic_abi.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c73
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c66
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c23
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c44
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c32
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h11
-rw-r--r--drivers/infiniband/sw/siw/siw.h7
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c27
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c23
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c34
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c3
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c4
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c6
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c80
-rw-r--r--drivers/input/Kconfig14
-rw-r--r--drivers/input/Makefile1
-rw-r--r--drivers/input/evbug.c100
-rw-r--r--drivers/input/ff-core.c91
-rw-r--r--drivers/input/ff-memless.c18
-rw-r--r--drivers/input/input-mt.c34
-rw-r--r--drivers/input/input-poller.c4
-rw-r--r--drivers/input/input.c480
-rw-r--r--drivers/input/joystick/db9.c30
-rw-r--r--drivers/input/joystick/gamecon.c22
-rw-r--r--drivers/input/joystick/iforce/iforce-ff.c48
-rw-r--r--drivers/input/joystick/iforce/iforce-packets.c57
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c36
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c13
-rw-r--r--drivers/input/joystick/n64joy.c35
-rw-r--r--drivers/input/joystick/sidewinder.c3
-rw-r--r--drivers/input/joystick/turbografx.c22
-rw-r--r--drivers/input/joystick/xpad.c111
-rw-r--r--drivers/input/keyboard/adp5520-keys.c2
-rw-r--r--drivers/input/keyboard/adp5588-keys.c6
-rw-r--r--drivers/input/keyboard/adp5589-keys.c39
-rw-r--r--drivers/input/keyboard/applespi.c72
-rw-r--r--drivers/input/keyboard/atkbd.c10
-rw-r--r--drivers/input/keyboard/cap11xx.c12
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c2
-rw-r--r--drivers/input/keyboard/cypress-sf.c2
-rw-r--r--drivers/input/keyboard/dlink-dir685-touchkeys.c3
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c10
-rw-r--r--drivers/input/keyboard/gpio_keys.c10
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c12
-rw-r--r--drivers/input/keyboard/hilkbd.c4
-rw-r--r--drivers/input/keyboard/imx_keypad.c27
-rw-r--r--drivers/input/keyboard/ipaq-micro-keys.c12
-rw-r--r--drivers/input/keyboard/iqs62x-keys.c2
-rw-r--r--drivers/input/keyboard/lm8323.c52
-rw-r--r--drivers/input/keyboard/locomokbd.c5
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c18
-rw-r--r--drivers/input/keyboard/maple_keyb.c9
-rw-r--r--drivers/input/keyboard/matrix_keypad.c19
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c45
-rw-r--r--drivers/input/keyboard/mtk-pmic-keys.c17
-rw-r--r--drivers/input/keyboard/omap-keypad.c18
-rw-r--r--drivers/input/keyboard/omap4-keypad.c6
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c8
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c16
-rw-r--r--drivers/input/keyboard/samsung-keypad.c2
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/keyboard/spear-keyboard.c9
-rw-r--r--drivers/input/keyboard/st-keyscan.c19
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c2
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c8
-rw-r--r--drivers/input/keyboard/sunkbd.c5
-rw-r--r--drivers/input/misc/88pm80x_onkey.c2
-rw-r--r--drivers/input/misc/Kconfig12
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ad714x.c12
-rw-r--r--drivers/input/misc/ati_remote2.c57
-rw-r--r--drivers/input/misc/cm109.c167
-rw-r--r--drivers/input/misc/cma3000_d0x.c16
-rw-r--r--drivers/input/misc/cs40l50-vibra.c6
-rw-r--r--drivers/input/misc/da7280.c26
-rw-r--r--drivers/input/misc/da9052_onkey.c4
-rw-r--r--drivers/input/misc/da9055_onkey.c4
-rw-r--r--drivers/input/misc/drv260x.c50
-rw-r--r--drivers/input/misc/drv2665.c46
-rw-r--r--drivers/input/misc/drv2667.c46
-rw-r--r--drivers/input/misc/ibm-panel.c5
-rw-r--r--drivers/input/misc/ideapad_slidebar.c30
-rw-r--r--drivers/input/misc/ims-pcu.c2
-rw-r--r--drivers/input/misc/iqs269a.c55
-rw-r--r--drivers/input/misc/iqs626a.c22
-rw-r--r--drivers/input/misc/iqs7222.c30
-rw-r--r--drivers/input/misc/kxtj9.c16
-rw-r--r--drivers/input/misc/m68kspkr.c2
-rw-r--r--drivers/input/misc/max77693-haptic.c3
-rw-r--r--drivers/input/misc/max8997_haptic.c17
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c2
-rw-r--r--drivers/input/misc/mma8450.c16
-rw-r--r--drivers/input/misc/nxp-bbnsm-pwrkey.c8
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c2
-rw-r--r--drivers/input/misc/pcap_keys.c2
-rw-r--r--drivers/input/misc/pcf50633-input.c2
-rw-r--r--drivers/input/misc/pcspkr.c2
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c2
-rw-r--r--drivers/input/misc/powermate.c11
-rw-r--r--drivers/input/misc/pwm-beeper.c12
-rw-r--r--drivers/input/misc/qnap-mcu-input.c153
-rw-r--r--drivers/input/misc/regulator-haptic.c27
-rw-r--r--drivers/input/misc/rotary_encoder.c23
-rw-r--r--drivers/input/misc/soc_button_array.c4
-rw-r--r--drivers/input/misc/sparcspkr.c59
-rw-r--r--drivers/input/misc/tps65219-pwrbutton.c2
-rw-r--r--drivers/input/misc/twl4030-vibra.c11
-rw-r--r--drivers/input/misc/twl6040-vibra.c8
-rw-r--r--drivers/input/misc/wistron_btns.c4
-rw-r--r--drivers/input/misc/wm831x-on.c2
-rw-r--r--drivers/input/misc/yealink.c4
-rw-r--r--drivers/input/mouse/alps.c4
-rw-r--r--drivers/input/mouse/amimouse.c2
-rw-r--r--drivers/input/mouse/byd.c5
-rw-r--r--drivers/input/mouse/elan_i2c_core.c231
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c14
-rw-r--r--drivers/input/mouse/psmouse-smbus.c28
-rw-r--r--drivers/input/mouse/synaptics.c62
-rw-r--r--drivers/input/mouse/synaptics.h3
-rw-r--r--drivers/input/rmi4/rmi_f03.c4
-rw-r--r--drivers/input/rmi4/rmi_f34.c37
-rw-r--r--drivers/input/serio/altera_ps2.c2
-rw-r--r--drivers/input/serio/ams_delta_serio.c2
-rw-r--r--drivers/input/serio/apbps2.c2
-rw-r--r--drivers/input/serio/arc_ps2.c2
-rw-r--r--drivers/input/serio/ct82c710.c2
-rw-r--r--drivers/input/serio/gscps2.c116
-rw-r--r--drivers/input/serio/hyperv-keyboard.c38
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h2
-rw-r--r--drivers/input/serio/i8042-sparcio.h16
-rw-r--r--drivers/input/serio/i8042.c346
-rw-r--r--drivers/input/serio/ioc3kbd.c2
-rw-r--r--drivers/input/serio/libps2.c28
-rw-r--r--drivers/input/serio/maceps2.c2
-rw-r--r--drivers/input/serio/olpc_apsp.c2
-rw-r--r--drivers/input/serio/ps2-gpio.c6
-rw-r--r--drivers/input/serio/ps2mult.c25
-rw-r--r--drivers/input/serio/q40kbd.c12
-rw-r--r--drivers/input/serio/rpckbd.c2
-rw-r--r--drivers/input/serio/sa1111ps2.c8
-rw-r--r--drivers/input/serio/serio.c165
-rw-r--r--drivers/input/serio/serio_raw.c125
-rw-r--r--drivers/input/serio/serport.c27
-rw-r--r--drivers/input/serio/sun4i-ps2.c10
-rw-r--r--drivers/input/serio/userio.c139
-rw-r--r--drivers/input/serio/xilinx_ps2.c17
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c86
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c20
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/ads7846.c14
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c2
-rw-r--r--drivers/input/touchscreen/bcm_iproc_tsc.c2
-rw-r--r--drivers/input/touchscreen/da9052_tsi.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c21
-rw-r--r--drivers/input/touchscreen/egalax_ts.c3
-rw-r--r--drivers/input/touchscreen/elo.c8
-rw-r--r--drivers/input/touchscreen/ili210x.c2
-rw-r--r--drivers/input/touchscreen/imagis.c2
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c2
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c2
-rw-r--r--drivers/input/touchscreen/novatek-nvt-ts.c67
-rw-r--r--drivers/input/touchscreen/pcap_ts.c2
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c4
-rw-r--r--drivers/input/touchscreen/rohm_bu21023.c4
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c6
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c4
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c4
-rw-r--r--drivers/input/touchscreen/ts4800-ts.c5
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c2
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c6
-rw-r--r--drivers/input/touchscreen/zinitix.c34
-rw-r--r--drivers/interconnect/core.c4
-rw-r--r--drivers/interconnect/icc-clk.c10
-rw-r--r--drivers/interconnect/imx/imx8mm.c2
-rw-r--r--drivers/interconnect/imx/imx8mn.c2
-rw-r--r--drivers/interconnect/imx/imx8mp.c2
-rw-r--r--drivers/interconnect/imx/imx8mq.c2
-rw-r--r--drivers/interconnect/mediatek/mt8183.c2
-rw-r--r--drivers/interconnect/mediatek/mt8195.c2
-rw-r--r--drivers/interconnect/qcom/Kconfig38
-rw-r--r--drivers/interconnect/qcom/Makefile8
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c2
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c5
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h2
-rw-r--r--drivers/interconnect/qcom/msm8909.c2
-rw-r--r--drivers/interconnect/qcom/msm8916.c2
-rw-r--r--drivers/interconnect/qcom/msm8937.c10
-rw-r--r--drivers/interconnect/qcom/msm8939.c2
-rw-r--r--drivers/interconnect/qcom/msm8953.c2
-rw-r--r--drivers/interconnect/qcom/msm8974.c2
-rw-r--r--drivers/interconnect/qcom/msm8976.c2
-rw-r--r--drivers/interconnect/qcom/msm8996.c2
-rw-r--r--drivers/interconnect/qcom/osm-l3.c2
-rw-r--r--drivers/interconnect/qcom/qcm2290.c2
-rw-r--r--drivers/interconnect/qcom/qcs404.c2
-rw-r--r--drivers/interconnect/qcom/qcs615.c1563
-rw-r--r--drivers/interconnect/qcom/qcs615.h128
-rw-r--r--drivers/interconnect/qcom/qcs8300.c2088
-rw-r--r--drivers/interconnect/qcom/qcs8300.h177
-rw-r--r--drivers/interconnect/qcom/qdu1000.c2
-rw-r--r--drivers/interconnect/qcom/sa8775p.c2
-rw-r--r--drivers/interconnect/qcom/sar2130p.c1930
-rw-r--r--drivers/interconnect/qcom/sc7180.c2
-rw-r--r--drivers/interconnect/qcom/sc7280.c6
-rw-r--r--drivers/interconnect/qcom/sc8180x.c2
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c2
-rw-r--r--drivers/interconnect/qcom/sdm660.c2
-rw-r--r--drivers/interconnect/qcom/sdm670.c2
-rw-r--r--drivers/interconnect/qcom/sdm845.c2
-rw-r--r--drivers/interconnect/qcom/sdx55.c2
-rw-r--r--drivers/interconnect/qcom/sdx65.c2
-rw-r--r--drivers/interconnect/qcom/sdx75.c2
-rw-r--r--drivers/interconnect/qcom/sm6115.c2
-rw-r--r--drivers/interconnect/qcom/sm6350.c2
-rw-r--r--drivers/interconnect/qcom/sm7150.c2
-rw-r--r--drivers/interconnect/qcom/sm8150.c2
-rw-r--r--drivers/interconnect/qcom/sm8250.c2
-rw-r--r--drivers/interconnect/qcom/sm8350.c2
-rw-r--r--drivers/interconnect/qcom/sm8450.c2
-rw-r--r--drivers/interconnect/qcom/sm8550.c2
-rw-r--r--drivers/interconnect/qcom/sm8650.c2
-rw-r--r--drivers/interconnect/qcom/sm8750.c1705
-rw-r--r--drivers/interconnect/qcom/smd-rpm.c2
-rw-r--r--drivers/interconnect/qcom/x1e80100.c2
-rw-r--r--drivers/interconnect/samsung/exynos.c2
-rw-r--r--drivers/iommu/Kconfig22
-rw-r--r--drivers/iommu/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h20
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h62
-rw-r--r--drivers/iommu/amd/init.c314
-rw-r--r--drivers/iommu/amd/io_pgtable.c11
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c3
-rw-r--r--drivers/iommu/amd/iommu.c941
-rw-r--r--drivers/iommu/amd/pasid.c9
-rw-r--r--drivers/iommu/apple-dart.c2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/Makefile1
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c395
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c20
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c458
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h125
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c13
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-impl.c9
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c121
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.h3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c50
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.h2
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c4
-rw-r--r--drivers/iommu/hyperv-iommu.c4
-rw-r--r--drivers/iommu/intel/Kconfig2
-rw-r--r--drivers/iommu/intel/Makefile4
-rw-r--r--drivers/iommu/intel/cache.c45
-rw-r--r--drivers/iommu/intel/cap_audit.c217
-rw-r--r--drivers/iommu/intel/cap_audit.h131
-rw-r--r--drivers/iommu/intel/dmar.c15
-rw-r--r--drivers/iommu/intel/iommu.c603
-rw-r--r--drivers/iommu/intel/iommu.h62
-rw-r--r--drivers/iommu/intel/irq_remapping.c13
-rw-r--r--drivers/iommu/intel/nested.c64
-rw-r--r--drivers/iommu/intel/pasid.c432
-rw-r--r--drivers/iommu/intel/pasid.h28
-rw-r--r--drivers/iommu/intel/prq.c396
-rw-r--r--drivers/iommu/intel/svm.c433
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c149
-rw-r--r--drivers/iommu/io-pgtable-arm.c368
-rw-r--r--drivers/iommu/iommu-sysfs.c2
-rw-r--r--drivers/iommu/iommu.c310
-rw-r--r--drivers/iommu/iommufd/Kconfig4
-rw-r--r--drivers/iommu/iommufd/Makefile6
-rw-r--r--drivers/iommu/iommufd/device.c32
-rw-r--r--drivers/iommu/iommufd/driver.c53
-rw-r--r--drivers/iommu/iommufd/fault.c55
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c136
-rw-r--r--drivers/iommu/iommufd/io_pagetable.c105
-rw-r--r--drivers/iommu/iommufd/io_pagetable.h26
-rw-r--r--drivers/iommu/iommufd/ioas.c259
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h87
-rw-r--r--drivers/iommu/iommufd/iommufd_test.h32
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c10
-rw-r--r--drivers/iommu/iommufd/main.c97
-rw-r--r--drivers/iommu/iommufd/pages.c319
-rw-r--r--drivers/iommu/iommufd/selftest.c388
-rw-r--r--drivers/iommu/iommufd/vfio_compat.c13
-rw-r--r--drivers/iommu/iommufd/viommu.c157
-rw-r--r--drivers/iommu/iova.c8
-rw-r--r--drivers/iommu/ipmmu-vmsa.c2
-rw-r--r--drivers/iommu/msm_iommu.c51
-rw-r--r--drivers/iommu/mtk_iommu.c13
-rw-r--r--drivers/iommu/mtk_iommu_v1.c5
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/omap-iommu.c28
-rw-r--r--drivers/iommu/riscv/Kconfig20
-rw-r--r--drivers/iommu/riscv/Makefile3
-rw-r--r--drivers/iommu/riscv/iommu-bits.h784
-rw-r--r--drivers/iommu/riscv/iommu-pci.c128
-rw-r--r--drivers/iommu/riscv/iommu-platform.c164
-rw-r--r--drivers/iommu/riscv/iommu.c1669
-rw-r--r--drivers/iommu/riscv/iommu.h89
-rw-r--r--drivers/iommu/rockchip-iommu.c3
-rw-r--r--drivers/iommu/s390-iommu.c73
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c117
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h8
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c9
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c632
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h22
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c95
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_queue.c65
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_queue.h1
-rw-r--r--drivers/net/ethernet/google/Kconfig1
-rw-r--r--drivers/net/ethernet/google/gve/Makefile3
-rw-r--r--drivers/net/ethernet/google/gve/gve.h37
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c18
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c311
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c143
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c314
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c46
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c1
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig18
-rw-r--r--drivers/net/ethernet/hisilicon/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/Makefile9
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h160
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c160
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c134
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c198
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c311
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h63
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c127
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c408
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c237
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h182
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c409
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h39
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c181
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h30
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c31
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c71
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c96
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c54
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c97
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_regs.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c49
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c2
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c2
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c1
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c44
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c94
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c49
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c49
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c49
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c51
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h3
-rw-r--r--drivers/net/ethernet/intel/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c15
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c17
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c21
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c120
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c458
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c249
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h40
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c30
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h23
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c245
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_prototype.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c157
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c23
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c550
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.h71
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.c (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.c)8
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.h (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h129
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c541
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c360
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c109
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c186
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h49
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c293
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_parser_rt.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c1590
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h149
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c576
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h92
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c154
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c434
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c57
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c32
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.c6
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c5
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c17
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c34
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c37
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h58
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c284
-rw-r--r--drivers/net/ethernet/intel/igb/igb_xsk.c562
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h3
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_diag.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c316
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c119
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c50
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c2658
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h81
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c462
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h87
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h1074
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h20
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c35
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h4
-rw-r--r--drivers/net/ethernet/intel/libeth/rx.c8
-rw-r--r--drivers/net/ethernet/intel/libie/rx.c4
-rw-r--r--drivers/net/ethernet/korina.c2
-rw-r--r--drivers/net/ethernet/lantiq_etop.c27
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c2
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c56
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c13
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c148
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c154
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c70
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c68
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c23
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c91
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c60
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c25
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h6
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c9
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c70
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h79
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c87
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h39
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c45
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c117
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c35
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c124
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c132
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c468
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c1056
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h265
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c176
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h118
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c49
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c88
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c322
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c97
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c867
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.h54
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c1
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c16
-rw-r--r--drivers/net/ethernet/marvell/skge.c8
-rw-r--r--drivers/net/ethernet/marvell/sky2.c9
-rw-r--r--drivers/net/ethernet/mediatek/airoha_eth.c719
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c14
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c292
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c155
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c1072
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c662
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_pool.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c377
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c)161
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h)15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c)4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/buddy.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c)76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h)24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c)4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c)97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h)19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c)37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/context.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h)17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c)48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/debug.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c)8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h)8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c1377
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c450
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/internal.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h)37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c)220
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h)19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c)4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h)2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c)8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/prm.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h)42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c)145
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/rule.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h)16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c)67
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c)24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c)2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/vport.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c)4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h)19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c)6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c)207
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h236
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c)76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c221
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c)37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h)40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h)2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c211
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/txheader.h63
-rw-r--r--drivers/net/ethernet/meta/Kconfig1
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile8
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h41
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.c148
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h122
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c68
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c536
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c160
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c193
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hwmon.c81
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c53
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c72
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h7
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c104
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h23
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c32
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c148
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_time.c303
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c406
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h19
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c20
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c21
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c46
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c35
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig7
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.c357
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x.h82
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_calendar.c191
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_fdma.c406
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_regs.c222
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_rgmii.c224
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_ag_api.c3843
-rw-r--r--drivers/net/ethernet/microchip/sparx5/lan969x/lan969x_vcap_impl.c85
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_calendar.c126
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c5
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c34
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c80
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c10
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c362
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h243
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h4750
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c25
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c39
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c35
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c15
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_police.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c181
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h28
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c49
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c58
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c11
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_regs.c222
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_regs.h247
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c25
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c33
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c9
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c48
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h21
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c47
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c19
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c61
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c116
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c66
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c54
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c18
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c248
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c37
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c2
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c2
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c6
-rw-r--r--drivers/net/ethernet/ni/nixge.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c32
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/oa_tc6.c11
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c44
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h22
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c13
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c50
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c34
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c60
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c69
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c22
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c4
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c56
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h3
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h5
-rw-r--r--drivers/net/ethernet/realtek/r8169_firmware.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c507
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c74
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h10
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c67
-rw-r--r--drivers/net/ethernet/renesas/ravb.h6
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c151
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c276
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h75
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c1
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c6
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c117
-rw-r--r--drivers/net/ethernet/sfc/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c9
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h7
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c16
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c49
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h1
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c34
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c22
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c20
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h7
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/io.h24
-rw-r--r--drivers/net/ethernet/sfc/mae.c11
-rw-r--r--drivers/net/ethernet/sfc/mae.h1
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c76
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h10
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h51
-rw-r--r--drivers/net/ethernet/sfc/nic.c9
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c7
-rw-r--r--drivers/net/ethernet/sfc/ptp.h3
-rw-r--r--drivers/net/ethernet/sfc/rx.c5
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c8
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c46
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h4
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.c14
-rw-r--r--drivers/net/ethernet/sfc/siena/nic_common.h5
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h2
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c5
-rw-r--r--drivers/net/ethernet/sfc/siena/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c14
-rw-r--r--drivers/net/ethernet/sfc/tx.h3
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c33
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h4
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c2
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c2
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c94
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c202
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c273
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c150
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c413
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.h33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c548
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h1
-rw-r--r--drivers/net/ethernet/sun/niu.c34
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c737
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h15
-rw-r--r--drivers/net/ethernet/ti/cpsw.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c80
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c2
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c8
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c25
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c43
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h1
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c465
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h27
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c28
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.h1
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c2
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c9
-rw-r--r--drivers/net/ethernet/via/via-rhine.c13
-rw-r--r--drivers/net/ethernet/via/via-velocity.c8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c24
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c24
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c1
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c188
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h9
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c51
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c23
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/fjes/fjes_ethtool.c64
-rw-r--r--drivers/net/fjes/fjes_main.c2
-rw-r--r--drivers/net/geneve.c18
-rw-r--r--drivers/net/gtp.c68
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c1
-rw-r--r--drivers/net/hamradio/scc.c4
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c16
-rw-r--r--drivers/net/hyperv/netvsc_drv.c32
-rw-r--r--drivers/net/hyperv/rndis_filter.c9
-rw-r--r--drivers/platform/x86/hp/Kconfig1
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c14
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c11
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c455
-rw-r--r--drivers/platform/x86/hp/hp_accel.c6
-rw-r--r--drivers/platform/x86/hp/tc1100-wmi.c2
-rw-r--r--drivers/platform/x86/huawei-wmi.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c54
-rw-r--r--drivers/platform/x86/inspur_platform_profile.c43
-rw-r--r--drivers/platform/x86/intel/Kconfig3
-rw-r--r--drivers/platform/x86/intel/Makefile68
-rw-r--r--drivers/platform/x86/intel/bxtwc_tmu.c24
-rw-r--r--drivers/platform/x86/intel/bytcrc_pwrsrc.c81
-rw-r--r--drivers/platform/x86/intel/chtdc_ti_pwrbtn.c2
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c2
-rw-r--r--drivers/platform/x86/intel/hid.c9
-rw-r--r--drivers/platform/x86/intel/ifs/core.c1
-rw-r--r--drivers/platform/x86/intel/ifs/ifs.h9
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c18
-rw-r--r--drivers/platform/x86/intel/int1092/intel_sar.c2
-rw-r--r--drivers/platform/x86/intel/int3472/common.c2
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c26
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c3
-rw-r--r--drivers/platform/x86/intel/mrfld_pwrbtn.c2
-rw-r--r--drivers/platform/x86/intel/plr_tpmi.c (renamed from drivers/platform/x86/intel/intel_plr_tpmi.c)6
-rw-r--r--drivers/platform/x86/intel/pmc/adl.c2
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c3
-rw-r--r--drivers/platform/x86/intel/pmc/cnp.c55
-rw-r--r--drivers/platform/x86/intel/pmc/core.c59
-rw-r--r--drivers/platform/x86/intel/pmc/core.h8
-rw-r--r--drivers/platform/x86/intel/pmc/core_ssram.c12
-rw-r--r--drivers/platform/x86/intel/pmc/icl.c2
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c3
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c5
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c2
-rw-r--r--drivers/platform/x86/intel/pmt/class.c24
-rw-r--r--drivers/platform/x86/intel/pmt/class.h2
-rw-r--r--drivers/platform/x86/intel/pmt/crashlog.c2
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c18
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c33
-rw-r--r--drivers/platform/x86/intel/sdsi.c36
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c1
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi.c2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c16
-rw-r--r--drivers/platform/x86/intel/telemetry/pltdrv.c2
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c9
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c8
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c4
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c2
-rw-r--r--drivers/platform/x86/intel/vbtn.c2
-rw-r--r--drivers/platform/x86/intel/vsec.c12
-rw-r--r--drivers/platform/x86/intel/vsec_tpmi.c (renamed from drivers/platform/x86/intel/tpmi.c)14
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c140
-rw-r--r--drivers/platform/x86/lenovo-wmi-camera.c69
-rw-r--r--drivers/platform/x86/lenovo-ymc.c2
-rw-r--r--drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c7
-rw-r--r--drivers/platform/x86/lenovo-yogabook.c2
-rw-r--r--drivers/platform/x86/mlx-platform.c4
-rw-r--r--drivers/platform/x86/msi-laptop.c6
-rw-r--r--drivers/platform/x86/p2sb.c78
-rw-r--r--drivers/platform/x86/panasonic-laptop.c14
-rw-r--r--drivers/platform/x86/quickstart.c1
-rw-r--r--drivers/platform/x86/samsung-laptop.c2
-rw-r--r--drivers/platform/x86/samsung-q10.c2
-rw-r--r--drivers/platform/x86/sel3350-platform.c2
-rw-r--r--drivers/platform/x86/serdev_helpers.h60
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c14
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c2
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c2
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c2
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt.c2
-rw-r--r--drivers/platform/x86/think-lmi.c162
-rw-r--r--drivers/platform/x86/think-lmi.h6
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c78
-rw-r--r--drivers/platform/x86/toshiba_acpi.c4
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c26
-rw-r--r--drivers/platform/x86/wmi-bmof.c75
-rw-r--r--drivers/platform/x86/wmi.c98
-rw-r--r--drivers/platform/x86/x86-android-tablets/Kconfig4
-rw-r--r--drivers/platform/x86/x86-android-tablets/Makefile2
-rw-r--r--drivers/platform/x86/x86-android-tablets/asus.c4
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c85
-rw-r--r--drivers/platform/x86/x86-android-tablets/dmi.c10
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c8
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c175
-rw-r--r--drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c261
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h13
-rw-r--r--drivers/platform/x86/xo1-rfkill.c2
-rw-r--r--drivers/pmdomain/arm/scmi_perf_domain.c3
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c8
-rw-r--r--drivers/pmdomain/core.c155
-rw-r--r--drivers/pmdomain/imx/gpc.c8
-rw-r--r--drivers/pmdomain/imx/gpcv2.c14
-rw-r--r--drivers/pmdomain/imx/imx8m-blk-ctrl.c3
-rw-r--r--drivers/pmdomain/imx/imx8mp-blk-ctrl.c5
-rw-r--r--drivers/pmdomain/imx/imx93-blk-ctrl.c6
-rw-r--r--drivers/pmdomain/imx/imx93-pd.c2
-rw-r--r--drivers/pmdomain/mediatek/Kconfig12
-rw-r--r--drivers/pmdomain/mediatek/Makefile8
-rw-r--r--drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c144
-rw-r--r--drivers/pmdomain/mediatek/mt6735-pm-domains.h96
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c17
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.h2
-rw-r--r--drivers/pmdomain/qcom/cpr.c4
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c87
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c106
-rw-r--r--drivers/pmdomain/xilinx/zynqmp-pm-domains.c2
-rw-r--r--drivers/power/reset/Kconfig5
-rw-r--r--drivers/power/reset/as3722-poweroff.c2
-rw-r--r--drivers/power/reset/at91-poweroff.c2
-rw-r--r--drivers/power/reset/at91-reset.c2
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c3
-rw-r--r--drivers/power/reset/gpio-poweroff.c8
-rw-r--r--drivers/power/reset/keystone-reset.c20
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c2
-rw-r--r--drivers/power/reset/qnap-poweroff.c2
-rw-r--r--drivers/power/reset/syscon-reboot.c3
-rw-r--r--drivers/power/sequencing/Kconfig1
-rw-r--r--drivers/power/sequencing/pwrseq-qcom-wcn.c130
-rw-r--r--drivers/power/supply/88pm860x_battery.c8
-rw-r--r--drivers/power/supply/Kconfig19
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/ab8500_bmdata.c4
-rw-r--r--drivers/power/supply/ab8500_btemp.c9
-rw-r--r--drivers/power/supply/ab8500_chargalg.c7
-rw-r--r--drivers/power/supply/ab8500_charger.c7
-rw-r--r--drivers/power/supply/ab8500_fg.c35
-rw-r--r--drivers/power/supply/acer_a500_battery.c9
-rw-r--r--drivers/power/supply/act8945a_charger.c2
-rw-r--r--drivers/power/supply/adp5061.c2
-rw-r--r--drivers/power/supply/apm_power.c6
-rw-r--r--drivers/power/supply/axp20x_battery.c33
-rw-r--r--drivers/power/supply/axp20x_usb_power.c33
-rw-r--r--drivers/power/supply/bq2415x_charger.c36
-rw-r--r--drivers/power/supply/bq24190_charger.c41
-rw-r--r--drivers/power/supply/bq24257_charger.c8
-rw-r--r--drivers/power/supply/bq27xxx_battery.c79
-rw-r--r--drivers/power/supply/charger-manager.c10
-rw-r--r--drivers/power/supply/cpcap-battery.c2
-rw-r--r--drivers/power/supply/cpcap-charger.c5
-rw-r--r--drivers/power/supply/cros_charge-control.c218
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c4
-rw-r--r--drivers/power/supply/da9030_battery.c9
-rw-r--r--drivers/power/supply/da9052-battery.c2
-rw-r--r--drivers/power/supply/da9150-charger.c2
-rw-r--r--drivers/power/supply/ds2760_battery.c8
-rw-r--r--drivers/power/supply/ds2780_battery.c24
-rw-r--r--drivers/power/supply/ds2781_battery.c24
-rw-r--r--drivers/power/supply/ds2782_battery.c89
-rw-r--r--drivers/power/supply/generic-adc-battery.c4
-rw-r--r--drivers/power/supply/gpio-charger.c21
-rw-r--r--drivers/power/supply/ip5xxx_power.c572
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c2
-rw-r--r--drivers/power/supply/isp1704_charger.c2
-rw-r--r--drivers/power/supply/lenovo_yoga_c630_battery.c14
-rw-r--r--drivers/power/supply/lp8788-charger.c2
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c440
-rw-r--r--drivers/power/supply/max14577_charger.c2
-rw-r--r--drivers/power/supply/max17042_battery.c203
-rw-r--r--drivers/power/supply/max1720x_battery.c66
-rw-r--r--drivers/power/supply/max77650-charger.c2
-rw-r--r--drivers/power/supply/max77693_charger.c2
-rw-r--r--drivers/power/supply/max77976_charger.c3
-rw-r--r--drivers/power/supply/max8925_power.c4
-rw-r--r--drivers/power/supply/mm8013.c2
-rw-r--r--drivers/power/supply/olpc_battery.c11
-rw-r--r--drivers/power/supply/pcf50633-charger.c2
-rw-r--r--drivers/power/supply/pmu_battery.c1
-rw-r--r--drivers/power/supply/power_supply.h53
-rw-r--r--drivers/power/supply/power_supply_core.c353
-rw-r--r--drivers/power/supply/power_supply_hwmon.c51
-rw-r--r--drivers/power/supply/power_supply_sysfs.c204
-rw-r--r--drivers/power/supply/qcom_battmgr.c2
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c2
-rw-r--r--drivers/power/supply/qcom_smbb.c8
-rw-r--r--drivers/power/supply/rk817_charger.c112
-rw-r--r--drivers/power/supply/rt9471.c52
-rw-r--r--drivers/power/supply/samsung-sdi-battery.c10
-rw-r--r--drivers/power/supply/sbs-battery.c5
-rw-r--r--drivers/power/supply/sc2731_charger.c2
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c12
-rw-r--r--drivers/power/supply/stc3117_fuel_gauge.c612
-rw-r--r--drivers/power/supply/surface_battery.c4
-rw-r--r--drivers/power/supply/test_power.c113
-rw-r--r--drivers/power/supply/tps65090-charger.c2
-rw-r--r--drivers/power/supply/tps65217_charger.c2
-rw-r--r--drivers/power/supply/twl4030_charger.c2
-rw-r--r--drivers/power/supply/twl6030_charger.c581
-rw-r--r--drivers/power/supply/ug3105_battery.c4
-rw-r--r--drivers/power/supply/wm831x_power.c2
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/power/supply/wm97xx_battery.c2
-rw-r--r--drivers/powercap/dtpm_devfreq.c2
-rw-r--r--drivers/powercap/idle_inject.c16
-rw-r--r--drivers/powercap/intel_rapl_common.c1
-rw-r--r--drivers/powercap/intel_rapl_msr.c4
-rw-r--r--drivers/powercap/intel_rapl_tpmi.c21
-rw-r--r--drivers/powercap/powercap_sys.c3
-rw-r--r--drivers/pps/Makefile3
-rw-r--r--drivers/pps/clients/pps-gpio.c12
-rw-r--r--drivers/pps/clients/pps-ktimer.c4
-rw-r--r--drivers/pps/clients/pps-ldisc.c6
-rw-r--r--drivers/pps/clients/pps_parport.c4
-rw-r--r--drivers/pps/generators/Kconfig22
-rw-r--r--drivers/pps/generators/Makefile4
-rw-r--r--drivers/pps/generators/pps_gen-dummy.c96
-rw-r--r--drivers/pps/generators/pps_gen.c344
-rw-r--r--drivers/pps/generators/sysfs.c75
-rw-r--r--drivers/pps/kapi.c10
-rw-r--r--drivers/pps/kc.c10
-rw-r--r--drivers/pps/pps.c127
-rw-r--r--drivers/ps3/ps3-lpm.c2
-rw-r--r--drivers/ps3/ps3-sys-manager.c2
-rw-r--r--drivers/ps3/ps3-vuart.c4
-rw-r--r--drivers/ps3/sys-manager-core.c2
-rw-r--r--drivers/ptp/Kconfig28
-rw-r--r--drivers/ptp/Makefile2
-rw-r--r--drivers/ptp/ptp_chardev.c4
-rw-r--r--drivers/ptp/ptp_clock.c13
-rw-r--r--drivers/ptp/ptp_clockmatrix.c2
-rw-r--r--drivers/ptp/ptp_dte.c2
-rw-r--r--drivers/ptp/ptp_fc3.c7
-rw-r--r--drivers/ptp/ptp_idt82p33.c2
-rw-r--r--drivers/ptp/ptp_ines.c2
-rw-r--r--drivers/ptp/ptp_kvm_x86.c6
-rw-r--r--drivers/ptp/ptp_ocp.c18
-rw-r--r--drivers/ptp/ptp_pch.c6
-rw-r--r--drivers/ptp/ptp_qoriq.c2
-rw-r--r--drivers/ptp/ptp_s390.c129
-rw-r--r--drivers/ptp/ptp_vmclock.c615
-rw-r--r--drivers/pwm/core.c898
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c4
-rw-r--r--drivers/pwm/pwm-axi-pwmgen.c179
-rw-r--r--drivers/pwm/pwm-dwc-core.c2
-rw-r--r--drivers/pwm/pwm-dwc.c14
-rw-r--r--drivers/pwm/pwm-dwc.h2
-rw-r--r--drivers/pwm/pwm-imx-tpm.c4
-rw-r--r--drivers/pwm/pwm-imx27.c161
-rw-r--r--drivers/pwm/pwm-lpss-pci.c11
-rw-r--r--drivers/pwm/pwm-lpss-platform.c2
-rw-r--r--drivers/pwm/pwm-lpss.c2
-rw-r--r--drivers/pwm/pwm-microchip-core.c2
-rw-r--r--drivers/pwm/pwm-stm32-lp.c8
-rw-r--r--drivers/pwm/pwm-stm32.c619
-rw-r--r--drivers/ras/amd/atl/Kconfig1
-rw-r--r--drivers/ras/amd/atl/access.c8
-rw-r--r--drivers/ras/amd/atl/internal.h1
-rw-r--r--drivers/regulator/arizona-ldo1.c12
-rw-r--r--drivers/regulator/axp20x-regulator.c49
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c2
-rw-r--r--drivers/regulator/bd96801-regulator.c130
-rw-r--r--drivers/regulator/core.c275
-rw-r--r--drivers/regulator/db8500-prcmu.c2
-rw-r--r--drivers/regulator/devres.c39
-rw-r--r--drivers/regulator/internal.h18
-rw-r--r--drivers/regulator/isl6271a-regulator.c4
-rw-r--r--drivers/regulator/max5970-regulator.c21
-rw-r--r--drivers/regulator/of_regulator.c70
-rw-r--r--drivers/regulator/pca9450-regulator.c111
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c83
-rw-r--r--drivers/regulator/qcom_smd-regulator.c2
-rw-r--r--drivers/regulator/renesas-usb-vbus-regulator.c7
-rw-r--r--drivers/regulator/rk808-regulator.c43
-rw-r--r--drivers/regulator/rtq2208-regulator.c2
-rw-r--r--drivers/regulator/stm32-vrefbuf.c2
-rw-r--r--drivers/regulator/tps6287x-regulator.c57
-rw-r--r--drivers/regulator/tps65219-regulator.c39
-rw-r--r--drivers/regulator/uniphier-regulator.c2
-rw-r--r--drivers/regulator/userspace-consumer.c2
-rw-r--r--drivers/regulator/virtual.c2
-rw-r--r--drivers/regulator/wm8350-regulator.c6
-rw-r--r--drivers/remoteproc/Kconfig12
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c31
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c2
-rw-r--r--drivers/remoteproc/imx_rproc.c2
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c19
-rw-r--r--drivers/remoteproc/meson_mx_ao_arc.c2
-rw-r--r--drivers/remoteproc/mtk_scp.c14
-rw-r--r--drivers/remoteproc/omap_remoteproc.c24
-rw-r--r--drivers/remoteproc/pru_rproc.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c30
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c56
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c51
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c114
-rw-r--r--drivers/remoteproc/qcom_wcnss.c2
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c5
-rw-r--r--drivers/remoteproc/rcar_rproc.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c20
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c2
-rw-r--r--drivers/remoteproc/st_remoteproc.c56
-rw-r--r--drivers/remoteproc/stm32_rproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c10
-rw-r--r--drivers/remoteproc/ti_k3_m4_remoteproc.c6
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c169
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c2
-rw-r--r--drivers/reset/Kconfig20
-rw-r--r--drivers/reset/Makefile3
-rw-r--r--drivers/reset/amlogic/Kconfig27
-rw-r--r--drivers/reset/amlogic/Makefile4
-rw-r--r--drivers/reset/amlogic/reset-meson-audio-arb.c (renamed from drivers/reset/reset-meson-audio-arb.c)2
-rw-r--r--drivers/reset/amlogic/reset-meson-aux.c81
-rw-r--r--drivers/reset/amlogic/reset-meson-common.c142
-rw-r--r--drivers/reset/amlogic/reset-meson.c105
-rw-r--r--drivers/reset/amlogic/reset-meson.h28
-rw-r--r--drivers/reset/core.c119
-rw-r--r--drivers/reset/reset-meson.c159
-rw-r--r--drivers/reset/reset-microchip-sparx5.c38
-rw-r--r--drivers/reset/reset-mpfs.c4
-rw-r--r--drivers/reset/reset-npcm.c82
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c3
-rw-r--r--drivers/reset/reset-ti-sci.c2
-rw-r--r--drivers/reset/reset-uniphier-glue.c24
-rw-r--r--drivers/reset/starfive/reset-starfive-jh71x0.c3
-rw-r--r--drivers/rpmsg/qcom_glink_native.c13
-rw-r--r--drivers/rpmsg/qcom_glink_rpm.c2
-rw-r--r--drivers/rpmsg/qcom_smd.c2
-rw-r--r--drivers/rpmsg/rpmsg_core.c4
-rw-r--r--drivers/rtc/Kconfig35
-rw-r--r--drivers/rtc/Makefile5
-rw-r--r--drivers/rtc/interface.c7
-rw-r--r--drivers/rtc/rtc-88pm80x.c4
-rw-r--r--drivers/rtc/rtc-88pm860x.c4
-rw-r--r--drivers/rtc/rtc-88pm886.c97
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c11
-rw-r--r--drivers/rtc/rtc-ab8500.c2
-rw-r--r--drivers/rtc/rtc-abx80x.c2
-rw-r--r--drivers/rtc/rtc-ac100.c2
-rw-r--r--drivers/rtc/rtc-amlogic-a4.c465
-rw-r--r--drivers/rtc/rtc-armada38x.c2
-rw-r--r--drivers/rtc/rtc-as3722.c2
-rw-r--r--drivers/rtc/rtc-asm9260.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c4
-rw-r--r--drivers/rtc/rtc-at91sam9.c4
-rw-r--r--drivers/rtc/rtc-bd70528.c5
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c3
-rw-r--r--drivers/rtc/rtc-cadence.c4
-rw-r--r--drivers/rtc/rtc-cmos.c40
-rw-r--r--drivers/rtc/rtc-cpcap.c2
-rw-r--r--drivers/rtc/rtc-cros-ec.c4
-rw-r--r--drivers/rtc/rtc-da9055.c2
-rw-r--r--drivers/rtc/rtc-ds1685.c2
-rw-r--r--drivers/rtc/rtc-ds3232.c2
-rw-r--r--drivers/rtc/rtc-ftrtc010.c2
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c4
-rw-r--r--drivers/rtc/rtc-imxdi.c2
-rw-r--r--drivers/rtc/rtc-isl12022.c269
-rw-r--r--drivers/rtc/rtc-isl1208.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c2
-rw-r--r--drivers/rtc/rtc-loongson.c19
-rw-r--r--drivers/rtc/rtc-lp8788.c2
-rw-r--r--drivers/rtc/rtc-lpc24xx.c2
-rw-r--r--drivers/rtc/rtc-lpc32xx.c2
-rw-r--r--drivers/rtc/rtc-m48t59.c26
-rw-r--r--drivers/rtc/rtc-max77686.c4
-rw-r--r--drivers/rtc/rtc-max8925.c2
-rw-r--r--drivers/rtc/rtc-max8997.c2
-rw-r--r--drivers/rtc/rtc-mc13xxx.c2
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c6
-rw-r--r--drivers/rtc/rtc-meson-vrtc.c2
-rw-r--r--drivers/rtc/rtc-mpc5121.c4
-rw-r--r--drivers/rtc/rtc-mpfs.c2
-rw-r--r--drivers/rtc/rtc-mt6397.c31
-rw-r--r--drivers/rtc/rtc-mt7622.c2
-rw-r--r--drivers/rtc/rtc-mv.c6
-rw-r--r--drivers/rtc/rtc-mxc.c2
-rw-r--r--drivers/rtc/rtc-mxc_v2.c4
-rw-r--r--drivers/rtc/rtc-nxp-bbnsm.c20
-rw-r--r--drivers/rtc/rtc-omap.c4
-rw-r--r--drivers/rtc/rtc-palmas.c4
-rw-r--r--drivers/rtc/rtc-pcf2127.c82
-rw-r--r--drivers/rtc/rtc-pcf50633.c2
-rw-r--r--drivers/rtc/rtc-pcf85063.c11
-rw-r--r--drivers/rtc/rtc-pcf8563.c212
-rw-r--r--drivers/rtc/rtc-pic32.c4
-rw-r--r--drivers/rtc/rtc-pm8xxx.c4
-rw-r--r--drivers/rtc/rtc-pxa.c4
-rw-r--r--drivers/rtc/rtc-rc5t583.c4
-rw-r--r--drivers/rtc/rtc-rc5t619.c2
-rw-r--r--drivers/rtc/rtc-renesas-rtca3.c900
-rw-r--r--drivers/rtc/rtc-rk808.c2
-rw-r--r--drivers/rtc/rtc-rtd119x.c2
-rw-r--r--drivers/rtc/rtc-rv3028.c6
-rw-r--r--drivers/rtc/rtc-rzn1.c92
-rw-r--r--drivers/rtc/rtc-s3c.c4
-rw-r--r--drivers/rtc/rtc-s5m.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c4
-rw-r--r--drivers/rtc/rtc-sc27xx.c4
-rw-r--r--drivers/rtc/rtc-sh.c4
-rw-r--r--drivers/rtc/rtc-spear.c6
-rw-r--r--drivers/rtc/rtc-st-lpc.c5
-rw-r--r--drivers/rtc/rtc-stm32.c24
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/rtc/rtc-sun6i.c2
-rw-r--r--drivers/rtc/rtc-sunplus.c6
-rw-r--r--drivers/rtc/rtc-tegra.c4
-rw-r--r--drivers/rtc/rtc-test.c2
-rw-r--r--drivers/rtc/rtc-tps6586x.c4
-rw-r--r--drivers/rtc/rtc-tps65910.c2
-rw-r--r--drivers/rtc/rtc-tps6594.c2
-rw-r--r--drivers/rtc/rtc-twl.c4
-rw-r--r--drivers/rtc/rtc-vt8500.c2
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/rtc/rtc-wm8350.c4
-rw-r--r--drivers/rtc/rtc-xgene.c6
-rw-r--r--drivers/rtc/rtc-zynqmp.c10
-rw-r--r--drivers/s390/block/dasd.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c2
-rw-r--r--drivers/s390/block/dasd_diag.c15
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c18
-rw-r--r--drivers/s390/block/scm_blk.c1
-rw-r--r--drivers/s390/char/con3215.c1
-rw-r--r--drivers/s390/char/con3270.c4
-rw-r--r--drivers/s390/char/sclp.c15
-rw-r--r--drivers/s390/char/sclp.h36
-rw-r--r--drivers/s390/char/sclp_config.c4
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c8
-rw-r--r--drivers/s390/char/sclp_early.c3
-rw-r--r--drivers/s390/char/sclp_ocf.c4
-rw-r--r--drivers/s390/char/sclp_pci.c19
-rw-r--r--drivers/s390/char/sclp_sd.c4
-rw-r--r--drivers/s390/char/sclp_vt220.c4
-rw-r--r--drivers/s390/char/tape_core.c16
-rw-r--r--drivers/s390/char/uvdevice.c153
-rw-r--r--drivers/s390/char/vmlogrdr.c12
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/cio/chp.c57
-rw-r--r--drivers/s390/cio/chp.h1
-rw-r--r--drivers/s390/cio/chsc.c31
-rw-r--r--drivers/s390/cio/chsc.h16
-rw-r--r--drivers/s390/cio/cio.c6
-rw-r--r--drivers/s390/cio/cio.h2
-rw-r--r--drivers/s390/cio/cmf.c15
-rw-r--r--drivers/s390/cio/css.c6
-rw-r--r--drivers/s390/cio/device.c40
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/ioasm.c107
-rw-r--r--drivers/s390/cio/qdio.h9
-rw-r--r--drivers/s390/cio/qdio_main.c28
-rw-r--r--drivers/s390/cio/qdio_setup.c21
-rw-r--r--drivers/s390/cio/scm.c2
-rw-r--r--drivers/s390/crypto/Makefile4
-rw-r--r--drivers/s390/crypto/ap_bus.c5
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/s390/crypto/ap_queue.c28
-rw-r--r--drivers/s390/crypto/pkey_base.c14
-rw-r--r--drivers/s390/crypto/pkey_base.h36
-rw-r--r--drivers/s390/crypto/pkey_cca.c5
-rw-r--r--drivers/s390/crypto/pkey_ep11.c1
-rw-r--r--drivers/s390/crypto/pkey_pckmo.c240
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c129
-rw-r--r--drivers/s390/crypto/pkey_uv.c284
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c77
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.h1
-rw-r--r--drivers/s390/net/netiucv.c24
-rw-r--r--drivers/s390/scsi/zfcp_fc.c7
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c4
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c15
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c84
-rw-r--r--drivers/s390/scsi/zfcp_unit.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c4
-rw-r--r--drivers/sbus/char/bbc_i2c.c2
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/sbus/char/envctrl.c2
-rw-r--r--drivers/sbus/char/flash.c2
-rw-r--r--drivers/sbus/char/uctrl.c2
-rw-r--r--drivers/scsi/3w-9xxx.c9
-rw-r--r--drivers/scsi/3w-sas.c21
-rw-r--r--drivers/scsi/3w-xxxx.c10
-rw-r--r--drivers/scsi/53c700.c19
-rw-r--r--drivers/scsi/BusLogic.c9
-rw-r--r--drivers/scsi/BusLogic.h3
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/a3000.c6
-rw-r--r--drivers/scsi/a4000t.c6
-rw-r--r--drivers/scsi/aacraid/aacraid.h1
-rw-r--r--drivers/scsi/aacraid/commsup.c121
-rw-r--r--drivers/scsi/aacraid/linit.c8
-rw-r--r--drivers/scsi/advansys.c25
-rw-r--r--drivers/scsi/aha152x.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7770.c15
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c8
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c8
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y1
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l3
-rw-r--r--drivers/scsi/am53c974.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c12
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c12
-rw-r--r--drivers/scsi/atari_scsi.c2
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/bfa/bfa.h10
-rw-r--r--drivers/scsi/bfa/bfa_core.c35
-rw-r--r--drivers/scsi/bfa/bfa_defs_fcs.h22
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c482
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h72
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c9
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h1
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h12
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c142
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c36
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c21
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h2
-rw-r--r--drivers/scsi/bfa/bfa_modules.h1
-rw-r--r--drivers/scsi/bfa/bfa_svc.c72
-rw-r--r--drivers/scsi/bfa/bfa_svc.h5
-rw-r--r--drivers/scsi/bfa/bfad.c23
-rw-r--r--drivers/scsi/bfa/bfad_drv.h1
-rw-r--r--drivers/scsi/bfa/bfad_im.c26
-rw-r--r--drivers/scsi/bfa/bfi.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c14
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c7
-rw-r--r--drivers/scsi/bvme6000_scsi.c2
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c20
-rw-r--r--drivers/scsi/cxlflash/Kconfig6
-rw-r--r--drivers/scsi/cxlflash/main.c4
-rw-r--r--drivers/scsi/cxlflash/superpipe.c2
-rw-r--r--drivers/scsi/dc395x.c14
-rw-r--r--drivers/scsi/dmx3191d.c2
-rw-r--r--drivers/scsi/elx/efct/efct_driver.c2
-rw-r--r--drivers/scsi/esas2r/esas2r.h16
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c32
-rw-r--r--drivers/scsi/esas2r/esas2r_vda.c17
-rw-r--r--drivers/scsi/esp_scsi.c14
-rw-r--r--drivers/scsi/esp_scsi.h2
-rw-r--r--drivers/scsi/fcoe/fcoe.c2
-rw-r--r--drivers/scsi/fdomain_pci.c2
-rw-r--r--drivers/scsi/fnic/Makefile5
-rw-r--r--drivers/scsi/fnic/fdls_disc.c4997
-rw-r--r--drivers/scsi/fnic/fdls_fc.h253
-rw-r--r--drivers/scsi/fnic/fip.c1005
-rw-r--r--drivers/scsi/fnic/fip.h159
-rw-r--r--drivers/scsi/fnic/fnic.h288
-rw-r--r--drivers/scsi/fnic/fnic_attrs.c12
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c11
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c1742
-rw-r--r--drivers/scsi/fnic/fnic_fdls.h434
-rw-r--r--drivers/scsi/fnic/fnic_fip.h48
-rw-r--r--drivers/scsi/fnic/fnic_io.h14
-rw-r--r--drivers/scsi/fnic/fnic_isr.c28
-rw-r--r--drivers/scsi/fnic/fnic_main.c761
-rw-r--r--drivers/scsi/fnic/fnic_pci_subsys_devid.c131
-rw-r--r--drivers/scsi/fnic/fnic_res.c77
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c1161
-rw-r--r--drivers/scsi/fnic/fnic_stats.h49
-rw-r--r--drivers/scsi/fnic/fnic_trace.c97
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h7
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c44
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c24
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c24
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c203
-rw-r--r--drivers/scsi/hpsa.c20
-rw-r--r--drivers/scsi/hptiop.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c20
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c8
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/ipr.c48
-rw-r--r--drivers/scsi/ips.c6
-rw-r--r--drivers/scsi/ips.h3
-rw-r--r--drivers/scsi/isci/remote_device.c29
-rw-r--r--drivers/scsi/isci/remote_device.h17
-rw-r--r--drivers/scsi/iscsi_tcp.c6
-rw-r--r--drivers/scsi/jazz_esp.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c6
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c216
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c45
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h71
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c508
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c324
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h85
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c90
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c386
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c64
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c70
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c129
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c26
-rw-r--r--drivers/scsi/mac_esp.c2
-rw-r--r--drivers/scsi/mac_scsi.c2
-rw-r--r--drivers/scsi/megaraid.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c24
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h18
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c44
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c121
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c24
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c42
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c27
-rw-r--r--drivers/scsi/mvme16x_scsi.c2
-rw-r--r--drivers/scsi/mvsas/mv_init.c2
-rw-r--r--drivers/scsi/mvumi.c5
-rw-r--r--drivers/scsi/myrb.c23
-rw-r--r--drivers/scsi/myrs.c13
-rw-r--r--drivers/scsi/ncr53c8xx.c9
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h9
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c5
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c14
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c95
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h5
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c62
-rw-r--r--drivers/scsi/pmcraid.c24
-rw-r--r--drivers/scsi/ps3rom.c5
-rw-r--r--drivers/scsi/qedf/qedf_attr.c10
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_main.c11
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h2
-rw-r--r--drivers/scsi/qedi/qedi_main.c9
-rw-r--r--drivers/scsi/qla1280.c10
-rw-r--r--drivers/scsi/qla1280.h12
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c81
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c124
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c124
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c33
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c12
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c11
-rw-r--r--drivers/scsi/qlogicpti.c7
-rw-r--r--drivers/scsi/scsi_debug.c44
-rw-r--r--drivers/scsi/scsi_error.c26
-rw-r--r--drivers/scsi/scsi_ioctl.c35
-rw-r--r--drivers/scsi/scsi_lib.c43
-rw-r--r--drivers/scsi/scsi_lib_test.c7
-rw-r--r--drivers/scsi/scsi_scan.c49
-rw-r--r--drivers/scsi/scsi_sysctl.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c22
-rw-r--r--drivers/scsi/scsi_transport_fc.c4
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c41
-rw-r--r--drivers/scsi/scsi_transport_sas.c10
-rw-r--r--drivers/scsi/scsi_transport_spi.c3
-rw-r--r--drivers/scsi/sd.c28
-rw-r--r--drivers/scsi/sd_zbc.c5
-rw-r--r--drivers/scsi/sg.c13
-rw-r--r--drivers/scsi/sgiwd93.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c20
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/snic/snic_main.c14
-rw-r--r--drivers/scsi/sr.c5
-rw-r--r--drivers/scsi/st.c42
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/stex.c6
-rw-r--r--drivers/scsi/storvsc_drv.c36
-rw-r--r--drivers/scsi/sun3_scsi.c10
-rw-r--r--drivers/scsi/sun3x_esp.c2
-rw-r--r--drivers/scsi/sun_esp.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c17
-rw-r--r--drivers/scsi/virtio_scsi.c5
-rw-r--r--drivers/scsi/wd33c93.c2
-rw-r--r--drivers/scsi/xen-scsifront.c11
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/virq-debugfs.c1
-rw-r--r--drivers/slimbus/core.c17
-rw-r--r--drivers/slimbus/messaging.c2
-rw-r--r--drivers/slimbus/qcom-ctrl.c2
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c4
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c2
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c2
-rw-r--r--drivers/soc/aspeed/aspeed-p2a-ctrl.c2
-rw-r--r--drivers/soc/aspeed/aspeed-uart-routing.c2
-rw-r--r--drivers/soc/atmel/soc.c2
-rw-r--r--drivers/soc/fsl/dpaa2-console.c2
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c6
-rw-r--r--drivers/soc/fsl/qe/qmc.c17
-rw-r--r--drivers/soc/fsl/qe/tsa.c30
-rw-r--r--drivers/soc/fsl/rcpm.c1
-rw-r--r--drivers/soc/fujitsu/a64fx-diag.c2
-rw-r--r--drivers/soc/hisilicon/Kconfig7
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c516
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.h33
-rw-r--r--drivers/soc/imx/Makefile2
-rw-r--r--drivers/soc/imx/soc-imx8m.c174
-rw-r--r--drivers/soc/imx/soc-imx9.c128
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c2
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-qmgr.c2
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c23
-rw-r--r--drivers/soc/loongson/loongson2_guts.c2
-rw-r--r--drivers/soc/mediatek/Kconfig11
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c244
-rw-r--r--drivers/soc/mediatek/mtk-devapc.c21
-rw-r--r--drivers/soc/mediatek/mtk-dvfsrc.c545
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c2
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c4
-rw-r--r--drivers/soc/mediatek/mtk-regulator-coupler.c1
-rw-r--r--drivers/soc/mediatek/mtk-socinfo.c2
-rw-r--r--drivers/soc/mediatek/mtk-svs.c4
-rw-r--r--drivers/soc/microchip/mpfs-sys-controller.c2
-rw-r--r--drivers/soc/pxa/ssp.c2
-rw-r--r--drivers/soc/qcom/Kconfig2
-rw-r--r--drivers/soc/qcom/icc-bwmon.c2
-rw-r--r--drivers/soc/qcom/ice.c6
-rw-r--r--drivers/soc/qcom/llcc-qcom.c3326
-rw-r--r--drivers/soc/qcom/ocmem.c2
-rw-r--r--drivers/soc/qcom/pmic_glink.c67
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c11
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c3
-rw-r--r--drivers/soc/qcom/qcom-pbs.c22
-rw-r--r--drivers/soc/qcom/qcom_aoss.c2
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c2
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c3
-rw-r--r--drivers/soc/qcom/qcom_stats.c2
-rw-r--r--drivers/soc/qcom/qmi_interface.c2
-rw-r--r--drivers/soc/qcom/ramp_controller.c4
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c4
-rw-r--r--drivers/soc/qcom/rpm-proc.c2
-rw-r--r--drivers/soc/qcom/rpm_master_stats.c2
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c9
-rw-r--r--drivers/soc/qcom/smem.c18
-rw-r--r--drivers/soc/qcom/smem_state.c15
-rw-r--r--drivers/soc/qcom/smp2p.c13
-rw-r--r--drivers/soc/qcom/smsm.c6
-rw-r--r--drivers/soc/qcom/socinfo.c18
-rw-r--r--drivers/soc/renesas/Kconfig6
-rw-r--r--drivers/soc/rockchip/io-domain.c8
-rw-r--r--drivers/soc/samsung/exynos-chipid.c7
-rw-r--r--drivers/soc/samsung/exynos-pmu.c2
-rw-r--r--drivers/soc/tegra/cbb/tegra-cbb.c20
-rw-r--r--drivers/soc/tegra/cbb/tegra194-cbb.c2
-rw-r--r--drivers/soc/tegra/cbb/tegra234-cbb.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c17
-rw-r--r--drivers/soc/ti/k3-ringacc.c2
-rw-r--r--drivers/soc/ti/knav_dma.c4
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c8
-rw-r--r--drivers/soc/ti/pm33xx.c2
-rw-r--r--drivers/soc/ti/pruss.c4
-rw-r--r--drivers/soc/ti/smartreflex.c6
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c2
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c6
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c2
-rw-r--r--drivers/soundwire/Kconfig1
-rw-r--r--drivers/soundwire/amd_init.c19
-rw-r--r--drivers/soundwire/amd_manager.c106
-rw-r--r--drivers/soundwire/amd_manager.h16
-rw-r--r--drivers/soundwire/bus.c67
-rw-r--r--drivers/soundwire/bus.h3
-rw-r--r--drivers/soundwire/bus_type.c3
-rw-r--r--drivers/soundwire/cadence_master.c30
-rw-r--r--drivers/soundwire/cadence_master.h1
-rw-r--r--drivers/soundwire/generic_bandwidth_allocation.c316
-rw-r--r--drivers/soundwire/intel.c2
-rw-r--r--drivers/soundwire/intel_ace2x.c30
-rw-r--r--drivers/soundwire/intel_auxdevice.c12
-rw-r--r--drivers/soundwire/intel_bus_common.c6
-rw-r--r--drivers/soundwire/intel_init.c23
-rw-r--r--drivers/soundwire/irq.c12
-rw-r--r--drivers/soundwire/irq.h5
-rw-r--r--drivers/soundwire/mipi_disco.c184
-rw-r--r--drivers/soundwire/qcom.c6
-rw-r--r--drivers/soundwire/slave.c14
-rw-r--r--drivers/soundwire/stream.c71
-rw-r--r--drivers/soundwire/sysfs_slave.c2
-rw-r--r--drivers/spi/Kconfig34
-rw-r--r--drivers/spi/Makefile3
-rw-r--r--drivers/spi/atmel-quadspi.c1027
-rw-r--r--drivers/spi/spi-airoha-snfi.c154
-rw-r--r--drivers/spi/spi-amd.c351
-rw-r--r--drivers/spi/spi-amlogic-spifc-a1.c7
-rw-r--r--drivers/spi/spi-apple.c530
-rw-r--r--drivers/spi/spi-ar934x.c2
-rw-r--r--drivers/spi/spi-aspeed-smc.c12
-rw-r--r--drivers/spi/spi-at91-usart.c2
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-au1550.c2
-rw-r--r--drivers/spi/spi-axi-spi-engine.c15
-rw-r--r--drivers/spi/spi-bcm2835.c2
-rw-r--r--drivers/spi/spi-bcm2835aux.c2
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c2
-rw-r--r--drivers/spi/spi-bcm63xx.c2
-rw-r--r--drivers/spi/spi-bcmbca-hsspi.c2
-rw-r--r--drivers/spi/spi-brcmstb-qspi.c2
-rw-r--r--drivers/spi/spi-cadence-quadspi.c67
-rw-r--r--drivers/spi/spi-cadence.c2
-rw-r--r--drivers/spi/spi-cavium-octeon.c2
-rw-r--r--drivers/spi/spi-ch341.c2
-rw-r--r--drivers/spi/spi-coldfire-qspi.c2
-rw-r--r--drivers/spi/spi-cs42l43.c48
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-dln2.c2
-rw-r--r--drivers/spi/spi-dw-bt1.c4
-rw-r--r--drivers/spi/spi-dw-core.c24
-rw-r--r--drivers/spi/spi-dw-dma.c4
-rw-r--r--drivers/spi/spi-dw-mmio.c4
-rw-r--r--drivers/spi/spi-dw-pci.c11
-rw-r--r--drivers/spi/spi-ep93xx.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c16
-rw-r--r--drivers/spi/spi-fsl-espi.c2
-rw-r--r--drivers/spi/spi-fsl-lpspi.c29
-rw-r--r--drivers/spi/spi-fsl-qspi.c14
-rw-r--r--drivers/spi/spi-fsl-spi.c6
-rw-r--r--drivers/spi/spi-geni-qcom.c8
-rw-r--r--drivers/spi/spi-hisi-kunpeng.c2
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c2
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-imx.c115
-rw-r--r--drivers/spi/spi-intel-pci.c3
-rw-r--r--drivers/spi/spi-intel-platform.c1
-rw-r--r--drivers/spi/spi-intel.c64
-rw-r--r--drivers/spi/spi-intel.h2
-rw-r--r--drivers/spi/spi-iproc-qspi.c2
-rw-r--r--drivers/spi/spi-kspi2.c431
-rw-r--r--drivers/spi/spi-lantiq-ssc.c4
-rw-r--r--drivers/spi/spi-ljca.c2
-rw-r--r--drivers/spi/spi-loongson-core.c4
-rw-r--r--drivers/spi/spi-loongson-pci.c7
-rw-r--r--drivers/spi/spi-loongson-plat.c2
-rw-r--r--drivers/spi/spi-mem.c67
-rw-r--r--drivers/spi/spi-meson-spicc.c2
-rw-r--r--drivers/spi/spi-meson-spifc.c2
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c28
-rw-r--r--drivers/spi/spi-microchip-core.c2
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c4
-rw-r--r--drivers/spi/spi-mpc52xx.c3
-rw-r--r--drivers/spi/spi-mt65xx.c9
-rw-r--r--drivers/spi/spi-mtk-nor.c2
-rw-r--r--drivers/spi/spi-mtk-snfi.c6
-rw-r--r--drivers/spi/spi-mxic.c32
-rw-r--r--drivers/spi/spi-mxs.c4
-rw-r--r--drivers/spi/spi-npcm-fiu.c8
-rw-r--r--drivers/spi/spi-npcm-pspi.c2
-rw-r--r--drivers/spi/spi-nxp-fspi.c14
-rw-r--r--drivers/spi/spi-oc-tiny.c2
-rw-r--r--drivers/spi/spi-omap-uwire.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c7
-rw-r--r--drivers/spi/spi-orion.c2
-rw-r--r--drivers/spi/spi-pic32-sqi.c4
-rw-r--r--drivers/spi/spi-pic32.c2
-rw-r--r--drivers/spi/spi-pl022.c2
-rw-r--r--drivers/spi/spi-ppc4xx.c2
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c10
-rw-r--r--drivers/spi/spi-pxa2xx-platform.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c92
-rw-r--r--drivers/spi/spi-qcom-qspi.c4
-rw-r--r--drivers/spi/spi-qup.c2
-rw-r--r--drivers/spi/spi-rb4xx.c2
-rw-r--r--drivers/spi/spi-realtek-rtl-snand.c419
-rw-r--r--drivers/spi/spi-rockchip-sfc.c260
-rw-r--r--drivers/spi/spi-rockchip.c73
-rw-r--r--drivers/spi/spi-rpc-if.c2
-rw-r--r--drivers/spi/spi-rspi.c2
-rw-r--r--drivers/spi/spi-rzv2m-csi.c2
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/spi/spi-sc18is602.c34
-rw-r--r--drivers/spi/spi-sh-hspi.c2
-rw-r--r--drivers/spi/spi-sh-msiof.c2
-rw-r--r--drivers/spi/spi-sh-sci.c2
-rw-r--r--drivers/spi/spi-sh.c2
-rw-r--r--drivers/spi/spi-sifive.c2
-rw-r--r--drivers/spi/spi-slave-mt27xx.c10
-rw-r--r--drivers/spi/spi-sn-f-ospi.c10
-rw-r--r--drivers/spi/spi-sprd.c4
-rw-r--r--drivers/spi/spi-st-ssc4.c2
-rw-r--r--drivers/spi/spi-stm32-qspi.c2
-rw-r--r--drivers/spi/spi-stm32.c3
-rw-r--r--drivers/spi/spi-sun4i.c2
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi-sunplus-sp7021.c2
-rw-r--r--drivers/spi/spi-synquacer.c2
-rw-r--r--drivers/spi/spi-tegra114.c2
-rw-r--r--drivers/spi/spi-tegra20-sflash.c2
-rw-r--r--drivers/spi/spi-tegra20-slink.c4
-rw-r--r--drivers/spi/spi-tegra210-quad.c4
-rw-r--r--drivers/spi/spi-ti-qspi.c24
-rw-r--r--drivers/spi/spi-topcliff-pch.c2
-rw-r--r--drivers/spi/spi-uniphier.c2
-rw-r--r--drivers/spi/spi-xilinx.c2
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c2
-rw-r--r--drivers/spi/spi-zynq-qspi.c28
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c17
-rw-r--r--drivers/spi/spi.c59
-rw-r--r--drivers/spi/spidev.c30
-rw-r--r--drivers/spmi/hisi-spmi-controller.c3
-rw-r--r--drivers/spmi/spmi-mtk-pmif.c2
-rw-r--r--drivers/spmi/spmi-pmic-arb.c5
-rw-r--r--drivers/spmi/spmi.c2
-rw-r--r--drivers/staging/Kconfig18
-rw-r--r--drivers/staging/Makefile9
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c2
-rw-r--r--drivers/staging/fbtft/Kconfig1
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c3
-rw-r--r--drivers/staging/fbtft/fbtft.h2
-rw-r--r--drivers/staging/fieldbus/Documentation/ABI/fieldbus-dev-cdev31
-rw-r--r--drivers/staging/fieldbus/Documentation/ABI/sysfs-class-fieldbus-dev62
-rw-r--r--drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt71
-rw-r--r--drivers/staging/fieldbus/Documentation/fieldbus_dev.txt66
-rw-r--r--drivers/staging/fieldbus/Kconfig19
-rw-r--r--drivers/staging/fieldbus/Makefile7
-rw-r--r--drivers/staging/fieldbus/TODO5
-rw-r--r--drivers/staging/fieldbus/anybuss/Kconfig41
-rw-r--r--drivers/staging/fieldbus/anybuss/Makefile10
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-client.h95
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-controller.h47
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c379
-rw-r--r--drivers/staging/fieldbus/anybuss/hms-profinet.c224
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c1452
-rw-r--r--drivers/staging/fieldbus/dev_core.c344
-rw-r--r--drivers/staging/fieldbus/fieldbus_dev.h114
-rw-r--r--drivers/staging/gdm724x/Kconfig16
-rw-r--r--drivers/staging/gdm724x/Makefile8
-rw-r--r--drivers/staging/gdm724x/TODO16
-rw-r--r--drivers/staging/gdm724x/gdm_endian.c37
-rw-r--r--drivers/staging/gdm724x/gdm_endian.h30
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c937
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h71
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c668
-rw-r--r--drivers/staging/gdm724x/gdm_mux.h85
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c316
-rw-r--r--drivers/staging/gdm724x/gdm_tty.h60
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c1012
-rw-r--r--drivers/staging/gdm724x/gdm_usb.h99
-rw-r--r--drivers/staging/gdm724x/hci.h45
-rw-r--r--drivers/staging/gdm724x/hci_packet.h82
-rw-r--r--drivers/staging/gdm724x/netlink_k.c128
-rw-r--r--drivers/staging/gdm724x/netlink_k.h16
-rw-r--r--drivers/staging/gpib/Kconfig260
-rw-r--r--drivers/staging/gpib/Makefile20
-rw-r--r--drivers/staging/gpib/TODO21
-rw-r--r--drivers/staging/gpib/agilent_82350b/Makefile2
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.c946
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.h207
-rw-r--r--drivers/staging/gpib/agilent_82357a/Makefile4
-rw-r--r--drivers/staging/gpib/agilent_82357a/agilent_82357a.c1711
-rw-r--r--drivers/staging/gpib/agilent_82357a/agilent_82357a.h182
-rw-r--r--drivers/staging/gpib/cb7210/Makefile4
-rw-r--r--drivers/staging/gpib/cb7210/cb7210.c1620
-rw-r--r--drivers/staging/gpib/cb7210/cb7210.h246
-rw-r--r--drivers/staging/gpib/cec/Makefile3
-rw-r--r--drivers/staging/gpib/cec/cec.h49
-rw-r--r--drivers/staging/gpib/cec/cec_gpib.c389
-rw-r--r--drivers/staging/gpib/common/Makefile6
-rw-r--r--drivers/staging/gpib/common/gpib_os.c2280
-rw-r--r--drivers/staging/gpib/common/iblib.c740
-rw-r--r--drivers/staging/gpib/common/ibsys.h31
-rw-r--r--drivers/staging/gpib/eastwood/Makefile3
-rw-r--r--drivers/staging/gpib/eastwood/fluke_gpib.c1201
-rw-r--r--drivers/staging/gpib/eastwood/fluke_gpib.h143
-rw-r--r--drivers/staging/gpib/fmh_gpib/Makefile2
-rw-r--r--drivers/staging/gpib/fmh_gpib/fmh_gpib.c1757
-rw-r--r--drivers/staging/gpib/fmh_gpib/fmh_gpib.h177
-rw-r--r--drivers/staging/gpib/gpio/Makefile4
-rw-r--r--drivers/staging/gpib/gpio/gpib_bitbang.c1481
-rw-r--r--drivers/staging/gpib/hp_82335/Makefile4
-rw-r--r--drivers/staging/gpib/hp_82335/hp82335.c367
-rw-r--r--drivers/staging/gpib/hp_82335/hp82335.h82
-rw-r--r--drivers/staging/gpib/hp_82341/Makefile2
-rw-r--r--drivers/staging/gpib/hp_82341/hp_82341.c908
-rw-r--r--drivers/staging/gpib/hp_82341/hp_82341.h205
-rw-r--r--drivers/staging/gpib/include/amcc5920.h49
-rw-r--r--drivers/staging/gpib/include/amccs5933.h59
-rw-r--r--drivers/staging/gpib/include/gpibP.h40
-rw-r--r--drivers/staging/gpib/include/gpib_pci_ids.h23
-rw-r--r--drivers/staging/gpib/include/gpib_proto.h56
-rw-r--r--drivers/staging/gpib/include/gpib_state_machines.h23
-rw-r--r--drivers/staging/gpib/include/gpib_types.h354
-rw-r--r--drivers/staging/gpib/include/nec7210.h141
-rw-r--r--drivers/staging/gpib/include/nec7210_registers.h217
-rw-r--r--drivers/staging/gpib/include/plx9050.h72
-rw-r--r--drivers/staging/gpib/include/quancom_pci.h22
-rw-r--r--drivers/staging/gpib/include/tms9914.h277
-rw-r--r--drivers/staging/gpib/include/tnt4882_registers.h192
-rw-r--r--drivers/staging/gpib/ines/Makefile4
-rw-r--r--drivers/staging/gpib/ines/ines.h208
-rw-r--r--drivers/staging/gpib/ines/ines_gpib.c1516
-rw-r--r--drivers/staging/gpib/lpvo_usb_gpib/Makefile3
-rw-r--r--drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c2138
-rw-r--r--drivers/staging/gpib/nec7210/Makefile4
-rw-r--r--drivers/staging/gpib/nec7210/board.h19
-rw-r--r--drivers/staging/gpib/nec7210/nec7210.c1134
-rw-r--r--drivers/staging/gpib/ni_usb/Makefile4
-rw-r--r--drivers/staging/gpib/ni_usb/ni_usb_gpib.c2653
-rw-r--r--drivers/staging/gpib/ni_usb/ni_usb_gpib.h216
-rw-r--r--drivers/staging/gpib/pc2/Makefile5
-rw-r--r--drivers/staging/gpib/pc2/pc2_gpib.c686
-rw-r--r--drivers/staging/gpib/tms9914/Makefile6
-rw-r--r--drivers/staging/gpib/tms9914/tms9914.c910
-rw-r--r--drivers/staging/gpib/tnt4882/Makefile7
-rw-r--r--drivers/staging/gpib/tnt4882/mite.c150
-rw-r--r--drivers/staging/gpib/tnt4882/mite.h234
-rw-r--r--drivers/staging/gpib/tnt4882/tnt4882_gpib.c1947
-rw-r--r--drivers/staging/gpib/uapi/gpib_ioctl.h169
-rw-r--r--drivers/staging/gpib/uapi/gpib_user.h363
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c2
-rw-r--r--drivers/staging/greybus/arche-platform.c2
-rw-r--r--drivers/staging/greybus/camera.c17
-rw-r--r--drivers/staging/greybus/gpio.c33
-rw-r--r--drivers/staging/greybus/uart.c2
-rw-r--r--drivers/staging/iio/TODO5
-rw-r--r--drivers/staging/iio/accel/adis16203.c2
-rw-r--r--drivers/staging/iio/accel/adis16240.c2
-rw-r--r--drivers/staging/iio/frequency/ad9832.c9
-rw-r--r--drivers/staging/iio/frequency/ad9834.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c6
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig10
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile2
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c211
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c24
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h13
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h11
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h11
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm.h11
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_bo.h15
-rw-r--r--drivers/staging/media/atomisp/include/hmm/hmm_common.h11
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h11
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h9
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h11
-rw-r--r--drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h28
-rw-r--r--drivers/staging/media/atomisp/include/mmu/isp_mmu.h11
-rw-r--r--drivers/staging/media/atomisp/include/mmu/sh_mmu_mrfld.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp-regs.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_common.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_ioctl32.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_dfs_tables.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.c11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.c13
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_fops.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_tables.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_trace_event.h11
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.h11
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf.h9
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_comm.h9
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/interface/ia_css_circbuf_desc.h9
-rw-r--r--drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c9
-rw-r--r--drivers/staging/media/atomisp/pci/base/refcount/interface/ia_css_refcount.h9
-rw-r--r--drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c9
-rw-r--r--drivers/staging/media/atomisp/pci/bits.h9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_binarydesc.h9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_stagedesc.h9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/interface/ia_css_pipe_util.h9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/util/interface/ia_css_util.h9
-rw-r--r--drivers/staging/media/atomisp/pci/camera/util/src/util.c9
-rw-r--r--drivers/staging/media/atomisp/pci/cell_params.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/csi_rx_global.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c9
-rw-r--r--drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio_local.h9
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c2
-rw-r--r--drivers/thermal/intel/intel_quark_dts_thermal.c2
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c2
-rw-r--r--drivers/thermal/intel/intel_tcc.c10
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c2
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c2
-rw-r--r--drivers/thermal/k3_bandgap.c2
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c4
-rw-r--r--drivers/thermal/kirkwood_thermal.c2
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c6
-rw-r--r--drivers/thermal/pcie_cooling.c80
-rw-r--r--drivers/thermal/qcom/lmh.c7
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c7
-rw-r--r--drivers/thermal/qcom/tsens-v1.c21
-rw-r--r--drivers/thermal/qcom/tsens.c5
-rw-r--r--drivers/thermal/qcom/tsens.h2
-rw-r--r--drivers/thermal/renesas/rcar_gen3_thermal.c2
-rw-r--r--drivers/thermal/renesas/rcar_thermal.c2
-rw-r--r--drivers/thermal/renesas/rzg2l_thermal.c2
-rw-r--r--drivers/thermal/rockchip_thermal.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/thermal/spear_thermal.c2
-rw-r--r--drivers/thermal/sprd_thermal.c2
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c2
-rw-r--r--drivers/thermal/st/stm_thermal.c2
-rw-r--r--drivers/thermal/sun8i_thermal.c11
-rw-r--r--drivers/thermal/tegra/soctherm.c7
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c2
-rw-r--r--drivers/thermal/testing/zone.c41
-rw-r--r--drivers/thermal/thermal_core.c898
-rw-r--r--drivers/thermal/thermal_core.h46
-rw-r--r--drivers/thermal/thermal_debugfs.c50
-rw-r--r--drivers/thermal/thermal_helpers.c46
-rw-r--r--drivers/thermal/thermal_hwmon.c7
-rw-r--r--drivers/thermal/thermal_netlink.c262
-rw-r--r--drivers/thermal/thermal_netlink.h34
-rw-r--r--drivers/thermal/thermal_of.c72
-rw-r--r--drivers/thermal/thermal_sysfs.c132
-rw-r--r--drivers/thermal/thermal_thresholds.c244
-rw-r--r--drivers/thermal/thermal_thresholds.h19
-rw-r--r--drivers/thermal/thermal_trip.c48
-rw-r--r--drivers/thermal/ti-soc-thermal/dra752-bandgap.h4
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h8
-rw-r--r--drivers/thermal/ti-soc-thermal/omap5xxx-bandgap.h4
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thermal/uniphier_thermal.c2
-rw-r--r--drivers/thunderbolt/ctl.c11
-rw-r--r--drivers/thunderbolt/ctl.h1
-rw-r--r--drivers/thunderbolt/debugfs.c571
-rw-r--r--drivers/thunderbolt/eeprom.c78
-rw-r--r--drivers/thunderbolt/nhi.c20
-rw-r--r--drivers/thunderbolt/nhi.h4
-rw-r--r--drivers/thunderbolt/path.c4
-rw-r--r--drivers/thunderbolt/retimer.c28
-rw-r--r--drivers/thunderbolt/sb_regs.h32
-rw-r--r--drivers/thunderbolt/tb.c285
-rw-r--r--drivers/thunderbolt/tb.h21
-rw-r--r--drivers/thunderbolt/test.c90
-rw-r--r--drivers/thunderbolt/tunnel.c406
-rw-r--r--drivers/thunderbolt/tunnel.h61
-rw-r--r--drivers/thunderbolt/usb4.c20
-rw-r--r--drivers/thunderbolt/xdomain.c2
-rw-r--r--drivers/tty/Kconfig4
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/goldfish.c2
-rw-r--r--drivers/tty/hvc/hvc_opal.c2
-rw-r--r--drivers/tty/mips_ejtag_fdc.c4
-rw-r--r--drivers/tty/n_gsm.c41
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/8250/8250.h4
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c2
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c6
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c4
-rw-r--r--drivers/tty/serial/8250/8250_core.c4
-rw-r--r--drivers/tty/serial/8250/8250_dw.c7
-rw-r--r--drivers/tty/serial/8250/8250_early.c4
-rw-r--r--drivers/tty/serial/8250/8250_em.c2
-rw-r--r--drivers/tty/serial/8250/8250_exar.c136
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c16
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c2
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c2
-rw-r--r--drivers/tty/serial/8250/8250_ioc3.c2
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c2
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c2
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c6
-rw-r--r--drivers/tty/serial/8250/8250_of.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c21
-rw-r--r--drivers/tty/serial/8250/8250_pci.c118
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c62
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.c15
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.h2
-rw-r--r--drivers/tty/serial/8250/8250_platform.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c127
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c2
-rw-r--r--drivers/tty/serial/8250/8250_tegra.c2
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c2
-rw-r--r--drivers/tty/serial/8250/Kconfig5
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/tty/serial/altera_jtaguart.c16
-rw-r--r--drivers/tty/serial/altera_uart.c13
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c137
-rw-r--r--drivers/tty/serial/ar933x_uart.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c34
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c2
-rw-r--r--drivers/tty/serial/clps711x.c2
-rw-r--r--drivers/tty/serial/cpm_uart.c4
-rw-r--r--drivers/tty/serial/digicolor-usart.c2
-rw-r--r--drivers/tty/serial/earlycon.c23
-rw-r--r--drivers/tty/serial/esp32_acm.c2
-rw-r--r--drivers/tty/serial/esp32_uart.c2
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c30
-rw-r--r--drivers/tty/serial/imx.c152
-rw-r--r--drivers/tty/serial/kgdb_nmi.c101
-rw-r--r--drivers/tty/serial/lantiq.c2
-rw-r--r--drivers/tty/serial/liteuart.c2
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c2
-rw-r--r--drivers/tty/serial/ma35d1_serial.c2
-rw-r--r--drivers/tty/serial/mcf.c2
-rw-r--r--drivers/tty/serial/men_z135_uart.c2
-rw-r--r--drivers/tty/serial/meson_uart.c2
-rw-r--r--drivers/tty/serial/milbeaut_usio.c2
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c4
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/omap-serial.c2
-rw-r--r--drivers/tty/serial/owl-uart.c2
-rw-r--r--drivers/tty/serial/pic32_uart.c2
-rw-r--r--drivers/tty/serial/pmac_zilog.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c105
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/rp2.c12
-rw-r--r--drivers/tty/serial/sa1100.c2
-rw-r--r--drivers/tty/serial/samsung_tty.c15
-rw-r--r--drivers/tty/serial/sc16is7xx.c41
-rw-r--r--drivers/tty/serial/sc16is7xx_i2c.c2
-rw-r--r--drivers/tty/serial/sc16is7xx_spi.c2
-rw-r--r--drivers/tty/serial/sccnxp.c2
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_core.c267
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c126
-rw-r--r--drivers/tty/serial/sifive.c2
-rw-r--r--drivers/tty/serial/sprd_serial.c43
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c6
-rw-r--r--drivers/tty/serial/sunhv.c2
-rw-r--r--drivers/tty/serial/sunplus-uart.c2
-rw-r--r--drivers/tty/serial/sunsab.c2
-rw-r--r--drivers/tty/serial/sunsu.c2
-rw-r--r--drivers/tty/serial/sunzilog.c2
-rw-r--r--drivers/tty/serial/tegra-tcu.c2
-rw-r--r--drivers/tty/serial/timbuart.c2
-rw-r--r--drivers/tty/serial/uartlite.c2
-rw-r--r--drivers/tty/serial/ucc_uart.c4
-rw-r--r--drivers/tty/serial/xilinx_uartps.c10
-rw-r--r--drivers/tty/sysrq.c18
-rw-r--r--drivers/tty/tty_io.c7
-rw-r--r--drivers/tty/vt/selection.c14
-rw-r--r--drivers/tty/vt/vt.c4
-rw-r--r--drivers/ufs/core/ufs-mcq.c31
-rw-r--r--drivers/ufs/core/ufs-sysfs.c13
-rw-r--r--drivers/ufs/core/ufs_bsg.c4
-rw-r--r--drivers/ufs/core/ufshcd-crypto.c26
-rw-r--r--drivers/ufs/core/ufshcd-priv.h7
-rw-r--r--drivers/ufs/core/ufshcd.c1046
-rw-r--r--drivers/ufs/host/cdns-pltfrm.c6
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pci.c8
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pltfrm.c7
-rw-r--r--drivers/ufs/host/ti-j721e-ufs.c2
-rw-r--r--drivers/ufs/host/ufs-exynos.c141
-rw-r--r--drivers/ufs/host/ufs-exynos.h2
-rw-r--r--drivers/ufs/host/ufs-hisi.c6
-rw-r--r--drivers/ufs/host/ufs-mediatek.c17
-rw-r--r--drivers/ufs/host/ufs-qcom.c155
-rw-r--r--drivers/ufs/host/ufs-qcom.h5
-rw-r--r--drivers/ufs/host/ufs-renesas.c15
-rw-r--r--drivers/ufs/host/ufs-sprd.c7
-rw-r--r--drivers/ufs/host/ufshcd-pci.c10
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c42
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h1
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c7
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c2
-rw-r--r--drivers/uio/uio_hv_generic.c88
-rw-r--r--drivers/uio/uio_pdrv_genirq.c5
-rw-r--r--drivers/usb/atm/ueagle-atm.c6
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c2
-rw-r--r--drivers/usb/cdns3/cdns3-imx.c2
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c4
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c2
-rw-r--r--drivers/usb/cdns3/cdns3-starfive.c2
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c2
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c13
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c26
-rw-r--r--drivers/usb/cdns3/core.c4
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c28
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_npcm.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c2
-rw-r--r--drivers/usb/chipidea/core.c6
-rw-r--r--drivers/usb/chipidea/host.c13
-rw-r--r--drivers/usb/chipidea/udc.c178
-rw-r--r--drivers/usb/chipidea/udc.h2
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c4
-rw-r--r--drivers/usb/class/usblp.c9
-rw-r--r--drivers/usb/common/common.c17
-rw-r--r--drivers/usb/common/usb-conn-gpio.c5
-rw-r--r--drivers/usb/core/config.c21
-rw-r--r--drivers/usb/core/devio.c5
-rw-r--r--drivers/usb/core/driver.c31
-rw-r--r--drivers/usb/core/endpoint.c11
-rw-r--r--drivers/usb/core/generic.c12
-rw-r--r--drivers/usb/core/hcd-pci.c15
-rw-r--r--drivers/usb/core/hcd.c10
-rw-r--r--drivers/usb/core/hub.c16
-rw-r--r--drivers/usb/core/ledtrig-usbport.c3
-rw-r--r--drivers/usb/core/port.c21
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/sysfs.c14
-rw-r--r--drivers/usb/core/usb-acpi.c7
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc2/Kconfig2
-rw-r--r--drivers/usb/dwc2/hcd.c19
-rw-r--r--drivers/usb/dwc2/params.c1
-rw-r--r--drivers/usb/dwc2/platform.c2
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.c119
-rw-r--r--drivers/usb/dwc3/core.h27
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c86
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c2
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c32
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c2
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c2
-rw-r--r--drivers/usb/dwc3/dwc3-octeon.c2
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c2
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c17
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c4
-rw-r--r--drivers/usb/dwc3/dwc3-rtk.c2
-rw-r--r--drivers/usb/dwc3/dwc3-st.c8
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c7
-rw-r--r--drivers/usb/dwc3/ep0.c4
-rw-r--r--drivers/usb/dwc3/gadget.c288
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/fotg210/fotg210-core.c7
-rw-r--r--drivers/usb/gadget/Kconfig4
-rw-r--r--drivers/usb/gadget/composite.c30
-rw-r--r--drivers/usb/gadget/config.c4
-rw-r--r--drivers/usb/gadget/configfs.c8
-rw-r--r--drivers/usb/gadget/function/Makefile4
-rw-r--r--drivers/usb/gadget/function/f_ecm.c4
-rw-r--r--drivers/usb/gadget/function/f_fs.c10
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c8
-rw-r--r--drivers/usb/gadget/function/f_midi.c8
-rw-r--r--drivers/usb/gadget/function/f_midi2.c10
-rw-r--r--drivers/usb/gadget/function/f_ncm.c3
-rw-r--r--drivers/usb/gadget/function/f_tcm.c719
-rw-r--r--drivers/usb/gadget/function/f_uac2.c7
-rw-r--r--drivers/usb/gadget/function/f_uvc.c4
-rw-r--r--drivers/usb/gadget/function/storage_common.h2
-rw-r--r--drivers/usb/gadget/function/tcm.h28
-rw-r--r--drivers/usb/gadget/function/u_serial.c12
-rw-r--r--drivers/usb/gadget/function/uvc.h13
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c348
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h16
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c26
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h2
-rw-r--r--drivers/usb/gadget/function/uvc_trace.c11
-rw-r--r--drivers/usb/gadget/function/uvc_trace.h60
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c66
-rw-r--r--drivers/usb/gadget/function/uvc_video.c268
-rw-r--r--drivers/usb/gadget/legacy/hid.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c3
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c4
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c3
-rw-r--r--drivers/usb/gadget/udc/aspeed_udc.c4
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c5
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c2
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.c13
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-pci.c3
-rw-r--r--drivers/usb/gadget/udc/core.c1
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c29
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c8
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c5
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c4
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c2
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c2
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c2
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/net2272.c4
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c9
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c12
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usbf.c4
-rw-r--r--drivers/usb/gadget/udc/rzv2m_usb3drd.c2
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c2
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c2
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c2
-rw-r--r--drivers/usb/gadget/usbstring.c2
-rw-r--r--drivers/usb/host/bcma-hcd.c1
-rw-r--r--drivers/usb/host/ehci-atmel.c2
-rw-r--r--drivers/usb/host/ehci-brcm.c2
-rw-r--r--drivers/usb/host/ehci-exynos.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-grlib.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-mv.c2
-rw-r--r--drivers/usb/host/ehci-npcm7xx.c2
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/ehci-orion.c2
-rw-r--r--drivers/usb/host/ehci-platform.c2
-rw-r--r--drivers/usb/host/ehci-ppc-of.c2
-rw-r--r--drivers/usb/host/ehci-sh.c11
-rw-r--r--drivers/usb/host/ehci-spear.c9
-rw-r--r--drivers/usb/host/ehci-st.c2
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/fhci-hcd.c2
-rw-r--r--drivers/usb/host/fhci-sched.c4
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c2
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c2
-rw-r--r--drivers/usb/host/max3421-hcd.c16
-rw-r--r--drivers/usb/host/octeon-hcd.c6
-rw-r--r--drivers/usb/host/ohci-at91.c2
-rw-r--r--drivers/usb/host/ohci-da8xx.c2
-rw-r--r--drivers/usb/host/ohci-exynos.c2
-rw-r--r--drivers/usb/host/ohci-nxp.c2
-rw-r--r--drivers/usb/host/ohci-omap.c4
-rw-r--r--drivers/usb/host/ohci-platform.c2
-rw-r--r--drivers/usb/host/ohci-ppc-of.c2
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/ohci-s3c2410.c2
-rw-r--r--drivers/usb/host/ohci-sm501.c2
-rw-r--r--drivers/usb/host/ohci-spear.c2
-rw-r--r--drivers/usb/host/ohci-st.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c11
-rw-r--r--drivers/usb/host/r8a66597-hcd.c6
-rw-r--r--drivers/usb/host/sl811-hcd.c5
-rw-r--r--drivers/usb/host/uhci-grlib.c2
-rw-r--r--drivers/usb/host/uhci-platform.c2
-rw-r--r--drivers/usb/host/xhci-caps.h6
-rw-r--r--drivers/usb/host/xhci-dbgcap.c15
-rw-r--r--drivers/usb/host/xhci-dbgcap.h1
-rw-r--r--drivers/usb/host/xhci-dbgtty.c153
-rw-r--r--drivers/usb/host/xhci-debugfs.c35
-rw-r--r--drivers/usb/host/xhci-histb.c2
-rw-r--r--drivers/usb/host/xhci-hub.c6
-rw-r--r--drivers/usb/host/xhci-mem.c235
-rw-r--r--drivers/usb/host/xhci-mtk.c2
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c2
-rw-r--r--drivers/usb/host/xhci-pci.c60
-rw-r--r--drivers/usb/host/xhci-plat.c8
-rw-r--r--drivers/usb/host/xhci-rcar.c2
-rw-r--r--drivers/usb/host/xhci-ring.c394
-rw-r--r--drivers/usb/host/xhci-tegra.c11
-rw-r--r--drivers/usb/host/xhci-trace.h79
-rw-r--r--drivers/usb/host/xhci.c126
-rw-r--r--drivers/usb/host/xhci.h57
-rw-r--r--drivers/usb/image/microtek.c4
-rw-r--r--drivers/usb/isp1760/Kconfig2
-rw-r--r--drivers/usb/isp1760/isp1760-if.c2
-rw-r--r--drivers/usb/misc/Kconfig12
-rw-r--r--drivers/usb/misc/chaoskey.c35
-rw-r--r--drivers/usb/misc/iowarrior.c50
-rw-r--r--drivers/usb/misc/onboard_usb_dev.c12
-rw-r--r--drivers/usb/misc/qcom_eud.c2
-rw-r--r--drivers/usb/misc/usb-ljca.c28
-rw-r--r--drivers/usb/misc/usb3503.c2
-rw-r--r--drivers/usb/misc/usbtest.c3
-rw-r--r--drivers/usb/misc/yurex.c26
-rw-r--r--drivers/usb/mon/mon_bin.c2
-rw-r--r--drivers/usb/mtu3/Kconfig2
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c43
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c3
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c3
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c4
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/da8xx.c5
-rw-r--r--drivers/usb/musb/jz4740.c2
-rw-r--r--drivers/usb/musb/mediatek.c2
-rw-r--r--drivers/usb/musb/mpfs.c2
-rw-r--r--drivers/usb/musb/musb_core.c7
-rw-r--r--drivers/usb/musb/musb_dsps.c5
-rw-r--r--drivers/usb/musb/musb_gadget.c16
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c2
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/musb/omap2430.c2
-rw-r--r--drivers/usb/musb/sunxi.c4
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/musb/ux500.c2
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c2
-rw-r--r--drivers/usb/phy/phy-am335x.c2
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c5
-rw-r--r--drivers/usb/phy/phy-generic.c2
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c2
-rw-r--r--drivers/usb/phy/phy-isp1301.c2
-rw-r--r--drivers/usb/phy/phy-keystone.c2
-rw-r--r--drivers/usb/phy/phy-mv-usb.c5
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c2
-rw-r--r--drivers/usb/phy/phy-tahvo.c5
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c2
-rw-r--r--drivers/usb/phy/phy.c28
-rw-r--r--drivers/usb/renesas_usbhs/common.c4
-rw-r--r--drivers/usb/roles/intel-xhci-usb-role-switch.c2
-rw-r--r--drivers/usb/serial/bus.c4
-rw-r--r--drivers/usb/serial/ch341.c35
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/io_edgeport.c10
-rw-r--r--drivers/usb/serial/option.c45
-rw-r--r--drivers/usb/serial/pl2303.c38
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/quatech2.c2
-rw-r--r--drivers/usb/serial/sierra.c2
-rw-r--r--drivers/usb/serial/usb-serial.c4
-rw-r--r--drivers/usb/storage/Kconfig3
-rw-r--r--drivers/usb/storage/Makefile2
-rw-r--r--drivers/usb/storage/alauda.c2
-rw-r--r--drivers/usb/storage/cypress_atacb.c2
-rw-r--r--drivers/usb/storage/datafab.c2
-rw-r--r--drivers/usb/storage/ene_ub6250.c10
-rw-r--r--drivers/usb/storage/freecom.c2
-rw-r--r--drivers/usb/storage/isd200.c2
-rw-r--r--drivers/usb/storage/jumpshot.c2
-rw-r--r--drivers/usb/storage/karma.c2
-rw-r--r--drivers/usb/storage/onetouch.c2
-rw-r--r--drivers/usb/storage/realtek_cr.c6
-rw-r--r--drivers/usb/storage/scsiglue.c15
-rw-r--r--drivers/usb/storage/sddr09.c6
-rw-r--r--drivers/usb/storage/sddr55.c10
-rw-r--r--drivers/usb/storage/shuttle_usbat.c6
-rw-r--r--drivers/usb/storage/transport.c10
-rw-r--r--drivers/usb/storage/uas.c12
-rw-r--r--drivers/usb/storage/unusual_devs.h18
-rw-r--r--drivers/usb/typec/altmodes/Kconfig9
-rw-r--r--drivers/usb/typec/altmodes/Makefile2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c6
-rw-r--r--drivers/usb/typec/altmodes/nvidia.c2
-rw-r--r--drivers/usb/typec/altmodes/thunderbolt.c388
-rw-r--r--drivers/usb/typec/anx7411.c66
-rw-r--r--drivers/usb/typec/bus.c6
-rw-r--r--drivers/usb/typec/class.c261
-rw-r--r--drivers/usb/typec/class.h3
-rw-r--r--drivers/usb/typec/hd3ss3220.c207
-rw-r--r--drivers/usb/typec/mux/Kconfig9
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/gpio-sbu-mux.c2
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c4
-rw-r--r--drivers/usb/typec/mux/tusb1046.c196
-rw-r--r--drivers/usb/typec/stusb160x.c7
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c24
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c4
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c12
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c11
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c3
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c5
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c42
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6360.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6370.c3
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c208
-rw-r--r--drivers/usb/typec/tcpm/wcove.c6
-rw-r--r--drivers/usb/typec/ucsi/Kconfig13
-rw-r--r--drivers/usb/typec/ucsi/Makefile1
-rw-r--r--drivers/usb/typec/ucsi/cros_ec_ucsi.c333
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c1
-rw-r--r--drivers/usb/typec/ucsi/psy.c28
-rw-r--r--drivers/usb/typec/ucsi/trace.h28
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c160
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h242
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c65
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c26
-rw-r--r--drivers/usb/typec/ucsi/ucsi_yoga_c630.c2
-rw-r--r--drivers/usb/usbip/stub_rx.c2
-rw-r--r--drivers/usb/usbip/stub_tx.c2
-rw-r--r--drivers/usb/usbip/vhci_hcd.c15
-rw-r--r--drivers/usb/usbip/vhci_rx.c6
-rw-r--r--drivers/usb/usbip/vudc_main.c2
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c8
-rw-r--r--drivers/usb/usbip/vudc_tx.c2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c2
-rw-r--r--drivers/vdpa/mlx5/core/mr.c12
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c25
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa.h32
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_hw.c50
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_main.c99
-rw-r--r--drivers/vdpa/solidrun/snet_main.c63
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c2
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c19
-rw-r--r--drivers/vfio/cdx/main.c2
-rw-r--r--drivers/vfio/group.c6
-rw-r--r--drivers/vfio/iommufd.c4
-rw-r--r--drivers/vfio/mdev/mdev_core.c4
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c266
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h19
-rw-r--r--drivers/vfio/pci/mlx5/cmd.c67
-rw-r--r--drivers/vfio/pci/mlx5/main.c37
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c171
-rw-r--r--drivers/vfio/pci/pds/pci_drv.c2
-rw-r--r--drivers/vfio/pci/qat/main.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c29
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c57
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c38
-rw-r--r--drivers/vfio/pci/virtio/Kconfig42
-rw-r--r--drivers/vfio/pci/virtio/Makefile3
-rw-r--r--drivers/vfio/pci/virtio/common.h127
-rw-r--r--drivers/vfio/pci/virtio/legacy_io.c418
-rw-r--r--drivers/vfio/pci/virtio/main.c476
-rw-r--r--drivers/vfio/pci/virtio/migrate.c1337
-rw-r--r--drivers/vfio/platform/vfio_platform.c2
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c10
-rw-r--r--drivers/vfio/vfio_iommu_type1.c12
-rw-r--r--drivers/vfio/vfio_main.c2
-rw-r--r--drivers/vfio/virqfd.c16
-rw-r--r--drivers/vhost/net.c7
-rw-r--r--drivers/vhost/scsi.c27
-rw-r--r--drivers/video/backlight/88pm860x_bl.c5
-rw-r--r--drivers/video/backlight/aat2870_bl.c2
-rw-r--r--drivers/video/backlight/adp5520_bl.c2
-rw-r--r--drivers/video/backlight/backlight.c42
-rw-r--r--drivers/video/backlight/corgi_lcd.c17
-rw-r--r--drivers/video/backlight/da9052_bl.c2
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--drivers/video/backlight/hx8357.c2
-rw-r--r--drivers/video/backlight/ili922x.c7
-rw-r--r--drivers/video/backlight/ili9320.c15
-rw-r--r--drivers/video/backlight/jornada720_lcd.c10
-rw-r--r--drivers/video/backlight/ktd2801-backlight.c2
-rw-r--r--drivers/video/backlight/ktz8866.c1
-rw-r--r--drivers/video/backlight/l4f00242t03.c32
-rw-r--r--drivers/video/backlight/lcd.c50
-rw-r--r--drivers/video/backlight/led_bl.c2
-rw-r--r--drivers/video/backlight/lm3533_bl.c2
-rw-r--r--drivers/video/backlight/lms283gf05.c2
-rw-r--r--drivers/video/backlight/lms501kf03.c24
-rw-r--r--drivers/video/backlight/lp8788_bl.c2
-rw-r--r--drivers/video/backlight/ltv350qv.c15
-rw-r--r--drivers/video/backlight/mt6370-backlight.c2
-rw-r--r--drivers/video/backlight/otm3225a.c2
-rw-r--r--drivers/video/backlight/platform_lcd.c20
-rw-r--r--drivers/video/backlight/pwm_bl.c2
-rw-r--r--drivers/video/backlight/qcom-wled.c2
-rw-r--r--drivers/video/backlight/rt4831-backlight.c2
-rw-r--r--drivers/video/backlight/sky81452-backlight.c2
-rw-r--r--drivers/video/backlight/tdo24m.c19
-rw-r--r--drivers/video/fbdev/Kconfig33
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/amifb.c4
-rw-r--r--drivers/video/fbdev/arcfb.c2
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c6
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c6
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c2
-rw-r--r--drivers/video/fbdev/aty/radeon_backlight.c2
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c8
-rw-r--r--drivers/video/fbdev/au1100fb.c2
-rw-r--r--drivers/video/fbdev/au1200fb.c2
-rw-r--r--drivers/video/fbdev/broadsheetfb.c2
-rw-r--r--drivers/video/fbdev/bw2.c4
-rw-r--r--drivers/video/fbdev/cg14.c4
-rw-r--r--drivers/video/fbdev/cg3.c4
-rw-r--r--drivers/video/fbdev/cg6.c4
-rw-r--r--drivers/video/fbdev/chipsfb.c2
-rw-r--r--drivers/video/fbdev/clps711x-fb.c31
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c2
-rw-r--r--drivers/video/fbdev/core/Kconfig3
-rw-r--r--drivers/video/fbdev/da8xx-fb.c1665
-rw-r--r--drivers/video/fbdev/efifb.c4
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c2
-rw-r--r--drivers/video/fbdev/ffb.c4
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c6
-rw-r--r--drivers/video/fbdev/gbefb.c6
-rw-r--r--drivers/video/fbdev/goldfishfb.c2
-rw-r--r--drivers/video/fbdev/grvga.c2
-rw-r--r--drivers/video/fbdev/hecubafb.c2
-rw-r--r--drivers/video/fbdev/hgafb.c2
-rw-r--r--drivers/video/fbdev/hitfb.c2
-rw-r--r--drivers/video/fbdev/imxfb.c34
-rw-r--r--drivers/video/fbdev/leo.c4
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/fbdev/metronomefb.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_spi.c6
-rw-r--r--drivers/video/fbdev/nvidia/nv_backlight.c2
-rw-r--r--drivers/video/fbdev/nvidia/nv_hw.c8
-rw-r--r--drivers/video/fbdev/ocfb.c2
-rw-r--r--drivers/video/fbdev/offb.c4
-rw-r--r--drivers/video/fbdev/omap/lcd_ams_delta.c8
-rw-r--r--drivers/video/fbdev/omap/lcd_dma.c4
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c40
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c67
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c22
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c17
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.h1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/sdi.c9
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/p9100.c4
-rw-r--r--drivers/video/fbdev/platinumfb.c2
-rw-r--r--drivers/video/fbdev/pxa168fb.c2
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c8
-rw-r--r--drivers/video/fbdev/pxafb.c2
-rw-r--r--drivers/video/fbdev/riva/fbdev.c2
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c2
-rw-r--r--drivers/video/fbdev/s3c-fb.c2
-rw-r--r--drivers/video/fbdev/sbuslib.c2
-rw-r--r--drivers/video/fbdev/sbuslib.h2
-rw-r--r--drivers/video/fbdev/sh7760fb.c5
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c12
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/fbdev/sm501fb.c7
-rw-r--r--drivers/video/fbdev/sstfb.c9
-rw-r--r--drivers/video/fbdev/tcx.c4
-rw-r--r--drivers/video/fbdev/udlfb.c8
-rw-r--r--drivers/video/fbdev/uvesafb.c2
-rw-r--r--drivers/video/fbdev/vesafb.c2
-rw-r--r--drivers/video/fbdev/vfb.c2
-rw-r--r--drivers/video/fbdev/vga16fb.c9
-rw-r--r--drivers/video/fbdev/via/via-gpio.c2
-rw-r--r--drivers/video/fbdev/via/via_i2c.c2
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c2
-rw-r--r--drivers/video/fbdev/wm8505fb.c2
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c2
-rw-r--r--drivers/video/fbdev/xilinxfb.c2
-rw-r--r--drivers/video/hdmi.c28
-rw-r--r--drivers/virt/acrn/irqfd.c13
-rw-r--r--drivers/virt/coco/Kconfig2
-rw-r--r--drivers/virt/coco/Makefile1
-rw-r--r--drivers/virt/coco/arm-cca-guest/Kconfig10
-rw-r--r--drivers/virt/coco/arm-cca-guest/Makefile2
-rw-r--r--drivers/virt/coco/arm-cca-guest/arm-cca-guest.c232
-rw-r--r--drivers/virt/coco/efi_secret/efi_secret.c2
-rw-r--r--drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c6
-rw-r--r--drivers/virt/coco/sev-guest/Kconfig3
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c673
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c4
-rw-r--r--drivers/virt/vboxguest/Kconfig2
-rw-r--r--drivers/virtio/Kconfig12
-rw-r--r--drivers/virtio/virtio.c113
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_dma_buf.c2
-rw-r--r--drivers/virtio/virtio_mem.c103
-rw-r--r--drivers/virtio/virtio_mmio.c2
-rw-r--r--drivers/virtio/virtio_pci_common.c65
-rw-r--r--drivers/virtio/virtio_pci_common.h20
-rw-r--r--drivers/virtio/virtio_pci_modern.c469
-rw-r--r--drivers/virtio/virtio_ring.c595
-rw-r--r--drivers/virtio/virtio_vdpa.c3
-rw-r--r--drivers/w1/masters/amd_axi_w1.c2
-rw-r--r--drivers/w1/masters/ds2482.c26
-rw-r--r--drivers/w1/masters/mxc_w1.c2
-rw-r--r--drivers/w1/masters/omap_hdq.c2
-rw-r--r--drivers/w1/masters/sgi_w1.c2
-rw-r--r--drivers/w1/masters/w1-gpio.c2
-rw-r--r--drivers/w1/slaves/w1_ds2406.c10
-rw-r--r--drivers/w1/slaves/w1_ds2408.c42
-rw-r--r--drivers/w1/slaves/w1_ds2413.c14
-rw-r--r--drivers/w1/slaves/w1_ds2430.c10
-rw-r--r--drivers/w1/slaves/w1_ds2431.c10
-rw-r--r--drivers/w1/slaves/w1_ds2433.c24
-rw-r--r--drivers/w1/slaves/w1_ds2438.c34
-rw-r--r--drivers/w1/slaves/w1_ds2780.c8
-rw-r--r--drivers/w1/slaves/w1_ds2781.c8
-rw-r--r--drivers/w1/slaves/w1_ds2805.c10
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c18
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c4
-rw-r--r--drivers/w1/w1.c12
-rw-r--r--drivers/watchdog/Kconfig31
-rw-r--r--drivers/watchdog/Makefile3
-rw-r--r--drivers/watchdog/acquirewdt.c2
-rw-r--r--drivers/watchdog/advantechwdt.c2
-rw-r--r--drivers/watchdog/airoha_wdt.c216
-rw-r--r--drivers/watchdog/apple_wdt.c8
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c10
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c2
-rw-r--r--drivers/watchdog/at91sam9_wdt.c2
-rw-r--r--drivers/watchdog/ath79_wdt.c2
-rw-r--r--drivers/watchdog/bcm2835_wdt.c2
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c2
-rw-r--r--drivers/watchdog/cgbc_wdt.c211
-rw-r--r--drivers/watchdog/cpu5wdt.c284
-rw-r--r--drivers/watchdog/cpwd.c2
-rw-r--r--drivers/watchdog/da9052_wdt.c13
-rw-r--r--drivers/watchdog/da9055_wdt.c7
-rw-r--r--drivers/watchdog/da9063_wdt.c19
-rw-r--r--drivers/watchdog/dw_wdt.c2
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/geodewdt.c2
-rw-r--r--drivers/watchdog/gxp-wdt.c4
-rw-r--r--drivers/watchdog/iTCO_wdt.c25
-rw-r--r--drivers/watchdog/ib700wdt.c2
-rw-r--r--drivers/watchdog/ie6xx_wdt.c2
-rw-r--r--drivers/watchdog/it87_wdt.c43
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c2
-rw-r--r--drivers/watchdog/max77620_wdt.c1
-rw-r--r--drivers/watchdog/menz69_wdt.c2
-rw-r--r--drivers/watchdog/mtk_wdt.c12
-rw-r--r--drivers/watchdog/mtx-1_wdt.c2
-rw-r--r--drivers/watchdog/nic7018_wdt.c2
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/octeon-wdt-main.c4
-rw-r--r--drivers/watchdog/omap_wdt.c2
-rw-r--r--drivers/watchdog/orion_wdt.c2
-rw-r--r--drivers/watchdog/pcwd.c2
-rw-r--r--drivers/watchdog/rc32434_wdt.c2
-rw-r--r--drivers/watchdog/rdc321x_wdt.c2
-rw-r--r--drivers/watchdog/renesas_wdt.c2
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/watchdog/rti_wdt.c13
-rw-r--r--drivers/watchdog/rza_wdt.c7
-rw-r--r--drivers/watchdog/rzg2l_wdt.c20
-rw-r--r--drivers/watchdog/rzn1_wdt.c2
-rw-r--r--drivers/watchdog/rzv2h_wdt.c16
-rw-r--r--drivers/watchdog/s3c2410_wdt.c45
-rw-r--r--drivers/watchdog/sa1100_wdt.c4
-rw-r--r--drivers/watchdog/sch311x_wdt.c2
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/sl28cpld_wdt.c4
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c3
-rw-r--r--drivers/watchdog/st_lpc_wdt.c2
-rw-r--r--drivers/watchdog/starfive-wdt.c4
-rw-r--r--drivers/watchdog/stm32_iwdg.c95
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c2
-rw-r--r--drivers/watchdog/txx9wdt.c2
-rw-r--r--drivers/watchdog/watchdog_core.c26
-rw-r--r--drivers/watchdog/watchdog_dev.c2
-rw-r--r--drivers/watchdog/xilinx_wwdt.c75
-rw-r--r--drivers/watchdog/ziirave_wdt.c2
-rw-r--r--drivers/xen/Kconfig1
-rw-r--r--drivers/xen/acpi.c24
-rw-r--r--drivers/xen/balloon.c2
-rw-r--r--drivers/xen/events/events_base.c8
-rw-r--r--drivers/xen/gntdev-dmabuf.c2
-rw-r--r--drivers/xen/grant-dma-iommu.c2
-rw-r--r--drivers/xen/pcpu.c2
-rw-r--r--drivers/xen/privcmd.c34
-rw-r--r--drivers/xen/pvcalls-front.c14
-rw-r--r--drivers/xen/pvcalls-front.h2
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c11
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c8
-rw-r--r--drivers/zorro/zorro-sysfs.c10
10670 files changed, 477203 insertions, 271280 deletions
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index 64065fb8922b..5b9490367a39 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -24,6 +24,7 @@ menuconfig DRM_ACCEL
different device files, called accel/accel* (in /dev, sysfs
and debugfs).
+source "drivers/accel/amdxdna/Kconfig"
source "drivers/accel/habanalabs/Kconfig"
source "drivers/accel/ivpu/Kconfig"
source "drivers/accel/qaic/Kconfig"
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
index ab3df932937f..a301fb6089d4 100644
--- a/drivers/accel/Makefile
+++ b/drivers/accel/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/
diff --git a/drivers/accel/amdxdna/Kconfig b/drivers/accel/amdxdna/Kconfig
new file mode 100644
index 000000000000..f39d7a87296c
--- /dev/null
+++ b/drivers/accel/amdxdna/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_AMDXDNA
+ tristate "AMD AI Engine"
+ depends on AMD_IOMMU
+ depends on DRM_ACCEL
+ depends on PCI && HAS_IOMEM
+ depends on X86_64
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ select FW_LOADER
+ select HMM_MIRROR
+ help
+	  Choose this option to enable support for the NPU integrated into
+	  AMD client CPUs such as the AMD Ryzen AI 300 Series. The AMD NPU
+	  can be used to accelerate machine learning applications.
+
+ If "M" is selected, the driver module will be amdxdna.
diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile
new file mode 100644
index 000000000000..0e9adf6890a0
--- /dev/null
+++ b/drivers/accel/amdxdna/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+amdxdna-y := \
+ aie2_ctx.o \
+ aie2_error.o \
+ aie2_message.o \
+ aie2_pci.o \
+ aie2_pm.o \
+ aie2_psp.o \
+ aie2_smu.o \
+ aie2_solver.o \
+ amdxdna_ctx.o \
+ amdxdna_gem.o \
+ amdxdna_mailbox.o \
+ amdxdna_mailbox_helper.o \
+ amdxdna_pci_drv.o \
+ amdxdna_sysfs.o \
+ npu1_regs.o \
+ npu2_regs.o \
+ npu4_regs.o \
+ npu5_regs.o \
+ npu6_regs.o
+obj-$(CONFIG_DRM_ACCEL_AMDXDNA) = amdxdna.o
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
new file mode 100644
index 000000000000..5119bccd1917
--- /dev/null
+++ b/drivers/accel/amdxdna/TODO
@@ -0,0 +1,3 @@
+- Add import and export BO support
+- Add debugfs support
+- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
new file mode 100644
index 000000000000..5f43db02b240
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/drm_syncobj.h>
+#include <linux/hmm.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <trace/events/amdxdna.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "aie2_solver.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+static bool force_cmdlist;
+module_param(force_cmdlist, bool, 0600);
+MODULE_PARM_DESC(force_cmdlist, "Force the use of command lists (default false)");
+
+#define HWCTX_MAX_TIMEOUT 60000 /* milliseconds */
+
+static void aie2_job_release(struct kref *ref)
+{
+ struct amdxdna_sched_job *job;
+
+ job = container_of(ref, struct amdxdna_sched_job, refcnt);
+ amdxdna_sched_job_cleanup(job);
+ if (job->out_fence)
+ dma_fence_put(job->out_fence);
+ kfree(job);
+}
+
+static void aie2_job_put(struct amdxdna_sched_job *job)
+{
+ kref_put(&job->refcnt, aie2_job_release);
+}
+
+/* bad_job is only set by aie2_sched_job_timedout(); otherwise pass NULL */
+static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
+ struct drm_sched_job *bad_job)
+{
+ drm_sched_stop(&hwctx->priv->sched, bad_job);
+ aie2_destroy_context(xdna->dev_handle, hwctx);
+}
+
+static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_gem_obj *heap = hwctx->priv->heap;
+ int ret;
+
+ ret = aie2_create_context(xdna->dev_handle, hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create hwctx failed, ret %d", ret);
+ goto out;
+ }
+
+ ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
+ heap->mem.userptr, heap->mem.size);
+ if (ret) {
+ XDNA_ERR(xdna, "Map host buf failed, ret %d", ret);
+ goto out;
+ }
+
+ if (hwctx->status != HWCTX_STAT_READY) {
+ XDNA_DBG(xdna, "hwctx is not ready, status %d", hwctx->status);
+ goto out;
+ }
+
+ ret = aie2_config_cu(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
+ goto out;
+ }
+
+out:
+ drm_sched_start(&hwctx->priv->sched, 0);
+ XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
+ return ret;
+}
+
+void aie2_restart_ctx(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ if (hwctx->status != HWCTX_STAT_STOP)
+ continue;
+
+ hwctx->status = hwctx->old_status;
+ XDNA_DBG(xdna, "Resetting %s", hwctx->name);
+ aie2_hwctx_restart(xdna, hwctx);
+ }
+ mutex_unlock(&client->hwctx_lock);
+}
+
+static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
+{
+ struct dma_fence *fence, *out_fence = NULL;
+ int ret;
+
+ fence = drm_syncobj_fence_get(hwctx->priv->syncobj);
+ if (!fence)
+ return NULL;
+
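+	/*
+	 * The syncobj carries a dma_fence_chain; find the chain link for
+	 * this sequence number and return the fence it contains.
+	 */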
+ ret = dma_fence_chain_find_seqno(&fence, seq);
+ if (ret)
+ goto out;
+
+ out_fence = dma_fence_get(dma_fence_chain_contained(fence));
+
+out:
+ dma_fence_put(fence);
+ return out_fence;
+}
+
+static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
+{
+ struct dma_fence *fence;
+
+ fence = aie2_cmd_get_out_fence(hwctx, hwctx->priv->seq - 1);
+ if (!fence)
+ return;
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+}
+
+void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ /*
+	 * A command timeout is unlikely, but if it happens it doesn't
+	 * break the system: aie2_hwctx_stop() will destroy the mailbox
+	 * and abort all commands.
+ */
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ aie2_hwctx_wait_for_idle(hwctx);
+ aie2_hwctx_stop(xdna, hwctx, NULL);
+ hwctx->old_status = hwctx->status;
+ hwctx->status = HWCTX_STAT_STOP;
+}
+
+void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ /*
+	 * The resume path cannot guarantee that the mailbox channel is
+	 * regenerated. If that happens, submitting a message to this
+	 * mailbox channel will return an error.
+ */
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ hwctx->status = hwctx->old_status;
+ aie2_hwctx_restart(xdna, hwctx);
+}
+
+static void
+aie2_sched_notify(struct amdxdna_sched_job *job)
+{
+ struct dma_fence *fence = job->fence;
+
+ trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
+ job->hwctx->priv->completed++;
+ dma_fence_signal(fence);
+
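+	/* Release the per-context submission slot taken when the job was submitted */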
+ up(&job->hwctx->priv->job_sem);
+ job->job_done = true;
+ dma_fence_put(fence);
+ mmput_async(job->mm);
+ aie2_job_put(job);
+}
+
+static int
+aie2_sched_resp_handler(void *handle, const u32 *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ struct amdxdna_gem_obj *cmd_abo;
+ u32 ret = 0;
+ u32 status;
+
+ cmd_abo = job->cmd_bo;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(size != sizeof(u32))) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ status = *data;
+ XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+ if (status == AIE2_STATUS_SUCCESS)
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ else
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
+
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static int
+aie2_sched_nocmd_resp_handler(void *handle, const u32 *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ u32 ret = 0;
+ u32 status;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(size != sizeof(u32))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ status = *data;
+ XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static int
+aie2_sched_cmdlist_resp_handler(void *handle, const u32 *data, size_t size)
+{
+ struct amdxdna_sched_job *job = handle;
+ struct amdxdna_gem_obj *cmd_abo;
+ struct cmd_chain_resp *resp;
+ struct amdxdna_dev *xdna;
+ u32 fail_cmd_status;
+ u32 fail_cmd_idx;
+ u32 ret = 0;
+
+ cmd_abo = job->cmd_bo;
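+	/*
+	 * A command-chain response carries three u32 words: status,
+	 * fail_cmd_idx and fail_cmd_status, see struct cmd_chain_resp.
+	 */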
+ if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ resp = (struct cmd_chain_resp *)data;
+ xdna = job->hwctx->client->xdna;
+ XDNA_DBG(xdna, "Status 0x%x", resp->status);
+ if (resp->status == AIE2_STATUS_SUCCESS) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ goto out;
+ }
+
+ /* Slow path to handle error, read from ringbuf on BAR */
+ fail_cmd_idx = resp->fail_cmd_idx;
+ fail_cmd_status = resp->fail_cmd_status;
+ XDNA_DBG(xdna, "Failed cmd idx %d, status 0x%x",
+ fail_cmd_idx, fail_cmd_status);
+
+ if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+ amdxdna_cmd_set_state(cmd_abo, fail_cmd_status);
+
+ if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
+ struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);
+
+ cc->error_index = fail_cmd_idx;
+ if (cc->error_index >= cc->command_count)
+ cc->error_index = 0;
+ }
+out:
+ aie2_sched_notify(job);
+ return ret;
+}
+
+static struct dma_fence *
+aie2_sched_job_run(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+ struct dma_fence *fence;
+ int ret;
+
+ if (!mmget_not_zero(job->mm))
+ return ERR_PTR(-ESRCH);
+
+ kref_get(&job->refcnt);
+ fence = dma_fence_get(job->fence);
+
+ if (unlikely(!cmd_abo)) {
+ ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler);
+ goto out;
+ }
+
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_NEW);
+
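+	/*
+	 * Chained commands always take the command-list path; single
+	 * commands take it only when the force_cmdlist parameter is set.
+	 */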
+ if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN)
+ ret = aie2_cmdlist_multi_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
+ else if (force_cmdlist)
+ ret = aie2_cmdlist_single_execbuf(hwctx, job, aie2_sched_cmdlist_resp_handler);
+ else
+ ret = aie2_execbuf(hwctx, job, aie2_sched_resp_handler);
+
+out:
+ if (ret) {
+ dma_fence_put(job->fence);
+ aie2_job_put(job);
+ mmput(job->mm);
+ fence = ERR_PTR(ret);
+ }
+ trace_xdna_job(sched_job, hwctx->name, "sent to device", job->seq);
+
+ return fence;
+}
+
+static void aie2_sched_job_free(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+
+ trace_xdna_job(sched_job, hwctx->name, "job free", job->seq);
+ if (!job->job_done)
+ up(&hwctx->priv->job_sem);
+
+ drm_sched_job_cleanup(sched_job);
+ aie2_job_put(job);
+}
+
+static enum drm_gpu_sched_stat
+aie2_sched_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct amdxdna_sched_job *job = drm_job_to_xdna_job(sched_job);
+ struct amdxdna_hwctx *hwctx = job->hwctx;
+ struct amdxdna_dev *xdna;
+
+ xdna = hwctx->client->xdna;
+ trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
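+	/* Recover from the hang: tear the context down and bring it back up */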
+ mutex_lock(&xdna->dev_lock);
+ aie2_hwctx_stop(xdna, hwctx, sched_job);
+
+ aie2_hwctx_restart(xdna, hwctx);
+ mutex_unlock(&xdna->dev_lock);
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+}
+
+const struct drm_sched_backend_ops sched_ops = {
+ .run_job = aie2_sched_job_run,
+ .free_job = aie2_sched_job_free,
+ .timedout_job = aie2_sched_job_timedout,
+};
+
+static int aie2_hwctx_col_list(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int start, end, first, last;
+ u32 width = 1, entries = 0;
+ int i;
+
+ if (!hwctx->num_tiles) {
+ XDNA_ERR(xdna, "Number of tiles is zero");
+ return -EINVAL;
+ }
+
+ ndev = xdna->dev_handle;
+ if (unlikely(!ndev->metadata.core.row_count)) {
+ XDNA_WARN(xdna, "Core tile row count is zero");
+ return -EINVAL;
+ }
+
+ hwctx->num_col = hwctx->num_tiles / ndev->metadata.core.row_count;
+ if (!hwctx->num_col || hwctx->num_col > ndev->total_col) {
+ XDNA_ERR(xdna, "Invalid num_col %d", hwctx->num_col);
+ return -EINVAL;
+ }
+
+ if (ndev->priv->col_align == COL_ALIGN_NATURE)
+ width = hwctx->num_col;
+
+ /*
+	 * In the range [start, end], find the columns that are multiples
+	 * of 'width':
+	 * 'first' is the first such column,
+	 * 'last' is the last such column,
+	 * 'entries' is the total number of such columns.
+ */
+ start = xdna->dev_info->first_col;
+ end = ndev->total_col - hwctx->num_col;
+ if (start > 0 && end == 0) {
+ XDNA_DBG(xdna, "Force start from col 0");
+ start = 0;
+ }
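+	/* Round 'start' up and 'end' down to the nearest multiples of 'width' */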
+ first = start + (width - start % width) % width;
+ last = end - end % width;
+ if (last >= first)
+ entries = (last - first) / width + 1;
+ XDNA_DBG(xdna, "start %d end %d first %d last %d",
+ start, end, first, last);
+
+ if (unlikely(!entries)) {
+ XDNA_ERR(xdna, "Start %d end %d width %d",
+ start, end, width);
+ return -EINVAL;
+ }
+
+ hwctx->col_list = kmalloc_array(entries, sizeof(*hwctx->col_list), GFP_KERNEL);
+ if (!hwctx->col_list)
+ return -ENOMEM;
+
+ hwctx->col_list_len = entries;
+ hwctx->col_list[0] = first;
+ for (i = 1; i < entries; i++)
+ hwctx->col_list[i] = hwctx->col_list[i - 1] + width;
+
+ print_hex_dump_debug("col_list: ", DUMP_PREFIX_OFFSET, 16, 4, hwctx->col_list,
+ entries * sizeof(*hwctx->col_list), false);
+ return 0;
+}
+
+static int aie2_alloc_resource(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct alloc_requests *xrs_req;
+ int ret;
+
+ xrs_req = kzalloc(sizeof(*xrs_req), GFP_KERNEL);
+ if (!xrs_req)
+ return -ENOMEM;
+
+ xrs_req->cdo.start_cols = hwctx->col_list;
+ xrs_req->cdo.cols_len = hwctx->col_list_len;
+ xrs_req->cdo.ncols = hwctx->num_col;
+ xrs_req->cdo.qos_cap.opc = hwctx->max_opc;
+
+ xrs_req->rqos.gops = hwctx->qos.gops;
+ xrs_req->rqos.fps = hwctx->qos.fps;
+ xrs_req->rqos.dma_bw = hwctx->qos.dma_bandwidth;
+ xrs_req->rqos.latency = hwctx->qos.latency;
+ xrs_req->rqos.exec_time = hwctx->qos.frame_exec_time;
+ xrs_req->rqos.priority = hwctx->qos.priority;
+
+ xrs_req->rid = (uintptr_t)hwctx;
+
+ ret = xrs_allocate_resource(xdna->xrs_hdl, xrs_req, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "Allocate AIE resource failed, ret %d", ret);
+
+ kfree(xrs_req);
+ return ret;
+}
+
+static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ int ret;
+
+ ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "Release AIE resource failed, ret %d", ret);
+}
+
+static int aie2_ctx_syncobj_create(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct drm_file *filp = hwctx->client->filp;
+ struct drm_syncobj *syncobj;
+ u32 hdl;
+ int ret;
+
+ hwctx->syncobj_hdl = AMDXDNA_INVALID_FENCE_HANDLE;
+
+ ret = drm_syncobj_create(&syncobj, 0, NULL);
+ if (ret) {
+ XDNA_ERR(xdna, "Create ctx syncobj failed, ret %d", ret);
+ return ret;
+ }
+ ret = drm_syncobj_get_handle(filp, syncobj, &hdl);
+ if (ret) {
+ drm_syncobj_put(syncobj);
+ XDNA_ERR(xdna, "Create ctx syncobj handle failed, ret %d", ret);
+ return ret;
+ }
+ hwctx->priv->syncobj = syncobj;
+ hwctx->syncobj_hdl = hdl;
+
+ return 0;
+}
+
+static void aie2_ctx_syncobj_destroy(struct amdxdna_hwctx *hwctx)
+{
+ /*
+ * The syncobj_hdl is owned by user space and will be cleaned up
+ * separately.
+ */
+ drm_syncobj_put(hwctx->priv->syncobj);
+}
+
+int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct drm_gpu_scheduler *sched;
+ struct amdxdna_hwctx_priv *priv;
+ struct amdxdna_gem_obj *heap;
+ struct amdxdna_dev_hdl *ndev;
+ int i, ret;
+
+ priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ hwctx->priv = priv;
+
+ mutex_lock(&client->mm_lock);
+ heap = client->dev_heap;
+ if (!heap) {
+		XDNA_ERR(xdna, "The client dev heap object does not exist");
+ mutex_unlock(&client->mm_lock);
+ ret = -ENOENT;
+ goto free_priv;
+ }
+ drm_gem_object_get(to_gobj(heap));
+ mutex_unlock(&client->mm_lock);
+ priv->heap = heap;
+ sema_init(&priv->job_sem, HWCTX_MAX_CMDS);
+
+ ret = amdxdna_gem_pin(heap);
+ if (ret) {
+ XDNA_ERR(xdna, "Dev heap pin failed, ret %d", ret);
+ goto put_heap;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
+ struct amdxdna_gem_obj *abo;
+ struct amdxdna_drm_create_bo args = {
+ .flags = 0,
+ .type = AMDXDNA_BO_DEV,
+ .vaddr = 0,
+ .size = MAX_CHAIN_CMDBUF_SIZE,
+ };
+
+ abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp, true);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
+ goto free_cmd_bufs;
+ }
+
+ XDNA_DBG(xdna, "Command buf %d addr 0x%llx size 0x%lx",
+ i, abo->mem.dev_addr, abo->mem.size);
+ priv->cmd_buf[i] = abo;
+ }
+
+ sched = &priv->sched;
+ mutex_init(&priv->io_lock);
+
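+	/*
+	 * Lockdep annotation: record that io_lock can be taken in reclaim
+	 * context, so a GFP_KERNEL allocation made while holding io_lock
+	 * is reported as a potential deadlock.
+	 */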
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&priv->io_lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+ ret = drm_sched_init(sched, &sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT,
+ HWCTX_MAX_CMDS, 0, msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
+ NULL, NULL, hwctx->name, xdna->ddev.dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret);
+ goto free_cmd_bufs;
+ }
+
+ ret = drm_sched_entity_init(&priv->entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (ret) {
+		XDNA_ERR(xdna, "Failed to init sched entity, ret %d", ret);
+ goto free_sched;
+ }
+
+ ret = aie2_hwctx_col_list(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create col list failed, ret %d", ret);
+ goto free_entity;
+ }
+
+ ret = aie2_alloc_resource(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
+ goto free_col_list;
+ }
+
+ ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
+ heap->mem.userptr, heap->mem.size);
+ if (ret) {
+ XDNA_ERR(xdna, "Map host buffer failed, ret %d", ret);
+ goto release_resource;
+ }
+
+ ret = aie2_ctx_syncobj_create(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
+ goto release_resource;
+ }
+
+ hwctx->status = HWCTX_STAT_INIT;
+ ndev = xdna->dev_handle;
+ ndev->hwctx_num++;
+
+ XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
+
+ return 0;
+
+release_resource:
+ aie2_release_resource(hwctx);
+free_col_list:
+ kfree(hwctx->col_list);
+free_entity:
+ drm_sched_entity_destroy(&priv->entity);
+free_sched:
+ drm_sched_fini(&priv->sched);
+free_cmd_bufs:
+ for (i = 0; i < ARRAY_SIZE(priv->cmd_buf); i++) {
+ if (!priv->cmd_buf[i])
+ continue;
+ drm_gem_object_put(to_gobj(priv->cmd_buf[i]));
+ }
+ amdxdna_gem_unpin(heap);
+put_heap:
+ drm_gem_object_put(to_gobj(heap));
+free_priv:
+ kfree(priv);
+ return ret;
+}
+
+void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_dev_hdl *ndev;
+ struct amdxdna_dev *xdna;
+ int idx;
+
+ xdna = hwctx->client->xdna;
+ ndev = xdna->dev_handle;
+ ndev->hwctx_num--;
+ drm_sched_wqueue_stop(&hwctx->priv->sched);
+
+	/* From now on, the scheduler will not send commands to the device. */
+ aie2_release_resource(hwctx);
+
+ /*
+ * All submitted commands are aborted.
+ * Restart the scheduler queues to clean up jobs; if called,
+ * amdxdna_sched_job_run() will return NODEV.
+ */
+ drm_sched_wqueue_start(&hwctx->priv->sched);
+
+ aie2_hwctx_wait_for_idle(hwctx);
+ drm_sched_entity_destroy(&hwctx->priv->entity);
+ drm_sched_fini(&hwctx->priv->sched);
+ aie2_ctx_syncobj_destroy(hwctx);
+
+ XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
+
+ for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
+ drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
+ amdxdna_gem_unpin(hwctx->priv->heap);
+ drm_gem_object_put(to_gobj(hwctx->priv->heap));
+
+ mutex_destroy(&hwctx->priv->io_lock);
+ kfree(hwctx->col_list);
+ kfree(hwctx->priv);
+ kfree(hwctx->cus);
+}
+
+static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
+{
+ struct amdxdna_hwctx_param_config_cu *config = buf;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 total_size;
+ int ret;
+
+ XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
+ if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
+ return -EINVAL;
+
+ if (hwctx->status != HWCTX_STAT_INIT) {
+ XDNA_ERR(xdna, "Not support re-config CU");
+ return -EINVAL;
+ }
+
+ if (!config->num_cus) {
+ XDNA_ERR(xdna, "Number of CU is zero");
+ return -EINVAL;
+ }
+
+ total_size = struct_size(config, cu_configs, config->num_cus);
+ if (total_size > size) {
+ XDNA_ERR(xdna, "CU config larger than size");
+ return -EINVAL;
+ }
+
+ hwctx->cus = kmemdup(config, total_size, GFP_KERNEL);
+ if (!hwctx->cus)
+ return -ENOMEM;
+
+ ret = aie2_config_cu(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
+ goto free_cus;
+ }
+
+ wmb(); /* Publish CU config before the status update so command submit can check status without locking */
+ hwctx->status = HWCTX_STAT_READY;
+
+ return 0;
+
+free_cus:
+ kfree(hwctx->cus);
+ hwctx->cus = NULL;
+ return ret;
+}
+
+int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ switch (type) {
+ case DRM_AMDXDNA_HWCTX_CONFIG_CU:
+ return aie2_hwctx_cu_config(hwctx, buf, size);
+ case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
+ return -EOPNOTSUPP;
+ default:
+ XDNA_DBG(xdna, "Not supported type %d", type);
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aie2_populate_range(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct mm_struct *mm = abo->mem.notifier.mm;
+ struct hmm_range range = { 0 };
+ unsigned long timeout;
+ int ret;
+
+ XDNA_INFO_ONCE(xdna, "populate memory range %llx size %lx",
+ abo->mem.userptr, abo->mem.size);
+ range.notifier = &abo->mem.notifier;
+ range.start = abo->mem.userptr;
+ range.end = abo->mem.userptr + abo->mem.size;
+ range.hmm_pfns = abo->mem.pfns;
+ range.default_flags = HMM_PFN_REQ_FAULT;
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
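+ /* Fault in the user pages; retry if the mapping changes underneath us. */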
+again:
+ range.notifier_seq = mmu_interval_read_begin(&abo->mem.notifier);
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(&range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (time_after(jiffies, timeout)) {
+ ret = -ETIME;
+ goto put_mm;
+ }
+
+ if (ret == -EBUSY)
+ goto again;
+
+ goto put_mm;
+ }
+
+ down_read(&xdna->notifier_lock);
+ if (mmu_interval_read_retry(&abo->mem.notifier, range.notifier_seq)) {
+ up_read(&xdna->notifier_lock);
+ goto again;
+ }
+ abo->mem.map_invalid = false;
+ up_read(&xdna->notifier_lock);
+
+put_mm:
+ mmput(mm);
+ return ret;
+}
+
+int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct ww_acquire_ctx acquire_ctx;
+ struct dma_fence_chain *chain;
+ struct amdxdna_gem_obj *abo;
+ unsigned long timeout = 0;
+ int ret, i;
+
+ ret = down_interruptible(&hwctx->priv->job_sem);
+ if (ret) {
+ XDNA_ERR(xdna, "Grab job sem failed, ret %d", ret);
+ return ret;
+ }
+
+ chain = dma_fence_chain_alloc();
+ if (!chain) {
+ XDNA_ERR(xdna, "Alloc fence chain failed");
+ ret = -ENOMEM;
+ goto up_sem;
+ }
+
+ ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
+ goto free_chain;
+ }
+
+retry:
+ ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ if (ret) {
+ XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
+ goto cleanup_job;
+ }
+
+ for (i = 0; i < job->bo_cnt; i++) {
+ ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
+ if (ret) {
+ XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ goto cleanup_job;
+ }
+ }
+
+ down_read(&xdna->notifier_lock);
+ for (i = 0; i < job->bo_cnt; i++) {
+ abo = to_xdna_obj(job->bos[i]);
+ if (abo->mem.map_invalid) {
+ up_read(&xdna->notifier_lock);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+ if (!timeout) {
+ timeout = jiffies +
+ msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ } else if (time_after(jiffies, timeout)) {
+ ret = -ETIME;
+ goto cleanup_job;
+ }
+
+ ret = aie2_populate_range(abo);
+ if (ret)
+ goto cleanup_job;
+ goto retry;
+ }
+ }
+
+ mutex_lock(&hwctx->priv->io_lock);
+ drm_sched_job_arm(&job->base);
+ job->out_fence = dma_fence_get(&job->base.s_fence->finished);
+ for (i = 0; i < job->bo_cnt; i++)
+ dma_resv_add_fence(job->bos[i]->resv, job->out_fence, DMA_RESV_USAGE_WRITE);
+ job->seq = hwctx->priv->seq++;
+ kref_get(&job->refcnt);
+ drm_sched_entity_push_job(&job->base);
+
+ *seq = job->seq;
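+ /* Publish the job's finished fence at timeline point *seq on the context syncobj. */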
+ drm_syncobj_add_point(hwctx->priv->syncobj, chain, job->out_fence, *seq);
+ mutex_unlock(&hwctx->priv->io_lock);
+
+ up_read(&xdna->notifier_lock);
+ drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
+
+ aie2_job_put(job);
+
+ return 0;
+
+cleanup_job:
+ drm_sched_job_cleanup(&job->base);
+free_chain:
+ dma_fence_chain_free(chain);
+up_sem:
+ up(&hwctx->priv->job_sem);
+ job->job_done = true;
+ return ret;
+}
+
+void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
+ unsigned long cur_seq)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ struct drm_gem_object *gobj = to_gobj(abo);
+ long ret;
+
+ down_write(&xdna->notifier_lock);
+ abo->mem.map_invalid = true;
+ mmu_interval_set_seq(&abo->mem.notifier, cur_seq);
+ up_write(&xdna->notifier_lock);
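+ /* Wait for all jobs still using this BO to finish before the mapping goes away. */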
+ ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, MAX_SCHEDULE_TIMEOUT);
+ if (!ret || ret == -ERESTARTSYS)
+ XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
+}
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
new file mode 100644
index 000000000000..b1defaa8513b
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+struct async_event {
+ struct amdxdna_dev_hdl *ndev;
+ struct async_event_msg_resp resp;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ u8 *buf;
+ dma_addr_t addr;
+ u32 size;
+};
+
+struct async_events {
+ struct workqueue_struct *wq;
+ u8 *buf;
+ dma_addr_t addr;
+ u32 size;
+ u32 event_cnt;
+ struct async_event event[] __counted_by(event_cnt);
+};
+
+/*
+ * The enum, struct, and lookup tables below are ported from the XAIE util
+ * header file.
+ *
+ * This data is defined by the AIE device and is used to decode error
+ * messages coming from the device.
+ */
+
+enum aie_module_type {
+ AIE_MEM_MOD = 0,
+ AIE_CORE_MOD,
+ AIE_PL_MOD,
+};
+
+enum aie_error_category {
+ AIE_ERROR_SATURATION = 0,
+ AIE_ERROR_FP,
+ AIE_ERROR_STREAM,
+ AIE_ERROR_ACCESS,
+ AIE_ERROR_BUS,
+ AIE_ERROR_INSTRUCTION,
+ AIE_ERROR_ECC,
+ AIE_ERROR_LOCK,
+ AIE_ERROR_DMA,
+ AIE_ERROR_MEM_PARITY,
+ /* Unknown is not from XAIE; added for better categorization */
+ AIE_ERROR_UNKNOWN,
+};
+
+/* Don't pack unless the XAIE side changes */
+struct aie_error {
+ __u8 row;
+ __u8 col;
+ __u32 mod_type;
+ __u8 event_id;
+};
+
+struct aie_err_info {
+ u32 err_cnt;
+ u32 ret_code;
+ u32 rsvd;
+ struct aie_error payload[] __counted_by(err_cnt);
+};
+
+struct aie_event_category {
+ u8 event_id;
+ enum aie_error_category category;
+};
+
+#define EVENT_CATEGORY(id, cat) { id, cat }
+static const struct aie_event_category aie_ml_mem_event_cat[] = {
+ EVENT_CATEGORY(88U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(90U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(91U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(92U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(93U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(94U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(95U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(96U, AIE_ERROR_MEM_PARITY),
+ EVENT_CATEGORY(97U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(98U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(99U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(100U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(101U, AIE_ERROR_LOCK),
+};
+
+static const struct aie_event_category aie_ml_core_event_cat[] = {
+ EVENT_CATEGORY(55U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(56U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(57U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(58U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(59U, AIE_ERROR_INSTRUCTION),
+ EVENT_CATEGORY(60U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(62U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(64U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(65U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(66U, AIE_ERROR_ACCESS),
+ EVENT_CATEGORY(67U, AIE_ERROR_LOCK),
+ EVENT_CATEGORY(70U, AIE_ERROR_INSTRUCTION),
+ EVENT_CATEGORY(71U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(72U, AIE_ERROR_BUS),
+};
+
+static const struct aie_event_category aie_ml_mem_tile_event_cat[] = {
+ EVENT_CATEGORY(130U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(132U, AIE_ERROR_ECC),
+ EVENT_CATEGORY(133U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(134U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(135U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(136U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(137U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(138U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(139U, AIE_ERROR_LOCK),
+};
+
+static const struct aie_event_category aie_ml_shim_tile_event_cat[] = {
+ EVENT_CATEGORY(64U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(65U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(66U, AIE_ERROR_STREAM),
+ EVENT_CATEGORY(67U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(68U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(69U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(70U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(71U, AIE_ERROR_BUS),
+ EVENT_CATEGORY(72U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(73U, AIE_ERROR_DMA),
+ EVENT_CATEGORY(74U, AIE_ERROR_LOCK),
+};
+
+static enum aie_error_category
+aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
+{
+ const struct aie_event_category *lut;
+ int num_entry;
+ int i;
+
+ switch (mod_type) {
+ case AIE_PL_MOD:
+ lut = aie_ml_shim_tile_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_shim_tile_event_cat);
+ break;
+ case AIE_CORE_MOD:
+ lut = aie_ml_core_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_core_event_cat);
+ break;
+ case AIE_MEM_MOD:
+ if (row == 1) {
+ lut = aie_ml_mem_tile_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_mem_tile_event_cat);
+ } else {
+ lut = aie_ml_mem_event_cat;
+ num_entry = ARRAY_SIZE(aie_ml_mem_event_cat);
+ }
+ break;
+ default:
+ return AIE_ERROR_UNKNOWN;
+ }
+
+ for (i = 0; i < num_entry; i++) {
+ if (event_id != lut[i].event_id)
+ continue;
+
+ return lut[i].category;
+ }
+
+ return AIE_ERROR_UNKNOWN;
+}
+
+static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
+{
+ struct aie_error *errs = err_info;
+ u32 err_col = 0; /* assume that AIE has fewer than 32 columns */
+ int i;
+
+ /* Get err column bitmap */
+ for (i = 0; i < num_err; i++) {
+ struct aie_error *err = &errs[i];
+ enum aie_error_category cat;
+
+ cat = aie_get_error_category(err->row, err->event_id, err->mod_type);
+ XDNA_ERR(ndev->xdna, "Row: %d, Col: %d, module %d, event ID %d, category %d",
+ err->row, err->col, err->mod_type,
+ err->event_id, cat);
+
+ if (err->col >= 32) {
+ XDNA_WARN(ndev->xdna, "Invalid column number");
+ break;
+ }
+
+ err_col |= (1 << err->col);
+ }
+
+ return err_col;
+}
+
+static int aie2_error_async_cb(void *handle, const u32 *data, size_t size)
+{
+ struct async_event_msg_resp *resp;
+ struct async_event *e = handle;
+
+ if (data) {
+ resp = (struct async_event_msg_resp *)data;
+ e->resp.type = resp->type;
+ wmb(); /* Update status last so no locking is needed here */
+ e->resp.status = resp->status;
+ }
+ queue_work(e->wq, &e->work);
+ return 0;
+}
+
+static int aie2_error_event_send(struct async_event *e)
+{
+ drm_clflush_virt_range(e->buf, e->size); /* device can access */
+ return aie2_register_asyn_event_msg(e->ndev, e->addr, e->size, e,
+ aie2_error_async_cb);
+}
+
+static void aie2_error_worker(struct work_struct *err_work)
+{
+ struct aie_err_info *info;
+ struct amdxdna_dev *xdna;
+ struct async_event *e;
+ u32 max_err;
+ u32 err_col;
+
+ e = container_of(err_work, struct async_event, work);
+
+ xdna = e->ndev->xdna;
+
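+ /* resp.status is parked at MAX_AIE2_STATUS_CODE when no new event has arrived. */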
+ if (e->resp.status == MAX_AIE2_STATUS_CODE)
+ return;
+
+ e->resp.status = MAX_AIE2_STATUS_CODE;
+
+ print_hex_dump_debug("AIE error: ", DUMP_PREFIX_OFFSET, 16, 4,
+ e->buf, 0x100, false);
+
+ info = (struct aie_err_info *)e->buf;
+ XDNA_DBG(xdna, "Error count %d return code %d", info->err_cnt, info->ret_code);
+
+ max_err = (e->size - sizeof(*info)) / sizeof(struct aie_error);
+ if (unlikely(info->err_cnt > max_err)) {
+ WARN_ONCE(1, "Error count too large %d\n", info->err_cnt);
+ return;
+ }
+ err_col = aie2_error_backtrack(e->ndev, info->payload, info->err_cnt);
+ if (!err_col) {
+ XDNA_WARN(xdna, "Did not get error column");
+ return;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ /* Re-send this event to the firmware */
+ if (aie2_error_event_send(e))
+ XDNA_WARN(xdna, "Unable to register async event");
+ mutex_unlock(&xdna->dev_lock);
+}
+
+int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct async_event *e;
+ int i, ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ for (i = 0; i < ndev->async_events->event_cnt; i++) {
+ e = &ndev->async_events->event[i];
+ ret = aie2_error_event_send(e);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct async_events *events;
+
+ events = ndev->async_events;
+
+ mutex_unlock(&xdna->dev_lock);
+ destroy_workqueue(events->wq);
+ mutex_lock(&xdna->dev_lock);
+
+ dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
+ events->addr, DMA_FROM_DEVICE);
+ kfree(events);
+}
+
+int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ u32 total_col = ndev->total_col;
+ u32 total_size = ASYNC_BUF_SIZE * total_col;
+ struct async_events *events;
+ int i, ret;
+
+ events = kzalloc(struct_size(events, event, total_col), GFP_KERNEL);
+ if (!events)
+ return -ENOMEM;
+
+ events->buf = dma_alloc_noncoherent(xdna->ddev.dev, total_size, &events->addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!events->buf) {
+ ret = -ENOMEM;
+ goto free_events;
+ }
+ events->size = total_size;
+ events->event_cnt = total_col;
+
+ events->wq = alloc_ordered_workqueue("async_wq", 0);
+ if (!events->wq) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ for (i = 0; i < events->event_cnt; i++) {
+ struct async_event *e = &events->event[i];
+ u32 offset = i * ASYNC_BUF_SIZE;
+
+ e->ndev = ndev;
+ e->wq = events->wq;
+ e->buf = &events->buf[offset];
+ e->addr = events->addr + offset;
+ e->size = ASYNC_BUF_SIZE;
+ e->resp.status = MAX_AIE2_STATUS_CODE;
+ INIT_WORK(&e->work, aie2_error_worker);
+ }
+
+ ndev->async_events = events;
+
+ XDNA_DBG(xdna, "Async event count %d, buf total size 0x%x",
+ events->event_cnt, events->size);
+ return 0;
+
+free_buf:
+ dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
+ events->addr, DMA_FROM_DEVICE);
+free_events:
+ kfree(events);
+ return ret;
+}
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
new file mode 100644
index 000000000000..9e2c9a44f76a
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_mailbox_helper.h"
+#include "amdxdna_pci_drv.h"
+
+#define DECLARE_AIE2_MSG(name, op) \
+ DECLARE_XDNA_MSG_COMMON(name, op, MAX_AIE2_STATUS_CODE)
+
+static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
+ struct xdna_mailbox_msg *msg)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct xdna_notify *hdl = msg->handle;
+ int ret;
+
+ if (!ndev->mgmt_chann)
+ return -ENODEV;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
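+ /* A timed-out management channel is treated as dead and torn down. */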
+ if (ret == -ETIME) {
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ }
+
+ if (!ret && *hdl->data != AIE2_STATUS_SUCCESS) {
+ XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x",
+ msg->opcode, *hdl->data);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev)
+{
+ DECLARE_AIE2_MSG(suspend, MSG_OP_SUSPEND);
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_resume_fw(struct amdxdna_dev_hdl *ndev)
+{
+ DECLARE_AIE2_MSG(suspend, MSG_OP_RESUME);
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value)
+{
+ DECLARE_AIE2_MSG(set_runtime_cfg, MSG_OP_SET_RUNTIME_CONFIG);
+ int ret;
+
+ req.type = type;
+ req.value = value;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Failed to set runtime config, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value)
+{
+ DECLARE_AIE2_MSG(get_runtime_cfg, MSG_OP_GET_RUNTIME_CONFIG);
+ int ret;
+
+ req.type = type;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Failed to get runtime config, ret %d", ret);
+ return ret;
+ }
+
+ *value = resp.value;
+ return 0;
+}
+
+int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid)
+{
+ DECLARE_AIE2_MSG(assign_mgmt_pasid, MSG_OP_ASSIGN_MGMT_PASID);
+
+ req.pasid = pasid;
+
+ return aie2_send_mgmt_msg_wait(ndev, &msg);
+}
+
+int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version)
+{
+ DECLARE_AIE2_MSG(aie_version_info, MSG_OP_QUERY_AIE_VERSION);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ XDNA_DBG(xdna, "Query AIE version - major: %u minor: %u completed",
+ resp.major, resp.minor);
+
+ version->major = resp.major;
+ version->minor = resp.minor;
+
+ return 0;
+}
+
+int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata)
+{
+ DECLARE_AIE2_MSG(aie_tile_info, MSG_OP_QUERY_AIE_TILE_INFO);
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ metadata->size = resp.info.size;
+ metadata->cols = resp.info.cols;
+ metadata->rows = resp.info.rows;
+
+ metadata->version.major = resp.info.major;
+ metadata->version.minor = resp.info.minor;
+
+ metadata->core.row_count = resp.info.core_rows;
+ metadata->core.row_start = resp.info.core_row_start;
+ metadata->core.dma_channel_count = resp.info.core_dma_channels;
+ metadata->core.lock_count = resp.info.core_locks;
+ metadata->core.event_reg_count = resp.info.core_events;
+
+ metadata->mem.row_count = resp.info.mem_rows;
+ metadata->mem.row_start = resp.info.mem_row_start;
+ metadata->mem.dma_channel_count = resp.info.mem_dma_channels;
+ metadata->mem.lock_count = resp.info.mem_locks;
+ metadata->mem.event_reg_count = resp.info.mem_events;
+
+ metadata->shim.row_count = resp.info.shim_rows;
+ metadata->shim.row_start = resp.info.shim_row_start;
+ metadata->shim.dma_channel_count = resp.info.shim_dma_channels;
+ metadata->shim.lock_count = resp.info.shim_locks;
+ metadata->shim.event_reg_count = resp.info.shim_events;
+
+ return 0;
+}
+
+int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_fw_ver *fw_ver)
+{
+ DECLARE_AIE2_MSG(firmware_version, MSG_OP_GET_FIRMWARE_VERSION);
+ int ret;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ fw_ver->major = resp.major;
+ fw_ver->minor = resp.minor;
+ fw_ver->sub = resp.sub;
+ fw_ver->build = resp.build;
+
+ return 0;
+}
+
+int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
+{
+ DECLARE_AIE2_MSG(create_ctx, MSG_OP_CREATE_CONTEXT);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct xdna_mailbox_chann_res x2i;
+ struct xdna_mailbox_chann_res i2x;
+ struct cq_pair *cq_pair;
+ u32 intr_reg;
+ int ret;
+
+ req.aie_type = 1;
+ req.start_col = hwctx->start_col;
+ req.num_col = hwctx->num_col;
+ req.num_cq_pairs_requested = 1;
+ req.pasid = hwctx->client->pasid;
+ req.context_priority = 2;
+
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ hwctx->fw_ctx_id = resp.context_id;
+ WARN_ONCE(hwctx->fw_ctx_id == -1, "Unexpected context id");
+
+ cq_pair = &resp.cq_pair[0];
+ x2i.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.head_addr);
+ x2i.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.tail_addr);
+ x2i.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->x2i_q.buf_addr);
+ x2i.rb_size = cq_pair->x2i_q.buf_size;
+
+ i2x.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.head_addr);
+ i2x.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.tail_addr);
+ i2x.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->i2x_q.buf_addr);
+ i2x.rb_size = cq_pair->i2x_q.buf_size;
+
+ ret = pci_irq_vector(to_pci_dev(xdna->ddev.dev), resp.msix_id);
+ if (ret == -EINVAL) {
+ XDNA_ERR(xdna, "not able to create channel");
+ goto out_destroy_context;
+ }
+
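+ /* The channel interrupt register sits immediately after the i2x head pointer. */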
+ intr_reg = i2x.mb_head_ptr_reg + 4;
+ hwctx->priv->mbox_chann = xdna_mailbox_create_channel(ndev->mbox, &x2i, &i2x,
+ intr_reg, ret);
+ if (!hwctx->priv->mbox_chann) {
+ XDNA_ERR(xdna, "not able to create channel");
+ ret = -EINVAL;
+ goto out_destroy_context;
+ }
+
+ XDNA_DBG(xdna, "%s mailbox channel irq: %d, msix_id: %d",
+ hwctx->name, ret, resp.msix_id);
+ XDNA_DBG(xdna, "%s created fw ctx %d pasid %d", hwctx->name,
+ hwctx->fw_ctx_id, hwctx->client->pasid);
+
+ return 0;
+
+out_destroy_context:
+ aie2_destroy_context(ndev, hwctx);
+ return ret;
+}
+
+int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
+{
+ DECLARE_AIE2_MSG(destroy_ctx, MSG_OP_DESTROY_CONTEXT);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ if (hwctx->fw_ctx_id == -1)
+ return 0;
+
+ xdna_mailbox_stop_channel(hwctx->priv->mbox_chann);
+
+ req.context_id = hwctx->fw_ctx_id;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ XDNA_WARN(xdna, "%s destroy context failed, ret %d", hwctx->name, ret);
+
+ xdna_mailbox_destroy_channel(hwctx->priv->mbox_chann);
+ XDNA_DBG(xdna, "%s destroyed fw ctx %d", hwctx->name,
+ hwctx->fw_ctx_id);
+ hwctx->priv->mbox_chann = NULL;
+ hwctx->fw_ctx_id = -1;
+
+ return ret;
+}
+
+int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size)
+{
+ DECLARE_AIE2_MSG(map_host_buffer, MSG_OP_MAP_HOST_BUFFER);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ int ret;
+
+ req.context_id = context_id;
+ req.buf_addr = addr;
+ req.buf_size = size;
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret)
+ return ret;
+
+ XDNA_DBG(xdna, "fw ctx %d map host buf addr 0x%llx size 0x%llx",
+ context_id, addr, size);
+
+ return 0;
+}
+
+int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
+ u32 size, u32 *cols_filled)
+{
+ DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ struct amdxdna_client *client;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ dma_addr_t dma_addr;
+ u32 aie_bitmap = 0;
+ u8 *buff_addr;
+ int ret, idx;
+
+ buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!buff_addr)
+ return -ENOMEM;
+
+ /* Go through each hardware context and mark the AIE columns that are active */
+ list_for_each_entry(client, &xdna->client_list, node) {
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
+ aie_bitmap |= amdxdna_hwctx_col_map(hwctx);
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ }
+
+ *cols_filled = 0;
+ req.dump_buff_addr = dma_addr;
+ req.dump_buff_size = size;
+ req.num_cols = hweight32(aie_bitmap);
+ req.aie_bitmap = aie_bitmap;
+
+ drm_clflush_virt_range(buff_addr, size); /* device can access */
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(xdna, "Error during NPU query, status %d", ret);
+ goto fail;
+ }
+
+ if (resp.status != AIE2_STATUS_SUCCESS) {
+ XDNA_ERR(xdna, "Query NPU status failed, status 0x%x", resp.status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ XDNA_DBG(xdna, "Query NPU status completed");
+
+ if (size < resp.size) {
+ ret = -EINVAL;
+ XDNA_ERR(xdna, "Bad buffer size. Available: %u. Needs: %u", size, resp.size);
+ goto fail;
+ }
+
+ if (copy_to_user(buf, buff_addr, resp.size)) {
+ ret = -EFAULT;
+ XDNA_ERR(xdna, "Failed to copy NPU status to user space");
+ goto fail;
+ }
+
+ *cols_filled = aie_bitmap;
+
+fail:
+ dma_free_noncoherent(xdna->ddev.dev, size, buff_addr, dma_addr, DMA_FROM_DEVICE);
+ return ret;
+}
+
+int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
+ void *handle, int (*cb)(void*, const u32 *, size_t))
+{
+ struct async_event_msg_req req = { 0 };
+ struct xdna_mailbox_msg msg = {
+ .send_data = (u8 *)&req,
+ .send_size = sizeof(req),
+ .handle = handle,
+ .opcode = MSG_OP_REGISTER_ASYNC_EVENT_MSG,
+ .notify_cb = cb,
+ };
+
+ req.buf_addr = addr;
+ req.buf_size = size;
+
+ XDNA_DBG(ndev->xdna, "Register addr 0x%llx size 0x%x", addr, size);
+ return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT);
+}
+
+int aie2_config_cu(struct amdxdna_hwctx *hwctx)
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 shift = xdna->dev_info->dev_mem_buf_shift;
+ DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU);
+ struct drm_gem_object *gobj;
+ struct amdxdna_gem_obj *abo;
+ int ret, i;
+
+ if (!chann)
+ return -ENODEV;
+
+ if (hwctx->cus->num_cus > MAX_NUM_CUS) {
+ XDNA_DBG(xdna, "Exceed maximum CU %d", MAX_NUM_CUS);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hwctx->cus->num_cus; i++) {
+ struct amdxdna_cu_config *cu = &hwctx->cus->cu_configs[i];
+
+ if (XDNA_MBZ_DBG(xdna, cu->pad, sizeof(cu->pad)))
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(hwctx->client->filp, cu->cu_bo);
+ if (!gobj) {
+ XDNA_ERR(xdna, "Lookup GEM object failed");
+ return -EINVAL;
+ }
+ abo = to_xdna_obj(gobj);
+
+ if (abo->type != AMDXDNA_BO_DEV) {
+ drm_gem_object_put(gobj);
+ XDNA_ERR(xdna, "Invalid BO type");
+ return -EINVAL;
+ }
+
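+ /* Pack the shifted PDI address and the CU function into a single config word. */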
+ req.cfgs[i] = FIELD_PREP(AIE2_MSG_CFG_CU_PDI_ADDR,
+ abo->mem.dev_addr >> shift);
+ req.cfgs[i] |= FIELD_PREP(AIE2_MSG_CFG_CU_FUNC, cu->cu_func);
+ XDNA_DBG(xdna, "CU %d full addr 0x%llx, cfg 0x%x", i,
+ abo->mem.dev_addr, req.cfgs[i]);
+ drm_gem_object_put(gobj);
+ }
+ req.num_cus = hwctx->cus->num_cus;
+
+ ret = xdna_send_msg_wait(xdna, chann, &msg);
+ if (ret == -ETIME)
+ aie2_destroy_context(xdna->dev_handle, hwctx);
+
+ if (resp.status == AIE2_STATUS_SUCCESS) {
+ XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret);
+ return 0;
+ }
+
+ XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d",
+ msg.opcode, resp.status, ret);
+ return ret;
+}
+
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ union {
+ struct execute_buffer_req ebuf;
+ struct exec_dpu_req dpu;
+ } req;
+ struct xdna_mailbox_msg msg;
+ u32 payload_len;
+ void *payload;
+ int cu_idx;
+ int ret;
+ u32 op;
+
+ if (!chann)
+ return -ENODEV;
+
+ payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
+ if (!payload) {
+ XDNA_ERR(xdna, "Invalid command, cannot get payload");
+ return -EINVAL;
+ }
+
+ cu_idx = amdxdna_cmd_get_cu_idx(cmd_abo);
+ if (cu_idx < 0) {
+ XDNA_DBG(xdna, "Invalid cu idx");
+ return -EINVAL;
+ }
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ switch (op) {
+ case ERT_START_CU:
+ if (unlikely(payload_len > sizeof(req.ebuf.payload)))
+ XDNA_DBG(xdna, "Invalid ebuf payload len: %d", payload_len);
+ req.ebuf.cu_idx = cu_idx;
+ memcpy(req.ebuf.payload, payload, sizeof(req.ebuf.payload));
+ msg.send_size = sizeof(req.ebuf);
+ msg.opcode = MSG_OP_EXECUTE_BUFFER_CF;
+ break;
+ case ERT_START_NPU: {
+ struct amdxdna_cmd_start_npu *sn = payload;
+
+ if (unlikely(payload_len - sizeof(*sn) > sizeof(req.dpu.payload)))
+ XDNA_DBG(xdna, "Invalid dpu payload len: %d", payload_len);
+ req.dpu.inst_buf_addr = sn->buffer;
+ req.dpu.inst_size = sn->buffer_size;
+ req.dpu.inst_prop_cnt = sn->prop_count;
+ req.dpu.cu_idx = cu_idx;
+ memcpy(req.dpu.payload, sn->prop_args, sizeof(req.dpu.payload));
+ msg.send_size = sizeof(req.dpu);
+ msg.opcode = MSG_OP_EXEC_DPU;
+ break;
+ }
+ default:
+ XDNA_DBG(xdna, "Invalid ERT cmd op code: %d", op);
+ return -EINVAL;
+ }
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ print_hex_dump_debug("cmd: ", DUMP_PREFIX_OFFSET, 16, 4, &req,
+ 0x40, false);
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct cmd_chain_slot_execbuf_cf *buf = cmd_buf + offset;
+ int cu_idx = amdxdna_cmd_get_cu_idx(abo);
+ u32 payload_len;
+ void *payload;
+
+ if (cu_idx < 0)
+ return -EINVAL;
+
+ payload = amdxdna_cmd_get_payload(abo, &payload_len);
+ if (!payload)
+ return -EINVAL;
+
+ if (!slot_cf_has_space(offset, payload_len))
+ return -ENOSPC;
+
+ buf->cu_idx = cu_idx;
+ buf->arg_cnt = payload_len / sizeof(u32);
+ memcpy(buf->args, payload, payload_len);
+ /* Report the accurate buffer size so firmware copies only what is needed */
+ *size = sizeof(*buf) + payload_len;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct cmd_chain_slot_dpu *buf = cmd_buf + offset;
+ int cu_idx = amdxdna_cmd_get_cu_idx(abo);
+ struct amdxdna_cmd_start_npu *sn;
+ u32 payload_len;
+ void *payload;
+ u32 arg_sz;
+
+ if (cu_idx < 0)
+ return -EINVAL;
+
+ payload = amdxdna_cmd_get_payload(abo, &payload_len);
+ if (!payload)
+ return -EINVAL;
+ sn = payload;
+ arg_sz = payload_len - sizeof(*sn);
+ if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
+ return -EINVAL;
+
+ if (!slot_dpu_has_space(offset, arg_sz))
+ return -ENOSPC;
+
+ buf->inst_buf_addr = sn->buffer;
+ buf->inst_size = sn->buffer_size;
+ buf->inst_prop_cnt = sn->prop_count;
+ buf->cu_idx = cu_idx;
+ buf->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(buf->args, sn->prop_args, arg_sz);
+
+ /* Report the accurate buffer size so firmware copies only what is needed */
+ *size = sizeof(*buf) + arg_sz;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_one_slot(u32 op, struct amdxdna_gem_obj *cmdbuf_abo, u32 offset,
+ struct amdxdna_gem_obj *abo, u32 *size)
+{
+ u32 this_op = amdxdna_cmd_get_op(abo);
+ void *cmd_buf = cmdbuf_abo->mem.kva;
+ int ret;
+
+ if (this_op != op) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (op) {
+ case ERT_START_CU:
+ ret = aie2_cmdlist_fill_one_slot_cf(cmd_buf, offset, abo, size);
+ break;
+ case ERT_START_NPU:
+ ret = aie2_cmdlist_fill_one_slot_dpu(cmd_buf, offset, abo, size);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+done:
+ if (ret) {
+ XDNA_ERR(abo->client->xdna, "Can't fill slot for cmd op %d ret %d",
+ op, ret);
+ }
+ return ret;
+}
+
+static inline struct amdxdna_gem_obj *
+aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
+{
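+ /* Pick this job's slot in the context's preallocated command-buffer array. */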
+ int idx = get_job_idx(job->seq);
+
+ return job->hwctx->priv->cmd_buf[idx];
+}
+
+static void
+aie2_cmdlist_prepare_request(struct cmd_chain_req *req,
+ struct amdxdna_gem_obj *cmdbuf_abo, u32 size, u32 cnt)
+{
+ req->buf_addr = cmdbuf_abo->mem.dev_addr;
+ req->buf_size = size;
+ req->count = cnt;
+ drm_clflush_virt_range(cmdbuf_abo->mem.kva, size);
+ XDNA_DBG(cmdbuf_abo->client->xdna, "Command buf addr 0x%llx size 0x%x count %d",
+ req->buf_addr, size, cnt);
+}
+
+static inline u32
+aie2_cmd_op_to_msg_op(u32 op)
+{
+ switch (op) {
+ case ERT_START_CU:
+ return MSG_OP_CHAIN_EXEC_BUFFER_CF;
+ case ERT_START_NPU:
+ return MSG_OP_CHAIN_EXEC_DPU;
+ default:
+ return MSG_OP_MAX_OPCODE;
+ }
+}
+
+int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_cmd_chain *payload;
+ struct xdna_mailbox_msg msg;
+ struct cmd_chain_req req;
+ u32 payload_len;
+ u32 offset = 0;
+ u32 size;
+ int ret;
+ u32 op;
+ u32 i;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
+ if (op != ERT_CMD_CHAIN || !payload ||
+ payload_len < struct_size(payload, data, payload->command_count))
+ return -EINVAL;
+
+ for (i = 0; i < payload->command_count; i++) {
+ u32 boh = (u32)(payload->data[i]);
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_get_obj(client, boh, AMDXDNA_BO_CMD);
+ if (!abo) {
+ XDNA_ERR(client->xdna, "Failed to find cmd BO %d", boh);
+ return -ENOENT;
+ }
+
+ /* All sub-commands must have the same op; use the first one. */
+ if (i == 0)
+ op = amdxdna_cmd_get_op(abo);
+
+ ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, offset, abo, &size);
+ amdxdna_gem_put_obj(abo);
+ if (ret)
+ return -EINVAL;
+
+ offset += size;
+ }
+
+ /* The offset is the accumulated total size of the cmd buffer */
+ aie2_cmdlist_prepare_request(&req, cmdbuf_abo, offset, payload->command_count);
+
+ msg.opcode = aie2_cmd_op_to_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct xdna_mailbox_msg msg;
+ struct cmd_chain_req req;
+ u32 size;
+ int ret;
+ u32 op;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, 0, cmd_abo, &size);
+ if (ret)
+ return ret;
+
+ aie2_cmdlist_prepare_request(&req, cmdbuf_abo, size, 1);
+
+ msg.opcode = aie2_cmd_op_to_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct xdna_mailbox_msg msg;
+ struct sync_bo_req req;
+ int ret = 0;
+
+ req.src_addr = 0;
+ req.dst_addr = abo->mem.dev_addr - hwctx->client->dev_heap->mem.dev_addr;
+ req.size = abo->mem.size;
+
+ /* Device to Host */
+ req.type = FIELD_PREP(AIE2_MSG_SYNC_BO_SRC_TYPE, SYNC_BO_DEV_MEM) |
+ FIELD_PREP(AIE2_MSG_SYNC_BO_DST_TYPE, SYNC_BO_HOST_MEM);
+
+ XDNA_DBG(xdna, "sync %d bytes src(0x%llx) to dst(0x%llx) completed",
+ req.size, req.src_addr, req.dst_addr);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.opcode = MSG_OP_SYNC_BO;
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
new file mode 100644
index 000000000000..4e02e744b470
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_MSG_PRIV_H_
+#define _AIE2_MSG_PRIV_H_
+
+enum aie2_msg_opcode {
+ MSG_OP_CREATE_CONTEXT = 0x2,
+ MSG_OP_DESTROY_CONTEXT = 0x3,
+ MSG_OP_SYNC_BO = 0x7,
+ MSG_OP_EXECUTE_BUFFER_CF = 0xC,
+ MSG_OP_QUERY_COL_STATUS = 0xD,
+ MSG_OP_QUERY_AIE_TILE_INFO = 0xE,
+ MSG_OP_QUERY_AIE_VERSION = 0xF,
+ MSG_OP_EXEC_DPU = 0x10,
+ MSG_OP_CONFIG_CU = 0x11,
+ MSG_OP_CHAIN_EXEC_BUFFER_CF = 0x12,
+ MSG_OP_CHAIN_EXEC_DPU = 0x13,
+ MSG_OP_MAX_XRT_OPCODE,
+ MSG_OP_SUSPEND = 0x101,
+ MSG_OP_RESUME = 0x102,
+ MSG_OP_ASSIGN_MGMT_PASID = 0x103,
+ MSG_OP_INVOKE_SELF_TEST = 0x104,
+ MSG_OP_MAP_HOST_BUFFER = 0x106,
+ MSG_OP_GET_FIRMWARE_VERSION = 0x108,
+ MSG_OP_SET_RUNTIME_CONFIG = 0x10A,
+ MSG_OP_GET_RUNTIME_CONFIG = 0x10B,
+ MSG_OP_REGISTER_ASYNC_EVENT_MSG = 0x10C,
+ MSG_OP_MAX_DRV_OPCODE,
+ MSG_OP_GET_PROTOCOL_VERSION = 0x301,
+ MSG_OP_MAX_OPCODE
+};
+
+enum aie2_msg_status {
+ AIE2_STATUS_SUCCESS = 0x0,
+ /* AIE Error codes */
+ AIE2_STATUS_AIE_SATURATION_ERROR = 0x1000001,
+ AIE2_STATUS_AIE_FP_ERROR = 0x1000002,
+ AIE2_STATUS_AIE_STREAM_ERROR = 0x1000003,
+ AIE2_STATUS_AIE_ACCESS_ERROR = 0x1000004,
+ AIE2_STATUS_AIE_BUS_ERROR = 0x1000005,
+ AIE2_STATUS_AIE_INSTRUCTION_ERROR = 0x1000006,
+ AIE2_STATUS_AIE_ECC_ERROR = 0x1000007,
+ AIE2_STATUS_AIE_LOCK_ERROR = 0x1000008,
+ AIE2_STATUS_AIE_DMA_ERROR = 0x1000009,
+ AIE2_STATUS_AIE_MEM_PARITY_ERROR = 0x100000a,
+ AIE2_STATUS_AIE_PWR_CFG_ERROR = 0x100000b,
+ AIE2_STATUS_AIE_BACKTRACK_ERROR = 0x100000c,
+ AIE2_STATUS_MAX_AIE_STATUS_CODE,
+ /* MGMT ERT Error codes */
+ AIE2_STATUS_MGMT_ERT_SELF_TEST_FAILURE = 0x2000001,
+ AIE2_STATUS_MGMT_ERT_HASH_MISMATCH,
+ AIE2_STATUS_MGMT_ERT_NOAVAIL,
+ AIE2_STATUS_MGMT_ERT_INVALID_PARAM,
+ AIE2_STATUS_MGMT_ERT_ENTER_SUSPEND_FAILURE,
+ AIE2_STATUS_MGMT_ERT_BUSY,
+ AIE2_STATUS_MGMT_ERT_APPLICATION_ACTIVE,
+ MAX_MGMT_ERT_STATUS_CODE,
+ /* APP ERT Error codes */
+ AIE2_STATUS_APP_ERT_FIRST_ERROR = 0x3000001,
+ AIE2_STATUS_APP_INVALID_INSTR,
+ AIE2_STATUS_APP_LOAD_PDI_FAIL,
+ MAX_APP_ERT_STATUS_CODE,
+ /* NPU RTOS Error Codes */
+ AIE2_STATUS_INVALID_INPUT_BUFFER = 0x4000001,
+ AIE2_STATUS_INVALID_COMMAND,
+ AIE2_STATUS_INVALID_PARAM,
+ AIE2_STATUS_INVALID_OPERATION = 0x4000006,
+ AIE2_STATUS_ASYNC_EVENT_MSGS_FULL,
+ AIE2_STATUS_MAX_RTOS_STATUS_CODE,
+ MAX_AIE2_STATUS_CODE
+};
+
+struct assign_mgmt_pasid_req {
+ __u16 pasid;
+ __u16 reserved;
+} __packed;
+
+struct assign_mgmt_pasid_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct map_host_buffer_req {
+ __u32 context_id;
+ __u64 buf_addr;
+ __u64 buf_size;
+} __packed;
+
+struct map_host_buffer_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+#define MAX_CQ_PAIRS 2
+struct cq_info {
+ __u32 head_addr;
+ __u32 tail_addr;
+ __u32 buf_addr;
+ __u32 buf_size;
+};
+
+struct cq_pair {
+ struct cq_info x2i_q;
+ struct cq_info i2x_q;
+};
+
+struct create_ctx_req {
+ __u32 aie_type;
+ __u8 start_col;
+ __u8 num_col;
+ __u16 reserved;
+ __u8 num_cq_pairs_requested;
+ __u8 reserved1;
+ __u16 pasid;
+ __u32 pad[2];
+ __u32 sec_comm_target_type;
+ __u32 context_priority;
+} __packed;
+
+struct create_ctx_resp {
+ enum aie2_msg_status status;
+ __u32 context_id;
+ __u16 msix_id;
+ __u8 num_cq_pairs_allocated;
+ __u8 reserved;
+ struct cq_pair cq_pair[MAX_CQ_PAIRS];
+} __packed;
+
+struct destroy_ctx_req {
+ __u32 context_id;
+} __packed;
+
+struct destroy_ctx_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct execute_buffer_req {
+ __u32 cu_idx;
+ __u32 payload[19];
+} __packed;
+
+struct exec_dpu_req {
+ __u64 inst_buf_addr;
+ __u32 inst_size;
+ __u32 inst_prop_cnt;
+ __u32 cu_idx;
+ __u32 payload[35];
+} __packed;
+
+struct execute_buffer_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct aie_tile_info {
+ __u32 size;
+ __u16 major;
+ __u16 minor;
+ __u16 cols;
+ __u16 rows;
+ __u16 core_rows;
+ __u16 mem_rows;
+ __u16 shim_rows;
+ __u16 core_row_start;
+ __u16 mem_row_start;
+ __u16 shim_row_start;
+ __u16 core_dma_channels;
+ __u16 mem_dma_channels;
+ __u16 shim_dma_channels;
+ __u16 core_locks;
+ __u16 mem_locks;
+ __u16 shim_locks;
+ __u16 core_events;
+ __u16 mem_events;
+ __u16 shim_events;
+ __u16 reserved;
+};
+
+struct aie_tile_info_req {
+ __u32 reserved;
+} __packed;
+
+struct aie_tile_info_resp {
+ enum aie2_msg_status status;
+ struct aie_tile_info info;
+} __packed;
+
+struct aie_version_info_req {
+ __u32 reserved;
+} __packed;
+
+struct aie_version_info_resp {
+ enum aie2_msg_status status;
+ __u16 major;
+ __u16 minor;
+} __packed;
+
+struct aie_column_info_req {
+ __u64 dump_buff_addr;
+ __u32 dump_buff_size;
+ __u32 num_cols;
+ __u32 aie_bitmap;
+} __packed;
+
+struct aie_column_info_resp {
+ enum aie2_msg_status status;
+ __u32 size;
+} __packed;
+
+struct suspend_req {
+ __u32 place_holder;
+} __packed;
+
+struct suspend_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct resume_req {
+ __u32 place_holder;
+} __packed;
+
+struct resume_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct check_header_hash_req {
+ __u64 hash_high;
+ __u64 hash_low;
+} __packed;
+
+struct check_header_hash_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct query_error_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 next_row;
+ __u32 next_column;
+ __u32 next_module;
+} __packed;
+
+struct query_error_resp {
+ enum aie2_msg_status status;
+ __u32 num_err;
+ __u32 has_next_err;
+ __u32 next_row;
+ __u32 next_column;
+ __u32 next_module;
+} __packed;
+
+struct protocol_version_req {
+ __u32 reserved;
+} __packed;
+
+struct protocol_version_resp {
+ enum aie2_msg_status status;
+ __u32 major;
+ __u32 minor;
+} __packed;
+
+struct firmware_version_req {
+ __u32 reserved;
+} __packed;
+
+struct firmware_version_resp {
+ enum aie2_msg_status status;
+ __u32 major;
+ __u32 minor;
+ __u32 sub;
+ __u32 build;
+} __packed;
+
+#define MAX_NUM_CUS 32
+#define AIE2_MSG_CFG_CU_PDI_ADDR GENMASK(16, 0)
+#define AIE2_MSG_CFG_CU_FUNC GENMASK(24, 17)
+struct config_cu_req {
+ __u32 num_cus;
+ __u32 cfgs[MAX_NUM_CUS];
+} __packed;
+
+struct config_cu_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct set_runtime_cfg_req {
+ __u32 type;
+ __u64 value;
+} __packed;
+
+struct set_runtime_cfg_resp {
+ enum aie2_msg_status status;
+} __packed;
+
+struct get_runtime_cfg_req {
+ __u32 type;
+} __packed;
+
+struct get_runtime_cfg_resp {
+ enum aie2_msg_status status;
+ __u64 value;
+} __packed;
+
+enum async_event_type {
+ ASYNC_EVENT_TYPE_AIE_ERROR,
+ ASYNC_EVENT_TYPE_EXCEPTION,
+ MAX_ASYNC_EVENT_TYPE
+};
+
+#define ASYNC_BUF_SIZE SZ_8K
+struct async_event_msg_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+} __packed;
+
+struct async_event_msg_resp {
+ enum aie2_msg_status status;
+ enum async_event_type type;
+} __packed;
+
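+/* Chained commands share one 4K buffer; these helpers check whether another slot still fits. */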
+#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
+#define slot_cf_has_space(offset, payload_size) \
+ (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
+ offsetof(struct cmd_chain_slot_execbuf_cf, args[0]))
+struct cmd_chain_slot_execbuf_cf {
+ __u32 cu_idx;
+ __u32 arg_cnt;
+ __u32 args[] __counted_by(arg_cnt);
+};
+
+#define slot_dpu_has_space(offset, payload_size) \
+ (MAX_CHAIN_CMDBUF_SIZE - ((offset) + (payload_size)) > \
+ offsetof(struct cmd_chain_slot_dpu, args[0]))
+struct cmd_chain_slot_dpu {
+ __u64 inst_buf_addr;
+ __u32 inst_size;
+ __u32 inst_prop_cnt;
+ __u32 cu_idx;
+ __u32 arg_cnt;
+#define MAX_DPU_ARGS_SIZE (34 * sizeof(__u32))
+ __u32 args[] __counted_by(arg_cnt);
+};
+
+struct cmd_chain_req {
+ __u64 buf_addr;
+ __u32 buf_size;
+ __u32 count;
+} __packed;
+
+struct cmd_chain_resp {
+ enum aie2_msg_status status;
+ __u32 fail_cmd_idx;
+ enum aie2_msg_status fail_cmd_status;
+} __packed;
+
+#define AIE2_MSG_SYNC_BO_SRC_TYPE GENMASK(3, 0)
+#define AIE2_MSG_SYNC_BO_DST_TYPE GENMASK(7, 4)
+struct sync_bo_req {
+ __u64 src_addr;
+ __u64 dst_addr;
+ __u32 size;
+#define SYNC_BO_DEV_MEM 0
+#define SYNC_BO_HOST_MEM 2
+ __u32 type;
+} __packed;
+
+struct sync_bo_resp {
+ enum aie2_msg_status status;
+} __packed;
+#endif /* _AIE2_MSG_PRIV_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
new file mode 100644
index 000000000000..5a058e565b01
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -0,0 +1,928 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/xarray.h>
+
+#include "aie2_msg_priv.h"
+#include "aie2_pci.h"
+#include "aie2_solver.h"
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+static int aie2_max_col = XRS_MAX_COL;
+module_param(aie2_max_col, uint, 0600);
+MODULE_PARM_DESC(aie2_max_col, "Maximum number of columns that can be used");
+
+/*
+ * The management mailbox channel is allocated by firmware.
+ * The related register and ring buffer information is on SRAM BAR.
+ * This struct is the register layout.
+ */
+#define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
+struct mgmt_mbox_chann_info {
+ __u32 x2i_tail;
+ __u32 x2i_head;
+ __u32 x2i_buf;
+ __u32 x2i_buf_sz;
+ __u32 i2x_tail;
+ __u32 i2x_head;
+ __u32 i2x_buf;
+ __u32 i2x_buf_sz;
+ __u32 magic;
+ __u32 msi_id;
+ __u32 prot_major;
+ __u32 prot_minor;
+ __u32 rsvd[4];
+};
+
+static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ /*
+ * The mailbox behavior the driver supports is defined by
+ * ndev->priv->protocol_major and protocol_minor.
+ *
+ * When protocol_major and fw_major differ, the driver and
+ * firmware are incompatible.
+ */
+ if (ndev->priv->protocol_major != fw_major) {
+ XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
+ fw_major, fw_minor);
+ return -EINVAL;
+ }
+
+ /*
+ * When protocol_minor is greater than fw_minor, the driver relies
+ * on operations the installed firmware does not support.
+ */
+ if (ndev->priv->protocol_minor > fw_minor) {
+ XDNA_ERR(xdna, "Firmware minor version smaller than supported");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ XDNA_DBG(xdna, "i2x tail 0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
+ XDNA_DBG(xdna, "i2x head 0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
+ XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
+ XDNA_DBG(xdna, "i2x rsize 0x%x", ndev->mgmt_i2x.rb_size);
+ XDNA_DBG(xdna, "x2i tail 0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
+ XDNA_DBG(xdna, "x2i head 0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
+ XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
+ XDNA_DBG(xdna, "x2i rsize 0x%x", ndev->mgmt_x2i.rb_size);
+ XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
+ XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
+ XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
+}
+
+static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
+{
+ struct mgmt_mbox_chann_info info_regs;
+ struct xdna_mailbox_chann_res *i2x;
+ struct xdna_mailbox_chann_res *x2i;
+ u32 addr, off;
+ u32 *reg;
+ int ret;
+ int i;
+
+ /*
+ * Once the firmware is alive, it writes management channel
+ * information into the SRAM BAR and stores the address of that
+ * information at the FW_ALIVE_OFF offset in the SRAM BAR.
+ *
+ * Reading a non-zero value from FW_ALIVE_OFF implies that the
+ * firmware is alive.
+ */
+ ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
+ addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
+ if (ret || !addr)
+ return -ETIME;
+
+ off = AIE2_SRAM_OFF(ndev, addr);
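+ /* Copy the channel info structure out of SRAM one 32-bit word at a time. */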
+ reg = (u32 *)&info_regs;
+ for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
+ reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));
+
+ if (info_regs.magic != MGMT_MBOX_MAGIC) {
+ XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ i2x = &ndev->mgmt_i2x;
+ x2i = &ndev->mgmt_x2i;
+
+ i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
+ i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
+ i2x->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
+ i2x->rb_size = info_regs.i2x_buf_sz;
+
+ x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
+ x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
+ x2i->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
+ x2i->rb_size = info_regs.x2i_buf_sz;
+
+ ndev->mgmt_chan_idx = info_regs.msi_id;
+ ndev->mgmt_prot_major = info_regs.prot_major;
+ ndev->mgmt_prot_minor = info_regs.prot_minor;
+
+ ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);
+
+done:
+ aie2_dump_chann_info_debug(ndev);
+
+ /* Must clear address at FW_ALIVE_OFF */
+ writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));
+
+ return ret;
+}
+
+int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
+ enum rt_config_category category, u32 *val)
+{
+ const struct rt_config *cfg;
+ u32 value;
+ int ret;
+
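+ /* Walk the runtime-config table and apply every entry in the requested category. */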
+ for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
+ if (cfg->category != category)
+ continue;
+
+ value = val ? *val : cfg->value;
+ ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
+ cfg->type, value);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_suspend_fw(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Suspend firmware failed");
+ return ret;
+ }
+
+ ret = aie2_resume_fw(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Resume firmware failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Runtime config failed");
+ return ret;
+ }
+
+ ret = aie2_assign_mgmt_pasid(ndev, 0);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Can not assign PASID");
+ return ret;
+ }
+
+ ret = aie2_xdna_reset(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Reset firmware failed");
+ return ret;
+ }
+
+ if (!ndev->async_events)
+ return 0;
+
+ ret = aie2_error_async_events_send(ndev);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Send async events failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "query firmware version failed");
+ return ret;
+ }
+
+ ret = aie2_query_aie_version(ndev, &ndev->version);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Query AIE version failed");
+ return ret;
+ }
+
+ ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
+{
+ if (aie2_suspend_fw(ndev))
+ XDNA_ERR(ndev->xdna, "Suspend_fw failed");
+ XDNA_DBG(ndev->xdna, "Firmware suspended");
+}
+
+static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
+{
+ struct amdxdna_hwctx *hwctx = cb_arg;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = hwctx->client->xdna;
+
+ hwctx->start_col = action->part.start_col;
+ hwctx->num_col = action->part.ncols;
+ ret = aie2_create_context(xdna->dev_handle, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "create context failed, ret %d", ret);
+
+ return ret;
+}
+
+static int aie2_xrs_unload(void *cb_arg)
+{
+ struct amdxdna_hwctx *hwctx = cb_arg;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = hwctx->client->xdna;
+
+ ret = aie2_destroy_context(xdna->dev_handle, hwctx);
+ if (ret)
+ XDNA_ERR(xdna, "destroy context failed, ret %d", ret);
+
+ return ret;
+}
+
+static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+ struct amdxdna_dev_hdl *ndev;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ ndev = xdna->dev_handle;
+ ndev->dft_dpm_level = dpm_level;
+ if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
+ return 0;
+
+ return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
+}
+
+static struct xrs_action_ops aie2_xrs_actions = {
+ .load = aie2_xrs_load,
+ .unload = aie2_xrs_unload,
+ .set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
+};
+
+static void aie2_hw_stop(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+
+ if (ndev->dev_status <= AIE2_DEV_INIT) {
+ XDNA_ERR(xdna, "device is already stopped");
+ return;
+ }
+
+ aie2_mgmt_fw_fini(ndev);
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+ ndev->mgmt_chann = NULL;
+ drmm_kfree(&xdna->ddev, ndev->mbox);
+ ndev->mbox = NULL;
+ aie2_psp_stop(ndev->psp_hdl);
+ aie2_smu_fini(ndev);
+ pci_disable_device(pdev);
+
+ ndev->dev_status = AIE2_DEV_INIT;
+}
+
+static int aie2_hw_start(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+ struct xdna_mailbox_res mbox_res;
+ u32 xdna_mailbox_intr_reg;
+ int mgmt_mb_irq, ret;
+
+ if (ndev->dev_status >= AIE2_DEV_START) {
+ XDNA_INFO(xdna, "device is already started");
+ return 0;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ ret = aie2_smu_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
+ goto disable_dev;
+ }
+
+ ret = aie2_psp_start(ndev->psp_hdl);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
+ goto fini_smu;
+ }
+
+ ret = aie2_get_mgmt_chann_info(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "firmware is not alive");
+ goto stop_psp;
+ }
+
+ mbox_res.ringbuf_base = ndev->sram_base;
+ mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
+ mbox_res.mbox_base = ndev->mbox_base;
+ mbox_res.mbox_size = MBOX_SIZE(ndev);
+ mbox_res.name = "xdna_mailbox";
+ ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
+ if (!ndev->mbox) {
+ XDNA_ERR(xdna, "failed to create mailbox device");
+ ret = -ENODEV;
+ goto stop_psp;
+ }
+
+ mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
+ if (mgmt_mb_irq < 0) {
+ ret = mgmt_mb_irq;
+ XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
+ goto stop_psp;
+ }
+
+ xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
+ ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
+ &ndev->mgmt_x2i,
+ &ndev->mgmt_i2x,
+ xdna_mailbox_intr_reg,
+ mgmt_mb_irq);
+ if (!ndev->mgmt_chann) {
+ XDNA_ERR(xdna, "failed to create management mailbox channel");
+ ret = -EINVAL;
+ goto stop_psp;
+ }
+
+ ret = aie2_pm_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ret = aie2_mgmt_fw_init(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ndev->dev_status = AIE2_DEV_START;
+
+ return 0;
+
+destroy_mgmt_chann:
+ xdna_mailbox_stop_channel(ndev->mgmt_chann);
+ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+stop_psp:
+ aie2_psp_stop(ndev->psp_hdl);
+fini_smu:
+ aie2_smu_fini(ndev);
+disable_dev:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static int aie2_init(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
+ struct init_config xrs_cfg = { 0 };
+ struct amdxdna_dev_hdl *ndev;
+ struct psp_config psp_conf;
+ const struct firmware *fw;
+ unsigned long bars = 0;
+ int i, nvec, ret;
+
+ ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->priv = xdna->dev_info->dev_priv;
+ ndev->xdna = xdna;
+
+ ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
+ ndev->priv->fw_path, ret);
+ return ret;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
+ goto release_fw;
+ }
+
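+ /* Collect every BAR referenced by the PSP, SRAM, SMU and mailbox registers */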
+ for (i = 0; i < PSP_MAX_REGS; i++)
+ set_bit(PSP_REG_BAR(ndev, i), &bars);
+
+ set_bit(xdna->dev_info->sram_bar, &bars);
+ set_bit(xdna->dev_info->smu_bar, &bars);
+ set_bit(xdna->dev_info->mbox_bar, &bars);
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ if (!test_bit(i, &bars))
+ continue;
+ tbl[i] = pcim_iomap(pdev, i, 0);
+ if (!tbl[i]) {
+ XDNA_ERR(xdna, "map bar %d failed", i);
+ ret = -ENOMEM;
+ goto release_fw;
+ }
+ }
+
+ ndev->sram_base = tbl[xdna->dev_info->sram_bar];
+ ndev->smu_base = tbl[xdna->dev_info->smu_bar];
+ ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
+ goto release_fw;
+ }
+
+ nvec = pci_msix_vec_count(pdev);
+ if (nvec <= 0) {
+ XDNA_ERR(xdna, "does not get number of interrupt vector");
+ ret = -EINVAL;
+ goto release_fw;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
+ goto release_fw;
+ }
+
+ ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ if (ret) {
+ XDNA_ERR(xdna, "Enable PASID failed, ret %d", ret);
+ goto free_irq;
+ }
+
+ psp_conf.fw_size = fw->size;
+ psp_conf.fw_buf = fw->data;
+ for (i = 0; i < PSP_MAX_REGS; i++)
+ psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
+ ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
+ if (!ndev->psp_hdl) {
+ XDNA_ERR(xdna, "failed to create psp");
+ ret = -ENOMEM;
+ goto disable_sva;
+ }
+ xdna->dev_handle = ndev;
+
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "start npu failed, ret %d", ret);
+ goto disable_sva;
+ }
+
+ ret = aie2_mgmt_fw_query(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
+ goto stop_hw;
+ }
+ ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
+
+ xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
+ for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
+ xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
+ xrs_cfg.sys_eff_factor = 1;
+ xrs_cfg.ddev = &xdna->ddev;
+ xrs_cfg.actions = &aie2_xrs_actions;
+ xrs_cfg.total_col = ndev->total_col;
+
+ xdna->xrs_hdl = xrsm_init(&xrs_cfg);
+ if (!xdna->xrs_hdl) {
+ XDNA_ERR(xdna, "Initialize resolver failed");
+ ret = -EINVAL;
+ goto stop_hw;
+ }
+
+ ret = aie2_error_async_events_alloc(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
+ goto stop_hw;
+ }
+
+ ret = aie2_error_async_events_send(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
+ goto async_event_free;
+ }
+
+ /* Issue a command to make sure firmware handled async events */
+ ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
+ if (ret) {
+ XDNA_ERR(xdna, "Re-query firmware version failed");
+ goto async_event_free;
+ }
+
+ release_firmware(fw);
+ return 0;
+
+async_event_free:
+ aie2_error_async_events_free(ndev);
+stop_hw:
+ aie2_hw_stop(xdna);
+disable_sva:
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+free_irq:
+ pci_free_irq_vectors(pdev);
+release_fw:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static void aie2_fini(struct amdxdna_dev *xdna)
+{
+ struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
+ struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
+
+ aie2_hw_stop(xdna);
+ aie2_error_async_events_free(ndev);
+ iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+ pci_free_irq_vectors(pdev);
+}
+
+static int aie2_get_aie_status(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_status status;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret;
+
+ ndev = xdna->dev_handle;
+ if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
+ XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
+ return -EFAULT;
+ }
+
+ if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
+ XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
+ status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
+ return -EINVAL;
+ }
+
+ ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
+ status.buffer_size, &status.cols_filled);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
+ return ret;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
+ XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int aie2_get_aie_metadata(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_metadata *meta;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret = 0;
+
+ ndev = xdna->dev_handle;
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
+
+ meta->col_size = ndev->metadata.size;
+ meta->cols = ndev->metadata.cols;
+ meta->rows = ndev->metadata.rows;
+
+ meta->version.major = ndev->metadata.version.major;
+ meta->version.minor = ndev->metadata.version.minor;
+
+ meta->core.row_count = ndev->metadata.core.row_count;
+ meta->core.row_start = ndev->metadata.core.row_start;
+ meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
+ meta->core.lock_count = ndev->metadata.core.lock_count;
+ meta->core.event_reg_count = ndev->metadata.core.event_reg_count;
+
+ meta->mem.row_count = ndev->metadata.mem.row_count;
+ meta->mem.row_start = ndev->metadata.mem.row_start;
+ meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
+ meta->mem.lock_count = ndev->metadata.mem.lock_count;
+ meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;
+
+ meta->shim.row_count = ndev->metadata.shim.row_count;
+ meta->shim.row_start = ndev->metadata.shim.row_start;
+ meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
+ meta->shim.lock_count = ndev->metadata.shim.lock_count;
+ meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
+ ret = -EFAULT;
+
+ kfree(meta);
+ return ret;
+}
+
+static int aie2_get_aie_version(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_aie_version version;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ version.major = ndev->version.major;
+ version.minor = ndev->version.minor;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_firmware_version(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_firmware_version version;
+ struct amdxdna_dev *xdna = client->xdna;
+
+ version.major = xdna->fw_ver.major;
+ version.minor = xdna->fw_ver.minor;
+ version.patch = xdna->fw_ver.sub;
+ version.build = xdna->fw_ver.build;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_power_mode(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_get_power_mode mode = {};
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ mode.power_mode = ndev->pw_mode;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_get_clock_metadata(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_clock_metadata *clock;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+ int ret = 0;
+
+ ndev = xdna->dev_handle;
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return -ENOMEM;
+
+ snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
+ "MP-NPU Clock");
+ clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
+ snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
+ clock->h_clock.freq_mhz = ndev->hclk_freq;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
+ ret = -EFAULT;
+
+ kfree(clock);
+ return ret;
+}
+
+static int aie2_get_hwctx_status(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_hwctx __user *buf;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drm_query_hwctx *tmp;
+ struct amdxdna_client *tmp_client;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ bool overflow = false;
+ u32 req_bytes = 0;
+ u32 hw_i = 0;
+ int ret = 0;
+ int idx;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ buf = u64_to_user_ptr(args->buffer);
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ idx = srcu_read_lock(&tmp_client->hwctx_srcu);
+ amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
+ req_bytes += sizeof(*tmp);
+ if (args->buffer_size < req_bytes) {
+ /* Continue iterating to get the required size */
+ overflow = true;
+ continue;
+ }
+
+ memset(tmp, 0, sizeof(*tmp));
+ tmp->pid = tmp_client->pid;
+ tmp->context_id = hwctx->id;
+ tmp->start_col = hwctx->start_col;
+ tmp->num_col = hwctx->num_col;
+ tmp->command_submissions = hwctx->priv->seq;
+ tmp->command_completions = hwctx->priv->completed;
+
+ if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
+ ret = -EFAULT;
+ srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
+ goto out;
+ }
+ hw_i++;
+ }
+ srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
+ }
+
+ if (overflow) {
+ XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
+ args->buffer_size, req_bytes);
+ ret = -EINVAL;
+ }
+
+out:
+ kfree(tmp);
+ args->buffer_size = req_bytes;
+ return ret;
+}
+
+static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_QUERY_AIE_STATUS:
+ ret = aie2_get_aie_status(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_AIE_METADATA:
+ ret = aie2_get_aie_metadata(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_AIE_VERSION:
+ ret = aie2_get_aie_version(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
+ ret = aie2_get_clock_metadata(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
+ ret = aie2_get_hwctx_status(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
+ ret = aie2_get_firmware_version(client, args);
+ break;
+ case DRM_AMDXDNA_GET_POWER_MODE:
+ ret = aie2_get_power_mode(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
+static int aie2_set_power_mode(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_drm_set_power_mode power_state;
+ enum amdxdna_power_mode_type power_mode;
+ struct amdxdna_dev *xdna = client->xdna;
+
+ if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
+ sizeof(power_state))) {
+ XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
+ return -EFAULT;
+ }
+
+ if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
+ return -EINVAL;
+
+ power_mode = power_state.power_mode;
+ if (power_mode > POWER_MODE_TURBO) {
+ XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
+ return -EINVAL;
+ }
+
+ return aie2_pm_set_mode(xdna->dev_handle, power_mode);
+}
+
+static int aie2_set_state(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_SET_POWER_MODE:
+ ret = aie2_set_power_mode(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
+const struct amdxdna_dev_ops aie2_ops = {
+ .init = aie2_init,
+ .fini = aie2_fini,
+ .resume = aie2_hw_start,
+ .suspend = aie2_hw_stop,
+ .get_aie_info = aie2_get_info,
+ .set_aie_state = aie2_set_state,
+ .hwctx_init = aie2_hwctx_init,
+ .hwctx_fini = aie2_hwctx_fini,
+ .hwctx_config = aie2_hwctx_config,
+ .cmd_submit = aie2_cmd_submit,
+ .hmm_invalidate = aie2_hmm_invalidate,
+ .hwctx_suspend = aie2_hwctx_suspend,
+ .hwctx_resume = aie2_hwctx_resume,
+};
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
new file mode 100644
index 000000000000..f2d95531ddc2
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_PCI_H_
+#define _AIE2_PCI_H_
+
+#include <drm/amdxdna_accel.h>
+#include <linux/semaphore.h>
+
+#include "amdxdna_mailbox.h"
+
+#define AIE2_INTERVAL 20000 /* us */
+#define AIE2_TIMEOUT 1000000 /* us */
+
+/* Firmware determines device memory base address and size */
+#define AIE2_DEVM_BASE 0x4000000
+#define AIE2_DEVM_SIZE SZ_64M
+
+#define NDEV2PDEV(ndev) (to_pci_dev((ndev)->xdna->ddev.dev))
+
+#define AIE2_SRAM_OFF(ndev, addr) ((addr) - (ndev)->priv->sram_dev_addr)
+#define AIE2_MBOX_OFF(ndev, addr) ((addr) - (ndev)->priv->mbox_dev_addr)
+
+#define PSP_REG_BAR(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].bar_idx)
+#define PSP_REG_OFF(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].offset)
+#define SRAM_REG_OFF(ndev, idx) ((ndev)->priv->sram_offs[(idx)].offset)
+
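+/*
+ * The ({ ... }) statement-expression form below evaluates the ndev
+ * argument only once, keeping these macros safe for arguments with
+ * side effects.
+ */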
+#define SMU_REG(ndev, idx) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ ((_ndev)->smu_base + (_ndev)->priv->smu_regs_off[(idx)].offset); \
+})
+#define SRAM_GET_ADDR(ndev, idx) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ ((_ndev)->sram_base + SRAM_REG_OFF((_ndev), (idx))); \
+})
+
+#define CHAN_SLOT_SZ SZ_8K
+#define MBOX_SIZE(ndev) \
+({ \
+ typeof(ndev) _ndev = (ndev); \
+ ((_ndev)->priv->mbox_size) ? (_ndev)->priv->mbox_size : \
+ pci_resource_len(NDEV2PDEV(_ndev), (_ndev)->xdna->dev_info->mbox_bar); \
+})
+
+enum aie2_smu_reg_idx {
+ SMU_CMD_REG = 0,
+ SMU_ARG_REG,
+ SMU_INTR_REG,
+ SMU_RESP_REG,
+ SMU_OUT_REG,
+ SMU_MAX_REGS /* Keep this at the end */
+};
+
+enum aie2_sram_reg_idx {
+ MBOX_CHANN_OFF = 0,
+ FW_ALIVE_OFF,
+ SRAM_MAX_INDEX /* Keep this at the end */
+};
+
+enum psp_reg_idx {
+ PSP_CMD_REG = 0,
+ PSP_ARG0_REG,
+ PSP_ARG1_REG,
+ PSP_ARG2_REG,
+ PSP_NUM_IN_REGS, /* number of input registers */
+ PSP_INTR_REG = PSP_NUM_IN_REGS,
+ PSP_STATUS_REG,
+ PSP_RESP_REG,
+ PSP_MAX_REGS /* Keep this at the end */
+};
+
+struct amdxdna_client;
+struct amdxdna_fw_ver;
+struct amdxdna_hwctx;
+struct amdxdna_sched_job;
+
+struct psp_config {
+ const void *fw_buf;
+ u32 fw_size;
+ void __iomem *psp_regs[PSP_MAX_REGS];
+};
+
+struct aie_version {
+ u16 major;
+ u16 minor;
+};
+
+struct aie_tile_metadata {
+ u16 row_count;
+ u16 row_start;
+ u16 dma_channel_count;
+ u16 lock_count;
+ u16 event_reg_count;
+};
+
+struct aie_metadata {
+ u32 size;
+ u16 cols;
+ u16 rows;
+ struct aie_version version;
+ struct aie_tile_metadata core;
+ struct aie_tile_metadata mem;
+ struct aie_tile_metadata shim;
+};
+
+enum rt_config_category {
+ AIE2_RT_CFG_INIT,
+ AIE2_RT_CFG_CLK_GATING,
+};
+
+struct rt_config {
+ u32 type;
+ u32 value;
+ u32 category;
+};
+
+struct dpm_clk_freq {
+ u32 npuclk;
+ u32 hclk;
+};
+
+/*
+ * Define the maximum number of pending commands in a hardware context.
+ * Must be a power of 2!
+ */
+#define HWCTX_MAX_CMDS 4
+#define get_job_idx(seq) ((seq) & (HWCTX_MAX_CMDS - 1))
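+/*
+ * Because HWCTX_MAX_CMDS is a power of 2, the mask above wraps a
+ * monotonically increasing sequence number onto the command slots,
+ * e.g. seq 0..7 maps to slots 0, 1, 2, 3, 0, 1, 2, 3.
+ */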
+struct amdxdna_hwctx_priv {
+ struct amdxdna_gem_obj *heap;
+ void *mbox_chann;
+
+ struct drm_gpu_scheduler sched;
+ struct drm_sched_entity entity;
+
+ struct mutex io_lock; /* protect seq and cmd order */
+ struct wait_queue_head job_free_wq;
+ u32 num_pending;
+ u64 seq;
+ struct semaphore job_sem;
+ bool job_done;
+
+ /* Completed job counter */
+ u64 completed;
+
+ struct amdxdna_gem_obj *cmd_buf[HWCTX_MAX_CMDS];
+ struct drm_syncobj *syncobj;
+};
+
+enum aie2_dev_status {
+ AIE2_DEV_UNINIT,
+ AIE2_DEV_INIT,
+ AIE2_DEV_START,
+};
+
+struct amdxdna_dev_hdl {
+ struct amdxdna_dev *xdna;
+ const struct amdxdna_dev_priv *priv;
+ void __iomem *sram_base;
+ void __iomem *smu_base;
+ void __iomem *mbox_base;
+ struct psp_device *psp_hdl;
+
+ struct xdna_mailbox_chann_res mgmt_x2i;
+ struct xdna_mailbox_chann_res mgmt_i2x;
+ u32 mgmt_chan_idx;
+ u32 mgmt_prot_major;
+ u32 mgmt_prot_minor;
+
+ u32 total_col;
+ struct aie_version version;
+ struct aie_metadata metadata;
+
+ /* power management and clock */
+ enum amdxdna_power_mode_type pw_mode;
+ u32 dpm_level;
+ u32 dft_dpm_level;
+ u32 max_dpm_level;
+ u32 clk_gating;
+ u32 npuclk_freq;
+ u32 hclk_freq;
+
+ /* Mailbox and the management channel */
+ struct mailbox *mbox;
+ struct mailbox_channel *mgmt_chann;
+ struct async_events *async_events;
+
+ enum aie2_dev_status dev_status;
+ u32 hwctx_num;
+};
+
+#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
+ [reg_name] = {bar##_BAR_INDEX, (reg_addr) - bar##_BAR_BASE}
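+/*
+ * For a BAR prefix FOO, DEFINE_BAR_OFFSET(REG, FOO, addr) expands to
+ * [REG] = {FOO_BAR_INDEX, (addr) - FOO_BAR_BASE}, turning an absolute
+ * register address into a (BAR index, offset) pair.
+ */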
+
+struct aie2_bar_off_pair {
+ int bar_idx;
+ u32 offset;
+};
+
+struct aie2_hw_ops {
+ int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+};
+
+struct amdxdna_dev_priv {
+ const char *fw_path;
+ u64 protocol_major;
+ u64 protocol_minor;
+ const struct rt_config *rt_config;
+ const struct dpm_clk_freq *dpm_clk_tbl;
+
+#define COL_ALIGN_NONE 0
+#define COL_ALIGN_NATURE 1
+ u32 col_align;
+ u32 mbox_dev_addr;
+ /* If mbox_size is 0, use BAR size. See MBOX_SIZE macro */
+ u32 mbox_size;
+ u32 sram_dev_addr;
+ struct aie2_bar_off_pair sram_offs[SRAM_MAX_INDEX];
+ struct aie2_bar_off_pair psp_regs_off[PSP_MAX_REGS];
+ struct aie2_bar_off_pair smu_regs_off[SMU_MAX_REGS];
+ struct aie2_hw_ops hw_ops;
+};
+
+extern const struct amdxdna_dev_ops aie2_ops;
+
+int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
+ enum rt_config_category category, u32 *val);
+
+/* aie2 npu hw config */
+extern const struct dpm_clk_freq npu1_dpm_clk_table[];
+extern const struct dpm_clk_freq npu4_dpm_clk_table[];
+extern const struct rt_config npu1_default_rt_cfg[];
+extern const struct rt_config npu4_default_rt_cfg[];
+
+/* aie2_smu.c */
+int aie2_smu_init(struct amdxdna_dev_hdl *ndev);
+void aie2_smu_fini(struct amdxdna_dev_hdl *ndev);
+int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
+
+/* aie2_pm.c */
+int aie2_pm_init(struct amdxdna_dev_hdl *ndev);
+int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target);
+
+/* aie2_psp.c */
+struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf);
+int aie2_psp_start(struct psp_device *psp);
+void aie2_psp_stop(struct psp_device *psp);
+
+/* aie2_error.c */
+int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev);
+void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev);
+int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev);
+int aie2_error_async_msg_thread(void *data);
+
+/* aie2_message.c */
+int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
+int aie2_resume_fw(struct amdxdna_dev_hdl *ndev);
+int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value);
+int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value);
+int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid);
+int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version);
+int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata);
+int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_fw_ver *fw_ver);
+int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
+int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
+int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size);
+int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
+int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
+ void *handle, int (*cb)(void*, const u32 *, size_t));
+int aie2_config_cu(struct amdxdna_hwctx *hwctx);
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
+ struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, const u32 *, size_t));
+
+/* aie2_hwctx.c */
+int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
+void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
+int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx);
+void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx);
+int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
+void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
+void aie2_restart_ctx(struct amdxdna_client *client);
+
+#endif /* _AIE2_PCI_H_ */
diff --git a/drivers/accel/amdxdna/aie2_pm.c b/drivers/accel/amdxdna/aie2_pm.c
new file mode 100644
index 000000000000..426c38fce848
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_pm.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_pci_drv.h"
+
+#define AIE2_CLK_GATING_ENABLE 1
+#define AIE2_CLK_GATING_DISABLE 0
+
+static int aie2_pm_set_clk_gating(struct amdxdna_dev_hdl *ndev, u32 val)
+{
+ int ret;
+
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, &val);
+ if (ret)
+ return ret;
+
+ ndev->clk_gating = val;
+ return 0;
+}
+
+int aie2_pm_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ if (ndev->dev_status != AIE2_DEV_UNINIT) {
+ /* Resume device */
+ ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, ndev->clk_gating);
+ if (ret)
+ return ret;
+
+ return 0;
+ }
+
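+ /*
+ * The DPM clock table is terminated by an entry with hclk == 0;
+ * the last entry with a non-zero hclk is the maximum DPM level.
+ */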
+ while (ndev->priv->dpm_clk_tbl[ndev->max_dpm_level].hclk)
+ ndev->max_dpm_level++;
+ ndev->max_dpm_level--;
+
+ ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->max_dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, AIE2_CLK_GATING_ENABLE);
+ if (ret)
+ return ret;
+
+ ndev->pw_mode = POWER_MODE_DEFAULT;
+ ndev->dft_dpm_level = ndev->max_dpm_level;
+
+ return 0;
+}
+
+int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+ u32 clk_gating, dpm_level;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ if (ndev->pw_mode == target)
+ return 0;
+
+ switch (target) {
+ case POWER_MODE_TURBO:
+ if (ndev->hwctx_num) {
+ XDNA_ERR(xdna, "Can not set turbo when there is active hwctx");
+ return -EINVAL;
+ }
+
+ clk_gating = AIE2_CLK_GATING_DISABLE;
+ dpm_level = ndev->max_dpm_level;
+ break;
+ case POWER_MODE_HIGH:
+ clk_gating = AIE2_CLK_GATING_ENABLE;
+ dpm_level = ndev->max_dpm_level;
+ break;
+ case POWER_MODE_DEFAULT:
+ clk_gating = AIE2_CLK_GATING_ENABLE;
+ dpm_level = ndev->dft_dpm_level;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
+ if (ret)
+ return ret;
+
+ ret = aie2_pm_set_clk_gating(ndev, clk_gating);
+ if (ret)
+ return ret;
+
+ ndev->pw_mode = target;
+
+ return 0;
+}
diff --git a/drivers/accel/amdxdna/aie2_psp.c b/drivers/accel/amdxdna/aie2_psp.c
new file mode 100644
index 000000000000..dc3a072ce3b6
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_psp.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+#define PSP_STATUS_READY BIT(31)
+
+/* PSP commands */
+#define PSP_VALIDATE 1
+#define PSP_START 2
+#define PSP_RELEASE_TMR 3
+
+/* PSP special arguments */
+#define PSP_START_COPY_FW 1
+
+/* PSP response error code */
+#define PSP_ERROR_CANCEL 0xFFFF0002
+#define PSP_ERROR_BAD_STATE 0xFFFF0007
+
+#define PSP_FW_ALIGN 0x10000
+#define PSP_POLL_INTERVAL 20000 /* us */
+#define PSP_POLL_TIMEOUT 1000000 /* us */
+
+#define PSP_REG(p, reg) ((p)->psp_regs[reg])
+
+struct psp_device {
+ struct drm_device *ddev;
+ struct psp_config conf;
+ u32 fw_buf_sz;
+ u64 fw_paddr;
+ void *fw_buffer;
+ void __iomem *psp_regs[PSP_MAX_REGS];
+};
+
+static int psp_exec(struct psp_device *psp, u32 *reg_vals)
+{
+ u32 resp_code;
+ int ret, i;
+ u32 ready;
+
+ /* Write command and argument registers */
+ for (i = 0; i < PSP_NUM_IN_REGS; i++)
+ writel(reg_vals[i], PSP_REG(psp, i));
+
+ /* clear and set PSP INTR register to kick off */
+ writel(0, PSP_REG(psp, PSP_INTR_REG));
+ writel(1, PSP_REG(psp, PSP_INTR_REG));
+
+ /* PSP should be busy. Wait for ready, so we know task is done. */
+ ret = readx_poll_timeout(readl, PSP_REG(psp, PSP_STATUS_REG), ready,
+ FIELD_GET(PSP_STATUS_READY, ready),
+ PSP_POLL_INTERVAL, PSP_POLL_TIMEOUT);
+ if (ret) {
+ drm_err(psp->ddev, "PSP is not ready, ret 0x%x", ret);
+ return ret;
+ }
+
+ resp_code = readl(PSP_REG(psp, PSP_RESP_REG));
+ if (resp_code) {
+ drm_err(psp->ddev, "fw return error 0x%x", resp_code);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void aie2_psp_stop(struct psp_device *psp)
+{
+ u32 reg_vals[PSP_NUM_IN_REGS] = { PSP_RELEASE_TMR, };
+ int ret;
+
+ ret = psp_exec(psp, reg_vals);
+ if (ret)
+ drm_err(psp->ddev, "release tmr failed, ret %d", ret);
+}
+
+int aie2_psp_start(struct psp_device *psp)
+{
+ u32 reg_vals[PSP_NUM_IN_REGS];
+ int ret;
+
+ reg_vals[0] = PSP_VALIDATE;
+ reg_vals[1] = lower_32_bits(psp->fw_paddr);
+ reg_vals[2] = upper_32_bits(psp->fw_paddr);
+ reg_vals[3] = psp->fw_buf_sz;
+
+ ret = psp_exec(psp, reg_vals);
+ if (ret) {
+ drm_err(psp->ddev, "failed to validate fw, ret %d", ret);
+ return ret;
+ }
+
+ memset(reg_vals, 0, sizeof(reg_vals));
+ reg_vals[0] = PSP_START;
+ reg_vals[1] = PSP_START_COPY_FW;
+ ret = psp_exec(psp, reg_vals);
+ if (ret) {
+ drm_err(psp->ddev, "failed to start fw, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf)
+{
+ struct psp_device *psp;
+ u64 offset;
+
+ psp = drmm_kzalloc(ddev, sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return NULL;
+
+ psp->ddev = ddev;
+ memcpy(psp->psp_regs, conf->psp_regs, sizeof(psp->psp_regs));
+
+ psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN) + PSP_FW_ALIGN;
+ psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz, GFP_KERNEL);
+ if (!psp->fw_buffer) {
+ drm_err(ddev, "no memory for fw buffer");
+ return NULL;
+ }
+
+ /*
+ * AMD Platform Security Processor (PSP) requires host physical
+ * address to load NPU firmware.
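+ * The buffer is over-allocated by PSP_FW_ALIGN so the image can be
+ * copied at the first PSP_FW_ALIGN-aligned physical address inside it,
+ * e.g. a buffer at physical 0x12345000 yields fw_paddr 0x12350000.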
+ */
+ psp->fw_paddr = virt_to_phys(psp->fw_buffer);
+ offset = ALIGN(psp->fw_paddr, PSP_FW_ALIGN) - psp->fw_paddr;
+ psp->fw_paddr += offset;
+ memcpy(psp->fw_buffer + offset, conf->fw_buf, conf->fw_size);
+
+ return psp;
+}
diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c
new file mode 100644
index 000000000000..73388443c676
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_smu.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iopoll.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_pci_drv.h"
+
+#define SMU_RESULT_OK 1
+
+/* SMU commands */
+#define AIE2_SMU_POWER_ON 0x3
+#define AIE2_SMU_POWER_OFF 0x4
+#define AIE2_SMU_SET_MPNPUCLK_FREQ 0x5
+#define AIE2_SMU_SET_HCLK_FREQ 0x6
+#define AIE2_SMU_SET_SOFT_DPMLEVEL 0x7
+#define AIE2_SMU_SET_HARD_DPMLEVEL 0x8
+
+static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd,
+ u32 reg_arg, u32 *out)
+{
+ u32 resp;
+ int ret;
+
+ writel(0, SMU_REG(ndev, SMU_RESP_REG));
+ writel(reg_arg, SMU_REG(ndev, SMU_ARG_REG));
+ writel(reg_cmd, SMU_REG(ndev, SMU_CMD_REG));
+
+ /* Clear and set SMU_INTR_REG to kick off */
+ writel(0, SMU_REG(ndev, SMU_INTR_REG));
+ writel(1, SMU_REG(ndev, SMU_INTR_REG));
+
+ ret = readx_poll_timeout(readl, SMU_REG(ndev, SMU_RESP_REG), resp,
+ resp, AIE2_INTERVAL, AIE2_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "smu cmd %d timed out", reg_cmd);
+ return ret;
+ }
+
+ if (out)
+ *out = readl(SMU_REG(ndev, SMU_OUT_REG));
+
+ if (resp != SMU_RESULT_OK) {
+ XDNA_ERR(ndev->xdna, "smu cmd %d failed, 0x%x", reg_cmd, resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+{
+ u32 freq;
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ,
+ ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
+ ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
+ }
+ ndev->npuclk_freq = freq;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HCLK_FREQ,
+ ndev->priv->dpm_clk_tbl[dpm_level].hclk, &freq);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
+ ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
+ }
+ ndev->hclk_freq = freq;
+ ndev->dpm_level = dpm_level;
+
+ XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
+ ndev->npuclk_freq, ndev->hclk_freq);
+
+ return 0;
+}
+
+int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
+{
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ",
+ dpm_level, ret);
+ return ret;
+ }
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d",
+ dpm_level, ret);
+ return ret;
+ }
+
+ ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk;
+ ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk;
+ ndev->dpm_level = dpm_level;
+
+ XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
+ ndev->npuclk_freq, ndev->hclk_freq);
+
+ return 0;
+}
+
+int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_ON, 0, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Power on failed, ret %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void aie2_smu_fini(struct amdxdna_dev_hdl *ndev)
+{
+ int ret;
+
+ ndev->priv->hw_ops.set_dpm(ndev, 0);
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0, NULL);
+ if (ret)
+ XDNA_ERR(ndev->xdna, "Power off failed, ret %d", ret);
+}
diff --git a/drivers/accel/amdxdna/aie2_solver.c b/drivers/accel/amdxdna/aie2_solver.c
new file mode 100644
index 000000000000..2013d1f13aae
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_solver.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+
+#include "aie2_solver.h"
+
+struct partition_node {
+ struct list_head list;
+ u32 nshared; /* # shared requests */
+ u32 start_col; /* start column */
+ u32 ncols; /* # columns */
+ bool exclusive; /* cannot be shared if set */
+};
+
+struct solver_node {
+ struct list_head list;
+ u64 rid; /* Request ID from consumer */
+
+ struct partition_node *pt_node;
+ void *cb_arg;
+ u32 dpm_level;
+ u32 cols_len;
+ u32 start_cols[] __counted_by(cols_len);
+};
+
+struct solver_rgroup {
+ u32 rgid;
+ u32 nnode;
+ u32 npartition_node;
+
+ DECLARE_BITMAP(resbit, XRS_MAX_COL);
+ struct list_head node_list;
+ struct list_head pt_node_list;
+};
+
+struct solver_state {
+ struct solver_rgroup rgp;
+ struct init_config cfg;
+ struct xrs_action_ops *actions;
+};
+
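+/*
+ * Scale the requested GOPs by the higher of the requested frame rate
+ * and the service rate implied by the latency budget (1000 / latency).
+ */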
+static u32 calculate_gops(struct aie_qos *rqos)
+{
+ u32 service_rate = 0;
+
+ if (rqos->latency)
+ service_rate = (1000 / rqos->latency);
+
+ if (rqos->fps > service_rate)
+ return rqos->fps * rqos->gops;
+
+ return service_rate * rqos->gops;
+}
+
+/*
+ * qos_meet() - Check whether the QoS request can be met.
+ */
+static int qos_meet(struct solver_state *xrs, struct aie_qos *rqos, u32 cgops)
+{
+ u32 request_gops = calculate_gops(rqos) * xrs->cfg.sys_eff_factor;
+
+ if (request_gops <= cgops)
+ return 0;
+
+ return -EINVAL;
+}
+
+/*
+ * sanity_check() - Do a basic sanity check on the allocation request.
+ */
+static int sanity_check(struct solver_state *xrs, struct alloc_requests *req)
+{
+ struct cdo_parts *cdop = &req->cdo;
+ struct aie_qos *rqos = &req->rqos;
+ u32 cu_clk_freq;
+
+ if (cdop->ncols > xrs->cfg.total_col)
+ return -EINVAL;
+
+ /*
+ * Make sure we can find at least one CDO group that meets the
+ * GOPs requirement at the highest available clock frequency.
+ */
+ cu_clk_freq = xrs->cfg.clk_list.cu_clk_list[xrs->cfg.clk_list.num_levels - 1];
+
+ if (qos_meet(xrs, rqos, cdop->qos_cap.opc * cu_clk_freq / 1000))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool is_valid_qos_dpm_params(struct aie_qos *rqos)
+{
+ /*
+ * gops is retrieved from the xmodel, so it is always set;
+ * fps and latency are the configurable params from the application.
+ */
+ if (rqos->gops > 0 && (rqos->fps > 0 || rqos->latency > 0))
+ return true;
+
+ return false;
+}
+
+static int set_dpm_level(struct solver_state *xrs, struct alloc_requests *req, u32 *dpm_level)
+{
+ struct solver_rgroup *rgp = &xrs->rgp;
+ struct cdo_parts *cdop = &req->cdo;
+ struct aie_qos *rqos = &req->rqos;
+ u32 freq, max_dpm_level, level;
+ struct solver_node *node;
+
+ max_dpm_level = xrs->cfg.clk_list.num_levels - 1;
+ /* If no QoS parameters are passed, set it to the max DPM level */
+ if (!is_valid_qos_dpm_params(rqos)) {
+ level = max_dpm_level;
+ goto set_dpm;
+ }
+
+ /* Find one CDO group that meets the GOPs requirement. */
+ for (level = 0; level < max_dpm_level; level++) {
+ freq = xrs->cfg.clk_list.cu_clk_list[level];
+ if (!qos_meet(xrs, rqos, cdop->qos_cap.opc * freq / 1000))
+ break;
+ }
+
+ /* set the dpm level which fits all the sessions */
+ list_for_each_entry(node, &rgp->node_list, list) {
+ if (node->dpm_level > level)
+ level = node->dpm_level;
+ }
+
+set_dpm:
+ *dpm_level = level;
+ return xrs->cfg.actions->set_dft_dpm_level(xrs->cfg.ddev, level);
+}
+
+static struct solver_node *rg_search_node(struct solver_rgroup *rgp, u64 rid)
+{
+ struct solver_node *node;
+
+ list_for_each_entry(node, &rgp->node_list, list) {
+ if (node->rid == rid)
+ return node;
+ }
+
+ return NULL;
+}
+
+static void remove_partition_node(struct solver_rgroup *rgp,
+ struct partition_node *pt_node)
+{
+ pt_node->nshared--;
+ if (pt_node->nshared > 0)
+ return;
+
+ list_del(&pt_node->list);
+ rgp->npartition_node--;
+
+ bitmap_clear(rgp->resbit, pt_node->start_col, pt_node->ncols);
+ kfree(pt_node);
+}
+
+static void remove_solver_node(struct solver_rgroup *rgp,
+ struct solver_node *node)
+{
+ list_del(&node->list);
+ rgp->nnode--;
+
+ if (node->pt_node)
+ remove_partition_node(rgp, node->pt_node);
+
+ kfree(node);
+}
+
+static int get_free_partition(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct alloc_requests *req)
+{
+ struct partition_node *pt_node;
+ u32 ncols = req->cdo.ncols;
+ u32 col, i;
+
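+ /*
+ * Columns [col, col + ncols) are free when the next set bit at or
+ * after col in the resource bitmap falls beyond that window.
+ */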
+ for (i = 0; i < snode->cols_len; i++) {
+ col = snode->start_cols[i];
+ if (find_next_bit(xrs->rgp.resbit, XRS_MAX_COL, col) >= col + ncols)
+ break;
+ }
+
+ if (i == snode->cols_len)
+ return -ENODEV;
+
+ pt_node = kzalloc(sizeof(*pt_node), GFP_KERNEL);
+ if (!pt_node)
+ return -ENOMEM;
+
+ pt_node->nshared = 1;
+ pt_node->start_col = col;
+ pt_node->ncols = ncols;
+
+ /*
+ * Always set exclusive to false for now.
+ */
+ pt_node->exclusive = false;
+
+ list_add_tail(&pt_node->list, &xrs->rgp.pt_node_list);
+ xrs->rgp.npartition_node++;
+ bitmap_set(xrs->rgp.resbit, pt_node->start_col, pt_node->ncols);
+
+ snode->pt_node = pt_node;
+
+ return 0;
+}
+
+static int allocate_partition(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct alloc_requests *req)
+{
+ struct partition_node *pt_node, *rpt_node = NULL;
+ int idx, ret;
+
+ ret = get_free_partition(xrs, snode, req);
+ if (!ret)
+ return ret;
+
+ /* try to get a shareable partition */
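+ /* Prefer the geometry-matching partition with the fewest sharers */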
+ list_for_each_entry(pt_node, &xrs->rgp.pt_node_list, list) {
+ if (pt_node->exclusive)
+ continue;
+
+ if (rpt_node && pt_node->nshared >= rpt_node->nshared)
+ continue;
+
+ for (idx = 0; idx < snode->cols_len; idx++) {
+ if (snode->start_cols[idx] != pt_node->start_col)
+ continue;
+
+ if (req->cdo.ncols != pt_node->ncols)
+ continue;
+
+ rpt_node = pt_node;
+ break;
+ }
+ }
+
+ if (!rpt_node)
+ return -ENODEV;
+
+ rpt_node->nshared++;
+ snode->pt_node = rpt_node;
+
+ return 0;
+}
+
+static struct solver_node *create_solver_node(struct solver_state *xrs,
+ struct alloc_requests *req)
+{
+ struct cdo_parts *cdop = &req->cdo;
+ struct solver_node *node;
+ int ret;
+
+ node = kzalloc(struct_size(node, start_cols, cdop->cols_len), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->rid = req->rid;
+ node->cols_len = cdop->cols_len;
+ memcpy(node->start_cols, cdop->start_cols, cdop->cols_len * sizeof(u32));
+
+ ret = allocate_partition(xrs, node, req);
+ if (ret)
+ goto free_node;
+
+ list_add_tail(&node->list, &xrs->rgp.node_list);
+ xrs->rgp.nnode++;
+ return node;
+
+free_node:
+ kfree(node);
+ return ERR_PTR(ret);
+}
+
+static void fill_load_action(struct solver_state *xrs,
+ struct solver_node *snode,
+ struct xrs_action_load *action)
+{
+ action->rid = snode->rid;
+ action->part.start_col = snode->pt_node->start_col;
+ action->part.ncols = snode->pt_node->ncols;
+}
+
+int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg)
+{
+ struct xrs_action_load load_act;
+ struct solver_node *snode;
+ struct solver_state *xrs;
+ u32 dpm_level;
+ int ret;
+
+ xrs = (struct solver_state *)hdl;
+
+ ret = sanity_check(xrs, req);
+ if (ret) {
+ drm_err(xrs->cfg.ddev, "invalid request");
+ return ret;
+ }
+
+ if (rg_search_node(&xrs->rgp, req->rid)) {
+ drm_err(xrs->cfg.ddev, "rid %lld is in-use", req->rid);
+ return -EEXIST;
+ }
+
+ snode = create_solver_node(xrs, req);
+ if (IS_ERR(snode))
+ return PTR_ERR(snode);
+
+ fill_load_action(xrs, snode, &load_act);
+ ret = xrs->cfg.actions->load(cb_arg, &load_act);
+ if (ret)
+ goto free_node;
+
+ ret = set_dpm_level(xrs, req, &dpm_level);
+ if (ret)
+ goto free_node;
+
+ snode->dpm_level = dpm_level;
+ snode->cb_arg = cb_arg;
+
+ drm_dbg(xrs->cfg.ddev, "start col %d ncols %d\n",
+ snode->pt_node->start_col, snode->pt_node->ncols);
+
+ return 0;
+
+free_node:
+ remove_solver_node(&xrs->rgp, snode);
+
+ return ret;
+}
+
+int xrs_release_resource(void *hdl, u64 rid)
+{
+ struct solver_state *xrs = hdl;
+ struct solver_node *node;
+
+ node = rg_search_node(&xrs->rgp, rid);
+ if (!node) {
+ drm_err(xrs->cfg.ddev, "node not exist");
+ return -ENODEV;
+ }
+
+ xrs->cfg.actions->unload(node->cb_arg);
+ remove_solver_node(&xrs->rgp, node);
+
+ return 0;
+}
+
+void *xrsm_init(struct init_config *cfg)
+{
+ struct solver_rgroup *rgp;
+ struct solver_state *xrs;
+
+ xrs = drmm_kzalloc(cfg->ddev, sizeof(*xrs), GFP_KERNEL);
+ if (!xrs)
+ return NULL;
+
+ memcpy(&xrs->cfg, cfg, sizeof(*cfg));
+
+ rgp = &xrs->rgp;
+ INIT_LIST_HEAD(&rgp->node_list);
+ INIT_LIST_HEAD(&rgp->pt_node_list);
+
+ return xrs;
+}
diff --git a/drivers/accel/amdxdna/aie2_solver.h b/drivers/accel/amdxdna/aie2_solver.h
new file mode 100644
index 000000000000..a2e3c52229e9
--- /dev/null
+++ b/drivers/accel/amdxdna/aie2_solver.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_SOLVER_H
+#define _AIE2_SOLVER_H
+
+#define XRS_MAX_COL 128
+
+/*
+ * Structure used to describe a partition. A partition is a column-based
+ * allocation unit described by its start column and number of columns.
+ */
+struct aie_part {
+ u32 start_col;
+ u32 ncols;
+};
+
+/*
+ * The QoS capabilities of a given AIE partition.
+ */
+struct aie_qos_cap {
+ u32 opc; /* operations per cycle */
+ u32 dma_bw; /* DMA bandwidth */
+};
+
+/*
+ * QoS requirement of a resource allocation.
+ */
+struct aie_qos {
+ u32 gops; /* Giga operations */
+ u32 fps; /* Frames per second */
+ u32 dma_bw; /* DMA bandwidth */
+ u32 latency; /* Frame response latency */
+ u32 exec_time; /* Frame execution time */
+ u32 priority; /* Request priority */
+};
+
+/*
+ * Structure used to describe a relocatable CDO (Configuration Data Object).
+ */
+struct cdo_parts {
+ u32 *start_cols; /* Start column array */
+ u32 cols_len; /* Length of start column array */
+ u32 ncols; /* # of columns */
+ struct aie_qos_cap qos_cap; /* CDO QoS capabilities */
+};
+
+/*
+ * Structure used to describe a request to allocate.
+ */
+struct alloc_requests {
+ u64 rid;
+ struct cdo_parts cdo;
+ struct aie_qos rqos; /* Requested QoS */
+};
+
+/*
+ * Load callback argument
+ */
+struct xrs_action_load {
+ u32 rid;
+ struct aie_part part;
+};
+
+/*
+ * Define the power level available
+ *
+ * POWER_LEVEL_MIN:
+ * Lowest power level. Usually set when all actions are unloaded.
+ *
+ * POWER_LEVEL_n:
+ * Power levels 0 - n; each level is a step increase in system frequencies.
+ */
+enum power_level {
+ POWER_LEVEL_MIN = 0x0,
+ POWER_LEVEL_0 = 0x1,
+ POWER_LEVEL_1 = 0x2,
+ POWER_LEVEL_2 = 0x3,
+ POWER_LEVEL_3 = 0x4,
+ POWER_LEVEL_4 = 0x5,
+ POWER_LEVEL_5 = 0x6,
+ POWER_LEVEL_6 = 0x7,
+ POWER_LEVEL_7 = 0x8,
+ POWER_LEVEL_NUM,
+};
+
+/*
+ * Structure used to describe the frequency table.
+ * Resource solver chooses the frequency from the table
+ * to meet the QOS requirements.
+ */
+struct clk_list_info {
+ u32 num_levels; /* available power levels */
+ u32 cu_clk_list[POWER_LEVEL_NUM]; /* available AIE clock frequencies in MHz */
+};
+
+struct xrs_action_ops {
+ int (*load)(void *cb_arg, struct xrs_action_load *action);
+ int (*unload)(void *cb_arg);
+ int (*set_dft_dpm_level)(struct drm_device *ddev, u32 level);
+};
+
+/*
+ * Structure used to describe information for solver during initialization.
+ */
+struct init_config {
+ u32 total_col;
+ u32 sys_eff_factor; /* system efficiency factor */
+ u32 latency_adj; /* latency adjustment in ms */
+ struct clk_list_info clk_list; /* List of frequencies available in system */
+ struct drm_device *ddev;
+ struct xrs_action_ops *actions;
+};
+
+/*
+ * xrsm_init() - Register resource solver. Resource solver client needs
+ * to call this function to register itself.
+ *
+ * @cfg: The system metrics for resource solver to use
+ *
+ * Return: A resource solver handle
+ *
+ * Note: We should only create one handle per AIE array to be managed.
+ */
+void *xrsm_init(struct init_config *cfg);
+
+/*
+ * xrs_allocate_resource() - Request to allocate resources for a given context
+ * and a partition metadata. (See struct part_meta)
+ *
+ * @hdl: Resource solver handle obtained from xrsm_init()
+ * @req: Input to the Resource solver including request id
+ * and partition metadata.
+ * @cb_arg: callback argument pointer
+ *
+ * Return: 0 when successful.
+ * Or standard error number when failing
+ *
+ * Note:
+ * There is no locking inside the resource solver, so it is
+ * the caller's responsibility to lock down XCLBINs and take
+ * the necessary locks.
+ */
+int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg);
+
+/*
+ * xrs_release_resource() - Request to free resources for a given context.
+ *
+ * @hdl: Resource solver handle obtained from xrsm_init()
+ * @rid: The Request ID to identify the requesting context
+ */
+int xrs_release_resource(void *hdl, u64 rid);
+#endif /* _AIE2_SOLVER_H */
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
new file mode 100644
index 000000000000..d11b1c83d9c3
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/xarray.h>
+#include <trace/events/amdxdna.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define MAX_HWCTX_ID 255
+#define MAX_ARG_COUNT 4095
+
+struct amdxdna_fence {
+ struct dma_fence base;
+ spinlock_t lock; /* for base */
+ struct amdxdna_hwctx *hwctx;
+};
+
+static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
+{
+ return KBUILD_MODNAME;
+}
+
+static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct amdxdna_fence *xdna_fence;
+
+ xdna_fence = container_of(fence, struct amdxdna_fence, base);
+
+ return xdna_fence->hwctx->name;
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = amdxdna_fence_get_driver_name,
+ .get_timeline_name = amdxdna_fence_get_timeline_name,
+};
+
+static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
+{
+ struct amdxdna_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence->hwctx = hwctx;
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &fence_ops, &fence->lock, hwctx->id, 0);
+ return &fence->base;
+}
+
+void amdxdna_hwctx_suspend(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
+ xdna->dev_info->ops->hwctx_suspend(hwctx);
+ mutex_unlock(&client->hwctx_lock);
+}
+
+void amdxdna_hwctx_resume(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
+ xdna->dev_info->ops->hwctx_resume(hwctx);
+ mutex_unlock(&client->hwctx_lock);
+}
+
+static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
+ struct srcu_struct *ss)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+
+ synchronize_srcu(ss);
+
+ /* At this point, user is not able to submit new commands */
+ mutex_lock(&xdna->dev_lock);
+ xdna->dev_info->ops->hwctx_fini(hwctx);
+ mutex_unlock(&xdna->dev_lock);
+
+ kfree(hwctx->name);
+ kfree(hwctx);
+}
+
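+/*
+ * The command payload follows the CU mask words in cmd->data: a command
+ * chain carries no CU mask, otherwise there is one mask word plus
+ * AMDXDNA_CMD_EXTRA_CU_MASK extra mask words encoded in the header.
+ */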
+void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+ u32 num_masks, count;
+
+ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
+ num_masks = 0;
+ else
+ num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
+
+ if (size) {
+ count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
+ if (unlikely(count <= num_masks)) {
+ *size = 0;
+ return NULL;
+ }
+ *size = (count - num_masks) * sizeof(u32);
+ }
+ return &cmd->data[num_masks];
+}
+
+int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+ u32 num_masks, i;
+ u32 *cu_mask;
+
+ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
+ return -1;
+
+ num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
+ cu_mask = cmd->data;
+ for (i = 0; i < num_masks; i++) {
+ if (cu_mask[i])
+ return ffs(cu_mask[i]) - 1;
+ }
+
+ return -1;
+}
+
+/*
+ * This should be called in close() and remove(). DO NOT call in other syscalls.
+ * It guarantees that the hwctx and its resources are released even if
+ * the user never calls amdxdna_drm_destroy_hwctx_ioctl.
+ */
+void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+
+ mutex_lock(&client->hwctx_lock);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ XDNA_DBG(client->xdna, "PID %d close HW context %d",
+ client->pid, hwctx->id);
+ xa_erase(&client->hwctx_xa, hwctx->id);
+ mutex_unlock(&client->hwctx_lock);
+ amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
+ mutex_lock(&client->hwctx_lock);
+ }
+ mutex_unlock(&client->hwctx_lock);
+}
+
+int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_create_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+
+ if (args->ext || args->ext_flags)
+ return -EINVAL;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
+ if (!hwctx) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
+ XDNA_ERR(xdna, "Access QoS info failed");
+ ret = -EFAULT;
+ goto free_hwctx;
+ }
+
+ hwctx->client = client;
+ hwctx->fw_ctx_id = -1;
+ hwctx->num_tiles = args->num_tiles;
+ hwctx->mem_size = args->mem_size;
+ hwctx->max_opc = args->max_opc;
+ ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
+ XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
+ &client->next_hwctxid, GFP_KERNEL);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
+ goto free_hwctx;
+ }
+
+ hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
+ if (!hwctx->name) {
+ ret = -ENOMEM;
+ goto rm_id;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->hwctx_init(hwctx);
+ if (ret) {
+ mutex_unlock(&xdna->dev_lock);
+ XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ goto free_name;
+ }
+ args->handle = hwctx->id;
+ args->syncobj_handle = hwctx->syncobj_hdl;
+ mutex_unlock(&xdna->dev_lock);
+
+ XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
+ drm_dev_exit(idx);
+ return 0;
+
+free_name:
+ kfree(hwctx->name);
+rm_id:
+ xa_erase(&client->hwctx_xa, hwctx->id);
+free_hwctx:
+ kfree(hwctx);
+exit:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_destroy_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret = 0, idx;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ return -EINVAL;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
+
+ hwctx = xa_erase(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ ret = -EINVAL;
+ XDNA_DBG(xdna, "PID %d HW context %d not exist",
+ client->pid, args->handle);
+ goto out;
+ }
+
+ /*
+ * The pushed jobs are handled by DRM scheduler during destroy.
+ * SRCU is used to synchronize with the exec command ioctls.
+ */
+ amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
+
+ XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
+out:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_config_hwctx *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+ u32 buf_size;
+ void *buf;
+ u64 val;
+
+ if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
+ return -EINVAL;
+
+ if (!xdna->dev_info->ops->hwctx_config)
+ return -EOPNOTSUPP;
+
+ val = args->param_val;
+ buf_size = args->param_val_size;
+
+ switch (args->param_type) {
+ case DRM_AMDXDNA_HWCTX_CONFIG_CU:
+ /* For types where param_val is a pointer */
+ if (buf_size > PAGE_SIZE) {
+ XDNA_ERR(xdna, "Config CU param buffer too large");
+ return -E2BIG;
+ }
+
+ /* Hwctx needs to keep buf */
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ break;
+ case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
+ /* For types where param_val is a value */
+ buf = NULL;
+ buf_size = 0;
+ break;
+ default:
+ XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
+ return -EINVAL;
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, args->handle);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
+
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ mutex_unlock(&xdna->dev_lock);
+ kfree(buf);
+ return ret;
+}
+
+static void
+amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
+{
+ int i;
+
+ for (i = 0; i < job->bo_cnt; i++) {
+ if (!job->bos[i])
+ break;
+ drm_gem_object_put(job->bos[i]);
+ }
+}
+
+static int
+amdxdna_arg_bos_lookup(struct amdxdna_client *client,
+ struct amdxdna_sched_job *job,
+ u32 *bo_hdls, u32 bo_cnt)
+{
+ struct drm_gem_object *gobj;
+ int i, ret;
+
+ job->bo_cnt = bo_cnt;
+ for (i = 0; i < job->bo_cnt; i++) {
+ struct amdxdna_gem_obj *abo;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
+ if (!gobj) {
+ ret = -ENOENT;
+ goto put_shmem_bo;
+ }
+ abo = to_xdna_obj(gobj);
+
+ mutex_lock(&abo->lock);
+ if (abo->pinned) {
+ mutex_unlock(&abo->lock);
+ job->bos[i] = gobj;
+ continue;
+ }
+
+ ret = amdxdna_gem_pin_nolock(abo);
+ if (ret) {
+ mutex_unlock(&abo->lock);
+ drm_gem_object_put(gobj);
+ goto put_shmem_bo;
+ }
+ abo->pinned = true;
+ mutex_unlock(&abo->lock);
+
+ job->bos[i] = gobj;
+ }
+
+ return 0;
+
+put_shmem_bo:
+ amdxdna_arg_bos_put(job);
+ return ret;
+}
+
+void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
+{
+ trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
+ amdxdna_arg_bos_put(job);
+ amdxdna_gem_put_obj(job->cmd_bo);
+}
+
+int amdxdna_cmd_submit(struct amdxdna_client *client,
+ u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ u32 hwctx_hdl, u64 *seq)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_sched_job *job;
+ struct amdxdna_hwctx *hwctx;
+ int ret, idx;
+
+ XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
+ job = kzalloc(struct_size(job, bos, arg_bo_cnt), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
+ job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
+ if (!job->cmd_bo) {
+ XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
+ ret = -EINVAL;
+ goto free_job;
+ }
+ } else {
+ job->cmd_bo = NULL;
+ }
+
+ ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
+ if (ret) {
+ XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
+ goto cmd_put;
+ }
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
+ if (!hwctx) {
+ XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
+ client->pid, hwctx_hdl);
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ if (hwctx->status != HWCTX_STAT_READY) {
+ XDNA_ERR(xdna, "HW Context is not ready");
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ job->hwctx = hwctx;
+ job->mm = current->mm;
+
+ job->fence = amdxdna_fence_create(hwctx);
+ if (!job->fence) {
+ XDNA_ERR(xdna, "Failed to create fence");
+ ret = -ENOMEM;
+ goto unlock_srcu;
+ }
+ kref_init(&job->refcnt);
+
+ ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
+ if (ret)
+ goto put_fence;
+
+	/*
+	 * amdxdna_hwctx_destroy_rcu() releases the hwctx and its associated
+	 * resources only after synchronize_srcu(). Submitted jobs are handled
+	 * by the queue (for example the DRM scheduler) in the device layer,
+	 * so it is safe to unlock SRCU here.
+	 */
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");
+
+ return 0;
+
+put_fence:
+ dma_fence_put(job->fence);
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ amdxdna_arg_bos_put(job);
+cmd_put:
+ amdxdna_gem_put_obj(job->cmd_bo);
+free_job:
+ kfree(job);
+ return ret;
+}
+
+/*
+ * The submit command ioctl submits a command to firmware. One firmware
+ * command may contain multiple command BOs that are processed as a whole.
+ * A command sequence number is returned, which can be used with the wait
+ * command ioctl.
+ */
+static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
+ struct amdxdna_drm_exec_cmd *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ u32 *arg_bo_hdls;
+ u32 cmd_bo_hdl;
+ int ret;
+
+ if (!args->arg_count || args->arg_count > MAX_ARG_COUNT) {
+ XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
+ return -EINVAL;
+ }
+
+ /* Only support single command for now. */
+ if (args->cmd_count != 1) {
+ XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
+ return -EINVAL;
+ }
+
+ cmd_bo_hdl = (u32)args->cmd_handles;
+ arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
+ if (!arg_bo_hdls)
+ return -ENOMEM;
+ ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
+ args->arg_count * sizeof(u32));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_cmd_bo_hdls;
+ }
+
+ ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
+ args->arg_count, args->hwctx, &args->seq);
+ if (ret)
+ XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
+
+free_cmd_bo_hdls:
+ kfree(arg_bo_hdls);
+ if (!ret)
+ XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
+ return ret;
+}
+
+int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_drm_exec_cmd *args = data;
+
+ if (args->ext || args->ext_flags)
+ return -EINVAL;
+
+ switch (args->type) {
+ case AMDXDNA_CMD_SUBMIT_EXEC_BUF:
+ return amdxdna_drm_submit_execbuf(client, args);
+ }
+
+ XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
+ return -EINVAL;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
new file mode 100644
index 000000000000..80b0304193ec
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_CTX_H_
+#define _AMDXDNA_CTX_H_
+
+#include <linux/bitfield.h>
+
+#include "amdxdna_gem.h"
+
+struct amdxdna_hwctx_priv;
+
+enum ert_cmd_opcode {
+ ERT_START_CU = 0,
+ ERT_CMD_CHAIN = 19,
+ ERT_START_NPU = 20,
+};
+
+enum ert_cmd_state {
+ ERT_CMD_STATE_INVALID,
+ ERT_CMD_STATE_NEW,
+ ERT_CMD_STATE_QUEUED,
+ ERT_CMD_STATE_RUNNING,
+ ERT_CMD_STATE_COMPLETED,
+ ERT_CMD_STATE_ERROR,
+ ERT_CMD_STATE_ABORT,
+ ERT_CMD_STATE_SUBMITTED,
+ ERT_CMD_STATE_TIMEOUT,
+ ERT_CMD_STATE_NORESPONSE,
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_START_NPU in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
+ */
+struct amdxdna_cmd_start_npu {
+ u64 buffer; /* instruction buffer address */
+ u32 buffer_size; /* size of buffer in bytes */
+ u32 prop_count; /* properties count */
+ u32 prop_args[]; /* properties and regular kernel arguments */
+};
+
+/*
+ * Interpretation of the beginning of data payload for ERT_CMD_CHAIN in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is cmd BO handles.
+ */
+struct amdxdna_cmd_chain {
+ u32 command_count;
+ u32 submit_index;
+ u32 error_index;
+ u32 reserved[3];
+ u64 data[] __counted_by(command_count);
+};
+
+/* Exec buffer command header format */
+#define AMDXDNA_CMD_STATE GENMASK(3, 0)
+#define AMDXDNA_CMD_EXTRA_CU_MASK GENMASK(11, 10)
+#define AMDXDNA_CMD_COUNT GENMASK(22, 12)
+#define AMDXDNA_CMD_OPCODE GENMASK(27, 23)
+struct amdxdna_cmd {
+ u32 header;
+ u32 data[];
+};
+
+struct amdxdna_hwctx {
+ struct amdxdna_client *client;
+ struct amdxdna_hwctx_priv *priv;
+ char *name;
+
+ u32 id;
+ u32 max_opc;
+ u32 num_tiles;
+ u32 mem_size;
+ u32 fw_ctx_id;
+ u32 col_list_len;
+ u32 *col_list;
+ u32 start_col;
+ u32 num_col;
+#define HWCTX_STAT_INIT 0
+#define HWCTX_STAT_READY 1
+#define HWCTX_STAT_STOP 2
+ u32 status;
+ u32 old_status;
+
+ struct amdxdna_qos_info qos;
+ struct amdxdna_hwctx_param_config_cu *cus;
+ u32 syncobj_hdl;
+};
+
+#define drm_job_to_xdna_job(j) \
+ container_of(j, struct amdxdna_sched_job, base)
+
+struct amdxdna_sched_job {
+ struct drm_sched_job base;
+ struct kref refcnt;
+ struct amdxdna_hwctx *hwctx;
+ struct mm_struct *mm;
+	/* The fence that notifies the DRM scheduler when hardware completes the job */
+ struct dma_fence *fence;
+ /* user can wait on this fence */
+ struct dma_fence *out_fence;
+ bool job_done;
+ u64 seq;
+ struct amdxdna_gem_obj *cmd_bo;
+ size_t bo_cnt;
+ struct drm_gem_object *bos[] __counted_by(bo_cnt);
+};
+
+static inline u32
+amdxdna_cmd_get_op(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ return FIELD_GET(AMDXDNA_CMD_OPCODE, cmd->header);
+}
+
+static inline void
+amdxdna_cmd_set_state(struct amdxdna_gem_obj *abo, enum ert_cmd_state s)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ cmd->header &= ~AMDXDNA_CMD_STATE;
+ cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, s);
+}
+
+static inline enum ert_cmd_state
+amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_cmd *cmd = abo->mem.kva;
+
+ return FIELD_GET(AMDXDNA_CMD_STATE, cmd->header);
+}
+
+void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
+int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
+
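+/* Build a bitmask with one bit set for each column used by the context */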
+static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
+{
+ return GENMASK(hwctx->start_col + hwctx->num_col - 1,
+ hwctx->start_col);
+}
+
+void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
+void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
+void amdxdna_hwctx_suspend(struct amdxdna_client *client);
+void amdxdna_hwctx_resume(struct amdxdna_client *client);
+
+int amdxdna_cmd_submit(struct amdxdna_client *client,
+ u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ u32 hwctx_hdl, u64 *seq);
+
+int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
+ u64 seq, u32 timeout);
+
+int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+#endif /* _AMDXDNA_CTX_H_ */
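
As an aside, the exec buffer header defines above pack state, extra CU mask
bits, count and opcode into a single u32. For illustration only, a
hypothetical helper in the style of amdxdna_cmd_get_op() would read the
count field like this:

static inline u32 amdxdna_cmd_get_count(struct amdxdna_cmd *cmd)
{
	/* AMDXDNA_CMD_COUNT is GENMASK(22, 12) from the header defines */
	return FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
}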
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
new file mode 100644
index 000000000000..606433d73236
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iosys-map.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define XDNA_MAX_CMD_BO_SIZE SZ_32K
+
+static int
+amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
+{
+ struct amdxdna_client *client = abo->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_mem *mem = &abo->mem;
+ u64 offset;
+ u32 align;
+ int ret;
+
+ align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
+ ret = drm_mm_insert_node_generic(&abo->dev_heap->mm, &abo->mm_node,
+ mem->size, align,
+ 0, DRM_MM_INSERT_BEST);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
+ return ret;
+ }
+
+ mem->dev_addr = abo->mm_node.start;
+ offset = mem->dev_addr - abo->dev_heap->mem.dev_addr;
+ mem->userptr = abo->dev_heap->mem.userptr + offset;
+ mem->pages = &abo->dev_heap->base.pages[offset >> PAGE_SHIFT];
+ mem->nr_pages = mem->size >> PAGE_SHIFT;
+
+ if (use_vmap) {
+ mem->kva = vmap(mem->pages, mem->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!mem->kva) {
+ XDNA_ERR(xdna, "Failed to vmap");
+ drm_mm_remove_node(&abo->mm_node);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
+
+ if (abo->type == AMDXDNA_BO_DEV) {
+ mutex_lock(&abo->client->mm_lock);
+ drm_mm_remove_node(&abo->mm_node);
+ mutex_unlock(&abo->client->mm_lock);
+
+ vunmap(abo->mem.kva);
+ drm_gem_object_put(to_gobj(abo->dev_heap));
+ drm_gem_object_release(gobj);
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+ return;
+ }
+
+ if (abo->type == AMDXDNA_BO_DEV_HEAP)
+ drm_mm_takedown(&abo->mm);
+
+ drm_gem_vunmap_unlocked(gobj, &map);
+ mutex_destroy(&abo->lock);
+ drm_gem_shmem_free(&abo->base);
+}
+
+static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
+ .free = amdxdna_gem_obj_free,
+};
+
+static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct amdxdna_gem_obj *abo = container_of(mni, struct amdxdna_gem_obj,
+ mem.notifier);
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+
+	XDNA_DBG(xdna, "Invalidating range 0x%llx, 0x%lx, type %d",
+ abo->mem.userptr, abo->mem.size, abo->type);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ xdna->dev_info->ops->hmm_invalidate(abo, cur_seq);
+
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops amdxdna_hmm_ops = {
+ .invalidate = amdxdna_hmm_invalidate,
+};
+
+static void amdxdna_hmm_unregister(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+
+ if (!xdna->dev_info->ops->hmm_invalidate)
+ return;
+
+ mmu_interval_notifier_remove(&abo->mem.notifier);
+ kvfree(abo->mem.pfns);
+ abo->mem.pfns = NULL;
+}
+
+static int amdxdna_hmm_register(struct amdxdna_gem_obj *abo, unsigned long addr,
+ size_t len)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ u32 nr_pages;
+ int ret;
+
+ if (!xdna->dev_info->ops->hmm_invalidate)
+ return 0;
+
+ if (abo->mem.pfns)
+ return -EEXIST;
+
+ nr_pages = (PAGE_ALIGN(addr + len) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
+ abo->mem.pfns = kvcalloc(nr_pages, sizeof(*abo->mem.pfns),
+ GFP_KERNEL);
+ if (!abo->mem.pfns)
+ return -ENOMEM;
+
+ ret = mmu_interval_notifier_insert_locked(&abo->mem.notifier,
+ current->mm,
+ addr,
+ len,
+ &amdxdna_hmm_ops);
+	if (ret) {
+		XDNA_ERR(xdna, "Insert mmu notifier failed, ret %d", ret);
+		kvfree(abo->mem.pfns);
+		/* Clear the stale pointer so a later register attempt can retry */
+		abo->mem.pfns = NULL;
+		return ret;
+	}
+	abo->mem.userptr = addr;
+
+ return ret;
+}
+
+static int amdxdna_gem_obj_mmap(struct drm_gem_object *gobj,
+ struct vm_area_struct *vma)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+ unsigned long num_pages;
+ int ret;
+
+ ret = amdxdna_hmm_register(abo, vma->vm_start, gobj->size);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_shmem_mmap(&abo->base, vma);
+ if (ret)
+ goto hmm_unreg;
+
+ num_pages = gobj->size >> PAGE_SHIFT;
+ /* Try to insert the pages */
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
+ ret = vm_insert_pages(vma, vma->vm_start, abo->base.pages, &num_pages);
+ if (ret)
+ XDNA_ERR(abo->client->xdna, "Failed insert pages, ret %d", ret);
+
+ return 0;
+
+hmm_unreg:
+ amdxdna_hmm_unregister(abo);
+ return ret;
+}
+
+static vm_fault_t amdxdna_gem_vm_fault(struct vm_fault *vmf)
+{
+ return drm_gem_shmem_vm_ops.fault(vmf);
+}
+
+static void amdxdna_gem_vm_open(struct vm_area_struct *vma)
+{
+ drm_gem_shmem_vm_ops.open(vma);
+}
+
+static void amdxdna_gem_vm_close(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gobj = vma->vm_private_data;
+
+ amdxdna_hmm_unregister(to_xdna_obj(gobj));
+ drm_gem_shmem_vm_ops.close(vma);
+}
+
+static const struct vm_operations_struct amdxdna_gem_vm_ops = {
+ .fault = amdxdna_gem_vm_fault,
+ .open = amdxdna_gem_vm_open,
+ .close = amdxdna_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
+ .free = amdxdna_gem_obj_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = amdxdna_gem_obj_mmap,
+ .vm_ops = &amdxdna_gem_vm_ops,
+};
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_obj(struct drm_device *dev, size_t size)
+{
+ struct amdxdna_gem_obj *abo;
+
+ abo = kzalloc(sizeof(*abo), GFP_KERNEL);
+ if (!abo)
+ return ERR_PTR(-ENOMEM);
+
+ abo->pinned = false;
+ abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
+ mutex_init(&abo->lock);
+
+ abo->mem.userptr = AMDXDNA_INVALID_ADDR;
+ abo->mem.dev_addr = AMDXDNA_INVALID_ADDR;
+ abo->mem.size = size;
+
+ return abo;
+}
+
+/* For drm_driver->gem_create_object callback */
+struct drm_gem_object *
+amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
+{
+ struct amdxdna_gem_obj *abo;
+
+ abo = amdxdna_gem_create_obj(dev, size);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
+
+ to_gobj(abo)->funcs = &amdxdna_gem_shmem_funcs;
+
+ return to_gobj(abo);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_alloc_shmem(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct drm_gem_shmem_object *shmem;
+ struct amdxdna_gem_obj *abo;
+
+ shmem = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+
+ abo = to_xdna_obj(&shmem->base);
+ abo->client = client;
+ abo->type = AMDXDNA_BO_SHMEM;
+
+ return abo;
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_create_dev_heap(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct drm_gem_shmem_object *shmem;
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->size > xdna->dev_info->dev_mem_size) {
+ XDNA_DBG(xdna, "Invalid dev heap size 0x%llx, limit 0x%lx",
+ args->size, xdna->dev_info->dev_mem_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mutex_lock(&client->mm_lock);
+ if (client->dev_heap) {
+ XDNA_DBG(client->xdna, "dev heap is already created");
+ ret = -EBUSY;
+ goto mm_unlock;
+ }
+
+ shmem = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem)) {
+ ret = PTR_ERR(shmem);
+ goto mm_unlock;
+ }
+
+ shmem->map_wc = false;
+ abo = to_xdna_obj(&shmem->base);
+
+ abo->type = AMDXDNA_BO_DEV_HEAP;
+ abo->client = client;
+ abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
+ drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
+
+ client->dev_heap = abo;
+ drm_gem_object_get(to_gobj(abo));
+ mutex_unlock(&client->mm_lock);
+
+ return abo;
+
+mm_unlock:
+ mutex_unlock(&client->mm_lock);
+ return ERR_PTR(ret);
+}
+
+struct amdxdna_gem_obj *
+amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp, bool use_vmap)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+ struct amdxdna_gem_obj *abo, *heap;
+ int ret;
+
+ mutex_lock(&client->mm_lock);
+ heap = client->dev_heap;
+ if (!heap) {
+ ret = -EINVAL;
+ goto mm_unlock;
+ }
+
+ if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+ XDNA_ERR(xdna, "Invalid dev heap userptr");
+ ret = -EINVAL;
+ goto mm_unlock;
+ }
+
+ if (args->size > heap->mem.size) {
+ XDNA_ERR(xdna, "Invalid dev bo size 0x%llx, limit 0x%lx",
+ args->size, heap->mem.size);
+ ret = -EINVAL;
+ goto mm_unlock;
+ }
+
+ abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
+ goto mm_unlock;
+ }
+ to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
+ abo->type = AMDXDNA_BO_DEV;
+ abo->client = client;
+ abo->dev_heap = heap;
+ ret = amdxdna_gem_insert_node_locked(abo, use_vmap);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
+ goto mm_unlock;
+ }
+
+ drm_gem_object_get(to_gobj(heap));
+ drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);
+
+ mutex_unlock(&client->mm_lock);
+ return abo;
+
+mm_unlock:
+ mutex_unlock(&client->mm_lock);
+ return ERR_PTR(ret);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_drm_create_cmd_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct drm_gem_shmem_object *shmem;
+ struct amdxdna_gem_obj *abo;
+ struct iosys_map map;
+ int ret;
+
+ if (args->size > XDNA_MAX_CMD_BO_SIZE) {
+ XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (args->size < sizeof(struct amdxdna_cmd)) {
+ XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ shmem = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+ abo = to_xdna_obj(&shmem->base);
+
+ abo->type = AMDXDNA_BO_CMD;
+ abo->client = filp->driver_priv;
+
+ ret = drm_gem_vmap_unlocked(to_gobj(abo), &map);
+ if (ret) {
+ XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
+ goto release_obj;
+ }
+ abo->mem.kva = map.vaddr;
+
+ return abo;
+
+release_obj:
+ drm_gem_shmem_free(shmem);
+ return ERR_PTR(ret);
+}
+
+int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_create_bo *args = data;
+ struct amdxdna_gem_obj *abo;
+ int ret;
+
+ if (args->flags || args->vaddr || !args->size)
+ return -EINVAL;
+
+ XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
+ args->type, args->vaddr, args->size, args->flags);
+ switch (args->type) {
+ case AMDXDNA_BO_SHMEM:
+ abo = amdxdna_drm_alloc_shmem(dev, args, filp);
+ break;
+ case AMDXDNA_BO_DEV_HEAP:
+ abo = amdxdna_drm_create_dev_heap(dev, args, filp);
+ break;
+ case AMDXDNA_BO_DEV:
+ abo = amdxdna_drm_alloc_dev_bo(dev, args, filp, false);
+ break;
+ case AMDXDNA_BO_CMD:
+ abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (IS_ERR(abo))
+ return PTR_ERR(abo);
+
+ /* ready to publish object to userspace */
+ ret = drm_gem_handle_create(filp, to_gobj(abo), &args->handle);
+ if (ret) {
+ XDNA_ERR(xdna, "Create handle failed");
+ goto put_obj;
+ }
+
+ XDNA_DBG(xdna, "BO hdl %d type %d userptr 0x%llx xdna_addr 0x%llx size 0x%lx",
+ args->handle, args->type, abo->mem.userptr,
+ abo->mem.dev_addr, abo->mem.size);
+put_obj:
+	/* Drop our object reference; the handle now holds one. */
+ drm_gem_object_put(to_gobj(abo));
+ return ret;
+}
+
+int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
+ int ret;
+
+ switch (abo->type) {
+ case AMDXDNA_BO_SHMEM:
+ case AMDXDNA_BO_DEV_HEAP:
+ ret = drm_gem_shmem_pin(&abo->base);
+ break;
+ case AMDXDNA_BO_DEV:
+ ret = drm_gem_shmem_pin(&abo->dev_heap->base);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
+ return ret;
+}
+
+int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
+{
+ int ret;
+
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->dev_heap;
+
+ mutex_lock(&abo->lock);
+ ret = amdxdna_gem_pin_nolock(abo);
+ mutex_unlock(&abo->lock);
+
+ return ret;
+}
+
+void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
+{
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->dev_heap;
+
+ mutex_lock(&abo->lock);
+ drm_gem_shmem_unpin(&abo->base);
+ mutex_unlock(&abo->lock);
+}
+
+struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ u32 bo_hdl, u8 bo_type)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+
+ gobj = drm_gem_object_lookup(client->filp, bo_hdl);
+ if (!gobj) {
+		XDNA_DBG(xdna, "Cannot find bo %d", bo_hdl);
+ return NULL;
+ }
+
+ abo = to_xdna_obj(gobj);
+ if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type)
+ return abo;
+
+ drm_gem_object_put(gobj);
+ return NULL;
+}
+
+int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_drm_get_bo_info *args = data;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret = 0;
+
+ if (args->ext || args->ext_flags || args->pad)
+ return -EINVAL;
+
+ gobj = drm_gem_object_lookup(filp, args->handle);
+ if (!gobj) {
+ XDNA_DBG(xdna, "Lookup GEM object %d failed", args->handle);
+ return -ENOENT;
+ }
+
+ abo = to_xdna_obj(gobj);
+ args->vaddr = abo->mem.userptr;
+ args->xdna_addr = abo->mem.dev_addr;
+
+ if (abo->type != AMDXDNA_BO_DEV)
+ args->map_offset = drm_vma_node_offset_addr(&gobj->vma_node);
+ else
+ args->map_offset = AMDXDNA_INVALID_ADDR;
+
+ XDNA_DBG(xdna, "BO hdl %d map_offset 0x%llx vaddr 0x%llx xdna_addr 0x%llx",
+ args->handle, args->map_offset, args->vaddr, args->xdna_addr);
+
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
+/*
+ * The sync bo ioctl makes sure the CPU cache is in sync with memory.
+ * This is required because the NPU is not a cache-coherent device. CPU
+ * cache flushing/invalidation is expensive, so it is best handled outside
+ * the command submission path. This ioctl allows explicit cache
+ * flushing/invalidation outside of that critical path.
+ */
+int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_sync_bo *args = data;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret;
+
+ gobj = drm_gem_object_lookup(filp, args->handle);
+ if (!gobj) {
+ XDNA_ERR(xdna, "Lookup GEM object failed");
+ return -ENOENT;
+ }
+ abo = to_xdna_obj(gobj);
+
+ ret = amdxdna_gem_pin(abo);
+ if (ret) {
+ XDNA_ERR(xdna, "Pin BO %d failed, ret %d", args->handle, ret);
+ goto put_obj;
+ }
+
+ if (abo->type == AMDXDNA_BO_DEV)
+ drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
+ else
+ drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+
+ amdxdna_gem_unpin(abo);
+
+ XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
+ args->handle, args->offset, args->size);
+
+put_obj:
+ drm_gem_object_put(gobj);
+ return ret;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
new file mode 100644
index 000000000000..8ccc0375dd9d
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_GEM_H_
+#define _AMDXDNA_GEM_H_
+
+struct amdxdna_mem {
+ u64 userptr;
+ void *kva;
+ u64 dev_addr;
+ size_t size;
+ struct page **pages;
+ u32 nr_pages;
+ struct mmu_interval_notifier notifier;
+ unsigned long *pfns;
+ bool map_invalid;
+};
+
+struct amdxdna_gem_obj {
+ struct drm_gem_shmem_object base;
+ struct amdxdna_client *client;
+ u8 type;
+ bool pinned;
+ struct mutex lock; /* Protects: pinned */
+ struct amdxdna_mem mem;
+
+	/* The members below are only initialized for the BO types noted */
+ struct drm_mm mm; /* For AMDXDNA_BO_DEV_HEAP */
+ struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
+ struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
+ u32 assigned_hwctx;
+};
+
+#define to_gobj(obj) (&(obj)->base.base)
+
+static inline struct amdxdna_gem_obj *to_xdna_obj(struct drm_gem_object *gobj)
+{
+ return container_of(gobj, struct amdxdna_gem_obj, base.base);
+}
+
+struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
+ u32 bo_hdl, u8 bo_type);
+static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
+{
+ drm_gem_object_put(to_gobj(abo));
+}
+
+struct drm_gem_object *
+amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size);
+struct amdxdna_gem_obj *
+amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args,
+ struct drm_file *filp, bool use_vmap);
+
+int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo);
+int amdxdna_gem_pin(struct amdxdna_gem_obj *abo);
+void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo);
+
+int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+#endif /* _AMDXDNA_GEM_H_ */
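
For illustration only, a sketch of the pin/unpin contract declared above,
mirroring what amdxdna_drm_sync_bo_ioctl() does; the function name is
hypothetical:

static int sketch_cpu_access(struct amdxdna_gem_obj *abo)
{
	int ret;

	/* For AMDXDNA_BO_DEV this pins the backing device heap instead */
	ret = amdxdna_gem_pin(abo);
	if (ret)
		return ret;

	/* ... safe to touch abo->mem.kva or the backing pages here ... */

	amdxdna_gem_unpin(abo);
	return 0;
}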
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
new file mode 100644
index 000000000000..814b16bb1953
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/xarray.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/amdxdna.h>
+
+#include "amdxdna_mailbox.h"
+
+#define MB_ERR(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+#define MB_DBG(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+#define MB_WARN_ONCE(chann, fmt, args...) \
+({ \
+ typeof(chann) _chann = chann; \
+ dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
+ (_chann)->msix_irq, ##args); \
+})
+
+#define MAGIC_VAL 0x1D000000U
+#define MAGIC_VAL_MASK 0xFF000000
+#define MAX_MSG_ID_ENTRIES 256
+#define MSG_RX_TIMER 200 /* milliseconds */
+#define MAILBOX_NAME "xdna_mailbox"
+
+enum channel_res_type {
+ CHAN_RES_X2I,
+ CHAN_RES_I2X,
+ CHAN_RES_NUM
+};
+
+struct mailbox {
+ struct device *dev;
+ struct xdna_mailbox_res res;
+};
+
+struct mailbox_channel {
+ struct mailbox *mb;
+ struct xdna_mailbox_chann_res res[CHAN_RES_NUM];
+ int msix_irq;
+ u32 iohub_int_addr;
+ struct xarray chan_xa;
+ u32 next_msgid;
+ u32 x2i_tail;
+
+ /* Received msg related fields */
+ struct workqueue_struct *work_q;
+ struct work_struct rx_work;
+ u32 i2x_head;
+ bool bad_state;
+};
+
+#define MSG_BODY_SZ GENMASK(10, 0)
+#define MSG_PROTO_VER GENMASK(23, 16)
+struct xdna_msg_header {
+ __u32 total_size;
+ __u32 sz_ver;
+ __u32 id;
+ __u32 opcode;
+} __packed;
+
+static_assert(sizeof(struct xdna_msg_header) == 16);
+
+struct mailbox_pkg {
+ struct xdna_msg_header header;
+ __u32 payload[];
+};
+
+/* The protocol version. */
+#define MSG_PROTOCOL_VERSION 0x1
+/* Tombstone marker, written at the ring tail when a message wraps to the start */
+#define TOMBSTONE 0xDEADFACE
+
+struct mailbox_msg {
+ void *handle;
+ int (*notify_cb)(void *handle, const u32 *data, size_t size);
+ size_t pkg_size; /* package size in bytes */
+ struct mailbox_pkg pkg;
+};
+
+static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+
+ writel(data, ringbuf_addr);
+}
+
+static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+
+ return readl(ringbuf_addr);
+}
+
+static int mailbox_reg_read_non_zero(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 *val)
+{
+ struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
+ void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
+ int ret, value;
+
+	/* Poll until the value is non-zero */
+ ret = readx_poll_timeout(readl, ringbuf_addr, value,
+ value, 1 /* us */, 100);
+ if (ret < 0)
+ return ret;
+
+ *val = value;
+ return 0;
+}
+
+static inline void
+mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
+{
+ mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
+ mb_chann->i2x_head = headptr_val;
+}
+
+static inline void
+mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
+{
+ mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
+ mb_chann->x2i_tail = tailptr_val;
+}
+
+static inline u32
+mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
+}
+
+static inline u32
+mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
+}
+
+static inline u32
+mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
+{
+ return mb_chann->res[type].rb_size;
+}
+
+static inline int mailbox_validate_msgid(int msg_id)
+{
+ return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
+}
+
+static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
+{
+ u32 msg_id;
+ int ret;
+
+ ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
+ XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
+ &mb_chann->next_msgid, GFP_NOWAIT);
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * Tag the ID with MAGIC_VAL in the upper bits so that IDs coming back
+	 * from the firmware can be sanity-checked by mailbox_validate_msgid().
+	 */
+ msg_id |= MAGIC_VAL;
+ return msg_id;
+}
+
+static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
+{
+ msg_id &= ~MAGIC_VAL_MASK;
+ xa_erase_irq(&mb_chann->chan_xa, msg_id);
+}
+
+static void mailbox_release_msg(struct mailbox_channel *mb_chann,
+ struct mailbox_msg *mb_msg)
+{
+ MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
+ mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
+ mb_msg->notify_cb(mb_msg->handle, NULL, 0);
+ kfree(mb_msg);
+}
+
+static int
+mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
+{
+ void __iomem *write_addr;
+ u32 ringbuf_size;
+ u32 head, tail;
+ u32 start_addr;
+ u32 tmp_tail;
+
+ head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
+ tail = mb_chann->x2i_tail;
+ ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I);
+ start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
+ tmp_tail = tail + mb_msg->pkg_size;
+
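+	/*
+	 * No space if the write would catch up with the reader, or if a wrap
+	 * is needed but the region before head cannot hold the whole package.
+	 */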
+ if (tail < head && tmp_tail >= head)
+ goto no_space;
+
+ if (tail >= head && (tmp_tail > ringbuf_size - sizeof(u32) &&
+ mb_msg->pkg_size >= head))
+ goto no_space;
+
+ if (tail >= head && tmp_tail > ringbuf_size - sizeof(u32)) {
+ write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
+ writel(TOMBSTONE, write_addr);
+
+ /* tombstone is set. Write from the start of the ringbuf */
+ tail = 0;
+ }
+
+ write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
+ memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
+ mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);
+
+ trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
+ mb_msg->pkg.header.opcode,
+ mb_msg->pkg.header.id);
+
+ return 0;
+
+no_space:
+ return -ENOSPC;
+}
+
+static int
+mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
+ void *data)
+{
+ struct mailbox_msg *mb_msg;
+ int msg_id;
+ int ret;
+
+ msg_id = header->id;
+ if (!mailbox_validate_msgid(msg_id)) {
+ MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
+ return -EINVAL;
+ }
+
+ msg_id &= ~MAGIC_VAL_MASK;
+ mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
+ if (!mb_msg) {
+ MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
+ return -EINVAL;
+ }
+
+ MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
+ header->opcode, header->total_size, header->id);
+ ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
+ if (unlikely(ret))
+ MB_ERR(mb_chann, "Message callback ret %d", ret);
+
+ kfree(mb_msg);
+ return ret;
+}
+
+static int mailbox_get_msg(struct mailbox_channel *mb_chann)
+{
+ struct xdna_msg_header header;
+ void __iomem *read_addr;
+ u32 msg_size, rest;
+ u32 ringbuf_size;
+ u32 head, tail;
+ u32 start_addr;
+ int ret;
+
+ if (mailbox_reg_read_non_zero(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_tail_ptr_reg, &tail))
+ return -EINVAL;
+ head = mb_chann->i2x_head;
+ ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
+ start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;
+
+ if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
+ MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
+ return -EINVAL;
+ }
+
+ /* ringbuf empty */
+ if (head == tail)
+ return -ENOENT;
+
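+	/* A previous wrap may have parked head exactly at ringbuf_size */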
+ if (head == ringbuf_size)
+ head = 0;
+
+ /* Peek size of the message or TOMBSTONE */
+ read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
+ header.total_size = readl(read_addr);
+	/* Size is TOMBSTONE: the writer wrapped, so read next from offset 0 */
+ if (header.total_size == TOMBSTONE) {
+ if (head < tail) {
+ MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
+ head, tail);
+ return -EINVAL;
+ }
+ mailbox_set_headptr(mb_chann, 0);
+ return 0;
+ }
+
+ if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
+ MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
+ return -EINVAL;
+ }
+ msg_size = sizeof(header) + header.total_size;
+
+ if (msg_size > ringbuf_size - head || msg_size > tail - head) {
+ MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
+ msg_size, tail, head);
+ return -EINVAL;
+ }
+
+ rest = sizeof(header) - sizeof(u32);
+ read_addr += sizeof(u32);
+ memcpy_fromio((u32 *)&header + 1, read_addr, rest);
+ read_addr += rest;
+
+ ret = mailbox_get_resp(mb_chann, &header, (u32 *)read_addr);
+
+ mailbox_set_headptr(mb_chann, head + msg_size);
+	/* After the update, head may equal ringbuf_size; this is expected. */
+ trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
+ header.opcode, header.id);
+
+ return ret;
+}
+
+static irqreturn_t mailbox_irq_handler(int irq, void *p)
+{
+ struct mailbox_channel *mb_chann = p;
+
+ trace_mbox_irq_handle(MAILBOX_NAME, irq);
+ /* Schedule a rx_work to call the callback functions */
+ queue_work(mb_chann->work_q, &mb_chann->rx_work);
+ /* Clear IOHUB register */
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void mailbox_rx_worker(struct work_struct *rx_work)
+{
+ struct mailbox_channel *mb_chann;
+ int ret;
+
+ mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);
+
+ if (READ_ONCE(mb_chann->bad_state)) {
+ MB_ERR(mb_chann, "Channel in bad state, work aborted");
+ return;
+ }
+
+ while (1) {
+		/*
+		 * A return of 0 means a message was consumed; keep going
+		 * until no messages are left or an error occurs.
+		 */
+ ret = mailbox_get_msg(mb_chann);
+ if (ret == -ENOENT)
+ break;
+
+		/* Any other error means the device is in a bad state; disable the IRQ. */
+ if (unlikely(ret)) {
+ MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
+ WRITE_ONCE(mb_chann->bad_state, true);
+ disable_irq(mb_chann->msix_irq);
+ break;
+ }
+ }
+}
+
+int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
+ const struct xdna_mailbox_msg *msg, u64 tx_timeout)
+{
+ struct xdna_msg_header *header;
+ struct mailbox_msg *mb_msg;
+ size_t pkg_size;
+ int ret;
+
+ pkg_size = sizeof(*header) + msg->send_size;
+ if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
+ MB_ERR(mb_chann, "Message size larger than ringbuf size");
+ return -EINVAL;
+ }
+
+ if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
+		MB_ERR(mb_chann, "Message size must be 4-byte aligned");
+ return -EINVAL;
+ }
+
+	/* The first word in the payload must NOT be TOMBSTONE */
+ if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
+		MB_ERR(mb_chann, "Tombstone in data");
+ return -EINVAL;
+ }
+
+ if (READ_ONCE(mb_chann->bad_state)) {
+ MB_ERR(mb_chann, "Channel in bad state");
+ return -EPIPE;
+ }
+
+ mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
+ if (!mb_msg)
+ return -ENOMEM;
+
+ mb_msg->handle = msg->handle;
+ mb_msg->notify_cb = msg->notify_cb;
+ mb_msg->pkg_size = pkg_size;
+
+ header = &mb_msg->pkg.header;
+	/*
+	 * The hardware uses total_size and size to split huge messages.
+	 * That is not supported here, so the two values are the same.
+	 */
+ header->total_size = msg->send_size;
+ header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
+ FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
+ header->opcode = msg->opcode;
+ memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);
+
+ ret = mailbox_acquire_msgid(mb_chann, mb_msg);
+ if (unlikely(ret < 0)) {
+ MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
+ goto msg_id_failed;
+ }
+ header->id = ret;
+
+ MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
+ header->opcode, header->total_size, header->id);
+
+ ret = mailbox_send_msg(mb_chann, mb_msg);
+ if (ret) {
+ MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
+ goto release_id;
+ }
+
+ return 0;
+
+release_id:
+ mailbox_release_msgid(mb_chann, header->id);
+msg_id_failed:
+ kfree(mb_msg);
+ return ret;
+}
+
+struct mailbox_channel *
+xdna_mailbox_create_channel(struct mailbox *mb,
+ const struct xdna_mailbox_chann_res *x2i,
+ const struct xdna_mailbox_chann_res *i2x,
+ u32 iohub_int_addr,
+ int mb_irq)
+{
+ struct mailbox_channel *mb_chann;
+ int ret;
+
+ if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
+ pr_err("Ring buf size must be power of 2");
+ return NULL;
+ }
+
+ mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
+ if (!mb_chann)
+ return NULL;
+
+ mb_chann->mb = mb;
+ mb_chann->msix_irq = mb_irq;
+ mb_chann->iohub_int_addr = iohub_int_addr;
+ memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
+ memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));
+
+ xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
+ mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);
+
+ INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
+ mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
+ if (!mb_chann->work_q) {
+ MB_ERR(mb_chann, "Create workqueue failed");
+ goto free_and_out;
+ }
+
+	/* Everything looks good; enable the IRQ handler */
+ ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
+ if (ret) {
+ MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
+ goto destroy_wq;
+ }
+
+ mb_chann->bad_state = false;
+
+ MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
+ return mb_chann;
+
+destroy_wq:
+ destroy_workqueue(mb_chann->work_q);
+free_and_out:
+ kfree(mb_chann);
+ return NULL;
+}
+
+int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
+{
+ struct mailbox_msg *mb_msg;
+ unsigned long msg_id;
+
+	MB_DBG(mb_chann, "Destroying channel, freeing IRQ and workqueue");
+ free_irq(mb_chann->msix_irq, mb_chann);
+ destroy_workqueue(mb_chann->work_q);
+ /* We can clean up and release resources */
+
+ xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
+ mailbox_release_msg(mb_chann, mb_msg);
+
+ xa_destroy(&mb_chann->chan_xa);
+
+ MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
+ kfree(mb_chann);
+ return 0;
+}
+
+void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
+{
+	/* Disable the IRQ and wait for in-flight handlers; this might sleep. */
+ disable_irq(mb_chann->msix_irq);
+
+ /* Cancel RX work and wait for it to finish */
+ cancel_work_sync(&mb_chann->rx_work);
+ MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
+}
+
+struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
+ const struct xdna_mailbox_res *res)
+{
+ struct mailbox *mb;
+
+ mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return NULL;
+ mb->dev = ddev->dev;
+
+ /* mailbox and ring buf base and size information */
+ memcpy(&mb->res, res, sizeof(*res));
+
+ return mb;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.h b/drivers/accel/amdxdna/amdxdna_mailbox.h
new file mode 100644
index 000000000000..57954c303bdd
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AIE2_MAILBOX_H_
+#define _AIE2_MAILBOX_H_
+
+struct mailbox;
+struct mailbox_channel;
+
+/*
+ * xdna_mailbox_msg - message struct
+ *
+ * @opcode: opcode for firmware
+ * @handle: handle used for the notify callback
+ * @notify_cb: callback function to notify the sender when there is response
+ * @send_data: pointing to sending data
+ * @send_size: size of the sending data
+ *
+ * The mailbox will split the data into multiple firmware messages if it
+ * is too big. This is transparent to the sender, which receives a single
+ * notification.
+ */
+struct xdna_mailbox_msg {
+ u32 opcode;
+ void *handle;
+ int (*notify_cb)(void *handle, const u32 *data, size_t size);
+ u8 *send_data;
+ size_t send_size;
+};
+
+/*
+ * xdna_mailbox_res - mailbox hardware resource
+ *
+ * @ringbuf_base: ring buffer base address
+ * @ringbuf_size: ring buffer size
+ * @mbox_base: mailbox base address
+ * @mbox_size: mailbox size
+ */
+struct xdna_mailbox_res {
+ void __iomem *ringbuf_base;
+ size_t ringbuf_size;
+ void __iomem *mbox_base;
+ size_t mbox_size;
+ const char *name;
+};
+
+/*
+ * xdna_mailbox_chann_res - resources
+ *
+ * @rb_start_addr: ring buffer start address
+ * @rb_size: ring buffer size
+ * @mb_head_ptr_reg: mailbox head pointer register
+ * @mb_tail_ptr_reg: mailbox tail pointer register
+ */
+struct xdna_mailbox_chann_res {
+ u32 rb_start_addr;
+ u32 rb_size;
+ u32 mb_head_ptr_reg;
+ u32 mb_tail_ptr_reg;
+};
+
+/*
+ * xdnam_mailbox_create() -- create and initialize the mailbox subsystem
+ *
+ * @ddev: device pointer
+ * @res: SRAM and mailbox resources
+ *
+ * Return: a mailbox subsystem handle on success, otherwise NULL
+ */
+struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
+ const struct xdna_mailbox_res *res);
+
+/*
+ * xdna_mailbox_create_channel() -- Create a mailbox channel instance
+ *
+ * @mailbox: the handle returned from xdnam_mailbox_create()
+ * @x2i: host to firmware mailbox resources
+ * @i2x: firmware to host mailbox resources
+ * @xdna_mailbox_intr_reg: register addr of MSI-X interrupt
+ * @mb_irq: Linux IRQ number associated with mailbox MSI-X interrupt vector index
+ *
+ * Return: a mailbox channel handle on success, otherwise NULL
+ */
+struct mailbox_channel *
+xdna_mailbox_create_channel(struct mailbox *mailbox,
+ const struct xdna_mailbox_chann_res *x2i,
+ const struct xdna_mailbox_chann_res *i2x,
+ u32 xdna_mailbox_intr_reg,
+ int mb_irq);
+
+/*
+ * xdna_mailbox_destroy_channel() -- destroy mailbox channel
+ *
+ * @mailbox_chann: the handle returned from xdna_mailbox_create_channel()
+ *
+ * Return: 0 on success, otherwise an error code
+ */
+int xdna_mailbox_destroy_channel(struct mailbox_channel *mailbox_chann);
+
+/*
+ * xdna_mailbox_stop_channel() -- stop mailbox channel
+ *
+ * @mailbox_chann: the handle returned from xdna_mailbox_create_channel()
+ */
+void xdna_mailbox_stop_channel(struct mailbox_channel *mailbox_chann);
+
+/*
+ * xdna_mailbox_send_msg() -- Send a message
+ *
+ * @mailbox_chann: Mailbox channel handle
+ * @msg: message struct for message information
+ * @tx_timeout: the timeout value for sending the message in ms.
+ *
+ * Return: 0 on success, otherwise an error code
+ */
+int xdna_mailbox_send_msg(struct mailbox_channel *mailbox_chann,
+ const struct xdna_mailbox_msg *msg, u64 tx_timeout);
+
+#endif /* _AIE2_MAILBOX_H_ */
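
Not part of the patch: a rough usage sketch of the API declared above. The
resources, IRQ number, and opcode are assumed to come from the caller, and
error handling is minimal:

static int sketch_notify_cb(void *handle, const u32 *data, size_t size)
{
	/* A NULL data pointer means the message is dropped unanswered */
	return 0;
}

static int sketch_mailbox_roundtrip(struct drm_device *ddev,
				    const struct xdna_mailbox_res *res,
				    const struct xdna_mailbox_chann_res *x2i,
				    const struct xdna_mailbox_chann_res *i2x,
				    u32 intr_reg, int irq)
{
	u32 payload[4] = { 0 };	/* first word must not be the tombstone */
	struct xdna_mailbox_msg msg = {
		.opcode = 0x1,	/* hypothetical firmware opcode */
		.notify_cb = sketch_notify_cb,
		.send_data = (u8 *)payload,
		.send_size = sizeof(payload),
	};
	struct mailbox_channel *chann;
	struct mailbox *mb;

	mb = xdnam_mailbox_create(ddev, res);
	if (!mb)
		return -ENOMEM;

	chann = xdna_mailbox_create_channel(mb, x2i, i2x, intr_reg, irq);
	if (!chann)
		return -EINVAL;

	return xdna_mailbox_send_msg(chann, &msg, 2000 /* ms */);
}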
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.c b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
new file mode 100644
index 000000000000..5139a9c96a91
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/completion.h>
+
+#include "amdxdna_gem.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_mailbox_helper.h"
+#include "amdxdna_pci_drv.h"
+
+int xdna_msg_cb(void *handle, const u32 *data, size_t size)
+{
+ struct xdna_notify *cb_arg = handle;
+ int ret;
+
+ if (unlikely(!data))
+ goto out;
+
+ if (unlikely(cb_arg->size != size)) {
+ cb_arg->error = -EINVAL;
+ goto out;
+ }
+
+ print_hex_dump_debug("resp data: ", DUMP_PREFIX_OFFSET,
+ 16, 4, data, cb_arg->size, true);
+ memcpy(cb_arg->data, data, cb_arg->size);
+out:
+ ret = cb_arg->error;
+ complete(&cb_arg->comp);
+ return ret;
+}
+
+int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
+ struct xdna_mailbox_msg *msg)
+{
+ struct xdna_notify *hdl = msg->handle;
+ int ret;
+
+ ret = xdna_mailbox_send_msg(chann, msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed, ret %d", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&hdl->comp,
+ msecs_to_jiffies(RX_TIMEOUT));
+ if (!ret) {
+ XDNA_ERR(xdna, "Wait for completion timeout");
+ return -ETIME;
+ }
+
+ return hdl->error;
+}
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
new file mode 100644
index 000000000000..23e1317b79fe
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_MAILBOX_HELPER_H
+#define _AMDXDNA_MAILBOX_HELPER_H
+
+#define TX_TIMEOUT 2000 /* milliseconds */
+#define RX_TIMEOUT 5000 /* milliseconds */
+
+struct amdxdna_dev;
+
+struct xdna_notify {
+ struct completion comp;
+ u32 *data;
+ size_t size;
+ int error;
+};
+
+#define DECLARE_XDNA_MSG_COMMON(name, op, status) \
+ struct name##_req req = { 0 }; \
+ struct name##_resp resp = { status }; \
+ struct xdna_notify hdl = { \
+ .error = 0, \
+ .data = (u32 *)&resp, \
+ .size = sizeof(resp), \
+ .comp = COMPLETION_INITIALIZER_ONSTACK(hdl.comp), \
+ }; \
+ struct xdna_mailbox_msg msg = { \
+ .send_data = (u8 *)&req, \
+ .send_size = sizeof(req), \
+ .handle = &hdl, \
+ .opcode = op, \
+ .notify_cb = xdna_msg_cb, \
+ }
+
+int xdna_msg_cb(void *handle, const u32 *data, size_t size);
+int xdna_send_msg_wait(struct amdxdna_dev *xdna, struct mailbox_channel *chann,
+ struct xdna_mailbox_msg *msg);
+
+#endif /* _AMDXDNA_MAILBOX_HELPER_H */
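
A minimal sketch of how DECLARE_XDNA_MSG_COMMON() is meant to be used; the
message name "sketch", its req/resp structs, and the opcode/status values
are all hypothetical:

static int sketch_query(struct amdxdna_dev *xdna,
			struct mailbox_channel *chann)
{
	/* Expands to req/resp, an on-stack xdna_notify hdl, and msg */
	DECLARE_XDNA_MSG_COMMON(sketch, SKETCH_OPCODE, SKETCH_STATUS_INIT);

	/* Blocks up to RX_TIMEOUT ms for the firmware response */
	return xdna_send_msg_wait(xdna, chann, &msg);
}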
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
new file mode 100644
index 000000000000..f5b8497cf5ad
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/iommu.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#include "amdxdna_ctx.h"
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+
+MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
+
+/*
+ * Bind the driver based on the (vendor_id, device_id) pair, then use the
+ * (device_id, rev_id) pair as the key to select a device. Devices with the
+ * same device_id expose a very similar interface to the host driver.
+ */
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1502) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x17f0) },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static const struct amdxdna_device_id amdxdna_ids[] = {
+ { 0x1502, 0x0, &dev_npu1_info },
+ { 0x17f0, 0x0, &dev_npu2_info },
+ { 0x17f0, 0x10, &dev_npu4_info },
+ { 0x17f0, 0x11, &dev_npu5_info },
+ { 0x17f0, 0x20, &dev_npu6_info },
+ {0}
+};
+
+static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+ struct amdxdna_client *client;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret);
+ return ret;
+ }
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client) {
+ ret = -ENOMEM;
+ goto put_rpm;
+ }
+
+ client->pid = pid_nr(rcu_access_pointer(filp->pid));
+ client->xdna = xdna;
+
+ client->sva = iommu_sva_bind_device(xdna->ddev.dev, current->mm);
+ if (IS_ERR(client->sva)) {
+ ret = PTR_ERR(client->sva);
+ XDNA_ERR(xdna, "SVA bind device failed, ret %d", ret);
+ goto failed;
+ }
+ client->pasid = iommu_sva_get_pasid(client->sva);
+ if (client->pasid == IOMMU_PASID_INVALID) {
+ XDNA_ERR(xdna, "SVA get pasid failed");
+ ret = -ENODEV;
+ goto unbind_sva;
+ }
+ mutex_init(&client->hwctx_lock);
+ init_srcu_struct(&client->hwctx_srcu);
+ xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
+ mutex_init(&client->mm_lock);
+
+ mutex_lock(&xdna->dev_lock);
+ list_add_tail(&client->node, &xdna->client_list);
+ mutex_unlock(&xdna->dev_lock);
+
+ filp->driver_priv = client;
+ client->filp = filp;
+
+ XDNA_DBG(xdna, "pid %d opened", client->pid);
+ return 0;
+
+unbind_sva:
+ iommu_sva_unbind_device(client->sva);
+failed:
+ kfree(client);
+put_rpm:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ return ret;
+}
+
+static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(ddev);
+
+ XDNA_DBG(xdna, "closing pid %d", client->pid);
+
+ xa_destroy(&client->hwctx_xa);
+ cleanup_srcu_struct(&client->hwctx_srcu);
+ mutex_destroy(&client->hwctx_lock);
+ mutex_destroy(&client->mm_lock);
+ if (client->dev_heap)
+ drm_gem_object_put(to_gobj(client->dev_heap));
+
+ iommu_sva_unbind_device(client->sva);
+
+ XDNA_DBG(xdna, "pid %d closed", client->pid);
+ kfree(client);
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+}
+
+static int amdxdna_flush(struct file *f, fl_owner_t id)
+{
+ struct drm_file *filp = f->private_data;
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = client->xdna;
+ int idx;
+
+ XDNA_DBG(xdna, "PID %d flushing...", client->pid);
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return 0;
+
+ mutex_lock(&xdna->dev_lock);
+ list_del_init(&client->node);
+ mutex_unlock(&xdna->dev_lock);
+ amdxdna_hwctx_remove_all(client);
+
+ drm_dev_exit(idx);
+ return 0;
+}
+
+static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_info *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->get_aie_info)
+ return -EOPNOTSUPP;
+
+ XDNA_DBG(xdna, "Request parameter %u", args->param);
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->get_aie_info(client, args);
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+}
+
+static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_set_state *args = data;
+ int ret;
+
+ if (!xdna->dev_info->ops->set_aie_state)
+ return -EOPNOTSUPP;
+
+ XDNA_DBG(xdna, "Request parameter %u", args->param);
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->set_aie_state(client, args);
+ mutex_unlock(&xdna->dev_lock);
+
+ return ret;
+}
+
+static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
+ /* Context */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_DESTROY_HWCTX, amdxdna_drm_destroy_hwctx_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CONFIG_HWCTX, amdxdna_drm_config_hwctx_ioctl, 0),
+ /* BO */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_BO, amdxdna_drm_create_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_BO_INFO, amdxdna_drm_get_bo_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_SYNC_BO, amdxdna_drm_sync_bo_ioctl, 0),
+ /* Execution */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
+ /* AIE hardware */
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
+};
+
+static const struct file_operations amdxdna_fops = {
+ .owner = THIS_MODULE,
+ .open = accel_open,
+ .release = drm_release,
+ .flush = amdxdna_flush,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap,
+ .fop_flags = FOP_UNSIGNED_OFFSET,
+};
+
+const struct drm_driver amdxdna_drm_drv = {
+ .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+ .fops = &amdxdna_fops,
+ .name = "amdxdna_accel_driver",
+ .desc = "AMD XDNA DRM implementation",
+ .open = amdxdna_drm_open,
+ .postclose = amdxdna_drm_close,
+ .ioctls = amdxdna_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(amdxdna_drm_ioctls),
+
+ .gem_create_object = amdxdna_gem_create_object_cb,
+};
+
+static const struct amdxdna_dev_info *
+amdxdna_get_dev_info(struct pci_dev *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(amdxdna_ids); i++) {
+ if (pdev->device == amdxdna_ids[i].device &&
+ pdev->revision == amdxdna_ids[i].revision)
+ return amdxdna_ids[i].dev_info;
+ }
+ return NULL;
+}
+
+static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct amdxdna_dev *xdna;
+ int ret;
+
+ xdna = devm_drm_dev_alloc(dev, &amdxdna_drm_drv, typeof(*xdna), ddev);
+ if (IS_ERR(xdna))
+ return PTR_ERR(xdna);
+
+ xdna->dev_info = amdxdna_get_dev_info(pdev);
+ if (!xdna->dev_info)
+ return -ENODEV;
+
+ drmm_mutex_init(&xdna->ddev, &xdna->dev_lock);
+ init_rwsem(&xdna->notifier_lock);
+ INIT_LIST_HEAD(&xdna->client_list);
+ pci_set_drvdata(pdev, xdna);
+
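+	/*
+	 * Prime lockdep: notifier_lock may be taken in the memory reclaim
+	 * path, so record that dependency up front to catch misuse early.
+	 */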
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&xdna->notifier_lock);
+ fs_reclaim_release(GFP_KERNEL);
+ }
+
+ mutex_lock(&xdna->dev_lock);
+ ret = xdna->dev_info->ops->init(xdna);
+ mutex_unlock(&xdna->dev_lock);
+ if (ret) {
+ XDNA_ERR(xdna, "Hardware init failed, ret %d", ret);
+ return ret;
+ }
+
+ ret = amdxdna_sysfs_init(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Create amdxdna attrs failed: %d", ret);
+ goto failed_dev_fini;
+ }
+
+ pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+
+ ret = drm_dev_register(&xdna->ddev, 0);
+ if (ret) {
+ XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
+ pm_runtime_forbid(dev);
+ goto failed_sysfs_fini;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return 0;
+
+failed_sysfs_fini:
+ amdxdna_sysfs_fini(xdna);
+failed_dev_fini:
+ mutex_lock(&xdna->dev_lock);
+ xdna->dev_info->ops->fini(xdna);
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+}
+
+static void amdxdna_remove(struct pci_dev *pdev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct amdxdna_client *client;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_forbid(dev);
+
+ drm_dev_unplug(&xdna->ddev);
+ amdxdna_sysfs_fini(xdna);
+
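+	/*
+	 * Drop dev_lock around amdxdna_hwctx_remove_all() and restart the
+	 * walk from the head each time, since the removal path may sleep
+	 * and take locks of its own.
+	 */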
+ mutex_lock(&xdna->dev_lock);
+ client = list_first_entry_or_null(&xdna->client_list,
+ struct amdxdna_client, node);
+ while (client) {
+ list_del_init(&client->node);
+ mutex_unlock(&xdna->dev_lock);
+
+ amdxdna_hwctx_remove_all(client);
+
+ mutex_lock(&xdna->dev_lock);
+ client = list_first_entry_or_null(&xdna->client_list,
+ struct amdxdna_client, node);
+ }
+
+ xdna->dev_info->ops->fini(xdna);
+ mutex_unlock(&xdna->dev_lock);
+}
+
+static int amdxdna_dev_suspend_nolock(struct amdxdna_dev *xdna)
+{
+ if (xdna->dev_info->ops->suspend)
+ xdna->dev_info->ops->suspend(xdna);
+
+ return 0;
+}
+
+static int amdxdna_dev_resume_nolock(struct amdxdna_dev *xdna)
+{
+ if (xdna->dev_info->ops->resume)
+ return xdna->dev_info->ops->resume(xdna);
+
+ return 0;
+}
+
+static int amdxdna_pmops_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ struct amdxdna_client *client;
+
+ mutex_lock(&xdna->dev_lock);
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_suspend(client);
+
+ amdxdna_dev_suspend_nolock(xdna);
+ mutex_unlock(&xdna->dev_lock);
+
+ return 0;
+}
+
+static int amdxdna_pmops_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ struct amdxdna_client *client;
+ int ret;
+
+ XDNA_INFO(xdna, "firmware resuming...");
+ mutex_lock(&xdna->dev_lock);
+ ret = amdxdna_dev_resume_nolock(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "resume NPU firmware failed");
+ mutex_unlock(&xdna->dev_lock);
+ return ret;
+ }
+
+ XDNA_INFO(xdna, "hardware context resuming...");
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_resume(client);
+ mutex_unlock(&xdna->dev_lock);
+
+ return 0;
+}
+
+static int amdxdna_rpmops_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ int ret;
+
+ mutex_lock(&xdna->dev_lock);
+ ret = amdxdna_dev_suspend_nolock(xdna);
+ mutex_unlock(&xdna->dev_lock);
+
+ XDNA_DBG(xdna, "Runtime suspend done ret: %d", ret);
+ return ret;
+}
+
+static int amdxdna_rpmops_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
+ int ret;
+
+ mutex_lock(&xdna->dev_lock);
+ ret = amdxdna_dev_resume_nolock(xdna);
+ mutex_unlock(&xdna->dev_lock);
+
+ XDNA_DBG(xdna, "Runtime resume done ret: %d", ret);
+ return ret;
+}
+
+static const struct dev_pm_ops amdxdna_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
+ RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL)
+};
+
+static struct pci_driver amdxdna_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci_ids,
+ .probe = amdxdna_probe,
+ .remove = amdxdna_remove,
+ .driver.pm = &amdxdna_pm_ops,
+};
+
+module_pci_driver(amdxdna_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_DESCRIPTION("amdxdna driver");
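The ioctl table above is reached through the standard DRM dispatch path (amdxdna_fops.unlocked_ioctl -> drm_ioctl). A minimal userspace sketch of one round trip, assuming the kernel uapi header <drm/amdxdna_accel.h> exports the struct and enum names used below; the /dev/accel/accel0 path is an example and depends on enumeration order:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <drm/amdxdna_accel.h>

    int main(void)
    {
            struct amdxdna_drm_query_aie_version ver = { 0 };
            struct amdxdna_drm_get_info info = {
                    .param = DRM_AMDXDNA_QUERY_AIE_VERSION,
                    .buffer_size = sizeof(ver),
                    .buffer = (uintptr_t)&ver,      /* u64 user pointer */
            };
            int fd = open("/dev/accel/accel0", O_RDWR);

            if (fd < 0 || ioctl(fd, DRM_IOCTL_AMDXDNA_GET_INFO, &info) < 0)
                    return 1;
            printf("AIE version %u.%u\n", ver.major, ver.minor);
            return 0;
    }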
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
new file mode 100644
index 000000000000..37848a8d8031
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_PCI_DRV_H_
+#define _AMDXDNA_PCI_DRV_H_
+
+#include <linux/xarray.h>
+
+#define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
+#define XDNA_WARN(xdna, fmt, args...) drm_warn(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
+#define XDNA_ERR(xdna, fmt, args...) drm_err(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
+#define XDNA_DBG(xdna, fmt, args...) drm_dbg(&(xdna)->ddev, fmt, ##args)
+#define XDNA_INFO_ONCE(xdna, fmt, args...) drm_info_once(&(xdna)->ddev, fmt, ##args)
+
+#define XDNA_MBZ_DBG(xdna, ptr, sz) \
+ ({ \
+ int __i; \
+ int __ret = 0; \
+ u8 *__ptr = (u8 *)(ptr); \
+ for (__i = 0; __i < (sz); __i++) { \
+ if (__ptr[__i]) { \
+ XDNA_DBG(xdna, "MBZ check failed"); \
+ __ret = -EINVAL; \
+ break; \
+ } \
+ } \
+ __ret; \
+ })
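XDNA_MBZ_DBG scans a byte range and returns -EINVAL if any "must be zero" byte is set, logging the failure via XDNA_DBG. A minimal usage sketch (the args struct and its pad field are hypothetical):

    /* hypothetical ioctl-handler fragment: reject non-zero reserved fields */
    if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
            return -EINVAL;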
+
+#define to_xdna_dev(drm_dev) \
+ ((struct amdxdna_dev *)container_of(drm_dev, struct amdxdna_dev, ddev))
+
+extern const struct drm_driver amdxdna_drm_drv;
+
+struct amdxdna_client;
+struct amdxdna_dev;
+struct amdxdna_drm_get_info;
+struct amdxdna_drm_set_state;
+struct amdxdna_gem_obj;
+struct amdxdna_hwctx;
+struct amdxdna_sched_job;
+
+/*
+ * struct amdxdna_dev_ops - Device hardware operation callbacks
+ */
+struct amdxdna_dev_ops {
+ int (*init)(struct amdxdna_dev *xdna);
+ void (*fini)(struct amdxdna_dev *xdna);
+ int (*resume)(struct amdxdna_dev *xdna);
+ void (*suspend)(struct amdxdna_dev *xdna);
+ int (*hwctx_init)(struct amdxdna_hwctx *hwctx);
+ void (*hwctx_fini)(struct amdxdna_hwctx *hwctx);
+ int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+ void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
+ void (*hwctx_suspend)(struct amdxdna_hwctx *hwctx);
+ void (*hwctx_resume)(struct amdxdna_hwctx *hwctx);
+ int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
+ int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
+ int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
+};
+
+/*
+ * struct amdxdna_dev_info - Device hardware information
+ * Records static device information, such as the reg, mbox, PSP and SMU BAR indexes
+ */
+struct amdxdna_dev_info {
+ int reg_bar;
+ int mbox_bar;
+ int sram_bar;
+ int psp_bar;
+ int smu_bar;
+ int device_type;
+ int first_col;
+ u32 dev_mem_buf_shift;
+ u64 dev_mem_base;
+ size_t dev_mem_size;
+ char *vbnv;
+ const struct amdxdna_dev_priv *dev_priv;
+ const struct amdxdna_dev_ops *ops;
+};
+
+struct amdxdna_fw_ver {
+ u32 major;
+ u32 minor;
+ u32 sub;
+ u32 build;
+};
+
+struct amdxdna_dev {
+ struct drm_device ddev;
+ struct amdxdna_dev_hdl *dev_handle;
+ const struct amdxdna_dev_info *dev_info;
+ void *xrs_hdl;
+
+ struct mutex dev_lock; /* per device lock */
+ struct list_head client_list;
+ struct amdxdna_fw_ver fw_ver;
+	struct rw_semaphore notifier_lock; /* for MMU notifier */
+};
+
+/*
+ * struct amdxdna_device_id - PCI device info
+ */
+struct amdxdna_device_id {
+ unsigned short device;
+ u8 revision;
+ const struct amdxdna_dev_info *dev_info;
+};
+
+/*
+ * struct amdxdna_client - amdxdna client
+ * A per-fd structure for managing hardware contexts and other per-process state.
+ */
+struct amdxdna_client {
+ struct list_head node;
+ pid_t pid;
+ struct mutex hwctx_lock; /* protect hwctx */
+	/* do NOT wait on this SRCU while hwctx_lock is held */
+ struct srcu_struct hwctx_srcu;
+ struct xarray hwctx_xa;
+ u32 next_hwctxid;
+ struct amdxdna_dev *xdna;
+ struct drm_file *filp;
+
+ struct mutex mm_lock; /* protect memory related */
+ struct amdxdna_gem_obj *dev_heap;
+
+ struct iommu_sva *sva;
+ int pasid;
+};
+
+#define amdxdna_for_each_hwctx(client, hwctx_id, entry) \
+ xa_for_each(&(client)->hwctx_xa, hwctx_id, entry)
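Per the comment on hwctx_srcu, lookups walk hwctx_xa under an SRCU read lock rather than hwctx_lock; a sketch of the intended read side (inspect_hwctx is a placeholder):

    struct amdxdna_hwctx *hwctx;
    unsigned long hwctx_id;
    int idx;

    idx = srcu_read_lock(&client->hwctx_srcu);
    amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
            inspect_hwctx(hwctx);   /* placeholder; must not wait for SRCU sync */
    srcu_read_unlock(&client->hwctx_srcu, idx);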
+
+/* Add device info below */
+extern const struct amdxdna_dev_info dev_npu1_info;
+extern const struct amdxdna_dev_info dev_npu2_info;
+extern const struct amdxdna_dev_info dev_npu4_info;
+extern const struct amdxdna_dev_info dev_npu5_info;
+extern const struct amdxdna_dev_info dev_npu6_info;
+
+int amdxdna_sysfs_init(struct amdxdna_dev *xdna);
+void amdxdna_sysfs_fini(struct amdxdna_dev *xdna);
+
+#endif /* _AMDXDNA_PCI_DRV_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_sysfs.c b/drivers/accel/amdxdna/amdxdna_sysfs.c
new file mode 100644
index 000000000000..f27e4ee960a0
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_sysfs.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/types.h>
+
+#include "amdxdna_gem.h"
+#include "amdxdna_pci_drv.h"
+
+static ssize_t vbnv_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", xdna->dev_info->vbnv);
+}
+static DEVICE_ATTR_RO(vbnv);
+
+static ssize_t device_type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", xdna->dev_info->device_type);
+}
+static DEVICE_ATTR_RO(device_type);
+
+static ssize_t fw_version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amdxdna_dev *xdna = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d.%d.%d.%d\n", xdna->fw_ver.major,
+ xdna->fw_ver.minor, xdna->fw_ver.sub,
+ xdna->fw_ver.build);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static struct attribute *amdxdna_attrs[] = {
+ &dev_attr_device_type.attr,
+ &dev_attr_vbnv.attr,
+ &dev_attr_fw_version.attr,
+ NULL,
+};
+
+static struct attribute_group amdxdna_attr_group = {
+ .attrs = amdxdna_attrs,
+};
+
+int amdxdna_sysfs_init(struct amdxdna_dev *xdna)
+{
+ int ret;
+
+ ret = sysfs_create_group(&xdna->ddev.dev->kobj, &amdxdna_attr_group);
+ if (ret)
+ XDNA_ERR(xdna, "Create attr group failed");
+
+ return ret;
+}
+
+void amdxdna_sysfs_fini(struct amdxdna_dev *xdna)
+{
+ sysfs_remove_group(&xdna->ddev.dev->kobj, &amdxdna_attr_group);
+}
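The group is created on the PCI device, so the attributes appear as plain files in the device's sysfs directory. A small sketch reading one of them from userspace (the BDF 0000:c5:00.1 is a made-up example; the real path depends on where the NPU enumerates):

    #include <stdio.h>

    int main(void)
    {
            char ver[64];
            FILE *f = fopen("/sys/bus/pci/devices/0000:c5:00.1/fw_version", "r");

            if (!f)
                    return 1;
            if (fgets(ver, sizeof(ver), f))
                    printf("NPU firmware: %s", ver);  /* already newline-terminated */
            fclose(f);
            return 0;
    }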
diff --git a/drivers/accel/amdxdna/npu1_regs.c b/drivers/accel/amdxdna/npu1_regs.c
new file mode 100644
index 000000000000..e4f6dac7d00f
--- /dev/null
+++ b/drivers/accel/amdxdna/npu1_regs.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* Address definition from NPU1 docs */
+#define MPNPU_PUB_SEC_INTR 0x3010090
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010094
+#define MPNPU_PUB_SCRATCH2 0x30100A0
+#define MPNPU_PUB_SCRATCH3 0x30100A4
+#define MPNPU_PUB_SCRATCH4 0x30100A8
+#define MPNPU_PUB_SCRATCH5 0x30100AC
+#define MPNPU_PUB_SCRATCH6 0x30100B0
+#define MPNPU_PUB_SCRATCH7 0x30100B4
+#define MPNPU_PUB_SCRATCH9 0x30100BC
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x30A0000
+#define MPNPU_SRAM_X2I_MAILBOX_1 0x30A2000
+#define MPNPU_SRAM_I2X_MAILBOX_15 0x30BF000
+
+#define MPNPU_APERTURE0_BASE 0x3000000
+#define MPNPU_APERTURE1_BASE 0x3080000
+#define MPNPU_APERTURE2_BASE 0x30C0000
+
+/* PCIe BAR Index for NPU1 */
+#define NPU1_REG_BAR_INDEX 0
+#define NPU1_MBOX_BAR_INDEX 4
+#define NPU1_PSP_BAR_INDEX 0
+#define NPU1_SMU_BAR_INDEX 0
+#define NPU1_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU1_REG_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_MBOX_BAR_BASE MPNPU_APERTURE2_BASE
+#define NPU1_PSP_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_SMU_BAR_BASE MPNPU_APERTURE0_BASE
+#define NPU1_SRAM_BAR_BASE MPNPU_APERTURE1_BASE
+
+const struct rt_config npu1_default_rt_cfg[] = {
+ { 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 0 },
+};
+
+const struct dpm_clk_freq npu1_dpm_clk_table[] = {
+ {400, 800},
+ {600, 1024},
+ {600, 1024},
+ {600, 1024},
+ {600, 1024},
+ {720, 1309},
+ {720, 1309},
+ {847, 1600},
+ { 0 }
+};
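Each table entry pairs the NPU clock with the H-clock, in MHz, for one DPM level, and the table is zero-terminated. A sketch of walking it, under the assumption that struct dpm_clk_freq in aie2_pci.h names its fields npuclk and hclk:

    const struct dpm_clk_freq *e;

    for (e = npu1_dpm_clk_table; e->npuclk; e++)
            pr_info("DPM level %td: npuclk %u MHz, hclk %u MHz\n",
                    e - npu1_dpm_clk_table, e->npuclk, e->hclk);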
+
+static const struct amdxdna_dev_priv npu1_dev_priv = {
+ .fw_path = "amdnpu/1502_00/npu.sbin",
+ .protocol_major = 0x5,
+ .protocol_minor = 0x7,
+ .rt_config = npu1_default_rt_cfg,
+ .dpm_clk_tbl = npu1_dpm_clk_table,
+ .col_align = COL_ALIGN_NONE,
+ .mbox_dev_addr = NPU1_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU1_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU1_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU1_SRAM, MPNPU_SRAM_I2X_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU1_PSP, MPNPU_PUB_SCRATCH2),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU1_PSP, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU1_PSP, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU1_PSP, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU1_PSP, MPNPU_PUB_SEC_INTR),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU1_PSP, MPNPU_PUB_SCRATCH2),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU1_PSP, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU1_SMU, MPNPU_PUB_SCRATCH5),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU1_SMU, MPNPU_PUB_PWRMGMT_INTR),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU1_SMU, MPNPU_PUB_SCRATCH6),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
+ },
+ .hw_ops = {
+ .set_dpm = npu1_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu1_info = {
+ .reg_bar = NPU1_REG_BAR_INDEX,
+ .mbox_bar = NPU1_MBOX_BAR_INDEX,
+ .sram_bar = NPU1_SRAM_BAR_INDEX,
+ .psp_bar = NPU1_PSP_BAR_INDEX,
+ .smu_bar = NPU1_SMU_BAR_INDEX,
+ .first_col = 1,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu1",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu1_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/amdxdna/npu2_regs.c b/drivers/accel/amdxdna/npu2_regs.c
new file mode 100644
index 000000000000..a081cac75ee0
--- /dev/null
+++ b/drivers/accel/amdxdna/npu2_regs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU2 */
+#define NPU2_REG_BAR_INDEX 0
+#define NPU2_MBOX_BAR_INDEX 0
+#define NPU2_PSP_BAR_INDEX 4
+#define NPU2_SMU_BAR_INDEX 5
+#define NPU2_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU2_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU2_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU2_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU2_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU2_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu2_dev_priv = {
+ .fw_path = "amdnpu/17f0_00/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 0x6,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU2_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU2_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU2_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU2_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU2_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU2_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU2_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU2_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU2_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU2_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU2_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU2_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU2_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU2_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu2_info = {
+ .reg_bar = NPU2_REG_BAR_INDEX,
+ .mbox_bar = NPU2_MBOX_BAR_INDEX,
+ .sram_bar = NPU2_SRAM_BAR_INDEX,
+ .psp_bar = NPU2_PSP_BAR_INDEX,
+ .smu_bar = NPU2_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu2",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu2_dev_priv,
+	.ops = &aie2_ops, /* NPU2 reuses NPU1's callbacks */
+};
diff --git a/drivers/accel/amdxdna/npu4_regs.c b/drivers/accel/amdxdna/npu4_regs.c
new file mode 100644
index 000000000000..9f2e33182ec6
--- /dev/null
+++ b/drivers/accel/amdxdna/npu4_regs.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU4 */
+#define NPU4_REG_BAR_INDEX 0
+#define NPU4_MBOX_BAR_INDEX 0
+#define NPU4_PSP_BAR_INDEX 4
+#define NPU4_SMU_BAR_INDEX 5
+#define NPU4_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU4_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU4_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU4_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU4_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU4_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+const struct rt_config npu4_default_rt_cfg[] = {
+ { 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 4, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 0 },
+};
+
+const struct dpm_clk_freq npu4_dpm_clk_table[] = {
+ {396, 792},
+ {600, 1056},
+ {792, 1152},
+ {975, 1267},
+ {975, 1267},
+ {1056, 1408},
+ {1152, 1584},
+ {1267, 1800},
+ { 0 }
+};
+
+static const struct amdxdna_dev_priv npu4_dev_priv = {
+ .fw_path = "amdnpu/17f0_10/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU4_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU4_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU4_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU4_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU4_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU4_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU4_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU4_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU4_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU4_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU4_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU4_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU4_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU4_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu4_info = {
+ .reg_bar = NPU4_REG_BAR_INDEX,
+ .mbox_bar = NPU4_MBOX_BAR_INDEX,
+ .sram_bar = NPU4_SRAM_BAR_INDEX,
+ .psp_bar = NPU4_PSP_BAR_INDEX,
+ .smu_bar = NPU4_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu4",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu4_dev_priv,
+	.ops = &aie2_ops, /* NPU4 reuses NPU1's callbacks */
+};
diff --git a/drivers/accel/amdxdna/npu5_regs.c b/drivers/accel/amdxdna/npu5_regs.c
new file mode 100644
index 000000000000..5f1cf83461c4
--- /dev/null
+++ b/drivers/accel/amdxdna/npu5_regs.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU5 */
+#define NPU5_REG_BAR_INDEX 0
+#define NPU5_MBOX_BAR_INDEX 0
+#define NPU5_PSP_BAR_INDEX 4
+#define NPU5_SMU_BAR_INDEX 5
+#define NPU5_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU5_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU5_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU5_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU5_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU5_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu5_dev_priv = {
+ .fw_path = "amdnpu/17f0_11/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU5_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU5_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU5_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU5_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU5_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU5_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU5_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU5_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU5_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU5_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU5_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU5_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU5_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU5_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu5_info = {
+ .reg_bar = NPU5_REG_BAR_INDEX,
+ .mbox_bar = NPU5_MBOX_BAR_INDEX,
+ .sram_bar = NPU5_SRAM_BAR_INDEX,
+ .psp_bar = NPU5_PSP_BAR_INDEX,
+ .smu_bar = NPU5_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu5",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu5_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/amdxdna/npu6_regs.c b/drivers/accel/amdxdna/npu6_regs.c
new file mode 100644
index 000000000000..94a7005685a7
--- /dev/null
+++ b/drivers/accel/amdxdna/npu6_regs.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+#include <linux/sizes.h>
+
+#include "aie2_pci.h"
+#include "amdxdna_mailbox.h"
+#include "amdxdna_pci_drv.h"
+
+/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
+#define MPNPU_PUB_SEC_INTR 0x3010060
+#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
+#define MPNPU_PUB_SCRATCH0 0x301006C
+#define MPNPU_PUB_SCRATCH1 0x3010070
+#define MPNPU_PUB_SCRATCH2 0x3010074
+#define MPNPU_PUB_SCRATCH3 0x3010078
+#define MPNPU_PUB_SCRATCH4 0x301007C
+#define MPNPU_PUB_SCRATCH5 0x3010080
+#define MPNPU_PUB_SCRATCH6 0x3010084
+#define MPNPU_PUB_SCRATCH7 0x3010088
+#define MPNPU_PUB_SCRATCH8 0x301008C
+#define MPNPU_PUB_SCRATCH9 0x3010090
+#define MPNPU_PUB_SCRATCH10 0x3010094
+#define MPNPU_PUB_SCRATCH11 0x3010098
+#define MPNPU_PUB_SCRATCH12 0x301009C
+#define MPNPU_PUB_SCRATCH13 0x30100A0
+#define MPNPU_PUB_SCRATCH14 0x30100A4
+#define MPNPU_PUB_SCRATCH15 0x30100A8
+#define MP0_C2PMSG_73 0x3810A24
+#define MP0_C2PMSG_123 0x3810AEC
+
+#define MP1_C2PMSG_0 0x3B10900
+#define MP1_C2PMSG_60 0x3B109F0
+#define MP1_C2PMSG_61 0x3B109F4
+
+#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
+#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
+#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
+#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
+
+#define MMNPU_APERTURE0_BASE 0x3000000
+#define MMNPU_APERTURE1_BASE 0x3600000
+#define MMNPU_APERTURE3_BASE 0x3810000
+#define MMNPU_APERTURE4_BASE 0x3B10000
+
+/* PCIe BAR Index for NPU6 */
+#define NPU6_REG_BAR_INDEX 0
+#define NPU6_MBOX_BAR_INDEX 0
+#define NPU6_PSP_BAR_INDEX 4
+#define NPU6_SMU_BAR_INDEX 5
+#define NPU6_SRAM_BAR_INDEX 2
+/* Associated BARs and Apertures */
+#define NPU6_REG_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU6_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
+#define NPU6_PSP_BAR_BASE MMNPU_APERTURE3_BASE
+#define NPU6_SMU_BAR_BASE MMNPU_APERTURE4_BASE
+#define NPU6_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
+
+static const struct amdxdna_dev_priv npu6_dev_priv = {
+ .fw_path = "amdnpu/17f0_10/npu.sbin",
+ .protocol_major = 0x6,
+ .protocol_minor = 12,
+ .rt_config = npu4_default_rt_cfg,
+ .dpm_clk_tbl = npu4_dpm_clk_table,
+ .col_align = COL_ALIGN_NATURE,
+ .mbox_dev_addr = NPU6_MBOX_BAR_BASE,
+ .mbox_size = 0, /* Use BAR size */
+ .sram_dev_addr = NPU6_SRAM_BAR_BASE,
+ .sram_offs = {
+ DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
+ DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
+ },
+ .psp_regs_off = {
+ DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU6_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
+ DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU6_REG, MPNPU_PUB_SCRATCH4),
+ DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU6_REG, MPNPU_PUB_SCRATCH9),
+ DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU6_PSP, MP0_C2PMSG_73),
+ DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU6_PSP, MP0_C2PMSG_123),
+ DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
+ },
+ .smu_regs_off = {
+ DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU6_SMU, MP1_C2PMSG_0),
+ DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU6_SMU, MP1_C2PMSG_60),
+ DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU6_SMU, MMNPU_APERTURE4_BASE),
+ DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU6_SMU, MP1_C2PMSG_61),
+ DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU6_SMU, MP1_C2PMSG_60),
+ },
+ .hw_ops = {
+ .set_dpm = npu4_set_dpm,
+ },
+};
+
+const struct amdxdna_dev_info dev_npu6_info = {
+ .reg_bar = NPU6_REG_BAR_INDEX,
+ .mbox_bar = NPU6_MBOX_BAR_INDEX,
+ .sram_bar = NPU6_SRAM_BAR_INDEX,
+ .psp_bar = NPU6_PSP_BAR_INDEX,
+ .smu_bar = NPU6_SMU_BAR_INDEX,
+ .first_col = 0,
+ .dev_mem_buf_shift = 15, /* 32 KiB aligned */
+ .dev_mem_base = AIE2_DEVM_BASE,
+ .dev_mem_size = AIE2_DEVM_SIZE,
+ .vbnv = "RyzenAI-npu6",
+ .device_type = AMDXDNA_DEV_TYPE_KMQ,
+ .dev_priv = &npu6_dev_priv,
+ .ops = &aie2_ops,
+};
diff --git a/drivers/accel/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c
index b83141f58319..9f212b17611a 100644
--- a/drivers/accel/habanalabs/common/context.c
+++ b/drivers/accel/habanalabs/common/context.c
@@ -199,7 +199,6 @@ out_err:
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
- char task_comm[TASK_COMM_LEN];
int rc = 0, i;
ctx->hdev = hdev;
@@ -272,7 +271,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
mutex_init(&ctx->ts_reg_lock);
dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
- get_task_comm(task_comm, current), ctx->asid);
+ current->comm, ctx->asid);
}
return 0;
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index e0cf3b4343bb..30277ae410d4 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -817,7 +817,7 @@ static void device_hard_reset_pending(struct work_struct *work)
}
queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
- msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
+ secs_to_jiffies(HL_PENDING_RESET_PER_SEC));
}
}
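This hunk (and the habanalabs_drv.c one below) swaps an open-coded seconds-to-milliseconds multiply for the dedicated helper; by the helper's definition in <linux/jiffies.h>, the two forms yield the same jiffies count:

    unsigned long a = msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000);
    unsigned long b = secs_to_jiffies(HL_PENDING_RESET_PER_SEC);
    /* a == b; only the open-coded unit conversion goes away */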
diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 708dfd10f39c..596c52e8aa26 100644
--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -101,7 +101,6 @@ static const struct drm_driver hl_driver = {
.major = LINUX_VERSION_MAJOR,
.minor = LINUX_VERSION_PATCHLEVEL,
.patchlevel = LINUX_VERSION_SUBLEVEL,
- .date = "20190505",
.fops = &hl_fops,
.open = hl_device_open,
@@ -362,8 +361,7 @@ static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout)
* a different default timeout for Gaudi
*/
if (timeout == HL_DEFAULT_TIMEOUT_LOCKED)
- hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED *
- MSEC_PER_SEC);
+ hdev->timeout_jiffies = secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED);
hdev->reset_upon_device_release = 0;
break;
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index 1dd6e23172ca..8729a0c57d78 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -1279,13 +1279,10 @@ static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long ar
retcode = -EFAULT;
out_err:
- if (retcode) {
- char task_comm[TASK_COMM_LEN];
-
+ if (retcode)
dev_dbg_ratelimited(dev,
"error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
- task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
- }
+ task_pid_nr(current), current->comm, cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
@@ -1308,11 +1305,9 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) {
ioctl = &hl_ioctls_control[nr - HL_COMMAND_START];
} else {
- char task_comm[TASK_COMM_LEN];
-
dev_dbg_ratelimited(hdev->dev_ctrl,
"invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
- task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
+ task_pid_nr(current), current->comm, cmd, nr);
return -ENOTTY;
}
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 3348ad12c237..601fdbe70179 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -14,7 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/pci-p2pdma.h>
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
#define HL_MMU_DEBUG 0
diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
index 682c53245286..9e055b5ce03d 100644
--- a/drivers/accel/ivpu/Kconfig
+++ b/drivers/accel/ivpu/Kconfig
@@ -8,6 +8,7 @@ config DRM_ACCEL_IVPU
select FW_LOADER
select DRM_GEM_SHMEM_HELPER
select GENERIC_ALLOCATOR
+ select WANT_DEV_COREDUMP
help
Choose this option if you have a system with a 14th generation
Intel CPU (Meteor Lake) or newer. Intel NPU (formerly called Intel VPU)
@@ -15,3 +16,12 @@ config DRM_ACCEL_IVPU
and Deep Learning applications.
If "M" is selected, the module will be called intel_vpu.
+
+config DRM_ACCEL_IVPU_DEBUG
+ bool "Intel NPU debug mode"
+ depends on DRM_ACCEL_IVPU
+ help
+ Choose this option to enable additional
+ debug features for the Intel NPU driver:
+ - Always print debug messages regardless of dyndbg config,
+ - Enable unsafe module params.
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
index ebd682a42eb1..1029e0bab061 100644
--- a/drivers/accel/ivpu/Makefile
+++ b/drivers/accel/ivpu/Makefile
@@ -16,8 +16,14 @@ intel_vpu-y := \
ivpu_mmu_context.o \
ivpu_ms.o \
ivpu_pm.o \
- ivpu_sysfs.o
+ ivpu_sysfs.o \
+ ivpu_trace_points.o
intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o
+intel_vpu-$(CONFIG_DEV_COREDUMP) += ivpu_coredump.o
obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
+
+subdir-ccflags-$(CONFIG_DRM_ACCEL_IVPU_DEBUG) += -DDEBUG
+
+CFLAGS_ivpu_trace_points.o = -I$(src)
diff --git a/drivers/accel/ivpu/ivpu_coredump.c b/drivers/accel/ivpu/ivpu_coredump.c
new file mode 100644
index 000000000000..16ad0c30818c
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_coredump.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+
+#include "ivpu_coredump.h"
+#include "ivpu_fw.h"
+#include "ivpu_gem.h"
+#include "vpu_boot_api.h"
+
+#define CRASH_DUMP_HEADER "Intel NPU crash dump"
+#define CRASH_DUMP_HEADERS_SIZE SZ_4K
+
+void ivpu_dev_coredump(struct ivpu_device *vdev)
+{
+ struct drm_print_iterator pi = {};
+ struct drm_printer p;
+ size_t coredump_size;
+ char *coredump;
+
+ coredump_size = CRASH_DUMP_HEADERS_SIZE + FW_VERSION_HEADER_SIZE +
+ ivpu_bo_size(vdev->fw->mem_log_crit) + ivpu_bo_size(vdev->fw->mem_log_verb);
+ coredump = vmalloc(coredump_size);
+ if (!coredump)
+ return;
+
+ pi.data = coredump;
+ pi.remain = coredump_size;
+ p = drm_coredump_printer(&pi);
+
+ drm_printf(&p, "%s\n", CRASH_DUMP_HEADER);
+ drm_printf(&p, "FW version: %s\n", vdev->fw->version);
+ ivpu_fw_log_print(vdev, false, &p);
+
+ dev_coredumpv(vdev->drm.dev, coredump, pi.offset, GFP_KERNEL);
+}
diff --git a/drivers/accel/ivpu/ivpu_coredump.h b/drivers/accel/ivpu/ivpu_coredump.h
new file mode 100644
index 000000000000..8efb09d02441
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_coredump.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#ifndef __IVPU_COREDUMP_H__
+#define __IVPU_COREDUMP_H__
+
+#include <drm/drm_print.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_fw_log.h"
+
+#ifdef CONFIG_DEV_COREDUMP
+void ivpu_dev_coredump(struct ivpu_device *vdev);
+#else
+static inline void ivpu_dev_coredump(struct ivpu_device *vdev)
+{
+ struct drm_printer p = drm_info_printer(vdev->drm.dev);
+
+ ivpu_fw_log_print(vdev, false, &p);
+}
+#endif
+
+#endif /* __IVPU_COREDUMP_H__ */
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 6f86f8df30db..8180b95ed69d 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -45,6 +45,14 @@ static int fw_name_show(struct seq_file *s, void *v)
return 0;
}
+static int fw_version_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = seq_to_ivpu(s);
+
+ seq_printf(s, "%s\n", vdev->fw->version);
+ return 0;
+}
+
static int fw_trace_capability_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = seq_to_ivpu(s);
@@ -108,42 +116,43 @@ static int reset_pending_show(struct seq_file *s, void *v)
return 0;
}
+static int firewall_irq_counter_show(struct seq_file *s, void *v)
+{
+ struct ivpu_device *vdev = seq_to_ivpu(s);
+
+ seq_printf(s, "%d\n", atomic_read(&vdev->hw->firewall_irq_counter));
+ return 0;
+}
+
static const struct drm_debugfs_info vdev_debugfs_list[] = {
{"bo_list", bo_list_show, 0},
{"fw_name", fw_name_show, 0},
+ {"fw_version", fw_version_show, 0},
{"fw_trace_capability", fw_trace_capability_show, 0},
{"fw_trace_config", fw_trace_config_show, 0},
{"last_bootmode", last_bootmode_show, 0},
{"reset_counter", reset_counter_show, 0},
{"reset_pending", reset_pending_show, 0},
+ {"firewall_irq_counter", firewall_irq_counter_show, 0},
};
-static ssize_t
-dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+static int dvfs_mode_get(void *data, u64 *dvfs_mode)
{
- struct ivpu_device *vdev = file->private_data;
- struct ivpu_fw_info *fw = vdev->fw;
- u32 dvfs_mode;
- int ret;
-
- ret = kstrtou32_from_user(user_buf, size, 0, &dvfs_mode);
- if (ret < 0)
- return ret;
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
- fw->dvfs_mode = dvfs_mode;
+ *dvfs_mode = vdev->fw->dvfs_mode;
+ return 0;
+}
- ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
- if (ret)
- return ret;
+static int dvfs_mode_set(void *data, u64 dvfs_mode)
+{
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
- return size;
+ vdev->fw->dvfs_mode = (u32)dvfs_mode;
+ return pci_try_reset_function(to_pci_dev(vdev->drm.dev));
}
-static const struct file_operations dvfs_mode_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = dvfs_mode_fops_write,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(dvfs_mode_fops, dvfs_mode_get, dvfs_mode_set, "%llu\n");
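DEFINE_DEBUGFS_ATTRIBUTE generates the whole file_operations (simple-attribute open/read/write plumbing included) from a get/set pair, which is what lets the hand-rolled write handler above be deleted. The general idiom, with foo_* as placeholder names:

    static int foo_get(void *data, u64 *val)
    {
            *val = ((struct foo_dev *)data)->setting;       /* placeholder type */
            return 0;
    }

    static int foo_set(void *data, u64 val)
    {
            ((struct foo_dev *)data)->setting = val;
            return 0;
    }

    DEFINE_DEBUGFS_ATTRIBUTE(foo_fops, foo_get, foo_set, "%llu\n");
    /* later: debugfs_create_file("foo", 0644, parent, dev, &foo_fops); */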
static ssize_t
fw_dyndbg_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
@@ -192,7 +201,7 @@ fw_log_fops_write(struct file *file, const char __user *user_buf, size_t size, l
if (!size)
return -EINVAL;
- ivpu_fw_log_clear(vdev);
+ ivpu_fw_log_mark_read(vdev);
return size;
}
@@ -337,49 +346,23 @@ static const struct file_operations ivpu_force_recovery_fops = {
.write = ivpu_force_recovery_fn,
};
-static ssize_t
-ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+static int ivpu_reset_engine_fn(void *data, u64 val)
{
- struct ivpu_device *vdev = file->private_data;
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
- if (!size)
- return -EINVAL;
-
- if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
- return -ENODEV;
- if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
- return -ENODEV;
-
- return size;
+ return ivpu_jsm_reset_engine(vdev, (u32)val);
}
-static const struct file_operations ivpu_reset_engine_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = ivpu_reset_engine_fn,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(ivpu_reset_engine_fops, NULL, ivpu_reset_engine_fn, "0x%02llx\n");
-static ssize_t
-ivpu_resume_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
+static int ivpu_resume_engine_fn(void *data, u64 val)
{
- struct ivpu_device *vdev = file->private_data;
-
- if (!size)
- return -EINVAL;
-
- if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
- return -ENODEV;
- if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COPY))
- return -ENODEV;
+ struct ivpu_device *vdev = (struct ivpu_device *)data;
- return size;
+ return ivpu_jsm_hws_resume_engine(vdev, (u32)val);
}
-static const struct file_operations ivpu_resume_engine_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = ivpu_resume_engine_fn,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(ivpu_resume_engine_fops, NULL, ivpu_resume_engine_fn, "0x%02llx\n");
static int dct_active_get(void *data, u64 *active_percent)
{
@@ -423,7 +406,7 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
debugfs_create_file("force_recovery", 0200, debugfs_root, vdev,
&ivpu_force_recovery_fops);
- debugfs_create_file("dvfs_mode", 0200, debugfs_root, vdev,
+ debugfs_create_file("dvfs_mode", 0644, debugfs_root, vdev,
&dvfs_mode_fops);
debugfs_create_file("fw_dyndbg", 0200, debugfs_root, vdev,
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index c91400ecf926..38cf1c342c72 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <generated/utsrelease.h>
#include <drm/drm_accel.h>
#include <drm/drm_file.h>
@@ -14,7 +15,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
-#include "vpu_boot_api.h"
+#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
@@ -29,10 +30,10 @@
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"
+#include "vpu_boot_api.h"
#ifndef DRIVER_VERSION_STR
-#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
- __stringify(DRM_IVPU_DRIVER_MINOR) "."
+#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif
static struct lock_class_key submitted_jobs_xa_lock_class_key;
@@ -42,8 +43,10 @@ module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
int ivpu_test_mode;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
+#endif
u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
@@ -53,9 +56,9 @@ u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
-int ivpu_sched_mode;
+int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
-MODULE_PARM_DESC(sched_mode, "Scheduler mode: 0 - Default scheduler, 1 - Force HW scheduler");
+MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");
bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
@@ -85,7 +88,7 @@ static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *fi
ivpu_cmdq_release_all_locked(file_priv);
ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
- ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+ ivpu_mmu_context_fini(vdev, &file_priv->ctx);
file_priv->bound = false;
drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
}
@@ -103,6 +106,8 @@ static void file_priv_release(struct kref *ref)
pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&vdev->context_list_lock);
file_priv_unbind(vdev, file_priv);
+ drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
+ xa_destroy(&file_priv->cmdq_xa);
mutex_unlock(&vdev->context_list_lock);
pm_runtime_put_autosuspend(vdev->drm.dev);
@@ -116,8 +121,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
struct ivpu_file_priv *file_priv = *link;
struct ivpu_device *vdev = file_priv->vdev;
- drm_WARN_ON(&vdev->drm, !file_priv);
-
ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
file_priv->ctx.id, kref_read(&file_priv->ref));
@@ -255,9 +258,14 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
goto err_unlock;
}
- ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
- if (ret)
- goto err_xa_erase;
+ ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);
+
+ file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+ file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
+
+ xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
+ file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
+ file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;
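A worked example of the ID window this carves out:

    /* ctx.id == 5: job_limit.min == FIELD_PREP(GENMASK(31, 8), 4) == 0x0400,
     * job_limit.max == 0x0400 | IVPU_JOB_ID_JOB_MASK == 0x04ff,
     * i.e. a disjoint 256-ID range per context */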
mutex_unlock(&vdev->context_list_lock);
drm_dev_exit(idx);
@@ -269,8 +277,6 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
return 0;
-err_xa_erase:
- xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
mutex_unlock(&vdev->context_list_lock);
mutex_destroy(&file_priv->ms_lock);
@@ -346,7 +352,7 @@ static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
int ret = 0;
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_setup_priority_bands(vdev);
if (ret) {
ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
@@ -380,10 +386,7 @@ int ivpu_boot(struct ivpu_device *vdev)
ret = ivpu_wait_for_ready(vdev);
if (ret) {
ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
- ivpu_hw_diagnose_failure(vdev);
- ivpu_mmu_evtq_dump(vdev);
- ivpu_fw_log_dump(vdev);
- return ret;
+ goto err_diagnose_failure;
}
ivpu_hw_irq_clear(vdev);
@@ -394,12 +397,24 @@ int ivpu_boot(struct ivpu_device *vdev)
if (ivpu_fw_is_cold_boot(vdev)) {
ret = ivpu_pm_dct_init(vdev);
if (ret)
- return ret;
+ goto err_disable_ipc;
- return ivpu_hw_sched_init(vdev);
+ ret = ivpu_hw_sched_init(vdev);
+ if (ret)
+ goto err_disable_ipc;
}
return 0;
+
+err_disable_ipc:
+ ivpu_ipc_disable(vdev);
+ ivpu_hw_irq_disable(vdev);
+ disable_irq(vdev->irq);
+err_diagnose_failure:
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_mmu_evtq_dump(vdev);
+ ivpu_dev_coredump(vdev);
+ return ret;
}
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
@@ -446,9 +461,8 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRM_IVPU_DRIVER_MAJOR,
- .minor = DRM_IVPU_DRIVER_MINOR,
+
+ .major = 1,
};
static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
@@ -606,6 +620,9 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
+ vdev->db_limit.min = IVPU_MIN_DB;
+ vdev->db_limit.max = IVPU_MAX_DB;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
if (ret)
goto err_xa_destroy;
@@ -632,9 +649,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (ret)
goto err_shutdown;
- ret = ivpu_mmu_global_context_init(vdev);
- if (ret)
- goto err_shutdown;
+ ivpu_mmu_global_context_init(vdev);
ret = ivpu_mmu_init(vdev);
if (ret)
@@ -722,6 +737,7 @@ static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 63f13b697eed..3fdff3f6cffd 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -21,11 +21,11 @@
#define DRIVER_NAME "intel_vpu"
#define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)"
-#define DRIVER_DATE "20230117"
-#define PCI_DEVICE_ID_MTL 0x7d1d
-#define PCI_DEVICE_ID_ARL 0xad1d
-#define PCI_DEVICE_ID_LNL 0x643e
+#define PCI_DEVICE_ID_MTL 0x7d1d
+#define PCI_DEVICE_ID_ARL 0xad1d
+#define PCI_DEVICE_ID_LNL 0x643e
+#define PCI_DEVICE_ID_PTL_P 0xb03e
#define IVPU_HW_IP_37XX 37
#define IVPU_HW_IP_40XX 40
@@ -46,17 +46,22 @@
#define IVPU_MIN_DB 1
#define IVPU_MAX_DB 255
-#define IVPU_NUM_ENGINES 2
+#define IVPU_JOB_ID_JOB_MASK GENMASK(7, 0)
+#define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8)
+
#define IVPU_NUM_PRIORITIES 4
-#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
+#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
-#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority))
+#define IVPU_CMDQ_MIN_ID 1
+#define IVPU_CMDQ_MAX_ID 255
#define IVPU_PLATFORM_SILICON 0
#define IVPU_PLATFORM_SIMICS 2
#define IVPU_PLATFORM_FPGA 3
#define IVPU_PLATFORM_INVALID 8
+#define IVPU_SCHED_MODE_AUTO -1
+
#define IVPU_DBG_REG BIT(0)
#define IVPU_DBG_IRQ BIT(1)
#define IVPU_DBG_MMU BIT(2)
@@ -134,6 +139,8 @@ struct ivpu_device {
struct xa_limit context_xa_limit;
struct xarray db_xa;
+ struct xa_limit db_limit;
+ u32 db_next;
struct mutex bo_list_lock; /* Protects bo_list */
struct list_head bo_list;
@@ -152,6 +159,7 @@ struct ivpu_device {
int tdr;
int autosuspend;
int d0i3_entry_msg;
+ int state_dump_msg;
} timeout;
};
@@ -163,11 +171,15 @@ struct ivpu_file_priv {
struct kref ref;
struct ivpu_device *vdev;
struct mutex lock; /* Protects cmdq */
- struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
+ struct xarray cmdq_xa;
struct ivpu_mmu_context ctx;
struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
struct list_head ms_instance_list;
struct ivpu_bo *ms_info_bo;
+ struct xa_limit job_limit;
+ u32 job_id_next;
+ struct xa_limit cmdq_limit;
+ u32 cmdq_id_next;
bool has_mmu_faults;
bool bound;
bool aborted;
@@ -185,9 +197,9 @@ extern bool ivpu_force_snoop;
#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2)
#define IVPU_TEST_MODE_D0I3_MSG_DISABLE BIT(4)
#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
-#define IVPU_TEST_MODE_PREEMPTION_DISABLE BIT(6)
-#define IVPU_TEST_MODE_HWS_EXTRA_EVENTS BIT(7)
+#define IVPU_TEST_MODE_MIP_DISABLE BIT(6)
#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
+#define IVPU_TEST_MODE_TURBO BIT(9)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
@@ -215,6 +227,8 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
return IVPU_HW_IP_37XX;
case PCI_DEVICE_ID_LNL:
return IVPU_HW_IP_40XX;
+ case PCI_DEVICE_ID_PTL_P:
+ return IVPU_HW_IP_50XX;
default:
dump_stack();
ivpu_err(vdev, "Unknown NPU IP generation\n");
@@ -229,6 +243,7 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
case PCI_DEVICE_ID_ARL:
return IVPU_HW_BTRS_MTL;
case PCI_DEVICE_ID_LNL:
+ case PCI_DEVICE_ID_PTL_P:
return IVPU_HW_BTRS_LNL;
default:
dump_stack();
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index ede6165e09d9..6037ec0b3096 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -25,7 +25,6 @@
#define FW_SHAVE_NN_MAX_SIZE SZ_2M
#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
-#define FW_VERSION_HEADER_SIZE SZ_4K
#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
#define WATCHDOG_MSS_REDIRECT 32
@@ -47,8 +46,10 @@
#define IVPU_FOCUS_PRESENT_TIMER_MS 1000
static char *ivpu_firmware;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");
+#endif
static struct {
int gen;
@@ -58,11 +59,14 @@ static struct {
{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
{ IVPU_HW_IP_40XX, "vpu_40xx.bin" },
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
+ { IVPU_HW_IP_50XX, "vpu_50xx.bin" },
+ { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
};
/* Production fw_names from the table above */
MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin");
MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_50xx_v0.0.bin");
static int ivpu_fw_request(struct ivpu_device *vdev)
{
@@ -135,6 +139,15 @@ static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range
return true;
}
+static u32
+ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
+{
+ if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
+ return ivpu_sched_mode;
+
+ return VPU_SCHEDULING_MODE_OS;
+}
+
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
@@ -191,8 +204,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
- ivpu_info(vdev, "Firmware: %s, version: %s", fw->name,
- (const char *)fw_hdr + VPU_FW_HEADER_SIZE);
+ if (!scnprintf(fw->version, sizeof(fw->version), "%s", fw->file->data + VPU_FW_HEADER_SIZE))
+ ivpu_warn(vdev, "Missing firmware version\n");
+
+ ivpu_info(vdev, "Firmware: %s, version: %s\n", fw->name, fw->version);
if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, BOOT, 3))
return -EINVAL;
@@ -208,14 +223,16 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->cold_boot_entry_point = fw_hdr->entry_point;
fw->entry_point = fw->cold_boot_entry_point;
- fw->trace_level = min_t(u32, ivpu_log_level, IVPU_FW_LOG_FATAL);
+ fw->trace_level = min_t(u32, ivpu_fw_log_level, IVPU_FW_LOG_FATAL);
fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING;
fw->trace_hw_component_mask = -1;
fw->dvfs_mode = 0;
+ fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+ ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
fw_hdr->ro_section_size,
@@ -311,7 +328,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
goto err_free_fw_mem;
}
- if (ivpu_log_level <= IVPU_FW_LOG_INFO)
+ if (ivpu_fw_log_level <= IVPU_FW_LOG_INFO)
log_verb_size = IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE;
else
log_verb_size = IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE;
@@ -567,8 +584,10 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ivpu_bo_size(ipc_mem_rx) / 2;
boot_params->ipc_payload_area_size = ivpu_bo_size(ipc_mem_rx) / 2;
- boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
- boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
+ if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
+ boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
+ boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
+ }
/* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1;
@@ -604,8 +623,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->punit_telemetry_sram_base = ivpu_hw_telemetry_offset_get(vdev);
boot_params->punit_telemetry_sram_size = ivpu_hw_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_telemetry_enable_get(vdev);
- boot_params->vpu_scheduling_mode = vdev->hw->sched_mode;
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ boot_params->vpu_scheduling_mode = vdev->fw->sched_mode;
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
boot_params->vpu_focus_present_timer_ms = IVPU_FOCUS_PRESENT_TIMER_MS;
boot_params->dvfs_mode = vdev->fw->dvfs_mode;
if (!IVPU_WA(disable_d0i3_msg))
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 40d9d17be3f5..1d0b2bd9d65c 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -1,11 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_FW_H__
#define __IVPU_FW_H__
+#include "vpu_jsm_api.h"
+
+#define FW_VERSION_HEADER_SIZE SZ_4K
+#define FW_VERSION_STR_SIZE SZ_256
+
struct ivpu_device;
struct ivpu_bo;
struct vpu_boot_params;
@@ -13,6 +18,7 @@ struct vpu_boot_params;
struct ivpu_fw_info {
const struct firmware *file;
const char *name;
+ char version[FW_VERSION_STR_SIZE];
struct ivpu_bo *mem;
struct ivpu_bo *mem_shave_nn;
struct ivpu_bo *mem_log_crit;
@@ -32,6 +38,7 @@ struct ivpu_fw_info {
u32 secondary_preempt_buf_size;
u64 read_only_addr;
u32 read_only_size;
+ u32 sched_mode;
};
int ivpu_fw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_fw_log.c b/drivers/accel/ivpu/ivpu_fw_log.c
index ef0adb5e0fbe..337c906b0210 100644
--- a/drivers/accel/ivpu/ivpu_fw_log.c
+++ b/drivers/accel/ivpu/ivpu_fw_log.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/ctype.h>
@@ -15,19 +15,19 @@
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
-#define IVPU_FW_LOG_LINE_LENGTH 256
+#define IVPU_FW_LOG_LINE_LENGTH 256
-unsigned int ivpu_log_level = IVPU_FW_LOG_ERROR;
-module_param(ivpu_log_level, uint, 0444);
-MODULE_PARM_DESC(ivpu_log_level,
- "NPU firmware default trace level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
+unsigned int ivpu_fw_log_level = IVPU_FW_LOG_ERROR;
+module_param_named(fw_log_level, ivpu_fw_log_level, uint, 0444);
+MODULE_PARM_DESC(fw_log_level,
+ "NPU firmware default log level: debug=" __stringify(IVPU_FW_LOG_DEBUG)
" info=" __stringify(IVPU_FW_LOG_INFO)
" warn=" __stringify(IVPU_FW_LOG_WARN)
" error=" __stringify(IVPU_FW_LOG_ERROR)
" fatal=" __stringify(IVPU_FW_LOG_FATAL));
-static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
- struct vpu_tracing_buffer_header **log_header)
+static int fw_log_from_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
+ struct vpu_tracing_buffer_header **out_log)
{
struct vpu_tracing_buffer_header *log;
@@ -48,7 +48,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
return -EINVAL;
}
- *log_header = log;
+ *out_log = log;
*offset += log->size;
ivpu_dbg(vdev, FW_BOOT,
@@ -59,7 +59,7 @@ static int fw_log_ptr(struct ivpu_device *vdev, struct ivpu_bo *bo, u32 *offset,
return 0;
}
-static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
+static void fw_log_print_lines(char *buffer, u32 size, struct drm_printer *p)
{
char line[IVPU_FW_LOG_LINE_LENGTH];
u32 index = 0;
@@ -87,56 +87,89 @@ static void buffer_print(char *buffer, u32 size, struct drm_printer *p)
}
line[index] = 0;
if (index != 0)
- drm_printf(p, "%s\n", line);
+ drm_printf(p, "%s", line);
}
-static void fw_log_print_buffer(struct ivpu_device *vdev, struct vpu_tracing_buffer_header *log,
- const char *prefix, bool only_new_msgs, struct drm_printer *p)
+static void fw_log_print_buffer(struct vpu_tracing_buffer_header *log, const char *prefix,
+ bool only_new_msgs, struct drm_printer *p)
{
- char *log_buffer = (void *)log + log->header_size;
- u32 log_size = log->size - log->header_size;
- u32 log_start = log->read_index;
- u32 log_end = log->write_index;
-
- if (!(log->write_index || log->wrap_count) ||
- (log->write_index == log->read_index && only_new_msgs)) {
- drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
- return;
+ char *log_data = (void *)log + log->header_size;
+ u32 data_size = log->size - log->header_size;
+ u32 log_start = only_new_msgs ? READ_ONCE(log->read_index) : 0;
+ u32 log_end = READ_ONCE(log->write_index);
+
+ if (log->wrap_count == log->read_wrap_count) {
+ if (log_end <= log_start) {
+ drm_printf(p, "==== %s \"%s\" log empty ====\n", prefix, log->name);
+ return;
+ }
+ } else if (log->wrap_count == log->read_wrap_count + 1) {
+ if (log_end > log_start)
+ log_start = log_end;
+ } else {
+ log_start = log_end;
}
drm_printf(p, "==== %s \"%s\" log start ====\n", prefix, log->name);
- if (log->write_index > log->read_index) {
- buffer_print(log_buffer + log_start, log_end - log_start, p);
+ if (log_end > log_start) {
+ fw_log_print_lines(log_data + log_start, log_end - log_start, p);
} else {
- buffer_print(log_buffer + log_end, log_size - log_end, p);
- buffer_print(log_buffer, log_end, p);
+ fw_log_print_lines(log_data + log_start, data_size - log_start, p);
+ fw_log_print_lines(log_data, log_end, p);
}
- drm_printf(p, "\x1b[0m");
+ drm_printf(p, "\n\x1b[0m"); /* append a newline and reset ANSI formatting */
drm_printf(p, "==== %s \"%s\" log end ====\n", prefix, log->name);
}
-void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+static void
+fw_log_print_all_in_bo(struct ivpu_device *vdev, const char *name,
+ struct ivpu_bo *bo, bool only_new_msgs, struct drm_printer *p)
{
- struct vpu_tracing_buffer_header *log_header;
+ struct vpu_tracing_buffer_header *log;
u32 next = 0;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
- fw_log_print_buffer(vdev, log_header, "NPU critical", only_new_msgs, p);
+ while (fw_log_from_bo(vdev, bo, &next, &log) == 0)
+ fw_log_print_buffer(log, name, only_new_msgs, p);
+}
+
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p)
+{
+ fw_log_print_all_in_bo(vdev, "NPU critical", vdev->fw->mem_log_crit, only_new_msgs, p);
+ fw_log_print_all_in_bo(vdev, "NPU verbose", vdev->fw->mem_log_verb, only_new_msgs, p);
+}
+
+void ivpu_fw_log_mark_read(struct ivpu_device *vdev)
+{
+ struct vpu_tracing_buffer_header *log;
+ u32 next;
+
+ next = 0;
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
+ log->read_index = READ_ONCE(log->write_index);
+ log->read_wrap_count = READ_ONCE(log->wrap_count);
+ }
next = 0;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
- fw_log_print_buffer(vdev, log_header, "NPU verbose", only_new_msgs, p);
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
+ log->read_index = READ_ONCE(log->write_index);
+ log->read_wrap_count = READ_ONCE(log->wrap_count);
+ }
}
-void ivpu_fw_log_clear(struct ivpu_device *vdev)
+void ivpu_fw_log_reset(struct ivpu_device *vdev)
{
- struct vpu_tracing_buffer_header *log_header;
- u32 next = 0;
+ struct vpu_tracing_buffer_header *log;
+ u32 next;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_crit, &next, &log_header) == 0)
- log_header->read_index = log_header->write_index;
+ next = 0;
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_crit, &next, &log) == 0) {
+ log->read_index = 0;
+ log->read_wrap_count = 0;
+ }
next = 0;
- while (fw_log_ptr(vdev, vdev->fw->mem_log_verb, &next, &log_header) == 0)
- log_header->read_index = log_header->write_index;
+ while (fw_log_from_bo(vdev, vdev->fw->mem_log_verb, &next, &log) == 0) {
+ log->read_index = 0;
+ log->read_wrap_count = 0;
+ }
}
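Annotation: the wrap-count handling introduced in fw_log_print_buffer() above reduces to a small window computation. Here it is as a standalone C sketch that mirrors the three cases (same lap, one lap behind, more than one lap behind); the struct is a simplified stand-in for vpu_tracing_buffer_header, not the driver's type.

#include <stdint.h>

struct log_hdr {
    uint32_t data_size;        /* bytes of payload after the header */
    uint32_t read_index;       /* consumer position within the data  */
    uint32_t write_index;      /* producer position within the data  */
    uint32_t read_wrap_count;  /* laps completed by the consumer     */
    uint32_t wrap_count;       /* laps completed by the producer     */
};

/* Returns 0 when nothing new is readable; otherwise fills [*start, *end).
 * If *end <= *start on return, the window wraps: read [*start, data_size)
 * followed by [0, *end), matching the two-part print in the patch. */
static int log_window(const struct log_hdr *h, uint32_t *start, uint32_t *end)
{
    *start = h->read_index;
    *end = h->write_index;

    if (h->wrap_count == h->read_wrap_count) {
        if (*end <= *start)
            return 0;           /* same lap, nothing past read_index */
    } else if (h->wrap_count == h->read_wrap_count + 1) {
        if (*end > *start)
            *start = *end;      /* reader overtaken: one full lap is unread */
    } else {
        *start = *end;          /* far behind: only the last lap survives */
    }
    return 1;
}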
diff --git a/drivers/accel/ivpu/ivpu_fw_log.h b/drivers/accel/ivpu/ivpu_fw_log.h
index 0b2573f6f315..8bb528a73cb7 100644
--- a/drivers/accel/ivpu/ivpu_fw_log.h
+++ b/drivers/accel/ivpu/ivpu_fw_log.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#ifndef __IVPU_FW_LOG_H__
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/drm_print.h>
-
#include "ivpu_drv.h"
#define IVPU_FW_LOG_DEFAULT 0
@@ -19,20 +17,15 @@
#define IVPU_FW_LOG_ERROR 4
#define IVPU_FW_LOG_FATAL 5
-extern unsigned int ivpu_log_level;
-
#define IVPU_FW_VERBOSE_BUFFER_SMALL_SIZE SZ_1M
#define IVPU_FW_VERBOSE_BUFFER_LARGE_SIZE SZ_8M
#define IVPU_FW_CRITICAL_BUFFER_SIZE SZ_512K
-void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
-void ivpu_fw_log_clear(struct ivpu_device *vdev);
+extern unsigned int ivpu_fw_log_level;
-static inline void ivpu_fw_log_dump(struct ivpu_device *vdev)
-{
- struct drm_printer p = drm_info_printer(vdev->drm.dev);
+void ivpu_fw_log_print(struct ivpu_device *vdev, bool only_new_msgs, struct drm_printer *p);
+void ivpu_fw_log_mark_read(struct ivpu_device *vdev);
+void ivpu_fw_log_reset(struct ivpu_device *vdev);
- ivpu_fw_log_print(vdev, false, &p);
-}
#endif /* __IVPU_FW_LOG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 1b409dbd332d..16178054e629 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -384,6 +384,9 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+ /* Add 1 jiffy to ensure the wait function never times out before the intended timeout_ns */
+ timeout += 1;
+
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -EINVAL;
@@ -406,7 +409,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
mutex_lock(&bo->lock);
drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
- bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
+ bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
bo->flags, kref_read(&bo->base.base.refcount));
if (bo->base.pages)
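Annotation: the "timeout += 1" above compensates for truncation when a nanosecond deadline is converted to scheduler ticks. A minimal C sketch of the rounding problem; the tick rate is an assumed example, not a driver value.

#include <stdint.h>
#include <stdio.h>

#define TICK_HZ 250u                                /* assumed tick rate */
#define NSEC_PER_TICK (1000000000ull / TICK_HZ)

/* Integer division truncates, so a deadline that is not an exact
 * multiple of the tick period would otherwise expire early; the +1
 * mirrors the extra jiffy added in ivpu_bo_wait_ioctl(). */
static uint64_t timeout_ns_to_ticks(uint64_t timeout_ns)
{
    return timeout_ns / NSEC_PER_TICK + 1;
}

int main(void)
{
    /* 3.9 ms at 250 Hz truncates to 0 ticks; without the +1 the wait
     * could return immediately instead of after the requested time. */
    printf("3.9 ms -> %llu tick(s)\n",
           (unsigned long long)timeout_ns_to_ticks(3900000));
    return 0;
}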
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 27f0fe4d54e0..4e1054f3466e 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -89,12 +89,14 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.tdr = 2000000;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = 500;
+ vdev->timeout.state_dump_msg = 10;
} else if (ivpu_is_simics(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 10000;
- vdev->timeout.autosuspend = -1;
+ vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 100;
+ vdev->timeout.state_dump_msg = 10;
} else {
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
@@ -104,6 +106,7 @@ static void timeouts_init(struct ivpu_device *vdev)
else
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 5;
+ vdev->timeout.state_dump_msg = 10;
}
}
@@ -111,14 +114,14 @@ static void memory_ranges_init(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_range_init(&vdev->hw->ranges.user, 0xc0000000, 255 * SZ_1M);
+ ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M);
ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
- ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
+ ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G);
} else {
ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_range_init(&vdev->hw->ranges.user, 0x80000000, SZ_256M);
- ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
- ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
+ ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G);
+ ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G);
+ vdev->hw->ranges.dma = vdev->hw->ranges.user;
}
}
@@ -249,6 +252,7 @@ int ivpu_hw_init(struct ivpu_device *vdev)
platform_init(vdev);
wa_init(vdev);
timeouts_init(vdev);
+ atomic_set(&vdev->hw->firewall_irq_counter, 0);
return 0;
}
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 1c0c98e3afb8..fc4dbfc980c8 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -46,12 +46,12 @@ struct ivpu_hw_info {
u32 profiling_freq;
} pll;
u32 tile_fuse;
- u32 sched_mode;
u32 sku;
u16 config;
int dma_bits;
ktime_t d0i3_entry_host_ts;
u64 d0i3_entry_vpu_ts;
+ atomic_t firewall_irq_counter;
};
int ivpu_hw_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
index d0b795b344c7..fc0ee8d637f9 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -115,6 +115,8 @@
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY 0x00030068u
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST_DLY_MASK GENMASK(7, 0)
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST1_DLY_MASK GENMASK(15, 8)
+#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST2_DLY_MASK GENMASK(23, 16)
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY 0x0003006cu
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK GENMASK(7, 0)
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index 745e5248803d..3212c99f3682 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -141,16 +141,10 @@ static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config
}
config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
- if (!tile_disable_check(config)) {
- ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", config);
- return -EIO;
- }
+ if (!tile_disable_check(config))
+ ivpu_warn(vdev, "More than 1 tile disabled, tile fuse config mask: 0x%x\n", config);
- if (config)
- ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
- BTRS_LNL_TILE_MAX_NUM - 1, ffs(config) - 1);
- else
- ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", BTRS_LNL_TILE_MAX_NUM);
+ ivpu_dbg(vdev, MISC, "Tile disable config mask: 0x%x\n", config);
*tile_fuse_config = config;
return 0;
@@ -163,7 +157,6 @@ static int info_init_mtl(struct ivpu_device *vdev)
hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
hw->sku = BTRS_MTL_TILE_SKU_BOTH;
hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO;
- hw->sched_mode = ivpu_sched_mode;
return 0;
}
@@ -178,7 +171,6 @@ static int info_init_lnl(struct ivpu_device *vdev)
if (ret)
return ret;
- hw->sched_mode = ivpu_sched_mode;
hw->tile_fuse = tile_fuse_config;
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
@@ -315,10 +307,6 @@ static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp,
wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
wp->epp = enable ? PLL_EPP_DEFAULT : 0;
}
-
- /* Simics cannot start without at least one tile */
- if (enable && ivpu_is_simics(vdev))
- wp->cfg = 1;
}
static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
@@ -465,9 +453,6 @@ int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev)
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
return 0;
- if (ivpu_is_simics(vdev))
- return 0;
-
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
}
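Annotation: judging by the new warning text above, tile_disable_check() accepts a fuse mask with at most one bit set, and the patch downgrades anything else from a hard error to a warning. A tiny popcount sketch of counting disabled tiles from such a mask (stand-in code, not the driver's helper):

#include <stdint.h>
#include <stdio.h>

static int tiles_disabled(uint32_t fuse_mask)
{
    int n = 0;
    while (fuse_mask) {
        fuse_mask &= fuse_mask - 1;   /* clear the lowest set bit */
        n++;
    }
    return n;
}

int main(void)
{
    printf("mask 0x5 disables %d tiles\n", tiles_disabled(0x5));
    return 0;
}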
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index dfd2f4a5b526..029dd065614b 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -8,15 +8,12 @@
#include "ivpu_hw.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_40xx_reg.h"
+#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_pm.h"
-#define PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT 0
-#define PWR_ISLAND_EN_POST_DLY_FREQ_HIGH 18
-#define PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT 3
-#define PWR_ISLAND_STATUS_DLY_FREQ_HIGH 46
#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIM_SAFE_ENABLE 0xf1d0dead
@@ -268,20 +265,15 @@ void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
idle_gen_drive_40xx(vdev, false);
}
-static void pwr_island_delay_set_50xx(struct ivpu_device *vdev)
+static void
+pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status)
{
- u32 val, post, status;
-
- if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT) {
- post = PWR_ISLAND_EN_POST_DLY_FREQ_DEFAULT;
- status = PWR_ISLAND_STATUS_DLY_FREQ_DEFAULT;
- } else {
- post = PWR_ISLAND_EN_POST_DLY_FREQ_HIGH;
- status = PWR_ISLAND_STATUS_DLY_FREQ_HIGH;
- }
+ u32 val;
val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
+ val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val);
+ val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val);
REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);
val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
@@ -311,9 +303,6 @@ static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable)
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
-
- if (enable)
- ndelay(500);
}
static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
@@ -326,9 +315,6 @@ static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
-
- if (!enable)
- ndelay(500);
}
static void pwr_island_drive_40xx(struct ivpu_device *vdev, bool enable)
@@ -347,9 +333,11 @@ static void pwr_island_enable(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
pwr_island_trickle_drive_37xx(vdev, true);
+ ndelay(500);
pwr_island_drive_37xx(vdev, true);
} else {
pwr_island_trickle_drive_40xx(vdev, true);
+ ndelay(500);
pwr_island_drive_40xx(vdev, true);
}
}
@@ -686,13 +674,36 @@ static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
+static void pwr_island_delay_set(struct ivpu_device *vdev)
+{
+ bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH;
+ u32 post, post1, post2, status;
+
+ if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
+ return;
+
+ switch (ivpu_device_id(vdev)) {
+ case PCI_DEVICE_ID_PTL_P:
+ post = high ? 18 : 0;
+ post1 = 0;
+ post2 = 0;
+ status = high ? 46 : 3;
+ break;
+
+ default:
+ dump_stack();
+ ivpu_err(vdev, "Unknown device ID\n");
+ return;
+ }
+
+ pwr_island_delay_set_50xx(vdev, post, post1, post2, status);
+}
+
int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
{
int ret;
- if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_50XX)
- pwr_island_delay_set_50xx(vdev);
-
+ pwr_island_delay_set(vdev);
pwr_island_enable(vdev);
ret = wait_for_pwr_island_status(vdev, 0x1);
@@ -1062,7 +1073,10 @@ static void irq_wdt_mss_handler(struct ivpu_device *vdev)
static void irq_noc_firewall_handler(struct ivpu_device *vdev)
{
- ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
+ atomic_inc(&vdev->hw->firewall_irq_counter);
+
+ ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n",
+ atomic_read(&vdev->hw->firewall_irq_counter));
}
/* Handler for IRQs from NPU core */
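Annotation: the new pwr_island_delay_set_50xx() parameters map onto one 32-bit register with three byte-wide fields, matching the POST/POST1/POST2 masks added to ivpu_hw_40xx_reg.h earlier in this patch. A plain-C sketch of that packing, with local macros standing in for the kernel's GENMASK()/REG_SET_FLD_NUM():

#include <stdint.h>
#include <stdio.h>

#define POST_DLY_MASK   0x000000ffu   /* bits 7:0   */
#define POST1_DLY_MASK  0x0000ff00u   /* bits 15:8  */
#define POST2_DLY_MASK  0x00ff0000u   /* bits 23:16 */

static uint32_t pack_delays(uint32_t reg, uint8_t post, uint8_t post1, uint8_t post2)
{
    reg &= ~(POST_DLY_MASK | POST1_DLY_MASK | POST2_DLY_MASK);
    reg |= (uint32_t)post;            /* field at bit 0  */
    reg |= (uint32_t)post1 << 8;      /* field at bit 8  */
    reg |= (uint32_t)post2 << 16;     /* field at bit 16 */
    return reg;
}

int main(void)
{
    /* PTL_P high-frequency case from the switch above: post=18, rest 0 */
    printf("reg = 0x%08x\n", pack_delays(0, 18, 0, 0));
    return 0;
}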
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 78b32a823241..01ebf88fe6ef 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -15,6 +15,7 @@
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
+#include "ivpu_trace.h"
#define IPC_MAX_RX_MSG 128
@@ -227,6 +228,7 @@ int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, stru
goto unlock;
ivpu_ipc_tx(vdev, cons->tx_vpu_addr);
+ trace_jsm("[tx]", req);
unlock:
mutex_unlock(&ipc->lock);
@@ -278,12 +280,13 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));
if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
- ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
+ ivpu_err(vdev, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
ret = -EBADMSG;
}
if (jsm_msg)
memcpy(jsm_msg, rx_msg->jsm_msg, size);
+ trace_jsm("[rx]", rx_msg->jsm_msg);
}
ivpu_ipc_rx_msg_del(vdev, rx_msg);
@@ -291,15 +294,16 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
return ret;
}
-static int
+int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
enum vpu_ipc_msg_type expected_resp_type,
- struct vpu_jsm_msg *resp, u32 channel,
- unsigned long timeout_ms)
+ struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms)
{
struct ivpu_ipc_consumer cons;
int ret;
+ drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
+
ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
ret = ivpu_ipc_send(vdev, &cons, req);
@@ -325,19 +329,21 @@ consumer_del:
return ret;
}
-int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
- u32 channel, unsigned long timeout_ms)
+int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
+ u32 channel, unsigned long timeout_ms)
{
struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
struct vpu_jsm_msg hb_resp;
int ret, hb_ret;
- drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
if (ret != -ETIMEDOUT)
- return ret;
+ goto rpm_put;
hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
@@ -345,21 +351,33 @@ int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *r
if (hb_ret == -ETIMEDOUT)
ivpu_pm_trigger_recovery(vdev, "IPC timeout");
+rpm_put:
+ ivpu_rpm_put(vdev);
return ret;
}
-int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
- u32 channel, unsigned long timeout_ms)
+int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ u32 channel, unsigned long timeout_ms)
{
+ struct ivpu_ipc_consumer cons;
int ret;
ret = ivpu_rpm_get(vdev);
if (ret < 0)
return ret;
- ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);
+ ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
+ ret = ivpu_ipc_send(vdev, &cons, req);
+ if (ret) {
+ ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
+ goto consumer_del;
+ }
+
+ msleep(timeout_ms);
+
+consumer_del:
+ ivpu_ipc_consumer_del(vdev, &cons);
ivpu_rpm_put(vdev);
return ret;
}
@@ -518,7 +536,6 @@ void ivpu_ipc_fini(struct ivpu_device *vdev)
{
struct ivpu_ipc_info *ipc = vdev->ipc;
- drm_WARN_ON(&vdev->drm, ipc->on);
drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
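Annotation: the new ivpu_ipc_send_and_wait() above is a fire-and-forget variant: it holds a runtime-PM reference around the send, then just sleeps for the timeout instead of waiting for a specific response. A simplified standalone C sketch of that bracket (all names are stand-ins, consumer registration is omitted, and nanosleep() is POSIX):

#include <stdio.h>
#include <time.h>

struct dev { int power_refs; };

static int  rpm_get(struct dev *d)  { d->power_refs++; return 0; }
static void rpm_put(struct dev *d)  { d->power_refs--; }
static int  msg_send(struct dev *d, const char *req) { (void)d; return req ? 0 : -1; }

static void sleep_ms(unsigned long ms)
{
    struct timespec ts = { ms / 1000, (long)(ms % 1000) * 1000000L };
    nanosleep(&ts, NULL);
}

/* Hold a power reference around the send, then give the firmware
 * timeout_ms to act; no response message is consumed here. */
static int send_and_wait(struct dev *d, const char *req, unsigned long timeout_ms)
{
    int ret = rpm_get(d);
    if (ret < 0)
        return ret;

    ret = msg_send(d, req);
    if (!ret)
        sleep_ms(timeout_ms);

    rpm_put(d);
    return ret;
}

int main(void)
{
    struct dev d = { 0 };
    printf("send_and_wait -> %d\n", send_and_wait(&d, "STATE_DUMP", 10));
    return 0;
}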
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
index 4fe38141045e..b4dfb504679b 100644
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -101,12 +101,13 @@ int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
unsigned long timeout_ms);
-
-int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
- enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
- u32 channel, unsigned long timeout_ms);
+int ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp_type,
+ struct vpu_jsm_msg *resp, u32 channel, unsigned long timeout_ms);
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
u32 channel, unsigned long timeout_ms);
+int ivpu_ipc_send_and_wait(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ u32 channel, unsigned long timeout_ms);
#endif /* __IVPU_IPC_H__ */
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index be2e2bf0f43f..7149312f16e1 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -18,11 +18,10 @@
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"
+#include "ivpu_trace.h"
#include "vpu_boot_api.h"
#define CMD_BUF_IDX 0
-#define JOB_ID_JOB_MASK GENMASK(7, 0)
-#define JOB_ID_CONTEXT_MASK GENMASK(31, 8)
#define JOB_MAX_BUFFER_COUNT 65535
static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
@@ -35,24 +34,20 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
{
u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
- struct ivpu_addr_range range;
- if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
+ ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
return 0;
- range.start = vdev->hw->ranges.user.end - (primary_size * IVPU_NUM_CMDQS_PER_CTX);
- range.end = vdev->hw->ranges.user.end;
- cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, primary_size,
- DRM_IVPU_BO_WC);
+ cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
+ primary_size, DRM_IVPU_BO_WC);
if (!cmdq->primary_preempt_buf) {
ivpu_err(vdev, "Failed to create primary preemption buffer\n");
return -ENOMEM;
}
- range.start = vdev->hw->ranges.shave.end - (secondary_size * IVPU_NUM_CMDQS_PER_CTX);
- range.end = vdev->hw->ranges.shave.end;
- cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, secondary_size,
- DRM_IVPU_BO_WC);
+ cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
+ secondary_size, DRM_IVPU_BO_WC);
if (!cmdq->secondary_preempt_buf) {
ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
goto err_free_primary;
@@ -62,24 +57,24 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
err_free_primary:
ivpu_bo_free(cmdq->primary_preempt_buf);
+ cmdq->primary_preempt_buf = NULL;
return -ENOMEM;
}
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
return;
- drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
- drm_WARN_ON(&vdev->drm, !cmdq->secondary_preempt_buf);
- ivpu_bo_free(cmdq->primary_preempt_buf);
- ivpu_bo_free(cmdq->secondary_preempt_buf);
+ if (cmdq->primary_preempt_buf)
+ ivpu_bo_free(cmdq->primary_preempt_buf);
+ if (cmdq->secondary_preempt_buf)
+ ivpu_bo_free(cmdq->secondary_preempt_buf);
}
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
- struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq;
int ret;
@@ -88,25 +83,33 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq)
return NULL;
- ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
- if (ret) {
+ ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+ GFP_KERNEL);
+ if (ret < 0) {
ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
goto err_free_cmdq;
}
+ ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
+ &file_priv->cmdq_id_next, GFP_KERNEL);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
+ goto err_erase_db_xa;
+ }
+
cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!cmdq->mem)
- goto err_erase_xa;
+ goto err_erase_cmdq_xa;
ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
if (ret)
- goto err_free_cmdq_mem;
+ ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
return cmdq;
-err_free_cmdq_mem:
- ivpu_bo_free(cmdq->mem);
-err_erase_xa:
+err_erase_cmdq_xa:
+ xa_erase(&file_priv->cmdq_xa, cmdq->id);
+err_erase_db_xa:
xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
kfree(cmdq);
@@ -130,13 +133,13 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
struct ivpu_device *vdev = file_priv->vdev;
int ret;
- ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
+ ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
task_pid_nr(current), engine,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (ret)
return ret;
- ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
+ ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
priority);
if (ret)
return ret;
@@ -149,21 +152,22 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
struct ivpu_device *vdev = file_priv->vdev;
int ret;
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
- ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
else
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (!ret)
- ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);
+ ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
+ cmdq->db_id, cmdq->id, file_priv->ctx.id);
return ret;
}
static int
-ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
+ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct vpu_job_queue_header *jobq_header;
@@ -179,13 +183,18 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
jobq_header = &cmdq->jobq->header;
- jobq_header->engine_idx = engine;
+ jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
jobq_header->head = 0;
jobq_header->tail = 0;
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
+ ivpu_dbg(vdev, JOB, "Turbo mode enabled\n");
+ jobq_header->flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+ }
+
wmb(); /* Flush WC buffer for jobq->header */
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
if (ret)
return ret;
}
@@ -211,10 +220,10 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
cmdq->db_registered = false;
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
if (!ret)
- ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
+ ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
}
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
@@ -224,55 +233,46 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
return 0;
}
-static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
- u8 priority)
+static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
{
- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
- struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
int ret;
lockdep_assert_held(&file_priv->lock);
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ if (cmdq->priority == priority)
+ break;
+
if (!cmdq) {
cmdq = ivpu_cmdq_alloc(file_priv);
if (!cmdq)
return NULL;
- file_priv->cmdq[cmdq_idx] = cmdq;
+ cmdq->priority = priority;
}
- ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
+ ret = ivpu_cmdq_init(file_priv, cmdq, priority);
if (ret)
return NULL;
return cmdq;
}
-static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority)
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
- struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
lockdep_assert_held(&file_priv->lock);
- if (cmdq) {
- file_priv->cmdq[cmdq_idx] = NULL;
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
+ xa_erase(&file_priv->cmdq_xa, cmdq_id);
ivpu_cmdq_fini(file_priv, cmdq);
ivpu_cmdq_free(file_priv, cmdq);
}
}
-void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
-{
- u16 engine;
- u8 priority;
-
- lockdep_assert_held(&file_priv->lock);
-
- for (engine = 0; engine < IVPU_NUM_ENGINES; engine++)
- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
- ivpu_cmdq_release_locked(file_priv, engine, priority);
-}
-
/*
* Mark the doorbell as unregistered
* This function needs to be called when the VPU hardware is restarted
@@ -281,20 +281,13 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
*/
static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
{
- u16 engine;
- u8 priority;
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
mutex_lock(&file_priv->lock);
- for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
- struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
-
- if (cmdq)
- cmdq->db_registered = false;
- }
- }
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ cmdq->db_registered = false;
mutex_unlock(&file_priv->lock);
}
@@ -314,17 +307,11 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
{
- u16 engine;
- u8 priority;
-
- for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
- for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
- int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
+ struct ivpu_cmdq *cmdq;
+ unsigned long cmdq_id;
- if (file_priv->cmdq[cmdq_idx])
- ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]);
- }
- }
+ xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ ivpu_cmdq_fini(file_priv, cmdq);
}
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
@@ -335,7 +322,7 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
ivpu_cmdq_fini_all(file_priv);
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_OS)
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
}
@@ -349,24 +336,29 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
/* Check if there is space left in job queue */
if (next_entry == header->head) {
- ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
- job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
+ ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
+ job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
return -EBUSY;
}
- entry = &cmdq->jobq->job[tail];
+ entry = &cmdq->jobq->slot[tail].job;
entry->batch_buf_addr = job->cmd_buf_vpu_addr;
entry->job_id = job->job_id;
entry->flags = 0;
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
- if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW &&
- (unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
- entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
- entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
- entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
- entry->secondary_preempt_buf_size = ivpu_bo_size(cmdq->secondary_preempt_buf);
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+ if (cmdq->primary_preempt_buf) {
+ entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
+ entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
+ }
+
+ if (cmdq->secondary_preempt_buf) {
+ entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
+ entry->secondary_preempt_buf_size =
+ ivpu_bo_size(cmdq->secondary_preempt_buf);
+ }
}
wmb(); /* Ensure that tail is updated after filling entry */
@@ -457,6 +449,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
job->file_priv = ivpu_file_priv_get(file_priv);
+ trace_job("create", job);
ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
return job;
@@ -496,6 +489,7 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
job->bos[CMD_BUF_IDX]->job_status = job_status;
dma_fence_signal(job->done_fence);
+ trace_job("done", job);
ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
@@ -519,7 +513,6 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = job->vdev;
- struct xa_limit job_id_range;
struct ivpu_cmdq *cmdq;
bool is_first_job;
int ret;
@@ -530,7 +523,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
mutex_lock(&file_priv->lock);
- cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx, priority);
+ cmdq = ivpu_cmdq_acquire(file_priv, priority);
if (!cmdq) {
ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
file_priv->ctx.id, job->engine_idx, priority);
@@ -538,13 +531,11 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
goto err_unlock_file_priv;
}
- job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
- job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
-
xa_lock(&vdev->submitted_jobs_xa);
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
- ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
- if (ret) {
+ ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+ &file_priv->job_id_next, GFP_KERNEL);
+ if (ret < 0) {
ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
file_priv->ctx.id);
ret = -EBUSY;
@@ -566,6 +557,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
vdev->busy_start_ts = ktime_get();
}
+ trace_job("submit", job);
ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
job->job_id, file_priv->ctx.id, job->engine_idx, priority,
job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
@@ -673,7 +665,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
int idx, ret;
u8 priority;
- if (params->engine > DRM_IVPU_ENGINE_COPY)
+ if (params->engine != DRM_IVPU_ENGINE_COMPUTE)
return -EINVAL;
if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
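Annotation: the switch from xa_alloc() to xa_alloc_cyclic() above changes ID reuse behavior: IDs advance monotonically through the allowed range and wrap, so a just-freed doorbell, command-queue, or job ID is not handed out again immediately. A toy plain-C model of that allocation policy (not the XArray API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ID_MIN 1
#define ID_MAX 8

static bool in_use[ID_MAX + 1];
static uint32_t next_id = ID_MIN;

static int alloc_cyclic(uint32_t *out)
{
    for (int tries = 0; tries <= ID_MAX - ID_MIN; tries++) {
        uint32_t id = next_id;

        next_id = (id == ID_MAX) ? ID_MIN : id + 1;  /* advance and wrap */
        if (!in_use[id]) {
            in_use[id] = true;
            *out = id;
            return 0;
        }
    }
    return -1;   /* every ID in the range is busy */
}

int main(void)
{
    uint32_t id;

    while (alloc_cyclic(&id) == 0)
        printf("allocated id %u\n", id);
    return 0;
}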
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 6accb94028c7..8b19e3f8b4cf 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -28,8 +28,10 @@ struct ivpu_cmdq {
struct ivpu_bo *secondary_preempt_buf;
struct ivpu_bo *mem;
u32 entry_count;
+ u32 id;
u32 db_id;
bool db_registered;
+ u8 priority;
};
/**
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 46ef16c3c069..30a40be76930 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -48,9 +48,10 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
- IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
+ IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
@@ -131,7 +132,7 @@ int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
struct vpu_jsm_msg resp;
int ret;
- if (engine > VPU_ENGINE_COPY)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.query_engine_hb.engine_idx = engine;
@@ -154,7 +155,7 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
struct vpu_jsm_msg resp;
int ret;
- if (engine > VPU_ENGINE_COPY)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.engine_reset.engine_idx = engine;
@@ -173,7 +174,7 @@ int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id
struct vpu_jsm_msg resp;
int ret;
- if (engine > VPU_ENGINE_COPY)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.engine_preempt.engine_idx = engine;
@@ -196,7 +197,7 @@ int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size
strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
- VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ VPU_IPC_CHAN_GEN_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
command, ret);
@@ -270,9 +271,8 @@ int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
req.payload.pwr_d0i3_enter.send_response = 1;
- ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
- &resp, VPU_IPC_CHAN_GEN_CMD,
- vdev->timeout.d0i3_entry_msg);
+ ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
+ VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
if (ret)
return ret;
@@ -346,7 +346,7 @@ int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
struct vpu_jsm_msg resp;
int ret;
- if (engine >= VPU_ENGINE_NB)
+ if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.hws_resume_engine.engine_idx = engine;
@@ -394,8 +394,6 @@ int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u3
req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
req.payload.hws_set_scheduling_log.notify_index = 0;
- req.payload.hws_set_scheduling_log.enable_extra_events =
- ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
@@ -430,8 +428,8 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
req.payload.hws_priority_band_setup.normal_band_percentage = 10;
- ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
- &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
+ &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
if (ret)
ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);
@@ -544,9 +542,8 @@ int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us
req.payload.pwr_dct_control.dct_active_us = active_us;
req.payload.pwr_dct_control.dct_inactive_us = inactive_us;
- return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE,
- &resp, VPU_IPC_CHAN_ASYNC_CMD,
- vdev->timeout.jsm);
+ return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}
int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
@@ -554,7 +551,14 @@ int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
struct vpu_jsm_msg resp;
- return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE,
- &resp, VPU_IPC_CHAN_ASYNC_CMD,
- vdev->timeout.jsm);
+ return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+}
+
+int ivpu_jsm_state_dump(struct ivpu_device *vdev)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };
+
+ return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
+ vdev->timeout.state_dump_msg);
}
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h
index e4e42c0ff6e6..9e84d3526a14 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.h
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.h
@@ -43,4 +43,6 @@ int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mas
u64 buffer_size, u32 *sample_size, u64 *info_size);
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us);
int ivpu_jsm_dct_disable(struct ivpu_device *vdev);
+int ivpu_jsm_state_dump(struct ivpu_device *vdev);
+
#endif
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index c078e214b221..26ef52fbb93e 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -696,7 +696,7 @@ unlock:
return ret;
}
-static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
+static int ivpu_mmu_cdtab_entry_set(struct ivpu_device *vdev, u32 ssid, u64 cd_dma, bool valid)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
@@ -708,30 +708,29 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
return -EINVAL;
entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
-
- if (cd_dma != 0) {
- cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
- FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
- FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
- IVPU_MMU_CD_0_TCR_EPD1 |
- IVPU_MMU_CD_0_AA64 |
- IVPU_MMU_CD_0_R |
- IVPU_MMU_CD_0_ASET |
- IVPU_MMU_CD_0_V;
- cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
- cd[2] = 0;
- cd[3] = 0x0000000000007444;
-
- /* For global context generate memory fault on VPU */
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
- cd[0] |= IVPU_MMU_CD_0_A;
- } else {
- memset(cd, 0, sizeof(cd));
- }
+ drm_WARN_ON(&vdev->drm, (entry[0] & IVPU_MMU_CD_0_V) == valid);
+
+ cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
+ FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
+ IVPU_MMU_CD_0_TCR_EPD1 |
+ IVPU_MMU_CD_0_AA64 |
+ IVPU_MMU_CD_0_R |
+ IVPU_MMU_CD_0_ASET;
+ cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
+ cd[2] = 0;
+ cd[3] = 0x0000000000007444;
+
+ /* For global context generate memory fault on VPU */
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
+ cd[0] |= IVPU_MMU_CD_0_A;
+
+ if (valid)
+ cd[0] |= IVPU_MMU_CD_0_V;
WRITE_ONCE(entry[1], cd[1]);
WRITE_ONCE(entry[2], cd[2]);
@@ -741,8 +740,8 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
if (!ivpu_is_force_snoop_enabled(vdev))
clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
- ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
- cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
+ ivpu_dbg(vdev, MMU, "CDTAB set %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
+ valid ? "valid" : "invalid", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
mutex_lock(&mmu->lock);
if (!mmu->on)
@@ -750,38 +749,18 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
if (ret)
- goto unlock;
+ goto err_invalidate;
ret = ivpu_mmu_cmdq_sync(vdev);
+ if (ret)
+ goto err_invalidate;
unlock:
mutex_unlock(&mmu->lock);
- return ret;
-}
-
-static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
-{
- int ret;
-
- ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
- if (ret)
- ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);
-
- return ret;
-}
-
-static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
-{
- int ret;
-
- if (ssid == 0) {
- ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
- return -EINVAL;
- }
-
- ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
- if (ret)
- ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);
+ return 0;
+err_invalidate:
+ WRITE_ONCE(entry[0], 0);
+ mutex_unlock(&mmu->lock);
return ret;
}
@@ -808,12 +787,6 @@ int ivpu_mmu_init(struct ivpu_device *vdev)
return ret;
}
- ret = ivpu_mmu_cd_add_gbl(vdev);
- if (ret) {
- ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
- return ret;
- }
-
ret = ivpu_mmu_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
@@ -966,12 +939,12 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
}
-int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
+int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
{
- return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
+ return ivpu_mmu_cdtab_entry_set(vdev, ssid, pgtable->pgd_dma, true);
}
-void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
+void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid)
{
- ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
+ ivpu_mmu_cdtab_entry_set(vdev, ssid, 0, false);
}
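Annotation: the reworked ivpu_mmu_cdtab_entry_set() depends on write ordering: the descriptor body is written before the word carrying the valid bit, and the error path zeroes that word first, so the hardware never observes a half-written entry. A C11 sketch of this publish-last idiom (stand-in types; the driver additionally uses WRITE_ONCE() and cache flushes rather than C11 atomics):

#include <stdatomic.h>
#include <stdint.h>

#define DESC_VALID (1ull << 0)

struct desc { _Atomic uint64_t w[4]; };

static void desc_publish(struct desc *d, const uint64_t v[4])
{
    atomic_store_explicit(&d->w[1], v[1], memory_order_relaxed);
    atomic_store_explicit(&d->w[2], v[2], memory_order_relaxed);
    atomic_store_explicit(&d->w[3], v[3], memory_order_relaxed);
    /* release ordering: the body is visible before the valid bit flips */
    atomic_store_explicit(&d->w[0], v[0] | DESC_VALID, memory_order_release);
}

int main(void)
{
    struct desc d = { 0 };
    const uint64_t v[4] = { 0, 0x1000, 0, 0x7444 };

    desc_publish(&d, v);
    return 0;
}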
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
index 6fa35c240710..7afea9cd8731 100644
--- a/drivers/accel/ivpu/ivpu_mmu.h
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -40,8 +40,8 @@ struct ivpu_mmu_info {
int ivpu_mmu_init(struct ivpu_device *vdev);
void ivpu_mmu_disable(struct ivpu_device *vdev);
int ivpu_mmu_enable(struct ivpu_device *vdev);
-int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
-void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid);
+int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
+void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid);
int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index bbe652a7019d..0af614dfb6f9 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -90,19 +90,6 @@ static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_
}
}
-static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
-{
- dma_addr_t pgd_dma;
-
- pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
- if (!pgtable->pgd_dma_ptr)
- return -ENOMEM;
-
- pgtable->pgd_dma = pgd_dma;
-
- return 0;
-}
-
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
int pgd_idx, pud_idx, pmd_idx;
@@ -140,6 +127,27 @@ static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgt
}
ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
+ pgtable->pgd_dma_ptr = NULL;
+ pgtable->pgd_dma = 0;
+}
+
+static u64*
+ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+ u64 *pgd_dma_ptr = pgtable->pgd_dma_ptr;
+ dma_addr_t pgd_dma;
+
+ if (pgd_dma_ptr)
+ return pgd_dma_ptr;
+
+ pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
+ if (!pgd_dma_ptr)
+ return NULL;
+
+ pgtable->pgd_dma_ptr = pgd_dma_ptr;
+ pgtable->pgd_dma = pgd_dma;
+
+ return pgd_dma_ptr;
}
static u64*
@@ -237,6 +245,12 @@ ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx
int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+ drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID);
+
+ /* Allocate PGD - first level page table if needed */
+ if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable))
+ return -ENOMEM;
+
/* Allocate PUD - second level page table if needed */
if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
return -ENOMEM;
@@ -418,6 +432,7 @@ int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
+ size_t start_vpu_addr = vpu_addr;
struct scatterlist *sg;
int ret;
u64 prot;
@@ -448,20 +463,36 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
if (ret) {
ivpu_err(vdev, "Failed to map context pages\n");
- mutex_unlock(&ctx->lock);
- return ret;
+ goto err_unmap_pages;
}
vpu_addr += size;
}
+ if (!ctx->is_cd_valid) {
+ ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
+ if (ret) {
+ ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
+ ctx->id, ret);
+ goto err_unmap_pages;
+ }
+ ctx->is_cd_valid = true;
+ }
+
/* Ensure page table modifications are flushed from wc buffers to memory */
wmb();
- mutex_unlock(&ctx->lock);
-
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+ goto err_unmap_pages;
+ }
+
+ mutex_unlock(&ctx->lock);
+ return 0;
+
+err_unmap_pages:
+ ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
+ mutex_unlock(&ctx->lock);
return ret;
}
@@ -530,65 +561,79 @@ ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *n
mutex_unlock(&ctx->lock);
}
-static int
-ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
+void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
u64 start, end;
- int ret;
mutex_init(&ctx->lock);
- ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
- if (ret) {
- ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
- return ret;
- }
-
if (!context_id) {
start = vdev->hw->ranges.global.start;
end = vdev->hw->ranges.shave.end;
} else {
- start = vdev->hw->ranges.user.start;
- end = vdev->hw->ranges.dma.end;
+ start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
+ end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
}
drm_mm_init(&ctx->mm, start, end - start);
ctx->id = context_id;
-
- return 0;
}
-static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
- if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
- return;
+ if (ctx->is_cd_valid) {
+ ivpu_mmu_cd_clear(vdev, ctx->id);
+ ctx->is_cd_valid = false;
+ }
mutex_destroy(&ctx->lock);
ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
drm_mm_takedown(&ctx->mm);
-
- ctx->pgtable.pgd_dma_ptr = NULL;
- ctx->pgtable.pgd_dma = 0;
}
-int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
+void ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
- return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+ ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
- return ivpu_mmu_context_fini(vdev, &vdev->gctx);
+ ivpu_mmu_context_fini(vdev, &vdev->gctx);
}
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
- return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
+ int ret;
+
+ ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
+
+ mutex_lock(&vdev->rctx.lock);
+
+ if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
+ ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
+ ret = -ENOMEM;
+ goto err_ctx_fini;
+ }
+
+ ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
+ if (ret) {
+ ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
+ goto err_ctx_fini;
+ }
+
+ mutex_unlock(&vdev->rctx.lock);
+ return ret;
+
+err_ctx_fini:
+ mutex_unlock(&vdev->rctx.lock);
+ ivpu_mmu_context_fini(vdev, &vdev->rctx);
+ return ret;
}
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
- return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
+ ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
+ ivpu_mmu_context_fini(vdev, &vdev->rctx);
}
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
@@ -603,36 +648,3 @@ void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
xa_unlock(&vdev->context_xa);
}
-
-int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
-{
- int ret;
-
- drm_WARN_ON(&vdev->drm, !ctx_id);
-
- ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
- if (ret) {
- ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
- return ret;
- }
-
- ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
- if (ret) {
- ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
- goto err_context_fini;
- }
-
- return 0;
-
-err_context_fini:
- ivpu_mmu_context_fini(vdev, ctx);
- return ret;
-}
-
-void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
-{
- drm_WARN_ON(&vdev->drm, !ctx->id);
-
- ivpu_mmu_clear_pgtable(vdev, ctx->id);
- ivpu_mmu_context_fini(vdev, ctx);
-}
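
A note on the new flow: ivpu_mmu_context_init() can no longer fail because the root page table is no longer allocated up front; a context that needs a valid context descriptor immediately must allocate it explicitly, as the reserved context does above. A minimal sketch of that pattern, assuming only the ivpu helpers shown in this hunk (the function name is hypothetical):

static int ivpu_mmu_eager_context_init(struct ivpu_device *vdev,
				       struct ivpu_mmu_context *ctx, u32 ctx_id)
{
	int ret;

	/* Never fails any more: no page-table allocation here */
	ivpu_mmu_context_init(vdev, ctx, ctx_id);

	mutex_lock(&ctx->lock);

	/* Allocate the root page table on demand */
	if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable)) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* Publish the context descriptor so the device can walk the tables */
	ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
	if (ret)
		goto err_unlock;

	mutex_unlock(&ctx->lock);
	return 0;

err_unlock:
	mutex_unlock(&ctx->lock);
	ivpu_mmu_context_fini(vdev, ctx);
	return ret;
}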
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index 7f9aaf3d10c2..8042fc067062 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -23,19 +23,20 @@ struct ivpu_mmu_pgtable {
};
struct ivpu_mmu_context {
- struct mutex lock; /* Protects: mm, pgtable */
+ struct mutex lock; /* Protects: mm, pgtable, is_cd_valid */
struct drm_mm mm;
struct ivpu_mmu_pgtable pgtable;
+ bool is_cd_valid;
u32 id;
};
-int ivpu_mmu_global_context_init(struct ivpu_device *vdev);
+void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id);
+void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
+void ivpu_mmu_global_context_init(struct ivpu_device *vdev);
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);
-int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
-void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c
index 2f9d37f5c208..ffe7b10f8a76 100644
--- a/drivers/accel/ivpu/ivpu_ms.c
+++ b/drivers/accel/ivpu/ivpu_ms.c
@@ -11,7 +11,7 @@
#include "ivpu_ms.h"
#include "ivpu_pm.h"
-#define MS_INFO_BUFFER_SIZE SZ_16K
+#define MS_INFO_BUFFER_SIZE SZ_64K
#define MS_NUM_BUFFERS 2
#define MS_READ_PERIOD_MULTIPLIER 2
#define MS_MIN_SAMPLE_PERIOD_NS 1000000
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 59d3170f5e35..5060c5dd40d1 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -9,21 +9,25 @@
#include <linux/pm_runtime.h>
#include <linux/reboot.h>
-#include "vpu_boot_api.h"
+#include "ivpu_coredump.h"
#include "ivpu_drv.h"
-#include "ivpu_hw.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
+#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
+#include "ivpu_trace.h"
+#include "vpu_boot_api.h"
static bool ivpu_disable_recovery;
+#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
MODULE_PARM_DESC(disable_recovery, "Disables recovery when NPU hang is detected");
+#endif
static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
@@ -37,6 +41,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
ivpu_cmdq_reset_all_contexts(vdev);
ivpu_ipc_reset(vdev);
+ ivpu_fw_log_reset(vdev);
ivpu_fw_load(vdev);
fw->entry_point = fw->cold_boot_entry_point;
}
@@ -73,8 +78,8 @@ static int ivpu_resume(struct ivpu_device *vdev)
int ret;
retry:
- pci_restore_state(to_pci_dev(vdev->drm.dev));
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+ pci_restore_state(to_pci_dev(vdev->drm.dev));
ret = ivpu_hw_power_up(vdev);
if (ret) {
@@ -110,40 +115,57 @@ err_power_down:
return ret;
}
-static void ivpu_pm_recovery_work(struct work_struct *work)
+static void ivpu_pm_reset_begin(struct ivpu_device *vdev)
{
- struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
- struct ivpu_device *vdev = pm->vdev;
- char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
- int ret;
-
- ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
-
- ret = pm_runtime_resume_and_get(vdev->drm.dev);
- if (ret)
- ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
-
- ivpu_fw_log_dump(vdev);
+ pm_runtime_disable(vdev->drm.dev);
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->reset_pending, 1);
down_write(&vdev->pm->reset_lock);
+}
+
+static void ivpu_pm_reset_complete(struct ivpu_device *vdev)
+{
+ int ret;
- ivpu_suspend(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_ms_cleanup_all(vdev);
ret = ivpu_resume(vdev);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ } else {
+ pm_runtime_set_active(vdev->drm.dev);
+ }
up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
- kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ pm_runtime_enable(vdev->drm.dev);
+}
+
+static void ivpu_pm_recovery_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+ struct ivpu_device *vdev = pm->vdev;
+ char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+
+ ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_jsm_state_dump(vdev);
+ ivpu_dev_coredump(vdev);
+ ivpu_suspend(vdev);
+ }
+
+ ivpu_pm_reset_complete(vdev);
+
+ kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
}
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
@@ -195,6 +217,7 @@ int ivpu_pm_suspend_cb(struct device *dev)
struct ivpu_device *vdev = to_ivpu_device(drm);
unsigned long timeout;
+ trace_pm("suspend");
ivpu_dbg(vdev, PM, "Suspend..\n");
timeout = jiffies + msecs_to_jiffies(vdev->timeout.tdr);
@@ -212,6 +235,7 @@ int ivpu_pm_suspend_cb(struct device *dev)
ivpu_pm_prepare_warm_boot(vdev);
ivpu_dbg(vdev, PM, "Suspend done.\n");
+ trace_pm("suspend done");
return 0;
}
@@ -222,6 +246,7 @@ int ivpu_pm_resume_cb(struct device *dev)
struct ivpu_device *vdev = to_ivpu_device(drm);
int ret;
+ trace_pm("resume");
ivpu_dbg(vdev, PM, "Resume..\n");
ret = ivpu_resume(vdev);
@@ -229,6 +254,7 @@ int ivpu_pm_resume_cb(struct device *dev)
ivpu_err(vdev, "Failed to resume: %d\n", ret);
ivpu_dbg(vdev, PM, "Resume done.\n");
+ trace_pm("resume done");
return ret;
}
@@ -243,6 +269,7 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
+ trace_pm("runtime suspend");
ivpu_dbg(vdev, PM, "Runtime suspend..\n");
ivpu_mmu_disable(vdev);
@@ -262,13 +289,14 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
if (!is_idle || ret_d0i3) {
ivpu_err(vdev, "Forcing cold boot due to previous errors\n");
atomic_inc(&vdev->pm->reset_counter);
- ivpu_fw_log_dump(vdev);
+ ivpu_dev_coredump(vdev);
ivpu_pm_prepare_cold_boot(vdev);
} else {
ivpu_pm_prepare_warm_boot(vdev);
}
ivpu_dbg(vdev, PM, "Runtime suspend done.\n");
+ trace_pm("runtime suspend done");
return 0;
}
@@ -279,6 +307,7 @@ int ivpu_pm_runtime_resume_cb(struct device *dev)
struct ivpu_device *vdev = to_ivpu_device(drm);
int ret;
+ trace_pm("runtime resume");
ivpu_dbg(vdev, PM, "Runtime resume..\n");
ret = ivpu_resume(vdev);
@@ -286,6 +315,7 @@ int ivpu_pm_runtime_resume_cb(struct device *dev)
ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
ivpu_dbg(vdev, PM, "Runtime resume done.\n");
+ trace_pm("runtime resume done");
return ret;
}
@@ -295,7 +325,10 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
int ret;
ret = pm_runtime_resume_and_get(vdev->drm.dev);
- drm_WARN_ON(&vdev->drm, ret < 0);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ }
return ret;
}
@@ -311,16 +344,13 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
struct ivpu_device *vdev = pci_get_drvdata(pdev);
ivpu_dbg(vdev, PM, "Pre-reset..\n");
- atomic_inc(&vdev->pm->reset_counter);
- atomic_set(&vdev->pm->reset_pending, 1);
- pm_runtime_get_sync(vdev->drm.dev);
- down_write(&vdev->pm->reset_lock);
- ivpu_prepare_for_reset(vdev);
- ivpu_hw_reset(vdev);
- ivpu_pm_prepare_cold_boot(vdev);
- ivpu_jobs_abort_all(vdev);
- ivpu_ms_cleanup_all(vdev);
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_prepare_for_reset(vdev);
+ ivpu_hw_reset(vdev);
+ }
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}
@@ -328,18 +358,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
- int ret;
ivpu_dbg(vdev, PM, "Post-reset..\n");
- ret = ivpu_resume(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
- up_write(&vdev->pm->reset_lock);
- atomic_set(&vdev->pm->reset_pending, 0);
- ivpu_dbg(vdev, PM, "Post-reset done.\n");
- pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ ivpu_pm_reset_complete(vdev);
+
+ ivpu_dbg(vdev, PM, "Post-reset done.\n");
}
void ivpu_pm_init(struct ivpu_device *vdev)
@@ -364,6 +388,7 @@ void ivpu_pm_init(struct ivpu_device *vdev)
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, delay);
+ pm_runtime_set_active(dev);
ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
}
@@ -378,7 +403,6 @@ void ivpu_pm_enable(struct ivpu_device *vdev)
{
struct device *dev = vdev->drm.dev;
- pm_runtime_set_active(dev);
pm_runtime_allow(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -411,7 +435,7 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
if (ret) {
- ivpu_err_ratelimited(vdev, "Filed to enable DCT: %d\n", ret);
+ ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret);
return ret;
}
@@ -428,7 +452,7 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
ret = ivpu_jsm_dct_disable(vdev);
if (ret) {
- ivpu_err_ratelimited(vdev, "Filed to disable DCT: %d\n", ret);
+ ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret);
return ret;
}
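
The recovery and PCI-reset paths now share ivpu_pm_reset_begin()/ivpu_pm_reset_complete(), which bracket the reset with pm_runtime_disable()/pm_runtime_enable() instead of the old get/put pair, and skip hardware access entirely when the device is already runtime-suspended. A minimal sketch of the resulting shape, assuming the helpers above (the caller name is hypothetical):

static void example_full_reset(struct ivpu_device *vdev)
{
	ivpu_pm_reset_begin(vdev);	/* disables runtime PM, takes reset_lock */

	/* Only touch the hardware if it is actually powered */
	if (!pm_runtime_status_suspended(vdev->drm.dev)) {
		ivpu_prepare_for_reset(vdev);
		ivpu_hw_reset(vdev);
	}

	ivpu_pm_reset_complete(vdev);	/* cold boot prep, resume, re-enable runtime PM */
}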
diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
index 913669f1786e..616477fc17fa 100644
--- a/drivers/accel/ivpu/ivpu_sysfs.c
+++ b/drivers/accel/ivpu/ivpu_sysfs.c
@@ -6,6 +6,8 @@
#include <linux/device.h>
#include <linux/err.h>
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
#include "ivpu_hw.h"
#include "ivpu_sysfs.h"
@@ -39,8 +41,30 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
static DEVICE_ATTR_RO(npu_busy_time_us);
+/**
+ * DOC: sched_mode
+ *
+ * The sched_mode attribute reports the current NPU scheduling mode.
+ *
+ * It returns one of the following strings:
+ * - "HW" - Hardware Scheduler mode
+ * - "OS" - Operating System Scheduler mode
+ *
+ */
+static ssize_t
+sched_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+
+ return sysfs_emit(buf, "%s\n", vdev->fw->sched_mode ? "HW" : "OS");
+}
+
+static DEVICE_ATTR_RO(sched_mode);
+
static struct attribute *ivpu_dev_attrs[] = {
&dev_attr_npu_busy_time_us.attr,
+ &dev_attr_sched_mode.attr,
NULL,
};
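
Usage note: once the driver is bound, the new attribute can be read directly from sysfs, e.g. "cat /sys/class/accel/accel0/device/sched_mode" (the exact path is an assumption and depends on device enumeration); it prints "HW" when the firmware hardware scheduler is in use and "OS" otherwise.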
diff --git a/drivers/accel/ivpu/ivpu_trace.h b/drivers/accel/ivpu/ivpu_trace.h
new file mode 100644
index 000000000000..eb792038e701
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_trace.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#if !defined(__IVPU_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __IVPU_TRACE_H__
+
+#include <linux/tracepoint.h>
+#include "ivpu_drv.h"
+#include "ivpu_job.h"
+#include "vpu_jsm_api.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_ipc.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vpu
+#define TRACE_INCLUDE_FILE ivpu_trace
+
+TRACE_EVENT(pm,
+ TP_PROTO(const char *event),
+ TP_ARGS(event),
+ TP_STRUCT__entry(__field(const char *, event)),
+ TP_fast_assign(__entry->event = event;),
+ TP_printk("%s", __entry->event)
+);
+
+TRACE_EVENT(job,
+ TP_PROTO(const char *event, struct ivpu_job *job),
+ TP_ARGS(event, job),
+ TP_STRUCT__entry(__field(const char *, event)
+ __field(u32, ctx_id)
+ __field(u32, engine_id)
+ __field(u32, job_id)
+ ),
+ TP_fast_assign(__entry->event = event;
+ __entry->ctx_id = job->file_priv->ctx.id;
+ __entry->engine_id = job->engine_idx;
+ __entry->job_id = job->job_id;),
+ TP_printk("%s context:%d engine:%d job:%d",
+ __entry->event,
+ __entry->ctx_id,
+ __entry->engine_id,
+ __entry->job_id)
+);
+
+TRACE_EVENT(jsm,
+ TP_PROTO(const char *event, struct vpu_jsm_msg *msg),
+ TP_ARGS(event, msg),
+ TP_STRUCT__entry(__field(const char *, event)
+ __field(const char *, type)
+ __field(enum vpu_ipc_msg_status, status)
+ __field(u32, request_id)
+ __field(u32, result)
+ ),
+ TP_fast_assign(__entry->event = event;
+ __entry->type = ivpu_jsm_msg_type_to_str(msg->type);
+ __entry->status = msg->status;
+ __entry->request_id = msg->request_id;
+ __entry->result = msg->result;),
+ TP_printk("%s type:%s, status:%#x, id:%#x, result:%#x",
+ __entry->event,
+ __entry->type,
+ __entry->status,
+ __entry->request_id,
+ __entry->result)
+);
+
+#endif /* __IVPU_TRACE_H__ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
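
Usage note: after the module is loaded, these tracepoints appear under the "vpu" system in tracefs; assuming the usual mount point, "echo 1 > /sys/kernel/tracing/events/vpu/enable" followed by "cat /sys/kernel/tracing/trace_pipe" streams the pm/job/jsm events defined above.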
diff --git a/drivers/accel/ivpu/ivpu_trace_points.c b/drivers/accel/ivpu/ivpu_trace_points.c
new file mode 100644
index 000000000000..f8fb99de0de3
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_trace_points.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2024 Intel Corporation
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "ivpu_trace.h"
+#endif
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
index 82954b91b748..908e68ea1c39 100644
--- a/drivers/accel/ivpu/vpu_boot_api.h
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -1,14 +1,13 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2020-2023, Intel Corporation.
+ * Copyright (c) 2020-2024, Intel Corporation.
*/
#ifndef VPU_BOOT_API_H
#define VPU_BOOT_API_H
/*
- * =========== FW API version information beginning ================
- * The bellow values will be used to construct the version info this way:
+ * The below values will be used to construct the version info this way:
* fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) |
* VPU_BOOT_API_VER_MINOR;
* VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes
@@ -27,19 +26,18 @@
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
-#define VPU_BOOT_API_VER_MINOR 24
+#define VPU_BOOT_API_VER_MINOR 26
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
*/
-#define VPU_BOOT_API_VER_PATCH 0
+#define VPU_BOOT_API_VER_PATCH 3
/*
* Index in the API version table
* Must be unique for each API
*/
#define VPU_BOOT_API_VER_INDEX 0
-/* ------------ FW API version information end ---------------------*/
#pragma pack(push, 4)
@@ -164,8 +162,6 @@ enum vpu_trace_destination {
/* VPU 30xx HW component IDs are sequential, so define first and last IDs. */
#define VPU_TRACE_PROC_BIT_30XX_FIRST VPU_TRACE_PROC_BIT_LRT
#define VPU_TRACE_PROC_BIT_30XX_LAST VPU_TRACE_PROC_BIT_SHV_15
-#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_30XX_FIRST
-#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_30XX_LAST
struct vpu_boot_l2_cache_config {
u8 use;
@@ -199,6 +195,17 @@ struct vpu_warm_boot_section {
*/
#define POWER_PROFILE_SURVIVABILITY 0x1
+/**
+ * Enum for dvfs_mode boot param.
+ */
+enum vpu_governor {
+ VPU_GOV_DEFAULT = 0, /* Default Governor for the system */
+ VPU_GOV_MAX_PERFORMANCE = 1, /* Maximum performance governor */
+ VPU_GOV_ON_DEMAND = 2, /* On Demand frequency control governor */
+ VPU_GOV_POWER_SAVE = 3, /* Power save governor */
+ VPU_GOV_ON_DEMAND_PRIORITY_AWARE = 4 /* On Demand priority based governor */
+};
+
struct vpu_boot_params {
u32 magic;
u32 vpu_id;
@@ -301,7 +308,14 @@ struct vpu_boot_params {
u32 temp_sensor_period_ms;
/** PLL ratio for efficient clock frequency */
u32 pn_freq_pll_ratio;
- /** DVFS Mode: Default: 0, Max Performance: 1, On Demand: 2, Power Save: 3 */
+ /**
+ * DVFS Mode:
+ * 0 - Default, DVFS mode selected by the firmware
+ * 1 - Max Performance
+ * 2 - On Demand
+ * 3 - Power Save
+ * 4 - On Demand Priority Aware
+ */
u32 dvfs_mode;
/**
* Depending on DVFS Mode:
@@ -332,8 +346,8 @@ struct vpu_boot_params {
u64 d0i3_entry_vpu_ts;
/*
* The system time of the host operating system in microseconds.
- * E.g the number of microseconds since 1st of January 1970, or whatever date the
- * host operating system uses to maintain system time.
+ * E.g. the number of microseconds since the 1st of January 1970, or whatever
+ * date the host operating system uses to maintain system time.
* This value will be used to track system time on the VPU.
* The KMD is required to update this value on every VPU reset.
*/
@@ -382,10 +396,7 @@ struct vpu_boot_params {
u32 pad6[734];
};
-/*
- * Magic numbers set between host and vpu to detect corruptio of tracing init
- */
-
+/* Magic numbers set between host and vpu to detect corruption of tracing init */
#define VPU_TRACING_BUFFER_CANARY (0xCAFECAFE)
/* Tracing buffer message format definitions */
@@ -405,7 +416,9 @@ struct vpu_tracing_buffer_header {
u32 host_canary_start;
/* offset from start of buffer for trace entries */
u32 read_index;
- u32 pad_to_cache_line_size_0[14];
+ /* keeps track of wrapping on the reader side */
+ u32 read_wrap_count;
+ u32 pad_to_cache_line_size_0[13];
/* End of first cache line */
/**
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 33f462b1a25d..7215c144158c 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -22,7 +22,7 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 16
+#define VPU_JSM_API_VER_MINOR 25
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -36,7 +36,7 @@
/*
* Number of Priority Bands for Hardware Scheduling
- * Bands: RealTime, Focus, Normal, Idle
+ * Bands: Idle(0), Normal(1), Focus(2), RealTime(3)
*/
#define VPU_HWS_NUM_PRIORITY_BANDS 4
@@ -74,6 +74,7 @@
#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
/* Job status returned when the job was preempted mid-inference */
#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
+#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU
/*
* Host <-> VPU IPC channels.
@@ -86,18 +87,58 @@
/*
* Job flags bit masks.
*/
-#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001
-#define VPU_JOB_FLAGS_PRIVATE_DATA_MASK 0xFF000000
+enum {
+ /*
+ * Null submission mask.
+ * When set, batch buffer's commands are not processed but returned as
+ * successful immediately, except fences and timestamps.
+ * When cleared, batch buffer's commands are processed normally.
+ * Used for testing and profiling purposes.
+ */
+ VPU_JOB_FLAGS_NULL_SUBMISSION_MASK = (1 << 0U),
+ /*
+ * Inline command mask.
+ * When set, the object in job queue is an inline command (see struct vpu_inline_cmd below).
+ * When cleared, the object in job queue is a job (see struct vpu_job_queue_entry below).
+ */
+ VPU_JOB_FLAGS_INLINE_CMD_MASK = (1 << 1U),
+ /*
+ * VPU private data mask.
+ * Reserved for the VPU to store private data about the job (or inline command)
+ * while being processed.
+ */
+ VPU_JOB_FLAGS_PRIVATE_DATA_MASK = 0xFFFF0000U
+};
/*
- * Sizes of the reserved areas in jobs, in bytes.
+ * Job queue flags bit masks.
*/
-#define VPU_JOB_RESERVED_BYTES 8
+enum {
+ /*
+ * No job done notification mask.
+ * When set, indicates that no job done notification should be sent for any
+ * job from this queue. When cleared, indicates that job done notification
+ * should be sent for every job completed from this queue.
+ */
+ VPU_JOB_QUEUE_FLAGS_NO_JOB_DONE_MASK = (1 << 0U),
+ /*
+ * Native fence usage mask.
+ * When set, indicates that job queue uses native fences (as inline commands
+ * in job queue). Such queues may also use legacy fences (as commands in batch buffers).
+ * When cleared, indicates the job queue only uses legacy fences.
+ * NOTE: For queues using native fences, VPU expects that all jobs in the queue
+ * are immediately followed by an inline command object. This object is expected
+ * to be a fence signal command in most cases, but can also be a NOP in case the host
+ * does not need per-job fence signalling. Other inline commands objects can be
+ * inserted between "job and inline command" pairs.
+ */
+ VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
-/*
- * Sizes of the reserved areas in job queues, in bytes.
- */
-#define VPU_JOB_QUEUE_RESERVED_BYTES 52
+ /*
+ * Enable turbo mode for testing NPU performance; not recommended for regular usage.
+ */
+ VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U)
+};
/*
* Max length (including trailing NULL char) of trace entity name (e.g., the
@@ -141,23 +182,112 @@
#define VPU_HWS_INVALID_CMDQ_HANDLE 0ULL
/*
+ * Inline commands types.
+ */
+/*
+ * NOP.
+ * VPU does nothing other than consuming the inline command object.
+ */
+#define VPU_INLINE_CMD_TYPE_NOP 0x0
+/*
+ * Fence wait.
+ * VPU waits for the fence current value to reach monitored value.
+ * Fence wait operations are executed upon job dispatching. While waiting for
+ * the fence to be satisfied, VPU blocks fetching of the next objects in the queue.
+ * Jobs present in the queue prior to the fence wait object may be processed
+ * concurrently.
+ */
+#define VPU_INLINE_CMD_TYPE_FENCE_WAIT 0x1
+/*
+ * Fence signal.
+ * VPU sets the fence current value to the provided value. If new current value
+ * is equal to or higher than monitored value, VPU sends fence signalled notification
+ * to the host. Fence signal operations are executed upon completion of all the jobs
+ * present in the queue prior to them, and in-order relative to each other in the queue.
+ * But jobs in-between them may be processed concurrently and may complete out-of-order.
+ */
+#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2
+
+/*
+ * Job scheduling priority bands for both hardware scheduling and OS scheduling.
+ */
+enum vpu_job_scheduling_priority_band {
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE = 0,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL = 1,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS = 2,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME = 3,
+ VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4,
+};
+
+/*
* Job format.
+ * Jobs define the actual workloads to be executed by a given engine.
*/
struct vpu_job_queue_entry {
- u64 batch_buf_addr; /**< Address of VPU commands batch buffer */
- u32 job_id; /**< Job ID */
- u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */
- u64 root_page_table_addr; /**< Address of root page table to use for this job */
- u64 root_page_table_update_counter; /**< Page tables update events counter */
- u64 primary_preempt_buf_addr;
+ /**< Address of VPU commands batch buffer */
+ u64 batch_buf_addr;
+ /**< Job ID */
+ u32 job_id;
+ /**< Flags bit field, see VPU_JOB_FLAGS_* above */
+ u32 flags;
+ /**
+ * Doorbell ring timestamp taken by KMD from SoC's global system clock, in
+ * microseconds. NPU can convert this value to its own fixed clock's timebase,
+ * to match other profiling timestamps.
+ */
+ u64 doorbell_timestamp;
+ /**< Extra id for job tracking, used only in the firmware perf traces */
+ u64 host_tracking_id;
/**< Address of the primary preemption buffer to use for this job */
- u32 primary_preempt_buf_size;
+ u64 primary_preempt_buf_addr;
/**< Size of the primary preemption buffer to use for this job */
- u32 secondary_preempt_buf_size;
+ u32 primary_preempt_buf_size;
/**< Size of secondary preemption buffer to use for this job */
- u64 secondary_preempt_buf_addr;
+ u32 secondary_preempt_buf_size;
/**< Address of secondary preemption buffer to use for this job */
- u8 reserved_0[VPU_JOB_RESERVED_BYTES];
+ u64 secondary_preempt_buf_addr;
+ u64 reserved_0;
+};
+
+/*
+ * Inline command format.
+ * Inline commands are the commands executed at scheduler level (typically,
+ * synchronization directives). Inline command and job objects must be of
+ * the same size and have flags field at same offset.
+ */
+struct vpu_inline_cmd {
+ u64 reserved_0;
+ /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
+ u32 type;
+ /* Flags bit field, see VPU_JOB_FLAGS_* above. */
+ u32 flags;
+ /* Inline command payload. Depends on inline command type. */
+ union {
+ /* Fence (wait and signal) commands' payload. */
+ struct {
+ /* Fence object handle. */
+ u64 fence_handle;
+ /* User VA of the current fence value. */
+ u64 current_value_va;
+ /* User VA of the monitored fence value (read-only). */
+ u64 monitored_value_va;
+ /* Value to wait for or write in fence location. */
+ u64 value;
+ /* User VA of the log buffer in which to add log entry on completion. */
+ u64 log_buffer_va;
+ } fence;
+ /* Other commands do not have a payload. */
+ /* Payload definition for future inline commands can be inserted here. */
+ u64 reserved_1[6];
+ } payload;
+};
+
+/*
+ * Job queue slots can be populated either with job objects or inline command objects.
+ */
+union vpu_jobq_slot {
+ struct vpu_job_queue_entry job;
+ struct vpu_inline_cmd inline_cmd;
};
/*
@@ -167,7 +297,21 @@ struct vpu_job_queue_header {
u32 engine_idx;
u32 head;
u32 tail;
- u8 reserved_0[VPU_JOB_QUEUE_RESERVED_BYTES];
+ u32 flags;
+ /* Set to 1 to indicate priority_band field is valid */
+ u32 priority_band_valid;
+ /*
+ * Priority for the work of this job queue, valid only if the HWS is NOT used
+ * and the `priority_band_valid` is set to 1. It is applied only during
+ * the VPU_JSM_MSG_REGISTER_DB message processing.
+ * The device firmware might use the `priority_band` to optimize the power
+ * management logic, but it will not affect the order of jobs.
+ * Available priority bands: @see enum vpu_job_scheduling_priority_band
+ */
+ u32 priority_band;
+ /* Inside realtime band assigns a further priority, limited to 0..31 range */
+ u32 realtime_priority_level;
+ u32 reserved_0[9];
};
/*
@@ -175,7 +319,7 @@ struct vpu_job_queue_header {
*/
struct vpu_job_queue {
struct vpu_job_queue_header header;
- struct vpu_job_queue_entry job[];
+ union vpu_jobq_slot slot[];
};
/**
@@ -197,9 +341,7 @@ enum vpu_trace_entity_type {
struct vpu_hws_log_buffer_header {
/* Written by VPU after adding a log entry. Initialised by host to 0. */
u32 first_free_entry_index;
- /* Incremented by VPU every time the VPU overwrites the 0th entry;
- * initialised by host to 0.
- */
+ /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
u32 wraparound_count;
/*
* This is the number of buffers that can be stored in the log buffer provided by the host.
@@ -230,14 +372,80 @@ struct vpu_hws_log_buffer_entry {
u64 operation_data[2];
};
+/* Native fence log buffer types. */
+enum vpu_hws_native_fence_log_type {
+ VPU_HWS_NATIVE_FENCE_LOG_TYPE_WAITS = 1,
+ VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2
+};
+
+/* HWS native fence log buffer header. */
+struct vpu_hws_native_fence_log_header {
+ union {
+ struct {
+ /* Index of the first free entry in buffer. */
+ u32 first_free_entry_idx;
+ /* Incremented each time NPU wraps around the buffer to write next entry. */
+ u32 wraparound_count;
+ };
+ /* Field allowing atomic update of both fields above. */
+ u64 atomic_wraparound_and_entry_idx;
+ };
+ /* Log buffer type, see enum vpu_hws_native_fence_log_type. */
+ u64 type;
+ /* Allocated number of entries in the log buffer. */
+ u64 entry_nb;
+ u64 reserved[2];
+};
+
+/* Native fence log operation types. */
+enum vpu_hws_native_fence_log_op {
+ VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0,
+ VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1
+};
+
+/* HWS native fence log entry. */
+struct vpu_hws_native_fence_log_entry {
+ /* Newly signaled/unblocked fence value. */
+ u64 fence_value;
+ /* Native fence object handle to which this operation belongs. */
+ u64 fence_handle;
+ /* Operation type, see enum vpu_hws_native_fence_log_op. */
+ u64 op_type;
+ u64 reserved_0;
+ /*
+ * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence
+ * wait was started (in NPU SysTime).
+ */
+ u64 fence_wait_start_ts;
+ u64 reserved_1;
+ /* Timestamp at which fence operation was completed (in NPU SysTime). */
+ u64 fence_end_ts;
+};
+
+/* Native fence log buffer. */
+struct vpu_hws_native_fence_log_buffer {
+ struct vpu_hws_native_fence_log_header header;
+ struct vpu_hws_native_fence_log_entry entry[];
+};
+
/*
* Host <-> VPU IPC messages types.
*/
enum vpu_ipc_msg_type {
VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
+
/* IPC Host -> Device, Async commands */
VPU_JSM_MSG_ASYNC_CMD = 0x1100,
VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
+ /**
+ * Preempt engine. The NPU stops (preempts) all the jobs currently
+ * executing on the target engine, leaving the engine idle and ready to
+ * execute new jobs.
+ * NOTE: The NPU does not remove unstarted jobs (if any) from the job queues
+ * of the target engine, but it stops processing them (until the queue
+ * doorbell is rung again); the host is responsible for resetting the job
+ * queue, either after preemption or when resubmitting jobs to the queue.
+ */
VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
VPU_JSM_MSG_REGISTER_DB = 0x1102,
VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
@@ -323,9 +531,10 @@ enum vpu_ipc_msg_type {
* NOTE: Please introduce new ASYNC commands before this one. *
*/
VPU_JSM_MSG_STATE_DUMP = 0x11FF,
+
/* IPC Host -> Device, General commands */
VPU_JSM_MSG_GENERAL_CMD = 0x1200,
- VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD,
+ VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD,
/**
* Control dyndbg behavior by executing a dyndbg command; equivalent to
* Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
@@ -335,8 +544,12 @@ enum vpu_ipc_msg_type {
* Perform the save procedure for the D0i3 entry
*/
VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202,
+
/* IPC Device -> Host, Job completion */
VPU_JSM_MSG_JOB_DONE = 0x2100,
+ /* IPC Device -> Host, Fence signalled */
+ VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101,
+
/* IPC Device -> Host, Async command completion */
VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
@@ -422,6 +635,7 @@ enum vpu_ipc_msg_type {
* NOTE: Please introduce new ASYNC responses before this one. *
*/
VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF,
+
/* IPC Device -> Host, General command completion */
VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300,
VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE,
@@ -600,11 +814,6 @@ struct vpu_jsm_metric_streamer_update {
u64 next_buffer_size;
};
-struct vpu_ipc_msg_payload_blob_deinit {
- /* 64-bit unique ID for the blob to be de-initialized. */
- u64 blob_id;
-};
-
struct vpu_ipc_msg_payload_job_done {
/* Engine to which the job was submitted. */
u32 engine_idx;
@@ -622,6 +831,21 @@ struct vpu_ipc_msg_payload_job_done {
u64 cmdq_id;
};
+/*
+ * Notification message upon native fence signalling.
+ * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED
+ */
+struct vpu_ipc_msg_payload_native_fence_signalled {
+ /* Engine ID. */
+ u32 engine_idx;
+ /* Host SSID. */
+ u32 host_ssid;
+ /* CMDQ ID */
+ u64 cmdq_id;
+ /* Fence object handle. */
+ u64 fence_handle;
+};
+
struct vpu_jsm_engine_reset_context {
/* Host SSID */
u32 host_ssid;
@@ -700,11 +924,6 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
u8 power_limit[16];
};
-struct vpu_ipc_msg_payload_blob_deinit_done {
- /* 64-bit unique ID for the blob de-initialized. */
- u64 blob_id;
-};
-
/* HWS priority band setup request / response */
struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
@@ -794,7 +1013,10 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
u32 reserved_0;
/* Command queue id */
u64 cmdq_id;
- /* Priority band to assign to work of this context */
+ /*
+ * Priority band to assign to work of this context.
+ * Available priority bands: @see enum vpu_job_scheduling_priority_band
+ */
u32 priority_band;
/* Inside realtime band assigns a further priority */
u32 realtime_priority_level;
@@ -869,9 +1091,7 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
*/
u64 notify_index;
/*
- * Enable extra events to be output to log for debug of scheduling algorithm.
- * Interpreted by VPU as a boolean to enable or disable, expected values are
- * 0 and 1.
+ * This field is deprecated and will be removed once the KMD is updated to support its removal.
*/
u32 enable_extra_events;
/* Zero Padding */
@@ -1243,10 +1463,10 @@ union vpu_ipc_msg_payload {
struct vpu_jsm_metric_streamer_start metric_streamer_start;
struct vpu_jsm_metric_streamer_stop metric_streamer_stop;
struct vpu_jsm_metric_streamer_update metric_streamer_update;
- struct vpu_ipc_msg_payload_blob_deinit blob_deinit;
struct vpu_ipc_msg_payload_ssid_release ssid_release;
struct vpu_jsm_hws_register_db hws_register_db;
struct vpu_ipc_msg_payload_job_done job_done;
+ struct vpu_ipc_msg_payload_native_fence_signalled native_fence_signalled;
struct vpu_ipc_msg_payload_engine_reset_done engine_reset_done;
struct vpu_ipc_msg_payload_engine_preempt_done engine_preempt_done;
struct vpu_ipc_msg_payload_register_db_done register_db_done;
@@ -1254,7 +1474,6 @@ union vpu_ipc_msg_payload {
struct vpu_ipc_msg_payload_query_engine_hb_done query_engine_hb_done;
struct vpu_ipc_msg_payload_get_power_level_count_done get_power_level_count_done;
struct vpu_jsm_metric_streamer_done metric_streamer_done;
- struct vpu_ipc_msg_payload_blob_deinit_done blob_deinit_done;
struct vpu_ipc_msg_payload_trace_config trace_config;
struct vpu_ipc_msg_payload_trace_capability_rsp trace_capability;
struct vpu_ipc_msg_payload_trace_get_name trace_get_name;
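
For queues created with VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK, each job must be immediately followed by an inline command slot. A minimal sketch of filling two consecutive slots, assuming the structures from this header; queue sizing, wrap-around handling and the doorbell ring are elided, and the function and 'slot_count' parameter are hypothetical:

static void example_enqueue(struct vpu_job_queue *q, u32 slot_count,
			    u64 bb_addr, u32 job_id,
			    u64 fence_handle, u64 fence_va, u64 value)
{
	u32 tail = q->header.tail;
	struct vpu_job_queue_entry *job = &q->slot[tail % slot_count].job;
	struct vpu_inline_cmd *cmd = &q->slot[(tail + 1) % slot_count].inline_cmd;

	memset(job, 0, sizeof(*job));
	job->batch_buf_addr = bb_addr;
	job->job_id = job_id;

	memset(cmd, 0, sizeof(*cmd));
	cmd->type = VPU_INLINE_CMD_TYPE_FENCE_SIGNAL;
	cmd->flags = VPU_JOB_FLAGS_INLINE_CMD_MASK;	/* marks this slot as an inline command */
	cmd->payload.fence.fence_handle = fence_handle;
	cmd->payload.fence.current_value_va = fence_va;
	cmd->payload.fence.value = value;

	q->header.tail = tail + 2;	/* doorbell ring not shown */
}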
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index ada9b1eb0787..8ab82e78dd94 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -405,6 +405,38 @@ static const struct mhi_channel_config aic100_channels[] = {
.auto_queue = false,
.wake_capable = false,
},
+ {
+ .name = "IPCR",
+ .num = 24,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .wake_capable = false,
+ },
+ {
+ .name = "IPCR",
+ .num = 25,
+ .num_elements = 32,
+ .local_elements = 0,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = MHI_CH_EE_AMSS,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ .wake_capable = false,
+ },
};
static struct mhi_event_config aic100_events[] = {
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index 9e8a8cbadf6b..d8bdab69f800 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -496,7 +496,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr
nents = sgt->nents;
nents_dma = nents;
*size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
- for_each_sgtable_sg(sgt, sg, i) {
+ for_each_sgtable_dma_sg(sgt, sg, i) {
*size -= sizeof(*asp);
/* Save 1K for possible follow-up transactions. */
if (*size < SZ_1K) {
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index e86e71c1cdd8..c20eb63750f5 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -184,7 +184,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
nents = 0;
size = size ? size : PAGE_SIZE;
- for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
+ for_each_sgtable_dma_sg(sgt_in, sg, j) {
len = sg_dma_len(sg);
if (!len)
@@ -221,7 +221,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
/* copy relevant sg node and fix page and length */
sgn = sgf;
- for_each_sgtable_sg(sgt, sg, j) {
+ for_each_sgtable_dma_sg(sgt, sg, j) {
memcpy(sg, sgn, sizeof(*sg));
if (sgn == sgf) {
sg_dma_address(sg) += offf;
@@ -301,7 +301,7 @@ static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
* fence.
*/
dev_addr = req->dev_addr;
- for_each_sgtable_sg(slice->sgt, sg, i) {
+ for_each_sgtable_dma_sg(slice->sgt, sg, i) {
slice->reqs[i].cmd = cmd;
slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
sg_dma_address(sg) : dev_addr);
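
The qaic changes above swap for_each_sgtable_sg() for for_each_sgtable_dma_sg() wherever sg_dma_address()/sg_dma_len() are consumed: after dma_map_sgtable() the IOMMU may coalesce entries, so the DMA view (sgt->nents) can be shorter than the CPU view (sgt->orig_nents). A short sketch of the distinction, assuming a mapped table (the two work helpers are hypothetical):

struct scatterlist *sg;
unsigned int i;

for_each_sgtable_sg(sgt, sg, i)		/* CPU view: all orig_nents entries */
	do_cpu_work(sg_page(sg), sg->length);

for_each_sgtable_dma_sg(sgt, sg, i)	/* DMA view: possibly fewer, coalesced entries */
	do_dma_work(sg_dma_address(sg), sg_dma_len(sg));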
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
index 20b653d99e52..ba0cf2f94732 100644
--- a/drivers/accel/qaic/qaic_debugfs.c
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -64,20 +64,9 @@ static int bootlog_show(struct seq_file *s, void *unused)
return 0;
}
-static int bootlog_fops_open(struct inode *inode, struct file *file)
-{
- return single_open(file, bootlog_show, inode->i_private);
-}
-
-static const struct file_operations bootlog_fops = {
- .owner = THIS_MODULE,
- .open = bootlog_fops_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(bootlog);
-static int read_dbc_fifo_size(struct seq_file *s, void *unused)
+static int fifo_size_show(struct seq_file *s, void *unused)
{
struct dma_bridge_chan *dbc = s->private;
@@ -85,20 +74,9 @@ static int read_dbc_fifo_size(struct seq_file *s, void *unused)
return 0;
}
-static int fifo_size_open(struct inode *inode, struct file *file)
-{
- return single_open(file, read_dbc_fifo_size, inode->i_private);
-}
-
-static const struct file_operations fifo_size_fops = {
- .owner = THIS_MODULE,
- .open = fifo_size_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(fifo_size);
-static int read_dbc_queued(struct seq_file *s, void *unused)
+static int queued_show(struct seq_file *s, void *unused)
{
struct dma_bridge_chan *dbc = s->private;
u32 tail = 0, head = 0;
@@ -115,18 +93,7 @@ static int read_dbc_queued(struct seq_file *s, void *unused)
return 0;
}
-static int queued_open(struct inode *inode, struct file *file)
-{
- return single_open(file, read_dbc_queued, inode->i_private);
-}
-
-static const struct file_operations queued_fops = {
- .owner = THIS_MODULE,
- .open = queued_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(queued);
void qaic_debugfs_init(struct qaic_drm_device *qddev)
{
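
DEFINE_SHOW_ATTRIBUTE(name) from <linux/seq_file.h> generates the open helper and file_operations from a name_show() function, which is why the show routines are renamed above. Roughly, DEFINE_SHOW_ATTRIBUTE(queued) expands to the same boilerplate this patch deletes:

static int queued_open(struct inode *inode, struct file *file)
{
	return single_open(file, queued_show, inode->i_private);
}

static const struct file_operations queued_fops = {
	.owner   = THIS_MODULE,
	.open    = queued_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};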
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index bf10156c334e..81819b9ef8d4 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -32,8 +32,9 @@
#include "qaic_timesync.h"
#include "sahara.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
+#define PCI_DEV_AIC080 0xa080
#define PCI_DEV_AIC100 0xa100
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
@@ -53,12 +54,12 @@ static void qaicm_wq_release(struct drm_device *dev, void *res)
destroy_workqueue(wq);
}
-static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *fmt)
+static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *name)
{
struct workqueue_struct *wq;
int ret;
- wq = alloc_workqueue(fmt, WQ_UNBOUND, 0);
+ wq = alloc_workqueue("%s", WQ_UNBOUND, 0, name);
if (!wq)
return ERR_PTR(-ENOMEM);
ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
@@ -207,7 +208,6 @@ static const struct drm_driver qaic_accel_driver = {
.name = QAIC_NAME,
.desc = QAIC_DESC,
- .date = "20190618",
.fops = &qaic_accel_fops,
.open = qaic_open,
@@ -365,7 +365,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
return NULL;
qdev->dev_state = QAIC_OFFLINE;
- if (id->device == PCI_DEV_AIC100) {
+ if (id->device == PCI_DEV_AIC080 || id->device == PCI_DEV_AIC100) {
qdev->num_dbc = 16;
qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
if (!qdev->dbc)
@@ -607,6 +607,7 @@ static struct mhi_driver qaic_mhi_driver = {
};
static const struct pci_device_id qaic_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC080), },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
{ }
};
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
index bf94bbab6be5..21d58aed0deb 100644
--- a/drivers/accel/qaic/sahara.c
+++ b/drivers/accel/qaic/sahara.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+#include <linux/devcoredump.h>
#include <linux/firmware.h>
#include <linux/limits.h>
#include <linux/mhi.h>
@@ -9,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/overflow.h>
#include <linux/types.h>
+#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include "sahara.h"
@@ -36,12 +38,14 @@
#define SAHARA_PACKET_MAX_SIZE 0xffffU /* MHI_MAX_MTU */
#define SAHARA_TRANSFER_MAX_SIZE 0x80000
+#define SAHARA_READ_MAX_SIZE 0xfff0U /* Avoid unaligned requests */
#define SAHARA_NUM_TX_BUF DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
SAHARA_PACKET_MAX_SIZE)
#define SAHARA_IMAGE_ID_NONE U32_MAX
#define SAHARA_VERSION 2
#define SAHARA_SUCCESS 0
+#define SAHARA_TABLE_ENTRY_STR_LEN 20
#define SAHARA_MODE_IMAGE_TX_PENDING 0x0
#define SAHARA_MODE_IMAGE_TX_COMPLETE 0x1
@@ -53,6 +57,8 @@
#define SAHARA_END_OF_IMAGE_LENGTH 0x10
#define SAHARA_DONE_LENGTH 0x8
#define SAHARA_RESET_LENGTH 0x8
+#define SAHARA_MEM_DEBUG64_LENGTH 0x18
+#define SAHARA_MEM_READ64_LENGTH 0x18
struct sahara_packet {
__le32 cmd;
@@ -80,18 +86,95 @@ struct sahara_packet {
__le32 image;
__le32 status;
} end_of_image;
+ struct {
+ __le64 table_address;
+ __le64 table_length;
+ } memory_debug64;
+ struct {
+ __le64 memory_address;
+ __le64 memory_length;
+ } memory_read64;
};
};
+struct sahara_debug_table_entry64 {
+ __le64 type;
+ __le64 address;
+ __le64 length;
+ char description[SAHARA_TABLE_ENTRY_STR_LEN];
+ char filename[SAHARA_TABLE_ENTRY_STR_LEN];
+};
+
+struct sahara_dump_table_entry {
+ u64 type;
+ u64 address;
+ u64 length;
+ char description[SAHARA_TABLE_ENTRY_STR_LEN];
+ char filename[SAHARA_TABLE_ENTRY_STR_LEN];
+};
+
+#define SAHARA_DUMP_V1_MAGIC 0x1234567890abcdef
+#define SAHARA_DUMP_V1_VER 1
+struct sahara_memory_dump_meta_v1 {
+ u64 magic;
+ u64 version;
+ u64 dump_size;
+ u64 table_size;
+};
+
+/*
+ * Layout of crashdump provided to user via devcoredump
+ * +------------------------------------------+
+ * | Crashdump Meta structure |
+ * | type: struct sahara_memory_dump_meta_v1 |
+ * +------------------------------------------+
+ * | Crashdump Table |
+ * | type: array of struct |
+ * | sahara_dump_table_entry |
+ * | |
+ * | |
+ * +------------------------------------------+
+ * | Crashdump |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +------------------------------------------+
+ *
+ * First is the metadata header. Userspace can use the magic number to verify
+ * the content type, and then check the version for the rest of the format.
+ * New versions should keep the magic number location/value, and version
+ * location, but increment the version value.
+ *
+ * For v1, the metadata lists the size of the entire dump (header + table +
+ * dump) and the size of the table. Then the dump image table, which describes
+ * the contents of the dump. Finally all the images are listed in order, with
+ * no dead space in between. Userspace can use the sizes listed in the image
+ * table to reconstruct the individual images.
+ */
+
struct sahara_context {
struct sahara_packet *tx[SAHARA_NUM_TX_BUF];
struct sahara_packet *rx;
- struct work_struct work;
+ struct work_struct fw_work;
+ struct work_struct dump_work;
struct mhi_device *mhi_dev;
const char **image_table;
u32 table_size;
u32 active_image_id;
const struct firmware *firmware;
+ u64 dump_table_address;
+ u64 dump_table_length;
+ size_t rx_size;
+ size_t rx_size_requested;
+ void *mem_dump;
+ size_t mem_dump_sz;
+ struct sahara_dump_table_entry *dump_image;
+ u64 dump_image_offset;
+ void *mem_dump_freespace;
+ u64 dump_images_left;
+ bool is_mem_dump_mode;
};
static const char *aic100_image_table[] = {
@@ -153,6 +236,8 @@ static void sahara_send_reset(struct sahara_context *context)
{
int ret;
+ context->is_mem_dump_mode = false;
+
context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);
@@ -186,7 +271,8 @@ static void sahara_hello(struct sahara_context *context)
}
if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
- le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE) {
+ le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE &&
+ le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_MEMORY_DEBUG) {
dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
le32_to_cpu(context->rx->hello.mode));
return;
@@ -320,9 +406,70 @@ static void sahara_end_of_image(struct sahara_context *context)
dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
}
+static void sahara_memory_debug64(struct sahara_context *context)
+{
+ int ret;
+
+ dev_dbg(&context->mhi_dev->dev,
+ "MEMORY DEBUG64 cmd received. length:%d table_address:%#llx table_length:%#llx\n",
+ le32_to_cpu(context->rx->length),
+ le64_to_cpu(context->rx->memory_debug64.table_address),
+ le64_to_cpu(context->rx->memory_debug64.table_length));
+
+ if (le32_to_cpu(context->rx->length) != SAHARA_MEM_DEBUG64_LENGTH) {
+ dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - length %d\n",
+ le32_to_cpu(context->rx->length));
+ return;
+ }
+
+ context->dump_table_address = le64_to_cpu(context->rx->memory_debug64.table_address);
+ context->dump_table_length = le64_to_cpu(context->rx->memory_debug64.table_length);
+
+ if (context->dump_table_length % sizeof(struct sahara_debug_table_entry64) != 0 ||
+ !context->dump_table_length) {
+ dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - table length %lld\n",
+ context->dump_table_length);
+ return;
+ }
+
+ /*
+ * From this point, the protocol flips. We make memory_read requests to
+ * the device, and the device responds with the raw data. If the device
+ * has an error, it will send an End of Image command. First we need to
+ * request the memory dump table so that we know where all the pieces
+ * of the dump that we need to consume are located.
+ */
+
+ context->is_mem_dump_mode = true;
+
+ /*
+ * Assume that the table is smaller than our MTU so that we can read it
+ * in one shot. The spec does not put an upper limit on the table, but
+ * no known device will exceed this.
+ */
+ if (context->dump_table_length > SAHARA_PACKET_MAX_SIZE) {
+ dev_err(&context->mhi_dev->dev, "Memory dump table length %lld exceeds supported size. Discarding dump\n",
+ context->dump_table_length);
+ sahara_send_reset(context);
+ return;
+ }
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_table_address);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(context->dump_table_length);
+
+ context->rx_size_requested = context->dump_table_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send read for dump table %d\n", ret);
+}
+
static void sahara_processing(struct work_struct *work)
{
- struct sahara_context *context = container_of(work, struct sahara_context, work);
+ struct sahara_context *context = container_of(work, struct sahara_context, fw_work);
int ret;
switch (le32_to_cpu(context->rx->cmd)) {
@@ -338,6 +485,12 @@ static void sahara_processing(struct work_struct *work)
case SAHARA_DONE_RESP_CMD:
/* Intentional do nothing as we don't need to exit an app */
break;
+ case SAHARA_RESET_RESP_CMD:
+ /* Intentional do nothing as we don't need to exit an app */
+ break;
+ case SAHARA_MEM_DEBUG64_CMD:
+ sahara_memory_debug64(context);
+ break;
default:
dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
le32_to_cpu(context->rx->cmd));
@@ -350,6 +503,217 @@ static void sahara_processing(struct work_struct *work)
dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
}
+static void sahara_parse_dump_table(struct sahara_context *context)
+{
+ struct sahara_dump_table_entry *image_out_table;
+ struct sahara_debug_table_entry64 *dev_table;
+ struct sahara_memory_dump_meta_v1 *dump_meta;
+ u64 table_nents;
+ u64 dump_length;
+ int ret;
+ u64 i;
+
+ table_nents = context->dump_table_length / sizeof(*dev_table);
+ context->dump_images_left = table_nents;
+ dump_length = 0;
+
+ dev_table = (struct sahara_debug_table_entry64 *)(context->rx);
+ for (i = 0; i < table_nents; ++i) {
+ /* Do not trust the device, ensure the strings are terminated */
+ dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
+ dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
+
+ dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length));
+ if (dump_length == SIZE_MAX) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ dev_dbg(&context->mhi_dev->dev,
+ "Memory dump table entry %lld type: %lld address: %#llx length: %#llx description: \"%s\" filename \"%s\"\n",
+ i,
+ le64_to_cpu(dev_table[i].type),
+ le64_to_cpu(dev_table[i].address),
+ le64_to_cpu(dev_table[i].length),
+ dev_table[i].description,
+ dev_table[i].filename);
+ }
+
+ dump_length = size_add(dump_length, sizeof(*dump_meta));
+ if (dump_length == SIZE_MAX) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+ dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents));
+ if (dump_length == SIZE_MAX) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ context->mem_dump_sz = dump_length;
+ context->mem_dump = vzalloc(dump_length);
+ if (!context->mem_dump) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+
+ /* Populate the dump metadata and table for userspace */
+ dump_meta = context->mem_dump;
+ dump_meta->magic = SAHARA_DUMP_V1_MAGIC;
+ dump_meta->version = SAHARA_DUMP_V1_VER;
+ dump_meta->dump_size = dump_length;
+ dump_meta->table_size = context->dump_table_length;
+
+ image_out_table = context->mem_dump + sizeof(*dump_meta);
+ for (i = 0; i < table_nents; ++i) {
+ image_out_table[i].type = le64_to_cpu(dev_table[i].type);
+ image_out_table[i].address = le64_to_cpu(dev_table[i].address);
+ image_out_table[i].length = le64_to_cpu(dev_table[i].length);
+ strscpy(image_out_table[i].description, dev_table[i].description,
+ SAHARA_TABLE_ENTRY_STR_LEN);
+ strscpy(image_out_table[i].filename,
+ dev_table[i].filename,
+ SAHARA_TABLE_ENTRY_STR_LEN);
+ }
+
+ context->mem_dump_freespace = &image_out_table[i];
+
+ /* Done parsing the table, switch to image dump mode */
+ context->dump_table_length = 0;
+
+ /* Request the first chunk of the first image */
+ context->dump_image = &image_out_table[0];
+ dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE);
+ /* Avoid requesting EOI sized data so that we can identify errors */
+ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
+ dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
+
+ context->dump_image_offset = dump_length;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_image->address);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);
+
+ context->rx_size_requested = dump_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to send read for dump content %d\n", ret);
+}
+
+static void sahara_parse_dump_image(struct sahara_context *context)
+{
+ u64 dump_length;
+ int ret;
+
+ memcpy(context->mem_dump_freespace, context->rx, context->rx_size);
+ context->mem_dump_freespace += context->rx_size;
+
+ if (context->dump_image_offset >= context->dump_image->length) {
+ /* Need to move to next image */
+ context->dump_image++;
+ context->dump_images_left--;
+ context->dump_image_offset = 0;
+
+ if (!context->dump_images_left) {
+ /* Dump done */
+ dev_coredumpv(context->mhi_dev->mhi_cntrl->cntrl_dev,
+ context->mem_dump,
+ context->mem_dump_sz,
+ GFP_KERNEL);
+ context->mem_dump = NULL;
+ sahara_send_reset(context);
+ return;
+ }
+ }
+
+ /* Get next image chunk */
+ dump_length = context->dump_image->length - context->dump_image_offset;
+ dump_length = min(dump_length, SAHARA_READ_MAX_SIZE);
+ /* Avoid requesting EOI sized data so that we can identify errors */
+ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
+ dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
+
+ context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
+ context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
+ context->tx[0]->memory_read64.memory_address =
+ cpu_to_le64(context->dump_image->address + context->dump_image_offset);
+ context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);
+
+ context->dump_image_offset += dump_length;
+ context->rx_size_requested = dump_length;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
+ SAHARA_MEM_READ64_LENGTH, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev,
+ "Unable to send read for dump content %d\n", ret);
+}
+
+static void sahara_dump_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, dump_work);
+ int ret;
+
+ /*
+ * We should get the expected raw data, but if the device has an error
+ * it is supposed to send EOI with an error code.
+ */
+ if (context->rx_size != context->rx_size_requested &&
+ context->rx_size != SAHARA_END_OF_IMAGE_LENGTH) {
+ dev_err(&context->mhi_dev->dev,
+ "Unexpected response to read_data. Expected size: %#zx got: %#zx\n",
+ context->rx_size_requested,
+ context->rx_size);
+ goto error;
+ }
+
+ if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
+ le32_to_cpu(context->rx->cmd) == SAHARA_END_OF_IMAGE_CMD) {
+ dev_err(&context->mhi_dev->dev,
+ "Unexpected EOI response to read_data. Status: %d\n",
+ le32_to_cpu(context->rx->end_of_image.status));
+ goto error;
+ }
+
+ if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
+ le32_to_cpu(context->rx->cmd) != SAHARA_END_OF_IMAGE_CMD) {
+ dev_err(&context->mhi_dev->dev,
+ "Invalid EOI response to read_data. CMD: %d\n",
+ le32_to_cpu(context->rx->cmd));
+ goto error;
+ }
+
+ /*
+ * Need to know if we received the dump table, or part of a dump image.
+ * Since we get raw data, we cannot tell from the data itself. Instead,
+ * we use the stored dump_table_length, which we zero after we read and
+ * process the entire table.
+ */
+ if (context->dump_table_length)
+ sahara_parse_dump_table(context);
+ else
+ sahara_parse_dump_image(context);
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
+ SAHARA_PACKET_MAX_SIZE, MHI_EOT);
+ if (ret)
+ dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
+
+ return;
+
+error:
+ vfree(context->mem_dump);
+ context->mem_dump = NULL;
+ sahara_send_reset(context);
+}
+
static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct sahara_context *context;
@@ -382,7 +746,8 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_
}
context->mhi_dev = mhi_dev;
- INIT_WORK(&context->work, sahara_processing);
+ INIT_WORK(&context->fw_work, sahara_processing);
+ INIT_WORK(&context->dump_work, sahara_dump_processing);
context->image_table = aic100_image_table;
context->table_size = ARRAY_SIZE(aic100_image_table);
context->active_image_id = SAHARA_IMAGE_ID_NONE;
@@ -405,7 +770,9 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev)
{
struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
- cancel_work_sync(&context->work);
+ cancel_work_sync(&context->fw_work);
+ cancel_work_sync(&context->dump_work);
+ vfree(context->mem_dump);
sahara_release_image(context);
mhi_unprepare_from_transfer(mhi_dev);
}
@@ -418,8 +785,14 @@ static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result
{
struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
- if (!mhi_result->transaction_status)
- schedule_work(&context->work);
+ if (!mhi_result->transaction_status) {
+ context->rx_size = mhi_result->bytes_xferd;
+ if (context->is_mem_dump_mode)
+ schedule_work(&context->dump_work);
+ else
+ schedule_work(&context->fw_work);
+ }
+
}
static const struct mhi_device_id sahara_mhi_match_table[] = {
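
For reference, a hypothetical userspace sketch that walks a v1 dump pulled from devcoredump (e.g. /sys/class/devcoredump/devcd<N>/data), mirroring the layout comment above; the struct definitions are local copies, and the host entry size is assumed to match the device table entry size, as it does in this driver:

#include <stdint.h>
#include <stdio.h>

#define SAHARA_DUMP_V1_MAGIC 0x1234567890abcdefULL

struct dump_meta { uint64_t magic, version, dump_size, table_size; };
struct dump_entry {
	uint64_t type, address, length;
	char description[20];
	char filename[20];
};

static int parse_dump(const unsigned char *buf, uint64_t len)
{
	const struct dump_meta *meta = (const void *)buf;
	const struct dump_entry *tbl = (const void *)(buf + sizeof(*meta));
	uint64_t nents, off, i;

	if (len < sizeof(*meta) || meta->magic != SAHARA_DUMP_V1_MAGIC ||
	    meta->version != 1)
		return -1;

	/* Host and device table entries share the same size here */
	nents = meta->table_size / sizeof(*tbl);
	off = sizeof(*meta) + nents * sizeof(*tbl);

	for (i = 0; i < nents; i++) {
		/* Image payload for entry i starts at buf + off */
		printf("image %llu: %s (%llu bytes at offset %llu)\n",
		       (unsigned long long)i, tbl[i].filename,
		       (unsigned long long)tbl[i].length,
		       (unsigned long long)off);
		off += tbl[i].length;
	}
	return 0;
}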
diff --git a/drivers/accessibility/speakup/Makefile b/drivers/accessibility/speakup/Makefile
index 6f6a83565c0d..14ba1cca87f4 100644
--- a/drivers/accessibility/speakup/Makefile
+++ b/drivers/accessibility/speakup/Makefile
@@ -40,9 +40,7 @@ hostprogs += makemapdata
makemapdata-objs := makemapdata.o
quiet_cmd_mkmap = MKMAP $@
- cmd_mkmap = TOPDIR=$(srctree) \
- SPKDIR=$(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD),$(srctree)/drivers/accessibility/speakup) \
- $(obj)/makemapdata > $@
+ cmd_mkmap = TOPDIR=$(srctree) SPKDIR=$(src) $(obj)/makemapdata > $@
$(obj)/mapdata.h: $(obj)/makemapdata
$(call cmd,mkmap)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d67f63d93b2a..d81b55f5068c 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -132,8 +132,17 @@ config ACPI_REV_OVERRIDE_POSSIBLE
makes it possible to force the kernel to return "5" as the supported
ACPI revision via the "acpi_rev_override" command line switch.
+config ACPI_EC
+ bool "Embedded Controller"
+ depends on HAS_IOPORT
+ default X86 || LOONGARCH
+ help
+ This driver handles communication with the microcontroller
+ on many x86/LoongArch laptops and other machines.
+
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
+ depends on ACPI_EC
help
Say N to disable Embedded Controller /sys/kernel/debug interface
@@ -433,7 +442,7 @@ config ACPI_HOTPLUG_IOAPIC
config ACPI_SBS
tristate "Smart Battery System"
- depends on X86
+ depends on X86 && ACPI_EC
select POWER_SUPPLY
help
This driver supports the Smart Battery System, another
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 61ca4afe83dc..40208a0f5dfb 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -41,7 +41,7 @@ acpi-y += resource.o
acpi-y += acpi_processor.o
acpi-y += processor_core.o
acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
-acpi-y += ec.o
+acpi-$(CONFIG_ACPI_EC) += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 7c5b040a83e8..1f69be8f51a2 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -290,7 +290,7 @@ static void acpi_ac_remove(struct platform_device *pdev)
static struct platform_driver acpi_ac_driver = {
.probe = acpi_ac_probe,
- .remove_new = acpi_ac_remove,
+ .remove = acpi_ac_remove,
.driver = {
.name = "ac",
.acpi_match_table = ac_device_ids,
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 800f97868448..49539f7528c6 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -86,7 +86,7 @@ static int fch_misc_setup(struct apd_private_data *pdata)
if (!clk_data->name)
return -ENOMEM;
- strcpy(clk_data->name, obj->string.pointer);
+ strscpy(clk_data->name, obj->string.pointer, obj->string.length);
} else {
/* Set default name to mclk if entry missing in firmware */
clk_data->name = "mclk";
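
For reference, strscpy() differs from the replaced strcpy() in that it takes a destination size, always NUL-terminates, and reports truncation. A minimal sketch (src and name are placeholders):

    const char *src = "a possibly long source string";
    char name[16];
    ssize_t n;

    n = strscpy(name, src, sizeof(name)); /* copies at most 15 chars + NUL */
    if (n == -E2BIG)
            pr_warn("name truncated\n");  /* strscpy() signals truncation */
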
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index ca87a0939135..f7fb7205028d 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -251,6 +251,10 @@ static int __init extlog_init(void)
}
extlog_l1_hdr = acpi_os_map_iomem(l1_dirbase, l1_hdr_size);
+ if (!extlog_l1_hdr) {
+ rc = -ENOMEM;
+ goto err_release_l1_hdr;
+ }
l1_head = (struct extlog_l1_head *)extlog_l1_hdr;
l1_size = l1_head->total_len;
l1_percpu_entry = l1_head->entries;
@@ -268,6 +272,10 @@ static int __init extlog_init(void)
goto err;
}
extlog_l1_addr = acpi_os_map_iomem(l1_dirbase, l1_size);
+ if (!extlog_l1_addr) {
+ rc = -ENOMEM;
+ goto err_release_l1_dir;
+ }
l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size);
/* remap elog table */
@@ -279,6 +287,10 @@ static int __init extlog_init(void)
goto err_release_l1_dir;
}
elog_addr = acpi_os_map_iomem(elog_base, elog_size);
+ if (!elog_addr) {
+ rc = -ENOMEM;
+ goto err_release_elog;
+ }
rc = -ENOMEM;
/* allocate buffer to save elog record */
@@ -300,6 +312,8 @@ err_release_l1_dir:
if (extlog_l1_addr)
acpi_os_unmap_iomem(extlog_l1_addr, l1_size);
release_mem_region(l1_dirbase, l1_size);
+err_release_l1_hdr:
+ release_mem_region(l1_dirbase, l1_hdr_size);
err:
pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n");
return rc;
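
The new err_release_l1_hdr label extends the usual goto-unwind ladder: each failure path releases exactly the resources acquired before it, in reverse order. A generic sketch of the pattern (acquire_a/acquire_b/release_a are placeholders, not extlog functions):

    rc = -ENOMEM;
    a = acquire_a();
    if (!a)
            goto err;
    b = acquire_b();
    if (!b)
            goto err_release_a;       /* only 'a' exists at this point */
    return 0;
    err_release_a:
            release_a(a);
    err:
            return rc;
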
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 42b7220d4cfd..3fde4496f8a2 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -19,6 +19,7 @@
#include <linux/acpi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <asm/cpuid.h>
#include <asm/mwait.h>
#include <xen/xen.h>
@@ -46,10 +47,8 @@ static void power_saving_mwait_init(void)
if (!boot_cpu_has(X86_FEATURE_MWAIT))
return;
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return;
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+ cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &edx);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
@@ -462,7 +461,7 @@ MODULE_DEVICE_TABLE(acpi, pad_device_ids);
static struct platform_driver acpi_pad_driver = {
.probe = acpi_pad_probe,
- .remove_new = acpi_pad_remove,
+ .remove = acpi_pad_remove,
.driver = {
.dev_groups = acpi_pad_groups,
.name = "processor_aggregator",
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index b831cb8e53dc..825c2a8acea4 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -684,7 +684,7 @@ static struct platform_driver acpi_tad_driver = {
.acpi_match_table = acpi_tad_ids,
},
.probe = acpi_tad_probe,
- .remove_new = acpi_tad_remove,
+ .remove = acpi_tad_remove,
};
MODULE_DEVICE_TABLE(acpi, acpi_tad_ids);
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 8274a17872ed..a972831dbd66 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -610,16 +610,28 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
return 0;
}
+/**
+ * acpi_video_device_EDID() - Get EDID from ACPI _DDC
+ * @device: video output device (LCD, CRT, ..)
+ * @edid: address for returned EDID pointer
+ * @length: _DDC length to request (must be a multiple of 128)
+ *
+ * Get EDID from ACPI _DDC. On success, a pointer to the EDID data is written
+ * to the @edid address, and the length of the EDID is returned. The caller is
+ * responsible for freeing the edid pointer.
+ *
+ * Return: the length of the EDID (positive value) on success, or a negative
+ * error value.
+ */
static int
-acpi_video_device_EDID(struct acpi_video_device *device,
- union acpi_object **edid, int length)
+acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length)
{
- int status;
+ acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
-
+ int ret;
*edid = NULL;
@@ -636,16 +648,17 @@ acpi_video_device_EDID(struct acpi_video_device *device,
obj = buffer.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER)
- *edid = obj;
- else {
+ if (obj && obj->type == ACPI_TYPE_BUFFER) {
+ *edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
+ ret = *edid ? obj->buffer.length : -ENOMEM;
+ } else {
acpi_handle_debug(device->dev->handle,
"Invalid _DDC data for length %d\n", length);
- status = -EFAULT;
- kfree(obj);
+ ret = -EFAULT;
}
- return status;
+ kfree(obj);
+ return ret;
}
/* bus */
@@ -1435,9 +1448,7 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
- union acpi_object *buffer = NULL;
- acpi_status status;
- int i, length;
+ int i, length, ret;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1477,16 +1488,10 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
}
for (length = 512; length > 0; length -= 128) {
- status = acpi_video_device_EDID(video_device, &buffer,
- length);
- if (ACPI_SUCCESS(status))
- break;
+ ret = acpi_video_device_EDID(video_device, edid, length);
+ if (ret > 0)
+ return ret;
}
- if (!length)
- continue;
-
- *edid = buffer->buffer.pointer;
- return length;
}
return -ENODEV;
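
With this change, a caller of acpi_video_get_edid() receives the EDID length and owns the returned buffer. A hedged usage sketch (adev stands for a struct acpi_device the caller already holds):

    void *edid;
    int len;

    len = acpi_video_get_edid(adev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid);
    if (len > 0) {
            /* consume the first 'len' bytes of EDID data ... */
            kfree(edid);              /* caller frees the kmemdup()ed copy */
    }
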
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 79bbfe00d241..b8543a34caea 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -103,8 +103,6 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_status acpi_hw_enable_all_runtime_gpes(void);
-acpi_status acpi_hw_enable_all_wakeup_gpes(void);
-
u8 acpi_hw_check_all_gpes(acpi_handle gpe_skip_device, u32 gpe_skip_number);
acpi_status
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 95f78383bbdb..bff2d099f469 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -232,8 +232,6 @@ acpi_remove_address_space_handler(acpi_handle device,
/* Now we can delete the handler object */
- acpi_os_release_mutex(handler_obj->address_space.
- context_mutex);
acpi_ut_remove_reference(handler_obj);
goto unlock_and_exit;
}
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 5c22720f43cc..04731a5b01fa 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -880,7 +880,7 @@ static struct platform_device *einj_dev;
* triggering a section mismatch warning.
*/
static struct platform_driver einj_driver __refdata = {
- .remove_new = __exit_p(einj_remove),
+ .remove = __exit_p(einj_remove),
.driver = {
.name = "acpi-einj",
},
diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c
index a4e709937236..78da9ae543a2 100644
--- a/drivers/acpi/apei/einj-cxl.c
+++ b/drivers/acpi/apei/einj-cxl.c
@@ -45,7 +45,7 @@ int einj_cxl_available_error_type_show(struct seq_file *m, void *v)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_available_error_type_show, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_available_error_type_show, "CXL");
static int cxl_dport_get_sbdf(struct pci_dev *dport_dev, u64 *sbdf)
{
@@ -83,7 +83,7 @@ int einj_cxl_inject_rch_error(u64 rcrb, u64 type)
return einj_cxl_rch_error_inject(type, 0x2, rcrb, GENMASK_ULL(63, 0),
0, 0);
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_rch_error, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_rch_error, "CXL");
int einj_cxl_inject_error(struct pci_dev *dport, u64 type)
{
@@ -104,10 +104,10 @@ int einj_cxl_inject_error(struct pci_dev *dport, u64 type)
return einj_error_inject(type, 0x4, 0, 0, 0, param4);
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_error, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_inject_error, "CXL");
bool einj_cxl_is_initialized(void)
{
return einj_initialized;
}
-EXPORT_SYMBOL_NS_GPL(einj_cxl_is_initialized, CXL);
+EXPORT_SYMBOL_NS_GPL(einj_cxl_is_initialized, "CXL");
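
These hunks are part of the treewide switch of symbol namespaces from bare preprocessor tokens to string literals; consumers change in the same way. For example, a module importing this namespace would now write:

    MODULE_IMPORT_NS("CXL");          /* previously: MODULE_IMPORT_NS(CXL) */
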
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index ada93cfde9ba..b72772494655 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -173,8 +173,6 @@ static struct gen_pool *ghes_estatus_pool;
static struct ghes_estatus_cache __rcu *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;
-static int ghes_panic_timeout __read_mostly = 30;
-
static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx)
{
phys_addr_t paddr;
@@ -726,7 +724,7 @@ int cxl_cper_register_work(struct work_struct *work)
cxl_cper_work = work;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, "CXL");
int cxl_cper_unregister_work(struct work_struct *work)
{
@@ -737,13 +735,13 @@ int cxl_cper_unregister_work(struct work_struct *work)
cxl_cper_work = NULL;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, "CXL");
int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
{
return kfifo_get(&cxl_cper_fifo, wd);
}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");
static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
@@ -983,14 +981,16 @@ static void __ghes_panic(struct ghes *ghes,
struct acpi_hest_generic_status *estatus,
u64 buf_paddr, enum fixed_addresses fixmap_idx)
{
+ const char *msg = GHES_PFX "Fatal hardware error";
+
__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
- /* reboot to log the error! */
if (!panic_timeout)
- panic_timeout = ghes_panic_timeout;
- panic("Fatal hardware error!");
+ pr_emerg("%s but panic disabled\n", msg);
+
+ panic(msg);
}
static int ghes_proc(struct ghes *ghes)
@@ -1605,7 +1605,7 @@ static struct platform_driver ghes_platform_driver = {
.name = "GHES",
},
.probe = ghes_probe,
- .remove_new = ghes_remove,
+ .remove = ghes_remove,
};
void __init acpi_ghes_init(void)
diff --git a/drivers/acpi/arm64/agdi.c b/drivers/acpi/arm64/agdi.c
index f5f21dd0d277..e0df3daa4abf 100644
--- a/drivers/acpi/arm64/agdi.c
+++ b/drivers/acpi/arm64/agdi.c
@@ -88,7 +88,7 @@ static struct platform_driver agdi_driver = {
.name = "agdi",
},
.probe = agdi_probe,
- .remove_new = agdi_remove,
+ .remove = agdi_remove,
};
void __init acpi_agdi_init(void)
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index c0e77c1c8e09..3561553eff8b 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -36,19 +36,25 @@ struct acpi_gtdt_descriptor {
static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;
-static inline __init void *next_platform_timer(void *platform_timer)
+static __init bool platform_timer_valid(void *platform_timer)
{
struct acpi_gtdt_header *gh = platform_timer;
- platform_timer += gh->length;
- if (platform_timer < acpi_gtdt_desc.gtdt_end)
- return platform_timer;
+ return (platform_timer >= (void *)(acpi_gtdt_desc.gtdt + 1) &&
+ platform_timer < acpi_gtdt_desc.gtdt_end &&
+ gh->length != 0 &&
+ platform_timer + gh->length <= acpi_gtdt_desc.gtdt_end);
+}
+
+static __init void *next_platform_timer(void *platform_timer)
+{
+ struct acpi_gtdt_header *gh = platform_timer;
- return NULL;
+ return platform_timer + gh->length;
}
#define for_each_platform_timer(_g) \
- for (_g = acpi_gtdt_desc.platform_timer; _g; \
+ for (_g = acpi_gtdt_desc.platform_timer; platform_timer_valid(_g);\
_g = next_platform_timer(_g))
static inline bool is_timer_block(void *platform_timer)
@@ -157,6 +163,7 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
{
void *platform_timer;
struct acpi_table_gtdt *gtdt;
+ int cnt = 0;
gtdt = container_of(table, struct acpi_table_gtdt, header);
acpi_gtdt_desc.gtdt = gtdt;
@@ -176,12 +183,16 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
return 0;
}
- platform_timer = (void *)gtdt + gtdt->platform_timer_offset;
- if (platform_timer < (void *)table + sizeof(struct acpi_table_gtdt)) {
+ acpi_gtdt_desc.platform_timer = (void *)gtdt + gtdt->platform_timer_offset;
+ for_each_platform_timer(platform_timer)
+ cnt++;
+
+ if (cnt != gtdt->platform_timer_count) {
+ acpi_gtdt_desc.platform_timer = NULL;
pr_err(FW_BUG "invalid timer data.\n");
return -EINVAL;
}
- acpi_gtdt_desc.platform_timer = platform_timer;
+
if (platform_timer_count)
*platform_timer_count = gtdt->platform_timer_count;
@@ -283,7 +294,7 @@ error:
if (frame->virt_irq > 0)
acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt);
frame->virt_irq = 0;
- } while (i-- >= 0 && gtdt_frame--);
+ } while (i-- > 0 && gtdt_frame--);
return -EINVAL;
}
@@ -352,7 +363,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
}
irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
- res[2] = (struct resource)DEFINE_RES_IRQ(irq);
+ res[2] = DEFINE_RES_IRQ(irq);
if (irq <= 0) {
pr_warn("failed to map the Watchdog interrupt.\n");
nr_res--;
diff --git a/drivers/acpi/arm64/init.c b/drivers/acpi/arm64/init.c
index d0c8aed90fd1..7a47d8095a7d 100644
--- a/drivers/acpi/arm64/init.c
+++ b/drivers/acpi/arm64/init.c
@@ -2,7 +2,7 @@
#include <linux/acpi.h>
#include "init.h"
-void __init acpi_arm_init(void)
+void __init acpi_arch_init(void)
{
if (IS_ENABLED(CONFIG_ACPI_AGDI))
acpi_agdi_init();
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 4c745a26226b..98759d6199d3 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1218,6 +1218,17 @@ static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}
+static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node)
+{
+ struct acpi_iort_memory_access *memory_access;
+ struct acpi_iort_root_complex *pci_rc;
+
+ pci_rc = (struct acpi_iort_root_complex *)node->node_data;
+ memory_access =
+ (struct acpi_iort_memory_access *)&pci_rc->memory_properties;
+ return memory_access->memory_flags & ACPI_IORT_MF_CANWBS;
+}
+
static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
u32 streamid)
{
@@ -1335,6 +1346,8 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
fwspec = dev_iommu_fwspec_get(dev);
if (fwspec && iort_pci_rc_supports_ats(node))
fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ if (fwspec && iort_pci_rc_supports_canwbs(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_CANWBS;
} else {
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
@@ -1703,6 +1716,8 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
/* HiSilicon Hip09 Platform */
{"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ {"HISI ", "HIP09A ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
/* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
{"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 65fa3444367a..6760330a8af5 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -717,7 +717,7 @@ static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
}
list_del_init(&hook->list);
- pr_info("extension unregistered: %s\n", hook->name);
+ pr_info("hook unregistered: %s\n", hook->name);
}
void battery_hook_unregister(struct acpi_battery_hook *hook)
@@ -751,18 +751,18 @@ void battery_hook_register(struct acpi_battery_hook *hook)
if (hook->add_battery(battery->bat, hook)) {
/*
* If an add_battery() callback returns non-zero,
- * the registration of the extension has failed,
+ * the registration of the hook has failed,
* and we will not add it to the list of loaded
* hooks.
*/
- pr_err("extension failed to load: %s", hook->name);
+ pr_err("hook failed to load: %s", hook->name);
battery_hook_unregister_unlocked(hook);
goto end;
}
power_supply_changed(battery->bat);
}
- pr_info("new extension: %s\n", hook->name);
+ pr_info("new hook: %s\n", hook->name);
end:
mutex_unlock(&hook_mutex);
}
@@ -805,10 +805,10 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
if (hook_node->add_battery(battery->bat, hook_node)) {
/*
- * The notification of the extensions has failed, to
- * prevent further errors we will unload the extension.
+ * The notification of the hook has failed; to
+ * prevent further errors we unload the hook.
*/
- pr_err("error in extension, unloading: %s",
+ pr_err("error in hook, unloading: %s",
hook_node->name);
battery_hook_unregister_unlocked(hook_node);
}
@@ -853,6 +853,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
struct power_supply_config psy_cfg = {
.drv_data = battery,
.attr_grp = acpi_battery_groups,
+ .no_wakeup_source = true,
};
bool full_cap_broken = false;
@@ -888,7 +889,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
battery->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
battery->bat_desc.get_property = acpi_battery_get_property;
- battery->bat = power_supply_register_no_ws(&battery->device->dev,
+ battery->bat = power_supply_register(&battery->device->dev,
&battery->bat_desc, &psy_cfg);
if (IS_ERR(battery->bat)) {
@@ -1218,15 +1219,21 @@ static int acpi_battery_add(struct acpi_device *device)
if (device->dep_unmet)
return -EPROBE_DEFER;
- battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
+ battery = devm_kzalloc(&device->dev, sizeof(*battery), GFP_KERNEL);
if (!battery)
return -ENOMEM;
battery->device = device;
strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
- mutex_init(&battery->lock);
- mutex_init(&battery->sysfs_lock);
+ result = devm_mutex_init(&device->dev, &battery->lock);
+ if (result)
+ return result;
+
+ result = devm_mutex_init(&device->dev, &battery->sysfs_lock);
+ if (result)
+ return result;
+
if (acpi_has_method(battery->device->handle, "_BIX"))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
@@ -1238,7 +1245,9 @@ static int acpi_battery_add(struct acpi_device *device)
device->status.battery_present ? "present" : "absent");
battery->pm_nb.notifier_call = battery_notify;
- register_pm_notifier(&battery->pm_nb);
+ result = register_pm_notifier(&battery->pm_nb);
+ if (result)
+ goto fail;
device_init_wakeup(&device->dev, 1);
@@ -1254,9 +1263,6 @@ fail_pm:
unregister_pm_notifier(&battery->pm_nb);
fail:
sysfs_remove_battery(battery);
- mutex_destroy(&battery->lock);
- mutex_destroy(&battery->sysfs_lock);
- kfree(battery);
return result;
}
@@ -1276,13 +1282,8 @@ static void acpi_battery_remove(struct acpi_device *device)
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
sysfs_remove_battery(battery);
-
- mutex_destroy(&battery->lock);
- mutex_destroy(&battery->sysfs_lock);
- kfree(battery);
}
-#ifdef CONFIG_PM_SLEEP
/* this is needed to learn about changes made in suspended state */
static int acpi_battery_resume(struct device *dev)
{
@@ -1299,11 +1300,8 @@ static int acpi_battery_resume(struct device *dev)
acpi_battery_update(battery, true);
return 0;
}
-#else
-#define acpi_battery_resume NULL
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
static struct acpi_driver acpi_battery_driver = {
.name = "battery",
@@ -1313,7 +1311,7 @@ static struct acpi_driver acpi_battery_driver = {
.add = acpi_battery_add,
.remove = acpi_battery_remove,
},
- .drv.pm = &acpi_battery_pm,
+ .drv.pm = pm_sleep_ptr(&acpi_battery_pm),
.drv.probe_type = PROBE_PREFER_ASYNCHRONOUS,
};
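
The battery conversion above leans on devm: resources bound to the device are released automatically on unbind, which is why the kfree()/mutex_destroy() calls disappear from both the error paths and the remove callback. A condensed sketch of the idiom (struct foo and foo_add() are illustrative):

    struct foo {
            struct mutex lock;
    };

    static int foo_add(struct acpi_device *adev)
    {
            struct foo *f;
            int ret;

            f = devm_kzalloc(&adev->dev, sizeof(*f), GFP_KERNEL); /* freed on unbind */
            if (!f)
                    return -ENOMEM;

            ret = devm_mutex_init(&adev->dev, &f->lock);  /* destroyed on unbind */
            if (ret)
                    return ret;

            return 0;
    }
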
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index d1d9c9289087..35ece8e9f15d 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -29,7 +29,7 @@ BGRT_SHOW(type, image_type);
BGRT_SHOW(xoffset, image_offset_x);
BGRT_SHOW(yoffset, image_offset_y);
-static BIN_ATTR_SIMPLE_RO(image);
+static __ro_after_init BIN_ATTR_SIMPLE_RO(image);
static struct attribute *bgrt_attributes[] = {
&bgrt_attr_version.attr,
@@ -40,14 +40,14 @@ static struct attribute *bgrt_attributes[] = {
NULL,
};
-static struct bin_attribute *bgrt_bin_attributes[] = {
+static const struct bin_attribute *const bgrt_bin_attributes[] = {
&bin_attr_image,
NULL,
};
static const struct attribute_group bgrt_attribute_group = {
.attrs = bgrt_attributes,
- .bin_attrs = bgrt_bin_attributes,
+ .bin_attrs_new = bgrt_bin_attributes,
};
int __init acpi_parse_bgrt(struct acpi_table_header *table)
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 16917dc3ad60..058910af82bc 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1434,6 +1434,8 @@ static int __init acpi_bus_init(void)
struct kobject *acpi_kobj;
EXPORT_SYMBOL_GPL(acpi_kobj);
+void __weak __init acpi_arch_init(void) { }
+
static int __init acpi_init(void)
{
int result;
@@ -1461,8 +1463,7 @@ static int __init acpi_init(void)
acpi_viot_early_init();
acpi_hest_init();
acpi_ghes_init();
- acpi_arm_init();
- acpi_riscv_init();
+ acpi_arch_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 51470208e6da..7773e6b860e7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -130,6 +130,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
},
.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
+ {
+ /*
+ * Samsung Galaxy Book2: the initial _LID device notification
+ * returns lid closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "750XED"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
{}
};
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index b73b3aa92f3f..f193e713825a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -671,10 +671,6 @@ static int pcc_data_alloc(int pcc_ss_id)
* )
*/
-#ifndef arch_init_invariance_cppc
-static inline void arch_init_invariance_cppc(void) { }
-#endif
-
/**
* acpi_cppc_processor_probe - Search for per CPU _CPC objects.
* @pr: Ptr to acpi_processor containing this CPU's logical ID.
@@ -867,7 +863,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
- spin_lock_init(&cpc_ptr->rmw_lock);
+ raw_spin_lock_init(&cpc_ptr->rmw_lock);
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
@@ -905,8 +901,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- arch_init_invariance_cppc();
-
kfree(output.pointer);
return 0;
@@ -1017,7 +1011,8 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = 0;
size = GET_BIT_WIDTH(reg);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
+ reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
u32 val_u32;
acpi_status status;
@@ -1087,10 +1082,12 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
struct cpc_desc *cpc_desc;
+ unsigned long flags;
size = GET_BIT_WIDTH(reg);
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
+ reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
@@ -1126,7 +1123,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return -ENODEV;
}
- spin_lock(&cpc_desc->rmw_lock);
+ raw_spin_lock_irqsave(&cpc_desc->rmw_lock, flags);
switch (size) {
case 8:
prev_val = readb_relaxed(vaddr);
@@ -1141,11 +1138,10 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
prev_val = readq_relaxed(vaddr);
break;
default:
- spin_unlock(&cpc_desc->rmw_lock);
+ raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
return -EFAULT;
}
val = MASK_VAL_WRITE(reg, prev_val, val);
- val |= prev_val;
}
switch (size) {
@@ -1174,7 +1170,7 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- spin_unlock(&cpc_desc->rmw_lock);
+ raw_spin_unlock_irqrestore(&cpc_desc->rmw_lock, flags);
return ret_val;
}
@@ -1916,9 +1912,15 @@ unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_freq - caps->lowest_freq;
+ /* Avoid special case when nominal_freq is equal to lowest_freq */
+ if (caps->lowest_freq == caps->nominal_freq) {
+ mul = caps->nominal_freq;
+ div = caps->nominal_perf;
+ } else {
+ mul = caps->nominal_freq - caps->lowest_freq;
+ div = caps->nominal_perf - caps->lowest_perf;
+ }
mul *= KHZ_PER_MHZ;
- div = caps->nominal_perf - caps->lowest_perf;
offset = caps->nominal_freq * KHZ_PER_MHZ -
div64_u64(caps->nominal_perf * mul, div);
} else {
@@ -1939,11 +1941,17 @@ unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
{
s64 retval, offset = 0;
static u64 max_khz;
- u64 mul, div;
+ u64 mul, div;
if (caps->lowest_freq && caps->nominal_freq) {
- mul = caps->nominal_perf - caps->lowest_perf;
- div = caps->nominal_freq - caps->lowest_freq;
+ /* Avoid special case when nominal_freq is equal to lowest_freq */
+ if (caps->lowest_freq == caps->nominal_freq) {
+ mul = caps->nominal_perf;
+ div = caps->nominal_freq;
+ } else {
+ mul = caps->nominal_perf - caps->lowest_perf;
+ div = caps->nominal_freq - caps->lowest_freq;
+ }
/*
* We don't need to convert to kHz for computing offset and can
* directly use nominal_freq and lowest_freq as the div64_u64
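
The conversion is a linear map between abstract performance and frequency, anchored at the (lowest, nominal) points. A worked example with made-up caps values:

    /*
     * caps: lowest_perf  = 4,  lowest_freq  =  400 MHz
     *       nominal_perf = 20, nominal_freq = 2000 MHz
     *
     * mul    = (2000 - 400) MHz * 1000            = 1,600,000 kHz
     * div    = 20 - 4                             = 16
     * offset = 2000*1000 - (20 * 1,600,000) / 16  = 0 kHz
     *
     * cppc_perf_to_khz(caps, 10) = 0 + 10 * 1,600,000 / 16 = 1,000,000 kHz
     */
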
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index d202730fafd8..952216c67d58 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -152,13 +152,14 @@ static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1064", 0},
{"INTC106B", 0},
{"INTC10A3", 0},
+ {"INTC10D7", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
static struct platform_driver pch_fivr_driver = {
.probe = pch_fivr_add,
- .remove_new = pch_fivr_remove,
+ .remove = pch_fivr_remove,
.driver = {
.name = "dptf_pch_fivr",
.acpi_match_table = pch_fivr_device_ids,
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 8023b3e23315..e8caf4106ff9 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -236,13 +236,15 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC106D", 0},
{"INTC10A4", 0},
{"INTC10A5", 0},
+ {"INTC10D8", 0},
+ {"INTC10D9", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
static struct platform_driver dptf_power_driver = {
.probe = dptf_power_add,
- .remove_new = dptf_power_remove,
+ .remove = dptf_power_remove,
.driver = {
.name = "dptf_power",
.acpi_match_table = int3407_device_ids,
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index 014ada759954..aef7aca2161d 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -55,6 +55,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC10A3"},
{"INTC10A4"},
{"INTC10A5"},
+ {"INTC10D4"},
+ {"INTC10D5"},
+ {"INTC10D6"},
+ {"INTC10D7"},
+ {"INTC10D8"},
+ {"INTC10D9"},
{""},
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 25399f6dde7e..8db09d81918f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1677,8 +1677,8 @@ static int acpi_ec_add(struct acpi_device *device)
struct acpi_ec *ec;
int ret;
- strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_EC_CLASS);
+ strscpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_EC_CLASS);
if (boot_ec && (boot_ec->handle == device->handle ||
!strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) {
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index d199a19bb292..96a9aaaaf9f7 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -28,8 +28,8 @@ int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
{
struct acpi_bus_event event;
- strcpy(event.device_class, dev->pnp.device_class);
- strcpy(event.bus_id, dev->pnp.bus_id);
+ strscpy(event.device_class, dev->pnp.device_class);
+ strscpy(event.bus_id, dev->pnp.bus_id);
event.type = type;
event.data = data;
return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index 11778c93254b..5c35cbc7f6ff 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -185,7 +185,7 @@ static const struct acpi_device_id ged_acpi_ids[] = {
static struct platform_driver ged_driver = {
.probe = ged_probe,
- .remove_new = ged_remove,
+ .remove = ged_remove,
.shutdown = ged_shutdown,
.driver = {
.name = MODULE_NAME,
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index db25a3898af7..488b51e2cb31 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -19,6 +19,7 @@
{"INTC1063", }, /* Fan for Meteor Lake generation */ \
{"INTC106A", }, /* Fan for Lunar Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
+ {"INTC10D6", }, /* Fan for Panther Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
#define ACPI_FPS_NAME_LEN 20
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 7cea4495f19b..10016f52f4f4 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -371,19 +371,25 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = sysfs_create_link(&pdev->dev.kobj,
&cdev->device.kobj,
"thermal_cooling");
- if (result)
+ if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
+ goto err_unregister;
+ }
result = sysfs_create_link(&cdev->device.kobj,
&pdev->dev.kobj,
"device");
if (result) {
dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
- goto err_end;
+ goto err_remove_link;
}
return 0;
+err_remove_link:
+ sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
+err_unregister:
+ thermal_cooling_device_unregister(cdev);
err_end:
if (fan->acpi4)
acpi_fan_delete_attributes(device);
@@ -448,7 +454,7 @@ static const struct dev_pm_ops acpi_fan_pm = {
static struct platform_driver acpi_fan_driver = {
.probe = acpi_fan_probe,
- .remove_new = acpi_fan_remove,
+ .remove = acpi_fan_remove,
.driver = {
.name = "acpi-fan",
.acpi_match_table = fan_device_ids,
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ced7dff9a5db..00910ccd7eda 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -215,6 +215,8 @@ extern struct acpi_ec *first_ec;
/* External interfaces use first EC only, so remember */
typedef int (*acpi_ec_query_func) (void *data);
+#ifdef CONFIG_ACPI_EC
+
void acpi_ec_init(void);
void acpi_ec_ecdt_probe(void);
void acpi_ec_dsdt_probe(void);
@@ -231,6 +233,29 @@ void acpi_ec_flush_work(void);
bool acpi_ec_dispatch_gpe(void);
#endif
+#else
+
+static inline void acpi_ec_init(void) {}
+static inline void acpi_ec_ecdt_probe(void) {}
+static inline void acpi_ec_dsdt_probe(void) {}
+static inline void acpi_ec_block_transactions(void) {}
+static inline void acpi_ec_unblock_transactions(void) {}
+static inline int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ acpi_handle handle, acpi_ec_query_func func,
+ void *data)
+{
+ return -ENXIO;
+}
+static inline void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) {}
+static inline void acpi_ec_register_opregions(struct acpi_device *adev) {}
+
+static inline void acpi_ec_flush_work(void) {}
+static inline bool acpi_ec_dispatch_gpe(void)
+{
+ return false;
+}
+
+#endif
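
The stub block above follows the standard pattern for config-gated internals: callers stay unconditional and the compiler discards the empty inlines when the option is off. In miniature (CONFIG_FOO and foo_init are placeholders):

    #ifdef CONFIG_FOO
    void foo_init(void);
    #else
    static inline void foo_init(void) {}   /* no-op when CONFIG_FOO=n */
    #endif
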
/*--------------------------------------------------------------------------
Suspend/Resume
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
index 92b658f92dc0..5b85989f96be 100644
--- a/drivers/acpi/mipi-disco-img.c
+++ b/drivers/acpi/mipi-disco-img.c
@@ -624,8 +624,7 @@ static void init_crs_csi2_swnodes(struct crs_csi2 *csi2)
if (!fwnode_property_present(adev_fwnode, "rotation")) {
struct acpi_pld_info *pld;
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_get_physical_device_location(handle, &pld)) {
swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_ROTATION)] =
PROPERTY_ENTRY_U32("rotation",
pld->rotation * 45U);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 5429ec9ef06f..a5d47819b3a4 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -454,8 +454,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
if (cmd_rc)
*cmd_rc = -EINVAL;
- if (cmd == ND_CMD_CALL)
+ if (cmd == ND_CMD_CALL) {
+ if (!buf || buf_len < sizeof(*call_pkg))
+ return -EINVAL;
+
call_pkg = buf;
+ }
+
func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
if (func < 0)
return func;
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 1a902a02390f..bfbb08b1e6af 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -151,7 +151,7 @@ int acpi_get_genport_coordinates(u32 uid,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, CXL);
+EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, "CXL");
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
@@ -442,9 +442,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
return -EINVAL;
}
- pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
- hmat_loc->flags, hmat_data_type(type), ipds, tpds,
- hmat_loc->entry_base_unit);
+ pr_debug("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
+ hmat_loc->flags, hmat_data_type(type), ipds, tpds,
+ hmat_loc->entry_base_unit);
inits = (u32 *)(hmat_loc + 1);
targs = inits + ipds;
@@ -455,9 +455,9 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
value = hmat_normalize(entries[init * tpds + targ],
hmat_loc->entry_base_unit,
type);
- pr_info(" Initiator-Target[%u-%u]:%u%s\n",
- inits[init], targs[targ], value,
- hmat_data_type_suffix(type));
+ pr_debug(" Initiator-Target[%u-%u]:%u%s\n",
+ inits[init], targs[targ], value,
+ hmat_data_type_suffix(type));
hmat_update_target(targs[targ], inits[init],
mem_hier, type, value);
@@ -485,9 +485,9 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
}
attrs = cache->cache_attributes;
- pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
- cache->memory_PD, cache->cache_size, attrs,
- cache->number_of_SMBIOShandles);
+ pr_debug("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ cache->memory_PD, cache->cache_size, attrs,
+ cache->number_of_SMBIOShandles);
target = find_mem_target(cache->memory_PD);
if (!target)
@@ -546,9 +546,9 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
}
if (hmat_revision == 1)
- pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
- p->reserved3, p->reserved4, p->flags, p->processor_PD,
- p->memory_PD);
+ pr_debug("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ p->reserved3, p->reserved4, p->flags, p->processor_PD,
+ p->memory_PD);
else
pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index bec0dcd1f9c3..00ac0d7bb8c9 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -81,6 +81,101 @@ int acpi_map_pxm_to_node(int pxm)
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);
+#ifdef CONFIG_NUMA_EMU
+/*
+ * Take max_nid - 1 fake-numa nodes into account in both
+ * pxm_to_node_map()/node_to_pxm_map[] tables.
+ */
+int __init fix_pxm_node_maps(int max_nid)
+{
+ static int pxm_to_node_map_copy[MAX_PXM_DOMAINS] __initdata
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
+ static int node_to_pxm_map_copy[MAX_NUMNODES] __initdata
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+ int i, j, index = -1, count = 0;
+ nodemask_t nodes_to_enable;
+
+ if (numa_off)
+ return -1;
+
+ /* no or incomplete node/PXM mapping set, nothing to do */
+ if (srat_disabled())
+ return 0;
+
+ /* find fake nodes PXM mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++) {
+ if ((emu_nid_to_phys[j] == i) &&
+ WARN(node_to_pxm_map_copy[j] != PXM_INVAL,
+ "Node %d is already binded to PXM %d\n",
+ j, node_to_pxm_map_copy[j]))
+ return -1;
+ if (emu_nid_to_phys[j] == i) {
+ node_to_pxm_map_copy[j] =
+ node_to_pxm_map[i];
+ if (j > index)
+ index = j;
+ count++;
+ }
+ }
+ }
+ }
+ if (index == -1) {
+ pr_debug("No node/PXM mapping has been set\n");
+ /* nothing more to be done */
+ return 0;
+ }
+ if (WARN(index != max_nid, "found max nid %d, expected %d\n",
+ index, max_nid))
+ return -1;
+
+ nodes_clear(nodes_to_enable);
+
+ /* map phys nodes not used for fake nodes */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ if (node_to_pxm_map[i] != PXM_INVAL) {
+ for (j = 0; j <= max_nid; j++)
+ if (emu_nid_to_phys[j] == i)
+ break;
+ /* fake nodes PXM mapping has been done */
+ if (j <= max_nid)
+ continue;
+ /* find first hole */
+ for (j = 0;
+ j < MAX_NUMNODES &&
+ node_to_pxm_map_copy[j] != PXM_INVAL;
+ j++)
+ ;
+ if (WARN(j == MAX_NUMNODES,
+ "Number of nodes exceeds MAX_NUMNODES\n"))
+ return -1;
+ node_to_pxm_map_copy[j] = node_to_pxm_map[i];
+ node_set(j, nodes_to_enable);
+ count++;
+ }
+ }
+
+ /* creating reverse mapping in pxm_to_node_map[] */
+ for (i = 0; i < MAX_NUMNODES; i++)
+ if (node_to_pxm_map_copy[i] != PXM_INVAL &&
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE)
+ pxm_to_node_map_copy[node_to_pxm_map_copy[i]] = i;
+
+ /* overwrite with new mapping */
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ node_to_pxm_map[i] = node_to_pxm_map_copy[i];
+ pxm_to_node_map[i] = pxm_to_node_map_copy[i];
+ }
+
+ /* enable other nodes found in PXM for hotplug */
+ nodes_or(numa_nodes_parsed, nodes_to_enable, numa_nodes_parsed);
+
+ pr_debug("found %d total number of nodes\n", count);
+ return 0;
+}
+#endif
+
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 70af3fbbebe5..5ff343096ece 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -607,7 +607,27 @@ acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
void acpi_os_sleep(u64 ms)
{
- msleep(ms);
+ u64 usec = ms * USEC_PER_MSEC, delta_us = 50;
+
+ /*
+ * Use a hrtimer because the timer wheel timers are optimized for
+ * cancellation before they expire and this timer is not going to be
+ * canceled.
+ *
+ * Set the delta between the requested sleep time and the effective
+ * deadline to at least 50 us in case there is an opportunity for timer
+ * coalescing.
+ *
+ * Moreover, longer sleeps can be assumed to need somewhat less timer
+ * precision, so sacrifice some of it for making the timer a more likely
+ * candidate for coalescing by setting the delta to 1% of the sleep time
+ * if it is above 5 ms (this value is chosen so that the delta is a
+ * continuous function of the sleep time).
+ */
+ if (ms > 5)
+ delta_us = (USEC_PER_MSEC / 100) * ms;
+
+ usleep_range(usec, usec + delta_us);
}
void acpi_os_stall(u32 us)
@@ -642,6 +662,15 @@ acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
u32 dummy;
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT)) {
+ /*
+ * set all-1 result as if reading from non-existing
+ * I/O port
+ */
+ *value = GENMASK(width, 0);
+ return AE_NOT_IMPLEMENTED;
+ }
+
if (value)
*value = 0;
else
@@ -665,6 +694,9 @@ EXPORT_SYMBOL(acpi_os_read_port);
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
+ if (!IS_ENABLED(CONFIG_HAS_IOPORT))
+ return AE_NOT_IMPLEMENTED;
+
if (width <= 8) {
outb(value, port);
} else if (width <= 16) {
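
Concretely, the slack the new acpi_os_sleep() adds on top of the requested time works out as follows:

    /*
     * sleep  2 ms -> delta =  50 us   (fixed floor)
     * sleep  5 ms -> delta =  50 us   (1% of 5 ms equals the floor, so the
     *                                  delta is continuous at 5 ms)
     * sleep 20 ms -> delta = 200 us   (1% of the sleep time)
     *
     * usleep_range(usec, usec + delta_us) lets the wakeup land anywhere in
     * that window, giving the hrtimer a chance to coalesce with others.
     */
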
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index b727db968f33..08e10b6226dc 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -714,8 +714,8 @@ static int acpi_pci_link_add(struct acpi_device *device,
return -ENOMEM;
link->device = device;
- strcpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
+ strscpy(acpi_device_name(device), ACPI_PCI_LINK_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
device->driver_data = link;
mutex_lock(&acpi_link_lock);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d0bfb3706801..d0b6a024daae 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -689,8 +689,8 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->device = device;
root->segment = segment & 0xFFFF;
- strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
+ strscpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
if (hotadd && dmar_device_add(handle)) {
diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
index 998264a7333d..32bdf8cbe8f2 100644
--- a/drivers/acpi/pfr_telemetry.c
+++ b/drivers/acpi/pfr_telemetry.c
@@ -272,9 +272,6 @@ static long pfrt_log_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case PFRT_LOG_IOC_GET_INFO:
info.log_level = get_pfrt_log_level(pfrt_log_dev);
- if (ret < 0)
- return ret;
-
info.log_type = pfrt_log_dev->info.log_type;
info.log_revid = pfrt_log_dev->info.log_revid;
if (copy_to_user(p, &info, sizeof(info)))
@@ -425,7 +422,7 @@ static struct platform_driver acpi_pfrt_log_driver = {
.acpi_match_table = acpi_pfrt_log_ids,
},
.probe = acpi_pfrt_log_probe,
- .remove_new = acpi_pfrt_log_remove,
+ .remove = acpi_pfrt_log_remove,
};
module_platform_driver(acpi_pfrt_log_driver);
diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
index 8b2910995fc1..031d1ba81b86 100644
--- a/drivers/acpi/pfr_update.c
+++ b/drivers/acpi/pfr_update.c
@@ -565,7 +565,7 @@ static struct platform_driver acpi_pfru_driver = {
.acpi_match_table = acpi_pfru_ids,
},
.probe = acpi_pfru_probe,
- .remove_new = acpi_pfru_remove,
+ .remove = acpi_pfru_remove,
};
module_platform_driver(acpi_pfru_driver);
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index d2f7fd7743a1..fc92e43d0fe9 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -2,16 +2,28 @@
/* Platform profile sysfs interface */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/acpi.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/platform_profile.h>
#include <linux/sysfs.h>
-static struct platform_profile_handler *cur_profile;
+#define to_pprof_handler(d) (container_of(d, struct platform_profile_handler, dev))
+
static DEFINE_MUTEX(profile_lock);
+struct platform_profile_handler {
+ const char *name;
+ struct device dev;
+ int minor;
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ const struct platform_profile_ops *ops;
+};
+
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
@@ -19,99 +31,373 @@ static const char * const profile_names[] = {
[PLATFORM_PROFILE_BALANCED] = "balanced",
[PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
[PLATFORM_PROFILE_PERFORMANCE] = "performance",
+ [PLATFORM_PROFILE_CUSTOM] = "custom",
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
-static ssize_t platform_profile_choices_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len = 0;
- int err, i;
-
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+static DEFINE_IDA(platform_profile_ida);
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+/**
+ * _common_choices_show - Show the available profile choices
+ * @choices: The available profile choices
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t _common_choices_show(unsigned long *choices, char *buf)
+{
+ int i, len = 0;
- for_each_set_bit(i, cur_profile->choices, PLATFORM_PROFILE_LAST) {
+ for_each_set_bit(i, choices, PLATFORM_PROFILE_LAST) {
if (len == 0)
len += sysfs_emit_at(buf, len, "%s", profile_names[i]);
else
len += sysfs_emit_at(buf, len, " %s", profile_names[i]);
}
len += sysfs_emit_at(buf, len, "\n");
- mutex_unlock(&profile_lock);
+
return len;
}
-static ssize_t platform_profile_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+/**
+ * _store_class_profile - Set the profile for a class device
+ * @dev: The class device
+ * @data: The profile to set
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_class_profile(struct device *dev, void *data)
{
- enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
+ struct platform_profile_handler *handler;
+ int *bit = (int *)data;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ if (!test_bit(*bit, handler->choices))
+ return -EOPNOTSUPP;
+
+ return handler->ops->profile_set(dev, *bit);
+}
+
+/**
+ * _notify_class_profile - Notify the class device of a profile change
+ * @dev: The class device
+ * @data: Unused
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _notify_class_profile(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ lockdep_assert_held(&profile_lock);
+ sysfs_notify(&handler->dev.kobj, NULL, "profile");
+ kobject_uevent(&handler->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * get_class_profile - Show the current profile for a class device
+ * @dev: The class device
+ * @profile: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int get_class_profile(struct device *dev,
+ enum platform_profile_option *profile)
+{
+ struct platform_profile_handler *handler;
+ enum platform_profile_option val;
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ err = handler->ops->profile_get(dev, &val);
+ if (err) {
+ pr_err("Failed to get profile for handler %s\n", handler->name);
return err;
+ }
+
+ if (WARN_ON(val >= PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+ *profile = val;
+
+ return 0;
+}
+
+/**
+ * name_show - Show the name of the profile handler
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
+
+ return sysfs_emit(buf, "%s\n", handler->name);
+}
+static DEVICE_ATTR_RO(name);
+
+/**
+ * choices_show - Show the available profile choices
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_profile_handler *handler = to_pprof_handler(dev);
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+ return _common_choices_show(handler->choices, buf);
+}
+static DEVICE_ATTR_RO(choices);
+
+/**
+ * profile_show - Show the current profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = get_class_profile(dev, &profile);
+ if (err)
+ return err;
}
- err = cur_profile->profile_get(cur_profile, &profile);
- mutex_unlock(&profile_lock);
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * profile_store - Set the profile for a class device
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int index, ret;
+
+ index = sysfs_match_string(profile_names, buf);
+ if (index < 0)
+ return -EINVAL;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = _store_class_profile(dev, &index);
+ if (ret)
+ return ret;
+ }
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return count;
+}
+static DEVICE_ATTR_RW(profile);
+
+static struct attribute *profile_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_choices.attr,
+ &dev_attr_profile.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(profile);
+
+static void pprof_device_release(struct device *dev)
+{
+ struct platform_profile_handler *pprof = to_pprof_handler(dev);
+
+ kfree(pprof);
+}
+
+static const struct class platform_profile_class = {
+ .name = "platform-profile",
+ .dev_groups = profile_groups,
+ .dev_release = pprof_device_release,
+};
+
+/**
+ * _aggregate_choices - Aggregate the available profile choices
+ * @dev: The device
+ * @data: The available profile choices
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_choices(struct device *dev, void *data)
+{
+ struct platform_profile_handler *handler;
+ unsigned long *aggregate = data;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ if (test_bit(PLATFORM_PROFILE_LAST, aggregate))
+ bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST);
+ else
+ bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST);
+
+ return 0;
+}
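
_aggregate_choices() computes the intersection of what every registered handler supports, using the PLATFORM_PROFILE_LAST bit as a "no handler visited yet" sentinel: the first handler's bitmap is copied wholesale, each later one is ANDed in. A worked example with two hypothetical handlers:

    /*
     * handler A choices: low-power balanced performance
     * handler B choices: balanced performance custom
     * aggregate        : balanced performance   (intersection)
     */
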
+
+/**
+ * platform_profile_choices_show - Show the available profile choices for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_choices_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int err;
+
+ set_bit(PLATFORM_PROFILE_LAST, aggregate);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ aggregate, _aggregate_choices);
+ if (err)
+ return err;
+ }
+
+ /* no profile handler registered any more */
+ if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))
+ return -EINVAL;
+
+ return _common_choices_show(aggregate, buf);
+}
+
+/**
+ * _aggregate_profiles - Aggregate the profiles for legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _aggregate_profiles(struct device *dev, void *data)
+{
+ enum platform_profile_option *profile = data;
+ enum platform_profile_option val;
+ int err;
+
+ err = get_class_profile(dev, &val);
if (err)
return err;
- /* Check that profile is valid index */
- if (WARN_ON((profile < 0) || (profile >= ARRAY_SIZE(profile_names))))
- return -EIO;
+ if (*profile != PLATFORM_PROFILE_LAST && *profile != val)
+ *profile = PLATFORM_PROFILE_CUSTOM;
+ else
+ *profile = val;
- return sysfs_emit(buf, "%s\n", profile_names[profile]);
+ return 0;
}
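
_aggregate_profiles() reduces the per-handler readings the same way, except that disagreement collapses to "custom":

    /*
     * A reports balanced, B reports balanced    -> "balanced"
     * A reports balanced, B reports performance -> "custom"
     * no handler registered -> the aggregate stays PLATFORM_PROFILE_LAST
     *                          and the legacy sysfs read returns -EINVAL
     */
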
-static ssize_t platform_profile_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+/**
+ * _store_and_notify - Store and notify a class from legacy sysfs interface
+ * @dev: The device
+ * @data: The profile to return
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _store_and_notify(struct device *dev, void *data)
{
- int err, i;
+ enum platform_profile_option *profile = data;
+ int err;
- err = mutex_lock_interruptible(&profile_lock);
+ err = _store_class_profile(dev, profile);
if (err)
return err;
+ return _notify_class_profile(dev, NULL);
+}
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
+/**
+ * platform_profile_show - Show the current profile for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to write to
+ *
+ * Return: The number of bytes written
+ */
+static ssize_t platform_profile_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ int err;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
}
+ /* no profile handler registered any more */
+ if (profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", profile_names[profile]);
+}
+
+/**
+ * platform_profile_store - Set the profile for legacy sysfs interface
+ * @dev: The device
+ * @attr: The attribute
+ * @buf: The buffer to read from
+ * @count: The number of bytes to read
+ *
+ * Return: The number of bytes read
+ */
+static ssize_t platform_profile_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int ret;
+ int i;
+
/* Scan for a matching profile */
i = sysfs_match_string(profile_names, buf);
- if (i < 0) {
- mutex_unlock(&profile_lock);
+ if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
+ set_bit(PLATFORM_PROFILE_LAST, choices);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ ret = class_for_each_device(&platform_profile_class, NULL,
+ choices, _aggregate_choices);
+ if (ret)
+ return ret;
+ if (!test_bit(i, choices))
+ return -EOPNOTSUPP;
+
+ ret = class_for_each_device(&platform_profile_class, NULL, &i,
+ _store_and_notify);
+ if (ret)
+ return ret;
}
- /* Check that platform supports this profile choice */
- if (!test_bit(i, cur_profile->choices)) {
- mutex_unlock(&profile_lock);
- return -EOPNOTSUPP;
- }
-
- err = cur_profile->profile_set(cur_profile, i);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
- mutex_unlock(&profile_lock);
- if (err)
- return err;
return count;
}
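The store path above leans on a sentinel-bit idiom: PLATFORM_PROFILE_LAST is set in the bitmap before the class walk so the first handler visited can tell that nothing has been aggregated yet, and the bitmap_empty(..., PLATFORM_PROFILE_LAST) checks ignore the sentinel because only bits 0..PLATFORM_PROFILE_LAST-1 are scanned. A minimal sketch, assuming _aggregate_choices() (defined earlier in this patch) copies a handler's choices while the sentinel is set and intersects them afterwards:

	unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];

	set_bit(PLATFORM_PROFILE_LAST, aggregate);	/* sentinel: nothing aggregated yet */
	/* per handler: sentinel set -> bitmap_copy(); else bitmap_and() */
	if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))	/* sentinel bit excluded */
		return -EINVAL;				/* no handlers registered */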
@@ -124,98 +410,249 @@ static struct attribute *platform_profile_attrs[] = {
NULL
};
+static int profile_class_registered(struct device *dev, const void *data)
+{
+ return 1;
+}
+
+static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ if (!class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered))
+ return 0;
+ return attr->mode;
+}
+
static const struct attribute_group platform_profile_group = {
- .attrs = platform_profile_attrs
+ .attrs = platform_profile_attrs,
+ .is_visible = profile_class_is_visible,
};
-void platform_profile_notify(void)
+/**
+ * platform_profile_notify - Notify class device and legacy sysfs interface
+ * @dev: The class device
+ */
+void platform_profile_notify(struct device *dev)
{
- if (!cur_profile)
- return;
+ scoped_cond_guard(mutex_intr, return, &profile_lock) {
+ _notify_class_profile(dev, NULL);
+ }
sysfs_notify(acpi_kobj, NULL, "platform_profile");
}
EXPORT_SYMBOL_GPL(platform_profile_notify);
+/**
+ * platform_profile_cycle - Cycles profiles available on all registered class devices
+ *
+ * Return: 0 on success, -errno on failure
+ */
int platform_profile_cycle(void)
{
- enum platform_profile_option profile;
- enum platform_profile_option next;
+ enum platform_profile_option next = PLATFORM_PROFILE_LAST;
+ enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int err;
- err = mutex_lock_interruptible(&profile_lock);
- if (err)
- return err;
+ set_bit(PLATFORM_PROFILE_LAST, choices);
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &profile, _aggregate_profiles);
+ if (err)
+ return err;
- if (!cur_profile) {
- mutex_unlock(&profile_lock);
- return -ENODEV;
- }
+ if (profile == PLATFORM_PROFILE_CUSTOM ||
+ profile == PLATFORM_PROFILE_LAST)
+ return -EINVAL;
- err = cur_profile->profile_get(cur_profile, &profile);
- if (err) {
- mutex_unlock(&profile_lock);
- return err;
- }
+ err = class_for_each_device(&platform_profile_class, NULL,
+ choices, _aggregate_choices);
+ if (err)
+ return err;
- next = find_next_bit_wrap(cur_profile->choices, PLATFORM_PROFILE_LAST,
- profile + 1);
+ /* never cycle into custom, even if all drivers support it */
+ clear_bit(PLATFORM_PROFILE_CUSTOM, choices);
- if (WARN_ON(next == PLATFORM_PROFILE_LAST)) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
- }
+ next = find_next_bit_wrap(choices,
+ PLATFORM_PROFILE_LAST,
+ profile + 1);
- err = cur_profile->profile_set(cur_profile, next);
- mutex_unlock(&profile_lock);
+ err = class_for_each_device(&platform_profile_class, NULL, &next,
+ _store_and_notify);
- if (!err)
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (err)
+ return err;
+ }
- return err;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_cycle);
-int platform_profile_register(struct platform_profile_handler *pprof)
+/**
+ * platform_profile_register - Creates and registers a platform profile class device
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
{
+ struct device *ppdev;
+ int minor;
int err;
- mutex_lock(&profile_lock);
- /* We can only have one active profile */
- if (cur_profile) {
- mutex_unlock(&profile_lock);
- return -EEXIST;
+ /* Sanity check */
+ if (WARN_ON_ONCE(!dev || !name || !ops || !ops->profile_get ||
+ !ops->profile_set || !ops->probe))
+ return ERR_PTR(-EINVAL);
+
+ struct platform_profile_handler *pprof __free(kfree) = kzalloc(
+ sizeof(*pprof), GFP_KERNEL);
+ if (!pprof)
+ return ERR_PTR(-ENOMEM);
+
+ err = ops->probe(drvdata, pprof->choices);
+ if (err) {
+ dev_err(dev, "platform_profile probe failed\n");
+ return ERR_PTR(err);
}
- /* Sanity check the profile handler field are set */
- if (!pprof || bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST) ||
- !pprof->profile_set || !pprof->profile_get) {
- mutex_unlock(&profile_lock);
- return -EINVAL;
+ if (bitmap_empty(pprof->choices, PLATFORM_PROFILE_LAST)) {
+ dev_err(dev, "Failed to register platform_profile class device with empty choices\n");
+ return ERR_PTR(-EINVAL);
}
- err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ guard(mutex)(&profile_lock);
+
+ /* create class interface for individual handler */
+ minor = ida_alloc(&platform_profile_ida, GFP_KERNEL);
+ if (minor < 0)
+ return ERR_PTR(minor);
+
+ pprof->name = name;
+ pprof->ops = ops;
+ pprof->minor = minor;
+ pprof->dev.class = &platform_profile_class;
+ pprof->dev.parent = dev;
+ dev_set_drvdata(&pprof->dev, drvdata);
+ dev_set_name(&pprof->dev, "platform-profile-%d", pprof->minor);
+ /* device_register() takes ownership of pprof/ppdev */
+ ppdev = &no_free_ptr(pprof)->dev;
+ err = device_register(ppdev);
if (err) {
- mutex_unlock(&profile_lock);
- return err;
+ put_device(ppdev);
+ goto cleanup_ida;
}
- cur_profile = pprof;
- mutex_unlock(&profile_lock);
- return 0;
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ err = sysfs_update_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ goto cleanup_cur;
+
+ return ppdev;
+
+cleanup_cur:
+ device_unregister(ppdev);
+
+cleanup_ida:
+ ida_free(&platform_profile_ida, minor);
+
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(platform_profile_register);
-int platform_profile_remove(void)
+/**
+ * platform_profile_remove - Unregisters a platform profile class device
+ * @dev: Class device
+ *
+ * Return: 0
+ */
+int platform_profile_remove(struct device *dev)
{
- sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ struct platform_profile_handler *pprof = to_pprof_handler(dev);
+ int id;
+ guard(mutex)(&profile_lock);
+
+ id = pprof->minor;
+ device_unregister(&pprof->dev);
+ ida_free(&platform_profile_ida, id);
+
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+
+ sysfs_update_group(acpi_kobj, &platform_profile_group);
- mutex_lock(&profile_lock);
- cur_profile = NULL;
- mutex_unlock(&profile_lock);
return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_remove);
+static void devm_platform_profile_release(struct device *dev, void *res)
+{
+ struct device **ppdev = res;
+
+ platform_profile_remove(*ppdev);
+}
+
+/**
+ * devm_platform_profile_register - Device managed version of platform_profile_register
+ * @dev: Parent device
+ * @name: Name of the class device
+ * @drvdata: Driver data that will be attached to the class device
+ * @ops: Platform profile's mandatory operations
+ *
+ * Return: pointer to the new class device on success, ERR_PTR on failure
+ */
+struct device *devm_platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops)
+{
+ struct device *ppdev;
+ struct device **dr;
+
+ dr = devres_alloc(devm_platform_profile_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ppdev = platform_profile_register(dev, name, drvdata, ops);
+ if (IS_ERR(ppdev)) {
+ devres_free(dr);
+ return ppdev;
+ }
+
+ *dr = ppdev;
+ devres_add(dev, dr);
+
+ return ppdev;
+}
+EXPORT_SYMBOL_GPL(devm_platform_profile_register);
+
+static int __init platform_profile_init(void)
+{
+ int err;
+
+ err = class_register(&platform_profile_class);
+ if (err)
+ return err;
+
+ err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ if (err)
+ class_unregister(&platform_profile_class);
+
+ return err;
+}
+
+static void __exit platform_profile_exit(void)
+{
+ sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ class_unregister(&platform_profile_class);
+}
+module_init(platform_profile_init);
+module_exit(platform_profile_exit);
+
MODULE_AUTHOR("Mark Pearson <markpearson@lenovo.com>");
MODULE_DESCRIPTION("ACPI platform profile sysfs interface");
MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index c2c70139c4f1..25174c24d3d7 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -950,8 +950,8 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
mutex_init(&resource->resource_lock);
INIT_LIST_HEAD(&resource->list_node);
INIT_LIST_HEAD(&resource->dependents);
- strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
+ strscpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_POWER_CLASS);
device->power.state = ACPI_STATE_UNKNOWN;
device->flags.match_driver = true;
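The strcpy() -> strscpy() conversions in this series use the two-argument strscpy() form, which derives its bound from the destination array at compile time and always NUL-terminates. Roughly:

	char buf[16];

	strcpy(buf, src);	/* unbounded: overflows buf for long src */
	strscpy(buf, src);	/* bounded by sizeof(buf), NUL-terminated;
				 * returns -E2BIG if src was truncated */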
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 1cfaa5957ac4..e549914a636c 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -52,7 +52,7 @@ struct prm_context_buffer {
static LIST_HEAD(prm_module_list);
struct prm_handler_info {
- guid_t guid;
+ efi_guid_t guid;
efi_status_t (__efiapi *handler_addr)(u64, void *);
u64 static_data_buffer_addr;
u64 acpi_param_buffer_addr;
@@ -72,17 +72,21 @@ struct prm_module_info {
struct prm_handler_info handlers[] __counted_by(handler_count);
};
-static u64 efi_pa_va_lookup(u64 pa)
+static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa)
{
efi_memory_desc_t *md;
u64 pa_offset = pa & ~PAGE_MASK;
u64 page = pa & PAGE_MASK;
for_each_efi_memory_desc(md) {
- if (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)
+ if ((md->attribute & EFI_MEMORY_RUNTIME) &&
+ (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)) {
return pa_offset + md->virt_addr + page - md->phys_addr;
+ }
}
+ pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa);
+
return 0;
}
@@ -148,9 +152,15 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
- th->handler_addr = (void *)efi_pa_va_lookup(handler_info->handler_address);
- th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
- th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
+ th->handler_addr =
+ (void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
+
+ th->static_data_buffer_addr =
+ efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
+
+ th->acpi_param_buffer_addr =
+ efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
+
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0;
@@ -277,6 +287,11 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
if (!handler || !module)
goto invalid_guid;
+ if (!handler->handler_addr) {
+ buffer->prm_status = PRM_HANDLER_ERROR;
+ return AE_OK;
+ }
+
ACPI_COPY_NAMESEG(context.signature, "PRMC");
context.revision = 0x0;
context.reserved = 0x0;
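For reference, the lookup efi_pa_va_lookup() performs is just the descriptor's phys-to-virt delta applied to the physical address; the page/offset split in the code is equivalent to:

	/* md covers pa and has EFI_MEMORY_RUNTIME set */
	u64 va = md->virt_addr + (pa - md->phys_addr);

Restricting the walk to EFI_MEMORY_RUNTIME descriptors matters because only those regions keep a valid virt_addr after SetVirtualAddressMap(), which is why the handler path now degrades gracefully (PRM_HANDLER_ERROR) instead of jumping through a bogus pointer.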
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index cb52dd000b95..3b281bc1e73c 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -237,6 +237,9 @@ static struct notifier_block acpi_processor_notifier_block = {
.notifier_call = acpi_processor_notifier,
};
+void __weak acpi_processor_init_invariance_cppc(void)
+{ }
+
/*
* We keep the driver loaded even when ACPI is not running.
* This is needed for the powernow-k8 driver, that works even without
@@ -270,6 +273,12 @@ static int __init acpi_processor_driver_init(void)
NULL, acpi_soft_cpu_dead);
acpi_processor_throttling_init();
+
+ /*
+ * Frequency invariance calculations on AMD platforms can't be run until
+ * after acpi_cppc_processor_probe() has been called for all online CPUs
+ */
+ acpi_processor_init_invariance_cppc();
return 0;
err:
driver_unregister(&acpi_processor_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 831fa4a12159..698897b29de2 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -578,7 +578,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
* @dev: the target CPU
* @index: the index of suggested state
*/
-static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
+static void acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -591,11 +591,8 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
} else
- return -ENODEV;
+ return;
}
-
- /* Never reached */
- return 0;
}
static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
@@ -803,12 +800,12 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
state->enter = acpi_idle_enter;
state->flags = 0;
- if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
- cx->type == ACPI_STATE_C3) {
- state->enter_dead = acpi_idle_play_dead;
- if (cx->type != ACPI_STATE_C3)
- drv->safe_state_index = count;
- }
+
+ state->enter_dead = acpi_idle_play_dead;
+
+ if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2)
+ drv->safe_state_index = count;
+
/*
* Halt-induced C1 is not good for ->enter_s2idle, because it
* re-enables interrupts on exit. Moreover, C1 is generally not
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 4265814c74f8..53996f1a2d80 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -24,8 +24,6 @@
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
-static DEFINE_MUTEX(performance_mutex);
-
/*
* _PPC support is implemented as a CPUfreq policy notifier:
* This means each time a CPUfreq driver registered also with
@@ -209,6 +207,10 @@ void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
}
}
+#ifdef CONFIG_X86
+
+static DEFINE_MUTEX(performance_mutex);
+
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
int result = 0;
@@ -267,7 +269,6 @@ end:
return result;
}
-#ifdef CONFIG_X86
/*
* Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
* in their ACPI data. Calculate the real values and fix up the _PSS data.
@@ -298,9 +299,6 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
px->core_frequency = (100 * (fid + 8)) >> did;
}
}
-#else
-static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {};
-#endif
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
@@ -440,13 +438,11 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
* the BIOS is older than the CPU and does not know its frequencies
*/
update_bios:
-#ifdef CONFIG_X86
if (acpi_has_method(pr->handle, "_PPC")) {
if(boot_cpu_has(X86_FEATURE_EST))
pr_warn(FW_BUG "BIOS needs update for CPU "
"frequency support\n");
}
-#endif
return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
@@ -788,3 +784,4 @@ unlock:
mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);
+#endif
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 80a52a4e66dd..436019d96027 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1187,8 +1187,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
break;
}
- if (nval == 0)
- return -EINVAL;
if (obj->type == ACPI_TYPE_BUFFER) {
if (proptype != DEV_PROP_U8)
@@ -1212,9 +1210,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
- ret = acpi_copy_property_array_string(
- items, (char **)val,
- min_t(u32, nval, obj->package.count));
+ nval = min_t(u32, nval, obj->package.count);
+ if (nval == 0)
+ return -ENODATA;
+
+ ret = acpi_copy_property_array_string(items, (char **)val, nval);
break;
default:
ret = -EINVAL;
@@ -1492,7 +1492,7 @@ acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (!is_acpi_device_node(fwnode))
- return false;
+ return true;
return acpi_device_is_present(to_acpi_device_node(fwnode));
}
@@ -1656,6 +1656,7 @@ static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
acpi_fwnode_device_dma_supported, \
.device_get_dma_attr = acpi_fwnode_device_get_dma_attr, \
.property_present = acpi_fwnode_property_present, \
+ .property_read_bool = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
.property_read_string_array = \
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 3d74ebe9dbd8..b4cd14e7fa76 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win,
switch (addr->resource_type) {
case ACPI_MEMORY_RANGE:
acpi_dev_memresource_flags(res, len, wp);
+
+ if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
+ res->flags |= IORESOURCE_PREFETCH;
break;
case ACPI_IO_RANGE:
acpi_dev_ioresource_flags(res, len, iodec,
@@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win,
if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW;
- if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
- res->flags |= IORESOURCE_PREFETCH;
-
return !(res->flags & IORESOURCE_DISABLED);
}
@@ -441,80 +441,45 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
- /* Asus Vivobook X1704VAP */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "X1704VAP"),
- },
- },
- {
- /* Asus ExpertBook B1402CBA */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
- },
- },
- {
- /* Asus ExpertBook B1402CVA */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
- },
- },
- {
- /* Asus ExpertBook B1502CBA */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"),
- },
- },
- {
- /* Asus ExpertBook B1502CGA */
+ /* Asus Vivobook X1504VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B1502CGA"),
+ DMI_MATCH(DMI_BOARD_NAME, "X1504VAP"),
},
},
- {
- /* Asus ExpertBook B1502CVA */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B1502CVA"),
- },
- },
{
- /* Asus ExpertBook B2402CBA */
+ /* Asus Vivobook X1704VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
+ DMI_MATCH(DMI_BOARD_NAME, "X1704VAP"),
},
},
{
- /* Asus ExpertBook B2402FBA */
+ /* Asus ExpertBook B1402C* */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B2402FBA"),
+ DMI_MATCH(DMI_BOARD_NAME, "B1402C"),
},
},
{
- /* Asus ExpertBook B2502 */
+ /* Asus ExpertBook B1502C* */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
+ DMI_MATCH(DMI_BOARD_NAME, "B1502C"),
},
},
{
- /* Asus ExpertBook B2502FBA */
+ /* Asus ExpertBook B2402 (B2402CBA / B2402FBA / B2402CVA / B2402FVA) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B2502FBA"),
+ DMI_MATCH(DMI_BOARD_NAME, "B2402"),
},
},
{
- /* Asus ExpertBook B2502CVA */
+ /* Asus ExpertBook B2502 (B2502CBA / B2502FBA / B2502CVA / B2502FVA) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "B2502CVA"),
+ DMI_MATCH(DMI_BOARD_NAME, "B2502"),
},
},
{
@@ -532,31 +497,24 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
- /* Asus Vivobook Pro N6506MV */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "N6506MV"),
- },
- },
- {
- /* Asus Vivobook Pro N6506MU */
+ /* Asus Vivobook Pro N6506M* */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "N6506MU"),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506M"),
},
},
{
- /* Asus Vivobook Pro N6506MJ */
+ /* LG Electronics 17U70P */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_BOARD_NAME, "N6506MJ"),
+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+ DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
},
},
{
- /* LG Electronics 17U70P */
+ /* LG Electronics 16T90SP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
- DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
+ DMI_MATCH(DMI_BOARD_NAME, "16T90SP"),
},
},
{ }
@@ -606,6 +564,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "MECH-17"),
+ },
+ },
+ {
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
@@ -695,6 +659,17 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
},
},
+ {
+ /*
+ * TongFang GM5HG0A; in the case of the SKIKK Vanaheim relabel
+ * the board-name is changed, so check OEM strings instead. Note
+ * OEM string matches are always exact matches.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=219614
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_OEM_STRING, "GM5HG0A"),
+ },
+ },
{ }
};
@@ -720,11 +695,11 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
for (i = 0; i < ARRAY_SIZE(override_table); i++) {
const struct irq_override_cmp *entry = &override_table[i];
- if (dmi_check_system(entry->system) &&
- entry->irq == gsi &&
+ if (entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
- entry->shareable == shareable)
+ entry->shareable == shareable &&
+ dmi_check_system(entry->system))
return entry->override;
}
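Two notes on the resource.c hunks. The consolidated Asus entries rely on DMI_MATCH() being a substring match, so one board-name prefix covers a whole model family, while DMI_EXACT_MATCH() (the OEM-string entry) requires full equality; a sketch of the semantics, not the kernel implementation:

	/* illustrative only */
	bool dmi_field_matches(const char *field, const char *pat, bool exact)
	{
		return exact ? strcmp(field, pat) == 0
			     : strstr(field, pat) != NULL;
	}
	/* "B1502CBA", "B1502CGA" and "B1502CVA" all match the prefix "B1502C" */

The acpi_dev_irq_override() reorder is a cheap-checks-first change: the integer comparisons now short-circuit before the comparatively expensive dmi_check_system() table walk.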
diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c
index 5ef97905a727..673e4d5dd752 100644
--- a/drivers/acpi/riscv/init.c
+++ b/drivers/acpi/riscv/init.c
@@ -7,7 +7,7 @@
#include <linux/acpi.h>
#include "init.h"
-void __init acpi_riscv_init(void)
+void __init acpi_arch_init(void)
{
riscv_acpi_init_gsi_mapping();
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 7a0914055fca..a3f95a3fffde 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -644,8 +644,8 @@ static int acpi_sbs_add(struct acpi_device *device)
sbs->hc = acpi_driver_data(acpi_dev_parent(device));
sbs->device = device;
- strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_SBS_CLASS);
+ strscpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_SBS_CLASS);
device->driver_data = sbs;
result = acpi_charger_add(sbs);
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 16f2daaa2c45..1a2bf520be23 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include "sbshc.h"
+#include "internal.h"
#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
@@ -236,12 +237,6 @@ static int smbus_alarm(void *context)
return 0;
}
-typedef int (*acpi_ec_query_func) (void *data);
-
-extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
- acpi_handle handle, acpi_ec_query_func func,
- void *data);
-
static int acpi_smbus_hc_add(struct acpi_device *device)
{
int status;
@@ -257,8 +252,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
return -EIO;
}
- strcpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_SMB_HC_CLASS);
+ strscpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_SMB_HC_CLASS);
hc = kzalloc(sizeof(struct acpi_smb_hc), GFP_KERNEL);
if (!hc)
@@ -278,8 +273,6 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
return 0;
}
-extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
-
static void acpi_smbus_hc_remove(struct acpi_device *device)
{
struct acpi_smb_hc *hc;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7ecc401fb97f..9f4efa8f75a6 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -723,10 +723,8 @@ int acpi_tie_acpi_dev(struct acpi_device *adev)
static void acpi_store_pld_crc(struct acpi_device *adev)
{
struct acpi_pld_info *pld;
- acpi_status status;
- status = acpi_get_physical_device_location(adev->handle, &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(adev->handle, &pld))
return;
adev->pld_crc = crc32(~0, pld, sizeof(*pld));
@@ -1179,19 +1177,19 @@ static void acpi_device_get_busid(struct acpi_device *device)
* TBD: Shouldn't this value be unique (within the ACPI namespace)?
*/
if (!acpi_dev_parent(device)) {
- strcpy(device->pnp.bus_id, "ACPI");
+ strscpy(device->pnp.bus_id, "ACPI");
return;
}
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
- strcpy(device->pnp.bus_id, "PWRF");
+ strscpy(device->pnp.bus_id, "PWRF");
break;
case ACPI_BUS_TYPE_SLEEP_BUTTON:
- strcpy(device->pnp.bus_id, "SLPF");
+ strscpy(device->pnp.bus_id, "SLPF");
break;
case ACPI_BUS_TYPE_ECDT_EC:
- strcpy(device->pnp.bus_id, "ECDT");
+ strscpy(device->pnp.bus_id, "ECDT");
break;
default:
acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
@@ -1202,7 +1200,7 @@ static void acpi_device_get_busid(struct acpi_device *device)
else
break;
}
- strcpy(device->pnp.bus_id, bus_id);
+ strscpy(device->pnp.bus_id, bus_id);
break;
}
}
@@ -1453,8 +1451,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
acpi_object_is_system_bus(handle)) {
/* \_SB, \_TZ, LNXSYBUS */
acpi_add_id(pnp, ACPI_BUS_HID);
- strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
- strcpy(pnp->device_class, ACPI_BUS_CLASS);
+ strscpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
+ strscpy(pnp->device_class, ACPI_BUS_CLASS);
}
break;
@@ -1769,6 +1767,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
{"CSC3557", },
{"INT33FE", },
{"INT3515", },
+ {"TXNW2781", },
/* Non-conforming _HID for Cirrus Logic already released */
{"CLSA0100", },
{"CLSA0101", },
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 687524b50085..a48ebbf768f9 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -319,7 +319,7 @@ struct acpi_data_attr {
};
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_table_attr *table_attr =
@@ -372,7 +372,7 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
}
table_attr->attr.size = table_header->length;
- table_attr->attr.read = acpi_table_show;
+ table_attr->attr.read_new = acpi_table_show;
table_attr->attr.attr.name = table_attr->filename;
table_attr->attr.attr.mode = 0400;
@@ -412,7 +412,7 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
}
static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
struct acpi_data_attr *data_attr;
@@ -495,7 +495,7 @@ static int acpi_table_data_init(struct acpi_table_header *th)
if (!data_attr)
return -ENOMEM;
sysfs_attr_init(&data_attr->attr.attr);
- data_attr->attr.read = acpi_data_show;
+ data_attr->attr.read_new = acpi_data_show;
data_attr->attr.attr.mode = 0400;
return acpi_data_objs[i].fn(th, data_attr);
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 9e1b01c35070..2295abbecd14 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -56,7 +56,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_apic *)header;
pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
p->processor_id, p->id,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -66,7 +66,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_x2apic *)header;
pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
p->local_apic_id, p->uid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -160,7 +160,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
(struct acpi_madt_local_sapic *)header;
pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
p->processor_id, p->id, p->eid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
}
break;
@@ -183,7 +183,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n",
p->uid, p->base_address,
p->arm_mpidr,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -218,7 +218,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("CORE PIC (processor_id[0x%02x] core_id[0x%02x] %s)\n",
p->processor_id, p->core_id,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
@@ -228,7 +228,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
pr_debug("RISC-V INTC (acpi_uid[0x%04x] hart_id[0x%llx] %s)\n",
p->uid, p->hart_id,
- (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
}
break;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 78db38c7076e..95982c098d5b 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -796,9 +796,9 @@ static int acpi_thermal_add(struct acpi_device *device)
return -ENOMEM;
tz->device = device;
- strcpy(tz->name, device->pnp.bus_id);
- strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
+ strscpy(tz->name, device->pnp.bus_id);
+ strscpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
+ strscpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
device->driver_data = tz;
acpi_thermal_aml_dependency_fix(tz);
@@ -1082,7 +1082,7 @@ static void __exit acpi_thermal_exit(void)
module_init(acpi_thermal_init);
module_exit(acpi_thermal_exit);
-MODULE_IMPORT_NS(ACPI_THERMAL);
+MODULE_IMPORT_NS("ACPI_THERMAL");
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Thermal Zone Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/thermal_lib.c b/drivers/acpi/thermal_lib.c
index 6214d6ebe1fa..f81591927e86 100644
--- a/drivers/acpi/thermal_lib.c
+++ b/drivers/acpi/thermal_lib.c
@@ -53,25 +53,25 @@ int acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)
return acpi_trip_temp(adev, obj_name, ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_active_trip_temp, "ACPI_THERMAL");
int acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
{
return acpi_trip_temp(adev, "_PSV", ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, "ACPI_THERMAL");
int acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
{
return acpi_trip_temp(adev, "_HOT", ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_hot_trip_temp, "ACPI_THERMAL");
int acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
{
return acpi_trip_temp(adev, "_CRT", ret_temp);
}
-EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, ACPI_THERMAL);
+EXPORT_SYMBOL_NS_GPL(acpi_critical_trip_temp, "ACPI_THERMAL");
static int thermal_temp(int error, int temp_decik, int *ret_temp)
{
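The EXPORT_SYMBOL_NS_GPL()/MODULE_IMPORT_NS() churn in thermal.c and thermal_lib.c is the tree-wide switch of symbol-namespace names from bare identifiers to string literals; exporter and importer must still name the identical namespace:

	/* exporter (thermal_lib.c) */
	EXPORT_SYMBOL_NS_GPL(acpi_passive_trip_temp, "ACPI_THERMAL");

	/* importer (thermal.c); without it, modpost flags the symbol use */
	MODULE_IMPORT_NS("ACPI_THERMAL");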
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 6de542d99518..526563a0d188 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -494,7 +494,7 @@ bool acpi_device_dep(acpi_handle target, acpi_handle match)
}
EXPORT_SYMBOL_GPL(acpi_device_dep);
-acpi_status
+bool
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
acpi_status status;
@@ -502,9 +502,8 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
union acpi_object *output;
status = acpi_evaluate_object(handle, "_PLD", NULL, &buffer);
-
if (ACPI_FAILURE(status))
- return status;
+ return false;
output = buffer.pointer;
@@ -523,7 +522,7 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
out:
kfree(buffer.pointer);
- return status;
+ return ACPI_SUCCESS(status);
}
EXPORT_SYMBOL(acpi_get_physical_device_location);
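With acpi_get_physical_device_location() now returning bool, callers drop the acpi_status plumbing entirely, as the scan.c hunk earlier shows. The resulting pattern (the _PLD buffer still comes from ACPICA and is the caller's to free):

	struct acpi_pld_info *pld;

	if (!acpi_get_physical_device_location(handle, &pld))
		return;		/* no _PLD, or evaluation failed */

	/* ... use pld ... */
	ACPI_FREE(pld);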
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 015bd8e66c1c..d507d5e08435 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -551,6 +551,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Apple MacBook Air 7,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir7,2"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Apple MacBook Air 9,1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -566,6 +574,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ .callback = video_detect_force_native,
+ /* Apple MacBook Pro 11,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro11,2"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
/* Apple MacBook Pro 12,1 */
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 6af546b21574..068c1612660b 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -295,6 +296,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
* 2. Devices which also have the skip i2c/serdev quirks and which
* need the x86-android-tablets module to properly work.
+ * Sorted alphabetically.
*/
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
{
@@ -308,6 +310,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
+ /* Acer Iconia One 8 A1-840 (non FHD version) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
+ /* Asus ME176C tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
@@ -318,23 +333,24 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
- /* Lenovo Yoga Book X90F/L */
+ /* Asus TF103C transformer 2-in-1 */
.matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
- DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
+ /* Lenovo Yoga Book X90F/L */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
@@ -392,6 +408,32 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
+ /* Vexia Edu Atla 10 tablet 5V version */
+ .matches = {
+ /* Having all 3 of these not set is somewhat unique */
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ },
+ {
+ /* Vexia Edu Atla 10 tablet 9V version */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
+ },
+ .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_UART1_SKIP |
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -411,6 +453,7 @@ static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
{ "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
{ "10EC5651", 0 }, /* RealTek ALC5651 audio codec */
{ "INT33F4", 0 }, /* X-Powers AXP288 PMIC */
+ { "INT33F5", 0 }, /* TI Dollar Cove PMIC */
{ "INT33FD", 0 }, /* Intel Crystal Cove PMIC */
{ "INT34D3", 0 }, /* Intel Whiskey Cove PMIC */
{ "NPCE69A", 0 }, /* Asus Transformer keyboard dock */
@@ -439,18 +482,35 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
struct acpi_device *adev = ACPI_COMPANION(controller_parent);
const struct dmi_system_id *dmi_id;
long quirks = 0;
- u64 uid;
- int ret;
+ u64 uid = 0;
- ret = acpi_dev_uid_to_integer(adev, &uid);
- if (ret)
+ dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
+ if (!dmi_id)
return 0;
- dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
- if (dmi_id)
- quirks = (unsigned long)dmi_id->driver_data;
+ quirks = (unsigned long)dmi_id->driver_data;
+
+ /* uid is left at 0 on errors and 0 is not a valid UART UID */
+ acpi_dev_uid_to_integer(adev, &uid);
+
+ /* For PCI UARTs without a UID */
+ if (!uid && dev_is_pci(controller_parent)) {
+ struct pci_dev *pdev = to_pci_dev(controller_parent);
+
+ /*
+ * Devfn values for PCI UARTs on Bay Trail SoCs, which are
+ * the only devices where this fallback is necessary.
+ */
+ if (pdev->devfn == PCI_DEVFN(0x1e, 3))
+ uid = 1;
+ else if (pdev->devfn == PCI_DEVFN(0x1e, 4))
+ uid = 2;
+ }
+
+ if (!uid)
+ return 0;
- if (!dev_is_platform(controller_parent)) {
+ if (!dev_is_platform(controller_parent) && !dev_is_pci(controller_parent)) {
/* PNP enumerated UARTs */
if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
*skip = true;
@@ -505,7 +565,7 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
* Set skip to true so that the tty core creates a serdev ctrl device.
* The backlight driver will manually create the serdev client device.
*/
- if (acpi_dev_hid_match(adev, "DELL0501")) {
+ if (adev && acpi_dev_hid_match(adev, "DELL0501")) {
*skip = true;
/*
* Create a platform dev for dell-uart-backlight to bind to.
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 0230c43377c1..8ef259b4d037 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -449,6 +449,12 @@ const struct bus_type amba_bustype = {
};
EXPORT_SYMBOL_GPL(amba_bustype);
+bool dev_is_amba(const struct device *dev)
+{
+ return dev->bus == &amba_bustype;
+}
+EXPORT_SYMBOL_GPL(dev_is_amba);
+
static int __init amba_init(void)
{
return bus_register(&amba_bustype);
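dev_is_amba() mirrors dev_is_pci() and dev_is_platform(): a bus-type predicate that lets bus-agnostic code guard container_of()-style downcasts. A hedged usage sketch:

	if (dev_is_amba(dev)) {
		struct amba_device *adev = to_amba_device(dev);

		dev_info(dev, "AMBA periphid 0x%08x\n", adev->periphid);
	}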
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 978740537a1a..76052006bd87 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1225,6 +1225,12 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
+
+ if (ref->freeze) {
+ binder_dequeue_work(ref->proc, &ref->freeze->work);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ }
+
binder_stats_deleted(BINDER_STAT_REF);
}
@@ -1965,7 +1971,7 @@ static bool binder_validate_fixup(struct binder_proc *proc,
* struct binder_task_work_cb - for deferred close
*
* @twork: callback_head for task work
- * @fd: fd to close
+ * @file: file to close
*
* Structure to pass task work to be handled after
* returning from binder_ioctl() via task_work_add().
@@ -3011,8 +3017,7 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
ktime_t t_start_time = ktime_get();
- char *secctx = NULL;
- u32 secctx_sz = 0;
+ struct lsm_context lsmctx = { };
struct list_head sgc_head;
struct list_head pf_head;
const void __user *user_buffer = (const void __user *)
@@ -3291,8 +3296,8 @@ static void binder_transaction(struct binder_proc *proc,
size_t added_size;
security_cred_getsecid(proc->cred, &secid);
- ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
- if (ret) {
+ ret = security_secid_to_secctx(secid, &lsmctx);
+ if (ret < 0) {
binder_txn_error("%d:%d failed to get security context\n",
thread->pid, proc->pid);
return_error = BR_FAILED_REPLY;
@@ -3300,7 +3305,7 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
- added_size = ALIGN(secctx_sz, sizeof(u64));
+ added_size = ALIGN(lsmctx.len, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
@@ -3334,23 +3339,23 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
- if (secctx) {
+ if (lsmctx.context) {
int err;
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
t->security_ctx = t->buffer->user_data + buf_offset;
err = binder_alloc_copy_to_buffer(&target_proc->alloc,
t->buffer, buf_offset,
- secctx, secctx_sz);
+ lsmctx.context, lsmctx.len);
if (err) {
t->security_ctx = 0;
WARN_ON(1);
}
- security_release_secctx(secctx, secctx_sz);
- secctx = NULL;
+ security_release_secctx(&lsmctx);
+ lsmctx.context = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
@@ -3394,7 +3399,7 @@ static void binder_transaction(struct binder_proc *proc,
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
- ALIGN(secctx_sz, sizeof(u64));
+ ALIGN(lsmctx.len, sizeof(u64));
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
@@ -3773,8 +3778,8 @@ err_copy_data_failed:
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
- if (secctx)
- security_release_secctx(secctx, secctx_sz);
+ if (lsmctx.context)
+ security_release_secctx(&lsmctx);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -3795,13 +3800,13 @@ err_invalid_target_handle:
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
+ "%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
(tr->flags & TF_ONE_WAY ? "async" : "call"),
target_proc ? target_proc->pid : 0,
target_thread ? target_thread->pid : 0,
t_debug_id, return_error, return_error_param,
- (u64)tr->data_size, (u64)tr->offsets_size,
+ tr->code, (u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
if (target_thread)
@@ -3850,7 +3855,6 @@ binder_request_freeze_notification(struct binder_proc *proc,
{
struct binder_ref_freeze *freeze;
struct binder_ref *ref;
- bool is_frozen;
freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
if (!freeze)
@@ -3866,32 +3870,31 @@ binder_request_freeze_notification(struct binder_proc *proc,
}
binder_node_lock(ref->node);
-
- if (ref->freeze || !ref->node->proc) {
- binder_user_error("%d:%d invalid BC_REQUEST_FREEZE_NOTIFICATION %s\n",
- proc->pid, thread->pid,
- ref->freeze ? "already set" : "dead node");
+ if (ref->freeze) {
+ binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
+ proc->pid, thread->pid);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
kfree(freeze);
return -EINVAL;
}
- binder_inner_proc_lock(ref->node->proc);
- is_frozen = ref->node->proc->is_frozen;
- binder_inner_proc_unlock(ref->node->proc);
binder_stats_created(BINDER_STAT_FREEZE);
INIT_LIST_HEAD(&freeze->work.entry);
freeze->cookie = handle_cookie->cookie;
freeze->work.type = BINDER_WORK_FROZEN_BINDER;
- freeze->is_frozen = is_frozen;
-
ref->freeze = freeze;
- binder_inner_proc_lock(proc);
- binder_enqueue_work_ilocked(&ref->freeze->work, &proc->todo);
- binder_wakeup_proc_ilocked(proc);
- binder_inner_proc_unlock(proc);
+ if (ref->node->proc) {
+ binder_inner_proc_lock(ref->node->proc);
+ freeze->is_frozen = ref->node->proc->is_frozen;
+ binder_inner_proc_unlock(ref->node->proc);
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
+ }
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
@@ -5151,6 +5154,16 @@ static void binder_release_work(struct binder_proc *proc,
} break;
case BINDER_WORK_NODE:
break;
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
+ struct binder_ref_freeze *freeze;
+
+ freeze = container_of(w, struct binder_ref_freeze, work);
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered freeze notification, %016llx\n",
+ (u64)freeze->cookie);
+ kfree(freeze);
+ binder_stats_deleted(BINDER_STAT_FREEZE);
+ } break;
default:
pr_err("unexpected work type, %d, not freed\n",
wtype);
@@ -5552,6 +5565,7 @@ static bool binder_txns_pending_ilocked(struct binder_proc *proc)
static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
{
+ struct binder_node *prev = NULL;
struct rb_node *n;
struct binder_ref *ref;
@@ -5560,7 +5574,10 @@ static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
struct binder_node *node;
node = rb_entry(n, struct binder_node, rb_node);
+ binder_inc_node_tmpref_ilocked(node);
binder_inner_proc_unlock(proc);
+ if (prev)
+ binder_put_node(prev);
binder_node_lock(node);
hlist_for_each_entry(ref, &node->refs, node_entry) {
/*
@@ -5586,10 +5603,15 @@ static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
}
binder_inner_proc_unlock(ref->proc);
}
+ prev = node;
binder_node_unlock(node);
binder_inner_proc_lock(proc);
+ if (proc->is_dead)
+ break;
}
binder_inner_proc_unlock(proc);
+ if (prev)
+ binder_put_node(prev);
}
static int binder_ioctl_freeze(struct binder_freeze_info *info,
@@ -6260,6 +6282,7 @@ static void binder_deferred_release(struct binder_proc *proc)
binder_release_work(proc, &proc->todo);
binder_release_work(proc, &proc->delivered_death);
+ binder_release_work(proc, &proc->delivered_freeze);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
@@ -6350,7 +6373,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.buffer - buffer->user_data);
+ proc->alloc.vm_start - buffer->user_data);
}
static void print_binder_work_ilocked(struct seq_file *m,
@@ -6393,6 +6416,12 @@ static void print_binder_work_ilocked(struct seq_file *m,
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
seq_printf(m, "%shas cleared death notification\n", prefix);
break;
+ case BINDER_WORK_FROZEN_BINDER:
+ seq_printf(m, "%shas frozen binder\n", prefix);
+ break;
+ case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
+ seq_printf(m, "%shas cleared freeze notification\n", prefix);
+ break;
default:
seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
break;
@@ -6539,6 +6568,10 @@ static void print_binder_proc(struct seq_file *m,
seq_puts(m, " has delivered dead binder\n");
break;
}
+ list_for_each_entry(w, &proc->delivered_freeze, entry) {
+ seq_puts(m, " has delivered freeze binder\n");
+ break;
+ }
binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
@@ -6894,6 +6927,11 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
{} /* terminator */
};
+void binder_add_device(struct binder_device *device)
+{
+ hlist_add_head(&device->hlist, &binder_devices);
+}
+
static int __init init_binder_device(const char *name)
{
int ret;
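The binder secctx changes track the LSM API moving from a (char *secctx, u32 secctx_sz) pair to a struct lsm_context carrying pointer and length together, with security_secid_to_secctx() returning a negative errno on failure. The condensed caller shape, from the hunks above:

	struct lsm_context lsmctx = { };

	ret = security_secid_to_secctx(secid, &lsmctx);
	if (ret < 0)
		goto err_get_secctx_failed;

	/* copy lsmctx.len bytes from lsmctx.context into the buffer */

	security_release_secctx(&lsmctx);	/* replaces the old (ptr, len) release */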
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index b3acbc4174fb..fcfaf1b899c8 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -61,7 +61,7 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return alloc->vm_start + alloc->buffer_size - buffer->user_data;
return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
@@ -169,32 +169,33 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return buffer;
}
static inline void
-binder_set_installed_page(struct binder_lru_page *lru_page,
+binder_set_installed_page(struct binder_alloc *alloc,
+ unsigned long index,
struct page *page)
{
/* Pairs with acquire in binder_get_installed_page() */
- smp_store_release(&lru_page->page_ptr, page);
+ smp_store_release(&alloc->pages[index], page);
}
static inline struct page *
-binder_get_installed_page(struct binder_lru_page *lru_page)
+binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
{
/* Pairs with release in binder_set_installed_page() */
- return smp_load_acquire(&lru_page->page_ptr);
+ return smp_load_acquire(&alloc->pages[index]);
}
static void binder_lru_freelist_add(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, false, start, end);
@@ -202,65 +203,159 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
size_t index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (!binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
+ if (!page)
continue;
trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add_obj(&binder_freelist, &page->lru);
+ ret = list_lru_add(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
}
}
-static int binder_install_single_page(struct binder_alloc *alloc,
- struct binder_lru_page *lru_page,
- unsigned long addr)
+static inline
+void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
{
- struct page *page;
- int ret = 0;
+ /* pairs with smp_load_acquire in binder_alloc_is_mapped() */
+ smp_store_release(&alloc->mapped, state);
+}
- if (!mmget_not_zero(alloc->mm))
- return -ESRCH;
+static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
+{
+ /* pairs with smp_store_release in binder_alloc_set_mapped() */
+ return smp_load_acquire(&alloc->mapped);
+}
+
+static struct page *binder_page_lookup(struct binder_alloc *alloc,
+ unsigned long addr)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct page *page;
+ long npages = 0;
/*
- * Protected with mmap_sem in write mode as multiple tasks
- * might race to install the same page.
+ * Find an existing page in the remote mm. If missing,
+ * don't attempt to fault it in; just propagate an error.
*/
- mmap_write_lock(alloc->mm);
- if (binder_get_installed_page(lru_page))
- goto out;
+ mmap_read_lock(mm);
+ if (binder_alloc_is_mapped(alloc))
+ npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
+ &page, NULL);
+ mmap_read_unlock(mm);
- if (!alloc->vma) {
- pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
- ret = -ESRCH;
- goto out;
+ return npages > 0 ? page : NULL;
+}
+
+static int binder_page_insert(struct binder_alloc *alloc,
+ unsigned long addr,
+ struct page *page)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct vm_area_struct *vma;
+ int ret = -ESRCH;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, addr);
+ if (vma) {
+ if (binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ vma_end_read(vma);
+ return ret;
}
+ /* fall back to mmap_lock */
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, addr);
+ if (vma && binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ mmap_read_unlock(mm);
+
+ return ret;
+}
+
+static struct page *binder_page_alloc(struct binder_alloc *alloc,
+ unsigned long index)
+{
+ struct binder_shrinker_mdata *mdata;
+ struct page *page;
+
page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ /* allocate and install shrinker metadata under page->private */
+ mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
+ if (!mdata) {
+ __free_page(page);
+ return NULL;
+ }
+
+ mdata->alloc = alloc;
+ mdata->page_index = index;
+ INIT_LIST_HEAD(&mdata->lru);
+ set_page_private(page, (unsigned long)mdata);
+
+ return page;
+}
+
+static void binder_free_page(struct page *page)
+{
+ kfree((struct binder_shrinker_mdata *)page_private(page));
+ __free_page(page);
+}
+
+static int binder_install_single_page(struct binder_alloc *alloc,
+ unsigned long index,
+ unsigned long addr)
+{
+ struct page *page;
+ int ret;
+
+ if (!mmget_not_zero(alloc->mm))
+ return -ESRCH;
+
+ page = binder_page_alloc(alloc, index);
if (!page) {
- pr_err("%d: failed to allocate page\n", alloc->pid);
ret = -ENOMEM;
goto out;
}
- ret = vm_insert_page(alloc->vma, addr, page);
- if (ret) {
+ ret = binder_page_insert(alloc, addr, page);
+ switch (ret) {
+ case -EBUSY:
+ /*
+ * EBUSY is ok. Someone installed the pte first but the
+ * alloc->pages[index] has not been updated yet. Discard
+ * our page and look up the one already installed.
+ */
+ ret = 0;
+ binder_free_page(page);
+ page = binder_page_lookup(alloc, addr);
+ if (!page) {
+ pr_err("%d: failed to find page at offset %lx\n",
+ alloc->pid, addr - alloc->vm_start);
+ ret = -ESRCH;
+ break;
+ }
+ fallthrough;
+ case 0:
+ /* Mark page installation complete and safe to use */
+ binder_set_installed_page(alloc, index, page);
+ break;
+ default:
+ binder_free_page(page);
pr_err("%d: %s failed to insert page at offset %lx with %d\n",
- alloc->pid, __func__, addr - alloc->buffer, ret);
- __free_page(page);
- ret = -ENOMEM;
- goto out;
+ alloc->pid, __func__, addr - alloc->vm_start, ret);
+ break;
}
-
- /* Mark page installation complete and safe to use */
- binder_set_installed_page(lru_page, page);
out:
- mmap_write_unlock(alloc->mm);
mmput_async(alloc->mm);
return ret;
}
@@ -269,7 +364,6 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
struct binder_buffer *buffer,
size_t size)
{
- struct binder_lru_page *page;
unsigned long start, final;
unsigned long page_addr;
@@ -280,15 +374,13 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
unsigned long index;
int ret;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
-
- if (binder_get_installed_page(page))
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (binder_get_installed_page(alloc, index))
continue;
trace_binder_alloc_page_start(alloc, index);
- ret = binder_install_single_page(alloc, page, page_addr);
+ ret = binder_install_single_page(alloc, index, page_addr);
if (ret)
return ret;
@@ -302,8 +394,8 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long start, unsigned long end)
{
- struct binder_lru_page *page;
unsigned long page_addr;
+ struct page *page;
trace_binder_update_page_range(alloc, true, start, end);
@@ -311,13 +403,16 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
unsigned long index;
bool on_lru;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
+ index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ page = binder_get_installed_page(alloc, index);
- if (page->page_ptr) {
+ if (page) {
trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del_obj(&binder_freelist, &page->lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
WARN_ON(!on_lru);
trace_binder_alloc_lru_end(alloc, index);
@@ -329,20 +424,6 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
}
}
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
-{
- /* pairs with smp_load_acquire in binder_alloc_get_vma() */
- smp_store_release(&alloc->vma, vma);
-}
-
-static inline struct vm_area_struct *binder_alloc_get_vma(
- struct binder_alloc *alloc)
-{
- /* pairs with smp_store_release in binder_alloc_set_vma() */
- return smp_load_acquire(&alloc->vma);
-}
-
static void debug_no_space_locked(struct binder_alloc *alloc)
{
size_t largest_alloc_size = 0;
@@ -576,7 +657,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
int ret;
/* Check binder_alloc is fully initialized */
- if (!binder_alloc_get_vma(alloc)) {
+ if (!binder_alloc_is_mapped(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
@@ -597,10 +678,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
if (!next)
return ERR_PTR(-ENOMEM);
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
goto out;
}
@@ -608,7 +689,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
@@ -674,8 +755,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->user_data < alloc->buffer);
- BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->vm_start);
+ BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += buffer_size;
@@ -734,14 +815,13 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
- (buffer->user_data - alloc->buffer);
+ (buffer->user_data - alloc->vm_start);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
- struct binder_lru_page *lru_page;
- lru_page = &alloc->pages[index];
*pgoffp = pgoff;
- return lru_page->page_ptr;
+
+ return alloc->pages[index];
}
/**
@@ -785,17 +865,17 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
* We could eliminate the call to binder_alloc_clear_buf()
* from binder_alloc_deferred_release() by moving this to
* binder_free_buf_locked(). However, that could
- * increase contention for the alloc->lock if clear_on_free
- * is used frequently for large buffers. This lock is not
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
* needed for correctness here.
*/
if (buffer->clear_on_free) {
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -816,7 +896,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
const char *failure_string;
- int ret, i;
+ int ret;
if (unlikely(vma->vm_mm != alloc->mm)) {
ret = -EINVAL;
@@ -834,22 +914,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
SZ_4M);
mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer = vma->vm_start;
+ alloc->vm_start = vma->vm_start;
alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
- if (alloc->pages == NULL) {
+ if (!alloc->pages) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- alloc->pages[i].alloc = alloc;
- INIT_LIST_HEAD(&alloc->pages[i].lru);
- }
-
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
@@ -857,14 +932,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->user_data = alloc->buffer;
+ buffer->user_data = alloc->vm_start;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
/* Signal binder_alloc is fully initialized */
- binder_alloc_set_vma(alloc, vma);
+ binder_alloc_set_mapped(alloc, true);
return 0;
@@ -872,7 +947,7 @@ err_alloc_buf_struct_failed:
kvfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- alloc->buffer = 0;
+ alloc->vm_start = 0;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:
@@ -893,8 +968,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- spin_lock(&alloc->lock);
- BUG_ON(alloc->vma);
+ mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->mapped);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -925,22 +1000,26 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ struct page *page;
bool on_lru;
- if (!alloc->pages[i].page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
continue;
- on_lru = list_lru_del_obj(&binder_freelist,
- &alloc->pages[i].lru);
+ on_lru = list_lru_del(&binder_freelist,
+ page_to_lru(page),
+ page_to_nid(page),
+ NULL);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d %s\n",
__func__, alloc->pid, i,
on_lru ? "on lru" : "active");
- __free_page(alloc->pages[i].page_ptr);
+ binder_free_page(page);
page_count++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
kvfree(alloc->pages);
if (alloc->mm)
mmdrop(alloc->mm);
@@ -964,17 +1043,17 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
buffer->debug_id,
- buffer->user_data - alloc->buffer,
+ buffer->user_data - alloc->vm_start,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
}
/**
@@ -985,29 +1064,29 @@ void binder_alloc_print_allocated(struct seq_file *m,
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc)
{
- struct binder_lru_page *page;
+ struct page *page;
int i;
int active = 0;
int lru = 0;
int free = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
+ if (binder_alloc_is_mapped(alloc)) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
+ page = binder_get_installed_page(alloc, i);
+ if (!page)
free++;
- else if (list_empty(&page->lru))
+ else if (list_empty(page_to_lru(page)))
active++;
else
lru++;
}
}
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -1023,10 +1102,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return count;
}
@@ -1036,18 +1115,18 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
* @alloc: binder_alloc for this proc
*
* Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
+ * Clears alloc->mapped to prevent new incoming transactions from
* allocating more buffers.
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ binder_alloc_set_mapped(alloc, false);
}
/**
* binder_alloc_free_page() - shrinker callback to free pages
* @item: item to free
- * @lock: lock protecting the item
+ * @lru: list_lru instance of the item
* @cb_arg: callback argument
*
* Called from list_lru_walk() in binder_shrink_scan() to free
@@ -1055,44 +1134,54 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
*/
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lock,
void *cb_arg)
- __must_hold(lock)
+ __must_hold(&lru->lock)
{
- struct binder_lru_page *page = container_of(item, typeof(*page), lru);
- struct binder_alloc *alloc = page->alloc;
+ struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
+ struct binder_alloc *alloc = mdata->alloc;
struct mm_struct *mm = alloc->mm;
struct vm_area_struct *vma;
struct page *page_to_free;
unsigned long page_addr;
+ int mm_locked = 0;
size_t index;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!mmap_read_trylock(mm))
- goto err_mmap_read_lock_failed;
- if (!spin_trylock(&alloc->lock))
- goto err_get_alloc_lock_failed;
- if (!page->page_ptr)
- goto err_page_already_freed;
-
- index = page - alloc->pages;
- page_addr = alloc->buffer + index * PAGE_SIZE;
-
- vma = vma_lookup(mm, page_addr);
- if (vma && vma != binder_alloc_get_vma(alloc))
+
+ index = mdata->page_index;
+ page_addr = alloc->vm_start + index * PAGE_SIZE;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, page_addr);
+ if (!vma) {
+ /* fall back to mmap_lock */
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
+ mm_locked = 1;
+ vma = vma_lookup(mm, page_addr);
+ }
+
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ /*
+ * Since a binder_alloc can only be mapped once, we ensure
+ * the vma corresponds to this mapping by checking whether
+ * the binder_alloc is still mapped.
+ */
+ if (vma && !binder_alloc_is_mapped(alloc))
goto err_invalid_vma;
trace_binder_unmap_kernel_start(alloc, index);
- page_to_free = page->page_ptr;
- page->page_ptr = NULL;
+ page_to_free = alloc->pages[index];
+ binder_set_installed_page(alloc, index, NULL);
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- spin_unlock(&alloc->lock);
- spin_unlock(lock);
+ spin_unlock(&lru->lock);
if (vma) {
trace_binder_unmap_user_start(alloc, index);
@@ -1102,18 +1191,23 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
mmput_async(mm);
- __free_page(page_to_free);
+ binder_free_page(page_to_free);
- spin_lock(lock);
return LRU_REMOVED_RETRY;
err_invalid_vma:
-err_page_already_freed:
- spin_unlock(&alloc->lock);
-err_get_alloc_lock_failed:
- mmap_read_unlock(mm);
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ if (mm_locked)
+ mmap_read_unlock(mm);
+ else
+ vma_end_read(vma);
err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
@@ -1147,7 +1241,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
alloc->pid = current->group_leader->pid;
alloc->mm = current->mm;
mmgrab(alloc->mm);
- spin_lock_init(&alloc->lock);
+ mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 70387234477e..feecd7414241 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,7 +9,7 @@
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
@@ -59,34 +59,43 @@ struct binder_buffer {
};
/**
- * struct binder_lru_page - page object used for binder shrinker
- * @page_ptr: pointer to physical page in mmap'd space
- * @lru: entry in binder_freelist
- * @alloc: binder_alloc for a proc
+ * struct binder_shrinker_mdata - binder metadata used to reclaim pages
+ * @lru: LRU entry in binder_freelist
+ * @alloc: binder_alloc owning the page to reclaim
+ * @page_index:       index into @alloc->pages[] of the page to reclaim
*/
-struct binder_lru_page {
+struct binder_shrinker_mdata {
struct list_head lru;
- struct page *page_ptr;
struct binder_alloc *alloc;
+ unsigned long page_index;
};
+static inline struct list_head *page_to_lru(struct page *p)
+{
+ struct binder_shrinker_mdata *mdata;
+
+ mdata = (struct binder_shrinker_mdata *)page_private(p);
+
+ return &mdata->lru;
+}
+
/**
* struct binder_alloc - per-binder proc state for binder allocator
- * @lock: protects binder_alloc fields
- * @vma: vm_area_struct passed to mmap_handler
- * (invariant after mmap)
+ * @mutex: protects binder_alloc fields
* @mm: copy of task->mm (invariant after open)
- * @buffer: base of per-proc address space mapped via mmap
+ * @vm_start: base of per-proc address space mapped via mmap
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
* sorted by size
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of binder_lru_page
+ * @pages: array of struct page *
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
* @pages_high: high watermark of offset in @pages
+ * @mapped:             whether the vm area is mapped; each binder instance is
+ * allowed a single mapping throughout its lifetime
* @oneway_spam_detected: %true if oneway spam detection fired, clear that
* flag once the async buffer has returned to a healthy state
*
@@ -96,18 +105,18 @@ struct binder_lru_page {
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
- spinlock_t lock;
- struct vm_area_struct *vma;
+ struct mutex mutex;
struct mm_struct *mm;
- unsigned long buffer;
+ unsigned long vm_start;
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct binder_lru_page *pages;
+ struct page **pages;
size_t buffer_size;
int pid;
size_t pages_high;
+ bool mapped;
bool oneway_spam_detected;
};
@@ -118,7 +127,7 @@ static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lock, void *cb_arg);
+ void *cb_arg);
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
@@ -153,9 +162,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
size_t free_async_space;
- spin_lock(&alloc->lock);
+ mutex_lock(&alloc->mutex);
free_async_space = alloc->free_async_space;
- spin_unlock(&alloc->lock);
+ mutex_unlock(&alloc->mutex);
return free_async_space;
}
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 81442fe20a69..c88735c54848 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -104,11 +104,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
end = PAGE_ALIGN(buffer->user_data + size);
page_addr = buffer->user_data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
- page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
- if (!alloc->pages[page_index].page_ptr ||
- !list_empty(&alloc->pages[page_index].lru)) {
+ page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
+ if (!alloc->pages[page_index] ||
+ !list_empty(page_to_lru(alloc->pages[page_index]))) {
pr_err("expect alloc but is %s at page index %d\n",
- alloc->pages[page_index].page_ptr ?
+ alloc->pages[page_index] ?
"lru" : "free", page_index);
return false;
}
@@ -148,10 +148,10 @@ static void binder_selftest_free_buf(struct binder_alloc *alloc,
* if binder shrinker ran during binder_alloc_free_buf
* calls above.
*/
- if (list_empty(&alloc->pages[i].lru)) {
+ if (list_empty(page_to_lru(alloc->pages[i]))) {
pr_err_size_seq(sizes, seq);
pr_err("expect lru but is %s at page index %d\n",
- alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ alloc->pages[i] ? "alloc" : "free", i);
binder_selftest_failures++;
}
}
@@ -168,9 +168,9 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
}
for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
- if (alloc->pages[i].page_ptr) {
+ if (alloc->pages[i]) {
pr_err("expect free but is %s at page index %d\n",
- list_empty(&alloc->pages[i].lru) ?
+ list_empty(page_to_lru(alloc->pages[i])) ?
"alloc" : "lru", i);
binder_selftest_failures++;
}
@@ -291,7 +291,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
- if (!binder_selftest_run || !alloc->vma)
+ if (!binder_selftest_run || !alloc->mapped)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index f8d6be682f23..e4eb8357989c 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -25,8 +25,7 @@ struct binder_context {
/**
* struct binder_device - information about a binder device node
- * @hlist: list of binder devices (only used for devices requested via
- * CONFIG_ANDROID_BINDER_DEVICES)
+ * @hlist: list of binder devices
* @miscdev: information about a binder character device node
* @context: binder context information
* @binderfs_inode: This is the inode of the root dentry of the super block
@@ -582,4 +581,12 @@ struct binder_object {
};
};
+/**
+ * binder_add_device() - Add a binder device to binder_devices
+ * @device: the new binder device to add to the global list
+ *
+ * Not reentrant as the list is not protected by any locks
+ */
+void binder_add_device(struct binder_device *device);
+
#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index fe38c6fc65d0..16de1b9e72f7 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -328,7 +328,7 @@ TRACE_EVENT(binder_update_page_range,
TP_fast_assign(
__entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - alloc->buffer;
+ __entry->offset = start - alloc->vm_start;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index ad1fa7abc323..bc6bae76ccaf 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -207,6 +207,8 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
fsnotify_create(root->d_inode, dentry);
inode_unlock(d_inode(root));
+ binder_add_device(device);
+
return 0;
err:
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 547f56341705..3999305b5356 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -370,7 +370,7 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
- rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+ rc = pcim_request_all_regions(pdev, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
@@ -386,7 +386,9 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
pci_enable_msi(pdev);
- hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+ hpriv->mmio = pcim_iomap(pdev, AHCI_PCI_BAR, 0);
+ if (!hpriv->mmio)
+ return -ENOMEM;
/* save initial config */
ahci_save_initial_config(&pdev->dev, hpriv);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 45f63b09828a..f813dbdc2346 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1676,7 +1676,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
/*
* If number of MSIs is less than number of ports then Sharing Last
* Message mode could be enforced. In this case assume that advantage
- * of multipe MSIs is negated and use single MSI mode instead.
+ * of multiple MSIs is negated and use single MSI mode instead.
*/
if (n_ports > 1) {
nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
@@ -1869,7 +1869,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
- rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+ rc = pcim_request_all_regions(pdev, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
@@ -1893,7 +1893,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_sb600_enable_64bit(pdev))
hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
- hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+ hpriv->mmio = pcim_iomap(pdev, ahci_pci_bar, 0);
+ if (!hpriv->mmio)
+ return -ENOMEM;
/* detect remapped nvme devices */
ahci_remap_check(pdev, ahci_pci_bar, hpriv);
@@ -1985,7 +1987,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
/* legacy intx interrupts */
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
}
hpriv->irq = pci_irq_vector(pdev, 0);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 8f40f75ba08c..8e895ae45c86 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -328,7 +328,7 @@ struct ahci_port_priv {
struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
- u32 mask_port_map; /* mask out particular bits */
+ u32 mask_port_map; /* Mask of valid ports */
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
@@ -379,6 +379,17 @@ struct ahci_host_priv {
int port);
};
+/*
+ * Return true if a port should be ignored because it is excluded from
+ * the host port map.
+ */
+static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
+ unsigned int portid)
+{
+ return portid >= hpriv->nports ||
+ !(hpriv->mask_port_map & (1 << portid));
+}
+
extern int ahci_ignore_sss;
extern const struct attribute_group *ahci_shost_groups[];
@@ -396,8 +407,8 @@ extern const struct attribute_group *ahci_sdev_groups[];
.shost_groups = ahci_shost_groups, \
.sdev_groups = ahci_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .device_configure = ata_scsi_device_configure
+ .tag_alloc_policy_rr = true, \
+ .sdev_configure = ata_scsi_sdev_configure
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_platform_ops;
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 2f16524c2526..29be74fedcf0 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -288,6 +288,9 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
/* Re-initialize and calibrate the PHY */
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
@@ -571,7 +574,7 @@ static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
static struct platform_driver brcm_ahci_driver = {
.probe = brcm_ahci_probe,
- .remove_new = brcm_ahci_remove,
+ .remove = brcm_ahci_remove,
.shutdown = brcm_ahci_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 11a2c199a7c2..2d6a08c23d6a 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -206,6 +206,9 @@ static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
goto disable_clks;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_rsts;
@@ -215,6 +218,9 @@ static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
ahci_platform_deassert_rsts(hpriv);
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_power_on(hpriv->phys[i]);
if (rc) {
phy_exit(hpriv->phys[i]);
@@ -402,7 +408,7 @@ MODULE_DEVICE_TABLE(of, ceva_ahci_of_match);
static struct platform_driver ceva_ahci_driver = {
.probe = ceva_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ceva_ahci_of_match,
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 55a6627d5450..ca0924dc5bd2 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -238,7 +238,7 @@ MODULE_DEVICE_TABLE(of, ahci_da850_of_match);
static struct platform_driver ahci_da850_driver = {
.probe = ahci_da850_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_da850_of_match,
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index 4cb70064fb99..b08547b877a1 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -182,7 +182,7 @@ MODULE_DEVICE_TABLE(of, ahci_dm816_of_match);
static struct platform_driver ahci_dm816_driver = {
.probe = ahci_dm816_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = AHCI_DM816_DRV_NAME,
.of_match_table = ahci_dm816_of_match,
diff --git a/drivers/ata/ahci_dwc.c b/drivers/ata/ahci_dwc.c
index ed263de3fd70..aec6d793f51a 100644
--- a/drivers/ata/ahci_dwc.c
+++ b/drivers/ata/ahci_dwc.c
@@ -478,7 +478,7 @@ MODULE_DEVICE_TABLE(of, ahci_dwc_of_match);
static struct platform_driver ahci_dwc_driver = {
.probe = ahci_dwc_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 6f955e9105e8..f01f08048f97 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -511,7 +511,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
if (imxpriv->type == AHCI_IMX6Q || imxpriv->type == AHCI_IMX6QP) {
/*
- * set PHY Paremeters, two steps to configure the GPR13,
+ * set PHY Parameters, two steps to configure the GPR13,
* one write for rest of parameters, mask of first write
* is 0x07ffffff, and the other one write for setting
* the mpll_clk_en.
@@ -1027,7 +1027,7 @@ static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
static struct platform_driver imx_ahci_driver = {
.probe = imx_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_ahci_of_match,
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index adc851cd5578..7295b9066ae2 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -174,7 +174,7 @@ MODULE_DEVICE_TABLE(of, ahci_of_match);
static struct platform_driver mtk_ahci_driver = {
.probe = mtk_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index f3187351e8a6..8744dae41612 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -245,7 +245,7 @@ MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
static struct platform_driver ahci_mvebu_driver = {
.probe = ahci_mvebu_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.suspend = ahci_mvebu_suspend,
.resume = ahci_mvebu_resume,
.driver = {
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 81fc63f6b008..c18054333f7c 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -96,7 +96,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index b1a4e57578e2..30e39885b64e 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -357,7 +357,7 @@ static SIMPLE_DEV_PM_OPS(ahci_qoriq_pm_ops, ahci_platform_suspend,
static struct platform_driver ahci_qoriq_driver = {
.probe = ahci_qoriq_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_qoriq_of_match,
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
index 59f97aa7ac75..3f16c1678402 100644
--- a/drivers/ata/ahci_seattle.c
+++ b/drivers/ata/ahci_seattle.c
@@ -185,7 +185,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_seattle_driver = {
.probe = ahci_seattle_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.acpi_match_table = ahci_acpi_match,
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 79a8b0aa37bf..4336c8a6e208 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -176,7 +176,6 @@ static int st_ahci_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int st_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -221,9 +220,8 @@ static int st_ahci_resume(struct device *dev)
return ahci_platform_resume_host(dev);
}
-#endif
-static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
static const struct of_device_id st_ahci_match[] = {
{ .compatible = "st,ahci", },
@@ -234,11 +232,11 @@ MODULE_DEVICE_TABLE(of, st_ahci_match);
static struct platform_driver st_ahci_driver = {
.driver = {
.name = DRV_NAME,
- .pm = &st_ahci_pm_ops,
+ .pm = pm_sleep_ptr(&st_ahci_pm_ops),
.of_match_table = st_ahci_match,
},
.probe = st_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(st_ahci_driver);
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 58b2683954dd..5d4584570ae0 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -292,7 +292,7 @@ MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
static struct platform_driver ahci_sunxi_driver = {
.probe = ahci_sunxi_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_sunxi_of_match,
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index 8703c2a4658b..44584eed6374 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -608,7 +608,7 @@ deinit_controller:
static struct platform_driver tegra_ahci_driver = {
.probe = tegra_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = tegra_ahci_of_match,
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 81a1d838c0fc..dfbd8c53abcb 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -534,7 +534,7 @@ softreset_retry:
/**
* xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
- * @host: Host that recieved the irq
+ * @host: Host that received the irq
* @irq_masked: HOST_IRQ_STAT value
*
* For hardware with broken edge trigger latch
@@ -859,7 +859,7 @@ disable_resources:
static struct platform_driver xgene_ahci_driver = {
.probe = xgene_ahci_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
.of_match_table = xgene_ahci_of_match,
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 2f57ec00ab82..e70b6c089cf1 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -209,7 +209,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0);
}
-static struct pci_device_id ata_generic[] = {
+static const struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), },
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 093b940bc953..d441246fa357 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1725,7 +1725,7 @@ static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* message-signalled interrupts currently).
*/
if (port_flags & PIIX_FLAG_CHECKINTR)
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
if (piix_check_450nx_errata(pdev)) {
/* This writes into the master table but it does not
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 7a8064520a35..53b2c7719dc5 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -49,6 +49,9 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
int rc, i;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
rc = phy_init(hpriv->phys[i]);
if (rc)
goto disable_phys;
@@ -70,6 +73,9 @@ int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
disable_phys:
while (--i >= 0) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -88,6 +94,9 @@ void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
int i;
for (i = 0; i < hpriv->nports; i++) {
+ if (ahci_ignore_port(hpriv, i))
+ continue;
+
phy_power_off(hpriv->phys[i]);
phy_exit(hpriv->phys[i]);
}
@@ -432,6 +441,20 @@ static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
return 0;
}
+static u32 ahci_platform_find_max_port_id(struct device *dev)
+{
+ u32 max_port = 0;
+
+ for_each_child_of_node_scoped(dev->of_node, child) {
+ u32 port;
+
+ if (!of_property_read_u32(child, "reg", &port))
+ max_port = max(max_port, port);
+ }
+
+ return max_port;
+}
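
As a worked example (hypothetical DT layout, not taken from this patch): if a controller node has children with reg = <0> and reg = <2>, the scan above returns max_port = 2, so the code below sets nports = 3 and allocates three phy/regulator slots; port 1, which has no sub-node, is never enabled because it is absent from mask_port_map and ahci_ignore_port() filters it out.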
+
/**
* ahci_platform_get_resources - Get platform resources
* @pdev: platform device to get resources for
@@ -458,6 +481,7 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
u32 mask_port_map = 0;
+ u32 max_port;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
return ERR_PTR(-ENOMEM);
@@ -549,15 +573,17 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
goto err_out;
}
+ /* find maximum port id for allocating structures */
+ max_port = ahci_platform_find_max_port_id(dev);
/*
- * If no sub-node was found, we still need to set nports to
- * one in order to be able to use the
+ * Set nports according to the maximum port id. Clamp to
+ * AHCI_MAX_PORTS; a warning is generated later for any
+ * invalid port id.
+ * When the DT has no sub-nodes, max_port is 0 and nports is 1,
+ * so that it is still possible to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
- if (child_nodes)
- hpriv->nports = child_nodes;
- else
- hpriv->nports = 1;
+ hpriv->nports = min(AHCI_MAX_PORTS, max_port + 1);
hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
if (!hpriv->phys) {
@@ -625,6 +651,8 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
* If no sub-node was found, keep this for device tree
* compatibility
*/
+ hpriv->mask_port_map |= BIT(0);
+
rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node);
if (rc)
goto err_out;
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index d36e71f475ab..b7f0bf795521 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -86,7 +86,7 @@ static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
* @dev: ATA device ACPI event occurred (can be NULL)
* @event: ACPI event which occurred
*
- * All ACPI bay / device realted events end up in this function. If
+ * All ACPI bay / device related events end up in this function. If
* the event is port-wide @dev is NULL. If the event is specific to a
* device, @dev points to it.
*
@@ -832,7 +832,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
dev->flags |= ATA_DFLAG_ACPI_PENDING;
}
} else {
- /* SATA _GTF needs to be evaulated after _SDD and
+ /* SATA _GTF needs to be evaluated after _SDD and
* there's no reason to evaluate IDE _GTF early
* without _STM. Clear cache and schedule _GTF.
*/
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c085dd81ebe7..63ec2f218431 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,6 +4143,10 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
+ { "Samsung SSD 870 QVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
+ ATA_QUIRK_ZERO_AFTER_TRIM |
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NOLPM },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3f0144e7dc80..3b303d4ae37a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -651,6 +651,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
/* the scmd has an associated qc */
if (!(qc->flags & ATA_QCFLAG_EH)) {
/* which hasn't failed yet, timeout */
+ set_host_byte(scmd, DID_TIME_OUT);
qc->err_mask |= AC_ERR_TIMEOUT;
qc->flags |= ATA_QCFLAG_EH;
nr_timedout++;
@@ -4099,10 +4100,20 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
- /* Set all devices attached to the port in standby mode */
- ata_for_each_link(link, ap, HOST_FIRST) {
- ata_for_each_dev(dev, link, ENABLED)
- ata_dev_power_set_standby(dev);
+ /*
+ * We will reach this point for all of the PM events:
+ * PM_EVENT_SUSPEND (if runtime pm, PM_EVENT_AUTO will also be set)
+ * PM_EVENT_FREEZE, and PM_EVENT_HIBERNATE.
+ *
+ * We do not want to perform disk spin down for PM_EVENT_FREEZE.
+ * (Spin down will be performed by the subsequent PM_EVENT_HIBERNATE.)
+ */
+ if (!(ap->pm_mesg.event & PM_EVENT_FREEZE)) {
+ /* Set all devices attached to the port in standby mode */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ENABLED)
+ ata_dev_power_set_standby(dev);
+ }
}
/*
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 9c76fb1ad2ec..ba300cc0a3a3 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -1313,7 +1313,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
/**
- * ata_sas_device_configure - Default device_configure routine for libata
+ * ata_sas_sdev_configure - Default sdev_configure routine for libata
* devices
* @sdev: SCSI device to configure
* @lim: queue limits
@@ -1323,14 +1323,14 @@ EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
* Zero.
*/
-int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
- struct ata_port *ap)
+int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
return ata_scsi_dev_config(sdev, lim, ap->link.device);
}
-EXPORT_SYMBOL_GPL(ata_sas_device_configure);
+EXPORT_SYMBOL_GPL(ata_sas_sdev_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f915e3df57a9..2796c0da8257 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1133,7 +1133,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
}
/**
- * ata_scsi_slave_alloc - Early setup of SCSI device
+ * ata_scsi_sdev_init - Early setup of SCSI device
* @sdev: SCSI device to examine
*
* This is called from scsi_alloc_sdev() when the scsi device
@@ -1143,7 +1143,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_slave_alloc(struct scsi_device *sdev)
+int ata_scsi_sdev_init(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct device_link *link;
@@ -1166,10 +1166,10 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_init);
/**
- * ata_scsi_device_configure - Set SCSI device attributes
+ * ata_scsi_sdev_configure - Set SCSI device attributes
* @sdev: SCSI device to examine
* @lim: queue limits
*
@@ -1181,8 +1181,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
* Defined by SCSI layer. We don't really care.
*/
-int ata_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
@@ -1192,10 +1191,10 @@ int ata_scsi_device_configure(struct scsi_device *sdev,
return 0;
}
-EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_configure);
/**
- * ata_scsi_slave_destroy - SCSI device is about to be destroyed
+ * ata_scsi_sdev_destroy - SCSI device is about to be destroyed
* @sdev: SCSI device to be destroyed
*
* @sdev is about to be destroyed for hot/warm unplugging. If
@@ -1208,7 +1207,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_device_configure);
* LOCKING:
* Defined by SCSI layer. We don't really care.
*/
-void ata_scsi_slave_destroy(struct scsi_device *sdev)
+void ata_scsi_sdev_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
unsigned long flags;
@@ -1228,7 +1227,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
kfree(sdev->dma_drain_buf);
}
-EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
+EXPORT_SYMBOL_GPL(ata_scsi_sdev_destroy);
/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
@@ -1334,17 +1333,8 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
*/
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
- u64 lba = 0;
- u32 len;
-
- lba |= ((u64)(cdb[1] & 0x1f)) << 16;
- lba |= ((u64)cdb[2]) << 8;
- lba |= ((u64)cdb[3]);
-
- len = cdb[4];
-
- *plba = lba;
- *plen = len;
+ *plba = get_unaligned_be24(&cdb[1]) & 0x1fffff;
+ *plen = cdb[4];
}
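
The rewrite folds the three explicit shifts into a single byte-order helper. Bytes 1-3 of a READ(6)/WRITE(6) CDB hold a 21-bit LBA, so the 24-bit big-endian load is masked with 0x1fffff to drop the three non-LBA bits of byte 1; a side-by-side sketch of the equivalence:

	/* old form */
	lba  = ((u64)(cdb[1] & 0x1f)) << 16;
	lba |= ((u64)cdb[2]) << 8;
	lba |= ((u64)cdb[3]);

	/* new form, same 21-bit result */
	lba = get_unaligned_be24(&cdb[1]) & 0x1fffff;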
/**
@@ -1781,15 +1771,10 @@ defer:
return SCSI_MLQUEUE_HOST_BUSY;
}
-struct ata_scsi_args {
- struct ata_device *dev;
- u16 *id;
- struct scsi_cmnd *cmd;
-};
-
/**
* ata_scsi_rbuf_fill - wrapper for SCSI command simulators
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @actor: Callback hook for desired SCSI command simulator
*
* Takes care of the hard work of simulating a SCSI command...
@@ -1802,30 +1787,32 @@ struct ata_scsi_args {
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
- unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
+static void ata_scsi_rbuf_fill(struct ata_device *dev, struct scsi_cmnd *cmd,
+ unsigned int (*actor)(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf))
{
- unsigned int rc;
- struct scsi_cmnd *cmd = args->cmd;
unsigned long flags;
+ unsigned int len;
spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
- rc = actor(args, ata_scsi_rbuf);
- if (rc == 0)
+ len = actor(dev, cmd, ata_scsi_rbuf);
+ if (len) {
sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+ cmd->result = SAM_STAT_GOOD;
+ if (scsi_bufflen(cmd) > len)
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - len);
+ }
spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
-
- if (rc == 0)
- cmd->result = SAM_STAT_GOOD;
}
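
Under the reworked convention, an actor returns the number of valid response bytes instead of a 0/non-0 status; returning 0 now means the actor failed and has already set the sense data itself, while ata_scsi_rbuf_fill() handles the buffer copy, GOOD status and residual. A minimal hypothetical actor (ata_scsiop_example is illustrative, not part of the patch):

	static unsigned int ata_scsiop_example(struct ata_device *dev,
					       struct scsi_cmnd *cmd, u8 *rbuf)
	{
		/* rbuf points at ATA_SCSI_RBUF_SIZE pre-zeroed bytes */
		rbuf[0] = 0x00;		/* fill in the response... */
		return 4;		/* ...and report how much is valid */
	}

	ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_example);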
/**
- * ata_scsiop_inq_std - Simulate INQUIRY command
- * @args: device IDENTIFY data / SCSI command of interest.
+ * ata_scsiop_inq_std - Simulate standard INQUIRY command
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns standard device identification data associated
@@ -1834,7 +1821,8 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_std(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
static const u8 versions[] = {
0x00,
@@ -1875,40 +1863,45 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
* Set the SCSI Removable Media Bit (RMB) if the ATA removable media
* device bit (obsolete since ATA-8 ACS) is set.
*/
- if (ata_id_removable(args->id))
+ if (ata_id_removable(dev->id))
hdr[1] |= (1 << 7);
- if (args->dev->class == ATA_DEV_ZAC) {
+ if (dev->class == ATA_DEV_ZAC) {
hdr[0] = TYPE_ZBC;
hdr[2] = 0x7; /* claim SPC-5 version compatibility */
}
- if (args->dev->flags & ATA_DFLAG_CDL)
+ if (dev->flags & ATA_DFLAG_CDL)
hdr[2] = 0xd; /* claim SPC-6 version compatibility */
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
- ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
+ ata_id_string(dev->id, &rbuf[16], ATA_ID_PROD, 16);
/* From SAT, use last 2 words from fw rev unless they are spaces */
- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
+ ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV + 2, 4);
if (strncmp(&rbuf[32], " ", 4) == 0)
- ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
+ ata_id_string(dev->id, &rbuf[32], ATA_ID_FW_REV, 4);
if (rbuf[32] == 0 || rbuf[32] == ' ')
memcpy(&rbuf[32], "n/a ", 4);
- if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
+ if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
else
memcpy(rbuf + 58, versions, sizeof(versions));
- return 0;
+ /*
+ * Include all 8 possible version descriptors, even if not all of
+ * them are popoulated.
+ */
+ return 96;
}
/**
* ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns list of inquiry VPD pages available.
@@ -1916,7 +1909,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_00(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
int i, num_pages = 0;
static const u8 pages[] = {
@@ -1933,18 +1927,20 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
for (i = 0; i < sizeof(pages); i++) {
if (pages[i] == 0xb6 &&
- !(args->dev->flags & ATA_DFLAG_ZAC))
+ !(dev->flags & ATA_DFLAG_ZAC))
continue;
rbuf[num_pages + 4] = pages[i];
num_pages++;
}
rbuf[3] = num_pages; /* number of supported VPD pages */
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
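
The return expression encodes the standard VPD layout: bytes 2-3 of every VPD page hold the big-endian payload length excluding the 4-byte header, so re-reading that field and adding 4 yields the total number of valid bytes. For page 00h, if three pages are listed then rbuf[3] = 3 and the function reports 3 + 4 = 7 valid bytes.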
/**
* ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Returns ATA device serial number.
@@ -1952,7 +1948,8 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_80(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
static const u8 hdr[] = {
0,
@@ -1962,14 +1959,16 @@ static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
};
memcpy(rbuf, hdr, sizeof(hdr));
- ata_id_string(args->id, (unsigned char *) &rbuf[4],
+ ata_id_string(dev->id, (unsigned char *) &rbuf[4],
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
* ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields two logical unit device identification designators:
@@ -1980,7 +1979,8 @@ static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_83(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
const int sat_model_serial_desc_len = 68;
int num;
@@ -1992,7 +1992,7 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
rbuf[num + 0] = 2;
rbuf[num + 3] = ATA_ID_SERNO_LEN;
num += 4;
- ata_id_string(args->id, (unsigned char *) rbuf + num,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num,
ATA_ID_SERNO, ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
@@ -2004,31 +2004,33 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
num += 4;
memcpy(rbuf + num, "ATA ", 8);
num += 8;
- ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
ATA_ID_PROD_LEN);
num += ATA_ID_PROD_LEN;
- ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
- if (ata_id_has_wwn(args->id)) {
+ if (ata_id_has_wwn(dev->id)) {
/* SAT defined lu world wide name */
/* piv=0, assoc=lu, code_set=binary, designator=NAA */
rbuf[num + 0] = 1;
rbuf[num + 1] = 3;
rbuf[num + 3] = ATA_ID_WWN_LEN;
num += 4;
- ata_id_string(args->id, (unsigned char *) rbuf + num,
+ ata_id_string(dev->id, (unsigned char *) rbuf + num,
ATA_ID_WWN, ATA_ID_WWN_LEN);
num += ATA_ID_WWN_LEN;
}
rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
- return 0;
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
/**
* ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields SAT-specified ATA VPD page.
@@ -2036,7 +2038,8 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_inq_89(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
rbuf[1] = 0x89; /* our page code */
rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */
@@ -2057,13 +2060,25 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
rbuf[56] = ATA_CMD_ID_ATA;
- memcpy(&rbuf[60], &args->id[0], 512);
- return 0;
+ memcpy(&rbuf[60], &dev->id[0], 512);
+
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b0 - Simulate INQUIRY VPD page B0, Block Limits
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B0h (Block Limits).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b0(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
u16 min_io_sectors;
rbuf[1] = 0xb0;
@@ -2076,7 +2091,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
* logical than physical sector size we need to figure out what the
* latter is.
*/
- min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
+ min_io_sectors = 1 << ata_id_log2_per_physical_sector(dev->id);
put_unaligned_be16(min_io_sectors, &rbuf[6]);
/*
@@ -2088,7 +2103,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
 * that we support some form of unmap - in this case via WRITE SAME
* with the unmap bit set.
*/
- if (ata_id_has_trim(args->id)) {
+ if (ata_id_has_trim(dev->id)) {
u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
if (dev->quirks & ATA_QUIRK_MAX_TRIM_128M)
@@ -2098,14 +2113,27 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
put_unaligned_be32(1, &rbuf[28]);
}
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b1 - Simulate INQUIRY VPD page B1, Block Device
+ * Characteristics
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B1h (Block Device Characteristics).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b1(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- int form_factor = ata_id_form_factor(args->id);
- int media_rotation_rate = ata_id_rotation_rate(args->id);
- u8 zoned = ata_id_zoned_cap(args->id);
+ int form_factor = ata_id_form_factor(dev->id);
+ int media_rotation_rate = ata_id_rotation_rate(dev->id);
+ u8 zoned = ata_id_zoned_cap(dev->id);
rbuf[1] = 0xb1;
rbuf[3] = 0x3c;
@@ -2115,21 +2143,52 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
if (zoned)
rbuf[8] = (zoned << 4);
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b2 - Simulate INQUIRY VPD page B2, Logical Block
+ * Provisioning
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B2h (Logical Block Provisioning).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b2(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
/* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
rbuf[1] = 0xb2;
rbuf[3] = 0x4;
rbuf[5] = 1 << 6; /* TPWS */
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b6 - Simulate INQUIRY VPD page B6, Zoned Block Device
+ * Characteristics
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B6h (Zoned Block Device Characteristics).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
+ if (!(dev->flags & ATA_DFLAG_ZAC)) {
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
+
/*
* zbc-r05 SCSI Zoned Block device characteristics VPD page
*/
@@ -2139,21 +2198,39 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
/*
* URSWRZ bit is only meaningful for host-managed ZAC drives
*/
- if (args->dev->zac_zoned_cap & 1)
+ if (dev->zac_zoned_cap & 1)
rbuf[4] |= 1;
- put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
- put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
- put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);
+ put_unaligned_be32(dev->zac_zones_optimal_open, &rbuf[8]);
+ put_unaligned_be32(dev->zac_zones_optimal_nonseq, &rbuf[12]);
+ put_unaligned_be32(dev->zac_zones_max_open, &rbuf[16]);
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
}
-static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+/**
+ * ata_scsiop_inq_b9 - Simulate INQUIRY VPD page B9, Concurrent Positioning
+ * Ranges
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for the VPD page B9h (Concurrent Positioning Ranges).
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inq_b9(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_cpr_log *cpr_log = args->dev->cpr_log;
+ struct ata_cpr_log *cpr_log = dev->cpr_log;
u8 *desc = &rbuf[64];
int i;
+ if (!cpr_log) {
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
+
/* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
rbuf[1] = 0xb9;
put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
@@ -2165,7 +2242,58 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
}
- return 0;
+ return get_unaligned_be16(&rbuf[2]) + 4;
+}
+
+/**
+ * ata_scsiop_inquiry - Simulate INQUIRY command
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Return data for an INQUIRY command, either the standard data or a VPD page.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_inquiry(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
+{
+ const u8 *scsicmd = cmd->cmnd;
+
+ /* is CmdDt set? */
+ if (scsicmd[1] & 2) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
+
+ /* Is EVPD clear? */
+ if ((scsicmd[1] & 1) == 0)
+ return ata_scsiop_inq_std(dev, cmd, rbuf);
+
+ switch (scsicmd[2]) {
+ case 0x00:
+ return ata_scsiop_inq_00(dev, cmd, rbuf);
+ case 0x80:
+ return ata_scsiop_inq_80(dev, cmd, rbuf);
+ case 0x83:
+ return ata_scsiop_inq_83(dev, cmd, rbuf);
+ case 0x89:
+ return ata_scsiop_inq_89(dev, cmd, rbuf);
+ case 0xb0:
+ return ata_scsiop_inq_b0(dev, cmd, rbuf);
+ case 0xb1:
+ return ata_scsiop_inq_b1(dev, cmd, rbuf);
+ case 0xb2:
+ return ata_scsiop_inq_b2(dev, cmd, rbuf);
+ case 0xb6:
+ return ata_scsiop_inq_b6(dev, cmd, rbuf);
+ case 0xb9:
+ return ata_scsiop_inq_b9(dev, cmd, rbuf);
+ default:
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ return 0;
+ }
}
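
The new dispatcher folds the per-page checks into one place, keyed off the INQUIRY CDB: byte 1 bit 1 is CmdDt (obsolete, rejected), bit 0 is EVPD, and byte 2 selects the VPD page when EVPD is set. A hedged standalone sketch of that decode (plain C, hypothetical values):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* INQUIRY CDB requesting VPD page B0h, 64-byte allocation */
            const uint8_t cdb[6] = { 0x12, 0x01, 0xb0, 0x00, 0x40, 0x00 };

            if (cdb[1] & 0x02)
                    puts("CmdDt set -> invalid field in CDB");
            else if (!(cdb[1] & 0x01))
                    puts("EVPD clear -> standard INQUIRY data");
            else
                    printf("VPD page 0x%02x requested\n", cdb[2]);
            return 0;
    }
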
/**
@@ -2388,7 +2516,8 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
/**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate MODE SENSE commands. Assume this is invoked for direct
@@ -2398,10 +2527,10 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_mode_sense(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
- u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
+ u8 *scsicmd = cmd->cmnd, *p = rbuf;
static const u8 sat_blk_desc[] = {
0, 0, 0, 0, /* number of blocks: sat unspecified */
0,
@@ -2466,17 +2595,17 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
break;
case CACHE_MPAGE:
- p += ata_msense_caching(args->id, p, page_control == 1);
+ p += ata_msense_caching(dev->id, p, page_control == 1);
break;
case CONTROL_MPAGE:
- p += ata_msense_control(args->dev, p, spg, page_control == 1);
+ p += ata_msense_control(dev, p, spg, page_control == 1);
break;
case ALL_MPAGES:
p += ata_msense_rw_recovery(p, page_control == 1);
- p += ata_msense_caching(args->id, p, page_control == 1);
- p += ata_msense_control(args->dev, p, spg, page_control == 1);
+ p += ata_msense_caching(dev->id, p, page_control == 1);
+ p += ata_msense_control(dev, p, spg, page_control == 1);
break;
default: /* invalid page code */
@@ -2494,29 +2623,33 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
rbuf[3] = sizeof(sat_blk_desc);
memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
}
- } else {
- put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
- rbuf[3] |= dpofua;
- if (ebd) {
- rbuf[7] = sizeof(sat_blk_desc);
- memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
- }
+
+ return rbuf[0] + 1;
}
- return 0;
+
+ put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
+ rbuf[3] |= dpofua;
+ if (ebd) {
+ rbuf[7] = sizeof(sat_blk_desc);
+ memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
+ }
+
+ return get_unaligned_be16(&rbuf[0]) + 2;
invalid_fld:
- ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
- return 1;
+ ata_scsi_set_invalid_field(dev, cmd, fp, bp);
+ return 0;
saving_not_supp:
- ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+ ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x39, 0x0);
/* "Saving parameters not supported" */
- return 1;
+ return 0;
}
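
The two return statements above encode the MODE SENSE length convention: the MODE DATA LENGTH field excludes itself, so the full response is the field value plus the field's own size, one byte for MODE SENSE(6) and two for MODE SENSE(10). A sketch of both cases, assuming only plain C:

    #include <stdint.h>

    static unsigned int msense6_total(const uint8_t *rbuf)
    {
            return rbuf[0] + 1;             /* 1-byte MODE DATA LENGTH */
    }

    static unsigned int msense10_total(const uint8_t *rbuf)
    {
            /* 2-byte big-endian MODE DATA LENGTH */
            return (((unsigned int)rbuf[0] << 8) | rbuf[1]) + 2;
    }
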
/**
* ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate READ CAPACITY commands.
@@ -2524,9 +2657,10 @@ saving_not_supp:
* LOCKING:
* None.
*/
-static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_read_cap(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
+ u8 *scsicmd = cmd->cmnd;
u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
u32 sector_size; /* physical sector size in bytes */
u8 log2_per_phys;
@@ -2536,7 +2670,7 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
- if (args->cmd->cmnd[0] == READ_CAPACITY) {
+ if (scsicmd[0] == READ_CAPACITY) {
if (last_lba >= 0xffffffffULL)
last_lba = 0xffffffff;
@@ -2551,48 +2685,59 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[5] = sector_size >> (8 * 2);
rbuf[6] = sector_size >> (8 * 1);
rbuf[7] = sector_size;
- } else {
- /* sector count, 64-bit */
- rbuf[0] = last_lba >> (8 * 7);
- rbuf[1] = last_lba >> (8 * 6);
- rbuf[2] = last_lba >> (8 * 5);
- rbuf[3] = last_lba >> (8 * 4);
- rbuf[4] = last_lba >> (8 * 3);
- rbuf[5] = last_lba >> (8 * 2);
- rbuf[6] = last_lba >> (8 * 1);
- rbuf[7] = last_lba;
- /* sector size */
- rbuf[ 8] = sector_size >> (8 * 3);
- rbuf[ 9] = sector_size >> (8 * 2);
- rbuf[10] = sector_size >> (8 * 1);
- rbuf[11] = sector_size;
-
- rbuf[12] = 0;
- rbuf[13] = log2_per_phys;
- rbuf[14] = (lowest_aligned >> 8) & 0x3f;
- rbuf[15] = lowest_aligned;
-
- if (ata_id_has_trim(args->id) &&
- !(dev->quirks & ATA_QUIRK_NOTRIM)) {
- rbuf[14] |= 0x80; /* LBPME */
-
- if (ata_id_has_zero_after_trim(args->id) &&
- dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
- ata_dev_info(dev, "Enabling discard_zeroes_data\n");
- rbuf[14] |= 0x40; /* LBPRZ */
- }
+ return 8;
+ }
+
+ /*
+ * READ CAPACITY 16 command is defined as a service action
+ * (SERVICE_ACTION_IN_16 command).
+ */
+ if (scsicmd[0] != SERVICE_ACTION_IN_16 ||
+ (scsicmd[1] & 0x1f) != SAI_READ_CAPACITY_16) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
+
+ /* sector count, 64-bit */
+ rbuf[0] = last_lba >> (8 * 7);
+ rbuf[1] = last_lba >> (8 * 6);
+ rbuf[2] = last_lba >> (8 * 5);
+ rbuf[3] = last_lba >> (8 * 4);
+ rbuf[4] = last_lba >> (8 * 3);
+ rbuf[5] = last_lba >> (8 * 2);
+ rbuf[6] = last_lba >> (8 * 1);
+ rbuf[7] = last_lba;
+
+ /* sector size */
+ rbuf[ 8] = sector_size >> (8 * 3);
+ rbuf[ 9] = sector_size >> (8 * 2);
+ rbuf[10] = sector_size >> (8 * 1);
+ rbuf[11] = sector_size;
+
+ if (ata_id_zoned_cap(dev->id) || dev->class == ATA_DEV_ZAC)
+ rbuf[12] = (1 << 4); /* RC_BASIS */
+ rbuf[13] = log2_per_phys;
+ rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+ rbuf[15] = lowest_aligned;
+
+ if (ata_id_has_trim(dev->id) && !(dev->quirks & ATA_QUIRK_NOTRIM)) {
+ rbuf[14] |= 0x80; /* LBPME */
+
+ if (ata_id_has_zero_after_trim(dev->id) &&
+ dev->quirks & ATA_QUIRK_ZERO_AFTER_TRIM) {
+ ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+ rbuf[14] |= 0x40; /* LBPRZ */
}
- if (ata_id_zoned_cap(args->id) ||
- args->dev->class == ATA_DEV_ZAC)
- rbuf[12] = (1 << 4); /* RC_BASIS */
}
- return 0;
+
+ return 16;
}
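
With the early `return 8` for READ CAPACITY(10) and the trailing `return 16`, the function now reports exactly how many response bytes it filled. The byte-by-byte shifts above amount to a big-endian store; a standalone equivalent for the 64-bit last-LBA field (a sketch, not the kernel's put_unaligned_be64()):

    #include <stdint.h>

    static void put_be64(uint64_t v, uint8_t *p)
    {
            for (int i = 0; i < 8; i++)
                    p[i] = (uint8_t)(v >> (8 * (7 - i))); /* MSB first */
    }
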
/**
* ata_scsiop_report_luns - Simulate REPORT LUNS command
- * @args: device IDENTIFY data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Simulate REPORT LUNS command.
@@ -2600,11 +2745,12 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_report_luns(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
- return 0;
+ return 16;
}
/*
@@ -3312,7 +3458,8 @@ invalid_opcode:
/**
* ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
- * @args: device MAINTENANCE_IN data / SCSI command of interest.
+ * @dev: Target device.
+ * @cmd: SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields a subset to satisfy scsi_report_opcode()
@@ -3320,17 +3467,21 @@ invalid_opcode:
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+static unsigned int ata_scsiop_maint_in(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 *rbuf)
{
- struct ata_device *dev = args->dev;
- u8 *cdb = args->cmd->cmnd;
+ u8 *cdb = cmd->cmnd;
u8 supported = 0, cdlp = 0, rwcdlp = 0;
- unsigned int err = 0;
+
+ if ((cdb[1] & 0x1f) != MI_REPORT_SUPPORTED_OPERATION_CODES) {
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
+ }
if (cdb[2] != 1 && cdb[2] != 3) {
ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
- err = 2;
- goto out;
+ ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ return 0;
}
switch (cdb[3]) {
@@ -3398,11 +3549,12 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
default:
break;
}
-out:
+
/* One command format */
rbuf[0] = rwcdlp;
rbuf[1] = cdlp | supported;
- return err;
+
+ return 4;
}
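
The `return 4` matches the size of the "one command format" descriptor built just above: byte 0 carries RWCDLP and byte 1 packs CDLP with the SUPPORT field. A hedged standalone sketch of that layout:

    #include <stdint.h>
    #include <string.h>

    static unsigned int fill_one_command_format(uint8_t *rbuf, uint8_t rwcdlp,
                                                uint8_t cdlp, uint8_t support)
    {
            memset(rbuf, 0, 4);
            rbuf[0] = rwcdlp;
            rbuf[1] = cdlp | support;   /* CDLP in bits 3..4, SUPPORT in 0..2 */
            return 4;
    }
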
/**
@@ -4262,78 +4414,26 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
{
- struct ata_scsi_args args;
const u8 *scsicmd = cmd->cmnd;
u8 tmp8;
- args.dev = dev;
- args.id = dev->id;
- args.cmd = cmd;
-
switch(scsicmd[0]) {
case INQUIRY:
- if (scsicmd[1] & 2) /* is CmdDt set? */
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
- else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
- else switch (scsicmd[2]) {
- case 0x00:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
- break;
- case 0x80:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
- break;
- case 0x83:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
- break;
- case 0x89:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
- break;
- case 0xb0:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0);
- break;
- case 0xb1:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
- break;
- case 0xb2:
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
- break;
- case 0xb6:
- if (dev->flags & ATA_DFLAG_ZAC)
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
- else
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- case 0xb9:
- if (dev->cpr_log)
- ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
- else
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- default:
- ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
- break;
- }
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_inquiry);
break;
case MODE_SENSE:
case MODE_SENSE_10:
- ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_mode_sense);
break;
case READ_CAPACITY:
- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
- break;
-
case SERVICE_ACTION_IN_16:
- if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
- ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
- else
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_read_cap);
break;
case REPORT_LUNS:
- ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_report_luns);
break;
case REQUEST_SENSE:
@@ -4361,10 +4461,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
case MAINTENANCE_IN:
- if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
- ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
- else
- ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
+ ata_scsi_rbuf_fill(dev, cmd, ata_scsiop_maint_in);
break;
/* all other commands */
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 67f277e1c3bf..5a46c066abc3 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -601,7 +601,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct page *page;
- unsigned int offset;
+ unsigned int offset, count;
if (!qc->cursg) {
qc->curbytes = qc->nbytes;
@@ -617,25 +617,27 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;
- trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
+ /* don't overrun current sg */
+ count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size);
+
+ trace_ata_sff_pio_transfer_data(qc, offset, count);
/*
* Split the transfer when it splits a page boundary. Note that the
* split still has to be dword aligned like all ATA data transfers.
*/
WARN_ON_ONCE(offset % 4);
- if (offset + qc->sect_size > PAGE_SIZE) {
+ if (offset + count > PAGE_SIZE) {
unsigned int split_len = PAGE_SIZE - offset;
ata_pio_xfer(qc, page, offset, split_len);
- ata_pio_xfer(qc, nth_page(page, 1), 0,
- qc->sect_size - split_len);
+ ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
} else {
- ata_pio_xfer(qc, page, offset, qc->sect_size);
+ ata_pio_xfer(qc, page, offset, count);
}
- qc->curbytes += qc->sect_size;
- qc->cursg_ofs += qc->sect_size;
+ qc->curbytes += count;
+ qc->cursg_ofs += count;
if (qc->cursg_ofs == qc->cursg->length) {
qc->cursg = sg_next(qc->cursg);
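
The fix above caps each PIO transfer at whatever remains in the current scatterlist entry, so a sector that straddles sg entries no longer overruns the buffer. The clamp in isolation (plain-C sketch):

    #include <stddef.h>

    static size_t pio_chunk(size_t sg_len, size_t sg_ofs, size_t sect_size)
    {
            size_t left = sg_len - sg_ofs;  /* bytes left in current sg */
            return left < sect_size ? left : sect_size;
    }
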
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index d0c6924d25b6..514d549286b5 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -964,7 +964,7 @@ MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
- .remove_new = arasan_cf_remove,
+ .remove = arasan_cf_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &arasan_cf_pm_ops,
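
This hunk is the first of many identical conversions below: platform drivers drop the transitional `.remove_new` member in favour of `.remove`, which now carries the same void-returning prototype. A hedged in-kernel illustration with hypothetical names:

    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static void example_remove(struct platform_device *pdev)
    {
            /* release resources; nothing to return */
    }

    static struct platform_driver example_driver = {
            .probe  = example_probe,
            .remove = example_remove,       /* was .remove_new */
            .driver = { .name = "example" },
    };
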
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index aaef5924f636..308f86f9e2f0 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -525,7 +525,7 @@ static int atp867x_reinit_one(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id atp867x_pci_tbl[] = {
+static const struct pci_device_id atp867x_pci_tbl[] = {
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867A), 0 },
{ PCI_VDEVICE(ARTOP, PCI_DEVICE_ID_ARTOP_ATP867B), 0 },
{ },
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index f3f5b2b0ecc9..e8cda988feb5 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -1015,7 +1015,7 @@ static struct platform_driver ep93xx_pata_platform_driver = {
.of_match_table = ep93xx_pata_of_ids,
},
.probe = ep93xx_pata_probe,
- .remove_new = ep93xx_pata_remove,
+ .remove = ep93xx_pata_remove,
};
module_platform_driver(ep93xx_pata_platform_driver);
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 18ceefd176df..334c4eea41ec 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -225,8 +225,8 @@ static void pata_falcon_remove_one(struct platform_device *pdev)
static struct platform_driver pata_falcon_driver = {
.probe = pata_falcon_init_one,
- .remove_new = pata_falcon_remove_one,
- .driver = {
+ .remove = pata_falcon_remove_one,
+ .driver = {
.name = "atari-falcon-ide",
},
};
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 73a9a5109238..c3a8384c3e04 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -557,7 +557,7 @@ static struct platform_driver pata_ftide010_driver = {
.of_match_table = pata_ftide010_of_match,
},
.probe = pata_ftide010_probe,
- .remove_new = pata_ftide010_remove,
+ .remove = pata_ftide010_remove,
};
module_platform_driver(pata_ftide010_driver);
diff --git a/drivers/ata/pata_gayle.c b/drivers/ata/pata_gayle.c
index 94df60ac2307..8602c3889948 100644
--- a/drivers/ata/pata_gayle.c
+++ b/drivers/ata/pata_gayle.c
@@ -202,9 +202,9 @@ static void pata_gayle_remove_one(struct platform_device *pdev)
static struct platform_driver pata_gayle_driver = {
.probe = pata_gayle_init_one,
- .remove_new = pata_gayle_remove_one,
- .driver = {
- .name = "amiga-gayle-ide",
+ .remove = pata_gayle_remove_one,
+ .driver = {
+ .name = "amiga-gayle-ide",
},
};
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index d0aa8fc929b4..b37682b0578f 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -249,7 +249,7 @@ MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
- .remove_new = pata_imx_remove,
+ .remove = pata_imx_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_pata_dt_ids,
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index b7ac56103c8a..9cbe2132ce59 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -81,7 +81,7 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. The 8213 is a clone so very similar
*/
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 8a9ee828478f..80f6a91acf6f 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -298,7 +298,7 @@ static struct platform_driver ixp4xx_pata_platform_driver = {
.of_match_table = ixp4xx_pata_of_match,
},
.probe = ixp4xx_pata_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(ixp4xx_pata_platform_driver);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index f2f36e55a1f4..fbf5f07ea357 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -812,8 +812,8 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
-static int pata_macio_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int pata_macio_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pata_macio_priv *priv = ap->private_data;
@@ -822,7 +822,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev,
int rc;
/* First call original */
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (rc)
return rc;
@@ -932,10 +932,10 @@ static const struct scsi_host_template pata_macio_sht = {
/* We may not need that strict one */
.dma_boundary = ATA_DMA_BOUNDARY,
.max_segment_size = PATA_MACIO_MAX_SEGMENT_SIZE,
- .device_configure = pata_macio_device_configure,
+ .sdev_configure = pata_macio_sdev_configure,
.sdev_groups = ata_common_sdev_groups,
.can_queue = ATA_DEF_QUEUE,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static struct ata_port_operations pata_macio_ops = {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 3f9258677915..210a63283f62 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -854,7 +854,7 @@ static const struct of_device_id mpc52xx_ata_of_match[] = {
static struct platform_driver mpc52xx_ata_of_platform_driver = {
.probe = mpc52xx_ata_probe,
- .remove_new = mpc52xx_ata_remove,
+ .remove = mpc52xx_ata_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = mpc52xx_ata_suspend,
.resume = mpc52xx_ata_resume,
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 0bb9607e7348..dce24806a052 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -183,7 +183,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
reg_tim.s.ale = 0;
/* Not used */
reg_tim.s.page = 0;
- /* Time after IORDY to coninue to assert the data */
+ /* Time after IORDY to continue to assert the data */
reg_tim.s.wait = 0;
/* Time to wait to complete the cycle. */
reg_tim.s.pause = pause;
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 4956f0f5b93f..178b28eff170 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -89,7 +89,7 @@ static struct platform_driver pata_of_platform_driver = {
.of_match_table = pata_of_platform_match,
},
.probe = pata_of_platform_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
};
module_platform_driver(pata_of_platform_driver);
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index dca82d92b004..3d01b7000e41 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -70,7 +70,7 @@ static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44.
*/
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index ced906bf56be..beb53bd990be 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -97,7 +97,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id
return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
}
-static struct pci_device_id ata_tosh[] = {
+static const struct pci_device_id ata_tosh[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), },
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 232c3dad7ee8..87479bc893b2 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -223,7 +223,7 @@ static int pata_platform_probe(struct platform_device *pdev)
static struct platform_driver pata_platform_driver = {
.probe = pata_platform_probe,
- .remove_new = ata_platform_remove_one,
+ .remove = ata_platform_remove_one,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 538bd3423d85..434f380114af 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -306,7 +306,7 @@ static void pxa_ata_remove(struct platform_device *pdev)
static struct platform_driver pxa_ata_driver = {
.probe = pxa_ata_probe,
- .remove_new = pxa_ata_remove,
+ .remove = pxa_ata_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 84b001097093..40ef8072c159 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -45,7 +45,7 @@ static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
int control = 0;
/*
- * See Intel Document 298600-004 for the timing programing rules
+ * See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44. The Radisys is a relative of the PIIX
* but not the same so be careful.
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 0fa253ad7c93..fd81e75c9402 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -164,7 +164,7 @@ static void rb532_pata_driver_remove(struct platform_device *pdev)
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
- .remove_new = rb532_pata_driver_remove,
+ .remove = rb532_pata_driver_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 0a9689862f71..09792aac7f9d 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -340,7 +340,7 @@ static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
host->private_data = hpriv;
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
host->flags |= ATA_HOST_PARALLEL_SCAN;
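
Here and in the sata_sil24/sata_sis/sata_uli/sata_vsc hunks below, `pci_intx()` becomes `pcim_intx()`, the device-managed variant whose INTx change is undone automatically at driver unbind. A hedged in-kernel sketch with placeholder names:

    #include <linux/pci.h>

    static int example_init_one(struct pci_dev *pdev)
    {
            int rc;

            rc = pcim_enable_device(pdev);  /* managed PCI enable */
            if (rc)
                    return rc;

            pcim_intx(pdev, 1);             /* reverted on unbind */
            return 0;
    }
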
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 52f5168e4db5..6e1dd0d9c035 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1240,7 +1240,7 @@ static struct platform_driver sata_dwc_driver = {
.of_match_table = sata_dwc_match,
},
.probe = sata_dwc_probe,
- .remove_new = sata_dwc_remove,
+ .remove = sata_dwc_remove,
};
module_platform_driver(sata_dwc_driver);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 01aa05f4c3f5..87e91a937a44 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1589,7 +1589,7 @@ static struct platform_driver fsl_sata_driver = {
.of_match_table = fsl_sata_match,
},
.probe = sata_fsl_probe,
- .remove_new = sata_fsl_remove,
+ .remove = sata_fsl_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = sata_fsl_suspend,
.resume = sata_fsl_resume,
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index f574e3c3f5b4..530ee26b3012 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -11,7 +11,6 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
-#include <linux/reset.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -27,8 +26,6 @@
* @muxmode: the current muxing mode
* @ide_pins: if the device is using the plain IDE interface pins
* @sata_bridge: if the device enables the SATA bridge
- * @sata0_reset: SATA0 reset handler
- * @sata1_reset: SATA1 reset handler
* @sata0_pclk: SATA0 PCLK handler
* @sata1_pclk: SATA1 PCLK handler
*/
@@ -38,8 +35,6 @@ struct sata_gemini {
enum gemini_muxmode muxmode;
bool ide_pins;
bool sata_bridge;
- struct reset_control *sata0_reset;
- struct reset_control *sata1_reset;
struct clk *sata0_pclk;
struct clk *sata1_pclk;
};
@@ -224,18 +219,6 @@ void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge)
}
EXPORT_SYMBOL(gemini_sata_stop_bridge);
-int gemini_sata_reset_bridge(struct sata_gemini *sg,
- unsigned int bridge)
-{
- if (bridge == 0)
- reset_control_reset(sg->sata0_reset);
- else
- reset_control_reset(sg->sata1_reset);
- msleep(10);
- return gemini_sata_setup_bridge(sg, bridge);
-}
-EXPORT_SYMBOL(gemini_sata_reset_bridge);
-
static int gemini_sata_bridge_init(struct sata_gemini *sg)
{
struct device *dev = sg->dev;
@@ -265,21 +248,6 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return ret;
}
- sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0");
- if (IS_ERR(sg->sata0_reset)) {
- dev_err(dev, "no SATA0 reset controller\n");
- clk_disable_unprepare(sg->sata1_pclk);
- clk_disable_unprepare(sg->sata0_pclk);
- return PTR_ERR(sg->sata0_reset);
- }
- sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1");
- if (IS_ERR(sg->sata1_reset)) {
- dev_err(dev, "no SATA1 reset controller\n");
- clk_disable_unprepare(sg->sata1_pclk);
- clk_disable_unprepare(sg->sata0_pclk);
- return PTR_ERR(sg->sata1_reset);
- }
-
sata_id = readl(sg->base + GEMINI_SATA_ID);
sata_phy_id = readl(sg->base + GEMINI_SATA_PHY_ID);
sg->sata_bridge = true;
@@ -425,7 +393,7 @@ static struct platform_driver gemini_sata_driver = {
.of_match_table = gemini_sata_of_match,
},
.probe = gemini_sata_probe,
- .remove_new = gemini_sata_remove,
+ .remove = gemini_sata_remove,
};
module_platform_driver(gemini_sata_driver);
diff --git a/drivers/ata/sata_gemini.h b/drivers/ata/sata_gemini.h
index 6f6e691d6007..b6e4a5c86e01 100644
--- a/drivers/ata/sata_gemini.h
+++ b/drivers/ata/sata_gemini.h
@@ -17,6 +17,5 @@ bool gemini_sata_bridge_enabled(struct sata_gemini *sg, bool is_ata1);
enum gemini_muxmode gemini_sata_get_muxmode(struct sata_gemini *sg);
int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge);
void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge);
-int gemini_sata_reset_bridge(struct sata_gemini *sg, unsigned int bridge);
#endif
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 63ef7bb073ce..c8c817c51230 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
phy_nodes[phy] = phy_data.np;
cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
if (cphy_base[phy] == NULL) {
+ of_node_put(phy_data.np);
return 0;
}
phy_count += 1;
@@ -614,12 +615,12 @@ static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
ahci_highbank_suspend, ahci_highbank_resume);
static struct platform_driver ahci_highbank_driver = {
- .remove_new = ata_platform_remove_one,
- .driver = {
- .name = "highbank-ahci",
- .of_match_table = ahci_of_match,
- .pm = &ahci_highbank_pm_ops,
- },
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = "highbank-ahci",
+ .of_match_table = ahci_of_match,
+ .pm = &ahci_highbank_pm_ops,
+ },
.probe = ahci_highbank_probe,
};
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 05c905827dc5..bcbf96867f89 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -672,8 +672,8 @@ static const struct scsi_host_template mv6_sht = {
.dma_boundary = MV_DMA_BOUNDARY,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
- .device_configure = ata_scsi_device_configure
+ .tag_alloc_policy_rr = true,
+ .sdev_configure = ata_scsi_sdev_configure
};
static struct ata_port_operations mv5_ops = {
@@ -4255,7 +4255,7 @@ MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
- .remove_new = mv_platform_remove,
+ .remove = mv_platform_remove,
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 36d99043ef50..f36e2915ccf1 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -296,8 +296,8 @@ static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
-static int nv_adma_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
+static int nv_adma_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
@@ -319,8 +319,8 @@ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
-static int nv_swncq_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
+static int nv_swncq_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
@@ -382,10 +382,10 @@ static const struct scsi_host_template nv_adma_sht = {
.can_queue = NV_ADMA_MAX_CPBS,
.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
.dma_boundary = NV_ADMA_DMA_BOUNDARY,
- .device_configure = nv_adma_device_configure,
+ .sdev_configure = nv_adma_sdev_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
static const struct scsi_host_template nv_swncq_sht = {
@@ -393,10 +393,10 @@ static const struct scsi_host_template nv_swncq_sht = {
.can_queue = ATA_MAX_QUEUE - 1,
.sg_tablesize = LIBATA_MAX_PRD,
.dma_boundary = ATA_DMA_BOUNDARY,
- .device_configure = nv_swncq_device_configure,
+ .sdev_configure = nv_swncq_sdev_configure,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
};
/*
@@ -663,8 +663,8 @@ static void nv_adma_mode(struct ata_port *ap)
pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
-static int nv_adma_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int nv_adma_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
@@ -676,7 +676,7 @@ static int nv_adma_device_configure(struct scsi_device *sdev,
int adma_enable;
u32 current_reg, new_reg, config_mask;
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
@@ -1871,8 +1871,8 @@ static void nv_swncq_host_init(struct ata_host *host)
writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
-static int nv_swncq_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int nv_swncq_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -1882,7 +1882,7 @@ static int nv_swncq_device_configure(struct scsi_device *sdev,
u8 check_maxtor = 0;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- rc = ata_scsi_device_configure(sdev, lim);
+ rc = ata_scsi_sdev_configure(sdev, lim);
if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
/* Not a proper libata device, ignore */
return rc;
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index c1469d076880..22820a02d740 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -1009,7 +1009,7 @@ static const struct dev_pm_ops sata_rcar_pm_ops = {
static struct platform_driver sata_rcar_driver = {
.probe = sata_rcar_probe,
- .remove_new = sata_rcar_remove,
+ .remove = sata_rcar_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sata_rcar_match,
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 72c03cbdaff4..87f4cde6a686 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -378,10 +378,9 @@ static const struct scsi_host_template sil24_sht = {
.can_queue = SIL24_MAX_CMDS,
.sg_tablesize = SIL24_MAX_SGE,
.dma_boundary = ATA_DMA_BOUNDARY,
- .tag_alloc_policy = BLK_TAG_ALLOC_FIFO,
.sdev_groups = ata_ncq_sdev_groups,
.change_queue_depth = ata_scsi_change_queue_depth,
- .device_configure = ata_scsi_device_configure
+ .sdev_configure = ata_scsi_sdev_configure
};
static struct ata_port_operations sil24_ops = {
@@ -1317,7 +1316,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sata_sil24_msi && !pci_enable_msi(pdev)) {
dev_info(&pdev->dev, "Using MSI\n");
- pci_intx(pdev, 0);
+ pcim_intx(pdev, 0);
}
pci_set_master(pdev);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index ef8724986de3..b8b6d9eff3b8 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -290,7 +290,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &sis_sht);
}
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 60ea45926cd1..52894ff49dcb 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -221,7 +221,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- pci_intx(pdev, 1);
+ pcim_intx(pdev, 1);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &uli_sht);
}
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index d39b87537168..a53a2dfc1e17 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -384,7 +384,7 @@ static int vsc_sata_init_one(struct pci_dev *pdev,
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
if (pci_enable_msi(pdev) == 0)
- pci_intx(pdev, 0);
+ pcim_intx(pdev, 0);
/*
* Config offset 0x98 is "Extended Control and Status Register 0"
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index cb00f8244e41..4fea1149e003 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2569,7 +2569,7 @@ static struct platform_driver fore200e_sba_driver = {
.of_match_table = fore200e_sba_match,
},
.probe = fore200e_sba_probe,
- .remove_new = fore200e_sba_remove,
+ .remove = fore200e_sba_remove,
};
#endif
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 21545ffba065..8934e6ad5772 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -489,7 +489,7 @@ config IMG_ASCII_LCD
config HT16K33
tristate "Holtek Ht16K33 LED controller with keyscan"
- depends on FB && I2C && INPUT
+ depends on FB && I2C && INPUT && BACKLIGHT_CLASS_DEVICE
select FB_SYSMEM_HELPERS
select INPUT_MATRIXKMAP
select FB_BACKLIGHT
diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c
index 6526aa51fb1d..e1a94ae3eb0c 100644
--- a/drivers/auxdisplay/cfag12864b.c
+++ b/drivers/auxdisplay/cfag12864b.c
@@ -37,11 +37,6 @@ module_param(cfag12864b_rate, uint, 0444);
MODULE_PARM_DESC(cfag12864b_rate,
"Refresh rate (hertz)");
-unsigned int cfag12864b_getrate(void)
-{
- return cfag12864b_rate;
-}
-
/*
* cfag12864b Commands
*
@@ -249,11 +244,6 @@ void cfag12864b_disable(void)
mutex_unlock(&cfag12864b_mutex);
}
-unsigned char cfag12864b_isenabled(void)
-{
- return cfag12864b_updating;
-}
-
static void cfag12864b_update(struct work_struct *work)
{
unsigned char c;
@@ -293,10 +283,8 @@ static void cfag12864b_update(struct work_struct *work)
*/
EXPORT_SYMBOL_GPL(cfag12864b_buffer);
-EXPORT_SYMBOL_GPL(cfag12864b_getrate);
EXPORT_SYMBOL_GPL(cfag12864b_enable);
EXPORT_SYMBOL_GPL(cfag12864b_disable);
-EXPORT_SYMBOL_GPL(cfag12864b_isenabled);
/*
* Is the module inited?
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 2b74dabe7e17..24baf6b2c587 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -108,7 +108,7 @@ static void cfag12864bfb_remove(struct platform_device *device)
static struct platform_driver cfag12864bfb_driver = {
.probe = cfag12864bfb_probe,
- .remove_new = cfag12864bfb_remove,
+ .remove = cfag12864bfb_remove,
.driver = {
.name = CFAG12864BFB_NAME,
},
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 025dc6855cb2..0526f0d90a79 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -339,7 +339,7 @@ MODULE_DEVICE_TABLE(of, hd44780_of_match);
static struct platform_driver hd44780_driver = {
.probe = hd44780_probe,
- .remove_new = hd44780_remove,
+ .remove = hd44780_remove,
.driver = {
.name = "hd44780",
.of_match_table = hd44780_of_match,
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index a816f9e10255..0b8ba754b343 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -657,7 +657,6 @@ static int ht16k33_seg_probe(struct device *dev, struct ht16k33_priv *priv,
static int ht16k33_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- const struct of_device_id *id;
struct ht16k33_priv *priv;
uint32_t dft_brightness;
int err;
@@ -672,9 +671,8 @@ static int ht16k33_probe(struct i2c_client *client)
return -ENOMEM;
priv->client = client;
- id = i2c_of_match_device(dev->driver->of_match_table, client);
- if (id)
- priv->type = (uintptr_t)id->data;
+ priv->type = (uintptr_t)i2c_get_match_data(client);
+
i2c_set_clientdata(client, priv);
err = ht16k33_initialize(priv);
@@ -747,7 +745,9 @@ static void ht16k33_remove(struct i2c_client *client)
}
static const struct i2c_device_id ht16k33_i2c_match[] = {
- { "ht16k33", 0 },
+ { "3108", DISP_QUAD_7SEG },
+ { "3130", DISP_QUAD_14SEG },
+ { "ht16k33", DISP_MATRIX },
{ }
};
MODULE_DEVICE_TABLE(i2c, ht16k33_i2c_match);
@@ -780,5 +780,5 @@ module_i2c_driver(ht16k33_driver);
MODULE_DESCRIPTION("Holtek HT16K33 driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
MODULE_AUTHOR("Robin van der Gracht <robin@protonic.nl>");
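
The `MODULE_IMPORT_NS(LINEDISP)` to `MODULE_IMPORT_NS("LINEDISP")` change here and in the hunks below follows the kernel-wide switch of the symbol-namespace macros to string literals; importer and exporter must agree on the string. A hedged in-kernel illustration:

    #include <linux/module.h>

    MODULE_IMPORT_NS("LINEDISP");   /* was: MODULE_IMPORT_NS(LINEDISP) */
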
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index 9ba132dc6143..32e1863ef4b2 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -36,7 +36,6 @@ struct img_ascii_lcd_config {
* @base: the base address of the LCD registers
* @regmap: the regmap through which LCD registers are accessed
* @offset: the offset within regmap to the start of the LCD registers
- * @cfg: pointer to the LCD model configuration
*/
struct img_ascii_lcd_ctx {
struct linedisp linedisp;
@@ -45,7 +44,6 @@ struct img_ascii_lcd_ctx {
struct regmap *regmap;
};
u32 offset;
- const struct img_ascii_lcd_config *cfg;
};
/*
@@ -71,7 +69,7 @@ static void boston_update(struct linedisp *linedisp)
#endif
}
-static struct img_ascii_lcd_config boston_config = {
+static const struct img_ascii_lcd_config boston_config = {
.num_chars = 8,
.ops = {
.update = boston_update,
@@ -100,7 +98,7 @@ static void malta_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config malta_config = {
+static const struct img_ascii_lcd_config malta_config = {
.num_chars = 8,
.external_regmap = true,
.ops = {
@@ -202,7 +200,7 @@ static void sead3_update(struct linedisp *linedisp)
pr_err_ratelimited("Failed to update LCD display: %d\n", err);
}
-static struct img_ascii_lcd_config sead3_config = {
+static const struct img_ascii_lcd_config sead3_config = {
.num_chars = 16,
.external_regmap = true,
.ops = {
@@ -291,11 +289,11 @@ static struct platform_driver img_ascii_lcd_driver = {
.of_match_table = img_ascii_lcd_matches,
},
.probe = img_ascii_lcd_probe,
- .remove_new = img_ascii_lcd_remove,
+ .remove = img_ascii_lcd_remove,
};
module_platform_driver(img_ascii_lcd_driver);
MODULE_DESCRIPTION("Imagination Technologies ASCII LCD Display");
MODULE_AUTHOR("Paul Burton <paul.burton@mips.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index 6422be0dfe20..a28daa4ffbf7 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -349,7 +349,7 @@ static void lcd2s_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id lcd2s_i2c_id[] = {
- { "lcd2s", 0 },
+ { "lcd2s" },
{ }
};
MODULE_DEVICE_TABLE(i2c, lcd2s_i2c_id);
diff --git a/drivers/auxdisplay/line-display.c b/drivers/auxdisplay/line-display.c
index 731ffdfafc4e..fcec77f100ce 100644
--- a/drivers/auxdisplay/line-display.c
+++ b/drivers/auxdisplay/line-display.c
@@ -381,7 +381,7 @@ out_put_device:
put_device(&linedisp->dev);
return err;
}
-EXPORT_SYMBOL_NS_GPL(linedisp_register, LINEDISP);
+EXPORT_SYMBOL_NS_GPL(linedisp_register, "LINEDISP");
/**
* linedisp_unregister - unregister a character line display
@@ -394,7 +394,7 @@ void linedisp_unregister(struct linedisp *linedisp)
del_timer_sync(&linedisp->timer);
put_device(&linedisp->dev);
}
-EXPORT_SYMBOL_NS_GPL(linedisp_unregister, LINEDISP);
+EXPORT_SYMBOL_NS_GPL(linedisp_unregister, "LINEDISP");
MODULE_DESCRIPTION("Character line display core support");
MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/max6959.c b/drivers/auxdisplay/max6959.c
index 5519c014bd29..962488197b9e 100644
--- a/drivers/auxdisplay/max6959.c
+++ b/drivers/auxdisplay/max6959.c
@@ -191,4 +191,4 @@ module_i2c_driver(max6959_i2c_driver);
MODULE_DESCRIPTION("MAX6958/6959 7-segment LED controller");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
diff --git a/drivers/auxdisplay/seg-led-gpio.c b/drivers/auxdisplay/seg-led-gpio.c
index 183ab3011cbb..f10c25e6bf12 100644
--- a/drivers/auxdisplay/seg-led-gpio.c
+++ b/drivers/auxdisplay/seg-led-gpio.c
@@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(of, seg_led_of_match);
static struct platform_driver seg_led_driver = {
.probe = seg_led_probe,
- .remove_new = seg_led_remove,
+ .remove = seg_led_remove,
.driver = {
.name = "seg-led-gpio",
.of_match_table = seg_led_of_match,
@@ -108,4 +108,4 @@ module_platform_driver(seg_led_driver);
MODULE_AUTHOR("Chris Packham <chris.packham@alliedtelesis.co.nz>");
MODULE_DESCRIPTION("7 segment LED driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LINEDISP);
+MODULE_IMPORT_NS("LINEDISP");
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index e18701676426..c99f2ab105e5 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -208,6 +208,10 @@ static int __init numa_register_nodes(void)
{
int nid;
+ /* Check the validity of the memblock/node mapping */
+ if (!memblock_validate_numa_coverage(0))
+ return -EINVAL;
+
/* Finally register nodes. */
for_each_node_mask(nid, numa_nodes_parsed) {
unsigned long start_pfn, end_pfn;
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 75fcb75d5515..3ebe77566788 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -366,7 +366,7 @@ void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
-void topology_init_cpu_capacity_cppc(void)
+static inline void topology_init_cpu_capacity_cppc(void)
{
u64 capacity, capacity_scale = 0;
struct cppc_perf_caps perf_caps;
@@ -417,6 +417,10 @@ void topology_init_cpu_capacity_cppc(void)
exit:
free_raw_capacity();
}
+void acpi_processor_init_invariance_cppc(void)
+{
+ topology_init_cpu_capacity_cppc();
+}
#endif
#ifdef CONFIG_CPU_FREQ
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 7823888af4f6..afa4df4c5a3f 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -92,7 +92,7 @@
* Auxiliary devices are created and registered by a subsystem-level core
* device that needs to break up its functionality into smaller fragments. One
* way to extend the scope of an auxiliary_device is to encapsulate it within a
- * domain- pecific structure defined by the parent device. This structure
+ * domain-specific structure defined by the parent device. This structure
* contains the auxiliary_device and any associated shared data/callbacks
* needed to establish the connection with the parent.
*
@@ -336,35 +336,6 @@ int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname)
EXPORT_SYMBOL_GPL(__auxiliary_device_add);
/**
- * auxiliary_find_device - auxiliary device iterator for locating a particular device.
- * @start: Device to begin with
- * @data: Data to pass to match function
- * @match: Callback function to check device
- *
- * This function returns a reference to a device that is 'found'
- * for later use, as determined by the @match callback.
- *
- * The reference returned should be released with put_device().
- *
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero, this function will
- * return to the caller and not iterate over any more devices.
- */
-struct auxiliary_device *auxiliary_find_device(struct device *start,
- const void *data,
- device_match_t match)
-{
- struct device *dev;
-
- dev = bus_find_device(&auxiliary_bus_type, start, data, match);
- if (!dev)
- return NULL;
-
- return to_auxiliary_dev(dev);
-}
-EXPORT_SYMBOL_GPL(auxiliary_find_device);
-
-/**
* __auxiliary_driver_register - register a driver for auxiliary bus devices
* @auxdrv: auxiliary_driver structure
* @owner: owning module/driver
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 657c93c38b0d..6b9e65a42cd2 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -354,7 +354,7 @@ static struct device *next_device(struct klist_iter *i)
* count in the supplied callback.
*/
int bus_for_each_dev(const struct bus_type *bus, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
@@ -402,9 +402,12 @@ struct device *bus_find_device(const struct bus_type *bus,
klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
subsys_put(sp);
return dev;
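
The restructured loop stops overloading `get_device()`'s return value as part of the loop condition and makes the take-reference-then-break sequence explicit. The same find-and-hold pattern in a standalone sketch:

    #include <stddef.h>

    struct node {
            struct node *next;
            int id;
            int refs;
    };

    static struct node *find_and_hold(struct node *head, int id)
    {
            for (struct node *n = head; n; n = n->next) {
                    if (n->id == id) {
                            n->refs++;      /* analogue of get_device() */
                            return n;
                    }
            }
            return NULL;
    }
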
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 7a7609298e18..cf0d455209d7 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -58,7 +58,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
{
struct cacheinfo *llc;
- if (!cache_leaves(cpu))
+ if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
return false;
llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
@@ -254,11 +254,11 @@ static int of_count_cache_leaves(struct device_node *np)
{
unsigned int leaves = 0;
- if (of_property_read_bool(np, "cache-size"))
+ if (of_property_present(np, "cache-size"))
++leaves;
- if (of_property_read_bool(np, "i-cache-size"))
+ if (of_property_present(np, "i-cache-size"))
++leaves;
- if (of_property_read_bool(np, "d-cache-size"))
+ if (of_property_present(np, "d-cache-size"))
++leaves;
if (!leaves) {
@@ -367,9 +367,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
for_each_online_cpu(i) {
- struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
-
- if (i == cpu || !sib_cpu_ci->info_list)
+ if (i == cpu || !per_cpu_cacheinfo(i))
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
@@ -409,10 +407,7 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
- struct cpu_cacheinfo *sib_cpu_ci =
- get_cpu_cacheinfo(sibling);
-
- if (sibling == cpu || !sib_cpu_ci->info_list)
+ if (sibling == cpu || !per_cpu_cacheinfo(sibling))
continue;/* skip if itself or no cacheinfo */
for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
@@ -463,11 +458,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
-static inline
-int allocate_cache_info(int cpu)
+static inline int allocate_cache_info(int cpu)
{
- per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
- sizeof(struct cacheinfo), GFP_ATOMIC);
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
if (!per_cpu_cacheinfo(cpu)) {
cache_leaves(cpu) = 0;
return -ENOMEM;
@@ -539,7 +532,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
*/
ci_cacheinfo(cpu)->early_ci_levels = false;
- if (cache_leaves(cpu) <= early_leaves)
+ /*
+ * Some architectures (e.g., x86) do not use early initialization.
+ * Allocate memory now in such case.
+ */
+ if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
return 0;
kfree(per_cpu_cacheinfo(cpu));
diff --git a/drivers/base/class.c b/drivers/base/class.c
index cb5359235c70..2526c57d924e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -323,8 +323,12 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
struct subsys_private *sp = class_to_subsys(class);
struct klist_node *start_knode = NULL;
- if (!sp)
+ memset(iter, 0, sizeof(*iter));
+ if (!sp) {
+ pr_crit("%s: class %p was not registered yet\n",
+ __func__, class);
return;
+ }
if (start)
start_knode = &start->p->knode_class;
@@ -351,6 +355,9 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter)
struct klist_node *knode;
struct device *dev;
+ if (!iter->sp)
+ return NULL;
+
while (1) {
knode = klist_next(&iter->ki);
if (!knode)
@@ -395,7 +402,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_exit);
* code. There's no locking restriction.
*/
int class_for_each_device(const struct class *class, const struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct subsys_private *sp = class_to_subsys(class);
struct class_dev_iter iter;
@@ -405,7 +412,7 @@ int class_for_each_device(const struct class *class, const struct device *start,
if (!class)
return -EINVAL;
if (!sp) {
- WARN(1, "%s called for class '%s' before it was initialized",
+ WARN(1, "%s called for class '%s' before it was registered",
__func__, class->name);
return -EINVAL;
}
@@ -453,7 +460,7 @@ struct device *class_find_device(const struct class *class, const struct device
if (!class)
return NULL;
if (!sp) {
- WARN(1, "%s called for class '%s' before it was initialized",
+ WARN(1, "%s called for class '%s' before it was registered",
__func__, class->name);
return NULL;
}
@@ -594,30 +601,10 @@ EXPORT_SYMBOL_GPL(class_compat_unregister);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link should be created
*/
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+int class_compat_create_link(struct class_compat *cls, struct device *dev)
{
- int error;
-
- error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
- if (error)
- return error;
-
- /*
- * Optionally add a "device" link (typically to the parent), as a
- * class device would have one and we want to provide as much
- * backwards compatibility as possible.
- */
- if (device_link) {
- error = sysfs_create_link(&dev->kobj, &device_link->kobj,
- "device");
- if (error)
- sysfs_remove_link(cls->kobj, dev_name(dev));
- }
-
- return error;
+ return sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_create_link);
@@ -626,14 +613,9 @@ EXPORT_SYMBOL_GPL(class_compat_create_link);
* a bus device
* @cls: the compatibility class
* @dev: the target bus device
- * @device_link: an optional device to which a "device" link was previously
- * created
*/
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link)
+void class_compat_remove_link(struct class_compat *cls, struct device *dev)
{
- if (device_link)
- sysfs_remove_link(&dev->kobj, "device");
sysfs_remove_link(cls->kobj, dev_name(dev));
}
EXPORT_SYMBOL_GPL(class_compat_remove_link);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index a4c853411a6b..5a1f05198114 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -26,7 +26,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
-#include <linux/rcupdate.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
@@ -553,7 +552,7 @@ void device_link_wait_removal(void)
}
EXPORT_SYMBOL_GPL(device_link_wait_removal);
-static struct class devlink_class = {
+static const struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
.dev_release = devlink_dev_release,
@@ -1972,7 +1971,7 @@ static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwn
/**
* __fw_devlink_relax_cycles - Relax and mark dependency cycles.
- * @con: Potential consumer device.
+ * @con_handle: Potential consumer device fwnode.
* @sup_handle: Potential supplier's fwnode.
*
* Needs to be called with fwnode_lock and device link lock held.
@@ -1990,10 +1989,10 @@ static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwn
*
* Return true if one or more cycles were found. Otherwise, return false.
*/
-static bool __fw_devlink_relax_cycles(struct device *con,
+static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
struct fwnode_handle *sup_handle)
{
- struct device *sup_dev = NULL, *par_dev = NULL;
+ struct device *sup_dev = NULL, *par_dev = NULL, *con_dev = NULL;
struct fwnode_link *link;
struct device_link *dev_link;
bool ret = false;
@@ -2010,22 +2009,22 @@ static bool __fw_devlink_relax_cycles(struct device *con,
sup_handle->flags |= FWNODE_FLAG_VISITED;
- sup_dev = get_dev_from_fwnode(sup_handle);
-
/* Termination condition. */
- if (sup_dev == con) {
+ if (sup_handle == con_handle) {
pr_debug("----- cycle: start -----\n");
ret = true;
goto out;
}
+ sup_dev = get_dev_from_fwnode(sup_handle);
+ con_dev = get_dev_from_fwnode(con_handle);
/*
* If sup_dev is bound to a driver and @con hasn't started binding to a
* driver, sup_dev can't be a consumer of @con. So, no need to check
* further.
*/
if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
- con->links.status == DL_DEV_NO_DRIVER) {
+ con_dev && con_dev->links.status == DL_DEV_NO_DRIVER) {
ret = false;
goto out;
}
@@ -2034,7 +2033,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
if (link->flags & FWLINK_FLAG_IGNORE)
continue;
- if (__fw_devlink_relax_cycles(con, link->supplier)) {
+ if (__fw_devlink_relax_cycles(con_handle, link->supplier)) {
__fwnode_link_cycle(link);
ret = true;
}
@@ -2049,7 +2048,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
- if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
+ if (par_dev && __fw_devlink_relax_cycles(con_handle, par_dev->fwnode)) {
pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
par_dev->fwnode);
ret = true;
@@ -2067,7 +2066,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
!(dev_link->flags & DL_FLAG_CYCLE))
continue;
- if (__fw_devlink_relax_cycles(con,
+ if (__fw_devlink_relax_cycles(con_handle,
dev_link->supplier->fwnode)) {
pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
dev_link->supplier->fwnode);
@@ -2115,11 +2114,6 @@ static int fw_devlink_create_devlink(struct device *con,
if (link->flags & FWLINK_FLAG_IGNORE)
return 0;
- if (con->fwnode == link->consumer)
- flags = fw_devlink_get_flags(link->flags);
- else
- flags = FW_DEVLINK_FLAGS_PERMISSIVE;
-
/*
* In some cases, a device P might also be a supplier to its child node
* C. However, this would defer the probe of C until the probe of P
@@ -2140,25 +2134,23 @@ static int fw_devlink_create_devlink(struct device *con,
return -EINVAL;
/*
- * SYNC_STATE_ONLY device links don't block probing and supports cycles.
- * So, one might expect that cycle detection isn't necessary for them.
- * However, if the device link was marked as SYNC_STATE_ONLY because
- * it's part of a cycle, then we still need to do cycle detection. This
- * is because the consumer and supplier might be part of multiple cycles
- * and we need to detect all those cycles.
+ * Don't try to optimize by not calling the cycle detection logic under
+ * certain conditions. There's always some corner case that won't get
+ * detected.
*/
- if (!device_link_flag_is_sync_state_only(flags) ||
- flags & DL_FLAG_CYCLE) {
- device_links_write_lock();
- if (__fw_devlink_relax_cycles(con, sup_handle)) {
- __fwnode_link_cycle(link);
- flags = fw_devlink_get_flags(link->flags);
- pr_debug("----- cycle: end -----\n");
- dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
- sup_handle);
- }
- device_links_write_unlock();
+ device_links_write_lock();
+ if (__fw_devlink_relax_cycles(link->consumer, sup_handle)) {
+ __fwnode_link_cycle(link);
+ pr_debug("----- cycle: end -----\n");
+ pr_info("%pfwf: Fixed dependency cycle(s) with %pfwf\n",
+ link->consumer, sup_handle);
}
+ device_links_write_unlock();
+
+ if (con->fwnode == link->consumer)
+ flags = fw_devlink_get_flags(link->flags);
+ else
+ flags = FW_DEVLINK_FLAGS_PERMISSIVE;
if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
sup_dev = fwnode_get_next_parent_dev(sup_handle);
@@ -2181,8 +2173,8 @@ static int fw_devlink_create_devlink(struct device *con,
}
if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
- dev_err(con, "Failed to create device link (0x%x) with %s\n",
- flags, dev_name(sup_dev));
+ dev_err(con, "Failed to create device link (0x%x) with supplier %s for %pfwf\n",
+ flags, dev_name(sup_dev), link->consumer);
ret = -EINVAL;
}
@@ -2634,7 +2626,6 @@ static const char *dev_uevent_name(const struct kobject *kobj)
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
- struct device_driver *driver;
int retval = 0;
/* add device node properties if present */
@@ -2663,12 +2654,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
- /* Synchronize with module_remove_driver() */
- rcu_read_lock();
- driver = READ_ONCE(dev->driver);
- if (driver)
- add_uevent_var(env, "DRIVER=%s", driver->name);
- rcu_read_unlock();
+ if (dev->driver)
+ add_uevent_var(env, "DRIVER=%s", dev->driver->name);
/* Add common DT information about the device */
of_device_uevent(dev, env);
@@ -2738,8 +2725,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
+ /* Synchronize with really_probe() */
+ device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
+ device_unlock(dev);
if (retval)
goto out;
@@ -3990,7 +3980,7 @@ const char *device_get_devnode(const struct device *dev,
* other than 0, we break out and return that value.
*/
int device_for_each_child(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
@@ -4020,7 +4010,7 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
* other than 0, we break out and return that value.
*/
int device_for_each_child_reverse(struct device *parent, void *data,
- int (*fn)(struct device *dev, void *data))
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
@@ -4038,87 +4028,77 @@ int device_for_each_child_reverse(struct device *parent, void *data,
EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
/**
- * device_find_child - device iterator for locating a particular device.
- * @parent: parent struct device
- * @match: Callback function to check device
- * @data: Data to pass to match function
- *
- * This is similar to the device_for_each_child() function above, but it
- * returns a reference to a device that is 'found' for later use, as
- * determined by the @match callback.
+ * device_for_each_child_reverse_from - device child iterator in reverse order.
+ * @parent: parent struct device.
+ * @from: optional starting point in child list
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
*
- * The callback should return 0 if the device doesn't match and non-zero
- * if it does. If the callback returns non-zero and a reference to the
- * current device can be obtained, this function will return to the caller
- * and not iterate over any more devices.
+ * Iterate over @parent's child devices, starting at @from, and call @fn
+ * for each, passing it @data. This helper is identical to
+ * device_for_each_child_reverse() when @from is NULL.
*
- * NOTE: you will need to drop the reference with put_device() after use.
+ * @fn is checked on each iteration. If it returns anything other than 0,
+ * iteration stops and that value is returned to the caller of
+ * device_for_each_child_reverse_from().
*/
-struct device *device_find_child(struct device *parent, void *data,
- int (*match)(struct device *dev, void *data))
+int device_for_each_child_reverse_from(struct device *parent,
+ struct device *from, void *data,
+ device_iter_t fn)
{
struct klist_iter i;
struct device *child;
+ int error = 0;
if (!parent || !parent->p)
- return NULL;
+ return 0;
- klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (match(child, data) && get_device(child))
- break;
+ klist_iter_init_node(&parent->p->klist_children, &i,
+ (from ? &from->p->knode_parent : NULL));
+ while ((child = prev_device(&i)) && !error)
+ error = fn(child, data);
klist_iter_exit(&i);
- return child;
+ return error;
}
-EXPORT_SYMBOL_GPL(device_find_child);
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
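The @fn callback uses the new device_iter_t signature. A sketch that walks
the children newest-first (the unbind callback is hypothetical):

	static int my_unbind_child(struct device *dev, void *data)
	{
		device_release_driver(dev);
		return 0;	/* a non-zero return would stop the iteration */
	}

	/* @from may be NULL to start from the tail of the child list. */
	err = device_for_each_child_reverse_from(parent, from, NULL,
						 my_unbind_child);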
/**
- * device_find_child_by_name - device iterator for locating a child device.
+ * device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
- * @name: name of the child device
+ * @match: Callback function to check device
+ * @data: Data to pass to match function
+ *
+ * This is similar to the device_for_each_child() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
*
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a device that has the name @name.
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero and a reference to the
+ * current device can be obtained, this function will return to the caller
+ * and not iterate over any more devices.
*
* NOTE: you will need to drop the reference with put_device() after use.
*/
-struct device *device_find_child_by_name(struct device *parent,
- const char *name)
+struct device *device_find_child(struct device *parent, const void *data,
+ device_match_t match)
{
struct klist_iter i;
struct device *child;
- if (!parent)
+ if (!parent || !parent->p)
return NULL;
klist_iter_init(&parent->p->klist_children, &i);
- while ((child = next_device(&i)))
- if (sysfs_streq(dev_name(child), name) && get_device(child))
+ while ((child = next_device(&i))) {
+ if (match(child, data)) {
+ get_device(child);
break;
+ }
+ }
klist_iter_exit(&i);
return child;
}
-EXPORT_SYMBOL_GPL(device_find_child_by_name);
-
-static int match_any(struct device *dev, void *unused)
-{
- return 1;
-}
-
-/**
- * device_find_any_child - device iterator for locating a child device, if any.
- * @parent: parent struct device
- *
- * This is similar to the device_find_child() function above, but it
- * returns a reference to a child device, if any.
- *
- * NOTE: you will need to drop the reference with put_device() after use.
- */
-struct device *device_find_any_child(struct device *parent)
-{
- return device_find_child(parent, NULL, match_any);
-}
-EXPORT_SYMBOL_GPL(device_find_any_child);
+EXPORT_SYMBOL_GPL(device_find_child);
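The removed device_find_child_by_name() and device_find_any_child() reduce
to one-liners on top of the generic match helpers. A sketch, assuming the
existing device_match_name() and device_match_any() helpers (the "eth0"
name is illustrative):

	/* was device_find_child_by_name(parent, "eth0"): */
	child = device_find_child(parent, "eth0", device_match_name);

	/* was device_find_any_child(parent): */
	child = device_find_child(parent, NULL, device_match_any);
	if (child) {
		/* ... use child ... */
		put_device(child);	/* drop the iterator's reference */
	}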
int __init devices_init(void)
{
@@ -4980,6 +4960,49 @@ define_dev_printk_level(_dev_info, KERN_INFO);
#endif
+static void __dev_probe_failed(const struct device *dev, int err, bool fatal,
+ const char *fmt, va_list vargsp)
+{
+ struct va_format vaf;
+ va_list vargs;
+
+ /*
+ * On x86_64 and possibly on other architectures, va_list is actually a
+ * size-1 array containing a structure. As a result, function parameter
+ * vargsp decays from T[1] to T*, and &vargsp has type T** rather than
+ * T(*)[1], which is expected by its assignment to vaf.va below.
+ *
+ * One standard way to solve this mess is by creating a copy in a local
+ * variable of type va_list and then using a pointer to that local copy
+ * instead, which is the approach employed here.
+ */
+ va_copy(vargs, vargsp);
+
+ vaf.fmt = fmt;
+ vaf.va = &vargs;
+
+ switch (err) {
+ case -EPROBE_DEFER:
+ device_set_deferred_probe_reason(dev, &vaf);
+ dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ break;
+
+ case -ENOMEM:
+ /* Don't print anything on -ENOMEM, there's already enough output */
+ break;
+
+ default:
+ /* Log fatal final failures as errors, otherwise produce warnings */
+ if (fatal)
+ dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ else
+ dev_warn(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ break;
+ }
+
+ va_end(vargs);
+}
+
/**
* dev_err_probe - probe error check and log helper
* @dev: the pointer to the struct device
@@ -4992,7 +5015,7 @@ define_dev_printk_level(_dev_info, KERN_INFO);
* -EPROBE_DEFER and propagate error upwards.
 * In case of -EPROBE_DEFER it also sets the defer probe reason, which can be
 * checked later by reading the devices_deferred debugfs attribute.
- * It replaces code sequence::
+ * It replaces the following code sequence::
*
* if (err != -EPROBE_DEFER)
* dev_err(dev, ...);
@@ -5004,47 +5027,77 @@ define_dev_printk_level(_dev_info, KERN_INFO);
*
* return dev_err_probe(dev, err, ...);
*
- * Using this helper in your probe function is totally fine even if @err is
- * known to never be -EPROBE_DEFER.
+ * Using this helper in your probe function is totally fine even if @err
+ * is known to never be -EPROBE_DEFER.
* The benefit compared to a normal dev_err() is the standardized format
- * of the error code, it being emitted symbolically (i.e. you get "EAGAIN"
- * instead of "-35") and the fact that the error code is returned which allows
- * more compact error paths.
+ * of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
+ * instead of "-35"), and having the error code returned allows more
+ * compact error paths.
*
* Returns @err.
*/
int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
{
- struct va_format vaf;
- va_list args;
+ va_list vargs;
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
+ va_start(vargs, fmt);
- switch (err) {
- case -EPROBE_DEFER:
- device_set_deferred_probe_reason(dev, &vaf);
- dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
- break;
+ /* Use dev_err() for logging when err doesn't equal -EPROBE_DEFER */
+ __dev_probe_failed(dev, err, true, fmt, vargs);
- case -ENOMEM:
- /*
- * We don't print anything on -ENOMEM, there is already enough
- * output.
- */
- break;
+ va_end(vargs);
- default:
- dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
- break;
- }
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_err_probe);
- va_end(args);
+/**
+ * dev_warn_probe - probe error check and log helper
+ * @dev: the pointer to the struct device
+ * @err: error value to test
+ * @fmt: printf-style format string
+ * @...: arguments as specified in the format string
+ *
+ * This helper implements a common pattern used in probe functions for error
+ * checking: print a debug or warning message depending on whether the error
+ * value is -EPROBE_DEFER, and propagate the error upwards.
+ * In case of -EPROBE_DEFER it also sets the defer probe reason, which can be
+ * checked later by reading the devices_deferred debugfs attribute.
+ * It replaces the following code sequence::
+ *
+ * if (err != -EPROBE_DEFER)
+ * dev_warn(dev, ...);
+ * else
+ * dev_dbg(dev, ...);
+ * return err;
+ *
+ * with::
+ *
+ * return dev_warn_probe(dev, err, ...);
+ *
+ * Using this helper in your probe function is totally fine even if @err
+ * is known to never be -EPROBE_DEFER.
+ * The benefit compared to a normal dev_warn() is the standardized format
+ * of the error code, which is emitted symbolically (i.e. you get "EAGAIN"
+ * instead of "-35"), and having the error code returned allows more
+ * compact error paths.
+ *
+ * Returns @err.
+ */
+int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...)
+{
+ va_list vargs;
+
+ va_start(vargs, fmt);
+
+ /* Use dev_warn() for logging when err doesn't equal -EPROBE_DEFER */
+ __dev_probe_failed(dev, err, false, fmt, vargs);
+
+ va_end(vargs);
return err;
}
-EXPORT_SYMBOL_GPL(dev_err_probe);
+EXPORT_SYMBOL_GPL(dev_warn_probe);
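A sketch of a non-fatal probe path using the new helper (the "vbus" supply
and its fallback handling are hypothetical):

	reg = devm_regulator_get_optional(dev, "vbus");
	if (IS_ERR(reg)) {
		ret = dev_warn_probe(dev, PTR_ERR(reg),
				     "no vbus supply, continuing without it\n");
		if (ret == -EPROBE_DEFER)
			return ret;
		reg = NULL;	/* the supply is optional: carry on */
	}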
static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
{
@@ -5146,15 +5199,21 @@ int device_match_name(struct device *dev, const void *name)
}
EXPORT_SYMBOL_GPL(device_match_name);
+int device_match_type(struct device *dev, const void *type)
+{
+ return dev->type == type;
+}
+EXPORT_SYMBOL_GPL(device_match_type);
+
int device_match_of_node(struct device *dev, const void *np)
{
- return dev->of_node == np;
+ return np && dev->of_node == np;
}
EXPORT_SYMBOL_GPL(device_match_of_node);
int device_match_fwnode(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode;
+ return fwnode && dev_fwnode(dev) == fwnode;
}
EXPORT_SYMBOL_GPL(device_match_fwnode);
@@ -5166,13 +5225,13 @@ EXPORT_SYMBOL_GPL(device_match_devt);
int device_match_acpi_dev(struct device *dev, const void *adev)
{
- return ACPI_COMPANION(dev) == adev;
+ return adev && ACPI_COMPANION(dev) == adev;
}
EXPORT_SYMBOL(device_match_acpi_dev);
int device_match_acpi_handle(struct device *dev, const void *handle)
{
- return ACPI_HANDLE(dev) == handle;
+ return handle && ACPI_HANDLE(dev) == handle;
}
EXPORT_SYMBOL(device_match_acpi_handle);
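The added NULL checks close a subtle trap: a lookup keyed on a handle that
turned out to be NULL used to match every device that also lacked that
handle. A sketch of a call that is now safe:

	struct acpi_device *adev = ACPI_COMPANION(some_dev);	/* may be NULL */
	struct device *dev;

	/*
	 * With adev == NULL this now finds nothing instead of returning
	 * the first device that has no ACPI companion.
	 */
	dev = bus_find_device(&platform_bus_type, NULL, adev,
			      device_match_acpi_dev);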
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index fdaa24bb641a..a7e511849875 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -599,6 +599,7 @@ CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+CPU_SHOW_VULN_FALLBACK(ghostwrite);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -614,6 +615,7 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -630,6 +632,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr,
+ &dev_attr_ghostwrite.attr,
NULL
};
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index c795edad1b96..64840e5d5fcc 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -106,7 +106,7 @@ static void devcd_del(struct work_struct *wk)
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -116,7 +116,7 @@ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
}
static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -132,19 +132,15 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute devcd_attr_data = {
- .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
- .size = 0,
- .read = devcd_data_read,
- .write = devcd_data_write,
-};
+static const struct bin_attribute devcd_attr_data =
+ __BIN_ATTR(data, 0600, devcd_data_read, devcd_data_write, 0);
-static struct bin_attribute *devcd_dev_bin_attrs[] = {
+static const struct bin_attribute *const devcd_dev_bin_attrs[] = {
&devcd_attr_data, NULL,
};
static const struct attribute_group devcd_dev_group = {
- .bin_attrs = devcd_dev_bin_attrs,
+ .bin_attrs_new = devcd_dev_bin_attrs,
};
static const struct attribute_group *devcd_dev_groups[] = {
@@ -186,9 +182,9 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* mutex_lock(&devcd->mutex);
*
*
- * In the above diagram, It looks like disabled_store() would be racing with parallely
+ * In the above diagram, it looks like disabled_store() would be racing with a concurrently
* running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * is called after kfree of devcd memory after dropping its last reference with
 * put_device(). However, this will not happen, as fn(dev, data) runs
 * with its own reference to the device via klist_node, so that is not its
 * last reference and the situation above cannot occur.
@@ -285,6 +281,8 @@ static void devcd_free_sgtable(void *data)
* @offset: start copy from @offset@ bytes from the head of the data
* in the given scatterlist
* @data_len: the length of the data in the sg_table
+ *
+ * Returns: the number of bytes copied
*/
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
size_t buf_len, void *data,
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 2152eec0c135..93e7779ef21e 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -750,25 +750,38 @@ int __devm_add_action(struct device *dev, void (*action)(void *), void *data, co
EXPORT_SYMBOL_GPL(__devm_add_action);
/**
- * devm_remove_action() - removes previously added custom action
+ * devm_remove_action_nowarn() - removes previously added custom action
* @dev: Device that owns the action
* @action: Function implementing the action
* @data: Pointer to data passed to @action implementation
*
* Removes instance of @action previously added by devm_add_action().
* Both action and data should match one of the existing entries.
+ *
+ * In contrast to devm_remove_action(), this function does not WARN() if no
+ * matching entry is found.
+ *
+ * It should only be used if the action is contained in an object with
+ * independent lifetime management, e.g. the Devres Rust abstraction.
+ *
+ * Triggering the warning from regular driver code most likely indicates an
+ * abuse of the devres API.
+ *
+ * Returns: 0 on success, -ENOENT if no matching entry was found.
*/
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+int devm_remove_action_nowarn(struct device *dev,
+ void (*action)(void *),
+ void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
- WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
- &devres));
+ return devres_destroy(dev, devm_action_release, devm_action_match,
+ &devres);
}
-EXPORT_SYMBOL_GPL(devm_remove_action);
+EXPORT_SYMBOL_GPL(devm_remove_action_nowarn);
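With a return value available, the WARN()ing behaviour can presumably move
into a trivial header-side wrapper along these lines (a sketch, not the
actual header change):

	static inline void devm_remove_action(struct device *dev,
					      void (*action)(void *),
					      void *data)
	{
		WARN_ON(devm_remove_action_nowarn(dev, action, data));
	}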
/**
* devm_release_action() - release previously added custom action
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index b4eb5b89c4ee..8ab010ddf709 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_set_override);
* Iterate over the @drv's list of devices calling @fn for each one.
*/
int driver_for_each_device(struct device_driver *drv, struct device *start,
- void *data, int (*fn)(struct device *, void *))
+ void *data, device_iter_t fn)
{
struct klist_iter i;
struct device *dev;
@@ -160,9 +160,12 @@ struct device *driver_find_device(const struct device_driver *drv,
klist_iter_init_node(&drv->p->klist_devices, &i,
(start ? &start->p->knode_driver : NULL));
- while ((dev = next_device(&i)))
- if (match(dev, data) && get_device(dev))
+ while ((dev = next_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
break;
+ }
+ }
klist_iter_exit(&i);
return dev;
}
diff --git a/drivers/base/firmware_loader/builtin/main.c b/drivers/base/firmware_loader/builtin/main.c
index a065c3150897..d36befebb1b9 100644
--- a/drivers/base/firmware_loader/builtin/main.c
+++ b/drivers/base/firmware_loader/builtin/main.c
@@ -61,7 +61,7 @@ bool firmware_request_builtin(struct firmware *fw, const char *name)
return false;
}
-EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, TEST_FIRMWARE);
+EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, "TEST_FIRMWARE");
/**
* firmware_request_builtin_buf() - load builtin firmware into optional buffer
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 8432ab2c3b3c..c8afc501a8a4 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -22,10 +22,10 @@ struct firmware_fallback_config fw_fallback_config = {
.loading_timeout = 60,
.old_timeout = 60,
};
-EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(fw_fallback_config, "FIRMWARE_LOADER_PRIVATE");
#ifdef CONFIG_SYSCTL
-static struct ctl_table firmware_config_table[] = {
+static const struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
.data = &fw_fallback_config.force_sysfs_fallback,
@@ -56,13 +56,13 @@ int register_firmware_config_sysctl(void)
return -ENOMEM;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, "FIRMWARE_LOADER_PRIVATE");
void unregister_firmware_config_sysctl(void)
{
unregister_sysctl_table(firmware_config_sysct_table_header);
firmware_config_sysct_table_header = NULL;
}
-EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, "FIRMWARE_LOADER_PRIVATE");
#endif /* CONFIG_SYSCTL */
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 324a9a3c087a..cb0912ea3e62 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -829,19 +829,18 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, st
shash->tfm = alg;
if (crypto_shash_digest(shash, fw->data, fw->size, sha256buf) < 0)
- goto out_shash;
+ goto out_free;
for (int i = 0; i < SHA256_DIGEST_SIZE; i++)
sprintf(&outbuf[i * 2], "%02x", sha256buf[i]);
outbuf[SHA256_BLOCK_SIZE] = 0;
dev_dbg(device, "Loaded FW: %s, sha256: %s\n", name, outbuf);
-out_shash:
- crypto_free_shash(alg);
out_free:
kfree(shash);
kfree(outbuf);
kfree(sha256buf);
+ crypto_free_shash(alg);
}
#else
static void fw_log_firmware_info(const struct firmware *fw, const char *name,
@@ -1075,8 +1074,8 @@ EXPORT_SYMBOL_GPL(firmware_request_platform);
/**
* firmware_request_cache() - cache firmware for suspend so resume can use it
- * @name: name of firmware file
 * @device: device for which the firmware should be cached
+ * @name: name of firmware file
*
* There are some devices with an optimization that enables the device to not
* require loading firmware on system reboot. This optimization may still
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index c9c93b47d9a5..d254ceb56d84 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -259,7 +259,7 @@ static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
}
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -316,7 +316,7 @@ static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
* the driver as a firmware image.
**/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -356,11 +356,11 @@ out:
return retval;
}
-static struct bin_attribute firmware_attr_data = {
+static const struct bin_attribute firmware_attr_data = {
.attr = { .name = "data", .mode = 0644 },
.size = 0,
- .read = firmware_data_read,
- .write = firmware_data_write,
+ .read_new = firmware_data_read,
+ .write_new = firmware_data_write,
};
static struct attribute *fw_dev_attrs[] = {
@@ -374,14 +374,14 @@ static struct attribute *fw_dev_attrs[] = {
NULL
};
-static struct bin_attribute *fw_dev_bin_attrs[] = {
+static const struct bin_attribute *const fw_dev_bin_attrs[] = {
&firmware_attr_data,
NULL
};
static const struct attribute_group fw_dev_attr_group = {
.attrs = fw_dev_attrs,
- .bin_attrs = fw_dev_bin_attrs,
+ .bin_attrs_new = fw_dev_bin_attrs,
#ifdef CONFIG_FW_UPLOAD
.is_visible = fw_upload_is_visible,
#endif
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
index 2060add8ef81..1cabea544a40 100644
--- a/drivers/base/firmware_loader/sysfs.h
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -6,7 +6,7 @@
#include "firmware.h"
-MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+MODULE_IMPORT_NS("FIRMWARE_LOADER_PRIVATE");
extern struct firmware_fallback_config fw_fallback_config;
extern struct device_attribute dev_attr_loading;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 67858eeb92ed..348c5dbbfa68 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -512,7 +512,7 @@ static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n",
- online_type_to_str[mhp_default_online_type]);
+ online_type_to_str[mhp_get_default_online_type()]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
@@ -524,7 +524,7 @@ static ssize_t auto_online_blocks_store(struct device *dev,
if (online_type < 0)
return -EINVAL;
- mhp_default_online_type = online_type;
+ mhp_set_default_online_type(online_type);
return count;
}
diff --git a/drivers/base/module.c b/drivers/base/module.c
index c4eaa1158d54..5bc71bea883a 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -7,7 +7,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/rcupdate.h>
#include "base.h"
static char *make_driver_name(const struct device_driver *drv)
@@ -102,9 +101,6 @@ void module_remove_driver(const struct device_driver *drv)
if (!drv)
return;
- /* Synchronize with dev_uevent() */
- synchronize_rcu();
-
sysfs_remove_link(&drv->p->kobj, "module");
if (drv->owner)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index eb72580288e6..0ea653fa3433 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -27,7 +27,7 @@ static const struct bus_type node_subsys = {
};
static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -45,10 +45,10 @@ static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -66,7 +66,7 @@ static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
return n;
}
-static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
/**
* struct node_access_nodes - Access class device to hold user visible
@@ -578,7 +578,7 @@ static struct attribute *node_dev_attrs[] = {
NULL
};
-static struct bin_attribute *node_dev_bin_attrs[] = {
+static const struct bin_attribute *node_dev_bin_attrs[] = {
&bin_attr_cpumap,
&bin_attr_cpulist,
NULL
@@ -586,7 +586,7 @@ static struct bin_attribute *node_dev_bin_attrs[] = {
static const struct attribute_group node_dev_group = {
.attrs = node_dev_attrs,
- .bin_attrs = node_dev_bin_attrs
+ .bin_attrs_new = node_dev_bin_attrs,
};
static const struct attribute_group *node_dev_groups[] = {
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
index 951819e71b4a..5db06e825c94 100644
--- a/drivers/base/physical_location.c
+++ b/drivers/base/physical_location.c
@@ -13,13 +13,11 @@
bool dev_add_physical_location(struct device *dev)
{
struct acpi_pld_info *pld;
- acpi_status status;
if (!has_acpi_companion(dev))
return false;
- status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
- if (ACPI_FAILURE(status))
+ if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld))
return false;
dev->physical_location =
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 8c34ae1cd8d5..781968a128ff 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -11,6 +11,7 @@
#include <linux/pm_clock.h>
#include <linux/acpi.h>
#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
#include "power.h"
@@ -195,6 +196,7 @@ int dev_pm_domain_attach_list(struct device *dev,
struct device *pd_dev = NULL;
int ret, i, num_pds = 0;
bool by_id = true;
+ size_t size;
u32 pd_flags = data ? data->pd_flags : 0;
u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 :
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
@@ -217,19 +219,19 @@ int dev_pm_domain_attach_list(struct device *dev,
if (num_pds <= 0)
return 0;
- pds = devm_kzalloc(dev, sizeof(*pds), GFP_KERNEL);
+ pds = kzalloc(sizeof(*pds), GFP_KERNEL);
if (!pds)
return -ENOMEM;
- pds->pd_devs = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_devs),
- GFP_KERNEL);
- if (!pds->pd_devs)
- return -ENOMEM;
-
- pds->pd_links = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_links),
- GFP_KERNEL);
- if (!pds->pd_links)
- return -ENOMEM;
+ size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links) +
+ sizeof(*pds->opp_tokens);
+ pds->pd_devs = kcalloc(num_pds, size, GFP_KERNEL);
+ if (!pds->pd_devs) {
+ ret = -ENOMEM;
+ goto free_pds;
+ }
+ pds->pd_links = (void *)(pds->pd_devs + num_pds);
+ pds->opp_tokens = (void *)(pds->pd_links + num_pds);
if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON)
link_flags |= DL_FLAG_RPM_ACTIVE;
@@ -245,6 +247,19 @@ int dev_pm_domain_attach_list(struct device *dev,
goto err_attach;
}
+ if (pd_flags & PD_FLAG_REQUIRED_OPP) {
+ struct dev_pm_opp_config config = {
+ .required_dev = pd_dev,
+ .required_dev_index = i,
+ };
+
+ ret = dev_pm_opp_set_config(dev, &config);
+ if (ret < 0)
+ goto err_link;
+
+ pds->opp_tokens[i] = ret;
+ }
+
if (link_flags) {
struct device_link *link;
@@ -265,13 +280,18 @@ int dev_pm_domain_attach_list(struct device *dev,
return num_pds;
err_link:
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
dev_pm_domain_detach(pd_dev, true);
err_attach:
while (--i >= 0) {
+ dev_pm_opp_clear_config(pds->opp_tokens[i]);
if (pds->pd_links[i])
device_link_del(pds->pd_links[i]);
dev_pm_domain_detach(pds->pd_devs[i], true);
}
+ kfree(pds->pd_devs);
+free_pds:
+ kfree(pds);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
@@ -359,10 +379,14 @@ void dev_pm_domain_detach_list(struct dev_pm_domain_list *list)
return;
for (i = 0; i < list->num_pds; i++) {
+ dev_pm_opp_clear_config(list->opp_tokens[i]);
if (list->pd_links[i])
device_link_del(list->pd_links[i]);
dev_pm_domain_detach(list->pd_devs[i], true);
}
+
+ kfree(list->pd_devs);
+ kfree(list);
}
EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list);
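A consumer opting into the new required-OPP wiring might attach its domains
like this (the domain names are hypothetical):

	static const char * const pd_names[] = { "perf", "mx" };
	const struct dev_pm_domain_attach_data pd_data = {
		.pd_names = pd_names,
		.num_pd_names = ARRAY_SIZE(pd_names),
		.pd_flags = PD_FLAG_REQUIRED_OPP,
	};
	struct dev_pm_domain_list *pd_list;
	int ret;

	ret = dev_pm_domain_attach_list(dev, &pd_data, &pd_list);
	if (ret < 0)
		return ret;
	/* ... and on teardown: */
	dev_pm_domain_detach_list(pd_list);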
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4a67e83300e1..d497d448e4b2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -496,6 +496,7 @@ struct dpm_watchdog {
struct device *dev;
struct task_struct *tsk;
struct timer_list timer;
+ bool fatal;
};
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
@@ -512,11 +513,23 @@ struct dpm_watchdog {
static void dpm_watchdog_handler(struct timer_list *t)
{
struct dpm_watchdog *wd = from_timer(wd, t, timer);
+ struct timer_list *timer = &wd->timer;
+ unsigned int time_left;
+
+ if (wd->fatal) {
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+ }
- dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL, KERN_EMERG);
- panic("%s %s: unrecoverable failure\n",
- dev_driver_string(wd->dev), dev_name(wd->dev));
+ time_left = CONFIG_DPM_WATCHDOG_TIMEOUT - CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
+ dev_warn(wd->dev, "**** DPM device timeout after %u seconds; %u seconds until panic ****\n",
+ CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT, time_left);
+ show_stack(wd->tsk, NULL, KERN_WARNING);
+
+ wd->fatal = true;
+ mod_timer(timer, jiffies + HZ * time_left);
}
/**
@@ -530,10 +543,11 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
+ wd->fatal = CONFIG_DPM_WATCHDOG_TIMEOUT == CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
- timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT;
add_timer(timer);
}
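For example, with CONFIG_DPM_WATCHDOG_WARNING_TIMEOUT=120 and
CONFIG_DPM_WATCHDOG_TIMEOUT=600 (illustrative values), the handler now warns
with a stack trace after 120 seconds and panics only if the transition is
still stuck 480 seconds later.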
@@ -642,13 +656,15 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
- * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
- * to avoid confusing drivers that don't use it.
+ * status to "active" unless its power.set_active flag is clear, in
+ * which case it is not necessary to update its PM-runtime status.
*/
- if (skip_resume)
+ if (skip_resume) {
pm_runtime_set_suspended(dev);
- else if (dev_pm_skip_suspend(dev))
+ } else if (dev->power.set_active) {
pm_runtime_set_active(dev);
+ dev->power.set_active = false;
+ }
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -914,7 +930,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
if (dev->power.direct_complete) {
- /* Match the pm_runtime_disable() in __device_suspend(). */
+ /* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
}
@@ -1175,18 +1191,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
return PMSG_ON;
}
-static void dpm_superior_set_must_resume(struct device *dev)
+static void dpm_superior_set_must_resume(struct device *dev, bool set_active)
{
struct device_link *link;
int idx;
- if (dev->parent)
+ if (dev->parent) {
dev->parent->power.must_resume = true;
+ if (set_active)
+ dev->parent->power.set_active = true;
+ }
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
link->supplier->power.must_resume = true;
+ if (set_active)
+ link->supplier->power.set_active = true;
+ }
device_links_read_unlock(idx);
}
@@ -1264,8 +1286,11 @@ Skip:
dev->power.may_skip_resume))
dev->power.must_resume = true;
- if (dev->power.must_resume)
- dpm_superior_set_must_resume(dev);
+ if (dev->power.must_resume) {
+ dev->power.set_active = dev->power.set_active ||
+ dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+ dpm_superior_set_must_resume(dev, dev->power.set_active);
+ }
Complete:
complete_all(&dev->power.completion);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index bd77f6734f14..ff393cba7649 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -137,6 +137,7 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
return ret;
}
+EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
/**
* apply_constraint - Add/modify/remove device PM QoS request.
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a1474fb67db9..f84018125b46 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
@@ -509,14 +508,6 @@ static ssize_t wakeup_last_time_ms_show(struct device *dev,
return sysfs_emit(buf, "%lld\n", msec);
}
-static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
- kgid_t kgid)
-{
- if (dev->power.wakeup && dev->power.wakeup->dev)
- return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
- return 0;
-}
-
static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
@@ -541,6 +532,15 @@ static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
+
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ if (dev->power.wakeup && dev->power.wakeup->dev)
+ return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
+ return 0;
+}
+
#else /* CONFIG_PM_SLEEP */
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
kgid_t kgid)
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 5a5a9e978e85..8aa28c08b289 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -103,6 +103,32 @@ void dev_pm_clear_wake_irq(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+static void devm_pm_clear_wake_irq(void *dev)
+{
+ dev_pm_clear_wake_irq(dev);
+}
+
+/**
+ * devm_pm_set_wake_irq - device-managed variant of dev_pm_set_wake_irq
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ, same as dev_pm_set_wake_irq(),
+ * but the wake IRQ is automatically cleared on driver detach.
+ */
+int devm_pm_set_wake_irq(struct device *dev, int irq)
+{
+ int ret;
+
+ ret = dev_pm_set_wake_irq(dev, irq);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pm_clear_wake_irq, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_set_wake_irq);
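Usage mirrors the other devm helpers; a probe-time sketch:

	static int my_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		device_init_wakeup(&pdev->dev, true);
		/* Cleared automatically when the driver detaches. */
		return devm_pm_set_wake_irq(&pdev->dev, irq);
	}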
+
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 837d77e3af2b..c1392743df9c 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -71,6 +71,44 @@ bool fwnode_property_present(const struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_property_present);
/**
+ * device_property_read_bool - Return the value for a boolean property of a device
+ * @dev: Device whose property is being checked
+ * @propname: Name of the property
+ *
+ * Report whether boolean property @propname is present in the device's
+ * firmware description.
+ *
+ * Return: true if property @propname is present, false otherwise.
+ */
+bool device_property_read_bool(const struct device *dev, const char *propname)
+{
+ return fwnode_property_read_bool(dev_fwnode(dev), propname);
+}
+EXPORT_SYMBOL_GPL(device_property_read_bool);
+
+/**
+ * fwnode_property_read_bool - Return the value for a boolean property of a firmware node
+ * @fwnode: Firmware node whose property to check
+ * @propname: Name of the property
+ *
+ * Report whether boolean property @propname is present in the firmware description.
+ */
+bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ bool ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
+ ret = fwnode_call_bool_op(fwnode, property_read_bool, propname);
+ if (ret)
+ return ret;
+
+ return fwnode_call_bool_op(fwnode->secondary, property_read_bool, propname);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_bool);
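For boolean firmware properties the device and fwnode forms are
interchangeable; a sketch (the property names are illustrative):

	bool wp = device_property_read_bool(dev, "wp-gpios");
	bool cd = fwnode_property_read_bool(dev_fwnode(dev), "cd-inverted");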
+
+/**
* device_property_read_u8_array - return a u8 array property of a device
* @dev: Device to get the property of
* @propname: Name of the property
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 83acccdc1008..bdb450436cbc 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -59,6 +59,7 @@ struct regmap {
unsigned long raw_spinlock_flags;
};
};
+ struct lock_class_key *lock_key;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 8d27d3653ea3..2319c30283a6 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -73,8 +73,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg,
rcu_read_unlock();
- entry = kmalloc((last - index + 1) * sizeof(unsigned long),
- map->alloc_flags);
+ entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -204,7 +203,7 @@ static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
* overheads.
*/
if (max - min > 1 && regmap_can_raw_write(map)) {
- buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
+ buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
if (!buf) {
ret = -ENOMEM;
goto out;
@@ -320,7 +319,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
unsigned long *entry;
int i, ret;
- entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
+ entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -355,6 +354,9 @@ static int regcache_maple_init(struct regmap *map)
mt_init(mt);
+ if (!mt_external_lock(mt) && map->lock_key)
+ lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
+
if (!map->num_reg_defaults)
return 0;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 188438186589..a9d17f316e55 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -275,18 +275,16 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
pos = (reg - base_reg) / map->reg_stride;
offset = (rbnode->base_reg - base_reg) / map->reg_stride;
- blk = krealloc(rbnode->block,
- blklen * map->cache_word_size,
- map->alloc_flags);
+ blk = krealloc_array(rbnode->block, blklen, map->cache_word_size, map->alloc_flags);
if (!blk)
return -ENOMEM;
rbnode->block = blk;
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present),
- map->alloc_flags);
+ present = krealloc_array(rbnode->cache_present,
+ BITS_TO_LONGS(blklen), sizeof(*present),
+ map->alloc_flags);
if (!present)
return -ENOMEM;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d3659ba3cc11..b1f8508c3966 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -154,7 +154,7 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
map->num_reg_defaults = config->num_reg_defaults;
map->num_reg_defaults_raw = config->num_reg_defaults_raw;
map->reg_defaults_raw = config->reg_defaults_raw;
- map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+ map->cache_word_size = BITS_TO_BYTES(config->val_bits);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
map->cache = NULL;
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index a750e48a26b8..0bcd81389a29 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -364,14 +364,11 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
} else if (chip->num_main_regs) {
unsigned int max_main_bits;
- unsigned long size;
-
- size = chip->num_regs * sizeof(unsigned int);
max_main_bits = (chip->num_main_status_bits) ?
chip->num_main_status_bits : chip->num_regs;
/* Clear the status buf as we don't read all status regs */
- memset(data->status_buf, 0, size);
+ memset32(data->status_buf, 0, chip->num_regs);
/* We could support bulk read for main status registers
* but I don't expect to see devices with really many main
@@ -514,12 +511,16 @@ exit:
return IRQ_NONE;
}
+static struct lock_class_key regmap_irq_lock_class;
+static struct lock_class_key regmap_irq_request_class;
+
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct regmap_irq_chip_data *data = h->host_data;
irq_set_chip_data(virq, data);
+ irq_set_lockdep_class(virq, &regmap_irq_lock_class, &regmap_irq_request_class);
irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1);
irq_set_parent(virq, data->irq);
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 4bf3f1e59ed7..64ea340950b6 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -126,7 +126,7 @@ static const struct regmap_test_param real_cache_types_list[] = {
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
- { .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
@@ -1499,6 +1499,48 @@ static void cache_present(struct kunit *test)
KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}
+static void cache_write_zero(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+
+ /* No defaults so no registers cached. */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
+
+ /* We didn't trigger any reads */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
+
+ /* Write a zero value */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, 0));
+
+ /* Read that zero value back */
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
+ KUNIT_EXPECT_EQ(test, 0, val);
+
+ /* From the cache? */
+ KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, 1));
+
+ /* Try to throw it away */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 1, 1));
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, 1));
+}
+
/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
@@ -2012,6 +2054,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_write_zero, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index c99eada83780..86644bbd0710 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -1,45 +1,187 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2020 Intel Corporation.
+#include <linux/bits.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
+#include <sound/sdca_function.h>
#include "internal.h"
+struct regmap_mbq_context {
+ struct device *dev;
+
+ struct regmap_sdw_mbq_cfg cfg;
+
+ int val_size;
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+};
+
+static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ int size = ctx->val_size;
+
+ if (ctx->cfg.mbq_size) {
+ size = ctx->cfg.mbq_size(ctx->dev, reg);
+ if (!size || size > ctx->val_size)
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+ if (ctx->cfg.deferrable)
+ return ctx->cfg.deferrable(ctx->dev, reg);
+
+ return false;
+}
+
+static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
+ struct regmap_mbq_context *ctx)
+{
+ struct device *dev = &slave->dev;
+ int val, ret = 0;
+
+ dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
+
+ reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
+ SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);
+
+ if (ctx->readable_reg(dev, reg)) {
+ ret = read_poll_timeout(sdw_read_no_pm, val,
+ val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
+ ctx->cfg.timeout_us, ctx->cfg.retry_us,
+ false, slave, reg);
+ if (val < 0)
+ return val;
+ if (ret)
+ dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
+ } else {
+ fsleep(ctx->cfg.timeout_us);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int val,
+ int mbq_size, bool deferrable)
+{
+ int shift = mbq_size * BITS_PER_BYTE;
+ int ret;
+
+ while (--mbq_size > 0) {
+ shift -= BITS_PER_BYTE;
+
+ ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
+ (val >> shift) & 0xff);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = sdw_write_no_pm(slave, reg, val & 0xff);
+ if (deferrable && ret == -ENODATA)
+ return -EAGAIN;
+
+ return ret;
+}
+
static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
- struct device *dev = context;
+ struct regmap_mbq_context *ctx = context;
+ struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;
- ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
- if (ret < 0)
- return ret;
+ if (mbq_size < 0)
+ return mbq_size;
+
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
+ }
+
+ return ret;
+}
+
+static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
+ unsigned int reg, unsigned int *val,
+ int mbq_size, bool deferrable)
+{
+ int shift = BITS_PER_BYTE;
+ int read;
+
+ read = sdw_read_no_pm(slave, reg);
+ if (read < 0) {
+ if (deferrable && read == -ENODATA)
+ return -EAGAIN;
+
+ return read;
+ }
+
+ *val = read;
+
+ while (--mbq_size > 0) {
+ read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
+ if (read < 0)
+ return read;
+
+ *val |= read << shift;
+ shift += BITS_PER_BYTE;
+ }
- return sdw_write_no_pm(slave, reg, val & 0xff);
+ return 0;
}
static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
{
- struct device *dev = context;
+ struct regmap_mbq_context *ctx = context;
+ struct device *dev = ctx->dev;
struct sdw_slave *slave = dev_to_sdw_dev(dev);
- int read0;
- int read1;
+ bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+ int mbq_size = regmap_sdw_mbq_size(ctx, reg);
+ int ret;
- read0 = sdw_read_no_pm(slave, reg);
- if (read0 < 0)
- return read0;
+ if (mbq_size < 0)
+ return mbq_size;
- read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
- if (read1 < 0)
- return read1;
+ /*
+ * Technically the spec does allow a device to set itself to busy for
+ * internal reasons, but since it doesn't provide any information on
+ * how to handle timeouts in that case, for now the code will only
+ * process a single wait/timeout on function busy and a single retry
+ * of the transaction.
+ */
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
+ if (ret == -EAGAIN) {
+ ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+ if (ret)
+ return ret;
- *val = (read1 << 8) | read0;
+ ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
+ }
- return 0;
+ return ret;
}
static const struct regmap_bus regmap_sdw_mbq = {
@@ -51,8 +193,7 @@ static const struct regmap_bus regmap_sdw_mbq = {
static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
{
- /* MBQ-based controls are only 16-bits for now */
- if (config->val_bits != 16)
+ if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
return -ENOTSUPP;
/* Registers are 32 bits wide */
@@ -65,35 +206,69 @@ static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
return 0;
}
+static struct regmap_mbq_context *
+regmap_sdw_mbq_gen_context(struct device *dev,
+ const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config)
+{
+ struct regmap_mbq_context *ctx;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->dev = dev;
+
+ if (mbq_config)
+ ctx->cfg = *mbq_config;
+
+ ctx->val_size = config->val_bits / BITS_PER_BYTE;
+ ctx->readable_reg = config->readable_reg;
+
+ return ctx;
+}
+
struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name)
{
+ struct regmap_mbq_context *ctx;
int ret;
ret = regmap_sdw_mbq_config_check(config);
if (ret)
return ERR_PTR(ret);
- return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq,
- &sdw->dev, config, lock_key, lock_name);
+ ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
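The new mbq_config hook gives SDCA codec drivers per-register control over MBQ value width and deferral. A minimal sketch of a caller, assuming the callback signatures implied by the context lookups above; the register names, the wrapper name devm_regmap_init_sdw_mbq_cfg(), and the size/deferral policy are illustrative, not taken from a real driver:

static int example_mbq_size(struct device *dev, unsigned int reg)
{
	/* Hypothetical map: one wide gain control, 2 bytes for the rest */
	return reg == EXAMPLE_WIDE_GAIN_REG ? 4 : 2;
}

static bool example_mbq_deferrable(struct device *dev, unsigned int reg)
{
	/* Assume only DSP-owned controls may report "function busy" */
	return reg >= EXAMPLE_DSP_REG_BASE;
}

static const struct regmap_sdw_mbq_cfg example_mbq_cfg = {
	.mbq_size = example_mbq_size,
	.deferrable = example_mbq_deferrable,
};

/* regmap = devm_regmap_init_sdw_mbq_cfg(sdw, &config, &example_mbq_cfg); */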
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 4ded93687c1f..f2843f814675 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -598,6 +598,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
+static int dev_get_regmap_match(struct device *dev, void *res, void *data);
+
+static int regmap_detach_dev(struct device *dev, struct regmap *map)
+{
+ if (!dev)
+ return 0;
+
+ return devres_release(dev, dev_get_regmap_release,
+ dev_get_regmap_match, (void *)map->name);
+}
+
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
const struct regmap_config *config)
{
@@ -745,6 +756,7 @@ struct regmap *__regmap_init(struct device *dev,
lock_key, lock_name);
}
map->lock_arg = map;
+ map->lock_key = lock_key;
}
/*
@@ -757,14 +769,13 @@ struct regmap *__regmap_init(struct device *dev,
map->alloc_flags = GFP_KERNEL;
map->reg_base = config->reg_base;
+ map->reg_shift = config->pad_bits % 8;
- map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.reg_shift = config->reg_shift;
- map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
- map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
- config->val_bits + config->pad_bits, 8);
- map->reg_shift = config->pad_bits % 8;
+ map->format.reg_bytes = BITS_TO_BYTES(config->reg_bits);
+ map->format.val_bytes = BITS_TO_BYTES(config->val_bits);
+ map->format.buf_size = BITS_TO_BYTES(config->reg_bits + config->val_bits + config->pad_bits);
if (config->reg_stride)
map->reg_stride = config->reg_stride;
else
@@ -1051,13 +1062,13 @@ skip_format_initialization:
/* Sanity check */
if (range_cfg->range_max < range_cfg->range_min) {
- dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
+ dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
range_cfg->range_max, range_cfg->range_min);
goto err_range;
}
if (range_cfg->range_max > map->max_register) {
- dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
+ dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
range_cfg->range_max, map->max_register);
goto err_range;
}
@@ -1444,6 +1455,7 @@ void regmap_exit(struct regmap *map)
{
struct regmap_async *async;
+ regmap_detach_dev(map->dev, map);
regcache_exit(map);
regmap_debugfs_exit(map);
@@ -3103,7 +3115,7 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
EXPORT_SYMBOL_GPL(regmap_fields_read);
static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
- unsigned int *regs, void *val, size_t val_count)
+ const unsigned int *regs, void *val, size_t val_count)
{
u32 *u32 = val;
u16 *u16 = val;
@@ -3197,7 +3209,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_read);
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val,
+int regmap_multi_reg_read(struct regmap *map, const unsigned int *regs, void *val,
size_t val_count)
{
if (val_count == 0)
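The const-qualification of the register list means callers may now keep it in rodata; a trivial sketch, assuming a map with 32-bit values:

static const unsigned int status_regs[] = { 0x10, 0x14, 0x18 };
u32 vals[ARRAY_SIZE(status_regs)];
int ret;

ret = regmap_multi_reg_read(map, status_regs, vals, ARRAY_SIZE(status_regs));
if (ret)
	return ret;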
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index eb6eb25b343b..b1726a3515f6 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -677,6 +677,7 @@ static const struct fwnode_operations software_node_ops = {
.get = software_node_get,
.put = software_node_put,
.property_present = software_node_property_present,
+ .property_read_bool = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
.get_name = software_node_get_name,
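A software-node boolean property carries no payload, so reading it as a bool is exactly a presence test; wiring .property_read_bool to the same helper makes the two queries below equivalent for software nodes ("wakeup-source" is just an example name):

static const struct property_entry props[] = {
	PROPERTY_ENTRY_BOOL("wakeup-source"),
	{ }
};

bool present = fwnode_property_present(fwnode, "wakeup-source");
bool enabled = fwnode_property_read_bool(fwnode, "wakeup-source");
/* both true iff the entry above exists */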
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 5c7fac80611c..2756870615cc 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -12,6 +12,7 @@ config TEST_ASYNC_DRIVER_PROBE
config DM_KUNIT_TEST
tristate "KUnit Tests for the device model" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
config DRIVER_PE_KUNIT_TEST
tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
diff --git a/drivers/base/test/platform-device-test.c b/drivers/base/test/platform-device-test.c
index ea05b8785743..6355a2231b74 100644
--- a/drivers/base/test/platform-device-test.c
+++ b/drivers/base/test/platform-device-test.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include <kunit/platform_device.h>
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#define DEVICE_NAME "test"
@@ -217,7 +220,43 @@ static struct kunit_suite platform_device_devm_test_suite = {
.test_cases = platform_device_devm_tests,
};
-kunit_test_suite(platform_device_devm_test_suite);
+static void platform_device_find_by_null_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = kunit_platform_device_alloc(test, DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ ret = kunit_platform_device_add(test, pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, of_find_device_by_node(NULL), NULL);
+
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_of_node(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_fwnode(&platform_bus_type, NULL), NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bus_find_device_by_acpi_dev(&platform_bus_type, NULL), NULL);
+
+ KUNIT_EXPECT_FALSE(test, device_match_of_node(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_fwnode(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_dev(&pdev->dev, NULL));
+ KUNIT_EXPECT_FALSE(test, device_match_acpi_handle(&pdev->dev, NULL));
+}
+
+static struct kunit_case platform_device_match_tests[] = {
+ KUNIT_CASE(platform_device_find_by_null_test),
+ {}
+};
+
+static struct kunit_suite platform_device_match_test_suite = {
+ .name = "platform-device-match",
+ .test_cases = platform_device_match_tests,
+};
+
+kunit_test_suites(
+ &platform_device_devm_test_suite,
+ &platform_device_match_test_suite,
+);
MODULE_DESCRIPTION("Test module for platform devices");
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
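The new suite pins down the convention that a NULL node never matches any device (rather than matching devices that themselves lack a node). A sketch of the invariant being tested, not the actual helper bodies:

static int example_match_of_node(struct device *dev, const void *np)
{
	/* NULL must short-circuit to "no match" before any comparison */
	return np && dev->of_node == np;
}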
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 89f98be5c5b9..b962da263eee 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,23 +23,39 @@ static ssize_t name##_show(struct device *dev, \
#define define_siblings_read_func(name, mask) \
static ssize_t name##_read(struct file *file, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
\
- return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_bitmask_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
+ \
+ return n; \
} \
\
static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
+ cpumask_var_t mask; \
+ ssize_t n; \
+ \
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \
+ return -ENOMEM; \
+ \
+ cpumask_copy(mask, topology_##mask(dev->id)); \
+ n = cpumap_print_list_to_buf(buf, mask, off, count); \
+ free_cpumask_var(mask); \
\
- return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \
- off, count); \
+ return n; \
}
define_id_show_func(physical_package_id, "%d");
@@ -62,50 +78,50 @@ define_id_show_func(ppin, "0x%llx");
static DEVICE_ATTR_ADMIN_RO(ppin);
define_siblings_read_func(thread_siblings, sibling_cpumask);
-static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_cpus, sibling_cpumask);
-static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
define_siblings_read_func(core_siblings, core_cpumask);
-static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_CLUSTER_SYSFS
define_siblings_read_func(cluster_cpus, cluster_cpumask);
-static BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DIE_SYSFS
define_siblings_read_func(die_cpus, die_cpumask);
-static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
#endif
define_siblings_read_func(package_cpus, core_cpumask);
-static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
#ifdef TOPOLOGY_BOOK_SYSFS
define_id_show_func(book_id, "%d");
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
-static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
define_id_show_func(drawer_id, "%d");
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
-static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
-static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
+static const BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
#endif
-static struct bin_attribute *bin_attrs[] = {
+static const struct bin_attribute *const bin_attrs[] = {
&bin_attr_core_cpus,
&bin_attr_core_cpus_list,
&bin_attr_thread_siblings,
@@ -163,7 +179,7 @@ static umode_t topology_is_visible(struct kobject *kobj,
static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
- .bin_attrs = bin_attrs,
+ .bin_attrs_new = bin_attrs,
.is_visible = topology_is_visible,
.name = "topology"
};
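Background on the pattern used in the macros above: cpumask_var_t is heap-backed when CONFIG_CPUMASK_OFFSTACK=y (large NR_CPUS builds), so the alloc/free pair is mandatory; on small configurations it degenerates to a plain on-stack bitmap at no cost. Generic shape, with src as a placeholder mask:

cpumask_var_t mask;
ssize_t n;

if (!alloc_cpumask_var(&mask, GFP_KERNEL))
	return -ENOMEM;
cpumask_copy(mask, src);	/* print from a stable snapshot */
n = cpumap_print_list_to_buf(buf, mask, off, count);
free_cpumask_var(mask);
return n;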
diff --git a/drivers/base/trace.h b/drivers/base/trace.h
index e52b6eae060d..3b83b13a57ff 100644
--- a/drivers/base/trace.h
+++ b/drivers/base/trace.h
@@ -24,18 +24,18 @@ DECLARE_EVENT_CLASS(devres,
__field(struct device *, dev)
__field(const char *, op)
__field(void *, node)
- __field(const char *, name)
+ __string(name, name)
__field(size_t, size)
),
TP_fast_assign(
__assign_str(devname);
__entry->op = op;
__entry->node = node;
- __entry->name = name;
+ __assign_str(name);
__entry->size = size;
),
TP_printk("%s %3s %p %s (%zu bytes)", __get_str(devname),
- __entry->op, __entry->node, __entry->name, __entry->size)
+ __entry->op, __entry->node, __get_str(name), __entry->size)
);
DEFINE_EVENT(devres, devres_log,
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
index 8ae0b918e740..20b1816c570b 100644
--- a/drivers/bcma/host_soc.c
+++ b/drivers/bcma/host_soc.c
@@ -261,7 +261,7 @@ static struct platform_driver bcma_host_soc_driver = {
.of_match_table = bcma_host_soc_of_match,
},
.probe = bcma_host_soc_probe,
- .remove_new = bcma_host_soc_remove,
+ .remove = bcma_host_soc_remove,
};
int __init bcma_host_soc_register_driver(void)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ed209f4f2798..a97f2c40c640 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -130,7 +130,7 @@ config BLK_DEV_UBD_SYNC
kernel command line option. Alternatively, you can say Y here to
turn on synchronous operation by default for all block devices.
- If you're running a journalling file system (like reiserfs, for
+ If you're running a journalling file system (like xfs, for
example) in your virtual machine, you will want to say Y here. If
you care for the safety of the data in your virtual machine, Y is a
wise choice too. In all other cases (for example, if you're just
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 49ced65bef4c..9edd4468f755 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1819,7 +1819,6 @@ static int fd_alloc_drive(int drive)
unit[drive].tag_set.nr_maps = 1;
unit[drive].tag_set.queue_depth = 2;
unit[drive].tag_set.numa_node = NUMA_NO_NODE;
- unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
goto out_cleanup_trackbuf;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 2028795ec61c..00b74a845328 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -368,7 +368,6 @@ aoeblk_gdalloc(void *vp)
set->nr_hw_queues = 1;
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(set);
if (err) {
pr_err("aoe: cannot allocate tag set for %ld.%d\n",
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 3523dd82d7a0..4db7f6ce8ade 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -226,10 +226,11 @@ aoedev_downdev(struct aoedev *d)
/* fast fail all pending I/O */
if (d->blkq) {
		/* UP is cleared, freeze+quiesce to ensure all are errored */
- blk_mq_freeze_queue(d->blkq);
+ unsigned int memflags = blk_mq_freeze_queue(d->blkq);
+
blk_mq_quiesce_queue(d->blkq);
blk_mq_unquiesce_queue(d->blkq);
- blk_mq_unfreeze_queue(d->blkq);
+ blk_mq_unfreeze_queue(d->blkq, memflags);
}
if (d->gd)
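This hunk shows the calling-convention change that recurs throughout the block drivers below: blk_mq_freeze_queue() now returns a memflags token (the saved allocation-context state) that must be handed back to blk_mq_unfreeze_queue(). Minimal shape:

unsigned int memflags;

memflags = blk_mq_freeze_queue(q);	/* drain I/O, save alloc context */
/* ... modify state that must not race with in-flight requests ... */
blk_mq_unfreeze_queue(q, memflags);	/* restore saved context */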
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4ba98c6654be..a81ade622a01 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -746,6 +746,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
unsigned char *p;
int sect, nsect;
unsigned long flags;
+ unsigned int memflags;
int ret;
if (type) {
@@ -758,7 +759,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
}
q = unit[drive].disk[type]->queue;
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
local_irq_save(flags);
@@ -817,7 +818,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
return ret;
}
@@ -2088,7 +2089,6 @@ static int __init atari_floppy_init (void)
unit[i].tag_set.nr_maps = 1;
unit[i].tag_set.queue_depth = 2;
unit[i].tag_set.numa_node = NUMA_NO_NODE;
- unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
if (ret)
goto err;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2fd1ed101748..292f127cae0a 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -231,8 +231,10 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
xa_lock(&brd->brd_pages);
while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) {
page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
- if (page)
+ if (page) {
__free_page(page);
+ brd->brd_nr_pages--;
+ }
aligned_sector += PAGE_SECTORS;
size -= PAGE_SIZE;
}
@@ -316,8 +318,40 @@ __setup("ramdisk_size=", ramdisk_size);
* (should share code eventually).
*/
static LIST_HEAD(brd_devices);
+static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;
+static struct brd_device *brd_find_or_alloc_device(int i)
+{
+ struct brd_device *brd;
+
+ mutex_lock(&brd_devices_mutex);
+ list_for_each_entry(brd, &brd_devices, brd_list) {
+ if (brd->brd_number == i) {
+ mutex_unlock(&brd_devices_mutex);
+ return ERR_PTR(-EEXIST);
+ }
+ }
+
+ brd = kzalloc(sizeof(*brd), GFP_KERNEL);
+ if (!brd) {
+ mutex_unlock(&brd_devices_mutex);
+ return ERR_PTR(-ENOMEM);
+ }
+ brd->brd_number = i;
+ list_add_tail(&brd->brd_list, &brd_devices);
+ mutex_unlock(&brd_devices_mutex);
+ return brd;
+}
+
+static void brd_free_device(struct brd_device *brd)
+{
+ mutex_lock(&brd_devices_mutex);
+ list_del(&brd->brd_list);
+ mutex_unlock(&brd_devices_mutex);
+ kfree(brd);
+}
+
static int brd_alloc(int i)
{
struct brd_device *brd;
@@ -340,14 +374,9 @@ static int brd_alloc(int i)
BLK_FEAT_NOWAIT,
};
- list_for_each_entry(brd, &brd_devices, brd_list)
- if (brd->brd_number == i)
- return -EEXIST;
- brd = kzalloc(sizeof(*brd), GFP_KERNEL);
- if (!brd)
- return -ENOMEM;
- brd->brd_number = i;
- list_add_tail(&brd->brd_list, &brd_devices);
+ brd = brd_find_or_alloc_device(i);
+ if (IS_ERR(brd))
+ return PTR_ERR(brd);
xa_init(&brd->brd_pages);
@@ -378,8 +407,7 @@ static int brd_alloc(int i)
out_cleanup_disk:
put_disk(disk);
out_free_dev:
- list_del(&brd->brd_list);
- kfree(brd);
+ brd_free_device(brd);
return err;
}
@@ -398,8 +426,7 @@ static void brd_cleanup(void)
del_gendisk(brd->brd_disk);
put_disk(brd->brd_disk);
brd_free_pages(brd);
- list_del(&brd->brd_list);
- kfree(brd);
+ brd_free_device(brd);
}
}
@@ -426,16 +453,6 @@ static int __init brd_init(void)
{
int err, i;
- brd_check_and_reset_par();
-
- brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
-
- for (i = 0; i < rd_nr; i++) {
- err = brd_alloc(i);
- if (err)
- goto out_free;
- }
-
/*
* brd module now has a feature to instantiate underlying device
* structure on-demand, provided that there is an access dev node.
@@ -451,11 +468,18 @@ static int __init brd_init(void)
* dynamically.
*/
+ brd_check_and_reset_par();
+
+ brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
+
if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
err = -EIO;
goto out_free;
}
+ for (i = 0; i < rd_nr; i++)
+ brd_alloc(i);
+
pr_info("brd: module loaded\n");
return 0;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 2a05d955e30b..e21492981f7d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1364,7 +1364,6 @@ extern struct bio_set drbd_io_bio_set;
extern struct mutex resources_mutex;
-extern int conn_lowest_minor(struct drbd_connection *connection);
extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
extern void drbd_destroy_device(struct kref *kref);
extern void drbd_delete_device(struct drbd_device *device);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 0d74d75260ef..5bbd312c3e14 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -471,20 +471,6 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
wait_for_completion(&thi->stop);
}
-int conn_lowest_minor(struct drbd_connection *connection)
-{
- struct drbd_peer_device *peer_device;
- int vnr = 0, minor = -1;
-
- rcu_read_lock();
- peer_device = idr_get_next(&connection->peer_devices, &vnr);
- if (peer_device)
- minor = device_to_minor(peer_device->device);
- rcu_read_unlock();
-
- return minor;
-}
-
#ifdef CONFIG_SMP
/*
* drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3affb538b989..abf0486f0d4f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4596,7 +4596,6 @@ static int __init do_floppy_init(void)
tag_sets[drive].nr_maps = 1;
tag_sets[drive].queue_depth = 2;
tag_sets[drive].numa_node = NUMA_NO_NODE;
- tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(&tag_sets[drive]);
if (err)
goto out_put_disk;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 78a7bb28defe..c05fe27a96b6 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -68,7 +68,6 @@ struct loop_device {
struct list_head idle_worker_list;
struct rb_root worker_tree;
struct timer_list timer;
- bool use_dio;
bool sysfs_inited;
struct request_queue *lo_queue;
@@ -173,7 +172,7 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
static bool lo_bdev_can_use_dio(struct loop_device *lo,
struct block_device *backing_bdev)
{
- unsigned short sb_bsize = bdev_logical_block_size(backing_bdev);
+ unsigned int sb_bsize = bdev_logical_block_size(backing_bdev);
if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
return false;
@@ -182,41 +181,44 @@ static bool lo_bdev_can_use_dio(struct loop_device *lo,
return true;
}
-static void __loop_update_dio(struct loop_device *lo, bool dio)
+static bool lo_can_use_dio(struct loop_device *lo)
{
- struct file *file = lo->lo_backing_file;
- struct inode *inode = file->f_mapping->host;
- struct block_device *backing_bdev = NULL;
- bool use_dio;
+ struct inode *inode = lo->lo_backing_file->f_mapping->host;
- if (S_ISBLK(inode->i_mode))
- backing_bdev = I_BDEV(inode);
- else if (inode->i_sb->s_bdev)
- backing_bdev = inode->i_sb->s_bdev;
+ if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT))
+ return false;
- use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) &&
- (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev));
+ if (S_ISBLK(inode->i_mode))
+ return lo_bdev_can_use_dio(lo, I_BDEV(inode));
+ if (inode->i_sb->s_bdev)
+ return lo_bdev_can_use_dio(lo, inode->i_sb->s_bdev);
+ return true;
+}
- if (lo->use_dio == use_dio)
- return;
+/*
+ * Direct I/O can be enabled either by using an O_DIRECT file descriptor, or by
+ * passing in the LO_FLAGS_DIRECT_IO flag from userspace. It will be silently
+ * disabled when the device block size is too small or the offset is unaligned.
+ *
+ * loop_get_status will always report the effective LO_FLAGS_DIRECT_IO flag and
+ * not the originally passed in one.
+ */
+static inline void loop_update_dio(struct loop_device *lo)
+{
+ bool dio_in_use = lo->lo_flags & LO_FLAGS_DIRECT_IO;
- /* flush dirty pages before changing direct IO */
- vfs_fsync(file, 0);
+ lockdep_assert_held(&lo->lo_mutex);
+ WARN_ON_ONCE(lo->lo_state == Lo_bound &&
+ lo->lo_queue->mq_freeze_depth == 0);
- /*
- * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
- * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
- * will get updated by ioctl(LOOP_GET_STATUS)
- */
- if (lo->lo_state == Lo_bound)
- blk_mq_freeze_queue(lo->lo_queue);
- lo->use_dio = use_dio;
- if (use_dio)
+ if (lo->lo_backing_file->f_flags & O_DIRECT)
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- else
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo))
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
- if (lo->lo_state == Lo_bound)
- blk_mq_unfreeze_queue(lo->lo_queue);
+
+ /* flush dirty pages before starting to issue direct I/O */
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !dio_in_use)
+ vfs_fsync(lo->lo_backing_file, 0);
}
/**
@@ -311,6 +313,13 @@ static void loop_clear_limits(struct loop_device *lo, int mode)
lim.discard_granularity = 0;
}
+ /*
+ * XXX: this updates the queue limits without freezing the queue, which
+ * is against the locking protocol and dangerous. But we can't just
+ * freeze the queue as we're inside the ->queue_rq method here. So this
+ * should move out into a workqueue unless we get the file operations to
+ * advertise if they support specific fallocate operations.
+ */
queue_limits_commit_update(lo->lo_queue, &lim);
}
@@ -520,12 +529,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
}
}
-static inline void loop_update_dio(struct loop_device *lo)
-{
- __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
- lo->use_dio);
-}
-
static void loop_reread_partitions(struct loop_device *lo)
{
int rc;
@@ -583,6 +586,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
{
struct file *file = fget(arg);
struct file *old_file;
+ unsigned int memflags;
int error;
bool partscan;
bool is_loop;
@@ -620,14 +624,14 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
/* and ... switch */
disk_force_media_change(lo->lo_disk);
- blk_mq_freeze_queue(lo->lo_queue);
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
lo->lo_backing_file = file;
lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
mapping_set_gfp_mask(file->f_mapping,
lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
loop_update_dio(lo);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
loop_global_unlock(lo, is_loop);
@@ -770,12 +774,11 @@ static void loop_sysfs_exit(struct loop_device *lo)
&loop_attribute_group);
}
-static void loop_config_discard(struct loop_device *lo,
- struct queue_limits *lim)
+static void loop_get_discard_config(struct loop_device *lo,
+ u32 *granularity, u32 *max_discard_sectors)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
- u32 granularity = 0, max_discard_sectors = 0;
struct kstatfs sbuf;
/*
@@ -786,27 +789,19 @@ static void loop_config_discard(struct loop_device *lo,
* file-backed loop devices: discarded regions read back as zero.
*/
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
+ struct block_device *bdev = I_BDEV(inode);
- max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
- granularity = bdev_discard_granularity(I_BDEV(inode)) ?:
- queue_physical_block_size(backingq);
+ *max_discard_sectors = bdev_write_zeroes_sectors(bdev);
+ *granularity = bdev_discard_granularity(bdev);
/*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard.
*/
} else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
- max_discard_sectors = UINT_MAX >> 9;
- granularity = sbuf.f_bsize;
+ *max_discard_sectors = UINT_MAX >> 9;
+ *granularity = sbuf.f_bsize;
}
-
- lim->max_hw_discard_sectors = max_discard_sectors;
- lim->max_write_zeroes_sectors = max_discard_sectors;
- if (max_discard_sectors)
- lim->discard_granularity = granularity;
- else
- lim->discard_granularity = 0;
}
struct loop_worker {
@@ -973,11 +968,10 @@ loop_set_status_from_info(struct loop_device *lo,
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_flags = info->lo_flags;
return 0;
}
-static unsigned short loop_default_blocksize(struct loop_device *lo,
+static unsigned int loop_default_blocksize(struct loop_device *lo,
struct block_device *backing_bdev)
{
/* In case of direct I/O, match underlying block size */
@@ -986,12 +980,13 @@ static unsigned short loop_default_blocksize(struct loop_device *lo,
return SECTOR_SIZE;
}
-static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
+static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
+ unsigned int bsize)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct block_device *backing_bdev = NULL;
- struct queue_limits lim;
+ u32 granularity = 0, max_discard_sectors = 0;
if (S_ISBLK(inode->i_mode))
backing_bdev = I_BDEV(inode);
@@ -1001,17 +996,22 @@ static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize)
if (!bsize)
bsize = loop_default_blocksize(lo, backing_bdev);
- lim = queue_limits_start_update(lo->lo_queue);
- lim.logical_block_size = bsize;
- lim.physical_block_size = bsize;
- lim.io_min = bsize;
- lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
+ loop_get_discard_config(lo, &granularity, &max_discard_sectors);
+
+ lim->logical_block_size = bsize;
+ lim->physical_block_size = bsize;
+ lim->io_min = bsize;
+ lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL);
if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY))
- lim.features |= BLK_FEAT_WRITE_CACHE;
+ lim->features |= BLK_FEAT_WRITE_CACHE;
if (backing_bdev && !bdev_nonrot(backing_bdev))
- lim.features |= BLK_FEAT_ROTATIONAL;
- loop_config_discard(lo, &lim);
- return queue_limits_commit_update(lo->lo_queue, &lim);
+ lim->features |= BLK_FEAT_ROTATIONAL;
+ lim->max_hw_discard_sectors = max_discard_sectors;
+ lim->max_write_zeroes_sectors = max_discard_sectors;
+ if (max_discard_sectors)
+ lim->discard_granularity = granularity;
+ else
+ lim->discard_granularity = 0;
}
static int loop_configure(struct loop_device *lo, blk_mode_t mode,
@@ -1020,6 +1020,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
{
struct file *file = fget(config->fd);
struct address_space *mapping;
+ struct queue_limits lim;
int error;
loff_t size;
bool partscan;
@@ -1064,6 +1065,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
error = loop_set_status_from_info(lo, &config->info);
if (error)
goto out_unlock;
+ lo->lo_flags = config->info.lo_flags;
if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) ||
!file->f_op->write_iter)
@@ -1085,13 +1087,15 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
disk_force_media_change(lo->lo_disk);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
lo->lo_device = bdev;
lo->lo_backing_file = file;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- error = loop_reconfigure_limits(lo, config->block_size);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, config->block_size);
+ /* No need to freeze the queue as the device isn't bound yet. */
+ error = queue_limits_commit_update(lo->lo_queue, &lim);
if (error)
goto out_unlock;
@@ -1151,7 +1155,12 @@ static void __loop_clr_fd(struct loop_device *lo)
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- /* reset the block size to the default */
+ /*
+ * Reset the block size to the default.
+ *
+ * No queue freezing needed because this is called from the final
+ * ->release call only, so there can't be any outstanding I/O.
+ */
lim = queue_limits_start_update(lo->lo_queue);
lim.logical_block_size = SECTOR_SIZE;
lim.physical_block_size = SECTOR_SIZE;
@@ -1245,9 +1254,9 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
+ unsigned int memflags;
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
@@ -1264,21 +1273,18 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
invalidate_bdev(lo->lo_device);
}
- /* I/O need to be drained during transfer transition */
- blk_mq_freeze_queue(lo->lo_queue);
-
- prev_lo_flags = lo->lo_flags;
+ /* I/O needs to be drained before changing lo_offset or lo_sizelimit */
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
err = loop_set_status_from_info(lo, info);
if (err)
goto out_unfreeze;
- /* Mask out flags that can't be set using LOOP_SET_STATUS. */
- lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For those flags, use the previous values instead */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
- /* For flags that can't be cleared, use previous values too */
- lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+ partscan = !(lo->lo_flags & LO_FLAGS_PARTSCAN) &&
+ (info->lo_flags & LO_FLAGS_PARTSCAN);
+
+ lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
+ lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
if (size_changed) {
loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
@@ -1286,17 +1292,13 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
loop_set_size(lo, new_size);
}
- /* update dio if lo_offset or transfer is changed */
- __loop_update_dio(lo, lo->use_dio);
+ /* update the direct I/O flag if lo_offset changed */
+ loop_update_dio(lo);
out_unfreeze:
- blk_mq_unfreeze_queue(lo->lo_queue);
-
- if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
- !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+ if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
- partscan = true;
- }
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan)
@@ -1445,20 +1447,34 @@ static int loop_set_capacity(struct loop_device *lo)
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
- int error = -ENXIO;
- if (lo->lo_state != Lo_bound)
- goto out;
+ bool use_dio = !!arg;
+ unsigned int memflags;
- __loop_update_dio(lo, !!arg);
- if (lo->use_dio == !!arg)
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+ if (use_dio == !!(lo->lo_flags & LO_FLAGS_DIRECT_IO))
return 0;
- error = -EINVAL;
- out:
- return error;
+
+ if (use_dio) {
+ if (!lo_can_use_dio(lo))
+ return -EINVAL;
+ /* flush dirty pages before starting to use direct I/O */
+ vfs_fsync(lo->lo_backing_file, 0);
+ }
+
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
+ if (use_dio)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+ else
+ lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+ return 0;
}
static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
+ struct queue_limits lim;
+ unsigned int memflags;
int err = 0;
if (lo->lo_state != Lo_bound)
@@ -1470,10 +1486,13 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
- blk_mq_freeze_queue(lo->lo_queue);
- err = loop_reconfigure_limits(lo, arg);
+ lim = queue_limits_start_update(lo->lo_queue);
+ loop_update_limits(lo, &lim, arg);
+
+ memflags = blk_mq_freeze_queue(lo->lo_queue);
+ err = queue_limits_commit_update(lo->lo_queue, &lim);
loop_update_dio(lo);
- blk_mq_unfreeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue, memflags);
return err;
}
@@ -1855,7 +1874,7 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
cmd->use_aio = false;
break;
default:
- cmd->use_aio = lo->use_dio;
+ cmd->use_aio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
break;
}
@@ -2024,8 +2043,7 @@ static int loop_add(int i)
lo->tag_set.queue_depth = hw_queue_depth;
lo->tag_set.numa_node = NUMA_NO_NODE;
lo->tag_set.cmd_size = sizeof(struct loop_cmd);
- lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
- BLK_MQ_F_NO_SCHED_BY_DEFAULT;
+ lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT;
lo->tag_set.driver_data = lo;
err = blk_mq_alloc_tag_set(&lo->tag_set);
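With use_dio gone, LO_FLAGS_DIRECT_IO in lo_flags is the single source of truth, toggled from userspace via LOOP_SET_DIRECT_IO. A hedged userspace sketch (error handling elided):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

/* Returns 0 on success; fails with EINVAL if the backing file or its
 * alignment cannot support O_DIRECT (see lo_can_use_dio() above). */
static int loop_toggle_dio(int loop_fd, int enable)
{
	return ioctl(loop_fd, LOOP_SET_DIRECT_IO, (unsigned long)enable);
}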
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 223faa9d5ffd..95361099a2dc 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2701,7 +2701,12 @@ static int mtip_hw_init(struct driver_data *dd)
int rv;
unsigned long timeout, timetaken;
- dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
+ dd->mmio = pcim_iomap_region(dd->pdev, MTIP_ABAR, MTIP_DRV_NAME);
+ if (IS_ERR(dd->mmio)) {
+ dev_err(&dd->pdev->dev, "Unable to request / ioremap PCI region\n");
+ return PTR_ERR(dd->mmio);
+ }
+
mtip_detect_product(dd);
if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
@@ -3411,7 +3416,6 @@ static int mtip_block_initialize(struct driver_data *dd)
dd->tags.reserved_tags = 1;
dd->tags.cmd_size = sizeof(struct mtip_cmd);
dd->tags.numa_node = dd->numa_node;
- dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
@@ -3710,13 +3714,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
goto iomap_err;
}
- /* Map BAR5 to memory. */
- rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
- if (rv < 0) {
- dev_err(&pdev->dev, "Unable to map regions\n");
- goto iomap_err;
- }
-
rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv) {
dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
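pcim_iomap_region() is the one-call devres replacement for the removed pcim_iomap_regions() + pcim_iomap_table() pair; it requests and maps a single BAR and returns the mapping or an ERR_PTR. Generic shape (BAR_NR and DRV_NAME are placeholders):

void __iomem *mmio;

mmio = pcim_iomap_region(pdev, BAR_NR, DRV_NAME);
if (IS_ERR(mmio))
	return dev_err_probe(&pdev->dev, PTR_ERR(mmio),
			     "unable to request/ioremap BAR %d\n", BAR_NR);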
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index b852050d8a96..7bdc7eb808ea 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -62,6 +62,7 @@ struct nbd_sock {
bool dead;
int fallback_index;
int cookie;
+ struct work_struct work;
};
struct recv_thread_args {
@@ -141,6 +142,9 @@ struct nbd_device {
*/
#define NBD_CMD_INFLIGHT 2
+/* Only part of the request header or data payload was sent successfully */
+#define NBD_CMD_PARTIAL_SEND 3
+
struct nbd_cmd {
struct nbd_device *nbd;
struct mutex lock;
@@ -327,8 +331,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
+static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize)
{
struct queue_limits lim;
int error;
@@ -368,7 +371,7 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
lim.logical_block_size = blksize;
lim.physical_block_size = blksize;
- error = queue_limits_commit_update(nbd->disk->queue, &lim);
+ error = queue_limits_commit_update_frozen(nbd->disk->queue, &lim);
if (error)
return error;
@@ -379,18 +382,6 @@ static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
return 0;
}
-static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
- loff_t blksize)
-{
- int error;
-
- blk_mq_freeze_queue(nbd->disk->queue);
- error = __nbd_set_size(nbd, bytesize, blksize);
- blk_mq_unfreeze_queue(nbd->disk->queue);
-
- return error;
-}
-
static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -466,6 +457,12 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
+ /* partial send is handled in nbd_sock's work function */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return BLK_EH_RESET_TIMER;
+ }
+
if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
mutex_unlock(&cmd->lock);
return BLK_EH_DONE;
@@ -615,6 +612,30 @@ static inline int was_interrupted(int result)
}
/*
+ * We've already sent the header or part of the data payload, so we have
+ * no choice but to mark the request as pending and finish it from the
+ * work function.
+ *
+ * We also have to return BLK_STS_OK to the block core; otherwise this
+ * same request may be re-dispatched with a different tag, but our header
+ * has already gone out with the old tag, which would confuse reply
+ * handling.
+ */
+static void nbd_sched_pending_work(struct nbd_device *nbd,
+ struct nbd_sock *nsock,
+ struct nbd_cmd *cmd, int sent)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ /* pending work should be scheduled only once */
+ WARN_ON_ONCE(test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags));
+
+ nsock->pending = req;
+ nsock->sent = sent;
+ set_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+ refcount_inc(&nbd->config_refs);
+ schedule_work(&nsock->work);
+}
+
+/*
* Returns BLK_STS_RESOURCE if the caller should retry after a delay.
* Returns BLK_STS_IOERR if sending failed.
*/
@@ -699,8 +720,8 @@ static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
* completely done.
*/
if (sent) {
- nsock->pending = req;
- nsock->sent = sent;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
@@ -737,14 +758,8 @@ send_pages:
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result < 0) {
if (was_interrupted(result)) {
- /* We've already sent the header, we
- * have no choice but to set pending and
- * return BUSY.
- */
- nsock->pending = req;
- nsock->sent = sent;
- set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return BLK_STS_RESOURCE;
+ nbd_sched_pending_work(nbd, nsock, cmd, sent);
+ return BLK_STS_OK;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -770,6 +785,14 @@ out:
return BLK_STS_OK;
requeue:
+ /*
+	 * Can't requeue in case we are dealing with a partial send;
+	 * it must be completed from the pending work function instead.
+	 */
+ if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags))
+ return BLK_STS_OK;
+
/* retry on a different socket */
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
@@ -778,6 +801,44 @@ requeue:
return BLK_STS_OK;
}
+/* handle partial sending */
+static void nbd_pending_cmd_work(struct work_struct *work)
+{
+ struct nbd_sock *nsock = container_of(work, struct nbd_sock, work);
+ struct request *req = nsock->pending;
+ struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
+ struct nbd_device *nbd = cmd->nbd;
+ unsigned long deadline = READ_ONCE(req->deadline);
+ unsigned int wait_ms = 2;
+
+ mutex_lock(&cmd->lock);
+
+ WARN_ON_ONCE(test_bit(NBD_CMD_REQUEUED, &cmd->flags));
+ if (WARN_ON_ONCE(!test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)))
+ goto out;
+
+ mutex_lock(&nsock->tx_lock);
+ while (true) {
+ nbd_send_cmd(nbd, cmd, cmd->index);
+ if (!nsock->pending)
+ break;
+
+		/* don't involve the timeout handler in a partial send */
+ if (READ_ONCE(jiffies) + msecs_to_jiffies(wait_ms) >= deadline) {
+ cmd->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
+ break;
+ }
+ msleep(wait_ms);
+ wait_ms *= 2;
+ }
+ mutex_unlock(&nsock->tx_lock);
+ clear_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
+out:
+ mutex_unlock(&cmd->lock);
+ nbd_config_put(nbd);
+}
+
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
struct nbd_reply *reply)
{
@@ -1173,6 +1234,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
struct socket *sock;
struct nbd_sock **socks;
struct nbd_sock *nsock;
+ unsigned int memflags;
int err;
/* Arg will be cast to int, check it to avoid overflow */
@@ -1186,7 +1248,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
* We need to make sure we don't get any errant requests while we're
* reallocating the ->socks array.
*/
- blk_mq_freeze_queue(nbd->disk->queue);
+ memflags = blk_mq_freeze_queue(nbd->disk->queue);
if (!netlink && !nbd->task_setup &&
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
@@ -1224,14 +1286,15 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
nsock->pending = NULL;
nsock->sent = 0;
nsock->cookie = 0;
+ INIT_WORK(&nsock->work, nbd_pending_cmd_work);
socks[config->num_connections++] = nsock;
atomic_inc(&config->live_connections);
- blk_mq_unfreeze_queue(nbd->disk->queue);
+ blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
return 0;
put_socket:
- blk_mq_unfreeze_queue(nbd->disk->queue);
+ blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
sockfd_put(sock);
return err;
}
@@ -1841,8 +1904,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd->tag_set.queue_depth = 128;
nbd->tag_set.numa_node = NUMA_NO_NODE;
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
- nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_BLOCKING;
+ nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
nbd->backend = NULL;
@@ -2180,6 +2242,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
flush_workqueue(nbd->recv_workq);
nbd_clear_que(nbd);
nbd->task_setup = NULL;
+ clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
mutex_unlock(&nbd->config_lock);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
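Why a partially sent request can't simply be requeued: the on-wire NBD header identifies the command by a cookie derived from the blk-mq tag, so once any header bytes have hit the socket, re-dispatching under a fresh tag would orphan the server's reply. Rough wire layout for reference (see include/uapi/linux/nbd.h for the authoritative definition):

struct nbd_request {
	__be32 magic;	/* NBD_REQUEST_MAGIC */
	__be32 type;	/* NBD_CMD_READ/WRITE/... */
	__be64 cookie;	/* packs the blk-mq tag: must stay stable */
	__be64 from;	/* byte offset */
	__be32 len;	/* payload length */
} __attribute__((packed));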
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 2f0431e42c49..d94ef37480bd 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -266,6 +266,10 @@ static bool g_zone_full;
module_param_named(zone_full, g_zone_full, bool, S_IRUGO);
MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false");
+static bool g_rotational;
+module_param_named(rotational, g_rotational, bool, S_IRUGO);
+MODULE_PARM_DESC(rotational, "Set the rotational feature for the device. Default: false");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -468,6 +472,7 @@ NULLB_DEVICE_ATTR(no_sched, bool, NULL);
NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
NULLB_DEVICE_ATTR(fua, bool, NULL);
+NULLB_DEVICE_ATTR(rotational, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -621,6 +626,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_shared_tags,
&nullb_device_attr_shared_tag_bitmap,
&nullb_device_attr_fua,
+ &nullb_device_attr_rotational,
NULL,
};
@@ -706,7 +712,8 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
"shared_tags,size,submit_queues,use_per_node_hctx,"
"virt_boundary,zoned,zone_capacity,zone_max_active,"
"zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size,zone_append_max_sectors,zone_full\n");
+ "zone_size,zone_append_max_sectors,zone_full,"
+ "rotational\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -793,6 +800,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->shared_tags = g_shared_tags;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
dev->fua = g_fua;
+ dev->rotational = g_rotational;
return dev;
}
@@ -899,7 +907,7 @@ static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
if (radix_tree_insert(root, idx, t_page)) {
null_free_page(t_page);
t_page = radix_tree_lookup(root, idx);
- WARN_ON(!t_page || t_page->page->index != idx);
+ WARN_ON(!t_page || t_page->page->private != idx);
} else if (is_cache)
nullb->dev->curr_cache += PAGE_SIZE;
@@ -922,7 +930,7 @@ static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
(void **)t_pages, pos, FREE_BATCH);
for (i = 0; i < nr_pages; i++) {
- pos = t_pages[i]->page->index;
+ pos = t_pages[i]->page->private;
ret = radix_tree_delete_item(root, pos, t_pages[i]);
WARN_ON(ret != t_pages[i]);
null_free_page(ret);
@@ -948,7 +956,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
t_page = radix_tree_lookup(root, idx);
- WARN_ON(t_page && t_page->page->index != idx);
+ WARN_ON(t_page && t_page->page->private != idx);
if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
return t_page;
@@ -991,7 +999,7 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
spin_lock_irq(&nullb->lock);
idx = sector >> PAGE_SECTORS_SHIFT;
- t_page->page->index = idx;
+ t_page->page->private = idx;
t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
radix_tree_preload_end();
@@ -1011,7 +1019,7 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
struct nullb_page *t_page, *ret;
void *dst, *src;
- idx = c_page->page->index;
+ idx = c_page->page->private;
t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
@@ -1070,7 +1078,7 @@ again:
* avoid race, we don't allow page free
*/
for (i = 0; i < nr_pages; i++) {
- nullb->cache_flush_pos = c_pages[i]->page->index;
+ nullb->cache_flush_pos = c_pages[i]->page->private;
/*
* We found the page which is being flushed to disk by other
* threads
@@ -1638,10 +1646,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static void null_queue_rqs(struct request **rqlist)
+static void null_queue_rqs(struct rq_list *rqlist)
{
- struct request *requeue_list = NULL;
- struct request **requeue_lastp = &requeue_list;
+ struct rq_list requeue_list = {};
struct blk_mq_queue_data bd = { };
blk_status_t ret;
@@ -1651,8 +1658,8 @@ static void null_queue_rqs(struct request **rqlist)
bd.rq = rq;
ret = null_queue_rq(rq->mq_hctx, &bd);
if (ret != BLK_STS_OK)
- rq_list_add_tail(&requeue_lastp, rq);
- } while (!rq_list_empty(*rqlist));
+ rq_list_add_tail(&requeue_list, rq);
+ } while (!rq_list_empty(rqlist));
*rqlist = requeue_list;
}
@@ -1784,9 +1791,8 @@ static int null_init_global_tag_set(void)
tag_set.nr_hw_queues = g_submit_queues;
tag_set.queue_depth = g_hw_queue_depth;
tag_set.numa_node = g_home_node;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
if (g_no_sched)
- tag_set.flags |= BLK_MQ_F_NO_SCHED;
+ tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (g_shared_tag_bitmap)
tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (g_blocking)
@@ -1810,9 +1816,8 @@ static int null_setup_tagset(struct nullb *nullb)
nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
nullb->tag_set->numa_node = nullb->dev->home_node;
- nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
if (nullb->dev->no_sched)
- nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
+ nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
if (nullb->dev->shared_tag_bitmap)
nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
if (nullb->dev->blocking)
@@ -1939,6 +1944,9 @@ static int null_add_dev(struct nullb_device *dev)
lim.features |= BLK_FEAT_FUA;
}
+ if (dev->rotational)
+ lim.features |= BLK_FEAT_ROTATIONAL;
+
nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
if (IS_ERR(nullb->disk)) {
rv = PTR_ERR(nullb->disk);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index a7bb32f73ec3..6f9fe6171087 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -107,6 +107,7 @@ struct nullb_device {
bool shared_tags; /* share tag set between devices for blk-mq */
bool shared_tag_bitmap; /* use hostwide shared tags */
bool fua; /* Support FUA */
+ bool rotational; /* Fake rotational device */
};
struct nullb {
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 9bc768b2ca56..0d5f9bf95229 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -166,7 +166,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
lim->features |= BLK_FEAT_ZONED;
lim->chunk_sectors = dev->zone_size_sects;
- lim->max_zone_append_sectors = dev->zone_append_max_sectors;
+ lim->max_hw_zone_append_sectors = dev->zone_append_max_sectors;
lim->max_open_zones = dev->zone_max_open;
lim->max_active_zones = dev->zone_max_active;
return 0;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index ff45ed766469..dc9e4a14b885 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -384,9 +384,9 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
unsigned int devidx;
struct queue_limits lim = {
.logical_block_size = dev->blk_size,
- .max_hw_sectors = dev->bounce_size >> 9,
+ .max_hw_sectors = BOUNCE_SIZE >> 9,
.max_segments = -1,
- .max_segment_size = dev->bounce_size,
+ .max_segment_size = BOUNCE_SIZE,
.dma_alignment = dev->blk_size - 1,
.features = BLK_FEAT_WRITE_CACHE |
BLK_FEAT_ROTATIONAL,
@@ -434,8 +434,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
ps3disk_identify(dev);
- error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE);
+ error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1, 0);
if (error)
goto fail_teardown;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9c8b19a22c2a..faafd7ff43d6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4964,7 +4964,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->tag_set.ops = &rbd_mq_ops;
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
- rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
@@ -7282,8 +7281,10 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
* Prevent new IO from being queued and wait for existing
* IO to complete/fail.
*/
- blk_mq_freeze_queue(rbd_dev->disk->queue);
+ unsigned int memflags = blk_mq_freeze_queue(rbd_dev->disk->queue);
+
blk_mark_disk_dead(rbd_dev->disk);
+ blk_mq_unfreeze_queue(rbd_dev->disk->queue, memflags);
}
del_gendisk(rbd_dev->disk);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index c34695d2eea7..82467ecde7ec 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1209,8 +1209,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
tag_set->ops = &rnbd_mq_ops;
tag_set->queue_depth = sess->queue_depth;
tag_set->numa_node = NUMA_NO_NODE;
- tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_TAG_QUEUE_SHARED;
+ tag_set->flags = BLK_MQ_F_TAG_QUEUE_SHARED;
tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
/* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 08ce6d96d04c..2ee6e9bd4e28 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -167,7 +167,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
- bio_set_prio(bio, prio);
+ bio->bi_ioprio = prio;
submit_bio(bio);
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
index b0227cf9ddd3..ddf3629d8894 100644
--- a/drivers/block/rnull.rs
+++ b/drivers/block/rnull.rs
@@ -28,28 +28,35 @@ module! {
type: NullBlkModule,
name: "rnull_mod",
author: "Andreas Hindborg",
+ description: "Rust implementation of the C null block driver",
license: "GPL v2",
}
+#[pin_data]
struct NullBlkModule {
- _disk: Pin<Box<Mutex<GenDisk<NullBlkDevice>>>>,
+ #[pin]
+ _disk: Mutex<GenDisk<NullBlkDevice>>,
}
-impl kernel::Module for NullBlkModule {
- fn init(_module: &'static ThisModule) -> Result<Self> {
+impl kernel::InPlaceModule for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
pr_info!("Rust null_blk loaded\n");
- let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
- let disk = gen_disk::GenDiskBuilder::new()
- .capacity_sectors(4096 << 11)
- .logical_block_size(4096)?
- .physical_block_size(4096)?
- .rotational(false)
- .build(format_args!("rnullb{}", 0), tagset)?;
+        // Use an immediately-called closure as a stable `try` block
+ let disk = /* try */ (|| {
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
- let disk = Box::pin_init(new_mutex!(disk, "nullb:disk"), flags::GFP_KERNEL)?;
+ gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(4096 << 11)
+ .logical_block_size(4096)?
+ .physical_block_size(4096)?
+ .rotational(false)
+ .build(format_args!("rnullb{}", 0), tagset)
+ })();
- Ok(Self { _disk: disk })
+ try_pin_init!(Self {
+ _disk <- new_mutex!(disk?, "nullb:disk"),
+ })
}
}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 2d38331ee667..282f81616a78 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -829,7 +829,7 @@ static int probe_disk(struct vdc_port *port)
}
err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
- VDC_TX_RING_SIZE, BLK_MQ_F_SHOULD_MERGE);
+ VDC_TX_RING_SIZE, 0);
if (err)
return err;
@@ -918,12 +918,12 @@ struct vdc_check_port_data {
char *type;
};
-static int vdc_device_probed(struct device *dev, void *arg)
+static int vdc_device_probed(struct device *dev, const void *arg)
{
struct vio_dev *vdev = to_vio_dev(dev);
- struct vdc_check_port_data *port_data;
+ const struct vdc_check_port_data *port_data;
- port_data = (struct vdc_check_port_data *)arg;
+ port_data = (const struct vdc_check_port_data *)arg;
if ((vdev->dev_no == port_data->dev_no) &&
(!(strcmp((char *)&vdev->type, port_data->type))) &&
@@ -1113,6 +1113,7 @@ static void vdc_requeue_inflight(struct vdc_port *port)
static void vdc_queue_drain(struct vdc_port *port)
{
struct request_queue *q = port->disk->queue;
+ unsigned int memflags;
/*
* Mark the queue as draining, then freeze/quiesce to ensure
@@ -1121,13 +1122,13 @@ static void vdc_queue_drain(struct vdc_port *port)
port->drain = 1;
spin_unlock_irq(&port->vio.lock);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
spin_lock_irq(&port->vio.lock);
port->drain = 0;
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static void vdc_ldc_reset_timer_work(struct work_struct *work)
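The freeze/unfreeze changes in this and the following hunks track an
updated block-layer contract in this series: blk_mq_freeze_queue() now
also enters a NOIO memory-allocation scope and returns the saved
flags, which the caller must hand back to blk_mq_unfreeze_queue(). A
minimal usage sketch (q is any struct request_queue *):

	unsigned int memflags;

	memflags = blk_mq_freeze_queue(q);	/* freeze + save allocation scope */
	/* queue frozen: no new requests reach the driver here */
	blk_mq_unfreeze_queue(q, memflags);	/* unfreeze + restore scope */

Converted callers, as in vdc_queue_drain() above, only need the extra
local variable.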
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 126f151c4f2c..eda33c5eb5e2 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -818,7 +818,7 @@ static int swim_floppy_init(struct swim_priv *swd)
for (drive = 0; drive < swd->floppy_count; drive++) {
err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
- &swim_mq_ops, 2, BLK_MQ_F_SHOULD_MERGE);
+ &swim_mq_ops, 2, 0);
if (err)
goto exit_put_disks;
@@ -944,7 +944,7 @@ static void swim_remove(struct platform_device *dev)
static struct platform_driver swim_driver = {
.probe = swim_probe,
- .remove_new = swim_remove,
+ .remove = swim_remove,
.driver = {
.name = CARDNAME,
},
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 90be1017f7bf..3aedcb5add61 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -840,6 +840,7 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
static void release_drive(struct floppy_state *fs)
{
struct request_queue *q = disks[fs->index]->queue;
+ unsigned int memflags;
unsigned long flags;
swim3_dbg("%s", "-> release drive\n");
@@ -848,10 +849,10 @@ static void release_drive(struct floppy_state *fs)
fs->state = idle;
spin_unlock_irqrestore(&swim3_lock, flags);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static int fd_eject(struct floppy_state *fs)
@@ -1208,8 +1209,7 @@ static int swim3_attach(struct macio_dev *mdev,
fs = &floppy_states[floppy_count];
memset(fs, 0, sizeof(*fs));
- rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE);
+ rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2, 0);
if (rc)
goto out_unregister;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index a6c8e5cc6051..529085181f35 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -60,7 +60,12 @@
| UBLK_F_UNPRIVILEGED_DEV \
| UBLK_F_CMD_IOCTL_ENCODE \
| UBLK_F_USER_COPY \
- | UBLK_F_ZONED)
+ | UBLK_F_ZONED \
+ | UBLK_F_USER_RECOVERY_FAIL_IO)
+
+#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
+ | UBLK_F_USER_RECOVERY_REISSUE \
+ | UBLK_F_USER_RECOVERY_FAIL_IO)
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL \
@@ -143,6 +148,7 @@ struct ublk_queue {
bool force_abort;
bool timeout;
bool canceling;
+ bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
unsigned short nr_io_ready; /* how many ios setup */
spinlock_t cancel_lock;
struct ublk_device *dev;
@@ -179,8 +185,7 @@ struct ublk_device {
unsigned int nr_queues_ready;
unsigned int nr_privileged_daemon;
- struct work_struct quiesce_work;
- struct work_struct stop_work;
+ struct work_struct nosrv_work;
};
/* header of ublk_params */
@@ -664,30 +669,69 @@ static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
return ublk_get_queue(ub, q_id)->io_cmd_buf;
}
+static inline int __ublk_queue_cmd_buf_size(int depth)
+{
+ return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
+}
+
static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
{
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
- PAGE_SIZE);
+ return __ublk_queue_cmd_buf_size(ubq->q_depth);
+}
+
+static int ublk_max_cmd_buf_size(void)
+{
+ return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
}
-static inline bool ublk_queue_can_use_recovery_reissue(
- struct ublk_queue *ubq)
+/*
+ * Should I/O outstanding to the ublk server when it exits be reissued?
+ * If not, outstanding I/O will get errors.
+ */
+static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
+{
+ return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+ (ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
+}
+
+/*
+ * Should I/O issued while there is no ublk server be queued? If not, I/O
+ * issued while there is no ublk server will get errors.
+ */
+static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
+{
+ return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
+ !(ub->dev_info.flags & UBLK_F_USER_RECOVERY_FAIL_IO);
+}
+
+/*
+ * Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
+ * of the device flags for smaller cache footprint - better for fast
+ * paths.
+ */
+static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
{
return (ubq->flags & UBLK_F_USER_RECOVERY) &&
- (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
+ !(ubq->flags & UBLK_F_USER_RECOVERY_FAIL_IO);
}
-static inline bool ublk_queue_can_use_recovery(
- struct ublk_queue *ubq)
+/*
+ * Should ublk devices be stopped (i.e. no recovery possible) when the
+ * ublk server exits? If not, devices can be used again by a future
+ * incarnation of a ublk server via the start_recovery/end_recovery
+ * commands.
+ */
+static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
{
- return ubq->flags & UBLK_F_USER_RECOVERY;
+ return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
}
-static inline bool ublk_can_use_recovery(struct ublk_device *ub)
+static inline bool ublk_dev_in_recoverable_state(struct ublk_device *ub)
{
- return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
+ return ub->dev_info.state == UBLK_S_DEV_QUIESCED ||
+ ub->dev_info.state == UBLK_S_DEV_FAIL_IO;
}
static void ublk_free_disk(struct gendisk *disk)
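Taken together, the renamed helpers encode the following no-server
behavior matrix (a sketch derived from the code above, not from the
patch description; R = UBLK_F_USER_RECOVERY,
RR = UBLK_F_USER_RECOVERY_REISSUE, FI = UBLK_F_USER_RECOVERY_FAIL_IO):

	flags    stop device?   new I/O while no server    outstanding I/O
	none     yes            (device is stopped)        errored
	R        no             queued                     errored
	R|RR     no             queued                     requeued/reissued
	R|FI     no             errored (BLK_STS_TARGET)   errored

The fail_io queue flag added above is the fast-path mirror of the
R|FI row.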
@@ -1063,7 +1107,7 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (ublk_queue_can_use_recovery_reissue(ubq))
+ if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
blk_mq_requeue_request(req, false);
else
ublk_put_req_ref(ubq, req);
@@ -1091,7 +1135,7 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
struct request *rq)
{
/* We cannot process this rq so just requeue it. */
- if (ublk_queue_can_use_recovery(ubq))
+ if (ublk_nosrv_dev_should_queue_io(ubq->dev))
blk_mq_requeue_request(rq, false);
else
blk_mq_end_request(rq, BLK_STS_IOERR);
@@ -1236,10 +1280,7 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
struct ublk_device *ub = ubq->dev;
if (ublk_abort_requests(ub, ubq)) {
- if (ublk_can_use_recovery(ub))
- schedule_work(&ub->quiesce_work);
- else
- schedule_work(&ub->stop_work);
+ schedule_work(&ub->nosrv_work);
}
return BLK_EH_DONE;
}
@@ -1254,6 +1295,10 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *rq = bd->rq;
blk_status_t res;
+ if (unlikely(ubq->fail_io)) {
+ return BLK_STS_TARGET;
+ }
+
/* fill iod to slot in io cmd buffer */
res = ublk_setup_iod(ubq, rq);
if (unlikely(res != BLK_STS_OK))
@@ -1268,7 +1313,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
* Note: force_abort is guaranteed to be seen because it is set
* before the request queue is unquiesced.
*/
- if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
+ if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
return BLK_STS_IOERR;
if (unlikely(ubq->canceling)) {
@@ -1322,7 +1367,7 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ublk_device *ub = filp->private_data;
size_t sz = vma->vm_end - vma->vm_start;
- unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
+ unsigned max_sz = ublk_max_cmd_buf_size();
unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
int q_id, ret = 0;
@@ -1489,10 +1534,7 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
ublk_cancel_cmd(ubq, io, issue_flags);
if (need_schedule) {
- if (ublk_can_use_recovery(ub))
- schedule_work(&ub->quiesce_work);
- else
- schedule_work(&ub->stop_work);
+ schedule_work(&ub->nosrv_work);
}
}
@@ -1555,20 +1597,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
ub->dev_info.state = UBLK_S_DEV_QUIESCED;
}
-static void ublk_quiesce_work_fn(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, quiesce_work);
-
- mutex_lock(&ub->mutex);
- if (ub->dev_info.state != UBLK_S_DEV_LIVE)
- goto unlock;
- __ublk_quiesce_dev(ub);
- unlock:
- mutex_unlock(&ub->mutex);
- ublk_cancel_dev(ub);
-}
-
static void ublk_unquiesce_dev(struct ublk_device *ub)
{
int i;
@@ -1590,6 +1618,21 @@ static void ublk_unquiesce_dev(struct ublk_device *ub)
blk_mq_kick_requeue_list(ub->ub_disk->queue);
}
+static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
+{
+ struct gendisk *disk;
+
+ /* Sync with ublk_abort_queue() by holding the lock */
+ spin_lock(&ub->lock);
+ disk = ub->ub_disk;
+ ub->dev_info.state = UBLK_S_DEV_DEAD;
+ ub->dev_info.ublksrv_pid = -1;
+ ub->ub_disk = NULL;
+ spin_unlock(&ub->lock);
+
+ return disk;
+}
+
static void ublk_stop_dev(struct ublk_device *ub)
{
struct gendisk *disk;
@@ -1597,26 +1640,50 @@ static void ublk_stop_dev(struct ublk_device *ub)
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
goto unlock;
- if (ublk_can_use_recovery(ub)) {
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
if (ub->dev_info.state == UBLK_S_DEV_LIVE)
__ublk_quiesce_dev(ub);
ublk_unquiesce_dev(ub);
}
del_gendisk(ub->ub_disk);
-
- /* Sync with ublk_abort_queue() by holding the lock */
- spin_lock(&ub->lock);
- disk = ub->ub_disk;
- ub->dev_info.state = UBLK_S_DEV_DEAD;
- ub->dev_info.ublksrv_pid = -1;
- ub->ub_disk = NULL;
- spin_unlock(&ub->lock);
+ disk = ublk_detach_disk(ub);
put_disk(disk);
unlock:
mutex_unlock(&ub->mutex);
ublk_cancel_dev(ub);
}
+static void ublk_nosrv_work(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, nosrv_work);
+ int i;
+
+ if (ublk_nosrv_should_stop_dev(ub)) {
+ ublk_stop_dev(ub);
+ return;
+ }
+
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state != UBLK_S_DEV_LIVE)
+ goto unlock;
+
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ __ublk_quiesce_dev(ub);
+ } else {
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ ublk_get_queue(ub, i)->fail_io = true;
+ }
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ }
+
+ unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_cancel_dev(ub);
+}
+
/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
{
@@ -2130,14 +2197,6 @@ static int ublk_add_chdev(struct ublk_device *ub)
return ret;
}
-static void ublk_stop_work_fn(struct work_struct *work)
-{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, stop_work);
-
- ublk_stop_dev(ub);
-}
-
/* align max io buffer size with PAGE_SIZE */
static void ublk_align_max_io_size(struct ublk_device *ub)
{
@@ -2154,7 +2213,6 @@ static int ublk_add_tag_set(struct ublk_device *ub)
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
ub->tag_set.numa_node = NUMA_NO_NODE;
ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
- ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ub->tag_set.driver_data = ub;
return blk_mq_alloc_tag_set(&ub->tag_set);
}
@@ -2162,8 +2220,7 @@ static int ublk_add_tag_set(struct ublk_device *ub)
static void ublk_remove(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->stop_work);
- cancel_work_sync(&ub->quiesce_work);
+ cancel_work_sync(&ub->nosrv_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
ublk_put_device(ub);
ublks_added--;
@@ -2229,7 +2286,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
lim.features |= BLK_FEAT_ZONED;
lim.max_active_zones = p->max_active_zones;
lim.max_open_zones = p->max_open_zones;
- lim.max_zone_append_sectors = p->max_zone_append_sectors;
+ lim.max_hw_zone_append_sectors = p->max_zone_append_sectors;
}
if (ub->params.basic.attrs & UBLK_ATTR_VOLATILE_CACHE) {
@@ -2286,7 +2343,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
out_put_cdev:
if (ret) {
- ub->dev_info.state = UBLK_S_DEV_DEAD;
+ ublk_detach_disk(ub);
ublk_put_device(ub);
}
if (ret)
@@ -2372,6 +2429,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
return -EPERM;
+ /* forbid nonsense combinations of recovery flags */
+ switch (info.flags & UBLK_F_ALL_RECOVERY_FLAGS) {
+ case 0:
+ case UBLK_F_USER_RECOVERY:
+ case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE):
+ case (UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO):
+ break;
+ default:
+ pr_warn("%s: invalid recovery flags %llx\n", __func__,
+ info.flags & UBLK_F_ALL_RECOVERY_FLAGS);
+ return -EINVAL;
+ }
+
/*
* unprivileged device can't be trusted, but RECOVERY and
* RECOVERY_REISSUE still may hang error handling, so can't
@@ -2380,10 +2450,19 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
* TODO: provide forward progress for RECOVERY handler, so that
* unprivileged device can benefit from it
*/
- if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
+ if (info.flags & UBLK_F_UNPRIVILEGED_DEV) {
info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
UBLK_F_USER_RECOVERY);
+ /*
+ * For USER_COPY, we depend on userspace to fill the request
+ * buffer via pwrite() to the ublk char device, which can't be
+ * allowed for an unprivileged device
+ */
+ if (info.flags & UBLK_F_USER_COPY)
+ return -EINVAL;
+ }
+
/* the created device is always owned by current user */
ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
@@ -2415,8 +2494,7 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
- INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
- INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
+ INIT_WORK(&ub->nosrv_work, ublk_nosrv_work);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
@@ -2551,9 +2629,7 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->stop_work);
- cancel_work_sync(&ub->quiesce_work);
-
+ cancel_work_sync(&ub->nosrv_work);
return 0;
}
@@ -2690,7 +2766,7 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
int i;
mutex_lock(&ub->mutex);
- if (!ublk_can_use_recovery(ub))
+ if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
if (!ub->nr_queues_ready)
goto out_unlock;
@@ -2701,14 +2777,18 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
* and related io_uring ctx is freed so file struct of /dev/ublkcX is
* released.
*
+ * and one of the following holds
+ *
* (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
* (a)has quiesced request queue
* (b)has requeued every inflight rqs whose io_flags is ACTIVE
* (c)has requeued/aborted every inflight rqs whose io_flags is NOT ACTIVE
* (d)has completed/canceled all ioucmds owned by the dying process
+ *
+ * (3) UBLK_S_DEV_FAIL_IO is set, which means the queue is not
+ * quiesced, but all I/O is being immediately errored
*/
- if (test_bit(UB_STATE_OPEN, &ub->state) ||
- ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ if (test_bit(UB_STATE_OPEN, &ub->state) || !ublk_dev_in_recoverable_state(ub)) {
ret = -EBUSY;
goto out_unlock;
}
@@ -2732,6 +2812,7 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
+ int i;
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -2743,21 +2824,32 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
mutex_lock(&ub->mutex);
- if (!ublk_can_use_recovery(ub))
+ if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
- if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ if (!ublk_dev_in_recoverable_state(ub)) {
ret = -EBUSY;
goto out_unlock;
}
ub->dev_info.ublksrv_pid = ublksrv_pid;
pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
__func__, ublksrv_pid, header->dev_id);
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- pr_devel("%s: queue unquiesced, dev id %d.\n",
- __func__, header->dev_id);
- blk_mq_kick_requeue_list(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
+
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ pr_devel("%s: queue unquiesced, dev id %d.\n",
+ __func__, header->dev_id);
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
+ } else {
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ ublk_get_queue(ub, i)->fail_io = false;
+ }
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ }
+
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
@@ -2956,7 +3048,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
ret = ublk_ctrl_end_recovery(ub, cmd);
break;
default:
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
}
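The final hunk's -ENOTSUPP to -EOPNOTSUPP switch is a uapi hygiene
fix: ENOTSUPP (524) is kernel-internal and must not cross the uapi
boundary, where userspace resolving it gets "Unknown error 524", while
EOPNOTSUPP (95) resolves cleanly. A hypothetical userspace
illustration of the visible difference:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	/* before: strerror(524)        -> "Unknown error 524"
	 * after:  strerror(EOPNOTSUPP) -> "Operation not supported" */
	void report(int err)
	{
		fprintf(stderr, "ublk ctrl cmd failed: %s\n", strerror(err));
	}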
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 194417abc105..6a61ec35f426 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -13,7 +13,6 @@
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>
@@ -471,18 +470,18 @@ static bool virtblk_prep_rq_batch(struct request *req)
return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}
-static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
- struct request **rqlist)
+static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ struct rq_list *rqlist)
{
+ struct request *req;
unsigned long flags;
- int err;
bool kick;
spin_lock_irqsave(&vq->lock, flags);
- while (!rq_list_empty(*rqlist)) {
- struct request *req = rq_list_pop(rqlist);
+ while ((req = rq_list_pop(rqlist))) {
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ int err;
err = virtblk_add_req(vq->vq, vbr);
if (err) {
@@ -495,37 +494,32 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->lock, flags);
- return kick;
+ if (kick)
+ virtqueue_notify(vq->vq);
}
-static void virtio_queue_rqs(struct request **rqlist)
+static void virtio_queue_rqs(struct rq_list *rqlist)
{
- struct request *req, *next, *prev = NULL;
- struct request *requeue_list = NULL;
-
- rq_list_for_each_safe(rqlist, req, next) {
- struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
- bool kick;
-
- if (!virtblk_prep_rq_batch(req)) {
- rq_list_move(rqlist, &requeue_list, req, prev);
- req = prev;
- if (!req)
- continue;
- }
+ struct rq_list submit_list = { };
+ struct rq_list requeue_list = { };
+ struct virtio_blk_vq *vq = NULL;
+ struct request *req;
+
+ while ((req = rq_list_pop(rqlist))) {
+ struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx);
- if (!next || req->mq_hctx != next->mq_hctx) {
- req->rq_next = NULL;
- kick = virtblk_add_req_batch(vq, rqlist);
- if (kick)
- virtqueue_notify(vq->vq);
+ if (vq && vq != this_vq)
+ virtblk_add_req_batch(vq, &submit_list);
+ vq = this_vq;
- *rqlist = next;
- prev = NULL;
- } else
- prev = req;
+ if (virtblk_prep_rq_batch(req))
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
}
+ if (vq)
+ virtblk_add_req_batch(vq, &submit_list);
*rqlist = requeue_list;
}
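The virtio_queue_rqs() rewrite rides an rq_list API change: batches
are now carried in a struct rq_list head/tail pair and manipulated
with rq_list_pop()/rq_list_add_tail() rather than threaded through a
bare struct request **. The resulting driver pattern, condensed
(prep() and flush() are stand-ins for the driver-specific helpers,
here virtblk_prep_rq_batch() and virtblk_add_req_batch()):

	struct rq_list submit = { }, requeue = { };
	struct request *req;

	while ((req = rq_list_pop(rqlist))) {
		if (prep(req))
			rq_list_add_tail(&submit, req);
		else
			rq_list_add_tail(&requeue, req);	/* retried later */
	}
	flush(&submit);			/* at most one kick per batch */
	*rqlist = requeue;		/* hand unprepped requests back to blk-mq */

The real loop additionally flushes whenever the hardware context
changes, so each virtqueue is still kicked at most once per batch.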
@@ -784,7 +778,7 @@ static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
wg, v);
return -ENODEV;
}
- lim->max_zone_append_sectors = v;
+ lim->max_hw_zone_append_sectors = v;
dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
return 0;
@@ -1111,9 +1105,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
lim.features |= BLK_FEAT_WRITE_CACHE;
else
lim.features &= ~BLK_FEAT_WRITE_CACHE;
- blk_mq_freeze_queue(disk->queue);
- i = queue_limits_commit_update(disk->queue, &lim);
- blk_mq_unfreeze_queue(disk->queue);
+ i = queue_limits_commit_update_frozen(disk->queue, &lim);
if (i)
return i;
return count;
@@ -1186,7 +1178,8 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(&set->map[i]);
else
- blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+ blk_mq_map_hw_queues(&set->map[i],
+ &vblk->vdev->dev, 0);
}
}
@@ -1486,7 +1479,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->tag_set.ops = &virtio_mq_ops;
vblk->tag_set.queue_depth = queue_depth;
vblk->tag_set.numa_node = NUMA_NO_NODE;
- vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
@@ -1587,13 +1579,16 @@ static void virtblk_remove(struct virtio_device *vdev)
put_disk(vblk->disk);
}
-#ifdef CONFIG_PM_SLEEP
-static int virtblk_freeze(struct virtio_device *vdev)
+static int virtblk_freeze_priv(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ struct request_queue *q = vblk->disk->queue;
+ unsigned int memflags;
/* Ensure no requests in virtqueues before deleting vqs. */
- blk_mq_freeze_queue(vblk->disk->queue);
+ memflags = blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue_nowait(q);
+ blk_mq_unfreeze_queue(q, memflags);
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
@@ -1607,7 +1602,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
return 0;
}
-static int virtblk_restore(struct virtio_device *vdev)
+static int virtblk_restore_priv(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int ret;
@@ -1617,12 +1612,33 @@ static int virtblk_restore(struct virtio_device *vdev)
return ret;
virtio_device_ready(vdev);
+ blk_mq_unquiesce_queue(vblk->disk->queue);
- blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtblk_freeze(struct virtio_device *vdev)
+{
+ return virtblk_freeze_priv(vdev);
+}
+
+static int virtblk_restore(struct virtio_device *vdev)
+{
+ return virtblk_restore_priv(vdev);
+}
#endif
+static int virtblk_reset_prepare(struct virtio_device *vdev)
+{
+ return virtblk_freeze_priv(vdev);
+}
+
+static int virtblk_reset_done(struct virtio_device *vdev)
+{
+ return virtblk_restore_priv(vdev);
+}
+
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -1658,6 +1674,8 @@ static struct virtio_driver virtio_blk = {
.freeze = virtblk_freeze,
.restore = virtblk_restore,
#endif
+ .reset_prepare = virtblk_reset_prepare,
+ .reset_done = virtblk_reset_done,
};
static int __init virtio_blk_init(void)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 838064593f62..a7c2b04ab943 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -544,7 +544,7 @@ static void print_stats(struct xen_blkif_ring *ring)
ring->st_rd_req, ring->st_wr_req,
ring->st_f_req, ring->st_ds_req,
ring->persistent_gnt_c, max_pgrants);
- ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
+ ring->st_print = jiffies + secs_to_jiffies(10);
ring->st_rd_req = 0;
ring->st_wr_req = 0;
ring->st_oo_req = 0;
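secs_to_jiffies() used above is, under the obvious assumed definition,
equivalent to the msecs form for whole seconds:

	/* assumed sketch of the helper */
	#define secs_to_jiffies(_secs) ((_secs) * HZ)

	/* secs_to_jiffies(10) == msecs_to_jiffies(10 * 1000) */

so the conversion only removes a needless millisecond multiply.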
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 59ce113b882a..edcd08a9dcef 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1131,7 +1131,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
} else
info->tag_set.queue_depth = BLK_RING_SIZE(info);
info->tag_set.numa_node = NUMA_NO_NODE;
- info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
info->tag_set.cmd_size = sizeof(struct blkif_req);
info->tag_set.driver_data = info;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 4b7219be1bb8..8c1c7f4211eb 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -354,7 +354,6 @@ static int __init z2_init(void)
tag_set.nr_maps = 1;
tag_set.queue_depth = 16;
tag_set.numa_node = NUMA_NO_NODE;
- tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ret = blk_mq_alloc_tag_set(&tag_set);
if (ret)
goto out_unregister_blkdev;
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 6aea609b795c..402b7b175863 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -94,6 +94,7 @@ endchoice
config ZRAM_DEF_COMP
string
+ depends on ZRAM
default "lzo-rle" if ZRAM_DEF_COMP_LZORLE
default "lzo" if ZRAM_DEF_COMP_LZO
default "lz4" if ZRAM_DEF_COMP_LZ4
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index ad9c9bc3ccfc..9f5020b077c5 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -55,8 +55,8 @@ static size_t huge_class_size;
static const struct block_device_operations zram_devops;
static void zram_free_page(struct zram *zram, size_t index);
-static int zram_read_page(struct zram *zram, struct page *page, u32 index,
- struct bio *parent);
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index);
static int zram_slot_trylock(struct zram *zram, u32 index)
{
@@ -112,17 +112,6 @@ static void zram_clear_flag(struct zram *zram, u32 index,
zram->table[index].flags &= ~BIT(flag);
}
-static inline void zram_set_element(struct zram *zram, u32 index,
- unsigned long element)
-{
- zram->table[index].element = element;
-}
-
-static unsigned long zram_get_element(struct zram *zram, u32 index)
-{
- return zram->table[index].element;
-}
-
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
@@ -143,6 +132,27 @@ static inline bool zram_allocated(struct zram *zram, u32 index)
zram_test_flag(zram, index, ZRAM_WB);
}
+static inline void update_used_max(struct zram *zram, const unsigned long pages)
+{
+ unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+
+ do {
+ if (cur_max >= pages)
+ return;
+ } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
+ &cur_max, pages));
+}
+
+static bool zram_can_store_page(struct zram *zram)
+{
+ unsigned long alloced_pages;
+
+ alloced_pages = zs_get_total_pages(zram->mem_pool);
+ update_used_max(zram, alloced_pages);
+
+ return !zram->limit_pages || alloced_pages <= zram->limit_pages;
+}
+
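update_used_max() above is the classic lock-free monotonic-maximum
idiom: retry the cmpxchg until either it succeeds or another CPU has
already recorded a larger value. A stand-alone sketch of the same
pattern (hypothetical helper, same logic):

	static void atomic_long_max_update(atomic_long_t *max, long val)
	{
		long cur = atomic_long_read(max);

		do {
			if (cur >= val)
				return;	/* someone stored a larger value */
		} while (!atomic_long_try_cmpxchg(max, &cur, val));
	}

atomic_long_try_cmpxchg() refreshes cur on failure, so the loop needs
no explicit re-read.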
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
@@ -178,23 +188,105 @@ static inline u32 zram_get_priority(struct zram *zram, u32 index)
static void zram_accessed(struct zram *zram, u32 index)
{
zram_clear_flag(zram, index, ZRAM_IDLE);
+ zram_clear_flag(zram, index, ZRAM_PP_SLOT);
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
zram->table[index].ac_time = ktime_get_boottime();
#endif
}
-static inline void update_used_max(struct zram *zram,
- const unsigned long pages)
+#if defined CONFIG_ZRAM_WRITEBACK || defined CONFIG_ZRAM_MULTI_COMP
+struct zram_pp_slot {
+ unsigned long index;
+ struct list_head entry;
+};
+
+/*
+ * A post-processing bucket is, essentially, a size class; this defines
+ * the range (in bytes) of pp-slot sizes in a particular bucket.
+ */
+#define PP_BUCKET_SIZE_RANGE 64
+#define NUM_PP_BUCKETS ((PAGE_SIZE / PP_BUCKET_SIZE_RANGE) + 1)
+
+struct zram_pp_ctl {
+ struct list_head pp_buckets[NUM_PP_BUCKETS];
+};
+
+static struct zram_pp_ctl *init_pp_ctl(void)
{
- unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+ struct zram_pp_ctl *ctl;
+ u32 idx;
- do {
- if (cur_max >= pages)
- return;
- } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
- &cur_max, pages));
+ ctl = kmalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return NULL;
+
+ for (idx = 0; idx < NUM_PP_BUCKETS; idx++)
+ INIT_LIST_HEAD(&ctl->pp_buckets[idx]);
+ return ctl;
+}
+
+static void release_pp_slot(struct zram *zram, struct zram_pp_slot *pps)
+{
+ list_del_init(&pps->entry);
+
+ zram_slot_lock(zram, pps->index);
+ zram_clear_flag(zram, pps->index, ZRAM_PP_SLOT);
+ zram_slot_unlock(zram, pps->index);
+
+ kfree(pps);
}
+static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl)
+{
+ u32 idx;
+
+ if (!ctl)
+ return;
+
+ for (idx = 0; idx < NUM_PP_BUCKETS; idx++) {
+ while (!list_empty(&ctl->pp_buckets[idx])) {
+ struct zram_pp_slot *pps;
+
+ pps = list_first_entry(&ctl->pp_buckets[idx],
+ struct zram_pp_slot,
+ entry);
+ release_pp_slot(zram, pps);
+ }
+ }
+
+ kfree(ctl);
+}
+
+static void place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
+ struct zram_pp_slot *pps)
+{
+ u32 idx;
+
+ idx = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
+ list_add(&pps->entry, &ctl->pp_buckets[idx]);
+
+ zram_set_flag(zram, pps->index, ZRAM_PP_SLOT);
+}
+
+static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
+{
+ struct zram_pp_slot *pps = NULL;
+ s32 idx = NUM_PP_BUCKETS - 1;
+
+ /* The higher the bucket id, the more worthwhile it is to post-process its slots */
+ while (idx >= 0) {
+ pps = list_first_entry_or_null(&ctl->pp_buckets[idx],
+ struct zram_pp_slot,
+ entry);
+ if (pps)
+ break;
+
+ idx--;
+ }
+ return pps;
+}
+#endif
+
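Bucket placement is plain integer division over the compressed object
size; with PP_BUCKET_SIZE_RANGE = 64 and PAGE_SIZE = 4096 (giving
NUM_PP_BUCKETS = 65):

	idx = zram_get_obj_size(zram, index) / PP_BUCKET_SIZE_RANGE;

	/* obj size   63 -> bucket  0
	 * obj size  100 -> bucket  1
	 * obj size 4096 -> bucket 64  (scanned first by select_pp_slot(),
	 *                              since post-processing the largest
	 *                              objects frees the most memory) */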
static inline void zram_fill_page(void *ptr, unsigned long len,
unsigned long value)
{
@@ -296,19 +388,28 @@ static void mark_idle(struct zram *zram, ktime_t cutoff)
for (index = 0; index < nr_pages; index++) {
/*
- * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
- * See the comment in writeback_store.
+ * Do not mark ZRAM_SAME slots as ZRAM_IDLE, because no
+ * post-processing (recompress, writeback) happens to the
+ * ZRAM_SAME slot.
+ *
+ * And ZRAM_WB slots simply cannot be ZRAM_IDLE.
*/
zram_slot_lock(zram, index);
- if (zram_allocated(zram, index) &&
- !zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
+ if (!zram_allocated(zram, index) ||
+ zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME)) {
+ zram_slot_unlock(zram, index);
+ continue;
+ }
+
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
- is_idle = !cutoff || ktime_after(cutoff,
- zram->table[index].ac_time);
+ is_idle = !cutoff ||
+ ktime_after(cutoff, zram->table[index].ac_time);
#endif
- if (is_idle)
- zram_set_flag(zram, index, ZRAM_IDLE);
- }
+ if (is_idle)
+ zram_set_flag(zram, index, ZRAM_IDLE);
+ else
+ zram_clear_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
}
}
@@ -511,6 +612,12 @@ static ssize_t backing_dev_store(struct device *dev,
}
nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+ /* Refuse to use a zero-sized device (also prevents self-reference) */
+ if (!nr_pages) {
+ err = -EINVAL;
+ goto out;
+ }
+
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
if (!bitmap) {
@@ -587,11 +694,57 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
#define IDLE_WRITEBACK (1<<1)
#define INCOMPRESSIBLE_WRITEBACK (1<<2)
+static int scan_slots_for_writeback(struct zram *zram, u32 mode,
+ unsigned long nr_pages,
+ unsigned long index,
+ struct zram_pp_ctl *ctl)
+{
+ struct zram_pp_slot *pps = NULL;
+
+ for (; nr_pages != 0; index++, nr_pages--) {
+ if (!pps)
+ pps = kmalloc(sizeof(*pps), GFP_KERNEL);
+ if (!pps)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pps->entry);
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME))
+ goto next;
+
+ if (mode & IDLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+ if (mode & HUGE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+ if (mode & INCOMPRESSIBLE_WRITEBACK &&
+ !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+ pps->index = index;
+ place_pp_slot(zram, ctl, pps);
+ pps = NULL;
+next:
+ zram_slot_unlock(zram, index);
+ }
+
+ kfree(pps);
+ return 0;
+}
+
static ssize_t writeback_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+ struct zram_pp_ctl *ctl = NULL;
+ struct zram_pp_slot *pps;
unsigned long index = 0;
struct bio bio;
struct bio_vec bio_vec;
@@ -626,6 +779,12 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
if (!zram->backing_dev) {
ret = -ENODEV;
goto release_init_lock;
@@ -637,7 +796,15 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- for (; nr_pages != 0; index++, nr_pages--) {
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_writeback(zram, mode, nr_pages, index, ctl);
+
+ while ((pps = select_pp_slot(ctl))) {
spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
spin_unlock(&zram->wb_limit_lock);
@@ -654,40 +821,19 @@ static ssize_t writeback_store(struct device *dev,
}
}
+ index = pps->index;
zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index))
- goto next;
-
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME) ||
- zram_test_flag(zram, index, ZRAM_UNDER_WB))
- goto next;
-
- if (mode & IDLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
- if (mode & HUGE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
- goto next;
- if (mode & INCOMPRESSIBLE_WRITEBACK &&
- !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- goto next;
-
/*
- * Clearing ZRAM_UNDER_WB is duty of caller.
- * IOW, zram_free_page never clear it.
+ * scan_slots() sets ZRAM_PP_SLOT and releases the slot lock, so
+ * slots can change in the meantime. If slots are accessed or
+ * freed they lose the ZRAM_PP_SLOT flag and hence we don't
+ * post-process them.
*/
- zram_set_flag(zram, index, ZRAM_UNDER_WB);
- /* Need for hugepage writeback racing */
- zram_set_flag(zram, index, ZRAM_IDLE);
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
+ goto next;
+ if (zram_read_from_zspool(zram, page, index))
+ goto next;
zram_slot_unlock(zram, index);
- if (zram_read_page(zram, page, index, NULL)) {
- zram_slot_lock(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
- zram_slot_unlock(zram, index);
- continue;
- }
bio_init(&bio, zram->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
@@ -700,10 +846,7 @@ static ssize_t writeback_store(struct device *dev,
*/
err = submit_bio_wait(&bio);
if (err) {
- zram_slot_lock(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
- zram_slot_unlock(zram, index);
+ release_pp_slot(zram, pps);
/*
* BIO errors are not fatal, we continue and simply
* attempt to writeback the remaining objects (pages).
@@ -717,27 +860,21 @@ static ssize_t writeback_store(struct device *dev,
}
atomic64_inc(&zram->stats.bd_writes);
+ zram_slot_lock(zram, index);
/*
- * We released zram_slot_lock so need to check if the slot was
- * changed. If there is freeing for the slot, we can catch it
- * easily by zram_allocated.
- * A subtle case is the slot is freed/reallocated/marked as
- * ZRAM_IDLE again. To close the race, idle_store doesn't
- * mark ZRAM_IDLE once it found the slot was ZRAM_UNDER_WB.
- * Thus, we could close the race by checking ZRAM_IDLE bit.
+ * Same as above: we release the slot lock during writeback, so the
+ * slot can change under us, either via slot_free() alone or via
+ * slot_free() followed by reallocation (zram_write_page()). In both
+ * cases the slot loses the
+ * ZRAM_PP_SLOT flag. No concurrent post-processing can set
+ * ZRAM_PP_SLOT on such slots until current post-processing
+ * finishes.
*/
- zram_slot_lock(zram, index);
- if (!zram_allocated(zram, index) ||
- !zram_test_flag(zram, index, ZRAM_IDLE)) {
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
- zram_clear_flag(zram, index, ZRAM_IDLE);
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
goto next;
- }
zram_free_page(zram, index);
- zram_clear_flag(zram, index, ZRAM_UNDER_WB);
zram_set_flag(zram, index, ZRAM_WB);
- zram_set_element(zram, index, blk_idx);
+ zram_set_handle(zram, index, blk_idx);
blk_idx = 0;
atomic64_inc(&zram->stats.pages_stored);
spin_lock(&zram->wb_limit_lock);
@@ -746,12 +883,17 @@ static ssize_t writeback_store(struct device *dev,
spin_unlock(&zram->wb_limit_lock);
next:
zram_slot_unlock(zram, index);
+ release_pp_slot(zram, pps);
+
+ cond_resched();
}
if (blk_idx)
free_block_bdev(zram, blk_idx);
__free_page(page);
release_init_lock:
+ release_pp_ctl(zram, ctl);
+ atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
return ret;
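The pp_in_progress guard introduced here is a minimal test-and-set
exclusion between the two post-processing paths (writeback and
recompression); both store handlers now follow this protocol:

	if (atomic_xchg(&zram->pp_in_progress, 1))
		return -EAGAIN;		/* another post-processing op is running */

	/* ... scan slots, post-process via the pp_ctl buckets ... */

	atomic_set(&zram->pp_in_progress, 0);	/* release */

Unlike a mutex, a contended caller fails fast with -EAGAIN instead of
sleeping, which suits a sysfs store handler.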
@@ -1299,12 +1441,16 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
size_t num_pages = disksize >> PAGE_SHIFT;
size_t index;
+ if (!zram->table)
+ return;
+
/* Free all pages that are still in this zram device */
for (index = 0; index < num_pages; index++)
zram_free_page(zram, index);
zs_destroy_pool(zram->mem_pool);
vfree(zram->table);
+ zram->table = NULL;
}
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
@@ -1319,6 +1465,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
zram->mem_pool = zs_create_pool(zram->disk->disk_name);
if (!zram->mem_pool) {
vfree(zram->table);
+ zram->table = NULL;
return false;
}
@@ -1342,22 +1489,20 @@ static void zram_free_page(struct zram *zram, size_t index)
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
zram->table[index].ac_time = 0;
#endif
- if (zram_test_flag(zram, index, ZRAM_IDLE))
- zram_clear_flag(zram, index, ZRAM_IDLE);
+
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+ zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+ zram_clear_flag(zram, index, ZRAM_PP_SLOT);
+ zram_set_priority(zram, index, 0);
if (zram_test_flag(zram, index, ZRAM_HUGE)) {
zram_clear_flag(zram, index, ZRAM_HUGE);
atomic64_dec(&zram->stats.huge_pages);
}
- if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
-
- zram_set_priority(zram, index, 0);
-
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
- free_block_bdev(zram, zram_get_element(zram, index));
+ free_block_bdev(zram, zram_get_handle(zram, index));
goto out;
}
@@ -1378,65 +1523,80 @@ static void zram_free_page(struct zram *zram, size_t index)
zs_free(zram->mem_pool, handle);
atomic64_sub(zram_get_obj_size(zram, index),
- &zram->stats.compr_data_size);
+ &zram->stats.compr_data_size);
out:
atomic64_dec(&zram->stats.pages_stored);
zram_set_handle(zram, index, 0);
zram_set_obj_size(zram, index, 0);
- WARN_ON_ONCE(zram->table[index].flags &
- ~(1UL << ZRAM_UNDER_WB));
}
-/*
- * Reads (decompresses if needed) a page from zspool (zsmalloc).
- * Corresponding ZRAM slot should be locked.
- */
-static int zram_read_from_zspool(struct zram *zram, struct page *page,
+static int read_same_filled_page(struct zram *zram, struct page *page,
u32 index)
{
+ void *mem;
+
+ mem = kmap_local_page(page);
+ zram_fill_page(mem, PAGE_SIZE, zram_get_handle(zram, index));
+ kunmap_local(mem);
+ return 0;
+}
+
+static int read_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src, *dst;
+
+ handle = zram_get_handle(zram, index);
+ src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+ dst = kmap_local_page(page);
+ copy_page(dst, src);
+ kunmap_local(dst);
+ zs_unmap_object(zram->mem_pool, handle);
+
+ return 0;
+}
+
+static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
+{
struct zcomp_strm *zstrm;
unsigned long handle;
unsigned int size;
void *src, *dst;
- u32 prio;
- int ret;
+ int ret, prio;
handle = zram_get_handle(zram, index);
- if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
- unsigned long value;
- void *mem;
-
- value = handle ? zram_get_element(zram, index) : 0;
- mem = kmap_local_page(page);
- zram_fill_page(mem, PAGE_SIZE, value);
- kunmap_local(mem);
- return 0;
- }
-
size = zram_get_obj_size(zram, index);
+ prio = zram_get_priority(zram, index);
- if (size != PAGE_SIZE) {
- prio = zram_get_priority(zram, index);
- zstrm = zcomp_stream_get(zram->comps[prio]);
- }
-
+ zstrm = zcomp_stream_get(zram->comps[prio]);
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
- if (size == PAGE_SIZE) {
- dst = kmap_local_page(page);
- copy_page(dst, src);
- kunmap_local(dst);
- ret = 0;
- } else {
- dst = kmap_local_page(page);
- ret = zcomp_decompress(zram->comps[prio], zstrm,
- src, size, dst);
- kunmap_local(dst);
- zcomp_stream_put(zram->comps[prio]);
- }
+ dst = kmap_local_page(page);
+ ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
+ kunmap_local(dst);
zs_unmap_object(zram->mem_pool, handle);
+ zcomp_stream_put(zram->comps[prio]);
+
return ret;
}
+/*
+ * Reads (decompresses if needed) a page from zspool (zsmalloc).
+ * Corresponding ZRAM slot should be locked.
+ */
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+ u32 index)
+{
+ if (zram_test_flag(zram, index, ZRAM_SAME) ||
+ !zram_get_handle(zram, index))
+ return read_same_filled_page(zram, page, index);
+
+ if (!zram_test_flag(zram, index, ZRAM_HUGE))
+ return read_compressed_page(zram, page, index);
+ else
+ return read_incompressible_page(zram, page, index);
+}
+
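After the split, zram_read_from_zspool() is a three-way dispatch; as a
decision table (derived from the code above):

	handle == 0 or ZRAM_SAME  -> read_same_filled_page()    (pattern fill)
	ZRAM_HUGE                 -> read_incompressible_page() (raw page copy)
	otherwise                 -> read_compressed_page()     (decompress)

Each helper now owns exactly one case, where the old monolith keyed
off size == PAGE_SIZE to distinguish the last two.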
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
struct bio *parent)
{
@@ -1454,7 +1614,7 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
*/
zram_slot_unlock(zram, index);
- ret = read_from_bdev(zram, page, zram_get_element(zram, index),
+ ret = read_from_bdev(zram, page, zram_get_handle(zram, index),
parent);
}
@@ -1492,33 +1652,88 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
return zram_read_page(zram, bvec->bv_page, index, bio);
}
+static int write_same_filled_page(struct zram *zram, unsigned long fill,
+ u32 index)
+{
+ zram_slot_lock(zram, index);
+ zram_set_flag(zram, index, ZRAM_SAME);
+ zram_set_handle(zram, index, fill);
+ zram_slot_unlock(zram, index);
+
+ atomic64_inc(&zram->stats.same_pages);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
+
+static int write_incompressible_page(struct zram *zram, struct page *page,
+ u32 index)
+{
+ unsigned long handle;
+ void *src, *dst;
+
+ /*
+ * This function is called from preemptible context, so we don't
+ * need the optimistic-then-pessimistic handle allocation fallback
+ * that we use for compressible pages.
+ */
+ handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
+ GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
+ if (IS_ERR_VALUE(handle))
+ return PTR_ERR((void *)handle);
+
+ if (!zram_can_store_page(zram)) {
+ zs_free(zram->mem_pool, handle);
+ return -ENOMEM;
+ }
+
+ dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+ src = kmap_local_page(page);
+ memcpy(dst, src, PAGE_SIZE);
+ kunmap_local(src);
+ zs_unmap_object(zram->mem_pool, handle);
+
+ zram_slot_lock(zram, index);
+ zram_set_flag(zram, index, ZRAM_HUGE);
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, PAGE_SIZE);
+ zram_slot_unlock(zram, index);
+
+ atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size);
+ atomic64_inc(&zram->stats.huge_pages);
+ atomic64_inc(&zram->stats.huge_pages_since);
+ atomic64_inc(&zram->stats.pages_stored);
+
+ return 0;
+}
+
static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
int ret = 0;
- unsigned long alloced_pages;
unsigned long handle = -ENOMEM;
unsigned int comp_len = 0;
- void *src, *dst, *mem;
+ void *dst, *mem;
struct zcomp_strm *zstrm;
unsigned long element = 0;
- enum zram_pageflags flags = 0;
+ bool same_filled;
+
+ /* First, free memory allocated to this slot (if any) */
+ zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
+ zram_slot_unlock(zram, index);
mem = kmap_local_page(page);
- if (page_same_filled(mem, &element)) {
- kunmap_local(mem);
- /* Free memory associated with this sector now. */
- flags = ZRAM_SAME;
- atomic64_inc(&zram->stats.same_pages);
- goto out;
- }
+ same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
+ if (same_filled)
+ return write_same_filled_page(zram, element, index);
compress_again:
zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
- src = kmap_local_page(page);
+ mem = kmap_local_page(page);
ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
- src, &comp_len);
- kunmap_local(src);
+ mem, &comp_len);
+ kunmap_local(mem);
if (unlikely(ret)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
@@ -1527,8 +1742,11 @@ compress_again:
return ret;
}
- if (comp_len >= huge_class_size)
- comp_len = PAGE_SIZE;
+ if (comp_len >= huge_class_size) {
+ zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ return write_incompressible_page(zram, page, index);
+ }
+
/*
* handle allocation has 2 paths:
* a) fast path is executed with preemption disabled (for
@@ -1544,35 +1762,23 @@ compress_again:
*/
if (IS_ERR_VALUE(handle))
handle = zs_malloc(zram->mem_pool, comp_len,
- __GFP_KSWAPD_RECLAIM |
- __GFP_NOWARN |
- __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ __GFP_KSWAPD_RECLAIM |
+ __GFP_NOWARN |
+ __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (IS_ERR_VALUE(handle)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
atomic64_inc(&zram->stats.writestall);
handle = zs_malloc(zram->mem_pool, comp_len,
- GFP_NOIO | __GFP_HIGHMEM |
- __GFP_MOVABLE);
+ GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (IS_ERR_VALUE(handle))
return PTR_ERR((void *)handle);
- if (comp_len != PAGE_SIZE)
- goto compress_again;
- /*
- * If the page is not compressible, you need to acquire the
- * lock and execute the code below. The zcomp_stream_get()
- * call is needed to disable the cpu hotplug and grab the
- * zstrm buffer back. It is necessary that the dereferencing
- * of the zstrm variable below occurs correctly.
- */
- zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
+ goto compress_again;
}
- alloced_pages = zs_get_total_pages(zram->mem_pool);
- update_used_max(zram, alloced_pages);
-
- if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+ if (!zram_can_store_page(zram)) {
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
@@ -1580,41 +1786,19 @@ compress_again:
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
- src = zstrm->buffer;
- if (comp_len == PAGE_SIZE)
- src = kmap_local_page(page);
- memcpy(dst, src, comp_len);
- if (comp_len == PAGE_SIZE)
- kunmap_local(src);
-
+ memcpy(dst, zstrm->buffer, comp_len);
zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
zs_unmap_object(zram->mem_pool, handle);
- atomic64_add(comp_len, &zram->stats.compr_data_size);
-out:
- /*
- * Free memory associated with this sector
- * before overwriting unused sectors.
- */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
- if (comp_len == PAGE_SIZE) {
- zram_set_flag(zram, index, ZRAM_HUGE);
- atomic64_inc(&zram->stats.huge_pages);
- atomic64_inc(&zram->stats.huge_pages_since);
- }
-
- if (flags) {
- zram_set_flag(zram, index, flags);
- zram_set_element(zram, index, element);
- } else {
- zram_set_handle(zram, index, handle);
- zram_set_obj_size(zram, index, comp_len);
- }
+ zram_slot_lock(zram, index);
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
/* Update stats */
atomic64_inc(&zram->stats.pages_stored);
+ atomic64_add(comp_len, &zram->stats.compr_data_size);
+
return ret;
}
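The rewritten zram_write_page() establishes this ordering (a summary
of the flow above, not new behavior beyond the patch):

	1. zram_free_page(index)    /* old slot contents dropped up front */
	2. page_same_filled()?      -> write_same_filled_page() and done
	3. compress; comp_len >= huge_class_size?
	                            -> write_incompressible_page() and done
	4. zs_malloc() fast path; on failure retry the whole compression
	   with a GFP_NOIO allocation, then store handle and obj_size.

Freeing the slot first is what lets the same-filled and huge paths be
self-contained helpers with their own locking.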
@@ -1648,6 +1832,52 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
#ifdef CONFIG_ZRAM_MULTI_COMP
+#define RECOMPRESS_IDLE (1 << 0)
+#define RECOMPRESS_HUGE (1 << 1)
+
+static int scan_slots_for_recompress(struct zram *zram, u32 mode,
+ struct zram_pp_ctl *ctl)
+{
+ unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
+ struct zram_pp_slot *pps = NULL;
+ unsigned long index;
+
+ for (index = 0; index < nr_pages; index++) {
+ if (!pps)
+ pps = kmalloc(sizeof(*pps), GFP_KERNEL);
+ if (!pps)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pps->entry);
+
+ zram_slot_lock(zram, index);
+ if (!zram_allocated(zram, index))
+ goto next;
+
+ if (mode & RECOMPRESS_IDLE &&
+ !zram_test_flag(zram, index, ZRAM_IDLE))
+ goto next;
+
+ if (mode & RECOMPRESS_HUGE &&
+ !zram_test_flag(zram, index, ZRAM_HUGE))
+ goto next;
+
+ if (zram_test_flag(zram, index, ZRAM_WB) ||
+ zram_test_flag(zram, index, ZRAM_SAME) ||
+ zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
+ goto next;
+
+ pps->index = index;
+ place_pp_slot(zram, ctl, pps);
+ pps = NULL;
+next:
+ zram_slot_unlock(zram, index);
+ }
+
+ kfree(pps);
+ return 0;
+}
+
/*
* This function will decompress (unless it's ZRAM_HUGE) the page and then
* attempt to compress it using provided compression algorithm priority
@@ -1655,7 +1885,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
*
* Corresponding ZRAM slot should be locked.
*/
-static int zram_recompress(struct zram *zram, u32 index, struct page *page,
+static int recompress_slot(struct zram *zram, u32 index, struct page *page,
u64 *num_recomp_pages, u32 threshold, u32 prio,
u32 prio_max)
{
@@ -1685,6 +1915,13 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
if (ret)
return ret;
+ /*
+ * We touched this entry so mark it as non-IDLE. This makes sure
+ * that we don't preserve the IDLE flag and don't incorrectly pick
+ * this entry for a different post-processing type (e.g. writeback).
+ */
+ zram_clear_flag(zram, index, ZRAM_IDLE);
+
class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
/*
* Iterate the secondary comp algorithms list (in order of priority)
@@ -1798,20 +2035,17 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
return 0;
}
-#define RECOMPRESS_IDLE (1 << 0)
-#define RECOMPRESS_HUGE (1 << 1)
-
static ssize_t recompress_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
struct zram *zram = dev_to_zram(dev);
- unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
char *args, *param, *val, *algo = NULL;
u64 num_recomp_pages = ULLONG_MAX;
+ struct zram_pp_ctl *ctl = NULL;
+ struct zram_pp_slot *pps;
u32 mode = 0, threshold = 0;
- unsigned long index;
struct page *page;
ssize_t ret;
@@ -1881,6 +2115,12 @@ static ssize_t recompress_store(struct device *dev,
goto release_init_lock;
}
+ /* Do not permit concurrent post-processing actions. */
+ if (atomic_xchg(&zram->pp_in_progress, 1)) {
+ up_read(&zram->init_lock);
+ return -EAGAIN;
+ }
+
if (algo) {
bool found = false;
@@ -1907,36 +2147,32 @@ static ssize_t recompress_store(struct device *dev,
goto release_init_lock;
}
+ ctl = init_pp_ctl();
+ if (!ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ scan_slots_for_recompress(zram, mode, ctl);
+
ret = len;
- for (index = 0; index < nr_pages; index++) {
+ while ((pps = select_pp_slot(ctl))) {
int err = 0;
if (!num_recomp_pages)
break;
- zram_slot_lock(zram, index);
-
- if (!zram_allocated(zram, index))
- goto next;
-
- if (mode & RECOMPRESS_IDLE &&
- !zram_test_flag(zram, index, ZRAM_IDLE))
- goto next;
-
- if (mode & RECOMPRESS_HUGE &&
- !zram_test_flag(zram, index, ZRAM_HUGE))
+ zram_slot_lock(zram, pps->index);
+ if (!zram_test_flag(zram, pps->index, ZRAM_PP_SLOT))
goto next;
- if (zram_test_flag(zram, index, ZRAM_WB) ||
- zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
- zram_test_flag(zram, index, ZRAM_SAME) ||
- zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
- goto next;
-
- err = zram_recompress(zram, index, page, &num_recomp_pages,
- threshold, prio, prio_max);
+ err = recompress_slot(zram, pps->index, page,
+ &num_recomp_pages, threshold,
+ prio, prio_max);
next:
- zram_slot_unlock(zram, index);
+ zram_slot_unlock(zram, pps->index);
+ release_pp_slot(zram, pps);
+
if (err) {
ret = err;
break;
@@ -1948,6 +2184,8 @@ next:
__free_page(page);
release_init_lock:
+ release_pp_ctl(zram, ctl);
+ atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
return ret;
}
@@ -2105,7 +2343,7 @@ static void zram_destroy_comps(struct zram *zram)
{
u32 prio;
- for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
struct zcomp *comp = zram->comps[prio];
zram->comps[prio] = NULL;
@@ -2131,11 +2369,6 @@ static void zram_reset_device(struct zram *zram)
zram->limit_pages = 0;
- if (!init_done(zram)) {
- up_write(&zram->init_lock);
- return;
- }
-
set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);
@@ -2144,6 +2377,7 @@ static void zram_reset_device(struct zram *zram)
zram->disksize = 0;
zram_destroy_comps(zram);
memset(&zram->stats, 0, sizeof(zram->stats));
+ atomic_set(&zram->pp_in_progress, 0);
reset_bdev(zram);
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
@@ -2176,7 +2410,7 @@ static ssize_t disksize_store(struct device *dev,
goto out_unlock;
}
- for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
+ for (prio = ZRAM_PRIMARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
@@ -2381,6 +2615,9 @@ static int zram_add(void)
zram->disk->fops = &zram_devops;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
+ atomic_set(&zram->pp_in_progress, 0);
+ zram_comp_params_reset(zram);
+ comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */
set_capacity(zram->disk, 0);
@@ -2388,9 +2625,6 @@ static int zram_add(void)
if (ret)
goto out_cleanup_disk;
- zram_comp_params_reset(zram);
- comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
-
zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index cfc8c059db63..db78d7c01b9a 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -47,7 +47,7 @@
enum zram_pageflags {
ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists the same element */
ZRAM_WB, /* page is stored on backing_device */
- ZRAM_UNDER_WB, /* page is under writeback */
+ ZRAM_PP_SLOT, /* Selected for post-processing */
ZRAM_HUGE, /* Incompressible page */
ZRAM_IDLE, /* not accessed page since last idle marking */
ZRAM_INCOMPRESSIBLE, /* none of the algorithms could compress it */
@@ -62,10 +62,7 @@ enum zram_pageflags {
/* Allocated for each disk page */
struct zram_table_entry {
- union {
- unsigned long handle;
- unsigned long element;
- };
+ unsigned long handle;
unsigned int flags;
spinlock_t lock;
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
@@ -139,5 +136,6 @@ struct zram {
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
struct dentry *debugfs_dir;
#endif
+ atomic_t pp_in_progress;
};
#endif
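With the union gone, same-filled slots simply keep their fill pattern
in ->handle (see write_same_filled_page() in the zram_drv.c hunks
above), so one field serves both roles through a single accessor pair,
assumed to look like:

	static void zram_set_handle(struct zram *zram, u32 index,
				    unsigned long handle)
	{
		zram->table[index].handle = handle;
	}

	static unsigned long zram_get_handle(struct zram *zram, u32 index)
	{
		return zram->table[index].handle;
	}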
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 18767b54df35..4ab32abf0f48 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -336,7 +336,7 @@ config BT_HCIBFUSB
config BT_HCIDTL1
tristate "HCI DTL1 (PC Card) driver"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Bluetooth HCI DTL1 (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
@@ -349,7 +349,7 @@ config BT_HCIDTL1
config BT_HCIBT3C
tristate "HCI BT3C (PC Card) driver"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select FW_LOADER
help
Bluetooth HCI BT3C (PC Card) driver.
@@ -363,7 +363,7 @@ config BT_HCIBT3C
config BT_HCIBLUECARD
tristate "HCI BlueCard (PC Card) driver"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Bluetooth HCI BlueCard (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index eef00467905e..0a60660fc8ce 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -541,11 +541,10 @@ static const struct bcm_subver_table bcm_usb_subver_table[] = {
static const char *btbcm_get_board_name(struct device *dev)
{
#ifdef CONFIG_OF
- struct device_node *root;
+ struct device_node *root __free(device_node) = of_find_node_by_path("/");
char *board_type;
const char *tmp;
- root = of_find_node_by_path("/");
if (!root)
return NULL;
@@ -554,8 +553,10 @@ static const char *btbcm_get_board_name(struct device *dev)
/* get rid of any '/' in the compatible string */
board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!board_type)
+ return NULL;
+
strreplace(board_type, '/', '-');
- of_node_put(root);
return board_type;
#else
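The btbcm conversion above uses the scope-based cleanup idiom: the
__free(device_node) annotation arranges for of_node_put() to run
automatically when root goes out of scope, so every return path is
covered without an explicit put. The shape of the idiom:

	struct device_node *root __free(device_node) =
		of_find_node_by_path("/");

	if (!root)
		return NULL;	/* no of_node_put() needed on any path */
	/* ... use root; it is put automatically on scope exit ... */

This is also why the explicit of_node_put(root) call is deleted.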
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 438b92967bc3..d2540b28bc7a 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/unaligned.h>
@@ -506,13 +507,13 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
bt_dev_info(hdev, "Secure boot is %s",
- version->secure_boot ? "enabled" : "disabled");
+ str_enabled_disabled(version->secure_boot));
bt_dev_info(hdev, "OTP lock is %s",
- version->otp_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->otp_lock));
bt_dev_info(hdev, "API lock is %s",
- version->api_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->api_lock));
bt_dev_info(hdev, "Debug lock is %s",
- version->debug_lock ? "enabled" : "disabled");
+ str_enabled_disabled(version->debug_lock));
bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
version->min_fw_build_nn, version->min_fw_build_cw,
2000 + version->min_fw_build_yy);
@@ -927,16 +928,16 @@ int btintel_read_boot_params(struct hci_dev *hdev,
le16_to_cpu(params->dev_revid));
bt_dev_info(hdev, "Secure boot is %s",
- params->secure_boot ? "enabled" : "disabled");
+ str_enabled_disabled(params->secure_boot));
bt_dev_info(hdev, "OTP lock is %s",
- params->otp_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->otp_lock));
bt_dev_info(hdev, "API lock is %s",
- params->api_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->api_lock));
bt_dev_info(hdev, "Debug lock is %s",
- params->debug_lock ? "enabled" : "disabled");
+ str_enabled_disabled(params->debug_lock));
bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
params->min_fw_build_nn, params->min_fw_build_cw,
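
str_enabled_disabled() comes from the <linux/string_choices.h> include added at the top of the file; it replaces the repeated open-coded ternaries with one shared helper. Behaviorally it is just (a sketch; see the header for the actual definition):

static inline const char *str_enabled_disabled(bool v)
{
	return v ? "enabled" : "disabled";
}
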
@@ -1040,7 +1041,7 @@ static int btintel_download_firmware_payload(struct hci_dev *hdev,
* as needed.
*
* Send set of commands with 4 byte alignment from the
- * firmware data buffer as a single Data fragement.
+ * firmware data buffer as a single Data fragment.
*/
if (!(frag_len % 4)) {
err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
@@ -1252,6 +1253,12 @@ static void btintel_reset_to_bootloader(struct hci_dev *hdev)
struct intel_reset params;
struct sk_buff *skb;
+ /* The PCIe transport uses a shared hardware reset mechanism for
+ * recovery, which is triggered in the PCIe *setup* function on error.
+ */
+ if (hdev->bus == HCI_PCI)
+ return;
+
/* Send Intel Reset command. This will result in
* re-enumeration of BT controller.
*
@@ -1267,6 +1274,7 @@ static void btintel_reset_to_bootloader(struct hci_dev *hdev)
* boot_param: Boot address
*
*/
+
params.reset_type = 0x01;
params.patch_enable = 0x01;
params.ddc_reload = 0x01;
@@ -1841,6 +1849,37 @@ static int btintel_boot_wait(struct hci_dev *hdev, ktime_t calltime, int msec)
return 0;
}
+static int btintel_boot_wait_d0(struct hci_dev *hdev, ktime_t calltime,
+ int msec)
+{
+ ktime_t delta, rettime;
+ unsigned long long duration;
+ int err;
+
+ bt_dev_info(hdev, "Waiting for device transition to d0");
+
+ err = btintel_wait_on_flag_timeout(hdev, INTEL_WAIT_FOR_D0,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(msec));
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Device d0 move interrupted");
+ return -EINTR;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Device d0 move timeout");
+ return -ETIMEDOUT;
+ }
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ bt_dev_info(hdev, "Device moved to D0 in %llu usecs", duration);
+
+ return 0;
+}
+
static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
{
ktime_t calltime;
@@ -1849,6 +1888,7 @@ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
calltime = ktime_get();
btintel_set_flag(hdev, INTEL_BOOTING);
+ btintel_set_flag(hdev, INTEL_WAIT_FOR_D0);
err = btintel_send_intel_reset(hdev, boot_addr);
if (err) {
@@ -1861,13 +1901,28 @@ static int btintel_boot(struct hci_dev *hdev, u32 boot_addr)
* is done by the operational firmware sending bootup notification.
*
* Booting into operational firmware should not take longer than
- * 1 second. However if that happens, then just fail the setup
+ * 5 seconds. However, if that happens, then just fail the setup
* since something went wrong.
*/
- err = btintel_boot_wait(hdev, calltime, 1000);
- if (err == -ETIMEDOUT)
+ err = btintel_boot_wait(hdev, calltime, 5000);
+ if (err == -ETIMEDOUT) {
btintel_reset_to_bootloader(hdev);
+ goto exit_error;
+ }
+
+ if (hdev->bus == HCI_PCI) {
+ /* In case of PCIe, after receiving the bootup event, the driver
+ * performs D0 entry by writing 0 to the sleep control register (see
+ * btintel_pcie_recv_event()).
+ * Firmware acks with an alive interrupt indicating the host is fully
+ * ready to perform BT operations. Wait here until the
+ * INTEL_WAIT_FOR_D0 bit is cleared.
+ */
+ calltime = ktime_get();
+ err = btintel_boot_wait_d0(hdev, calltime, 2000);
+ }
+exit_error:
return err;
}
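
btintel_boot_wait_d0() is a plain flag wait: the interrupt path clears INTEL_WAIT_FOR_D0 and wakes the waiter once the alive interrupt confirms the D0 transition. Stripped of the btintel wrappers, the mechanism is the generic bit-wait pattern (flag word and bit index hypothetical):

#include <linux/wait_bit.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

#define WAIT_FOR_D0	0
static unsigned long bt_flags;

static int wait_for_d0(unsigned int msec)
{
	/* 0 on success, nonzero on signal or timeout */
	return wait_on_bit_timeout(&bt_flags, WAIT_FOR_D0, TASK_INTERRUPTIBLE,
				   msecs_to_jiffies(msec));
}

static void d0_reached(void)	/* called from the alive interrupt path */
{
	clear_bit(WAIT_FOR_D0, &bt_flags);
	smp_mb__after_atomic();
	wake_up_bit(&bt_flags, WAIT_FOR_D0);
}
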
@@ -2693,20 +2748,32 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
struct btintel_dsbr_cmd cmd;
struct sk_buff *skb;
+ u32 dsbr, cnvi;
u8 status;
- u32 dsbr;
- bool apply_dsbr;
int err;
- /* DSBR command needs to be sent for BlazarI + B0 step product after
- * downloading IML image.
+ cnvi = ver->cnvi_top & 0xfff;
+ /* DSBR command needs to be sent for:
+ * 1. BlazarI or BlazarIW + B0 step product in IML image.
+ * 2. Gale Peak2 or BlazarU in OP image.
*/
- apply_dsbr = (ver->img_type == BTINTEL_IMG_IML &&
- ((ver->cnvi_top & 0xfff) == BTINTEL_CNVI_BLAZARI) &&
- INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01);
- if (!apply_dsbr)
+ switch (cnvi) {
+ case BTINTEL_CNVI_BLAZARI:
+ case BTINTEL_CNVI_BLAZARIW:
+ if (ver->img_type == BTINTEL_IMG_IML &&
+ INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01)
+ break;
return 0;
+ case BTINTEL_CNVI_GAP:
+ case BTINTEL_CNVI_BLAZARU:
+ if (ver->img_type == BTINTEL_IMG_OP &&
+ hdev->bus == HCI_USB)
+ break;
+ return 0;
+ default:
+ return 0;
+ }
dsbr = 0;
err = btintel_uefi_get_dsbr(&dsbr);
@@ -2749,6 +2816,13 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
*/
boot_param = 0x00000000;
+ /* In case of PCIe, this function might get called multiple times with
+ * the same hdev instance if there is any error during firmware download.
+ * Clear the stale bits left over from the previous download attempt.
+ */
+ for (int i = 0; i < __INTEL_NUM_FLAGS; i++)
+ btintel_clear_flag(hdev, i);
+
btintel_set_flag(hdev, INTEL_BOOTLOADER);
err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param);
@@ -2835,7 +2909,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
- /* All Intel new genration controllers support the Microsoft vendor
+ /* All Intel new generation controllers support the Microsoft vendor
* extension are using 0xFC1E for VsMsftOpCode.
*/
case 0x17:
@@ -3273,7 +3347,7 @@ int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name)
}
EXPORT_SYMBOL_GPL(btintel_configure_setup);
-static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
+int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
{
struct intel_tlv *tlv = (void *)&skb->data[5];
@@ -3288,13 +3362,12 @@ static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
case INTEL_TLV_TEST_EXCEPTION:
/* Generate devcoredump from exception */
if (!hci_devcd_init(hdev, skb->len)) {
- hci_devcd_append(hdev, skb);
+ hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC));
hci_devcd_complete(hdev);
} else {
bt_dev_err(hdev, "Failed to generate devcoredump");
- kfree_skb(skb);
}
- return 0;
+ break;
default:
bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]);
}
@@ -3302,6 +3375,7 @@ static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
recv_frame:
return hci_recv_frame(hdev, skb);
}
+EXPORT_SYMBOL_GPL(btintel_diagnostics);
int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
@@ -3321,7 +3395,8 @@ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
* indicating that the bootup completed.
*/
btintel_bootup(hdev, ptr, len);
- break;
+ kfree_skb(skb);
+ return 0;
case 0x06:
/* When the firmware loading completes the
* device sends out a vendor specific event
@@ -3329,7 +3404,8 @@ int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
* loading.
*/
btintel_secure_send_result(hdev, ptr, len);
- break;
+ kfree_skb(skb);
+ return 0;
}
}
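
The devcoredump change follows the skb ownership rule for receive hooks: either consume the buffer (free it and return) or hand it to hci_recv_frame(), which takes ownership, but never both. hci_devcd_append() also consumes the skb it is given, so the exception path now passes a clone and lets the original continue to the stack; the bootup and secure-send cases free the skb themselves and return instead of falling through. A sketch of the shape (the predicate is hypothetical):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int recv_vendor_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (is_exception_event(skb)) {		/* hypothetical predicate */
		if (!hci_devcd_init(hdev, skb->len)) {
			/* append consumes its skb, so hand it a clone */
			hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC));
			hci_devcd_complete(hdev);
		}
	}
	return hci_recv_frame(hdev, skb);	/* stack takes ownership */
}
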
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index aa70e4c27416..fa43eb137821 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -53,6 +53,9 @@ struct intel_tlv {
} __packed;
#define BTINTEL_CNVI_BLAZARI 0x900
+#define BTINTEL_CNVI_BLAZARIW 0x901
+#define BTINTEL_CNVI_GAP 0x910
+#define BTINTEL_CNVI_BLAZARU 0x930
#define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */
#define BTINTEL_IMG_IML 0x02 /* Intermediate image */
@@ -178,6 +181,7 @@ enum {
INTEL_ROM_LEGACY,
INTEL_ROM_LEGACY_NO_WBS_SUPPORT,
INTEL_ACPI_RESET_ACTIVE,
+ INTEL_WAIT_FOR_D0,
__INTEL_NUM_FLAGS,
};
@@ -249,6 +253,7 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
int btintel_shutdown_combined(struct hci_dev *hdev);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
void btintel_print_fseq_info(struct hci_dev *hdev);
+int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -382,4 +387,9 @@ static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
static inline void btintel_print_fseq_info(struct hci_dev *hdev)
{
}
+
+static inline int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ return -EOPNOTSUPP;
+}
#endif
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 5252125b003f..2b79952f3628 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -48,6 +48,17 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
#define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
#define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
+/* Alive interrupt context */
+enum {
+ BTINTEL_PCIE_ROM,
+ BTINTEL_PCIE_FW_DL,
+ BTINTEL_PCIE_HCI_RESET,
+ BTINTEL_PCIE_INTEL_HCI_RESET1,
+ BTINTEL_PCIE_INTEL_HCI_RESET2,
+ BTINTEL_PCIE_D0,
+ BTINTEL_PCIE_D3
+};
+
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
u16 queue_num)
{
@@ -64,24 +75,6 @@ static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
}
-static int btintel_pcie_poll_bit(struct btintel_pcie_data *data, u32 offset,
- u32 bits, u32 mask, int timeout_us)
-{
- int t = 0;
- u32 reg;
-
- do {
- reg = btintel_pcie_rd_reg32(data, offset);
-
- if ((reg & mask) == (bits & mask))
- return t;
- udelay(POLL_INTERVAL_US);
- t += POLL_INTERVAL_US;
- } while (t < timeout_us);
-
- return -ETIMEDOUT;
-}
-
static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
{
u8 queue = entry->entry;
@@ -237,10 +230,47 @@ static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
}
-static void btintel_pcie_reset_bt(struct btintel_pcie_data *data)
+static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
{
- btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+ u32 reg;
+ int retry = 3;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+
+ do {
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
+ break;
+ usleep_range(10000, 12000);
+
+ } while (--retry > 0);
+ usleep_range(10000, 12000);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+ usleep_range(10000, 12000);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+
+ /* If the shared hardware reset succeeds, the boot stage register will
+ * be set to 0
+ */
+ return reg == 0 ? 0 : -ENODEV;
}
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
@@ -252,6 +282,7 @@ static void btintel_pcie_reset_bt(struct btintel_pcie_data *data)
static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
{
int err;
+ u32 reg;
data->gp0_received = false;
@@ -267,22 +298,17 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
data->boot_stage_cache = 0x0;
/* Set MAC_INIT bit to start primary bootloader */
- btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+ reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
- btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
-
- /* Wait until MAC_ACCESS is granted */
- err = btintel_pcie_poll_bit(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
- BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS,
- BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US);
- if (err < 0)
- return -ENODEV;
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
/* MAC is ready. Enable BT FUNC */
btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
- BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
@@ -290,8 +316,9 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
/* wait for interrupt from the device after booting up to primary
* bootloader.
*/
+ data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
- msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT));
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
if (!err)
return -ETIME;
@@ -302,12 +329,77 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
return 0;
}
+/* BIT(0) - ROM, BIT(1) - IML and BIT(3) - OP
+ * Sometimes while switching firmware images from ROM to IML, or IML to OP,
+ * the previous image bit is not cleared by the firmware when the alive
+ * interrupt is received. The driver needs to account for these sticky bits
+ * when deciding which image is currently running on the controller.
+ * Ex: 0x10 and 0x11 - both represent a controller running IML
+ */
+static inline bool btintel_pcie_in_rom(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM &&
+ !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) &&
+ !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
+}
+
+static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
+}
+
+static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
+ !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
+}
+
+static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
+}
+
+static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
+{
+ return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
+}
+
+static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
+ u32 dxstate)
+{
+ bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
+}
+
+static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
+{
+ switch (alive_intr_ctxt) {
+ case BTINTEL_PCIE_ROM:
+ return "rom";
+ case BTINTEL_PCIE_FW_DL:
+ return "fw_dl";
+ case BTINTEL_PCIE_D0:
+ return "d0";
+ case BTINTEL_PCIE_D3:
+ return "d3";
+ case BTINTEL_PCIE_HCI_RESET:
+ return "hci_reset";
+ case BTINTEL_PCIE_INTEL_HCI_RESET1:
+ return "intel_reset1";
+ case BTINTEL_PCIE_INTEL_HCI_RESET2:
+ return "intel_reset2";
+ default:
+ return "unknown";
+ }
+}
+
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
* BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
*/
static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
{
- u32 reg;
+ bool submit_rx, signal_waitq;
+ u32 reg, old_ctxt;
/* This interrupt is for three different causes and it is not easy to
* know what causes the interrupt. So, it compares each register value
@@ -317,20 +409,87 @@ static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
if (reg != data->boot_stage_cache)
data->boot_stage_cache = reg;
+ bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
+ data->boot_stage_cache, reg);
reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
if (reg != data->img_resp_cache)
data->img_resp_cache = reg;
data->gp0_received = true;
- /* If the boot stage is OP or IML, reset IA and start RX again */
- if (data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW ||
- data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) {
+ old_ctxt = data->alive_intr_ctxt;
+ submit_rx = false;
+ signal_waitq = false;
+
+ switch (data->alive_intr_ctxt) {
+ case BTINTEL_PCIE_ROM:
+ data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
+ signal_waitq = true;
+ break;
+ case BTINTEL_PCIE_FW_DL:
+ /* The error case is already handled. Ideally, control should not
+ * reach here
+ */
+ break;
+ case BTINTEL_PCIE_INTEL_HCI_RESET1:
+ if (btintel_pcie_in_op(data)) {
+ submit_rx = true;
+ break;
+ }
+
+ if (btintel_pcie_in_iml(data)) {
+ submit_rx = true;
+ data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
+ break;
+ }
+ break;
+ case BTINTEL_PCIE_INTEL_HCI_RESET2:
+ if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
+ btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
+ data->alive_intr_ctxt = BTINTEL_PCIE_D0;
+ }
+ break;
+ case BTINTEL_PCIE_D0:
+ if (btintel_pcie_in_d3(data)) {
+ data->alive_intr_ctxt = BTINTEL_PCIE_D3;
+ signal_waitq = true;
+ break;
+ }
+ break;
+ case BTINTEL_PCIE_D3:
+ if (btintel_pcie_in_d0(data)) {
+ data->alive_intr_ctxt = BTINTEL_PCIE_D0;
+ submit_rx = true;
+ signal_waitq = true;
+ break;
+ }
+ break;
+ case BTINTEL_PCIE_HCI_RESET:
+ data->alive_intr_ctxt = BTINTEL_PCIE_D0;
+ submit_rx = true;
+ signal_waitq = true;
+ break;
+ default:
+ bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
+ data->alive_intr_ctxt);
+ break;
+ }
+
+ if (submit_rx) {
btintel_pcie_reset_ia(data);
btintel_pcie_start_rx(data);
}
- wake_up(&data->gp0_wait_q);
+ if (signal_waitq) {
+ bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
+ wake_up(&data->gp0_wait_q);
+ }
+
+ if (old_ctxt != data->alive_intr_ctxt)
+ bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
+ btintel_pcie_alivectxt_state2str(old_ctxt),
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
}
/* This function handles the MSI-X interrupt for rx queue 0 which is for TX
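
The gp0 handler above is effectively a small state machine over alive_intr_ctxt. Its nominal transitions, summarized as a table (a sketch only; the INTEL_HCI_RESET1/RESET2 cases additionally depend on the cached boot stage and the INTEL_WAIT_FOR_D0 flag):

struct alive_transition {
	u32 from, to;
	bool submit_rx, signal_waitq;
};

static const struct alive_transition nominal[] = {
	{ BTINTEL_PCIE_ROM,       BTINTEL_PCIE_FW_DL, false, true },
	{ BTINTEL_PCIE_HCI_RESET, BTINTEL_PCIE_D0,    true,  true },
	{ BTINTEL_PCIE_D0,        BTINTEL_PCIE_D3,    false, true },
	{ BTINTEL_PCIE_D3,        BTINTEL_PCIE_D0,    true,  true },
};
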
@@ -364,6 +523,83 @@ static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
}
}
+static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
+ hdr->plen > 0) {
+ const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
+ unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
+
+ if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
+ switch (skb->data[2]) {
+ case 0x02:
+ /* When switching to the operational firmware
+ * the device sends a vendor specific event
+ * indicating that the bootup completed.
+ */
+ btintel_bootup(hdev, ptr, len);
+
+ /* If bootup event is from operational image,
+ * driver needs to write sleep control register to
+ * move into D0 state
+ */
+ if (btintel_pcie_in_op(data)) {
+ btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
+ data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (btintel_pcie_in_iml(data)) {
+ /* In case of IML, there is no concept
+ * of a D0 transition. Just mimic it as if
+ * IML moved to D0 by clearing the INTEL_WAIT_FOR_D0
+ * bit and waking up the task waiting on
+ * INTEL_WAIT_FOR_D0. This is required
+ * because btintel_boot() is a common function for
+ * both IML and OP image loading.
+ */
+ if (btintel_test_and_clear_flag(data->hdev,
+ INTEL_WAIT_FOR_D0))
+ btintel_wake_up_flag(data->hdev,
+ INTEL_WAIT_FOR_D0);
+ }
+ kfree_skb(skb);
+ return 0;
+ case 0x06:
+ /* When the firmware loading completes the
+ * device sends out a vendor specific event
+ * indicating the result of the firmware
+ * loading.
+ */
+ btintel_secure_send_result(hdev, ptr, len);
+ kfree_skb(skb);
+ return 0;
+ }
+ }
+
+ /* Handle all diagnostics events separately. May still call
+ * hci_recv_frame.
+ */
+ if (len >= sizeof(diagnostics_hdr) &&
+ memcmp(&skb->data[2], diagnostics_hdr,
+ sizeof(diagnostics_hdr)) == 0) {
+ return btintel_diagnostics(hdev, skb);
+ }
+
+ /* This is a debug event that comes from the IML and OP images when
+ * they start execution. There is no need to pass this event to the stack.
+ */
+ if (skb->data[2] == 0x97)
+ return 0;
+ }
+
+ return hci_recv_frame(hdev, skb);
+}
/* Process the received rx data
* It checks the frame header to identify the data type, creates an skb,
* and calls the HCI API
@@ -465,7 +701,7 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
hdev->stat.byte_rx += plen;
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
- ret = btintel_recv_event(hdev, new_skb);
+ ret = btintel_pcie_recv_event(hdev, new_skb);
else
ret = hci_recv_frame(hdev, new_skb);
@@ -516,10 +752,8 @@ static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status
buf += sizeof(*rfh_hdr);
skb = alloc_skb(len, GFP_ATOMIC);
- if (!skb) {
- ret = -ENOMEM;
+ if (!skb)
goto resubmit;
- }
skb_put_data(skb, buf, len);
skb_queue_tail(&data->rx_skb_q, skb);
@@ -734,13 +968,9 @@ static int btintel_pcie_config_pcie(struct pci_dev *pdev,
return err;
}
- err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
- if (err)
- return err;
-
- data->base_addr = pcim_iomap_table(pdev)[0];
- if (!data->base_addr)
- return -ENODEV;
+ data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
+ if (IS_ERR(data->base_addr))
+ return PTR_ERR(data->base_addr);
err = btintel_pcie_setup_irq(data);
if (err)
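
pcim_iomap_region() is the current devres helper for mapping a single BAR: it requests and ioremaps the region in one call and returns either the mapping or an ERR_PTR, so the probe path needs no iomap-table lookup and no manual unwind. A sketch of a probe using it (driver name illustrative):

#include <linux/pci.h>

static int sketch_probe(struct pci_dev *pdev)
{
	void __iomem *base;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	base = pcim_iomap_region(pdev, 0, "sketch");	/* BAR 0 */
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* mapping stays valid until the device is unbound */
	return 0;
}
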
@@ -1053,8 +1283,11 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ struct hci_command_hdr *cmd;
+ __u16 opcode = ~0;
int ret;
u32 type;
+ u32 old_ctxt;
/* Due to the fw limitation, the type header of the packet should be
* 4 bytes unlike 1 byte for UART. In UART, the firmware can read
@@ -1073,6 +1306,8 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
switch (hci_skb_pkt_type(skb)) {
case HCI_COMMAND_PKT:
type = BTINTEL_PCIE_HCI_CMD_PKT;
+ cmd = (void *)skb->data;
+ opcode = le16_to_cpu(cmd->opcode);
if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
@@ -1111,6 +1346,30 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
bt_dev_err(hdev, "Failed to send frame (%d)", ret);
goto exit_error;
}
+
+ if (type == BTINTEL_PCIE_HCI_CMD_PKT &&
+ (opcode == HCI_OP_RESET || opcode == 0xfc01)) {
+ old_ctxt = data->alive_intr_ctxt;
+ data->alive_intr_ctxt =
+ (opcode == 0xfc01 ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
+ BTINTEL_PCIE_HCI_RESET);
+ bt_dev_dbg(data->hdev, "sent cmd: 0x%4.4x alive context changed: %s -> %s",
+ opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ if (opcode == HCI_OP_RESET) {
+ data->gp0_received = false;
+ ret = wait_event_timeout(data->gp0_wait_q,
+ data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (!ret) {
+ hdev->stat.err_tx++;
+ bt_dev_err(hdev, "No alive interrupt received for %s",
+ btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
+ ret = -ETIME;
+ goto exit_error;
+ }
+ }
+ }
hdev->stat.byte_tx += skb->len;
kfree_skb(skb);
@@ -1128,7 +1387,7 @@ static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
data->hdev = NULL;
}
-static int btintel_pcie_setup(struct hci_dev *hdev)
+static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
const u8 param[1] = { 0xFF };
struct intel_version_tlv ver_tlv;
@@ -1219,6 +1478,32 @@ exit_error:
return err;
}
+static int btintel_pcie_setup(struct hci_dev *hdev)
+{
+ int err, fw_dl_retry = 0;
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
+ bt_dev_err(hdev, "Firmware download retry count: %d",
+ fw_dl_retry);
+ err = btintel_pcie_reset_bt(data);
+ if (err) {
+ bt_dev_err(hdev, "Failed to do shr reset: %d", err);
+ break;
+ }
+ usleep_range(10000, 12000);
+ btintel_pcie_reset_ia(data);
+ btintel_pcie_config_msix(data);
+ err = btintel_pcie_enable_bt(data);
+ if (err) {
+ bt_dev_err(hdev, "Failed to enable hardware: %d", err);
+ break;
+ }
+ btintel_pcie_start_rx(data);
+ }
+ return err;
+}
+
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
int err;
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index baaff70420f5..f9aada0543c4 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -12,6 +12,7 @@
#define BTINTEL_PCIE_CSR_HW_REV_REG (BTINTEL_PCIE_CSR_BASE + 0x028)
#define BTINTEL_PCIE_CSR_RF_ID_REG (BTINTEL_PCIE_CSR_BASE + 0x09C)
#define BTINTEL_PCIE_CSR_BOOT_STAGE_REG (BTINTEL_PCIE_CSR_BASE + 0x108)
+#define BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG (BTINTEL_PCIE_CSR_BASE + 0x114)
#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118)
#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C)
#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C)
@@ -22,6 +23,8 @@
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT (BIT(6))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT (BIT(7))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS (BIT(20))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS (BIT(28))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON (BIT(29))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET (BIT(31))
/* Value for BTINTEL_PCIE_CSR_BOOT_STAGE register */
@@ -32,6 +35,7 @@
#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN (BIT(11))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON (BIT(16))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE (BIT(23))
+#define BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY (BIT(24))
/* Registers for MSI-X */
#define BTINTEL_PCIE_CSR_MSIX_BASE (0x2000)
@@ -55,6 +59,16 @@ enum msix_hw_int_causes {
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */
};
+/* PCIe device states
+ * D0: Host-Device interface is active
+ * D3 hot: Host-Device interface is inactive (as reflected by IPC_SLEEP_CONTROL_CSR_AD)
+ * D3 cold: Host-Device interface is inactive (as reflected by IPC_SLEEP_CONTROL_CSR_AD)
+ */
+enum {
+ BTINTEL_PCIE_STATE_D0 = 0,
+ BTINTEL_PCIE_STATE_D3_HOT = 2,
+ BTINTEL_PCIE_STATE_D3_COLD = 3,
+};
#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
/* Minimum and Maximum number of MSI-X Vector
@@ -67,7 +81,7 @@ enum msix_hw_int_causes {
#define BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US 200000
/* Default interrupt timeout in msec */
-#define BTINTEL_DEFAULT_INTR_TIMEOUT 3000
+#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS 3000
/* The number of descriptors in TX/RX queues */
#define BTINTEL_DESCS_COUNT 16
@@ -343,6 +357,7 @@ struct rxq {
* @ia: Index Array struct
* @txq: TX Queue struct
* @rxq: RX Queue struct
+ * @alive_intr_ctxt: Alive interrupt context
*/
struct btintel_pcie_data {
struct pci_dev *pdev;
@@ -389,6 +404,7 @@ struct btintel_pcie_data {
struct ia ia;
struct txq txq;
struct rxq rxq;
+ u32 alive_intr_ctxt;
};
static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 18f34998a120..e26b07a9387d 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/string_choices.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <linux/mmc/sdio_func.h>
@@ -88,7 +89,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
else
adapter->psmode = 0;
BT_DBG("PS Mode:%s",
- (adapter->psmode) ? "Enable" : "Disable");
+ str_enable_disable(adapter->psmode));
} else {
BT_DBG("PS Mode command failed");
}
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 9bbf20502163..68846c5bd4f7 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -324,7 +324,7 @@ int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
wmt_params.data = NULL;
wmt_params.status = NULL;
- /* Activate funciton the firmware providing to */
+ /* Activate the function the firmware provides */
err = wmt_cmd_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
@@ -395,6 +395,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btmtk_data *data = hci_get_priv(hdev);
int err;
+ bool complete = false;
if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
kfree_skb(skb);
@@ -416,19 +417,22 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
fallthrough;
case HCI_DEVCOREDUMP_ACTIVE:
default:
+ /* A Mediatek coredump spans more than MTK_COREDUMP_NUM packets */
+ if (data->cd_info.cnt >= MTK_COREDUMP_NUM &&
+ skb->len > MTK_COREDUMP_END_LEN)
+ if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
+ MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1))
+ complete = true;
+
err = hci_devcd_append(hdev, skb);
if (err < 0)
break;
data->cd_info.cnt++;
- /* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
- if (data->cd_info.cnt > MTK_COREDUMP_NUM &&
- skb->len > MTK_COREDUMP_END_LEN)
- if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
- MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) {
- bt_dev_info(hdev, "Mediatek coredump end");
- hci_devcd_complete(hdev);
- }
+ if (complete) {
+ bt_dev_info(hdev, "Mediatek coredump end");
+ hci_devcd_complete(hdev);
+ }
break;
}
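
The reorder above matters because hci_devcd_append() consumes the skb: once appended, skb->data may no longer be inspected, so the end-marker test has to happen before the append. The resulting shape, with MIN_PKTS/END_LEN/END_MARKER standing in for the MTK_COREDUMP_* values:

static void coredump_rx(struct hci_dev *hdev, struct sk_buff *skb, int pkt_cnt)
{
	bool complete = pkt_cnt >= MIN_PKTS &&
			skb->len > END_LEN &&
			!memcmp(&skb->data[skb->len - END_LEN],
				END_MARKER, END_LEN - 1);

	/* decide first: the append consumes the skb */
	if (hci_devcd_append(hdev, skb) < 0)
		return;

	if (complete)
		hci_devcd_complete(hdev);
}
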
@@ -1215,7 +1219,6 @@ static int btmtk_usb_isointf_init(struct hci_dev *hdev)
struct sk_buff *skb;
int err;
- init_usb_anchor(&btmtk_data->isopkt_anchor);
spin_lock_init(&btmtk_data->isorxlock);
__set_mtk_intr_interface(hdev);
@@ -1326,7 +1329,6 @@ int btmtk_usb_setup(struct hci_dev *hdev)
fwname = FIRMWARE_MT7668;
break;
case 0x7922:
- case 0x7961:
case 0x7925:
/* Reset the device to ensure it's in the initial state before
* downloading the firmware.
@@ -1334,7 +1336,8 @@ int btmtk_usb_setup(struct hci_dev *hdev)
if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags))
btmtk_usb_subsys_reset(hdev, dev_id);
-
+ fallthrough;
+ case 0x7961:
btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id,
fw_version, fw_flavor);
@@ -1469,10 +1472,15 @@ EXPORT_SYMBOL_GPL(btmtk_usb_setup);
int btmtk_usb_shutdown(struct hci_dev *hdev)
{
+ struct btmtk_data *data = hci_get_priv(hdev);
struct btmtk_hci_wmt_params wmt_params;
u8 param = 0;
int err;
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ return err;
+
/* Disable the device */
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
@@ -1483,9 +1491,11 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ usb_autopm_put_interface(data->intf);
return err;
}
+ usb_autopm_put_interface(data->intf);
return 0;
}
EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 11d33cd7b08f..bd5464bde174 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -681,7 +681,7 @@ static int btmtksdio_open(struct hci_dev *hdev)
if (err < 0)
goto err_release_irq;
- /* Explitly set write-1-clear method */
+ /* Explicitly set write-1-clear method */
val = sdio_readl(bdev->func, MTK_REG_CHCR, &err);
if (err < 0)
goto err_release_irq;
@@ -1249,7 +1249,7 @@ static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
-static void btmtksdio_cmd_timeout(struct hci_dev *hdev)
+static void btmtksdio_reset(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
u32 status;
@@ -1328,6 +1328,8 @@ static int btmtksdio_probe(struct sdio_func *func,
{
struct btmtksdio_dev *bdev;
struct hci_dev *hdev;
+ struct device_node *old_node;
+ bool restore_node;
int err;
bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
@@ -1358,7 +1360,7 @@ static int btmtksdio_probe(struct sdio_func *func,
hdev->open = btmtksdio_open;
hdev->close = btmtksdio_close;
- hdev->cmd_timeout = btmtksdio_cmd_timeout;
+ hdev->reset = btmtksdio_reset;
hdev->flush = btmtksdio_flush;
hdev->setup = btmtksdio_setup;
hdev->shutdown = btmtksdio_shutdown;
@@ -1396,7 +1398,7 @@ static int btmtksdio_probe(struct sdio_func *func,
if (pm_runtime_enabled(bdev->dev))
pm_runtime_disable(bdev->dev);
- /* As explaination in drivers/mmc/core/sdio_bus.c tells us:
+ /* As the explanation in drivers/mmc/core/sdio_bus.c tells us:
* Unbound SDIO functions are always suspended.
* During probe, the function is set active and the usage count
* is incremented. If the driver supports runtime PM,
@@ -1411,13 +1413,24 @@ static int btmtksdio_probe(struct sdio_func *func,
if (err)
bt_dev_err(hdev, "failed to initialize device wakeup");
- bdev->dev->of_node = of_find_compatible_node(NULL, NULL,
- "mediatek,mt7921s-bluetooth");
+ restore_node = false;
+ if (!of_device_is_compatible(bdev->dev->of_node, "mediatek,mt7921s-bluetooth")) {
+ restore_node = true;
+ old_node = bdev->dev->of_node;
+ bdev->dev->of_node = of_find_compatible_node(NULL, NULL,
+ "mediatek,mt7921s-bluetooth");
+ }
+
bdev->reset = devm_gpiod_get_optional(bdev->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(bdev->reset))
err = PTR_ERR(bdev->reset);
+ if (restore_node) {
+ of_node_put(bdev->dev->of_node);
+ bdev->dev->of_node = old_node;
+ }
+
return err;
}
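
The probe fix uses a save/override/restore dance: the reset GPIO lookup must run against the mt7921s node, but the device's own of_node is only replaced temporarily, and the reference taken by of_find_compatible_node() is dropped again. In sketch form (ignoring the already-compatible case):

struct device_node *old = dev->of_node;
struct gpio_desc *reset;

dev->of_node = of_find_compatible_node(NULL, NULL, "mediatek,mt7921s-bluetooth");
reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
of_node_put(dev->of_node);	/* drop the ref from of_find_compatible_node() */
dev->of_node = old;
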
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 64e4d835af52..c97e260fcb0c 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -327,7 +327,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
if (count <= 0)
return NULL;
- /* Tranlate to how much the size of data H4 can handle so far */
+ /* Translate to how much the size of data H4 can handle so far */
*sz_h4 = min_t(int, count, bdev->stp_dlen);
/* Update the remaining size of STP packet */
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 5ea0d23e88c0..aa5ec1d444a9 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -16,6 +16,7 @@
#include <linux/crc8.h>
#include <linux/crc32.h>
#include <linux/string_helpers.h>
+#include <linux/gpio/consumer.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -34,16 +35,17 @@
/* NXP HW err codes */
#define BTNXPUART_IR_HW_ERR 0xb0
-#define FIRMWARE_W8987 "uart8987_bt_v0.bin"
+#define FIRMWARE_W8987 "uart8987_bt.bin"
#define FIRMWARE_W8987_OLD "uartuart8987_bt.bin"
#define FIRMWARE_W8997 "uart8997_bt_v4.bin"
#define FIRMWARE_W8997_OLD "uartuart8997_bt_v4.bin"
#define FIRMWARE_W9098 "uart9098_bt_v1.bin"
#define FIRMWARE_W9098_OLD "uartuart9098_bt_v1.bin"
-#define FIRMWARE_IW416 "uartiw416_bt_v0.bin"
+#define FIRMWARE_IW416 "uartiw416_bt.bin"
+#define FIRMWARE_IW416_OLD "uartiw416_bt_v0.bin"
#define FIRMWARE_IW612 "uartspi_n61x_v1.bin.se"
-#define FIRMWARE_IW615 "uartspi_iw610_v0.bin"
-#define FIRMWARE_SECURE_IW615 "uartspi_iw610_v0.bin.se"
+#define FIRMWARE_IW610 "uartspi_iw610.bin"
+#define FIRMWARE_SECURE_IW610 "uartspi_iw610.bin.se"
#define FIRMWARE_IW624 "uartiw624_bt.bin"
#define FIRMWARE_SECURE_IW624 "uartiw624_bt.bin.se"
#define FIRMWARE_AW693 "uartaw693_bt.bin"
@@ -59,8 +61,8 @@
#define CHIP_ID_IW624c 0x8001
#define CHIP_ID_AW693a0 0x8200
#define CHIP_ID_AW693a1 0x8201
-#define CHIP_ID_IW615a0 0x8800
-#define CHIP_ID_IW615a1 0x8801
+#define CHIP_ID_IW610a0 0x8800
+#define CHIP_ID_IW610a1 0x8801
#define FW_SECURE_MASK 0xc0
#define FW_OPEN 0x00
@@ -81,6 +83,7 @@
#define WAKEUP_METHOD_BREAK 1
#define WAKEUP_METHOD_EXT_BREAK 2
#define WAKEUP_METHOD_RTS 3
+#define WAKEUP_METHOD_GPIO 4
#define WAKEUP_METHOD_INVALID 0xff
/* power save mode status */
@@ -134,6 +137,7 @@ struct ps_data {
bool driver_sent_cmd;
u16 h2c_ps_interval;
u16 c2h_ps_interval;
+ struct gpio_desc *h2c_ps_gpio;
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
@@ -364,7 +368,7 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
- int status;
+ int status = 0;
if (psdata->ps_state == ps_state ||
!test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state))
@@ -372,6 +376,14 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
mutex_lock(&psdata->ps_lock);
switch (psdata->cur_h2c_wakeupmode) {
+ case WAKEUP_METHOD_GPIO:
+ if (ps_state == PS_STATE_AWAKE)
+ gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 0);
+ else
+ gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 1);
+ bt_dev_dbg(hdev, "Set h2c_ps_gpio: %s",
+ str_high_low(ps_state == PS_STATE_SLEEP));
+ break;
case WAKEUP_METHOD_DTR:
if (ps_state == PS_STATE_AWAKE)
status = serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0);
@@ -421,15 +433,29 @@ static void ps_timeout_func(struct timer_list *t)
}
}
-static void ps_setup(struct hci_dev *hdev)
+static int ps_setup(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct serdev_device *serdev = nxpdev->serdev;
struct ps_data *psdata = &nxpdev->psdata;
+ psdata->h2c_ps_gpio = devm_gpiod_get_optional(&serdev->dev, "device-wakeup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(psdata->h2c_ps_gpio)) {
+ bt_dev_err(hdev, "Error fetching device-wakeup-gpios: %ld",
+ PTR_ERR(psdata->h2c_ps_gpio));
+ return PTR_ERR(psdata->h2c_ps_gpio);
+ }
+
+ if (!psdata->h2c_ps_gpio)
+ psdata->h2c_wakeup_gpio = 0xff;
+
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
mutex_init(&psdata->ps_lock);
timer_setup(&psdata->ps_timer, ps_timeout_func, 0);
+
+ return 0;
}
static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
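
devm_gpiod_get_optional() is what lets the GPIO wakeup method degrade gracefully: an absent "device-wakeup" property yields NULL rather than an error, and only a malformed GPIO description fails the probe. A sketch:

#include <linux/gpio/consumer.h>

static int get_wakeup_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *g;

	g = devm_gpiod_get_optional(dev, "device-wakeup", GPIOD_OUT_LOW);
	if (IS_ERR(g))
		return PTR_ERR(g);	/* present but unusable: fail */

	*out = g;	/* may be NULL: fall back to DTR/break wakeup */
	return 0;
}
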
@@ -515,6 +541,9 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode;
pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio;
switch (psdata->h2c_wakeupmode) {
+ case WAKEUP_METHOD_GPIO:
+ pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_GPIO;
+ break;
case WAKEUP_METHOD_DTR:
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR;
break;
@@ -549,6 +578,7 @@ static void ps_init(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct ps_data *psdata = &nxpdev->psdata;
+ u8 default_h2c_wakeup_mode = DEFAULT_H2C_WAKEUP_MODE;
serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_RTS);
usleep_range(5000, 10000);
@@ -560,8 +590,17 @@ static void ps_init(struct hci_dev *hdev)
psdata->c2h_wakeup_gpio = 0xff;
psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID;
+ if (psdata->h2c_ps_gpio)
+ default_h2c_wakeup_mode = WAKEUP_METHOD_GPIO;
+
psdata->h2c_ps_interval = PS_DEFAULT_TIMEOUT_PERIOD_MS;
- switch (DEFAULT_H2C_WAKEUP_MODE) {
+
+ switch (default_h2c_wakeup_mode) {
+ case WAKEUP_METHOD_GPIO:
+ psdata->h2c_wakeupmode = WAKEUP_METHOD_GPIO;
+ gpiod_set_value_cansleep(psdata->h2c_ps_gpio, 0);
+ usleep_range(5000, 10000);
+ break;
case WAKEUP_METHOD_DTR:
psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR;
serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR);
@@ -946,12 +985,12 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
- case CHIP_ID_IW615a0:
- case CHIP_ID_IW615a1:
+ case CHIP_ID_IW610a0:
+ case CHIP_ID_IW610a1:
if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
- fw_name = FIRMWARE_IW615;
+ fw_name = FIRMWARE_IW610;
else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
- fw_name = FIRMWARE_SECURE_IW615;
+ fw_name = FIRMWARE_SECURE_IW610;
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
@@ -971,6 +1010,9 @@ static char *nxp_get_old_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
case CHIP_ID_W9098:
fw_name_old = FIRMWARE_W9098_OLD;
break;
+ case CHIP_ID_IW416:
+ fw_name_old = FIRMWARE_IW416_OLD;
+ break;
}
return fw_name_old;
}
@@ -1275,6 +1317,9 @@ static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb)
psdata->c2h_wakeup_gpio = wakeup_parm.c2h_wakeup_gpio;
psdata->h2c_wakeup_gpio = wakeup_parm.h2c_wakeup_gpio;
switch (wakeup_parm.h2c_wakeupmode) {
+ case BT_CTRL_WAKEUP_METHOD_GPIO:
+ psdata->h2c_wakeupmode = WAKEUP_METHOD_GPIO;
+ break;
case BT_CTRL_WAKEUP_METHOD_DSR:
psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR;
break;
@@ -1341,7 +1386,7 @@ static void btnxpuart_tx_work(struct work_struct *work)
skb_pull(skb, len);
if (skb->len > 0) {
skb_queue_head(&nxpdev->txq, skb);
- break;
+ continue;
}
switch (hci_skb_pkt_type(skb)) {
@@ -1505,13 +1550,17 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
if (hci_register_dev(hdev) < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
- hci_free_dev(hdev);
- return -ENODEV;
+ goto probe_fail;
}
- ps_setup(hdev);
+ if (ps_setup(hdev))
+ goto probe_fail;
return 0;
+
+probe_fail:
+ hci_free_dev(hdev);
+ return -ENODEV;
}
static void nxp_serdev_remove(struct serdev_device *serdev)
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index dfbbac92242a..cdf09d9a9ad2 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -272,6 +272,39 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+static bool qca_filename_has_extension(const char *filename)
+{
+ const char *suffix = strrchr(filename, '.');
+
+ /* File extensions require a dot, but not as the first or last character */
+ if (!suffix || suffix == filename || *(suffix + 1) == '\0')
+ return 0;
+
+ /* Avoid matching directories with names that look like files with extensions */
+ return !strchr(suffix, '/');
+}
+
+static bool qca_get_alt_nvm_file(char *filename, size_t max_size)
+{
+ char fwname[64];
+ const char *suffix;
+
+ /* nvm file name has an extension, replace with .bin */
+ if (qca_filename_has_extension(filename)) {
+ suffix = strrchr(filename, '.');
+ strscpy(fwname, filename, suffix - filename + 1);
+ snprintf(fwname + (suffix - filename),
+ sizeof(fwname) - (suffix - filename), ".bin");
+ /* If nvm file is already the default one, return false to skip the retry. */
+ if (strcmp(fwname, filename) == 0)
+ return false;
+
+ snprintf(filename, max_size, "%s", fwname);
+ return true;
+ }
+ return false;
+}
+
static int qca_tlv_check_data(struct hci_dev *hdev,
struct qca_fw_config *config,
u8 *fw_data, size_t fw_size,
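
A worked example of the fallback above (file name hypothetical): if the board-specific NVM "qca/hpnv21g.b8c" cannot be loaded, qca_get_alt_nvm_file() rewrites the name in place to the default "qca/hpnv21g.bin" and returns true so the request is retried; if the name already ends in .bin, it returns false and no retry happens.

char fwname[64] = "qca/hpnv21g.b8c";	/* hypothetical board file */

if (qca_get_alt_nvm_file(fwname, sizeof(fwname)))
	retry_request_firmware(fwname);	/* fwname is now "qca/hpnv21g.bin" */
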
@@ -564,6 +597,19 @@ static int qca_download_firmware(struct hci_dev *hdev,
config->fwname, ret);
return ret;
}
+ }
+ /* If the board-specific file is missing, try loading the default
+ * one, unless that was attempted already.
+ */
+ else if (config->type == TLV_TYPE_NVM &&
+ qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) {
+ bt_dev_info(hdev, "QCA Downloading %s", config->fwname);
+ ret = request_firmware(&fw, config->fwname, &hdev->dev);
+ if (ret) {
+ bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
+ config->fwname, ret);
+ return ret;
+ }
} else {
bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
config->fwname, ret);
@@ -700,39 +746,43 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co
return 0;
}
-static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
+static void qca_get_nvm_name_by_board(char *fwname, size_t max_size,
+ const char *stem, enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
{
const char *variant;
+ const char *prefix;
- /* hsp gf chip */
- if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
- variant = "g";
- else
- variant = "";
+ /* Set the default values for variant and prefix */
+ variant = "";
+ prefix = "b";
- if (bid == 0x0)
- snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant);
- else
- snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid);
-}
+ if (soc_type == QCA_QCA2066)
+ prefix = "";
-static inline void qca_get_nvm_name_generic(struct qca_fw_config *cfg,
- const char *stem, u8 rom_ver, u16 bid)
-{
- if (bid == 0x0)
- snprintf(cfg->fwname, sizeof(cfg->fwname), "qca/%snv%02x.bin", stem, rom_ver);
- else if (bid & 0xff00)
- snprintf(cfg->fwname, sizeof(cfg->fwname),
- "qca/%snv%02x.b%x", stem, rom_ver, bid);
- else
- snprintf(cfg->fwname, sizeof(cfg->fwname),
- "qca/%snv%02x.b%02x", stem, rom_ver, bid);
+ if (soc_type == QCA_WCN6855 || soc_type == QCA_QCA2066) {
+ /* If the chip is manufactured by GlobalFoundries */
+ if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID)
+ variant = "g";
+ }
+
+ if (rom_ver != 0) {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%02x%s.bin", stem, rom_ver, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%02x%s.%s%02x", stem, rom_ver,
+ variant, prefix, bid);
+ } else {
+ if (bid == 0x0 || bid == 0xffff)
+ snprintf(fwname, max_size, "qca/%s%s.bin", stem, variant);
+ else
+ snprintf(fwname, max_size, "qca/%s%s.%s%02x", stem, variant, prefix, bid);
+ }
}
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
- const char *firmware_name)
+ const char *firmware_name, const char *rampatch_name)
{
struct qca_fw_config config = {};
int err;
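
The file names qca_get_nvm_name_by_board() produces, taking a WCN6855 GlobalFoundries part with rom_ver 0x21 as a hypothetical example:

	bid 0x0000 or 0xffff   ->  "qca/hpnv21g.bin"
	bid 0x008c             ->  "qca/hpnv21g.b8c"
	bid 0x008c on QCA2066  ->  "qca/hpnv21g.8c"	(no "b" prefix)
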
@@ -761,44 +811,48 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- switch (soc_type) {
- case QCA_WCN3990:
- case QCA_WCN3991:
- case QCA_WCN3998:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN3988:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/apbtfw%02x.tlv", rom_ver);
- break;
- case QCA_QCA2066:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
- break;
- case QCA_QCA6390:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/htbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN6750:
- /* Choose mbn file by default.If mbn file is not found
- * then choose tlv file
- */
- config.type = ELF_TYPE_PATCH;
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/msbtfw%02x.mbn", rom_ver);
- break;
- case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpbtfw%02x.tlv", rom_ver);
- break;
- case QCA_WCN7850:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hmtbtfw%02x.tlv", rom_ver);
- break;
- default:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/rampatch_%08x.bin", soc_ver);
+ if (rampatch_name) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", rampatch_name);
+ } else {
+ switch (soc_type) {
+ case QCA_WCN3990:
+ case QCA_WCN3991:
+ case QCA_WCN3998:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN3988:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/apbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_QCA2066:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_QCA6390:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/htbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN6750:
+ /* Choose the mbn file by default. If the mbn file is not found,
+ * then choose the tlv file.
+ */
+ config.type = ELF_TYPE_PATCH;
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/msbtfw%02x.mbn", rom_ver);
+ break;
+ case QCA_WCN6855:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hpbtfw%02x.tlv", rom_ver);
+ break;
+ case QCA_WCN7850:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/hmtbtfw%02x.tlv", rom_ver);
+ break;
+ default:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/rampatch_%08x.bin", soc_ver);
+ }
}
err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
@@ -816,8 +870,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
if (firmware_name) {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/%s", firmware_name);
+ /* The firmware name has an extension, use it directly */
+ if (qca_filename_has_extension(firmware_name)) {
+ snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name);
+ } else {
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ firmware_name, soc_type, ver, 0, boardid);
+ }
} else {
switch (soc_type) {
case QCA_WCN3990:
@@ -836,8 +896,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/apnv%02x.bin", rom_ver);
break;
case QCA_QCA2066:
- qca_generate_hsp_nvm_name(config.fwname,
- sizeof(config.fwname), ver, rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname,
+ sizeof(config.fwname), "hpnv", soc_type, ver,
+ rom_ver, boardid);
break;
case QCA_QCA6390:
snprintf(config.fwname, sizeof(config.fwname),
@@ -848,13 +909,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
"qca/msnv%02x.bin", rom_ver);
break;
case QCA_WCN6855:
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/hpnv%02x.bin", rom_ver);
+ qca_read_fw_board_id(hdev, &boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hpnv", soc_type, ver, rom_ver, boardid);
break;
case QCA_WCN7850:
- qca_get_nvm_name_generic(&config, "hmt", rom_ver, boardid);
+ qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname),
+ "hmtnv", soc_type, ver, rom_ver, boardid);
break;
-
default:
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index bb5207d7a8c7..9d28c8800225 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -161,7 +161,7 @@ enum qca_btsoc_type {
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, struct qca_btsoc_version ver,
- const char *firmware_name);
+ const char *firmware_name, const char *rampatch_name);
int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
enum qca_btsoc_type);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
@@ -176,7 +176,8 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad
static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type,
struct qca_btsoc_version ver,
- const char *firmware_name)
+ const char *firmware_name,
+ const char *rampatch_name)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 88dbb2f3fabf..c0eb71d6ffd3 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, btqcomsmd_of_match);
static struct platform_driver btqcomsmd_driver = {
.probe = btqcomsmd_probe,
- .remove_new = btqcomsmd_remove,
+ .remove = btqcomsmd_remove,
.driver = {
.name = "btqcomsmd",
.of_match_table = btqcomsmd_of_match,
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 0bcb44cf7b31..d3eba0d4a57d 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -1351,12 +1351,14 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
btrtl_set_quirks(hdev, btrtl_dev);
- hci_set_hw_info(hdev,
+ if (btrtl_dev->ic_info) {
+ hci_set_hw_info(hdev,
"RTL lmp_subver=%u hci_rev=%u hci_ver=%u hci_bus=%u",
btrtl_dev->ic_info->lmp_subver,
btrtl_dev->ic_info->hci_rev,
btrtl_dev->ic_info->hci_ver,
btrtl_dev->ic_info->hci_bus);
+ }
btrtl_free(btrtl_dev);
return ret;
@@ -1371,7 +1373,7 @@ int btrtl_shutdown_realtek(struct hci_dev *hdev)
/* According to the vendor driver, BT must be reset on close to avoid
* firmware crash.
*/
- skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
bt_dev_err(hdev, "HCI reset during shutdown failed");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f23c8801ad5c..90966dfbd278 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -371,6 +371,14 @@ static const struct usb_device_id quirks_table[] = {
/* QCA WCN785x chipset */
{ USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0fc), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0f3), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -475,6 +483,9 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Realtek 8851BE Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3600), .driver_info = BTUSB_REALTEK },
+
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -524,6 +535,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -563,6 +576,16 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Additional MediaTek MT7920 Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe134), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3620), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3621), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3622), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Additional MediaTek MT7921 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
@@ -582,6 +605,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3576), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK |
@@ -592,6 +617,8 @@ static const struct usb_device_id quirks_table[] = {
/* MediaTek MT7922 Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3585), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3610), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
@@ -630,12 +657,24 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
/* Additional MediaTek MT7925 Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe111), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe118), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe11e), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe124), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe139), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14f), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe150), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe151), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |
@@ -644,6 +683,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3608), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
@@ -846,9 +887,9 @@ struct btusb_data {
int (*suspend)(struct hci_dev *hdev);
int (*resume)(struct hci_dev *hdev);
+ int (*disconnect)(struct hci_dev *hdev);
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
- unsigned cmd_timeout_cnt;
struct qca_dump_info qca_dump;
};
@@ -858,11 +899,6 @@ static void btusb_reset(struct hci_dev *hdev)
struct btusb_data *data;
int err;
- if (hdev->reset) {
- hdev->reset(hdev);
- return;
- }
-
data = hci_get_drvdata(hdev);
/* This is not an unbalanced PM reference since the device will reset */
err = usb_autopm_get_interface(data->intf);
@@ -875,15 +911,12 @@ static void btusb_reset(struct hci_dev *hdev)
usb_queue_reset_device(data->intf);
}
-static void btusb_intel_cmd_timeout(struct hci_dev *hdev)
+static void btusb_intel_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
struct btintel_data *intel_data = hci_get_priv(hdev);
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (intel_data->acpi_reset_method) {
if (test_and_set_bit(INTEL_ACPI_RESET_ACTIVE, intel_data->flags)) {
bt_dev_err(hdev, "acpi: last reset failed ? Not resetting again");
@@ -956,7 +989,7 @@ static inline void btusb_rtl_alloc_devcoredump(struct hci_dev *hdev,
}
}
-static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
+static void btusb_rtl_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
@@ -966,9 +999,6 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (!reset_gpio) {
btusb_reset(hdev);
return;
@@ -1003,19 +1033,16 @@ static void btusb_rtl_hw_error(struct hci_dev *hdev, u8 code)
btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
}
-static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
+static void btusb_qca_reset(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct gpio_desc *reset_gpio = data->reset_gpio;
if (test_bit(BTUSB_HW_SSR_ACTIVE, &data->flags)) {
- bt_dev_info(hdev, "Ramdump in progress, defer cmd_timeout");
+ bt_dev_info(hdev, "Ramdump in progress, defer reset");
return;
}
- if (++data->cmd_timeout_cnt < 5)
- return;
-
if (reset_gpio) {
bt_dev_err(hdev, "Reset qca device via bt_en gpio");
@@ -1061,7 +1088,7 @@ static inline void btusb_free_frags(struct btusb_data *data)
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
- /* Trigger dequeue immediatelly if an event is received */
+ /* Trigger dequeue immediately if an event is received */
schedule_delayed_work(&data->rx_work, 0);
}
@@ -1345,10 +1372,15 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
if (!urb)
return -ENOMEM;
- /* Use maximum HCI Event size so the USB stack handles
- * ZPL/short-transfer automatically.
- */
- size = HCI_MAX_EVENT_SIZE;
+ if (le16_to_cpu(data->udev->descriptor.idVendor) == 0x0a12 &&
+ le16_to_cpu(data->udev->descriptor.idProduct) == 0x0001)
+ /* Fake CSR devices don't seem to support short transfers */
+ size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
+ else
+ /* Use maximum HCI Event size so the USB stack handles
+ * ZLP/short-transfer automatically.
+ */
+ size = HCI_MAX_EVENT_SIZE;
buf = kmalloc(size, mem_flags);
if (!buf) {
@@ -2602,8 +2634,15 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
int err;
+ /*
+ * The function usb_driver_claim_interface() is documented to need
+ * locks held if it's not called from a probe routine. The code here
+ * is called from the hci_power_on workqueue, so grab the lock.
+ */
+ device_lock(&btmtk_data->isopkt_intf->dev);
err = usb_driver_claim_interface(&btusb_driver,
btmtk_data->isopkt_intf, data);
+ device_unlock(&btmtk_data->isopkt_intf->dev);
if (err < 0) {
btmtk_data->isopkt_intf = NULL;
bt_dev_err(data->hdev, "Failed to claim iso interface");
@@ -2611,13 +2650,14 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
}
set_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
+ init_usb_anchor(&btmtk_data->isopkt_anchor);
}
-static void btusb_mtk_release_iso_intf(struct btusb_data *data)
+static void btusb_mtk_release_iso_intf(struct hci_dev *hdev)
{
- struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
+ struct btmtk_data *btmtk_data = hci_get_priv(hdev);
- if (btmtk_data->isopkt_intf) {
+ if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
@@ -2631,6 +2671,16 @@ static void btusb_mtk_release_iso_intf(struct btusb_data *data)
clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
}
+static int btusb_mtk_disconnect(struct hci_dev *hdev)
+{
+ /* This function describes the specific additional steps taken by MediaTek
+ * when Bluetooth usb driver's disconnect function is called.
+ */
+ btusb_mtk_release_iso_intf(hdev);
+
+ return 0;
+}
+
static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -2647,8 +2697,8 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
if (err < 0)
return err;
- if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
- btusb_mtk_release_iso_intf(data);
+ /* Release MediaTek ISO data interface */
+ btusb_mtk_release_iso_intf(hdev);
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
@@ -2693,22 +2743,24 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
btmtk_data->reset_sync = btusb_mtk_reset;
/* Claim ISO data interface and endpoint */
- btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
- if (btmtk_data->isopkt_intf)
+ if (!test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
+ btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
btusb_mtk_claim_iso_intf(data);
+ }
return btmtk_usb_setup(hdev);
}
static int btusb_mtk_shutdown(struct hci_dev *hdev)
{
- struct btusb_data *data = hci_get_drvdata(hdev);
- struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ int ret;
+
+ ret = btmtk_usb_shutdown(hdev);
- if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
- btusb_mtk_release_iso_intf(data);
+ /* Release MediaTek iso interface after shutdown */
+ btusb_mtk_release_iso_intf(hdev);
- return btmtk_usb_shutdown(hdev);
+ return ret;
}
#ifdef CONFIG_PM
@@ -3596,6 +3648,32 @@ static const struct file_operations force_poll_sync_fops = {
.llseek = default_llseek,
};
+static ssize_t isoc_alt_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct btusb_data *data = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", data->isoc_altsetting);
+}
+
+static ssize_t isoc_alt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct btusb_data *data = dev_get_drvdata(dev);
+ int alt;
+ int ret;
+
+ if (kstrtoint(buf, 10, &alt))
+ return -EINVAL;
+
+ ret = btusb_switch_alt_setting(data->hdev, alt);
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(isoc_alt);
+
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -3792,7 +3870,7 @@ static int btusb_probe(struct usb_interface *intf,
/* Transport specific configuration */
hdev->send = btusb_send_frame_intel;
- hdev->cmd_timeout = btusb_intel_cmd_timeout;
+ hdev->reset = btusb_intel_reset;
if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT)
btintel_set_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT);
@@ -3812,7 +3890,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->setup = btusb_mtk_setup;
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
- hdev->cmd_timeout = btmtk_reset_sync;
+ hdev->reset = btmtk_reset_sync;
hdev->set_bdaddr = btmtk_set_bdaddr;
hdev->send = btusb_send_frame_mtk;
set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
@@ -3820,6 +3898,7 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_acl = btmtk_usb_recv_acl;
data->suspend = btmtk_usb_suspend;
data->resume = btmtk_usb_resume;
+ data->disconnect = btusb_mtk_disconnect;
}
if (id->driver_info & BTUSB_SWAVE) {
@@ -3843,7 +3922,7 @@ static int btusb_probe(struct usb_interface *intf,
data->setup_on_usb = btusb_setup_qca;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- hdev->cmd_timeout = btusb_qca_cmd_timeout;
+ hdev->reset = btusb_qca_reset;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
btusb_check_needs_reset_resume(intf);
}
@@ -3857,7 +3936,7 @@ static int btusb_probe(struct usb_interface *intf,
data->setup_on_usb = btusb_setup_qca;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
- hdev->cmd_timeout = btusb_qca_cmd_timeout;
+ hdev->reset = btusb_qca_reset;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
hci_set_msft_opcode(hdev, 0xFD70);
}
@@ -3876,7 +3955,7 @@ static int btusb_probe(struct usb_interface *intf,
btrtl_set_driver_name(hdev, btusb_driver.name);
hdev->setup = btusb_setup_realtek;
hdev->shutdown = btrtl_shutdown_realtek;
- hdev->cmd_timeout = btusb_rtl_cmd_timeout;
+ hdev->reset = btusb_rtl_reset;
hdev->hw_error = btusb_rtl_hw_error;
/* Realtek devices need to set remote wakeup on auto-suspend */
@@ -3891,6 +3970,8 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT, &hdev->quirks);
}
if (!reset)
@@ -3956,6 +4037,10 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc, data);
if (err < 0)
goto out_free_dev;
+
+ err = device_create_file(&intf->dev, &dev_attr_isoc_alt);
+ if (err)
+ goto out_free_dev;
}
if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) {
@@ -4002,12 +4087,17 @@ static void btusb_disconnect(struct usb_interface *intf)
hdev = data->hdev;
usb_set_intfdata(data->intf, NULL);
- if (data->isoc)
+ if (data->isoc) {
+ device_remove_file(&intf->dev, &dev_attr_isoc_alt);
usb_set_intfdata(data->isoc, NULL);
+ }
if (data->diag)
usb_set_intfdata(data->diag, NULL);
+ if (data->disconnect)
+ data->disconnect(hdev);
+
hci_unregister_dev(hdev);
if (intf == data->intf) {
@@ -4041,8 +4131,10 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
BT_DBG("intf %p", intf);
- /* Don't suspend if there are connections */
- if (hci_conn_count(data->hdev))
+ /* Don't auto-suspend if there are connections; external suspend calls
+ * shall never fail.
+ */
+ if (PMSG_IS_AUTO(message) && hci_conn_count(data->hdev))
return -EBUSY;
if (data->suspend_count++)
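
The suspend hunk above is the behavioural core of this change: PMSG_IS_AUTO() is set only for runtime-PM autosuspend, so the driver can keep refusing autosuspend while connections exist without ever failing a system-wide suspend request. A minimal sketch of the idiom (hypothetical driver and helper names, not the btusb code itself):

#include <linux/usb.h>

static bool example_has_active_connections(void);	/* hypothetical helper */

static int example_suspend(struct usb_interface *intf, pm_message_t message)
{
	/* Veto only runtime autosuspend; system sleep must not fail. */
	if (PMSG_IS_AUTO(message) && example_has_active_connections())
		return -EBUSY;	/* runtime PM will retry later */

	/* Quiesce traffic here for either suspend path. */
	return 0;
}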
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 89d4c2224546..9684eb16059b 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -1068,17 +1068,17 @@ static struct clk *bcm_get_txco(struct device *dev)
struct clk *clk;
/* New explicit name */
- clk = devm_clk_get(dev, "txco");
- if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+ clk = devm_clk_get_optional(dev, "txco");
+ if (clk)
return clk;
/* Deprecated name */
- clk = devm_clk_get(dev, "extclk");
- if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+ clk = devm_clk_get_optional(dev, "extclk");
+ if (clk)
return clk;
/* Original code used no name at all */
- return devm_clk_get(dev, NULL);
+ return devm_clk_get_optional(dev, NULL);
}
static int bcm_get_resources(struct bcm_device *dev)
@@ -1093,21 +1093,12 @@ static int bcm_get_resources(struct bcm_device *dev)
return 0;
dev->txco_clk = bcm_get_txco(dev->dev);
-
- /* Handle deferred probing */
- if (dev->txco_clk == ERR_PTR(-EPROBE_DEFER))
- return PTR_ERR(dev->txco_clk);
-
- /* Ignore all other errors as before */
if (IS_ERR(dev->txco_clk))
- dev->txco_clk = NULL;
-
- dev->lpo_clk = devm_clk_get(dev->dev, "lpo");
- if (dev->lpo_clk == ERR_PTR(-EPROBE_DEFER))
- return PTR_ERR(dev->lpo_clk);
+ return PTR_ERR(dev->txco_clk);
+ dev->lpo_clk = devm_clk_get_optional(dev->dev, "lpo");
if (IS_ERR(dev->lpo_clk))
- dev->lpo_clk = NULL;
+ return PTR_ERR(dev->lpo_clk);
/* Check if we accidentally fetched the lpo clock twice */
if (dev->lpo_clk && clk_is_match(dev->lpo_clk, dev->txco_clk)) {
@@ -1507,7 +1498,7 @@ static const struct dev_pm_ops bcm_pm_ops = {
static struct platform_driver bcm_driver = {
.probe = bcm_probe,
- .remove_new = bcm_remove,
+ .remove = bcm_remove,
.driver = {
.name = "hci_bcm",
.acpi_match_table = ACPI_PTR(bcm_acpi_match),
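
The bcm_get_txco()/bcm_get_resources() rework above leans on one property of devm_clk_get_optional(): a missing clock yields NULL rather than an error, so only genuine failures (including -EPROBE_DEFER) survive the IS_ERR() check, and the NULL "clock" is accepted as a no-op by the clk_*() API. A minimal sketch of that idiom, assuming a consumer clock named "txco":

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_txco(struct device *dev, struct clk **clk)
{
	/* NULL when the DT simply lacks the clock; ERR_PTR otherwise. */
	*clk = devm_clk_get_optional(dev, "txco");
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);	/* real error or probe deferral */

	return 0;	/* *clk may be NULL; clk_prepare_enable(NULL) is a no-op */
}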
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 999ccd5bb4f2..811f33701f84 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -1206,7 +1206,7 @@ static void intel_remove(struct platform_device *pdev)
static struct platform_driver intel_driver = {
.probe = intel_probe,
- .remove_new = intel_remove,
+ .remove = intel_remove,
.driver = {
.name = "hci_intel",
.acpi_match_table = ACPI_PTR(intel_acpi_match),
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 395d66e32a2e..d2d6ba8d2f8b 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -594,7 +594,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
* Called by tty low level driver when receive data is
* available.
*
- * Arguments: tty pointer to tty isntance data
+ * Arguments: tty pointer to tty instance data
* data pointer to received data
* flags pointer to flags for data
* count count of received data in bytes
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 4a0b5c3160c2..e19e9bd49555 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -305,7 +305,7 @@ static void ll_device_woke_up(struct hci_uart *hu)
hci_uart_tx_wakeup(hu);
}
-/* Enqueue frame for transmittion (padding, crc, etc) */
+/* Enqueue frame for transmission (padding, crc, etc) */
/* may be called from two simultaneous tasklets */
static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 49bbe4975be4..9fc10a16fd96 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -501,7 +501,7 @@ static int nokia_close(struct hci_uart *hu)
return 0;
}
-/* Enqueue frame for transmittion (padding, crc, etc) */
+/* Enqueue frame for transmission (padding, crc, etc) */
static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct nokia_bt_dev *btdev = hu->priv;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 37fddf6055be..0ac2168f1dc4 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -31,6 +31,7 @@
#include <linux/pwrseq/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
+#include <linux/string_choices.h>
#include <linux/mutex.h>
#include <linux/unaligned.h>
@@ -228,7 +229,7 @@ struct qca_serdev {
u32 init_speed;
u32 oper_speed;
bool bdaddr_property_broken;
- const char *firmware_name;
+ const char *firmware_name[2];
};
static int qca_regulator_enable(struct qca_serdev *qcadev);
@@ -258,7 +259,18 @@ static const char *qca_get_firmware_name(struct hci_uart *hu)
if (hu->serdev) {
struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
- return qsd->firmware_name;
+ return qsd->firmware_name[0];
+ } else {
+ return NULL;
+ }
+}
+
+static const char *qca_get_rampatch_name(struct hci_uart *hu)
+{
+ if (hu->serdev) {
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
+
+ return qsd->firmware_name[1];
} else {
return NULL;
}
@@ -332,8 +344,8 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
else
__serial_clock_off(hu->tty);
- BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
- vote ? "true" : "false");
+ BT_DBG("Vote serial clock %s(%s)", str_true_false(new_vote),
+ str_true_false(vote));
diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
@@ -873,7 +885,7 @@ static void device_woke_up(struct hci_uart *hu)
hci_uart_tx_wakeup(hu);
}
-/* Enqueue frame for transmittion (padding, crc, etc) may be called from
+/* Enqueue frame for transmission (padding, crc, etc) may be called from
* two simultaneous tasklets.
*/
static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
@@ -1059,7 +1071,7 @@ static void qca_controller_memdump(struct work_struct *work)
if (!seq_no) {
/* This is the first frame of memdump packet from
- * the controller, Disable IBS to recevie dump
+ * the controller, Disable IBS to receive dump
* with out any interruption, ideally time required for
* the controller to send the dump is 8 seconds. let us
* start timer to handle this asynchronous activity.
@@ -1638,7 +1650,7 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
}
-static void qca_cmd_timeout(struct hci_dev *hdev)
+static void qca_reset(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
@@ -1855,6 +1867,7 @@ static int qca_setup(struct hci_uart *hu)
unsigned int retries = 0;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
const char *firmware_name = qca_get_firmware_name(hu);
+ const char *rampatch_name = qca_get_rampatch_name(hu);
int ret;
struct qca_btsoc_version ver;
struct qca_serdev *qcadev;
@@ -1963,12 +1976,12 @@ retry:
/* Setup patch / NVM configurations */
ret = qca_uart_setup(hdev, qca_baudrate, soc_type, ver,
- firmware_name);
+ firmware_name, rampatch_name);
if (!ret) {
clear_bit(QCA_IBS_DISABLED, &qca->flags);
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
- hu->hdev->cmd_timeout = qca_cmd_timeout;
+ hu->hdev->reset = qca_reset;
if (hu->serdev) {
if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
hu->hdev->wakeup = qca_wakeup;
@@ -2202,7 +2215,7 @@ static int qca_power_off(struct hci_dev *hdev)
enum qca_btsoc_type soc_type = qca_soc_type(hu);
hu->hdev->hw_error = NULL;
- hu->hdev->cmd_timeout = NULL;
+ hu->hdev->reset = NULL;
del_timer_sync(&qca->wake_retrans_timer);
del_timer_sync(&qca->tx_idle_timer);
@@ -2294,13 +2307,6 @@ static int qca_init_regulators(struct qca_power *qca,
return 0;
}
-static void qca_clk_disable_unprepare(void *data)
-{
- struct clk *clk = data;
-
- clk_disable_unprepare(clk);
-}
-
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
@@ -2316,8 +2322,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->serdev_hu.serdev = serdev;
data = device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
- device_property_read_string(&serdev->dev, "firmware-name",
- &qcadev->firmware_name);
+ device_property_read_string_array(&serdev->dev, "firmware-name",
+ qcadev->firmware_name, ARRAY_SIZE(qcadev->firmware_name));
device_property_read_u32(&serdev->dev, "max-speed",
&qcadev->oper_speed);
if (!qcadev->oper_speed)
@@ -2358,7 +2364,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
* Backward compatibility with old DT sources. If the
* node doesn't have the 'enable-gpios' property then
* let's use the power sequencer. Otherwise, let's
- * drive everything outselves.
+ * drive everything ourselves.
*/
qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
"bluetooth");
@@ -2433,25 +2439,12 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->bt_en)
power_ctrl_enabled = false;
- qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
+ qcadev->susclk = devm_clk_get_optional_enabled_with_rate(
+ &serdev->dev, NULL, SUSCLK_RATE_32KHZ);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
return PTR_ERR(qcadev->susclk);
}
- err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
- if (err)
- return err;
-
- err = clk_prepare_enable(qcadev->susclk);
- if (err)
- return err;
-
- err = devm_add_action_or_reset(&serdev->dev,
- qca_clk_disable_unprepare,
- qcadev->susclk);
- if (err)
- return err;
-
}
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
@@ -2530,7 +2523,7 @@ static void qca_serdev_shutdown(struct device *dev)
hci_dev_test_flag(hdev, HCI_SETUP))
return;
- /* The serdev must be in open state when conrol logic arrives
+ /* The serdev must be in open state when control logic arrives
* here, so also fix the use-after-free issue caused by that
* the serdev is flushed or wrote after it is closed.
*/
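
The qca_serdev change above widens the "firmware-name" property from a single string to a two-element array, with entry 0 naming the firmware and entry 1 the rampatch. A sketch of the array read (names here are illustrative, not the driver's):

#include <linux/property.h>
#include <linux/array_size.h>

static const char *example_fw_names[2];	/* [0] firmware, [1] rampatch */

static void example_read_firmware_names(struct device *dev)
{
	/*
	 * Reads up to two strings; absent entries stay NULL, which the
	 * lookup helpers treat as "use the built-in default name".
	 */
	device_property_read_string_array(dev, "firmware-name",
					  example_fw_names,
					  ARRAY_SIZE(example_fw_names));
}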
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 4b68c84ef485..52053f7c6d9a 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -22,8 +22,8 @@ struct fsl_mc_child_objs {
struct fsl_mc_obj_desc *child_array;
};
-static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
- struct fsl_mc_obj_desc *obj_desc)
+static bool fsl_mc_device_match(const struct fsl_mc_device *mc_dev,
+ const struct fsl_mc_obj_desc *obj_desc)
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
@@ -112,9 +112,9 @@ void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
}
EXPORT_SYMBOL_GPL(dprc_remove_devices);
-static int __fsl_mc_device_match(struct device *dev, void *data)
+static int __fsl_mc_device_match(struct device *dev, const void *data)
{
- struct fsl_mc_obj_desc *obj_desc = data;
+ const struct fsl_mc_obj_desc *obj_desc = data;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
return fsl_mc_device_match(mc_dev, obj_desc);
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 930d8a3ba722..d1f3d327ddd1 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -320,90 +320,90 @@ const struct bus_type fsl_mc_bus_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
-struct device_type fsl_mc_bus_dprc_type = {
+const struct device_type fsl_mc_bus_dprc_type = {
.name = "fsl_mc_bus_dprc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
-struct device_type fsl_mc_bus_dpni_type = {
+const struct device_type fsl_mc_bus_dpni_type = {
.name = "fsl_mc_bus_dpni"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
-struct device_type fsl_mc_bus_dpio_type = {
+const struct device_type fsl_mc_bus_dpio_type = {
.name = "fsl_mc_bus_dpio"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
-struct device_type fsl_mc_bus_dpsw_type = {
+const struct device_type fsl_mc_bus_dpsw_type = {
.name = "fsl_mc_bus_dpsw"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
-struct device_type fsl_mc_bus_dpbp_type = {
+const struct device_type fsl_mc_bus_dpbp_type = {
.name = "fsl_mc_bus_dpbp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
-struct device_type fsl_mc_bus_dpcon_type = {
+const struct device_type fsl_mc_bus_dpcon_type = {
.name = "fsl_mc_bus_dpcon"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
-struct device_type fsl_mc_bus_dpmcp_type = {
+const struct device_type fsl_mc_bus_dpmcp_type = {
.name = "fsl_mc_bus_dpmcp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
-struct device_type fsl_mc_bus_dpmac_type = {
+const struct device_type fsl_mc_bus_dpmac_type = {
.name = "fsl_mc_bus_dpmac"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
-struct device_type fsl_mc_bus_dprtc_type = {
+const struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
-struct device_type fsl_mc_bus_dpseci_type = {
+const struct device_type fsl_mc_bus_dpseci_type = {
.name = "fsl_mc_bus_dpseci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
-struct device_type fsl_mc_bus_dpdmux_type = {
+const struct device_type fsl_mc_bus_dpdmux_type = {
.name = "fsl_mc_bus_dpdmux"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
-struct device_type fsl_mc_bus_dpdcei_type = {
+const struct device_type fsl_mc_bus_dpdcei_type = {
.name = "fsl_mc_bus_dpdcei"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
-struct device_type fsl_mc_bus_dpaiop_type = {
+const struct device_type fsl_mc_bus_dpaiop_type = {
.name = "fsl_mc_bus_dpaiop"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
-struct device_type fsl_mc_bus_dpci_type = {
+const struct device_type fsl_mc_bus_dpci_type = {
.name = "fsl_mc_bus_dpci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
-struct device_type fsl_mc_bus_dpdmai_type = {
+const struct device_type fsl_mc_bus_dpdmai_type = {
.name = "fsl_mc_bus_dpdmai"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
-struct device_type fsl_mc_bus_dpdbg_type = {
+const struct device_type fsl_mc_bus_dpdbg_type = {
.name = "fsl_mc_bus_dpdbg"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
-static struct device_type *fsl_mc_get_device_type(const char *type)
+static const struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
- struct device_type *dev_type;
+ const struct device_type *dev_type;
const char *type;
} dev_types[] = {
{ &fsl_mc_bus_dprc_type, "dprc" },
@@ -1210,7 +1210,7 @@ static struct platform_driver fsl_mc_bus_driver = {
.acpi_match_table = fsl_mc_bus_acpi_match_table,
},
.probe = fsl_mc_bus_probe,
- .remove_new = fsl_mc_bus_remove,
+ .remove = fsl_mc_bus_remove,
.shutdown = fsl_mc_bus_remove,
};
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 09340adbacc2..53dd1573e323 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -689,6 +689,6 @@ static struct platform_driver hisi_lpc_driver = {
.acpi_match_table = hisi_lpc_acpi_match,
},
.probe = hisi_lpc_probe,
- .remove_new = hisi_lpc_remove,
+ .remove = hisi_lpc_remove,
};
builtin_platform_driver(hisi_lpc_driver);
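
The .remove_new to .remove conversions scattered through this series (hci_bcm, hci_intel, the bus drivers here, gdrom, cdx, and the hw_random drivers below) are one mechanical change: struct platform_driver's .remove callback now carries the void-returning signature that .remove_new held during the transition. A minimal sketch, assuming a kernel where the conversion is complete:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

/* remove can no longer report failure; cleanup must be unconditional */
static void example_remove(struct platform_device *pdev)
{
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,	/* formerly .remove_new */
	.driver	= {
		.name = "example",
	},
};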
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index dedd29ca8db3..9dcc7184817d 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -82,9 +82,9 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
* other cores to shutdown while we're collecting RDDM buffer. After
* returning from this function, we expect the device to reset.
*
- * Normaly, we read/write pm_state only after grabbing the
+ * Normally, we read/write pm_state only after grabbing the
* pm_lock, since we're in a panic, skipping it. Also there is no
- * gurantee that this state change would take effect since
+ * guarantee that this state change would take effect since
* we're setting it w/o grabbing pm_lock
*/
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
@@ -357,6 +357,7 @@ error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(img_info->mhi_buf);
error_alloc_mhi_buf:
kfree(img_info);
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index d057e877932e..3134f111be35 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -255,7 +255,7 @@ struct mhi_chan {
/*
* Important: When consuming, increment tre_ring first and when
* releasing, decrement buf_ring first. If tre_ring has space, buf_ring
- * is guranteed to have space so we do not need to check both rings.
+ * is guaranteed to have space so we do not need to check both rings.
*/
struct mhi_ring buf_ring;
struct mhi_ring tre_ring;
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 9938bb034c1c..c41119b9079f 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -245,6 +245,58 @@ struct mhi_pci_dev_info {
.channel = ch_num, \
}
+static const struct mhi_channel_config mhi_qcom_qdu100_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(9, "QDSS", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(14, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(15, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(16, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(17, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(40, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(41, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 256, 5),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 256, 5),
+};
+
+static struct mhi_event_config mhi_qcom_qdu100_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* SAHARA dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(1, 256),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+ MHI_EVENT_CONFIG_SW_DATA(3, 256),
+ MHI_EVENT_CONFIG_SW_DATA(4, 256),
+ /* Software IP channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(5, 512),
+ MHI_EVENT_CONFIG_SW_DATA(6, 512),
+ MHI_EVENT_CONFIG_SW_DATA(7, 512),
+};
+
+static const struct mhi_controller_config mhi_qcom_qdu100_config = {
+ .max_channels = 128,
+ .timeout_ms = 120000,
+ .num_channels = ARRAY_SIZE(mhi_qcom_qdu100_channels),
+ .ch_cfg = mhi_qcom_qdu100_channels,
+ .num_events = ARRAY_SIZE(mhi_qcom_qdu100_events),
+ .event_cfg = mhi_qcom_qdu100_events,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
+ .name = "qcom-qdu100",
+ .fw = "qcom/qdu100/xbl_s.melf",
+ .edl_trigger = true,
+ .config = &mhi_qcom_qdu100_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
@@ -742,6 +794,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
+ /* QDU100, x100-DU */
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0601),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_qdu100_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
@@ -917,12 +972,12 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
return err;
}
- err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
- if (err) {
+ mhi_cntrl->regs = pcim_iomap_region(pdev, bar_num, pci_name(pdev));
+ if (IS_ERR(mhi_cntrl->regs)) {
+ err = PTR_ERR(mhi_cntrl->regs);
dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
return err;
}
- mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
@@ -949,7 +1004,7 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
*/
mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
- nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+ nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_vectors < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
nr_vectors);
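
Two independent improvements land in mhi_pci above: pcim_iomap_region() maps a single BAR and returns the mapping directly, replacing the pcim_iomap_regions()/pcim_iomap_table() pair, and pci_alloc_irq_vectors() now asks for MSI-X first with plain MSI as the fallback. A sketch of the mapping idiom (hypothetical function name):

#include <linux/pci.h>

static void __iomem *example_map_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * One call maps the BAR and registers devres cleanup; the result
	 * is the mapping itself, checked by the caller with IS_ERR().
	 */
	return pcim_iomap_region(pdev, bar, pci_name(pdev));
}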
diff --git a/drivers/bus/mhi/host/trace.h b/drivers/bus/mhi/host/trace.h
index 95613c8ebe06..3e0c41777429 100644
--- a/drivers/bus/mhi/host/trace.h
+++ b/drivers/bus/mhi/host/trace.h
@@ -9,6 +9,7 @@
#if !defined(_TRACE_EVENT_MHI_HOST_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EVENT_MHI_HOST_H
+#include <linux/byteorder/generic.h>
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "../common.h"
@@ -97,18 +98,18 @@ TRACE_EVENT(mhi_gen_tre,
__string(name, mhi_cntrl->mhi_dev->name)
__field(int, ch_num)
__field(void *, wp)
- __field(__le64, tre_ptr)
- __field(__le32, dword0)
- __field(__le32, dword1)
+ __field(uint64_t, tre_ptr)
+ __field(uint32_t, dword0)
+ __field(uint32_t, dword1)
),
TP_fast_assign(
__assign_str(name);
__entry->ch_num = mhi_chan->chan;
__entry->wp = mhi_tre;
- __entry->tre_ptr = mhi_tre->ptr;
- __entry->dword0 = mhi_tre->dword[0];
- __entry->dword1 = mhi_tre->dword[1];
+ __entry->tre_ptr = le64_to_cpu(mhi_tre->ptr);
+ __entry->dword0 = le32_to_cpu(mhi_tre->dword[0]);
+ __entry->dword1 = le32_to_cpu(mhi_tre->dword[1]);
),
TP_printk("%s: Chan: %d TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x\n",
@@ -176,19 +177,19 @@ DECLARE_EVENT_CLASS(mhi_process_event_ring,
TP_STRUCT__entry(
__string(name, mhi_cntrl->mhi_dev->name)
- __field(__le32, dword0)
- __field(__le32, dword1)
+ __field(uint32_t, dword0)
+ __field(uint32_t, dword1)
__field(int, state)
- __field(__le64, ptr)
+ __field(uint64_t, ptr)
__field(void *, rp)
),
TP_fast_assign(
__assign_str(name);
__entry->rp = rp;
- __entry->ptr = rp->ptr;
- __entry->dword0 = rp->dword[0];
- __entry->dword1 = rp->dword[1];
+ __entry->ptr = le64_to_cpu(rp->ptr);
+ __entry->dword0 = le32_to_cpu(rp->dword[0]);
+ __entry->dword1 = le32_to_cpu(rp->dword[1]);
__entry->state = MHI_TRE_GET_EV_STATE(rp);
),
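
The trace.h hunks convert the little-endian TRE fields to CPU byte order once, inside TP_fast_assign(), so the stored trace fields are plain integers and TP_printk() renders them correctly on big-endian hosts as well. Reduced to its essence (hypothetical struct, same conversion calls):

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_tre {
	__le64 ptr;
	__le32 dword[2];
};

static void example_capture(const struct example_tre *tre,
			    u64 *ptr, u32 *dw0, u32 *dw1)
{
	/* Convert at capture time; consumers never see __le types. */
	*ptr = le64_to_cpu(tre->ptr);
	*dw0 = le32_to_cpu(tre->dword[0]);
	*dw1 = le32_to_cpu(tre->dword[1]);
}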
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 6276551d7968..1e57ebfb7622 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -657,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
id = moxtet->modules[pos->idx];
- seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
pos->bit);
}
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 7d7479ba0a75..e4dfda7b3b10 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -101,7 +101,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
static struct platform_driver omap_ocp2scp_driver = {
.probe = omap_ocp2scp_probe,
- .remove_new = omap_ocp2scp_remove,
+ .remove = omap_ocp2scp_remove,
.driver = {
.name = "omap-ocp2scp",
.of_match_table = of_match_ptr(omap_ocp2scp_id_table),
diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c
index ee6d29925e4d..7f0a8f8b3f4c 100644
--- a/drivers/bus/omap_l3_smx.c
+++ b/drivers/bus/omap_l3_smx.c
@@ -273,7 +273,7 @@ static void omap3_l3_remove(struct platform_device *pdev)
static struct platform_driver omap3_l3_driver = {
.probe = omap3_l3_probe,
- .remove_new = omap3_l3_remove,
+ .remove = omap3_l3_remove,
.driver = {
.name = "omap_l3_smx",
.of_match_table = of_match_ptr(omap3_l3_match),
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
index 5931974a21fa..85d781a32df4 100644
--- a/drivers/bus/qcom-ssc-block-bus.c
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -373,7 +373,7 @@ MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match);
static struct platform_driver qcom_ssc_block_bus_driver = {
.probe = qcom_ssc_block_bus_probe,
- .remove_new = qcom_ssc_block_bus_remove,
+ .remove = qcom_ssc_block_bus_remove,
.driver = {
.name = "qcom-ssc-block-bus",
.of_match_table = qcom_ssc_block_bus_of_match,
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 50870c827889..5dea31769f9a 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -128,7 +128,7 @@ MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
static struct platform_driver simple_pm_bus_driver = {
.probe = simple_pm_bus_probe,
- .remove_new = simple_pm_bus_remove,
+ .remove = simple_pm_bus_remove,
.driver = {
.name = "simple-pm-bus",
.of_match_table = simple_pm_bus_of_match,
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
index 3339311ce068..dfe588179aca 100644
--- a/drivers/bus/sun50i-de2.c
+++ b/drivers/bus/sun50i-de2.c
@@ -36,7 +36,7 @@ static const struct of_device_id sun50i_de2_bus_of_match[] = {
static struct platform_driver sun50i_de2_bus_driver = {
.probe = sun50i_de2_bus_probe,
- .remove_new = sun50i_de2_bus_remove,
+ .remove = sun50i_de2_bus_remove,
.driver = {
.name = "sun50i-de2-bus",
.of_match_table = sun50i_de2_bus_of_match,
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index a89d78925637..7a33c3b31d1e 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -832,7 +832,7 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
static struct platform_driver sunxi_rsb_driver = {
.probe = sunxi_rsb_probe,
- .remove_new = sunxi_rsb_remove,
+ .remove = sunxi_rsb_remove,
.driver = {
.name = RSB_CTRL_NAME,
.of_match_table = sunxi_rsb_of_match_table,
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index de80008bff92..90e3b0a10816 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
static struct platform_driver tegra_aconnect_driver = {
.probe = tegra_aconnect_probe,
- .remove_new = tegra_aconnect_remove,
+ .remove = tegra_aconnect_remove,
.driver = {
.name = "tegra-aconnect",
.of_match_table = tegra_aconnect_of_match,
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index f5d6414df9f2..9c09141961d8 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -303,7 +303,7 @@ MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
static struct platform_driver tegra_gmi_driver = {
.probe = tegra_gmi_probe,
- .remove_new = tegra_gmi_remove,
+ .remove = tegra_gmi_remove,
.driver = {
.name = "tegra-gmi",
.of_match_table = tegra_gmi_id_table,
diff --git a/drivers/bus/ti-pwmss.c b/drivers/bus/ti-pwmss.c
index 4969c556e752..1f2cab91e438 100644
--- a/drivers/bus/ti-pwmss.c
+++ b/drivers/bus/ti-pwmss.c
@@ -44,7 +44,7 @@ static struct platform_driver pwmss_driver = {
.of_match_table = pwmss_of_match,
},
.probe = pwmss_probe,
- .remove_new = pwmss_remove,
+ .remove = pwmss_remove,
};
module_platform_driver(pwmss_driver);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 270a94a06e05..f67b927ae4ca 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3345,7 +3345,7 @@ MODULE_DEVICE_TABLE(of, sysc_match);
static struct platform_driver sysc_driver = {
.probe = sysc_probe,
- .remove_new = sysc_remove,
+ .remove = sysc_remove,
.driver = {
.name = "ti-sysc",
.of_match_table = sysc_match,
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index b8af44c5cdbd..2328c48b9b12 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -336,7 +336,7 @@ MODULE_DEVICE_TABLE(of, ts_nbus_of_match);
static struct platform_driver ts_nbus_driver = {
.probe = ts_nbus_probe,
- .remove_new = ts_nbus_remove,
+ .remove = ts_nbus_remove,
.driver = {
.name = "ts_nbus",
.of_match_table = ts_nbus_of_match,
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 9b0f37d4b9d4..b163e043c687 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1106,7 +1106,7 @@ int open_for_data(struct cdrom_device_info *cdi)
}
}
- cd_dbg(CD_OPEN, "all seems well, opening the devicen");
+ cd_dbg(CD_OPEN, "all seems well, opening the device\n");
/* all seems well, we can open the device */
ret = cdo->open(cdi, 0); /* open for data */
@@ -2313,7 +2313,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
return -EINVAL;
/* Prevent arg from speculatively bypassing the length check */
- barrier_nospec();
+ arg = array_index_nospec(arg, cdi->capacity);
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
@@ -3612,7 +3612,7 @@ static int cdrom_sysctl_handler(const struct ctl_table *ctl, int write,
}
/* Place files in /proc/sys/dev/cdrom */
-static struct ctl_table cdrom_table[] = {
+static const struct ctl_table cdrom_table[] = {
{
.procname = "info",
.data = &cdrom_sysctl_settings.info,
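
The cdrom_ioctl_media_changed() fix above replaces the blunt barrier_nospec() with array_index_nospec(), which clamps the index value itself so that even a mispredicted bounds check cannot steer a speculative out-of-bounds load. The pattern, with a hypothetical table:

#include <linux/nospec.h>
#include <linux/errno.h>

static int example_lookup(unsigned long arg, unsigned long capacity,
			  const int *table)
{
	if (arg >= capacity)
		return -EINVAL;

	/* arg is forced into [0, capacity) even under misspeculation */
	arg = array_index_nospec(arg, capacity);

	return table[arg];
}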
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 71cfe7a85913..85aceab5eac6 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -777,7 +777,7 @@ static int probe_gdrom(struct platform_device *devptr)
probe_gdrom_setupcd();
err = blk_mq_alloc_sq_tag_set(&gd.tag_set, &gdrom_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (err)
goto probe_fail_free_cd_info;
@@ -847,7 +847,7 @@ static void remove_gdrom(struct platform_device *devptr)
static struct platform_driver gdrom_driver = {
.probe = probe_gdrom,
- .remove_new = remove_gdrom,
+ .remove = remove_gdrom,
.driver = {
.name = GDROM_DEV_NAME,
},
diff --git a/drivers/cdx/Makefile b/drivers/cdx/Makefile
index 749a3295c2bd..3ca7068a3052 100644
--- a/drivers/cdx/Makefile
+++ b/drivers/cdx/Makefile
@@ -5,7 +5,7 @@
# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
#
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CDX_BUS
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CDX_BUS"'
obj-$(CONFIG_CDX_BUS) += cdx.o controller/
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 07371cb653d3..c573ed2ee71a 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -338,7 +338,10 @@ static void cdx_shutdown(struct device *dev)
{
struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ if (cdx_dev->is_bus && cdx_dev->enabled && cdx->ops->bus_disable)
+ cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
if (cdx_drv && cdx_drv->shutdown)
cdx_drv->shutdown(cdx_dev);
}
@@ -707,7 +710,7 @@ static const struct vm_operations_struct cdx_phys_vm_ops = {
* Return: true on success, false otherwise.
*/
static int cdx_mmap_resource(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct cdx_device *cdx_dev = to_cdx_device(kobj_to_dev(kobj));
@@ -868,7 +871,7 @@ fail:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cdx_device_add, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_device_add, "CDX_BUS_CONTROLLER");
struct device *cdx_bus_add(struct cdx_controller *cdx, u8 bus_num)
{
@@ -915,7 +918,7 @@ device_add_fail:
return NULL;
}
-EXPORT_SYMBOL_NS_GPL(cdx_bus_add, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_bus_add, "CDX_BUS_CONTROLLER");
int cdx_register_controller(struct cdx_controller *cdx)
{
@@ -940,7 +943,7 @@ int cdx_register_controller(struct cdx_controller *cdx)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cdx_register_controller, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_register_controller, "CDX_BUS_CONTROLLER");
void cdx_unregister_controller(struct cdx_controller *cdx)
{
@@ -955,7 +958,7 @@ void cdx_unregister_controller(struct cdx_controller *cdx)
mutex_unlock(&cdx_controller_lock);
}
-EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, "CDX_BUS_CONTROLLER");
static int __init cdx_bus_init(void)
{
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
index e55f1716cfcb..06d723978232 100644
--- a/drivers/cdx/cdx_msi.c
+++ b/drivers/cdx/cdx_msi.c
@@ -189,4 +189,4 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
return cdx_msi_domain;
}
-EXPORT_SYMBOL_NS_GPL(cdx_msi_domain_init, CDX_BUS_CONTROLLER);
+EXPORT_SYMBOL_NS_GPL(cdx_msi_domain_init, "CDX_BUS_CONTROLLER");
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index 201f9a6fbde7..d623f9c7517a 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -250,7 +250,7 @@ static struct platform_driver cdx_pdriver = {
.of_match_table = cdx_match_table,
},
.probe = xlnx_cdx_probe,
- .remove_new = xlnx_cdx_remove,
+ .remove = xlnx_cdx_remove,
};
static int __init cdx_controller_init(void)
@@ -275,4 +275,4 @@ module_exit(cdx_controller_exit);
MODULE_AUTHOR("AMD Inc.");
MODULE_DESCRIPTION("CDX controller for AMD devices");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CDX_BUS_CONTROLLER);
+MODULE_IMPORT_NS("CDX_BUS_CONTROLLER");
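
The cdx churn above reflects the symbol-namespace conversion: EXPORT_SYMBOL_NS_GPL(), MODULE_IMPORT_NS() and DEFAULT_SYMBOL_NAMESPACE now take quoted string literals instead of bare tokens. A minimal sketch (hypothetical symbol and namespace):

#include <linux/module.h>

int example_fn(void)
{
	return 0;
}
/* The namespace argument is now a string literal. */
EXPORT_SYMBOL_NS_GPL(example_fn, "EXAMPLE_NS");

/* A consumer module declares, in its own source file:
 *	MODULE_IMPORT_NS("EXAMPLE_NS");
 */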
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 7c8dd0abcfdf..8fb33c90482f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -238,6 +238,7 @@ config APPLICOM
config SONYPI
tristate "Sony Vaio Programmable I/O Control Device support"
depends on X86_32 && PCI && INPUT
+ depends on ACPI_EC || !ACPI
help
This driver enables access to the Sony Programmable I/O Control
Device which can be found in many (all ?) Sony Vaio laptops.
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index e904e476e49a..e110857824fc 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -162,6 +162,7 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
static void hpet_timer_set_irq(struct hpet_dev *devp)
{
+ const unsigned int nr_irqs = irq_get_nr_irqs();
unsigned long v;
int irq, gsi;
struct hpet_timer __iomem *timer;
@@ -723,7 +724,7 @@ static int hpet_is_known(struct hpet_data *hdp)
return 0;
}
-static struct ctl_table hpet_table[] = {
+static const struct ctl_table hpet_table[] = {
{
.procname = "max-user-freq",
.data = &hpet_max_freq,
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index b51d9e243f35..17854f052386 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -50,7 +50,7 @@ config HW_RANDOM_INTEL
config HW_RANDOM_AMD
tristate "AMD HW Random Number Generator support"
- depends on (X86 || PPC_MAPLE || COMPILE_TEST)
+ depends on (X86 || COMPILE_TEST)
depends on PCI && HAS_IOPORT_MAP
default HW_RANDOM
help
@@ -62,6 +62,19 @@ config HW_RANDOM_AMD
If unsure, say Y.
+config HW_RANDOM_AIROHA
+ tristate "Airoha True HW Random Number Generator support"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the True Random Number
+ Generator hardware found on Airoha SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called airoha-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
depends on (ARCH_AT91 || COMPILE_TEST)
@@ -99,9 +112,22 @@ config HW_RANDOM_BCM2835
If unsure, say Y.
+config HW_RANDOM_BCM74110
+ tristate "Broadcom BCM74110 Random Number Generator support"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default HW_RANDOM
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the Broadcom BCM74110 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm74110-rng
+
+ If unsure, say Y.
+
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the RNG200
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 01f012eab440..b9132b3f5d21 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -8,6 +8,7 @@ rng-core-y := core.o
obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
+obj-$(CONFIG_HW_RANDOM_AIROHA) += airoha-trng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM74110) += bcm74110-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
diff --git a/drivers/char/hw_random/airoha-trng.c b/drivers/char/hw_random/airoha-trng.c
new file mode 100644
index 000000000000..1dbfa9505c21
--- /dev/null
+++ b/drivers/char/hw_random/airoha-trng.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Christian Marangi */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+
+#define TRNG_IP_RDY 0x800
+#define CNT_TRANS GENMASK(15, 8)
+#define SAMPLE_RDY BIT(0)
+#define TRNG_NS_SEK_AND_DAT_EN 0x804
+#define RNG_EN BIT(31) /* referenced as ring_en */
+#define RAW_DATA_EN BIT(16)
+#define TRNG_HEALTH_TEST_SW_RST 0x808
+#define SW_RST BIT(0) /* Active High */
+#define TRNG_INTR_EN 0x818
+#define INTR_MASK BIT(16)
+#define CONTINUOUS_HEALTH_INITR_EN BIT(2)
+#define SW_STARTUP_INITR_EN BIT(1)
+#define RST_STARTUP_INITR_EN BIT(0)
+/* Notice that Health Tests are done only out of Reset and with RNG_EN */
+#define TRNG_HEALTH_TEST_STATUS 0x824
+#define CONTINUOUS_HEALTH_AP_TEST_FAIL BIT(23)
+#define CONTINUOUS_HEALTH_RC_TEST_FAIL BIT(22)
+#define SW_STARTUP_TEST_DONE BIT(21)
+#define SW_STARTUP_AP_TEST_FAIL BIT(20)
+#define SW_STARTUP_RC_TEST_FAIL BIT(19)
+#define RST_STARTUP_TEST_DONE BIT(18)
+#define RST_STARTUP_AP_TEST_FAIL BIT(17)
+#define RST_STARTUP_RC_TEST_FAIL BIT(16)
+#define RAW_DATA_VALID BIT(7)
+
+#define TRNG_RAW_DATA_OUT 0x828
+
+#define TRNG_CNT_TRANS_VALID 0x80
+#define BUSY_LOOP_SLEEP 10
+#define BUSY_LOOP_TIMEOUT (BUSY_LOOP_SLEEP * 10000)
+
+struct airoha_trng {
+ void __iomem *base;
+ struct hwrng rng;
+ struct device *dev;
+
+ struct completion rng_op_done;
+};
+
+static int airoha_trng_irq_mask(struct airoha_trng *trng)
+{
+ u32 val;
+
+ val = readl(trng->base + TRNG_INTR_EN);
+ val |= INTR_MASK;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ return 0;
+}
+
+static int airoha_trng_irq_unmask(struct airoha_trng *trng)
+{
+ u32 val;
+
+ val = readl(trng->base + TRNG_INTR_EN);
+ val &= ~INTR_MASK;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ return 0;
+}
+
+static int airoha_trng_init(struct hwrng *rng)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ int ret;
+ u32 val;
+
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val |= RNG_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Set out of SW Reset */
+ airoha_trng_irq_unmask(trng);
+ writel(0, trng->base + TRNG_HEALTH_TEST_SW_RST);
+
+ ret = wait_for_completion_timeout(&trng->rng_op_done, BUSY_LOOP_TIMEOUT);
+ if (ret <= 0) {
+ dev_err(trng->dev, "Timeout waiting for Health Check\n");
+ airoha_trng_irq_mask(trng);
+ return -ENODEV;
+ }
+
+ /* Check if Health Test Failed */
+ val = readl(trng->base + TRNG_HEALTH_TEST_STATUS);
+ if (val & (RST_STARTUP_AP_TEST_FAIL | RST_STARTUP_RC_TEST_FAIL)) {
+ dev_err(trng->dev, "Health Check fail: %s test fail\n",
+ val & RST_STARTUP_AP_TEST_FAIL ? "AP" : "RC");
+ return -ENODEV;
+ }
+
+ /* Check if IP is ready */
+ ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
+ val & SAMPLE_RDY, 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for IP ready");
+ return -ENODEV;
+ }
+
+ /* CNT_TRANS must be 0x80 for IP to be considered ready */
+ ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
+ FIELD_GET(CNT_TRANS, val) == TRNG_CNT_TRANS_VALID,
+ 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for IP ready");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void airoha_trng_cleanup(struct hwrng *rng)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ u32 val;
+
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val &= ~RNG_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Put it in SW Reset */
+ writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
+}
+
+static int airoha_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
+ u32 *data = buf;
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(trng->base + TRNG_HEALTH_TEST_STATUS, status,
+ status & RAW_DATA_VALID, 10, 1000);
+ if (ret < 0) {
+ dev_err(trng->dev, "Timeout waiting for TRNG RAW Data valid\n");
+ return ret;
+ }
+
+ *data = readl(trng->base + TRNG_RAW_DATA_OUT);
+
+ return 4;
+}
+
+static irqreturn_t airoha_trng_irq(int irq, void *priv)
+{
+ struct airoha_trng *trng = (struct airoha_trng *)priv;
+
+ airoha_trng_irq_mask(trng);
+ /* Just complete the task, we will read the value later */
+ complete(&trng->rng_op_done);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_trng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct airoha_trng *trng;
+ int irq, ret;
+ u32 val;
+
+ trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return PTR_ERR(trng->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ airoha_trng_irq_mask(trng);
+ ret = devm_request_irq(&pdev->dev, irq, airoha_trng_irq, 0,
+ pdev->name, (void *)trng);
+ if (ret) {
+ dev_err(dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
+ init_completion(&trng->rng_op_done);
+
+ /* Enable interrupt for SW reset Health Check */
+ val = readl(trng->base + TRNG_INTR_EN);
+ val |= RST_STARTUP_INITR_EN;
+ writel(val, trng->base + TRNG_INTR_EN);
+
+ /* Set output to raw data */
+ val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
+ val |= RAW_DATA_EN;
+ writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
+
+ /* Put it in SW Reset */
+ writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
+
+ trng->dev = dev;
+ trng->rng.name = pdev->name;
+ trng->rng.init = airoha_trng_init;
+ trng->rng.cleanup = airoha_trng_cleanup;
+ trng->rng.read = airoha_trng_read;
+
+ ret = devm_hwrng_register(dev, &trng->rng);
+ if (ret) {
+ dev_err(dev, "failed to register rng device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id airoha_trng_of_match[] = {
+ { .compatible = "airoha,en7581-trng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, airoha_trng_of_match);
+
+static struct platform_driver airoha_trng_driver = {
+ .driver = {
+ .name = "airoha-trng",
+ .of_match_table = airoha_trng_of_match,
+ },
+ .probe = airoha_trng_probe,
+};
+
+module_platform_driver(airoha_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Airoha True Random Number Generator driver");
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index e9157255f851..143406bc6939 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
static struct platform_driver atmel_trng_driver = {
.probe = atmel_trng_probe,
- .remove_new = atmel_trng_remove,
+ .remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
.pm = pm_ptr(&atmel_trng_pm_ops),
diff --git a/drivers/char/hw_random/bcm74110-rng.c b/drivers/char/hw_random/bcm74110-rng.c
new file mode 100644
index 000000000000..5c64148e91f1
--- /dev/null
+++ b/drivers/char/hw_random/bcm74110-rng.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Broadcom
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+
+#define HOST_REV_ID 0x00
+#define HOST_FIFO_DEPTH 0x04
+#define HOST_FIFO_COUNT 0x08
+#define HOST_FIFO_THRESHOLD 0x0c
+#define HOST_FIFO_DATA 0x10
+
+#define HOST_FIFO_COUNT_MASK 0xffff
+
+/* Delay range in microseconds */
+#define FIFO_DELAY_MIN_US 3
+#define FIFO_DELAY_MAX_US 7
+#define FIFO_DELAY_MAX_COUNT 10
+
+struct bcm74110_priv {
+ void __iomem *base;
+};
+
+static inline int bcm74110_rng_fifo_count(void __iomem *mem)
+{
+ return readl_relaxed(mem) & HOST_FIFO_COUNT_MASK;
+}
+
+static int bcm74110_rng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct bcm74110_priv *priv = (struct bcm74110_priv *)rng->priv;
+ void __iomem *fc_addr = priv->base + HOST_FIFO_COUNT;
+ void __iomem *fd_addr = priv->base + HOST_FIFO_DATA;
+ unsigned underrun_count = 0;
+ u32 max_words = max / sizeof(u32);
+ u32 num_words;
+ unsigned i;
+
+ /*
+ * We need to check how many words are available in the RNG FIFO. If
+ * there aren't any, we need to wait for some to become available.
+ */
+ while ((num_words = bcm74110_rng_fifo_count(fc_addr)) == 0) {
+ if (!wait)
+ return 0;
+ /*
+ * As a precaution, limit how long we wait. If the FIFO doesn't
+ * refill within the allotted time, return 0 (=no data) to the
+ * caller.
+ */
+ if (likely(underrun_count < FIFO_DELAY_MAX_COUNT))
+ usleep_range(FIFO_DELAY_MIN_US, FIFO_DELAY_MAX_US);
+ else
+ return 0;
+ underrun_count++;
+ }
+ if (num_words > max_words)
+ num_words = max_words;
+
+ /* Bail early if we run out of random numbers unexpectedly */
+ for (i = 0; i < num_words && bcm74110_rng_fifo_count(fc_addr) > 0; i++)
+ ((u32 *)buf)[i] = readl_relaxed(fd_addr);
+
+ return i * sizeof(u32);
+}
+
+static struct hwrng bcm74110_hwrng = {
+ .read = bcm74110_rng_read,
+};
+
+static int bcm74110_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm74110_priv *priv;
+ int rc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bcm74110_hwrng.name = pdev->name;
+ bcm74110_hwrng.priv = (unsigned long)priv;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ rc = devm_hwrng_register(dev, &bcm74110_hwrng);
+ if (rc)
+ dev_err(dev, "hwrng registration failed (%d)\n", rc);
+ else
+ dev_info(dev, "hwrng registered\n");
+
+ return rc;
+}
+
+static const struct of_device_id bcm74110_rng_match[] = {
+ { .compatible = "brcm,bcm74110-rng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm74110_rng_match);
+
+static struct platform_driver bcm74110_rng_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bcm74110_rng_match,
+ },
+ .probe = bcm74110_rng_probe,
+};
+module_platform_driver(bcm74110_rng_driver);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("BCM 74110 Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 4c50efc46483..4db198849695 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -653,7 +653,7 @@ static struct platform_driver cctrng_driver = {
.pm = &cctrng_pm,
},
.probe = cctrng_probe,
- .remove_new = cctrng_remove,
+ .remove = cctrng_remove,
};
module_platform_driver(cctrng_driver);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 57c51efa5613..018316f54621 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -181,8 +181,15 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int present;
BUG_ON(!mutex_is_locked(&reading_mutex));
- if (rng->read)
- return rng->read(rng, (void *)buffer, size, wait);
+ if (rng->read) {
+ int err;
+
+ err = rng->read(rng, buffer, size, wait);
+ if (WARN_ON_ONCE(err > 0 && err > size))
+ err = size;
+
+ return err;
+ }
if (rng->data_present)
present = rng->data_present(rng, wait);
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 9f039fddaee3..02e207c09e81 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -335,7 +335,7 @@ static struct platform_driver exynos_trng_driver = {
.of_match_table = exynos_trng_dt_match,
},
.probe = exynos_trng_probe,
- .remove_new = exynos_trng_remove,
+ .remove = exynos_trng_remove,
};
module_platform_driver(exynos_trng_driver);
diff --git a/drivers/char/hw_random/histb-rng.c b/drivers/char/hw_random/histb-rng.c
index f652e1135e4b..1b91e88cc4c0 100644
--- a/drivers/char/hw_random/histb-rng.c
+++ b/drivers/char/hw_random/histb-rng.c
@@ -89,7 +89,7 @@ depth_show(struct device *dev, struct device_attribute *attr, char *buf)
struct histb_rng_priv *priv = dev_get_drvdata(dev);
void __iomem *base = priv->base;
- return sprintf(buf, "%d\n", histb_rng_get_depth(base));
+ return sprintf(buf, "%u\n", histb_rng_get_depth(base));
}
static ssize_t
diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c
index 2f9b6483c4a1..bbfd662d25a6 100644
--- a/drivers/char/hw_random/ingenic-rng.c
+++ b/drivers/char/hw_random/ingenic-rng.c
@@ -132,7 +132,7 @@ MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
static struct platform_driver ingenic_rng_driver = {
.probe = ingenic_rng_probe,
- .remove_new = ingenic_rng_remove,
+ .remove = ingenic_rng_remove,
.driver = {
.name = "ingenic-rng",
.of_match_table = ingenic_rng_of_match,
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 36c34252b4f6..d8fd8a354482 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -261,7 +261,7 @@ static struct platform_driver ks_sa_rng_driver = {
.of_match_table = ks_sa_rng_dt_match,
},
.probe = ks_sa_rng_probe,
- .remove_new = ks_sa_rng_remove,
+ .remove = ks_sa_rng_remove,
};
module_platform_driver(ks_sa_rng_driver);
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index f01eb95bee31..e3fcb8bcc29b 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -188,7 +188,7 @@ static struct platform_driver mxc_rnga_driver = {
.of_match_table = mxc_rnga_of_match,
},
.probe = mxc_rnga_probe,
- .remove_new = mxc_rnga_remove,
+ .remove = mxc_rnga_remove,
};
module_platform_driver(mxc_rnga_driver);
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 1b49e3a86d57..ea6d5599242f 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -858,7 +858,7 @@ static struct platform_driver n2rng_driver = {
.of_match_table = n2rng_match,
},
.probe = n2rng_probe,
- .remove_new = n2rng_remove,
+ .remove = n2rng_remove,
};
module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index bce8c4829a1f..9ff00f096f38 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -176,7 +176,7 @@ static struct platform_driver npcm_rng_driver = {
.of_match_table = of_match_ptr(rng_dt_id),
},
.probe = npcm_rng_probe,
- .remove_new = npcm_rng_remove,
+ .remove = npcm_rng_remove,
};
module_platform_driver(npcm_rng_driver);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 4914a8720e58..5e8b50f15db7 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -558,7 +558,7 @@ static struct platform_driver omap_rng_driver = {
.of_match_table = of_match_ptr(omap_rng_of_match),
},
.probe = omap_rng_probe,
- .remove_new = omap_rng_remove,
+ .remove = omap_rng_remove,
};
module_platform_driver(omap_rng_driver);
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 9d041a67c295..98edbe796bc5 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
@@ -49,6 +50,7 @@
struct stm32_rng_data {
uint max_clock_rate;
+ uint nb_clock;
u32 cr;
u32 nscr;
u32 htcr;
@@ -72,7 +74,7 @@ struct stm32_rng_private {
struct hwrng rng;
struct device *dev;
void __iomem *base;
- struct clk *clk;
+ struct clk_bulk_data *clk_bulk;
struct reset_control *rst;
struct stm32_rng_config pm_conf;
const struct stm32_rng_data *data;
@@ -266,7 +268,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
unsigned long clock_rate = 0;
uint clock_div = 0;
- clock_rate = clk_get_rate(priv->clk);
+ clock_rate = clk_get_rate(priv->clk_bulk[0].clk);
/*
* Get the exponent to apply on the CLKDIV field in RNG_CR register
@@ -276,7 +278,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
while ((clock_rate >> clock_div) > priv->data->max_clock_rate)
clock_div++;
- pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk) >> clock_div);
+ pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk_bulk[0].clk) >> clock_div);
return clock_div;
}
@@ -288,7 +290,7 @@ static int stm32_rng_init(struct hwrng *rng)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -328,7 +330,7 @@ static int stm32_rng_init(struct hwrng *rng)
(!(reg & RNG_CR_CONDRST)),
10, 50000);
if (err) {
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout %x!\n", __func__, reg);
return -EINVAL;
}
@@ -356,12 +358,13 @@ static int stm32_rng_init(struct hwrng *rng)
reg & RNG_SR_DRDY,
10, 100000);
if (err || (reg & ~RNG_SR_DRDY)) {
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout:%x SR: %x!\n", __func__, err, reg);
+
return -EINVAL;
}
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -379,7 +382,8 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
reg = readl_relaxed(priv->base + RNG_CR);
reg &= ~RNG_CR_RNGEN;
writel_relaxed(reg, priv->base + RNG_CR);
- clk_disable_unprepare(priv->clk);
+
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -389,7 +393,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev)
struct stm32_rng_private *priv = dev_get_drvdata(dev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -403,7 +407,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev)
writel_relaxed(priv->pm_conf.cr, priv->base + RNG_CR);
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -414,7 +418,7 @@ static int __maybe_unused stm32_rng_runtime_resume(struct device *dev)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -434,7 +438,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
int err;
u32 reg;
- err = clk_prepare_enable(priv->clk);
+ err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@@ -462,7 +466,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
reg & ~RNG_CR_CONDRST, 10, 100000);
if (err) {
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout:%x CR: %x!\n", __func__, err, reg);
return -EINVAL;
}
@@ -472,7 +476,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
writel_relaxed(reg, priv->base + RNG_CR);
}
- clk_disable_unprepare(priv->clk);
+ clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@@ -484,9 +488,19 @@ static const struct dev_pm_ops __maybe_unused stm32_rng_pm_ops = {
stm32_rng_resume)
};
+static const struct stm32_rng_data stm32mp25_rng_data = {
+ .has_cond_reset = true,
+ .max_clock_rate = 48000000,
+ .nb_clock = 2,
+ .cr = 0x00F00D00,
+ .nscr = 0x2B5BB,
+ .htcr = 0x969D,
+};
+
static const struct stm32_rng_data stm32mp13_rng_data = {
.has_cond_reset = true,
.max_clock_rate = 48000000,
+ .nb_clock = 1,
.cr = 0x00F00D00,
.nscr = 0x2B5BB,
.htcr = 0x969D,
@@ -494,11 +508,16 @@ static const struct stm32_rng_data stm32mp13_rng_data = {
static const struct stm32_rng_data stm32_rng_data = {
.has_cond_reset = false,
- .max_clock_rate = 3000000,
+ .max_clock_rate = 48000000,
+ .nb_clock = 1,
};
static const struct of_device_id stm32_rng_match[] = {
{
+ .compatible = "st,stm32mp25-rng",
+ .data = &stm32mp25_rng_data,
+ },
+ {
.compatible = "st,stm32mp13-rng",
.data = &stm32mp13_rng_data,
},
@@ -516,6 +535,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct stm32_rng_private *priv;
struct resource *res;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -525,10 +545,6 @@ static int stm32_rng_probe(struct platform_device *ofdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk = devm_clk_get(&ofdev->dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
if (!IS_ERR(priv->rst)) {
reset_control_assert(priv->rst);
@@ -551,6 +567,28 @@ static int stm32_rng_probe(struct platform_device *ofdev)
priv->rng.read = stm32_rng_read;
priv->rng.quality = 900;
+ if (!priv->data->nb_clock || priv->data->nb_clock > 2)
+ return -EINVAL;
+
+ ret = devm_clk_bulk_get_all(dev, &priv->clk_bulk);
+ if (ret != priv->data->nb_clock)
+ return dev_err_probe(dev, -EINVAL, "Failed to get clocks: %d\n", ret);
+
+ if (priv->data->nb_clock == 2) {
+ const char *id = priv->clk_bulk[1].id;
+ struct clk *clk = priv->clk_bulk[1].clk;
+
+ if (!priv->clk_bulk[0].id || !priv->clk_bulk[1].id)
+ return dev_err_probe(dev, -EINVAL, "Missing clock name\n");
+
+ if (strcmp(priv->clk_bulk[0].id, "core")) {
+ priv->clk_bulk[1].id = priv->clk_bulk[0].id;
+ priv->clk_bulk[1].clk = priv->clk_bulk[0].clk;
+ priv->clk_bulk[0].id = id;
+ priv->clk_bulk[0].clk = clk;
+ }
+ }
+
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
@@ -565,7 +603,7 @@ static struct platform_driver stm32_rng_driver = {
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
- .remove_new = stm32_rng_remove,
+ .remove = stm32_rng_remove,
};
module_platform_driver(stm32_rng_driver);
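The stm32 conversion replaces a single struct clk with the clk_bulk API so the MP25 variant's two clocks are handled uniformly; the probe-time swap above exists because the divider math in stm32_rng_clock_freq_restrain() reads clk_bulk[0].clk, so the "core" clock must end up at index 0. A minimal sketch of the consumer pattern, with foo_* names hypothetical:

#include <linux/clk.h>

static int foo_enable_clocks(struct device *dev, struct foo_priv *priv,
			     unsigned int expected)
{
	int ret;

	/* Grabs every clock listed in the device's DT node. */
	ret = devm_clk_bulk_get_all(dev, &priv->clk_bulk);
	if (ret < 0)
		return ret;
	if (ret != expected)
		return dev_err_probe(dev, -EINVAL,
				     "expected %u clocks, got %d\n",
				     expected, ret);

	/* Prepares and enables all of them, unwinding on failure. */
	return clk_bulk_prepare_enable(expected, priv->clk_bulk);
}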
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 65b8260339f5..7174bfccc7b3 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -193,7 +193,7 @@ static struct platform_driver timeriomem_rng_driver = {
.of_match_table = timeriomem_rng_match,
},
.probe = timeriomem_rng_probe,
- .remove_new = timeriomem_rng_remove,
+ .remove = timeriomem_rng_remove,
};
module_platform_driver(timeriomem_rng_driver);
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 642d13519464..39acaa503fec 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -375,7 +375,7 @@ MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
static struct platform_driver xgene_rng_driver = {
.probe = xgene_rng_probe,
- .remove_new = xgene_rng_remove,
+ .remove = xgene_rng_remove,
.driver = {
.name = "xgene-rng",
.of_match_table = xgene_rng_of_match,
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index b8b9c07d3b5d..009e32033b17 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -481,7 +481,7 @@ static struct platform_driver bt_bmc_driver = {
.of_match_table = bt_bmc_match,
},
.probe = bt_bmc_probe,
- .remove_new = bt_bmc_remove,
+ .remove = bt_bmc_remove,
};
module_platform_driver(bt_bmc_driver);
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 7296127181ec..ee2bdc7ed0da 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -321,6 +321,9 @@ static int ipmb_probe(struct i2c_client *client)
ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL,
"%s%d", "ipmb-",
client->adapter->nr);
+ if (!ipmb_dev->miscdev.name)
+ return -ENOMEM;
+
ipmb_dev->miscdev.fops = &ipmb_fops;
ipmb_dev->miscdev.parent = &client->dev;
ret = misc_register(&ipmb_dev->miscdev);
@@ -355,11 +358,13 @@ static const struct i2c_device_id ipmb_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ipmb_id);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id acpi_ipmb_id[] = {
{ "IPMB0001", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, acpi_ipmb_id);
+#endif
static struct i2c_driver ipmb_driver = {
.driver = {
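The ipmb fix above closes a small but common hole: devm_kasprintf() allocates, so its result must be checked before use. The pattern in isolation, as a sketch with hypothetical names:

static int foo_set_name(struct device *dev, struct miscdevice *miscdev, int nr)
{
	/* devm_kasprintf() returns NULL on allocation failure */
	miscdev->name = devm_kasprintf(dev, GFP_KERNEL, "foo-%d", nr);
	if (!miscdev->name)
		return -ENOMEM;
	return 0;
}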
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 332082e02ea5..e6ba35b71f10 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -122,12 +122,9 @@ out:
static int ipmi_release(struct inode *inode, struct file *file)
{
struct ipmi_file_private *priv = file->private_data;
- int rv;
struct ipmi_recv_msg *msg, *next;
- rv = ipmi_destroy_user(priv->user);
- if (rv)
- return rv;
+ ipmi_destroy_user(priv->user);
list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
ipmi_free_recv_msg(msg);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e12b531f5c2f..1e5313748f8b 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1398,13 +1398,11 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
module_put(owner);
}
-int ipmi_destroy_user(struct ipmi_user *user)
+void ipmi_destroy_user(struct ipmi_user *user)
{
_ipmi_destroy_user(user);
kref_put(&user->refcount, free_user);
-
- return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index c59a86eb58c7..4a2efafcd1f8 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -302,7 +302,7 @@ static struct platform_driver powernv_ipmi_driver = {
.of_match_table = ipmi_powernv_match,
},
.probe = ipmi_powernv_probe,
- .remove_new = ipmi_powernv_remove,
+ .remove = ipmi_powernv_remove,
};
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 941d2dcc8c9d..e63c316d8aaa 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -650,7 +650,7 @@ static struct ipmi_smi_watcher smi_watcher = {
#ifdef CONFIG_PROC_FS
#include <linux/sysctl.h>
-static struct ctl_table ipmi_table[] = {
+static const struct ctl_table ipmi_table[] = {
{ .procname = "poweroff_powercycle",
.data = &poweroff_powercycle,
.maxlen = sizeof(poweroff_powercycle),
@@ -699,8 +699,6 @@ static int __init ipmi_poweroff_init(void)
#ifdef MODULE
static void __exit ipmi_poweroff_cleanup(void)
{
- int rv;
-
#ifdef CONFIG_PROC_FS
unregister_sysctl_table(ipmi_table_header);
#endif
@@ -708,9 +706,7 @@ static void __exit ipmi_poweroff_cleanup(void)
ipmi_smi_watcher_unregister(&smi_watcher);
if (ready) {
- rv = ipmi_destroy_user(ipmi_user);
- if (rv)
- pr_err("could not cleanup the IPMI user: 0x%x\n", rv);
+ ipmi_destroy_user(ipmi_user);
pm_power_off = old_poweroff_func;
}
}
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index b83d55685b22..8c0ea637aba0 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -118,7 +118,7 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
if (io.irq)
io.irq_setup = ipmi_std_irq_setup;
- dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ dev_info(&pdev->dev, "%pR regsize %u spacing %u irq %d\n",
&pdev->resource[0], io.regsize, io.regspacing, io.irq);
return ipmi_si_add_smi(&io);
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 96ba85648120..550cabd43ae6 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -445,7 +445,7 @@ struct platform_driver ipmi_platform_driver = {
.acpi_match_table = ACPI_PTR(acpi_ipmi_match),
},
.probe = ipmi_probe,
- .remove_new = ipmi_remove,
+ .remove = ipmi_remove,
.id_table = si_plat_ids
};
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index d04b391048fb..506d9988721e 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -2114,7 +2114,7 @@ static struct platform_driver ipmi_driver = {
.name = DEVICE_NAME,
},
.probe = ssif_platform_probe,
- .remove_new = ssif_platform_remove,
+ .remove = ssif_platform_remove,
.id_table = ssif_plat_ids
};
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 335eea80054e..f1875b2bebbc 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1064,7 +1064,6 @@ static void ipmi_register_watchdog(int ipmi_intf)
static void ipmi_unregister_watchdog(int ipmi_intf)
{
- int rv;
struct ipmi_user *loc_user = watchdog_user;
if (!loc_user)
@@ -1089,9 +1088,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
mutex_lock(&ipmi_watchdog_mutex);
/* Disconnect from IPMI. */
- rv = ipmi_destroy_user(loc_user);
- if (rv)
- pr_warn("error unlinking from IPMI: %d\n", rv);
+ ipmi_destroy_user(loc_user);
/* If it comes back, restart it properly. */
ipmi_start_timer_on_heartbeat = 1;
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index 227bf06c7ca4..c03bc1ec593a 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -672,7 +672,7 @@ static struct platform_driver ast_kcs_bmc_driver = {
.of_match_table = ast_kcs_bmc_match,
},
.probe = aspeed_kcs_probe,
- .remove_new = aspeed_kcs_remove,
+ .remove = aspeed_kcs_remove,
};
module_platform_driver(ast_kcs_bmc_driver);
diff --git a/drivers/char/ipmi/kcs_bmc_npcm7xx.c b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
index 07710198233a..4808a61bf273 100644
--- a/drivers/char/ipmi/kcs_bmc_npcm7xx.c
+++ b/drivers/char/ipmi/kcs_bmc_npcm7xx.c
@@ -241,7 +241,7 @@ static struct platform_driver npcm_kcs_bmc_driver = {
.of_match_table = npcm_kcs_bmc_match,
},
.probe = npcm7xx_kcs_probe,
- .remove_new = npcm7xx_kcs_remove,
+ .remove = npcm7xx_kcs_remove,
};
module_platform_driver(npcm_kcs_bmc_driver);
diff --git a/drivers/char/ipmi/ssif_bmc.c b/drivers/char/ipmi/ssif_bmc.c
index a14fafc583d4..310f17dd9511 100644
--- a/drivers/char/ipmi/ssif_bmc.c
+++ b/drivers/char/ipmi/ssif_bmc.c
@@ -292,7 +292,6 @@ static void complete_response(struct ssif_bmc_ctx *ssif_bmc)
ssif_bmc->nbytes_processed = 0;
ssif_bmc->remain_len = 0;
ssif_bmc->busy = false;
- memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
wake_up_all(&ssif_bmc->wait_queue);
}
@@ -744,9 +743,11 @@ static void on_stop_event(struct ssif_bmc_ctx *ssif_bmc, u8 *val)
ssif_bmc->aborting = true;
}
} else if (ssif_bmc->state == SSIF_RES_SENDING) {
- if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF)
+ if (ssif_bmc->is_singlepart_read || ssif_bmc->block_num == 0xFF) {
+ memset(&ssif_bmc->part_buf, 0, sizeof(struct ssif_part_buffer));
/* Invalidate response buffer to denote it is sent */
complete_response(ssif_bmc);
+ }
ssif_bmc->state = SSIF_READY;
}
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 541edc26ec89..2cf595d2e10b 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -63,16 +63,30 @@ static DEFINE_MUTEX(misc_mtx);
#define DYNAMIC_MINORS 128 /* like dynamic majors */
static DEFINE_IDA(misc_minors_ida);
-static int misc_minor_alloc(void)
+static int misc_minor_alloc(int minor)
{
- int ret;
-
- ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
- if (ret >= 0) {
- ret = DYNAMIC_MINORS - ret - 1;
+ int ret = 0;
+
+ if (minor == MISC_DYNAMIC_MINOR) {
+ /* allocate free id */
+ ret = ida_alloc_max(&misc_minors_ida, DYNAMIC_MINORS - 1, GFP_KERNEL);
+ if (ret >= 0) {
+ ret = DYNAMIC_MINORS - ret - 1;
+ } else {
+ ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
+ MINORMASK, GFP_KERNEL);
+ }
} else {
- ret = ida_alloc_range(&misc_minors_ida, MISC_DYNAMIC_MINOR + 1,
- MINORMASK, GFP_KERNEL);
+ /* specific minor, check if it is in dynamic or misc dynamic range */
+ if (minor < DYNAMIC_MINORS) {
+ minor = DYNAMIC_MINORS - minor - 1;
+ ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
+ } else if (minor > MISC_DYNAMIC_MINOR) {
+ ret = ida_alloc_range(&misc_minors_ida, minor, minor, GFP_KERNEL);
+ } else {
+ /* case of non-dynamic minors, no need to allocate id */
+ ret = 0;
+ }
}
return ret;
}
@@ -219,7 +233,7 @@ int misc_register(struct miscdevice *misc)
mutex_lock(&misc_mtx);
if (is_dynamic) {
- int i = misc_minor_alloc();
+ int i = misc_minor_alloc(misc->minor);
if (i < 0) {
err = -EBUSY;
@@ -228,6 +242,7 @@ int misc_register(struct miscdevice *misc)
misc->minor = i;
} else {
struct miscdevice *c;
+ int i;
list_for_each_entry(c, &misc_list, list) {
if (c->minor == misc->minor) {
@@ -235,6 +250,12 @@ int misc_register(struct miscdevice *misc)
goto out;
}
}
+
+ i = misc_minor_alloc(misc->minor);
+ if (i < 0) {
+ err = -EBUSY;
+ goto out;
+ }
}
dev = MKDEV(MISC_MAJOR, misc->minor);
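misc_minor_alloc() now also reserves fixed minors in the IDA, so dynamic allocation can never hand out a number a driver has statically claimed. Note the mirroring: dynamic minors are stored as DYNAMIC_MINORS - id - 1, so a fixed minor below DYNAMIC_MINORS must be mapped back into IDA space before it can be reserved. A sketch of just that mapping, assuming the same constants:

static int reserve_fixed_minor(struct ida *ida, int minor)
{
	/* Dynamic minors are handed out top-down from DYNAMIC_MINORS - 1,
	 * so the IDA id and the minor number mirror each other. */
	if (minor < DYNAMIC_MINORS)
		minor = DYNAMIC_MINORS - minor - 1;	/* map into IDA space */
	return ida_alloc_range(ida, minor, minor, GFP_KERNEL);
}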
diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c
index f2cff1a6fed5..53467b0a6187 100644
--- a/drivers/char/powernv-op-panel.c
+++ b/drivers/char/powernv-op-panel.c
@@ -213,7 +213,7 @@ static struct platform_driver oppanel_driver = {
.of_match_table = oppanel_match,
},
.probe = oppanel_probe,
- .remove_new = oppanel_remove,
+ .remove = oppanel_remove,
};
module_platform_driver(oppanel_driver);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 23ee76bbb4aa..2581186fa61b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1665,7 +1665,7 @@ static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}
-static struct ctl_table random_table[] = {
+static const struct ctl_table random_table[] = {
{
.procname = "poolsize",
.data = &sysctl_poolsize,
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 0f8185e541ed..f887569fd3d0 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1467,7 +1467,7 @@ static struct platform_driver sonypi_driver = {
.pm = SONYPI_PM,
},
.probe = sonypi_probe,
- .remove_new = sonypi_remove,
+ .remove = sonypi_remove,
.shutdown = sonypi_shutdown,
};
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index cf0be8a7939d..0fc9a510e059 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -162,7 +162,7 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
- depends on PPC64 || HAS_IOPORT_MAP
+ depends on HAS_IOPORT_MAP
depends on HAS_IOPORT
help
If you have a TPM security chip from Atmel say Yes and it
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 69533d0bfb51..cf02ec646f46 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -63,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
return n == 0;
}
+static void tpm_bios_log_free(void *data)
+{
+ kvfree(data);
+}
+
/* read binary bios log */
int tpm_read_log_acpi(struct tpm_chip *chip)
{
@@ -136,7 +141,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
+ log->bios_event_log = kvmalloc(len, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
@@ -161,10 +166,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
goto err;
}
+ ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log);
+ if (ret) {
+ log->bios_event_log = NULL;
+ goto err;
+ }
+
return format;
err:
- devm_kfree(&chip->dev, log->bios_event_log);
+ tpm_bios_log_free(log->bios_event_log);
log->bios_event_log = NULL;
return ret;
}
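The event-log buffer moves from devm_kmalloc() to kvmalloc(), since firmware logs can be large enough that a physically contiguous allocation may fail; a devm action is then registered so the kvfree() still happens automatically at device teardown. The pairing in isolation, as a sketch with hypothetical foo_* names:

static void foo_free(void *data)
{
	kvfree(data);
}

static void *foo_alloc_log(struct device *dev, size_t len)
{
	void *log = kvmalloc(len, GFP_KERNEL);

	if (!log)
		return NULL;

	/* On failure we must free manually; on success devm owns the buffer. */
	if (devm_add_action(dev, foo_free, log)) {
		kvfree(log);
		return NULL;
	}
	return log;
}

devm_add_action_or_reset() would collapse the failure branch into one call; the patch keeps the explicit form because it clears log->bios_event_log on the error path as well.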
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
index cad0048bcc3c..e49a19fea3bd 100644
--- a/drivers/char/tpm/tpm-buf.c
+++ b/drivers/char/tpm/tpm-buf.c
@@ -147,6 +147,26 @@ void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
EXPORT_SYMBOL_GPL(tpm_buf_append_u32);
/**
+ * tpm_buf_append_handle() - Add a handle
+ * @chip: &tpm_chip instance
+ * @buf: &tpm_buf instance
+ * @handle: a TPM object handle
+ *
+ * Add a handle to the buffer, and increase the count tracking the number of
+ * handles in the command buffer. Works only for command buffers.
+ */
+void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle)
+{
+ if (buf->flags & TPM_BUF_TPM2B) {
+ dev_err(&chip->dev, "Invalid buffer type (TPM2B)\n");
+ return;
+ }
+
+ tpm_buf_append_u32(buf, handle);
+ buf->handles++;
+}
+
+/**
* tpm_buf_read() - Read from a TPM buffer
* @buf: &tpm_buf instance
* @offset: offset within the buffer
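tpm_buf_append_handle() factors out the handle-plus-count bookkeeping that tpm_buf_append_name() previously open-coded (see the tpm2-sessions.c hunk below); the count matters because the sessions area of a TPM2 command begins at TPM_HEADER_SIZE + 4 * handles. A hypothetical call site, mirroring the disable_pcr_integrity path added to tpm2-cmd.c below:

static void foo_append_pcr_handle(struct tpm_chip *chip, struct tpm_buf *buf,
				  u32 pcr_idx)
{
	tpm_buf_append_handle(chip, buf, pcr_idx);	/* u32 + handle count */
	tpm_buf_append_auth(chip, buf, 0, NULL, 0);	/* empty password auth */
}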
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 854546000c92..7df7abaf3e52 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -525,10 +525,6 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
- /* Give back zero bytes, as TPM chip has not yet fully resumed: */
- if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
- return 0;
-
return tpm_get_random(chip, data, max);
}
@@ -674,6 +670,16 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
*/
void tpm_chip_unregister(struct tpm_chip *chip)
{
+#ifdef CONFIG_TCG_TPM2_HMAC
+ int rc;
+
+ rc = tpm_try_get_ops(chip);
+ if (!rc) {
+ tpm2_end_auth_session(chip);
+ tpm_put_ops(chip);
+ }
+#endif
+
tpm_del_legacy_sysfs(chip);
if (tpm_is_hwrng_enabled(chip))
hwrng_unregister(&chip->hwrng);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index c3fbbf4d3db7..48ff87444f85 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -27,6 +27,9 @@ static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
struct tpm_header *header = (void *)buf;
ssize_t ret, len;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ tpm2_end_auth_session(chip);
+
ret = tpm2_prepare_space(chip, space, buf, bufsiz);
/* If the command is not implemented by the TPM, synthesize a
* response with a TPM2_RC_COMMAND_CODE return for user-space.
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 5da134f12c9a..b1daa0d7b341 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -370,6 +370,13 @@ int tpm_pm_suspend(struct device *dev)
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc) {
+ /* Can be safely set outside of locks, as no action can race: */
+ chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+ goto out;
+ }
+
if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
goto suspended;
@@ -377,19 +384,19 @@ int tpm_pm_suspend(struct device *dev)
!pm_suspend_via_firmware())
goto suspended;
- rc = tpm_try_get_ops(chip);
- if (!rc) {
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- tpm2_shutdown(chip, TPM2_SU_STATE);
- else
- rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
-
- tpm_put_ops(chip);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ tpm2_end_auth_session(chip);
+ tpm2_shutdown(chip, TPM2_SU_STATE);
+ goto suspended;
}
+ rc = tpm1_pm_suspend(chip, tpm_suspend_pcr);
+
suspended:
chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+ tpm_put_ops(chip);
+out:
if (rc)
dev_err(dev, "Ignoring error %d while suspending\n", rc);
return 0;
@@ -438,11 +445,18 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!chip)
return -ENODEV;
+ /* Give back zero bytes, as the TPM chip has not yet fully resumed: */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) {
+ rc = 0;
+ goto out;
+ }
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_get_random(chip, out, max);
else
rc = tpm1_get_random(chip, out, max);
+out:
tpm_put_ops(chip);
return rc;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 1e856259219e..dfdcbd009720 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -14,6 +14,10 @@
#include "tpm.h"
#include <crypto/hash_info.h>
+static bool disable_pcr_integrity;
+module_param(disable_pcr_integrity, bool, 0444);
+MODULE_PARM_DESC(disable_pcr_integrity, "Disable integrity protection of TPM2_PCR_Extend");
+
static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM_ALG_SHA1},
{HASH_ALGO_SHA256, TPM_ALG_SHA256},
@@ -232,18 +236,26 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
int rc;
int i;
- rc = tpm2_start_auth_session(chip);
- if (rc)
- return rc;
+ if (!disable_pcr_integrity) {
+ rc = tpm2_start_auth_session(chip);
+ if (rc)
+ return rc;
+ }
rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
if (rc) {
- tpm2_end_auth_session(chip);
+ if (!disable_pcr_integrity)
+ tpm2_end_auth_session(chip);
return rc;
}
- tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
- tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
+ if (!disable_pcr_integrity) {
+ tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
+ } else {
+ tpm_buf_append_handle(chip, &buf, pcr_idx);
+ tpm_buf_append_auth(chip, &buf, 0, NULL, 0);
+ }
tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
@@ -253,9 +265,11 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
chip->allocated_banks[i].digest_size);
}
- tpm_buf_fill_hmac_session(chip, &buf);
+ if (!disable_pcr_integrity)
+ tpm_buf_fill_hmac_session(chip, &buf);
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
- rc = tpm_buf_check_hmac_response(chip, &buf, rc);
+ if (!disable_pcr_integrity)
+ rc = tpm_buf_check_hmac_response(chip, &buf, rc);
tpm_buf_destroy(&buf);
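Since tpm2-cmd.c is linked into the tpm module, the new knob should surface as tpm.disable_pcr_integrity; with permissions 0444 it is read-only after boot, so it trades the HMAC session protecting TPM2_PCR_Extend for lower per-extend overhead only when set on the kernel command line. Assuming that module naming:

    tpm.disable_pcr_integrity=1                               (boot parameter)
    cat /sys/module/tpm/parameters/disable_pcr_integrity      (should print Y)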
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 511c67061728..b70165b588ec 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -237,9 +237,7 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
#endif
if (!tpm2_chip_auth(chip)) {
- tpm_buf_append_u32(buf, handle);
- /* count the number of handles in the upper bits of flags */
- buf->handles++;
+ tpm_buf_append_handle(chip, buf, handle);
return;
}
@@ -272,6 +270,31 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
}
EXPORT_SYMBOL_GPL(tpm_buf_append_name);
+void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 attributes, u8 *passphrase, int passphrase_len)
+{
+ /* offset tells us where the sessions area begins */
+ int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+ u32 len = 9 + passphrase_len;
+
+ if (tpm_buf_length(buf) != offset) {
+ /* not the first session so update the existing length */
+ len += get_unaligned_be32(&buf->data[offset]);
+ put_unaligned_be32(len, &buf->data[offset]);
+ } else {
+ tpm_buf_append_u32(buf, len);
+ }
+ /* auth handle */
+ tpm_buf_append_u32(buf, TPM2_RS_PW);
+ /* nonce */
+ tpm_buf_append_u16(buf, 0);
+ /* attributes */
+ tpm_buf_append_u8(buf, 0);
+ /* passphrase */
+ tpm_buf_append_u16(buf, passphrase_len);
+ tpm_buf_append(buf, passphrase, passphrase_len);
+}
+
/**
* tpm_buf_append_hmac_session() - Append a TPM session element
* @chip: the TPM chip structure
@@ -309,30 +332,15 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
#endif
if (!tpm2_chip_auth(chip)) {
- /* offset tells us where the sessions area begins */
- int offset = buf->handles * 4 + TPM_HEADER_SIZE;
- u32 len = 9 + passphrase_len;
-
- if (tpm_buf_length(buf) != offset) {
- /* not the first session so update the existing length */
- len += get_unaligned_be32(&buf->data[offset]);
- put_unaligned_be32(len, &buf->data[offset]);
- } else {
- tpm_buf_append_u32(buf, len);
- }
- /* auth handle */
- tpm_buf_append_u32(buf, TPM2_RS_PW);
- /* nonce */
- tpm_buf_append_u16(buf, 0);
- /* attributes */
- tpm_buf_append_u8(buf, 0);
- /* passphrase */
- tpm_buf_append_u16(buf, passphrase_len);
- tpm_buf_append(buf, passphrase, passphrase_len);
+ tpm_buf_append_auth(chip, buf, attributes, passphrase,
+ passphrase_len);
return;
}
#ifdef CONFIG_TCG_TPM2_HMAC
+ /* The first write to /dev/tpm{rm0} will flush the session. */
+ attributes |= TPM2_SA_CONTINUE_SESSION;
+
/*
* The Architecture Guide requires us to strip trailing zeros
* before computing the HMAC
@@ -484,7 +492,8 @@ static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
sha256_final(&sctx, out);
}
-static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
+static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip,
+ struct tpm2_auth *auth)
{
struct crypto_kpp *kpp;
struct kpp_request *req;
@@ -543,7 +552,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
sg_set_buf(&s[0], chip->null_ec_key_x, EC_PT_SZ);
sg_set_buf(&s[1], chip->null_ec_key_y, EC_PT_SZ);
kpp_request_set_input(req, s, EC_PT_SZ*2);
- sg_init_one(d, chip->auth->salt, EC_PT_SZ);
+ sg_init_one(d, auth->salt, EC_PT_SZ);
kpp_request_set_output(req, d, EC_PT_SZ);
crypto_kpp_compute_shared_secret(req);
kpp_request_free(req);
@@ -554,8 +563,7 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
* This works because KDFe fully consumes the secret before it
* writes the salt
*/
- tpm2_KDFe(chip->auth->salt, "SECRET", x, chip->null_ec_key_x,
- chip->auth->salt);
+ tpm2_KDFe(auth->salt, "SECRET", x, chip->null_ec_key_x, auth->salt);
out:
crypto_free_kpp(kpp);
@@ -853,7 +861,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
if (rc)
/* manually close the session if it wasn't consumed */
tpm2_flush_context(chip, auth->handle);
- memzero_explicit(auth, sizeof(*auth));
+
+ kfree_sensitive(auth);
+ chip->auth = NULL;
} else {
/* reset for next use */
auth->session = TPM_HEADER_SIZE;
@@ -881,7 +891,8 @@ void tpm2_end_auth_session(struct tpm_chip *chip)
return;
tpm2_flush_context(chip, auth->handle);
- memzero_explicit(auth, sizeof(*auth));
+ kfree_sensitive(auth);
+ chip->auth = NULL;
}
EXPORT_SYMBOL(tpm2_end_auth_session);
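With struct tpm2_auth now heap-allocated per session, teardown switches from memzero_explicit() (wipe, keep the allocation) to kfree_sensitive() (wipe, then free), and chip->auth is NULLed so a stale pointer cannot be reused. The idiom in isolation, as a sketch with hypothetical names:

static void foo_drop_secret(struct foo_ctx *ctx)
{
	kfree_sensitive(ctx->secret);	/* memzero_explicit() + kfree() */
	ctx->secret = NULL;		/* no reuse through a stale pointer */
}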
@@ -915,32 +926,39 @@ static int tpm2_parse_start_auth_session(struct tpm2_auth *auth,
static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
{
- int rc;
unsigned int offset = 0; /* dummy offset for null seed context */
u8 name[SHA256_DIGEST_SIZE + 2];
+ u32 tmp_null_key;
+ int rc;
rc = tpm2_load_context(chip, chip->null_key_context, &offset,
- null_key);
- if (rc != -EINVAL)
- return rc;
+ &tmp_null_key);
+ if (rc != -EINVAL) {
+ if (!rc)
+ *null_key = tmp_null_key;
+ goto err;
+ }
- /* an integrity failure may mean the TPM has been reset */
- dev_err(&chip->dev, "NULL key integrity failure!\n");
- /* check the null name against what we know */
- tpm2_create_primary(chip, TPM2_RH_NULL, NULL, name);
- if (memcmp(name, chip->null_key_name, sizeof(name)) == 0)
- /* name unchanged, assume transient integrity failure */
- return rc;
- /*
- * Fatal TPM failure: the NULL seed has actually changed, so
- * the TPM must have been illegally reset. All in-kernel TPM
- * operations will fail because the NULL primary can't be
- * loaded to salt the sessions, but disable the TPM anyway so
- * userspace programmes can't be compromised by it.
- */
- dev_err(&chip->dev, "NULL name has changed, disabling TPM due to interference\n");
- chip->flags |= TPM_CHIP_FLAG_DISABLE;
+ /* Try to re-create null key, given the integrity failure: */
+ rc = tpm2_create_primary(chip, TPM2_RH_NULL, &tmp_null_key, name);
+ if (rc)
+ goto err;
+
+ /* Return null key if the name has not been changed: */
+ if (!memcmp(name, chip->null_key_name, sizeof(name))) {
+ *null_key = tmp_null_key;
+ return 0;
+ }
+
+ /* Deduce TPM interference from the name change: */
+ dev_err(&chip->dev, "null key integrity check failed\n");
+ tpm2_flush_context(chip, tmp_null_key);
+err:
+ if (rc) {
+ chip->flags |= TPM_CHIP_FLAG_DISABLE;
+ rc = -ENODEV;
+ }
return rc;
}
@@ -958,16 +976,20 @@ static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key)
*/
int tpm2_start_auth_session(struct tpm_chip *chip)
{
+ struct tpm2_auth *auth;
struct tpm_buf buf;
- struct tpm2_auth *auth = chip->auth;
- int rc;
u32 null_key;
+ int rc;
- if (!auth) {
- dev_warn_once(&chip->dev, "auth session is not active\n");
+ if (chip->auth) {
+ dev_warn_once(&chip->dev, "auth session is active\n");
return 0;
}
+ auth = kzalloc(sizeof(*auth), GFP_KERNEL);
+ if (!auth)
+ return -ENOMEM;
+
rc = tpm2_load_null(chip, &null_key);
if (rc)
goto out;
@@ -988,7 +1010,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
tpm_buf_append(&buf, auth->our_nonce, sizeof(auth->our_nonce));
/* append encrypted salt and squirrel away unencrypted in auth */
- tpm_buf_append_salt(&buf, chip);
+ tpm_buf_append_salt(&buf, chip, auth);
/* session type (HMAC, audit or policy) */
tpm_buf_append_u8(&buf, TPM2_SE_HMAC);
@@ -1010,10 +1032,13 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
tpm_buf_destroy(&buf);
- if (rc)
- goto out;
+ if (rc == TPM2_RC_SUCCESS) {
+ chip->auth = auth;
+ return 0;
+ }
- out:
+out:
+ kfree_sensitive(auth);
return rc;
}
EXPORT_SYMBOL(tpm2_start_auth_session);
@@ -1347,20 +1372,22 @@ static int tpm2_create_null_primary(struct tpm_chip *chip)
*
* Derive and context save the null primary and allocate memory in the
* struct tpm_chip for the authorizations.
+ *
+ * Return:
+ * * 0 - OK
+ * * -errno - A system error
+ * * TPM_RC - A TPM error
*/
int tpm2_sessions_init(struct tpm_chip *chip)
{
int rc;
rc = tpm2_create_null_primary(chip);
- if (rc)
- dev_err(&chip->dev, "TPM: security failed (NULL seed derivation): %d\n", rc);
-
- chip->auth = kmalloc(sizeof(*chip->auth), GFP_KERNEL);
- if (!chip->auth)
- return -ENOMEM;
+ if (rc) {
+ dev_err(&chip->dev, "null key creation failed with %d\n", rc);
+ return rc;
+ }
return rc;
}
-EXPORT_SYMBOL(tpm2_sessions_init);
#endif /* CONFIG_TCG_TPM2_HMAC */
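Taken together, the tpm2-sessions.c changes move the auth session from a long-lived buffer allocated once in tpm2_sessions_init() to one allocated lazily by tpm2_start_auth_session() and freed by tpm2_end_auth_session(); a non-NULL chip->auth now means "a session is active". A sketch of the resulting lifecycle under those assumptions (in practice the session is usually consumed by tpm_buf_check_hmac_response() instead of ended explicitly):

static int foo_use_session(struct tpm_chip *chip)
{
	int rc;

	rc = tpm2_start_auth_session(chip);	/* kzalloc()s chip->auth */
	if (rc)
		return rc;

	/* ... build and transmit an HMAC-protected command ... */

	tpm2_end_auth_session(chip);		/* kfree_sensitive() + NULL */
	return 0;
}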
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 9fb2defa9dc4..54a0360a3c95 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -15,7 +15,66 @@
*/
#include "tpm.h"
-#include "tpm_atmel.h"
+
+struct tpm_atmel_priv {
+ int region_size;
+ int have_region;
+ unsigned long base;
+ void __iomem *iobase;
+};
+
+#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + (offset))
+#define atmel_putb(val, chip, offset) \
+ outb(val, atmel_get_priv(chip)->base + (offset))
+#define atmel_request_region request_region
+#define atmel_release_region release_region
+/* Atmel definitions */
+enum tpm_atmel_addr {
+ TPM_ATMEL_BASE_ADDR_LO = 0x08,
+ TPM_ATMEL_BASE_ADDR_HI = 0x09
+};
+
+static inline int tpm_read_index(int base, int index)
+{
+ outb(index, base);
+ return inb(base + 1) & 0xFF;
+}
+
+/* Verify this is a 1.1 Atmel TPM */
+static int atmel_verify_tpm11(void)
+{
+ /* verify that it is an Atmel part */
+ if (tpm_read_index(TPM_ADDR, 4) != 'A' ||
+ tpm_read_index(TPM_ADDR, 5) != 'T' ||
+ tpm_read_index(TPM_ADDR, 6) != 'M' ||
+ tpm_read_index(TPM_ADDR, 7) != 'L')
+ return 1;
+
+ /* query chip for its version number */
+ if (tpm_read_index(TPM_ADDR, 0x00) != 1 ||
+ tpm_read_index(TPM_ADDR, 0x01) != 1)
+ return 1;
+
+ /* This is an atmel supported part */
+ return 0;
+}
+
+/* Determine where to talk to device */
+static void __iomem *atmel_get_base_addr(unsigned long *base, int *region_size)
+{
+ int lo, hi;
+
+ if (atmel_verify_tpm11() != 0)
+ return NULL;
+
+ lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
+ hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
+
+ *base = (hi << 8) | lo;
+ *region_size = 2;
+
+ return ioport_map(*base, *region_size);
+}
/* write status bits */
enum tpm_atmel_write_status {
@@ -142,7 +201,6 @@ static void atml_plat_remove(void)
tpm_chip_unregister(chip);
if (priv->have_region)
atmel_release_region(priv->base, priv->region_size);
- atmel_put_base_addr(priv->iobase);
platform_device_unregister(pdev);
}
@@ -211,7 +269,6 @@ static int __init init_atmel(void)
err_unreg_dev:
platform_device_unregister(pdev);
err_rel_reg:
- atmel_put_base_addr(iobase);
if (have_region)
atmel_release_region(base,
region_size);
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
deleted file mode 100644
index 7ac3f69dcf0f..000000000000
--- a/drivers/char/tpm/tpm_atmel.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2005 IBM Corporation
- *
- * Authors:
- * Kylene Hall <kjhall@us.ibm.com>
- *
- * Maintained by: <tpmdd-devel@lists.sourceforge.net>
- *
- * Device driver for TCG/TCPA TPM (trusted platform module).
- * Specifications at www.trustedcomputinggroup.org
- *
- * These difference are required on power because the device must be
- * discovered through the device tree and iomap must be used to get
- * around the need for holes in the io_page_mask. This does not happen
- * automatically because the tpm is not a normal pci device and lives
- * under the root node.
- */
-
-struct tpm_atmel_priv {
- int region_size;
- int have_region;
- unsigned long base;
- void __iomem *iobase;
-};
-
-#ifdef CONFIG_PPC64
-
-#include <linux/of.h>
-
-#define atmel_getb(priv, offset) readb(priv->iobase + offset)
-#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset)
-#define atmel_request_region request_mem_region
-#define atmel_release_region release_mem_region
-
-static inline void atmel_put_base_addr(void __iomem *iobase)
-{
- iounmap(iobase);
-}
-
-static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
-{
- struct device_node *dn;
- unsigned long address, size;
- const unsigned int *reg;
- int reglen;
- int naddrc;
- int nsizec;
-
- dn = of_find_node_by_name(NULL, "tpm");
-
- if (!dn)
- return NULL;
-
- if (!of_device_is_compatible(dn, "AT97SC3201")) {
- of_node_put(dn);
- return NULL;
- }
-
- reg = of_get_property(dn, "reg", &reglen);
- naddrc = of_n_addr_cells(dn);
- nsizec = of_n_size_cells(dn);
-
- of_node_put(dn);
-
-
- if (naddrc == 2)
- address = ((unsigned long) reg[0] << 32) | reg[1];
- else
- address = reg[0];
-
- if (nsizec == 2)
- size =
- ((unsigned long) reg[naddrc] << 32) | reg[naddrc + 1];
- else
- size = reg[naddrc];
-
- *base = address;
- *region_size = size;
- return ioremap(*base, *region_size);
-}
-#else
-#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset)
-#define atmel_putb(val, chip, offset) \
- outb(val, atmel_get_priv(chip)->base + offset)
-#define atmel_request_region request_region
-#define atmel_release_region release_region
-/* Atmel definitions */
-enum tpm_atmel_addr {
- TPM_ATMEL_BASE_ADDR_LO = 0x08,
- TPM_ATMEL_BASE_ADDR_HI = 0x09
-};
-
-static inline int tpm_read_index(int base, int index)
-{
- outb(index, base);
- return inb(base+1) & 0xFF;
-}
-
-/* Verify this is a 1.1 Atmel TPM */
-static int atmel_verify_tpm11(void)
-{
-
- /* verify that it is an Atmel part */
- if (tpm_read_index(TPM_ADDR, 4) != 'A' ||
- tpm_read_index(TPM_ADDR, 5) != 'T' ||
- tpm_read_index(TPM_ADDR, 6) != 'M' ||
- tpm_read_index(TPM_ADDR, 7) != 'L')
- return 1;
-
- /* query chip for its version number */
- if (tpm_read_index(TPM_ADDR, 0x00) != 1 ||
- tpm_read_index(TPM_ADDR, 0x01) != 1)
- return 1;
-
- /* This is an atmel supported part */
- return 0;
-}
-
-static inline void atmel_put_base_addr(void __iomem *iobase)
-{
-}
-
-/* Determine where to talk to device */
-static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
-{
- int lo, hi;
-
- if (atmel_verify_tpm11() != 0)
- return NULL;
-
- lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
- hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
-
- *base = (hi << 8) | lo;
- *region_size = 2;
-
- return ioport_map(*base, *region_size);
-}
-#endif
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 2ea4882251cf..139556b21cc6 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -366,7 +366,7 @@ static struct platform_driver ftpm_tee_plat_driver = {
},
.shutdown = ftpm_plat_tee_shutdown,
.probe = ftpm_plat_tee_probe,
- .remove_new = ftpm_plat_tee_remove,
+ .remove = ftpm_plat_tee_remove,
};
/* UUID of the fTPM TA */
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 1e5b107d1f3b..76d048f63d55 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -450,6 +450,7 @@ static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
}
static const struct tpm_class_ops tpm_ibmvtpm = {
+ .flags = TPM_OPS_AUTO_STARTUP,
.recv = tpm_ibmvtpm_recv,
.send = tpm_ibmvtpm_send,
.cancel = tpm_ibmvtpm_cancel,
@@ -690,20 +691,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
if (!strcmp(id->compat, "IBM,vtpm20"))
chip->flags |= TPM_CHIP_FLAG_TPM2;
- rc = tpm_get_timeouts(chip);
- if (rc)
- goto init_irq_cleanup;
-
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- rc = tpm2_get_cc_attrs_tbl(chip);
- if (rc)
- goto init_irq_cleanup;
-
- rc = tpm2_sessions_init(chip);
- if (rc)
- goto init_irq_cleanup;
- }
-
return tpm_chip_register(chip);
init_irq_cleanup:
do {
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 2f7326d297ad..9aa230a63616 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -356,7 +356,7 @@ MODULE_DEVICE_TABLE(of, tis_of_platform_match);
static struct platform_driver tis_drv = {
.probe = tpm_tis_plat_probe,
- .remove_new = tpm_tis_plat_remove,
+ .remove = tpm_tis_plat_remove,
.driver = {
.name = "tpm_tis",
.pm = &tpm_tis_pm,
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index adf22992138e..3b55a7b05c46 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -17,6 +17,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -30,11 +31,13 @@
#define TPM_CR50_MAX_BUFSIZE 64
#define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */
#define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */
-#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */
-#define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */
+#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID for Cr50 H1 */
+#define TPM_TI50_DT_I2C_DID_VID 0x504a6666L /* Device and vendor ID for Ti50 DT */
+#define TPM_TI50_OT_I2C_DID_VID 0x50666666L /* Device and vendor ID for Ti50 OT */
#define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */
#define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */
#define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */
+#define TPM_CR50_I2C_DEFAULT_LOC 0
#define TPM_I2C_ACCESS(l) (0x0000 | ((l) << 4))
#define TPM_I2C_STS(l) (0x0001 | ((l) << 4))
@@ -199,8 +202,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t
};
int rc;
- i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
-
/* Prepare for completion interrupt */
tpm_cr50_i2c_enable_tpm_irq(chip);
@@ -219,7 +220,6 @@ static int tpm_cr50_i2c_read(struct tpm_chip *chip, u8 addr, u8 *buffer, size_t
out:
tpm_cr50_i2c_disable_tpm_irq(chip);
- i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
if (rc < 0)
return rc;
@@ -261,8 +261,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
priv->buf[0] = addr;
memcpy(priv->buf + 1, buffer, len);
- i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
-
/* Prepare for completion interrupt */
tpm_cr50_i2c_enable_tpm_irq(chip);
@@ -276,7 +274,6 @@ static int tpm_cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
out:
tpm_cr50_i2c_disable_tpm_irq(chip);
- i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
if (rc < 0)
return rc;
@@ -285,25 +282,26 @@ out:
}
/**
- * tpm_cr50_check_locality() - Verify TPM locality 0 is active.
+ * tpm_cr50_check_locality() - Verify if required TPM locality is active.
* @chip: A TPM chip.
+ * @loc: Locality to be verified
*
* Return:
- * - 0: Success.
+ * - loc: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_check_locality(struct tpm_chip *chip)
+static int tpm_cr50_check_locality(struct tpm_chip *chip, int loc)
{
u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY;
u8 buf;
int rc;
- rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf));
if (rc < 0)
return rc;
if ((buf & mask) == mask)
- return 0;
+ return loc;
return -EIO;
}
@@ -311,53 +309,72 @@ static int tpm_cr50_check_locality(struct tpm_chip *chip)
/**
* tpm_cr50_release_locality() - Release TPM locality.
* @chip: A TPM chip.
- * @force: Flag to force release if set.
+ * @loc: Locality to be released
+ *
+ * Return:
+ * - 0: Success.
+ * - -errno: A POSIX error code.
*/
-static void tpm_cr50_release_locality(struct tpm_chip *chip, bool force)
+static int tpm_cr50_release_locality(struct tpm_chip *chip, int loc)
{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
u8 mask = TPM_ACCESS_VALID | TPM_ACCESS_REQUEST_PENDING;
- u8 addr = TPM_I2C_ACCESS(0);
+ u8 addr = TPM_I2C_ACCESS(loc);
u8 buf;
+ int rc;
- if (tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf)) < 0)
- return;
+ rc = tpm_cr50_i2c_read(chip, addr, &buf, sizeof(buf));
+ if (rc < 0)
+ goto unlock_out;
- if (force || (buf & mask) == mask) {
+ if ((buf & mask) == mask) {
buf = TPM_ACCESS_ACTIVE_LOCALITY;
- tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf));
+ rc = tpm_cr50_i2c_write(chip, addr, &buf, sizeof(buf));
}
+
+unlock_out:
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ return rc;
}
/**
- * tpm_cr50_request_locality() - Request TPM locality 0.
+ * tpm_cr50_request_locality() - Request TPM locality.
* @chip: A TPM chip.
+ * @loc: Locality to be requested.
*
* Return:
- * - 0: Success.
+ * - loc: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_request_locality(struct tpm_chip *chip)
+static int tpm_cr50_request_locality(struct tpm_chip *chip, int loc)
{
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
u8 buf = TPM_ACCESS_REQUEST_USE;
unsigned long stop;
int rc;
- if (!tpm_cr50_check_locality(chip))
- return 0;
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
- rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(0), &buf, sizeof(buf));
+ if (tpm_cr50_check_locality(chip, loc) == loc)
+ return loc;
+
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_ACCESS(loc), &buf, sizeof(buf));
if (rc < 0)
- return rc;
+ goto unlock_out;
stop = jiffies + chip->timeout_a;
do {
- if (!tpm_cr50_check_locality(chip))
- return 0;
+ if (tpm_cr50_check_locality(chip, loc) == loc)
+ return loc;
msleep(TPM_CR50_TIMEOUT_SHORT_MS);
} while (time_before(jiffies, stop));
- return -ETIMEDOUT;
+ rc = -ETIMEDOUT;
+
+unlock_out:
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ return rc;
}
/**
@@ -373,7 +390,7 @@ static u8 tpm_cr50_i2c_tis_status(struct tpm_chip *chip)
{
u8 buf[4];
- if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0)
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0)
return 0;
return buf[0];
@@ -389,7 +406,7 @@ static void tpm_cr50_i2c_tis_set_ready(struct tpm_chip *chip)
{
u8 buf[4] = { TPM_STS_COMMAND_READY };
- tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), buf, sizeof(buf));
+ tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf));
msleep(TPM_CR50_TIMEOUT_SHORT_MS);
}
@@ -419,7 +436,7 @@ static int tpm_cr50_i2c_get_burst_and_status(struct tpm_chip *chip, u8 mask,
stop = jiffies + chip->timeout_b;
do {
- if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(0), buf, sizeof(buf)) < 0) {
+ if (tpm_cr50_i2c_read(chip, TPM_I2C_STS(chip->locality), buf, sizeof(buf)) < 0) {
msleep(TPM_CR50_TIMEOUT_SHORT_MS);
continue;
}
@@ -453,7 +470,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
u8 mask = TPM_STS_VALID | TPM_STS_DATA_AVAIL;
size_t burstcnt, cur, len, expected;
- u8 addr = TPM_I2C_DATA_FIFO(0);
+ u8 addr = TPM_I2C_DATA_FIFO(chip->locality);
u32 status;
int rc;
@@ -515,7 +532,6 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
goto out_err;
}
- tpm_cr50_release_locality(chip, false);
return cur;
out_err:
@@ -523,7 +539,6 @@ out_err:
if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
tpm_cr50_i2c_tis_set_ready(chip);
- tpm_cr50_release_locality(chip, false);
return rc;
}
@@ -545,10 +560,6 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
u32 status;
int rc;
- rc = tpm_cr50_request_locality(chip);
- if (rc < 0)
- return rc;
-
/* Wait until TPM is ready for a command */
stop = jiffies + chip->timeout_b;
while (!(tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)) {
@@ -577,7 +588,8 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
* that is inserted by tpm_cr50_i2c_write()
*/
limit = min_t(size_t, burstcnt - 1, len);
- rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(0), &buf[sent], limit);
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_DATA_FIFO(chip->locality),
+ &buf[sent], limit);
if (rc < 0) {
dev_err(&chip->dev, "Write failed\n");
goto out_err;
@@ -598,7 +610,7 @@ static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
}
/* Start the TPM command */
- rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(0), tpm_go,
+ rc = tpm_cr50_i2c_write(chip, TPM_I2C_STS(chip->locality), tpm_go,
sizeof(tpm_go));
if (rc < 0) {
dev_err(&chip->dev, "Start command failed\n");
@@ -611,7 +623,6 @@ out_err:
if (tpm_cr50_i2c_tis_status(chip) & TPM_STS_COMMAND_READY)
tpm_cr50_i2c_tis_set_ready(chip);
- tpm_cr50_release_locality(chip, false);
return rc;
}
@@ -650,6 +661,8 @@ static const struct tpm_class_ops cr50_i2c = {
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = &tpm_cr50_i2c_req_canceled,
+ .request_locality = &tpm_cr50_request_locality,
+ .relinquish_locality = &tpm_cr50_release_locality,
};
#ifdef CONFIG_ACPI
@@ -669,6 +682,27 @@ MODULE_DEVICE_TABLE(of, of_cr50_i2c_match);
#endif
/**
+ * tpm_cr50_vid_to_name() - Maps VID to name.
+ * @vendor: Vendor identifier to map to name
+ *
+ * Return:
+ * A valid string for the vendor, or "unknown"
+ */
+static const char *tpm_cr50_vid_to_name(u32 vendor)
+{
+ switch (vendor) {
+ case TPM_CR50_I2C_DID_VID:
+ return "cr50";
+ case TPM_TI50_DT_I2C_DID_VID:
+ return "ti50 DT";
+ case TPM_TI50_OT_I2C_DID_VID:
+ return "ti50 OT";
+ default:
+ return "unknown";
+ }
+}
+
+/**
* tpm_cr50_i2c_probe() - Driver probe function.
* @client: I2C client information.
*
@@ -684,6 +718,7 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
u32 vendor;
u8 buf[4];
int rc;
+ int loc;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -726,29 +761,37 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
TPM_CR50_TIMEOUT_NOIRQ_MS);
}
- rc = tpm_cr50_request_locality(chip);
- if (rc < 0) {
+ loc = tpm_cr50_request_locality(chip, TPM_CR50_I2C_DEFAULT_LOC);
+ if (loc < 0) {
dev_err(dev, "Could not request locality\n");
- return rc;
+ return loc;
}
/* Read four bytes from DID_VID register */
- rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(0), buf, sizeof(buf));
+ rc = tpm_cr50_i2c_read(chip, TPM_I2C_DID_VID(loc), buf, sizeof(buf));
if (rc < 0) {
dev_err(dev, "Could not read vendor id\n");
- tpm_cr50_release_locality(chip, true);
+ if (tpm_cr50_release_locality(chip, loc))
+ dev_err(dev, "Could not release locality\n");
+ return rc;
+ }
+
+ rc = tpm_cr50_release_locality(chip, loc);
+ if (rc) {
+ dev_err(dev, "Could not release locality\n");
return rc;
}
vendor = le32_to_cpup((__le32 *)buf);
- if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) {
+ if (vendor != TPM_CR50_I2C_DID_VID &&
+ vendor != TPM_TI50_DT_I2C_DID_VID &&
+ vendor != TPM_TI50_OT_I2C_DID_VID) {
dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor);
- tpm_cr50_release_locality(chip, true);
return -ENODEV;
}
dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n",
- vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50",
+ tpm_cr50_vid_to_name(vendor),
client->addr, client->irq, vendor >> 16);
return tpm_chip_register(chip);
}
@@ -772,7 +815,6 @@ static void tpm_cr50_i2c_remove(struct i2c_client *client)
}
tpm_chip_unregister(chip);
- tpm_cr50_release_locality(chip, true);
}
static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c
index 0621ebec530b..4927714d277a 100644
--- a/drivers/char/tpm/tpm_tis_synquacer.c
+++ b/drivers/char/tpm/tpm_tis_synquacer.c
@@ -152,7 +152,7 @@ MODULE_DEVICE_TABLE(acpi, tpm_synquacer_acpi_tbl);
static struct platform_driver tis_synquacer_drv = {
.probe = tpm_tis_synquacer_probe,
- .remove_new = tpm_tis_synquacer_remove,
+ .remove = tpm_tis_synquacer_remove,
.driver = {
.name = "tpm_tis_synquacer",
.pm = &tpm_tis_synquacer_pm,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 99a7f2441e70..24442485e73e 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -883,9 +883,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- src = kmap_atomic(buf->page);
+ src = kmap_local_page(buf->page);
memcpy(page_address(page) + offset, src + buf->offset, len);
- kunmap_atomic(src);
+ kunmap_local(src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
}
@@ -2006,25 +2006,27 @@ static int virtcons_probe(struct virtio_device *vdev)
multiport = true;
}
- err = init_vqs(portdev);
- if (err < 0) {
- dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
- goto free_chrdev;
- }
-
spin_lock_init(&portdev->ports_lock);
INIT_LIST_HEAD(&portdev->ports);
INIT_LIST_HEAD(&portdev->list);
- virtio_device_ready(portdev->vdev);
-
INIT_WORK(&portdev->config_work, &config_work_handler);
INIT_WORK(&portdev->control_work, &control_work_handler);
if (multiport) {
spin_lock_init(&portdev->c_ivq_lock);
spin_lock_init(&portdev->c_ovq_lock);
+ }
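+
+ /*
+ * Set up the vqs only after the locks and work items their callbacks
+ * use are initialized; a callback may run as soon as a vq exists.
+ */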
+ err = init_vqs(portdev);
+ if (err < 0) {
+ dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
+ goto free_chrdev;
+ }
+
+ virtio_device_ready(portdev->vdev);
+
+ if (multiport) {
err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
if (err < 0) {
dev_err(&vdev->dev,
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 4f6c3cb8aa41..34a345dc5e72 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -738,7 +738,7 @@ MODULE_DEVICE_TABLE(of, hwicap_of_match);
static struct platform_driver hwicap_platform_driver = {
.probe = hwicap_drv_probe,
- .remove_new = hwicap_drv_remove,
+ .remove = hwicap_drv_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = hwicap_of_match,
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 8802e2a6fd20..1a1e64133315 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -74,7 +74,7 @@ static void xilly_drv_remove(struct platform_device *op)
static struct platform_driver xillybus_platform_driver = {
.probe = xilly_drv_probe,
- .remove_new = xilly_drv_remove,
+ .remove = xilly_drv_remove,
.driver = {
.name = xillyname,
.of_match_table = xillybus_of_match,
diff --git a/drivers/clk/.kunitconfig b/drivers/clk/.kunitconfig
index 54ece9207055..08e26137f3d9 100644
--- a/drivers/clk/.kunitconfig
+++ b/drivers/clk/.kunitconfig
@@ -1,5 +1,6 @@
CONFIG_KUNIT=y
CONFIG_OF=y
+CONFIG_OF_OVERLAY=y
CONFIG_COMMON_CLK=y
CONFIG_CLK_KUNIT_TEST=y
CONFIG_CLK_FIXED_RATE_KUNIT_TEST=y
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 299bc678ed1b..713573b6c86c 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -226,6 +226,17 @@ config COMMON_CLK_EP93XX
help
This driver supports the SoC clocks on the Cirrus Logic ep93xx.
+config COMMON_CLK_EYEQ
+ bool "Clock driver for the Mobileye EyeQ platform"
+ depends on MACH_EYEQ5 || MACH_EYEQ6H || COMPILE_TEST
+ select AUXILIARY_BUS
+ default MACH_EYEQ5 || MACH_EYEQ6H
+ help
+ This driver provides clocks found on Mobileye EyeQ5, EyeQ6L and EyeQ6H
+ SoCs. Controllers live in shared register regions called OLB. The driver
+ provides read-only PLLs derived from the main crystal clock (which
+ must be constant). It also exposes some divider clocks.
+
config COMMON_CLK_FSL_FLEXSPI
tristate "Clock driver for FlexSPI on Layerscape SoCs"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
@@ -259,7 +270,7 @@ config COMMON_CLK_LAN966X
tristate "Generic Clock Controller driver for LAN966X SoC"
depends on HAS_IOMEM
depends on OF
- depends on SOC_LAN966 || COMPILE_TEST
+ depends on SOC_LAN966 || ARCH_LAN969X || COMPILE_TEST
help
This driver provides support for Generic Clock Controller(GCK) on
LAN966X SoC. GCK generates and supplies clock to various peripherals
@@ -291,7 +302,7 @@ config CLK_TWL
help
Enable support for controlling the clock resources on TWL family
PMICs. These devices have some 32K clock outputs which can be
- controlled by software. For now, only the TWL6032 clocks are
+ controlled by software. For now, the TWL6032 and TWL6030 clocks are
supported.
config CLK_TWL6040
@@ -342,6 +353,14 @@ config COMMON_CLK_LOCHNAGAR
This driver supports the clocking features of the Cirrus Logic
Lochnagar audio development board.
+config COMMON_CLK_NPCM8XX
+ tristate "Clock driver for the NPCM8XX SoC Family"
+ depends on ARCH_NPCM || COMPILE_TEST
+ help
+ This driver supports the clocks on the Nuvoton BMC NPCM8XX SoC family.
+ All the clocks are initialized by the bootloader, so this driver only
+ allows reading the current settings directly from the hardware.
+
config COMMON_CLK_LOONGSON2
bool "Clock driver for Loongson-2 SoC"
depends on LOONGARCH || COMPILE_TEST
@@ -517,7 +536,6 @@ config CLK_KUNIT_TEST
tristate "Basic Clock Framework Kunit Tests" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
- select OF_OVERLAY if OF
select DTC
help
Kunit tests for the common clock framework.
@@ -526,7 +544,6 @@ config CLK_FIXED_RATE_KUNIT_TEST
tristate "Basic fixed rate clk type KUnit test" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
- select OF_OVERLAY if OF
select DTC
help
KUnit tests for the basic fixed rate clk type.
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index fb8878a5d7d9..bf4bd45adc3a 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -4,6 +4,20 @@ obj-$(CONFIG_HAVE_CLK) += clk-devres.o clk-bulk.o clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o
obj-$(CONFIG_CLK_KUNIT_TEST) += clk-test.o
clk-test-y := clk_test.o \
+ kunit_clk_assigned_rates_u64_one.dtbo.o \
+ kunit_clk_assigned_rates_u64_one_consumer.dtbo.o \
+ kunit_clk_assigned_rates_u64_multiple.dtbo.o \
+ kunit_clk_assigned_rates_u64_multiple_consumer.dtbo.o \
+ kunit_clk_assigned_rates_multiple.dtbo.o \
+ kunit_clk_assigned_rates_multiple_consumer.dtbo.o \
+ kunit_clk_assigned_rates_null.dtbo.o \
+ kunit_clk_assigned_rates_null_consumer.dtbo.o \
+ kunit_clk_assigned_rates_one.dtbo.o \
+ kunit_clk_assigned_rates_one_consumer.dtbo.o \
+ kunit_clk_assigned_rates_without.dtbo.o \
+ kunit_clk_assigned_rates_without_consumer.dtbo.o \
+ kunit_clk_assigned_rates_zero.dtbo.o \
+ kunit_clk_assigned_rates_zero_consumer.dtbo.o \
kunit_clk_parent_data_test.dtbo.o
obj-$(CONFIG_COMMON_CLK) += clk-divider.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
@@ -42,6 +56,7 @@ obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_COMMON_CLK_EP93XX) += clk-ep93xx.o
obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
obj-$(CONFIG_COMMON_CLK_EN7523) += clk-en7523.o
+obj-$(CONFIG_COMMON_CLK_EYEQ) += clk-eyeq.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
obj-$(CONFIG_COMMON_CLK_FSL_FLEXSPI) += clk-fsl-flexspi.o
obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o
@@ -62,6 +77,7 @@ obj-$(CONFIG_ARCH_MILBEAUT_M10V) += clk-milbeaut.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
+obj-$(CONFIG_COMMON_CLK_NPCM8XX) += clk-npcm8xx.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
obj-$(CONFIG_CLK_LS1028A_PLLDIG) += clk-plldig.o
diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c
index 65d422a588e1..9d178afc73bd 100644
--- a/drivers/clk/analogbits/wrpll-cln28hpc.c
+++ b/drivers/clk/analogbits/wrpll-cln28hpc.c
@@ -292,7 +292,7 @@ int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
vco = vco_pre * f;
}
- delta = abs(target_rate - vco);
+ delta = abs(target_vco_rate - vco);
if (delta < best_delta) {
best_delta = delta;
best_r = r;
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 8e3684ba2c74..9128a06b860d 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -24,4 +24,5 @@ obj-$(CONFIG_SOC_SAM9X7) += sam9x7.o
obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o dt-compat.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o dt-compat.o
+obj-$(CONFIG_SOC_SAMA7D65) += sama7d65.o
obj-$(CONFIG_SOC_SAMA7G5) += sama7g5.o
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 15c46489ba85..7a544e429d34 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -20,7 +20,7 @@
#define PMC_MCR_CSS_SHIFT (16)
-#define MASTER_MAX_ID 4
+#define MASTER_MAX_ID 9
#define to_clk_master(hw) container_of(hw, struct clk_master, hw)
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index fda041102224..cefd9948e103 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -23,7 +23,7 @@
#define UPLL_DIV 2
#define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
-#define PLL_MAX_ID 7
+#define PLL_MAX_ID 9
struct sam9x60_pll_core {
struct regmap *regmap;
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 5aa9c1f1c886..acf780a81589 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -151,6 +151,7 @@ static struct syscore_ops pmc_syscore_ops = {
static const struct of_device_id pmc_dt_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sama7g5-pmc", },
+ { .compatible = "microchip,sama7d65-pmc", },
{ /* sentinel */ }
};
diff --git a/drivers/clk/at91/sama7d65.c b/drivers/clk/at91/sama7d65.c
new file mode 100644
index 000000000000..a5d40df8b2f2
--- /dev/null
+++ b/drivers/clk/at91/sama7d65.c
@@ -0,0 +1,1375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SAMA7D65 PMC code.
+ *
+ * Copyright (C) 2024 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Ryan Wanner <ryan.wanner@microchip.com>
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(pmc_mck0_lock);
+static DEFINE_SPINLOCK(pmc_mckX_lock);
+
+#define PMC_INDEX_MAX 25
+
+/*
+ * PLL clocks identifiers
+ * @PLL_ID_CPU: CPU PLL identifier
+ * @PLL_ID_SYS: System PLL identifier
+ * @PLL_ID_DDR: DDR PLL identifier
+ * @PLL_ID_GPU: Graphics subsystem PLL identifier
+ * @PLL_ID_BAUD: Baud PLL identifier
+ * @PLL_ID_AUDIO: Audio PLL identifier
+ * @PLL_ID_ETH: Ethernet PLL identifier
+ * @PLL_ID_LVDS: LVDS PLL identifier
+ * @PLL_ID_USB: USB PLL identifier
+ */
+enum pll_ids {
+ PLL_ID_CPU,
+ PLL_ID_SYS,
+ PLL_ID_DDR,
+ PLL_ID_GPU,
+ PLL_ID_BAUD,
+ PLL_ID_AUDIO,
+ PLL_ID_ETH,
+ PLL_ID_LVDS,
+ PLL_ID_USB,
+ PLL_ID_MAX
+};
+
+/*
+ * PLL component identifier
+ * @PLL_COMPID_FRAC: Fractional PLL component identifier
+ * @PLL_COMPID_DIV0: 1st PLL divider component identifier
+ * @PLL_COMPID_DIV1: 2nd PLL divider component identifier
+ */
+enum pll_component_id {
+ PLL_COMPID_FRAC,
+ PLL_COMPID_DIV0,
+ PLL_COMPID_DIV1,
+ PLL_COMPID_MAX
+};
+
+/*
+ * PLL type identifiers
+ * @PLL_TYPE_FRAC: fractional PLL identifier
+ * @PLL_TYPE_DIV: divider PLL identifier
+ */
+enum pll_type {
+ PLL_TYPE_FRAC,
+ PLL_TYPE_DIV
+};
+
+/* Layout for fractional PLLs. */
+static const struct clk_pll_layout pll_layout_frac = {
+ .mul_mask = GENMASK(31, 24),
+ .frac_mask = GENMASK(21, 0),
+ .mul_shift = 24,
+ .frac_shift = 0,
+};
+
+/* Layout for DIVPMC dividers. */
+static const struct clk_pll_layout pll_layout_divpmc = {
+ .div_mask = GENMASK(7, 0),
+ .endiv_mask = BIT(29),
+ .div_shift = 0,
+ .endiv_shift = 29,
+};
+
+/* Layout for DIVIO dividers. */
+static const struct clk_pll_layout pll_layout_divio = {
+ .div_mask = GENMASK(19, 12),
+ .endiv_mask = BIT(30),
+ .div_shift = 12,
+ .endiv_shift = 30,
+};
+
+/*
+ * CPU PLL output range.
+ * Note: the upper limit has been set to 1000000002 because the hardware
+ * block cannot output exactly 1 GHz.
+ */
+static const struct clk_range cpu_pll_outputs[] = {
+ { .min = 2343750, .max = 1000000002 },
+};
+
+/* PLL output range. */
+static const struct clk_range pll_outputs[] = {
+ { .min = 2343750, .max = 1200000000 },
+};
+
+/*
+ * Min: fCOREPLLCK = 600 MHz, PMC_PLL_CTRL0.DIVPMC = 255
+ * Max: fCOREPLLCK = 800 MHz, PMC_PLL_CTRL0.DIVPMC = 0
+ */
+static const struct clk_range lvdspll_outputs[] = {
+ { .min = 16406250, .max = 800000000 },
+};
+
+static const struct clk_range upll_outputs[] = {
+ { .min = 480000000, .max = 480000000 },
+};
+
+/* Fractional PLL core output range. */
+static const struct clk_range core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range lvdspll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+static const struct clk_range upll_core_outputs[] = {
+ { .min = 600000000, .max = 1200000000 },
+};
+
+/* CPU PLL characteristics. */
+static const struct clk_pll_characteristics cpu_pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(cpu_pll_outputs),
+ .output = cpu_pll_outputs,
+ .core_output = core_outputs,
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(pll_outputs),
+ .output = pll_outputs,
+ .core_output = core_outputs,
+};
+
+static const struct clk_pll_characteristics lvdspll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(lvdspll_outputs),
+ .output = lvdspll_outputs,
+ .core_output = lvdspll_core_outputs,
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+ .input = { .min = 20000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(upll_outputs),
+ .output = upll_outputs,
+ .core_output = upll_core_outputs,
+ .upll = true,
+};
+
+/*
+ * SAMA7D65 PLL possible parents
+ * @SAMA7D65_PLL_PARENT_MAINCK: MAINCK is PLL a parent
+ * @SAMA7D65_PLL_PARENT_MAIN_XTAL: MAIN XTAL is a PLL parent
+ * @SAMA7D65_PLL_PARENT_FRACCK: Frac PLL is a PLL parent (for PLL dividers)
+ */
+enum sama7d65_pll_parent {
+ SAMA7D65_PLL_PARENT_MAINCK,
+ SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ SAMA7D65_PLL_PARENT_FRACCK
+};
+
+/*
+ * PLL clocks description
+ * @n: clock name
+ * @l: clock layout
+ * @c: clock characteristics
+ * @hw: pointer to clk_hw
+ * @t: clock type
+ * @f: clock flags
+ * @p: clock parent
+ * @eid: export index in sama7d65->chws[] array
+ * @safe_div: intermediate divider that needs to be set on PRE_RATE_CHANGE
+ * notification
+ */
+static struct sama7d65_pll {
+ const char *n;
+ const struct clk_pll_layout *l;
+ const struct clk_pll_characteristics *c;
+ struct clk_hw *hw;
+ unsigned long f;
+ enum sama7d65_pll_parent p;
+ u8 t;
+ u8 eid;
+ u8 safe_div;
+} sama7d65_plls[][PLL_COMPID_MAX] = {
+ [PLL_ID_CPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "cpupll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds cpupll_divpmcck which feeds CPU. It should
+ * not be disabled.
+ */
+ .f = CLK_IS_CRITICAL,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "cpupll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &cpu_pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .eid = PMC_CPUPLL,
+ /*
+ * Safe div=15 should be safe even for switching b/w 1GHz and
+ * 90MHz (frac pll might go up to 1.2GHz).
+ */
+ .safe_div = 15,
+ },
+ },
+
+ [PLL_ID_SYS] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "syspll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds syspll_divpmcck which may feed critical parts
+ * of the systems like timers. Therefore it should not be
+ * disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "syspll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /*
+ * This may feed critical parts of the systems like timers.
+ * Therefore it should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_SYSPLL,
+ },
+ },
+
+ [PLL_ID_DDR] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "ddrpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ /*
+ * This feeds ddrpll_divpmcck which feeds DDR. It should not
+ * be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ddrpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ /* This feeds DDR. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ },
+ },
+
+ [PLL_ID_GPU] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "gpupll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "gpupll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ },
+ },
+
+ [PLL_ID_BAUD] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "baudpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAINCK,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "baudpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_BAUDPLL,
+ },
+ },
+
+ [PLL_ID_AUDIO] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "audiopll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "audiopll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOPMCPLL,
+ },
+
+ [PLL_COMPID_DIV1] = {
+ .n = "audiopll_diviock",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divio,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOIOPLL,
+ },
+ },
+
+ [PLL_ID_ETH] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "ethpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "ethpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_ETHPLL,
+ },
+ },
+
+ [PLL_ID_LVDS] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "lvdspll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "lvdspll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &lvdspll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_LVDSPLL,
+ },
+ },
+
+ [PLL_ID_USB] = {
+ [PLL_COMPID_FRAC] = {
+ .n = "usbpll_fracck",
+ .p = SAMA7D65_PLL_PARENT_MAIN_XTAL,
+ .l = &pll_layout_frac,
+ .c = &upll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE,
+ },
+
+ [PLL_COMPID_DIV0] = {
+ .n = "usbpll_divpmcck",
+ .p = SAMA7D65_PLL_PARENT_FRACCK,
+ .l = &pll_layout_divpmc,
+ .c = &upll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_UTMI,
+ },
+ },
+};
+
+/* Used to create an array entry identifying a PLL by its components. */
+#define PLL_IDS_TO_ARR_ENTRY(_id, _comp) { PLL_ID_##_id, PLL_COMPID_##_comp}
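+/* e.g. PLL_IDS_TO_ARR_ENTRY(SYS, DIV0) expands to { PLL_ID_SYS, PLL_COMPID_DIV0 }. */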
+
+/*
+ * Master clock (MCK[0..9]) description
+ * @n: clock name
+ * @ep: extra parents names array (entry formed by PLL components
+ * identifiers (see enum pll_component_id))
+ * @hw: pointer to clk_hw
+ * @ep_chg_id: index in parents array that specifies the changeable
+ * parent
+ * @ep_count: extra parents count
+ * @ep_mux_table: mux table for extra parents
+ * @id: clock id
+ * @eid: export index in sama7d65->chws[] array
+ * @c: true if clock is critical and cannot be disabled
+ */
+static struct {
+ const char *n;
+ struct {
+ int pll_id;
+ int pll_compid;
+ } ep[4];
+ struct clk_hw *hw;
+ int ep_chg_id;
+ u8 ep_count;
+ u8 ep_mux_table[4];
+ u8 id;
+ u8 eid;
+ u8 c;
+} sama7d65_mckx[] = {
+ { .n = "mck0", }, /* Dummy entry for MCK0 to store hw in probe. */
+ { .n = "mck1",
+ .id = 1,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK1,
+ .c = 1, },
+
+ { .n = "mck2",
+ .id = 2,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
+ .ep_mux_table = { 5, 6, },
+ .ep_count = 2,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck3",
+ .id = 3,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(DDR, DIV0), },
+ .ep_mux_table = { 5, 6, },
+ .ep_count = 2,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK3,
+ .c = 1, },
+
+ { .n = "mck4",
+ .id = 4,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .c = 1, },
+
+ { .n = "mck5",
+ .id = 5,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_count = 1,
+ .ep_chg_id = INT_MIN,
+ .eid = PMC_MCK5,
+ .c = 1, },
+
+ { .n = "mck6",
+ .id = 6,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1,
+ .c = 1, },
+
+ { .n = "mck7",
+ .id = 7,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+
+ { .n = "mck8",
+ .id = 8,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+
+ { .n = "mck9",
+ .id = 9,
+ .ep = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .ep_mux_table = { 5, },
+ .ep_chg_id = INT_MIN,
+ .ep_count = 1, },
+};
+
+/*
+ * System clock description
+ * @n: clock name
+ * @p: clock parent name
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ const char *p;
+ u8 id;
+} sama7d65_systemck[] = {
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8, },
+ { .n = "pck1", .p = "prog1", .id = 9, },
+ { .n = "pck2", .p = "prog2", .id = 10, },
+ { .n = "pck3", .p = "prog3", .id = 11, },
+ { .n = "pck4", .p = "prog4", .id = 12, },
+ { .n = "pck5", .p = "prog5", .id = 13, },
+ { .n = "pck6", .p = "prog6", .id = 14, },
+ { .n = "pck7", .p = "prog7", .id = 15, },
+};
+
+/* Mux table for programmable clocks. */
+static u32 sama7d65_prog_mux_table[] = { 0, 1, 2, 5, 7, 8, 9, 10, 12 };
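+/* One CSS field value per programmable-clock parent wired up in sama7d65_pmc_setup(). */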
+
+/*
+ * Peripheral clock parent hw identifier (used to index in sama7d65_mckx[])
+ * @PCK_PARENT_HW_MCK0: pck parent hw identifier is MCK0
+ * @PCK_PARENT_HW_MCK1: pck parent hw identifier is MCK1
+ * @PCK_PARENT_HW_MCK2: pck parent hw identifier is MCK2
+ * @PCK_PARENT_HW_MCK3: pck parent hw identifier is MCK3
+ * @PCK_PARENT_HW_MCK4: pck parent hw identifier is MCK4
+ * @PCK_PARENT_HW_MCK5: pck parent hw identifier is MCK5
+ * @PCK_PARENT_HW_MCK6: pck parent hw identifier is MCK6
+ * @PCK_PARENT_HW_MCK7: pck parent hw identifier is MCK7
+ * @PCK_PARENT_HW_MCK8: pck parent hw identifier is MCK8
+ * @PCK_PARENT_HW_MCK9: pck parent hw identifier is MCK9
+ * @PCK_PARENT_HW_MAX: max identifier
+ */
+enum sama7d65_pck_parent_hw_id {
+ PCK_PARENT_HW_MCK0,
+ PCK_PARENT_HW_MCK1,
+ PCK_PARENT_HW_MCK2,
+ PCK_PARENT_HW_MCK3,
+ PCK_PARENT_HW_MCK4,
+ PCK_PARENT_HW_MCK5,
+ PCK_PARENT_HW_MCK6,
+ PCK_PARENT_HW_MCK7,
+ PCK_PARENT_HW_MCK8,
+ PCK_PARENT_HW_MCK9,
+ PCK_PARENT_HW_MAX
+};
+
+/*
+ * Peripheral clock description
+ * @n: clock name
+ * @p: clock parent hw id
+ * @r: clock range values
+ * @id: clock id
+ * @chgp: index in parent array of the changeable parent
+ */
+static struct {
+ const char *n;
+ enum sama7d65_pck_parent_hw_id p;
+ struct clk_range r;
+ u8 chgp;
+ u8 id;
+} sama7d65_periphck[] = {
+ { .n = "pioA_clk", .p = PCK_PARENT_HW_MCK0, .id = 10, },
+ { .n = "securam_clk", .p = PCK_PARENT_HW_MCK0, .id = 17, },
+ { .n = "sfr_clk", .p = PCK_PARENT_HW_MCK7, .id = 18, },
+ { .n = "hsmc_clk", .p = PCK_PARENT_HW_MCK5, .id = 20, },
+ { .n = "xdmac0_clk", .p = PCK_PARENT_HW_MCK6, .id = 21, },
+ { .n = "xdmac1_clk", .p = PCK_PARENT_HW_MCK6, .id = 22, },
+ { .n = "xdmac2_clk", .p = PCK_PARENT_HW_MCK1, .id = 23, },
+ { .n = "acc_clk", .p = PCK_PARENT_HW_MCK7, .id = 24, },
+ { .n = "aes_clk", .p = PCK_PARENT_HW_MCK6, .id = 26, },
+ { .n = "tzaesbasc_clk", .p = PCK_PARENT_HW_MCK8, .id = 27, },
+ { .n = "asrc_clk", .p = PCK_PARENT_HW_MCK9, .id = 29, .r = { .max = 200000000, }, },
+ { .n = "cpkcc_clk", .p = PCK_PARENT_HW_MCK0, .id = 30, },
+ { .n = "eic_clk", .p = PCK_PARENT_HW_MCK7, .id = 33, },
+ { .n = "flex0_clk", .p = PCK_PARENT_HW_MCK7, .id = 34, },
+ { .n = "flex1_clk", .p = PCK_PARENT_HW_MCK7, .id = 35, },
+ { .n = "flex2_clk", .p = PCK_PARENT_HW_MCK7, .id = 36, },
+ { .n = "flex3_clk", .p = PCK_PARENT_HW_MCK7, .id = 37, },
+ { .n = "flex4_clk", .p = PCK_PARENT_HW_MCK8, .id = 38, },
+ { .n = "flex5_clk", .p = PCK_PARENT_HW_MCK8, .id = 39, },
+ { .n = "flex6_clk", .p = PCK_PARENT_HW_MCK8, .id = 40, },
+ { .n = "flex7_clk", .p = PCK_PARENT_HW_MCK8, .id = 41, },
+ { .n = "flex8_clk", .p = PCK_PARENT_HW_MCK9, .id = 42, },
+ { .n = "flex9_clk", .p = PCK_PARENT_HW_MCK9, .id = 43, },
+ { .n = "flex10_clk", .p = PCK_PARENT_HW_MCK9, .id = 44, },
+ { .n = "gmac0_clk", .p = PCK_PARENT_HW_MCK6, .id = 46, },
+ { .n = "gmac1_clk", .p = PCK_PARENT_HW_MCK6, .id = 47, },
+ { .n = "gmac0_tsu_clk", .p = PCK_PARENT_HW_MCK1, .id = 49, },
+ { .n = "gmac1_tsu_clk", .p = PCK_PARENT_HW_MCK1, .id = 50, },
+ { .n = "icm_clk", .p = PCK_PARENT_HW_MCK5, .id = 53, },
+ { .n = "i2smcc0_clk", .p = PCK_PARENT_HW_MCK9, .id = 54, .r = { .max = 200000000, }, },
+ { .n = "i2smcc1_clk", .p = PCK_PARENT_HW_MCK9, .id = 55, .r = { .max = 200000000, }, },
+ { .n = "lcd_clk", .p = PCK_PARENT_HW_MCK3, .id = 56, },
+ { .n = "matrix_clk", .p = PCK_PARENT_HW_MCK5, .id = 57, },
+ { .n = "mcan0_clk", .p = PCK_PARENT_HW_MCK5, .id = 58, .r = { .max = 200000000, }, },
+ { .n = "mcan1_clk", .p = PCK_PARENT_HW_MCK5, .id = 59, .r = { .max = 200000000, }, },
+ { .n = "mcan2_clk", .p = PCK_PARENT_HW_MCK5, .id = 60, .r = { .max = 200000000, }, },
+ { .n = "mcan3_clk", .p = PCK_PARENT_HW_MCK5, .id = 61, .r = { .max = 200000000, }, },
+ { .n = "mcan4_clk", .p = PCK_PARENT_HW_MCK5, .id = 62, .r = { .max = 200000000, }, },
+ { .n = "pdmc0_clk", .p = PCK_PARENT_HW_MCK9, .id = 64, .r = { .max = 200000000, }, },
+ { .n = "pdmc1_clk", .p = PCK_PARENT_HW_MCK9, .id = 65, .r = { .max = 200000000, }, },
+ { .n = "pit64b0_clk", .p = PCK_PARENT_HW_MCK7, .id = 66, },
+ { .n = "pit64b1_clk", .p = PCK_PARENT_HW_MCK7, .id = 67, },
+ { .n = "pit64b2_clk", .p = PCK_PARENT_HW_MCK7, .id = 68, },
+ { .n = "pit64b3_clk", .p = PCK_PARENT_HW_MCK8, .id = 69, },
+ { .n = "pit64b4_clk", .p = PCK_PARENT_HW_MCK8, .id = 70, },
+ { .n = "pit64b5_clk", .p = PCK_PARENT_HW_MCK8, .id = 71, },
+ { .n = "pwm_clk", .p = PCK_PARENT_HW_MCK7, .id = 72, },
+ { .n = "qspi0_clk", .p = PCK_PARENT_HW_MCK5, .id = 73, },
+ { .n = "qspi1_clk", .p = PCK_PARENT_HW_MCK5, .id = 74, },
+ { .n = "sdmmc0_clk", .p = PCK_PARENT_HW_MCK1, .id = 75, },
+ { .n = "sdmmc1_clk", .p = PCK_PARENT_HW_MCK1, .id = 76, },
+ { .n = "sdmmc2_clk", .p = PCK_PARENT_HW_MCK1, .id = 77, },
+ { .n = "sha_clk", .p = PCK_PARENT_HW_MCK6, .id = 78, },
+ { .n = "spdifrx_clk", .p = PCK_PARENT_HW_MCK9, .id = 79, .r = { .max = 200000000, }, },
+ { .n = "spdiftx_clk", .p = PCK_PARENT_HW_MCK9, .id = 80, .r = { .max = 200000000, }, },
+ { .n = "ssc0_clk", .p = PCK_PARENT_HW_MCK7, .id = 81, .r = { .max = 200000000, }, },
+ { .n = "ssc1_clk", .p = PCK_PARENT_HW_MCK8, .id = 82, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch0_clk", .p = PCK_PARENT_HW_MCK8, .id = 83, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch1_clk", .p = PCK_PARENT_HW_MCK8, .id = 84, .r = { .max = 200000000, }, },
+ { .n = "tcb0_ch2_clk", .p = PCK_PARENT_HW_MCK8, .id = 85, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch0_clk", .p = PCK_PARENT_HW_MCK5, .id = 86, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch1_clk", .p = PCK_PARENT_HW_MCK5, .id = 87, .r = { .max = 200000000, }, },
+ { .n = "tcb1_ch2_clk", .p = PCK_PARENT_HW_MCK5, .id = 88, .r = { .max = 200000000, }, },
+ { .n = "tcpca_clk", .p = PCK_PARENT_HW_MCK5, .id = 89, },
+ { .n = "tcpcb_clk", .p = PCK_PARENT_HW_MCK5, .id = 90, },
+ { .n = "tdes_clk", .p = PCK_PARENT_HW_MCK6, .id = 91, },
+ { .n = "trng_clk", .p = PCK_PARENT_HW_MCK6, .id = 92, },
+ { .n = "udphsa_clk", .p = PCK_PARENT_HW_MCK5, .id = 99, },
+ { .n = "udphsb_clk", .p = PCK_PARENT_HW_MCK5, .id = 100, },
+ { .n = "uhphs_clk", .p = PCK_PARENT_HW_MCK5, .id = 101, },
+ { .n = "dsi_clk", .p = PCK_PARENT_HW_MCK3, .id = 103, },
+ { .n = "lvdsc_clk", .p = PCK_PARENT_HW_MCK3, .id = 104, },
+};
+
+/*
+ * Generic clock description
+ * @n: clock name
+ * @pp: PLL parents (entry formed by PLL components identifiers
+ * (see enum pll_component_id))
+ * @pp_mux_table: PLL parents mux table
+ * @r: clock output range
+ * @pp_chg_id: id in parent array of changeable PLL parent
+ * @pp_count: PLL parents count
+ * @id: clock id
+ */
+static const struct {
+ const char *n;
+ struct {
+ int pll_id;
+ int pll_compid;
+ } pp[8];
+ const char pp_mux_table[8];
+ struct clk_range r;
+ int pp_chg_id;
+ u8 pp_count;
+ u8 id;
+} sama7d65_gck[] = {
+ { .n = "adc_gclk",
+ .id = 25,
+ .r = { .max = 100000000, },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 8, 9, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "asrc_gclk",
+ .id = 29,
+ .r = { .max = 200000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex0_gclk",
+ .id = 34,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex1_gclk",
+ .id = 35,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex2_gclk",
+ .id = 36,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex3_gclk",
+ .id = 37,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = {8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex4_gclk",
+ .id = 38,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex5_gclk",
+ .id = 39,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex6_gclk",
+ .id = 40,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex7_gclk",
+ .id = 41,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex8_gclk",
+ .id = 42,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex9_gclk",
+ .id = 43,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "flex10_gclk",
+ .id = 44,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 8, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac0_gclk",
+ .id = 46,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac1_gclk",
+ .id = 47,
+ .r = { .max = 125000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "gmac0_tsu_gclk",
+ .id = 49,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "gmac1_tsu_gclk",
+ .id = 50,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 10, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc0_gclk",
+ .id = 54,
+ .r = { .max = 100000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "i2smcc1_gclk",
+ .id = 55,
+ .r = { .max = 100000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "lcdc_gclk",
+ .id = 56,
+ .r = { .max = 90000000 },
+ .pp_count = 0,
+ .pp_chg_id = INT_MIN,
+ },
+
+ { .n = "mcan0_gclk",
+ .id = 58,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan1_gclk",
+ .id = 59,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan2_gclk",
+ .id = 60,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan3_gclk",
+ .id = 61,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "mcan4_gclk",
+ .id = 62,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(USB, DIV0), },
+ .pp_mux_table = { 12 },
+ .pp_count = 1,
+ .pp_chg_id = 4, },
+
+ { .n = "pdmc0_gclk",
+ .id = 64,
+ .r = { .max = 80000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9 },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pdmc1_gclk",
+ .id = 65,
+ .r = { .max = 80000000, },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b0_gclk",
+ .id = 66,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b1_gclk",
+ .id = 67,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b2_gclk",
+ .id = 68,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b3_gclk",
+ .id = 69,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b4_gclk",
+ .id = 70,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "pit64b5_gclk",
+ .id = 71,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi0_gclk",
+ .id = 73,
+ .r = { .max = 400000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "qspi1_gclk",
+ .id = 74,
+ .r = { .max = 266000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), },
+ .pp_mux_table = { 5, 8, },
+ .pp_count = 2,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "sdmmc0_gclk",
+ .id = 75,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10, },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "sdmmc1_gclk",
+ .id = 76,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10, },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "sdmmc2_gclk",
+ .id = 77,
+ .r = { .max = 208000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 10 },
+ .pp_count = 2,
+ .pp_chg_id = 4, },
+
+ { .n = "spdifrx_gclk",
+ .id = 79,
+ .r = { .max = 150000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "spdiftx_gclk",
+ .id = 80,
+ .r = { .max = 25000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0), },
+ .pp_mux_table = { 9, },
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb0_ch0_gclk",
+ .id = 83,
+ .r = { .max = 34000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "tcb1_ch0_gclk",
+ .id = 86,
+ .r = { .max = 67000000 },
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = { 8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "DSI_gclk",
+ .id = 103,
+ .r = {.max = 27000000},
+ .pp = { PLL_IDS_TO_ARR_ENTRY(SYS, DIV0), },
+ .pp_mux_table = {5},
+ .pp_count = 1,
+ .pp_chg_id = INT_MIN, },
+
+ { .n = "I3CC_gclk",
+ .id = 105,
+ .r = {.max = 125000000},
+ .pp = { PLL_IDS_TO_ARR_ENTRY(BAUD, DIV0), PLL_IDS_TO_ARR_ENTRY(AUDIO, DIV0),
+ PLL_IDS_TO_ARR_ENTRY(ETH, DIV0), },
+ .pp_mux_table = {8, 9, 10, },
+ .pp_count = 3,
+ .pp_chg_id = INT_MIN, },
+};
+
+/* MCK0 characteristics. */
+static const struct clk_master_characteristics mck0_characteristics = {
+ .output = { .min = 32768, .max = 200000000 },
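+ /* Divisors indexed by the register field encoding, hence the non-monotonic order. */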
+ .divisors = { 1, 2, 4, 3, 5 },
+ .have_div3_pres = 1,
+};
+
+/* MCK0 layout. */
+static const struct clk_master_layout mck0_layout = {
+ .mask = 0x773,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+/* Programmable clock layout. */
+static const struct clk_programmable_layout programmable_layout = {
+ .pres_mask = 0xff,
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+ .is_pres_direct = 1,
+};
+
+/* Peripheral clock layout. */
+static const struct clk_pcr_layout sama7d65_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
+static void __init sama7d65_pmc_setup(struct device_node *np)
+{
+ const char *main_xtal_name = "main_xtal";
+ struct pmc_data *sama7d65_pmc;
+ const char *parent_names[11];
+ void **alloc_mem = NULL;
+ int alloc_mem_size = 0;
+ struct regmap *regmap;
+ struct clk_hw *hw, *main_rc_hw, *main_osc_hw, *main_xtal_hw;
+ struct clk_hw *td_slck_hw, *md_slck_hw;
+ static struct clk_parent_data parent_data;
+ struct clk_hw *parent_hws[10];
+ bool bypass;
+ int i, j;
+
+ td_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "td_slck"));
+ md_slck_hw = __clk_get_hw(of_clk_get_by_name(np, "md_slck"));
+ main_xtal_hw = __clk_get_hw(of_clk_get_by_name(np, main_xtal_name));
+
+ if (!td_slck_hw || !md_slck_hw || !main_xtal_hw)
+ return;
+
+ regmap = device_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sama7d65_pmc = pmc_data_allocate(PMC_INDEX_MAX,
+ nck(sama7d65_systemck),
+ nck(sama7d65_periphck),
+ nck(sama7d65_gck), 8);
+ if (!sama7d65_pmc)
+ return;
+
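+ /*
+ * alloc_mem tracks the mux tables allocated below so they can be
+ * freed on the error path. On success only this pointer array is
+ * freed; the tables stay alive with the registered clocks.
+ */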
+ alloc_mem = kmalloc(sizeof(void *) *
+ (ARRAY_SIZE(sama7d65_mckx) + ARRAY_SIZE(sama7d65_gck)),
+ GFP_KERNEL);
+ if (!alloc_mem)
+ goto err_free;
+
+ main_rc_hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000,
+ 50000000);
+ if (IS_ERR(main_rc_hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ parent_data.name = main_xtal_name;
+ parent_data.fw_name = main_xtal_name;
+ main_osc_hw = at91_clk_register_main_osc(regmap, "main_osc", NULL,
+ &parent_data, bypass);
+ if (IS_ERR(main_osc_hw))
+ goto err_free;
+
+ parent_hws[0] = main_rc_hw;
+ parent_hws[1] = main_osc_hw;
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", NULL, parent_hws, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->chws[PMC_MAIN] = hw;
+
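+ /*
+ * Register each PLL's fractional core first; the divider components
+ * then take that fractional output (PLL_COMPID_FRAC) as parent.
+ */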
+ for (i = 0; i < PLL_ID_MAX; i++) {
+ for (j = 0; j < PLL_COMPID_MAX; j++) {
+ struct clk_hw *parent_hw;
+
+ if (!sama7d65_plls[i][j].n)
+ continue;
+
+ switch (sama7d65_plls[i][j].t) {
+ case PLL_TYPE_FRAC:
+ switch (sama7d65_plls[i][j].p) {
+ case SAMA7D65_PLL_PARENT_MAINCK:
+ parent_hw = sama7d65_pmc->chws[PMC_MAIN];
+ break;
+ case SAMA7D65_PLL_PARENT_MAIN_XTAL:
+ parent_hw = main_xtal_hw;
+ break;
+ default:
+ /* Should not happen. */
+ parent_hw = NULL;
+ break;
+ }
+
+ hw = sam9x60_clk_register_frac_pll(regmap,
+ &pmc_pll_lock, sama7d65_plls[i][j].n,
+ NULL, parent_hw, i,
+ sama7d65_plls[i][j].c,
+ sama7d65_plls[i][j].l,
+ sama7d65_plls[i][j].f);
+ break;
+
+ case PLL_TYPE_DIV:
+ hw = sam9x60_clk_register_div_pll(regmap,
+ &pmc_pll_lock, sama7d65_plls[i][j].n,
+ NULL, sama7d65_plls[i][0].hw, i,
+ sama7d65_plls[i][j].c,
+ sama7d65_plls[i][j].l,
+ sama7d65_plls[i][j].f,
+ sama7d65_plls[i][j].safe_div);
+ break;
+
+ default:
+ continue;
+ }
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_plls[i][j].hw = hw;
+ if (sama7d65_plls[i][j].eid)
+ sama7d65_pmc->chws[sama7d65_plls[i][j].eid] = hw;
+ }
+ }
+
+ hw = at91_clk_register_master_div(regmap, "mck0", NULL,
+ sama7d65_plls[PLL_ID_CPU][1].hw,
+ &mck0_layout, &mck0_characteristics,
+ &pmc_mck0_lock, CLK_GET_RATE_NOCACHE, 5);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->chws[PMC_MCK] = hw;
+ sama7d65_mckx[PCK_PARENT_HW_MCK0].hw = hw;
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ for (i = PCK_PARENT_HW_MCK1; i < ARRAY_SIZE(sama7d65_mckx); i++) {
+ u8 num_parents = 3 + sama7d65_mckx[i].ep_count;
+ struct clk_hw *tmp_parent_hws[8];
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+
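+ /*
+ * Slots 0..2 map 1:1 to the common parents (md_slck, td_slck,
+ * mainck); the extra PLL parents follow, using the hardware mux
+ * values from ep_mux_table.
+ */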
+ PMC_INIT_TABLE(mux_table, 3);
+ PMC_FILL_TABLE(&mux_table[3], sama7d65_mckx[i].ep_mux_table,
+ sama7d65_mckx[i].ep_count);
+ for (j = 0; j < sama7d65_mckx[i].ep_count; j++) {
+ u8 pll_id = sama7d65_mckx[i].ep[j].pll_id;
+ u8 pll_compid = sama7d65_mckx[i].ep[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7d65_plls[pll_id][pll_compid].hw;
+ }
+ PMC_FILL_TABLE(&parent_hws[3], tmp_parent_hws,
+ sama7d65_mckx[i].ep_count);
+
+ hw = at91_clk_sama7g5_register_master(regmap, sama7d65_mckx[i].n,
+ num_parents, NULL, parent_hws,
+ mux_table, &pmc_mckX_lock,
+ sama7d65_mckx[i].id,
+ sama7d65_mckx[i].c,
+ sama7d65_mckx[i].ep_chg_id);
+
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_mckx[i].hw = hw;
+ if (sama7d65_mckx[i].eid)
+ sama7d65_pmc->chws[sama7d65_mckx[i].eid] = hw;
+ }
+
+ parent_names[0] = "syspll_divpmcck";
+ parent_names[1] = "usbpll_divpmcck";
+ parent_names[2] = "main_osc";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7d65_plls[PLL_ID_SYS][PLL_COMPID_DIV0].hw;
+ parent_hws[4] = sama7d65_plls[PLL_ID_DDR][PLL_COMPID_DIV0].hw;
+ parent_hws[5] = sama7d65_plls[PLL_ID_GPU][PLL_COMPID_DIV0].hw;
+ parent_hws[6] = sama7d65_plls[PLL_ID_BAUD][PLL_COMPID_DIV0].hw;
+ parent_hws[7] = sama7d65_plls[PLL_ID_AUDIO][PLL_COMPID_DIV0].hw;
+ parent_hws[8] = sama7d65_plls[PLL_ID_ETH][PLL_COMPID_DIV0].hw;
+
+ for (i = 0; i < 8; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name, NULL, parent_hws,
+ 9, i,
+ &programmable_layout,
+ sama7d65_prog_mux_table);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->pchws[i] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7d65_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sama7d65_systemck[i].n,
+ sama7d65_systemck[i].p, NULL,
+ sama7d65_systemck[i].id, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->shws[sama7d65_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sama7d65_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama7d65_pcr_layout,
+ sama7d65_periphck[i].n,
+ NULL,
+ sama7d65_mckx[sama7d65_periphck[i].p].hw,
+ sama7d65_periphck[i].id,
+ &sama7d65_periphck[i].r,
+ sama7d65_periphck[i].chgp ? 0 :
+ INT_MIN, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->phws[sama7d65_periphck[i].id] = hw;
+ }
+
+ parent_hws[0] = md_slck_hw;
+ parent_hws[1] = td_slck_hw;
+ parent_hws[2] = sama7d65_pmc->chws[PMC_MAIN];
+ parent_hws[3] = sama7d65_pmc->chws[PMC_MCK1];
+ for (i = 0; i < ARRAY_SIZE(sama7d65_gck); i++) {
+ u8 num_parents = 4 + sama7d65_gck[i].pp_count;
+ struct clk_hw *tmp_parent_hws[8];
+ u32 *mux_table;
+
+ mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table)
+ goto err_free;
+
+ alloc_mem[alloc_mem_size++] = mux_table;
+
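+ /*
+ * Slots 0..3 map 1:1 to the common parents (md_slck, td_slck,
+ * mainck, mck1); the PLL parents follow, using the hardware mux
+ * values from pp_mux_table.
+ */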
+ PMC_INIT_TABLE(mux_table, 4);
+ PMC_FILL_TABLE(&mux_table[4], sama7d65_gck[i].pp_mux_table,
+ sama7d65_gck[i].pp_count);
+ for (j = 0; j < sama7d65_gck[i].pp_count; j++) {
+ u8 pll_id = sama7d65_gck[i].pp[j].pll_id;
+ u8 pll_compid = sama7d65_gck[i].pp[j].pll_compid;
+
+ tmp_parent_hws[j] = sama7d65_plls[pll_id][pll_compid].hw;
+ }
+ PMC_FILL_TABLE(&parent_hws[4], tmp_parent_hws,
+ sama7d65_gck[i].pp_count);
+
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sama7d65_pcr_layout,
+ sama7d65_gck[i].n, NULL,
+ parent_hws, mux_table,
+ num_parents,
+ sama7d65_gck[i].id,
+ &sama7d65_gck[i].r,
+ sama7d65_gck[i].pp_chg_id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7d65_pmc->ghws[sama7d65_gck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama7d65_pmc);
+ kfree(alloc_mem);
+
+ return;
+
+err_free:
+ if (alloc_mem) {
+ for (i = 0; i < alloc_mem_size; i++)
+ kfree(alloc_mem[i]);
+ kfree(alloc_mem);
+ }
+
+ kfree(sama7d65_pmc);
+}
+
+/* Some clks are used for a clocksource */
+CLK_OF_DECLARE(sama7d65_pmc, "microchip,sama7d65-pmc", sama7d65_pmc_setup);
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index 7741d8f3dbee..021d1b412af4 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -12,6 +12,8 @@
#include <linux/of_address.h>
#include <linux/io.h>
+#include <dt-bindings/clock/at91.h>
+
#define SLOW_CLOCK_FREQ 32768
#define SLOWCK_SW_CYCLES 5
#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \
@@ -470,7 +472,7 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
{
void __iomem *regbase = of_iomap(np, 0);
struct clk_hw_onecell_data *clk_data;
- struct clk_hw *slow_rc, *slow_osc;
+ struct clk_hw *slow_rc, *slow_osc, *hw;
const char *xtal_name;
const struct clk_hw *parent_hws[2];
static struct clk_parent_data parent_data = {
@@ -506,19 +508,19 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
/* MD_SLCK and TD_SLCK. */
clk_data->num = 2;
- clk_data->hws[0] = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck",
- slow_rc,
- 0, 32768);
- if (IS_ERR(clk_data->hws[0]))
+ hw = clk_hw_register_fixed_rate_parent_hw(NULL, "md_slck", slow_rc,
+ 0, 32768);
+ if (IS_ERR(hw))
goto clk_data_free;
+ clk_data->hws[SCKC_MD_SLCK] = hw;
parent_hws[0] = slow_rc;
parent_hws[1] = slow_osc;
- clk_data->hws[1] = at91_clk_register_sam9x5_slow(regbase, "td_slck",
- parent_hws, 2,
- &at91sam9x60_bits);
- if (IS_ERR(clk_data->hws[1]))
+ hw = at91_clk_register_sam9x5_slow(regbase, "td_slck", parent_hws,
+ 2, &at91sam9x60_bits);
+ if (IS_ERR(hw))
goto unregister_md_slck;
+ clk_data->hws[SCKC_TD_SLCK] = hw;
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
if (WARN_ON(ret))
@@ -527,9 +529,9 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
return;
unregister_td_slck:
- at91_clk_unregister_sam9x5_slow(clk_data->hws[1]);
+ at91_clk_unregister_sam9x5_slow(clk_data->hws[SCKC_TD_SLCK]);
unregister_md_slck:
- clk_hw_unregister(clk_data->hws[0]);
+ clk_hw_unregister(clk_data->hws[SCKC_MD_SLCK]);
clk_data_free:
kfree(clk_data);
unregister_slow_osc:
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index ec5749e301ba..2b0ea882f1e4 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>
+#include <linux/string_choices.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
@@ -502,7 +503,7 @@ static int clk_gate(struct ccu_data *ccu, const char *name,
return 0;
pr_err("%s: failed to %s gate for %s\n", __func__,
- enable ? "enable" : "disable", name);
+ str_enable_disable(enable), name);
return -EIO;
}
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index a18a8768feb4..0e1fe3759530 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -34,6 +34,7 @@ static char *rpi_firmware_clk_names[] = {
[RPI_FIRMWARE_M2MC_CLK_ID] = "m2mc",
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = "pixel-bvb",
[RPI_FIRMWARE_VEC_CLK_ID] = "vec",
+ [RPI_FIRMWARE_DISP_CLK_ID] = "disp",
};
#define RPI_FIRMWARE_STATE_ENABLE_BIT BIT(0)
@@ -56,6 +57,12 @@ struct raspberrypi_clk_data {
struct raspberrypi_clk *rpi;
};
+static inline
+const struct raspberrypi_clk_data *clk_hw_to_data(const struct clk_hw *hw)
+{
+ return container_of(hw, struct raspberrypi_clk_data, hw);
+}
+
struct raspberrypi_clk_variant {
bool export;
char *clkdev;
@@ -111,18 +118,31 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
},
[RPI_FIRMWARE_V3D_CLK_ID] = {
.export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_PIXEL_CLK_ID] = {
.export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_HEVC_CLK_ID] = {
.export = true,
+ .minimize = true,
+ },
+ [RPI_FIRMWARE_ISP_CLK_ID] = {
+ .export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
.export = true,
+ .minimize = true,
},
[RPI_FIRMWARE_VEC_CLK_ID] = {
.export = true,
+ .minimize = true,
+ },
+ [RPI_FIRMWARE_DISP_CLK_ID] = {
+ .export = true,
+ .minimize = true,
},
};
@@ -153,7 +173,6 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware,
struct raspberrypi_firmware_prop msg = {
.id = cpu_to_le32(data->id),
.val = cpu_to_le32(*val),
- .disable_turbo = cpu_to_le32(1),
};
int ret;
@@ -168,8 +187,7 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware,
static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 val = 0;
int ret;
@@ -186,8 +204,7 @@ static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 val = 0;
int ret;
@@ -203,8 +220,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk *rpi = data->rpi;
u32 _rate = rate;
int ret;
@@ -221,8 +237,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct raspberrypi_clk_data *data =
- container_of(hw, struct raspberrypi_clk_data, hw);
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
struct raspberrypi_clk_variant *variant = data->variant;
/*
diff --git a/drivers/clk/clk-apple-nco.c b/drivers/clk/clk-apple-nco.c
index 39472a51530a..457a48d48941 100644
--- a/drivers/clk/clk-apple-nco.c
+++ b/drivers/clk/clk-apple-nco.c
@@ -297,6 +297,9 @@ static int applnco_probe(struct platform_device *pdev)
memset(&init, 0, sizeof(init));
init.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%s-%d", np->name, i);
+ if (!init.name)
+ return -ENOMEM;
+
init.ops = &applnco_ops;
init.parent_data = &pdata;
init.num_parents = 1;
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index bf4d8ddc93ae..934e53a96ddd 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -7,6 +7,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -512,6 +513,7 @@ static int axi_clkgen_probe(struct platform_device *pdev)
struct clk_init_data init;
const char *parent_names[2];
const char *clk_name;
+ struct clk *axi_clk;
unsigned int i;
int ret;
@@ -528,8 +530,24 @@ static int axi_clkgen_probe(struct platform_device *pdev)
return PTR_ERR(axi_clkgen->base);
init.num_parents = of_clk_get_parent_count(pdev->dev.of_node);
- if (init.num_parents < 1 || init.num_parents > 2)
- return -EINVAL;
+
+ axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
+ if (!IS_ERR(axi_clk)) {
+ if (init.num_parents < 2 || init.num_parents > 3)
+ return -EINVAL;
+
+ init.num_parents -= 1;
+ } else {
+ /*
+ * Legacy... So that old DTs which do not have clock-names still
+ * work. In this case we don't explicitly enable the AXI bus
+ * clock.
+ */
+ if (PTR_ERR(axi_clk) != -ENOENT)
+ return PTR_ERR(axi_clk);
+ if (init.num_parents < 1 || init.num_parents > 2)
+ return -EINVAL;
+ }
for (i = 0; i < init.num_parents; i++) {
parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i);
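The hunk above distinguishes "clock genuinely absent from the DT" (-ENOENT, the legacy-binding case) from real failures such as -EPROBE_DEFER. A hedged, generalized sketch of that pattern (axi_clk and the "s_axi_aclk" name come from the hunk; clearing the pointer is an illustrative variant, not what the driver does):

	axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(axi_clk)) {
		/* Only -ENOENT means "not described in DT"; anything else is fatal. */
		if (PTR_ERR(axi_clk) != -ENOENT)
			return PTR_ERR(axi_clk);
		axi_clk = NULL;	/* fall back to legacy behaviour */
	}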
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index dd3d42d9ad86..d0705bb03a2a 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -678,7 +678,7 @@ MODULE_DEVICE_TABLE(of, cdce706_dt_match);
#endif
static const struct i2c_device_id cdce706_id[] = {
- { "cdce706", 0 },
+ { "cdce706" },
{ }
};
MODULE_DEVICE_TABLE(i2c, cdce706_id);
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index e48be7a6c0e2..c51818c1af98 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -601,7 +601,7 @@ static int cdce925_regulator_enable(struct device *dev, const char *name)
/* The CDCE925 uses a funky way to read/write registers. Bulk mode is
* just weird, so just use the single byte mode exclusively. */
-static struct regmap_bus regmap_cdce925_bus = {
+static const struct regmap_bus regmap_cdce925_bus = {
.write = cdce925_regmap_i2c_write,
.read = cdce925_regmap_i2c_read,
};
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 82ae1f26e634..5368d92d9b39 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -218,8 +218,8 @@ static void devm_clk_bulk_release_all_enable(struct device *dev, void *res)
clk_bulk_put_all(devres->num_clks, devres->clks);
}
-int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
- struct clk_bulk_data **clks)
+int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
+ struct clk_bulk_data **clks)
{
struct clk_bulk_devres *devres;
int ret;
@@ -244,11 +244,12 @@ int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
} else {
clk_bulk_put_all(devres->num_clks, devres->clks);
devres_free(devres);
+ return ret;
}
- return ret;
+ return devres->num_clks;
}
-EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable);
+EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enabled);
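Besides the rename, the function now returns the number of clocks obtained on success rather than 0, matching devm_clk_bulk_get_all(). A hedged caller sketch (variable names are illustrative):

	struct clk_bulk_data *clks;
	int num_clks;

	num_clks = devm_clk_bulk_get_all_enabled(dev, &clks);
	if (num_clks < 0)
		return num_clks;	/* get or enable failed */
	/* num_clks may legitimately be 0 if the node lists no clocks */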
static int devm_clk_match(struct device *dev, void *res, void *data)
{
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index a2c2b5203b0a..c1f426b8a504 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -72,6 +72,8 @@ static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
return clk_div_mask(width);
if (flags & CLK_DIVIDER_POWER_OF_TWO)
return 1 << clk_div_mask(width);
+ if (flags & CLK_DIVIDER_EVEN_INTEGERS)
+ return 2 * (clk_div_mask(width) + 1);
if (table)
return _get_table_maxdiv(table, width);
return clk_div_mask(width) + 1;
@@ -97,6 +99,8 @@ static unsigned int _get_div(const struct clk_div_table *table,
return 1 << val;
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
return val ? val : clk_div_mask(width) + 1;
+ if (flags & CLK_DIVIDER_EVEN_INTEGERS)
+ return 2 * (val + 1);
if (table)
return _get_table_div(table, val);
return val + 1;
@@ -122,6 +126,8 @@ static unsigned int _get_val(const struct clk_div_table *table,
return __ffs(div);
if (flags & CLK_DIVIDER_MAX_AT_ZERO)
return (div == clk_div_mask(width) + 1) ? 0 : div;
+ if (flags & CLK_DIVIDER_EVEN_INTEGERS)
+ return (div >> 1) - 1;
if (table)
return _get_table_val(table, div);
return div - 1;
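Taken together, the three hunks make CLK_DIVIDER_EVEN_INTEGERS a consistent pair of mappings: div = 2 * (val + 1) on read and val = div / 2 - 1 on write. With width = 4, for example, register values 0..15 select the even dividers 2, 4, ..., 32, and _get_maxdiv() correctly reports 2 * (15 + 1) = 32.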
@@ -538,7 +544,8 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ void __iomem *reg, u8 shift, u8 width,
+ unsigned long clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_divider *div;
@@ -610,8 +617,8 @@ EXPORT_SYMBOL_GPL(__clk_hw_register_divider);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock)
+ unsigned long clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_hw *hw;
@@ -664,7 +671,8 @@ struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ void __iomem *reg, u8 shift, u8 width,
+ unsigned long clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock)
{
struct clk_hw **ptr, *hw;
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
index 22fbea61c3dc..15bbdeb60b8e 100644
--- a/drivers/clk/clk-en7523.c
+++ b/drivers/clk/clk-en7523.c
@@ -3,8 +3,10 @@
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <dt-bindings/clock/en7523-clk.h>
#include <dt-bindings/reset/airoha,en7581-reset.h>
@@ -31,19 +33,14 @@
#define REG_RESET_CONTROL_PCIE1 BIT(27)
#define REG_RESET_CONTROL_PCIE2 BIT(26)
/* EN7581 */
-#define REG_PCIE0_MEM 0x00
-#define REG_PCIE0_MEM_MASK 0x04
-#define REG_PCIE1_MEM 0x08
-#define REG_PCIE1_MEM_MASK 0x0c
-#define REG_PCIE2_MEM 0x10
-#define REG_PCIE2_MEM_MASK 0x14
#define REG_NP_SCU_PCIC 0x88
#define REG_NP_SCU_SSTR 0x9c
#define REG_PCIE_XSI0_SEL_MASK GENMASK(14, 13)
#define REG_PCIE_XSI1_SEL_MASK GENMASK(12, 11)
+#define REG_CRYPTO_CLKSRC2 0x20c
-#define REG_RST_CTRL2 0x00
-#define REG_RST_CTRL1 0x04
+#define REG_RST_CTRL2 0x830
+#define REG_RST_CTRL1 0x834
struct en_clk_desc {
int id;
@@ -78,13 +75,10 @@ struct en_rst_data {
};
struct en_clk_soc_data {
+ u32 num_clocks;
const struct clk_ops pcie_ops;
- struct {
- const u16 *bank_ofs;
- const u16 *idx_map;
- u16 idx_map_nr;
- } reset;
- int (*hw_init)(struct platform_device *pdev, void __iomem *np_base);
+ int (*hw_init)(struct platform_device *pdev,
+ struct clk_hw_onecell_data *clk_data);
};
static const u32 gsw_base[] = { 400000000, 500000000 };
@@ -92,6 +86,12 @@ static const u32 emi_base[] = { 333000000, 400000000 };
static const u32 bus_base[] = { 500000000, 540000000 };
static const u32 slic_base[] = { 100000000, 3125000 };
static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
+/* EN7581 */
+static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 };
+static const u32 bus7581_base[] = { 600000000, 540000000 };
+static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
+static const u32 crypto_base[] = { 540000000, 480000000 };
+static const u32 emmc7581_base[] = { 200000000, 150000000 };
static const struct en_clk_desc en7523_base_clks[] = {
{
@@ -189,6 +189,111 @@ static const struct en_clk_desc en7523_base_clks[] = {
}
};
+static const struct en_clk_desc en7581_base_clks[] = {
+ {
+ .id = EN7523_CLK_GSW,
+ .name = "gsw",
+
+ .base_reg = REG_GSW_CLK_DIV_SEL,
+ .base_bits = 1,
+ .base_shift = 8,
+ .base_values = gsw_base,
+ .n_base_values = ARRAY_SIZE(gsw_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_EMI,
+ .name = "emi",
+
+ .base_reg = REG_EMI_CLK_DIV_SEL,
+ .base_bits = 2,
+ .base_shift = 8,
+ .base_values = emi7581_base,
+ .n_base_values = ARRAY_SIZE(emi7581_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_BUS,
+ .name = "bus",
+
+ .base_reg = REG_BUS_CLK_DIV_SEL,
+ .base_bits = 1,
+ .base_shift = 8,
+ .base_values = bus7581_base,
+ .n_base_values = ARRAY_SIZE(bus7581_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_SLIC,
+ .name = "slic",
+
+ .base_reg = REG_SPI_CLK_FREQ_SEL,
+ .base_bits = 1,
+ .base_shift = 0,
+ .base_values = slic_base,
+ .n_base_values = ARRAY_SIZE(slic_base),
+
+ .div_reg = REG_SPI_CLK_DIV_SEL,
+ .div_bits = 5,
+ .div_shift = 24,
+ .div_val0 = 20,
+ .div_step = 2,
+ }, {
+ .id = EN7523_CLK_SPI,
+ .name = "spi",
+
+ .base_reg = REG_SPI_CLK_DIV_SEL,
+
+ .base_value = 400000000,
+
+ .div_bits = 5,
+ .div_shift = 8,
+ .div_val0 = 40,
+ .div_step = 2,
+ }, {
+ .id = EN7523_CLK_NPU,
+ .name = "npu",
+
+ .base_reg = REG_NPU_CLK_DIV_SEL,
+ .base_bits = 2,
+ .base_shift = 8,
+ .base_values = npu7581_base,
+ .n_base_values = ARRAY_SIZE(npu7581_base),
+
+ .div_bits = 3,
+ .div_shift = 0,
+ .div_step = 1,
+ .div_offset = 1,
+ }, {
+ .id = EN7523_CLK_CRYPTO,
+ .name = "crypto",
+
+ .base_reg = REG_CRYPTO_CLKSRC2,
+ .base_bits = 1,
+ .base_shift = 0,
+ .base_values = crypto_base,
+ .n_base_values = ARRAY_SIZE(crypto_base),
+ }, {
+ .id = EN7581_CLK_EMMC,
+ .name = "emmc",
+
+ .base_reg = REG_CRYPTO_CLKSRC2,
+ .base_bits = 1,
+ .base_shift = 12,
+ .base_values = emmc7581_base,
+ .n_base_values = ARRAY_SIZE(emmc7581_base),
+ }
+};
+
static const u16 en7581_rst_ofs[] = {
REG_RST_CTRL2,
REG_RST_CTRL1,
@@ -252,15 +357,11 @@ static const u16 en7581_rst_map[] = {
[EN7581_XPON_MAC_RST] = RST_NR_PER_BANK + 31,
};
-static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
+static u32 en7523_get_base_rate(const struct en_clk_desc *desc, u32 val)
{
- const struct en_clk_desc *desc = &en7523_base_clks[i];
- u32 val;
-
if (!desc->base_bits)
return desc->base_value;
- val = readl(base + desc->base_reg);
val >>= desc->base_shift;
val &= (1 << desc->base_bits) - 1;
@@ -270,16 +371,11 @@ static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i)
return desc->base_values[val];
}
-static u32 en7523_get_div(void __iomem *base, int i)
+static u32 en7523_get_div(const struct en_clk_desc *desc, u32 val)
{
- const struct en_clk_desc *desc = &en7523_base_clks[i];
- u32 reg, val;
-
if (!desc->div_bits)
return 1;
- reg = desc->div_reg ? desc->div_reg : desc->base_reg;
- val = readl(base + reg);
val >>= desc->div_shift;
val &= (1 << desc->div_bits) - 1;
@@ -393,7 +489,6 @@ static int en7581_pci_enable(struct clk_hw *hw)
REG_PCI_CONTROL_PERSTOUT;
val = readl(np_base + REG_PCI_CONTROL);
writel(val | mask, np_base + REG_PCI_CONTROL);
- msleep(250);
return 0;
}
@@ -412,44 +507,81 @@ static void en7581_pci_disable(struct clk_hw *hw)
usleep_range(1000, 2000);
}
-static int en7581_clk_hw_init(struct platform_device *pdev,
- void __iomem *np_base)
+static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
+ void __iomem *base, void __iomem *np_base)
{
- void __iomem *pb_base;
- u32 val;
+ struct clk_hw *hw;
+ u32 rate;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
+ const struct en_clk_desc *desc = &en7523_base_clks[i];
+ u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
+ u32 val = readl(base + desc->base_reg);
- pb_base = devm_platform_ioremap_resource(pdev, 3);
- if (IS_ERR(pb_base))
- return PTR_ERR(pb_base);
+ rate = en7523_get_base_rate(desc, val);
+ val = readl(base + reg);
+ rate /= en7523_get_div(desc, val);
- val = readl(np_base + REG_NP_SCU_SSTR);
- val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
- writel(val, np_base + REG_NP_SCU_SSTR);
- val = readl(np_base + REG_NP_SCU_PCIC);
- writel(val | 3, np_base + REG_NP_SCU_PCIC);
+ hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
+ if (IS_ERR(hw)) {
+ pr_err("Failed to register clk %s: %ld\n",
+ desc->name, PTR_ERR(hw));
+ continue;
+ }
- writel(0x20000000, pb_base + REG_PCIE0_MEM);
- writel(0xfc000000, pb_base + REG_PCIE0_MEM_MASK);
- writel(0x24000000, pb_base + REG_PCIE1_MEM);
- writel(0xfc000000, pb_base + REG_PCIE1_MEM_MASK);
- writel(0x28000000, pb_base + REG_PCIE2_MEM);
- writel(0xfc000000, pb_base + REG_PCIE2_MEM_MASK);
+ clk_data->hws[desc->id] = hw;
+ }
+
+ hw = en7523_register_pcie_clk(dev, np_base);
+ clk_data->hws[EN7523_CLK_PCIE] = hw;
+}
+
+static int en7523_clk_hw_init(struct platform_device *pdev,
+ struct clk_hw_onecell_data *clk_data)
+{
+ void __iomem *base, *np_base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ np_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(np_base))
+ return PTR_ERR(np_base);
+
+ en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
return 0;
}
-static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
- void __iomem *base, void __iomem *np_base)
+static void en7581_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
+ struct regmap *map, void __iomem *base)
{
struct clk_hw *hw;
u32 rate;
int i;
- for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
- const struct en_clk_desc *desc = &en7523_base_clks[i];
+ for (i = 0; i < ARRAY_SIZE(en7581_base_clks); i++) {
+ const struct en_clk_desc *desc = &en7581_base_clks[i];
+ u32 val, reg = desc->div_reg ? desc->div_reg : desc->base_reg;
+ int err;
+
+ err = regmap_read(map, desc->base_reg, &val);
+ if (err) {
+ pr_err("Failed reading fixed clk rate %s: %d\n",
+ desc->name, err);
+ continue;
+ }
+ rate = en7523_get_base_rate(desc, val);
- rate = en7523_get_base_rate(base, i);
- rate /= en7523_get_div(base, i);
+ err = regmap_read(map, reg, &val);
+ if (err) {
+ pr_err("Failed reading fixed clk div %s: %d\n",
+ desc->name, err);
+ continue;
+ }
+ rate /= en7523_get_div(desc, val);
hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate);
if (IS_ERR(hw)) {
@@ -461,10 +593,8 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
clk_data->hws[desc->id] = hw;
}
- hw = en7523_register_pcie_clk(dev, np_base);
+ hw = en7523_register_pcie_clk(dev, base);
clk_data->hws[EN7523_CLK_PCIE] = hw;
-
- clk_data->num = EN7523_NUM_CLOCKS;
}
static int en7523_reset_update(struct reset_controller_dev *rcdev,
@@ -516,38 +646,27 @@ static int en7523_reset_xlate(struct reset_controller_dev *rcdev,
return rst_data->idx_map[reset_spec->args[0]];
}
-static const struct reset_control_ops en7523_reset_ops = {
+static const struct reset_control_ops en7581_reset_ops = {
.assert = en7523_reset_assert,
.deassert = en7523_reset_deassert,
.status = en7523_reset_status,
};
-static int en7523_reset_register(struct platform_device *pdev,
- const struct en_clk_soc_data *soc_data)
+static int en7581_reset_register(struct device *dev, void __iomem *base)
{
- struct device *dev = &pdev->dev;
struct en_rst_data *rst_data;
- void __iomem *base;
-
- /* no reset lines available */
- if (!soc_data->reset.idx_map_nr)
- return 0;
-
- base = devm_platform_ioremap_resource(pdev, 2);
- if (IS_ERR(base))
- return PTR_ERR(base);
rst_data = devm_kzalloc(dev, sizeof(*rst_data), GFP_KERNEL);
if (!rst_data)
return -ENOMEM;
- rst_data->bank_ofs = soc_data->reset.bank_ofs;
- rst_data->idx_map = soc_data->reset.idx_map;
+ rst_data->bank_ofs = en7581_rst_ofs;
+ rst_data->idx_map = en7581_rst_map;
rst_data->base = base;
- rst_data->rcdev.nr_resets = soc_data->reset.idx_map_nr;
+ rst_data->rcdev.nr_resets = ARRAY_SIZE(en7581_rst_map);
rst_data->rcdev.of_xlate = en7523_reset_xlate;
- rst_data->rcdev.ops = &en7523_reset_ops;
+ rst_data->rcdev.ops = &en7581_reset_ops;
rst_data->rcdev.of_node = dev->of_node;
rst_data->rcdev.of_reset_n_cells = 1;
rst_data->rcdev.owner = THIS_MODULE;
@@ -556,71 +675,73 @@ static int en7523_reset_register(struct platform_device *pdev,
return devm_reset_controller_register(dev, &rst_data->rcdev);
}
-static int en7523_clk_probe(struct platform_device *pdev)
+static int en7581_clk_hw_init(struct platform_device *pdev,
+ struct clk_hw_onecell_data *clk_data)
{
- struct device_node *node = pdev->dev.of_node;
- const struct en_clk_soc_data *soc_data;
- struct clk_hw_onecell_data *clk_data;
- void __iomem *base, *np_base;
- int r;
+ struct regmap *map;
+ void __iomem *base;
+ u32 val;
+
+ map = syscon_regmap_lookup_by_compatible("airoha,en7581-chip-scu");
+ if (IS_ERR(map))
+ return PTR_ERR(map);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- np_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(np_base))
- return PTR_ERR(np_base);
+ en7581_register_clocks(&pdev->dev, clk_data, map, base);
+
+ val = readl(base + REG_NP_SCU_SSTR);
+ val &= ~(REG_PCIE_XSI0_SEL_MASK | REG_PCIE_XSI1_SEL_MASK);
+ writel(val, base + REG_NP_SCU_SSTR);
+ val = readl(base + REG_NP_SCU_PCIC);
+ writel(val | 3, base + REG_NP_SCU_PCIC);
+
+ return en7581_reset_register(&pdev->dev, base);
+}
+
+static int en7523_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct en_clk_soc_data *soc_data;
+ struct clk_hw_onecell_data *clk_data;
+ int r;
soc_data = device_get_match_data(&pdev->dev);
- if (soc_data->hw_init) {
- r = soc_data->hw_init(pdev, np_base);
- if (r)
- return r;
- }
clk_data = devm_kzalloc(&pdev->dev,
- struct_size(clk_data, hws, EN7523_NUM_CLOCKS),
+ struct_size(clk_data, hws, soc_data->num_clocks),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
- en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ clk_data->num = soc_data->num_clocks;
+ r = soc_data->hw_init(pdev, clk_data);
if (r)
- return dev_err_probe(&pdev->dev, r, "Could not register clock provider: %s\n",
- pdev->name);
-
- r = en7523_reset_register(pdev, soc_data);
- if (r) {
- of_clk_del_provider(node);
- return dev_err_probe(&pdev->dev, r, "Could not register reset controller: %s\n",
- pdev->name);
- }
+ return r;
- return 0;
+ return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
static const struct en_clk_soc_data en7523_data = {
+ .num_clocks = ARRAY_SIZE(en7523_base_clks) + 1,
.pcie_ops = {
.is_enabled = en7523_pci_is_enabled,
.prepare = en7523_pci_prepare,
.unprepare = en7523_pci_unprepare,
},
+ .hw_init = en7523_clk_hw_init,
};
static const struct en_clk_soc_data en7581_data = {
+ /* We increment num_clocks by 1 to account for the additional PCIe clock */
+ .num_clocks = ARRAY_SIZE(en7581_base_clks) + 1,
.pcie_ops = {
.is_enabled = en7581_pci_is_enabled,
.enable = en7581_pci_enable,
.disable = en7581_pci_disable,
},
- .reset = {
- .bank_ofs = en7581_rst_ofs,
- .idx_map = en7581_rst_map,
- .idx_map_nr = ARRAY_SIZE(en7581_rst_map),
- },
.hw_init = en7581_clk_hw_init,
};
diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c
index f888aed79b11..4bd8d6ecf6a2 100644
--- a/drivers/clk/clk-ep93xx.c
+++ b/drivers/clk/clk-ep93xx.c
@@ -586,9 +586,9 @@ static unsigned long calc_pll_rate(u64 rate, u32 config_word)
static int ep93xx_plls_init(struct ep93xx_clk_priv *priv)
{
- const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
- const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
- const char pclk_divisors[] = { 1, 2, 4, 8 };
+ static const char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
+ static const char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
+ static const char pclk_divisors[] = { 1, 2, 4, 8 };
struct clk_parent_data xtali = { .index = 0 };
unsigned int clk_f_div, clk_h_div, clk_p_div;
unsigned long clk_pll1_rate, clk_pll2_rate;
diff --git a/drivers/clk/clk-eyeq.c b/drivers/clk/clk-eyeq.c
new file mode 100644
index 000000000000..640c25788487
--- /dev/null
+++ b/drivers/clk/clk-eyeq.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PLL clock driver for the Mobileye EyeQ5, EyeQ6L and EyeQ6H platforms.
+ *
+ * This controller handles:
+ * - Read-only PLLs, all derived from the same main crystal clock.
+ * - It also exposes divider clocks, those are children to PLLs.
+ * - Fixed factor clocks, children to PLLs.
+ *
+ * Parent clock is expected to be constant. This driver's registers live in a
+ * shared region called OLB. Some PLLs and fixed-factors are initialised early
+ * by of_clk_init(); if so, two clk providers are registered.
+ *
+ * We use eqc_ as prefix, as in "EyeQ Clock", but way shorter.
+ *
+ * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
+ */
+
+/*
+ * Set pr_fmt() for printing from eqc_early_init().
+ * It is called at of_clk_init() stage (read: really early).
+ */
+#define pr_fmt(fmt) "clk-eyeq: " fmt
+
+#include <linux/array_size.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <dt-bindings/clock/mobileye,eyeq5-clk.h>
+
+/* In frac mode, enables the fractional noise-canceling DAC. Otherwise, the bit has no function. */
+#define PCSR0_DAC_EN BIT(0)
+/* Fractional or integer mode */
+#define PCSR0_DSM_EN BIT(1)
+#define PCSR0_PLL_EN BIT(2)
+/* All clocks output held at 0 */
+#define PCSR0_FOUTPOSTDIV_EN BIT(3)
+#define PCSR0_POST_DIV1 GENMASK(6, 4)
+#define PCSR0_POST_DIV2 GENMASK(9, 7)
+#define PCSR0_REF_DIV GENMASK(15, 10)
+#define PCSR0_INTIN GENMASK(27, 16)
+#define PCSR0_BYPASS BIT(28)
+/* Bits 30..29 are reserved */
+#define PCSR0_PLL_LOCKED BIT(31)
+
+#define PCSR1_RESET BIT(0)
+#define PCSR1_SSGC_DIV GENMASK(4, 1)
+/* Spread amplitude (% = 0.1 * SPREAD[4:0]) */
+#define PCSR1_SPREAD GENMASK(9, 5)
+#define PCSR1_DIS_SSCG BIT(10)
+/* Down-spread or center-spread */
+#define PCSR1_DOWN_SPREAD BIT(11)
+#define PCSR1_FRAC_IN GENMASK(31, 12)
+
+struct eqc_pll {
+ unsigned int index;
+ const char *name;
+ unsigned int reg64;
+};
+
+/*
+ * Divider clock. Divider is 2*(v+1), with v the register value.
+ * Min divider is 2, max is 2*(2^width).
+ */
+struct eqc_div {
+ unsigned int index;
+ const char *name;
+ unsigned int parent;
+ unsigned int reg;
+ u8 shift;
+ u8 width;
+};
+
+struct eqc_fixed_factor {
+ unsigned int index;
+ const char *name;
+ unsigned int mult;
+ unsigned int div;
+ unsigned int parent;
+};
+
+struct eqc_match_data {
+ unsigned int pll_count;
+ const struct eqc_pll *plls;
+
+ unsigned int div_count;
+ const struct eqc_div *divs;
+
+ unsigned int fixed_factor_count;
+ const struct eqc_fixed_factor *fixed_factors;
+
+ const char *reset_auxdev_name;
+ const char *pinctrl_auxdev_name;
+
+ unsigned int early_clk_count;
+};
+
+struct eqc_early_match_data {
+ unsigned int early_pll_count;
+ const struct eqc_pll *early_plls;
+
+ unsigned int early_fixed_factor_count;
+ const struct eqc_fixed_factor *early_fixed_factors;
+
+ /*
+ * We want our of_xlate callback to EPROBE_DEFER instead of dev_err()
+ * and EINVAL. For that, we must know the total clock count.
+ */
+ unsigned int late_clk_count;
+};
+
+/*
+ * Both factors (mult and div) must fit in 32 bits. When an operation overflows,
+ * this function throws away low bits so that factors still fit in 32 bits.
+ *
+ * Precision loss depends on amplitude of mult and div. Worst theoretical
+ * loss is: (UINT_MAX+1) / UINT_MAX - 1 = 2.3e-10.
+ * This is 1Hz every 4.3GHz.
+ */
+static void eqc_pll_downshift_factors(unsigned long *mult, unsigned long *div)
+{
+ unsigned long biggest;
+ unsigned int shift;
+
+ /* This function can be removed if mult/div switch to unsigned long. */
+ static_assert(sizeof_field(struct clk_fixed_factor, mult) == sizeof(unsigned int));
+ static_assert(sizeof_field(struct clk_fixed_factor, div) == sizeof(unsigned int));
+
+ /* No overflow, nothing to be done. */
+ if (*mult <= UINT_MAX && *div <= UINT_MAX)
+ return;
+
+ /*
+ * Compute the shift required to bring the biggest factor into unsigned
+ * int range. That is, shift its highest set bit to the unsigned int
+ * most significant bit.
+ */
+ biggest = max(*mult, *div);
+ shift = __fls(biggest) - (BITS_PER_BYTE * sizeof(unsigned int)) + 1;
+
+ *mult >>= shift;
+ *div >>= shift;
+}
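A worked example of the downshift: with *mult = 0x123456789 (bit 32 set) and *div = 1000, __fls() returns 32, so shift = 32 - 32 + 1 = 1 and the factors become 0x91A2B3C4 and 500. The ratio changes only by the discarded low bit, roughly one part in 2^32, consistent with the 2.3e-10 bound stated above.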
+
+static int eqc_pll_parse_registers(u32 r0, u32 r1, unsigned long *mult,
+ unsigned long *div, unsigned long *acc)
+{
+ u32 spread;
+
+ if (r0 & PCSR0_BYPASS) {
+ *mult = 1;
+ *div = 1;
+ *acc = 0;
+ return 0;
+ }
+
+ if (!(r0 & PCSR0_PLL_LOCKED))
+ return -EINVAL;
+
+ *mult = FIELD_GET(PCSR0_INTIN, r0);
+ *div = FIELD_GET(PCSR0_REF_DIV, r0);
+ if (r0 & PCSR0_FOUTPOSTDIV_EN)
+ *div *= FIELD_GET(PCSR0_POST_DIV1, r0) * FIELD_GET(PCSR0_POST_DIV2, r0);
+
+ /* Fractional mode, in 2^20 (0x100000) parts. */
+ if (r0 & PCSR0_DSM_EN) {
+ *div *= (1ULL << 20);
+ *mult = *mult * (1ULL << 20) + FIELD_GET(PCSR1_FRAC_IN, r1);
+ }
+
+ if (!*mult || !*div)
+ return -EINVAL;
+
+ if (r1 & (PCSR1_RESET | PCSR1_DIS_SSCG)) {
+ *acc = 0;
+ return 0;
+ }
+
+ /*
+ * Spread spectrum.
+ *
+ * Spread is 1/1000 parts of frequency, accuracy is half of
+ * that. To get accuracy, convert to ppb (parts per billion).
+ *
+ * acc = spread * 1e6 / 2
+ * with acc in parts per billion and,
+ * spread in parts per thousand.
+ */
+ spread = FIELD_GET(PCSR1_SPREAD, r1);
+ *acc = spread * 500000;
+
+ if (r1 & PCSR1_DOWN_SPREAD) {
+ /*
+ * Downspreading: the central frequency is half a
+ * spread lower.
+ */
+ *mult *= 2000 - spread;
+ *div *= 2000;
+
+ /*
+ * Previous operation might overflow 32 bits. If it
+ * does, throw away the least amount of low bits.
+ */
+ eqc_pll_downshift_factors(mult, div);
+ }
+
+ return 0;
+}
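As a worked example of the spread-spectrum math: SPREAD = 5 encodes a 0.5% spread, so *acc = 5 * 500000 = 2,500,000 ppb (0.25%, half the spread). With down-spread set, *mult is scaled by 2000 - 5 = 1995 and *div by 2000, i.e. the reported rate sits 0.25% (half a spread) below nominal, as the comment describes.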
+
+static void eqc_probe_init_plls(struct device *dev, const struct eqc_match_data *data,
+ void __iomem *base, struct clk_hw_onecell_data *cells)
+{
+ unsigned long mult, div, acc;
+ const struct eqc_pll *pll;
+ struct clk_hw *hw;
+ unsigned int i;
+ u32 r0, r1;
+ u64 val;
+ int ret;
+
+ for (i = 0; i < data->pll_count; i++) {
+ pll = &data->plls[i];
+
+ val = readq(base + pll->reg64);
+ r0 = val;
+ r1 = val >> 32;
+
+ ret = eqc_pll_parse_registers(r0, r1, &mult, &div, &acc);
+ if (ret) {
+ dev_warn(dev, "failed parsing state of %s\n", pll->name);
+ cells->hws[pll->index] = ERR_PTR(ret);
+ continue;
+ }
+
+ hw = clk_hw_register_fixed_factor_with_accuracy_fwname(dev,
+ dev->of_node, pll->name, "ref", 0, mult, div, acc);
+ cells->hws[pll->index] = hw;
+ if (IS_ERR(hw))
+ dev_warn(dev, "failed registering %s: %pe\n", pll->name, hw);
+ }
+}
+
+static void eqc_probe_init_divs(struct device *dev, const struct eqc_match_data *data,
+ void __iomem *base, struct clk_hw_onecell_data *cells)
+{
+ struct clk_parent_data parent_data = { };
+ const struct eqc_div *div;
+ struct clk_hw *parent;
+ void __iomem *reg;
+ struct clk_hw *hw;
+ unsigned int i;
+
+ for (i = 0; i < data->div_count; i++) {
+ div = &data->divs[i];
+ reg = base + div->reg;
+ parent = cells->hws[div->parent];
+
+ if (IS_ERR(parent)) {
+ /* Parent is in early clk provider. */
+ parent_data.index = div->parent;
+ parent_data.hw = NULL;
+ } else {
+ /* Avoid clock lookup when we already have the hw reference. */
+ parent_data.index = 0;
+ parent_data.hw = parent;
+ }
+
+ hw = clk_hw_register_divider_table_parent_data(dev, div->name,
+ &parent_data, 0, reg, div->shift, div->width,
+ CLK_DIVIDER_EVEN_INTEGERS, NULL, NULL);
+ cells->hws[div->index] = hw;
+ if (IS_ERR(hw))
+ dev_warn(dev, "failed registering %s: %pe\n",
+ div->name, hw);
+ }
+}
+
+static void eqc_probe_init_fixed_factors(struct device *dev,
+ const struct eqc_match_data *data,
+ struct clk_hw_onecell_data *cells)
+{
+ const struct eqc_fixed_factor *ff;
+ struct clk_hw *hw, *parent_hw;
+ unsigned int i;
+
+ for (i = 0; i < data->fixed_factor_count; i++) {
+ ff = &data->fixed_factors[i];
+ parent_hw = cells->hws[ff->parent];
+
+ if (IS_ERR(parent_hw)) {
+ /* Parent is in early clk provider. */
+ hw = clk_hw_register_fixed_factor_index(dev, ff->name,
+ ff->parent, 0, ff->mult, ff->div);
+ } else {
+ /* Avoid clock lookup when we already have the hw reference. */
+ hw = clk_hw_register_fixed_factor_parent_hw(dev, ff->name,
+ parent_hw, 0, ff->mult, ff->div);
+ }
+
+ cells->hws[ff->index] = hw;
+ if (IS_ERR(hw))
+ dev_warn(dev, "failed registering %s: %pe\n",
+ ff->name, hw);
+ }
+}
+
+static void eqc_auxdev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ kfree(adev);
+}
+
+static int eqc_auxdev_create(struct device *dev, void __iomem *base,
+ const char *name, u32 id)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->name = name;
+ adev->dev.parent = dev;
+ adev->dev.platform_data = (void __force *)base;
+ adev->dev.release = eqc_auxdev_release;
+ adev->id = id;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ auxiliary_device_uninit(adev);
+
+ return ret;
+}
+
+static int eqc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct eqc_match_data *data;
+ struct clk_hw_onecell_data *cells;
+ unsigned int i, clk_count;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return 0; /* No clocks nor auxdevs, we are done. */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base)
+ return -ENOMEM;
+
+ /* Init optional reset auxiliary device. */
+ if (data->reset_auxdev_name) {
+ ret = eqc_auxdev_create(dev, base, data->reset_auxdev_name, 0);
+ if (ret)
+ dev_warn(dev, "failed creating auxiliary device %s.%s: %d\n",
+ KBUILD_MODNAME, data->reset_auxdev_name, ret);
+ }
+
+ /* Init optional pinctrl auxiliary device. */
+ if (data->pinctrl_auxdev_name) {
+ ret = eqc_auxdev_create(dev, base, data->pinctrl_auxdev_name, 0);
+ if (ret)
+ dev_warn(dev, "failed creating auxiliary device %s.%s: %d\n",
+ KBUILD_MODNAME, data->pinctrl_auxdev_name, ret);
+ }
+
+ if (data->pll_count + data->div_count + data->fixed_factor_count == 0)
+ return 0; /* Zero clocks, we are done. */
+
+ clk_count = data->pll_count + data->div_count +
+ data->fixed_factor_count + data->early_clk_count;
+ cells = kzalloc(struct_size(cells, hws, clk_count), GFP_KERNEL);
+ if (!cells)
+ return -ENOMEM;
+
+ cells->num = clk_count;
+
+ /* Early PLLs are marked as errors: the early provider will get queried. */
+ for (i = 0; i < clk_count; i++)
+ cells->hws[i] = ERR_PTR(-EINVAL);
+
+ eqc_probe_init_plls(dev, data, base, cells);
+
+ eqc_probe_init_divs(dev, data, base, cells);
+
+ eqc_probe_init_fixed_factors(dev, data, cells);
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, cells);
+}
+
+/* Required early for GIC timer (pll-cpu) and UARTs (pll-per). */
+static const struct eqc_pll eqc_eyeq5_early_plls[] = {
+ { .index = EQ5C_PLL_CPU, .name = "pll-cpu", .reg64 = 0x02C },
+ { .index = EQ5C_PLL_PER, .name = "pll-per", .reg64 = 0x05C },
+};
+
+static const struct eqc_pll eqc_eyeq5_plls[] = {
+ { .index = EQ5C_PLL_VMP, .name = "pll-vmp", .reg64 = 0x034 },
+ { .index = EQ5C_PLL_PMA, .name = "pll-pma", .reg64 = 0x03C },
+ { .index = EQ5C_PLL_VDI, .name = "pll-vdi", .reg64 = 0x044 },
+ { .index = EQ5C_PLL_DDR0, .name = "pll-ddr0", .reg64 = 0x04C },
+ { .index = EQ5C_PLL_PCI, .name = "pll-pci", .reg64 = 0x054 },
+ { .index = EQ5C_PLL_PMAC, .name = "pll-pmac", .reg64 = 0x064 },
+ { .index = EQ5C_PLL_MPC, .name = "pll-mpc", .reg64 = 0x06C },
+ { .index = EQ5C_PLL_DDR1, .name = "pll-ddr1", .reg64 = 0x074 },
+};
+
+enum {
+ /*
+ * EQ5C_PLL_CPU children.
+ * EQ5C_PER_OCC_PCI is the last clock exposed in dt-bindings.
+ */
+ EQ5C_CPU_OCC = EQ5C_PER_OCC_PCI + 1,
+ EQ5C_CPU_SI_CSS0,
+ EQ5C_CPU_CPC,
+ EQ5C_CPU_CM,
+ EQ5C_CPU_MEM,
+ EQ5C_CPU_OCC_ISRAM,
+ EQ5C_CPU_ISRAM,
+ EQ5C_CPU_OCC_DBU,
+ EQ5C_CPU_SI_DBU_TP,
+
+ /*
+ * EQ5C_PLL_VDI children.
+ */
+ EQ5C_VDI_OCC_VDI,
+ EQ5C_VDI_VDI,
+ EQ5C_VDI_OCC_CAN_SER,
+ EQ5C_VDI_CAN_SER,
+ EQ5C_VDI_I2C_SER,
+
+ /*
+ * EQ5C_PLL_PER children.
+ */
+ EQ5C_PER_PERIPH,
+ EQ5C_PER_CAN,
+ EQ5C_PER_TIMER,
+ EQ5C_PER_CCF,
+ EQ5C_PER_OCC_MJPEG,
+ EQ5C_PER_HSM,
+ EQ5C_PER_MJPEG,
+ EQ5C_PER_FCMU_A,
+};
+
+static const struct eqc_fixed_factor eqc_eyeq5_early_fixed_factors[] = {
+ /* EQ5C_PLL_CPU children */
+ { EQ5C_CPU_OCC, "occ-cpu", 1, 1, EQ5C_PLL_CPU },
+ { EQ5C_CPU_SI_CSS0, "si-css0", 1, 1, EQ5C_CPU_OCC },
+ { EQ5C_CPU_CORE0, "core0", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CORE1, "core1", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CORE2, "core2", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CORE3, "core3", 1, 1, EQ5C_CPU_SI_CSS0 },
+
+ /* EQ5C_PLL_PER children */
+ { EQ5C_PER_OCC, "occ-periph", 1, 16, EQ5C_PLL_PER },
+ { EQ5C_PER_UART, "uart", 1, 1, EQ5C_PER_OCC },
+};
+
+static const struct eqc_fixed_factor eqc_eyeq5_fixed_factors[] = {
+ /* EQ5C_PLL_CPU children */
+ { EQ5C_CPU_CPC, "cpc", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_CM, "cm", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_MEM, "mem", 1, 1, EQ5C_CPU_SI_CSS0 },
+ { EQ5C_CPU_OCC_ISRAM, "occ-isram", 1, 2, EQ5C_PLL_CPU },
+ { EQ5C_CPU_ISRAM, "isram", 1, 1, EQ5C_CPU_OCC_ISRAM },
+ { EQ5C_CPU_OCC_DBU, "occ-dbu", 1, 10, EQ5C_PLL_CPU },
+ { EQ5C_CPU_SI_DBU_TP, "si-dbu-tp", 1, 1, EQ5C_CPU_OCC_DBU },
+
+ /* EQ5C_PLL_VDI children */
+ { EQ5C_VDI_OCC_VDI, "occ-vdi", 1, 2, EQ5C_PLL_VDI },
+ { EQ5C_VDI_VDI, "vdi", 1, 1, EQ5C_VDI_OCC_VDI },
+ { EQ5C_VDI_OCC_CAN_SER, "occ-can-ser", 1, 16, EQ5C_PLL_VDI },
+ { EQ5C_VDI_CAN_SER, "can-ser", 1, 1, EQ5C_VDI_OCC_CAN_SER },
+ { EQ5C_VDI_I2C_SER, "i2c-ser", 1, 20, EQ5C_PLL_VDI },
+
+ /* EQ5C_PLL_PER children */
+ { EQ5C_PER_PERIPH, "periph", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_CAN, "can", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_SPI, "spi", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_I2C, "i2c", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_TIMER, "timer", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_GPIO, "gpio", 1, 1, EQ5C_PER_OCC },
+ { EQ5C_PER_EMMC, "emmc-sys", 1, 10, EQ5C_PLL_PER },
+ { EQ5C_PER_CCF, "ccf-ctrl", 1, 4, EQ5C_PLL_PER },
+ { EQ5C_PER_OCC_MJPEG, "occ-mjpeg", 1, 2, EQ5C_PLL_PER },
+ { EQ5C_PER_HSM, "hsm", 1, 1, EQ5C_PER_OCC_MJPEG },
+ { EQ5C_PER_MJPEG, "mjpeg", 1, 1, EQ5C_PER_OCC_MJPEG },
+ { EQ5C_PER_FCMU_A, "fcmu-a", 1, 20, EQ5C_PLL_PER },
+ { EQ5C_PER_OCC_PCI, "occ-pci-sys", 1, 8, EQ5C_PLL_PER },
+};
+
+static const struct eqc_div eqc_eyeq5_divs[] = {
+ {
+ .index = EQ5C_DIV_OSPI,
+ .name = "div-ospi",
+ .parent = EQ5C_PLL_PER,
+ .reg = 0x11C,
+ .shift = 0,
+ .width = 4,
+ },
+};
+
+static const struct eqc_early_match_data eqc_eyeq5_early_match_data __initconst = {
+ .early_pll_count = ARRAY_SIZE(eqc_eyeq5_early_plls),
+ .early_plls = eqc_eyeq5_early_plls,
+
+ .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq5_early_fixed_factors),
+ .early_fixed_factors = eqc_eyeq5_early_fixed_factors,
+
+ .late_clk_count = ARRAY_SIZE(eqc_eyeq5_plls) + ARRAY_SIZE(eqc_eyeq5_divs) +
+ ARRAY_SIZE(eqc_eyeq5_fixed_factors),
+};
+
+static const struct eqc_match_data eqc_eyeq5_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq5_plls),
+ .plls = eqc_eyeq5_plls,
+
+ .div_count = ARRAY_SIZE(eqc_eyeq5_divs),
+ .divs = eqc_eyeq5_divs,
+
+ .fixed_factor_count = ARRAY_SIZE(eqc_eyeq5_fixed_factors),
+ .fixed_factors = eqc_eyeq5_fixed_factors,
+
+ .reset_auxdev_name = "reset",
+ .pinctrl_auxdev_name = "pinctrl",
+
+ .early_clk_count = ARRAY_SIZE(eqc_eyeq5_early_plls) +
+ ARRAY_SIZE(eqc_eyeq5_early_fixed_factors),
+};
+
+static const struct eqc_pll eqc_eyeq6l_plls[] = {
+ { .index = EQ6LC_PLL_DDR, .name = "pll-ddr", .reg64 = 0x02C },
+ { .index = EQ6LC_PLL_CPU, .name = "pll-cpu", .reg64 = 0x034 }, /* also acc */
+ { .index = EQ6LC_PLL_PER, .name = "pll-per", .reg64 = 0x03C },
+ { .index = EQ6LC_PLL_VDI, .name = "pll-vdi", .reg64 = 0x044 },
+};
+
+static const struct eqc_match_data eqc_eyeq6l_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6l_plls),
+ .plls = eqc_eyeq6l_plls,
+
+ .reset_auxdev_name = "reset",
+};
+
+static const struct eqc_match_data eqc_eyeq6h_west_match_data = {
+ .reset_auxdev_name = "reset_west",
+};
+
+static const struct eqc_pll eqc_eyeq6h_east_plls[] = {
+ { .index = 0, .name = "pll-east", .reg64 = 0x074 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_east_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_east_plls),
+ .plls = eqc_eyeq6h_east_plls,
+
+ .reset_auxdev_name = "reset_east",
+};
+
+static const struct eqc_pll eqc_eyeq6h_south_plls[] = {
+ { .index = EQ6HC_SOUTH_PLL_VDI, .name = "pll-vdi", .reg64 = 0x000 },
+ { .index = EQ6HC_SOUTH_PLL_PCIE, .name = "pll-pcie", .reg64 = 0x008 },
+ { .index = EQ6HC_SOUTH_PLL_PER, .name = "pll-per", .reg64 = 0x010 },
+ { .index = EQ6HC_SOUTH_PLL_ISP, .name = "pll-isp", .reg64 = 0x018 },
+};
+
+static const struct eqc_div eqc_eyeq6h_south_divs[] = {
+ {
+ .index = EQ6HC_SOUTH_DIV_EMMC,
+ .name = "div-emmc",
+ .parent = EQ6HC_SOUTH_PLL_PER,
+ .reg = 0x070,
+ .shift = 4,
+ .width = 4,
+ },
+ {
+ .index = EQ6HC_SOUTH_DIV_OSPI_REF,
+ .name = "div-ospi-ref",
+ .parent = EQ6HC_SOUTH_PLL_PER,
+ .reg = 0x090,
+ .shift = 4,
+ .width = 4,
+ },
+ {
+ .index = EQ6HC_SOUTH_DIV_OSPI_SYS,
+ .name = "div-ospi-sys",
+ .parent = EQ6HC_SOUTH_PLL_PER,
+ .reg = 0x090,
+ .shift = 8,
+ .width = 1,
+ },
+ {
+ .index = EQ6HC_SOUTH_DIV_TSU,
+ .name = "div-tsu",
+ .parent = EQ6HC_SOUTH_PLL_PCIE,
+ .reg = 0x098,
+ .shift = 4,
+ .width = 8,
+ },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_south_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_south_plls),
+ .plls = eqc_eyeq6h_south_plls,
+
+ .div_count = ARRAY_SIZE(eqc_eyeq6h_south_divs),
+ .divs = eqc_eyeq6h_south_divs,
+};
+
+static const struct eqc_pll eqc_eyeq6h_ddr0_plls[] = {
+ { .index = 0, .name = "pll-ddr0", .reg64 = 0x074 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_ddr0_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_ddr0_plls),
+ .plls = eqc_eyeq6h_ddr0_plls,
+};
+
+static const struct eqc_pll eqc_eyeq6h_ddr1_plls[] = {
+ { .index = 0, .name = "pll-ddr1", .reg64 = 0x074 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_ddr1_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_ddr1_plls),
+ .plls = eqc_eyeq6h_ddr1_plls,
+};
+
+static const struct eqc_pll eqc_eyeq6h_acc_plls[] = {
+ { .index = EQ6HC_ACC_PLL_XNN, .name = "pll-xnn", .reg64 = 0x040 },
+ { .index = EQ6HC_ACC_PLL_VMP, .name = "pll-vmp", .reg64 = 0x050 },
+ { .index = EQ6HC_ACC_PLL_PMA, .name = "pll-pma", .reg64 = 0x05C },
+ { .index = EQ6HC_ACC_PLL_MPC, .name = "pll-mpc", .reg64 = 0x068 },
+ { .index = EQ6HC_ACC_PLL_NOC, .name = "pll-noc", .reg64 = 0x070 },
+};
+
+static const struct eqc_match_data eqc_eyeq6h_acc_match_data = {
+ .pll_count = ARRAY_SIZE(eqc_eyeq6h_acc_plls),
+ .plls = eqc_eyeq6h_acc_plls,
+
+ .reset_auxdev_name = "reset_acc",
+};
+
+static const struct of_device_id eqc_match_table[] = {
+ { .compatible = "mobileye,eyeq5-olb", .data = &eqc_eyeq5_match_data },
+ { .compatible = "mobileye,eyeq6l-olb", .data = &eqc_eyeq6l_match_data },
+ { .compatible = "mobileye,eyeq6h-west-olb", .data = &eqc_eyeq6h_west_match_data },
+ { .compatible = "mobileye,eyeq6h-east-olb", .data = &eqc_eyeq6h_east_match_data },
+ { .compatible = "mobileye,eyeq6h-south-olb", .data = &eqc_eyeq6h_south_match_data },
+ { .compatible = "mobileye,eyeq6h-ddr0-olb", .data = &eqc_eyeq6h_ddr0_match_data },
+ { .compatible = "mobileye,eyeq6h-ddr1-olb", .data = &eqc_eyeq6h_ddr1_match_data },
+ { .compatible = "mobileye,eyeq6h-acc-olb", .data = &eqc_eyeq6h_acc_match_data },
+ {}
+};
+
+static struct platform_driver eqc_driver = {
+ .probe = eqc_probe,
+ .driver = {
+ .name = "clk-eyeq",
+ .of_match_table = eqc_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(eqc_driver);
+
+/* Required early for GIC timer. */
+static const struct eqc_pll eqc_eyeq6h_central_early_plls[] = {
+ { .index = EQ6HC_CENTRAL_PLL_CPU, .name = "pll-cpu", .reg64 = 0x02C },
+};
+
+static const struct eqc_fixed_factor eqc_eyeq6h_central_early_fixed_factors[] = {
+ { EQ6HC_CENTRAL_CPU_OCC, "occ-cpu", 1, 1, EQ6HC_CENTRAL_PLL_CPU },
+};
+
+static const struct eqc_early_match_data eqc_eyeq6h_central_early_match_data __initconst = {
+ .early_pll_count = ARRAY_SIZE(eqc_eyeq6h_central_early_plls),
+ .early_plls = eqc_eyeq6h_central_early_plls,
+
+ .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq6h_central_early_fixed_factors),
+ .early_fixed_factors = eqc_eyeq6h_central_early_fixed_factors,
+};
+
+/* Required early for UART. */
+static const struct eqc_pll eqc_eyeq6h_west_early_plls[] = {
+ { .index = EQ6HC_WEST_PLL_PER, .name = "pll-west", .reg64 = 0x074 },
+};
+
+static const struct eqc_fixed_factor eqc_eyeq6h_west_early_fixed_factors[] = {
+ { EQ6HC_WEST_PER_OCC, "west-per-occ", 1, 10, EQ6HC_WEST_PLL_PER },
+ { EQ6HC_WEST_PER_UART, "west-per-uart", 1, 1, EQ6HC_WEST_PER_OCC },
+};
+
+static const struct eqc_early_match_data eqc_eyeq6h_west_early_match_data __initconst = {
+ .early_pll_count = ARRAY_SIZE(eqc_eyeq6h_west_early_plls),
+ .early_plls = eqc_eyeq6h_west_early_plls,
+
+ .early_fixed_factor_count = ARRAY_SIZE(eqc_eyeq6h_west_early_fixed_factors),
+ .early_fixed_factors = eqc_eyeq6h_west_early_fixed_factors,
+};
+
+static void __init eqc_early_init(struct device_node *np,
+ const struct eqc_early_match_data *early_data)
+{
+ struct clk_hw_onecell_data *cells;
+ unsigned int i, clk_count;
+ void __iomem *base;
+ int ret;
+
+ clk_count = early_data->early_pll_count + early_data->early_fixed_factor_count +
+ early_data->late_clk_count;
+ cells = kzalloc(struct_size(cells, hws, clk_count), GFP_KERNEL);
+ if (!cells) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cells->num = clk_count;
+
+ /*
+ * Mark all clocks as deferred; some are registered here, the rest at
+ * platform device probe.
+ *
+ * Once the platform device is probed, its provider will take priority
+ * when looking up clocks.
+ */
+ for (i = 0; i < clk_count; i++)
+ cells->hws[i] = ERR_PTR(-EPROBE_DEFER);
+
+ /* Offsets (reg64) of early PLLs are relative to OLB block. */
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ for (i = 0; i < early_data->early_pll_count; i++) {
+ const struct eqc_pll *pll = &early_data->early_plls[i];
+ unsigned long mult, div, acc;
+ struct clk_hw *hw;
+ u32 r0, r1;
+ u64 val;
+
+ val = readq(base + pll->reg64);
+ r0 = val;
+ r1 = val >> 32;
+
+ ret = eqc_pll_parse_registers(r0, r1, &mult, &div, &acc);
+ if (ret) {
+ pr_err("failed parsing state of %s\n", pll->name);
+ goto err;
+ }
+
+ hw = clk_hw_register_fixed_factor_with_accuracy_fwname(NULL,
+ np, pll->name, "ref", 0, mult, div, acc);
+ cells->hws[pll->index] = hw;
+ if (IS_ERR(hw)) {
+ pr_err("failed registering %s: %pe\n", pll->name, hw);
+ ret = PTR_ERR(hw);
+ goto err;
+ }
+ }
+
+ for (i = 0; i < early_data->early_fixed_factor_count; i++) {
+ const struct eqc_fixed_factor *ff = &early_data->early_fixed_factors[i];
+ struct clk_hw *parent_hw = cells->hws[ff->parent];
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_fixed_factor_parent_hw(NULL, ff->name,
+ parent_hw, 0, ff->mult, ff->div);
+ cells->hws[ff->index] = hw;
+ if (IS_ERR(hw)) {
+ pr_err("failed registering %s: %pe\n", ff->name, hw);
+ ret = PTR_ERR(hw);
+ goto err;
+ }
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, cells);
+ if (ret) {
+ pr_err("failed registering clk provider: %d\n", ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ /*
+ * We are doomed. The system will not be able to boot.
+ *
+ * Let's still try to be good citizens by freeing resources and printing
+ * a last error message that might help debugging.
+ */
+
+ pr_err("failed clk init: %d\n", ret);
+
+ if (cells) {
+ of_clk_del_provider(np);
+
+ for (i = 0; i < early_data->early_pll_count; i++) {
+ const struct eqc_pll *pll = &early_data->early_plls[i];
+ struct clk_hw *hw = cells->hws[pll->index];
+
+ if (!IS_ERR_OR_NULL(hw))
+ clk_hw_unregister_fixed_factor(hw);
+ }
+
+ kfree(cells);
+ }
+}
+
+static void __init eqc_eyeq5_early_init(struct device_node *np)
+{
+ eqc_early_init(np, &eqc_eyeq5_early_match_data);
+}
+CLK_OF_DECLARE_DRIVER(eqc_eyeq5, "mobileye,eyeq5-olb", eqc_eyeq5_early_init);
+
+static void __init eqc_eyeq6h_central_early_init(struct device_node *np)
+{
+ eqc_early_init(np, &eqc_eyeq6h_central_early_match_data);
+}
+CLK_OF_DECLARE_DRIVER(eqc_eyeq6h_central, "mobileye,eyeq6h-central-olb",
+ eqc_eyeq6h_central_early_init);
+
+static void __init eqc_eyeq6h_west_early_init(struct device_node *np)
+{
+ eqc_early_init(np, &eqc_eyeq6h_west_early_match_data);
+}
+CLK_OF_DECLARE_DRIVER(eqc_eyeq6h_west, "mobileye,eyeq6h-west-olb",
+ eqc_eyeq6h_west_early_init);
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 8fba63fc70c5..e62ae8794d44 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -241,6 +241,17 @@ struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *
}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_with_accuracy_fwname);
+struct clk_hw *clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ const struct clk_parent_data pdata = { .index = index };
+
+ return __clk_hw_register_fixed_factor(dev, NULL, name, NULL, NULL, &pdata,
+ flags, mult, div, 0, 0, false);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor_index);
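This new helper is the index-based sibling of clk_hw_register_fixed_factor_parent_hw(): the parent is described through clk_parent_data.index rather than a name or a hw pointer, which is what clk-eyeq uses above when the parent lives in the early clock provider. A hedged sketch mirroring that call site (clock name and index are illustrative):

	/* 1/2 fixed factor; parent resolved later through parent index 3 */
	hw = clk_hw_register_fixed_factor_index(dev, "occ-vdi", 3, 0, 1, 2);
	if (IS_ERR(hw))
		return PTR_ERR(hw);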
+
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 5b114043771d..9099c57e2715 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -17,13 +17,15 @@
#include <linux/device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
/**
* DOC: basic gpio gated clock which can be enabled and disabled
* with gpio output
* Traits of this clock:
- * prepare - clk_(un)prepare only ensures parent is (un)prepared
- * enable - clk_enable and clk_disable are functional & control gpio
+ * prepare - clk_(un)prepare are functional and control a gpio that can sleep
+ * enable - clk_enable and clk_disable are functional & control
+ * non-sleeping gpio
* rate - inherits rate from parent. No clk_set_rate support
* parent - fixed parent. No clk_set_parent support
*/
@@ -199,7 +201,6 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
struct gpio_desc *gpiod;
struct clk_hw *hw;
bool is_mux;
- int ret;
is_mux = of_device_is_compatible(node, "gpio-mux-clock");
@@ -211,17 +212,9 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
gpio_name = is_mux ? "select" : "enable";
gpiod = devm_gpiod_get(dev, gpio_name, GPIOD_OUT_LOW);
- if (IS_ERR(gpiod)) {
- ret = PTR_ERR(gpiod);
- if (ret == -EPROBE_DEFER)
- pr_debug("%pOFn: %s: GPIOs not yet available, retry later\n",
- node, __func__);
- else
- pr_err("%pOFn: %s: Can't get '%s' named GPIO property\n",
- node, __func__,
- gpio_name);
- return ret;
- }
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod),
+ "Can't get '%s' named GPIO property\n", gpio_name);
if (is_mux)
hw = clk_hw_register_gpio_mux(dev, gpiod);
@@ -247,3 +240,187 @@ static struct platform_driver gpio_clk_driver = {
},
};
builtin_platform_driver(gpio_clk_driver);
+
+/**
+ * DOC: gated fixed clock, controlled with a gpio output and a regulator
+ * Traits of this clock:
+ * prepare - clk_prepare and clk_unprepare are functional & control the
+ *           regulator and, optionally, a gpio that can sleep
+ * enable - clk_enable and clk_disable are functional & control gpio
+ * rate - rate is fixed and set on clock registration
+ * parent - fixed clock is a root clock and has no parent
+ */
+
+/**
+ * struct clk_gated_fixed - Gateable fixed rate clock
+ * @clk_gpio: instance of clk_gpio for gate-gpio
+ * @supply: supply regulator
+ * @rate: fixed rate
+ */
+struct clk_gated_fixed {
+ struct clk_gpio clk_gpio;
+ struct regulator *supply;
+ unsigned long rate;
+};
+
+#define to_clk_gated_fixed(_clk_gpio) container_of(_clk_gpio, struct clk_gated_fixed, clk_gpio)
+
+static unsigned long clk_gated_fixed_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return to_clk_gated_fixed(to_clk_gpio(hw))->rate;
+}
+
+static int clk_gated_fixed_prepare(struct clk_hw *hw)
+{
+ struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
+
+ if (!clk->supply)
+ return 0;
+
+ return regulator_enable(clk->supply);
+}
+
+static void clk_gated_fixed_unprepare(struct clk_hw *hw)
+{
+ struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
+
+ if (!clk->supply)
+ return;
+
+ regulator_disable(clk->supply);
+}
+
+static int clk_gated_fixed_is_prepared(struct clk_hw *hw)
+{
+ struct clk_gated_fixed *clk = to_clk_gated_fixed(to_clk_gpio(hw));
+
+ if (!clk->supply)
+ return true;
+
+ return regulator_is_enabled(clk->supply);
+}
+
+/*
+ * Fixed gated clock with non-sleeping gpio.
+ *
+ * Prepare operation turns on the supply regulator
+ * and the enable operation switches the enable-gpio.
+ */
+static const struct clk_ops clk_gated_fixed_ops = {
+ .prepare = clk_gated_fixed_prepare,
+ .unprepare = clk_gated_fixed_unprepare,
+ .is_prepared = clk_gated_fixed_is_prepared,
+ .enable = clk_gpio_gate_enable,
+ .disable = clk_gpio_gate_disable,
+ .is_enabled = clk_gpio_gate_is_enabled,
+ .recalc_rate = clk_gated_fixed_recalc_rate,
+};
+
+static int clk_sleeping_gated_fixed_prepare(struct clk_hw *hw)
+{
+ int ret;
+
+ ret = clk_gated_fixed_prepare(hw);
+ if (ret)
+ return ret;
+
+ ret = clk_sleeping_gpio_gate_prepare(hw);
+ if (ret)
+ clk_gated_fixed_unprepare(hw);
+
+ return ret;
+}
+
+static void clk_sleeping_gated_fixed_unprepare(struct clk_hw *hw)
+{
+ clk_gated_fixed_unprepare(hw);
+ clk_sleeping_gpio_gate_unprepare(hw);
+}
+
+/*
+ * Fixed gated clock with sleeping gpio.
+ *
+ * Enabling the supply regulator and switching the enable-gpio both
+ * happen in the prepare step.
+ * is_prepared only needs to check the gpio state, as toggling the
+ * gpio is the last step when preparing.
+ */
+static const struct clk_ops clk_sleeping_gated_fixed_ops = {
+ .prepare = clk_sleeping_gated_fixed_prepare,
+ .unprepare = clk_sleeping_gated_fixed_unprepare,
+ .is_prepared = clk_sleeping_gpio_gate_is_prepared,
+ .recalc_rate = clk_gated_fixed_recalc_rate,
+};
+
+static int clk_gated_fixed_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_gated_fixed *clk;
+ const struct clk_ops *ops;
+ const char *clk_name;
+ u32 rate;
+ int ret;
+
+ clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+
+ ret = device_property_read_u32(dev, "clock-frequency", &rate);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clock-frequency\n");
+ clk->rate = rate;
+
+ ret = device_property_read_string(dev, "clock-output-names", &clk_name);
+ if (ret)
+ clk_name = fwnode_get_name(dev->fwnode);
+
+ clk->supply = devm_regulator_get_optional(dev, "vdd");
+ if (IS_ERR(clk->supply)) {
+ if (PTR_ERR(clk->supply) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(clk->supply),
+ "Failed to get regulator\n");
+ clk->supply = NULL;
+ }
+
+ clk->clk_gpio.gpiod = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(clk->clk_gpio.gpiod))
+ return dev_err_probe(dev, PTR_ERR(clk->clk_gpio.gpiod),
+ "Failed to get gpio\n");
+
+ if (gpiod_cansleep(clk->clk_gpio.gpiod))
+ ops = &clk_sleeping_gated_fixed_ops;
+ else
+ ops = &clk_gated_fixed_ops;
+
+ clk->clk_gpio.hw.init = CLK_HW_INIT_NO_PARENT(clk_name, ops, 0);
+
+ /* register the clock */
+ ret = devm_clk_hw_register(dev, &clk->clk_gpio.hw);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clock\n");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &clk->clk_gpio.hw);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clock provider\n");
+
+ return 0;
+}
+
+static const struct of_device_id gated_fixed_clk_match_table[] = {
+ { .compatible = "gated-fixed-clock" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver gated_fixed_clk_driver = {
+ .probe = clk_gated_fixed_probe,
+ .driver = {
+ .name = "gated-fixed-clk",
+ .of_match_table = gated_fixed_clk_match_table,
+ },
+};
+builtin_platform_driver(gated_fixed_clk_driver);
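From the probe code, the properties a "gated-fixed-clock" node is expected to carry can be read off: clock-frequency (required), clock-output-names (optional; the node name is the fallback), a "vdd" supply regulator (optional) and an "enable" GPIO (optional; a GPIO that can sleep selects the sleeping ops variant).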
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
index 870fd7df50c1..16e0405fe28b 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -24,13 +24,20 @@
#define DIV_MAX 255
-static const char *clk_names[N_CLOCKS] = {
+static const char * const lan966x_clk_names[] = {
"qspi0", "qspi1", "qspi2", "sdmmc0",
"pi", "mcan0", "mcan1", "flexcom0",
"flexcom1", "flexcom2", "flexcom3",
"flexcom4", "timer1", "usb_refclk",
};
+static const char * const lan969x_clk_names[] = {
+ "qspi0", "qspi2", "sdmmc0", "sdmmc1",
+ "mcan0", "mcan1", "flexcom0",
+ "flexcom1", "flexcom2", "flexcom3",
+ "timer1", "usb_refclk",
+};
+
struct lan966x_gck {
struct clk_hw hw;
void __iomem *reg;
@@ -53,7 +60,7 @@ struct clk_gate_soc_desc {
int bit_idx;
};
-static const struct clk_gate_soc_desc clk_gate_desc[] = {
+static const struct clk_gate_soc_desc lan966x_clk_gate_desc[] = {
{ "uhphs", 11 },
{ "udphs", 10 },
{ "mcramc", 9 },
@@ -61,6 +68,37 @@ static const struct clk_gate_soc_desc clk_gate_desc[] = {
{ }
};
+static const struct clk_gate_soc_desc lan969x_clk_gate_desc[] = {
+ { "usb_drd", 10 },
+ { "mcramc", 9 },
+ { "hmatrix", 8 },
+ { }
+};
+
+struct lan966x_match_data {
+ char *name;
+ const char * const *clk_name;
+ const struct clk_gate_soc_desc *clk_gate_desc;
+ u8 num_generic_clks;
+ u8 num_total_clks;
+};
+
+static struct lan966x_match_data lan966x_desc = {
+ .name = "lan966x",
+ .clk_name = lan966x_clk_names,
+ .clk_gate_desc = lan966x_clk_gate_desc,
+ .num_total_clks = 18,
+ .num_generic_clks = 14,
+};
+
+static struct lan966x_match_data lan969x_desc = {
+ .name = "lan969x",
+ .clk_name = lan969x_clk_names,
+ .clk_gate_desc = lan969x_clk_gate_desc,
+ .num_total_clks = 15,
+ .num_generic_clks = 12,
+};
+
static DEFINE_SPINLOCK(clk_gate_lock);
static void __iomem *base;
@@ -186,24 +224,26 @@ static struct clk_hw *lan966x_gck_clk_register(struct device *dev, int i)
};
static int lan966x_gate_clk_register(struct device *dev,
+ const struct lan966x_match_data *data,
struct clk_hw_onecell_data *hw_data,
void __iomem *gate_base)
{
- int i;
+ for (int i = data->num_generic_clks; i < data->num_total_clks; ++i) {
+ int idx = i - data->num_generic_clks;
+ const struct clk_gate_soc_desc *desc;
- for (i = GCK_GATE_UHPHS; i < N_CLOCKS; ++i) {
- int idx = i - GCK_GATE_UHPHS;
+ desc = &data->clk_gate_desc[idx];
hw_data->hws[i] =
- devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
- "lan966x", 0, gate_base,
- clk_gate_desc[idx].bit_idx,
+ devm_clk_hw_register_gate(dev, desc->name,
+ data->name, 0, gate_base,
+ desc->bit_idx,
0, &clk_gate_lock);
if (IS_ERR(hw_data->hws[i]))
return dev_err_probe(dev, PTR_ERR(hw_data->hws[i]),
"failed to register %s clock\n",
- clk_gate_desc[idx].name);
+ desc->name);
}
return 0;
@@ -211,13 +251,18 @@ static int lan966x_gate_clk_register(struct device *dev,
static int lan966x_clk_probe(struct platform_device *pdev)
{
+ const struct lan966x_match_data *data;
struct clk_hw_onecell_data *hw_data;
struct device *dev = &pdev->dev;
void __iomem *gate_base;
struct resource *res;
int i, ret;
- hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, N_CLOCKS),
+ data = device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, data->num_total_clks),
GFP_KERNEL);
if (!hw_data)
return -ENOMEM;
@@ -228,10 +273,10 @@ static int lan966x_clk_probe(struct platform_device *pdev)
init.ops = &lan966x_gck_ops;
- hw_data->num = GCK_GATE_UHPHS;
+ hw_data->num = data->num_generic_clks;
- for (i = 0; i < GCK_GATE_UHPHS; i++) {
- init.name = clk_names[i];
+ for (i = 0; i < data->num_generic_clks; i++) {
+ init.name = data->clk_name[i];
hw_data->hws[i] = lan966x_gck_clk_register(dev, i);
if (IS_ERR(hw_data->hws[i])) {
dev_err(dev, "failed to register %s clock\n",
@@ -246,9 +291,9 @@ static int lan966x_clk_probe(struct platform_device *pdev)
if (IS_ERR(gate_base))
return PTR_ERR(gate_base);
- hw_data->num = N_CLOCKS;
+ hw_data->num = data->num_total_clks;
- ret = lan966x_gate_clk_register(dev, hw_data, gate_base);
+ ret = lan966x_gate_clk_register(dev, data, hw_data, gate_base);
if (ret)
return ret;
}
@@ -257,7 +302,8 @@ static int lan966x_clk_probe(struct platform_device *pdev)
}
static const struct of_device_id lan966x_clk_dt_ids[] = {
- { .compatible = "microchip,lan966x-gck", },
+ { .compatible = "microchip,lan966x-gck", .data = &lan966x_desc },
+ { .compatible = "microchip,lan9691-gck", .data = &lan969x_desc },
{ }
};
MODULE_DEVICE_TABLE(of, lan966x_clk_dt_ids);
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index c997e7491996..2bcf422f0b04 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -375,7 +375,7 @@ static unsigned long lmk04832_vco_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
- const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
+ static const unsigned int pll2_p[] = {8, 2, 2, 3, 4, 5, 6, 7};
unsigned int pll2_n, p, pll2_r;
unsigned int pll2_misc;
unsigned long vco_rate;
@@ -637,7 +637,7 @@ static int lmk04832_register_vco(struct lmk04832 *lmk)
static int lmk04832_clkout_set_ddly(struct lmk04832 *lmk, int id)
{
- const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
+ static const int dclk_div_adj[] = {0, 0, -2, -2, 0, 3, -1, 0};
unsigned int sclkx_y_ddly = 10;
unsigned int dclkx_y_ddly;
unsigned int dclkx_y_div;
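Both lmk04832 hunks make the same micro-fix: a function-local lookup table declared plain const may be rebuilt on the stack at every call, while static const places it in .rodata once. A sketch of the distinction (illustrative function):

unsigned int pll2_p_lookup(unsigned int idx)
{
	/*
	 * 'static const' puts the table in .rodata, initialized at
	 * compile time; without 'static' the compiler may emit code
	 * to copy the array onto the stack on each invocation.
	 */
	static const unsigned int pll2_p[] = { 8, 2, 2, 3, 4, 5, 6, 7 };

	return pll2_p[idx & 7];
}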
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
index 820bb1e9e3b7..27e632edd484 100644
--- a/drivers/clk/clk-loongson2.c
+++ b/drivers/clk/clk-loongson2.c
@@ -29,8 +29,10 @@ enum loongson2_clk_type {
struct loongson2_clk_provider {
void __iomem *base;
struct device *dev;
- struct clk_hw_onecell_data clk_data;
spinlock_t clk_lock; /* protect access to DIV registers */
+
+ /* Must be last -- ends in a flexible-array member. */
+ struct clk_hw_onecell_data clk_data;
};
struct loongson2_clk_data {
@@ -292,7 +294,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
return -EINVAL;
for (p = data; p->name; p++)
- clks_num++;
+ clks_num = max(clks_num, p->id + 1);
clp = devm_kzalloc(dev, struct_size(clp, clk_data.hws, clks_num),
GFP_KERNEL);
@@ -304,9 +306,12 @@ static int loongson2_clk_probe(struct platform_device *pdev)
return PTR_ERR(clp->base);
spin_lock_init(&clp->clk_lock);
- clp->clk_data.num = clks_num + 1;
+ clp->clk_data.num = clks_num;
clp->dev = dev;
+ /* Avoid returning NULL for unused id */
+ memset_p((void **)clp->clk_data.hws, ERR_PTR(-ENOENT), clks_num);
+
for (i = 0; i < clks_num; i++) {
p = &data[i];
switch (p->type) {
@@ -333,8 +338,8 @@ static int loongson2_clk_probe(struct platform_device *pdev)
&clp->clk_lock);
break;
case CLK_TYPE_FIXED:
- hw = clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
- 0, p->fixed_rate);
+ hw = devm_clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
+ 0, p->fixed_rate);
break;
default:
return dev_err_probe(dev, -EINVAL, "Invalid clk type\n");
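Two details in the loongson2 fix deserve spelling out: a struct that embeds clk_hw_onecell_data must keep it as the last member, because it ends in a flexible array that struct_size() sizes at allocation time, and memset_p() pre-fills the pointer slots so a lookup of an unused clock id returns an error instead of NULL. A hedged sketch of the allocation pattern (names illustrative):

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct provider {
	spinlock_t lock;
	/* Must be last -- ends in a flexible-array member. */
	struct clk_hw_onecell_data clk_data;
};

static struct provider *provider_alloc(struct device *dev, unsigned int nr)
{
	struct provider *clp;

	/* struct_size() adds room for 'nr' trailing hw pointers */
	clp = devm_kzalloc(dev, struct_size(clp, clk_data.hws, nr),
			   GFP_KERNEL);
	if (!clp)
		return NULL;

	/* unused ids resolve to ERR_PTR(-ENOENT) rather than NULL */
	memset_p((void **)clp->clk_data.hws, ERR_PTR(-ENOENT), nr);
	clp->clk_data.num = nr;

	return clp;
}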
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 06245681dac7..fc0aeb4247f2 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/reboot.h>
/*
@@ -116,9 +117,9 @@ static void __init nomadik_src_init(void)
val = readl(src_base + SRC_XTALCR);
pr_info("SXTALO is %s\n",
- (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
+ str_disabled_enabled(val & SRC_XTALCR_SXTALDIS));
pr_info("MXTAL is %s\n",
- (val & SRC_XTALCR_MXTALSTAT) ? "enabled" : "disabled");
+ str_enabled_disabled(val & SRC_XTALCR_MXTALSTAT));
if (of_property_read_bool(np, "disable-sxtalo")) {
/* The machine uses an external oscillator circuit */
val |= SRC_XTALCR_SXTALDIS;
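str_enabled_disabled() and its reversed companion live in <linux/string_choices.h> and simply replace the open-coded ternary; the two conversions above are behavior-preserving:

#include <linux/string_choices.h>

/*
 * str_enabled_disabled(v) == (v) ? "enabled"  : "disabled"
 * str_disabled_enabled(v) == (v) ? "disabled" : "enabled"
 */
pr_info("MXTAL is %s\n", str_enabled_disabled(val & SRC_XTALCR_MXTALSTAT));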
diff --git a/drivers/clk/clk-npcm8xx.c b/drivers/clk/clk-npcm8xx.c
new file mode 100644
index 000000000000..2138c011411d
--- /dev/null
+++ b/drivers/clk/clk-npcm8xx.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NPCM8xx Clock Generator
+ * All the clocks are initialized by the bootloader, so this driver allows only
+ * reading of current settings directly from the hardware.
+ *
+ * Copyright (C) 2020 Nuvoton Technologies
+ * Author: Tomer Maimon <tomer.maimon@nuvoton.com>
+ */
+
+#define pr_fmt(fmt) "npcm8xx_clk: " fmt
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/nuvoton,npcm845-clk.h>
+#include <soc/nuvoton/clock-npcm8xx.h>
+
+/* npcm8xx clock registers */
+#define NPCM8XX_CLKSEL 0x04
+#define NPCM8XX_CLKDIV1 0x08
+#define NPCM8XX_CLKDIV2 0x2C
+#define NPCM8XX_CLKDIV3 0x58
+#define NPCM8XX_CLKDIV4 0x7C
+#define NPCM8XX_PLLCON0 0x0C
+#define NPCM8XX_PLLCON1 0x10
+#define NPCM8XX_PLLCON2 0x54
+#define NPCM8XX_PLLCONG 0x60
+#define NPCM8XX_THRTL_CNT 0xC0
+
+#define PLLCON_LOKI BIT(31)
+#define PLLCON_LOKS BIT(30)
+#define PLLCON_FBDV GENMASK(27, 16)
+#define PLLCON_OTDV2 GENMASK(15, 13)
+#define PLLCON_PWDEN BIT(12)
+#define PLLCON_OTDV1 GENMASK(10, 8)
+#define PLLCON_INDV GENMASK(5, 0)
+
+static void __iomem *clk_base;
+
+struct npcm8xx_clk_pll {
+ void __iomem *pllcon;
+ unsigned int id;
+ const char *name;
+ unsigned long flags;
+ struct clk_hw hw;
+};
+
+#define to_npcm8xx_clk_pll(_hw) container_of(_hw, struct npcm8xx_clk_pll, hw)
+
+struct npcm8xx_clk_pll_data {
+ const char *name;
+ struct clk_parent_data parent;
+ unsigned int reg;
+ unsigned long flags;
+ struct clk_hw hw;
+};
+
+struct npcm8xx_clk_div_data {
+ u32 reg;
+ u8 shift;
+ u8 width;
+ const char *name;
+ const struct clk_hw *parent_hw;
+ unsigned long clk_divider_flags;
+ unsigned long flags;
+ int onecell_idx;
+ struct clk_hw hw;
+};
+
+struct npcm8xx_clk_mux_data {
+ u8 shift;
+ u32 mask;
+ const u32 *table;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 num_parents;
+ unsigned long flags;
+ struct clk_hw hw;
+};
+
+static struct clk_hw hw_pll1_div2, hw_pll2_div2, hw_gfx_div2, hw_pre_clk;
+static struct npcm8xx_clk_pll_data npcm8xx_pll_clks[] = {
+ { "pll0", { .index = 0 }, NPCM8XX_PLLCON0, 0 },
+ { "pll1", { .index = 0 }, NPCM8XX_PLLCON1, 0 },
+ { "pll2", { .index = 0 }, NPCM8XX_PLLCON2, 0 },
+ { "pll_gfx", { .index = 0 }, NPCM8XX_PLLCONG, 0 },
+};
+
+static const u32 cpuck_mux_table[] = { 0, 1, 2, 7 };
+static const struct clk_parent_data cpuck_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 },
+ { .hw = &npcm8xx_pll_clks[2].hw }
+};
+
+static const u32 pixcksel_mux_table[] = { 0, 2 };
+static const struct clk_parent_data pixcksel_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[3].hw },
+ { .index = 0 }
+};
+
+static const u32 default_mux_table[] = { 0, 1, 2, 3 };
+static const struct clk_parent_data default_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 },
+ { .hw = &hw_pll2_div2 }
+};
+
+static const u32 sucksel_mux_table[] = { 2, 3 };
+static const struct clk_parent_data sucksel_mux_parents[] = {
+ { .index = 0 },
+ { .hw = &hw_pll2_div2 }
+};
+
+static const u32 mccksel_mux_table[] = { 0, 2 };
+static const struct clk_parent_data mccksel_mux_parents[] = {
+ { .hw = &hw_pll1_div2 },
+ { .index = 0 }
+};
+
+static const u32 clkoutsel_mux_table[] = { 0, 1, 2, 3, 4 };
+static const struct clk_parent_data clkoutsel_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 },
+ { .hw = &hw_gfx_div2 },
+ { .hw = &hw_pll2_div2 }
+};
+
+static const u32 gfxmsel_mux_table[] = { 2, 3 };
+static const struct clk_parent_data gfxmsel_mux_parents[] = {
+ { .index = 0 },
+ { .hw = &npcm8xx_pll_clks[2].hw }
+};
+
+static const u32 dvcssel_mux_table[] = { 2, 3 };
+static const struct clk_parent_data dvcssel_mux_parents[] = {
+ { .index = 0 },
+ { .hw = &npcm8xx_pll_clks[2].hw }
+};
+
+static const u32 default3_mux_table[] = { 0, 1, 2 };
+static const struct clk_parent_data default3_mux_parents[] = {
+ { .hw = &npcm8xx_pll_clks[0].hw },
+ { .hw = &npcm8xx_pll_clks[1].hw },
+ { .index = 0 }
+};
+
+static struct npcm8xx_clk_mux_data npcm8xx_muxes[] = {
+ { 0, 3, cpuck_mux_table, "cpu_mux", cpuck_mux_parents,
+ ARRAY_SIZE(cpuck_mux_parents), CLK_IS_CRITICAL },
+ { 4, 2, pixcksel_mux_table, "gfx_pixel_mux", pixcksel_mux_parents,
+ ARRAY_SIZE(pixcksel_mux_parents), 0 },
+ { 6, 2, default_mux_table, "sd_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 8, 2, default_mux_table, "uart_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 10, 2, sucksel_mux_table, "serial_usb_mux", sucksel_mux_parents,
+ ARRAY_SIZE(sucksel_mux_parents), 0 },
+ { 12, 2, mccksel_mux_table, "mc_mux", mccksel_mux_parents,
+ ARRAY_SIZE(mccksel_mux_parents), 0 },
+ { 14, 2, default_mux_table, "adc_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 16, 2, default_mux_table, "gfx_mux", default_mux_parents,
+ ARRAY_SIZE(default_mux_parents), 0 },
+ { 18, 3, clkoutsel_mux_table, "clkout_mux", clkoutsel_mux_parents,
+ ARRAY_SIZE(clkoutsel_mux_parents), 0 },
+ { 21, 2, gfxmsel_mux_table, "gfxm_mux", gfxmsel_mux_parents,
+ ARRAY_SIZE(gfxmsel_mux_parents), 0 },
+ { 23, 2, dvcssel_mux_table, "dvc_mux", dvcssel_mux_parents,
+ ARRAY_SIZE(dvcssel_mux_parents), 0 },
+ { 25, 2, default3_mux_table, "rg_mux", default3_mux_parents,
+ ARRAY_SIZE(default3_mux_parents), 0 },
+ { 27, 2, default3_mux_table, "rcp_mux", default3_mux_parents,
+ ARRAY_SIZE(default3_mux_parents), 0 },
+};
+
+/* configurable pre dividers: */
+static struct npcm8xx_clk_div_data npcm8xx_pre_divs[] = {
+ { NPCM8XX_CLKDIV1, 21, 5, "pre_adc", &npcm8xx_muxes[6].hw, CLK_DIVIDER_READ_ONLY, 0, -1 },
+ { NPCM8XX_CLKDIV1, 26, 2, "ahb", &hw_pre_clk, CLK_DIVIDER_READ_ONLY, CLK_IS_CRITICAL, NPCM8XX_CLK_AHB },
+};
+
+/* configurable dividers: */
+static struct npcm8xx_clk_div_data npcm8xx_divs[] = {
+ { NPCM8XX_CLKDIV1, 28, 3, "adc", &npcm8xx_pre_divs[0].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_ADC },
+ { NPCM8XX_CLKDIV1, 16, 5, "uart", &npcm8xx_muxes[3].hw, 0, 0, NPCM8XX_CLK_UART },
+ { NPCM8XX_CLKDIV1, 11, 5, "mmc", &npcm8xx_muxes[2].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_MMC },
+ { NPCM8XX_CLKDIV1, 6, 5, "spi3", &npcm8xx_pre_divs[1].hw, 0, 0, NPCM8XX_CLK_SPI3 },
+ { NPCM8XX_CLKDIV1, 2, 4, "pci", &npcm8xx_muxes[7].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_PCI },
+
+ { NPCM8XX_CLKDIV2, 30, 2, "apb4", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB4 },
+ { NPCM8XX_CLKDIV2, 28, 2, "apb3", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB3 },
+ { NPCM8XX_CLKDIV2, 26, 2, "apb2", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB2 },
+ { NPCM8XX_CLKDIV2, 24, 2, "apb1", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB1 },
+ { NPCM8XX_CLKDIV2, 22, 2, "apb5", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_APB5 },
+ { NPCM8XX_CLKDIV2, 16, 5, "clkout", &npcm8xx_muxes[8].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_CLKOUT },
+ { NPCM8XX_CLKDIV2, 13, 3, "gfx", &npcm8xx_muxes[7].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_GFX },
+ { NPCM8XX_CLKDIV2, 8, 5, "usb_bridge", &npcm8xx_muxes[4].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SU },
+ { NPCM8XX_CLKDIV2, 4, 4, "usb_host", &npcm8xx_muxes[4].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SU48 },
+ { NPCM8XX_CLKDIV2, 0, 4, "sdhc", &npcm8xx_muxes[2].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SDHC },
+
+ { NPCM8XX_CLKDIV3, 16, 8, "spi1", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPI1 },
+ { NPCM8XX_CLKDIV3, 11, 5, "uart2", &npcm8xx_muxes[3].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_UART2 },
+ { NPCM8XX_CLKDIV3, 6, 5, "spi0", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPI0 },
+ { NPCM8XX_CLKDIV3, 1, 5, "spix", &npcm8xx_pre_divs[1].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_SPIX },
+
+ { NPCM8XX_CLKDIV4, 28, 4, "rg", &npcm8xx_muxes[11].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_RG },
+ { NPCM8XX_CLKDIV4, 12, 4, "rcp", &npcm8xx_muxes[12].hw, CLK_DIVIDER_READ_ONLY, 0, NPCM8XX_CLK_RCP },
+
+ { NPCM8XX_THRTL_CNT, 0, 2, "th", &npcm8xx_muxes[0].hw, CLK_DIVIDER_READ_ONLY | CLK_DIVIDER_POWER_OF_TWO, 0, NPCM8XX_CLK_TH },
+};
+
+static unsigned long npcm8xx_clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct npcm8xx_clk_pll *pll = to_npcm8xx_clk_pll(hw);
+ unsigned long fbdv, indv, otdv1, otdv2;
+ unsigned int val;
+ u64 ret;
+
+ if (parent_rate == 0) {
+ pr_debug("%s: parent rate is zero\n", __func__);
+ return 0;
+ }
+
+ val = readl_relaxed(pll->pllcon);
+
+ indv = FIELD_GET(PLLCON_INDV, val);
+ fbdv = FIELD_GET(PLLCON_FBDV, val);
+ otdv1 = FIELD_GET(PLLCON_OTDV1, val);
+ otdv2 = FIELD_GET(PLLCON_OTDV2, val);
+
+ ret = (u64)parent_rate * fbdv;
+ do_div(ret, indv * otdv1 * otdv2);
+
+ return ret;
+}
+
+static const struct clk_ops npcm8xx_clk_pll_ops = {
+ .recalc_rate = npcm8xx_clk_pll_recalc_rate,
+};
+
+static struct clk_hw *
+npcm8xx_clk_register_pll(struct device *dev, void __iomem *pllcon,
+ const char *name, const struct clk_parent_data *parent,
+ unsigned long flags)
+{
+ struct npcm8xx_clk_pll *pll;
+ struct clk_init_data init = {};
+ int ret;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &npcm8xx_clk_pll_ops;
+ init.parent_data = parent;
+ init.num_parents = 1;
+ init.flags = flags;
+
+ pll->pllcon = pllcon;
+ pll->hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &pll->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &pll->hw;
+}
+
+static DEFINE_SPINLOCK(npcm8xx_clk_lock);
+
+static int npcm8xx_clk_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct npcm_clock_adev *rdev = to_npcm_clock_adev(adev);
+ struct clk_hw_onecell_data *npcm8xx_clk_data;
+ struct device *dev = &adev->dev;
+ struct clk_hw *hw;
+ unsigned int i;
+
+ npcm8xx_clk_data = devm_kzalloc(dev, struct_size(npcm8xx_clk_data, hws,
+ NPCM8XX_NUM_CLOCKS),
+ GFP_KERNEL);
+ if (!npcm8xx_clk_data)
+ return -ENOMEM;
+
+ clk_base = rdev->base;
+
+ npcm8xx_clk_data->num = NPCM8XX_NUM_CLOCKS;
+
+ for (i = 0; i < NPCM8XX_NUM_CLOCKS; i++)
+ npcm8xx_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+
+ /* Register plls */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_pll_clks); i++) {
+ struct npcm8xx_clk_pll_data *pll_clk = &npcm8xx_pll_clks[i];
+
+ hw = npcm8xx_clk_register_pll(dev, clk_base + pll_clk->reg,
+ pll_clk->name, &pll_clk->parent,
+ pll_clk->flags);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pll\n");
+ pll_clk->hw = *hw;
+ }
+
+ /* Register fixed dividers */
+ hw = devm_clk_hw_register_fixed_factor(dev, "pll1_div2", "pll1", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register fixed div\n");
+ hw_pll1_div2 = *hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "pll2_div2", "pll2", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pll2 div2\n");
+ hw_pll2_div2 = *hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "pll_gfx_div2", "pll_gfx", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register gfx div2\n");
+ hw_gfx_div2 = *hw;
+
+ /* Register muxes */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_muxes); i++) {
+ struct npcm8xx_clk_mux_data *mux_data = &npcm8xx_muxes[i];
+
+ hw = devm_clk_hw_register_mux_parent_data_table(dev,
+ mux_data->name,
+ mux_data->parent_data,
+ mux_data->num_parents,
+ mux_data->flags,
+ clk_base + NPCM8XX_CLKSEL,
+ mux_data->shift,
+ mux_data->mask,
+ 0,
+ mux_data->table,
+ &npcm8xx_clk_lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register mux\n");
+ mux_data->hw = *hw;
+ }
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "pre_clk", "cpu_mux", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pre clk div2\n");
+ hw_pre_clk = *hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "axi", "th", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register axi div2\n");
+ npcm8xx_clk_data->hws[NPCM8XX_CLK_AXI] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor(dev, "atb", "axi", 0, 1, 2);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register atb div2\n");
+ npcm8xx_clk_data->hws[NPCM8XX_CLK_ATB] = hw;
+
+ /* Register pre dividers */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_pre_divs); i++) {
+ struct npcm8xx_clk_div_data *div_data = &npcm8xx_pre_divs[i];
+
+ hw = devm_clk_hw_register_divider_parent_hw(dev, div_data->name,
+ div_data->parent_hw,
+ div_data->flags,
+ clk_base + div_data->reg,
+ div_data->shift,
+ div_data->width,
+ div_data->clk_divider_flags,
+ &npcm8xx_clk_lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register pre div\n");
+ div_data->hw = *hw;
+
+ if (div_data->onecell_idx >= 0)
+ npcm8xx_clk_data->hws[div_data->onecell_idx] = hw;
+ }
+
+ /* Register dividers */
+ for (i = 0; i < ARRAY_SIZE(npcm8xx_divs); i++) {
+ struct npcm8xx_clk_div_data *div_data = &npcm8xx_divs[i];
+
+ hw = devm_clk_hw_register_divider_parent_hw(dev, div_data->name,
+ div_data->parent_hw,
+ div_data->flags,
+ clk_base + div_data->reg,
+ div_data->shift,
+ div_data->width,
+ div_data->clk_divider_flags,
+ &npcm8xx_clk_lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(dev, PTR_ERR(hw), "Can't register div\n");
+
+ if (div_data->onecell_idx >= 0)
+ npcm8xx_clk_data->hws[div_data->onecell_idx] = hw;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ npcm8xx_clk_data);
+}
+
+static const struct auxiliary_device_id npcm8xx_clock_ids[] = {
+ {
+ .name = "reset_npcm.clk-npcm8xx",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, npcm8xx_clock_ids);
+
+static struct auxiliary_driver npcm8xx_clock_driver = {
+ .probe = npcm8xx_clk_probe,
+ .id_table = npcm8xx_clock_ids,
+};
+module_auxiliary_driver(npcm8xx_clock_driver);
+
+MODULE_DESCRIPTION("Clock driver for Nuvoton NPCM8XX BMC SoC");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
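The npcm8xx PLL recalc implements Fout = Fref * FBDV / (INDV * OTDV1 * OTDV2), pulling each divider field out of PLLCON with FIELD_GET(). A worked example with hypothetical field values, assuming a 25 MHz reference with INDV = 1, FBDV = 96, OTDV1 = 2, OTDV2 = 1:

/* Hypothetical decode, mirroring the driver's arithmetic: */
u32 val = readl_relaxed(pllcon);
unsigned long indv  = FIELD_GET(PLLCON_INDV, val);	/* 1  */
unsigned long fbdv  = FIELD_GET(PLLCON_FBDV, val);	/* 96 */
unsigned long otdv1 = FIELD_GET(PLLCON_OTDV1, val);	/* 2  */
unsigned long otdv2 = FIELD_GET(PLLCON_OTDV2, val);	/* 1  */
u64 rate = (u64)25000000 * fbdv;			/* 2,400,000,000 */

do_div(rate, indv * otdv1 * otdv2);	/* 2.4 GHz / 2 = 1.2 GHz */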
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 4dcde305944c..a560edeb4b55 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -1065,11 +1066,8 @@ static void __init _clockgen_init(struct device_node *np, bool legacy);
static void __init legacy_init_clockgen(struct device_node *np)
{
if (!clockgen.node) {
- struct device_node *parent_np;
-
- parent_np = of_get_parent(np);
+ struct device_node *parent_np __free(device_node) = of_get_parent(np);
_clockgen_init(parent_np, true);
- of_node_put(parent_np);
}
}
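The qoriq hunk swaps a manual of_get_parent()/of_node_put() pair for scope-based cleanup: __free(device_node) registers of_node_put() to run automatically when the variable leaves scope, so every return path drops the reference. A minimal sketch:

#include <linux/cleanup.h>
#include <linux/of.h>

static int use_parent(struct device_node *np)
{
	/* of_node_put(parent) runs automatically at scope exit */
	struct device_node *parent __free(device_node) = of_get_parent(np);

	if (!parent)
		return -ENODEV;

	return 0;	/* no explicit of_node_put() needed on any path */
}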
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 6ee148e5469d..1127c35ce57d 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -371,7 +371,7 @@ static int si514_probe(struct i2c_client *client)
}
static const struct i2c_device_id si514_id[] = {
- { "si514", 0 },
+ { "si514" },
{ }
};
MODULE_DEVICE_TABLE(i2c, si514_id);
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 07c13ebe327d..f476883bc93b 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -5,6 +5,7 @@
* Inspired by clk-asm9260.c .
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -34,11 +35,20 @@
#define STM32F4_RCC_APB2ENR 0x44
#define STM32F4_RCC_BDCR 0x70
#define STM32F4_RCC_CSR 0x74
+#define STM32F4_RCC_SSCGR 0x80
#define STM32F4_RCC_PLLI2SCFGR 0x84
#define STM32F4_RCC_PLLSAICFGR 0x88
#define STM32F4_RCC_DCKCFGR 0x8c
#define STM32F7_RCC_DCKCFGR2 0x90
+#define STM32F4_RCC_PLLCFGR_N_MASK GENMASK(14, 6)
+
+#define STM32F4_RCC_SSCGR_SSCGEN BIT(31)
+#define STM32F4_RCC_SSCGR_SPREADSEL BIT(30)
+#define STM32F4_RCC_SSCGR_RESERVED_MASK GENMASK(29, 28)
+#define STM32F4_RCC_SSCGR_INCSTEP_MASK GENMASK(27, 13)
+#define STM32F4_RCC_SSCGR_MODPER_MASK GENMASK(12, 0)
+
#define NONE -1
#define NO_IDX NONE
#define NO_MUX NONE
@@ -364,6 +374,16 @@ static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" },
};
+enum stm32f4_pll_ssc_mod_type {
+ STM32F4_PLL_SSC_CENTER_SPREAD,
+ STM32F4_PLL_SSC_DOWN_SPREAD,
+};
+
+static const char * const stm32f4_ssc_mod_methods[] __initconst = {
+ [STM32F4_PLL_SSC_DOWN_SPREAD] = "down-spread",
+ [STM32F4_PLL_SSC_CENTER_SPREAD] = "center-spread",
+};
+
/*
* This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx
* have gate bits associated with them. Its combined hweight is 71.
@@ -509,6 +529,12 @@ static const struct clk_div_table pll_divr_table[] = {
{ 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 }
};
+struct stm32f4_pll_ssc {
+ unsigned int mod_freq;
+ unsigned int mod_depth;
+ enum stm32f4_pll_ssc_mod_type mod_type;
+};
+
struct stm32f4_pll {
spinlock_t *lock;
struct clk_gate gate;
@@ -516,6 +542,8 @@ struct stm32f4_pll {
u8 bit_rdy_idx;
u8 status;
u8 n_start;
+ bool ssc_enable;
+ struct stm32f4_pll_ssc ssc_conf;
};
#define to_stm32f4_pll(_gate) container_of(_gate, struct stm32f4_pll, gate)
@@ -538,6 +566,7 @@ struct stm32f4_vco_data {
u8 offset;
u8 bit_idx;
u8 bit_rdy_idx;
+ bool sscg;
};
static const struct stm32f4_vco_data vco_data[] = {
@@ -632,9 +661,11 @@ static unsigned long stm32f4_pll_recalc(struct clk_hw *hw,
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ unsigned long val;
unsigned long n;
- n = (readl(base + pll->offset) >> 6) & 0x1ff;
+ val = readl(base + pll->offset);
+ n = FIELD_GET(STM32F4_RCC_PLLCFGR_N_MASK, val);
return parent_rate * n;
}
@@ -656,6 +687,32 @@ static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate,
return *prate * n;
}
+static void stm32f4_pll_set_ssc(struct clk_hw *hw, unsigned long parent_rate,
+ unsigned int ndiv)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ struct stm32f4_pll_ssc *ssc = &pll->ssc_conf;
+ u32 modeper, incstep;
+ u32 sscgr;
+
+ sscgr = readl(base + STM32F4_RCC_SSCGR);
+ /* reserved field must be kept at reset value */
+ sscgr &= STM32F4_RCC_SSCGR_RESERVED_MASK;
+
+ modeper = DIV_ROUND_CLOSEST(parent_rate, 4 * ssc->mod_freq);
+ incstep = DIV_ROUND_CLOSEST(((1 << 15) - 1) * ssc->mod_depth * ndiv,
+ 5 * 10000 * modeper);
+ sscgr |= STM32F4_RCC_SSCGR_SSCGEN |
+ FIELD_PREP(STM32F4_RCC_SSCGR_INCSTEP_MASK, incstep) |
+ FIELD_PREP(STM32F4_RCC_SSCGR_MODPER_MASK, modeper);
+
+ if (ssc->mod_type)
+ sscgr |= STM32F4_RCC_SSCGR_SPREADSEL;
+
+ writel(sscgr, base + STM32F4_RCC_SSCGR);
+}
+
static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -673,9 +730,13 @@ static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate,
n = rate / parent_rate;
- val = readl(base + pll->offset) & ~(0x1ff << 6);
+ val = readl(base + pll->offset) & ~STM32F4_RCC_PLLCFGR_N_MASK;
+ val |= FIELD_PREP(STM32F4_RCC_PLLCFGR_N_MASK, n);
+
+ writel(val, base + pll->offset);
- writel(val | ((n & 0x1ff) << 6), base + pll->offset);
+ if (pll->ssc_enable)
+ stm32f4_pll_set_ssc(hw, parent_rate, n);
if (pll_state)
stm32f4_pll_enable(hw);
@@ -782,6 +843,84 @@ static struct clk_hw *clk_register_pll_div(const char *name,
return hw;
}
+static int __init stm32f4_pll_init_ssc(struct clk_hw *hw,
+ const struct stm32f4_pll_ssc *conf)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct stm32f4_pll *pll = to_stm32f4_pll(gate);
+ struct clk_hw *parent;
+ unsigned long parent_rate;
+ int pll_state;
+ unsigned long n, val;
+
+ parent = clk_hw_get_parent(hw);
+ if (!parent) {
+ pr_err("%s: failed to get clock parent\n", __func__);
+ return -ENODEV;
+ }
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ pll->ssc_enable = true;
+ memcpy(&pll->ssc_conf, conf, sizeof(pll->ssc_conf));
+
+ pll_state = stm32f4_pll_is_enabled(hw);
+
+ if (pll_state)
+ stm32f4_pll_disable(hw);
+
+ val = readl(base + pll->offset);
+ n = FIELD_GET(STM32F4_RCC_PLLCFGR_N_MASK, val);
+
+ pr_debug("%s: pll: %s, parent: %s, parent-rate: %lu, n: %lu\n",
+ __func__, clk_hw_get_name(hw), clk_hw_get_name(parent),
+ parent_rate, n);
+
+ stm32f4_pll_set_ssc(hw, parent_rate, n);
+
+ if (pll_state)
+ stm32f4_pll_enable(hw);
+
+ return 0;
+}
+
+static int __init stm32f4_pll_ssc_parse_dt(struct device_node *np,
+ struct stm32f4_pll_ssc *conf)
+{
+ int ret;
+
+ if (!conf)
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "st,ssc-modfreq-hz", &conf->mod_freq);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "st,ssc-moddepth-permyriad",
+ &conf->mod_depth);
+ if (ret) {
+ pr_err("%pOF: missing st,ssc-moddepth-permyriad\n", np);
+ return ret;
+ }
+
+ ret = fwnode_property_match_property_string(of_fwnode_handle(np),
+ "st,ssc-modmethod",
+ stm32f4_ssc_mod_methods,
+ ARRAY_SIZE(stm32f4_ssc_mod_methods));
+ if (ret < 0) {
+ pr_err("%pOF: failed to get st,ssc-modmethod\n", np);
+ return ret;
+ }
+
+ conf->mod_type = ret;
+
+ pr_debug("%pOF: SSCG settings: mod_freq: %d, mod_depth: %d mod_method: %s [%d]\n",
+ np, conf->mod_freq, conf->mod_depth, stm32f4_ssc_mod_methods[conf->mod_type], conf->mod_type);
+
+ return 0;
+}
+
static struct clk_hw *stm32f4_rcc_register_pll(const char *pllsrc,
const struct stm32f4_pll_data *data, spinlock_t *lock)
{
@@ -1689,7 +1828,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
const struct of_device_id *match;
const struct stm32f4_clk_data *data;
unsigned long pllm;
- struct clk_hw *pll_src_hw;
+ struct clk_hw *pll_src_hw, *pll_vco_hw;
+ struct stm32f4_pll_ssc ssc_conf;
base = of_iomap(np, 0);
if (!base) {
@@ -1748,8 +1888,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
clk_hw_register_fixed_factor(NULL, "vco_in", pll_src,
0, 1, pllm);
- stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
- &stm32f4_clk_lock);
+ pll_vco_hw = stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
+ &stm32f4_clk_lock);
clks[PLL_VCO_I2S] = stm32f4_rcc_register_pll("vco_in",
&data->pll_data[1], &stm32f4_clk_lock);
@@ -1894,6 +2034,9 @@ static void __init stm32f4_rcc_init(struct device_node *np)
of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
+ if (!stm32f4_pll_ssc_parse_dt(np, &ssc_conf))
+ stm32f4_pll_init_ssc(pll_vco_hw, &ssc_conf);
+
return;
fail:
kfree(clks);
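The SSCG programming above computes MODPER = round(f_pll_in / (4 * f_mod)) and INCSTEP = round((2^15 - 1) * md * N / (5 * 10000 * MODPER)), with the modulation depth md expressed in permyriad. A worked example under assumed inputs, a 1 MHz PLL input, 10 kHz modulation, N = 336 and 2 % depth (200 permyriad):

/* Assumed inputs, for illustration only: */
unsigned long parent_rate = 1000000;	/* 1 MHz VCO input   */
unsigned int  mod_freq    = 10000;	/* 10 kHz modulation */
unsigned int  mod_depth   = 200;	/* 2 % in permyriad  */
unsigned int  ndiv        = 336;

u32 modeper = DIV_ROUND_CLOSEST(parent_rate, 4 * mod_freq);	/* 25   */
u32 incstep = DIV_ROUND_CLOSEST(((1 << 15) - 1) * mod_depth * ndiv,
				5 * 10000 * modeper);		/* 1762 */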
diff --git a/drivers/clk/clk-twl.c b/drivers/clk/clk-twl.c
index eab9d3c8ed8a..20bc3bf8fd62 100644
--- a/drivers/clk/clk-twl.c
+++ b/drivers/clk/clk-twl.c
@@ -11,13 +11,29 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#define VREG_STATE 2
+#define VREG_STATE 2
+#define VREG_GRP 0
#define TWL6030_CFG_STATE_OFF 0x00
#define TWL6030_CFG_STATE_ON 0x01
#define TWL6030_CFG_STATE_MASK 0x03
+#define TWL6030_CFG_STATE_GRP_SHIFT 5
+#define TWL6030_CFG_STATE_APP_SHIFT 2
+#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
+#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
+ TWL6030_CFG_STATE_APP_SHIFT)
+#define P1_GRP BIT(0) /* processor power group */
+#define P2_GRP BIT(1)
+#define P3_GRP BIT(2)
+#define ALL_GRP (P1_GRP | P2_GRP | P3_GRP)
+
+enum twl_type {
+ TWL_TYPE_6030,
+ TWL_TYPE_6032,
+};
struct twl_clock_info {
struct device *dev;
+ enum twl_type type;
u8 base;
struct clk_hw hw;
};
@@ -56,14 +72,21 @@ static unsigned long twl_clks_recalc_rate(struct clk_hw *hw,
static int twl6032_clks_prepare(struct clk_hw *hw)
{
struct twl_clock_info *cinfo = to_twl_clks_info(hw);
- int ret;
- ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
- TWL6030_CFG_STATE_ON);
- if (ret < 0)
- dev_err(cinfo->dev, "clk prepare failed\n");
+ if (cinfo->type == TWL_TYPE_6030) {
+ int grp;
+
+ grp = twlclk_read(cinfo, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+ if (grp < 0)
+ return grp;
- return ret;
+ return twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ grp << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_ON);
+ }
+
+ return twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ TWL6030_CFG_STATE_ON);
}
static void twl6032_clks_unprepare(struct clk_hw *hw)
@@ -71,32 +94,21 @@ static void twl6032_clks_unprepare(struct clk_hw *hw)
struct twl_clock_info *cinfo = to_twl_clks_info(hw);
int ret;
- ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
- TWL6030_CFG_STATE_OFF);
+ if (cinfo->type == TWL_TYPE_6030)
+ ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ ALL_GRP << TWL6030_CFG_STATE_GRP_SHIFT |
+ TWL6030_CFG_STATE_OFF);
+ else
+ ret = twlclk_write(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+ TWL6030_CFG_STATE_OFF);
+
if (ret < 0)
dev_err(cinfo->dev, "clk unprepare failed\n");
}
-static int twl6032_clks_is_prepared(struct clk_hw *hw)
-{
- struct twl_clock_info *cinfo = to_twl_clks_info(hw);
- int val;
-
- val = twlclk_read(cinfo, TWL_MODULE_PM_RECEIVER, VREG_STATE);
- if (val < 0) {
- dev_err(cinfo->dev, "clk read failed\n");
- return val;
- }
-
- val &= TWL6030_CFG_STATE_MASK;
-
- return val == TWL6030_CFG_STATE_ON;
-}
-
static const struct clk_ops twl6032_clks_ops = {
.prepare = twl6032_clks_prepare,
.unprepare = twl6032_clks_unprepare,
- .is_prepared = twl6032_clks_is_prepared,
.recalc_rate = twl_clks_recalc_rate,
};
@@ -155,6 +167,7 @@ static int twl_clks_probe(struct platform_device *pdev)
for (i = 0; i < count; i++) {
cinfo[i].base = hw_data[i].base;
cinfo[i].dev = &pdev->dev;
+ cinfo[i].type = platform_get_device_id(pdev)->driver_data;
cinfo[i].hw.init = &hw_data[i].init;
ret = devm_clk_hw_register(&pdev->dev, &cinfo[i].hw);
if (ret) {
@@ -176,7 +189,11 @@ static int twl_clks_probe(struct platform_device *pdev)
static const struct platform_device_id twl_clks_id[] = {
{
+ .name = "twl6030-clk",
+ .driver_data = TWL_TYPE_6030,
+ }, {
.name = "twl6032-clk",
+ .driver_data = TWL_TYPE_6032,
}, {
/* sentinel */
}
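As the macros above lay out, the TWL6030 VREG_STATE register packs the power-resource group assignment into bits 7:5 (P3/P2/P1), an application field into bits 3:2 and the on/off state into bits 1:0, which is why the prepare path now reads VREG_GRP and shifts it in rather than writing the state bits alone. Composing "on for all processor groups", for illustration:

/* GRP in bits 7:5, APP in bits 3:2, STATE in bits 1:0 */
u8 val = ALL_GRP << TWL6030_CFG_STATE_GRP_SHIFT | TWL6030_CFG_STATE_ON;
	/* = 0x7 << 5 | 0x01 = 0xe1 */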
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 76d7ea1964c3..9fe27dace111 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -78,9 +78,6 @@
#define VC3_PLL1_VCO_MIN 300000000UL
#define VC3_PLL1_VCO_MAX 600000000UL
-#define VC3_PLL2_VCO_MIN 400000000UL
-#define VC3_PLL2_VCO_MAX 1200000000UL
-
#define VC3_PLL3_VCO_MIN 300000000UL
#define VC3_PLL3_VCO_MAX 800000000UL
@@ -147,9 +144,13 @@ struct vc3_pfd_data {
u8 mdiv2_bitmsk;
};
+struct vc3_vco {
+ unsigned long min;
+ unsigned long max;
+};
+
struct vc3_pll_data {
- unsigned long vco_min;
- unsigned long vco_max;
+ struct vc3_vco vco;
u8 num;
u8 int_div_msb_offs;
u8 int_div_lsb_offs;
@@ -166,12 +167,17 @@ struct vc3_div_data {
struct vc3_hw_data {
struct clk_hw hw;
struct regmap *regmap;
- const void *data;
+ void *data;
u32 div_int;
u32 div_frc;
};
+struct vc3_hw_cfg {
+ struct vc3_vco pll2_vco;
+ u32 se2_clk_sel_msk;
+};
+
static const struct clk_div_table div1_divs[] = {
{ .val = 0, .div = 1, }, { .val = 1, .div = 4, },
{ .val = 2, .div = 5, }, { .val = 3, .div = 6, },
@@ -386,10 +392,10 @@ static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
const struct vc3_pll_data *pll = vc3->data;
u64 div_frc;
- if (rate < pll->vco_min)
- rate = pll->vco_min;
- if (rate > pll->vco_max)
- rate = pll->vco_max;
+ if (rate < pll->vco.min)
+ rate = pll->vco.min;
+ if (rate > pll->vco.max)
+ rate = pll->vco.max;
vc3->div_int = rate / *parent_rate;
@@ -680,8 +686,10 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL1,
.int_div_msb_offs = VC3_PLL1_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL1_VCO_N_DIVIDER,
- .vco_min = VC3_PLL1_VCO_MIN,
- .vco_max = VC3_PLL1_VCO_MAX
+ .vco = {
+ .min = VC3_PLL1_VCO_MIN,
+ .max = VC3_PLL1_VCO_MAX
+ }
},
.hw.init = &(struct clk_init_data) {
.name = "pll1",
@@ -698,8 +706,6 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL2,
.int_div_msb_offs = VC3_PLL2_FB_INT_DIV_MSB,
.int_div_lsb_offs = VC3_PLL2_FB_INT_DIV_LSB,
- .vco_min = VC3_PLL2_VCO_MIN,
- .vco_max = VC3_PLL2_VCO_MAX
},
.hw.init = &(struct clk_init_data) {
.name = "pll2",
@@ -716,8 +722,10 @@ static struct vc3_hw_data clk_pll[] = {
.num = VC3_PLL3,
.int_div_msb_offs = VC3_PLL3_LOOP_FILTER_N_DIV_MSB,
.int_div_lsb_offs = VC3_PLL3_N_DIVIDER,
- .vco_min = VC3_PLL3_VCO_MIN,
- .vco_max = VC3_PLL3_VCO_MAX
+ .vco = {
+ .min = VC3_PLL3_VCO_MIN,
+ .max = VC3_PLL3_VCO_MAX
+ }
},
.hw.init = &(struct clk_init_data) {
.name = "pll3",
@@ -901,7 +909,6 @@ static struct vc3_hw_data clk_mux[] = {
[VC3_SE2_MUX] = {
.data = &(struct vc3_clk_data) {
.offs = VC3_SE2_CTRL_REG0,
- .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
},
.hw.init = &(struct clk_init_data) {
.name = "se2_mux",
@@ -982,6 +989,7 @@ static int vc3_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
u8 settings[NUM_CONFIG_REGISTERS];
+ const struct vc3_hw_cfg *data;
struct regmap *regmap;
const char *name;
int ret, i;
@@ -1029,9 +1037,16 @@ static int vc3_probe(struct i2c_client *client)
clk_pfd[i].hw.init->name);
}
+ data = i2c_get_match_data(client);
+
/* Register pll's */
for (i = 0; i < ARRAY_SIZE(clk_pll); i++) {
clk_pll[i].regmap = regmap;
+ if (i == VC3_PLL2) {
+ struct vc3_pll_data *pll_data = clk_pll[i].data;
+
+ pll_data->vco = data->pll2_vco;
+ }
ret = devm_clk_hw_register(dev, &clk_pll[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
@@ -1059,6 +1074,11 @@ static int vc3_probe(struct i2c_client *client)
/* Register clk muxes */
for (i = 0; i < ARRAY_SIZE(clk_mux); i++) {
clk_mux[i].regmap = regmap;
+ if (i == VC3_SE2_MUX) {
+ struct vc3_clk_data *clk_data = clk_mux[i].data;
+
+ clk_data->bitmsk = data->se2_clk_sel_msk;
+ }
ret = devm_clk_hw_register(dev, &clk_mux[i].hw);
if (ret)
return dev_err_probe(dev, ret, "%s failed\n",
@@ -1108,8 +1128,19 @@ static int vc3_probe(struct i2c_client *client)
return ret;
}
+static const struct vc3_hw_cfg vc3_5p = {
+ .pll2_vco = { .min = 400000000UL, .max = 1200000000UL },
+ .se2_clk_sel_msk = BIT(6),
+};
+
+static const struct vc3_hw_cfg vc3_5l = {
+ .pll2_vco = { .min = 30000000UL, .max = 130000000UL },
+ .se2_clk_sel_msk = BIT(0),
+};
+
static const struct of_device_id dev_ids[] = {
- { .compatible = "renesas,5p35023" },
+ { .compatible = "renesas,5p35023", .data = &vc3_5p },
+ { .compatible = "renesas,5l35023", .data = &vc3_5l },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dev_ids);
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 0c3d0cee98c8..96946a8e2854 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
@@ -520,8 +521,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
- data & pclk->param.reg_clk_mask ? "enabled" :
- "disabled");
+ str_enabled_disabled(data & pclk->param.reg_clk_mask));
} else {
return 1;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index d02451f951cf..cf7720b9172f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -608,12 +608,6 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
-bool clk_hw_rate_is_protected(const struct clk_hw *hw)
-{
- return clk_core_rate_is_protected(hw->core);
-}
-EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
-
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
return clk_core_is_enabled(hw->core);
@@ -5391,8 +5385,10 @@ const char *of_clk_get_parent_name(const struct device_node *np, int index)
count++;
}
/* We went off the end of 'clock-indices' without finding it */
- if (of_property_present(clkspec.np, "clock-indices") && !found)
+ if (of_property_present(clkspec.np, "clock-indices") && !found) {
+ of_node_put(clkspec.np);
return NULL;
+ }
if (of_property_read_string_index(clkspec.np, "clock-output-names",
index,
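The clk.c hunk closes a reference leak: clkspec.np was obtained through a phandle lookup, which takes a reference on the node, and the newly added early return has to drop it just like the existing exit paths do. The general shape of the rule, in a hypothetical function:

#include <linux/of.h>

static int example(struct device *dev)
{
	/* of_parse_phandle() returns the node with a reference held */
	struct device_node *np = of_parse_phandle(dev->of_node, "clocks", 0);

	if (!np)
		return -ENOENT;

	if (!of_property_present(np, "clock-output-names")) {
		of_node_put(np);	/* early exits must drop the ref too */
		return -EINVAL;
	}

	of_node_put(np);
	return 0;
}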
diff --git a/drivers/clk/clk_kunit_helpers.c b/drivers/clk/clk_kunit_helpers.c
index 52fd25594c96..68a28e70bb61 100644
--- a/drivers/clk/clk_kunit_helpers.c
+++ b/drivers/clk/clk_kunit_helpers.c
@@ -203,5 +203,35 @@ int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node, struc
}
EXPORT_SYMBOL_GPL(of_clk_hw_register_kunit);
+KUNIT_DEFINE_ACTION_WRAPPER(of_clk_del_provider_wrapper,
+ of_clk_del_provider, struct device_node *);
+
+/**
+ * of_clk_add_hw_provider_kunit() - Test managed of_clk_add_hw_provider()
+ * @test: The test context
+ * @np: Device node pointer associated with clock provider
+ * @get: Callback for decoding clk_hw
+ * @data: Context pointer for @get callback.
+ *
+ * Just like of_clk_add_hw_provider(), except the clk_hw provider is managed by
+ * the test case and is automatically unregistered after the test case
+ * concludes.
+ *
+ * Return: 0 on success or a negative errno value on failure.
+ */
+int of_clk_add_hw_provider_kunit(struct kunit *test, struct device_node *np,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data),
+ void *data)
+{
+ int ret;
+
+ ret = of_clk_add_hw_provider(np, get, data);
+ if (ret)
+ return ret;
+
+ return kunit_add_action_or_reset(test, of_clk_del_provider_wrapper, np);
+}
+EXPORT_SYMBOL_GPL(of_clk_add_hw_provider_kunit);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("KUnit helpers for clk providers and consumers");
diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c
index 41fc8eba3418..f08feeaa3750 100644
--- a/drivers/clk/clk_test.c
+++ b/drivers/clk/clk_test.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk/clk-conf.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -15,6 +16,7 @@
#include <kunit/platform_device.h>
#include <kunit/test.h>
+#include "kunit_clk_assigned_rates.h"
#include "clk_parent_data_test.h"
static const struct clk_ops empty_clk_ops = { };
@@ -473,7 +475,7 @@ clk_multiple_parents_mux_test_init(struct kunit *test)
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
- ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[0].hw);
if (ret)
return ret;
@@ -481,7 +483,7 @@ clk_multiple_parents_mux_test_init(struct kunit *test)
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
- ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
if (ret)
return ret;
@@ -489,23 +491,13 @@ clk_multiple_parents_mux_test_init(struct kunit *test)
ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
&clk_multiple_parents_mux_ops,
CLK_SET_RATE_PARENT);
- ret = clk_hw_register(NULL, &ctx->hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
-static void
-clk_multiple_parents_mux_test_exit(struct kunit *test)
-{
- struct clk_multiple_parent_ctx *ctx = test->priv;
-
- clk_hw_unregister(&ctx->hw);
- clk_hw_unregister(&ctx->parents_ctx[0].hw);
- clk_hw_unregister(&ctx->parents_ctx[1].hw);
-}
-
/*
* Test that for a clock with multiple parents, clk_get_parent()
* actually returns the current one.
@@ -561,18 +553,18 @@ clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
struct clk *parent1, *parent2;
unsigned long rate;
int ret;
kunit_skip(test, "This needs to be fixed in the core.");
- parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
+ parent1 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[0].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
- parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ parent2 = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
@@ -593,10 +585,6 @@ clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
-
- clk_put(parent2);
- clk_put(parent1);
- clk_put(clk);
}
static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
@@ -617,7 +605,6 @@ static struct kunit_suite
clk_multiple_parents_mux_test_suite = {
.name = "clk-multiple-parents-mux-test",
.init = clk_multiple_parents_mux_test_init,
- .exit = clk_multiple_parents_mux_test_exit,
.test_cases = clk_multiple_parents_mux_test_cases,
};
@@ -637,29 +624,20 @@ clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
&clk_dummy_rate_ops,
0);
ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
- ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->parents_ctx[1].hw);
if (ret)
return ret;
ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
&clk_multiple_parents_mux_ops,
CLK_SET_RATE_PARENT);
- ret = clk_hw_register(NULL, &ctx->hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
if (ret)
return ret;
return 0;
}
-static void
-clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
-{
- struct clk_multiple_parent_ctx *ctx = test->priv;
-
- clk_hw_unregister(&ctx->hw);
- clk_hw_unregister(&ctx->parents_ctx[1].hw);
-}
-
/*
* Test that, for a mux whose current parent hasn't been registered yet and is
* thus orphan, clk_get_parent() will return NULL.
@@ -912,7 +890,7 @@ clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(st
{
struct clk_multiple_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
struct clk *parent;
unsigned long rate;
int ret;
@@ -921,7 +899,7 @@ clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(st
clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
- parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ parent = clk_hw_get_clk_kunit(test, &ctx->parents_ctx[1].hw, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
ret = clk_set_parent(clk, parent);
@@ -931,9 +909,6 @@ clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(st
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
-
- clk_put(parent);
- clk_put(clk);
}
static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
@@ -961,7 +936,6 @@ static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[]
static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
.name = "clk-orphan-transparent-multiple-parent-mux-test",
.init = clk_orphan_transparent_multiple_parent_mux_test_init,
- .exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
.test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
};
@@ -986,7 +960,7 @@ static int clk_single_parent_mux_test_init(struct kunit *test)
&clk_dummy_rate_ops,
0);
- ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->parent_ctx.hw);
if (ret)
return ret;
@@ -994,7 +968,7 @@ static int clk_single_parent_mux_test_init(struct kunit *test)
&clk_dummy_single_parent_ops,
CLK_SET_RATE_PARENT);
- ret = clk_hw_register(NULL, &ctx->hw);
+ ret = clk_hw_register_kunit(test, NULL, &ctx->hw);
if (ret)
return ret;
@@ -1060,7 +1034,7 @@ clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
struct clk *parent;
int ret;
@@ -1074,8 +1048,6 @@ clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
ret = clk_set_rate_range(clk, 3000, 4000);
KUNIT_EXPECT_LT(test, ret, 0);
-
- clk_put(clk);
}
/*
@@ -1092,7 +1064,7 @@ clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *clk = clk_hw_get_clk_kunit(test, hw, NULL);
struct clk *parent;
int ret;
@@ -1106,8 +1078,6 @@ clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
ret = clk_set_rate_range(parent, 3000, 4000);
KUNIT_EXPECT_LT(test, ret, 0);
-
- clk_put(clk);
}
/*
@@ -1238,7 +1208,6 @@ static struct kunit_suite
clk_single_parent_mux_test_suite = {
.name = "clk-single-parent-mux-test",
.init = clk_single_parent_mux_test_init,
- .exit = clk_single_parent_mux_test_exit,
.test_cases = clk_single_parent_mux_test_cases,
};
@@ -3108,7 +3077,326 @@ static struct kunit_suite clk_register_clk_parent_data_device_suite = {
.test_cases = clk_register_clk_parent_data_device_test_cases,
};
+struct clk_assigned_rates_context {
+ struct clk_dummy_context clk0;
+ struct clk_dummy_context clk1;
+};
+
+/*
+ * struct clk_assigned_rates_test_param - Test parameters for clk_assigned_rates test
+ * @desc: Test description
+ * @overlay_begin: Pointer to start of DT overlay to apply for test
+ * @overlay_end: Pointer to end of DT overlay to apply for test
+ * @rate0: Initial rate of first clk
+ * @rate1: Initial rate of second clk
+ * @consumer_test: true if a consumer is being tested
+ */
+struct clk_assigned_rates_test_param {
+ const char *desc;
+ u8 *overlay_begin;
+ u8 *overlay_end;
+ unsigned long rate0;
+ unsigned long rate1;
+ bool consumer_test;
+};
+
+#define TEST_PARAM_OVERLAY(overlay_name) \
+ .overlay_begin = of_overlay_begin(overlay_name), \
+ .overlay_end = of_overlay_end(overlay_name)
+
+static void
+clk_assigned_rates_register_clk(struct kunit *test,
+ struct clk_dummy_context *ctx,
+ struct device_node *np, const char *name,
+ unsigned long rate)
+{
+ struct clk_init_data init = { };
+
+ init.name = name;
+ init.ops = &clk_dummy_rate_ops;
+ ctx->hw.init = &init;
+ ctx->rate = rate;
+
+ KUNIT_ASSERT_EQ(test, 0, of_clk_hw_register_kunit(test, np, &ctx->hw));
+ KUNIT_ASSERT_EQ(test, ctx->rate, rate);
+}
+
+/*
+ * Does most of the work of the test:
+ *
+ * 1. Apply the overlay to test
+ * 2. Register the clk or clks to test
+ * 3. Register the clk provider
+ * 4. Apply clk defaults to the consumer device if this is a consumer test
+ *
+ * The tests will set different test_param values to test different scenarios
+ * and validate that in their test functions.
+ */
+static int clk_assigned_rates_test_init(struct kunit *test)
+{
+ struct device_node *np, *consumer;
+ struct clk_hw_onecell_data *data;
+ struct clk_assigned_rates_context *ctx;
+ u32 clk_cells;
+ const struct clk_assigned_rates_test_param *test_param;
+
+ test_param = test->param_value;
+
+ KUNIT_ASSERT_EQ(test, 0, __of_overlay_apply_kunit(test,
+ test_param->overlay_begin,
+ test_param->overlay_end));
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL));
+ test->priv = ctx;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ np = of_find_compatible_node(NULL, NULL, "test,clk-assigned-rates"));
+ of_node_put_kunit(test, np);
+
+ KUNIT_ASSERT_EQ(test, 0, of_property_read_u32(np, "#clock-cells", &clk_cells));
+ /* Only support #clock-cells = <0> or <1> */
+ KUNIT_ASSERT_LT(test, clk_cells, 2);
+
+ clk_assigned_rates_register_clk(test, &ctx->clk0, np,
+ "test_assigned_rate0", test_param->rate0);
+ if (clk_cells == 0) {
+ KUNIT_ASSERT_EQ(test, 0,
+ of_clk_add_hw_provider_kunit(test, np, of_clk_hw_simple_get,
+ &ctx->clk0.hw));
+ } else if (clk_cells == 1) {
+ clk_assigned_rates_register_clk(test, &ctx->clk1, np,
+ "test_assigned_rate1", test_param->rate1);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ data = kunit_kzalloc(test, struct_size(data, hws, 2), GFP_KERNEL));
+ data->num = 2;
+ data->hws[0] = &ctx->clk0.hw;
+ data->hws[1] = &ctx->clk1.hw;
+
+ KUNIT_ASSERT_EQ(test, 0,
+ of_clk_add_hw_provider_kunit(test, np, of_clk_hw_onecell_get, data));
+ }
+
+ /* Consumers are optional */
+ if (test_param->consumer_test) {
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test,
+ consumer = of_find_compatible_node(NULL, NULL, "test,clk-consumer"));
+ of_node_put_kunit(test, consumer);
+
+ KUNIT_ASSERT_EQ(test, 0, of_clk_set_defaults(consumer, false));
+ }
+
+ return 0;
+}
+
+static void clk_assigned_rates_assigns_one(struct kunit *test)
+{
+ struct clk_assigned_rates_context *ctx = test->priv;
+
+ KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
+}
+
+static void clk_assigned_rates_assigns_multiple(struct kunit *test)
+{
+ struct clk_assigned_rates_context *ctx = test->priv;
+
+ KUNIT_EXPECT_EQ(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
+ KUNIT_EXPECT_EQ(test, ctx->clk1.rate, ASSIGNED_RATES_1_RATE);
+}
+
+static void clk_assigned_rates_skips(struct kunit *test)
+{
+ struct clk_assigned_rates_context *ctx = test->priv;
+ const struct clk_assigned_rates_test_param *test_param = test->param_value;
+
+ KUNIT_EXPECT_NE(test, ctx->clk0.rate, ASSIGNED_RATES_0_RATE);
+ KUNIT_EXPECT_EQ(test, ctx->clk0.rate, test_param->rate0);
+}
+
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_one_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_one_consumer);
+
+/* Test cases that assign one rate */
+static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_one_test_params[] = {
+ {
+ /*
+ * Test that a single cell assigned-clock-rates property
+ * assigns the rate when the property is in the provider.
+ */
+ .desc = "provider assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one),
+ },
+ {
+ /*
+ * Test that a single cell assigned-clock-rates property
+ * assigns the rate when the property is in the consumer.
+ */
+ .desc = "consumer assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_one_consumer),
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that a single cell assigned-clock-rates-u64 property
+ * assigns the rate when the property is in the provider.
+ */
+ .desc = "provider assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one),
+ },
+ {
+ /*
+ * Test that a single cell assigned-clock-rates-u64 property
+ * assigns the rate when the property is in the consumer.
+ */
+ .desc = "consumer assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_one_consumer),
+ .consumer_test = true,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_one,
+ clk_assigned_rates_assigns_one_test_params, desc)
+
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_multiple_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_u64_multiple_consumer);
+
+/* Test cases that assign multiple rates */
+static const struct clk_assigned_rates_test_param clk_assigned_rates_assigns_multiple_test_params[] = {
+ {
+ /*
+ * Test that a multiple cell assigned-clock-rates property
+ * assigns the rates when the property is in the provider.
+ */
+ .desc = "provider assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple),
+ },
+ {
+ /*
+ * Test that a multiple cell assigned-clock-rates property
+ * assigns the rates when the property is in the consumer.
+ */
+ .desc = "consumer assigns",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_multiple_consumer),
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that a multiple cell assigned-clock-rates-u64 property
+ * assigns the rates when the property is in the provider.
+ */
+ .desc = "provider assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple),
+ },
+ {
+ /*
+ * Test that a multiple cell assigned-clock-rates-u64 property
+ * assigns the rates when the property is in the consumer.
+ */
+ .desc = "consumer assigns u64",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_u64_multiple_consumer),
+ .consumer_test = true,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_assigns_multiple,
+ clk_assigned_rates_assigns_multiple_test_params,
+ desc)
+
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_without_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_zero_consumer);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null);
+OF_OVERLAY_DECLARE(kunit_clk_assigned_rates_null_consumer);
+
+/* Test cases that skip changing the rate due to malformed DT */
+static const struct clk_assigned_rates_test_param clk_assigned_rates_skips_test_params[] = {
+ {
+ /*
+ * Test that an assigned-clock-rates property without an assigned-clocks
+ * property fails when the property is in the provider.
+ */
+ .desc = "provider missing assigned-clocks",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without),
+ .rate0 = 3000,
+ },
+ {
+ /*
+ * Test that an assigned-clock-rates property without an assigned-clocks
+ * property fails when the property is in the consumer.
+ */
+ .desc = "consumer missing assigned-clocks",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_without_consumer),
+ .rate0 = 3000,
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that an assigned-clock-rates property of zero doesn't
+ * set a rate when the property is in the provider.
+ */
+ .desc = "provider assigned-clock-rates of zero",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero),
+ .rate0 = 3000,
+ },
+ {
+ /*
+ * Test that an assigned-clock-rates property of zero doesn't
+ * set a rate when the property is in the consumer.
+ */
+ .desc = "consumer assigned-clock-rates of zero",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_zero_consumer),
+ .rate0 = 3000,
+ .consumer_test = true,
+ },
+ {
+ /*
+ * Test that an assigned-clocks property with a null phandle
+ * doesn't set a rate when the property is in the provider.
+ */
+ .desc = "provider assigned-clocks null phandle",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null),
+ .rate0 = 3000,
+ },
+ {
+ /*
+ * Test that an assigned-clocks property with a null phandle
+ * doesn't set a rate when the property is in the consumer.
+ */
+ .desc = "provider assigned-clocks null phandle",
+ TEST_PARAM_OVERLAY(kunit_clk_assigned_rates_null_consumer),
+ .rate0 = 3000,
+ .consumer_test = true,
+ },
+};
+KUNIT_ARRAY_PARAM_DESC(clk_assigned_rates_skips,
+ clk_assigned_rates_skips_test_params,
+ desc)
+
+static struct kunit_case clk_assigned_rates_test_cases[] = {
+ KUNIT_CASE_PARAM(clk_assigned_rates_assigns_one,
+ clk_assigned_rates_assigns_one_gen_params),
+ KUNIT_CASE_PARAM(clk_assigned_rates_assigns_multiple,
+ clk_assigned_rates_assigns_multiple_gen_params),
+ KUNIT_CASE_PARAM(clk_assigned_rates_skips,
+ clk_assigned_rates_skips_gen_params),
+ {}
+};
+
+/*
+ * Test suite for assigned-clock-rates{-u64} DT property.
+ */
+static struct kunit_suite clk_assigned_rates_suite = {
+ .name = "clk_assigned_rates",
+ .test_cases = clk_assigned_rates_test_cases,
+ .init = clk_assigned_rates_test_init,
+};
+
kunit_test_suites(
+ &clk_assigned_rates_suite,
&clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
&clk_multiple_parents_mux_test_suite,
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 5bbbb3a66477..82727b1fc67a 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -19,7 +19,6 @@
#include <linux/mfd/syscon.h>
#include <linux/notifier.h>
#include <linux/of.h>
-#include <linux/platform_data/clk-davinci-pll.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -840,27 +839,6 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
return 0;
}
-static struct davinci_pll_platform_data *davinci_pll_get_pdata(struct device *dev)
-{
- struct davinci_pll_platform_data *pdata = dev_get_platdata(dev);
-
- /*
- * Platform data is optional, so allocate a new struct if one was not
- * provided. For device tree, this will always be the case.
- */
- if (!pdata)
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return NULL;
-
- /* for device tree, we need to fill in the struct */
- if (dev->of_node)
- pdata->cfgchip =
- syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
-
- return pdata;
-}
-
/* needed in early boot for clocksource/clockevent */
#ifdef CONFIG_ARCH_DAVINCI_DA850
CLK_OF_DECLARE(da850_pll0, "ti,da850-pll0", of_da850_pll0_init);
@@ -890,8 +868,8 @@ typedef int (*davinci_pll_init)(struct device *dev, void __iomem *base,
static int davinci_pll_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct davinci_pll_platform_data *pdata;
davinci_pll_init pll_init = NULL;
+ struct regmap *cfgchip;
void __iomem *base;
pll_init = device_get_match_data(dev);
@@ -903,17 +881,13 @@ static int davinci_pll_probe(struct platform_device *pdev)
return -EINVAL;
}
- pdata = davinci_pll_get_pdata(dev);
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
+ cfgchip = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- return pll_init(dev, base, pdata->cfgchip);
+ return pll_init(dev, base, cfgchip);
}
static struct platform_driver davinci_pll_driver = {
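
The davinci change drops the optional platform-data path entirely and fetches the CFGCHIP syscon regmap at probe time. Note that syscon_regmap_lookup_by_compatible() returns an ERR_PTR on failure and the patch hands that value straight to pll_init(), so any required error check presumably lives in the per-SoC init functions. A hedged sketch of the lookup pattern with an explicit check (hypothetical function name, not the davinci code itself):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int example_get_cfgchip(struct regmap **out)
{
	struct regmap *map;

	/* Resolves the first DT node matching the compatible. */
	map = syscon_regmap_lookup_by_compatible("ti,da830-cfgchip");
	if (IS_ERR(map))
		return PTR_ERR(map);

	*out = map;
	return 0;
}
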
diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
index 591e0364ee5c..85771afd4698 100644
--- a/drivers/clk/imx/clk-fracn-gppll.c
+++ b/drivers/clk/imx/clk-fracn-gppll.c
@@ -254,9 +254,11 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
pll_div = FIELD_PREP(PLL_RDIV_MASK, rate->rdiv) | rate->odiv |
FIELD_PREP(PLL_MFI_MASK, rate->mfi);
writel_relaxed(pll_div, pll->base + PLL_DIV);
+ readl(pll->base + PLL_DIV);
if (pll->flags & CLK_FRACN_GPPLL_FRACN) {
writel_relaxed(rate->mfd, pll->base + PLL_DENOMINATOR);
writel_relaxed(FIELD_PREP(PLL_MFN_MASK, rate->mfn), pll->base + PLL_NUMERATOR);
+ readl(pll->base + PLL_NUMERATOR);
}
/* Wait for 5us according to fracn mode pll doc */
@@ -265,6 +267,7 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
/* Enable Powerup */
tmp |= POWERUP_MASK;
writel_relaxed(tmp, pll->base + PLL_CTRL);
+ readl(pll->base + PLL_CTRL);
/* Wait Lock */
ret = clk_fracn_gppll_wait_lock(pll);
@@ -302,14 +305,15 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw)
val |= POWERUP_MASK;
writel_relaxed(val, pll->base + PLL_CTRL);
-
- val |= CLKMUX_EN;
- writel_relaxed(val, pll->base + PLL_CTRL);
+ readl(pll->base + PLL_CTRL);
ret = clk_fracn_gppll_wait_lock(pll);
if (ret)
return ret;
+ val |= CLKMUX_EN;
+ writel_relaxed(val, pll->base + PLL_CTRL);
+
val &= ~CLKMUX_BYPASS;
writel_relaxed(val, pll->base + PLL_CTRL);
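
All three hunks in clk-fracn-gppll.c apply the same idiom: a writel_relaxed() is posted and may still be in flight when the CPU moves on, so a readl() of the same register is issued to force completion before a fixed delay or a lock poll begins. A generic sketch of the idiom, using a hypothetical EX_PLL_CTRL offset:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

#define EX_PLL_CTRL	0x0	/* hypothetical register offset */

static void example_write_then_settle(void __iomem *base, u32 val)
{
	writel_relaxed(val, base + EX_PLL_CTRL);
	/*
	 * The read cannot complete until the posted write has reached
	 * the device, so the delay below starts only after the
	 * hardware has actually seen the new value.
	 */
	readl(base + EX_PLL_CTRL);
	udelay(5);
}
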
diff --git a/drivers/clk/imx/clk-imx8-acm.c b/drivers/clk/imx/clk-imx8-acm.c
index 6c351050b82a..c169fe53a35f 100644
--- a/drivers/clk/imx/clk-imx8-acm.c
+++ b/drivers/clk/imx/clk-imx8-acm.c
@@ -294,9 +294,9 @@ static int clk_imx_acm_attach_pm_domains(struct device *dev,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
- if (IS_ERR(dev_pm->pd_dev_link[i])) {
+ if (!dev_pm->pd_dev_link[i]) {
dev_pm_domain_detach(dev_pm->pd_dev[i], false);
- ret = PTR_ERR(dev_pm->pd_dev_link[i]);
+ ret = -EINVAL;
goto detach_pm;
}
}
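
The clk-imx8-acm.c fix corrects a misuse of the device-link API: device_link_add() signals failure by returning NULL, never an ERR_PTR, so the old IS_ERR()/PTR_ERR() pair could never trigger. The corrected pattern, sketched generically:

#include <linux/device.h>

static int example_link_supplier(struct device *consumer,
				 struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_STATELESS |
			       DL_FLAG_PM_RUNTIME |
			       DL_FLAG_RPM_ACTIVE);
	if (!link)	/* NULL on failure, not an ERR_PTR */
		return -EINVAL;

	return 0;
}
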
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index b2cb157703c5..c409fc7e0618 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -278,7 +278,8 @@ static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
#else /* !CONFIG_RESET_CONTROLLER */
-static int clk_imx8mp_audiomix_reset_controller_register(struct clk_imx8mp_audiomix_priv *priv)
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+ struct clk_imx8mp_audiomix_priv *priv)
{
return 0;
}
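
The audiomix hunk fixes a build breakage specific to !CONFIG_RESET_CONTROLLER: the stub had kept an old one-argument signature while the real function took (dev, priv), so configurations without reset-controller support failed to compile. The usual discipline, sketched with hypothetical names, is to keep the stub's signature identical to the real definition:

#include <linux/device.h>

struct example_priv;

#ifdef CONFIG_EXAMPLE_RESET
int example_reset_register(struct device *dev, struct example_priv *priv);
#else
/* Stub must mirror the real signature exactly or !CONFIG builds break. */
static inline int example_reset_register(struct device *dev,
					 struct example_priv *priv)
{
	return 0;
}
#endif
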
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 516dbd170c8a..fb18f507f121 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -399,8 +399,9 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out",
"dummy", "dummy", "gpu_pll_out", "vpu_pll_out",
- "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3",
- "dummy", "dummy", "osc_24m", "dummy", "osc_32k"};
+ "arm_pll_out", "sys_pll1_out", "sys_pll2_out",
+ "sys_pll3_out", "dummy", "dummy", "osc_24m",
+ "dummy", "osc_32k"};
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index c6a9bc8ecc1f..c5f358a75f30 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -15,6 +15,11 @@
#include "clk.h"
+#define IMX93_CLK_END 208
+
+#define PLAT_IMX93 BIT(0)
+#define PLAT_IMX91 BIT(1)
+
enum clk_sel {
LOW_SPEED_IO_SEL,
NON_IO_SEL,
@@ -33,6 +38,7 @@ static u32 share_count_sai2;
static u32 share_count_sai3;
static u32 share_count_mub;
static u32 share_count_pdm;
+static u32 share_count_spdif;
static const char * const a55_core_sels[] = {"a55_alt", "arm_pll"};
static const char *parent_names[MAX_SEL][4] = {
@@ -53,6 +59,7 @@ static const struct imx93_clk_root {
u32 off;
enum clk_sel sel;
unsigned long flags;
+ unsigned long plat;
} root_array[] = {
/* a55/m33/bus critical clk for system run */
{ IMX93_CLK_A55_PERIPH, "a55_periph_root", 0x0000, FAST_SEL, CLK_IS_CRITICAL },
@@ -63,9 +70,9 @@ static const struct imx93_clk_root {
{ IMX93_CLK_BUS_AON, "bus_aon_root", 0x0300, LOW_SPEED_IO_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_WAKEUP_AXI, "wakeup_axi_root", 0x0380, FAST_SEL, CLK_IS_CRITICAL },
{ IMX93_CLK_SWO_TRACE, "swo_trace_root", 0x0400, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_M33_SYSTICK, "m33_systick_root", 0x0480, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, },
+ { IMX93_CLK_M33_SYSTICK, "m33_systick_root", 0x0480, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_FLEXIO1, "flexio1_root", 0x0500, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_FLEXIO2, "flexio2_root", 0x0580, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_LPTMR1, "lptmr1_root", 0x0700, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_LPTMR2, "lptmr2_root", 0x0780, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_TPM2, "tpm2_root", 0x0880, TPM_SEL, },
@@ -120,15 +127,15 @@ static const struct imx93_clk_root {
{ IMX93_CLK_HSIO_ACSCAN_80M, "hsio_acscan_80m_root", 0x1f80, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_HSIO_ACSCAN_480M, "hsio_acscan_480m_root", 0x2000, MISC_SEL, },
{ IMX93_CLK_NIC_AXI, "nic_axi_root", 0x2080, FAST_SEL, CLK_IS_CRITICAL, },
- { IMX93_CLK_ML_APB, "ml_apb_root", 0x2180, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_ML, "ml_root", 0x2200, FAST_SEL, },
+ { IMX93_CLK_ML_APB, "ml_apb_root", 0x2180, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ML, "ml_root", 0x2200, FAST_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_MEDIA_AXI, "media_axi_root", 0x2280, FAST_SEL, },
{ IMX93_CLK_MEDIA_APB, "media_apb_root", 0x2300, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_MEDIA_LDB, "media_ldb_root", 0x2380, VIDEO_SEL, },
+ { IMX93_CLK_MEDIA_LDB, "media_ldb_root", 0x2380, VIDEO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_MEDIA_DISP_PIX, "media_disp_pix_root", 0x2400, VIDEO_SEL, },
{ IMX93_CLK_CAM_PIX, "cam_pix_root", 0x2480, VIDEO_SEL, },
- { IMX93_CLK_MIPI_TEST_BYTE, "mipi_test_byte_root", 0x2500, VIDEO_SEL, },
- { IMX93_CLK_MIPI_PHY_CFG, "mipi_phy_cfg_root", 0x2580, VIDEO_SEL, },
+ { IMX93_CLK_MIPI_TEST_BYTE, "mipi_test_byte_root", 0x2500, VIDEO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_MIPI_PHY_CFG, "mipi_phy_cfg_root", 0x2580, VIDEO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_ADC, "adc_root", 0x2700, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_PDM, "pdm_root", 0x2780, AUDIO_SEL, },
{ IMX93_CLK_TSTMR1, "tstmr1_root", 0x2800, LOW_SPEED_IO_SEL, },
@@ -137,13 +144,16 @@ static const struct imx93_clk_root {
{ IMX93_CLK_MQS2, "mqs2_root", 0x2980, AUDIO_SEL, },
{ IMX93_CLK_AUDIO_XCVR, "audio_xcvr_root", 0x2a00, NON_IO_SEL, },
{ IMX93_CLK_SPDIF, "spdif_root", 0x2a80, AUDIO_SEL, },
- { IMX93_CLK_ENET, "enet_root", 0x2b00, NON_IO_SEL, },
- { IMX93_CLK_ENET_TIMER1, "enet_timer1_root", 0x2b80, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_ENET_TIMER2, "enet_timer2_root", 0x2c00, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_ENET_REF, "enet_ref_root", 0x2c80, NON_IO_SEL, },
- { IMX93_CLK_ENET_REF_PHY, "enet_ref_phy_root", 0x2d00, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_I3C1_SLOW, "i3c1_slow_root", 0x2d80, LOW_SPEED_IO_SEL, },
- { IMX93_CLK_I3C2_SLOW, "i3c2_slow_root", 0x2e00, LOW_SPEED_IO_SEL, },
+ { IMX93_CLK_ENET, "enet_root", 0x2b00, NON_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_TIMER1, "enet_timer1_root", 0x2b80, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_TIMER2, "enet_timer2_root", 0x2c00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_REF, "enet_ref_root", 0x2c80, NON_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_ENET_REF_PHY, "enet_ref_phy_root", 0x2d00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX91_CLK_ENET1_QOS_TSN, "enet1_qos_tsn_root", 0x2b00, NON_IO_SEL, 0, PLAT_IMX91, },
+ { IMX91_CLK_ENET_TIMER, "enet_timer_root", 0x2b80, LOW_SPEED_IO_SEL, 0, PLAT_IMX91, },
+ { IMX91_CLK_ENET2_REGULAR, "enet2_regular_root", 0x2c80, NON_IO_SEL, 0, PLAT_IMX91, },
+ { IMX93_CLK_I3C1_SLOW, "i3c1_slow_root", 0x2d80, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
+ { IMX93_CLK_I3C2_SLOW, "i3c2_slow_root", 0x2e00, LOW_SPEED_IO_SEL, 0, PLAT_IMX93, },
{ IMX93_CLK_USB_PHY_BURUNIN, "usb_phy_root", 0x2e80, LOW_SPEED_IO_SEL, },
{ IMX93_CLK_PAL_CAME_SCAN, "pal_came_scan_root", 0x2f00, MISC_SEL, }
};
@@ -155,6 +165,7 @@ static const struct imx93_clk_ccgr {
u32 off;
unsigned long flags;
u32 *shared_count;
+ unsigned long plat;
} ccgr_array[] = {
{ IMX93_CLK_A55_GATE, "a55_alt", "a55_alt_root", 0x8000, },
/* M33 critical clk for system run */
@@ -167,10 +178,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_WDOG5_GATE, "wdog5", "osc_24m", 0x8400, },
{ IMX93_CLK_SEMA1_GATE, "sema1", "bus_aon_root", 0x8440, },
{ IMX93_CLK_SEMA2_GATE, "sema2", "bus_wakeup_root", 0x8480, },
- { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED },
- { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED },
- { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub },
- { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub },
+ { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
+ { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED, NULL, PLAT_IMX93 },
+ { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub, PLAT_IMX93 },
+ { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub, PLAT_IMX93 },
{ IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
{ IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
{ IMX93_CLK_FLEXSPI1_GATE, "flexspi1", "flexspi1_root", 0x8640, },
@@ -178,8 +189,8 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_GPIO2_GATE, "gpio2", "bus_wakeup_root", 0x88c0, },
{ IMX93_CLK_GPIO3_GATE, "gpio3", "bus_wakeup_root", 0x8900, },
{ IMX93_CLK_GPIO4_GATE, "gpio4", "bus_wakeup_root", 0x8940, },
- { IMX93_CLK_FLEXIO1_GATE, "flexio1", "flexio1_root", 0x8980, },
- { IMX93_CLK_FLEXIO2_GATE, "flexio2", "flexio2_root", 0x89c0, },
+	{ IMX93_CLK_FLEXIO1_GATE,	"flexio1",		"flexio1_root",		0x8980, 0, NULL, PLAT_IMX93 },
+	{ IMX93_CLK_FLEXIO2_GATE,	"flexio2",		"flexio2_root",		0x89c0, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_LPIT1_GATE, "lpit1", "bus_aon_root", 0x8a00, },
{ IMX93_CLK_LPIT2_GATE, "lpit2", "bus_wakeup_root", 0x8a40, },
{ IMX93_CLK_LPTMR1_GATE, "lptmr1", "lptmr1_root", 0x8a80, },
@@ -228,10 +239,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_SAI3_GATE, "sai3", "sai3_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_SAI3_IPG, "sai3_ipg_clk", "bus_wakeup_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_MIPI_CSI_GATE, "mipi_csi", "media_apb_root", 0x9580, },
- { IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, },
- { IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, },
+ { IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, 0, NULL, PLAT_IMX93 },
+ { IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_LCDIF_GATE, "lcdif", "media_apb_root", 0x9640, },
- { IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, },
+ { IMX93_CLK_PXP_GATE, "pxp", "media_apb_root", 0x9680, 0, NULL, PLAT_IMX93 },
{ IMX93_CLK_ISI_GATE, "isi", "media_apb_root", 0x96c0, },
{ IMX93_CLK_NIC_MEDIA_GATE, "nic_media", "media_axi_root", 0x9700, },
{ IMX93_CLK_USB_CONTROLLER_GATE, "usb_controller", "hsio_root", 0x9a00, },
@@ -242,10 +253,13 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_MQS1_GATE, "mqs1", "sai1_root", 0x9b00, },
{ IMX93_CLK_MQS2_GATE, "mqs2", "sai3_root", 0x9b40, },
{ IMX93_CLK_AUD_XCVR_GATE, "aud_xcvr", "audio_xcvr_root", 0x9b80, },
- { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, },
+ { IMX93_CLK_SPDIF_IPG, "spdif_ipg_clk", "bus_wakeup_root", 0x9c00, 0, &share_count_spdif},
+ { IMX93_CLK_SPDIF_GATE, "spdif", "spdif_root", 0x9c00, 0, &share_count_spdif},
{ IMX93_CLK_HSIO_32K_GATE, "hsio_32k", "osc_32k", 0x9dc0, },
- { IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, },
- { IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, },
+ { IMX93_CLK_ENET1_GATE, "enet1", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX93, },
+ { IMX93_CLK_ENET_QOS_GATE, "enet_qos", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX93, },
+ { IMX91_CLK_ENET2_REGULAR_GATE, "enet2_regular", "wakeup_axi_root", 0x9e00, 0, NULL, PLAT_IMX91, },
+ { IMX91_CLK_ENET1_QOS_TSN_GATE, "enet1_qos_tsn", "wakeup_axi_root", 0x9e40, 0, NULL, PLAT_IMX91, },
/* Critical because clk accessed during CPU idle */
{ IMX93_CLK_SYS_CNT_GATE, "sys_cnt", "osc_24m", 0x9e80, CLK_IS_CRITICAL},
{ IMX93_CLK_TSTMR1_GATE, "tstmr1", "bus_aon_root", 0x9ec0, },
@@ -265,6 +279,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
const struct imx93_clk_ccgr *ccgr;
void __iomem *base, *anatop_base;
int i, ret;
+ const unsigned long plat = (unsigned long)device_get_match_data(&pdev->dev);
clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws,
IMX93_CLK_END), GFP_KERNEL);
@@ -314,17 +329,20 @@ static int imx93_clocks_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(root_array); i++) {
root = &root_array[i];
- clks[root->clk] = imx93_clk_composite_flags(root->name,
- parent_names[root->sel],
- 4, base + root->off, 3,
- root->flags);
+ if (!root->plat || root->plat & plat)
+ clks[root->clk] = imx93_clk_composite_flags(root->name,
+ parent_names[root->sel],
+ 4, base + root->off, 3,
+ root->flags);
}
for (i = 0; i < ARRAY_SIZE(ccgr_array); i++) {
ccgr = &ccgr_array[i];
- clks[ccgr->clk] = imx93_clk_gate(NULL, ccgr->name, ccgr->parent_name,
- ccgr->flags, base + ccgr->off, 0, 1, 1, 3,
- ccgr->shared_count);
+ if (!ccgr->plat || ccgr->plat & plat)
+ clks[ccgr->clk] = imx93_clk_gate(NULL,
+ ccgr->name, ccgr->parent_name,
+ ccgr->flags, base + ccgr->off, 0, 1, 1, 3,
+ ccgr->shared_count);
}
clks[IMX93_CLK_A55_SEL] = imx_clk_hw_mux2("a55_sel", base + 0x4820, 0, 1, a55_core_sels,
@@ -354,7 +372,8 @@ unregister_hws:
}
static const struct of_device_id imx93_clk_of_match[] = {
- { .compatible = "fsl,imx93-ccm" },
+ { .compatible = "fsl,imx93-ccm", .data = (void *)PLAT_IMX93 },
+ { .compatible = "fsl,imx91-ccm", .data = (void *)PLAT_IMX91 },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, imx93_clk_of_match);
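
The clk-imx93.c changes make one driver serve both i.MX93 and i.MX91: each table entry gains a plat bitmask, the OF match data carries the platform bit, and the probe loop skips entries whose mask excludes the running platform (plat == 0 means common to all). A hedged sketch of the same match-data filter with hypothetical names:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/property.h>

#define EX_PLAT_A	BIT(0)
#define EX_PLAT_B	BIT(1)

struct ex_clk_entry {
	const char *name;
	unsigned long plat;	/* 0 means: register on every platform */
};

static const struct ex_clk_entry ex_entries[] = {
	{ "common_clk", 0 },
	{ "a_only_clk", EX_PLAT_A },
	{ "b_only_clk", EX_PLAT_B },
};

static void ex_register_clks(struct device *dev)
{
	unsigned long plat = (unsigned long)device_get_match_data(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(ex_entries); i++) {
		if (ex_entries[i].plat && !(ex_entries[i].plat & plat))
			continue;	/* belongs to another platform */
		/* ... register ex_entries[i].name here ... */
	}
}
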
diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
index 19a62da74be4..25974947ad0c 100644
--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
+++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
@@ -277,6 +277,25 @@ static const struct imx95_blk_ctl_dev_data netcmix_dev_data = {
.clk_reg_offset = 0,
};
+static const struct imx95_blk_ctl_clk_dev_data hsio_blk_ctl_clk_dev_data[] = {
+ [0] = {
+ .name = "hsio_blk_ctl_clk",
+ .parent_names = (const char *[]){ "hsio_pll", },
+ .num_parents = 1,
+ .reg = 0,
+ .bit_idx = 6,
+ .bit_width = 1,
+ .type = CLK_GATE,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static const struct imx95_blk_ctl_dev_data hsio_blk_ctl_dev_data = {
+ .num_clks = 1,
+ .clk_dev_data = hsio_blk_ctl_clk_dev_data,
+ .clk_reg_offset = 0,
+};
+
static int imx95_bc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -447,6 +466,7 @@ static const struct of_device_id imx95_bc_of_match[] = {
{ .compatible = "nxp,imx95-display-master-csr", },
{ .compatible = "nxp,imx95-lvds-csr", .data = &lvds_csr_dev_data },
{ .compatible = "nxp,imx95-display-csr", .data = &dispmix_csr_dev_data },
+ { .compatible = "nxp,imx95-hsio-blk-ctl", .data = &hsio_blk_ctl_dev_data },
{ .compatible = "nxp,imx95-vpu-csr", .data = &vpublk_dev_data },
{ .compatible = "nxp,imx95-netcmix-blk-ctrl", .data = &netcmix_dev_data},
{ /* Sentinel */ },
diff --git a/drivers/clk/imx/clk-lpcg-scu.c b/drivers/clk/imx/clk-lpcg-scu.c
index dd5abd09f3e2..6376557a3c3d 100644
--- a/drivers/clk/imx/clk-lpcg-scu.c
+++ b/drivers/clk/imx/clk-lpcg-scu.c
@@ -6,10 +6,12 @@
#include <linux/bits.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/units.h>
#include "clk-scu.h"
@@ -41,6 +43,29 @@ struct clk_lpcg_scu {
#define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw)
+/* e10858 - LPCG clock gating register synchronization errata */
+static void lpcg_e10858_writel(unsigned long rate, void __iomem *reg, u32 val)
+{
+ writel(val, reg);
+
+ if (rate >= 24 * HZ_PER_MHZ || rate == 0) {
+ /*
+ * The time taken to access the LPCG registers from the AP core
+ * through the interconnect is longer than the minimum delay
+ * of 4 clock cycles required by the errata.
+ * Adding a readl will provide sufficient delay to prevent
+ * back-to-back writes.
+ */
+ readl(reg);
+ } else {
+ /*
+ * For clocks running below 24MHz, wait a minimum of
+ * 4 clock cycles.
+ */
+ ndelay(4 * (DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate)));
+ }
+}
+
static int clk_lpcg_scu_enable(struct clk_hw *hw)
{
struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
@@ -57,7 +82,8 @@ static int clk_lpcg_scu_enable(struct clk_hw *hw)
val |= CLK_GATE_SCU_LPCG_HW_SEL;
reg |= val << clk->bit_idx;
- writel(reg, clk->reg);
+
+ lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg);
spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
@@ -74,7 +100,7 @@ static void clk_lpcg_scu_disable(struct clk_hw *hw)
reg = readl_relaxed(clk->reg);
reg &= ~(CLK_GATE_SCU_LPCG_MASK << clk->bit_idx);
- writel(reg, clk->reg);
+ lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg);
spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
}
@@ -135,6 +161,9 @@ static int __maybe_unused imx_clk_lpcg_scu_suspend(struct device *dev)
{
struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
+ if (!strncmp("hdmi_lpcg", clk_hw_get_name(&clk->hw), strlen("hdmi_lpcg")))
+ return 0;
+
clk->state = readl_relaxed(clk->reg);
dev_dbg(dev, "save lpcg state 0x%x\n", clk->state);
@@ -145,13 +174,11 @@ static int __maybe_unused imx_clk_lpcg_scu_resume(struct device *dev)
{
struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
- /*
- * FIXME: Sometimes writes don't work unless the CPU issues
- * them twice
- */
+ if (!strncmp("hdmi_lpcg", clk_hw_get_name(&clk->hw), strlen("hdmi_lpcg")))
+ return 0;
writel(clk->state, clk->reg);
- writel(clk->state, clk->reg);
+ lpcg_e10858_writel(0, clk->reg, clk->state);
dev_dbg(dev, "restore lpcg state 0x%x\n", clk->state);
return 0;
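
The arithmetic in lpcg_e10858_writel() deserves a worked example. The erratum requires at least 4 clock cycles between back-to-back LPCG writes. At or above 24 MHz (or when the rate is unknown and passed as 0, as in the resume path), a register read-back over the interconnect already takes longer than that, so readl() suffices. Below 24 MHz, DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate) computes nanoseconds per cycle, rounded up: for a 12 MHz gate clock that is DIV_ROUND_UP(10^9, 12 * 10^6) = 84 ns, so ndelay(4 * 84) waits 336 ns, just past the 4-cycle minimum of 333.3 ns. Rounding up is what keeps the wait on the safe side of the erratum.
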
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index d63564dbb12c..f290981ea13b 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -56,7 +56,9 @@ static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
PLL_1416X_RATE(700000000U, 350, 3, 2),
PLL_1416X_RATE(640000000U, 320, 3, 2),
PLL_1416X_RATE(600000000U, 300, 3, 2),
+ PLL_1416X_RATE(416000000U, 208, 3, 2),
PLL_1416X_RATE(320000000U, 160, 3, 2),
+ PLL_1416X_RATE(208000000U, 208, 3, 3),
};
static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
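
The two new 1416x entries can be sanity-checked against the PLL's output relation, which (judging from the existing rows) is FOUT = FIN * m / (p * 2^s) with FIN = 24 MHz and PLL_1416X_RATE(rate, m, p, s): 24 MHz * 208 / (3 * 2^2) = 416 MHz and 24 MHz * 208 / (3 * 2^3) = 208 MHz, consistent with the pre-existing 600 MHz row, 24 MHz * 300 / (3 * 2^2). The 208 MHz entry reuses the 416 MHz multiplier and simply doubles the post divider.
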
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index b1dd0c08e091..b27186aaf2a1 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -596,7 +596,7 @@ static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
clk->rate = clk_scu_recalc_rate(&clk->hw, 0);
else
clk->rate = clk_hw_get_rate(&clk->hw);
- clk->is_enabled = clk_hw_is_enabled(&clk->hw);
+ clk->is_enabled = clk_hw_is_prepared(&clk->hw);
if (clk->parent)
dev_dbg(dev, "save parent %s idx %u\n", clk_hw_get_name(clk->parent),
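
The clk-scu.c one-liner swaps clk_hw_is_enabled() for clk_hw_is_prepared() when saving state across suspend. SCU clocks are gated through sleeping firmware calls, so their gating plausibly lives in the .prepare/.unprepare ops rather than the atomic .enable path, in which case only the prepared state reflects whether the clock was actually on. For reference, the consumer-side split in the generic clk API:

#include <linux/clk.h>

static int example_clk_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep: PLL lock, IPC, ... */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* atomic, interrupt-safe */
	if (ret)
		clk_unprepare(clk);

	return ret;
}
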
diff --git a/drivers/clk/kunit_clk_assigned_rates.h b/drivers/clk/kunit_clk_assigned_rates.h
new file mode 100644
index 000000000000..df2d84dcaa93
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_CLK_ASSIGNED_RATES_H
+#define _KUNIT_CLK_ASSIGNED_RATES_H
+
+#define ASSIGNED_RATES_0_RATE 1600000
+#define ASSIGNED_RATES_1_RATE 9700000
+
+#endif
diff --git a/drivers/clk/kunit_clk_assigned_rates_multiple.dtso b/drivers/clk/kunit_clk_assigned_rates_multiple.dtso
new file mode 100644
index 000000000000..e600736e70f5
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_multiple.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>,
+ <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso
new file mode 100644
index 000000000000..260aba458daf
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_multiple_consumer.dtso
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>,
+ <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_null.dtso b/drivers/clk/kunit_clk_assigned_rates_null.dtso
new file mode 100644
index 000000000000..0b27b38a9130
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_null.dtso
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <0>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso
new file mode 100644
index 000000000000..99fb332ae83d
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_null_consumer.dtso
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <0>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_one.dtso b/drivers/clk/kunit_clk_assigned_rates_one.dtso
new file mode 100644
index 000000000000..dd95ec9b1cf9
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_one.dtso
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso
new file mode 100644
index 000000000000..a41dca806318
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_one_consumer.dtso
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso
new file mode 100644
index 000000000000..389b4e2eb7f7
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_multiple.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>,
+ /bits/ 64 <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso
new file mode 100644
index 000000000000..3e117fd59b7d
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_multiple_consumer.dtso
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <1>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk 0>,
+ <&clk 1>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>,
+ /bits/ 64 <ASSIGNED_RATES_1_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso
new file mode 100644
index 000000000000..87041264e8f5
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_one.dtso
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <&clk>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso
new file mode 100644
index 000000000000..3259c003aec0
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_u64_one_consumer.dtso
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk>;
+ assigned-clock-rates-u64 = /bits/ 64 <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_without.dtso b/drivers/clk/kunit_clk_assigned_rates_without.dtso
new file mode 100644
index 000000000000..22d333495cf2
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_without.dtso
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso
new file mode 100644
index 000000000000..75ac09140f83
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_without_consumer.dtso
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+#include "kunit_clk_assigned_rates.h"
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clock-rates = <ASSIGNED_RATES_0_RATE>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_zero.dtso b/drivers/clk/kunit_clk_assigned_rates_zero.dtso
new file mode 100644
index 000000000000..08e042c2eafe
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_zero.dtso
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <0>;
+ };
+};
diff --git a/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso b/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso
new file mode 100644
index 000000000000..1d964672e855
--- /dev/null
+++ b/drivers/clk/kunit_clk_assigned_rates_zero_consumer.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+/plugin/;
+
+&{/} {
+ clk: kunit-clock {
+ compatible = "test,clk-assigned-rates";
+ #clock-cells = <0>;
+ };
+
+ kunit-clock-consumer {
+ compatible = "test,clk-consumer";
+ assigned-clocks = <&clk>;
+ assigned-clock-rates = <0>;
+ };
+};
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 70a005e7e1b1..5f8e6d68fa14 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -124,6 +124,43 @@ config COMMON_CLK_MT2712_VENCSYS
help
This driver supports MediaTek MT2712 vencsys clocks.
+config COMMON_CLK_MT6735
+ tristate "Main clock drivers for MediaTek MT6735"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ help
+ This enables drivers for clocks and resets provided
+ by apmixedsys, topckgen, infracfg and pericfg on the
+ MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_IMGSYS
+ tristate "Clock driver for MediaTek MT6735 imgsys"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks provided by imgsys
+ on the MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_MFGCFG
+ tristate "Clock driver for MediaTek MT6735 mfgcfg"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks and resets provided
+ by mfgcfg on the MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_VDECSYS
+ tristate "Clock driver for MediaTek MT6735 vdecsys"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks and resets provided
+ by vdecsys on the MediaTek MT6735 SoC.
+
+config COMMON_CLK_MT6735_VENCSYS
+ tristate "Clock driver for MediaTek MT6735 vencsys"
+ depends on COMMON_CLK_MT6735
+ help
+ This enables a driver for clocks provided by vencsys
+ on the MediaTek MT6735 SoC.
+
config COMMON_CLK_MT6765
bool "Clock driver for MediaTek MT6765"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
@@ -887,13 +924,6 @@ config COMMON_CLK_MT8195_APUSYS
help
This driver supports MediaTek MT8195 AI Processor Unit System clocks.
-config COMMON_CLK_MT8195_AUDSYS
- tristate "Clock driver for MediaTek MT8195 audsys"
- depends on COMMON_CLK_MT8195
- default COMMON_CLK_MT8195
- help
- This driver supports MediaTek MT8195 audsys clocks.
-
config COMMON_CLK_MT8195_IMP_IIC_WRAP
tristate "Clock driver for MediaTek MT8195 imp_iic_wrap"
depends on COMMON_CLK_MT8195
@@ -908,14 +938,6 @@ config COMMON_CLK_MT8195_MFGCFG
help
This driver supports MediaTek MT8195 mfgcfg clocks.
-config COMMON_CLK_MT8195_MSDC
- tristate "Clock driver for MediaTek MT8195 msdc"
- depends on COMMON_CLK_MT8195
- default COMMON_CLK_MT8195
- help
- This driver supports MediaTek MT8195 MMC and SD Controller's
- msdc and msdc_top clocks.
-
config COMMON_CLK_MT8195_SCP_ADSP
tristate "Clock driver for MediaTek MT8195 scp_adsp"
depends on COMMON_CLK_MT8195
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index eeccfa039896..6efec95406bd 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -2,6 +2,11 @@
obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o
obj-$(CONFIG_COMMON_CLK_MEDIATEK_FHCTL) += clk-fhctl.o clk-pllfh.o
+obj-$(CONFIG_COMMON_CLK_MT6735) += clk-mt6735-apmixedsys.o clk-mt6735-infracfg.o clk-mt6735-pericfg.o clk-mt6735-topckgen.o
+obj-$(CONFIG_COMMON_CLK_MT6735_IMGSYS) += clk-mt6735-imgsys.o
+obj-$(CONFIG_COMMON_CLK_MT6735_MFGCFG) += clk-mt6735-mfgcfg.o
+obj-$(CONFIG_COMMON_CLK_MT6735_VDECSYS) += clk-mt6735-vdecsys.o
+obj-$(CONFIG_COMMON_CLK_MT6735_VENCSYS) += clk-mt6735-vencsys.o
obj-$(CONFIG_COMMON_CLK_MT6765) += clk-mt6765.o
obj-$(CONFIG_COMMON_CLK_MT6765_AUDIOSYS) += clk-mt6765-audio.o
obj-$(CONFIG_COMMON_CLK_MT6765_CAMSYS) += clk-mt6765-cam.o
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index 425c69cfb105..e103121cf58e 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -55,10 +55,16 @@ static const struct mtk_gate audio_clks[] = {
GATE_DUMMY(CLK_DUMMY, "aud_dummy"),
/* AUDIO0 */
GATE_AUDIO0(CLK_AUD_AFE, "audio_afe", "aud_intbus_sel", 2),
+ GATE_DUMMY(CLK_AUD_LRCK_DETECT, "audio_lrck_detect_dummy"),
+	GATE_DUMMY(CLK_AUD_I2S, "audio_i2s_dummy"),
+ GATE_DUMMY(CLK_AUD_APLL_TUNER, "audio_apll_tuner_dummy"),
GATE_AUDIO0(CLK_AUD_HDMI, "audio_hdmi", "audpll_sel", 20),
GATE_AUDIO0(CLK_AUD_SPDF, "audio_spdf", "audpll_sel", 21),
GATE_AUDIO0(CLK_AUD_SPDF2, "audio_spdf2", "audpll_sel", 22),
GATE_AUDIO0(CLK_AUD_APLL, "audio_apll", "audpll_sel", 23),
+ GATE_DUMMY(CLK_AUD_TML, "audio_tml_dummy"),
+ GATE_DUMMY(CLK_AUD_AHB_IDLE_EXT, "audio_ahb_idle_ext_dummy"),
+ GATE_DUMMY(CLK_AUD_AHB_IDLE_INT, "audio_ahb_idle_int_dummy"),
/* AUDIO1 */
GATE_AUDIO1(CLK_AUD_I2SIN1, "audio_i2sin1", "aud_mux1_sel", 0),
GATE_AUDIO1(CLK_AUD_I2SIN2, "audio_i2sin2", "aud_mux1_sel", 1),
@@ -76,10 +82,12 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUD_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
GATE_AUDIO1(CLK_AUD_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
GATE_AUDIO1(CLK_AUD_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
+ GATE_DUMMY(CLK_AUD_HDMIRX, "audio_hdmirx_dummy"),
GATE_AUDIO1(CLK_AUD_INTDIR, "audio_intdir", "intdir_sel", 20),
GATE_AUDIO1(CLK_AUD_A1SYS, "audio_a1sys", "aud_mux1_sel", 21),
GATE_AUDIO1(CLK_AUD_A2SYS, "audio_a2sys", "aud_mux2_sel", 22),
GATE_AUDIO1(CLK_AUD_AFE_CONN, "audio_afe_conn", "aud_mux1_sel", 23),
+ GATE_DUMMY(CLK_AUD_AFE_PCMIF, "audio_afe_pcmif_dummy"),
GATE_AUDIO1(CLK_AUD_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUD_MMIF_UL1, "audio_ul1", "aud_mux1_sel", 0),
@@ -100,6 +108,8 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO2(CLK_AUD_MMIF_AWB2, "audio_awb2", "aud_mux1_sel", 15),
GATE_AUDIO2(CLK_AUD_MMIF_DAI, "audio_dai", "aud_mux1_sel", 16),
/* AUDIO3 */
+ GATE_DUMMY(CLK_AUD_DMIC1, "audio_dmic1_dummy"),
+ GATE_DUMMY(CLK_AUD_DMIC2, "audio_dmic2_dummy"),
GATE_AUDIO3(CLK_AUD_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
GATE_AUDIO3(CLK_AUD_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
GATE_AUDIO3(CLK_AUD_ASRCI5, "audio_asrci5", "asm_h_sel", 4),
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index 5da3eabffd3e..f11c7a4fa37b 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate bdp_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "bdp_dummy"),
GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1),
GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2),
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index 875594bc9dcb..c158e54c4652 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -22,6 +22,7 @@ static const struct mtk_gate_regs img_cg_regs = {
GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate img_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "img_dummy"),
GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1),
GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5),
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index bc68fa718878..474d87d62e83 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs disp1_cg_regs = {
GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
static const struct mtk_gate mm_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "mm_dummy"),
GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
GATE_DISP0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
GATE_DISP0(CLK_MM_CMDQ, "mm_cmdq", "mm_sel", 2),
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index 94db86f8d0a4..5299d92f3aba 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -31,6 +31,7 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
static const struct mtk_gate vdec_clks[] = {
+ GATE_DUMMY(CLK_DUMMY, "vdec_dummy"),
GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
};
diff --git a/drivers/clk/mediatek/clk-mt6735-apmixedsys.c b/drivers/clk/mediatek/clk-mt6735-apmixedsys.c
new file mode 100644
index 000000000000..e0949911e8f7
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-apmixedsys.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-apmixedsys.h>
+
+#define AP_PLL_CON_5 0x014
+#define ARMPLL_CON0 0x200
+#define ARMPLL_CON1 0x204
+#define ARMPLL_PWR_CON0 0x20c
+#define MAINPLL_CON0 0x210
+#define MAINPLL_CON1 0x214
+#define MAINPLL_PWR_CON0 0x21c
+#define UNIVPLL_CON0 0x220
+#define UNIVPLL_CON1 0x224
+#define UNIVPLL_PWR_CON0 0x22c
+#define MMPLL_CON0 0x230
+#define MMPLL_CON1 0x234
+#define MMPLL_PWR_CON0 0x23c
+#define MSDCPLL_CON0 0x240
+#define MSDCPLL_CON1 0x244
+#define MSDCPLL_PWR_CON0 0x24c
+#define VENCPLL_CON0 0x250
+#define VENCPLL_CON1 0x254
+#define VENCPLL_PWR_CON0 0x25c
+#define TVDPLL_CON0 0x260
+#define TVDPLL_CON1 0x264
+#define TVDPLL_PWR_CON0 0x26c
+#define APLL1_CON0 0x270
+#define APLL1_CON1 0x274
+#define APLL1_CON2 0x278
+#define APLL1_PWR_CON0 0x280
+#define APLL2_CON0 0x284
+#define APLL2_CON1 0x288
+#define APLL2_CON2 0x28c
+#define APLL2_PWR_CON0 0x294
+
+#define CON0_RST_BAR BIT(24)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcwbits, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = "clk26m", \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_chg_reg = _pcw_reg, \
+ .pcwbits = _pcwbits, \
+ .flags = _flags, \
+ }
+
+static const struct mtk_pll_data apmixedsys_plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", ARMPLL_CON0, ARMPLL_PWR_CON0, 0x00000001, 0, ARMPLL_CON1, 24, 0, 0, 0, ARMPLL_CON1, 21, PLL_AO),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", MAINPLL_CON0, MAINPLL_PWR_CON0, 0xf0000101, CON0_RST_BAR, MAINPLL_CON1, 24, 0, 0, 0, MAINPLL_CON1, 21, HAVE_RST_BAR),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", UNIVPLL_CON0, UNIVPLL_PWR_CON0, 0xfc000001, CON0_RST_BAR, UNIVPLL_CON1, 24, 0, 0, 0, UNIVPLL_CON1, 21, HAVE_RST_BAR),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", MMPLL_CON0, MMPLL_PWR_CON0, 0x00000001, 0, MMPLL_CON1, 24, 0, 0, 0, MMPLL_CON1, 21, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", MSDCPLL_CON0, MSDCPLL_PWR_CON0, 0x00000001, 0, MSDCPLL_CON1, 24, 0, 0, 0, MSDCPLL_CON1, 21, 0),
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", VENCPLL_CON0, VENCPLL_PWR_CON0, 0x00000001, CON0_RST_BAR, VENCPLL_CON1, 24, 0, 0, 0, VENCPLL_CON1, 21, HAVE_RST_BAR),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", TVDPLL_CON0, TVDPLL_PWR_CON0, 0x00000001, 0, TVDPLL_CON1, 24, 0, 0, 0, TVDPLL_CON1, 21, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", APLL1_CON0, APLL1_PWR_CON0, 0x00000001, 0, APLL1_CON0, 4, APLL1_CON2, AP_PLL_CON_5, 0, APLL1_CON1, 31, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", APLL2_CON0, APLL2_PWR_CON0, 0x00000001, 0, APLL2_CON0, 4, APLL2_CON2, AP_PLL_CON_5, 1, APLL2_CON1, 31, 0)
+};
+
+static int clk_mt6735_apmixed_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct clk_hw_onecell_data *clk_data;
+ int ret;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(&pdev->dev, ARRAY_SIZE(apmixedsys_plls));
+ if (!clk_data)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, clk_data);
+
+ ret = mtk_clk_register_plls(pdev->dev.of_node, apmixedsys_plls,
+ ARRAY_SIZE(apmixedsys_plls), clk_data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register PLLs: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+ clk_data);
+ if (ret)
+ dev_err(&pdev->dev,
+ "Failed to register clock provider: %d\n", ret);
+
+ return ret;
+}
+
+static void clk_mt6735_apmixed_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ mtk_clk_unregister_plls(apmixedsys_plls, ARRAY_SIZE(apmixedsys_plls), clk_data);
+}
+
+static const struct of_device_id of_match_mt6735_apmixedsys[] = {
+ { .compatible = "mediatek,mt6735-apmixedsys" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_apmixedsys);
+
+static struct platform_driver clk_mt6735_apmixedsys = {
+ .probe = clk_mt6735_apmixed_probe,
+ .remove = clk_mt6735_apmixed_remove,
+ .driver = {
+ .name = "clk-mt6735-apmixedsys",
+ .of_match_table = of_match_mt6735_apmixedsys,
+ },
+};
+module_platform_driver(clk_mt6735_apmixedsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 apmixedsys clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-imgsys.c b/drivers/clk/mediatek/clk-mt6735-imgsys.c
new file mode 100644
index 000000000000..c564f8f72432
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-imgsys.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-imgsys.h>
+
+#define IMG_CG_CON 0x00
+#define IMG_CG_SET 0x04
+#define IMG_CG_CLR 0x08
+
+static struct mtk_gate_regs imgsys_cg_regs = {
+ .set_ofs = IMG_CG_SET,
+ .clr_ofs = IMG_CG_CLR,
+ .sta_ofs = IMG_CG_CON,
+};
+
+static const struct mtk_gate imgsys_gates[] = {
+ GATE_MTK(CLK_IMG_SMI_LARB2, "smi_larb2", "mm_sel", &imgsys_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_CAM_SMI, "cam_smi", "mm_sel", &imgsys_cg_regs, 5, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_CAM_CAM, "cam_cam", "mm_sel", &imgsys_cg_regs, 6, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_SEN_TG, "sen_tg", "mm_sel", &imgsys_cg_regs, 7, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_SEN_CAM, "sen_cam", "mm_sel", &imgsys_cg_regs, 8, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_CAM_SV, "cam_sv", "mm_sel", &imgsys_cg_regs, 9, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_SUFOD, "sufod", "mm_sel", &imgsys_cg_regs, 10, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_IMG_FD, "fd", "mm_sel", &imgsys_cg_regs, 11, &mtk_clk_gate_ops_setclr),
+};
+
+static const struct mtk_clk_desc imgsys_clks = {
+ .clks = imgsys_gates,
+ .num_clks = ARRAY_SIZE(imgsys_gates),
+};
+
+static const struct of_device_id of_match_mt6735_imgsys[] = {
+ { .compatible = "mediatek,mt6735-imgsys", .data = &imgsys_clks },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6735_imgsys = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-imgsys",
+ .of_match_table = of_match_mt6735_imgsys,
+ },
+};
+module_platform_driver(clk_mt6735_imgsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 imgsys clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-infracfg.c b/drivers/clk/mediatek/clk-mt6735-infracfg.c
new file mode 100644
index 000000000000..c1171f903cfa
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-infracfg.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-infracfg.h>
+#include <dt-bindings/reset/mediatek,mt6735-infracfg.h>
+
+#define INFRA_RST0 0x30
+#define INFRA_GLOBALCON_PDN0 0x40
+#define INFRA_PDN1 0x44
+#define INFRA_PDN_STA 0x48
+
+#define RST_NR_PER_BANK 32
+
+static struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = INFRA_GLOBALCON_PDN0,
+ .clr_ofs = INFRA_PDN1,
+ .sta_ofs = INFRA_PDN_STA,
+};
+
+static const struct mtk_gate infracfg_gates[] = {
+ GATE_MTK(CLK_INFRA_DBG, "dbg", "axi_sel", &infra_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_GCE, "gce", "axi_sel", &infra_cg_regs, 1, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_TRBG, "trbg", "axi_sel", &infra_cg_regs, 2, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CPUM, "cpum", "axi_sel", &infra_cg_regs, 3, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_DEVAPC, "devapc", "axi_sel", &infra_cg_regs, 4, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_AUDIO, "audio", "aud_intbus_sel", &infra_cg_regs, 5, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_GCPU, "gcpu", "axi_sel", &infra_cg_regs, 6, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_L2C_SRAM, "l2csram", "axi_sel", &infra_cg_regs, 7, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_M4U, "m4u", "axi_sel", &infra_cg_regs, 8, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CLDMA, "cldma", "axi_sel", &infra_cg_regs, 12, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CONNMCU_BUS, "connmcu_bus", "axi_sel", &infra_cg_regs, 15, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_KP, "kp", "axi_sel", &infra_cg_regs, 16, &mtk_clk_gate_ops_setclr),
+ GATE_MTK_FLAGS(CLK_INFRA_APXGPT, "apxgpt", "axi_sel", &infra_cg_regs, 18, &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL),
+ GATE_MTK(CLK_INFRA_SEJ, "sej", "axi_sel", &infra_cg_regs, 19, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CCIF0_AP, "ccif0ap", "axi_sel", &infra_cg_regs, 20, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_CCIF1_AP, "ccif1ap", "axi_sel", &infra_cg_regs, 21, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_PMIC_SPI, "pmicspi", "pmicspi_sel", &infra_cg_regs, 22, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_INFRA_PMIC_WRAP, "pmicwrap", "axi_sel", &infra_cg_regs, 23, &mtk_clk_gate_ops_setclr)
+};
+
+static u16 infracfg_rst_bank_ofs[] = { INFRA_RST0 };
+
+static u16 infracfg_rst_idx_map[] = {
+ [MT6735_INFRA_RST0_EMI_REG] = 0 * RST_NR_PER_BANK + 0,
+ [MT6735_INFRA_RST0_DRAMC0_AO] = 0 * RST_NR_PER_BANK + 1,
+ [MT6735_INFRA_RST0_AP_CIRQ_EINT] = 0 * RST_NR_PER_BANK + 3,
+ [MT6735_INFRA_RST0_APXGPT] = 0 * RST_NR_PER_BANK + 4,
+ [MT6735_INFRA_RST0_SCPSYS] = 0 * RST_NR_PER_BANK + 5,
+ [MT6735_INFRA_RST0_KP] = 0 * RST_NR_PER_BANK + 6,
+ [MT6735_INFRA_RST0_PMIC_WRAP] = 0 * RST_NR_PER_BANK + 7,
+ [MT6735_INFRA_RST0_CLDMA_AO_TOP] = 0 * RST_NR_PER_BANK + 8,
+ [MT6735_INFRA_RST0_USBSIF_TOP] = 0 * RST_NR_PER_BANK + 9,
+ [MT6735_INFRA_RST0_EMI] = 0 * RST_NR_PER_BANK + 16,
+ [MT6735_INFRA_RST0_CCIF] = 0 * RST_NR_PER_BANK + 17,
+ [MT6735_INFRA_RST0_DRAMC0] = 0 * RST_NR_PER_BANK + 18,
+ [MT6735_INFRA_RST0_EMI_AO_REG] = 0 * RST_NR_PER_BANK + 19,
+ [MT6735_INFRA_RST0_CCIF_AO] = 0 * RST_NR_PER_BANK + 20,
+ [MT6735_INFRA_RST0_TRNG] = 0 * RST_NR_PER_BANK + 21,
+ [MT6735_INFRA_RST0_SYS_CIRQ] = 0 * RST_NR_PER_BANK + 22,
+ [MT6735_INFRA_RST0_GCE] = 0 * RST_NR_PER_BANK + 23,
+ [MT6735_INFRA_RST0_M4U] = 0 * RST_NR_PER_BANK + 24,
+ [MT6735_INFRA_RST0_CCIF1] = 0 * RST_NR_PER_BANK + 25,
+ [MT6735_INFRA_RST0_CLDMA_TOP_PD] = 0 * RST_NR_PER_BANK + 26
+};
+
+static const struct mtk_clk_rst_desc infracfg_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = infracfg_rst_bank_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infracfg_rst_bank_ofs),
+ .rst_idx_map = infracfg_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infracfg_rst_idx_map)
+};
+
+static const struct mtk_clk_desc infracfg_clks = {
+ .clks = infracfg_gates,
+ .num_clks = ARRAY_SIZE(infracfg_gates),
+
+ .rst_desc = &infracfg_resets
+};
+
+static const struct of_device_id of_match_mt6735_infracfg[] = {
+ { .compatible = "mediatek,mt6735-infracfg", .data = &infracfg_clks },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_infracfg);
+
+static struct platform_driver clk_mt6735_infracfg = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-infracfg",
+ .of_match_table = of_match_mt6735_infracfg,
+ },
+};
+module_platform_driver(clk_mt6735_infracfg);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 infracfg clock and reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-mfgcfg.c b/drivers/clk/mediatek/clk-mt6735-mfgcfg.c
new file mode 100644
index 000000000000..1f5aedddf209
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-mfgcfg.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-mfgcfg.h>
+
+#define MFG_CG_CON 0x00
+#define MFG_CG_SET 0x04
+#define MFG_CG_CLR 0x08
+#define MFG_RESET 0x0c
+
+static struct mtk_gate_regs mfgcfg_cg_regs = {
+ .set_ofs = MFG_CG_SET,
+ .clr_ofs = MFG_CG_CLR,
+ .sta_ofs = MFG_CG_CON,
+};
+
+static const struct mtk_gate mfgcfg_gates[] = {
+ GATE_MTK(CLK_MFG_BG3D, "bg3d", "mfg_sel", &mfgcfg_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+};
+
+static u16 mfgcfg_rst_ofs[] = { MFG_RESET };
+
+static const struct mtk_clk_rst_desc mfgcfg_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = mfgcfg_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(mfgcfg_rst_ofs)
+};
+
+static const struct mtk_clk_desc mfgcfg_clks = {
+ .clks = mfgcfg_gates,
+ .num_clks = ARRAY_SIZE(mfgcfg_gates),
+
+ .rst_desc = &mfgcfg_resets
+};
+
+static const struct of_device_id of_match_mt6735_mfgcfg[] = {
+ { .compatible = "mediatek,mt6735-mfgcfg", .data = &mfgcfg_clks },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6735_mfgcfg = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-mfgcfg",
+ .of_match_table = of_match_mt6735_mfgcfg,
+ },
+};
+module_platform_driver(clk_mt6735_mfgcfg);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("Mediatek MT6735 mfgcfg clock and reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-pericfg.c b/drivers/clk/mediatek/clk-mt6735-pericfg.c
new file mode 100644
index 000000000000..cbdf6d25c1b2
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-pericfg.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-pericfg.h>
+#include <dt-bindings/reset/mediatek,mt6735-pericfg.h>
+
+#define PERI_GLOBALCON_RST0 0x00
+#define PERI_GLOBALCON_RST1 0x04
+#define PERI_GLOBALCON_PDN0_SET 0x08
+#define PERI_GLOBALCON_PDN0_CLR 0x10
+#define PERI_GLOBALCON_PDN0_STA 0x18
+
+#define RST_NR_PER_BANK 32
+
+static struct mtk_gate_regs peri_cg_regs = {
+ .set_ofs = PERI_GLOBALCON_PDN0_SET,
+ .clr_ofs = PERI_GLOBALCON_PDN0_CLR,
+ .sta_ofs = PERI_GLOBALCON_PDN0_STA,
+};
+
+static const struct mtk_gate pericfg_gates[] = {
+ GATE_MTK(CLK_PERI_DISP_PWM, "disp_pwm", "disppwm_sel", &peri_cg_regs, 0, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_THERM, "therm", "axi_sel", &peri_cg_regs, 1, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM1, "pwm1", "axi_sel", &peri_cg_regs, 2, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM2, "pwm2", "axi_sel", &peri_cg_regs, 3, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM3, "pwm3", "axi_sel", &peri_cg_regs, 4, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM4, "pwm4", "axi_sel", &peri_cg_regs, 5, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM5, "pwm5", "axi_sel", &peri_cg_regs, 6, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM6, "pwm6", "axi_sel", &peri_cg_regs, 7, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM7, "pwm7", "axi_sel", &peri_cg_regs, 8, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_PWM, "pwm", "axi_sel", &peri_cg_regs, 9, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_USB0, "usb0", "usb20_sel", &peri_cg_regs, 10, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_IRDA, "irda", "irda_sel", &peri_cg_regs, 11, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_APDMA, "apdma", "axi_sel", &peri_cg_regs, 12, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_0, "msdc30_0", "msdc30_0_sel", &peri_cg_regs, 13, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_1, "msdc30_1", "msdc30_1_sel", &peri_cg_regs, 14, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_2, "msdc30_2", "msdc30_2_sel", &peri_cg_regs, 15, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_MSDC30_3, "msdc30_3", "msdc30_3_sel", &peri_cg_regs, 16, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART0, "uart0", "uart_sel", &peri_cg_regs, 17, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART1, "uart1", "uart_sel", &peri_cg_regs, 18, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART2, "uart2", "uart_sel", &peri_cg_regs, 19, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART3, "uart3", "uart_sel", &peri_cg_regs, 20, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_UART4, "uart4", "uart_sel", &peri_cg_regs, 21, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_BTIF, "btif", "axi_sel", &peri_cg_regs, 22, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C0, "i2c0", "axi_sel", &peri_cg_regs, 23, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C1, "i2c1", "axi_sel", &peri_cg_regs, 24, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C2, "i2c2", "axi_sel", &peri_cg_regs, 25, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_I2C3, "i2c3", "axi_sel", &peri_cg_regs, 26, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_AUXADC, "auxadc", "axi_sel", &peri_cg_regs, 27, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_SPI0, "spi0", "spi_sel", &peri_cg_regs, 28, &mtk_clk_gate_ops_setclr),
+ GATE_MTK(CLK_PERI_IRTX, "irtx", "irtx_sel", &peri_cg_regs, 29, &mtk_clk_gate_ops_setclr)
+};
+
+static u16 pericfg_rst_bank_ofs[] = { PERI_GLOBALCON_RST0, PERI_GLOBALCON_RST1 };
+
+static u16 pericfg_rst_idx_map[] = {
+ [MT6735_PERI_RST0_UART0] = 0 * RST_NR_PER_BANK + 0,
+ [MT6735_PERI_RST0_UART1] = 0 * RST_NR_PER_BANK + 1,
+ [MT6735_PERI_RST0_UART2] = 0 * RST_NR_PER_BANK + 2,
+ [MT6735_PERI_RST0_UART3] = 0 * RST_NR_PER_BANK + 3,
+ [MT6735_PERI_RST0_UART4] = 0 * RST_NR_PER_BANK + 4,
+ [MT6735_PERI_RST0_BTIF] = 0 * RST_NR_PER_BANK + 6,
+ [MT6735_PERI_RST0_DISP_PWM_PERI] = 0 * RST_NR_PER_BANK + 7,
+ [MT6735_PERI_RST0_PWM] = 0 * RST_NR_PER_BANK + 8,
+ [MT6735_PERI_RST0_AUXADC] = 0 * RST_NR_PER_BANK + 10,
+ [MT6735_PERI_RST0_DMA] = 0 * RST_NR_PER_BANK + 11,
+ [MT6735_PERI_RST0_IRDA] = 0 * RST_NR_PER_BANK + 12,
+ [MT6735_PERI_RST0_IRTX] = 0 * RST_NR_PER_BANK + 13,
+ [MT6735_PERI_RST0_THERM] = 0 * RST_NR_PER_BANK + 16,
+ [MT6735_PERI_RST0_MSDC2] = 0 * RST_NR_PER_BANK + 17,
+ [MT6735_PERI_RST0_MSDC3] = 0 * RST_NR_PER_BANK + 18,
+ [MT6735_PERI_RST0_MSDC0] = 0 * RST_NR_PER_BANK + 19,
+ [MT6735_PERI_RST0_MSDC1] = 0 * RST_NR_PER_BANK + 20,
+ [MT6735_PERI_RST0_I2C0] = 0 * RST_NR_PER_BANK + 22,
+ [MT6735_PERI_RST0_I2C1] = 0 * RST_NR_PER_BANK + 23,
+ [MT6735_PERI_RST0_I2C2] = 0 * RST_NR_PER_BANK + 24,
+ [MT6735_PERI_RST0_I2C3] = 0 * RST_NR_PER_BANK + 25,
+ [MT6735_PERI_RST0_USB] = 0 * RST_NR_PER_BANK + 28,
+
+ [MT6735_PERI_RST1_SPI0] = 1 * RST_NR_PER_BANK + 1,
+};
+
+static const struct mtk_clk_rst_desc pericfg_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = pericfg_rst_bank_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pericfg_rst_bank_ofs),
+ .rst_idx_map = pericfg_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pericfg_rst_idx_map)
+};
+
+static const struct mtk_clk_desc pericfg_clks = {
+ .clks = pericfg_gates,
+ .num_clks = ARRAY_SIZE(pericfg_gates),
+
+ .rst_desc = &pericfg_resets
+};
+
+static const struct of_device_id of_match_mt6735_pericfg[] = {
+ { .compatible = "mediatek,mt6735-pericfg", .data = &pericfg_clks },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_pericfg);
+
+static struct platform_driver clk_mt6735_pericfg = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-pericfg",
+ .of_match_table = of_match_mt6735_pericfg,
+ },
+};
+module_platform_driver(clk_mt6735_pericfg);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 pericfg clock driver");
+MODULE_LICENSE("GPL");
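
The pericfg reset map shows how rst_idx_map works across banks: the flat index exposed to device tree is bank * 32 + bit, and the bank offsets come from pericfg_rst_bank_ofs. So MT6735_PERI_RST1_SPI0 resolves to 1 * 32 + 1 = 33, meaning bit 1 of the second bank, PERI_GLOBALCON_RST1, while MT6735_PERI_RST0_USB resolves to 28, bit 28 of PERI_GLOBALCON_RST0.
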
diff --git a/drivers/clk/mediatek/clk-mt6735-topckgen.c b/drivers/clk/mediatek/clk-mt6735-topckgen.c
new file mode 100644
index 000000000000..2589ebfe2271
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-topckgen.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-topckgen.h>
+
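+/*
+ * Each CLK_CFG_n mux control register has write-only SET and CLR
+ * companions at +0x4 and +0x8, used by the mux ops to update fields
+ * without a read-modify-write of the live register.
+ */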
+#define CLK_CFG_0 0x40
+#define CLK_CFG_0_SET 0x44
+#define CLK_CFG_0_CLR 0x48
+#define CLK_CFG_1 0x50
+#define CLK_CFG_1_SET 0x54
+#define CLK_CFG_1_CLR 0x58
+#define CLK_CFG_2 0x60
+#define CLK_CFG_2_SET 0x64
+#define CLK_CFG_2_CLR 0x68
+#define CLK_CFG_3 0x70
+#define CLK_CFG_3_SET 0x74
+#define CLK_CFG_3_CLR 0x78
+#define CLK_CFG_4 0x80
+#define CLK_CFG_4_SET 0x84
+#define CLK_CFG_4_CLR 0x88
+#define CLK_CFG_5 0x90
+#define CLK_CFG_5_SET 0x94
+#define CLK_CFG_5_CLR 0x98
+#define CLK_CFG_6 0xa0
+#define CLK_CFG_6_SET 0xa4
+#define CLK_CFG_6_CLR 0xa8
+#define CLK_CFG_7 0xb0
+#define CLK_CFG_7_SET 0xb4
+#define CLK_CFG_7_CLR 0xb8
+
+static DEFINE_SPINLOCK(mt6735_topckgen_lock);
+
+/* Some clocks with unknown details are modeled as fixed clocks */
+static const struct mtk_fixed_clk topckgen_fixed_clks[] = {
+	/*
+	 * This clock is available as a parent option for multiple
+	 * muxes and at first looks like an alternative name for
+	 * clk26m, but it appears alongside clk26m in several muxes,
+	 * which suggests it is a separate clock.
+	 */
+ FIXED_CLK(CLK_TOP_AD_SYS_26M_CK, "ad_sys_26m_ck", "clk26m", 26 * MHZ),
+ /*
+ * This clock is the parent of DMPLL divisors. It might be MEMPLL
+ * or its parent, as DMPLL appears to be an alternative name for
+ * MEMPLL.
+ */
+ FIXED_CLK(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", NULL, 0),
+ /*
+ * DMPLL clock (dmpll_ck), controlled by DDRPHY.
+ */
+ FIXED_CLK(CLK_TOP_DMPLL, "dmpll", "clkph_mck_o", 0),
+ /*
+ * MIPI DPI clock. Parent option for dpi0_sel. Unknown parent.
+ */
+ FIXED_CLK(CLK_TOP_DPI_CK, "dpi_ck", NULL, 0),
+ /*
+ * This clock is a child of WHPLL which is controlled by
+ * the modem.
+ */
+ FIXED_CLK(CLK_TOP_WHPLL_AUDIO_CK, "whpll_audio_ck", NULL, 0)
+};
+
+static const struct mtk_fixed_factor topckgen_factors[] = {
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll", 1, 26),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1, 8),
+ FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1, 16),
+ FACTOR(CLK_TOP_VENCPLL_D3, "vencpll_d3", "vencpll", 1, 3),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "clkph_mck_o", 1, 2),
+ FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "clkph_mck_o", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "clkph_mck_o", 1, 8),
+ FACTOR(CLK_TOP_AD_SYS_26M_D2, "ad_sys_26m_d2", "clk26m", 1, 2)
+};
+
+static const char * const axi_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll",
+ "dmpll_d2"
+};
+
+static const char * const mem_sel_parents[] = {
+ "clk26m",
+ "dmpll"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const mm_sel_parents[] = {
+ "clk26m",
+ "vencpll",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll"
+};
+
+static const char * const pwm_sel_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4"
+};
+
+static const char * const vdec_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "syspll_d2",
+ "syspll2_d2",
+ "msdcpll_d2"
+};
+
+static const char * const mfg_sel_parents[] = {
+ "clk26m",
+ "mmpll",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "univpll_d3",
+ "univpll1_d2"
+};
+
+static const char * const camtg_sel_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll2_d2",
+ "syspll3_d2",
+ "syspll3_d4",
+ "msdcpll_d4"
+};
+
+static const char * const uart_sel_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_sel_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "msdcpll_d8",
+ "syspll2_d4",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8"
+};
+
+static const char * const usb20_sel_parents[] = {
+ "clk26m",
+ "univpll1_d8",
+ "univpll3_d4"
+};
+
+static const char * const msdc50_0_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const msdc30_0_sel_parents[] = {
+ "clk26m",
+ "msdcpll",
+ "msdcpll_d2",
+ "msdcpll_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll_d3",
+ "univpll_d26",
+ "syspll2_d4",
+ "univpll_d2"
+};
+
+static const char * const msdc30_1_2_sel_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll_d26",
+ "syspll2_d4"
+};
+
+static const char * const msdc30_3_sel_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "univpll_d26",
+ "msdcpll_d16",
+ "syspll2_d4"
+};
+
+static const char * const audio_sel_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_sel_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2",
+ "dmpll_d4"
+};
+
+static const char * const pmicspi_sel_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll3_d4",
+ "syspll1_d16",
+ "univpll3_d4",
+ "univpll_d26",
+ "dmpll_d4",
+ "dmpll_d8"
+};
+
+static const char * const scp_sel_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const atb_sel_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "dmpll"
+};
+
+static const char * const dpi0_sel_parents[] = {
+ "clk26m",
+ "tvdpll",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "dpi_ck"
+};
+
+static const char * const scam_sel_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "univpll2_d4",
+ "vencpll_d3"
+};
+
+static const char * const mfg13m_sel_parents[] = {
+ "clk26m",
+ "ad_sys_26m_d2"
+};
+
+static const char * const aud_1_2_sel_parents[] = {
+ "clk26m",
+ "apll1"
+};
+
+static const char * const irda_sel_parents[] = {
+ "clk26m",
+ "univpll2_d4"
+};
+
+static const char * const irtx_sel_parents[] = {
+ "clk26m",
+ "ad_sys_26m_ck"
+};
+
+static const char * const disppwm_sel_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "syspll4_d2_d8",
+ "ad_sys_26m_ck"
+};
+
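+/*
+ * Each CLK_CFG_n register holds up to four muxes in 8-bit slots. The
+ * numeric arguments give the mux shift and width within the register,
+ * followed (for gateable muxes) by the clock-off bit at the top of the
+ * slot; the two trailing zeroes are the unused update register fields.
+ */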
+static const struct mtk_mux topckgen_muxes[] = {
+ MUX_CLR_SET_UPD(CLK_TOP_AXI_SEL, "axi_sel", axi_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 0, 3, 0, 0),
+ MUX_CLR_SET_UPD(CLK_TOP_MEM_SEL, "mem_sel", mem_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 8, 1, 0, 0),
+ MUX_CLR_SET_UPD(CLK_TOP_DDRPHY_SEL, "ddrphycfg_sel", ddrphycfg_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 16, 1, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MM_SEL, "mm_sel", mm_sel_parents, CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR, 24, 3, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 0, 2, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_SEL, "mfg_sel", mfg_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 16, 4, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_sel_parents, CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR, 24, 3, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 0, 1, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB20_SEL, "usb20_sel", usb20_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 16, 2, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_sel_parents, CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 24, 3, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_0_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 0, 4, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_2_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_1_2_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 16, 3, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_sel_parents, CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR, 24, 4, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel", audio_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 0, 2, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDINTBUS_SEL, "aud_intbus_sel", aud_intbus_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 8, 2, 15, 0, 0),
+ MUX_CLR_SET_UPD(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 16, 3, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP_SEL, "scp_sel", scp_sel_parents, CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR, 24, 2, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB_SEL, "atb_sel", atb_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 0, 2, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 8, 3, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCAM_SEL, "scam_sel", scam_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 16, 2, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG13M_SEL, "mfg13m_sel", mfg13m_sel_parents, CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 24, 1, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD1_SEL, "aud_1_sel", aud_1_2_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 0, 1, 7, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD2_SEL, "aud_2_sel", aud_1_2_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 8, 1, 15, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IRDA_SEL, "irda_sel", irda_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 16, 1, 23, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IRTX_SEL, "irtx_sel", irtx_sel_parents, CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 24, 1, 31, 0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISPPWM_SEL, "disppwm_sel", disppwm_sel_parents, CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR, 0, 2, 7, 0, 0),
+};
+
+static const struct mtk_clk_desc topckgen_desc = {
+ .fixed_clks = topckgen_fixed_clks,
+ .num_fixed_clks = ARRAY_SIZE(topckgen_fixed_clks),
+ .factor_clks = topckgen_factors,
+ .num_factor_clks = ARRAY_SIZE(topckgen_factors),
+ .mux_clks = topckgen_muxes,
+ .num_mux_clks = ARRAY_SIZE(topckgen_muxes),
+ .clk_lock = &mt6735_topckgen_lock,
+};
+
+static const struct of_device_id of_match_mt6735_topckgen[] = {
+	{ .compatible = "mediatek,mt6735-topckgen", .data = &topckgen_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_topckgen);
+
+static struct platform_driver clk_mt6735_topckgen = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-topckgen",
+ .of_match_table = of_match_mt6735_topckgen,
+ },
+};
+module_platform_driver(clk_mt6735_topckgen);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 topckgen clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-vdecsys.c b/drivers/clk/mediatek/clk-mt6735-vdecsys.c
new file mode 100644
index 000000000000..8817085fc1db
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-vdecsys.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-vdecsys.h>
+#include <dt-bindings/reset/mediatek,mt6735-vdecsys.h>
+
+#define VDEC_CKEN_SET 0x00
+#define VDEC_CKEN_CLR 0x04
+#define SMI_LARB1_CKEN_SET 0x08
+#define SMI_LARB1_CKEN_CLR 0x0c
+#define VDEC_RESETB_CON 0x10
+#define SMI_LARB1_RESETB_CON 0x14
+
+#define RST_NR_PER_BANK 32
+
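+/*
+ * The VDEC and SMI LARB1 clock-enable registers have no separate status
+ * register, so sta_ofs aliases the SET offset; the *_setclr_inv gate ops
+ * treat a set bit as "clock enabled".
+ */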
+static struct mtk_gate_regs vdec_cg_regs = {
+ .set_ofs = VDEC_CKEN_SET,
+ .clr_ofs = VDEC_CKEN_CLR,
+ .sta_ofs = VDEC_CKEN_SET,
+};
+
+static struct mtk_gate_regs smi_larb1_cg_regs = {
+ .set_ofs = SMI_LARB1_CKEN_SET,
+ .clr_ofs = SMI_LARB1_CKEN_CLR,
+ .sta_ofs = SMI_LARB1_CKEN_SET,
+};
+
+static const struct mtk_gate vdecsys_gates[] = {
+ GATE_MTK(CLK_VDEC_VDEC, "vdec", "vdec_sel", &vdec_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VDEC_SMI_LARB1, "smi_larb1", "vdec_sel", &smi_larb1_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv),
+};
+
+static u16 vdecsys_rst_bank_ofs[] = { VDEC_RESETB_CON, SMI_LARB1_RESETB_CON };
+
+static u16 vdecsys_rst_idx_map[] = {
+ [MT6735_VDEC_RST0_VDEC] = 0 * RST_NR_PER_BANK + 0,
+ [MT6735_VDEC_RST1_SMI_LARB1] = 1 * RST_NR_PER_BANK + 0,
+};
+
+static const struct mtk_clk_rst_desc vdecsys_resets = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = vdecsys_rst_bank_ofs,
+ .rst_bank_nr = ARRAY_SIZE(vdecsys_rst_bank_ofs),
+ .rst_idx_map = vdecsys_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(vdecsys_rst_idx_map)
+};
+
+static const struct mtk_clk_desc vdecsys_clks = {
+ .clks = vdecsys_gates,
+ .num_clks = ARRAY_SIZE(vdecsys_gates),
+ .rst_desc = &vdecsys_resets
+};
+
+static const struct of_device_id of_match_mt6735_vdecsys[] = {
+ { .compatible = "mediatek,mt6735-vdecsys", .data = &vdecsys_clks },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_vdecsys);
+
+static struct platform_driver clk_mt6735_vdecsys = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-vdecsys",
+ .of_match_table = of_match_mt6735_vdecsys,
+ },
+};
+module_platform_driver(clk_mt6735_vdecsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("MediaTek MT6735 vdecsys clock and reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6735-vencsys.c b/drivers/clk/mediatek/clk-mt6735-vencsys.c
new file mode 100644
index 000000000000..8dec7f98492a
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6735-vencsys.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mediatek,mt6735-vencsys.h>
+
+#define VENC_CG_CON 0x00
+#define VENC_CG_SET 0x04
+#define VENC_CG_CLR 0x08
+
+static struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = VENC_CG_SET,
+ .clr_ofs = VENC_CG_CLR,
+ .sta_ofs = VENC_CG_CON,
+};
+
+static const struct mtk_gate vencsys_gates[] = {
+ GATE_MTK(CLK_VENC_SMI_LARB3, "smi_larb3", "mm_sel", &venc_cg_regs, 0, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VENC_VENC, "venc", "mm_sel", &venc_cg_regs, 4, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VENC_JPGENC, "jpgenc", "mm_sel", &venc_cg_regs, 8, &mtk_clk_gate_ops_setclr_inv),
+ GATE_MTK(CLK_VENC_JPGDEC, "jpgdec", "mm_sel", &venc_cg_regs, 12, &mtk_clk_gate_ops_setclr_inv),
+};
+
+static const struct mtk_clk_desc vencsys_clks = {
+ .clks = vencsys_gates,
+ .num_clks = ARRAY_SIZE(vencsys_gates),
+};
+
+static const struct of_device_id of_match_mt6735_vencsys[] = {
+ { .compatible = "mediatek,mt6735-vencsys", .data = &vencsys_clks },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_mt6735_vencsys);
+
+static struct platform_driver clk_mt6735_vencsys = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6735-vencsys",
+ .of_match_table = of_match_mt6735_vencsys,
+ },
+};
+module_platform_driver(clk_mt6735_vencsys);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("Mediatek MT6735 vencsys clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8188-topckgen.c b/drivers/clk/mediatek/clk-mt8188-topckgen.c
index c4baf4076ed6..6b07abe9a8f5 100644
--- a/drivers/clk/mediatek/clk-mt8188-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8188-topckgen.c
@@ -342,11 +342,14 @@ static const char * const dsp7_parents[] = {
"univpll_d3"
};
+/*
+ * MFG can also be parented to "univpll_d6" and "univpll_d7":
+ * these have been removed from the parents list to let us
+ * achieve GPU DVFS without any special clock handlers.
+ */
static const char * const mfg_core_tmp_parents[] = {
"clk26m",
- "mainpll_d5_d2",
- "univpll_d6",
- "univpll_d7"
+ "mainpll_d5_d2"
};
static const char * const camtg_parents[] = {
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 78f648c9c97d..be2e3a5f8336 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -106,6 +106,7 @@ config COMMON_CLK_AXG_AUDIO
select COMMON_CLK_MESON_SCLK_DIV
select COMMON_CLK_MESON_CLKC_UTILS
select REGMAP_MMIO
+ select RESET_CONTROLLER
help
Support for the audio clock controller on AmLogic A113D devices,
aka axg, Say Y if you want audio subsystem to work.
diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c
index 7aa6abb2eb1f..36489e0f948a 100644
--- a/drivers/clk/meson/a1-peripherals.c
+++ b/drivers/clk/meson/a1-peripherals.c
@@ -2246,4 +2246,4 @@ MODULE_DESCRIPTION("Amlogic A1 Peripherals Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c
index 8e5a42d1afbb..8d7c7b4493c4 100644
--- a/drivers/clk/meson/a1-pll.c
+++ b/drivers/clk/meson/a1-pll.c
@@ -360,4 +360,4 @@ MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index 1dabc81535a6..f44091ffb57d 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -342,4 +342,4 @@ module_platform_driver(axg_aoclkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index beda86349389..9df627b142f8 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -1912,4 +1912,4 @@ module_platform_driver(axg_audio_driver);
MODULE_DESCRIPTION("Amlogic AXG/G12A/SM1 Audio Clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 757c7a28c53d..448eece246ca 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -23,8 +23,6 @@
#include <dt-bindings/clock/axg-clkc.h>
-static DEFINE_SPINLOCK(meson_clk_lock);
-
static struct clk_regmap axg_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -506,7 +504,6 @@ static struct clk_regmap axg_mpll0_div = {
.shift = 0,
.width = 1,
},
- .lock = &meson_clk_lock,
.flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -557,7 +554,6 @@ static struct clk_regmap axg_mpll1_div = {
.shift = 1,
.width = 1,
},
- .lock = &meson_clk_lock,
.flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -613,7 +609,6 @@ static struct clk_regmap axg_mpll2_div = {
.shift = 2,
.width = 1,
},
- .lock = &meson_clk_lock,
.flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -664,7 +659,6 @@ static struct clk_regmap axg_mpll3_div = {
.shift = 3,
.width = 1,
},
- .lock = &meson_clk_lock,
.flags = CLK_MESON_MPLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -2187,4 +2181,4 @@ module_platform_driver(axg_driver);
MODULE_DESCRIPTION("Amlogic AXG Main Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/c3-peripherals.c b/drivers/clk/meson/c3-peripherals.c
index 7dcbf4ebee07..2075668ed306 100644
--- a/drivers/clk/meson/c3-peripherals.c
+++ b/drivers/clk/meson/c3-peripherals.c
@@ -2364,4 +2364,4 @@ module_platform_driver(c3_peripherals_driver);
MODULE_DESCRIPTION("Amlogic C3 Peripherals Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/c3-pll.c b/drivers/clk/meson/c3-pll.c
index 32bd2ed9d304..ed4bc495862e 100644
--- a/drivers/clk/meson/c3-pll.c
+++ b/drivers/clk/meson/c3-pll.c
@@ -361,6 +361,7 @@ static struct clk_regmap hifi_pll_dco = {
.range = &c3_gp0_pll_mult_range,
.init_regs = c3_hifi_init_regs,
.init_count = ARRAY_SIZE(c3_hifi_init_regs),
+ .frac_max = 100000,
},
.hw.init = &(struct clk_init_data) {
.name = "hifi_pll_dco",
@@ -745,4 +746,4 @@ module_platform_driver(c3_pll_driver);
MODULE_DESCRIPTION("Amlogic C3 PLL Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-cpu-dyndiv.c b/drivers/clk/meson/clk-cpu-dyndiv.c
index 6c1f58826e24..cb043b52b65d 100644
--- a/drivers/clk/meson/clk-cpu-dyndiv.c
+++ b/drivers/clk/meson/clk-cpu-dyndiv.c
@@ -65,9 +65,9 @@ const struct clk_ops meson_clk_cpu_dyndiv_ops = {
.determine_rate = meson_clk_cpu_dyndiv_determine_rate,
.set_rate = meson_clk_cpu_dyndiv_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_cpu_dyndiv_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_cpu_dyndiv_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic CPU Dynamic Clock divider");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c
index 913bf25d3771..c896cf29b318 100644
--- a/drivers/clk/meson/clk-dualdiv.c
+++ b/drivers/clk/meson/clk-dualdiv.c
@@ -130,15 +130,15 @@ const struct clk_ops meson_clk_dualdiv_ops = {
.determine_rate = meson_clk_dualdiv_determine_rate,
.set_rate = meson_clk_dualdiv_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ops, "CLK_MESON");
const struct clk_ops meson_clk_dualdiv_ro_ops = {
.recalc_rate = meson_clk_dualdiv_recalc_rate,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_dualdiv_ro_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic dual divider driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index f639d56f0fd3..ee91e32b4050 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -112,26 +112,15 @@ static int mpll_set_rate(struct clk_hw *hw,
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
unsigned int sdm, n2;
- unsigned long flags = 0;
params_from_rate(rate, parent_rate, &sdm, &n2, mpll->flags);
- if (mpll->lock)
- spin_lock_irqsave(mpll->lock, flags);
- else
- __acquire(mpll->lock);
-
/* Set the fractional part */
meson_parm_write(clk->map, &mpll->sdm, sdm);
/* Set the integer divider part */
meson_parm_write(clk->map, &mpll->n2, n2);
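+	/*
+	 * No explicit locking here: clk_regmap access goes through
+	 * regmap, which provides its own locking.
+	 */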
- if (mpll->lock)
- spin_unlock_irqrestore(mpll->lock, flags);
- else
- __release(mpll->lock);
-
return 0;
}
@@ -165,7 +154,7 @@ const struct clk_ops meson_clk_mpll_ro_ops = {
.recalc_rate = mpll_recalc_rate,
.determine_rate = mpll_determine_rate,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ro_ops, "CLK_MESON");
const struct clk_ops meson_clk_mpll_ops = {
.recalc_rate = mpll_recalc_rate,
@@ -173,9 +162,9 @@ const struct clk_ops meson_clk_mpll_ops = {
.set_rate = mpll_set_rate,
.init = mpll_init,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_mpll_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic MPLL driver");
MODULE_AUTHOR("Michael Turquette <mturquette@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-mpll.h b/drivers/clk/meson/clk-mpll.h
index a991d568c43a..4ffd3aeef799 100644
--- a/drivers/clk/meson/clk-mpll.h
+++ b/drivers/clk/meson/clk-mpll.h
@@ -20,7 +20,6 @@ struct meson_clk_mpll_data {
struct parm misc;
const struct reg_sequence *init_regs;
unsigned int init_count;
- spinlock_t *lock;
u8 flags;
};
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c
index c1526fbfb6c4..701211120610 100644
--- a/drivers/clk/meson/clk-phase.c
+++ b/drivers/clk/meson/clk-phase.c
@@ -61,7 +61,7 @@ const struct clk_ops meson_clk_phase_ops = {
.get_phase = meson_clk_phase_get_phase,
.set_phase = meson_clk_phase_set_phase,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_phase_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_phase_ops, "CLK_MESON");
/*
* This is a special clock for the audio controller.
@@ -123,7 +123,7 @@ const struct clk_ops meson_clk_triphase_ops = {
.get_phase = meson_clk_triphase_get_phase,
.set_phase = meson_clk_triphase_set_phase,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_triphase_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_triphase_ops, "CLK_MESON");
/*
* This is a special clock for the audio controller.
@@ -178,9 +178,9 @@ const struct clk_ops meson_sclk_ws_inv_ops = {
.get_phase = meson_sclk_ws_inv_get_phase,
.set_phase = meson_sclk_ws_inv_set_phase,
};
-EXPORT_SYMBOL_NS_GPL(meson_sclk_ws_inv_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_sclk_ws_inv_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic phase driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index bc570a2ff3a3..e8e53855b00a 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -57,12 +57,13 @@ static unsigned long __pll_params_to_rate(unsigned long parent_rate,
struct meson_clk_pll_data *pll)
{
u64 rate = (u64)parent_rate * m;
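+	/* some PLLs use a fractional denominator that is not a power of two */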
+ unsigned int frac_max = pll->frac_max ? pll->frac_max :
+ (1 << pll->frac.width);
if (frac && MESON_PARM_APPLICABLE(&pll->frac)) {
u64 frac_rate = (u64)parent_rate * frac;
- rate += DIV_ROUND_UP_ULL(frac_rate,
- (1 << pll->frac.width));
+ rate += DIV_ROUND_UP_ULL(frac_rate, frac_max);
}
return DIV_ROUND_UP_ULL(rate, n);
@@ -100,7 +101,8 @@ static unsigned int __pll_params_with_frac(unsigned long rate,
unsigned int n,
struct meson_clk_pll_data *pll)
{
- unsigned int frac_max = (1 << pll->frac.width);
+ unsigned int frac_max = pll->frac_max ? pll->frac_max :
+ (1 << pll->frac.width);
u64 val = (u64)rate * n;
/* Bail out if we are already over the requested rate */
@@ -472,7 +474,7 @@ const struct clk_ops meson_clk_pcie_pll_ops = {
.enable = meson_clk_pcie_pll_enable,
.disable = meson_clk_pll_disable
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_pcie_pll_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pcie_pll_ops, "CLK_MESON");
const struct clk_ops meson_clk_pll_ops = {
.init = meson_clk_pll_init,
@@ -483,16 +485,16 @@ const struct clk_ops meson_clk_pll_ops = {
.enable = meson_clk_pll_enable,
.disable = meson_clk_pll_disable
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ops, "CLK_MESON");
const struct clk_ops meson_clk_pll_ro_ops = {
.recalc_rate = meson_clk_pll_recalc_rate,
.is_enabled = meson_clk_pll_is_enabled,
};
-EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_pll_ro_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic PLL driver");
MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h
index 7b6b87274073..949157fb7bf5 100644
--- a/drivers/clk/meson/clk-pll.h
+++ b/drivers/clk/meson/clk-pll.h
@@ -43,6 +43,7 @@ struct meson_clk_pll_data {
unsigned int init_count;
const struct pll_params_table *table;
const struct pll_mult_range *range;
+ unsigned int frac_max;
u8 flags;
};
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index 07f7e441b916..f3e504f67571 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -49,12 +49,12 @@ const struct clk_ops clk_regmap_gate_ops = {
.disable = clk_regmap_gate_disable,
.is_enabled = clk_regmap_gate_is_enabled,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ops, "CLK_MESON");
const struct clk_ops clk_regmap_gate_ro_ops = {
.is_enabled = clk_regmap_gate_is_enabled,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_gate_ro_ops, "CLK_MESON");
static unsigned long clk_regmap_div_recalc_rate(struct clk_hw *hw,
unsigned long prate)
@@ -125,13 +125,13 @@ const struct clk_ops clk_regmap_divider_ops = {
.determine_rate = clk_regmap_div_determine_rate,
.set_rate = clk_regmap_div_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ops, "CLK_MESON");
const struct clk_ops clk_regmap_divider_ro_ops = {
.recalc_rate = clk_regmap_div_recalc_rate,
.determine_rate = clk_regmap_div_determine_rate,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_divider_ro_ops, "CLK_MESON");
static u8 clk_regmap_mux_get_parent(struct clk_hw *hw)
{
@@ -174,14 +174,14 @@ const struct clk_ops clk_regmap_mux_ops = {
.set_parent = clk_regmap_mux_set_parent,
.determine_rate = clk_regmap_mux_determine_rate,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ops, "CLK_MESON");
const struct clk_ops clk_regmap_mux_ro_ops = {
.get_parent = clk_regmap_mux_get_parent,
};
-EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(clk_regmap_mux_ro_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic regmap backed clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index f0a18d8c9fc2..71c758ffa493 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -477,4 +477,4 @@ module_platform_driver(g12a_aoclkc_driver);
MODULE_DESCRIPTION("Amlogic G12A Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 02dda57105b1..cfffd434e998 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -28,8 +28,6 @@
#include <dt-bindings/clock/g12a-clkc.h>
-static DEFINE_SPINLOCK(meson_clk_lock);
-
static struct clk_regmap g12a_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -2225,7 +2223,6 @@ static struct clk_regmap g12a_mpll0_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = g12a_mpll0_init_regs,
.init_count = ARRAY_SIZE(g12a_mpll0_init_regs),
},
@@ -2279,7 +2276,6 @@ static struct clk_regmap g12a_mpll1_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = g12a_mpll1_init_regs,
.init_count = ARRAY_SIZE(g12a_mpll1_init_regs),
},
@@ -2333,7 +2329,6 @@ static struct clk_regmap g12a_mpll2_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = g12a_mpll2_init_regs,
.init_count = ARRAY_SIZE(g12a_mpll2_init_regs),
},
@@ -2387,7 +2382,6 @@ static struct clk_regmap g12a_mpll3_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = g12a_mpll3_init_regs,
.init_count = ARRAY_SIZE(g12a_mpll3_init_regs),
},
@@ -5616,4 +5610,4 @@ module_platform_driver(g12a_driver);
MODULE_DESCRIPTION("Amlogic G12/SM1 Main Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index 83b034157b35..43940232f718 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -303,4 +303,4 @@ module_platform_driver(gxbb_aoclkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index f071faad1ebb..8575b8485385 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -19,8 +19,6 @@
#include <dt-bindings/clock/gxbb-clkc.h>
-static DEFINE_SPINLOCK(meson_clk_lock);
-
static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
PLL_PARAMS(32, 1),
PLL_PARAMS(33, 1),
@@ -731,7 +729,6 @@ static struct clk_regmap gxbb_mpll0_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
@@ -760,7 +757,6 @@ static struct clk_regmap gxl_mpll0_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
@@ -812,7 +808,6 @@ static struct clk_regmap gxbb_mpll1_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll1_div",
@@ -855,7 +850,6 @@ static struct clk_regmap gxbb_mpll2_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll2_div",
@@ -3571,4 +3565,4 @@ module_platform_driver(gxbb_driver);
MODULE_DESCRIPTION("Amlogic GXBB Main Clock Controller driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 053940ee8940..995be51987f4 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -88,8 +88,8 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
}
-EXPORT_SYMBOL_NS_GPL(meson_aoclkc_probe, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_aoclkc_probe, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic Always-ON Clock Controller helpers");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/meson-clkc-utils.c b/drivers/clk/meson/meson-clkc-utils.c
index a8cd2c21fab7..6937d1482719 100644
--- a/drivers/clk/meson/meson-clkc-utils.c
+++ b/drivers/clk/meson/meson-clkc-utils.c
@@ -20,8 +20,8 @@ struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_da
return data->hws[idx];
}
-EXPORT_SYMBOL_NS_GPL(meson_clk_hw_get, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_clk_hw_get, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic Clock Controller Utilities");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index 66f79e384fe5..3053ee7425eb 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -57,8 +57,8 @@ int meson_eeclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
}
-EXPORT_SYMBOL_NS_GPL(meson_eeclkc_probe, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_eeclkc_probe, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic Main Clock Controller Helpers");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index b7417ac262d3..e4b474c5f86c 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -25,8 +25,6 @@
#include <dt-bindings/clock/meson8b-clkc.h>
#include <dt-bindings/reset/amlogic,meson8b-clkc-reset.h>
-static DEFINE_SPINLOCK(meson_clk_lock);
-
struct meson8b_clk_reset {
struct reset_controller_dev reset;
struct regmap *regmap;
@@ -492,7 +490,6 @@ static struct clk_regmap meson8b_mpll0_div = {
.shift = 25,
.width = 1,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll0_div",
@@ -537,7 +534,6 @@ static struct clk_regmap meson8b_mpll1_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll1_div",
@@ -582,7 +578,6 @@ static struct clk_regmap meson8b_mpll2_div = {
.shift = 16,
.width = 9,
},
- .lock = &meson_clk_lock,
},
.hw.init = &(struct clk_init_data){
.name = "mpll2_div",
@@ -3702,7 +3697,6 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
container_of(rcdev, struct meson8b_clk_reset, reset);
const struct meson8b_clk_reset_line *reset;
unsigned int value = 0;
- unsigned long flags;
if (id >= ARRAY_SIZE(meson8b_clk_reset_bits))
return -EINVAL;
@@ -3712,13 +3706,9 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
if (assert != reset->active_low)
value = BIT(reset->bit_idx);
- spin_lock_irqsave(&meson_clk_lock, flags);
-
regmap_update_bits(meson8b_clk_reset->regmap, reset->reg,
BIT(reset->bit_idx), value);
- spin_unlock_irqrestore(&meson_clk_lock, flags);
-
return 0;
}
diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c
index c930cf0614a0..8a4037377787 100644
--- a/drivers/clk/meson/s4-peripherals.c
+++ b/drivers/clk/meson/s4-peripherals.c
@@ -3814,4 +3814,4 @@ module_platform_driver(s4_driver);
MODULE_DESCRIPTION("Amlogic S4 Peripherals Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c
index b0258933fb9d..f9cc05a506e3 100644
--- a/drivers/clk/meson/s4-pll.c
+++ b/drivers/clk/meson/s4-pll.c
@@ -17,8 +17,6 @@
#include "meson-clkc-utils.h"
#include <dt-bindings/clock/amlogic,s4-pll-clkc.h>
-static DEFINE_SPINLOCK(meson_clk_lock);
-
/*
* These clock are a fixed value (fixed_pll is 2GHz) that is initialized by ROMcode.
* The chip was changed fixed pll for security reasons. Fixed PLL registers are not writable
@@ -329,7 +327,6 @@ static struct clk_regmap s4_gp0_pll = {
* Internal hifi pll emulation configuration parameters
*/
static const struct reg_sequence s4_hifi_init_regs[] = {
- { .reg = ANACTRL_HIFIPLL_CTRL1, .def = 0x00010e56 },
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x00000000 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x6a285c00 },
{ .reg = ANACTRL_HIFIPLL_CTRL4, .def = 0x65771290 },
@@ -354,6 +351,11 @@ static struct clk_regmap s4_hifi_pll_dco = {
.shift = 10,
.width = 5,
},
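+			/* CTRL1 holds the runtime-programmed fractional value */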
+ .frac = {
+ .reg_off = ANACTRL_HIFIPLL_CTRL1,
+ .shift = 0,
+ .width = 17,
+ },
.l = {
.reg_off = ANACTRL_HIFIPLL_CTRL0,
.shift = 31,
@@ -367,6 +369,7 @@ static struct clk_regmap s4_hifi_pll_dco = {
.range = &s4_gp0_pll_mult_range,
.init_regs = s4_hifi_init_regs,
.init_count = ARRAY_SIZE(s4_hifi_init_regs),
+ .frac_max = 100000,
.flags = CLK_MESON_PLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -542,7 +545,6 @@ static struct clk_regmap s4_mpll0_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = s4_mpll0_init_regs,
.init_count = ARRAY_SIZE(s4_mpll0_init_regs),
},
@@ -596,7 +598,6 @@ static struct clk_regmap s4_mpll1_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = s4_mpll1_init_regs,
.init_count = ARRAY_SIZE(s4_mpll1_init_regs),
},
@@ -650,7 +651,6 @@ static struct clk_regmap s4_mpll2_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = s4_mpll2_init_regs,
.init_count = ARRAY_SIZE(s4_mpll2_init_regs),
},
@@ -704,7 +704,6 @@ static struct clk_regmap s4_mpll3_div = {
.shift = 29,
.width = 1,
},
- .lock = &meson_clk_lock,
.init_regs = s4_mpll3_init_regs,
.init_count = ARRAY_SIZE(s4_mpll3_init_regs),
},
@@ -873,4 +872,4 @@ module_platform_driver(s4_driver);
MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c
index ae03b048182f..9c4945234f26 100644
--- a/drivers/clk/meson/sclk-div.c
+++ b/drivers/clk/meson/sclk-div.c
@@ -247,9 +247,9 @@ const struct clk_ops meson_sclk_div_ops = {
.set_duty_cycle = sclk_div_set_duty_cycle,
.init = sclk_div_init,
};
-EXPORT_SYMBOL_NS_GPL(meson_sclk_div_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_sclk_div_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic Sample divider driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/vclk.c b/drivers/clk/meson/vclk.c
index 36f637d2d01b..6a167ebdc8d7 100644
--- a/drivers/clk/meson/vclk.c
+++ b/drivers/clk/meson/vclk.c
@@ -49,7 +49,7 @@ const struct clk_ops meson_vclk_gate_ops = {
.disable = meson_vclk_gate_disable,
.is_enabled = meson_vclk_gate_is_enabled,
};
-EXPORT_SYMBOL_NS_GPL(meson_vclk_gate_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_vclk_gate_ops, "CLK_MESON");
/* The VCLK Divider has supplementary reset & enable bits */
@@ -134,9 +134,9 @@ const struct clk_ops meson_vclk_div_ops = {
.disable = meson_vclk_div_disable,
.is_enabled = meson_vclk_div_is_enabled,
};
-EXPORT_SYMBOL_NS_GPL(meson_vclk_div_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_vclk_div_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic vclk clock driver");
MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
index 486cf68fc97a..965ed7281f57 100644
--- a/drivers/clk/meson/vid-pll-div.c
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -92,9 +92,9 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
const struct clk_ops meson_vid_pll_div_ro_ops = {
.recalc_rate = meson_vid_pll_div_recalc_rate,
};
-EXPORT_SYMBOL_NS_GPL(meson_vid_pll_div_ro_ops, CLK_MESON);
+EXPORT_SYMBOL_NS_GPL(meson_vid_pll_div_ro_ops, "CLK_MESON");
MODULE_DESCRIPTION("Amlogic video pll divider driver");
MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CLK_MESON);
+MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index 28ec0da88cb3..c22632a7439c 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -443,4 +443,4 @@ MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Driver");
MODULE_AUTHOR("Padmarao Begari <padmarao.begari@microchip.com>");
MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
-MODULE_IMPORT_NS(MCHP_CLK_MPFS);
+MODULE_IMPORT_NS("MCHP_CLK_MPFS");
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 441bf83080a1..062cd87fa8dd 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -11,4 +11,4 @@ obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o pwr-island.o
obj-$(CONFIG_COMMON_CLK_MMP2_AUDIO) += clk-audio.o
-obj-y += clk-of-pxa1928.o
+obj-$(CONFIG_ARCH_MMP) += clk-of-pxa1928.o clk-pxa1908-apbc.o clk-pxa1908-apbcp.o clk-pxa1908-apmu.o clk-pxa1908-mpmu.o
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 1b90867b60c4..6556f6ada2e8 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -26,14 +26,15 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
u64 rate = 0, prev_rate;
+ struct u32_fract *d;
int i;
for (i = 0; i < factor->ftbl_cnt; i++) {
- prev_rate = rate;
- rate = *prate;
- rate *= factor->ftbl[i].den;
- do_div(rate, factor->ftbl[i].num * factor->masks->factor);
+ d = &factor->ftbl[i];
+ prev_rate = rate;
+ rate = (u64)(*prate) * d->denominator;
+ do_div(rate, d->numerator * factor->masks->factor);
if (rate > drate)
break;
}
@@ -52,23 +53,22 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
struct mmp_clk_factor_masks *masks = factor->masks;
- unsigned int val, num, den;
+ struct u32_fract d;
+ unsigned int val;
u64 rate;
val = readl_relaxed(factor->base);
/* calculate numerator */
- num = (val >> masks->num_shift) & masks->num_mask;
+ d.numerator = (val >> masks->num_shift) & masks->num_mask;
/* calculate denominator */
- den = (val >> masks->den_shift) & masks->den_mask;
-
- if (!den)
+ d.denominator = (val >> masks->den_shift) & masks->den_mask;
+ if (!d.denominator)
return 0;
- rate = parent_rate;
- rate *= den;
- do_div(rate, num * factor->masks->factor);
+ rate = (u64)parent_rate * d.denominator;
+ do_div(rate, d.numerator * factor->masks->factor);
return rate;
}
@@ -82,18 +82,18 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
int i;
unsigned long val;
unsigned long flags = 0;
+ struct u32_fract *d;
u64 rate = 0;
for (i = 0; i < factor->ftbl_cnt; i++) {
- rate = prate;
- rate *= factor->ftbl[i].den;
- do_div(rate, factor->ftbl[i].num * factor->masks->factor);
+ d = &factor->ftbl[i];
+ rate = (u64)prate * d->denominator;
+ do_div(rate, d->numerator * factor->masks->factor);
if (rate > drate)
break;
}
- if (i > 0)
- i--;
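+	/* use the last entry that did not overshoot drate, else the first */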
+ d = i ? &factor->ftbl[i - 1] : &factor->ftbl[0];
if (factor->lock)
spin_lock_irqsave(factor->lock, flags);
@@ -101,10 +101,10 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
val = readl_relaxed(factor->base);
val &= ~(masks->num_mask << masks->num_shift);
- val |= (factor->ftbl[i].num & masks->num_mask) << masks->num_shift;
+ val |= (d->numerator & masks->num_mask) << masks->num_shift;
val &= ~(masks->den_mask << masks->den_shift);
- val |= (factor->ftbl[i].den & masks->den_mask) << masks->den_shift;
+ val |= (d->denominator & masks->den_mask) << masks->den_shift;
writel_relaxed(val, factor->base);
@@ -118,7 +118,8 @@ static int clk_factor_init(struct clk_hw *hw)
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
struct mmp_clk_factor_masks *masks = factor->masks;
- u32 val, num, den;
+ struct u32_fract d;
+ u32 val;
int i;
unsigned long flags = 0;
@@ -128,23 +129,22 @@ static int clk_factor_init(struct clk_hw *hw)
val = readl(factor->base);
/* calculate numerator */
- num = (val >> masks->num_shift) & masks->num_mask;
+ d.numerator = (val >> masks->num_shift) & masks->num_mask;
/* calculate denominator */
- den = (val >> masks->den_shift) & masks->den_mask;
+ d.denominator = (val >> masks->den_shift) & masks->den_mask;
for (i = 0; i < factor->ftbl_cnt; i++)
- if (den == factor->ftbl[i].den && num == factor->ftbl[i].num)
+ if (d.denominator == factor->ftbl[i].denominator &&
+ d.numerator == factor->ftbl[i].numerator)
break;
if (i >= factor->ftbl_cnt) {
val &= ~(masks->num_mask << masks->num_shift);
- val |= (factor->ftbl[0].num & masks->num_mask) <<
- masks->num_shift;
+ val |= (factor->ftbl[0].numerator & masks->num_mask) << masks->num_shift;
val &= ~(masks->den_mask << masks->den_shift);
- val |= (factor->ftbl[0].den & masks->den_mask) <<
- masks->den_shift;
+ val |= (factor->ftbl[0].denominator & masks->den_mask) << masks->den_shift;
}
if (!(val & masks->enable_mask) || i >= factor->ftbl_cnt) {
@@ -168,8 +168,7 @@ static const struct clk_ops clk_factor_ops = {
struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
unsigned long flags, void __iomem *base,
struct mmp_clk_factor_masks *masks,
- struct mmp_clk_factor_tbl *ftbl,
- unsigned int ftbl_cnt, spinlock_t *lock)
+ struct u32_fract *ftbl, unsigned int ftbl_cnt, spinlock_t *lock)
{
struct mmp_clk_factor *factor;
struct clk_init_data init;
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index eaad36ee323d..a4f15cee630e 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -143,9 +143,9 @@ static struct mmp_clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
- {.num = 8125, .den = 1536}, /*14.745MHZ */
- {.num = 3521, .den = 689}, /*19.23MHZ */
+static struct u32_fract uart_factor_tbl[] = {
+ { .numerator = 8125, .denominator = 1536 }, /* 14.745MHZ */
+ { .numerator = 3521, .denominator = 689 }, /* 19.23MHZ */
};
static struct mmp_clk_factor_masks i2s_factor_masks = {
@@ -157,16 +157,16 @@ static struct mmp_clk_factor_masks i2s_factor_masks = {
.enable_mask = 0xd0000000,
};
-static struct mmp_clk_factor_tbl i2s_factor_tbl[] = {
- {.num = 24868, .den = 511}, /* 2.0480 MHz */
- {.num = 28003, .den = 793}, /* 2.8224 MHz */
- {.num = 24941, .den = 1025}, /* 4.0960 MHz */
- {.num = 28003, .den = 1586}, /* 5.6448 MHz */
- {.num = 31158, .den = 2561}, /* 8.1920 MHz */
- {.num = 16288, .den = 1845}, /* 11.2896 MHz */
- {.num = 20772, .den = 2561}, /* 12.2880 MHz */
- {.num = 8144, .den = 1845}, /* 22.5792 MHz */
- {.num = 10386, .den = 2561}, /* 24.5760 MHz */
+static struct u32_fract i2s_factor_tbl[] = {
+ { .numerator = 24868, .denominator = 511 }, /* 2.0480 MHz */
+ { .numerator = 28003, .denominator = 793 }, /* 2.8224 MHz */
+ { .numerator = 24941, .denominator = 1025 }, /* 4.0960 MHz */
+ { .numerator = 28003, .denominator = 1586 }, /* 5.6448 MHz */
+ { .numerator = 31158, .denominator = 2561 }, /* 8.1920 MHz */
+ { .numerator = 16288, .denominator = 1845 }, /* 11.2896 MHz */
+ { .numerator = 20772, .denominator = 2561 }, /* 12.2880 MHz */
+ { .numerator = 8144, .denominator = 1845 }, /* 22.5792 MHz */
+ { .numerator = 10386, .denominator = 2561 }, /* 24.5760 MHz */
};
static DEFINE_SPINLOCK(acgr_lock);
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index c5a7ba1deaa3..5f250427e60d 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -106,8 +106,8 @@ static struct mmp_clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
- {.num = 8125, .den = 1536}, /*14.745MHZ */
+static struct u32_fract uart_factor_tbl[] = {
+ { .numerator = 8125, .denominator = 1536 }, /* 14.745MHZ */
};
static void pxa168_pll_init(struct pxa168_clk_unit *pxa_unit)
diff --git a/drivers/clk/mmp/clk-of-pxa1928.c b/drivers/clk/mmp/clk-of-pxa1928.c
index 9def4b5f10e9..ebb6e278eda3 100644
--- a/drivers/clk/mmp/clk-of-pxa1928.c
+++ b/drivers/clk/mmp/clk-of-pxa1928.c
@@ -61,9 +61,9 @@ static struct mmp_clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
- {.num = 832, .den = 234}, /*58.5MHZ */
- {.num = 1, .den = 1}, /*26MHZ */
+static struct u32_fract uart_factor_tbl[] = {
+ { .numerator = 832, .denominator = 234 }, /* 58.5MHZ */
+ { .numerator = 1, .denominator = 1 }, /* 26MHZ */
};
static void pxa1928_pll_init(struct pxa1928_clk_unit *pxa_unit)
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index 7a38c424782e..fe65e7bdb411 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -86,8 +86,8 @@ static struct mmp_clk_factor_masks uart_factor_masks = {
.den_shift = 0,
};
-static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
- {.num = 8125, .den = 1536}, /*14.745MHZ */
+static struct u32_fract uart_factor_tbl[] = {
+ { .numerator = 8125, .denominator = 1536 }, /* 14.745MHZ */
};
static void pxa910_pll_init(struct pxa910_clk_unit *pxa_unit)
diff --git a/drivers/clk/mmp/clk-pxa1908-apbc.c b/drivers/clk/mmp/clk-pxa1908-apbc.c
new file mode 100644
index 000000000000..3fd7b5e644f3
--- /dev/null
+++ b/drivers/clk/mmp/clk-pxa1908-apbc.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/marvell,pxa1908.h>
+
+#include "clk.h"
+
+#define APBC_UART0 0x0
+#define APBC_UART1 0x4
+#define APBC_GPIO 0x8
+#define APBC_PWM0 0xc
+#define APBC_PWM1 0x10
+#define APBC_PWM2 0x14
+#define APBC_PWM3 0x18
+#define APBC_SSP0 0x1c
+#define APBC_SSP1 0x20
+#define APBC_IPC_RST 0x24
+#define APBC_RTC 0x28
+#define APBC_TWSI0 0x2c
+#define APBC_KPC 0x30
+#define APBC_SWJTAG 0x40
+#define APBC_SSP2 0x4c
+#define APBC_TWSI1 0x60
+#define APBC_THERMAL 0x6c
+#define APBC_TWSI3 0x70
+
+#define APBC_NR_CLKS 19
+
+struct pxa1908_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *base;
+};
+
+static DEFINE_SPINLOCK(pwm0_lock);
+static DEFINE_SPINLOCK(pwm2_lock);
+
+static DEFINE_SPINLOCK(uart0_lock);
+static DEFINE_SPINLOCK(uart1_lock);
+
+static const char * const uart_parent_names[] = {"pll1_117", "uart_pll"};
+static const char * const ssp_parent_names[] = {"pll1_d16", "pll1_d48", "pll1_d24", "pll1_d12"};
+
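+/*
+ * As in other MMP APBC blocks, each register exposes APB bus clock
+ * enable, functional clock enable and reset bits; a 0x7/0x3/0x0
+ * mask/enable/disable triple turns both clocks on with reset released.
+ */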
+static struct mmp_param_gate_clk apbc_gate_clks[] = {
+ {PXA1908_CLK_TWSI0, "twsi0_clk", "pll1_32", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_TWSI1, "twsi1_clk", "pll1_32", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_TWSI3, "twsi3_clk", "pll1_32", CLK_SET_RATE_PARENT, APBC_TWSI3, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x7, 3, 0, MMP_CLK_GATE_NEED_DELAY, NULL},
+ {PXA1908_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x87, 0x83, 0, MMP_CLK_GATE_NEED_DELAY, NULL},
+ {PXA1908_CLK_PWM0, "pwm0_clk", "pwm01_apb_share", CLK_SET_RATE_PARENT, APBC_PWM0, 0x2, 2, 0, 0, &pwm0_lock},
+ {PXA1908_CLK_PWM1, "pwm1_clk", "pwm01_apb_share", CLK_SET_RATE_PARENT, APBC_PWM1, 0x6, 2, 0, 0, NULL},
+ {PXA1908_CLK_PWM2, "pwm2_clk", "pwm23_apb_share", CLK_SET_RATE_PARENT, APBC_PWM2, 0x2, 2, 0, 0, NULL},
+ {PXA1908_CLK_PWM3, "pwm3_clk", "pwm23_apb_share", CLK_SET_RATE_PARENT, APBC_PWM3, 0x6, 2, 0, 0, NULL},
+ {PXA1908_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x7, 3, 0, 0, &uart0_lock},
+ {PXA1908_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x7, 3, 0, 0, &uart1_lock},
+ {PXA1908_CLK_THERMAL, "thermal_clk", NULL, 0, APBC_THERMAL, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_IPC_RST, "ipc_clk", NULL, 0, APBC_IPC_RST, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_SSP0, "ssp0_clk", "ssp0_mux", 0, APBC_SSP0, 0x7, 3, 0, 0, NULL},
+ {PXA1908_CLK_SSP2, "ssp2_clk", "ssp2_mux", 0, APBC_SSP2, 0x7, 3, 0, 0, NULL},
+};
+
+static struct mmp_param_mux_clk apbc_mux_clks[] = {
+ {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
+ {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
+ {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), 0, APBC_SSP0, 4, 3, 0, NULL},
+ {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), 0, APBC_SSP2, 4, 3, 0, NULL},
+};
+
+static void pxa1908_apb_periph_clk_init(struct pxa1908_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+ struct clk *clk;
+
+ mmp_clk_register_gate(NULL, "pwm01_apb_share", "pll1_d48",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->base + APBC_PWM0,
+ 0x5, 1, 0, 0, &pwm0_lock);
+ mmp_clk_register_gate(NULL, "pwm23_apb_share", "pll1_d48",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->base + APBC_PWM2,
+ 0x5, 1, 0, 0, &pwm2_lock);
+ clk = mmp_clk_register_apbc("swjtag", NULL,
+ pxa_unit->base + APBC_SWJTAG, 10, 0, NULL);
+ mmp_clk_add(unit, PXA1908_CLK_SWJTAG, clk);
+ mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->base,
+ ARRAY_SIZE(apbc_mux_clks));
+ mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->base,
+ ARRAY_SIZE(apbc_gate_clks));
+}
+
+static int pxa1908_apbc_probe(struct platform_device *pdev)
+{
+ struct pxa1908_clk_unit *pxa_unit;
+
+ pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return -ENOMEM;
+
+ pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pxa_unit->base))
+ return PTR_ERR(pxa_unit->base);
+
+ mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APBC_NR_CLKS);
+
+ pxa1908_apb_periph_clk_init(pxa_unit);
+
+ return 0;
+}
+
+static const struct of_device_id pxa1908_apbc_match_table[] = {
+ { .compatible = "marvell,pxa1908-apbc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pxa1908_apbc_match_table);
+
+static struct platform_driver pxa1908_apbc_driver = {
+ .probe = pxa1908_apbc_probe,
+ .driver = {
+ .name = "pxa1908-apbc",
+ .of_match_table = pxa1908_apbc_match_table
+ }
+};
+module_platform_driver(pxa1908_apbc_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>");
+MODULE_DESCRIPTION("Marvell PXA1908 APBC Clock Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mmp/clk-pxa1908-apbcp.c b/drivers/clk/mmp/clk-pxa1908-apbcp.c
new file mode 100644
index 000000000000..f638d7e89b47
--- /dev/null
+++ b/drivers/clk/mmp/clk-pxa1908-apbcp.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/marvell,pxa1908.h>
+
+#include "clk.h"
+
+#define APBCP_UART2 0x1c
+#define APBCP_TWSI2 0x28
+#define APBCP_AICER 0x38
+
+#define APBCP_NR_CLKS 4
+
+struct pxa1908_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *base;
+};
+
+static DEFINE_SPINLOCK(uart2_lock);
+
+static const char * const uart_parent_names[] = {"pll1_117", "uart_pll"};
+
+static struct mmp_param_gate_clk apbcp_gate_clks[] = {
+ {PXA1908_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBCP_UART2, 0x7, 0x3, 0x0, 0, &uart2_lock},
+ {PXA1908_CLK_TWSI2, "twsi2_clk", "pll1_32", CLK_SET_RATE_PARENT, APBCP_TWSI2, 0x7, 0x3, 0x0, 0, NULL},
+ {PXA1908_CLK_AICER, "ripc_clk", NULL, 0, APBCP_AICER, 0x7, 0x2, 0x0, 0, NULL},
+};
+
+static struct mmp_param_mux_clk apbcp_mux_clks[] = {
+ {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBCP_UART2, 4, 3, 0, &uart2_lock},
+};
+
+static void pxa1908_apb_p_periph_clk_init(struct pxa1908_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_mux_clks(unit, apbcp_mux_clks, pxa_unit->base,
+ ARRAY_SIZE(apbcp_mux_clks));
+ mmp_register_gate_clks(unit, apbcp_gate_clks, pxa_unit->base,
+ ARRAY_SIZE(apbcp_gate_clks));
+}
+
+static int pxa1908_apbcp_probe(struct platform_device *pdev)
+{
+ struct pxa1908_clk_unit *pxa_unit;
+
+ pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return -ENOMEM;
+
+ pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pxa_unit->base))
+ return PTR_ERR(pxa_unit->base);
+
+ mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APBCP_NR_CLKS);
+
+ pxa1908_apb_p_periph_clk_init(pxa_unit);
+
+ return 0;
+}
+
+static const struct of_device_id pxa1908_apbcp_match_table[] = {
+ { .compatible = "marvell,pxa1908-apbcp" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pxa1908_apbcp_match_table);
+
+static struct platform_driver pxa1908_apbcp_driver = {
+ .probe = pxa1908_apbcp_probe,
+ .driver = {
+ .name = "pxa1908-apbcp",
+ .of_match_table = pxa1908_apbcp_match_table
+ }
+};
+module_platform_driver(pxa1908_apbcp_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>");
+MODULE_DESCRIPTION("Marvell PXA1908 APBCP Clock Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mmp/clk-pxa1908-apmu.c b/drivers/clk/mmp/clk-pxa1908-apmu.c
new file mode 100644
index 000000000000..8cfb1258202f
--- /dev/null
+++ b/drivers/clk/mmp/clk-pxa1908-apmu.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/clock/marvell,pxa1908.h>
+
+#include "clk.h"
+
+#define APMU_CLK_GATE_CTRL 0x40
+#define APMU_CCIC1 0x24
+#define APMU_ISP 0x38
+#define APMU_DSI1 0x44
+#define APMU_DISP1 0x4c
+#define APMU_CCIC0 0x50
+#define APMU_SDH0 0x54
+#define APMU_SDH1 0x58
+#define APMU_USB 0x5c
+#define APMU_NF 0x60
+#define APMU_VPU 0xa4
+#define APMU_GC 0xcc
+#define APMU_SDH2 0xe0
+#define APMU_GC2D 0xf4
+#define APMU_TRACE 0x108
+#define APMU_DVC_DFC_DEBUG 0x140
+
+#define APMU_NR_CLKS 17
+
+struct pxa1908_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *base;
+};
+
+static DEFINE_SPINLOCK(pll1_lock);
+static struct mmp_param_general_gate_clk pll1_gate_clks[] = {
+ {PXA1908_CLK_PLL1_D2_GATE, "pll1_d2_gate", "pll1_d2", 0, APMU_CLK_GATE_CTRL, 29, 0, &pll1_lock},
+ {PXA1908_CLK_PLL1_416_GATE, "pll1_416_gate", "pll1_416", 0, APMU_CLK_GATE_CTRL, 27, 0, &pll1_lock},
+ {PXA1908_CLK_PLL1_624_GATE, "pll1_624_gate", "pll1_624", 0, APMU_CLK_GATE_CTRL, 26, 0, &pll1_lock},
+ {PXA1908_CLK_PLL1_832_GATE, "pll1_832_gate", "pll1_832", 0, APMU_CLK_GATE_CTRL, 30, 0, &pll1_lock},
+ {PXA1908_CLK_PLL1_1248_GATE, "pll1_1248_gate", "pll1_1248", 0, APMU_CLK_GATE_CTRL, 28, 0, &pll1_lock},
+};
+
+static DEFINE_SPINLOCK(sdh0_lock);
+static DEFINE_SPINLOCK(sdh1_lock);
+static DEFINE_SPINLOCK(sdh2_lock);
+
+static const char * const sdh_parent_names[] = {"pll1_416", "pll1_624"};
+
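+/*
+ * Per DEFINE_MIX_REG_INFO(width_div, shift_div, width_mux, shift_mux,
+ * bit_fc): a 3-bit divider at bit 8, a 2-bit mux at bit 6 and the
+ * frequency-change request bit at bit 11.
+ */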
+static struct mmp_clk_mix_config sdh_mix_config = {
+ .reg_info = DEFINE_MIX_REG_INFO(3, 8, 2, 6, 11),
+};
+
+static struct mmp_param_gate_clk apmu_gate_clks[] = {
+ {PXA1908_CLK_USB, "usb_clk", NULL, 0, APMU_USB, 0x9, 0x9, 0x1, 0, NULL},
+ {PXA1908_CLK_SDH0, "sdh0_clk", "sdh0_mix_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_UNGATE, APMU_SDH0, 0x12, 0x12, 0x0, 0, &sdh0_lock},
+ {PXA1908_CLK_SDH1, "sdh1_clk", "sdh1_mix_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_UNGATE, APMU_SDH1, 0x12, 0x12, 0x0, 0, &sdh1_lock},
+ {PXA1908_CLK_SDH2, "sdh2_clk", "sdh2_mix_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_UNGATE, APMU_SDH2, 0x12, 0x12, 0x0, 0, &sdh2_lock}
+};
+
+static void pxa1908_axi_periph_clk_init(struct pxa1908_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_general_gate_clks(unit, pll1_gate_clks,
+ pxa_unit->base, ARRAY_SIZE(pll1_gate_clks));
+
+ sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->base + APMU_SDH0;
+ mmp_clk_register_mix(NULL, "sdh0_mix_clk", sdh_parent_names,
+ ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT,
+ &sdh_mix_config, &sdh0_lock);
+ sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->base + APMU_SDH1;
+ mmp_clk_register_mix(NULL, "sdh1_mix_clk", sdh_parent_names,
+ ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT,
+ &sdh_mix_config, &sdh1_lock);
+ sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->base + APMU_SDH2;
+ mmp_clk_register_mix(NULL, "sdh2_mix_clk", sdh_parent_names,
+ ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT,
+ &sdh_mix_config, &sdh2_lock);
+
+ mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->base,
+ ARRAY_SIZE(apmu_gate_clks));
+}
+
+static int pxa1908_apmu_probe(struct platform_device *pdev)
+{
+ struct pxa1908_clk_unit *pxa_unit;
+
+ pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return -ENOMEM;
+
+ pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pxa_unit->base))
+ return PTR_ERR(pxa_unit->base);
+
+ mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APMU_NR_CLKS);
+
+ pxa1908_axi_periph_clk_init(pxa_unit);
+
+ return 0;
+}
+
+static const struct of_device_id pxa1908_apmu_match_table[] = {
+ { .compatible = "marvell,pxa1908-apmu" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pxa1908_apmu_match_table);
+
+static struct platform_driver pxa1908_apmu_driver = {
+ .probe = pxa1908_apmu_probe,
+ .driver = {
+ .name = "pxa1908-apmu",
+ .of_match_table = pxa1908_apmu_match_table
+ }
+};
+module_platform_driver(pxa1908_apmu_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>");
+MODULE_DESCRIPTION("Marvell PXA1908 APMU Clock Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mmp/clk-pxa1908-mpmu.c b/drivers/clk/mmp/clk-pxa1908-mpmu.c
new file mode 100644
index 000000000000..90b4b2488574
--- /dev/null
+++ b/drivers/clk/mmp/clk-pxa1908-mpmu.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/units.h>
+
+#include <dt-bindings/clock/marvell,pxa1908.h>
+
+#include "clk.h"
+
+#define MPMU_UART_PLL 0x14
+
+#define MPMU_NR_CLKS 39
+
+struct pxa1908_clk_unit {
+ struct mmp_clk_unit unit;
+ void __iomem *base;
+};
+
+static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
+ {PXA1908_CLK_CLK32, "clk32", NULL, 0, 32768},
+ {PXA1908_CLK_VCTCXO, "vctcxo", NULL, 0, 26 * HZ_PER_MHZ},
+ {PXA1908_CLK_PLL1_624, "pll1_624", NULL, 0, 624 * HZ_PER_MHZ},
+ {PXA1908_CLK_PLL1_416, "pll1_416", NULL, 0, 416 * HZ_PER_MHZ},
+ {PXA1908_CLK_PLL1_499, "pll1_499", NULL, 0, 499 * HZ_PER_MHZ},
+ {PXA1908_CLK_PLL1_832, "pll1_832", NULL, 0, 832 * HZ_PER_MHZ},
+ {PXA1908_CLK_PLL1_1248, "pll1_1248", NULL, 0, 1248 * HZ_PER_MHZ},
+};
+
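+/*
+ * Fixed-factor chain derived from pll1_624, e.g.
+ * pll1_32 = 624 MHz / 13 * 2 / 3 = 32 MHz and
+ * pll1_117 = 624 MHz * 3 / 16 = 117 MHz.
+ */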
+static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
+ {PXA1908_CLK_PLL1_D2, "pll1_d2", "pll1_624", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D4, "pll1_d4", "pll1_d2", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D6, "pll1_d6", "pll1_d2", 1, 3, 0},
+ {PXA1908_CLK_PLL1_D8, "pll1_d8", "pll1_d4", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D12, "pll1_d12", "pll1_d6", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D13, "pll1_d13", "pll1_624", 1, 13, 0},
+ {PXA1908_CLK_PLL1_D16, "pll1_d16", "pll1_d8", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D24, "pll1_d24", "pll1_d12", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D48, "pll1_d48", "pll1_d24", 1, 2, 0},
+ {PXA1908_CLK_PLL1_D96, "pll1_d96", "pll1_d48", 1, 2, 0},
+ {PXA1908_CLK_PLL1_32, "pll1_32", "pll1_d13", 2, 3, 0},
+ {PXA1908_CLK_PLL1_208, "pll1_208", "pll1_d2", 2, 3, 0},
+ {PXA1908_CLK_PLL1_117, "pll1_117", "pll1_624", 3, 16, 0},
+};
+
+static struct u32_fract uart_factor_tbl[] = {
+ {.numerator = 8125, .denominator = 1536}, /* 14.745MHz */
+};
+
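+/*
+ * A 13-bit numerator at bit 16 and a 13-bit denominator at bit 0 of
+ * MPMU_UART_PLL, with a fixed post-factor of 2: from the 156 MHz
+ * pll1_d4 parent, 156 MHz * 1536 / (8125 * 2) = 14.7456 MHz.
+ */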
+static struct mmp_clk_factor_masks uart_factor_masks = {
+ .factor = 2,
+ .num_mask = GENMASK(12, 0),
+ .den_mask = GENMASK(12, 0),
+ .num_shift = 16,
+ .den_shift = 0,
+};
+
+static void pxa1908_pll_init(struct pxa1908_clk_unit *pxa_unit)
+{
+ struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+ mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
+ ARRAY_SIZE(fixed_rate_clks));
+
+ mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
+ ARRAY_SIZE(fixed_factor_clks));
+
+ mmp_clk_register_factor("uart_pll", "pll1_d4",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->base + MPMU_UART_PLL,
+ &uart_factor_masks, uart_factor_tbl,
+ ARRAY_SIZE(uart_factor_tbl), NULL);
+}
+
+static int pxa1908_mpmu_probe(struct platform_device *pdev)
+{
+ struct pxa1908_clk_unit *pxa_unit;
+
+ pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
+ if (!pxa_unit)
+ return -ENOMEM;
+
+ pxa_unit->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pxa_unit->base))
+ return PTR_ERR(pxa_unit->base);
+
+ mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, MPMU_NR_CLKS);
+
+ pxa1908_pll_init(pxa_unit);
+
+ return 0;
+}
+
+static const struct of_device_id pxa1908_mpmu_match_table[] = {
+ { .compatible = "marvell,pxa1908-mpmu" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pxa1908_mpmu_match_table);
+
+static struct platform_driver pxa1908_mpmu_driver = {
+ .probe = pxa1908_mpmu_probe,
+ .driver = {
+ .name = "pxa1908-mpmu",
+ .of_match_table = pxa1908_mpmu_match_table
+ }
+};
+module_platform_driver(pxa1908_mpmu_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje.mihanovic@skole.hr>");
+MODULE_DESCRIPTION("Marvell PXA1908 MPMU Clock Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h
index 55ac05379781..c83cec169ddc 100644
--- a/drivers/clk/mmp/clk.h
+++ b/drivers/clk/mmp/clk.h
@@ -3,6 +3,7 @@
#define __MACH_MMP_CLK_H
#include <linux/clk-provider.h>
+#include <linux/math.h>
#include <linux/pm_domain.h>
#include <linux/clkdev.h>
@@ -20,16 +21,11 @@ struct mmp_clk_factor_masks {
unsigned int enable_mask;
};
-struct mmp_clk_factor_tbl {
- unsigned int num;
- unsigned int den;
-};
-
struct mmp_clk_factor {
struct clk_hw hw;
void __iomem *base;
struct mmp_clk_factor_masks *masks;
- struct mmp_clk_factor_tbl *ftbl;
+ struct u32_fract *ftbl;
unsigned int ftbl_cnt;
spinlock_t *lock;
};
@@ -37,7 +33,7 @@ struct mmp_clk_factor {
extern struct clk *mmp_clk_register_factor(const char *name,
const char *parent_name, unsigned long flags,
void __iomem *base, struct mmp_clk_factor_masks *masks,
- struct mmp_clk_factor_tbl *ftbl, unsigned int ftbl_cnt,
+ struct u32_fract *ftbl, unsigned int ftbl_cnt,
spinlock_t *lock);
/* Clock type "mix" */
diff --git a/drivers/clk/mmp/pwr-island.c b/drivers/clk/mmp/pwr-island.c
index edaa2433a472..eaf5d2c5e593 100644
--- a/drivers/clk/mmp/pwr-island.c
+++ b/drivers/clk/mmp/pwr-island.c
@@ -106,10 +106,10 @@ struct generic_pm_domain *mmp_pm_domain_register(const char *name,
pm_domain->flags = flags;
pm_domain->lock = lock;
- pm_genpd_init(&pm_domain->genpd, NULL, true);
pm_domain->genpd.name = name;
pm_domain->genpd.power_on = mmp_pm_domain_power_on;
pm_domain->genpd.power_off = mmp_pm_domain_power_off;
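+ /*
+ * pm_genpd_init() is deliberately called last, since the genpd
+ * core uses the name and callbacks during initialization.
+ */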
+ pm_genpd_init(&pm_domain->genpd, NULL, true);
return &pm_domain->genpd;
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index a3e2a09e2105..69bbf62ba3cd 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -64,6 +64,15 @@ config CLK_X1E80100_TCSRCC
Support for the TCSR clock controller on X1E80100 devices.
Say Y if you want to use peripheral devices such as SD/UFS.
+config CLK_X1P42100_GPUCC
+ tristate "X1P42100 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_X1E80100_GCC
+ help
+ Support for the graphics clock controller on X1P42100 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config CLK_QCM2290_GPUCC
tristate "QCM2290 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -190,6 +199,15 @@ config IPQ_APSS_6018
Say Y if you want to support CPU frequency scaling on
ipq based devices.
+config IPQ_CMN_PLL
+ tristate "IPQ CMN PLL Clock Controller"
+ help
+ Support for the CMN PLL clock controller on IPQ platforms. The
+ CMN PLL consumes the AHB/SYS clocks from GCC and supplies
+ the output clocks to the networking hardware and GCC blocks.
+ Say Y or M if you want to support CMN PLL clock on the IPQ
+ based devices.
+
config IPQ_GCC_4019
tristate "IPQ4019 Global Clock Controller"
help
@@ -213,6 +231,14 @@ config IPQ_GCC_5332
Say Y if you want to use peripheral devices such as UART, SPI,
i2c, USB, SD/eMMC, etc.
+config IPQ_GCC_5424
+ tristate "IPQ5424 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ help
+ Support for the global clock controller on IPQ5424 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc.
+
config IPQ_GCC_6018
tristate "IPQ6018 Global Clock Controller"
help
@@ -467,6 +493,35 @@ config QCS_GCC_404
Say Y if you want to use multimedia devices or peripheral
devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+config SA_CAMCC_8775P
+ tristate "SA8775P Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SA_GCC_8775P
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc.
+ SA8775P devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
+config QCS_GCC_8300
+ tristate "QCS8300 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on Qualcomm Technologies, Inc.
+ QCS8300 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
+config QCS_GCC_615
+ tristate "QCS615 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on QCS615 devices.
+ Say Y if you want to use multimedia devices or peripheral
+ devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+
config SC_CAMCC_7180
tristate "SC7180 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -497,6 +552,16 @@ config SC_CAMCC_8280XP
Say Y if you want to support camera devices and functionality such as
capturing pictures.
+config SA_DISPCC_8775P
+ tristate "SA8775P Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SA_GCC_8775P
+ help
+ Support for the two display clock controllers on Qualcomm
+ Technologies, Inc. SA8775P devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SC_DISPCC_7180
tristate "SC7180 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -545,6 +610,24 @@ config SA_GPUCC_8775P
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SAR_GCC_2130P
+ tristate "SAR2130P Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SAR2130P devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, SDCC, etc.
+
+config SAR_GPUCC_2130P
+ tristate "SAR2130P Graphics clock controller"
+ select QCOM_GDSC
+ select SAR_GCC_2130P
+ help
+ Support for the graphics clock controller on SAR2130P devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SC_GCC_7180
tristate "SC7180 Global Clock Controller"
select QCOM_GDSC
@@ -857,7 +940,7 @@ config SM_CAMCC_8450
depends on ARM64 || COMPILE_TEST
select SM_GCC_8450
help
- Support for the camera clock controller on SM8450 devices.
+ Support for the camera clock controller on SM8450 or SM8475 devices.
Say Y if you want to support camera devices and camera functionality.
config SM_CAMCC_8550
@@ -952,17 +1035,28 @@ config SM_DISPCC_8450
depends on SM_GCC_8450
help
Support for the display clock controller on Qualcomm Technologies, Inc
- SM8450 devices.
+ SM8450 or SM8475 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
config SM_DISPCC_8550
tristate "SM8550 Display Clock Controller"
depends on ARM64 || COMPILE_TEST
- depends on SM_GCC_8550 || SM_GCC_8650
+ depends on SM_GCC_8550 || SM_GCC_8650 || SAR_GCC_2130P
help
Support for the display clock controller on Qualcomm Technologies, Inc
- SM8550 or SM8650 devices.
+ SAR2130P, SM8550 or SM8650 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
+config SM_DISPCC_8750
+ tristate "SM8750 Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on SM_GCC_8750
+ select QCOM_GDSC
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc.
+ SM8750 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
@@ -987,6 +1081,7 @@ config SM_GCC_6115
config SM_GCC_6125
tristate "SM6125 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
help
Support for the global clock controller on SM6125 devices.
Say Y if you want to use peripheral devices such as UART,
@@ -1022,6 +1117,7 @@ config SM_GCC_7150
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
help
Support for the global clock controller on SM8150 devices.
Say Y if you want to use peripheral devices such as UART,
@@ -1050,7 +1146,8 @@ config SM_GCC_8450
depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
- Support for the global clock controller on SM8450 devices.
+ Support for the global clock controller on SM8450 or SM8475
+ devices.
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
@@ -1072,6 +1169,15 @@ config SM_GCC_8650
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8750
+ tristate "SM8750 Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8750 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GPUCC_4450
tristate "SM4450 Graphics Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1149,7 +1255,8 @@ config SM_GPUCC_8450
depends on ARM64 || COMPILE_TEST
select SM_GCC_8450
help
- Support for the graphics clock controller on SM8450 devices.
+ Support for the graphics clock controller on SM8450 or SM8475
+ devices.
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
@@ -1171,6 +1278,15 @@ config SM_GPUCC_8650
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SM_LPASSCC_6115
+ tristate "SM6115 Low Power Audio Subsystem (LPASS) Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_6115
+ help
+ Support for the LPASS clock controller on SM6115 devices.
+ Say Y if you want to toggle LPASS-adjacent resets within
+ this clock controller to reset the LPASS subsystem.
+
config SM_TCSRCC_8550
tristate "SM8550 TCSR Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1187,6 +1303,25 @@ config SM_TCSRCC_8650
Support for the TCSR clock controller on SM8650 devices.
Say Y if you want to use peripheral devices such as SD/UFS.
+config SM_TCSRCC_8750
+ tristate "SM8750 TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on SM8750 devices.
+ Say Y if you want to use peripheral devices such as UFS/USB/PCIe.
+
+config SA_VIDEOCC_8775P
+ tristate "SA8775P Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SA_GCC_8775P
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on Qualcomm Technologies, Inc.
+ SA8775P devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode/decode.
+
config SM_VIDEOCC_7150
tristate "SM7150 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -1230,11 +1365,11 @@ config SM_VIDEOCC_8350
config SM_VIDEOCC_8550
tristate "SM8550 Video Clock Controller"
depends on ARM64 || COMPILE_TEST
- select SM_GCC_8550
+ depends on SM_GCC_8550 || SM_GCC_8650
select QCOM_GDSC
help
Support for the video clock controller on Qualcomm Technologies, Inc.
- SM8550 devices.
+ SM8550 or SM8650 devices.
Say Y if you want to support video devices and functionality such as
video encode/decode.
@@ -1283,7 +1418,7 @@ config SM_VIDEOCC_8450
select QCOM_GDSC
help
Support for the video clock controller on Qualcomm Technologies, Inc.
- SM8450 devices.
+ SM8450 or SM8475 devices.
Say Y if you want to support video devices and functionality such as
video encode/decode.
endif
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 2b378667a63f..0db2f98bcb3e 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -26,12 +26,15 @@ obj-$(CONFIG_CLK_X1E80100_DISPCC) += dispcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GCC) += gcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GPUCC) += gpucc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_TCSRCC) += tcsrcc-x1e80100.o
+obj-$(CONFIG_CLK_X1P42100_GPUCC) += gpucc-x1p42100.o
obj-$(CONFIG_CLK_QCM2290_GPUCC) += gpucc-qcm2290.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
+obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_5018) += gcc-ipq5018.o
obj-$(CONFIG_IPQ_GCC_5332) += gcc-ipq5332.o
+obj-$(CONFIG_IPQ_GCC_5424) += gcc-ipq5424.o
obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
@@ -70,6 +73,8 @@ obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCM_GCC_2290) += gcc-qcm2290.o
obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
+obj-$(CONFIG_QCS_GCC_615) += gcc-qcs615.o
+obj-$(CONFIG_QCS_GCC_8300) += gcc-qcs8300.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_QDU_ECPRICC_1000) += ecpricc-qdu1000.o
@@ -80,8 +85,13 @@ obj-$(CONFIG_SC_CAMCC_8280XP) += camcc-sc8280xp.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o
obj-$(CONFIG_SC_DISPCC_8280XP) += dispcc-sc8280xp.o
+obj-$(CONFIG_SA_CAMCC_8775P) += camcc-sa8775p.o
+obj-$(CONFIG_SA_DISPCC_8775P) += dispcc0-sa8775p.o dispcc1-sa8775p.o
obj-$(CONFIG_SA_GCC_8775P) += gcc-sa8775p.o
obj-$(CONFIG_SA_GPUCC_8775P) += gpucc-sa8775p.o
+obj-$(CONFIG_SA_VIDEOCC_8775P) += videocc-sa8775p.o
+obj-$(CONFIG_SAR_GCC_2130P) += gcc-sar2130p.o
+obj-$(CONFIG_SAR_GPUCC_2130P) += gpucc-sar2130p.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o
obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
@@ -124,6 +134,7 @@ obj-$(CONFIG_SM_DISPCC_7150) += dispcc-sm7150.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_DISPCC_8550) += dispcc-sm8550.o
+obj-$(CONFIG_SM_DISPCC_8750) += dispcc-sm8750.o
obj-$(CONFIG_SM_GCC_4450) += gcc-sm4450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -136,6 +147,7 @@ obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GCC_8550) += gcc-sm8550.o
obj-$(CONFIG_SM_GCC_8650) += gcc-sm8650.o
+obj-$(CONFIG_SM_GCC_8750) += gcc-sm8750.o
obj-$(CONFIG_SM_GPUCC_4450) += gpucc-sm4450.o
obj-$(CONFIG_SM_GPUCC_6115) += gpucc-sm6115.o
obj-$(CONFIG_SM_GPUCC_6125) += gpucc-sm6125.o
@@ -147,8 +159,10 @@ obj-$(CONFIG_SM_GPUCC_8350) += gpucc-sm8350.o
obj-$(CONFIG_SM_GPUCC_8450) += gpucc-sm8450.o
obj-$(CONFIG_SM_GPUCC_8550) += gpucc-sm8550.o
obj-$(CONFIG_SM_GPUCC_8650) += gpucc-sm8650.o
+obj-$(CONFIG_SM_LPASSCC_6115) += lpasscc-sm6115.o
obj-$(CONFIG_SM_TCSRCC_8550) += tcsrcc-sm8550.o
obj-$(CONFIG_SM_TCSRCC_8650) += tcsrcc-sm8650.o
+obj-$(CONFIG_SM_TCSRCC_8750) += tcsrcc-sm8750.o
obj-$(CONFIG_SM_VIDEOCC_7150) += videocc-sm7150.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index e8632db2c542..d6c1aea7e9e1 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -73,20 +73,19 @@ static const struct alpha_pll_config ipq5018_pll_config = {
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.early_output_mask = BIT(3),
- .alpha_en_mask = BIT(24),
.status_val = 0x3,
.status_mask = GENMASK(10, 8),
.lock_det = BIT(2),
.test_ctl_hi_val = 0x00400003,
};
+/* 1.080 GHz configuration */
static const struct alpha_pll_config ipq5332_pll_config = {
.l = 0x2d,
.config_ctl_val = 0x4001075b,
.main_output_mask = BIT(0),
.aux_output_mask = BIT(1),
.early_output_mask = BIT(3),
- .alpha_en_mask = BIT(24),
.status_val = 0x3,
.status_mask = GENMASK(10, 8),
.lock_det = BIT(2),
diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c
new file mode 100644
index 000000000000..c04801a5af35
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sa8775p.c
@@ -0,0 +1,1868 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sa8775p-camcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
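+/* Indices into this clock controller's DT "clocks" property, in binding order. */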
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_BI_TCXO_AO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL2_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_MAIN,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+static const struct pll_vco rivian_evo_vco[] = {
+ { 864000000, 1056000000, 0 },
+};
+
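+/*
+ * With the 16-bit Lucid EVO alpha, l = 0x3e and alpha = 0x8000 program
+ * 62.5 * 19.2 MHz = 1200 MHz, i.e. 600 MHz on the even output and
+ * 400 MHz on the odd output.
+ */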
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x2, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 14,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x90008820,
+ .config_ctl_hi_val = 0x00890263,
+ .config_ctl_hi1_val = 0x00000247,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400000,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x1000,
+ .vco_table = rivian_evo_vco,
+ .num_vco = ARRAY_SIZE(rivian_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_rivian_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x2000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x2000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x3000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x4000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_pll5_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EVEN, 3 },
+ { P_CAM_CC_PLL2_OUT_MAIN, 5 },
+};
+
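+/*
+ * Both PLL2 mux inputs resolve to cam_cc_pll2 itself; no separate
+ * postdiv clock is registered for the even output.
+ */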
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll2.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map cam_cc_parent_map_6_ao[] = {
+ { P_BI_TCXO_AO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
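+/*
+ * Frequency tables use the qcom F(rate, source, pre_div, m, n) macro;
+ * the pre-divider is encoded as 2 * pre_div - 1, which is how
+ * fractional dividers such as 2.5 are representable.
+ */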
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0x13170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(37500000, P_CAM_CC_PLL0_OUT_MAIN, 16, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0x130a0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0x130bc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_2_clk_src = {
+ .cmd_rcgr = 0x130d8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_3_clk_src = {
+ .cmd_rcgr = 0x130f4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_3_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x11034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x15074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x15098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x150b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x150d8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csid_clk_src = {
+ .cmd_rcgr = 0x13150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x13120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0x1307c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(480000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x11004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(480000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0x12004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_clk_src[] = {
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0x13000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_ife_lite_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0x13020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_clk_src[] = {
+ F(480000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_clk_src = {
+ .cmd_rcgr = 0x10004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_CAM_CC_PLL2_OUT_MAIN, 1, 1, 50),
+ F(24000000, P_CAM_CC_PLL2_OUT_MAIN, 10, 1, 4),
+ F(64000000, P_CAM_CC_PLL2_OUT_MAIN, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x15004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x15020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x1503c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x15058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x131f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x13138,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO_AO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0x131d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6_ao,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_6_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0x13188,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13188,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0x13190,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13190,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_qdss_debug_xo_clk = {
+ .halt_reg = 0x131b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x131b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_qdss_debug_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0x130b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0x130d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_2_clk = {
+ .halt_reg = 0x130f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x130f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_3_clk = {
+ .halt_reg = 0x1310c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1310c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cci_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cci_3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0x131d0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x131d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_core_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0x13110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13110,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_fast_ahb_clk = {
+ .halt_reg = 0x13118,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13118,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_0_clk = {
+ .halt_reg = 0x11024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_1_clk = {
+ .halt_reg = 0x12024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ife_lite_clk = {
+ .halt_reg = 0x1301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ipe_clk = {
+ .halt_reg = 0x10024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_ipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_lite_0_clk = {
+ .halt_reg = 0x13050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_lite_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_sfe_lite_1_clk = {
+ .halt_reg = 0x13068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_cpas_sfe_lite_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x1508c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1508c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x150b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x150d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x150f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_clk = {
+ .halt_reg = 0x13168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13168,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csid_csiphy_rx_clk = {
+ .halt_reg = 0x15094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csid_csiphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x15090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x150b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x150d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x150f4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x150f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_csiphy3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0x1309c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1309c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0x13094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_icp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x1101c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_fast_ahb_clk = {
+ .halt_reg = 0x11030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_0_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_fast_ahb_clk = {
+ .halt_reg = 0x12030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_1_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_ahb_clk = {
+ .halt_reg = 0x13044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0x13018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0x13040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0x13038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_ahb_clk = {
+ .halt_reg = 0x10030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_clk = {
+ .halt_reg = 0x1001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_fast_ahb_clk = {
+ .halt_reg = 0x10034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_ipe_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x1501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x15038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x15054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x15070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_mclk3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_lite_0_clk = {
+ .halt_reg = 0x1304c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1304c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_lite_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_lite_0_fast_ahb_clk = {
+ .halt_reg = 0x1305c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1305c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_lite_0_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_lite_1_clk = {
+ .halt_reg = 0x13064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_lite_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sfe_lite_1_fast_ahb_clk = {
+ .halt_reg = 0x13074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sfe_lite_1_fast_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sm_obs_clk = {
+ .halt_reg = 0x1510c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1510c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "cam_cc_sm_obs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc cam_cc_titan_top_gdsc = {
+ .gdscr = 0x131bc,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "cam_cc_titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_sa8775p_clocks[] = {
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CCI_2_CLK] = &cam_cc_cci_2_clk.clkr,
+ [CAM_CC_CCI_2_CLK_SRC] = &cam_cc_cci_2_clk_src.clkr,
+ [CAM_CC_CCI_3_CLK] = &cam_cc_cci_3_clk.clkr,
+ [CAM_CC_CCI_3_CLK_SRC] = &cam_cc_cci_3_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPAS_FAST_AHB_CLK] = &cam_cc_cpas_fast_ahb_clk.clkr,
+ [CAM_CC_CPAS_IFE_0_CLK] = &cam_cc_cpas_ife_0_clk.clkr,
+ [CAM_CC_CPAS_IFE_1_CLK] = &cam_cc_cpas_ife_1_clk.clkr,
+ [CAM_CC_CPAS_IFE_LITE_CLK] = &cam_cc_cpas_ife_lite_clk.clkr,
+ [CAM_CC_CPAS_IPE_CLK] = &cam_cc_cpas_ipe_clk.clkr,
+ [CAM_CC_CPAS_SFE_LITE_0_CLK] = &cam_cc_cpas_sfe_lite_0_clk.clkr,
+ [CAM_CC_CPAS_SFE_LITE_1_CLK] = &cam_cc_cpas_sfe_lite_1_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSID_CLK] = &cam_cc_csid_clk.clkr,
+ [CAM_CC_CSID_CLK_SRC] = &cam_cc_csid_clk_src.clkr,
+ [CAM_CC_CSID_CSIPHY_RX_CLK] = &cam_cc_csid_csiphy_rx_clk.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_FAST_AHB_CLK] = &cam_cc_ife_0_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_FAST_AHB_CLK] = &cam_cc_ife_1_fast_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_AHB_CLK] = &cam_cc_ife_lite_ahb_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_AHB_CLK] = &cam_cc_ipe_ahb_clk.clkr,
+ [CAM_CC_IPE_CLK] = &cam_cc_ipe_clk.clkr,
+ [CAM_CC_IPE_CLK_SRC] = &cam_cc_ipe_clk_src.clkr,
+ [CAM_CC_IPE_FAST_AHB_CLK] = &cam_cc_ipe_fast_ahb_clk.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr,
+ [CAM_CC_SFE_LITE_0_CLK] = &cam_cc_sfe_lite_0_clk.clkr,
+ [CAM_CC_SFE_LITE_0_FAST_AHB_CLK] = &cam_cc_sfe_lite_0_fast_ahb_clk.clkr,
+ [CAM_CC_SFE_LITE_1_CLK] = &cam_cc_sfe_lite_1_clk.clkr,
+ [CAM_CC_SFE_LITE_1_FAST_AHB_CLK] = &cam_cc_sfe_lite_1_fast_ahb_clk.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SM_OBS_CLK] = &cam_cc_sm_obs_clk.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sa8775p_gdscs[] = {
+ [CAM_CC_TITAN_TOP_GDSC] = &cam_cc_titan_top_gdsc,
+};
+
+static const struct qcom_reset_map cam_cc_sa8775p_resets[] = {
+ [CAM_CC_ICP_BCR] = { 0x13078 },
+ [CAM_CC_IFE_0_BCR] = { 0x11000 },
+ [CAM_CC_IFE_1_BCR] = { 0x12000 },
+ [CAM_CC_IPE_0_BCR] = { 0x10000 },
+ [CAM_CC_SFE_LITE_0_BCR] = { 0x13048 },
+ [CAM_CC_SFE_LITE_1_BCR] = { 0x13060 },
+};
+
+static const struct regmap_config cam_cc_sa8775p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x16218,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc cam_cc_sa8775p_desc = {
+ .config = &cam_cc_sa8775p_regmap_config,
+ .clks = cam_cc_sa8775p_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sa8775p_clocks),
+ .resets = cam_cc_sa8775p_resets,
+ .num_resets = ARRAY_SIZE(cam_cc_sa8775p_resets),
+ .gdscs = cam_cc_sa8775p_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sa8775p_gdscs),
+};
+
+static const struct of_device_id cam_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,sa8775p-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sa8775p_match_table);
+
+static int cam_cc_sa8775p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sa8775p_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */
+ qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sa8775p_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver cam_cc_sa8775p_driver = {
+ .probe = cam_cc_sa8775p_probe,
+ .driver = {
+ .name = "camcc-sa8775p",
+ .of_match_table = cam_cc_sa8775p_match_table,
+ },
+};
+
+module_platform_driver(cam_cc_sa8775p_driver);
+
+MODULE_DESCRIPTION("QTI CAMCC SA8775P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c
index 26b78eed15ef..08982737e490 100644
--- a/drivers/clk/qcom/camcc-sm8450.c
+++ b/drivers/clk/qcom/camcc-sm8450.c
@@ -54,6 +54,10 @@ static const struct pll_vco rivian_evo_vco[] = {
{ 864000000, 1056000000, 0 },
};
+static const struct pll_vco rivian_ole_vco[] = {
+ { 864000000, 1075000000, 0 },
+};
+
static const struct clk_parent_data pll_parent_data_tcxo = { .index = DT_BI_TCXO };
static const struct alpha_pll_config cam_cc_pll0_config = {
@@ -66,6 +70,20 @@ static const struct alpha_pll_config cam_cc_pll0_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll0_config = {
+ .l = 0x3e,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00008400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll0 = {
.offset = 0x0,
.vco_table = lucid_evo_vco,
@@ -86,6 +104,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll0_out_even_init = {
+ .name = "cam_cc_pll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
.offset = 0x0,
.post_div_shift = 10,
@@ -109,6 +137,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll0_out_odd_init = {
+ .name = "cam_cc_pll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
.offset = 0x0,
.post_div_shift = 14,
@@ -137,6 +175,20 @@ static const struct alpha_pll_config cam_cc_pll1_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll1_config = {
+ .l = 0x25,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll1 = {
.offset = 0x1000,
.vco_table = lucid_evo_vco,
@@ -157,6 +209,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll1_out_even_init = {
+ .name = "cam_cc_pll1_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
.offset = 0x1000,
.post_div_shift = 10,
@@ -183,6 +245,16 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
.config_ctl_hi1_val = 0x00000217,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll2_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x10000030,
+ .config_ctl_hi_val = 0x80890263,
+ .config_ctl_hi1_val = 0x00000217,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000000,
+};
+
static struct clk_alpha_pll cam_cc_pll2 = {
.offset = 0x2000,
.vco_table = rivian_evo_vco,
@@ -208,6 +280,20 @@ static const struct alpha_pll_config cam_cc_pll3_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll3_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll3 = {
.offset = 0x3000,
.vco_table = lucid_evo_vco,
@@ -228,6 +314,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll3_out_even_init = {
+ .name = "cam_cc_pll3_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
.offset = 0x3000,
.post_div_shift = 10,
@@ -256,6 +352,20 @@ static const struct alpha_pll_config cam_cc_pll4_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll4_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll4 = {
.offset = 0x4000,
.vco_table = lucid_evo_vco,
@@ -276,6 +386,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll4_out_even_init = {
+ .name = "cam_cc_pll4_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
.offset = 0x4000,
.post_div_shift = 10,
@@ -304,6 +424,20 @@ static const struct alpha_pll_config cam_cc_pll5_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll5_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll5 = {
.offset = 0x5000,
.vco_table = lucid_evo_vco,
@@ -324,6 +458,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll5_out_even_init = {
+ .name = "cam_cc_pll5_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
.offset = 0x5000,
.post_div_shift = 10,
@@ -352,6 +496,20 @@ static const struct alpha_pll_config cam_cc_pll6_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll6_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll6 = {
.offset = 0x6000,
.vco_table = lucid_evo_vco,
@@ -372,6 +530,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll6_out_even_init = {
+ .name = "cam_cc_pll6_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
.offset = 0x6000,
.post_div_shift = 10,
@@ -400,6 +568,20 @@ static const struct alpha_pll_config cam_cc_pll7_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll7_config = {
+ .l = 0x2d,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll7 = {
.offset = 0x7000,
.vco_table = lucid_evo_vco,
@@ -420,6 +602,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll7_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll7_out_even_init = {
+ .name = "cam_cc_pll7_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll7.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll7_out_even = {
.offset = 0x7000,
.post_div_shift = 10,
@@ -448,6 +640,20 @@ static const struct alpha_pll_config cam_cc_pll8_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_cam_cc_pll8_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll cam_cc_pll8 = {
.offset = 0x8000,
.vco_table = lucid_evo_vco,
@@ -468,6 +674,16 @@ static const struct clk_div_table post_div_table_cam_cc_pll8_out_even[] = {
{ }
};
+static struct clk_init_data sm8475_cam_cc_pll8_out_even_init = {
+ .name = "cam_cc_pll8_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &cam_cc_pll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static struct clk_alpha_pll_postdiv cam_cc_pll8_out_even = {
.offset = 0x8000,
.post_div_shift = 10,
@@ -2817,6 +3033,7 @@ static const struct qcom_cc_desc cam_cc_sm8450_desc = {
static const struct of_device_id cam_cc_sm8450_match_table[] = {
{ .compatible = "qcom,sm8450-camcc" },
+ { .compatible = "qcom,sm8475-camcc" },
{ }
};
MODULE_DEVICE_TABLE(of, cam_cc_sm8450_match_table);
@@ -2829,15 +3046,72 @@ static int cam_cc_sm8450_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
- clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
- clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-camcc")) {
+ /* Update CAMCC PLL0 */
+ cam_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll0_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll0_out_even.clkr.hw.init = &sm8475_cam_cc_pll0_out_even_init;
+ cam_cc_pll0_out_odd.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll0_out_odd.clkr.hw.init = &sm8475_cam_cc_pll0_out_odd_init;
+
+ /* Update CAMCC PLL1 */
+ cam_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll1_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll1_out_even.clkr.hw.init = &sm8475_cam_cc_pll1_out_even_init;
+
+ /* Update CAMCC PLL2 */
+ cam_cc_pll2.vco_table = rivian_ole_vco;
+
+ /* Update CAMCC PLL3 */
+ cam_cc_pll3.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll3_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll3_out_even.clkr.hw.init = &sm8475_cam_cc_pll3_out_even_init;
+
+ /* Update CAMCC PLL4 */
+ cam_cc_pll4.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll4_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll4_out_even.clkr.hw.init = &sm8475_cam_cc_pll4_out_even_init;
+
+ /* Update CAMCC PLL5 */
+ cam_cc_pll5.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll5_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll5_out_even.clkr.hw.init = &sm8475_cam_cc_pll5_out_even_init;
+
+ /* Update CAMCC PLL6 */
+ cam_cc_pll6.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll6_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll6_out_even.clkr.hw.init = &sm8475_cam_cc_pll6_out_even_init;
+
+ /* Update CAMCC PLL7 */
+ cam_cc_pll7.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll7_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll7_out_even.clkr.hw.init = &sm8475_cam_cc_pll7_out_even_init;
+
+ /* Update CAMCC PLL8 */
+ cam_cc_pll8.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll8_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ cam_cc_pll8_out_even.clkr.hw.init = &sm8475_cam_cc_pll8_out_even_init;
+
+ clk_lucid_ole_pll_configure(&cam_cc_pll0, regmap, &sm8475_cam_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll1, regmap, &sm8475_cam_cc_pll1_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &sm8475_cam_cc_pll2_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll3, regmap, &sm8475_cam_cc_pll3_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll4, regmap, &sm8475_cam_cc_pll4_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll5, regmap, &sm8475_cam_cc_pll5_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll6, regmap, &sm8475_cam_cc_pll6_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll7, regmap, &sm8475_cam_cc_pll7_config);
+ clk_lucid_ole_pll_configure(&cam_cc_pll8, regmap, &sm8475_cam_cc_pll8_config);
+ } else {
+ clk_lucid_evo_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_rivian_evo_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll7, regmap, &cam_cc_pll7_config);
+ clk_lucid_evo_pll_configure(&cam_cc_pll8, regmap, &cam_cc_pll8_config);
+ }
return qcom_cc_really_probe(&pdev->dev, &cam_cc_sm8450_desc, regmap);
}
@@ -2852,5 +3126,5 @@ static struct platform_driver cam_cc_sm8450_driver = {
module_platform_driver(cam_cc_sm8450_driver);
-MODULE_DESCRIPTION("QCOM CAMCC SM8450 Driver");
+MODULE_DESCRIPTION("QCOM CAMCC SM8450 / SM8475 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/camcc-x1e80100.c b/drivers/clk/qcom/camcc-x1e80100.c
index 85e76c7712ad..b73524ae64b1 100644
--- a/drivers/clk/qcom/camcc-x1e80100.c
+++ b/drivers/clk/qcom/camcc-x1e80100.c
@@ -2212,6 +2212,8 @@ static struct clk_branch cam_cc_sfe_0_fast_ahb_clk = {
},
};
+static struct gdsc cam_cc_titan_top_gdsc;
+
static struct gdsc cam_cc_bps_gdsc = {
.gdscr = 0x10004,
.en_rest_wait_val = 0x2,
@@ -2221,6 +2223,7 @@ static struct gdsc cam_cc_bps_gdsc = {
.name = "cam_cc_bps_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2233,6 +2236,7 @@ static struct gdsc cam_cc_ife_0_gdsc = {
.name = "cam_cc_ife_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2245,6 +2249,7 @@ static struct gdsc cam_cc_ife_1_gdsc = {
.name = "cam_cc_ife_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2257,6 +2262,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = {
.name = "cam_cc_ipe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -2269,6 +2275,7 @@ static struct gdsc cam_cc_sfe_0_gdsc = {
.name = "cam_cc_sfe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
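
The forward declaration lets every leaf GDSC in this file name cam_cc_titan_top_gdsc as its .parent. The gdsc registration code is assumed to turn that field into a genpd subdomain link, conceptually:

	/* Conceptual sketch, not literal driver code: what the .parent
	 * field is assumed to expand to at registration time. */
	pm_genpd_add_subdomain(&cam_cc_titan_top_gdsc.pd, &cam_cc_bps_gdsc.pd);

so the Titan top domain powers up before, and stays up while, any of the BPS/IFE/IPE/SFE domains is on.
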
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index f9105443d7db..9a65d14acf71 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -40,7 +40,7 @@
#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
# define PLL_POST_DIV_SHIFT 8
-# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0)
+# define PLL_POST_DIV_MASK(p) GENMASK((p)->width ? (p)->width - 1 : 3, 0)
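+/*
+ * A zero .width means the post divider width was never set; fall back to
+ * the common 4-bit field (GENMASK(3, 0) == 0xf) rather than GENMASK(-1, 0).
+ */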
# define PLL_ALPHA_MSB BIT(15)
# define PLL_ALPHA_EN BIT(24)
# define PLL_ALPHA_MODE BIT(25)
@@ -58,6 +58,7 @@
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
#define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1])
#define PLL_TEST_CTL_U2(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U2])
+#define PLL_TEST_CTL_U3(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U3])
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
#define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE])
#define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC])
@@ -197,6 +198,37 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U1] = 0x34,
[PLL_OFF_TEST_CTL_U2] = 0x38,
},
+ [CLK_ALPHA_PLL_TYPE_PONGO_ELU] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATE] = 0x08,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x14,
+ [PLL_OFF_USER_CTL_U] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1c,
+ [PLL_OFF_CONFIG_CTL_U] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x24,
+ [PLL_OFF_CONFIG_CTL_U2] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x2c,
+ [PLL_OFF_TEST_CTL_U] = 0x30,
+ [PLL_OFF_TEST_CTL_U1] = 0x34,
+ [PLL_OFF_TEST_CTL_U2] = 0x38,
+ [PLL_OFF_TEST_CTL_U3] = 0x3c,
+ },
+ [CLK_ALPHA_PLL_TYPE_TAYCAN_ELU] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATE] = 0x08,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_ALPHA_VAL] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U] = 0x24,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x2c,
+ [PLL_OFF_TEST_CTL_U] = 0x30,
+ },
[CLK_ALPHA_PLL_TYPE_RIVIAN_EVO] = {
[PLL_OFF_OPMODE] = 0x04,
[PLL_OFF_STATUS] = 0x0c,
@@ -267,6 +299,17 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x30,
[PLL_OFF_STATUS] = 0x3c,
},
+ [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_TEST_CTL] = 0x0c,
+ [PLL_OFF_TEST_CTL_U] = 0x10,
+ [PLL_OFF_USER_CTL] = 0x14,
+ [PLL_OFF_CONFIG_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL_U] = 0x1c,
+ [PLL_OFF_STATUS] = 0x20,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -312,6 +355,12 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define LUCID_EVO_PLL_CAL_L_VAL_SHIFT 16
#define LUCID_OLE_PLL_RINGOSC_CAL_L_VAL_SHIFT 24
+/* PONGO ELU PLL specific setting and offsets */
+#define PONGO_PLL_OUT_MASK GENMASK(1, 0)
+#define PONGO_PLL_L_VAL_MASK GENMASK(11, 0)
+#define PONGO_XO_PRESENT BIT(10)
+#define PONGO_CLOCK_SELECT BIT(12)
+
/* ZONDA PLL specific */
#define ZONDA_PLL_OUT_MASK 0xf
#define ZONDA_STAY_IN_CFA BIT(16)
@@ -341,7 +390,8 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
if (ret)
return ret;
- for (count = 200; count > 0; count--) {
+ /* Pongo PLLs using a 32KHz reference can take upwards of 1500us to lock. */
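+ /* At one poll per microsecond this allows ~1.5 ms before timing out. */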
+ for (count = 1500; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@@ -421,6 +471,8 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
mask |= config->pre_div_mask;
mask |= config->post_div_mask;
mask |= config->vco_mask;
+ mask |= config->alpha_en_mask;
+ mask |= config->alpha_mode_mask;
regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
@@ -1903,9 +1955,8 @@ static int alpha_pll_lucid_5lpe_enable(struct clk_hw *hw)
}
/* Check if PLL is already enabled, return if enabled */
- ret = trion_pll_is_enabled(pll, pll->clkr.regmap);
- if (ret < 0)
- return ret;
+ if (trion_pll_is_enabled(pll, pll->clkr.regmap))
+ return 0;
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
if (ret)
@@ -2318,13 +2369,8 @@ static int alpha_pll_lucid_evo_enable(struct clk_hw *hw)
}
/* Check if PLL is already enabled */
- ret = trion_pll_is_enabled(pll, regmap);
- if (ret < 0) {
- return ret;
- } else if (ret) {
- pr_warn("%s PLL is already enabled\n", clk_hw_get_name(&pll->clkr.hw));
+ if (trion_pll_is_enabled(pll, regmap))
return 0;
- }
ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
if (ret)
@@ -2489,6 +2535,144 @@ const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_reset_lucid_evo_ops);
+static int alpha_pll_pongo_elu_prepare(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Enable the PLL initially for a one-time calibration against XO. */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ regmap_update_bits(regmap, PLL_MODE(pll), PONGO_XO_PRESENT, PONGO_XO_PRESENT);
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret) {
+ /* Reverse calibration - disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ return ret;
+ }
+
+ /* Disable PLL after one-time calibration. */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ /* Select internally generated clock. */
+ regmap_update_bits(regmap, PLL_MODE(pll), PONGO_CLOCK_SELECT,
+ PONGO_CLOCK_SELECT);
+
+ return 0;
+}
+
+static int alpha_pll_pongo_elu_enable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Check if PLL is already enabled */
+ if (trion_pll_is_enabled(pll, regmap))
+ return 0;
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Set operation mode to RUN */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Enable the global PLL outputs */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+
+ return ret;
+}
+
+static void alpha_pll_pongo_elu_disable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ int ret;
+
+ /* Disable the global PLL output */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Place the PLL mode in STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+}
+
+static unsigned long alpha_pll_pongo_elu_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ u32 l;
+
+ if (regmap_read(regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ l &= PONGO_PLL_L_VAL_MASK;
+
+ return alpha_pll_calc_rate(parent_rate, l, 0, pll_alpha_width(pll));
+}
+
+const struct clk_ops clk_alpha_pll_pongo_elu_ops = {
+ .prepare = alpha_pll_pongo_elu_prepare,
+ .enable = alpha_pll_pongo_elu_enable,
+ .disable = alpha_pll_pongo_elu_disable,
+ .recalc_rate = alpha_pll_pongo_elu_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_pongo_elu_ops);
+
+void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll,
+ struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ u32 val;
+
+ regmap_update_bits(regmap, PLL_USER_CTL(pll), PONGO_PLL_OUT_MASK,
+ PONGO_PLL_OUT_MASK);
+
+ if (trion_pll_is_enabled(pll, regmap))
+ return;
+
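+ /* An already-programmed L value (e.g. from an earlier boot stage)
+ * means the PLL is configured; leave it untouched. */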
+ if (regmap_read(regmap, PLL_L_VAL(pll), &val))
+ return;
+ val &= PONGO_PLL_L_VAL_MASK;
+ if (val)
+ return;
+
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll), config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll), config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U2(pll), config->config_ctl_hi2_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val | PONGO_PLL_OUT_MASK);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll), config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll), config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll), config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll), config->test_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U2(pll), config->test_ctl_hi2_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U3(pll), config->test_ctl_hi3_val);
+
+ /* Disable PLL output */
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+}
+EXPORT_SYMBOL_GPL(clk_pongo_elu_pll_configure);
+
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
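
Pongo ELU differs from the other alpha PLL flavours in that .prepare runs a one-time calibration against XO and then switches the PLL over to its internally generated reference (PONGO_CLOCK_SELECT). Hooking the new ops up would look roughly like this; the offset, name and parent index are illustrative only:

	static struct clk_alpha_pll example_pongo_pll = {
		.offset = 0x0,
		.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_PONGO_ELU],
		.clkr = {
			.hw.init = &(const struct clk_init_data) {
				.name = "example_pongo_pll",
				.parent_data = &(const struct clk_parent_data) {
					.index = 0,
				},
				.num_parents = 1,
				.ops = &clk_alpha_pll_pongo_elu_ops,
			},
		},
	};

with clk_pongo_elu_pll_configure() called from the controller's probe before registration, mirroring the other *_pll_configure() helpers.
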
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 55eca04b23a1..79aca8525262 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -27,11 +27,14 @@ enum {
CLK_ALPHA_PLL_TYPE_ZONDA_OLE,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
+ CLK_ALPHA_PLL_TYPE_PONGO_ELU,
+ CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
CLK_ALPHA_PLL_TYPE_STROMER,
CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
+ CLK_ALPHA_PLL_TYPE_NSS_HUAYRA,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -51,6 +54,7 @@ enum {
PLL_OFF_TEST_CTL_U,
PLL_OFF_TEST_CTL_U1,
PLL_OFF_TEST_CTL_U2,
+ PLL_OFF_TEST_CTL_U3,
PLL_OFF_STATE,
PLL_OFF_STATUS,
PLL_OFF_OPMODE,
@@ -136,6 +140,7 @@ struct alpha_pll_config {
u32 test_ctl_hi_mask;
u32 test_ctl_hi1_val;
u32 test_ctl_hi2_val;
+ u32 test_ctl_hi3_val;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
@@ -184,13 +189,17 @@ extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_zonda_ole_ops clk_alpha_pll_zonda_ops
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
+#define clk_alpha_pll_taycan_elu_ops clk_alpha_pll_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
#define clk_alpha_pll_reset_lucid_ole_ops clk_alpha_pll_reset_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
#define clk_alpha_pll_fixed_lucid_ole_ops clk_alpha_pll_fixed_lucid_evo_ops
+#define clk_alpha_pll_fixed_taycan_elu_ops clk_alpha_pll_fixed_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
#define clk_alpha_pll_postdiv_lucid_ole_ops clk_alpha_pll_postdiv_lucid_evo_ops
+#define clk_alpha_pll_postdiv_taycan_elu_ops clk_alpha_pll_postdiv_lucid_evo_ops
+extern const struct clk_ops clk_alpha_pll_pongo_elu_ops;
extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
#define clk_alpha_pll_postdiv_rivian_evo_ops clk_alpha_pll_postdiv_fabia_ops
@@ -217,6 +226,11 @@ void clk_lucid_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma
const struct alpha_pll_config *config);
void clk_lucid_ole_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
+#define clk_taycan_elu_pll_configure(pll, regmap, config) \
+ clk_lucid_evo_pll_configure(pll, regmap, config)
+
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_stromer_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 88845baa7f84..987141c91fe0 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -597,6 +597,7 @@ struct frac_entry {
};
static const struct frac_entry pixel_table[] = {
+ { 1, 1 }, /* 1:1 pass-through of the parent rate */
{ 1, 2 },
{ 1, 3 },
{ 3, 16 },
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 8e0f3372dc7a..4fbdf4880d03 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -189,6 +189,7 @@ struct clk_rcg2_gfx3d {
container_of(to_clk_rcg2(_hw), struct clk_rcg2_gfx3d, rcg)
extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_gp_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
extern const struct clk_ops clk_rcg2_fm_ops;
extern const struct clk_ops clk_rcg2_mux_closest_ops;
@@ -198,6 +199,7 @@ extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
+extern const struct clk_ops clk_rcg2_shared_floor_ops;
extern const struct clk_ops clk_rcg2_shared_no_init_park_ops;
extern const struct clk_ops clk_dp_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index bf26c5448f00..8001fd9faf9d 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -8,11 +8,13 @@
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
+#include <linux/gcd.h>
#include <linux/minmax.h>
#include <linux/slab.h>
@@ -32,6 +34,7 @@
#define CFG_REG 0x4
#define CFG_SRC_DIV_SHIFT 0
+#define CFG_SRC_DIV_LENGTH 8
#define CFG_SRC_SEL_SHIFT 8
#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT 12
@@ -148,12 +151,32 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
return update_config(rcg);
}
-/*
- * Calculate m/n:d rate
+/**
+ * convert_to_reg_val() - Convert divisor values to hardware values.
+ *
+ * @f: Frequency table with pure m/n/pre_div parameters.
+ */
+static void convert_to_reg_val(struct freq_tbl *f)
+{
+ f->pre_div *= 2;
+ f->pre_div -= 1;
+}
+
+/**
+ * calc_rate() - Calculate rate based on m/n:d values
+ *
+ * @rate: Parent rate.
+ * @m: Multiplier.
+ * @n: Divisor.
+ * @mode: Use zero to ignore m/n calculation.
+ * @hid_div: Pre divisor register value. Pre divisor value
+ * relates to hid_div as pre_div = (hid_div + 1) / 2.
+ *
+ * Return calculated rate according to formula:
*
* parent_rate m
* rate = ----------- x ---
- * hid_div n
+ * pre_div n
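+ *
+ * Worked example (illustrative values): parent_rate = 19.2 MHz, m = 1,
+ * n = 2, mode != 0 and hid_div = 5 (i.e. pre_div = (5 + 1) / 2 = 3)
+ * give rate = 19.2 MHz / 3 * 1 / 2 = 3.2 MHz.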
*/
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
@@ -393,16 +416,110 @@ static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
}
-static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
- u32 *_cfg)
+/**
+ * clk_rcg2_split_div() - Split a multiplier that fits neither in n nor in pre_div.
+ *
+ * @multiplier: Multiplier to split between n and pre_div.
+ * @pre_div: Pointer to pre divisor value.
+ * @n: Pointer to n divisor value.
+ * @pre_div_max: Pre divisor maximum value.
+ */
+static inline void clk_rcg2_split_div(int multiplier, unsigned int *pre_div,
+ u16 *n, unsigned int pre_div_max)
+{
+ *n = mult_frac(multiplier * *n, *pre_div, pre_div_max);
+ *pre_div = pre_div_max;
+}
+
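+/*
+ * Worked example (widths assumed for illustration): for parent_rate =
+ * 19.2 MHz, rate = 64 kHz, mnd_max = 255 and pre_div_max = 15, the GCD is
+ * 64000, so m = 1 and scaled_parent_rate = 300 = 2 * 2 * 3 * 5 * 5. The
+ * factors 2, 2, 3 and the first 5 accumulate in n (n = 60); the second 5
+ * would push n past n_max = 256, so it lands in pre_div (pre_div = 5).
+ * 19.2 MHz / (5 * 60) = 64 kHz, and convert_to_reg_val() then stores the
+ * hardware encoding hid_div = 2 * 5 - 1 = 9.
+ */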
+static void clk_rcg2_calc_mnd(u64 parent_rate, u64 rate, struct freq_tbl *f,
+ unsigned int mnd_max, unsigned int pre_div_max)
+{
+ int i = 2;
+ unsigned int pre_div = 1;
+ unsigned long rates_gcd, scaled_parent_rate;
+ u16 m, n = 1, n_candidate = 1, n_max;
+
+ rates_gcd = gcd(parent_rate, rate);
+ m = div64_u64(rate, rates_gcd);
+ scaled_parent_rate = div64_u64(parent_rate, rates_gcd);
+ while (scaled_parent_rate > (mnd_max + m) * pre_div_max) {
+ /* we're exceeding the divisor's range, try a lower scale */
+ if (m > 1) {
+ m--;
+ scaled_parent_rate = mult_frac(scaled_parent_rate, m, (m + 1));
+ } else {
+ /* cannot lower the scale, just set the maximum divisor values */
+ f->n = mnd_max + m;
+ f->pre_div = pre_div_max;
+ f->m = m;
+ return;
+ }
+ }
+
+ n_max = m + mnd_max;
+
+ while (scaled_parent_rate > 1) {
+ while (scaled_parent_rate % i == 0) {
+ n_candidate *= i;
+ if (n_candidate < n_max)
+ n = n_candidate;
+ else if (pre_div * i < pre_div_max)
+ pre_div *= i;
+ else
+ clk_rcg2_split_div(i, &pre_div, &n, pre_div_max);
+
+ scaled_parent_rate /= i;
+ }
+ i++;
+ }
+
+ f->m = m;
+ f->n = n;
+ f->pre_div = pre_div > 1 ? pre_div : 0;
+}
+
+static int clk_rcg2_determine_gp_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f_tbl = {}, *f = &f_tbl;
+ int mnd_max = BIT(rcg->mnd_width) - 1;
+ int hid_max = BIT(rcg->hid_width) - 1;
+ struct clk_hw *parent;
+ u64 parent_rate;
+
+ parent = clk_hw_get_parent(hw);
+ parent_rate = clk_get_rate(parent->clk);
+ if (!parent_rate)
+ return -EINVAL;
+
+ clk_rcg2_calc_mnd(parent_rate, req->rate, f, mnd_max, hid_max / 2);
+ convert_to_reg_val(f);
+ req->rate = calc_rate(parent_rate, f->m, f->n, f->n, f->pre_div);
+
+ return 0;
+}
+
+static int __clk_rcg2_configure_parent(struct clk_rcg2 *rcg, u8 src, u32 *_cfg)
{
- u32 cfg, mask, d_val, not2d_val, n_minus_m;
struct clk_hw *hw = &rcg->clkr.hw;
- int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ int index = qcom_find_src_index(hw, rcg->parent_map, src);
if (index < 0)
return index;
+ *_cfg &= ~CFG_SRC_SEL_MASK;
+ *_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+
+ return 0;
+}
+
+static int __clk_rcg2_configure_mnd(struct clk_rcg2 *rcg, const struct freq_tbl *f,
+ u32 *_cfg)
+{
+ u32 cfg, mask, d_val, not2d_val, n_minus_m;
+ int ret;
+
if (rcg->mnd_width && f->n) {
mask = BIT(rcg->mnd_width) - 1;
ret = regmap_update_bits(rcg->clkr.regmap,
@@ -431,9 +548,8 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
}
mask = BIT(rcg->hid_width) - 1;
- mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
+ mask |= CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
- cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
if (rcg->hw_clk_ctrl)
@@ -445,6 +561,22 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
return 0;
}
+static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
+ u32 *_cfg)
+{
+ int ret;
+
+ ret = __clk_rcg2_configure_parent(rcg, f->src, _cfg);
+ if (ret)
+ return ret;
+
+ ret = __clk_rcg2_configure_mnd(rcg, f, _cfg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
u32 cfg;
@@ -465,6 +597,26 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
return update_config(rcg);
}
+static int clk_rcg2_configure_gp(struct clk_rcg2 *rcg, const struct freq_tbl *f)
+{
+ u32 cfg;
+ int ret;
+
+ ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
+ if (ret)
+ return ret;
+
+ ret = __clk_rcg2_configure_mnd(rcg, f, &cfg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
+ if (ret)
+ return ret;
+
+ return update_config(rcg);
+}
+
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
enum freq_policy policy)
{
@@ -518,6 +670,22 @@ static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
return __clk_rcg2_set_rate(hw, rate, CEIL);
}
+static int clk_rcg2_set_gp_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ int mnd_max = BIT(rcg->mnd_width) - 1;
+ int hid_max = BIT(rcg->hid_width) - 1;
+ struct freq_tbl f_tbl = {}, *f = &f_tbl;
+ int ret;
+
+ clk_rcg2_calc_mnd(parent_rate, rate, f, mnd_max, hid_max / 2);
+ convert_to_reg_val(f);
+ ret = clk_rcg2_configure_gp(rcg, f);
+
+ return ret;
+}
+
static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -645,6 +813,18 @@ const struct clk_ops clk_rcg2_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+const struct clk_ops clk_rcg2_gp_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_gp_rate,
+ .set_rate = clk_rcg2_set_gp_rate,
+ .get_duty_cycle = clk_rcg2_get_duty_cycle,
+ .set_duty_cycle = clk_rcg2_set_duty_cycle,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_gp_ops);
+
const struct clk_ops clk_rcg2_floor_ops = {
.is_enabled = clk_rcg2_is_enabled,
.get_parent = clk_rcg2_get_parent,
@@ -1186,15 +1366,23 @@ clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
return clk_rcg2_clear_force_enable(hw);
}
-static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate,
+ enum freq_policy policy)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
const struct freq_tbl *f;
- f = qcom_find_freq(rcg->freq_tbl, rate);
- if (!f)
+ switch (policy) {
+ case FLOOR:
+ f = qcom_find_freq_floor(rcg->freq_tbl, rate);
+ break;
+ case CEIL:
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ break;
+ default:
return -EINVAL;
+ }
/*
* In case clock is disabled, update the M, N and D registers, cache
@@ -1207,10 +1395,28 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
return clk_rcg2_shared_force_enable_clear(hw, f);
}
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
+}
+
static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate, u8 index)
{
- return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
+}
+
+static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
+}
+
+static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
}
static int clk_rcg2_shared_enable(struct clk_hw *hw)
@@ -1348,6 +1554,18 @@ const struct clk_ops clk_rcg2_shared_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+const struct clk_ops clk_rcg2_shared_floor_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_shared_get_parent,
+ .set_parent = clk_rcg2_shared_set_parent,
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
+ .determine_rate = clk_rcg2_determine_floor_rate,
+ .set_rate = clk_rcg2_shared_set_floor_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops);
+
static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index 9da034f8f2ff..ccc112c21667 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -4,6 +4,7 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -224,10 +225,10 @@ static void clk_rpm_unprepare(struct clk_hw *hw)
unsigned long active_rate, sleep_rate;
int ret;
- mutex_lock(&rpm_clk_lock);
+ guard(mutex)(&rpm_clk_lock);
if (!r->rate)
- goto out;
+ return;
/* Take peer clock's rate into account only if it's enabled. */
if (peer->enabled)
@@ -237,17 +238,14 @@ static void clk_rpm_unprepare(struct clk_hw *hw)
active_rate = r->branch ? !!peer_rate : peer_rate;
ret = clk_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return;
sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
ret = clk_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return;
r->enabled = false;
-
-out:
- mutex_unlock(&rpm_clk_lock);
}
static int clk_rpm_xo_prepare(struct clk_hw *hw)
@@ -324,12 +322,12 @@ static int clk_rpm_set_rate(struct clk_hw *hw,
unsigned long active_rate, sleep_rate;
unsigned long this_rate = 0, this_sleep_rate = 0;
unsigned long peer_rate = 0, peer_sleep_rate = 0;
- int ret = 0;
+ int ret;
- mutex_lock(&rpm_clk_lock);
+ guard(mutex)(&rpm_clk_lock);
if (!r->enabled)
- goto out;
+ return 0;
to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
@@ -341,19 +339,16 @@ static int clk_rpm_set_rate(struct clk_hw *hw,
active_rate = max(this_rate, peer_rate);
ret = clk_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return ret;
sleep_rate = max(this_sleep_rate, peer_sleep_rate);
ret = clk_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return ret;
r->rate = rate;
-out:
- mutex_unlock(&rpm_clk_lock);
-
- return ret;
+ return 0;
}
static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 4acde937114a..c7675930fde1 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>
@@ -206,7 +207,7 @@ static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c,
c->state = c->valid_state_mask;
WARN(1, "clk: %s failed to %s\n", c->res_name,
- enable ? "enable" : "disable");
+ str_enable_disable(enable));
return ret;
}
@@ -329,7 +330,7 @@ static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw,
{
struct clk_rpmh *c = to_clk_rpmh(hw);
- return c->aggr_state * c->unit;
+ return (unsigned long)c->aggr_state * c->unit;
}
static const struct clk_ops clk_rpmh_bcm_ops = {
@@ -368,6 +369,8 @@ DEFINE_CLK_RPMH_VRM(rf_clk2, _d, "rfclkd2", 1);
DEFINE_CLK_RPMH_VRM(rf_clk3, _d, "rfclkd3", 1);
DEFINE_CLK_RPMH_VRM(rf_clk4, _d, "rfclkd4", 1);
+DEFINE_CLK_RPMH_VRM(rf_clk3, _a2, "rfclka3", 2);
+
DEFINE_CLK_RPMH_VRM(clk1, _a1, "clka1", 1);
DEFINE_CLK_RPMH_VRM(clk2, _a1, "clka2", 1);
DEFINE_CLK_RPMH_VRM(clk3, _a1, "clka3", 1);
@@ -389,6 +392,18 @@ DEFINE_CLK_RPMH_BCM(ipa, "IP0");
DEFINE_CLK_RPMH_BCM(pka, "PKA0");
DEFINE_CLK_RPMH_BCM(qpic_clk, "QP0");
+static struct clk_hw *sar2130p_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div1.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div1_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sar2130p = {
+ .clks = sar2130p_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sar2130p_rpmh_clocks),
+};
+
static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
[RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
@@ -795,6 +810,45 @@ static const struct clk_rpmh_desc clk_rpmh_x1e80100 = {
.num_clks = ARRAY_SIZE(x1e80100_rpmh_clocks),
};
+static struct clk_hw *qcs615_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK2] = &clk_rpmh_ln_bb_clk2_a2.hw,
+ [RPMH_LN_BB_CLK2_A] = &clk_rpmh_ln_bb_clk2_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_ln_bb_clk3_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_ln_bb_clk3_a2_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_qcs615 = {
+ .clks = qcs615_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(qcs615_rpmh_clocks),
+};
+
+static struct clk_hw *sm8750_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_LN_BB_CLK1] = &clk_rpmh_clk6_a2.hw,
+ [RPMH_LN_BB_CLK1_A] = &clk_rpmh_clk6_a2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &clk_rpmh_clk8_a2.hw,
+ [RPMH_LN_BB_CLK3_A] = &clk_rpmh_clk8_a2_ao.hw,
+ [RPMH_RF_CLK1] = &clk_rpmh_rf_clk1_a.hw,
+ [RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
+ [RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
+ [RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_rf_clk3_a2.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_rf_clk3_a2_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
+ .clks = sm8750_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8750_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -878,9 +932,12 @@ static int clk_rpmh_probe(struct platform_device *pdev)
}
static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,qcs615-rpmh-clk", .data = &clk_rpmh_qcs615},
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
{ .compatible = "qcom,sa8775p-rpmh-clk", .data = &clk_rpmh_sa8775p},
+ { .compatible = "qcom,sar2130p-rpmh-clk", .data = &clk_rpmh_sar2130p},
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
+ { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
@@ -896,7 +953,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450},
{ .compatible = "qcom,sm8550-rpmh-clk", .data = &clk_rpmh_sm8550},
{ .compatible = "qcom,sm8650-rpmh-clk", .data = &clk_rpmh_sm8650},
- { .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
+ { .compatible = "qcom,sm8750-rpmh-clk", .data = &clk_rpmh_sm8750},
{ .compatible = "qcom,x1e80100-rpmh-clk", .data = &clk_rpmh_x1e80100},
{ }
};
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 45c5255bcd11..29ef08a9d50b 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -4,6 +4,7 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -309,10 +310,10 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
unsigned long active_rate, sleep_rate;
int ret;
- mutex_lock(&rpm_smd_clk_lock);
+ guard(mutex)(&rpm_smd_clk_lock);
if (!r->rate)
- goto out;
+ return;
/* Take peer clock's rate into account only if it's enabled. */
if (peer->enabled)
@@ -322,17 +323,14 @@ static void clk_smd_rpm_unprepare(struct clk_hw *hw)
active_rate = r->branch ? !!peer_rate : peer_rate;
ret = clk_smd_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return;
sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return;
r->enabled = false;
-
-out:
- mutex_unlock(&rpm_smd_clk_lock);
}
static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -345,10 +343,10 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long peer_rate = 0, peer_sleep_rate = 0;
int ret = 0;
- mutex_lock(&rpm_smd_clk_lock);
+ guard(mutex)(&rpm_smd_clk_lock);
if (!r->enabled)
- goto out;
+ return 0;
to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
@@ -360,19 +358,16 @@ static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
active_rate = max(this_rate, peer_rate);
ret = clk_smd_rpm_set_rate_active(r, active_rate);
if (ret)
- goto out;
+ return ret;
sleep_rate = max(this_sleep_rate, peer_sleep_rate);
ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
if (ret)
- goto out;
+ return ret;
r->rate = rate;
-out:
- mutex_unlock(&rpm_smd_clk_lock);
-
- return ret;
+ return 0;
}
static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -700,6 +695,60 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8936 = {
.num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
};
+static struct clk_smd_rpm *msm8937_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8937 = {
+ .clks = msm8937_clks,
+ .num_clks = ARRAY_SIZE(msm8937_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+};
+
+static struct clk_smd_rpm *msm8940_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
+ [RPM_SMD_IPA_CLK] = &clk_smd_rpm_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &clk_smd_rpm_ipa_a_clk,
+ [RPM_SMD_QDSS_CLK] = &clk_smd_rpm_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &clk_smd_rpm_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &clk_smd_rpm_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &clk_smd_rpm_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &clk_smd_rpm_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &clk_smd_rpm_bb_clk2_a,
+ [RPM_SMD_RF_CLK2] = &clk_smd_rpm_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &clk_smd_rpm_rf_clk2_a,
+ [RPM_SMD_DIV_CLK2] = &clk_smd_rpm_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &clk_smd_rpm_div_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &clk_smd_rpm_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &clk_smd_rpm_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &clk_smd_rpm_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &clk_smd_rpm_bb_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8940 = {
+ .clks = msm8940_clks,
+ .num_clks = ARRAY_SIZE(msm8940_clks),
+ .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks,
+ .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks),
+};
+
static struct clk_smd_rpm *msm8974_clks[] = {
[RPM_SMD_XO_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo,
[RPM_SMD_XO_A_CLK_SRC] = &clk_smd_rpm_branch_bi_tcxo_a,
@@ -1216,6 +1265,8 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8917", .data = &rpm_clk_msm8917 },
{ .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 },
+ { .compatible = "qcom,rpmcc-msm8937", .data = &rpm_clk_msm8937 },
+ { .compatible = "qcom,rpmcc-msm8940", .data = &rpm_clk_msm8940 },
{ .compatible = "qcom,rpmcc-msm8953", .data = &rpm_clk_msm8953 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
{ .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 },
diff --git a/drivers/clk/qcom/clk-spmi-pmic-div.c b/drivers/clk/qcom/clk-spmi-pmic-div.c
index f394031eb0e5..41a0a4f3b4fb 100644
--- a/drivers/clk/qcom/clk-spmi-pmic-div.c
+++ b/drivers/clk/qcom/clk-spmi-pmic-div.c
@@ -3,6 +3,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -140,30 +141,26 @@ static int clk_spmi_pmic_div_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clkdiv *clkdiv = to_clkdiv(hw);
unsigned int div_factor = div_to_div_factor(parent_rate / rate);
- unsigned long flags;
bool enabled;
int ret;
- spin_lock_irqsave(&clkdiv->lock, flags);
+ guard(spinlock_irqsave)(&clkdiv->lock);
+
enabled = is_spmi_pmic_clkdiv_enabled(clkdiv);
if (enabled) {
ret = spmi_pmic_clkdiv_set_enable_state(clkdiv, false);
if (ret)
- goto unlock;
+ return ret;
}
ret = regmap_update_bits(clkdiv->regmap, clkdiv->base + REG_DIV_CTL1,
DIV_CTL1_DIV_FACTOR_MASK, div_factor);
if (ret)
- goto unlock;
+ return ret;
if (enabled)
ret = __spmi_pmic_clkdiv_set_enable_state(clkdiv, true,
div_factor);
-
-unlock:
- spin_unlock_irqrestore(&clkdiv->lock, flags);
-
return ret;
}
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 7e57f8fe8ea6..7ace5d7f5836 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -35,7 +35,7 @@ struct qcom_cc_desc {
size_t num_gdscs;
struct clk_hw **clk_hws;
size_t num_clk_hws;
- struct qcom_icc_hws_data *icc_hws;
+ const struct qcom_icc_hws_data *icc_hws;
size_t num_icc_hws;
unsigned int icc_first_node_id;
};
diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
index 449ffea2295d..d7bb1399e102 100644
--- a/drivers/clk/qcom/dispcc-qcm2290.c
+++ b/drivers/clk/qcom/dispcc-qcm2290.c
@@ -40,8 +40,6 @@ static const struct pll_vco spark_vco[] = {
/* 768MHz configuration */
static const struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x28,
- .alpha = 0x0,
- .alpha_en_mask = BIT(24),
.vco_val = 0x2 << 20,
.vco_mask = GENMASK(21, 20),
.main_output_mask = BIT(0),
diff --git a/drivers/clk/qcom/dispcc-sm6115.c b/drivers/clk/qcom/dispcc-sm6115.c
index 939887f82ecc..2b236d52b29f 100644
--- a/drivers/clk/qcom/dispcc-sm6115.c
+++ b/drivers/clk/qcom/dispcc-sm6115.c
@@ -48,8 +48,6 @@ static const struct pll_vco spark_vco[] = {
/* 768MHz configuration */
static const struct alpha_pll_config disp_cc_pll0_config = {
.l = 0x28,
- .alpha = 0x0,
- .alpha_en_mask = BIT(24),
.vco_val = 0x2 << 20,
.vco_mask = GENMASK(21, 20),
.main_output_mask = BIT(0),
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index 50facb36701a..2bc6b5f99f57 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -187,13 +187,12 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
.cmd_rcgr = 0x1144,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
.freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "disp_cc_mdss_dp_aux_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
index d1d3f60789ee..a1f183e6c636 100644
--- a/drivers/clk/qcom/dispcc-sm8450.c
+++ b/drivers/clk/qcom/dispcc-sm8450.c
@@ -85,6 +85,29 @@ static const struct alpha_pll_config disp_cc_pll0_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_init_data sm8475_disp_cc_pll0_init = {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_ole_ops,
+};
+
static struct clk_alpha_pll disp_cc_pll0 = {
.offset = 0x0,
.vco_table = lucid_evo_vco,
@@ -112,6 +135,29 @@ static const struct alpha_pll_config disp_cc_pll1_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_init_data sm8475_disp_cc_pll1_init = {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_ole_ops,
+};
+
static struct clk_alpha_pll disp_cc_pll1 = {
.offset = 0x1000,
.vco_table = lucid_evo_vco,
@@ -1746,6 +1792,7 @@ static struct qcom_cc_desc disp_cc_sm8450_desc = {
static const struct of_device_id disp_cc_sm8450_match_table[] = {
{ .compatible = "qcom,sm8450-dispcc" },
+ { .compatible = "qcom,sm8475-dispcc" },
{ }
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8450_match_table);
@@ -1769,8 +1816,21 @@ static int disp_cc_sm8450_probe(struct platform_device *pdev)
goto err_put_rpm;
}
- clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
- clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-dispcc")) {
+ /* Update DISPCC PLL0 */
+ disp_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ disp_cc_pll0.clkr.hw.init = &sm8475_disp_cc_pll0_init;
+
+ /* Update DISPCC PLL1 */
+ disp_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ disp_cc_pll1.clkr.hw.init = &sm8475_disp_cc_pll1_init;
+
+ clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &sm8475_disp_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&disp_cc_pll1, regmap, &sm8475_disp_cc_pll1_config);
+ } else {
+ clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ }
/* Enable clock gating for MDP clocks */
regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
@@ -1802,5 +1862,5 @@ static struct platform_driver disp_cc_sm8450_driver = {
module_platform_driver(disp_cc_sm8450_driver);
-MODULE_DESCRIPTION("QTI DISPCC SM8450 Driver");
+MODULE_DESCRIPTION("QTI DISPCC SM8450 / SM8475 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
index 7f9021ca0ecb..e41d4104d770 100644
--- a/drivers/clk/qcom/dispcc-sm8550.c
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -75,7 +75,7 @@ static struct pll_vco lucid_ole_vco[] = {
{ 249600000, 2000000000, 0 },
};
-static const struct alpha_pll_config disp_cc_pll0_config = {
+static struct alpha_pll_config disp_cc_pll0_config = {
.l = 0xd,
.alpha = 0x6492,
.config_ctl_val = 0x20485699,
@@ -106,7 +106,7 @@ static struct clk_alpha_pll disp_cc_pll0 = {
},
};
-static const struct alpha_pll_config disp_cc_pll1_config = {
+static struct alpha_pll_config disp_cc_pll1_config = {
.l = 0x1f,
.alpha = 0x4000,
.config_ctl_val = 0x20485699,
@@ -594,6 +594,13 @@ static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sar2130p[] = {
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sm8650[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
@@ -1750,6 +1757,7 @@ static struct qcom_cc_desc disp_cc_sm8550_desc = {
};
static const struct of_device_id disp_cc_sm8550_match_table[] = {
+ { .compatible = "qcom,sar2130p-dispcc" },
{ .compatible = "qcom,sm8550-dispcc" },
{ .compatible = "qcom,sm8650-dispcc" },
{ }
@@ -1780,6 +1788,12 @@ static int disp_cc_sm8550_probe(struct platform_device *pdev)
disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sm8650;
disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr.hw.init->parent_hws[0] =
&disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw;
+ } else if (of_device_is_compatible(pdev->dev.of_node, "qcom,sar2130p-dispcc")) {
+ disp_cc_pll0_config.l = 0x1f;
+ disp_cc_pll0_config.alpha = 0x4000;
+ disp_cc_pll0_config.user_ctl_val = 0x1;
+ disp_cc_pll1_config.user_ctl_val = 0x1;
+ disp_cc_mdss_mdp_clk_src.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src_sar2130p;
}
clk_lucid_ole_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
diff --git a/drivers/clk/qcom/dispcc-sm8750.c b/drivers/clk/qcom/dispcc-sm8750.c
new file mode 100644
index 000000000000..0358dff91da5
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm8750.c
@@ -0,0 +1,1963 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include <dt-bindings/clock/qcom,sm8750-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+};
+
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DISP_CC_PLL2_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco pongo_elu_vco[] = {
+ { 38400000, 38400000, 0 },
+};
+
+static const struct pll_vco taycan_elu_vco[] = {
+ { 249600000, 2500000000, 0 },
+};
+
+static struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x19660387,
+ .config_ctl_hi_val = 0x098060a0,
+ .config_ctl_hi1_val = 0xb416cb20,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = taycan_elu_vco,
+ .num_vco = ARRAY_SIZE(taycan_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_elu_ops,
+ },
+ },
+};
+
+static struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x19660387,
+ .config_ctl_hi_val = 0x098060a0,
+ .config_ctl_hi1_val = 0xb416cb20,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = taycan_elu_vco,
+ .num_vco = ARRAY_SIZE(taycan_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config disp_cc_pll2_config = {
+ .l = 0x493,
+ .alpha = 0x0,
+ .config_ctl_val = 0x60000f68,
+ .config_ctl_hi_val = 0x0001c808,
+ .config_ctl_hi1_val = 0x00000000,
+ .config_ctl_hi2_val = 0x040082f4,
+ .test_ctl_val = 0x00008000,
+ .test_ctl_hi_val = 0x0080c496,
+ .test_ctl_hi1_val = 0x40100180,
+ .test_ctl_hi2_val = 0x441001bc,
+ .test_ctl_hi3_val = 0x002003d8,
+ .user_ctl_val = 0x00000400,
+ .user_ctl_hi_val = 0x00e50302,
+};
+
+static struct clk_alpha_pll disp_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = pongo_elu_vco,
+ .num_vco = ARRAY_SIZE(pongo_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_PONGO_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_SLEEP_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_pongo_elu_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DISP_CC_PLL2_OUT_MAIN, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .hw = &disp_cc_pll2.clkr.hw },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL2_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_11[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_11[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_esync0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_esync0_clk_src = {
+ .cmd_rcgr = 0x80c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_esync1_clk_src = {
+ .cmd_rcgr = 0x80d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8360,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_8,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8298,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x827c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x8264,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x82fc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x82b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x82cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x82e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x8348,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x832c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x8314,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(156000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(207000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(337000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(417000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(532000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(575000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_9,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ /*
+ * TODO: Downstream does not manage this clock directly, but
+ * instead places votes via a new hardware block called "cesta".
+ * It is not clear whether that approach should be taken here
+ * instead of manual control.
+ */
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x8120,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk2_clk_src = {
+ .cmd_rcgr = 0x8138,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8168,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_osc_clk_src[] = {
+ F(38400000, P_DISP_CC_PLL2_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_osc_clk_src = {
+ .cmd_rcgr = 0x80f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_10,
+ .freq_tbl = ftbl_disp_cc_osc_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk_src",
+ .parent_data = disp_cc_parent_data_10,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_11,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_11,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8198,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x81b4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x8200,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8294,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x82c8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x8344,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_esync0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_esync1_clk = {
+ .halt_reg = 0x80bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_shift_clk = {
+ .halt_reg = 0xe060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x80a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
+ .halt_reg = 0x80ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk2_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_osc_clk = {
+ .halt_reg = 0x80b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_osc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
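+/*
+ * MDSS power domains. POLL_CFG_GDSCR polls the CFG register for the
+ * power state, HW_CTRL hands on/off control to the display hardware,
+ * and RETAIN_FF_ENABLE keeps register contents across power collapse.
+ */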
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc mdss_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | HW_CTRL | RETAIN_FF_ENABLE,
+};
+
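+/* clk_regmap pointers indexed by the DISP_CC_* IDs from the DT binding */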
+static struct clk_regmap *disp_cc_sm8750_clocks[] = {
+ [DISP_CC_ESYNC0_CLK] = &disp_cc_esync0_clk.clkr,
+ [DISP_CC_ESYNC0_CLK_SRC] = &disp_cc_esync0_clk_src.clkr,
+ [DISP_CC_ESYNC1_CLK] = &disp_cc_esync1_clk.clkr,
+ [DISP_CC_ESYNC1_CLK_SRC] = &disp_cc_esync1_clk_src.clkr,
+ [DISP_CC_MDSS_ACCU_SHIFT_CLK] = &disp_cc_mdss_accu_shift_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK] = &disp_cc_mdss_pclk2_clk.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK_SRC] = &disp_cc_mdss_pclk2_clk_src.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_OSC_CLK] = &disp_cc_osc_clk.clkr,
+ [DISP_CC_OSC_CLK_SRC] = &disp_cc_osc_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_PLL2] = &disp_cc_pll2.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
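+/* Block Control Registers (BCRs) exposed as reset lines */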
+static const struct qcom_reset_map disp_cc_sm8750_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_sm8750_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
+};
+
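+/* 32-bit registers at a 4-byte stride; fast_io selects spinlock locking */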
+static const struct regmap_config disp_cc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11014,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_sm8750_desc = {
+ .config = &disp_cc_sm8750_regmap_config,
+ .clks = disp_cc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm8750_clocks),
+ .resets = disp_cc_sm8750_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm8750_resets),
+ .gdscs = disp_cc_sm8750_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm8750_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm8750_match_table);
+
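+/*
+ * Keep the controller powered via runtime PM while the PLLs are
+ * configured and the clocks registered, then drop the reference.
+ */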
+static int disp_cc_sm8750_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm8750_desc);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ goto err_put_rpm;
+ }
+
+ clk_taycan_elu_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_taycan_elu_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+ clk_pongo_elu_pll_configure(&disp_cc_pll2, regmap, &disp_cc_pll2_config);
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0xe07c); /* DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xe05c); /* DISP_CC_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc00c); /* DISP_CC_MDSS_RSCC_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc008); /* DISP_CC_MDSS_RSCC_VSYNC_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_sm8750_desc, regmap);
+ if (ret)
+ goto err_put_rpm;
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+err_put_rpm:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_sm8750_driver = {
+ .probe = disp_cc_sm8750_probe,
+ .driver = {
+ .name = "disp_cc-sm8750",
+ .of_match_table = disp_cc_sm8750_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_sm8750_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc0-sa8775p.c b/drivers/clk/qcom/dispcc0-sa8775p.c
new file mode 100644
index 000000000000..6e399b5f1383
--- /dev/null
+++ b/drivers/clk/qcom/dispcc0-sa8775p.c
@@ -0,0 +1,1481 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sa8775p-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
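+/* Positions of the parent clocks in the DT "clocks" property */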
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_MDSS_0_DISP_CC_PLL0_OUT_MAIN,
+ P_MDSS_0_DISP_CC_PLL1_OUT_EVEN,
+ P_MDSS_0_DISP_CC_PLL1_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
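+/* Lucid EVO VCO operating range: 249.6 MHz to 2.02 GHz */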
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+static const struct alpha_pll_config mdss_0_disp_cc_pll0_config = {
+ .l = 0x3a,
+ .alpha = 0x9800,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll mdss_0_disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config mdss_0_disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll mdss_0_disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
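+/* Map each P_* software parent to its hardware mux select value */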
+static const struct parent_map disp_cc_0_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_0_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_0_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_2_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_0_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_0_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_0_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_MDSS_0_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &mdss_0_disp_cc_pll1.clkr.hw },
+ { .hw = &mdss_0_disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_0_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_MDSS_0_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &mdss_0_disp_cc_pll0.clkr.hw },
+ { .hw = &mdss_0_disp_cc_pll1.clkr.hw },
+ { .hw = &mdss_0_disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_0_parent_map_7[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_0_parent_data_7[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
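+/* F(frequency, parent, pre-divider, M, N) entries for each RCG */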
+static const struct freq_tbl ftbl_mdss_0_disp_cc_mdss_ahb_clk_src[] = {
+ F(37500000, P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_MDSS_0_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_5,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_0_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_0_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x80ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_1,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_0_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_1,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_0_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_2,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_0_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_crypto_clk_src = {
+ .cmd_rcgr = 0x8170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_3,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_crypto_clk_src",
+ .parent_data = disp_cc_0_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x8154,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_3,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_0_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8188,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x81a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src = {
+ .cmd_rcgr = 0x826c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src = {
+ .cmd_rcgr = 0x8284,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_2,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_0_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_crypto_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_3,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_crypto_clk_src",
+ .parent_data = disp_cc_0_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x8200,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_3,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_0_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_0,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_0_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x8124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_4,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_0_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x813c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_4,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_0_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_0_disp_cc_mdss_mdp_clk_src[] = {
+ F(375000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(500000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(575000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(650000000, P_MDSS_0_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_6,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_0_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x808c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_1,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_0_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x80a4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_1,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_0_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x80d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_2,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_0_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
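+/* The sleep RCG only runs at the 32 kHz sleep clock rate */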
+static const struct freq_tbl ftbl_mdss_0_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xc058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_7,
+ .freq_tbl = ftbl_mdss_0_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_0_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_0_disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xc03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_0_parent_map_2,
+ .freq_tbl = ftbl_mdss_0_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_xo_clk_src",
+ .parent_data = disp_cc_0_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_0_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
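+/*
+ * Post-dividers on the RCG outputs. The _ro_ops variants are read-only:
+ * Linux observes the divider but never reprograms it.
+ */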
+static struct clk_regmap_div mdss_0_disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8104,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_0_disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x8120,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_0_disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x816c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_0_disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8218,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
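+/* Simple gate (branch) clocks; each toggles BIT(0) of its CBCR */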
+static struct clk_branch mdss_0_disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel2_clk = {
+ .halt_reg = 0x8264,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8264,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_pixel3_clk = {
+ .halt_reg = 0x8268,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8268,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_pixel3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0x8014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_pll_lock_monitor_clk = {
+ .halt_reg = 0xe000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_pll_lock_monitor_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xa00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xa008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_0_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_0_disp_cc_sm_obs_clk = {
+ .halt_reg = 0x11014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x11014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_0_disp_cc_sm_obs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
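+/* MDSS core power domains; HW_CTRL lets the display hardware collapse/restore them autonomously */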
+static struct gdsc mdss_0_disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_0_disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+};
+
+static struct gdsc mdss_0_disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xd000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_0_disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+};
+
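+/* Clocks exported to consumers, indexed by the qcom,sa8775p-dispcc.h binding IDs */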
+static struct clk_regmap *disp_cc_0_sa8775p_clocks[] = {
+ [MDSS_DISP_CC_MDSS_AHB1_CLK] = &mdss_0_disp_cc_mdss_ahb1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_AHB_CLK] = &mdss_0_disp_cc_mdss_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_AHB_CLK_SRC] = &mdss_0_disp_cc_mdss_ahb_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_CLK] = &mdss_0_disp_cc_mdss_byte0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC] = &mdss_0_disp_cc_mdss_byte0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &mdss_0_disp_cc_mdss_byte0_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK] = &mdss_0_disp_cc_mdss_byte0_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_CLK] = &mdss_0_disp_cc_mdss_byte1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC] = &mdss_0_disp_cc_mdss_byte1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &mdss_0_disp_cc_mdss_byte1_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK] = &mdss_0_disp_cc_mdss_byte1_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK] = &mdss_0_disp_cc_mdss_dptx0_aux_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &mdss_0_disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_crypto_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK] = &mdss_0_disp_cc_mdss_dptx0_link_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] =
+ &mdss_0_disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &mdss_0_disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel2_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel2_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK] = &mdss_0_disp_cc_mdss_dptx0_pixel3_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx0_pixel3_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &mdss_0_disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK] = &mdss_0_disp_cc_mdss_dptx1_aux_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &mdss_0_disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_crypto_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK] = &mdss_0_disp_cc_mdss_dptx1_link_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] =
+ &mdss_0_disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &mdss_0_disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &mdss_0_disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &mdss_0_disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &mdss_0_disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &mdss_0_disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC0_CLK] = &mdss_0_disp_cc_mdss_esc0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC0_CLK_SRC] = &mdss_0_disp_cc_mdss_esc0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_ESC1_CLK] = &mdss_0_disp_cc_mdss_esc1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC1_CLK_SRC] = &mdss_0_disp_cc_mdss_esc1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_MDP1_CLK] = &mdss_0_disp_cc_mdss_mdp1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_CLK] = &mdss_0_disp_cc_mdss_mdp_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_CLK_SRC] = &mdss_0_disp_cc_mdss_mdp_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_LUT1_CLK] = &mdss_0_disp_cc_mdss_mdp_lut1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_LUT_CLK] = &mdss_0_disp_cc_mdss_mdp_lut_clk.clkr,
+ [MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &mdss_0_disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK0_CLK] = &mdss_0_disp_cc_mdss_pclk0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC] = &mdss_0_disp_cc_mdss_pclk0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK1_CLK] = &mdss_0_disp_cc_mdss_pclk1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC] = &mdss_0_disp_cc_mdss_pclk1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK] = &mdss_0_disp_cc_mdss_pll_lock_monitor_clk.clkr,
+ [MDSS_DISP_CC_MDSS_RSCC_AHB_CLK] = &mdss_0_disp_cc_mdss_rscc_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK] = &mdss_0_disp_cc_mdss_rscc_vsync_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC1_CLK] = &mdss_0_disp_cc_mdss_vsync1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC_CLK] = &mdss_0_disp_cc_mdss_vsync_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC] = &mdss_0_disp_cc_mdss_vsync_clk_src.clkr,
+ [MDSS_DISP_CC_PLL0] = &mdss_0_disp_cc_pll0.clkr,
+ [MDSS_DISP_CC_PLL1] = &mdss_0_disp_cc_pll1.clkr,
+ [MDSS_DISP_CC_SLEEP_CLK_SRC] = &mdss_0_disp_cc_sleep_clk_src.clkr,
+ [MDSS_DISP_CC_SM_OBS_CLK] = &mdss_0_disp_cc_sm_obs_clk.clkr,
+ [MDSS_DISP_CC_XO_CLK_SRC] = &mdss_0_disp_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *disp_cc_0_sa8775p_gdscs[] = {
+ [MDSS_DISP_CC_MDSS_CORE_GDSC] = &mdss_0_disp_cc_mdss_core_gdsc,
+ [MDSS_DISP_CC_MDSS_CORE_INT2_GDSC] = &mdss_0_disp_cc_mdss_core_int2_gdsc,
+};
+
+static const struct qcom_reset_map disp_cc_0_sa8775p_resets[] = {
+ [MDSS_DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [MDSS_DISP_CC_MDSS_RSCC_BCR] = { 0xa000 },
+};
+
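+/* MMIO register map; fast_io makes regmap use spinlock rather than mutex locking */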
+static const struct regmap_config disp_cc_0_sa8775p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x12414,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_0_sa8775p_desc = {
+ .config = &disp_cc_0_sa8775p_regmap_config,
+ .clks = disp_cc_0_sa8775p_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_0_sa8775p_clocks),
+ .resets = disp_cc_0_sa8775p_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_0_sa8775p_resets),
+ .gdscs = disp_cc_0_sa8775p_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_0_sa8775p_gdscs),
+};
+
+static const struct of_device_id disp_cc_0_sa8775p_match_table[] = {
+ { .compatible = "qcom,sa8775p-dispcc0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_0_sa8775p_match_table);
+
+static int disp_cc_0_sa8775p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
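+	/* Map the controller's registers; on failure drop the runtime PM reference taken above */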
+ regmap = qcom_cc_map(pdev, &disp_cc_0_sa8775p_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
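+	/* Program both display PLLs with their Lucid EVO settings before registering the clocks */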
+ clk_lucid_evo_pll_configure(&mdss_0_disp_cc_pll0, regmap, &mdss_0_disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&mdss_0_disp_cc_pll1, regmap, &mdss_0_disp_cc_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xc070); /* MDSS_0_DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc054); /* MDSS_0_DISP_CC_XO_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_0_sa8775p_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_0_sa8775p_driver = {
+ .probe = disp_cc_0_sa8775p_probe,
+ .driver = {
+ .name = "dispcc0-sa8775p",
+ .of_match_table = disp_cc_0_sa8775p_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_0_sa8775p_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC0 SA8775P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc1-sa8775p.c b/drivers/clk/qcom/dispcc1-sa8775p.c
new file mode 100644
index 000000000000..30ccea59415a
--- /dev/null
+++ b/drivers/clk/qcom/dispcc1-sa8775p.c
@@ -0,0 +1,1481 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sa8775p-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+};
+
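+/*
+ * DT_* values index the firmware-provided 'clocks' list; the P_* values
+ * below are logical parent identifiers referenced by the parent_map tables.
+ */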
+enum {
+ P_BI_TCXO,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_MDSS_1_DISP_CC_PLL0_OUT_MAIN,
+ P_MDSS_1_DISP_CC_PLL1_OUT_EVEN,
+ P_MDSS_1_DISP_CC_PLL1_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
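+/* Assuming the usual 19.2 MHz TCXO: L = 0x3a, ALPHA = 0x9800 -> 58.59375 * 19.2 MHz = 1125 MHz */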
+static const struct alpha_pll_config mdss_1_disp_cc_pll0_config = {
+ .l = 0x3a,
+ .alpha = 0x9800,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll mdss_1_disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
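+/*
+ * L = 0x1f, ALPHA = 0x4000 -> 31.25 * 19.2 MHz = 600 MHz (again assuming a
+ * 19.2 MHz TCXO), the source behind the 37.5/75 MHz AHB rates below.
+ */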
+static const struct alpha_pll_config mdss_1_disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll mdss_1_disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
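+/*
+ * Each parent_map entry pairs a P_* parent with its hardware mux select value;
+ * the matching parent_data array lists the same parents in the same order.
+ * Note that both PLL1 outputs are modeled by the same clk_hw here.
+ */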
+static const struct parent_map disp_cc_1_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_1_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_1_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_2_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_1_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_1_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_1_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_MDSS_1_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &mdss_1_disp_cc_pll1.clkr.hw },
+ { .hw = &mdss_1_disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_1_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_MDSS_1_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &mdss_1_disp_cc_pll0.clkr.hw },
+ { .hw = &mdss_1_disp_cc_pll1.clkr.hw },
+ { .hw = &mdss_1_disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_1_parent_map_7[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_1_parent_data_7_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_ahb_clk_src[] = {
+ F(37500000, P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_MDSS_1_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_5,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_1_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x80ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_1,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_1_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_1,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_1_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_2,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_1_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_crypto_clk_src = {
+ .cmd_rcgr = 0x8170,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_3,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_crypto_clk_src",
+ .parent_data = disp_cc_1_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x8154,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_3,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_1_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8188,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x81a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src = {
+ .cmd_rcgr = 0x826c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src = {
+ .cmd_rcgr = 0x8284,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_2,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_1_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_crypto_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_3,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_crypto_clk_src",
+ .parent_data = disp_cc_1_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x8200,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_3,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_1_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_0,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_1_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x8124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_4,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_1_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x813c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_4,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_1_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_1_disp_cc_mdss_mdp_clk_src[] = {
+ F(375000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(500000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(575000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(650000000, P_MDSS_1_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
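+/*
+ * Every entry keeps the RCG divider at /3; with CLK_SET_RATE_PARENT the rate
+ * change presumably happens by rescaling PLL0 to 3x the target (e.g.
+ * 3 * 375 MHz = 1125 MHz), which stays within the Lucid EVO VCO range.
+ */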
+static struct clk_rcg2 mdss_1_disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_6,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_1_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x808c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_1,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_1_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x80a4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_1,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_1_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x80d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_2,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_1_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdss_1_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xc058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_7,
+ .freq_tbl = ftbl_mdss_1_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_1_parent_data_7_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_7_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 mdss_1_disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xc03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_1_parent_map_2,
+ .freq_tbl = ftbl_mdss_1_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_xo_clk_src",
+ .parent_data = disp_cc_1_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_1_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_1_disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8104,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_1_disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x8120,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
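+/* The DP link dividers use read-only ops: the framework reads the divider but never programs it */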
+static struct clk_regmap_div mdss_1_disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x816c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div mdss_1_disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8218,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel2_clk = {
+ .halt_reg = 0x8264,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8264,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_pixel3_clk = {
+ .halt_reg = 0x8268,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8268,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_pixel3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0x8014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_pll_lock_monitor_clk = {
+ .halt_reg = 0xe000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_pll_lock_monitor_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xa00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xa008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdss_1_disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_1_disp_cc_sm_obs_clk = {
+ .halt_reg = 0x11014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x11014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "mdss_1_disp_cc_sm_obs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_1_disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_1_disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+};
+
+static struct gdsc mdss_1_disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xd000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "mdss_1_disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL,
+};
+
+static struct clk_regmap *disp_cc_1_sa8775p_clocks[] = {
+ [MDSS_DISP_CC_MDSS_AHB1_CLK] = &mdss_1_disp_cc_mdss_ahb1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_AHB_CLK] = &mdss_1_disp_cc_mdss_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_AHB_CLK_SRC] = &mdss_1_disp_cc_mdss_ahb_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_CLK] = &mdss_1_disp_cc_mdss_byte0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC] = &mdss_1_disp_cc_mdss_byte0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &mdss_1_disp_cc_mdss_byte0_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK] = &mdss_1_disp_cc_mdss_byte0_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_CLK] = &mdss_1_disp_cc_mdss_byte1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC] = &mdss_1_disp_cc_mdss_byte1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &mdss_1_disp_cc_mdss_byte1_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK] = &mdss_1_disp_cc_mdss_byte1_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK] = &mdss_1_disp_cc_mdss_dptx0_aux_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &mdss_1_disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_crypto_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK] = &mdss_1_disp_cc_mdss_dptx0_link_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] =
+ &mdss_1_disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &mdss_1_disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel2_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel2_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK] = &mdss_1_disp_cc_mdss_dptx0_pixel3_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx0_pixel3_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &mdss_1_disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK] = &mdss_1_disp_cc_mdss_dptx1_aux_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &mdss_1_disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_crypto_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK] = &mdss_1_disp_cc_mdss_dptx1_link_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] =
+ &mdss_1_disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &mdss_1_disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &mdss_1_disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &mdss_1_disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &mdss_1_disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &mdss_1_disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC0_CLK] = &mdss_1_disp_cc_mdss_esc0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC0_CLK_SRC] = &mdss_1_disp_cc_mdss_esc0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_ESC1_CLK] = &mdss_1_disp_cc_mdss_esc1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_ESC1_CLK_SRC] = &mdss_1_disp_cc_mdss_esc1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_MDP1_CLK] = &mdss_1_disp_cc_mdss_mdp1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_CLK] = &mdss_1_disp_cc_mdss_mdp_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_CLK_SRC] = &mdss_1_disp_cc_mdss_mdp_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_LUT1_CLK] = &mdss_1_disp_cc_mdss_mdp_lut1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_MDP_LUT_CLK] = &mdss_1_disp_cc_mdss_mdp_lut_clk.clkr,
+ [MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &mdss_1_disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK0_CLK] = &mdss_1_disp_cc_mdss_pclk0_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC] = &mdss_1_disp_cc_mdss_pclk0_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK1_CLK] = &mdss_1_disp_cc_mdss_pclk1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC] = &mdss_1_disp_cc_mdss_pclk1_clk_src.clkr,
+ [MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK] = &mdss_1_disp_cc_mdss_pll_lock_monitor_clk.clkr,
+ [MDSS_DISP_CC_MDSS_RSCC_AHB_CLK] = &mdss_1_disp_cc_mdss_rscc_ahb_clk.clkr,
+ [MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK] = &mdss_1_disp_cc_mdss_rscc_vsync_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC1_CLK] = &mdss_1_disp_cc_mdss_vsync1_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC_CLK] = &mdss_1_disp_cc_mdss_vsync_clk.clkr,
+ [MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC] = &mdss_1_disp_cc_mdss_vsync_clk_src.clkr,
+ [MDSS_DISP_CC_PLL0] = &mdss_1_disp_cc_pll0.clkr,
+ [MDSS_DISP_CC_PLL1] = &mdss_1_disp_cc_pll1.clkr,
+ [MDSS_DISP_CC_SLEEP_CLK_SRC] = &mdss_1_disp_cc_sleep_clk_src.clkr,
+ [MDSS_DISP_CC_SM_OBS_CLK] = &mdss_1_disp_cc_sm_obs_clk.clkr,
+ [MDSS_DISP_CC_XO_CLK_SRC] = &mdss_1_disp_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *disp_cc_1_sa8775p_gdscs[] = {
+ [MDSS_DISP_CC_MDSS_CORE_GDSC] = &mdss_1_disp_cc_mdss_core_gdsc,
+ [MDSS_DISP_CC_MDSS_CORE_INT2_GDSC] = &mdss_1_disp_cc_mdss_core_int2_gdsc,
+};
+
+static const struct qcom_reset_map disp_cc_1_sa8775p_resets[] = {
+ [MDSS_DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [MDSS_DISP_CC_MDSS_RSCC_BCR] = { 0xa000 },
+};
+
+static const struct regmap_config disp_cc_1_sa8775p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x12414,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_1_sa8775p_desc = {
+ .config = &disp_cc_1_sa8775p_regmap_config,
+ .clks = disp_cc_1_sa8775p_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_1_sa8775p_clocks),
+ .resets = disp_cc_1_sa8775p_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_1_sa8775p_resets),
+ .gdscs = disp_cc_1_sa8775p_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_1_sa8775p_gdscs),
+};
+
+static const struct of_device_id disp_cc_1_sa8775p_match_table[] = {
+ { .compatible = "qcom,sa8775p-dispcc1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_1_sa8775p_match_table);
+
+static int disp_cc_1_sa8775p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_1_sa8775p_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_evo_pll_configure(&mdss_1_disp_cc_pll0, regmap, &mdss_1_disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&mdss_1_disp_cc_pll1, regmap, &mdss_1_disp_cc_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xc070); /* MDSS_1_DISP_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0xc054); /* MDSS_1_DISP_CC_XO_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_1_sa8775p_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_1_sa8775p_driver = {
+ .probe = disp_cc_1_sa8775p_probe,
+ .driver = {
+ .name = "dispcc1-sa8775p",
+ .of_match_table = disp_cc_1_sa8775p_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_1_sa8775p_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC1 SA8775P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq5332.c b/drivers/clk/qcom/gcc-ipq5332.c
index 9536b2b7d07c..9246e97d785a 100644
--- a/drivers/clk/qcom/gcc-ipq5332.c
+++ b/drivers/clk/qcom/gcc-ipq5332.c
@@ -2185,150 +2185,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
},
};
-static struct clk_branch gcc_q6_ahb_clk = {
- .halt_reg = 0x25014,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x25014,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_ahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_wcss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6_ahb_s_clk = {
- .halt_reg = 0x25018,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x25018,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_ahb_s_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_wcss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6_axim_clk = {
- .halt_reg = 0x2500c,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x2500c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_axim_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_q6_axim_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6_axis_clk = {
- .halt_reg = 0x25010,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x25010,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_axis_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_system_noc_bfdcd_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6_tsctr_1to2_clk = {
- .halt_reg = 0x25020,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x25020,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_tsctr_1to2_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_tsctr_div2_clk_src.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6ss_atbm_clk = {
- .halt_reg = 0x2501c,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x2501c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_atbm_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_at_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6ss_pclkdbg_clk = {
- .halt_reg = 0x25024,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x25024,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_pclkdbg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_dap_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6ss_trig_clk = {
- .halt_reg = 0x250a0,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x250a0,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_trig_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_dap_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_qdss_at_clk = {
.halt_reg = 0x2d038,
.halt_check = BRANCH_HALT_VOTED,
@@ -2756,24 +2612,6 @@ static struct clk_branch gcc_sys_noc_at_clk = {
},
};
-static struct clk_branch gcc_sys_noc_wcss_ahb_clk = {
- .halt_reg = 0x2e030,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2e030,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_sys_noc_wcss_ahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_wcss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_uniphy0_ahb_clk = {
.halt_reg = 0x16010,
.halt_check = BRANCH_HALT,
@@ -2989,204 +2827,6 @@ static struct clk_branch gcc_usb0_sleep_clk = {
},
};
-static struct clk_branch gcc_wcss_axim_clk = {
- .halt_reg = 0x2505c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2505c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_axim_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_system_noc_bfdcd_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_axis_clk = {
- .halt_reg = 0x25060,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25060,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_axis_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_system_noc_bfdcd_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_apb_bdg_clk = {
- .halt_reg = 0x25048,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25048,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_apb_bdg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_dap_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_apb_clk = {
- .halt_reg = 0x25038,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25038,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_apb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_dap_div_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_atb_bdg_clk = {
- .halt_reg = 0x2504c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2504c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_atb_bdg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_at_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_atb_clk = {
- .halt_reg = 0x2503c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2503c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_atb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_at_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_nts_bdg_clk = {
- .halt_reg = 0x25050,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25050,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_nts_bdg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_tsctr_div2_clk_src.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_nts_clk = {
- .halt_reg = 0x25040,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25040,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_nts_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_qdss_tsctr_div2_clk_src.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_ecahb_clk = {
- .halt_reg = 0x25058,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x25058,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_ecahb_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_wcss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_mst_async_bdg_clk = {
- .halt_reg = 0x2e0b0,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2e0b0,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_mst_async_bdg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_system_noc_bfdcd_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_slv_async_bdg_clk = {
- .halt_reg = 0x2e0b4,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2e0b4,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_slv_async_bdg_clk",
- .parent_hws = (const struct clk_hw*[]) {
- &gcc_system_noc_bfdcd_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_xo_clk = {
.halt_reg = 0x34018,
.halt_check = BRANCH_HALT,
@@ -3362,15 +3002,7 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = {
[GCC_PCNOC_BFDCD_CLK_SRC] = &gcc_pcnoc_bfdcd_clk_src.clkr,
[GCC_PCNOC_LPASS_CLK] = &gcc_pcnoc_lpass_clk.clkr,
[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
- [GCC_Q6_AHB_CLK] = &gcc_q6_ahb_clk.clkr,
- [GCC_Q6_AHB_S_CLK] = &gcc_q6_ahb_s_clk.clkr,
- [GCC_Q6_AXIM_CLK] = &gcc_q6_axim_clk.clkr,
[GCC_Q6_AXIM_CLK_SRC] = &gcc_q6_axim_clk_src.clkr,
- [GCC_Q6_AXIS_CLK] = &gcc_q6_axis_clk.clkr,
- [GCC_Q6_TSCTR_1TO2_CLK] = &gcc_q6_tsctr_1to2_clk.clkr,
- [GCC_Q6SS_ATBM_CLK] = &gcc_q6ss_atbm_clk.clkr,
- [GCC_Q6SS_PCLKDBG_CLK] = &gcc_q6ss_pclkdbg_clk.clkr,
- [GCC_Q6SS_TRIG_CLK] = &gcc_q6ss_trig_clk.clkr,
[GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
[GCC_QDSS_AT_CLK_SRC] = &gcc_qdss_at_clk_src.clkr,
[GCC_QDSS_CFG_AHB_CLK] = &gcc_qdss_cfg_ahb_clk.clkr,
@@ -3400,7 +3032,6 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = {
[GCC_SNOC_PCIE3_2LANE_S_CLK] = &gcc_snoc_pcie3_2lane_s_clk.clkr,
[GCC_SNOC_USB_CLK] = &gcc_snoc_usb_clk.clkr,
[GCC_SYS_NOC_AT_CLK] = &gcc_sys_noc_at_clk.clkr,
- [GCC_SYS_NOC_WCSS_AHB_CLK] = &gcc_sys_noc_wcss_ahb_clk.clkr,
[GCC_SYSTEM_NOC_BFDCD_CLK_SRC] = &gcc_system_noc_bfdcd_clk_src.clkr,
[GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr,
[GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr,
@@ -3421,17 +3052,6 @@ static struct clk_regmap *gcc_ipq5332_clocks[] = {
[GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr,
[GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr,
[GCC_WCSS_AHB_CLK_SRC] = &gcc_wcss_ahb_clk_src.clkr,
- [GCC_WCSS_AXIM_CLK] = &gcc_wcss_axim_clk.clkr,
- [GCC_WCSS_AXIS_CLK] = &gcc_wcss_axis_clk.clkr,
- [GCC_WCSS_DBG_IFC_APB_BDG_CLK] = &gcc_wcss_dbg_ifc_apb_bdg_clk.clkr,
- [GCC_WCSS_DBG_IFC_APB_CLK] = &gcc_wcss_dbg_ifc_apb_clk.clkr,
- [GCC_WCSS_DBG_IFC_ATB_BDG_CLK] = &gcc_wcss_dbg_ifc_atb_bdg_clk.clkr,
- [GCC_WCSS_DBG_IFC_ATB_CLK] = &gcc_wcss_dbg_ifc_atb_clk.clkr,
- [GCC_WCSS_DBG_IFC_NTS_BDG_CLK] = &gcc_wcss_dbg_ifc_nts_bdg_clk.clkr,
- [GCC_WCSS_DBG_IFC_NTS_CLK] = &gcc_wcss_dbg_ifc_nts_clk.clkr,
- [GCC_WCSS_ECAHB_CLK] = &gcc_wcss_ecahb_clk.clkr,
- [GCC_WCSS_MST_ASYNC_BDG_CLK] = &gcc_wcss_mst_async_bdg_clk.clkr,
- [GCC_WCSS_SLV_ASYNC_BDG_CLK] = &gcc_wcss_slv_async_bdg_clk.clkr,
[GCC_XO_CLK] = &gcc_xo_clk.clkr,
[GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
[GCC_XO_DIV4_CLK] = &gcc_xo_div4_clk.clkr,
@@ -3622,7 +3242,7 @@ static const struct qcom_reset_map gcc_ipq5332_resets[] = {
#define IPQ_APPS_ID 5332 /* some unique value */
-static struct qcom_icc_hws_data icc_ipq5332_hws[] = {
+static const struct qcom_icc_hws_data icc_ipq5332_hws[] = {
{ MASTER_SNOC_PCIE3_1_M, SLAVE_SNOC_PCIE3_1_M, GCC_SNOC_PCIE3_1LANE_M_CLK },
{ MASTER_ANOC_PCIE3_1_S, SLAVE_ANOC_PCIE3_1_S, GCC_SNOC_PCIE3_1LANE_S_CLK },
{ MASTER_SNOC_PCIE3_2_M, SLAVE_SNOC_PCIE3_2_M, GCC_SNOC_PCIE3_2LANE_M_CLK },
diff --git a/drivers/clk/qcom/gcc-ipq5424.c b/drivers/clk/qcom/gcc-ipq5424.c
new file mode 100644
index 000000000000..d5b218b76e29
--- /dev/null
+++ b/drivers/clk/qcom/gcc-ipq5424.c
@@ -0,0 +1,3310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq5424-gcc.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
+#include <dt-bindings/reset/qcom,ipq5424-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "reset.h"
+
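+/* Board clock inputs taken from the device tree, referenced by index below */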
+enum {
+ DT_XO,
+ DT_SLEEP_CLK,
+ DT_PCIE30_PHY0_PIPE_CLK,
+ DT_PCIE30_PHY1_PIPE_CLK,
+ DT_PCIE30_PHY2_PIPE_CLK,
+ DT_PCIE30_PHY3_PIPE_CLK,
+ DT_USB_PCIE_WRAPPER_PIPE_CLK,
+};
+
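+/* PLL and mux parent selectors used by the parent_map tables below */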
+enum {
+ P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC,
+ P_GPLL0_OUT_AUX,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL2_OUT_AUX,
+ P_GPLL2_OUT_MAIN,
+ P_GPLL4_OUT_AUX,
+ P_GPLL4_OUT_MAIN,
+ P_SLEEP_CLK,
+ P_XO,
+ P_USB3PHY_0_PIPE,
+};
+
+static const struct clk_parent_data gcc_parent_data_xo = { .index = DT_XO };
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll0",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll0_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll0_div2",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll2 = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll2",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll2_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll2_out_main = {
+ .offset = 0x21000,
+ .post_div_table = post_div_table_gpll2_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll2_out_main),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll2_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x22000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll4",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ /*
+ * There are no in-kernel consumers of this GPLL yet (they
+ * will be added soon), so the clock framework would disable
+ * this source. However, some clocks initialized by the boot
+ * loader use this source, so it must be kept on. Add the
+ * CLK_IGNORE_UNUSED flag so the clock is not disabled; once
+ * an in-kernel consumer is added, the flag can be removed.
+ */
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ },
+};
+
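+/*
+ * Each parent_map entry pairs a P_* selector with its hardware mux select
+ * value; the clk_parent_data array with the same index lists the parents
+ * in the same order.
+ */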
+static const struct parent_map gcc_parent_map_xo[] = {
+ { P_XO, 0 },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_GPLL4_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_XO },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_XO },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_GPLL4_OUT_AUX, 1 },
+ { P_GPLL0_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_XO },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2_out_main.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_MAIN_DIV_CLK_SRC, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_div2.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+};
+
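+/*
+ * Frequency table entries are F(rate, parent, pre_div, m, n); a fractional
+ * pre_div such as 2.5 or 4.5 encodes the half-integer dividers supported
+ * by the RCG hardware.
+ */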
+static const struct freq_tbl ftbl_gcc_adss_pwm_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_adss_pwm_clk_src = {
+ .cmd_rcgr = 0x1c004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_adss_pwm_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_nss_ts_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_xo_clk_src = {
+ .cmd_rcgr = 0x34004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_xo,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_clk_src",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_xo_clk = {
+ .halt_reg = 0x34018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x34018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gcc_xo_div4_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_xo_div4_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_nss_ts_clk_src = {
+ .cmd_rcgr = 0x17088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nss_ts_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie0_axi_m_clk_src[] = {
+ F(240000000, P_GPLL4_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie0_axi_m_clk_src = {
+ .cmd_rcgr = 0x28018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_axi_m_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie0_axi_s_clk_src = {
+ .cmd_rcgr = 0x28020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_axi_s_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie1_axi_m_clk_src = {
+ .cmd_rcgr = 0x29018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_axi_m_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie1_axi_s_clk_src = {
+ .cmd_rcgr = 0x29020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_axi_s_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie2_axi_m_clk_src[] = {
+ F(266666667, P_GPLL4_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie2_axi_m_clk_src = {
+ .cmd_rcgr = 0x2a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie2_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_axi_m_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie2_axi_s_clk_src = {
+ .cmd_rcgr = 0x2a020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_axi_s_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3_axi_m_clk_src = {
+ .cmd_rcgr = 0x2b018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie2_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_axi_m_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3_axi_s_clk_src = {
+ .cmd_rcgr = 0x2b020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie0_axi_m_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_axi_s_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_aux_clk_src[] = {
+ F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_aux_clk_src = {
+ .cmd_rcgr = 0x28004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_pcie_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_aux_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_i2c0_clk_src[] = {
+ F(4800000, P_XO, 5, 0, 0),
+ F(9600000, P_XO, 2.5, 0, 0),
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c0_clk_src = {
+ .cmd_rcgr = 0x2018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_i2c0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c1_clk_src = {
+ .cmd_rcgr = 0x3018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_i2c0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_spi0_clk_src[] = {
+ F(960000, P_XO, 10, 2, 5),
+ F(4800000, P_XO, 5, 0, 0),
+ F(9600000, P_XO, 2, 4, 5),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 10, 2, 5),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_spi0_clk_src = {
+ .cmd_rcgr = 0x4004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_spi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_spi0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_spi1_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_spi0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_spi1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_uart0_clk_src[] = {
+ F(960000, P_XO, 10, 2, 5),
+ F(4800000, P_XO, 5, 0, 0),
+ F(9600000, P_XO, 2, 4, 5),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(24000000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_uart0_clk_src = {
+ .cmd_rcgr = 0x202c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_uart0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_uart0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_uart1_clk_src = {
+ .cmd_rcgr = 0x302c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_uart0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_uart1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 12, 125),
+ F(400000, P_XO, 12, 1, 5),
+ F(24000000, P_XO, 1, 0, 0),
+ F(48000000, P_GPLL2_OUT_MAIN, 12, 1, 2),
+ F(96000000, P_GPLL2_OUT_MAIN, 6, 1, 2),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(192000000, P_GPLL2_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x33004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(300000000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x33018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_uniphy_sys_clk_src = {
+ .cmd_rcgr = 0x17090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy_sys_clk_src",
+ .parent_data = &gcc_parent_data_xo,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb0_aux_clk_src = {
+ .cmd_rcgr = 0x2c018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_nss_ts_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_aux_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb0_master_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb0_master_clk_src = {
+ .cmd_rcgr = 0x2c004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb0_mock_utmi_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(60000000, P_GPLL4_OUT_AUX, 10, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb0_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x2c02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb0_mock_utmi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb1_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x3c004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_usb0_mock_utmi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_wcss_ahb_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_wcss_ahb_clk_src = {
+ .cmd_rcgr = 0x25030,
+ .freq_tbl = ftbl_gcc_wcss_ahb_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_wcss_ahb_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qdss_at_clk_src[] = {
+ F(240000000, P_GPLL4_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qdss_at_clk_src = {
+ .cmd_rcgr = 0x2d004,
+ .freq_tbl = ftbl_gcc_qdss_at_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_at_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qdss_tsctr_clk_src[] = {
+ F(600000000, P_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qdss_tsctr_clk_src = {
+ .cmd_rcgr = 0x2d01c,
+ .freq_tbl = ftbl_gcc_qdss_tsctr_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_qdss_tsctr_div2_clk_src = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_tsctr_div2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_tsctr_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
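+/* Fixed /4 divider on the QDSS TSCTR clock, providing the DAP sync clock */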
+static struct clk_fixed_factor gcc_qdss_dap_sync_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_dap_sync_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_tsctr_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_system_noc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL4_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_system_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x2e004,
+ .freq_tbl = ftbl_gcc_system_noc_bfdcd_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_system_noc_bfdcd_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcnoc_bfdcd_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x31004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcnoc_bfdcd_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcnoc_bfdcd_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_lpass_sway_clk_src[] = {
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_lpass_sway_clk_src = {
+ .cmd_rcgr = 0x27004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_lpass_sway_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_sway_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_lpass_axim_clk_src = {
+ .cmd_rcgr = 0x2700c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_lpass_sway_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_axim_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_eud_at_div_clk_src = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eud_at_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_at_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sleep_clk_src = {
+ .cmd_rcgr = 0x3400c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sleep_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qpic_io_macro_clk_src[] = {
+ F(24000000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qpic_io_macro_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_qpic_io_macro_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_io_macro_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qpic_clk_src = {
+ .cmd_rcgr = 0x32020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_qpic_io_macro_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie0_rchng_clk_src = {
+ .cmd_rcgr = 0x28028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_rchng_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie1_rchng_clk_src = {
+ .cmd_rcgr = 0x29028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_rchng_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie2_rchng_clk_src = {
+ .cmd_rcgr = 0x2a028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_rchng_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie3_rchng_clk_src = {
+ .cmd_rcgr = 0x2b028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_adss_pwm_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_rchng_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
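+/*
+ * Read-only post dividers (clk_regmap_div_ro_ops): the divider values
+ * programmed by the boot firmware are reported but never changed.
+ */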
+static struct clk_regmap_div gcc_qupv3_i2c0_div_clk_src = {
+ .reg = 0x2020,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_i2c1_div_clk_src = {
+ .reg = 0x3020,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb0_mock_utmi_div_clk_src = {
+ .reg = 0x2c040,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_mock_utmi_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb1_mock_utmi_div_clk_src = {
+ .reg = 0x3c018,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_mock_utmi_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb1_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gcc_adss_pwm_clk = {
+ .halt_reg = 0x1c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_adss_pwm_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_adss_pwm_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie0_1lane_s_clk = {
+ .halt_reg = 0x31088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie0_1lane_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie0_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie1_1lane_s_clk = {
+ .halt_reg = 0x3108c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie1_1lane_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie1_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie2_2lane_s_clk = {
+ .halt_reg = 0x31090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie2_2lane_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie2_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie3_2lane_s_clk = {
+ .halt_reg = 0x31094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31094,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie3_2lane_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_usb_clk = {
+ .halt_reg = 0x310a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x310a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_usb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdio_ahb_clk = {
+ .halt_reg = 0x17040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_mdio_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nss_ts_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x17018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nss_ts_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_nss_ts_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nsscc_clk = {
+ .halt_reg = 0x17034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nsscc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nsscfg_clk = {
+ .halt_reg = 0x1702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nsscfg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_atb_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_atb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qdss_at_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_nsscc_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_nsscc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_pcnoc_1_clk = {
+ .halt_reg = 0x17080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_pcnoc_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_qosgen_ref_clk = {
+ .halt_reg = 0x1701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_qosgen_ref_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_xo_div4_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_snoc_1_clk = {
+ .halt_reg = 0x1707c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1707c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_snoc_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_snoc_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_snoc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_system_noc_bfdcd_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_timeout_ref_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_timeout_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_div4_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_nssnoc_xo_dcd_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_nssnoc_xo_dcd_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_ahb_clk = {
+ .halt_reg = 0x28030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_aux_clk = {
+ .halt_reg = 0x28070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_m_clk = {
+ .halt_reg = 0x28038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie0_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie0_1lane_m_clk = {
+ .halt_reg = 0x2e07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_anoc_pcie0_1lane_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie0_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_s_bridge_clk = {
+ .halt_reg = 0x28048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie0_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_axi_s_clk = {
+ .halt_reg = 0x28040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x28040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie0_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
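+/*
+ * The pipe clock is sourced from the PCIe PHY; the phy mux keeps the RCG
+ * parked on a safe source until the PHY provides a stable pipe clock.
+ */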
+static struct clk_regmap_phy_mux gcc_pcie0_pipe_clk_src = {
+ .reg = 0x28064,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "pcie0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY0_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_pipe_clk = {
+ .halt_reg = 0x28068,
+ .halt_check = BRANCH_HALT_DELAY,
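+		/* no usable halt bit; just delay after toggling enable */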
+ .clkr = {
+ .enable_reg = 0x28068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie0_pipe_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_ahb_clk = {
+ .halt_reg = 0x29030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_aux_clk = {
+ .halt_reg = 0x29074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_axi_m_clk = {
+ .halt_reg = 0x29038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie1_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie1_1lane_m_clk = {
+ .halt_reg = 0x2e084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_anoc_pcie1_1lane_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie1_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_axi_s_bridge_clk = {
+ .halt_reg = 0x29048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie1_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_axi_s_clk = {
+ .halt_reg = 0x29040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x29040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie1_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie1_pipe_clk_src = {
+ .reg = 0x29064,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "pcie1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY1_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_pipe_clk = {
+ .halt_reg = 0x29068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x29068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie1_pipe_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_ahb_clk = {
+ .halt_reg = 0x2a030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_aux_clk = {
+ .halt_reg = 0x2a078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_axi_m_clk = {
+ .halt_reg = 0x2a038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie2_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie2_2lane_m_clk = {
+ .halt_reg = 0x2e080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_anoc_pcie2_2lane_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie2_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_axi_s_bridge_clk = {
+ .halt_reg = 0x2a048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie2_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_axi_s_clk = {
+ .halt_reg = 0x2a040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2a040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie2_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie2_pipe_clk_src = {
+ .reg = 0x2a064,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "pcie2_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY2_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_pipe_clk = {
+ .halt_reg = 0x2a068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2a068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie2_pipe_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_ahb_clk = {
+ .halt_reg = 0x2b030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_aux_clk = {
+ .halt_reg = 0x2b07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_axi_m_clk = {
+ .halt_reg = 0x2b038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_axi_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_anoc_pcie3_2lane_m_clk = {
+ .halt_reg = 0x2e090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_anoc_pcie3_2lane_m_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3_axi_m_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_axi_s_bridge_clk = {
+ .halt_reg = 0x2b048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_axi_s_bridge_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_axi_s_clk = {
+ .halt_reg = 0x2b040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_axi_s_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie3_axi_s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie3_pipe_clk_src = {
+ .reg = 0x2b064,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "pcie3_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE30_PHY3_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_pipe_clk = {
+ .halt_reg = 0x2b068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2b068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie3_pipe_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
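+/*
+ * The PRNG and QUPv3 AHB branches are enabled via per-bit votes in the
+ * shared enable register at 0xb004; halt status is polled separately.
+ */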
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_ahb_mst_clk = {
+ .halt_reg = 0x1014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_ahb_mst_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_ahb_slv_clk = {
+ .halt_reg = 0x102c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb004,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_ahb_slv_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c0_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c1_clk = {
+ .halt_reg = 0x3024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_spi0_clk = {
+ .halt_reg = 0x4020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_spi0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_spi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_spi1_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_spi1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_spi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_uart0_clk = {
+ .halt_reg = 0x2040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_uart0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_uart0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_uart1_clk = {
+ .halt_reg = 0x3040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_uart1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_uart1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x3303c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3303c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x3302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x33034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_ahb_clk = {
+ .halt_reg = 0x1704c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1704c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy0_sys_clk = {
+ .halt_reg = 0x17048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x17048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy0_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_ahb_clk = {
+ .halt_reg = 0x1705c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1705c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy1_sys_clk = {
+ .halt_reg = 0x17058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x17058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy1_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy2_ahb_clk = {
+ .halt_reg = 0x1706c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1706c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_uniphy2_sys_clk = {
+ .halt_reg = 0x17068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x17068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_uniphy2_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_aux_clk = {
+ .halt_reg = 0x2c04c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_master_clk = {
+ .halt_reg = 0x2c044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_mock_utmi_clk = {
+ .halt_reg = 0x2c050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb0_mock_utmi_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_mock_utmi_clk = {
+ .halt_reg = 0x3c024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb1_mock_utmi_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_phy_cfg_ahb_clk = {
+ .halt_reg = 0x2c05c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_phy_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_phy_cfg_ahb_clk = {
+ .halt_reg = 0x3c01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_phy_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_master_clk = {
+ .halt_reg = 0x3c028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb0_pipe_clk_src = {
+ .reg = 0x2c074,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_USB_PCIE_WRAPPER_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_pipe_clk = {
+ .halt_reg = 0x2c054,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2c054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_pipe_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_usb0_pipe_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_sleep_clk = {
+ .halt_reg = 0x2c058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2c058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb1_sleep_clk = {
+ .halt_reg = 0x3c020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb1_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_ahb_clk = {
+ .halt_reg = 0x3a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cmn_12gpll_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cmn_12gpll_sys_clk = {
+ .halt_reg = 0x3a008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cmn_12gpll_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_uniphy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_sway_clk = {
+ .halt_reg = 0x27014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x27014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_sway_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_sway_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_lpass_cfg_clk = {
+ .halt_reg = 0x2e028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_lpass_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_sway_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_core_axim_clk = {
+ .halt_reg = 0x27018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x27018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_lpass_core_axim_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_axim_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_lpass_clk = {
+ .halt_reg = 0x31020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x31020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_snoc_lpass_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_lpass_axim_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb0_eud_at_clk = {
+ .halt_reg = 0x30004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x30004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb0_eud_at_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_eud_at_div_clk_src.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_clk = {
+ .halt_reg = 0x32028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x32028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qpic_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_io_macro_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qpic_io_macro_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qpic_io_macro_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+ .halt_reg = 0x2d058,
+ .clkr = {
+ .enable_reg = 0x2d058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_dap_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_dap_sync_clk_src.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_at_clk = {
+ .halt_reg = 0x2d034,
+ .clkr = {
+ .enable_reg = 0x2d034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qdss_at_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_qdss_at_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_rchng_clk = {
+ .halt_reg = 0x28028,
+ .clkr = {
+ .enable_reg = 0x28028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie0_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie1_rchng_clk = {
+ .halt_reg = 0x29028,
+ .clkr = {
+ .enable_reg = 0x29028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie1_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie1_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie2_rchng_clk = {
+ .halt_reg = 0x2a028,
+ .clkr = {
+ .enable_reg = 0x2a028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie2_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie2_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie3_rchng_clk = {
+ .halt_reg = 0x2b028,
+ .clkr = {
+ .enable_reg = 0x2b028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie3_rchng_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gcc_pcie3_rchng_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
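+/* Clocks exported to consumers, indexed by the IDs from the GCC DT binding */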
+static struct clk_regmap *gcc_ipq5424_clocks[] = {
+ [GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
+ [GCC_ADSS_PWM_CLK_SRC] = &gcc_adss_pwm_clk_src.clkr,
+ [GCC_CNOC_PCIE0_1LANE_S_CLK] = &gcc_cnoc_pcie0_1lane_s_clk.clkr,
+ [GCC_CNOC_PCIE1_1LANE_S_CLK] = &gcc_cnoc_pcie1_1lane_s_clk.clkr,
+ [GCC_CNOC_PCIE2_2LANE_S_CLK] = &gcc_cnoc_pcie2_2lane_s_clk.clkr,
+ [GCC_CNOC_PCIE3_2LANE_S_CLK] = &gcc_cnoc_pcie3_2lane_s_clk.clkr,
+ [GCC_CNOC_USB_CLK] = &gcc_cnoc_usb_clk.clkr,
+ [GCC_MDIO_AHB_CLK] = &gcc_mdio_ahb_clk.clkr,
+ [GCC_NSS_TS_CLK] = &gcc_nss_ts_clk.clkr,
+ [GCC_NSS_TS_CLK_SRC] = &gcc_nss_ts_clk_src.clkr,
+ [GCC_NSSCC_CLK] = &gcc_nsscc_clk.clkr,
+ [GCC_NSSCFG_CLK] = &gcc_nsscfg_clk.clkr,
+ [GCC_NSSNOC_ATB_CLK] = &gcc_nssnoc_atb_clk.clkr,
+ [GCC_NSSNOC_NSSCC_CLK] = &gcc_nssnoc_nsscc_clk.clkr,
+ [GCC_NSSNOC_PCNOC_1_CLK] = &gcc_nssnoc_pcnoc_1_clk.clkr,
+ [GCC_NSSNOC_QOSGEN_REF_CLK] = &gcc_nssnoc_qosgen_ref_clk.clkr,
+ [GCC_NSSNOC_SNOC_1_CLK] = &gcc_nssnoc_snoc_1_clk.clkr,
+ [GCC_NSSNOC_SNOC_CLK] = &gcc_nssnoc_snoc_clk.clkr,
+ [GCC_NSSNOC_TIMEOUT_REF_CLK] = &gcc_nssnoc_timeout_ref_clk.clkr,
+ [GCC_NSSNOC_XO_DCD_CLK] = &gcc_nssnoc_xo_dcd_clk.clkr,
+ [GCC_PCIE0_AHB_CLK] = &gcc_pcie0_ahb_clk.clkr,
+ [GCC_PCIE0_AUX_CLK] = &gcc_pcie0_aux_clk.clkr,
+ [GCC_PCIE0_AXI_M_CLK] = &gcc_pcie0_axi_m_clk.clkr,
+ [GCC_PCIE0_AXI_M_CLK_SRC] = &gcc_pcie0_axi_m_clk_src.clkr,
+ [GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
+ [GCC_PCIE0_AXI_S_CLK] = &gcc_pcie0_axi_s_clk.clkr,
+ [GCC_PCIE0_AXI_S_CLK_SRC] = &gcc_pcie0_axi_s_clk_src.clkr,
+ [GCC_PCIE0_PIPE_CLK] = &gcc_pcie0_pipe_clk.clkr,
+ [GCC_ANOC_PCIE0_1LANE_M_CLK] = &gcc_anoc_pcie0_1lane_m_clk.clkr,
+ [GCC_PCIE0_PIPE_CLK_SRC] = &gcc_pcie0_pipe_clk_src.clkr,
+ [GCC_PCIE0_RCHNG_CLK_SRC] = &gcc_pcie0_rchng_clk_src.clkr,
+ [GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
+ [GCC_PCIE1_AHB_CLK] = &gcc_pcie1_ahb_clk.clkr,
+ [GCC_PCIE1_AUX_CLK] = &gcc_pcie1_aux_clk.clkr,
+ [GCC_PCIE1_AXI_M_CLK] = &gcc_pcie1_axi_m_clk.clkr,
+ [GCC_PCIE1_AXI_M_CLK_SRC] = &gcc_pcie1_axi_m_clk_src.clkr,
+ [GCC_PCIE1_AXI_S_BRIDGE_CLK] = &gcc_pcie1_axi_s_bridge_clk.clkr,
+ [GCC_PCIE1_AXI_S_CLK] = &gcc_pcie1_axi_s_clk.clkr,
+ [GCC_PCIE1_AXI_S_CLK_SRC] = &gcc_pcie1_axi_s_clk_src.clkr,
+ [GCC_PCIE1_PIPE_CLK] = &gcc_pcie1_pipe_clk.clkr,
+ [GCC_ANOC_PCIE1_1LANE_M_CLK] = &gcc_anoc_pcie1_1lane_m_clk.clkr,
+ [GCC_PCIE1_PIPE_CLK_SRC] = &gcc_pcie1_pipe_clk_src.clkr,
+ [GCC_PCIE1_RCHNG_CLK_SRC] = &gcc_pcie1_rchng_clk_src.clkr,
+ [GCC_PCIE1_RCHNG_CLK] = &gcc_pcie1_rchng_clk.clkr,
+ [GCC_PCIE2_AHB_CLK] = &gcc_pcie2_ahb_clk.clkr,
+ [GCC_PCIE2_AUX_CLK] = &gcc_pcie2_aux_clk.clkr,
+ [GCC_PCIE2_AXI_M_CLK] = &gcc_pcie2_axi_m_clk.clkr,
+ [GCC_PCIE2_AXI_M_CLK_SRC] = &gcc_pcie2_axi_m_clk_src.clkr,
+ [GCC_PCIE2_AXI_S_BRIDGE_CLK] = &gcc_pcie2_axi_s_bridge_clk.clkr,
+ [GCC_PCIE2_AXI_S_CLK] = &gcc_pcie2_axi_s_clk.clkr,
+ [GCC_PCIE2_AXI_S_CLK_SRC] = &gcc_pcie2_axi_s_clk_src.clkr,
+ [GCC_PCIE2_PIPE_CLK] = &gcc_pcie2_pipe_clk.clkr,
+ [GCC_ANOC_PCIE2_2LANE_M_CLK] = &gcc_anoc_pcie2_2lane_m_clk.clkr,
+ [GCC_PCIE2_PIPE_CLK_SRC] = &gcc_pcie2_pipe_clk_src.clkr,
+ [GCC_PCIE2_RCHNG_CLK_SRC] = &gcc_pcie2_rchng_clk_src.clkr,
+ [GCC_PCIE2_RCHNG_CLK] = &gcc_pcie2_rchng_clk.clkr,
+ [GCC_PCIE3_AHB_CLK] = &gcc_pcie3_ahb_clk.clkr,
+ [GCC_PCIE3_AUX_CLK] = &gcc_pcie3_aux_clk.clkr,
+ [GCC_PCIE3_AXI_M_CLK] = &gcc_pcie3_axi_m_clk.clkr,
+ [GCC_PCIE3_AXI_M_CLK_SRC] = &gcc_pcie3_axi_m_clk_src.clkr,
+ [GCC_PCIE3_AXI_S_BRIDGE_CLK] = &gcc_pcie3_axi_s_bridge_clk.clkr,
+ [GCC_PCIE3_AXI_S_CLK] = &gcc_pcie3_axi_s_clk.clkr,
+ [GCC_PCIE3_AXI_S_CLK_SRC] = &gcc_pcie3_axi_s_clk_src.clkr,
+ [GCC_PCIE3_PIPE_CLK] = &gcc_pcie3_pipe_clk.clkr,
+ [GCC_ANOC_PCIE3_2LANE_M_CLK] = &gcc_anoc_pcie3_2lane_m_clk.clkr,
+ [GCC_PCIE3_PIPE_CLK_SRC] = &gcc_pcie3_pipe_clk_src.clkr,
+ [GCC_PCIE3_RCHNG_CLK_SRC] = &gcc_pcie3_rchng_clk_src.clkr,
+ [GCC_PCIE3_RCHNG_CLK] = &gcc_pcie3_rchng_clk.clkr,
+ [GCC_PCIE_AUX_CLK_SRC] = &gcc_pcie_aux_clk_src.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QUPV3_AHB_MST_CLK] = &gcc_qupv3_ahb_mst_clk.clkr,
+ [GCC_QUPV3_AHB_SLV_CLK] = &gcc_qupv3_ahb_slv_clk.clkr,
+ [GCC_QUPV3_I2C0_CLK] = &gcc_qupv3_i2c0_clk.clkr,
+ [GCC_QUPV3_I2C0_CLK_SRC] = &gcc_qupv3_i2c0_clk_src.clkr,
+ [GCC_QUPV3_I2C0_DIV_CLK_SRC] = &gcc_qupv3_i2c0_div_clk_src.clkr,
+ [GCC_QUPV3_I2C1_CLK] = &gcc_qupv3_i2c1_clk.clkr,
+ [GCC_QUPV3_I2C1_CLK_SRC] = &gcc_qupv3_i2c1_clk_src.clkr,
+ [GCC_QUPV3_I2C1_DIV_CLK_SRC] = &gcc_qupv3_i2c1_div_clk_src.clkr,
+ [GCC_QUPV3_SPI0_CLK] = &gcc_qupv3_spi0_clk.clkr,
+ [GCC_QUPV3_SPI0_CLK_SRC] = &gcc_qupv3_spi0_clk_src.clkr,
+ [GCC_QUPV3_SPI1_CLK] = &gcc_qupv3_spi1_clk.clkr,
+ [GCC_QUPV3_SPI1_CLK_SRC] = &gcc_qupv3_spi1_clk_src.clkr,
+ [GCC_QUPV3_UART0_CLK] = &gcc_qupv3_uart0_clk.clkr,
+ [GCC_QUPV3_UART0_CLK_SRC] = &gcc_qupv3_uart0_clk_src.clkr,
+ [GCC_QUPV3_UART1_CLK] = &gcc_qupv3_uart1_clk.clkr,
+ [GCC_QUPV3_UART1_CLK_SRC] = &gcc_qupv3_uart1_clk_src.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_UNIPHY0_AHB_CLK] = &gcc_uniphy0_ahb_clk.clkr,
+ [GCC_UNIPHY0_SYS_CLK] = &gcc_uniphy0_sys_clk.clkr,
+ [GCC_UNIPHY1_AHB_CLK] = &gcc_uniphy1_ahb_clk.clkr,
+ [GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr,
+ [GCC_UNIPHY2_AHB_CLK] = &gcc_uniphy2_ahb_clk.clkr,
+ [GCC_UNIPHY2_SYS_CLK] = &gcc_uniphy2_sys_clk.clkr,
+ [GCC_UNIPHY_SYS_CLK_SRC] = &gcc_uniphy_sys_clk_src.clkr,
+ [GCC_USB0_AUX_CLK] = &gcc_usb0_aux_clk.clkr,
+ [GCC_USB0_AUX_CLK_SRC] = &gcc_usb0_aux_clk_src.clkr,
+ [GCC_USB0_MASTER_CLK] = &gcc_usb0_master_clk.clkr,
+ [GCC_USB0_MASTER_CLK_SRC] = &gcc_usb0_master_clk_src.clkr,
+ [GCC_USB0_MOCK_UTMI_CLK] = &gcc_usb0_mock_utmi_clk.clkr,
+ [GCC_USB0_MOCK_UTMI_CLK_SRC] = &gcc_usb0_mock_utmi_clk_src.clkr,
+ [GCC_USB0_MOCK_UTMI_DIV_CLK_SRC] = &gcc_usb0_mock_utmi_div_clk_src.clkr,
+ [GCC_USB0_EUD_AT_CLK] = &gcc_usb0_eud_at_clk.clkr,
+ [GCC_USB0_PIPE_CLK_SRC] = &gcc_usb0_pipe_clk_src.clkr,
+ [GCC_USB1_MOCK_UTMI_CLK] = &gcc_usb1_mock_utmi_clk.clkr,
+ [GCC_USB1_MOCK_UTMI_CLK_SRC] = &gcc_usb1_mock_utmi_clk_src.clkr,
+ [GCC_USB1_MOCK_UTMI_DIV_CLK_SRC] = &gcc_usb1_mock_utmi_div_clk_src.clkr,
+ [GCC_USB0_PHY_CFG_AHB_CLK] = &gcc_usb0_phy_cfg_ahb_clk.clkr,
+ [GCC_USB1_PHY_CFG_AHB_CLK] = &gcc_usb1_phy_cfg_ahb_clk.clkr,
+ [GCC_USB0_PIPE_CLK] = &gcc_usb0_pipe_clk.clkr,
+ [GCC_USB0_SLEEP_CLK] = &gcc_usb0_sleep_clk.clkr,
+ [GCC_USB1_SLEEP_CLK] = &gcc_usb1_sleep_clk.clkr,
+ [GCC_USB1_MASTER_CLK] = &gcc_usb1_master_clk.clkr,
+ [GCC_WCSS_AHB_CLK_SRC] = &gcc_wcss_ahb_clk_src.clkr,
+ [GCC_CMN_12GPLL_AHB_CLK] = &gcc_cmn_12gpll_ahb_clk.clkr,
+ [GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr,
+ [GCC_LPASS_SWAY_CLK] = &gcc_lpass_sway_clk.clkr,
+ [GCC_CNOC_LPASS_CFG_CLK] = &gcc_cnoc_lpass_cfg_clk.clkr,
+ [GCC_LPASS_CORE_AXIM_CLK] = &gcc_lpass_core_axim_clk.clkr,
+ [GCC_SNOC_LPASS_CLK] = &gcc_snoc_lpass_clk.clkr,
+ [GCC_QDSS_AT_CLK_SRC] = &gcc_qdss_at_clk_src.clkr,
+ [GCC_QDSS_TSCTR_CLK_SRC] = &gcc_qdss_tsctr_clk_src.clkr,
+ [GCC_SYSTEM_NOC_BFDCD_CLK_SRC] = &gcc_system_noc_bfdcd_clk_src.clkr,
+ [GCC_PCNOC_BFDCD_CLK_SRC] = &gcc_pcnoc_bfdcd_clk_src.clkr,
+ [GCC_LPASS_SWAY_CLK_SRC] = &gcc_lpass_sway_clk_src.clkr,
+ [GCC_LPASS_AXIM_CLK_SRC] = &gcc_lpass_axim_clk_src.clkr,
+ [GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr,
+ [GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr,
+ [GCC_QPIC_IO_MACRO_CLK_SRC] = &gcc_qpic_io_macro_clk_src.clkr,
+ [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+ [GCC_QPIC_CLK_SRC] = &gcc_qpic_clk_src.clkr,
+ [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+ [GCC_XO_CLK_SRC] = &gcc_xo_clk_src.clkr,
+ [GCC_XO_CLK] = &gcc_xo_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL2_OUT_MAIN] = &gpll2_out_main.clkr,
+ [GPLL4] = &gpll4.clkr,
+};
+
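+/* Resets exposed through the reset controller, as { offset, bit } pairs */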
+static const struct qcom_reset_map gcc_ipq5424_resets[] = {
+ [GCC_QUPV3_BCR] = { 0x01000, 0 },
+ [GCC_QUPV3_I2C0_BCR] = { 0x02000, 0 },
+ [GCC_QUPV3_UART0_BCR] = { 0x02020, 0 },
+ [GCC_QUPV3_I2C1_BCR] = { 0x03000, 0 },
+ [GCC_QUPV3_UART1_BCR] = { 0x03028, 0 },
+ [GCC_QUPV3_SPI0_BCR] = { 0x04000, 0 },
+ [GCC_QUPV3_SPI1_BCR] = { 0x05000, 0 },
+ [GCC_IMEM_BCR] = { 0x0e000, 0 },
+ [GCC_TME_BCR] = { 0x100000, 0 },
+ [GCC_DDRSS_BCR] = { 0x11000, 0 },
+ [GCC_PRNG_BCR] = { 0x13020, 0 },
+ [GCC_BOOT_ROM_BCR] = { 0x13028, 0 },
+ [GCC_NSS_BCR] = { 0x17000, 0 },
+ [GCC_MDIO_BCR] = { 0x1703c, 0 },
+ [GCC_UNIPHY0_BCR] = { 0x17044, 0 },
+ [GCC_UNIPHY1_BCR] = { 0x17054, 0 },
+ [GCC_UNIPHY2_BCR] = { 0x17064, 0 },
+ [GCC_WCSS_BCR] = { 0x18004, 0 },
+ [GCC_SEC_CTRL_BCR] = { 0x1a000, 0 },
+ [GCC_TME_SEC_BUS_BCR] = { 0xa1030, 0 },
+ [GCC_ADSS_BCR] = { 0x1c000, 0 },
+ [GCC_LPASS_BCR] = { 0x27000, 0 },
+ [GCC_PCIE0_BCR] = { 0x28000, 0 },
+ [GCC_PCIE0_LINK_DOWN_BCR] = { 0x28054, 0 },
+ [GCC_PCIE0PHY_PHY_BCR] = { 0x2805c, 0 },
+ [GCC_PCIE0_PHY_BCR] = { 0x28060, 0 },
+ [GCC_PCIE1_BCR] = { 0x29000, 0 },
+ [GCC_PCIE1_LINK_DOWN_BCR] = { 0x29054, 0 },
+ [GCC_PCIE1PHY_PHY_BCR] = { 0x2905c, 0 },
+ [GCC_PCIE1_PHY_BCR] = { 0x29060, 0 },
+ [GCC_PCIE2_BCR] = { 0x2a000, 0 },
+ [GCC_PCIE2_LINK_DOWN_BCR] = { 0x2a054, 0 },
+ [GCC_PCIE2PHY_PHY_BCR] = { 0x2a05c, 0 },
+ [GCC_PCIE2_PHY_BCR] = { 0x2a060, 0 },
+ [GCC_PCIE3_BCR] = { 0x2b000, 0 },
+ [GCC_PCIE3_LINK_DOWN_BCR] = { 0x2b054, 0 },
+ [GCC_PCIE3PHY_PHY_BCR] = { 0x2b05c, 0 },
+ [GCC_PCIE3_PHY_BCR] = { 0x2b060, 0 },
+ [GCC_USB_BCR] = { 0x2c000, 0 },
+ [GCC_QUSB2_0_PHY_BCR] = { 0x2c068, 0 },
+ [GCC_USB0_PHY_BCR] = { 0x2c06c, 0 },
+ [GCC_USB3PHY_0_PHY_BCR] = { 0x2c070, 0 },
+ [GCC_QDSS_BCR] = { 0x2d000, 0 },
+ [GCC_SNOC_BCR] = { 0x2e000, 0 },
+ [GCC_ANOC_BCR] = { 0x2e074, 0 },
+ [GCC_PCNOC_BCR] = { 0x31000, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x31030, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x31038, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x31040, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x31048, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x31050, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x31058, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x31060, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x31068, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x31070, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x31078, 0 },
+ [GCC_QPIC_BCR] = { 0x32000, 0 },
+ [GCC_SDCC_BCR] = { 0x33000, 0 },
+ [GCC_DCC_BCR] = { 0x35000, 0 },
+ [GCC_SPDM_BCR] = { 0x36000, 0 },
+ [GCC_MPM_BCR] = { 0x37000, 0 },
+ [GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR] = { 0x38000, 0 },
+ [GCC_RBCPR_BCR] = { 0x39000, 0 },
+ [GCC_CMN_BLK_BCR] = { 0x3a000, 0 },
+ [GCC_TCSR_BCR] = { 0x3d000, 0 },
+ [GCC_TLMM_BCR] = { 0x3e000, 0 },
+ [GCC_QUPV3_AHB_MST_ARES] = { 0x01014, 2 },
+ [GCC_QUPV3_CORE_ARES] = { 0x01018, 2 },
+ [GCC_QUPV3_2X_CORE_ARES] = { 0x01020, 2 },
+ [GCC_QUPV3_SLEEP_ARES] = { 0x01028, 2 },
+ [GCC_QUPV3_AHB_SLV_ARES] = { 0x0102c, 2 },
+ [GCC_QUPV3_I2C0_ARES] = { 0x02024, 2 },
+ [GCC_QUPV3_UART0_ARES] = { 0x02040, 2 },
+ [GCC_QUPV3_I2C1_ARES] = { 0x03024, 2 },
+ [GCC_QUPV3_UART1_ARES] = { 0x03040, 2 },
+ [GCC_QUPV3_SPI0_ARES] = { 0x04020, 2 },
+ [GCC_QUPV3_SPI1_ARES] = { 0x05020, 2 },
+ [GCC_DEBUG_ARES] = { 0x06068, 2 },
+ [GCC_GP1_ARES] = { 0x08018, 2 },
+ [GCC_GP2_ARES] = { 0x09018, 2 },
+ [GCC_GP3_ARES] = { 0x0a018, 2 },
+ [GCC_IMEM_AXI_ARES] = { 0x0e004, 2 },
+ [GCC_IMEM_CFG_AHB_ARES] = { 0x0e00c, 2 },
+ [GCC_TME_ARES] = { 0x100b4, 2 },
+ [GCC_TME_TS_ARES] = { 0x100c0, 2 },
+ [GCC_TME_SLOW_ARES] = { 0x100d0, 2 },
+ [GCC_TME_RTC_TOGGLE_ARES] = { 0x100d8, 2 },
+ [GCC_TIC_ARES] = { 0x12004, 2 },
+ [GCC_PRNG_AHB_ARES] = { 0x13024, 2 },
+ [GCC_BOOT_ROM_AHB_ARES] = { 0x1302c, 2 },
+ [GCC_NSSNOC_ATB_ARES] = { 0x17014, 2 },
+ [GCC_NSS_TS_ARES] = { 0x17018, 2 },
+ [GCC_NSSNOC_QOSGEN_REF_ARES] = { 0x1701c, 2 },
+ [GCC_NSSNOC_TIMEOUT_REF_ARES] = { 0x17020, 2 },
+ [GCC_NSSNOC_MEMNOC_ARES] = { 0x17024, 2 },
+ [GCC_NSSNOC_SNOC_ARES] = { 0x17028, 2 },
+ [GCC_NSSCFG_ARES] = { 0x1702c, 2 },
+ [GCC_NSSNOC_NSSCC_ARES] = { 0x17030, 2 },
+ [GCC_NSSCC_ARES] = { 0x17034, 2 },
+ [GCC_MDIO_AHB_ARES] = { 0x17040, 2 },
+ [GCC_UNIPHY0_SYS_ARES] = { 0x17048, 2 },
+ [GCC_UNIPHY0_AHB_ARES] = { 0x1704c, 2 },
+ [GCC_UNIPHY1_SYS_ARES] = { 0x17058, 2 },
+ [GCC_UNIPHY1_AHB_ARES] = { 0x1705c, 2 },
+ [GCC_UNIPHY2_SYS_ARES] = { 0x17068, 2 },
+ [GCC_UNIPHY2_AHB_ARES] = { 0x1706c, 2 },
+ [GCC_NSSNOC_XO_DCD_ARES] = { 0x17074, 2 },
+ [GCC_NSSNOC_SNOC_1_ARES] = { 0x1707c, 2 },
+ [GCC_NSSNOC_PCNOC_1_ARES] = { 0x17080, 2 },
+ [GCC_NSSNOC_MEMNOC_1_ARES] = { 0x17084, 2 },
+ [GCC_DDRSS_ATB_ARES] = { 0x19004, 2 },
+ [GCC_DDRSS_AHB_ARES] = { 0x19008, 2 },
+ [GCC_GEMNOC_AHB_ARES] = { 0x1900c, 2 },
+ [GCC_GEMNOC_Q6_AXI_ARES] = { 0x19010, 2 },
+ [GCC_GEMNOC_NSSNOC_ARES] = { 0x19014, 2 },
+ [GCC_GEMNOC_SNOC_ARES] = { 0x19018, 2 },
+ [GCC_GEMNOC_APSS_ARES] = { 0x1901c, 2 },
+ [GCC_GEMNOC_QOSGEN_EXTREF_ARES] = { 0x19024, 2 },
+ [GCC_GEMNOC_TS_ARES] = { 0x19028, 2 },
+ [GCC_DDRSS_SMS_SLOW_ARES] = { 0x1902c, 2 },
+ [GCC_GEMNOC_CNOC_ARES] = { 0x19038, 2 },
+ [GCC_GEMNOC_XO_DBG_ARES] = { 0x19040, 2 },
+ [GCC_GEMNOC_ANOC_ARES] = { 0x19048, 2 },
+ [GCC_DDRSS_LLCC_ATB_ARES] = { 0x1904c, 2 },
+ [GCC_LLCC_TPDM_CFG_ARES] = { 0x19050, 2 },
+ [GCC_TME_BUS_ARES] = { 0x1a014, 2 },
+ [GCC_SEC_CTRL_ACC_ARES] = { 0x1a018, 2 },
+ [GCC_SEC_CTRL_ARES] = { 0x1a020, 2 },
+ [GCC_SEC_CTRL_SENSE_ARES] = { 0x1a028, 2 },
+ [GCC_SEC_CTRL_AHB_ARES] = { 0x1a038, 2 },
+ [GCC_SEC_CTRL_BOOT_ROM_PATCH_ARES] = { 0x1a03c, 2 },
+ [GCC_ADSS_PWM_ARES] = { 0x1c00c, 2 },
+ [GCC_TME_ATB_ARES] = { 0x1e030, 2 },
+ [GCC_TME_DBGAPB_ARES] = { 0x1e034, 2 },
+ [GCC_TME_DEBUG_ARES] = { 0x1e038, 2 },
+	[GCC_TME_AT_ARES] = { 0x1e03c, 2 },
+ [GCC_TME_APB_ARES] = { 0x1e040, 2 },
+ [GCC_TME_DMI_DBG_HS_ARES] = { 0x1e044, 2 },
+ [GCC_APSS_AHB_ARES] = { 0x24014, 2 },
+ [GCC_APSS_AXI_ARES] = { 0x24018, 2 },
+ [GCC_CPUSS_TRIG_ARES] = { 0x2401c, 2 },
+ [GCC_APSS_DBG_ARES] = { 0x2402c, 2 },
+ [GCC_APSS_TS_ARES] = { 0x24030, 2 },
+ [GCC_APSS_ATB_ARES] = { 0x24034, 2 },
+ [GCC_Q6_AXIM_ARES] = { 0x2500c, 2 },
+ [GCC_Q6_AXIS_ARES] = { 0x25010, 2 },
+ [GCC_Q6_AHB_ARES] = { 0x25014, 2 },
+ [GCC_Q6_AHB_S_ARES] = { 0x25018, 2 },
+ [GCC_Q6SS_ATBM_ARES] = { 0x2501c, 2 },
+ [GCC_Q6_TSCTR_1TO2_ARES] = { 0x25020, 2 },
+ [GCC_Q6SS_PCLKDBG_ARES] = { 0x25024, 2 },
+ [GCC_Q6SS_TRIG_ARES] = { 0x25028, 2 },
+ [GCC_Q6SS_BOOT_CBCR_ARES] = { 0x2502c, 2 },
+ [GCC_WCSS_DBG_IFC_APB_ARES] = { 0x25038, 2 },
+ [GCC_WCSS_DBG_IFC_ATB_ARES] = { 0x2503c, 2 },
+ [GCC_WCSS_DBG_IFC_NTS_ARES] = { 0x25040, 2 },
+ [GCC_WCSS_DBG_IFC_DAPBUS_ARES] = { 0x25044, 2 },
+ [GCC_WCSS_DBG_IFC_APB_BDG_ARES] = { 0x25048, 2 },
+ [GCC_WCSS_DBG_IFC_NTS_BDG_ARES] = { 0x25050, 2 },
+ [GCC_WCSS_DBG_IFC_DAPBUS_BDG_ARES] = { 0x25054, 2 },
+ [GCC_WCSS_ECAHB_ARES] = { 0x25058, 2 },
+ [GCC_WCSS_ACMT_ARES] = { 0x2505c, 2 },
+ [GCC_WCSS_AHB_S_ARES] = { 0x25060, 2 },
+ [GCC_WCSS_AXI_M_ARES] = { 0x25064, 2 },
+ [GCC_PCNOC_WAPSS_ARES] = { 0x25080, 2 },
+ [GCC_SNOC_WAPSS_ARES] = { 0x25090, 2 },
+ [GCC_LPASS_SWAY_ARES] = { 0x27014, 2 },
+ [GCC_LPASS_CORE_AXIM_ARES] = { 0x27018, 2 },
+ [GCC_PCIE0_AHB_ARES] = { 0x28030, 2 },
+ [GCC_PCIE0_AXI_M_ARES] = { 0x28038, 2 },
+ [GCC_PCIE0_AXI_S_ARES] = { 0x28040, 2 },
+	[GCC_PCIE0_AXI_S_BRIDGE_ARES] = { 0x28048, 2 },
+	[GCC_PCIE0_PIPE_ARES] = { 0x28068, 2 },
+ [GCC_PCIE0_AUX_ARES] = { 0x28070, 2 },
+ [GCC_PCIE1_AHB_ARES] = { 0x29030, 2 },
+ [GCC_PCIE1_AXI_M_ARES] = { 0x29038, 2 },
+ [GCC_PCIE1_AXI_S_ARES] = { 0x29040, 2 },
+ [GCC_PCIE1_AXI_S_BRIDGE_ARES] = { 0x29048, 2 },
+ [GCC_PCIE1_PIPE_ARES] = { 0x29068, 2 },
+ [GCC_PCIE1_AUX_ARES] = { 0x29074, 2 },
+ [GCC_PCIE2_AHB_ARES] = { 0x2a030, 2 },
+ [GCC_PCIE2_AXI_M_ARES] = { 0x2a038, 2 },
+ [GCC_PCIE2_AXI_S_ARES] = { 0x2a040, 2 },
+ [GCC_PCIE2_AXI_S_BRIDGE_ARES] = { 0x2a048, 2 },
+ [GCC_PCIE2_PIPE_ARES] = { 0x2a068, 2 },
+ [GCC_PCIE2_AUX_ARES] = { 0x2a078, 2 },
+ [GCC_PCIE3_AHB_ARES] = { 0x2b030, 2 },
+ [GCC_PCIE3_AXI_M_ARES] = { 0x2b038, 2 },
+ [GCC_PCIE3_AXI_S_ARES] = { 0x2b040, 2 },
+ [GCC_PCIE3_AXI_S_BRIDGE_ARES] = { 0x2b048, 2 },
+ [GCC_PCIE3_PIPE_ARES] = { 0x2b068, 2 },
+	[GCC_PCIE3_AUX_ARES] = { 0x2b07c, 2 },
+ [GCC_USB0_MASTER_ARES] = { 0x2c044, 2 },
+ [GCC_USB0_AUX_ARES] = { 0x2c04c, 2 },
+ [GCC_USB0_MOCK_UTMI_ARES] = { 0x2c050, 2 },
+ [GCC_USB0_PIPE_ARES] = { 0x2c054, 2 },
+ [GCC_USB0_SLEEP_ARES] = { 0x2c058, 2 },
+ [GCC_USB0_PHY_CFG_AHB_ARES] = { 0x2c05c, 2 },
+ [GCC_QDSS_AT_ARES] = { 0x2d034, 2 },
+	[GCC_QDSS_STM_ARES] = { 0x2d03c, 2 },
+ [GCC_QDSS_TRACECLKIN_ARES] = { 0x2d040, 2 },
+ [GCC_QDSS_TSCTR_DIV2_ARES] = { 0x2d044, 2 },
+ [GCC_QDSS_TSCTR_DIV3_ARES] = { 0x2d048, 2 },
+ [GCC_QDSS_TSCTR_DIV4_ARES] = { 0x2d04c, 2 },
+ [GCC_QDSS_TSCTR_DIV8_ARES] = { 0x2d050, 2 },
+ [GCC_QDSS_TSCTR_DIV16_ARES] = { 0x2d054, 2 },
+ [GCC_QDSS_DAP_ARES] = { 0x2d058, 2 },
+ [GCC_QDSS_APB2JTAG_ARES] = { 0x2d05c, 2 },
+ [GCC_QDSS_ETR_USB_ARES] = { 0x2d060, 2 },
+ [GCC_QDSS_DAP_AHB_ARES] = { 0x2d064, 2 },
+ [GCC_QDSS_CFG_AHB_ARES] = { 0x2d068, 2 },
+ [GCC_QDSS_EUD_AT_ARES] = { 0x2d06c, 2 },
+ [GCC_QDSS_TS_ARES] = { 0x2d078, 2 },
+ [GCC_QDSS_USB_ARES] = { 0x2d07c, 2 },
+ [GCC_SYS_NOC_AXI_ARES] = { 0x2e01c, 2 },
+ [GCC_SNOC_QOSGEN_EXTREF_ARES] = { 0x2e020, 2 },
+ [GCC_CNOC_LPASS_CFG_ARES] = { 0x2e028, 2 },
+ [GCC_SYS_NOC_AT_ARES] = { 0x2e038, 2 },
+ [GCC_SNOC_PCNOC_AHB_ARES] = { 0x2e03c, 2 },
+ [GCC_SNOC_TME_ARES] = { 0x2e05c, 2 },
+ [GCC_SNOC_XO_DCD_ARES] = { 0x2e060, 2 },
+ [GCC_SNOC_TS_ARES] = { 0x2e068, 2 },
+ [GCC_ANOC0_AXI_ARES] = { 0x2e078, 2 },
+ [GCC_ANOC_PCIE0_1LANE_M_ARES] = { 0x2e07c, 2 },
+ [GCC_ANOC_PCIE2_2LANE_M_ARES] = { 0x2e080, 2 },
+ [GCC_ANOC_PCIE1_1LANE_M_ARES] = { 0x2e084, 2 },
+ [GCC_ANOC_PCIE3_2LANE_M_ARES] = { 0x2e090, 2 },
+ [GCC_ANOC_PCNOC_AHB_ARES] = { 0x2e094, 2 },
+ [GCC_ANOC_QOSGEN_EXTREF_ARES] = { 0x2e098, 2 },
+	[GCC_ANOC_XO_DCD_ARES] = { 0x2e09c, 2 },
+ [GCC_SNOC_XO_DBG_ARES] = { 0x2e0a0, 2 },
+ [GCC_AGGRNOC_ATB_ARES] = { 0x2e0ac, 2 },
+ [GCC_AGGRNOC_TS_ARES] = { 0x2e0b0, 2 },
+ [GCC_USB0_EUD_AT_ARES] = { 0x30004, 2 },
+ [GCC_PCNOC_TIC_ARES] = { 0x31014, 2 },
+ [GCC_PCNOC_AHB_ARES] = { 0x31018, 2 },
+ [GCC_PCNOC_XO_DBG_ARES] = { 0x3101c, 2 },
+ [GCC_SNOC_LPASS_ARES] = { 0x31020, 2 },
+ [GCC_PCNOC_AT_ARES] = { 0x31024, 2 },
+ [GCC_PCNOC_XO_DCD_ARES] = { 0x31028, 2 },
+ [GCC_PCNOC_TS_ARES] = { 0x3102c, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT0_AHB_ARES] = { 0x31034, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT1_AHB_ARES] = { 0x3103c, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT2_AHB_ARES] = { 0x31044, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT3_AHB_ARES] = { 0x3104c, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT4_AHB_ARES] = { 0x31054, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT5_AHB_ARES] = { 0x3105c, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT6_AHB_ARES] = { 0x31064, 2 },
+ [GCC_PCNOC_BUS_TIMEOUT7_AHB_ARES] = { 0x3106c, 2 },
+ [GCC_Q6_AXIM_RESET] = { 0x2506c, 0 },
+ [GCC_Q6_AXIS_RESET] = { 0x2506c, 1 },
+ [GCC_Q6_AHB_S_RESET] = { 0x2506c, 2 },
+ [GCC_Q6_AHB_RESET] = { 0x2506c, 3 },
+ [GCC_Q6SS_DBG_RESET] = { 0x2506c, 4 },
+ [GCC_WCSS_ECAHB_RESET] = { 0x25070, 0 },
+ [GCC_WCSS_DBG_BDG_RESET] = { 0x25070, 1 },
+ [GCC_WCSS_DBG_RESET] = { 0x25070, 2 },
+ [GCC_WCSS_AXI_M_RESET] = { 0x25070, 3 },
+ [GCC_WCSS_AHB_S_RESET] = { 0x25070, 4 },
+ [GCC_WCSS_ACMT_RESET] = { 0x25070, 5 },
+ [GCC_WCSSAON_RESET] = { 0x25074, 0 },
+ [GCC_PCIE0_PIPE_RESET] = { 0x28058, 0 },
+ [GCC_PCIE0_CORE_STICKY_RESET] = { 0x28058, 1 },
+ [GCC_PCIE0_AXI_S_STICKY_RESET] = { 0x28058, 2 },
+ [GCC_PCIE0_AXI_S_RESET] = { 0x28058, 3 },
+ [GCC_PCIE0_AXI_M_STICKY_RESET] = { 0x28058, 4 },
+ [GCC_PCIE0_AXI_M_RESET] = { 0x28058, 5 },
+ [GCC_PCIE0_AUX_RESET] = { 0x28058, 6 },
+ [GCC_PCIE0_AHB_RESET] = { 0x28058, 7 },
+ [GCC_PCIE1_PIPE_RESET] = { 0x29058, 0 },
+ [GCC_PCIE1_CORE_STICKY_RESET] = { 0x29058, 1 },
+ [GCC_PCIE1_AXI_S_STICKY_RESET] = { 0x29058, 2 },
+ [GCC_PCIE1_AXI_S_RESET] = { 0x29058, 3 },
+ [GCC_PCIE1_AXI_M_STICKY_RESET] = { 0x29058, 4 },
+ [GCC_PCIE1_AXI_M_RESET] = { 0x29058, 5 },
+ [GCC_PCIE1_AUX_RESET] = { 0x29058, 6 },
+ [GCC_PCIE1_AHB_RESET] = { 0x29058, 7 },
+ [GCC_PCIE2_PIPE_RESET] = { 0x2a058, 0 },
+ [GCC_PCIE2_CORE_STICKY_RESET] = { 0x2a058, 1 },
+ [GCC_PCIE2_AXI_S_STICKY_RESET] = { 0x2a058, 2 },
+ [GCC_PCIE2_AXI_S_RESET] = { 0x2a058, 3 },
+ [GCC_PCIE2_AXI_M_STICKY_RESET] = { 0x2a058, 4 },
+ [GCC_PCIE2_AXI_M_RESET] = { 0x2a058, 5 },
+ [GCC_PCIE2_AUX_RESET] = { 0x2a058, 6 },
+ [GCC_PCIE2_AHB_RESET] = { 0x2a058, 7 },
+ [GCC_PCIE3_PIPE_RESET] = { 0x2b058, 0 },
+ [GCC_PCIE3_CORE_STICKY_RESET] = { 0x2b058, 1 },
+ [GCC_PCIE3_AXI_S_STICKY_RESET] = { 0x2b058, 2 },
+ [GCC_PCIE3_AXI_S_RESET] = { 0x2b058, 3 },
+ [GCC_PCIE3_AXI_M_STICKY_RESET] = { 0x2b058, 4 },
+ [GCC_PCIE3_AXI_M_RESET] = { 0x2b058, 5 },
+ [GCC_PCIE3_AUX_RESET] = { 0x2b058, 6 },
+ [GCC_PCIE3_AHB_RESET] = { 0x2b058, 7 },
+ [GCC_NSS_PARTIAL_RESET] = { 0x17078, 0 },
+ [GCC_UNIPHY0_XPCS_ARES] = { 0x17050, 2 },
+ [GCC_UNIPHY1_XPCS_ARES] = { 0x17060, 2 },
+ [GCC_UNIPHY2_XPCS_ARES] = { 0x17070, 2 },
+	[GCC_USB1_BCR] = { 0x3c000, 0 },
+	[GCC_QUSB2_1_PHY_BCR] = { 0x3c030, 0 },
+};
+
+#define IPQ_APPS_ID 5424 /* some unique value */
+
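+/*
+ * Interconnect paths provided by GCC: each master/slave pair is backed
+ * by the listed bus clock, which the icc-clk framework scales according
+ * to bandwidth votes.
+ */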
+static const struct qcom_icc_hws_data icc_ipq5424_hws[] = {
+ { MASTER_ANOC_PCIE0, SLAVE_ANOC_PCIE0, GCC_ANOC_PCIE0_1LANE_M_CLK },
+ { MASTER_CNOC_PCIE0, SLAVE_CNOC_PCIE0, GCC_CNOC_PCIE0_1LANE_S_CLK },
+ { MASTER_ANOC_PCIE1, SLAVE_ANOC_PCIE1, GCC_ANOC_PCIE1_1LANE_M_CLK },
+ { MASTER_CNOC_PCIE1, SLAVE_CNOC_PCIE1, GCC_CNOC_PCIE1_1LANE_S_CLK },
+ { MASTER_ANOC_PCIE2, SLAVE_ANOC_PCIE2, GCC_ANOC_PCIE2_2LANE_M_CLK },
+ { MASTER_CNOC_PCIE2, SLAVE_CNOC_PCIE2, GCC_CNOC_PCIE2_2LANE_S_CLK },
+ { MASTER_ANOC_PCIE3, SLAVE_ANOC_PCIE3, GCC_ANOC_PCIE3_2LANE_M_CLK },
+ { MASTER_CNOC_PCIE3, SLAVE_CNOC_PCIE3, GCC_CNOC_PCIE3_2LANE_S_CLK },
+ { MASTER_CNOC_USB, SLAVE_CNOC_USB, GCC_CNOC_USB_CLK },
+};
+
+static const struct of_device_id gcc_ipq5424_match_table[] = {
+ { .compatible = "qcom,ipq5424-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq5424_match_table);
+
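+/* 32-bit MMIO register space; fast_io selects spinlock-based regmap locking */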
+static const struct regmap_config gcc_ipq5424_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3f024,
+ .fast_io = true,
+};
+
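+/* Fixed-factor clocks with no register interface, registered directly as clk_hws */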
+static struct clk_hw *gcc_ipq5424_hws[] = {
+ &gpll0_div2.hw,
+ &gcc_xo_div4_clk_src.hw,
+ &gcc_qdss_tsctr_div2_clk_src.hw,
+ &gcc_qdss_dap_sync_clk_src.hw,
+ &gcc_eud_at_div_clk_src.hw,
+};
+
+static const struct qcom_cc_desc gcc_ipq5424_desc = {
+ .config = &gcc_ipq5424_regmap_config,
+ .clks = gcc_ipq5424_clocks,
+ .num_clks = ARRAY_SIZE(gcc_ipq5424_clocks),
+ .resets = gcc_ipq5424_resets,
+ .num_resets = ARRAY_SIZE(gcc_ipq5424_resets),
+ .clk_hws = gcc_ipq5424_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_ipq5424_hws),
+ .icc_hws = icc_ipq5424_hws,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5424_hws),
+};
+
+static int gcc_ipq5424_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_ipq5424_desc);
+}
+
+static struct platform_driver gcc_ipq5424_driver = {
+ .probe = gcc_ipq5424_probe,
+ .driver = {
+ .name = "qcom,gcc-ipq5424",
+ .of_match_table = gcc_ipq5424_match_table,
+ .sync_state = icc_sync_state,
+ },
+};
+
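+/*
+ * GCC supplies clocks needed early in boot, so register the driver at
+ * core_initcall time rather than via module_platform_driver().
+ */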
+static int __init gcc_ipq5424_init(void)
+{
+ return platform_driver_register(&gcc_ipq5424_driver);
+}
+core_initcall(gcc_ipq5424_init);
+
+static void __exit gcc_ipq5424_exit(void)
+{
+ platform_driver_unregister(&gcc_ipq5424_driver);
+}
+module_exit(gcc_ipq5424_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. GCC IPQ5424 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index ab0f7fc665a9..d861191b0c85 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -4194,10 +4194,9 @@ static const struct alpha_pll_config ubi32_pll_config = {
.test_ctl_hi_val = 0x4000,
};
+/* 1200 MHz configuration */
static const struct alpha_pll_config nss_crypto_pll_config = {
.l = 0x32,
- .alpha = 0x0,
- .alpha_hi = 0x0,
.config_ctl_val = 0x4001055b,
.main_output_mask = BIT(0),
.pre_div_val = 0x0,
@@ -4206,7 +4205,6 @@ static const struct alpha_pll_config nss_crypto_pll_config = {
.post_div_mask = GENMASK(11, 8),
.vco_mask = GENMASK(21, 20),
.vco_val = 0x0,
- .alpha_en_mask = BIT(24),
};
static struct clk_hw *gcc_ipq6018_hws[] = {
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
index 645109f75b46..6bb66a7e1fb6 100644
--- a/drivers/clk/qcom/gcc-ipq9574.c
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -2645,24 +2645,6 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = {
},
};
-static struct clk_branch gcc_q6ss_boot_clk = {
- .halt_reg = 0x25080,
- .halt_check = BRANCH_HALT_SKIP,
- .clkr = {
- .enable_reg = 0x25080,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_boot_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &system_noc_bfdcd_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_nssnoc_snoc_clk = {
.halt_reg = 0x17028,
.clkr = {
@@ -2733,91 +2715,6 @@ static struct clk_rcg2 wcss_ahb_clk_src = {
},
};
-static struct clk_branch gcc_q6_ahb_clk = {
- .halt_reg = 0x25014,
- .clkr = {
- .enable_reg = 0x25014,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_ahb_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6_ahb_s_clk = {
- .halt_reg = 0x25018,
- .clkr = {
- .enable_reg = 0x25018,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_ahb_s_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_ecahb_clk = {
- .halt_reg = 0x25058,
- .clkr = {
- .enable_reg = 0x25058,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_ecahb_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_acmt_clk = {
- .halt_reg = 0x2505c,
- .clkr = {
- .enable_reg = 0x2505c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_acmt_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_sys_noc_wcss_ahb_clk = {
- .halt_reg = 0x2e030,
- .clkr = {
- .enable_reg = 0x2e030,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_sys_noc_wcss_ahb_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_ahb_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static const struct freq_tbl ftbl_wcss_axi_m_clk_src[] = {
F(24000000, P_XO, 1, 0, 0),
F(133333333, P_GPLL0, 6, 0, 0),
@@ -2838,23 +2735,6 @@ static struct clk_rcg2 wcss_axi_m_clk_src = {
},
};
-static struct clk_branch gcc_anoc_wcss_axi_m_clk = {
- .halt_reg = 0x2e0a8,
- .clkr = {
- .enable_reg = 0x2e0a8,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_anoc_wcss_axi_m_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &wcss_axi_m_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static const struct freq_tbl ftbl_qdss_at_clk_src[] = {
F(240000000, P_GPLL4, 5, 0, 0),
{ }
@@ -2873,40 +2753,6 @@ static struct clk_rcg2 qdss_at_clk_src = {
},
};
-static struct clk_branch gcc_q6ss_atbm_clk = {
- .halt_reg = 0x2501c,
- .clkr = {
- .enable_reg = 0x2501c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_atbm_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_at_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_atb_clk = {
- .halt_reg = 0x2503c,
- .clkr = {
- .enable_reg = 0x2503c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_atb_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_at_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_nssnoc_atb_clk = {
.halt_reg = 0x17014,
.clkr = {
@@ -3143,40 +2989,6 @@ static struct clk_fixed_factor qdss_tsctr_div2_clk_src = {
},
};
-static struct clk_branch gcc_q6_tsctr_1to2_clk = {
- .halt_reg = 0x25020,
- .clkr = {
- .enable_reg = 0x25020,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_tsctr_1to2_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_tsctr_div2_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_nts_clk = {
- .halt_reg = 0x25040,
- .clkr = {
- .enable_reg = 0x25040,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_nts_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_tsctr_div2_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_qdss_tsctr_div2_clk = {
.halt_reg = 0x2d044,
.clkr = {
@@ -3351,74 +3163,6 @@ static struct clk_branch gcc_qdss_tsctr_div16_clk = {
},
};
-static struct clk_branch gcc_q6ss_pclkdbg_clk = {
- .halt_reg = 0x25024,
- .clkr = {
- .enable_reg = 0x25024,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_pclkdbg_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_dap_sync_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_q6ss_trig_clk = {
- .halt_reg = 0x25068,
- .clkr = {
- .enable_reg = 0x25068,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6ss_trig_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_dap_sync_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_apb_clk = {
- .halt_reg = 0x25038,
- .clkr = {
- .enable_reg = 0x25038,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_apb_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_dap_sync_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_dbg_ifc_dapbus_clk = {
- .halt_reg = 0x25044,
- .clkr = {
- .enable_reg = 0x25044,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_dbg_ifc_dapbus_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &qdss_dap_sync_clk_src.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_qdss_dap_clk = {
.halt_reg = 0x2d058,
.clkr = {
@@ -3540,58 +3284,6 @@ static struct clk_rcg2 q6_axi_clk_src = {
},
};
-static struct clk_branch gcc_q6_axim_clk = {
- .halt_reg = 0x2500c,
- .clkr = {
- .enable_reg = 0x2500c,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_q6_axim_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &q6_axi_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_wcss_q6_tbu_clk = {
- .halt_reg = 0x12050,
- .halt_check = BRANCH_HALT_DELAY,
- .clkr = {
- .enable_reg = 0xb00c,
- .enable_mask = BIT(6),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_wcss_q6_tbu_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &q6_axi_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_mem_noc_q6_axi_clk = {
- .halt_reg = 0x19010,
- .clkr = {
- .enable_reg = 0x19010,
- .enable_mask = BIT(0),
- .hw.init = &(const struct clk_init_data) {
- .name = "gcc_mem_noc_q6_axi_clk",
- .parent_hws = (const struct clk_hw *[]) {
- &q6_axi_clk_src.clkr.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static const struct freq_tbl ftbl_q6_axim2_clk_src[] = {
F(342857143, P_GPLL4, 3.5, 0, 0),
{ }
@@ -4141,16 +3833,8 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
[GCC_NSSNOC_SNOC_1_CLK] = &gcc_nssnoc_snoc_1_clk.clkr,
[GCC_QDSS_ETR_USB_CLK] = &gcc_qdss_etr_usb_clk.clkr,
[WCSS_AHB_CLK_SRC] = &wcss_ahb_clk_src.clkr,
- [GCC_Q6_AHB_CLK] = &gcc_q6_ahb_clk.clkr,
- [GCC_Q6_AHB_S_CLK] = &gcc_q6_ahb_s_clk.clkr,
- [GCC_WCSS_ECAHB_CLK] = &gcc_wcss_ecahb_clk.clkr,
- [GCC_WCSS_ACMT_CLK] = &gcc_wcss_acmt_clk.clkr,
- [GCC_SYS_NOC_WCSS_AHB_CLK] = &gcc_sys_noc_wcss_ahb_clk.clkr,
[WCSS_AXI_M_CLK_SRC] = &wcss_axi_m_clk_src.clkr,
- [GCC_ANOC_WCSS_AXI_M_CLK] = &gcc_anoc_wcss_axi_m_clk.clkr,
[QDSS_AT_CLK_SRC] = &qdss_at_clk_src.clkr,
- [GCC_Q6SS_ATBM_CLK] = &gcc_q6ss_atbm_clk.clkr,
- [GCC_WCSS_DBG_IFC_ATB_CLK] = &gcc_wcss_dbg_ifc_atb_clk.clkr,
[GCC_NSSNOC_ATB_CLK] = &gcc_nssnoc_atb_clk.clkr,
[GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
[GCC_SYS_NOC_AT_CLK] = &gcc_sys_noc_at_clk.clkr,
@@ -4163,27 +3847,18 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
[QDSS_TRACECLKIN_CLK_SRC] = &qdss_traceclkin_clk_src.clkr,
[GCC_QDSS_TRACECLKIN_CLK] = &gcc_qdss_traceclkin_clk.clkr,
[QDSS_TSCTR_CLK_SRC] = &qdss_tsctr_clk_src.clkr,
- [GCC_Q6_TSCTR_1TO2_CLK] = &gcc_q6_tsctr_1to2_clk.clkr,
- [GCC_WCSS_DBG_IFC_NTS_CLK] = &gcc_wcss_dbg_ifc_nts_clk.clkr,
[GCC_QDSS_TSCTR_DIV2_CLK] = &gcc_qdss_tsctr_div2_clk.clkr,
[GCC_QDSS_TS_CLK] = &gcc_qdss_ts_clk.clkr,
[GCC_QDSS_TSCTR_DIV4_CLK] = &gcc_qdss_tsctr_div4_clk.clkr,
[GCC_NSS_TS_CLK] = &gcc_nss_ts_clk.clkr,
[GCC_QDSS_TSCTR_DIV8_CLK] = &gcc_qdss_tsctr_div8_clk.clkr,
[GCC_QDSS_TSCTR_DIV16_CLK] = &gcc_qdss_tsctr_div16_clk.clkr,
- [GCC_Q6SS_PCLKDBG_CLK] = &gcc_q6ss_pclkdbg_clk.clkr,
- [GCC_Q6SS_TRIG_CLK] = &gcc_q6ss_trig_clk.clkr,
- [GCC_WCSS_DBG_IFC_APB_CLK] = &gcc_wcss_dbg_ifc_apb_clk.clkr,
- [GCC_WCSS_DBG_IFC_DAPBUS_CLK] = &gcc_wcss_dbg_ifc_dapbus_clk.clkr,
[GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
[GCC_QDSS_APB2JTAG_CLK] = &gcc_qdss_apb2jtag_clk.clkr,
[GCC_QDSS_TSCTR_DIV3_CLK] = &gcc_qdss_tsctr_div3_clk.clkr,
[QPIC_IO_MACRO_CLK_SRC] = &qpic_io_macro_clk_src.clkr,
[GCC_QPIC_IO_MACRO_CLK] = &gcc_qpic_io_macro_clk.clkr,
[Q6_AXI_CLK_SRC] = &q6_axi_clk_src.clkr,
- [GCC_Q6_AXIM_CLK] = &gcc_q6_axim_clk.clkr,
- [GCC_WCSS_Q6_TBU_CLK] = &gcc_wcss_q6_tbu_clk.clkr,
- [GCC_MEM_NOC_Q6_AXI_CLK] = &gcc_mem_noc_q6_axi_clk.clkr,
[Q6_AXIM2_CLK_SRC] = &q6_axim2_clk_src.clkr,
[NSSNOC_MEMNOC_BFDCD_CLK_SRC] = &nssnoc_memnoc_bfdcd_clk_src.clkr,
[GCC_NSSNOC_MEMNOC_CLK] = &gcc_nssnoc_memnoc_clk.clkr,
@@ -4207,7 +3882,6 @@ static struct clk_regmap *gcc_ipq9574_clks[] = {
[GCC_UNIPHY1_SYS_CLK] = &gcc_uniphy1_sys_clk.clkr,
[GCC_UNIPHY2_SYS_CLK] = &gcc_uniphy2_sys_clk.clkr,
[GCC_CMN_12GPLL_SYS_CLK] = &gcc_cmn_12gpll_sys_clk.clkr,
- [GCC_Q6SS_BOOT_CLK] = &gcc_q6ss_boot_clk.clkr,
[UNIPHY_SYS_CLK_SRC] = &uniphy_sys_clk_src.clkr,
[NSS_TS_CLK_SRC] = &nss_ts_clk_src.clkr,
[GCC_ANOC_PCIE0_1LANE_M_CLK] = &gcc_anoc_pcie0_1lane_m_clk.clkr,
@@ -4384,7 +4058,7 @@ static const struct qcom_reset_map gcc_ipq9574_resets[] = {
#define IPQ_APPS_ID 9574 /* some unique value */
-static struct qcom_icc_hws_data icc_ipq9574_hws[] = {
+static const struct qcom_icc_hws_data icc_ipq9574_hws[] = {
{ MASTER_ANOC_PCIE0, SLAVE_ANOC_PCIE0, GCC_ANOC_PCIE0_1LANE_M_CLK },
{ MASTER_SNOC_PCIE0, SLAVE_SNOC_PCIE0, GCC_SNOC_PCIE0_1LANE_S_CLK },
{ MASTER_ANOC_PCIE1, SLAVE_ANOC_PCIE1, GCC_ANOC_PCIE1_1LANE_M_CLK },
diff --git a/drivers/clk/qcom/gcc-mdm9607.c b/drivers/clk/qcom/gcc-mdm9607.c
index 6e6068b168e6..07f1b78d737a 100644
--- a/drivers/clk/qcom/gcc-mdm9607.c
+++ b/drivers/clk/qcom/gcc-mdm9607.c
@@ -535,7 +535,7 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
};
static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
- .cmd_rcgr = 0x6044,
+ .cmd_rcgr = 0x7044,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index c3cfd572e7c1..5ca003c9bfba 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -131,6 +131,7 @@ static struct clk_alpha_pll gpll1_out_main = {
/* 930MHz configuration */
static const struct alpha_pll_config gpll3_config = {
.l = 48,
+ .alpha_hi = 0x70,
.alpha = 0x0,
.alpha_en_mask = BIT(24),
.post_div_mask = 0xf << 8,
diff --git a/drivers/clk/qcom/gcc-qcs615.c b/drivers/clk/qcom/gcc-qcs615.c
new file mode 100644
index 000000000000..9695446bc2a3
--- /dev/null
+++ b/drivers/clk/qcom/gcc-qcs615.c
@@ -0,0 +1,3034 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs615-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
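+/*
+ * The DT_* indices refer to entries in the device tree "clocks" property;
+ * the P_* values are the driver-internal parent identifiers used by the
+ * parent_map tables below.
+ */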
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_AUX2_DIV,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL3_OUT_MAIN,
+ P_GPLL3_OUT_MAIN_DIV,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_GPLL8_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* Fixed /2 divider on GPLL0, used in place of the PLL's normal postdiv */
+static struct clk_fixed_factor gpll0_out_aux2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_out_aux2_div",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* Fixed /2 divider on GPLL3, used in place of the PLL's normal postdiv */
+static struct clk_fixed_factor gpll3_out_aux2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll3_out_aux2_div",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x13000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
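+/* Each entry is { register value, divisor }: field value 1 selects /2 */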
+static const struct clk_div_table post_div_table_gpll6_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_main = {
+ .offset = 0x13000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll6_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x1a000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x1b000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_main = {
+ .offset = 0x1b000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll8_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll8_out_main",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
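+/*
+ * Each gcc_parent_map_N entry pairs a P_* identifier with the hardware mux
+ * select value; the matching gcc_parent_data_N array lists the parents in
+ * the same order, so the two tables must stay in sync.
+ */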
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6_out_main.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL3_OUT_MAIN_DIV, 4 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll3_out_aux2_div.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_AUX2_DIV, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_aux2_div.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL3_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
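+/*
+ * F(rate, parent, pre_div, m, n): half-integer pre-dividers such as 2.5
+ * are valid because the RCG encodes the divider field as (2 * div - 1).
+ */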
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_ptp_clk_src = {
+ .cmd_rcgr = 0x6038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_emac_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_ptp_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_rgmii_clk_src[] = {
+ F(2500000, P_BI_TCXO, 1, 25, 192),
+ F(5000000, P_BI_TCXO, 1, 25, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(125000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_rgmii_clk_src = {
+ .cmd_rcgr = 0x601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_rgmii_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b02c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = {
+ .cmd_rcgr = 0x6f014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_refgen_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = {
+ F(60000000, P_GPLL0_OUT_AUX2_DIV, 5, 0, 0),
+ F(133250000, P_GPLL3_OUT_MAIN_DIV, 4, 0, 0),
+ F(266500000, P_GPLL3_OUT_MAIN_DIV, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_qspi_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_core_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_AUX2_DIV, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_AUX2_DIV, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_AUX2_DIV, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_AUX2_DIV, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_AUX2_DIV, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_AUX2_DIV, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_AUX2_DIV, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_AUX2_DIV, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GPLL0_OUT_AUX2_DIV, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_AUX2_DIV, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_AUX2_DIV, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_AUX2_DIV, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
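+/*
+ * The QUP serial-engine RCGs keep their init data in separately named
+ * structs, following the pattern other Qualcomm GCC drivers use so the
+ * data can be referenced again at probe time (e.g. for DFS support).
+ */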
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x173a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x174d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x17608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_AUX2_DIV, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2_DIV, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
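+/*
+ * SD/eMMC clocks use clk_rcg2_floor_ops so rate requests are rounded
+ * down, never overclocking the card interface.
+ */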
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x12028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x12010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2_DIV, 3, 0, 0),
+ F(202000000, P_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2_DIV, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2_DIV, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x7707c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2_DIV, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_sec_master_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_sec_master_clk_src = {
+ .cmd_rcgr = 0xa601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb20_sec_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xa6034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb2_sec_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb2_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0xa6060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_AUX2_DIV, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_AUX2_DIV, 15, 0, 0),
+ F(40000000, P_GPLL0_OUT_AUX2_DIV, 7.5, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb2_sec_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_vsensor_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_vsensor_clk_src = {
+ .cmd_rcgr = 0x7a018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_vsensor_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_vsensor_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
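+/*
+ * Branches with hwcg_reg/hwcg_bit set support hardware clock gating and
+ * can be gated automatically by hardware when idle.
+ */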
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_sec_axi_clk = {
+ .halt_reg = 0xa6084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa6084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0xf07c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_east_clk = {
+ .halt_reg = 0x6a008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6a008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy_east_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_west_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy_west_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
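+/*
+ * Branches enabled through the shared vote registers (0x52004, 0x5200c)
+ * set enable_mask to their vote bit there, while halt_reg still points
+ * at the clock's own status register.
+ */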
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_sec_axi_clk = {
+ .halt_reg = 0xa609c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa609c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0xf078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
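+/*
+ * The CPU subsystem AHB clocks are CLK_IS_CRITICAL: the CPUs depend on
+ * this bus, so the clock framework must never gate them.
+ */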
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_out_aux2_div.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_axi_clk = {
+ .halt_reg = 0x6010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_ptp_clk = {
+ .halt_reg = 0x6034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_rgmii_clk = {
+ .halt_reg = 0x6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac_slv_ahb_clk = {
+ .halt_reg = 0x6014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_out_aux2_div.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie0_phy_refgen_clk = {
+ .halt_reg = 0x6f02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie0_phy_refgen_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_refgen_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
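+/*
+ * The PCIe pipe clock is sourced from the PHY, so its halt bit is not
+ * reliable before PHY init; BRANCH_HALT_SKIP avoids checking it.
+ */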
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qspi_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qspi_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x173a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x174d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x17604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx1_usb2_clkref_clk = {
+ .halt_reg = 0x8c030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_rx1_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx3_usb2_clkref_clk = {
+ .halt_reg = 0x8c038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_rx3_usb2_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x12008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x1200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x4819c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_card_clkref_clk = {
+ .halt_reg = 0x8c004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_card_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77044,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x77078,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77040,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_master_clk = {
+ .halt_reg = 0xa6010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_mock_utmi_clk = {
+ .halt_reg = 0xa6018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sec_sleep_clk = {
+ .halt_reg = 0xa6014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_prim_clkref_clk = {
+ .halt_reg = 0x8c028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_clkref_clk = {
+ .halt_reg = 0x8c018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_aux_clk = {
+ .halt_reg = 0xa6050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb2_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_com_aux_clk = {
+ .halt_reg = 0xa6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb2_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sec_phy_pipe_clk = {
+ .halt_reg = 0xa6058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xa6058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb2_sec_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c014,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8c014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_clkref_clk = {
+ .halt_reg = 0x8c008,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
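+/*
+ * Clocks with no register interface of their own (from their names, the
+ * GPLL0/GPLL3 aux2 post-dividers) are registered as plain clk_hws rather
+ * than regmap clocks.
+ */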
+static struct clk_hw *gcc_qcs615_hws[] = {
+ [GPLL0_OUT_AUX2_DIV] = &gpll0_out_aux2_div.hw,
+ [GPLL3_OUT_AUX2_DIV] = &gpll3_out_aux2_div.hw,
+};
+
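+/*
+ * GDSCs (globally distributed switch controllers) are the switchable power
+ * domains for the peripherals; the *_wait_val fields program the
+ * enable/disable settling delays.
+ */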
+static struct gdsc emac_gdsc = {
+ .gdscr = 0x6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "emac_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb20_sec_gdsc = {
+ .gdscr = 0xa6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb20_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0xf004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
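+/*
+ * The HLOS1 vote GDSCs are controlled through a voting register shared with
+ * other masters, hence the VOTABLE flag.
+ */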
+static struct gdsc hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_audio_tbu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_tbu2_gdsc = {
+ .gdscr = 0x7d048,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_tbu2",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc = {
+ .gdscr = 0x7d04c,
+ .pd = {
+ .name = "hlos1_vote_aggre_noc_mmu_pcie_tbu",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d050,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d054,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = {
+ .gdscr = 0x7d058,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_qcs615_clocks[] = {
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_SEC_AXI_CLK] = &gcc_aggre_usb2_sec_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AHB2PHY_EAST_CLK] = &gcc_ahb2phy_east_clk.clkr,
+ [GCC_AHB2PHY_WEST_CLK] = &gcc_ahb2phy_west_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB2_SEC_AXI_CLK] = &gcc_cfg_noc_usb2_sec_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EMAC_AXI_CLK] = &gcc_emac_axi_clk.clkr,
+ [GCC_EMAC_PTP_CLK] = &gcc_emac_ptp_clk.clkr,
+ [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr,
+ [GCC_EMAC_RGMII_CLK] = &gcc_emac_rgmii_clk.clkr,
+ [GCC_EMAC_RGMII_CLK_SRC] = &gcc_emac_rgmii_clk_src.clkr,
+ [GCC_EMAC_SLV_AHB_CLK] = &gcc_emac_slv_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
+ [GCC_RX3_USB2_CLKREF_CLK] = &gcc_rx3_usb2_clkref_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_SEC_MASTER_CLK] = &gcc_usb20_sec_master_clk.clkr,
+ [GCC_USB20_SEC_MASTER_CLK_SRC] = &gcc_usb20_sec_master_clk_src.clkr,
+ [GCC_USB20_SEC_MOCK_UTMI_CLK] = &gcc_usb20_sec_mock_utmi_clk.clkr,
+ [GCC_USB20_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb20_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB20_SEC_SLEEP_CLK] = &gcc_usb20_sec_sleep_clk.clkr,
+ [GCC_USB2_PRIM_CLKREF_CLK] = &gcc_usb2_prim_clkref_clk.clkr,
+ [GCC_USB2_SEC_CLKREF_CLK] = &gcc_usb2_sec_clkref_clk.clkr,
+ [GCC_USB2_SEC_PHY_AUX_CLK] = &gcc_usb2_sec_phy_aux_clk.clkr,
+ [GCC_USB2_SEC_PHY_AUX_CLK_SRC] = &gcc_usb2_sec_phy_aux_clk_src.clkr,
+ [GCC_USB2_SEC_PHY_COM_AUX_CLK] = &gcc_usb2_sec_phy_com_aux_clk.clkr,
+ [GCC_USB2_SEC_PHY_PIPE_CLK] = &gcc_usb2_sec_phy_pipe_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_MAIN] = &gpll6_out_main.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL8] = &gpll8.clkr,
+ [GPLL8_OUT_MAIN] = &gpll8_out_main.clkr,
+};
+
+static struct gdsc *gcc_qcs615_gdscs[] = {
+ [EMAC_GDSC] = &emac_gdsc,
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB20_SEC_GDSC] = &usb20_sec_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] = &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] = &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] = &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC] = &hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+};
+
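+/* Per-block BCR reset register offsets, exposed through the reset controller. */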
+static const struct qcom_reset_map gcc_qcs615_resets[] = {
+ [GCC_EMAC_BCR] = { 0x6000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0xd000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0xd004 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB2_PHY_SEC_BCR] = { 0x50018 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50020 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x5001c },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB20_SEC_BCR] = { 0xa6000 },
+ [GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x50008 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x50000 },
+ [GCC_SDCC1_BCR] = { 0x12000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+};
+
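+/*
+ * The QUP RCGs support DFS (dynamic frequency switching); at probe their
+ * frequency tables are read back from hardware by qcom_cc_register_rcg_dfs().
+ */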
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
+
+static const struct regmap_config gcc_qcs615_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xa609c,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_qcs615_desc = {
+ .config = &gcc_qcs615_regmap_config,
+ .clk_hws = gcc_qcs615_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_qcs615_hws),
+ .clks = gcc_qcs615_clocks,
+ .num_clks = ARRAY_SIZE(gcc_qcs615_clocks),
+ .resets = gcc_qcs615_resets,
+ .num_resets = ARRAY_SIZE(gcc_qcs615_resets),
+ .gdscs = gcc_qcs615_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_qcs615_gdscs),
+};
+
+static const struct of_device_id gcc_qcs615_match_table[] = {
+ { .compatible = "qcom,qcs615-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_qcs615_match_table);
+
+static int gcc_qcs615_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_qcs615_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Disable the GPLL0 active input to MM blocks and GPU
+ * via MISC registers.
+ */
+ regmap_update_bits(regmap, 0x0b084, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x9b000, BIT(0), BIT(0));
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0xb008); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb044); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb00c); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb048); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x71004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0xb040); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x480040); /* GCC_CPUSS_GNOC_CLK */
+
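+ /* Populate the QUP RCG frequency tables from the firmware-programmed DFS registers */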
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_qcs615_desc, regmap);
+}
+
+static struct platform_driver gcc_qcs615_driver = {
+ .probe = gcc_qcs615_probe,
+ .driver = {
+ .name = "gcc-qcs615",
+ .of_match_table = gcc_qcs615_match_table,
+ },
+};
+
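+/*
+ * Register early (subsys_initcall) so the clock controller is available
+ * before its consumer devices probe.
+ */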
+static int __init gcc_qcs615_init(void)
+{
+ return platform_driver_register(&gcc_qcs615_driver);
+}
+subsys_initcall(gcc_qcs615_init);
+
+static void __exit gcc_qcs615_exit(void)
+{
+ platform_driver_unregister(&gcc_qcs615_driver);
+}
+module_exit(gcc_qcs615_exit);
+
+MODULE_DESCRIPTION("QTI GCC QCS615 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-qcs8300.c b/drivers/clk/qcom/gcc-qcs8300.c
new file mode 100644
index 000000000000..80831c7dea3b
--- /dev/null
+++ b/drivers/clk/qcom/gcc-qcs8300.c
@@ -0,0 +1,3640 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,qcs8300-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
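+/* Indices into the "clocks" list of the device-tree binding. */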
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_PCIE_0_PIPE_CLK,
+ DT_PCIE_1_PIPE_CLK,
+ DT_PCIE_PHY_AUX_CLK,
+ DT_RXC0_REF_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK,
+};
+
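+/*
+ * Logical parent identifiers; the parent_map tables below translate them
+ * into hardware mux select values.
+ */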
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_PCIE_PHY_AUX_CLK,
+ P_RXC0_REF_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK,
+};
+
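+/*
+ * The GPLLs are configured by boot firmware; the fixed Lucid EVO ops only
+ * enable/disable them and never change the rate.
+ */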
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
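+/* Register value 0x1 selects the divide-by-2 even output tap. */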
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x4b028,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
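+/*
+ * Each parent_map must stay in step with its clk_parent_data array: the
+ * same position describes the same parent.
+ */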
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_RXC0_REF_CLK, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_RXC0_REF_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_PCIE_PHY_AUX_CLK, 1 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_PCIE_PHY_AUX_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_PCIE_PHY_AUX_CLK, 1 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_PCIE_PHY_AUX_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_16[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_16[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_17[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_18[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PRIM_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
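+/*
+ * Simple register mux between the PHY aux clock and TCXO; the "closest"
+ * ops pick whichever parent best matches the requested rate.
+ */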
+static struct clk_regmap_mux gcc_pcie_0_phy_aux_clk_src = {
+ .reg = 0xa9074,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_8,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
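+/*
+ * The PCIe pipe clock muxes use clk_regmap_phy_mux, which parks the source
+ * on the safe reference input whenever the PHY pipe clock is disabled.
+ */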
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0xa906c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_0_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_phy_aux_clk_src = {
+ .reg = 0x77074,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x7706c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_1_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x83060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_15,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x830d0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_16,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_16,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_16),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x83050,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_17,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x1b068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_18,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
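+/*
+ * freq_tbl entries are F(rate, parent, pre-divider, M, N); M/N of 0 means
+ * the MND divider is bypassed.
+ */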
+static const struct freq_tbl ftbl_gcc_emac0_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_phy_aux_clk_src = {
+ .cmd_rcgr = 0xb6028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_ptp_clk_src[] = {
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 8, 0, 0),
+ F(230400000, P_GCC_GPLL4_OUT_MAIN, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_ptp_clk_src = {
+ .cmd_rcgr = 0xb6060,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_emac0_ptp_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_ptp_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac0_rgmii_clk_src[] = {
+ F(5000000, P_GCC_GPLL0_OUT_EVEN, 10, 1, 6),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 8, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac0_rgmii_clk_src = {
+ .cmd_rcgr = 0xb6048,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_rgmii_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x70004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x71004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x62004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp4_clk_src = {
+ .cmd_rcgr = 0x1e004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp5_clk_src = {
+ .cmd_rcgr = 0x1f004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0xa9078,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xa9054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x77078,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x77054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x3f010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
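+/*
+ * The QUP init data is a separate, non-const object, presumably so DFS
+ * registration can update it, as with the qcs615 DFS clocks above.
+ */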
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x23154,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x23288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x233bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x234f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x23624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x23758,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x2388c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x239c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x24154,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x24288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x243bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x244f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x24624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x24758,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x2488c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x249c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap3_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(403200000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap3_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap3_s0_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap3_s0_clk_src = {
+ .cmd_rcgr = 0xc4158,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_qupv3_wrap3_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap3_s0_clk_src_init,
+};
+
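+/*
+ * The SDCC1 RCGs use clk_rcg2_floor_ops, which round a requested rate
+ * down so the card and ICE clocks never run faster than asked for.
+ */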
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ F(384000000, P_GCC_GPLL9_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x20014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_12,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x2002c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
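+/* UFS PHY clock sources */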
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x8302c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(201600000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403200000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x83074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_14,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x830a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x8308c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
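+/* USB 2.0 and USB 3.0 primary controller clock sources */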
+static const struct freq_tbl ftbl_gcc_usb20_master_clk_src[] = {
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_master_clk_src = {
+ .cmd_rcgr = 0x1c028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb20_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1c040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
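+/*
+ * F() stores the pre-divider as (2 * h - 1), so the half-integer dividers
+ * 4.5 and 2.5 below are expressed exactly.
+ */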
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x1b028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1b040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1b06c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_emac0_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
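+/*
+ * Pipe and mock-UTMI post-dividers use clk_regmap_div_ro_ops: the divider
+ * programmed by firmware is only read back, never changed.
+ */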
+static struct clk_regmap_div gcc_pcie_0_pipe_div_clk_src = {
+ .reg = 0xa9070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_1_pipe_div_clk_src = {
+ .reg = 0x77070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap3_s0_div_clk_src = {
+ .reg = 0xc4288,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_s0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap3_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb20_mock_utmi_postdiv_clk_src = {
+ .reg = 0x1c058,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x1b058,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
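+/*
+ * Branch (gate) clocks. Gates whose enable_reg points into the 0x4bxxx
+ * block are switched through shared vote registers rather than a bit in
+ * their own CBCR.
+ */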
+static struct clk_branch gcc_aggre_noc_qupv3_axi_clk = {
+ .halt_reg = 0x8e200,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8e200,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_qupv3_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x830d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x830d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x830d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_prim_axi_clk = {
+ .halt_reg = 0x1c05c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1c05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1c05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x1b084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1b084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1b084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy0_clk = {
+ .halt_reg = 0x76004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x76004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x76004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy2_clk = {
+ .halt_reg = 0x76008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x76008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x76008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy2_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy3_clk = {
+ .halt_reg = 0x7600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ahb2phy3_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x44004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x44004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_throttle_xo_clk = {
+ .halt_reg = 0x32024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x32024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_throttle_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_prim_axi_clk = {
+ .halt_reg = 0x1c060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1c060,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1c060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1b088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1b088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1b088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x7d164,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d164,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d164,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x33010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_edp_ref_clkref_en = {
+ .halt_reg = 0x97448,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x97448,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_edp_ref_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_axi_clk = {
+ .halt_reg = 0xb6018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb6018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb6018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_phy_aux_clk = {
+ .halt_reg = 0xb6024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb6024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_ptp_clk = {
+ .halt_reg = 0xb6040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb6040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_ptp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_ptp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_rgmii_clk = {
+ .halt_reg = 0xb6044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb6044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_rgmii_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_emac0_rgmii_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_emac0_slv_ahb_clk = {
+ .halt_reg = 0xb6020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb6020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb6020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_emac0_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x70000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x70000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x71000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x62000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp4_clk = {
+ .halt_reg = 0x1e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp5_clk = {
+ .halt_reg = 0x1f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_center_pipeline_clk = {
+ .halt_reg = 0x7d160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d160,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d160,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_center_pipeline_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7d010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x7d01c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_tcu_throttle_ahb_clk = {
+ .halt_reg = 0x7d008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_tcu_throttle_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_tcu_throttle_clk = {
+ .halt_reg = 0x7d014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7d014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_tcu_throttle_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0xa9038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0xa902c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xa902c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0xa9024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_aux_clk = {
+ .halt_reg = 0xa9030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0xa9050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0xa9040,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipediv2_clk = {
+ .halt_reg = 0xa9048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0xa901c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0xa9018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x77038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7702c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x77024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_aux_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x77050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x77040,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipediv2_clk = {
+ .halt_reg = 0x77048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipediv2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_clkref_en = {
+ .halt_reg = 0x9746c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x9746c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_throttle_cfg_clk = {
+ .halt_reg = 0xb2034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b020,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_throttle_cfg_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3f00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x3f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x3f008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_rot_ahb_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_disp_rot_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x34008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x34008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3400c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3400c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcpu_ahb_clk = {
+ .halt_reg = 0x34010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x34010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
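+/* Per-serial-engine QUPv3 branch clocks, gated via the vote registers */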
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x23018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x2300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x2314c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x23280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x233b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x234e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x2361c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x23750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x23884,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x239b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x24018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x2400c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x2414c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x24280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x243b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x244e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x2461c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x24750,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x24884,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x249b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b018,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_core_2x_clk = {
+ .halt_reg = 0xc4018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_core_clk = {
+ .halt_reg = 0xc400c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_qspi_clk = {
+ .halt_reg = 0xc4284,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_qspi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap3_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap3_s0_clk = {
+ .halt_reg = 0xc4150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap3_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap3_s0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x23008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x24004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x24008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x24008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_3_m_ahb_clk = {
+ .halt_reg = 0xc4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc4004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_3_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_3_s_ahb_clk = {
+ .halt_reg = 0xc4008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc4008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_3_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x20044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x20044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x20044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sgmi_clkref_en = {
+ .halt_reg = 0x97034,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x97034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sgmi_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x83020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x83018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x8306c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8306c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8306c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x830a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x830a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x830a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x83028,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x83028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x830c0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x830c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x83024,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x83024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x83064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x83064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x83064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0x1c018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0x1c024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0x1c020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x1b018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x1b024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x1b020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x1b05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x1b060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x1b064,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x1b064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1b064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_clkref_en = {
+ .halt_reg = 0x97468,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x97468,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x34014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x34014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x3401c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3401c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
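+/*
+ * GDSC power domains. The PCIe GDSCs are VOTABLE and carry a collapse
+ * vote at 0x4b104, so they power off only once every voter agrees.
+ */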
+static struct gdsc gcc_emac0_gdsc = {
+ .gdscr = 0xb6004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_emac0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_pcie_0_gdsc = {
+ .gdscr = 0xa9004,
+ .collapse_ctrl = 0x4b104,
+ .collapse_mask = BIT(0),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc gcc_pcie_1_gdsc = {
+ .gdscr = 0x77004,
+ .collapse_ctrl = 0x4b104,
+ .collapse_mask = BIT(1),
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x83004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc gcc_usb20_prim_gdsc = {
+ .gdscr = 0x1c004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb20_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x1b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
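+/* Map from the DT binding's clock indices to their regmap clocks */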
+static struct clk_regmap *gcc_qcs8300_clocks[] = {
+ [GCC_AGGRE_NOC_QUPV3_AXI_CLK] = &gcc_aggre_noc_qupv3_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_PRIM_AXI_CLK] = &gcc_aggre_usb2_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AHB2PHY0_CLK] = &gcc_ahb2phy0_clk.clkr,
+ [GCC_AHB2PHY2_CLK] = &gcc_ahb2phy2_clk.clkr,
+ [GCC_AHB2PHY3_CLK] = &gcc_ahb2phy3_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_XO_CLK] = &gcc_camera_throttle_xo_clk.clkr,
+ [GCC_CFG_NOC_USB2_PRIM_AXI_CLK] = &gcc_cfg_noc_usb2_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EDP_REF_CLKREF_EN] = &gcc_edp_ref_clkref_en.clkr,
+ [GCC_EMAC0_AXI_CLK] = &gcc_emac0_axi_clk.clkr,
+ [GCC_EMAC0_PHY_AUX_CLK] = &gcc_emac0_phy_aux_clk.clkr,
+ [GCC_EMAC0_PHY_AUX_CLK_SRC] = &gcc_emac0_phy_aux_clk_src.clkr,
+ [GCC_EMAC0_PTP_CLK] = &gcc_emac0_ptp_clk.clkr,
+ [GCC_EMAC0_PTP_CLK_SRC] = &gcc_emac0_ptp_clk_src.clkr,
+ [GCC_EMAC0_RGMII_CLK] = &gcc_emac0_rgmii_clk.clkr,
+ [GCC_EMAC0_RGMII_CLK_SRC] = &gcc_emac0_rgmii_clk_src.clkr,
+ [GCC_EMAC0_SLV_AHB_CLK] = &gcc_emac0_slv_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GP4_CLK] = &gcc_gp4_clk.clkr,
+ [GCC_GP4_CLK_SRC] = &gcc_gp4_clk_src.clkr,
+ [GCC_GP5_CLK] = &gcc_gp5_clk.clkr,
+ [GCC_GP5_CLK_SRC] = &gcc_gp5_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CENTER_PIPELINE_CLK] = &gcc_gpu_memnoc_gfx_center_pipeline_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_TCU_THROTTLE_AHB_CLK] = &gcc_gpu_tcu_throttle_ahb_clk.clkr,
+ [GCC_GPU_TCU_THROTTLE_CLK] = &gcc_gpu_tcu_throttle_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_AUX_CLK] = &gcc_pcie_0_phy_aux_clk.clkr,
+ [GCC_PCIE_0_PHY_AUX_CLK_SRC] = &gcc_pcie_0_phy_aux_clk_src.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_DIV_CLK_SRC] = &gcc_pcie_0_pipe_div_clk_src.clkr,
+ [GCC_PCIE_0_PIPEDIV2_CLK] = &gcc_pcie_0_pipediv2_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK] = &gcc_pcie_1_phy_aux_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK_SRC] = &gcc_pcie_1_phy_aux_clk_src.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_DIV_CLK_SRC] = &gcc_pcie_1_pipe_div_clk_src.clkr,
+ [GCC_PCIE_1_PIPEDIV2_CLK] = &gcc_pcie_1_pipediv2_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_CLKREF_EN] = &gcc_pcie_clkref_en.clkr,
+ [GCC_PCIE_THROTTLE_CFG_CLK] = &gcc_pcie_throttle_cfg_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_DISP_ROT_AHB_CLK] = &gcc_qmip_disp_rot_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCPU_AHB_CLK] = &gcc_qmip_video_vcpu_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP3_CORE_2X_CLK] = &gcc_qupv3_wrap3_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP3_CORE_CLK] = &gcc_qupv3_wrap3_core_clk.clkr,
+ [GCC_QUPV3_WRAP3_QSPI_CLK] = &gcc_qupv3_wrap3_qspi_clk.clkr,
+ [GCC_QUPV3_WRAP3_S0_CLK] = &gcc_qupv3_wrap3_s0_clk.clkr,
+ [GCC_QUPV3_WRAP3_S0_CLK_SRC] = &gcc_qupv3_wrap3_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP3_S0_DIV_CLK_SRC] = &gcc_qupv3_wrap3_s0_div_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_3_M_AHB_CLK] = &gcc_qupv3_wrap_3_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_3_S_AHB_CLK] = &gcc_qupv3_wrap_3_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SGMI_CLKREF_EN] = &gcc_sgmi_clkref_en.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_MASTER_CLK_SRC] = &gcc_usb20_master_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK_SRC] = &gcc_usb20_mock_utmi_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb20_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB_CLKREF_EN] = &gcc_usb_clkref_en.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
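+
+/*
+ * The array indices above come from the dt-bindings header for this
+ * controller, so consumers reference clocks by those constants.
+ * Hypothetical example node (address and names illustrative only):
+ *
+ *	sdhc: mmc@... {
+ *		clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ *			 <&gcc GCC_SDCC1_APPS_CLK>;
+ *		clock-names = "iface", "core";
+ *	};
+ */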
+
+static struct gdsc *gcc_qcs8300_gdscs[] = {
+ [GCC_EMAC0_GDSC] = &gcc_emac0_gdsc,
+ [GCC_PCIE_0_GDSC] = &gcc_pcie_0_gdsc,
+ [GCC_PCIE_1_GDSC] = &gcc_pcie_1_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB20_PRIM_GDSC] = &gcc_usb20_prim_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+};
+
+static const struct qcom_reset_map gcc_qcs8300_resets[] = {
+ [GCC_EMAC0_BCR] = { 0xb6000 },
+ [GCC_PCIE_0_BCR] = { 0xa9000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0xbf000 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0xbf008 },
+ [GCC_PCIE_0_PHY_BCR] = { 0xa9144 },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0xbf00c },
+ [GCC_PCIE_1_BCR] = { 0x77000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0xae084 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0xae090 },
+ [GCC_PCIE_1_PHY_BCR] = { 0xae08c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0xae094 },
+ [GCC_SDCC1_BCR] = { 0x20000 },
+ [GCC_UFS_PHY_BCR] = { 0x83000 },
+ [GCC_USB20_PRIM_BCR] = { 0x1c000 },
+ [GCC_USB2_PHY_PRIM_BCR] = { 0x5c01c },
+ [GCC_USB2_PHY_SEC_BCR] = { 0x5c020 },
+ [GCC_USB30_PRIM_BCR] = { 0x1b000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x5c008 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x5c000 },
+ [GCC_USB3_PHY_TERT_BCR] = { 0x5c024 },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x5c00c },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x5c010 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x5c004 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x5c014 },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x5c018 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x76000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x34014, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x3401c, 2 },
+ [GCC_VIDEO_BCR] = { 0x34000 },
+};
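+
+/*
+ * Each reset entry names a BCR register offset; by default bit 0 of
+ * the BCR is asserted. The *_CLK_ARES entries instead specify bit 2,
+ * the asynchronous-reset bit inside the corresponding video AXI CBCR.
+ */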
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap3_s0_clk_src),
+};
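+
+/*
+ * The QUP serial-engine RCGs listed here support hardware Dynamic
+ * Frequency Switching; qcom_cc_register_rcg_dfs() in probe detects
+ * whether DFS is enabled and, if so, switches each RCG to the
+ * DFS-aware ops with the per-level frequencies read back from hardware.
+ */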
+
+static const struct regmap_config gcc_qcs8300_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x472cffc,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_qcs8300_desc = {
+ .config = &gcc_qcs8300_regmap_config,
+ .clks = gcc_qcs8300_clocks,
+ .num_clks = ARRAY_SIZE(gcc_qcs8300_clocks),
+ .resets = gcc_qcs8300_resets,
+ .num_resets = ARRAY_SIZE(gcc_qcs8300_resets),
+ .gdscs = gcc_qcs8300_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_qcs8300_gdscs),
+};
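+
+/*
+ * qcom_cc_really_probe() registers all of the clocks, resets and power
+ * domains described by this struct against the regmap in one step.
+ */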
+
+static const struct of_device_id gcc_qcs8300_match_table[] = {
+ { .compatible = "qcom,qcs8300-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_qcs8300_match_table);
+
+static int gcc_qcs8300_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_qcs8300_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /* Keep some clocks always enabled */
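+	/*
+	 * qcom_branch_set_clk_en() sets the enable bit in each CBCR; these
+	 * AHB/XO clocks are not exposed to consumers, so they are switched
+	 * on once here and left running.
+	 */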
+ qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_CAMERA_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x32020); /* GCC_CAMERA_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x33004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x33018); /* GCC_DISP_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x7d004); /* GCC_GPU_CFG_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x34004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x34024); /* GCC_VIDEO_XO_CLK */
+
+	/* Keep FORCE_MEM_CORE_ON set for the UFS PHY ICE core clock */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_qcs8300_desc, regmap);
+}
+
+static struct platform_driver gcc_qcs8300_driver = {
+ .probe = gcc_qcs8300_probe,
+ .driver = {
+ .name = "gcc-qcs8300",
+ .of_match_table = gcc_qcs8300_match_table,
+ },
+};
+
+static int __init gcc_qcs8300_init(void)
+{
+ return platform_driver_register(&gcc_qcs8300_driver);
+}
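+/*
+ * Register at subsys_initcall time so the clock controller is ready
+ * before its consumer devices probe.
+ */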
+subsys_initcall(gcc_qcs8300_init);
+
+static void __exit gcc_qcs8300_exit(void)
+{
+ platform_driver_unregister(&gcc_qcs8300_driver);
+}
+module_exit(gcc_qcs8300_exit);
+
+MODULE_DESCRIPTION("QTI GCC QCS8300 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-sar2130p.c b/drivers/clk/qcom/gcc-sar2130p.c
new file mode 100644
index 000000000000..475e2cda3618
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sar2130p.c
@@ -0,0 +1,2366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021-2023, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sar2130p-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Must match the order of the clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+
+ DT_PCIE_0_PIPE,
+ DT_PCIE_1_PIPE,
+
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE,
+};
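+
+/*
+ * The .index values used in clk_parent_data below refer to these
+ * positions, i.e. to the order of the entries in this GCC node's
+ * "clocks" property. Hypothetical sketch (phandle names illustrative
+ * only):
+ *
+ *	clocks = <&rpmhcc RPMH_CXO_CLK>, <&sleep_clk>,
+ *		 <&pcie0_phy>, <&pcie1_phy>, <&usb_qmpphy>;
+ */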
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_EVEN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL5_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_EVEN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
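+
+/*
+ * clk_div_table entries are { register value, divisor }: value 0x1
+ * selects divide-by-2. With GPLL0 at 600 MHz (implied by the
+ * 100 MHz = main / 6 rows in the frequency tables below),
+ * gcc_gpll0_out_even therefore runs at 300 MHz.
+ */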
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll9_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll9_out_even = {
+ .offset = 0x9000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll9_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll9_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll9.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
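+
+/*
+ * Each parent_map entry pairs a P_* identifier with the value written
+ * to the RCG source-select field; the corresponding clk_parent_data
+ * array lists the actual parents in the same order.
+ */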
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_EVEN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9_out_even.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_EVEN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE },
+ { .index = DT_BI_TCXO },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x7b070,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_0_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
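+
+/*
+ * clk_regmap_phy_mux models the PCIe pipe clock selector: the register
+ * toggles between the PHY pipe clock and TCXO, so the source can be
+ * parked on a running reference while the PHY is powered down.
+ */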
+
+static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x9d06c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_PCIE_1_PIPE,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x4906c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_8,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ddrss_spad_clk_src[] = {
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ F(426400000, P_GCC_GPLL1_OUT_MAIN, 2.5, 0, 0),
+ F(500000000, P_GCC_GPLL7_OUT_MAIN, 1, 0, 0),
+ { }
+};
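+
+/*
+ * F() rows are { rate, parent, pre-divider, m, n }; the macro stores
+ * the pre-divider as (2 * div - 1), which is how half-integer values
+ * such as the 2.5 above are encoded.
+ */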
+
+static struct clk_rcg2 gcc_ddrss_spad_clk_src = {
+ .cmd_rcgr = 0x70004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ddrss_spad_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_spad_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x74004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x75004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x76004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x7b074,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x7b058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x9d070,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x9d054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x43010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
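+
+/*
+ * The fractional rows above use the M/N counter: with
+ * gcc_gpll0_out_even at 300 MHz, 300 MHz * 384 / 15625 = 7.3728 MHz
+ * for the first row, matching the usual QUP UART baud-rate multiples.
+ */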
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x28018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s1_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x28150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x28288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x283c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x284f8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x28630,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x2e018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x2e150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x2e288,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x2e3c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x2e4f8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x2e630,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GCC_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GCC_GPLL9_OUT_EVEN, 2, 0, 0),
+ F(384000000, P_GCC_GPLL9_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x26018,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x2603c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x4902c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x49044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x49070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x4905c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
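+
+/*
+ * clk_regmap_div_ro_ops: this post-divider is read-only from Linux;
+ * the rate is reported from the register but never reprogrammed.
+ */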
+
+static struct clk_branch gcc_aggre_noc_pcie_1_axi_clk = {
+ .halt_reg = 0x7b094,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7b094,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x4908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4908c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x20034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x20034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x49088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x49088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x81154,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x81154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81154,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_clk = {
+ .halt_reg = 0x9d098,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9d098,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_spad_clk = {
+ .halt_reg = 0x70000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x70000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x70000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_spad_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ddrss_spad_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x37008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x37008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x37008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x74000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x74000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x75000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x76000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x9b010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x9b018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x9b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_iris_ss_hf_axi1_clk = {
+ .halt_reg = 0x42030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x42030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_iris_ss_hf_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_iris_ss_spd_axi1_clk = {
+ .halt_reg = 0x70020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x70020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x70020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_iris_ss_spd_axi1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ddrss_spad_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x7b03c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x7b038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x7b02c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7b02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x7b054,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x7b048,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x7b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x7b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x9d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x9d034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x9d028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9d028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x9d050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x9d044,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x9d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x9d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x43004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x43004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x43008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x9b008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x7b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x42014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x42008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_lsr_ahb_clk = {
+ .halt_reg = 0x4204c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4204c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4204c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_lsr_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x42010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x4200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x33034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x33024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x28144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x2827c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x283b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x284ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x28624,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x3317c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x3316c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x2e00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x2e144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x2e27c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x2e3b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x2e4ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x2e624,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x2e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2e004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x2e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x26030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x49018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x49028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x49024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x49060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x49064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x49068,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x49068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x42018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x42024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x42024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
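+/*
+ * GDSCs (globally distributed switch controllers) are the power domains
+ * managed by this clock controller; VOTABLE domains are enabled by voting
+ * through a shared register rather than by direct control.
+ */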
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x8d204,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_sf0_gdsc = {
+ .gdscr = 0x8d054,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_sf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
+ .gdscr = 0x8d05c,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
+ .gdscr = 0x8d060,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x7b004,
+ .collapse_ctrl = 0x62200,
+ .collapse_mask = BIT(0),
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_0_phy_gdsc = {
+ .gdscr = 0x7c000,
+ .collapse_ctrl = 0x62200,
+ .collapse_mask = BIT(3),
+ .pd = {
+ .name = "pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x9d004,
+ .collapse_ctrl = 0x62200,
+ .collapse_mask = BIT(1),
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc pcie_1_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .collapse_ctrl = 0x62200,
+ .collapse_mask = BIT(4),
+ .pd = {
+ .name = "pcie_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x49004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc usb3_phy_gdsc = {
+ .gdscr = 0x60018,
+ .pd = {
+ .name = "usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_sar2130p_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_1_AXI_CLK] = &gcc_aggre_noc_pcie_1_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_CLK] = &gcc_ddrss_pcie_sf_clk.clkr,
+ [GCC_DDRSS_SPAD_CLK] = &gcc_ddrss_spad_clk.clkr,
+ [GCC_DDRSS_SPAD_CLK_SRC] = &gcc_ddrss_spad_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL5] = &gcc_gpll5.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPLL9_OUT_EVEN] = &gcc_gpll9_out_even.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_IRIS_SS_HF_AXI1_CLK] = &gcc_iris_ss_hf_axi1_clk.clkr,
+ [GCC_IRIS_SS_SPD_AXI1_CLK] = &gcc_iris_ss_spd_axi1_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_LSR_AHB_CLK] = &gcc_qmip_video_lsr_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
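+/*
+ * Block resets: plain entries assert a BCR, while entries carrying an
+ * explicit .reg/.bit toggle the async-reset (ARES) bit of an individual
+ * branch clock, optionally waiting .udelay microseconds to settle.
+ */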
+static const struct qcom_reset_map gcc_sar2130p_resets[] = {
+ [GCC_DISPLAY_BCR] = { 0x37000 },
+ [GCC_GPU_BCR] = { 0x9b000 },
+ [GCC_PCIE_0_BCR] = { 0x7b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x7c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x7c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x7c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x7c028 },
+ [GCC_PCIE_1_BCR] = { 0x9d000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x9e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x9e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x9e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x9e024 },
+ [GCC_PCIE_PHY_BCR] = { 0x7f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 },
+ [GCC_PDM_BCR] = { 0x43000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x28000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x2e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x22000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x22004 },
+ [GCC_SDCC1_BCR] = { 0x26000 },
+ [GCC_USB30_PRIM_BCR] = { 0x49000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x60008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x60014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x60000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x6000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x60004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x60010 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { .reg = 0x42018, .bit = 2, .udelay = 1000 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { .reg = 0x42024, .bit = 2, .udelay = 1000 },
+ [GCC_VIDEO_BCR] = { 0x42000 },
+ [GCC_IRIS_SS_HF_AXI_CLK_ARES] = { .reg = 0x42030, .bit = 2 },
+ [GCC_IRIS_SS_SPD_AXI_CLK_ARES] = { .reg = 0x70020, .bit = 2 },
+ [GCC_DDRSS_SPAD_CLK_ARES] = { .reg = 0x70000, .bit = 2 },
+};
+
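+/*
+ * The QUP serial-engine RCGs support hardware DFS (dynamic frequency
+ * switching); qcom_cc_register_rcg_dfs() reads their per-level
+ * configuration back from the DFS registers at probe time.
+ */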
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
+
+static struct gdsc *gcc_sar2130p_gdscs[] = {
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_HF0_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_SF0_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_sf0_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_0_PHY_GDSC] = &pcie_0_phy_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [PCIE_1_PHY_GDSC] = &pcie_1_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [USB3_PHY_GDSC] = &usb3_phy_gdsc,
+};
+
+static const struct regmap_config gcc_sar2130p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f1030,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sar2130p_desc = {
+ .config = &gcc_sar2130p_regmap_config,
+ .clks = gcc_sar2130p_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sar2130p_clocks),
+ .resets = gcc_sar2130p_resets,
+ .num_resets = ARRAY_SIZE(gcc_sar2130p_resets),
+ .gdscs = gcc_sar2130p_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sar2130p_gdscs),
+};
+
+static const struct of_device_id gcc_sar2130p_match_table[] = {
+ { .compatible = "qcom,sar2130p-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sar2130p_match_table);
+
+static int gcc_sar2130p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sar2130p_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x37004); /* GCC_DISP_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x42004); /* GCC_VIDEO_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x42028); /* GCC_VIDEO_XO_CLK */
+ qcom_branch_set_clk_en(regmap, 0x9b004); /* GCC_GPU_CFG_AHB_CLK */
+
+ /* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */
+ regmap_write(regmap, 0x62204, 0x0);
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_sar2130p_desc, regmap);
+}
+
+static struct platform_driver gcc_sar2130p_driver = {
+ .probe = gcc_sar2130p_probe,
+ .driver = {
+ .name = "gcc-sar2130p",
+ .of_match_table = gcc_sar2130p_match_table,
+ },
+};
+
+static int __init gcc_sar2130p_init(void)
+{
+ return platform_driver_register(&gcc_sar2130p_driver);
+}
+subsys_initcall(gcc_sar2130p_init);
+
+static void __exit gcc_sar2130p_exit(void)
+{
+ platform_driver_unregister(&gcc_sar2130p_driver);
+}
+module_exit(gcc_sar2130p_exit);
+
+MODULE_DESCRIPTION("QTI GCC SAR2130P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index dc3aa7014c3e..6d0f9cede5cf 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -284,11 +284,6 @@ static struct clk_rcg2 gcc_sdm670_cpuss_rbcpr_clk_src = {
};
static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
- F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
{ }
};
@@ -302,7 +297,7 @@ static struct clk_rcg2 gcc_gp1_clk_src = {
.name = "gcc_gp1_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -316,7 +311,7 @@ static struct clk_rcg2 gcc_gp2_clk_src = {
.name = "gcc_gp2_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -330,7 +325,7 @@ static struct clk_rcg2 gcc_gp3_clk_src = {
.name = "gcc_gp3_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_gp_ops,
},
};
@@ -454,7 +449,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.name = "gcc_qupv3_wrap0_s0_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
@@ -470,7 +465,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.name = "gcc_qupv3_wrap0_s1_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -486,7 +481,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.name = "gcc_qupv3_wrap0_s2_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -502,7 +497,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.name = "gcc_qupv3_wrap0_s3_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -518,7 +513,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.name = "gcc_qupv3_wrap0_s4_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -534,7 +529,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.name = "gcc_qupv3_wrap0_s5_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -550,7 +545,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
.name = "gcc_qupv3_wrap0_s6_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -566,7 +561,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
.name = "gcc_qupv3_wrap0_s7_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -582,7 +577,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.name = "gcc_qupv3_wrap1_s0_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -598,7 +593,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.name = "gcc_qupv3_wrap1_s1_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -614,7 +609,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.name = "gcc_qupv3_wrap1_s2_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -630,7 +625,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.name = "gcc_qupv3_wrap1_s3_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -646,7 +641,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.name = "gcc_qupv3_wrap1_s4_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -662,7 +657,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.name = "gcc_qupv3_wrap1_s5_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -678,7 +673,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.name = "gcc_qupv3_wrap1_s6_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -694,7 +689,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.name = "gcc_qupv3_wrap1_s7_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index a811fad2aa27..74346dc02606 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -182,6 +182,14 @@ static const struct clk_parent_data gcc_parent_data_2_ao[] = {
{ .hw = &gpll0_out_odd.clkr.hw },
};
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
static const struct parent_map gcc_parent_map_4[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@@ -701,13 +709,12 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
.cmd_rcgr = 0x3a0b0,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = gcc_parent_map_3,
.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_phy_phy_aux_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -764,13 +771,12 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
.cmd_rcgr = 0x1a034,
.mnd_width = 0,
.hid_width = 5,
+ .parent_map = gcc_parent_map_3,
.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_prim_mock_utmi_clk_src",
- .parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- },
- .num_parents = 1,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
index c445c271678a..65d7d52bce03 100644
--- a/drivers/clk/qcom/gcc-sm8450.c
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -26,6 +26,8 @@ enum {
P_BI_TCXO,
P_GCC_GPLL0_OUT_EVEN,
P_GCC_GPLL0_OUT_MAIN,
+ P_SM8475_GCC_GPLL2_OUT_EVEN,
+ P_SM8475_GCC_GPLL3_OUT_EVEN,
P_GCC_GPLL4_OUT_MAIN,
P_GCC_GPLL9_OUT_MAIN,
P_PCIE_1_PHY_AUX_CLK,
@@ -36,6 +38,15 @@ enum {
P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
};
+static struct clk_init_data sm8475_gcc_gpll0_init = {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+};
+
static struct clk_alpha_pll gcc_gpll0 = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -53,6 +64,15 @@ static struct clk_alpha_pll gcc_gpll0 = {
},
};
+static struct clk_init_data sm8475_gcc_gpll0_out_even_init = {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ole_ops,
+};
+
static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
{ 0x1, 2 },
{ }
@@ -75,6 +95,49 @@ static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
},
};
+static struct clk_alpha_pll sm8475_gcc_gpll2 = {
+ .offset = 0x2000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll2",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll sm8475_gcc_gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+ },
+ },
+};
+
+static struct clk_init_data sm8475_gcc_gpll4_init = {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+};
+
static struct clk_alpha_pll gcc_gpll4 = {
.offset = 0x4000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -92,6 +155,15 @@ static struct clk_alpha_pll gcc_gpll4 = {
},
};
+static struct clk_init_data sm8475_gcc_gpll9_init = {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ole_ops,
+};
+
static struct clk_alpha_pll gcc_gpll9 = {
.offset = 0x9000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
@@ -153,6 +225,22 @@ static const struct clk_parent_data gcc_parent_data_3[] = {
{ .fw_name = "bi_tcxo" },
};
+static const struct parent_map sm8475_gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SM8475_GCC_GPLL2_OUT_EVEN, 2 },
+ { P_SM8475_GCC_GPLL3_OUT_EVEN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data sm8475_gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &sm8475_gcc_gpll2.clkr.hw },
+ { .hw = &sm8475_gcc_gpll3.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
static const struct parent_map gcc_parent_map_5[] = {
{ P_PCIE_1_PHY_AUX_CLK, 0 },
{ P_BI_TCXO, 2 },
@@ -915,6 +1003,16 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
.clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
};
+static const struct freq_tbl sm8475_ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37000000, P_GCC_GPLL9_OUT_MAIN, 16, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(148000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(400000, P_BI_TCXO, 12, 1, 4),
F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
@@ -963,6 +1061,25 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
},
};
+static const struct freq_tbl sm8475_ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GCC_GPLL0_OUT_MAIN, 1, 0, 0),
+ F(806400000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0),
+ F(850000000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_init_data sm8475_gcc_ufs_phy_axi_clk_src_init = {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = sm8475_gcc_parent_data_3,
+	.num_parents = ARRAY_SIZE(sm8475_gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
@@ -987,6 +1104,24 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
},
};
+static const struct freq_tbl sm8475_ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_GCC_GPLL0_OUT_MAIN, 1, 0, 0),
+ F(806400000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0),
+ F(850000000, P_SM8475_GCC_GPLL2_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_init_data sm8475_gcc_ufs_phy_ice_core_clk_src_init = {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = sm8475_gcc_parent_data_3,
+	.num_parents = ARRAY_SIZE(sm8475_gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
@@ -1032,6 +1167,14 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
},
};
+static struct clk_init_data sm8475_gcc_ufs_phy_unipro_core_clk_src_init = {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = sm8475_gcc_parent_data_3,
+	.num_parents = ARRAY_SIZE(sm8475_gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
.cmd_rcgr = 0x8708c,
.mnd_width = 0,
@@ -3166,6 +3309,8 @@ static struct clk_regmap *gcc_sm8450_clocks[] = {
[GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
[GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
[GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+ [SM8475_GCC_GPLL2] = NULL,
+ [SM8475_GCC_GPLL3] = NULL,
};
static const struct qcom_reset_map gcc_sm8450_resets[] = {
@@ -3259,6 +3404,7 @@ static const struct qcom_cc_desc gcc_sm8450_desc = {
static const struct of_device_id gcc_sm8450_match_table[] = {
{ .compatible = "qcom,gcc-sm8450" },
+ { .compatible = "qcom,sm8475-gcc" },
{ }
};
MODULE_DEVICE_TABLE(of, gcc_sm8450_match_table);
@@ -3277,6 +3423,39 @@ static int gcc_sm8450_probe(struct platform_device *pdev)
if (ret)
return ret;
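+	/*
+	 * SM8475 reuses this GCC block but with Lucid OLE PLLs: swap in the
+	 * OLE register layouts and init data, and the wider UFS/SDCC2
+	 * frequency tables, before the clocks are registered.
+	 */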
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-gcc")) {
+ /* Update GCC PLL0 */
+ gcc_gpll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ gcc_gpll0.clkr.hw.init = &sm8475_gcc_gpll0_init;
+ gcc_gpll0_out_even.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ gcc_gpll0_out_even.clkr.hw.init = &sm8475_gcc_gpll0_out_even_init;
+
+ /* Update GCC PLL4 */
+ gcc_gpll4.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ gcc_gpll4.clkr.hw.init = &sm8475_gcc_gpll4_init;
+
+ /* Update GCC PLL9 */
+ gcc_gpll9.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+ gcc_gpll9.clkr.hw.init = &sm8475_gcc_gpll9_init;
+
+ gcc_sdcc2_apps_clk_src.freq_tbl = sm8475_ftbl_gcc_sdcc2_apps_clk_src;
+
+ gcc_ufs_phy_axi_clk_src.parent_map = sm8475_gcc_parent_map_3;
+ gcc_ufs_phy_axi_clk_src.freq_tbl = sm8475_ftbl_gcc_ufs_phy_axi_clk_src;
+ gcc_ufs_phy_axi_clk_src.clkr.hw.init = &sm8475_gcc_ufs_phy_axi_clk_src_init;
+
+ gcc_ufs_phy_ice_core_clk_src.parent_map = sm8475_gcc_parent_map_3;
+ gcc_ufs_phy_ice_core_clk_src.freq_tbl = sm8475_ftbl_gcc_ufs_phy_ice_core_clk_src;
+ gcc_ufs_phy_ice_core_clk_src.clkr.hw.init = &sm8475_gcc_ufs_phy_ice_core_clk_src_init;
+
+ gcc_ufs_phy_unipro_core_clk_src.parent_map = sm8475_gcc_parent_map_3;
+ gcc_ufs_phy_unipro_core_clk_src.freq_tbl = sm8475_ftbl_gcc_ufs_phy_ice_core_clk_src;
+ gcc_ufs_phy_unipro_core_clk_src.clkr.hw.init = &sm8475_gcc_ufs_phy_unipro_core_clk_src_init;
+
+ gcc_sm8450_desc.clks[SM8475_GCC_GPLL2] = &sm8475_gcc_gpll2.clkr;
+ gcc_sm8450_desc.clks[SM8475_GCC_GPLL3] = &sm8475_gcc_gpll3.clkr;
+ }
+
/* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14));
@@ -3312,5 +3491,5 @@ static void __exit gcc_sm8450_exit(void)
}
module_exit(gcc_sm8450_exit);
-MODULE_DESCRIPTION("QTI GCC SM8450 Driver");
+MODULE_DESCRIPTION("QTI GCC SM8450 / SM8475 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
index 5abaeddd6afc..862a9bf73bcb 100644
--- a/drivers/clk/qcom/gcc-sm8550.c
+++ b/drivers/clk/qcom/gcc-sm8550.c
@@ -3003,7 +3003,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
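+	/* Lowest allowed power state is retention rather than full collapse */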
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3014,7 +3014,7 @@ static struct gdsc pcie_0_phy_gdsc = {
.pd = {
.name = "pcie_0_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3025,7 +3025,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3036,7 +3036,7 @@ static struct gdsc pcie_1_phy_gdsc = {
.pd = {
.name = "pcie_1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index fd9d6544bdd5..9dd5c48f33be 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -3437,7 +3437,7 @@ static struct gdsc pcie_0_gdsc = {
.pd = {
.name = "pcie_0_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3448,7 +3448,7 @@ static struct gdsc pcie_0_phy_gdsc = {
.pd = {
.name = "pcie_0_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3459,7 +3459,7 @@ static struct gdsc pcie_1_gdsc = {
.pd = {
.name = "pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
@@ -3470,7 +3470,7 @@ static struct gdsc pcie_1_phy_gdsc = {
.pd = {
.name = "pcie_1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
};
diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c
new file mode 100644
index 000000000000..b36d70976095
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8750.c
@@ -0,0 +1,3274 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8750-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
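+/* Indices into the 'clocks' property of the DT node, in binding order */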
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_PCIE_0_PIPE_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_elu_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .enable_reg = 0x52020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_elu_ops,
+ },
+ },
+};
+
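+/*
+ * Each parent_map pairs a P_* source with its mux select value; the
+ * clk_parent_data array that accompanies it must list the same parents
+ * in the same order.
+ */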
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
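+/*
+ * The PCIe PIPE clock mux is enabled by selecting the PHY pipe clock and
+ * disabled by falling back to TCXO, keeping a valid source ticking while
+ * the PHY is powered down.
+ */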
+static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x6b080,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_0_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x77068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_9,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770ec,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x77058,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x39070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_12,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
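+/*
+ * F(rate, parent, pre-divider, M, N): each row describes one supported
+ * RCG configuration; an M/N of 0 leaves the MND counter unused for that
+ * rate.
+ */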
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x6b084,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x6b068,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
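+/*
+ * The I2C serial engines only run at the 19.2 MHz XO rate, so all ten
+ * RCGs reuse the single-entry PCIe aux frequency table above.
+ */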
+static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
+ .cmd_rcgr = 0x17008,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
+ .cmd_rcgr = 0x17024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
+ .cmd_rcgr = 0x17040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
+ .cmd_rcgr = 0x1705c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
+ .cmd_rcgr = 0x17078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
+ .cmd_rcgr = 0x17094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
+ .cmd_rcgr = 0x170b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
+ .cmd_rcgr = 0x170cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
+ .cmd_rcgr = 0x170e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s8_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
+ .cmd_rcgr = 0x17104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s9_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+/*
+ * QSPI reference clock table: extends the common QUP SE rates with
+ * 150 MHz (GPLL0 even) and 250 MHz (GPLL7) entries.
+ */
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_qspi_ref_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
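+/*
+ * For the QUP SE clocks below, clk_init_data is a separate named
+ * struct rather than a compound literal. In other Qualcomm GCC drivers
+ * this pattern lets probe register the RCGs with the DFS (dynamic
+ * frequency switching) support code, which rewrites .ops; presumably
+ * the same applies here.
+ */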
+static struct clk_init_data gcc_qupv3_wrap1_qspi_ref_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = {
+ .cmd_rcgr = 0x188c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_qspi_ref_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_ref_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s3_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x182a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x183dc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18518,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x18654,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0x18790,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
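+/*
+ * Single 37.5 MHz rate shared by the QUP wrap2 IBI (I3C in-band
+ * interrupt) controller clocks.
+ */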
+static const struct freq_tbl ftbl_gcc_qupv3_wrap2_ibi_ctrl_0_clk_src[] = {
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_ibi_ctrl_0_clk_src = {
+ .cmd_rcgr = 0x1e9f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_qupv3_wrap2_ibi_ctrl_0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_0_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x1e014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x1e150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x1e28c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x1e3c8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x1e504,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x1e640,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s3_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap2_s6_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GCC_GPLL0_OUT_EVEN, 1, 64, 375),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(128000000, P_GCC_GPLL0_OUT_MAIN, 1, 16, 75),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
+ .cmd_rcgr = 0x1e77c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_qupv3_wrap2_s6_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0x1e8b8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
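+/*
+ * The SDCC RCGs use clk_rcg2_floor_ops: a requested rate is rounded
+ * down to the nearest table entry so the card is never overclocked.
+ */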
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1401c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1601c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x7708c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x770a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
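+/*
+ * F() encodes the pre-divider as (2 * div - 1), so the half-integer
+ * 4.5 and 2.5 dividers below are representable in hardware.
+ */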
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x39030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x39048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x39074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
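+/*
+ * These are not RCGs but fixed post-dividers; clk_regmap_div_ro_ops
+ * exposes the divider read-only, so the kernel never reprograms it.
+ */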
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0x1828c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x39060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
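+/*
+ * Branch (gate) clocks follow. halt_check selects how the halt bit is
+ * handled: BRANCH_HALT polls it on enable and disable, BRANCH_HALT_VOTED
+ * polls it only on enable (another master's vote may keep the clock
+ * running), BRANCH_HALT_SKIP never polls, and BRANCH_HALT_DELAY just
+ * delays because there is no reliable status bit.
+ */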
+static struct clk_branch gcc_aggre_noc_pcie_axi_clk = {
+ .halt_reg = 0x10068,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x10068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x770f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x39090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x39090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x39090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x10050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3908c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3908c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3908c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cnoc_pcie_sf_axi_clk = {
+ .halt_reg = 0x10058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x10058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cnoc_pcie_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
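+/* clk_branch2_aon_ops has no disable hook: this GPU AXI bus clock stays on */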
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x71150,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x71150,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_pcie_sf_qtb_clk = {
+ .halt_reg = 0x1007c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1007c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ddrss_pcie_sf_qtb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0_clk = {
+ .halt_reg = 0x9f008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0c_clk = {
+ .halt_reg = 0x9f018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9f018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9f018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gemnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gemnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
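+/*
+ * The GPLL0 feeds to the GPU clock controller have no local halt
+ * register; they are gated purely through vote bits in 0x52000.
+ */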
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x6b030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x6b064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b054,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x6b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_cmd_ahb_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_cmd_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x6b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_core_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s0_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s1_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s2_clk = {
+ .halt_reg = 0x1703c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s3_clk = {
+ .halt_reg = 0x17058,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s4_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s5_clk = {
+ .halt_reg = 0x17090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s6_clk = {
+ .halt_reg = 0x170ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s7_clk = {
+ .halt_reg = 0x170c8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s8_clk = {
+ .halt_reg = 0x170e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s8_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s8_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s9_clk = {
+ .halt_reg = 0x17100,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s9_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_i2c_s9_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_i2c_s_ahb_clk = {
+ .halt_reg = 0x23000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_i2c_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x2315c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x23148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_ref_clk = {
+ .halt_reg = 0x188bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_ref_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x1827c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x18290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x183cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18508,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x18644,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0x18780,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x232b4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x232a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_ibi_ctrl_2_clk = {
+ .halt_reg = 0x1e9ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9ec,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_ibi_ctrl_3_clk = {
+ .halt_reg = 0x1e9f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_ibi_ctrl_3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x1e140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x1e27c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x1e3b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x1e4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x1e630,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0x1e76c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0x1e8a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x23140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23140,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x23144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23144,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_ibi_2_ahb_clk = {
+ .halt_reg = 0x1e9e4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9e4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_ibi_2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_ibi_3_ahb_clk = {
+ .halt_reg = 0x1e9e8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1e9e8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_ibi_3_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x23298,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23298,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x2329c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2329c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x7707c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7707c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7707c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770bc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
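+/*
+ * The UFS RX/TX symbol clocks are sourced from the UFS PHY, where the
+ * halt bit is unreliable; BRANCH_HALT_DELAY merely inserts a delay.
+ */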
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770d8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x7702c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x7702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7706c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x3902c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3902c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x39028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x39064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x39068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x3906c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x3906c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3906c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x32028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gcc_pcie_0_gdsc = {
+ .gdscr = 0x6b004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(0),
+ .pd = {
+ .name = "gcc_pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_0_phy_gdsc = {
+ .gdscr = 0x6c000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .collapse_ctrl = 0x5214c,
+ .collapse_mask = BIT(2),
+ .pd = {
+ .name = "gcc_pcie_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_ufs_mem_phy_gdsc = {
+ .gdscr = 0x9e000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_ufs_mem_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x39004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_phy_gdsc = {
+ .gdscr = 0x50018,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gcc_sm8750_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_AXI_CLK] = &gcc_aggre_noc_pcie_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CNOC_PCIE_SF_AXI_CLK] = &gcc_cnoc_pcie_sf_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_QTB_CLK] = &gcc_ddrss_pcie_sf_qtb_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EVA_AXI0_CLK] = &gcc_eva_axi0_clk.clkr,
+ [GCC_EVA_AXI0C_CLK] = &gcc_eva_axi0c_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GEMNOC_GFX_CLK] = &gcc_gpu_gemnoc_gfx_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_CMD_AHB_CLK] = &gcc_qmip_camera_cmd_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_I2C_CORE_CLK] = &gcc_qupv3_i2c_core_clk.clkr,
+ [GCC_QUPV3_I2C_S0_CLK] = &gcc_qupv3_i2c_s0_clk.clkr,
+ [GCC_QUPV3_I2C_S0_CLK_SRC] = &gcc_qupv3_i2c_s0_clk_src.clkr,
+ [GCC_QUPV3_I2C_S1_CLK] = &gcc_qupv3_i2c_s1_clk.clkr,
+ [GCC_QUPV3_I2C_S1_CLK_SRC] = &gcc_qupv3_i2c_s1_clk_src.clkr,
+ [GCC_QUPV3_I2C_S2_CLK] = &gcc_qupv3_i2c_s2_clk.clkr,
+ [GCC_QUPV3_I2C_S2_CLK_SRC] = &gcc_qupv3_i2c_s2_clk_src.clkr,
+ [GCC_QUPV3_I2C_S3_CLK] = &gcc_qupv3_i2c_s3_clk.clkr,
+ [GCC_QUPV3_I2C_S3_CLK_SRC] = &gcc_qupv3_i2c_s3_clk_src.clkr,
+ [GCC_QUPV3_I2C_S4_CLK] = &gcc_qupv3_i2c_s4_clk.clkr,
+ [GCC_QUPV3_I2C_S4_CLK_SRC] = &gcc_qupv3_i2c_s4_clk_src.clkr,
+ [GCC_QUPV3_I2C_S5_CLK] = &gcc_qupv3_i2c_s5_clk.clkr,
+ [GCC_QUPV3_I2C_S5_CLK_SRC] = &gcc_qupv3_i2c_s5_clk_src.clkr,
+ [GCC_QUPV3_I2C_S6_CLK] = &gcc_qupv3_i2c_s6_clk.clkr,
+ [GCC_QUPV3_I2C_S6_CLK_SRC] = &gcc_qupv3_i2c_s6_clk_src.clkr,
+ [GCC_QUPV3_I2C_S7_CLK] = &gcc_qupv3_i2c_s7_clk.clkr,
+ [GCC_QUPV3_I2C_S7_CLK_SRC] = &gcc_qupv3_i2c_s7_clk_src.clkr,
+ [GCC_QUPV3_I2C_S8_CLK] = &gcc_qupv3_i2c_s8_clk.clkr,
+ [GCC_QUPV3_I2C_S8_CLK_SRC] = &gcc_qupv3_i2c_s8_clk_src.clkr,
+ [GCC_QUPV3_I2C_S9_CLK] = &gcc_qupv3_i2c_s9_clk.clkr,
+ [GCC_QUPV3_I2C_S9_CLK_SRC] = &gcc_qupv3_i2c_s9_clk_src.clkr,
+ [GCC_QUPV3_I2C_S_AHB_CLK] = &gcc_qupv3_i2c_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK] = &gcc_qupv3_wrap1_qspi_ref_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC] = &gcc_qupv3_wrap1_qspi_ref_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC] = &gcc_qupv3_wrap2_ibi_ctrl_0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK] = &gcc_qupv3_wrap2_ibi_ctrl_2_clk.clkr,
+ [GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK] = &gcc_qupv3_wrap2_ibi_ctrl_3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK] = &gcc_qupv3_wrap_2_ibi_2_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK] = &gcc_qupv3_wrap_2_ibi_3_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_sm8750_gdscs[] = {
+ [GCC_PCIE_0_GDSC] = &gcc_pcie_0_gdsc,
+ [GCC_PCIE_0_PHY_GDSC] = &gcc_pcie_0_phy_gdsc,
+ [GCC_UFS_MEM_PHY_GDSC] = &gcc_ufs_mem_phy_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB3_PHY_GDSC] = &gcc_usb3_phy_gdsc,
+};
+
+static const struct qcom_reset_map gcc_sm8750_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_EVA_BCR] = { 0x9f000 },
+ [GCC_EVA_AXI0_CLK_ARES] = { 0x9f008, 2 },
+ [GCC_EVA_AXI0C_CLK_ARES] = { 0x9f018, 2 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 },
+ [GCC_PCIE_RSCC_BCR] = { 0x11000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 },
+ [GCC_QUPV3_WRAPPER_I2C_BCR] = { 0x17000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0x39000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x32018, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32028, 2 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_ref_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+};
+
+static const struct regmap_config gcc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f41f0,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8750_desc = {
+ .config = &gcc_sm8750_regmap_config,
+ .clks = gcc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8750_clocks),
+ .resets = gcc_sm8750_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8750_resets),
+ .gdscs = gcc_sm8750_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8750_gdscs),
+};
+
+static const struct of_device_id gcc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8750_match_table);
+
+static int gcc_sm8750_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8750_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
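+	/* Register the QUP serial-engine RCGs for hardware DFS, where enabled */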
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /*
+ * Keep clocks always enabled:
+ * gcc_cam_bist_mclk_ahb_clk
+ * gcc_camera_ahb_clk
+ * gcc_camera_xo_clk
+ * gcc_disp_ahb_clk
+ * gcc_eva_ahb_clk
+ * gcc_eva_xo_clk
+ * gcc_gpu_cfg_ahb_clk
+ * gcc_video_ahb_clk
+ * gcc_video_xo_clk
+ * gcc_pcie_rscc_cfg_ahb_clk
+ * gcc_pcie_rscc_xo_clk
+ */
+ qcom_branch_set_clk_en(regmap, 0xa0004);
+ qcom_branch_set_clk_en(regmap, 0x26004);
+ qcom_branch_set_clk_en(regmap, 0x26034);
+ qcom_branch_set_clk_en(regmap, 0x27004);
+ qcom_branch_set_clk_en(regmap, 0x9f004);
+ qcom_branch_set_clk_en(regmap, 0x9f01c);
+ qcom_branch_set_clk_en(regmap, 0x71004);
+ qcom_branch_set_clk_en(regmap, 0x32004);
+ qcom_branch_set_clk_en(regmap, 0x32038);
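+	/*
+	 * The two 0x52010 votes below cover the last two entries in the
+	 * list above, gcc_pcie_rscc_cfg_ahb_clk and gcc_pcie_rscc_xo_clk,
+	 * which are enabled through the voting register rather than
+	 * through branch registers of their own.
+	 */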
+ regmap_update_bits(regmap, 0x52010, BIT(20), BIT(20));
+ regmap_update_bits(regmap, 0x52010, BIT(21), BIT(21));
+
+	/* FORCE_MEM_CORE_ON for the UFS PHY ICE core clock */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+
+ return qcom_cc_really_probe(&pdev->dev, &gcc_sm8750_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8750_driver = {
+ .probe = gcc_sm8750_probe,
+ .driver = {
+ .name = "gcc-sm8750",
+ .of_match_table = gcc_sm8750_match_table,
+ },
+};
+
+static int __init gcc_sm8750_init(void)
+{
+ return platform_driver_register(&gcc_sm8750_driver);
+}
+subsys_initcall(gcc_sm8750_init);
+
+static void __exit gcc_sm8750_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8750_driver);
+}
+module_exit(gcc_sm8750_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 0f578771071f..7288af845434 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -3123,7 +3123,7 @@ static struct clk_branch gcc_pcie_3_pipe_clk = {
static struct clk_branch gcc_pcie_3_pipediv2_clk = {
.halt_reg = 0x58060,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52020,
.enable_mask = BIT(5),
@@ -3248,7 +3248,7 @@ static struct clk_branch gcc_pcie_4_pipe_clk = {
static struct clk_branch gcc_pcie_4_pipediv2_clk = {
.halt_reg = 0x6b054,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52010,
.enable_mask = BIT(27),
@@ -3373,7 +3373,7 @@ static struct clk_branch gcc_pcie_5_pipe_clk = {
static struct clk_branch gcc_pcie_5_pipediv2_clk = {
.halt_reg = 0x2f054,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52018,
.enable_mask = BIT(19),
@@ -3511,7 +3511,7 @@ static struct clk_branch gcc_pcie_6a_pipe_clk = {
static struct clk_branch gcc_pcie_6a_pipediv2_clk = {
.halt_reg = 0x31060,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52018,
.enable_mask = BIT(28),
@@ -3649,7 +3649,7 @@ static struct clk_branch gcc_pcie_6b_pipe_clk = {
static struct clk_branch gcc_pcie_6b_pipediv2_clk = {
.halt_reg = 0x8d060,
- .halt_check = BRANCH_HALT_VOTED,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x52010,
.enable_mask = BIT(28),
@@ -6083,7 +6083,7 @@ static struct gdsc gcc_usb20_prim_gdsc = {
.pd = {
.name = "gcc_usb20_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -6155,7 +6155,7 @@ static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = {
.pd = {
.name = "gcc_usb3_mp_ss1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/gpucc-sar2130p.c b/drivers/clk/qcom/gpucc-sar2130p.c
new file mode 100644
index 000000000000..dd72b2a48c42
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sar2130p.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sar2130p-gpucc.h>
+#include <dt-bindings/reset/qcom,sar2130p-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+/* 470MHz Configuration */
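+/*
+ * Assuming the 19.2 MHz TCXO reference, the PLL rate works out as
+ * ref * (L + ALPHA / 2^16): 19.2 MHz * (0x18 + 0x7aaa / 65536) ~= 470 MHz.
+ * The 440 MHz configuration below follows the same relation.
+ */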
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x18,
+ .alpha = 0x7aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+/* 440MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x16,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
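+/*
+ * Both PLL1 entries below use a post divider of 2: 220 MHz follows from
+ * the initial 440 MHz PLL1 configuration, while selecting 550 MHz relies
+ * on CLK_SET_RATE_PARENT to reprogram gpu_cc_pll1 to 1100 MHz.
+ */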
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x9004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x7000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .clk_dis_wait_val = 8,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
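+/*
+ * Power-up sequencing of the GX domain is handled by the GPU driver and
+ * GMU, so the genpd power_on hook is stubbed via gdsc_gx_do_nothing_enable.
+ */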
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x905c,
+ .clamp_io_ctrl = 0x9504,
+ .resets = (unsigned int []){ GPUCC_GPU_CC_GX_BCR,
+ GPUCC_GPU_CC_ACD_BCR,
+ GPUCC_GPU_CC_GX_ACD_IROOT_BCR },
+ .reset_count = 3,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | SW_RESET,
+};
+
+static struct clk_regmap *gpu_cc_sar2130p_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sar2130p_resets[] = {
+ [GPUCC_GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPUCC_GPU_CC_GX_ACD_IROOT_BCR] = { 0x958c },
+ [GPUCC_GPU_CC_GX_BCR] = { 0x9058 },
+};
+
+static struct gdsc *gpu_cc_sar2130p_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sar2130p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xa000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sar2130p_desc = {
+ .config = &gpu_cc_sar2130p_regmap_config,
+ .clks = gpu_cc_sar2130p_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sar2130p_clocks),
+ .resets = gpu_cc_sar2130p_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_sar2130p_resets),
+ .gdscs = gpu_cc_sar2130p_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sar2130p_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sar2130p_match_table[] = {
+ { .compatible = "qcom,sar2130p-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sar2130p_match_table);
+
+static int gpu_cc_sar2130p_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sar2130p_desc);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Couldn't map GPU_CC\n");
+
+ clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Keep some clocks always-on */
+ qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */
+
+ return qcom_cc_really_probe(dev, &gpu_cc_sar2130p_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sar2130p_driver = {
+ .probe = gpu_cc_sar2130p_probe,
+ .driver = {
+ .name = "gpu_cc-sar2130p",
+ .of_match_table = gpu_cc_sar2130p_match_table,
+ },
+};
+module_platform_driver(gpu_cc_sar2130p_driver);
+
+MODULE_DESCRIPTION("QTI GPU_CC SAR2130P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-sm8450.c b/drivers/clk/qcom/gpucc-sm8450.c
index b3c5d6923cd2..059df72deaa1 100644
--- a/drivers/clk/qcom/gpucc-sm8450.c
+++ b/drivers/clk/qcom/gpucc-sm8450.c
@@ -40,7 +40,7 @@ static const struct pll_vco lucid_evo_vco[] = {
{ 249600000, 2000000000, 0 },
};
-static struct alpha_pll_config gpu_cc_pll0_config = {
+static const struct alpha_pll_config gpu_cc_pll0_config = {
.l = 0x1d,
.alpha = 0xb000,
.config_ctl_val = 0x20485699,
@@ -50,6 +50,20 @@ static struct alpha_pll_config gpu_cc_pll0_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_gpu_cc_pll0_config = {
+ .l = 0x1d,
+ .alpha = 0xb000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll gpu_cc_pll0 = {
.offset = 0x0,
.vco_table = lucid_evo_vco,
@@ -67,7 +81,7 @@ static struct clk_alpha_pll gpu_cc_pll0 = {
},
};
-static struct alpha_pll_config gpu_cc_pll1_config = {
+static const struct alpha_pll_config gpu_cc_pll1_config = {
.l = 0x34,
.alpha = 0x1555,
.config_ctl_val = 0x20485699,
@@ -77,6 +91,20 @@ static struct alpha_pll_config gpu_cc_pll1_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_gpu_cc_pll1_config = {
+ .l = 0x34,
+ .alpha = 0x1555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll gpu_cc_pll1 = {
.offset = 0x1000,
.vco_table = lucid_evo_vco,
@@ -736,6 +764,7 @@ static const struct qcom_cc_desc gpu_cc_sm8450_desc = {
static const struct of_device_id gpu_cc_sm8450_match_table[] = {
{ .compatible = "qcom,sm8450-gpucc" },
+ { .compatible = "qcom,sm8475-gpucc" },
{ }
};
MODULE_DEVICE_TABLE(of, gpu_cc_sm8450_match_table);
@@ -748,8 +777,19 @@ static int gpu_cc_sm8450_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
- clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-gpucc")) {
+ /* Update GPUCC PLL0 */
+ gpu_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+
+ /* Update GPUCC PLL1 */
+ gpu_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+
+ clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &sm8475_gpu_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &sm8475_gpu_cc_pll1_config);
+ } else {
+ clk_lucid_evo_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+ }
return qcom_cc_really_probe(&pdev->dev, &gpu_cc_sm8450_desc, regmap);
}
@@ -763,5 +803,5 @@ static struct platform_driver gpu_cc_sm8450_driver = {
};
module_platform_driver(gpu_cc_sm8450_driver);
-MODULE_DESCRIPTION("QTI GPU_CC SM8450 Driver");
+MODULE_DESCRIPTION("QTI GPU_CC SM8450 / SM8475 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gpucc-x1p42100.c b/drivers/clk/qcom/gpucc-x1p42100.c
new file mode 100644
index 000000000000..dba783339613
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-x1p42100.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,x1e80100-gpucc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_GPLL0_OUT_MAIN,
+ DT_GPLL0_OUT_MAIN_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_ole_vco[] = {
+ { 249600000, 2300000000, 0 },
+};
+
+/* 560.0 MHz Configuration */
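+/* With the 19.2 MHz reference: 19.2 MHz * (0x1d + 0x2aaa / 65536) ~= 560 MHz. */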
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x1d,
+ .alpha = 0x2aaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+/* 440.0 MHz Configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x16,
+ .alpha = 0xeaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_ole_vco,
+ .num_vco = ARRAY_SIZE(lucid_ole_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GPLL0_OUT_MAIN },
+ { .index = DT_GPLL0_OUT_MAIN_DIV },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_ff_clk_src = {
+ .cmd_rcgr = 0x9474,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ff_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x9318,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x93ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_2,
+ .freq_tbl = ftbl_gpu_cc_ff_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x911c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x911c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x9120,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9120,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_accu_shift_clk = {
+ .halt_reg = 0x9480,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9480,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_ff_clk = {
+ .halt_reg = 0x914c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x914c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_ff_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_ff_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x913c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x913c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x9144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_freq_measure_clk = {
+ .halt_reg = 0x9008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_freq_measure_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_accu_shift_clk = {
+ .halt_reg = 0x947c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x947c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_accu_shift_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x90bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x90bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x90b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x90b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x93e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x9148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9148,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_memnoc_gfx_clk = {
+ .halt_reg = 0x9150,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9150,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_0_gfx3d_clk = {
+ .halt_reg = 0x9288,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9288,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_0_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_mnd1x_1_gfx3d_clk = {
+ .halt_reg = 0x928c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x928c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_mnd1x_1_gfx3d_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x9134,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9134,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cc_cx_gdsc = {
+ .gdscr = 0x9108,
+ .gds_hw_ctrl = 0x953c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cc_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gpu_cc_gx_gdsc = {
+ .gdscr = 0x905c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gpu_cc_gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *gpu_cc_x1p42100_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_ACCU_SHIFT_CLK] = &gpu_cc_cx_accu_shift_clk.clkr,
+ [GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
+ [GPU_CC_FREQ_MEASURE_CLK] = &gpu_cc_freq_measure_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_ACCU_SHIFT_CLK] = &gpu_cc_gx_accu_shift_clk.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
+ [GPU_CC_MND1X_0_GFX3D_CLK] = &gpu_cc_mnd1x_0_gfx3d_clk.clkr,
+ [GPU_CC_MND1X_1_GFX3D_CLK] = &gpu_cc_mnd1x_1_gfx3d_clk.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static struct gdsc *gpu_cc_x1p42100_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cc_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_cc_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpu_cc_x1p42100_resets[] = {
+ [GPU_CC_ACD_BCR] = { 0x9358 },
+ [GPU_CC_CB_BCR] = { 0x93a0 },
+ [GPU_CC_CX_BCR] = { 0x9104 },
+ [GPU_CC_FAST_HUB_BCR] = { 0x93e4 },
+ [GPU_CC_FF_BCR] = { 0x9470 },
+ [GPU_CC_GFX3D_AON_BCR] = { 0x9198 },
+ [GPU_CC_GMU_BCR] = { 0x9314 },
+ [GPU_CC_GX_BCR] = { 0x9058 },
+ [GPU_CC_XO_BCR] = { 0x9000 },
+};
+
+static const struct regmap_config gpu_cc_x1p42100_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9988,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc gpu_cc_x1p42100_desc = {
+ .config = &gpu_cc_x1p42100_regmap_config,
+ .clks = gpu_cc_x1p42100_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_x1p42100_clocks),
+ .resets = gpu_cc_x1p42100_resets,
+ .num_resets = ARRAY_SIZE(gpu_cc_x1p42100_resets),
+ .gdscs = gpu_cc_x1p42100_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_x1p42100_gdscs),
+};
+
+static const struct of_device_id gpu_cc_x1p42100_match_table[] = {
+ { .compatible = "qcom,x1p42100-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_x1p42100_match_table);
+
+static int gpu_cc_x1p42100_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
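+	/* Resume the attached power domain so the controller registers are accessible. */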
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_x1p42100_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x93a4); /* GPU_CC_CB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x9004); /* GPU_CC_CXO_AON_CLK */
+ qcom_branch_set_clk_en(regmap, 0x900c); /* GPU_CC_DEMET_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &gpu_cc_x1p42100_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver gpu_cc_x1p42100_driver = {
+ .probe = gpu_cc_x1p42100_probe,
+ .driver = {
+ .name = "gpucc-x1p42100",
+ .of_match_table = gpu_cc_x1p42100_match_table,
+ },
+};
+
+module_platform_driver(gpu_cc_x1p42100_driver);
+
+MODULE_DESCRIPTION("QTI GPUCC X1P42100 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
new file mode 100644
index 000000000000..432d4c4b7aa6
--- /dev/null
+++ b/drivers/clk/qcom/ipq-cmn-pll.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/*
+ * The CMN PLL block expects a reference clock from the on-board Wi-Fi
+ * block and supplies fixed-rate output clocks to the networking hardware
+ * blocks and to GCC. The networking-related blocks include the PPE
+ * (packet process engine), the externally connected PHY or switch
+ * devices, and the PCS.
+ *
+ * On the IPQ9574 SoC, three 50 MHz clocks and one 25 MHz clock are output
+ * from the CMN PLL to the Ethernet PHY (or switch), and one 353 MHz clock
+ * to the PPE. The other fixed-rate output clocks are supplied to GCC
+ * (24 MHz as XO and 32 kHz as sleep clock), and to the PCS at 31.25 MHz.
+ *
+ * +---------+
+ * | GCC |
+ * +--+---+--+
+ * AHB CLK| |SYS CLK
+ * V V
+ * +-------+---+------+
+ * | +-------------> eth0-50mhz
+ * REF CLK | IPQ9574 |
+ * -------->+ +-------------> eth1-50mhz
+ * | CMN PLL block |
+ * | +-------------> eth2-50mhz
+ * | |
+ * +----+----+----+---+-------------> eth-25mhz
+ * | | |
+ * V V V
+ * GCC PCS NSS/PPE
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>
+
+#define CMN_PLL_REFCLK_SRC_SELECTION 0x28
+#define CMN_PLL_REFCLK_SRC_DIV GENMASK(9, 8)
+
+#define CMN_PLL_LOCKED 0x64
+#define CMN_PLL_CLKS_LOCKED BIT(8)
+
+#define CMN_PLL_POWER_ON_AND_RESET 0x780
+#define CMN_ANA_EN_SW_RSTN BIT(6)
+
+#define CMN_PLL_REFCLK_CONFIG 0x784
+#define CMN_PLL_REFCLK_EXTERNAL BIT(9)
+#define CMN_PLL_REFCLK_DIV GENMASK(8, 4)
+#define CMN_PLL_REFCLK_INDEX GENMASK(3, 0)
+
+#define CMN_PLL_CTRL 0x78c
+#define CMN_PLL_CTRL_LOCK_DETECT_EN BIT(15)
+
+#define CMN_PLL_DIVIDER_CTRL 0x794
+#define CMN_PLL_DIVIDER_CTRL_FACTOR GENMASK(9, 0)
+
+/**
+ * struct cmn_pll_fixed_output_clk - CMN PLL output clocks information
+ * @id: Clock specifier to be supplied
+ * @name: Clock name to be registered
+ * @rate: Clock rate
+ */
+struct cmn_pll_fixed_output_clk {
+ unsigned int id;
+ const char *name;
+ unsigned long rate;
+};
+
+/**
+ * struct clk_cmn_pll - CMN PLL hardware specific data
+ * @regmap: hardware regmap.
+ * @hw: handle between common and hardware-specific interfaces
+ */
+struct clk_cmn_pll {
+ struct regmap *regmap;
+ struct clk_hw hw;
+};
+
+#define CLK_PLL_OUTPUT(_id, _name, _rate) { \
+ .id = _id, \
+ .name = _name, \
+ .rate = _rate, \
+}
+
+#define to_clk_cmn_pll(_hw) container_of(_hw, struct clk_cmn_pll, hw)
+
+static const struct regmap_config ipq_cmn_pll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7fc,
+ .fast_io = true,
+};
+
+static const struct cmn_pll_fixed_output_clk ipq9574_output_clks[] = {
+ CLK_PLL_OUTPUT(XO_24MHZ_CLK, "xo-24mhz", 24000000UL),
+ CLK_PLL_OUTPUT(SLEEP_32KHZ_CLK, "sleep-32khz", 32000UL),
+ CLK_PLL_OUTPUT(PCS_31P25MHZ_CLK, "pcs-31p25mhz", 31250000UL),
+ CLK_PLL_OUTPUT(NSS_1200MHZ_CLK, "nss-1200mhz", 1200000000UL),
+ CLK_PLL_OUTPUT(PPE_353MHZ_CLK, "ppe-353mhz", 353000000UL),
+ CLK_PLL_OUTPUT(ETH0_50MHZ_CLK, "eth0-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH1_50MHZ_CLK, "eth1-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH2_50MHZ_CLK, "eth2-50mhz", 50000000UL),
+ CLK_PLL_OUTPUT(ETH_25MHZ_CLK, "eth-25mhz", 25000000UL),
+};
+
+/*
+ * The CMN PLL has a single parent clock, which may run at one of several
+ * supported rates. Each supported parent clock rate is reflected by a
+ * specific reference index value in the hardware.
+ */
+static int ipq_cmn_pll_find_freq_index(unsigned long parent_rate)
+{
+ int index = -EINVAL;
+
+ switch (parent_rate) {
+ case 25000000:
+ index = 3;
+ break;
+ case 31250000:
+ index = 4;
+ break;
+ case 40000000:
+ index = 6;
+ break;
+ case 48000000:
+ case 96000000:
+ /*
+		 * Parent clock rates of 48 MHz and 96 MHz take the same
+		 * reference clock index. 96 MHz additionally needs the
+		 * source clock divider programmed to 2.
+ */
+ index = 7;
+ break;
+ case 50000000:
+ index = 8;
+ break;
+ default:
+ break;
+ }
+
+ return index;
+}
+
+static unsigned long clk_cmn_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
+ u32 val, factor;
+
+ /*
+ * The value of CMN_PLL_DIVIDER_CTRL_FACTOR is automatically adjusted
+ * by HW according to the parent clock rate.
+ */
+ regmap_read(cmn_pll->regmap, CMN_PLL_DIVIDER_CTRL, &val);
+ factor = FIELD_GET(CMN_PLL_DIVIDER_CTRL_FACTOR, val);
+
+ return parent_rate * 2 * factor;
+}
+
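As a quick sanity check of the formula above, plugging in a 48 MHz reference together with an assumed hardware-selected divider factor of 125 reproduces the 12 GHz PLL rate mentioned later in the driver (the factor value here is an illustration, not something read from real hardware):

#include <stdio.h>

int main(void)
{
	unsigned long long parent_rate = 48000000ULL;	/* 48 MHz reference */
	unsigned long long factor = 125;	/* assumed CMN_PLL_DIVIDER_CTRL_FACTOR */

	/* rate = parent_rate * 2 * factor, as in clk_cmn_pll_recalc_rate() */
	printf("%llu Hz\n", parent_rate * 2 * factor);	/* prints 12000000000 */
	return 0;
}
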
+static int clk_cmn_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ int ret;
+
+ /* Validate the rate of the single parent clock. */
+ ret = ipq_cmn_pll_find_freq_index(req->best_parent_rate);
+
+ return ret < 0 ? ret : 0;
+}
+
+/*
+ * This function is used to initialize the CMN PLL to enable the fixed
+ * rate output clocks. It is expected to be called only once.
+ */
+static int clk_cmn_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_cmn_pll *cmn_pll = to_clk_cmn_pll(hw);
+ int ret, index;
+ u32 val;
+
+ /*
+ * Configure the reference input clock selection as per the given
+	 * parent clock. The output clock rates are always fixed.
+ */
+ index = ipq_cmn_pll_find_freq_index(parent_rate);
+ if (index < 0)
+ return index;
+
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
+ CMN_PLL_REFCLK_INDEX,
+ FIELD_PREP(CMN_PLL_REFCLK_INDEX, index));
+ if (ret)
+ return ret;
+
+ /*
+	 * Update the source clock rate selection and program the source
+	 * clock divider as 2 when the parent clock rate is 96 MHz.
+ */
+ if (parent_rate == 96000000) {
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_CONFIG,
+ CMN_PLL_REFCLK_DIV,
+ FIELD_PREP(CMN_PLL_REFCLK_DIV, 2));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(cmn_pll->regmap, CMN_PLL_REFCLK_SRC_SELECTION,
+ CMN_PLL_REFCLK_SRC_DIV,
+ FIELD_PREP(CMN_PLL_REFCLK_SRC_DIV, 0));
+ if (ret)
+ return ret;
+ }
+
+ /* Enable PLL locked detect. */
+ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_CTRL,
+ CMN_PLL_CTRL_LOCK_DETECT_EN);
+ if (ret)
+ return ret;
+
+ /*
+ * Reset the CMN PLL block to ensure the updated configurations
+ * take effect.
+ */
+ ret = regmap_clear_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
+ CMN_ANA_EN_SW_RSTN);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 1200);
+ ret = regmap_set_bits(cmn_pll->regmap, CMN_PLL_POWER_ON_AND_RESET,
+ CMN_ANA_EN_SW_RSTN);
+ if (ret)
+ return ret;
+
+ /* Stability check of CMN PLL output clocks. */
+ return regmap_read_poll_timeout(cmn_pll->regmap, CMN_PLL_LOCKED, val,
+ (val & CMN_PLL_CLKS_LOCKED),
+ 100, 100 * USEC_PER_MSEC);
+}
+
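This set_rate path is normally triggered once by the clock framework when the DT applies assigned-clock-rates-u64; a minimal sketch of the equivalent explicit call, assuming the consumer already holds a clk handle (12 GHz needs a 64-bit rate, which is why the binding uses the u64 property variant):

#include <linux/clk.h>

/* Sketch only: on 64-bit targets such as IPQ9574, unsigned long is wide
 * enough to carry the 12 GHz rate into clk_set_rate(). */
static int example_configure_cmn_pll(struct clk *cmn_pll)
{
	return clk_set_rate(cmn_pll, 12000000000ULL);
}
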
+static const struct clk_ops clk_cmn_pll_ops = {
+ .recalc_rate = clk_cmn_pll_recalc_rate,
+ .determine_rate = clk_cmn_pll_determine_rate,
+ .set_rate = clk_cmn_pll_set_rate,
+};
+
+static struct clk_hw *ipq_cmn_pll_clk_hw_register(struct platform_device *pdev)
+{
+ struct clk_parent_data pdata = { .index = 0 };
+ struct device *dev = &pdev->dev;
+ struct clk_init_data init = {};
+ struct clk_cmn_pll *cmn_pll;
+ struct regmap *regmap;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &ipq_cmn_pll_regmap_config);
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ cmn_pll = devm_kzalloc(dev, sizeof(*cmn_pll), GFP_KERNEL);
+ if (!cmn_pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = "cmn_pll";
+ init.parent_data = &pdata;
+ init.num_parents = 1;
+ init.ops = &clk_cmn_pll_ops;
+
+ cmn_pll->hw.init = &init;
+ cmn_pll->regmap = regmap;
+
+ ret = devm_clk_hw_register(dev, &cmn_pll->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &cmn_pll->hw;
+}
+
+static int ipq_cmn_pll_register_clks(struct platform_device *pdev)
+{
+ const struct cmn_pll_fixed_output_clk *fixed_clk;
+ struct clk_hw_onecell_data *hw_data;
+ struct device *dev = &pdev->dev;
+ struct clk_hw *cmn_pll_hw;
+ unsigned int num_clks;
+ struct clk_hw *hw;
+ int ret, i;
+
+ fixed_clk = ipq9574_output_clks;
+ num_clks = ARRAY_SIZE(ipq9574_output_clks);
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks + 1),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ /*
+ * Register the CMN PLL clock, which is the parent clock of
+ * the fixed rate output clocks.
+ */
+ cmn_pll_hw = ipq_cmn_pll_clk_hw_register(pdev);
+ if (IS_ERR(cmn_pll_hw))
+ return PTR_ERR(cmn_pll_hw);
+
+ /* Register the fixed rate output clocks. */
+ for (i = 0; i < num_clks; i++) {
+ hw = clk_hw_register_fixed_rate_parent_hw(dev, fixed_clk[i].name,
+ cmn_pll_hw, 0,
+ fixed_clk[i].rate);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto unregister_fixed_clk;
+ }
+
+ hw_data->hws[fixed_clk[i].id] = hw;
+ }
+
+ /*
+	 * Provide the CMN PLL clock. The clock rate of the CMN PLL is
+	 * configured to 12 GHz by the DT property assigned-clock-rates-u64.
+ */
+ hw_data->hws[CMN_PLL_CLK] = cmn_pll_hw;
+ hw_data->num = num_clks + 1;
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
+ if (ret)
+ goto unregister_fixed_clk;
+
+ platform_set_drvdata(pdev, hw_data);
+
+ return 0;
+
+unregister_fixed_clk:
+ while (i > 0)
+ clk_hw_unregister(hw_data->hws[fixed_clk[--i].id]);
+
+ return ret;
+}
+
+static int ipq_cmn_pll_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * To access the CMN PLL registers, the GCC AHB & SYS clocks
+	 * of the CMN PLL block need to be enabled.
+ */
+ ret = pm_clk_add(dev, "ahb");
+ if (ret)
+		return dev_err_probe(dev, ret, "Failed to add AHB clock\n");
+
+ ret = pm_clk_add(dev, "sys");
+ if (ret)
+		return dev_err_probe(dev, ret, "Failed to add SYS clock\n");
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ /* Register CMN PLL clock and fixed rate output clocks. */
+ ret = ipq_cmn_pll_register_clks(pdev);
+ pm_runtime_put(dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+				     "Failed to register CMN PLL clocks\n");
+
+ return 0;
+}
+
+static void ipq_cmn_pll_clk_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *hw_data = platform_get_drvdata(pdev);
+ int i;
+
+ /*
+ * The clock with index CMN_PLL_CLK is unregistered by
+ * device management.
+ */
+ for (i = 0; i < hw_data->num; i++) {
+ if (i != CMN_PLL_CLK)
+ clk_hw_unregister(hw_data->hws[i]);
+ }
+}
+
+static const struct dev_pm_ops ipq_cmn_pll_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id ipq_cmn_pll_clk_ids[] = {
+ { .compatible = "qcom,ipq9574-cmn-pll", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq_cmn_pll_clk_ids);
+
+static struct platform_driver ipq_cmn_pll_clk_driver = {
+ .probe = ipq_cmn_pll_clk_probe,
+ .remove = ipq_cmn_pll_clk_remove,
+ .driver = {
+ .name = "ipq_cmn_pll",
+ .of_match_table = ipq_cmn_pll_clk_ids,
+ .pm = &ipq_cmn_pll_pm_ops,
+ },
+};
+module_platform_driver(ipq_cmn_pll_clk_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ CMN PLL Driver");
+MODULE_LICENSE("GPL");
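For context, a peripheral that consumes one of the fixed outputs would obtain it through the onecell provider registered above; a minimal sketch of such a consumer, where the "eth0" connection id is an assumption standing in for the consumer's real clock-names entry:

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_eth0_ref(struct device *dev)
{
	struct clk *clk;

	/* "eth0" is a hypothetical connection name for ETH0_50MHZ_CLK. */
	clk = devm_clk_get_enabled(dev, "eth0");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	dev_info(dev, "eth0 reference: %lu Hz\n", clk_get_rate(clk));
	return 0;
}
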
diff --git a/drivers/clk/qcom/lpasscc-sm6115.c b/drivers/clk/qcom/lpasscc-sm6115.c
new file mode 100644
index 000000000000..8ffdab71b948
--- /dev/null
+++ b/drivers/clk/qcom/lpasscc-sm6115.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, 2023 Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6115-lpasscc.h>
+
+#include "common.h"
+#include "reset.h"
+
+static const struct qcom_reset_map lpass_audiocc_sm6115_resets[] = {
+ [LPASS_AUDIO_SWR_RX_CGCR] = { .reg = 0x98, .bit = 1, .udelay = 500 },
+};
+
+static struct regmap_config lpass_audiocc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .name = "lpass-audio-csr",
+ .max_register = 0x1000,
+};
+
+static const struct qcom_cc_desc lpass_audiocc_sm6115_reset_desc = {
+ .config = &lpass_audiocc_sm6115_regmap_config,
+ .resets = lpass_audiocc_sm6115_resets,
+ .num_resets = ARRAY_SIZE(lpass_audiocc_sm6115_resets),
+};
+
+static const struct qcom_reset_map lpasscc_sm6115_resets[] = {
+ [LPASS_SWR_TX_CONFIG_CGCR] = { .reg = 0x100, .bit = 1, .udelay = 500 },
+};
+
+static struct regmap_config lpasscc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .name = "lpass-tcsr",
+ .max_register = 0x1000,
+};
+
+static const struct qcom_cc_desc lpasscc_sm6115_reset_desc = {
+ .config = &lpasscc_sm6115_regmap_config,
+ .resets = lpasscc_sm6115_resets,
+ .num_resets = ARRAY_SIZE(lpasscc_sm6115_resets),
+};
+
+static const struct of_device_id lpasscc_sm6115_match_table[] = {
+ {
+ .compatible = "qcom,sm6115-lpassaudiocc",
+ .data = &lpass_audiocc_sm6115_reset_desc,
+ }, {
+ .compatible = "qcom,sm6115-lpasscc",
+ .data = &lpasscc_sm6115_reset_desc,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpasscc_sm6115_match_table);
+
+static int lpasscc_sm6115_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc = of_device_get_match_data(&pdev->dev);
+
+ return qcom_cc_probe_by_index(pdev, 0, desc);
+}
+
+static struct platform_driver lpasscc_sm6115_driver = {
+ .probe = lpasscc_sm6115_probe,
+ .driver = {
+ .name = "lpasscc-sm6115",
+ .of_match_table = lpasscc_sm6115_match_table,
+ },
+};
+
+module_platform_driver(lpasscc_sm6115_driver);
+
+MODULE_DESCRIPTION("QTI LPASSCC SM6115 Driver");
+MODULE_LICENSE("GPL");
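Both blocks above expose only reset lines, so a codec or soundwire driver would drive them through the reset framework; a minimal sketch, with the "swr_rx" line name assumed for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_pulse_swr_reset(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, "swr_rx");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Pulses the line; the map above inserts a 500 us delay. */
	return reset_control_reset(rst);
}
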
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 3f41249c5ae4..20d1c43f35d9 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -37,6 +37,7 @@ enum {
P_DSI2_PLL_DSICLK,
P_DSI1_PLL_BYTECLK,
P_DSI2_PLL_BYTECLK,
+ P_LVDS_PLL,
};
#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
@@ -143,6 +144,20 @@ static const struct clk_parent_data mmcc_pxo_dsi2_dsi1[] = {
{ .fw_name = "dsi1pll", .name = "dsi1pll" },
};
+static const struct parent_map mmcc_pxo_dsi2_dsi1_lvds_map[] = {
+ { P_PXO, 0 },
+ { P_DSI2_PLL_DSICLK, 1 },
+ { P_LVDS_PLL, 2 },
+ { P_DSI1_PLL_DSICLK, 3 },
+};
+
+static const struct clk_parent_data mmcc_pxo_dsi2_dsi1_lvds[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "dsi2pll", .name = "dsi2pll" },
+ { .fw_name = "lvdspll", .name = "mpd4_lvds_pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" },
+};
+
static const struct parent_map mmcc_pxo_dsi1_dsi2_byte_map[] = {
{ P_PXO, 0 },
{ P_DSI1_PLL_BYTECLK, 1 },
@@ -2439,26 +2454,42 @@ static struct clk_rcg dsi2_pixel_src = {
},
.s = {
.src_sel_shift = 0,
- .parent_map = mmcc_pxo_dsi2_dsi1_map,
+ .parent_map = mmcc_pxo_dsi2_dsi1_lvds_map,
},
.clkr = {
.enable_reg = 0x0094,
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_pixel_src",
- .parent_data = mmcc_pxo_dsi2_dsi1,
- .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
+ .parent_data = mmcc_pxo_dsi2_dsi1_lvds,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1_lvds),
.ops = &clk_rcg_pixel_ops,
},
},
};
+static struct clk_branch dsi2_pixel_lvds_src = {
+ .clkr = {
+ .enable_reg = 0x0094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi2_pixel_lvds_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_pixel_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_simple_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch dsi2_pixel_clk = {
.halt_reg = 0x01d0,
.halt_bit = 19,
.clkr = {
.enable_reg = 0x0094,
- .enable_mask = BIT(0),
+ .enable_mask = 0,
.hw.init = &(struct clk_init_data){
.name = "mdp_pclk2_clk",
.parent_hws = (const struct clk_hw*[]){
@@ -2471,6 +2502,24 @@ static struct clk_branch dsi2_pixel_clk = {
},
};
+static struct clk_branch lvds_clk = {
+ .halt_reg = 0x024c,
+ .halt_bit = 6,
+ .clkr = {
+ .enable_reg = 0x0264,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdp_lvds_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_pixel_lvds_src.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
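With CLK_SET_RATE_PARENT set on both new branches, a rate request on the leaf clock propagates up to dsi2_pixel_src, which does the actual division; a minimal sketch of how a display driver might use the chain, with the "lvds" connection name assumed:

#include <linux/clk.h>

static int example_enable_lvds(struct device *dev, unsigned long pixclk)
{
	struct clk *lvds = devm_clk_get(dev, "lvds");
	int ret;

	if (IS_ERR(lvds))
		return PTR_ERR(lvds);

	/* Forwarded to dsi2_pixel_src via CLK_SET_RATE_PARENT. */
	ret = clk_set_rate(lvds, pixclk);
	if (ret)
		return ret;

	return clk_prepare_enable(lvds);
}
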
static struct clk_branch gfx2d0_ahb_clk = {
.hwcg_reg = 0x0038,
.hwcg_bit = 28,
@@ -2799,6 +2848,8 @@ static struct clk_regmap *mmcc_msm8960_clks[] = {
[CSIPHY1_TIMER_CLK] = &csiphy1_timer_clk.clkr,
[CSIPHY0_TIMER_CLK] = &csiphy0_timer_clk.clkr,
[PLL2] = &pll2.clkr,
+ [DSI2_PIXEL_LVDS_SRC] = &dsi2_pixel_lvds_src.clkr,
+ [LVDS_CLK] = &lvds_clk.clkr,
};
static const struct qcom_reset_map mmcc_msm8960_resets[] = {
@@ -2983,6 +3034,8 @@ static struct clk_regmap *mmcc_apq8064_clks[] = {
[VCAP_CLK] = &vcap_clk.clkr,
[VCAP_NPL_CLK] = &vcap_npl_clk.clkr,
[PLL15] = &pll15.clkr,
+ [DSI2_PIXEL_LVDS_SRC] = &dsi2_pixel_lvds_src.clkr,
+ [LVDS_CLK] = &lvds_clk.clkr,
};
static const struct qcom_reset_map mmcc_apq8064_resets[] = {
diff --git a/drivers/clk/qcom/tcsrcc-sm8550.c b/drivers/clk/qcom/tcsrcc-sm8550.c
index e5e8f2e82b94..41d73f92a000 100644
--- a/drivers/clk/qcom/tcsrcc-sm8550.c
+++ b/drivers/clk/qcom/tcsrcc-sm8550.c
@@ -129,6 +129,13 @@ static struct clk_branch tcsr_usb3_clkref_en = {
},
};
+static struct clk_regmap *tcsr_cc_sar2130p_clocks[] = {
+ [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
+ [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
+ [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
+ [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
+};
+
static struct clk_regmap *tcsr_cc_sm8550_clocks[] = {
[TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
[TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
@@ -146,6 +153,12 @@ static const struct regmap_config tcsr_cc_sm8550_regmap_config = {
.fast_io = true,
};
+static const struct qcom_cc_desc tcsr_cc_sar2130p_desc = {
+ .config = &tcsr_cc_sm8550_regmap_config,
+ .clks = tcsr_cc_sar2130p_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_sar2130p_clocks),
+};
+
static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
.config = &tcsr_cc_sm8550_regmap_config,
.clks = tcsr_cc_sm8550_clocks,
@@ -153,7 +166,8 @@ static const struct qcom_cc_desc tcsr_cc_sm8550_desc = {
};
static const struct of_device_id tcsr_cc_sm8550_match_table[] = {
- { .compatible = "qcom,sm8550-tcsr" },
+ { .compatible = "qcom,sar2130p-tcsr", .data = &tcsr_cc_sar2130p_desc },
+ { .compatible = "qcom,sm8550-tcsr", .data = &tcsr_cc_sm8550_desc },
{ }
};
MODULE_DEVICE_TABLE(of, tcsr_cc_sm8550_match_table);
@@ -162,7 +176,7 @@ static int tcsr_cc_sm8550_probe(struct platform_device *pdev)
{
struct regmap *regmap;
- regmap = qcom_cc_map(pdev, &tcsr_cc_sm8550_desc);
+ regmap = qcom_cc_map(pdev, of_device_get_match_data(&pdev->dev));
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/clk/qcom/tcsrcc-sm8750.c b/drivers/clk/qcom/tcsrcc-sm8750.c
new file mode 100644
index 000000000000..242e320986ef
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-sm8750.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8750-tcsr.h>
+
+#include "clk-branch.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+
+enum {
+ DT_BI_TCXO_PAD,
+};
+
+static struct clk_branch tcsr_pcie_0_clkref_en = {
+ .halt_reg = 0x0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_ufs_clkref_en = {
+ .halt_reg = 0x1000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_ufs_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_clkref_en = {
+ .halt_reg = 0x2000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_clkref_en = {
+ .halt_reg = 0x3000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x3000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_sm8750_clocks[] = {
+ [TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
+ [TCSR_UFS_CLKREF_EN] = &tcsr_ufs_clkref_en.clkr,
+ [TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
+ [TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_sm8750_desc = {
+ .config = &tcsr_cc_sm8750_regmap_config,
+ .clks = tcsr_cc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_sm8750_clocks),
+};
+
+static const struct of_device_id tcsr_cc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_sm8750_match_table);
+
+static int tcsr_cc_sm8750_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &tcsr_cc_sm8750_desc);
+}
+
+static struct platform_driver tcsr_cc_sm8750_driver = {
+ .probe = tcsr_cc_sm8750_probe,
+ .driver = {
+ .name = "tcsr_cc-sm8750",
+ .of_match_table = tcsr_cc_sm8750_match_table,
+ },
+};
+
+static int __init tcsr_cc_sm8750_init(void)
+{
+ return platform_driver_register(&tcsr_cc_sm8750_driver);
+}
+subsys_initcall(tcsr_cc_sm8750_init);
+
+static void __exit tcsr_cc_sm8750_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_sm8750_driver);
+}
+module_exit(tcsr_cc_sm8750_exit);
+
+MODULE_DESCRIPTION("QTI TCSR_CC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sa8775p.c b/drivers/clk/qcom/videocc-sa8775p.c
new file mode 100644
index 000000000000..bf5de411fd5d
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sa8775p.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sa8775p-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_IFACE,
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_BI_TCXO_AO,
+ P_SLEEP_CLK,
+ P_VIDEO_PLL0_OUT_MAIN,
+ P_VIDEO_PLL1_OUT_MAIN,
+};
+
+static const struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2020000000, 0 },
+};
+
+static const struct alpha_pll_config video_pll0_config = {
+ .l = 0x39,
+ .alpha = 0x3000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll video_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config video_pll1_config = {
+ .l = 0x39,
+ .alpha = 0x3000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32aa299c,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00400805,
+};
+
+static struct clk_alpha_pll video_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0_ao[] = {
+ { P_BI_TCXO_AO, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_PLL1_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_pll1.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_3[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_3[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_video_cc_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO_AO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_ahb_clk_src = {
+ .cmd_rcgr = 0x8030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0_ao,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_ahb_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(1098000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1599000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1680000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0x8000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
+ F(1098000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1600000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ F(1800000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs1_clk_src = {
+ .cmd_rcgr = 0x8018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_2,
+ .freq_tbl = ftbl_video_cc_mvs1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_clk_src",
+ .parent_data = video_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x812c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_3,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 video_cc_xo_clk_src = {
+ .cmd_rcgr = 0x8110,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0_ao,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_xo_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0x80b8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0x806c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1_div_clk_src = {
+ .reg = 0x80dc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = {
+ .reg = 0x8094,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_sm_div_clk_src = {
+ .reg = 0x8108,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sm_div_clk_src",
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1_clk = {
+ .halt_reg = 0x80d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs1c_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs1c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs1c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_pll_lock_monitor_clk = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll_lock_monitor_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_sm_obs_clk = {
+ .halt_reg = 0x810c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x810c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sm_obs_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_sm_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc video_cc_mvs0c_gdsc = {
+ .gdscr = 0x804c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc video_cc_mvs0_gdsc = {
+ .gdscr = 0x809c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs0c_gdsc.pd,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR | HW_CTRL_TRIGGER,
+};
+
+static struct gdsc video_cc_mvs1c_gdsc = {
+ .gdscr = 0x8074,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs1c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR,
+};
+
+static struct gdsc video_cc_mvs1_gdsc = {
+ .gdscr = 0x80c0,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs1c_gdsc.pd,
+ .flags = RETAIN_FF_ENABLE | POLL_CFG_GDSCR | HW_CTRL_TRIGGER,
+};
+
+static struct clk_regmap *video_cc_sa8775p_clocks[] = {
+ [VIDEO_CC_AHB_CLK_SRC] = &video_cc_ahb_clk_src.clkr,
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS1_CLK] = &video_cc_mvs1_clk.clkr,
+ [VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr,
+ [VIDEO_CC_MVS1_DIV_CLK_SRC] = &video_cc_mvs1_div_clk_src.clkr,
+ [VIDEO_CC_MVS1C_CLK] = &video_cc_mvs1c_clk.clkr,
+ [VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr,
+ [VIDEO_CC_PLL_LOCK_MONITOR_CLK] = &video_cc_pll_lock_monitor_clk.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_SM_DIV_CLK_SRC] = &video_cc_sm_div_clk_src.clkr,
+ [VIDEO_CC_SM_OBS_CLK] = &video_cc_sm_obs_clk.clkr,
+ [VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr,
+ [VIDEO_PLL0] = &video_pll0.clkr,
+ [VIDEO_PLL1] = &video_pll1.clkr,
+};
+
+static struct gdsc *video_cc_sa8775p_gdscs[] = {
+ [VIDEO_CC_MVS0_GDSC] = &video_cc_mvs0_gdsc,
+ [VIDEO_CC_MVS0C_GDSC] = &video_cc_mvs0c_gdsc,
+ [VIDEO_CC_MVS1_GDSC] = &video_cc_mvs1_gdsc,
+ [VIDEO_CC_MVS1C_GDSC] = &video_cc_mvs1c_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_sa8775p_resets[] = {
+ [VIDEO_CC_INTERFACE_BCR] = { 0x80e8 },
+ [VIDEO_CC_MVS0_BCR] = { 0x8098 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 },
+ [VIDEO_CC_MVS0C_BCR] = { 0x8048 },
+ [VIDEO_CC_MVS1_BCR] = { 0x80bc },
+ [VIDEO_CC_MVS1C_CLK_ARES] = { 0x808c, 2 },
+ [VIDEO_CC_MVS1C_BCR] = { 0x8070 },
+};
+
+static const struct regmap_config video_cc_sa8775p_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb000,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc video_cc_sa8775p_desc = {
+ .config = &video_cc_sa8775p_regmap_config,
+ .clks = video_cc_sa8775p_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sa8775p_clocks),
+ .resets = video_cc_sa8775p_resets,
+ .num_resets = ARRAY_SIZE(video_cc_sa8775p_resets),
+ .gdscs = video_cc_sa8775p_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sa8775p_gdscs),
+};
+
+static const struct of_device_id video_cc_sa8775p_match_table[] = {
+ { .compatible = "qcom,sa8775p-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sa8775p_match_table);
+
+static int video_cc_sa8775p_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ regmap = qcom_cc_map(pdev, &video_cc_sa8775p_desc);
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
+ return PTR_ERR(regmap);
+ }
+
+ clk_lucid_evo_pll_configure(&video_pll0, regmap, &video_pll0_config);
+ clk_lucid_evo_pll_configure(&video_pll1, regmap, &video_pll1_config);
+
+ /* Keep some clocks always enabled */
+ qcom_branch_set_clk_en(regmap, 0x80ec); /* VIDEO_CC_AHB_CLK */
+ qcom_branch_set_clk_en(regmap, 0x8144); /* VIDEO_CC_SLEEP_CLK */
+ qcom_branch_set_clk_en(regmap, 0x8128); /* VIDEO_CC_XO_CLK */
+
+ ret = qcom_cc_really_probe(&pdev->dev, &video_cc_sa8775p_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver video_cc_sa8775p_driver = {
+ .probe = video_cc_sa8775p_probe,
+ .driver = {
+ .name = "videocc-sa8775p",
+ .of_match_table = video_cc_sa8775p_match_table,
+ },
+};
+
+module_platform_driver(video_cc_sa8775p_driver);
+
+MODULE_DESCRIPTION("QTI VIDEOCC SA8775P Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/videocc-sm8350.c b/drivers/clk/qcom/videocc-sm8350.c
index 5bd6fe3e1298..874d4da95ff8 100644
--- a/drivers/clk/qcom/videocc-sm8350.c
+++ b/drivers/clk/qcom/videocc-sm8350.c
@@ -452,7 +452,7 @@ static struct gdsc mvs0_gdsc = {
.pd = {
.name = "mvs0_gdsc",
},
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -461,7 +461,7 @@ static struct gdsc mvs1_gdsc = {
.pd = {
.name = "mvs1_gdsc",
},
- .flags = HW_CTRL | RETAIN_FF_ENABLE,
+ .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/videocc-sm8450.c b/drivers/clk/qcom/videocc-sm8450.c
index ed9163d64244..f26c7eccb62e 100644
--- a/drivers/clk/qcom/videocc-sm8450.c
+++ b/drivers/clk/qcom/videocc-sm8450.c
@@ -46,6 +46,21 @@ static const struct alpha_pll_config video_cc_pll0_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_video_cc_pll0_config = {
+ /* .l includes CAL_L_VAL, L_VAL fields */
+ .l = 0x1e,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll video_cc_pll0 = {
.offset = 0x0,
.vco_table = lucid_evo_vco,
@@ -74,6 +89,21 @@ static const struct alpha_pll_config video_cc_pll1_config = {
.user_ctl_hi_val = 0x00000805,
};
+static const struct alpha_pll_config sm8475_video_cc_pll1_config = {
+ /* .l includes CAL_L_VAL, L_VAL fields */
+ .l = 0x2b,
+ .alpha = 0xc000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x82aa299c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000003,
+ .test_ctl_hi1_val = 0x00009000,
+ .test_ctl_hi2_val = 0x00000034,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000005,
+};
+
static struct clk_alpha_pll video_cc_pll1 = {
.offset = 0x1000,
.vco_table = lucid_evo_vco,
@@ -397,6 +427,7 @@ static struct qcom_cc_desc video_cc_sm8450_desc = {
static const struct of_device_id video_cc_sm8450_match_table[] = {
{ .compatible = "qcom,sm8450-videocc" },
+ { .compatible = "qcom,sm8475-videocc" },
{ }
};
MODULE_DEVICE_TABLE(of, video_cc_sm8450_match_table);
@@ -420,8 +451,19 @@ static int video_cc_sm8450_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
}
- clk_lucid_evo_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
- clk_lucid_evo_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,sm8475-videocc")) {
+ /* Update VideoCC PLL0 */
+ video_cc_pll0.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+
+ /* Update VideoCC PLL1 */
+ video_cc_pll1.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE];
+
+ clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &sm8475_video_cc_pll0_config);
+ clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &sm8475_video_cc_pll1_config);
+ } else {
+ clk_lucid_evo_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
+ }
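An alternative to checking the compatible in probe, used by the tcsrcc-sm8550 change earlier in this series, is to hang per-variant data off the match table; a rough sketch of that shape (all names here are hypothetical):

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct alpha_pll_config;	/* from clk-alpha-pll.h */

struct videocc_variant {	/* hypothetical per-variant descriptor */
	const struct alpha_pll_config *pll0_cfg;
	const struct alpha_pll_config *pll1_cfg;
	bool lucid_ole;		/* select the OLE vs EVO configure helper */
};

static int example_probe(struct platform_device *pdev)
{
	const struct videocc_variant *v = of_device_get_match_data(&pdev->dev);

	if (!v)
		return -ENODEV;
	/* pick clk_lucid_ole_pll_configure() when v->lucid_ole is set */
	return 0;
}
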
/* Keep some clocks always-on */
qcom_branch_set_clk_en(regmap, 0x80e4); /* VIDEO_CC_AHB_CLK */
@@ -445,5 +487,5 @@ static struct platform_driver video_cc_sm8450_driver = {
module_platform_driver(video_cc_sm8450_driver);
-MODULE_DESCRIPTION("QTI VIDEOCC SM8450 Driver");
+MODULE_DESCRIPTION("QTI VIDEOCC SM8450 / SM8475 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/ralink/clk-mtmips.c b/drivers/clk/ralink/clk-mtmips.c
index 50a443bf79ec..19d433034884 100644
--- a/drivers/clk/ralink/clk-mtmips.c
+++ b/drivers/clk/ralink/clk-mtmips.c
@@ -207,6 +207,7 @@ static struct mtmips_clk mt7620_pherip_clks[] = {
{ CLK_PERIPH("10000b00.spi", "bus") },
{ CLK_PERIPH("10000b40.spi", "bus") },
{ CLK_PERIPH("10000c00.uartlite", "periph") },
+ { CLK_PERIPH("10130000.mmc", "sdhc") },
{ CLK_PERIPH("10180000.wmac", "xtal") }
};
@@ -220,6 +221,7 @@ static struct mtmips_clk mt76x8_pherip_clks[] = {
{ CLK_PERIPH("10000c00.uart0", "periph") },
{ CLK_PERIPH("10000d00.uart1", "periph") },
{ CLK_PERIPH("10000e00.uart2", "periph") },
+ { CLK_PERIPH("10130000.mmc", "sdhc") },
{ CLK_PERIPH("10300000.wmac", "xtal") }
};
@@ -263,16 +265,21 @@ err_clk_unreg:
.rate = _rate \
}
-static struct mtmips_clk_fixed rt305x_fixed_clocks[] = {
- CLK_FIXED("xtal", NULL, 40000000)
+static struct mtmips_clk_fixed rt3883_fixed_clocks[] = {
+ CLK_FIXED("periph", "xtal", 40000000)
};
static struct mtmips_clk_fixed rt3352_fixed_clocks[] = {
CLK_FIXED("periph", "xtal", 40000000)
};
+static struct mtmips_clk_fixed mt7620_fixed_clocks[] = {
+ CLK_FIXED("bbppll", "xtal", 480000000)
+};
+
static struct mtmips_clk_fixed mt76x8_fixed_clocks[] = {
- CLK_FIXED("pcmi2s", "xtal", 480000000),
+ CLK_FIXED("bbppll", "xtal", 480000000),
+ CLK_FIXED("pcmi2s", "bbppll", 480000000),
CLK_FIXED("periph", "xtal", 40000000)
};
@@ -327,6 +334,15 @@ static struct mtmips_clk_factor rt305x_factor_clocks[] = {
CLK_FACTOR("bus", "cpu", 1, 3)
};
+static struct mtmips_clk_factor mt7620_factor_clocks[] = {
+ CLK_FACTOR("sdhc", "bbppll", 1, 10)
+};
+
+static struct mtmips_clk_factor mt76x8_factor_clocks[] = {
+ CLK_FACTOR("bus", "cpu", 1, 3),
+ CLK_FACTOR("sdhc", "bbppll", 1, 10)
+};
+
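The factor entries are simple fixed ratios; a quick check of the SDHC rate both SoCs end up with (bbppll being the 480 MHz fixed clock registered above):

#include <stdio.h>

int main(void)
{
	unsigned long bbppll = 480000000UL;

	/* CLK_FACTOR("sdhc", "bbppll", 1, 10): sdhc = bbppll * 1 / 10 */
	printf("sdhc: %lu Hz\n", bbppll * 1 / 10);	/* 48000000 -> 48 MHz */
	return 0;
}
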
static int mtmips_register_factor_clocks(struct clk_hw_onecell_data *clk_data,
struct mtmips_clk_priv *priv)
{
@@ -366,6 +382,12 @@ static inline struct mtmips_clk *to_mtmips_clk(struct clk_hw *hw)
return container_of(hw, struct mtmips_clk, hw);
}
+static unsigned long rt2880_xtal_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 40000000;
+}
+
static unsigned long rt5350_xtal_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -677,10 +699,12 @@ static unsigned long mt76x8_cpu_recalc_rate(struct clk_hw *hw,
}
static struct mtmips_clk rt2880_clks_base[] = {
+ { CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) },
{ CLK_BASE("cpu", "xtal", rt2880_cpu_recalc_rate) }
};
static struct mtmips_clk rt305x_clks_base[] = {
+ { CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) },
{ CLK_BASE("cpu", "xtal", rt305x_cpu_recalc_rate) }
};
@@ -690,6 +714,7 @@ static struct mtmips_clk rt3352_clks_base[] = {
};
static struct mtmips_clk rt3883_clks_base[] = {
+ { CLK_BASE("xtal", NULL, rt2880_xtal_recalc_rate) },
{ CLK_BASE("cpu", "xtal", rt3883_cpu_recalc_rate) },
{ CLK_BASE("bus", "cpu", rt3883_bus_recalc_rate) }
};
@@ -746,8 +771,8 @@ err_clk_unreg:
static const struct mtmips_clk_data rt2880_clk_data = {
.clk_base = rt2880_clks_base,
.num_clk_base = ARRAY_SIZE(rt2880_clks_base),
- .clk_fixed = rt305x_fixed_clocks,
- .num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks),
+ .clk_fixed = NULL,
+ .num_clk_fixed = 0,
.clk_factor = rt2880_factor_clocks,
.num_clk_factor = ARRAY_SIZE(rt2880_factor_clocks),
.clk_periph = rt2880_pherip_clks,
@@ -757,8 +782,8 @@ static const struct mtmips_clk_data rt2880_clk_data = {
static const struct mtmips_clk_data rt305x_clk_data = {
.clk_base = rt305x_clks_base,
.num_clk_base = ARRAY_SIZE(rt305x_clks_base),
- .clk_fixed = rt305x_fixed_clocks,
- .num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks),
+ .clk_fixed = NULL,
+ .num_clk_fixed = 0,
.clk_factor = rt305x_factor_clocks,
.num_clk_factor = ARRAY_SIZE(rt305x_factor_clocks),
.clk_periph = rt305x_pherip_clks,
@@ -779,8 +804,8 @@ static const struct mtmips_clk_data rt3352_clk_data = {
static const struct mtmips_clk_data rt3883_clk_data = {
.clk_base = rt3883_clks_base,
.num_clk_base = ARRAY_SIZE(rt3883_clks_base),
- .clk_fixed = rt305x_fixed_clocks,
- .num_clk_fixed = ARRAY_SIZE(rt305x_fixed_clocks),
+ .clk_fixed = rt3883_fixed_clocks,
+ .num_clk_fixed = ARRAY_SIZE(rt3883_fixed_clocks),
.clk_factor = NULL,
.num_clk_factor = 0,
.clk_periph = rt5350_pherip_clks,
@@ -801,10 +826,10 @@ static const struct mtmips_clk_data rt5350_clk_data = {
static const struct mtmips_clk_data mt7620_clk_data = {
.clk_base = mt7620_clks_base,
.num_clk_base = ARRAY_SIZE(mt7620_clks_base),
- .clk_fixed = NULL,
- .num_clk_fixed = 0,
- .clk_factor = NULL,
- .num_clk_factor = 0,
+ .clk_fixed = mt7620_fixed_clocks,
+ .num_clk_fixed = ARRAY_SIZE(mt7620_fixed_clocks),
+ .clk_factor = mt7620_factor_clocks,
+ .num_clk_factor = ARRAY_SIZE(mt7620_factor_clocks),
.clk_periph = mt7620_pherip_clks,
.num_clk_periph = ARRAY_SIZE(mt7620_pherip_clks),
};
@@ -814,8 +839,8 @@ static const struct mtmips_clk_data mt76x8_clk_data = {
.num_clk_base = ARRAY_SIZE(mt76x8_clks_base),
.clk_fixed = mt76x8_fixed_clocks,
.num_clk_fixed = ARRAY_SIZE(mt76x8_fixed_clocks),
- .clk_factor = rt305x_factor_clocks,
- .num_clk_factor = ARRAY_SIZE(rt305x_factor_clocks),
+ .clk_factor = mt76x8_factor_clocks,
+ .num_clk_factor = ARRAY_SIZE(mt76x8_factor_clocks),
.clk_periph = mt76x8_pherip_clks,
.num_clk_periph = ARRAY_SIZE(mt76x8_pherip_clks),
};
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 76791a1c50ac..5a4bc3f94d49 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -40,6 +40,7 @@ config CLK_RENESAS
select CLK_R9A07G054 if ARCH_R9A07G054
select CLK_R9A08G045 if ARCH_R9A08G045
select CLK_R9A09G011 if ARCH_R9A09G011
+ select CLK_R9A09G047 if ARCH_R9A09G047
select CLK_R9A09G057 if ARCH_R9A09G057
select CLK_SH73A0 if ARCH_SH73A0
@@ -194,6 +195,10 @@ config CLK_R9A09G011
bool "RZ/V2M clock support" if COMPILE_TEST
select CLK_RZG2L
+config CLK_R9A09G047
+ bool "RZ/G3E clock support" if COMPILE_TEST
+ select CLK_RZV2H
+
config CLK_R9A09G057
bool "RZ/V2H(P) clock support" if COMPILE_TEST
select CLK_RZV2H
@@ -234,7 +239,12 @@ config CLK_RZG2L
select RESET_CONTROLLER
config CLK_RZV2H
- bool "RZ/V2H(P) family clock support" if COMPILE_TEST
+ bool "RZ/{G3E,V2H(P)} family clock support" if COMPILE_TEST
+ select RESET_CONTROLLER
+
+config CLK_RENESAS_VBATTB
+ tristate "Renesas VBATTB clock controller"
+ depends on ARCH_RZG2L || COMPILE_TEST
select RESET_CONTROLLER
# Generic
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 23d2e26051c8..2d6e746939c4 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o
obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o
+obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o
obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
@@ -53,3 +54,4 @@ obj-$(CONFIG_CLK_RZV2H) += rzv2h-cpg.o
obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o
obj-$(CONFIG_CLK_RENESAS_CPG_MSTP) += clk-mstp.o
obj-$(CONFIG_CLK_RENESAS_DIV6) += clk-div6.o
+obj-$(CONFIG_CLK_RENESAS_VBATTB) += clk-vbattb.o
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 4b1815147f77..f331d8bc9daf 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -64,7 +64,6 @@ r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
unsigned int mult = 1;
unsigned int div = 1;
-
if (!strcmp(name, "main")) {
u32 ckscr = readl(base + CPG_CKSCR);
diff --git a/drivers/clk/renesas/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c
index 797556259370..6ea173f22251 100644
--- a/drivers/clk/renesas/clk-r8a7778.c
+++ b/drivers/clk/renesas/clk-r8a7778.c
@@ -67,7 +67,6 @@ r8a7778_cpg_register_clock(struct device_node *np, const char *name)
return ERR_PTR(-EINVAL);
}
-
static void __init r8a7778_cpg_clocks_init(struct device_node *np)
{
struct clk_onecell_data *data;
diff --git a/drivers/clk/renesas/clk-vbattb.c b/drivers/clk/renesas/clk-vbattb.c
new file mode 100644
index 000000000000..ff9d1ead455c
--- /dev/null
+++ b/drivers/clk/renesas/clk-vbattb.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VBATTB clock driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <dt-bindings/clock/renesas,r9a08g045-vbattb.h>
+
+#define VBATTB_BKSCCR 0x1c
+#define VBATTB_BKSCCR_SOSEL 6
+#define VBATTB_SOSCCR2 0x24
+#define VBATTB_SOSCCR2_SOSTP2 0
+#define VBATTB_XOSCCR 0x30
+#define VBATTB_XOSCCR_OUTEN 16
+#define VBATTB_XOSCCR_XSEL GENMASK(1, 0)
+#define VBATTB_XOSCCR_XSEL_4_PF 0x0
+#define VBATTB_XOSCCR_XSEL_7_PF 0x1
+#define VBATTB_XOSCCR_XSEL_9_PF 0x2
+#define VBATTB_XOSCCR_XSEL_12_5_PF 0x3
+
+/**
+ * struct vbattb_clk - VBATTB clock data structure
+ * @base: base address
+ * @lock: lock
+ */
+struct vbattb_clk {
+ void __iomem *base;
+ spinlock_t lock;
+};
+
+static int vbattb_clk_validate_load_capacitance(u32 *reg_lc, u32 of_lc)
+{
+ switch (of_lc) {
+ case 4000:
+ *reg_lc = VBATTB_XOSCCR_XSEL_4_PF;
+ break;
+ case 7000:
+ *reg_lc = VBATTB_XOSCCR_XSEL_7_PF;
+ break;
+ case 9000:
+ *reg_lc = VBATTB_XOSCCR_XSEL_9_PF;
+ break;
+ case 12500:
+ *reg_lc = VBATTB_XOSCCR_XSEL_12_5_PF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vbattb_clk_action(void *data)
+{
+ struct device *dev = data;
+ struct reset_control *rstc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_assert(rstc);
+ if (ret)
+		dev_err(dev, "Failed to assert reset!\n");
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+		dev_err(dev, "Failed to runtime suspend!\n");
+
+ of_clk_del_provider(dev->of_node);
+}
+
+static int vbattb_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct clk_parent_data parent_data = {};
+ struct clk_hw_onecell_data *clk_data;
+ const struct clk_hw *parent_hws[2];
+ struct device *dev = &pdev->dev;
+ struct reset_control *rstc;
+ struct vbattb_clk *vbclk;
+ u32 of_lc, reg_lc;
+ struct clk_hw *hw;
+ /* 4 clocks are exported: VBATTB_XC, VBATTB_XBYP, VBATTB_MUX, VBATTB_VBATTCLK. */
+ u8 num_clks = 4;
+ int ret;
+
+	/* Default to 4 pF, as this is not needed if an external clock device is connected. */
+ of_lc = 4000;
+ of_property_read_u32(np, "quartz-load-femtofarads", &of_lc);
+
+ ret = vbattb_clk_validate_load_capacitance(&reg_lc, of_lc);
+ if (ret)
+ return ret;
+
+ vbclk = devm_kzalloc(dev, sizeof(*vbclk), GFP_KERNEL);
+ if (!vbclk)
+ return -ENOMEM;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clks), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+ clk_data->num = num_clks;
+
+ vbclk->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vbclk->base))
+ return PTR_ERR(vbclk->base);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ rstc = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(rstc))
+ return PTR_ERR(rstc);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(rstc);
+ if (ret) {
+ pm_runtime_put_sync(dev);
+ return ret;
+ }
+
+ dev_set_drvdata(dev, rstc);
+ ret = devm_add_action_or_reset(dev, vbattb_clk_action, dev);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&vbclk->lock);
+
+ parent_data.fw_name = "rtx";
+ hw = devm_clk_hw_register_gate_parent_data(dev, "xc", &parent_data, 0,
+ vbclk->base + VBATTB_SOSCCR2,
+ VBATTB_SOSCCR2_SOSTP2,
+ CLK_GATE_SET_TO_DISABLE, &vbclk->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[VBATTB_XC] = hw;
+
+ hw = devm_clk_hw_register_fixed_factor_fwname(dev, np, "xbyp", "rtx", 0, 1, 1);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[VBATTB_XBYP] = hw;
+
+ parent_hws[0] = clk_data->hws[VBATTB_XC];
+ parent_hws[1] = clk_data->hws[VBATTB_XBYP];
+ hw = devm_clk_hw_register_mux_parent_hws(dev, "mux", parent_hws, 2, 0,
+ vbclk->base + VBATTB_BKSCCR,
+ VBATTB_BKSCCR_SOSEL,
+ 1, 0, &vbclk->lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[VBATTB_MUX] = hw;
+
+ /* Set load capacitance before registering the VBATTCLK clock. */
+ scoped_guard(spinlock, &vbclk->lock) {
+ u32 val = readl_relaxed(vbclk->base + VBATTB_XOSCCR);
+
+ val &= ~VBATTB_XOSCCR_XSEL;
+ val |= reg_lc;
+ writel_relaxed(val, vbclk->base + VBATTB_XOSCCR);
+ }
+
+ /* This feeds the RTC counter clock and it needs to stay on. */
+ hw = devm_clk_hw_register_gate_parent_hw(dev, "vbattclk", hw, CLK_IS_CRITICAL,
+ vbclk->base + VBATTB_XOSCCR,
+ VBATTB_XOSCCR_OUTEN, 0,
+ &vbclk->lock);
+
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[VBATTB_VBATTCLK] = hw;
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+}
+
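The VBATTCLK output ultimately feeds the SoC's RTC counter; a minimal sketch of that consumer side, with the "vbattclk" connection name assumed:

#include <linux/clk.h>
#include <linux/device.h>

static int example_rtc_get_counter_clk(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "vbattclk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* The gate is CLK_IS_CRITICAL, so the clock is already running. */
	dev_info(dev, "RTC counter clock: %lu Hz\n", clk_get_rate(clk));
	return 0;
}
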
+static const struct of_device_id vbattb_clk_match[] = {
+ { .compatible = "renesas,r9a08g045-vbattb" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vbattb_clk_match);
+
+static struct platform_driver vbattb_clk_driver = {
+ .driver = {
+ .name = "renesas-vbattb-clk",
+ .of_match_table = vbattb_clk_match,
+ },
+ .probe = vbattb_clk_probe,
+};
+module_platform_driver(vbattb_clk_driver);
+
+MODULE_DESCRIPTION("Renesas VBATTB Clock Driver");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index 4c8e4c69c1bf..9c7e4094705c 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -266,7 +266,6 @@ static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] __initconst = {
{ 2, 128, 1, 192, 1, 32, },
};
-
static int __init r8a779a0_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen4_cpg_pll_config *cpg_pll_config;
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index 55c8dd032fc3..d45571096b96 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -238,6 +238,10 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("pfc2", 917, R8A779G0_CLK_CP),
DEF_MOD("pfc3", 918, R8A779G0_CLK_CP),
DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M),
+ DEF_MOD("vspx0", 1028, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("vspx1", 1029, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("fcpvx0", 1100, R8A779G0_CLK_S0D1_VIO),
+ DEF_MOD("fcpvx1", 1101, R8A779G0_CLK_S0D1_VIO),
DEF_MOD("tsn", 2723, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER),
DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER),
diff --git a/drivers/clk/renesas/r8a779h0-cpg-mssr.c b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
index e20c048bfa9b..607fa815b6c1 100644
--- a/drivers/clk/renesas/r8a779h0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779h0-cpg-mssr.c
@@ -37,7 +37,6 @@ enum clk_ids {
CLK_PLL5,
CLK_PLL6,
CLK_PLL1_DIV2,
- CLK_PLL2_DIV2,
CLK_PLL3_DIV2,
CLK_PLL4_DIV2,
CLK_PLL4_DIV5,
@@ -78,7 +77,6 @@ static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = {
DEF_GEN4_PLL_V8_25(".pll6", 6, CLK_PLL6, CLK_MAIN),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
- DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 2, 1),
DEF_FIXED(".pll4_div2", CLK_PLL4_DIV2, CLK_PLL4, 2, 1),
DEF_FIXED(".pll4_div5", CLK_PLL4_DIV5, CLK_PLL4, 5, 1),
@@ -101,10 +99,10 @@ static const struct cpg_core_clk r8a779h0_core_clks[] __initconst = {
DEF_RATE(".oco", CLK_OCO, 32768),
/* Core Clock Outputs */
- DEF_GEN4_Z("zc0", R8A779H0_CLK_ZC0, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 0),
- DEF_GEN4_Z("zc1", R8A779H0_CLK_ZC1, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 8),
- DEF_GEN4_Z("zc2", R8A779H0_CLK_ZC2, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 32),
- DEF_GEN4_Z("zc3", R8A779H0_CLK_ZC3, CLK_TYPE_GEN4_Z, CLK_PLL2_DIV2, 2, 40),
+ DEF_GEN4_Z("zc0", R8A779H0_CLK_ZC0, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 0),
+ DEF_GEN4_Z("zc1", R8A779H0_CLK_ZC1, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 8),
+ DEF_GEN4_Z("zc2", R8A779H0_CLK_ZC2, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 32),
+ DEF_GEN4_Z("zc3", R8A779H0_CLK_ZC3, CLK_TYPE_GEN4_Z, CLK_PLL2, 4, 40),
DEF_FIXED("s0d2", R8A779H0_CLK_S0D2, CLK_S0, 2, 1),
DEF_FIXED("s0d3", R8A779H0_CLK_S0D3, CLK_S0, 3, 1),
DEF_FIXED("s0d4", R8A779H0_CLK_S0D4, CLK_S0, 4, 1),
@@ -179,6 +177,9 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("canfd0", 328, R8A779H0_CLK_SASYNCPERD2),
DEF_MOD("csi40", 331, R8A779H0_CLK_CSI),
DEF_MOD("csi41", 400, R8A779H0_CLK_CSI),
+ DEF_MOD("dis0", 411, R8A779H0_CLK_VIOBUSD2),
+ DEF_MOD("dsitxlink0", 415, R8A779H0_CLK_VIOBUSD2),
+ DEF_MOD("fcpvd0", 508, R8A779H0_CLK_VIOBUSD2),
DEF_MOD("hscif0", 514, R8A779H0_CLK_SASYNCPERD1),
DEF_MOD("hscif1", 515, R8A779H0_CLK_SASYNCPERD1),
DEF_MOD("hscif2", 516, R8A779H0_CLK_SASYNCPERD1),
@@ -227,6 +228,7 @@ static const struct mssr_mod_clk r8a779h0_mod_clks[] __initconst = {
DEF_MOD("vin15", 811, R8A779H0_CLK_S0D4_VIO),
DEF_MOD("vin16", 812, R8A779H0_CLK_S0D4_VIO),
DEF_MOD("vin17", 813, R8A779H0_CLK_S0D4_VIO),
+ DEF_MOD("vspd0", 830, R8A779H0_CLK_VIOBUSD2),
DEF_MOD("wdt1:wdt0", 907, R8A779H0_CLK_R),
DEF_MOD("cmt0", 910, R8A779H0_CLK_R),
DEF_MOD("cmt1", 911, R8A779H0_CLK_R),
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index c1348e2d450c..dcda19318b2a 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -20,15 +20,24 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/spinlock.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h>
#define R9A06G032_SYSCTRL_USB 0x00
-#define R9A06G032_SYSCTRL_USB_H2MODE (1<<1)
+#define R9A06G032_SYSCTRL_USB_H2MODE BIT(1)
#define R9A06G032_SYSCTRL_DMAMUX 0xA0
+#define R9A06G032_SYSCTRL_RSTEN 0x120
+#define R9A06G032_SYSCTRL_RSTEN_MRESET_EN BIT(0)
+#define R9A06G032_SYSCTRL_RSTCTRL 0x198
+/* These work for both reset registers */
+#define R9A06G032_SYSCTRL_SWRST BIT(6)
+#define R9A06G032_SYSCTRL_WDA7RST_1 BIT(2)
+#define R9A06G032_SYSCTRL_WDA7RST_0 BIT(1)
+
/**
* struct regbit - describe one bit in a register
* @reg: offset of register relative to base address,
@@ -1270,6 +1279,12 @@ static void r9a06g032_clocks_del_clk_provider(void *data)
of_clk_del_provider(data);
}
+static int r9a06g032_restart_handler(struct sys_off_data *data)
+{
+ writel(R9A06G032_SYSCTRL_SWRST, sysctrl_priv->reg + R9A06G032_SYSCTRL_RSTCTRL);
+ return NOTIFY_DONE;
+}
+
static void __init r9a06g032_init_h2mode(struct r9a06g032_priv *clocks)
{
struct device_node *usbf_np;
@@ -1324,6 +1339,18 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
r9a06g032_init_h2mode(clocks);
+ /* Clear potentially pending resets */
+ writel(R9A06G032_SYSCTRL_WDA7RST_0 | R9A06G032_SYSCTRL_WDA7RST_1,
+ clocks->reg + R9A06G032_SYSCTRL_RSTCTRL);
+ /* Allow software reset */
+ writel(R9A06G032_SYSCTRL_SWRST | R9A06G032_SYSCTRL_RSTEN_MRESET_EN,
+ clocks->reg + R9A06G032_SYSCTRL_RSTEN);
+
+ error = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART, SYS_OFF_PRIO_HIGH,
+ r9a06g032_restart_handler, NULL);
+ if (error)
+ dev_warn(dev, "couldn't register restart handler (%d)\n", error);
+
for (i = 0; i < ARRAY_SIZE(r9a06g032_clocks); ++i) {
const struct r9a06g032_clkdesc *d = &r9a06g032_clocks[i];
const char *parent_name = d->source ?
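
The r9a06g032 hunk above follows the standard sys-off pattern: unmask the software reset in RSTEN once at probe time, then let a high-priority restart handler pulse SWRST in RSTCTRL. A minimal stand-alone sketch of the same pattern for a hypothetical MMIO reset block (register offsets, bit positions, and the priv plumbing here are illustrative, not this driver's; the driver itself reaches its registers through a file-scope sysctrl_priv):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>

struct examplerst_priv {
	void __iomem *reg;		/* MMIO base of the reset block */
};

static int examplerst_restart(struct sys_off_data *data)
{
	struct examplerst_priv *priv = data->cb_data;

	writel(BIT(6), priv->reg + 0x198);	/* pulse SWRST in RSTCTRL */
	return NOTIFY_DONE;
}

static int examplerst_probe(struct platform_device *pdev)
{
	struct examplerst_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->reg))
		return PTR_ERR(priv->reg);

	/* Unmask the software reset before the handler can ever fire. */
	writel(BIT(6) | BIT(0), priv->reg + 0x120);

	return devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART,
					     SYS_OFF_PRIO_HIGH,
					     examplerst_restart, priv);
}
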
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index 1ce40fb51f13..0e7e3bf05b52 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/pm_domain.h>
#include <dt-bindings/clock/r9a08g045-cpg.h>
@@ -186,6 +187,7 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
DEF_FIXED("OSC", R9A08G045_OSCCLK, CLK_EXTAL, 1, 1),
DEF_FIXED("OSC2", R9A08G045_OSCCLK2, CLK_EXTAL, 1, 3),
DEF_FIXED("HP", R9A08G045_CLK_HP, CLK_PLL6, 1, 2),
+ DEF_FIXED("TSU", R9A08G045_CLK_TSU, CLK_PLL2_DIV2, 1, 8),
};
static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
@@ -208,6 +210,14 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("sdhi2_imclk2", R9A08G045_SDHI2_IMCLK2, CLK_SD2_DIV4, 0x554, 9),
DEF_MOD("sdhi2_clk_hs", R9A08G045_SDHI2_CLK_HS, R9A08G045_CLK_SD2, 0x554, 10),
DEF_MOD("sdhi2_aclk", R9A08G045_SDHI2_ACLK, R9A08G045_CLK_P1, 0x554, 11),
+ DEF_MOD("ssi0_pclk2", R9A08G045_SSI0_PCLK2, R9A08G045_CLK_P0, 0x570, 0),
+ DEF_MOD("ssi0_sfr", R9A08G045_SSI0_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 1),
+ DEF_MOD("ssi1_pclk2", R9A08G045_SSI1_PCLK2, R9A08G045_CLK_P0, 0x570, 2),
+ DEF_MOD("ssi1_sfr", R9A08G045_SSI1_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 3),
+ DEF_MOD("ssi2_pclk2", R9A08G045_SSI2_PCLK2, R9A08G045_CLK_P0, 0x570, 4),
+ DEF_MOD("ssi2_sfr", R9A08G045_SSI2_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 5),
+ DEF_MOD("ssi3_pclk2", R9A08G045_SSI3_PCLK2, R9A08G045_CLK_P0, 0x570, 6),
+ DEF_MOD("ssi3_sfr", R9A08G045_SSI3_PCLK_SFR, R9A08G045_CLK_P0, 0x570, 7),
DEF_MOD("usb0_host", R9A08G045_USB_U2H0_HCLK, R9A08G045_CLK_P1, 0x578, 0),
DEF_MOD("usb1_host", R9A08G045_USB_U2H1_HCLK, R9A08G045_CLK_P1, 0x578, 1),
DEF_MOD("usb0_func", R9A08G045_USB_U2P_EXR_CPUCLK, R9A08G045_CLK_P1, 0x578, 2),
@@ -223,7 +233,14 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
DEF_MOD("i2c2_pclk", R9A08G045_I2C2_PCLK, R9A08G045_CLK_P0, 0x580, 2),
DEF_MOD("i2c3_pclk", R9A08G045_I2C3_PCLK, R9A08G045_CLK_P0, 0x580, 3),
DEF_MOD("scif0_clk_pck", R9A08G045_SCIF0_CLK_PCK, R9A08G045_CLK_P0, 0x584, 0),
+ DEF_MOD("scif1_clk_pck", R9A08G045_SCIF1_CLK_PCK, R9A08G045_CLK_P0, 0x584, 1),
+ DEF_MOD("scif2_clk_pck", R9A08G045_SCIF2_CLK_PCK, R9A08G045_CLK_P0, 0x584, 2),
+ DEF_MOD("scif3_clk_pck", R9A08G045_SCIF3_CLK_PCK, R9A08G045_CLK_P0, 0x584, 3),
+ DEF_MOD("scif4_clk_pck", R9A08G045_SCIF4_CLK_PCK, R9A08G045_CLK_P0, 0x584, 4),
+ DEF_MOD("scif5_clk_pck", R9A08G045_SCIF5_CLK_PCK, R9A08G045_CLK_P0, 0x584, 5),
DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0),
+ DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0),
+ DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1),
DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0),
};
@@ -237,6 +254,10 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A08G045_SDHI1_IXRST, 0x854, 1),
DEF_RST(R9A08G045_SDHI2_IXRST, 0x854, 2),
+ DEF_RST(R9A08G045_SSI0_RST_M2_REG, 0x870, 0),
+ DEF_RST(R9A08G045_SSI1_RST_M2_REG, 0x870, 1),
+ DEF_RST(R9A08G045_SSI2_RST_M2_REG, 0x870, 2),
+ DEF_RST(R9A08G045_SSI3_RST_M2_REG, 0x870, 3),
DEF_RST(R9A08G045_USB_U2H0_HRESETN, 0x878, 0),
DEF_RST(R9A08G045_USB_U2H1_HRESETN, 0x878, 1),
DEF_RST(R9A08G045_USB_U2P_EXL_SYSRST, 0x878, 2),
@@ -248,9 +269,16 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_I2C2_MRST, 0x880, 2),
DEF_RST(R9A08G045_I2C3_MRST, 0x880, 3),
DEF_RST(R9A08G045_SCIF0_RST_SYSTEM_N, 0x884, 0),
+ DEF_RST(R9A08G045_SCIF1_RST_SYSTEM_N, 0x884, 1),
+ DEF_RST(R9A08G045_SCIF2_RST_SYSTEM_N, 0x884, 2),
+ DEF_RST(R9A08G045_SCIF3_RST_SYSTEM_N, 0x884, 3),
+ DEF_RST(R9A08G045_SCIF4_RST_SYSTEM_N, 0x884, 4),
+ DEF_RST(R9A08G045_SCIF5_RST_SYSTEM_N, 0x884, 5),
DEF_RST(R9A08G045_GPIO_RSTN, 0x898, 0),
DEF_RST(R9A08G045_GPIO_PORT_RESETN, 0x898, 1),
DEF_RST(R9A08G045_GPIO_SPARE_RESETN, 0x898, 2),
+ DEF_RST(R9A08G045_ADC_PRESETN, 0x8a8, 0),
+ DEF_RST(R9A08G045_ADC_ADRST_N, 0x8a8, 1),
DEF_RST(R9A08G045_VBAT_BRESETN, 0x914, 0),
};
@@ -266,61 +294,70 @@ static const struct rzg2l_cpg_pm_domain_init_data r9a08g045_pm_domains[] = {
/* Keep always-on domain on the first position for proper domains registration. */
DEF_PD("always-on", R9A08G045_PD_ALWAYS_ON,
DEF_REG_CONF(0, 0),
- RZG2L_PD_F_ALWAYS_ON),
+ GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_IRQ_SAFE),
DEF_PD("gic", R9A08G045_PD_GIC,
DEF_REG_CONF(CPG_BUS_ACPU_MSTOP, BIT(3)),
- RZG2L_PD_F_ALWAYS_ON),
+ GENPD_FLAG_ALWAYS_ON),
DEF_PD("ia55", R9A08G045_PD_IA55,
DEF_REG_CONF(CPG_BUS_PERI_CPU_MSTOP, BIT(13)),
- RZG2L_PD_F_ALWAYS_ON),
+ GENPD_FLAG_ALWAYS_ON),
DEF_PD("dmac", R9A08G045_PD_DMAC,
DEF_REG_CONF(CPG_BUS_REG1_MSTOP, GENMASK(3, 0)),
- RZG2L_PD_F_ALWAYS_ON),
+ GENPD_FLAG_ALWAYS_ON),
DEF_PD("wdt0", R9A08G045_PD_WDT0,
DEF_REG_CONF(CPG_BUS_REG0_MSTOP, BIT(0)),
- RZG2L_PD_F_NONE),
+ GENPD_FLAG_IRQ_SAFE),
DEF_PD("sdhi0", R9A08G045_PD_SDHI0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(0)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(0)), 0),
DEF_PD("sdhi1", R9A08G045_PD_SDHI1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(1)), 0),
DEF_PD("sdhi2", R9A08G045_PD_SDHI2,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(11)), 0),
+ DEF_PD("ssi0", R9A08G045_PD_SSI0,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(10)), 0),
+ DEF_PD("ssi1", R9A08G045_PD_SSI1,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(11)), 0),
+ DEF_PD("ssi2", R9A08G045_PD_SSI2,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(12)), 0),
+ DEF_PD("ssi3", R9A08G045_PD_SSI3,
+ DEF_REG_CONF(CPG_BUS_MCPU1_MSTOP, BIT(13)), 0),
DEF_PD("usb0", R9A08G045_PD_USB0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, GENMASK(6, 5)), 0),
DEF_PD("usb1", R9A08G045_PD_USB1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(7)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(7)), 0),
DEF_PD("usb-phy", R9A08G045_PD_USB_PHY,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(4)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(4)), 0),
DEF_PD("eth0", R9A08G045_PD_ETHER0,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(2)), 0),
DEF_PD("eth1", R9A08G045_PD_ETHER1,
- DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(3)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_PERI_COM_MSTOP, BIT(3)), 0),
DEF_PD("i2c0", R9A08G045_PD_I2C0,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(10)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(10)), 0),
DEF_PD("i2c1", R9A08G045_PD_I2C1,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(11)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(11)), 0),
DEF_PD("i2c2", R9A08G045_PD_I2C2,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(12)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(12)), 0),
DEF_PD("i2c3", R9A08G045_PD_I2C3,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(13)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(13)), 0),
DEF_PD("scif0", R9A08G045_PD_SCIF0,
- DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)),
- RZG2L_PD_F_NONE),
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(1)), 0),
+ DEF_PD("scif1", R9A08G045_PD_SCIF1,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(2)), 0),
+ DEF_PD("scif2", R9A08G045_PD_SCIF2,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(3)), 0),
+ DEF_PD("scif3", R9A08G045_PD_SCIF3,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(4)), 0),
+ DEF_PD("scif4", R9A08G045_PD_SCIF4,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(5)), 0),
+ DEF_PD("scif5", R9A08G045_PD_SCIF5,
+ DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(4)), 0),
+ DEF_PD("adc", R9A08G045_PD_ADC,
+ DEF_REG_CONF(CPG_BUS_MCPU2_MSTOP, BIT(14)), 0),
DEF_PD("vbat", R9A08G045_PD_VBAT,
DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(8)),
- RZG2L_PD_F_ALWAYS_ON),
+ GENPD_FLAG_ALWAYS_ON),
+ DEF_PD("rtc", R9A08G045_PD_RTC,
+ DEF_REG_CONF(CPG_BUS_MCPU3_MSTOP, BIT(7)), 0),
};
const struct rzg2l_cpg_info r9a08g045_cpg_info = {
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
index dda9f29dff33..22272279b104 100644
--- a/drivers/clk/renesas/r9a09g011-cpg.c
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -98,7 +98,6 @@ static const struct clk_div_table dtable_divd[] = {
{0, 0},
};
-
static const struct clk_div_table dtable_divw[] = {
{0, 6},
{1, 7},
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
new file mode 100644
index 000000000000..536d922bed70
--- /dev/null
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G3E CPG driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/renesas,r9a09g047-cpg.h>
+
+#include "rzv2h-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R9A09G047_IOTOP_0_SHCLK,
+
+ /* External Input Clocks */
+ CLK_AUDIO_EXTAL,
+ CLK_RTXIN,
+ CLK_QEXTAL,
+
+ /* PLL Clocks */
+ CLK_PLLCM33,
+ CLK_PLLCLN,
+ CLK_PLLDTY,
+ CLK_PLLCA55,
+
+ /* Internal Core Clocks */
+ CLK_PLLCM33_DIV16,
+ CLK_PLLCLN_DIV16,
+ CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE,
+};
+
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_64[] = {
+ {0, 2},
+ {1, 4},
+ {2, 8},
+ {3, 16},
+ {4, 64},
+ {0, 0},
+};
+
+static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL),
+ DEF_INPUT("rtxin", CLK_RTXIN),
+ DEF_INPUT("qextal", CLK_QEXTAL),
+
+ /* PLL Clocks */
+ DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
+ DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
+ DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+
+ /* Internal Core Clocks */
+ DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+
+ DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
+
+ DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+
+ /* Core Clocks */
+ DEF_FIXED("sys_0_pclk", R9A09G047_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G047_CA55_0_CORECLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G047_CA55_0_CORECLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G047_CA55_0_CORECLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G047_CA55_0_CORECLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
+ DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
+};
+
+static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+};
+
+static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
+ DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
+ DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
+ DEF_RST(9, 11, 4, 12), /* RIIC_3_MRST */
+ DEF_RST(9, 12, 4, 13), /* RIIC_4_MRST */
+ DEF_RST(9, 13, 4, 14), /* RIIC_5_MRST */
+ DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
+ DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
+ DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
+};
+
+const struct rzv2h_cpg_info r9a09g047_cpg_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r9a09g047_core_clks,
+ .num_core_clks = ARRAY_SIZE(r9a09g047_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r9a09g047_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r9a09g047_mod_clks),
+ .num_hw_mod_clks = 28 * 16,
+
+ /* Resets */
+ .resets = r9a09g047_resets,
+ .num_resets = ARRAY_SIZE(r9a09g047_resets),
+
+ .num_mstop_bits = 208,
+};
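
The divider tables in this new file follow the clk framework's clk_div_table convention: .val is the raw register field, .div the divisor it selects, and a {0, 0} entry terminates the walk (a real divide-by value is never 0). A short sketch of how such a table is typically consumed, under that convention (the lookup helper is illustrative):

#include <linux/clk-provider.h>

static unsigned int example_div_from_val(const struct clk_div_table *table,
					 unsigned int val)
{
	const struct clk_div_table *t;

	for (t = table; t->div; t++)	/* {0, 0} sentinel ends the table */
		if (t->val == val)
			return t->div;

	return 0;			/* register value not in the table */
}
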
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index 3ee32db5c0af..3705e18f66ad 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -28,6 +28,7 @@ enum clk_ids {
CLK_PLLCLN,
CLK_PLLDTY,
CLK_PLLCA55,
+ CLK_PLLVDO,
/* Internal Core Clocks */
CLK_PLLCM33_DIV16,
@@ -35,12 +36,32 @@ enum clk_ids {
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
CLK_PLLDTY_ACPU,
+ CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
+ CLK_PLLDTY_DIV16,
+ CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_CRU1,
+ CLK_PLLVDO_CRU2,
+ CLK_PLLVDO_CRU3,
/* Module Clocks */
MOD_CLK_BASE,
};
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
+static const struct clk_div_table dtable_2_4[] = {
+ {0, 2},
+ {1, 4},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -61,6 +82,7 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)),
+ DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
/* Internal Core Clocks */
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
@@ -70,55 +92,140 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
+ DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+
+ DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru1", CLK_PLLVDO_CRU1, CLK_PLLVDO, CDDIV4_DIVCTL0, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
/* Core Clocks */
DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1),
+ DEF_DDIV("ca55_0_coreclk0", R9A09G057_CA55_0_CORE_CLK0, CLK_PLLCA55,
+ CDDIV1_DIVCTL0, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk1", R9A09G057_CA55_0_CORE_CLK1, CLK_PLLCA55,
+ CDDIV1_DIVCTL1, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk2", R9A09G057_CA55_0_CORE_CLK2, CLK_PLLCA55,
+ CDDIV1_DIVCTL2, dtable_1_8),
+ DEF_DDIV("ca55_0_coreclk3", R9A09G057_CA55_0_CORE_CLK3, CLK_PLLCA55,
+ CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
- DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3),
- DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4),
- DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5),
- DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6),
- DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7),
- DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8),
- DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9),
- DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10),
- DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11),
- DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12),
- DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13),
- DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14),
- DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15),
- DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16),
- DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17),
- DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18),
- DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15),
- DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19),
- DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20),
- DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21),
- DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22),
- DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23),
- DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24),
- DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25),
- DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26),
- DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27),
- DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3),
- DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4),
- DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5),
- DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6),
- DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7),
- DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8),
- DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9),
- DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10),
- DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11),
- DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12),
- DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13),
- DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14),
+ DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5,
+ BUS_MSTOP_NONE),
+ DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
+ BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gtm_0_pclk", CLK_PLLCM33_DIV16, 4, 3, 2, 3,
+ BUS_MSTOP(5, BIT(10))),
+ DEF_MOD("gtm_1_pclk", CLK_PLLCM33_DIV16, 4, 4, 2, 4,
+ BUS_MSTOP(5, BIT(11))),
+ DEF_MOD("gtm_2_pclk", CLK_PLLCLN_DIV16, 4, 5, 2, 5,
+ BUS_MSTOP(2, BIT(13))),
+ DEF_MOD("gtm_3_pclk", CLK_PLLCLN_DIV16, 4, 6, 2, 6,
+ BUS_MSTOP(2, BIT(14))),
+ DEF_MOD("gtm_4_pclk", CLK_PLLCLN_DIV16, 4, 7, 2, 7,
+ BUS_MSTOP(11, BIT(13))),
+ DEF_MOD("gtm_5_pclk", CLK_PLLCLN_DIV16, 4, 8, 2, 8,
+ BUS_MSTOP(11, BIT(14))),
+ DEF_MOD("gtm_6_pclk", CLK_PLLCLN_DIV16, 4, 9, 2, 9,
+ BUS_MSTOP(11, BIT(15))),
+ DEF_MOD("gtm_7_pclk", CLK_PLLCLN_DIV16, 4, 10, 2, 10,
+ BUS_MSTOP(12, BIT(0))),
+ DEF_MOD("wdt_0_clkp", CLK_PLLCM33_DIV16, 4, 11, 2, 11,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_0_clk_loco", CLK_QEXTAL, 4, 12, 2, 12,
+ BUS_MSTOP(3, BIT(10))),
+ DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
+ BUS_MSTOP(1, BIT(0))),
+ DEF_MOD("wdt_2_clkp", CLK_PLLCLN_DIV16, 4, 15, 2, 15,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_2_clk_loco", CLK_QEXTAL, 5, 0, 2, 16,
+ BUS_MSTOP(5, BIT(12))),
+ DEF_MOD("wdt_3_clkp", CLK_PLLCLN_DIV16, 5, 1, 2, 17,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
+ BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
+ BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
+ BUS_MSTOP(3, BIT(13))),
+ DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
+ BUS_MSTOP(1, BIT(1))),
+ DEF_MOD("riic_1_ckm", CLK_PLLCLN_DIV16, 9, 5, 4, 21,
+ BUS_MSTOP(1, BIT(2))),
+ DEF_MOD("riic_2_ckm", CLK_PLLCLN_DIV16, 9, 6, 4, 22,
+ BUS_MSTOP(1, BIT(3))),
+ DEF_MOD("riic_3_ckm", CLK_PLLCLN_DIV16, 9, 7, 4, 23,
+ BUS_MSTOP(1, BIT(4))),
+ DEF_MOD("riic_4_ckm", CLK_PLLCLN_DIV16, 9, 8, 4, 24,
+ BUS_MSTOP(1, BIT(5))),
+ DEF_MOD("riic_5_ckm", CLK_PLLCLN_DIV16, 9, 9, 4, 25,
+ BUS_MSTOP(1, BIT(6))),
+ DEF_MOD("riic_6_ckm", CLK_PLLCLN_DIV16, 9, 10, 4, 26,
+ BUS_MSTOP(1, BIT(7))),
+ DEF_MOD("riic_7_ckm", CLK_PLLCLN_DIV16, 9, 11, 4, 27,
+ BUS_MSTOP(1, BIT(8))),
+ DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6,
+ BUS_MSTOP(8, BIT(2))),
+ DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10,
+ BUS_MSTOP(8, BIT(3))),
+ DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
+ BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_1_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 5, 6, 21,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD_NO_PM("cru_1_vclk", CLK_PLLVDO_CRU1, 13, 6, 6, 22,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("cru_1_pclk", CLK_PLLDTY_DIV16, 13, 7, 6, 23,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("cru_2_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 8, 6, 24,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD_NO_PM("cru_2_vclk", CLK_PLLVDO_CRU2, 13, 9, 6, 25,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD("cru_2_pclk", CLK_PLLDTY_DIV16, 13, 10, 6, 26,
+ BUS_MSTOP(9, BIT(6))),
+ DEF_MOD("cru_3_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 11, 6, 27,
+ BUS_MSTOP(9, BIT(7))),
+ DEF_MOD_NO_PM("cru_3_vclk", CLK_PLLVDO_CRU3, 13, 12, 6, 28,
+ BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29,
+ BUS_MSTOP(9, BIT(7))),
};
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
+ DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */
+ DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
+ DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
DEF_RST(6, 13, 2, 30), /* GTM_0_PRESETZ */
DEF_RST(6, 14, 2, 31), /* GTM_1_PRESETZ */
DEF_RST(6, 15, 3, 0), /* GTM_2_PRESETZ */
@@ -144,6 +251,18 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
+ DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
+ DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(12, 8, 5, 25), /* CRU_1_PRESETN */
+ DEF_RST(12, 9, 5, 26), /* CRU_1_ARESETN */
+ DEF_RST(12, 10, 5, 27), /* CRU_1_S_RESETN */
+ DEF_RST(12, 11, 5, 28), /* CRU_2_PRESETN */
+ DEF_RST(12, 12, 5, 29), /* CRU_2_ARESETN */
+ DEF_RST(12, 13, 5, 30), /* CRU_2_S_RESETN */
+ DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */
+ DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */
+ DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */
};
const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
@@ -161,4 +280,6 @@ const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
/* Resets */
.resets = r9a09g057_resets,
.num_resets = ARRAY_SIZE(r9a09g057_resets),
+
+ .num_mstop_bits = 192,
};
diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
index 42b126ea3e13..a45f8e7e9ab6 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.c
+++ b/drivers/clk/renesas/rcar-cpg-lib.c
@@ -206,4 +206,3 @@ struct clk * __init cpg_rpcd2_clk_register(const char *name,
return clk;
}
-
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 20b89eb6c35c..027100e84ee4 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -335,7 +335,6 @@ static u32 cpg_quirks __initdata;
#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
-
static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
{
.soc_id = "r8a7796", .revision = "ES1.0",
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 1b421b809796..bf85501709f0 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -39,7 +39,6 @@
#define WARN_DEBUG(x) do { } while (0)
#endif
-
/*
 * Module Standby and Software Reset register offsets.
*
@@ -716,7 +715,6 @@ static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
}
#endif /* !CONFIG_RESET_CONTROLLER */
-
static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_CLK_R7S9210
{
@@ -981,7 +979,7 @@ static void __init cpg_mssr_reserved_exit(struct cpg_mssr_priv *priv)
static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
const struct cpg_mssr_info *info)
{
- struct device_node *soc = of_find_node_by_path("/soc");
+ struct device_node *soc __free(device_node) = of_find_node_by_path("/soc");
struct device_node *node;
uint32_t args[MAX_PHANDLE_ARGS];
unsigned int *ids = NULL;
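
The one-line change to cpg_mssr_reserved_init() above switches the /soc lookup to the scope-based cleanup helpers from <linux/cleanup.h>: __free(device_node) arranges for of_node_put() to run automatically when the variable goes out of scope, on every return path. A minimal sketch of the idiom (the function here is hypothetical):

#include <linux/cleanup.h>
#include <linux/of.h>

static int example_count_soc_children(void)
{
	struct device_node *soc __free(device_node) =
		of_find_node_by_path("/soc");

	if (!soc)
		return -ENOENT;

	/* of_node_put(soc) runs automatically at every return below. */
	return of_get_child_count(soc);
}
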
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 88bf39e8c79c..ddf722ca79eb 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -548,7 +548,7 @@ static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
unsigned long rate)
{
- unsigned long foutpostdiv_rate;
+ unsigned long foutpostdiv_rate, foutvco_rate;
params->pl5_intin = rate / MEGA;
params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
@@ -557,10 +557,11 @@ rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
params->pl5_postdiv2 = 1;
params->pl5_spread = 0x16;
- foutpostdiv_rate =
- EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
- ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
- (params->pl5_postdiv1 * params->pl5_postdiv2);
+ foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
+ (params->pl5_intin << 24) + params->pl5_fracin),
+ params->pl5_refdiv) >> 24;
+ foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
+ params->pl5_postdiv1 * params->pl5_postdiv2);
return foutpostdiv_rate;
}
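
The rzg2l-cpg.c hunk above fixes two issues at once in the PLL5 FOUTPOSTDIV computation: the old expression shifted the 24-bit fixed-point multiplier right by 24 before it was ever multiplied, throwing away the fractional part, and its intermediate products could overflow on 32-bit builds. The new code carries the fraction through a 64-bit product (mul_u32_u32/div_u64) and rounds the final post-divide. A runnable stand-alone model with illustrative values (EXTAL assumed 24 MHz; refdiv and both post-dividers 1):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t extal = 24000000, refdiv = 1, postdiv = 1;
	uint64_t intin = 519, fracin = 12582912;	/* 519 + 0.75 in 8.24 */

	/* Old formula: the >> 24 truncates the fraction before use. */
	uint64_t old = extal / refdiv *
		       (((intin << 24) + fracin) >> 24) / postdiv;

	/* New formula: 64-bit multiply first, then shift, then round. */
	uint64_t fvco = (extal * ((intin << 24) + fracin) / refdiv) >> 24;
	uint64_t new = (fvco + postdiv / 2) / postdiv;	/* round-closest */

	printf("old %llu, new %llu\n",
	       (unsigned long long)old, (unsigned long long)new);
	/* old 12456000000 (fraction lost), new 12474000000 */
	return 0;
}
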
@@ -1680,23 +1681,31 @@ static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
return 0;
}
-static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd, bool always_on)
+static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd)
{
+ bool always_on = !!(pd->genpd.flags & GENPD_FLAG_ALWAYS_ON);
struct dev_power_governor *governor;
+ int ret;
+
+ if (always_on)
+ governor = &pm_domain_always_on_gov;
+ else
+ governor = &simple_qos_governor;
pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
- if (always_on) {
- pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
- governor = &pm_domain_always_on_gov;
- } else {
- pd->genpd.power_on = rzg2l_cpg_power_on;
- pd->genpd.power_off = rzg2l_cpg_power_off;
- governor = &simple_qos_governor;
- }
+ pd->genpd.power_on = rzg2l_cpg_power_on;
+ pd->genpd.power_off = rzg2l_cpg_power_off;
+
+ ret = pm_genpd_init(&pd->genpd, governor, !always_on);
+ if (ret)
+ return ret;
- return pm_genpd_init(&pd->genpd, governor, !always_on);
+ if (always_on)
+ ret = rzg2l_cpg_power_on(&pd->genpd);
+
+ return ret;
}
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
@@ -1711,8 +1720,9 @@ static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
return -ENOMEM;
pd->genpd.name = np->name;
+ pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
pd->priv = priv;
- ret = rzg2l_cpg_pd_setup(pd, true);
+ ret = rzg2l_cpg_pd_setup(pd);
if (ret)
return ret;
@@ -1777,7 +1787,6 @@ static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
return ret;
for (unsigned int i = 0; i < info->num_pm_domains; i++) {
- bool always_on = !!(info->pm_domains[i].flags & RZG2L_PD_F_ALWAYS_ON);
struct rzg2l_cpg_pd *pd;
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
@@ -1785,20 +1794,15 @@ static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
return -ENOMEM;
pd->genpd.name = info->pm_domains[i].name;
+ pd->genpd.flags = info->pm_domains[i].genpd_flags;
pd->conf = info->pm_domains[i].conf;
pd->id = info->pm_domains[i].id;
pd->priv = priv;
- ret = rzg2l_cpg_pd_setup(pd, always_on);
+ ret = rzg2l_cpg_pd_setup(pd);
if (ret)
return ret;
- if (always_on) {
- ret = rzg2l_cpg_power_on(&pd->genpd);
- if (ret)
- return ret;
- }
-
domains->domains[i] = &pd->genpd;
/* Parent should be on the very first entry of info->pm_domains[]. */
if (!i) {
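
The rzg2l_cpg_pd_setup() rework above derives everything from the genpd flags: GENPD_FLAG_ALWAYS_ON selects pm_domain_always_on_gov, the domain is initialized in the 'on' state, and it is powered up immediately so the hardware state matches genpd's view. The same shape, reduced to its essentials (the function name is illustrative):

#include <linux/pm_domain.h>

static int example_pd_init(struct generic_pm_domain *genpd)
{
	bool always_on = !!(genpd->flags & GENPD_FLAG_ALWAYS_ON);
	struct dev_power_governor *gov = always_on ?
		&pm_domain_always_on_gov : &simple_qos_governor;
	int ret;

	/* An always-on domain must be registered in the 'on' state. */
	ret = pm_genpd_init(genpd, gov, !always_on);
	if (ret)
		return ret;

	/* Power it up front so genpd never attempts to gate it. */
	return always_on ? genpd->power_on(genpd) : 0;
}
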
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index ecfe7e7ea8a1..881a89b5a710 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -270,14 +270,14 @@ struct rzg2l_cpg_pm_domain_conf {
* struct rzg2l_cpg_pm_domain_init_data - PM domain init data
* @name: PM domain name
* @conf: PM domain configuration
- * @flags: RZG2L PM domain flags (see RZG2L_PD_F_*)
+ * @genpd_flags: genpd flags (see GENPD_FLAG_*)
* @id: PM domain ID (similar to the ones defined in
* include/dt-bindings/clock/<soc-id>-cpg.h)
*/
struct rzg2l_cpg_pm_domain_init_data {
const char * const name;
struct rzg2l_cpg_pm_domain_conf conf;
- u32 flags;
+ u32 genpd_flags;
u16 id;
};
@@ -288,13 +288,9 @@ struct rzg2l_cpg_pm_domain_init_data {
.conf = { \
.mstop = (_mstop_conf), \
}, \
- .flags = (_flags), \
+ .genpd_flags = (_flags), \
}
-/* Power domain flags. */
-#define RZG2L_PD_F_ALWAYS_ON BIT(0)
-#define RZG2L_PD_F_NONE (0)
-
/**
* struct rzg2l_cpg_info - SoC-specific CPG Description
*
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index b524a9d33610..a4c1e92e1fd7 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/refcount.h>
#include <linux/reset-controller.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -40,6 +41,9 @@
#define GET_RST_OFFSET(x) (0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x) (0xA00 + ((x) * 4))
+#define CPG_BUS_1_MSTOP (0xd00)
+#define CPG_BUS_MSTOP(m) (CPG_BUS_1_MSTOP + ((m) - 1) * 4)
+
#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
@@ -64,6 +68,7 @@
* @resets: Array of resets
* @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @mstop_count: Array of MSTOP usage counters, one per MSTOP bit
* @rcdev: Reset controller entity
*/
struct rzv2h_cpg_priv {
@@ -78,6 +83,8 @@ struct rzv2h_cpg_priv {
unsigned int num_resets;
unsigned int last_dt_core_clk;
+ atomic_t *mstop_count;
+
struct reset_controller_dev rcdev;
};
@@ -97,7 +104,9 @@ struct pll_clk {
* struct mod_clock - Module clock
*
* @priv: CPG private data
+ * @mstop_data: packed MSTOP register index and bit mask for this module clock
* @hw: handle between common and hardware-specific interfaces
+ * @no_pm: flag to indicate PM is not supported
* @on_index: register offset
* @on_bit: ON/MON bit
* @mon_index: monitor register offset
@@ -105,7 +114,9 @@ struct pll_clk {
*/
struct mod_clock {
struct rzv2h_cpg_priv *priv;
+ unsigned int mstop_data;
struct clk_hw hw;
+ bool no_pm;
u8 on_index;
u8 on_bit;
s8 mon_index;
@@ -431,8 +442,71 @@ fail:
core->name, PTR_ERR(clk));
}
+static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
+ u32 mstop_data)
+{
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
+ unsigned int index = (mstop_index - 1) * 16;
+ atomic_t *mstop = &priv->mstop_count[index];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (!atomic_read(&mstop[i]))
+ val |= BIT(i) << 16;
+ atomic_inc(&mstop[i]);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
+ u32 mstop_data)
+{
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
+ unsigned int index = (mstop_index - 1) * 16;
+ atomic_t *mstop = &priv->mstop_count[index];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (!atomic_read(&mstop[i]) ||
+ atomic_dec_and_test(&mstop[i]))
+ val |= BIT(i) << 16 | BIT(i);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
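
The pair of helpers above reference-counts each of the 16 stop bits behind a BUS MSTOP register: the first enabler of a bit clears it (the upper halfword is a write-enable mask, the lower halfword the new value), and only the last disabler sets it again, so modules sharing one bus keep it running. A runnable user-space model of the accounting (the driver additionally tolerates a disable on a zero count, which this sketch omits):

#include <stdint.h>
#include <stdio.h>

static unsigned int count[16];

/* Returns the CPG_BUS_m_MSTOP register value to write, or 0 for none. */
static uint32_t mstop_update(uint16_t mask, int enable)
{
	uint32_t val = 0;

	for (int i = 0; i < 16; i++) {
		if (!(mask & (1u << i)))
			continue;
		if (enable && count[i]++ == 0)
			val |= 1u << (i + 16);		/* clear stop bit */
		else if (!enable && --count[i] == 0)
			val |= (1u << (i + 16)) | (1u << i); /* set stop bit */
	}
	return val;
}

int main(void)
{
	printf("%08x\n", mstop_update(1 << 2, 1)); /* 00040000: first user */
	printf("%08x\n", mstop_update(1 << 2, 1)); /* 00000000: shared bus */
	printf("%08x\n", mstop_update(1 << 2, 0)); /* 00000000: still used */
	printf("%08x\n", mstop_update(1 << 2, 0)); /* 00040004: last user */
	return 0;
}
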
+
+static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
+{
+ struct mod_clock *clock = to_mod_clock(hw);
+ struct rzv2h_cpg_priv *priv = clock->priv;
+ u32 bitmask;
+ u32 offset;
+
+ if (clock->mon_index >= 0) {
+ offset = GET_CLK_MON_OFFSET(clock->mon_index);
+ bitmask = BIT(clock->mon_bit);
+ } else {
+ offset = GET_CLK_ON_OFFSET(clock->on_index);
+ bitmask = BIT(clock->on_bit);
+ }
+
+ return readl(priv->base + offset) & bitmask;
+}
+
static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
+ bool enabled = rzv2h_mod_clock_is_enabled(hw);
struct mod_clock *clock = to_mod_clock(hw);
unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
struct rzv2h_cpg_priv *priv = clock->priv;
@@ -444,11 +518,20 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
enable ? "ON" : "OFF");
+ if (enabled == enable)
+ return 0;
+
value = bitmask << 16;
- if (enable)
+ if (enable) {
value |= bitmask;
-
- writel(value, priv->base + reg);
+ writel(value, priv->base + reg);
+ if (clock->mstop_data != BUS_MSTOP_NONE)
+ rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
+ } else {
+ if (clock->mstop_data != BUS_MSTOP_NONE)
+ rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data);
+ writel(value, priv->base + reg);
+ }
if (!enable || clock->mon_index < 0)
return 0;
@@ -474,24 +557,6 @@ static void rzv2h_mod_clock_disable(struct clk_hw *hw)
rzv2h_mod_clock_endisable(hw, false);
}
-static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
-{
- struct mod_clock *clock = to_mod_clock(hw);
- struct rzv2h_cpg_priv *priv = clock->priv;
- u32 bitmask;
- u32 offset;
-
- if (clock->mon_index >= 0) {
- offset = GET_CLK_MON_OFFSET(clock->mon_index);
- bitmask = BIT(clock->mon_bit);
- } else {
- offset = GET_CLK_ON_OFFSET(clock->on_index);
- bitmask = BIT(clock->on_bit);
- }
-
- return readl(priv->base + offset) & bitmask;
-}
-
static const struct clk_ops rzv2h_mod_clock_ops = {
.enable = rzv2h_mod_clock_enable,
.disable = rzv2h_mod_clock_disable,
@@ -541,8 +606,10 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
clock->on_bit = mod->on_bit;
clock->mon_index = mod->mon_index;
clock->mon_bit = mod->mon_bit;
+ clock->no_pm = mod->no_pm;
clock->priv = priv;
clock->hw.init = &init;
+ clock->mstop_data = mod->mstop_data;
ret = devm_clk_hw_register(dev, &clock->hw);
if (ret) {
@@ -552,6 +619,41 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
priv->clks[id] = clock->hw.clk;
+ /*
+ * Ensure the module clocks and MSTOP bits are synchronized when they are
+ * turned ON by the bootloader. Enable MSTOP bits for module clocks that were
+ * turned ON in an earlier boot stage.
+ */
+ if (clock->mstop_data != BUS_MSTOP_NONE &&
+ !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) {
+ rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data);
+ } else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
+ unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
+ u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
+ unsigned int index = (mstop_index - 1) * 16;
+ atomic_t *mstop = &priv->mstop_count[index];
+ unsigned long flags;
+ unsigned int i;
+ u32 val = 0;
+
+ /*
+ * Critical clocks are turned ON immediately upon registration, and the
+ * MSTOP counter is updated through the rzv2h_mod_clock_enable() path.
+ * However, if the critical clocks were already turned ON by the initial
+ * bootloader, synchronize the atomic counter here and clear the MSTOP bit.
+ */
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ for_each_set_bit(i, &mstop_mask, 16) {
+ if (atomic_read(&mstop[i]))
+ continue;
+ val |= BIT(i) << 16;
+ atomic_inc(&mstop[i]);
+ }
+ if (val)
+ writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ }
+
return;
fail:
@@ -668,17 +770,51 @@ struct rzv2h_cpg_pd {
struct generic_pm_domain genpd;
};
+static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd,
+ const struct of_phandle_args *clkspec)
+{
+ if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
+ return false;
+
+ switch (clkspec->args[0]) {
+ case CPG_MOD: {
+ struct rzv2h_cpg_priv *priv = pd->priv;
+ unsigned int id = clkspec->args[1];
+ struct mod_clock *clock;
+
+ if (id >= priv->num_mod_clks)
+ return false;
+
+ if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT))
+ return false;
+
+ clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id]));
+
+ return !clock->no_pm;
+ }
+
+ case CPG_CORE:
+ default:
+ return false;
+ }
+}
+
static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
+ struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd);
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
bool once = true;
struct clk *clk;
+ unsigned int i;
int error;
- int i = 0;
- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
- &clkspec)) {
+ for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
+ if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) {
+ of_node_put(clkspec.np);
+ continue;
+ }
+
if (once) {
once = false;
error = pm_clk_create(dev);
@@ -700,7 +836,6 @@ static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device
error);
goto fail_put;
}
- i++;
}
return 0;
@@ -786,6 +921,11 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
if (!clks)
return -ENOMEM;
+ priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits,
+ sizeof(*priv->mstop_count), GFP_KERNEL);
+ if (!priv->mstop_count)
+ return -ENOMEM;
+
priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
info->num_resets, GFP_KERNEL);
if (!priv->resets)
@@ -833,6 +973,12 @@ static const struct of_device_id rzv2h_cpg_match[] = {
.data = &r9a09g057_cpg_info,
},
#endif
+#ifdef CONFIG_CLK_R9A09G047
+ {
+ .compatible = "renesas,r9a09g047-cpg",
+ .data = &r9a09g047_cpg_info,
+ },
+#endif
{ /* sentinel */ }
};
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 1bd406c69015..fd8eb985c75b 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -8,6 +8,8 @@
#ifndef __RENESAS_RZV2H_CPG_H__
#define __RENESAS_RZV2H_CPG_H__
+#include <linux/bitfield.h>
+
/**
* struct ddiv - Structure for dynamic switching divider
*
@@ -32,8 +34,25 @@ struct ddiv {
})
#define CPG_CDDIV0 (0x400)
+#define CPG_CDDIV1 (0x404)
+#define CPG_CDDIV3 (0x40C)
+#define CPG_CDDIV4 (0x410)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
+#define CDDIV1_DIVCTL0 DDIV_PACK(CPG_CDDIV1, 0, 2, 4)
+#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
+#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
+#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
+#define CDDIV4_DIVCTL0 DDIV_PACK(CPG_CDDIV4, 0, 1, 16)
+#define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17)
+#define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18)
+
+#define BUS_MSTOP_IDX_MASK GENMASK(31, 16)
+#define BUS_MSTOP_BITS_MASK GENMASK(15, 0)
+#define BUS_MSTOP(idx, mask) (FIELD_PREP_CONST(BUS_MSTOP_IDX_MASK, (idx)) | \
+ FIELD_PREP_CONST(BUS_MSTOP_BITS_MASK, (mask)))
+#define BUS_MSTOP_NONE GENMASK(31, 0)
/**
* Definitions of CPG Core Clocks
@@ -93,8 +112,10 @@ enum clk_types {
* struct rzv2h_mod_clk - Module Clocks definitions
*
 * @name: name of the module clock
+ * @mstop_data: packed MSTOP register index and bit mask (see BUS_MSTOP())
* @parent: id of parent clock
* @critical: flag to indicate the clock is critical
+ * @no_pm: flag to indicate PM is not supported
* @on_index: control register index
* @on_bit: ON bit
* @mon_index: monitor register index
@@ -102,30 +123,37 @@ enum clk_types {
*/
struct rzv2h_mod_clk {
const char *name;
+ u32 mstop_data;
u16 parent;
bool critical;
+ bool no_pm;
u8 on_index;
u8 on_bit;
s8 mon_index;
u8 mon_bit;
};
-#define DEF_MOD_BASE(_name, _parent, _critical, _onindex, _onbit, _monindex, _monbit) \
+#define DEF_MOD_BASE(_name, _mstop, _parent, _critical, _no_pm, _onindex, _onbit, _monindex, _monbit) \
{ \
.name = (_name), \
+ .mstop_data = (_mstop), \
.parent = (_parent), \
.critical = (_critical), \
+ .no_pm = (_no_pm), \
.on_index = (_onindex), \
.on_bit = (_onbit), \
.mon_index = (_monindex), \
.mon_bit = (_monbit), \
}
-#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
- DEF_MOD_BASE(_name, _parent, false, _onindex, _onbit, _monindex, _monbit)
+#define DEF_MOD(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, false, _onindex, _onbit, _monindex, _monbit)
+
+#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, true, false, _onindex, _onbit, _monindex, _monbit)
-#define DEF_MOD_CRITICAL(_name, _parent, _onindex, _onbit, _monindex, _monbit) \
- DEF_MOD_BASE(_name, _parent, true, _onindex, _onbit, _monindex, _monbit)
+#define DEF_MOD_NO_PM(_name, _parent, _onindex, _onbit, _monindex, _monbit, _mstop) \
+ DEF_MOD_BASE(_name, _mstop, _parent, false, true, _onindex, _onbit, _monindex, _monbit)
/**
* struct rzv2h_reset - Reset definitions
@@ -167,6 +195,9 @@ struct rzv2h_reset {
*
* @resets: Array of Module Reset definitions
* @num_resets: Number of entries in resets[]
+ *
+ * @num_mstop_bits: Maximum number of MSTOP bits supported, equivalent to the
+ * number of CPG_BUS_m_MSTOP registers multiplied by 16.
*/
struct rzv2h_cpg_info {
/* Core Clocks */
@@ -183,8 +214,11 @@ struct rzv2h_cpg_info {
/* Resets */
const struct rzv2h_reset *resets;
unsigned int num_resets;
+
+ unsigned int num_mstop_bits;
};
+extern const struct rzv2h_cpg_info r9a09g047_cpg_info;
extern const struct rzv2h_cpg_info r9a09g057_cpg_info;
#endif /* __RENESAS_RZV2H_CPG_H__ */
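
BUS_MSTOP() above packs the 1-based MSTOP register index into bits 31:16 and the per-register bit mask into bits 15:0 at compile time via FIELD_PREP_CONST(); the enable/disable paths recover the two halves with FIELD_GET(). A small sketch of the round trip (the EXAMPLE_* names are illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Pack "register 8, bit 2", as BUS_MSTOP(8, BIT(2)) does above. */
#define EXAMPLE_MSTOP	(FIELD_PREP_CONST(GENMASK(31, 16), 8) | \
			 FIELD_PREP_CONST(GENMASK(15, 0), BIT(2)))

static inline u32 example_mstop_reg(u32 mstop_data)
{
	u16 index = FIELD_GET(GENMASK(31, 16), mstop_data);

	return 0xd00 + (index - 1) * 4;	/* == CPG_BUS_MSTOP(index) */
}
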
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index af2ade54a7ef..3fe7616f0ebe 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -13,6 +13,7 @@ clk-rockchip-y += clk-inverter.o
clk-rockchip-y += clk-mmc-phase.o
clk-rockchip-y += clk-muxgrf.o
clk-rockchip-y += clk-ddr.o
+clk-rockchip-y += gate-link.o
clk-rockchip-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-$(CONFIG_CLK_PX30) += clk-px30.o
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 0ffaf639f807..4031733def4e 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -12,28 +12,6 @@
#include <dt-bindings/clock/rockchip,rk3588-cru.h>
#include "clk.h"
-/*
- * Recent Rockchip SoCs have a new hardware block called Native Interface
- * Unit (NIU), which gates clocks to devices behind them. These effectively
- * need two parent clocks.
- *
- * Downstream enables the linked clock via runtime PM whenever the gate is
- * enabled. This implementation uses separate clock nodes for each of the
- * linked gate clocks, which leaks parts of the clock tree into DT.
- *
- * The GATE_LINK macro instead takes the second parent via 'linkname', but
- * ignores the information. Once the clock framework is ready to handle it, the
- * information should be passed on here. But since these clocks are required to
- * access multiple relevant IP blocks, such as PCIe or USB, we mark all linked
- * clocks critical until a better solution is available. This will waste some
- * power, but avoids leaking implementation details into DT or hanging the
- * system.
- */
-#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
- GATE(_id, cname, pname, f, o, b, gf)
-#define RK3588_LINKED_CLK CLK_IS_CRITICAL
-
-
#define RK3588_GRF_SOC_STATUS0 0x600
#define RK3588_PHYREF_ALT_GATE 0xc38
@@ -266,6 +244,8 @@ static struct rockchip_pll_rate_table rk3588_pll_rates[] = {
}, \
}
+static struct rockchip_clk_provider *early_ctx;
+
static struct rockchip_cpuclk_rate_table rk3588_cpub0clk_rates[] __initdata = {
RK3588_CPUB01CLK_RATE(2496000000, 1),
RK3588_CPUB01CLK_RATE(2400000000, 1),
@@ -694,7 +674,7 @@ static struct rockchip_pll_clock rk3588_pll_clks[] __initdata = {
RK3588_MODE_CON0, 10, 15, 0, rk3588_pll_rates),
};
-static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
+static struct rockchip_clk_branch rk3588_early_clk_branches[] __initdata = {
/*
* CRU Clock-Architecture
*/
@@ -792,10 +772,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(MCLK_GMAC0_OUT, "mclk_gmac0_out", gpll_cpll_p, 0,
RK3588_CLKSEL_CON(15), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 3, GFLAGS),
- COMPOSITE(REFCLKO25M_ETH0_OUT, "refclko25m_eth0_out", gpll_cpll_p, 0,
+ COMPOSITE(REFCLKO25M_ETH0_OUT, "refclko25m_eth0_out", gpll_cpll_p, CLK_IS_CRITICAL,
RK3588_CLKSEL_CON(15), 15, 1, MFLAGS, 8, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 4, GFLAGS),
- COMPOSITE(REFCLKO25M_ETH1_OUT, "refclko25m_eth1_out", gpll_cpll_p, 0,
+ COMPOSITE(REFCLKO25M_ETH1_OUT, "refclko25m_eth1_out", gpll_cpll_p, CLK_IS_CRITICAL,
RK3588_CLKSEL_CON(16), 7, 1, MFLAGS, 0, 7, DFLAGS,
RK3588_CLKGATE_CON(5), 5, GFLAGS),
COMPOSITE(CLK_CIFOUT_OUT, "clk_cifout_out", gpll_cpll_24m_spll_p, 0,
@@ -1456,7 +1436,7 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE_NODIV(HCLK_NVM_ROOT, "hclk_nvm_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(77), 0, 2, MFLAGS,
RK3588_CLKGATE_CON(31), 0, GFLAGS),
- COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_NVM_ROOT, "aclk_nvm_root", gpll_cpll_p, 0,
RK3588_CLKSEL_CON(77), 7, 1, MFLAGS, 2, 5, DFLAGS,
RK3588_CLKGATE_CON(31), 1, GFLAGS),
GATE(ACLK_EMMC, "aclk_emmc", "aclk_nvm_root", 0,
@@ -1685,13 +1665,13 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(42), 9, GFLAGS),
/* vdpu */
- COMPOSITE(ACLK_VDPU_ROOT, "aclk_vdpu_root", gpll_cpll_aupll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_VDPU_ROOT, "aclk_vdpu_root", gpll_cpll_aupll_p, 0,
RK3588_CLKSEL_CON(98), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(44), 0, GFLAGS),
COMPOSITE_NODIV(ACLK_VDPU_LOW_ROOT, "aclk_vdpu_low_root", mux_400m_200m_100m_24m_p, 0,
RK3588_CLKSEL_CON(98), 7, 2, MFLAGS,
RK3588_CLKGATE_CON(44), 1, GFLAGS),
- COMPOSITE_NODIV(HCLK_VDPU_ROOT, "hclk_vdpu_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VDPU_ROOT, "hclk_vdpu_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(98), 9, 2, MFLAGS,
RK3588_CLKGATE_CON(44), 2, GFLAGS),
COMPOSITE(ACLK_JPEG_DECODER_ROOT, "aclk_jpeg_decoder_root", gpll_cpll_aupll_spll_p, 0,
@@ -1742,9 +1722,9 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(ACLK_RKVENC0_ROOT, "aclk_rkvenc0_root", gpll_cpll_npll_p, 0,
RK3588_CLKSEL_CON(102), 7, 2, MFLAGS, 2, 5, DFLAGS,
RK3588_CLKGATE_CON(47), 1, GFLAGS),
- GATE(HCLK_RKVENC0, "hclk_rkvenc0", "hclk_rkvenc0_root", RK3588_LINKED_CLK,
+ GATE(HCLK_RKVENC0, "hclk_rkvenc0", "hclk_rkvenc0_root", 0,
RK3588_CLKGATE_CON(47), 4, GFLAGS),
- GATE(ACLK_RKVENC0, "aclk_rkvenc0", "aclk_rkvenc0_root", RK3588_LINKED_CLK,
+ GATE(ACLK_RKVENC0, "aclk_rkvenc0", "aclk_rkvenc0_root", 0,
RK3588_CLKGATE_CON(47), 5, GFLAGS),
COMPOSITE(CLK_RKVENC0_CORE, "clk_rkvenc0_core", gpll_cpll_aupll_npll_p, 0,
RK3588_CLKSEL_CON(102), 14, 2, MFLAGS, 9, 5, DFLAGS,
@@ -1754,10 +1734,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(48), 6, GFLAGS),
/* vi */
- COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_cpll_npll_aupll_spll_p, RK3588_LINKED_CLK,
+ COMPOSITE(ACLK_VI_ROOT, "aclk_vi_root", gpll_cpll_npll_aupll_spll_p, 0,
RK3588_CLKSEL_CON(106), 5, 3, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(49), 0, GFLAGS),
- COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VI_ROOT, "hclk_vi_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(106), 8, 2, MFLAGS,
RK3588_CLKGATE_CON(49), 1, GFLAGS),
COMPOSITE_NODIV(PCLK_VI_ROOT, "pclk_vi_root", mux_100m_50m_24m_p, 0,
@@ -1927,10 +1907,10 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
COMPOSITE(ACLK_VOP_ROOT, "aclk_vop_root", gpll_cpll_dmyaupll_npll_spll_p, 0,
RK3588_CLKSEL_CON(110), 5, 3, MFLAGS, 0, 5, DFLAGS,
RK3588_CLKGATE_CON(52), 0, GFLAGS),
- COMPOSITE_NODIV(ACLK_VOP_LOW_ROOT, "aclk_vop_low_root", mux_400m_200m_100m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(ACLK_VOP_LOW_ROOT, "aclk_vop_low_root", mux_400m_200m_100m_24m_p, 0,
RK3588_CLKSEL_CON(110), 8, 2, MFLAGS,
RK3588_CLKGATE_CON(52), 1, GFLAGS),
- COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, RK3588_LINKED_CLK,
+ COMPOSITE_NODIV(HCLK_VOP_ROOT, "hclk_vop_root", mux_200m_100m_50m_24m_p, 0,
RK3588_CLKSEL_CON(110), 10, 2, MFLAGS,
RK3588_CLKGATE_CON(52), 2, GFLAGS),
COMPOSITE_NODIV(PCLK_VOP_ROOT, "pclk_vop_root", mux_100m_50m_24m_p, 0,
@@ -2428,10 +2408,12 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
RK3588_CLKGATE_CON(68), 5, GFLAGS),
GATE(ACLK_AV1, "aclk_av1", "aclk_av1_pre", 0,
RK3588_CLKGATE_CON(68), 2, GFLAGS),
+};
+static struct rockchip_clk_branch rk3588_clk_branches[] = {
GATE_LINK(ACLK_ISP1_PRE, "aclk_isp1_pre", "aclk_isp1_root", ACLK_VI_ROOT, 0, RK3588_CLKGATE_CON(26), 6, GFLAGS),
GATE_LINK(HCLK_ISP1_PRE, "hclk_isp1_pre", "hclk_isp1_root", HCLK_VI_ROOT, 0, RK3588_CLKGATE_CON(26), 8, GFLAGS),
- GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", ACLK_NVM_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(31), 2, GFLAGS),
+ GATE_LINK(HCLK_NVM, "hclk_nvm", "hclk_nvm_root", ACLK_NVM_ROOT, 0, RK3588_CLKGATE_CON(31), 2, GFLAGS),
GATE_LINK(ACLK_USB, "aclk_usb", "aclk_usb_root", ACLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(42), 2, GFLAGS),
GATE_LINK(HCLK_USB, "hclk_usb", "hclk_usb_root", HCLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(42), 3, GFLAGS),
GATE_LINK(ACLK_JPEG_DECODER_PRE, "aclk_jpeg_decoder_pre", "aclk_jpeg_decoder_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(44), 7, GFLAGS),
@@ -2443,9 +2425,9 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE_LINK(HCLK_RKVDEC1_PRE, "hclk_rkvdec1_pre", "hclk_rkvdec1_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(41), 4, GFLAGS),
GATE_LINK(ACLK_RKVDEC1_PRE, "aclk_rkvdec1_pre", "aclk_rkvdec1_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(41), 5, GFLAGS),
GATE_LINK(ACLK_HDCP0_PRE, "aclk_hdcp0_pre", "aclk_vo0_root", ACLK_VOP_LOW_ROOT, 0, RK3588_CLKGATE_CON(55), 9, GFLAGS),
- GATE_LINK(HCLK_VO0, "hclk_vo0", "hclk_vo0_root", HCLK_VOP_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(55), 5, GFLAGS),
+ GATE_LINK(HCLK_VO0, "hclk_vo0", "hclk_vo0_root", HCLK_VOP_ROOT, 0, RK3588_CLKGATE_CON(55), 5, GFLAGS),
GATE_LINK(ACLK_HDCP1_PRE, "aclk_hdcp1_pre", "aclk_hdcp1_root", ACLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(59), 6, GFLAGS),
- GATE_LINK(HCLK_VO1, "hclk_vo1", "hclk_vo1_root", HCLK_VO1USB_TOP_ROOT, RK3588_LINKED_CLK, RK3588_CLKGATE_CON(59), 9, GFLAGS),
+ GATE_LINK(HCLK_VO1, "hclk_vo1", "hclk_vo1_root", HCLK_VO1USB_TOP_ROOT, 0, RK3588_CLKGATE_CON(59), 9, GFLAGS),
GATE_LINK(ACLK_AV1_PRE, "aclk_av1_pre", "aclk_av1_root", ACLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(68), 1, GFLAGS),
GATE_LINK(PCLK_AV1_PRE, "pclk_av1_pre", "pclk_av1_root", HCLK_VDPU_ROOT, 0, RK3588_CLKGATE_CON(68), 4, GFLAGS),
GATE_LINK(HCLK_SDIO_PRE, "hclk_sdio_pre", "hclk_sdio_root", HCLK_NVM, 0, RK3588_CLKGATE_CON(75), 1, GFLAGS),
@@ -2453,26 +2435,31 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
GATE_LINK(PCLK_VO1GRF, "pclk_vo1grf", "pclk_vo1_root", HCLK_VO1, CLK_IGNORE_UNUSED, RK3588_CLKGATE_CON(59), 12, GFLAGS),
};
-static void __init rk3588_clk_init(struct device_node *np)
+static void __init rk3588_clk_early_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
- unsigned long clk_nr_clks;
+ unsigned long clk_nr_clks, max_clk_id1, max_clk_id2;
void __iomem *reg_base;
- clk_nr_clks = rockchip_clk_find_max_clk_id(rk3588_clk_branches,
- ARRAY_SIZE(rk3588_clk_branches)) + 1;
+ max_clk_id1 = rockchip_clk_find_max_clk_id(rk3588_clk_branches,
+ ARRAY_SIZE(rk3588_clk_branches));
+ max_clk_id2 = rockchip_clk_find_max_clk_id(rk3588_early_clk_branches,
+ ARRAY_SIZE(rk3588_early_clk_branches));
+ clk_nr_clks = max(max_clk_id1, max_clk_id2) + 1;
+
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
return;
}
- ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ ctx = rockchip_clk_init_early(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
return;
}
+ early_ctx = ctx;
rockchip_clk_register_plls(ctx, rk3588_pll_clks,
ARRAY_SIZE(rk3588_pll_clks),
@@ -2491,14 +2478,55 @@ static void __init rk3588_clk_init(struct device_node *np)
&rk3588_cpub1clk_data, rk3588_cpub1clk_rates,
ARRAY_SIZE(rk3588_cpub1clk_rates));
- rockchip_clk_register_branches(ctx, rk3588_clk_branches,
- ARRAY_SIZE(rk3588_clk_branches));
+ rockchip_clk_register_branches(ctx, rk3588_early_clk_branches,
+ ARRAY_SIZE(rk3588_early_clk_branches));
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE_DRIVER(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_early_init);
+
+static int clk_rk3588_probe(struct platform_device *pdev)
+{
+ struct rockchip_clk_provider *ctx = early_ctx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ rockchip_clk_register_late_branches(dev, ctx, rk3588_clk_branches,
+ ARRAY_SIZE(rk3588_clk_branches));
- rk3588_rst_init(np, reg_base);
+ rockchip_clk_finalize(ctx);
+ rk3588_rst_init(np, ctx->reg_base);
rockchip_register_restart_notifier(ctx, RK3588_GLB_SRST_FST, NULL);
+ /*
+ * Re-add clock provider, so that the newly added clocks are also
+ * re-parented and get their defaults configured.
+ */
+ of_clk_del_provider(np);
rockchip_clk_of_add_provider(np, ctx);
+
+ return 0;
}
-CLK_OF_DECLARE(rk3588_cru, "rockchip,rk3588-cru", rk3588_clk_init);
+static const struct of_device_id clk_rk3588_match_table[] = {
+ {
+ .compatible = "rockchip,rk3588-cru",
+ },
+ { }
+};
+
+static struct platform_driver clk_rk3588_driver = {
+ .probe = clk_rk3588_probe,
+ .driver = {
+ .name = "clk-rk3588",
+ .of_match_table = clk_rk3588_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rockchip_clk_rk3588_drv_register(void)
+{
+ return platform_driver_register(&clk_rk3588_driver);
+}
+core_initcall(rockchip_clk_rk3588_drv_register);
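
The rk3588 conversion above splits registration in two: an early CLK_OF_DECLARE_DRIVER hook registers everything except the GATE_LINK clocks, and a normal platform driver fills those in once platform devices can be created, with the lookup table pre-seeded so consumers of a not-yet-registered clock see -EPROBE_DEFER rather than a hard -ENOENT. The core of that placeholder trick, reduced to a sketch (function names are illustrative; the real code lives in rockchip_clk_init_base() and rockchip_clk_finalize() below):

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/errno.h>

static void example_seed_table(struct clk **clks, unsigned long nr,
			       bool has_late_clocks)
{
	struct clk *placeholder =
		ERR_PTR(has_late_clocks ? -EPROBE_DEFER : -ENOENT);
	unsigned long i;

	for (i = 0; i < nr; i++)
		clks[i] = placeholder;
}

static void example_finalize(struct clk **clks, unsigned long nr)
{
	unsigned long i;

	/* Whatever the late probe never filled in really is absent. */
	for (i = 0; i < nr; i++)
		if (clks[i] == ERR_PTR(-EPROBE_DEFER))
			clks[i] = ERR_PTR(-ENOENT);
}
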
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 2fa7253c73b2..cbf93ea119a9 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
@@ -197,12 +198,6 @@ static void rockchip_fractional_approximation(struct clk_hw *hw,
clk_fractional_divider_general_approximation(hw, rate, parent_rate, m, n);
}
-static void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
- struct clk *clk, unsigned int id)
-{
- ctx->clk_data.clks[id] = clk;
-}
-
static struct clk *rockchip_clk_register_frac_branch(
struct rockchip_clk_provider *ctx, const char *name,
const char *const *parent_names, u8 num_parents,
@@ -292,7 +287,7 @@ static struct clk *rockchip_clk_register_frac_branch(
return mux_clk;
}
- rockchip_clk_add_lookup(ctx, mux_clk, child->id);
+ rockchip_clk_set_lookup(ctx, mux_clk, child->id);
/* notifier on the fraction divider to catch rate changes */
if (frac->mux_frac_idx >= 0) {
@@ -359,14 +354,17 @@ static struct clk *rockchip_clk_register_factor_branch(const char *name,
return hw->clk;
}
-struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
- void __iomem *base,
- unsigned long nr_clks)
+static struct rockchip_clk_provider *rockchip_clk_init_base(
+ struct device_node *np, void __iomem *base,
+ unsigned long nr_clks, bool has_late_clocks)
{
struct rockchip_clk_provider *ctx;
struct clk **clk_table;
+ struct clk *default_clk_val;
int i;
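+ /* Unregistered slots defer consumers while late clocks are still pending; otherwise they see -ENOENT. */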
+ default_clk_val = ERR_PTR(has_late_clocks ? -EPROBE_DEFER : -ENOENT);
+
ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
@@ -376,7 +374,7 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
goto err_free;
for (i = 0; i < nr_clks; ++i)
- clk_table[i] = ERR_PTR(-ENOENT);
+ clk_table[i] = default_clk_val;
ctx->reg_base = base;
ctx->clk_data.clks = clk_table;
@@ -393,8 +391,33 @@ err_free:
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
+
+struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_clks)
+{
+ return rockchip_clk_init_base(np, base, nr_clks, false);
+}
EXPORT_SYMBOL_GPL(rockchip_clk_init);
+struct rockchip_clk_provider *rockchip_clk_init_early(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_clks)
+{
+ return rockchip_clk_init_base(np, base, nr_clks, true);
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_init_early);
+
+void rockchip_clk_finalize(struct rockchip_clk_provider *ctx)
+{
+ int i;
+
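+ /* Demote still-deferred slots: the late driver never registered them, so consumers get -ENOENT instead of deferring forever. */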
+ for (i = 0; i < ctx->clk_data.clk_num; ++i)
+ if (ctx->clk_data.clks[i] == ERR_PTR(-EPROBE_DEFER))
+ ctx->clk_data.clks[i] = ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_finalize);
+
void rockchip_clk_of_add_provider(struct device_node *np,
struct rockchip_clk_provider *ctx)
{
@@ -424,7 +447,7 @@ void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
continue;
}
- rockchip_clk_add_lookup(ctx, clk, list->id);
+ rockchip_clk_set_lookup(ctx, clk, list->id);
}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);
@@ -439,13 +462,36 @@ unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
if (list->id > max)
max = list->id;
if (list->child && list->child->id > max)
- max = list->id;
+ max = list->child->id;
}
return max;
}
EXPORT_SYMBOL_GPL(rockchip_clk_find_max_clk_id);
+static struct platform_device *rockchip_clk_register_gate_link(
+ struct device *parent_dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *clkbr)
+{
+ struct rockchip_gate_link_platdata gate_link_pdata = {
+ .ctx = ctx,
+ .clkbr = clkbr,
+ };
+
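+ /* Reuse the parent's fwnode so the gate-link device resolves resources from the same DT node. */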
+ struct platform_device_info pdevinfo = {
+ .parent = parent_dev,
+ .name = "rockchip-gate-link-clk",
+ .id = clkbr->id,
+ .fwnode = dev_fwnode(parent_dev),
+ .of_node_reused = true,
+ .data = &gate_link_pdata,
+ .size_data = sizeof(gate_link_pdata),
+ };
+
+ return platform_device_register_full(&pdevinfo);
+}
+
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
@@ -571,6 +617,9 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
list->div_width, list->div_flags,
ctx->reg_base, &ctx->lock);
break;
+ case branch_linked_gate:
+ /* must be registered late, fall-through for error message */
+ break;
}
/* none of the cases above matched */
@@ -586,11 +635,36 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
continue;
}
- rockchip_clk_add_lookup(ctx, clk, list->id);
+ rockchip_clk_set_lookup(ctx, clk, list->id);
}
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_branches);
+void rockchip_clk_register_late_branches(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ unsigned int nr_clk)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ struct platform_device *pdev = NULL;
+
+ switch (list->branch_type) {
+ case branch_linked_gate:
+ pdev = rockchip_clk_register_gate_link(dev, ctx, list);
+ break;
+ default:
+ dev_err(dev, "unknown clock type %d\n", list->branch_type);
+ break;
+ }
+
+ if (!pdev)
+ dev_err(dev, "failed to register device for clock %s\n", list->name);
+ }
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_register_late_branches);
+
void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
unsigned int lookup_id,
const char *name, const char *const *parent_names,
@@ -610,7 +684,7 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
return;
}
- rockchip_clk_add_lookup(ctx, clk, lookup_id);
+ rockchip_clk_set_lookup(ctx, clk, lookup_id);
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index f1957e1c1178..9b37d44b9e5d 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -570,6 +570,7 @@ enum rockchip_clk_branch_type {
branch_divider,
branch_fraction_divider,
branch_gate,
+ branch_linked_gate,
branch_mmc,
branch_inverter,
branch_factor,
@@ -597,6 +598,7 @@ struct rockchip_clk_branch {
int gate_offset;
u8 gate_shift;
u8 gate_flags;
+ unsigned int linked_clk_id;
struct rockchip_clk_branch *child;
};
@@ -895,6 +897,20 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
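+/*
+ * GATE_LINK: a gate that needs its linked clock running while it is in
+ * use; registered late via the "rockchip-gate-link-clk" platform device.
+ */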
+#define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_linked_gate, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .linked_clk_id = linkedclk, \
+ .num_parents = 1, \
+ .flags = f, \
+ .gate_offset = o, \
+ .gate_shift = b, \
+ .gate_flags = gf, \
+ }
+
#define MMC(_id, cname, pname, offset, shift) \
{ \
.id = _id, \
@@ -1022,8 +1038,28 @@ struct rockchip_clk_branch {
#define SGRF_GATE(_id, cname, pname) \
FACTOR(_id, cname, pname, 0, 1, 1)
+static inline struct clk *rockchip_clk_get_lookup(struct rockchip_clk_provider *ctx,
+ unsigned int id)
+{
+ return ctx->clk_data.clks[id];
+}
+
+static inline void rockchip_clk_set_lookup(struct rockchip_clk_provider *ctx,
+ struct clk *clk, unsigned int id)
+{
+ ctx->clk_data.clks[id] = clk;
+}
+
+struct rockchip_gate_link_platdata {
+ struct rockchip_clk_provider *ctx;
+ struct rockchip_clk_branch *clkbr;
+};
+
struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
void __iomem *base, unsigned long nr_clks);
+struct rockchip_clk_provider *rockchip_clk_init_early(struct device_node *np,
+ void __iomem *base, unsigned long nr_clks);
+void rockchip_clk_finalize(struct rockchip_clk_provider *ctx);
void rockchip_clk_of_add_provider(struct device_node *np,
struct rockchip_clk_provider *ctx);
unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
@@ -1031,6 +1067,10 @@ unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk);
+void rockchip_clk_register_late_branches(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ unsigned int nr_clk);
void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
struct rockchip_pll_clock *pll_list,
unsigned int nr_pll, int grf_lock_offset);
diff --git a/drivers/clk/rockchip/gate-link.c b/drivers/clk/rockchip/gate-link.c
new file mode 100644
index 000000000000..cd0f7a2d30ab
--- /dev/null
+++ b/drivers/clk/rockchip/gate-link.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2024 Collabora Ltd.
+ * Author: Sebastian Reichel <sebastian.reichel@collabora.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include "clk.h"
+
+static int rk_clk_gate_link_register(struct device *dev,
+ struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *clkbr)
+{
+ unsigned long flags = clkbr->flags | CLK_SET_RATE_PARENT;
+ struct clk *clk;
+
+ clk = clk_register_gate(dev, clkbr->name, clkbr->parent_names[0],
+ flags, ctx->reg_base + clkbr->gate_offset,
+ clkbr->gate_shift, clkbr->gate_flags,
+ &ctx->lock);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rockchip_clk_set_lookup(ctx, clk, clkbr->id);
+ return 0;
+}
+
+static int rk_clk_gate_link_probe(struct platform_device *pdev)
+{
+ struct rockchip_gate_link_platdata *pdata;
+ struct device *dev = &pdev->dev;
+ struct clk *linked_clk;
+ int ret;
+
+ pdata = dev_get_platdata(dev);
+ if (!pdata)
+ return dev_err_probe(dev, -ENODEV, "missing platform data\n");
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_clk_create(dev);
+ if (ret)
+ return ret;
+
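+ /* Put the linked clock on this device's pm_clk list; runtime PM then keeps it running whenever the gate is in use. */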
+ linked_clk = rockchip_clk_get_lookup(pdata->ctx, pdata->clkbr->linked_clk_id);
+ ret = pm_clk_add_clk(dev, linked_clk);
+ if (ret)
+ return ret;
+
+ ret = rk_clk_gate_link_register(dev, pdata->ctx, pdata->clkbr);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ pm_clk_remove_clk(dev, linked_clk);
+ return ret;
+}
+
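+/* Runtime PM just gates/ungates the clocks on the device's pm_clk list. */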
+static const struct dev_pm_ops rk_clk_gate_link_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver rk_clk_gate_link_driver = {
+ .probe = rk_clk_gate_link_probe,
+ .driver = {
+ .name = "rockchip-gate-link-clk",
+ .pm = &rk_clk_gate_link_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init rk_clk_gate_link_drv_register(void)
+{
+ return platform_driver_register(&rk_clk_gate_link_driver);
+}
+core_initcall(rk_clk_gate_link_drv_register);
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index f1ba48758c78..90e5b114872c 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos-arm64.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos8895.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos990.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov920.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index abd49edcf707..e11ac67819ef 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -4,7 +4,7 @@
* Author: Padmavathi Venna <padma.v@samsung.com>
*
* Common Clock Framework support for Audio Subsystem Clock Controller.
-*/
+ */
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index cd4fec323a42..aec4d18c1f9e 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -260,7 +260,7 @@ static const struct samsung_mux_clock mux_clks[] __initconst = {
/* SRC_TOP0 */
MUX(CLK_MOUT_EBI, "mout_ebi", mout_ebi_p, SRC_TOP0, 28, 1),
- MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_div_mpll_pre_p,SRC_TOP0, 24, 1),
+ MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_div_mpll_pre_p, SRC_TOP0, 24, 1),
MUX(CLK_MOUT_ACLK_160, "mout_aclk_160", group_div_mpll_pre_p, SRC_TOP0, 20, 1),
MUX(CLK_MOUT_ACLK_100, "mout_aclk_100", group_div_mpll_pre_p, SRC_TOP0, 16, 1),
MUX(CLK_MOUT_ACLK_266_1, "mout_aclk_266_1", mout_aclk_266_1_p, SRC_TOP0, 14, 1),
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 28945b6b0ee1..16be0c53903c 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -5,7 +5,7 @@
* Author: Thomas Abraham <thomas.ab@samsung.com>
*
* Common Clock Framework support for all Exynos4 SoCs.
-*/
+ */
#include <dt-bindings/clock/exynos4.h>
#include <linux/slab.h>
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
index a70c2b06a61a..29c5644f0593 100644
--- a/drivers/clk/samsung/clk-exynos4412-isp.c
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -4,7 +4,7 @@
* Author: Marek Szyprowski <m.szyprowski@samsung.com>
*
* Common Clock Framework support for Exynos4412 ISP module.
-*/
+ */
#include <dt-bindings/clock/exynos4.h>
#include <linux/slab.h>
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index e02e7c013f3d..47e9ac2275ee 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -5,7 +5,7 @@
* Author: Thomas Abraham <thomas.ab@samsung.com>
*
* Common Clock Framework support for Exynos5250 SoC.
-*/
+ */
#include <dt-bindings/clock/exynos5250.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index 16da6ef5ca0c..fd0520d204dc 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -1458,7 +1458,7 @@ static const struct samsung_fixed_rate_clock fixed_rate_clks[] __initconst = {
FRATE(PHYCLK_HDMI_LINK_O_TMDS_CLKHI, "phyclk_hdmi_link_o_tmds_clkhi",
NULL, 0, 125000000),
FRATE(PHYCLK_MIPI_DPHY_4L_M_TXBYTECLKHS,
- "phyclk_mipi_dphy_4l_m_txbyte_clkhs" , NULL,
+ "phyclk_mipi_dphy_4l_m_txbyte_clkhs", NULL,
0, 187500000),
FRATE(PHYCLK_DPTX_PHY_O_REF_CLK_24M, "phyclk_dptx_phy_o_ref_clk_24m",
NULL, 0, 24000000),
@@ -1629,7 +1629,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
mout_isp1_media_400_p,
MUX_SEL_TOP_ISP10, 4, 1),
MUX(TOP_MOUT_ACLK_ISP1_400, "mout_aclk_isp1_400", mout_aclk_isp1_400_p,
- MUX_SEL_TOP_ISP10, 8 , 1),
+ MUX_SEL_TOP_ISP10, 8, 1),
MUX(TOP_MOUT_ISP1_MEDIA_266, "mout_isp1_media_266",
mout_isp1_media_266_p,
MUX_SEL_TOP_ISP10, 16, 1),
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index 2654077211e7..99b1bb4539fd 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -4,7 +4,7 @@
* Author: Tarek Dakhran <t.dakhran@samsung.com>
*
* Common Clock Framework support for Exynos5410 SoC.
-*/
+ */
#include <dt-bindings/clock/exynos5410.h>
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index c630135c686b..333c52fda17f 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -5,7 +5,7 @@
* Chander Kashyap <k.chander@samsung.com>
*
* Common Clock Framework support for Exynos5420 SoC.
-*/
+ */
#include <dt-bindings/clock/exynos5420.h>
#include <linux/slab.h>
@@ -295,8 +295,8 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
/* list of all parent clocks */
PNAME(mout_mspll_cpu_p) = {"mout_sclk_cpll", "mout_sclk_dpll",
"mout_sclk_mpll", "mout_sclk_spll"};
-PNAME(mout_cpu_p) = {"mout_apll" , "mout_mspll_cpu"};
-PNAME(mout_kfc_p) = {"mout_kpll" , "mout_mspll_kfc"};
+PNAME(mout_cpu_p) = {"mout_apll", "mout_mspll_cpu"};
+PNAME(mout_kfc_p) = {"mout_kpll", "mout_mspll_kfc"};
PNAME(mout_apll_p) = {"fin_pll", "fout_apll"};
PNAME(mout_bpll_p) = {"fin_pll", "fout_bpll"};
PNAME(mout_cpll_p) = {"fin_pll", "fout_cpll"};
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 4a5d2a914bd6..e6c938effa29 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
-*/
+ */
#include <linux/clk-provider.h>
#include <linux/of.h>
diff --git a/drivers/clk/samsung/clk-exynos8895.c b/drivers/clk/samsung/clk-exynos8895.c
new file mode 100644
index 000000000000..29ec0c4a8635
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos8895.c
@@ -0,0 +1,2803 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ *
+ * Common Clock Framework support for Exynos8895 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynos8895.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_TOP (CLK_GOUT_CMU_VPU_BUS + 1)
+#define CLKS_NR_FSYS0 (CLK_GOUT_FSYS0_XIU_P_FSYS0_ACLK + 1)
+#define CLKS_NR_FSYS1 (CLK_GOUT_FSYS1_XIU_P_FSYS1_ACLK + 1)
+#define CLKS_NR_PERIC0 (CLK_GOUT_PERIC0_USI03_I_SCLK_USI + 1)
+#define CLKS_NR_PERIC1 (CLK_GOUT_PERIC1_XIU_P_PERIC1_ACLK + 1)
+#define CLKS_NR_PERIS (CLK_GOUT_PERIS_XIU_P_PERIS_ACLK + 1)
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x15a80000) */
+#define PLL_LOCKTIME_PLL_SHARED0 0x0000
+#define PLL_LOCKTIME_PLL_SHARED1 0x0004
+#define PLL_LOCKTIME_PLL_SHARED2 0x0008
+#define PLL_LOCKTIME_PLL_SHARED3 0x000c
+#define PLL_LOCKTIME_PLL_SHARED4 0x0010
+#define PLL_CON0_MUX_CP2AP_MIF_CLK_USER 0x0100
+#define PLL_CON2_MUX_CP2AP_MIF_CLK_USER 0x0108
+#define PLL_CON0_PLL_SHARED0 0x0120
+#define PLL_CON0_PLL_SHARED1 0x0140
+#define PLL_CON0_PLL_SHARED2 0x0160
+#define PLL_CON0_PLL_SHARED3 0x0180
+#define PLL_CON0_PLL_SHARED4 0x01a0
+#define CLK_CON_MUX_MUX_CLKCMU_ABOX_CPUABOX 0x1000
+#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS 0x1008
+#define CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS 0x100c
+#define CLK_CON_MUX_MUX_CLKCMU_BUSC_BUSPHSI2C 0x1010
+#define CLK_CON_MUX_MUX_CLKCMU_CAM_BUS 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CAM_TPU0 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CAM_TPU1 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CAM_VRA 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x1038
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_DBG_BUS 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_DCAM_BUS 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_DCAM_IMGD 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_DPU_BUS 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_DROOPDETECTOR 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_DSP_BUS 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_DPGTC 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_MMC_EMBD 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_UFS_EMBD 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_USBDRD30 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_PCIE 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_UFS_CARD 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_G2D 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_JPEG 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_HPM 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_IMEM_BUS 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_ISPHQ_BUS 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_ISPLP_BUS 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_IVA_BUS 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_MFC_BUS 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_UART_DBG 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI00 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI01 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI02 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI03 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPEEDY2 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM0 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM1 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_UART_BT 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI04 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI05 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI06 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI07 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI08 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI09 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI10 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI11 0x10e8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI12 0x10ec
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI13 0x10f0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS 0x10f4
+#define CLK_CON_MUX_MUX_CLKCMU_SRDZ_BUS 0x10f8
+#define CLK_CON_MUX_MUX_CLKCMU_SRDZ_IMGD 0x10fc
+#define CLK_CON_MUX_MUX_CLKCMU_VPU_BUS 0x1100
+#define CLK_CON_MUX_MUX_CLK_CMU_CMUREF 0x1104
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x1108
+#define CLK_CON_DIV_CLKCMU_ABOX_CPUABOX 0x1800
+#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1804
+#define CLK_CON_DIV_CLKCMU_BUS1_BUS 0x1808
+#define CLK_CON_DIV_CLKCMU_BUSC_BUS 0x180c
+#define CLK_CON_DIV_CLKCMU_BUSC_BUSPHSI2C 0x1810
+#define CLK_CON_DIV_CLKCMU_CAM_BUS 0x1814
+#define CLK_CON_DIV_CLKCMU_CAM_TPU0 0x1818
+#define CLK_CON_DIV_CLKCMU_CAM_TPU1 0x181c
+#define CLK_CON_DIV_CLKCMU_CAM_VRA 0x1820
+#define CLK_CON_DIV_CLKCMU_CIS_CLK0 0x1824
+#define CLK_CON_DIV_CLKCMU_CIS_CLK1 0x1828
+#define CLK_CON_DIV_CLKCMU_CIS_CLK2 0x182c
+#define CLK_CON_DIV_CLKCMU_CIS_CLK3 0x1830
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1834
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1838
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c
+#define CLK_CON_DIV_CLKCMU_DBG_BUS 0x1840
+#define CLK_CON_DIV_CLKCMU_DCAM_BUS 0x1844
+#define CLK_CON_DIV_CLKCMU_DCAM_IMGD 0x1848
+#define CLK_CON_DIV_CLKCMU_DPU_BUS 0x184c
+#define CLK_CON_DIV_CLKCMU_DSP_BUS 0x1850
+#define CLK_CON_DIV_CLKCMU_FSYS0_BUS 0x1854
+#define CLK_CON_DIV_CLKCMU_FSYS0_DPGTC 0x1858
+#define CLK_CON_DIV_CLKCMU_FSYS0_MMC_EMBD 0x185c
+#define CLK_CON_DIV_CLKCMU_FSYS0_UFS_EMBD 0x1860
+#define CLK_CON_DIV_CLKCMU_FSYS0_USBDRD30 0x1864
+#define CLK_CON_DIV_CLKCMU_FSYS1_BUS 0x1868
+#define CLK_CON_DIV_CLKCMU_FSYS1_MMC_CARD 0x186c
+#define CLK_CON_DIV_CLKCMU_FSYS1_PCIE 0x1870
+#define CLK_CON_DIV_CLKCMU_FSYS1_UFS_CARD 0x1874
+#define CLK_CON_DIV_CLKCMU_G2D_G2D 0x1878
+#define CLK_CON_DIV_CLKCMU_G2D_JPEG 0x187c
+#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1880
+#define CLK_CON_DIV_CLKCMU_HPM 0x1884
+#define CLK_CON_DIV_CLKCMU_IMEM_BUS 0x1888
+#define CLK_CON_DIV_CLKCMU_ISPHQ_BUS 0x188c
+#define CLK_CON_DIV_CLKCMU_ISPLP_BUS 0x1890
+#define CLK_CON_DIV_CLKCMU_IVA_BUS 0x1894
+#define CLK_CON_DIV_CLKCMU_MFC_BUS 0x1898
+#define CLK_CON_DIV_CLKCMU_MODEM_SHARED0 0x189c
+#define CLK_CON_DIV_CLKCMU_MODEM_SHARED1 0x18a0
+#define CLK_CON_DIV_CLKCMU_OTP 0x18a4
+#define CLK_CON_DIV_CLKCMU_PERIC0_BUS 0x18a8
+#define CLK_CON_DIV_CLKCMU_PERIC0_UART_DBG 0x18ac
+#define CLK_CON_DIV_CLKCMU_PERIC0_USI00 0x18b0
+#define CLK_CON_DIV_CLKCMU_PERIC0_USI01 0x18b4
+#define CLK_CON_DIV_CLKCMU_PERIC0_USI02 0x18b8
+#define CLK_CON_DIV_CLKCMU_PERIC0_USI03 0x18bc
+#define CLK_CON_DIV_CLKCMU_PERIC1_BUS 0x18c0
+#define CLK_CON_DIV_CLKCMU_PERIC1_SPEEDY2 0x18c4
+#define CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM0 0x18c8
+#define CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM1 0x18cc
+#define CLK_CON_DIV_CLKCMU_PERIC1_UART_BT 0x18d0
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI04 0x18d4
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI05 0x18d8
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI06 0x18dc
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI07 0x18e0
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI08 0x18e4
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI09 0x18e8
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI10 0x18ec
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI11 0x18f0
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI12 0x18f4
+#define CLK_CON_DIV_CLKCMU_PERIC1_USI13 0x18f8
+#define CLK_CON_DIV_CLKCMU_PERIS_BUS 0x18fc
+#define CLK_CON_DIV_CLKCMU_SRDZ_BUS 0x1900
+#define CLK_CON_DIV_CLKCMU_SRDZ_IMGD 0x1904
+#define CLK_CON_DIV_CLKCMU_VPU_BUS 0x1908
+#define CLK_CON_DIV_DIV_CLK_CMU_CMUREF 0x190c
+#define CLK_CON_DIV_DIV_CP2AP_MIF_CLK_DIV2 0x1910
+#define CLK_CON_DIV_DIV_PLL_SHARED0_DIV2 0x1914
+#define CLK_CON_DIV_DIV_PLL_SHARED0_DIV4 0x1918
+#define CLK_CON_DIV_DIV_PLL_SHARED1_DIV2 0x191c
+#define CLK_CON_DIV_DIV_PLL_SHARED1_DIV4 0x1920
+#define CLK_CON_DIV_DIV_PLL_SHARED2_DIV2 0x1924
+#define CLK_CON_DIV_DIV_PLL_SHARED3_DIV2 0x1928
+#define CLK_CON_DIV_DIV_PLL_SHARED4_DIV2 0x192c
+#define CLK_CON_GAT_CLKCMU_DROOPDETECTOR 0x2000
+#define CLK_CON_GAT_CLKCMU_MIF_SWITCH 0x2004
+#define CLK_CON_GAT_GATE_CLKCMU_ABOX_CPUABOX 0x2008
+#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x200c
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS 0x2010
+#define CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS 0x2014
+#define CLK_CON_GAT_GATE_CLKCMU_BUSC_BUSPHSI2C 0x2018
+#define CLK_CON_GAT_GATE_CLKCMU_CAM_BUS 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_CAM_TPU0 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_CAM_TPU1 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_CAM_VRA 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3 0x2038
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x2040
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_DBG_BUS 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_DCAM_BUS 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_DCAM_IMGD 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_DPU_BUS 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_DSP_BUS 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_DPGTC 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_MMC_EMBD 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_UFS_EMBD 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_USBDRD30 0x206c
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS 0x2070
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD 0x2074
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_PCIE 0x2078
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_UFS_CARD 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_G2D 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_JPEG 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_HPM 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_IMEM_BUS 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_ISPHQ_BUS 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_ISPLP_BUS 0x2098
+#define CLK_CON_GAT_GATE_CLKCMU_IVA_BUS 0x209c
+#define CLK_CON_GAT_GATE_CLKCMU_MFC_BUS 0x20a0
+#define CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED0 0x20a4
+#define CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED1 0x20a8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS 0x20ac
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_UART_DBG 0x20b0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI00 0x20b4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI01 0x20b8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI02 0x20bc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI03 0x20c0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS 0x20c4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPEEDY2 0x20c8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM0 0x20cc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM1 0x20d0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_UART_BT 0x20d4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI04 0x20d8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI05 0x20dc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI06 0x20e0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI07 0x20e4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI08 0x20e8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI09 0x20ec
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI10 0x20f0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI11 0x20f4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI12 0x20f8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI13 0x20fc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS 0x2100
+#define CLK_CON_GAT_GATE_CLKCMU_SRDZ_BUS 0x2104
+#define CLK_CON_GAT_GATE_CLKCMU_SRDZ_IMGD 0x2108
+#define CLK_CON_GAT_GATE_CLKCMU_VPU_BUS 0x210c
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_CON0_MUX_CP2AP_MIF_CLK_USER,
+ PLL_CON2_MUX_CP2AP_MIF_CLK_USER,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED4,
+ CLK_CON_MUX_MUX_CLKCMU_ABOX_CPUABOX,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUSC_BUSPHSI2C,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_TPU0,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_TPU1,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_VRA,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_DBG_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DCAM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DCAM_IMGD,
+ CLK_CON_MUX_MUX_CLKCMU_DPU_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DROOPDETECTOR,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_MMC_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_USBDRD30,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_UFS_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_JPEG,
+ CLK_CON_MUX_MUX_CLKCMU_HPM,
+ CLK_CON_MUX_MUX_CLKCMU_IMEM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_ISPHQ_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_ISPLP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_IVA_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_UART_DBG,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI00,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI01,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI02,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI03,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPEEDY2,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM0,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM1,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_UART_BT,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI04,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI05,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI06,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI07,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI08,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI09,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI10,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI11,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI12,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI13,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_SRDZ_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_SRDZ_IMGD,
+ CLK_CON_MUX_MUX_CLKCMU_VPU_BUS,
+ CLK_CON_MUX_MUX_CLK_CMU_CMUREF,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
+ CLK_CON_DIV_CLKCMU_ABOX_CPUABOX,
+ CLK_CON_DIV_CLKCMU_APM_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_BUS,
+ CLK_CON_DIV_CLKCMU_BUSC_BUS,
+ CLK_CON_DIV_CLKCMU_BUSC_BUSPHSI2C,
+ CLK_CON_DIV_CLKCMU_CAM_BUS,
+ CLK_CON_DIV_CLKCMU_CAM_TPU0,
+ CLK_CON_DIV_CLKCMU_CAM_TPU1,
+ CLK_CON_DIV_CLKCMU_CAM_VRA,
+ CLK_CON_DIV_CLKCMU_CIS_CLK0,
+ CLK_CON_DIV_CLKCMU_CIS_CLK1,
+ CLK_CON_DIV_CLKCMU_CIS_CLK2,
+ CLK_CON_DIV_CLKCMU_CIS_CLK3,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_CLKCMU_DBG_BUS,
+ CLK_CON_DIV_CLKCMU_DCAM_BUS,
+ CLK_CON_DIV_CLKCMU_DCAM_IMGD,
+ CLK_CON_DIV_CLKCMU_DPU_BUS,
+ CLK_CON_DIV_CLKCMU_DSP_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS0_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS0_DPGTC,
+ CLK_CON_DIV_CLKCMU_FSYS0_MMC_EMBD,
+ CLK_CON_DIV_CLKCMU_FSYS0_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_FSYS0_USBDRD30,
+ CLK_CON_DIV_CLKCMU_FSYS1_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS1_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_FSYS1_PCIE,
+ CLK_CON_DIV_CLKCMU_FSYS1_UFS_CARD,
+ CLK_CON_DIV_CLKCMU_G2D_G2D,
+ CLK_CON_DIV_CLKCMU_G2D_JPEG,
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_CLKCMU_HPM,
+ CLK_CON_DIV_CLKCMU_IMEM_BUS,
+ CLK_CON_DIV_CLKCMU_ISPHQ_BUS,
+ CLK_CON_DIV_CLKCMU_ISPLP_BUS,
+ CLK_CON_DIV_CLKCMU_IVA_BUS,
+ CLK_CON_DIV_CLKCMU_MFC_BUS,
+ CLK_CON_DIV_CLKCMU_MODEM_SHARED0,
+ CLK_CON_DIV_CLKCMU_MODEM_SHARED1,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC0_UART_DBG,
+ CLK_CON_DIV_CLKCMU_PERIC0_USI00,
+ CLK_CON_DIV_CLKCMU_PERIC0_USI01,
+ CLK_CON_DIV_CLKCMU_PERIC0_USI02,
+ CLK_CON_DIV_CLKCMU_PERIC0_USI03,
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC1_SPEEDY2,
+ CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM0,
+ CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM1,
+ CLK_CON_DIV_CLKCMU_PERIC1_UART_BT,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI04,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI05,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI06,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI07,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI08,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI09,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI10,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI11,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI12,
+ CLK_CON_DIV_CLKCMU_PERIC1_USI13,
+ CLK_CON_DIV_CLKCMU_PERIS_BUS,
+ CLK_CON_DIV_CLKCMU_SRDZ_BUS,
+ CLK_CON_DIV_CLKCMU_SRDZ_IMGD,
+ CLK_CON_DIV_CLKCMU_VPU_BUS,
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF,
+ CLK_CON_DIV_DIV_CP2AP_MIF_CLK_DIV2,
+ CLK_CON_DIV_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_DIV_DIV_PLL_SHARED2_DIV2,
+ CLK_CON_DIV_DIV_PLL_SHARED3_DIV2,
+ CLK_CON_DIV_DIV_PLL_SHARED4_DIV2,
+ CLK_CON_GAT_CLKCMU_DROOPDETECTOR,
+ CLK_CON_GAT_CLKCMU_MIF_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_ABOX_CPUABOX,
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUSC_BUSPHSI2C,
+ CLK_CON_GAT_GATE_CLKCMU_CAM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CAM_TPU0,
+ CLK_CON_GAT_GATE_CLKCMU_CAM_TPU1,
+ CLK_CON_GAT_GATE_CLKCMU_CAM_VRA,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_DBG_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DCAM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DCAM_IMGD,
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_DPGTC,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_MMC_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_UFS_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_USBDRD30,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS1_UFS_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_JPEG,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_HPM,
+ CLK_CON_GAT_GATE_CLKCMU_IMEM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_ISPHQ_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_ISPLP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_IVA_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MFC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED0,
+ CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED1,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_UART_DBG,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI00,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI01,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI02,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI03,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPEEDY2,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM0,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM1,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_UART_BT,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI04,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI05,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI06,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI07,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI08,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI09,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI10,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI11,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI12,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI13,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_SRDZ_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_SRDZ_IMGD,
+ CLK_CON_GAT_GATE_CLKCMU_VPU_BUS,
+};
+
+static const struct samsung_pll_rate_table pll_shared0_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 2132000000U, 328, 4, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared1_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 1865500000U, 287, 4, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared2_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 800000000U, 400, 13, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared3_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 630000000U, 315, 13, 0),
+};
+
+static const struct samsung_pll_rate_table pll_shared4_rate_table[] __initconst = {
+ PLL_35XX_RATE(26 * MHZ, 667333333U, 154, 6, 0),
+};
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ PLL(pll_1051x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON0_PLL_SHARED0,
+ pll_shared0_rate_table),
+ PLL(pll_1051x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON0_PLL_SHARED1,
+ pll_shared1_rate_table),
+ PLL(pll_1052x, CLK_FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON0_PLL_SHARED2,
+ pll_shared2_rate_table),
+ PLL(pll_1052x, CLK_FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON0_PLL_SHARED3,
+ pll_shared3_rate_table),
+ PLL(pll_1052x, CLK_FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON0_PLL_SHARED4,
+ pll_shared4_rate_table),
+};
+
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_pll_shared0_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_pll_shared1_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_pll_shared2_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" };
+PNAME(mout_pll_shared4_p) = { "oscclk", "fout_shared4_pll" };
+PNAME(mout_cp2ap_mif_clk_user_p) = { "oscclk" };
+PNAME(mout_cmu_abox_cpuabox_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared4_pll" };
+PNAME(mout_cmu_apm_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_bus1_bus_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_busc_bus_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_cp2ap_mif_clk_div2",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_cmu_busc_busphsi2c_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_cp2ap_mif_clk_div2",
+ "oscclk" };
+PNAME(mout_cmu_cam_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_cam_tpu0_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_cam_tpu1_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_cam_vra_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_cis_clk0_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk1_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk2_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk3_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_core_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_cp2ap_mif_clk_div2" };
+PNAME(mout_cmu_cpucl0_switch_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared4_pll" };
+PNAME(mout_cmu_cpucl1_switch_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared4_pll" };
+PNAME(mout_cmu_dbg_bus_p) = { "fout_shared2_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4" };
+PNAME(mout_cmu_dcam_bus_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_dcam_imgd_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_dpu_bus_p) = { "dout_cmu_shared0_div2",
+ "fout_shared3_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_cmu_droopdetector_p) = { "oscclk", "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll" };
+PNAME(mout_cmu_dsp_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_fsys0_bus_p) = { "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_fsys0_dpgtc_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_fsys0_mmc_embd_p) = { "oscclk", "fout_shared2_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_fsys0_ufs_embd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_fsys0_usbdrd30_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_fsys1_bus_p) = { "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_fsys1_mmc_card_p) = { "oscclk", "fout_shared2_pll",
+ "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_cmu_fsys1_pcie_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_fsys1_ufs_card_p) = { "oscclk",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_g2d_g2d_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_g2d_jpeg_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_hpm_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_imem_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_isphq_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_isplp_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_iva_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_mfc_bus_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_mif_switch_p) = { "fout_shared0_pll",
+ "fout_shared1_pll",
+ "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "mout_cp2ap_mif_clk_user",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_peric0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric0_uart_dbg_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric0_usi00_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric0_usi01_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric0_usi02_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric0_usi03_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric1_speedy2_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_peric1_spi_cam0_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_spi_cam1_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_uart_bt_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi04_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi05_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi06_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi07_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi08_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi09_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi10_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi11_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi12_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peric1_usi13_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_peris_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_srdz_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_srdz_imgd_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+PNAME(mout_cmu_vpu_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div2" };
+
+/*
+ * Register name to clock name mangling strategy used in this file
+ *
+ * Replace PLL_CON0_PLL with CLK_MOUT_PLL and mout_pll
+ * Replace CLK_CON_MUX_MUX_CLKCMU with CLK_MOUT_CMU and mout_cmu
+ * Replace CLK_CON_DIV_CLKCMU with CLK_DOUT_CMU and dout_cmu
+ * Replace CLK_CON_DIV_DIV_CLKCMU with CLK_DOUT_CMU and dout_cmu
+ * Replace CLK_CON_GAT_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ * Replace CLK_CON_GAT_GATE_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ *
+ * For gates remove _UID _BLK _IPCLKPORT and _RSTNSYNC
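+ *
+ * Example: CLK_CON_MUX_MUX_CLKCMU_APM_BUS becomes CLK_MOUT_CMU_APM_BUS
+ * and "mout_cmu_apm_bus".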
+ */
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
+ PLL_CON0_PLL_SHARED2, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
+ PLL_CON0_PLL_SHARED3, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p,
+ PLL_CON0_PLL_SHARED4, 4, 1),
+ MUX(CLK_MOUT_CP2AP_MIF_CLK_USER, "mout_cp2ap_mif_clk_user",
+ mout_cp2ap_mif_clk_user_p, PLL_CON0_MUX_CP2AP_MIF_CLK_USER, 4, 1),
+ MUX(CLK_MOUT_CMU_ABOX_CPUABOX, "mout_cmu_abox_cpuabox",
+ mout_cmu_abox_cpuabox_p, CLK_CON_MUX_MUX_CLKCMU_ABOX_CPUABOX,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_APM_BUS, "mout_cmu_apm_bus", mout_cmu_apm_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_BUS1_BUS, "mout_cmu_bus1_bus", mout_cmu_bus1_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_BUSC_BUS, "mout_cmu_busc_bus", mout_cmu_busc_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_BUSC_BUSPHSI2C, "mout_cmu_busc_busphsi2c",
+ mout_cmu_busc_busphsi2c_p, CLK_CON_MUX_MUX_CLKCMU_BUSC_BUSPHSI2C,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CAM_BUS, "mout_cmu_cam_bus", mout_cmu_cam_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_CAM_TPU0, "mout_cmu_cam_tpu0", mout_cmu_cam_tpu0_p,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_TPU0, 0, 2),
+ MUX(CLK_MOUT_CMU_CAM_TPU1, "mout_cmu_cam_tpu1", mout_cmu_cam_tpu1_p,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_TPU1, 0, 2),
+ MUX(CLK_MOUT_CMU_CAM_VRA, "mout_cmu_cam_vra", mout_cmu_cam_vra_p,
+ CLK_CON_MUX_MUX_CLKCMU_CAM_VRA, 0, 2),
+ MUX(CLK_MOUT_CMU_CIS_CLK0, "mout_cmu_cis_clk0", mout_cmu_cis_clk0_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK1, "mout_cmu_cis_clk1", mout_cmu_cis_clk1_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK2, "mout_cmu_cis_clk2", mout_cmu_cis_clk2_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK3, "mout_cmu_cis_clk3", mout_cmu_cis_clk3_p,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, 0, 1),
+ MUX(CLK_MOUT_CMU_CORE_BUS, "mout_core_bus", mout_core_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL0_SWITCH, "mout_cmu_cpucl0_switch",
+ mout_cmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL1_SWITCH, "mout_cmu_cpucl1_switch",
+ mout_cmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_DBG_BUS, "mout_cmu_dbg_bus", mout_cmu_dbg_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_DBG_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_DCAM_BUS, "mout_cmu_dcam_bus", mout_cmu_dcam_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_DCAM_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_DCAM_IMGD, "mout_cmu_dcam_imgd", mout_cmu_dcam_imgd_p,
+ CLK_CON_MUX_MUX_CLKCMU_DCAM_IMGD, 0, 2),
+ MUX(CLK_MOUT_CMU_DPU_BUS, "mout_cmu_dpu_bus", mout_cmu_dpu_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_DPU_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_DROOPDETECTOR, "mout_cmu_droopdetector",
+ mout_cmu_droopdetector_p, CLK_CON_MUX_MUX_CLKCMU_DROOPDETECTOR,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus", mout_cmu_dsp_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_FSYS0_BUS, "mout_fsys0_bus", mout_fsys0_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_FSYS0_DPGTC, "mout_fsys0_dpgtc", mout_fsys0_dpgtc_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS0_DPGTC, 0, 2),
+ MUX(CLK_MOUT_CMU_FSYS0_MMC_EMBD, "mout_fsys0_mmc_embd",
+ mout_fsys0_mmc_embd_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_MMC_EMBD,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_FSYS0_UFS_EMBD, "mout_fsys0_ufs_embd",
+ mout_fsys0_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_UFS_EMBD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_FSYS0_USBDRD30, "mout_fsys0_usbdrd30",
+ mout_fsys0_usbdrd30_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_USBDRD30,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_FSYS1_BUS, "mout_cmu_fsys1_bus", mout_cmu_fsys1_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_FSYS1_MMC_CARD, "mout_cmu_fsys1_mmc_card",
+ mout_cmu_fsys1_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD,
+ 0, 3),
+ MUX(CLK_MOUT_CMU_FSYS1_PCIE, "mout_cmu_fsys1_pcie",
+ mout_cmu_fsys1_pcie_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_FSYS1_UFS_CARD, "mout_cmu_fsys1_ufs_card",
+ mout_cmu_fsys1_ufs_card_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_UFS_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d", mout_cmu_g2d_g2d_p,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_JPEG, "mout_cmu_g2d_jpeg", mout_cmu_g2d_jpeg_p,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_JPEG, 0, 2),
+ MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm", mout_cmu_hpm_p,
+ CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
+ MUX(CLK_MOUT_CMU_IMEM_BUS, "mout_cmu_imem_bus", mout_cmu_imem_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_IMEM_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_ISPHQ_BUS, "mout_cmu_isphq_bus", mout_cmu_isphq_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_ISPHQ_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_ISPLP_BUS, "mout_cmu_isplp_bus", mout_cmu_isplp_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_ISPLP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_IVA_BUS, "mout_cmu_iva_bus", mout_cmu_iva_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_IVA_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_MFC_BUS, "mout_cmu_mfc_bus", mout_cmu_mfc_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFC_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_SWITCH, "mout_cmu_mif_switch",
+ mout_cmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_PERIC0_BUS, "mout_cmu_peric0_bus",
+ mout_cmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC0_UART_DBG, "mout_cmu_peric0_uart_dbg",
+ mout_cmu_peric0_uart_dbg_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_UART_DBG,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC0_USI00, "mout_cmu_peric0_usi00",
+ mout_cmu_peric0_usi00_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI00,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC0_USI01, "mout_cmu_peric0_usi01",
+ mout_cmu_peric0_usi01_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI01,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC0_USI02, "mout_cmu_peric0_usi02",
+ mout_cmu_peric0_usi02_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI02,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC0_USI03, "mout_cmu_peric0_usi03",
+ mout_cmu_peric0_usi03_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_USI03,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_BUS, "mout_cmu_peric1_bus",
+ mout_cmu_peric1_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC1_SPEEDY2, "mout_cmu_peric1_speedy2",
+ mout_cmu_peric1_speedy2_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPEEDY2,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_SPI_CAM0, "mout_cmu_peric1_spi_cam0",
+ mout_cmu_peric1_spi_cam0_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM0,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_SPI_CAM1, "mout_cmu_peric1_spi_cam1",
+ mout_cmu_peric1_spi_cam1_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_SPI_CAM1,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_UART_BT, "mout_cmu_peric1_uart_bt",
+ mout_cmu_peric1_uart_bt_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_UART_BT,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI04, "mout_cmu_peric1_usi04",
+ mout_cmu_peric1_usi04_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI04,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI05, "mout_cmu_peric1_usi05",
+ mout_cmu_peric1_usi05_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI05,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI06, "mout_cmu_peric1_usi06",
+ mout_cmu_peric1_usi06_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI06,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI07, "mout_cmu_peric1_usi07",
+ mout_cmu_peric1_usi07_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI07,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI08, "mout_cmu_peric1_usi08",
+ mout_cmu_peric1_usi08_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI08,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI09, "mout_cmu_peric1_usi09",
+ mout_cmu_peric1_usi09_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI09,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI10, "mout_cmu_peric1_usi10",
+ mout_cmu_peric1_usi10_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI10,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI11, "mout_cmu_peric1_usi11",
+ mout_cmu_peric1_usi11_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI11,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI12, "mout_cmu_peric1_usi12",
+ mout_cmu_peric1_usi12_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI12,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIC1_USI13, "mout_cmu_peric1_usi13",
+ mout_cmu_peric1_usi13_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_USI13,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_PERIS_BUS, "mout_cmu_peris_bus", mout_cmu_peris_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_SRDZ_BUS, "mout_cmu_srdz_bus", mout_cmu_srdz_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_SRDZ_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_SRDZ_IMGD, "mout_cmu_srdz_imgd", mout_cmu_srdz_imgd_p,
+ CLK_CON_MUX_MUX_CLKCMU_SRDZ_IMGD, 0, 2),
+ MUX(CLK_MOUT_CMU_VPU_BUS, "mout_cmu_vpu_bus", mout_cmu_vpu_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_VPU_BUS, 0, 2),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CMU_ABOX_CPUABOX, "dout_cmu_cmu_abox_cpuabox",
+ "gout_cmu_abox_cpuabox", CLK_CON_DIV_CLKCMU_ABOX_CPUABOX, 0, 3),
+ DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus",
+ CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+ DIV(CLK_DOUT_CMU_BUS1_BUS, "dout_cmu_bus1_bus", "gout_cmu_bus1_bus",
+ CLK_CON_DIV_CLKCMU_BUS1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUSC_BUS, "dout_cmu_busc_bus",
+ "gout_cmu_clkcmu_busc_bus", CLK_CON_DIV_CLKCMU_BUSC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUSC_BUSPHSI2C, "dout_cmu_busc_busphsi2c",
+ "gout_cmu_busc_busphsi2c", CLK_CON_DIV_CLKCMU_BUSC_BUSPHSI2C,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_CAM_BUS, "dout_cmu_cam_bus", "gout_cmu_cam_bus",
+ CLK_CON_DIV_CLKCMU_CAM_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CAM_TPU0, "dout_cmu_cam_tpu0", "gout_cmu_cam_tpu0",
+ CLK_CON_DIV_CLKCMU_CAM_TPU0, 0, 4),
+ DIV(CLK_DOUT_CMU_CAM_TPU1, "dout_cmu_cam_tpu1", "gout_cmu_cam_tpu1",
+ CLK_CON_DIV_CLKCMU_CAM_TPU1, 0, 4),
+ DIV(CLK_DOUT_CMU_CAM_VRA, "dout_cmu_cam_vra", "gout_cmu_cam_vra",
+ CLK_CON_DIV_CLKCMU_CAM_VRA, 0, 4),
+ DIV(CLK_DOUT_CMU_CIS_CLK0, "dout_cmu_cis_clk0", "gout_cmu_cis_clk0",
+ CLK_CON_DIV_CLKCMU_CIS_CLK0, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK1, "dout_cmu_cis_clk1", "gout_cmu_cis_clk1",
+ CLK_CON_DIV_CLKCMU_CIS_CLK1, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK2, "dout_cmu_cis_clk2", "gout_cmu_cis_clk2",
+ CLK_CON_DIV_CLKCMU_CIS_CLK2, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK3, "dout_cmu_cis_clk3", "gout_cmu_cis_clk3",
+ CLK_CON_DIV_CLKCMU_CIS_CLK3, 0, 5),
+ DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
+ "gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
+ "gout_cmu_cpucl1_switch", CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_DBG_BUS, "dout_cmu_dbg_bus", "gout_cmu_dbg_bus",
+ CLK_CON_DIV_CLKCMU_DBG_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DCAM_BUS, "dout_cmu_dcam_bus", "gout_cmu_dcam_bus",
+ CLK_CON_DIV_CLKCMU_DCAM_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DCAM_IMGD, "dout_cmu_dcam_imgd", "gout_cmu_dcam_imgd",
+ CLK_CON_DIV_CLKCMU_DCAM_IMGD, 0, 4),
+ DIV(CLK_DOUT_CMU_DPU_BUS, "dout_cmu_dpu_bus", "gout_cmu_dpu_bus",
+ CLK_CON_DIV_CLKCMU_DPU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DSP_BUS, "dout_cmu_dsp_bus", "gout_cmu_dsp_bus",
+ CLK_CON_DIV_CLKCMU_DSP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS0_BUS, "dout_cmu_fsys0_bus", "gout_cmu_fsys0_bus",
+ CLK_CON_DIV_CLKCMU_FSYS0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS0_DPGTC, "dout_cmu_fsys0_dpgtc",
+ "gout_cmu_fsys0_dpgtc", CLK_CON_DIV_CLKCMU_FSYS0_DPGTC, 0, 3),
+ DIV(CLK_DOUT_CMU_FSYS0_MMC_EMBD, "dout_cmu_fsys0_mmc_embd",
+ "gout_cmu_fsys0_mmc_embd", CLK_CON_DIV_CLKCMU_FSYS0_MMC_EMBD,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_FSYS0_UFS_EMBD, "dout_cmu_fsys0_ufs_embd",
+ "gout_cmu_fsys0_ufs_embd", CLK_CON_DIV_CLKCMU_FSYS0_UFS_EMBD,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_FSYS0_USBDRD30, "dout_cmu_fsys0_usbdrd30",
+ "gout_cmu_fsys0_usbdrd30", CLK_CON_DIV_CLKCMU_FSYS0_USBDRD30,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS1_BUS, "dout_cmu_fsys1_bus", "gout_cmu_fsys1_bus",
+ CLK_CON_DIV_CLKCMU_FSYS1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS1_MMC_CARD, "dout_cmu_fsys1_mmc_card",
+ "gout_cmu_fsys1_mmc_card", CLK_CON_DIV_CLKCMU_FSYS1_MMC_CARD,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_FSYS1_UFS_CARD, "dout_cmu_fsys1_ufs_card",
+ "gout_cmu_fsys1_ufs_card", CLK_CON_DIV_CLKCMU_FSYS1_UFS_CARD,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_G2D, "dout_cmu_g2d_g2d", "gout_cmu_g2d_g2d",
+ CLK_CON_DIV_CLKCMU_G2D_G2D, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_JPEG, "dout_cmu_g2d_jpeg", "gout_cmu_g2d_jpeg",
+ CLK_CON_DIV_CLKCMU_G2D_JPEG, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_switch",
+ "gout_cmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_HPM, "dout_cmu_hpm", "gout_cmu_hpm",
+ CLK_CON_DIV_CLKCMU_HPM, 0, 2),
+ DIV(CLK_DOUT_CMU_IMEM_BUS, "dout_cmu_imem_bus", "gout_cmu_imem_bus",
+ CLK_CON_DIV_CLKCMU_IMEM_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_ISPHQ_BUS, "dout_cmu_isphq_bus", "gout_cmu_isphq_bus",
+ CLK_CON_DIV_CLKCMU_ISPHQ_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_ISPLP_BUS, "dout_cmu_isplp_bus", "gout_cmu_isplp_bus",
+ CLK_CON_DIV_CLKCMU_ISPLP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_IVA_BUS, "dout_cmu_iva_bus", "gout_cmu_iva_bus",
+ CLK_CON_DIV_CLKCMU_IVA_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC_BUS, "dout_cmu_mfc_bus", "gout_cmu_mfc_bus",
+ CLK_CON_DIV_CLKCMU_MFC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MODEM_SHARED0, "dout_cmu_modem_shared0",
+ "gout_cmu_modem_shared0", CLK_CON_DIV_CLKCMU_MODEM_SHARED0, 0, 3),
+ DIV(CLK_DOUT_CMU_MODEM_SHARED1, "dout_cmu_modem_shared1",
+ "gout_cmu_modem_shared1", CLK_CON_DIV_CLKCMU_MODEM_SHARED1, 0, 3),
+ DIV(CLK_DOUT_CMU_PERIC0_BUS, "dout_cmu_peric0_bus",
+ "gout_cmu_peric0_bus", CLK_CON_DIV_CLKCMU_PERIC0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_UART_DBG, "dout_cmu_peric0_uart_dbg",
+ "gout_cmu_peric0_uart_dbg", CLK_CON_DIV_CLKCMU_PERIC0_UART_DBG,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_USI00, "dout_cmu_peric0_usi00",
+ "gout_cmu_peric0_usi00", CLK_CON_DIV_CLKCMU_PERIC0_USI00, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_USI01, "dout_cmu_peric0_usi01",
+ "gout_cmu_peric0_usi01", CLK_CON_DIV_CLKCMU_PERIC0_USI01, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_USI02, "dout_cmu_peric0_usi02",
+ "gout_cmu_peric0_usi02", CLK_CON_DIV_CLKCMU_PERIC0_USI02, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_USI03, "dout_cmu_peric0_usi03",
+ "gout_cmu_peric0_usi03", CLK_CON_DIV_CLKCMU_PERIC0_USI03, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_BUS, "dout_cmu_peric1_bus",
+ "gout_cmu_peric1_bus", CLK_CON_DIV_CLKCMU_PERIC1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_SPEEDY2, "dout_cmu_peric1_speedy2",
+ "gout_cmu_peric1_speedy2", CLK_CON_DIV_CLKCMU_PERIC1_SPEEDY2,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_SPI_CAM0, "dout_cmu_peric1_spi_cam0",
+ "gout_cmu_peric1_spi_cam0", CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM0,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_SPI_CAM1, "dout_cmu_peric1_spi_cam1",
+ "gout_cmu_peric1_spi_cam1", CLK_CON_DIV_CLKCMU_PERIC1_SPI_CAM1,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_UART_BT, "dout_cmu_peric1_uart_bt",
+ "gout_cmu_peric1_uart_bt", CLK_CON_DIV_CLKCMU_PERIC1_UART_BT,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI04, "dout_cmu_peric1_usi04",
+ "gout_cmu_peric1_usi04", CLK_CON_DIV_CLKCMU_PERIC1_USI04, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI05, "dout_cmu_peric1_usi05",
+ "gout_cmu_peric1_usi05", CLK_CON_DIV_CLKCMU_PERIC1_USI05, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI06, "dout_cmu_peric1_usi06",
+ "gout_cmu_peric1_usi06", CLK_CON_DIV_CLKCMU_PERIC1_USI06, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI07, "dout_cmu_peric1_usi07",
+ "gout_cmu_peric1_usi07", CLK_CON_DIV_CLKCMU_PERIC1_USI07, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI08, "dout_cmu_peric1_usi08",
+ "gout_cmu_peric1_usi08", CLK_CON_DIV_CLKCMU_PERIC1_USI08, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI09, "dout_cmu_peric1_usi09",
+ "gout_cmu_peric1_usi09", CLK_CON_DIV_CLKCMU_PERIC1_USI09, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI10, "dout_cmu_peric1_usi10",
+ "gout_cmu_peric1_usi10", CLK_CON_DIV_CLKCMU_PERIC1_USI10, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI11, "dout_cmu_peric1_usi11",
+ "gout_cmu_peric1_usi11", CLK_CON_DIV_CLKCMU_PERIC1_USI11, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI12, "dout_cmu_peric1_usi12",
+ "gout_cmu_peric1_usi12", CLK_CON_DIV_CLKCMU_PERIC1_USI12, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_USI13, "dout_cmu_peric1_usi13",
+ "gout_cmu_peric1_usi13", CLK_CON_DIV_CLKCMU_PERIC1_USI13, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIS_BUS, "dout_cmu_peris_bus", "gout_cmu_peris_bus",
+ CLK_CON_DIV_CLKCMU_PERIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_SRDZ_BUS, "dout_cmu_srdz_bus", "gout_cmu_srdz_bus",
+ CLK_CON_DIV_CLKCMU_SRDZ_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_SRDZ_IMGD, "dout_cmu_srdz_imgd", "gout_cmu_srdz_imgd",
+ CLK_CON_DIV_CLKCMU_SRDZ_IMGD, 0, 4),
+ DIV(CLK_DOUT_CMU_VPU_BUS, "dout_cmu_vpu_bus", "gout_cmu_vpu_bus",
+ CLK_CON_DIV_CLKCMU_VPU_BUS, 0, 4),
+};
+
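+/*
+ * Fixed-factor clocks: FFACTOR(id, name, parent, mult, div, flags) scales
+ * the parent rate by a constant mult/div, so dout_cmu_shared0_div2 always
+ * runs at exactly half the rate of mout_pll_shared0.
+ */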
+static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_SHARED0_DIV2, "dout_cmu_shared0_div2",
+ "mout_pll_shared0", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED0_DIV4, "dout_cmu_shared0_div4",
+ "mout_pll_shared0", 1, 4, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED1_DIV2, "dout_cmu_shared1_div2",
+ "mout_pll_shared1", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED1_DIV4, "dout_cmu_shared1_div4",
+ "mout_pll_shared1", 1, 4, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED2_DIV2, "dout_cmu_shared2_div2",
+ "mout_pll_shared2", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED3_DIV2, "dout_cmu_shared3_div2",
+ "mout_pll_shared3", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_SHARED4_DIV2, "dout_cmu_shared4_div2",
+ "mout_pll_shared4", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_FSYS1_PCIE, "dout_cmu_fsys1_pcie",
+ "gout_cmu_fsys1_pcie", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_CP2AP_MIF_CLK_DIV2, "dout_cmu_cp2ap_mif_clk_div2",
+ "mout_cp2ap_mif_clk_user", 1, 2, 0),
+ FFACTOR(CLK_DOUT_CMU_CMU_OTP, "dout_cmu_cmu_otp", "oscclk", 1, 8, 0),
+};
+
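+/*
+ * CMU_TOP gate clocks: GATE(id, name, parent, register offset, enable bit,
+ * framework flags, gate flags). All CLK_CON_GAT_* registers here use bit 21
+ * as the enable bit. CLK_IGNORE_UNUSED keeps a gate on across the late
+ * clk_disable_unused() pass even when no consumer has claimed it.
+ */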
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CMU_DROOPDETECTOR, "gout_cmu_droopdetector",
+ "mout_cmu_droopdetector", CLK_CON_GAT_CLKCMU_DROOPDETECTOR,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MIF_SWITCH, "gout_cmu_mif_switch",
+ "mout_cmu_mif_switch", CLK_CON_GAT_CLKCMU_MIF_SWITCH, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ABOX_CPUABOX, "gout_cmu_abox_cpuabox",
+ "mout_cmu_abox_cpuabox", CLK_CON_GAT_GATE_CLKCMU_ABOX_CPUABOX,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_APM_BUS, "gout_cmu_apm_bus", "mout_cmu_apm_bus",
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUS1_BUS, "gout_cmu_bus1_bus", "mout_cmu_bus1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUSC_BUS, "gout_cmu_busc_bus", "mout_cmu_busc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUSC_BUSPHSI2C, "gout_cmu_busc_busphsi2c",
+ "mout_cmu_busc_busphsi2c", CLK_CON_GAT_GATE_CLKCMU_BUSC_BUSPHSI2C,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CAM_BUS, "gout_cmu_cam_bus", "mout_cmu_cam_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CAM_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CAM_TPU0, "gout_cmu_cam_tpu0", "mout_cmu_cam_tpu0",
+ CLK_CON_GAT_GATE_CLKCMU_CAM_TPU0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CAM_TPU1, "gout_cmu_cam_tpu1", "mout_cmu_cam_tpu1",
+ CLK_CON_GAT_GATE_CLKCMU_CAM_TPU1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CAM_VRA, "gout_cmu_cam_vra", "mout_cmu_cam_vra",
+ CLK_CON_GAT_GATE_CLKCMU_CAM_VRA, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK0, "gout_cmu_cis_clk0", "mout_cmu_cis_clk0",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK1, "gout_cmu_cis_clk1", "mout_cmu_cis_clk1",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK2, "gout_cmu_cis_clk2", "mout_cmu_cis_clk2",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK3, "gout_cmu_cis_clk3", "mout_cmu_cis_clk3",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CORE_BUS, "gout_cmu_core_bus", "mout_cmu_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_SWITCH, "gout_cmu_cpucl0_switch",
+ "mout_cmu_cpucl0_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL1_SWITCH, "gout_cmu_cpucl1_switch",
+ "mout_cmu_cpucl1_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_DBG_BUS, "gout_cmu_dbg_bus", "mout_cmu_dbg_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DBG_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DCAM_BUS, "gout_cmu_dcam_bus", "mout_cmu_dcam_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DCAM_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DCAM_IMGD, "gout_cmu_dcam_imgd",
+ "mout_cmu_dcam_imgd", CLK_CON_GAT_GATE_CLKCMU_DCAM_IMGD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU_BUS, "gout_cmu_dpu_bus", "mout_cmu_dpu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_DSP_BUS, "gout_cmu_dsp_bus", "mout_cmu_dsp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_FSYS0_BUS, "gout_cmu_fsys0_bus", "mout_cmu_fsys0_bus",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS0_DPGTC, "gout_cmu_fsys0_dpgtc",
+ "mout_cmu_fsys0_dpgtc", CLK_CON_GAT_GATE_CLKCMU_FSYS0_DPGTC,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS0_MMC_EMBD, "gout_cmu_fsys0_mmc_embd",
+ "mout_cmu_fsys0_mmc_embd", CLK_CON_GAT_GATE_CLKCMU_FSYS0_MMC_EMBD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS0_UFS_EMBD, "gout_cmu_fsys0_ufs_embd",
+ "mout_cmu_fsys0_ufs_embd", CLK_CON_GAT_GATE_CLKCMU_FSYS0_UFS_EMBD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS0_USBDRD30, "gout_cmu_fsys0_usbdrd30",
+ "mout_cmu_fsys0_usbdrd30", CLK_CON_GAT_GATE_CLKCMU_FSYS0_USBDRD30,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS1_BUS, "gout_cmu_fsys1_bus",
+ "mout_cmu_fsys1_bus", CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS1_MMC_CARD, "gout_cmu_fsys1_mmc_card",
+ "mout_cmu_fsys1_mmc_card", CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS1_PCIE, "gout_cmu_fsys1_pcie",
+ "mout_cmu_fsys1_pcie", CLK_CON_GAT_GATE_CLKCMU_FSYS1_PCIE,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_FSYS1_UFS_CARD, "gout_cmu_fsys1_ufs_card",
+ "mout_cmu_fsys1_ufs_card", CLK_CON_GAT_GATE_CLKCMU_FSYS1_UFS_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_G2D, "gout_cmu_g2d_g2d", "mout_cmu_g2d_g2d",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_JPEG, "gout_cmu_g2d_jpeg", "mout_cmu_g2d_jpeg",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_JPEG, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G3D_SWITCH, "gout_cmu_g3d_switch",
+ "fout_shared2_pll", CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HPM, "gout_cmu_hpm", "mout_cmu_hpm",
+ CLK_CON_GAT_GATE_CLKCMU_HPM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_IMEM_BUS, "gout_cmu_imem_bus", "mout_cmu_imem_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IMEM_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ISPHQ_BUS, "gout_cmu_isphq_bus",
+ "mout_cmu_isphq_bus", CLK_CON_GAT_GATE_CLKCMU_ISPHQ_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ISPLP_BUS, "gout_cmu_isplp_bus",
+ "mout_cmu_isplp_bus", CLK_CON_GAT_GATE_CLKCMU_ISPLP_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_IVA_BUS, "gout_cmu_iva_bus", "mout_cmu_iva_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IVA_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC_BUS, "gout_cmu_mfc_bus", "mout_cmu_mfc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_MFC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MODEM_SHARED0, "gout_cmu_modem_shared0",
+ "dout_cmu_shared0_div2", CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MODEM_SHARED1, "gout_cmu_modem_shared1",
+ "fout_shared2_pll", CLK_CON_GAT_GATE_CLKCMU_MODEM_SHARED1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_BUS, "gout_cmu_peric0_bus",
+ "mout_cmu_peric0_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_UART_DBG, "gout_cmu_peric0_uart_dbg",
+ "mout_cmu_peric0_uart_dbg",
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_UART_DBG, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_USI00, "gout_cmu_peric0_usi00",
+ "mout_cmu_peric0_usi00", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI00,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_USI01, "gout_cmu_peric0_usi01",
+ "mout_cmu_peric0_usi01", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI01,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_USI02, "gout_cmu_peric0_usi02",
+ "mout_cmu_peric0_usi02", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI02,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_USI03, "gout_cmu_peric0_usi03",
+ "mout_cmu_peric0_usi03", CLK_CON_GAT_GATE_CLKCMU_PERIC0_USI03,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_BUS, "gout_cmu_peric1_bus",
+ "mout_cmu_peric1_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_SPEEDY2, "gout_cmu_peric1_speedy2",
+ "mout_cmu_peric1_speedy2", CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPEEDY2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_SPI_CAM0, "gout_cmu_peric1_spi_cam0",
+ "mout_cmu_peric1_spi_cam0",
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_SPI_CAM1, "gout_cmu_peric1_spi_cam1",
+ "mout_cmu_peric1_spi_cam1",
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_SPI_CAM1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_UART_BT, "gout_cmu_peric1_uart_bt",
+ "mout_cmu_peric1_uart_bt", CLK_CON_GAT_GATE_CLKCMU_PERIC1_UART_BT,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI04, "gout_cmu_peric1_usi04",
+ "mout_cmu_peric1_usi04", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI04,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI05, "gout_cmu_peric1_usi05",
+ "mout_cmu_peric1_usi05", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI05,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI06, "gout_cmu_peric1_usi06",
+ "mout_cmu_peric1_usi06", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI06,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI07, "gout_cmu_peric1_usi07",
+ "mout_cmu_peric1_usi07", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI07,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI08, "gout_cmu_peric1_usi08",
+ "mout_cmu_peric1_usi08", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI08,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI09, "gout_cmu_peric1_usi09",
+ "mout_cmu_peric1_usi09", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI09,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI10, "gout_cmu_peric1_usi10",
+ "mout_cmu_peric1_usi10", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI11, "gout_cmu_peric1_usi11",
+ "mout_cmu_peric1_usi11", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI12, "gout_cmu_peric1_usi12",
+ "mout_cmu_peric1_usi12", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_USI13, "gout_cmu_peric1_usi13",
+ "mout_cmu_peric1_usi13", CLK_CON_GAT_GATE_CLKCMU_PERIC1_USI13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIS_BUS, "gout_cmu_peris_bus",
+ "mout_cmu_peris_bus", CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_SRDZ_BUS, "gout_cmu_srdz_bus", "mout_cmu_srdz_bus",
+ CLK_CON_GAT_GATE_CLKCMU_SRDZ_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_SRDZ_IMGD, "gout_cmu_srdz_imgd",
+ "mout_cmu_srdz_imgd", CLK_CON_GAT_GATE_CLKCMU_SRDZ_IMGD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_VPU_BUS, "gout_cmu_vpu_bus", "mout_cmu_vpu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_VPU_BUS, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = top_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos8895_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos8895_cmu_top, "samsung,exynos8895-cmu-top",
+ exynos8895_cmu_top_init);
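+
+/*
+ * Nodes with this compatible are picked up at of_clk_init() time, well
+ * before the platform bus probes. For illustration only (the unit address
+ * is hypothetical), a matching devicetree node would look roughly like:
+ *
+ *	cmu_top: clock-controller@... {
+ *		compatible = "samsung,exynos8895-cmu-top";
+ *		#clock-cells = <1>;
+ *	};
+ */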
+
+/* ---- CMU_PERIS ----------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIS (0x10010000) */
+#define PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER 0x0100
+#define PLL_CON2_MUX_CLKCMU_PERIS_BUS_USER 0x0108
+#define CLK_CON_MUX_MUX_CLK_PERIS_GIC 0x1000
+#define CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKS 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP0_IPCLKPORT_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP1_IPCLKPORT_ACLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_BUSIF_TMU_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_PMU_PERIS_IPCLKPORT_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC00_IPCLKPORT_PCLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC01_IPCLKPORT_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC02_IPCLKPORT_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC03_IPCLKPORT_PCLK 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC04_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC05_IPCLKPORT_PCLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC06_IPCLKPORT_PCLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC07_IPCLKPORT_PCLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC08_IPCLKPORT_PCLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC09_IPCLKPORT_PCLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC10_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC11_IPCLKPORT_PCLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC12_IPCLKPORT_PCLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC13_IPCLKPORT_PCLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC14_IPCLKPORT_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC15_IPCLKPORT_PCLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_XIU_P_PERIS_IPCLKPORT_ACLK 0x2090
+
+static const unsigned long peris_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER,
+ PLL_CON2_MUX_CLKCMU_PERIS_BUS_USER,
+ CLK_CON_MUX_MUX_CLK_PERIS_GIC,
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKS,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_BUSIF_TMU_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_PMU_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC00_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC01_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC02_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC03_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC04_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC05_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC06_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC07_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC08_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC09_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC10_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC11_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC12_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC13_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC14_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC15_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_XIU_P_PERIS_IPCLKPORT_ACLK,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIS */
+PNAME(mout_peris_bus_user_p) = { "oscclk", "dout_cmu_peris_bus" };
+PNAME(mout_peris_gic_p) = { "oscclk", "mout_peris_bus_user" };
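+/* Parent order maps directly to the mux select value; index 0 is oscclk. */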
+
+static const struct samsung_mux_clock peris_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIS_BUS_USER, "mout_peris_bus_user",
+ mout_peris_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERIS_GIC, "mout_peris_gic",
+ mout_peris_gic_p, CLK_CON_MUX_MUX_CLK_PERIS_GIC, 0, 5),
+};
+
+static const struct samsung_gate_clock peris_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIS_CMU_PERIS_PCLK, "gout_peris_cmu_peris_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIS_UID_PERIS_CMU_PERIS_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM,
+ "gout_peris_ad_axi_p_peris_aclkm", "mout_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKM,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKS,
+ "gout_peris_ad_axi_p_peris_aclks", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AD_AXI_P_PERIS_IPCLKPORT_ACLKS,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_AXI2APB_PERISP0_ACLK,
+ "gout_peris_axi2apb_perisp0_aclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_AXI2APB_PERISP1_ACLK,
+ "gout_peris_axi2apb_perisp1_aclk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_AXI2APB_PERISP1_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_BUSIF_TMU_PCLK, "gout_peris_busif_tmu_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_BUSIF_TMU_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ /* The GIC (interrupt controller) clock must always be running */
+ GATE(CLK_GOUT_PERIS_GIC_CLK, "gout_peris_gic_clk",
+ "mout_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_GIC_IPCLKPORT_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_LHM_AXI_P_PERIS_I_CLK,
+ "gout_peris_lhm_axi_p_peris_i_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_LHM_AXI_P_PERIS_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIS_MCT_PCLK, "gout_peris_mct_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_MCT_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK, "gout_peris_otp_con_bira_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_BIRA_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_OTP_CON_TOP_PCLK, "gout_peris_otp_con_top_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_OTP_CON_TOP_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_PMU_PERIS_PCLK, "gout_peris_pmu_peris_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_PMU_PERIS_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_BUSP_CLK,
+ "gout_peris_rstnsync_clk_peris_busp_clk", "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_GIC_CLK,
+ "gout_peris_rstnsync_clk_peris_gic_clk", "mout_peris_gic",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_RSTNSYNC_CLK_PERIS_GIC_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_SYSREG_PERIS_PCLK, "gout_peris_sysreg_peris_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC00_PCLK, "gout_peris_tzpc00_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC00_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC01_PCLK, "gout_peris_tzpc01_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC01_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC02_PCLK, "gout_peris_tzpc02_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC02_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC03_PCLK, "gout_peris_tzpc03_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC03_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC04_PCLK, "gout_peris_tzpc04_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC04_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC05_PCLK, "gout_peris_tzpc05_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC05_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC06_PCLK, "gout_peris_tzpc06_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC06_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC07_PCLK, "gout_peris_tzpc07_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC07_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC08_PCLK, "gout_peris_tzpc08_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC08_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC09_PCLK, "gout_peris_tzpc09_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC09_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC10_PCLK, "gout_peris_tzpc10_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC10_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC11_PCLK, "gout_peris_tzpc11_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC11_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC12_PCLK, "gout_peris_tzpc12_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC12_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC13_PCLK, "gout_peris_tzpc13_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC13_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC14_PCLK, "gout_peris_tzpc14_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC14_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_TZPC15_PCLK, "gout_peris_tzpc15_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_TZPC15_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK, "gout_peris_wdt_cluster0_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_WDT_CLUSTER1_PCLK, "gout_peris_wdt_cluster1_pclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIS_XIU_P_PERIS_ACLK, "gout_peris_xiu_p_peris_aclk",
+ "mout_peris_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIS_UID_XIU_P_PERIS_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
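+/*
+ * .clk_name identifies the parent bus clock that exynos_arm64_register_cmu()
+ * looks up via the CMU node's "clock-names" property and enables before
+ * registering the clocks above.
+ */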
+static const struct samsung_cmu_info peris_cmu_info __initconst = {
+ .mux_clks = peris_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
+ .gate_clks = peris_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peris_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIS,
+ .clk_regs = peris_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
+ .clk_name = "bus",
+};
+
+static void __init exynos8895_cmu_peris_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &peris_cmu_info);
+}
+
+/* Register CMU_PERIS early, as it's needed for the MCT timer */
+CLK_OF_DECLARE(exynos8895_cmu_peris, "samsung,exynos8895-cmu-peris",
+ exynos8895_cmu_peris_init);
+
+/* ---- CMU_FSYS0 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_FSYS0 (0x11000000) */
+#define PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER 0x0100
+#define PLL_CON2_MUX_CLKCMU_FSYS0_BUS_USER 0x0108
+#define PLL_CON0_MUX_CLKCMU_FSYS0_DPGTC_USER 0x0120
+#define PLL_CON2_MUX_CLKCMU_FSYS0_DPGTC_USER 0x0128
+#define PLL_CON0_MUX_CLKCMU_FSYS0_MMC_EMBD_USER 0x0140
+#define PLL_CON2_MUX_CLKCMU_FSYS0_MMC_EMBD_USER 0x0148
+#define PLL_CON0_MUX_CLKCMU_FSYS0_UFS_EMBD_USER 0x0160
+#define PLL_CON2_MUX_CLKCMU_FSYS0_UFS_EMBD_USER 0x0168
+#define PLL_CON0_MUX_CLKCMU_FSYS0_USBDRD30_USER 0x0180
+#define PLL_CON2_MUX_CLKCMU_FSYS0_USBDRD30_USER 0x0188
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AHBBR_FSYS0_IPCLKPORT_HCLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_FSYS0_IPCLKPORT_ACLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_USB_FSYS0_IPCLKPORT_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2APB_FSYS0_IPCLKPORT_ACLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_ACLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_GTC_EXT_CLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_ACLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_GPIO_FSYS0_IPCLKPORT_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_D_USBTV_IPCLKPORT_I_CLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_G_ETR_IPCLKPORT_I_CLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_P_FSYS0_IPCLKPORT_I_CLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHS_ACEL_D_FSYS0_IPCLKPORT_I_CLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_I_ACLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_SDCLKIN 0x2054
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PMU_FSYS0_IPCLKPORT_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_ACLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_PCLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_RSTNSYNC_CLK_FSYS0_BUS_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_SYSREG_FSYS0_IPCLKPORT_PCLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_ACLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO 0x2070
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_ACLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_REF_CLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_SUSPEND_CLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_AHB_CLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_CORE_CLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_XIU_CLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_US_D_FSYS0_USB_IPCLKPORT_ACLK 0x2090
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_IPCLKPORT_ACLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_USB_IPCLKPORT_ACLK 0x2098
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_P_FSYS0_IPCLKPORT_ACLK 0x209c
+
+static const unsigned long fsys0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS0_DPGTC_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS0_DPGTC_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS0_MMC_EMBD_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS0_MMC_EMBD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS0_UFS_EMBD_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS0_UFS_EMBD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS0_USBDRD30_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS0_USBDRD30_USER,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AHBBR_FSYS0_IPCLKPORT_HCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_FSYS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_USB_FSYS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2APB_FSYS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_GTC_EXT_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_GPIO_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_D_USBTV_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_G_ETR_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_P_FSYS0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHS_ACEL_D_FSYS0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_SDCLKIN,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PMU_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_RSTNSYNC_CLK_FSYS0_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_SYSREG_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_REF_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_SUSPEND_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_AHB_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_CORE_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_XIU_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_US_D_FSYS0_USB_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_USB_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_P_FSYS0_IPCLKPORT_ACLK,
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS0 */
+PNAME(mout_fsys0_bus_user_p) = { "oscclk", "dout_cmu_fsys0_bus" };
+PNAME(mout_fsys0_dpgtc_user_p) = { "oscclk", "dout_cmu_fsys0_dpgtc" };
+PNAME(mout_fsys0_mmc_embd_user_p) = { "oscclk",
+ "dout_cmu_fsys0_mmc_embd" };
+PNAME(mout_fsys0_ufs_embd_user_p) = { "oscclk",
+ "dout_cmu_fsys0_ufs_embd" };
+PNAME(mout_fsys0_usbdrd30_user_p) = { "oscclk",
+ "dout_cmu_fsys0_usbdrd30" };
+
+static const struct samsung_mux_clock fsys0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS0_BUS_USER, "mout_fsys0_bus_user",
+ mout_fsys0_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS0_DPGTC_USER, "mout_fsys0_dpgtc_user",
+ mout_fsys0_dpgtc_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_DPGTC_USER,
+ 4, 1),
+ MUX_F(CLK_MOUT_FSYS0_MMC_EMBD_USER, "mout_fsys0_mmc_embd_user",
+ mout_fsys0_mmc_embd_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_MMC_EMBD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX(CLK_MOUT_FSYS0_UFS_EMBD_USER, "mout_fsys0_ufs_embd_user",
+ mout_fsys0_ufs_embd_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_UFS_EMBD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_FSYS0_USBDRD30_USER, "mout_fsys0_usbdrd30_user",
+ mout_fsys0_usbdrd30_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_USBDRD30_USER,
+ 4, 1),
+};
+
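+/*
+ * CLK_SET_RATE_PARENT on mout_fsys0_mmc_embd_user above and on the SDCLKIN
+ * gate below lets an MMC rate request propagate through this CMU up to the
+ * dout_cmu_fsys0_mmc_embd divider in CMU_TOP.
+ */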
+static const struct samsung_gate_clock fsys0_gate_clks[] __initconst = {
+ /* Disabling this clock makes the system hang. Mark the clock as critical. */
+ GATE(CLK_GOUT_FSYS0_FSYS0_CMU_FSYS0_PCLK,
+ "gout_fsys0_fsys0_cmu_fsys0_pclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_AHBBR_FSYS0_HCLK, "gout_fsys0_ahbbr_fsys0_hclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AHBBR_FSYS0_IPCLKPORT_HCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_AXI2AHB_FSYS0_ACLK,
+ "gout_fsys0_axi2ahb_fsys0_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_FSYS0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_AXI2AHB_USB_FSYS0_ACLK,
+ "gout_fsys0_axi2ahb_usb_fsys0_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2AHB_USB_FSYS0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_AXI2APB_FSYS0_ACLK,
+ "gout_fsys0_axi2apb_fsys0_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_AXI2APB_FSYS0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_BTM_FSYS0_I_ACLK, "gout_fsys0_btm_fsys0_i_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_BTM_FSYS0_I_PCLK, "gout_fsys0_btm_fsys0_i_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BTM_FSYS0_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_DP_LINK_I_GTC_EXT_CLK,
+ "gout_fsys0_dp_link_i_gtc_ext_clk", "mout_fsys0_dpgtc_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_GTC_EXT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_DP_LINK_I_PCLK, "gout_fsys0_dp_link_i_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_ETR_MIU_I_ACLK, "gout_fsys0_etr_miu_i_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_ETR_MIU_I_PCLK, "gout_fsys0_etr_miu_i_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_ETR_MIU_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_GPIO_FSYS0_PCLK, "gout_fsys0_gpio_fsys0_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_GPIO_FSYS0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS0_LHM_AXI_D_USBTV_I_CLK,
+ "gout_fsys0_lhm_axi_d_usbtv_i_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_D_USBTV_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_LHM_AXI_G_ETR_I_CLK,
+ "gout_fsys0_lhm_axi_g_etr_i_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_G_ETR_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_LHM_AXI_P_FSYS0_I_CLK,
+ "gout_fsys0_lhm_axi_p_fsys0_i_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHM_AXI_P_FSYS0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_LHS_ACEL_D_FSYS0_I_CLK,
+ "gout_fsys0_lhs_acel_d_fsys0_i_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_LHS_ACEL_D_FSYS0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_MMC_EMBD_I_ACLK, "gout_fsys0_mmc_embd_i_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_MMC_EMBD_SDCLKIN, "gout_fsys0_mmc_embd_sdclkin",
+ "mout_fsys0_mmc_embd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_MMC_EMBD_IPCLKPORT_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS0_PMU_FSYS0_PCLK, "gout_fsys0_pmu_fsys0_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PMU_FSYS0_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_BCM_FSYS0_ACLK, "gout_fsys0_bcm_fsys0_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_BCM_FSYS0_PCLK, "gout_fsys0_bcm_fsys0_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_BCM_FSYS0_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_RSTNSYNC_CLK_FSYS0_BUS_CLK,
+ "gout_fsys0_rstnsync_clk_fsys0_bus_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_RSTNSYNC_CLK_FSYS0_BUS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_SYSREG_FSYS0_PCLK, "gout_fsys0_sysreg_fsys0_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_SYSREG_FSYS0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_UFS_EMBD_I_ACLK, "gout_fsys0_ufs_embd_i_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_UFS_EMBD_I_CLK_UNIPRO,
+ "gout_fsys0_ufs_embd_i_clk_unipro", "mout_fsys0_ufs_embd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS0_UFS_EMBD_I_FMP_CLK,
+ "gout_fsys0_ufs_embd_i_fmp_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USB30DRD_ACLK,
+ "gout_fsys0_usbtv_i_usb30drd_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USB30DRD_REF_CLK,
+ "gout_fsys0_usbtv_i_usb30drd_ref_clk", "mout_fsys0_usbdrd30_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_REF_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USB30DRD_SUSPEND_CLK,
+ "gout_fsys0_usbtv_i_usb30drd_suspend_clk",
+ "mout_fsys0_usbdrd30_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USB30DRD_SUSPEND_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USBTVH_AHB_CLK,
+ "gout_fsys0_usbtv_i_usbtvh_ahb_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_AHB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USBTVH_CORE_CLK,
+ "gout_fsys0_usbtv_i_usbtvh_core_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_CORE_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_USBTV_I_USBTVH_XIU_CLK,
+ "gout_fsys0_usbtv_i_usbtvh_xiu_clk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_USBTV_IPCLKPORT_I_USBTVH_XIU_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_US_D_FSYS0_USB_ACLK,
+ "gout_fsys0_us_d_fsys0_usb_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_US_D_FSYS0_USB_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_XIU_D_FSYS0_ACLK, "gout_fsys0_xiu_d_fsys0_aclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS0_XIU_D_FSYS0_USB_ACLK,
+ "gout_fsys0_xiu_d_fsys0_usb_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_D_FSYS0_USB_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS0_XIU_P_FSYS0_ACLK,
+ "gout_fsys0_xiu_p_fsys0_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_XIU_P_FSYS0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info fsys0_cmu_info __initconst = {
+ .mux_clks = fsys0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks),
+ .gate_clks = fsys0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
+ .nr_clk_ids = CLKS_NR_FSYS0,
+ .clk_regs = fsys0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
+ .clk_name = "bus",
+};
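+
+/*
+ * Unlike CMU_TOP and CMU_PERIS, CMU_FSYS0 has no CLK_OF_DECLARE() entry;
+ * it is presumably registered later through the driver's platform probe
+ * path, which is not visible in this hunk.
+ */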
+
+/* ---- CMU_FSYS1 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_FSYS1 (0x11400000) */
+#define PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER 0x0100
+#define PLL_CON2_MUX_CLKCMU_FSYS1_BUS_USER 0x0108
+#define PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER 0x0120
+#define PLL_CON2_MUX_CLKCMU_FSYS1_MMC_CARD_USER 0x0128
+#define PLL_CON0_MUX_CLKCMU_FSYS1_PCIE_USER 0x0140
+#define PLL_CON2_MUX_CLKCMU_FSYS1_PCIE_USER 0x0148
+#define PLL_CON0_MUX_CLKCMU_FSYS1_UFS_CARD_USER 0x0160
+#define PLL_CON2_MUX_CLKCMU_FSYS1_UFS_CARD_USER 0x0168
+#define CLK_CON_GAT_CLK_BLK_FSYS1_UID_PCIE_IPCLKPORT_PHY_REF_CLK_IN 0x2000
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM 0x2004
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AHBBR_FSYS1_IPCLKPORT_HCLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2AHB_FSYS1_IPCLKPORT_ACLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P0_IPCLKPORT_ACLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P1_IPCLKPORT_ACLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_PCLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_GPIO_FSYS1_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHM_AXI_P_FSYS1_IPCLKPORT_I_CLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHS_ACEL_D_FSYS1_IPCLKPORT_I_CLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN 0x2038
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_0 0x203c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_1 0x2040
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_0 0x2048
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_1 0x204c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK 0x2054
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL 0x2058
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_0 0x205c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_1 0x2060
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PMU_FSYS1_IPCLKPORT_PCLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_ACLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RSTNSYNC_CLK_FSYS1_BUS_IPCLKPORT_CLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_ACLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_ACLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_PCLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SYSREG_FSYS1_IPCLKPORT_PCLK 0x2090
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI0_IPCLKPORT_I_CLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI1_IPCLKPORT_I_CLK 0x2098
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_ACLK 0x209c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_CLK_UNIPRO 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_FMP_CLK 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_D_FSYS1_IPCLKPORT_ACLK 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_P_FSYS1_IPCLKPORT_ACLK 0x20ac
+
+static const unsigned long fsys1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS1_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS1_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS1_PCIE_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS1_PCIE_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS1_UFS_CARD_USER,
+ PLL_CON2_MUX_CLKCMU_FSYS1_UFS_CARD_USER,
+ CLK_CON_GAT_CLK_BLK_FSYS1_UID_PCIE_IPCLKPORT_PHY_REF_CLK_IN,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AHBBR_FSYS1_IPCLKPORT_HCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2AHB_FSYS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_GPIO_FSYS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHM_AXI_P_FSYS1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHS_ACEL_D_FSYS1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_0,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_1,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_0,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_1,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_0,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_1,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PMU_FSYS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RSTNSYNC_CLK_FSYS1_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SYSREG_FSYS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_CLK_UNIPRO,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_FMP_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_D_FSYS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_P_FSYS1_IPCLKPORT_ACLK,
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS1 */
+PNAME(mout_fsys1_bus_user_p) = { "oscclk", "dout_cmu_fsys1_bus" };
+PNAME(mout_fsys1_mmc_card_user_p) = { "oscclk",
+ "dout_cmu_fsys1_mmc_card" };
+PNAME(mout_fsys1_pcie_user_p) = { "oscclk", "dout_cmu_fsys1_pcie" };
+PNAME(mout_fsys1_ufs_card_user_p) = { "oscclk",
+ "dout_cmu_fsys1_ufs_card" };
+
+static const struct samsung_mux_clock fsys1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS1_BUS_USER, "mout_fsys1_bus_user",
+ mout_fsys1_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER, 4, 1),
+ MUX_F(CLK_MOUT_FSYS1_MMC_CARD_USER, "mout_fsys1_mmc_card_user",
+ mout_fsys1_mmc_card_user_p,
+ PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX(CLK_MOUT_FSYS1_PCIE_USER, "mout_fsys1_pcie_user",
+ mout_fsys1_pcie_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_PCIE_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS1_UFS_CARD_USER, "mout_fsys1_ufs_card_user",
+ mout_fsys1_ufs_card_user_p,
+ PLL_CON0_MUX_CLKCMU_FSYS1_UFS_CARD_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock fsys1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS1_PCIE_PHY_REF_CLK_IN,
+ "gout_clk_blk_fsys1_pcie_phy_ref_clk_in", "mout_fsys1_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS1_UID_PCIE_IPCLKPORT_PHY_REF_CLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_ADM_AHB_SSS_HCLKM, "gout_fsys1_adm_ahb_sss_hclkm",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_ADM_AHB_SSS_IPCLKPORT_HCLKM,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_AHBBR_FSYS1_HCLK, "gout_fsys1_ahbbr_fsys1_hclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AHBBR_FSYS1_IPCLKPORT_HCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_AXI2AHB_FSYS1_ACLK,
+ "gout_fsys1_axi2ahb_fsys1_aclk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2AHB_FSYS1_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_AXI2APB_FSYS1P0_ACLK,
+ "gout_fsys1_axi2apb_fsys1p0_aclk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_AXI2APB_FSYS1P1_ACLK,
+ "gout_fsys1_axi2apb_fsys1p1_aclk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_AXI2APB_FSYS1P1_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_BTM_FSYS1_I_ACLK, "gout_fsys1_btm_fsys1_i_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_BTM_FSYS1_I_PCLK, "gout_fsys1_btm_fsys1_i_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BTM_FSYS1_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_FSYS1_CMU_FSYS1_PCLK,
+ "gout_fsys1_fsys1_cmu_fsys1_pclk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_GPIO_FSYS1_PCLK, "gout_fsys1_gpio_fsys1_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_GPIO_FSYS1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS1_LHM_AXI_P_FSYS1_I_CLK,
+ "gout_fsys1_lhm_axi_p_fsys1_i_clk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHM_AXI_P_FSYS1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_LHS_ACEL_D_FSYS1_I_CLK,
+ "gout_fsys1_lhs_acel_d_fsys1_i_clk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_LHS_ACEL_D_FSYS1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS1_MMC_CARD_I_ACLK, "gout_fsys1_mmc_card_i_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN, "gout_fsys1_mmc_card_sdclkin",
+ "mout_fsys1_mmc_card_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_DBI_ACLK_0, "gout_fsys1_pcie_dbi_aclk_0",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_DBI_ACLK_1, "gout_fsys1_pcie_dbi_aclk_1",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_DBI_ACLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK,
+ "gout_fsys1_pcie_ieee1500_wrapper_for_pcie_phy_lc_x2_inst_0_i_scl_apb_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_0,
+ "gout_fsys1_pcie_mstr_aclk_0", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_1,
+ "gout_fsys1_pcie_mstr_aclk_1", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_MSTR_ACLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ "gout_fsys1_pcie_pcie_sub_ctrl_inst_0_i_driver_apb_clk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK,
+ "gout_fsys1_pcie_pcie_sub_ctrl_inst_1_i_driver_apb_clk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL,
+ "gout_fsys1_pcie_pipe2_digital_x2_wrap_inst_0_i_apb_pclk_scl",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_SLV_ACLK_0, "gout_fsys1_pcie_slv_aclk_0",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PCIE_SLV_ACLK_1, "gout_fsys1_pcie_slv_aclk_1",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PCIE_IPCLKPORT_SLV_ACLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_PMU_FSYS1_PCLK, "gout_fsys1_pmu_fsys1_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_PMU_FSYS1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_BCM_FSYS1_ACLK, "gout_fsys1_bcm_fsys1_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_BCM_FSYS1_PCLK, "gout_fsys1_bcm_fsys1_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_BCM_FSYS1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_RSTNSYNC_CLK_FSYS1_BUS_CLK,
+ "gout_fsys1_rstnsync_clk_fsys1_bus_clk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RSTNSYNC_CLK_FSYS1_BUS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_RTIC_I_ACLK, "gout_fsys1_rtic_i_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_RTIC_I_PCLK, "gout_fsys1_rtic_i_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_RTIC_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_SSS_I_ACLK, "gout_fsys1_sss_i_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_SSS_I_PCLK, "gout_fsys1_sss_i_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SSS_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_SYSREG_FSYS1_PCLK, "gout_fsys1_sysreg_fsys1_pclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_SYSREG_FSYS1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_TOE_WIFI0_I_CLK, "gout_fsys1_toe_wifi0_i_clk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI0_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_TOE_WIFI1_I_CLK, "gout_fsys1_toe_wifi1_i_clk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_TOE_WIFI1_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_UFS_CARD_I_ACLK, "gout_fsys1_ufs_card_i_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_UFS_CARD_I_CLK_UNIPRO,
+ "gout_fsys1_ufs_card_i_clk_unipro", "mout_fsys1_ufs_card_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_CLK_UNIPRO,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS1_UFS_CARD_I_FMP_CLK,
+ "gout_fsys1_ufs_card_i_fmp_clk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_UFS_CARD_IPCLKPORT_I_FMP_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_XIU_D_FSYS1_ACLK, "gout_fsys1_xiu_d_fsys1_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_D_FSYS1_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS1_XIU_P_FSYS1_ACLK, "gout_fsys1_xiu_p_fsys1_aclk",
+ "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_XIU_P_FSYS1_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info fsys1_cmu_info __initconst = {
+ .mux_clks = fsys1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks),
+ .gate_clks = fsys1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
+ .nr_clk_ids = CLKS_NR_FSYS1,
+ .clk_regs = fsys1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_PERIC0 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10400000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER 0x0100
+#define PLL_CON2_MUX_CLKCMU_PERIC0_BUS_USER 0x0108
+#define PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG_USER 0x0120
+#define PLL_CON2_MUX_CLKCMU_PERIC0_UART_DBG_USER 0x0128
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USER 0x0140
+#define PLL_CON2_MUX_CLKCMU_PERIC0_USI00_USER 0x0148
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USER 0x0160
+#define PLL_CON2_MUX_CLKCMU_PERIC0_USI01_USER 0x0168
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USER 0x0180
+#define PLL_CON2_MUX_CLKCMU_PERIC0_USI02_USER 0x0188
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USER 0x01a0
+#define PLL_CON2_MUX_CLKCMU_PERIC0_USI03_USER 0x01a8
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_AXI2APB_PERIC0_IPCLKPORT_ACLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PMU_PERIC0_IPCLKPORT_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PWM_IPCLKPORT_I_PCLK_S0 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SPEEDY2_TSP_IPCLKPORT_CLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_EXT_UCLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_SCLK_USI 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_SCLK_USI 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_SCLK_USI 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_SCLK_USI 0x205c
+
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_UART_DBG_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_USI00_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_USI01_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_USI02_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC0_USI03_USER,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_AXI2APB_PERIC0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PWM_IPCLKPORT_I_PCLK_S0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SPEEDY2_TSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_EXT_UCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_SCLK_USI,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC0 */
+PNAME(mout_peric0_bus_user_p) = { "oscclk", "dout_cmu_peric0_bus" };
+PNAME(mout_peric0_uart_dbg_user_p) = { "oscclk",
+ "dout_cmu_peric0_uart_dbg" };
+PNAME(mout_peric0_usi00_user_p) = { "oscclk",
+ "dout_cmu_peric0_usi00" };
+PNAME(mout_peric0_usi01_user_p) = { "oscclk",
+ "dout_cmu_peric0_usi01" };
+PNAME(mout_peric0_usi02_user_p) = { "oscclk",
+ "dout_cmu_peric0_usi02" };
+PNAME(mout_peric0_usi03_user_p) = { "oscclk",
+ "dout_cmu_peric0_usi03" };
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_BUS_USER, "mout_peric0_bus_user",
+ mout_peric0_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_UART_DBG_USER, "mout_peric0_uart_dbg_user",
+ mout_peric0_uart_dbg_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI00_USER, "mout_peric0_usi00_user",
+ mout_peric0_usi00_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI01_USER, "mout_peric0_usi01_user",
+ mout_peric0_usi01_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI02_USER, "mout_peric0_usi02_user",
+ mout_peric0_usi02_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI03_USER, "mout_peric0_usi03_user",
+ mout_peric0_usi03_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USER,
+ 4, 1),
+};
+
+static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC0_PERIC0_CMU_PERIC0_PCLK,
+ "gout_cperic0_peric0_cmu_peric0_pclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_AXI2APB_PERIC0_ACLK,
+ "gout_peric0_axi2apb_peric0_aclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_AXI2APB_PERIC0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK, "gout_peric0_gpio_peric0_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC0_LHM_AXI_P_PERIC0_I_CLK,
+ "gout_peric0_lhm_axi_p_peric0_i_clk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_PMU_PERIC0_PCLK, "gout_peric0_pmu_peric0_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PMU_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PWM_I_PCLK_S0, "gout_peric0_pwm_i_pclk_s0",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PWM_IPCLKPORT_I_PCLK_S0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_RSTNSYNC_CLK_PERIC0_BUSP_CLK,
+ "gout_peric0_rstnsync_clk_peric0_busp_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_SPEEDY2_TSP_CLK, "gout_peric0_speedy2_tsp_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SPEEDY2_TSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK,
+ "gout_peric0_sysreg_peric0_pclk", "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_UART_DBG_EXT_UCLK,
+ "gout_peric0_uart_dbg_ext_uclk", "mout_peric0_uart_dbg_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_EXT_UCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_UART_DBG_PCLK, "gout_peric0_uart_dbg_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_UART_DBG_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI00_I_PCLK, "gout_peric0_usi00_i_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI00_I_SCLK_USI, "gout_peric0_usi00_i_sclk_usi",
+ "mout_peric0_usi00_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI00_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI01_I_PCLK, "gout_peric0_usi01_i_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI01_I_SCLK_USI, "gout_peric0_usi01_i_sclk_usi",
+ "mout_peric0_usi01_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI01_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI02_I_PCLK, "gout_peric0_usi02_i_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI02_I_SCLK_USI, "gout_peric0_usi02_i_sclk_usi",
+ "mout_peric0_usi02_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI02_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI03_I_PCLK, "gout_peric0_usi03_i_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI03_I_SCLK_USI, "gout_peric0_usi03_i_sclk_usi",
+ "mout_peric0_usi03_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_USI03_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .gate_clks = peric0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_PERIC1 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10800000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER 0x0100
+#define PLL_CON2_MUX_CLKCMU_PERIC1_BUS_USER 0x0108
+#define PLL_CON0_MUX_CLKCMU_PERIC1_SPEEDY2_USER 0x0120
+#define PLL_CON2_MUX_CLKCMU_PERIC1_SPEEDY2_USER 0x0128
+#define PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM0_USER 0x0140
+#define PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM0_USER 0x0148
+#define PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM1_USER 0x0160
+#define PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM1_USER 0x0168
+#define PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0180
+#define PLL_CON2_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0188
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI04_USER 0x01a0
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI04_USER 0x01a8
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI05_USER 0x01c0
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI05_USER 0x01c8
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USER 0x01e0
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI06_USER 0x01e8
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USER 0x0200
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI07_USER 0x0208
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USER 0x0220
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI08_USER 0x0228
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USER 0x0240
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI09_USER 0x0248
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USER 0x0260
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI10_USER 0x0268
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USER 0x0280
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI11_USER 0x0288
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USER 0x02a0
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI12_USER 0x02a8
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USER 0x02c0
+#define PLL_CON2_MUX_CLKCMU_PERIC1_USI13_USER 0x02c8
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_SPEEDY2_IPCLKPORT_CLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P0_IPCLKPORT_ACLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P1_IPCLKPORT_ACLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P2_IPCLKPORT_ACLK 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM0_IPCLKPORT_IPCLK 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM1_IPCLKPORT_IPCLK 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM2_IPCLKPORT_IPCLK 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM3_IPCLKPORT_IPCLK 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PMU_PERIC1_IPCLKPORT_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_CLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_SCLK 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_CLK 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_SCLK 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_CLK 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_SCLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP1_IPCLKPORT_CLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP2_IPCLKPORT_CLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_PCLK 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_SPI_EXT_CLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_SPI_EXT_CLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_EXT_UCLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_PCLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_SCLK_USI 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_SCLK_USI 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_PCLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_SCLK_USI 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_PCLK 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_SCLK_USI 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_PCLK 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_SCLK_USI 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_PCLK 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_SCLK_USI 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_PCLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_SCLK_USI 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_PCLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_SCLK_USI 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_PCLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_SCLK_USI 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_PCLK 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_SCLK_USI 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK 0x20d4
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPEEDY2_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_SPEEDY2_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM0_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM0_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM1_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_SPI_CAM1_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI04_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI04_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI05_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI05_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI06_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI07_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI08_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI09_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI10_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI11_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI12_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USER,
+ PLL_CON2_MUX_CLKCMU_PERIC1_USI13_USER,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_SPEEDY2_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P0_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P2_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM0_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM1_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM2_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM3_IPCLKPORT_IPCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP1_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP2_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_SPI_EXT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_SPI_EXT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_EXT_UCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_SCLK_USI,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC1 */
+PNAME(mout_peric1_bus_user_p) = { "oscclk", "dout_cmu_peric1_bus" };
+PNAME(mout_peric1_speedy2_user_p) = { "oscclk",
+ "dout_cmu_peric1_speedy2" };
+PNAME(mout_peric1_spi_cam0_user_p) = { "oscclk",
+ "dout_cmu_peric1_spi_cam0" };
+PNAME(mout_peric1_spi_cam1_user_p) = { "oscclk",
+ "dout_cmu_peric1_spi_cam1" };
+PNAME(mout_peric1_uart_bt_user_p) = { "oscclk",
+ "dout_cmu_peric1_uart_bt" };
+PNAME(mout_peric1_usi04_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi04" };
+PNAME(mout_peric1_usi05_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi05" };
+PNAME(mout_peric1_usi06_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi06" };
+PNAME(mout_peric1_usi07_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi07" };
+PNAME(mout_peric1_usi08_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi08" };
+PNAME(mout_peric1_usi09_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi09" };
+PNAME(mout_peric1_usi10_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi10" };
+PNAME(mout_peric1_usi11_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi11" };
+PNAME(mout_peric1_usi12_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi12" };
+PNAME(mout_peric1_usi13_user_p) = { "oscclk",
+ "dout_cmu_peric1_usi13" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_BUS_USER, "mout_peric1_bus_user",
+ mout_peric1_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_SPEEDY2_USER, "mout_peric1_speedy2_user",
+ mout_peric1_speedy2_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPEEDY2_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_SPI_CAM0_USER, "mout_peric1_spi_cam0_user",
+ mout_peric1_spi_cam0_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM0_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_SPI_CAM1_USER, "mout_peric1_spi_cam1_user",
+ mout_peric1_spi_cam1_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_SPI_CAM1_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_UART_BT_USER, "mout_peric1_uart_bt_user",
+ mout_peric1_uart_bt_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI04_USER, "mout_peric1_usi04_user",
+ mout_peric1_usi04_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI04_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI05_USER, "mout_peric1_usi05_user",
+ mout_peric1_usi05_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI05_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI06_USER, "mout_peric1_usi06_user",
+ mout_peric1_usi06_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI07_USER, "mout_peric1_usi07_user",
+ mout_peric1_usi07_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI08_USER, "mout_peric1_usi08_user",
+ mout_peric1_usi08_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI09_USER, "mout_peric1_usi09_user",
+ mout_peric1_usi09_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI10_USER, "mout_peric1_usi10_user",
+ mout_peric1_usi10_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI11_USER, "mout_peric1_usi11_user",
+ mout_peric1_usi11_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI12_USER, "mout_peric1_usi12_user",
+ mout_peric1_usi12_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI13_USER, "mout_peric1_usi13_user",
+ mout_peric1_usi13_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USER,
+ 4, 1),
+};
+
+static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC1_PERIC1_CMU_PERIC1_PCLK,
+ "gout_peric1_peric1_cmu_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_SPEEDY2_CLK,
+ "gout_peric1_rstnsync_clk_peric1_speedy2_clk",
+ "mout_peric1_speedy2_user",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_SPEEDY2_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_AXI2APB_PERIC1P0_ACLK,
+ "gout_peric1_axi2apb_peric1p0_aclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P0_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_AXI2APB_PERIC1P1_ACLK,
+ "gout_peric1_axi2apb_peric1p1_aclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P1_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_AXI2APB_PERIC1P2_ACLK,
+ "gout_peric1_axi2apb_peric1p2_aclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_AXI2APB_PERIC1P2_IPCLKPORT_ACLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK,
+ "gout_peric1_gpio_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC1_HSI2C_CAM0_IPCLK, "gout_peric1_hsi2c_cam0_ipclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM0_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_HSI2C_CAM1_IPCLK,
+ "gout_peric1_hsi2c_cam1_ipclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM1_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_HSI2C_CAM2_IPCLK,
+ "gout_peric1_hsi2c_cam2_ipclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM2_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_HSI2C_CAM3_IPCLK, "gout_peric1_hsi2c_cam3_ipclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_HSI2C_CAM3_IPCLKPORT_IPCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_PERIC1_I_CLK,
+ "gout_peric1_lhm_axi_p_peric1_i_clk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_PMU_PERIC1_PCLK, "gout_peric1_pmu_peric1_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PMU_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_BUSP_CLK,
+ "gout_peric1_rstnsync_clk_peric1_busp_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI1_CLK, "gout_peric1_speedy2_ddi1_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI1_SCLK,
+ "gout_peric1_speedy2_ddi1_sclk", "mout_peric1_speedy2_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI1_IPCLKPORT_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI2_CLK, "gout_peric1_speedy2_ddi2_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI2_SCLK,
+ "gout_peric1_speedy2_ddi2_sclk", "mout_peric1_speedy2_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI2_IPCLKPORT_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI_CLK, "gout_peric1_speedy2_ddi_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_DDI_SCLK, "gout_peric1_speedy2_ddi_sclk",
+ "mout_peric1_speedy2_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_DDI_IPCLKPORT_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_TSP1_CLK, "gout_peric1_speedy2_tsp1_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP1_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPEEDY2_TSP2_CLK, "gout_peric1_speedy2_tsp2_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPEEDY2_TSP2_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPI_CAM0_PCLK, "gout_peric1_spi_cam0_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPI_CAM0_SPI_EXT_CLK,
+ "gout_peric1_spi_cam0_spi_ext_clk", "mout_peric1_spi_cam0_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM0_IPCLKPORT_SPI_EXT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPI_CAM1_PCLK, "gout_peric1_spi_cam1_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SPI_CAM1_SPI_EXT_CLK,
+ "gout_peric1_spi_cam1_spi_ext_clk", "mout_peric1_spi_cam1_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SPI_CAM1_IPCLKPORT_SPI_EXT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK,
+ "gout_peric1_sysreg_peric1_pclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_UART_BT_EXT_UCLK, "gout_peric1_uart_bt_ext_uclk",
+ "mout_peric1_uart_bt_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_EXT_UCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_UART_BT_PCLK, "gout_peric1_uart_bt_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_UART_BT_IPCLKPORT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI04_I_PCLK, "gout_peric1_usi04_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI04_I_SCLK_USI, "gout_peric1_usi04_i_sclk_usi",
+ "mout_peric1_usi04_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI04_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI05_I_PCLK, "gout_peric1_usi05_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI05_I_SCLK_USI, "gout_peric1_usi05_i_sclk_usi",
+ "mout_peric1_usi05_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI05_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI06_I_PCLK, "gout_peric1_usi06_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI06_I_SCLK_USI, "gout_peric1_usi06_i_sclk_usi",
+ "mout_peric1_usi06_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI06_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI07_I_PCLK, "gout_peric1_usi07_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI07_I_SCLK_USI, "gout_peric1_usi07_i_sclk_usi",
+ "mout_peric1_usi07_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI07_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI08_I_PCLK, "gout_peric1_usi08_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI08_I_SCLK_USI, "gout_peric1_usi08_i_sclk_usi",
+ "mout_peric1_usi08_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI08_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI09_I_PCLK, "gout_peric1_usi09_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI09_I_SCLK_USI, "gout_peric1_usi09_i_sclk_usi",
+ "mout_peric1_usi09_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI09_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI10_I_PCLK, "gout_peric1_usi10_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI10_I_SCLK_USI, "gout_peric1_usi10_i_sclk_usi",
+ "mout_peric1_usi10_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI10_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI11_I_PCLK, "gout_peric1_usi11_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI11_I_SCLK_USI, "gout_peric1_usi11_i_sclk_usi",
+ "mout_peric1_usi11_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI11_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI12_I_PCLK, "gout_peric1_usi12_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI12_I_SCLK_USI, "gout_peric1_usi12_i_sclk_usi",
+ "mout_peric1_usi12_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI12_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI13_I_PCLK, "gout_peric1_usi13_i_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI13_I_SCLK_USI, "gout_peric1_usi13_i_sclk_usi",
+ "mout_peric1_usi13_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI13_IPCLKPORT_I_SCLK_USI,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_XIU_P_PERIC1_ACLK,
+ "gout_peric1_xiu_p_peric1_aclk", "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .gate_clks = peric1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "bus",
+};
+
+static int __init exynos8895_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos8895_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos8895-cmu-fsys0",
+ .data = &fsys0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos8895-cmu-fsys1",
+ .data = &fsys1_cmu_info,
+ }, {
+ .compatible = "samsung,exynos8895-cmu-peric0",
+ .data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos8895-cmu-peric1",
+ .data = &peric1_cmu_info,
+ },
+ { }
+};
+
+static struct platform_driver exynos8895_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos8895-cmu",
+ .of_match_table = exynos8895_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos8895_cmu_probe,
+};
+
+static int __init exynos8895_cmu_init(void)
+{
+ return platform_driver_register(&exynos8895_cmu_driver);
+}
+core_initcall(exynos8895_cmu_init);
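For context, a minimal consumer-side sketch (illustrative only, not part of this patch): once one of the CMUs above is registered, peripheral drivers obtain its gate clocks through the generic clk API. The device, driver and "pclk"/"sclk" clock-names below are hypothetical; they are assumed to map to gout_peric0_usi00_i_pclk and gout_peric0_usi00_i_sclk_usi from this driver.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int usi_demo_probe(struct platform_device *pdev)
{
	struct clk *pclk, *sclk;

	/* Bus clock for register access; gated by CLK_GOUT_PERIC0_USI00_I_PCLK. */
	pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);

	/* Operating clock; gated by CLK_GOUT_PERIC0_USI00_I_SCLK_USI. */
	sclk = devm_clk_get_enabled(&pdev->dev, "sclk");
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	/*
	 * Both clocks are now prepared and enabled; devres disables and
	 * releases them automatically on driver detach.
	 */
	return 0;
}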
diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
new file mode 100644
index 000000000000..8e2a2e8eccee
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos990.c
@@ -0,0 +1,1343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Igor Belwon <igor.belwon@mentallysanemainliners.org>
+ *
+ * Common Clock Framework support for Exynos990.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/samsung,exynos990.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+#include "clk-pll.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_TOP (CLK_GOUT_CMU_VRA_BUS + 1)
+#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_XIU_D_HSI0_ACLK + 1)
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x1a330000) */
+#define PLL_LOCKTIME_PLL_G3D 0x0000
+#define PLL_LOCKTIME_PLL_MMC 0x0004
+#define PLL_LOCKTIME_PLL_SHARED0 0x0008
+#define PLL_LOCKTIME_PLL_SHARED1 0x000c
+#define PLL_LOCKTIME_PLL_SHARED2 0x0010
+#define PLL_LOCKTIME_PLL_SHARED3 0x0014
+#define PLL_LOCKTIME_PLL_SHARED4 0x0018
+#define PLL_CON0_PLL_G3D 0x0100
+#define PLL_CON3_PLL_G3D 0x010c
+#define PLL_CON0_PLL_MMC 0x0140
+#define PLL_CON3_PLL_MMC 0x014c
+#define PLL_CON0_PLL_SHARED0 0x0180
+#define PLL_CON3_PLL_SHARED0 0x018c
+#define PLL_CON0_PLL_SHARED1 0x01c0
+#define PLL_CON3_PLL_SHARED1 0x01cc
+#define PLL_CON0_PLL_SHARED2 0x0200
+#define PLL_CON3_PLL_SHARED2 0x020c
+#define PLL_CON0_PLL_SHARED3 0x0240
+#define PLL_CON3_PLL_SHARED3 0x024c
+#define PLL_CON0_PLL_SHARED4 0x0280
+#define PLL_CON3_PLL_SHARED4 0x028c
+#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004
+#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1008
+#define CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS 0x100c
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS 0x1010
+#define CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3 0x1024
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS 0x1038
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_BUS 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_DNS_BUS 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_DPU 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_DPU_ALT 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_DSP_BUS 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_G2D 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_HPM 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS 0x1078
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC 0x107c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD 0x1080
+#define CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG 0x1084
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS 0x1088
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD 0x108c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE 0x1090
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD 0x1094
+#define CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD 0x1098
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS 0x109c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE 0x10a0
+#define CLK_CON_MUX_MUX_CLKCMU_IPP_BUS 0x10a4
+#define CLK_CON_MUX_MUX_CLKCMU_ITP_BUS 0x10a8
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS 0x10ac
+#define CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC 0x10b0
+#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU 0x10b4
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0 0x10b8
+#define CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD 0x10bc
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP 0x10c0
+#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x10c4
+#define CLK_CON_MUX_MUX_CLKCMU_NPU_BUS 0x10c8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS 0x10cc
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP 0x10d0
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS 0x10d4
+#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP 0x10d8
+#define CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS 0x10dc
+#define CLK_CON_MUX_MUX_CLKCMU_SSP_BUS 0x10e0
+#define CLK_CON_MUX_MUX_CLKCMU_TNR_BUS 0x10e4
+#define CLK_CON_MUX_MUX_CLKCMU_VRA_BUS 0x10e8
+#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1800
+#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x1804
+#define CLK_CON_DIV_CLKCMU_BUS0_BUS 0x1808
+#define CLK_CON_DIV_CLKCMU_BUS1_BUS 0x180c
+#define CLK_CON_DIV_CLKCMU_BUS1_SSS 0x1810
+#define CLK_CON_DIV_CLKCMU_CIS_CLK0 0x1814
+#define CLK_CON_DIV_CLKCMU_CIS_CLK1 0x1818
+#define CLK_CON_DIV_CLKCMU_CIS_CLK2 0x181c
+#define CLK_CON_DIV_CLKCMU_CIS_CLK3 0x1820
+#define CLK_CON_DIV_CLKCMU_CIS_CLK4 0x1824
+#define CLK_CON_DIV_CLKCMU_CIS_CLK5 0x1828
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST 0x182c
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1830
+#define CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS 0x1834
+#define CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1838
+#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x183c
+#define CLK_CON_DIV_CLKCMU_CPUCL2_BUSP 0x1840
+#define CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH 0x1844
+#define CLK_CON_DIV_CLKCMU_CSIS_BUS 0x1848
+#define CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU 0x184c
+#define CLK_CON_DIV_CLKCMU_DNC_BUS 0x1850
+#define CLK_CON_DIV_CLKCMU_DNC_BUSM 0x1854
+#define CLK_CON_DIV_CLKCMU_DNS_BUS 0x1858
+#define CLK_CON_DIV_CLKCMU_DSP_BUS 0x185c
+#define CLK_CON_DIV_CLKCMU_G2D_G2D 0x1860
+#define CLK_CON_DIV_CLKCMU_G2D_MSCL 0x1864
+#define CLK_CON_DIV_CLKCMU_G3D_SWITCH 0x1868
+#define CLK_CON_DIV_CLKCMU_HPM 0x186c
+#define CLK_CON_DIV_CLKCMU_HSI0_BUS 0x1870
+#define CLK_CON_DIV_CLKCMU_HSI0_DPGTC 0x1874
+#define CLK_CON_DIV_CLKCMU_HSI0_USB31DRD 0x1878
+#define CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG 0x187c
+#define CLK_CON_DIV_CLKCMU_HSI1_BUS 0x1880
+#define CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD 0x1884
+#define CLK_CON_DIV_CLKCMU_HSI1_PCIE 0x1888
+#define CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD 0x188c
+#define CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD 0x1890
+#define CLK_CON_DIV_CLKCMU_HSI2_BUS 0x1894
+#define CLK_CON_DIV_CLKCMU_HSI2_PCIE 0x1898
+#define CLK_CON_DIV_CLKCMU_IPP_BUS 0x189c
+#define CLK_CON_DIV_CLKCMU_ITP_BUS 0x18a0
+#define CLK_CON_DIV_CLKCMU_MCSC_BUS 0x18a4
+#define CLK_CON_DIV_CLKCMU_MCSC_GDC 0x18a8
+#define CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU 0x18ac
+#define CLK_CON_DIV_CLKCMU_MFC0_MFC0 0x18b0
+#define CLK_CON_DIV_CLKCMU_MFC0_WFD 0x18b4
+#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x18b8
+#define CLK_CON_DIV_CLKCMU_NPU_BUS 0x18bc
+#define CLK_CON_DIV_CLKCMU_OTP 0x18c0
+#define CLK_CON_DIV_CLKCMU_PERIC0_BUS 0x18c4
+#define CLK_CON_DIV_CLKCMU_PERIC0_IP 0x18c8
+#define CLK_CON_DIV_CLKCMU_PERIC1_BUS 0x18cc
+#define CLK_CON_DIV_CLKCMU_PERIC1_IP 0x18d0
+#define CLK_CON_DIV_CLKCMU_PERIS_BUS 0x18d4
+#define CLK_CON_DIV_CLKCMU_SSP_BUS 0x18d8
+#define CLK_CON_DIV_CLKCMU_TNR_BUS 0x18dc
+#define CLK_CON_DIV_CLKCMU_VRA_BUS 0x18e0
+#define CLK_CON_DIV_DIV_CLKCMU_DPU 0x18e8
+#define CLK_CON_DIV_DIV_CLKCMU_DPU_ALT 0x18ec
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18f4
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18f8
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18fc
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x1900
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x1904
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x1908
+#define CLK_CON_DIV_PLL_SHARED2_DIV2 0x190c
+#define CLK_CON_DIV_PLL_SHARED4_DIV2 0x1910
+#define CLK_CON_DIV_PLL_SHARED4_DIV3 0x1914
+#define CLK_CON_DIV_PLL_SHARED4_DIV4 0x1918
+#define CLK_CON_GAT_CLKCMU_G3D_BUS 0x2000
+#define CLK_CON_GAT_CLKCMU_MIF_SWITCH 0x2004
+#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2008
+#define CLK_CON_GAT_GATE_CLKCMU_AUD_CPU 0x200c
+#define CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS 0x2010
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS 0x2014
+#define CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS 0x2018
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4 0x202c
+#define CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5 0x2030
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x2034
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS 0x2038
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2040
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_BUS 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_DNS_BUS 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_DPU 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_DPU_BUS 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_DSP_BUS 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_G2D 0x206c
+#define CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL 0x2070
+#define CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH 0x2074
+#define CLK_CON_GAT_GATE_CLKCMU_HPM 0x2078
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD 0x2098
+#define CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD 0x209c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS 0x20a0
+#define CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE 0x20a4
+#define CLK_CON_GAT_GATE_CLKCMU_IPP_BUS 0x20a8
+#define CLK_CON_GAT_GATE_CLKCMU_ITP_BUS 0x20ac
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS 0x20b0
+#define CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC 0x20b4
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0 0x20bc
+#define CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD 0x20c0
+#define CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP 0x20c4
+#define CLK_CON_GAT_GATE_CLKCMU_NPU_BUS 0x20c8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS 0x20cc
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP 0x20d0
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS 0x20d4
+#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP 0x20d8
+#define CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS 0x20dc
+#define CLK_CON_GAT_GATE_CLKCMU_SSP_BUS 0x20e0
+#define CLK_CON_GAT_GATE_CLKCMU_TNR_BUS 0x20e4
+#define CLK_CON_GAT_GATE_CLKCMU_VRA_BUS 0x20e8
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_G3D,
+ PLL_LOCKTIME_PLL_MMC,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_LOCKTIME_PLL_SHARED2,
+ PLL_LOCKTIME_PLL_SHARED3,
+ PLL_LOCKTIME_PLL_SHARED4,
+ PLL_CON3_PLL_G3D,
+ PLL_CON3_PLL_MMC,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED2,
+ PLL_CON3_PLL_SHARED3,
+ PLL_CON3_PLL_SHARED4,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4,
+ CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM,
+ CLK_CON_MUX_MUX_CLKCMU_DNS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_DPU,
+ CLK_CON_MUX_MUX_CLKCMU_DPU_ALT,
+ CLK_CON_MUX_MUX_CLKCMU_DSP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_G2D,
+ CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL,
+ CLK_CON_MUX_MUX_CLKCMU_HPM,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE,
+ CLK_CON_MUX_MUX_CLKCMU_IPP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_ITP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC,
+ CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0,
+ CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP,
+ CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_MUX_CLKCMU_NPU_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_SSP_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_TNR_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_VRA_BUS,
+ CLK_CON_DIV_CLKCMU_APM_BUS,
+ CLK_CON_DIV_CLKCMU_AUD_CPU,
+ CLK_CON_DIV_CLKCMU_BUS0_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_BUS,
+ CLK_CON_DIV_CLKCMU_BUS1_SSS,
+ CLK_CON_DIV_CLKCMU_CIS_CLK0,
+ CLK_CON_DIV_CLKCMU_CIS_CLK1,
+ CLK_CON_DIV_CLKCMU_CIS_CLK2,
+ CLK_CON_DIV_CLKCMU_CIS_CLK3,
+ CLK_CON_DIV_CLKCMU_CIS_CLK4,
+ CLK_CON_DIV_CLKCMU_CIS_CLK5,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_DIV_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_DIV_CLKCMU_CSIS_BUS,
+ CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_DIV_CLKCMU_DNC_BUS,
+ CLK_CON_DIV_CLKCMU_DNC_BUSM,
+ CLK_CON_DIV_CLKCMU_DNS_BUS,
+ CLK_CON_DIV_CLKCMU_DSP_BUS,
+ CLK_CON_DIV_CLKCMU_G2D_G2D,
+ CLK_CON_DIV_CLKCMU_G2D_MSCL,
+ CLK_CON_DIV_CLKCMU_G3D_SWITCH,
+ CLK_CON_DIV_CLKCMU_HPM,
+ CLK_CON_DIV_CLKCMU_HSI0_BUS,
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC,
+ CLK_CON_DIV_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_DIV_CLKCMU_HSI1_BUS,
+ CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE,
+ CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_DIV_CLKCMU_HSI2_BUS,
+ CLK_CON_DIV_CLKCMU_HSI2_PCIE,
+ CLK_CON_DIV_CLKCMU_IPP_BUS,
+ CLK_CON_DIV_CLKCMU_ITP_BUS,
+ CLK_CON_DIV_CLKCMU_MCSC_BUS,
+ CLK_CON_DIV_CLKCMU_MCSC_GDC,
+ CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ CLK_CON_DIV_CLKCMU_MFC0_MFC0,
+ CLK_CON_DIV_CLKCMU_MFC0_WFD,
+ CLK_CON_DIV_CLKCMU_MIF_BUSP,
+ CLK_CON_DIV_CLKCMU_NPU_BUS,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC0_IP,
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS,
+ CLK_CON_DIV_CLKCMU_PERIC1_IP,
+ CLK_CON_DIV_CLKCMU_PERIS_BUS,
+ CLK_CON_DIV_CLKCMU_SSP_BUS,
+ CLK_CON_DIV_CLKCMU_TNR_BUS,
+ CLK_CON_DIV_CLKCMU_VRA_BUS,
+ CLK_CON_DIV_DIV_CLKCMU_DPU,
+ CLK_CON_DIV_DIV_CLKCMU_DPU_ALT,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_DIV_PLL_SHARED2_DIV2,
+ CLK_CON_DIV_PLL_SHARED4_DIV2,
+ CLK_CON_DIV_PLL_SHARED4_DIV3,
+ CLK_CON_DIV_PLL_SHARED4_DIV4,
+ CLK_CON_GAT_CLKCMU_G3D_BUS,
+ CLK_CON_GAT_CLKCMU_MIF_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU,
+ CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4,
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP,
+ CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM,
+ CLK_CON_GAT_GATE_CLKCMU_DNS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DPU,
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D,
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL,
+ CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ CLK_CON_GAT_GATE_CLKCMU_HPM,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE,
+ CLK_CON_GAT_GATE_CLKCMU_IPP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_ITP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0,
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD,
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP,
+ CLK_CON_GAT_GATE_CLKCMU_NPU_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_SSP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_TNR_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_VRA_BUS,
+};
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ PLL(pll_0717x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
+ PLL(pll_0717x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
+ PLL(pll_0718x, CLK_FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
+ PLL(pll_0718x, CLK_FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
+ PLL(pll_0717x, CLK_FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
+ PLL(pll_0732x, CLK_FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+ PLL(pll_0718x, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk",
+ PLL_LOCKTIME_PLL_G3D, PLL_CON3_PLL_G3D, NULL),
+};
+
+/* Parent clock list for CMU_TOP muxes */
+PNAME(mout_pll_shared0_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_pll_shared1_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_pll_shared2_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" };
+PNAME(mout_pll_shared4_p) = { "oscclk", "fout_shared4_pll" };
+PNAME(mout_pll_mmc_p) = { "oscclk", "fout_mmc_pll" };
+PNAME(mout_pll_g3d_p) = { "oscclk", "fout_g3d_pll" };
+PNAME(mout_cmu_apm_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_aud_cpu_p) = { "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_bus0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_bus1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_bus1_sss_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_cis_clk0_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk1_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk2_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk3_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk4_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cis_clk5_p) = { "oscclk",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cmu_boost_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_core_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared1_div3",
+ "dout_cmu_shared0_div4",
+ "fout_shared3_pll", "oscclk" };
+PNAME(mout_cmu_cpucl0_dbg_bus_p) = { "fout_shared2_pll",
+ "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4",
+ "oscclk" };
+PNAME(mout_cmu_cpucl0_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_cpucl1_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_cpucl2_busp_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_cpucl2_switch_p) = { "fout_shared4_pll",
+ "dout_cmu_shared0_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_csis_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3" };
+PNAME(mout_cmu_csis_ois_mcu_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_dnc_bus_p) = { "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_dnc_busm_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div4" };
+PNAME(mout_cmu_dns_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_dpu_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_dpu_alt_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_dsp_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "fout_shared3_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_g2d_g2d_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_g2d_mscl_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "dout_cmu_shared4_div4",
+ "oscclk" };
+PNAME(mout_cmu_hpm_p) = { "oscclk",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi0_dpgtc_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_usb31drd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi0_usbdp_debug_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_hsi1_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "fout_mmc_pll", "oscclk", "oscclk" };
+PNAME(mout_cmu_hsi1_mmc_card_p) = { "oscclk", "fout_shared2_pll",
+ "fout_mmc_pll",
+ "dout_cmu_shared0_div4" };
+PNAME(mout_cmu_hsi1_pcie_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_hsi1_ufs_card_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi1_ufs_embd_p) = { "oscclk", "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_hsi2_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_hsi2_pcie_p) = { "oscclk", "fout_shared2_pll" };
+PNAME(mout_cmu_ipp_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "oscclk", "oscclk", "oscclk" };
+PNAME(mout_cmu_itp_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mcsc_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_mcsc_gdc_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_cmu_boost_cpu_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_mfc0_mfc0_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_mfc0_wfd_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_mif_busp_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_mif_switch_p) = { "fout_shared0_pll",
+ "fout_shared1_pll",
+ "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
+PNAME(mout_cmu_npu_bus_p) = { "dout_cmu_shared0_div2",
+ "dout_cmu_shared1_div2",
+ "fout_shared2_pll",
+ "dout_cmu_shared4_div2",
+ "fout_shared3_pll", "oscclk",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_peric0_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric0_ip_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric1_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peric1_ip_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_peris_bus_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_ssp_bus_p) = { "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2" };
+PNAME(mout_cmu_tnr_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared4_div3",
+ "dout_cmu_shared2_div2",
+ "oscclk", "oscclk" };
+PNAME(mout_cmu_vra_bus_p) = { "dout_cmu_shared0_div3",
+ "dout_cmu_shared4_div2",
+ "dout_cmu_shared0_div4",
+ "dout_cmu_shared4_div3" };
+
+/*
+ * Register name to clock name mangling strategy used in this file
+ *
+ * Replace PLL_CON{0,3}_PLL with CLK_MOUT_PLL and mout_pll
+ * Replace CLK_CON_MUX_MUX_CLKCMU with CLK_MOUT_CMU and mout_cmu
+ * Replace CLK_CON_DIV_CLKCMU with CLK_DOUT_CMU_CMU and dout_cmu_cmu
+ * Replace CLK_CON_DIV_DIV_CLKCMU with CLK_DOUT_CMU_CMU and dout_cmu_cmu
+ * Replace CLK_CON_DIV_PLL_CLKCMU with CLK_DOUT_CMU_CMU and dout_cmu_cmu
+ * Replace CLK_CON_GAT_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ * Replace CLK_CON_GAT_GATE_CLKCMU with CLK_GOUT_CMU and gout_cmu
+ *
+ * For gates remove _UID, _BLK, _IPCLKPORT, _I and _RSTNSYNC
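+ *
+ * For example (mappings taken from the tables below, shown here purely
+ * for illustration): PLL_CON3_PLL_SHARED0 becomes CLK_MOUT_PLL_SHARED0
+ * and "mout_pll_shared0", and CLK_CON_GAT_GATE_CLKCMU_APM_BUS becomes
+ * CLK_GOUT_CMU_APM_BUS and "gout_cmu_apm_bus".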
+ */
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
+ PLL_CON3_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
+ PLL_CON3_PLL_SHARED1, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
+ PLL_CON3_PLL_SHARED2, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
+ PLL_CON3_PLL_SHARED3, 4, 1),
+ MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p,
+ PLL_CON0_PLL_SHARED4, 4, 1),
+ MUX(CLK_MOUT_PLL_MMC, "mout_pll_mmc", mout_pll_mmc_p,
+ PLL_CON0_PLL_MMC, 4, 1),
+ MUX(CLK_MOUT_PLL_G3D, "mout_pll_g3d", mout_pll_g3d_p,
+ PLL_CON0_PLL_G3D, 4, 1),
+ MUX(CLK_MOUT_CMU_APM_BUS, "mout_cmu_apm_bus",
+ mout_cmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_AUD_CPU, "mout_cmu_aud_cpu",
+ mout_cmu_aud_cpu_p, CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS0_BUS, "mout_cmu_bus0_bus",
+ mout_cmu_bus0_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS1_BUS, "mout_cmu_bus1_bus",
+ mout_cmu_bus1_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_BUS1_SSS, "mout_cmu_bus1_sss",
+ mout_cmu_bus1_sss_p, CLK_CON_MUX_MUX_CLKCMU_BUS1_SSS, 0, 2),
+ MUX(CLK_MOUT_CMU_CIS_CLK0, "mout_cmu_cis_clk0",
+ mout_cmu_cis_clk0_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK0, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK1, "mout_cmu_cis_clk1",
+ mout_cmu_cis_clk1_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK1, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK2, "mout_cmu_cis_clk2",
+ mout_cmu_cis_clk2_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK2, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK3, "mout_cmu_cis_clk3",
+ mout_cmu_cis_clk3_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK3, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK4, "mout_cmu_cis_clk4",
+ mout_cmu_cis_clk4_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK4, 0, 1),
+ MUX(CLK_MOUT_CMU_CIS_CLK5, "mout_cmu_cis_clk5",
+ mout_cmu_cis_clk5_p, CLK_CON_MUX_MUX_CLKCMU_CIS_CLK5, 0, 1),
+ MUX(CLK_MOUT_CMU_CMU_BOOST, "mout_cmu_cmu_boost",
+ mout_cmu_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2),
+ MUX(CLK_MOUT_CMU_CORE_BUS, "mout_cmu_core_bus",
+ mout_cmu_core_bus_p, CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_CPUCL0_DBG_BUS, "mout_cmu_cpucl0_dbg_bus",
+ mout_cmu_cpucl0_dbg_bus_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_DBG_BUS,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL0_SWITCH, "mout_cmu_cpucl0_switch",
+ mout_cmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL1_SWITCH, "mout_cmu_cpucl1_switch",
+ mout_cmu_cpucl1_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CPUCL2_BUSP, "mout_cmu_cpucl2_busp",
+ mout_cmu_cpucl2_busp_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_BUSP,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_CPUCL2_SWITCH, "mout_cmu_cpucl2_switch",
+ mout_cmu_cpucl2_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL2_SWITCH,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_CSIS_BUS, "mout_cmu_csis_bus",
+ mout_cmu_csis_bus_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_CSIS_OIS_MCU, "mout_cmu_csis_ois_mcu",
+ mout_cmu_csis_ois_mcu_p, CLK_CON_MUX_MUX_CLKCMU_CSIS_OIS_MCU,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_DNC_BUS, "mout_cmu_dnc_bus",
+ mout_cmu_dnc_bus_p, CLK_CON_MUX_MUX_CLKCMU_DNC_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_DNC_BUSM, "mout_cmu_dnc_busm",
+ mout_cmu_dnc_busm_p, CLK_CON_MUX_MUX_CLKCMU_DNC_BUSM, 0, 2),
+ MUX(CLK_MOUT_CMU_DNS_BUS, "mout_cmu_dns_bus",
+ mout_cmu_dns_bus_p, CLK_CON_MUX_MUX_CLKCMU_DNS_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_DPU, "mout_cmu_dpu",
+ mout_cmu_dpu_p, CLK_CON_MUX_MUX_CLKCMU_DPU, 0, 1),
+ MUX(CLK_MOUT_CMU_DPU_ALT, "mout_cmu_dpu_alt",
+ mout_cmu_dpu_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPU_ALT, 0, 2),
+ MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus",
+ mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d",
+ mout_cmu_g2d_g2d_p, CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2),
+ MUX(CLK_MOUT_CMU_G2D_MSCL, "mout_cmu_g2d_mscl",
+ mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 1),
+ MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm",
+ mout_cmu_hpm_p, CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_BUS, "mout_cmu_hsi0_bus",
+ mout_cmu_hsi0_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI0_DPGTC, "mout_cmu_hsi0_dpgtc",
+ mout_cmu_hsi0_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_DPGTC, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_USB31DRD, "mout_cmu_hsi0_usb31drd",
+ mout_cmu_hsi0_usb31drd_p, CLK_CON_MUX_MUX_CLKCMU_HSI0_USB31DRD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI0_USBDP_DEBUG, "mout_cmu_hsi0_usbdp_debug",
+ mout_cmu_hsi0_usbdp_debug_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_BUS, "mout_cmu_hsi1_bus",
+ mout_cmu_hsi1_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_HSI1_MMC_CARD, "mout_cmu_hsi1_mmc_card",
+ mout_cmu_hsi1_mmc_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_MMC_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_PCIE, "mout_cmu_hsi1_pcie",
+ mout_cmu_hsi1_pcie_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI1_UFS_CARD, "mout_cmu_hsi1_ufs_card",
+ mout_cmu_hsi1_ufs_card_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_CARD,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_HSI1_UFS_EMBD, "mout_cmu_hsi1_ufs_embd",
+ mout_cmu_hsi1_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
+ 0, 1),
+ MUX(CLK_MOUT_CMU_HSI2_BUS, "mout_cmu_hsi2_bus",
+ mout_cmu_hsi2_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_HSI2_PCIE, "mout_cmu_hsi2_pcie",
+ mout_cmu_hsi2_pcie_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_PCIE, 0, 1),
+ MUX(CLK_MOUT_CMU_IPP_BUS, "mout_cmu_ipp_bus",
+ mout_cmu_ipp_bus_p, CLK_CON_MUX_MUX_CLKCMU_IPP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_ITP_BUS, "mout_cmu_itp_bus",
+ mout_cmu_itp_bus_p, CLK_CON_MUX_MUX_CLKCMU_ITP_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_BUS, "mout_cmu_mcsc_bus",
+ mout_cmu_mcsc_bus_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_MCSC_GDC, "mout_cmu_mcsc_gdc",
+ mout_cmu_mcsc_gdc_p, CLK_CON_MUX_MUX_CLKCMU_MCSC_GDC, 0, 3),
+ MUX(CLK_MOUT_CMU_CMU_BOOST_CPU, "mout_cmu_cmu_boost_cpu",
+ mout_cmu_cmu_boost_cpu_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST_CPU,
+ 0, 2),
+ MUX(CLK_MOUT_CMU_MFC0_MFC0, "mout_cmu_mfc0_mfc0",
+ mout_cmu_mfc0_mfc0_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_MFC0, 0, 2),
+ MUX(CLK_MOUT_CMU_MFC0_WFD, "mout_cmu_mfc0_wfd",
+ mout_cmu_mfc0_wfd_p, CLK_CON_MUX_MUX_CLKCMU_MFC0_WFD, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_BUSP, "mout_cmu_mif_busp",
+ mout_cmu_mif_busp_p, CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP, 0, 2),
+ MUX(CLK_MOUT_CMU_MIF_SWITCH, "mout_cmu_mif_switch",
+ mout_cmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3),
+ MUX(CLK_MOUT_CMU_NPU_BUS, "mout_cmu_npu_bus",
+ mout_cmu_npu_bus_p, CLK_CON_MUX_MUX_CLKCMU_NPU_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_PERIC0_BUS, "mout_cmu_peric0_bus",
+ mout_cmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC0_IP, "mout_cmu_peric0_ip",
+ mout_cmu_peric0_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC1_BUS, "mout_cmu_peric1_bus",
+ mout_cmu_peric1_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIC1_IP, "mout_cmu_peric1_ip",
+ mout_cmu_peric1_ip_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, 0, 1),
+ MUX(CLK_MOUT_CMU_PERIS_BUS, "mout_cmu_peris_bus",
+ mout_cmu_peris_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, 0, 1),
+ MUX(CLK_MOUT_CMU_SSP_BUS, "mout_cmu_ssp_bus",
+ mout_cmu_ssp_bus_p, CLK_CON_MUX_MUX_CLKCMU_SSP_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_TNR_BUS, "mout_cmu_tnr_bus",
+ mout_cmu_tnr_bus_p, CLK_CON_MUX_MUX_CLKCMU_TNR_BUS, 0, 3),
+ MUX(CLK_MOUT_CMU_VRA_BUS, "mout_cmu_vra_bus",
+ mout_cmu_vra_bus_p, CLK_CON_MUX_MUX_CLKCMU_VRA_BUS, 0, 2),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ /* SHARED0 region */
+ DIV(CLK_DOUT_CMU_SHARED0_DIV2, "dout_cmu_shared0_div2", "mout_pll_shared0",
+ CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV3, "dout_cmu_shared0_div3", "mout_pll_shared0",
+ CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED0_DIV4, "dout_cmu_shared0_div4", "dout_cmu_shared0_div2",
+ CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+
+ /* SHARED1 region */
+ DIV(CLK_DOUT_CMU_SHARED1_DIV2, "dout_cmu_shared1_div2", "mout_pll_shared1",
+ CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV3, "dout_cmu_shared1_div3", "mout_pll_shared1",
+ CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED1_DIV4, "dout_cmu_shared1_div4", "dout_cmu_shared1_div2",
+ CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+
+ /* SHARED2 region */
+ DIV(CLK_DOUT_CMU_SHARED2_DIV2, "dout_cmu_shared2_div2", "mout_pll_shared2",
+ CLK_CON_DIV_PLL_SHARED2_DIV2, 0, 1),
+
+ /* SHARED4 region */
+ DIV(CLK_DOUT_CMU_SHARED4_DIV2, "dout_cmu_shared4_div2", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV2, 0, 1),
+ DIV(CLK_DOUT_CMU_SHARED4_DIV3, "dout_cmu_shared4_div3", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV3, 0, 2),
+ DIV(CLK_DOUT_CMU_SHARED4_DIV4, "dout_cmu_shared4_div4", "mout_pll_shared4",
+ CLK_CON_DIV_PLL_SHARED4_DIV4, 0, 1),
+
+ DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus",
+ CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+ DIV(CLK_DOUT_CMU_AUD_CPU, "dout_cmu_aud_cpu", "gout_cmu_aud_cpu",
+ CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
+ DIV(CLK_DOUT_CMU_BUS0_BUS, "dout_cmu_bus0_bus", "gout_cmu_bus0_bus",
+ CLK_CON_DIV_CLKCMU_BUS0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS1_BUS, "dout_cmu_bus1_bus", "gout_cmu_bus1_bus",
+ CLK_CON_DIV_CLKCMU_BUS1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS1_SSS, "dout_cmu_bus1_sss", "gout_cmu_bus1_sss",
+ CLK_CON_DIV_CLKCMU_BUS1_SSS, 0, 4),
+ DIV(CLK_DOUT_CMU_CIS_CLK0, "dout_cmu_cis_clk0", "gout_cmu_cis_clk0",
+ CLK_CON_DIV_CLKCMU_CIS_CLK0, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK1, "dout_cmu_cis_clk1", "gout_cmu_cis_clk1",
+ CLK_CON_DIV_CLKCMU_CIS_CLK1, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK2, "dout_cmu_cis_clk2", "gout_cmu_cis_clk2",
+ CLK_CON_DIV_CLKCMU_CIS_CLK2, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK3, "dout_cmu_cis_clk3", "gout_cmu_cis_clk3",
+ CLK_CON_DIV_CLKCMU_CIS_CLK3, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK4, "dout_cmu_cis_clk4", "gout_cmu_cis_clk4",
+ CLK_CON_DIV_CLKCMU_CIS_CLK4, 0, 5),
+ DIV(CLK_DOUT_CMU_CIS_CLK5, "dout_cmu_cis_clk5", "gout_cmu_cis_clk5",
+ CLK_CON_DIV_CLKCMU_CIS_CLK5, 0, 5),
+ DIV(CLK_DOUT_CMU_CMU_BOOST, "dout_cmu_cmu_boost", "mout_cmu_cmu_boost",
+ CLK_CON_DIV_CLKCMU_CMU_BOOST, 0, 2),
+ DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_debug",
+ "gout_cmu_cpucl0_dbg_bus", CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
+ "gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
+ "gout_cmu_cpucl1_switch", CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CPUCL2_BUSP, "dout_cmu_cpucl2_busp",
+ "gout_cmu_cpucl2_busp", CLK_CON_DIV_CLKCMU_CPUCL2_BUSP, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL2_SWITCH, "dout_cmu_cpucl2_switch",
+ "gout_cmu_cpucl2_switch", CLK_CON_DIV_CLKCMU_CPUCL2_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_CSIS_BUS, "dout_cmu_csis_bus", "gout_cmu_csis_bus",
+ CLK_CON_DIV_CLKCMU_CSIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_CSIS_OIS_MCU, "dout_cmu_csis_ois_mcu",
+ "gout_cmu_csis_ois_mcu", CLK_CON_DIV_CLKCMU_CSIS_OIS_MCU, 0, 4),
+ DIV(CLK_DOUT_CMU_DNC_BUS, "dout_cmu_dnc_bus", "gout_cmu_dnc_bus",
+ CLK_CON_DIV_CLKCMU_DNC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DNC_BUSM, "dout_cmu_dnc_busm", "gout_cmu_dnc_busm",
+ CLK_CON_DIV_CLKCMU_DNC_BUSM, 0, 4),
+ DIV(CLK_DOUT_CMU_DNS_BUS, "dout_cmu_dns_bus", "gout_cmu_dns_bus",
+ CLK_CON_DIV_CLKCMU_DNS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DSP_BUS, "dout_cmu_dsp_bus", "gout_cmu_dsp_bus",
+ CLK_CON_DIV_CLKCMU_DSP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_G2D, "dout_cmu_g2d_g2d", "gout_cmu_g2d_g2d",
+ CLK_CON_DIV_CLKCMU_G2D_G2D, 0, 4),
+ DIV(CLK_DOUT_CMU_G2D_MSCL, "dout_cmu_g2d_mscl", "gout_cmu_g2d_mscl",
+ CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4),
+ DIV(CLK_DOUT_CMU_G3D_SWITCH, "dout_cmu_g3d_switch",
+ "gout_cmu_g3d_switch", CLK_CON_DIV_CLKCMU_G3D_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_HPM, "dout_cmu_hpm", "gout_cmu_hpm",
+ CLK_CON_DIV_CLKCMU_HPM, 0, 2),
+ DIV(CLK_DOUT_CMU_HSI0_BUS, "dout_cmu_hsi0_bus", "gout_cmu_hsi0_bus",
+ CLK_CON_DIV_CLKCMU_HSI0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_DPGTC, "dout_cmu_hsi0_dpgtc", "gout_cmu_hsi0_dpgtc",
+ CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI0_USB31DRD, "dout_cmu_hsi0_usb31drd",
+ "gout_cmu_hsi0_usb31drd", CLK_CON_DIV_CLKCMU_HSI0_USB31DRD, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
+ "gout_cmu_hsi0_usbdp_debug", CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
+ 0, 4),
+ DIV(CLK_DOUT_CMU_HSI1_BUS, "dout_cmu_hsi1_bus", "gout_cmu_hsi1_bus",
+ CLK_CON_DIV_CLKCMU_HSI1_BUS, 0, 3),
+ DIV(CLK_DOUT_CMU_HSI1_MMC_CARD, "dout_cmu_hsi1_mmc_card",
+ "gout_cmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
+ 0, 9),
+ DIV(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie", "gout_cmu_hsi1_pcie",
+ CLK_CON_DIV_CLKCMU_HSI1_PCIE, 0, 7),
+ DIV(CLK_DOUT_CMU_HSI1_UFS_CARD, "dout_cmu_hsi1_ufs_card",
+ "gout_cmu_hsi1_ufs_card", CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_HSI1_UFS_EMBD, "dout_cmu_hsi1_ufs_embd",
+ "gout_cmu_hsi1_ufs_embd", CLK_CON_DIV_CLKCMU_HSI1_UFS_EMBD,
+ 0, 3),
+ DIV(CLK_DOUT_CMU_HSI2_BUS, "dout_cmu_hsi2_bus", "gout_cmu_hsi2_bus",
+ CLK_CON_DIV_CLKCMU_HSI2_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie", "gout_cmu_hsi2_pcie",
+ CLK_CON_DIV_CLKCMU_HSI2_PCIE, 0, 7),
+ DIV(CLK_DOUT_CMU_IPP_BUS, "dout_cmu_ipp_bus", "gout_cmu_ipp_bus",
+ CLK_CON_DIV_CLKCMU_IPP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_ITP_BUS, "dout_cmu_itp_bus", "gout_cmu_itp_bus",
+ CLK_CON_DIV_CLKCMU_ITP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MCSC_BUS, "dout_cmu_mcsc_bus", "gout_cmu_mcsc_bus",
+ CLK_CON_DIV_CLKCMU_MCSC_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_MCSC_GDC, "dout_cmu_mcsc_gdc", "gout_cmu_mcsc_gdc",
+ CLK_CON_DIV_CLKCMU_MCSC_GDC, 0, 4),
+ DIV(CLK_DOUT_CMU_CMU_BOOST_CPU, "dout_cmu_cmu_boost_cpu",
+ "mout_cmu_cmu_boost_cpu", CLK_CON_DIV_CLKCMU_CMU_BOOST_CPU,
+ 0, 2),
+ DIV(CLK_DOUT_CMU_MFC0_MFC0, "dout_cmu_mfc0_mfc0", "gout_cmu_mfc0_mfc0",
+ CLK_CON_DIV_CLKCMU_MFC0_MFC0, 0, 4),
+ DIV(CLK_DOUT_CMU_MFC0_WFD, "dout_cmu_mfc0_wfd", "gout_cmu_mfc0_wfd",
+ CLK_CON_DIV_CLKCMU_MFC0_WFD, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_BUSP, "dout_cmu_mif_busp", "gout_cmu_mif_busp",
+ CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 4),
+ DIV(CLK_DOUT_CMU_NPU_BUS, "dout_cmu_npu_bus", "gout_cmu_npu_bus",
+ CLK_CON_DIV_CLKCMU_NPU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_BUS, "dout_cmu_peric0_bus", "gout_cmu_peric0_bus",
+ CLK_CON_DIV_CLKCMU_PERIC0_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC0_IP, "dout_cmu_peric0_ip", "gout_cmu_peric0_ip",
+ CLK_CON_DIV_CLKCMU_PERIC0_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_BUS, "dout_cmu_peric1_bus", "gout_cmu_peric1_bus",
+ CLK_CON_DIV_CLKCMU_PERIC1_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIC1_IP, "dout_cmu_peric1_ip", "gout_cmu_peric1_ip",
+ CLK_CON_DIV_CLKCMU_PERIC1_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERIS_BUS, "dout_cmu_peris_bus", "gout_cmu_peris_bus",
+ CLK_CON_DIV_CLKCMU_PERIS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_SSP_BUS, "dout_cmu_ssp_bus", "gout_cmu_ssp_bus",
+ CLK_CON_DIV_CLKCMU_SSP_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_TNR_BUS, "dout_cmu_tnr_bus", "gout_cmu_tnr_bus",
+ CLK_CON_DIV_CLKCMU_TNR_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_VRA_BUS, "dout_cmu_vra_bus", "gout_cmu_vra_bus",
+ CLK_CON_DIV_CLKCMU_VRA_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_DPU, "dout_cmu_clkcmu_dpu", "gout_cmu_dpu",
+ CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 4),
+};
+
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CMU_APM_BUS, "gout_cmu_apm_bus", "mout_cmu_apm_bus",
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_AUD_CPU, "gout_cmu_aud_cpu", "mout_cmu_aud_cpu",
+ CLK_CON_GAT_GATE_CLKCMU_AUD_CPU, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_BUS0_BUS, "gout_cmu_bus0_bus", "mout_cmu_bus0_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS0_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUS1_BUS, "gout_cmu_bus1_bus", "mout_cmu_bus1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_BUS1_SSS, "gout_cmu_bus1_sss", "mout_cmu_bus1_sss",
+ CLK_CON_GAT_GATE_CLKCMU_BUS1_SSS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK0, "gout_cmu_cis_clk0", "mout_cmu_cis_clk0",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK0, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK1, "gout_cmu_cis_clk1", "mout_cmu_cis_clk1",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK1, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK2, "gout_cmu_cis_clk2", "mout_cmu_cis_clk2",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK2, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK3, "gout_cmu_cis_clk3", "mout_cmu_cis_clk3",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK3, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK4, "gout_cmu_cis_clk4", "mout_cmu_cis_clk4",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK4, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CIS_CLK5, "gout_cmu_cis_clk5", "mout_cmu_cis_clk5",
+ CLK_CON_GAT_GATE_CLKCMU_CIS_CLK5, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CORE_BUS, "gout_cmu_core_bus", "mout_cmu_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_DBG_BUS, "gout_cmu_cpucl0_dbg_bus",
+ "mout_cmu_cpucl0_dbg_bus", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_DBG_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CPUCL0_SWITCH, "gout_cmu_cpucl0_switch",
+ "mout_cmu_cpucl0_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL1_SWITCH, "gout_cmu_cpucl1_switch",
+ "mout_cmu_cpucl1_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL2_BUSP, "gout_cmu_cpucl2_busp",
+ "mout_cmu_cpucl2_busp", CLK_CON_GAT_GATE_CLKCMU_CPUCL2_BUSP,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CPUCL2_SWITCH, "gout_cmu_cpucl2_switch",
+ "mout_cmu_cpucl2_switch", CLK_CON_GAT_GATE_CLKCMU_CPUCL2_SWITCH,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_CSIS_BUS, "gout_cmu_csis_bus", "mout_cmu_csis_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CSIS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_CSIS_OIS_MCU, "gout_cmu_csis_ois_mcu",
+ "mout_cmu_csis_ois_mcu", CLK_CON_GAT_GATE_CLKCMU_CSIS_OIS_MCU,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNC_BUS, "gout_cmu_dnc_bus", "mout_cmu_dnc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNC_BUSM, "gout_cmu_dnc_busm", "mout_cmu_dnc_busm",
+ CLK_CON_GAT_GATE_CLKCMU_DNC_BUSM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DNS_BUS, "gout_cmu_dns_bus", "mout_cmu_dns_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DNS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU, "gout_cmu_dpu", "mout_cmu_dpu",
+ CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_DPU_BUS, "gout_cmu_dpu_bus", "mout_cmu_dpu_alt",
+ CLK_CON_GAT_GATE_CLKCMU_DPU_BUS, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_DSP_BUS, "gout_cmu_dsp_bus", "mout_cmu_dsp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_DSP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_G2D, "gout_cmu_g2d_g2d", "mout_cmu_g2d_g2d",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G2D_MSCL, "gout_cmu_g2d_mscl", "mout_cmu_g2d_mscl",
+ CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_G3D_SWITCH, "gout_cmu_g3d_switch",
+ "fout_shared2_pll", CLK_CON_GAT_GATE_CLKCMU_G3D_SWITCH,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HPM, "gout_cmu_hpm", "mout_cmu_hpm",
+ CLK_CON_GAT_GATE_CLKCMU_HPM, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_BUS, "gout_cmu_hsi0_bus",
+ "mout_cmu_hsi0_bus", CLK_CON_GAT_GATE_CLKCMU_HSI0_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_DPGTC, "gout_cmu_hsi0_dpgtc",
+ "mout_cmu_hsi0_dpgtc", CLK_CON_GAT_GATE_CLKCMU_HSI0_DPGTC,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_USB31DRD, "gout_cmu_hsi0_usb31drd",
+ "mout_cmu_hsi0_usb31drd", CLK_CON_GAT_GATE_CLKCMU_HSI0_USB31DRD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI0_USBDP_DEBUG, "gout_cmu_hsi0_usbdp_debug",
+ "mout_cmu_hsi0_usbdp_debug", CLK_CON_GAT_GATE_CLKCMU_HSI0_USBDP_DEBUG,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_BUS, "gout_cmu_hsi1_bus", "mout_cmu_hsi1_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI1_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_MMC_CARD, "gout_cmu_hsi1_mmc_card",
+ "mout_cmu_hsi1_mmc_card", CLK_CON_GAT_GATE_CLKCMU_HSI1_MMC_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_PCIE, "gout_cmu_hsi1_pcie",
+ "mout_cmu_hsi1_pcie", CLK_CON_GAT_GATE_CLKCMU_HSI1_PCIE,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_UFS_CARD, "gout_cmu_hsi1_ufs_card",
+ "mout_cmu_hsi1_ufs_card", CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_CARD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI1_UFS_EMBD, "gout_cmu_hsi1_ufs_embd",
+ "mout_cmu_hsi1_ufs_embd", CLK_CON_GAT_GATE_CLKCMU_HSI1_UFS_EMBD,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_BUS, "gout_cmu_hsi2_bus", "mout_cmu_hsi2_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI2_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_HSI2_PCIE, "gout_cmu_hsi2_pcie",
+ "mout_cmu_hsi2_pcie", CLK_CON_GAT_GATE_CLKCMU_HSI2_PCIE,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_IPP_BUS, "gout_cmu_ipp_bus", "mout_cmu_ipp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IPP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_ITP_BUS, "gout_cmu_itp_bus", "mout_cmu_itp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_ITP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MCSC_BUS, "gout_cmu_mcsc_bus", "mout_cmu_mcsc_bus",
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MCSC_GDC, "gout_cmu_mcsc_gdc", "mout_cmu_mcsc_gdc",
+ CLK_CON_GAT_GATE_CLKCMU_MCSC_GDC, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC0_MFC0, "gout_cmu_mfc0_mfc0",
+ "mout_cmu_mfc0_mfc0", CLK_CON_GAT_GATE_CLKCMU_MFC0_MFC0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MFC0_WFD, "gout_cmu_mfc0_wfd", "mout_cmu_mfc0_wfd",
+ CLK_CON_GAT_GATE_CLKCMU_MFC0_WFD, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_MIF_BUSP, "gout_cmu_mif_busp", "mout_cmu_mif_busp",
+ CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_NPU_BUS, "gout_cmu_npu_bus", "mout_cmu_npu_bus",
+ CLK_CON_GAT_GATE_CLKCMU_NPU_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_BUS, "gout_cmu_peric0_bus",
+ "mout_cmu_peric0_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC0_IP, "gout_cmu_peric0_ip",
+ "mout_cmu_peric0_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_BUS, "gout_cmu_peric1_bus",
+ "mout_cmu_peric1_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIC1_IP, "gout_cmu_peric1_ip",
+ "mout_cmu_peric1_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP,
+ 21, 0, 0),
+ GATE(CLK_GOUT_CMU_PERIS_BUS, "gout_cmu_peris_bus",
+ "mout_cmu_peris_bus", CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMU_SSP_BUS, "gout_cmu_ssp_bus", "mout_cmu_ssp_bus",
+ CLK_CON_GAT_GATE_CLKCMU_SSP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_TNR_BUS, "gout_cmu_tnr_bus", "mout_cmu_tnr_bus",
+ CLK_CON_GAT_GATE_CLKCMU_TNR_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CMU_VRA_BUS, "gout_cmu_vra_bus", "mout_cmu_vra_bus",
+ CLK_CON_GAT_GATE_CLKCMU_VRA_BUS, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = CLKS_NR_TOP,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos990_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos990_cmu_top, "samsung,exynos990-cmu-top",
+ exynos990_cmu_top_init);
+
+/* ---- CMU_HSI0 ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_HSI0 (0x10a00000) */
+#define PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER 0x0630
+#define PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER 0x0610
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK 0x2004
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK 0x2044
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2 0x2024
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL 0x202c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40 0x2034
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK 0x2040
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY 0x2030
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK 0x2048
+#define CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL 0x2038
+
+static const unsigned long hsi0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER,
+ PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
+};
+
+PNAME(mout_hsi0_bus_user_p) = { "oscclk", "dout_cmu_hsi0_bus" };
+PNAME(mout_hsi0_usb31drd_user_p) = { "oscclk", "dout_cmu_hsi0_usb31drd" };
+PNAME(mout_hsi0_usbdp_debug_user_p) = { "oscclk",
+ "dout_cmu_hsi0_usbdp_debug" };
+PNAME(mout_hsi0_dpgtc_user_p) = { "oscclk", "dout_cmu_hsi0_dpgtc" };
+
+static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI0_BUS_USER, "mout_hsi0_bus_user",
+ mout_hsi0_bus_user_p, PLL_CON0_MUX_CLKCMU_HSI0_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_USB31DRD_USER, "mout_hsi0_usb31drd_user",
+ mout_hsi0_usb31drd_user_p, PLL_CON0_MUX_CLKCMU_HSI0_USB31DRD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_USBDP_DEBUG_USER, "mout_hsi0_usbdp_debug_user",
+ mout_hsi0_usbdp_debug_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI0_USBDP_DEBUG_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI0_DPGTC_USER, "mout_hsi0_dpgtc_user",
+ mout_hsi0_dpgtc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_DPGTC_USER,
+ 4, 1),
+};
+
+static const struct samsung_gate_clock hsi0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_HSI0_DP_LINK_DP_GTC_CLK,
+ "gout_hsi0_dp_link_dp_gtc_clk", "mout_hsi0_dpgtc_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_DP_GTC_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_DP_LINK_PCLK,
+ "gout_hsi0_dp_link_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_DP_LINK_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK,
+ "gout_hsi0_d_tzpc_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_D_TZPC_HSI0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_LHM_AXI_P_HSI0_CLK,
+ "gout_hsi0_lhm_axi_p_hsi0_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHM_AXI_P_HSI0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS1_ACLK,
+ "gout_hsi0_ppmu_hsi0_bus1_aclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_PPMU_HSI0_BUS1_PCLK,
+ "gout_hsi0_ppmu_hsi0_bus1_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_PPMU_HSI0_BUS1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK,
+ "gout_hsi0_clk_hsi0_bus_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_RSTNSYNC_CLK_HSI0_BUS_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2,
+ "gout_hsi0_sysmmu_usb_clk_s2", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_SYSREG_HSI0_PCLK,
+ "gout_hsi0_sysreg_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL,
+ "gout_hsi0_usb31drd_aclk_phyctrl", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY,
+ "gout_hsi0_usb31drd_bus_clk_early",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USB31DRD_REF_CLK_40,
+ "gout_hsi0_usb31drd_usb31drd_ref_clk_40",
+ "mout_hsi0_usb31drd_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_REF_SOC_PLL,
+ "gout_hsi0_usb31drd_usbdpphy_ref_soc_pll",
+ "mout_hsi0_usbdp_debug_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBDPPHY_SCL_APB,
+ "gout_hsi0_usb31drd_ipclkport_i_usbdpphy_scl_apb_pclk",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_USB31DRD_USBPCS_APB_CLK,
+ "gout_hsi0_usb31drd_usbpcs_apb_clk",
+ "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_VGEN_LITE_HSI0_CLK,
+ "gout_hsi0_vgen_lite_ipclkport_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_VGEN_LITE_HSI0_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_HSI0_CMU_HSI0_PCLK,
+ "gout_hsi0_cmu_hsi0_pclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_HSI0_CMU_HSI0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_XIU_D_HSI0_ACLK,
+ "gout_hsi0_xiu_d_hsi0_aclk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
+ .mux_clks = hsi0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks),
+ .gate_clks = hsi0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(hsi0_gate_clks),
+ .nr_clk_ids = CLKS_NR_HSI0,
+ .clk_regs = hsi0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ----- platform_driver ----- */
+
+static int __init exynos990_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos990_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos990-cmu-hsi0",
+ .data = &hsi0_cmu_info,
+ },
+ { },
+};
+
+static struct platform_driver exynos990_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos990-cmu",
+ .of_match_table = exynos990_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos990_cmu_probe,
+};
+
+static int __init exynos990_cmu_init(void)
+{
+ return platform_driver_register(&exynos990_cmu_driver);
+}
+
+core_initcall(exynos990_cmu_init);
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index 7ba9748c0526..2a8bfd5d9abc 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -19,6 +19,10 @@
/* NOTE: Must be equal to the last clock ID increased by one */
#define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1)
#define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1)
+#define CLKS_NR_PERIC1 (CLK_DOUT_PERIC1_I3C + 1)
+#define CLKS_NR_MISC (CLK_DOUT_MISC_OSC_DIV2 + 1)
+#define CLKS_NR_HSI0 (CLK_DOUT_HSI0_PCIE_APB + 1)
+#define CLKS_NR_HSI1 (CLK_MOUT_HSI1_USBDRD + 1)
/* ---- CMU_TOP ------------------------------------------------------------ */
@@ -974,6 +978,8 @@ static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initcon
"mout_shared5_pll", 1, 3, 0),
FFACTOR(DOUT_SHARED5_DIV4, "dout_shared5_div4",
"mout_shared5_pll", 1, 4, 0),
+ FFACTOR(DOUT_TCXO_DIV2, "dout_tcxo_div2",
+ "oscclk", 1, 2, 0),
};
static const struct samsung_cmu_info top_cmu_info __initconst = {
@@ -1139,6 +1145,277 @@ static const struct samsung_cmu_info peric0_cmu_info __initconst = {
.clk_name = "noc",
};
+/* ---- CMU_PERIC1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10C00000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER 0x610
+#define CLK_CON_MUX_MUX_CLK_PERIC1_I3C 0x1000
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI 0x1004
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI 0x1008
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI 0x100c
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI12_USI 0x1010
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI13_USI 0x1014
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI14_USI 0x1018
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI15_USI 0x101c
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI16_USI 0x1020
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI17_USI 0x1024
+#define CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C 0x1028
+#define CLK_CON_DIV_DIV_CLK_PERIC1_I3C 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI14_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI15_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C 0x1828
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_PERIC1_I3C,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI12_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI13_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI14_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI15_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI16_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI17_USI,
+ CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_I3C,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI14_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI15_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC1 */
+PNAME(mout_peric1_ip_user_p) = { "oscclk", "dout_clkcmu_peric1_ip" };
+PNAME(mout_peric1_noc_user_p) = { "oscclk", "dout_clkcmu_peric1_noc" };
+PNAME(mout_peric1_usi_p) = { "oscclk", "mout_peric1_ip_user" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_IP_USER, "mout_peric1_ip_user",
+ mout_peric1_ip_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER, 4, 1),
+ MUX(CLK_MOUT_PERIC1_NOC_USER, "mout_peric1_noc_user",
+ mout_peric1_noc_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_NOC_USER, 4, 1),
+ /* USI09 ~ USI17 */
+ MUX(CLK_MOUT_PERIC1_USI09_USI, "mout_peric1_usi09_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI10_USI, "mout_peric1_usi10_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI11_USI, "mout_peric1_usi11_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI12_USI, "mout_peric1_usi12_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI12_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI13_USI, "mout_peric1_usi13_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI13_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI14_USI, "mout_peric1_usi14_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI14_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI15_USI, "mout_peric1_usi15_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI15_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI16_USI, "mout_peric1_usi16_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI16_USI, 0, 1),
+ MUX(CLK_MOUT_PERIC1_USI17_USI, "mout_peric1_usi17_usi",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI17_USI, 0, 1),
+ /* USI_I2C */
+ MUX(CLK_MOUT_PERIC1_USI_I2C, "mout_peric1_usi_i2c",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C, 0, 1),
+ /* USI_I3C */
+ MUX(CLK_MOUT_PERIC1_I3C, "mout_peric1_i3c",
+ mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_I3C, 0, 1),
+};
+
+static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ /* USI09 ~ USI17 */
+ DIV(CLK_DOUT_PERIC1_USI09_USI, "dout_peric1_usi09_usi",
+ "mout_peric1_usi09_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI10_USI, "dout_peric1_usi10_usi",
+ "mout_peric1_usi10_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI11_USI, "dout_peric1_usi11_usi",
+ "mout_peric1_usi11_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI12_USI, "dout_peric1_usi12_usi",
+ "mout_peric1_usi12_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI13_USI, "dout_peric1_usi13_usi",
+ "mout_peric1_usi13_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI14_USI, "dout_peric1_usi14_usi",
+ "mout_peric1_usi14_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI14_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI15_USI, "dout_peric1_usi15_usi",
+ "mout_peric1_usi15_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI15_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI16_USI, "dout_peric1_usi16_usi",
+ "mout_peric1_usi16_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI17_USI, "dout_peric1_usi17_usi",
+ "mout_peric1_usi17_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ 0, 4),
+ /* USI_I2C */
+ DIV(CLK_DOUT_PERIC1_USI_I2C, "dout_peric1_usi_i2c",
+ "mout_peric1_usi_i2c", CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C, 0, 4),
+ /* USI_I3C */
+ DIV(CLK_DOUT_PERIC1_I3C, "dout_peric1_i3c",
+ "mout_peric1_i3c", CLK_CON_DIV_DIV_CLK_PERIC1_I3C, 0, 4),
+};
+
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .div_clks = peric1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric1_div_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_MISC --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_MISC (0x10020000) */
+#define PLL_CON0_MUX_CLKCMU_MISC_NOC_USER 0x600
+#define CLK_CON_MUX_MUX_CLK_MISC_GIC 0x1000
+#define CLK_CON_DIV_CLKCMU_OTP 0x1800
+#define CLK_CON_DIV_DIV_CLK_MISC_NOCP 0x1804
+#define CLK_CON_DIV_DIV_CLK_MISC_OSC_DIV2 0x1808
+
+static const unsigned long misc_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_MISC_NOC_USER,
+ CLK_CON_MUX_MUX_CLK_MISC_GIC,
+ CLK_CON_DIV_CLKCMU_OTP,
+ CLK_CON_DIV_DIV_CLK_MISC_NOCP,
+ CLK_CON_DIV_DIV_CLK_MISC_OSC_DIV2,
+};
+
+/* List of parent clocks for Muxes in CMU_MISC */
+PNAME(mout_misc_noc_user_p) = { "oscclk", "dout_clkcmu_misc_noc" };
+PNAME(mout_misc_gic_p) = { "dout_misc_nocp", "oscclk" };
+
+static const struct samsung_mux_clock misc_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MISC_NOC_USER, "mout_misc_noc_user",
+ mout_misc_noc_user_p, PLL_CON0_MUX_CLKCMU_MISC_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_MISC_GIC, "mout_misc_gic",
+ mout_misc_gic_p, CLK_CON_MUX_MUX_CLK_MISC_GIC, 0, 1),
+};
+
+static const struct samsung_div_clock misc_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MISC_NOCP, "dout_misc_nocp",
+ "mout_misc_noc_user", CLK_CON_DIV_DIV_CLK_MISC_NOCP,
+ 0, 3),
+};
+
+static const struct samsung_fixed_factor_clock misc_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_MISC_OTP, "dout_misc_otp",
+ "oscclk", 1, 10, 0),
+ FFACTOR(CLK_DOUT_MISC_OSC_DIV2, "dout_misc_osc_div2",
+ "oscclk", 1, 2, 0),
+};
+
+static const struct samsung_cmu_info misc_cmu_info __initconst = {
+ .mux_clks = misc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(misc_mux_clks),
+ .div_clks = misc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(misc_div_clks),
+ .fixed_factor_clks = misc_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(misc_fixed_factor_clks),
+ .nr_clk_ids = CLKS_NR_MISC,
+ .clk_regs = misc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(misc_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_HSI0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI0 (0x16000000) */
+#define PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER 0x600
+#define CLK_CON_DIV_DIV_CLK_HSI0_PCIE_APB 0x1800
+
+static const unsigned long hsi0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER,
+ CLK_CON_DIV_DIV_CLK_HSI0_PCIE_APB,
+};
+
+/* List of parent clocks for Muxes in CMU_HSI0 */
+PNAME(mout_hsi0_noc_user_p) = { "oscclk", "dout_clkcmu_hsi0_noc" };
+
+static const struct samsung_mux_clock hsi0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI0_NOC_USER, "mout_hsi0_noc_user",
+ mout_hsi0_noc_user_p, PLL_CON0_MUX_CLKCMU_HSI0_NOC_USER, 4, 1),
+};
+
+static const struct samsung_div_clock hsi0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_HSI0_PCIE_APB, "dout_hsi0_pcie_apb",
+ "mout_hsi0_noc_user", CLK_CON_DIV_DIV_CLK_HSI0_PCIE_APB,
+ 0, 4),
+};
+
+static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
+ .mux_clks = hsi0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi0_mux_clks),
+ .div_clks = hsi0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(hsi0_div_clks),
+ .nr_clk_ids = CLKS_NR_HSI0,
+ .clk_regs = hsi0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi0_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_HSI1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI1 (0x16400000) */
+#define PLL_CON0_MUX_CLKCMU_HSI1_MMC_CARD_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_HSI1_NOC_USER 0x610
+#define PLL_CON0_MUX_CLKCMU_HSI1_USBDRD_USER 0x620
+#define CLK_CON_MUX_MUX_CLK_HSI1_USBDRD 0x1000
+
+static const unsigned long hsi1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI1_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_HSI1_NOC_USER,
+ PLL_CON0_MUX_CLKCMU_HSI1_USBDRD_USER,
+ CLK_CON_MUX_MUX_CLK_HSI1_USBDRD,
+};
+
+/* List of parent clocks for Muxes in CMU_HSI1 */
+PNAME(mout_hsi1_mmc_card_user_p) = { "oscclk", "dout_clkcmu_hsi1_mmc_card" };
+PNAME(mout_hsi1_noc_user_p) = { "oscclk", "dout_clkcmu_hsi1_noc" };
+PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "mout_clkcmu_hsi1_usbdrd" };
+PNAME(mout_hsi1_usbdrd_p) = { "dout_tcxo_div2", "mout_hsi1_usbdrd_user" };
+
+static const struct samsung_mux_clock hsi1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI1_MMC_CARD_USER, "mout_hsi1_mmc_card_user",
+ mout_hsi1_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_HSI1_MMC_CARD_USER, 4, 1),
+ MUX(CLK_MOUT_HSI1_NOC_USER, "mout_hsi1_noc_user",
+ mout_hsi1_noc_user_p, PLL_CON0_MUX_CLKCMU_HSI1_NOC_USER, 4, 1),
+ MUX(CLK_MOUT_HSI1_USBDRD_USER, "mout_hsi1_usbdrd_user",
+ mout_hsi1_usbdrd_user_p, PLL_CON0_MUX_CLKCMU_HSI1_USBDRD_USER, 4, 1),
+ MUX(CLK_MOUT_HSI1_USBDRD, "mout_hsi1_usbdrd",
+ mout_hsi1_usbdrd_p, CLK_CON_MUX_MUX_CLK_HSI1_USBDRD, 4, 1),
+};
+
+static const struct samsung_cmu_info hsi1_cmu_info __initconst = {
+ .mux_clks = hsi1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi1_mux_clks),
+ .nr_clk_ids = CLKS_NR_HSI1,
+ .clk_regs = hsi1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi1_clk_regs),
+ .clk_name = "noc",
+};
+
static int __init exynosautov920_cmu_probe(struct platform_device *pdev)
{
const struct samsung_cmu_info *info;
@@ -1154,7 +1431,20 @@ static const struct of_device_id exynosautov920_cmu_of_match[] = {
{
.compatible = "samsung,exynosautov920-cmu-peric0",
.data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-peric1",
+ .data = &peric1_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-misc",
+ .data = &misc_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-hsi0",
+ .data = &hsi0_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-hsi1",
+ .data = &hsi1_cmu_info,
},
+ { }
};
static struct platform_driver exynosautov920_cmu_driver __refdata = {
diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c
index 6f984cfcd33c..9a6006c298c2 100644
--- a/drivers/clk/samsung/clk-fsd.c
+++ b/drivers/clk/samsung/clk-fsd.c
@@ -82,6 +82,15 @@
#define GAT_CMU_NS_BRDG_CMU_IPCLKPORT_CLK__PSOC_CMU__CLK_CMU 0x205c
#define GAT_CMU_SYSREG_CMU_IPCLKPORT_PCLK 0x2060
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CLKS_NR_CMU (GAT_CMU_FSYS0_SHARED0DIV4 + 1)
+#define CLKS_NR_PERIC (PERIC_DOUT_RGMII_CLK + 1)
+#define CLKS_NR_FSYS0 (FSYS0_DOUT_FSYS0_PERIBUS_GRP + 1)
+#define CLKS_NR_FSYS1 (PCIE_LINK1_IPCLKPORT_SLV_ACLK + 1)
+#define CLKS_NR_IMEM (IMEM_TMU_GT_IPCLKPORT_I_CLK_TS + 1)
+#define CLKS_NR_MFC (MFC_MFC_IPCLKPORT_ACLK + 1)
+#define CLKS_NR_CAM_CSI (CAM_CSI2_3_IPCLKPORT_I_ACLK + 1)
+
static const unsigned long cmu_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_SHARED0,
PLL_LOCKTIME_PLL_SHARED1,
@@ -300,7 +309,7 @@ static const struct samsung_cmu_info cmu_cmu_info __initconst = {
.nr_div_clks = ARRAY_SIZE(cmu_div_clks),
.gate_clks = cmu_gate_clks,
.nr_gate_clks = ARRAY_SIZE(cmu_gate_clks),
- .nr_clk_ids = CMU_NR_CLK,
+ .nr_clk_ids = CLKS_NR_CMU,
.clk_regs = cmu_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cmu_clk_regs),
};
@@ -665,7 +674,7 @@ static const struct samsung_cmu_info peric_cmu_info __initconst = {
.nr_gate_clks = ARRAY_SIZE(peric_gate_clks),
.fixed_clks = peric_fixed_clks,
.nr_fixed_clks = ARRAY_SIZE(peric_fixed_clks),
- .nr_clk_ids = PERIC_NR_CLK,
+ .nr_clk_ids = CLKS_NR_PERIC,
.clk_regs = peric_clk_regs,
.nr_clk_regs = ARRAY_SIZE(peric_clk_regs),
.clk_name = "dout_cmu_pll_shared0_div4",
@@ -964,7 +973,7 @@ static const struct samsung_cmu_info fsys0_cmu_info __initconst = {
.nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
.fixed_clks = fsys0_fixed_clks,
.nr_fixed_clks = ARRAY_SIZE(fsys0_fixed_clks),
- .nr_clk_ids = FSYS0_NR_CLK,
+ .nr_clk_ids = CLKS_NR_FSYS0,
.clk_regs = fsys0_clk_regs,
.nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
.clk_name = "dout_cmu_fsys0_shared1div4",
@@ -1136,7 +1145,7 @@ static const struct samsung_cmu_info fsys1_cmu_info __initconst = {
.nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
.fixed_clks = fsys1_fixed_clks,
.nr_fixed_clks = ARRAY_SIZE(fsys1_fixed_clks),
- .nr_clk_ids = FSYS1_NR_CLK,
+ .nr_clk_ids = CLKS_NR_FSYS1,
.clk_regs = fsys1_clk_regs,
.nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
.clk_name = "dout_cmu_fsys1_shared0div4",
@@ -1413,7 +1422,7 @@ static const struct samsung_cmu_info imem_cmu_info __initconst = {
.nr_div_clks = ARRAY_SIZE(imem_div_clks),
.gate_clks = imem_gate_clks,
.nr_gate_clks = ARRAY_SIZE(imem_gate_clks),
- .nr_clk_ids = IMEM_NR_CLK,
+ .nr_clk_ids = CLKS_NR_IMEM,
.clk_regs = imem_clk_regs,
.nr_clk_regs = ARRAY_SIZE(imem_clk_regs),
};
@@ -1538,7 +1547,7 @@ static const struct samsung_cmu_info mfc_cmu_info __initconst = {
.nr_div_clks = ARRAY_SIZE(mfc_div_clks),
.gate_clks = mfc_gate_clks,
.nr_gate_clks = ARRAY_SIZE(mfc_gate_clks),
- .nr_clk_ids = MFC_NR_CLK,
+ .nr_clk_ids = CLKS_NR_MFC,
.clk_regs = mfc_clk_regs,
.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs),
};
@@ -1742,7 +1751,7 @@ static const struct samsung_cmu_info cam_csi_cmu_info __initconst = {
.nr_div_clks = ARRAY_SIZE(cam_csi_div_clks),
.gate_clks = cam_csi_gate_clks,
.nr_gate_clks = ARRAY_SIZE(cam_csi_gate_clks),
- .nr_clk_ids = CAM_CSI_NR_CLK,
+ .nr_clk_ids = CLKS_NR_CAM_CSI,
.clk_regs = cam_csi_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cam_csi_clk_regs),
};
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 85098c61c15e..86b39edba122 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -2775,11 +2775,11 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = {
GATE(CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_ACLK,
"gout_hsi2_qe_ufs_embd_hsi2_aclk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_ACLK,
- 21, 0, 0),
+ 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_PCLK,
"gout_hsi2_qe_ufs_embd_hsi2_pclk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_QE_UFS_EMBD_HSI2_IPCLKPORT_PCLK,
- 21, 0, 0),
+ 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_HSI2_CLK_HSI2_BUS_CLK,
"gout_hsi2_clk_hsi2_bus_clk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_RSTNSYNC_CLK_HSI2_BUS_IPCLKPORT_CLK,
@@ -2806,7 +2806,7 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = {
GATE(CLK_GOUT_HSI2_SYSREG_HSI2_PCLK,
"gout_hsi2_sysreg_hsi2_pclk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_SYSREG_HSI2_IPCLKPORT_PCLK,
- 21, 0, 0),
+ 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_ACLK,
"gout_hsi2_uasc_pcie_gen4a_dbi_1_aclk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_UASC_PCIE_GEN4A_DBI_1_IPCLKPORT_ACLK,
@@ -2842,7 +2842,7 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = {
GATE(CLK_GOUT_HSI2_UFS_EMBD_I_ACLK,
"gout_hsi2_ufs_embd_i_aclk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_ACLK,
- 21, 0, 0),
+ 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_HSI2_UFS_EMBD_I_CLK_UNIPRO,
"gout_hsi2_ufs_embd_i_clk_unipro", "mout_hsi2_ufs_embd_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_CLK_UNIPRO,
@@ -2850,7 +2850,7 @@ static const struct samsung_gate_clock hsi2_gate_clks[] __initconst = {
GATE(CLK_GOUT_HSI2_UFS_EMBD_I_FMP_CLK,
"gout_hsi2_ufs_embd_i_fmp_clk", "mout_hsi2_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI2_UID_UFS_EMBD_IPCLKPORT_I_FMP_CLK,
- 21, 0, 0),
+ 21, CLK_IS_CRITICAL, 0),
/* TODO: should have a driver for this */
GATE(CLK_GOUT_HSI2_XIU_D_HSI2_ACLK,
"gout_hsi2_xiu_d_hsi2_aclk", "mout_hsi2_bus_user",
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index cca3e630922c..2e94bba6c396 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -430,7 +430,10 @@ static const struct clk_ops samsung_pll36xx_clk_min_ops = {
#define PLL0822X_LOCK_STAT_SHIFT (29)
#define PLL0822X_ENABLE_SHIFT (31)
-/* PLL1418x is similar to PLL0822x, except that MDIV is one bit smaller */
+/*
+ * PLL1418x, PLL0717x and PLL0718x are similar
+ * to PLL0822x, except that MDIV is one bit smaller
+ */
#define PLL1418X_MDIV_MASK (0x1FF)
static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
@@ -441,10 +444,14 @@ static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
u64 fvco = parent_rate;
pll_con3 = readl_relaxed(pll->con_reg);
- if (pll->type != pll_1418x)
+
+ if (pll->type != pll_1418x &&
+ pll->type != pll_0717x &&
+ pll->type != pll_0718x)
mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK;
else
mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL1418X_MDIV_MASK;
+
pdiv = (pll_con3 >> PLL0822X_PDIV_SHIFT) & PLL0822X_PDIV_MASK;
sdiv = (pll_con3 >> PLL0822X_SDIV_SHIFT) & PLL0822X_SDIV_MASK;
@@ -1370,11 +1377,16 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
break;
case pll_1417x:
case pll_1418x:
+ case pll_1051x:
+ case pll_1052x:
case pll_0818x:
case pll_0822x:
case pll_0516x:
case pll_0517x:
case pll_0518x:
+ case pll_0717x:
+ case pll_0718x:
+ case pll_0732x:
pll->enable_offs = PLL0822X_ENABLE_SHIFT;
pll->lock_offs = PLL0822X_LOCK_STAT_SHIFT;
if (!pll->rate_table)
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 3481941ba07a..6ddc54d173a0 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -43,6 +43,11 @@ enum samsung_pll_type {
pll_0517x,
pll_0518x,
pll_531x,
+ pll_1051x,
+ pll_1052x,
+ pll_0717x,
+ pll_0718x,
+ pll_0732x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index d27a1f73f077..e2ec8fe32e39 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -3,7 +3,7 @@
* Copyright (c) 2013 Tomasz Figa <tomasz.figa at gmail.com>
*
* Common Clock Framework support for all S3C64xx SoCs.
-*/
+ */
#include <linux/slab.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index b31c00ea331f..d19a3d9fd452 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -8,7 +8,7 @@
* Author: Padmavathi Venna <padma.v@samsung.com>
*
* Driver for Audio Subsystem Clock Controller of S5PV210-compatible SoCs.
-*/
+ */
#include <linux/io.h>
#include <linux/clk.h>
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index afa5760ed3a1..283c523763e6 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -6,7 +6,7 @@
*
* This file includes utility functions to register clocks to common
* clock framework for Samsung platforms.
-*/
+ */
#include <linux/slab.h>
#include <linux/clkdev.h>
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index b028f25c658a..62eed964c3d0 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -35,7 +35,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
- unsigned long divf, divq, reg;
+ u32 divf, divq, reg;
unsigned long long vco_freq;
/* read VCO1 reg for numerator and denominator */
diff --git a/drivers/clk/sophgo/clk-sg2042-pll.c b/drivers/clk/sophgo/clk-sg2042-pll.c
index ff9deeef509b..1537f4f05860 100644
--- a/drivers/clk/sophgo/clk-sg2042-pll.c
+++ b/drivers/clk/sophgo/clk-sg2042-pll.c
@@ -153,7 +153,7 @@ static unsigned long sg2042_pll_recalc_rate(unsigned int reg_value,
sg2042_pll_ctrl_decode(reg_value, &ctrl_table);
- numerator = parent_rate * ctrl_table.fbdiv;
+ numerator = (u64)parent_rate * ctrl_table.fbdiv;
denominator = ctrl_table.refdiv * ctrl_table.postdiv1 * ctrl_table.postdiv2;
do_div(numerator, denominator);
return numerator;
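The (u64) cast above matters on 32-bit kernels, where unsigned long is 32 bits: parent_rate * ctrl_table.fbdiv is then evaluated in 32-bit arithmetic and wraps before do_div() ever sees it. A standalone sketch of the failure mode, with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t parent_rate = 25000000;	/* 25 MHz reference */
	uint32_t fbdiv = 250;			/* feedback divider */

	/* cast applied after the 32-bit multiply: product wraps mod 2^32 */
	uint64_t wrong = (uint64_t)(parent_rate * fbdiv);
	/* widening one operand first keeps the full 6.25e9 product */
	uint64_t right = (uint64_t)parent_rate * fbdiv;

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}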
diff --git a/drivers/clk/starfive/clk-starfive-jh7100-audio.c b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
index 1fcf4e62f347..7de23f6749aa 100644
--- a/drivers/clk/starfive/clk-starfive-jh7100-audio.c
+++ b/drivers/clk/starfive/clk-starfive-jh7100-audio.c
@@ -84,17 +84,6 @@ static const struct jh71x0_clk_data jh7100_audclk_data[] = {
JH7100_AUDCLK_AUDIO_12288),
};
-static struct clk_hw *jh7100_audclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7100_AUDCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7100_audclk_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -106,6 +95,7 @@ static int jh7100_audclk_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7100_AUDCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -146,7 +136,7 @@ static int jh7100_audclk_probe(struct platform_device *pdev)
return ret;
}
- return devm_of_clk_add_hw_provider(priv->dev, jh7100_audclk_get, priv);
+ return devm_of_clk_add_hw_provider(priv->dev, jh71x0_clk_get, priv);
}
static const struct of_device_id jh7100_audclk_match[] = {
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-aon.c b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
index 418efdad719b..6f67587f4335 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-aon.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-aon.c
@@ -54,17 +54,6 @@ static const struct jh71x0_clk_data jh7110_aonclk_data[] = {
JH71X0_GATE(JH7110_AONCLK_RTC_CAL, "rtc_cal", 0, JH7110_AONCLK_OSC),
};
-static struct clk_hw *jh7110_aonclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_AONCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7110_aoncrg_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -78,6 +67,7 @@ static int jh7110_aoncrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_AONCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -127,7 +117,7 @@ static int jh7110_aoncrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_aonclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-isp.c b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
index 8c4c3a958a9f..f3fa069db193 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-isp.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-isp.c
@@ -75,17 +75,6 @@ static inline int jh7110_isp_top_rst_init(struct jh71x0_clk_priv *priv)
return reset_control_deassert(top_rsts);
}
-static struct clk_hw *jh7110_ispclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_ISPCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
#ifdef CONFIG_PM
static int jh7110_ispcrg_suspend(struct device *dev)
{
@@ -126,6 +115,7 @@ static int jh7110_ispcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_ISPCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -186,7 +176,7 @@ static int jh7110_ispcrg_probe(struct platform_device *pdev)
goto err_exit;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_ispclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
goto err_exit;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-pll.c b/drivers/clk/starfive/clk-starfive-jh7110-pll.c
index 3598390e8fd0..56dc58a04f8a 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-pll.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-pll.c
@@ -453,7 +453,7 @@ static struct clk_hw *jh7110_pll_get(struct of_phandle_args *clkspec, void *data
return ERR_PTR(-EINVAL);
}
-static int jh7110_pll_probe(struct platform_device *pdev)
+static int __init jh7110_pll_probe(struct platform_device *pdev)
{
struct jh7110_pll_priv *priv;
unsigned int idx;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-stg.c b/drivers/clk/starfive/clk-starfive-jh7110-stg.c
index dafcb7190592..2a5ad0e07d1d 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-stg.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-stg.c
@@ -75,17 +75,6 @@ static const struct jh71x0_clk_data jh7110_stgclk_data[] = {
JH71X0_GATE(JH7110_STGCLK_DMA1P_AHB, "dma1p_ahb", 0, JH7110_STGCLK_STG_AXIAHB),
};
-static struct clk_hw *jh7110_stgclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_STGCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static int jh7110_stgcrg_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -98,6 +87,7 @@ static int jh7110_stgcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_STGCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -145,7 +135,7 @@ static int jh7110_stgcrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_stgclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
index 17325f17696f..e9d8168d02b8 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
@@ -323,17 +323,6 @@ static const struct jh71x0_clk_data jh7110_sysclk_data[] __initconst = {
JH7110_SYSCLK_OSC),
};
-static struct clk_hw *jh7110_sysclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_SYSCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
static void jh7110_reset_unregister_adev(void *_adev)
{
struct auxiliary_device *adev = _adev;
@@ -425,6 +414,7 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_SYSCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -526,7 +516,7 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_sysclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-vout.c b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
index 04eeed199087..bad20d5d794a 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-vout.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-vout.c
@@ -80,17 +80,6 @@ static int jh7110_vout_top_rst_init(struct jh71x0_clk_priv *priv)
return reset_control_deassert(top_rst);
}
-static struct clk_hw *jh7110_voutclk_get(struct of_phandle_args *clkspec, void *data)
-{
- struct jh71x0_clk_priv *priv = data;
- unsigned int idx = clkspec->args[0];
-
- if (idx < JH7110_VOUTCLK_END)
- return &priv->reg[idx].hw;
-
- return ERR_PTR(-EINVAL);
-}
-
#ifdef CONFIG_PM
static int jh7110_voutcrg_suspend(struct device *dev)
{
@@ -131,6 +120,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->rmw_lock);
+ priv->num_reg = JH7110_VOUTCLK_END;
priv->dev = &pdev->dev;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -193,7 +183,7 @@ static int jh7110_voutcrg_probe(struct platform_device *pdev)
goto err_exit;
}
- ret = devm_of_clk_add_hw_provider(&pdev->dev, jh7110_voutclk_get, priv);
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, jh71x0_clk_get, priv);
if (ret)
goto err_exit;
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.c b/drivers/clk/starfive/clk-starfive-jh71x0.c
index aebc99264a0b..80e9157347eb 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.c
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.c
@@ -325,3 +325,15 @@ const struct clk_ops *starfive_jh71x0_clk_ops(u32 max)
return &jh71x0_clk_inv_ops;
}
EXPORT_SYMBOL_GPL(starfive_jh71x0_clk_ops);
+
+struct clk_hw *jh71x0_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct jh71x0_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx < priv->num_reg)
+ return &priv->reg[idx].hw;
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(jh71x0_clk_get);
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h
index e3f441393e48..9d5dec1d5cd1 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.h
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.h
@@ -117,9 +117,11 @@ struct jh71x0_clk_priv {
struct clk *original_clk;
struct notifier_block pll_clk_nb;
struct clk_hw *pll[3];
- struct jh71x0_clk reg[];
+ unsigned int num_reg;
+ struct jh71x0_clk reg[] __counted_by(num_reg);
};
const struct clk_ops *starfive_jh71x0_clk_ops(u32 max);
+struct clk_hw *jh71x0_clk_get(struct of_phandle_args *clkspec, void *data);
#endif
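The __counted_by() annotation ties the flexible array's element count to num_reg, so FORTIFY_SOURCE and UBSAN bounds checks can police accesses such as priv->reg[idx] in jh71x0_clk_get() at run time. A minimal userspace-flavoured sketch, assuming a compiler that implements the attribute (recent Clang/GCC); the fallback macro keeps it standalone:

#include <stdlib.h>

/* The kernel defines __counted_by(); a no-op fallback keeps this standalone. */
#ifndef __counted_by
#define __counted_by(m)
#endif

struct regs {
	unsigned int num_reg;			/* set before reg[] is used */
	struct { int hw; } reg[] __counted_by(num_reg);
};

static struct regs *regs_alloc(unsigned int n)
{
	struct regs *r = malloc(sizeof(*r) + n * sizeof(r->reg[0]));

	if (r)
		r->num_reg = n;
	return r;
}

/* With the checks enabled, r->reg[idx] for idx >= r->num_reg is flagged. */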
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
index de36e21d3eaf..44b2ebdebdac 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
@@ -91,7 +91,7 @@ static struct clk_hw_onecell_data sun20i_d1_r_hw_clks = {
},
};
-static struct ccu_reset_map sun20i_d1_r_ccu_resets[] = {
+static const struct ccu_reset_map sun20i_d1_r_ccu_resets[] = {
[RST_BUS_R_TIMER] = { 0x11c, BIT(16) },
[RST_BUS_R_TWD] = { 0x12c, BIT(16) },
[RST_BUS_R_PPU] = { 0x1ac, BIT(16) },
@@ -137,6 +137,6 @@ static struct platform_driver sun20i_d1_r_ccu_driver = {
};
module_platform_driver(sun20i_d1_r_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner D1/R528/T113 PRCM CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
index 9b5cfac2ee70..bb66c906ebbb 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -1232,7 +1232,7 @@ static struct clk_hw_onecell_data sun20i_d1_hw_clks = {
},
};
-static struct ccu_reset_map sun20i_d1_ccu_resets[] = {
+static const struct ccu_reset_map sun20i_d1_ccu_resets[] = {
[RST_MBUS] = { 0x540, BIT(30) },
[RST_BUS_DE] = { 0x60c, BIT(16) },
[RST_BUS_DI] = { 0x62c, BIT(16) },
@@ -1371,7 +1371,7 @@ static int sun20i_d1_ccu_probe(struct platform_device *pdev)
/* Enforce m1 = 0, m0 = 0 for PLL_AUDIO0 */
val = readl(reg + SUN20I_D1_PLL_AUDIO0_REG);
- val &= ~BIT(1) | BIT(0);
+ val &= ~(BIT(1) | BIT(0));
writel(val, reg + SUN20I_D1_PLL_AUDIO0_REG);
/* Force fanout-27M factor N to 0. */
@@ -1406,6 +1406,6 @@ static struct platform_driver sun20i_d1_ccu_driver = {
};
module_platform_driver(sun20i_d1_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner D1/R528/T113 CCU");
MODULE_LICENSE("GPL");
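The one-character fix in sun20i_d1_ccu_probe() above is a precedence slip worth spelling out: ~ binds tighter than |, so val &= ~BIT(1) | BIT(0) clears only bit 1 and leaves bit 0 alone, while the intent was to clear both. A standalone check:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int val = 0x3;				/* m1 = 1, m0 = 1 */

	unsigned int a = val & (~BIT(1) | BIT(0));	/* old: clears only bit 1 */
	unsigned int b = val & ~(BIT(1) | BIT(0));	/* fixed: clears both bits */

	printf("buggy=0x%x fixed=0x%x\n", a, b);	/* 0x1 vs 0x0 */
	return 0;
}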
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index d1a1683baff4..409feb085021 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -1382,7 +1382,7 @@ static struct clk_hw_onecell_data sun7i_a20_hw_clks = {
.num = CLK_NUMBER_SUN7I,
};
-static struct ccu_reset_map sunxi_a10_a20_ccu_resets[] = {
+static const struct ccu_reset_map sunxi_a10_a20_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_PHY2] = { 0x0cc, BIT(2) },
@@ -1493,6 +1493,6 @@ static struct platform_driver sun4i_a10_ccu_driver = {
};
module_platform_driver(sun4i_a10_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A10/A20 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
index 2c791761a646..cb0f8d110c32 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
@@ -166,7 +166,7 @@ static struct clk_hw_onecell_data sun50i_a100_r_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_a100_r_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_a100_r_ccu_resets[] = {
[RST_R_APB1_TIMER] = { 0x11c, BIT(16) },
[RST_R_APB1_BUS_PWM] = { 0x13c, BIT(16) },
[RST_R_APB1_PPU] = { 0x17c, BIT(16) },
@@ -214,6 +214,6 @@ static struct platform_driver sun50i_a100_r_ccu_driver = {
};
module_platform_driver(sun50i_a100_r_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A100 PRCM CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
index bbaa82978716..1f81c7ac41af 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
@@ -436,7 +436,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
0, 4, /* M */
@@ -444,7 +444,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
0, 4, /* M */
@@ -452,7 +452,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
- CLK_SET_RATE_NO_REPARENT);
+ 0);
static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0);
static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0);
@@ -1061,7 +1061,7 @@ static struct clk_hw_onecell_data sun50i_a100_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_a100_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_a100_ccu_resets[] = {
[RST_MBUS] = { 0x540, BIT(30) },
[RST_BUS_DE] = { 0x60c, BIT(16) },
@@ -1276,6 +1276,6 @@ static struct platform_driver sun50i_a100_ccu_driver = {
};
module_platform_driver(sun50i_a100_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A100 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index c255dba2c96d..ba1ad267f123 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -535,11 +535,11 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
CLK_SET_RATE_PARENT);
/*
- * DSI output seems to work only when PLL_MIPI selected. Set it and prevent
- * the mux from reparenting.
+ * Experiments showed that RGB output requires pll-video0-2x, while DSI
+ * requires pll-mipi; neither works with the wrong clock, the screen just
+ * stays blank. sun50i-a64.dtsi assigns pll-mipi as the TCON0 parent by
+ * default.
*/
-#define SUN50I_A64_TCON0_CLK_REG 0x118
-
static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
static const u8 tcon0_table[] = { 0, 2, };
static SUNXI_CCU_MUX_TABLE_WITH_GATE_CLOSEST(tcon0_clk, "tcon0", tcon0_parents,
@@ -858,7 +858,7 @@ static struct clk_hw_onecell_data sun50i_a64_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_a64_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_a64_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_HSIC] = { 0x0cc, BIT(2) },
@@ -959,11 +959,6 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
- /* Set PLL MIPI as parent for TCON0 */
- val = readl(reg + SUN50I_A64_TCON0_CLK_REG);
- val &= ~GENMASK(26, 24);
- writel(val | (0 << 24), reg + SUN50I_A64_TCON0_CLK_REG);
-
ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a64_ccu_desc);
if (ret)
return ret;
@@ -994,6 +989,6 @@ static struct platform_driver sun50i_a64_ccu_driver = {
};
module_platform_driver(sun50i_a64_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A64 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
index a8c11c0b4e06..dfba88a5ad0f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h
@@ -21,7 +21,6 @@
/* PLL_VIDEO0 exported for HDMI PHY */
-#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_VE 9
#define CLK_PLL_DDR0 10
@@ -32,7 +31,6 @@
#define CLK_PLL_PERIPH1_2X 14
#define CLK_PLL_VIDEO1 15
#define CLK_PLL_GPU 16
-#define CLK_PLL_MIPI 17
#define CLK_PLL_HSIC 18
#define CLK_PLL_DE 19
#define CLK_PLL_DDR1 20
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index c72815841111..acb4e8b9b1ba 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -179,7 +179,7 @@ static struct clk_hw_onecell_data sun50i_h616_r_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_h6_r_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_h6_r_ccu_resets[] = {
[RST_R_APB1_TIMER] = { 0x11c, BIT(16) },
[RST_R_APB1_TWD] = { 0x12c, BIT(16) },
[RST_R_APB1_PWM] = { 0x13c, BIT(16) },
@@ -190,7 +190,7 @@ static struct ccu_reset_map sun50i_h6_r_ccu_resets[] = {
[RST_R_APB1_W1] = { 0x1ec, BIT(16) },
};
-static struct ccu_reset_map sun50i_h616_r_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_h616_r_ccu_resets[] = {
[RST_R_APB1_TWD] = { 0x12c, BIT(16) },
[RST_R_APB2_I2C] = { 0x19c, BIT(16) },
[RST_R_APB2_RSB] = { 0x1bc, BIT(16) },
@@ -256,6 +256,6 @@ static struct platform_driver sun50i_h6_r_ccu_driver = {
};
module_platform_driver(sun50i_h6_r_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner H6 and H616 PRCM CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index a20b621ad8f1..7fccda96d444 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -1076,7 +1076,7 @@ static struct clk_hw_onecell_data sun50i_h6_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_h6_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_h6_ccu_resets[] = {
[RST_MBUS] = { 0x540, BIT(30) },
[RST_BUS_DE] = { 0x60c, BIT(16) },
@@ -1286,6 +1286,6 @@ static struct platform_driver sun50i_h6_ccu_driver = {
};
module_platform_driver(sun50i_h6_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner H6 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 84e406ddf9d1..190816c35da9 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -216,19 +216,29 @@ static struct ccu_nkmp pll_de_clk = {
};
/*
- * TODO: Determine SDM settings for the audio PLL. The manual suggests
- * PLL_FACTOR_N=16, PLL_POST_DIV_P=2, OUTPUT_DIV=2, pattern=0xe000c49b
- * for 24.576 MHz, and PLL_FACTOR_N=22, PLL_POST_DIV_P=3, OUTPUT_DIV=2,
- * pattern=0xe001288c for 22.5792 MHz.
- * This clashes with our fixed PLL_POST_DIV_P.
+ * Sigma-delta modulation settings table obtained from the vendor SDK driver.
+ * There are additional M0 and M1 divider bits not modelled here, so they are
+ * forced to fixed values in the probe routine. Sigma-delta modulation
+ * provides a fractional-N divider in the PLL, which helps to reach those
+ * specific frequencies with less error.
*/
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 90316800, .pattern = 0xc001288d, .m = 3, .n = 22 },
+ { .rate = 98304000, .pattern = 0xc001eb85, .m = 5, .n = 40 },
+};
+
#define SUN50I_H616_PLL_AUDIO_REG 0x078
static struct ccu_nm pll_audio_hs_clk = {
.enable = BIT(31),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
- .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .m = _SUNXI_CCU_DIV(16, 6),
+ .sdm = _SUNXI_CCU_SDM(pll_audio_sdm_table,
+ BIT(24), 0x178, BIT(31)),
+ .fixed_post_div = 2,
.common = {
+ .features = CCU_FEATURE_FIXED_POSTDIV |
+ CCU_FEATURE_SIGMA_DELTA_MOD,
.reg = 0x078,
.hw.init = CLK_HW_INIT("pll-audio-hs", "osc24M",
&ccu_nm_ops,
@@ -685,18 +695,20 @@ static const struct clk_hw *clk_parent_pll_audio[] = {
};
/*
- * The divider of pll-audio is fixed to 24 for now, so 24576000 and 22579200
- * rates can be set exactly in conjunction with sigma-delta modulation.
+ * The PLL_AUDIO_4X clock defaults to 24.5714 MHz according to the manual, with
+ * a final divider of 1. The 2X and 1X clocks use dividers of 2 and 4
+ * respectively. The 1X clock is set to either 24576000 or 22579200 for 48 kHz
+ * and 44.1 kHz (and multiples).
*/
static CLK_FIXED_FACTOR_HWS(pll_audio_1x_clk, "pll-audio-1x",
clk_parent_pll_audio,
- 96, 1, CLK_SET_RATE_PARENT);
+ 4, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x",
clk_parent_pll_audio,
- 48, 1, CLK_SET_RATE_PARENT);
+ 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x",
clk_parent_pll_audio,
- 24, 1, CLK_SET_RATE_PARENT);
+ 1, 1, CLK_SET_RATE_PARENT);
static const struct clk_hw *pll_periph0_parents[] = {
&pll_periph0_clk.common.hw
@@ -990,7 +1002,7 @@ static struct clk_hw_onecell_data sun50i_h616_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun50i_h616_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_h616_ccu_resets[] = {
[RST_MBUS] = { 0x540, BIT(30) },
[RST_BUS_DE] = { 0x60c, BIT(16) },
@@ -1095,11 +1107,24 @@ static const u32 usb2_clk_regs[] = {
SUN50I_H616_USB3_CLK_REG,
};
+static struct ccu_mux_nb sun50i_h616_cpu_nb = {
+ .common = &cpux_clk.common,
+ .cm = &cpux_clk.mux,
+ .delay_us = 1, /* manual doesn't really say */
+ .bypass_index = 4, /* PLL_PERI0@600MHz, as recommended by manual */
+};
+
+static struct ccu_pll_nb sun50i_h616_pll_cpu_nb = {
+ .common = &pll_cpux_clk.common,
+ .enable = BIT(29), /* LOCK_ENABLE */
+ .lock = BIT(28),
+};
+
static int sun50i_h616_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
u32 val;
- int i;
+ int ret, i;
reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
@@ -1136,12 +1161,14 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev)
}
/*
- * Force the post-divider of pll-audio to 12 and the output divider
- * of it to 2, so 24576000 and 22579200 rates can be set exactly.
+ * Set the output-divider for the pll-audio clocks (M0) to 2 and the
+ * input divider (M1) to 1 as recommended by the manual when using
+ * SDM.
*/
val = readl(reg + SUN50I_H616_PLL_AUDIO_REG);
- val &= ~(GENMASK(21, 16) | BIT(0));
- writel(val | (11 << 16) | BIT(0), reg + SUN50I_H616_PLL_AUDIO_REG);
+ val &= ~BIT(1);
+ val |= BIT(0);
+ writel(val, reg + SUN50I_H616_PLL_AUDIO_REG);
/*
* First clock parent (osc32K) is unusable for CEC. But since there
@@ -1152,7 +1179,18 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev)
val |= BIT(24);
writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG);
- return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h616_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h616_ccu_desc);
+ if (ret)
+ return ret;
+
+ /* Reparent CPU during CPU PLL rate changes */
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun50i_h616_cpu_nb);
+
+ /* Re-lock the CPU PLL after any rate changes */
+ ccu_pll_notifier_register(&sun50i_h616_pll_cpu_nb);
+
+ return 0;
}
static const struct of_device_id sun50i_h616_ccu_ids[] = {
@@ -1171,6 +1209,6 @@ static struct platform_driver sun50i_h616_ccu_driver = {
};
module_platform_driver(sun50i_h616_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner H616 CCU");
MODULE_LICENSE("GPL");
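Back-computing the SDM table entries above shows what the modulation buys (with m from the table and the fixed post-divider of 2): 90316800 Hz works out to 24 MHz x 22.5792 / (3 x 2), and 98304000 Hz to 24 MHz x 40.96 / (5 x 2), i.e. effective multipliers of 22.5792 and 40.96 that the integer N values of 22 and 40 alone could not reach. A small check of that arithmetic:

#include <stdio.h>

/* Effective rate of a fractional-N PLL: parent * n_eff / (m * post_div). */
static double sdm_rate(double parent, double n_eff, int m, int post_div)
{
	return parent * n_eff / (m * post_div);
}

int main(void)
{
	/* n_eff back-computed from the pll_audio_sdm_table rates */
	printf("%.0f\n", sdm_rate(24e6, 22.5792, 3, 2));	/* 90316800 */
	printf("%.0f\n", sdm_rate(24e6, 40.96, 5, 2));		/* 98304000 */
	return 0;
}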
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index 1f4bc0e773a7..c9bf1fdb8a8a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -731,7 +731,7 @@ static struct clk_hw_onecell_data sun5i_a10s_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun5i_a10s_ccu_resets[] = {
+static const struct ccu_reset_map sun5i_a10s_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index e8b8d2dd7f2c..bab65cfe9501 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -1146,7 +1146,7 @@ static struct clk_hw_onecell_data sun6i_a31_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun6i_a31_ccu_resets[] = {
+static const struct ccu_reset_map sun6i_a31_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_PHY2] = { 0x0cc, BIT(2) },
@@ -1283,6 +1283,6 @@ static struct platform_driver sun6i_a31_ccu_driver = {
};
module_platform_driver(sun6i_a31_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A31/A31s CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
index 87e23d16ed0f..0536e880b80f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
@@ -356,7 +356,7 @@ int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
const char *fw_name;
/* ext-osc32k was the only input clock in the old binding. */
- fw_name = of_property_read_bool(dev->of_node, "clock-names")
+ fw_name = of_property_present(dev->of_node, "clock-names")
? "ext-osc32k" : NULL;
ext_osc32k_clk = devm_clk_get_optional(dev, fw_name);
if (IS_ERR(ext_osc32k_clk))
@@ -381,6 +381,6 @@ int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
return devm_sunxi_ccu_probe(dev, reg, &sun6i_rtc_ccu_desc);
}
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner H616/R329 RTC CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 6c2a08f722a8..78cf3818ab09 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -668,7 +668,7 @@ static struct clk_hw_onecell_data sun8i_a23_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun8i_a23_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_a23_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_HSIC] = { 0x0cc, BIT(2) },
@@ -763,6 +763,6 @@ static struct platform_driver sun8i_a23_ccu_driver = {
};
module_platform_driver(sun8i_a23_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A23 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 5e0bc08a9ce3..b039d419512c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -712,7 +712,7 @@ static struct clk_hw_onecell_data sun8i_a33_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun8i_a33_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_a33_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_HSIC] = { 0x0cc, BIT(2) },
@@ -835,6 +835,6 @@ static struct platform_driver sun8i_a33_ccu_driver = {
};
module_platform_driver(sun8i_a33_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A33 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index cb4c6b16c467..60e918965a72 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -797,7 +797,7 @@ static struct clk_hw_onecell_data sun8i_a83t_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun8i_a83t_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_a83t_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_HSIC] = { 0x0cc, BIT(2) },
@@ -923,6 +923,6 @@ static struct platform_driver sun8i_a83t_ccu_driver = {
};
module_platform_driver(sun8i_a83t_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A83T CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 7683ea08d8e3..f2aa71206bc2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -146,7 +146,7 @@ static struct clk_hw_onecell_data sun50i_a64_de2_hw_clks = {
.num = CLK_NUMBER_WITH_ROT,
};
-static struct ccu_reset_map sun8i_a83t_de2_resets[] = {
+static const struct ccu_reset_map sun8i_a83t_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
/*
* Mixer1 reset line is shared with wb, so only RST_WB is
@@ -156,7 +156,7 @@ static struct ccu_reset_map sun8i_a83t_de2_resets[] = {
[RST_ROT] = { 0x08, BIT(3) },
};
-static struct ccu_reset_map sun8i_h3_de2_resets[] = {
+static const struct ccu_reset_map sun8i_h3_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
/*
* Mixer1 reset line is shared with wb, so only RST_WB is
@@ -166,14 +166,14 @@ static struct ccu_reset_map sun8i_h3_de2_resets[] = {
[RST_WB] = { 0x08, BIT(2) },
};
-static struct ccu_reset_map sun50i_a64_de2_resets[] = {
+static const struct ccu_reset_map sun50i_a64_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
[RST_MIXER1] = { 0x08, BIT(1) },
[RST_WB] = { 0x08, BIT(2) },
[RST_ROT] = { 0x08, BIT(3) },
};
-static struct ccu_reset_map sun50i_h5_de2_resets[] = {
+static const struct ccu_reset_map sun50i_h5_de2_resets[] = {
[RST_MIXER0] = { 0x08, BIT(0) },
[RST_MIXER1] = { 0x08, BIT(1) },
[RST_WB] = { 0x08, BIT(2) },
@@ -348,6 +348,6 @@ static struct platform_driver sunxi_de2_clk_driver = {
};
module_platform_driver(sunxi_de2_clk_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner SoCs DE2 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 13e57db2f8d5..740c4c97331c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -876,7 +876,7 @@ static struct clk_hw_onecell_data sun50i_h5_hw_clks = {
.num = CLK_NUMBER_H5,
};
-static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_h3_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_PHY2] = { 0x0cc, BIT(2) },
@@ -939,7 +939,7 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
[RST_BUS_SCR0] = { 0x2d8, BIT(20) },
};
-static struct ccu_reset_map sun50i_h5_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_h5_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_PHY2] = { 0x0cc, BIT(2) },
@@ -1094,6 +1094,6 @@ static struct platform_driver sun8i_h3_ccu_driver = {
};
module_platform_driver(sun8i_h3_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner H3 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index da6569334d68..0e324344673b 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -178,7 +178,7 @@ static struct clk_hw_onecell_data sun50i_a64_r_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = {
[RST_APB0_IR] = { 0xb0, BIT(1) },
[RST_APB0_TIMER] = { 0xb0, BIT(2) },
[RST_APB0_RSB] = { 0xb0, BIT(3) },
@@ -186,14 +186,14 @@ static struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = {
[RST_APB0_I2C] = { 0xb0, BIT(6) },
};
-static struct ccu_reset_map sun8i_h3_r_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_h3_r_ccu_resets[] = {
[RST_APB0_IR] = { 0xb0, BIT(1) },
[RST_APB0_TIMER] = { 0xb0, BIT(2) },
[RST_APB0_UART] = { 0xb0, BIT(4) },
[RST_APB0_I2C] = { 0xb0, BIT(6) },
};
-static struct ccu_reset_map sun50i_a64_r_ccu_resets[] = {
+static const struct ccu_reset_map sun50i_a64_r_ccu_resets[] = {
[RST_APB0_IR] = { 0xb0, BIT(1) },
[RST_APB0_TIMER] = { 0xb0, BIT(2) },
[RST_APB0_RSB] = { 0xb0, BIT(3) },
@@ -274,6 +274,6 @@ static struct platform_driver sun8i_r_ccu_driver = {
};
module_platform_driver(sun8i_r_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for Allwinner SoCs' PRCM CCUs");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 2f51ceab8016..8b729c9b3545 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -1162,7 +1162,7 @@ static struct clk_hw_onecell_data sun8i_r40_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun8i_r40_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_r40_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_USB_PHY1] = { 0x0cc, BIT(1) },
[RST_USB_PHY2] = { 0x0cc, BIT(2) },
@@ -1375,6 +1375,6 @@ static struct platform_driver sun8i_r40_ccu_driver = {
};
module_platform_driver(sun8i_r40_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner R40 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index d24c0d8dfee4..579a81bb46df 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -644,7 +644,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
.num = CLK_I2S0 + 1,
};
-static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_MBUS] = { 0x0fc, BIT(31) },
@@ -679,7 +679,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
[RST_BUS_UART2] = { 0x2d8, BIT(18) },
};
-static struct ccu_reset_map sun8i_v3_ccu_resets[] = {
+static const struct ccu_reset_map sun8i_v3_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_MBUS] = { 0x0fc, BIT(31) },
@@ -780,6 +780,6 @@ static struct platform_driver sun8i_v3s_ccu_driver = {
};
module_platform_driver(sun8i_v3s_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner V3s CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
index 0975ac58949f..91e5dc448bc0 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
@@ -177,7 +177,7 @@ static struct clk_hw_onecell_data sun9i_a80_de_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun9i_a80_de_resets[] = {
+static const struct ccu_reset_map sun9i_a80_de_resets[] = {
[RST_FE0] = { 0x0c, BIT(0) },
[RST_FE1] = { 0x0c, BIT(1) },
[RST_FE2] = { 0x0c, BIT(2) },
@@ -266,6 +266,6 @@ static struct platform_driver sun9i_a80_de_clk_driver = {
};
module_platform_driver(sun9i_a80_de_clk_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A80 Display Engine CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
index e5527c8cc64f..62063f525616 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
@@ -68,7 +68,7 @@ static struct clk_hw_onecell_data sun9i_a80_usb_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun9i_a80_usb_resets[] = {
+static const struct ccu_reset_map sun9i_a80_usb_resets[] = {
[RST_USB0_HCI] = { 0x0, BIT(17) },
[RST_USB1_HCI] = { 0x0, BIT(18) },
[RST_USB2_HCI] = { 0x0, BIT(19) },
@@ -138,6 +138,6 @@ static struct platform_driver sun9i_a80_usb_clk_driver = {
};
module_platform_driver(sun9i_a80_usb_clk_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A80 USB CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index 756dd8fca6b0..337751998005 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -1108,7 +1108,7 @@ static struct clk_hw_onecell_data sun9i_a80_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map sun9i_a80_ccu_resets[] = {
+static const struct ccu_reset_map sun9i_a80_ccu_resets[] = {
/* AHB0 reset controls */
[RST_BUS_FD] = { 0x5a0, BIT(0) },
[RST_BUS_VE] = { 0x5a0, BIT(1) },
@@ -1248,6 +1248,6 @@ static struct platform_driver sun9i_a80_ccu_driver = {
};
module_platform_driver(sun9i_a80_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner A80 CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index 52f1a04269f8..35935423145e 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -477,7 +477,7 @@ static struct clk_hw_onecell_data suniv_hw_clks = {
.num = CLK_NUMBER,
};
-static struct ccu_reset_map suniv_ccu_resets[] = {
+static const struct ccu_reset_map suniv_ccu_resets[] = {
[RST_USB_PHY0] = { 0x0cc, BIT(0) },
[RST_BUS_DMA] = { 0x2c0, BIT(6) },
@@ -577,6 +577,6 @@ static struct platform_driver suniv_f1c100s_ccu_driver = {
};
module_platform_driver(suniv_f1c100s_ccu_driver);
-MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_IMPORT_NS("SUNXI_CCU");
MODULE_DESCRIPTION("Support for the Allwinner newer F1C100s CCU");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 4117b0bea267..88ed89658d45 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -37,7 +37,7 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
}
-EXPORT_SYMBOL_NS_GPL(ccu_helper_wait_for_lock, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_helper_wait_for_lock, "SUNXI_CCU");
bool ccu_is_better_rate(struct ccu_common *common,
unsigned long target_rate,
@@ -59,7 +59,7 @@ bool ccu_is_better_rate(struct ccu_common *common,
return current_rate <= target_rate && current_rate > best_rate;
}
-EXPORT_SYMBOL_NS_GPL(ccu_is_better_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_is_better_rate, "SUNXI_CCU");
/*
* This clock notifier is called when the frequency of a PLL clock is
@@ -107,7 +107,7 @@ int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
return clk_notifier_register(pll_nb->common->hw.clk,
&pll_nb->clk_nb);
}
-EXPORT_SYMBOL_NS_GPL(ccu_pll_notifier_register, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_pll_notifier_register, "SUNXI_CCU");
static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
struct device_node *node, void __iomem *reg,
@@ -234,7 +234,7 @@ int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_sunxi_ccu_probe, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(devm_sunxi_ccu_probe, "SUNXI_CCU");
void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
const struct sunxi_ccu_desc *desc)
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index 329734f8cf42..dd330426a6e5 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -50,7 +50,7 @@ struct sunxi_ccu_desc {
struct clk_hw_onecell_data *hw_clks;
- struct ccu_reset_map *resets;
+ const struct ccu_reset_map *resets;
unsigned long num_resets;
};
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index cb10a3ea23f9..7f4691f09e01 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -141,4 +141,4 @@ const struct clk_ops ccu_div_ops = {
.recalc_rate = ccu_div_recalc_rate,
.set_rate = ccu_div_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_div_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_div_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index b31f3ad946d6..75323912608a 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -18,7 +18,7 @@ bool ccu_frac_helper_is_enabled(struct ccu_common *common,
return !(readl(common->base + common->reg) & cf->enable);
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_is_enabled, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_is_enabled, "SUNXI_CCU");
void ccu_frac_helper_enable(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -34,7 +34,7 @@ void ccu_frac_helper_enable(struct ccu_common *common,
writel(reg & ~cf->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_enable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_enable, "SUNXI_CCU");
void ccu_frac_helper_disable(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -50,7 +50,7 @@ void ccu_frac_helper_disable(struct ccu_common *common,
writel(reg | cf->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_disable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_disable, "SUNXI_CCU");
bool ccu_frac_helper_has_rate(struct ccu_common *common,
struct ccu_frac_internal *cf,
@@ -61,7 +61,7 @@ bool ccu_frac_helper_has_rate(struct ccu_common *common,
return (cf->rates[0] == rate) || (cf->rates[1] == rate);
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_has_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_has_rate, "SUNXI_CCU");
unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -83,7 +83,7 @@ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
return (reg & cf->select) ? cf->rates[1] : cf->rates[0];
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_read_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_read_rate, "SUNXI_CCU");
int ccu_frac_helper_set_rate(struct ccu_common *common,
struct ccu_frac_internal *cf,
@@ -112,4 +112,4 @@ int ccu_frac_helper_set_rate(struct ccu_common *common,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_set_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_set_rate, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index a2115a21807d..ac52fd6bff67 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -24,7 +24,7 @@ void ccu_gate_helper_disable(struct ccu_common *common, u32 gate)
spin_unlock_irqrestore(common->lock, flags);
}
-EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_disable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_disable, "SUNXI_CCU");
static void ccu_gate_disable(struct clk_hw *hw)
{
@@ -50,7 +50,7 @@ int ccu_gate_helper_enable(struct ccu_common *common, u32 gate)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_enable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_enable, "SUNXI_CCU");
static int ccu_gate_enable(struct clk_hw *hw)
{
@@ -66,7 +66,7 @@ int ccu_gate_helper_is_enabled(struct ccu_common *common, u32 gate)
return readl(common->base + common->reg) & gate;
}
-EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_is_enabled, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_is_enabled, "SUNXI_CCU");
static int ccu_gate_is_enabled(struct clk_hw *hw)
{
@@ -127,4 +127,4 @@ const struct clk_ops ccu_gate_ops = {
.set_rate = ccu_gate_set_rate,
.recalc_rate = ccu_gate_recalc_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_gate_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_gate_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index cc94a694cb67..2bb8987ddcc2 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -246,7 +246,7 @@ const struct clk_ops ccu_mp_ops = {
.recalc_rate = ccu_mp_recalc_rate,
.set_rate = ccu_mp_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_mp_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mp_ops, "SUNXI_CCU");
/*
* Support for MMC timing mode switching
@@ -327,4 +327,4 @@ const struct clk_ops ccu_mp_mmc_ops = {
.recalc_rate = ccu_mp_mmc_recalc_rate,
.set_rate = ccu_mp_mmc_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_mp_mmc_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mp_mmc_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 7bee217ef111..8d5720f3dec1 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -170,4 +170,4 @@ const struct clk_ops ccu_mult_ops = {
.recalc_rate = ccu_mult_recalc_rate,
.set_rate = ccu_mult_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_mult_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mult_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index 5edc63b46651..d7ffbdeee9e0 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -66,7 +66,7 @@ unsigned long ccu_mux_helper_apply_prediv(struct ccu_common *common,
{
return parent_rate / ccu_mux_get_prediv(common, cm, parent_index);
}
-EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_apply_prediv, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_apply_prediv, "SUNXI_CCU");
static unsigned long ccu_mux_helper_unapply_prediv(struct ccu_common *common,
struct ccu_mux_internal *cm,
@@ -155,7 +155,7 @@ out:
req->rate = best_rate;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_determine_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_determine_rate, "SUNXI_CCU");
u8 ccu_mux_helper_get_parent(struct ccu_common *common,
struct ccu_mux_internal *cm)
@@ -178,7 +178,7 @@ u8 ccu_mux_helper_get_parent(struct ccu_common *common,
return parent;
}
-EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_get_parent, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_get_parent, "SUNXI_CCU");
int ccu_mux_helper_set_parent(struct ccu_common *common,
struct ccu_mux_internal *cm,
@@ -205,7 +205,7 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_set_parent, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_set_parent, "SUNXI_CCU");
static void ccu_mux_disable(struct clk_hw *hw)
{
@@ -273,7 +273,7 @@ const struct clk_ops ccu_mux_ops = {
.determine_rate = ccu_mux_determine_rate,
.recalc_rate = ccu_mux_recalc_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_mux_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_ops, "SUNXI_CCU");
/*
 * This clock notifier is called when the frequency of the parent
@@ -308,4 +308,4 @@ int ccu_mux_notifier_register(struct clk *clk, struct ccu_mux_nb *mux_nb)
return clk_notifier_register(clk, &mux_nb->clk_nb);
}
-EXPORT_SYMBOL_NS_GPL(ccu_mux_notifier_register, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_mux_notifier_register, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 8aa35d5804f3..555e99de2cc6 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -158,4 +158,4 @@ const struct clk_ops ccu_nk_ops = {
.round_rate = ccu_nk_round_rate,
.set_rate = ccu_nk_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_nk_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_nk_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 1168d894d636..784eec9ac997 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -267,4 +267,4 @@ const struct clk_ops ccu_nkm_ops = {
.recalc_rate = ccu_nkm_recalc_rate,
.set_rate = ccu_nkm_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_nkm_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_nkm_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 99359a06892d..6e03b69d4028 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -230,4 +230,4 @@ const struct clk_ops ccu_nkmp_ops = {
.round_rate = ccu_nkmp_round_rate,
.set_rate = ccu_nkmp_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_nkmp_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_nkmp_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index ffac3deb89d6..a4e2243b8d6b 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -236,4 +236,4 @@ const struct clk_ops ccu_nm_ops = {
.round_rate = ccu_nm_round_rate,
.set_rate = ccu_nm_set_rate,
};
-EXPORT_SYMBOL_NS_GPL(ccu_nm_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_nm_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_phase.c b/drivers/clk/sunxi-ng/ccu_phase.c
index e4cae2afe9db..ca43cf448666 100644
--- a/drivers/clk/sunxi-ng/ccu_phase.c
+++ b/drivers/clk/sunxi-ng/ccu_phase.c
@@ -121,4 +121,4 @@ const struct clk_ops ccu_phase_ops = {
.get_phase = ccu_phase_get_phase,
.set_phase = ccu_phase_set_phase,
};
-EXPORT_SYMBOL_NS_GPL(ccu_phase_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_phase_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c
index 6577aa18cb01..55bc7c7cda0f 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.c
+++ b/drivers/clk/sunxi-ng/ccu_reset.c
@@ -75,4 +75,4 @@ const struct reset_control_ops ccu_reset_ops = {
.reset = ccu_reset_reset,
.status = ccu_reset_status,
};
-EXPORT_SYMBOL_NS_GPL(ccu_reset_ops, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_reset_ops, "SUNXI_CCU");
diff --git a/drivers/clk/sunxi-ng/ccu_reset.h b/drivers/clk/sunxi-ng/ccu_reset.h
index e9b973cae4af..941276a8ec2e 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.h
+++ b/drivers/clk/sunxi-ng/ccu_reset.h
@@ -17,7 +17,7 @@ struct ccu_reset_map {
struct ccu_reset {
void __iomem *base;
- struct ccu_reset_map *reset_map;
+ const struct ccu_reset_map *reset_map;
spinlock_t *lock;
struct reset_controller_dev rcdev;
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
index 41937ed0766d..c564e5f9e610 100644
--- a/drivers/clk/sunxi-ng/ccu_sdm.c
+++ b/drivers/clk/sunxi-ng/ccu_sdm.c
@@ -20,7 +20,7 @@ bool ccu_sdm_helper_is_enabled(struct ccu_common *common,
return !!(readl(common->base + sdm->tuning_reg) & sdm->tuning_enable);
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_is_enabled, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_is_enabled, "SUNXI_CCU");
void ccu_sdm_helper_enable(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -50,7 +50,7 @@ void ccu_sdm_helper_enable(struct ccu_common *common,
writel(reg | sdm->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_enable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_enable, "SUNXI_CCU");
void ccu_sdm_helper_disable(struct ccu_common *common,
struct ccu_sdm_internal *sdm)
@@ -71,7 +71,7 @@ void ccu_sdm_helper_disable(struct ccu_common *common,
writel(reg & ~sdm->tuning_enable, common->base + sdm->tuning_reg);
spin_unlock_irqrestore(common->lock, flags);
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_disable, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_disable, "SUNXI_CCU");
/*
* Sigma delta modulation provides a way to do fractional-N frequency
@@ -105,7 +105,7 @@ bool ccu_sdm_helper_has_rate(struct ccu_common *common,
return false;
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_has_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_has_rate, "SUNXI_CCU");
unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -136,7 +136,7 @@ unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
/* We can't calculate the effective clock rate, so just fail. */
return 0;
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_read_rate, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_read_rate, "SUNXI_CCU");
int ccu_sdm_helper_get_factors(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -158,4 +158,4 @@ int ccu_sdm_helper_get_factors(struct ccu_common *common,
/* nothing found */
return -EINVAL;
}
-EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_get_factors, SUNXI_CCU);
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_get_factors, "SUNXI_CCU");
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index 7bfba0afd778..b2323cb8eddc 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -174,7 +174,7 @@ static int tegra_bpmp_clk_determine_rate(struct clk_hw *hw,
unsigned long rate;
int err;
- rate = min(max(rate_req->rate, rate_req->min_rate), rate_req->max_rate);
+ rate = clamp(rate_req->rate, rate_req->min_rate, rate_req->max_rate);
memset(&request, 0, sizeof(request));
request.rate = min_t(u64, rate, S64_MAX);
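clamp(x, lo, hi) is the self-documenting spelling of the nested min(max(x, lo), hi) it replaces; both pin x into [lo, hi]. A tiny standalone check of the identity:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
#define clamp(x, lo, hi) min(max(x, lo), hi)

int main(void)
{
	/* below, inside, and above the [10, 20] range */
	printf("%d %d %d\n", clamp(5, 10, 20), clamp(15, 10, 20),
	       clamp(25, 10, 20));	/* 10 15 20 */
	return 0;
}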
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index 17e32ae08720..4c9555fc6184 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -657,7 +657,7 @@ static struct ccu_div apb_pclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("apb-pclk",
apb_parents,
&ccu_div_ops,
- 0),
+ CLK_IGNORE_UNUSED),
},
};
@@ -779,6 +779,13 @@ static struct ccu_div dpu1_clk = {
},
};
+static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
+ &video_pll_clk.common.hw, 4, 1, 0);
+
+static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
+ { .hw = &emmc_sdio_ref_clk.hw },
+};
+
static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
@@ -787,18 +794,18 @@ static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_
0x134, BIT(7), 0);
static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd, 0x138, BIT(8), 0);
static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
- 0x140, BIT(9), 0);
+ 0x140, BIT(9), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
0x150, BIT(9), 0);
static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(10), 0);
+ 0x150, BIT(10), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(11), 0);
+ 0x150, BIT(11), CLK_IGNORE_UNUSED);
static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", perisys_ahb_hclk_pd,
0x150, BIT(12), 0);
static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
-static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", video_pll_clk_pd, 0x204, BIT(30), 0);
+static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, BIT(30), 0);
static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
@@ -889,7 +896,6 @@ static struct ccu_common *th1520_div_clks[] = {
&vo_axi_clk.common,
&vp_apb_clk.common,
&vp_axi_clk.common,
- &cpu2vp_clk.common,
&venc_clk.common,
&dpu0_clk.common,
&dpu1_clk.common,
@@ -909,6 +915,7 @@ static struct ccu_common *th1520_gate_clks[] = {
&bmu_clk.common,
&cpu2aon_x2h_clk.common,
&cpu2peri_x2h_clk.common,
+ &cpu2vp_clk.common,
&perisys_apb1_hclk.common,
&perisys_apb2_hclk.common,
&perisys_apb3_hclk.common,
@@ -1041,7 +1048,8 @@ static int th1520_clk_probe(struct platform_device *pdev)
hw = devm_clk_hw_register_gate_parent_data(dev,
cg->common.hw.init->name,
cg->common.hw.init->parent_data,
- 0, base + cg->common.cfg0,
+ cg->common.hw.init->flags,
+ base + cg->common.cfg0,
ffs(cg->enable) - 1, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
@@ -1059,6 +1067,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
return ret;
priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+ ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
+ if (ret)
+ return ret;
+
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
if (ret)
return ret;
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index f2117fef7c7d..9c75dcc9a534 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -449,10 +449,7 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
{
struct clk_iomap *io;
- io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES);
- if (!io)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*io));
+ io = memblock_alloc_or_panic(sizeof(*io), SMP_CACHE_BYTES);
io->mem = mem;
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 216d85d6aac6..f684fc306ecc 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -180,7 +180,7 @@ static void of_mux_clk_setup(struct device_node *node)
pr_err("mux-clock %pOFn must have parents\n", node);
return;
}
- parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL);
+ parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
if (!parent_names)
goto cleanup;
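kcalloc(n, size, flags) is the counted form of kzalloc(): it zeroes the buffer and returns NULL if n * size would overflow, where the open-coded multiplication silently wraps. A minimal sketch:

#include <linux/slab.h>

static const char **alloc_parent_names(unsigned int num_parents)
{
	/* overflow-checked: returns NULL if num_parents * sizeof(char *)
	 * would wrap, instead of handing back a too-small buffer */
	return kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
}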
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index 7a0269bdfbb3..bbf7714480e7 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/math64.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/err.h>
#include <linux/iopoll.h>
@@ -51,6 +52,8 @@
#define WZRD_CLKFBOUT_MULT_SHIFT 8
#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_CLKFBOUT_MULT_FRAC_MASK GENMASK(25, 16)
+#define WZRD_CLKFBOUT_O_MASK GENMASK(7, 0)
#define WZRD_CLKFBOUT_L_SHIFT 0
#define WZRD_CLKFBOUT_H_SHIFT 8
#define WZRD_CLKFBOUT_L_MASK GENMASK(7, 0)
@@ -86,14 +89,14 @@
#define DIV_O 0x01
#define DIV_ALL 0x03
-#define WZRD_M_MIN 2
-#define WZRD_M_MAX 128
-#define WZRD_D_MIN 1
-#define WZRD_D_MAX 106
-#define WZRD_VCO_MIN 800000000
-#define WZRD_VCO_MAX 1600000000
-#define WZRD_O_MIN 1
-#define WZRD_O_MAX 128
+#define WZRD_M_MIN 2ULL
+#define WZRD_M_MAX 128ULL
+#define WZRD_D_MIN 1ULL
+#define WZRD_D_MAX 106ULL
+#define WZRD_VCO_MIN 800000000ULL
+#define WZRD_VCO_MAX 1600000000ULL
+#define WZRD_O_MIN 2ULL
+#define WZRD_O_MAX 128ULL
#define VER_WZRD_M_MIN 4
#define VER_WZRD_M_MAX 432
#define VER_WZRD_D_MIN 1
@@ -121,26 +124,24 @@ enum clk_wzrd_int_clks {
/**
* struct clk_wzrd - Clock wizard private data structure
*
- * @clk_data: Clock data
* @nb: Notifier block
* @base: Memory base
* @clk_in1: Handle to input clock 'clk_in1'
* @axi_clk: Handle to input clock 's_axi_aclk'
* @clks_internal: Internal clocks
- * @clkout: Output clocks
* @speed_grade: Speed grade of the device
* @suspended: Flag indicating power state of the device
+ * @clk_data: Output clock data
*/
struct clk_wzrd {
- struct clk_onecell_data clk_data;
struct notifier_block nb;
void __iomem *base;
struct clk *clk_in1;
struct clk *axi_clk;
- struct clk *clks_internal[wzrd_clk_int_max];
- struct clk *clkout[WZRD_NUM_OUTPUTS];
+ struct clk_hw *clks_internal[wzrd_clk_int_max];
unsigned int speed_grade;
bool suspended;
+ struct clk_hw_onecell_data clk_data;
};
/**
@@ -154,8 +155,10 @@ struct clk_wzrd {
* @flags: clk_wzrd divider flags
* @table: array of value/divider pairs, last entry should have div = 0
* @m: value of the multiplier
+ * @m_frac: fractional value of the multiplier
* @d: value of the common divider
* @o: value of the leaf divider
+ * @o_frac: value of the fractional leaf divider
* @lock: register lock
*/
struct clk_wzrd_divider {
@@ -167,8 +170,10 @@ struct clk_wzrd_divider {
u8 flags;
const struct clk_div_table *table;
u32 m;
+ u32 m_frac;
u32 d;
u32 o;
+ u32 o_frac;
spinlock_t *lock; /* divider lock */
};
@@ -373,38 +378,40 @@ static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u64 vco_freq, freq, diff, vcomin, vcomax;
- u32 m, d, o;
- u32 mmin, mmax, dmin, dmax, omin, omax;
+ u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
+ u64 m, d, o;
+ u64 mmin, mmax, dmin, dmax, omin, omax, mdmin, mdmax;
- mmin = WZRD_M_MIN;
- mmax = WZRD_M_MAX;
+ mmin = WZRD_M_MIN << 3;
+ mmax = WZRD_M_MAX << 3;
dmin = WZRD_D_MIN;
dmax = WZRD_D_MAX;
- omin = WZRD_O_MIN;
- omax = WZRD_O_MAX;
- vcomin = WZRD_VCO_MIN;
- vcomax = WZRD_VCO_MAX;
+ omin = WZRD_O_MIN << 3;
+ omax = WZRD_O_MAX << 3;
+ vcomin = WZRD_VCO_MIN << 3;
+ vcomax = WZRD_VCO_MAX << 3;
for (m = mmin; m <= mmax; m++) {
- for (d = dmin; d <= dmax; d++) {
- vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
- if (vco_freq >= vcomin && vco_freq <= vcomax) {
- for (o = omin; o <= omax; o++) {
- freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
- diff = abs(freq - rate);
-
- if (diff < WZRD_MIN_ERR) {
- divider->m = m;
- divider->d = d;
- divider->o = o;
- return 0;
- }
- }
+ mdmin = max(dmin, div64_u64(parent_rate * m + vcomax / 2, vcomax));
+ mdmax = min(dmax, div64_u64(parent_rate * m + vcomin / 2, vcomin));
+ for (d = mdmin; d <= mdmax; d++) {
+ vco_freq = DIV_ROUND_CLOSEST_ULL((parent_rate * m), d);
+ o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
+ if (o < omin || o > omax)
+ continue;
+ freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
+ diff = freq - rate;
+ if (diff < best_diff) {
+ best_diff = diff;
+ divider->m = m >> 3;
+ divider->m_frac = (m - (divider->m << 3)) * 125;
+ divider->d = d;
+ divider->o = o >> 3;
+ divider->o_frac = (o - (divider->o << 3)) * 125;
}
}
}
- return -EBUSY;
+ return best_diff < WZRD_MIN_ERR ? 0 : -EBUSY;
}
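The reworked search walks m and o in units of 1/8 (hence the << 3 applied to their limits and to the VCO bounds), then converts the leftover eighths into the thousandths the fractional registers expect via 1/8 = 125/1000. A short sketch of the bookkeeping with a hypothetical value:

u64 m_fixed = 75;				/* hypothetical: 9.375 expressed in eighths */
u32 m_int  = m_fixed >> 3;			/* integer part: 9 */
u32 m_frac = (m_fixed - (m_int << 3)) * 125;	/* 3 eighths -> 375 thousandths */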
static int clk_wzrd_reconfig(struct clk_wzrd_divider *divider, void __iomem *div_addr)
@@ -497,33 +504,22 @@ static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- unsigned long vco_freq, rate_div, clockout0_div;
void __iomem *div_addr;
- u32 reg, pre, f;
+ u32 reg;
int err;
err = clk_wzrd_get_divisors(hw, rate, parent_rate);
if (err)
return err;
- vco_freq = DIV_ROUND_CLOSEST(parent_rate * divider->m, divider->d);
- rate_div = DIV_ROUND_CLOSEST_ULL((vco_freq * WZRD_FRAC_POINTS), rate);
-
- clockout0_div = div_u64(rate_div, WZRD_FRAC_POINTS);
-
- pre = DIV_ROUND_CLOSEST_ULL(vco_freq * WZRD_FRAC_POINTS, rate);
- f = (pre - (clockout0_div * WZRD_FRAC_POINTS));
- f &= WZRD_CLKOUT_FRAC_MASK;
-
- reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, clockout0_div) |
- FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, f);
+ reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, divider->o) |
+ FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, divider->o_frac);
writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 2));
- /* Set divisor and clear phase offset */
reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->m) |
+ FIELD_PREP(WZRD_CLKFBOUT_MULT_FRAC_MASK, divider->m_frac) |
FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->d);
writel(reg, divider->base + WZRD_CLK_CFG_REG(0, 0));
- writel(divider->o, divider->base + WZRD_CLK_CFG_REG(0, 2));
writel(0, divider->base + WZRD_CLK_CFG_REG(0, 3));
div_addr = divider->base + WZRD_DR_INIT_REG_OFFSET;
return clk_wzrd_reconfig(divider, div_addr);
@@ -565,18 +561,19 @@ static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u32 m, d, o, div, reg, f;
+ u32 m, d, o, reg, f, mf;
+ u64 mul;
reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 0));
d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
+ mf = FIELD_GET(WZRD_CLKFBOUT_MULT_FRAC_MASK, reg);
reg = readl(divider->base + WZRD_CLK_CFG_REG(0, 2));
o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);
- div = DIV_ROUND_CLOSEST(d * (WZRD_FRAC_POINTS * o + f), WZRD_FRAC_POINTS);
- return divider_recalc_rate(hw, parent_rate * m, div, divider->table,
- divider->flags, divider->width);
+ mul = m * 1000 + mf;
+ return DIV_ROUND_CLOSEST_ULL(parent_rate * mul, d * (o * 1000 + f));
}
static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
@@ -649,6 +646,25 @@ static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ u32 m, d, o;
+ int err;
+
+ err = clk_wzrd_get_divisors(hw, rate, *prate);
+ if (err)
+ return err;
+
+ m = divider->m;
+ d = divider->d;
+ o = divider->o;
+
+ rate = div_u64(*prate * (m * 1000 + divider->m_frac), d * (o * 1000 + divider->o_frac));
+ return rate;
+}
+
+static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
unsigned long int_freq;
u32 m, d, o, div, f;
int err;
@@ -679,7 +695,7 @@ static const struct clk_ops clk_wzrd_ver_divider_ops = {
};
static const struct clk_ops clk_wzrd_ver_div_all_ops = {
- .round_rate = clk_wzrd_round_rate_all,
+ .round_rate = clk_wzrd_ver_round_rate_all,
.set_rate = clk_wzrd_dynamic_all_ver,
.recalc_rate = clk_wzrd_recalc_rate_all_ver,
};
@@ -765,7 +781,7 @@ static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
.recalc_rate = clk_wzrd_recalc_ratef,
};
-static struct clk *clk_wzrd_register_divf(struct device *dev,
+static struct clk_hw *clk_wzrd_register_divf(struct device *dev,
const char *name,
const char *parent_name,
unsigned long flags,
@@ -805,10 +821,10 @@ static struct clk *clk_wzrd_register_divf(struct device *dev,
if (ret)
return ERR_PTR(ret);
- return hw->clk;
+ return hw;
}
-static struct clk *clk_wzrd_ver_register_divider(struct device *dev,
+static struct clk_hw *clk_wzrd_ver_register_divider(struct device *dev,
const char *name,
const char *parent_name,
unsigned long flags,
@@ -852,10 +868,10 @@ static struct clk *clk_wzrd_ver_register_divider(struct device *dev,
if (ret)
return ERR_PTR(ret);
- return hw->clk;
+ return hw;
}
-static struct clk *clk_wzrd_register_divider(struct device *dev,
+static struct clk_hw *clk_wzrd_register_divider(struct device *dev,
const char *name,
const char *parent_name,
unsigned long flags,
@@ -898,7 +914,7 @@ static struct clk *clk_wzrd_register_divider(struct device *dev,
if (ret)
return ERR_PTR(ret);
- return hw->clk;
+ return hw;
}
static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
@@ -963,81 +979,30 @@ static const struct versal_clk_data versal_data = {
.is_versal = true,
};
-static int clk_wzrd_probe(struct platform_device *pdev)
+static int clk_wzrd_register_output_clocks(struct device *dev, int nr_outputs)
{
const char *clkout_name, *clk_name, *clk_mul_name;
+ struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);
u32 regl, regh, edge, regld, reghd, edged, div;
- struct device_node *np = pdev->dev.of_node;
const struct versal_clk_data *data;
- struct clk_wzrd *clk_wzrd;
unsigned long flags = 0;
+ bool is_versal = false;
void __iomem *ctrl_reg;
u32 reg, reg_f, mult;
- bool is_versal = false;
- unsigned long rate;
- int nr_outputs;
- int i, ret;
-
- clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
- if (!clk_wzrd)
- return -ENOMEM;
- platform_set_drvdata(pdev, clk_wzrd);
-
- clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(clk_wzrd->base))
- return PTR_ERR(clk_wzrd->base);
-
- ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
- if (!ret) {
- if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
- dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
- clk_wzrd->speed_grade);
- clk_wzrd->speed_grade = 0;
- }
- }
-
- clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
- if (IS_ERR(clk_wzrd->clk_in1))
- return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
- "clk_in1 not found\n");
-
- clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(clk_wzrd->axi_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
- "s_axi_aclk not found\n");
- ret = clk_prepare_enable(clk_wzrd->axi_clk);
- if (ret) {
- dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
- return ret;
- }
- rate = clk_get_rate(clk_wzrd->axi_clk);
- if (rate > WZRD_ACLK_MAX_FREQ) {
- dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
- rate);
- ret = -EINVAL;
- goto err_disable_clk;
- }
+ int i;
- data = device_get_match_data(&pdev->dev);
+ data = device_get_match_data(dev);
if (data)
is_versal = data->is_versal;
- ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
- if (ret || nr_outputs > WZRD_NUM_OUTPUTS) {
- ret = -EINVAL;
- goto err_disable_clk;
- }
-
- clkout_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_out0", dev_name(&pdev->dev));
- if (!clkout_name) {
- ret = -ENOMEM;
- goto err_disable_clk;
- }
+ clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out0", dev_name(dev));
+ if (!clkout_name)
+ return -ENOMEM;
if (is_versal) {
if (nr_outputs == 1) {
- clk_wzrd->clkout[0] = clk_wzrd_ver_register_divider
- (&pdev->dev, clkout_name,
+ clk_wzrd->clk_data.hws[0] = clk_wzrd_ver_register_divider
+ (dev, clkout_name,
__clk_get_name(clk_wzrd->clk_in1), 0,
clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
WZRD_CLKOUT_DIVIDE_SHIFT,
@@ -1045,7 +1010,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
DIV_ALL, &clkwzrd_lock);
- goto out;
+ return 0;
}
/* register multiplier */
edge = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0)) &
@@ -1069,8 +1034,8 @@ static int clk_wzrd_probe(struct platform_device *pdev)
div = 64;
} else {
if (nr_outputs == 1) {
- clk_wzrd->clkout[0] = clk_wzrd_register_divider
- (&pdev->dev, clkout_name,
+ clk_wzrd->clk_data.hws[0] = clk_wzrd_register_divider
+ (dev, clkout_name,
__clk_get_name(clk_wzrd->clk_in1), 0,
clk_wzrd->base, WZRD_CLK_CFG_REG(is_versal, 3),
WZRD_CLKOUT_DIVIDE_SHIFT,
@@ -1078,7 +1043,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
DIV_ALL, &clkwzrd_lock);
- goto out;
+ return 0;
}
reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0));
reg_f = reg & WZRD_CLKFBOUT_FRAC_MASK;
@@ -1089,26 +1054,21 @@ static int clk_wzrd_probe(struct platform_device *pdev)
mult = (reg * 1000) + reg_f;
div = 1000;
}
- clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
- if (!clk_name) {
- ret = -ENOMEM;
- goto err_disable_clk;
- }
- clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
- (&pdev->dev, clk_name,
+ clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul", dev_name(dev));
+ if (!clk_name)
+ return -ENOMEM;
+ clk_wzrd->clks_internal[wzrd_clk_mul] = devm_clk_hw_register_fixed_factor
+ (dev, clk_name,
__clk_get_name(clk_wzrd->clk_in1),
0, mult, div);
if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
- dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
- ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
- goto err_disable_clk;
+ dev_err(dev, "unable to register fixed-factor clock\n");
+ return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
}
- clk_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
- if (!clk_name) {
- ret = -ENOMEM;
- goto err_rm_int_clk;
- }
+ clk_name = devm_kasprintf(dev, GFP_KERNEL, "%s_mul_div", dev_name(dev));
+ if (!clk_name)
+ return -ENOMEM;
if (is_versal) {
edged = !!(readl(clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 20)) &
@@ -1121,36 +1081,31 @@ static int clk_wzrd_probe(struct platform_device *pdev)
if (!div)
div = 1;
- clk_mul_name = __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]);
+ clk_mul_name = clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]);
clk_wzrd->clks_internal[wzrd_clk_mul_div] =
- clk_register_fixed_factor(&pdev->dev, clk_name,
- clk_mul_name, 0, 1, div);
+ devm_clk_hw_register_fixed_factor(dev, clk_name, clk_mul_name, 0, 1, div);
} else {
ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(is_versal, 0);
- clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider
- (&pdev->dev, clk_name,
- __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
+ clk_wzrd->clks_internal[wzrd_clk_mul_div] = devm_clk_hw_register_divider
+ (dev, clk_name,
+ clk_hw_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
}
if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
- dev_err(&pdev->dev, "unable to register divider clock\n");
- ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
- goto err_rm_int_clk;
+ dev_err(dev, "unable to register divider clock\n");
+ return PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
}
/* register div per output */
for (i = nr_outputs - 1; i >= 0 ; i--) {
- clkout_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "%s_out%d", dev_name(&pdev->dev), i);
- if (!clkout_name) {
- ret = -ENOMEM;
- goto err_rm_int_clk;
- }
+ clkout_name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%d", dev_name(dev), i);
+ if (!clkout_name)
+ return -ENOMEM;
if (is_versal) {
- clk_wzrd->clkout[i] = clk_wzrd_ver_register_divider
- (&pdev->dev,
+ clk_wzrd->clk_data.hws[i] = clk_wzrd_ver_register_divider
+ (dev,
clkout_name, clk_name, 0,
clk_wzrd->base,
(WZRD_CLK_CFG_REG(is_versal, 3) + i * 8),
@@ -1161,84 +1116,108 @@ static int clk_wzrd_probe(struct platform_device *pdev)
DIV_O, &clkwzrd_lock);
} else {
if (!i)
- clk_wzrd->clkout[i] = clk_wzrd_register_divf
- (&pdev->dev, clkout_name, clk_name, flags, clk_wzrd->base,
+ clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divf
+ (dev, clkout_name, clk_name, flags, clk_wzrd->base,
(WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
DIV_O, &clkwzrd_lock);
else
- clk_wzrd->clkout[i] = clk_wzrd_register_divider
- (&pdev->dev, clkout_name, clk_name, 0, clk_wzrd->base,
+ clk_wzrd->clk_data.hws[i] = clk_wzrd_register_divider
+ (dev, clkout_name, clk_name, 0, clk_wzrd->base,
(WZRD_CLK_CFG_REG(is_versal, 2) + i * 12),
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
DIV_O, &clkwzrd_lock);
}
- if (IS_ERR(clk_wzrd->clkout[i])) {
- int j;
-
- for (j = i + 1; j < nr_outputs; j++)
- clk_unregister(clk_wzrd->clkout[j]);
- dev_err(&pdev->dev,
- "unable to register divider clock\n");
- ret = PTR_ERR(clk_wzrd->clkout[i]);
- goto err_rm_int_clks;
+ if (IS_ERR(clk_wzrd->clk_data.hws[i])) {
+ dev_err(dev, "unable to register divider clock\n");
+ return PTR_ERR(clk_wzrd->clk_data.hws[i]);
}
}
-out:
- clk_wzrd->clk_data.clks = clk_wzrd->clkout;
- clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);
+ return 0;
+}
+
+static int clk_wzrd_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct clk_wzrd *clk_wzrd;
+ unsigned long rate;
+ int nr_outputs;
+ int ret;
+
+ ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
+ if (ret || nr_outputs > WZRD_NUM_OUTPUTS)
+ return -EINVAL;
- if (clk_wzrd->speed_grade) {
- clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
+ clk_wzrd = devm_kzalloc(&pdev->dev, struct_size(clk_wzrd, clk_data.hws, nr_outputs),
+ GFP_KERNEL);
+ if (!clk_wzrd)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, clk_wzrd);
- ret = clk_notifier_register(clk_wzrd->clk_in1,
- &clk_wzrd->nb);
- if (ret)
- dev_warn(&pdev->dev,
- "unable to register clock notifier\n");
+ clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(clk_wzrd->base))
+ return PTR_ERR(clk_wzrd->base);
- ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
- if (ret)
- dev_warn(&pdev->dev,
- "unable to register clock notifier\n");
+ clk_wzrd->axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(clk_wzrd->axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
+ "s_axi_aclk not found\n");
+ rate = clk_get_rate(clk_wzrd->axi_clk);
+ if (rate > WZRD_ACLK_MAX_FREQ) {
+ dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n", rate);
+ return -EINVAL;
}
- return 0;
-
-err_rm_int_clks:
- clk_unregister(clk_wzrd->clks_internal[1]);
-err_rm_int_clk:
- clk_unregister(clk_wzrd->clks_internal[0]);
-err_disable_clk:
- clk_disable_unprepare(clk_wzrd->axi_clk);
+ if (!of_property_present(np, "xlnx,static-config")) {
+ ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
+ if (!ret) {
+ if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
+ dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
+ clk_wzrd->speed_grade);
+ clk_wzrd->speed_grade = 0;
+ }
+ }
- return ret;
-}
+ clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
+ if (IS_ERR(clk_wzrd->clk_in1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
+ "clk_in1 not found\n");
-static void clk_wzrd_remove(struct platform_device *pdev)
-{
- int i;
- struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);
+ ret = clk_wzrd_register_output_clocks(&pdev->dev, nr_outputs);
+ if (ret)
+ return ret;
+
+ clk_wzrd->clk_data.num = nr_outputs;
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
+ &clk_wzrd->clk_data);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register clock provider\n");
+ return ret;
+ }
- of_clk_del_provider(pdev->dev.of_node);
+ if (clk_wzrd->speed_grade) {
+ clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;
- for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
- clk_unregister(clk_wzrd->clkout[i]);
- for (i = 0; i < wzrd_clk_int_max; i++)
- clk_unregister(clk_wzrd->clks_internal[i]);
+ ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->clk_in1,
+ &clk_wzrd->nb);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "unable to register clock notifier\n");
- if (clk_wzrd->speed_grade) {
- clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
- clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
+ ret = devm_clk_notifier_register(&pdev->dev, clk_wzrd->axi_clk,
+ &clk_wzrd->nb);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "unable to register clock notifier\n");
+ }
}
- clk_disable_unprepare(clk_wzrd->axi_clk);
+ return 0;
}
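clk_data is now a struct clk_hw_onecell_data placed last in struct clk_wzrd so that its hws[] flexible array can be sized per instance; struct_size() from <linux/overflow.h> computes sizeof(*clk_wzrd) plus nr_outputs trailing pointers and saturates to SIZE_MAX on overflow, so the allocation fails cleanly. A minimal sketch of the pattern with a stand-in struct:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo {
	unsigned int num;
	void *hws[];		/* flexible array member, must be last */
};

static struct demo *demo_alloc(unsigned int n)
{
	struct demo *d;

	/* one allocation covers the struct and its n trailing pointers;
	 * struct_size() saturates rather than wrapping on overflow */
	d = kzalloc(struct_size(d, hws, n), GFP_KERNEL);
	return d;
}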
static const struct of_device_id clk_wzrd_ids[] = {
@@ -1257,7 +1236,6 @@ static struct platform_driver clk_wzrd_driver = {
.pm = &clk_wzrd_dev_pm_ops,
},
.probe = clk_wzrd_probe,
- .remove = clk_wzrd_remove,
};
module_platform_driver(clk_wzrd_driver);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 95dd4660b5b6..487c85259967 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -400,7 +400,8 @@ config ARM_GT_INITIAL_PRESCALER_VAL
This affects CPU_FREQ max delta from the initial frequency.
config ARM_TIMER_SP804
- bool "Support for Dual Timer SP804 module" if COMPILE_TEST
+ bool "Support for Dual Timer SP804 module"
+ depends on ARM || ARM64 || COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && HAVE_CLK
select CLKSRC_MMIO
select TIMER_OF if OF
@@ -753,4 +754,13 @@ config EP93XX_TIMER
Enables support for the Cirrus Logic timer block
EP93XX.
+config RALINK_TIMER
+ bool "Ralink System Tick Counter"
+ depends on SOC_RT305X || SOC_MT7620 || COMPILE_TEST
+ select CLKSRC_MMIO
+ select TIMER_OF
+ help
+	  Enables support for the system tick counter present on
+	  Ralink RT3352 and MT7620 SoCs.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 22743785299e..43ef16a4efa6 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -91,3 +91,4 @@ obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o
obj-$(CONFIG_GXP_TIMER) += timer-gxp.o
obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
obj-$(CONFIG_EP93XX_TIMER) += timer-ep93xx.o
+obj-$(CONFIG_RALINK_TIMER) += timer-ralink.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 03733101e231..808f259781fd 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1179,8 +1179,6 @@ static void arch_timer_stop(struct clock_event_device *clk)
disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
if (arch_timer_has_nonsecure_ppi())
disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
-
- clk->set_state_shutdown(clk);
}
static int arch_timer_dying_cpu(unsigned int cpu)
@@ -1430,7 +1428,7 @@ static int __init arch_timer_of_init(struct device_node *np)
arch_timers_present |= ARCH_TIMER_TYPE_CP15;
- has_names = of_property_read_bool(np, "interrupt-names");
+ has_names = of_property_present(np, "interrupt-names");
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
if (has_names)
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index a05cfaab5f84..2d86bbc2764a 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -195,7 +195,6 @@ static int gt_dying_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(gt_evt);
- gt_clockevent_shutdown(clk);
disable_percpu_irq(clk->irq);
return 0;
}
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index f5f24a95ee82..3a55ae5fe225 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -68,25 +68,6 @@ static inline void apbt_writel_relaxed(struct dw_apb_timer *timer, u32 val,
writel_relaxed(val, timer->base + offs);
}
-static void apbt_disable_int(struct dw_apb_timer *timer)
-{
- u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
-
- ctrl |= APBTMR_CONTROL_INT;
- apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
-}
-
-/**
- * dw_apb_clockevent_pause() - stop the clock_event_device from running
- *
- * @dw_ced: The APB clock to stop generating events.
- */
-void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced)
-{
- disable_irq(dw_ced->timer.irq);
- apbt_disable_int(&dw_ced->timer);
-}
-
static void apbt_eoi(struct dw_apb_timer *timer)
{
apbt_readl_relaxed(timer, APBTMR_N_EOI);
@@ -285,26 +266,6 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
}
/**
- * dw_apb_clockevent_resume() - resume a clock that has been paused.
- *
- * @dw_ced: The APB clock to resume.
- */
-void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced)
-{
- enable_irq(dw_ced->timer.irq);
-}
-
-/**
- * dw_apb_clockevent_stop() - stop the clock_event_device and release the IRQ.
- *
- * @dw_ced: The APB clock to stop generating the events.
- */
-void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced)
-{
- free_irq(dw_ced->timer.irq, &dw_ced->ced);
-}
-
-/**
* dw_apb_clockevent_register() - register the clock with the generic layer
*
* @dw_ced: The APB clock to register as a clock_event_device.
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index ef8cb1b71be4..e6a02e351d77 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -496,7 +496,6 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
- evt->set_state_shutdown(evt);
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 99177835cade..f00019b078a7 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -23,11 +23,12 @@
#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <clocksource/hyperv_timer.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
static struct clock_event_device __percpu *hv_clock_event;
-static u64 hv_sched_clock_offset __ro_after_init;
+/* Note: offset can hold negative values after hibernation. */
+static u64 hv_sched_clock_offset __read_mostly;
/*
* If false, we're using the old mechanism for stimer0 interrupts
@@ -470,6 +471,17 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
+/*
+ * Called during resume from hibernation, from overridden
+ * x86_platform.restore_sched_clock_state routine. This is to adjust offsets
+ * used to calculate time for hv tsc page based sched_clock, to account for
+ * time spent before hibernation.
+ */
+void hv_adj_sched_clock_offset(u64 offset)
+{
+ hv_sched_clock_offset -= offset;
+}
+
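The sign works out because the TSC-page sched_clock is computed as now = read_counter() - hv_sched_clock_offset; subtracting the hibernated interval from the offset therefore adds it to every subsequent reading: tsc - (offset - slept) = (tsc - offset) + slept. That is also why the variable can no longer be __ro_after_init and, per the note above, may end up holding a negative (wrapped) value.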
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
static int hv_cs_enable(struct clocksource *cs)
{
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 110347707ff9..7907b740497a 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -166,6 +166,37 @@ static u64 gic_hpt_read(struct clocksource *cs)
return gic_read_count();
}
+static u64 gic_hpt_read_multicluster(struct clocksource *cs)
+{
+ unsigned int hi, hi2, lo;
+ u64 count;
+
+ mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+ if (mips_cm_is64) {
+ count = read_gic_redir_counter();
+ goto out;
+ }
+
+ hi = read_gic_redir_counter_32h();
+ while (true) {
+ lo = read_gic_redir_counter_32l();
+
+ /* If hi didn't change then lo didn't wrap & we're done */
+ hi2 = read_gic_redir_counter_32h();
+ if (hi2 == hi)
+ break;
+
+ /* Otherwise, repeat with the latest hi value */
+ hi = hi2;
+ }
+
+ count = (((u64)hi) << 32) + lo;
+out:
+ mips_cm_unlock_other();
+ return count;
+}
+
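The loop above is the standard split-read pattern for a 64-bit counter exposed as two 32-bit halves: re-read the high word until it is stable, which proves the low word did not wrap between the two high reads. A generic sketch, with hypothetical read_hi()/read_lo() MMIO accessors:

static u64 read_split_counter(void)
{
	u32 hi, hi2, lo;

	hi = read_hi();
	for (;;) {
		lo = read_lo();
		hi2 = read_hi();
		if (hi2 == hi)
			break;		/* lo did not wrap; value is consistent */
		hi = hi2;		/* wrapped between reads; retry */
	}
	return ((u64)hi << 32) | lo;
}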
static struct clocksource gic_clocksource = {
.name = "GIC",
.read = gic_hpt_read,
@@ -203,6 +234,11 @@ static int __init __gic_clocksource_init(void)
gic_clocksource.rating = 200;
gic_clocksource.rating += clamp(gic_frequency / 10000000, 0, 99);
+ if (mips_cps_multicluster_cpus()) {
+ gic_clocksource.read = &gic_hpt_read_multicluster;
+ gic_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+ }
+
ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
if (ret < 0)
pr_warn("Unable to register clocksource\n");
@@ -261,7 +297,8 @@ static int __init gic_clocksource_of_init(struct device_node *node)
* stable CPU frequency or on the platforms with CM3 and CPU frequency
* change performed by the CPC core clocks divider.
*/
- if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) {
+ if ((mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) &&
+ !mips_cps_multicluster_cpus()) {
sched_clock_register(mips_cm_is64 ?
gic_read_count_64 : gic_read_count_2x32,
gic_count_width, gic_frequency);
diff --git a/drivers/clocksource/timer-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index 6ec565d6939a..54284c1c0651 100644
--- a/drivers/clocksource/timer-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
@@ -201,7 +201,6 @@ static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
- evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
return 0;
}
diff --git a/drivers/clocksource/timer-gxp.c b/drivers/clocksource/timer-gxp.c
index 57aa2e2cce53..48a73c101eb8 100644
--- a/drivers/clocksource/timer-gxp.c
+++ b/drivers/clocksource/timer-gxp.c
@@ -85,7 +85,7 @@ static int __init gxp_timer_init(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
- ret = (int)PTR_ERR(clk);
+ ret = PTR_ERR(clk);
pr_err("%pOFn clock not found: %d\n", node, ret);
goto err_free;
}
diff --git a/drivers/clocksource/timer-qcom.c b/drivers/clocksource/timer-qcom.c
index eac4c95c6127..ddb1debe6a6b 100644
--- a/drivers/clocksource/timer-qcom.c
+++ b/drivers/clocksource/timer-qcom.c
@@ -130,7 +130,6 @@ static int msm_local_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
- evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
return 0;
}
diff --git a/drivers/clocksource/timer-ralink.c b/drivers/clocksource/timer-ralink.c
new file mode 100644
index 000000000000..6ecdb4228f76
--- /dev/null
+++ b/drivers/clocksource/timer-ralink.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Ralink System Tick Counter present on RT3352 and MT7620 SoCs.
+ *
+ * Copyright (C) 2013 by John Crispin <john@phrozen.org>
+ */
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#define SYSTICK_FREQ (50 * 1000)
+
+#define SYSTICK_CONFIG 0x00
+#define SYSTICK_COMPARE 0x04
+#define SYSTICK_COUNT 0x08
+
+/* route systick irq to mips irq 7 instead of the r4k-timer */
+#define CFG_EXT_STK_EN 0x2
+/* enable the counter */
+#define CFG_CNT_EN 0x1
+
+struct systick_device {
+ void __iomem *membase;
+ struct clock_event_device dev;
+ int irq_requested;
+ int freq_scale;
+};
+
+static int systick_set_oneshot(struct clock_event_device *evt);
+static int systick_shutdown(struct clock_event_device *evt);
+
+static int systick_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct systick_device *sdev;
+ u32 count;
+
+ sdev = container_of(evt, struct systick_device, dev);
+ count = ioread32(sdev->membase + SYSTICK_COUNT);
+ count = (count + delta) % SYSTICK_FREQ;
+ iowrite32(count, sdev->membase + SYSTICK_COMPARE);
+
+ return 0;
+}
+
+static void systick_event_handler(struct clock_event_device *dev)
+{
+ /* nothing to do here */
+}
+
+static irqreturn_t systick_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *dev = (struct clock_event_device *)dev_id;
+
+ dev->event_handler(dev);
+
+ return IRQ_HANDLED;
+}
+
+static struct systick_device systick = {
+ .dev = {
+ /*
+ * cevt-r4k uses 300, make sure systick
+ * gets used if available
+ */
+ .rating = 310,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = systick_next_event,
+ .set_state_shutdown = systick_shutdown,
+ .set_state_oneshot = systick_set_oneshot,
+ .event_handler = systick_event_handler,
+ },
+};
+
+static int systick_shutdown(struct clock_event_device *evt)
+{
+ struct systick_device *sdev;
+
+ sdev = container_of(evt, struct systick_device, dev);
+
+ if (sdev->irq_requested)
+ free_irq(systick.dev.irq, &systick.dev);
+ sdev->irq_requested = 0;
+ iowrite32(0, systick.membase + SYSTICK_CONFIG);
+
+ return 0;
+}
+
+static int systick_set_oneshot(struct clock_event_device *evt)
+{
+ const char *name = systick.dev.name;
+ struct systick_device *sdev;
+ int irq = systick.dev.irq;
+
+ sdev = container_of(evt, struct systick_device, dev);
+
+ if (!sdev->irq_requested) {
+ if (request_irq(irq, systick_interrupt,
+ IRQF_PERCPU | IRQF_TIMER, name, &systick.dev))
+ pr_err("Failed to request irq %d (%s)\n", irq, name);
+ }
+ sdev->irq_requested = 1;
+ iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
+ systick.membase + SYSTICK_CONFIG);
+
+ return 0;
+}
+
+static int __init ralink_systick_init(struct device_node *np)
+{
+ int ret;
+
+ systick.membase = of_iomap(np, 0);
+ if (!systick.membase)
+ return -ENXIO;
+
+ systick.dev.name = np->name;
+ clockevents_calc_mult_shift(&systick.dev, SYSTICK_FREQ, 60);
+ systick.dev.max_delta_ns = clockevent_delta2ns(0x7fff, &systick.dev);
+ systick.dev.max_delta_ticks = 0x7fff;
+ systick.dev.min_delta_ns = clockevent_delta2ns(0x3, &systick.dev);
+ systick.dev.min_delta_ticks = 0x3;
+ systick.dev.irq = irq_of_parse_and_map(np, 0);
+ if (!systick.dev.irq) {
+ pr_err("%pOFn: request_irq failed", np);
+ return -EINVAL;
+ }
+
+ ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+ SYSTICK_FREQ, 301, 16,
+ clocksource_mmio_readl_up);
+ if (ret)
+ return ret;
+
+ clockevents_register_device(&systick.dev);
+
+ pr_info("%pOFn: running - mult: %d, shift: %d\n",
+ np, systick.dev.mult, systick.dev.shift);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 0d229a9058da..6b48a9006444 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -318,7 +318,7 @@ MODULE_DEVICE_TABLE(of, sun5i_timer_of_match);
static struct platform_driver sun5i_timer_driver = {
.probe = sun5i_timer_probe,
- .remove_new = sun5i_timer_remove,
+ .remove = sun5i_timer_remove,
.driver = {
.name = "sun5i-timer",
.of_match_table = sun5i_timer_of_match,
diff --git a/drivers/clocksource/timer-tegra.c b/drivers/clocksource/timer-tegra.c
index e9635c25eef4..35b6ce9deffa 100644
--- a/drivers/clocksource/timer-tegra.c
+++ b/drivers/clocksource/timer-tegra.c
@@ -158,7 +158,6 @@ static int tegra_timer_stop(unsigned int cpu)
{
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
- to->clkevt.set_state_shutdown(&to->clkevt);
disable_irq_nosync(to->clkevt.irq);
return 0;
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index 304537dadf2c..5d4cf5237a11 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -502,7 +502,7 @@ static struct platform_driver tegra186_wdt_driver = {
.of_match_table = tegra186_timer_of_match,
},
.probe = tegra186_timer_probe,
- .remove_new = tegra186_timer_remove,
+ .remove = tegra186_timer_remove,
};
module_platform_driver(tegra186_wdt_driver);
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index c2dcd8d68e45..985a6d08512b 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -202,10 +202,10 @@ static bool __init dmtimer_is_preferred(struct device_node *np)
/* Secure gptimer12 is always clocked with a fixed source */
if (!of_property_read_bool(np, "ti,timer-secure")) {
- if (!of_property_read_bool(np, "assigned-clocks"))
+ if (!of_property_present(np, "assigned-clocks"))
return false;
- if (!of_property_read_bool(np, "assigned-clock-parents"))
+ if (!of_property_present(np, "assigned-clock-parents"))
return false;
}
@@ -686,9 +686,9 @@ subsys_initcall(dmtimer_percpu_timer_startup);
static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
{
- struct device_node *arm_timer;
+ struct device_node *arm_timer __free(device_node) =
+ of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
- arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
if (of_device_is_available(arm_timer)) {
pr_warn_once("ARM architected timer wrap issue i940 detected\n");
return 0;
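__free(device_node) is the scope-based cleanup helper from <linux/cleanup.h>; <linux/of.h> defines the device_node cleanup class, so of_node_put() runs automatically on every return path. A minimal sketch of the pattern:

#include <linux/cleanup.h>
#include <linux/of.h>

static bool arm_timer_present(void)
{
	struct device_node *np __free(device_node) =
		of_find_compatible_node(NULL, NULL, "arm,armv7-timer");

	/* no explicit of_node_put(): it is emitted when np leaves scope */
	return of_device_is_available(np);
}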
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index b7a34b1a975e..e9e32df6b566 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -1104,8 +1104,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
return -ENOMEM;
timer->irq = platform_get_irq(pdev, 0);
- if (timer->irq < 0)
- return timer->irq;
+ if (timer->irq < 0) {
+ if (of_property_read_bool(dev->of_node, "ti,timer-pwm"))
+ dev_info(dev, "Did not find timer interrupt, timer usable in PWM mode only\n");
+ else
+ return timer->irq;
+ }
timer->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->io_base))
@@ -1291,7 +1295,7 @@ MODULE_DEVICE_TABLE(of, omap_timer_match);
static struct platform_driver omap_dm_timer_driver = {
.probe = omap_dm_timer_probe,
- .remove_new = omap_dm_timer_remove,
+ .remove = omap_dm_timer_remove,
.driver = {
.name = "omap_timer",
.of_match_table = omap_timer_match,
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 1b481731df96..b9df9b19d4bd 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -2407,6 +2407,18 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
start += PAGE_SIZE;
}
+
+#ifdef CONFIG_MMU
+ /*
+ * Leaving behind a partial mapping of a buffer we're about to
+ * drop is unsafe, see remap_pfn_range_notrack().
+ * We need to zap the range here ourselves instead of relying
+ * on the automatic zapping in remap_pfn_range() because we call
+ * remap_pfn_range() in a loop.
+ */
+ if (retval)
+ zap_vma_ptes(vma, vma->vm_start, size);
+#endif
}
if (retval == 0) {
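The situation being guarded against looks roughly like the following: remap_pfn_range() is invoked once per page, so a failure midway leaves the earlier pages mapped, and only an explicit zap removes them before the error is propagated. A simplified sketch (pfns[], n_pages and size are stand-ins for the driver's own bookkeeping):

unsigned long start = vma->vm_start;
int retval = 0, i;

for (i = 0; i < n_pages; i++) {
	retval = remap_pfn_range(vma, start, pfns[i], PAGE_SIZE,
				 vma->vm_page_prot);
	if (retval)
		break;			/* pages [0, i) stay mapped ... */
	start += PAGE_SIZE;
}
if (retval)
	zap_vma_ptes(vma, vma->vm_start, size);	/* ... so zap them all */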
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 4a6868b8f58b..ce81fc4e1ae7 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -1360,4 +1360,4 @@ module_isa_driver_with_irq(quad8_driver, num_quad8, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-QUAD-8 driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
index 3ee75e1a78cd..23fdf0caf712 100644
--- a/drivers/counter/counter-chrdev.c
+++ b/drivers/counter/counter-chrdev.c
@@ -672,4 +672,4 @@ exit_early:
if (copied)
wake_up_poll(&counter->events_wait, EPOLLIN);
}
-EXPORT_SYMBOL_NS_GPL(counter_push_event, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_push_event, "COUNTER");
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
index 893b4f0726d2..50bd30ba3d03 100644
--- a/drivers/counter/counter-core.c
+++ b/drivers/counter/counter-core.c
@@ -74,7 +74,7 @@ void *counter_priv(const struct counter_device *const counter)
return &ch->privdata;
}
-EXPORT_SYMBOL_NS_GPL(counter_priv, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_priv, "COUNTER");
/**
* counter_alloc - allocate a counter_device
@@ -134,13 +134,13 @@ err_ida_alloc:
return NULL;
}
-EXPORT_SYMBOL_NS_GPL(counter_alloc, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_alloc, "COUNTER");
void counter_put(struct counter_device *counter)
{
put_device(&counter->dev);
}
-EXPORT_SYMBOL_NS_GPL(counter_put, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_put, "COUNTER");
/**
* counter_add - complete registration of a counter
@@ -167,7 +167,7 @@ int counter_add(struct counter_device *counter)
/* implies device_add(dev) */
return cdev_device_add(&counter->chrdev, dev);
}
-EXPORT_SYMBOL_NS_GPL(counter_add, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_add, "COUNTER");
/**
* counter_unregister - unregister Counter from the system
@@ -189,7 +189,7 @@ void counter_unregister(struct counter_device *const counter)
mutex_unlock(&counter->ops_exist_lock);
}
-EXPORT_SYMBOL_NS_GPL(counter_unregister, COUNTER);
+EXPORT_SYMBOL_NS_GPL(counter_unregister, "COUNTER");
static void devm_counter_release(void *counter)
{
@@ -224,7 +224,7 @@ struct counter_device *devm_counter_alloc(struct device *dev, size_t sizeof_priv
return counter;
}
-EXPORT_SYMBOL_NS_GPL(devm_counter_alloc, COUNTER);
+EXPORT_SYMBOL_NS_GPL(devm_counter_alloc, "COUNTER");
/**
* devm_counter_add - complete registration of a counter
@@ -245,7 +245,7 @@ int devm_counter_add(struct device *dev,
return devm_add_action_or_reset(dev, devm_counter_release, counter);
}
-EXPORT_SYMBOL_NS_GPL(devm_counter_add, COUNTER);
+EXPORT_SYMBOL_NS_GPL(devm_counter_add, "COUNTER");
#define COUNTER_DEV_MAX 256
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 200876f3ec04..c47741292ae1 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -311,6 +311,7 @@ static const struct of_device_id ftm_quaddec_match[] = {
{ .compatible = "fsl,ftm-quaddec" },
{},
};
+MODULE_DEVICE_TABLE(of, ftm_quaddec_match);
static struct platform_driver ftm_quaddec_driver = {
.driver = {
@@ -326,4 +327,4 @@ MODULE_DESCRIPTION("Flex Timer Module Quadrature decoder");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kjeld Flarup <kfa@deif.com>");
MODULE_AUTHOR("Patrick Havelange <patrick.havelange@essensium.com>");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/i8254.c b/drivers/counter/i8254.c
index 6d74e8ef92f0..95ad928725ec 100644
--- a/drivers/counter/i8254.c
+++ b/drivers/counter/i8254.c
@@ -439,9 +439,9 @@ int devm_i8254_regmap_register(struct device *const dev,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_i8254_regmap_register, I8254);
+EXPORT_SYMBOL_NS_GPL(devm_i8254_regmap_register, "I8254");
MODULE_AUTHOR("William Breathitt Gray");
MODULE_DESCRIPTION("Intel 8254 Programmable Interval Timer");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c
index af5942e66f7d..c49c178056f4 100644
--- a/drivers/counter/intel-qep.c
+++ b/drivers/counter/intel-qep.c
@@ -408,13 +408,9 @@ static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
pci_set_master(pci);
- ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
- if (ret)
- return ret;
-
- regs = pcim_iomap_table(pci)[0];
- if (!regs)
- return -ENOMEM;
+ regs = pcim_iomap_region(pci, 0, pci_name(pci));
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
qep->dev = dev;
qep->regs = regs;
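pcim_iomap_region() collapses the request-regions/iomap/table-lookup sequence into one managed call; unlike the table lookup, it reports failure as an ERR_PTR rather than NULL, which is why the error handling changes shape. A sketch of the resulting idiom, mirroring the code above:

void __iomem *regs;

regs = pcim_iomap_region(pci, 0, pci_name(pci));	/* BAR 0, devres-managed */
if (IS_ERR(regs))
	return PTR_ERR(regs);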
@@ -523,4 +519,4 @@ MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
index 229473855c5b..949598d51575 100644
--- a/drivers/counter/interrupt-cnt.c
+++ b/drivers/counter/interrupt-cnt.c
@@ -253,4 +253,4 @@ MODULE_ALIAS("platform:interrupt-counter");
MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Interrupt counter driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index b3e615cbd2ca..2f096a5b973d 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -403,4 +403,4 @@ module_platform_driver(mchp_tc_driver);
MODULE_AUTHOR("Kamel Bouhara <kamel.bouhara@bootlin.com>");
MODULE_DESCRIPTION("Microchip TCB Capture driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/rz-mtu3-cnt.c b/drivers/counter/rz-mtu3-cnt.c
index ee821493b166..e755d54dfece 100644
--- a/drivers/counter/rz-mtu3-cnt.c
+++ b/drivers/counter/rz-mtu3-cnt.c
@@ -903,4 +903,4 @@ MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
MODULE_ALIAS("platform:rz-mtu3-counter");
MODULE_DESCRIPTION("Renesas RZ/G2L MTU3a counter driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index 8439755559b2..cf73f65baf60 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -520,4 +520,4 @@ MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
MODULE_ALIAS("platform:stm32-lptimer-counter");
MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM counter driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 186e73d6ccb4..e75b69476a00 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -214,11 +214,17 @@ static int stm32_count_enable_write(struct counter_device *counter,
{
struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
+ int ret;
if (enable) {
regmap_read(priv->regmap, TIM_CR1, &cr1);
- if (!(cr1 & TIM_CR1_CEN))
- clk_enable(priv->clk);
+ if (!(cr1 & TIM_CR1_CEN)) {
+ ret = clk_enable(priv->clk);
+ if (ret) {
+ dev_err(counter->parent, "Cannot enable clock %d\n", ret);
+ return ret;
+ }
+ }
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
TIM_CR1_CEN);
@@ -694,6 +700,7 @@ static int stm32_timer_cnt_probe_encoder(struct device *dev,
}
ret = of_property_read_u32(tnode, "reg", &idx);
+ of_node_put(tnode);
if (ret) {
dev_err(dev, "Can't get index (%d)\n", ret);
return ret;
@@ -816,7 +823,11 @@ static int __maybe_unused stm32_timer_cnt_resume(struct device *dev)
return ret;
if (priv->enabled) {
- clk_enable(priv->clk);
+ ret = clk_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock %d\n", ret);
+ return ret;
+ }
/* Restore registers that may have been lost */
regmap_write(priv->regmap, TIM_SMCR, priv->bak.smcr);
@@ -853,4 +864,4 @@ MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_ALIAS("platform:stm32-timer-counter");
MODULE_DESCRIPTION("STMicroelectronics STM32 TIMER counter driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c
index 675447315caf..3faaf7f60539 100644
--- a/drivers/counter/ti-ecap-capture.c
+++ b/drivers/counter/ti-ecap-capture.c
@@ -574,8 +574,13 @@ static int ecap_cnt_resume(struct device *dev)
{
struct counter_device *counter_dev = dev_get_drvdata(dev);
struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+ int ret;
- clk_enable(ecap_dev->clk);
+ ret = clk_enable(ecap_dev->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock %d\n", ret);
+ return ret;
+ }
ecap_cnt_capture_set_evmode(counter_dev, ecap_dev->pm_ctx.ev_mode);
@@ -598,7 +603,7 @@ MODULE_DEVICE_TABLE(of, ecap_cnt_of_match);
static struct platform_driver ecap_cnt_driver = {
.probe = ecap_cnt_probe,
- .remove_new = ecap_cnt_remove,
+ .remove = ecap_cnt_remove,
.driver = {
.name = "ecap-capture",
.of_match_table = ecap_cnt_of_match,
@@ -610,4 +615,4 @@ module_platform_driver(ecap_cnt_driver);
MODULE_DESCRIPTION("ECAP Capture driver");
MODULE_AUTHOR("Julien Panis <jpanis@baylibre.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 313b91456f26..bc586eff0dae 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -548,7 +548,7 @@ MODULE_DEVICE_TABLE(of, ti_eqep_of_match);
static struct platform_driver ti_eqep_driver = {
.probe = ti_eqep_probe,
- .remove_new = ti_eqep_remove,
+ .remove = ti_eqep_remove,
.driver = {
.name = "ti-eqep-cnt",
.of_match_table = ti_eqep_of_match,
@@ -559,4 +559,4 @@ module_platform_driver(ti_eqep_driver);
MODULE_AUTHOR("David Lechner <david@lechnology.com>");
MODULE_DESCRIPTION("TI eQEP counter driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(COUNTER);
+MODULE_IMPORT_NS("COUNTER");
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2561b215432a..d64b07ec48e5 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -217,8 +217,22 @@ config CPUFREQ_DT
If in doubt, say N.
+config CPUFREQ_VIRT
+ tristate "Virtual cpufreq driver"
+ depends on GENERIC_ARCH_TOPOLOGY
+ help
+	  This adds a virtualized cpufreq driver for guest kernels that
+	  reads and writes an MMIO region of a virtualized cpufreq device
+	  to communicate with the host. It sends performance requests to
+	  the host, which uses them as hints when scheduling vCPU threads
+	  and selecting the CPU frequency. If a VM does not support a
+	  virtualized FIE such as AMUs, the driver updates the frequency
+	  scaling factor by polling the host CPU frequency, enabling
+	  accurate Per-Entity Load Tracking for tasks running in the guest.
+
+ If in doubt, say N.
+
config CPUFREQ_DT_PLATDEV
- tristate "Generic DT based cpufreq platdev driver"
+ bool "Generic DT based cpufreq platdev driver"
depends on OF
help
This adds a generic DT based cpufreq platdev driver for frequency
@@ -311,8 +325,6 @@ config QORIQ_CPUFREQ
This adds the CPUFreq driver support for Freescale QorIQ SoCs
which are capable of changing the CPU's frequency dynamically.
-endif
-
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
depends on ACPI_PROCESSOR
@@ -341,4 +353,6 @@ config ACPI_CPPC_CPUFREQ_FIE
If in doubt, say N.
+endif
+
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5f7e13e60c80..9e46960f6a86 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -15,6 +15,15 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
To compile this driver as a module, choose M here: the
module will be called sun50i-cpufreq-nvmem.
+config ARM_AIROHA_SOC_CPUFREQ
+ tristate "Airoha EN7581 SoC CPUFreq support"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ depends on OF
+ select PM_OPP
+ default ARCH_AIROHA
+ help
+ This adds the CPUFreq driver for Airoha EN7581 SoCs.
+
config ARM_APPLE_SOC_CPUFREQ
tristate "Apple Silicon SoC CPUFreq support"
depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 58151ca56695..eb678fa5260a 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -17,13 +17,6 @@ config CPU_FREQ_CBE_PMI
frequencies. Using PMI, the processor will not only be able to run at
lower speed, but also at lower core voltage.
-config CPU_FREQ_MAPLE
- bool "Support for Maple 970FX Evaluation Board"
- depends on PPC_MAPLE
- help
- This adds support for frequency switching on Maple 970FX
- Evaluation Board and compatible boards (IBM JS2x blades).
-
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 0f184031dd12..890fff99f37d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
+obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
# Traces
CFLAGS_amd-pstate-trace.o := -I$(src)
@@ -52,6 +53,7 @@ obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
##################################################################################
# ARM SoC drivers
+obj-$(CONFIG_ARM_AIROHA_SOC_CPUFREQ) += airoha-cpufreq.o
obj-$(CONFIG_ARM_APPLE_SOC_CPUFREQ) += apple-soc-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
@@ -92,7 +94,6 @@ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
-obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0f04feb6cafa..463b69a2dff5 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -73,20 +73,17 @@ static unsigned int acpi_pstate_strict;
static bool boost_state(unsigned int cpu)
{
- u32 lo, hi;
u64 msr;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
case X86_VENDOR_CENTAUR:
case X86_VENDOR_ZHAOXIN:
- rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
- rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
- msr = lo | ((u64)hi << 32);
+ rdmsrl_on_cpu(cpu, MSR_K7_HWCR, &msr);
return !(msr & MSR_K7_HWCR_CPB_DIS);
}
return false;
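rdmsrl_on_cpu() reads the MSR as a single 64-bit value on the target CPU, so the lo/hi pair and the manual (u64)hi << 32 composition disappear. A minimal sketch (ignoring the cross-call error return, as the code above does):

#include <asm/msr.h>

static bool intel_turbo_enabled(unsigned int cpu)
{
	u64 msr;

	rdmsrl_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &msr);
	return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}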
@@ -626,7 +623,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
-static u64 get_max_boost_ratio(unsigned int cpu)
+/*
+ * get_max_boost_ratio: Computes the max_boost_ratio as the ratio
+ * between the highest_perf and the nominal_perf.
+ *
+ * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal
+ * frequency via @nominal_freq if it is a non-NULL pointer.
+ */
+static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
{
struct cppc_perf_caps perf_caps;
u64 highest_perf, nominal_perf;
@@ -655,6 +659,9 @@ static u64 get_max_boost_ratio(unsigned int cpu)
nominal_perf = perf_caps.nominal_perf;
+ if (nominal_freq)
+ *nominal_freq = perf_caps.nominal_freq;
+
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
return 0;
@@ -667,8 +674,12 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
+
#else
-static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
+{
+ return 0;
+}
#endif
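
The returned ratio is fixed-point, scaled by 2^SCHED_CAPACITY_SHIFT (1024), so the caller can multiply a frequency by it and shift back down without floating point. A small sketch of the arithmetic with hypothetical CPPC perf values:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10	/* the kernel scales such ratios by 1024 */

int main(void)
{
	uint64_t highest_perf = 196;	/* hypothetical CPPC highest_perf */
	uint64_t nominal_perf = 120;	/* hypothetical CPPC nominal_perf */
	unsigned int nominal_khz = 2600000; /* hypothetical nominal frequency */

	/* div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf) */
	uint64_t ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;

	/* freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT, as in cpu_init() */
	unsigned int max_khz = nominal_khz * ratio >> SCHED_CAPACITY_SHIFT;

	printf("ratio=%llu max_freq=%u kHz\n",
	       (unsigned long long)ratio, max_khz);
	return 0;
}
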
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -678,9 +689,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
struct acpi_cpufreq_data *data;
unsigned int cpu = policy->cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ u64 max_boost_ratio, nominal_freq = 0;
unsigned int valid_states = 0;
unsigned int result = 0;
- u64 max_boost_ratio;
unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
@@ -830,16 +841,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
- max_boost_ratio = get_max_boost_ratio(cpu);
+ max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq);
if (max_boost_ratio) {
- unsigned int freq = freq_table[0].frequency;
+ unsigned int freq = nominal_freq;
/*
- * Because the loop above sorts the freq_table entries in the
- * descending order, freq is the maximum frequency in the table.
- * Assume that it corresponds to the CPPC nominal frequency and
- * use it to set cpuinfo.max_freq.
+ * The loop above sorts the freq_table entries in
+ * descending order. If ACPI CPPC has not advertised
+ * the nominal frequency (this is possible in CPPC
+ * revisions prior to 3), then use the first entry in
+ * the pstate table as a proxy for nominal frequency.
*/
+ if (!freq)
+ freq = freq_table[0].frequency;
+
policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
} else {
/*
@@ -894,11 +909,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
- if (acpi_cpufreq_driver.set_boost) {
- set_boost(policy, acpi_cpufreq_driver.boost_enabled);
- policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
- }
-
return result;
err_unreg:
@@ -1028,7 +1038,7 @@ static struct platform_driver acpi_cpufreq_platdrv = {
.driver = {
.name = "acpi-cpufreq",
},
- .remove_new = acpi_cpufreq_remove,
+ .remove = acpi_cpufreq_remove,
};
static int __init acpi_cpufreq_init(void)
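
The .remove_new name was the transitional spelling for the platform-driver remove callback that returns void; with the conversion complete, the field is renamed back to .remove. A compilable sketch of the resulting shape, with illustrative names and a stubbed device type:

#include <stdio.h>

struct platform_device;	/* opaque here; stands in for the kernel type */

struct platform_driver_sketch {
	/* remove now returns void: nothing useful can be done with an error */
	void (*remove)(struct platform_device *pdev);
};

static void foo_remove(struct platform_device *pdev)
{
	(void)pdev;
	puts("resources released");
}

int main(void)
{
	struct platform_driver_sketch drv = { .remove = foo_remove };
	drv.remove(0);
	return 0;
}
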
diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c
new file mode 100644
index 000000000000..4fe39eadd163
--- /dev/null
+++ b/drivers/cpufreq/airoha-cpufreq.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "cpufreq-dt.h"
+
+struct airoha_cpufreq_priv {
+ int opp_token;
+ struct dev_pm_domain_list *pd_list;
+ struct platform_device *cpufreq_dt;
+};
+
+static struct platform_device *cpufreq_pdev;
+
+/* NOP handler to prevent the OPP core from setting the clock */
+static int airoha_cpufreq_config_clks_nop(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp,
+ void *data, bool scaling_down)
+{
+ return 0;
+}
+
+static const char * const airoha_cpufreq_clk_names[] = { "cpu", NULL };
+static const char * const airoha_cpufreq_pd_names[] = { "perf" };
+
+static int airoha_cpufreq_probe(struct platform_device *pdev)
+{
+ const struct dev_pm_domain_attach_data attach_data = {
+ .pd_names = airoha_cpufreq_pd_names,
+ .num_pd_names = ARRAY_SIZE(airoha_cpufreq_pd_names),
+ .pd_flags = PD_FLAG_DEV_LINK_ON | PD_FLAG_REQUIRED_OPP,
+ };
+ struct dev_pm_opp_config config = {
+ .clk_names = airoha_cpufreq_clk_names,
+ .config_clks = airoha_cpufreq_config_clks_nop,
+ };
+ struct platform_device *cpufreq_dt;
+ struct airoha_cpufreq_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct device *cpu_dev;
+ int ret;
+
+ /* CPUs refer to the same OPP table */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Set OPP table conf with NOP config_clks */
+ priv->opp_token = dev_pm_opp_set_config(cpu_dev, &config);
+ if (priv->opp_token < 0)
+ return dev_err_probe(dev, priv->opp_token, "Failed to set OPP config\n");
+
+ /* Attach PM for OPP */
+ ret = dev_pm_domain_attach_list(cpu_dev, &attach_data,
+ &priv->pd_list);
+ if (ret)
+ goto clear_opp_config;
+
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (ret) {
+ dev_err(dev, "failed to create cpufreq-dt device: %d\n", ret);
+ goto detach_pm;
+ }
+
+ priv->cpufreq_dt = cpufreq_dt;
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+detach_pm:
+ dev_pm_domain_detach_list(priv->pd_list);
+clear_opp_config:
+ dev_pm_opp_clear_config(priv->opp_token);
+
+ return ret;
+}
+
+static void airoha_cpufreq_remove(struct platform_device *pdev)
+{
+ struct airoha_cpufreq_priv *priv = platform_get_drvdata(pdev);
+
+ platform_device_unregister(priv->cpufreq_dt);
+
+ dev_pm_domain_detach_list(priv->pd_list);
+
+ dev_pm_opp_clear_config(priv->opp_token);
+}
+
+static struct platform_driver airoha_cpufreq_driver = {
+ .probe = airoha_cpufreq_probe,
+ .remove = airoha_cpufreq_remove,
+ .driver = {
+ .name = "airoha-cpufreq",
+ },
+};
+
+static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
+ { .compatible = "airoha,en7581" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, airoha_cpufreq_match_list);
+
+static int __init airoha_cpufreq_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(airoha_cpufreq_match_list, np);
+ of_node_put(np);
+ if (!match)
+ return -ENODEV;
+
+ ret = platform_driver_register(&airoha_cpufreq_driver);
+ if (unlikely(ret < 0))
+ return ret;
+
+ cpufreq_pdev = platform_device_register_data(NULL, "airoha-cpufreq",
+ -1, match, sizeof(*match));
+ ret = PTR_ERR_OR_ZERO(cpufreq_pdev);
+ if (ret)
+ platform_driver_unregister(&airoha_cpufreq_driver);
+
+ return ret;
+}
+module_init(airoha_cpufreq_init);
+
+static void __exit airoha_cpufreq_exit(void)
+{
+ platform_device_unregister(cpufreq_pdev);
+ platform_driver_unregister(&airoha_cpufreq_driver);
+}
+module_exit(airoha_cpufreq_exit);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Airoha SoCs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 35f38ae67fb1..8d692415d905 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -32,7 +32,6 @@ TRACE_EVENT(amd_pstate_perf,
u64 aperf,
u64 tsc,
unsigned int cpu_id,
- bool changed,
bool fast_switch
),
@@ -44,7 +43,6 @@ TRACE_EVENT(amd_pstate_perf,
aperf,
tsc,
cpu_id,
- changed,
fast_switch
),
@@ -57,7 +55,6 @@ TRACE_EVENT(amd_pstate_perf,
__field(unsigned long long, aperf)
__field(unsigned long long, tsc)
__field(unsigned int, cpu_id)
- __field(bool, changed)
__field(bool, fast_switch)
),
@@ -70,11 +67,10 @@ TRACE_EVENT(amd_pstate_perf,
__entry->aperf = aperf;
__entry->tsc = tsc;
__entry->cpu_id = cpu_id;
- __entry->changed = changed;
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s",
+ TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
(unsigned long)__entry->min_perf,
(unsigned long)__entry->target_perf,
(unsigned long)__entry->capacity,
@@ -83,11 +79,55 @@ TRACE_EVENT(amd_pstate_perf,
(unsigned long long)__entry->aperf,
(unsigned long long)__entry->tsc,
(unsigned int)__entry->cpu_id,
- (__entry->changed) ? "true" : "false",
(__entry->fast_switch) ? "true" : "false"
)
);
+TRACE_EVENT(amd_pstate_epp_perf,
+
+ TP_PROTO(unsigned int cpu_id,
+ unsigned int highest_perf,
+ unsigned int epp,
+ unsigned int min_perf,
+ unsigned int max_perf,
+ bool boost
+ ),
+
+ TP_ARGS(cpu_id,
+ highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ boost),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu_id)
+ __field(unsigned int, highest_perf)
+ __field(unsigned int, epp)
+ __field(unsigned int, min_perf)
+ __field(unsigned int, max_perf)
+ __field(bool, boost)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu_id = cpu_id;
+ __entry->highest_perf = highest_perf;
+ __entry->epp = epp;
+ __entry->min_perf = min_perf;
+ __entry->max_perf = max_perf;
+ __entry->boost = boost;
+ ),
+
+ TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+ (unsigned int)__entry->cpu_id,
+ (unsigned int)__entry->min_perf,
+ (unsigned int)__entry->max_perf,
+ (unsigned int)__entry->highest_perf,
+ (unsigned int)__entry->epp,
+ (bool)__entry->boost
+ )
+);
+
#endif /* _AMD_PSTATE_TRACE_H */
/* This part must be outside protection */
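
For reference, a rendered line of the new tracepoint pairs the min/max request limits against highest_perf and appends the EPP and boost state. A tiny sketch that reproduces the TP_printk() format with made-up values:

#include <stdio.h>

int main(void)
{
	/* mirrors TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u", ...) */
	unsigned int cpu = 2, min_perf = 60, max_perf = 196;
	unsigned int highest_perf = 196, epp = 128, boost = 1;

	printf("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u\n",
	       cpu, min_perf, max_perf, highest_perf, epp, boost);
	return 0;
}
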
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index f66701514d90..3a0a380c3590 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -207,7 +207,6 @@ static void amd_pstate_ut_check_freq(u32 index)
int cpu = 0;
struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata = NULL;
- u32 nominal_freq_khz;
for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
@@ -215,34 +214,33 @@ static void amd_pstate_ut_check_freq(u32 index)
break;
cpudata = policy->driver_data;
- nominal_freq_khz = cpudata->nominal_freq*1000;
- if (!((cpudata->max_freq >= nominal_freq_khz) &&
- (nominal_freq_khz > cpudata->lowest_nonlinear_freq) &&
+ if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
(cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
(cpudata->min_freq > 0))) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, nominal_freq_khz,
+ __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
cpudata->lowest_nonlinear_freq, cpudata->min_freq);
goto skip_test;
}
- if (cpudata->min_freq != policy->min) {
+ if (cpudata->lowest_nonlinear_freq != policy->min) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
- pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
- __func__, cpu, cpudata->min_freq, policy->min);
+ pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
+ __func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
goto skip_test;
}
if (cpudata->boost_supported) {
if ((policy->max == cpudata->max_freq) ||
- (policy->max == nominal_freq_khz))
+ (policy->max == cpudata->nominal_freq))
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
else {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
__func__, cpu, policy->max, cpudata->max_freq,
- nominal_freq_khz);
+ cpudata->nominal_freq);
goto skip_test;
}
} else {
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 15e201d5e911..313550fa62d4 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -88,6 +89,11 @@ static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
static struct quirk_entry *quirks;
+#define AMD_CPPC_MAX_PERF_MASK GENMASK(7, 0)
+#define AMD_CPPC_MIN_PERF_MASK GENMASK(15, 8)
+#define AMD_CPPC_DES_PERF_MASK GENMASK(23, 16)
+#define AMD_CPPC_EPP_PERF_MASK GENMASK(31, 24)
+
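
These masks carve the CPPC request register into four byte-wide lanes that the reworked helpers pack with FIELD_PREP() and unpack with FIELD_GET(). A portable sketch of the same packing using plain shifts (field values hypothetical):

#include <assert.h>
#include <stdint.h>

/* byte lanes of the request register, matching the GENMASKs above */
#define MAX_PERF_SHIFT	0
#define MIN_PERF_SHIFT	8
#define DES_PERF_SHIFT	16
#define EPP_SHIFT	24

int main(void)
{
	uint64_t req = 0;
	uint8_t max_perf = 196, min_perf = 60, des_perf = 0, epp = 128;

	/* what FIELD_PREP(AMD_CPPC_*_MASK, val) does for each lane */
	req |= (uint64_t)max_perf << MAX_PERF_SHIFT;
	req |= (uint64_t)min_perf << MIN_PERF_SHIFT;
	req |= (uint64_t)des_perf << DES_PERF_SHIFT;
	req |= (uint64_t)epp << EPP_SHIFT;

	/* what FIELD_GET(AMD_CPPC_EPP_PERF_MASK, req) recovers */
	assert(((req >> EPP_SHIFT) & 0xff) == epp);
	return 0;
}
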
/*
* AMD Energy Preference Performance (EPP)
* The EPP is used in the CCLK DPM controller to drive
@@ -180,120 +186,145 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+static s16 msr_get_epp(struct amd_cpudata *cpudata)
{
- u64 epp;
+ u64 value;
int ret;
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- if (!cppc_req_cached) {
- epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- &cppc_req_cached);
- if (epp)
- return epp;
- }
- epp = (cppc_req_cached >> 24) & 0xFF;
- } else {
- ret = cppc_get_epp_perf(cpudata->cpu, &epp);
- if (ret < 0) {
- pr_debug("Could not retrieve energy perf value (%d)\n", ret);
- return -EIO;
- }
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
}
- return (s16)(epp & 0xff);
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value);
}
-static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
+DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
+
+static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
{
- s16 epp;
- int index = -EINVAL;
+ return static_call(amd_pstate_get_epp)(cpudata);
+}
- epp = amd_pstate_get_epp(cpudata, 0);
- if (epp < 0)
- return epp;
+static s16 shmem_get_epp(struct amd_cpudata *cpudata)
+{
+ u64 epp;
+ int ret;
- switch (epp) {
- case AMD_CPPC_EPP_PERFORMANCE:
- index = EPP_INDEX_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
- index = EPP_INDEX_BALANCE_PERFORMANCE;
- break;
- case AMD_CPPC_EPP_BALANCE_POWERSAVE:
- index = EPP_INDEX_BALANCE_POWERSAVE;
- break;
- case AMD_CPPC_EPP_POWERSAVE:
- index = EPP_INDEX_POWERSAVE;
- break;
- default:
- break;
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return ret;
}
- return index;
+ return (s16)(epp & 0xff);
}
-static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch)
+static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
- if (fast_switch)
- wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
- else
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
- READ_ONCE(cpudata->cppc_req_cached));
+ u64 value, prev;
+
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (value == prev)
+ return 0;
+
+ if (fast_switch) {
+ wrmsrl(MSR_AMD_CPPC_REQ, value);
+ return 0;
+ } else {
+ int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+
+ if (ret)
+ return ret;
+ }
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+ WRITE_ONCE(cpudata->epp_cached, epp);
+
+ return 0;
}
-DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);
+DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
-static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
+static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+ u32 max_perf, u32 epp,
+ bool fast_switch)
{
- static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
+ return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ max_perf, epp, fast_switch);
}
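
msr_update_perf() is the static-call default; amd_pstate_init() further down swaps in shmem_update_perf() on shared-memory systems via static_call_update(). A portable function-pointer analogue of that dispatch, with two hypothetical backends; the kernel's static calls make the same selection but patch the call site directly to avoid indirect-branch overhead:

#include <stdio.h>

/* stand-ins for the MSR and shared-memory backends */
static int msr_update(int cpu)   { printf("MSR path, cpu%d\n", cpu); return 0; }
static int shmem_update(int cpu) { printf("shmem path, cpu%d\n", cpu); return 0; }

/* analogue of DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf) */
static int (*update_perf)(int cpu) = msr_update;

int main(void)
{
	update_perf(0);			/* default: MSR backend */
	update_perf = shmem_update;	/* static_call_update() analogue */
	update_perf(0);
	return 0;
}
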
-static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
+ u64 value, prev;
int ret;
- struct cppc_perf_ctrls perf_ctrls;
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- u64 value = READ_ONCE(cpudata->cppc_req_cached);
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
-
- ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- if (!ret)
- cpudata->epp_cached = epp;
- } else {
- amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
- cpudata->max_limit_perf, false);
+ if (value == prev)
+ return 0;
- perf_ctrls.energy_perf = epp;
- ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- if (ret) {
- pr_debug("failed to set energy perf value (%d)\n", ret);
- return ret;
- }
- cpudata->epp_cached = epp;
+ ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (ret) {
+ pr_err("failed to set energy perf value (%d)\n", ret);
+ return ret;
}
+ /* update both so that msr_update_perf() can compare against the cached value */
+ WRITE_ONCE(cpudata->epp_cached, epp);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
return ret;
}
-static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
- int pref_index)
+DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
+
+static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+ return static_call(amd_pstate_set_epp)(cpudata, epp);
+}
+
+static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
{
- int epp = -EINVAL;
int ret;
+ struct cppc_perf_ctrls perf_ctrls;
+
+ if (epp == cpudata->epp_cached)
+ return 0;
+
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
+ }
+ WRITE_ONCE(cpudata->epp_cached, epp);
+
+ return ret;
+}
+
+static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
+ int pref_index)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int epp;
if (!pref_index)
epp = cpudata->epp_default;
-
- if (epp == -EINVAL)
+ else
epp = epp_values[pref_index];
if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -301,16 +332,28 @@ static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
return -EBUSY;
}
- ret = amd_pstate_set_epp(cpudata, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ policy->boost_enabled);
+ }
- return ret;
+ return amd_pstate_set_epp(cpudata, epp);
}
-static inline int pstate_enable(bool enable)
+static inline int msr_cppc_enable(bool enable)
{
int ret, cpu;
unsigned long logical_proc_id_mask = 0;
+ /*
+ * MSR_AMD_CPPC_ENABLE is write-once; once set, it cannot be cleared.
+ */
+ if (!enable)
+ return 0;
+
if (enable == cppc_enabled)
return 0;
@@ -332,7 +375,7 @@ static inline int pstate_enable(bool enable)
return 0;
}
-static int cppc_enable(bool enable)
+static int shmem_cppc_enable(bool enable)
{
int cpu, ret = 0;
struct cppc_perf_ctrls perf_ctrls;
@@ -359,24 +402,28 @@ static int cppc_enable(bool enable)
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);
+DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);
-static inline int amd_pstate_enable(bool enable)
+static inline int amd_pstate_cppc_enable(bool enable)
{
- return static_call(amd_pstate_enable)(enable);
+ return static_call(amd_pstate_cppc_enable)(enable);
}
-static int pstate_init_perf(struct amd_cpudata *cpudata)
+static int msr_init_perf(struct amd_cpudata *cpudata)
{
- u64 cap1;
+ u64 cap1, numerator;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
- WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->highest_perf, numerator);
+ WRITE_ONCE(cpudata->max_limit_perf, numerator);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
@@ -385,16 +432,21 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
return 0;
}
-static int cppc_init_perf(struct amd_cpudata *cpudata)
+static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
+ u64 numerator;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
- WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf);
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->highest_perf, numerator);
+ WRITE_ONCE(cpudata->max_limit_perf, numerator);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
@@ -420,24 +472,30 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
return ret;
}
-DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);
+DEFINE_STATIC_CALL(amd_pstate_init_perf, msr_init_perf);
static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
return static_call(amd_pstate_init_perf)(cpudata);
}
-static void cppc_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, bool fast_switch)
+static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
+ u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
struct cppc_perf_ctrls perf_ctrls;
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ int ret = shmem_set_epp(cpudata, epp);
+
+ if (ret)
+ return ret;
+ }
+
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ return cppc_set_perf(cpudata->cpu, &perf_ctrls);
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
@@ -478,14 +536,8 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
{
unsigned long max_freq;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
- u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
- u64 value = prev;
- min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
max_freq = READ_ONCE(cpudata->max_limit_freq);
@@ -496,58 +548,59 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
des_perf = 0;
}
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(des_perf);
-
/* limit the max perf when core performance boost feature is disabled */
if (!cpudata->boost_supported)
max_perf = min_t(unsigned long, nominal_perf, max_perf);
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
-
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
- cpudata->cpu, (value != prev), fast_switch);
+ cpudata->cpu, fast_switch);
}
- if (value == prev)
- goto cpufreq_policy_put;
-
- WRITE_ONCE(cpudata->cppc_req_cached, value);
+ amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
- amd_pstate_update_perf(cpudata, min_perf, des_perf,
- max_perf, fast_switch);
-
-cpufreq_policy_put:
cpufreq_cpu_put(policy);
}
-static int amd_pstate_verify(struct cpufreq_policy_data *policy)
+static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
{
- cpufreq_verify_within_cpu_limits(policy);
+ /*
+ * Initialize the lower frequency limit (i.e. policy->min) with
+ * lowest_nonlinear_frequency, which is the most energy-efficient
+ * frequency. Override the initial value set by the cpufreq core and
+ * amd-pstate qos_requests.
+ */
+ if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(policy_data->cpu);
+ struct amd_cpudata *cpudata;
+
+ if (!policy)
+ return -EINVAL;
+
+ cpudata = policy->driver_data;
+ policy_data->min = cpudata->lowest_nonlinear_freq;
+ cpufreq_cpu_put(policy);
+ }
+
+ cpufreq_verify_within_cpu_limits(policy_data);
+ pr_debug("policy_max =%d, policy_min=%d\n", policy_data->max, policy_data->min);
return 0;
}
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf, lowest_perf;
+ u32 max_limit_perf, min_limit_perf, max_perf, max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
- max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
- min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
-
- lowest_perf = READ_ONCE(cpudata->lowest_perf);
- if (min_limit_perf < lowest_perf)
- min_limit_perf = lowest_perf;
+ max_perf = READ_ONCE(cpudata->highest_perf);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ max_limit_perf = div_u64(policy->max * max_perf, max_freq);
+ min_limit_perf = div_u64(policy->min * max_perf, max_freq);
- if (max_limit_perf < min_limit_perf)
- max_limit_perf = min_limit_perf;
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min_limit_perf = min(cpudata->nominal_perf, max_limit_perf);
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
@@ -646,7 +699,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (min_perf < lowest_nonlinear_perf)
min_perf = lowest_nonlinear_perf;
- max_perf = cap_perf;
+ max_perf = cpudata->max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
@@ -660,38 +713,16 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
struct amd_cpudata *cpudata = policy->driver_data;
- struct cppc_perf_ctrls perf_ctrls;
- u32 highest_perf, nominal_perf, nominal_freq, max_freq;
+ u32 nominal_freq, max_freq;
int ret = 0;
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
nominal_freq = READ_ONCE(cpudata->nominal_freq);
max_freq = READ_ONCE(cpudata->max_freq);
- if (boot_cpu_has(X86_FEATURE_CPPC)) {
- u64 value = READ_ONCE(cpudata->cppc_req_cached);
-
- value &= ~GENMASK_ULL(7, 0);
- value |= on ? highest_perf : nominal_perf;
- WRITE_ONCE(cpudata->cppc_req_cached, value);
-
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.max_perf = on ? highest_perf : nominal_perf;
- ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
- if (ret) {
- cpufreq_cpu_release(policy);
- pr_debug("Failed to set max perf on CPU:%d. ret:%d\n",
- cpudata->cpu, ret);
- return ret;
- }
- }
-
if (on)
policy->cpuinfo.max_freq = max_freq;
- else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
- policy->cpuinfo.max_freq = nominal_freq * 1000;
+ else if (policy->cpuinfo.max_freq > nominal_freq)
+ policy->cpuinfo.max_freq = nominal_freq;
policy->max = policy->cpuinfo.max_freq;
@@ -713,12 +744,10 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EOPNOTSUPP;
}
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
+
ret = amd_pstate_cpu_boost_update(policy, state);
- WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
- policy->boost_enabled = !ret ? state : false;
refresh_frequency_limits(policy);
- mutex_unlock(&amd_pstate_driver_lock);
return ret;
}
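
guard(mutex)(&lock) acquires the mutex and schedules the unlock for scope exit, which is what makes the bare "return ret" above safe. A compilable sketch of the underlying scope-exit mechanism using the same compiler feature (GCC/Clang cleanup attribute; types and names illustrative):

#include <stdio.h>

typedef struct { int locked; } mutex_t;

static void mutex_lock(mutex_t *m)   { m->locked = 1; puts("locked"); }
static void mutex_unlock(mutex_t *m) { m->locked = 0; puts("unlocked"); }

/* invoked automatically when the guarded variable leaves scope */
static void guard_cleanup(mutex_t **m) { mutex_unlock(*m); }

#define guard_mutex(m)							\
	mutex_t *guard_ __attribute__((cleanup(guard_cleanup), unused)) = \
		(mutex_lock(m), (m))

static mutex_t driver_lock;

static int do_update(int fail)
{
	guard_mutex(&driver_lock);
	if (fail)
		return -1;	/* the unlock still runs on this early return */
	return 0;
}

int main(void)
{
	do_update(1);
	do_update(0);
	return 0;
}
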
@@ -738,9 +767,6 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
goto exit_err;
}
- /* at least one CPU supports CPB, even if others fail later on to set up */
- current_pstate_driver->boost_enabled = true;
-
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
if (ret) {
pr_err_once("failed to read initial CPU boost state!\n");
@@ -788,31 +814,35 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
* sched_set_itmt_support(true) has been called and it is valid to
* update them at any time after it has been called.
*/
- sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
+ sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
schedule_work(&sched_prefcore_work);
}
static void amd_pstate_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
int ret;
bool highest_perf_changed = false;
+ if (!amd_pstate_prefcore)
+ return;
+
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
cpudata = policy->driver_data;
- if (!amd_pstate_prefcore)
- return;
+ guard(mutex)(&amd_pstate_driver_lock);
- mutex_lock(&amd_pstate_driver_lock);
ret = amd_get_highest_perf(cpu, &cur_high);
- if (ret)
- goto free_cpufreq_put;
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return;
+ }
prev_high = READ_ONCE(cpudata->prefcore_ranking);
highest_perf_changed = (prev_high != cur_high);
@@ -822,14 +852,11 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (cur_high < CPPC_MAX_PERF)
sched_set_itmt_core_prio((int)cur_high, cpu);
}
-
-free_cpufreq_put:
cpufreq_cpu_put(policy);
if (!highest_perf_changed)
cpufreq_update_policy(cpu);
- mutex_unlock(&amd_pstate_driver_lock);
}
/*
@@ -842,7 +869,7 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
transition_delay_ns = cppc_get_transition_latency(cpu);
if (transition_delay_ns == CPUFREQ_ETERNAL) {
- if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
+ if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
else
return AMD_PSTATE_TRANSITION_DELAY;
@@ -881,10 +908,8 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
int ret;
u32 min_freq, max_freq;
- u64 numerator;
- u32 nominal_perf, nominal_freq;
+ u32 highest_perf, nominal_perf, nominal_freq;
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
- u32 boost_ratio, lowest_nonlinear_ratio;
struct cppc_perf_caps cppc_perf;
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
@@ -892,32 +917,25 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
return ret;
if (quirks && quirks->lowest_freq)
- min_freq = quirks->lowest_freq * 1000;
+ min_freq = quirks->lowest_freq;
else
- min_freq = cppc_perf.lowest_freq * 1000;
+ min_freq = cppc_perf.lowest_freq;
if (quirks && quirks->nominal_freq)
- nominal_freq = quirks->nominal_freq ;
+ nominal_freq = quirks->nominal_freq;
else
nominal_freq = cppc_perf.nominal_freq;
+ highest_perf = READ_ONCE(cpudata->highest_perf);
nominal_perf = READ_ONCE(cpudata->nominal_perf);
-
- ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
- if (ret)
- return ret;
- boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
- max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+ max_freq = div_u64((u64)highest_perf * nominal_freq, nominal_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
- lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
-
- WRITE_ONCE(cpudata->min_freq, min_freq);
- WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
- WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
- WRITE_ONCE(cpudata->max_freq, max_freq);
+ lowest_nonlinear_freq = div_u64((u64)nominal_freq * lowest_nonlinear_perf, nominal_perf);
+ WRITE_ONCE(cpudata->min_freq, min_freq * 1000);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq * 1000);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq * 1000);
+ WRITE_ONCE(cpudata->max_freq, max_freq * 1000);
/**
* Below values need to be initialized correctly, otherwise driver will fail to load
@@ -927,13 +945,13 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
*/
if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
- min_freq, max_freq, nominal_freq * 1000);
+ min_freq, max_freq, nominal_freq);
return -EINVAL;
}
- if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
+ if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq) {
pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
- lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
+ lowest_nonlinear_freq, min_freq, nominal_freq);
return -EINVAL;
}
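
The rewritten math stays in the CPPC's native MHz and only converts to kHz (* 1000) at the WRITE_ONCE() calls, replacing the earlier detour through SCHED_CAPACITY_SHIFT ratios. A worked sketch with hypothetical CPPC capabilities:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical CPPC capability values */
	uint64_t highest_perf = 196, nominal_perf = 120, lownonlin_perf = 85;
	uint64_t nominal_mhz = 2600;

	/* max_freq = div_u64(highest_perf * nominal_freq, nominal_perf) */
	uint64_t max_mhz = highest_perf * nominal_mhz / nominal_perf;
	uint64_t lownonlin_mhz = nominal_mhz * lownonlin_perf / nominal_perf;

	/* stored in kHz, mirroring WRITE_ONCE(..., freq * 1000) */
	printf("max=%llu kHz lowest_nonlinear=%llu kHz\n",
	       (unsigned long long)(max_mhz * 1000),
	       (unsigned long long)(lownonlin_mhz * 1000));
	return 0;
}
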
@@ -996,7 +1014,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = true;
ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
- FREQ_QOS_MIN, policy->cpuinfo.min_freq);
+ FREQ_QOS_MIN, FREQ_QOS_MIN_DEFAULT_VALUE);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
goto free_cpudata1;
@@ -1040,7 +1058,7 @@ static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
{
int ret;
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_cppc_enable(true);
if (ret)
pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
@@ -1051,7 +1069,7 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
{
int ret;
- ret = amd_pstate_enable(false);
+ ret = amd_pstate_cppc_enable(false);
if (ret)
pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
@@ -1150,7 +1168,6 @@ static ssize_t show_energy_performance_available_preferences(
static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
- struct amd_cpudata *cpudata = policy->driver_data;
char str_preference[21];
ssize_t ret;
@@ -1162,11 +1179,11 @@ static ssize_t store_energy_performance_preference(
if (ret < 0)
return -EINVAL;
- mutex_lock(&amd_pstate_limits_lock);
- ret = amd_pstate_set_energy_pref_index(cpudata, ret);
- mutex_unlock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
- return ret ?: count;
+ ret = amd_pstate_set_energy_pref_index(policy, ret);
+
+ return ret ? ret : count;
}
static ssize_t show_energy_performance_preference(
@@ -1175,37 +1192,79 @@ static ssize_t show_energy_performance_preference(
struct amd_cpudata *cpudata = policy->driver_data;
int preference;
- preference = amd_pstate_get_energy_pref_index(cpudata);
- if (preference < 0)
- return preference;
+ switch (cpudata->epp_cached) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ preference = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ preference = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ preference = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ preference = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ return -EINVAL;
+ }
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
}
static void amd_pstate_driver_cleanup(void)
{
- amd_pstate_enable(false);
+ amd_pstate_cppc_enable(false);
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
+static int amd_pstate_set_driver(int mode_idx)
+{
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int amd_pstate_register_driver(int mode)
{
int ret;
- if (mode == AMD_PSTATE_PASSIVE || mode == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
- else if (mode == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
- else
- return -EINVAL;
+ ret = amd_pstate_set_driver(mode);
+ if (ret)
+ return ret;
cppc_state = mode;
+
+ ret = amd_pstate_cppc_enable(true);
+ if (ret) {
+ pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
+ ret);
+ amd_pstate_driver_cleanup();
+ return ret;
+ }
+
+ /* at least one CPU supports CPB */
+ current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
+
ret = cpufreq_register_driver(current_pstate_driver);
if (ret) {
amd_pstate_driver_cleanup();
return ret;
}
+
return 0;
}
@@ -1304,13 +1363,10 @@ EXPORT_SYMBOL_GPL(amd_pstate_update_status);
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- ssize_t ret;
- mutex_lock(&amd_pstate_driver_lock);
- ret = amd_pstate_show_status(buf);
- mutex_unlock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
- return ret;
+ return amd_pstate_show_status(buf);
}
static ssize_t status_store(struct device *a, struct device_attribute *b,
@@ -1319,9 +1375,8 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&amd_pstate_driver_lock);
return ret < 0 ? ret : count;
}
@@ -1415,7 +1470,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return -ENOMEM;
cpudata->cpu = policy->cpu;
- cpudata->epp_policy = 0;
ret = amd_pstate_init_perf(cpudata);
if (ret)
@@ -1441,8 +1495,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
- cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
-
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
@@ -1453,10 +1505,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
* the default cpufreq governor is neither powersave nor performance.
*/
if (amd_pstate_acpi_pm_profile_server() ||
- amd_pstate_acpi_pm_profile_undefined())
+ amd_pstate_acpi_pm_profile_undefined()) {
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- else
+ cpudata->epp_default = amd_pstate_get_epp(cpudata);
+ } else {
policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
+ }
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
@@ -1469,6 +1524,11 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return ret;
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
+ ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+ if (ret)
+ return ret;
+
+ current_pstate_driver->adjust_perf = NULL;
return 0;
@@ -1492,67 +1552,24 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
- u64 value;
- s16 epp;
-
- max_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
- max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
- min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
-
- if (min_limit_perf < min_perf)
- min_limit_perf = min_perf;
-
- if (max_limit_perf < min_limit_perf)
- max_limit_perf = min_limit_perf;
-
- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
-
- max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
- cpudata->max_limit_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
-
- if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- min_perf = max_perf;
-
- /* Initial min/max values for CPPC Performance Controls Register */
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
-
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(max_perf);
-
- /* CPPC EPP feature require to set zero to the desire perf bit */
- value &= ~AMD_CPPC_DES_PERF(~0L);
- value |= AMD_CPPC_DES_PERF(0);
-
- cpudata->epp_policy = cpudata->policy;
+ u32 epp;
- /* Get BIOS pre-defined epp value */
- epp = amd_pstate_get_epp(cpudata, value);
- if (epp < 0) {
- /**
- * This return value can only be negative for shared_memory
- * systems where EPP register read/write not supported.
- */
- return epp;
- }
+ amd_pstate_update_min_max_limit(policy);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
+ else
+ epp = READ_ONCE(cpudata->epp_cached);
- /* Set initial EPP value */
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- value &= ~GENMASK_ULL(31, 24);
- value |= (u64)epp << 24;
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
+ cpudata->min_limit_perf,
+ cpudata->max_limit_perf,
+ policy->boost_enabled);
}
- WRITE_ONCE(cpudata->cppc_req_cached, value);
- return amd_pstate_set_epp(cpudata, epp);
+ return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
+ cpudata->max_limit_perf, epp, false);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1581,91 +1598,63 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
{
- struct cppc_perf_ctrls perf_ctrls;
- u64 value, max_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u64 max_perf;
int ret;
- ret = amd_pstate_enable(true);
+ ret = amd_pstate_cppc_enable(true);
if (ret)
pr_err("failed to enable amd pstate during resume, return %d\n", ret);
- value = READ_ONCE(cpudata->cppc_req_cached);
max_perf = READ_ONCE(cpudata->highest_perf);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.max_perf = max_perf;
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ cpudata->epp_cached,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
+ max_perf, policy->boost_enabled);
}
+
+ return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
}
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- amd_pstate_epp_reenable(cpudata);
- cpudata->suspended = false;
- }
+ ret = amd_pstate_epp_reenable(policy);
+ if (ret)
+ return ret;
+ cpudata->suspended = false;
return 0;
}
-static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
-{
- struct amd_cpudata *cpudata = policy->driver_data;
- struct cppc_perf_ctrls perf_ctrls;
- int min_perf;
- u64 value;
-
- min_perf = READ_ONCE(cpudata->lowest_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
-
- mutex_lock(&amd_pstate_limits_lock);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
-
- /* Set max perf same as min perf */
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(min_perf);
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.desired_perf = 0;
- perf_ctrls.max_perf = min_perf;
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- }
- mutex_unlock(&amd_pstate_limits_lock);
-}
-
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
-
- pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+ int min_perf;
if (cpudata->suspended)
return 0;
- if (cppc_state == AMD_PSTATE_ACTIVE)
- amd_pstate_epp_offline(policy);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
- return 0;
-}
+ guard(mutex)(&amd_pstate_limits_lock);
-static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
-{
- cpufreq_verify_within_cpu_limits(policy);
- pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
- return 0;
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ AMD_CPPC_EPP_BALANCE_POWERSAVE,
+ min_perf, min_perf, policy->boost_enabled);
+ }
+
+ return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
+ AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
}
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
@@ -1681,7 +1670,7 @@ static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
cpudata->suspended = true;
/* disable CPPC in lowlevel firmware */
- ret = amd_pstate_enable(false);
+ ret = amd_pstate_cppc_enable(false);
if (ret)
pr_err("failed to suspend, return %d\n", ret);
@@ -1693,12 +1682,10 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
- mutex_lock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
/* enable amd pstate from suspend state */
- amd_pstate_epp_reenable(cpudata);
-
- mutex_unlock(&amd_pstate_limits_lock);
+ amd_pstate_epp_reenable(policy);
cpudata->suspended = false;
}
@@ -1723,7 +1710,7 @@ static struct cpufreq_driver amd_pstate_driver = {
static struct cpufreq_driver amd_pstate_epp_driver = {
.flags = CPUFREQ_CONST_LOOPS,
- .verify = amd_pstate_epp_verify_policy,
+ .verify = amd_pstate_verify,
.setpolicy = amd_pstate_epp_set_policy,
.init = amd_pstate_epp_cpu_init,
.exit = amd_pstate_epp_cpu_exit,
@@ -1737,26 +1724,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.attr = amd_pstate_epp_attr,
};
-static int __init amd_pstate_set_driver(int mode_idx)
-{
- if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
- cppc_state = mode_idx;
- if (cppc_state == AMD_PSTATE_DISABLE)
- pr_info("driver is explicitly disabled\n");
-
- if (cppc_state == AMD_PSTATE_ACTIVE)
- current_pstate_driver = &amd_pstate_epp_driver;
-
- if (cppc_state == AMD_PSTATE_PASSIVE || cppc_state == AMD_PSTATE_GUIDED)
- current_pstate_driver = &amd_pstate_driver;
-
- return 0;
- }
-
- return -EINVAL;
-}
-
-/**
+/*
* CPPC function is not supported for family ID 17H with model_ID ranging from 0x10 to 0x2F.
* show the debug message that helps to check if the CPU has CPPC support for loading issue.
*/
@@ -1846,10 +1814,10 @@ static int __init amd_pstate_init(void)
if (cppc_state == AMD_PSTATE_UNDEFINED) {
/* Disable on the following configs by default:
* 1. Undefined platforms
- * 2. Server platforms
+ * 2. Server platforms with CPUs older than Family 0x1A.
*/
if (amd_pstate_acpi_pm_profile_undefined() ||
- amd_pstate_acpi_pm_profile_server()) {
+ (amd_pstate_acpi_pm_profile_server() && boot_cpu_data.x86 < 0x1A)) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
@@ -1857,31 +1825,21 @@ static int __init amd_pstate_init(void)
cppc_state = CONFIG_X86_AMD_PSTATE_DEFAULT_MODE;
}
- switch (cppc_state) {
- case AMD_PSTATE_DISABLE:
+ if (cppc_state == AMD_PSTATE_DISABLE) {
pr_info("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
- case AMD_PSTATE_PASSIVE:
- case AMD_PSTATE_ACTIVE:
- case AMD_PSTATE_GUIDED:
- ret = amd_pstate_set_driver(cppc_state);
- if (ret)
- return ret;
- break;
- default:
- return -EINVAL;
}
/* capability check */
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
- if (cppc_state != AMD_PSTATE_ACTIVE)
- current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
- static_call_update(amd_pstate_enable, cppc_enable);
- static_call_update(amd_pstate_init_perf, cppc_init_perf);
- static_call_update(amd_pstate_update_perf, cppc_update_perf);
+ static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
+ static_call_update(amd_pstate_init_perf, shmem_init_perf);
+ static_call_update(amd_pstate_update_perf, shmem_update_perf);
+ static_call_update(amd_pstate_get_epp, shmem_get_epp);
+ static_call_update(amd_pstate_set_epp, shmem_set_epp);
}
if (amd_pstate_prefcore) {
@@ -1890,17 +1848,10 @@ static int __init amd_pstate_init(void)
return ret;
}
- /* enable amd pstate feature */
- ret = amd_pstate_enable(true);
- if (ret) {
- pr_err("failed to enable driver mode(%d)\n", cppc_state);
- return ret;
- }
-
- ret = cpufreq_register_driver(current_pstate_driver);
+ ret = amd_pstate_register_driver(cppc_state);
if (ret) {
pr_err("failed to register with return %d\n", ret);
- goto disable_driver;
+ return ret;
}
dev_root = bus_get_dev_root(&cpu_subsys);
@@ -1917,8 +1868,7 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
-disable_driver:
- amd_pstate_enable(false);
+ amd_pstate_cppc_enable(false);
return ret;
}
device_initcall(amd_pstate_init);
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index cd573bc6b6db..9747e3be6cee 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -57,7 +57,6 @@ struct amd_aperf_mperf {
* @hw_prefcore: check whether HW supports preferred core feature.
* Only when hw_prefcore and early prefcore param are true,
* AMD P-State driver supports preferred core feature.
- * @epp_policy: Last saved policy used to set energy-performance preference
* @epp_cached: Cached CPPC energy-performance preference value
* @policy: Cpufreq policy value
* @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
@@ -94,13 +93,11 @@ struct amd_cpudata {
bool hw_prefcore;
/* EPP feature related attributes*/
- s16 epp_policy;
s16 epp_cached;
u32 policy;
u64 cppc_cap1_cached;
bool suspended;
s16 epp_default;
- bool boost_state;
};
/*
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 4dcacab9b4bf..269b18c62d04 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -22,11 +22,14 @@
#include <linux/pm_opp.h>
#include <linux/slab.h>
-#define APPLE_DVFS_CMD 0x20
-#define APPLE_DVFS_CMD_BUSY BIT(31)
-#define APPLE_DVFS_CMD_SET BIT(25)
-#define APPLE_DVFS_CMD_PS2 GENMASK(16, 12)
-#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+#define APPLE_DVFS_CMD 0x20
+#define APPLE_DVFS_CMD_BUSY BIT(31)
+#define APPLE_DVFS_CMD_SET BIT(25)
+#define APPLE_DVFS_CMD_PS1_S5L8960X GENMASK(24, 22)
+#define APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT 22
+#define APPLE_DVFS_CMD_PS2 GENMASK(15, 12)
+#define APPLE_DVFS_CMD_PS1 GENMASK(4, 0)
+#define APPLE_DVFS_CMD_PS1_SHIFT 0
/* Same timebase as CPU counter (24MHz) */
#define APPLE_DVFS_LAST_CHG_TIME 0x38
@@ -35,6 +38,9 @@
* Apple ran out of bits and had to shift this in T8112...
*/
#define APPLE_DVFS_STATUS 0x50
+#define APPLE_DVFS_STATUS_CUR_PS_S5L8960X GENMASK(5, 3)
+#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X 3
+#define APPLE_DVFS_STATUS_TGT_PS_S5L8960X GENMASK(2, 0)
#define APPLE_DVFS_STATUS_CUR_PS_T8103 GENMASK(7, 4)
#define APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103 4
#define APPLE_DVFS_STATUS_TGT_PS_T8103 GENMASK(3, 0)
@@ -52,12 +58,15 @@
#define APPLE_DVFS_PLL_FACTOR_MULT GENMASK(31, 16)
#define APPLE_DVFS_PLL_FACTOR_DIV GENMASK(15, 0)
-#define APPLE_DVFS_TRANSITION_TIMEOUT 100
+#define APPLE_DVFS_TRANSITION_TIMEOUT 400
struct apple_soc_cpufreq_info {
+ bool has_ps2;
u64 max_pstate;
u64 cur_pstate_mask;
u64 cur_pstate_shift;
+ u64 ps1_mask;
+ u64 ps1_shift;
};
struct apple_cpu_priv {
@@ -68,25 +77,47 @@ struct apple_cpu_priv {
static struct cpufreq_driver apple_soc_cpufreq_driver;
+static const struct apple_soc_cpufreq_info soc_s5l8960x_info = {
+ .has_ps2 = false,
+ .max_pstate = 7,
+ .cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_S5L8960X,
+ .cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_S5L8960X,
+ .ps1_mask = APPLE_DVFS_CMD_PS1_S5L8960X,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_S5L8960X_SHIFT,
+};
+
static const struct apple_soc_cpufreq_info soc_t8103_info = {
+ .has_ps2 = true,
.max_pstate = 15,
.cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8103,
.cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8103,
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct apple_soc_cpufreq_info soc_t8112_info = {
+ .has_ps2 = false,
.max_pstate = 31,
.cur_pstate_mask = APPLE_DVFS_STATUS_CUR_PS_T8112,
.cur_pstate_shift = APPLE_DVFS_STATUS_CUR_PS_SHIFT_T8112,
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct apple_soc_cpufreq_info soc_default_info = {
+ .has_ps2 = false,
.max_pstate = 15,
.cur_pstate_mask = 0, /* fallback */
+ .ps1_mask = APPLE_DVFS_CMD_PS1,
+ .ps1_shift = APPLE_DVFS_CMD_PS1_SHIFT,
};
static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
{
+ .compatible = "apple,s5l8960x-cluster-cpufreq",
+ .data = &soc_s5l8960x_info,
+ },
+ {
.compatible = "apple,t8103-cluster-cpufreq",
.data = &soc_t8103_info,
},
@@ -109,7 +140,7 @@ static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
unsigned int pstate;
if (priv->info->cur_pstate_mask) {
- u64 reg = readq_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
+ u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
pstate = (reg & priv->info->cur_pstate_mask) >> priv->info->cur_pstate_shift;
} else {
@@ -148,9 +179,12 @@ static int apple_soc_cpufreq_set_target(struct cpufreq_policy *policy,
return -EIO;
}
- reg &= ~(APPLE_DVFS_CMD_PS1 | APPLE_DVFS_CMD_PS2);
- reg |= FIELD_PREP(APPLE_DVFS_CMD_PS1, pstate);
- reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ reg &= ~priv->info->ps1_mask;
+ reg |= pstate << priv->info->ps1_shift;
+ if (priv->info->has_ps2) {
+ reg &= ~APPLE_DVFS_CMD_PS2;
+ reg |= FIELD_PREP(APPLE_DVFS_CMD_PS2, pstate);
+ }
reg |= APPLE_DVFS_CMD_SET;
writeq_relaxed(reg, priv->reg_base + APPLE_DVFS_CMD);
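
With the PS1 mask/shift and the has_ps2 flag moved into the per-SoC info struct, one set_target() path serves every register layout. A compact, compilable sketch of that table-driven masking (field positions hypothetical):

#include <stdint.h>
#include <stdio.h>

struct soc_info {
	int	has_ps2;
	uint64_t ps1_mask;
	unsigned int ps1_shift;
};

/* illustrative layouts standing in for the S5L8960X and T8103 variants */
static const struct soc_info old_soc = { 0, 7ULL << 22, 22 };
static const struct soc_info new_soc = { 1, 0x1fULL, 0 };

static uint64_t set_pstate(const struct soc_info *info, uint64_t reg,
			   unsigned int pstate)
{
	reg &= ~info->ps1_mask;
	reg |= (uint64_t)pstate << info->ps1_shift;
	if (info->has_ps2) {		/* PS2 at bits 15:12 in this sketch */
		reg &= ~(0xfULL << 12);
		reg |= (uint64_t)pstate << 12;
	}
	return reg;
}

int main(void)
{
	printf("old: %#llx\n", (unsigned long long)set_pstate(&old_soc, 0, 5));
	printf("new: %#llx\n", (unsigned long long)set_pstate(&new_soc, 0, 5));
	return 0;
}
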
@@ -275,7 +309,7 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = APPLE_DVFS_TRANSITION_TIMEOUT * NSEC_PER_USEC;
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index ea8438550b49..2fd0f6be6fa3 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -474,8 +474,8 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
rc = brcm_avs_get_pmap(priv, NULL);
magic = readl(priv->base + AVS_MBOX_MAGIC);
- return (magic == AVS_FIRMWARE_MAGIC) && ((rc != -ENOTSUPP) ||
- (rc != -EINVAL));
+ return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
+ (rc != -EINVAL);
}
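
The old condition was a tautology: rc cannot equal both -ENOTSUPP and -EINVAL at once, so one side of the || was always true and the error codes never caused the check to fail. A short demonstration (ENOTSUPP is kernel-internal, so it is defined locally here):

#include <assert.h>
#include <errno.h>

#define ENOTSUPP 524	/* kernel-internal errno, absent from userspace */

int main(void)
{
	int rc;

	/* the old check is true for every rc, error codes included */
	for (rc = -600; rc <= 0; rc++)
		assert((rc != -ENOTSUPP) || (rc != -EINVAL));

	/* the fixed check really rejects the error codes */
	rc = -EINVAL;
	assert(!((rc != -ENOTSUPP) && (rc != -EINVAL)));
	return 0;
}
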
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
@@ -777,7 +777,7 @@ static struct platform_driver brcm_avs_cpufreq_platdrv = {
.of_match_table = brcm_avs_cpufreq_match,
},
.probe = brcm_avs_cpufreq_probe,
- .remove_new = brcm_avs_cpufreq_remove,
+ .remove = brcm_avs_cpufreq_remove,
};
module_platform_driver(brcm_avs_cpufreq_platdrv);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 2b8708475ac7..8f512448382f 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -36,33 +36,15 @@ static LIST_HEAD(cpu_data_list);
static bool boost_supported;
-struct cppc_workaround_oem_info {
- char oem_id[ACPI_OEM_ID_SIZE + 1];
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
- u32 oem_revision;
-};
-
-static struct cppc_workaround_oem_info wa_info[] = {
- {
- .oem_id = "HISI ",
- .oem_table_id = "HIP07 ",
- .oem_revision = 0,
- }, {
- .oem_id = "HISI ",
- .oem_table_id = "HIP08 ",
- .oem_revision = 0,
- }
-};
-
static struct cpufreq_driver cppc_cpufreq_driver;
+#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
static enum {
FIE_UNSET = -1,
FIE_ENABLED,
FIE_DISABLED
} fie_disabled = FIE_UNSET;
-#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
module_param(fie_disabled, int, 0444);
MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)");
@@ -78,7 +60,6 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu);
static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
@@ -118,6 +99,9 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
&fb_ctrs);
+ if (!perf)
+ return;
+
cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
perf <<= SCHED_CAPACITY_SHIFT;
@@ -241,7 +225,7 @@ static void __init cppc_freq_invariance_init(void)
if (fie_disabled)
return;
- kworker_fie = kthread_create_worker(0, "cppc_fie");
+ kworker_fie = kthread_run_worker(0, "cppc_fie");
if (IS_ERR(kworker_fie)) {
pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
PTR_ERR(kworker_fie));
@@ -420,6 +404,9 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
struct cppc_cpudata *cpu_data;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ if (!policy)
+ return -EINVAL;
+
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
@@ -487,6 +474,9 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
int step;
policy = cpufreq_cpu_get_raw(cpu_dev->id);
+ if (!policy)
+ return -EINVAL;
+
cpu_data = policy->driver_data;
perf_caps = &cpu_data->perf_caps;
max_cap = arch_scale_cpu_capacity(cpu_dev->id);
@@ -621,7 +611,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
- policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
+ policy->max = cppc_perf_to_khz(caps, policy->boost_enabled ?
+ caps->highest_perf : caps->nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
@@ -629,7 +620,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* nonlinear perf
*/
policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
- policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
+ policy->cpuinfo.max_freq = policy->max;
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
@@ -724,13 +715,31 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
delta_delivered = get_delta(fb_ctrs_t1->delivered,
fb_ctrs_t0->delivered);
- /* Check to avoid divide-by zero and invalid delivered_perf */
+ /*
+ * Avoid divide-by-zero and unchanged feedback counters.
+ * Leave it for callers to handle.
+ */
if (!delta_reference || !delta_delivered)
- return cpu_data->perf_ctrls.desired_perf;
+ return 0;
return (reference_perf * delta_delivered) / delta_reference;
}
+static int cppc_get_perf_ctrs_sample(int cpu,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+ struct cppc_perf_fb_ctrs *fb_ctrs_t1)
+{
+ int ret;
+
+ ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0);
+ if (ret)
+ return ret;
+
+ udelay(2); /* 2usec delay between sampling */
+
+ return cppc_get_perf_ctrs(cpu, fb_ctrs_t1);
+}
+
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
@@ -746,18 +755,32 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
cpufreq_cpu_put(policy);
- ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
- if (ret)
- return 0;
-
- udelay(2); /* 2usec delay between sampling */
-
- ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
- if (ret)
- return 0;
+ ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
+ if (ret) {
+ if (ret == -EFAULT)
+ /* One of the associated CPPC registers is 0. */
+ goto out_invalid_counters;
+ else
+ return 0;
+ }
delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
&fb_ctrs_t1);
+ if (!delivered_perf)
+ goto out_invalid_counters;
+
+ return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
+
+out_invalid_counters:
+ /*
+ * Feedback counters could be unchanged or 0 when a CPU enters a
+ * low-power idle state, e.g. clock-gated or power-gated.
+ * Use the desired perf to reflect the frequency. Read the latest
+ * register value first, as some platforms may update the actual
+ * delivered perf there; if that fails, fall back to the cached
+ * desired perf.
+ */
+ if (cppc_get_desired_perf(cpu, &delivered_perf))
+ delivered_perf = cpu_data->perf_ctrls.desired_perf;
return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
}
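Delivered performance follows from the counter deltas as perf = reference_perf * delta_delivered / delta_reference. A worked example with illustrative numbers: with reference_perf = 100, a delivered-counter delta of 180000 and a reference-counter delta of 200000 between the two samples taken 2 us apart,

	delivered_perf = (100 * 180000) / 200000;	/* = 90 */

which cppc_perf_to_khz() then converts to a frequency. Returning 0 for unchanged counters, instead of silently substituting the cached desired perf, lets the new out_invalid_counters path try the desired-perf register first and only then fall back to the cache.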
@@ -812,57 +835,6 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
.name = "cppc_cpufreq",
};
-/*
- * HISI platform does not support delivered performance counter and
- * reference performance counter. It can calculate the performance using the
- * platform specific mechanism. We reuse the desired performance register to
- * store the real performance calculated by the platform.
- */
-static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data;
- u64 desired_perf;
- int ret;
-
- if (!policy)
- return -ENODEV;
-
- cpu_data = policy->driver_data;
-
- cpufreq_cpu_put(policy);
-
- ret = cppc_get_desired_perf(cpu, &desired_perf);
- if (ret < 0)
- return -EIO;
-
- return cppc_perf_to_khz(&cpu_data->perf_caps, desired_perf);
-}
-
-static void cppc_check_hisi_workaround(void)
-{
- struct acpi_table_header *tbl;
- acpi_status status = AE_OK;
- int i;
-
- status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
- if (ACPI_FAILURE(status) || !tbl)
- return;
-
- for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
- if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
- !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
- wa_info[i].oem_revision == tbl->oem_revision) {
- /* Overwrite the get() callback */
- cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
- fie_disabled = FIE_DISABLED;
- break;
- }
- }
-
- acpi_put_table(tbl);
-}
-
static int __init cppc_cpufreq_init(void)
{
int ret;
@@ -870,7 +842,6 @@ static int __init cppc_cpufreq_init(void)
if (!acpi_cpc_valid())
return -ENODEV;
- cppc_check_hisi_workaround();
cppc_freq_invariance_init();
populate_efficiency_class();
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 18942bfe9c95..2aa00769cf09 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,9 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "airoha,en7581", },
+
+ { .compatible = "allwinner,sun50i-a100" },
{ .compatible = "allwinner,sun50i-h6", },
{ .compatible = "allwinner,sun50i-h616", },
{ .compatible = "allwinner,sun50i-h618", },
@@ -234,5 +237,3 @@ create_pdev:
sizeof(struct cpufreq_dt_platform_data)));
}
core_initcall(cpufreq_dt_platdev_init);
-MODULE_DESCRIPTION("Generic DT based cpufreq platdev driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 983443396f8f..3a7c3372bda7 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -345,7 +345,7 @@ static struct platform_driver dt_cpufreq_platdrv = {
.name = "cpufreq-dt",
},
.probe = dt_cpufreq_probe,
- .remove_new = dt_cpufreq_remove,
+ .remove = dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f98c9438760c..30ffbddc7ece 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -25,6 +25,7 @@
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
@@ -602,12 +603,12 @@ static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
if (cpufreq_boost_trigger_state(enable)) {
pr_err("%s: Cannot %s BOOST!\n",
- __func__, enable ? "enable" : "disable");
+ __func__, str_enable_disable(enable));
return -EINVAL;
}
pr_debug("%s: cpufreq BOOST %s\n",
- __func__, enable ? "enabled" : "disabled");
+ __func__, str_enabled_disabled(enable));
return count;
}
@@ -1409,10 +1410,6 @@ static int cpufreq_online(unsigned int cpu)
goto out_free_policy;
}
- /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
- policy->boost_enabled = true;
-
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
@@ -1475,6 +1472,10 @@ static int cpufreq_online(unsigned int cpu)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
+ } else {
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ goto out_destroy_policy;
}
if (cpufreq_driver->get && has_target()) {
@@ -1520,7 +1521,7 @@ static int cpufreq_online(unsigned int cpu)
* frequency for longer duration. Hence, a BUG_ON().
*/
BUG_ON(ret);
- pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
+ pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
__func__, policy->cpu, old_freq, policy->cur);
}
}
@@ -1538,7 +1539,7 @@ static int cpufreq_online(unsigned int cpu)
/*
* Register with the energy model before
- * sugov_eas_rebuild_sd() is called, which will result
+ * em_rebuild_sched_domains() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model is properly initialized for the policy
* first.
@@ -1569,6 +1570,19 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
+ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ if (cpufreq_driver->set_boost &&
+ policy->boost_enabled != cpufreq_boost_enabled()) {
+ policy->boost_enabled = cpufreq_boost_enabled();
+ ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
+ if (ret) {
+ /* If the set_boost fails, the online operation is not affected */
+ pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
+ policy->boost_enabled ? "enable" : "disable");
+ policy->boost_enabled = !policy->boost_enabled;
+ }
+ }
+
pr_debug("initialization complete\n");
return 0;
@@ -2812,7 +2826,7 @@ err_reset_state:
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
- __func__, state ? "enable" : "disable");
+ __func__, str_enable_disable(state));
return ret;
}
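str_enable_disable() and str_enabled_disabled() come from the newly included <linux/string_choices.h>; simplified, the helpers reduce to:

	static inline const char *str_enable_disable(bool v)
	{
		return v ? "enable" : "disable";
	}

	static inline const char *str_enabled_disabled(bool v)
	{
		return v ? "enabled" : "disabled";
	}

Using them keeps the format strings greppable and avoids repeating the ternary in every log call.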
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 7d2754411d8c..8736be3a06ce 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -145,7 +145,7 @@ static struct platform_driver davinci_cpufreq_driver = {
.driver = {
.name = "cpufreq-davinci",
},
- .remove_new = __exit_p(davinci_cpufreq_remove),
+ .remove = __exit_p(davinci_cpufreq_remove),
};
int __init davinci_cpufreq_init(void)
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 577bb9e2f112..1492c92ffc1a 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -183,7 +183,7 @@ static void imx_cpufreq_dt_remove(struct platform_device *pdev)
static struct platform_driver imx_cpufreq_dt_driver = {
.probe = imx_cpufreq_dt_probe,
- .remove_new = imx_cpufreq_dt_remove,
+ .remove = imx_cpufreq_dt_remove,
.driver = {
.name = "imx-cpufreq-dt",
},
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index c20d3ecc5a81..f3c99f378ad6 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -522,7 +522,7 @@ static struct platform_driver imx6q_cpufreq_platdrv = {
.name = "imx6q-cpufreq",
},
.probe = imx6q_cpufreq_probe,
- .remove_new = imx6q_cpufreq_remove,
+ .remove = imx6q_cpufreq_remove,
};
module_platform_driver(imx6q_cpufreq_platdrv);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b0018f371ea3..9c4cc01fd51a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -28,6 +28,7 @@
#include <linux/pm_qos.h>
#include <linux/bitfield.h>
#include <trace/events/power.h>
+#include <linux/units.h>
#include <asm/cpu.h>
#include <asm/div64.h>
@@ -302,11 +303,11 @@ static bool hwp_is_hybrid;
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
-#define HYBRID_SCALING_FACTOR 78741
+#define HYBRID_SCALING_FACTOR_ADL 78741
#define HYBRID_SCALING_FACTOR_MTL 80000
#define HYBRID_SCALING_FACTOR_LNL 86957
-static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
+static int hybrid_scaling_factor;
static inline int core_get_scaling(void)
{
@@ -414,18 +415,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
static int intel_pstate_cppc_get_scaling(int cpu)
{
struct cppc_perf_caps cppc_perf;
- int ret;
-
- ret = cppc_get_perf_caps(cpu, &cppc_perf);
/*
- * If the nominal frequency and the nominal performance are not
- * zero and the ratio between them is not 100, return the hybrid
- * scaling factor.
+ * Compute the perf-to-frequency scaling factor for the given CPU if
+ * possible, unless it would be 0.
*/
- if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
- cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
- return hybrid_scaling_factor;
+ if (!cppc_get_perf_caps(cpu, &cppc_perf) &&
+ cppc_perf.nominal_perf && cppc_perf.nominal_freq)
+ return div_u64(cppc_perf.nominal_freq * KHZ_PER_MHZ,
+ cppc_perf.nominal_perf);
return core_get_scaling();
}
@@ -1028,26 +1026,29 @@ static void hybrid_update_cpu_capacity_scaling(void)
}
}
-static void __hybrid_init_cpu_capacity_scaling(void)
+static void __hybrid_refresh_cpu_capacity_scaling(void)
{
hybrid_max_perf_cpu = NULL;
hybrid_update_cpu_capacity_scaling();
}
-static void hybrid_init_cpu_capacity_scaling(void)
+static void hybrid_refresh_cpu_capacity_scaling(void)
{
- bool disable_itmt = false;
+ guard(mutex)(&hybrid_capacity_lock);
- mutex_lock(&hybrid_capacity_lock);
+ __hybrid_refresh_cpu_capacity_scaling();
+}
+static void hybrid_init_cpu_capacity_scaling(bool refresh)
+{
/*
* If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
* scaling has been enabled already and the driver is just changing the
* operation mode.
*/
- if (hybrid_max_perf_cpu) {
- __hybrid_init_cpu_capacity_scaling();
- goto unlock;
+ if (refresh) {
+ hybrid_refresh_cpu_capacity_scaling();
+ return;
}
/*
@@ -1056,19 +1057,25 @@ static void hybrid_init_cpu_capacity_scaling(void)
* do not do that when SMT is in use.
*/
if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
- __hybrid_init_cpu_capacity_scaling();
- disable_itmt = true;
+ hybrid_refresh_cpu_capacity_scaling();
+ /*
+ * Disabling ITMT causes sched domains to be rebuilt to disable asym
+ * packing and enable asym capacity.
+ */
+ sched_clear_itmt_support();
}
+}
-unlock:
- mutex_unlock(&hybrid_capacity_lock);
+static bool hybrid_clear_max_perf_cpu(void)
+{
+ bool ret;
- /*
- * Disabling ITMT causes sched domains to be rebuilt to disable asym
- * packing and enable asym capacity.
- */
- if (disable_itmt)
- sched_clear_itmt_support();
+ guard(mutex)(&hybrid_capacity_lock);
+
+ ret = !!hybrid_max_perf_cpu;
+ hybrid_max_perf_cpu = NULL;
+
+ return ret;
}
static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
@@ -1392,7 +1399,7 @@ static void intel_pstate_update_limits_for_all(void)
mutex_lock(&hybrid_capacity_lock);
if (hybrid_max_perf_cpu)
- __hybrid_init_cpu_capacity_scaling();
+ __hybrid_refresh_cpu_capacity_scaling();
mutex_unlock(&hybrid_capacity_lock);
}
@@ -2202,24 +2209,30 @@ static void hybrid_get_type(void *data)
static int hwp_get_cpu_scaling(int cpu)
{
- u8 cpu_type = 0;
+ if (hybrid_scaling_factor) {
+ u8 cpu_type = 0;
+
+ smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
+
+ /*
+ * Return the hybrid scaling factor for P-cores and use the
+ * default core scaling for E-cores.
+ */
+ if (cpu_type == 0x40)
+ return hybrid_scaling_factor;
- smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
- /* P-cores have a smaller perf level-to-freqency scaling factor. */
- if (cpu_type == 0x40)
- return hybrid_scaling_factor;
+ if (cpu_type == 0x20)
+ return core_get_scaling();
+ }
- /* Use default core scaling for E-cores */
- if (cpu_type == 0x20)
+ /* Use core scaling on non-hybrid systems. */
+ if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
return core_get_scaling();
/*
- * If reached here, this system is either non-hybrid (like Tiger
- * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with
- * no E cores (in which case CPUID for hybrid support is 0).
- *
- * The CPPC nominal_frequency field is 0 for non-hybrid systems,
- * so the default core scaling will be used for them.
+ * The system is hybrid, but the hybrid scaling factor is not known or
+ * the CPU type is not one of the above, so use CPPC to compute the
+ * scaling factor for this CPU.
*/
return intel_pstate_cppc_get_scaling(cpu);
}
@@ -2263,6 +2276,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
+ /*
+ * If the CPU is going online for the first time and it was
+ * offline initially, asym capacity scaling needs to be updated.
+ */
+ hybrid_update_capacity(cpu);
} else {
cpu->pstate.scaling = perf_ctl_scaling;
cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
@@ -2695,7 +2713,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
}
cpu->epp_powersave = -EINVAL;
- cpu->epp_policy = 0;
+ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
intel_pstate_get_cpu_pstates(cpu);
@@ -3352,6 +3370,7 @@ static void intel_pstate_driver_cleanup(void)
static int intel_pstate_register_driver(struct cpufreq_driver *driver)
{
+ bool refresh_cpu_cap_scaling;
int ret;
if (driver == &intel_pstate)
@@ -3364,6 +3383,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
arch_set_max_freq_ratio(global.turbo_disabled);
+ refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu();
+
intel_pstate_driver = driver;
ret = cpufreq_register_driver(intel_pstate_driver);
if (ret) {
@@ -3373,7 +3394,7 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
global.min_perf_pct = min_perf_pct_min();
- hybrid_init_cpu_capacity_scaling();
+ hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling);
return 0;
}
@@ -3638,6 +3659,8 @@ static const struct x86_cpu_id intel_epp_default[] = {
X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
179, 64, 16)),
X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
@@ -3646,8 +3669,12 @@ static const struct x86_cpu_id intel_epp_default[] = {
};
static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
+ X86_MATCH_VFM(INTEL_ALDERLAKE, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, HYBRID_SCALING_FACTOR_ADL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, HYBRID_SCALING_FACTOR_ADL),
X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
- X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
{}
};
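With the compile-time default gone, intel_pstate_cppc_get_scaling() derives the scaling factor from CPPC as nominal_freq (MHz) * KHZ_PER_MHZ / nominal_perf, and the fixed HYBRID_SCALING_FACTOR_ADL value is applied only to the explicitly matched Alder Lake and Raptor Lake models. Illustrative arithmetic, assuming a part with nominal_freq = 2200 MHz and nominal_perf = 28:

	scaling = div_u64(2200 * KHZ_PER_MHZ, 28);	/* ~78571 kHz per perf unit */

which lands in the same range as the 78741 constant it generalizes.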
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index fd20b986d1f2..312f2654d1d5 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -189,7 +189,7 @@ static void kirkwood_cpufreq_remove(struct platform_device *pdev)
static struct platform_driver kirkwood_cpufreq_platform_driver = {
.probe = kirkwood_cpufreq_probe,
- .remove_new = kirkwood_cpufreq_remove,
+ .remove = kirkwood_cpufreq_remove,
.driver = {
.name = "kirkwood-cpufreq",
},
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 6a8e97896d38..ed1a6dbad638 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -148,7 +148,9 @@ static int __init cpufreq_init(void)
ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
- if (!ret && !nowait) {
+ if (ret) {
+ platform_driver_unregister(&platform_driver);
+ } else if (!nowait) {
saved_cpu_wait = cpu_wait;
cpu_wait = loongson2_cpu_wait;
}
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
index 6b5e6798d9a2..bd34bf0fafa5 100644
--- a/drivers/cpufreq/loongson3_cpufreq.c
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -346,8 +346,11 @@ static int loongson3_cpufreq_probe(struct platform_device *pdev)
{
int i, ret;
- for (i = 0; i < MAX_PACKAGES; i++)
- devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]);
+ for (i = 0; i < MAX_PACKAGES; i++) {
+ ret = devm_mutex_init(&pdev->dev, &cpufreq_mutex[i]);
+ if (ret)
+ return ret;
+ }
ret = do_service_request(0, 0, CMD_GET_VERSION, 0, 0);
if (ret <= 0)
@@ -386,7 +389,7 @@ static struct platform_driver loongson3_platform_driver = {
},
.id_table = cpufreq_id_table,
.probe = loongson3_cpufreq_probe,
- .remove_new = loongson3_cpufreq_remove,
+ .remove = loongson3_cpufreq_remove,
};
module_platform_driver(loongson3_platform_driver);
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
deleted file mode 100644
index 690da85c4865..000000000000
--- a/drivers/cpufreq/maple-cpufreq.c
+++ /dev/null
@@ -1,242 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Dmitry Eremin-Solenikov
- * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
- * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
- *
- * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
- * that is iMac G5 and latest single CPU desktop.
- */
-
-#undef DEBUG
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-#include <linux/time.h>
-#include <linux/of.h>
-
-#define DBG(fmt...) pr_debug(fmt)
-
-/* see 970FX user manual */
-
-#define SCOM_PCR 0x0aa001 /* PCR scom addr */
-
-#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
-#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
-#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
-#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
-#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
-#define PCR_SPEED_SHIFT 17
-#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
-#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
-#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
-#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
-#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
-#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
-
-#define SCOM_PSR 0x408001 /* PSR scom addr */
-/* warning: PSR is a 64 bits register */
-#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
-#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
-#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
-#define PSR_CUR_SPEED_SHIFT (56)
-
-/*
- * The G5 only supports two frequencies (Quarter speed is not supported)
- */
-#define CPUFREQ_HIGH 0
-#define CPUFREQ_LOW 1
-
-static struct cpufreq_frequency_table maple_cpu_freqs[] = {
- {0, CPUFREQ_HIGH, 0},
- {0, CPUFREQ_LOW, 0},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-/* Power mode data is an array of the 32 bits PCR values to use for
- * the various frequencies, retrieved from the device-tree
- */
-static int maple_pmode_cur;
-
-static const u32 *maple_pmode_data;
-static int maple_pmode_max;
-
-/*
- * SCOM based frequency switching for 970FX rev3
- */
-static int maple_scom_switch_freq(int speed_mode)
-{
- unsigned long flags;
- int to;
-
- local_irq_save(flags);
-
- /* Clear PCR high */
- scom970_write(SCOM_PCR, 0);
- /* Clear PCR low */
- scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
- /* Set PCR low */
- scom970_write(SCOM_PCR, PCR_HILO_SELECT |
- maple_pmode_data[speed_mode]);
-
- /* Wait for completion */
- for (to = 0; to < 10; to++) {
- unsigned long psr = scom970_read(SCOM_PSR);
-
- if ((psr & PSR_CMD_RECEIVED) == 0 &&
- (((psr >> PSR_CUR_SPEED_SHIFT) ^
- (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
- == 0)
- break;
- if (psr & PSR_CMD_COMPLETED)
- break;
- udelay(100);
- }
-
- local_irq_restore(flags);
-
- maple_pmode_cur = speed_mode;
- ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul;
-
- return 0;
-}
-
-static int maple_scom_query_freq(void)
-{
- unsigned long psr = scom970_read(SCOM_PSR);
- int i;
-
- for (i = 0; i <= maple_pmode_max; i++)
- if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
- (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
- break;
- return i;
-}
-
-/*
- * Common interface to the cpufreq core
- */
-
-static int maple_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- return maple_scom_switch_freq(index);
-}
-
-static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
-{
- return maple_cpu_freqs[maple_pmode_cur].frequency;
-}
-
-static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
- return 0;
-}
-
-static struct cpufreq_driver maple_cpufreq_driver = {
- .name = "maple",
- .flags = CPUFREQ_CONST_LOOPS,
- .init = maple_cpufreq_cpu_init,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = maple_cpufreq_target,
- .get = maple_cpufreq_get_speed,
- .attr = cpufreq_generic_attr,
-};
-
-static int __init maple_cpufreq_init(void)
-{
- struct device_node *cpunode;
- unsigned int psize;
- unsigned long max_freq;
- const u32 *valp;
- u32 pvr_hi;
- int rc = -ENODEV;
-
- /*
- * Behave here like powermac driver which checks machine compatibility
- * to ease merging of two drivers in future.
- */
- if (!of_machine_is_compatible("Momentum,Maple") &&
- !of_machine_is_compatible("Momentum,Apache"))
- return 0;
-
- /* Get first CPU node */
- cpunode = of_cpu_device_node_get(0);
- if (cpunode == NULL) {
- pr_err("Can't find any CPU 0 node\n");
- goto bail_noprops;
- }
-
- /* Check 970FX for now */
- /* we actually don't care on which CPU to access PVR */
- pvr_hi = PVR_VER(mfspr(SPRN_PVR));
- if (pvr_hi != 0x3c && pvr_hi != 0x44) {
- pr_err("Unsupported CPU version (%x)\n", pvr_hi);
- goto bail_noprops;
- }
-
- /* Look for the powertune data in the device-tree */
- /*
- * On Maple this property is provided by PIBS in dual-processor config,
- * not provided by PIBS in CPU0 config and also not provided by SLOF,
- * so YMMV
- */
- maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
- if (!maple_pmode_data) {
- DBG("No power-mode-data !\n");
- goto bail_noprops;
- }
- maple_pmode_max = psize / sizeof(u32) - 1;
-
- /*
- * From what I see, clock-frequency is always the maximal frequency.
- * The current driver can not slew sysclk yet, so we really only deal
- * with powertune steps for now. We also only implement full freq and
- * half freq in this version. So far, I haven't yet seen a machine
- * supporting anything else.
- */
- valp = of_get_property(cpunode, "clock-frequency", NULL);
- if (!valp)
- goto bail_noprops;
- max_freq = (*valp)/1000;
- maple_cpu_freqs[0].frequency = max_freq;
- maple_cpu_freqs[1].frequency = max_freq/2;
-
- /* Force apply current frequency to make sure everything is in
- * sync (voltage is right for example). Firmware may leave us with
- * a strange setting ...
- */
- msleep(10);
- maple_pmode_cur = -1;
- maple_scom_switch_freq(maple_scom_query_freq());
-
- pr_info("Registering Maple CPU frequency driver\n");
- pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
- maple_cpu_freqs[1].frequency/1000,
- maple_cpu_freqs[0].frequency/1000,
- maple_cpu_freqs[maple_pmode_cur].frequency/1000);
-
- rc = cpufreq_register_driver(&maple_cpufreq_driver);
-
-bail_noprops:
- of_node_put(cpunode);
-
- return rc;
-}
-
-module_init(maple_cpufreq_init);
-
-
-MODULE_DESCRIPTION("cpufreq driver for Maple 970FX/970MP boards");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 8925e096d5b9..9252ebd60373 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -62,7 +62,7 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
policy = cpufreq_cpu_get_raw(cpu_dev->id);
if (!policy)
- return 0;
+ return -EINVAL;
data = policy->driver_data;
@@ -344,7 +344,7 @@ MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
static struct platform_driver mtk_cpufreq_hw_driver = {
.probe = mtk_cpufreq_hw_driver_probe,
- .remove_new = mtk_cpufreq_hw_driver_remove,
+ .remove = mtk_cpufreq_hw_driver_remove,
.driver = {
.name = "mtk-cpufreq-hw",
.of_match_table = mtk_cpufreq_hw_match,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index de8be0a8932d..106220c0fd11 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -188,7 +188,7 @@ static struct platform_driver omap_cpufreq_platdrv = {
.name = "omap-cpufreq",
},
.probe = omap_cpufreq_probe,
- .remove_new = omap_cpufreq_remove,
+ .remove = omap_cpufreq_remove,
};
module_platform_driver(omap_cpufreq_platdrv);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 771efbf51a48..ac2e90a65f0c 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -615,7 +615,7 @@ static struct platform_driver pcc_cpufreq_platdrv = {
.driver = {
.name = "pcc-cpufreq",
},
- .remove_new = pcc_cpufreq_remove,
+ .remove = pcc_cpufreq_remove,
};
static int __init pcc_cpufreq_init(void)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 8de759247771..ae79d909943b 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/cpu.h>
#include <linux/hashtable.h>
#include <trace/events/power.h>
@@ -281,7 +282,7 @@ next:
pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
- (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
+ str_enabled_disabled(powernv_pstate_info.wof_enabled));
pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
if (!pstate_ids) {
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 900d6844c43d..b2e7e89feaac 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -143,14 +143,12 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
}
/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_get_freq(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data;
const struct qcom_cpufreq_soc_data *soc_data;
- struct cpufreq_policy *policy;
unsigned int index;
- policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
@@ -163,12 +161,10 @@ static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
return policy->freq_table[index].frequency;
}
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+static unsigned int __qcom_cpufreq_hw_get(struct cpufreq_policy *policy)
{
struct qcom_cpufreq_data *data;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
@@ -177,7 +173,12 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
if (data->throttle_irq >= 0)
return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
- return qcom_cpufreq_get_freq(cpu);
+ return qcom_cpufreq_get_freq(policy);
+}
+
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+{
+ return __qcom_cpufreq_hw_get(cpufreq_cpu_get_raw(cpu));
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
@@ -363,7 +364,7 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
* If h/w throttled frequency is higher than what cpufreq has requested
* for, then stop polling and switch back to interrupt mechanism.
*/
- if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
+ if (throttled_freq >= qcom_cpufreq_get_freq(cpufreq_cpu_get_raw(cpu)))
enable_irq(data->throttle_irq);
else
mod_delayed_work(system_highpri_wq, &data->throttle_work,
@@ -441,7 +442,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return data->throttle_irq;
data->cancel_throttle = false;
- data->policy = policy;
mutex_init(&data->throttle_lock);
INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
@@ -552,6 +552,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = data;
policy->dvfs_possible_from_any_cpu = true;
+ data->policy = policy;
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
@@ -622,11 +623,24 @@ static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned lon
{
struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);
- return qcom_lmh_get_throttle_freq(data);
+ return __qcom_cpufreq_hw_get(data->policy) * HZ_PER_KHZ;
+}
+
+/*
+ * Since we cannot determine the rate closest to the target rate, just
+ * return the actual rate the clock is running at. This is needed to
+ * make the clk_set_rate() API work properly.
+ */
+static int qcom_cpufreq_hw_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ req->rate = qcom_cpufreq_hw_recalc_rate(hw, 0);
+
+ return 0;
}
static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
.recalc_rate = qcom_cpufreq_hw_recalc_rate,
+ .determine_rate = qcom_cpufreq_hw_determine_rate,
};
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
@@ -736,7 +750,7 @@ static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
static struct platform_driver qcom_cpufreq_hw_driver = {
.probe = qcom_cpufreq_hw_driver_probe,
- .remove_new = qcom_cpufreq_hw_driver_remove,
+ .remove = qcom_cpufreq_hw_driver_remove,
.driver = {
.name = "qcom-cpufreq-hw",
.of_match_table = qcom_cpufreq_hw_match,
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 703308fb891a..3a8ed723a23e 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -52,12 +52,13 @@ struct qcom_cpufreq_match_data {
struct nvmem_cell *speedbin_nvmem,
char **pvs_name,
struct qcom_cpufreq_drv *drv);
- const char **genpd_names;
+ const char **pd_names;
+ unsigned int num_pd_names;
};
struct qcom_cpufreq_drv_cpu {
int opp_token;
- struct device **virt_devs;
+ struct dev_pm_domain_list *pd_list;
};
struct qcom_cpufreq_drv {
@@ -395,8 +396,6 @@ static int qcom_cpufreq_ipq8074_name_version(struct device *cpu_dev,
return 0;
}
-static const char *generic_genpd_names[] = { "perf", NULL };
-
static const struct qcom_cpufreq_match_data match_data_kryo = {
.get_version = qcom_cpufreq_kryo_name_version,
};
@@ -407,13 +406,13 @@ static const struct qcom_cpufreq_match_data match_data_krait = {
static const struct qcom_cpufreq_match_data match_data_msm8909 = {
.get_version = qcom_cpufreq_simple_get_version,
- .genpd_names = generic_genpd_names,
+ .pd_names = (const char *[]) { "perf" },
+ .num_pd_names = 1,
};
-static const char *qcs404_genpd_names[] = { "cpr", NULL };
-
static const struct qcom_cpufreq_match_data match_data_qcs404 = {
- .genpd_names = qcs404_genpd_names,
+ .pd_names = (const char *[]) { "cpr" },
+ .num_pd_names = 1,
};
static const struct qcom_cpufreq_match_data match_data_ipq6018 = {
@@ -428,28 +427,16 @@ static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
.get_version = qcom_cpufreq_ipq8074_name_version,
};
-static void qcom_cpufreq_suspend_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
-{
- const char * const *name = drv->data->genpd_names;
- int i;
-
- if (!drv->cpus[cpu].virt_devs)
- return;
-
- for (i = 0; *name; i++, name++)
- device_set_awake_path(drv->cpus[cpu].virt_devs[i]);
-}
-
-static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
+static void qcom_cpufreq_suspend_pd_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
{
- const char * const *name = drv->data->genpd_names;
+ struct dev_pm_domain_list *pd_list = drv->cpus[cpu].pd_list;
int i;
- if (!drv->cpus[cpu].virt_devs)
+ if (!pd_list)
return;
- for (i = 0; *name; i++, name++)
- pm_runtime_put(drv->cpus[cpu].virt_devs[i]);
+ for (i = 0; i < pd_list->num_pds; i++)
+ device_set_awake_path(pd_list->pd_devs[i]);
}
static int qcom_cpufreq_probe(struct platform_device *pdev)
@@ -503,7 +490,6 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
for_each_possible_cpu(cpu) {
- struct device **virt_devs = NULL;
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@@ -522,12 +508,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
config.prop_name = pvs_name;
}
- if (drv->data->genpd_names) {
- config.genpd_names = drv->data->genpd_names;
- config.virt_devs = &virt_devs;
- }
-
- if (config.supported_hw || config.genpd_names) {
+ if (config.supported_hw) {
drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);
if (drv->cpus[cpu].opp_token < 0) {
ret = drv->cpus[cpu].opp_token;
@@ -536,25 +517,18 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
}
}
- if (virt_devs) {
- const char * const *name = config.genpd_names;
- int i, j;
-
- for (i = 0; *name; i++, name++) {
- ret = pm_runtime_resume_and_get(virt_devs[i]);
- if (ret) {
- dev_err(cpu_dev, "failed to resume %s: %d\n",
- *name, ret);
-
- /* Rollback previous PM runtime calls */
- name = config.genpd_names;
- for (j = 0; *name && j < i; j++, name++)
- pm_runtime_put(virt_devs[j]);
-
- goto free_opp;
- }
- }
- drv->cpus[cpu].virt_devs = virt_devs;
+ if (drv->data->pd_names) {
+ struct dev_pm_domain_attach_data attach_data = {
+ .pd_names = drv->data->pd_names,
+ .num_pd_names = drv->data->num_pd_names,
+ .pd_flags = PD_FLAG_DEV_LINK_ON |
+ PD_FLAG_REQUIRED_OPP,
+ };
+
+ ret = dev_pm_domain_attach_list(cpu_dev, &attach_data,
+ &drv->cpus[cpu].pd_list);
+ if (ret < 0)
+ goto free_opp;
}
}
@@ -570,7 +544,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
free_opp:
for_each_possible_cpu(cpu) {
- qcom_cpufreq_put_virt_devs(drv, cpu);
+ dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
return ret;
@@ -584,7 +558,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
for_each_possible_cpu(cpu) {
- qcom_cpufreq_put_virt_devs(drv, cpu);
+ dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
}
@@ -595,7 +569,7 @@ static int qcom_cpufreq_suspend(struct device *dev)
unsigned int cpu;
for_each_possible_cpu(cpu)
- qcom_cpufreq_suspend_virt_devs(drv, cpu);
+ qcom_cpufreq_suspend_pd_devs(drv, cpu);
return 0;
}
@@ -604,7 +578,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL)
static struct platform_driver qcom_cpufreq_driver = {
.probe = qcom_cpufreq_probe,
- .remove_new = qcom_cpufreq_remove,
+ .remove = qcom_cpufreq_remove,
.driver = {
.name = "qcom-cpufreq-nvmem",
.pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops),
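dev_pm_domain_attach_list() replaces the hand-rolled genpd attach code: PD_FLAG_DEV_LINK_ON has the core create device links that keep the domains powered, which is what the removed pm_runtime_resume_and_get() loop and its rollback did manually, and dev_pm_domain_detach_list() tears everything down in one call on both the remove and error paths. A minimal sketch of the new pattern, assuming a device with a single "perf" power domain:

	struct dev_pm_domain_attach_data data = {
		.pd_names = (const char *[]) { "perf" },
		.num_pd_names = 1,
		.pd_flags = PD_FLAG_DEV_LINK_ON | PD_FLAG_REQUIRED_OPP,
	};
	struct dev_pm_domain_list *list;
	int ret;

	ret = dev_pm_domain_attach_list(dev, &data, &list);
	if (ret < 0)
		return ret;
	...
	dev_pm_domain_detach_list(list);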
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 3519bf34d397..a37ce051236c 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -296,7 +296,7 @@ static struct platform_driver qoriq_cpufreq_platform_driver = {
.name = "qoriq-cpufreq",
},
.probe = qoriq_cpufreq_probe,
- .remove_new = qoriq_cpufreq_remove,
+ .remove = qoriq_cpufreq_remove,
};
module_platform_driver(qoriq_cpufreq_platform_driver);
diff --git a/drivers/cpufreq/raspberrypi-cpufreq.c b/drivers/cpufreq/raspberrypi-cpufreq.c
index e0705cc9a57d..5050932954e3 100644
--- a/drivers/cpufreq/raspberrypi-cpufreq.c
+++ b/drivers/cpufreq/raspberrypi-cpufreq.c
@@ -85,7 +85,7 @@ static struct platform_driver raspberrypi_cpufreq_driver = {
.name = "raspberrypi-cpufreq",
},
.probe = raspberrypi_cpufreq_probe,
- .remove_new = raspberrypi_cpufreq_remove,
+ .remove = raspberrypi_cpufreq_remove,
};
module_platform_driver(raspberrypi_cpufreq_driver);
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index c6bdfc308e99..9cef71528076 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -24,6 +24,7 @@ struct s3c64xx_dvfs {
unsigned int vddarm_max;
};
+#ifdef CONFIG_REGULATOR
static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[0] = { 1000000, 1150000 },
[1] = { 1050000, 1150000 },
@@ -31,6 +32,7 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
[3] = { 1200000, 1350000 },
[4] = { 1300000, 1350000 },
};
+#endif
static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 0, 66000 },
@@ -51,15 +53,16 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
- struct s3c64xx_dvfs *dvfs;
- unsigned int old_freq, new_freq;
+ unsigned int new_freq = s3c64xx_freq_table[index].frequency;
int ret;
+#ifdef CONFIG_REGULATOR
+ struct s3c64xx_dvfs *dvfs;
+ unsigned int old_freq;
+
old_freq = clk_get_rate(policy->clk) / 1000;
- new_freq = s3c64xx_freq_table[index].frequency;
dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
-#ifdef CONFIG_REGULATOR
if (vddarm && new_freq > old_freq) {
ret = regulator_set_voltage(vddarm,
dvfs->vddarm_min,
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 5892c73e129d..b8fe758aeb01 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>
@@ -26,6 +27,8 @@ struct scmi_data {
int nr_opp;
struct device *cpu_dev;
cpumask_var_t opp_shared_cpus;
+ struct notifier_block limit_notify_nb;
+ struct freq_qos_request limits_freq_req;
};
static struct scmi_protocol_handle *ph;
@@ -174,6 +177,22 @@ static struct freq_attr *scmi_cpufreq_hw_attr[] = {
NULL,
};
+static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
+ struct scmi_perf_limits_report *limit_notify = data;
+ unsigned int limit_freq_khz;
+ int ret;
+
+ limit_freq_khz = limit_notify->range_max_freq / HZ_PER_KHZ;
+
+ ret = freq_qos_update_request(&priv->limits_freq_req, limit_freq_khz);
+ if (ret < 0)
+ pr_warn("failed to update freq constraint: %d\n", ret);
+
+ return NOTIFY_OK;
+}
+
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, nr_opp, domain;
@@ -181,6 +200,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
+ struct scmi_device *sdev = cpufreq_get_driver_data();
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -287,15 +307,34 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
ret = cpufreq_enable_boost_support();
if (ret) {
dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- goto out_free_opp;
+ goto out_free_table;
} else {
scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
scmi_cpufreq_driver.boost_enabled = true;
}
}
+ ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to add qos limits request: %d\n", ret);
+ goto out_free_table;
+ }
+
+ priv->limit_notify_nb.notifier_call = scmi_limit_notify_cb;
+ ret = sdev->handle->notify_ops->event_notifier_register(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ if (ret)
+ dev_warn(&sdev->dev,
+ "failed to register for limits change notifier for domain %d\n",
+ priv->domain_id);
+
return 0;
+out_free_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
@@ -311,7 +350,13 @@ out_free_priv:
static void scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
struct scmi_data *priv = policy->driver_data;
+ struct scmi_device *sdev = cpufreq_get_driver_data();
+ sdev->handle->notify_ops->event_notifier_unregister(sdev->handle, SCMI_PROTOCOL_PERF,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+ &priv->domain_id,
+ &priv->limit_notify_nb);
+ freq_qos_remove_request(&priv->limits_freq_req);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
free_cpumask_var(priv->opp_shared_cpus);
@@ -370,6 +415,8 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
if (!handle)
return -ENODEV;
+ scmi_cpufreq_driver.driver_data = sdev;
+
perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
if (IS_ERR(perf_ops))
return PTR_ERR(perf_ops);
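The SCMI notification reports range_max_freq in Hz while freq QoS operates in kHz, hence the HZ_PER_KHZ division in scmi_limit_notify_cb(): a platform-imposed limit of 2400000000 Hz becomes a 2400000 kHz ceiling on the policy. Because the notifier is registered per perf domain with priv->domain_id, each callback instance only updates the limits_freq_req of its own policy.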
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 8d73e6e8be2a..cd89c1b9832c 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -217,7 +217,7 @@ static struct platform_driver scpi_cpufreq_platdrv = {
.name = "scpi-cpufreq",
},
.probe = scpi_cpufreq_probe,
- .remove_new = scpi_cpufreq_remove,
+ .remove = scpi_cpufreq_remove,
};
module_platform_driver(scpi_cpufreq_platdrv);
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 8a0cd5312a59..15899dd77c08 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -323,7 +323,7 @@ static int __init us2e_freq_init(void)
impl = ((ver >> 32) & 0xffff);
if (manuf == 0x17 && impl == 0x13) {
- us2e_freq_table = kzalloc(NR_CPUS * sizeof(*us2e_freq_table),
+ us2e_freq_table = kcalloc(NR_CPUS, sizeof(*us2e_freq_table),
GFP_KERNEL);
if (!us2e_freq_table)
return -ENOMEM;
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index b50f9d13e6d2..de50a2f3b124 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -171,7 +171,7 @@ static int __init us3_freq_init(void)
impl == CHEETAH_PLUS_IMPL ||
impl == JAGUAR_IMPL ||
impl == PANTHER_IMPL)) {
- us3_freq_table = kzalloc(NR_CPUS * sizeof(*us3_freq_table),
+ us3_freq_table = kcalloc(NR_CPUS, sizeof(*us3_freq_table),
GFP_KERNEL);
if (!us3_freq_table)
return -ENOMEM;
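kcalloc() is the overflow-safe counterpart of the removed open-coded multiplication: it checks n * size for overflow and returns NULL instead of allocating a wrapped-around size. The shape of the fix in both sparc drivers:

	/* before: an overflow in NR_CPUS * sizeof(*tbl) goes unnoticed */
	tbl = kzalloc(NR_CPUS * sizeof(*tbl), GFP_KERNEL);

	/* after: kcalloc() fails cleanly on multiplication overflow */
	tbl = kcalloc(NR_CPUS, sizeof(*tbl), GFP_KERNEL);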
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 293921acec93..17d6a149f580 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -22,6 +22,9 @@
#define NVMEM_MASK 0x7
#define NVMEM_SHIFT 5
+#define SUN50I_A100_NVMEM_MASK 0xf
+#define SUN50I_A100_NVMEM_SHIFT 12
+
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
struct sunxi_cpufreq_data {
@@ -45,6 +48,23 @@ static u32 sun50i_h6_efuse_xlate(u32 speedbin)
return 0;
}
+static u32 sun50i_a100_efuse_xlate(u32 speedbin)
+{
+ u32 efuse_value;
+
+ efuse_value = (speedbin >> SUN50I_A100_NVMEM_SHIFT) &
+ SUN50I_A100_NVMEM_MASK;
+
+ switch (efuse_value) {
+ case 0b100:
+ return 2;
+ case 0b010:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static int get_soc_id_revision(void)
{
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
@@ -108,6 +128,10 @@ static struct sunxi_cpufreq_data sun50i_h6_cpufreq_data = {
.efuse_xlate = sun50i_h6_efuse_xlate,
};
+static struct sunxi_cpufreq_data sun50i_a100_cpufreq_data = {
+ .efuse_xlate = sun50i_a100_efuse_xlate,
+};
+
static struct sunxi_cpufreq_data sun50i_h616_cpufreq_data = {
.efuse_xlate = sun50i_h616_efuse_xlate,
};
@@ -116,6 +140,9 @@ static const struct of_device_id cpu_opp_match_list[] = {
{ .compatible = "allwinner,sun50i-h6-operating-points",
.data = &sun50i_h6_cpufreq_data,
},
+ { .compatible = "allwinner,sun50i-a100-operating-points",
+ .data = &sun50i_a100_cpufreq_data,
+ },
{ .compatible = "allwinner,sun50i-h616-operating-points",
.data = &sun50i_h616_cpufreq_data,
},
@@ -283,7 +310,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
static struct platform_driver sun50i_cpufreq_driver = {
.probe = sun50i_cpufreq_nvmem_probe,
- .remove_new = sun50i_cpufreq_nvmem_remove,
+ .remove = sun50i_cpufreq_nvmem_remove,
.driver = {
.name = "sun50i-cpufreq-nvmem",
},
@@ -291,6 +318,7 @@ static struct platform_driver sun50i_cpufreq_driver = {
static const struct of_device_id sun50i_cpufreq_match_list[] = {
{ .compatible = "allwinner,sun50i-h6" },
+ { .compatible = "allwinner,sun50i-a100" },
{ .compatible = "allwinner,sun50i-h616" },
{ .compatible = "allwinner,sun50i-h618" },
{ .compatible = "allwinner,sun50i-h700" },
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 7b8fcfa55038..c7761eb99f3c 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -276,7 +276,7 @@ static struct platform_driver tegra186_cpufreq_platform_driver = {
.of_match_table = tegra186_cpufreq_of_match,
},
.probe = tegra186_cpufreq_probe,
- .remove_new = tegra186_cpufreq_remove,
+ .remove = tegra186_cpufreq_remove,
};
module_platform_driver(tegra186_cpufreq_platform_driver);
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 07ea7ed61b68..9055dd398e7f 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -818,7 +818,7 @@ static struct platform_driver tegra194_ccplex_driver = {
.of_match_table = tegra194_cpufreq_of_match,
},
.probe = tegra194_cpufreq_probe,
- .remove_new = tegra194_cpufreq_remove,
+ .remove = tegra194_cpufreq_remove,
};
module_platform_driver(tegra194_ccplex_driver);
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index ba621ce1cdda..5a5147277cd0 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -93,6 +93,8 @@ struct ti_cpufreq_soc_data {
bool multi_regulator;
/* Backward compatibility hack: Might have missing syscon */
#define TI_QUIRK_SYSCON_MAY_BE_MISSING 0x1
+/* Backward compatibility hack: new syscon size is 1 register wide */
+#define TI_QUIRK_SYSCON_IS_SINGLE_REG 0x2
u8 quirks;
};
@@ -316,8 +318,8 @@ static struct ti_cpufreq_soc_data am625_soc_data = {
.efuse_offset = 0x0018,
.efuse_mask = 0x07c0,
.efuse_shift = 0x6,
- .rev_offset = 0x0014,
.multi_regulator = false,
+ .quirks = TI_QUIRK_SYSCON_IS_SINGLE_REG,
};
static struct ti_cpufreq_soc_data am62a7_soc_data = {
@@ -325,7 +327,6 @@ static struct ti_cpufreq_soc_data am62a7_soc_data = {
.efuse_offset = 0x0,
.efuse_mask = 0x07c0,
.efuse_shift = 0x6,
- .rev_offset = 0x0014,
.multi_regulator = false,
};
@@ -334,7 +335,6 @@ static struct ti_cpufreq_soc_data am62p5_soc_data = {
.efuse_offset = 0x0,
.efuse_mask = 0x07c0,
.efuse_shift = 0x6,
- .rev_offset = 0x0014,
.multi_regulator = false,
};
@@ -354,6 +354,10 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
&efuse);
+
+ if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_IS_SINGLE_REG && ret == -EIO)
+ ret = regmap_read(opp_data->syscon, 0x0, &efuse);
+
if (opp_data->soc_data->quirks & TI_QUIRK_SYSCON_MAY_BE_MISSING && ret == -EIO) {
/* not a syscon register! */
void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 3fadf536c429..0f86cdb7ec8a 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -565,7 +565,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = {
.name = "vexpress-spc-cpufreq",
},
.probe = ve_spc_cpufreq_probe,
- .remove_new = ve_spc_cpufreq_remove,
+ .remove = ve_spc_cpufreq_remove,
};
module_platform_driver(ve_spc_cpufreq_platdrv);
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
new file mode 100644
index 000000000000..a050b3a6737f
--- /dev/null
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Google LLC
+ */
+
+#include <linux/arch_topology.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * CPU0..CPUn
+ * +-------------+-------------------------------+--------+-------+
+ * | Register | Description | Offset | Len |
+ * +-------------+-------------------------------+--------+-------+
+ * | cur_perf | read this register to get | 0x0 | 0x4 |
+ * | | the current perf (integer val | | |
+ * | | representing perf relative to | | |
+ * | | max performance) | | |
+ * | | that vCPU is running at | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | set_perf | write to this register to set | 0x4 | 0x4 |
+ * | | perf value of the vCPU | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perftbl_len | number of entries in perf | 0x8 | 0x4 |
+ * | | table. A single entry in the | | |
+ * | | perf table denotes no table | | |
+ * | | and the entry contains | | |
+ * | | the maximum perf value | | |
+ * | | that this vCPU supports. | | |
+ * | | The guest can request any | | |
+ * | | value between 1 and max perf | | |
+ * | | when perftbls are not used. | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perftbl_sel | write to this register to | 0xc | 0x4 |
+ * | | select perf table entry to | | |
+ * | | read from | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perftbl_rd | read this register to get | 0x10 | 0x4 |
+ * | | perf value of the selected | | |
+ * | | entry based on perftbl_sel | | |
+ * +-------------+-------------------------------+--------+-------+
+ * | perf_domain | performance domain number | 0x14 | 0x4 |
+ * | | that this vCPU belongs to. | | |
+ * | | vCPUs sharing the same perf | | |
+ * | | domain number are part of the | | |
+ * | | same performance domain. | | |
+ * +-------------+-------------------------------+--------+-------+
+ */
+
+#define REG_CUR_PERF_STATE_OFFSET 0x0
+#define REG_SET_PERF_STATE_OFFSET 0x4
+#define REG_PERFTBL_LEN_OFFSET 0x8
+#define REG_PERFTBL_SEL_OFFSET 0xc
+#define REG_PERFTBL_RD_OFFSET 0x10
+#define REG_PERF_DOMAIN_OFFSET 0x14
+#define PER_CPU_OFFSET 0x1000
+
+#define PERFTBL_MAX_ENTRIES 64U
+
+static void __iomem *base;
+static DEFINE_PER_CPU(u32, perftbl_num_entries);
+
+static void virt_scale_freq_tick(void)
+{
+ int cpu = smp_processor_id();
+ u32 max_freq = (u32)cpufreq_get_hw_max_freq(cpu);
+ u64 cur_freq;
+ unsigned long scale;
+
+ cur_freq = (u64)readl_relaxed(base + cpu * PER_CPU_OFFSET
+ + REG_CUR_PERF_STATE_OFFSET);
+
+ cur_freq <<= SCHED_CAPACITY_SHIFT;
+ scale = (unsigned long)div_u64(cur_freq, max_freq);
+ scale = min(scale, SCHED_CAPACITY_SCALE);
+
+ this_cpu_write(arch_freq_scale, scale);
+}
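/*
 * Worked example of the arithmetic above (illustrative values, not part
 * of the original patch): with max_freq = 2048000 kHz and a cur_perf
 * readback of 1024000, scale = (1024000 << 10) / 2048000 = 512, i.e.
 * the vCPU is credited with half of SCHED_CAPACITY_SCALE (1024).
 */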
+
+static struct scale_freq_data virt_sfd = {
+ .source = SCALE_FREQ_SOURCE_VIRT,
+ .set_freq_scale = virt_scale_freq_tick,
+};
+
+static unsigned int virt_cpufreq_set_perf(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ writel_relaxed(target_freq,
+ base + policy->cpu * PER_CPU_OFFSET + REG_SET_PERF_STATE_OFFSET);
+ return 0;
+}
+
+static unsigned int virt_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ virt_cpufreq_set_perf(policy, target_freq);
+ return target_freq;
+}
+
+static u32 virt_cpufreq_get_perftbl_entry(int cpu, u32 idx)
+{
+ writel_relaxed(idx, base + cpu * PER_CPU_OFFSET +
+ REG_PERFTBL_SEL_OFFSET);
+ return readl_relaxed(base + cpu * PER_CPU_OFFSET +
+ REG_PERFTBL_RD_OFFSET);
+}
+
+static int virt_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ int ret = 0;
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+
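+	/*
+	 * Bracket the MMIO perf write with the cpufreq transition notifiers
+	 * so that registered listeners observe a consistent old/new pair.
+	 */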
+ cpufreq_freq_transition_begin(policy, &freqs);
+ ret = virt_cpufreq_set_perf(policy, target_freq);
+ cpufreq_freq_transition_end(policy, &freqs, ret != 0);
+
+ return ret;
+}
+
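+/*
+ * vCPUs reporting the same perf_domain value share one cpufreq policy;
+ * e.g. (hypothetical topology) if vCPUs 0-3 all read back domain 0 from
+ * REG_PERF_DOMAIN_OFFSET, they all land in this policy's cpus mask.
+ */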
+static int virt_cpufreq_get_sharing_cpus(struct cpufreq_policy *policy)
+{
+ u32 cur_perf_domain, perf_domain;
+ struct device *cpu_dev;
+ int cpu;
+
+ cur_perf_domain = readl_relaxed(base + policy->cpu *
+ PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);
+
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ continue;
+
+ perf_domain = readl_relaxed(base + cpu *
+ PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);
+
+ if (perf_domain == cur_perf_domain)
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+
+ return 0;
+}
+
+static int virt_cpufreq_get_freq_info(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table;
+ u32 num_perftbl_entries, idx;
+
+ num_perftbl_entries = per_cpu(perftbl_num_entries, policy->cpu);
+
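+	/*
+	 * Per the register description above, a single-entry table denotes
+	 * no table at all: the sole entry holds the maximum perf value and
+	 * any value in [1, max] may be requested.
+	 */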
+ if (num_perftbl_entries == 1) {
+ policy->cpuinfo.min_freq = 1;
+ policy->cpuinfo.max_freq = virt_cpufreq_get_perftbl_entry(policy->cpu, 0);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ policy->cur = policy->max;
+ return 0;
+ }
+
+ table = kcalloc(num_perftbl_entries + 1, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ for (idx = 0; idx < num_perftbl_entries; idx++)
+ table[idx].frequency = virt_cpufreq_get_perftbl_entry(policy->cpu, idx);
+
+ table[idx].frequency = CPUFREQ_TABLE_END;
+ policy->freq_table = table;
+
+ return 0;
+}
+
+static int virt_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev;
+ int ret;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ ret = virt_cpufreq_get_freq_info(policy);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get cpufreq info\n");
+ return ret;
+ }
+
+ ret = virt_cpufreq_get_sharing_cpus(policy);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+ return ret;
+ }
+
+	/*
+	 * To simplify handling of frequency requests on the host side and
+	 * reduce their latency, this ensures that the vCPU thread triggering
+	 * the MMIO abort is the same thread whose performance constraints
+	 * (e.g. uclamp settings) need to be updated. This spares the VMM
+	 * (Virtual Machine Manager) from having to find the correct vCPU
+	 * thread and/or from facing permission issues when configuring other
+	 * threads.
+	 */
+ policy->dvfs_possible_from_any_cpu = false;
+ policy->fast_switch_possible = true;
+
+	/*
+	 * Using the default SCALE_FREQ_SOURCE_CPUFREQ is insufficient since
+	 * the actual physical CPU frequency may not match the frequency
+	 * requested by the vCPU thread due to frequency update latencies or
+	 * other inputs to the physical CPU frequency selection. This
+	 * additional FIE source allows for more accurate freq_scale updates
+	 * and only takes effect if another FIE source, such as the AMUs, has
+	 * not been registered.
+	 */
+ topology_set_scale_freq_source(&virt_sfd, policy->cpus);
+
+ return 0;
+}
+
+static void virt_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_VIRT, policy->related_cpus);
+ kfree(policy->freq_table);
+}
+
+static int virt_cpufreq_online(struct cpufreq_policy *policy)
+{
+ /* Nothing to restore. */
+ return 0;
+}
+
+static int virt_cpufreq_offline(struct cpufreq_policy *policy)
+{
+ /* Dummy offline() to avoid exit() being called and freeing resources. */
+ return 0;
+}
+
+static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
+{
+ if (policy->freq_table)
+ return cpufreq_frequency_table_verify(policy, policy->freq_table);
+
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+static struct cpufreq_driver cpufreq_virt_driver = {
+ .name = "virt-cpufreq",
+ .init = virt_cpufreq_cpu_init,
+ .exit = virt_cpufreq_cpu_exit,
+ .online = virt_cpufreq_online,
+ .offline = virt_cpufreq_offline,
+ .verify = virt_cpufreq_verify_policy,
+ .target = virt_cpufreq_target,
+ .fast_switch = virt_cpufreq_fast_switch,
+ .attr = cpufreq_generic_attr,
+};
+
+static int virt_cpufreq_driver_probe(struct platform_device *pdev)
+{
+ u32 num_perftbl_entries;
+ int ret, cpu;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
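+	/*
+	 * Cache and sanity-check the advertised perf table length for each
+	 * possible vCPU; zero or an oversized value means the interface is
+	 * absent or unusable.
+	 */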
+ for_each_possible_cpu(cpu) {
+ num_perftbl_entries = readl_relaxed(base + cpu * PER_CPU_OFFSET +
+ REG_PERFTBL_LEN_OFFSET);
+
+ if (!num_perftbl_entries || num_perftbl_entries > PERFTBL_MAX_ENTRIES)
+ return -ENODEV;
+
+ per_cpu(perftbl_num_entries, cpu) = num_perftbl_entries;
+ }
+
+ ret = cpufreq_register_driver(&cpufreq_virt_driver);
+ if (ret) {
+ dev_err(&pdev->dev, "Virtual CPUFreq driver failed to register: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "Virtual CPUFreq driver initialized\n");
+ return 0;
+}
+
+static void virt_cpufreq_driver_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&cpufreq_virt_driver);
+}
+
+static const struct of_device_id virt_cpufreq_match[] = {
+ { .compatible = "qemu,virtual-cpufreq", .data = NULL},
+ {}
+};
+MODULE_DEVICE_TABLE(of, virt_cpufreq_match);
+
+static struct platform_driver virt_cpufreq_driver = {
+ .probe = virt_cpufreq_driver_probe,
+ .remove = virt_cpufreq_driver_remove,
+ .driver = {
+ .name = "virt-cpufreq",
+ .of_match_table = virt_cpufreq_match,
+ },
+};
+
+static int __init virt_cpufreq_init(void)
+{
+ return platform_driver_register(&virt_cpufreq_driver);
+}
+postcore_initcall(virt_cpufreq_init);
+
+static void __exit virt_cpufreq_exit(void)
+{
+ platform_driver_unregister(&virt_cpufreq_driver);
+}
+module_exit(virt_cpufreq_exit);
+
+MODULE_DESCRIPTION("Virtual cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 7cfb980a357d..caba6f4bb1b7 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -139,7 +139,7 @@ out_kfree_drv:
*
 * Initializes the arm cpuidle driver for all CPUs. If any CPU fails
 * to register the cpuidle driver, roll back and cancel all CPUs'
- * registeration.
+ * registration.
*/
static int __init arm_idle_init(void)
{
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index 602c4dfdd7e2..5235e6e8f360 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -66,7 +66,7 @@ static void kirkwood_cpuidle_remove(struct platform_device *pdev)
static struct platform_driver kirkwood_cpuidle_driver = {
.probe = kirkwood_cpuidle_probe,
- .remove_new = kirkwood_cpuidle_remove,
+ .remove = kirkwood_cpuidle_remove,
.driver = {
.name = "kirkwood_cpuidle",
},
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 146f97068022..5fb5228f6bf1 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -72,6 +72,7 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
*/
if (use_osi) {
pd->power_off = psci_pd_power_off;
+ pd->flags |= GENPD_FLAG_ACTIVE_WAKEUP;
if (IS_ENABLED(CONFIG_PREEMPT_RT))
pd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
} else {
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 14db9b7d985d..f68c65f1d023 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -22,6 +22,7 @@
#include <asm/idle.h>
#include <asm/plpar_wrappers.h>
#include <asm/rtas.h>
+#include <asm/time.h>
static struct cpuidle_driver pseries_idle_driver = {
.name = "pseries_idle",
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 1fc9968eae19..3ab240e0e122 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -48,7 +48,7 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
ret = cpu_suspend(0, qcom_pm_collapse);
/*
* ARM common code executes WFI without calling into our driver and
- * if the SPM mode is not reset, then we may accidently power down the
+ * if the SPM mode is not reset, then we may accidentally power down the
* cpu when we intended only to gate the cpu clock.
* Ensure the state is set to standby before returning.
*/
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index d228b4d18d56..0c92a628bbd4 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -26,6 +26,7 @@
#include <asm/smp.h>
#include <asm/suspend.h>
+#include "cpuidle.h"
#include "dt_idle_states.h"
#include "dt_idle_genpd.h"
@@ -329,6 +330,9 @@ static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
return ret;
}
+ if (cpuidle_disabled())
+ return 0;
+
ret = cpuidle_register(drv, NULL);
if (ret)
goto deinit;
@@ -500,12 +504,12 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
int cpu, ret;
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- struct device_node *np, *pds_node;
+ struct device_node *pds_node;
/* Detect OSI support based on CPU DT nodes */
sbi_cpuidle_use_osi = true;
for_each_possible_cpu(cpu) {
- np = of_cpu_device_node_get(cpu);
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
if (np &&
of_property_present(np, "power-domains") &&
of_property_present(np, "power-domain-names")) {
@@ -538,7 +542,10 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
	/* Set up CPU hotplug notifiers */
sbi_idle_init_cpuhp();
- pr_info("idle driver registered for all CPUs\n");
+ if (cpuidle_disabled())
+ pr_info("cpuidle is disabled\n");
+ else
+ pr_info("idle driver registered for all CPUs\n");
return 0;
@@ -582,4 +589,4 @@ static int __init sbi_cpuidle_init(void)
return 0;
}
-device_initcall(sbi_cpuidle_init);
+arch_initcall(sbi_cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 9e418aec1755..0835da449db8 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -69,11 +69,15 @@ int cpuidle_play_dead(void)
if (!drv)
return -ENODEV;
- /* Find lowest-power state that supports long-term idle */
- for (i = drv->state_count - 1; i >= 0; i--)
+ for (i = drv->state_count - 1; i >= 0; i--) {
if (drv->states[i].enter_dead)
- return drv->states[i].enter_dead(dev, i);
+ drv->states[i].enter_dead(dev, i);
+ }
+ /*
+ * If :enter_dead() is successful, it will never return, so reaching
+ * here means that all of them failed above or were not present.
+ */
return -ENODEV;
}
@@ -406,7 +410,7 @@ void cpuidle_reflect(struct cpuidle_device *dev, int index)
* Min polling interval of 10usec is a guess. It is assuming that
* for most users, the time for a single ping-pong workload like
* perf bench pipe would generally complete within 10usec but
- * this is hardware dependant. Actual time can be estimated with
+ * this is hardware dependent. Actual time can be estimated with
*
* perf bench sched pipe -l 10000
*
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index cf5873cc45dc..9bbfa594c442 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -261,7 +261,7 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv)
* @drv: a pointer to a valid struct cpuidle_driver
*
* Register the driver under a lock to prevent concurrent attempts to
- * [un]register the driver from occuring at the same time.
+ * [un]register the driver from occurring at the same time.
*
* Returns 0 on success, a negative error code (returned by
* __cpuidle_register_driver()) otherwise.
@@ -296,7 +296,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_driver);
* @drv: a pointer to a valid struct cpuidle_driver
*
* Unregisters the cpuidle driver under a lock to prevent concurrent attempts
- * to [un]register the driver from occuring at the same time. @drv has to
+ * to [un]register the driver from occurring at the same time. @drv has to
* match the currently registered driver.
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f3c9d49f0f2a..28363bfa3e4c 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,7 +19,7 @@
#include "gov.h"
-#define BUCKETS 12
+#define BUCKETS 6
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
@@ -29,12 +29,11 @@
/*
* Concepts and ideas behind the menu governor
*
- * For the menu governor, there are 3 decision factors for picking a C
+ * For the menu governor, there are 2 decision factors for picking a C
* state:
* 1) Energy break even point
- * 2) Performance impact
- * 3) Latency tolerance (from pmqos infrastructure)
- * These three factors are treated independently.
+ * 2) Latency tolerance (from pmqos infrastructure)
+ * These two factors are treated independently.
*
* Energy break even point
* -----------------------
@@ -75,30 +74,6 @@
 * intervals and if the standard deviation of these 8 intervals is below a
* threshold value, we use the average of these intervals as prediction.
*
- * Limiting Performance Impact
- * ---------------------------
- * C states, especially those with large exit latencies, can have a real
- * noticeable impact on workloads, which is not acceptable for most sysadmins,
- * and in addition, less performance has a power price of its own.
- *
- * As a general rule of thumb, menu assumes that the following heuristic
- * holds:
- * The busier the system, the less impact of C states is acceptable
- *
- * This rule-of-thumb is implemented using a performance-multiplier:
- * If the exit latency times the performance multiplier is longer than
- * the predicted duration, the C state is not considered a candidate
- * for selection due to a too high performance impact. So the higher
- * this multiplier is, the longer we need to be idle to pick a deep C
- * state, and thus the less likely a busy CPU will hit such a deep
- * C state.
- *
- * Currently there is only one value determining the factor:
- * 10 points are added for each process that is waiting for IO on this CPU.
- * (This value was experimentally determined.)
- * Utilization is no longer a factor as it was shown that it never contributed
- * significantly to the performance multiplier in the first place.
- *
*/
struct menu_device {
@@ -112,19 +87,10 @@ struct menu_device {
int interval_ptr;
};
-static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
+static inline int which_bucket(u64 duration_ns)
{
int bucket = 0;
- /*
- * We keep two groups of stats; one with no
- * IO pending, one without.
- * This allows us to calculate
- * E(duration)|iowait
- */
- if (nr_iowaiters)
- bucket = BUCKETS/2;
-
if (duration_ns < 10ULL * NSEC_PER_USEC)
return bucket;
if (duration_ns < 100ULL * NSEC_PER_USEC)
@@ -138,19 +104,6 @@ static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
return bucket + 5;
}
-/*
- * Return a multiplier for the exit latency that is intended
- * to take performance requirements into account.
- * The more performance critical we estimate the system
- * to be, the higher this multiplier, and thus the higher
- * the barrier to go to an expensive C state.
- */
-static inline int performance_multiplier(unsigned int nr_iowaiters)
-{
- /* for IO wait tasks (per cpu!) we add 10x each */
- return 1 + 10 * nr_iowaiters;
-}
-
static DEFINE_PER_CPU(struct menu_device, menu_devices);
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
@@ -258,8 +211,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct menu_device *data = this_cpu_ptr(&menu_devices);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
u64 predicted_ns;
- u64 interactivity_req;
- unsigned int nr_iowaiters;
ktime_t delta, delta_tick;
int i, idx;
@@ -268,8 +219,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
data->needs_update = 0;
}
- nr_iowaiters = nr_iowait_cpu(dev->cpu);
-
/* Find the shortest expected idle interval. */
predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
@@ -283,7 +232,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
}
data->next_timer_ns = delta;
- data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
+ data->bucket = which_bucket(data->next_timer_ns);
/* Round up the result for half microseconds. */
timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
@@ -301,7 +250,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
data->next_timer_ns = KTIME_MAX;
delta_tick = TICK_NSEC / 2;
- data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
+ data->bucket = which_bucket(KTIME_MAX);
}
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
@@ -328,15 +277,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
if (predicted_ns < TICK_NSEC)
predicted_ns = data->next_timer_ns;
- } else {
- /*
- * Use the performance multiplier and the user-configurable
- * latency_req to determine the maximum exit latency.
- */
- interactivity_req = div64_u64(predicted_ns,
- performance_multiplier(nr_iowaiters));
- if (latency_req > interactivity_req)
- latency_req = interactivity_req;
+ } else if (latency_req > predicted_ns) {
+ latency_req = predicted_ns;
}
/*
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index f2992f92d8db..8fe5e1b47ef9 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -10,25 +10,27 @@
* DOC: teo-description
*
* The idea of this governor is based on the observation that on many systems
- * timer events are two or more orders of magnitude more frequent than any
- * other interrupts, so they are likely to be the most significant cause of CPU
- * wakeups from idle states. Moreover, information about what happened in the
- * (relatively recent) past can be used to estimate whether or not the deepest
- * idle state with target residency within the (known) time till the closest
- * timer event, referred to as the sleep length, is likely to be suitable for
- * the upcoming CPU idle period and, if not, then which of the shallower idle
- * states to choose instead of it.
+ * timer interrupts are two or more orders of magnitude more frequent than any
+ * other interrupt types, so they are likely to dominate CPU wakeup patterns.
+ * Moreover, in principle, the time when the next timer event is going to occur
+ * can be determined at the idle state selection time, so it can be regarded
+ * as the most reliable source of information for idle state selection,
+ * although obtaining it may be costly.
*
- * Of course, non-timer wakeup sources are more important in some use cases
- * which can be covered by taking a few most recent idle time intervals of the
- * CPU into account. However, even in that context it is not necessary to
- * consider idle duration values greater than the sleep length, because the
- * closest timer will ultimately wake up the CPU anyway unless it is woken up
- * earlier.
+ * Of course, non-timer wakeup sources are more important in some use cases,
+ * but even then it is generally unnecessary to consider idle duration values
+ * greater than the time till the next timer event, referred to as the sleep
+ * length in what follows, because the closest timer will ultimately wake up the
+ * CPU anyway unless it is woken up earlier.
*
- * Thus this governor estimates whether or not the prospective idle duration of
- * a CPU is likely to be significantly shorter than the sleep length and selects
- * an idle state for it accordingly.
+ * However, since obtaining the sleep length may be costly, the governor first
+ * checks if it can select a shallow idle state using wakeup pattern information
+ * from recent times, in which case it can do without knowing the sleep length
+ * at all. For this purpose, it counts CPU wakeup events and looks for an idle
+ * state whose target residency has not exceeded the idle duration (measured
+ * after wakeup) in the majority of relevant recent cases. If the target
+ * residency of that state is small enough, it may be used right away and the
+ * sleep length need not be determined.
*
* The computations carried out by this governor are based on using bins whose
* boundaries are aligned with the target residency parameter values of the CPU
@@ -49,47 +51,50 @@
* sleep length and the idle duration measured after CPU wakeup fall into the
* same bin (that is, the CPU appears to wake up "on time" relative to the sleep
* length). In turn, the "intercepts" metric reflects the relative frequency of
- * situations in which the measured idle duration is so much shorter than the
- * sleep length that the bin it falls into corresponds to an idle state
- * shallower than the one whose bin is fallen into by the sleep length (these
- * situations are referred to as "intercepts" below).
+ * non-timer wakeup events for which the measured idle duration falls into a bin
+ * that corresponds to an idle state shallower than the one whose bin is fallen
+ * into by the sleep length (these events are also referred to as "intercepts"
+ * below).
+ *
+ * The governor also counts "intercepts" with the measured idle duration below
+ * the tick period length and uses this information when deciding whether or not
+ * to stop the scheduler tick.
*
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
*
- * 1. Find the deepest CPU idle state whose target residency does not exceed
- * the current sleep length (the candidate idle state) and compute 2 sums as
- * follows:
+ * 1. Find the deepest enabled CPU idle state (the candidate idle state) and
+ * compute 2 sums as follows:
*
- * - The sum of the "hits" and "intercepts" metrics for the candidate state
- * and all of the deeper idle states (it represents the cases in which the
- * CPU was idle long enough to avoid being intercepted if the sleep length
- * had been equal to the current one).
+ * - The sum of the "hits" metric for all of the idle states shallower than
+ * the candidate one (it represents the cases in which the CPU was likely
+ * woken up by a timer).
*
- * - The sum of the "intercepts" metrics for all of the idle states shallower
- * than the candidate one (it represents the cases in which the CPU was not
- * idle long enough to avoid being intercepted if the sleep length had been
- * equal to the current one).
+ * - The sum of the "intercepts" metric for all of the idle states shallower
+ * than the candidate one (it represents the cases in which the CPU was
+ * likely woken up by a non-timer wakeup source).
*
- * 2. If the second sum is greater than the first one the CPU is likely to wake
- * up early, so look for an alternative idle state to select.
+ * 2. If the second sum computed in step 1 is greater than a half of the sum of
+ * both metrics for the candidate state bin and all subsequent bins (if any),
+ * a shallower idle state is likely to be more suitable, so look for it.
*
- * - Traverse the idle states shallower than the candidate one in the
+ * - Traverse the enabled idle states shallower than the candidate one in the
* descending order.
*
* - For each of them compute the sum of the "intercepts" metrics over all
* of the idle states between it and the candidate one (including the
* former and excluding the latter).
*
- * - If each of these sums that needs to be taken into account (because the
- * check related to it has indicated that the CPU is likely to wake up
- * early) is greater than a half of the corresponding sum computed in step
- * 1 (which means that the target residency of the state in question had
- * not exceeded the idle duration in over a half of the relevant cases),
- * select the given idle state instead of the candidate one.
+ * - If this sum is greater than a half of the second sum computed in step 1,
+ * use the given idle state as the new candidate one.
+ *
+ * 3. If the current candidate state is state 0 or its target residency is short
+ * enough, return it and prevent the scheduler tick from being stopped.
*
- * 3. By default, select the candidate state.
+ * 4. Obtain the sleep length value and check if it is below the target
+ * residency of the current candidate state, in which case a new shallower
+ * candidate state needs to be found, so look for it.
*/
#include <linux/cpuidle.h>
@@ -101,6 +106,12 @@
#include "gov.h"
/*
+ * Idle state exit latency threshold used for deciding whether or not to check
+ * the time till the closest expected timer event.
+ */
+#define LATENCY_THRESHOLD_NS (RESIDENCY_THRESHOLD_NS / 2)
+
+/*
* The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
* is used for decreasing metrics on a regular basis.
*/
@@ -119,18 +130,20 @@ struct teo_bin {
/**
* struct teo_cpu - CPU data used by the TEO cpuidle governor.
- * @time_span_ns: Time between idle state selection and post-wakeup update.
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
* @total: Grand total of the "intercepts" and "hits" metrics for all bins.
- * @tick_hits: Number of "hits" after TICK_NSEC.
+ * @tick_intercepts: "Intercepts" before TICK_NSEC.
+ * @short_idles: Wakeups after short idle periods.
+ * @artificial_wakeup: Set if the wakeup has been triggered by a safety net.
*/
struct teo_cpu {
- s64 time_span_ns;
s64 sleep_length_ns;
struct teo_bin state_bins[CPUIDLE_STATE_MAX];
unsigned int total;
- unsigned int tick_hits;
+ unsigned int tick_intercepts;
+ unsigned int short_idles;
+ bool artificial_wakeup;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
@@ -147,23 +160,17 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
s64 target_residency_ns;
u64 measured_ns;
- if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
+ cpu_data->short_idles -= cpu_data->short_idles >> DECAY_SHIFT;
+
+ if (cpu_data->artificial_wakeup) {
/*
- * One of the safety nets has triggered or the wakeup was close
- * enough to the closest timer event expected at the idle state
- * selection time to be discarded.
+ * If one of the safety nets has triggered, assume that this
+ * might have been a long sleep.
*/
measured_ns = U64_MAX;
} else {
u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
- /*
- * The computations below are to determine whether or not the
- * (saved) time till the next timer event and the measured idle
- * duration fall into the same "bin", so use last_residency_ns
- * for that instead of time_span_ns which includes the cpuidle
- * overhead.
- */
measured_ns = dev->last_residency_ns;
/*
* The delay between the wakeup and the first instruction
@@ -171,14 +178,16 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* time, so take 1/2 of the exit latency as a very rough
* approximation of the average of it.
*/
- if (measured_ns >= lat_ns)
+ if (measured_ns >= lat_ns) {
measured_ns -= lat_ns / 2;
- else
+ if (measured_ns < RESIDENCY_THRESHOLD_NS)
+ cpu_data->short_idles += PULSE;
+ } else {
measured_ns /= 2;
+ cpu_data->short_idles += PULSE;
+ }
}
- cpu_data->total = 0;
-
/*
* Decay the "hits" and "intercepts" metrics for all of the bins and
* find the bins that the sleep length and the measured idle duration
@@ -190,8 +199,6 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
bin->hits -= bin->hits >> DECAY_SHIFT;
bin->intercepts -= bin->intercepts >> DECAY_SHIFT;
- cpu_data->total += bin->hits + bin->intercepts;
-
target_residency_ns = drv->states[i].target_residency_ns;
if (target_residency_ns <= cpu_data->sleep_length_ns) {
@@ -201,38 +208,22 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
}
- /*
- * If the deepest state's target residency is below the tick length,
- * make a record of it to help teo_select() decide whether or not
- * to stop the tick. This effectively adds an extra hits-only bin
- * beyond the last state-related one.
- */
- if (target_residency_ns < TICK_NSEC) {
- cpu_data->tick_hits -= cpu_data->tick_hits >> DECAY_SHIFT;
-
- cpu_data->total += cpu_data->tick_hits;
-
- if (TICK_NSEC <= cpu_data->sleep_length_ns) {
- idx_timer = drv->state_count;
- if (TICK_NSEC <= measured_ns) {
- cpu_data->tick_hits += PULSE;
- goto end;
- }
- }
- }
-
+ cpu_data->tick_intercepts -= cpu_data->tick_intercepts >> DECAY_SHIFT;
/*
* If the measured idle duration falls into the same bin as the sleep
* length, this is a "hit", so update the "hits" metric for that bin.
* Otherwise, update the "intercepts" metric for the bin fallen into by
* the measured idle duration.
*/
- if (idx_timer == idx_duration)
+ if (idx_timer == idx_duration) {
cpu_data->state_bins[idx_timer].hits += PULSE;
- else
+ } else {
cpu_data->state_bins[idx_duration].intercepts += PULSE;
+ if (TICK_NSEC <= measured_ns)
+ cpu_data->tick_intercepts += PULSE;
+ }
-end:
+ cpu_data->total -= cpu_data->total >> DECAY_SHIFT;
cpu_data->total += PULSE;
}
@@ -280,14 +271,12 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
ktime_t delta_tick = TICK_NSEC / 2;
- unsigned int tick_intercept_sum = 0;
unsigned int idx_intercept_sum = 0;
unsigned int intercept_sum = 0;
unsigned int idx_hit_sum = 0;
unsigned int hit_sum = 0;
int constraint_idx = 0;
int idx0 = 0, idx = -1;
- int prev_intercept_idx;
s64 duration_ns;
int i;
@@ -296,10 +285,14 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
dev->last_state_idx = -1;
}
- cpu_data->time_span_ns = local_clock();
/*
- * Set the expected sleep length to infinity in case of an early
- * return.
+ * Set the sleep length to infinity in case the invocation of
+ * tick_nohz_get_sleep_length() below is skipped, in which case it won't
+ * be known whether or not the subsequent wakeup is caused by a timer.
+ * It is generally fine to count the wakeup as an intercept then, except
+ * for the cases when the CPU is mostly woken up by timers, where
+ * opportunities to ask for a deeper idle state (when no imminent timers
+ * are scheduled) may be missed.
*/
cpu_data->sleep_length_ns = KTIME_MAX;
@@ -355,17 +348,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
goto end;
}
- tick_intercept_sum = intercept_sum +
- cpu_data->state_bins[drv->state_count-1].intercepts;
-
/*
* If the sum of the intercepts metric for all of the idle states
* shallower than the current candidate one (idx) is greater than the
* sum of the intercepts and hits metrics for the candidate state and
- * all of the deeper states a shallower idle state is likely to be a
+ * all of the deeper states, a shallower idle state is likely to be a
* better choice.
*/
- prev_intercept_idx = idx;
if (2 * idx_intercept_sum > cpu_data->total - idx_hit_sum) {
int first_suitable_idx = idx;
@@ -391,41 +380,38 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* first enabled state that is deep enough.
*/
if (teo_state_ok(i, drv) &&
- !dev->states_usage[i].disable)
+ !dev->states_usage[i].disable) {
idx = i;
- else
- idx = first_suitable_idx;
-
+ break;
+ }
+ idx = first_suitable_idx;
break;
}
if (dev->states_usage[i].disable)
continue;
- if (!teo_state_ok(i, drv)) {
+ if (teo_state_ok(i, drv)) {
/*
- * The current state is too shallow, but if an
- * alternative candidate state has been found,
- * it may still turn out to be a better choice.
+ * The current state is deep enough, but still
+ * there may be a better one.
*/
- if (first_suitable_idx != idx)
- continue;
-
- break;
+ first_suitable_idx = i;
+ continue;
}
- first_suitable_idx = i;
+ /*
+ * The current state is too shallow, so if no suitable
+ * states other than the initial candidate have been
+ * found, give up (the remaining states to check are
+ * shallower still), but otherwise the first suitable
+ * state other than the initial candidate may turn out
+ * to be preferable.
+ */
+ if (first_suitable_idx == idx)
+ break;
}
}
- if (!idx && prev_intercept_idx) {
- /*
- * We have to query the sleep length here otherwise we don't
- * know after wakeup if our guess was correct.
- */
- duration_ns = tick_nohz_get_sleep_length(&delta_tick);
- cpu_data->sleep_length_ns = duration_ns;
- goto out_tick;
- }
/*
* If there is a latency constraint, it may be necessary to select an
@@ -435,24 +421,39 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
idx = constraint_idx;
/*
- * Skip the timers check if state 0 is the current candidate one,
- * because an immediate non-timer wakeup is expected in that case.
- */
- if (!idx)
- goto out_tick;
-
- /*
- * If state 0 is a polling one, check if the target residency of
- * the current candidate state is low enough and skip the timers
- * check in that case too.
+ * If either the candidate state is state 0 or its target residency is
+ * low enough, there is basically nothing more to do, but if the sleep
+ * length is not updated, the subsequent wakeup will be counted as an
+ * "intercept" which may be problematic in the cases when timer wakeups
+ * are dominant. Namely, it may effectively prevent deeper idle states
+ * from being selected at one point even if no imminent timers are
+ * scheduled.
+ *
+ * However, frequent timers in the RESIDENCY_THRESHOLD_NS range on one
+ * CPU are unlikely (user space has a default 50 us slack value for
+ * hrtimers and there are relatively few timers with a lower deadline
+ * value in the kernel), and even if they did happen, the potential
+ * benefit from using a deep idle state in that case would be
+ * questionable anyway for latency reasons. Thus if the measured idle
+ * duration falls into that range in the majority of cases, assume
+ * non-timer wakeups to be dominant and skip updating the sleep length
+ * to reduce latency.
+ *
+ * Also, if the latency constraint is sufficiently low, it will force
+ * shallow idle states regardless of the wakeup type, so the sleep
+ * length need not be known in that case.
*/
- if ((drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
- drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS)
+ if ((!idx || drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS) &&
+ (2 * cpu_data->short_idles >= cpu_data->total ||
+ latency_req < LATENCY_THRESHOLD_NS))
goto out_tick;
duration_ns = tick_nohz_get_sleep_length(&delta_tick);
cpu_data->sleep_length_ns = duration_ns;
+ if (!idx)
+ goto out_tick;
+
/*
* If the closest expected timer is before the target residency of the
* candidate state, a shallower one needs to be found.
@@ -469,7 +470,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* total wakeup events, do not stop the tick.
*/
if (drv->states[idx].target_residency_ns < TICK_NSEC &&
- tick_intercept_sum > cpu_data->total / 2 + cpu_data->total / 8)
+ cpu_data->tick_intercepts > cpu_data->total / 2 + cpu_data->total / 8)
duration_ns = TICK_NSEC / 2;
end:
@@ -506,17 +507,16 @@ static void teo_reflect(struct cpuidle_device *dev, int state)
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
dev->last_state_idx = state;
- /*
- * If the wakeup was not "natural", but triggered by one of the safety
- * nets, assume that the CPU might have been idle for the entire sleep
- * length time.
- */
if (dev->poll_time_limit ||
(tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
+ /*
+ * The wakeup was not "genuine", but triggered by one of the
+ * safety nets.
+ */
dev->poll_time_limit = false;
- cpu_data->time_span_ns = cpu_data->sleep_length_ns;
+ cpu_data->artificial_wakeup = true;
} else {
- cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns;
+ cpu_data->artificial_wakeup = false;
}
}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 08b1238bcd7b..19ab145f912e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -95,6 +95,9 @@ config PKEY
loaded when a CEX crypto card is available.
- A pkey EP11 kernel module (pkey-ep11.ko) which is automatically
loaded when a CEX crypto card is available.
+ - A pkey UV kernel module (pkey-uv.ko) which is automatically
+ loaded when the Ultravisor feature is available within a
+ protected execution environment.
Select this option if you want to enable the kernel and userspace
API for protected key handling.
@@ -152,6 +155,24 @@ config PKEY_PCKMO
this option unless you are sure you never need to derive protected
keys from clear key values directly via PCKMO.
+config PKEY_UV
+ tristate "PKEY UV support handler"
+ depends on PKEY
+ depends on S390_UV_UAPI
+ help
+ This is the PKEY Ultravisor support handler for deriving protected
+ keys from secrets stored within the Ultravisor (UV).
+
+ This module works together with the UV device and supports the
+ retrieval of protected keys from secrets stored within the
+ UV firmware layer. This service is only available within
+ a protected execution guest and thus this module will fail upon
+ modprobe if no protected execution environment is detected.
+
+	  Enable this option if you intend to run this kernel in a KVM
+	  guest with protected execution and you want to use UV retrievable
+	  secrets via the PKEY API.
+
config CRYPTO_PAES_S390
tristate "PAES cipher algorithms"
depends on S390
@@ -179,23 +200,6 @@ config S390_PRNG
It is available as of z9.
-config CRYPTO_DEV_NIAGARA2
- tristate "Niagara2 Stream Processing Unit driver"
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MD5
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- depends on SPARC64
- help
- Each core of a Niagara2 processor contains a Stream
- Processing Unit, which itself contains several cryptographic
- sub-units. One set provides the Modular Arithmetic Unit,
- used for SSL offload. The other set provides the Cipher
- Group, which can perform encryption, decryption, hashing,
- checksumming, and raw copies.
-
config CRYPTO_DEV_SL3516
tristate "Storlink SL3516 crypto offloader"
depends on ARCH_GEMINI || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad4ccef67d12..fef18ffdb128 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -21,8 +21,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 890664bd5f0f..58a76e2ba64e 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -542,7 +542,7 @@ MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
static struct platform_driver sun4i_ss_driver = {
.probe = sun4i_ss_probe,
- .remove_new = sun4i_ss_remove,
+ .remove = sun4i_ss_remove,
.driver = {
.name = "sun4i-ss",
.pm = &sun4i_ss_pm_ops,
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index e55e58e164db..ec1ffda9ea32 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -1129,7 +1129,7 @@ MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);
static struct platform_driver sun8i_ce_driver = {
.probe = sun8i_ce_probe,
- .remove_new = sun8i_ce_remove,
+ .remove = sun8i_ce_remove,
.driver = {
.name = "sun8i-ce",
.pm = &sun8i_ce_pm_ops,
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 0dbc0220146c..f45685707e0d 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -929,7 +929,7 @@ MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table);
static struct platform_driver sun8i_ss_driver = {
.probe = sun8i_ss_probe,
- .remove_new = sun8i_ss_remove,
+ .remove = sun8i_ss_remove,
.driver = {
.name = "sun8i-ss",
.pm = &sun8i_ss_pm_ops,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6006703fb6d7..ec3ccfa60445 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -653,9 +653,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
crypto4xx_destroy_pdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_sdr(core_dev->dev);
- iounmap(core_dev->dev->ce_base);
- kfree(core_dev->dev);
- kfree(core_dev);
}
static u32 get_next_gd(u32 current)
@@ -1333,17 +1330,12 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
static int crypto4xx_probe(struct platform_device *ofdev)
{
int rc;
- struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
struct device_node *np;
u32 pvr;
bool is_revb = true;
- rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
- if (rc)
- return -ENODEV;
-
np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto");
if (np) {
mtdcri(SDR0, PPC460EX_SDR0_SRST,
@@ -1374,16 +1366,17 @@ static int crypto4xx_probe(struct platform_device *ofdev)
of_node_put(np);
- core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
+ core_dev = devm_kzalloc(
+ &ofdev->dev, sizeof(struct crypto4xx_core_device), GFP_KERNEL);
if (!core_dev)
return -ENOMEM;
dev_set_drvdata(dev, core_dev);
core_dev->ofdev = ofdev;
- core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
- rc = -ENOMEM;
+ core_dev->dev = devm_kzalloc(
+ &ofdev->dev, sizeof(struct crypto4xx_device), GFP_KERNEL);
if (!core_dev->dev)
- goto err_alloc_dev;
+ return -ENOMEM;
/*
* Older version of 460EX/GT have a hardware bug.
@@ -1402,7 +1395,9 @@ static int crypto4xx_probe(struct platform_device *ofdev)
core_dev->dev->core_dev = core_dev;
core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
- mutex_init(&core_dev->rng_lock);
+ rc = devm_mutex_init(&ofdev->dev, &core_dev->rng_lock);
+ if (rc)
+ return rc;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
@@ -1421,21 +1416,21 @@ static int crypto4xx_probe(struct platform_device *ofdev)
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
- core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
- if (!core_dev->dev->ce_base) {
- dev_err(dev, "failed to of_iomap\n");
- rc = -ENOMEM;
- goto err_iomap;
+ core_dev->dev->ce_base = devm_platform_ioremap_resource(ofdev, 0);
+ if (IS_ERR(core_dev->dev->ce_base)) {
+		dev_err(&ofdev->dev, "failed to ioremap resource\n");
+ rc = PTR_ERR(core_dev->dev->ce_base);
+ goto err_build_sdr;
}
/* Register for Crypto isr, Crypto Engine IRQ */
core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, is_revb ?
- crypto4xx_ce_interrupt_handler_revb :
- crypto4xx_ce_interrupt_handler, 0,
- KBUILD_MODNAME, dev);
+ rc = devm_request_irq(&ofdev->dev, core_dev->irq,
+ is_revb ? crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler,
+ 0, KBUILD_MODNAME, dev);
if (rc)
- goto err_request_irq;
+ goto err_iomap;
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@@ -1444,26 +1439,17 @@ static int crypto4xx_probe(struct platform_device *ofdev)
rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
ARRAY_SIZE(crypto4xx_alg));
if (rc)
- goto err_start_dev;
+ goto err_iomap;
ppc4xx_trng_probe(core_dev);
return 0;
-err_start_dev:
- free_irq(core_dev->irq, dev);
-err_request_irq:
- irq_dispose_mapping(core_dev->irq);
- iounmap(core_dev->dev->ce_base);
err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_pdr(core_dev->dev);
- kfree(core_dev->dev);
-err_alloc_dev:
- kfree(core_dev);
-
return rc;
}
@@ -1474,13 +1460,9 @@ static void crypto4xx_remove(struct platform_device *ofdev)
ppc4xx_trng_remove(core_dev);
- free_irq(core_dev->irq, dev);
- irq_dispose_mapping(core_dev->irq);
-
tasklet_kill(&core_dev->tasklet);
/* Un-register with Linux CryptoAPI */
crypto4xx_unregister_alg(core_dev->dev);
- mutex_destroy(&core_dev->rng_lock);
/* Free all allocated memory */
crypto4xx_stop_all(core_dev);
}
@@ -1497,7 +1479,7 @@ static struct platform_driver crypto4xx_driver = {
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
- .remove_new = crypto4xx_remove,
+ .remove = crypto4xx_remove,
};
module_platform_driver(crypto4xx_driver);
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index f54ab0d0b1e8..1c18a5b8470e 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -240,11 +240,9 @@ static int meson_crypto_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mc);
mc->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mc->base)) {
- err = PTR_ERR(mc->base);
- dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err);
- return err;
- }
+ if (IS_ERR(mc->base))
+ return PTR_ERR(mc->base);
+
mc->busclk = devm_clk_get(&pdev->dev, "blkmv");
if (IS_ERR(mc->busclk)) {
err = PTR_ERR(mc->busclk);
@@ -322,7 +320,7 @@ MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table);
static struct platform_driver meson_crypto_driver = {
.probe = meson_crypto_probe,
- .remove_new = meson_crypto_remove,
+ .remove = meson_crypto_remove,
.driver = {
.name = "gxl-crypto",
.of_match_table = meson_crypto_of_match_table,
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index b4613bd4ad96..8d1c79aaca07 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -601,8 +601,6 @@ static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
.akcipher.base = {
.encrypt = aspeed_acry_rsa_enc,
.decrypt = aspeed_acry_rsa_dec,
- .sign = aspeed_acry_rsa_dec,
- .verify = aspeed_acry_rsa_enc,
.set_pub_key = aspeed_acry_rsa_set_pub_key,
.set_priv_key = aspeed_acry_rsa_set_priv_key,
.max_size = aspeed_acry_rsa_max_size,
@@ -808,7 +806,7 @@ MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
static struct platform_driver aspeed_acry_driver = {
.probe = aspeed_acry_probe,
- .remove_new = aspeed_acry_remove,
+ .remove = aspeed_acry_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_acry_of_matches,
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
index 062f2a66dd23..3fe644bfe037 100644
--- a/drivers/crypto/aspeed/aspeed-hace.c
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -266,7 +266,7 @@ MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
static struct platform_driver aspeed_hace_driver = {
.probe = aspeed_hace_probe,
- .remove_new = aspeed_hace_remove,
+ .remove = aspeed_hace_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_hace_of_matches,
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 0dd90785db9a..14bf86957d31 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2453,7 +2453,7 @@ static void atmel_aes_remove(struct platform_device *pdev)
static struct platform_driver atmel_aes_driver = {
.probe = atmel_aes_probe,
- .remove_new = atmel_aes_remove,
+ .remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
.of_match_table = atmel_aes_dt_ids,
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 590ea984c622..0d48e64d28b1 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -379,7 +379,7 @@ MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids);
#endif
static const struct i2c_device_id atmel_ecc_id[] = {
- { "atecc508a", 0 },
+ { "atecc508a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, atmel_ecc_id);
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8cc57df25778..67a170608566 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2691,7 +2691,7 @@ static void atmel_sha_remove(struct platform_device *pdev)
static struct platform_driver atmel_sha_driver = {
.probe = atmel_sha_probe,
- .remove_new = atmel_sha_remove,
+ .remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
.of_match_table = atmel_sha_dt_ids,
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index a02d496f4c41..75bebec2c757 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -202,8 +202,8 @@ static const struct of_device_id atmel_sha204a_dt_ids[] __maybe_unused = {
MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids);
static const struct i2c_device_id atmel_sha204a_id[] = {
- { "atsha204", 0 },
- { "atsha204a", 0 },
+ { "atsha204" },
+ { "atsha204a" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index dcc2380a5889..de9717e221e4 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -872,7 +872,7 @@ static void atmel_tdes_done_task(unsigned long data)
if (!err)
err = atmel_tdes_crypt_start(dd);
if (!err)
- return; /* DMA started. Not fininishing. */
+ return; /* DMA started. Not finishing. */
}
atmel_tdes_finish_req(dd, err);
@@ -1074,7 +1074,7 @@ static void atmel_tdes_remove(struct platform_device *pdev)
static struct platform_driver atmel_tdes_driver = {
.probe = atmel_tdes_probe,
- .remove_new = atmel_tdes_remove,
+ .remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
.of_match_table = atmel_tdes_dt_ids,
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 75440ea6206e..1c1f57baef0e 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2975,7 +2975,7 @@ static void artpec6_crypto_remove(struct platform_device *pdev)
static struct platform_driver artpec6_crypto_driver = {
.probe = artpec6_crypto_probe,
- .remove_new = artpec6_crypto_remove,
+ .remove = artpec6_crypto_remove,
.driver = {
.name = "artpec6-crypto",
.of_match_table = artpec6_crypto_of_match,
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 1a3ecd44cbaf..9e6798efbfb7 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -2415,6 +2415,7 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
static int ahash_hmac_init(struct ahash_request *req)
{
+ int ret;
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
@@ -2424,7 +2425,9 @@ static int ahash_hmac_init(struct ahash_request *req)
flow_log("ahash_hmac_init()\n");
/* init the context as a hash */
- ahash_init(req);
+ ret = ahash_init(req);
+ if (ret)
+ return ret;
if (!spu_no_incr_hash(ctx)) {
/* SPU-M can do incr hashing but needs sw for outer HMAC */
@@ -4704,7 +4707,7 @@ static struct platform_driver bcm_spu_pdriver = {
.of_match_table = of_match_ptr(bcm_spu_dt_ids),
},
.probe = bcm_spu_probe,
- .remove_new = bcm_spu_remove,
+ .remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index 6283e8c6d51d..86c227caa722 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -836,7 +836,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
u32 cipher_bits = 0;
u32 ecf_bits = 0;
u8 sctx_words = 0;
- u8 *ptr = spu_hdr;
flow_log("%s()\n", __func__);
flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
@@ -847,7 +846,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* starting out: zero the header (plus some) */
memset(spu_hdr, 0, sizeof(struct SPUHEADER));
- ptr += sizeof(struct SPUHEADER);
/* format master header word */
/* Do not set the next bit even though the datasheet says to */
@@ -861,10 +859,8 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
/* copy the encryption keys in the SAD entry */
if (cipher_parms->alg) {
- if (cipher_parms->key_len) {
- ptr += cipher_parms->key_len;
+ if (cipher_parms->key_len)
sctx_words += cipher_parms->key_len / 4;
- }
/*
* if encrypting then set IV size, use SCTX IV unless no IV
@@ -873,7 +869,6 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
if (cipher_parms->iv_len) {
/* Use SCTX IV */
ecf_bits |= SCTX_IV;
- ptr += cipher_parms->iv_len;
sctx_words += cipher_parms->iv_len / 4;
}
}
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 87781c1534ee..079a22cc9f02 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024 NXP
*/
#define pr_fmt(fmt) "caam blob_gen: " fmt
@@ -104,7 +105,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
}
ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
+ moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 887a5f2fb927..cb001aa1de66 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -984,7 +984,7 @@ err:
return -ENOMEM;
}
-static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
+static int caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
struct rsa_key *raw_key)
{
struct caam_rsa_key *rsa_key = &ctx->key;
@@ -994,7 +994,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
if (!rsa_key->p)
- return;
+ return -ENOMEM;
rsa_key->p_sz = p_sz;
rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
@@ -1029,7 +1029,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
rsa_key->priv_form = FORM3;
- return;
+ return 0;
free_dq:
kfree_sensitive(rsa_key->dq);
@@ -1043,6 +1043,7 @@ free_q:
kfree_sensitive(rsa_key->q);
free_p:
kfree_sensitive(rsa_key->p);
+ return -ENOMEM;
}
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
@@ -1088,7 +1089,9 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
rsa_key->e_sz = raw_key.e_sz;
rsa_key->n_sz = raw_key.n_sz;
- caam_rsa_set_priv_key_form(ctx, &raw_key);
+ ret = caam_rsa_set_priv_key_form(ctx, &raw_key);
+ if (ret)
+ goto err;
return 0;
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 26eba7de3fb0..9fcdb64084ac 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -819,7 +819,7 @@ static struct platform_driver caam_jr_driver = {
.pm = pm_ptr(&caam_jr_pm_ops),
},
.probe = caam_jr_probe,
- .remove_new = caam_jr_remove,
+ .remove = caam_jr_remove,
.shutdown = caam_jr_remove,
};
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index f6111ee9ed34..7701d00bcb3a 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -733,7 +733,7 @@ static void free_caam_qi_pcpu_netdev(const cpumask_t *cpus)
int caam_qi_init(struct platform_device *caam_pdev)
{
int err, i;
- struct device *ctrldev = &caam_pdev->dev, *qidev;
+ struct device *qidev = &caam_pdev->dev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
cpumask_var_t clean_mask;
@@ -742,8 +742,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
goto fail_cpumask;
- ctrlpriv = dev_get_drvdata(ctrldev);
- qidev = ctrldev;
+ ctrlpriv = dev_get_drvdata(qidev);
/* Initialize the congestion detection */
err = init_cgr(qidev);
@@ -794,7 +793,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
caam_debugfs_qi_init(ctrlpriv);
- err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
+ err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
if (err)
goto fail2;
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 6872ac344001..54de869e5374 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -44,7 +44,7 @@ static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
dev_err(dev, "Cores still busy %llx", coremask);
grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0));
- if (timeout--)
+ if (!timeout--)
break;
udelay(CSR_DELAY);
@@ -302,6 +302,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ret = do_cpt_init(cpt, mcode);
if (ret) {
+ dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
+ mcode->code, mcode->phys_base);
dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
goto fw_release;
}
@@ -394,7 +396,7 @@ static void cpt_disable_all_cores(struct cpt_device *cpt)
dev_err(dev, "Cores still busy");
grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0));
- if (timeout--)
+ if (!timeout--)
break;
udelay(CSR_DELAY);
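
Two fixes in cptpf_main.c: the firmware DMA buffer is now freed when do_cpt_init() fails, and both busy-wait loops break on !timeout-- instead of timeout--, which had exited on the very first poll while the retry budget was still positive. A runnable userspace sketch of the corrected bounded-poll idiom:

#include <stdbool.h>
#include <stdio.h>

static bool hw_busy(int *fake_cycles)
{
	return (*fake_cycles)-- > 0;	/* stand-in for a CSR read */
}

int main(void)
{
	int cycles = 5, timeout = 100;

	while (hw_busy(&cycles)) {
		if (!timeout--) {	/* plain "timeout--" fired at once */
			fprintf(stderr, "cores still busy\n");
			break;
		}
		/* udelay(CSR_DELAY) in the driver; a no-op here */
	}
	printf("remaining budget: %d\n", timeout);
	return 0;
}
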
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 153004bdfb5c..fb59bb282455 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -238,7 +238,7 @@ static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
qinfo = &cptvf->cqinfo;
queue = &qinfo->queue[qno];
- /* lock commad queue */
+ /* lock command queue */
spin_lock(&queue->lock);
ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
memcpy(ent, (void *)cmd, qinfo->cmd_size);
@@ -510,7 +510,7 @@ get_pending_entry:
info->time_in = jiffies;
info->req = req;
- /* Create the CPT_INST_S type command for HW intrepretation */
+ /* Create the CPT_INST_S type command for HW interpretation */
cptinst.s.doneint = true;
cptinst.s.res_addr = (u64)info->comp_baddr;
cptinst.s.tag = 0;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index a5cdc2b48bd6..068265207ddd 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -17,7 +17,7 @@
#define CRYPTO_CTX_SIZE 256
-/* packet inuput ring alignments */
+/* packet input ring alignments */
#define PKTIN_Q_ALIGN_BYTES 16
/* AQM Queue input alignments */
#define AQM_Q_ALIGN_BYTES 32
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
index 5b105a23f699..410084a9039c 100644
--- a/drivers/crypto/ccp/dbc.c
+++ b/drivers/crypto/ccp/dbc.c
@@ -7,6 +7,8 @@
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
+#include <linux/mutex.h>
+
#include "dbc.h"
#define DBC_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
@@ -137,64 +139,49 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENODEV;
dbc_dev = psp_master->dbc_data;
- mutex_lock(&dbc_dev->ioctl_mutex);
+ guard(mutex)(&dbc_dev->ioctl_mutex);
switch (cmd) {
case DBCIOCNONCE:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
ret = send_dbc_nonce(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
break;
case DBCIOCUID:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_setuid);
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
break;
case DBCIOCPARAM:
- if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param)))
+ return -EFAULT;
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_param);
ret = send_dbc_parameter(dbc_dev);
if (ret)
- goto unlock;
+ return ret;
- if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param))) {
- ret = -EFAULT;
- goto unlock;
- }
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param)))
+ return -EFAULT;
break;
default:
- ret = -EINVAL;
-
+ return -EINVAL;
}
-unlock:
- mutex_unlock(&dbc_dev->ioctl_mutex);
- return ret;
+ return 0;
}
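
The dbc_ioctl() rewrite above uses scope-based locking from <linux/cleanup.h>: guard(mutex) releases the mutex automatically when control leaves the scope, which is why every error path can now return directly instead of jumping to an unlock label. A minimal kernel-style sketch:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);

static int demo_op(int arg)
{
	guard(mutex)(&demo_lock);	/* unlocked automatically on any return */

	if (arg < 0)
		return -EINVAL;		/* no unlock boilerplate needed */

	return 0;
}
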
static const struct file_operations dbc_fops = {
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index af018afd9cd7..2e87ca0e292a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -249,7 +249,7 @@ static struct file *open_file_as_root(const char *filename, int flags, umode_t m
fp = file_open_root(&root, filename, flags, mode);
path_put(&root);
- revert_creds(old_cred);
+ put_cred(revert_creds(old_cred));
return fp;
}
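
This tracks the cred API rework in which revert_creds() returns the override credentials that were in effect, letting callers restore the old credentials and drop the prepare_creds() reference in a single expression. A hedged kernel-style sketch of the resulting pattern (open_as_root_sketch is hypothetical):

static struct file *open_as_root_sketch(const char *name)
{
	struct cred *cred = prepare_creds();
	const struct cred *old;
	struct file *fp;

	if (!cred)
		return ERR_PTR(-ENOMEM);
	cred->fsuid = GLOBAL_ROOT_UID;

	old = override_creds(cred);
	fp = filp_open(name, O_RDONLY, 0);
	put_cred(revert_creds(old));	/* restore @old, drop @cred's ref */

	return fp;
}
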
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index ff6ceb4feee0..3933cac1694d 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -210,7 +210,7 @@ static struct platform_driver sp_platform_driver = {
.of_match_table = sp_of_match,
},
.probe = sp_platform_probe,
- .remove_new = sp_platform_remove,
+ .remove = sp_platform_remove,
#ifdef CONFIG_PM
.suspend = sp_platform_suspend,
.resume = sp_platform_resume,
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 5ef39d682389..81533681f7fb 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -2226,7 +2226,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
memset(areq_ctx, 0, sizeof(*areq_ctx));
- //plaintext is not encryped with rfc4543
+ //plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
@@ -2277,7 +2277,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
memset(areq_ctx, 0, sizeof(*areq_ctx));
- //plaintext is not decryped with rfc4543
+ //plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 3fb667a17bbb..d39c067672fd 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -179,7 +179,7 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
}
max_key_buf_size <<= 1;
- /* Alloc fallabck tfm or essiv when key size != 256 bit */
+ /* Alloc fallback tfm or essiv when key size != 256 bit */
ctx_p->fallback_tfm =
crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 9177b54bb0f5..061e68a31c36 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -643,7 +643,7 @@ static struct platform_driver ccree_driver = {
#endif
},
.probe = ccree_probe,
- .remove_new = ccree_remove,
+ .remove = ccree_remove,
};
static int __init ccree_init(void)
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index f418162932fe..d0612bec4d58 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1577,7 +1577,7 @@ struct cc_hash_template {
/* hash descriptors */
static struct cc_hash_template driver_hash[] = {
- //Asynchronize hash template
+ //Asynchronous hash template
{
.name = "sha1",
.driver_name = "sha1-ccree",
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 177428480c7d..af37477ffd8d 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1186,7 +1186,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
else
bytes = rounddown(bytes, 16);
} else {
- /*CTR mode counter overfloa*/
+ /*CTR mode counter overflow*/
bytes = req->cryptlen - reqctx->processed;
}
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 0dd8baf16cb4..2aaa98f9b44e 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -389,7 +389,7 @@ static struct platform_driver exynos_rng_driver = {
.of_match_table = exynos_rng_dt_match,
},
.probe = exynos_rng_probe,
- .remove_new = exynos_rng_remove,
+ .remove = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);
diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c
index 1d1a889599bb..f7e0e3fea15c 100644
--- a/drivers/crypto/gemini/sl3516-ce-core.c
+++ b/drivers/crypto/gemini/sl3516-ce-core.c
@@ -528,7 +528,7 @@ MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);
static struct platform_driver sl3516_ce_driver = {
.probe = sl3516_ce_probe,
- .remove_new = sl3516_ce_remove,
+ .remove = sl3516_ce_remove,
.driver = {
.name = "sl3516-crypto",
.pm = &sl3516_ce_pm_ops,
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index fa5a9f207bc9..d933f26aeb3a 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -433,4 +433,4 @@ module_pci_driver(geode_aes_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
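
MODULE_IMPORT_NS() (and the EXPORT_SYMBOL_NS* macros) now take the namespace as a string literal rather than a bare token; this hunk is part of that tree-wide conversion. A short sketch of the provider/consumer pairing, with a hypothetical namespace name:

/* provider module */
int frob_internal(void);
EXPORT_SYMBOL_NS_GPL(frob_internal, "FROB_INTERNAL");

/* consumer module */
MODULE_IMPORT_NS("FROB_INTERNAL");	/* was: MODULE_IMPORT_NS(FROB_INTERNAL) */
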
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 1b9b7bccdeff..45e130b901eb 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -192,7 +192,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock);
if (qm->sqc) {
- memcpy(&sqc, qm->sqc + qp_id * sizeof(struct qm_sqc), sizeof(struct qm_sqc));
+ memcpy(&sqc, qm->sqc + qp_id, sizeof(struct qm_sqc));
sqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
sqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &sqc, sizeof(struct qm_sqc), "SOFT SQC");
@@ -229,7 +229,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
down_read(&qm->qps_lock);
if (qm->cqc) {
- memcpy(&cqc, qm->cqc + qp_id * sizeof(struct qm_cqc), sizeof(struct qm_cqc));
+ memcpy(&cqc, qm->cqc + qp_id, sizeof(struct qm_cqc));
cqc.base_h = cpu_to_le32(QM_XQC_ADDR_MASK);
cqc.base_l = cpu_to_le32(QM_XQC_ADDR_MASK);
dump_show(qm, &cqc, sizeof(struct qm_cqc), "SOFT CQC");
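
The debugfs fix above corrects double-scaled pointer arithmetic: qm->sqc and qm->cqc are typed pointers, so base + qp_id already advances by qp_id * sizeof(element); multiplying by the element size again indexed far past the intended entry. A runnable userspace sketch:

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct qm_sqc_demo { unsigned int w[8]; };	/* stand-in element type */

int main(void)
{
	struct qm_sqc_demo table[4] = { { { 0 } } }, out;
	size_t i = 2;

	memcpy(&out, table + i, sizeof(out));	/* correct: element i */
	assert((size_t)((char *)(table + i) - (char *)table) ==
	       i * sizeof(out));
	/*
	 * The buggy form, table + i * sizeof(struct qm_sqc_demo), would
	 * scale twice and read from byte offset i * sizeof * sizeof.
	 */
	return 0;
}
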
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 9f0b94c8e03d..0f3ddbadbcf9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -100,6 +100,29 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
+enum hpre_cap_table_type {
+ QM_RAS_NFE_TYPE = 0x0,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ HPRE_RAS_NFE_TYPE,
+ HPRE_RAS_NFE_RESET,
+ HPRE_RAS_CE_TYPE,
+ HPRE_CORE_INFO,
+ HPRE_CORE_EN,
+ HPRE_DRV_ALG_BITMAP,
+ HPRE_ALG_BITMAP,
+ HPRE_CORE1_BITMAP_CAP,
+ HPRE_CORE2_BITMAP_CAP,
+ HPRE_CORE3_BITMAP_CAP,
+ HPRE_CORE4_BITMAP_CAP,
+ HPRE_CORE5_BITMAP_CAP,
+ HPRE_CORE6_BITMAP_CAP,
+ HPRE_CORE7_BITMAP_CAP,
+ HPRE_CORE8_BITMAP_CAP,
+ HPRE_CORE9_BITMAP_CAP,
+ HPRE_CORE10_BITMAP_CAP,
+};
+
struct hisi_qp *hpre_create_qp(u8 type);
int hpre_algs_register(struct hisi_qm *qm);
void hpre_algs_unregister(struct hisi_qm *qm);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index c167dbd6c7d6..2a2910261210 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -2006,8 +2006,6 @@ static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
}
static struct akcipher_alg rsa = {
- .sign = hpre_rsa_dec,
- .verify = hpre_rsa_enc,
.encrypt = hpre_rsa_enc,
.decrypt = hpre_rsa_dec,
.set_pub_key = hpre_rsa_setpubkey,
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 6b536ad2ada5..f5b47e5ff48a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,6 +13,7 @@
#include <linux/uacce.h>
#include "hpre.h"
+#define CAP_FILE_PERMISSION 0444
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_CTRL_CNT_CLR_CE 0x301000
#define HPRE_FSM_MAX_CNT 0x301008
@@ -203,7 +204,7 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},
{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},
{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
- {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
+ {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
@@ -222,18 +223,27 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};
-enum hpre_pre_store_cap_idx {
- HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
- HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
- HPRE_DRV_ALG_BITMAP_CAP_IDX,
- HPRE_DEV_ALG_BITMAP_CAP_IDX,
-};
-
-static const u32 hpre_pre_store_caps[] = {
- HPRE_CLUSTER_NUM_CAP,
- HPRE_CORE_ENABLE_BITMAP_CAP,
- HPRE_DRV_ALG_BITMAP_CAP,
- HPRE_DEV_ALG_BITMAP_CAP,
+static const struct hisi_qm_cap_query_info hpre_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C37, 0x7C37},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {HPRE_RAS_NFE_TYPE, "HPRE_RAS_NFE_TYPE ", 0x3130, 0x0, 0x3FFFFE, 0x1FFFC3E},
+ {HPRE_RAS_NFE_RESET, "HPRE_RAS_NFE_RESET ", 0x3134, 0x0, 0x3FFFFE, 0xBFFC3E},
+ {HPRE_RAS_CE_TYPE, "HPRE_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
+ {HPRE_CORE_INFO, "HPRE_CORE_INFO ", 0x313c, 0x0, 0x420802, 0x120A0A},
+ {HPRE_CORE_EN, "HPRE_CORE_EN ", 0x3140, 0x0, 0xF, 0x3FF},
+ {HPRE_DRV_ALG_BITMAP, "HPRE_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x03, 0x27},
+ {HPRE_ALG_BITMAP, "HPRE_ALG_BITMAP ", 0x3148, 0x0, 0x03, 0x7F},
+ {HPRE_CORE1_BITMAP_CAP, "HPRE_CORE1_BITMAP_CAP ", 0x314c, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE2_BITMAP_CAP, "HPRE_CORE2_BITMAP_CAP ", 0x3150, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE3_BITMAP_CAP, "HPRE_CORE3_BITMAP_CAP ", 0x3154, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE4_BITMAP_CAP, "HPRE_CORE4_BITMAP_CAP ", 0x3158, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE5_BITMAP_CAP, "HPRE_CORE5_BITMAP_CAP ", 0x315c, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE6_BITMAP_CAP, "HPRE_CORE6_BITMAP_CAP ", 0x3160, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE7_BITMAP_CAP, "HPRE_CORE7_BITMAP_CAP ", 0x3164, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE8_BITMAP_CAP, "HPRE_CORE8_BITMAP_CAP ", 0x3168, 0x0, 0x7F, 0x7F},
+ {HPRE_CORE9_BITMAP_CAP, "HPRE_CORE9_BITMAP_CAP ", 0x316c, 0x0, 0x10, 0x10},
+ {HPRE_CORE10_BITMAP_CAP, "HPRE_CORE10_BITMAP_CAP ", 0x3170, 0x0, 0x10, 0x10},
};
static const struct hpre_hw_error hpre_hw_errors[] = {
@@ -360,7 +370,7 @@ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
- cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
+ cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;
if (alg & cap_val)
return true;
@@ -415,7 +425,7 @@ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
@@ -503,14 +513,17 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
static int hpre_set_cluster(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- unsigned long offset;
u32 cluster_core_mask;
+ unsigned long offset;
+ u32 hpre_core_info;
u8 clusters_num;
u32 val = 0;
int ret, i;
- cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = i * HPRE_CLSTR_ADDR_INTRVL;
@@ -593,6 +606,9 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
+ unsigned long offset;
+ u8 clusters_num, i;
+ u32 hpre_core_info;
u32 val;
if (qm->ver < QM_HW_V3)
@@ -606,17 +622,26 @@ static void hpre_enable_clock_gate(struct hisi_qm *qm)
val |= HPRE_PEH_CFG_AUTO_GATE_EN;
writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
- val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
- val |= HPRE_CLUSTER_DYN_CTL_EN;
- writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
-
- val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
- val |= HPRE_CORE_GATE_EN;
- writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ for (i = 0; i < clusters_num; i++) {
+ offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
+ val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+ val |= HPRE_CLUSTER_DYN_CTL_EN;
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+
+ val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ val |= HPRE_CORE_GATE_EN;
+ writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ }
}
static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
+ unsigned long offset;
+ u8 clusters_num, i;
+ u32 hpre_core_info;
u32 val;
if (qm->ver < QM_HW_V3)
@@ -630,13 +655,19 @@ static void hpre_disable_clock_gate(struct hisi_qm *qm)
val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
- val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
- val &= ~HPRE_CLUSTER_DYN_CTL_EN;
- writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
-
- val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
- val &= ~HPRE_CORE_GATE_EN;
- writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ for (i = 0; i < clusters_num; i++) {
+ offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
+ val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+ val &= ~HPRE_CLUSTER_DYN_CTL_EN;
+ writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
+
+ val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ val &= ~HPRE_CORE_GATE_EN;
+ writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
+ }
}
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
@@ -699,11 +730,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
unsigned long offset;
+ u32 hpre_core_info;
u8 clusters_num;
int i;
/* clear clusterX/cluster_ctrl */
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
@@ -995,10 +1029,13 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
+ u32 hpre_core_info;
u8 clusters_num;
int i, ret;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
@@ -1041,6 +1078,26 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
return hpre_cluster_debugfs_init(qm);
}
+static int hpre_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(hpre_cap_regs);
+
static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
@@ -1059,6 +1116,9 @@ static void hpre_dfx_debug_init(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_PF && hpre_regs)
debugfs_create_file("diff_regs", 0444, parent,
qm, &hpre_diff_regs_fops);
+
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &hpre_cap_regs_fops);
}
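
The new cap_regs file follows the standard read-only debugfs pattern: one seq_file show routine plus DEFINE_SHOW_ATTRIBUTE(), which generates the matching open/read/release file_operations. A minimal kernel-style sketch with hypothetical names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_caps_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "CAP_EXAMPLE = 0x%08x\n", 0x1234u);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_caps);	/* emits demo_caps_fops */

/* at init time:
 *	debugfs_create_file("cap_regs", 0444, parent, NULL, &demo_caps_fops);
 */
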
static int hpre_debugfs_init(struct hisi_qm *qm)
@@ -1106,26 +1166,33 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
struct hisi_qm_cap_record *hpre_cap;
struct device *dev = &qm->pdev->dev;
+ u32 hpre_core_info;
+ u8 clusters_num;
size_t i, size;
- size = ARRAY_SIZE(hpre_pre_store_caps);
+ size = ARRAY_SIZE(hpre_cap_query_info);
hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
if (!hpre_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- hpre_cap[i].type = hpre_pre_store_caps[i];
- hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
- hpre_pre_store_caps[i], qm->cap_ver);
+ hpre_cap[i].type = hpre_cap_query_info[i].type;
+ hpre_cap[i].name = hpre_cap_query_info[i].name;
+ hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info,
+ i, qm->cap_ver);
}
- if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
+ hpre_core_info = hpre_cap[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
+ if (clusters_num > HPRE_CLUSTERS_NUM_MAX) {
dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
- hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
+ clusters_num, HPRE_CLUSTERS_NUM_MAX);
return -EINVAL;
}
qm->cap_tables.dev_cap_table = hpre_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1142,7 +1209,6 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->mode = uacce_mode;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
@@ -1172,7 +1238,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
- alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
+ alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
if (ret) {
pci_err(pdev, "Failed to set hpre algs!\n");
@@ -1188,10 +1254,13 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 hpre_core_info;
u8 clusters_num;
int i, j, idx;
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
@@ -1231,6 +1300,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
struct qm_debug *debug = &qm->debug;
struct pci_dev *pdev = qm->pdev;
void __iomem *io_base;
+ u32 hpre_core_info;
u8 clusters_num;
int i, j, idx;
u32 val;
@@ -1246,7 +1316,9 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
hpre_com_dfx_regs[i].name, debug->last_words[i], val);
}
- clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
+ hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
+ clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
+ hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
io_base = qm->io_base + hpre_cluster_offsets[i];
for (j = 0; j < cluster_dfx_regs_num; j++) {
@@ -1280,11 +1352,15 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
+}
+
+static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask;
+
+ nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1298,6 +1374,38 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
+static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ hpre_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_reset_mask) {
+ /* Disable the same error reporting until device is recovered. */
+ hpre_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hpre_clear_hw_err_status(qm, err_status);
+ }
+
+ return ACC_ERR_RECOVERED;
+}
+
+static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hpre_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
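
With get_err_result and dev_is_abnormal, the driver now owns the whole device-error flow that qm.c previously drove through separate callbacks: log the status, mask the offending NFE bits so the same error cannot re-fire until recovery completes, and report whether a reset is needed. A compact sketch of that decision logic (demo_* names hypothetical):

#include <stdint.h>

enum demo_err_result { DEMO_ERR_RECOVERED, DEMO_ERR_NEED_RESET };

static enum demo_err_result demo_get_err_result(uint32_t status,
						uint32_t reset_mask,
						uint32_t *nfe_enable)
{
	if (!status)
		return DEMO_ERR_RECOVERED;

	/* would log the hardware status here */
	if (status & reset_mask) {
		*nfe_enable &= ~status;	/* mute this error until recovery */
		return DEMO_ERR_NEED_RESET;
	}

	/* recoverable: clear the source and keep running */
	return DEMO_ERR_RECOVERED;
}
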
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1324,12 +1432,13 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.hw_err_disable = hpre_hw_error_disable,
.get_dev_hw_err_status = hpre_get_hw_err_status,
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
- .log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
.open_sva_prefetch = hpre_open_sva_prefetch,
.close_sva_prefetch = hpre_close_sva_prefetch,
.show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
+ .get_err_result = hpre_get_err_result,
+ .dev_is_abnormal = hpre_dev_is_abnormal,
};
static int hpre_pf_probe_init(struct hpre *hpre)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 07983af9e3e2..d3f5d108b898 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -30,8 +30,6 @@
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
-#define QM_MB_CMD_DATA_SHIFT 32
-#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
@@ -102,6 +100,8 @@
#define QM_PM_CTRL 0x100148
#define QM_IDLE_DISABLE BIT(9)
+#define QM_SUB_VERSION_ID 0x210
+
#define QM_VFT_CFG_DATA_L 0x100064
#define QM_VFT_CFG_DATA_H 0x100068
#define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
@@ -119,6 +119,7 @@
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
+#define QM_MAX_QC_TYPE 2
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
@@ -176,6 +177,10 @@
#define QM_IFC_INT_MASK 0x0024
#define QM_IFC_INT_STATUS 0x0028
#define QM_IFC_INT_SET_V 0x002C
+#define QM_PF2VF_PF_W 0x104700
+#define QM_VF2PF_PF_R 0x104800
+#define QM_VF2PF_VF_W 0x320
+#define QM_PF2VF_VF_R 0x380
#define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK BIT(0)
@@ -185,8 +190,11 @@
#define QM_WAIT_DST_ACK 10
#define QM_MAX_PF_WAIT_COUNT 10
#define QM_MAX_VF_WAIT_COUNT 40
-#define QM_VF_RESET_WAIT_US 20000
-#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF_RESET_WAIT_US 20000
+#define QM_VF_RESET_WAIT_CNT 3000
+#define QM_VF2PF_REG_SIZE 4
+#define QM_IFC_CMD_MASK GENMASK(31, 0)
+#define QM_IFC_DATA_SHIFT 32
#define QM_VF_RESET_WAIT_TIMEOUT_US \
(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
@@ -234,8 +242,6 @@
#define QM_QOS_MAX_CIR_U 6
#define QM_AUTOSUSPEND_DELAY 3000
-#define QM_DEV_ALG_MAX_LEN 256
-
/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL 1
#define QM_DUMP_SQC_FAIL 3
@@ -271,18 +277,12 @@ enum vft_type {
SHAPER_VFT,
};
-enum acc_err_result {
- ACC_ERR_NONE,
- ACC_ERR_NEED_RESET,
- ACC_ERR_RECOVERED,
-};
-
enum qm_alg_type {
ALG_TYPE_0,
ALG_TYPE_1,
};
-enum qm_mb_cmd {
+enum qm_ifc_cmd {
QM_PF_FLR_PREPARE = 0x01,
QM_PF_SRST_PREPARE,
QM_PF_RESET_DONE,
@@ -307,11 +307,29 @@ enum qm_basic_type {
QM_VF_IRQ_NUM_CAP,
};
-enum qm_pre_store_cap_idx {
- QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
- QM_AEQ_IRQ_TYPE_CAP_IDX,
- QM_ABN_IRQ_TYPE_CAP_IDX,
- QM_PF2VF_IRQ_TYPE_CAP_IDX,
+enum qm_cap_table_type {
+ QM_CAP_VF = 0x0,
+ QM_AEQE_NUM,
+ QM_SCQE_NUM,
+ QM_EQ_IRQ,
+ QM_AEQ_IRQ,
+ QM_ABNORMAL_IRQ,
+ QM_MB_IRQ,
+ MAX_IRQ_NUM,
+ EXT_BAR_INDEX,
+};
+
+static const struct hisi_qm_cap_query_info qm_cap_query_info[] = {
+ {QM_CAP_VF, "QM_CAP_VF ", 0x3100, 0x0, 0x0, 0x6F01},
+ {QM_AEQE_NUM, "QM_AEQE_NUM ", 0x3104, 0x800, 0x4000800, 0x4000800},
+ {QM_SCQE_NUM, "QM_SCQE_NUM ",
+ 0x3108, 0x4000400, 0x4000400, 0x4000400},
+ {QM_EQ_IRQ, "QM_EQ_IRQ ", 0x310c, 0x10000, 0x10000, 0x10000},
+ {QM_AEQ_IRQ, "QM_AEQ_IRQ ", 0x3110, 0x0, 0x10001, 0x10001},
+ {QM_ABNORMAL_IRQ, "QM_ABNORMAL_IRQ ", 0x3114, 0x0, 0x10003, 0x10003},
+ {QM_MB_IRQ, "QM_MB_IRQ ", 0x3118, 0x0, 0x0, 0x10002},
+ {MAX_IRQ_NUM, "MAX_IRQ_NUM ", 0x311c, 0x10001, 0x40002, 0x40003},
+ {EXT_BAR_INDEX, "EXT_BAR_INDEX ", 0x3120, 0x0, 0x0, 0x14},
};
static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
@@ -321,6 +339,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0},
};
static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
@@ -344,13 +363,6 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
};
-static const u32 qm_pre_store_caps[] = {
- QM_EQ_IRQ_TYPE_CAP,
- QM_AEQ_IRQ_TYPE_CAP,
- QM_ABN_IRQ_TYPE_CAP,
- QM_PF2VF_IRQ_TYPE_CAP,
-};
-
struct qm_mailbox {
__le16 w0;
__le16 queue_num;
@@ -391,6 +403,11 @@ struct hisi_qm_hw_ops {
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
int (*set_msi)(struct hisi_qm *qm, bool set);
+
+ /* (u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd */
+ int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
+ void (*set_ifc_end)(struct hisi_qm *qm);
+ int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
};
struct hisi_qm_hw_error {
@@ -451,6 +468,37 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
static void qm_irqs_unregister(struct hisi_qm *qm);
static int qm_reset_device(struct hisi_qm *qm);
+int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev;
+ u32 n, q_num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
+ if (!pdev) {
+ q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %u\n",
+ q_num);
+ } else {
+ if (pdev->revision == QM_HW_V1)
+ q_num = QM_QNUM_V1;
+ else
+ q_num = QM_QNUM_V2;
+
+ pci_dev_put(pdev);
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || n < QM_MIN_QNUM || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_q_num_set);
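
hisi_qm_q_num_set() centralizes queue-number validation that each accelerator driver previously open-coded; drivers wrap it in a kernel_param_ops .set callback so bad values are rejected at module-parameter parse time. A hedged sketch of the wiring (the demo_* names and device ID are illustrative):

static int demo_q_num_set(const char *val, const struct kernel_param *kp)
{
	return hisi_qm_q_num_set(val, kp, 0xa258);	/* illustrative dev ID */
}

static const struct kernel_param_ops demo_q_num_ops = {
	.set = demo_q_num_set,
	.get = param_get_int,
};

static u32 demo_q_num = 64;
module_param_cb(q_num, &demo_q_num_ops, &demo_q_num, 0444);
MODULE_PARM_DESC(q_num, "Queue number (per function), validated at set time");
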
static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
@@ -465,15 +513,20 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
- u32 val, dev_val;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ u32 err_status;
- if (qm->fun_type == QM_HW_VF)
+ if (pf_qm->fun_type == QM_HW_VF)
return false;
- val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
- dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
+ err_status = qm_get_hw_error_status(pf_qm);
+ if (err_status & pf_qm->err_info.qm_shutdown_mask)
+ return true;
+
+ if (pf_qm->err_ini->dev_is_abnormal)
+ return pf_qm->err_ini->dev_is_abnormal(pf_qm);
- return val || dev_val;
+ return false;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -618,7 +671,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_mb);
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct qm_mailbox mailbox;
dma_addr_t xqc_dma;
void *tmp_xqc;
@@ -652,7 +704,7 @@ int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op
}
/* Setting xqc will fail if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm)) {
+ if (qm_check_dev_error(qm)) {
dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n");
return -EIO;
}
@@ -763,6 +815,27 @@ u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
+u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
+ const struct hisi_qm_cap_query_info *info_table,
+ u32 index, bool is_read)
+{
+ u32 val;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ return info_table[index].v1_val;
+ case QM_HW_V2:
+ return info_table[index].v2_val;
+ default:
+ if (!is_read)
+ return info_table[index].v3_val;
+
+ val = readl(qm->io_base + info_table[index].offset);
+ return val;
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_qm_get_cap_value);
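
hisi_qm_get_cap_value() replaces the old pre-store helpers with a versioned lookup: fixed per-revision defaults for older hardware, and a live register read on the newest revisions when is_read is set. A runnable userspace sketch of the dispatch shape (struct and names hypothetical):

#include <stdint.h>
#include <stdio.h>

struct cap_query { const char *name; uint32_t v1_val, v2_val, v3_val; };

static uint32_t cap_value(int hw_ver, const struct cap_query *c,
			  uint32_t (*read_reg)(const struct cap_query *))
{
	switch (hw_ver) {
	case 1: return c->v1_val;
	case 2: return c->v2_val;
	default: return read_reg ? read_reg(c) : c->v3_val;
	}
}

static uint32_t fake_read(const struct cap_query *c) { (void)c; return 0x7f; }

int main(void)
{
	struct cap_query q = { "DEMO_CAP", 0x1, 0x2, 0x4 };

	printf("%s = 0x%x\n", q.name, cap_value(3, &q, fake_read));
	return 0;
}
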
+
static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
u16 *high_bits, enum qm_basic_type type)
{
@@ -798,10 +871,10 @@ int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *d
strcat(algs, dev_algs[i].alg);
ptr = strrchr(algs, '\n');
- if (ptr) {
+ if (ptr)
*ptr = '\0';
- qm->uacce->algs = algs;
- }
+
+ qm->uacce->algs = algs;
return 0;
}
@@ -995,11 +1068,10 @@ static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
static void qm_reset_function(struct hisi_qm *qm)
{
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return;
ret = qm_reset_prepare_ready(qm);
@@ -1425,22 +1497,25 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp;
+ u32 error_status;
- /* read err sts */
- tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
- error_status = qm->error_mask & tmp;
-
- if (error_status) {
+ error_status = qm_get_hw_error_status(qm);
+ if (error_status & qm->error_mask) {
if (error_status & QM_ECC_MBIT)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- if (error_status & qm->err_info.qm_reset_mask)
+ if (error_status & qm->err_info.qm_reset_mask) {
+ /* Disable the same error reporting until device is recovered. */
+ writel(qm->err_info.nfe & (~error_status),
+ qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_NEED_RESET;
+ }
+ /* Clear error source if not need reset. */
writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE);
}
return ACC_ERR_RECOVERED;
@@ -1480,17 +1555,15 @@ static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
struct device *dev = &qm->pdev->dev;
- u32 cmd;
- u64 msg;
+ enum qm_ifc_cmd cmd;
int ret;
- ret = qm_get_mb_cmd(qm, &msg, vf_id);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id);
if (ret) {
- dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
+ dev_err(dev, "failed to get command from VF(%u)!\n", vf_id);
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_VF_PREPARE_FAIL:
dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
@@ -1502,7 +1575,7 @@ static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
case QM_VF_START_DONE:
break;
default:
- dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
+ dev_err(dev, "unsupported command(0x%x) sent by VF(%u)!\n", cmd, vf_id);
break;
}
}
@@ -1570,17 +1643,14 @@ static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
writel(val, qm->io_base + QM_IFC_INT_SET_V);
}
-static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
+static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- struct qm_mailbox mailbox;
int cnt = 0;
u64 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num);
if (ret) {
dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
goto err_unlock;
@@ -1602,27 +1672,23 @@ static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
}
err_unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return ret;
}
-static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct device *dev = &qm->pdev->dev;
u32 vfs_num = qm->vfs_num;
- struct qm_mailbox mailbox;
u64 val = 0;
int cnt = 0;
int ret;
u32 i;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
- mutex_lock(&qm->mailbox_lock);
- /* PF sends command to all VFs by mailbox */
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS);
if (ret) {
- dev_err(dev, "failed to send command to VFs!\n");
- mutex_unlock(&qm->mailbox_lock);
+ dev_err(dev, "failed to send command(0x%x) to all vfs!\n", cmd);
+ qm->ops->set_ifc_end(qm);
return ret;
}
@@ -1632,7 +1698,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
val = readq(qm->io_base + QM_IFC_READY_STATUS);
/* If all VFs acked, PF notifies VFs successfully. */
if (!(val & GENMASK(vfs_num, 1))) {
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
return 0;
}
@@ -1640,7 +1706,7 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
break;
}
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
/* Check which vf respond timeout. */
for (i = 1; i <= vfs_num; i++) {
@@ -1651,18 +1717,15 @@ static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
return -ETIMEDOUT;
}
-static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
+static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
- struct qm_mailbox mailbox;
int cnt = 0;
u32 val;
int ret;
- qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
- mutex_lock(&qm->mailbox_lock);
- ret = qm_mb_nolock(qm, &mailbox);
+ ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0);
if (ret) {
- dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
+ dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd);
goto unlock;
}
@@ -1681,7 +1744,8 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
}
unlock:
- mutex_unlock(&qm->mailbox_lock);
+ qm->ops->set_ifc_end(qm);
+
return ret;
}
@@ -1782,6 +1846,94 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
return ret;
}
+static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ struct qm_mailbox mailbox;
+ u64 msg;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
+ mutex_lock(&qm->mailbox_lock);
+ return qm_mb_nolock(qm, &mailbox);
+}
+
+static void qm_set_ifc_end_v3(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->mailbox_lock);
+}
+
+static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+ int ret;
+
+ ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ if (ret)
+ return ret;
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
+static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
+{
+ uintptr_t offset;
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ offset = QM_PF2VF_PF_W;
+ else
+ offset = QM_VF2PF_VF_W;
+
+ msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;
+
+ mutex_lock(&qm->ifc_lock);
+ writeq(msg, qm->io_base + offset);
+
+ return 0;
+}
+
+static void qm_set_ifc_end_v4(struct hisi_qm *qm)
+{
+ mutex_unlock(&qm->ifc_lock);
+}
+
+static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num)
+{
+ uintptr_t offset;
+
+ offset = QM_VF2PF_PF_R + QM_VF2PF_REG_SIZE * fun_num;
+
+ return (u64)readl(qm->io_base + offset);
+}
+
+static u64 qm_get_ifc_vf(struct hisi_qm *qm)
+{
+ return readq(qm->io_base + QM_PF2VF_VF_R);
+}
+
+static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
+{
+ u64 msg;
+
+ if (qm->fun_type == QM_HW_PF)
+ msg = qm_get_ifc_pf(qm, fun_num);
+ else
+ msg = qm_get_ifc_vf(qm);
+
+ *cmd = msg & QM_IFC_CMD_MASK;
+
+ if (data)
+ *data = msg >> QM_IFC_DATA_SHIFT;
+
+ return 0;
+}
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.hw_error_init = qm_hw_error_init_v1,
@@ -1804,6 +1956,21 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
.set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v3,
+ .set_ifc_end = qm_set_ifc_end_v3,
+ .get_ifc = qm_get_ifc_v3,
+};
+
+static const struct hisi_qm_hw_ops qm_hw_ops_v4 = {
+ .get_vft = qm_get_vft_v2,
+ .qm_db = qm_db_v2,
+ .hw_error_init = qm_hw_error_init_v3,
+ .hw_error_uninit = qm_hw_error_uninit_v3,
+ .hw_error_handle = qm_hw_error_handle_v2,
+ .set_msi = qm_set_msi_v3,
+ .set_ifc_begin = qm_set_ifc_begin_v4,
+ .set_ifc_end = qm_set_ifc_end_v4,
+ .get_ifc = qm_get_ifc_v4,
};
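
The new set_ifc_begin/set_ifc_end/get_ifc callbacks abstract PF/VF communication: v3 hardware tunnels the message through the mailbox under mailbox_lock, while v4 writes the dedicated QM_PF2VF/QM_VF2PF registers under the new ifc_lock, and the ops table is selected by hardware version. Both variants share the packing noted in the ops comment (low 32 bits command, high 32 bits payload); a runnable sketch:

#include <assert.h>
#include <stdint.h>

#define IFC_CMD_MASK   0xffffffffull	/* mirrors QM_IFC_CMD_MASK */
#define IFC_DATA_SHIFT 32		/* mirrors QM_IFC_DATA_SHIFT */

static uint64_t ifc_pack(uint32_t cmd, uint32_t data)
{
	return (uint64_t)data << IFC_DATA_SHIFT | cmd;
}

int main(void)
{
	uint64_t msg = ifc_pack(0x05 /* illustrative command id */, 100);

	assert((uint32_t)(msg & IFC_CMD_MASK) == 0x05);
	assert((uint32_t)(msg >> IFC_DATA_SHIFT) == 100);
	return 0;
}
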
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2096,12 +2263,11 @@ static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
static int qm_drain_qp(struct hisi_qp *qp)
{
struct hisi_qm *qm = qp->qm;
- struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
u32 state = 0;
int ret;
/* No need to judge if master OOO is blocked. */
- if (qm_check_dev_error(pf_qm))
+ if (qm_check_dev_error(qm))
return 0;
/* HW V3 supports drain qp by device */
@@ -2415,7 +2581,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
+ if (qp_ctx.qc_type > QM_MAX_QC_TYPE)
return -EINVAL;
qm_set_sqctype(q, qp_ctx.qc_type);
@@ -2783,11 +2949,14 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->ops = &qm_hw_ops_v1;
else if (qm->ver == QM_HW_V2)
qm->ops = &qm_hw_ops_v2;
- else
+ else if (qm->ver == QM_HW_V3)
qm->ops = &qm_hw_ops_v3;
+ else
+ qm->ops = &qm_hw_ops_v4;
pci_set_drvdata(pdev, qm);
mutex_init(&qm->mailbox_lock);
+ mutex_init(&qm->ifc_lock);
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
@@ -3547,7 +3716,6 @@ static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 mb_cmd;
u32 qos;
int ret;
@@ -3557,10 +3725,9 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
return;
}
- mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
- ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
+ ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num);
if (ret)
- dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
+ dev_err(dev, "failed to send command(0x%x) to VF(%u)!\n", QM_PF_SET_QOS, fun_num);
}
static int qm_vf_read_qos(struct hisi_qm *qm)
@@ -3861,30 +4028,12 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
- u32 err_sts;
-
- if (!qm->err_ini->get_dev_hw_err_status) {
- dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
+ if (!qm->err_ini->get_err_result) {
+ dev_err(&qm->pdev->dev, "Device doesn't support reset!\n");
return ACC_ERR_NONE;
}
- /* get device hardware error status */
- err_sts = qm->err_ini->get_dev_hw_err_status(qm);
- if (err_sts) {
- if (err_sts & qm->err_info.ecc_2bits_mask)
- qm->err_status.is_dev_ecc_mbit = true;
-
- if (qm->err_ini->log_dev_hw_err)
- qm->err_ini->log_dev_hw_err(qm, err_sts);
-
- if (err_sts & qm->err_info.dev_reset_mask)
- return ACC_ERR_NEED_RESET;
-
- if (qm->err_ini->clear_dev_hw_err_status)
- qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
- }
-
- return ACC_ERR_RECOVERED;
+ return qm->err_ini->get_err_result(qm);
}
static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
@@ -4067,7 +4216,7 @@ stop_fail:
return ret;
}
-static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
+static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd,
enum qm_stop_reason stop_reason)
{
struct pci_dev *pdev = qm->pdev;
@@ -4080,7 +4229,7 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
ret = qm_ping_all_vfs(qm, cmd);
if (ret)
- pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
+ pci_err(pdev, "failed to send command to all VFs before PF reset!\n");
} else {
ret = qm_vf_reset_prepare(qm, stop_reason);
if (ret)
@@ -4095,6 +4244,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
int ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_reset_prepare_ready(qm);
if (ret) {
pci_err(pdev, "Controller reset not ready!\n");
@@ -4256,7 +4411,7 @@ restart_fail:
return ret;
}
-static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
+static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4485,7 +4640,7 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
* Check whether there is an ECC mbit error, If it occurs, need to
* wait for soft reset to fix it.
*/
- while (qm_check_dev_error(pf_qm)) {
+ while (qm_check_dev_error(qm)) {
msleep(++delay);
if (delay > QM_RESET_WAIT_TIMEOUT)
return;
@@ -4633,7 +4788,7 @@ static void hisi_qm_controller_reset(struct work_struct *rst_work)
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
- enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_PREPARE_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4667,7 +4822,7 @@ out:
static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
- enum qm_mb_cmd cmd = QM_VF_START_DONE;
+ enum qm_ifc_cmd cmd = QM_VF_START_DONE;
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -4690,7 +4845,6 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
u32 val, cmd;
- u64 msg;
int ret;
/* Wait for reset to finish */
@@ -4707,16 +4861,15 @@ static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
* Whether message is got successfully,
* VF needs to ack PF by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, 0);
+ ret = qm->ops->get_ifc(qm, &cmd, NULL, 0);
qm_clear_cmd_interrupt(qm, 0);
if (ret) {
- dev_err(dev, "failed to get msg from PF in reset done!\n");
+ dev_err(dev, "failed to get command from PF in reset done!\n");
return ret;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
if (cmd != QM_PF_RESET_DONE) {
- dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
+ dev_err(dev, "the command(0x%x) is not reset done!\n", cmd);
ret = -EINVAL;
}
@@ -4753,22 +4906,21 @@ err_get_status:
static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
struct device *dev = &qm->pdev->dev;
- u64 msg;
- u32 cmd;
+ enum qm_ifc_cmd cmd;
+ u32 data;
int ret;
/*
* Get the msg from source by sending mailbox. Whether message is got
* successfully, destination needs to ack source by clearing the interrupt.
*/
- ret = qm_get_mb_cmd(qm, &msg, fun_num);
+ ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num);
qm_clear_cmd_interrupt(qm, BIT(fun_num));
if (ret) {
- dev_err(dev, "failed to get msg from source!\n");
+ dev_err(dev, "failed to get command from source!\n");
return;
}
- cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_PF_FLR_PREPARE:
qm_pf_reset_vf_process(qm, QM_DOWN);
@@ -4780,10 +4932,10 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
qm_vf_get_qos(qm, fun_num);
break;
case QM_PF_SET_QOS:
- qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
+ qm->mb_qos = data;
break;
default:
- dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
+ dev_err(dev, "unsupported command(0x%x) sent by function(%u)!\n", cmd, fun_num);
break;
}
}
@@ -4866,7 +5018,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_VF)
return;
- val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return;
@@ -4883,7 +5035,7 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_VF)
return 0;
- val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return 0;
@@ -4900,7 +5052,7 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4914,7 +5066,7 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -4931,7 +5083,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4945,7 +5097,7 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -4963,7 +5115,7 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return;
@@ -4977,7 +5129,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
+ val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
return 0;
@@ -5065,24 +5217,26 @@ static int qm_get_qp_num(struct hisi_qm *qm)
return 0;
}
-static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
+static int qm_pre_store_caps(struct hisi_qm *qm)
{
struct hisi_qm_cap_record *qm_cap;
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(qm_pre_store_caps);
+ size = ARRAY_SIZE(qm_cap_query_info);
qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
if (!qm_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- qm_cap[i].type = qm_pre_store_caps[i];
- qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
- qm_pre_store_caps[i], qm->cap_ver);
+ qm_cap[i].type = qm_cap_query_info[i].type;
+ qm_cap[i].name = qm_cap_query_info[i].name;
+ qm_cap[i].cap_val = hisi_qm_get_cap_value(qm, qm_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.qm_cap_table = qm_cap;
+ qm->cap_tables.qm_cap_size = size;
return 0;
}
@@ -5119,8 +5273,22 @@ static int qm_get_hw_caps(struct hisi_qm *qm)
set_bit(cap_info[i].type, &qm->caps);
}
- /* Fetch and save the value of irq type related capability registers */
- return qm_pre_store_irq_type_caps(qm);
+ /* Fetch and save the value of qm capability registers */
+ return qm_pre_store_caps(qm);
+}
+
+static void qm_get_version(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 sub_version_id;
+
+ qm->ver = pdev->revision;
+
+ if (pdev->revision == QM_HW_V3) {
+ sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID);
+ if (sub_version_id)
+ qm->ver = sub_version_id;
+ }
}
static int qm_get_pci_res(struct hisi_qm *qm)
@@ -5142,6 +5310,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
+ qm_get_version(qm);
+
ret = qm_get_hw_caps(qm);
if (ret)
goto err_ioremap;
@@ -5161,6 +5331,7 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
+ hisi_qm_pre_init(qm);
ret = qm_get_qp_num(qm);
if (ret)
goto err_db_ioremap;
@@ -5203,6 +5374,14 @@ static int qm_clear_device(struct hisi_qm *qm)
return ret;
}
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret) {
+ writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
+ return ret;
+ }
+ }
+
return qm_reset_device(qm);
}
@@ -5417,8 +5596,6 @@ int hisi_qm_init(struct hisi_qm *qm)
struct device *dev = &pdev->dev;
int ret;
- hisi_qm_pre_init(qm);
-
ret = hisi_qm_pci_init(qm);
if (ret)
return ret;
@@ -5554,6 +5731,12 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
if (ret)
return ret;
+ if (qm->err_ini->set_priv_status) {
+ ret = qm->err_ini->set_priv_status(qm);
+ if (ret)
+ return ret;
+ }
+
ret = qm_set_pf_mse(qm, false);
if (ret)
pci_err(pdev, "failed to disable MSE before suspending!\n");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index 9bafcc5aa404..ef0cb733c92c 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1304,7 +1304,7 @@ MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
static struct platform_driver sec_driver = {
.probe = sec_probe,
- .remove_new = sec_remove,
+ .remove = sec_remove,
.driver = {
.name = "hisi_sec_platform_driver",
.of_match_table = sec_match,
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 410c83712e28..4b9970230822 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -37,6 +37,7 @@ struct sec_aead_req {
u8 *a_ivin;
dma_addr_t a_ivin_dma;
struct aead_request *aead_req;
+ bool fallback;
};
/* SEC request of Crypto */
@@ -90,9 +91,7 @@ struct sec_auth_ctx {
dma_addr_t a_key_dma;
u8 *a_key;
u8 a_key_len;
- u8 mac_len;
u8 a_alg;
- bool fallback;
struct crypto_shash *hash_tfm;
struct crypto_aead *fallback_aead_tfm;
};
@@ -220,11 +219,27 @@ enum sec_cap_type {
SEC_CORE4_ALG_BITMAP_HIGH,
};
-enum sec_cap_reg_record_idx {
- SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
- SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DEV_ALG_BITMAP_LOW_IDX,
- SEC_DEV_ALG_BITMAP_HIGH_IDX,
+enum sec_cap_table_type {
+ QM_RAS_NFE_TYPE = 0x0,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ SEC_RAS_NFE_TYPE,
+ SEC_RAS_NFE_RESET,
+ SEC_RAS_CE_TYPE,
+ SEC_CORE_INFO,
+ SEC_CORE_EN,
+ SEC_DRV_ALG_BITMAP_LOW_TB,
+ SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_ALG_BITMAP_LOW,
+ SEC_ALG_BITMAP_HIGH,
+ SEC_CORE1_BITMAP_LOW,
+ SEC_CORE1_BITMAP_HIGH,
+ SEC_CORE2_BITMAP_LOW,
+ SEC_CORE2_BITMAP_HIGH,
+ SEC_CORE3_BITMAP_LOW,
+ SEC_CORE3_BITMAP_HIGH,
+ SEC_CORE4_BITMAP_LOW,
+ SEC_CORE4_BITMAP_HIGH,
};
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 0558f98e221f..66bc07da9eb6 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -948,15 +948,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
struct aead_request *aead_req = req->aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
size_t authsize = crypto_aead_authsize(tfm);
- u8 *mac_out = req->out_mac;
struct scatterlist *sgl = aead_req->src;
+ u8 *mac_out = req->out_mac;
size_t copy_size;
off_t skip_size;
/* Copy input mac */
skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
- copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
- authsize, skip_size);
+ copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
if (unlikely(copy_size != authsize))
return -EINVAL;
@@ -1120,10 +1119,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- if (unlikely(a_ctx->fallback_aead_tfm))
- return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
-
- return 0;
+ return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
}
static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@@ -1139,7 +1135,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
const u32 keylen, const enum sec_hash_alg a_alg,
const enum sec_calg c_alg,
- const enum sec_mac_len mac_len,
const enum sec_cmode c_mode)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1151,7 +1146,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
- ctx->a_ctx.mac_len = mac_len;
c_ctx->c_mode = c_mode;
if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@@ -1162,13 +1156,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
memcpy(c_ctx->c_key, key, keylen);
- if (unlikely(a_ctx->fallback_aead_tfm)) {
- ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
- if (ret)
- return ret;
- }
-
- return 0;
+ return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
}
ret = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -1187,10 +1175,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
goto bad_key;
}
- if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
- (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
+ if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
ret = -EINVAL;
- dev_err(dev, "MAC or AUTH key length error!\n");
+ dev_err(dev, "AUTH key length error!\n");
+ goto bad_key;
+ }
+
+ ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+ if (ret) {
+ dev_err(dev, "set sec fallback key err!\n");
goto bad_key;
}
@@ -1202,27 +1195,19 @@ bad_key:
}
-#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \
-static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \
- u32 keylen) \
-{ \
- return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
-}
-
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
- SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
- SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
- SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
- SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
-GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
- SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \
+static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
+{ \
+ return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \
+}
+
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
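
With mac_len gone, the generator macro drops its maclen parameter. For reference, GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM) now expands to:

    static int sec_setkey_aes_gcm(struct crypto_aead *tfm, const u8 *key, u32 keylen)
    {
            return sec_aead_setkey(tfm, key, keylen, 0, SEC_CALG_AES, SEC_CMODE_GCM);
    }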
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -1470,9 +1455,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct sec_cipher_req *c_req = &req->c_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *a_req = &req->aead_req;
- size_t authsize = ctx->a_ctx.mac_len;
+ struct sec_cipher_req *c_req = &req->c_req;
u32 data_size = aead_req->cryptlen;
u8 flage = 0;
u8 cm, cl;
@@ -1513,10 +1499,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
struct aead_request *aead_req = req->aead_req.aead_req;
- struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
- size_t authsize = crypto_aead_authsize(tfm);
- struct sec_cipher_req *c_req = &req->c_req;
struct sec_aead_req *a_req = &req->aead_req;
+ struct sec_cipher_req *c_req = &req->c_req;
memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
@@ -1524,15 +1508,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
/*
* CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
* the counter must set to 0x01
+ * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
*/
- ctx->a_ctx.mac_len = authsize;
- /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
set_aead_auth_iv(ctx, req);
- }
-
- /* GCM 12Byte Cipher_IV == Auth_IV */
- if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
- ctx->a_ctx.mac_len = authsize;
+ } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+ /* GCM 12Byte Cipher_IV == Auth_IV */
memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
}
}
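
The merged comment describes the 16-byte CCM IV layout: one flag byte, a 13-byte nonce, and a 2-byte counter preset to 1. A hedged illustration of that layout (RFC 3610 counter-block format; this is not the driver's exact construction, which set_aead_auth_iv() performs in hardware-specific form):

    static void example_ccm_cipher_iv(u8 civ[16], const u8 nonce[13])
    {
            civ[0] = 0x01;                  /* flags = q - 1, with q = 2 counter octets */
            memcpy(civ + 1, nonce, 13);     /* 13-byte IV/nonce */
            civ[14] = 0x00;                 /* 2-byte big-endian counter ... */
            civ[15] = 0x01;                 /* ... must start at 0x01 */
    }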
@@ -1542,9 +1522,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@@ -1568,9 +1550,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
{
struct sec_aead_req *a_req = &req->aead_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
- sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+ sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
sqe3->a_key_addr = sqe3->c_key_addr;
@@ -1594,11 +1578,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
- sec_sqe->type2.mac_key_alg =
- cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
+ sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
sec_sqe->type2.mac_key_alg |=
cpu_to_le32((u32)((ctx->a_key_len) /
@@ -1648,11 +1633,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct aead_request *aq = a_req->aead_req;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
+ size_t authsize = crypto_aead_authsize(tfm);
sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
sqe3->auth_mac_key |=
- cpu_to_le32((u32)(ctx->mac_len /
+ cpu_to_le32((u32)(authsize /
SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
sqe3->auth_mac_key |=
@@ -1703,9 +1690,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
struct aead_request *a_req = req->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+ size_t authsize = crypto_aead_authsize(tfm);
struct sec_aead_req *aead_req = &req->aead_req;
struct sec_cipher_req *c_req = &req->c_req;
- size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct aead_request *backlog_aead_req;
struct sec_req *backlog_req;
@@ -1718,10 +1705,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
if (!err && c_req->encrypt) {
struct scatterlist *sgl = a_req->dst;
- sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
- aead_req->out_mac,
- authsize, a_req->cryptlen +
- a_req->assoclen);
+ sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
+ authsize, a_req->cryptlen + a_req->assoclen);
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@@ -1929,8 +1914,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
- struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
+ struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+ const char *aead_name = alg->base.cra_name;
int ret;
ret = sec_aead_init(tfm);
@@ -1939,11 +1926,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
return ret;
}
- auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(auth_ctx->hash_tfm)) {
+ a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(a_ctx->hash_tfm)) {
dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
- return PTR_ERR(auth_ctx->hash_tfm);
+ return PTR_ERR(a_ctx->hash_tfm);
+ }
+
+ a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+ dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+ crypto_free_shash(ctx->a_ctx.hash_tfm);
+ sec_aead_exit(tfm);
+ return PTR_ERR(a_ctx->fallback_aead_tfm);
}
return 0;
@@ -1953,6 +1949,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+ crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
crypto_free_shash(ctx->a_ctx.hash_tfm);
sec_aead_exit(tfm);
}
@@ -1979,7 +1976,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
sec_aead_exit(tfm);
return PTR_ERR(a_ctx->fallback_aead_tfm);
}
- a_ctx->fallback = false;
return 0;
}
@@ -2233,21 +2229,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- size_t authsize = crypto_aead_authsize(tfm);
+ size_t sz = crypto_aead_authsize(tfm);
u8 c_mode = ctx->c_ctx.c_mode;
struct device *dev = ctx->dev;
int ret;
- if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
- req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(dev, "aead input spec error!\n");
+ /* Hardware does not handle cases where authsize is less than 4 bytes */
+ if (unlikely(sz < MIN_MAC_LEN)) {
+ sreq->aead_req.fallback = true;
return -EINVAL;
}
- if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
- (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
- authsize & MAC_LEN_MASK)))) {
- dev_err(dev, "aead input mac length error!\n");
+ if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+ req->assoclen > SEC_MAX_AAD_LEN)) {
+ dev_err(dev, "aead input spec error!\n");
return -EINVAL;
}
@@ -2266,7 +2261,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (sreq->c_req.encrypt)
sreq->c_req.c_len = req->cryptlen;
else
- sreq->c_req.c_len = req->cryptlen - authsize;
+ sreq->c_req.c_len = req->cryptlen - sz;
if (c_mode == SEC_CMODE_CBC) {
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
dev_err(dev, "aead crypto length error!\n");
@@ -2292,8 +2287,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
if (ctx->sec->qm.ver == QM_HW_V2) {
if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
- req->cryptlen <= authsize))) {
- ctx->a_ctx.fallback = true;
+ req->cryptlen <= authsize))) {
+ sreq->aead_req.fallback = true;
return -EINVAL;
}
}
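
Note the fallback decision also moves from the shared sec_auth_ctx into each request (sreq->aead_req.fallback), so concurrent requests on the same tfm can no longer clobber each other's choice. Requests the hardware cannot serve (authsize below 4 bytes, or the V2 zero-length/short-ciphertext cases) set the flag and return -EINVAL; the flag is consumed in sec_aead_crypto() further down, condensed here as a sketch:

    req->aead_req.fallback = false;
    ret = sec_aead_param_check(ctx, req);
    if (unlikely(ret))
            return req->aead_req.fallback ?
                   sec_aead_soft_crypto(ctx, a_req, encrypt) : -EINVAL;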
@@ -2321,16 +2316,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
bool encrypt)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- struct device *dev = ctx->dev;
struct aead_request *subreq;
int ret;
- /* Kunpeng920 aead mode not support input 0 size */
- if (!a_ctx->fallback_aead_tfm) {
- dev_err(dev, "aead fallback tfm is NULL!\n");
- return -EINVAL;
- }
-
subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
if (!subreq)
return -ENOMEM;
@@ -2362,10 +2350,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
+ req->aead_req.fallback = false;
ret = sec_aead_param_check(ctx, req);
if (unlikely(ret)) {
- if (ctx->a_ctx.fallback)
+ if (req->aead_req.fallback)
return sec_aead_soft_crypto(ctx, a_req, encrypt);
return -EINVAL;
}
@@ -2520,8 +2509,8 @@ int sec_register_to_crypto(struct hisi_qm *qm)
u64 alg_mask;
int ret = 0;
- alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DRV_ALG_BITMAP_LOW_IDX);
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_DRV_ALG_BITMAP_LOW_TB);
mutex_lock(&sec_algs_lock);
if (sec_available_devs) {
@@ -2553,8 +2542,8 @@ void sec_unregister_from_crypto(struct hisi_qm *qm)
{
u64 alg_mask;
- alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
- SEC_DRV_ALG_BITMAP_LOW_IDX);
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
+ SEC_DRV_ALG_BITMAP_LOW_TB);
mutex_lock(&sec_algs_lock);
if (--sec_available_devs)
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index 27a0ee5ad913..04725b514382 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -23,17 +23,6 @@ enum sec_hash_alg {
SEC_A_HMAC_SHA512 = 0x15,
};
-enum sec_mac_len {
- SEC_HMAC_CCM_MAC = 16,
- SEC_HMAC_GCM_MAC = 16,
- SEC_SM3_MAC = 32,
- SEC_HMAC_SM3_MAC = 32,
- SEC_HMAC_MD5_MAC = 16,
- SEC_HMAC_SHA1_MAC = 20,
- SEC_HMAC_SHA256_MAC = 32,
- SEC_HMAC_SHA512_MAC = 64,
-};
-
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index c35533d8930b..72cf48d1f3ab 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -14,9 +14,9 @@
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
-
#include "sec.h"
+#define CAP_FILE_PERMISSION 0444
#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
#define PCI_DEVICE_ID_HUAWEI_SEC_PF 0xa255
@@ -167,11 +167,34 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
{SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
};
-static const u32 sec_pre_store_caps[] = {
- SEC_DRV_ALG_BITMAP_LOW,
- SEC_DRV_ALG_BITMAP_HIGH,
- SEC_DEV_ALG_BITMAP_LOW,
- SEC_DEV_ALG_BITMAP_HIGH,
+static const struct hisi_qm_cap_query_info sec_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C77, 0x7C77},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {SEC_RAS_NFE_TYPE, "SEC_RAS_NFE_TYPE ", 0x3130, 0x0, 0x177, 0x60177},
+ {SEC_RAS_NFE_RESET, "SEC_RAS_NFE_RESET ", 0x3134, 0x0, 0x177, 0x177},
+ {SEC_RAS_CE_TYPE, "SEC_RAS_CE_TYPE ", 0x3138, 0x0, 0x88, 0xC088},
+ {SEC_CORE_INFO, "SEC_CORE_INFO ", 0x313c, 0x110404, 0x110404, 0x110404},
+ {SEC_CORE_EN, "SEC_CORE_EN ", 0x3140, 0x17F, 0x17F, 0xF},
+ {SEC_DRV_ALG_BITMAP_LOW_TB, "SEC_DRV_ALG_BITMAP_LOW ",
+ 0x3144, 0x18050CB, 0x18050CB, 0x18670CF},
+ {SEC_DRV_ALG_BITMAP_HIGH_TB, "SEC_DRV_ALG_BITMAP_HIGH ",
+ 0x3148, 0x395C, 0x395C, 0x395C},
+ {SEC_ALG_BITMAP_LOW, "SEC_ALG_BITMAP_LOW ",
+ 0x314c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_ALG_BITMAP_HIGH, "SEC_ALG_BITMAP_HIGH ", 0x3150, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE1_BITMAP_LOW, "SEC_CORE1_BITMAP_LOW ",
+ 0x3154, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE1_BITMAP_HIGH, "SEC_CORE1_BITMAP_HIGH ", 0x3158, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE2_BITMAP_LOW, "SEC_CORE2_BITMAP_LOW ",
+ 0x315c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE2_BITMAP_HIGH, "SEC_CORE2_BITMAP_HIGH ", 0x3160, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE3_BITMAP_LOW, "SEC_CORE3_BITMAP_LOW ",
+ 0x3164, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE3_BITMAP_HIGH, "SEC_CORE3_BITMAP_HIGH ", 0x3168, 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE4_BITMAP_LOW, "SEC_CORE4_BITMAP_LOW ",
+ 0x316c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE4_BITMAP_HIGH, "SEC_CORE4_BITMAP_HIGH ", 0x3170, 0x3FFF, 0x3FFF, 0x3FFF},
};
static const struct qm_dev_alg sec_dev_algs[] = { {
@@ -322,7 +345,7 @@ static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}
static const struct kernel_param_ops sec_pf_q_num_ops = {
@@ -838,6 +861,26 @@ static int sec_regs_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(sec_regs);
+static int sec_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sec_cap_regs);
+
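
DEFINE_SHOW_ATTRIBUTE(sec_cap_regs) supplies the debugfs boilerplate for the show routine above; per include/linux/seq_file.h it expands to roughly:

    static int sec_cap_regs_open(struct inode *inode, struct file *file)
    {
            return single_open(file, sec_cap_regs_show, inode->i_private);
    }

    static const struct file_operations sec_cap_regs_fops = {
            .owner   = THIS_MODULE,
            .open    = sec_cap_regs_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };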
static int sec_core_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
@@ -872,6 +915,9 @@ static int sec_core_debug_init(struct hisi_qm *qm)
tmp_d, data, &sec_atomic64_ops);
}
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &sec_cap_regs_fops);
+
return 0;
}
@@ -1010,11 +1056,15 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
- nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
+}
+
+static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask;
+
+ nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
}
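
Clearing and disabling are now separated: sec_clear_hw_err_status() only acknowledges the interrupt source, while the new sec_disable_error_report() masks the offending NFE bits so the same error cannot storm until recovery restores the full mask. A hedged usage sketch mirroring sec_get_err_result() below:

    u32 err = sec_get_hw_err_status(qm);

    if (err & qm->err_info.dev_reset_mask)
            sec_disable_error_report(qm, err);      /* mask until the device recovers */
    else
            sec_clear_hw_err_status(qm, err);       /* ack and keep reporting */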
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1026,6 +1076,38 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}
+static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ sec_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_reset_mask) {
+ /* Disable the same error reporting until device is recovered. */
+ sec_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ sec_clear_hw_err_status(qm, err_status);
+ }
+
+ return ACC_ERR_RECOVERED;
+}
+
+static bool sec_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = sec_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return false;
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1052,12 +1134,13 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.hw_err_disable = sec_hw_error_disable,
.get_dev_hw_err_status = sec_get_hw_err_status,
.clear_dev_hw_err_status = sec_clear_hw_err_status,
- .log_dev_hw_err = sec_log_hw_error,
.open_axi_master_ooo = sec_open_axi_master_ooo,
.open_sva_prefetch = sec_open_sva_prefetch,
.close_sva_prefetch = sec_close_sva_prefetch,
.show_last_dfx_regs = sec_show_last_dfx_regs,
.err_info_init = sec_err_info_init,
+ .get_err_result = sec_get_err_result,
+ .dev_is_abnormal = sec_dev_is_abnormal,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1085,18 +1168,20 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(sec_pre_store_caps);
+ size = ARRAY_SIZE(sec_cap_query_info);
sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
if (!sec_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- sec_cap[i].type = sec_pre_store_caps[i];
- sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info,
- sec_pre_store_caps[i], qm->cap_ver);
+ sec_cap[i].type = sec_cap_query_info[i].type;
+ sec_cap[i].name = sec_cap_query_info[i].name;
+ sec_cap[i].cap_val = hisi_qm_get_cap_value(qm, sec_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.dev_cap_table = sec_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1107,7 +1192,6 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -1146,8 +1230,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
hisi_qm_uninit(qm);
return ret;
}
-
- alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX);
+ alg_msk = sec_get_alg_bitmap(qm, SEC_ALG_BITMAP_HIGH, SEC_ALG_BITMAP_LOW);
ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set sec algs!\n");
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index 66c551ecdee8..ac74df4a9471 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -324,7 +324,7 @@ MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match);
static struct platform_driver hisi_trng_driver = {
.probe = hisi_trng_probe,
- .remove_new = hisi_trng_remove,
+ .remove = hisi_trng_remove,
.driver = {
.name = "hisi-trng-v2",
.acpi_match_table = ACPI_PTR(hisi_trng_acpi_match),
diff --git a/drivers/crypto/hisilicon/zip/Makefile b/drivers/crypto/hisilicon/zip/Makefile
index a936f099ee22..13de020b77d6 100644
--- a/drivers/crypto/hisilicon/zip/Makefile
+++ b/drivers/crypto/hisilicon/zip/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CRYPTO_DEV_HISI_ZIP) += hisi_zip.o
-hisi_zip-objs = zip_main.o zip_crypto.o
+hisi_zip-objs = zip_main.o zip_crypto.o dae_main.o
diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c
new file mode 100644
index 000000000000..6f22e4c36e49
--- /dev/null
+++ b/drivers/crypto/hisilicon/zip/dae_main.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 HiSilicon Limited. */
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uacce.h>
+#include "zip.h"
+
+/* memory */
+#define DAE_MEM_START_OFFSET 0x331040
+#define DAE_MEM_DONE_OFFSET 0x331044
+#define DAE_MEM_START_MASK 0x1
+#define DAE_MEM_DONE_MASK 0x1
+#define DAE_REG_RD_INTVRL_US 10
+#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
+
+#define DAE_ALG_NAME "hashagg"
+
+/* error */
+#define DAE_AXI_CFG_OFFSET 0x331000
+#define DAE_AXI_SHUTDOWN_MASK (BIT(0) | BIT(5))
+#define DAE_ERR_SOURCE_OFFSET 0x331C84
+#define DAE_ERR_STATUS_OFFSET 0x331C88
+#define DAE_ERR_CE_OFFSET 0x331CA0
+#define DAE_ERR_CE_MASK BIT(3)
+#define DAE_ERR_NFE_OFFSET 0x331CA4
+#define DAE_ERR_NFE_MASK 0x17
+#define DAE_ERR_FE_OFFSET 0x331CA8
+#define DAE_ERR_FE_MASK 0
+#define DAE_ECC_MBIT_MASK BIT(2)
+#define DAE_ECC_INFO_OFFSET 0x33400C
+#define DAE_ERR_SHUTDOWN_OFFSET 0x331CAC
+#define DAE_ERR_SHUTDOWN_MASK 0x17
+#define DAE_ERR_ENABLE_OFFSET 0x331C80
+#define DAE_ERR_ENABLE_MASK (DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK)
+#define DAE_AM_CTRL_GLOBAL_OFFSET 0x330000
+#define DAE_AM_RETURN_OFFSET 0x330150
+#define DAE_AM_RETURN_MASK 0x3
+#define DAE_AXI_CFG_OFFSET 0x331000
+#define DAE_AXI_SHUTDOWN_EN_MASK (BIT(0) | BIT(5))
+
+struct hisi_dae_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
+static const struct hisi_dae_hw_error dae_hw_error[] = {
+ { .int_msk = BIT(0), .msg = "dae_axi_bus_err" },
+ { .int_msk = BIT(1), .msg = "dae_axi_poison_err" },
+ { .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" },
+ { .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" },
+ { .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" },
+};
+
+static inline bool dae_is_support(struct hisi_qm *qm)
+{
+ if (test_bit(QM_SUPPORT_DAE, &qm->caps))
+ return true;
+
+ return false;
+}
+
+int hisi_dae_set_user_domain(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_MEM_START_OFFSET);
+ val |= DAE_MEM_START_MASK;
+ writel(val, qm->io_base + DAE_MEM_START_OFFSET);
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val,
+ val & DAE_MEM_DONE_MASK,
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to init dae memory!\n");
+
+ return ret;
+}
+
+int hisi_dae_set_alg(struct hisi_qm *qm)
+{
+ size_t len;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ if (!qm->uacce)
+ return 0;
+
+ len = strlen(qm->uacce->algs);
+ /* A line break may be required */
+ if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
+ pci_err(qm->pdev, "algorithm name is too long!\n");
+ return -EINVAL;
+ }
+
+ if (len)
+ strcat((char *)qm->uacce->algs, "\n");
+
+ strcat((char *)qm->uacce->algs, DAE_ALG_NAME);
+
+ return 0;
+}
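
hisi_dae_set_alg() appends "hashagg" to the uacce algorithm string, reserving one byte for the '\n' separator placed before the new name. A hedged generic form of the same capacity check (QM_DEV_ALG_MAX_LEN is the buffer size the driver assumes):

    static int example_append_alg(char *algs, size_t cap, const char *name)
    {
            size_t len = strlen(algs);

            if (len + strlen(name) + 1 >= cap)      /* +1 for the '\n' separator */
                    return -EINVAL;
            if (len)
                    strcat(algs, "\n");
            strcat(algs, name);
            return 0;
    }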
+
+static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
+{
+ u32 axi_val, err_val;
+
+ axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+ if (enable) {
+ axi_val |= DAE_AXI_SHUTDOWN_MASK;
+ err_val = DAE_ERR_SHUTDOWN_MASK;
+ } else {
+ axi_val &= ~DAE_AXI_SHUTDOWN_MASK;
+ err_val = 0;
+ }
+
+ writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET);
+}
+
+void hisi_dae_hw_error_enable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ /* clear any pending dae hw error source */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+
+ /* configure error type */
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+ writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET);
+
+ hisi_dae_master_ooo_ctrl(qm, true);
+
+ /* enable dae hw error interrupts */
+ writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+}
+
+void hisi_dae_hw_error_disable(struct hisi_qm *qm)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET);
+ hisi_dae_master_ooo_ctrl(qm, false);
+}
+
+static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + DAE_ERR_STATUS_OFFSET);
+}
+
+static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ if (!dae_is_support(qm))
+ return;
+
+ writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET);
+}
+
+static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
+static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
+{
+ const struct hisi_dae_hw_error *err = dae_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 ecc_info;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) {
+ err = &dae_hw_error[i];
+ if (!(err->int_msk & err_type))
+ continue;
+
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+
+ if (err->int_msk & DAE_ECC_MBIT_MASK) {
+ ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET);
+ dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info);
+ }
+ }
+}
+
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return ACC_ERR_NONE;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (!err_status)
+ return ACC_ERR_NONE;
+
+ hisi_dae_log_hw_error(qm, err_status);
+
+ if (err_status & DAE_ERR_NFE_MASK) {
+ /* Disable the same error reporting until device is recovered. */
+ hisi_dae_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ }
+ hisi_dae_clear_hw_err_status(qm, err_status);
+
+ return ACC_ERR_RECOVERED;
+}
+
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ if (!dae_is_support(qm))
+ return false;
+
+ err_status = hisi_dae_get_hw_err_status(qm);
+ if (err_status & DAE_ERR_NFE_MASK)
+ return true;
+
+ return false;
+}
+
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!dae_is_support(qm))
+ return 0;
+
+ val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+ val |= BIT(0);
+ writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET,
+ val, (val == DAE_AM_RETURN_MASK),
+ DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n");
+
+ return ret;
+}
+
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (!dae_is_support(qm))
+ return;
+
+ val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
+
+ writel(val & ~DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+ writel(val | DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
+}
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index f2e6da3240ae..9fb2a9c01132 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -81,8 +81,34 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
+enum zip_cap_table_type {
+ QM_RAS_NFE_TYPE,
+ QM_RAS_NFE_RESET,
+ QM_RAS_CE_TYPE,
+ ZIP_RAS_NFE_TYPE,
+ ZIP_RAS_NFE_RESET,
+ ZIP_RAS_CE_TYPE,
+ ZIP_CORE_INFO,
+ ZIP_CORE_EN,
+ ZIP_DRV_ALG_BITMAP_TB,
+ ZIP_ALG_BITMAP,
+ ZIP_CORE1_BITMAP,
+ ZIP_CORE2_BITMAP,
+ ZIP_CORE3_BITMAP,
+ ZIP_CORE4_BITMAP,
+ ZIP_CORE5_BITMAP,
+};
+
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
+int hisi_dae_set_user_domain(struct hisi_qm *qm);
+int hisi_dae_set_alg(struct hisi_qm *qm);
+void hisi_dae_hw_error_disable(struct hisi_qm *qm);
+void hisi_dae_hw_error_enable(struct hisi_qm *qm);
+void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm);
+int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm);
+bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm);
+enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index d07e47b48be0..d8ba23b7cc7d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -14,6 +14,7 @@
#include <linux/uacce.h>
#include "zip.h"
+#define CAP_FILE_PERMISSION 0444
#define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250
#define HZIP_QUEUE_NUM_V1 4096
@@ -250,24 +251,22 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = {
{ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
};
-enum zip_pre_store_cap_idx {
- ZIP_CORE_NUM_CAP_IDX = 0x0,
- ZIP_CLUSTER_COMP_NUM_CAP_IDX,
- ZIP_CLUSTER_DECOMP_NUM_CAP_IDX,
- ZIP_DECOMP_ENABLE_BITMAP_IDX,
- ZIP_COMP_ENABLE_BITMAP_IDX,
- ZIP_DRV_ALG_BITMAP_IDX,
- ZIP_DEV_ALG_BITMAP_IDX,
-};
-
-static const u32 zip_pre_store_caps[] = {
- ZIP_CORE_NUM_CAP,
- ZIP_CLUSTER_COMP_NUM_CAP,
- ZIP_CLUSTER_DECOMP_NUM_CAP,
- ZIP_DECOMP_ENABLE_BITMAP,
- ZIP_COMP_ENABLE_BITMAP,
- ZIP_DRV_ALG_BITMAP,
- ZIP_DEV_ALG_BITMAP,
+static const struct hisi_qm_cap_query_info zip_cap_query_info[] = {
+ {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C57, 0x7C77},
+ {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC57, 0x6C77},
+ {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
+ {ZIP_RAS_NFE_TYPE, "ZIP_RAS_NFE_TYPE ", 0x3130, 0x0, 0x7FE, 0x1FFE},
+ {ZIP_RAS_NFE_RESET, "ZIP_RAS_NFE_RESET ", 0x3134, 0x0, 0x7FE, 0x7FE},
+ {ZIP_RAS_CE_TYPE, "ZIP_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
+ {ZIP_CORE_INFO, "ZIP_CORE_INFO ", 0x313C, 0x12080206, 0x12080206, 0x12050203},
+ {ZIP_CORE_EN, "ZIP_CORE_EN ", 0x3140, 0xFC0003, 0xFC0003, 0x1C0003},
+ {ZIP_DRV_ALG_BITMAP_TB, "ZIP_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x0, 0x30},
+ {ZIP_ALG_BITMAP, "ZIP_ALG_BITMAP ", 0x3148, 0xF, 0xF, 0x3F},
+ {ZIP_CORE1_BITMAP, "ZIP_CORE1_BITMAP ", 0x314C, 0x5, 0x5, 0xD5},
+ {ZIP_CORE2_BITMAP, "ZIP_CORE2_BITMAP ", 0x3150, 0x5, 0x5, 0xD5},
+ {ZIP_CORE3_BITMAP, "ZIP_CORE3_BITMAP ", 0x3154, 0xA, 0xA, 0x2A},
+ {ZIP_CORE4_BITMAP, "ZIP_CORE4_BITMAP ", 0x3158, 0xA, 0xA, 0x2A},
+ {ZIP_CORE5_BITMAP, "ZIP_CORE5_BITMAP ", 0x315C, 0xA, 0xA, 0x2A},
};
static const struct debugfs_reg32 hzip_dfx_regs[] = {
@@ -402,7 +401,7 @@ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
- return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
+ return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
@@ -442,7 +441,7 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
- cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_IDX].cap_val;
+ cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_TB].cap_val;
if ((alg & cap_val) == alg)
return true;
@@ -530,6 +529,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
u32 dcomp_bm, comp_bm;
+ u32 zip_core_en;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -567,8 +567,12 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
}
/* let's open all compression/decompression cores */
- dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val;
- comp_bm = qm->cap_tables.dev_cap_table[ZIP_COMP_ENABLE_BITMAP_IDX].cap_val;
+
+ zip_core_en = qm->cap_tables.dev_cap_table[ZIP_CORE_EN].cap_val;
+ dcomp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].shift) &
+ zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].mask;
+ comp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].shift) &
+ zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].mask;
writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
@@ -578,7 +582,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
hisi_zip_enable_clock_gate(qm);
- return 0;
+ return hisi_dae_set_user_domain(qm);
}
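
ZIP_CORE_EN now stores several legacy sub-fields packed into one capability word, and callers recover them with the shift/mask pairs still kept in zip_basic_cap_info[]. A hedged helper equivalent to the open-coded extraction above (the function name and the plain u32 field index are illustrative):

    static u32 example_zip_cap_field(struct hisi_qm *qm, u32 table_idx, u32 field)
    {
            u32 packed = qm->cap_tables.dev_cap_table[table_idx].cap_val;

            return (packed >> zip_basic_cap_info[field].shift) &
                   zip_basic_cap_info[field].mask;
    }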
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -627,6 +631,8 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
/* enable ZIP hw error interrupts */
writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
@@ -639,6 +645,8 @@ static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
+
+ hisi_dae_hw_error_disable(qm);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -788,7 +796,12 @@ DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
static void __iomem *get_zip_core_addr(struct hisi_qm *qm, int core_num)
{
- u32 zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ u8 zip_comp_core_num;
+ u32 zip_core_info;
+
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
if (core_num < zip_comp_core_num)
return qm->io_base + HZIP_CORE_DFX_BASE +
@@ -803,12 +816,16 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
u32 zip_core_num, zip_comp_core_num;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
+ u32 zip_core_info;
struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
- zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
for (i = 0; i < zip_core_num; i++) {
if (i < zip_comp_core_num)
@@ -834,6 +851,26 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
return 0;
}
+static int zip_cap_regs_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ u32 i, size;
+
+ size = qm->cap_tables.qm_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
+ qm->cap_tables.qm_cap_table[i].cap_val);
+
+ size = qm->cap_tables.dev_cap_size;
+ for (i = 0; i < size; i++)
+ seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
+ qm->cap_tables.dev_cap_table[i].cap_val);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(zip_cap_regs);
+
static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs;
@@ -854,6 +891,9 @@ static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_PF && hzip_regs)
debugfs_create_file("diff_regs", 0444, tmp_dir,
qm, &hzip_diff_regs_fops);
+
+ debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
+ qm->debug.debug_root, qm, &zip_cap_regs_fops);
}
static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
@@ -912,9 +952,14 @@ debugfs_remove:
/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
- u32 zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+ u32 zip_core_info;
+ u8 zip_core_num;
int i, j;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+
/* enable register read_clear bit */
writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
for (i = 0; i < zip_core_num; i++)
@@ -946,10 +991,13 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 zip_core_info;
u32 zip_core_num;
int i, j, idx;
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
sizeof(unsigned int), GFP_KERNEL);
@@ -991,6 +1039,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
u32 zip_core_num, zip_comp_core_num;
struct qm_debug *debug = &qm->debug;
char buf[HZIP_BUF_SIZE];
+ u32 zip_core_info;
void __iomem *base;
int i, j, idx;
u32 val;
@@ -1005,8 +1054,11 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
hzip_com_dfx_regs[i].name, debug->last_words[i], val);
}
- zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
- zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
+ zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val;
+ zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask;
+ zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) &
+ zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask;
for (i = 0; i < zip_core_num; i++) {
if (i < zip_comp_core_num)
@@ -1059,11 +1111,15 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- u32 nfe;
-
writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+}
+
+static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
+{
+ u32 nfe_mask;
+
+ nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1077,6 +1133,8 @@ static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ hisi_dae_open_axi_master_ooo(qm);
}
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
@@ -1093,6 +1151,51 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HZIP_CORE_INT_SET);
}
+static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
+{
+ enum acc_err_result zip_result = ACC_ERR_NONE;
+ enum acc_err_result dae_result;
+ u32 err_status;
+
+ /* Get the device's new hardware error status */
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status) {
+ if (err_status & qm->err_info.ecc_2bits_mask)
+ qm->err_status.is_dev_ecc_mbit = true;
+ hisi_zip_log_hw_error(qm, err_status);
+
+ if (err_status & qm->err_info.dev_reset_mask) {
+ /* Disable the same error reporting until device is recovered. */
+ hisi_zip_disable_error_report(qm, err_status);
+ return ACC_ERR_NEED_RESET;
+ } else {
+ hisi_zip_clear_hw_err_status(qm, err_status);
+ }
+ }
+
+ dae_result = hisi_dae_get_err_result(qm);
+
+ return (zip_result == ACC_ERR_NEED_RESET ||
+ dae_result == ACC_ERR_NEED_RESET) ?
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
+}
+
+static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
+{
+ u32 err_status;
+
+ err_status = hisi_zip_get_hw_err_status(qm);
+ if (err_status & qm->err_info.dev_shutdown_mask)
+ return true;
+
+ return hisi_dae_dev_is_abnormal(qm);
+}
+
+static int hisi_zip_set_priv_status(struct hisi_qm *qm)
+{
+ return hisi_dae_close_axi_master_ooo(qm);
+}
+
static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@@ -1120,13 +1223,15 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.hw_err_disable = hisi_zip_hw_error_disable,
.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
- .log_dev_hw_err = hisi_zip_log_hw_error,
.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
.open_sva_prefetch = hisi_zip_open_sva_prefetch,
.close_sva_prefetch = hisi_zip_close_sva_prefetch,
.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
.err_info_init = hisi_zip_err_info_init,
+ .get_err_result = hisi_zip_get_err_result,
+ .set_priv_status = hisi_zip_set_priv_status,
+ .dev_is_abnormal = hisi_zip_dev_is_abnormal,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1167,18 +1272,20 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
size_t i, size;
- size = ARRAY_SIZE(zip_pre_store_caps);
+ size = ARRAY_SIZE(zip_cap_query_info);
zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
if (!zip_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
- zip_cap[i].type = zip_pre_store_caps[i];
- zip_cap[i].cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- zip_pre_store_caps[i], qm->cap_ver);
+ zip_cap[i].type = zip_cap_query_info[i].type;
+ zip_cap[i].name = zip_cap_query_info[i].name;
+ zip_cap[i].cap_val = hisi_qm_get_cap_value(qm, zip_cap_query_info,
+ i, qm->cap_ver);
}
qm->cap_tables.dev_cap_table = zip_cap;
+ qm->cap_tables.dev_cap_size = size;
return 0;
}
@@ -1189,7 +1296,6 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
int ret;
qm->pdev = pdev;
- qm->ver = pdev->revision;
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1226,17 +1332,24 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
ret = zip_pre_store_cap_reg(qm);
if (ret) {
pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
- hisi_qm_uninit(qm);
- return ret;
+ goto err_qm_uninit;
}
- alg_msk = qm->cap_tables.dev_cap_table[ZIP_DEV_ALG_BITMAP_IDX].cap_val;
+ alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs));
if (ret) {
pci_err(qm->pdev, "Failed to set zip algs!\n");
- hisi_qm_uninit(qm);
+ goto err_qm_uninit;
}
+ ret = hisi_dae_set_alg(qm);
+ if (ret)
+ goto err_qm_uninit;
+
+ return 0;
+
+err_qm_uninit:
+ hisi_qm_uninit(qm);
return ret;
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 7e93159c3b6b..1dc2378aa88b 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -1084,7 +1084,7 @@ static const struct dev_pm_ops img_hash_pm_ops = {
static struct platform_driver img_hash_driver = {
.probe = img_hash_probe,
- .remove_new = img_hash_remove,
+ .remove = img_hash_remove,
.driver = {
.name = "img-hash-accelerator",
.pm = &img_hash_pm_ops,
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index f5c1912aa564..9ca80d082c4f 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1868,7 +1868,7 @@ MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
static struct platform_driver crypto_safexcel = {
.probe = safexcel_probe,
- .remove_new = safexcel_remove,
+ .remove = safexcel_remove,
.driver = {
.name = "crypto-safexcel",
.of_match_table = safexcel_of_match_table,
@@ -2031,7 +2031,7 @@ MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
MODULE_FIRMWARE("ifpp.bin");
MODULE_FIRMWARE("ipue.bin");
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index e17577b785c3..f44c08f5f5ec 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -2093,7 +2093,7 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
safexcel_ahash_cra_init(tfm);
ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL);
- return PTR_ERR_OR_ZERO(ctx->aes);
+ return ctx->aes == NULL ? -ENOMEM : 0;
}
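
The old return was subtly wrong: kmalloc() signals failure with NULL, not an ERR_PTR, and PTR_ERR_OR_ZERO(NULL) evaluates to 0, so an allocation failure was reported as success and left ctx->aes NULL. A minimal demonstration of the pitfall (illustrative function, not driver code):

    static int example_broken_check(void)
    {
            void *p = NULL;                 /* what kmalloc() returns on failure */

            return PTR_ERR_OR_ZERO(p);      /* 0: the failure is silently masked */
    }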
static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/intel/iaa/Makefile b/drivers/crypto/intel/iaa/Makefile
index b64b208d2344..55bda7770fac 100644
--- a/drivers/crypto/intel/iaa/Makefile
+++ b/drivers/crypto/intel/iaa/Makefile
@@ -3,7 +3,7 @@
# Makefile for IAA crypto device drivers
#
-ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE='"IDXD"'
obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o
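
These namespace changes track the kernel-wide switch (around v6.13) from token-based to string-literal symbol namespaces, which is also why the Makefile must quote DEFAULT_SYMBOL_NAMESPACE so it expands to a C string. The two spellings side by side:

    MODULE_IMPORT_NS(IDXD);     /* old: namespace as a bare token */
    MODULE_IMPORT_NS("IDXD");   /* new: namespace as a string literal */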
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 237f87000070..c3776b0de51d 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -173,7 +173,7 @@ static int set_iaa_sync_mode(const char *name)
async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async")) {
- async_mode = true;
+ async_mode = false;
use_irq = false;
} else if (sysfs_streq(name, "async_irq")) {
async_mode = true;
@@ -945,12 +945,22 @@ static inline int check_completion(struct device *dev,
bool only_once)
{
char *op_str = compress ? "compress" : "decompress";
+ int status_checks = 0;
int ret = 0;
while (!comp->status) {
if (only_once)
return -EAGAIN;
cpu_relax();
+ if (status_checks++ >= IAA_COMPLETION_TIMEOUT) {
+ /* Something is wrong with the hw, disable it. */
+ dev_err(dev, "%s completion timed out - "
+ "assuming broken hw, iaa_crypto now DISABLED\n",
+ op_str);
+ iaa_crypto_enabled = false;
+ ret = -ETIMEDOUT;
+ goto out;
+ }
}
if (comp->status != IAX_COMP_SUCCESS) {
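
The busy-wait on comp->status is now bounded: the loop counts cpu_relax() iterations against IAA_COMPLETION_TIMEOUT and, on expiry, assumes broken hardware and disables iaa_crypto. A hedged sketch of the same bounded-poll idiom in isolation (the limit counts iterations, not wall-clock time):

    static int example_poll_status(volatile u8 *status, unsigned int limit)
    {
            unsigned int spins = 0;

            while (!*status) {
                    cpu_relax();
                    if (++spins >= limit)
                            return -ETIMEDOUT;      /* give up; caller handles it */
            }
            return 0;
    }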
@@ -2084,7 +2094,7 @@ static void __exit iaa_crypto_cleanup_module(void)
pr_debug("cleaned up\n");
}
-MODULE_IMPORT_NS(IDXD);
+MODULE_IMPORT_NS("IDXD");
MODULE_LICENSE("GPL");
MODULE_ALIAS_IDXD_DEVICE(0);
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index f8a77bff8844..fcc0cf4df637 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -471,6 +471,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
npe_id = npe_spec.args[0];
+ of_node_put(npe_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
&queue_spec);
@@ -479,6 +480,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
recv_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
&queue_spec);
@@ -487,6 +489,7 @@ static int init_ixp_crypto(struct device *dev)
return -ENODEV;
}
send_qid = queue_spec.args[0];
+ of_node_put(queue_spec.np);
} else {
/*
* Hardcoded engine when using platform data, this goes away
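
The three of_node_put() additions fix device-node reference leaks: a successful of_parse_phandle_with_fixed_args() takes a reference on the returned node that the caller owns and must drop once the arguments are consumed. A hedged sketch of the rule, using the queue-rx lookup from this function:

    struct of_phandle_args args;

    if (!of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0, &args)) {
            recv_qid = args.args[0];        /* consume the arguments ... */
            of_node_put(args.np);           /* ... then drop the reference */
    }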
@@ -1588,7 +1591,7 @@ static const struct of_device_id ixp4xx_crypto_of_match[] = {
static struct platform_driver ixp_crypto_driver = {
.probe = ixp_crypto_probe,
- .remove_new = ixp_crypto_remove,
+ .remove = ixp_crypto_remove,
.driver = {
.name = "ixp4xx_crypto",
.of_match_table = ixp4xx_crypto_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
index 9b2d098e5eb2..8a8f6c81e010 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
@@ -1656,7 +1656,7 @@ list_del:
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_aes_driver = {
.probe = kmb_ocs_aes_probe,
- .remove_new = kmb_ocs_aes_remove,
+ .remove = kmb_ocs_aes_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_aes_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
index 5e24f2d8affc..59308926399d 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
@@ -991,7 +991,7 @@ static const struct of_device_id kmb_ocs_ecc_of_match[] = {
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_ecc_driver = {
.probe = kmb_ocs_ecc_probe,
- .remove_new = kmb_ocs_ecc_remove,
+ .remove = kmb_ocs_ecc_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_ecc_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index e54c79890d44..95dc8979918d 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -1243,7 +1243,7 @@ list_del:
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_hcu_driver = {
.probe = kmb_ocs_hcu_probe,
- .remove_new = kmb_ocs_hcu_remove,
+ .remove = kmb_ocs_hcu_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_hcu_of_match,
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 78f0ea49254d..9faef33e54bd 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -375,7 +375,7 @@ static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
else
id = -EINVAL;
- if (id < 0 || id > num_objs)
+ if (id < 0 || id >= num_objs)
return NULL;
return fw_objs[id];
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
index f49818a13013..9589d60fb281 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -129,16 +129,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
- ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
+ ret = pcim_request_all_regions(pdev, pci_name(pdev));
if (ret) {
- dev_err(&pdev->dev, "Failed to map pci regions.\n");
+ dev_err(&pdev->dev, "Failed to request PCI regions.\n");
goto out_err;
}
i = 0;
for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
bar = &accel_pci_dev->pci_bars[i++];
- bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
+ bar->virt_addr = pcim_iomap(pdev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to ioremap PCI region.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
}
pci_set_master(pdev);
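
pcim_iomap_regions_request_all() is being retired, so the probe path now requests all regions once and maps each BAR individually. Note the error convention: pcim_iomap() returns NULL on failure, not an ERR_PTR, hence the explicit NULL check. The adopted pattern in miniature (BAR 0, whole-BAR mapping assumed):

    void __iomem *addr;
    int ret;

    ret = pcim_request_all_regions(pdev, pci_name(pdev));
    if (ret)
            return ret;

    addr = pcim_iomap(pdev, 0, 0);          /* BAR 0, full length */
    if (!addr)
            return -ENOMEM;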
@@ -199,4 +204,4 @@ MODULE_FIRMWARE(ADF_420XX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 9fd7ec53b9f3..bbd92c017c28 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -334,7 +334,7 @@ static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
else
id = -EINVAL;
- if (id < 0 || id > num_objs)
+ if (id < 0 || id >= num_objs)
return NULL;
return fw_objs[id];
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 659905e45950..d7de1cad1335 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -131,16 +131,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
- ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
+ ret = pcim_request_all_regions(pdev, pci_name(pdev));
if (ret) {
- dev_err(&pdev->dev, "Failed to map pci regions.\n");
+ dev_err(&pdev->dev, "Failed to request PCI regions.\n");
goto out_err;
}
i = 0;
for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
bar = &accel_pci_dev->pci_bars[i++];
- bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
+ bar->virt_addr = pcim_iomap(pdev, bar_nr, 0);
+ if (!bar->virt_addr) {
+ dev_err(&pdev->dev, "Failed to ioremap PCI region.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
}
pci_set_master(pdev);
@@ -203,4 +208,4 @@ MODULE_FIRMWARE(ADF_402XX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index 4d18057745d4..caa53882fda6 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -252,4 +252,4 @@ MODULE_FIRMWARE(ADF_C3XXX_FW);
MODULE_FIRMWARE(ADF_C3XXX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
index f0023cfb234c..c622793e94a8 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
@@ -226,4 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index e6b5de55434e..b7398fee19ed 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -252,4 +252,4 @@ MODULE_FIRMWARE(ADF_C62X_FW);
MODULE_FIRMWARE(ADF_C62X_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
index 2bd5b0ff00e3..4840d44bbd5b 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
@@ -226,4 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index eac73cbfdd38..7acf9c576149 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
intel_qat-objs := adf_cfg.o \
adf_isr.o \
adf_ctl_drv.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index ec7913ab00a2..4cb8bd83f570 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -281,8 +281,11 @@ int adf_init_aer(void)
return -EFAULT;
device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
- if (!device_sriov_wq)
+ if (!device_sriov_wq) {
+ destroy_workqueue(device_reset_wq);
+ device_reset_wq = NULL;
return -EFAULT;
+ }
return 0;
}
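The adf_init_aer() hunk fixes an error path: when the second workqueue cannot be allocated, the first must be destroyed before returning. The same unwind pattern in isolation (workqueue names illustrative, and -ENOMEM used here where the driver happens to return -EFAULT):

#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq_a, *demo_wq_b;

static int demo_init(void)
{
        demo_wq_a = alloc_workqueue("demo_wq_a", 0, 0);
        if (!demo_wq_a)
                return -ENOMEM;

        demo_wq_b = alloc_workqueue("demo_wq_b", 0, 0);
        if (!demo_wq_b) {
                /* Unwind the earlier allocation before failing. */
                destroy_workqueue(demo_wq_a);
                demo_wq_a = NULL;
                return -ENOMEM;
        }
        return 0;
}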
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index f7ecabdf7805..eaa6388a6678 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -69,7 +69,6 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
struct list_head *adf_devmgr_get_head(void);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id);
-struct adf_accel_dev *adf_devmgr_get_first(void);
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
int adf_devmgr_verify_id(u32 id);
void adf_devmgr_get_num_dev(u32 *num);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
index 70fa0f6497a9..48c62a14a6a7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
@@ -475,4 +475,4 @@ MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
index c42f5c25aabd..4c11ad1ebcf0 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -22,18 +22,13 @@
void adf_dbgfs_init(struct adf_accel_dev *accel_dev)
{
char name[ADF_DEVICE_NAME_LENGTH];
- void *ret;
/* Create dev top level debugfs entry */
snprintf(name, sizeof(name), "%s%s_%s", ADF_DEVICE_NAME_PREFIX,
accel_dev->hw_device->dev_class->name,
pci_name(accel_dev->accel_pci_dev.pci_dev));
- ret = debugfs_create_dir(name, NULL);
- if (IS_ERR_OR_NULL(ret))
- return;
-
- accel_dev->debugfs_dir = ret;
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
adf_cfg_dev_dbgfs_add(accel_dev);
}
@@ -59,9 +54,6 @@ EXPORT_SYMBOL_GPL(adf_dbgfs_exit);
*/
void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
{
- if (!accel_dev->debugfs_dir)
- return;
-
if (!accel_dev->is_vf) {
adf_fw_counters_dbgfs_add(accel_dev);
adf_heartbeat_dbgfs_add(accel_dev);
@@ -77,9 +69,6 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
*/
void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
- if (!accel_dev->debugfs_dir)
- return;
-
if (!accel_dev->is_vf) {
adf_tl_dbgfs_rm(accel_dev);
adf_cnv_dbgfs_rm(accel_dev);
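The adf_dbgfs.c hunks drop the return-value checks because the debugfs API is designed to be fire-and-forget: debugfs_create_dir() returns an ERR_PTR on failure rather than NULL, and every debugfs_create_*() helper silently accepts such a parent, so neither the NULL guards nor the IS_ERR_OR_NULL() test were needed. The idiom, sketched with placeholder names:

#include <linux/debugfs.h>

static struct dentry *demo_dir;
static u32 demo_counter;

static void demo_dbgfs_init(void)
{
        /* On failure this yields an ERR_PTR, which the helper below
         * accepts and quietly ignores - no IS_ERR_OR_NULL() needed.
         */
        demo_dir = debugfs_create_dir("demo", NULL);
        debugfs_create_u32("counter", 0444, demo_dir, &demo_counter);
}

static void demo_dbgfs_exit(void)
{
        debugfs_remove(demo_dir);       /* safe on NULL and ERR_PTR too */
}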
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
index 96ddd1c419c4..34b9f7731c78 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c
@@ -276,16 +276,6 @@ unlock:
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
-struct adf_accel_dev *adf_devmgr_get_first(void)
-{
- struct adf_accel_dev *dev = NULL;
-
- if (!list_empty(&accel_table))
- dev = list_first_entry(&accel_table, struct adf_accel_dev,
- list);
- return dev;
-}
-
/**
* adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
* @pci_dev: Pointer to PCI device.
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
index ee0b5079de3e..2e4095c4c12c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
@@ -42,13 +42,13 @@ struct pm_status_row {
const char *key;
};
-static struct pm_status_row pm_fuse_rows[] = {
+static const struct pm_status_row pm_fuse_rows[] = {
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE),
};
-static struct pm_status_row pm_info_rows[] = {
+static const struct pm_status_row pm_info_rows[] = {
PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE),
PM_INFO_REGSET_ENTRY(pm.status, PENDING_WP),
PM_INFO_REGSET_ENTRY(pm.status, CURRENT_WP),
@@ -59,7 +59,7 @@ static struct pm_status_row pm_info_rows[] = {
PM_INFO_REGSET_ENTRY(pm.main, THR_VALUE),
};
-static struct pm_status_row pm_ssm_rows[] = {
+static const struct pm_status_row pm_ssm_rows[] = {
PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE),
PM_INFO_REGSET_ENTRY32(ssm.active_constraint, ACTIVE_CONSTRAINT),
PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWER_GATED),
@@ -83,7 +83,7 @@ static struct pm_status_row pm_ssm_rows[] = {
PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WCP_MANAGED_COUNT),
};
-static struct pm_status_row pm_log_rows[] = {
+static const struct pm_status_row pm_log_rows[] = {
PM_INFO_REGSET_ENTRY32(event_counters.host_msg, HOST_MSG_EVENT_COUNT),
PM_INFO_REGSET_ENTRY32(event_counters.sys_pm, SYS_PM_EVENT_COUNT),
PM_INFO_REGSET_ENTRY32(event_counters.local_ssm, SSM_EVENT_COUNT),
@@ -91,7 +91,7 @@ static struct pm_status_row pm_log_rows[] = {
PM_INFO_REGSET_ENTRY32(event_counters.unknown, UNKNOWN_EVENT_COUNT),
};
-static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = {
+static const struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = {
PM_INFO_REGSET_ENTRY32(event_log[0], EVENT0),
PM_INFO_REGSET_ENTRY32(event_log[1], EVENT1),
PM_INFO_REGSET_ENTRY32(event_log[2], EVENT2),
@@ -102,14 +102,14 @@ static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = {
PM_INFO_REGSET_ENTRY32(event_log[7], EVENT7),
};
-static struct pm_status_row pm_csrs_rows[] = {
+static const struct pm_status_row pm_csrs_rows[] = {
PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT),
PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS),
PM_INFO_REGSET_ENTRY32(pm.main, CPM_PM_MASTER_FW),
PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ),
};
-static int pm_scnprint_table(char *buff, struct pm_status_row *table,
+static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
u32 *pm_info_regs, size_t buff_size, int table_len,
bool lowercase)
{
@@ -131,7 +131,7 @@ static int pm_scnprint_table(char *buff, struct pm_status_row *table,
return wr;
}
-static int pm_scnprint_table_upper_keys(char *buff, struct pm_status_row *table,
+static int pm_scnprint_table_upper_keys(char *buff, const struct pm_status_row *table,
u32 *pm_info_regs, size_t buff_size,
int table_len)
{
@@ -139,7 +139,7 @@ static int pm_scnprint_table_upper_keys(char *buff, struct pm_status_row *table,
table_len, false);
}
-static int pm_scnprint_table_lower_keys(char *buff, struct pm_status_row *table,
+static int pm_scnprint_table_lower_keys(char *buff, const struct pm_status_row *table,
u32 *pm_info_regs, size_t buff_size,
int table_len)
{
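Constifying the pm_status_row tables moves them into read-only data; the only knock-on change is const-correct pointer parameters in the printing helpers. In miniature, with illustrative fields:

struct demo_row {
        unsigned int offset;
        const char *key;
};

/* const places the table in .rodata; it can no longer be modified. */
static const struct demo_row demo_rows[] = {
        { 0x00, "status"  },
        { 0x04, "control" },
};

/* Functions taking the table must be const-correct as well. */
static unsigned int demo_sum(const struct demo_row *table, int len)
{
        unsigned int sum = 0;
        int i;

        for (i = 0; i < len; i++)
                sum += table[i].offset;
        return sum;
}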
diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
index 65bd26b25abc..f93d9cca70ce 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c
@@ -90,10 +90,6 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev)
hw_data->get_arb_info(&info);
- /* Reset arbiter configuration */
- for (i = 0; i < ADF_ARB_NUM; i++)
- WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0);
-
/* Unmap worker threads to service arbiters */
for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
index c8241f5a0a26..f20ae7e35a0d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -473,22 +473,6 @@ unlock_and_exit:
}
DEFINE_SHOW_STORE_ATTRIBUTE(tl_control);
-static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num)
-{
- char alpha;
- u8 index;
- int ret;
-
- ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha);
- if (ret != 1)
- return -EINVAL;
-
- index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha);
- *rp_id = index;
-
- return 0;
-}
-
static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev,
unsigned int new_rp_num,
unsigned int rp_regs_index)
@@ -611,18 +595,11 @@ static int tl_rp_data_show(struct seq_file *s, void *unused)
{
struct adf_accel_dev *accel_dev = s->private;
u8 rp_regs_index;
- u8 max_rp;
- int ret;
if (!accel_dev)
return -EINVAL;
- max_rp = GET_TL_DATA(accel_dev).max_rp;
- ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp);
- if (ret) {
- dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
- return ret;
- }
+ rp_regs_index = debugfs_get_aux_num(s->file);
return tl_print_rp_data(accel_dev, s, rp_regs_index);
}
@@ -635,7 +612,6 @@ static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
struct adf_telemetry *telemetry;
unsigned int new_rp_num;
u8 rp_regs_index;
- u8 max_rp;
int ret;
accel_dev = seq_f->private;
@@ -643,15 +619,10 @@ static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
return -EINVAL;
telemetry = accel_dev->telemetry;
- max_rp = GET_TL_DATA(accel_dev).max_rp;
mutex_lock(&telemetry->wr_lock);
- ret = get_rp_index_from_file(file, &rp_regs_index, max_rp);
- if (ret) {
- dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
- goto unlock_and_exit;
- }
+ rp_regs_index = debugfs_get_aux_num(file);
ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num);
if (ret)
@@ -689,7 +660,8 @@ void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev)
for (i = 0; i < max_rp; i++) {
snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME,
ADF_TL_DBG_RP_ALPHA_INDEX(i));
- debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops);
+ debugfs_create_file_aux_num(name, 0644, dir, accel_dev, i,
+ &tl_rp_data_fops);
}
}
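The telemetry hunks retire the sscanf() parsing of the dentry name in favour of the debugfs aux-number API: debugfs_create_file_aux_num() attaches a small integer to the file at creation, and debugfs_get_aux_num() recovers it inside the handlers. A condensed sketch; the "demo" names are made up and the unsigned long return type is an assumption based on the usage above:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *s, void *unused)
{
        /* Recover the index stashed when the file was created. */
        unsigned long idx = debugfs_get_aux_num(s->file);

        seq_printf(s, "ring pair %lu\n", idx);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);

static void demo_add_files(struct dentry *dir, void *priv, int count)
{
        char name[8];
        int i;

        for (i = 0; i < count; i++) {
                snprintf(name, sizeof(name), "rp_%c", 'A' + i);
                /* The fifth argument is the aux number tied to this file. */
                debugfs_create_file_aux_num(name, 0444, dir, priv, i,
                                            &demo_fops);
        }
}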
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index 317cafa9d11f..ef8a9cf74f0c 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -163,7 +163,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
}
- /* Sets the accelaration engine context mode to either four or eight */
+ /* Sets the acceleration engine context mode to either four or eight */
csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 2a50cce41515..3137fc3b5cf6 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -252,4 +252,4 @@ MODULE_FIRMWARE(ADF_DH895XCC_FW);
MODULE_FIRMWARE(ADF_DH895XCC_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
index 7cb015b55122..7cd528ee31e7 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -226,4 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
-MODULE_IMPORT_NS(CRYPTO_QAT);
+MODULE_IMPORT_NS("CRYPTO_QAT");
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
index 78217577aa54..4c25a78ab3ed 100644
--- a/drivers/crypto/marvell/Kconfig
+++ b/drivers/crypto/marvell/Kconfig
@@ -7,7 +7,7 @@ config CRYPTO_DEV_MARVELL
config CRYPTO_DEV_MARVELL_CESA
tristate "Marvell's Cryptographic Engine driver"
- depends on PLAT_ORION || ARCH_MVEBU
+ depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
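The added COMPILE_TEST alternative lets the CESA driver be build-tested on architectures where it cannot actually run. The usual Kconfig shape, with a hypothetical symbol and platform:

config DEMO_DRIVER
        tristate "Demo driver"                  # hypothetical symbol
        depends on ARCH_FOO || COMPILE_TEST     # real platform, or build-test anywhere
        help
          COMPILE_TEST only affects whether the code can be built; it
          makes no claim the driver works on the host architecture.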
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 5fd31ba715c2..fa08f10e6f3f 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -375,7 +375,6 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
- const char *res_name = "sram";
struct resource *res;
engine->pool = of_gen_pool_get(cesa->dev->of_node,
@@ -391,19 +390,7 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
return -ENOMEM;
}
- if (cesa->caps->nengines > 1) {
- if (!idx)
- res_name = "sram0";
- else
- res_name = "sram1";
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- res_name);
- if (!res || resource_size(res) < cesa->sram_size)
- return -EINVAL;
-
- engine->sram = devm_ioremap_resource(cesa->dev, res);
+ engine->sram = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
@@ -510,25 +497,21 @@ static int mv_cesa_probe(struct platform_device *pdev)
* if the clock does not exist.
*/
snprintf(res_name, sizeof(res_name), "cesa%u", i);
- engine->clk = devm_clk_get(dev, res_name);
+ engine->clk = devm_clk_get_optional_enabled(dev, res_name);
if (IS_ERR(engine->clk)) {
- engine->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(engine->clk))
- engine->clk = NULL;
+ engine->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(engine->clk)) {
+ ret = PTR_ERR(engine->clk);
+ goto err_cleanup;
+ }
}
snprintf(res_name, sizeof(res_name), "cesaz%u", i);
- engine->zclk = devm_clk_get(dev, res_name);
- if (IS_ERR(engine->zclk))
- engine->zclk = NULL;
-
- ret = clk_prepare_enable(engine->clk);
- if (ret)
- goto err_cleanup;
-
- ret = clk_prepare_enable(engine->zclk);
- if (ret)
+ engine->zclk = devm_clk_get_optional_enabled(dev, res_name);
+ if (IS_ERR(engine->zclk)) {
+ ret = PTR_ERR(engine->zclk);
goto err_cleanup;
+ }
engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
@@ -570,13 +553,8 @@ static int mv_cesa_probe(struct platform_device *pdev)
return 0;
err_cleanup:
- for (i = 0; i < caps->nengines; i++) {
- clk_disable_unprepare(cesa->engines[i].zclk);
- clk_disable_unprepare(cesa->engines[i].clk);
+ for (i = 0; i < caps->nengines; i++)
mv_cesa_put_sram(pdev, i);
- if (cesa->engines[i].irq > 0)
- irq_set_affinity_hint(cesa->engines[i].irq, NULL);
- }
return ret;
}
@@ -588,12 +566,8 @@ static void mv_cesa_remove(struct platform_device *pdev)
mv_cesa_remove_algs(cesa);
- for (i = 0; i < cesa->caps->nengines; i++) {
- clk_disable_unprepare(cesa->engines[i].zclk);
- clk_disable_unprepare(cesa->engines[i].clk);
+ for (i = 0; i < cesa->caps->nengines; i++)
mv_cesa_put_sram(pdev, i);
- irq_set_affinity_hint(cesa->engines[i].irq, NULL);
- }
}
static const struct platform_device_id mv_cesa_plat_id_table[] = {
@@ -604,7 +578,7 @@ MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
- .remove_new = mv_cesa_remove,
+ .remove = mv_cesa_remove,
.id_table = mv_cesa_plat_id_table,
.driver = {
.name = "marvell-cesa",
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index 0f37dfd42d85..cf62db50f958 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -489,7 +489,7 @@ static int mv_cesa_des_op(struct skcipher_request *req,
static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -500,7 +500,7 @@ static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -543,7 +543,7 @@ static int mv_cesa_cbc_des_op(struct skcipher_request *req,
static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
@@ -552,7 +552,7 @@ static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
@@ -596,7 +596,7 @@ static int mv_cesa_des3_op(struct skcipher_request *req,
static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -608,7 +608,7 @@ static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -649,7 +649,7 @@ static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_CBC |
@@ -661,7 +661,7 @@ static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_CBC |
@@ -725,7 +725,7 @@ static int mv_cesa_aes_op(struct skcipher_request *req,
static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -736,7 +736,7 @@ static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl,
CESA_SA_DESC_CFG_CRYPTCM_ECB |
@@ -778,7 +778,7 @@ static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);
@@ -787,7 +787,7 @@ static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
- struct mv_cesa_op_ctx tmpl;
+ struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 8d84ad45571c..f150861ceaf6 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -947,7 +947,7 @@ struct ahash_alg mv_md5_alg = {
.base = {
.cra_name = "md5",
.cra_driver_name = "mv-md5",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1018,7 +1018,7 @@ struct ahash_alg mv_sha1_alg = {
.base = {
.cra_name = "sha1",
.cra_driver_name = "mv-sha1",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1092,7 +1092,7 @@ struct ahash_alg mv_sha256_alg = {
.base = {
.cra_name = "sha256",
.cra_driver_name = "mv-sha256",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1302,7 +1302,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "mv-hmac-md5",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1373,7 +1373,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "mv-hmac-sha1",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
@@ -1444,7 +1444,7 @@ struct ahash_alg mv_ahmac_sha256_alg = {
.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "mv-hmac-sha256",
- .cra_priority = 300,
+ .cra_priority = 0,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
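Dropping cra_priority to 0 demotes these implementations below every other registered provider of the same algorithms, so generic requests no longer select them by default; they remain reachable through their "mv-*" driver names. How priority steers selection, sketched:

#include <crypto/hash.h>

static int demo_pick(void)
{
        struct crypto_ahash *tfm;

        /* By algorithm name: the highest-cra_priority provider wins, so
         * priority 0 makes this driver a last resort for "sha256".
         */
        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        crypto_free_ahash(tfm);

        /* By driver name: the CESA implementation is still selectable
         * explicitly, whatever its priority.
         */
        tfm = crypto_alloc_ahash("mv-sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        crypto_free_ahash(tfm);
        return 0;
}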
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 6bfc59e67747..5cae8fafa151 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -73,7 +73,7 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
@@ -94,7 +94,7 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, "CRYPTO_DEV_OCTEONTX2_CPT");
void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
struct cn10k_cpt_errata_ctx *er_ctx)
@@ -110,7 +110,7 @@ void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
DMA_BIDIRECTIONAL);
kfree(er_ctx->hw_ctx);
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, "CRYPTO_DEV_OCTEONTX2_CPT");
void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
{
@@ -119,7 +119,7 @@ void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
hctx->w0.ctx_sz = ctx_sz;
hctx->w0.ctx_push_sz = 1;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, "CRYPTO_DEV_OCTEONTX2_CPT");
int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
struct cn10k_cpt_errata_ctx *er_ctx)
@@ -149,7 +149,7 @@ int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, "CRYPTO_DEV_OCTEONTX2_CPT");
void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
{
@@ -168,7 +168,7 @@ void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
otx2_cpt_read64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
OTX2_CPT_LF_CTX_ERR);
}
-EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, "CRYPTO_DEV_OCTEONTX2_CPT");
void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
{
@@ -177,4 +177,4 @@ void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
else
cptvf->lfs.ops = &otx2_hw_ops;
}
-EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, "CRYPTO_DEV_OCTEONTX2_CPT");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 5be0103c1fb8..b8b7c8a3c0ca 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -19,7 +19,7 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
}
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
@@ -37,13 +37,13 @@ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, "CRYPTO_DEV_OCTEONTX2_CPT");
static int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
struct pci_dev *pdev, u64 reg,
@@ -95,7 +95,7 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val, int blkaddr)
@@ -108,7 +108,7 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val, int blkaddr)
@@ -121,7 +121,7 @@ int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
{
@@ -180,7 +180,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
{
@@ -213,7 +213,7 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
}
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
{
@@ -228,7 +228,7 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
return otx2_mbox_check_rsp_msgs(mbox, 0);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
{
@@ -254,4 +254,4 @@ int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, "CRYPTO_DEV_OCTEONTX2_CPT");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index b52728e3c0d1..b5d66afcc030 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -288,8 +288,7 @@ void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs)
cptlf_set_misc_intrs(lfs, false);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs)
{
@@ -308,8 +307,7 @@ void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs)
cptlf_set_done_intrs(lfs, false);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
int lf_num, int irq_offset,
@@ -351,8 +349,7 @@ free_irq:
otx2_cptlf_unregister_misc_interrupts(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs)
{
@@ -375,8 +372,7 @@ free_irq:
otx2_cptlf_unregister_done_interrupts(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts,
- CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -390,7 +386,7 @@ void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
free_cpumask_var(lfs->lf[slot].affinity_mask);
}
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -423,7 +419,7 @@ free_affinity_mask:
otx2_cptlf_free_irqs_affinity(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, "CRYPTO_DEV_OCTEONTX2_CPT");
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
int lfs_num)
@@ -486,7 +482,7 @@ clear_lfs_num:
lfs->lfs_num = 0;
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, "CRYPTO_DEV_OCTEONTX2_CPT");
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
@@ -498,7 +494,7 @@ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
otx2_cpt_detach_rsrcs_msg(lfs);
lfs->lfs_num = 0;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, "CRYPTO_DEV_OCTEONTX2_CPT");
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Common module");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 400e36d9908f..12971300296d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -739,18 +739,22 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
dev_err(dev, "Unable to get usable DMA configuration\n");
goto clear_drvdata;
}
- /* Map PF's configuration registers */
- err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
- OTX2_CPT_DRV_NAME);
+ err = pcim_request_all_regions(pdev, OTX2_CPT_DRV_NAME);
if (err) {
- dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
+ dev_err(dev, "Couldn't request PCI resources 0x%x\n", err);
goto clear_drvdata;
}
pci_set_master(pdev);
pci_set_drvdata(pdev, cptpf);
cptpf->pdev = pdev;
- cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+ /* Map PF's configuration registers */
+ cptpf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!cptpf->reg_base) {
+ err = -ENOMEM;
+ dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", err);
+ goto clear_drvdata;
+ }
/* Check if AF driver is up, otherwise defer probe */
err = cpt_is_pf_usable(cptpf);
@@ -864,7 +868,7 @@ static struct pci_driver otx2_cpt_pci_driver = {
module_pci_driver(otx2_cpt_pci_driver);
-MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
+MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 527d34cc258b..d84eebdf2fa8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -358,9 +358,8 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
dev_err(dev, "Unable to get usable DMA configuration\n");
goto clear_drvdata;
}
- /* Map VF's configuration registers */
- ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
- OTX2_CPTVF_DRV_NAME);
+
+ ret = pcim_request_all_regions(pdev, OTX2_CPTVF_DRV_NAME);
if (ret) {
dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
goto clear_drvdata;
@@ -369,7 +368,13 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cptvf);
cptvf->pdev = pdev;
- cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];
+ /* Map VF's configuration registers */
+ cptvf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
+ if (!cptvf->reg_base) {
+ ret = -ENOMEM;
+ dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", ret);
+ goto clear_drvdata;
+ }
otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);
@@ -448,7 +453,7 @@ static struct pci_driver otx2_cptvf_pci_driver = {
module_pci_driver(otx2_cptvf_pci_driver);
-MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
+MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index c82775dbb557..d94a26c3541a 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -225,21 +225,22 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct skcipher_request *req, int init)
{
- dma_addr_t key_phys = 0;
- dma_addr_t src_phys, dst_phys;
+ dma_addr_t key_phys, src_phys, dst_phys;
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
bool key_referenced = actx->key_referenced;
int ret;
- if (!key_referenced) {
+ if (key_referenced)
+ key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key + AES_KEYSIZE_128,
+ AES_KEYSIZE_128, DMA_TO_DEVICE);
+ else
key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
- ret = dma_mapping_error(sdcp->dev, key_phys);
- if (ret)
- return ret;
- }
+ ret = dma_mapping_error(sdcp->dev, key_phys);
+ if (ret)
+ return ret;
src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -300,7 +301,10 @@ aes_done_run:
err_dst:
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
- if (!key_referenced)
+ if (key_referenced)
+ dma_unmap_single(sdcp->dev, key_phys, AES_KEYSIZE_128,
+ DMA_TO_DEVICE);
+ else
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
return ret;
@@ -1243,7 +1247,7 @@ MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
static struct platform_driver mxs_dcp_driver = {
.probe = mxs_dcp_probe,
- .remove_new = mxs_dcp_remove,
+ .remove = mxs_dcp_remove,
.driver = {
.name = "mxs-dcp",
.of_match_table = mxs_dcp_dt_ids,
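The mxs-dcp change makes the key mapping unconditional: both the referenced-key and plain-key paths now dma_map_single() a region (with a different offset and size), check dma_mapping_error(), and unmap symmetrically on the way out. The pairing in general form; the buffer layout and the 16/32-byte sizes are placeholders for the two key layouts handled above:

#include <linux/dma-mapping.h>

static int demo_map_key(struct device *dev, void *buf, bool referenced,
                        dma_addr_t *key_phys)
{
        size_t len = referenced ? 16 : 32;              /* per-path size */
        void *src = referenced ? buf + 16 : buf;        /* per-path offset */

        *key_phys = dma_map_single(dev, src, len, DMA_TO_DEVICE);
        /* Every mapping must be checked before the address is used... */
        if (dma_mapping_error(dev, *key_phys))
                return -ENOMEM;
        return 0;
}

static void demo_unmap_key(struct device *dev, dma_addr_t key_phys,
                           bool referenced)
{
        /* ...and unmapped with the same size it was mapped with. */
        dma_unmap_single(dev, key_phys, referenced ? 16 : 32, DMA_TO_DEVICE);
}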
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
deleted file mode 100644
index 9a67dbf340f4..000000000000
--- a/drivers/crypto/n2_asm.S
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* n2_asm.S: Hypervisor calls for NCS support.
- *
- * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
- */
-
-#include <linux/linkage.h>
-#include <asm/hypervisor.h>
-#include "n2_core.h"
-
- /* o0: queue type
- * o1: RA of queue
- * o2: num entries in queue
- * o3: address of queue handle return
- */
-ENTRY(sun4v_ncs_qconf)
- mov HV_FAST_NCS_QCONF, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o3]
- retl
- nop
-ENDPROC(sun4v_ncs_qconf)
-
- /* %o0: queue handle
- * %o1: address of queue type return
- * %o2: address of queue base address return
- * %o3: address of queue num entries return
- */
-ENTRY(sun4v_ncs_qinfo)
- mov %o1, %g1
- mov %o2, %g2
- mov %o3, %g3
- mov HV_FAST_NCS_QINFO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%g1]
- stx %o2, [%g2]
- stx %o3, [%g3]
- retl
- nop
-ENDPROC(sun4v_ncs_qinfo)
-
- /* %o0: queue handle
- * %o1: address of head offset return
- */
-ENTRY(sun4v_ncs_gethead)
- mov %o1, %o2
- mov HV_FAST_NCS_GETHEAD, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gethead)
-
- /* %o0: queue handle
- * %o1: address of tail offset return
- */
-ENTRY(sun4v_ncs_gettail)
- mov %o1, %o2
- mov HV_FAST_NCS_GETTAIL, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_gettail)
-
- /* %o0: queue handle
- * %o1: new tail offset
- */
-ENTRY(sun4v_ncs_settail)
- mov HV_FAST_NCS_SETTAIL, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_settail)
-
- /* %o0: queue handle
- * %o1: address of devino return
- */
-ENTRY(sun4v_ncs_qhandle_to_devino)
- mov %o1, %o2
- mov HV_FAST_NCS_QHANDLE_TO_DEVINO, %o5
- ta HV_FAST_TRAP
- stx %o1, [%o2]
- retl
- nop
-ENDPROC(sun4v_ncs_qhandle_to_devino)
-
- /* %o0: queue handle
- * %o1: new head offset
- */
-ENTRY(sun4v_ncs_sethead_marker)
- mov HV_FAST_NCS_SETHEAD_MARKER, %o5
- ta HV_FAST_TRAP
- retl
- nop
-ENDPROC(sun4v_ncs_sethead_marker)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
deleted file mode 100644
index b11545cc5cb7..000000000000
--- a/drivers/crypto/n2_core.c
+++ /dev/null
@@ -1,2168 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
- *
- * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/cpumask.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/crypto.h>
-#include <crypto/md5.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-
-#include <asm/hypervisor.h>
-#include <asm/mdesc.h>
-
-#include "n2_core.h"
-
-#define DRV_MODULE_NAME "n2_crypto"
-#define DRV_MODULE_VERSION "0.2"
-#define DRV_MODULE_RELDATE "July 28, 2011"
-
-static const char version[] =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_DESCRIPTION("Niagara2 Crypto driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
-
-#define N2_CRA_PRIORITY 200
-
-static DEFINE_MUTEX(spu_lock);
-
-struct spu_queue {
- cpumask_t sharing;
- unsigned long qhandle;
-
- spinlock_t lock;
- u8 q_type;
- void *q;
- unsigned long head;
- unsigned long tail;
- struct list_head jobs;
-
- unsigned long devino;
-
- char irq_name[32];
- unsigned int irq;
-
- struct list_head list;
-};
-
-struct spu_qreg {
- struct spu_queue *queue;
- unsigned long type;
-};
-
-static struct spu_queue **cpu_to_cwq;
-static struct spu_queue **cpu_to_mau;
-
-static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
-{
- if (q->q_type == HV_NCS_QTYPE_MAU) {
- off += MAU_ENTRY_SIZE;
- if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
- off = 0;
- } else {
- off += CWQ_ENTRY_SIZE;
- if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
- off = 0;
- }
- return off;
-}
-
-struct n2_request_common {
- struct list_head entry;
- unsigned int offset;
-};
-#define OFFSET_NOT_RUNNING (~(unsigned int)0)
-
-/* An async job request records the final tail value it used in
- * n2_request_common->offset, test to see if that offset is in
- * the range old_head, new_head, inclusive.
- */
-static inline bool job_finished(struct spu_queue *q, unsigned int offset,
- unsigned long old_head, unsigned long new_head)
-{
- if (old_head <= new_head) {
- if (offset > old_head && offset <= new_head)
- return true;
- } else {
- if (offset > old_head || offset <= new_head)
- return true;
- }
- return false;
-}
-
-/* When the HEAD marker is unequal to the actual HEAD, we get
- * a virtual device INO interrupt. We should process the
- * completed CWQ entries and adjust the HEAD marker to clear
- * the IRQ.
- */
-static irqreturn_t cwq_intr(int irq, void *dev_id)
-{
- unsigned long off, new_head, hv_ret;
- struct spu_queue *q = dev_id;
-
- pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- spin_lock(&q->lock);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
-
- pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), new_head, hv_ret);
-
- for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
- /* XXX ... XXX */
- }
-
- hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
- if (hv_ret == HV_EOK)
- q->head = new_head;
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t mau_intr(int irq, void *dev_id)
-{
- struct spu_queue *q = dev_id;
- unsigned long head, hv_ret;
-
- spin_lock(&q->lock);
-
- pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
- smp_processor_id(), q->qhandle);
-
- hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
-
- pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
- smp_processor_id(), head, hv_ret);
-
- sun4v_ncs_sethead_marker(q->qhandle, head);
-
- spin_unlock(&q->lock);
-
- return IRQ_HANDLED;
-}
-
-static void *spu_queue_next(struct spu_queue *q, void *cur)
-{
- return q->q + spu_next_offset(q, cur - q->q);
-}
-
-static int spu_queue_num_free(struct spu_queue *q)
-{
- unsigned long head = q->head;
- unsigned long tail = q->tail;
- unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
- unsigned long diff;
-
- if (head > tail)
- diff = head - tail;
- else
- diff = (end - tail) + head;
-
- return (diff / CWQ_ENTRY_SIZE) - 1;
-}
-
-static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
-{
- int avail = spu_queue_num_free(q);
-
- if (avail >= num_entries)
- return q->q + q->tail;
-
- return NULL;
-}
-
-static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
-{
- unsigned long hv_ret, new_tail;
-
- new_tail = spu_next_offset(q, last - q->q);
-
- hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
- if (hv_ret == HV_EOK)
- q->tail = new_tail;
- return hv_ret;
-}
-
-static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
- int enc_type, int auth_type,
- unsigned int hash_len,
- bool sfas, bool sob, bool eob, bool encrypt,
- int opcode)
-{
- u64 word = (len - 1) & CONTROL_LEN;
-
- word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
- word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
- word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
- if (sfas)
- word |= CONTROL_STORE_FINAL_AUTH_STATE;
- if (sob)
- word |= CONTROL_START_OF_BLOCK;
- if (eob)
- word |= CONTROL_END_OF_BLOCK;
- if (encrypt)
- word |= CONTROL_ENCRYPT;
- if (hmac_key_len)
- word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
- if (hash_len)
- word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
-
- return word;
-}
-
-#if 0
-static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
-{
- if (this_len >= 64 ||
- qp->head != qp->tail)
- return true;
- return false;
-}
-#endif
-
-struct n2_ahash_alg {
- struct list_head entry;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 auth_type;
- u8 hmac_type;
- struct ahash_alg alg;
-};
-
-static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_ahash_alg, alg);
-}
-
-struct n2_hmac_alg {
- const char *child_alg;
- struct n2_ahash_alg derived;
-};
-
-static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct ahash_alg *ahash_alg;
-
- ahash_alg = container_of(alg, struct ahash_alg, halg.base);
-
- return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
-}
-
-struct n2_hash_ctx {
- struct crypto_ahash *fallback_tfm;
-};
-
-#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
-
-struct n2_hmac_ctx {
- struct n2_hash_ctx base;
-
- struct crypto_shash *child_shash;
-
- int hash_key_len;
- unsigned char hash_key[N2_HASH_KEY_MAX];
-};
-
-struct n2_hash_req_ctx {
- union {
- struct md5_state md5;
- struct sha1_state sha1;
- struct sha256_state sha256;
- } u;
-
- struct ahash_request fallback_req;
-};
-
-static int n2_hash_async_init(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
- return crypto_ahash_init(&rctx->fallback_req);
-}
-
-static int n2_hash_async_update(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
-
- return crypto_ahash_update(&rctx->fallback_req);
-}
-
-static int n2_hash_async_final(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_final(&rctx->fallback_req);
-}
-
-static int n2_hash_async_finup(struct ahash_request *req)
-{
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_finup(&rctx->fallback_req);
-}
-
-static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_async_noexport(struct ahash_request *req, void *out)
-{
- return -ENOSYS;
-}
-
-static int n2_hash_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct crypto_ahash *fallback_tfm;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->fallback_tfm = fallback_tfm;
- return 0;
-
-out:
- return err;
-}
-
-static void n2_hash_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->fallback_tfm);
-}
-
-static int n2_hmac_cra_init(struct crypto_tfm *tfm)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
- struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
- struct crypto_ahash *fallback_tfm;
- struct crypto_shash *child_shash;
- int err;
-
- fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- pr_warn("Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
-
- child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
- if (IS_ERR(child_shash)) {
- pr_warn("Child shash '%s' could not be loaded!\n",
- n2alg->child_alg);
- err = PTR_ERR(child_shash);
- goto out_free_fallback;
- }
-
- crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
- crypto_ahash_reqsize(fallback_tfm)));
-
- ctx->child_shash = child_shash;
- ctx->base.fallback_tfm = fallback_tfm;
- return 0;
-
-out_free_fallback:
- crypto_free_ahash(fallback_tfm);
-
-out:
- return err;
-}
-
-static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
-{
- struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
-
- crypto_free_ahash(ctx->base.fallback_tfm);
- crypto_free_shash(ctx->child_shash);
-}
-
-static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- struct crypto_shash *child_shash = ctx->child_shash;
- struct crypto_ahash *fallback_tfm;
- int err, bs, ds;
-
- fallback_tfm = ctx->base.fallback_tfm;
- err = crypto_ahash_setkey(fallback_tfm, key, keylen);
- if (err)
- return err;
-
- bs = crypto_shash_blocksize(child_shash);
- ds = crypto_shash_digestsize(child_shash);
- BUG_ON(ds > N2_HASH_KEY_MAX);
- if (keylen > bs) {
- err = crypto_shash_tfm_digest(child_shash, key, keylen,
- ctx->hash_key);
- if (err)
- return err;
- keylen = ds;
- } else if (keylen <= N2_HASH_KEY_MAX)
- memcpy(ctx->hash_key, key, keylen);
-
- ctx->hash_key_len = keylen;
-
- return err;
-}
-
-static unsigned long wait_for_tail(struct spu_queue *qp)
-{
- unsigned long head, hv_ret;
-
- do {
- hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
- if (hv_ret != HV_EOK) {
- pr_err("Hypervisor error on gethead\n");
- break;
- }
- if (head == qp->tail) {
- qp->head = head;
- break;
- }
- } while (1);
- return hv_ret;
-}
-
-static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
- struct cwq_initial_entry *ent)
-{
- unsigned long hv_ret = spu_queue_submit(qp, ent);
-
- if (hv_ret == HV_EOK)
- hv_ret = wait_for_tail(qp);
-
- return hv_ret;
-}
-
-static int n2_do_async_digest(struct ahash_request *req,
- unsigned int auth_type, unsigned int digest_size,
- unsigned int result_size, void *hash_loc,
- unsigned long auth_key, unsigned int auth_key_len)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cwq_initial_entry *ent;
- struct crypto_hash_walk walk;
- struct spu_queue *qp;
- unsigned long flags;
- int err = -ENODEV;
- int nbytes, cpu;
-
- /* The total effective length of the operation may not
- * exceed 2^16.
- */
- if (unlikely(req->nbytes > (1 << 16))) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
-
- nbytes = crypto_hash_walk_first(req, &walk);
-
- cpu = get_cpu();
- qp = cpu_to_cwq[cpu];
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- /* XXX can do better, improve this later by doing a by-hand scatterlist
- * XXX walk, etc.
- */
- ent = qp->q + qp->tail;
-
- ent->control = control_word_base(nbytes, auth_key_len, 0,
- auth_type, digest_size,
- false, true, false, false,
- OPCODE_INPLACE_BIT |
- OPCODE_AUTH_MAC);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = auth_key;
- ent->auth_iv_addr = __pa(hash_loc);
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = __pa(hash_loc);
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- while (nbytes > 0) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = (nbytes - 1);
- ent->src_addr = __pa(walk.data);
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
-
- nbytes = crypto_hash_walk_done(&walk, 0);
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
- err = -EINVAL;
- else
- err = 0;
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
- if (!err)
- memcpy(req->result, hash_loc, result_size);
-out:
- put_cpu();
-
- return err;
-}
-
-static int n2_hash_async_digest(struct ahash_request *req)
-{
- struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- int ds;
-
- ds = n2alg->digest_size;
- if (unlikely(req->nbytes == 0)) {
- memcpy(req->result, n2alg->hash_zero, ds);
- return 0;
- }
- memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->auth_type,
- n2alg->hw_op_hashsz, ds,
- &rctx->u, 0UL, 0);
-}
-
-static int n2_hmac_async_digest(struct ahash_request *req)
-{
- struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
- int ds;
-
- ds = n2alg->derived.digest_size;
- if (unlikely(req->nbytes == 0) ||
- unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
- struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
- struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
- rctx->fallback_req.base.flags =
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
- rctx->fallback_req.nbytes = req->nbytes;
- rctx->fallback_req.src = req->src;
- rctx->fallback_req.result = req->result;
-
- return crypto_ahash_digest(&rctx->fallback_req);
- }
- memcpy(&rctx->u, n2alg->derived.hash_init,
- n2alg->derived.hw_op_hashsz);
-
- return n2_do_async_digest(req, n2alg->derived.hmac_type,
- n2alg->derived.hw_op_hashsz, ds,
- &rctx->u,
- __pa(&ctx->hash_key),
- ctx->hash_key_len);
-}
-
-struct n2_skcipher_context {
- int key_len;
- int enc_type;
- union {
- u8 aes[AES_MAX_KEY_SIZE];
- u8 des[DES_KEY_SIZE];
- u8 des3[3 * DES_KEY_SIZE];
- } key;
-};
-
-#define N2_CHUNK_ARR_LEN 16
-
-struct n2_crypto_chunk {
- struct list_head entry;
- unsigned long iv_paddr : 44;
- unsigned long arr_len : 20;
- unsigned long dest_paddr;
- unsigned long dest_final;
- struct {
- unsigned long src_paddr : 44;
- unsigned long src_len : 20;
- } arr[N2_CHUNK_ARR_LEN];
-};
-
-struct n2_request_context {
- struct skcipher_walk walk;
- struct list_head chunk_list;
- struct n2_crypto_chunk chunk;
- u8 temp_iv[16];
-};
-
-/* The SPU allows some level of flexibility for partial cipher blocks
- * being specified in a descriptor.
- *
- * It merely requires that every descriptor's length field is at least
- * as large as the cipher block size. This means that a cipher block
- * can span at most 2 descriptors. However, this does not allow a
- * partial block to span into the final descriptor as that would
- * violate the rule (since every descriptor's length must be at lest
- * the block size). So, for example, assuming an 8 byte block size:
- *
- * 0xe --> 0xa --> 0x8
- *
- * is a valid length sequence, whereas:
- *
- * 0xe --> 0xb --> 0x7
- *
- * is not a valid sequence.
- */
-
-struct n2_skcipher_alg {
- struct list_head entry;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
-{
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-
- return container_of(alg, struct n2_skcipher_alg, skcipher);
-}
-
-static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
-
- ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- ctx->enc_type |= ENC_TYPE_ALG_AES128;
- break;
- case AES_KEYSIZE_192:
- ctx->enc_type |= ENC_TYPE_ALG_AES192;
- break;
- case AES_KEYSIZE_256:
- ctx->enc_type |= ENC_TYPE_ALG_AES256;
- break;
- default:
- return -EINVAL;
- }
-
- ctx->key_len = keylen;
- memcpy(ctx->key.aes, key, keylen);
- return 0;
-}
-
-static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des, key, keylen);
- return 0;
-}
-
-static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
- struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
- struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
- int err;
-
- err = verify_skcipher_des3_key(skcipher, key);
- if (err)
- return err;
-
- ctx->enc_type = n2alg->enc_type;
-
- ctx->key_len = keylen;
- memcpy(ctx->key.des3, key, keylen);
- return 0;
-}
-
-static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
-{
- int this_len = nbytes;
-
- this_len -= (nbytes & (block_size - 1));
- return this_len > (1 << 16) ? (1 << 16) : this_len;
-}
-
-static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
- struct n2_crypto_chunk *cp,
- struct spu_queue *qp, bool encrypt)
-{
- struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
- struct cwq_initial_entry *ent;
- bool in_place;
- int i;
-
- ent = spu_queue_alloc(qp, cp->arr_len);
- if (!ent) {
- pr_err("queue_alloc() of %d entries failed\n",
- cp->arr_len);
- return -EBUSY;
- }
-
- in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
-
- ent->control = control_word_base(cp->arr[0].src_len,
- 0, ctx->enc_type, 0, 0,
- false, true, false, encrypt,
- OPCODE_ENCRYPT |
- (in_place ? OPCODE_INPLACE_BIT : 0));
- ent->src_addr = cp->arr[0].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = __pa(&ctx->key);
- ent->enc_iv_addr = cp->iv_paddr;
- ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
-
- for (i = 1; i < cp->arr_len; i++) {
- ent = spu_queue_next(qp, ent);
-
- ent->control = cp->arr[i].src_len - 1;
- ent->src_addr = cp->arr[i].src_paddr;
- ent->auth_key_addr = 0UL;
- ent->auth_iv_addr = 0UL;
- ent->final_auth_state_addr = 0UL;
- ent->enc_key_addr = 0UL;
- ent->enc_iv_addr = 0UL;
- ent->dest_addr = 0UL;
- }
- ent->control |= CONTROL_END_OF_BLOCK;
-
- return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
-}
-
-static int n2_compute_chunks(struct skcipher_request *req)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct skcipher_walk *walk = &rctx->walk;
- struct n2_crypto_chunk *chunk;
- unsigned long dest_prev;
- unsigned int tot_len;
- bool prev_in_place;
- int err, nbytes;
-
- err = skcipher_walk_async(walk, req);
- if (err)
- return err;
-
- INIT_LIST_HEAD(&rctx->chunk_list);
-
- chunk = &rctx->chunk;
- INIT_LIST_HEAD(&chunk->entry);
-
- chunk->iv_paddr = 0UL;
- chunk->arr_len = 0;
- chunk->dest_paddr = 0UL;
-
- prev_in_place = false;
- dest_prev = ~0UL;
- tot_len = 0;
-
- while ((nbytes = walk->nbytes) != 0) {
- unsigned long dest_paddr, src_paddr;
- bool in_place;
- int this_len;
-
- src_paddr = (page_to_phys(walk->src.phys.page) +
- walk->src.phys.offset);
- dest_paddr = (page_to_phys(walk->dst.phys.page) +
- walk->dst.phys.offset);
- in_place = (src_paddr == dest_paddr);
- this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
-
- if (chunk->arr_len != 0) {
- if (in_place != prev_in_place ||
- (!prev_in_place &&
- dest_paddr != dest_prev) ||
- chunk->arr_len == N2_CHUNK_ARR_LEN ||
- tot_len + this_len > (1 << 16)) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry,
- &rctx->chunk_list);
- chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
- if (!chunk) {
- err = -ENOMEM;
- break;
- }
- INIT_LIST_HEAD(&chunk->entry);
- }
- }
- if (chunk->arr_len == 0) {
- chunk->dest_paddr = dest_paddr;
- tot_len = 0;
- }
- chunk->arr[chunk->arr_len].src_paddr = src_paddr;
- chunk->arr[chunk->arr_len].src_len = this_len;
- chunk->arr_len++;
-
- dest_prev = dest_paddr + this_len;
- prev_in_place = in_place;
- tot_len += this_len;
-
- err = skcipher_walk_done(walk, nbytes - this_len);
- if (err)
- break;
- }
- if (!err && chunk->arr_len != 0) {
- chunk->dest_final = dest_prev;
- list_add_tail(&chunk->entry, &rctx->chunk_list);
- }
-
- return err;
-}
-
-static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct n2_crypto_chunk *c, *tmp;
-
- if (final_iv)
- memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
-
-}
-
-static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- unsigned long flags, hv_ret;
- struct spu_queue *qp;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
- err = __n2_crypt_chunk(tfm, c, qp, encrypt);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, NULL);
- return err;
-}
-
-static int n2_encrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, true);
-}
-
-static int n2_decrypt_ecb(struct skcipher_request *req)
-{
- return n2_do_ecb(req, false);
-}
-
-static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
-{
- struct n2_request_context *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned long flags, hv_ret, iv_paddr;
- int err = n2_compute_chunks(req);
- struct n2_crypto_chunk *c, *tmp;
- struct spu_queue *qp;
- void *final_iv_addr;
-
- final_iv_addr = NULL;
-
- if (err)
- return err;
-
- qp = cpu_to_cwq[get_cpu()];
- err = -ENODEV;
- if (!qp)
- goto out;
-
- spin_lock_irqsave(&qp->lock, flags);
-
- if (encrypt) {
- iv_paddr = __pa(rctx->walk.iv);
- list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
- entry) {
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, true);
- if (err)
- break;
- iv_paddr = c->dest_final - rctx->walk.blocksize;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- final_iv_addr = __va(iv_paddr);
- } else {
- list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
- entry) {
- if (c == &rctx->chunk) {
- iv_paddr = __pa(rctx->walk.iv);
- } else {
- iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
- tmp->arr[tmp->arr_len-1].src_len -
- rctx->walk.blocksize);
- }
- if (!final_iv_addr) {
- unsigned long pa;
-
- pa = (c->arr[c->arr_len-1].src_paddr +
- c->arr[c->arr_len-1].src_len -
- rctx->walk.blocksize);
- final_iv_addr = rctx->temp_iv;
- memcpy(rctx->temp_iv, __va(pa),
- rctx->walk.blocksize);
- }
- c->iv_paddr = iv_paddr;
- err = __n2_crypt_chunk(tfm, c, qp, false);
- if (err)
- break;
- list_del(&c->entry);
- if (unlikely(c != &rctx->chunk))
- kfree(c);
- }
- }
- if (!err) {
- hv_ret = wait_for_tail(qp);
- if (hv_ret != HV_EOK)
- err = -EINVAL;
- }
-
- spin_unlock_irqrestore(&qp->lock, flags);
-
-out:
- put_cpu();
-
- n2_chunk_complete(req, err ? NULL : final_iv_addr);
- return err;
-}
-
-static int n2_encrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, true);
-}
-
-static int n2_decrypt_chaining(struct skcipher_request *req)
-{
- return n2_do_chaining(req, false);
-}
-
-struct n2_skcipher_tmpl {
- const char *name;
- const char *drv_name;
- u8 block_size;
- u8 enc_type;
- struct skcipher_alg skcipher;
-};
-
-static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
- /* DES: ECB and CBC are supported */
- { .name = "ecb(des)",
- .drv_name = "ecb-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des)",
- .drv_name = "cbc-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
- /* 3DES: ECB and CBC are supported */
- { .name = "ecb(des3_ede)",
- .drv_name = "ecb-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(des3_ede)",
- .drv_name = "cbc-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
-
- /* AES: ECB, CBC and CTR are supported */
- { .name = "ecb(aes)",
- .drv_name = "ecb-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_ECB),
- .skcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_ecb,
- .decrypt = n2_decrypt_ecb,
- },
- },
- { .name = "cbc(aes)",
- .drv_name = "cbc-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_CBC),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
- { .name = "ctr(aes)",
- .drv_name = "ctr-aes",
- .block_size = AES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_AES128 |
- ENC_TYPE_CHAINING_COUNTER),
- .skcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = n2_aes_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_encrypt_chaining, /* CTR: decrypt == encrypt */
- },
- },
-
-};
-#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
-
-static LIST_HEAD(skcipher_algs);
-
-struct n2_hash_tmpl {
- const char *name;
- const u8 *hash_zero;
- const u8 *hash_init;
- u8 hw_op_hashsz;
- u8 digest_size;
- u8 statesize;
- u8 block_size;
- u8 auth_type;
- u8 hmac_type;
-};
-
-static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
- cpu_to_le32(MD5_H0),
- cpu_to_le32(MD5_H1),
- cpu_to_le32(MD5_H2),
- cpu_to_le32(MD5_H3),
-};
-static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
- SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
-};
-static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
- SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
-};
-static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
- SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
- SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
-};
-
-static const struct n2_hash_tmpl hash_tmpls[] = {
- { .name = "md5",
- .hash_zero = md5_zero_message_hash,
- .hash_init = (u8 *)n2_md5_init,
- .auth_type = AUTH_TYPE_MD5,
- .hmac_type = AUTH_TYPE_HMAC_MD5,
- .hw_op_hashsz = MD5_DIGEST_SIZE,
- .digest_size = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct md5_state),
- .block_size = MD5_HMAC_BLOCK_SIZE },
- { .name = "sha1",
- .hash_zero = sha1_zero_message_hash,
- .hash_init = (u8 *)n2_sha1_init,
- .auth_type = AUTH_TYPE_SHA1,
- .hmac_type = AUTH_TYPE_HMAC_SHA1,
- .hw_op_hashsz = SHA1_DIGEST_SIZE,
- .digest_size = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_state),
- .block_size = SHA1_BLOCK_SIZE },
- { .name = "sha256",
- .hash_zero = sha256_zero_message_hash,
- .hash_init = (u8 *)n2_sha256_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_HMAC_SHA256,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA256_BLOCK_SIZE },
- { .name = "sha224",
- .hash_zero = sha224_zero_message_hash,
- .hash_init = (u8 *)n2_sha224_init,
- .auth_type = AUTH_TYPE_SHA256,
- .hmac_type = AUTH_TYPE_RESERVED,
- .hw_op_hashsz = SHA256_DIGEST_SIZE,
- .digest_size = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_state),
- .block_size = SHA224_BLOCK_SIZE },
-};
-#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
-
-static LIST_HEAD(ahash_algs);
-static LIST_HEAD(hmac_algs);
-
-static int algs_registered;
-
-static void __n2_unregister_algs(void)
-{
- struct n2_skcipher_alg *skcipher, *skcipher_tmp;
- struct n2_ahash_alg *alg, *alg_tmp;
- struct n2_hmac_alg *hmac, *hmac_tmp;
-
- list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
- crypto_unregister_skcipher(&skcipher->skcipher);
- list_del(&skcipher->entry);
- kfree(skcipher);
- }
- list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
- crypto_unregister_ahash(&hmac->derived.alg);
- list_del(&hmac->derived.entry);
- kfree(hmac);
- }
- list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
- crypto_unregister_ahash(&alg->alg);
- list_del(&alg->entry);
- kfree(alg);
- }
-}
-
-static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
-{
- crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
- return 0;
-}
-
-static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
-{
- struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct skcipher_alg *alg;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- alg = &p->skcipher;
- *alg = tmpl->skcipher;
-
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
- alg->base.cra_priority = N2_CRA_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY;
- alg->base.cra_blocksize = tmpl->block_size;
- p->enc_type = tmpl->enc_type;
- alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
- alg->base.cra_module = THIS_MODULE;
- alg->init = n2_skcipher_init_tfm;
-
- list_add(&p->entry, &skcipher_algs);
- err = crypto_register_skcipher(alg);
- if (err) {
- pr_err("%s alg registration failed\n", alg->base.cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", alg->base.cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
-{
- struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct ahash_alg *ahash;
- struct crypto_alg *base;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->child_alg = n2ahash->alg.halg.base.cra_name;
- memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
- INIT_LIST_HEAD(&p->derived.entry);
-
- ahash = &p->derived.alg;
- ahash->digest = n2_hmac_async_digest;
- ahash->setkey = n2_hmac_async_setkey;
-
- base = &ahash->halg.base;
- err = -EINVAL;
- if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
- if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
- p->child_alg) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_p;
-
- base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
- base->cra_init = n2_hmac_cra_init;
- base->cra_exit = n2_hmac_cra_exit;
-
- list_add(&p->derived.entry, &hmac_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->derived.entry);
-out_free_p:
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- return err;
-}
-
-static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
-{
- struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
- struct hash_alg_common *halg;
- struct crypto_alg *base;
- struct ahash_alg *ahash;
- int err;
-
- if (!p)
- return -ENOMEM;
-
- p->hash_zero = tmpl->hash_zero;
- p->hash_init = tmpl->hash_init;
- p->auth_type = tmpl->auth_type;
- p->hmac_type = tmpl->hmac_type;
- p->hw_op_hashsz = tmpl->hw_op_hashsz;
- p->digest_size = tmpl->digest_size;
-
- ahash = &p->alg;
- ahash->init = n2_hash_async_init;
- ahash->update = n2_hash_async_update;
- ahash->final = n2_hash_async_final;
- ahash->finup = n2_hash_async_finup;
- ahash->digest = n2_hash_async_digest;
- ahash->export = n2_hash_async_noexport;
- ahash->import = n2_hash_async_noimport;
-
- halg = &ahash->halg;
- halg->digestsize = tmpl->digest_size;
- halg->statesize = tmpl->statesize;
-
- base = &halg->base;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
- base->cra_priority = N2_CRA_PRIORITY;
- base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK;
- base->cra_blocksize = tmpl->block_size;
- base->cra_ctxsize = sizeof(struct n2_hash_ctx);
- base->cra_module = THIS_MODULE;
- base->cra_init = n2_hash_cra_init;
- base->cra_exit = n2_hash_cra_exit;
-
- list_add(&p->entry, &ahash_algs);
- err = crypto_register_ahash(ahash);
- if (err) {
- pr_err("%s alg registration failed\n", base->cra_name);
- list_del(&p->entry);
- kfree(p);
- } else {
- pr_info("%s alg registered\n", base->cra_name);
- }
- if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
- err = __n2_register_one_hmac(p);
- return err;
-}
-
-static int n2_register_algs(void)
-{
- int i, err = 0;
-
- mutex_lock(&spu_lock);
- if (algs_registered++)
- goto out;
-
- for (i = 0; i < NUM_HASH_TMPLS; i++) {
- err = __n2_register_one_ahash(&hash_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
- for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
- err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
- if (err) {
- __n2_unregister_algs();
- goto out;
- }
- }
-
-out:
- mutex_unlock(&spu_lock);
- return err;
-}
-
-static void n2_unregister_algs(void)
-{
- mutex_lock(&spu_lock);
- if (!--algs_registered)
- __n2_unregister_algs();
- mutex_unlock(&spu_lock);
-}
-
-/* To map CWQ queues to interrupt sources, the hypervisor API provides
- * a devino. This isn't very useful to us because all of the
- * interrupts listed in the device_node have been translated to
- * Linux virtual IRQ cookie numbers.
- *
- * So we have to back-translate, going through the 'intr' and 'ino'
- * property tables of the n2cp MDESC node, matching it with the OF
- * 'interrupts' property entries, in order to figure out which
- * devino goes to which already-translated IRQ.
- */
-static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
- unsigned long dev_ino)
-{
- const unsigned int *dev_intrs;
- unsigned int intr;
- int i;
-
- for (i = 0; i < ip->num_intrs; i++) {
- if (ip->ino_table[i].ino == dev_ino)
- break;
- }
- if (i == ip->num_intrs)
- return -ENODEV;
-
- intr = ip->ino_table[i].intr;
-
- dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
- if (!dev_intrs)
- return -ENODEV;
-
- for (i = 0; i < dev->archdata.num_irqs; i++) {
- if (dev_intrs[i] == intr)
- return i;
- }
-
- return -ENODEV;
-}
-
-static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
- const char *irq_name, struct spu_queue *p,
- irq_handler_t handler)
-{
- unsigned long herr;
- int index;
-
- herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
- if (herr)
- return -EINVAL;
-
- index = find_devino_index(dev, ip, p->devino);
- if (index < 0)
- return index;
-
- p->irq = dev->archdata.irqs[index];
-
- sprintf(p->irq_name, "%s-%d", irq_name, index);
-
- return request_irq(p->irq, handler, 0, p->irq_name, p);
-}
-
-static struct kmem_cache *queue_cache[2];
-
-static void *new_queue(unsigned long q_type)
-{
- return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
-}
-
-static void free_queue(void *p, unsigned long q_type)
-{
- kmem_cache_free(queue_cache[q_type - 1], p);
-}
-
-static int queue_cache_init(void)
-{
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- queue_cache[HV_NCS_QTYPE_MAU - 1] =
- kmem_cache_create("mau_queue",
- (MAU_NUM_ENTRIES *
- MAU_ENTRY_SIZE),
- MAU_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
- return -ENOMEM;
-
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
- queue_cache[HV_NCS_QTYPE_CWQ - 1] =
- kmem_cache_create("cwq_queue",
- (CWQ_NUM_ENTRIES *
- CWQ_ENTRY_SIZE),
- CWQ_ENTRY_SIZE, 0, NULL);
- if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-static void queue_cache_destroy(void)
-{
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
- kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
- queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
- queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
-}
-
-static long spu_queue_register_workfn(void *arg)
-{
- struct spu_qreg *qr = arg;
- struct spu_queue *p = qr->queue;
- unsigned long q_type = qr->type;
- unsigned long hv_ret;
-
- hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
- CWQ_NUM_ENTRIES, &p->qhandle);
- if (!hv_ret)
- sun4v_ncs_sethead_marker(p->qhandle, 0);
-
- return hv_ret ? -EINVAL : 0;
-}
-
-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
-{
- int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
- struct spu_qreg qr = { .queue = p, .type = q_type };
-
- return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
-}
-
-static int spu_queue_setup(struct spu_queue *p)
-{
- int err;
-
- p->q = new_queue(p->q_type);
- if (!p->q)
- return -ENOMEM;
-
- err = spu_queue_register(p, p->q_type);
- if (err) {
- free_queue(p->q, p->q_type);
- p->q = NULL;
- }
-
- return err;
-}
-
-static void spu_queue_destroy(struct spu_queue *p)
-{
- unsigned long hv_ret;
-
- if (!p->q)
- return;
-
- hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
-
- if (!hv_ret)
- free_queue(p->q, p->q_type);
-}
-
-static void spu_list_destroy(struct list_head *list)
-{
- struct spu_queue *p, *n;
-
- list_for_each_entry_safe(p, n, list, list) {
- int i;
-
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_to_cwq[i] == p)
- cpu_to_cwq[i] = NULL;
- }
-
- if (p->irq) {
- free_irq(p->irq, p);
- p->irq = 0;
- }
- spu_queue_destroy(p);
- list_del(&p->list);
- kfree(p);
- }
-}
-
-/* Walk the backward arcs of a CWQ 'exec-unit' node,
- * gathering cpu membership information.
- */
-static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- u64 node, struct spu_queue *p,
- struct spu_queue **table)
-{
- u64 arc;
-
- mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
- u64 tgt = mdesc_arc_target(mdesc, arc);
- const char *name = mdesc_node_name(mdesc, tgt);
- const u64 *id;
-
- if (strcmp(name, "cpu"))
- continue;
- id = mdesc_get_property(mdesc, tgt, "id", NULL);
- if (table[*id] != NULL) {
- dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
- dev->dev.of_node);
- return -EINVAL;
- }
- cpumask_set_cpu(*id, &p->sharing);
- table[*id] = p;
- }
- return 0;
-}
-
-/* Process an 'exec-unit' MDESC node of type 'cwq'. */
-static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
- struct platform_device *dev, struct mdesc_handle *mdesc,
- u64 node, const char *iname, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- struct spu_queue *p;
- int err;
-
- p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
- if (!p) {
- dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- cpumask_clear(&p->sharing);
- spin_lock_init(&p->lock);
- p->q_type = q_type;
- INIT_LIST_HEAD(&p->jobs);
- list_add(&p->list, list);
-
- err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
- if (err)
- return err;
-
- err = spu_queue_setup(p);
- if (err)
- return err;
-
- return spu_map_ino(dev, ip, iname, p, handler);
-}
-
-static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
- struct spu_mdesc_info *ip, struct list_head *list,
- const char *exec_name, unsigned long q_type,
- irq_handler_t handler, struct spu_queue **table)
-{
- int err = 0;
- u64 node;
-
- mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
- const char *type;
-
- type = mdesc_get_property(mdesc, node, "type", NULL);
- if (!type || strcmp(type, exec_name))
- continue;
-
- err = handle_exec_unit(ip, list, dev, mdesc, node,
- exec_name, q_type, handler, table);
- if (err) {
- spu_list_destroy(list);
- break;
- }
- }
-
- return err;
-}
-
-static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
- struct spu_mdesc_info *ip)
-{
- const u64 *ino;
- int ino_len;
- int i;
-
- ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
- if (!ino) {
- pr_err("NO 'ino' property\n");
- return -ENODEV;
- }
-
- ip->num_intrs = ino_len / sizeof(u64);
- ip->ino_table = kzalloc((sizeof(struct ino_blob) *
- ip->num_intrs),
- GFP_KERNEL);
- if (!ip->ino_table)
- return -ENOMEM;
-
- for (i = 0; i < ip->num_intrs; i++) {
- struct ino_blob *b = &ip->ino_table[i];
- b->intr = i + 1;
- b->ino = ino[i];
- }
-
- return 0;
-}
-
-static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
- struct platform_device *dev,
- struct spu_mdesc_info *ip,
- const char *node_name)
-{
- u64 node, reg;
-
- if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
- return -ENODEV;
-
- mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
- const char *name;
- const u64 *chdl;
-
- name = mdesc_get_property(mdesc, node, "name", NULL);
- if (!name || strcmp(name, node_name))
- continue;
- chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
- if (!chdl || (*chdl != reg))
- continue;
- ip->cfg_handle = *chdl;
- return get_irq_props(mdesc, node, ip);
- }
-
- return -ENODEV;
-}
-
-static unsigned long n2_spu_hvapi_major;
-static unsigned long n2_spu_hvapi_minor;
-
-static int n2_spu_hvapi_register(void)
-{
- int err;
-
- n2_spu_hvapi_major = 2;
- n2_spu_hvapi_minor = 0;
-
- err = sun4v_hvapi_register(HV_GRP_NCS,
- n2_spu_hvapi_major,
- &n2_spu_hvapi_minor);
-
- if (!err)
- pr_info("Registered NCS HVAPI version %lu.%lu\n",
- n2_spu_hvapi_major,
- n2_spu_hvapi_minor);
-
- return err;
-}
-
-static void n2_spu_hvapi_unregister(void)
-{
- sun4v_hvapi_unregister(HV_GRP_NCS);
-}
-
-static int global_ref;
-
-static int grab_global_resources(void)
-{
- int err = 0;
-
- mutex_lock(&spu_lock);
-
- if (global_ref++)
- goto out;
-
- err = n2_spu_hvapi_register();
- if (err)
- goto out;
-
- err = queue_cache_init();
- if (err)
- goto out_hvapi_release;
-
- err = -ENOMEM;
- cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_cwq)
- goto out_queue_cache_destroy;
-
- cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
- GFP_KERNEL);
- if (!cpu_to_mau)
- goto out_free_cwq_table;
-
- err = 0;
-
-out:
- if (err)
- global_ref--;
- mutex_unlock(&spu_lock);
- return err;
-
-out_free_cwq_table:
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
-out_queue_cache_destroy:
- queue_cache_destroy();
-
-out_hvapi_release:
- n2_spu_hvapi_unregister();
- goto out;
-}
-
-static void release_global_resources(void)
-{
- mutex_lock(&spu_lock);
- if (!--global_ref) {
- kfree(cpu_to_cwq);
- cpu_to_cwq = NULL;
-
- kfree(cpu_to_mau);
- cpu_to_mau = NULL;
-
- queue_cache_destroy();
- n2_spu_hvapi_unregister();
- }
- mutex_unlock(&spu_lock);
-}
-
-static struct n2_crypto *alloc_n2cp(void)
-{
- struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
-
- if (np)
- INIT_LIST_HEAD(&np->cwq_list);
-
- return np;
-}
-
-static void free_n2cp(struct n2_crypto *np)
-{
- kfree(np->cwq_info.ino_table);
- np->cwq_info.ino_table = NULL;
-
- kfree(np);
-}
-
-static void n2_spu_driver_version(void)
-{
- static int n2_spu_version_printed;
-
- if (n2_spu_version_printed++ == 0)
- pr_info("%s", version);
-}
-
-static int n2_crypto_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_crypto *np;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
-
- np = alloc_n2cp();
- if (!np) {
- dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_n2cp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
- err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
- "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
- cpu_to_cwq);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- err = n2_register_algs();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
- dev->dev.of_node);
- goto out_free_spu_list;
- }
-
- dev_set_drvdata(&dev->dev, np);
-
- return 0;
-
-out_free_spu_list:
- spu_list_destroy(&np->cwq_list);
-
-out_free_global:
- release_global_resources();
-
-out_free_n2cp:
- free_n2cp(np);
-
- return err;
-}
-
-static void n2_crypto_remove(struct platform_device *dev)
-{
- struct n2_crypto *np = dev_get_drvdata(&dev->dev);
-
- n2_unregister_algs();
-
- spu_list_destroy(&np->cwq_list);
-
- release_global_resources();
-
- free_n2cp(np);
-}
-
-static struct n2_mau *alloc_ncp(void)
-{
- struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
-
- if (mp)
- INIT_LIST_HEAD(&mp->mau_list);
-
- return mp;
-}
-
-static void free_ncp(struct n2_mau *mp)
-{
- kfree(mp->mau_info.ino_table);
- mp->mau_info.ino_table = NULL;
-
- kfree(mp);
-}
-
-static int n2_mau_probe(struct platform_device *dev)
-{
- struct mdesc_handle *mdesc;
- struct n2_mau *mp;
- int err;
-
- n2_spu_driver_version();
-
- pr_info("Found NCP at %pOF\n", dev->dev.of_node);
-
- mp = alloc_ncp();
- if (!mp) {
- dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
- dev->dev.of_node);
- return -ENOMEM;
- }
-
- err = grab_global_resources();
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
- dev->dev.of_node);
- goto out_free_ncp;
- }
-
- mdesc = mdesc_grab();
-
- if (!mdesc) {
- dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
- dev->dev.of_node);
- err = -ENODEV;
- goto out_free_global;
- }
-
- err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
- if (err) {
- dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
- dev->dev.of_node);
- mdesc_release(mdesc);
- goto out_free_global;
- }
-
- err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
- "mau", HV_NCS_QTYPE_MAU, mau_intr,
- cpu_to_mau);
- mdesc_release(mdesc);
-
- if (err) {
- dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
- dev->dev.of_node);
- goto out_free_global;
- }
-
- dev_set_drvdata(&dev->dev, mp);
-
- return 0;
-
-out_free_global:
- release_global_resources();
-
-out_free_ncp:
- free_ncp(mp);
-
- return err;
-}
-
-static void n2_mau_remove(struct platform_device *dev)
-{
- struct n2_mau *mp = dev_get_drvdata(&dev->dev);
-
- spu_list_destroy(&mp->mau_list);
-
- release_global_resources();
-
- free_ncp(mp);
-}
-
-static const struct of_device_id n2_crypto_match[] = {
- {
- .name = "n2cp",
- .compatible = "SUNW,n2-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,vf-cwq",
- },
- {
- .name = "n2cp",
- .compatible = "SUNW,kt-cwq",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_crypto_match);
-
-static struct platform_driver n2_crypto_driver = {
- .driver = {
- .name = "n2cp",
- .of_match_table = n2_crypto_match,
- },
- .probe = n2_crypto_probe,
- .remove_new = n2_crypto_remove,
-};
-
-static const struct of_device_id n2_mau_match[] = {
- {
- .name = "ncp",
- .compatible = "SUNW,n2-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,vf-mau",
- },
- {
- .name = "ncp",
- .compatible = "SUNW,kt-mau",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, n2_mau_match);
-
-static struct platform_driver n2_mau_driver = {
- .driver = {
- .name = "ncp",
- .of_match_table = n2_mau_match,
- },
- .probe = n2_mau_probe,
- .remove_new = n2_mau_remove,
-};
-
-static struct platform_driver * const drivers[] = {
- &n2_crypto_driver,
- &n2_mau_driver,
-};
-
-static int __init n2_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-static void __exit n2_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-module_init(n2_init);
-module_exit(n2_exit);
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
deleted file mode 100644
index 2406763b0306..000000000000
--- a/drivers/crypto/n2_core.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _N2_CORE_H
-#define _N2_CORE_H
-
-#ifndef __ASSEMBLY__
-
-struct ino_blob {
- u64 intr;
- u64 ino;
-};
-
-struct spu_mdesc_info {
- u64 cfg_handle;
- struct ino_blob *ino_table;
- int num_intrs;
-};
-
-struct n2_crypto {
- struct spu_mdesc_info cwq_info;
- struct list_head cwq_list;
-};
-
-struct n2_mau {
- struct spu_mdesc_info mau_info;
- struct list_head mau_list;
-};
-
-#define CWQ_ENTRY_SIZE 64
-#define CWQ_NUM_ENTRIES 64
-
-#define MAU_ENTRY_SIZE 64
-#define MAU_NUM_ENTRIES 64
-
-struct cwq_initial_entry {
- u64 control;
- u64 src_addr;
- u64 auth_key_addr;
- u64 auth_iv_addr;
- u64 final_auth_state_addr;
- u64 enc_key_addr;
- u64 enc_iv_addr;
- u64 dest_addr;
-};
-
-struct cwq_ext_entry {
- u64 len;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-struct cwq_final_entry {
- u64 control;
- u64 src_addr;
- u64 resv1;
- u64 resv2;
- u64 resv3;
- u64 resv4;
- u64 resv5;
- u64 resv6;
-};
-
-#define CONTROL_LEN 0x000000000000ffffULL
-#define CONTROL_LEN_SHIFT 0
-#define CONTROL_HMAC_KEY_LEN 0x0000000000ff0000ULL
-#define CONTROL_HMAC_KEY_LEN_SHIFT 16
-#define CONTROL_ENC_TYPE 0x00000000ff000000ULL
-#define CONTROL_ENC_TYPE_SHIFT 24
-#define ENC_TYPE_ALG_RC4_STREAM 0x00ULL
-#define ENC_TYPE_ALG_RC4_NOSTREAM 0x04ULL
-#define ENC_TYPE_ALG_DES 0x08ULL
-#define ENC_TYPE_ALG_3DES 0x0cULL
-#define ENC_TYPE_ALG_AES128 0x10ULL
-#define ENC_TYPE_ALG_AES192 0x14ULL
-#define ENC_TYPE_ALG_AES256 0x18ULL
-#define ENC_TYPE_ALG_RESERVED 0x1cULL
-#define ENC_TYPE_ALG_MASK 0x1cULL
-#define ENC_TYPE_CHAINING_ECB 0x00ULL
-#define ENC_TYPE_CHAINING_CBC 0x01ULL
-#define ENC_TYPE_CHAINING_CFB 0x02ULL
-#define ENC_TYPE_CHAINING_COUNTER 0x03ULL
-#define ENC_TYPE_CHAINING_MASK 0x03ULL
-#define CONTROL_AUTH_TYPE 0x0000001f00000000ULL
-#define CONTROL_AUTH_TYPE_SHIFT 32
-#define AUTH_TYPE_RESERVED 0x00ULL
-#define AUTH_TYPE_MD5 0x01ULL
-#define AUTH_TYPE_SHA1 0x02ULL
-#define AUTH_TYPE_SHA256 0x03ULL
-#define AUTH_TYPE_CRC32 0x04ULL
-#define AUTH_TYPE_HMAC_MD5 0x05ULL
-#define AUTH_TYPE_HMAC_SHA1 0x06ULL
-#define AUTH_TYPE_HMAC_SHA256 0x07ULL
-#define AUTH_TYPE_TCP_CHECKSUM 0x08ULL
-#define AUTH_TYPE_SSL_HMAC_MD5 0x09ULL
-#define AUTH_TYPE_SSL_HMAC_SHA1 0x0aULL
-#define AUTH_TYPE_SSL_HMAC_SHA256 0x0bULL
-#define CONTROL_STRAND 0x000000e000000000ULL
-#define CONTROL_STRAND_SHIFT 37
-#define CONTROL_HASH_LEN 0x0000ff0000000000ULL
-#define CONTROL_HASH_LEN_SHIFT 40
-#define CONTROL_INTERRUPT 0x0001000000000000ULL
-#define CONTROL_STORE_FINAL_AUTH_STATE 0x0002000000000000ULL
-#define CONTROL_RESERVED 0x001c000000000000ULL
-#define CONTROL_HV_DONE 0x0004000000000000ULL
-#define CONTROL_HV_PROTOCOL_ERROR 0x0008000000000000ULL
-#define CONTROL_HV_HARDWARE_ERROR 0x0010000000000000ULL
-#define CONTROL_END_OF_BLOCK 0x0020000000000000ULL
-#define CONTROL_START_OF_BLOCK 0x0040000000000000ULL
-#define CONTROL_ENCRYPT 0x0080000000000000ULL
-#define CONTROL_OPCODE 0xff00000000000000ULL
-#define CONTROL_OPCODE_SHIFT 56
-#define OPCODE_INPLACE_BIT 0x80ULL
-#define OPCODE_SSL_KEYBLOCK 0x10ULL
-#define OPCODE_COPY 0x20ULL
-#define OPCODE_ENCRYPT 0x40ULL
-#define OPCODE_AUTH_MAC 0x41ULL
-
-#endif /* !(__ASSEMBLY__) */
-
-/* NCS v2.0 hypervisor interfaces */
-#define HV_NCS_QTYPE_MAU 0x01
-#define HV_NCS_QTYPE_CWQ 0x02
-
-/* ncs_qconf()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QCONF
- * ARG0: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * ARG1: Real address of queue, or handle for unconfigure
- * ARG2: Number of entries in queue, zero for unconfigure
- * RET0: status
- * RET1: queue handle
- *
- * Configure a queue in the stream processing unit.
- *
- * The real address given as the base must be 64-byte
- * aligned.
- *
- * The queue size can range from a minimum of 2 to a maximum
- * of 64. The queue size must be a power of two.
- *
- * To unconfigure a queue, specify a length of zero and place
- * the queue handle into ARG1.
- *
- * On configure success the hypervisor will set the FIRST, HEAD,
- * and TAIL registers to the address of the first entry in the
- * queue. The LAST register will be set to point to the last
- * entry in the queue.
- */
-#define HV_FAST_NCS_QCONF 0x111
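/*
 * Editorial sketch: how the wrapper declared at the bottom of this
 * header is used for configure and unconfigure, mirroring
 * spu_queue_register_workfn() and spu_queue_destroy() in n2_core.c.
 * 'queue_mem' is a placeholder for a 64-byte-aligned, power-of-two
 * sized queue allocation; error handling is elided.
 */
unsigned long hv_ret, qhandle;

hv_ret = sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, __pa(queue_mem),
			 CWQ_NUM_ENTRIES, &qhandle);
if (hv_ret == HV_EOK) {
	/* ... queue is live; submit entries ... */
	sun4v_ncs_qconf(HV_NCS_QTYPE_CWQ, qhandle, 0, &qhandle);
}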
-
-/* ncs_qinfo()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QINFO
- * ARG0: Queue handle
- * RET0: status
- * RET1: Queue type (HV_NCS_QTYPE_{MAU,CWQ})
- * RET2: Queue base address
- * RET3: Number of entries
- */
-#define HV_FAST_NCS_QINFO 0x112
-
-/* ncs_gethead()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETHEAD
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue head offset
- */
-#define HV_FAST_NCS_GETHEAD 0x113
-
-/* ncs_gettail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_GETTAIL
- * ARG0: Queue handle
- * RET0: status
- * RET1: queue tail offset
- */
-#define HV_FAST_NCS_GETTAIL 0x114
-
-/* ncs_settail()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETTAIL
- * ARG0: Queue handle
- * ARG1: New tail offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETTAIL 0x115
-
-/* ncs_qhandle_to_devino()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_QHANDLE_TO_DEVINO
- * ARG0: Queue handle
- * RET0: status
- * RET1: devino
- */
-#define HV_FAST_NCS_QHANDLE_TO_DEVINO 0x116
-
-/* ncs_sethead_marker()
- * TRAP: HV_FAST_TRAP
- * FUNCTION: HV_FAST_NCS_SETHEAD_MARKER
- * ARG0: Queue handle
- * ARG1: New head offset
- * RET0: status
- */
-#define HV_FAST_NCS_SETHEAD_MARKER 0x117
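/*
 * Editorial sketch of the ring discipline these calls imply: software
 * appends an entry and publishes it by advancing the tail, the SPU
 * advances the head as it consumes work, so head == tail means the
 * queue has drained (compare the wait_for_tail() calls in n2_core.c).
 * 'qhandle' and 'new_tail' are placeholders; offsets are byte offsets
 * into the queue.
 */
unsigned long head, tail;

sun4v_ncs_settail(qhandle, new_tail);	/* publish queued entries */
do {
	sun4v_ncs_gethead(qhandle, &head);
	sun4v_ncs_gettail(qhandle, &tail);
} while (head != tail);			/* busy-wait for completion */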
-
-#ifndef __ASSEMBLY__
-extern unsigned long sun4v_ncs_qconf(unsigned long queue_type,
- unsigned long queue_ra,
- unsigned long num_entries,
- unsigned long *qhandle);
-extern unsigned long sun4v_ncs_qinfo(unsigned long qhandle,
- unsigned long *queue_type,
- unsigned long *queue_ra,
- unsigned long *num_entries);
-extern unsigned long sun4v_ncs_gethead(unsigned long qhandle,
- unsigned long *head);
-extern unsigned long sun4v_ncs_gettail(unsigned long qhandle,
- unsigned long *tail);
-extern unsigned long sun4v_ncs_settail(unsigned long qhandle,
- unsigned long tail);
-extern unsigned long sun4v_ncs_qhandle_to_devino(unsigned long qhandle,
- unsigned long *devino);
-extern unsigned long sun4v_ncs_sethead_marker(unsigned long qhandle,
- unsigned long head);
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* _N2_CORE_H */
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 35f2d0d8507e..1660c5cf3641 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -133,7 +133,7 @@ struct nx842_devdata {
};
static struct nx842_devdata __rcu *devdata;
-static DEFINE_SPINLOCK(devdata_mutex);
+static DEFINE_SPINLOCK(devdata_spinlock);
#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
@@ -750,15 +750,15 @@ static int nx842_OF_upd(struct property *new_prop)
if (!new_devdata)
return -ENOMEM;
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
if (old_devdata)
of_node = old_devdata->dev->of_node;
if (!old_devdata || !of_node) {
pr_err("%s: device is not available\n", __func__);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
kfree(new_devdata);
return -ENODEV;
}
@@ -810,7 +810,7 @@ out:
old_devdata->max_sg_len);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(new_devdata->dev, new_devdata);
kfree(old_devdata);
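/*
 * Editorial note: the hunks in this file all follow the same RCU
 * publish/retire pattern -- mutate under the (now correctly named)
 * spinlock, publish with rcu_assign_pointer(), drop the lock, wait out
 * readers with synchronize_rcu(), then free the old copy.  Generic
 * shape, with placeholder names 'old' and 'new':
 */
spin_lock_irqsave(&devdata_spinlock, flags);
old = rcu_dereference_check(devdata,
			    lockdep_is_held(&devdata_spinlock));
rcu_assign_pointer(devdata, new);
spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();	/* no reader can still see 'old' */
kfree(old);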
@@ -821,13 +821,13 @@ error_out:
dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
nx842_OF_set_defaults(new_devdata);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(new_devdata->dev, new_devdata);
kfree(old_devdata);
} else {
dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
}
if (!ret)
@@ -1045,9 +1045,9 @@ static int nx842_probe(struct vio_dev *viodev,
return -ENOMEM;
}
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
if (old_devdata && old_devdata->vdev != NULL) {
dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
@@ -1062,7 +1062,7 @@ static int nx842_probe(struct vio_dev *viodev,
nx842_OF_set_defaults(new_devdata);
rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
kfree(old_devdata);
@@ -1101,7 +1101,7 @@ static int nx842_probe(struct vio_dev *viodev,
return 0;
error_unlock:
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
if (new_devdata)
kfree(new_devdata->counters);
kfree(new_devdata);
@@ -1122,12 +1122,13 @@ static void nx842_remove(struct vio_dev *viodev)
crypto_unregister_alg(&nx842_pseries_alg);
- spin_lock_irqsave(&devdata_mutex, flags);
- old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
of_reconfig_notifier_unregister(&nx842_of_nb);
+
+ spin_lock_irqsave(&devdata_spinlock, flags);
+ old_devdata = rcu_dereference_check(devdata,
+ lockdep_is_held(&devdata_spinlock));
RCU_INIT_POINTER(devdata, NULL);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
dev_set_drvdata(&viodev->dev, NULL);
if (old_devdata)
@@ -1257,11 +1258,11 @@ static void __exit nx842_pseries_exit(void)
crypto_unregister_alg(&nx842_pseries_alg);
- spin_lock_irqsave(&devdata_mutex, flags);
+ spin_lock_irqsave(&devdata_spinlock, flags);
old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
+ lockdep_is_held(&devdata_spinlock));
RCU_INIT_POINTER(devdata, NULL);
- spin_unlock_irqrestore(&devdata_mutex, flags);
+ spin_unlock_irqrestore(&devdata_spinlock, flags);
synchronize_rcu();
if (old_devdata && old_devdata->dev)
dev_set_drvdata(old_devdata->dev, NULL);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index bad1adacbc84..551dd32a8db0 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -18,7 +18,6 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -272,9 +271,9 @@ static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
+ dd->in_sg_offset = 0;
if (out_sg_len)
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -871,21 +870,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -904,20 +900,18 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < AES_BLOCK_WORDS; i++) {
*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
@@ -1305,7 +1299,7 @@ static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
static struct platform_driver omap_aes_driver = {
.probe = omap_aes_probe,
- .remove_new = omap_aes_remove,
+ .remove = omap_aes_remove,
.driver = {
.name = "omap-aes",
.pm = &omap_aes_pm_ops,
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 0f35c9164764..41d67780fd45 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -14,8 +14,6 @@
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
/*
* OMAP TRM gives bitfields as start:end, where start is the higher bit
* number. For example 7:0
@@ -186,8 +184,8 @@ struct omap_aes_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 209d3dc03a9b..a099460d5f21 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -19,7 +19,6 @@
#include <crypto/engine.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
@@ -40,8 +39,6 @@
#define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2)
-#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
-
#define DES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \
((x ^ 0x01) * 0x04))
@@ -152,8 +149,8 @@ struct omap_des_dev {
struct scatterlist out_sgl;
struct scatterlist *orig_out;
- struct scatter_walk in_walk;
- struct scatter_walk out_walk;
+ unsigned int in_sg_offset;
+ unsigned int out_sg_offset;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
int in_sg_len;
@@ -379,8 +376,8 @@ static int omap_des_crypt_dma(struct crypto_tfm *tfm,
int ret;
if (dd->pio_only) {
- scatterwalk_start(&dd->in_walk, dd->in_sg);
- scatterwalk_start(&dd->out_walk, dd->out_sg);
+ dd->in_sg_offset = 0;
+ dd->out_sg_offset = 0;
/* Enable DATAIN interrupt and let it take
care of the rest */
@@ -836,21 +833,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->in_sg);
- BUG_ON(_calc_walked(in) > dd->in_sg->length);
+ BUG_ON(dd->in_sg_offset > dd->in_sg->length);
- src = sg_virt(dd->in_sg) + _calc_walked(in);
+ src = sg_virt(dd->in_sg) + dd->in_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
omap_des_write(dd, DES_REG_DATA_N(dd, i), *src);
-
- scatterwalk_advance(&dd->in_walk, 4);
- if (dd->in_sg->length == _calc_walked(in)) {
+ dd->in_sg_offset += 4;
+ if (dd->in_sg_offset == dd->in_sg->length) {
dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
- scatterwalk_start(&dd->in_walk,
- dd->in_sg);
- src = sg_virt(dd->in_sg) +
- _calc_walked(in);
+ dd->in_sg_offset = 0;
+ src = sg_virt(dd->in_sg);
}
} else {
src++;
@@ -869,20 +863,18 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
BUG_ON(!dd->out_sg);
- BUG_ON(_calc_walked(out) > dd->out_sg->length);
+ BUG_ON(dd->out_sg_offset > dd->out_sg->length);
- dst = sg_virt(dd->out_sg) + _calc_walked(out);
+ dst = sg_virt(dd->out_sg) + dd->out_sg_offset;
for (i = 0; i < DES_BLOCK_WORDS; i++) {
*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
- scatterwalk_advance(&dd->out_walk, 4);
- if (dd->out_sg->length == _calc_walked(out)) {
+ dd->out_sg_offset += 4;
+ if (dd->out_sg_offset == dd->out_sg->length) {
dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
- scatterwalk_start(&dd->out_walk,
- dd->out_sg);
- dst = sg_virt(dd->out_sg) +
- _calc_walked(out);
+ dd->out_sg_offset = 0;
+ dst = sg_virt(dd->out_sg);
}
} else {
dst++;
@@ -1115,7 +1107,7 @@ static SIMPLE_DEV_PM_OPS(omap_des_pm_ops, omap_des_suspend, omap_des_resume);
static struct platform_driver omap_des_driver = {
.probe = omap_des_probe,
- .remove_new = omap_des_remove,
+ .remove = omap_des_remove,
.driver = {
.name = "omap-des",
.pm = &omap_des_pm_ops,
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 5bcd9ab0f72a..7021481bf027 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2216,7 +2216,7 @@ static void omap_sham_remove(struct platform_device *pdev)
static struct platform_driver omap_sham_driver = {
.probe = omap_sham_probe,
- .remove_new = omap_sham_remove,
+ .remove = omap_sham_remove,
.driver = {
.name = "omap-sham",
.of_match_table = omap_sham_of_match,
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 7d811728f047..97b56e92ea33 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
alg->init = qce_aead_init;
alg->exit = qce_aead_exit;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index 28b5fd823827..e95e84486d9a 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -3,14 +3,15 @@
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
@@ -37,9 +38,10 @@ static const struct qce_algo_ops *qce_ops[] = {
#endif
};
-static void qce_unregister_algs(struct qce_device *qce)
+static void qce_unregister_algs(void *data)
{
const struct qce_algo_ops *ops;
+ struct qce_device *qce = data;
int i;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
@@ -48,19 +50,22 @@ static void qce_unregister_algs(struct qce_device *qce)
}
}
-static int qce_register_algs(struct qce_device *qce)
+static int devm_qce_register_algs(struct qce_device *qce)
{
const struct qce_algo_ops *ops;
- int i, ret = -ENODEV;
+ int i, j, ret = -ENODEV;
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
ops = qce_ops[i];
ret = ops->register_algs(qce);
- if (ret)
- break;
+ if (ret) {
+ for (j = i - 1; j >= 0; j--)
+ qce_ops[j]->unregister_algs(qce);
+ return ret;
+ }
}
- return ret;
+ return devm_add_action_or_reset(qce->dev, qce_unregister_algs, qce);
}
static int qce_handle_request(struct crypto_async_request *async_req)
@@ -84,55 +89,49 @@ static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *req)
{
struct crypto_async_request *async_req, *backlog;
- unsigned long flags;
int ret = 0, err;
- spin_lock_irqsave(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ if (req)
+ ret = crypto_enqueue_request(&qce->queue, req);
- if (req)
- ret = crypto_enqueue_request(&qce->queue, req);
+ /* busy, do not dequeue request */
+ if (qce->req)
+ return ret;
- /* busy, do not dequeue request */
- if (qce->req) {
- spin_unlock_irqrestore(&qce->lock, flags);
- return ret;
+ backlog = crypto_get_backlog(&qce->queue);
+ async_req = crypto_dequeue_request(&qce->queue);
+ if (async_req)
+ qce->req = async_req;
}
- backlog = crypto_get_backlog(&qce->queue);
- async_req = crypto_dequeue_request(&qce->queue);
- if (async_req)
- qce->req = async_req;
-
- spin_unlock_irqrestore(&qce->lock, flags);
-
if (!async_req)
return ret;
if (backlog) {
- spin_lock_bh(&qce->lock);
- crypto_request_complete(backlog, -EINPROGRESS);
- spin_unlock_bh(&qce->lock);
+ scoped_guard(mutex, &qce->lock)
+ crypto_request_complete(backlog, -EINPROGRESS);
}
err = qce_handle_request(async_req);
if (err) {
qce->result = err;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
return ret;
}
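/*
 * Editorial note: scoped_guard() comes from <linux/cleanup.h> (pulled
 * in at the top of this file by this patch).  It holds the lock for
 * exactly the lifetime of the attached block and releases it on every
 * exit path, which is why the early 'return ret' inside the guarded
 * block above needs no explicit unlock.  Minimal standalone sketch
 * with invented names:
 */
#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_counter;

static int demo_increment(void)
{
	int val;

	scoped_guard(mutex, &demo_lock) {
		if (demo_counter < 0)
			return -EINVAL;	/* guard unlocks on this return */
		val = ++demo_counter;
	}
	return val;	/* lock already dropped here */
}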
-static void qce_tasklet_req_done(unsigned long data)
+static void qce_req_done_work(struct work_struct *work)
{
- struct qce_device *qce = (struct qce_device *)data;
+ struct qce_device *qce = container_of(work, struct qce_device,
+ done_work);
struct crypto_async_request *req;
- unsigned long flags;
- spin_lock_irqsave(&qce->lock, flags);
- req = qce->req;
- qce->req = NULL;
- spin_unlock_irqrestore(&qce->lock, flags);
+ scoped_guard(mutex, &qce->lock) {
+ req = qce->req;
+ qce->req = NULL;
+ }
if (req)
crypto_request_complete(req, qce->result);
@@ -149,7 +148,7 @@ static int qce_async_request_enqueue(struct qce_device *qce,
static void qce_async_request_done(struct qce_device *qce, int ret)
{
qce->result = ret;
- tasklet_schedule(&qce->done_tasklet);
+ schedule_work(&qce->done_work);
}
static int qce_check_version(struct qce_device *qce)
@@ -209,15 +208,15 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- qce->core = devm_clk_get_optional(qce->dev, "core");
+ qce->core = devm_clk_get_optional_enabled(qce->dev, "core");
if (IS_ERR(qce->core))
return PTR_ERR(qce->core);
- qce->iface = devm_clk_get_optional(qce->dev, "iface");
+ qce->iface = devm_clk_get_optional_enabled(qce->dev, "iface");
if (IS_ERR(qce->iface))
return PTR_ERR(qce->iface);
- qce->bus = devm_clk_get_optional(qce->dev, "bus");
+ qce->bus = devm_clk_get_optional_enabled(qce->dev, "bus");
if (IS_ERR(qce->bus))
return PTR_ERR(qce->bus);
@@ -229,64 +228,25 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_prepare_enable(qce->core);
+ ret = devm_qce_dma_request(qce->dev, &qce->dma);
if (ret)
- goto err_mem_path_disable;
-
- ret = clk_prepare_enable(qce->iface);
- if (ret)
- goto err_clks_core;
-
- ret = clk_prepare_enable(qce->bus);
- if (ret)
- goto err_clks_iface;
+ return ret;
- ret = qce_dma_request(qce->dev, &qce->dma);
+ ret = qce_check_version(qce);
if (ret)
- goto err_clks;
+ return ret;
- ret = qce_check_version(qce);
+ ret = devm_mutex_init(qce->dev, &qce->lock);
if (ret)
- goto err_clks;
+ return ret;
- spin_lock_init(&qce->lock);
- tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
- (unsigned long)qce);
+ INIT_WORK(&qce->done_work, qce_req_done_work);
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
qce->async_req_enqueue = qce_async_request_enqueue;
qce->async_req_done = qce_async_request_done;
- ret = qce_register_algs(qce);
- if (ret)
- goto err_dma;
-
- return 0;
-
-err_dma:
- qce_dma_release(&qce->dma);
-err_clks:
- clk_disable_unprepare(qce->bus);
-err_clks_iface:
- clk_disable_unprepare(qce->iface);
-err_clks_core:
- clk_disable_unprepare(qce->core);
-err_mem_path_disable:
- icc_set_bw(qce->mem_path, 0, 0);
-
- return ret;
-}
-
-static void qce_crypto_remove(struct platform_device *pdev)
-{
- struct qce_device *qce = platform_get_drvdata(pdev);
-
- tasklet_kill(&qce->done_tasklet);
- qce_unregister_algs(qce);
- qce_dma_release(&qce->dma);
- clk_disable_unprepare(qce->bus);
- clk_disable_unprepare(qce->iface);
- clk_disable_unprepare(qce->core);
+ return devm_qce_register_algs(qce);
}
static const struct of_device_id qce_crypto_of_match[] = {
@@ -299,7 +259,6 @@ MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
- .remove_new = qce_crypto_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index 228fcd69ec51..eb6fa7a8b64a 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -6,13 +6,16 @@
#ifndef _CORE_H_
#define _CORE_H_
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
#include "dma.h"
/**
* struct qce_device - crypto engine device structure
* @queue: crypto request queue
* @lock: the lock protects queue and req
- * @done_tasklet: done tasklet object
+ * @done_work: workqueue context
* @req: current active request
* @result: result of current transform
* @base: virtual IO base
@@ -28,8 +31,8 @@
*/
struct qce_device {
struct crypto_queue queue;
- spinlock_t lock;
- struct tasklet_struct done_tasklet;
+ struct mutex lock;
+ struct work_struct done_work;
struct crypto_async_request *req;
int result;
void __iomem *base;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 46db5bf366b4..1dec7aea852d 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -3,12 +3,22 @@
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/device.h>
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>
#include "dma.h"
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+static void qce_dma_release(void *data)
+{
+ struct qce_dma_data *dma = data;
+
+ dma_release_channel(dma->txchan);
+ dma_release_channel(dma->rxchan);
+ kfree(dma->result_buf);
+}
+
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
int ret;
@@ -31,7 +41,8 @@ int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
- return 0;
+ return devm_add_action_or_reset(dev, qce_dma_release, dma);
+
error_nomem:
dma_release_channel(dma->rxchan);
error_rx:
@@ -39,13 +50,6 @@ error_rx:
return ret;
}
-void qce_dma_release(struct qce_dma_data *dma)
-{
- dma_release_channel(dma->txchan);
- dma_release_channel(dma->rxchan);
- kfree(dma->result_buf);
-}
-
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
unsigned int max_len)
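
The dma.c conversion above illustrates the devm_add_action_or_reset() idiom this series leans on: register the undo callback when the resources are acquired, so both probe error paths and remove are unwound automatically. A stripped-down sketch under hypothetical names:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>

struct foo_dma {
	struct dma_chan *chan;
	void *buf;
};

static void foo_dma_release(void *data)
{
	struct foo_dma *dma = data;

	dma_release_channel(dma->chan);
	kfree(dma->buf);
}

static int devm_foo_dma_request(struct device *dev, struct foo_dma *dma)
{
	dma->chan = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->chan))
		return PTR_ERR(dma->chan);

	dma->buf = kzalloc(4096, GFP_KERNEL);
	if (!dma->buf) {
		dma_release_channel(dma->chan);
		return -ENOMEM;
	}

	/* foo_dma_release() runs at driver detach; if registration
	 * fails it is invoked immediately ("or_reset") */
	return devm_add_action_or_reset(dev, foo_dma_release, dma);
}
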
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
index 786402169360..31629185000e 100644
--- a/drivers/crypto/qce/dma.h
+++ b/drivers/crypto/qce/dma.h
@@ -34,8 +34,7 @@ struct qce_dma_data {
void *ignore_buf;
};
-int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
-void qce_dma_release(struct qce_dma_data *dma);
+int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma);
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
int in_ents, struct scatterlist *sg_out, int out_ents,
dma_async_tx_callback cb, void *cb_param);
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index fc72af8aa9a7..71b748183cfa 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
- base->cra_priority = 300;
+ base->cra_priority = 175;
base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
base->cra_ctxsize = sizeof(struct qce_sha_ctx);
base->cra_alignmask = 0;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 5b493fdc1e74..ffb334eb5b34 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->encrypt = qce_skcipher_encrypt;
alg->decrypt = qce_skcipher_decrypt;
- alg->base.cra_priority = 300;
+ alg->base.cra_priority = 275;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
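
The cra_priority drops above (300 to 175 for hashes, 300 to 275 for skciphers) matter because the crypto API resolves a cra_name to the highest-priority registered implementation, so lowering qce lets competing implementations win the lookup. A usage sketch of that resolution:

#include <crypto/skcipher.h>
#include <linux/err.h>

static int foo_get_aes(void)
{
	struct crypto_skcipher *tfm;

	/* the core resolves "ecb(aes)" to the implementation with the
	 * highest cra_priority among those registered */
	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_free_skcipher(tfm);
	return 0;
}
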
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 09419e79e34c..0685ba122e8a 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -262,7 +262,7 @@ MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
static struct platform_driver qcom_rng_driver = {
.probe = qcom_rng_probe,
- .remove_new = qcom_rng_remove,
+ .remove = qcom_rng_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(qcom_rng_of_match),
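
The many .remove_new to .remove renames in this section are mechanical: struct platform_driver's .remove now carries the void-returning prototype that .remove_new provided during the transition. The resulting shape, shown with a hypothetical driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/* devm-managed resources are released by the core */
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,	/* formerly .remove_new */
	.driver	= {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);
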
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index f74b3c81ba6d..b77bdce8e7fc 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -433,7 +433,7 @@ static void rk_crypto_remove(struct platform_device *pdev)
static struct platform_driver crypto_driver = {
.probe = rk_crypto_probe,
- .remove_new = rk_crypto_remove,
+ .remove = rk_crypto_remove,
.driver = {
.name = "rk3288-crypto",
.pm = &rk_crypto_pm_ops,
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 8b6e3f5c94de..57ab237e899e 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -2335,7 +2335,7 @@ static void s5p_aes_remove(struct platform_device *pdev)
static struct platform_driver s5p_aes_crypto = {
.probe = s5p_aes_probe,
- .remove_new = s5p_aes_remove,
+ .remove = s5p_aes_remove,
.driver = {
.name = "s5p-secss",
.of_match_table = s5p_sss_dt_match,
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 461eca40e878..091612b066f1 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -574,7 +574,7 @@ static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
/* Clear the command label */
memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
- /* Iniialize the command update structure */
+ /* Initialize the command update structure */
memzero_explicit(upd_info, sizeof(*upd_info));
if (cfg->enc_eng_id && cfg->auth_eng_id) {
@@ -2489,7 +2489,7 @@ static void sa_ul_remove(struct platform_device *pdev)
static struct platform_driver sa_ul_driver = {
.probe = sa_ul_probe,
- .remove_new = sa_ul_remove,
+ .remove = sa_ul_remove,
.driver = {
.name = "saul-crypto",
.of_match_table = of_match,
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 96d4af5d48a6..533080b0cddc 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1421,7 +1421,7 @@ static void sahara_remove(struct platform_device *pdev)
static struct platform_driver sahara_driver = {
.probe = sahara_probe,
- .remove_new = sahara_remove,
+ .remove = sahara_remove,
.driver = {
.name = SAHARA_NAME,
.of_match_table = sahara_dt_ids,
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index e4dfed7ee0b0..42114e9364f0 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -151,7 +151,7 @@ static int starfive_cryp_probe(struct platform_device *pdev)
ret = starfive_aes_register_algs();
if (ret)
- goto err_algs_aes;
+ goto err_engine_start;
ret = starfive_hash_register_algs();
if (ret)
@@ -167,8 +167,6 @@ err_algs_rsa:
starfive_hash_unregister_algs();
err_algs_hash:
starfive_aes_unregister_algs();
-err_algs_aes:
- crypto_engine_stop(cryp->engine);
err_engine_start:
crypto_engine_exit(cryp->engine);
err_engine:
@@ -193,7 +191,6 @@ static void starfive_cryp_remove(struct platform_device *pdev)
starfive_hash_unregister_algs();
starfive_rsa_unregister_algs();
- crypto_engine_stop(cryp->engine);
crypto_engine_exit(cryp->engine);
starfive_dma_cleanup(cryp);
@@ -215,7 +212,7 @@ MODULE_DEVICE_TABLE(of, starfive_dt_ids);
static struct platform_driver starfive_cryp_driver = {
.probe = starfive_cryp_probe,
- .remove_new = starfive_cryp_remove,
+ .remove = starfive_cryp_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = starfive_dt_ids,
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
index a778c4846025..d109c743f076 100644
--- a/drivers/crypto/starfive/jh7110-rsa.c
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -565,8 +565,6 @@ static void starfive_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg starfive_rsa = {
.encrypt = starfive_rsa_enc,
.decrypt = starfive_rsa_dec,
- .sign = starfive_rsa_dec,
- .verify = starfive_rsa_enc,
.set_pub_key = starfive_rsa_set_pub_key,
.set_priv_key = starfive_rsa_set_priv_key,
.max_size = starfive_rsa_max_size,
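
The .sign/.verify removals here (and in the virtio hunks below) follow the split of signing out of crypto_akcipher into the dedicated crypto_sig API. A hedged sketch of the consumer side; the "pkcs1(rsa,sha256)" template name is assumed from the rsassa-pkcs1 rework, and key setup via crypto_sig_set_pubkey() is omitted:

#include <crypto/sig.h>
#include <linux/err.h>

static int foo_verify(const void *sig, unsigned int slen,
		      const void *digest, unsigned int dlen)
{
	struct crypto_sig *tfm;
	int ret;

	tfm = crypto_alloc_sig("pkcs1(rsa,sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_sig_verify(tfm, sig, slen, digest, dlen);
	crypto_free_sig(tfm);
	return ret;
}
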
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index e0faddbf8990..de4d0402f133 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -465,7 +465,7 @@ MODULE_DEVICE_TABLE(of, stm32_dt_ids);
static struct platform_driver stm32_crc_driver = {
.probe = stm32_crc_probe,
- .remove_new = stm32_crc_remove,
+ .remove = stm32_crc_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_crc_pm_ops,
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 937f6dab8955..14c6339c2e43 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -2771,7 +2771,7 @@ static const struct dev_pm_ops stm32_cryp_pm_ops = {
static struct platform_driver stm32_cryp_driver = {
.probe = stm32_cryp_probe,
- .remove_new = stm32_cryp_remove,
+ .remove = stm32_cryp_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_cryp_pm_ops,
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 351827372ea6..768b27de4737 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -2532,7 +2532,7 @@ static const struct dev_pm_ops stm32_hash_pm_ops = {
static struct platform_driver stm32_hash_driver = {
.probe = stm32_hash_probe,
- .remove_new = stm32_hash_remove,
+ .remove = stm32_hash_remove,
.driver = {
.name = "stm32-hash",
.pm = &stm32_hash_pm_ops,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 511ddcb0efd4..e8c0db687c57 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -3560,7 +3560,7 @@ static struct platform_driver talitos_driver = {
.of_match_table = talitos_match,
},
.probe = talitos_probe,
- .remove_new = talitos_remove,
+ .remove = talitos_remove,
};
module_platform_driver(talitos_driver);
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index ae7a0f8435fc..d734c9a56786 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -1180,8 +1180,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
goto out;
} else {
rctx->cryptlen = req->cryptlen - ctx->authsize;
- if (ret)
- goto out;
/* CTR operation */
ret = tegra_ccm_do_ctr(ctx, rctx);
@@ -1752,10 +1750,13 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ int ret;
- tegra_cmac_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ ret = tegra_cmac_init(req);
+ if (ret)
+ return ret;
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 4d4bd727f498..0b5cdd5676b1 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -615,13 +615,16 @@ static int tegra_sha_digest(struct ahash_request *req)
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
- tegra_sha_init(req);
- rctx->task |= SHA_UPDATE | SHA_FINAL;
+ ret = tegra_sha_init(req);
+ if (ret)
+ return ret;
+ rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
index f94c0331b148..918c0b10614d 100644
--- a/drivers/crypto/tegra/tegra-se-main.c
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -312,7 +312,6 @@ static int tegra_se_probe(struct platform_device *pdev)
ret = tegra_se_host1x_register(se);
if (ret) {
- crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
return dev_err_probe(dev, ret, "failed to init host1x params\n");
}
@@ -324,7 +323,6 @@ static void tegra_se_remove(struct platform_device *pdev)
{
struct tegra_se *se = platform_get_drvdata(pdev);
- crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
host1x_client_unregister(&se->client);
}
@@ -387,7 +385,7 @@ static struct platform_driver tegra_se_driver = {
.of_match_table = tegra_se_of_match,
},
.probe = tegra_se_probe,
- .remove_new = tegra_se_remove,
+ .remove = tegra_se_remove,
};
static int tegra_se_host1x_probe(struct host1x_device *dev)
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index cb92b7fa99c6..48fee07b7e51 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -83,23 +83,16 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
case VIRTIO_CRYPTO_BADMSG:
error = -EBADMSG;
break;
-
- case VIRTIO_CRYPTO_KEY_REJECTED:
- error = -EKEYREJECTED;
- break;
-
default:
error = -EIO;
break;
}
akcipher_req = vc_akcipher_req->akcipher_req;
- if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
- /* actuall length maybe less than dst buffer */
- akcipher_req->dst_len = len - sizeof(vc_req->status);
- sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
- vc_akcipher_req->dst_buf, akcipher_req->dst_len);
- }
+ /* actual length may be less than dst buffer */
+ akcipher_req->dst_len = len - sizeof(vc_req->status);
+ sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
+ vc_akcipher_req->dst_buf, akcipher_req->dst_len);
virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}
@@ -230,36 +223,27 @@ static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request
int node = dev_to_node(&vcrypto->vdev->dev);
unsigned long flags;
int ret;
- bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
- unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
/* out header */
sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
sgs[num_out++] = &outhdr_sg;
/* src data */
- src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
+ src_buf = kcalloc_node(req->src_len, 1, GFP_KERNEL, node);
if (!src_buf)
return -ENOMEM;
- if (verify) {
- /* for verify operation, both src and dst data work as OUT direction */
- sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
- sg_init_one(&srcdata_sg, src_buf, src_len);
- sgs[num_out++] = &srcdata_sg;
- } else {
- sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
- sg_init_one(&srcdata_sg, src_buf, src_len);
- sgs[num_out++] = &srcdata_sg;
+ sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, req->src_len);
+ sg_init_one(&srcdata_sg, src_buf, req->src_len);
+ sgs[num_out++] = &srcdata_sg;
- /* dst data */
- dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
- if (!dst_buf)
- goto free_src;
+ /* dst data */
+ dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
+ if (!dst_buf)
+ goto free_src;
- sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
- sgs[num_out + num_in++] = &dstdata_sg;
- }
+ sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
+ sgs[num_out + num_in++] = &dstdata_sg;
vc_akcipher_req->src_buf = src_buf;
vc_akcipher_req->dst_buf = dst_buf;
@@ -352,16 +336,6 @@ static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}
-static int virtio_crypto_rsa_sign(struct akcipher_request *req)
-{
- return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
-}
-
-static int virtio_crypto_rsa_verify(struct akcipher_request *req)
-{
- return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
-}
-
static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
const void *key,
unsigned int keylen,
@@ -524,16 +498,19 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
.algo.base = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
- .sign = virtio_crypto_rsa_sign,
- .verify = virtio_crypto_rsa_verify,
+ /*
+ * Must specify an arbitrary hash algorithm upon
+ * set_{pub,priv}_key (even though it's not used
+ * by encrypt/decrypt) because qemu checks for it.
+ */
.set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
.set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
.max_size = virtio_crypto_rsa_max_size,
.init = virtio_crypto_rsa_init_tfm,
.exit = virtio_crypto_rsa_exit_tfm,
.base = {
- .cra_name = "pkcs1pad(rsa,sha1)",
- .cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
+ .cra_name = "pkcs1pad(rsa)",
+ .cra_driver_name = "virtio-pkcs1-rsa",
.cra_priority = 150,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 7f0ec6887a39..6e72d9229410 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -438,7 +438,7 @@ MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
static struct platform_driver zynqmp_aes_driver = {
.probe = zynqmp_aes_aead_probe,
- .remove_new = zynqmp_aes_aead_remove,
+ .remove = zynqmp_aes_aead_remove,
.driver = {
.name = "zynqmp-aes",
.of_match_table = zynqmp_aes_dt_ids,
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 1bcec6f46c9c..580649f9bff8 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -248,7 +248,7 @@ static void zynqmp_sha_remove(struct platform_device *pdev)
static struct platform_driver zynqmp_sha_driver = {
.probe = zynqmp_sha_probe,
- .remove_new = zynqmp_sha_remove,
+ .remove = zynqmp_sha_remove,
.driver = {
.name = "zynqmp-sha3-384",
},
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 29c192f20082..876469e23f7a 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -60,6 +60,7 @@ config CXL_ACPI
default CXL_BUS
select ACPI_TABLE_LIB
select ACPI_HMAT
+ select CXL_PORT
help
Enable support for host managed device memory (HDM) resources
published by a platform's ACPI CXL memory layout description. See
diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile
index db321f48ba52..2caa90fa4bf2 100644
--- a/drivers/cxl/Makefile
+++ b/drivers/cxl/Makefile
@@ -1,13 +1,21 @@
# SPDX-License-Identifier: GPL-2.0
+
+# Order is important here for the built-in case:
+# - 'core' first for fundamental init
+# - 'port' before platform root drivers like 'acpi' so that CXL-root ports
+# are immediately enabled
+# - 'mem' and 'pmem' before endpoint drivers so that memdevs are
+# immediately enabled
+# - 'pci' last, also mirrors the hardware enumeration hierarchy
obj-y += core/
-obj-$(CONFIG_CXL_PCI) += cxl_pci.o
-obj-$(CONFIG_CXL_MEM) += cxl_mem.o
+obj-$(CONFIG_CXL_PORT) += cxl_port.o
obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o
obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o
-obj-$(CONFIG_CXL_PORT) += cxl_port.o
+obj-$(CONFIG_CXL_MEM) += cxl_mem.o
+obj-$(CONFIG_CXL_PCI) += cxl_pci.o
-cxl_mem-y := mem.o
-cxl_pci-y := pci.o
+cxl_port-y := port.o
cxl_acpi-y := acpi.o
cxl_pmem-y := pmem.o security.o
-cxl_port-y := port.o
+cxl_mem-y := mem.o
+cxl_pci-y := pci.o
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 82b78e331d8e..cb14829bb9be 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -924,8 +924,15 @@ static void __exit cxl_acpi_exit(void)
/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);
+
+/*
+ * Arrange for host-bridge ports to be active synchronous with
+ * cxl_acpi_probe() exit.
+ */
+MODULE_SOFTDEP("pre: cxl_port");
+
module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CXL);
-MODULE_IMPORT_NS(ACPI);
+MODULE_IMPORT_NS("CXL");
+MODULE_IMPORT_NS("ACPI");
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index ef1621d40f05..8153f8d83a16 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -247,8 +247,8 @@ static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
dpa_perf->dpa_range = dent->dpa_range;
dpa_perf->qos_class = dent->qos_class;
dev_dbg(dev,
- "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
- dent->dpa_range.start, dpa_perf->qos_class,
+ "DSMAS: dpa: %pra qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
+ &dent->dpa_range, dpa_perf->qos_class,
dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
dent->coord[ACCESS_COORDINATE_CPU].read_latency,
@@ -279,8 +279,8 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
range_contains(&pmem_range, &dent->dpa_range))
update_perf_entry(dev, dent, &mds->pmem_perf);
else
- dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
- dent->dpa_range.start);
+ dev_dbg(dev, "no partition for dsmas dpa: %pra\n",
+ &dent->dpa_range);
}
}
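
The two hunks above stop hand-printing range.start and use the %pra printk specifier, which renders a struct range as its full start-end span. A minimal sketch of the call site; the exact output formatting is whatever lib/vsprintf.c emits for ranges:

#include <linux/device.h>
#include <linux/range.h>

static void foo_show_range(struct device *dev)
{
	struct range r = {
		.start = 0x1000,
		.end   = 0x1fff,
	};

	/* %pra takes a 'struct range *' and prints both bounds */
	dev_dbg(dev, "dpa: %pra\n", &r);
}
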
@@ -416,7 +416,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
cxl_qos_class_verify(cxlmd);
cxl_memdev_update_perf(cxlmd);
}
-EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, "CXL");
static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
@@ -513,7 +513,7 @@ void cxl_switch_parse_cdat(struct cxl_port *port)
if (rc)
dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
}
-EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, "CXL");
static void __cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c1,
@@ -545,7 +545,7 @@ void cxl_coordinates_combine(struct access_coordinate *out,
__cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
}
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
static void cxl_bandwidth_add(struct access_coordinate *coord,
struct access_coordinate *c1,
@@ -641,6 +641,9 @@ static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
void *ptr;
int rc;
+ if (!dev_is_pci(cxlds->dev))
+ return -ENODEV;
+
if (cxlds->rcd)
return -ENODEV;
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 0c62b4069ba0..800466f96a68 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -89,6 +89,11 @@ resource_size_t __rcrb_to_component(struct device *dev,
enum cxl_rcrb which);
u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
+#define PCI_RCRB_CAP_LIST_ID_MASK GENMASK(7, 0)
+#define PCI_RCRB_CAP_HDR_ID_MASK GENMASK(7, 0)
+#define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8)
+#define PCI_CAP_EXP_SIZEOF 0x3c
+
extern struct rw_semaphore cxl_dpa_rwsem;
extern struct rw_semaphore cxl_region_rwsem;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 3df10517a327..50e6a45b30ba 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -73,7 +73,7 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, "CXL");
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
@@ -199,7 +199,7 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
return cxlhdm;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, "CXL");
static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
@@ -221,7 +221,7 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
}
up_read(&cxl_dpa_rwsem);
}
-EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL");
/*
* Must be called in a context that synchronizes against this decoder's
@@ -358,7 +358,7 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
@@ -424,7 +424,6 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxled->cxld.dev;
- int rc;
switch (mode) {
case CXL_DECODER_RAM:
@@ -435,11 +434,9 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
return -EINVAL;
}
- down_write(&cxl_dpa_rwsem);
- if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
- rc = -EBUSY;
- goto out;
- }
+ guard(rwsem_write)(&cxl_dpa_rwsem);
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
+ return -EBUSY;
/*
* Only allow modes that are supported by the current partition
@@ -447,21 +444,15 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
*/
if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
dev_dbg(dev, "no available pmem capacity\n");
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
dev_dbg(dev, "no available ram capacity\n");
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
cxled->mode = mode;
- rc = 0;
-out:
- up_write(&cxl_dpa_rwsem);
-
- return rc;
+ return 0;
}
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
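
The cxl_dpa_set_mode() rewrite above relies on guard() from <linux/cleanup.h>: the rwsem is released automatically when the guard variable goes out of scope, which is what makes the direct error returns safe. Minimal sketch with hypothetical names:

#include <linux/cleanup.h>
#include <linux/rwsem.h>

struct foo {
	bool enabled;
	int mode;
};

static DECLARE_RWSEM(foo_rwsem);

static int foo_set_mode(struct foo *foo, int mode)
{
	guard(rwsem_write)(&foo_rwsem);

	if (foo->enabled)
		return -EBUSY;	/* rwsem released on return */

	foo->mode = mode;
	return 0;		/* ...and on this path too */
}
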
@@ -712,7 +703,44 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
return 0;
}
-static int cxl_decoder_reset(struct cxl_decoder *cxld)
+static int commit_reap(struct device *dev, void *data)
+{
+ struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_decoder *cxld;
+
+ if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+ if (port->commit_end == cxld->id &&
+ ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
+ port->commit_end--;
+ dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
+ dev_name(&cxld->dev), port->commit_end);
+ }
+
+ return 0;
+}
+
+void cxl_port_commit_reap(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+
+ lockdep_assert_held_write(&cxl_region_rwsem);
+
+ /*
+ * Once the highest committed decoder is disabled, free any other
+ * decoders that were pinned allocated by out-of-order release.
+ */
+ port->commit_end--;
+ dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
+ port->commit_end);
+ device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
+ commit_reap);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, "CXL");
+
+static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
@@ -721,14 +749,14 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
u32 ctrl;
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
- return 0;
+ return;
- if (port->commit_end != id) {
+ if (port->commit_end == id)
+ cxl_port_commit_reap(cxld);
+ else
dev_dbg(&port->dev,
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
- return -EBUSY;
- }
down_read(&cxl_dpa_rwsem);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
@@ -741,7 +769,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
up_read(&cxl_dpa_rwsem);
- port->commit_end--;
cxld->flags &= ~CXL_DECODER_F_ENABLE;
/* Userspace is now responsible for reconfiguring this decoder */
@@ -751,8 +778,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
cxled = to_cxl_endpoint_decoder(&cxld->dev);
cxled->state = CXL_DECODER_STATE_MANUAL;
}
-
- return 0;
}
static int cxl_setup_hdm_decoder_from_dvsec(
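
cxl_decoder_reset() now tolerates out-of-order teardown: when the highest-committed decoder is reset, cxl_port_commit_reap() walks the port's children in reverse from that decoder and retires every already-disabled sibling, stepping commit_end back past them. A sketch of the device_for_each_child_reverse_from() contract the reap uses (callback and names illustrative):

#include <linux/device.h>

static int foo_count(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* non-zero stops the walk early */
}

static int foo_count_preceding(struct device *parent, struct device *from)
{
	int count = 0;

	/* walks @parent's children in reverse registration order,
	 * beginning at @from */
	device_for_each_child_reverse_from(parent, from, &count, foo_count);
	return count;
}
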
@@ -1039,4 +1064,4 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, "CXL");
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 5175138c4fb7..548564c770c0 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -281,7 +281,7 @@ int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
return -EIO;
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, "CXL");
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
@@ -854,7 +854,7 @@ out:
kvfree(gsl);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");
void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
@@ -894,7 +894,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
@@ -1063,7 +1063,7 @@ void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
if (status & CXLDEV_EVENT_STATUS_INFO)
cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, "CXL");
/**
* cxl_mem_get_partition_info - Get partition info
@@ -1155,7 +1155,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, "CXL");
static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
@@ -1306,7 +1306,7 @@ int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
mds->active_volatile_bytes,
mds->active_persistent_bytes, "pmem");
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, "CXL");
int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
@@ -1333,7 +1333,7 @@ int cxl_set_timestamp(struct cxl_memdev_state *mds)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, "CXL");
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_region *cxlr)
@@ -1384,7 +1384,7 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
mutex_unlock(&mds->poison.lock);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");
static void free_poison_buf(void *buf)
{
@@ -1420,7 +1420,7 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds)
mutex_init(&mds->poison.lock);
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");
int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
{
@@ -1433,7 +1433,7 @@ int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL");
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
@@ -1455,7 +1455,7 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
return mds;
}
-EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL");
void __init cxl_mbox_init(void)
{
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 84fefb76dafa..ae3dfcbe8938 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -250,7 +250,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, "CXL");
static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
@@ -329,7 +329,7 @@ out:
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
@@ -393,7 +393,7 @@ out:
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
static struct attribute *cxl_memdev_attributes[] = {
&dev_attr_serial.attr,
@@ -537,7 +537,7 @@ void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
}
-EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, "CXL");
static const struct device_type cxl_memdev_type = {
.name = "cxl_memdev",
@@ -550,7 +550,7 @@ bool is_cxl_memdev(const struct device *dev)
{
return dev->type == &cxl_memdev_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);
+EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, "CXL");
/**
* set_exclusive_cxl_commands() - atomically disable user cxl commands
@@ -569,7 +569,7 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
-EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
+EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");
/**
* clear_exclusive_cxl_commands() - atomically enable user cxl commands
@@ -584,7 +584,7 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
-EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
+EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL");
static void cxl_memdev_shutdown(struct device *dev)
{
@@ -1006,7 +1006,7 @@ int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
return PTR_ERR(fwl);
return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, "CXL");
static const struct file_operations cxl_memdev_fops = {
.owner = THIS_MODULE,
@@ -1060,7 +1060,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, "CXL");
static void sanitize_teardown_notifier(void *data)
{
@@ -1105,7 +1105,7 @@ int devm_cxl_sanitize_setup_notifier(struct device *host,
return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, "CXL");
__init int cxl_memdev_init(void)
{
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 5b46bc46aaa9..013b869b66cb 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -101,7 +101,7 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port)
return ctx.error;
return ctx.count;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, "CXL");
static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
@@ -209,7 +209,7 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, "CXL");
static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
@@ -252,9 +252,9 @@ static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
}
/* require dvsec ranges to be covered by a locked platform window */
-static int dvsec_range_allowed(struct device *dev, void *arg)
+static int dvsec_range_allowed(struct device *dev, const void *arg)
{
- struct range *dev_range = arg;
+ const struct range *dev_range = arg;
struct cxl_decoder *cxld;
if (!is_root_decoder(dev))
@@ -291,11 +291,11 @@ static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}
-int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
+int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ struct device *dev = cxlds->dev;
int hdm_count, rc, i, ranges = 0;
int d = cxlds->cxl_dvsec;
u16 cap, ctrl;
@@ -386,7 +386,7 @@ int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, "CXL");
/**
* cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
@@ -464,7 +464,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
*/
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL");
#define CXL_DOE_TABLE_ACCESS_REQ_CODE 0x000000ff
#define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ 0
@@ -648,7 +648,7 @@ err:
devm_kfree(dev, buf);
dev_err(dev, "Failed to read/validate CDAT.\n");
}
-EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
+EXPORT_SYMBOL_NS_GPL(read_cdat_data, "CXL");
static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
void __iomem *ras_base)
@@ -805,7 +805,7 @@ void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
cxl_disable_rch_root_ints(dport);
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, "CXL");
static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
struct cxl_dport *dport)
@@ -916,7 +916,7 @@ void cxl_cor_error_detected(struct pci_dev *pdev)
cxl_handle_endpoint_cor_ras(cxlds);
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, "CXL");
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -966,7 +966,7 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
}
return PCI_ERS_RESULT_NEED_RESET;
}
-EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_error_detected, "CXL");
static int cxl_flit_size(struct pci_dev *pdev)
{
@@ -1030,7 +1030,7 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
return device_for_each_child(&port->dev, port,
__cxl_endpoint_decoder_reset_detected);
}
-EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, "CXL");
int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
{
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index c00f3a933164..8853415c106a 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -49,18 +49,7 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
return NULL;
return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);
-
-bool is_cxl_nvdimm_bridge(struct device *dev)
-{
- return dev->type == &cxl_nvdimm_bridge_type;
-}
-EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);
-
-static int match_nvdimm_bridge(struct device *dev, void *data)
-{
- return is_cxl_nvdimm_bridge(dev);
-}
+EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, "CXL");
/**
* cxl_find_nvdimm_bridge() - find a bridge device relative to a port
@@ -75,14 +64,16 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port)
if (!cxl_root)
return NULL;
- dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);
+ dev = device_find_child(&cxl_root->port.dev,
+ &cxl_nvdimm_bridge_type,
+ device_match_type);
if (!dev)
return NULL;
return to_cxl_nvdimm_bridge(dev);
}
-EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, "CXL");
static struct lock_class_key cxl_nvdimm_bridge_key;
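
With device_find_child() now taking a (const void *, match function) pair, the open-coded match_nvdimm_bridge() helper above collapses into the generic device_match_type(). Sketch of the pattern:

#include <linux/device.h>

static const struct device_type foo_type;

static struct device *foo_find_child(struct device *parent)
{
	/* returns the first child with dev->type == &foo_type and a
	 * reference held; drop it with put_device() when done */
	return device_find_child(parent, &foo_type, device_match_type);
}
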
@@ -164,7 +155,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL");
static void cxl_nvdimm_release(struct device *dev)
{
@@ -188,7 +179,7 @@ bool is_cxl_nvdimm(struct device *dev)
{
return dev->type == &cxl_nvdimm_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);
+EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, "CXL");
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
@@ -197,7 +188,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
return NULL;
return container_of(dev, struct cxl_nvdimm, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, "CXL");
static struct lock_class_key cxl_nvdimm_key;
@@ -293,4 +284,4 @@ err_alloc:
return rc;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, "CXL");
diff --git a/drivers/cxl/core/pmu.c b/drivers/cxl/core/pmu.c
index 5d8e06b0ba6e..b3136d7664ab 100644
--- a/drivers/cxl/core/pmu.c
+++ b/drivers/cxl/core/pmu.c
@@ -65,4 +65,4 @@ err:
put_device(&pmu->dev);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_pmu_add, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_pmu_add, "CXL");
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index e666ec6a9085..78a5c2c25982 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -437,7 +437,7 @@ struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, "CXL");
static void cxl_root_decoder_release(struct device *dev)
{
@@ -471,19 +471,19 @@ bool is_endpoint_decoder(struct device *dev)
{
return dev->type == &cxl_decoder_endpoint_type;
}
-EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, "CXL");
bool is_root_decoder(struct device *dev)
{
return dev->type == &cxl_decoder_root_type;
}
-EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(is_root_decoder, "CXL");
bool is_switch_decoder(struct device *dev)
{
return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
-EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(is_switch_decoder, "CXL");
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
@@ -493,7 +493,7 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_decoder, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, "CXL");
struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
@@ -502,7 +502,7 @@ struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, "CXL");
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
@@ -511,7 +511,7 @@ struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, "CXL");
static void cxl_ep_release(struct cxl_ep *ep)
{
@@ -585,7 +585,7 @@ bool is_cxl_port(const struct device *dev)
{
return dev->type == &cxl_port_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);
+EXPORT_SYMBOL_NS_GPL(is_cxl_port, "CXL");
struct cxl_port *to_cxl_port(const struct device *dev)
{
@@ -594,7 +594,7 @@ struct cxl_port *to_cxl_port(const struct device *dev)
return NULL;
return container_of(dev, struct cxl_port, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_port, "CXL");
static void unregister_port(void *_port)
{
@@ -942,7 +942,7 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
return port;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, "CXL");
struct cxl_root *devm_cxl_add_root(struct device *host,
const struct cxl_root_ops *ops)
@@ -958,7 +958,7 @@ struct cxl_root *devm_cxl_add_root(struct device *host,
cxl_root->ops = ops;
return cxl_root;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, "CXL");
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
@@ -974,7 +974,7 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev);
}
-EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, "CXL");
static void unregister_pci_bus(void *uport_dev)
{
@@ -995,7 +995,7 @@ int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
return rc;
return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, "CXL");
static bool dev_is_cxl_root_child(struct device *dev)
{
@@ -1027,7 +1027,7 @@ struct cxl_root *find_cxl_root(struct cxl_port *port)
get_device(&iter->dev);
return to_cxl_root(iter);
}
-EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
+EXPORT_SYMBOL_NS_GPL(find_cxl_root, "CXL");
void put_cxl_root(struct cxl_root *cxl_root)
{
@@ -1036,7 +1036,7 @@ void put_cxl_root(struct cxl_root *cxl_root)
put_device(&cxl_root->port.dev);
}
-EXPORT_SYMBOL_NS_GPL(put_cxl_root, CXL);
+EXPORT_SYMBOL_NS_GPL(put_cxl_root, "CXL");
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
@@ -1230,7 +1230,7 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
return dport;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, "CXL");
/**
* devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
@@ -1264,7 +1264,7 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
return dport;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, "CXL");
static int add_ep(struct cxl_ep *new)
{
@@ -1421,7 +1421,7 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
cxlmd->depth = endpoint->depth;
return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
-EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, "CXL");
/*
* The natural end of life of a non-root 'cxl_port' is when its parent port goes
@@ -1692,21 +1692,21 @@ retry:
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, "CXL");
struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
struct cxl_dport **dport)
{
return find_cxl_port(pdev->dev.parent, dport);
}
-EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, "CXL");
struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
struct cxl_dport **dport)
{
return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, "CXL");
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
struct cxl_port *port, int *target_map)
@@ -1840,7 +1840,7 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
cxlrd->qos_class = CXL_QOS_CLASS_INVALID;
return cxlrd;
}
-EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, "CXL");
/**
* cxl_switch_decoder_alloc - Allocate a switch level decoder
@@ -1877,7 +1877,7 @@ struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
cxld->dev.type = &cxl_decoder_switch_type;
return cxlsd;
}
-EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, "CXL");
/**
* cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
@@ -1909,7 +1909,7 @@ struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
cxld->dev.type = &cxl_decoder_endpoint_type;
return cxled;
}
-EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL");
/**
* cxl_decoder_add_locked - Add a decoder with targets
@@ -1965,7 +1965,7 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
return device_add(dev);
}
-EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL");
/**
* cxl_decoder_add - Add a decoder with targets
@@ -1995,7 +1995,7 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
guard(device)(&port->dev);
return cxl_decoder_add_locked(cxld, target_map);
}
-EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL");
static void cxld_unregister(void *dev)
{
@@ -2013,7 +2013,7 @@ int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
-EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, "CXL");
/**
* __cxl_driver_register - register a driver for the cxl bus
@@ -2046,13 +2046,13 @@ int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
return driver_register(&cxl_drv->drv);
}
-EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
+EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, "CXL");
void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
driver_unregister(&cxl_drv->drv);
}
-EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, "CXL");
static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
@@ -2084,11 +2084,18 @@ static void cxl_bus_remove(struct device *dev)
static struct workqueue_struct *cxl_bus_wq;
-static void cxl_bus_rescan_queue(struct work_struct *w)
+static int cxl_rescan_attach(struct device *dev, void *data)
{
- int rc = bus_rescan_devices(&cxl_bus_type);
+ int rc = device_attach(dev);
+
+ dev_vdbg(dev, "rescan: %s\n", rc ? "attach" : "detached");
- pr_debug("CXL bus rescan result: %d\n", rc);
+ return 0;
+}
+
+static void cxl_bus_rescan_queue(struct work_struct *w)
+{
+ bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_rescan_attach);
}
void cxl_bus_rescan(void)
@@ -2097,19 +2104,19 @@ void cxl_bus_rescan(void)
queue_work(cxl_bus_wq, &rescan_work);
}
-EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, "CXL");
void cxl_bus_drain(void)
{
drain_workqueue(cxl_bus_wq);
}
-EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, "CXL");
bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{
return queue_work(cxl_bus_wq, &cxlmd->detach_work);
}
-EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
+EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, "CXL");
static void add_latency(struct access_coordinate *c, long latency)
{
@@ -2235,7 +2242,7 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, "CXL");
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c)
@@ -2292,7 +2299,7 @@ struct bus_type cxl_bus_type = {
.remove = cxl_bus_remove,
.bus_groups = cxl_bus_attribute_groups,
};
-EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_bus_type, "CXL");
static struct dentry *cxl_debugfs;
@@ -2300,7 +2307,7 @@ struct dentry *cxl_debugfs_create_dir(const char *dir)
{
return debugfs_create_dir(dir, cxl_debugfs);
}
-EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, "CXL");
static __init int cxl_core_init(void)
{
@@ -2356,4 +2363,4 @@ subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_DESCRIPTION("CXL: Core Compute Express Link support");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e701e4b04032..e8d11a988fd9 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -232,8 +232,8 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
return 0;
} else {
- dev_err(&cxlr->dev,
- "Failed to synchronize CPU cache state\n");
+ dev_WARN(&cxlr->dev,
+ "Failed to synchronize CPU cache state\n");
return -ENXIO;
}
}
@@ -242,19 +242,17 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
return 0;
}
-static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
+static void cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
struct cxl_region_params *p = &cxlr->params;
- int i, rc = 0;
+ int i;
/*
- * Before region teardown attempt to flush, and if the flush
- * fails cancel the region teardown for data consistency
- * concerns
+ * Before region teardown attempt to flush, evict any data cached for
+ * this region, or scream loudly about missing arch / platform support
+ * for CXL teardown.
*/
- rc = cxl_region_invalidate_memregion(cxlr);
- if (rc)
- return rc;
+ cxl_region_invalidate_memregion(cxlr);
for (i = count - 1; i >= 0; i--) {
struct cxl_endpoint_decoder *cxled = p->targets[i];
@@ -277,23 +275,17 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
cxl_rr = cxl_rr_load(iter, cxlr);
cxld = cxl_rr->decoder;
if (cxld->reset)
- rc = cxld->reset(cxld);
- if (rc)
- return rc;
+ cxld->reset(cxld);
set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
endpoint_reset:
- rc = cxled->cxld.reset(&cxled->cxld);
- if (rc)
- return rc;
+ cxled->cxld.reset(&cxled->cxld);
set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
/* all decoders associated with this region have been torn down */
clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
-
- return 0;
}
static int commit_decoder(struct cxl_decoder *cxld)
@@ -409,16 +401,8 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
* still pending.
*/
if (p->state == CXL_CONFIG_RESET_PENDING) {
- rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
- /*
- * Revert to committed since there may still be active
- * decoders associated with this region, or move forward
- * to active to mark the reset successful
- */
- if (rc)
- p->state = CXL_CONFIG_COMMIT;
- else
- p->state = CXL_CONFIG_ACTIVE;
+ cxl_region_decode_reset(cxlr, p->interleave_ways);
+ p->state = CXL_CONFIG_ACTIVE;
}
}
@@ -794,31 +778,55 @@ out:
return rc;
}
-static int match_free_decoder(struct device *dev, void *data)
+static int check_commit_order(struct device *dev, void *data)
{
+ struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+ /*
+ * if port->commit_end is not the only free decoder, then out of
+ * order shutdown has occurred, block further allocations until
+ * that is resolved
+ */
+ if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0))
+ return -EBUSY;
+ return 0;
+}
+
+static int match_free_decoder(struct device *dev, const void *data)
+{
+ struct cxl_port *port = to_cxl_port(dev->parent);
struct cxl_decoder *cxld;
- int *id = data;
+ int rc;
if (!is_switch_decoder(dev))
return 0;
cxld = to_cxl_decoder(dev);
- /* enforce ordered allocation */
- if (cxld->id != *id)
+ if (cxld->id != port->commit_end + 1)
return 0;
- if (!cxld->region)
- return 1;
-
- (*id)++;
+ if (cxld->region) {
+ dev_dbg(dev->parent,
+ "next decoder to commit (%s) is already reserved (%s)\n",
+ dev_name(dev), dev_name(&cxld->region->dev));
+ return 0;
+ }
- return 0;
+ rc = device_for_each_child_reverse_from(dev->parent, dev, NULL,
+ check_commit_order);
+ if (rc) {
+ dev_dbg(dev->parent,
+ "unable to allocate %s due to out of order shutdown\n",
+ dev_name(dev));
+ return 0;
+ }
+ return 1;
}
-static int match_auto_decoder(struct device *dev, void *data)
+static int match_auto_decoder(struct device *dev, const void *data)
{
- struct cxl_region_params *p = data;
+ const struct cxl_region_params *p = data;
struct cxl_decoder *cxld;
struct range *r;
@@ -840,7 +848,6 @@ cxl_region_find_decoder(struct cxl_port *port,
struct cxl_region *cxlr)
{
struct device *dev;
- int id = 0;
if (port == cxled_to_port(cxled))
return &cxled->cxld;
@@ -849,7 +856,7 @@ cxl_region_find_decoder(struct cxl_port *port,
dev = device_find_child(&port->dev, &cxlr->params,
match_auto_decoder);
else
- dev = device_find_child(&port->dev, &id, match_free_decoder);
+ dev = device_find_child(&port->dev, NULL, match_free_decoder);
if (!dev)
return NULL;
/*
@@ -1288,6 +1295,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
struct cxl_region_params *p = &cxlr->params;
struct cxl_decoder *cxld = cxl_rr->decoder;
struct cxl_switch_decoder *cxlsd;
+ struct cxl_port *iter = port;
u16 eig, peig;
u8 eiw, peiw;
@@ -1304,16 +1312,26 @@ static int cxl_port_setup_targets(struct cxl_port *port,
cxlsd = to_cxl_switch_decoder(&cxld->dev);
if (cxl_rr->nr_targets_set) {
- int i, distance;
+ int i, distance = 1;
+ struct cxl_region_ref *cxl_rr_iter;
/*
- * Passthrough decoders impose no distance requirements between
- * peers
+ * The "distance" between peer downstream ports represents which
+ * endpoint positions in the region interleave a given port can
+ * host.
+ *
+ * For example, at the root of a hierarchy the distance is
+ * always 1 as every index targets a different host-bridge. At
+ * each subsequent switch level those ports map every Nth region
+ * position where N is the width of the switch == distance.
*/
- if (cxl_rr->nr_targets == 1)
- distance = 0;
- else
- distance = p->nr_targets / cxl_rr->nr_targets;
+ do {
+ cxl_rr_iter = cxl_rr_load(iter, cxlr);
+ distance *= cxl_rr_iter->nr_targets;
+ iter = to_cxl_port(iter->dev.parent);
+ } while (!is_cxl_root(iter));
+ distance *= cxlrd->cxlsd.cxld.interleave_ways;
+
for (i = 0; i < cxl_rr->nr_targets_set; i++)
if (ep->dport == cxlsd->target[i]) {
rc = check_last_peer(cxled, ep, cxl_rr,
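A worked example of the distance calculation above: the loop starts at the endpoint's port, each level contributes its region-reference target count, and the root decoder's interleave ways are folded in last. The topology numbers in this stand-alone sketch are hypothetical:

#include <stdio.h>

int main(void)
{
	/* fan-out per switch level, innermost first (assumed 2-level) */
	int nr_targets_per_level[] = { 2, 2 };
	int root_interleave_ways = 2;
	int distance = 1;

	/* mirrors the do/while walk up to the CXL root */
	for (int i = 0; i < 2; i++)
		distance *= nr_targets_per_level[i];
	distance *= root_interleave_ways;

	/* a port at this depth hosts every 8th endpoint position */
	printf("distance=%d\n", distance);
	return 0;
}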
@@ -1715,10 +1733,12 @@ static struct cxl_port *next_port(struct cxl_port *port)
return port->parent_dport->port;
}
-static int match_switch_decoder_by_range(struct device *dev, void *data)
+static int match_switch_decoder_by_range(struct device *dev,
+ const void *data)
{
struct cxl_switch_decoder *cxlsd;
- struct range *r1, *r2 = data;
+ const struct range *r1, *r2 = data;
+
if (!is_switch_decoder(dev))
return 0;
@@ -2054,13 +2074,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
get_device(&cxlr->dev);
if (p->state > CXL_CONFIG_ACTIVE) {
- /*
- * TODO: tear down all impacted regions if a device is
- * removed out of order
- */
- rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
- if (rc)
- goto out;
+ cxl_region_decode_reset(cxlr, p->interleave_ways);
p->state = CXL_CONFIG_ACTIVE;
}
@@ -2298,7 +2312,7 @@ bool is_cxl_region(struct device *dev)
{
return dev->type == &cxl_region_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
+EXPORT_SYMBOL_NS_GPL(is_cxl_region, "CXL");
static struct cxl_region *to_cxl_region(struct device *dev)
{
@@ -2536,9 +2550,8 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}
-static ssize_t create_pmem_region_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t create_region_store(struct device *dev, const char *buf,
+ size_t len, enum cxl_decoder_mode mode)
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
struct cxl_region *cxlr;
@@ -2548,31 +2561,26 @@ static ssize_t create_pmem_region_store(struct device *dev,
if (rc != 1)
return -EINVAL;
- cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
+ cxlr = __create_region(cxlrd, mode, id);
if (IS_ERR(cxlr))
return PTR_ERR(cxlr);
return len;
}
+
+static ssize_t create_pmem_region_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return create_region_store(dev, buf, len, CXL_DECODER_PMEM);
+}
DEVICE_ATTR_RW(create_pmem_region);
static ssize_t create_ram_region_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
- struct cxl_region *cxlr;
- int rc, id;
-
- rc = sscanf(buf, "region%d\n", &id);
- if (rc != 1)
- return -EINVAL;
-
- cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
- if (IS_ERR(cxlr))
- return PTR_ERR(cxlr);
-
- return len;
+ return create_region_store(dev, buf, len, CXL_DECODER_RAM);
}
DEVICE_ATTR_RW(create_ram_region);
@@ -2657,7 +2665,7 @@ bool is_cxl_pmem_region(struct device *dev)
{
return dev->type == &cxl_pmem_region_type;
}
-EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
+EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, "CXL");
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
@@ -2666,7 +2674,7 @@ struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
return NULL;
return container_of(dev, struct cxl_pmem_region, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, "CXL");
struct cxl_poison_context {
struct cxl_port *port;
@@ -3020,7 +3028,7 @@ struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
return NULL;
return container_of(dev, struct cxl_dax_region, dev);
}
-EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);
+EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, "CXL");
static struct lock_class_key cxl_dax_region_key;
@@ -3181,9 +3189,10 @@ err:
return rc;
}
-static int match_root_decoder_by_range(struct device *dev, void *data)
+static int match_root_decoder_by_range(struct device *dev,
+ const void *data)
{
- struct range *r1, *r2 = data;
+ const struct range *r1, *r2 = data;
struct cxl_root_decoder *cxlrd;
if (!is_root_decoder(dev))
@@ -3194,11 +3203,11 @@ static int match_root_decoder_by_range(struct device *dev, void *data)
return range_contains(r1, r2);
}
-static int match_region_by_range(struct device *dev, void *data)
+static int match_region_by_range(struct device *dev, const void *data)
{
struct cxl_region_params *p;
struct cxl_region *cxlr;
- struct range *r = data;
+ const struct range *r = data;
int rc = 0;
if (!is_cxl_region(dev))
@@ -3364,7 +3373,7 @@ out:
put_device(cxlrd_dev);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL");
static int is_system_ram(struct resource *res, void *arg)
{
@@ -3467,6 +3476,6 @@ void cxl_region_exit(void)
cxl_driver_unregister(&cxl_region_driver);
}
-MODULE_IMPORT_NS(CXL);
-MODULE_IMPORT_NS(DEVMEM);
+MODULE_IMPORT_NS("CXL");
+MODULE_IMPORT_NS("DEVMEM");
MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index e1082e749c69..117c2e94c761 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -52,7 +52,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET);
if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
- dev_err(dev,
+ dev_dbg(dev,
"Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
return;
}
@@ -106,7 +106,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
rmap->size = length;
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, "CXL");
/**
* cxl_probe_device_regs() - Detect CXL Device register blocks
@@ -174,7 +174,7 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base,
rmap->size = length;
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, "CXL");
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
resource_size_t length)
@@ -232,7 +232,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, "CXL");
int cxl_map_device_regs(const struct cxl_register_map *map,
struct cxl_device_regs *regs)
@@ -266,7 +266,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, "CXL");
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
struct cxl_register_map *map)
@@ -289,21 +289,17 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
return true;
}
-/**
- * cxl_find_regblock_instance() - Locate a register block by type / index
- * @pdev: The CXL PCI device to enumerate.
- * @type: Register Block Indicator id
- * @map: Enumeration output, clobbered on error
- * @index: Index into which particular instance of a regblock wanted in the
- * order found in register locator DVSEC.
- *
- * Return: 0 if register block enumerated, negative error code otherwise
+/*
+ * __cxl_find_regblock_instance() - Locate a register block or count instances by type / index
+ * Use CXL_INSTANCES_COUNT for @index if counting instances.
*
- * A CXL DVSEC may point to one or more register blocks, search for them
- * by @type and @index.
+ * __cxl_find_regblock_instance() may return:
+ * 0 - if the requested register block was enumerated.
+ * >= 0 - the instance count, when @index == CXL_INSTANCES_COUNT.
+ * < 0 - a negative error code otherwise.
*/
-int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
- struct cxl_register_map *map, int index)
+static int __cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map, int index)
{
u32 regloc_size, regblocks;
int instance = 0;
@@ -342,9 +338,31 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
}
map->resource = CXL_RESOURCE_NONE;
+ if (index == CXL_INSTANCES_COUNT)
+ return instance;
+
return -ENODEV;
}
-EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, CXL);
+
+/**
+ * cxl_find_regblock_instance() - Locate a register block by type / index
+ * @pdev: The CXL PCI device to enumerate.
+ * @type: Register Block Indicator id
+ * @map: Enumeration output, clobbered on error
+ * @index: Which instance of the regblock to return, counted in the
+ * order found in the Register Locator DVSEC.
+ *
+ * Return: 0 if register block enumerated, negative error code otherwise
+ *
+ * A CXL DVSEC may point to one or more register blocks, search for them
+ * by @type and @index.
+ */
+int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
+ struct cxl_register_map *map, unsigned int index)
+{
+ return __cxl_find_regblock_instance(pdev, type, map, index);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, "CXL");
/**
* cxl_find_regblock() - Locate register blocks by type
@@ -360,9 +378,9 @@ EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, CXL);
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map)
{
- return cxl_find_regblock_instance(pdev, type, map, 0);
+ return __cxl_find_regblock_instance(pdev, type, map, 0);
}
-EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, "CXL");
/**
* cxl_count_regblock() - Count instances of a given regblock type.
@@ -371,21 +389,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL);
*
* Some regblocks may be repeated. Count how many instances.
*
- * Return: count of matching regblocks.
+ * Return: non-negative count of matching regblocks, negative error code otherwise.
*/
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
{
struct cxl_register_map map;
- int rc, count = 0;
- while (1) {
- rc = cxl_find_regblock_instance(pdev, type, &map, count);
- if (rc)
- return count;
- count++;
- }
+ return __cxl_find_regblock_instance(pdev, type, &map, CXL_INSTANCES_COUNT);
}
-EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, "CXL");
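A stand-alone model of the sentinel convention the new helper uses: the same walk either returns 0 when the requested instance is found, the number of matches when @index is the CXL_INSTANCES_COUNT sentinel, or a negative code when a concrete lookup fails. Names below are illustrative, not the kernel helpers:

#include <stdio.h>

#define INSTANCES_COUNT -1	/* models CXL_INSTANCES_COUNT */

static int find_instance(const int *types, int n, int want, int index)
{
	int instance = 0;

	for (int i = 0; i < n; i++) {
		if (types[i] != want)
			continue;
		if (index == instance)
			return 0;	/* found the requested instance */
		instance++;
	}
	if (index == INSTANCES_COUNT)
		return instance;	/* counting mode */
	return -1;			/* stands in for -ENODEV */
}

int main(void)
{
	int types[] = { 1, 3, 3, 2, 3 };

	printf("%d\n", find_instance(types, 5, 3, INSTANCES_COUNT)); /* 3 */
	return 0;
}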
int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
{
@@ -399,7 +411,7 @@ int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, "CXL");
static int cxl_map_regblock(struct cxl_register_map *map)
{
@@ -468,7 +480,7 @@ int cxl_setup_regs(struct cxl_register_map *map)
return rc;
}
-EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, "CXL");
u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb)
{
@@ -506,6 +518,62 @@ out:
return offset;
}
+static resource_size_t cxl_rcrb_to_linkcap(struct device *dev, struct cxl_dport *dport)
+{
+ resource_size_t rcrb = dport->rcrb.base;
+ void __iomem *addr;
+ u32 cap_hdr;
+ u16 offset;
+
+ if (!request_mem_region(rcrb, SZ_4K, "CXL RCRB"))
+ return CXL_RESOURCE_NONE;
+
+ addr = ioremap(rcrb, SZ_4K);
+ if (!addr) {
+ dev_err(dev, "Failed to map RCRB %pa\n", &rcrb);
+ release_mem_region(rcrb, SZ_4K);
+ return CXL_RESOURCE_NONE;
+ }
+
+ offset = FIELD_GET(PCI_RCRB_CAP_LIST_ID_MASK, readw(addr + PCI_CAPABILITY_LIST));
+ cap_hdr = readl(addr + offset);
+ while ((FIELD_GET(PCI_RCRB_CAP_HDR_ID_MASK, cap_hdr)) != PCI_CAP_ID_EXP) {
+ offset = FIELD_GET(PCI_RCRB_CAP_HDR_NEXT_MASK, cap_hdr);
+ if (offset == 0 || offset > SZ_4K) {
+ offset = 0;
+ break;
+ }
+ cap_hdr = readl(addr + offset);
+ }
+
+ iounmap(addr);
+ release_mem_region(rcrb, SZ_4K);
+ if (!offset)
+ return CXL_RESOURCE_NONE;
+
+ return offset;
+}
+
+int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport)
+{
+ void __iomem *dport_pcie_cap = NULL;
+ resource_size_t pos;
+ struct cxl_rcrb_info *ri;
+
+ ri = &dport->rcrb;
+ pos = cxl_rcrb_to_linkcap(&pdev->dev, dport);
+ if (pos == CXL_RESOURCE_NONE)
+ return -ENXIO;
+
+ dport_pcie_cap = devm_cxl_iomap_block(&pdev->dev,
+ ri->base + pos,
+ PCI_CAP_EXP_SIZEOF);
+ dport->regs.rcd_pcie_cap = dport_pcie_cap;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_dport_map_rcd_linkcap, "CXL");
+
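The capability walk in cxl_rcrb_to_linkcap() follows the standard PCI capability-list format: each header carries an 8-bit capability ID and an 8-bit next pointer, and the chain must stay inside the 4K RCRB. A stand-alone sketch of the same walk over a byte array (layout per the PCI spec; offsets and values hypothetical):

#include <stdint.h>
#include <stdio.h>

#define CAP_ID_EXP	0x10	/* PCI_CAP_ID_EXP */
#define RCRB_SIZE	4096

static uint16_t find_exp_cap(const uint8_t *rcrb, uint16_t offset)
{
	while (offset && offset < RCRB_SIZE) {
		uint8_t id   = rcrb[offset];		/* cap header: ID byte */
		uint8_t next = rcrb[offset + 1];	/* cap header: next ptr */

		if (id == CAP_ID_EXP)
			return offset;
		offset = next;
	}
	return 0;	/* not found; maps to CXL_RESOURCE_NONE above */
}

int main(void)
{
	static uint8_t rcrb[RCRB_SIZE];

	rcrb[0x40] = 0x01; rcrb[0x41] = 0x70;	/* PM cap -> next at 0x70 */
	rcrb[0x70] = CAP_ID_EXP;		/* PCIe cap, end of chain */
	printf("%#x\n", find_exp_cap(rcrb, 0x40));	/* 0x70 */
	return 0;
}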
resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri,
enum cxl_rcrb which)
{
@@ -577,4 +645,4 @@ resource_size_t cxl_rcd_component_reg_phys(struct device *dev,
return CXL_RESOURCE_NONE;
return __rcrb_to_component(dev, &dport->rcrb, CXL_RCRB_UPSTREAM);
}
-EXPORT_SYMBOL_NS_GPL(cxl_rcd_component_reg_phys, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_rcd_component_reg_phys, "CXL");
diff --git a/drivers/cxl/core/suspend.c b/drivers/cxl/core/suspend.c
index a5984d96ea1d..29aa5cc5e565 100644
--- a/drivers/cxl/core/suspend.c
+++ b/drivers/cxl/core/suspend.c
@@ -15,10 +15,10 @@ void cxl_mem_active_inc(void)
{
atomic_inc(&mem_active);
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_active_inc, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_active_inc, "CXL");
void cxl_mem_active_dec(void)
{
atomic_dec(&mem_active);
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_active_dec, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_active_dec, "CXL");
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 8672b42ee4d1..cea706b683b5 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -166,11 +166,13 @@ TRACE_EVENT(cxl_overflow,
#define CXL_EVENT_RECORD_FLAG_MAINT_NEEDED BIT(3)
#define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED BIT(4)
#define CXL_EVENT_RECORD_FLAG_HW_REPLACE BIT(5)
+#define CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID BIT(6)
#define show_hdr_flags(flags) __print_flags(flags, " | ", \
{ CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \
{ CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \
- { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" } \
+ { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" }, \
+ { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" } \
)
/*
@@ -197,7 +199,8 @@ TRACE_EVENT(cxl_overflow,
__field(u16, hdr_related_handle) \
__field(u64, hdr_timestamp) \
__field(u8, hdr_length) \
- __field(u8, hdr_maint_op_class)
+ __field(u8, hdr_maint_op_class) \
+ __field(u8, hdr_maint_op_sub_class)
#define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \
__assign_str(memdev); \
@@ -209,17 +212,19 @@ TRACE_EVENT(cxl_overflow,
__entry->hdr_handle = le16_to_cpu((hdr).handle); \
__entry->hdr_related_handle = le16_to_cpu((hdr).related_handle); \
__entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \
- __entry->hdr_maint_op_class = (hdr).maint_op_class
+ __entry->hdr_maint_op_class = (hdr).maint_op_class; \
+ __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class
#define CXL_EVT_TP_printk(fmt, ...) \
TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \
"len=%d flags='%s' handle=%x related_handle=%x " \
- "maint_op_class=%u : " fmt, \
+ "maint_op_class=%u maint_op_sub_class=%u : " fmt, \
__get_str(memdev), __get_str(host), __entry->serial, \
cxl_event_log_type_str(__entry->log), \
__entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\
show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \
__entry->hdr_related_handle, __entry->hdr_maint_op_class, \
+ __entry->hdr_maint_op_sub_class, \
##__VA_ARGS__)
TRACE_EVENT(cxl_generic_event,
@@ -264,8 +269,30 @@ TRACE_EVENT(cxl_generic_event,
)
/*
+ * Component ID Format
+ * CXL 3.1 section 8.2.9.2.1; Table 8-44
+ */
+#define CXL_PLDM_COMPONENT_ID_ENTITY_VALID BIT(0)
+#define CXL_PLDM_COMPONENT_ID_RES_VALID BIT(1)
+
+#define show_comp_id_pldm_flags(flags) __print_flags(flags, " | ", \
+ { CXL_PLDM_COMPONENT_ID_ENTITY_VALID, "PLDM Entity ID" }, \
+ { CXL_PLDM_COMPONENT_ID_RES_VALID, "Resource ID" } \
+)
+
+#define show_pldm_entity_id(flags, valid_comp_id, valid_id_format, comp_id) \
+ (flags & valid_comp_id && flags & valid_id_format) ? \
+ (comp_id[0] & CXL_PLDM_COMPONENT_ID_ENTITY_VALID) ? \
+ __print_hex(&comp_id[1], 6) : "0x00" : "0x00"
+
+#define show_pldm_resource_id(flags, valid_comp_id, valid_id_format, comp_id) \
+ (flags & valid_comp_id && flags & valid_id_format) ? \
+ (comp_id[0] & CXL_PLDM_COMPONENT_ID_RES_VALID) ? \
+ __print_hex(&comp_id[7], 4) : "0x00" : "0x00"
+
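The show_pldm_* macros above decode the component-id layout from CXL 3.1 Table 8-44: byte 0 carries the PLDM validity flags, bytes 1-6 the entity id, and bytes 7-10 the resource id. A stand-alone sketch of the same decode (buffer size and names assumed for illustration):

#include <stdint.h>
#include <stdio.h>

#define PLDM_ENTITY_VALID	0x01	/* BIT(0) */
#define PLDM_RES_VALID		0x02	/* BIT(1) */

static void decode_comp_id(const uint8_t id[16])
{
	if (id[0] & PLDM_ENTITY_VALID)		/* bytes 1-6: entity id */
		printf("entity: %02x%02x%02x%02x%02x%02x\n",
		       id[1], id[2], id[3], id[4], id[5], id[6]);
	if (id[0] & PLDM_RES_VALID)		/* bytes 7-10: resource id */
		printf("resource: %02x%02x%02x%02x\n",
		       id[7], id[8], id[9], id[10]);
}

int main(void)
{
	uint8_t id[16] = { PLDM_ENTITY_VALID | PLDM_RES_VALID,
			   1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };

	decode_comp_id(id);
	return 0;
}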
+/*
* General Media Event Record - GMER
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ * CXL rev 3.1 Section 8.2.9.2.1.1; Table 8-45
*/
#define CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT BIT(0)
#define CXL_GMER_EVT_DESC_THRESHOLD_EVENT BIT(1)
@@ -279,10 +306,18 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00
#define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01
#define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02
-#define show_mem_event_type(type) __print_symbolic(type, \
- { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
- { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
- { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \
+#define CXL_GMER_MEM_EVT_TYPE_TE_STATE_VIOLATION 0x03
+#define CXL_GMER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x04
+#define CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE 0x05
+#define CXL_GMER_MEM_EVT_TYPE_CKID_VIOLATION 0x06
+#define show_gmer_mem_event_type(type) __print_symbolic(type, \
+ { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
+ { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_TE_STATE_VIOLATION, "TE State Violation" }, \
+ { CXL_GMER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
+ { CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE, "Adv Prog CME Counter Expiration" }, \
+ { CXL_GMER_MEM_EVT_TYPE_CKID_VIOLATION, "CKID Violation" } \
)
#define CXL_GMER_TRANS_UNKNOWN 0x00
@@ -292,6 +327,8 @@ TRACE_EVENT(cxl_generic_event,
#define CXL_GMER_TRANS_HOST_INJECT_POISON 0x04
#define CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB 0x05
#define CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT 0x06
+#define CXL_GMER_TRANS_INTERNAL_MEDIA_ECS 0x07
+#define CXL_GMER_TRANS_MEDIA_INITIALIZATION 0x08
#define show_trans_type(type) __print_symbolic(type, \
{ CXL_GMER_TRANS_UNKNOWN, "Unknown" }, \
{ CXL_GMER_TRANS_HOST_READ, "Host Read" }, \
@@ -299,18 +336,57 @@ TRACE_EVENT(cxl_generic_event,
{ CXL_GMER_TRANS_HOST_SCAN_MEDIA, "Host Scan Media" }, \
{ CXL_GMER_TRANS_HOST_INJECT_POISON, "Host Inject Poison" }, \
{ CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, "Internal Media Scrub" }, \
- { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" } \
+ { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" }, \
+ { CXL_GMER_TRANS_INTERNAL_MEDIA_ECS, "Internal Media Error Check Scrub" }, \
+ { CXL_GMER_TRANS_MEDIA_INITIALIZATION, "Media Initialization" } \
)
#define CXL_GMER_VALID_CHANNEL BIT(0)
#define CXL_GMER_VALID_RANK BIT(1)
#define CXL_GMER_VALID_DEVICE BIT(2)
#define CXL_GMER_VALID_COMPONENT BIT(3)
+#define CXL_GMER_VALID_COMPONENT_ID_FORMAT BIT(4)
#define show_valid_flags(flags) __print_flags(flags, "|", \
{ CXL_GMER_VALID_CHANNEL, "CHANNEL" }, \
{ CXL_GMER_VALID_RANK, "RANK" }, \
{ CXL_GMER_VALID_DEVICE, "DEVICE" }, \
- { CXL_GMER_VALID_COMPONENT, "COMPONENT" } \
+ { CXL_GMER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_GMER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" } \
+)
+
+#define CXL_GMER_CME_EV_FLAG_CME_MULTIPLE_MEDIA BIT(0)
+#define CXL_GMER_CME_EV_FLAG_THRESHOLD_EXCEEDED BIT(1)
+#define show_cme_threshold_ev_flags(flags) __print_flags(flags, "|", \
+ { \
+ CXL_GMER_CME_EV_FLAG_CME_MULTIPLE_MEDIA, \
+ "Corrected Memory Errors in Multiple Media Components" \
+ }, { \
+ CXL_GMER_CME_EV_FLAG_THRESHOLD_EXCEEDED, \
+ "Exceeded Programmable Threshold" \
+ } \
+)
+
+#define CXL_GMER_MEM_EVT_SUB_TYPE_NOT_REPORTED 0x00
+#define CXL_GMER_MEM_EVT_SUB_TYPE_INTERNAL_DATAPATH_ERROR 0x01
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_COMMAND_TRAINING_ERROR 0x02
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CONTROL_TRAINING_ERROR 0x03
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_DATA_TRAINING_ERROR 0x04
+#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CRC_ERROR 0x05
+#define show_mem_event_sub_type(sub_type) __print_symbolic(sub_type, \
+ { CXL_GMER_MEM_EVT_SUB_TYPE_NOT_REPORTED, "Not Reported" }, \
+ { CXL_GMER_MEM_EVT_SUB_TYPE_INTERNAL_DATAPATH_ERROR, "Internal Datapath Error" }, \
+ { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_COMMAND_TRAINING_ERROR, \
+ "Media Link Command Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CONTROL_TRAINING_ERROR, \
+ "Media Link Control Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_DATA_TRAINING_ERROR, \
+ "Media Link Data Training Error" \
+ }, { \
+ CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CRC_ERROR, "Media Link CRC Error" \
+ } \
)
TRACE_EVENT(cxl_general_media,
@@ -336,6 +412,9 @@ TRACE_EVENT(cxl_general_media,
__field(u16, validity_flags)
__field(u8, rank)
__field(u8, dpa_flags)
+ __field(u32, cme_count)
+ __field(u8, sub_type)
+ __field(u8, cme_threshold_ev_flags)
__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
@@ -350,6 +429,7 @@ TRACE_EVENT(cxl_general_media,
__entry->dpa &= CXL_DPA_MASK;
__entry->descriptor = rec->media_hdr.descriptor;
__entry->type = rec->media_hdr.type;
+ __entry->sub_type = rec->sub_type;
__entry->transaction_type = rec->media_hdr.transaction_type;
__entry->channel = rec->media_hdr.channel;
__entry->rank = rec->media_hdr.rank;
@@ -365,32 +445,62 @@ TRACE_EVENT(cxl_general_media,
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &uuid_null);
}
+ __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
+ __entry->cme_count = get_unaligned_le24(rec->cme_count);
),
CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \
- "descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
- "device=%x comp_id=%s validity_flags='%s' " \
- "hpa=%llx region=%s region_uuid=%pUb",
+ "descriptor='%s' type='%s' sub_type='%s' " \
+ "transaction_type='%s' channel=%u rank=%u " \
+ "device=%x validity_flags='%s' " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s " \
+ "hpa=%llx region=%s region_uuid=%pUb " \
+ "cme_threshold_ev_flags='%s' cme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
- show_mem_event_type(__entry->type),
+ show_gmer_mem_event_type(__entry->type),
+ show_mem_event_sub_type(__entry->sub_type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->device,
- __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
show_valid_flags(__entry->validity_flags),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
+ CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
+ CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid,
+ show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags), __entry->cme_count
)
);
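The cme_count field above is a 24-bit little-endian counter read with get_unaligned_le24(); the equivalent open-coded extraction is three byte loads, as in this stand-alone sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t le24(const uint8_t *p)
{
	return p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
}

int main(void)
{
	uint8_t raw[3] = { 0x34, 0x12, 0x01 };

	printf("%#x\n", le24(raw));	/* 0x11234 */
	return 0;
}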
/*
* DRAM Event Record - DER
*
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ * CXL rev 3.1 section 8.2.9.2.1.2; Table 8-46
*/
/*
* DRAM Event Record defines many fields the same as the General Media Event
* Record. Reuse those definitions as appropriate.
*/
+#define CXL_DER_MEM_EVT_TYPE_ECC_ERROR 0x00
+#define CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x01
+#define CXL_DER_MEM_EVT_TYPE_INV_ADDR 0x02
+#define CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x03
+#define CXL_DER_MEM_EVT_TYPE_TE_STATE_VIOLATION 0x04
+#define CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE 0x05
+#define CXL_DER_MEM_EVT_TYPE_CKID_VIOLATION 0x06
+#define show_dram_mem_event_type(type) __print_symbolic(type, \
+ { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \
+ { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" }, \
+ { CXL_DER_MEM_EVT_TYPE_TE_STATE_VIOLATION, "TE State Violation" }, \
+ { CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE, "Adv Prog CME Counter Expiration" }, \
+ { CXL_DER_MEM_EVT_TYPE_CKID_VIOLATION, "CKID Violation" } \
+)
+
#define CXL_DER_VALID_CHANNEL BIT(0)
#define CXL_DER_VALID_RANK BIT(1)
#define CXL_DER_VALID_NIBBLE BIT(2)
@@ -399,15 +509,21 @@ TRACE_EVENT(cxl_general_media,
#define CXL_DER_VALID_ROW BIT(5)
#define CXL_DER_VALID_COLUMN BIT(6)
#define CXL_DER_VALID_CORRECTION_MASK BIT(7)
-#define show_dram_valid_flags(flags) __print_flags(flags, "|", \
- { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
- { CXL_DER_VALID_RANK, "RANK" }, \
- { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
- { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
- { CXL_DER_VALID_BANK, "BANK" }, \
- { CXL_DER_VALID_ROW, "ROW" }, \
- { CXL_DER_VALID_COLUMN, "COLUMN" }, \
- { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" } \
+#define CXL_DER_VALID_COMPONENT BIT(8)
+#define CXL_DER_VALID_COMPONENT_ID_FORMAT BIT(9)
+#define CXL_DER_VALID_SUB_CHANNEL BIT(10)
+#define show_dram_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \
+ { CXL_DER_VALID_RANK, "RANK" }, \
+ { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \
+ { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \
+ { CXL_DER_VALID_BANK, "BANK" }, \
+ { CXL_DER_VALID_ROW, "ROW" }, \
+ { CXL_DER_VALID_COLUMN, "COLUMN" }, \
+ { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" }, \
+ { CXL_DER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_DER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" }, \
+ { CXL_DER_VALID_SUB_CHANNEL, "SUB CHANNEL" } \
)
TRACE_EVENT(cxl_dram,
@@ -436,6 +552,12 @@ TRACE_EVENT(cxl_dram,
__field(u8, bank_group) /* Out of order to pack trace record */
__field(u8, bank) /* Out of order to pack trace record */
__field(u8, dpa_flags) /* Out of order to pack trace record */
+ /* Following are out of order to pack trace record */
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ __field(u32, cvme_count)
+ __field(u8, sub_type)
+ __field(u8, sub_channel)
+ __field(u8, cme_threshold_ev_flags)
__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
),
@@ -449,6 +571,7 @@ TRACE_EVENT(cxl_dram,
__entry->dpa &= CXL_DPA_MASK;
__entry->descriptor = rec->media_hdr.descriptor;
__entry->type = rec->media_hdr.type;
+ __entry->sub_type = rec->sub_type;
__entry->transaction_type = rec->media_hdr.transaction_type;
__entry->validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
__entry->channel = rec->media_hdr.channel;
@@ -468,30 +591,47 @@ TRACE_EVENT(cxl_dram,
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &uuid_null);
}
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
+ __entry->sub_channel = rec->sub_channel;
+ __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags;
+ __entry->cvme_count = get_unaligned_le24(rec->cvme_count);
),
- CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
+ CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' sub_type='%s' " \
"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
"validity_flags='%s' " \
- "hpa=%llx region=%s region_uuid=%pUb",
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s " \
+ "hpa=%llx region=%s region_uuid=%pUb " \
+ "sub_channel=%u cme_threshold_ev_flags='%s' cvme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
- show_mem_event_type(__entry->type),
+ show_dram_mem_event_type(__entry->type),
+ show_mem_event_sub_type(__entry->sub_type),
show_trans_type(__entry->transaction_type),
__entry->channel, __entry->rank, __entry->nibble_mask,
__entry->bank_group, __entry->bank,
__entry->row, __entry->column,
__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
show_dram_valid_flags(__entry->validity_flags),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
+ CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
+ CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ __entry->hpa, __get_str(region_name), &__entry->region_uuid,
+ __entry->sub_channel, show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags),
+ __entry->cvme_count
)
);
/*
* Memory Module Event Record - MMER
*
- * CXL res 3.0 section 8.2.9.2.1.3; Table 8-45
+ * CXL rev 3.1 section 8.2.9.2.1.3; Table 8-47
*/
#define CXL_MMER_HEALTH_STATUS_CHANGE 0x00
#define CXL_MMER_MEDIA_STATUS_CHANGE 0x01
@@ -499,27 +639,35 @@ TRACE_EVENT(cxl_dram,
#define CXL_MMER_TEMP_CHANGE 0x03
#define CXL_MMER_DATA_PATH_ERROR 0x04
#define CXL_MMER_LSA_ERROR 0x05
+#define CXL_MMER_UNRECOV_SIDEBAND_BUS_ERROR 0x06
+#define CXL_MMER_MEMORY_MEDIA_FRU_ERROR 0x07
+#define CXL_MMER_POWER_MANAGEMENT_FAULT 0x08
#define show_dev_evt_type(type) __print_symbolic(type, \
{ CXL_MMER_HEALTH_STATUS_CHANGE, "Health Status Change" }, \
{ CXL_MMER_MEDIA_STATUS_CHANGE, "Media Status Change" }, \
{ CXL_MMER_LIFE_USED_CHANGE, "Life Used Change" }, \
{ CXL_MMER_TEMP_CHANGE, "Temperature Change" }, \
{ CXL_MMER_DATA_PATH_ERROR, "Data Path Error" }, \
- { CXL_MMER_LSA_ERROR, "LSA Error" } \
+ { CXL_MMER_LSA_ERROR, "LSA Error" }, \
+ { CXL_MMER_UNRECOV_SIDEBAND_BUS_ERROR, "Unrecoverable Internal Sideband Bus Error" }, \
+ { CXL_MMER_MEMORY_MEDIA_FRU_ERROR, "Memory Media FRU Error" }, \
+ { CXL_MMER_POWER_MANAGEMENT_FAULT, "Power Management Fault" } \
)
/*
* Device Health Information - DHI
*
- * CXL res 3.0 section 8.2.9.8.3.1; Table 8-100
+ * CXL rev 3.1 section 8.2.9.9.3.1; Table 8-133
*/
#define CXL_DHI_HS_MAINTENANCE_NEEDED BIT(0)
#define CXL_DHI_HS_PERFORMANCE_DEGRADED BIT(1)
#define CXL_DHI_HS_HW_REPLACEMENT_NEEDED BIT(2)
+#define CXL_DHI_HS_MEM_CAPACITY_DEGRADED BIT(3)
#define show_health_status_flags(flags) __print_flags(flags, "|", \
{ CXL_DHI_HS_MAINTENANCE_NEEDED, "MAINTENANCE_NEEDED" }, \
{ CXL_DHI_HS_PERFORMANCE_DEGRADED, "PERFORMANCE_DEGRADED" }, \
- { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" } \
+ { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" }, \
+ { CXL_DHI_HS_MEM_CAPACITY_DEGRADED, "MEM_CAPACITY_DEGRADED" } \
)
#define CXL_DHI_MS_NORMAL 0x00
@@ -573,6 +721,26 @@ TRACE_EVENT(cxl_dram,
#define CXL_DHI_AS_COR_VOL_ERR_CNT(as) ((as & 0x10) >> 4)
#define CXL_DHI_AS_COR_PER_ERR_CNT(as) ((as & 0x20) >> 5)
+#define CXL_MMER_VALID_COMPONENT BIT(0)
+#define CXL_MMER_VALID_COMPONENT_ID_FORMAT BIT(1)
+#define show_mem_module_valid_flags(flags) __print_flags(flags, "|", \
+ { CXL_MMER_VALID_COMPONENT, "COMPONENT" }, \
+ { CXL_MMER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" } \
+)
+#define CXL_MMER_DEV_EVT_SUB_TYPE_NOT_REPORTED 0x00
+#define CXL_MMER_DEV_EVT_SUB_TYPE_INVALID_CONFIG_DATA 0x01
+#define CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_CONFIG_DATA 0x02
+#define CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_MEM_MEDIA_FRU 0x03
+#define show_dev_event_sub_type(sub_type) __print_symbolic(sub_type, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_NOT_REPORTED, "Not Reported" }, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_INVALID_CONFIG_DATA, "Invalid Config Data" }, \
+ { CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_CONFIG_DATA, "Unsupported Config Data" }, \
+ { \
+ CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_MEM_MEDIA_FRU, \
+ "Unsupported Memory Media FRU" \
+ } \
+)
+
TRACE_EVENT(cxl_memory_module,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
@@ -595,6 +763,9 @@ TRACE_EVENT(cxl_memory_module,
__field(u32, cor_per_err_cnt)
__field(s16, device_temp)
__field(u8, add_status)
+ __field(u8, event_sub_type)
+ __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
+ __field(u16, validity_flags)
),
TP_fast_assign(
@@ -603,6 +774,7 @@ TRACE_EVENT(cxl_memory_module,
/* Memory Module Event */
__entry->event_type = rec->event_type;
+ __entry->event_sub_type = rec->event_sub_type;
/* Device Health Info */
__entry->health_status = rec->info.health_status;
@@ -613,13 +785,20 @@ TRACE_EVENT(cxl_memory_module,
__entry->cor_per_err_cnt = get_unaligned_le32(rec->info.cor_per_err_cnt);
__entry->device_temp = get_unaligned_le16(rec->info.device_temp);
__entry->add_status = rec->info.add_status;
+ __entry->validity_flags = get_unaligned_le16(rec->validity_flags);
+ memcpy(__entry->comp_id, &rec->component_id,
+ CXL_EVENT_GEN_MED_COMP_ID_SIZE);
),
- CXL_EVT_TP_printk("event_type='%s' health_status='%s' media_status='%s' " \
- "as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
+ CXL_EVT_TP_printk("event_type='%s' event_sub_type='%s' health_status='%s' " \
+ "media_status='%s' as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \
"as_cor_per_err_cnt=%s life_used=%u device_temp=%d " \
- "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u",
+ "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u " \
+ "validity_flags='%s' " \
+ "comp_id=%s comp_id_pldm_valid_flags='%s' " \
+ "pldm_entity_id=%s pldm_resource_id=%s",
show_dev_evt_type(__entry->event_type),
+ show_dev_event_sub_type(__entry->event_sub_type),
show_health_status_flags(__entry->health_status),
show_media_status(__entry->media_status),
show_two_bit_status(CXL_DHI_AS_LIFE_USED(__entry->add_status)),
@@ -628,7 +807,14 @@ TRACE_EVENT(cxl_memory_module,
show_one_bit_status(CXL_DHI_AS_COR_PER_ERR_CNT(__entry->add_status)),
__entry->life_used, __entry->device_temp,
__entry->dirty_shutdown_cnt, __entry->cor_vol_err_cnt,
- __entry->cor_per_err_cnt
+ __entry->cor_per_err_cnt,
+ show_mem_module_valid_flags(__entry->validity_flags),
+ __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
+ show_comp_id_pldm_flags(__entry->comp_id[0]),
+ show_pldm_entity_id(__entry->validity_flags, CXL_MMER_VALID_COMPONENT,
+ CXL_MMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
+ show_pldm_resource_id(__entry->validity_flags, CXL_MMER_VALID_COMPONENT,
+ CXL_MMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id)
)
);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 0d8b810a51f0..bbbaa0d0a670 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -235,6 +235,14 @@ struct cxl_regs {
struct_group_tagged(cxl_rch_regs, rch_regs,
void __iomem *dport_aer;
);
+
+ /*
+ * RCD upstream port specific PCIe cap register
+ * @rcd_pcie_cap: CXL 3.0 8.2.1.2 RCD Upstream Port RCRB
+ */
+ struct_group_tagged(cxl_rcd_regs, rcd_regs,
+ void __iomem *rcd_pcie_cap;
+ );
};
struct cxl_reg_map {
@@ -294,16 +302,18 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
struct cxl_device_regs *regs);
int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs);
+#define CXL_INSTANCES_COUNT -1
enum cxl_regloc_type;
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type);
int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
- struct cxl_register_map *map, int index);
+ struct cxl_register_map *map, unsigned int index);
int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
struct cxl_register_map *map);
int cxl_setup_regs(struct cxl_register_map *map);
struct cxl_dport;
resource_size_t cxl_rcd_component_reg_phys(struct device *dev,
struct cxl_dport *dport);
+int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport);
#define CXL_RESOURCE_NONE ((resource_size_t) -1)
#define CXL_TARGET_STRLEN 20
@@ -359,7 +369,7 @@ struct cxl_decoder {
struct cxl_region *region;
unsigned long flags;
int (*commit)(struct cxl_decoder *cxld);
- int (*reset)(struct cxl_decoder *cxld);
+ void (*reset)(struct cxl_decoder *cxld);
};
/*
@@ -730,6 +740,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
+void cxl_port_commit_reap(struct cxl_decoder *cxld);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
struct pci_bus *bus);
@@ -811,7 +822,8 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_endpoint_dvsec_info *info);
int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
-int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
+struct cxl_dev_state;
+int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info);
bool is_cxl_region(struct device *dev);
@@ -854,7 +866,6 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
-bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct cxl_port *parent_port, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port);
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index a9fd5cd5a0d2..2f03a4d5606e 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -252,7 +252,7 @@ module_cxl_driver(cxl_mem_driver);
MODULE_DESCRIPTION("CXL: Memory Expansion");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER);
/*
* create_endpoint() wants to validate port driver attach immediately after
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 188412d45e0d..a96e54c6259e 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -475,9 +475,9 @@ static bool is_cxl_restricted(struct pci_dev *pdev)
}
static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
- struct cxl_register_map *map)
+ struct cxl_register_map *map,
+ struct cxl_dport *dport)
{
- struct cxl_dport *dport;
resource_size_t component_reg_phys;
*map = (struct cxl_register_map) {
@@ -513,11 +513,24 @@ static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
* is an RCH and try to extract the Component Registers from
* an RCRB.
*/
- if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev))
- rc = cxl_rcrb_get_comp_regs(pdev, map);
-
- if (rc)
+ if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev)) {
+ struct cxl_dport *dport;
+ struct cxl_port *port __free(put_cxl_port) =
+ cxl_pci_find_port(pdev, &dport);
+ if (!port)
+ return -EPROBE_DEFER;
+
+ rc = cxl_rcrb_get_comp_regs(pdev, map, dport);
+ if (rc)
+ return rc;
+
+ rc = cxl_dport_map_rcd_linkcap(pdev, dport);
+ if (rc)
+ return rc;
+
+ } else if (rc) {
return rc;
+ }
return cxl_setup_regs(map);
}
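The port lookup above relies on scope-based cleanup: the __free(put_cxl_port) annotation (from <linux/cleanup.h>) drops the port reference automatically on every return path, including the early -EPROBE_DEFER and error returns. The underlying mechanism is the compiler's cleanup attribute, sketched here in userspace with free() standing in for the kernel's put helper:

#include <stdio.h>
#include <stdlib.h>

/* free_ptr() runs when the annotated variable goes out of scope, so
 * every return path releases the allocation without an explicit call. */
static void free_ptr(char **p)
{
	free(*p);
	printf("released on scope exit\n");
}

int main(void)
{
	char *buf __attribute__((cleanup(free_ptr))) = malloc(32);

	if (!buf)
		return 1;
	/* ... use buf; no explicit free() before any return ... */
	return 0;
}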
@@ -764,10 +777,6 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
return 0;
}
- rc = cxl_mem_alloc_event_buf(mds);
- if (rc)
- return rc;
-
rc = cxl_event_get_int_policy(mds, &policy);
if (rc)
return rc;
@@ -781,6 +790,10 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
return -EBUSY;
}
+ rc = cxl_mem_alloc_event_buf(mds);
+ if (rc)
+ return rc;
+
rc = cxl_event_irqsetup(mds);
if (rc)
return rc;
@@ -807,6 +820,86 @@ static int cxl_pci_type3_init_mailbox(struct cxl_dev_state *cxlds)
return 0;
}
+static ssize_t rcd_pcie_cap_emit(struct device *dev, u16 offset, char *buf, size_t width)
+{
+ struct cxl_dev_state *cxlds = dev_get_drvdata(dev);
+ struct cxl_memdev *cxlmd = cxlds->cxlmd;
+ struct device *root_dev;
+ struct cxl_dport *dport;
+ struct cxl_port *root __free(put_cxl_port) =
+ cxl_mem_find_port(cxlmd, &dport);
+
+ if (!root)
+ return -ENXIO;
+
+ root_dev = root->uport_dev;
+ if (!root_dev)
+ return -ENXIO;
+
+ if (!dport->regs.rcd_pcie_cap)
+ return -ENXIO;
+
+ guard(device)(root_dev);
+ if (!root_dev->driver)
+ return -ENXIO;
+
+ switch (width) {
+ case 2:
+ return sysfs_emit(buf, "%#x\n",
+ readw(dport->regs.rcd_pcie_cap + offset));
+ case 4:
+ return sysfs_emit(buf, "%#x\n",
+ readl(dport->regs.rcd_pcie_cap + offset));
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t rcd_link_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCAP, buf, sizeof(u32));
+}
+static DEVICE_ATTR_RO(rcd_link_cap);
+
+static ssize_t rcd_link_ctrl_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCTL, buf, sizeof(u16));
+}
+static DEVICE_ATTR_RO(rcd_link_ctrl);
+
+static ssize_t rcd_link_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return rcd_pcie_cap_emit(dev, PCI_EXP_LNKSTA, buf, sizeof(u16));
+}
+static DEVICE_ATTR_RO(rcd_link_status);
+
+static struct attribute *cxl_rcd_attrs[] = {
+ &dev_attr_rcd_link_cap.attr,
+ &dev_attr_rcd_link_ctrl.attr,
+ &dev_attr_rcd_link_status.attr,
+ NULL
+};
+
+static umode_t cxl_rcd_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (is_cxl_restricted(pdev))
+ return a->mode;
+
+ return 0;
+}
+
+static struct attribute_group cxl_rcd_group = {
+ .attrs = cxl_rcd_attrs,
+ .is_visible = cxl_rcd_visible,
+};
+__ATTRIBUTE_GROUPS(cxl_rcd);
+
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
@@ -814,7 +907,8 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct cxl_dev_state *cxlds;
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
- int i, rc, pmu_count;
+ int rc, pmu_count;
+ unsigned int i;
bool irq_avail;
/*
@@ -916,6 +1010,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
+ if (pmu_count < 0)
+ return pmu_count;
+
for (i = 0; i < pmu_count; i++) {
struct cxl_pmu_regs pmu_regs;
@@ -942,8 +1039,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- rc = cxl_pci_ras_unmask(pdev);
- if (rc)
+ if (cxl_pci_ras_unmask(pdev))
dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");
pci_save_state(pdev);
@@ -1016,6 +1112,7 @@ static struct pci_driver cxl_pci_driver = {
.id_table = cxl_mem_pci_tbl,
.probe = cxl_pci_probe,
.err_handler = &cxl_error_handlers,
+ .dev_groups = cxl_rcd_groups,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
@@ -1093,4 +1190,4 @@ module_init(cxl_pci_driver_init);
module_exit(cxl_pci_driver_exit);
MODULE_DESCRIPTION("CXL: PCI manageability");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index d2d43a4fc053..f9c95996e937 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -459,7 +459,7 @@ MODULE_DESCRIPTION("CXL PMEM: Persistent Memory Support");
MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 861dde65768f..d2bfd1ff5492 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -98,7 +98,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
struct cxl_port *root;
int rc;
- rc = cxl_dvsec_rr_decode(cxlds->dev, port, &info);
+ rc = cxl_dvsec_rr_decode(cxlds, &info);
if (rc < 0)
return rc;
@@ -173,7 +173,7 @@ static ssize_t CDAT_read(struct file *filp, struct kobject *kobj,
static BIN_ATTR_ADMIN_RO(CDAT, 0);
static umode_t cxl_port_bin_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *attr, int i)
+ const struct bin_attribute *attr, int i)
{
struct device *dev = kobj_to_dev(kobj);
struct cxl_port *port = to_cxl_port(dev);
@@ -208,8 +208,23 @@ static struct cxl_driver cxl_port_driver = {
},
};
-module_cxl_driver(cxl_port_driver);
+static int __init cxl_port_init(void)
+{
+ return cxl_driver_register(&cxl_port_driver);
+}
+/*
+ * Be ready to immediately enable ports emitted by the platform CXL root
+ * (e.g. cxl_acpi) when CONFIG_CXL_PORT=y.
+ */
+subsys_initcall(cxl_port_init);
+
+static void __exit cxl_port_exit(void)
+{
+ cxl_driver_unregister(&cxl_port_driver);
+}
+module_exit(cxl_port_exit);
+
MODULE_DESCRIPTION("CXL: Port enumeration and services");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
MODULE_ALIAS_CXL(CXL_DEVICE_PORT);
diff --git a/drivers/dax/cxl.c b/drivers/dax/cxl.c
index 9b29e732b39a..13cd94d32ff7 100644
--- a/drivers/dax/cxl.c
+++ b/drivers/dax/cxl.c
@@ -46,4 +46,4 @@ MODULE_ALIAS_CXL(CXL_DEVICE_DAX_REGION);
MODULE_DESCRIPTION("CXL DAX: direct access to CXL regions");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 446617b73aea..0867115aeef2 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -40,6 +40,12 @@ struct dax_region {
struct device *youngest;
};
+/**
+ * struct dax_mapping - device to display mapping range attributes
+ * @dev: device representing this range
+ * @range_id: index within dev_dax ranges array
+ * @id: ida of this mapping
+ */
struct dax_mapping {
struct device dev;
int range_id;
@@ -47,6 +53,18 @@ struct dax_mapping {
};
/**
+ * struct dev_dax_range - tuple representing a range of memory used by dev_dax
+ * @pgoff: page offset
+ * @range: resource-span
+ * @mapping: reference to the dax_mapping for this range
+ */
+struct dev_dax_range {
+ unsigned long pgoff;
+ struct range range;
+ struct dax_mapping *mapping;
+};
+
+/**
* struct dev_dax - instance data for a subdivision of a dax region, and
* data while the device is activated in the driver.
* @region - parent region
@@ -58,7 +76,7 @@ struct dax_mapping {
* @dev - device core
* @pgmap - pgmap for memmap setup / lifetime (driver owned)
* @nr_range: size of @ranges
- * @ranges: resource-span + pgoff tuples for the instance
+ * @ranges: range tuples of memory used
*/
struct dev_dax {
struct dax_region *region;
@@ -72,11 +90,7 @@ struct dev_dax {
struct dev_pagemap *pgmap;
bool memmap_on_memory;
int nr_range;
- struct dev_dax_range {
- unsigned long pgoff;
- struct range range;
- struct dax_mapping *mapping;
- } *ranges;
+ struct dev_dax_range *ranges;
};
/*
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 9c1a729cd77e..6d74e62bbee0 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -86,7 +86,7 @@ static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
nr_pages = 1;
pgoff = linear_page_index(vmf->vma,
- ALIGN(vmf->address, fault_size));
+ ALIGN_DOWN(vmf->address, fault_size));
for (i = 0; i < nr_pages; i++) {
struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
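The dax_set_mapping() change above swaps ALIGN() (round up) for ALIGN_DOWN() (round down): the page index for a huge fault must be derived from the start of the fault window containing vmf->address, not from the next boundary. A stand-alone illustration with a 2MiB fault size:

#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long fault_size = 2UL << 20;		/* 2 MiB */
	unsigned long addr = 0x200000UL + 0x1000UL;	/* inside the window */

	printf("up=%#lx down=%#lx\n",
	       ALIGN_UP(addr, fault_size),	/* 0x400000: wrong base */
	       ALIGN_DOWN(addr, fault_size));	/* 0x200000: right base */
	return 0;
}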
diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile
deleted file mode 100644
index 191c31f0d4f0..000000000000
--- a/drivers/dax/pmem/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
-obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o
-
-dax_pmem-y := pmem.o
-dax_pmem_core-y := core.o
-dax_pmem_compat-y := compat.o
diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c
deleted file mode 100644
index dfe91a2990fe..000000000000
--- a/drivers/dax/pmem/pmem.c
+++ /dev/null
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
-#include <linux/percpu-refcount.h>
-#include <linux/memremap.h>
-#include <linux/module.h>
-#include <linux/pfn_t.h>
-#include <linux/nd.h>
-#include "../bus.h"
-
-
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 3ebac2496679..70219099c604 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -244,13 +244,9 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
edev = NULL;
out:
mutex_unlock(&devfreq_event_list_lock);
-
- if (!edev) {
- of_node_put(node);
- return ERR_PTR(-ENODEV);
- }
-
of_node_put(node);
+ if (!edev)
+ return ERR_PTR(-ENODEV);
return edev;
}
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 5edc522f715c..6a3efd782ad0 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -284,7 +284,7 @@ static void exynos_nocp_remove(struct platform_device *pdev)
static struct platform_driver exynos_nocp_driver = {
.probe = exynos_nocp_probe,
- .remove_new = exynos_nocp_remove,
+ .remove = exynos_nocp_remove,
.driver = {
.name = "exynos-nocp",
.of_match_table = exynos_nocp_id_match,
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 7002df20a49e..88cd4dfe87e1 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -701,7 +701,7 @@ static void exynos_ppmu_remove(struct platform_device *pdev)
static struct platform_driver exynos_ppmu_driver = {
.probe = exynos_ppmu_probe,
- .remove_new = exynos_ppmu_remove,
+ .remove = exynos_ppmu_remove,
.driver = {
.name = "exynos-ppmu",
.of_match_table = exynos_ppmu_id_match,
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 7d06c476d8e9..b9ea7ad2e51b 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -236,8 +236,7 @@ err_regulator:
return ret;
}
-static int exynos_bus_parse_of(struct device_node *np,
- struct exynos_bus *bus)
+static int exynos_bus_parse_of(struct exynos_bus *bus)
{
struct device *dev = bus->dev;
struct dev_pm_opp *opp;
@@ -408,7 +407,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
}
/* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
+ ret = exynos_bus_parse_of(bus);
if (ret < 0)
goto err_reg;
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 7ad5225b0381..22fe9e631f8a 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -430,7 +430,7 @@ MODULE_DEVICE_TABLE(of, mtk_ccifreq_machines);
static struct platform_driver mtk_ccifreq_platdrv = {
.probe = mtk_ccifreq_probe,
- .remove_new = mtk_ccifreq_remove,
+ .remove = mtk_ccifreq_remove,
.driver = {
.name = "mtk-ccifreq",
.of_match_table = mtk_ccifreq_machines,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index d405cee92c25..dbdce7636ca5 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -474,7 +474,7 @@ MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
static struct platform_driver rk3399_dmcfreq_driver = {
.probe = rk3399_dmcfreq_probe,
- .remove_new = rk3399_dmcfreq_remove,
+ .remove = rk3399_dmcfreq_remove,
.driver = {
.name = "rk3399-dmc-freq",
.pm = &rk3399_dmcfreq_pm,
diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c
index bcf654f4ff96..7c6ae91ede1f 100644
--- a/drivers/devfreq/sun8i-a33-mbus.c
+++ b/drivers/devfreq/sun8i-a33-mbus.c
@@ -495,7 +495,7 @@ static SIMPLE_DEV_PM_OPS(sun8i_a33_mbus_pm_ops,
static struct platform_driver sun8i_a33_mbus_driver = {
.probe = sun8i_a33_mbus_probe,
- .remove_new = sun8i_a33_mbus_remove,
+ .remove = sun8i_a33_mbus_remove,
.driver = {
.name = "sun8i-a33-mbus",
.of_match_table = sun8i_a33_mbus_of_match,
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index b46eb8a552d7..fee04fdb0822 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -36,6 +36,7 @@ config UDMABUF
depends on DMA_SHARED_BUFFER
depends on MEMFD_CREATE || COMPILE_TEST
depends on MMU
+ select VMAP_PFN
help
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 8892bc701a66..5baa83b85515 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -60,7 +60,7 @@ static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
{
}
-static void __dma_buf_debugfs_list_del(struct file *file)
+static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
{
}
#endif
@@ -176,8 +176,9 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
dmabuf = file->private_data;
/* only support discovering the end of the buffer,
- but also allow SEEK_SET to maintain the idiomatic
- SEEK_END(0), SEEK_CUR(0) pattern */
+ * but also allow SEEK_SET to maintain the idiomatic
+ * SEEK_END(0), SEEK_CUR(0) pattern.
+ */
if (whence == SEEK_END)
base = dmabuf->size;
else if (whence == SEEK_SET)
@@ -558,7 +559,7 @@ static struct file *dma_buf_getfile(size_t size, int flags)
* Override ->i_ino with the unique and dmabuffs specific
* value.
*/
- inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
+ inode->i_ino = atomic64_inc_return(&dmabuf_inode);
flags &= O_ACCMODE | O_NONBLOCK;
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
@@ -702,7 +703,7 @@ err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");
/**
* dma_buf_fd - returns a file descriptor for the given struct dma_buf
@@ -726,7 +727,7 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
return fd;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");
/**
* dma_buf_get - returns the struct dma_buf related to an fd
@@ -752,7 +753,7 @@ struct dma_buf *dma_buf_get(int fd)
return file->private_data;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF");
/**
* dma_buf_put - decreases refcount of the buffer
@@ -771,7 +772,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
fput(dmabuf->file);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");
static void mangle_sg_table(struct sg_table *sg_table)
{
@@ -782,13 +783,14 @@ static void mangle_sg_table(struct sg_table *sg_table)
/* To catch abuse of the underlying struct page by importers mix
* up the bits, but take care to preserve the low SG_ bits to
* not corrupt the sgt. The mixing is undone in __unmap_dma_buf
- * before passing the sgt back to the exporter. */
+ * before passing the sgt back to the exporter.
+ */
for_each_sgtable_sg(sg_table, sg, i)
sg->page_link ^= ~0xffUL;
#endif
}
-static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
+static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
@@ -976,7 +978,7 @@ err_unlock:
dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
/**
* dma_buf_attach - Wrapper for dma_buf_dynamic_attach
@@ -991,7 +993,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
{
return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
static void __unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sg_table,
@@ -1035,7 +1037,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
kfree(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");
/**
* dma_buf_pin - Lock down the DMA-buf
@@ -1065,7 +1067,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");
/**
* dma_buf_unpin - Unpin a DMA-buf
@@ -1086,7 +1088,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
if (dmabuf->ops->unpin)
dmabuf->ops->unpin(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");
/**
* dma_buf_map_attachment - Returns the scatterlist table of the attachment;
@@ -1174,7 +1176,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
/**
* dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
@@ -1202,7 +1204,7 @@ dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
return sg_table;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");
/**
* dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
@@ -1234,7 +1236,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
dma_buf_unpin(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
/**
* dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer;might
@@ -1259,7 +1261,7 @@ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
dma_buf_unmap_attachment(attach, sg_table, direction);
dma_resv_unlock(attach->dmabuf->resv);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");
/**
* dma_buf_move_notify - notify attachments that DMA-buf is moving
@@ -1279,7 +1281,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf)
if (attach->importer_ops)
attach->importer_ops->move_notify(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");
/**
* DOC: cpu access
@@ -1296,10 +1298,12 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
* vmap interface is introduced. Note that on very old 32-bit architectures
* vmalloc space might be limited and result in vmap calls failing.
*
- * Interfaces::
+ * Interfaces:
*
- * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
- * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
+ * .. code-block:: c
+ *
+ * void *dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+ * void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
*
* The vmap call can fail if there is no vmap support in the exporter, or if
* it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
@@ -1356,10 +1360,11 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
* enough, since adding interfaces to intercept pagefaults and allow pte
* shootdowns would increase the complexity quite a bit.
*
- * Interface::
+ * Interface:
+ *
+ * .. code-block:: c
*
- * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
- * unsigned long);
+ * int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
*
* If the importing subsystem simply provides a special-purpose mmap call to
* set up a mapping in userspace, calling do_mmap with &dma_buf.file will
@@ -1424,7 +1429,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");
/**
* dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
@@ -1452,7 +1457,7 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");
/**
@@ -1494,7 +1499,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return dmabuf->ops->mmap(dmabuf, vma);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF");
/**
* dma_buf_vmap - Create virtual mapping for the buffer object into kernel
@@ -1547,7 +1552,7 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");
/**
* dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
@@ -1574,7 +1579,7 @@ int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
@@ -1598,7 +1603,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
iosys_map_clear(&dmabuf->vmap_ptr);
}
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");
/**
* dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
@@ -1614,7 +1619,7 @@ void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
dma_buf_vunmap(dmabuf, map);
dma_resv_unlock(dmabuf->resv);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
@@ -1694,7 +1699,7 @@ static int dma_buf_init_debugfs(void)
dma_buf_debugfs_dir = d;
- d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+ d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
NULL, &dma_buf_debug_fops);
if (IS_ERR(d)) {
pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 8a08ffde31e7..6657d4b30af9 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
struct dma_fence_array *array = to_dma_fence_array(fence);
+ int num_pending;
+ unsigned int i;
- if (atomic_read(&array->num_pending) > 0)
+ /*
+ * We need to read num_pending before checking the enable_signal bit
+ * to avoid racing with the enable_signaling() implementation, which
+ * might decrement the counter and cause a partial check.
+ * atomic_read_acquire() pairs with atomic_dec_and_test() in
+ * dma_fence_array_enable_signaling().
+ *
+ * The !--num_pending check accounts for the any_signaled case: if we
+ * race with enable_signaling(), the num_pending check in the
+ * signaling-enabled branch might be outdated (num_pending might have
+ * been decremented), but that's fine; the caller will get the right
+ * value when testing again later.
+ */
+ num_pending = atomic_read_acquire(&array->num_pending);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
+ if (num_pending <= 0)
+ goto signal;
return false;
+ }
+
+ for (i = 0; i < array->num_fences; ++i) {
+ if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
+ goto signal;
+ }
+ return false;
+signal:
dma_fence_array_clear_pending_error(array);
return true;
}
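The correctness argument above hinges on the pairing between the counter and
the flag. A self-contained sketch of the generic pattern (hypothetical names,
not the dma-fence code itself):

	/* Writer: decrement first (atomic_dec_and_test() is fully
	 * ordered on success), then make the flag visible.
	 */
	if (atomic_dec_and_test(&pending))
		set_bit(ENABLE_BIT, &flags);

	/* Reader: acquire-load the counter before testing the flag so
	 * the pair of updates is never observed half-done.
	 */
	n = atomic_read_acquire(&pending);
	if (test_bit(ENABLE_BIT, &flags) && n <= 0)
		;	/* definitely signaled */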
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 628af51c81af..6345062731f1 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -12,6 +12,7 @@
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
+#include <linux/sort.h>
/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *
@@ -59,6 +60,25 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
+
+static int fence_cmp(const void *_a, const void *_b)
+{
+ struct dma_fence *a = *(struct dma_fence **)_a;
+ struct dma_fence *b = *(struct dma_fence **)_b;
+
+ if (a->context < b->context)
+ return -1;
+ else if (a->context > b->context)
+ return 1;
+
+ if (dma_fence_is_later(b, a))
+ return 1;
+ else if (dma_fence_is_later(a, b))
+ return -1;
+
+ return 0;
+}
+
/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
@@ -67,8 +87,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence_array *result;
struct dma_fence *tmp, **array;
ktime_t timestamp;
- unsigned int i;
- size_t count;
+ int i, j, count;
count = 0;
timestamp = ns_to_ktime(0);
@@ -96,78 +115,55 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
if (!array)
return NULL;
- /*
- * This trashes the input fence array and uses it as position for the
- * following merge loop. This works because the dma_fence_merge()
- * wrapper macro is creating this temporary array on the stack together
- * with the iterators.
- */
- for (i = 0; i < num_fences; ++i)
- fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
-
count = 0;
- do {
- unsigned int sel;
-
-restart:
- tmp = NULL;
- for (i = 0; i < num_fences; ++i) {
- struct dma_fence *next;
-
- while (fences[i] && dma_fence_is_signaled(fences[i]))
- fences[i] = dma_fence_unwrap_next(&iter[i]);
-
- next = fences[i];
- if (!next)
- continue;
-
- /*
- * We can't guarantee that inpute fences are ordered by
- * context, but it is still quite likely when this
- * function is used multiple times. So attempt to order
- * the fences by context as we pass over them and merge
- * fences with the same context.
- */
- if (!tmp || tmp->context > next->context) {
- tmp = next;
- sel = i;
-
- } else if (tmp->context < next->context) {
- continue;
-
- } else if (dma_fence_is_later(tmp, next)) {
- fences[i] = dma_fence_unwrap_next(&iter[i]);
- goto restart;
+ for (i = 0; i < num_fences; ++i) {
+ dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+ if (!dma_fence_is_signaled(tmp)) {
+ array[count++] = dma_fence_get(tmp);
} else {
- fences[sel] = dma_fence_unwrap_next(&iter[sel]);
- goto restart;
+ ktime_t t = dma_fence_timestamp(tmp);
+
+ if (ktime_after(t, timestamp))
+ timestamp = t;
}
}
+ }
- if (tmp) {
- array[count++] = dma_fence_get(tmp);
- fences[sel] = dma_fence_unwrap_next(&iter[sel]);
- }
- } while (tmp);
+ if (count == 0 || count == 1)
+ goto return_fastpath;
- if (count == 0) {
- tmp = dma_fence_allocate_private_stub(ktime_get());
- goto return_tmp;
- }
+ sort(array, count, sizeof(*array), fence_cmp, NULL);
- if (count == 1) {
- tmp = array[0];
- goto return_tmp;
+ /*
+ * Only keep the most recent fence for each context.
+ */
+ j = 0;
+ for (i = 1; i < count; i++) {
+ if (array[i]->context == array[j]->context)
+ dma_fence_put(array[i]);
+ else
+ array[++j] = array[i];
}
-
- result = dma_fence_array_create(count, array,
- dma_fence_context_alloc(1),
- 1, false);
- if (!result) {
- tmp = NULL;
- goto return_tmp;
+ count = ++j;
+
+ if (count > 1) {
+ result = dma_fence_array_create(count, array,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!result) {
+ for (i = 0; i < count; i++)
+ dma_fence_put(array[i]);
+ tmp = NULL;
+ goto return_tmp;
+ }
+ return &result->base;
}
- return &result->base;
+
+return_fastpath:
+ if (count == 0)
+ tmp = dma_fence_allocate_private_stub(timestamp);
+ else
+ tmp = array[0];
return_tmp:
kfree(array);
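Callers reach this function through the unwrap-merge wrapper macro rather
than directly. A minimal usage sketch, assuming two fences f1 and f2 the
caller already holds references on:

	#include <linux/dma-fence-unwrap.h>

	struct dma_fence *merged;

	/* Collapses containers, drops already-signaled fences, keeps only
	 * the newest fence per context (see fence_cmp() and the compaction
	 * loop above), and returns a single fence or a fence array.
	 */
	merged = dma_fence_unwrap_merge(f1, f2);
	if (!merged)
		return -ENOMEM;	/* allocation failure */

	/* use merged as the single dependency, then drop it */
	dma_fence_put(merged);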
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 0393a9bba3a8..f0cdd3e99d36 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -309,8 +309,8 @@ bool dma_fence_begin_signalling(void)
if (in_atomic())
return true;
- /* ... and non-recursive readlock */
- lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);
+ /* ... and non-recursive successful read_trylock */
+ lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _RET_IP_);
return false;
}
@@ -341,7 +341,7 @@ void __dma_fence_might_wait(void)
lock_map_acquire(&dma_fence_lockdep_map);
lock_map_release(&dma_fence_lockdep_map);
if (tmp)
- lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
+ lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _THIS_IP_);
}
#endif
@@ -412,7 +412,7 @@ int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
unsigned long flags;
int ret;
- if (!fence)
+ if (WARN_ON(!fence))
return -EINVAL;
spin_lock_irqsave(fence->lock, flags);
@@ -464,7 +464,7 @@ int dma_fence_signal(struct dma_fence *fence)
int ret;
bool tmp;
- if (!fence)
+ if (WARN_ON(!fence))
return -EINVAL;
tmp = dma_fence_begin_signalling();
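For reference, the annotation being changed has this signature (trimmed from
<linux/lockdep.h>):

	void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check,
			  struct lockdep_map *nest_lock, unsigned long ip);

so the 0 -> 1 change flips the trylock argument: the fake read lock is now
recorded as a successful read_trylock(), which by definition never blocks and
therefore avoids false-positive deadlock reports against locks held at the
annotation site.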
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 93be88b805fe..9512d050563a 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -309,13 +309,13 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
struct page *page = cma_pages;
while (nr_clear_pages > 0) {
- void *vaddr = kmap_atomic(page);
+ void *vaddr = kmap_local_page(page);
memset(vaddr, 0, PAGE_SIZE);
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
/*
* Avoid wasting time zeroing memory if the process
- * has been killed by by SIGKILL
+ * has been killed by SIGKILL.
*/
if (fatal_signal_pending(current))
goto free_cma;
@@ -366,7 +366,7 @@ static const struct dma_heap_ops cma_heap_ops = {
.allocate = cma_heap_allocate,
};
-static int __add_cma_heap(struct cma *cma, void *data)
+static int __init __add_cma_heap(struct cma *cma, void *data)
{
struct cma_heap *cma_heap;
struct dma_heap_export_info exp_info;
@@ -391,7 +391,7 @@ static int __add_cma_heap(struct cma *cma, void *data)
return 0;
}
-static int add_default_cma_heap(void)
+static int __init add_default_cma_heap(void)
{
struct cma *default_cma = dev_get_cma_area(NULL);
int ret = 0;
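The kmap_atomic() to kmap_local_page() conversion matters here because local
mappings do not disable preemption or pagefaults, so the zeroing loop remains
preemptible and the fatal_signal_pending() bail-out stays meaningful. A
sketch of the resulting idiom:

	void *vaddr = kmap_local_page(page);	/* CPU-local, nestable */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_local(vaddr);			/* unmap in reverse order */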
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index d78cdb9d01e5..26d5dc89ea16 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -421,7 +421,7 @@ static const struct dma_heap_ops system_heap_ops = {
.allocate = system_heap_allocate,
};
-static int system_heap_create(void)
+static int __init system_heap_create(void)
{
struct dma_heap_export_info exp_info;
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index c353029789cf..f5905d67dedb 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -173,11 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
}
-static bool timeline_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static void timeline_fence_value_str(struct dma_fence *fence,
char *str, int size)
{
@@ -211,7 +206,6 @@ static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadlin
static const struct dma_fence_ops timeline_fence_ops = {
.get_driver_name = timeline_fence_get_driver_name,
.get_timeline_name = timeline_fence_get_timeline_name,
- .enable_signaling = timeline_fence_enable_signaling,
.signaled = timeline_fence_signaled,
.release = timeline_fence_release,
.fence_value_str = timeline_fence_value_str,
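Dropping the hook relies on the dma-fence core treating a missing
.enable_signaling as "nothing to arm, succeed", which is exactly what a
callback that only returns true expressed. A sketch of the minimal ops table
this leaves behind (hypothetical names):

	static const struct dma_fence_ops example_ops = {
		.get_driver_name   = example_get_driver_name,
		.get_timeline_name = example_get_timeline_name,
		.signaled          = example_signaled,
		/* no .enable_signaling: the core defaults to success */
	};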
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 047c3cd2ceff..cc7398cc17d6 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -27,15 +27,21 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is
struct udmabuf {
pgoff_t pagecount;
struct folio **folios;
+
+ /**
+ * Unlike folios, pinned_folios is only used for unpinning.
+ * So nr_pinned is not the same as pagecount: pinned_folios only
+ * records the folios that were pinned at udmabuf_create time.
+ * Note that, since a folio may be pinned multiple times, it can be
+ * added to pinned_folios multiple times, depending on how many
+ * times it was pinned at creation.
+ */
+ pgoff_t nr_pinned;
+ struct folio **pinned_folios;
+
struct sg_table *sg;
struct miscdevice *device;
pgoff_t *offsets;
- struct list_head unpin_list;
-};
-
-struct udmabuf_folio {
- struct folio *folio;
- struct list_head list;
};
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -43,7 +49,8 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct udmabuf *ubuf = vma->vm_private_data;
pgoff_t pgoff = vmf->pgoff;
- unsigned long pfn;
+ unsigned long addr, pfn;
+ vm_fault_t ret;
if (pgoff >= ubuf->pagecount)
return VM_FAULT_SIGBUS;
@@ -51,7 +58,35 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
pfn = folio_pfn(ubuf->folios[pgoff]);
pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
- return vmf_insert_pfn(vma, vmf->address, pfn);
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+ if (ret & VM_FAULT_ERROR)
+ return ret;
+
+ /* pre fault */
+ pgoff = vma->vm_pgoff;
+ addr = vma->vm_start;
+
+ for (; addr < vma->vm_end; pgoff++, addr += PAGE_SIZE) {
+ if (addr == vmf->address)
+ continue;
+
+ if (WARN_ON(pgoff >= ubuf->pagecount))
+ break;
+
+ pfn = folio_pfn(ubuf->folios[pgoff]);
+ pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
+
+ /*
+ * If the vmf_insert_pfn() below fails, we do not return an
+ * error here during this pre-fault step; the error will be
+ * returned when the address is actually accessed and the
+ * fault is retried.
+ */
+ if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
+ break;
+ }
+
+ return ret;
}
static const struct vm_operations_struct udmabuf_vm_ops = {
@@ -74,21 +109,29 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
struct udmabuf *ubuf = buf->priv;
- struct page **pages;
+ unsigned long *pfns;
void *vaddr;
pgoff_t pg;
dma_resv_assert_held(buf->resv);
- pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
- if (!pages)
+ /*
+ * HVO (HugeTLB Vmemmap Optimization) may free tail pages, so
+ * use PFNs to map each folio into the vmalloc area.
+ */
+ pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
return -ENOMEM;
- for (pg = 0; pg < ubuf->pagecount; pg++)
- pages[pg] = &ubuf->folios[pg]->page;
+ for (pg = 0; pg < ubuf->pagecount; pg++) {
+ unsigned long pfn = folio_pfn(ubuf->folios[pg]);
+
+ pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
+ pfns[pg] = pfn;
+ }
- vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
- kfree(pages);
+ vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
+ kvfree(pfns);
if (!vaddr)
return -EINVAL;
@@ -159,34 +202,42 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
return put_sg_table(at->dev, sg, direction);
}
-static void unpin_all_folios(struct list_head *unpin_list)
+static void unpin_all_folios(struct udmabuf *ubuf)
{
- struct udmabuf_folio *ubuf_folio;
+ pgoff_t i;
- while (!list_empty(unpin_list)) {
- ubuf_folio = list_first_entry(unpin_list,
- struct udmabuf_folio, list);
- unpin_folio(ubuf_folio->folio);
+ for (i = 0; i < ubuf->nr_pinned; ++i)
+ unpin_folio(ubuf->pinned_folios[i]);
- list_del(&ubuf_folio->list);
- kfree(ubuf_folio);
- }
+ kvfree(ubuf->pinned_folios);
}
-static int add_to_unpin_list(struct list_head *unpin_list,
- struct folio *folio)
+static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
{
- struct udmabuf_folio *ubuf_folio;
+ ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);
+ if (!ubuf->folios)
+ return -ENOMEM;
+
+ ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL);
+ if (!ubuf->offsets)
+ return -ENOMEM;
- ubuf_folio = kzalloc(sizeof(*ubuf_folio), GFP_KERNEL);
- if (!ubuf_folio)
+ ubuf->pinned_folios = kvmalloc_array(pgcnt,
+ sizeof(*ubuf->pinned_folios),
+ GFP_KERNEL);
+ if (!ubuf->pinned_folios)
return -ENOMEM;
- ubuf_folio->folio = folio;
- list_add_tail(&ubuf_folio->list, unpin_list);
return 0;
}
+static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
+{
+ unpin_all_folios(ubuf);
+ kvfree(ubuf->offsets);
+ kvfree(ubuf->folios);
+}
+
static void release_udmabuf(struct dma_buf *buf)
{
struct udmabuf *ubuf = buf->priv;
@@ -195,9 +246,7 @@ static void release_udmabuf(struct dma_buf *buf)
if (ubuf->sg)
put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
- unpin_all_folios(&ubuf->unpin_list);
- kfree(ubuf->offsets);
- kfree(ubuf->folios);
+ deinit_udmabuf(ubuf);
kfree(ubuf);
}
@@ -248,15 +297,12 @@ static const struct dma_buf_ops udmabuf_ops = {
};
#define SEALS_WANTED (F_SEAL_SHRINK)
-#define SEALS_DENIED (F_SEAL_WRITE)
+#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)
static int check_memfd_seals(struct file *memfd)
{
int seals;
- if (!memfd)
- return -EBADFD;
-
if (!shmem_file(memfd) && !is_file_hugepages(memfd))
return -EBADFD;
@@ -271,12 +317,10 @@ static int check_memfd_seals(struct file *memfd)
return 0;
}
-static int export_udmabuf(struct udmabuf *ubuf,
- struct miscdevice *device,
- u32 flags)
+static struct dma_buf *export_udmabuf(struct udmabuf *ubuf,
+ struct miscdevice *device)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *buf;
ubuf->device = device;
exp_info.ops = &udmabuf_ops;
@@ -284,123 +328,151 @@ static int export_udmabuf(struct udmabuf *ubuf,
exp_info.priv = ubuf;
exp_info.flags = O_RDWR;
- buf = dma_buf_export(&exp_info);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
+ return dma_buf_export(&exp_info);
+}
+
+static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
+ loff_t start, loff_t size, struct folio **folios)
+{
+ pgoff_t nr_pinned = ubuf->nr_pinned;
+ pgoff_t upgcnt = ubuf->pagecount;
+ u32 cur_folio, cur_pgcnt;
+ pgoff_t pgoff, pgcnt;
+ long nr_folios;
+ loff_t end;
+
+ pgcnt = size >> PAGE_SHIFT;
+ end = start + (pgcnt << PAGE_SHIFT) - 1;
+ nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);
+ if (nr_folios <= 0)
+ return nr_folios ? nr_folios : -EINVAL;
+
+ cur_pgcnt = 0;
+ for (cur_folio = 0; cur_folio < nr_folios; ++cur_folio) {
+ pgoff_t subpgoff = pgoff;
+ size_t fsize = folio_size(folios[cur_folio]);
+
+ ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];
- return dma_buf_fd(buf, flags);
+ for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
+ ubuf->folios[upgcnt] = folios[cur_folio];
+ ubuf->offsets[upgcnt] = subpgoff;
+ ++upgcnt;
+
+ if (++cur_pgcnt >= pgcnt)
+ goto end;
+ }
+
+ /*
+ * In a given range, only the first subpage of the first folio
+ * has an offset, which is returned by memfd_pin_folios().
+ * The first subpages of all other folios in the range have an
+ * offset of 0.
+ */
+ pgoff = 0;
+ }
+end:
+ ubuf->pagecount = upgcnt;
+ ubuf->nr_pinned = nr_pinned;
+ return 0;
}
static long udmabuf_create(struct miscdevice *device,
struct udmabuf_create_list *head,
struct udmabuf_create_item *list)
{
- pgoff_t pgoff, pgcnt, pglimit, pgbuf = 0;
- long nr_folios, ret = -EINVAL;
- struct file *memfd = NULL;
- struct folio **folios;
+ unsigned long max_nr_folios = 0;
+ struct folio **folios = NULL;
+ pgoff_t pgcnt = 0, pglimit;
struct udmabuf *ubuf;
- u32 i, j, k, flags;
- loff_t end;
+ struct dma_buf *dmabuf;
+ long ret = -EINVAL;
+ u32 i, flags;
ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
if (!ubuf)
return -ENOMEM;
- INIT_LIST_HEAD(&ubuf->unpin_list);
pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
- if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
- goto err;
- if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
- goto err;
- ubuf->pagecount += list[i].size >> PAGE_SHIFT;
- if (ubuf->pagecount > pglimit)
- goto err;
+ pgoff_t subpgcnt;
+
+ if (!PAGE_ALIGNED(list[i].offset))
+ goto err_noinit;
+ if (!PAGE_ALIGNED(list[i].size))
+ goto err_noinit;
+
+ subpgcnt = list[i].size >> PAGE_SHIFT;
+ pgcnt += subpgcnt;
+ if (pgcnt > pglimit)
+ goto err_noinit;
+
+ max_nr_folios = max_t(unsigned long, subpgcnt, max_nr_folios);
}
- if (!ubuf->pagecount)
- goto err;
+ if (!pgcnt)
+ goto err_noinit;
- ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
- GFP_KERNEL);
- if (!ubuf->folios) {
- ret = -ENOMEM;
+ ret = init_udmabuf(ubuf, pgcnt);
+ if (ret)
goto err;
- }
- ubuf->offsets = kcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
- GFP_KERNEL);
- if (!ubuf->offsets) {
+
+ folios = kvmalloc_array(max_nr_folios, sizeof(*folios), GFP_KERNEL);
+ if (!folios) {
ret = -ENOMEM;
goto err;
}
- pgbuf = 0;
for (i = 0; i < head->count; i++) {
- memfd = fget(list[i].memfd);
- ret = check_memfd_seals(memfd);
- if (ret < 0)
- goto err;
+ struct file *memfd = fget(list[i].memfd);
- pgcnt = list[i].size >> PAGE_SHIFT;
- folios = kmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
- if (!folios) {
- ret = -ENOMEM;
+ if (!memfd) {
+ ret = -EBADFD;
goto err;
}
- end = list[i].offset + (pgcnt << PAGE_SHIFT) - 1;
- ret = memfd_pin_folios(memfd, list[i].offset, end,
- folios, pgcnt, &pgoff);
- if (ret <= 0) {
- kfree(folios);
- if (!ret)
- ret = -EINVAL;
- goto err;
- }
-
- nr_folios = ret;
- pgoff >>= PAGE_SHIFT;
- for (j = 0, k = 0; j < pgcnt; j++) {
- ubuf->folios[pgbuf] = folios[k];
- ubuf->offsets[pgbuf] = pgoff << PAGE_SHIFT;
-
- if (j == 0 || ubuf->folios[pgbuf-1] != folios[k]) {
- ret = add_to_unpin_list(&ubuf->unpin_list,
- folios[k]);
- if (ret < 0) {
- kfree(folios);
- goto err;
- }
- }
-
- pgbuf++;
- if (++pgoff == folio_nr_pages(folios[k])) {
- pgoff = 0;
- if (++k == nr_folios)
- break;
- }
- }
+ /*
+ * Take the inode lock to protect against concurrent
+ * memfd_add_seals(), which takes this lock in write mode.
+ */
+ inode_lock_shared(file_inode(memfd));
+ ret = check_memfd_seals(memfd);
+ if (ret)
+ goto out_unlock;
- kfree(folios);
+ ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
+ list[i].size, folios);
+out_unlock:
+ inode_unlock_shared(file_inode(memfd));
fput(memfd);
- memfd = NULL;
+ if (ret)
+ goto err;
}
flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
- ret = export_udmabuf(ubuf, device, flags);
- if (ret < 0)
+ dmabuf = export_udmabuf(ubuf, device);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
goto err;
+ }
+ /*
+ * Ownership of ubuf is held by the dmabuf from here.
+ * If the following dma_buf_fd() fails, dma_buf_put() cleans up both the
+ * dmabuf and the ubuf (through udmabuf_ops.release).
+ */
+ ret = dma_buf_fd(dmabuf, flags);
+ if (ret < 0)
+ dma_buf_put(dmabuf);
+
+ kvfree(folios);
return ret;
err:
- if (memfd)
- fput(memfd);
- unpin_all_folios(&ubuf->unpin_list);
- kfree(ubuf->offsets);
- kfree(ubuf->folios);
+ deinit_udmabuf(ubuf);
+err_noinit:
kfree(ubuf);
+ kvfree(folios);
return ret;
}
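Seen from userspace, the seal rules above mean the backing memfd must carry
F_SEAL_SHRINK and must not carry F_SEAL_WRITE or F_SEAL_FUTURE_WRITE. A
minimal creation sketch against the <linux/udmabuf.h> UAPI (error handling
elided):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/udmabuf.h>

	int make_udmabuf(size_t size)	/* size must be page aligned */
	{
		struct udmabuf_create create = { 0 };
		int memfd, devfd, dmabuf;

		memfd = memfd_create("buf", MFD_ALLOW_SEALING);
		ftruncate(memfd, size);
		fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);	/* required */

		devfd = open("/dev/udmabuf", O_RDWR);
		create.memfd  = memfd;
		create.offset = 0;	/* page aligned, see checks above */
		create.size   = size;
		dmabuf = ioctl(devfd, UDMABUF_CREATE, &create);

		close(devfd);
		close(memfd);
		return dmabuf;		/* dma-buf fd, or -1 on error */
	}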
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d9ec1e69e428..8afea2e23360 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -162,8 +162,8 @@ config DMA_SA11X0
config DMA_SUN4I
tristate "Allwinner A10 DMA SoCs support"
- depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
- default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
+ depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUNIV
+ default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUNIV)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -378,6 +378,20 @@ config LOONGSON1_APB_DMA
This selects support for the APB DMA controller in Loongson1 SoCs,
which is required by Loongson1 NAND and audio support.
+config LOONGSON2_APB_DMA
+ tristate "Loongson2 APB DMA support"
+ depends on LOONGARCH || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the Loongson2 APB DMA controller driver. The
+ DMA controller has a single DMA channel which can be
+ configured for different peripherals on the APB bus, such as
+ audio, NAND and SDIO.
+
+ This DMA controller transfers data from memory to a peripheral FIFO.
+ It does not support memory-to-memory data transfer.
+
config LPC18XX_DMAMUX
bool "NXP LPC18xx/43xx DMA MUX for PL080"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -396,20 +410,6 @@ config LPC32XX_DMAMUX
Support for PL080 multiplexed DMA request lines on
LPC32XX platform.
-config LS2X_APB_DMA
- tristate "Loongson LS2X APB DMA support"
- depends on LOONGARCH || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Support for the Loongson LS2X APB DMA controller driver. The
- DMA controller is having single DMA channel which can be
- configured for different peripherals like audio, nand, sdio
- etc which is in APB bus.
-
- This DMA controller transfers data from memory to peripheral fifo.
- It does not support memory to memory data transfer.
-
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
@@ -740,8 +740,6 @@ source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
-source "drivers/dma/ptdma/Kconfig"
-
source "drivers/dma/qcom/Kconfig"
source "drivers/dma/dw/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index ad6a03c052ec..19ba465011a6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
-obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
@@ -50,9 +49,9 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-y += idxd/
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LOONGSON1_APB_DMA) += loongson1-apb-dma.o
+obj-$(CONFIG_LOONGSON2_APB_DMA) += loongson2-apb-dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
obj-$(CONFIG_LPC32XX_DMAMUX) += lpc32xx-dmamux.o
-obj-$(CONFIG_LS2X_APB_DMA) += ls2x-apb-dma.o
obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index a58a1600dd65..2abbe11e797e 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -9,18 +9,21 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/acpi.h>
+#include <linux/acpi_dma.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/acpi.h>
-#include <linux/acpi_dma.h>
#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
static LIST_HEAD(acpi_dma_list);
static DEFINE_MUTEX(acpi_dma_lock);
@@ -236,7 +239,7 @@ int acpi_dma_controller_free(struct device *dev)
}
EXPORT_SYMBOL_GPL(acpi_dma_controller_free);
-static void devm_acpi_dma_release(struct device *dev, void *res)
+static void devm_acpi_dma_free(void *dev)
{
acpi_dma_controller_free(dev);
}
@@ -259,37 +262,15 @@ int devm_acpi_dma_controller_register(struct device *dev,
(struct acpi_dma_spec *, struct acpi_dma *),
void *data)
{
- void *res;
int ret;
- res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data);
- if (ret) {
- devres_free(res);
+ if (ret)
return ret;
- }
- devres_add(dev, res);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
-/**
- * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
- * @dev: device that is unregistering as DMA controller
- *
- * Unregister a DMA controller registered with
- * devm_acpi_dma_controller_register(). Normally this function will not need to
- * be called and the resource management code will ensure that the resource is
- * freed.
- */
-void devm_acpi_dma_controller_free(struct device *dev)
-{
- WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL));
+ return devm_add_action_or_reset(dev, devm_acpi_dma_free, dev);
}
-EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
+EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
/**
* acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function
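The devres conversion above follows the generic pattern: register a teardown
action instead of hand-rolling a devres node. A minimal sketch of the idiom
(hypothetical resource, not this driver):

	static void demo_teardown(void *data)
	{
		demo_controller_free(data);		/* hypothetical */
	}

	static int demo_register(struct device *dev)
	{
		int ret = demo_controller_register(dev); /* hypothetical */

		if (ret)
			return ret;
		/* on failure this runs demo_teardown() before returning */
		return devm_add_action_or_reset(dev, demo_teardown, dev);
	}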
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index e6a6566b309e..a203fdd84950 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -954,7 +954,7 @@ static struct platform_driver msgdma_driver = {
.of_match_table = of_match_ptr(msgdma_match),
},
.probe = msgdma_probe,
- .remove_new = msgdma_remove,
+ .remove = msgdma_remove,
};
module_platform_driver(msgdma_driver);
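This follows the tree-wide conversion in which struct platform_driver's
.remove gained the void-returning signature that .remove_new was introduced
to stage, so the rename is mechanical. The callback shape it expects:

	static void msgdma_remove(struct platform_device *pdev)
	{
		/* teardown only; remove callbacks cannot fail */
	}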
diff --git a/drivers/dma/amd/Kconfig b/drivers/dma/amd/Kconfig
index 7d1f51d69675..00d874872a8f 100644
--- a/drivers/dma/amd/Kconfig
+++ b/drivers/dma/amd/Kconfig
@@ -1,4 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only
+#
+
+config AMD_AE4DMA
+ tristate "AMD AE4DMA Engine"
+ depends on (X86_64 || COMPILE_TEST) && PCI
+ depends on AMD_PTDMA
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD AE4DMA controller. This controller
+ provides DMA capabilities for high-bandwidth memory-to-memory
+ and I/O copy operations. It performs DMA transfers through
+ queue-based descriptor management. This DMA controller is
+ intended to be used with AMD Non-Transparent Bridge devices and
+ not for general-purpose peripheral DMA.
+
+config AMD_PTDMA
+ tristate "AMD PassThru DMA Engine"
+ depends on X86_64 && PCI
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the AMD PTDMA controller. This controller
+ provides DMA capabilities for high-bandwidth memory-to-memory
+ and I/O copy operations. It performs DMA transfers through
+ queue-based descriptor management. This DMA controller is
+ intended to be used with AMD Non-Transparent Bridge devices and
+ not for general-purpose peripheral DMA.
config AMD_QDMA
tristate "AMD Queue-based DMA"
diff --git a/drivers/dma/amd/Makefile b/drivers/dma/amd/Makefile
index 37212be9364f..11278c06374d 100644
--- a/drivers/dma/amd/Makefile
+++ b/drivers/dma/amd/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma/
+obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_AMD_QDMA) += qdma/
diff --git a/drivers/dma/amd/ae4dma/Makefile b/drivers/dma/amd/ae4dma/Makefile
new file mode 100644
index 000000000000..e918f85a80ec
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# AMD AE4DMA driver
+#
+
+obj-$(CONFIG_AMD_AE4DMA) += ae4dma.o
+
+ae4dma-objs := ae4dma-dev.o
+
+ae4dma-$(CONFIG_PCI) += ae4dma-pci.o
diff --git a/drivers/dma/amd/ae4dma/ae4dma-dev.c b/drivers/dma/amd/ae4dma/ae4dma-dev.c
new file mode 100644
index 000000000000..8de3bef41b58
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma-dev.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static unsigned int max_hw_q = 1;
+module_param(max_hw_q, uint, 0444);
+MODULE_PARM_DESC(max_hw_q, "max hw queues supported by engine (any non-zero value, default: 1)");
+
+static void ae4_pending_work(struct work_struct *work)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(work, struct ae4_cmd_queue, p_work.work);
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+ struct pt_cmd *cmd;
+ u32 cridx;
+
+ for (;;) {
+ wait_event_interruptible(ae4cmd_q->q_w,
+ ((atomic64_read(&ae4cmd_q->done_cnt)) <
+ atomic64_read(&ae4cmd_q->intr_cnt)));
+
+ atomic64_inc(&ae4cmd_q->done_cnt);
+
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ cridx = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);
+ while ((ae4cmd_q->dridx != cridx) && !list_empty(&ae4cmd_q->cmd)) {
+ cmd = list_first_entry(&ae4cmd_q->cmd, struct pt_cmd, entry);
+ list_del(&cmd->entry);
+
+ ae4_check_status_error(ae4cmd_q, ae4cmd_q->dridx);
+ cmd->pt_cmd_callback(cmd->data, cmd->ret);
+
+ ae4cmd_q->q_cmd_count--;
+ ae4cmd_q->dridx = (ae4cmd_q->dridx + 1) % CMD_Q_LEN;
+
+ complete_all(&ae4cmd_q->cmp);
+ }
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+ }
+}
+
+static irqreturn_t ae4_core_irq_handler(int irq, void *data)
+{
+ struct ae4_cmd_queue *ae4cmd_q = data;
+ struct pt_cmd_queue *cmd_q;
+ struct pt_device *pt;
+ u32 status;
+
+ cmd_q = &ae4cmd_q->cmd_q;
+ pt = cmd_q->pt;
+
+ pt->total_interrupts++;
+ atomic64_inc(&ae4cmd_q->intr_cnt);
+
+ status = readl(cmd_q->reg_control + AE4_INTR_STS_OFF);
+ if (status & BIT(0)) {
+ status &= GENMASK(31, 1);
+ writel(status, cmd_q->reg_control + AE4_INTR_STS_OFF);
+ }
+
+ wake_up(&ae4cmd_q->q_w);
+
+ return IRQ_HANDLED;
+}
+
+void ae4_destroy_work(struct ae4_device *ae4)
+{
+ struct ae4_cmd_queue *ae4cmd_q;
+ int i;
+
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+
+ if (!ae4cmd_q->pws)
+ break;
+
+ cancel_delayed_work_sync(&ae4cmd_q->p_work);
+ destroy_workqueue(ae4cmd_q->pws);
+ }
+}
+
+int ae4_core_init(struct ae4_device *ae4)
+{
+ struct pt_device *pt = &ae4->pt;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct device *dev = pt->dev;
+ struct pt_cmd_queue *cmd_q;
+ int i, ret = 0;
+
+ writel(max_hw_q, pt->io_regs);
+
+ for (i = 0; i < max_hw_q; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ ae4cmd_q->id = ae4->cmd_q_count;
+ ae4->cmd_q_count++;
+
+ cmd_q = &ae4cmd_q->cmd_q;
+ cmd_q->pt = pt;
+
+ cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+ ret = devm_request_irq(dev, ae4->ae4_irq[i], ae4_core_irq_handler, 0,
+ dev_name(pt->dev), ae4cmd_q);
+ if (ret)
+ return ret;
+
+ cmd_q->qsize = Q_SIZE(sizeof(struct ae4dma_desc));
+
+ cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma,
+ GFP_KERNEL);
+ if (!cmd_q->qbase)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+
+ cmd_q = &ae4cmd_q->cmd_q;
+
+ cmd_q->reg_control = pt->io_regs + ((i + 1) * AE4_Q_SZ);
+
+ /* Update the device registers with queue information. */
+ writel(CMD_Q_LEN, cmd_q->reg_control + AE4_MAX_IDX_OFF);
+
+ cmd_q->qdma_tail = cmd_q->qbase_dma;
+ writel(lower_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_L_OFF);
+ writel(upper_32_bits(cmd_q->qdma_tail), cmd_q->reg_control + AE4_Q_BASE_H_OFF);
+
+ INIT_LIST_HEAD(&ae4cmd_q->cmd);
+ init_waitqueue_head(&ae4cmd_q->q_w);
+
+ ae4cmd_q->pws = alloc_ordered_workqueue("ae4dma_%d", WQ_MEM_RECLAIM, ae4cmd_q->id);
+ if (!ae4cmd_q->pws) {
+ ae4_destroy_work(ae4);
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&ae4cmd_q->p_work, ae4_pending_work);
+ queue_delayed_work(ae4cmd_q->pws, &ae4cmd_q->p_work, usecs_to_jiffies(100));
+
+ init_completion(&ae4cmd_q->cmp);
+ }
+
+ ret = pt_dmaengine_register(pt);
+ if (ret)
+ ae4_destroy_work(ae4);
+ else
+ ptdma_debugfs_setup(pt);
+
+ return ret;
+}
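Each queue is a ring of CMD_Q_LEN descriptors driven by a software write
index (tail_wi) and a hardware read index, both wrapping modulo CMD_Q_LEN.
A sketch of the occupancy arithmetic this layout implies (helper name
hypothetical, not part of the driver):

	/* descriptors submitted but not yet consumed by hardware */
	static u32 ae4_ring_in_flight(u32 tail_wi, u32 rd_idx)
	{
		return (tail_wi + CMD_Q_LEN - rd_idx) % CMD_Q_LEN;
	}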
diff --git a/drivers/dma/amd/ae4dma/ae4dma-pci.c b/drivers/dma/amd/ae4dma/ae4dma-pci.c
new file mode 100644
index 000000000000..aad0dc4294a3
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma-pci.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#include "ae4dma.h"
+
+static int ae4_get_irqs(struct ae4_device *ae4)
+{
+ struct ae4_msix *ae4_msix = ae4->ae4_msix;
+ struct pt_device *pt = &ae4->pt;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev;
+ int i, v, ret;
+
+ pdev = to_pci_dev(dev);
+
+ for (v = 0; v < ARRAY_SIZE(ae4_msix->msix_entry); v++)
+ ae4_msix->msix_entry[v].entry = v;
+
+ ret = pci_alloc_irq_vectors(pdev, v, v, PCI_IRQ_MSIX);
+ if (ret != v) {
+ if (ret > 0)
+ pci_free_irq_vectors(pdev);
+
+ dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(dev, "could not enable MSI (%d)\n", ret);
+ return ret;
+ }
+
+ ret = pci_irq_vector(pdev, 0);
+ if (ret < 0) {
+ pci_free_irq_vectors(pdev);
+ return ret;
+ }
+
+ for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
+ ae4->ae4_irq[i] = ret;
+
+ } else {
+ ae4_msix->msix_count = ret;
+ for (i = 0; i < MAX_AE4_HW_QUEUES; i++)
+ ae4->ae4_irq[i] = ae4_msix->msix_entry[i].vector;
+ }
+
+ return ret;
+}
+
+static void ae4_free_irqs(struct ae4_device *ae4)
+{
+ struct ae4_msix *ae4_msix = ae4->ae4_msix;
+ struct pt_device *pt = &ae4->pt;
+ struct device *dev = pt->dev;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ if (ae4_msix && (ae4_msix->msix_count || ae4->ae4_irq[MAX_AE4_HW_QUEUES - 1]))
+ pci_free_irq_vectors(pdev);
+}
+
+static void ae4_deinit(struct ae4_device *ae4)
+{
+ ae4_free_irqs(ae4);
+}
+
+static int ae4_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct ae4_device *ae4;
+ struct pt_device *pt;
+ int bar_mask;
+ int ret = 0;
+
+ ae4 = devm_kzalloc(dev, sizeof(*ae4), GFP_KERNEL);
+ if (!ae4)
+ return -ENOMEM;
+
+ ae4->ae4_msix = devm_kzalloc(dev, sizeof(struct ae4_msix), GFP_KERNEL);
+ if (!ae4->ae4_msix)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ goto ae4_error;
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ae4dma");
+ if (ret)
+ goto ae4_error;
+
+ pt = &ae4->pt;
+ pt->dev = dev;
+ pt->ver = AE4_DMA_VERSION;
+
+ pt->io_regs = pcim_iomap_table(pdev)[0];
+ if (!pt->io_regs) {
+ ret = -ENOMEM;
+ goto ae4_error;
+ }
+
+ ret = ae4_get_irqs(ae4);
+ if (ret < 0)
+ goto ae4_error;
+
+ pci_set_master(pdev);
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+
+ dev_set_drvdata(dev, ae4);
+
+ ret = ae4_core_init(ae4);
+ if (ret)
+ goto ae4_error;
+
+ return 0;
+
+ae4_error:
+ ae4_deinit(ae4);
+
+ return ret;
+}
+
+static void ae4_pci_remove(struct pci_dev *pdev)
+{
+ struct ae4_device *ae4 = dev_get_drvdata(&pdev->dev);
+
+ ae4_destroy_work(ae4);
+ ae4_deinit(ae4);
+}
+
+static const struct pci_device_id ae4_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x14C8), },
+ { PCI_VDEVICE(AMD, 0x14DC), },
+ { PCI_VDEVICE(AMD, 0x149B), },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ae4_pci_table);
+
+static struct pci_driver ae4_pci_driver = {
+ .name = "ae4dma",
+ .id_table = ae4_pci_table,
+ .probe = ae4_pci_probe,
+ .remove = ae4_pci_remove,
+};
+
+module_pci_driver(ae4_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD AE4DMA driver");
diff --git a/drivers/dma/amd/ae4dma/ae4dma.h b/drivers/dma/amd/ae4dma/ae4dma.h
new file mode 100644
index 000000000000..265c5d436008
--- /dev/null
+++ b/drivers/dma/amd/ae4dma/ae4dma.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD AE4DMA driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+#ifndef __AE4DMA_H__
+#define __AE4DMA_H__
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include "../ptdma/ptdma.h"
+#include "../../virt-dma.h"
+
+#define MAX_AE4_HW_QUEUES 16
+
+#define AE4_DESC_COMPLETED 0x03
+
+#define AE4_MAX_IDX_OFF 0x08
+#define AE4_RD_IDX_OFF 0x0c
+#define AE4_WR_IDX_OFF 0x10
+#define AE4_INTR_STS_OFF 0x14
+#define AE4_Q_BASE_L_OFF 0x18
+#define AE4_Q_BASE_H_OFF 0x1c
+#define AE4_Q_SZ 0x20
+
+#define AE4_DMA_VERSION 4
+#define CMD_AE4_DESC_DW0_VAL 2
+
+struct ae4_msix {
+ int msix_count;
+ struct msix_entry msix_entry[MAX_AE4_HW_QUEUES];
+};
+
+struct ae4_cmd_queue {
+ struct ae4_device *ae4;
+ struct pt_cmd_queue cmd_q;
+ struct list_head cmd;
+ /* protect command operations */
+ struct mutex cmd_lock;
+ struct delayed_work p_work;
+ struct workqueue_struct *pws;
+ struct completion cmp;
+ wait_queue_head_t q_w;
+ atomic64_t intr_cnt;
+ atomic64_t done_cnt;
+ u64 q_cmd_count;
+ u32 dridx;
+ u32 tail_wi;
+ u32 id;
+};
+
+union dwou {
+ u32 dw0;
+ struct dword0 {
+ u8 byte0;
+ u8 byte1;
+ u16 timestamp;
+ } dws;
+};
+
+struct dword1 {
+ u8 status;
+ u8 err_code;
+ u16 desc_id;
+};
+
+struct ae4dma_desc {
+ union dwou dwouv;
+ struct dword1 dw1;
+ u32 length;
+ u32 rsvd;
+ u32 src_hi;
+ u32 src_lo;
+ u32 dst_hi;
+ u32 dst_lo;
+};
+
+struct ae4_device {
+ struct pt_device pt;
+ struct ae4_msix *ae4_msix;
+ struct ae4_cmd_queue ae4cmd_q[MAX_AE4_HW_QUEUES];
+ unsigned int ae4_irq[MAX_AE4_HW_QUEUES];
+ unsigned int cmd_q_count;
+};
+
+int ae4_core_init(struct ae4_device *ae4);
+void ae4_destroy_work(struct ae4_device *ae4);
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx);
+#endif
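The descriptor packs into eight 32-bit words. A compile-time guard one could
add next to the definition to pin the hardware ABI (a sketch, not part of
this patch):

	static_assert(sizeof(struct ae4dma_desc) == 32,
		      "AE4DMA descriptor must be exactly 8 dwords");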
diff --git a/drivers/dma/ptdma/Makefile b/drivers/dma/amd/ptdma/Makefile
index ce5410268a9a..ce5410268a9a 100644
--- a/drivers/dma/ptdma/Makefile
+++ b/drivers/dma/amd/ptdma/Makefile
diff --git a/drivers/dma/ptdma/ptdma-debugfs.c b/drivers/dma/amd/ptdma/ptdma-debugfs.c
index c8307d3044a3..c7c90bbf6fd8 100644
--- a/drivers/dma/ptdma/ptdma-debugfs.c
+++ b/drivers/dma/amd/ptdma/ptdma-debugfs.c
@@ -13,6 +13,7 @@
#include <linux/seq_file.h>
#include "ptdma.h"
+#include "../ae4dma/ae4dma.h"
/* DebugFS helpers */
#define RI_VERSION_NUM 0x0000003F
@@ -23,11 +24,19 @@
static int pt_debugfs_info_show(struct seq_file *s, void *p)
{
struct pt_device *pt = s->private;
+ struct ae4_device *ae4;
unsigned int regval;
seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
- seq_printf(s, " # Queues: %d\n", 1);
- seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ seq_printf(s, " # Queues: %d\n", ae4->cmd_q_count);
+ seq_printf(s, " # Cmds per queue: %d\n", CMD_Q_LEN);
+ } else {
+ seq_printf(s, " # Queues: %d\n", 1);
+ seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
+ }
regval = ioread32(pt->io_regs + CMD_PT_VERSION);
@@ -55,6 +64,7 @@ static int pt_debugfs_stats_show(struct seq_file *s, void *p)
static int pt_debugfs_queue_show(struct seq_file *s, void *p)
{
struct pt_cmd_queue *cmd_q = s->private;
+ struct pt_device *pt;
unsigned int regval;
if (!cmd_q)
@@ -62,18 +72,24 @@ static int pt_debugfs_queue_show(struct seq_file *s, void *p)
seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops);
- regval = ioread32(cmd_q->reg_control + 0x000C);
-
- seq_puts(s, " Enabled Interrupts:");
- if (regval & INT_EMPTY_QUEUE)
- seq_puts(s, " EMPTY");
- if (regval & INT_QUEUE_STOPPED)
- seq_puts(s, " STOPPED");
- if (regval & INT_ERROR)
- seq_puts(s, " ERROR");
- if (regval & INT_COMPLETION)
- seq_puts(s, " COMPLETION");
- seq_puts(s, "\n");
+ pt = cmd_q->pt;
+ if (pt->ver == AE4_DMA_VERSION) {
+ regval = readl(cmd_q->reg_control + 0x4);
+ seq_printf(s, " Enabled Interrupts:: status 0x%x\n", regval);
+ } else {
+ regval = ioread32(cmd_q->reg_control + 0x000C);
+
+ seq_puts(s, " Enabled Interrupts:");
+ if (regval & INT_EMPTY_QUEUE)
+ seq_puts(s, " EMPTY");
+ if (regval & INT_QUEUE_STOPPED)
+ seq_puts(s, " STOPPED");
+ if (regval & INT_ERROR)
+ seq_puts(s, " ERROR");
+ if (regval & INT_COMPLETION)
+ seq_puts(s, " COMPLETION");
+ seq_puts(s, "\n");
+ }
return 0;
}
@@ -84,8 +100,12 @@ DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);
void ptdma_debugfs_setup(struct pt_device *pt)
{
- struct pt_cmd_queue *cmd_q;
struct dentry *debugfs_q_instance;
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_cmd_queue *cmd_q;
+ struct ae4_device *ae4;
+ char name[30];
+ int i;
if (!debugfs_initialized())
return;
@@ -96,11 +116,28 @@ void ptdma_debugfs_setup(struct pt_device *pt)
debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
&pt_debugfs_stats_fops);
- cmd_q = &pt->cmd_q;
-
- debugfs_q_instance =
- debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
- debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
- &pt_debugfs_queue_fops);
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ cmd_q = &ae4cmd_q->cmd_q;
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, 29, "q%d", ae4cmd_q->id);
+
+ debugfs_q_instance =
+ debugfs_create_dir(name, pt->dma_dev.dbg_dev_root);
+
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+ }
+ } else {
+ debugfs_q_instance =
+ debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
+ cmd_q = &pt->cmd_q;
+ debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
+ &pt_debugfs_queue_fops);
+ }
}
+EXPORT_SYMBOL_GPL(ptdma_debugfs_setup);
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/amd/ptdma/ptdma-dev.c
index a2bf13ff18b6..a2bf13ff18b6 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/amd/ptdma/ptdma-dev.c
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
index f79240734807..35c84ec9608b 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/amd/ptdma/ptdma-dmaengine.c
@@ -9,9 +9,58 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
+#include <linux/bitfield.h>
#include "ptdma.h"
-#include "../dmaengine.h"
-#include "../virt-dma.h"
+#include "../ae4dma/ae4dma.h"
+#include "../../dmaengine.h"
+
+static char *ae4_error_codes[] = {
+ "",
+ "ERR 01: INVALID HEADER DW0",
+ "ERR 02: INVALID STATUS",
+ "ERR 03: INVALID LENGTH - 4 BYTE ALIGNMENT",
+ "ERR 04: INVALID SRC ADDR - 4 BYTE ALIGNMENT",
+ "ERR 05: INVALID DST ADDR - 4 BYTE ALIGNMENT",
+ "ERR 06: INVALID ALIGNMENT",
+ "ERR 07: INVALID DESCRIPTOR",
+};
+
+static void ae4_log_error(struct pt_device *d, int e)
+{
+ /* ERR 01 - 07 represent Invalid AE4 errors */
+ if (e <= 7)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", ae4_error_codes[e], e);
+ /* ERR 08 - 15 represent Invalid Descriptor errors */
+ else if (e > 7 && e <= 15)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "INVALID DESCRIPTOR", e);
+ /* ERR 16 - 31 represent Firmware errors */
+ else if (e > 15 && e <= 31)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FIRMWARE ERROR", e);
+ /* ERR 32 - 63 represent Fatal errors */
+ else if (e > 31 && e <= 63)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FATAL ERROR", e);
+ /* ERR 64 - 255 represent PTE errors */
+ else if (e > 63 && e <= 255)
+ dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "PTE ERROR", e);
+ else
+ dev_info(d->dev, "Unknown AE4DMA error");
+}
+
+void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx)
+{
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+ struct ae4dma_desc desc;
+ u8 status;
+
+ memcpy(&desc, &cmd_q->qbase[idx], sizeof(struct ae4dma_desc));
+ status = desc.dw1.status;
+ if (status && status != AE4_DESC_COMPLETED) {
+ cmd_q->cmd_error = desc.dw1.err_code;
+ if (cmd_q->cmd_error)
+ ae4_log_error(cmd_q->pt, cmd_q->cmd_error);
+ }
+}
+EXPORT_SYMBOL_GPL(ae4_check_status_error);
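Worked example of the banding above: an err_code of 0x12 (18) falls in the
16-31 range and is logged as "FIRMWARE ERROR", while 0x80 (128) lands in the
64-255 PTE band; only codes 1-7 index the ae4_error_codes[] strings directly.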
static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
@@ -45,7 +94,71 @@ static void pt_do_cleanup(struct virt_dma_desc *vd)
kmem_cache_free(pt->dma_desc_cache, desc);
}
-static int pt_dma_start_desc(struct pt_dma_desc *desc)
+static struct pt_cmd_queue *pt_get_cmd_queue(struct pt_device *pt, struct pt_dma_chan *chan)
+{
+ struct ae4_cmd_queue *ae4cmd_q;
+ struct pt_cmd_queue *cmd_q;
+ struct ae4_device *ae4;
+
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+ cmd_q = &ae4cmd_q->cmd_q;
+ } else {
+ cmd_q = &pt->cmd_q;
+ }
+
+ return cmd_q;
+}
+
+static int ae4_core_execute_cmd(struct ae4dma_desc *desc, struct ae4_cmd_queue *ae4cmd_q)
+{
+ bool soc = FIELD_GET(DWORD0_SOC, desc->dwouv.dw0);
+ struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
+
+ if (soc) {
+ desc->dwouv.dw0 |= FIELD_PREP(DWORD0_IOC, desc->dwouv.dw0);
+ desc->dwouv.dw0 &= ~DWORD0_SOC;
+ }
+
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ memcpy(&cmd_q->qbase[ae4cmd_q->tail_wi], desc, sizeof(struct ae4dma_desc));
+ ae4cmd_q->q_cmd_count++;
+ ae4cmd_q->tail_wi = (ae4cmd_q->tail_wi + 1) % CMD_Q_LEN;
+ writel(ae4cmd_q->tail_wi, cmd_q->reg_control + AE4_WR_IDX_OFF);
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+
+ wake_up(&ae4cmd_q->q_w);
+
+ return 0;
+}
+
+static int pt_core_perform_passthru_ae4(struct pt_cmd_queue *cmd_q,
+ struct pt_passthru_engine *pt_engine)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+ struct ae4dma_desc desc;
+
+ cmd_q->cmd_error = 0;
+ cmd_q->total_pt_ops++;
+ memset(&desc, 0, sizeof(desc));
+ desc.dwouv.dws.byte0 = CMD_AE4_DESC_DW0_VAL;
+
+ desc.dw1.status = 0;
+ desc.dw1.err_code = 0;
+ desc.dw1.desc_id = 0;
+
+ desc.length = pt_engine->src_len;
+
+ desc.src_lo = upper_32_bits(pt_engine->src_dma);
+ desc.src_hi = lower_32_bits(pt_engine->src_dma);
+ desc.dst_lo = upper_32_bits(pt_engine->dst_dma);
+ desc.dst_hi = lower_32_bits(pt_engine->dst_dma);
+
+ return ae4_core_execute_cmd(&desc, ae4cmd_q);
+}
+
+static int pt_dma_start_desc(struct pt_dma_desc *desc, struct pt_dma_chan *chan)
{
struct pt_passthru_engine *pt_engine;
struct pt_device *pt;
@@ -56,13 +169,18 @@ static int pt_dma_start_desc(struct pt_dma_desc *desc)
pt_cmd = &desc->pt_cmd;
pt = pt_cmd->pt;
- cmd_q = &pt->cmd_q;
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+
pt_engine = &pt_cmd->passthru;
pt->tdata.cmd = pt_cmd;
/* Execute the command */
- pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_cmd->ret = pt_core_perform_passthru_ae4(cmd_q, pt_engine);
+ else
+ pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
return 0;
}
@@ -151,7 +269,7 @@ static void pt_cmd_callback(void *data, int err)
if (!desc)
break;
- ret = pt_dma_start_desc(desc);
+ ret = pt_dma_start_desc(desc, chan);
if (!ret)
break;
@@ -186,7 +304,10 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_passthru_engine *pt_engine;
+ struct pt_device *pt = chan->pt;
+ struct ae4_cmd_queue *ae4cmd_q;
struct pt_dma_desc *desc;
+ struct ae4_device *ae4;
struct pt_cmd *pt_cmd;
desc = pt_alloc_dma_desc(chan, flags);
@@ -194,7 +315,7 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
return NULL;
pt_cmd = &desc->pt_cmd;
- pt_cmd->pt = chan->pt;
+ pt_cmd->pt = pt;
pt_engine = &pt_cmd->passthru;
pt_cmd->engine = PT_ENGINE_PASSTHRU;
pt_engine->src_dma = src;
@@ -205,6 +326,14 @@ static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
desc->len = len;
+ if (pt->ver == AE4_DMA_VERSION) {
+ ae4 = container_of(pt, struct ae4_device, pt);
+ ae4cmd_q = &ae4->ae4cmd_q[chan->id];
+ mutex_lock(&ae4cmd_q->cmd_lock);
+ list_add_tail(&pt_cmd->entry, &ae4cmd_q->cmd);
+ mutex_unlock(&ae4cmd_q->cmd_lock);
+ }
+
return desc;
}
@@ -258,24 +387,43 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
pt_cmd_callback(desc, 0);
}
+static void pt_check_status_trans_ae4(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
+{
+ struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
+ int i;
+
+ for (i = 0; i < CMD_Q_LEN; i++)
+ ae4_check_status_error(ae4cmd_q, i);
+}
+
static enum dma_status
pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct pt_device *pt = to_pt_chan(c)->pt;
- struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+ struct pt_dma_chan *chan = to_pt_chan(c);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
+
+ cmd_q = pt_get_cmd_queue(pt, chan);
+
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_check_status_trans_ae4(pt, cmd_q);
+ else
+ pt_check_status_trans(pt, cmd_q);
- pt_check_status_trans(pt, cmd_q);
return dma_cookie_status(c, cookie, txstate);
}
static int pt_pause(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
- pt_stop_queue(&chan->pt->cmd_q);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ pt_stop_queue(cmd_q);
spin_unlock_irqrestore(&chan->vc.lock, flags);
return 0;
@@ -285,10 +433,13 @@ static int pt_resume(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc = NULL;
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
- pt_start_queue(&chan->pt->cmd_q);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ pt_start_queue(cmd_q);
desc = pt_next_dma_desc(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -302,11 +453,17 @@ static int pt_resume(struct dma_chan *dma_chan)
static int pt_terminate_all(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
+ struct pt_device *pt = chan->pt;
+ struct pt_cmd_queue *cmd_q;
unsigned long flags;
- struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
LIST_HEAD(head);
- iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+ cmd_q = pt_get_cmd_queue(pt, chan);
+ if (pt->ver == AE4_DMA_VERSION)
+ pt_stop_queue(cmd_q);
+ else
+ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
+
spin_lock_irqsave(&chan->vc.lock, flags);
vchan_get_all_descriptors(&chan->vc, &head);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -319,14 +476,24 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
int pt_dmaengine_register(struct pt_device *pt)
{
- struct pt_dma_chan *chan;
struct dma_device *dma_dev = &pt->dma_dev;
- char *cmd_cache_name;
+ struct ae4_cmd_queue *ae4cmd_q = NULL;
+ struct ae4_device *ae4 = NULL;
+ struct pt_dma_chan *chan;
char *desc_cache_name;
- int ret;
+ char *cmd_cache_name;
+ int ret, i;
+
+ if (pt->ver == AE4_DMA_VERSION)
+ ae4 = container_of(pt, struct ae4_device, pt);
+
+ if (ae4)
+ pt->pt_dma_chan = devm_kcalloc(pt->dev, ae4->cmd_q_count,
+ sizeof(*pt->pt_dma_chan), GFP_KERNEL);
+ else
+ pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
+ GFP_KERNEL);
- pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
- GFP_KERNEL);
if (!pt->pt_dma_chan)
return -ENOMEM;
@@ -368,9 +535,6 @@ int pt_dmaengine_register(struct pt_device *pt)
INIT_LIST_HEAD(&dma_dev->channels);
- chan = pt->pt_dma_chan;
- chan->pt = pt;
-
/* Set base and prep routines */
dma_dev->device_free_chan_resources = pt_free_chan_resources;
dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
@@ -382,8 +546,21 @@ int pt_dmaengine_register(struct pt_device *pt)
dma_dev->device_terminate_all = pt_terminate_all;
dma_dev->device_synchronize = pt_synchronize;
- chan->vc.desc_free = pt_do_cleanup;
- vchan_init(&chan->vc, dma_dev);
+ if (ae4) {
+ for (i = 0; i < ae4->cmd_q_count; i++) {
+ chan = pt->pt_dma_chan + i;
+ ae4cmd_q = &ae4->ae4cmd_q[i];
+ chan->id = ae4cmd_q->id;
+ chan->pt = pt;
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+ }
+ } else {
+ chan = pt->pt_dma_chan;
+ chan->pt = pt;
+ chan->vc.desc_free = pt_do_cleanup;
+ vchan_init(&chan->vc, dma_dev);
+ }
ret = dma_async_device_register(dma_dev);
if (ret)
@@ -399,6 +576,7 @@ err_cache:
return ret;
}
+EXPORT_SYMBOL_GPL(pt_dmaengine_register);
void pt_dmaengine_unregister(struct pt_device *pt)
{
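The pt_tx_status()/pt_pause()/pt_resume()/pt_terminate_all() hunks above all follow one pattern: resolve the per-channel command queue first, then branch on the device version. A rough user-space sketch of that dispatch (simplified stand-in types; pt_get_cmd_queue()'s real body is not part of this hunk, so the per-channel indexing here is an assumption):

#include <stdio.h>

#define AE4_DMA_VERSION 4

struct cmd_queue { int busy; };

struct pt_device {
	int ver;
	struct cmd_queue queues[16];
};

struct pt_chan {
	unsigned int id;
	struct pt_device *pt;
};

static struct cmd_queue *get_cmd_queue(struct pt_device *pt, struct pt_chan *c)
{
	/* Assumed: AE4 parts have one command queue per channel,
	 * older parts share the single queue 0. */
	return pt->ver == AE4_DMA_VERSION ? &pt->queues[c->id] : &pt->queues[0];
}

int main(void)
{
	struct pt_device pt = { .ver = AE4_DMA_VERSION };
	struct pt_chan chan = { .id = 3, .pt = &pt };

	printf("using queue %ld\n", (long)(get_cmd_queue(&pt, &chan) - pt.queues));
	return 0;
}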
diff --git a/drivers/dma/ptdma/ptdma-pci.c b/drivers/dma/amd/ptdma/ptdma-pci.c
similarity index 100%
rename from drivers/dma/ptdma/ptdma-pci.c
rename to drivers/dma/amd/ptdma/ptdma-pci.c
diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/amd/ptdma/ptdma.h
index 39bc37268235..0a7939105e51 100644
--- a/drivers/dma/ptdma/ptdma.h
+++ b/drivers/dma/amd/ptdma/ptdma.h
@@ -22,7 +22,7 @@
#include <linux/wait.h>
#include <linux/dmapool.h>
-#include "../virt-dma.h"
+#include "../../virt-dma.h"
#define MAX_PT_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
@@ -184,6 +184,7 @@ struct pt_dma_desc {
struct pt_dma_chan {
struct virt_dma_chan vc;
struct pt_device *pt;
+ u32 id;
};
struct pt_cmd_queue {
@@ -262,6 +263,7 @@ struct pt_device {
unsigned long total_interrupts;
struct pt_tasklet_data tdata;
+ int ver;
};
/*
diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
index b0a1f3ad851b..8fb2d5e1df20 100644
--- a/drivers/dma/amd/qdma/qdma.c
+++ b/drivers/dma/amd/qdma/qdma.c
@@ -7,9 +7,9 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/dma-map-ops.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_qdma.h>
#include <linux/regmap.h>
@@ -283,16 +283,20 @@ static int qdma_check_queue_status(struct qdma_device *qdev,
static int qdma_clear_queue_context(const struct qdma_queue *queue)
{
- enum qdma_ctxt_type h2c_types[] = { QDMA_CTXT_DESC_SW_H2C,
- QDMA_CTXT_DESC_HW_H2C,
- QDMA_CTXT_DESC_CR_H2C,
- QDMA_CTXT_PFTCH, };
- enum qdma_ctxt_type c2h_types[] = { QDMA_CTXT_DESC_SW_C2H,
- QDMA_CTXT_DESC_HW_C2H,
- QDMA_CTXT_DESC_CR_C2H,
- QDMA_CTXT_PFTCH, };
+ static const enum qdma_ctxt_type h2c_types[] = {
+ QDMA_CTXT_DESC_SW_H2C,
+ QDMA_CTXT_DESC_HW_H2C,
+ QDMA_CTXT_DESC_CR_H2C,
+ QDMA_CTXT_PFTCH,
+ };
+ static const enum qdma_ctxt_type c2h_types[] = {
+ QDMA_CTXT_DESC_SW_C2H,
+ QDMA_CTXT_DESC_HW_C2H,
+ QDMA_CTXT_DESC_CR_C2H,
+ QDMA_CTXT_PFTCH,
+ };
struct qdma_device *qdev = queue->qdev;
- enum qdma_ctxt_type *type;
+ const enum qdma_ctxt_type *type;
int ret, num, i;
if (queue->dir == DMA_MEM_TO_DEV) {
@@ -492,18 +496,9 @@ static int qdma_device_verify(struct qdma_device *qdev)
static int qdma_device_setup(struct qdma_device *qdev)
{
- struct device *dev = &qdev->pdev->dev;
u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
int ret = 0;
- while (dev && get_dma_ops(dev))
- dev = dev->parent;
- if (!dev) {
- qdma_err(qdev, "dma device not found");
- return -EINVAL;
- }
- set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
-
ret = qdma_setup_fmap_context(qdev);
if (ret) {
qdma_err(qdev, "Failed setup fmap context");
@@ -548,11 +543,12 @@ static void qdma_free_queue_resources(struct dma_chan *chan)
{
struct qdma_queue *queue = to_qdma_queue(chan);
struct qdma_device *qdev = queue->qdev;
- struct device *dev = qdev->dma_dev.dev;
+ struct qdma_platdata *pdata;
qdma_clear_queue_context(queue);
vchan_free_chan_resources(&queue->vchan);
- dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+ pdata = dev_get_platdata(&qdev->pdev->dev);
+ dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
queue->desc_base, queue->dma_desc_base);
}
@@ -565,6 +561,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
struct qdma_queue *queue = to_qdma_queue(chan);
struct qdma_device *qdev = queue->qdev;
struct qdma_ctxt_sw_desc desc;
+ struct qdma_platdata *pdata;
size_t size;
int ret;
@@ -572,8 +569,9 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
if (ret)
return ret;
+ pdata = dev_get_platdata(&qdev->pdev->dev);
size = queue->ring_size * QDMA_MM_DESC_SIZE;
- queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
+ queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
&queue->dma_desc_base,
GFP_KERNEL);
if (!queue->desc_base) {
@@ -588,7 +586,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
if (ret) {
qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
chan->name);
- dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
+ dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
queue->dma_desc_base);
return ret;
}
@@ -948,8 +946,9 @@ static int qdma_init_error_irq(struct qdma_device *qdev)
static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
{
- u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
struct device *dev = &qdev->pdev->dev;
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
struct qdma_intr_ring *ring;
struct qdma_ctxt_intr intr_ctxt;
u32 vector;
@@ -969,7 +968,8 @@ static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
ring->msix_id = qdev->err_irq_idx + i + 1;
ring->ridx = i;
ring->color = 1;
- ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
+ ring->base = dmam_alloc_coherent(pdata->dma_dev,
+ QDMA_INTR_RING_SIZE,
&ring->dev_base, GFP_KERNEL);
if (!ring->base) {
qdma_err(qdev, "Failed to alloc intr ring %d", i);
@@ -1133,7 +1133,7 @@ static struct platform_driver amd_qdma_driver = {
.name = "amd-qdma",
},
.probe = amd_qdma_probe,
- .remove_new = amd_qdma_remove,
+ .remove = amd_qdma_remove,
};
module_platform_driver(amd_qdma_driver);
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 9588773dd2eb..bd49f0374291 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -153,6 +153,8 @@ static int admac_alloc_sram_carveout(struct admac_data *ad,
{
struct admac_sram *sram;
int i, ret = 0, nblocks;
+ ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
+ ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
if (dir == DMA_MEM_TO_DEV)
sram = &ad->txcache;
@@ -912,12 +914,7 @@ static int admac_probe(struct platform_device *pdev)
goto free_irq;
}
- ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
- ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
-
dev_info(&pdev->dev, "Audio DMA Controller\n");
- dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
- readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);
return 0;
@@ -950,7 +947,7 @@ static struct platform_driver apple_admac_driver = {
.of_match_table = admac_of_match,
},
.probe = admac_probe,
- .remove_new = admac_remove,
+ .remove = admac_remove,
};
module_platform_driver(apple_admac_driver);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index baebddc740b0..2d147712cbc6 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -2250,7 +2250,7 @@ static const struct dev_pm_ops __maybe_unused at_dma_dev_pm_ops = {
};
static struct platform_driver at_dma_driver = {
- .remove_new = at_dma_remove,
+ .remove = at_dma_remove,
.shutdown = at_dma_shutdown,
.id_table = atdma_devtypes,
.driver = {
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 299396121e6d..ba25c23164e7 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1363,6 +1363,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
return NULL;
desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
+ if (!desc)
+ return NULL;
list_add_tail(&desc->desc_node, &desc->descs_list);
desc->tx_dma_desc.cookie = -EBUSY;
@@ -2476,7 +2478,7 @@ MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
static struct platform_driver at_xdmac_driver = {
.probe = at_xdmac_probe,
- .remove_new = at_xdmac_remove,
+ .remove = at_xdmac_remove,
.driver = {
.name = "at_xdmac",
.of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index cfa6e1167a1f..7f0e76439ce5 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1756,7 +1756,7 @@ MODULE_DEVICE_TABLE(of, sba_of_match);
static struct platform_driver sba_driver = {
.probe = sba_probe,
- .remove_new = sba_remove,
+ .remove = sba_remove,
.driver = {
.name = "bcm-sba-raid",
.of_match_table = sba_of_match,
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index e1b92b4d7b05..20b10c15c696 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -875,6 +875,27 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
return chan;
}
+static int bcm2835_dma_suspend_late(struct device *dev)
+{
+ struct bcm2835_dmadev *od = dev_get_drvdata(dev);
+ struct bcm2835_chan *c, *next;
+
+ list_for_each_entry_safe(c, next, &od->ddev.channels,
+ vc.chan.device_node) {
+ void __iomem *chan_base = c->chan_base;
+
+ /* Check if DMA channel is busy */
+ if (readl(chan_base + BCM2835_DMA_ADDR))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops bcm2835_dma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
+};
+
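The new suspend_late callback vetoes system suspend while any channel's address register is still nonzero, i.e. while a transfer is in flight. A minimal sketch of the same shape, with hypothetical foo_* names standing in for the bcm2835 symbols:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm.h>

/* Hypothetical busy test standing in for the BCM2835_DMA_ADDR read. */
static bool foo_engine_busy(struct device *dev)
{
	return false;
}

static int foo_suspend_late(struct device *dev)
{
	/* Refuse late suspend while a transfer is still in flight. */
	if (foo_engine_busy(dev))
		return -EBUSY;
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, NULL)
};

Wired up via .pm = pm_ptr(&foo_pm_ops), as in the hunk below, so the ops are compiled out when CONFIG_PM is unset.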
static int bcm2835_dma_probe(struct platform_device *pdev)
{
struct bcm2835_dmadev *od;
@@ -1029,10 +1050,11 @@ static void bcm2835_dma_remove(struct platform_device *pdev)
static struct platform_driver bcm2835_dma_driver = {
.probe = bcm2835_dma_probe,
- .remove_new = bcm2835_dma_remove,
+ .remove = bcm2835_dma_remove,
.driver = {
.name = "bcm2835-dma",
.of_match_table = of_match_ptr(bcm2835_dma_of_match),
+ .pm = pm_ptr(&bcm2835_dma_pm_ops),
},
};
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 0bbaa7620bdd..6c4d655ffe77 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -486,7 +486,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);
static struct platform_driver mpc52xx_bcom_of_platform_driver = {
.probe = mpc52xx_bcom_probe,
- .remove_new = mpc52xx_bcom_remove,
+ .remove = mpc52xx_bcom_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = mpc52xx_bcom_of_match,
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index c9cfa341db51..100057603fd4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1122,7 +1122,7 @@ MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
static struct platform_driver jz4780_dma_driver = {
.probe = jz4780_dma_probe,
- .remove_new = jz4780_dma_remove,
+ .remove = jz4780_dma_remove,
.driver = {
.name = "jz4780-dma",
.of_match_table = jz4780_dma_dt_match,
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index fffafa86d964..b23536645ff7 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -1676,7 +1676,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
static struct platform_driver dw_driver = {
.probe = dw_probe,
- .remove_new = dw_remove,
+ .remove = dw_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = dw_dma_of_id_table,
diff --git a/drivers/dma/dw/acpi.c b/drivers/dma/dw/acpi.c
index c510c109d2c3..b6452fffa657 100644
--- a/drivers/dma/dw/acpi.c
+++ b/drivers/dma/dw/acpi.c
@@ -8,13 +8,15 @@
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dw->dma.dev);
struct acpi_dma_spec *dma_spec = param;
struct dw_dma_slave slave = {
.dma_dev = dma_spec->dev,
.src_id = dma_spec->slave_id,
.dst_id = dma_spec->slave_id,
- .m_master = 0,
- .p_master = 1,
+ .m_master = data->m_master,
+ .p_master = data->p_master,
};
return dw_dma_filter(chan, &slave);
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 563ce73488db..f1bd06a20cd6 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -51,11 +51,15 @@ struct dw_dma_chip_pdata {
int (*probe)(struct dw_dma_chip *chip);
int (*remove)(struct dw_dma_chip *chip);
struct dw_dma_chip *chip;
+ u8 m_master;
+ u8 p_master;
};
static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
.probe = dw_dma_probe,
.remove = dw_dma_remove,
+ .m_master = 0,
+ .p_master = 1,
};
static const struct dw_dma_platform_data idma32_pdata = {
@@ -72,6 +76,8 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
.pdata = &idma32_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
+ .m_master = 0,
+ .p_master = 0,
};
static const struct dw_dma_platform_data xbar_pdata = {
@@ -88,6 +94,8 @@ static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
.pdata = &xbar_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
+ .m_master = 0,
+ .p_master = 0,
};
#endif /* _DMA_DW_INTERNAL_H */
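The dw/acpi.c and dw/internal.h hunks above move the master numbers out of the ACPI filter and into per-chip pdata: instead of hardcoding m_master=0/p_master=1 for every variant, the filter copies whatever the matched chip declares. A small runnable illustration of that table-driven choice (stand-in types, not the dw_dma structures):

#include <stdio.h>

struct chip_pdata {
	const char *name;
	unsigned char m_master;	/* memory-side master */
	unsigned char p_master;	/* peripheral-side master */
};

static const struct chip_pdata dw_pdata     = { "dw-dmac", 0, 1 };
static const struct chip_pdata idma32_pdata = { "idma32",  0, 0 };

static void fill_slave(const struct chip_pdata *data)
{
	/* The filter copies the masters from the matched chip's pdata. */
	printf("%s: m_master=%u p_master=%u\n",
	       data->name, data->m_master, data->p_master);
}

int main(void)
{
	fill_slave(&dw_pdata);
	fill_slave(&idma32_pdata);
	return 0;
}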
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index ad2d4d012cf7..e8a0eb81726a 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -56,10 +56,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
if (ret)
return ret;
- dw_dma_acpi_controller_register(chip->dw);
-
pci_set_drvdata(pdev, data);
+ dw_dma_acpi_controller_register(chip->dw);
+
return 0;
}
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 47c58ad468cb..2606cf9cd429 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -191,7 +191,7 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
static struct platform_driver dw_driver = {
.probe = dw_probe,
- .remove_new = dw_remove,
+ .remove = dw_remove,
.shutdown = dw_shutdown,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 995427afe077..e424bb5c40e7 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -929,8 +929,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
/* Sanity check the channel parameters */
if (!edmac->edma->m2m) {
- if (edmac->dma_cfg.port < EP93XX_DMA_I2S1 ||
- edmac->dma_cfg.port > EP93XX_DMA_IRDA)
+ if (edmac->dma_cfg.port > EP93XX_DMA_IRDA)
return -EINVAL;
if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan))
return -EINVAL;
@@ -1391,11 +1390,12 @@ static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pde
INIT_LIST_HEAD(&dma_dev->channels);
for (i = 0; i < edma->num_channels; i++) {
struct ep93xx_dma_chan *edmac = &edma->channels[i];
+ int len;
edmac->chan.device = dma_dev;
edmac->regs = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(edmac->regs))
- return edmac->regs;
+ return ERR_CAST(edmac->regs);
edmac->irq = fwnode_irq_get(dev_fwnode(dev), i);
if (edmac->irq < 0)
@@ -1404,9 +1404,11 @@ static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pde
edmac->edma = edma;
if (edma->m2m)
- snprintf(dma_clk_name, sizeof(dma_clk_name), "m2m%u", i);
+ len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2m%u", i);
else
- snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i);
+ len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i);
+ if (len >= sizeof(dma_clk_name))
+ return ERR_PTR(-ENOBUFS);
edmac->clk = devm_clk_get(dev, dma_clk_name);
if (IS_ERR(edmac->clk)) {
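The ep93xx hunk above now checks the snprintf() return value: snprintf() reports the length that would have been written, so a result greater than or equal to the buffer size means the clock name was truncated. A self-contained demonstration of the check:

#include <errno.h>
#include <stdio.h>

static int make_clk_name(char *buf, size_t sz, unsigned int i)
{
	int len = snprintf(buf, sz, "m2m%u", i);

	/* len >= sz means the name did not fit and was truncated. */
	if (len < 0 || (size_t)len >= sz)
		return -ENOBUFS;
	return 0;
}

int main(void)
{
	char name[8];

	if (make_clk_name(name, sizeof(name), 1234567) < 0)
		puts("truncated");
	else
		printf("ok: %s\n", name);
	return 0;
}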
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b7f15ab96855..443b2430466c 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -480,8 +480,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
bool disable_req, bool enable_sg)
{
struct dma_slave_config *cfg = &fsl_chan->cfg;
+ u32 burst = 0;
u16 csr = 0;
- u32 burst;
/*
* eDMA hardware SGs require the TCDs to be stored in little
@@ -496,16 +496,30 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff);
- if (fsl_chan->is_multi_fifo) {
- /* set mloff to support multiple fifo */
- burst = cfg->direction == DMA_DEV_TO_MEM ?
- cfg->src_maxburst : cfg->dst_maxburst;
- nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
- /* enable DMLOE/SMLOE */
- if (cfg->direction == DMA_MEM_TO_DEV) {
+ /*
+ * If we expect either multi_fifo or a port window size, we use the
+ * minor loop offset: bits 29-10 hold the address offset, while
+ * bits 9-0 tell the DMA how much data to read from addr.
+ * If we have neither, we use a major loop reading nbytes (29 bits)
+ * from addr.
+ */
+ if (cfg->direction == DMA_MEM_TO_DEV) {
+ if (fsl_chan->is_multi_fifo)
+ burst = cfg->dst_maxburst * 4;
+ if (cfg->dst_port_window_size)
+ burst = cfg->dst_port_window_size * cfg->dst_addr_width;
+ if (burst) {
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
- } else {
+ }
+ } else {
+ if (fsl_chan->is_multi_fifo)
+ burst = cfg->src_maxburst * 4;
+ if (cfg->src_port_window_size)
+ burst = cfg->src_port_window_size * cfg->src_addr_width;
+ if (burst) {
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-burst);
nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
}
@@ -623,11 +637,15 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = fsl_chan->is_multi_fifo ? 4 : 0;
+ if (fsl_chan->cfg.dst_port_window_size)
+ doff = fsl_chan->cfg.dst_addr_width;
} else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = dma_buf_next;
soff = fsl_chan->is_multi_fifo ? 4 : 0;
doff = fsl_chan->cfg.src_addr_width;
+ if (fsl_chan->cfg.src_port_window_size)
+ soff = fsl_chan->cfg.src_addr_width;
} else {
/* DMA_DEV_TO_DEV */
src_addr = fsl_chan->cfg.src_addr;
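To make the minor-loop arithmetic in the hunk above concrete: the burst is the window size times the address width, and its negation is packed into the offset field so the engine rewinds the address after every minor loop. A user-space sketch with an illustrative bit layout taken from the comment (these are not the kernel's EDMA_V3_TCD_NBYTES_* macros):

#include <stdio.h>

#define NBYTES_MLOFF(off)	((((unsigned int)(off)) & 0xfffff) << 10)
#define NBYTES_DMLOE		(1u << 30)

int main(void)
{
	unsigned int window = 4;		/* dst_port_window_size */
	unsigned int width = 4;			/* dst_addr_width, in bytes */
	unsigned int burst = window * width;	/* bytes per minor loop */
	unsigned int nbytes = burst;

	nbytes |= NBYTES_MLOFF(-(int)burst);	/* rewind address after each loop */
	nbytes |= NBYTES_DMLOE;			/* enable dest minor-loop offset */
	printf("nbytes word = 0x%08x\n", nbytes);
	return 0;
}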
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index ce37e1ee9c46..10a5565ddfd7 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -68,6 +68,8 @@
#define EDMA_V3_CH_CSR_EEI BIT(2)
#define EDMA_V3_CH_CSR_DONE BIT(30)
#define EDMA_V3_CH_CSR_ACTIVE BIT(31)
+#define EDMA_V3_CH_ES_ERR BIT(31)
+#define EDMA_V3_MP_ES_VLD BIT(31)
enum fsl_edma_pm_state {
RUNNING = 0,
@@ -166,6 +168,7 @@ struct fsl_edma_chan {
struct work_struct issue_worker;
struct platform_device *pdev;
struct device *pd_dev;
+ struct device_link *pd_dev_link;
u32 srcid;
struct clk *clk;
int priority;
@@ -240,6 +243,7 @@ struct fsl_edma_engine {
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
int txirq;
+ int txirq_16_31;
int errirq;
bool big_endian;
struct edma_regs regs;
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index f9f1eda79254..f989b6c9c0a9 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -3,10 +3,11 @@
* drivers/dma/fsl-edma.c
*
* Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2024 NXP
*
* Driver for the Freescale eDMA engine with flexible channel multiplexing
* capability for DMA request sources. The eDMA block can be found on some
- * Vybrid and Layerscape SoCs.
+ * Vybrid, Layerscape and S32G SoCs.
*/
#include <dt-bindings/dma/fsl-edma.h>
@@ -72,6 +73,60 @@ static irqreturn_t fsl_edma2_tx_handler(int irq, void *devi_id)
return fsl_edma_tx_handler(irq, fsl_chan->edma);
}
+static irqreturn_t fsl_edma3_or_tx_handler(int irq, void *dev_id,
+ u8 start, u8 end)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ struct fsl_edma_chan *chan;
+ int i;
+
+ end = min(end, fsl_edma->n_chans);
+
+ for (i = start; i < end; i++) {
+ chan = &fsl_edma->chans[i];
+
+ fsl_edma3_tx_handler(irq, chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma3_tx_0_15_handler(int irq, void *dev_id)
+{
+ return fsl_edma3_or_tx_handler(irq, dev_id, 0, 16);
+}
+
+static irqreturn_t fsl_edma3_tx_16_31_handler(int irq, void *dev_id)
+{
+ return fsl_edma3_or_tx_handler(irq, dev_id, 16, 32);
+}
+
+static irqreturn_t fsl_edma3_or_err_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *fsl_edma = dev_id;
+ struct edma_regs *regs = &fsl_edma->regs;
+ unsigned int err, ch, ch_es;
+ struct fsl_edma_chan *chan;
+
+ err = edma_readl(fsl_edma, regs->es);
+ if (!(err & EDMA_V3_MP_ES_VLD))
+ return IRQ_NONE;
+
+ for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+ chan = &fsl_edma->chans[ch];
+
+ ch_es = edma_readl_chreg(chan, ch_es);
+ if (!(ch_es & EDMA_V3_CH_ES_ERR))
+ continue;
+
+ edma_writel_chreg(chan, EDMA_V3_CH_ES_ERR, ch_es);
+ fsl_edma_disable_request(chan);
+ fsl_edma->chans[ch].status = DMA_ERROR;
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
@@ -274,6 +329,49 @@ static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engi
return 0;
}
+static int fsl_edma3_or_irq_init(struct platform_device *pdev,
+ struct fsl_edma_engine *fsl_edma)
+{
+ int ret;
+
+ fsl_edma->txirq = platform_get_irq_byname(pdev, "tx-0-15");
+ if (fsl_edma->txirq < 0)
+ return fsl_edma->txirq;
+
+ fsl_edma->txirq_16_31 = platform_get_irq_byname(pdev, "tx-16-31");
+ if (fsl_edma->txirq_16_31 < 0)
+ return fsl_edma->txirq_16_31;
+
+ fsl_edma->errirq = platform_get_irq_byname(pdev, "err");
+ if (fsl_edma->errirq < 0)
+ return fsl_edma->errirq;
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+ fsl_edma3_tx_0_15_handler, 0, "eDMA tx0_15",
+ fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA tx0_15 IRQ.\n");
+
+ if (fsl_edma->n_chans > 16) {
+ ret = devm_request_irq(&pdev->dev, fsl_edma->txirq_16_31,
+ fsl_edma3_tx_16_31_handler, 0,
+ "eDMA tx16_31", fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA tx16_31 IRQ.\n");
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
+ fsl_edma3_or_err_handler, 0, "eDMA err",
+ fsl_edma);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Can't register eDMA err IRQ.\n");
+
+ return 0;
+}
+
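The two tx handlers above are thin wrappers over one ranged worker, with the upper bound clamped to the number of channels actually present, so the tx-16-31 interrupt is safe on parts with fewer than 32 channels. The control flow, runnable in user space:

#include <stdio.h>

#define N_CHANS 24

/* Service channels [start, end), clamped like fsl_edma3_or_tx_handler(). */
static void service_range(unsigned int start, unsigned int end)
{
	unsigned int i;

	if (end > N_CHANS)
		end = N_CHANS;
	for (i = start; i < end; i++)
		printf("service chan %u\n", i);
}

static void tx_0_15(void)  { service_range(0, 16); }
static void tx_16_31(void) { service_range(16, 32); }

int main(void)
{
	tx_0_15();
	tx_16_31();	/* only 24 channels: services 16..23 */
	return 0;
}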
static int
fsl_edma2_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma)
@@ -404,6 +502,14 @@ static struct fsl_edma_drvdata imx95_data5 = {
.setup_irq = fsl_edma3_irq_init,
};
+static const struct fsl_edma_drvdata s32g2_data = {
+ .dmamuxs = DMAMUX_NR,
+ .chreg_space_sz = EDMA_TCD,
+ .chreg_off = 0x4000,
+ .flags = FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MUX_SWAP,
+ .setup_irq = fsl_edma3_or_irq_init,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
@@ -413,14 +519,38 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
+ { .compatible = "nxp,s32g2-edma", .data = &s32g2_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+static void fsl_edma3_detach_pd(struct fsl_edma_engine *fsl_edma)
+{
+ struct fsl_edma_chan *fsl_chan;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+ fsl_chan = &fsl_edma->chans[i];
+ if (fsl_chan->pd_dev_link)
+ device_link_del(fsl_chan->pd_dev_link);
+ if (fsl_chan->pd_dev) {
+ dev_pm_domain_detach(fsl_chan->pd_dev, false);
+ pm_runtime_dont_use_autosuspend(fsl_chan->pd_dev);
+ pm_runtime_set_suspended(fsl_chan->pd_dev);
+ }
+ }
+}
+
+static void devm_fsl_edma3_detach_pd(void *data)
+{
+ fsl_edma3_detach_pd(data);
+}
+
static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
struct fsl_edma_chan *fsl_chan;
- struct device_link *link;
struct device *pd_chan;
struct device *dev;
int i;
@@ -436,15 +566,16 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
pd_chan = dev_pm_domain_attach_by_id(dev, i);
if (IS_ERR_OR_NULL(pd_chan)) {
dev_err(dev, "Failed attach pd %d\n", i);
- return -EINVAL;
+ goto detach;
}
- link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+ fsl_chan->pd_dev_link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
- if (!link) {
+ if (!fsl_chan->pd_dev_link) {
dev_err(dev, "Failed to add device_link to %d\n", i);
- return -EINVAL;
+ dev_pm_domain_detach(pd_chan, false);
+ goto detach;
}
fsl_chan->pd_dev = pd_chan;
@@ -455,6 +586,10 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
}
return 0;
+
+detach:
+ fsl_edma3_detach_pd(fsl_edma);
+ return -EINVAL;
}
static int fsl_edma_probe(struct platform_device *pdev)
@@ -517,10 +652,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
- /* eDMAv3 mux register move to TCD area if ch_mux exist */
- if (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)
- break;
-
fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
@@ -544,6 +675,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
ret = fsl_edma3_attach_pd(pdev, fsl_edma);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma);
+ if (ret)
+ return ret;
}
if (drvdata->flags & FSL_EDMA_DRV_TCD64)
@@ -646,7 +780,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
ret = of_dma_controller_register(np,
- drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
+ drvdata->dmamuxs ? fsl_edma_xlate : fsl_edma3_xlate,
fsl_edma);
if (ret) {
dev_err(&pdev->dev,
@@ -740,7 +874,7 @@ static struct platform_driver fsl_edma_driver = {
.pm = &fsl_edma_pm_ops,
},
.probe = fsl_edma_probe,
- .remove_new = fsl_edma_remove,
+ .remove = fsl_edma_remove,
};
static int __init fsl_edma_init(void)
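The devm_add_action_or_reset() call added to the probe path above pairs the PD attach with an automatic detach. A hedged sketch of the pattern with hypothetical foo_* names, not the fsl_edma symbols:

#include <linux/device.h>

static void foo_teardown(void *data)
{
	/* undo whatever foo_setup() did with 'data' */
}

static int foo_register_teardown(struct device *dev, void *res)
{
	/*
	 * Register the undo action immediately after setup; it then runs
	 * automatically on probe failure and on unbind. If registering
	 * the action itself fails, foo_teardown(res) is invoked before
	 * the error is returned, so no explicit error path is needed.
	 */
	return devm_add_action_or_reset(dev, foo_teardown, res);
}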
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 5005e138fc23..823f5c6bc2e1 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1288,7 +1288,7 @@ static struct platform_driver fsl_qdma_driver = {
.of_match_table = fsl_qdma_dt_ids,
},
.probe = fsl_qdma_probe,
- .remove_new = fsl_qdma_remove,
+ .remove = fsl_qdma_remove,
};
module_platform_driver(fsl_qdma_driver);
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 014ff523d5ec..6aa97e258a55 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -886,7 +886,7 @@ static struct platform_driver fsl_re_driver = {
.of_match_table = fsl_re_ids,
},
.probe = fsl_re_probe,
- .remove_new = fsl_re_remove,
+ .remove = fsl_re_remove,
};
module_platform_driver(fsl_re_driver);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 18a6c4bf6275..b5e7d18b9766 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1404,7 +1404,7 @@ static struct platform_driver fsldma_of_driver = {
#endif
},
.probe = fsldma_of_probe,
- .remove_new = fsldma_of_remove,
+ .remove = fsldma_of_remove,
};
/*----------------------------------------------------------------------------*/
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 3c648308a54a..d147353d47ab 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -693,7 +693,7 @@ static const struct dev_pm_ops idma64_dev_pm_ops = {
static struct platform_driver idma64_platform_driver = {
.probe = idma64_platform_probe,
- .remove_new = idma64_platform_remove,
+ .remove = idma64_platform_remove,
.driver = {
.name = LPSS_IDMA64_DRIVER_NAME,
.pm = &idma64_dev_pm_ops,
diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile
index 2b4a0d406e1e..9ff9d7b87b64 100644
--- a/drivers/dma/idxd/Makefile
+++ b/drivers/dma/idxd/Makefile
@@ -1,4 +1,4 @@
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"IDXD"'
obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
idxd_bus-y := bus.o
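The extra quoting above, and the MODULE_IMPORT_NS/EXPORT_SYMBOL_NS_GPL changes in the idxd hunks that follow, track the kernel's switch of symbol namespaces from bare identifiers to string literals (merged in recent kernels, around 6.13): the Makefile define must now expand to a quoted string. A minimal sketch of both sides, with a hypothetical foo_shared symbol:

#include <linux/module.h>

static int foo_shared;
/* Exporter names the namespace as a C string literal. */
EXPORT_SYMBOL_NS_GPL(foo_shared, "IDXD");
MODULE_DESCRIPTION("symbol namespace sketch");
MODULE_LICENSE("GPL");
/* A consumer module would declare: MODULE_IMPORT_NS("IDXD"); */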
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 57f1bf2ab20b..ff94ee892339 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -28,7 +28,6 @@ struct idxd_cdev_context {
* global to avoid conflict file names.
*/
static DEFINE_IDA(file_ida);
-static DEFINE_MUTEX(ida_lock);
/*
* ictx is an array based off of accelerator types. enum idxd_type
@@ -123,9 +122,7 @@ static void idxd_file_dev_release(struct device *dev)
struct idxd_device *idxd = wq->idxd;
int rc;
- mutex_lock(&ida_lock);
ida_free(&file_ida, ctx->id);
- mutex_unlock(&ida_lock);
/* Wait for in-flight operations to complete. */
if (wq_shared(wq)) {
@@ -284,9 +281,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
}
idxd_cdev = wq->idxd_cdev;
- mutex_lock(&ida_lock);
ctx->id = ida_alloc(&file_ida, GFP_KERNEL);
- mutex_unlock(&ida_lock);
if (ctx->id < 0) {
dev_warn(dev, "ida alloc failure\n");
goto failed_ida;
diff --git a/drivers/dma/idxd/compat.c b/drivers/dma/idxd/compat.c
index a4adb0c17995..eff9943f1a42 100644
--- a/drivers/dma/idxd/compat.c
+++ b/drivers/dma/idxd/compat.c
@@ -103,4 +103,4 @@ struct idxd_device_driver dsa_drv = {
};
module_idxd_driver(dsa_drv);
-MODULE_IMPORT_NS(IDXD);
+MODULE_IMPORT_NS("IDXD");
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index c41ef195eeb9..5cf419fe6b46 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -161,7 +161,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
free_hw_descs(wq);
return rc;
}
-EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, "IDXD");
void idxd_wq_free_resources(struct idxd_wq *wq)
{
@@ -175,7 +175,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
sbitmap_queue_free(&wq->sbq);
}
-EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, "IDXD");
int idxd_wq_enable(struct idxd_wq *wq)
{
@@ -407,7 +407,7 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
reinit_completion(&wq->wq_resurrect);
return 0;
}
-EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, "IDXD");
void __idxd_wq_quiesce(struct idxd_wq *wq)
{
@@ -417,7 +417,7 @@ void __idxd_wq_quiesce(struct idxd_wq *wq)
complete_all(&wq->wq_resurrect);
wait_for_completion(&wq->wq_dead);
}
-EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, IDXD);
+EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, "IDXD");
void idxd_wq_quiesce(struct idxd_wq *wq)
{
@@ -425,7 +425,7 @@ void idxd_wq_quiesce(struct idxd_wq *wq)
__idxd_wq_quiesce(wq);
mutex_unlock(&wq->wq_lock);
}
-EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, "IDXD");
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
@@ -1494,7 +1494,7 @@ err_map_portal:
err:
return rc;
}
-EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, "IDXD");
void idxd_drv_disable_wq(struct idxd_wq *wq)
{
@@ -1516,7 +1516,7 @@ void idxd_drv_disable_wq(struct idxd_wq *wq)
wq->type = IDXD_WQT_NONE;
wq->client_count = 0;
}
-EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, "IDXD");
int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d84e21daa991..214b8039439f 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -374,6 +374,17 @@ struct idxd_device {
struct dentry *dbgfs_evl_file;
bool user_submission_safe;
+
+ struct idxd_saved_states *idxd_saved;
+};
+
+struct idxd_saved_states {
+ struct idxd_device saved_idxd;
+ struct idxd_evl saved_evl;
+ struct idxd_engine **saved_engines;
+ struct idxd_wq **saved_wqs;
+ struct idxd_group **saved_groups;
+ unsigned long *saved_wq_enable_map;
};
static inline unsigned int evl_ent_size(struct idxd_device *idxd)
@@ -725,8 +736,6 @@ static inline void idxd_desc_complete(struct idxd_desc *desc,
&desc->txd, &status);
}
-int idxd_register_bus_type(void);
-void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
void idxd_wqs_quiesce(struct idxd_device *idxd);
@@ -742,6 +751,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd);
/* device control */
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+ const struct pci_device_id *id);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int idxd_drv_enable_wq(struct idxd_wq *wq);
void idxd_drv_disable_wq(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 234c1c658ec7..b946f78f85e1 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -25,7 +25,7 @@ MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
-MODULE_IMPORT_NS(IDXD);
+MODULE_IMPORT_NS("IDXD");
static bool sva = true;
module_param(sva, bool, 0644);
@@ -78,6 +78,8 @@ static struct pci_device_id idxd_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
/* IAA on DMR platforms */
{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA PTL platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
@@ -723,67 +725,464 @@ static void idxd_cleanup(struct idxd_device *idxd)
idxd_disable_sva(idxd->pdev);
}
-static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+/*
+ * Attach IDXD device to IDXD driver.
+ */
+static int idxd_bind(struct device_driver *drv, const char *buf)
{
- struct device *dev = &pdev->dev;
- struct idxd_device *idxd;
- struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
+ const struct bus_type *bus = drv->bus;
+ struct device *dev;
+ int err = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev)
+ err = device_driver_attach(drv, dev);
+
+ put_device(dev);
+
+ return err;
+}
+
+/*
+ * Detach IDXD device from driver.
+ */
+static void idxd_unbind(struct device_driver *drv, const char *buf)
+{
+ const struct bus_type *bus = drv->bus;
+ struct device *dev;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev && dev->driver == drv)
+ device_release_driver(dev);
+
+ put_device(dev);
+}
+
+#define idxd_free_saved_configs(saved_configs, count) \
+ do { \
+ int i; \
+ \
+ for (i = 0; i < (count); i++) \
+ kfree(saved_configs[i]); \
+ } while (0)
+
+static void idxd_free_saved(struct idxd_group **saved_groups,
+ struct idxd_engine **saved_engines,
+ struct idxd_wq **saved_wqs,
+ struct idxd_device *idxd)
+{
+ if (saved_groups)
+ idxd_free_saved_configs(saved_groups, idxd->max_groups);
+ if (saved_engines)
+ idxd_free_saved_configs(saved_engines, idxd->max_engines);
+ if (saved_wqs)
+ idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
+}
+
+/*
+ * Save IDXD device configurations including engines, groups, wqs etc.
+ * The saved configurations can be restored when needed.
+ */
+static int idxd_device_config_save(struct idxd_device *idxd,
+ struct idxd_saved_states *idxd_saved)
+{
+ struct device *dev = &idxd->pdev->dev;
+ int i;
+
+ memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));
+
+ if (idxd->evl) {
+ memcpy(&idxd_saved->saved_evl, idxd->evl,
+ sizeof(struct idxd_evl));
+ }
+
+ struct idxd_group **saved_groups __free(kfree) =
+ kcalloc_node(idxd->max_groups,
+ sizeof(struct idxd_group *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_groups)
+ return -ENOMEM;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *saved_group __free(kfree) =
+ kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
+ dev_to_node(dev));
+
+ if (!saved_group) {
+ /* Free saved groups */
+ idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
+ saved_groups[i] = no_free_ptr(saved_group);
+ }
+
+ struct idxd_engine **saved_engines =
+ kcalloc_node(idxd->max_engines,
+ sizeof(struct idxd_engine *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_engines) {
+ /* Free saved groups */
+ idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+ return -ENOMEM;
+ }
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *saved_engine __free(kfree) =
+ kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!saved_engine) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL,
+ idxd);
+
+ return -ENOMEM;
+ }
+
+ memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
+ saved_engines[i] = no_free_ptr(saved_engine);
+ }
+
+ unsigned long *saved_wq_enable_map __free(bitmap) =
+ bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
+ dev_to_node(dev));
+ if (!saved_wq_enable_map) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);
+
+ struct idxd_wq **saved_wqs __free(kfree) =
+ kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!saved_wqs) {
+ /* Free saved groups and engines */
+ idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *saved_wq __free(kfree) =
+ kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
+ dev_to_node(dev));
+ struct idxd_wq *wq;
+
+ if (!saved_wq) {
+ /* Free saved groups, engines, and wqs */
+ idxd_free_saved(saved_groups, saved_engines, saved_wqs,
+ idxd);
+
+ return -ENOMEM;
+ }
+
+ if (!test_bit(i, saved_wq_enable_map))
+ continue;
+
+ wq = idxd->wqs[i];
+ mutex_lock(&wq->wq_lock);
+ memcpy(saved_wq, wq, sizeof(*saved_wq));
+ saved_wqs[i] = no_free_ptr(saved_wq);
+ mutex_unlock(&wq->wq_lock);
+ }
+
+ /* Save configurations */
+ idxd_saved->saved_groups = no_free_ptr(saved_groups);
+ idxd_saved->saved_engines = no_free_ptr(saved_engines);
+ idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
+ idxd_saved->saved_wqs = no_free_ptr(saved_wqs);
+
+ return 0;
+}
+
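idxd_device_config_save() above leans on the scope-based cleanup helpers: each allocation is declared __free(kfree) so it is released automatically on any early return, and no_free_ptr() transfers ownership once the copy has succeeded. The pattern in isolation, a minimal sketch:

#include <linux/cleanup.h>
#include <linux/slab.h>

static void *save_one(size_t sz)
{
	void *tmp __free(kfree) = kzalloc(sz, GFP_KERNEL);

	if (!tmp)
		return NULL;

	/* ... fill tmp ...; on any early return, kfree(tmp) runs here */

	return no_free_ptr(tmp);	/* transfer ownership to the caller */
}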
+/*
+ * Restore IDXD device configurations including engines, groups, wqs etc
+ * that were saved before.
+ */
+static void idxd_device_config_restore(struct idxd_device *idxd,
+ struct idxd_saved_states *idxd_saved)
+{
+ struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
+ int i;
+
+ idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+
+ if (saved_evl)
+ idxd->evl->size = saved_evl->size;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ struct idxd_group *saved_group, *group;
+
+ saved_group = idxd_saved->saved_groups[i];
+ group = idxd->groups[i];
+
+ group->rdbufs_allowed = saved_group->rdbufs_allowed;
+ group->rdbufs_reserved = saved_group->rdbufs_reserved;
+ group->tc_a = saved_group->tc_a;
+ group->tc_b = saved_group->tc_b;
+ group->use_rdbuf_limit = saved_group->use_rdbuf_limit;
+
+ kfree(saved_group);
+ }
+ kfree(idxd_saved->saved_groups);
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ struct idxd_engine *saved_engine, *engine;
+
+ saved_engine = idxd_saved->saved_engines[i];
+ engine = idxd->engines[i];
+
+ engine->group = saved_engine->group;
+
+ kfree(saved_engine);
+ }
+ kfree(idxd_saved->saved_engines);
+
+ bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
+ idxd->max_wqs);
+ bitmap_free(idxd_saved->saved_wq_enable_map);
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *saved_wq, *wq;
+ size_t len;
+
+ if (!test_bit(i, idxd->wq_enable_map))
+ continue;
+
+ saved_wq = idxd_saved->saved_wqs[i];
+ wq = idxd->wqs[i];
+
+ mutex_lock(&wq->wq_lock);
+
+ wq->group = saved_wq->group;
+ wq->flags = saved_wq->flags;
+ wq->threshold = saved_wq->threshold;
+ wq->size = saved_wq->size;
+ wq->priority = saved_wq->priority;
+ wq->type = saved_wq->type;
+ len = strlen(saved_wq->name) + 1;
+ strscpy(wq->name, saved_wq->name, len);
+ wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
+ wq->max_batch_size = saved_wq->max_batch_size;
+ wq->enqcmds_retries = saved_wq->enqcmds_retries;
+ wq->descs = saved_wq->descs;
+ wq->idxd_chan = saved_wq->idxd_chan;
+ len = strlen(saved_wq->driver_name) + 1;
+ strscpy(wq->driver_name, saved_wq->driver_name, len);
+
+ mutex_unlock(&wq->wq_lock);
+
+ kfree(saved_wq);
+ }
+
+ kfree(idxd_saved->saved_wqs);
+}
+
+static void idxd_reset_prepare(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ struct device *dev = &idxd->pdev->dev;
+ const char *idxd_name;
int rc;
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
+ dev = &idxd->pdev->dev;
+ idxd_name = dev_name(idxd_confdev(idxd));
- dev_dbg(dev, "Alloc IDXD context\n");
- idxd = idxd_alloc(pdev, data);
- if (!idxd) {
- rc = -ENOMEM;
- goto err_idxd_alloc;
+ struct idxd_saved_states *idxd_saved __free(kfree) =
+ kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!idxd_saved) {
+ dev_err(dev, "HALT: no memory\n");
+
+ return;
}
- dev_dbg(dev, "Mapping BARs\n");
- idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
- if (!idxd->reg_base) {
- rc = -ENOMEM;
- goto err_iomap;
+ /* Save IDXD configurations. */
+ rc = idxd_device_config_save(idxd, idxd_saved);
+ if (rc < 0) {
+ dev_err(dev, "HALT: cannot save %s configs\n", idxd_name);
+
+ return;
+ }
+
+ idxd->idxd_saved = no_free_ptr(idxd_saved);
+
+ /* Save PCI device state. */
+ pci_save_state(idxd->pdev);
+}
+
+static void idxd_reset_done(struct pci_dev *pdev)
+{
+ struct idxd_device *idxd = pci_get_drvdata(pdev);
+ const char *idxd_name;
+ struct device *dev;
+ int rc, i;
+
+ if (!idxd->idxd_saved)
+ return;
+
+ dev = &idxd->pdev->dev;
+ idxd_name = dev_name(idxd_confdev(idxd));
+
+ /* Restore PCI device state. */
+ pci_restore_state(idxd->pdev);
+
+ /* Unbind idxd device from driver. */
+ idxd_unbind(&idxd_drv.drv, idxd_name);
+
+ /*
+ * Probe the PCI device without allocating or changing the idxd
+ * software data, which stays the same as before the FLR.
+ */
+ idxd_pci_probe_alloc(idxd, NULL, NULL);
+
+ /* Restore IDXD configurations. */
+ idxd_device_config_restore(idxd, idxd->idxd_saved);
+
+ /* Re-configure IDXD device if allowed. */
+ if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+ rc = idxd_device_config(idxd);
+ if (rc < 0) {
+ dev_err(dev, "HALT: %s config fails\n", idxd_name);
+ goto out;
+ }
+ }
+
+ /* Bind IDXD device to driver. */
+ rc = idxd_bind(&idxd_drv.drv, idxd_name);
+ if (rc < 0) {
+ dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name);
+ goto out;
}
- dev_dbg(dev, "Set DMA masks\n");
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ /* Bind enabled wq in the IDXD device to driver. */
+ for (i = 0; i < idxd->max_wqs; i++) {
+ if (test_bit(i, idxd->wq_enable_map)) {
+ struct idxd_wq *wq = idxd->wqs[i];
+ char wq_name[32];
+
+ wq->state = IDXD_WQ_DISABLED;
+ sprintf(wq_name, "wq%d.%d", idxd->id, wq->id);
+ /*
+ * Bind to user driver depending on wq type.
+ *
+ * Currently only user-type WQs are supported; kernel-type
+ * WQ support will be added in the future.
+ */
+ if (wq->type == IDXD_WQT_USER)
+ rc = idxd_bind(&idxd_user_drv.drv, wq_name);
+ else
+ rc = -EINVAL;
+ if (rc < 0) {
+ clear_bit(i, idxd->wq_enable_map);
+ dev_err(dev,
+ "HALT: unable to re-enable wq %s\n",
+ dev_name(wq_confdev(wq)));
+ }
+ }
+ }
+out:
+ kfree(idxd->idxd_saved);
+}
+
+static const struct pci_error_handlers idxd_error_handler = {
+ .reset_prepare = idxd_reset_prepare,
+ .reset_done = idxd_reset_done,
+};
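The reset hooks above split the FLR recovery in two: reset_prepare() snapshots driver configuration and PCI state before the function-level reset, reset_done() restores both and re-binds the device afterwards. The skeleton, with hypothetical foo_* helpers standing in for the idxd ones:

#include <linux/pci.h>

static void foo_reset_prepare(struct pci_dev *pdev)
{
	/* plus a driver-private config save, as idxd_device_config_save() */
	pci_save_state(pdev);
}

static void foo_reset_done(struct pci_dev *pdev)
{
	/* plus driver-private restore and re-bind of the device */
	pci_restore_state(pdev);
}

static const struct pci_error_handlers foo_err_handler = {
	.reset_prepare = foo_reset_prepare,
	.reset_done = foo_reset_done,
};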
+
+/*
+ * Probe the idxd PCI device.
+ * If idxd is not given, allocate idxd and set up its data.
+ *
+ * If idxd is given, idxd was already allocated and set up. Just
+ * configure the device without re-allocating or re-configuring the
+ * idxd data. This is useful for recovering from an FLR.
+ */
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ bool alloc_idxd = idxd ? false : true;
+ struct idxd_driver_data *data;
+ struct device *dev;
+ int rc;
+
+ pdev = idxd ? idxd->pdev : pdev;
+ dev = &pdev->dev;
+ data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
+ rc = pci_enable_device(pdev);
if (rc)
- goto err;
+ return rc;
+
+ if (alloc_idxd) {
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev, data);
+ if (!idxd) {
+ rc = -ENOMEM;
+ goto err_idxd_alloc;
+ }
+
+ dev_dbg(dev, "Mapping BARs\n");
+ idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
+ if (!idxd->reg_base) {
+ rc = -ENOMEM;
+ goto err_iomap;
+ }
+
+ dev_dbg(dev, "Set DMA masks\n");
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ goto err;
+ }
dev_dbg(dev, "Set PCI master\n");
pci_set_master(pdev);
pci_set_drvdata(pdev, idxd);
- idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
- rc = idxd_probe(idxd);
- if (rc) {
- dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
- goto err;
- }
+ if (alloc_idxd) {
+ idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
+ rc = idxd_probe(idxd);
+ if (rc) {
+ dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
+ goto err;
+ }
+
+ if (data->load_device_defaults) {
+ rc = data->load_device_defaults(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD loading device defaults failed\n");
+ }
+
+ rc = idxd_register_devices(idxd);
+ if (rc) {
+ dev_err(dev, "IDXD sysfs setup failed\n");
+ goto err_dev_register;
+ }
- if (data->load_device_defaults) {
- rc = data->load_device_defaults(idxd);
+ rc = idxd_device_init_debugfs(idxd);
if (rc)
- dev_warn(dev, "IDXD loading device defaults failed\n");
+ dev_warn(dev, "IDXD debugfs failed to setup\n");
}
- rc = idxd_register_devices(idxd);
- if (rc) {
- dev_err(dev, "IDXD sysfs setup failed\n");
- goto err_dev_register;
- }
+ if (!alloc_idxd) {
+ /* Release interrupts in the IDXD device. */
+ idxd_cleanup_interrupts(idxd);
- rc = idxd_device_init_debugfs(idxd);
- if (rc)
- dev_warn(dev, "IDXD debugfs failed to setup\n");
+ /* Re-enable interrupts in the IDXD device. */
+ rc = idxd_setup_interrupts(idxd);
+ if (rc)
+ dev_warn(dev, "IDXD interrupts failed to setup\n");
+ }
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
- idxd->user_submission_safe = data->user_submission_safe;
+ if (data)
+ idxd->user_submission_safe = data->user_submission_safe;
return 0;
@@ -798,6 +1197,11 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
}
+static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return idxd_pci_probe_alloc(NULL, pdev, id);
+}
+
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
struct idxd_wq *wq;
@@ -864,6 +1268,7 @@ static struct pci_driver idxd_pci_driver = {
.probe = idxd_pci_probe,
.remove = idxd_remove,
.shutdown = idxd_shutdown,
+ .err_handler = &idxd_error_handler,
};
static int __init idxd_init_module(void)
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index fc049c9c9892..1107db3ce0a3 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -383,15 +383,65 @@ static void process_evl_entries(struct idxd_device *idxd)
mutex_unlock(&evl->lock);
}
+static void idxd_device_flr(struct work_struct *work)
+{
+ struct idxd_device *idxd = container_of(work, struct idxd_device, work);
+ int rc;
+
+ /*
+ * IDXD device requires a Function Level Reset (FLR).
+ * pci_reset_function() will reset the device with FLR.
+ */
+ rc = pci_reset_function(idxd->pdev);
+ if (rc)
+ dev_err(&idxd->pdev->dev, "FLR failed\n");
+}
+
+static irqreturn_t idxd_halt(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+ if (gensts.state == IDXD_DEVICE_STATE_HALT) {
+ idxd->state = IDXD_DEV_HALTED;
+ if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
+ /*
+ * If we need a software reset, we will throw the work
+ * on a system workqueue in order to allow interrupts
+ * for the device command completions.
+ */
+ INIT_WORK(&idxd->work, idxd_device_reinit);
+ queue_work(idxd->wq, &idxd->work);
+ } else if (gensts.reset_type == IDXD_DEVICE_RESET_FLR) {
+ idxd->state = IDXD_DEV_HALTED;
+ idxd_mask_error_interrupts(idxd);
+ dev_dbg(&idxd->pdev->dev,
+ "idxd halted, doing FLR. After FLR, configs are restored\n");
+ INIT_WORK(&idxd->work, idxd_device_flr);
+ queue_work(idxd->wq, &idxd->work);
+
+ } else {
+ idxd->state = IDXD_DEV_HALTED;
+ idxd_wqs_quiesce(idxd);
+ idxd_wqs_unmap_portal(idxd);
+ idxd_device_clear_state(idxd);
+ dev_err(&idxd->pdev->dev,
+ "idxd halted, need system reset");
+
+ return -ENXIO;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
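idxd_device_flr() above runs from a workqueue because a function-level reset cannot be issued from the interrupt path; the halt handler only queues the work. The deferral pattern in a hedged sketch (hypothetical foo_* names):

#include <linux/pci.h>
#include <linux/workqueue.h>

struct foo_dev {
	struct pci_dev *pdev;
	struct work_struct work;
};

/* Work item: performs the FLR in process context. */
static void foo_do_flr(struct work_struct *work)
{
	struct foo_dev *foo = container_of(work, struct foo_dev, work);

	if (pci_reset_function(foo->pdev))
		dev_err(&foo->pdev->dev, "FLR failed\n");
}

/* Called from the halt/interrupt path: defer, don't reset inline. */
static void foo_request_flr(struct foo_dev *foo)
{
	INIT_WORK(&foo->work, foo_do_flr);
	schedule_work(&foo->work);
}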
irqreturn_t idxd_misc_thread(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
struct idxd_device *idxd = ie_to_idxd(irq_entry);
struct device *dev = &idxd->pdev->dev;
- union gensts_reg gensts;
u32 val = 0;
int i;
- bool err = false;
u32 cause;
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
@@ -401,7 +451,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (cause & IDXD_INTC_HALT_STATE)
- goto halt;
+ return idxd_halt(idxd);
if (cause & IDXD_INTC_ERR) {
spin_lock(&idxd->dev_lock);
@@ -435,7 +485,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
for (i = 0; i < 4; i++)
dev_warn_ratelimited(dev, "err[%d]: %#16.16llx\n",
i, idxd->sw_err.bits[i]);
- err = true;
}
if (cause & IDXD_INTC_INT_HANDLE_REVOKED) {
@@ -480,34 +529,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
val);
- if (!err)
- goto out;
-
-halt:
- gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
- if (gensts.state == IDXD_DEVICE_STATE_HALT) {
- idxd->state = IDXD_DEV_HALTED;
- if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
- /*
- * If we need a software reset, we will throw the work
- * on a system workqueue in order to allow interrupts
- * for the device command completions.
- */
- INIT_WORK(&idxd->work, idxd_device_reinit);
- queue_work(idxd->wq, &idxd->work);
- } else {
- idxd->state = IDXD_DEV_HALTED;
- idxd_wqs_quiesce(idxd);
- idxd_wqs_unmap_portal(idxd);
- idxd_device_clear_state(idxd);
- dev_err(&idxd->pdev->dev,
- "idxd halted, need %s.\n",
- gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
- "FLR" : "system reset");
- }
- }
-
-out:
return IRQ_HANDLED;
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index e16dbf9ab324..006ba206ab1b 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -6,6 +6,11 @@
#include <uapi/linux/idxd.h>
/* PCI Config */
+#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb
+#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
+#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
+#define PCI_DEVICE_ID_INTEL_IAA_PTL 0xb02d
+
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 94eca25ae9b9..6db1c5fcedc5 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -61,7 +61,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
return __get_desc(wq, idx, cpu);
}
-EXPORT_SYMBOL_NS_GPL(idxd_alloc_desc, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_alloc_desc, "IDXD");
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
@@ -70,7 +70,7 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
desc->cpu = -1;
sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}
-EXPORT_SYMBOL_NS_GPL(idxd_free_desc, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_free_desc, "IDXD");
static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
struct idxd_desc *desc)
@@ -219,4 +219,4 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
percpu_ref_put(&wq->wq_active);
return 0;
}
-EXPORT_SYMBOL_NS_GPL(idxd_submit_desc, IDXD);
+EXPORT_SYMBOL_NS_GPL(idxd_submit_desc, "IDXD");
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index f706eae0e76b..6af493f6ba77 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1979,13 +1979,3 @@ void idxd_unregister_devices(struct idxd_device *idxd)
device_unregister(group_confdev(group));
}
}
-
-int idxd_register_bus_type(void)
-{
- return bus_register(&dsa_bus_type);
-}
-
-void idxd_unregister_bus_type(void)
-{
- bus_unregister(&dsa_bus_type);
-}
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 0532dd2640dc..4127c1bdcca7 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -1076,7 +1076,7 @@ static struct platform_driver mdc_dma_driver = {
.of_match_table = of_match_ptr(mdc_dma_of_match),
},
.probe = mdc_dma_probe,
- .remove_new = mdc_dma_remove,
+ .remove = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e913f0db99da..a651e0995ce8 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1233,7 +1233,7 @@ static struct platform_driver imxdma_driver = {
.name = "imx-dma",
.of_match_table = imx_dma_of_dev_id,
},
- .remove_new = imxdma_remove,
+ .remove = imxdma_remove,
};
static int __init imxdma_module_init(void)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 72299a08af44..3449006cd14b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -2440,7 +2440,7 @@ static struct platform_driver sdma_driver = {
.name = "imx-sdma",
.of_match_table = sdma_dt_ids,
},
- .remove_new = sdma_remove,
+ .remove = sdma_remove,
.probe = sdma_probe,
};
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 17f6b6367113..c9aba2304de7 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -10,6 +10,8 @@
#include <linux/interrupt.h>
#include <linux/dca.h>
+#include <asm/cpuid.h>
+
/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
@@ -58,11 +60,11 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
{
/* CPUID level 9 returns DCA configuration */
/* Bit 0 indicates DCA enabled by the BIOS */
- unsigned long cpuid_level_9;
+ u32 eax;
int res;
- cpuid_level_9 = cpuid_eax(9);
- res = test_bit(0, &cpuid_level_9);
+ eax = cpuid_eax(CPUID_LEAF_DCA);
+ res = eax & BIT(0);
if (!res)
dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
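The dca.c hunk above replaces the test_bit() trick on a local variable with a plain mask of the CPUID result; per the retained comment, leaf 9 (assumed to be what CPUID_LEAF_DCA names) reports DCA configuration, with EAX bit 0 meaning "DCA enabled by BIOS". The same query from user space, x86 with GCC/clang only:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax = 0, ebx, ecx, edx;

	if (!__get_cpuid(9, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 9 not supported");
		return 1;
	}
	printf("DCA %sabled in BIOS\n", (eax & 1) ? "en" : "dis");
	return 0;
}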
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 5de8c21d41e7..acc2983e28e0 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -1028,7 +1028,7 @@ static struct platform_driver k3_pdma_driver = {
.of_match_table = k3_pdma_dt_ids,
},
.probe = k3_dma_probe,
- .remove_new = k3_dma_remove,
+ .remove = k3_dma_remove,
};
module_platform_driver(k3_pdma_driver);
diff --git a/drivers/dma/ls2x-apb-dma.c b/drivers/dma/loongson2-apb-dma.c
index 9652e8666722..c528f02b9f84 100644
--- a/drivers/dma/ls2x-apb-dma.c
+++ b/drivers/dma/loongson2-apb-dma.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Driver for the Loongson LS2X APB DMA Controller
+ * Driver for the Loongson-2 APB DMA Controller
*
* Copyright (C) 2017-2023 Loongson Corporation
*/
@@ -31,7 +31,7 @@
#define LDMA_ASK_VALID BIT(2)
#define LDMA_START BIT(3) /* DMA start operation */
#define LDMA_STOP BIT(4) /* DMA stop operation */
-#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
+#define LDMA_CONFIG_MASK GENMASK_ULL(4, 0) /* DMA controller config bits mask */
/* Bitfields in ndesc_addr field of HW descriptor */
#define LDMA_DESC_EN BIT(0) /*1: The next descriptor is valid */
@@ -692,7 +692,7 @@ MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table);
static struct platform_driver ls2x_dmac_driver = {
.probe = ls2x_dma_probe,
- .remove_new = ls2x_dma_remove,
+ .remove = ls2x_dma_remove,
.driver = {
.name = "ls2x-apbdma",
.of_match_table = ls2x_dma_of_match_table,
@@ -700,6 +700,6 @@ static struct platform_driver ls2x_dmac_driver = {
};
module_platform_driver(ls2x_dmac_driver);
-MODULE_DESCRIPTION("Loongson LS2X APB DMA Controller driver");
+MODULE_DESCRIPTION("Loongson-2 APB DMA Controller driver");
MODULE_AUTHOR("Loongson Technology Corporation Limited");
MODULE_LICENSE("GPL");
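The GENMASK_ULL change in the hunk above matters when the mask takes part in 64-bit register arithmetic, presumably the case here: the 32-bit GENMASK form, once inverted, would zero the upper 32 bits. A user-space demonstration with local equivalents of the kernel macros:

#include <stdio.h>

#define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))
#define GENMASK_ULL(h, l) (((~0ull) << (l)) & (~0ull >> (63 - (h))))

int main(void)
{
	printf("GENMASK(4,0)      = 0x%x\n", GENMASK(4, 0));
	printf("GENMASK_ULL(4,0)  = 0x%llx\n", GENMASK_ULL(4, 0));
	/* Same value, but only the ULL form keeps ~mask at full width: */
	printf("~GENMASK_ULL(4,0) = 0x%llx\n", ~GENMASK_ULL(4, 0));
	return 0;
}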
diff --git a/drivers/dma/mcf-edma-main.c b/drivers/dma/mcf-edma-main.c
index 0c5862bf26f8..9e1c6400c77b 100644
--- a/drivers/dma/mcf-edma-main.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -267,7 +267,7 @@ static struct platform_driver mcf_edma_driver = {
.name = "mcf-edma",
},
.probe = mcf_edma_probe,
- .remove_new = mcf_edma_remove,
+ .remove = mcf_edma_remove,
};
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index b69eabf12a24..d5ddb4e30e71 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -922,7 +922,7 @@ static void mtk_cqdma_remove(struct platform_device *pdev)
static struct platform_driver mtk_cqdma_driver = {
.probe = mtk_cqdma_probe,
- .remove_new = mtk_cqdma_remove,
+ .remove = mtk_cqdma_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mtk_cqdma_match,
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 58c7961ab9ad..fa77bb24a430 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -1038,7 +1038,7 @@ static void mtk_hsdma_remove(struct platform_device *pdev)
static struct platform_driver mtk_hsdma_driver = {
.probe = mtk_hsdma_probe,
- .remove_new = mtk_hsdma_remove,
+ .remove = mtk_hsdma_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mtk_hsdma_match,
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 1bdc1500be40..08e15177427b 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -637,7 +637,7 @@ static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
static struct platform_driver mtk_uart_apdma_driver = {
.probe = mtk_uart_apdma_probe,
- .remove_new = mtk_uart_apdma_remove,
+ .remove = mtk_uart_apdma_remove,
.driver = {
.name = KBUILD_MODNAME,
.pm = &mtk_uart_apdma_pm_ops,
diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c
index 7b41c670970a..9a5ec247ed6d 100644
--- a/drivers/dma/milbeaut-hdmac.c
+++ b/drivers/dma/milbeaut-hdmac.c
@@ -571,7 +571,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
static struct platform_driver milbeaut_hdmac_driver = {
.probe = milbeaut_hdmac_probe,
- .remove_new = milbeaut_hdmac_remove,
+ .remove = milbeaut_hdmac_remove,
.driver = {
.name = "milbeaut-m10v-hdmac",
.of_match_table = milbeaut_hdmac_match,
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index 2cce529b448e..58d4fd6df0bf 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -409,7 +409,7 @@ MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
static struct platform_driver milbeaut_xdmac_driver = {
.probe = milbeaut_xdmac_probe,
- .remove_new = milbeaut_xdmac_remove,
+ .remove = milbeaut_xdmac_remove,
.driver = {
.name = "milbeaut-m10v-xdmac",
.of_match_table = milbeaut_xdmac_match,
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 136fcaeff8dd..a95d31103d30 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1137,7 +1137,7 @@ static struct platform_driver mmp_pdma_driver = {
},
.id_table = mmp_pdma_id_table,
.probe = mmp_pdma_probe,
- .remove_new = mmp_pdma_remove,
+ .remove = mmp_pdma_remove,
};
module_platform_driver(mmp_pdma_driver);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index b76fe99e1151..c8dc504510f1 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -736,7 +736,7 @@ static struct platform_driver mmp_tdma_driver = {
.of_match_table = mmp_tdma_dt_ids,
},
.probe = mmp_tdma_probe,
- .remove_new = mmp_tdma_remove,
+ .remove = mmp_tdma_remove,
};
module_platform_driver(mmp_tdma_driver);
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 66dc6d31b603..de09e1ab7767 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -644,7 +644,7 @@ MODULE_DEVICE_TABLE(of, moxart_dma_match);
static struct platform_driver moxart_driver = {
.probe = moxart_probe,
- .remove_new = moxart_remove,
+ .remove = moxart_remove,
.driver = {
.name = "moxart-dma-engine",
.of_match_table = moxart_dma_match,
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 68c247a46321..bf131cb5db66 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1110,7 +1110,7 @@ MODULE_DEVICE_TABLE(of, mpc_dma_match);
static struct platform_driver mpc_dma_driver = {
.probe = mpc_dma_probe,
- .remove_new = mpc_dma_remove,
+ .remove = mpc_dma_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = mpc_dma_match,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 43efce77bb57..fa6e4646fdc2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1369,10 +1369,9 @@ static int mv_xor_probe(struct platform_device *pdev)
return 0;
if (pdev->dev.of_node) {
- struct device_node *np;
int i = 0;
- for_each_child_of_node(pdev->dev.of_node, np) {
+ for_each_child_of_node_scoped(pdev->dev.of_node, np) {
struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;
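
Note: the mv_xor hunk drops the manually declared struct device_node and switches to for_each_child_of_node_scoped(), whose loop variable carries a cleanup attribute that puts the reference automatically, including on break or early return. A minimal sketch of the pattern, assuming a per-child setup step:

	#include <linux/of.h>

	static void setup_channels(struct device_node *parent)
	{
		for_each_child_of_node_scoped(parent, child) {
			if (!of_device_is_available(child))
				continue;	/* 'child' reference dropped automatically */
			/* ... set up one channel described by 'child' ... */
		}
	}
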
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index c8c67f4d982c..cad4d4fb51ac 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -635,7 +635,7 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
writel(MV_XOR_V2_DESC_NUM,
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
- /* write the DESQ address to the DMA enngine*/
+ /* write the DESQ address to the DMA engine*/
writel(lower_32_bits(xor_dev->hw_desq),
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
writel(upper_32_bits(xor_dev->hw_desq),
@@ -884,7 +884,7 @@ static struct platform_driver mv_xor_v2_driver = {
.probe = mv_xor_v2_probe,
.suspend = mv_xor_v2_suspend,
.resume = mv_xor_v2_resume,
- .remove_new = mv_xor_v2_remove,
+ .remove = mv_xor_v2_remove,
.driver = {
.name = "mv_xor_v2",
.of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 3b011a91d48e..0d6324c4e2be 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1515,7 +1515,7 @@ static struct platform_driver nbpf_driver = {
},
.id_table = nbpf_ids,
.probe = nbpf_probe,
- .remove_new = nbpf_remove,
+ .remove = nbpf_remove,
};
module_platform_driver(nbpf_driver);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index aa436f9e3571..57cec757d8f5 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1252,7 +1252,7 @@ static void owl_dma_remove(struct platform_device *pdev)
static struct platform_driver owl_dma_driver = {
.probe = owl_dma_probe,
- .remove_new = owl_dma_remove,
+ .remove = owl_dma_remove,
.driver = {
.name = "dma-owl",
.of_match_table = of_match_ptr(owl_dma_match),
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 7b78759ac734..9d2a5a967a99 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4549,7 +4549,7 @@ MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
static struct platform_driver ppc440spe_adma_driver = {
.probe = ppc440spe_adma_probe,
- .remove_new = ppc440spe_adma_remove,
+ .remove = ppc440spe_adma_remove,
.driver = {
.name = "PPC440SP(E)-ADMA",
.of_match_table = ppc440spe_adma_of_match,
diff --git a/drivers/dma/ptdma/Kconfig b/drivers/dma/ptdma/Kconfig
deleted file mode 100644
index b430edd709f9..000000000000
--- a/drivers/dma/ptdma/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config AMD_PTDMA
- tristate "AMD PassThru DMA Engine"
- depends on X86_64 && PCI
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Enable support for the AMD PTDMA controller. This controller
- provides DMA capabilities to perform high bandwidth memory to
- memory and IO copy operations. It performs DMA transfer through
- queue-based descriptor management. This DMA controller is intended
- to be used with AMD Non-Transparent Bridge devices and not for
- general purpose peripheral DMA.
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 31f8da810c05..e50cf3357e5e 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1442,7 +1442,7 @@ static struct platform_driver pxad_driver = {
},
.id_table = pxad_id_table,
.probe = pxad_probe,
- .remove_new = pxad_remove,
+ .remove = pxad_remove,
};
static bool pxad_filter_fn(struct dma_chan *chan, void *param)
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d43a881e43b9..c14557efd577 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -59,6 +59,9 @@ struct bam_desc_hw {
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
+#define BAM_NDP_REVISION_START 0x20
+#define BAM_NDP_REVISION_END 0x27
+
struct bam_async_desc {
struct virt_dma_desc vd;
@@ -398,6 +401,7 @@ struct bam_device {
/* dma start transaction tasklet */
struct tasklet_struct task;
+ u32 bam_revision;
};
/**
@@ -441,8 +445,10 @@ static void bam_reset(struct bam_device *bdev)
writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
/* set descriptor threshold, start with 4 bytes */
- writel_relaxed(DEFAULT_CNT_THRSHLD,
- bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+ if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
+ BAM_NDP_REVISION_END))
+ writel_relaxed(DEFAULT_CNT_THRSHLD,
+ bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
@@ -1000,9 +1006,10 @@ static void bam_apply_new_config(struct bam_chan *bchan,
maxburst = bchan->slave.src_maxburst;
else
maxburst = bchan->slave.dst_maxburst;
-
- writel_relaxed(maxburst,
- bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+ if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
+ BAM_NDP_REVISION_END))
+ writel_relaxed(maxburst,
+ bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
}
bchan->reconfigure = 0;
@@ -1192,10 +1199,11 @@ static int bam_init(struct bam_device *bdev)
u32 val;
/* read revision and configuration information */
- if (!bdev->num_ees) {
- val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
+ if (!bdev->num_ees)
bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
- }
+
+ bdev->bam_revision = val & REVISION_MASK;
/* check that configured EE is within range */
if (bdev->ee >= bdev->num_ees)
@@ -1469,7 +1477,7 @@ static const struct dev_pm_ops bam_dma_pm_ops = {
static struct platform_driver bam_dma_driver = {
.probe = bam_dma_probe,
- .remove_new = bam_dma_remove,
+ .remove = bam_dma_remove,
.driver = {
.name = "bam-dma-engine",
.pm = &bam_dma_pm_ops,
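
Note: bam_dma now reads BAM_REVISION unconditionally (it was previously skipped when num_ees came from the DT) and caches the revision, so that writes to BAM_DESC_CNT_TRSHLD can be gated to the BAM NDP revision range where the register actually exists. A sketch of the gated write, reusing the patch's names and the kernel's in_range() helper:

	static void bam_set_desc_threshold(struct bam_device *bdev, u32 thresh)
	{
		/* the threshold register is only implemented on BAM NDP parts */
		if (!in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
			      BAM_NDP_REVISION_END))
			return;

		writel_relaxed(thresh, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
	}
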
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 52a7c8f2498f..b1f0001cc99c 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -18,6 +18,7 @@
#include "../virt-dma.h"
#define TRE_TYPE_DMA 0x10
+#define TRE_TYPE_IMMEDIATE_DMA 0x11
#define TRE_TYPE_GO 0x20
#define TRE_TYPE_CONFIG0 0x22
@@ -64,6 +65,7 @@
/* DMA TRE */
#define TRE_DMA_LEN GENMASK(23, 0)
+#define TRE_DMA_IMMEDIATE_LEN GENMASK(3, 0)
/* Register offsets from gpi-top */
#define GPII_n_CH_k_CNTXT_0_OFFS(n, k) (0x20000 + (0x4000 * (n)) + (0x80 * (k)))
@@ -1711,6 +1713,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
dma_addr_t address;
struct gpi_tre *tre;
unsigned int i;
+ int len;
/* first create config tre if applicable */
if (direction == DMA_MEM_TO_DEV && spi->set_config) {
@@ -1763,14 +1766,30 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
tre_idx++;
address = sg_dma_address(sgl);
- tre->dword[0] = lower_32_bits(address);
- tre->dword[1] = upper_32_bits(address);
+ len = sg_dma_len(sgl);
- tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN);
+ /* Support immediate DMA for write transfers with data length up to 8 bytes */
+ if (direction == DMA_MEM_TO_DEV && len <= 2 * sizeof(tre->dword[0])) {
+ /*
+ * For immediate DMA the data length is not always 8 bytes; it can
+ * be less than 8, hence initialize both dwords with 0
+ */
+ tre->dword[0] = 0;
+ tre->dword[1] = 0;
+ memcpy(&tre->dword[0], sg_virt(sgl), len);
- tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
- if (direction == DMA_MEM_TO_DEV)
- tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_IMMEDIATE_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_IMMEDIATE_DMA, TRE_FLAGS_TYPE);
+ } else {
+ tre->dword[0] = lower_32_bits(address);
+ tre->dword[1] = upper_32_bits(address);
+
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
+ }
+
+ tre->dword[3] |= u32_encode_bits(direction == DMA_MEM_TO_DEV,
+ TRE_FLAGS_IEOT);
for (i = 0; i < tre_idx; i++)
dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
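
Note: the gpi.c change inlines small memory-to-device payloads (up to 8 bytes, i.e. the size of two TRE dwords) directly into the TRE as a TRE_TYPE_IMMEDIATE_DMA element, instead of passing a DMA address. A standalone sketch of the packing; the two new macros are copied from the patch, while TRE_FLAGS_TYPE is assumed from the driver:

	#include <linux/bits.h>
	#include <linux/bitfield.h>
	#include <linux/string.h>

	#define TRE_TYPE_IMMEDIATE_DMA	0x11
	#define TRE_DMA_IMMEDIATE_LEN	GENMASK(3, 0)
	#define TRE_FLAGS_TYPE		GENMASK(23, 16)	/* as in the driver */

	struct gpi_tre { u32 dword[4]; };

	static void fill_immediate_tre(struct gpi_tre *tre, const void *buf, int len)
	{
		/* len may be < 8, so zero both payload dwords before the copy */
		tre->dword[0] = 0;
		tre->dword[1] = 0;
		memcpy(&tre->dword[0], buf, len);

		tre->dword[2] = u32_encode_bits(len, TRE_DMA_IMMEDIATE_LEN);
		tre->dword[3] = u32_encode_bits(TRE_TYPE_IMMEDIATE_DMA, TRE_FLAGS_TYPE);
	}
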
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 4d2cd8d9ec74..c2b3e4452e71 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -948,7 +948,7 @@ MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
static struct platform_driver hidma_driver = {
.probe = hidma_probe,
- .remove_new = hidma_remove,
+ .remove = hidma_remove,
.shutdown = hidma_shutdown,
.driver = {
.name = "hidma",
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
index c1db398adc84..6be54fddcee1 100644
--- a/drivers/dma/qcom/qcom_adm.c
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -937,7 +937,7 @@ MODULE_DEVICE_TABLE(of, adm_of_match);
static struct platform_driver adm_dma_driver = {
.probe = adm_dma_probe,
- .remove_new = adm_dma_remove,
+ .remove = adm_dma_remove,
.driver = {
.name = "adm-dma-engine",
.of_match_table = adm_of_match,
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 01e656c69e6c..dc1a9a05252e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -1079,7 +1079,7 @@ static struct platform_driver sa11x0_dma_driver = {
.pm = &sa11x0_dma_pm_ops,
},
.probe = sa11x0_dma_probe,
- .remove_new = sa11x0_dma_remove,
+ .remove = sa11x0_dma_remove,
};
static int __init sa11x0_dma_init(void)
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 428473611115..7ad3c29be146 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -354,7 +354,7 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
if (!residue) {
tasklet_hi_schedule(&chan->done_tasklet);
} else {
- /* submit next trascatioin if possible */
+ /* submit next transaction if possible */
struct sf_pdma_desc *desc = chan->desc;
desc->src_addr += desc->xfer_size - residue;
@@ -633,7 +633,7 @@ MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
static struct platform_driver sf_pdma_driver = {
.probe = sf_pdma_probe,
- .remove_new = sf_pdma_remove,
+ .remove = sf_pdma_remove,
.driver = {
.name = "sf-pdma",
.of_match_table = sf_pdma_dt_ids,
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index c0b2997ab7fd..6ea5a880b433 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -49,10 +49,10 @@ config RENESAS_USB_DMAC
SoCs.
config RZ_DMAC
- tristate "Renesas RZ/{G2L,V2L} DMA Controller"
- depends on ARCH_RZG2L || COMPILE_TEST
+ tristate "Renesas RZ DMA Controller"
+ depends on ARCH_R7S72100 || ARCH_RZG2L || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
- This driver supports the general purpose DMA controller found in the
- Renesas RZ/{G2L,V2L} SoC variants.
+ This driver supports the general purpose DMA controller typically
+ found in the Renesas RZ SoC variants.
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 1094a2f82164..0c45ce8c74aa 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -2023,6 +2023,10 @@ static const struct of_device_id rcar_dmac_of_ids[] = {
.compatible = "renesas,rcar-gen4-dmac",
.data = &rcar_gen4_dmac_data,
}, {
+ /*
+ * Backward compatibility for kernels between v5.12 and v5.19,
+ * which didn't combine this entry with "renesas,rcar-gen4-dmac"
+ */
.compatible = "renesas,dmac-r8a779a0",
.data = &rcar_gen4_dmac_data,
},
@@ -2037,7 +2041,7 @@ static struct platform_driver rcar_dmac_driver = {
.of_match_table = rcar_dmac_of_ids,
},
.probe = rcar_dmac_probe,
- .remove_new = rcar_dmac_remove,
+ .remove = rcar_dmac_remove,
.shutdown = rcar_dmac_shutdown,
};
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 65a27c5a7bce..9235db551026 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -601,22 +601,25 @@ static int rz_dmac_config(struct dma_chan *chan,
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
u32 val;
- channel->src_per_address = config->src_addr;
channel->dst_per_address = config->dst_addr;
-
- val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
- if (val == CHCFG_DS_INVALID)
- return -EINVAL;
-
channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
- channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+ if (channel->dst_per_address) {
+ val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
- val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
- if (val == CHCFG_DS_INVALID)
- return -EINVAL;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
+ }
+ channel->src_per_address = config->src_addr;
channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
- channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+ if (channel->src_per_address) {
+ val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
+ if (val == CHCFG_DS_INVALID)
+ return -EINVAL;
+
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
+ }
return 0;
}
@@ -890,7 +893,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
/* Initialize the channels. */
INIT_LIST_HEAD(&dmac->engine.channels);
- dmac->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
+ dmac->rstc = devm_reset_control_array_get_optional_exclusive(&pdev->dev);
if (IS_ERR(dmac->rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(dmac->rstc),
"failed to get resets\n");
@@ -1001,7 +1004,7 @@ static struct platform_driver rz_dmac_driver = {
.of_match_table = of_rz_dmac_match,
},
.probe = rz_dmac_probe,
- .remove_new = rz_dmac_remove,
+ .remove = rz_dmac_remove,
};
module_platform_driver(rz_dmac_driver);
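
Note: the rz_dmac_config change makes each direction's bus-width validation conditional on the matching peripheral address being set, so configuring only one direction no longer fails on the other's unset width. The per-direction logic factors out naturally; a sketch for the destination side, reusing the patch's names:

	static int rz_dmac_cfg_dst(struct rz_dmac_chan *channel,
				   const struct dma_slave_config *config)
	{
		u32 val;

		channel->dst_per_address = config->dst_addr;
		channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
		if (!channel->dst_per_address)
			return 0;	/* destination unused: skip the width check */

		val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
		if (val == CHCFG_DS_INVALID)
			return -EINVAL;

		channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
		return 0;
	}
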
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 588c5f409a80..fdd41e1c2263 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -961,7 +961,7 @@ void shdma_chan_probe(struct shdma_dev *sdev,
spin_lock_init(&schan->chan_lock);
- /* Init descripter manage list */
+ /* Init descriptor manage list */
INIT_LIST_HEAD(&schan->ld_queue);
INIT_LIST_HEAD(&schan->ld_free);
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 8ead0a1fd237..093e449e19ee 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -906,7 +906,7 @@ static struct platform_driver sh_dmae_driver = {
.pm = &sh_dmae_pm,
.name = SH_DMAE_DRV_NAME,
},
- .remove_new = sh_dmae_remove,
+ .remove = sh_dmae_remove,
};
static int __init sh_dmae_init(void)
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index f7cd0cad056c..7e2b6c97fa2f 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -301,7 +301,7 @@ static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan,
struct usb_dmac_desc *desc = NULL;
unsigned long flags;
- /* Get a freed descritpor */
+ /* Get a freed descriptor */
spin_lock_irqsave(&chan->vc.lock, flags);
list_for_each_entry(desc, &chan->desc_freed, node) {
if (sg_len <= desc->sg_allocated_len) {
@@ -899,7 +899,7 @@ static struct platform_driver usb_dmac_driver = {
.of_match_table = usb_dmac_of_ids,
},
.probe = usb_dmac_probe,
- .remove_new = usb_dmac_remove,
+ .remove = usb_dmac_remove,
.shutdown = usb_dmac_shutdown,
};
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 3f54ff37c5e0..187a090463ce 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1298,7 +1298,7 @@ static const struct dev_pm_ops sprd_dma_pm_ops = {
static struct platform_driver sprd_dma_driver = {
.probe = sprd_dma_probe,
- .remove_new = sprd_dma_remove,
+ .remove = sprd_dma_remove,
.driver = {
.name = "sprd-dma",
.of_match_table = sprd_dma_match,
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index 8880b5e336f8..c65ee0c7bfbd 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -858,7 +858,7 @@ static struct platform_driver st_fdma_platform_driver = {
.of_match_table = st_fdma_match,
},
.probe = st_fdma_probe,
- .remove_new = st_fdma_remove,
+ .remove = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);
diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c
index 0be6e944df6f..0c6c4258b195 100644
--- a/drivers/dma/stm32/stm32-dma3.c
+++ b/drivers/dma/stm32/stm32-dma3.c
@@ -221,6 +221,8 @@ enum stm32_dma3_port_data_width {
#define STM32_DMA3_DT_BREQ BIT(8) /* CTR2_BREQ */
#define STM32_DMA3_DT_PFREQ BIT(9) /* CTR2_PFREQ */
#define STM32_DMA3_DT_TCEM GENMASK(13, 12) /* CTR2_TCEM */
+#define STM32_DMA3_DT_NOPACK BIT(16) /* CTR1_PAM */
+#define STM32_DMA3_DT_NOREFACT BIT(17)
/* struct stm32_dma3_chan .config_set bitfield */
#define STM32_DMA3_CFG_SET_DT BIT(0)
@@ -228,6 +230,8 @@ enum stm32_dma3_port_data_width {
#define STM32_DMA3_CFG_SET_BOTH (STM32_DMA3_CFG_SET_DT | STM32_DMA3_CFG_SET_DMA)
#define STM32_DMA3_MAX_BLOCK_SIZE ALIGN_DOWN(CBR1_BNDT, 64)
+#define STM32_DMA3_MAX_BURST_LEN (1 + min_t(u32, FIELD_MAX(CTR1_SBL_1), \
+ FIELD_MAX(CTR1_DBL_1)))
#define port_is_ahb(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \
((_maxdw) != DW_INVALID) && ((_maxdw) == DW_32); })
#define port_is_axi(maxdw) ({ typeof(maxdw) (_maxdw) = (maxdw); \
@@ -293,6 +297,10 @@ struct stm32_dma3_chan {
u32 dma_status;
};
+struct stm32_dma3_pdata {
+ u32 axi_max_burst_len;
+};
+
struct stm32_dma3_ddata {
struct dma_device dma_dev;
void __iomem *base;
@@ -301,6 +309,7 @@ struct stm32_dma3_ddata {
u32 dma_channels;
u32 dma_requests;
enum stm32_dma3_port_data_width ports_max_dw[2];
+ u32 axi_max_burst_len;
};
static inline struct stm32_dma3_ddata *to_stm32_dma3_ddata(struct stm32_dma3_chan *chan)
@@ -533,7 +542,8 @@ static enum dma_slave_buswidth stm32_dma3_get_max_dw(u32 chan_max_burst,
return 1 << __ffs(len | addr | max_dw);
}
-static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, u32 chan_max_burst)
+static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw,
+ u32 chan_max_burst, u32 bus_max_burst)
{
u32 max_burst = chan_max_burst ? chan_max_burst / dw : 1;
@@ -544,8 +554,9 @@ static u32 stm32_dma3_get_max_burst(u32 len, enum dma_slave_buswidth dw, u32 cha
/*
* HW doesn't modify the burst if burst size <= half of the fifo size.
* If len is not a multiple of burst size, last burst is shortened by HW.
+ * Also honor the maximum burst supported by the interconnect bus.
*/
- return max_burst;
+ return min_t(u32, max_burst, bus_max_burst);
}
static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transfer_direction dir,
@@ -554,6 +565,7 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf
{
struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
struct dma_device dma_device = ddata->dma_dev;
+ u32 src_max_burst = STM32_DMA3_MAX_BURST_LEN, dst_max_burst = STM32_DMA3_MAX_BURST_LEN;
u32 sdw, ddw, sbl_max, dbl_max, tcem, init_dw, init_bl_max;
u32 _ctr1 = 0, _ctr2 = 0;
u32 ch_conf = chan->dt_config.ch_conf;
@@ -594,10 +606,14 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf
_ctr1 |= CTR1_SINC;
if (sap)
_ctr1 |= CTR1_SAP;
+ if (port_is_axi(sap_max_dw)) /* AXI - apply axi maximum burst limitation */
+ src_max_burst = ddata->axi_max_burst_len;
if (FIELD_GET(STM32_DMA3_DT_DINC, tr_conf))
_ctr1 |= CTR1_DINC;
if (dap)
_ctr1 |= CTR1_DAP;
+ if (port_is_axi(dap_max_dw)) /* AXI - apply axi maximum burst limitation */
+ dst_max_burst = ddata->axi_max_burst_len;
_ctr2 |= FIELD_PREP(CTR2_REQSEL, chan->dt_config.req_line) & ~CTR2_SWREQ;
if (FIELD_GET(STM32_DMA3_DT_BREQ, tr_conf))
@@ -617,11 +633,16 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf
/* Set destination (device) data width and burst */
ddw = min_t(u32, ddw, stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw,
len, dst_addr));
- dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst));
+ dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst,
+ dst_max_burst));
/* Set source (memory) data width and burst */
sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr);
- sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst);
+ sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst);
+ if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf)) {
+ sdw = ddw;
+ sbl_max = dbl_max;
+ }
_ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
_ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1);
@@ -647,11 +668,17 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf
/* Set source (device) data width and burst */
sdw = min_t(u32, sdw, stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw,
len, src_addr));
- sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst));
+ sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst,
+ src_max_burst));
/* Set destination (memory) data width and burst */
ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr);
- dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst);
+ dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst);
+ if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf) ||
+ ((_ctr2 & CTR2_PFREQ) && ddw > sdw)) { /* Packing to wider ddw not supported */
+ ddw = sdw;
+ dbl_max = sbl_max;
+ }
_ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
_ctr1 |= FIELD_PREP(CTR1_SBL_1, sbl_max - 1);
@@ -678,22 +705,24 @@ static int stm32_dma3_chan_prep_hw(struct stm32_dma3_chan *chan, enum dma_transf
init_dw = sdw;
init_bl_max = sbl_max;
sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr);
- sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst);
+ sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst);
if (chan->config_set & STM32_DMA3_CFG_SET_DMA) {
sdw = min_t(u32, init_dw, sdw);
- sbl_max = min_t(u32, init_bl_max,
- stm32_dma3_get_max_burst(len, sdw, chan->max_burst));
+ sbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, sdw,
+ chan->max_burst,
+ src_max_burst));
}
/* Set destination (memory) data width and burst */
init_dw = ddw;
init_bl_max = dbl_max;
ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr);
- dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst);
+ dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst);
if (chan->config_set & STM32_DMA3_CFG_SET_DMA) {
ddw = min_t(u32, init_dw, ddw);
- dbl_max = min_t(u32, init_bl_max,
- stm32_dma3_get_max_burst(len, ddw, chan->max_burst));
+ dbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, ddw,
+ chan->max_burst,
+ dst_max_burst));
}
_ctr1 |= FIELD_PREP(CTR1_SDW_LOG2, ilog2(sdw));
@@ -1116,6 +1145,28 @@ static void stm32_dma3_free_chan_resources(struct dma_chan *c)
chan->config_set = 0;
}
+static u32 stm32_dma3_get_ll_count(struct stm32_dma3_chan *chan, size_t len, bool prevent_refactor)
+{
+ u32 count;
+
+ if (prevent_refactor)
+ return DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE);
+
+ count = len / STM32_DMA3_MAX_BLOCK_SIZE;
+ len -= (len / STM32_DMA3_MAX_BLOCK_SIZE) * STM32_DMA3_MAX_BLOCK_SIZE;
+
+ if (len >= chan->max_burst) {
+ count += 1; /* len < STM32_DMA3_MAX_BLOCK_SIZE here, so it fits in one item */
+ len -= (len / chan->max_burst) * chan->max_burst;
+ }
+
+ /* Unaligned remainder fits in one extra item */
+ if (len > 0)
+ count += 1;
+
+ return count;
+}
+
static void stm32_dma3_init_chan_config_for_memcpy(struct stm32_dma3_chan *chan,
dma_addr_t dst, dma_addr_t src)
{
@@ -1150,8 +1201,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_cha
struct stm32_dma3_swdesc *swdesc;
size_t next_size, offset;
u32 count, i, ctr1, ctr2;
+ bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) ||
+ !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf);
- count = DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE);
+ count = stm32_dma3_get_ll_count(chan, len, prevent_refactor);
swdesc = stm32_dma3_chan_desc_alloc(chan, count);
if (!swdesc)
@@ -1167,6 +1220,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_dma_memcpy(struct dma_cha
remaining = len - offset;
next_size = min_t(size_t, remaining, STM32_DMA3_MAX_BLOCK_SIZE);
+ if (!prevent_refactor &&
+ (next_size < STM32_DMA3_MAX_BLOCK_SIZE && next_size >= chan->max_burst))
+ next_size = chan->max_burst * (remaining / chan->max_burst);
+
ret = stm32_dma3_chan_prep_hw(chan, DMA_MEM_TO_MEM, &swdesc->ccr, &ctr1, &ctr2,
src + offset, dst + offset, next_size);
if (ret)
@@ -1203,14 +1260,13 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan
size_t len;
dma_addr_t sg_addr, dev_addr, src, dst;
u32 i, j, count, ctr1, ctr2;
+ bool prevent_refactor = !!FIELD_GET(STM32_DMA3_DT_NOPACK, chan->dt_config.tr_conf) ||
+ !!FIELD_GET(STM32_DMA3_DT_NOREFACT, chan->dt_config.tr_conf);
int ret;
- count = sg_len;
- for_each_sg(sgl, sg, sg_len, i) {
- len = sg_dma_len(sg);
- if (len > STM32_DMA3_MAX_BLOCK_SIZE)
- count += DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE) - 1;
- }
+ count = 0;
+ for_each_sg(sgl, sg, sg_len, i)
+ count += stm32_dma3_get_ll_count(chan, sg_dma_len(sg), prevent_refactor);
swdesc = stm32_dma3_chan_desc_alloc(chan, count);
if (!swdesc)
@@ -1227,6 +1283,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan
do {
size_t chunk = min_t(size_t, len, STM32_DMA3_MAX_BLOCK_SIZE);
+ if (!prevent_refactor &&
+ (chunk < STM32_DMA3_MAX_BLOCK_SIZE && chunk >= chan->max_burst))
+ chunk = chan->max_burst * (len / chan->max_burst);
+
if (dir == DMA_MEM_TO_DEV) {
src = sg_addr;
dst = dev_addr;
@@ -1259,6 +1319,10 @@ static struct dma_async_tx_descriptor *stm32_dma3_prep_slave_sg(struct dma_chan
} while (len);
}
+ if (count != sg_len && chan->tcem != CTR2_TCEM_CHANNEL)
+ dev_warn(chan2dev(chan), "Linked-list refactored, %d items instead of %d\n",
+ count, sg_len);
+
/* Enable Error interrupts */
swdesc->ccr |= CCR_USEIE | CCR_ULEIE | CCR_DTEIE;
/* Enable Transfer state interrupts */
@@ -1601,8 +1665,12 @@ static u32 stm32_dma3_check_rif(struct stm32_dma3_ddata *ddata)
return chan_reserved;
}
+static struct stm32_dma3_pdata stm32mp25_pdata = {
+ .axi_max_burst_len = 16,
+};
+
static const struct of_device_id stm32_dma3_of_match[] = {
- { .compatible = "st,stm32mp25-dma3", },
+ { .compatible = "st,stm32mp25-dma3", .data = &stm32mp25_pdata, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma3_of_match);
@@ -1610,6 +1678,7 @@ MODULE_DEVICE_TABLE(of, stm32_dma3_of_match);
static int stm32_dma3_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ const struct stm32_dma3_pdata *pdata;
struct stm32_dma3_ddata *ddata;
struct reset_control *reset;
struct stm32_dma3_chan *chan;
@@ -1704,6 +1773,16 @@ static int stm32_dma3_probe(struct platform_device *pdev)
else /* Dual master ports */
ddata->ports_max_dw[1] = FIELD_GET(G_M1_DATA_WIDTH_ENC, hwcfgr);
+ /* axi_max_burst_len is optional, if not defined, use STM32_DMA3_MAX_BURST_LEN */
+ ddata->axi_max_burst_len = STM32_DMA3_MAX_BURST_LEN;
+ pdata = device_get_match_data(&pdev->dev);
+ if (pdata && pdata->axi_max_burst_len) {
+ ddata->axi_max_burst_len = min_t(u32, pdata->axi_max_burst_len,
+ STM32_DMA3_MAX_BURST_LEN);
+ dev_dbg(&pdev->dev, "Burst is limited to %u beats through AXI port\n",
+ ddata->axi_max_burst_len);
+ }
+
ddata->chans = devm_kcalloc(&pdev->dev, ddata->dma_channels, sizeof(*ddata->chans),
GFP_KERNEL);
if (!ddata->chans) {
@@ -1827,7 +1906,7 @@ static const struct dev_pm_ops stm32_dma3_pm_ops = {
static struct platform_driver stm32_dma3_driver = {
.probe = stm32_dma3_probe,
- .remove_new = stm32_dma3_remove,
+ .remove = stm32_dma3_remove,
.driver = {
.name = "stm32-dma3",
.of_match_table = stm32_dma3_of_match,
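
Note: two threads run through the stm32-dma3 diff. Burst lengths are now clamped to the AXI port limit carried in match data, and linked-list sizing moves into stm32_dma3_get_ll_count(), which emits full max-block items, at most one burst-multiple item, and at most one unaligned tail, unless NOPACK/NOREFACT forces a plain DIV_ROUND_UP. A userspace worked example of that counting, with made-up sizes:

	#include <stdio.h>

	#define MAX_BLOCK 65472u	/* stand-in for STM32_DMA3_MAX_BLOCK_SIZE */

	static unsigned int ll_count(size_t len, unsigned int max_burst,
				     int prevent_refactor)
	{
		unsigned int count;

		if (prevent_refactor)
			return (len + MAX_BLOCK - 1) / MAX_BLOCK;

		count = len / MAX_BLOCK;		/* full max-size items */
		len -= count * MAX_BLOCK;

		if (len >= max_burst) {			/* one burst-multiple item */
			count++;
			len -= (len / max_burst) * max_burst;
		}

		if (len > 0)				/* unaligned tail item */
			count++;

		return count;
	}

	int main(void)
	{
		/* 200000 B, 64 B bursts: 3 full blocks + burst item + no tail = 4 */
		printf("%u\n", ll_count(200000, 64, 0));
		return 0;
	}
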
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 2e7f9b07fdd2..24796aaaddfa 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -13,7 +13,9 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -31,12 +33,21 @@
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode) ((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type) (type)
+#define SUNIV_DMA_CFG_DST_DATA_WIDTH(width) ((width) << 24)
+#define SUNIV_DMA_CFG_SRC_DATA_WIDTH(width) ((width) << 8)
+
+#define SUN4I_MAX_BURST 8
+#define SUNIV_MAX_BURST 4
+
/** Normal DMA register values **/
/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM 0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT (0x1F + 1)
+#define SUNIV_NDMA_DRQ_TYPE_SDRAM 0x11
+#define SUNIV_NDMA_DRQ_TYPE_LIMIT (0x17 + 1)
+
/** Normal DMA register layout **/
/* Dedicated DMA source/destination address mode values */
@@ -50,6 +61,9 @@
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE BIT(6)
+#define SUNIV_NDMA_CFG_CONT_MODE BIT(29)
+#define SUNIV_NDMA_CFG_WAIT_STATE(n) ((n) << 26)
+
/** Dedicated DMA register values **/
/* Dedicated DMA source/destination address mode values */
@@ -62,6 +76,9 @@
#define SUN4I_DDMA_DRQ_TYPE_SDRAM 0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT (0x1F + 1)
+#define SUNIV_DDMA_DRQ_TYPE_SDRAM 0x1
+#define SUNIV_DDMA_DRQ_TYPE_LIMIT (0x9 + 1)
+
/** Dedicated DMA register layout **/
/* Dedicated DMA configuration register layout */
@@ -115,6 +132,11 @@
#define SUN4I_DMA_NR_MAX_VCHANS \
(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
+#define SUNIV_NDMA_NR_MAX_CHANNELS 4
+#define SUNIV_DDMA_NR_MAX_CHANNELS 4
+#define SUNIV_NDMA_NR_MAX_VCHANS (24 * 2 - 1)
+#define SUNIV_DDMA_NR_MAX_VCHANS 10
+
/* This set of SUN4I_DDMA timing parameters were found experimentally while
* working with the SPI driver and seem to make it behave correctly */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
@@ -132,6 +154,33 @@
#define SUN4I_DDMA_MAX_SEG_SIZE SZ_16M
#define SUN4I_DMA_MAX_SEG_SIZE SUN4I_NDMA_MAX_SEG_SIZE
+/*
+ * Hardware channels / ports representation
+ *
+ * The hardware is used in several SoCs, with differing numbers
+ * of channels and endpoints. This structure ties those numbers
+ * to a certain compatible string.
+ */
+struct sun4i_dma_config {
+ u32 ndma_nr_max_channels;
+ u32 ndma_nr_max_vchans;
+
+ u32 ddma_nr_max_channels;
+ u32 ddma_nr_max_vchans;
+
+ u32 dma_nr_max_channels;
+
+ void (*set_dst_data_width)(u32 *p_cfg, s8 data_width);
+ void (*set_src_data_width)(u32 *p_cfg, s8 data_width);
+ int (*convert_burst)(u32 maxburst);
+
+ u8 ndma_drq_sdram;
+ u8 ddma_drq_sdram;
+
+ u8 max_burst;
+ bool has_reset;
+};
+
struct sun4i_dma_pchan {
/* Register base of channel */
void __iomem *base;
@@ -170,7 +219,7 @@ struct sun4i_dma_contract {
};
struct sun4i_dma_dev {
- DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
+ unsigned long *pchans_used;
struct dma_device slave;
struct sun4i_dma_pchan *pchans;
struct sun4i_dma_vchan *vchans;
@@ -178,6 +227,8 @@ struct sun4i_dma_dev {
struct clk *clk;
int irq;
spinlock_t lock;
+ const struct sun4i_dma_config *cfg;
+ struct reset_control *rst;
};
static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
@@ -200,7 +251,27 @@ static struct device *chan2dev(struct dma_chan *chan)
return &chan->dev->device;
}
-static int convert_burst(u32 maxburst)
+static void set_dst_data_width_a10(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(data_width);
+}
+
+static void set_src_data_width_a10(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(data_width);
+}
+
+static void set_dst_data_width_f1c100s(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUNIV_DMA_CFG_DST_DATA_WIDTH(data_width);
+}
+
+static void set_src_data_width_f1c100s(u32 *p_cfg, s8 data_width)
+{
+ *p_cfg |= SUNIV_DMA_CFG_SRC_DATA_WIDTH(data_width);
+}
+
+static int convert_burst_a10(u32 maxburst)
{
if (maxburst > 8)
return -EINVAL;
@@ -209,6 +280,15 @@ static int convert_burst(u32 maxburst)
return (maxburst >> 2);
}
+static int convert_burst_f1c100s(u32 maxburst)
+{
+ if (maxburst > 4)
+ return -EINVAL;
+
+ /* 1 -> 0, 4 -> 1 */
+ return (maxburst >> 2);
+}
+
static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
@@ -233,15 +313,15 @@ static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
int i, max;
/*
- * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
- * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
+ * pchans 0-priv->cfg->ndma_nr_max_channels are normal, and
+ * priv->cfg->ndma_nr_max_channels+ are dedicated ones
*/
if (vchan->is_dedicated) {
- i = SUN4I_NDMA_NR_MAX_CHANNELS;
- max = SUN4I_DMA_NR_MAX_CHANNELS;
+ i = priv->cfg->ndma_nr_max_channels;
+ max = priv->cfg->dma_nr_max_channels;
} else {
i = 0;
- max = SUN4I_NDMA_NR_MAX_CHANNELS;
+ max = priv->cfg->ndma_nr_max_channels;
}
spin_lock_irqsave(&priv->lock, flags);
@@ -444,6 +524,7 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
size_t len, struct dma_slave_config *sconfig,
enum dma_transfer_direction direction)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_promise *promise;
int ret;
@@ -467,13 +548,13 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
sconfig->src_addr_width, sconfig->dst_addr_width);
/* Source burst */
- ret = convert_burst(sconfig->src_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->src_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
- ret = convert_burst(sconfig->dst_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
@@ -482,13 +563,13 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
ret = convert_buswidth(sconfig->src_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+ priv->cfg->set_src_data_width(&promise->cfg, ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+ priv->cfg->set_dst_data_width(&promise->cfg, ret);
return promise;
@@ -510,6 +591,7 @@ static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
size_t len, struct dma_slave_config *sconfig)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_promise *promise;
int ret;
@@ -524,13 +606,13 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;
/* Source burst */
- ret = convert_burst(sconfig->src_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->src_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
- ret = convert_burst(sconfig->dst_maxburst);
+ ret = priv->cfg->convert_burst(sconfig->dst_maxburst);
if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
@@ -539,13 +621,13 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
ret = convert_buswidth(sconfig->src_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+ priv->cfg->set_src_data_width(&promise->cfg, ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
if (ret < 0)
goto fail;
- promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+ priv->cfg->set_dst_data_width(&promise->cfg, ret);
return promise;
@@ -622,6 +704,7 @@ static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
dma_addr_t src, size_t len, unsigned long flags)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -638,8 +721,8 @@ sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
*/
sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- sconfig->src_maxburst = 8;
- sconfig->dst_maxburst = 8;
+ sconfig->src_maxburst = priv->cfg->max_burst;
+ sconfig->dst_maxburst = priv->cfg->max_burst;
if (vchan->is_dedicated)
promise = generate_ddma_promise(chan, src, dest, len, sconfig);
@@ -654,11 +737,13 @@ sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
/* Configure memcpy mode */
if (vchan->is_dedicated) {
- promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
+ promise->cfg |=
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ddma_drq_sdram) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ddma_drq_sdram);
} else {
- promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
- SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+ promise->cfg |=
+ SUN4I_DMA_CFG_SRC_DRQ_TYPE(priv->cfg->ndma_drq_sdram) |
+ SUN4I_DMA_CFG_DST_DRQ_TYPE(priv->cfg->ndma_drq_sdram);
}
/* Fill the contract with our only promise */
@@ -673,6 +758,7 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
size_t period_len, enum dma_transfer_direction dir,
unsigned long flags)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -696,11 +782,11 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
if (vchan->is_dedicated) {
io_mode = SUN4I_DDMA_ADDR_MODE_IO;
linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ddma_drq_sdram;
} else {
io_mode = SUN4I_NDMA_ADDR_MODE_IO;
linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ndma_drq_sdram;
}
if (dir == DMA_MEM_TO_DEV) {
@@ -793,6 +879,7 @@ sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags, void *context)
{
+ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
struct dma_slave_config *sconfig = &vchan->cfg;
struct sun4i_dma_promise *promise;
@@ -818,11 +905,11 @@ sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (vchan->is_dedicated) {
io_mode = SUN4I_DDMA_ADDR_MODE_IO;
linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ddma_drq_sdram;
} else {
io_mode = SUN4I_NDMA_ADDR_MODE_IO;
linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
- ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+ ram_type = priv->cfg->ndma_drq_sdram;
}
if (dir == DMA_MEM_TO_DEV)
@@ -1150,6 +1237,10 @@ static int sun4i_dma_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->cfg = of_device_get_match_data(&pdev->dev);
+ if (!priv->cfg)
+ return -ENODEV;
+
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -1164,6 +1255,13 @@ static int sun4i_dma_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
+ if (priv->cfg->has_reset) {
+ priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->rst),
+ "Failed to get reset control\n");
+ }
+
platform_set_drvdata(pdev, priv);
spin_lock_init(&priv->lock);
@@ -1197,23 +1295,26 @@ static int sun4i_dma_probe(struct platform_device *pdev)
priv->slave.dev = &pdev->dev;
- priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
+ priv->pchans = devm_kcalloc(&pdev->dev, priv->cfg->dma_nr_max_channels,
sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
- if (!priv->vchans || !priv->pchans)
+ priv->pchans_used = devm_kcalloc(&pdev->dev,
+ BITS_TO_LONGS(priv->cfg->dma_nr_max_channels),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!priv->vchans || !priv->pchans || !priv->pchans_used)
return -ENOMEM;
/*
- * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
- * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
+ * [0..priv->cfg->ndma_nr_max_channels) are normal pchans, and
+ * [priv->cfg->ndma_nr_max_channels..priv->cfg->dma_nr_max_channels) are
* dedicated ones
*/
- for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
+ for (i = 0; i < priv->cfg->ndma_nr_max_channels; i++)
priv->pchans[i].base = priv->base +
SUN4I_NDMA_CHANNEL_REG_BASE(i);
- for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
+ for (j = 0; i < priv->cfg->dma_nr_max_channels; i++, j++) {
priv->pchans[i].base = priv->base +
SUN4I_DDMA_CHANNEL_REG_BASE(j);
priv->pchans[i].is_dedicated = 1;
@@ -1284,15 +1385,58 @@ static void sun4i_dma_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
}
+static struct sun4i_dma_config sun4i_a10_dma_cfg = {
+ .ndma_nr_max_channels = SUN4I_NDMA_NR_MAX_CHANNELS,
+ .ndma_nr_max_vchans = SUN4I_NDMA_NR_MAX_VCHANS,
+
+ .ddma_nr_max_channels = SUN4I_DDMA_NR_MAX_CHANNELS,
+ .ddma_nr_max_vchans = SUN4I_DDMA_NR_MAX_VCHANS,
+
+ .dma_nr_max_channels = SUN4I_DMA_NR_MAX_CHANNELS,
+
+ .set_dst_data_width = set_dst_data_width_a10,
+ .set_src_data_width = set_src_data_width_a10,
+ .convert_burst = convert_burst_a10,
+
+ .ndma_drq_sdram = SUN4I_NDMA_DRQ_TYPE_SDRAM,
+ .ddma_drq_sdram = SUN4I_DDMA_DRQ_TYPE_SDRAM,
+
+ .max_burst = SUN4I_MAX_BURST,
+ .has_reset = false,
+};
+
+static struct sun4i_dma_config suniv_f1c100s_dma_cfg = {
+ .ndma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS,
+ .ndma_nr_max_vchans = SUNIV_NDMA_NR_MAX_VCHANS,
+
+ .ddma_nr_max_channels = SUNIV_DDMA_NR_MAX_CHANNELS,
+ .ddma_nr_max_vchans = SUNIV_DDMA_NR_MAX_VCHANS,
+
+ .dma_nr_max_channels = SUNIV_NDMA_NR_MAX_CHANNELS +
+ SUNIV_DDMA_NR_MAX_CHANNELS,
+
+ .set_dst_data_width = set_dst_data_width_f1c100s,
+ .set_src_data_width = set_src_data_width_f1c100s,
+ .convert_burst = convert_burst_f1c100s,
+
+ .ndma_drq_sdram = SUNIV_NDMA_DRQ_TYPE_SDRAM,
+ .ddma_drq_sdram = SUNIV_DDMA_DRQ_TYPE_SDRAM,
+
+ .max_burst = SUNIV_MAX_BURST,
+ .has_reset = true,
+};
+
static const struct of_device_id sun4i_dma_match[] = {
- { .compatible = "allwinner,sun4i-a10-dma" },
+ { .compatible = "allwinner,sun4i-a10-dma", .data = &sun4i_a10_dma_cfg },
+ { .compatible = "allwinner,suniv-f1c100s-dma",
+ .data = &suniv_f1c100s_dma_cfg },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);
static struct platform_driver sun4i_dma_driver = {
.probe = sun4i_dma_probe,
- .remove_new = sun4i_dma_remove,
+ .remove = sun4i_dma_remove,
.driver = {
.name = "sun4i-dma",
.of_match_table = sun4i_dma_match,
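
Note: the sun4i-dma rework moves the hardcoded channel counts, burst converters, and SDRAM DRQ types behind a per-compatible sun4i_dma_config, fetched via of_device_get_match_data(), and sizes the pchans_used bitmap at probe time instead of a compile-time maximum. A hedged sketch of that pattern with a made-up "bar" device:

	#include <linux/bitops.h>
	#include <linux/device.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct bar_cfg {
		u32 nr_channels;
	};

	static const struct bar_cfg bar_v1_cfg = { .nr_channels = 8 };
	static const struct bar_cfg bar_v2_cfg = { .nr_channels = 16 };

	static const struct of_device_id bar_match[] = {
		{ .compatible = "vendor,bar-v1", .data = &bar_v1_cfg },
		{ .compatible = "vendor,bar-v2", .data = &bar_v2_cfg },
		{ /* sentinel */ },
	};

	static int bar_probe(struct platform_device *pdev)
	{
		const struct bar_cfg *cfg = of_device_get_match_data(&pdev->dev);
		unsigned long *used;

		if (!cfg)
			return -ENODEV;

		/* size the channel bitmap from the variant, not a compile-time max */
		used = devm_kcalloc(&pdev->dev, BITS_TO_LONGS(cfg->nr_channels),
				    sizeof(unsigned long), GFP_KERNEL);
		return used ? 0 : -ENOMEM;
	}
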
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 583bf49031cf..95ecb12caaa5 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -1488,7 +1488,7 @@ static void sun6i_dma_remove(struct platform_device *pdev)
static struct platform_driver sun6i_dma_driver = {
.probe = sun6i_dma_probe,
- .remove_new = sun6i_dma_remove,
+ .remove = sun6i_dma_remove,
.driver = {
.name = "sun6i-dma",
.of_match_table = sun6i_dma_match,
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 3642508e88bb..4d6fe0efa76e 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -231,6 +231,7 @@ struct tegra_dma_channel {
bool config_init;
char name[30];
enum dma_transfer_direction sid_dir;
+ enum dma_status status;
int id;
int irq;
int slave_id;
@@ -393,6 +394,8 @@ static int tegra_dma_pause(struct tegra_dma_channel *tdc)
tegra_dma_dump_chan_regs(tdc);
}
+ tdc->status = DMA_PAUSED;
+
return ret;
}
@@ -419,6 +422,8 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc)
val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
+
+ tdc->status = DMA_IN_PROGRESS;
}
static int tegra_dma_device_resume(struct dma_chan *dc)
@@ -544,6 +549,7 @@ static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
tegra_dma_sid_free(tdc);
tdc->dma_desc = NULL;
+ tdc->status = DMA_COMPLETE;
}
static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
@@ -716,6 +722,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
tdc->dma_desc = NULL;
}
+ tdc->status = DMA_COMPLETE;
tegra_dma_sid_free(tdc);
vchan_get_all_descriptors(&tdc->vc, &head);
spin_unlock_irqrestore(&tdc->vc.lock, flags);
@@ -769,6 +776,9 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
if (ret == DMA_COMPLETE)
return ret;
+ if (tdc->status == DMA_PAUSED)
+ ret = DMA_PAUSED;
+
spin_lock_irqsave(&tdc->vc.lock, flags);
vd = vchan_find_desc(&tdc->vc, cookie);
if (vd) {
@@ -1532,7 +1542,7 @@ static struct platform_driver tegra_dma_driver = {
.of_match_table = tegra_dma_of_match,
},
.probe = tegra_dma_probe,
- .remove_new = tegra_dma_remove,
+ .remove = tegra_dma_remove,
};
module_platform_driver(tegra_dma_driver);
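
Note: tegra186-gpc-dma starts tracking a per-channel dma_status so pause/resume is visible to clients: dma_cookie_status() can only report completion or progress, so tx_status overrides the result when the cached state is DMA_PAUSED. A sketch of that shape, with hypothetical my_chan/to_my_chan helpers:

	static enum dma_status my_tx_status(struct dma_chan *dc, dma_cookie_t cookie,
					    struct dma_tx_state *state)
	{
		struct my_chan *ch = to_my_chan(dc);
		enum dma_status ret = dma_cookie_status(dc, cookie, state);

		if (ret == DMA_COMPLETE)
			return ret;

		/* set to DMA_PAUSED in device_pause(), cleared in device_resume() */
		if (ch->status == DMA_PAUSED)
			ret = DMA_PAUSED;

		return ret;
	}
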
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 7d1acda2d72b..14a61e53a41b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1675,7 +1675,7 @@ static struct platform_driver tegra_dmac_driver = {
.of_match_table = tegra_dma_of_match,
},
.probe = tegra_dma_probe,
- .remove_new = tegra_dma_remove,
+ .remove = tegra_dma_remove,
};
module_platform_driver(tegra_dmac_driver);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 24ad7077c53b..6896da8ac7ef 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -43,6 +43,10 @@
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
+#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP 0x30
+#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ 0x70
+#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ 0x84
+
#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT 8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT 0
@@ -96,6 +100,7 @@ struct tegra_adma_chip_data {
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
bool has_outstanding_reqs;
+ void (*set_global_pg_config)(struct tegra_adma *tdma);
};
/*
@@ -151,6 +156,7 @@ struct tegra_adma {
struct dma_device dma_dev;
struct device *dev;
void __iomem *base_addr;
+ void __iomem *ch_base_addr;
struct clk *ahub_clk;
unsigned int nr_channels;
unsigned long *dma_chan_mask;
@@ -159,6 +165,7 @@ struct tegra_adma {
/* Used to store global command register state when suspending */
unsigned int global_cmd;
+ unsigned int ch_page_no;
const struct tegra_adma_chip_data *cdata;
@@ -176,6 +183,11 @@ static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}
+static inline void tdma_ch_global_write(struct tegra_adma *tdma, u32 reg, u32 val)
+{
+ writel(val, tdma->ch_base_addr + tdma->cdata->global_reg_offset + reg);
+}
+
static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
{
writel(val, tdc->chan_addr + reg);
@@ -217,13 +229,30 @@ static int tegra_adma_slave_config(struct dma_chan *dc,
return 0;
}
+static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
+{
+ /*
+ * Clear the default page1 channel group configs and program
+ * the global registers based on the actual page usage
+ */
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ, 0);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP + (tdma->ch_page_no * 0x4), 0xff);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ + (tdma->ch_page_no * 0x4), 0x1ffffff);
+ tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
+}
+
static int tegra_adma_init(struct tegra_adma *tdma)
{
u32 status;
int ret;
- /* Clear any interrupts */
- tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
+ /* Clear any channels group global interrupts */
+ tdma_ch_global_write(tdma, tdma->cdata->global_int_clear, 0x1);
+
+ if (!tdma->base_addr)
+ return 0;
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
@@ -237,6 +266,9 @@ static int tegra_adma_init(struct tegra_adma *tdma)
if (ret)
return ret;
+ if (tdma->cdata->set_global_pg_config)
+ tdma->cdata->set_global_pg_config(tdma);
+
/* Enable global ADMA registers */
tdma_write(tdma, ADMA_GLOBAL_CMD, 1);
@@ -736,7 +768,9 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
struct tegra_adma_chan *tdc;
int i;
- tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+ if (tdma->base_addr)
+ tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+
if (!tdma->global_cmd)
goto clk_disable;
@@ -777,7 +811,11 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
dev_err(dev, "ahub clk_enable failed: %d\n", ret);
return ret;
}
- tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (tdma->base_addr) {
+ tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (tdma->cdata->set_global_pg_config)
+ tdma->cdata->set_global_pg_config(tdma);
+ }
if (!tdma->global_cmd)
return 0;
@@ -817,6 +855,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
.has_outstanding_reqs = false,
+ .set_global_pg_config = NULL,
};
static const struct tegra_adma_chip_data tegra186_chip_data = {
@@ -833,6 +872,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
.has_outstanding_reqs = true,
+ .set_global_pg_config = tegra186_adma_global_page_config,
};
static const struct of_device_id tegra_adma_of_match[] = {
@@ -846,7 +886,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
{
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
- int ret, i;
+ struct resource *res_page, *res_base;
+ int ret, i, page_no;
cdata = of_device_get_match_data(&pdev->dev);
if (!cdata) {
@@ -865,9 +906,35 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma);
- tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(tdma->base_addr))
- return PTR_ERR(tdma->base_addr);
+ res_page = platform_get_resource_byname(pdev, IORESOURCE_MEM, "page");
+ if (res_page) {
+ tdma->ch_base_addr = devm_ioremap_resource(&pdev->dev, res_page);
+ if (IS_ERR(tdma->ch_base_addr))
+ return PTR_ERR(tdma->ch_base_addr);
+
+ res_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global");
+ if (res_base) {
+ page_no = (res_page->start - res_base->start) / cdata->ch_base_offset;
+ if (page_no <= 0)
+ return -EINVAL;
+ tdma->ch_page_no = page_no - 1;
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+ }
+ } else {
+ /* If no 'page' region is found, the reg DT binding is the legacy one */
+ res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_base) {
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+ } else {
+ return -ENODEV;
+ }
+
+ tdma->ch_base_addr = tdma->base_addr + cdata->ch_base_offset;
+ }
tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
if (IS_ERR(tdma->ahub_clk)) {
@@ -900,8 +967,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
if (!test_bit(i, tdma->dma_chan_mask))
continue;
- tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
- + (cdata->ch_reg_size * i);
+ tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq <= 0) {
@@ -1008,7 +1074,7 @@ static struct platform_driver tegra_admac_driver = {
.of_match_table = tegra_adma_of_match,
},
.probe = tegra_adma_probe,
- .remove_new = tegra_adma_remove,
+ .remove = tegra_adma_remove,
};
module_platform_driver(tegra_admac_driver);
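
Note: with the new "global"/"page" reg layout, tegra210-adma derives which channel page it owns from the offset between the two resources. The arithmetic is simple; a sketch with made-up addresses (ch_base_offset = 0x10000, so a page at global + 0x20000 is page 2, stored as ch_page_no = 1):

	#include <linux/errno.h>
	#include <linux/ioport.h>

	static int adma_page_index(resource_size_t page_start,
				   resource_size_t global_start, u32 ch_base_offset)
	{
		int page_no = (page_start - global_start) / ch_base_offset;

		/* page 0 is the global region itself, so it is not a valid page */
		return page_no <= 0 ? -EINVAL : page_no - 1;
	}
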
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index a8bb70c2d109..8d8c3d6038fc 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1243,7 +1243,7 @@ static const struct dev_pm_ops cppi41_pm_ops = {
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
- .remove_new = cppi41_dma_remove,
+ .remove = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
.pm = &cppi41_pm_ops,
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 5f8d2e93ff3f..4ece125b2ae7 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -208,7 +208,6 @@ struct edma_desc {
struct edma_cc;
struct edma_tc {
- struct device_node *node;
u16 id;
};
@@ -2460,19 +2459,19 @@ static int edma_probe(struct platform_device *pdev)
goto err_reg1;
}
- for (i = 0;; i++) {
+ for (i = 0; i < ecc->num_tc; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
1, i, &tc_args);
- if (ret || i == ecc->num_tc)
+ if (ret)
break;
- ecc->tc_list[i].node = tc_args.np;
ecc->tc_list[i].id = i;
queue_priority_mapping[i][1] = tc_args.args[0];
if (queue_priority_mapping[i][1] > lowest_priority) {
lowest_priority = queue_priority_mapping[i][1];
info->default_queue = i;
}
+ of_node_put(tc_args.np);
}
/* See if we have optional dma-channel-mask array */
@@ -2636,7 +2635,7 @@ static const struct dev_pm_ops edma_pm_ops = {
static struct platform_driver edma_driver = {
.probe = edma_probe,
- .remove_new = edma_remove,
+ .remove = edma_remove,
.driver = {
.name = "edma",
.pm = &edma_pm_ops,
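
Note: the edma hunk fixes a device-node reference leak. of_parse_phandle_with_fixed_args() takes a reference on the node it returns, and since the pointer is no longer stored in edma_tc, the loop can (and must) drop it right away; bounding the loop by num_tc also replaces the old parse-until-failure exit. The general shape of the pattern:

	#include <linux/of.h>

	static void walk_tptcs(struct device_node *node, int num_tc)
	{
		struct of_phandle_args args;
		int i;

		for (i = 0; i < num_tc; i++) {
			if (of_parse_phandle_with_fixed_args(node, "ti,tptcs",
							     1, i, &args))
				break;
			/* ... consume args.args[0] (the TC queue priority) ... */
			of_node_put(args.np);	/* drop the reference taken above */
		}
	}
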
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 406ee199c2ac..7ed1956b4642 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -3185,27 +3185,40 @@ static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
d->static_tr.elcnt = elcnt;
- /*
- * PDMA must to close the packet when the channel is in packet mode.
- * For TR mode when the channel is not cyclic we also need PDMA to close
- * the packet otherwise the transfer will stall because PDMA holds on
- * the data it has received from the peripheral.
- */
if (uc->config.pkt_mode || !uc->cyclic) {
+ /*
+ * PDMA must close the packet when the channel is in packet mode.
+ * For TR mode when the channel is not cyclic we also need PDMA
+ * to close the packet otherwise the transfer will stall because
+ * PDMA holds on the data it has received from the peripheral.
+ */
unsigned int div = dev_width * elcnt;
if (uc->cyclic)
d->static_tr.bstcnt = d->residue / d->sglen / div;
else
d->static_tr.bstcnt = d->residue / div;
+ } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA &&
+ uc->config.dir == DMA_DEV_TO_MEM &&
+ uc->cyclic) {
+ /*
+ * For cyclic mode with BCDMA we have to set EOP in each TR to
+ * prevent short packet errors seen on channel teardown. So the
+ * PDMA must close the packet after every TR transfer by setting
+ * burst count equal to the number of bytes transferred.
+ */
+ struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base;
- if (uc->config.dir == DMA_DEV_TO_MEM &&
- d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
- return -EINVAL;
+ d->static_tr.bstcnt =
+ (tr_req->icnt0 * tr_req->icnt1) / dev_width;
} else {
d->static_tr.bstcnt = 0;
}
+ if (uc->config.dir == DMA_DEV_TO_MEM &&
+ d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
+ return -EINVAL;
+
return 0;
}
@@ -3450,8 +3463,9 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -3476,6 +3490,7 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
unsigned int i;
int num_tr;
+ u32 period_csf = 0;
num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
&tr0_cnt1, &tr1_cnt0);
@@ -3498,6 +3513,20 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
period_addr = buf_addr |
((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+ /*
+ * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the
+ * last TR of a descriptor, to mark the packet as complete.
+ * This is required for getting the teardown completion message in case
+ * of TX, and to avoid short-packet error in case of RX.
+ *
+ * As we are in cyclic mode, we do not know which period might be the
+ * last one, so set the flag for each period.
+ */
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+ uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+ period_csf = CPPI5_TR_CSF_EOP;
+ }
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -3525,8 +3554,10 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
}
if (!(flags & DMA_PREP_INTERRUPT))
- cppi5_tr_csf_set(&tr_req[tr_idx].flags,
- CPPI5_TR_CSF_SUPR_EVT);
+ period_csf |= CPPI5_TR_CSF_SUPR_EVT;
+
+ if (period_csf)
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, period_csf);
period_addr += period_len;
}
@@ -3655,8 +3686,9 @@ udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
/* static TR for remote PDMA */
if (udma_configure_statictr(uc, d, dev_width, burst)) {
dev_err(uc->ud->dev,
- "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
- __func__, d->static_tr.bstcnt);
+ "%s: StaticTR Z is limited to maximum %u (%u)\n",
+ __func__, uc->ud->match_data->statictr_z_mask,
+ d->static_tr.bstcnt);
udma_free_hwdesc(uc, d);
kfree(d);
@@ -4372,6 +4404,18 @@ static struct udma_match_data j721s2_bcdma_csi_data = {
.soc_data = &j721s2_bcdma_csi_soc_data,
};
+static struct udma_match_data j722s_bcdma_csi_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &j721s2_bcdma_csi_soc_data,
+};
+
static const struct of_device_id udma_of_match[] = {
{
.compatible = "ti,am654-navss-main-udmap",
@@ -4403,6 +4447,10 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,j721s2-dmss-bcdma-csi",
.data = &j721s2_bcdma_csi_data,
},
+ {
+ .compatible = "ti,j722s-dmss-bcdma-csi",
+ .data = &j722s_bcdma_csi_data,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, udma_of_match);
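A sketch (not the driver's code) of the hoisted bounds check above: every branch that computes a burst count now funnels through one comparison against the SoC-specific Z-field mask, so BCDMA's new EOP path is covered as well, and the error message can report the real limit instead of a hard-coded 4095.

#include <linux/errno.h>
#include <linux/types.h>

/* 'statictr_z_mask' mirrors the match-data field used above. */
static int check_bstcnt(u32 bstcnt, u32 statictr_z_mask, bool dev_to_mem)
{
        if (dev_to_mem && bstcnt > statictr_z_mask)
                return -EINVAL; /* would overflow the static TR Z field */
        return 0;
}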
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 6ab9bfbdc480..8c023c6e623a 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1915,7 +1915,7 @@ MODULE_DEVICE_TABLE(of, omap_dma_match);
static struct platform_driver omap_dma_driver = {
.probe = omap_dma_probe,
- .remove_new = omap_dma_remove,
+ .remove = omap_dma_remove,
.driver = {
.name = "omap-dma-engine",
.of_match_table = omap_dma_match,
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 7410025605e0..ecaf002558af 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -761,7 +761,7 @@ static struct platform_driver td_driver = {
.name = DRIVER_NAME,
},
.probe = td_probe,
- .remove_new = td_remove,
+ .remove = td_remove,
};
module_platform_driver(td_driver);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 44ba377b4b5a..35d5221683b2 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1260,14 +1260,14 @@ static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
};
static struct platform_driver txx9dmac_chan_driver = {
- .remove_new = txx9dmac_chan_remove,
+ .remove = txx9dmac_chan_remove,
.driver = {
.name = "txx9dmac-chan",
},
};
static struct platform_driver txx9dmac_driver = {
- .remove_new = txx9dmac_remove,
+ .remove = txx9dmac_remove,
.shutdown = txx9dmac_shutdown,
.driver = {
.name = "txx9dmac",
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index ad7125f6e2ca..7a99f86ecb5a 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -493,7 +493,7 @@ MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);
static struct platform_driver uniphier_mdmac_driver = {
.probe = uniphier_mdmac_probe,
- .remove_new = uniphier_mdmac_remove,
+ .remove = uniphier_mdmac_remove,
.driver = {
.name = "uniphier-mio-dmac",
.of_match_table = uniphier_mdmac_match,
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
index 3ce2dc2ad9de..ceeb6171c9d1 100644
--- a/drivers/dma/uniphier-xdmac.c
+++ b/drivers/dma/uniphier-xdmac.c
@@ -603,7 +603,7 @@ MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);
static struct platform_driver uniphier_xdmac_driver = {
.probe = uniphier_xdmac_probe,
- .remove_new = uniphier_xdmac_remove,
+ .remove = uniphier_xdmac_remove,
.driver = {
.name = "uniphier-xdmac",
.of_match_table = uniphier_xdmac_match,
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 275848a9c450..f64624ea44ad 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1815,7 +1815,7 @@ MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
static struct platform_driver xgene_dma_driver = {
.probe = xgene_dma_probe,
- .remove_new = xgene_dma_remove,
+ .remove = xgene_dma_remove,
.driver = {
.name = "X-Gene-DMA",
.of_match_table = xgene_dma_of_match_ptr,
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 718842fdaf98..0d88b1a670e1 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -390,15 +390,11 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
*/
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
- int ret;
struct xdma_device *xdev = xchan->xdev_hdl;
/* clear run stop bit to prevent any further auto-triggering */
- ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
- CHAN_CTRL_RUN_STOP);
- if (ret)
- return ret;
- return ret;
+ return regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
}
/**
@@ -1315,7 +1311,7 @@ static struct platform_driver xdma_driver = {
},
.id_table = xdma_id_table,
.probe = xdma_probe,
- .remove_new = xdma_remove,
+ .remove = xdma_remove,
};
module_platform_driver(xdma_driver);
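The shape of the xdma_xfer_stop() simplification, sketched with placeholder register names: when the only failure source is the final register write, its status can be returned directly instead of staging it in a local that both paths returned anyway.

#include <linux/bits.h>
#include <linux/regmap.h>

#define DEMO_CTRL_W1C   0x08            /* hypothetical write-1-to-clear offset */
#define DEMO_RUN_STOP   BIT(0)          /* hypothetical run/stop bit */

static int demo_stop(struct regmap *rmap, u32 chan_base)
{
        /* clearing RUN_STOP halts further auto-triggering */
        return regmap_write(rmap, chan_base + DEMO_CTRL_W1C, DEMO_RUN_STOP);
}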
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5eb51ae93e89..108a7287f4cd 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1404,16 +1404,18 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- j = chan->desc_submitcount;
- reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
- if (chan->direction == DMA_MEM_TO_DEV) {
- reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
- reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
- } else {
- reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
- reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+ if (config->park) {
+ j = chan->desc_submitcount;
+ reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+ } else {
+ reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
+ }
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
}
- dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
/* Start the hardware */
xilinx_dma_start(chan);
@@ -3271,7 +3273,7 @@ static struct platform_driver xilinx_vdma_driver = {
.of_match_table = xilinx_dma_of_ids,
},
.probe = xilinx_dma_probe,
- .remove_new = xilinx_dma_remove,
+ .remove = xilinx_dma_remove,
};
module_platform_driver(xilinx_vdma_driver);
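A hedged sketch of the gated read-modify-write introduced in xilinx_vdma_start_transfer(): the park pointer register is only touched when the channel is configured to park on a frame; in circular mode it is left alone. Register and mask names below are placeholders for the XILINX_DMA_REG_PARK_PTR accessors.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>

#define DEMO_PARK_PTR           0x28            /* hypothetical offset */
#define DEMO_RD_REF_MASK        GENMASK(4, 0)   /* hypothetical field */

static void demo_set_park_frame(void __iomem *regs, bool park, u32 frame)
{
        u32 reg;

        if (!park)      /* circular mode: leave the park pointer alone */
                return;

        reg = readl(regs + DEMO_PARK_PTR);
        reg &= ~DEMO_RD_REF_MASK;
        reg |= FIELD_PREP(DEMO_RD_REF_MASK, frame);
        writel(reg, regs + DEMO_PARK_PTR);
}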
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index be87764af9e8..ee5d9fdbfd7f 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -1863,7 +1863,7 @@ MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
static struct platform_driver xilinx_dpdma_driver = {
.probe = xilinx_dpdma_probe,
- .remove_new = xilinx_dpdma_remove,
+ .remove = xilinx_dpdma_remove,
.driver = {
.name = "xilinx-zynqmp-dpdma",
.of_match_table = xilinx_dpdma_of_match,
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 9ae46f1198fe..d05fc5fcc77d 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -366,7 +366,7 @@ static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
}
writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
- /* Clearing the interrupt account rgisters */
+ /* Clearing the interrupt account registers */
val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
@@ -1192,7 +1192,7 @@ static struct platform_driver zynqmp_dma_driver = {
.pm = &zynqmp_dma_dev_pm_ops,
},
.probe = zynqmp_dma_probe,
- .remove_new = zynqmp_dma_remove,
+ .remove = zynqmp_dma_remove,
};
module_platform_driver(zynqmp_dma_driver);
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index fc0280dcddd1..c130f87147fa 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -170,6 +170,27 @@ dpll_msg_add_temp(struct sk_buff *msg, struct dpll_device *dpll,
}
static int
+dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) = { 0 };
+ enum dpll_clock_quality_level ql;
+ int ret;
+
+ if (!ops->clock_quality_level_get)
+ return 0;
+ ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack);
+ if (ret)
+ return ret;
+ for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX)
+ if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
dpll_msg_add_pin_prio(struct sk_buff *msg, struct dpll_pin *pin,
struct dpll_pin_ref *ref,
struct netlink_ext_ack *extack)
@@ -559,6 +580,9 @@ dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
ret = dpll_msg_add_lock_status(msg, dpll, extack);
if (ret)
return ret;
+ ret = dpll_msg_add_clock_quality_level(msg, dpll, extack);
+ if (ret)
+ return ret;
ret = dpll_msg_add_mode(msg, dpll, extack);
if (ret)
return ret;
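A minimal sketch of the reporting pattern added in dpll_msg_add_clock_quality_level(): each bit set in the driver-filled bitmap becomes one repeated u32 netlink attribute. The attribute id and bitmap size here are stand-ins for the DPLL uAPI constants used above.

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <net/netlink.h>

#define DEMO_A_QL       1       /* hypothetical attribute id */
#define DEMO_QL_MAX     16      /* hypothetical bitmap size */

static int demo_put_quality_levels(struct sk_buff *msg,
                                   const unsigned long *qls)
{
        unsigned int ql;

        for_each_set_bit(ql, qls, DEMO_QL_MAX)
                if (nla_put_u32(msg, DEMO_A_QL, ql))
                        return -EMSGSIZE;       /* message buffer exhausted */

        return 0;
}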
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 81af6c344d6b..2051a7c944a5 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -78,6 +78,7 @@ config EDAC_GHES
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64)"
depends on AMD_NB && EDAC_DECODE_MCE
+ depends on AMD_NODE
imply AMD_ATL
help
Support for error detection and correction of DRAM ECC errors on
@@ -303,32 +304,6 @@ config EDAC_PASEMI
Support for error detection and correction on PA Semi
PWRficient.
-config EDAC_CELL
- tristate "Cell Broadband Engine memory controller"
- depends on PPC_CELL_COMMON
- help
- Support for error detection and correction on the
- Cell Broadband Engine internal memory controller
- on platform without a hypervisor
-
-config EDAC_AMD8131
- tristate "AMD8131 HyperTransport PCI-X Tunnel"
- depends on PCI && PPC_MAPLE
- help
- Support for error detection and correction on the
- AMD8131 HyperTransport PCI-X Tunnel chip.
- Note, add more Kconfig dependency if it's adopted
- on some machine other than Maple.
-
-config EDAC_AMD8111
- tristate "AMD8111 HyperTransport I/O Hub"
- depends on PCI && PPC_MAPLE
- help
- Support for error detection and correction on the
- AMD8111 HyperTransport I/O Hub chip.
- Note, add more Kconfig dependency if it's adopted
- on some machine other than Maple.
-
config EDAC_CPC925
tristate "IBM CPC925 Memory Controller (PPC970FX)"
depends on PPC64
@@ -564,5 +539,13 @@ config EDAC_VERSAL
Support injecting both correctable and uncorrectable errors
for debugging purposes.
+config EDAC_LOONGSON
+ tristate "Loongson Memory Controller"
+ depends on LOONGARCH && ACPI
+ help
+ Support for error detection and correction on the Loongson
+ family memory controller. This driver reports single bit
+ errors (CE) only. Loongson-3A5000/3C5000/3D5000/3A6000/3C6000
+ are compatible.
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index faf310eec4a6..89789ba8275f 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -62,10 +62,6 @@ obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o
i10nm_edac-y := i10nm_base.o
obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
-obj-$(CONFIG_EDAC_CELL) += cell_edac.o
-obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
-obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
-
obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
@@ -88,3 +84,4 @@ obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
obj-$(CONFIG_EDAC_VERSAL) += versal_edac.o
+obj-$(CONFIG_EDAC_LOONGSON) += loongson_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index fe89f5c4837f..3e971f902363 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -482,7 +482,7 @@ static const struct dev_pm_ops altr_sdram_pm_ops = {
static struct platform_driver altr_sdram_edac_driver = {
.probe = altr_sdram_probe,
- .remove_new = altr_sdram_remove,
+ .remove = altr_sdram_remove,
.driver = {
.name = "altr_sdram_edac",
#ifdef CONFIG_PM
@@ -816,7 +816,7 @@ static void altr_edac_device_remove(struct platform_device *pdev)
static struct platform_driver altr_edac_device_driver = {
.probe = altr_edac_device_probe,
- .remove_new = altr_edac_device_remove,
+ .remove = altr_edac_device_remove,
.driver = {
.name = "altr_edac_device",
.of_match_table = altr_edac_device_of_match,
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ddfbdb66b794..8414ceb43e4a 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2,6 +2,7 @@
#include <linux/ras.h>
#include "amd64_edac.h"
#include <asm/amd_nb.h>
+#include <asm/amd_node.h>
static struct edac_pci_ctl_info *pci_ctl;
@@ -3362,36 +3363,24 @@ static bool dct_ecc_enabled(struct amd64_pvt *pvt)
static bool umc_ecc_enabled(struct amd64_pvt *pvt)
{
- u8 umc_en_mask = 0, ecc_en_mask = 0;
- u16 nid = pvt->mc_node_id;
struct amd64_umc *umc;
- u8 ecc_en = 0, i;
+ bool ecc_en = false;
+ int i;
+ /* Check whether at least one UMC is enabled: */
for_each_umc(i) {
umc = &pvt->umc[i];
- /* Only check enabled UMCs. */
- if (!(umc->sdp_ctrl & UMC_SDP_INIT))
- continue;
-
- umc_en_mask |= BIT(i);
-
- if (umc->umc_cap_hi & UMC_ECC_ENABLED)
- ecc_en_mask |= BIT(i);
+ if (umc->sdp_ctrl & UMC_SDP_INIT &&
+ umc->umc_cap_hi & UMC_ECC_ENABLED) {
+ ecc_en = true;
+ break;
+ }
}
- /* Check whether at least one UMC is enabled: */
- if (umc_en_mask)
- ecc_en = umc_en_mask == ecc_en_mask;
- else
- edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
-
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, (ecc_en ? "enabled" : "disabled"));
- if (!ecc_en)
- return false;
- else
- return true;
+ return ecc_en;
}
static inline void
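A sketch of the any-vs-all change in umc_ecc_enabled(): instead of accumulating an "enabled" mask and an "ECC" mask and requiring them to match, the loop now exits as soon as one enabled UMC reports ECC. The types and flag bits below are illustrative stand-ins, not the amd64_edac definitions.

#include <linux/bits.h>
#include <linux/types.h>

struct demo_umc {
        u32 sdp_ctrl;
        u32 cap_hi;
};

#define DEMO_SDP_INIT           BIT(31) /* hypothetical "UMC enabled" bit */
#define DEMO_ECC_ENABLED        BIT(30) /* hypothetical "ECC on" bit */

static bool demo_ecc_enabled(const struct demo_umc *umc, int num_umc)
{
        int i;

        for (i = 0; i < num_umc; i++)
                if (umc[i].sdp_ctrl & DEMO_SDP_INIT &&
                    umc[i].cap_hi & DEMO_ECC_ENABLED)
                        return true;    /* one ECC-capable UMC is enough */

        return false;
}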
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
deleted file mode 100644
index a6d3013d5823..000000000000
--- a/drivers/edac/amd8111_edac.c
+++ /dev/null
@@ -1,596 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * amd8111_edac.c, AMD8111 Hyper Transport chip EDAC kernel module
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/edac.h>
-#include <linux/pci_ids.h>
-#include <asm/io.h>
-
-#include "edac_module.h"
-#include "amd8111_edac.h"
-
-#define AMD8111_EDAC_REVISION " Ver: 1.0.0"
-#define AMD8111_EDAC_MOD_STR "amd8111_edac"
-
-#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
-
-enum amd8111_edac_devs {
- LPC_BRIDGE = 0,
-};
-
-enum amd8111_edac_pcis {
- PCI_BRIDGE = 0,
-};
-
-/* Wrapper functions for accessing PCI configuration space */
-static int edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
-{
- int ret;
-
- ret = pci_read_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Read Error at 0x%x\n", reg);
-
- return ret;
-}
-
-static void edac_pci_read_byte(struct pci_dev *dev, int reg, u8 *val8)
-{
- int ret;
-
- ret = pci_read_config_byte(dev, reg, val8);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Read Error at 0x%x\n", reg);
-}
-
-static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
-{
- int ret;
-
- ret = pci_write_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Write Error at 0x%x\n", reg);
-}
-
-static void edac_pci_write_byte(struct pci_dev *dev, int reg, u8 val8)
-{
- int ret;
-
- ret = pci_write_config_byte(dev, reg, val8);
- if (ret != 0)
- printk(KERN_ERR AMD8111_EDAC_MOD_STR
- " PCI Access Write Error at 0x%x\n", reg);
-}
-
-/*
- * device-specific methods for amd8111 PCI Bridge Controller
- *
- * Error Reporting and Handling for amd8111 chipset could be found
- * in its datasheet 3.1.2 section, P37
- */
-static void amd8111_pci_bridge_init(struct amd8111_pci_info *pci_info)
-{
- u32 val32;
- struct pci_dev *dev = pci_info->dev;
-
- /* First clear error detection flags on the host interface */
-
- /* Clear SSE/SMA/STA flags in the global status register*/
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- if (val32 & PCI_STSCMD_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- /* Clear CRC and Link Fail flags in HT Link Control reg */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- if (val32 & HT_LINK_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- /* Second clear all fault on the secondary interface */
-
- /* Clear error flags in the memory-base limit reg. */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- /* Clear Discard Timer Expired flag in Interrupt/Bridge Control reg */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- if (val32 & PCI_INTBRG_CTRL_CLEAR_MASK)
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
-
- /* Last enable error detections */
- if (edac_op_state == EDAC_OPSTATE_POLL) {
- /* Enable System Error reporting in global status register */
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- val32 |= PCI_STSCMD_SERREN;
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- /* Enable CRC Sync flood packets to HyperTransport Link */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- val32 |= HT_LINK_CRCFEN;
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- /* Enable SSE reporting etc in Interrupt control reg */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- val32 |= PCI_INTBRG_CTRL_POLL_MASK;
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
- }
-}
-
-static void amd8111_pci_bridge_exit(struct amd8111_pci_info *pci_info)
-{
- u32 val32;
- struct pci_dev *dev = pci_info->dev;
-
- if (edac_op_state == EDAC_OPSTATE_POLL) {
- /* Disable System Error reporting */
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- val32 &= ~PCI_STSCMD_SERREN;
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- /* Disable CRC flood packets */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- val32 &= ~HT_LINK_CRCFEN;
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- /* Disable DTSERREN/MARSP/SERREN in Interrupt Control reg */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- val32 &= ~PCI_INTBRG_CTRL_POLL_MASK;
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
- }
-}
-
-static void amd8111_pci_bridge_check(struct edac_pci_ctl_info *edac_dev)
-{
- struct amd8111_pci_info *pci_info = edac_dev->pvt_info;
- struct pci_dev *dev = pci_info->dev;
- u32 val32;
-
- /* Check out PCI Bridge Status and Command Register */
- edac_pci_read_dword(dev, REG_PCI_STSCMD, &val32);
- if (val32 & PCI_STSCMD_CLEAR_MASK) {
- printk(KERN_INFO "Error(s) in PCI bridge status and command"
- "register on device %s\n", pci_info->ctl_name);
- printk(KERN_INFO "SSE: %d, RMA: %d, RTA: %d\n",
- (val32 & PCI_STSCMD_SSE) != 0,
- (val32 & PCI_STSCMD_RMA) != 0,
- (val32 & PCI_STSCMD_RTA) != 0);
-
- val32 |= PCI_STSCMD_CLEAR_MASK;
- edac_pci_write_dword(dev, REG_PCI_STSCMD, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check out HyperTransport Link Control Register */
- edac_pci_read_dword(dev, REG_HT_LINK, &val32);
- if (val32 & HT_LINK_LKFAIL) {
- printk(KERN_INFO "Error(s) in hypertransport link control"
- "register on device %s\n", pci_info->ctl_name);
- printk(KERN_INFO "LKFAIL: %d\n",
- (val32 & HT_LINK_LKFAIL) != 0);
-
- val32 |= HT_LINK_LKFAIL;
- edac_pci_write_dword(dev, REG_HT_LINK, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check out PCI Interrupt and Bridge Control Register */
- edac_pci_read_dword(dev, REG_PCI_INTBRG_CTRL, &val32);
- if (val32 & PCI_INTBRG_CTRL_DTSTAT) {
- printk(KERN_INFO "Error(s) in PCI interrupt and bridge control"
- "register on device %s\n", pci_info->ctl_name);
- printk(KERN_INFO "DTSTAT: %d\n",
- (val32 & PCI_INTBRG_CTRL_DTSTAT) != 0);
-
- val32 |= PCI_INTBRG_CTRL_DTSTAT;
- edac_pci_write_dword(dev, REG_PCI_INTBRG_CTRL, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check out PCI Bridge Memory Base-Limit Register */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_CLEAR_MASK) {
- printk(KERN_INFO
- "Error(s) in mem limit register on %s device\n",
- pci_info->ctl_name);
- printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
- "RTA: %d, STA: %d, MDPE: %d\n",
- (val32 & MEM_LIMIT_DPE) != 0,
- (val32 & MEM_LIMIT_RSE) != 0,
- (val32 & MEM_LIMIT_RMA) != 0,
- (val32 & MEM_LIMIT_RTA) != 0,
- (val32 & MEM_LIMIT_STA) != 0,
- (val32 & MEM_LIMIT_MDPE) != 0);
-
- val32 |= MEM_LIMIT_CLEAR_MASK;
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-}
-
-static struct resource *legacy_io_res;
-static int at_compat_reg_broken;
-#define LEGACY_NR_PORTS 1
-
-/* device-specific methods for amd8111 LPC Bridge device */
-static void amd8111_lpc_bridge_init(struct amd8111_dev_info *dev_info)
-{
- u8 val8;
- struct pci_dev *dev = dev_info->dev;
-
- /* First clear REG_AT_COMPAT[SERR, IOCHK] if necessary */
- legacy_io_res = request_region(REG_AT_COMPAT, LEGACY_NR_PORTS,
- AMD8111_EDAC_MOD_STR);
- if (!legacy_io_res)
- printk(KERN_INFO "%s: failed to request legacy I/O region "
- "start %d, len %d\n", __func__,
- REG_AT_COMPAT, LEGACY_NR_PORTS);
- else {
- val8 = __do_inb(REG_AT_COMPAT);
- if (val8 == 0xff) { /* buggy port */
- printk(KERN_INFO "%s: port %d is buggy, not supported"
- " by hardware?\n", __func__, REG_AT_COMPAT);
- at_compat_reg_broken = 1;
- release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
- legacy_io_res = NULL;
- } else {
- u8 out8 = 0;
- if (val8 & AT_COMPAT_SERR)
- out8 = AT_COMPAT_CLRSERR;
- if (val8 & AT_COMPAT_IOCHK)
- out8 |= AT_COMPAT_CLRIOCHK;
- if (out8 > 0)
- __do_outb(out8, REG_AT_COMPAT);
- }
- }
-
- /* Second clear error flags on LPC bridge */
- edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
- if (val8 & IO_CTRL_1_CLEAR_MASK)
- edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
-}
-
-static void amd8111_lpc_bridge_exit(struct amd8111_dev_info *dev_info)
-{
- if (legacy_io_res)
- release_region(REG_AT_COMPAT, LEGACY_NR_PORTS);
-}
-
-static void amd8111_lpc_bridge_check(struct edac_device_ctl_info *edac_dev)
-{
- struct amd8111_dev_info *dev_info = edac_dev->pvt_info;
- struct pci_dev *dev = dev_info->dev;
- u8 val8;
-
- edac_pci_read_byte(dev, REG_IO_CTRL_1, &val8);
- if (val8 & IO_CTRL_1_CLEAR_MASK) {
- printk(KERN_INFO
- "Error(s) in IO control register on %s device\n",
- dev_info->ctl_name);
- printk(KERN_INFO "LPC ERR: %d, PW2LPC: %d\n",
- (val8 & IO_CTRL_1_LPC_ERR) != 0,
- (val8 & IO_CTRL_1_PW2LPC) != 0);
-
- val8 |= IO_CTRL_1_CLEAR_MASK;
- edac_pci_write_byte(dev, REG_IO_CTRL_1, val8);
-
- edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
- }
-
- if (at_compat_reg_broken == 0) {
- u8 out8 = 0;
- val8 = __do_inb(REG_AT_COMPAT);
- if (val8 & AT_COMPAT_SERR)
- out8 = AT_COMPAT_CLRSERR;
- if (val8 & AT_COMPAT_IOCHK)
- out8 |= AT_COMPAT_CLRIOCHK;
- if (out8 > 0) {
- __do_outb(out8, REG_AT_COMPAT);
- edac_device_handle_ue(edac_dev, 0, 0,
- edac_dev->ctl_name);
- }
- }
-}
-
-/* General devices represented by edac_device_ctl_info */
-static struct amd8111_dev_info amd8111_devices[] = {
- [LPC_BRIDGE] = {
- .err_dev = PCI_DEVICE_ID_AMD_8111_LPC,
- .ctl_name = "lpc",
- .init = amd8111_lpc_bridge_init,
- .exit = amd8111_lpc_bridge_exit,
- .check = amd8111_lpc_bridge_check,
- },
- {0},
-};
-
-/* PCI controllers represented by edac_pci_ctl_info */
-static struct amd8111_pci_info amd8111_pcis[] = {
- [PCI_BRIDGE] = {
- .err_dev = PCI_DEVICE_ID_AMD_8111_PCI,
- .ctl_name = "AMD8111_PCI_Controller",
- .init = amd8111_pci_bridge_init,
- .exit = amd8111_pci_bridge_exit,
- .check = amd8111_pci_bridge_check,
- },
- {0},
-};
-
-static int amd8111_dev_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct amd8111_dev_info *dev_info = &amd8111_devices[id->driver_data];
- int ret = -ENODEV;
-
- dev_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
- dev_info->err_dev, NULL);
-
- if (!dev_info->dev) {
- printk(KERN_ERR "EDAC device not found:"
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, dev_info->err_dev,
- dev_info->ctl_name);
- goto err;
- }
-
- if (pci_enable_device(dev_info->dev)) {
- printk(KERN_ERR "failed to enable:"
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, dev_info->err_dev,
- dev_info->ctl_name);
- goto err_dev_put;
- }
-
- /*
- * we do not allocate extra private structure for
- * edac_device_ctl_info, but make use of existing
- * one instead.
- */
- dev_info->edac_idx = edac_device_alloc_index();
- dev_info->edac_dev =
- edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1,
- NULL, 0, 0, dev_info->edac_idx);
- if (!dev_info->edac_dev) {
- ret = -ENOMEM;
- goto err_dev_put;
- }
-
- dev_info->edac_dev->pvt_info = dev_info;
- dev_info->edac_dev->dev = &dev_info->dev->dev;
- dev_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
- dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- dev_info->edac_dev->edac_check = dev_info->check;
-
- if (dev_info->init)
- dev_info->init(dev_info);
-
- if (edac_device_add_device(dev_info->edac_dev) > 0) {
- printk(KERN_ERR "failed to add edac_dev for %s\n",
- dev_info->ctl_name);
- goto err_edac_free_ctl;
- }
-
- printk(KERN_INFO "added one edac_dev on AMD8111 "
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, dev_info->err_dev,
- dev_info->ctl_name);
-
- return 0;
-
-err_edac_free_ctl:
- edac_device_free_ctl_info(dev_info->edac_dev);
-err_dev_put:
- pci_dev_put(dev_info->dev);
-err:
- return ret;
-}
-
-static void amd8111_dev_remove(struct pci_dev *dev)
-{
- struct amd8111_dev_info *dev_info;
-
- for (dev_info = amd8111_devices; dev_info->err_dev; dev_info++)
- if (dev_info->dev->device == dev->device)
- break;
-
- if (!dev_info->err_dev) /* should never happen */
- return;
-
- if (dev_info->edac_dev) {
- edac_device_del_device(dev_info->edac_dev->dev);
- edac_device_free_ctl_info(dev_info->edac_dev);
- }
-
- if (dev_info->exit)
- dev_info->exit(dev_info);
-
- pci_dev_put(dev_info->dev);
-}
-
-static int amd8111_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct amd8111_pci_info *pci_info = &amd8111_pcis[id->driver_data];
- int ret = -ENODEV;
-
- pci_info->dev = pci_get_device(PCI_VENDOR_ID_AMD,
- pci_info->err_dev, NULL);
-
- if (!pci_info->dev) {
- printk(KERN_ERR "EDAC device not found:"
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, pci_info->err_dev,
- pci_info->ctl_name);
- goto err;
- }
-
- if (pci_enable_device(pci_info->dev)) {
- printk(KERN_ERR "failed to enable:"
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, pci_info->err_dev,
- pci_info->ctl_name);
- goto err_dev_put;
- }
-
- /*
- * we do not allocate extra private structure for
- * edac_pci_ctl_info, but make use of existing
- * one instead.
- */
- pci_info->edac_idx = edac_pci_alloc_index();
- pci_info->edac_dev = edac_pci_alloc_ctl_info(0, pci_info->ctl_name);
- if (!pci_info->edac_dev) {
- ret = -ENOMEM;
- goto err_dev_put;
- }
-
- pci_info->edac_dev->pvt_info = pci_info;
- pci_info->edac_dev->dev = &pci_info->dev->dev;
- pci_info->edac_dev->mod_name = AMD8111_EDAC_MOD_STR;
- pci_info->edac_dev->ctl_name = pci_info->ctl_name;
- pci_info->edac_dev->dev_name = dev_name(&pci_info->dev->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- pci_info->edac_dev->edac_check = pci_info->check;
-
- if (pci_info->init)
- pci_info->init(pci_info);
-
- if (edac_pci_add_device(pci_info->edac_dev, pci_info->edac_idx) > 0) {
- printk(KERN_ERR "failed to add edac_pci for %s\n",
- pci_info->ctl_name);
- goto err_edac_free_ctl;
- }
-
- printk(KERN_INFO "added one edac_pci on AMD8111 "
- "vendor %x, device %x, name %s\n",
- PCI_VENDOR_ID_AMD, pci_info->err_dev,
- pci_info->ctl_name);
-
- return 0;
-
-err_edac_free_ctl:
- edac_pci_free_ctl_info(pci_info->edac_dev);
-err_dev_put:
- pci_dev_put(pci_info->dev);
-err:
- return ret;
-}
-
-static void amd8111_pci_remove(struct pci_dev *dev)
-{
- struct amd8111_pci_info *pci_info;
-
- for (pci_info = amd8111_pcis; pci_info->err_dev; pci_info++)
- if (pci_info->dev->device == dev->device)
- break;
-
- if (!pci_info->err_dev) /* should never happen */
- return;
-
- if (pci_info->edac_dev) {
- edac_pci_del_device(pci_info->edac_dev->dev);
- edac_pci_free_ctl_info(pci_info->edac_dev);
- }
-
- if (pci_info->exit)
- pci_info->exit(pci_info);
-
- pci_dev_put(pci_info->dev);
-}
-
-/* PCI Device ID talbe for general EDAC device */
-static const struct pci_device_id amd8111_edac_dev_tbl[] = {
- {
- PCI_VEND_DEV(AMD, 8111_LPC),
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- .driver_data = LPC_BRIDGE,
- },
- {
- 0,
- } /* table is NULL-terminated */
-};
-MODULE_DEVICE_TABLE(pci, amd8111_edac_dev_tbl);
-
-static struct pci_driver amd8111_edac_dev_driver = {
- .name = "AMD8111_EDAC_DEV",
- .probe = amd8111_dev_probe,
- .remove = amd8111_dev_remove,
- .id_table = amd8111_edac_dev_tbl,
-};
-
-/* PCI Device ID table for EDAC PCI controller */
-static const struct pci_device_id amd8111_edac_pci_tbl[] = {
- {
- PCI_VEND_DEV(AMD, 8111_PCI),
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- .driver_data = PCI_BRIDGE,
- },
- {
- 0,
- } /* table is NULL-terminated */
-};
-MODULE_DEVICE_TABLE(pci, amd8111_edac_pci_tbl);
-
-static struct pci_driver amd8111_edac_pci_driver = {
- .name = "AMD8111_EDAC_PCI",
- .probe = amd8111_pci_probe,
- .remove = amd8111_pci_remove,
- .id_table = amd8111_edac_pci_tbl,
-};
-
-static int __init amd8111_edac_init(void)
-{
- int val;
-
- printk(KERN_INFO "AMD8111 EDAC driver " AMD8111_EDAC_REVISION "\n");
- printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
-
- /* Only POLL mode supported so far */
- edac_op_state = EDAC_OPSTATE_POLL;
-
- val = pci_register_driver(&amd8111_edac_dev_driver);
- val |= pci_register_driver(&amd8111_edac_pci_driver);
-
- return val;
-}
-
-static void __exit amd8111_edac_exit(void)
-{
- pci_unregister_driver(&amd8111_edac_pci_driver);
- pci_unregister_driver(&amd8111_edac_dev_driver);
-}
-
-
-module_init(amd8111_edac_init);
-module_exit(amd8111_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
-MODULE_DESCRIPTION("AMD8111 HyperTransport I/O Hub EDAC kernel module");
diff --git a/drivers/edac/amd8111_edac.h b/drivers/edac/amd8111_edac.h
deleted file mode 100644
index 200cab1b3e42..000000000000
--- a/drivers/edac/amd8111_edac.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * amd8111_edac.h, EDAC defs for AMD8111 hypertransport chip
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- */
-
-#ifndef _AMD8111_EDAC_H_
-#define _AMD8111_EDAC_H_
-
-/************************************************************
- * PCI Bridge Status and Command Register, DevA:0x04
- ************************************************************/
-#define REG_PCI_STSCMD 0x04
-enum pci_stscmd_bits {
- PCI_STSCMD_SSE = BIT(30),
- PCI_STSCMD_RMA = BIT(29),
- PCI_STSCMD_RTA = BIT(28),
- PCI_STSCMD_SERREN = BIT(8),
- PCI_STSCMD_CLEAR_MASK = (PCI_STSCMD_SSE |
- PCI_STSCMD_RMA |
- PCI_STSCMD_RTA)
-};
-
-/************************************************************
- * PCI Bridge Memory Base-Limit Register, DevA:0x1c
- ************************************************************/
-#define REG_MEM_LIM 0x1c
-enum mem_limit_bits {
- MEM_LIMIT_DPE = BIT(31),
- MEM_LIMIT_RSE = BIT(30),
- MEM_LIMIT_RMA = BIT(29),
- MEM_LIMIT_RTA = BIT(28),
- MEM_LIMIT_STA = BIT(27),
- MEM_LIMIT_MDPE = BIT(24),
- MEM_LIMIT_CLEAR_MASK = (MEM_LIMIT_DPE |
- MEM_LIMIT_RSE |
- MEM_LIMIT_RMA |
- MEM_LIMIT_RTA |
- MEM_LIMIT_STA |
- MEM_LIMIT_MDPE)
-};
-
-/************************************************************
- * HyperTransport Link Control Register, DevA:0xc4
- ************************************************************/
-#define REG_HT_LINK 0xc4
-enum ht_link_bits {
- HT_LINK_LKFAIL = BIT(4),
- HT_LINK_CRCFEN = BIT(1),
- HT_LINK_CLEAR_MASK = (HT_LINK_LKFAIL)
-};
-
-/************************************************************
- * PCI Bridge Interrupt and Bridge Control, DevA:0x3c
- ************************************************************/
-#define REG_PCI_INTBRG_CTRL 0x3c
-enum pci_intbrg_ctrl_bits {
- PCI_INTBRG_CTRL_DTSERREN = BIT(27),
- PCI_INTBRG_CTRL_DTSTAT = BIT(26),
- PCI_INTBRG_CTRL_MARSP = BIT(21),
- PCI_INTBRG_CTRL_SERREN = BIT(17),
- PCI_INTBRG_CTRL_PEREN = BIT(16),
- PCI_INTBRG_CTRL_CLEAR_MASK = (PCI_INTBRG_CTRL_DTSTAT),
- PCI_INTBRG_CTRL_POLL_MASK = (PCI_INTBRG_CTRL_DTSERREN |
- PCI_INTBRG_CTRL_MARSP |
- PCI_INTBRG_CTRL_SERREN)
-};
-
-/************************************************************
- * I/O Control 1 Register, DevB:0x40
- ************************************************************/
-#define REG_IO_CTRL_1 0x40
-enum io_ctrl_1_bits {
- IO_CTRL_1_NMIONERR = BIT(7),
- IO_CTRL_1_LPC_ERR = BIT(6),
- IO_CTRL_1_PW2LPC = BIT(1),
- IO_CTRL_1_CLEAR_MASK = (IO_CTRL_1_LPC_ERR | IO_CTRL_1_PW2LPC)
-};
-
-/************************************************************
- * Legacy I/O Space Registers
- ************************************************************/
-#define REG_AT_COMPAT 0x61
-enum at_compat_bits {
- AT_COMPAT_SERR = BIT(7),
- AT_COMPAT_IOCHK = BIT(6),
- AT_COMPAT_CLRIOCHK = BIT(3),
- AT_COMPAT_CLRSERR = BIT(2),
-};
-
-struct amd8111_dev_info {
- u16 err_dev; /* PCI Device ID */
- struct pci_dev *dev;
- int edac_idx; /* device index */
- char *ctl_name;
- struct edac_device_ctl_info *edac_dev;
- void (*init)(struct amd8111_dev_info *dev_info);
- void (*exit)(struct amd8111_dev_info *dev_info);
- void (*check)(struct edac_device_ctl_info *edac_dev);
-};
-
-struct amd8111_pci_info {
- u16 err_dev; /* PCI Device ID */
- struct pci_dev *dev;
- int edac_idx; /* pci index */
- const char *ctl_name;
- struct edac_pci_ctl_info *edac_dev;
- void (*init)(struct amd8111_pci_info *dev_info);
- void (*exit)(struct amd8111_pci_info *dev_info);
- void (*check)(struct edac_pci_ctl_info *edac_dev);
-};
-
-#endif /* _AMD8111_EDAC_H_ */
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
deleted file mode 100644
index 28610ba514f4..000000000000
--- a/drivers/edac/amd8131_edac.c
+++ /dev/null
@@ -1,358 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * amd8131_edac.c, AMD8131 hypertransport chip EDAC kernel module
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/edac.h>
-#include <linux/pci_ids.h>
-
-#include "edac_module.h"
-#include "amd8131_edac.h"
-
-#define AMD8131_EDAC_REVISION " Ver: 1.0.0"
-#define AMD8131_EDAC_MOD_STR "amd8131_edac"
-
-/* Wrapper functions for accessing PCI configuration space */
-static void edac_pci_read_dword(struct pci_dev *dev, int reg, u32 *val32)
-{
- int ret;
-
- ret = pci_read_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8131_EDAC_MOD_STR
- " PCI Access Read Error at 0x%x\n", reg);
-}
-
-static void edac_pci_write_dword(struct pci_dev *dev, int reg, u32 val32)
-{
- int ret;
-
- ret = pci_write_config_dword(dev, reg, val32);
- if (ret != 0)
- printk(KERN_ERR AMD8131_EDAC_MOD_STR
- " PCI Access Write Error at 0x%x\n", reg);
-}
-
-/* Support up to two AMD8131 chipsets on a platform */
-static struct amd8131_dev_info amd8131_devices[] = {
- {
- .inst = NORTH_A,
- .devfn = DEVFN_PCIX_BRIDGE_NORTH_A,
- .ctl_name = "AMD8131_PCIX_NORTH_A",
- },
- {
- .inst = NORTH_B,
- .devfn = DEVFN_PCIX_BRIDGE_NORTH_B,
- .ctl_name = "AMD8131_PCIX_NORTH_B",
- },
- {
- .inst = SOUTH_A,
- .devfn = DEVFN_PCIX_BRIDGE_SOUTH_A,
- .ctl_name = "AMD8131_PCIX_SOUTH_A",
- },
- {
- .inst = SOUTH_B,
- .devfn = DEVFN_PCIX_BRIDGE_SOUTH_B,
- .ctl_name = "AMD8131_PCIX_SOUTH_B",
- },
- {.inst = NO_BRIDGE,},
-};
-
-static void amd8131_pcix_init(struct amd8131_dev_info *dev_info)
-{
- u32 val32;
- struct pci_dev *dev = dev_info->dev;
-
- /* First clear error detection flags */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_MASK)
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- /* Clear Discard Timer Timedout flag */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- if (val32 & INT_CTLR_DTS)
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- /* Clear CRC Error flag on link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- if (val32 & LNK_CTRL_CRCERR_A)
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- /* Clear CRC Error flag on link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- if (val32 & LNK_CTRL_CRCERR_B)
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-
- /*
- * Then enable all error detections.
- *
- * Setup Discard Timer Sync Flood Enable,
- * System Error Enable and Parity Error Enable.
- */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- val32 |= INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE;
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- /* Enable overall SERR Error detection */
- edac_pci_read_dword(dev, REG_STS_CMD, &val32);
- val32 |= STS_CMD_SERREN;
- edac_pci_write_dword(dev, REG_STS_CMD, val32);
-
- /* Setup CRC Flood Enable for link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- val32 |= LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- /* Setup CRC Flood Enable for link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- val32 |= LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-}
-
-static void amd8131_pcix_exit(struct amd8131_dev_info *dev_info)
-{
- u32 val32;
- struct pci_dev *dev = dev_info->dev;
-
- /* Disable SERR, PERR and DTSE Error detection */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- val32 &= ~(INT_CTLR_PERR | INT_CTLR_SERR | INT_CTLR_DTSE);
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- /* Disable overall System Error detection */
- edac_pci_read_dword(dev, REG_STS_CMD, &val32);
- val32 &= ~STS_CMD_SERREN;
- edac_pci_write_dword(dev, REG_STS_CMD, val32);
-
- /* Disable CRC Sync Flood on link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- val32 &= ~LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- /* Disable CRC Sync Flood on link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- val32 &= ~LNK_CTRL_CRCFEN;
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-}
-
-static void amd8131_pcix_check(struct edac_pci_ctl_info *edac_dev)
-{
- struct amd8131_dev_info *dev_info = edac_dev->pvt_info;
- struct pci_dev *dev = dev_info->dev;
- u32 val32;
-
- /* Check PCI-X Bridge Memory Base-Limit Register for errors */
- edac_pci_read_dword(dev, REG_MEM_LIM, &val32);
- if (val32 & MEM_LIMIT_MASK) {
- printk(KERN_INFO "Error(s) in mem limit register "
- "on %s bridge\n", dev_info->ctl_name);
- printk(KERN_INFO "DPE: %d, RSE: %d, RMA: %d\n"
- "RTA: %d, STA: %d, MDPE: %d\n",
- val32 & MEM_LIMIT_DPE,
- val32 & MEM_LIMIT_RSE,
- val32 & MEM_LIMIT_RMA,
- val32 & MEM_LIMIT_RTA,
- val32 & MEM_LIMIT_STA,
- val32 & MEM_LIMIT_MDPE);
-
- val32 |= MEM_LIMIT_MASK;
- edac_pci_write_dword(dev, REG_MEM_LIM, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check if Discard Timer timed out */
- edac_pci_read_dword(dev, REG_INT_CTLR, &val32);
- if (val32 & INT_CTLR_DTS) {
- printk(KERN_INFO "Error(s) in interrupt and control register "
- "on %s bridge\n", dev_info->ctl_name);
- printk(KERN_INFO "DTS: %d\n", val32 & INT_CTLR_DTS);
-
- val32 |= INT_CTLR_DTS;
- edac_pci_write_dword(dev, REG_INT_CTLR, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check if CRC error happens on link side A */
- edac_pci_read_dword(dev, REG_LNK_CTRL_A, &val32);
- if (val32 & LNK_CTRL_CRCERR_A) {
- printk(KERN_INFO "Error(s) in link conf and control register "
- "on %s bridge\n", dev_info->ctl_name);
- printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_A);
-
- val32 |= LNK_CTRL_CRCERR_A;
- edac_pci_write_dword(dev, REG_LNK_CTRL_A, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-
- /* Check if CRC error happens on link side B */
- edac_pci_read_dword(dev, REG_LNK_CTRL_B, &val32);
- if (val32 & LNK_CTRL_CRCERR_B) {
- printk(KERN_INFO "Error(s) in link conf and control register "
- "on %s bridge\n", dev_info->ctl_name);
- printk(KERN_INFO "CRCERR: %d\n", val32 & LNK_CTRL_CRCERR_B);
-
- val32 |= LNK_CTRL_CRCERR_B;
- edac_pci_write_dword(dev, REG_LNK_CTRL_B, val32);
-
- edac_pci_handle_npe(edac_dev, edac_dev->ctl_name);
- }
-}
-
-static struct amd8131_info amd8131_chipset = {
- .err_dev = PCI_DEVICE_ID_AMD_8131_APIC,
- .devices = amd8131_devices,
- .init = amd8131_pcix_init,
- .exit = amd8131_pcix_exit,
- .check = amd8131_pcix_check,
-};
-
-/*
- * There are 4 PCIX Bridges on ATCA-6101 that share the same PCI Device ID,
- * so amd8131_probe() would be called by kernel 4 times, with different
- * address of pci_dev for each of them each time.
- */
-static int amd8131_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct amd8131_dev_info *dev_info;
-
- for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
- dev_info++)
- if (dev_info->devfn == dev->devfn)
- break;
-
- if (dev_info->inst == NO_BRIDGE) /* should never happen */
- return -ENODEV;
-
- /*
- * We can't call pci_get_device() as we are used to do because
- * there are 4 of them but pci_dev_get() instead.
- */
- dev_info->dev = pci_dev_get(dev);
-
- if (pci_enable_device(dev_info->dev)) {
- pci_dev_put(dev_info->dev);
- printk(KERN_ERR "failed to enable:"
- "vendor %x, device %x, devfn %x, name %s\n",
- PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
- dev_info->devfn, dev_info->ctl_name);
- return -ENODEV;
- }
-
- /*
- * we do not allocate extra private structure for
- * edac_pci_ctl_info, but make use of existing
- * one instead.
- */
- dev_info->edac_idx = edac_pci_alloc_index();
- dev_info->edac_dev = edac_pci_alloc_ctl_info(0, dev_info->ctl_name);
- if (!dev_info->edac_dev)
- return -ENOMEM;
-
- dev_info->edac_dev->pvt_info = dev_info;
- dev_info->edac_dev->dev = &dev_info->dev->dev;
- dev_info->edac_dev->mod_name = AMD8131_EDAC_MOD_STR;
- dev_info->edac_dev->ctl_name = dev_info->ctl_name;
- dev_info->edac_dev->dev_name = dev_name(&dev_info->dev->dev);
-
- if (edac_op_state == EDAC_OPSTATE_POLL)
- dev_info->edac_dev->edac_check = amd8131_chipset.check;
-
- if (amd8131_chipset.init)
- amd8131_chipset.init(dev_info);
-
- if (edac_pci_add_device(dev_info->edac_dev, dev_info->edac_idx) > 0) {
- printk(KERN_ERR "failed edac_pci_add_device() for %s\n",
- dev_info->ctl_name);
- edac_pci_free_ctl_info(dev_info->edac_dev);
- return -ENODEV;
- }
-
- printk(KERN_INFO "added one device on AMD8131 "
- "vendor %x, device %x, devfn %x, name %s\n",
- PCI_VENDOR_ID_AMD, amd8131_chipset.err_dev,
- dev_info->devfn, dev_info->ctl_name);
-
- return 0;
-}
-
-static void amd8131_remove(struct pci_dev *dev)
-{
- struct amd8131_dev_info *dev_info;
-
- for (dev_info = amd8131_chipset.devices; dev_info->inst != NO_BRIDGE;
- dev_info++)
- if (dev_info->devfn == dev->devfn)
- break;
-
- if (dev_info->inst == NO_BRIDGE) /* should never happen */
- return;
-
- if (dev_info->edac_dev) {
- edac_pci_del_device(dev_info->edac_dev->dev);
- edac_pci_free_ctl_info(dev_info->edac_dev);
- }
-
- if (amd8131_chipset.exit)
- amd8131_chipset.exit(dev_info);
-
- pci_dev_put(dev_info->dev);
-}
-
-static const struct pci_device_id amd8131_edac_pci_tbl[] = {
- {
- PCI_VEND_DEV(AMD, 8131_BRIDGE),
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .class = 0,
- .class_mask = 0,
- .driver_data = 0,
- },
- {
- 0,
- } /* table is NULL-terminated */
-};
-MODULE_DEVICE_TABLE(pci, amd8131_edac_pci_tbl);
-
-static struct pci_driver amd8131_edac_driver = {
- .name = AMD8131_EDAC_MOD_STR,
- .probe = amd8131_probe,
- .remove = amd8131_remove,
- .id_table = amd8131_edac_pci_tbl,
-};
-
-static int __init amd8131_edac_init(void)
-{
- printk(KERN_INFO "AMD8131 EDAC driver " AMD8131_EDAC_REVISION "\n");
- printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc.\n");
-
- /* Only POLL mode supported so far */
- edac_op_state = EDAC_OPSTATE_POLL;
-
- return pci_register_driver(&amd8131_edac_driver);
-}
-
-static void __exit amd8131_edac_exit(void)
-{
- pci_unregister_driver(&amd8131_edac_driver);
-}
-
-module_init(amd8131_edac_init);
-module_exit(amd8131_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
-MODULE_DESCRIPTION("AMD8131 HyperTransport PCI-X Tunnel EDAC kernel module");
diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h
deleted file mode 100644
index 5f362abdaf12..000000000000
--- a/drivers/edac/amd8131_edac.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * amd8131_edac.h, EDAC defs for AMD8131 hypertransport chip
- *
- * Copyright (c) 2008 Wind River Systems, Inc.
- *
- * Authors: Cao Qingtao <qingtao.cao@windriver.com>
- * Benjamin Walsh <benjamin.walsh@windriver.com>
- * Hu Yongqi <yongqi.hu@windriver.com>
- */
-
-#ifndef _AMD8131_EDAC_H_
-#define _AMD8131_EDAC_H_
-
-#define DEVFN_PCIX_BRIDGE_NORTH_A 8
-#define DEVFN_PCIX_BRIDGE_NORTH_B 16
-#define DEVFN_PCIX_BRIDGE_SOUTH_A 24
-#define DEVFN_PCIX_BRIDGE_SOUTH_B 32
-
-/************************************************************
- * PCI-X Bridge Status and Command Register, DevA:0x04
- ************************************************************/
-#define REG_STS_CMD 0x04
-enum sts_cmd_bits {
- STS_CMD_SSE = BIT(30),
- STS_CMD_SERREN = BIT(8)
-};
-
-/************************************************************
- * PCI-X Bridge Interrupt and Bridge Control Register,
- ************************************************************/
-#define REG_INT_CTLR 0x3c
-enum int_ctlr_bits {
- INT_CTLR_DTSE = BIT(27),
- INT_CTLR_DTS = BIT(26),
- INT_CTLR_SERR = BIT(17),
- INT_CTLR_PERR = BIT(16)
-};
-
-/************************************************************
- * PCI-X Bridge Memory Base-Limit Register, DevA:0x1C
- ************************************************************/
-#define REG_MEM_LIM 0x1c
-enum mem_limit_bits {
- MEM_LIMIT_DPE = BIT(31),
- MEM_LIMIT_RSE = BIT(30),
- MEM_LIMIT_RMA = BIT(29),
- MEM_LIMIT_RTA = BIT(28),
- MEM_LIMIT_STA = BIT(27),
- MEM_LIMIT_MDPE = BIT(24),
- MEM_LIMIT_MASK = MEM_LIMIT_DPE|MEM_LIMIT_RSE|MEM_LIMIT_RMA|
- MEM_LIMIT_RTA|MEM_LIMIT_STA|MEM_LIMIT_MDPE
-};
-
-/************************************************************
- * Link Configuration And Control Register, side A
- ************************************************************/
-#define REG_LNK_CTRL_A 0xc4
-
-/************************************************************
- * Link Configuration And Control Register, side B
- ************************************************************/
-#define REG_LNK_CTRL_B 0xc8
-
-enum lnk_ctrl_bits {
- LNK_CTRL_CRCERR_A = BIT(9),
- LNK_CTRL_CRCERR_B = BIT(8),
- LNK_CTRL_CRCFEN = BIT(1)
-};
-
-enum pcix_bridge_inst {
- NORTH_A = 0,
- NORTH_B = 1,
- SOUTH_A = 2,
- SOUTH_B = 3,
- NO_BRIDGE = 4
-};
-
-struct amd8131_dev_info {
- int devfn;
- enum pcix_bridge_inst inst;
- struct pci_dev *dev;
- int edac_idx; /* pci device index */
- char *ctl_name;
- struct edac_pci_ctl_info *edac_dev;
-};
-
-/*
- * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC
- * Controller, and ATCA-6101 has two AMD8131 chipsets, so there are
- * four PCIX Bridges on ATCA-6101 altogether.
- *
- * These PCIX Bridges share the same PCI Device ID and are all of
- * Function Zero, they could be discrimated by their pci_dev->devfn.
- * They share the same set of init/check/exit methods, and their
- * private structures are collected in the devices[] array.
- */
-struct amd8131_info {
- u16 err_dev; /* PCI Device ID for AMD8131 APIC*/
- struct amd8131_dev_info *devices;
- void (*init)(struct amd8131_dev_info *dev_info);
- void (*exit)(struct amd8131_dev_info *dev_info);
- void (*check)(struct edac_pci_ctl_info *edac_dev);
-};
-
-#endif /* _AMD8131_EDAC_H_ */
-
diff --git a/drivers/edac/armada_xp_edac.c b/drivers/edac/armada_xp_edac.c
index 589bc81f1249..d64248fcf4c0 100644
--- a/drivers/edac/armada_xp_edac.c
+++ b/drivers/edac/armada_xp_edac.c
@@ -364,7 +364,7 @@ static void axp_mc_remove(struct platform_device *pdev)
static struct platform_driver axp_mc_driver = {
.probe = axp_mc_probe,
- .remove_new = axp_mc_remove,
+ .remove = axp_mc_remove,
.driver = {
.name = "armada_xp_mc_edac",
.of_match_table = of_match_ptr(axp_mc_of_match),
@@ -579,7 +579,7 @@ static void aurora_l2_remove(struct platform_device *pdev)
static struct platform_driver aurora_l2_driver = {
.probe = aurora_l2_probe,
- .remove_new = aurora_l2_remove,
+ .remove = aurora_l2_remove,
.driver = {
.name = "aurora_l2_edac",
.of_match_table = of_match_ptr(aurora_l2_of_match),
diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c
index 157a480eb761..dadb8acbee3d 100644
--- a/drivers/edac/aspeed_edac.c
+++ b/drivers/edac/aspeed_edac.c
@@ -387,7 +387,7 @@ static struct platform_driver aspeed_driver = {
.of_match_table = aspeed_of_match
},
.probe = aspeed_probe,
- .remove_new = aspeed_remove
+ .remove = aspeed_remove
};
module_platform_driver(aspeed_driver);
diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c
index 5b3164560648..4942a240c30f 100644
--- a/drivers/edac/bluefield_edac.c
+++ b/drivers/edac/bluefield_edac.c
@@ -47,13 +47,22 @@
#define MLXBF_EDAC_MAX_DIMM_PER_MC 2
#define MLXBF_EDAC_ERROR_GRAIN 8
+#define MLXBF_WRITE_REG_32 (0x82000009)
+#define MLXBF_READ_REG_32 (0x8200000A)
+#define MLXBF_SIP_SVC_VERSION (0x8200ff03)
+
+#define MLXBF_SMCCC_ACCESS_VIOLATION (-4)
+
+#define MLXBF_SVC_REQ_MAJOR 0
+#define MLXBF_SVC_REQ_MINOR 3
+
/*
- * Request MLNX_SIP_GET_DIMM_INFO
+ * Request MLXBF_SIP_GET_DIMM_INFO
*
* Retrieve information about DIMM on a certain slot.
*
* Call register usage:
- * a0: MLNX_SIP_GET_DIMM_INFO
+ * a0: MLXBF_SIP_GET_DIMM_INFO
* a1: (Memory controller index) << 16 | (Dimm index in memory controller)
* a2-7: not used.
*
@@ -61,7 +70,7 @@
* a0: MLXBF_DIMM_INFO defined below describing the DIMM.
* a1-3: not used.
*/
-#define MLNX_SIP_GET_DIMM_INFO 0x82000008
+#define MLXBF_SIP_GET_DIMM_INFO 0x82000008
/* Format for the SMC response about the memory information */
#define MLXBF_DIMM_INFO__SIZE_GB GENMASK_ULL(15, 0)
@@ -72,9 +81,15 @@
#define MLXBF_DIMM_INFO__PACKAGE_X GENMASK_ULL(31, 24)
struct bluefield_edac_priv {
+ /* pointer to device structure */
+ struct device *dev;
int dimm_ranks[MLXBF_EDAC_MAX_DIMM_PER_MC];
void __iomem *emi_base;
int dimm_per_mc;
+ /* access to secure regs supported */
+ bool svc_sreg_support;
+ /* SMC table# for secure regs access */
+ u32 sreg_tbl;
};
static u64 smc_call1(u64 smc_op, u64 smc_arg)
@@ -86,6 +101,71 @@ static u64 smc_call1(u64 smc_op, u64 smc_arg)
return res.a0;
}
+static int secure_readl(void __iomem *addr, u32 *result, u32 sreg_tbl)
+{
+ struct arm_smccc_res res;
+ int status;
+
+ arm_smccc_smc(MLXBF_READ_REG_32, sreg_tbl, (uintptr_t)addr,
+ 0, 0, 0, 0, 0, &res);
+
+ status = res.a0;
+
+ if (status == SMCCC_RET_NOT_SUPPORTED ||
+ status == MLXBF_SMCCC_ACCESS_VIOLATION)
+ return -1;
+
+ *result = (u32)res.a1;
+ return 0;
+}
+
+static int secure_writel(void __iomem *addr, u32 data, u32 sreg_tbl)
+{
+ struct arm_smccc_res res;
+ int status;
+
+ arm_smccc_smc(MLXBF_WRITE_REG_32, sreg_tbl, data, (uintptr_t)addr,
+ 0, 0, 0, 0, &res);
+
+ status = res.a0;
+
+ if (status == SMCCC_RET_NOT_SUPPORTED ||
+ status == MLXBF_SMCCC_ACCESS_VIOLATION)
+ return -1;
+ else
+ return 0;
+}
+
+static int bluefield_edac_readl(struct bluefield_edac_priv *priv, u32 offset, u32 *result)
+{
+ void __iomem *addr;
+ int err = 0;
+
+ addr = priv->emi_base + offset;
+
+ if (priv->svc_sreg_support)
+ err = secure_readl(addr, result, priv->sreg_tbl);
+ else
+ *result = readl(addr);
+
+ return err;
+}
+
+static int bluefield_edac_writel(struct bluefield_edac_priv *priv, u32 offset, u32 data)
+{
+ void __iomem *addr;
+ int err = 0;
+
+ addr = priv->emi_base + offset;
+
+ if (priv->svc_sreg_support)
+ err = secure_writel(addr, data, priv->sreg_tbl);
+ else
+ writel(data, addr);
+
+ return err;
+}
+
/*
* Gather the ECC information from the External Memory Interface registers
* and report it to the edac handler.
@@ -99,7 +179,7 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
u32 ecc_latch_select, dram_syndrom, serr, derr, syndrom;
enum hw_event_mc_err_type ecc_type;
u64 ecc_dimm_addr;
- int ecc_dimm;
+ int ecc_dimm, err;
ecc_type = is_single_ecc ? HW_EVENT_ERR_CORRECTED :
HW_EVENT_ERR_UNCORRECTED;
@@ -109,14 +189,19 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
* registers with information about the last ECC error occurrence.
*/
ecc_latch_select = MLXBF_ECC_LATCH_SEL__START;
- writel(ecc_latch_select, priv->emi_base + MLXBF_ECC_LATCH_SEL);
+ err = bluefield_edac_writel(priv, MLXBF_ECC_LATCH_SEL, ecc_latch_select);
+ if (err)
+ dev_err(priv->dev, "ECC latch select write failed.\n");
/*
* Verify that the ECC reported info in the registers is of the
* same type as the one asked to report. If not, just report the
* error without the detailed information.
*/
- dram_syndrom = readl(priv->emi_base + MLXBF_SYNDROM);
+ err = bluefield_edac_readl(priv, MLXBF_SYNDROM, &dram_syndrom);
+ if (err)
+ dev_err(priv->dev, "DRAM syndrom read failed.\n");
+
serr = FIELD_GET(MLXBF_SYNDROM__SERR, dram_syndrom);
derr = FIELD_GET(MLXBF_SYNDROM__DERR, dram_syndrom);
syndrom = FIELD_GET(MLXBF_SYNDROM__SYN, dram_syndrom);
@@ -127,13 +212,21 @@ static void bluefield_gather_report_ecc(struct mem_ctl_info *mci,
return;
}
- dram_additional_info = readl(priv->emi_base + MLXBF_ADD_INFO);
+ err = bluefield_edac_readl(priv, MLXBF_ADD_INFO, &dram_additional_info);
+ if (err)
+ dev_err(priv->dev, "DRAM additional info read failed.\n");
+
err_prank = FIELD_GET(MLXBF_ADD_INFO__ERR_PRANK, dram_additional_info);
ecc_dimm = (err_prank >= 2 && priv->dimm_ranks[0] <= 2) ? 1 : 0;
- edea0 = readl(priv->emi_base + MLXBF_ERR_ADDR_0);
- edea1 = readl(priv->emi_base + MLXBF_ERR_ADDR_1);
+ err = bluefield_edac_readl(priv, MLXBF_ERR_ADDR_0, &edea0);
+ if (err)
+ dev_err(priv->dev, "Error addr 0 read failed.\n");
+
+ err = bluefield_edac_readl(priv, MLXBF_ERR_ADDR_1, &edea1);
+ if (err)
+ dev_err(priv->dev, "Error addr 1 read failed.\n");
ecc_dimm_addr = ((u64)edea1 << 32) | edea0;
@@ -147,6 +240,7 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
{
struct bluefield_edac_priv *priv = mci->pvt_info;
u32 ecc_count, single_error_count, double_error_count, ecc_error = 0;
+ int err;
/*
* The memory controller might not be initialized by the firmware
@@ -155,7 +249,10 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
if (mci->edac_cap == EDAC_FLAG_NONE)
return;
- ecc_count = readl(priv->emi_base + MLXBF_ECC_CNT);
+ err = bluefield_edac_readl(priv, MLXBF_ECC_CNT, &ecc_count);
+ if (err)
+ dev_err(priv->dev, "ECC count read failed.\n");
+
single_error_count = FIELD_GET(MLXBF_ECC_CNT__SERR_CNT, ecc_count);
double_error_count = FIELD_GET(MLXBF_ECC_CNT__DERR_CNT, ecc_count);
@@ -172,15 +269,18 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
}
/* Write to clear reported errors. */
- if (ecc_count)
- writel(ecc_error, priv->emi_base + MLXBF_ECC_ERR);
+ if (ecc_count) {
+ err = bluefield_edac_writel(priv, MLXBF_ECC_ERR, ecc_error);
+ if (err)
+ dev_err(priv->dev, "ECC Error write failed.\n");
+ }
}
/* Initialize the DIMMs information for the given memory controller. */
static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
{
struct bluefield_edac_priv *priv = mci->pvt_info;
- int mem_ctrl_idx = mci->mc_idx;
+ u64 mem_ctrl_idx = mci->mc_idx;
struct dimm_info *dimm;
u64 smc_info, smc_arg;
int is_empty = 1, i;
@@ -189,7 +289,7 @@ static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
dimm = mci->dimms[i];
smc_arg = mem_ctrl_idx << 16 | i;
- smc_info = smc_call1(MLNX_SIP_GET_DIMM_INFO, smc_arg);
+ smc_info = smc_call1(MLXBF_SIP_GET_DIMM_INFO, smc_arg);
if (!FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, smc_info)) {
dimm->mtype = MEM_EMPTY;
@@ -244,6 +344,7 @@ static int bluefield_edac_mc_probe(struct platform_device *pdev)
struct bluefield_edac_priv *priv;
struct device *dev = &pdev->dev;
struct edac_mc_layer layers[1];
+ struct arm_smccc_res res;
struct mem_ctl_info *mci;
struct resource *emi_res;
unsigned int mc_idx, dimm_count;
@@ -279,13 +380,43 @@ static int bluefield_edac_mc_probe(struct platform_device *pdev)
return -ENOMEM;
priv = mci->pvt_info;
+ priv->dev = dev;
+
+ /*
+ * The "sec_reg_block" property in the ACPI table determines the method
+ * the driver uses to access the EMI registers:
+ * a) property is not present - directly access registers via readl/writel
+ * b) property is present - indirectly access registers via SMC calls
+ * (assuming required Silicon Provider service version found)
+ */
+ if (device_property_read_u32(dev, "sec_reg_block", &priv->sreg_tbl)) {
+ priv->svc_sreg_support = false;
+ } else {
+ /*
+ * Check for minimum required Arm Silicon Provider (SiP) service
+ * version, ensuring support of required SMC function IDs.
+ */
+ arm_smccc_smc(MLXBF_SIP_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == MLXBF_SVC_REQ_MAJOR &&
+ res.a1 >= MLXBF_SVC_REQ_MINOR) {
+ priv->svc_sreg_support = true;
+ } else {
+ dev_err(dev, "Required SMCs are not supported.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
priv->dimm_per_mc = dimm_count;
- priv->emi_base = devm_ioremap_resource(dev, emi_res);
- if (IS_ERR(priv->emi_base)) {
- dev_err(dev, "failed to map EMI IO resource\n");
- ret = PTR_ERR(priv->emi_base);
- goto err;
+ if (!priv->svc_sreg_support) {
+ priv->emi_base = devm_ioremap_resource(dev, emi_res);
+ if (IS_ERR(priv->emi_base)) {
+ dev_err(dev, "failed to map EMI IO resource\n");
+ ret = PTR_ERR(priv->emi_base);
+ goto err;
+ }
+ } else {
+ priv->emi_base = (void __iomem *)emi_res->start;
}
mci->pdev = dev;
@@ -320,7 +451,6 @@ err:
edac_mc_free(mci);
return ret;
-
}
static void bluefield_edac_mc_remove(struct platform_device *pdev)
@@ -344,7 +474,7 @@ static struct platform_driver bluefield_edac_mc_driver = {
.acpi_match_table = bluefield_mc_acpi_ids,
},
.probe = bluefield_edac_mc_probe,
- .remove_new = bluefield_edac_mc_remove,
+ .remove = bluefield_edac_mc_remove,
};
module_platform_driver(bluefield_edac_mc_driver);
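[Editorial aside, not part of the patch: the MLXBF_SIP_GET_DIMM_INFO calling convention and the MLXBF_DIMM_INFO__* bitfields above combine as in the following minimal sketch. It assumes the driver's smc_call1() helper and <linux/bitfield.h>; mlxbf_dimm_size_gb() is a hypothetical name.]

static u64 mlxbf_dimm_size_gb(unsigned int mc, unsigned int dimm)
{
	/* a1 = (memory controller index) << 16 | (DIMM index), per the comment above */
	u64 info = smc_call1(MLXBF_SIP_GET_DIMM_INFO, (u64)mc << 16 | dimm);

	/* a0 holds the MLXBF_DIMM_INFO word; a zero size marks an empty slot,
	 * which is exactly what bluefield_edac_init_dimms() checks for. */
	return FIELD_GET(MLXBF_DIMM_INFO__SIZE_GB, info);
}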
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
deleted file mode 100644
index 2000f66fbf5c..000000000000
--- a/drivers/edac/cell_edac.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Cell MIC driver for ECC counting
- *
- * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- *
- * This file may be distributed under the terms of the
- * GNU General Public License.
- */
-#undef DEBUG
-
-#include <linux/edac.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/stop_machine.h>
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "edac_module.h"
-
-struct cell_edac_priv
-{
- struct cbe_mic_tm_regs __iomem *regs;
- int node;
- int chanmask;
-#ifdef DEBUG
- u64 prev_fir;
-#endif
-};
-
-static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = mci->csrows[0];
- unsigned long address, pfn, offset, syndrome;
-
- dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
- /* Address decoding is likely a bit bogus, to dbl check */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
- syndrome = (ar & 0x000000001fe00000ul) >> 21;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- csrow->first_page + pfn, offset, syndrome,
- 0, chan, -1, "", "");
-}
-
-static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = mci->csrows[0];
- unsigned long address, pfn, offset;
-
- dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
- priv->node, chan, ar);
-
- /* Address decoding is likely a bit bogus, to dbl check */
- address = (ar & 0xffffffffe0000000ul) >> 29;
- if (priv->chanmask == 0x3)
- address = (address << 1) | chan;
- pfn = address >> PAGE_SHIFT;
- offset = address & ~PAGE_MASK;
-
- /* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- csrow->first_page + pfn, offset, 0,
- 0, chan, -1, "", "");
-}
-
-static void cell_edac_check(struct mem_ctl_info *mci)
-{
- struct cell_edac_priv *priv = mci->pvt_info;
- u64 fir, addreg, clear = 0;
-
- fir = in_be64(&priv->regs->mic_fir);
-#ifdef DEBUG
- if (fir != priv->prev_fir) {
- dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir);
- priv->prev_fir = fir;
- }
-#endif
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
- cell_edac_count_ce(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
- cell_edac_count_ce(mci, 1, addreg);
- }
- if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
- clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
- cell_edac_count_ue(mci, 0, addreg);
- }
- if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
- addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
- clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
- cell_edac_count_ue(mci, 1, addreg);
- }
-
- /* The procedure for clearing FIR bits is a bit ... weird */
- if (clear) {
- fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
- fir |= CBE_MIC_FIR_ECC_RESET_MASK;
- fir &= ~clear;
- out_be64(&priv->regs->mic_fir, fir);
- (void)in_be64(&priv->regs->mic_fir);
-
- mb(); /* sync up */
-#ifdef DEBUG
- fir = in_be64(&priv->regs->mic_fir);
- dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir);
-#endif
- }
-}
-
-static void cell_edac_init_csrows(struct mem_ctl_info *mci)
-{
- struct csrow_info *csrow = mci->csrows[0];
- struct dimm_info *dimm;
- struct cell_edac_priv *priv = mci->pvt_info;
- struct device_node *np;
- int j;
- u32 nr_pages;
-
- for_each_node_by_name(np, "memory") {
- struct resource r;
-
- /* We "know" that the Cell firmware only creates one entry
- * in the "memory" nodes. If that changes, this code will
- * need to be adapted.
- */
- if (of_address_to_resource(np, 0, &r))
- continue;
- if (of_node_to_nid(np) != priv->node)
- continue;
- csrow->first_page = r.start >> PAGE_SHIFT;
- nr_pages = resource_size(&r) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + nr_pages - 1;
-
- for (j = 0; j < csrow->nr_channels; j++) {
- dimm = csrow->channels[j]->dimm;
- dimm->mtype = MEM_XDR;
- dimm->edac_mode = EDAC_SECDED;
- dimm->nr_pages = nr_pages / csrow->nr_channels;
- }
- dev_dbg(mci->pdev,
- "Initialized on node %d, chanmask=0x%x,"
- " first_page=0x%lx, nr_pages=0x%x\n",
- priv->node, priv->chanmask,
- csrow->first_page, nr_pages);
- break;
- }
- of_node_put(np);
-}
-
-static int cell_edac_probe(struct platform_device *pdev)
-{
- struct cbe_mic_tm_regs __iomem *regs;
- struct mem_ctl_info *mci;
- struct edac_mc_layer layers[2];
- struct cell_edac_priv *priv;
- u64 reg;
- int rc, chanmask, num_chans;
-
- regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
- if (regs == NULL)
- return -ENODEV;
-
- edac_op_state = EDAC_OPSTATE_POLL;
-
- /* Get channel population */
- reg = in_be64(&regs->mic_mnt_cfg);
- dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016llx\n", reg);
- chanmask = 0;
- if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
- chanmask |= 0x1;
- if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
- chanmask |= 0x2;
- if (chanmask == 0) {
- dev_warn(&pdev->dev,
- "Yuck ! No channel populated ? Aborting !\n");
- return -ENODEV;
- }
- dev_dbg(&pdev->dev, "Initial FIR = 0x%016llx\n",
- in_be64(&regs->mic_fir));
-
- /* Allocate & init EDAC MC data structure */
- num_chans = chanmask == 3 ? 2 : 1;
-
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = 1;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = num_chans;
- layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
- sizeof(struct cell_edac_priv));
- if (mci == NULL)
- return -ENOMEM;
- priv = mci->pvt_info;
- priv->regs = regs;
- priv->node = pdev->id;
- priv->chanmask = chanmask;
- mci->pdev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_XDR;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
- mci->mod_name = "cell_edac";
- mci->ctl_name = "MIC";
- mci->dev_name = dev_name(&pdev->dev);
- mci->edac_check = cell_edac_check;
- cell_edac_init_csrows(mci);
-
- /* Register with EDAC core */
- rc = edac_mc_add_mc(mci);
- if (rc) {
- dev_err(&pdev->dev, "failed to register with EDAC core\n");
- edac_mc_free(mci);
- return rc;
- }
-
- return 0;
-}
-
-static void cell_edac_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
- if (mci)
- edac_mc_free(mci);
-}
-
-static struct platform_driver cell_edac_driver = {
- .driver = {
- .name = "cbe-mic",
- },
- .probe = cell_edac_probe,
- .remove_new = cell_edac_remove,
-};
-
-static int __init cell_edac_init(void)
-{
- /* Sanity check registers data structure */
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_0) != 0xf8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_ecc_address_1) != 0x1b8);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_df_config) != 0x218);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_fir) != 0x230);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_mnt_cfg) != 0x210);
- BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
- mic_exc) != 0x208);
-
- return platform_driver_register(&cell_edac_driver);
-}
-
-static void __exit cell_edac_exit(void)
-{
- platform_driver_unregister(&cell_edac_driver);
-}
-
-module_init(cell_edac_init);
-module_exit(cell_edac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("ECC counting for Cell MIC");
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index eb702bc3aa29..9c9e4369c041 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -1027,7 +1027,7 @@ static void cpc925_remove(struct platform_device *pdev)
static struct platform_driver cpc925_edac_driver = {
.probe = cpc925_probe,
- .remove_new = cpc925_remove,
+ .remove = cpc925_remove,
.driver = {
.name = "cpc925_edac",
}
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
index 5e52d31db3b8..64a4d0a07032 100644
--- a/drivers/edac/dmc520_edac.c
+++ b/drivers/edac/dmc520_edac.c
@@ -640,7 +640,7 @@ static struct platform_driver dmc520_edac_driver = {
},
.probe = dmc520_edac_probe,
- .remove_new = dmc520_edac_remove
+ .remove = dmc520_edac_remove
};
module_platform_driver(dmc520_edac_driver);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d6eed727b0cd..0959320fe51c 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -214,7 +214,7 @@ static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
unsigned int row, chn;
/*
- * Alocate and fill the csrow/channels structs
+ * Allocate and fill the csrow/channels structs
*/
mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4200aec04831..0f338adf7d93 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -422,7 +422,7 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
return nr_pages;
}
-/* Create a CSROW object under specifed edac_mc_device */
+/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
@@ -449,7 +449,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
return 0;
}
-/* Create a CSROW object under specifed edac_mc_device */
+/* Create a CSROW object under specified edac_mc_device */
static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
int err, i;
@@ -636,7 +636,7 @@ static void dimm_release(struct device *dev)
*/
}
-/* Create a DIMM object under specifed memory controller device */
+/* Create a DIMM object under specified memory controller device */
static int edac_create_dimm_object(struct mem_ctl_info *mci,
struct dimm_info *dimm)
{
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
index d148d262d0d4..e4eaec0aa81d 100644
--- a/drivers/edac/fsl_ddr_edac.c
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -31,18 +31,30 @@
static int edac_mc_idx;
-static u32 orig_ddr_err_disable;
-static u32 orig_ddr_err_sbe;
-static bool little_endian;
+static inline void __iomem *ddr_reg_addr(struct fsl_mc_pdata *pdata, unsigned int off)
+{
+ if (pdata->flag == TYPE_IMX9 && off >= FSL_MC_DATA_ERR_INJECT_HI && off <= FSL_MC_ERR_SBE)
+ return pdata->inject_vbase + off - FSL_MC_DATA_ERR_INJECT_HI
+ + IMX9_MC_DATA_ERR_INJECT_OFF;
+
+ if (pdata->flag == TYPE_IMX9 && off >= IMX9_MC_ERR_EN)
+ return pdata->inject_vbase + off - IMX9_MC_ERR_EN;
-static inline u32 ddr_in32(void __iomem *addr)
+ return pdata->mc_vbase + off;
+}
+
+static inline u32 ddr_in32(struct fsl_mc_pdata *pdata, unsigned int off)
{
- return little_endian ? ioread32(addr) : ioread32be(addr);
+ void __iomem *addr = ddr_reg_addr(pdata, off);
+
+ return pdata->little_endian ? ioread32(addr) : ioread32be(addr);
}
-static inline void ddr_out32(void __iomem *addr, u32 value)
+static inline void ddr_out32(struct fsl_mc_pdata *pdata, unsigned int off, u32 value)
{
- if (little_endian)
+ void __iomem *addr = ddr_reg_addr(pdata, off);
+
+ if (pdata->little_endian)
iowrite32(value, addr);
else
iowrite32be(value, addr);
@@ -60,7 +72,7 @@ static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
- ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
+ ddr_in32(pdata, FSL_MC_DATA_ERR_INJECT_HI));
}
static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
@@ -70,7 +82,7 @@ static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
- ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
+ ddr_in32(pdata, FSL_MC_DATA_ERR_INJECT_LO));
}
static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
@@ -80,7 +92,7 @@ static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
struct mem_ctl_info *mci = to_mci(dev);
struct fsl_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
- ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
+ ddr_in32(pdata, FSL_MC_ECC_ERR_INJECT));
}
static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
@@ -97,7 +109,7 @@ static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
if (rc)
return rc;
- ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
+ ddr_out32(pdata, FSL_MC_DATA_ERR_INJECT_HI, val);
return count;
}
return 0;
@@ -117,7 +129,7 @@ static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
if (rc)
return rc;
- ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
+ ddr_out32(pdata, FSL_MC_DATA_ERR_INJECT_LO, val);
return count;
}
return 0;
@@ -137,7 +149,7 @@ static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
if (rc)
return rc;
- ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
+ ddr_out32(pdata, FSL_MC_ECC_ERR_INJECT, val);
return count;
}
return 0;
@@ -286,7 +298,7 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
int bad_data_bit;
int bad_ecc_bit;
- err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
+ err_detect = ddr_in32(pdata, FSL_MC_ERR_DETECT);
if (!err_detect)
return;
@@ -295,14 +307,14 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
/* no more processing if not ECC bit errors */
if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
+ ddr_out32(pdata, FSL_MC_ERR_DETECT, err_detect);
return;
}
- syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);
+ syndrome = ddr_in32(pdata, FSL_MC_CAPTURE_ECC);
/* Mask off appropriate bits of syndrome based on bus width */
- bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
+ bus_width = (ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG) &
DSC_DBW_MASK) ? 32 : 64;
if (bus_width == 64)
syndrome &= 0xff;
@@ -310,8 +322,8 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
syndrome &= 0xffff;
err_addr = make64(
- ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
- ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
+ ddr_in32(pdata, FSL_MC_CAPTURE_EXT_ADDRESS),
+ ddr_in32(pdata, FSL_MC_CAPTURE_ADDRESS));
pfn = err_addr >> PAGE_SHIFT;
for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
@@ -320,29 +332,33 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
break;
}
- cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
- cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);
+ cap_high = ddr_in32(pdata, FSL_MC_CAPTURE_DATA_HI);
+ cap_low = ddr_in32(pdata, FSL_MC_CAPTURE_DATA_LO);
/*
* Analyze single-bit errors on 64-bit wide buses
* TODO: Add support for 32-bit wide buses
*/
if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
+ u64 cap = (u64)cap_high << 32 | cap_low;
+ u32 s = syndrome;
+
sbe_ecc_decode(cap_high, cap_low, syndrome,
&bad_data_bit, &bad_ecc_bit);
- if (bad_data_bit != -1)
- fsl_mc_printk(mci, KERN_ERR,
- "Faulty Data bit: %d\n", bad_data_bit);
- if (bad_ecc_bit != -1)
- fsl_mc_printk(mci, KERN_ERR,
- "Faulty ECC bit: %d\n", bad_ecc_bit);
+ if (bad_data_bit >= 0) {
+ fsl_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit);
+ cap ^= 1ULL << bad_data_bit;
+ }
+
+ if (bad_ecc_bit >= 0) {
+ fsl_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit);
+ s ^= 1 << bad_ecc_bit;
+ }
fsl_mc_printk(mci, KERN_ERR,
"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
- cap_high ^ (1 << (bad_data_bit - 32)),
- cap_low ^ (1 << bad_data_bit),
- syndrome ^ (1 << bad_ecc_bit));
+ upper_32_bits(cap), lower_32_bits(cap), s);
}
fsl_mc_printk(mci, KERN_ERR,
@@ -367,7 +383,7 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
row_index, 0, -1,
mci->ctl_name, "");
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
+ ddr_out32(pdata, FSL_MC_ERR_DETECT, err_detect);
}
static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
@@ -376,7 +392,7 @@ static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
struct fsl_mc_pdata *pdata = mci->pvt_info;
u32 err_detect;
- err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
+ err_detect = ddr_in32(pdata, FSL_MC_ERR_DETECT);
if (!err_detect)
return IRQ_NONE;
@@ -396,7 +412,7 @@ static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
u32 cs_bnds;
int index;
- sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
+ sdram_ctl = ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG);
sdtype = sdram_ctl & DSC_SDTYPE_MASK;
if (sdram_ctl & DSC_RD_EN) {
@@ -431,6 +447,9 @@ static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
case 0x05000000:
mtype = MEM_DDR4;
break;
+ case 0x04000000:
+ mtype = MEM_LPDDR4;
+ break;
default:
mtype = MEM_UNKNOWN;
break;
@@ -444,7 +463,7 @@ static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
csrow = mci->csrows[index];
dimm = csrow->channels[0]->dimm;
- cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
+ cs_bnds = ddr_in32(pdata, FSL_MC_CS_BNDS_0 +
(index * FSL_MC_CS_BNDS_OFS));
start = (cs_bnds & 0xffff0000) >> 16;
@@ -464,7 +483,9 @@ static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
dimm->grain = 8;
dimm->mtype = mtype;
dimm->dtype = DEV_UNKNOWN;
- if (sdram_ctl & DSC_X32_EN)
+ if (pdata->flag == TYPE_IMX9)
+ dimm->dtype = DEV_X16;
+ else if (sdram_ctl & DSC_X32_EN)
dimm->dtype = DEV_X32;
dimm->edac_mode = EDAC_SECDED;
}
@@ -476,6 +497,7 @@ int fsl_mc_err_probe(struct platform_device *op)
struct edac_mc_layer layers[2];
struct fsl_mc_pdata *pdata;
struct resource r;
+ u32 ecc_en_mask;
u32 sdram_ctl;
int res;
@@ -503,11 +525,13 @@ int fsl_mc_err_probe(struct platform_device *op)
mci->ctl_name = pdata->name;
mci->dev_name = pdata->name;
+ pdata->flag = (unsigned long)device_get_match_data(&op->dev);
+
/*
* Get the endianness of DDR controller registers.
* Default is big endian.
*/
- little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
+ pdata->little_endian = of_property_read_bool(op->dev.of_node, "little-endian");
res = of_address_to_resource(op->dev.of_node, 0, &r);
if (res) {
@@ -531,8 +555,23 @@ int fsl_mc_err_probe(struct platform_device *op)
goto err;
}
- sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
- if (!(sdram_ctl & DSC_ECC_EN)) {
+ if (pdata->flag == TYPE_IMX9) {
+ pdata->inject_vbase = devm_platform_ioremap_resource_byname(op, "inject");
+ if (IS_ERR(pdata->inject_vbase)) {
+ res = -ENOMEM;
+ goto err;
+ }
+ }
+
+ if (pdata->flag == TYPE_IMX9) {
+ sdram_ctl = ddr_in32(pdata, IMX9_MC_ERR_EN);
+ ecc_en_mask = ERR_ECC_EN | ERR_INLINE_ECC;
+ } else {
+ sdram_ctl = ddr_in32(pdata, FSL_MC_DDR_SDRAM_CFG);
+ ecc_en_mask = DSC_ECC_EN;
+ }
+
+ if ((sdram_ctl & ecc_en_mask) != ecc_en_mask) {
/* no ECC */
pr_warn("%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
@@ -543,7 +582,8 @@ int fsl_mc_err_probe(struct platform_device *op)
mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
- MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
+ MEM_FLAG_DDR4 | MEM_FLAG_RDDR4 |
+ MEM_FLAG_LPDDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
@@ -558,11 +598,11 @@ int fsl_mc_err_probe(struct platform_device *op)
fsl_ddr_init_csrows(mci);
/* store the original error disable bits */
- orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);
+ pdata->orig_ddr_err_disable = ddr_in32(pdata, FSL_MC_ERR_DISABLE);
+ ddr_out32(pdata, FSL_MC_ERR_DISABLE, 0);
/* clear all error bits */
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);
+ ddr_out32(pdata, FSL_MC_ERR_DETECT, ~0);
res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
if (res) {
@@ -571,15 +611,15 @@ int fsl_mc_err_probe(struct platform_device *op)
}
if (edac_op_state == EDAC_OPSTATE_INT) {
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
+ ddr_out32(pdata, FSL_MC_ERR_INT_EN,
DDR_EIE_MBEE | DDR_EIE_SBEE);
/* store the original error management threshold */
- orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
- FSL_MC_ERR_SBE) & 0xff0000;
+ pdata->orig_ddr_err_sbe = ddr_in32(pdata,
+ FSL_MC_ERR_SBE) & 0xff0000;
/* set threshold to 1 error per interrupt */
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);
+ ddr_out32(pdata, FSL_MC_ERR_SBE, 0x10000);
/* register interrupts */
pdata->irq = platform_get_irq(op, 0);
@@ -620,12 +660,13 @@ void fsl_mc_err_remove(struct platform_device *op)
edac_dbg(0, "\n");
if (edac_op_state == EDAC_OPSTATE_INT) {
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
+ ddr_out32(pdata, FSL_MC_ERR_INT_EN, 0);
}
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
- orig_ddr_err_disable);
- ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);
+ ddr_out32(pdata, FSL_MC_ERR_DISABLE,
+ pdata->orig_ddr_err_disable);
+ ddr_out32(pdata, FSL_MC_ERR_SBE, pdata->orig_ddr_err_sbe);
+
edac_mc_del_mc(&op->dev);
edac_mc_free(mci);
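[Editorial aside: the single-bit fix-up in fsl_mc_check() above is plain XOR arithmetic — flipping the one bit that sbe_ecc_decode() flagged recovers the data the controller should have captured. A standalone sketch of the same computation, assuming bad_data_bit is the 0-63 bit index the decoder reports, or negative when unknown:]

static u64 fsl_expected_data(u32 cap_high, u32 cap_low, int bad_data_bit)
{
	u64 cap = (u64)cap_high << 32 | cap_low;

	if (bad_data_bit >= 0)
		cap ^= 1ULL << bad_data_bit;	/* undo the single-bit flip */
	return cap;
}

For example, a captured low word of 0x00000010 with bad_data_bit 4 decodes to an expected low word of 0x00000000.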
diff --git a/drivers/edac/fsl_ddr_edac.h b/drivers/edac/fsl_ddr_edac.h
index c0994a2a003c..73618f79e587 100644
--- a/drivers/edac/fsl_ddr_edac.h
+++ b/drivers/edac/fsl_ddr_edac.h
@@ -39,6 +39,9 @@
#define FSL_MC_CAPTURE_EXT_ADDRESS 0x0e54
#define FSL_MC_ERR_SBE 0x0e58
+#define IMX9_MC_ERR_EN 0x1000
+#define IMX9_MC_DATA_ERR_INJECT_OFF 0x100
+
#define DSC_MEM_EN 0x80000000
#define DSC_ECC_EN 0x20000000
#define DSC_RD_EN 0x10000000
@@ -46,6 +49,9 @@
#define DSC_DBW_32 0x00080000
#define DSC_DBW_64 0x00000000
+#define ERR_ECC_EN 0x80000000
+#define ERR_INLINE_ECC 0x40000000
+
#define DSC_SDTYPE_MASK 0x07000000
#define DSC_X32_EN 0x00000020
@@ -65,11 +71,18 @@
#define DDR_EDI_SBED 0x4 /* single-bit ECC error disable */
#define DDR_EDI_MBED 0x8 /* multi-bit ECC error disable */
+#define TYPE_IMX9 0x1 /* MC used by iMX9 having registers changed */
+
struct fsl_mc_pdata {
char *name;
int edac_idx;
void __iomem *mc_vbase;
+ void __iomem *inject_vbase;
int irq;
+ u32 orig_ddr_err_disable;
+ u32 orig_ddr_err_sbe;
+ bool little_endian;
+ unsigned long flag;
};
int fsl_mc_err_probe(struct platform_device *op);
void fsl_mc_err_remove(struct platform_device *op);
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index 282ca6535f8f..24f163ff323f 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -128,7 +128,7 @@ static void highbank_l2_err_remove(struct platform_device *pdev)
static struct platform_driver highbank_l2_edac_driver = {
.probe = highbank_l2_err_probe,
- .remove_new = highbank_l2_err_remove,
+ .remove = highbank_l2_err_remove,
.driver = {
.name = "hb_l2_edac",
.of_match_table = hb_l2_err_of_match,
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 1c5b888ab11d..a8879d72d064 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -261,7 +261,7 @@ static void highbank_mc_remove(struct platform_device *pdev)
static struct platform_driver highbank_mc_edac_driver = {
.probe = highbank_mc_probe,
- .remove_new = highbank_mc_remove,
+ .remove = highbank_mc_remove,
.driver = {
.name = "hb_mc_edac",
.of_match_table = hb_ddr_ctrl_of_match,
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index e2a954de913b..f45d849d3f15 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -938,16 +938,18 @@ static struct res_config gnr_cfg = {
};
static const struct x86_cpu_id i10nm_cpuids[] = {
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
- X86_MATCH_VFM_STEPPINGS(INTEL_SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MIN, 0x3, &i10nm_cfg0),
+ X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, 0x4, X86_STEP_MAX, &i10nm_cfg1),
+ X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, X86_STEP_MIN, 0x3, &i10nm_cfg0),
+ X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, 0x4, X86_STEP_MAX, &i10nm_cfg1),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, &i10nm_cfg1),
+
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_cfg),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_cfg),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_cfg),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -1010,7 +1012,7 @@ static struct notifier_block i10nm_mce_dec = {
static int __init i10nm_init(void)
{
- u8 mc = 0, src_id = 0, node_id = 0;
+ u8 mc = 0, src_id = 0;
const struct x86_cpu_id *id;
struct res_config *cfg;
const char *owner;
@@ -1036,6 +1038,7 @@ static int __init i10nm_init(void)
return -ENODEV;
cfg = (struct res_config *)id->driver_data;
+ skx_set_res_cfg(cfg);
res_cfg = cfg;
rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
@@ -1069,19 +1072,14 @@ static int __init i10nm_init(void)
if (rc < 0)
goto fail;
- rc = skx_get_node_id(d, &node_id);
- if (rc < 0)
- goto fail;
-
- edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
+ edac_dbg(2, "src_id = %d\n", src_id);
for (i = 0; i < imc_num; i++) {
if (!d->imc[i].mdev)
continue;
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
- d->imc[i].src_id = src_id;
- d->imc[i].node_id = node_id;
+ d->imc[i].src_id = src_id;
if (d->imc[i].hbm_mc) {
d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
d->imc[i].num_channels = cfg->hbm_chan_num;
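[Editorial aside: the i10nm_cpuids table above is consumed through the standard x86 match helper; i10nm_init() in the earlier hunk performs exactly this lookup before calling skx_set_res_cfg(). A minimal sketch, with lookup_res_cfg() as a hypothetical name:]

static struct res_config *lookup_res_cfg(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(i10nm_cpuids);

	/* driver_data carries the per-generation res_config (e.g. &gnr_cfg) */
	return id ? (struct res_config *)id->driver_data : NULL;
}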
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 4b5a71f8739d..4a1bebc1ff14 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -338,11 +338,11 @@ struct i5000_pvt {
u16 mir0, mir1, mir2;
- u16 b0_mtr[NUM_MTRS]; /* Memory Technlogy Reg */
+ u16 b0_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
- u16 b0_ambpresent1; /* Brnach 0, Channel 1 */
+ u16 b0_ambpresent1; /* Branch 0, Channel 1 */
- u16 b1_mtr[NUM_MTRS]; /* Memory Technlogy Reg */
+ u16 b1_mtr[NUM_MTRS]; /* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 8 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
@@ -1210,7 +1210,7 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
&pvt->b0_ambpresent1);
edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
- /* Only if we have 2 branchs (4 channels) */
+ /* Only if we have 2 branches (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_ambpresent0 = 0;
pvt->b1_ambpresent1 = 0;
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 9ef13570f2e5..4fc16922dc1a 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -19,7 +19,8 @@
* 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller
* 0c08: Xeon E3-1200 v3 Processor DRAM Controller
* 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
- * 5918: Xeon E3-1200 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
+ * 590f: Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
+ * 5918: Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
* 190f: 6th Gen Core Dual-Core Processor Host Bridge/DRAM Registers
* 191f: 6th Gen Core Quad-Core Processor Host Bridge/DRAM Registers
* 3e..: 8th/9th Gen Core Processor Host Bridge/DRAM Registers
@@ -67,7 +68,8 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x190F
#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x1918
#define PCI_DEVICE_ID_INTEL_IE31200_HB_10 0x191F
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_11 0x5918
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_11 0x590f
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_12 0x5918
/* Coffee Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK 0x3e00
@@ -88,6 +90,7 @@
((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \
((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_10) || \
((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_11) || \
+ ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_12) || \
(((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
@@ -587,6 +590,7 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_11), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_12), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 189a2fc29e74..fdf3a84fe698 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -263,6 +263,11 @@ static struct work_struct ecclog_work;
#define DID_ARL_UH_SKU2 0x7d20
#define DID_ARL_UH_SKU3 0x7d30
+/* Compute die IDs for Panther Lake-H with IBECC */
+#define DID_PTL_H_SKU1 0xb000
+#define DID_PTL_H_SKU2 0xb001
+#define DID_PTL_H_SKU3 0xb002
+
static int get_mchbar(struct pci_dev *pdev, u64 *mchbar)
{
union {
@@ -605,6 +610,9 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU1), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU2), (kernel_ulong_t)&mtl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_ARL_UH_SKU3), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_PTL_H_SKU1), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_PTL_H_SKU2), (kernel_ulong_t)&mtl_p_cfg },
+ { PCI_VDEVICE(INTEL, DID_PTL_H_SKU3), (kernel_ulong_t)&mtl_p_cfg },
{ },
};
MODULE_DEVICE_TABLE(pci, igen6_pci_tbl);
@@ -1170,6 +1178,20 @@ fail:
return -ENODEV;
}
+static void igen6_check(struct mem_ctl_info *mci)
+{
+ struct igen6_imc *imc = mci->pvt_info;
+ u64 ecclog;
+
+ /* errsts_clear() isn't NMI-safe. Delay it in the IRQ context */
+ ecclog = ecclog_read_and_clear(imc);
+ if (!ecclog)
+ return;
+
+ if (!ecclog_gen_pool_add(imc->mc, ecclog))
+ irq_work_queue(&ecclog_irq_work);
+}
+
static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
{
struct edac_mc_layer layers[2];
@@ -1211,6 +1233,8 @@ static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(pdev);
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = igen6_check;
mci->pvt_info = &igen6_pvt->imc[mc];
imc = mci->pvt_info;
@@ -1245,6 +1269,7 @@ static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev)
imc->mci = mci;
return 0;
fail3:
+ mci->pvt_info = NULL;
kfree(mci->ctl_name);
fail2:
edac_mc_free(mci);
@@ -1269,6 +1294,7 @@ static void igen6_unregister_mcis(void)
edac_mc_del_mc(mci->pdev);
kfree(mci->ctl_name);
+ mci->pvt_info = NULL;
edac_mc_free(mci);
iounmap(imc->window);
}
@@ -1348,6 +1374,25 @@ static void unregister_err_handler(void)
unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
}
+static void opstate_set(struct res_config *cfg, const struct pci_device_id *ent)
+{
+ /*
+ * Quirk: Certain SoCs' error reporting interrupts don't work.
+ * Force polling mode for them to ensure that memory error
+ * events can be handled.
+ */
+ if (ent->device == DID_ADL_N_SKU4) {
+ edac_op_state = EDAC_OPSTATE_POLL;
+ return;
+ }
+
+ /* Set the mode according to the configuration data. */
+ if (cfg->machine_check)
+ edac_op_state = EDAC_OPSTATE_INT;
+ else
+ edac_op_state = EDAC_OPSTATE_NMI;
+}
+
static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u64 mchbar;
@@ -1365,6 +1410,8 @@ static int igen6_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto fail;
+ opstate_set(res_cfg, ent);
+
for (i = 0; i < res_cfg->num_imc; i++) {
rc = igen6_register_mci(i, mchbar, pdev);
if (rc)
@@ -1448,8 +1495,6 @@ static int __init igen6_init(void)
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
- edac_op_state = EDAC_OPSTATE_NMI;
-
rc = pci_register_driver(&igen6_driver);
if (rc)
return rc;
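[Editorial aside: igen6_check() above follows a common EDAC idiom — the log register is read and cleared in a context where full error reporting is unsafe, so the value is stashed and reporting is deferred through irq_work. A generic sketch of that deferral pattern with hypothetical names; the driver uses a gen_pool rather than a single slot:]

#include <linux/irq_work.h>

static u64 stashed_log;	/* single-slot stand-in for the driver's gen_pool */

static void report_deferred(struct irq_work *work)
{
	/* safe context: hand stashed_log to edac_mc_handle_error() and friends */
}

static struct irq_work deferred_work = IRQ_WORK_INIT(report_deferred);

static void poll_check(u64 log)
{
	if (!log)
		return;
	stashed_log = log;		/* record only; no locks, no allocation */
	irq_work_queue(&deferred_work);
}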
diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c
index 0d42c1238908..a2caa7fc5412 100644
--- a/drivers/edac/layerscape_edac.c
+++ b/drivers/edac/layerscape_edac.c
@@ -21,13 +21,14 @@
static const struct of_device_id fsl_ddr_mc_err_of_match[] = {
{ .compatible = "fsl,qoriq-memory-controller", },
+ { .compatible = "nxp,imx9-memory-controller", .data = (void *)TYPE_IMX9, },
{},
};
MODULE_DEVICE_TABLE(of, fsl_ddr_mc_err_of_match);
static struct platform_driver fsl_ddr_mc_err_driver = {
.probe = fsl_mc_err_probe,
- .remove_new = fsl_mc_err_remove,
+ .remove = fsl_mc_err_remove,
.driver = {
.name = "fsl_ddr_mc_err",
.of_match_table = fsl_ddr_mc_err_of_match,
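[Editorial aside: the .data = (void *)TYPE_IMX9 entry above is how the shared fsl_ddr code learns which register layout it is driving — fsl_mc_err_probe() reads it back via device_get_match_data() (see the pdata->flag hunk earlier), which then selects the iMX9 offsets in ddr_reg_addr(). A hedged sketch of that round trip:]

static bool is_imx9(struct platform_device *op)
{
	/* returns the .data pointer of the matched of_device_id entry */
	return (unsigned long)device_get_match_data(&op->dev) == TYPE_IMX9;
}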
diff --git a/drivers/edac/loongson_edac.c b/drivers/edac/loongson_edac.c
new file mode 100644
index 000000000000..38745800ed01
--- /dev/null
+++ b/drivers/edac/loongson_edac.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Loongson Technology Corporation Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/edac.h>
+#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "edac_module.h"
+
+#define ECC_CS_COUNT_REG 0x18
+
+struct loongson_edac_pvt {
+ void __iomem *ecc_base;
+
+ /*
+ * The ECC register in this controller records the number of errors
+ * encountered since reset and cannot be zeroed so in order to be able
+ * to report the error count at each check, this records the previous
+ * register state.
+ */
+ int last_ce_count;
+};
+
+static int read_ecc(struct mem_ctl_info *mci)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+ u64 ecc;
+ int cs;
+
+ ecc = readq(pvt->ecc_base + ECC_CS_COUNT_REG);
+ /* cs0 -- cs3 */
+ cs = ecc & 0xff;
+ cs += (ecc >> 8) & 0xff;
+ cs += (ecc >> 16) & 0xff;
+ cs += (ecc >> 24) & 0xff;
+
+ return cs;
+}
+
+static void edac_check(struct mem_ctl_info *mci)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+ int new, add;
+
+ new = read_ecc(mci);
+ add = new - pvt->last_ce_count;
+ pvt->last_ce_count = new;
+ if (add <= 0)
+ return;
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add,
+ 0, 0, 0, 0, 0, -1, "error", "");
+}
+
+static void dimm_config_init(struct mem_ctl_info *mci)
+{
+ struct dimm_info *dimm;
+ u32 size, npages;
+
+ /* size not used */
+ size = -1;
+ npages = MiB_TO_PAGES(size);
+
+ dimm = edac_get_dimm(mci, 0, 0, 0);
+ dimm->nr_pages = npages;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "MC#%uChannel#%u_DIMM#%u", mci->mc_idx, 0, 0);
+ dimm->grain = 8;
+}
+
+static void pvt_init(struct mem_ctl_info *mci, void __iomem *vbase)
+{
+ struct loongson_edac_pvt *pvt = mci->pvt_info;
+
+ pvt->ecc_base = vbase;
+ pvt->last_ce_count = read_ecc(mci);
+}
+
+static int edac_probe(struct platform_device *pdev)
+{
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ void __iomem *vbase;
+ int ret;
+
+ vbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vbase))
+ return PTR_ERR(vbase);
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct loongson_edac_pvt));
+ if (mci == NULL)
+ return -ENOMEM;
+
+ mci->mc_idx = edac_device_alloc_index();
+ mci->mtype_cap = MEM_FLAG_RDDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "loongson_edac.c";
+ mci->ctl_name = "loongson_edac_ctl";
+ mci->dev_name = "loongson_edac_dev";
+ mci->ctl_page_to_phys = NULL;
+ mci->pdev = &pdev->dev;
+ mci->error_desc.grain = 8;
+ mci->edac_check = edac_check;
+
+ pvt_init(mci, vbase);
+ dimm_config_init(mci);
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+ edac_mc_free(mci);
+ return ret;
+ }
+ edac_op_state = EDAC_OPSTATE_POLL;
+
+ return 0;
+}
+
+static void edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+
+ if (mci)
+ edac_mc_free(mci);
+}
+
+static const struct acpi_device_id loongson_edac_acpi_match[] = {
+ {"LOON0010", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, loongson_edac_acpi_match);
+
+static struct platform_driver loongson_edac_driver = {
+ .probe = edac_probe,
+ .remove = edac_remove,
+ .driver = {
+ .name = "loongson-mc-edac",
+ .acpi_match_table = loongson_edac_acpi_match,
+ },
+};
+module_platform_driver(loongson_edac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhao Qunqin <zhaoqunqin@loongson.cn>");
+MODULE_DESCRIPTION("EDAC driver for loongson memory controller");
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 8130c3dc64da..50d74d3bf0f5 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -793,7 +793,9 @@ static int
amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
struct mce *m = (struct mce *)data;
+ struct mce_hw_err *err = to_mce_hw_err(m);
unsigned int fam = x86_family(m->cpuid);
+ u32 mca_config_lo = 0, dummy;
int ecc;
if (m->kflags & MCE_HANDLED_CEC)
@@ -813,11 +815,9 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"));
if (boot_cpu_has(X86_FEATURE_SMCA)) {
- u32 low, high;
- u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
+ rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(m->bank), &mca_config_lo, &dummy);
- if (!rdmsr_safe(addr, &low, &high) &&
- (low & MCI_CONFIG_MCAX))
+ if (mca_config_lo & MCI_CONFIG_MCAX)
pr_cont("|%s", ((m->status & MCI_STATUS_TCC) ? "TCC" : "-"));
pr_cont("|%s", ((m->status & MCI_STATUS_SYNDV) ? "SyndV" : "-"));
@@ -850,8 +850,18 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (boot_cpu_has(X86_FEATURE_SMCA)) {
pr_emerg(HW_ERR "IPID: 0x%016llx", m->ipid);
- if (m->status & MCI_STATUS_SYNDV)
- pr_cont(", Syndrome: 0x%016llx", m->synd);
+ if (m->status & MCI_STATUS_SYNDV) {
+ pr_cont(", Syndrome: 0x%016llx\n", m->synd);
+ if (mca_config_lo & MCI_CONFIG_FRUTEXT) {
+ char frutext[17];
+
+ frutext[16] = '\0';
+ memcpy(&frutext[0], &err->vendor.amd.synd1, 8);
+ memcpy(&frutext[8], &err->vendor.amd.synd2, 8);
+
+ pr_emerg(HW_ERR "FRU Text: %s", frutext);
+ }
+ }
pr_cont("\n");
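[Editorial aside: the FRU-text hunk above assembles 16 ASCII characters from two 64-bit syndrome registers; a byte-wise memcpy preserves their in-memory order on little-endian x86. The same assembly as a standalone hedged sketch:]

static void fru_text_from_synds(u64 synd1, u64 synd2, char out[17])
{
	memcpy(&out[0], &synd1, 8);	/* characters 0-7 */
	memcpy(&out[8], &synd2, 8);	/* characters 8-15 */
	out[16] = '\0';
}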
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index d0266cbcbeda..a45dc6b35ede 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -323,7 +323,7 @@ static const struct platform_device_id mpc85xx_pci_err_match[] = {
static struct platform_driver mpc85xx_pci_err_driver = {
.probe = mpc85xx_pci_err_probe,
- .remove_new = mpc85xx_pci_err_remove,
+ .remove = mpc85xx_pci_err_remove,
.id_table = mpc85xx_pci_err_match,
.driver = {
.name = "mpc85xx_pci_err",
@@ -627,7 +627,7 @@ MODULE_DEVICE_TABLE(of, mpc85xx_l2_err_of_match);
static struct platform_driver mpc85xx_l2_err_driver = {
.probe = mpc85xx_l2_err_probe,
- .remove_new = mpc85xx_l2_err_remove,
+ .remove = mpc85xx_l2_err_remove,
.driver = {
.name = "mpc85xx_l2_err",
.of_match_table = mpc85xx_l2_err_of_match,
@@ -656,7 +656,7 @@ MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match);
static struct platform_driver mpc85xx_mc_err_driver = {
.probe = fsl_mc_err_probe,
- .remove_new = fsl_mc_err_remove,
+ .remove = fsl_mc_err_remove,
.driver = {
.name = "mpc85xx_mc_err",
.of_match_table = mpc85xx_mc_err_of_match,
diff --git a/drivers/edac/npcm_edac.c b/drivers/edac/npcm_edac.c
index 2e2133b784e9..e60a99eb8cfb 100644
--- a/drivers/edac/npcm_edac.c
+++ b/drivers/edac/npcm_edac.c
@@ -531,7 +531,7 @@ static struct platform_driver npcm_edac_driver = {
.of_match_table = npcm_edac_of_match,
},
.probe = edac_probe,
- .remove_new = edac_remove,
+ .remove = edac_remove,
};
module_platform_driver(npcm_edac_driver);
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index 2adb9c8093f8..e6b1595a3cb5 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -194,7 +194,7 @@ static void octeon_l2c_remove(struct platform_device *pdev)
static struct platform_driver octeon_l2c_driver = {
.probe = octeon_l2c_probe,
- .remove_new = octeon_l2c_remove,
+ .remove = octeon_l2c_remove,
.driver = {
.name = "octeon_l2c_edac",
}
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 4112c2ee34b8..f7176b95b4fe 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -312,7 +312,7 @@ static void octeon_lmc_edac_remove(struct platform_device *pdev)
static struct platform_driver octeon_lmc_edac_driver = {
.probe = octeon_lmc_edac_probe,
- .remove_new = octeon_lmc_edac_remove,
+ .remove = octeon_lmc_edac_remove,
.driver = {
.name = "octeon_lmc_edac",
}
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index d9eeb40d2784..aa1219db0b17 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -130,7 +130,7 @@ static void co_cache_error_remove(struct platform_device *pdev)
static struct platform_driver co_cache_error_driver = {
.probe = co_cache_error_probe,
- .remove_new = co_cache_error_remove,
+ .remove = co_cache_error_remove,
.driver = {
.name = "octeon_pc_edac",
}
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
index 4d368af2c5f0..c4f3bc33a971 100644
--- a/drivers/edac/octeon_edac-pci.c
+++ b/drivers/edac/octeon_edac-pci.c
@@ -97,7 +97,7 @@ static void octeon_pci_remove(struct platform_device *pdev)
static struct platform_driver octeon_pci_driver = {
.probe = octeon_pci_probe,
- .remove_new = octeon_pci_remove,
+ .remove = octeon_pci_remove,
.driver = {
.name = "octeon_pci_edac",
}
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index d3cd4cc54ace..04c42c83a2ba 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -342,9 +342,11 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
int ecc_irq;
int rc;
- rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
- if (rc)
- return rc;
+ if (!llcc_driv_data->ecc_irq_configured) {
+ rc = qcom_llcc_core_setup(llcc_driv_data, llcc_driv_data->bcast_regmap);
+ if (rc)
+ return rc;
+ }
/* Allocate edac control info */
edev_ctl = edac_device_alloc_ctl_info(0, "qcom-llcc", 1, "bank",
@@ -405,7 +407,7 @@ MODULE_DEVICE_TABLE(platform, qcom_llcc_edac_id_table);
static struct platform_driver qcom_llcc_edac_driver = {
.probe = qcom_llcc_edac_probe,
- .remove_new = qcom_llcc_edac_remove,
+ .remove = qcom_llcc_edac_remove,
.driver = {
.name = "qcom_llcc_edac",
},
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 14cfd394b469..29897b21fb8e 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -164,7 +164,7 @@ static struct res_config skx_cfg = {
};
static const struct x86_cpu_id skx_cpuids[] = {
- X86_MATCH_VFM_STEPPINGS(INTEL_SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_cfg),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
@@ -600,7 +600,7 @@ static int __init skx_init(void)
const struct munit *m;
const char *owner;
int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8};
- u8 mc = 0, src_id, node_id;
+ u8 mc = 0, src_id;
struct skx_dev *d;
edac_dbg(2, "\n");
@@ -650,15 +650,12 @@ static int __init skx_init(void)
rc = skx_get_src_id(d, 0xf0, &src_id);
if (rc < 0)
goto fail;
- rc = skx_get_node_id(d, &node_id);
- if (rc < 0)
- goto fail;
- edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
+
+ edac_dbg(2, "src_id = %d\n", src_id);
for (i = 0; i < SKX_NUM_IMC; i++) {
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
- d->imc[i].node_id = node_id;
rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
"Skylake Socket", EDAC_MOD_STR,
skx_get_dimm_config, cfg);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 85713646957b..f7bd930e058f 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -19,6 +19,7 @@
#include <linux/adxl.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
+#include <asm/uv/uv.h>
#include "edac_module.h"
#include "skx_common.h"
@@ -47,6 +48,7 @@ static skx_show_retry_log_f skx_show_retry_rd_err_log;
static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
static bool skx_mem_cfg_2lm;
+static struct res_config *skx_res_cfg;
int skx_adxl_get(void)
{
@@ -119,7 +121,7 @@ void skx_adxl_put(void)
}
EXPORT_SYMBOL_GPL(skx_adxl_put);
-static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_mem)
+static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
{
struct skx_dev *d;
int i, len = 0;
@@ -135,8 +137,24 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me
return false;
}
+ /*
+ * GNR with a Flat2LM memory configuration may mistakenly classify
+ * a near-memory error(DDR5) as a far-memory error(CXL), resulting
+ * in the incorrect selection of decoded ADXL components.
+ * To address this, prefetch the decoded far-memory controller ID
+ * and adjust the error source to near-memory if the far-memory
+ * controller ID is invalid.
+ */
+ if (skx_res_cfg && skx_res_cfg->type == GNR && err_src == ERR_SRC_2LM_FM) {
+ res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
+ if (res->imc == -1) {
+ err_src = ERR_SRC_2LM_NM;
+ edac_dbg(0, "Adjust the error source to near-memory.\n");
+ }
+ }
+
res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]];
- if (error_in_1st_level_mem) {
+ if (err_src == ERR_SRC_2LM_NM) {
res->imc = (adxl_nm_bitmap & BIT_NM_MEMCTRL) ?
(int)adxl_values[component_indices[INDEX_NM_MEMCTRL]] : -1;
res->channel = (adxl_nm_bitmap & BIT_NM_CHANNEL) ?
@@ -191,6 +209,12 @@ void skx_set_mem_cfg(bool mem_cfg_2lm)
}
EXPORT_SYMBOL_GPL(skx_set_mem_cfg);
+void skx_set_res_cfg(struct res_config *cfg)
+{
+ skx_res_cfg = cfg;
+}
+EXPORT_SYMBOL_GPL(skx_set_res_cfg);
+
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
driver_decode = decode;
@@ -198,33 +222,51 @@ void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
}
EXPORT_SYMBOL_GPL(skx_set_decode);
-int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
+static int skx_get_pkg_id(struct skx_dev *d, u8 *id)
{
- u32 reg;
+ int node;
+ int cpu;
- if (pci_read_config_dword(d->util_all, off, &reg)) {
- skx_printk(KERN_ERR, "Failed to read src id\n");
- return -ENODEV;
+ node = pcibus_to_node(d->util_all->bus);
+ if (numa_valid_node(node)) {
+ for_each_cpu(cpu, cpumask_of_pcibus(d->util_all->bus)) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->initialized && cpu_to_node(cpu) == node) {
+ *id = c->topo.pkg_id;
+ return 0;
+ }
+ }
}
- *id = GET_BITFIELD(reg, 12, 14);
- return 0;
+ skx_printk(KERN_ERR, "Failed to get package ID from NUMA information\n");
+ return -ENODEV;
}
-EXPORT_SYMBOL_GPL(skx_get_src_id);
-int skx_get_node_id(struct skx_dev *d, u8 *id)
+int skx_get_src_id(struct skx_dev *d, int off, u8 *id)
{
u32 reg;
- if (pci_read_config_dword(d->util_all, 0xf4, &reg)) {
- skx_printk(KERN_ERR, "Failed to read node id\n");
+ /*
+ * The 3-bit source IDs in PCI configuration space registers are limited
+ * to 8 unique IDs, and each ID is local to a UPI/QPI domain.
+ *
+ * Source IDs cannot be used to map devices to sockets on UV systems
+ * because they can exceed 8 sockets and have multiple UPI/QPI domains
+ * with identical, repeating source IDs.
+ */
+ if (is_uv_system())
+ return skx_get_pkg_id(d, id);
+
+ if (pci_read_config_dword(d->util_all, off, &reg)) {
+ skx_printk(KERN_ERR, "Failed to read src id\n");
return -ENODEV;
}
- *id = GET_BITFIELD(reg, 0, 2);
+ *id = GET_BITFIELD(reg, 12, 14);
return 0;
}
-EXPORT_SYMBOL_GPL(skx_get_node_id);
+EXPORT_SYMBOL_GPL(skx_get_src_id);
static int get_width(u32 mtr)
{
@@ -484,7 +526,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
pvt->imc = imc;
mci->ctl_name = kasprintf(GFP_KERNEL, "%s#%d IMC#%d", ctl_name,
- imc->node_id, imc->lmc);
+ imc->src_id, imc->lmc);
if (!mci->ctl_name) {
rc = -ENOMEM;
goto fail0;
@@ -620,31 +662,27 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
optype, skx_msg);
}
-static bool skx_error_in_1st_level_mem(const struct mce *m)
+static enum error_source skx_error_source(const struct mce *m)
{
- u32 errcode;
+ u32 errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
- if (!skx_mem_cfg_2lm)
- return false;
+ if (errcode != MCACOD_MEM_CTL_ERR && errcode != MCACOD_EXT_MEM_ERR)
+ return ERR_SRC_NOT_MEMORY;
- errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
-
- return errcode == MCACOD_EXT_MEM_ERR;
-}
-
-static bool skx_error_in_mem(const struct mce *m)
-{
- u32 errcode;
+ if (!skx_mem_cfg_2lm)
+ return ERR_SRC_1LM;
- errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
+ if (errcode == MCACOD_EXT_MEM_ERR)
+ return ERR_SRC_2LM_NM;
- return (errcode == MCACOD_MEM_CTL_ERR || errcode == MCACOD_EXT_MEM_ERR);
+ return ERR_SRC_2LM_FM;
}
int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
void *data)
{
struct mce *mce = (struct mce *)data;
+ enum error_source err_src;
struct decoded_addr res;
struct mem_ctl_info *mci;
char *type;
@@ -652,8 +690,10 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
if (mce->kflags & MCE_HANDLED_CEC)
return NOTIFY_DONE;
+ err_src = skx_error_source(mce);
+
/* Ignore unless this is memory related with an address */
- if (!skx_error_in_mem(mce) || !(mce->status & MCI_STATUS_ADDRV))
+ if (err_src == ERR_SRC_NOT_MEMORY || !(mce->status & MCI_STATUS_ADDRV))
return NOTIFY_DONE;
memset(&res, 0, sizeof(res));
@@ -667,7 +707,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
/* Try driver decoder first */
if (!(driver_decode && driver_decode(&res))) {
/* Then try firmware decoder (ACPI DSM methods) */
- if (!(adxl_component_count && skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce))))
+ if (!(adxl_component_count && skx_adxl_decode(&res, err_src)))
return NOTIFY_DONE;
}
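[Editorial aside: skx_error_source() condenses the two old boolean helpers into a single classifier. Restated as a decision table (the MCACOD_* masks are defined in skx_common.h, not shown in this hunk):

  errcode is neither MCACOD_MEM_CTL_ERR nor MCACOD_EXT_MEM_ERR -> ERR_SRC_NOT_MEMORY
  memory errcode, 1LM configuration                            -> ERR_SRC_1LM
  2LM and errcode == MCACOD_EXT_MEM_ERR (near memory)          -> ERR_SRC_2LM_NM
  2LM and errcode == MCACOD_MEM_CTL_ERR (far memory)           -> ERR_SRC_2LM_FM]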
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index f945c1bf5ca4..b0845bdd4516 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -103,7 +103,7 @@ struct skx_dev {
bool hbm_mc;
u8 mc; /* system wide mc# */
u8 lmc; /* socket relative mc# */
- u8 src_id, node_id;
+ u8 src_id;
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
@@ -146,6 +146,13 @@ enum {
INDEX_MAX
};
+enum error_source {
+ ERR_SRC_1LM,
+ ERR_SRC_2LM_NM,
+ ERR_SRC_2LM_FM,
+ ERR_SRC_NOT_MEMORY,
+};
+
#define BIT_NM_MEMCTRL BIT_ULL(INDEX_NM_MEMCTRL)
#define BIT_NM_CHANNEL BIT_ULL(INDEX_NM_CHANNEL)
#define BIT_NM_DIMM BIT_ULL(INDEX_NM_DIMM)
@@ -234,9 +241,9 @@ int skx_adxl_get(void);
void skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
void skx_set_mem_cfg(bool mem_cfg_2lm);
+void skx_set_res_cfg(struct res_config *cfg);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
-int skx_get_node_id(struct skx_dev *d, u8 *id);
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list);
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index d7416166fd8a..5ed32a3299c4 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -1488,7 +1488,7 @@ static struct platform_driver synps_edac_mc_driver = {
.of_match_table = synps_edac_match,
},
.probe = mc_probe,
- .remove_new = mc_remove,
+ .remove = mc_remove,
};
module_platform_driver(synps_edac_mc_driver);
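The .remove_new member was a transition alias; with the tree-wide conversion done, platform drivers wire their void-returning teardown callback straight to .remove, as the many hunks below all do. A minimal sketch with a hypothetical driver name:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;	/* acquire resources here */
}

static void demo_remove(struct platform_device *pdev)
{
	/* release resources; the callback returns nothing */
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,	/* formerly .remove_new */
	.driver	= {
		.name = "demo-device",
	},
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("Sketch of the void-returning remove callback");
MODULE_LICENSE("GPL");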
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 29723c9592f7..39cc2ef9cac4 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -322,7 +322,7 @@ static void ti_edac_remove(struct platform_device *pdev)
static struct platform_driver ti_edac_driver = {
.probe = ti_edac_probe,
- .remove_new = ti_edac_remove,
+ .remove = ti_edac_remove,
.driver = {
.name = EDAC_MOD_NAME,
.of_match_table = ti_edac_of_match,
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
index a556d23e8261..5a43b5d43ca2 100644
--- a/drivers/edac/versal_edac.c
+++ b/drivers/edac/versal_edac.c
@@ -1186,7 +1186,7 @@ static struct platform_driver xilinx_ddr_edac_mc_driver = {
.of_match_table = xlnx_edac_match,
},
.probe = mc_probe,
- .remove_new = mc_remove,
+ .remove = mc_remove,
};
module_platform_driver(xilinx_ddr_edac_mc_driver);
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index fd87f1b2c145..699c7d29d80c 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1989,7 +1989,7 @@ MODULE_DEVICE_TABLE(of, xgene_edac_of_match);
static struct platform_driver xgene_edac_driver = {
.probe = xgene_edac_probe,
- .remove_new = xgene_edac_remove,
+ .remove = xgene_edac_remove,
.driver = {
.name = "xgene-edac",
.of_match_table = xgene_edac_of_match,
diff --git a/drivers/edac/zynqmp_edac.c b/drivers/edac/zynqmp_edac.c
index c9dc78d8c824..cdffc9e4194d 100644
--- a/drivers/edac/zynqmp_edac.c
+++ b/drivers/edac/zynqmp_edac.c
@@ -455,7 +455,7 @@ static struct platform_driver zynqmp_ocm_edac_driver = {
.of_match_table = zynqmp_ocm_edac_match,
},
.probe = edac_probe,
- .remove_new = edac_remove,
+ .remove = edac_remove,
};
module_platform_driver(zynqmp_ocm_edac_driver);
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 125016da7fde..46c40d85c2ac 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -196,7 +196,7 @@ static SIMPLE_DEV_PM_OPS(adc_jack_pm_ops,
static struct platform_driver adc_jack_driver = {
.probe = adc_jack_probe,
- .remove_new = adc_jack_remove,
+ .remove = adc_jack_remove,
.driver = {
.name = "adc-jack",
.pm = &adc_jack_pm_ops,
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index e458ce0c45ab..b11b43171063 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -350,7 +350,7 @@ static const struct dev_pm_ops fsa9480_pm_ops = {
};
static const struct i2c_device_id fsa9480_id[] = {
- { "fsa9480", 0 },
+ { "fsa9480" },
{}
};
MODULE_DEVICE_TABLE(i2c, fsa9480_id);
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 93552dc3c895..8131a3d7d562 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -627,7 +627,7 @@ MODULE_DEVICE_TABLE(platform, cht_wc_extcon_table);
static struct platform_driver cht_wc_extcon_driver = {
.probe = cht_wc_extcon_probe,
- .remove_new = cht_wc_extcon_remove,
+ .remove = cht_wc_extcon_remove,
.id_table = cht_wc_extcon_table,
.driver = {
.name = "cht_wcove_pwrsrc",
diff --git a/drivers/extcon/extcon-intel-mrfld.c b/drivers/extcon/extcon-intel-mrfld.c
index a1f737f13d49..9219f4328d70 100644
--- a/drivers/extcon/extcon-intel-mrfld.c
+++ b/drivers/extcon/extcon-intel-mrfld.c
@@ -275,7 +275,7 @@ static struct platform_driver mrfld_extcon_driver = {
.name = "mrfld_bcove_pwrsrc",
},
.probe = mrfld_extcon_probe,
- .remove_new = mrfld_extcon_remove,
+ .remove = mrfld_extcon_remove,
.id_table = mrfld_extcon_id_table,
};
module_platform_driver(mrfld_extcon_driver);
diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c
index e62ce7a8d131..b2ee4ff8b04d 100644
--- a/drivers/extcon/extcon-max3355.c
+++ b/drivers/extcon/extcon-max3355.c
@@ -127,7 +127,7 @@ MODULE_DEVICE_TABLE(of, max3355_match_table);
static struct platform_driver max3355_driver = {
.probe = max3355_probe,
- .remove_new = max3355_remove,
+ .remove = max3355_remove,
.driver = {
.name = "extcon-max3355",
.of_match_table = max3355_match_table,
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 9849e3b8327e..2ae9f7f1a67f 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -956,7 +956,7 @@ static struct platform_driver max77843_muic_driver = {
.of_match_table = of_max77843_muic_dt_match,
},
.probe = max77843_muic_probe,
- .remove_new = max77843_muic_remove,
+ .remove = max77843_muic_remove,
.id_table = max77843_muic_id,
};
diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c
index 4616da7e5430..78ad86c4a3be 100644
--- a/drivers/extcon/extcon-ptn5150.c
+++ b/drivers/extcon/extcon-ptn5150.c
@@ -338,7 +338,7 @@ static const struct of_device_id ptn5150_dt_match[] = {
MODULE_DEVICE_TABLE(of, ptn5150_dt_match);
static const struct i2c_device_id ptn5150_i2c_id[] = {
- { "ptn5150", 0 },
+ { "ptn5150" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ptn5150_i2c_id);
diff --git a/drivers/extcon/extcon-rtk-type-c.c b/drivers/extcon/extcon-rtk-type-c.c
index 19a01e663733..82b60b927e41 100644
--- a/drivers/extcon/extcon-rtk-type-c.c
+++ b/drivers/extcon/extcon-rtk-type-c.c
@@ -1369,6 +1369,8 @@ static int extcon_rtk_type_c_probe(struct platform_device *pdev)
}
type_c->type_c_cfg = devm_kzalloc(dev, sizeof(*type_c_cfg), GFP_KERNEL);
+ if (!type_c->type_c_cfg)
+ return -ENOMEM;
memcpy(type_c->type_c_cfg, type_c_cfg, sizeof(*type_c_cfg));
@@ -1778,7 +1780,7 @@ static const struct dev_pm_ops extcon_rtk_type_c_pm_ops = {
static struct platform_driver extcon_rtk_type_c_driver = {
.probe = extcon_rtk_type_c_probe,
- .remove_new = extcon_rtk_type_c_remove,
+ .remove = extcon_rtk_type_c_remove,
.driver = {
.name = "extcon-rtk-type_c",
.of_match_table = extcon_rtk_type_c_match,
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 9b61eb99b7dc..5e8ad21ad206 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -279,7 +279,7 @@ MODULE_DEVICE_TABLE(platform, usb_extcon_platform_ids);
static struct platform_driver usb_extcon_driver = {
.probe = usb_extcon_probe,
- .remove_new = usb_extcon_remove,
+ .remove = usb_extcon_remove,
.driver = {
.name = "extcon-usb-gpio",
.pm = &usb_extcon_pm_ops,
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 805a47230689..1fb627ea8b50 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -529,7 +529,7 @@ static struct platform_driver extcon_cros_ec_driver = {
.of_match_table = of_match_ptr(extcon_cros_ec_of_match),
.pm = DEV_PM_OPS,
},
- .remove_new = extcon_cros_ec_remove,
+ .remove = extcon_cros_ec_remove,
.probe = extcon_cros_ec_probe,
};
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index a99fe35f1f0d..ec3e21ad2025 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -988,7 +988,7 @@ int fw_device_set_broadcast_channel(struct device *dev, void *gen)
return 0;
}
-static int compare_configuration_rom(struct device *dev, void *data)
+static int compare_configuration_rom(struct device *dev, const void *data)
{
const struct fw_device *old = fw_device(dev);
const u32 *config_rom = data;
@@ -1039,7 +1039,7 @@ static void fw_device_init(struct work_struct *work)
//
// serialize config_rom access.
scoped_guard(rwsem_read, &fw_device_rwsem) {
- found = device_find_child(card->device, (void *)device->config_rom,
+ found = device_find_child(card->device, device->config_rom,
compare_configuration_rom);
}
if (found) {
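device_find_child() now takes a const void * cookie and a match callback with a matching const signature, which is why the cast at the call site disappears. A small sketch of the constified callback; the match-by-name helper is hypothetical:

static int match_by_name(struct device *dev, const void *data)
{
	const char *name = data;	/* no cast needed anymore */

	return sysfs_streq(dev_name(dev), name);
}

/* Usage, assuming the caller drops the reference when done:
 *
 *	struct device *child = device_find_child(parent, "fw0", match_by_name);
 *	if (child)
 *		put_device(child);
 */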
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 6adadb11962e..74a6aa7d8cc9 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -56,7 +56,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
* two cases: either the path goes through this node, in which case
* the hop count is the sum of the two biggest child depths plus 2.
* Or it could be the case that the max hop path is entirely
- * containted in a child tree, in which case the max hop count is just
+ * contained in a child tree, in which case the max hop count is just
* the max hop count of this child.
*/
static void update_hop_count(struct fw_node *node)
@@ -204,7 +204,7 @@ static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self
// the node->ports array where the parent node should be. Later,
// when we handle the parent node, we fix up the reference.
++parent_count;
- node->color = i;
+ node->color = port_index;
break;
case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0ae2c84ecafe..9b298af1cac0 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -80,7 +80,7 @@ struct fw_card_driver {
/*
* Allow the specified node ID to do direct DMA out and in of
host memory. The card will disable this for all nodes when
- * a bus reset happens, so driver need to reenable this after
+ * a bus reset happens, so the driver needs to re-enable this after

* bus reset. Returns 0 on success, -ENODEV if the card
* doesn't support this, -ESTALE if the generation doesn't
* match.
diff --git a/drivers/firewire/device-attribute-test.c b/drivers/firewire/device-attribute-test.c
index 2f123c6b0a16..97478a96d1c9 100644
--- a/drivers/firewire/device-attribute-test.c
+++ b/drivers/firewire/device-attribute-test.c
@@ -99,6 +99,7 @@ static void device_attr_simple_avc(struct kunit *test)
struct device *unit0_dev = (struct device *)&unit0.device;
static const int unit0_expected_ids[] = {0x00ffffff, 0x00ffffff, 0x0000a02d, 0x00010001};
char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
int ids[4] = {0, 0, 0, 0};
// Ensure associations for node and unit devices.
@@ -180,6 +181,7 @@ static void device_attr_legacy_avc(struct kunit *test)
struct device *unit0_dev = (struct device *)&unit0.device;
static const int unit0_expected_ids[] = {0x00012345, 0x00fedcba, 0x00abcdef, 0x00543210};
char *buf = kunit_kzalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
int ids[4] = {0, 0, 0, 0};
// Ensure associations for node and unit devices.
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7ee55c2804de..edaedd156a6d 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1384,7 +1384,7 @@ struct driver_data {
};
/*
- * This function apppends a packet to the DMA queue for transmission.
+ * This function appends a packet to the DMA queue for transmission.
* Must always be called with the ohci->lock held to ensure proper
* generation handling and locking around packet queue manipulation.
*/
@@ -2213,7 +2213,7 @@ static irqreturn_t irq_handler(int irq, void *data)
if (unlikely(param_debug > 0)) {
dev_notice_ratelimited(ohci->card.device,
- "The debug parameter is superceded by tracepoints events, and deprecated.");
+ "The debug parameter is superseded by tracepoints events, and deprecated.");
}
/*
@@ -2614,7 +2614,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* ConfigRomHeader and BusOptions doesn't honor the
* noByteSwapData bit, so with a be32 config rom, the
* controller will load be32 values in to these registers
- * during the atomic update, even on litte endian
+ * during the atomic update, even on little endian
* architectures. The workaround we use is to put a 0 in the
* header quadlet; 0 is endian agnostic and means that the
* config rom isn't ready yet. In the bus reset tasklet we
@@ -3301,8 +3301,7 @@ static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
}
}
-#ifdef CONFIG_PM
-static void ohci_resume_iso_dma(struct fw_ohci *ohci)
+static void __maybe_unused ohci_resume_iso_dma(struct fw_ohci *ohci)
{
int i;
struct iso_context *ctx;
@@ -3319,7 +3318,6 @@ static void ohci_resume_iso_dma(struct fw_ohci *ohci)
ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
}
}
-#endif
static int queue_iso_transmit(struct iso_context *ctx,
struct fw_iso_packet *packet,
@@ -3726,12 +3724,11 @@ static int pci_probe(struct pci_dev *dev,
return -ENXIO;
}
- err = pcim_iomap_regions(dev, 1 << 0, ohci_driver_name);
- if (err) {
+ ohci->registers = pcim_iomap_region(dev, 0, ohci_driver_name);
+ if (IS_ERR(ohci->registers)) {
ohci_err(ohci, "request and map MMIO resource unavailable\n");
return -ENXIO;
}
- ohci->registers = pcim_iomap_table(dev)[0];
for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
if ((ohci_quirks[i].vendor == dev->vendor) &&
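pcim_iomap_region() collapses the request-and-map dance into one devres call that hands back the mapping (or an ERR_PTR) directly, removing the pcim_iomap_table() lookup. A hedged sketch of the resulting probe shape:

static int demo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	regs = pcim_iomap_region(pdev, 0, "demo");	/* BAR 0 */
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* the mapping lives until the device is unbound; no manual unmap */
	writel(0, regs);	/* example access */
	return 0;
}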
@@ -3889,39 +3886,25 @@ static void pci_remove(struct pci_dev *dev)
dev_notice(&dev->dev, "removing fw-ohci device\n");
}
-#ifdef CONFIG_PM
-static int pci_suspend(struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused pci_suspend(struct device *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(dev);
- int err;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
software_reset(ohci);
- err = pci_save_state(dev);
- if (err) {
- ohci_err(ohci, "pci_save_state failed\n");
- return err;
- }
- err = pci_set_power_state(dev, pci_choose_state(dev, state));
- if (err)
- ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
- pmac_ohci_off(dev);
+ pmac_ohci_off(pdev);
return 0;
}
-static int pci_resume(struct pci_dev *dev)
+
+static int __maybe_unused pci_resume(struct device *dev)
{
- struct fw_ohci *ohci = pci_get_drvdata(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
int err;
- pmac_ohci_on(dev);
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
- err = pci_enable_device(dev);
- if (err) {
- ohci_err(ohci, "pci_enable_device failed\n");
- return err;
- }
+ pmac_ohci_on(pdev);
/* Some systems don't set up the GUID register on resume from RAM */
if (!reg_read(ohci, OHCI1394_GUIDLo) &&
@@ -3938,7 +3921,6 @@ static int pci_resume(struct pci_dev *dev)
return 0;
}
-#endif
static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
@@ -3947,15 +3929,14 @@ static const struct pci_device_id pci_table[] = {
MODULE_DEVICE_TABLE(pci, pci_table);
+static SIMPLE_DEV_PM_OPS(pci_pm_ops, pci_suspend, pci_resume);
+
static struct pci_driver fw_ohci_pci_driver = {
.name = ohci_driver_name,
.id_table = pci_table,
.probe = pci_probe,
.remove = pci_remove,
-#ifdef CONFIG_PM
- .resume = pci_resume,
- .suspend = pci_suspend,
-#endif
+ .driver.pm = &pci_pm_ops,
};
static int __init fw_ohci_init(void)
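Moving from the legacy PCI .suspend/.resume hooks to dev_pm_ops lets the PCI core own pci_save_state(), pci_restore_state() and the power-state transitions, which is why those calls vanish from the ohci callbacks above. The general shape, as a sketch reusing the hypothetical probe from earlier:

static int __maybe_unused demo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* quiesce the controller only; the PCI core saves state */
	pci_clear_master(pdev);
	return 0;
}

static int __maybe_unused demo_resume(struct device *dev)
{
	/* re-program device registers; state is already restored */
	return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_pci_driver = {
	.name		= "demo",
	.probe		= demo_pci_probe,	/* from the sketch above */
	.driver.pm	= &demo_pm_ops,
};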
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 827dee0f57dd..1a19828114cf 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1490,7 +1490,7 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
return retval;
}
-static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
+static int sbp2_scsi_sdev_init(struct scsi_device *sdev)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1506,8 +1506,8 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int sbp2_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int sbp2_scsi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
@@ -1590,8 +1590,8 @@ static const struct scsi_host_template scsi_driver_template = {
.name = "SBP-2 IEEE-1394",
.proc_name = "sbp2",
.queuecommand = sbp2_scsi_queuecommand,
- .slave_alloc = sbp2_scsi_slave_alloc,
- .device_configure = sbp2_scsi_device_configure,
+ .sdev_init = sbp2_scsi_sdev_init,
+ .sdev_configure = sbp2_scsi_sdev_configure,
.eh_abort_handler = sbp2_scsi_abort,
.this_id = -1,
.sg_tablesize = SG_ALL,
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 71d8b26c4103..9f35f69e0f9e 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -106,7 +106,7 @@ config ISCSI_IBFT
select ISCSI_BOOT_SYSFS
select ISCSI_IBFT_FIND if X86
depends on ACPI && SCSI && SCSI_LOWLEVEL
- default n
+ default n
help
This option enables support for detection and exposing of iSCSI
Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index eb17d03b66fe..dfda5ffc14db 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -187,13 +187,18 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
return valid;
}
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
- const struct ffa_ops *ops)
+struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops)
{
int id, ret;
+ uuid_t uuid;
struct device *dev;
struct ffa_device *ffa_dev;
+ if (!part_info)
+ return NULL;
+
id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
if (id < 0)
return NULL;
@@ -210,9 +215,11 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;
- ffa_dev->vm_id = vm_id;
+ ffa_dev->vm_id = part_info->id;
+ ffa_dev->properties = part_info->properties;
ffa_dev->ops = ops;
- uuid_copy(&ffa_dev->uuid, uuid);
+ import_uuid(&uuid, (u8 *)part_info->uuid);
+ uuid_copy(&ffa_dev->uuid, &uuid);
ret = device_register(&ffa_dev->dev);
if (ret) {
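Callers now hand the whole discovery record to the bus, which copies out vm_id, properties and the UUID itself, so the follow-up property assignment in the setup loop (removed further down) becomes unnecessary. A hedged usage sketch:

static int demo_register_partition(const struct ffa_partition_info *tpbuf,
				   const struct ffa_ops *ops)
{
	struct ffa_device *ffa_dev;

	ffa_dev = ffa_device_register(tpbuf, ops);	/* NULL on failure */
	if (!ffa_dev)
		return -ENODEV;

	/* vm_id, properties and uuid were filled in by the bus */
	return 0;
}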
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 4d231bc375e0..2c2ec3c35f15 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -481,11 +481,16 @@ static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid,
struct ffa_send_direct_data2 *data)
{
u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ union {
+ uuid_t uuid;
+ __le64 regs[2];
+ } uuid_regs = { .uuid = *uuid };
ffa_value_t ret, args = {
- .a0 = FFA_MSG_SEND_DIRECT_REQ2, .a1 = src_dst_ids,
+ .a0 = FFA_MSG_SEND_DIRECT_REQ2,
+ .a1 = src_dst_ids,
+ .a2 = le64_to_cpu(uuid_regs.regs[0]),
+ .a3 = le64_to_cpu(uuid_regs.regs[1]),
};
-
- export_uuid((u8 *)&args.a2, uuid);
memcpy((void *)&args + offsetof(ffa_value_t, a4), data, sizeof(*data));
invoke_ffa_fn(args, &ret);
@@ -496,7 +501,7 @@ static int ffa_msg_send_direct_req2(u16 src_id, u16 dst_id, const uuid_t *uuid,
return ffa_to_linux_errno((int)ret.a2);
if (ret.a0 == FFA_MSG_SEND_DIRECT_RESP2) {
- memcpy(data, &ret.a4, sizeof(*data));
+ memcpy(data, (void *)&ret + offsetof(ffa_value_t, a4), sizeof(*data));
return 0;
}
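The union replaces export_uuid() so the two register words are explicitly little-endian views of the UUID bytes; le64_to_cpu() then keeps the wire layout identical on big-endian CPUs. A sketch of the packing in isolation:

#include <linux/uuid.h>

static void demo_pack_uuid_into_regs(const uuid_t *uuid, u64 *a2, u64 *a3)
{
	union {
		uuid_t uuid;
		__le64 regs[2];
	} u = { .uuid = *uuid };

	/* byte 0 of the UUID lands in the lowest byte of a2 */
	*a2 = le64_to_cpu(u.regs[0]);
	*a3 = le64_to_cpu(u.regs[1]);
}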
@@ -1382,7 +1387,6 @@ static struct notifier_block ffa_bus_nb = {
static int ffa_setup_partitions(void)
{
int count, idx, ret;
- uuid_t uuid;
struct ffa_device *ffa_dev;
struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf;
@@ -1401,23 +1405,19 @@ static int ffa_setup_partitions(void)
xa_init(&drv_info->partition_info);
for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
- import_uuid(&uuid, (u8 *)tpbuf->uuid);
-
/* Note that if the UUID is uuid_null, ffa_bus_notifier() will need
* to find the UUID of this partition ID with the help of
* ffa_device_match_uuid(). FF-A v1.1 and above provides the UUID
* here for each partition as part of the discovery API, and that
* is what gets passed.
*/
- ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
+ ffa_dev = ffa_device_register(tpbuf, &ffa_drv_ops);
if (!ffa_dev) {
pr_err("%s: failed to register partition ID 0x%x\n",
__func__, tpbuf->id);
continue;
}
- ffa_dev->properties = tpbuf->properties;
-
if (drv_info->version > FFA_VERSION_1_0 &&
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 96b2e5f9a8ef..a3386bf36de5 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -238,10 +238,10 @@ static int scmi_dev_match(struct device *dev, const struct device_driver *drv)
return 0;
}
-static int scmi_match_by_id_table(struct device *dev, void *data)
+static int scmi_match_by_id_table(struct device *dev, const void *data)
{
struct scmi_device *sdev = to_scmi_dev(dev);
- struct scmi_device_id *id_table = data;
+ const struct scmi_device_id *id_table = data;
return sdev->protocol_id == id_table->protocol_id &&
(id_table->name && !strcmp(sdev->name, id_table->name));
@@ -325,7 +325,10 @@ EXPORT_SYMBOL_GPL(scmi_driver_unregister);
static void scmi_device_release(struct device *dev)
{
- kfree(to_scmi_dev(dev));
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ kfree_const(scmi_dev->name);
+ kfree(scmi_dev);
}
static void __scmi_device_destroy(struct scmi_device *scmi_dev)
@@ -338,7 +341,6 @@ static void __scmi_device_destroy(struct scmi_device *scmi_dev)
if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM)
atomic_set(&scmi_syspower_registered, 0);
- kfree_const(scmi_dev->name);
ida_free(&scmi_bus_id, scmi_dev->id);
device_unregister(&scmi_dev->dev);
}
@@ -410,7 +412,6 @@ __scmi_device_create(struct device_node *np, struct device *parent,
return scmi_dev;
put_dev:
- kfree_const(scmi_dev->name);
put_device(&scmi_dev->dev);
ida_free(&scmi_bus_id, id);
return NULL;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index c4b8e7ff88aa..10ea7962323e 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -31,6 +31,8 @@
#define SCMI_MAX_RESPONSE_TIMEOUT (2 * MSEC_PER_SEC)
+#define SCMI_SHMEM_MAX_PAYLOAD_SIZE 104
+
enum scmi_error_codes {
SCMI_SUCCESS = 0, /* Success */
SCMI_ERR_SUPPORT = -1, /* Not supported */
@@ -163,7 +165,9 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* used to initialize this channel
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
+ * @is_p2a: A flag to identify a channel as P2A (RX)
* @rx_timeout_ms: The configured RX timeout in milliseconds.
+ * @max_msg_size: Maximum size of message payload.
* @handle: Pointer to SCMI entity handle
* @no_completion_irq: Flag to indicate that this channel has no completion
* interrupt mechanism for synchronous commands.
@@ -174,7 +178,9 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
struct scmi_chan_info {
int id;
struct device *dev;
+ bool is_p2a;
unsigned int rx_timeout_ms;
+ unsigned int max_msg_size;
struct scmi_handle *handle;
bool no_completion_irq;
void *transport_info;
@@ -222,7 +228,13 @@ struct scmi_transport_ops {
* @max_msg: Maximum number of messages for a channel type (tx or rx) that can
* be pending simultaneously in the system. May be overridden by the
* get_max_msg op.
- * @max_msg_size: Maximum size of data per message that can be handled.
+ * @max_msg_size: Maximum size of data payload per message that can be handled.
+ * @atomic_threshold: Optional system wide DT-configured threshold, expressed
+ * in microseconds, for atomic operations.
+ * Only SCMI synchronous commands reported by the platform
+ * to have an execution latency lesser-equal to the threshold
+ * should be considered for atomic mode operation: such
+ * decision is finally left up to the SCMI drivers.
* @force_polling: Flag to force this whole transport to use SCMI core polling
* mechanism instead of completion interrupts even if available.
* @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
@@ -241,6 +253,7 @@ struct scmi_desc {
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
+ unsigned int atomic_threshold;
const bool force_polling;
const bool sync_cmds_completed_on_ret;
const bool atomic_enabled;
@@ -309,6 +322,26 @@ enum scmi_bad_msg {
MSG_MBOX_SPURIOUS = -5,
};
+/* Used for compactness and signature validation of the function pointers being
+ * passed.
+ */
+typedef void (*shmem_copy_toio_t)(void __iomem *to, const void *from,
+ size_t count);
+typedef void (*shmem_copy_fromio_t)(void *to, const void __iomem *from,
+ size_t count);
+
+/**
+ * struct scmi_shmem_io_ops - I/O operations to read from/write to
+ * Shared Memory
+ *
+ * @toio: Copy data to the shared memory area
+ * @fromio: Copy data from the shared memory area
+ */
+struct scmi_shmem_io_ops {
+ shmem_copy_fromio_t fromio;
+ shmem_copy_toio_t toio;
+};
+
/* shmem related declarations */
struct scmi_shared_mem;
@@ -329,13 +362,16 @@ struct scmi_shared_mem;
struct scmi_shared_mem_operations {
void (*tx_prepare)(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer,
- struct scmi_chan_info *cinfo);
+ struct scmi_chan_info *cinfo,
+ shmem_copy_toio_t toio);
u32 (*read_header)(struct scmi_shared_mem __iomem *shmem);
void (*fetch_response)(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer);
+ struct scmi_xfer *xfer,
+ shmem_copy_fromio_t fromio);
void (*fetch_notification)(struct scmi_shared_mem __iomem *shmem,
- size_t max_len, struct scmi_xfer *xfer);
+ size_t max_len, struct scmi_xfer *xfer,
+ shmem_copy_fromio_t fromio);
void (*clear_channel)(struct scmi_shared_mem __iomem *shmem);
bool (*poll_done)(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
@@ -343,7 +379,8 @@ struct scmi_shared_mem_operations {
bool (*channel_intr_enabled)(struct scmi_shared_mem __iomem *shmem);
void __iomem *(*setup_iomap)(struct scmi_chan_info *cinfo,
struct device *dev,
- bool tx, struct resource *res);
+ bool tx, struct resource *res,
+ struct scmi_shmem_io_ops **ops);
};
const struct scmi_shared_mem_operations *scmi_shared_mem_operations_get(void);
@@ -405,7 +442,7 @@ struct scmi_transport_core_operations {
*/
struct scmi_transport {
struct device *supplier;
- struct scmi_desc *desc;
+ struct scmi_desc desc;
struct scmi_transport_core_operations **core_ops;
};
@@ -431,7 +468,7 @@ static int __tag##_probe(struct platform_device *pdev) \
device_set_of_node_from_dev(&spdev->dev, dev); \
\
strans.supplier = dev; \
- strans.desc = &(__desc); \
+ memcpy(&strans.desc, &(__desc), sizeof(strans.desc)); \
strans.core_ops = &(__core_ops); \
\
ret = platform_device_add_data(spdev, &strans, sizeof(strans)); \
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 88c5c4ff4bb6..60050da54bf2 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
@@ -43,6 +44,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
+#define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s"
+
static DEFINE_IDA(scmi_id);
static DEFINE_XARRAY(scmi_protocols);
@@ -149,12 +152,6 @@ struct scmi_debug_info {
* base protocol
* @active_protocols: IDR storing device_nodes for protocols actually defined
* in the DT and confirmed as implemented by fw.
- * @atomic_threshold: Optional system wide DT-configured threshold, expressed
- * in microseconds, for atomic operations.
- * Only SCMI synchronous commands reported by the platform
- * to have an execution latency lesser-equal to the threshold
- * should be considered for atomic mode operation: such
- * decision is finally left up to the SCMI drivers.
* @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
@@ -180,7 +177,6 @@ struct scmi_info {
struct mutex protocols_mtx;
u8 *protocols_imp;
struct idr active_protocols;
- unsigned int atomic_threshold;
void *notify_priv;
struct list_head node;
int users;
@@ -283,6 +279,44 @@ scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
}
static const struct scmi_protocol *
+scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version)
+{
+ const struct scmi_protocol *proto;
+
+ proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
+ if (!proto) {
+ int ret;
+
+ pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n",
+ protocol_id, version->vendor_id);
+
+ /* Note that vendor_id is mandatory for vendor protocols */
+ ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT,
+ protocol_id, version->vendor_id);
+ if (ret) {
+ pr_warn("Problem loading module for protocol 0x%x\n",
+ protocol_id);
+ return NULL;
+ }
+
+ /* Lookup again, once modules loaded */
+ proto = scmi_vendor_protocol_lookup(protocol_id,
+ version->vendor_id,
+ version->sub_vendor_id,
+ version->impl_ver);
+ }
+
+ if (proto)
+ pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
+ protocol_id, proto->vendor_id ?: "",
+ proto->sub_vendor_id ?: "", proto->impl_ver);
+
+ return proto;
+}
+
+static const struct scmi_protocol *
scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
{
const struct scmi_protocol *proto = NULL;
@@ -290,10 +324,8 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
proto = xa_load(&scmi_protocols, protocol_id);
else
- proto = scmi_vendor_protocol_lookup(protocol_id,
- version->vendor_id,
- version->sub_vendor_id,
- version->impl_ver);
+ proto = scmi_vendor_protocol_get(protocol_id, version);
+
if (!proto || !try_module_get(proto->owner)) {
pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
return NULL;
@@ -301,11 +333,6 @@ scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
- if (protocol_id >= SCMI_PROTOCOL_VENDOR_BASE)
- pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
- protocol_id, proto->vendor_id ?: "",
- proto->sub_vendor_id ?: "", proto->impl_ver);
-
return proto;
}
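The lookup, request_module(), lookup-again pattern gives vendor protocols on-demand autoloading, provided the module advertises an alias in exactly the format the core requests (see the MODULE_ALIAS lines added to the i.MX protocols further down). A sketch of the matching pair, using a hypothetical protocol ID 0x81:

/* core side: expands to request_module("scmi-protocol-0x81-NXP") */
static int demo_autoload(int protocol_id, const char *vendor_id)
{
	return request_module(SCMI_VENDOR_MODULE_ALIAS_FMT,
			      protocol_id, vendor_id);
}

/* module side: must stringify to the very same name */
MODULE_ALIAS("scmi-protocol-0x81-NXP");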
@@ -373,7 +400,9 @@ int scmi_protocol_register(const struct scmi_protocol *proto)
return ret;
}
- pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
+ pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n",
+ proto->id, proto->vendor_id, proto->sub_vendor_id,
+ proto->impl_ver);
return 0;
}
@@ -1048,6 +1077,11 @@ static inline void scmi_xfer_command_release(struct scmi_info *info,
static inline void scmi_clear_channel(struct scmi_info *info,
struct scmi_chan_info *cinfo)
{
+ if (!cinfo->is_p2a) {
+ dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
+ return;
+ }
+
if (info->desc->ops->clear_channel)
info->desc->ops->clear_channel(cinfo);
}
@@ -2440,7 +2474,7 @@ static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
ret = info->desc->atomic_enabled &&
is_transport_polling_capable(info->desc);
if (ret && atomic_threshold)
- *atomic_threshold = info->atomic_threshold;
+ *atomic_threshold = info->desc->atomic_threshold;
return ret;
}
@@ -2638,7 +2672,9 @@ static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
if (!cinfo)
return -ENOMEM;
+ cinfo->is_p2a = !tx;
cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
+ cinfo->max_msg_size = info->desc->max_msg_size;
/* Create a unique name for this transport device */
snprintf(name, 32, "__scmi_transport_device_%s_%02X",
@@ -2952,7 +2988,7 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
(char **)&dbg->name);
debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
- &info->atomic_threshold);
+ (u32 *)&info->desc->atomic_threshold);
debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
@@ -2976,10 +3012,8 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
dbg->top_dentry = top_dentry;
if (devm_add_action_or_reset(info->dev,
- scmi_debugfs_common_cleanup, dbg)) {
- scmi_debugfs_common_cleanup(dbg);
+ scmi_debugfs_common_cleanup, dbg))
return NULL;
- }
return dbg;
}
@@ -3030,7 +3064,7 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
int ret;
trans = dev_get_platdata(dev);
- if (!trans || !trans->desc || !trans->supplier || !trans->core_ops)
+ if (!trans || !trans->supplier || !trans->core_ops)
return NULL;
if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
@@ -3044,15 +3078,34 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev)
dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
- ret = of_property_read_u32(dev->of_node, "max-rx-timeout-ms",
- &trans->desc->max_rx_timeout_ms);
+ ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
+ &trans->desc.max_rx_timeout_ms);
+ if (ret && ret != -EINVAL)
+ dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
+
+ ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
+ &trans->desc.max_msg_size);
if (ret && ret != -EINVAL)
- dev_err(dev, "Malformed max-rx-timeout-ms DT property.\n");
+ dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
- dev_info(dev, "SCMI max-rx-timeout: %dms\n",
- trans->desc->max_rx_timeout_ms);
+ ret = of_property_read_u32(dev->of_node, "arm,max-msg",
+ &trans->desc.max_msg);
+ if (ret && ret != -EINVAL)
+ dev_err(dev, "Malformed arm,max-msg DT property.\n");
+
+ dev_info(dev,
+ "SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
+ trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size,
+ trans->desc.max_msg);
- return trans->desc;
+ /* System wide atomic threshold for atomic ops .. if any */
+ if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
+ &trans->desc.atomic_threshold))
+ dev_info(dev,
+ "SCMI System wide atomic threshold set to %u us\n",
+ trans->desc.atomic_threshold);
+
+ return &trans->desc;
}
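All three reads rely on of_property_read_u32() leaving the output untouched and returning -EINVAL when a property is absent, so the transport's compiled-in defaults survive unless the DT overrides them; only genuinely malformed properties are reported. The idiom in isolation, with a hypothetical helper name:

static u32 demo_dt_override(struct device *dev, const char *prop, u32 def)
{
	u32 val = def;
	int ret;

	ret = of_property_read_u32(dev->of_node, prop, &val);
	if (ret && ret != -EINVAL)	/* present but malformed */
		dev_err(dev, "Malformed %s DT property.\n", prop);

	return val;	/* 'def' when the property is simply missing */
}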
static int scmi_probe(struct platform_device *pdev)
@@ -3101,13 +3154,6 @@ static int scmi_probe(struct platform_device *pdev)
handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
-
- /* System wide atomic threshold for atomic ops .. if any */
- if (!of_property_read_u32(np, "atomic-threshold-us",
- &info->atomic_threshold))
- dev_info(dev,
- "SCMI System wide atomic threshold set to %d us\n",
- info->atomic_threshold);
handle->is_transport_atomic = scmi_is_transport_atomic;
/* Setup all channels described in the DT at first */
@@ -3323,7 +3369,7 @@ static struct platform_driver scmi_driver = {
.dev_groups = versions_groups,
},
.probe = scmi_probe,
- .remove_new = scmi_remove,
+ .remove = scmi_remove,
};
static struct dentry *scmi_debugfs_init(void)
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 2d77b5f40ca7..c7e5a34b254b 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -373,7 +373,7 @@ static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
return 0;
}
-static inline void
+static inline int
process_response_opp(struct device *dev, struct perf_dom_info *dom,
struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels *r)
@@ -386,12 +386,16 @@ process_response_opp(struct device *dev, struct perf_dom_info *dom,
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
- if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ if (ret) {
+ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
opp->perf, dom->info.name, ret);
+ return ret;
+ }
+
+ return 0;
}
-static inline void
+static inline int
process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels_v4 *r)
@@ -404,9 +408,11 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
- if (ret)
- dev_warn(dev, "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
+ if (ret) {
+ dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
opp->perf, dom->info.name, ret);
+ return ret;
+ }
/* Note that PERF v4 reports always five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
@@ -415,13 +421,21 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
GFP_KERNEL);
- if (ret)
+ if (ret) {
dev_warn(dev,
"Failed to add opps_by_idx at %d for %s - ret:%d\n",
opp->level_index, dom->info.name, ret);
+ /* Cleanup by_lvl too */
+ xa_erase(&dom->opps_by_lvl, opp->perf);
+
+ return ret;
+ }
+
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
+
+ return 0;
}
static int
@@ -429,16 +443,22 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
+ int ret;
struct scmi_opp *opp;
struct scmi_perf_ipriv *p = priv;
- opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
+ opp = &p->perf_dom->opp[p->perf_dom->opp_count];
if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
- process_response_opp(ph->dev, p->perf_dom, opp, st->loop_idx,
- response);
+ ret = process_response_opp(ph->dev, p->perf_dom, opp,
+ st->loop_idx, response);
else
- process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
- response);
+ ret = process_response_opp_v4(ph->dev, p->perf_dom, opp,
+ st->loop_idx, response);
+
+ /* Skip BAD duplicates received from firmware */
+ if (ret)
+ return ret == -EBUSY ? 0 : ret;
+
p->perf_dom->opp_count++;
dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
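The skip-on--EBUSY logic leans on xa_insert() refusing to overwrite an occupied index, which is exactly what a duplicate perf level from buggy firmware produces. The idiom in isolation:

#include <linux/xarray.h>

static int demo_insert_opp(struct xarray *xa, unsigned long level, void *opp)
{
	int ret = xa_insert(xa, level, opp, GFP_KERNEL);

	if (ret == -EBUSY)	/* index taken: a duplicate from firmware */
		return 0;	/* skip it, keep the first entry */

	return ret;
}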
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 9e89a6a763da..7cc0d616b8de 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -886,10 +886,8 @@ static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
{
- u8 id;
struct scmi_raw_mode_info *raw;
struct scmi_dbg_raw_data *rd;
- const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
if (!inode->i_private)
return -ENODEV;
@@ -915,8 +913,8 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
}
/* Grab channel ID from debugfs entry naming if any */
- if (!kstrtou8(id_str, 16, &id))
- rd->chan_id = id;
+ /* If not set, keep the 0 we already had after kzalloc() */
+ rd->chan_id = debugfs_get_aux_num(filp);
rd->raw = raw;
filp->private_data = rd;
@@ -1225,10 +1223,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle,
snprintf(cdir, 8, "0x%02X", channels[i]);
chd = debugfs_create_dir(cdir, top_chans);
- debugfs_create_file("message", 0600, chd, raw,
+ debugfs_create_file_aux_num("message", 0600, chd,
+ raw, channels[i],
&scmi_dbg_raw_mode_message_fops);
- debugfs_create_file("message_async", 0600, chd, raw,
+ debugfs_create_file_aux_num("message_async", 0600, chd,
+ raw, channels[i],
&scmi_dbg_raw_mode_message_async_fops);
}
}
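The aux-number debugfs API attaches the channel ID at file-creation time and reads it back in open(), removing the fragile parse of the parent directory name. Both helpers below are used exactly as in the hunk above; the open() body itself is only a sketch:

static int demo_open(struct inode *inode, struct file *filp)
{
	/* returns the number given at creation, or 0 if none was set */
	unsigned long chan_id = debugfs_get_aux_num(filp);

	pr_debug("opened channel 0x%02lx\n", chan_id);
	return 0;
}

/* creation side, mirroring the calls above:
 *
 *	debugfs_create_file_aux_num("message", 0600, chd, raw,
 *				    channels[i], &fops);
 */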
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index 01d8a9398fe8..11c347bff766 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -16,6 +16,8 @@
#include "common.h"
+#define SCMI_SHMEM_LAYOUT_OVERHEAD 24
+
/*
* SCMI specification requires all parameters, message headers, return
* arguments or any protocol data to be expressed in little endian
@@ -34,9 +36,59 @@ struct scmi_shared_mem {
u8 msg_payload[];
};
+static inline void shmem_memcpy_fromio32(void *to,
+ const void __iomem *from,
+ size_t count)
+{
+ WARN_ON(!IS_ALIGNED((unsigned long)from, 4) ||
+ !IS_ALIGNED((unsigned long)to, 4) ||
+ count % 4);
+
+ __ioread32_copy(to, from, count / 4);
+}
+
+static inline void shmem_memcpy_toio32(void __iomem *to,
+ const void *from,
+ size_t count)
+{
+ WARN_ON(!IS_ALIGNED((unsigned long)to, 4) ||
+ !IS_ALIGNED((unsigned long)from, 4) ||
+ count % 4);
+
+ __iowrite32_copy(to, from, count / 4);
+}
+
+static struct scmi_shmem_io_ops shmem_io_ops32 = {
+ .fromio = shmem_memcpy_fromio32,
+ .toio = shmem_memcpy_toio32,
+};
+
+/* Wrappers are needed for proper memcpy_{from,to}_io expansion by the
+ * pre-processor.
+ */
+static inline void shmem_memcpy_fromio(void *to,
+ const void __iomem *from,
+ size_t count)
+{
+ memcpy_fromio(to, from, count);
+}
+
+static inline void shmem_memcpy_toio(void __iomem *to,
+ const void *from,
+ size_t count)
+{
+ memcpy_toio(to, from, count);
+}
+
+static struct scmi_shmem_io_ops shmem_io_ops_default = {
+ .fromio = shmem_memcpy_fromio,
+ .toio = shmem_memcpy_toio,
+};
+
static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer,
- struct scmi_chan_info *cinfo)
+ struct scmi_chan_info *cinfo,
+ shmem_copy_toio_t copy_toio)
{
ktime_t stop;
@@ -73,7 +125,7 @@ static void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
if (xfer->tx.buf)
- memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
+ copy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
@@ -82,7 +134,8 @@ static u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
}
static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
- struct scmi_xfer *xfer)
+ struct scmi_xfer *xfer,
+ shmem_copy_fromio_t copy_fromio)
{
size_t len = ioread32(&shmem->length);
@@ -91,11 +144,12 @@ static void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
/* Take a copy to the rx buffer.. */
- memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
+ copy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
- size_t max_len, struct scmi_xfer *xfer)
+ size_t max_len, struct scmi_xfer *xfer,
+ shmem_copy_fromio_t copy_fromio)
{
size_t len = ioread32(&shmem->length);
@@ -103,7 +157,7 @@ static void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
/* Take a copy to the rx buffer.. */
- memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+ copy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
static void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
@@ -139,7 +193,8 @@ static bool shmem_channel_intr_enabled(struct scmi_shared_mem __iomem *shmem)
static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
struct device *dev, bool tx,
- struct resource *res)
+ struct resource *res,
+ struct scmi_shmem_io_ops **ops)
{
struct device_node *shmem __free(device_node);
const char *desc = tx ? "Tx" : "Rx";
@@ -148,6 +203,7 @@ static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
struct resource lres = {};
resource_size_t size;
void __iomem *addr;
+ u32 reg_io_width;
shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
if (!shmem)
@@ -167,12 +223,27 @@ static void __iomem *shmem_setup_iomap(struct scmi_chan_info *cinfo,
}
size = resource_size(res);
+ if (cinfo->max_msg_size + SCMI_SHMEM_LAYOUT_OVERHEAD > size) {
+ dev_err(dev, "misconfigured SCMI shared memory\n");
+ return IOMEM_ERR_PTR(-ENOSPC);
+ }
+
addr = devm_ioremap(dev, res->start, size);
if (!addr) {
dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
return IOMEM_ERR_PTR(-EADDRNOTAVAIL);
}
+ of_property_read_u32(shmem, "reg-io-width", &reg_io_width);
+ switch (reg_io_width) {
+ case 4:
+ *ops = &shmem_io_ops32;
+ break;
+ default:
+ *ops = &shmem_io_ops_default;
+ break;
+ }
+
return addr;
}
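Selecting the I/O ops from DT keeps the common fast path (plain memcpy_toio/memcpy_fromio) while letting shmem regions that only tolerate naturally aligned 32-bit accesses opt in to the __iowrite32_copy()/__ioread32_copy() pair via reg-io-width = <4>. One detail worth making explicit in a sketch: initialising the local to 0 guarantees the default ops whenever the property is absent:

static struct scmi_shmem_io_ops *demo_pick_io_ops(struct device_node *shmem)
{
	u32 reg_io_width = 0;	/* absent property => default ops */

	of_property_read_u32(shmem, "reg-io-width", &reg_io_width);

	return reg_io_width == 4 ? &shmem_io_ops32 : &shmem_io_ops_default;
}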
diff --git a/drivers/firmware/arm_scmi/transports/Makefile b/drivers/firmware/arm_scmi/transports/Makefile
index 362a406f08e6..3ba3d3bee151 100644
--- a/drivers/firmware/arm_scmi/transports/Makefile
+++ b/drivers/firmware/arm_scmi/transports/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-scmi_transport_mailbox-objs := mailbox.o
-obj-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += scmi_transport_mailbox.o
+# Keep this before scmi_transport_mailbox.o so it takes precedence
+# when matching the compatible.
scmi_transport_smc-objs := smc.o
obj-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += scmi_transport_smc.o
+scmi_transport_mailbox-objs := mailbox.o
+obj-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += scmi_transport_mailbox.o
scmi_transport_optee-objs := optee.o
obj-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += scmi_transport_optee.o
scmi_transport_virtio-objs := virtio.o
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index 1a754dee24f7..bd041c99b92b 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -25,6 +25,8 @@
* @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
+ * @chan_lock: Lock that prevents multiple xfers from being queued
+ * @io_ops: Transport specific I/O operations
*/
struct scmi_mailbox {
struct mbox_client cl;
@@ -33,6 +35,8 @@ struct scmi_mailbox {
struct mbox_chan *chan_platform_receiver;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
+ struct mutex chan_lock;
+ struct scmi_shmem_io_ops *io_ops;
};
#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
@@ -43,7 +47,8 @@ static void tx_prepare(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
- core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo);
+ core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo,
+ smbox->io_ops->toio);
}
static void rx_callback(struct mbox_client *cl, void *m)
@@ -195,7 +200,8 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!smbox)
return -ENOMEM;
- smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL);
+ smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL,
+ &smbox->io_ops);
if (IS_ERR(smbox->shmem))
return PTR_ERR(smbox->shmem);
@@ -238,6 +244,7 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
cinfo->transport_info = smbox;
smbox->cinfo = cinfo;
+ mutex_init(&smbox->chan_lock);
return 0;
}
@@ -267,13 +274,23 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo,
struct scmi_mailbox *smbox = cinfo->transport_info;
int ret;
- ret = mbox_send_message(smbox->chan, xfer);
+ /*
+ * The mailbox layer has its own queue. However the mailbox queue
+ * confuses the per message SCMI timeouts since the clock starts when
+ * the message is submitted into the mailbox queue. So when multiple
+ * messages are queued up the clock starts on all messages instead of
+ * only the one inflight.
+ */
+ mutex_lock(&smbox->chan_lock);
- /* mbox_send_message returns non-negative value on success, so reset */
- if (ret > 0)
- ret = 0;
+ ret = mbox_send_message(smbox->chan, xfer);
+ /* mbox_send_message returns non-negative value on success */
+ if (ret < 0) {
+ mutex_unlock(&smbox->chan_lock);
+ return ret;
+ }
- return ret;
+ return 0;
}
static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -281,13 +298,10 @@ static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
{
struct scmi_mailbox *smbox = cinfo->transport_info;
- /*
- * NOTE: we might prefer not to need the mailbox ticker to manage the
- * transfer queueing since the protocol layer queues things by itself.
- * Unfortunately, we have to kick the mailbox framework after we have
- * received our message.
- */
mbox_client_txdone(smbox->chan, ret);
+
+ /* Release channel */
+ mutex_unlock(&smbox->chan_lock);
}
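The net effect is a classic hold-across-completion pattern: the mutex is taken before the xfer enters the mailbox queue and released only from mark_txdone(), so at most one message is ever in flight and the SCMI timeout clock runs only against that message. The pairing in isolation, as a sketch:

static int demo_send(struct scmi_mailbox *smbox, void *xfer)
{
	int ret;

	mutex_lock(&smbox->chan_lock);

	ret = mbox_send_message(smbox->chan, xfer);
	if (ret < 0) {	/* nothing queued: release immediately */
		mutex_unlock(&smbox->chan_lock);
		return ret;
	}

	return 0;	/* unlocked later, from demo_txdone() */
}

static void demo_txdone(struct scmi_mailbox *smbox, int ret)
{
	mbox_client_txdone(smbox->chan, ret);
	mutex_unlock(&smbox->chan_lock);
}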
static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
@@ -295,7 +309,7 @@ static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
{
struct scmi_mailbox *smbox = cinfo->transport_info;
- core->shmem->fetch_response(smbox->shmem, xfer);
+ core->shmem->fetch_response(smbox->shmem, xfer, smbox->io_ops->fromio);
}
static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
@@ -303,7 +317,8 @@ static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
{
struct scmi_mailbox *smbox = cinfo->transport_info;
- core->shmem->fetch_notification(smbox->shmem, max_len, xfer);
+ core->shmem->fetch_notification(smbox->shmem, max_len, xfer,
+ smbox->io_ops->fromio);
}
static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
@@ -356,13 +371,14 @@ static struct scmi_desc scmi_mailbox_desc = {
.ops = &scmi_mailbox_ops,
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
- .max_msg_size = 128,
+ .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
};
static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi" },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, scmi_of_match);
DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver,
scmi_mailbox_desc, scmi_of_match, core);
diff --git a/drivers/firmware/arm_scmi/transports/optee.c b/drivers/firmware/arm_scmi/transports/optee.c
index 56fc63edf51e..3949a877e17d 100644
--- a/drivers/firmware/arm_scmi/transports/optee.c
+++ b/drivers/firmware/arm_scmi/transports/optee.c
@@ -17,8 +17,6 @@
#include "../common.h"
-#define SCMI_OPTEE_MAX_MSG_SIZE 128
-
enum scmi_optee_pta_cmd {
/*
* PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities
@@ -114,6 +112,7 @@ enum scmi_optee_pta_cmd {
* @req.shmem: Virtual base address of the shared memory
* @req.msg: Shared memory protocol handle for SCMI request and
* synchronous response
+ * @io_ops: Transport specific I/O operations
* @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem
* @link: Reference in agent's channel list
*/
@@ -128,6 +127,7 @@ struct scmi_optee_channel {
struct scmi_shared_mem __iomem *shmem;
struct scmi_msg_payld *msg;
} req;
+ struct scmi_shmem_io_ops *io_ops;
struct tee_shm *tee_shm;
struct list_head link;
};
@@ -297,7 +297,7 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t
param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
param[2].u.memref.shm = channel->tee_shm;
- param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
+ param[2].u.memref.size = SCMI_SHMEM_MAX_PAYLOAD_SIZE;
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret < 0 || arg.ret) {
@@ -330,7 +330,7 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel)
{
- const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE;
+ const size_t msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE;
void *shbuf;
channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size);
@@ -350,7 +350,8 @@ static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *ch
static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel)
{
- channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL);
+ channel->req.shmem = core->shmem->setup_iomap(cinfo, dev, true, NULL,
+ &channel->io_ops);
if (IS_ERR(channel->req.shmem))
return PTR_ERR(channel->req.shmem);
@@ -465,7 +466,8 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
ret = invoke_process_msg_channel(channel,
core->msg->command_size(xfer));
} else {
- core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo);
+ core->shmem->tx_prepare(channel->req.shmem, xfer, cinfo,
+ channel->io_ops->toio);
ret = invoke_process_smt_channel(channel);
}
@@ -484,7 +486,8 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
core->msg->fetch_response(channel->req.msg,
channel->rx_len, xfer);
else
- core->shmem->fetch_response(channel->req.shmem, xfer);
+ core->shmem->fetch_response(channel->req.shmem, xfer,
+ channel->io_ops->fromio);
}
static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -514,7 +517,7 @@ static struct scmi_desc scmi_optee_desc = {
.ops = &scmi_optee_ops,
.max_rx_timeout_ms = 30,
.max_msg = 20,
- .max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE,
+ .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
.sync_cmds_completed_on_ret = true,
};
diff --git a/drivers/firmware/arm_scmi/transports/smc.c b/drivers/firmware/arm_scmi/transports/smc.c
index f8dd108777f9..21abb571e4f2 100644
--- a/drivers/firmware/arm_scmi/transports/smc.c
+++ b/drivers/firmware/arm_scmi/transports/smc.c
@@ -45,6 +45,7 @@
* @irq: An optional IRQ for completion
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
+ * @io_ops: Transport specific I/O operations
* @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
* Used when NOT operating in atomic mode.
* @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
@@ -60,6 +61,7 @@ struct scmi_smc {
int irq;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
+ struct scmi_shmem_io_ops *io_ops;
/* Protect access to shmem area */
struct mutex shmem_lock;
#define INFLIGHT_NONE MSG_TOKEN_MAX
@@ -144,7 +146,8 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!scmi_info)
return -ENOMEM;
- scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res);
+ scmi_info->shmem = core->shmem->setup_iomap(cinfo, dev, tx, &res,
+ &scmi_info->io_ops);
if (IS_ERR(scmi_info->shmem))
return PTR_ERR(scmi_info->shmem);
@@ -229,7 +232,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
*/
smc_channel_lock_acquire(scmi_info, xfer);
- core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo);
+ core->shmem->tx_prepare(scmi_info->shmem, xfer, cinfo,
+ scmi_info->io_ops->toio);
if (scmi_info->cap_id != ULONG_MAX)
arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0,
@@ -253,7 +257,8 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo,
{
struct scmi_smc *scmi_info = cinfo->transport_info;
- core->shmem->fetch_response(scmi_info->shmem, xfer);
+ core->shmem->fetch_response(scmi_info->shmem, xfer,
+ scmi_info->io_ops->fromio);
}
static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -277,7 +282,7 @@ static struct scmi_desc scmi_smc_desc = {
.ops = &scmi_smc_ops,
.max_rx_timeout_ms = 30,
.max_msg = 20,
- .max_msg_size = 128,
+ .max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
/*
* Setting .sync_cmds_atomic_replies to true for SMC assumes that,
* once the SMC instruction has completed successfully, the issued
@@ -296,6 +301,7 @@ static const struct of_device_id scmi_of_match[] = {
{ .compatible = "qcom,scmi-smc" },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, scmi_of_match);
DEFINE_SCMI_TRANSPORT_DRIVER(scmi_smc, scmi_smc_driver, scmi_smc_desc,
scmi_of_match, core);
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index d349766bc0b2..cb934db9b2b4 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -32,8 +32,8 @@
#define VIRTIO_MAX_RX_TIMEOUT_MS 60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
-#define VIRTIO_SCMI_MAX_PDU_SIZE \
- (VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
+#define VIRTIO_SCMI_MAX_PDU_SIZE(ci) \
+ ((ci)->max_msg_size + SCMI_MSG_MAX_PROT_OVERHEAD)
#define DESCRIPTORS_PER_TX_MSG 2
/**
@@ -90,6 +90,7 @@ enum poll_states {
* @input: SDU used for (delayed) responses and notifications
* @list: List which scmi_vio_msg may be part of
* @rx_len: Input SDU size in bytes, once input has been received
* @max_len: Maximum allowed SDU size in bytes
* @poll_idx: Last used index registered for polling purposes if this message
* transaction reply was configured for polling.
* @poll_status: Polling state for this message.
@@ -102,6 +103,7 @@ struct scmi_vio_msg {
struct scmi_msg_payld *input;
struct list_head list;
unsigned int rx_len;
+ unsigned int max_len;
unsigned int poll_idx;
enum poll_states poll_status;
/* Lock to protect access to poll_status */
@@ -234,7 +236,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
unsigned long flags;
struct device *dev = &vioch->vqueue->vdev->dev;
- sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);
+ sg_init_one(&sg_in, msg->input, msg->max_len);
spin_lock_irqsave(&vioch->lock, flags);
@@ -439,9 +441,9 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
if (!msg)
return -ENOMEM;
+ msg->max_len = VIRTIO_SCMI_MAX_PDU_SIZE(cinfo);
if (tx) {
- msg->request = devm_kzalloc(dev,
- VIRTIO_SCMI_MAX_PDU_SIZE,
+ msg->request = devm_kzalloc(dev, msg->max_len,
GFP_KERNEL);
if (!msg->request)
return -ENOMEM;
@@ -449,8 +451,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
refcount_set(&msg->users, 1);
}
- msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
- GFP_KERNEL);
+ msg->input = devm_kzalloc(dev, msg->max_len, GFP_KERNEL);
if (!msg->input)
return -ENOMEM;
@@ -920,6 +921,7 @@ static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
{ 0 }
};
+MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_scmi_driver = {
.driver.name = "scmi-virtio",
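With VIRTIO_SCMI_MAX_PDU_SIZE() now parameterized by the channel info, SDU buffers track the per-channel max_msg_size rather than the fixed 128-byte default. A worked example of the sizing — the numbers are illustrative only, not the real constant values:

	/* Illustrative: a channel negotiating a 256-byte max_msg_size with,
	 * say, a 24-byte SCMI_MSG_MAX_PROT_OVERHEAD gets 280-byte SDU buffers. */
	msg->max_len = VIRTIO_SCMI_MAX_PDU_SIZE(cinfo);	/* 256 + 24 = 280 */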
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
index 2883ed24a84d..a01bf5e47301 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Kconfig
+++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
@@ -15,6 +15,7 @@ config IMX_SCMI_BBM_EXT
config IMX_SCMI_MISC_EXT
tristate "i.MX SCMI MISC EXTENSION"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_MISC_DRV
default y if ARCH_MXC
help
This enables i.MX System MISC control logic such as gpio expander
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
index 17799eacf06c..aa176c1a5eef 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-bbm.c
@@ -374,10 +374,11 @@ static const struct scmi_protocol scmi_imx_bbm = {
.ops = &scmi_imx_bbm_proto_ops,
.events = &scmi_imx_bbm_protocol_events,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
- .vendor_id = "NXP",
- .sub_vendor_id = "IMX",
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
};
module_scmi_protocol(scmi_imx_bbm);
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_BBM) "-" SCMI_IMX_VENDOR);
MODULE_DESCRIPTION("i.MX SCMI BBM driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index a86ab9b35953..83b69fc4fba5 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -309,10 +309,11 @@ static const struct scmi_protocol scmi_imx_misc = {
.ops = &scmi_imx_misc_proto_ops,
.events = &scmi_imx_misc_protocol_events,
.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
- .vendor_id = "NXP",
- .sub_vendor_id = "IMX",
+ .vendor_id = SCMI_IMX_VENDOR,
+ .sub_vendor_id = SCMI_IMX_SUBVENDOR,
};
module_scmi_protocol(scmi_imx_misc);
+MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_MISC) "-" SCMI_IMX_VENDOR);
MODULE_DESCRIPTION("i.MX SCMI MISC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 94a6b4e667de..87c323de17b9 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -630,6 +630,9 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
if (ret)
return ERR_PTR(ret);
+ if (!buf.opp_count)
+ return ERR_PTR(-ENOENT);
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
@@ -1046,7 +1049,7 @@ static struct platform_driver scpi_driver = {
.dev_groups = versions_groups,
},
.probe = scpi_probe,
- .remove_new = scpi_remove,
+ .remove = scpi_remove,
};
module_platform_driver(scpi_driver);
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 285fe7ad490d..3e8051fe8296 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -763,7 +763,7 @@ static int sdei_device_freeze(struct device *dev)
int err;
/* unregister private events */
- cpuhp_remove_state(sdei_entry_point);
+ cpuhp_remove_state(sdei_hp_state);
err = sdei_unregister_shared();
if (err)
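For context, the bug was that cpuhp_remove_state() was handed sdei_entry_point instead of the dynamic hotplug state saved at registration. A minimal sketch of the intended pattern, assuming the state was registered with CPUHP_AP_ONLINE_DYN (callback names borrowed from the driver; the setup/teardown wrappers are hypothetical):

	static int sdei_hp_state;	/* dynamic state handle saved at setup */

	static int example_setup(void)
	{
		/* For CPUHP_AP_ONLINE_DYN the allocated state id is returned */
		int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
					    sdei_cpuhp_up, sdei_cpuhp_down);
		if (ret < 0)
			return ret;
		sdei_hp_state = ret;
		return 0;
	}

	static void example_teardown(void)
	{
		cpuhp_remove_state(sdei_hp_state);	/* the saved handle */
	}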
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
index 3ccbe14e4b0c..0a883091259a 100644
--- a/drivers/firmware/cirrus/Kconfig
+++ b/drivers/firmware/cirrus/Kconfig
@@ -3,3 +3,21 @@
config FW_CS_DSP
tristate
default n
+
+config FW_CS_DSP_KUNIT_TEST_UTILS
+ tristate
+ depends on KUNIT && REGMAP
+ select FW_CS_DSP
+
+config FW_CS_DSP_KUNIT_TEST
+ tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS
+ depends on KUNIT && REGMAP
+ default KUNIT_ALL_TESTS
+ select FW_CS_DSP
+ select FW_CS_DSP_KUNIT_TEST_UTILS
+ help
+ This builds KUnit tests for cs_dsp.
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+ If in doubt, say "N".
diff --git a/drivers/firmware/cirrus/Makefile b/drivers/firmware/cirrus/Makefile
index b91318ca0ff4..b32dfa869491 100644
--- a/drivers/firmware/cirrus/Makefile
+++ b/drivers/firmware/cirrus/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
#
obj-$(CONFIG_FW_CS_DSP) += cs_dsp.o
+
+obj-y += test/
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 419220fa42fd..5365e9a43000 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -378,7 +378,7 @@ const char *cs_dsp_mem_region_name(unsigned int type)
return NULL;
}
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, "FW_CS_DSP");
#ifdef CONFIG_DEBUG_FS
static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s)
@@ -519,7 +519,7 @@ void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
dsp->debugfs_root = root;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, "FW_CS_DSP");
/**
* cs_dsp_cleanup_debugfs() - Removes DSP representation from debugfs
@@ -531,17 +531,17 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
debugfs_remove_recursive(dsp->debugfs_root);
dsp->debugfs_root = ERR_PTR(-ENODEV);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, "FW_CS_DSP");
#else
void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
{
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, "FW_CS_DSP");
void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
{
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, "FW_CS_DSP");
static inline void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp,
const char *s)
@@ -749,7 +749,7 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_acked_control, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_acked_control, "FW_CS_DSP");
static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
unsigned int off, const void *buf, size_t len)
@@ -827,7 +827,7 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
return 1;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, "FW_CS_DSP");
/**
* cs_dsp_coeff_lock_and_write_ctrl() - Writes the given buffer to the given coefficient control
@@ -926,7 +926,7 @@ int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, "FW_CS_DSP");
/**
* cs_dsp_coeff_lock_and_read_ctrl() - Reads the given coefficient control into the given buffer
@@ -1679,7 +1679,7 @@ struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, in
return rslt;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_get_ctl, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_get_ctl, "FW_CS_DSP");
static void cs_dsp_ctl_fixup_base(struct cs_dsp *dsp,
const struct cs_dsp_alg_region *alg_region)
@@ -1769,7 +1769,7 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
return NULL;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_find_alg_region, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_find_alg_region, "FW_CS_DSP");
static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
int type, __be32 id,
@@ -2404,7 +2404,7 @@ int cs_dsp_adsp1_init(struct cs_dsp *dsp)
return cs_dsp_common_init(dsp);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, "FW_CS_DSP");
/**
* cs_dsp_adsp1_power_up() - Load and start the named firmware
@@ -2496,7 +2496,7 @@ err_mutex:
mutex_unlock(&dsp->pwr_lock);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_up, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_up, "FW_CS_DSP");
/**
* cs_dsp_adsp1_power_down() - Halts the DSP
@@ -2528,7 +2528,7 @@ void cs_dsp_adsp1_power_down(struct cs_dsp *dsp)
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_down, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_down, "FW_CS_DSP");
static int cs_dsp_adsp2v2_enable_core(struct cs_dsp *dsp)
{
@@ -2680,7 +2680,7 @@ int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_set_dspclk, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_set_dspclk, "FW_CS_DSP");
static void cs_dsp_stop_watchdog(struct cs_dsp *dsp)
{
@@ -2770,7 +2770,7 @@ err_mutex:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_power_up, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_power_up, "FW_CS_DSP");
/**
* cs_dsp_power_down() - Powers-down the DSP
@@ -2804,7 +2804,7 @@ void cs_dsp_power_down(struct cs_dsp *dsp)
cs_dsp_dbg(dsp, "Shutdown complete\n");
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_power_down, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_power_down, "FW_CS_DSP");
static int cs_dsp_adsp2_start_core(struct cs_dsp *dsp)
{
@@ -2890,7 +2890,7 @@ err:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_run, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_run, "FW_CS_DSP");
/**
* cs_dsp_stop() - Stops the firmware
@@ -2929,7 +2929,7 @@ void cs_dsp_stop(struct cs_dsp *dsp)
cs_dsp_dbg(dsp, "Execution stopped\n");
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_stop, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_stop, "FW_CS_DSP");
static int cs_dsp_halo_start_core(struct cs_dsp *dsp)
{
@@ -2991,7 +2991,7 @@ int cs_dsp_adsp2_init(struct cs_dsp *dsp)
return cs_dsp_common_init(dsp);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, "FW_CS_DSP");
/**
* cs_dsp_halo_init() - Initialise a cs_dsp structure representing a HALO Core DSP
@@ -3008,7 +3008,7 @@ int cs_dsp_halo_init(struct cs_dsp *dsp)
return cs_dsp_common_init(dsp);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_init, "FW_CS_DSP");
/**
* cs_dsp_remove() - Clean a cs_dsp before deletion
@@ -3028,7 +3028,7 @@ void cs_dsp_remove(struct cs_dsp *dsp)
cs_dsp_free_ctl_blk(ctl);
}
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_remove, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_remove, "FW_CS_DSP");
/**
* cs_dsp_read_raw_data_block() - Reads a block of data from DSP memory
@@ -3065,7 +3065,7 @@ int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int me
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_read_raw_data_block, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_read_raw_data_block, "FW_CS_DSP");
/**
* cs_dsp_read_data_word() - Reads a word from DSP memory
@@ -3089,7 +3089,7 @@ int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_add
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_read_data_word, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_read_data_word, "FW_CS_DSP");
/**
* cs_dsp_write_data_word() - Writes a word to DSP memory
@@ -3115,7 +3115,7 @@ int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_ad
return regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_write_data_word, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_write_data_word, "FW_CS_DSP");
/**
* cs_dsp_remove_padding() - Convert unpacked words to packed bytes
@@ -3139,7 +3139,7 @@ void cs_dsp_remove_padding(u32 *buf, int nwords)
*pack_out++ = (u8)(word >> 16);
}
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_remove_padding, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_remove_padding, "FW_CS_DSP");
/**
* cs_dsp_adsp2_bus_error() - Handle a DSP bus error interrupt
@@ -3209,7 +3209,7 @@ void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp)
error:
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_bus_error, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_bus_error, "FW_CS_DSP");
/**
* cs_dsp_halo_bus_error() - Handle a DSP bus error interrupt
@@ -3269,7 +3269,7 @@ void cs_dsp_halo_bus_error(struct cs_dsp *dsp)
exit_unlock:
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_bus_error, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_bus_error, "FW_CS_DSP");
/**
* cs_dsp_halo_wdt_expire() - Handle DSP watchdog expiry
@@ -3289,7 +3289,7 @@ void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp)
mutex_unlock(&dsp->pwr_lock);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_wdt_expire, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_wdt_expire, "FW_CS_DSP");
static const struct cs_dsp_ops cs_dsp_adsp1_ops = {
.validate_version = cs_dsp_validate_version,
@@ -3419,7 +3419,7 @@ int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_write, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_write, "FW_CS_DSP");
/**
* cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
@@ -3438,7 +3438,7 @@ int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_flush, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_flush, "FW_CS_DSP");
/**
* cs_dsp_chunk_read() - Parse data from a DSP memory chunk
@@ -3480,7 +3480,7 @@ int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
return result;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, "FW_CS_DSP");
struct cs_dsp_wseq_op {
@@ -3605,7 +3605,7 @@ int cs_dsp_wseq_init(struct cs_dsp *dsp, struct cs_dsp_wseq *wseqs, unsigned int
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_init, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_init, "FW_CS_DSP");
static struct cs_dsp_wseq_op *cs_dsp_wseq_find_op(u32 addr, u8 op_code,
struct list_head *wseq_ops)
@@ -3720,7 +3720,7 @@ op_new_free:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_write, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_write, "FW_CS_DSP");
/**
* cs_dsp_wseq_multi_write() - Add or update multiple entries in a write sequence
@@ -3752,7 +3752,7 @@ int cs_dsp_wseq_multi_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_multi_write, FW_CS_DSP);
+EXPORT_SYMBOL_NS_GPL(cs_dsp_wseq_multi_write, "FW_CS_DSP");
MODULE_DESCRIPTION("Cirrus Logic DSP Support");
MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
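All cs_dsp exports above move from a token to a string-literal namespace, in line with the tree-wide conversion of EXPORT_SYMBOL_NS_GPL()/MODULE_IMPORT_NS() to quoted strings. A minimal sketch of the matching consumer side:

	/* Hypothetical consumer module; the import must use the same quoted form. */
	#include <linux/module.h>
	#include <linux/firmware/cirrus/cs_dsp.h>

	MODULE_IMPORT_NS("FW_CS_DSP");
	MODULE_LICENSE("GPL");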
diff --git a/drivers/firmware/cirrus/test/Makefile b/drivers/firmware/cirrus/test/Makefile
new file mode 100644
index 000000000000..7a24a6079ddc
--- /dev/null
+++ b/drivers/firmware/cirrus/test/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+
+cs_dsp_test_utils-objs := \
+ cs_dsp_mock_mem_maps.o \
+ cs_dsp_mock_bin.o \
+ cs_dsp_mock_regmap.o \
+ cs_dsp_mock_utils.o \
+ cs_dsp_mock_wmfw.o
+
+cs_dsp_test-objs := \
+ cs_dsp_test_bin.o \
+ cs_dsp_test_bin_error.o \
+ cs_dsp_test_callbacks.o \
+ cs_dsp_test_control_parse.o \
+ cs_dsp_test_control_cache.o \
+ cs_dsp_test_control_rw.o \
+ cs_dsp_test_wmfw.o \
+ cs_dsp_test_wmfw_error.o \
+ cs_dsp_tests.o
+
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST_UTILS) += cs_dsp_test_utils.o
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST) += cs_dsp_test.o
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
new file mode 100644
index 000000000000..49d84f7e59e6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// bin file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for bin file content */
+#define CS_DSP_MOCK_BIN_BUF_SIZE 32768
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+struct cs_dsp_mock_bin_builder {
+ struct cs_dsp_test *test_priv;
+ void *buf;
+ void *write_p;
+ size_t bytes_used;
+};
+
+/**
+ * cs_dsp_mock_bin_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder)
+{
+ struct firmware *fw;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_raw_block() - Add a data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID.
+ * @alg_ver: Algorithm version.
+ * @type: Type of the block.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_coeff_item *item;
+ size_t bytes_needed = struct_size_t(struct wmfw_coeff_item, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_BIN_BUF_SIZE));
+
+ item = builder->write_p;
+
+ item->offset = cpu_to_le16(offset);
+ item->type = cpu_to_le16(type);
+ item->id = cpu_to_le32(alg_id);
+ item->ver = cpu_to_le32(alg_ver << 8);
+ item->len = cpu_to_le32(payload_len_bytes);
+
+ if (payload_len_bytes)
+ memcpy(item->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_bin_add_name_or_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info, int type)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a padded string with length a multiple of 4 */
+ info_len = round_up(info_len, 4);
+ tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+ memcpy(tmp, info, strlen(info));
+ info = tmp;
+ }
+
+ cs_dsp_mock_bin_add_raw_block(builder, 0, 0, WMFW_INFO_TEXT, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+
+/**
+ * cs_dsp_mock_bin_add_info() - Add an info block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, info, WMFW_INFO_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_name() - Add a name block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @name: Pointer to name string to be copied into the file.
+ */
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, name, WMFW_NAME_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_name, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_patch() - Add a patch data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID for the patch.
+ * @alg_ver: Algorithm version for the patch.
+ * @mem_region: Memory region for the patch.
+ * @reg_addr_offset: Offset to start of data in register addresses.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_bin_add_raw_block(builder, alg_id, alg_ver,
+ mem_region, reg_addr_offset,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_patch, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_init() - Initialize a struct cs_dsp_mock_bin_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required bin format version.
+ * @fw_version: Firmware version to put in bin file.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_bin_builder.
+ */
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version)
+{
+ struct cs_dsp_mock_bin_builder *builder;
+ struct wmfw_coeff_hdr *hdr;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_BIN_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ /* Create header */
+ hdr = builder->buf;
+ memcpy(hdr->magic, "WMDR", sizeof(hdr->magic));
+ hdr->len = cpu_to_le32(offsetof(struct wmfw_coeff_hdr, data));
+ hdr->ver = cpu_to_le32(fw_version | (format_version << 24));
+ hdr->core_ver = cpu_to_le32(((u32)priv->dsp->type << 24) | priv->dsp->rev);
+
+ builder->write_p = hdr->data;
+ builder->bytes_used = offsetof(struct wmfw_coeff_hdr, data);
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
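A short usage sketch of the builder API above, as a test case might use it — the algorithm id, versions, and payload are made-up values:

	/* Hypothetical KUnit snippet exercising the mock bin builder. */
	static void example_bin_test(struct kunit *test)
	{
		struct cs_dsp_test *priv = test->priv;
		struct cs_dsp_mock_bin_builder *builder;
		struct firmware *fw;
		static const u8 payload[4] = { 1, 2, 3, 4 };

		builder = cs_dsp_mock_bin_init(priv, 1, 0x10000);
		cs_dsp_mock_bin_add_name(builder, "example");
		cs_dsp_mock_bin_add_patch(builder, 0xfafa, 0x100, WMFW_ADSP2_YM, 0,
					  payload, sizeof(payload));
		fw = cs_dsp_mock_bin_get_firmware(builder);
		/* fw can now be fed to the cs_dsp firmware loader under test */
	}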
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
new file mode 100644
index 000000000000..161272e47bda
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock DSP memory maps for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/math.h>
+
+const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[] = {
+ { .type = WMFW_HALO_PM_PACKED, .base = 0x3800000 },
+ { .type = WMFW_HALO_XM_PACKED, .base = 0x2000000 },
+ { .type = WMFW_HALO_YM_PACKED, .base = 0x2C00000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x2800000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x3400000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[] = {
+ 0x5000, /* PM_PACKED */
+ 0x6000, /* XM_PACKED */
+ 0x47F4, /* YM_PACKED */
+ 0x8000, /* XM_UNPACKED_24 */
+ 0x5FF8, /* YM_UNPACKED_24 */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x080000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x0a0000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x0c0000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x0e0000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[] = {
+ 0x9000, /* PM */
+ 0xa000, /* XM */
+ 0x2000, /* YM */
+ 0x2000, /* ZM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x100000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x180000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x190000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x1a8000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[] = {
+ 0x6000, /* PM */
+ 0x800, /* ZM */
+ 0x800, /* XM */
+ 0x800, /* YM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes)
+{
+ int i;
+
+ for (i = 0; region_sizes[i]; ++i)
+ ;
+
+ return i;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_count_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_size_of_region() - Return size of given memory region.
+ *
+ * @dsp: Pointer to struct cs_dsp.
+ * @mem_type: Memory region type.
+ *
+ * Return: Size of region in bytes.
+ */
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type)
+{
+ const unsigned int *sizes;
+ int i;
+
+ if (dsp->mem == cs_dsp_mock_halo_dsp1_regions)
+ sizes = cs_dsp_mock_halo_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_32bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_32bit_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_16bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_16bit_dsp1_region_sizes;
+ else
+ return 0;
+
+ for (i = 0; i < dsp->num_mems; ++i) {
+ if (dsp->mem[i].type == mem_type)
+ return sizes[i];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_size_of_region, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_base_addr_for_mem() - Base register address for memory region.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Base register address of region.
+ */
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type)
+{
+ int num_mems = priv->dsp->num_mems;
+ const struct cs_dsp_region *region = priv->dsp->mem;
+ int i;
+
+ for (i = 0; i < num_mems; ++i) {
+ if (region[i].type == mem_type)
+ return region[i].base;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected region %d\n", mem_type);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_base_addr_for_mem, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_addr_inc_per_unpacked_word() - Unpacked register address increment per DSP word.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Amount by which register address increments to move to the next
+ * DSP word in unpacked XM/YM/ZM.
+ */
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return 2; /* two 16-bit register indexes per XM/YM/ZM word */
+ case WMFW_HALO:
+ return 4; /* one byte-addressed 32-bit register per XM/YM/ZM word */
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_addr_inc_per_unpacked_word, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_bytes() - Number of bytes in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of bytes in a group of registers forming the
+ * smallest bus access size (including any padding bits). For unpacked
+ * memory this is the number of registers containing one DSP word.
+ * For packed memory this is the number of registers in one packed
+ * access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return 3 * regmap_get_val_bytes(priv->dsp->regmap);
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return sizeof(u32);
+ case WMFW_HALO_PM_PACKED:
+ return 5 * sizeof(u32);
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 3 * sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_registers() - Number of registers in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of registers forming the smallest bus access size.
+ * For unpacked memory this is the number of registers containing one
+ * DSP word. For packed memory this is the number of registers in one
+ * packed access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type)
+{
+ return cs_dsp_mock_reg_block_length_bytes(priv, mem_type) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_registers, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_dsp_words() - Number of dsp_words in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of DSP words in a group of registers forming the
+ * smallest bus access size.
+ */
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return regmap_get_val_bytes(priv->dsp->regmap) / 2;
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return 1;
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return 1;
+ case WMFW_HALO_PM_PACKED:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 4;
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_dsp_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_has_zm() - DSP has ZM
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: True if DSP has ZM.
+ */
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_has_zm, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_packed_to_unpacked_mem_type() - Unpacked region that is
+ * the same memory as a packed region.
+ *
+ * @packed_mem_type: Type of packed memory region.
+ *
+ * Return: unpacked type that is the same memory as packed_mem_type.
+ */
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type)
+{
+ switch (packed_mem_type) {
+ case WMFW_HALO_XM_PACKED:
+ return WMFW_ADSP2_XM;
+ case WMFW_HALO_YM_PACKED:
+ return WMFW_ADSP2_YM;
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_packed_to_unpacked_mem_type, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_num_dsp_words_to_num_packed_regs() - Number of DSP words
+ * to number of packed registers.
+ *
+ * @num_dsp_words: Number of DSP words.
+ *
+ * Convert number of DSP words to number of packed registers rounded
+ * down to the nearest register.
+ *
+ * Return: Number of packed registers.
+ */
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ /* There are 3 registers for every 4 packed words */
+ return (num_dsp_words * 3) / 4;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_num_dsp_words_to_num_packed_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
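The 3/4 ratio reflects the packed format: four 24-bit DSP words occupy 96 bits, which is exactly three 32-bit registers. A worked example:

	/* 8 packed DSP words -> (8 * 3) / 4 = 6 registers, i.e. 24 payload bytes. */
	unsigned int nregs = cs_dsp_mock_num_dsp_words_to_num_packed_regs(8);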
+
+static const struct wmfw_halo_id_hdr cs_dsp_mock_halo_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_HALO << 16),
+ .block_rev = cpu_to_be32(3 << 16),
+ .vendor_id = cpu_to_be32(0x2),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm_base = cpu_to_be32(((sizeof(struct wmfw_halo_id_hdr) +
+ (40 * sizeof(struct wmfw_halo_alg_hdr)))
+ / 4) * 3),
+ .xm_size = cpu_to_be32(0x20),
+
+ /* Allocate a dummy word of YM */
+ .ym_base = cpu_to_be32(0),
+ .ym_size = cpu_to_be32(1),
+
+ .n_algs = 0,
+};
+
+static const struct wmfw_adsp2_id_hdr cs_dsp_mock_adsp2_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_ADSP2 << 16),
+ .core_rev = cpu_to_be32(2 << 16),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm = cpu_to_be32(((sizeof(struct wmfw_adsp2_id_hdr) +
+ (40 * sizeof(struct wmfw_adsp2_alg_hdr)))
+ / 4) * 3),
+
+ .ym = cpu_to_be32(0),
+ .zm = cpu_to_be32(0),
+
+ .n_algs = 0,
+};
+
+/**
+ * cs_dsp_mock_xm_header_get_alg_base_in_words() - Algorithm base offset in DSP words.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @alg_id: Algorithm ID.
+ * @mem_type: Memory region type.
+ *
+ * Lookup an algorithm in the XM header and return the base offset in
+ * DSP words of the algorithm data in the requested memory region.
+ *
+ * Return: Offset in DSP words.
+ */
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_adsp2_alg_hdr adsp2;
+ struct wmfw_halo_alg_hdr halo;
+ } alg;
+ unsigned int alg_hdr_addr;
+ unsigned int val, xm_base = 0, ym_base = 0, zm_base = 0;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ alg_hdr_addr = xm + (sizeof(struct wmfw_adsp2_id_hdr) / 2);
+ for (;; alg_hdr_addr += sizeof(alg.adsp2) / 2) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.adsp2, sizeof(alg.adsp2));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.adsp2.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.adsp2.xm);
+ ym_base = be32_to_cpu(alg.adsp2.ym);
+ zm_base = be32_to_cpu(alg.adsp2.zm);
+ break;
+ }
+ }
+ break;
+ case WMFW_HALO:
+ alg_hdr_addr = xm + sizeof(struct wmfw_halo_id_hdr);
+ for (;; alg_hdr_addr += sizeof(alg.halo)) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.halo, sizeof(alg.halo));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.halo.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.halo.xm_base);
+ ym_base = be32_to_cpu(alg.halo.ym_base);
+ break;
+ }
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type %d\n", priv->dsp->type);
+ return 0;
+ }
+
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_HALO_XM_PACKED:
+ return xm_base;
+ case WMFW_ADSP2_YM:
+ case WMFW_HALO_YM_PACKED:
+ return ym_base;
+ case WMFW_ADSP2_ZM:
+ return zm_base;
+ default:
+ KUNIT_FAIL(priv->test, "Bad mem_type\n");
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version_from_regmap() - Firmware version.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_id_hdr adsp2;
+ struct wmfw_v3_id_hdr halo;
+ } hdr;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ regmap_raw_read(priv->dsp->regmap, xm, &hdr.adsp2, sizeof(hdr.adsp2));
+ return be32_to_cpu(hdr.adsp2.ver);
+ case WMFW_HALO:
+ regmap_raw_read(priv->dsp->regmap, xm, &hdr.halo, sizeof(hdr.halo));
+ return be32_to_cpu(hdr.halo.ver);
+ default:
+ KUNIT_FAIL(priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version_from_regmap,
+ "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version() - Firmware version.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header)
+{
+ const struct wmfw_id_hdr *adsp2_hdr;
+ const struct wmfw_v3_id_hdr *halo_hdr;
+
+ switch (header->test_priv->dsp->type) {
+ case WMFW_ADSP2:
+ adsp2_hdr = header->blob_data;
+ return be32_to_cpu(adsp2_hdr->ver);
+ case WMFW_HALO:
+ halo_hdr = header->blob_data;
+ return be32_to_cpu(halo_hdr->ver);
+ default:
+ KUNIT_FAIL(header->test_priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_drop_from_regmap_cache() - Drop XM header from regmap cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ */
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ unsigned int bytes;
+ __be32 num_algs_be32;
+ unsigned int num_algs;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ /*
+ * Could be one 32-bit register or two 16-bit registers.
+ * A raw read will read the requested number of bytes.
+ */
+ regmap_raw_read(priv->dsp->regmap,
+ xm + (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
+ &num_algs_be32, sizeof(num_algs_be32));
+ num_algs = be32_to_cpu(num_algs_be32);
+ bytes = sizeof(struct wmfw_adsp2_id_hdr) +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + (bytes / 2) - 1);
+ break;
+ case WMFW_HALO:
+ regmap_read(priv->dsp->regmap,
+ xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
+ &num_algs);
+ bytes = sizeof(struct wmfw_halo_id_hdr) +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + bytes - 4);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_drop_from_regmap_cache, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_xm_header_add_adsp2_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_adsp2_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word, next_free_zm_word;
+
+ next_free_xm_word = be32_to_cpu(hdr->xm);
+ next_free_ym_word = be32_to_cpu(hdr->ym);
+ next_free_zm_word = be32_to_cpu(hdr->zm);
+
+ /* Set num_algs in XM header. */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_adsp2_alg_hdr *alg_info =
+ (struct wmfw_adsp2_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last, alg_zm_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm = cpu_to_be32(algs->xm_base_words);
+ alg_info->ym = cpu_to_be32(algs->ym_base_words);
+ alg_info->zm = cpu_to_be32(algs->zm_base_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm && algs->xm_size_words)
+ alg_info->xm = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym && algs->ym_size_words)
+ alg_info->ym = cpu_to_be32(next_free_ym_word);
+
+ if (!alg_info->zm && algs->zm_size_words)
+ alg_info->zm = cpu_to_be32(next_free_zm_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm) + algs->xm_size_words - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym) + algs->ym_size_words - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+
+ alg_zm_last = be32_to_cpu(alg_info->zm) + algs->zm_size_words - 1;
+ if (alg_zm_last > next_free_zm_word)
+ next_free_zm_word = alg_zm_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+static void cs_dsp_mock_xm_header_add_halo_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_halo_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word;
+
+ /* Assume we're starting with bare header */
+ next_free_xm_word = be32_to_cpu(hdr->xm_base) + be32_to_cpu(hdr->xm_size) - 1;
+ next_free_ym_word = be32_to_cpu(hdr->ym_base) + be32_to_cpu(hdr->ym_size) - 1;
+
+ /* Set num_algs in XM header */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_halo_alg_hdr *alg_info =
+ (struct wmfw_halo_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm_base = cpu_to_be32(algs->xm_base_words);
+ alg_info->xm_size = cpu_to_be32(algs->xm_size_words);
+ alg_info->ym_base = cpu_to_be32(algs->ym_base_words);
+ alg_info->ym_size = cpu_to_be32(algs->ym_size_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm_base && alg_info->xm_size)
+ alg_info->xm_base = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym_base && alg_info->ym_size)
+ alg_info->ym_base = cpu_to_be32(next_free_ym_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm_base) + be32_to_cpu(alg_info->xm_size) - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym_base) + be32_to_cpu(alg_info->ym_size) - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+/**
+ * cs_dsp_mock_xm_header_write_to_regmap() - Write XM header to regmap.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * The data in header is written to the XM addresses in the regmap.
+ *
+ * Return: 0 on success, else negative error code.
+ */
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header)
+{
+ struct cs_dsp_test *priv = header->test_priv;
+ unsigned int reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+
+ /*
+ * One 32-bit word corresponds to one 32-bit unpacked XM word so the
+ * blob can be written directly to the regmap.
+ */
+ return regmap_raw_write(priv->dsp->regmap, reg_addr,
+ header->blob_data, header->blob_size_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_write_to_regmap, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_create_mock_xm_header() - Create a dummy XM header.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @algs: Pointer to array of struct cs_dsp_mock_alg_def listing the
+ * dummy algorithm entries to include in the XM header.
+ * @num_algs: Number of entries in the algs array.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_xm_header.
+ */
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct cs_dsp_mock_xm_header *builder;
+ size_t total_bytes_required;
+ const void *header;
+ size_t header_size_bytes;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ header = &cs_dsp_mock_adsp2_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_adsp2_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ case WMFW_HALO:
+ header = &cs_dsp_mock_halo_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_halo_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "%s unexpected DSP type %d\n",
+ __func__, priv->dsp->type);
+ return NULL;
+ }
+
+ builder->blob_data = kunit_kzalloc(priv->test, total_bytes_required, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder->blob_data);
+ builder->blob_size_bytes = total_bytes_required;
+
+ memcpy(builder->blob_data, header, header_size_bytes);
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ cs_dsp_mock_xm_header_add_adsp2_algs(builder, algs, num_algs);
+ break;
+ case WMFW_HALO:
+ cs_dsp_mock_xm_header_add_halo_algs(builder, algs, num_algs);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_create_mock_xm_header, "FW_CS_DSP_KUNIT_TEST_UTILS");
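A usage sketch for the header builder above — the algorithm definition values are hypothetical:

	/* Hypothetical setup: publish a dummy XM header describing one algorithm. */
	static const struct cs_dsp_mock_alg_def example_alg = {
		.id = 0xfafa,
		.ver = 0x100,
		.xm_size_words = 16,	/* base left 0 so it is auto-allocated */
		.ym_size_words = 16,
	};

	struct cs_dsp_mock_xm_header *hdr =
		cs_dsp_create_mock_xm_header(priv, &example_alg, 1);
	cs_dsp_mock_xm_header_write_to_regmap(hdr);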
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
new file mode 100644
index 000000000000..fb8e4a5d189a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock regmap for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/regmap.h>
+
+static int cs_dsp_mock_regmap_read(void *context, const void *reg_buf,
+ const size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus read @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_gather_write(void *context,
+ const void *reg_buf, size_t reg_size,
+ const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus gather_write @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_write(void *context, const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus write @%#x", *(u32 *)val_buf);
+
+ return -EIO;
+}
+
+static const struct regmap_bus cs_dsp_mock_regmap_bus = {
+ .read = cs_dsp_mock_regmap_read,
+ .write = cs_dsp_mock_regmap_write,
+ .gather_write = cs_dsp_mock_regmap_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct reg_default adsp2_32bit_register_defaults[] = {
+ { 0xffe00, 0x0000 }, /* CONTROL */
+ { 0xffe02, 0x0000 }, /* CLOCKING */
+ { 0xffe04, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+ { 0xffe30, 0x0000 }, /* WDMA_CONFIG_1 */
+ { 0xffe32, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0xffe34, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0xffe40, 0x0000 }, /* SCRATCH_0_1 */
+ { 0xffe42, 0x0000 }, /* SCRATCH_2_3 */
+};
+
+static const struct regmap_range adsp2_32bit_registers[] = {
+ regmap_reg_range(0x80000, 0x88ffe), /* PM */
+ regmap_reg_range(0xa0000, 0xa9ffe), /* XM */
+ regmap_reg_range(0xc0000, 0xc1ffe), /* YM */
+ regmap_reg_range(0xe0000, 0xe1ffe), /* ZM */
+ regmap_reg_range(0xffe00, 0xffe7c), /* CORE CTRL */
+};
+
+const unsigned int cs_dsp_mock_adsp2_32bit_sysbase = 0xffe00;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_32bit_rw = {
+ .yes_ranges = adsp2_32bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_32bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_32bit = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 2,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_32bit_rw,
+ .rd_table = &adsp2_32bit_rw,
+ .max_register = 0xffe7c,
+ .reg_defaults = adsp2_32bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_32bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default adsp2_16bit_register_defaults[] = {
+ { 0x1100, 0x0000 }, /* CONTROL */
+ { 0x1101, 0x0000 }, /* CLOCKING */
+ { 0x1104, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+ { 0x1130, 0x0000 }, /* WDMA_CONFIG_1 */
+ { 0x1131, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0x1134, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0x1140, 0x0000 }, /* SCRATCH_0 */
+ { 0x1141, 0x0000 }, /* SCRATCH_1 */
+ { 0x1142, 0x0000 }, /* SCRATCH_2 */
+ { 0x1143, 0x0000 }, /* SCRATCH_3 */
+};
+
+static const struct regmap_range adsp2_16bit_registers[] = {
+ regmap_reg_range(0x001100, 0x001143), /* CORE CTRL */
+ regmap_reg_range(0x100000, 0x105fff), /* PM */
+ regmap_reg_range(0x180000, 0x1807ff), /* ZM */
+ regmap_reg_range(0x190000, 0x1947ff), /* XM */
+ regmap_reg_range(0x1a8000, 0x1a97ff), /* YM */
+};
+
+const unsigned int cs_dsp_mock_adsp2_16bit_sysbase = 0x001100;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_16bit_rw = {
+ .yes_ranges = adsp2_16bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_16bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_16bit = {
+ .reg_bits = 32,
+ .val_bits = 16,
+ .reg_stride = 1,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_16bit_rw,
+ .rd_table = &adsp2_16bit_rw,
+ .max_register = 0x1a97ff,
+ .reg_defaults = adsp2_16bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_16bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default halo_register_defaults[] = {
+ /* CORE */
+ { 0x2b80010, 0 }, /* HALO_CORE_SOFT_RESET */
+ { 0x2b805c0, 0 }, /* HALO_SCRATCH1 */
+ { 0x2b805c8, 0 }, /* HALO_SCRATCH2 */
+ { 0x2b805d0, 0 }, /* HALO_SCRATCH3 */
+ { 0x2b805d8, 0 }, /* HALO_SCRATCH4 */
+ { 0x2bc1000, 0 }, /* HALO_CCM_CORE_CONTROL */
+ { 0x2bc7000, 0 }, /* HALO_WDT_CONTROL */
+
+ /* SYSINFO */
+ { 0x25e2040, 0 }, /* HALO_AHBM_WINDOW_DEBUG_0 */
+ { 0x25e2044, 0 }, /* HALO_AHBM_WINDOW_DEBUG_1 */
+};
+
+static const struct regmap_range halo_readable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x25e0000, 0x25e004f), /* SYSINFO */
+ regmap_reg_range(0x25e2000, 0x25e2047), /* SYSINFO */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+static const struct regmap_range halo_writeable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+const unsigned int cs_dsp_mock_halo_core_base = 0x2b80000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_core_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const unsigned int cs_dsp_mock_halo_sysinfo_base = 0x25e0000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_sysinfo_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table halo_readable = {
+ .yes_ranges = halo_readable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_readable_registers),
+};
+
+static const struct regmap_access_table halo_writeable = {
+ .yes_ranges = halo_writeable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_writeable_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_halo = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &halo_writeable,
+ .rd_table = &halo_readable,
+ .max_register = 0x3804ffc,
+ .reg_defaults = halo_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(halo_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+/**
+ * cs_dsp_mock_regmap_drop_range() - drop a range of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @last_reg: Address of last register to drop.
+ */
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg)
+{
+ regcache_drop_region(priv->dsp->regmap, first_reg, last_reg);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_range, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_regs() - drop a number of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_regs: Number of registers to drop.
+ */
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs)
+{
+ int stride = regmap_get_reg_stride(priv->dsp->regmap);
+ unsigned int last = first_reg + (stride * (num_regs - 1));
+
+ cs_dsp_mock_regmap_drop_range(priv, first_reg, last);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_bytes() - drop a number of bytes from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_bytes: Number of bytes to drop from the cache. Will be rounded
+ * down to a whole number of registers. Trailing bytes that
+ * are not a multiple of the register size will not be dropped.
+ * (This is intended to help detect math errors in test code.)
+ */
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes)
+{
+ size_t num_regs = num_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+
+ cs_dsp_mock_regmap_drop_regs(priv, first_reg, num_regs);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_system_regs() - Drop DSP system registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ *
+ * Drops all DSP system registers from the regmap cache.
+ */
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x7c);
+ }
+ return;
+ case WMFW_HALO:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x47000);
+ }
+
+ /* sysinfo registers are read-only so don't drop them */
+ return;
+ default:
+ return;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_system_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_is_dirty() - Test for dirty registers in the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @drop_system_regs: If true the DSP system regs will be dropped from
+ * the cache before checking for dirty.
+ *
+ * All registers that are expected to be written must have been dropped
+ * from the cache (DSP system registers can be dropped by passing
+ * drop_system_regs == true). If any unexpected registers were written
+ * there will still be dirty entries in the cache and a cache sync will
+ * cause a write.
+ *
+ * Returns: true if there were dirty entries, false if not.
+ */
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs)
+{
+ if (drop_system_regs)
+ cs_dsp_mock_regmap_drop_system_regs(priv);
+
+ priv->saw_bus_write = false;
+ regcache_cache_only(priv->dsp->regmap, false);
+ regcache_sync(priv->dsp->regmap);
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return priv->saw_bus_write;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_is_dirty, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_init() - Initialize a mock regmap.
+ *
+ * @priv: Pointer to struct cs_dsp_test object. This must have a
+ * valid pointer to a struct cs_dsp in which the type and
+ * rev fields are set to the type of DSP to be simulated.
+ *
+ * On success the priv->dsp->regmap will point to the created
+ * regmap instance.
+ *
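+ * A minimal setup sketch (assuming priv->dsp and priv->test have been
+ * populated as in the other test utilities in this directory):
+ *
+ *   priv->dsp->type = WMFW_HALO;
+ *   priv->dsp->rev = 0;
+ *   KUNIT_ASSERT_EQ(priv->test, cs_dsp_mock_regmap_init(priv), 0);
+ *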
+ * Return: zero on success, else negative error code.
+ */
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv)
+{
+ const struct regmap_config *config;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_HALO:
+ config = &cs_dsp_mock_regmap_halo;
+ break;
+ case WMFW_ADSP2:
+ if (priv->dsp->rev == 0)
+ config = &cs_dsp_mock_regmap_adsp2_16bit;
+ else
+ config = &cs_dsp_mock_regmap_adsp2_32bit;
+ break;
+ default:
+ config = NULL;
+ break;
+ }
+
+ priv->dsp->regmap = devm_regmap_init(priv->dsp->dev,
+ &cs_dsp_mock_regmap_bus,
+ priv,
+ config);
+ if (IS_ERR(priv->dsp->regmap)) {
+ ret = PTR_ERR(priv->dsp->regmap);
+ kunit_err(priv->test, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ /* Put regmap in cache-only so it accumulates the writes done by cs_dsp */
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
new file mode 100644
index 000000000000..cbd0bf72b7de
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("Utilities for Cirrus Logic DSP driver testing");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
new file mode 100644
index 000000000000..5a3ac03ac37f
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// wmfw file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for bin file content */
+#define CS_DSP_MOCK_WMFW_BUF_SIZE 131072
+
+struct cs_dsp_mock_wmfw_builder {
+ struct cs_dsp_test *test_priv;
+ int format_version;
+ void *buf;
+ size_t buf_size_bytes;
+ void *write_p;
+ size_t bytes_used;
+
+ void *alg_data_header;
+ unsigned int num_coeffs;
+};
+
+struct wmfw_adsp2_halo_header {
+ struct wmfw_header header;
+ struct wmfw_adsp2_sizes sizes;
+ struct wmfw_footer footer;
+} __packed;
+
+struct wmfw_long_string {
+ __le16 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+struct wmfw_short_string {
+ u8 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
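+
+/*
+ * Example layout (illustrative): the name "abc" is stored as a
+ * struct wmfw_short_string with len = 3 followed by the bytes
+ * 'a' 'b' 'c'; the builder then pads the entry to the next
+ * __le32 boundary.
+ */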
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+/**
+ * cs_dsp_mock_wmfw_format_version() - Return format version.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Format version.
+ */
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ return builder->format_version;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_format_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
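+ * The returned struct firmware can be passed to cs_dsp_power_up() in
+ * the same way as a real wmfw file (sketch, with names as used in the
+ * tests in this directory):
+ *
+ *   fw = cs_dsp_mock_wmfw_get_firmware(builder);
+ *   ret = cs_dsp_power_up(dsp, fw, "mock_wmfw", NULL, NULL, "misc");
+ *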
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct firmware *fw;
+
+ if (!builder)
+ return NULL;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_raw_block() - Add a block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @block_type: Block type.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data,
+ * or NULL if no data.
+ * @payload_len_bytes: Length of payload data in bytes, or zero.
+ */
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int block_type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_region *header = builder->write_p;
+ unsigned int bytes_needed = struct_size_t(struct wmfw_region, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ header->offset = cpu_to_le32(offset | (block_type << 24));
+ header->len = cpu_to_le32(payload_len_bytes);
+ if (payload_len_bytes > 0)
+ memcpy(header->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_info() - Add an info block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
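+ *
+ * For example (illustrative):
+ *
+ *   cs_dsp_mock_wmfw_add_info(builder, "hello");
+ *
+ * copies the 5-character string into an 8-byte zero-filled buffer
+ * before adding it as a WMFW_INFO_TEXT block.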
+ */
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a padded string with length a multiple of 4 */
+ info_len = round_up(info_len, 4);
+ tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+		/* Copy only strlen(info) bytes; kzalloc already zero-filled the padding */
+ memcpy(tmp, info, strlen(info));
+ info = tmp;
+ }
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, WMFW_INFO_TEXT, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_data_block() - Add a data block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @mem_region: Memory region for the block.
+ * @mem_offset_dsp_words: Offset to start of destination in DSP words.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Blob payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, mem_region, mem_offset_dsp_words,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_data_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
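+/**
+ * cs_dsp_mock_wmfw_start_alg_info_block() - Begin an algorithm information block.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @alg_id: Algorithm ID.
+ * @name: Algorithm name, or NULL.
+ * @description: Algorithm description, or NULL.
+ *
+ * Starts an algorithm entry in the file. Add coefficient descriptors
+ * with cs_dsp_mock_wmfw_add_coeff_desc(), then complete the entry with
+ * cs_dsp_mock_wmfw_end_alg_info_block(). Not valid for format version 0,
+ * which does not contain algorithm blocks.
+ */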
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description)
+{
+ struct wmfw_region *rgn = builder->write_p;
+ struct wmfw_adsp_alg_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, name_len, description_len;
+ int offset;
+
+ /* Bytes needed for region header */
+ bytes_needed = offsetof(struct wmfw_region, data);
+
+ builder->alg_data_header = builder->write_p;
+ builder->num_coeffs = 0;
+
+ switch (builder->format_version) {
+ case 0:
+ KUNIT_FAIL(builder->test_priv->test, "wmfwV0 does not have alg blocks\n");
+ return;
+ case 1:
+ bytes_needed += offsetof(struct wmfw_adsp_alg_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->id = cpu_to_le32(alg_id);
+ if (name)
+ strscpy(v1->name, name, sizeof(v1->name));
+
+ if (description)
+ strscpy(v1->descr, description, sizeof(v1->descr));
+ break;
+ default:
+ name_len = 0;
+ description_len = 0;
+
+ if (name)
+ name_len = strlen(name);
+
+ if (description)
+ description_len = strlen(description);
+
+ bytes_needed += sizeof(__le32); /* alg id */
+ bytes_needed += round_up(name_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32); /* coeff count */
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ *(__force __le32 *)&rgn->data[0] = cpu_to_le32(alg_id);
+
+ shortstring = (struct wmfw_short_string *)&rgn->data[4];
+ shortstring->len = name_len;
+
+ if (name_len)
+ memcpy(shortstring->data, name, name_len);
+
+ /* Round up to next __le32 */
+ offset = round_up(4 + struct_size_t(struct wmfw_short_string, data, name_len),
+ sizeof(__le32));
+
+ longstring = (struct wmfw_long_string *)&rgn->data[offset];
+ longstring->len = cpu_to_le16(description_len);
+
+ if (description_len)
+ memcpy(longstring->data, description, description_len);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_start_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
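+/**
+ * cs_dsp_mock_wmfw_add_coeff_desc() - Add a coefficient descriptor.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @def: Pointer to the coefficient definition.
+ *
+ * Must be called between cs_dsp_mock_wmfw_start_alg_info_block() and
+ * cs_dsp_mock_wmfw_end_alg_info_block(). Ignored for format version 0.
+ */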
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def)
+{
+ struct wmfw_adsp_coeff_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, shortname_len, fullname_len, description_len;
+ __le32 *ple32;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, builder->alg_data_header);
+
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ bytes_needed = offsetof(struct wmfw_adsp_coeff_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ v1 = (struct wmfw_adsp_coeff_data *)builder->write_p;
+ memset(v1, 0, sizeof(*v1));
+ v1->hdr.offset = cpu_to_le16(def->offset_dsp_words);
+ v1->hdr.type = cpu_to_le16(def->mem_type);
+ v1->hdr.size = cpu_to_le32(bytes_needed - sizeof(v1->hdr));
+ v1->ctl_type = cpu_to_le16(def->type);
+ v1->flags = cpu_to_le16(def->flags);
+ v1->len = cpu_to_le32(def->length_bytes);
+
+ if (def->fullname)
+ strscpy(v1->name, def->fullname, sizeof(v1->name));
+
+ if (def->description)
+ strscpy(v1->descr, def->description, sizeof(v1->descr));
+ break;
+ default:
+ fullname_len = 0;
+ description_len = 0;
+ shortname_len = strlen(def->shortname);
+
+ if (def->fullname)
+ fullname_len = strlen(def->fullname);
+
+ if (def->description)
+ description_len = strlen(def->description);
+
+ bytes_needed = sizeof(__le32) * 2; /* type, offset and size */
+ bytes_needed += round_up(shortname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(fullname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32) * 2; /* flags, type and length */
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ ple32 = (__force __le32 *)builder->write_p;
+ *ple32++ = cpu_to_le32(def->offset_dsp_words | (def->mem_type << 16));
+ *ple32++ = cpu_to_le32(bytes_needed - sizeof(__le32) - sizeof(__le32));
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = shortname_len;
+ memcpy(shortstring->data, def->shortname, shortname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, shortname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = fullname_len;
+ memcpy(shortstring->data, def->fullname, fullname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, fullname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ longstring = (__force struct wmfw_long_string *)ple32;
+ longstring->len = cpu_to_le16(description_len);
+ memcpy(longstring->data, def->description, description_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_long_string, data, description_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ *ple32++ = cpu_to_le32(def->type | (def->flags << 16));
+ *ple32 = cpu_to_le32(def->length_bytes);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+ builder->num_coeffs++;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_coeff_desc, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
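+/**
+ * cs_dsp_mock_wmfw_end_alg_info_block() - Complete an algorithm information block.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Fills in the length and coefficient count of the block started by
+ * cs_dsp_mock_wmfw_start_alg_info_block().
+ */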
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_region *rgn = builder->alg_data_header;
+ struct wmfw_adsp_alg_data *v1;
+ const struct wmfw_short_string *shortstring;
+ const struct wmfw_long_string *longstring;
+ size_t offset;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, rgn);
+
+ /* Fill in data size */
+ rgn->len = cpu_to_le32((u8 *)builder->write_p - (u8 *)rgn->data);
+
+ /* Fill in coefficient count */
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->ncoeff = cpu_to_le32(builder->num_coeffs);
+ break;
+ default:
+ offset = 4; /* skip alg id */
+
+ /* Get name length and round up to __le32 multiple */
+ shortstring = (const struct wmfw_short_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_short_string, data, shortstring->len),
+ sizeof(__le32));
+
+ /* Get description length and round up to __le32 multiple */
+ longstring = (const struct wmfw_long_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_long_string, data,
+ le16_to_cpu(longstring->len)),
+ sizeof(__le32));
+
+ *(__force __le32 *)&rgn->data[offset] = cpu_to_le32(builder->num_coeffs);
+ break;
+ }
+
+ builder->alg_data_header = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_end_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_init_adsp2_halo_wmfw(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_adsp2_halo_header *hdr = builder->buf;
+ const struct cs_dsp *dsp = builder->test_priv->dsp;
+
+ memcpy(hdr->header.magic, "WMFW", sizeof(hdr->header.magic));
+ hdr->header.len = cpu_to_le32(sizeof(*hdr));
+ hdr->header.ver = builder->format_version;
+ hdr->header.core = dsp->type;
+ hdr->header.rev = cpu_to_le16(dsp->rev);
+
+ hdr->sizes.pm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_PM));
+ hdr->sizes.xm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_XM));
+ hdr->sizes.ym = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_YM));
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ hdr->sizes.zm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_ZM));
+ break;
+ default:
+ break;
+ }
+
+ builder->write_p = &hdr[1];
+ builder->bytes_used += sizeof(*hdr);
+}
+
+/**
+ * cs_dsp_mock_wmfw_init() - Initialize a struct cs_dsp_mock_wmfw_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required wmfw format version, or a negative value
+ * to use the default version for the target DSP core.
+ *
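+ * A typical use in a test case (sketch):
+ *
+ *   builder = cs_dsp_mock_wmfw_init(priv, -1); /* default for core */
+ *   fw = cs_dsp_mock_wmfw_get_firmware(builder);
+ *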
+ * Return: Pointer to created struct cs_dsp_mock_wmfw_builder.
+ */
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version)
+{
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ /* If format version isn't given use the default for the target core */
+ if (format_version < 0) {
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ format_version = 2;
+ break;
+ default:
+ format_version = 3;
+ break;
+ }
+ }
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+
+ builder->test_priv = priv;
+ builder->format_version = format_version;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_WMFW_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ builder->buf_size_bytes = CS_DSP_MOCK_WMFW_BUF_SIZE;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ case WMFW_HALO:
+ cs_dsp_init_adsp2_halo_wmfw(builder);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
new file mode 100644
index 000000000000..1e161bbc5b4a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
@@ -0,0 +1,2556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create a XM header with an algorithm list in the cached regmap.
+ * 2) Create an XM header with an algorithm list in the cached regmap.
+ * 3) Create a dummy wmfw file to satisfy cs_dsp.
+ * 4) Create bin file content.
+ * 5) Call cs_dsp_power_up() with the bin file.
+ * 6) Read back the cached values of the registers that should have been
+ * written and check that they have the correct values.
+ * the cache (including the XM header). This should leave the cache clean.
+ * 8) If the cache is still dirty there have been unexpected writes.
+ *
+ * There are multiple different schemes used for addressing across
+ * ADSP2 and Halo Core DSPs:
+ *
+ * dsp words: The addressing scheme used by the DSP; pointers and lengths
+ * in DSP memory use this. A memory region (XM, YM, ZM) is
+ * also required to create a unique DSP memory address.
+ * registers: Addresses in the register map. Older ADSP2 devices have
+ * 16-bit registers with an address stride of 1. Newer ADSP2
+ * devices have 32-bit registers with an address stride of 2.
+ * Halo Core devices have 32-bit registers with a stride of 4.
+ * unpacked: Registers that have a 1:1 mapping to DSP words.
+ * packed: Registers that pack multiple DSP words more efficiently into
+ * multiple 32-bit registers. Because of this, the relationship
+ * between a packed _register_ address and the corresponding
+ * _dsp word_ address differs from that of unpacked registers.
+ * Packed registers can only be accessed as a group of multiple
+ * registers, and can therefore only read/write a group of
+ * multiple DSP words.
+ * Packed registers only exist on Halo Core DSPs.
+ *
+ * Addresses can also be relative to the start of an algorithm, and this
+ * can be expressed in dsp words, register addresses, or bytes.
+ */
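+
+/*
+ * Worked example (illustrative, for a Halo Core DSP with register
+ * stride 4): unpacked DSP word N of a memory is at the unpacked base
+ * address plus (N * 4) register addresses, whereas packed DSP words
+ * 8..11 share the three consecutive 32-bit registers starting at the
+ * packed base address plus (((8 * 3) / 4) * 4).
+ */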
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+};
+
+struct bin_test_param {
+ const char *name;
+ int mem_type;
+ unsigned int offset_words;
+ int alg_idx;
+};
+
+static const struct cs_dsp_mock_alg_def bin_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xfbfb,
+ .ver = 0x100000,
+ .xm_size_words = 99,
+ .ym_size_words = 99,
+ .zm_size_words = 99,
+ },
+ {
+ .id = 0xc321,
+ .ver = 0x100000,
+ .xm_size_words = 120,
+ .ym_size_words = 120,
+ .zm_size_words = 120,
+ },
+ {
+ .id = 0xb123,
+ .ver = 0x100000,
+ .xm_size_words = 96,
+ .ym_size_words = 96,
+ .zm_size_words = 96,
+ },
+};
+
+/*
+ * Convert a number of DSP words to a number of packed registers,
+ * rounded down to the nearest whole register.
+ * Four DSP words pack into three 32-bit registers, so, for example,
+ * 7 words map to 5 whole registers.
+ */
+static unsigned int _num_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ return (num_dsp_words * 3) / 4;
+}
+
+/* bin file that patches a single DSP word */
+static void bin_patch_one_word(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with a single payload that patches consecutive words */
+static void bin_patch_one_multiword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple one-word payloads that patch consecutive words */
+static void bin_patch_multi_oneword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(payload_data); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + i) * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads that patch a block of
+ * consecutive words, but the payloads are not in address order.
+ */
+static void bin_patch_multi_oneword_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ static const u8 word_order[] = { 10, 2, 12, 4, 0, 11, 6, 1, 3, 15, 5, 13, 8, 7, 9, 14 };
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+ static_assert(ARRAY_SIZE(word_order) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(word_order); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + word_order[i]) *
+ reg_inc_per_word,
+ &payload_data[word_order[i]], sizeof(payload_data[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads. The payloads are not in address
+ * order and collectively do not patch a contiguous block of memory.
+ */
+static void bin_patch_multi_oneword_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ static const u8 word_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ u32 payload_data[44];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ word_offsets[i] * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + word_offsets[i]) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &reg_val, &payload_data[i], sizeof(reg_val));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_one_word_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ unsigned int alg_xm_base_words, alg_ym_base_words, alg_zm_base_words;
+ unsigned int reg_addr;
+ u32 payload_data[3];
+ struct firmware *fw;
+ u32 reg_val;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_XM);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_YM);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_zm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_ZM);
+ } else {
+ alg_zm_base_words = 0;
+ }
+
+ /* Add words to XM, YM and ZM */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_XM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[0], sizeof(payload_data[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_YM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[1], sizeof(payload_data[1]));
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_ZM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[2], sizeof(payload_data[2]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM) +
+ ((alg_xm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[0]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM) +
+ ((alg_ym_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[1]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM) +
+ ((alg_zm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[2]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ */
+static void bin_patch_one_word_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_one_word_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file that patches a single packed block of DSP words */
+static void bin_patch_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is one word longer than a packed block using one
+ * packed block followed by one unpacked word.
+ */
+static void bin_patch_1_packed_1_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked word following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by two blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_2_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by three blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_3_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 6) - alg_base_words) * 4,
+ &unpacked_payloads[2], sizeof(unpacked_payloads[2]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by a block of two unpacked words.
+ */
+static void bin_patch_1_packed_2_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by a block of three unpacked words.
+ */
+static void bin_patch_1_packed_3_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts one word before a packed boundary using one
+ * unpacked word followed by one packed block.
+ */
+static void bin_patch_1_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
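+	/* The extra +4 leaves room below the block for the leading unpacked words */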
+
+ /* Patch the leading unpacked word */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 1) * 4;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using two
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_2_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using one
+ * block of two unpacked words followed by one packed block.
+ */
+static void bin_patch_2_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using three
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_3_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[2], sizeof(unpacked_payload[2]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using one
+ * block of three unpacked words followed by one packed block.
+ */
+static void bin_patch_3_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple payloads that each patch one packed block. */
+static void bin_patch_multi_onepacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(packed_payloads); ++i) {
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words + (i * 4));
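+		/* Convert to a byte offset relative to the algorithm base */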
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i], sizeof(packed_payloads[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order.
+ */
+static void bin_patch_multi_onepacked_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 payload_order[] = { 4, 3, 6, 1, 0, 7, 5, 2 };
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(payload_order) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(payload_order); ++i) {
+ patch_pos_in_packed_regs =
+ _num_words_to_num_packed_regs(patch_pos_words + (payload_order[i] * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[payload_order[i]],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content in registers should match the order of data in packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order. The patched memory is not contiguous.
+ */
+static void bin_patch_multi_onepacked_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 word_offsets[] = { 60, 24, 76, 4, 40, 52, 48, 36, 12 };
+ u32 packed_payloads[9][3], readback[3];
+ unsigned int alg_base_words, alg_base_in_packed_regs;
+ unsigned int patch_pos_words, patch_pos_in_packed_regs, payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads[0]));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads[i], sizeof(packed_payloads[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_1_packed_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_xm_payload[3], packed_ym_payload[3], readback[3];
+ unsigned int alg_xm_base_words, alg_ym_base_words;
+ unsigned int xm_patch_pos_words, ym_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_xm_payload));
+ static_assert(sizeof(readback) == sizeof(packed_ym_payload));
+
+ get_random_bytes(packed_xm_payload, sizeof(packed_xm_payload));
+ get_random_bytes(packed_ym_payload, sizeof(packed_ym_payload));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_XM_PACKED);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_YM_PACKED);
+
+ /* Round patch start word up to a packed boundary */
+ xm_patch_pos_words = round_up(alg_xm_base_words + param->offset_words, 4);
+ ym_patch_pos_words = round_up(alg_ym_base_words + param->offset_words, 4);
+
+ /* Add XM and YM patches */
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_xm_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_XM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_xm_payload, sizeof(packed_xm_payload));
+
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_ym_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_YM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_ym_payload, sizeof(packed_ym_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed XM registers should match packed_xm_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_XM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_xm_payload, sizeof(packed_xm_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_xm_payload));
+
+ /* Content of packed YM registers should match packed_ym_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_YM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_ym_payload, sizeof(packed_ym_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_ym_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ */
+static void bin_patch_1_packed_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+	/* For each algorithm, patch one DSP word to a value from packed_payload */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+	 * Read back the registers that should have been written. Place
+ * the values into the expected location in readback[] so that
+ * the content of readback[] should match packed_payload[]
+ */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_1_packed_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /*
+	 * For each algorithm index in alg_order[], patch one DSP word in
+ * that algorithm to a value from packed_payload.
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+	 * Read back the registers that should have been written. Place
+ * the values into the expected location in readback[] so that
+ * the content of readback[] should match packed_payload[]
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that contains a mix of packed and unpacked words.
+ * Payloads are in random offset order. Offsets that are on a packed boundary
+ * are written as a packed block. Offsets that are not on a packed boundary
+ * are written as a single unpacked word.
+ */
+static void bin_patch_mixed_packed_unpacked_random(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 offset_words[] = {
+ 58, 68, 50, 10, 44, 17, 74, 36, 8, 7, 49, 11, 78, 57, 65, 2,
+ 48, 38, 22, 70, 77, 21, 61, 56, 75, 34, 27, 3, 31, 20, 43, 63,
+ 5, 30, 32, 25, 33, 79, 29, 0, 37, 60, 69, 52, 13, 12, 24, 26,
+ 4, 51, 76, 72, 16, 6, 39, 62, 15, 41, 28, 73, 53, 40, 45, 54,
+ 14, 55, 46, 66, 64, 59, 23, 9, 67, 47, 19, 71, 35, 18, 42, 1,
+ };
+ struct {
+ u32 packed[80][3];
+ u32 unpacked[80];
+ } *payload;
+ u32 readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ struct firmware *fw;
+ int i;
+
+ payload = kunit_kmalloc(test, sizeof(*payload), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, payload);
+
+ get_random_bytes(payload->packed, sizeof(payload->packed));
+ get_random_bytes(payload->unpacked, sizeof(payload->unpacked));
+
+ /* Create a patch entry for every offset in offset_words[] */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ /*
+		 * If the offset is on a packed boundary use a packed payload,
+		 * otherwise use an unpacked word.
+ */
+ patch_pos_words = alg_base_words + offset_words[i];
+ if ((patch_pos_words % 4) == 0) {
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ param->mem_type,
+ payload_offset,
+ payload->packed[i],
+ sizeof(payload->packed[i]));
+ } else {
+ payload_offset = offset_words[i] * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ unpacked_mem_type,
+ payload_offset,
+ &payload->unpacked[i],
+ sizeof(payload->unpacked[i]));
+ }
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /*
+	 * Read back the packed registers that should have been written.
+ * Place the values into the expected location in readback[] so
+ * that the content of readback[] should match payload->packed[]
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is not on a packed boundary */
+ if ((patch_pos_words % 4) != 0)
+ continue;
+
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload->packed[i], sizeof(payload->packed[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->packed[i]));
+ }
+
+ /*
+	 * Read back the unpacked registers that should have been written.
+ * Place the values into the expected location in readback[] so
+ * that the content of readback[] should match payload->unpacked[]
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ unpacked_mem_type);
+
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is on a packed boundary */
+ if ((patch_pos_words % 4) == 0)
+ continue;
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+			(patch_pos_words * 4);
+
+ readback[0] = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[0], sizeof(readback[0])),
+ 0);
+ KUNIT_EXPECT_EQ(test, readback[0], payload->unpacked[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->unpacked[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Bin file with name and multiple info blocks */
+static void bin_patch_name_and_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ char *infobuf;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ WMFW_ADSP2_YM);
+
+ /* Add a name block and info block */
+ cs_dsp_mock_bin_add_name(priv->local->bin_builder, "The name");
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, "Some info");
+
+ /* Add a big block of info */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
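+	/* Repeatedly append the marker string until the 512-byte buffer is full */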
+	while (strlcat(infobuf, "Waffle{Blah}\n", 512) < 512)
+		;
+
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, infobuf);
+
+ /* Add a patch */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM,
+ 0,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg_addr += alg_base_words * reg_inc_per_word;
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+}
+
+static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_mock_xm_header *xm_hdr;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!priv->local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /* Create an XM header */
+ xm_hdr = cs_dsp_create_mock_xm_header(priv,
+ bin_test_mock_algs,
+ ARRAY_SIZE(bin_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_hdr);
+ ret = cs_dsp_mock_xm_header_write_to_regmap(xm_hdr);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv->local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder);
+
+ /* We must provide a dummy wmfw to load */
+ priv->local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, -1);
+ priv->local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+/* Parameterize on choice of XM or YM with a range of word offsets */
+static const struct bin_test_param x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 20 },
+
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 20 },
+};
+
+/* Parameterize on ZM with a range of word offsets */
+static const struct bin_test_param z_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 20 },
+};
+
+/* Parameterize on choice of packed XM or YM with a range of word offsets */
+static const struct bin_test_param packed_x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 12 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 12 },
+};
+
+static void x_or_y_or_z_and_offset_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s@%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->offset_words);
+}
+
+KUNIT_ARRAY_PARAM(x_or_y_and_offset,
+ x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(z_and_offset,
+ z_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_offset,
+ packed_x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
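+
+/*
+ * Each KUNIT_ARRAY_PARAM(name, ...) defines a name_gen_params generator
+ * function; the KUNIT_CASE_PARAM() entries in the test case tables below
+ * reference these generators.
+ */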
+
+/* Parameterize on choice of packed XM or YM */
+static const struct bin_test_param packed_x_or_y_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+};
+
+static void x_or_y_or_z_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(packed_x_or_y, packed_x_or_y_param_cases, x_or_y_or_z_param_desc);
+
+static const struct bin_test_param offset_param_cases[] = {
+ { .offset_words = 0 },
+ { .offset_words = 1 },
+ { .offset_words = 2 },
+ { .offset_words = 3 },
+ { .offset_words = 4 },
+ { .offset_words = 23 },
+ { .offset_words = 22 },
+ { .offset_words = 21 },
+ { .offset_words = 20 },
+};
+
+static void offset_param_desc(const struct bin_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "@%u", param->offset_words);
+}
+
+KUNIT_ARRAY_PARAM(offset, offset_param_cases, offset_param_desc);
+
+static const struct bin_test_param alg_param_cases[] = {
+ { .alg_idx = 0 },
+ { .alg_idx = 1 },
+ { .alg_idx = 2 },
+ { .alg_idx = 3 },
+};
+
+static void alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg[%u] (%#x)",
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(alg, alg_param_cases, alg_param_desc);
+
+static const struct bin_test_param x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 3 },
+
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 3 },
+};
+
+static void x_or_y_or_z_and_alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s alg[%u] (%#x)",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(x_or_y_and_alg, x_or_y_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param z_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(z_and_alg, z_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param packed_x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 3 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_alg, packed_x_or_y_and_alg_param_cases,
+ x_or_y_or_z_and_alg_param_desc);
+
+static struct kunit_case cs_dsp_bin_test_cases_halo[] = {
+ /* Unpacked memory */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* Packed memory tests */
+ KUNIT_CASE_PARAM(bin_patch_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_1_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_sparse_unordered,
+ packed_x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_mixed_packed_unpacked_random,
+ packed_x_or_y_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_bin_test_cases_adsp2[] = {
+ /* XM and YM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* ZM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, z_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, z_and_offset_gen_params),
+
+ /* Other */
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_test_halo = {
+ .name = "cs_dsp_bin_halo",
+ .init = cs_dsp_bin_test_halo_init,
+ .test_cases = cs_dsp_bin_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_adsp2_32bit",
+ .init = cs_dsp_bin_test_adsp2_32bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_adsp2_16bit",
+ .init = cs_dsp_bin_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_test_halo,
+ &cs_dsp_bin_test_adsp2_32bit,
+ &cs_dsp_bin_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
new file mode 100644
index 000000000000..5dcf62f19faf
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+ int wmfw_version;
+};
+
+struct cs_dsp_bin_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_bin_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/* Load a bin containing unknown blocks. They should be skipped. */
+static void bin_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the bin */
+ get_random_bytes(random_data, sizeof(random_data));
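+	/*
+	 * Block types 0xf5, 0xf500 and 0xc300 are arbitrary values that
+	 * don't correspond to any block type known to cs_dsp.
+	 */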
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf500, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xc300, 0,
+ random_data, sizeof(random_data));
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a bin that doesn't have a valid magic marker. */
+static void bin_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
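+
+	/*
+	 * Overwrite the magic marker at the start of the file with the
+	 * magic of a wmfw file, then with variants of "WMDR" that each
+	 * have one corrupted character, and finally with zeros.
+	 */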
+
+ memcpy((void *)bin->data, "WMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "xMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WxDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMxR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMDx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memset((void *)bin->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Load a bin that is too short for a valid header. */
+static void bin_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
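+	/* Trim one byte at a time; every size below a full header must be rejected */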
+ do {
+ bin->size--;
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ } while (bin->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void bin_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ unsigned int real_len, len;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ real_len = le32_to_cpu(header->len);
+
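+	/* Any claimed header length shorter than the real header must be rejected */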
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
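+	/* Header lengths that run past the end of the file must also be rejected */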
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Wrong core type in header. */
+static void bin_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+
+ header->core_ver = cpu_to_le32(0);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(priv->dsp->type + 1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(0xff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void bin_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int header_length;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
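+	/* With no blocks added, the bin is exactly the length of the file header */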
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header_length = bin->size;
+ kunit_kfree(test, bin);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ NULL, 0);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_ASSERT_GT(test, bin->size, header_length);
+
+ for (bin->size--; bin->size > header_length; bin->size--) {
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* File too short to contain the block payload */
+static void bin_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ static const u8 payload[256] = { };
+ int i;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
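+	/* Trim one payload byte at a time; every truncated length must be rejected */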
+ for (i = 0; i < sizeof(payload); i++) {
+ bin->size--;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void bin_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ struct wmfw_coeff_item *block;
+ u32 payload = 0;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ &payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ block = (struct wmfw_coeff_item *)&bin->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the bin */
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(block->type), param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(block->len), sizeof(payload));
+
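+	/* Block lengths that exceed the available payload must all be rejected */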
+ block->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+static void cs_dsp_bin_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate-limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_bin_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_bin_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_bin_err_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_bin_err_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 1);
+}
+
+static struct kunit_case cs_dsp_bin_err_test_cases_halo[] = {
+
+ { } /* terminator */
+};
+
+static void cs_dsp_bin_err_block_types_desc(const struct cs_dsp_bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+/* Some block types to test against, including illegal types */
+static const struct cs_dsp_bin_test_param bin_test_block_types_cases[] = {
+ { .block_type = WMFW_INFO_TEXT << 8 },
+ { .block_type = WMFW_METADATA << 8 },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_XM },
+ { .block_type = 0x33 },
+ { .block_type = 0xf500 },
+ { .block_type = 0xc000 },
+};
+
+KUNIT_ARRAY_PARAM(bin_test_block_types,
+ bin_test_block_types_cases,
+ cs_dsp_bin_err_block_types_desc);
+
+static struct kunit_case cs_dsp_bin_err_test_cases_adsp2[] = {
+ KUNIT_CASE(bin_load_with_unknown_blocks),
+ KUNIT_CASE(bin_err_wrong_magic),
+ KUNIT_CASE(bin_err_too_short_for_header),
+ KUNIT_CASE(bin_err_bad_header_length),
+ KUNIT_CASE(bin_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(bin_too_short_for_block_header, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_too_short_for_block_payload, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_block_payload_len_garbage, bin_test_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_halo = {
+ .name = "cs_dsp_bin_err_halo",
+ .init = cs_dsp_bin_err_test_halo_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_err_adsp2_32bit",
+ .init = cs_dsp_bin_err_test_adsp2_32bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_err_adsp2_16bit",
+ .init = cs_dsp_bin_err_test_adsp2_16bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_err_test_halo,
+ &cs_dsp_bin_err_test_adsp2_32bit,
+ &cs_dsp_bin_err_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
new file mode 100644
index 000000000000..8a9b66a3b7d3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
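+/* ADSP2 register and status bit the tests use to fake a watchdog timeout */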
+#define ADSP2_LOCK_REGION_CTRL 0x7A
+#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+
+ int num_control_add;
+ int num_control_remove;
+ int num_pre_run;
+ int num_post_run;
+ int num_pre_stop;
+ int num_post_stop;
+ int num_watchdog_expired;
+
+ struct cs_dsp_coeff_ctl *passed_ctl[16];
+ struct cs_dsp *passed_dsp;
+};
+
+struct cs_dsp_callbacks_test_param {
+ const struct cs_dsp_client_ops *ops;
+ const char *case_name;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_callbacks_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+static int cs_dsp_test_control_add_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_add] = ctl;
+ local->num_control_add++;
+
+ return 0;
+}
+
+static void cs_dsp_test_control_remove_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_remove] = ctl;
+ local->num_control_remove++;
+}
+
+static int cs_dsp_test_pre_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_run++;
+
+ return 0;
+}
+
+static int cs_dsp_test_post_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_run++;
+
+ return 0;
+}
+
+static void cs_dsp_test_pre_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_stop++;
+}
+
+static void cs_dsp_test_post_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_stop++;
+}
+
+static void cs_dsp_test_watchdog_expired_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_watchdog_expired++;
+}
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_client_ops = {
+ .control_add = cs_dsp_test_control_add_callback,
+ .control_remove = cs_dsp_test_control_remove_callback,
+ .pre_run = cs_dsp_test_pre_run_callback,
+ .post_run = cs_dsp_test_post_run_callback,
+ .pre_stop = cs_dsp_test_pre_stop_callback,
+ .post_stop = cs_dsp_test_post_stop_callback,
+ .watchdog_expired = cs_dsp_test_watchdog_expired_callback,
+};
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_empty_client_ops = {
+ /* No entries */
+};
+
+static void cs_dsp_test_run_stop_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
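+	/* Run and stop a second time; the run/stop callback counts should increment again */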
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 2);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+}
+
+static void cs_dsp_test_ctl_v1_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Add a control for each memory */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ def.shortname = "zm";
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "ym";
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "xm";
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 3);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_ctl_v2_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char name[2] = { };
+ int i;
+
+ /* Add some controls */
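+	/* One control per passed_ctl[] entry, named 'A', 'B', ... at consecutive offsets */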
+ def.shortname = name;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ for (i = 0; i < ARRAY_SIZE(local->passed_ctl); ++i) {
+ name[0] = 'A' + i;
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, ARRAY_SIZE(local->passed_ctl));
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+
+	/* Add a control */
+ def.shortname = "A";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Run a sequence of ops that would invoke callbacks */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ cs_dsp_stop(priv->dsp);
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ /* Something went very wrong if any of our callbacks were called */
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 0);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+ /* Notify an interrupt and the watchdog callback should be called */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+ /* Notify an interrupt, which will look for a watchdog callback */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static void cs_dsp_test_halo_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Notify an interrupt and the watchdog callback should be called */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_halo_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Notify an interrupt, which will look for a watchdog callback */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static int cs_dsp_callbacks_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ const struct cs_dsp_callbacks_test_param *param = test->param_value;
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ struct cs_dsp_mock_xm_header *xm_header;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+	 * There must always be an XM header with at least 1 algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_callbacks_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_callbacks_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ xm_header->blob_data,
+ xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = param->ops;
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_callbacks_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_callbacks_test_adsp2_32bit_init(struct kunit *test, int rev)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = rev;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v2_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v1_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_callbacks_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 1);
+}
+
+static void cs_dsp_callbacks_param_desc(const struct cs_dsp_callbacks_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", param->case_name);
+}
+
+/* Parameterize on different client callback ops tables */
+static const struct cs_dsp_callbacks_test_param cs_dsp_callbacks_ops_cases[] = {
+ { .ops = &cs_dsp_callback_test_client_ops, .case_name = "all ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_callbacks_ops,
+ cs_dsp_callbacks_ops_cases,
+ cs_dsp_callbacks_param_desc);
+
+static const struct cs_dsp_callbacks_test_param cs_dsp_no_callbacks_cases[] = {
+ { .ops = &cs_dsp_callback_test_empty_client_ops, .case_name = "empty ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_no_callbacks,
+ cs_dsp_no_callbacks_cases,
+ cs_dsp_callbacks_param_desc);
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv1_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v1_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_adsp2v2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_halo = {
+ .name = "cs_dsp_callbacks_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_callbacks_halo_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v2_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v2_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v1_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v1_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v1_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2_16bit = {
+ .name = "cs_dsp_callbacks_adsp2_16bit_wmfwv1",
+ .init = cs_dsp_callbacks_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv1_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_adsp2v2_32bit = {
+ .name = "cs_dsp_watchdog_adsp2v2_32bit",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_watchdog_adsp2v2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_halo_32bit = {
+ .name = "cs_dsp_watchdog_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_watchdog_halo_test_cases,
+};
+
+kunit_test_suites(&cs_dsp_callbacks_test_halo,
+ &cs_dsp_callbacks_test_adsp2v2_32bit,
+ &cs_dsp_callbacks_test_adsp2v1_32bit,
+ &cs_dsp_callbacks_test_adsp2_16bit,
+ &cs_dsp_watchdog_test_adsp2v2_32bit,
+ &cs_dsp_watchdog_test_halo_32bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
new file mode 100644
index 000000000000..83386cc978e3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
@@ -0,0 +1,3282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_cache_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_cache_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
+static const char * const cs_dsp_ctl_cache_test_fw_names[] = {
+ "misc", "mbc/vss", "haps",
+};
+
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_cache_test_algs); ++i) {
+ if (cs_dsp_ctl_cache_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ return i;
+}
+
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
+/*
+ * Memory allocated for the control cache must be large enough.
+ * This creates multiple controls of different sizes, so it only works
+ * with wmfw V2 and later.
+ */
+static void cs_dsp_ctl_v2_cache_alloc(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_size_bytes;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char ctl_name[4];
+ u32 *reg_vals;
+ int num_ctls;
+
+ /* Create some DSP data to initialize the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ alg_size_bytes = cs_dsp_ctl_cache_test_algs[0].ym_size_words *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_vals = kunit_kzalloc(test, alg_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, alg_size_bytes);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls of different sizes */
+ def.mem_type = WMFW_ADSP2_YM;
+ def.shortname = ctl_name;
+ num_ctls = 0;
+ for (def.length_bytes = 4; def.length_bytes <= 64; def.length_bytes += 4) {
+ snprintf(ctl_name, ARRAY_SIZE(ctl_name), "%x", def.length_bytes);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ num_ctls++;
+ def.offset_dsp_words += def.length_bytes / sizeof(u32);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&dsp->ctl_list), num_ctls);
+
+ /* Check that the block allocated for the cache is large enough */
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ KUNIT_EXPECT_GE(test, ksize(ctl->cache), ctl->len);
+}
+
+/*
+ * Content of registers backing a control should be read into the
+ * control cache when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * For a non-volatile write-only control the cache should be zero-filled
+ * when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init_write_only(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *readback, *zeros;
+
+ zeros = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, zeros);
+
+ readback = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create a non-volatile write-only control */
+ def.flags = param->flags & ~WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The control cache should have been zero-filled so should be
+ * readable through the control.
+ */
+ get_random_bytes(readback, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, zeros, param->len_bytes);
+}
+
+/*
+ * Multiple different firmware with identical controls.
+ * This is legal because different firmwares could contain the same
+ * algorithm.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fw_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+	/* Create an identical control in each firmware */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware create random content in the register backing
+ * the control. Then download, start, stop and power-down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[0]) == 0)
+ ctl[0] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[1]) == 0)
+ ctl[1] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[2]) == 0)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Multiple different firmware with controls identical except for alg id.
+ * This is legal because the controls are qualified by algorithm id.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+ /* Create an identical control in each firmware but with different alg id */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware create random content in the register backing
+ * the control. Then download, start, stop and power-down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (cs_dsp_ctl_cache_test_algs[0].id == walkctl->alg_region.alg)
+ ctl[0] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[1].id == walkctl->alg_region.alg)
+ ctl[1] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[2].id == walkctl->alg_region.alg)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls at the same position in different memories.
+ * The control cache should be initialized with content from the
+ * correct memory region.
+ */
+static void cs_dsp_ctl_cache_init_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for memory region */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
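+ /* Not all cores have a ZM region, so only create a ZM control if the mock DSP has one */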
+ if (cs_dsp_mock_has_zm(priv)) {
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_XM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_ZM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+ }
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 2 or 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list),
+ cs_dsp_mock_has_zm(priv) ? 3 : 2);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.type == WMFW_ADSP2_YM)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_XM)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_ZM)
+ ctl[2] = walkctl;
+ }
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+ }
+}
+
+/*
+ * Firmware with controls at the same position in different algorithms.
+ * The control cache should be initialized with content from the
+ * memory of the algorithm it points to.
+ */
+static void cs_dsp_ctl_cache_init_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create controls identical except for algorithm */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ /* Create random content in the registers backing each control */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ }
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[0].id)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[1].id)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[2].id)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls in the same algorithm and memory but at
+ * different offsets.
+ * The control cache should be initialized with content from the
+ * correct offset.
+ * Only for wmfw format V2 and later. V1 only supports one control per
+ * memory per algorithm.
+ */
+static void cs_dsp_ctl_cache_init_multiple_offsets(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_base_reg;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for offset */
+ def.length_bytes = 8;
+ def.offset_dsp_words = 0;
+ def.shortname = "CtlA";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 5;
+ def.shortname = "CtlB";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 8;
+ def.shortname = "CtlC";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ alg_base_reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ alg_base_reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
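+ /* The three controls sit at word offsets 0, 5 and 8 from the algorithm base */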
+ reg = alg_base_reg;
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+ reg = alg_base_reg + (5 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+ reg = alg_base_reg + (8 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->offset == 0)
+ ctl[0] = walkctl;
+ if (walkctl->offset == 5)
+ ctl[1] = walkctl;
+ if (walkctl->offset == 8)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Read from a cached control before the firmware is started.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
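+ /* With the backing registers dropped, matching data below can only have come from the control cache */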
+
+ /* Control should read back the data from the control cache */
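+ /* Only one control was defined, so it is the sole entry in the list */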
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been stopped.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the DSP has been powered-up and
+ * then powered-down without running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been run and
+ * stopped, then the DSP has been powered-down.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
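+ /* The control from the first firmware remains in the list, along with its cached data */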
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Power-up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
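+ /* The deferred action ensures cs_dsp_stop() is called even if an assertion exits the test early */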
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with non-zero flags while the firmware is
+ * running.
+ * Should return the data in the cache, not the data in the registers.
+ */
+static void cs_dsp_ctl_cache_read_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create data in the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(init_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Change the values in the registers backing the control then drop
+ * them from the regmap cache. This allows checking that the control
+ * read is returning values from the control cache and not accessing
+ * the registers.
+ */
+ KUNIT_ASSERT_EQ(test,
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes),
+ 0);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Control should read back the original data from its cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
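+ /* (the release runs the registered stop action immediately rather than at test cleanup) */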
+ cs_dsp_power_down(dsp);
+
+ /* Control should read back from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with flags == 0 while the firmware is
+ * running.
+ * Should behave as volatile and read from the registers.
+ * (This preserves backwards compatibility with old firmware versions that pre-date control flags)
+ */
+static void cs_dsp_ctl_cache_read_running_zero_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
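+ /* flags == 0 mimics a control from an old firmware that pre-dates control flags */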
+ def.flags = 0;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Change the values in the registers backing the control */
+ get_random_bytes(new_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Change the values in the registers backing the control */
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Control should read back from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is running.
+ * This should be a writethrough operation, writing to the cache and
+ * the registers.
+ */
+static void cs_dsp_ctl_cache_writethrough(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
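+ /* (zeroed registers give the cache known initial content) */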
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control; it should be written through to the registers and return 1 to indicate a change */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is running.
+ * The control write should return 0 to indicate that the content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_writethrough_unchanged(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * If the control is write-only the cache will have been zero-initialized
+ * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+ * Write the same data to the control, cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is not started.
+ * The control write should return 0 to indicate that the cache content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_write_unchanged_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * If the control is write-only the cache will have been zero-initialized
+ * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+ * Write the same data to the control, cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is loaded but not
+ * started.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started and stopped.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * then the DSP powered-down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started, stopped, and then the DSP powered-down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power down the DSP, then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+ * It should be possible to write new data to the control from
+ * the first firmware, but the data should not be written to
+ * the registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written, so the regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently running firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP, then power down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+ * It should be possible to write new data to the control from
+ * the first firmware, but the data should not be written to
+ * the registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written, so the regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control before running the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_before_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP but don't start the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is running.
+ * The value written should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_while_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *ctl_vals, *readback;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ ctl_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP and start the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Stop the firmware and zero the registers backing the control */
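+ /* kunit_release_action() runs the registered stop action immediately */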
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after stopping the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_after_stop(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP but don't start the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control the next time the firmware containing the
+ * control is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_not_current_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP but don't start the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power down the DSP, then power up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Write new data to the control; it should not be written to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Power down the DSP, then power up with the original firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be synced out to the registers
+ * backing the control every time the firmware containing the control
+ * is run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_every_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP but don't start the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and reset the registers */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Start the firmware again and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained if the same
+ * firmware is downloaded again. It should be synced out to the
+ * registers backing the control after the firmware containing the
+ * control is downloaded again and run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_reload(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP but don't start the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the firmware again; the cache content should not change */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained after a different
+ * firmware is downloaded.
+ * When the firmware containing the control is downloaded and run,
+ * the value in the control cache should be synced out to the registers
+ * backing the control.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_swap(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power up the DSP but don't start the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download and run a different firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the original firmware again */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
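+ /* The control should still be flagged as set by the earlier write */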
+ KUNIT_EXPECT_TRUE(test, ctl->set);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+static int cs_dsp_ctl_cache_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least one algorithm, so
+ * create a dummy one that tests can use, and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_cache_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when the test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_cache_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
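+ /* Halo firmware uses wmfw format version 3 */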
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_cache_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
+
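+/*
+ * KUNIT_ARRAY_PARAM() generates a <name>_gen_params function that walks
+ * the given cases array; KUNIT_CASE_PARAM() uses it to run a test once
+ * per entry, with cs_dsp_ctl_all_param_desc() naming each case.
+ */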
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_cache_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control, except flags==0
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_nonzero_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_nonzero_flags,
+ all_pop_nonvol_readable_nonzero_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for
+ * non-volatile write-only controls of varying lengths
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_write_only_length_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_write_only_length,
+ all_pop_nonvol_write_only_length_cases,
+ cs_dsp_ctl_all_param_desc);
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v1[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
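+/*
+ * The v2 list additionally covers control allocation and multiple
+ * controls at different offsets: wmfw v2 coefficient descriptors carry
+ * control names (v1 controls are anonymous), so multiple controls can
+ * be distinguished within one algorithm region.
+ */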
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v2[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
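+/*
+ * The v3 list is run on Halo cores (see the suite definitions below),
+ * which have no ZM region, so the ZM parameter cases are omitted.
+ */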
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_halo = {
+ .name = "cs_dsp_ctl_cache_wmfwV3_halo",
+ .init = cs_dsp_ctl_cache_test_halo_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_ctl_cache_test_halo,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
new file mode 100644
index 000000000000..cb90964740ea
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
@@ -0,0 +1,1851 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
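+/*
+ * Wrap cleanup functions so they can be registered as KUnit deferred
+ * actions and run automatically when the test case exits.
+ */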
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
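+/*
+ * Parameters for the parameterized test cases. Each test overrides the
+ * relevant fields of the mock coefficient descriptor (or of the parent
+ * alg-info block, for alg_id) with these values.
+ */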
+struct cs_dsp_ctl_parse_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offset;
+ unsigned int length;
+ u16 ctl_type;
+ u16 flags;
+};
+
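+/* Dummy algorithm descriptors used to build the mock XM header. */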
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_parse_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_size_words = 8,
+ .ym_size_words = 8,
+ .zm_size_words = 8,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+};
+
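+/*
+ * Template for a mock coefficient descriptor. Tests take a copy of this
+ * and override the fields under test.
+ */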
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+/* Algorithm info block without controls should load */
+static void cs_dsp_ctl_parse_no_coeffs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/*
+ * V1 controls do not have names, so the name field in the coefficient
+ * entry should be ignored.
+ */
+static void cs_dsp_ctl_parse_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Dummy";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names, so the name field in the coefficient
+ * entry should be ignored. Test with a zero-length name string.
+ */
+static void cs_dsp_ctl_parse_empty_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "\0";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names, so the name field in the coefficient
+ * entry should be ignored. Test with a maximum-length name string.
+ */
+static void cs_dsp_ctl_parse_max_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *name;
+
+ name = kunit_kzalloc(test, 256, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
+ memset(name, 'A', 255);
+ def.fullname = name;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Short name from coeff descriptor should be used as control name. */
+static void cs_dsp_ctl_parse_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Short name from coeff descriptor should be used as control name.
+ * Test with a short name that is a single character.
+ */
+static void cs_dsp_ctl_parse_min_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.shortname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 1);
+ KUNIT_EXPECT_EQ(test, ctl->subname[0], 'Q');
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Short name from coeff descriptor should be used as control name.
+ * Test with a maximum length name.
+ */
+static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char *name;
+ struct firmware *wmfw;
+
+ name = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
+ memset(name, 'A', 255);
+
+ def.shortname = name;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 255);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, name, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a 1-character full name.
+ */
+static void cs_dsp_ctl_parse_with_min_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a maximum-length full name.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *fullname;
+
+ fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
+ memset(fullname, 'A', 255);
+ def.fullname = fullname;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Description from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a 1-character description.
+ */
+static void cs_dsp_ctl_parse_with_min_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.description = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Description from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a maximum-length description.
+ */
+static void cs_dsp_ctl_parse_with_max_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *description;
+
+ description = kunit_kmalloc(test, 65535, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
+ memset(description, 'A', 65535);
+ def.description = description;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name and description from coeff descriptor are variable-length
+ * fields, so they affect the position of subsequent fields.
+ * Test with a maximum-length full name and description.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname_and_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *fullname, *description;
+
+ fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
+ memset(fullname, 'A', 255);
+ def.fullname = fullname;
+
+ description = kunit_kmalloc(test, 65535, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
+ memset(description, 'A', 65535);
+ def.description = description;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
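+/* Names of every length from 1 to 15 characters to hit all padding cases. */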
+static const char * const cs_dsp_ctl_alignment_test_names[] = {
+ "1", "12", "123", "1234", "12345", "123456", "1234567",
+ "12345678", "123456789", "123456789A", "123456789AB",
+ "123456789ABC", "123456789ABCD", "123456789ABCDE",
+ "123456789ABCDEF",
+};
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of short name.
+ */
+static void cs_dsp_ctl_shortname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ def.shortname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_alignment_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, i + 1);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_alignment_test_names[i],
+ ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of full name.
+ */
+static void cs_dsp_ctl_fullname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.fullname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of description.
+ */
+static void cs_dsp_ctl_description_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.description = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
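+/*
+ * Control names for lookup tests. Some entries are substrings or
+ * near-matches of others, so a lookup must match the entire name.
+ */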
+static const char * const cs_dsp_get_ctl_test_names[] = {
+ "Up", "Down", "Switch", "Mute",
+ "Left Up", "Left Down", "Right Up", "Right Down",
+ "Left Mute", "Right Mute",
+ "_trunc_1", "_trunc_2", " trunc",
+};
+
+/* Test using cs_dsp_get_ctl() to look up various controls. */
+static void cs_dsp_get_ctl_test(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_get_ctl_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(cs_dsp_get_ctl_test_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_get_ctl_test_names[i],
+ ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->offset, i);
+ }
+}
+
+/*
+ * cs_dsp_get_ctl() searches for the control in the currently loaded
+ * firmware, so create identical controls in multiple firmware files and
+ * test that the correct one is found.
+ */
+static void cs_dsp_get_ctl_test_multiple_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ def.shortname = "_A_CONTROL";
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control of the same name */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* A lookup should return the control for the current firmware */
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 2);
+
+ /* Re-load the 'misc' firmware and a lookup should return its control */
+ cs_dsp_power_down(priv->dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 1);
+}
+
+/* Test that the value of the memory type field is parsed correctly. */
+static void cs_dsp_ctl_parse_memory_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ /* kunit_skip() marks the test skipped forever, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the algorithm id from the parent alg-info block is
+ * correctly stored in the cs_dsp_coeff_ctl.
+ */
+static void cs_dsp_ctl_parse_alg_id(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, def.mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the (alg id, memory type) tuple is parsed correctly.
+ * The alg id is parsed from the alg-info block, but the memory type is
+ * parsed from the coefficient info descriptor.
+ */
+static void cs_dsp_ctl_parse_alg_mem(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ /* kunit_skip() marks the test skipped forever, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+}
+
+/* Test that the value of the offset field is parsed correctly. */
+static void cs_dsp_ctl_parse_offset(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.offset_dsp_words = param->offset;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, param->offset);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the length field is parsed correctly. */
+static void cs_dsp_ctl_parse_length(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.length_bytes = param->length;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, def.offset_dsp_words);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, param->length);
+}
+
+/* Test that the value of the control type field is parsed correctly. */
+static void cs_dsp_ctl_parse_ctl_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, param->ctl_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the flags field is parsed correctly. */
+static void cs_dsp_ctl_parse_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, param->flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that invalid combinations of (control type, flags) are rejected. */
+static void cs_dsp_ctl_illegal_type_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_LT(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/* Test that the correct firmware name is stored in the cs_dsp_coeff_ctl. */
+static void cs_dsp_ctl_parse_fw_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* Both controls should be in the list (order not guaranteed) */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = NULL;
+ ctl2 = NULL;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, "misc") == 0)
+ ctl1 = walkctl;
+ else if (strcmp(walkctl->fw_name, "mbc/vss") == 0)
+ ctl2 = walkctl;
+ }
+
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, 1);
+ KUNIT_EXPECT_EQ(test, ctl2->offset, 2);
+}
+
+/* Controls are unique if the algorithm ID is different */
+static void cs_dsp_ctl_alg_id_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ /* Create an algorithm containing the control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create a different algorithm containing an identical control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[1].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if the memory region is different */
+static void cs_dsp_ctl_mem_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ /* Create control in XM */
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ /* Create control in YM */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if they are in different firmware */
+static void cs_dsp_ctl_fw_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with the same control */
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2",
+ NULL, NULL, "mbc/vss"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STRNEQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded, the controls are not added again.
+ * This creates multiple algorithms with one control each, which works
+ * with both V1 format and >=V2 format controls.
+ */
+static void cs_dsp_ctl_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_ctl_parse_test_algs)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Create some algorithms with a control */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_parse_test_algs); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[i].id,
+ "dummyalg", NULL);
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded, the controls are not added again.
+ * This tests >=V2 firmware, which can have multiple named controls in
+ * the same algorithm.
+ */
+static void cs_dsp_ctl_v2_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_get_ctl_test_names)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create some controls */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ if (i & BIT(0))
+ def.mem_type = WMFW_ADSP2_XM;
+ else
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
+static const char * const cs_dsp_ctl_v2_compare_len_names[] = {
+ "LEFT",
+ "LEFT_",
+ "LEFT_SPK",
+ "LEFT_SPK_V",
+ "LEFT_SPK_VOL",
+ "LEFT_SPK_MUTE",
+ "LEFT_SPK_1",
+ "LEFT_X",
+ "LEFT2",
+};
+
+/*
+ * When comparing shortnames, the full length of both strings is
+ * considered, not only the characters of the shorter string, so that
+ * "LEFT" is not the same as "LEFT2".
+ * This specifically tests for the bug that was fixed by commit
+ * 7ac1102b227b ("firmware: cs_dsp: Fix new control name check")
+ */
+static void cs_dsp_ctl_v2_compare_len(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ def.shortname = cs_dsp_ctl_v2_compare_len_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_v2_compare_len_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len,
+ strlen(cs_dsp_ctl_v2_compare_len_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_v2_compare_len_names[i],
+ ctl->subname_len);
+ }
+}
+
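+/*
+ * Common initialization for all test cases: create a mock DSP device and
+ * regmap, build a dummy XM header blob and a wmfw builder, then run the
+ * cs_dsp init for the DSP type under test.
+ */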
+static int cs_dsp_ctl_parse_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use, and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_parse_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header blob to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_parse_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 2);
+}
+
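+/* Memory region types to test; ZM cases return early on cores without ZM. */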
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_mem_type_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM },
+ { .mem_type = WMFW_ADSP2_YM },
+ { .mem_type = WMFW_ADSP2_ZM },
+};
+
+static void cs_dsp_ctl_mem_type_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s",
+ cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_mem_type,
+ cs_dsp_ctl_mem_type_param_cases,
+ cs_dsp_ctl_mem_type_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_alg_id_param_cases[] = {
+ { .alg_id = 0xb },
+ { .alg_id = 0xfafa },
+ { .alg_id = 0x9f1234 },
+ { .alg_id = 0xff00ff },
+};
+
+static void cs_dsp_ctl_alg_id_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg_id:%#x", param->alg_id);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_alg_id,
+ cs_dsp_ctl_alg_id_param_cases,
+ cs_dsp_ctl_alg_id_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_offset_param_cases[] = {
+ { .offset = 0x0 },
+ { .offset = 0x1 },
+ { .offset = 0x2 },
+ { .offset = 0x3 },
+ { .offset = 0x4 },
+ { .offset = 0x5 },
+ { .offset = 0x6 },
+ { .offset = 0x7 },
+ { .offset = 0xe0 },
+ { .offset = 0xf1 },
+ { .offset = 0xfffe },
+ { .offset = 0xffff },
+};
+
+static void cs_dsp_ctl_offset_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "offset:%#x", param->offset);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_offset,
+ cs_dsp_ctl_offset_param_cases,
+ cs_dsp_ctl_offset_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_length_param_cases[] = {
+ { .length = 0x4 },
+ { .length = 0x8 },
+ { .length = 0x18 },
+ { .length = 0xf000 },
+};
+
+static void cs_dsp_ctl_length_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "length:%#x", param->length);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_length,
+ cs_dsp_ctl_length_param_cases,
+ cs_dsp_ctl_length_desc);
+
+ /* Note: some control types mandate specific flag settings */
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_type_param_cases[] = {
+ { .ctl_type = WMFW_CTL_TYPE_BYTES,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+};
+
+static void cs_dsp_ctl_type_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "ctl_type:%#x flags:%#x",
+ param->ctl_type, param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_type,
+ cs_dsp_ctl_type_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_flags_param_cases[] = {
+ { .flags = 0 },
+ { .flags = WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+static void cs_dsp_ctl_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "flags:%#x", param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_flags,
+ cs_dsp_ctl_flags_param_cases,
+ cs_dsp_ctl_flags_desc);
+
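+ /* Each entry below violates the mandatory flag rules for its control type */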
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_illegal_type_flags_param_cases[] = {
+ /* ACKED control must be volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* HOSTEVENT must be system + volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* FWEVENT rules same as HOSTEVENT */
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /*
+ * HOST_BUFFER must be system + volatile + readable or
+ * system + volatile + readable + writeable
+ */
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE},
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
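+ /*
+ * wmfw v1 controls have only a simple name field, so v1 firmware
+ * files are covered by a reduced set of cases.
+ */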
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v1[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_empty_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_v1_name),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v2_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_min_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname_and_description),
+ KUNIT_CASE(cs_dsp_ctl_shortname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_fullname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_description_alignment),
+ KUNIT_CASE(cs_dsp_get_ctl_test),
+ KUNIT_CASE(cs_dsp_get_ctl_test_multiple_wmfw),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_compare_len),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_halo = {
+ .name = "cs_dsp_ctl_parse_wmfwV3_halo",
+ .init = cs_dsp_ctl_parse_test_halo_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+kunit_test_suites(&cs_dsp_ctl_parse_test_halo,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
new file mode 100644
index 000000000000..bda00a95d4f9
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
@@ -0,0 +1,2669 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
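+ /*
+ * Action wrappers so kunit_add_action() can schedule put_device(),
+ * cs_dsp_stop() and cs_dsp_remove() as automatic test cleanup.
+ */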
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_rw_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
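+ /*
+ * Dummy algorithms with non-overlapping XM/YM/ZM regions. The tests
+ * compute control register addresses from these base offsets.
+ */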
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_rw_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
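+ /* Each test copies this template and overrides the fields under test */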
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_rw_test_algs); ++i) {
+ if (cs_dsp_ctl_rw_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+
+ return i;
+}
+
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
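+ /*
+ * Build a second minimal wmfw so tests can power-up the DSP with a
+ * different firmware from the one that defined the control under test.
+ */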
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Add a dummy XM header blob to the wmfw */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
+/*
+ * Write to a control while the firmware is running.
+ * This should write to the underlying registers.
+ */
+static void cs_dsp_ctl_write_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
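+ /* reg now addresses (alg base + control offset) within the chosen memory region */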
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Write new data to the control. It should be written to the
+ * registers, and cs_dsp_coeff_lock_and_write_ctrl() should return 1
+ * to indicate that the control content changed.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from a volatile control while the firmware is running.
+ * This should return the current state of the underlying registers.
+ */
+static void cs_dsp_ctl_read_volatile_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Read the control; it should return the current register content */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /*
+ * Change the register content and read the control; it should return
+ * the new register content.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes), 0);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * running.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Write to a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * running.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from an offset into the control data. Should return only the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_read_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
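+ /* Read from each word offset; only data from that offset onwards should be returned */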
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read from an offset into the control cache. Should return only the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_read_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
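+ /* Repeat the seek reads, this time served from the control cache */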
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a control. Should return
+ * only the requested number of bytes.
+ */
+static void cs_dsp_ctl_read_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a cached control.
+ * Should return only the requested number of bytes.
+ * Same as cs_dsp_ctl_read_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control data. Should only change the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_write_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
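+ /*
+  * cs_dsp_coeff_lock_and_write_ctrl() returns 1 when the control value
+  * changed; fresh random data should always report a change here.
+  */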
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control cache. Should only change the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_write_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a control. Should only
+ * change the requested number of bytes.
+ */
+static void cs_dsp_ctl_write_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a cached control.
+ * Should only change the requested number of bytes.
+ * Same as cs_dsp_ctl_write_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read from an offset that is beyond the end of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ seek_words = def.length_bytes / sizeof(u32);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Read more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+}
+
+/*
+ * Read with a seek and length that ends beyond the end of control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Read full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
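+ /* (the read would need def.length_bytes + 4 bytes of control data) */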
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Write to an offset that is beyond the end of the control data.
+ * Should return an error without touching any registers.
+ */
+static void cs_dsp_ctl_write_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ seek_words = def.length_bytes / sizeof(u32);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write with a seek and length that ends beyond the end of control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /*
+ * Write full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from a write-only control. This is legal because controls can
+ * always be read. The write-only flag merely indicates that it is not
+ * useful to populate the cache from the DSP memory.
+ */
+static void cs_dsp_ctl_read_from_writeonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *ctl_vals, *readback;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ ctl_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write some test data to the control */
+ get_random_bytes(ctl_vals, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, def.length_bytes),
+ 1);
+
+ /* Read back the data */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+ }
+}
+
+/*
+ * Write to a read-only control.
+ * This should return an error without writing registers.
+ */
+static void cs_dsp_ctl_write_to_readonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+static int cs_dsp_ctl_rw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_rw_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_rw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_rw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
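+
+/*
+ * Example of a generated parameter description (illustrative only; the
+ * exact region name string comes from cs_dsp_mem_region_name()):
+ *
+ *   alg:0xfafa YM@1 len:4 flags:0
+ */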
+
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
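+
+/*
+ * KUNIT_ARRAY_PARAM() defines all_pop_varying_len_gen_params(), which the
+ * KUNIT_CASE_PARAM() entries in the test case tables below use to iterate
+ * over this array. Each KUNIT_ARRAY_PARAM() below works the same way.
+ */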
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_rw_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
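+
+/*
+ * Note: the ZM cases are only referenced from the ADSP2 test case table;
+ * the Halo table omits them because Halo cores have no ZM region.
+ */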
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readable_flags,
+ all_pop_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * read-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readonly_flags,
+ all_pop_readonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeable_flags,
+ all_pop_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * write-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeonly_flags,
+ all_pop_writeonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* flags == 0 is volatile while firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_readable_flags,
+ all_pop_volatile_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile writeable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* flags == 0 is volatile while firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_writeable_flags,
+ all_pop_volatile_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+static struct kunit_case cs_dsp_ctl_rw_test_cases_adsp[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_rw_test_cases_halo[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
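+/*
+ * The HALO core has its own case table; the four ADSP2 suites below all
+ * reuse the same cs_dsp_ctl_rw_test_cases_adsp table across wmfw format
+ * versions and register widths.
+ */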
+static struct kunit_suite cs_dsp_ctl_rw_test_halo = {
+ .name = "cs_dsp_ctl_rw_wmfwV3_halo",
+ .init = cs_dsp_ctl_rw_test_halo_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+kunit_test_suites(&cs_dsp_ctl_rw_test_halo,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
new file mode 100644
index 000000000000..9e997c4ee2d6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
@@ -0,0 +1,2211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create a dummy wmfw file.
+ * 3) Call cs_dsp_power_up() with the wmfw file.
+ * 4) Read back the cached value of registers that should have been written and
+ * check they have the correct value.
+ * 5) All the registers that are expected to have been written are dropped from
+ * the cache. This should leave the cache clean.
+ * 6) If the cache is still dirty there have been unexpected writes.
+ */
+
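+/*
+ * Adapt the cleanup functions to the KUnit deferred-action signature so
+ * they can be registered with kunit_add_action_or_reset().
+ */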
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_vfree_wrapper, vfree, void *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
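+/* Test parameters: number of register blocks to write and target memory */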
+struct cs_dsp_wmfw_test_param {
+ unsigned int num_blocks;
+ int mem_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
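+/* A single mock algorithm is sufficient for building the XM header */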
+
+/*
+ * wmfw that writes the XM header.
+ * cs_dsp always reads this back from unpacked XM.
+ */
+static void wmfw_write_xm_header_unpacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ unsigned int reg_addr;
+ u8 *readback;
+
+ /* XM header payload was added to wmfw by test case init function */
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Read raw so endianness and register width don't matter */
+ readback = kunit_kzalloc(test, local->xm_header->blob_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ local->xm_header->blob_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write one payload of param->num_blocks register blocks */
+static void wmfw_write_one_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
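+ /*
+ * Note that the do-while below executes at least once, so the final
+ * payload is always at least one block longer than param->num_blocks
+ * blocks.
+ */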
+ do {
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type, mem_offset_dsp_words,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write several of the smallest possible payloads for the given memory type */
+static void wmfw_write_multiple_oneblock_payloads(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Add multiple payloads of one block each */
+ for (i = 0; i < num_payloads; ++i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write several of the smallest possible payloads for the given memory
+ * type, in reverse address order.
+ */
+static void wmfw_write_multiple_oneblock_payloads_reverse(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Add multiple payloads of one block each */
+ for (i = num_payloads - 1; i >= 0; --i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write multiple payloads of length param->num_blocks.
+ * The payloads are not in address order and collectively do not patch
+ * a contiguous block of memory.
+ */
+static void wmfw_write_multiple_payloads_sparse_unordered(struct kunit *test)
+{
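+ /*
+ * Fixed "random" offsets, so that payload placement is scattered but
+ * reproducible from run to run.
+ */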
+ static const unsigned int random_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const int num_payloads = ARRAY_SIZE(random_offsets);
+ int i;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ payload_size_dsp_words = param->num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Add multiple payloads at "random" locations */
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset = random_offsets[i] * payload_size_dsp_words;
+
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + offset,
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset_num_regs = (random_offsets[i] * payload_size_bytes) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[i * payload_size_bytes],
+ payload_size_bytes),
+ 0);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single unpacked payload */
+static void wmfw_write_all_unpacked_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_ADSP2_PM);
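+ /* The PM region can be too large for kmalloc, so use vmalloc + deferred vfree */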
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_PM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_PM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single packed payload */
+static void wmfw_write_all_packed_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_HALO_PM_PACKED);
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_HALO_PM_PACKED, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_PM_PACKED);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
+ */
+static void wmfw_write_multiple_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_ADSP2_PM, 11, 60 },
+ { WMFW_ADSP2_ZM, 69, 8 },
+ { WMFW_ADSP2_YM, 32, 74 },
+ { WMFW_ADSP2_XM, 70, 38 },
+ { WMFW_ADSP2_PM, 84, 48 },
+ { WMFW_ADSP2_XM, 46, 18 },
+ { WMFW_ADSP2_PM, 0, 8 },
+ { WMFW_ADSP2_YM, 0, 30 },
+ { WMFW_ADSP2_PM, 160, 50 },
+ { WMFW_ADSP2_ZM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks, payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various packed and unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
+ */
+static void wmfw_write_multiple_packed_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_HALO_PM_PACKED, 11, 60 },
+ { WMFW_ADSP2_YM, 69, 8 },
+ { WMFW_HALO_YM_PACKED, 32, 74 },
+ { WMFW_HALO_XM_PACKED, 70, 38 },
+ { WMFW_HALO_PM_PACKED, 84, 48 },
+ { WMFW_HALO_XM_PACKED, 46, 18 },
+ { WMFW_HALO_PM_PACKED, 0, 8 },
+ { WMFW_HALO_YM_PACKED, 0, 30 },
+ { WMFW_HALO_PM_PACKED, 160, 50 },
+ { WMFW_ADSP2_XM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * using one packed payload followed by one unpacked word.
+ */
+static void wmfw_write_packed_1_unpacked_trailing(struct kunit *test)
+{
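+ /*
+ * The packed and unpacked mem types are two register-map views of the
+ * same DSP memory, so the trailing unpacked words land directly after
+ * the DSP words written by the packed payload.
+ */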
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of one unpacked word to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache. The unpacked payload is offset within
+ * unpacked register space by the number of DSP words that were
+ * written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by one payload of two unpacked words.
+ */
+static void wmfw_write_packed_2_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of two unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by one payload of three unpacked words.
+ */
+static void wmfw_write_packed_3_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of three unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by two payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_2_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add two unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by three payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_3_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add three unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 2,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one unpacked word
+ * followed by a packed payload.
+ */
+static void wmfw_write_packed_1_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for an unaligned word before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 1;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /* Add a single unpacked word right before the first word of packed data */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked word. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 1) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of two unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_2_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of three unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_3_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use two payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_2_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as two payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use three payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_3_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as three payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Load a wmfw containing multiple info blocks */
+static void wmfw_load_with_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ char *infobuf;
+ const unsigned int payload_size_bytes = 48;
+ int ret;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add a couple of info blocks at the start of the wmfw */
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is a timestamp");
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is some more info");
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ /* Add a bigger info block then another small one */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
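+ /* Repeatedly append until strlcat() reports the 512-byte buffer is full */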
+ while (strlcat(infobuf, "Waffle{Blah}\n", 512) < 512)
+ ;
+
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, infobuf);
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "Another block of info");
+
+ /* Add another payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 64,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ ret = cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc");
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "cs_dsp_power_up failed: %d\n", ret);
+
+ /* Check first payload was written */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Check second payload was written */
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * 64;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+static int cs_dsp_wmfw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least one algorithm, so
+ * create a dummy one that tests can use and extract it to a data payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_mem_param_desc(const struct cs_dsp_wmfw_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s num_blocks:%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->num_blocks);
+}
+
+static const struct cs_dsp_wmfw_test_param adsp2_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 16 },
+};
+
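+/* Generates adsp2_all_num_blocks_gen_params() for use with KUNIT_CASE_PARAM() */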
+KUNIT_ARRAY_PARAM(adsp2_all_num_blocks,
+ adsp2_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param halo_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(halo_all_num_blocks,
+ halo_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param packed_xy_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(packed_xy_num_blocks,
+ packed_xy_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static struct kunit_case cs_dsp_wmfw_test_cases_halo[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ halo_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_packed_pm),
+ KUNIT_CASE(wmfw_write_multiple_packed_unpacked_mem),
+
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_test_cases_adsp2[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ adsp2_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_unpacked_pm),
+ KUNIT_CASE(wmfw_write_multiple_unpacked_mem),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_halo = {
+ .name = "cs_dsp_wmfwV3_halo",
+ .init = cs_dsp_wmfw_test_halo_init,
+ .test_cases = cs_dsp_wmfw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_test_halo,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
new file mode 100644
index 000000000000..c309843261d7
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
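+/* Action wrappers so cleanup is deferred until the test case ends */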
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_wmfw_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+/* Load a wmfw containing unknown blocks. They should be skipped. */
+static void wmfw_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the wmfw */
+ get_random_bytes(random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xc0, 0, random_data,
+ sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0x33, 0, NULL, 0);
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a wmfw that doesn't have a valid magic marker. */
+static void wmfw_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
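+ /* Corrupt the magic marker; "WMDR" is the bin file magic so is also invalid here */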
+ memcpy((void *)wmfw->data, "WMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "xMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WxFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMxW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMFx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memset((void *)wmfw->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* Load a wmfw that is too short for a valid header. */
+static void wmfw_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
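+ /* Truncate one byte at a time; every shorter length must be rejected */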
+ do {
+ wmfw->size--;
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ } while (wmfw->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void wmfw_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ unsigned int real_len, len;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ real_len = le32_to_cpu(header->len);
+
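+ /* Lengths shorter than, or slightly longer than, the real header must be rejected */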
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* Wrong core type in header. */
+static void wmfw_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+
+ header->core = 0;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = priv->dsp->type + 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 0xff;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void wmfw_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+ u32 dummy_payload = 0;
+
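+ /* Capture the size of a wmfw that contains only the file header */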
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ /* Add the block. A block must have at least 4 bytes of payload */
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &dummy_payload, sizeof(dummy_payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* File too short to contain the block payload */
+static void wmfw_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ static const u8 payload[256] = { };
+ int i;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (i = 0; i < sizeof(payload); i++) {
+ wmfw->size--;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void wmfw_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ u32 payload = 0;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the wmfw */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->offset) >> 24, param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->len), sizeof(payload));
+
+ region->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* File too short to contain an algorithm header */
+static void wmfw_too_short_for_alg_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ NULL, NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* V1 algorithm name does not have NUL terminator */
+static void wmfw_v1_alg_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Write a string to the alg name that overflows the array */
+ memset(alg_data->descr, 0, sizeof(alg_data->descr));
+ memset(alg_data->name, 'A', sizeof(alg_data->name));
+ memset(alg_data->descr, 'A', sizeof(alg_data->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen would overflow alg_data->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that a strnlen() returns
+ * sizeof(alg_data->name[]), therefore it doesn't have a NUL.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(alg_data->name, sizeof(alg_data->name)),
+ sizeof(alg_data->name));
+
+ /*
+ * The alg name isn't stored, but cs_dsp parses the name field.
+ * It should load the file successfully and create the control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp string length walks past end of alg name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ algorithm name exceeds length of containing block */
+static void wmfw_v2_alg_name_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[1]), 3 | ('a' << 8) | ('b' << 16) | ('c' << 24));
+ KUNIT_ASSERT_EQ(test, *(u8 *)&alg_data[1], 3);
+
+ /* Set name string length longer than available space */
+ *(u8 *)&alg_data[1] = 4;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 7;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0x80;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0xff;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ algorithm description exceeds length of containing block */
+static void wmfw_v2_alg_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc[desc_len]de
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[2]), 2 | ('d' << 16) | ('e' << 24));
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(*(__le16 *)&alg_data[2]), 2);
+
+ /* Set description string length longer than available space */
+ *(__le16 *)&alg_data[2] = cpu_to_le16(4);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(7);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x80);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coefficient count exceeds length of containing block */
+static void wmfw_v1_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Add one to the coefficient count */
+ alg_data->ncoeff = cpu_to_le32(le32_to_cpu(alg_data->ncoeff) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ alg_data->ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient count exceeds length of containing block */
+static void wmfw_v2_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *ncoeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ ncoeff = (__force __le32 *)&alg_data[3];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(*ncoeff), 1);
+
+ /* Add one to the coefficient count */
+ *ncoeff = cpu_to_le32(2);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ *ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient block size exceeds length of containing block */
+static void wmfw_v2_coeff_block_size_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the block size */
+ coeff[1] = cpu_to_le32(le32_to_cpu(coeff[1]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the block size garbage */
+ coeff[1] = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coeff name does not have NUL terminator */
+static void wmfw_v1_coeff_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct wmfw_adsp_coeff_data *coeff;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->ncoeff), 1);
+
+ coeff = (void *)alg_data->data;
+
+	/* Fill coeff->name with no NUL terminator; give descr a maximal valid string */
+ memset(coeff->descr, 0, sizeof(coeff->descr));
+ memset(coeff->name, 'A', sizeof(coeff->name));
+ memset(coeff->descr, 'A', sizeof(coeff->descr) - 1);
+
+	/*
+	 * Sanity-check that a strlen() would overrun coeff->name.
+	 * CONFIG_FORTIFY_SOURCE obstructs testing what strlen() would
+	 * actually return, so instead verify that strnlen() returns
+	 * sizeof(coeff->name), proving the field contains no NUL.
+	 */
+ KUNIT_ASSERT_EQ(test, strnlen(coeff->name, sizeof(coeff->name)),
+ sizeof(coeff->name));
+
+	/*
+	 * V1 controls do not have names, but cs_dsp parses the name
+	 * field. The file should load successfully and create the
+	 * control.
+	 * If CONFIG_FORTIFY_SOURCE is enabled it will detect a buffer
+	 * overrun if cs_dsp's string handling walks past the end of
+	 * the coeff name array.
+	 */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
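+
+/*
+ * Illustrative sketch of the defensive pattern this test relies on:
+ * treat a fixed-width name field as potentially unterminated and bound
+ * every read of it:
+ *
+ *	len = strnlen(coeff->name, sizeof(coeff->name));
+ *
+ * strnlen() never walks past the field even when no NUL is present,
+ * whereas a bare strlen() would overrun it.
+ */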
+
+/* V2+ coefficient shortname exceeds length of coeff block */
+static void wmfw_v2_coeff_shortname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the shortname length */
+ coeff[2] = cpu_to_le32(le32_to_cpu(coeff[2]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum shortname length */
+ coeff[2] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient fullname exceeds length of coeff block */
+static void wmfw_v2_coeff_fullname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname;
+ size_t shortlen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Fullname follows the shortname rounded up to a __le32 boundary */
+ shortlen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32));
+ fullname = &coeff[2] + (shortlen / sizeof(*coeff));
+
+	/* Fullname length is padded to a __le32 boundary, so bump it past the current padded size */
+ fullname[0] = cpu_to_le32(round_up(le32_to_cpu(fullname[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum fullname length */
+ fullname[0] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient description exceeds length of coeff block */
+static void wmfw_v2_coeff_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname, *description;
+ size_t namelen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Description follows the shortname and fullname rounded up to __le32 boundaries */
+ namelen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32));
+ fullname = &coeff[2] + (namelen / sizeof(*coeff));
+ namelen = round_up(le32_to_cpu(fullname[0]) & 0xff, sizeof(__le32));
+ description = fullname + (namelen / sizeof(*fullname));
+
+	/* Description length is padded to a __le32 boundary, so bump it past the current padded size */
+	description[0] = cpu_to_le32(round_up(le32_to_cpu(description[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum description length */
+	description[0] = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
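+
+/*
+ * Illustrative sketch, not the cs_dsp parser: the common check behind
+ * the three v2 string-field tests above. An embedded string whose
+ * declared length extends past the end of its containing block must be
+ * rejected:
+ *
+ *	if (offset > block_len || declared_len > block_len - offset)
+ *		return -EOVERFLOW;
+ *
+ * offset, declared_len and block_len are hypothetical names.
+ */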
+
+static void cs_dsp_wmfw_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+	 * from cs_dsp error messages, so rate-limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_wmfw_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+	 * There must always be an XM header with at least one algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_wmfw_err_block_types_desc(const struct cs_dsp_wmfw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_adsp2_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_adsp2,
+ wmfw_valid_block_types_adsp2_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_halo_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_HALO_PM_PACKED },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_halo,
+ wmfw_valid_block_types_halo_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static const struct cs_dsp_wmfw_test_param wmfw_invalid_block_types_cases[] = {
+ { .block_type = 0x33 },
+ { .block_type = 0xf5 },
+ { .block_type = 0xc0 },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_invalid_block_types,
+ wmfw_invalid_block_types_cases,
+ cs_dsp_wmfw_err_block_types_desc);
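+
+/*
+ * For reference: KUNIT_ARRAY_PARAM(name, array, get_desc) generates a
+ * name_gen_params() iterator that steps through @array and calls
+ * @get_desc for each element. KUNIT_CASE_PARAM() below consumes it:
+ *
+ *	static const void *name_gen_params(const void *prev, char *desc);
+ */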
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v0[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+	KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+	KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v1[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+	KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+	KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v1_alg_name_unterminated),
+ KUNIT_CASE(wmfw_v1_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v1_coeff_name_unterminated),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v2[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+	KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+	KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v3[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_halo_gen_params),
+	KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_halo_gen_params),
+	KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_halo = {
+ .name = "cs_dsp_wmfwV3_err_halo",
+ .init = cs_dsp_wmfw_err_test_halo_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_err_test_halo,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_tests.c b/drivers/firmware/cirrus/test/cs_dsp_tests.c
new file mode 100644
index 000000000000..7b829a03ca52
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_tests.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("KUnit tests for Cirrus Logic DSP driver");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
+MODULE_IMPORT_NS("FW_CS_DSP_KUNIT_TEST_UTILS");
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 72f2537d90ca..5fe61b9ab5f9 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -76,20 +76,14 @@ config EFI_ZBOOT
bool "Enable the generic EFI decompressor"
depends on EFI_GENERIC_STUB && !ARM
select HAVE_KERNEL_GZIP
- select HAVE_KERNEL_LZ4
- select HAVE_KERNEL_LZMA
- select HAVE_KERNEL_LZO
- select HAVE_KERNEL_XZ
select HAVE_KERNEL_ZSTD
help
Create the bootable image as an EFI application that carries the
actual kernel image in compressed form, and decompresses it into
- memory before executing it via LoadImage/StartImage EFI boot service
- calls. For compatibility with non-EFI loaders, the payload can be
- decompressed and executed by the loader as well, provided that the
- loader implements the decompression algorithm and that non-EFI boot
- is supported by the encapsulated image. (The compression algorithm
- used is described in the zboot image header)
+ memory before executing it. For compatibility with non-EFI loaders,
+ the payload can be decompressed and executed by the loader as well,
+ provided that the loader implements the decompression algorithm.
+ (The compression algorithm used is described in the zboot header)
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 937be269fee8..13ea141c0def 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -47,9 +47,9 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
return 0;
}
-static int __init match_pci_dev(struct device *dev, void *data)
+static int __init match_pci_dev(struct device *dev, const void *data)
{
- unsigned int devfn = *(unsigned int *)data;
+ unsigned int devfn = *(const unsigned int *)data;
return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
}
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 552c78f5f059..a253b6144945 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -6,7 +6,7 @@
#include <linux/slab.h>
#include <linux/ucs2_string.h>
-MODULE_IMPORT_NS(EFIVAR);
+MODULE_IMPORT_NS("EFIVAR");
#define DUMP_NAME_LEN 66
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 70490bf2697b..8296bf985d1d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -148,9 +148,6 @@ static ssize_t systab_show(struct kobject *kobj,
if (efi.smbios != EFI_INVALID_TABLE_ADDR)
str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
- if (IS_ENABLED(CONFIG_X86))
- str = efi_systab_show_arch(str);
-
return str - buf;
}
@@ -273,6 +270,7 @@ static __init int efivar_ssdt_load(void)
efi_char16_t *name = NULL;
efi_status_t status;
efi_guid_t guid;
+ int ret = 0;
if (!efivar_ssdt[0])
return 0;
@@ -294,8 +292,8 @@ static __init int efivar_ssdt_load(void)
efi_char16_t *name_tmp =
krealloc(name, name_size, GFP_KERNEL);
if (!name_tmp) {
- kfree(name);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
name = name_tmp;
continue;
@@ -309,26 +307,38 @@ static __init int efivar_ssdt_load(void)
pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
- if (status != EFI_BUFFER_TOO_SMALL || !data_size)
- return -EIO;
+ if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
+ ret = -EIO;
+ goto out;
+ }
data = kmalloc(data_size, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
status = efi.get_variable(name, &guid, NULL, &data_size, data);
if (status == EFI_SUCCESS) {
- acpi_status ret = acpi_load_table(data, NULL);
- if (ret)
- pr_err("failed to load table: %u\n", ret);
- else
+ acpi_status acpi_ret = acpi_load_table(data, NULL);
+ if (ACPI_FAILURE(acpi_ret)) {
+ pr_err("efivar_ssdt: failed to load table: %u\n",
+ acpi_ret);
+ } else {
+ /*
+			 * The @data will be in use by the ACPI engine,
+ * do not free it!
+ */
continue;
+ }
} else {
- pr_err("failed to get var data: 0x%lx\n", status);
+ pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
}
kfree(data);
}
- return 0;
+out:
+ kfree(name);
+ return ret;
}
#else
static inline int efivar_ssdt_load(void) { return 0; }
@@ -433,7 +443,9 @@ static int __init efisubsys_init(void)
error = generic_ops_register();
if (error)
goto err_put;
- efivar_ssdt_load();
+ error = efivar_ssdt_load();
+ if (error)
+ pr_err("efi: failed to load SSDT, error %d.\n", error);
platform_device_register_simple("efivars", 0, NULL, 0);
}
diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c
index f5be8e22305b..b49a09d7e665 100644
--- a/drivers/firmware/efi/embedded-firmware.c
+++ b/drivers/firmware/efi/embedded-firmware.c
@@ -16,9 +16,9 @@
/* Exported for use by lib/test_firmware.c only */
LIST_HEAD(efi_embedded_fw_list);
-EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE);
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, "TEST_FIRMWARE");
bool efi_embedded_fw_checked;
-EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE);
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, "TEST_FIRMWARE");
static const struct dmi_system_id * const embedded_fw_table[] = {
#ifdef CONFIG_TOUCHSCREEN_DMI
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 7a81c0ce4780..4bb7b0584bc9 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -75,8 +75,6 @@ static LIST_HEAD(entry_list);
struct esre_attribute {
struct attribute attr;
ssize_t (*show)(struct esre_entry *entry, char *buf);
- ssize_t (*store)(struct esre_entry *entry,
- const char *buf, size_t count);
};
static struct esre_entry *to_entry(struct kobject *kobj)
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index ed4e8ddbe76a..1141cd06011f 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-y := $(KBUILD_CFLAGS)
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
index 65ffd0b760b2..48842b5c106b 100644
--- a/drivers/firmware/efi/libstub/Makefile.zboot
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -12,22 +12,16 @@ quiet_cmd_copy_and_pad = PAD $@
$(obj)/vmlinux.bin: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
$(call if_changed,copy_and_pad)
-comp-type-$(CONFIG_KERNEL_GZIP) := gzip
-comp-type-$(CONFIG_KERNEL_LZ4) := lz4
-comp-type-$(CONFIG_KERNEL_LZMA) := lzma
-comp-type-$(CONFIG_KERNEL_LZO) := lzo
-comp-type-$(CONFIG_KERNEL_XZ) := xzkern
-comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22
-
# in GZIP, the appended le32 carrying the uncompressed size is part of the
# format, but in other cases, we just append it at the end for convenience,
# causing the original tools to complain when checking image integrity.
-# So disregard it when calculating the payload size in the zimage header.
-zboot-method-y := $(comp-type-y)_with_size
-zboot-size-len-y := 4
+comp-type-y := gzip
+zboot-method-y := gzip
+zboot-size-len-y := 0
-zboot-method-$(CONFIG_KERNEL_GZIP) := gzip
-zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
+comp-type-$(CONFIG_KERNEL_ZSTD) := zstd
+zboot-method-$(CONFIG_KERNEL_ZSTD) := zstd22_with_size
+zboot-size-len-$(CONFIG_KERNEL_ZSTD) := 4
$(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
$(call if_changed,$(zboot-method-y))
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index de659f6a815f..fd6dc790c5a8 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -47,9 +47,10 @@ bool __pure __efi_soft_reserve_enabled(void)
*/
efi_status_t efi_parse_options(char const *cmdline)
{
- size_t len;
+ char *buf __free(efi_pool) = NULL;
efi_status_t status;
- char *str, *buf;
+ size_t len;
+ char *str;
if (!cmdline)
return EFI_SUCCESS;
@@ -102,7 +103,6 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_parse_option_graphics(val + strlen("efifb:"));
}
}
- efi_bs_call(free_pool, buf);
return EFI_SUCCESS;
}
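
The __free(efi_pool) annotations adopted throughout these stub changes
come from <linux/cleanup.h>, paired with the DEFINE_FREE(efi_pool, ...)
disposer added to efistub.h later in this series. A minimal sketch of
the pattern, with a hypothetical helper:

	static efi_status_t get_pool_buffer(unsigned long size, void **out)
	{
		void *buf __free(efi_pool) = NULL;
		efi_status_t status;

		status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, &buf);
		if (status != EFI_SUCCESS)
			return status;	/* buf (if any) is freed on scope exit */

		*out = no_free_ptr(buf);	/* transfer ownership to the caller */
		return EFI_SUCCESS;
	}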
@@ -250,7 +250,7 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
u64, const union efistub_event *);
struct { u32 hash_log_extend_event; } mixed_mode;
} method;
- struct efistub_measured_event *evt;
+ struct efistub_measured_event *evt __free(efi_pool) = NULL;
int size = struct_size(evt, tagged_event.tagged_event_data,
events[event].event_data_len);
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
@@ -312,7 +312,6 @@ static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
status = efi_fn_call(&method, hash_log_extend_event, protocol, 0,
load_addr, load_size, &evt->event_data);
- efi_bs_call(free_pool, evt);
if (status == EFI_SUCCESS)
return EFI_SUCCESS;
@@ -327,7 +326,7 @@ fail:
* Size of memory allocated return in *cmd_line_len.
* Returns NULL on error.
*/
-char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
+char *efi_convert_cmdline(efi_loaded_image_t *image)
{
const efi_char16_t *options = efi_table_attr(image, load_options);
u32 options_size = efi_table_attr(image, load_options_size);
@@ -405,7 +404,6 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
snprintf((char *)cmdline_addr, options_bytes, "%.*ls",
options_bytes - 1, options);
- *cmd_line_len = options_bytes;
return (char *)cmdline_addr;
}
@@ -621,10 +619,6 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
status = efi_load_initrd_dev_path(&initrd, hard_limit);
if (status == EFI_SUCCESS) {
efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
- if (initrd.size > 0 &&
- efi_measure_tagged_event(initrd.base, initrd.size,
- EFISTUB_EVT_INITRD) == EFI_SUCCESS)
- efi_info("Measured initrd data into PCR 9\n");
} else if (status == EFI_NOT_FOUND) {
status = efi_load_initrd_cmdline(image, &initrd, soft_limit,
hard_limit);
@@ -637,6 +631,11 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
if (status != EFI_SUCCESS)
goto failed;
+ if (initrd.size > 0 &&
+ efi_measure_tagged_event(initrd.base, initrd.size,
+ EFISTUB_EVT_INITRD) == EFI_SUCCESS)
+ efi_info("Measured initrd data into PCR 9\n");
+
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(initrd),
(void **)&tbl);
if (status != EFI_SUCCESS)
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 958a680e0660..874f63b4a383 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -10,6 +10,7 @@
*/
#include <linux/efi.h>
+#include <linux/screen_info.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -53,25 +54,16 @@ void __weak free_screen_info(struct screen_info *si)
static struct screen_info *setup_graphics(void)
{
- efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
- efi_status_t status;
- unsigned long size;
- void **gop_handle = NULL;
- struct screen_info *si = NULL;
+ struct screen_info *si, tmp = {};
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &gop_proto, NULL, &size, gop_handle);
- if (status == EFI_BUFFER_TOO_SMALL) {
- si = alloc_screen_info();
- if (!si)
- return NULL;
- status = efi_setup_gop(si, &gop_proto, size);
- if (status != EFI_SUCCESS) {
- free_screen_info(si);
- return NULL;
- }
- }
+ if (efi_setup_gop(&tmp) != EFI_SUCCESS)
+ return NULL;
+
+ si = alloc_screen_info();
+ if (!si)
+ return NULL;
+
+ *si = tmp;
return si;
}
@@ -112,45 +104,40 @@ static u32 get_supported_rt_services(void)
efi_status_t efi_handle_cmdline(efi_loaded_image_t *image, char **cmdline_ptr)
{
- int cmdline_size = 0;
+ char *cmdline __free(efi_pool) = NULL;
efi_status_t status;
- char *cmdline;
/*
* Get the command line from EFI, using the LOADED_IMAGE
* protocol. We are going to copy the command line into the
* device tree, so this can be allocated anywhere.
*/
- cmdline = efi_convert_cmdline(image, &cmdline_size);
+ cmdline = efi_convert_cmdline(image);
if (!cmdline) {
efi_err("getting command line via LOADED_IMAGE_PROTOCOL\n");
return EFI_OUT_OF_RESOURCES;
}
- if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
- IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
- cmdline_size == 0) {
- status = efi_parse_options(CONFIG_CMDLINE);
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+ status = efi_parse_options(cmdline);
if (status != EFI_SUCCESS) {
- efi_err("Failed to parse options\n");
- goto fail_free_cmdline;
+ efi_err("Failed to parse EFI load options\n");
+ return status;
}
}
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0) {
- status = efi_parse_options(cmdline);
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+ cmdline[0] == 0) {
+ status = efi_parse_options(CONFIG_CMDLINE);
if (status != EFI_SUCCESS) {
- efi_err("Failed to parse options\n");
- goto fail_free_cmdline;
+ efi_err("Failed to parse built-in command line\n");
+ return status;
}
}
- *cmdline_ptr = cmdline;
+ *cmdline_ptr = no_free_ptr(cmdline);
return EFI_SUCCESS;
-
-fail_free_cmdline:
- efi_bs_call(free_pool, cmdline_ptr);
- return status;
}
efi_status_t efi_stub_common(efi_handle_t handle,
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 685098f9626f..d96d4494070d 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -4,6 +4,7 @@
#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/efi.h>
#include <linux/kernel.h>
#include <linux/kern_levels.h>
@@ -122,11 +123,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
#define efi_get_handle_num(size) \
((size) / (efi_is_native() ? sizeof(efi_handle_t) : sizeof(u32)))
-#define for_each_efi_handle(handle, array, size, i) \
- for (i = 0; \
- i < efi_get_handle_num(size) && \
- ((handle = efi_get_handle_at((array), i)) || true); \
- i++)
+#define for_each_efi_handle(handle, array, num) \
+ for (int __i = 0; __i < (num) && \
+ ((handle = efi_get_handle_at((array), __i)) || true); \
+ __i++)
static inline
void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
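
With the reworked iterator the caller passes a handle count rather than
a byte size, matching what locate_handle_buffer returns, and the loop
index becomes internal to the macro. A hedged usage sketch:

	efi_handle_t h;

	for_each_efi_handle(h, handles, num) {
		/* inspect h; no caller-visible index variable is needed */
	}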
@@ -171,7 +171,7 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
* the EFI memory map. Other related structures, e.g. x86 e820ext, need
* to factor in this headroom requirement as well.
*/
-#define EFI_MMAP_NR_SLACK_SLOTS 8
+#define EFI_MMAP_NR_SLACK_SLOTS 32
typedef struct efi_generic_dev_path efi_device_path_protocol_t;
@@ -314,7 +314,9 @@ union efi_boot_services {
void *close_protocol;
void *open_protocol_information;
void *protocols_per_handle;
- void *locate_handle_buffer;
+ efi_status_t (__efiapi *locate_handle_buffer)(int, efi_guid_t *,
+ void *, unsigned long *,
+ efi_handle_t **);
efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
void **);
efi_status_t (__efiapi *install_multiple_protocol_interfaces)(efi_handle_t *, ...);
@@ -1053,10 +1055,11 @@ void efi_puts(const char *str);
__printf(1, 2) int efi_printk(char const *fmt, ...);
void efi_free(unsigned long size, unsigned long addr);
+DEFINE_FREE(efi_pool, void *, if (_T) efi_bs_call(free_pool, _T));
void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size);
-char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
+char *efi_convert_cmdline(efi_loaded_image_t *image);
efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
bool install_cfg_tbl);
@@ -1082,8 +1085,7 @@ efi_status_t efi_parse_options(char const *cmdline);
void efi_parse_option_graphics(char *option);
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size);
+efi_status_t efi_setup_gop(struct screen_info *si);
efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
const efi_char16_t *optstr,
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index d6a025df07dc..bd626d55dcbc 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -175,6 +175,12 @@ static efi_status_t efi_open_device_path(efi_file_protocol_t **volume,
return status;
}
+#ifndef CONFIG_CMDLINE
+#define CONFIG_CMDLINE
+#endif
+
+static const efi_char16_t builtin_cmdline[] = L"" CONFIG_CMDLINE;
+
/*
* Check the cmdline for a LILO-style file= arguments.
*
@@ -189,6 +195,8 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
unsigned long *load_addr,
unsigned long *load_size)
{
+ const bool ignore_load_options = IS_ENABLED(CONFIG_CMDLINE_OVERRIDE) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE);
const efi_char16_t *cmdline = efi_table_attr(image, load_options);
u32 cmdline_len = efi_table_attr(image, load_options_size);
unsigned long efi_chunk_size = ULONG_MAX;
@@ -197,6 +205,7 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
unsigned long alloc_addr;
unsigned long alloc_size;
efi_status_t status;
+ bool twopass;
int offset;
if (!load_addr || !load_size)
@@ -209,6 +218,16 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
efi_chunk_size = EFI_READ_CHUNK_SIZE;
alloc_addr = alloc_size = 0;
+
+ if (!ignore_load_options && cmdline_len > 0) {
+ twopass = IS_ENABLED(CONFIG_CMDLINE_BOOL) ||
+ IS_ENABLED(CONFIG_CMDLINE_EXTEND);
+ } else {
+do_builtin: cmdline = builtin_cmdline;
+ cmdline_len = ARRAY_SIZE(builtin_cmdline) - 1;
+ twopass = false;
+ }
+
do {
struct finfo fi;
unsigned long size;
@@ -290,6 +309,9 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
efi_call_proto(volume, close);
} while (offset > 0);
+ if (twopass)
+ goto do_builtin;
+
*load_addr = alloc_addr;
*load_size = alloc_size;
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index ea5da307d542..3785fb4986b4 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -133,13 +133,11 @@ void efi_parse_option_graphics(char *option)
static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
+ efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
unsigned long info_size;
-
u32 max_mode, cur_mode;
+ efi_status_t status;
int pf;
mode = efi_table_attr(gop, mode);
@@ -154,17 +152,13 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
return cur_mode;
}
- status = efi_call_proto(gop, query_mode, cmdline.mode,
- &info_size, &info);
+ status = efi_call_proto(gop, query_mode, cmdline.mode, &info_size, &info);
if (status != EFI_SUCCESS) {
efi_err("Couldn't get mode information\n");
return cur_mode;
}
pf = info->pixel_format;
-
- efi_bs_call(free_pool, info);
-
if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX) {
efi_err("Invalid PixelFormat\n");
return cur_mode;
@@ -173,6 +167,28 @@ static u32 choose_mode_modenum(efi_graphics_output_protocol_t *gop)
return cmdline.mode;
}
+static u32 choose_mode(efi_graphics_output_protocol_t *gop,
+ bool (*match)(const efi_graphics_output_mode_info_t *, u32, void *),
+ void *ctx)
+{
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ u32 max_mode = efi_table_attr(mode, max_mode);
+
+ for (u32 m = 0; m < max_mode; m++) {
+ efi_graphics_output_mode_info_t *info __free(efi_pool) = NULL;
+ unsigned long info_size;
+ efi_status_t status;
+
+ status = efi_call_proto(gop, query_mode, m, &info_size, &info);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ if (match(info, m, ctx))
+ return m;
+ }
+ return (unsigned long)ctx;
+}
+
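+/*
+ * Illustrative callback sketch for the choose_mode() helper above (not
+ * part of the patch): claim the first mode with a usable pixel format.
+ * The caller passes the current mode as ctx so that it is returned
+ * when nothing matches:
+ *
+ *	static bool match_usable(const efi_graphics_output_mode_info_t *info,
+ *				 u32 mode, void *ctx)
+ *	{
+ *		return info->pixel_format != PIXEL_BLT_ONLY &&
+ *		       info->pixel_format < PIXEL_FORMAT_MAX;
+ *	}
+ *
+ *	mode = choose_mode(gop, match_usable, (void *)(unsigned long)cur_mode);
+ */
+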
static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
{
if (pixel_format == PIXEL_BIT_MASK) {
@@ -185,192 +201,117 @@ static u8 pixel_bpp(int pixel_format, efi_pixel_bitmask_t pixel_info)
return 32;
}
-static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+static bool match_res(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
{
- efi_status_t status;
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ int pf = info->pixel_format;
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h;
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ return false;
- mode = efi_table_attr(gop, mode);
+ return cmdline.res.width == info->horizontal_resolution &&
+ cmdline.res.height == info->vertical_resolution &&
+ (cmdline.res.format < 0 || cmdline.res.format == pf) &&
+ (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi));
+}
- cur_mode = efi_table_attr(mode, mode);
- info = efi_table_attr(mode, info);
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+static u32 choose_mode_res(efi_graphics_output_protocol_t *gop)
+{
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ unsigned long cur_mode = efi_table_attr(mode, mode);
- if (w == cmdline.res.width && h == cmdline.res.height &&
- (cmdline.res.format < 0 || cmdline.res.format == pf) &&
- (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
+ if (match_res(efi_table_attr(mode, info), cur_mode, NULL))
return cur_mode;
- max_mode = efi_table_attr(mode, max_mode);
-
- for (m = 0; m < max_mode; m++) {
- if (m == cur_mode)
- continue;
-
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
+ return choose_mode(gop, match_res, (void *)cur_mode);
+}
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+struct match {
+ u32 mode;
+ u32 area;
+ u8 depth;
+};
- efi_bs_call(free_pool, info);
+static bool match_auto(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
+{
+ u32 area = info->horizontal_resolution * info->vertical_resolution;
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ int pf = info->pixel_format;
+ u8 depth = pixel_bpp(pf, pi);
+ struct match *m = ctx;
- if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
- continue;
- if (w == cmdline.res.width && h == cmdline.res.height &&
- (cmdline.res.format < 0 || cmdline.res.format == pf) &&
- (!cmdline.res.depth || cmdline.res.depth == pixel_bpp(pf, pi)))
- return m;
- }
+ if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
+ return false;
- efi_err("Couldn't find requested mode\n");
+ if (area > m->area || (area == m->area && depth > m->depth))
+ *m = (struct match){ mode, area, depth };
- return cur_mode;
+ return false;
}
static u32 choose_mode_auto(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode, best_mode, area;
- u8 depth;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h, a;
- u8 d;
-
- mode = efi_table_attr(gop, mode);
-
- cur_mode = efi_table_attr(mode, mode);
- max_mode = efi_table_attr(mode, max_mode);
+ struct match match = {};
- info = efi_table_attr(mode, info);
-
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
-
- best_mode = cur_mode;
- area = w * h;
- depth = pixel_bpp(pf, pi);
+ choose_mode(gop, match_auto, &match);
- for (m = 0; m < max_mode; m++) {
- if (m == cur_mode)
- continue;
-
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
+ return match.mode;
+}
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
+static bool match_list(const efi_graphics_output_mode_info_t *info, u32 mode, void *ctx)
+{
+ efi_pixel_bitmask_t pi = info->pixel_information;
+ u32 cur_mode = (unsigned long)ctx;
+ int pf = info->pixel_format;
+ const char *dstr;
+ u8 depth = 0;
+ bool valid;
- efi_bs_call(free_pool, info);
+ valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
- if (pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX)
- continue;
- a = w * h;
- if (a < area)
- continue;
- d = pixel_bpp(pf, pi);
- if (a > area || d > depth) {
- best_mode = m;
- area = a;
- depth = d;
- }
+ switch (pf) {
+ case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
+ dstr = "rgb";
+ break;
+ case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
+ dstr = "bgr";
+ break;
+ case PIXEL_BIT_MASK:
+ dstr = "";
+ depth = pixel_bpp(pf, pi);
+ break;
+ case PIXEL_BLT_ONLY:
+ dstr = "blt";
+ break;
+ default:
+ dstr = "xxx";
+ break;
}
- return best_mode;
+ efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
+ mode,
+ (mode == cur_mode) ? '*' : ' ',
+ !valid ? '-' : ' ',
+ info->horizontal_resolution,
+ info->vertical_resolution,
+ dstr, depth);
+
+ return false;
}
static u32 choose_mode_list(efi_graphics_output_protocol_t *gop)
{
- efi_status_t status;
-
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
- unsigned long info_size;
-
- u32 max_mode, cur_mode;
- int pf;
- efi_pixel_bitmask_t pi;
- u32 m, w, h;
- u8 d;
- const char *dstr;
- bool valid;
+ efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ unsigned long cur_mode = efi_table_attr(mode, mode);
+ u32 max_mode = efi_table_attr(mode, max_mode);
efi_input_key_t key;
-
- mode = efi_table_attr(gop, mode);
-
- cur_mode = efi_table_attr(mode, mode);
- max_mode = efi_table_attr(mode, max_mode);
+ efi_status_t status;
efi_printk("Available graphics modes are 0-%u\n", max_mode-1);
efi_puts(" * = current mode\n"
" - = unusable mode\n");
- for (m = 0; m < max_mode; m++) {
- status = efi_call_proto(gop, query_mode, m,
- &info_size, &info);
- if (status != EFI_SUCCESS)
- continue;
- pf = info->pixel_format;
- pi = info->pixel_information;
- w = info->horizontal_resolution;
- h = info->vertical_resolution;
-
- efi_bs_call(free_pool, info);
-
- valid = !(pf == PIXEL_BLT_ONLY || pf >= PIXEL_FORMAT_MAX);
- d = 0;
- switch (pf) {
- case PIXEL_RGB_RESERVED_8BIT_PER_COLOR:
- dstr = "rgb";
- break;
- case PIXEL_BGR_RESERVED_8BIT_PER_COLOR:
- dstr = "bgr";
- break;
- case PIXEL_BIT_MASK:
- dstr = "";
- d = pixel_bpp(pf, pi);
- break;
- case PIXEL_BLT_ONLY:
- dstr = "blt";
- break;
- default:
- dstr = "xxx";
- break;
- }
-
- efi_printk("Mode %3u %c%c: Resolution %ux%u-%s%.0hhu\n",
- m,
- m == cur_mode ? '*' : ' ',
- !valid ? '-' : ' ',
- w, h, dstr, d);
- }
+ choose_mode(gop, match_list, (void *)cur_mode);
efi_puts("\nPress any key to continue (or wait 10 seconds)\n");
status = efi_wait_for_key(10 * EFI_USEC_PER_SEC, &key);
@@ -461,26 +402,25 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
}
-static efi_graphics_output_protocol_t *
-find_gop(efi_guid_t *proto, unsigned long size, void **handles)
+static efi_graphics_output_protocol_t *find_gop(unsigned long num,
+ const efi_handle_t handles[])
{
efi_graphics_output_protocol_t *first_gop;
efi_handle_t h;
- int i;
first_gop = NULL;
- for_each_efi_handle(h, handles, size, i) {
+ for_each_efi_handle(h, handles, num) {
efi_status_t status;
efi_graphics_output_protocol_t *gop;
efi_graphics_output_protocol_mode_t *mode;
efi_graphics_output_mode_info_t *info;
-
- efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
void *dummy = NULL;
- status = efi_bs_call(handle_protocol, h, proto, (void **)&gop);
+ status = efi_bs_call(handle_protocol, h,
+ &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID,
+ (void **)&gop);
if (status != EFI_SUCCESS)
continue;
@@ -500,7 +440,8 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles)
* Once we've found a GOP supporting ConOut,
* don't bother looking any further.
*/
- status = efi_bs_call(handle_protocol, h, &conout_proto, &dummy);
+ status = efi_bs_call(handle_protocol, h,
+ &EFI_CONSOLE_OUT_DEVICE_GUID, &dummy);
if (status == EFI_SUCCESS)
return gop;
@@ -511,16 +452,22 @@ find_gop(efi_guid_t *proto, unsigned long size, void **handles)
return first_gop;
}
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size, void **handles)
+efi_status_t efi_setup_gop(struct screen_info *si)
{
- efi_graphics_output_protocol_t *gop;
+ efi_handle_t *handles __free(efi_pool) = NULL;
efi_graphics_output_protocol_mode_t *mode;
efi_graphics_output_mode_info_t *info;
+ efi_graphics_output_protocol_t *gop;
+ efi_status_t status;
+ unsigned long num;
- gop = find_gop(proto, size, handles);
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID, NULL, &num,
+ &handles);
+ if (status != EFI_SUCCESS)
+ return status;
- /* Did we find any GOPs? */
+ gop = find_gop(num, handles);
if (!gop)
return EFI_NOT_FOUND;
@@ -552,29 +499,3 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
return EFI_SUCCESS;
}
-
-/*
- * See if we have Graphics Output Protocol
- */
-efi_status_t efi_setup_gop(struct screen_info *si, efi_guid_t *proto,
- unsigned long size)
-{
- efi_status_t status;
- void **gop_handle = NULL;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&gop_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, proto, NULL,
- &size, gop_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- status = setup_gop(si, proto, size, gop_handle);
-
-free_handle:
- efi_bs_call(free_pool, gop_handle);
- return status;
-}
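
The pattern removed above (probe locate_handle for the required buffer
size, allocate_pool, then call locate_handle again) recurs in pci.c and
x86-stub.c below; all three sites collapse into a single call because
EFI's LocateHandleBuffer allocates the handle array itself. A condensed
sketch of the replacement idiom, with proto_guid standing in for the
protocol GUID:

	efi_handle_t *handles __free(efi_pool) = NULL;
	unsigned long num;
	efi_status_t status;

	status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
			     &proto_guid, NULL, &num, &handles);
	if (status != EFI_SUCCESS)
		return status;
	/* handles[0..num-1] are valid; the array is freed on scope exit */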
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
index 6318c40bda38..4bc963e999eb 100644
--- a/drivers/firmware/efi/libstub/kaslr.c
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -57,7 +57,7 @@ u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
*/
static bool check_image_region(u64 base, u64 size)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
bool ret = false;
int map_offset;
@@ -80,8 +80,6 @@ static bool check_image_region(u64 base, u64 size)
}
}
- efi_bs_call(free_pool, map);
-
return ret;
}
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
index 4f1fa302234d..9c82259eea81 100644
--- a/drivers/firmware/efi/libstub/mem.c
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -20,10 +20,10 @@
efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
bool install_cfg_tbl)
{
+ struct efi_boot_memmap tmp, *m __free(efi_pool) = NULL;
int memtype = install_cfg_tbl ? EFI_ACPI_RECLAIM_MEMORY
: EFI_LOADER_DATA;
efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID;
- struct efi_boot_memmap *m, tmp;
efi_status_t status;
unsigned long size;
@@ -48,24 +48,20 @@ efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
*/
status = efi_bs_call(install_configuration_table, &tbl_guid, m);
if (status != EFI_SUCCESS)
- goto free_map;
+ return status;
}
m->buff_size = m->map_size = size;
status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key,
&m->desc_size, &m->desc_ver);
- if (status != EFI_SUCCESS)
- goto uninstall_table;
+ if (status != EFI_SUCCESS) {
+ if (install_cfg_tbl)
+ efi_bs_call(install_configuration_table, &tbl_guid, NULL);
+ return status;
+ }
- *map = m;
+ *map = no_free_ptr(m);
return EFI_SUCCESS;
-
-uninstall_table:
- if (install_cfg_tbl)
- efi_bs_call(install_configuration_table, &tbl_guid, NULL);
-free_map:
- efi_bs_call(free_pool, m);
- return status;
}
/**
diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c
index 99fb25d2bcf5..1dccf77958d3 100644
--- a/drivers/firmware/efi/libstub/pci.c
+++ b/drivers/firmware/efi/libstub/pci.c
@@ -16,37 +16,20 @@
void efi_pci_disable_bridge_busmaster(void)
{
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long pci_handle_size = 0;
- efi_handle_t *pci_handle = NULL;
+ efi_handle_t *pci_handle __free(efi_pool) = NULL;
+ unsigned long pci_handle_num;
efi_handle_t handle;
efi_status_t status;
u16 class, command;
- int i;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &pci_handle_size, NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL) {
- if (status != EFI_SUCCESS && status != EFI_NOT_FOUND)
- efi_err("Failed to locate PCI I/O handles'\n");
- return;
- }
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, pci_handle_size,
- (void **)&pci_handle);
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &pci_handle_num, &pci_handle);
if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate memory for 'pci_handle'\n");
+ efi_err("Failed to locate PCI I/O handles\n");
return;
}
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto,
- NULL, &pci_handle_size, pci_handle);
- if (status != EFI_SUCCESS) {
- efi_err("Failed to locate PCI I/O handles'\n");
- goto free_handle;
- }
-
- for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ for_each_efi_handle(handle, pci_handle, pci_handle_num) {
efi_pci_io_protocol_t *pci;
unsigned long segment_nr, bus_nr, device_nr, func_nr;
@@ -82,7 +65,7 @@ void efi_pci_disable_bridge_busmaster(void)
efi_bs_call(disconnect_controller, handle, NULL, NULL);
}
- for_each_efi_handle(handle, pci_handle, pci_handle_size, i) {
+ for_each_efi_handle(handle, pci_handle, pci_handle_num) {
efi_pci_io_protocol_t *pci;
status = efi_bs_call(handle_protocol, handle, &pci_proto,
@@ -108,7 +91,4 @@ void efi_pci_disable_bridge_busmaster(void)
if (status != EFI_SUCCESS)
efi_err("Failed to disable PCI busmastering\n");
}
-
-free_handle:
- efi_bs_call(free_pool, pci_handle);
}
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index c41e7b2091cd..e5872e38d9a4 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -59,9 +59,9 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long alloc_min,
unsigned long alloc_max)
{
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
- struct efi_boot_memmap *map;
efi_status_t status;
int map_offset;
@@ -130,7 +130,5 @@ efi_status_t efi_random_alloc(unsigned long size,
break;
}
- efi_bs_call(free_pool, map);
-
return status;
}
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index d694bcfa1074..99b45d1cd624 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -23,14 +23,14 @@
efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
unsigned long nr_pages;
int i;
status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
/*
* Enforce minimum alignment that EFI or Linux requires when
@@ -79,11 +79,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
}
if (i == map->map_size / map->desc_size)
- status = EFI_NOT_FOUND;
+ return EFI_NOT_FOUND;
- efi_bs_call(free_pool, map);
-fail:
- return status;
+ return EFI_SUCCESS;
}
/**
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 1fd6823248ab..a5c6c4f163fc 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -57,7 +57,7 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
struct linux_efi_tpm_eventlog *log_tbl = NULL;
unsigned long first_entry_addr, last_entry_addr;
size_t log_size, last_entry_size;
- int final_events_size = 0;
+ u32 final_events_size = 0;
first_entry_addr = (unsigned long) log_location;
@@ -110,9 +110,9 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
*/
if (final_events_table && final_events_table->nr_events) {
struct tcg_pcr_event2_head *header;
- int offset;
+ u32 offset;
void *data;
- int event_size;
+ u32 event_size;
int i = final_events_table->nr_events;
data = (void *)final_events_table;
@@ -124,6 +124,9 @@ static void efi_retrieve_tcg2_eventlog(int version, efi_physical_addr_t log_loca
event_size = __calc_tpm2_event_size(header,
(void *)(long)log_location,
false);
+ /* If calc fails this is a malformed log */
+ if (!event_size)
+ break;
final_events_size += event_size;
i--;
}
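
The new zero-size check matters because each entry's length is computed from the log itself, which firmware (or anything able to corrupt the log) controls. A size of 0 would leave the walk stuck on the same entry forever, and summing log-provided sizes in a signed int risked overflow, hence the switch to u32. A sketch of the loop shape, with calc_event_size() standing in for __calc_tpm2_event_size():

	u32 total = 0;

	while (nr_events--) {
		u32 sz = calc_event_size(entry);	/* stand-in: 0 == malformed */

		if (!sz)
			break;		/* stop: the walk can no longer advance */
		total += sz;		/* u32 sum: no signed-overflow UB */
		entry += sz;
	}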
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index f8e465da344d..863910e9eefc 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -42,7 +42,7 @@ union sev_memory_acceptance_protocol {
static efi_status_t
preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
{
- struct pci_setup_rom *rom = NULL;
+ struct pci_setup_rom *rom __free(efi_pool) = NULL;
efi_status_t status;
unsigned long size;
uint64_t romsize;
@@ -75,14 +75,13 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
rom->data.len = size - sizeof(struct setup_data);
rom->data.next = 0;
rom->pcilen = romsize;
- *__rom = rom;
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
PCI_VENDOR_ID, 1, &rom->vendor);
if (status != EFI_SUCCESS) {
efi_err("Failed to read rom->vendor\n");
- goto free_struct;
+ return status;
}
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
@@ -90,21 +89,18 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
if (status != EFI_SUCCESS) {
efi_err("Failed to read rom->devid\n");
- goto free_struct;
+ return status;
}
status = efi_call_proto(pci, get_location, &rom->segment, &rom->bus,
&rom->device, &rom->function);
if (status != EFI_SUCCESS)
- goto free_struct;
+ return status;
memcpy(rom->romdata, romimage, romsize);
- return status;
-
-free_struct:
- efi_bs_call(free_pool, rom);
- return status;
+ *__rom = no_free_ptr(rom);
+ return EFI_SUCCESS;
}
/*
@@ -119,38 +115,23 @@ free_struct:
static void setup_efi_pci(struct boot_params *params)
{
efi_status_t status;
- void **pci_handle = NULL;
+ efi_handle_t *pci_handle __free(efi_pool) = NULL;
efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
- unsigned long size = 0;
struct setup_data *data;
+ unsigned long num;
efi_handle_t h;
- int i;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &pci_proto, NULL, &size, pci_handle);
-
- if (status == EFI_BUFFER_TOO_SMALL) {
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&pci_handle);
-
- if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate memory for 'pci_handle'\n");
- return;
- }
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &pci_proto, NULL, &size, pci_handle);
- }
+ status = efi_bs_call(locate_handle_buffer, EFI_LOCATE_BY_PROTOCOL,
+ &pci_proto, NULL, &num, &pci_handle);
if (status != EFI_SUCCESS)
- goto free_handle;
+ return;
data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
while (data && data->next)
data = (struct setup_data *)(unsigned long)data->next;
- for_each_efi_handle(h, pci_handle, size, i) {
+ for_each_efi_handle(h, pci_handle, num) {
efi_pci_io_protocol_t *pci = NULL;
struct pci_setup_rom *rom;
@@ -170,9 +151,6 @@ static void setup_efi_pci(struct boot_params *params)
data = (struct setup_data *)rom;
}
-
-free_handle:
- efi_bs_call(free_pool, pci_handle);
}
static void retrieve_apple_device_properties(struct boot_params *boot_params)
@@ -405,116 +383,13 @@ static void setup_quirks(struct boot_params *boot_params)
}
}
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t
-setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
-{
- efi_status_t status;
- u32 width, height;
- void **uga_handle = NULL;
- efi_uga_draw_protocol_t *uga = NULL, *first_uga;
- efi_handle_t handle;
- int i;
-
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
- (void **)&uga_handle);
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- uga_proto, NULL, &size, uga_handle);
- if (status != EFI_SUCCESS)
- goto free_handle;
-
- height = 0;
- width = 0;
-
- first_uga = NULL;
- for_each_efi_handle(handle, uga_handle, size, i) {
- efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
- u32 w, h, depth, refresh;
- void *pciio;
-
- status = efi_bs_call(handle_protocol, handle, uga_proto,
- (void **)&uga);
- if (status != EFI_SUCCESS)
- continue;
-
- pciio = NULL;
- efi_bs_call(handle_protocol, handle, &pciio_proto, &pciio);
-
- status = efi_call_proto(uga, get_mode, &w, &h, &depth, &refresh);
- if (status == EFI_SUCCESS && (!first_uga || pciio)) {
- width = w;
- height = h;
-
- /*
- * Once we've found a UGA supporting PCIIO,
- * don't bother looking any further.
- */
- if (pciio)
- break;
-
- first_uga = uga;
- }
- }
-
- if (!width && !height)
- goto free_handle;
-
- /* EFI framebuffer */
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
- si->lfb_depth = 32;
- si->lfb_width = width;
- si->lfb_height = height;
-
- si->red_size = 8;
- si->red_pos = 16;
- si->green_size = 8;
- si->green_pos = 8;
- si->blue_size = 8;
- si->blue_pos = 0;
- si->rsvd_size = 8;
- si->rsvd_pos = 24;
-
-free_handle:
- efi_bs_call(free_pool, uga_handle);
-
- return status;
-}
-
static void setup_graphics(struct boot_params *boot_params)
{
- efi_guid_t graphics_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
- struct screen_info *si;
- efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
- efi_status_t status;
- unsigned long size;
- void **gop_handle = NULL;
- void **uga_handle = NULL;
-
- si = &boot_params->screen_info;
- memset(si, 0, sizeof(*si));
+ struct screen_info *si = memset(&boot_params->screen_info, 0, sizeof(*si));
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &graphics_proto, NULL, &size, gop_handle);
- if (status == EFI_BUFFER_TOO_SMALL)
- status = efi_setup_gop(si, &graphics_proto, size);
-
- if (status != EFI_SUCCESS) {
- size = 0;
- status = efi_bs_call(locate_handle, EFI_LOCATE_BY_PROTOCOL,
- &uga_proto, NULL, &size, uga_handle);
- if (status == EFI_BUFFER_TOO_SMALL)
- setup_uga(si, &uga_proto, size);
- }
+ efi_setup_gop(si);
}
-
static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
{
efi_bs_call(exit, handle, status, 0, NULL);
@@ -537,7 +412,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
struct boot_params *boot_params;
struct setup_header *hdr;
- int options_size = 0;
efi_status_t status;
unsigned long alloc;
char *cmdline_ptr;
@@ -569,7 +443,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->initrd_addr_max = INT_MAX;
/* Convert unicode cmdline to ascii */
- cmdline_ptr = efi_convert_cmdline(image, &options_size);
+ cmdline_ptr = efi_convert_cmdline(image);
if (!cmdline_ptr) {
efi_free(PARAM_SIZE, alloc);
efi_exit(handle, EFI_OUT_OF_RESOURCES);
@@ -738,7 +612,7 @@ static efi_status_t allocate_e820(struct boot_params *params,
struct setup_data **e820ext,
u32 *e820ext_size)
{
- struct efi_boot_memmap *map;
+ struct efi_boot_memmap *map __free(efi_pool) = NULL;
efi_status_t status;
__u32 nr_desc;
@@ -752,13 +626,14 @@ static efi_status_t allocate_e820(struct boot_params *params,
EFI_MMAP_NR_SLACK_SLOTS;
status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
}
- if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) && status == EFI_SUCCESS)
- status = allocate_unaccepted_bitmap(nr_desc, map);
+ if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
+ return allocate_unaccepted_bitmap(nr_desc, map);
- efi_bs_call(free_pool, map);
- return status;
+ return EFI_SUCCESS;
}
struct exit_boot_struct {
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 164203429fa7..c38b1a335590 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -22,6 +22,7 @@ unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR;
int __init efi_memattr_init(void)
{
efi_memory_attributes_table_t *tbl;
+ unsigned long size;
if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR)
return 0;
@@ -39,7 +40,22 @@ int __init efi_memattr_init(void)
goto unmap;
}
- tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+
+ /*
+ * Sanity check: the Memory Attributes Table contains up to 3 entries
+ * for each entry of type EfiRuntimeServicesCode in the EFI memory map.
+ * So if the size of the table exceeds 3x the size of the entire EFI
+ * memory map, there is clearly something wrong, and the table should
+ * just be ignored altogether.
+ */
+ size = tbl->num_entries * tbl->desc_size;
+ if (size > 3 * efi.memmap.nr_map * efi.memmap.desc_size) {
+ pr_warn(FW_BUG "Corrupted EFI Memory Attributes Table detected! (version == %u, desc_size == %u, num_entries == %u)\n",
+ tbl->version, tbl->desc_size, tbl->num_entries);
+ goto unmap;
+ }
+
+ tbl_size = sizeof(*tbl) + size;
memblock_reserve(efi_mem_attr_table, tbl_size);
set_bit(EFI_MEM_ATTR, &efi.flags);
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index cc807ed35aed..1e509595ac03 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -91,6 +91,7 @@ void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
_ret_; \
})
+#ifdef CONFIG_EFI
static int __init efifb_set_system(const struct dmi_system_id *id)
{
struct efifb_dmi_info *info = id->driver_data;
@@ -346,7 +347,6 @@ static const struct fwnode_operations efifb_fwnode_ops = {
.add_links = efifb_add_links,
};
-#ifdef CONFIG_EFI
static struct fwnode_handle efifb_fwnode;
__init void sysfb_apply_efi_quirks(void)
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index e8d69bd548f3..cdd431027065 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -19,7 +19,7 @@ EXPORT_SYMBOL(efi_tpm_final_log_size);
static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
{
struct tcg_pcr_event2_head *header;
- int event_size, size = 0;
+ u32 event_size, size = 0;
while (count > 0) {
header = data + size;
@@ -40,7 +40,8 @@ int __init efi_tpm_eventlog_init(void)
{
struct linux_efi_tpm_eventlog *log_tbl;
struct efi_tcg2_final_events_table *final_tbl;
- int tbl_size;
+ unsigned int tbl_size;
+ int final_tbl_size;
int ret = 0;
if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
@@ -60,7 +61,12 @@ int __init efi_tpm_eventlog_init(void)
}
tbl_size = sizeof(*log_tbl) + log_tbl->size;
- memblock_reserve(efi.tpm_log, tbl_size);
+ if (memblock_reserve(efi.tpm_log, tbl_size)) {
+ pr_err("TPM Event Log memblock reserve fails (0x%lx, 0x%x)\n",
+ efi.tpm_log, tbl_size);
+ ret = -ENOMEM;
+ goto out;
+ }
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
pr_info("TPM Final Events table not present\n");
@@ -80,26 +86,26 @@ int __init efi_tpm_eventlog_init(void)
goto out;
}
- tbl_size = 0;
+ final_tbl_size = 0;
if (final_tbl->nr_events != 0) {
void *events = (void *)efi.tpm_final_log
+ sizeof(final_tbl->version)
+ sizeof(final_tbl->nr_events);
- tbl_size = tpm2_calc_event_log_size(events,
- final_tbl->nr_events,
- log_tbl->log);
+ final_tbl_size = tpm2_calc_event_log_size(events,
+ final_tbl->nr_events,
+ log_tbl->log);
}
- if (tbl_size < 0) {
+ if (final_tbl_size < 0) {
pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
ret = -EINVAL;
goto out_calc;
}
memblock_reserve(efi.tpm_final_log,
- tbl_size + sizeof(*final_tbl));
- efi_tpm_final_log_size = tbl_size;
+ final_tbl_size + sizeof(*final_tbl));
+ efi_tpm_final_log_size = final_tbl_size;
out_calc:
early_memunmap(final_tbl, sizeof(*final_tbl));
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 4056ba7f3440..3700e9869767 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -149,7 +149,7 @@ int efivar_lock(void)
}
return 0;
}
-EXPORT_SYMBOL_NS_GPL(efivar_lock, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_lock, "EFIVAR");
/*
* efivar_lock() - obtain the efivar lock if it is free
@@ -165,7 +165,7 @@ int efivar_trylock(void)
}
return 0;
}
-EXPORT_SYMBOL_NS_GPL(efivar_trylock, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_trylock, "EFIVAR");
/*
* efivar_unlock() - release the efivar lock
@@ -174,7 +174,7 @@ void efivar_unlock(void)
{
up(&efivars_lock);
}
-EXPORT_SYMBOL_NS_GPL(efivar_unlock, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_unlock, "EFIVAR");
/*
* efivar_get_variable() - retrieve a variable identified by name/vendor
@@ -186,7 +186,7 @@ efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
{
return __efivars->ops->get_variable(name, vendor, attr, size, data);
}
-EXPORT_SYMBOL_NS_GPL(efivar_get_variable, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_get_variable, "EFIVAR");
/*
* efivar_get_next_variable() - enumerate the next name/vendor pair
@@ -198,7 +198,7 @@ efi_status_t efivar_get_next_variable(unsigned long *name_size,
{
return __efivars->ops->get_next_variable(name_size, name, vendor);
}
-EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, "EFIVAR");
/*
* efivar_set_variable_locked() - set a variable identified by name/vendor
@@ -230,7 +230,7 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
return setvar(name, vendor, attr, data_size, data);
}
-EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, "EFIVAR");
/*
* efivar_set_variable() - set a variable identified by name/vendor
@@ -252,7 +252,7 @@ efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
efivar_unlock();
return status;
}
-EXPORT_SYMBOL_NS_GPL(efivar_set_variable, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_set_variable, "EFIVAR");
efi_status_t efivar_query_variable_info(u32 attr,
u64 *storage_space,
@@ -264,4 +264,4 @@ efi_status_t efivar_query_variable_info(u32 attr,
return __efivars->ops->query_variable_info(attr, storage_space,
remaining_space, max_variable_size);
}
-EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, EFIVAR);
+EXPORT_SYMBOL_NS_GPL(efivar_query_variable_info, "EFIVAR");
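
These hunks follow the tree-wide conversion of symbol namespaces from bare identifiers to string literals. Consumers import the namespace the same way; a hedged example for a module that calls the efivar helpers:

	#include <linux/module.h>

	MODULE_IMPORT_NS("EFIVAR");	/* previously: MODULE_IMPORT_NS(EFIVAR) */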
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
index 66042160b361..773d05078e0a 100644
--- a/drivers/firmware/google/cbmem.c
+++ b/drivers/firmware/google/cbmem.c
@@ -30,7 +30,7 @@ static struct cbmem_entry *to_cbmem_entry(struct kobject *kobj)
}
static ssize_t mem_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t pos,
+ const struct bin_attribute *bin_attr, char *buf, loff_t pos,
size_t count)
{
struct cbmem_entry *entry = to_cbmem_entry(kobj);
@@ -40,7 +40,7 @@ static ssize_t mem_read(struct file *filp, struct kobject *kobj,
}
static ssize_t mem_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t pos,
+ const struct bin_attribute *bin_attr, char *buf, loff_t pos,
size_t count)
{
struct cbmem_entry *entry = to_cbmem_entry(kobj);
@@ -53,7 +53,7 @@ static ssize_t mem_write(struct file *filp, struct kobject *kobj,
memcpy(entry->mem_file_buf + pos, buf, count);
return count;
}
-static BIN_ATTR_ADMIN_RW(mem, 0);
+static const BIN_ATTR_ADMIN_RW(mem, 0);
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -79,14 +79,14 @@ static struct attribute *attrs[] = {
NULL,
};
-static struct bin_attribute *bin_attrs[] = {
+static const struct bin_attribute *const bin_attrs[] = {
&bin_attr_mem,
NULL,
};
static const struct attribute_group cbmem_entry_group = {
.attrs = attrs,
- .bin_attrs = bin_attrs,
+ .bin_attrs_new = bin_attrs,
};
static const struct attribute_group *dev_groups[] = {
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 208652a8087c..882db32e51be 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -220,7 +220,7 @@ MODULE_DEVICE_TABLE(of, coreboot_of_match);
static struct platform_driver coreboot_table_driver = {
.probe = coreboot_table_probe,
- .remove_new = coreboot_table_remove,
+ .remove = coreboot_table_remove,
.driver = {
.name = "coreboot_table",
.acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index daadd71d8ddd..c68c9f56370f 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
+#include <linux/sysfb.h>
#include "coreboot_table.h"
@@ -36,6 +37,19 @@ static int framebuffer_probe(struct coreboot_device *dev)
.format = NULL,
};
+ /*
+ * On coreboot systems, the advertised LB_TAG_FRAMEBUFFER entry
+ * in the coreboot table should only be used if the payload did
+ * not pass framebuffer information to the Linux kernel.
+ *
+ * If the global screen_info data has been filled, the Generic
+ * System Framebuffers (sysfb) will already register a platform
+ * device and pass that screen_info as platform_data to a driver
+ * that can scan out using the system-provided framebuffer.
+ */
+ if (sysfb_handles_screen_info())
+ return -ENODEV;
+
if (!fb->physical_address)
return -ENODEV;
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index d304913314e4..e8fb00dcaf65 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -488,7 +488,7 @@ static const struct efivar_operations efivar_ops = {
#endif /* CONFIG_EFI */
static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct gsmi_set_eventlog_param param = {
@@ -528,9 +528,9 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
}
-static struct bin_attribute eventlog_bin_attr = {
+static const struct bin_attribute eventlog_bin_attr = {
.attr = {.name = "append_to_eventlog", .mode = 0200},
- .write = eventlog_write,
+ .write_new = eventlog_write,
};
static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
@@ -918,7 +918,8 @@ static __init int gsmi_init(void)
gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
if (IS_ERR(gsmi_dev.pdev)) {
printk(KERN_ERR "gsmi: unable to register platform device\n");
- return PTR_ERR(gsmi_dev.pdev);
+ ret = PTR_ERR(gsmi_dev.pdev);
+ goto out_unregister;
}
/* SMI access needs to be serialized */
@@ -1056,10 +1057,11 @@ out_err:
gsmi_buf_free(gsmi_dev.name_buf);
kmem_cache_destroy(gsmi_dev.mem_pool);
platform_device_unregister(gsmi_dev.pdev);
- pr_info("gsmi: failed to load: %d\n", ret);
+out_unregister:
#ifdef CONFIG_PM
platform_driver_unregister(&gsmi_driver_info);
#endif
+ pr_info("gsmi: failed to load: %d\n", ret);
return ret;
}
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index b9d99fe1ff0f..d957af6f9349 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -14,7 +14,7 @@
#include "memconsole.h"
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
@@ -28,7 +28,7 @@ static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
static struct bin_attribute memconsole_bin_attr = {
.attr = {.name = "log", .mode = 0444},
- .read = memconsole_read,
+ .read_new = memconsole_read,
};
void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1749529f63d4..254ac6545d68 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -56,7 +56,7 @@ static struct vpd_section ro_vpd;
static struct vpd_section rw_vpd;
static ssize_t vpd_attrib_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct vpd_attrib_info *info = bin_attr->private;
@@ -121,7 +121,7 @@ static int vpd_section_attrib_add(const u8 *key, u32 key_len,
info->bin_attr.attr.name = info->key;
info->bin_attr.attr.mode = 0444;
info->bin_attr.size = value_len;
- info->bin_attr.read = vpd_attrib_read;
+ info->bin_attr.read_new = vpd_attrib_read;
info->bin_attr.private = info;
info->value = value;
@@ -156,7 +156,7 @@ static void vpd_section_attrib_destroy(struct vpd_section *sec)
}
static ssize_t vpd_section_read(struct file *filp, struct kobject *kobp,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct vpd_section *sec = bin_attr->private;
@@ -201,7 +201,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->bin_attr.attr.name = sec->raw_name;
sec->bin_attr.attr.mode = 0444;
sec->bin_attr.size = size;
- sec->bin_attr.read = vpd_section_read;
+ sec->bin_attr.read_new = vpd_section_read;
sec->bin_attr.private = sec;
err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 477d3f32d99a..907cd149c40a 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -25,7 +25,6 @@ config IMX_SCU
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
- depends on IMX_SCMI_MISC_EXT || COMPILE_TEST
default y if ARCH_MXC
help
The System Controller Management Interface firmware (SCMI FW) is
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index 01c8ef14eaec..ed79e823157a 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -180,7 +180,7 @@ static struct platform_driver imx_dsp_driver = {
.name = "imx-dsp",
},
.probe = imx_dsp_probe,
- .remove_new = imx_dsp_remove,
+ .remove = imx_dsp_remove,
};
builtin_platform_driver(imx_dsp_driver);
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 6e9788324fea..371f24569b3b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
str += sprintf_ipaddr(str, nic->ip_addr);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
- val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
+ if (nic->subnet_mask_prefix > 32)
+ val = cpu_to_be32(~0);
+ else
+ val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
case ISCSI_BOOT_ETH_PREFIX_LEN:
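
A quick worked example of the masking expression: for a prefix length of 24, ~((1 << (32 - 24)) - 1) = ~0xff = 0xffffff00, i.e. 255.255.255.0 once converted with cpu_to_be32(). The prefix comes from firmware and is untrusted; a value above 32 makes the shift count negative, which is undefined behaviour in C, hence the clamp above. A standalone sketch of the conversion:

	static u32 prefix_to_netmask(u8 prefix)
	{
		if (prefix > 32)		/* untrusted firmware value */
			return 0xffffffff;	/* treat as /32, as the fix does */
		if (prefix == 0)		/* 1 << 32 would be undefined too */
			return 0;
		return ~((1u << (32 - prefix)) - 1);
	}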
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 8e59be3782cb..55b9cfad8a04 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -116,7 +116,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
kfree(entry);
}
-static struct kobj_type __refdata memmap_ktype = {
+static const struct kobj_type memmap_ktype = {
.release = release_firmware_map_entry,
.sysfs_ops = &memmap_attr_ops,
.default_groups = def_groups,
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 9ca5ee58edbd..e194f7acb2a9 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -76,14 +76,11 @@
#define AUTO_UPDATE_INFO_SIZE SZ_1M
#define AUTO_UPDATE_BITSTREAM_BASE (AUTO_UPDATE_DIRECTORY_SIZE + AUTO_UPDATE_INFO_SIZE)
-#define AUTO_UPDATE_TIMEOUT_MS 60000
-
struct mpfs_auto_update_priv {
struct mpfs_sys_controller *sys_controller;
struct device *dev;
struct mtd_info *flash;
struct fw_upload *fw_uploader;
- struct completion programming_complete;
size_t size_per_bitstream;
bool cancel_request;
};
@@ -156,19 +153,6 @@ static void mpfs_auto_update_cancel(struct fw_upload *fw_uploader)
static enum fw_upload_err mpfs_auto_update_poll_complete(struct fw_upload *fw_uploader)
{
- struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
- int ret;
-
- /*
- * There is no meaningful way to get the status of the programming while
- * it is in progress, so attempting anything other than waiting for it
- * to complete would be misplaced.
- */
- ret = wait_for_completion_timeout(&priv->programming_complete,
- msecs_to_jiffies(AUTO_UPDATE_TIMEOUT_MS));
- if (!ret)
- return FW_UPLOAD_ERR_TIMEOUT;
-
return FW_UPLOAD_ERR_NONE;
}
@@ -349,33 +333,23 @@ static enum fw_upload_err mpfs_auto_update_write(struct fw_upload *fw_uploader,
u32 offset, u32 size, u32 *written)
{
struct mpfs_auto_update_priv *priv = fw_uploader->dd_handle;
- enum fw_upload_err err = FW_UPLOAD_ERR_NONE;
int ret;
- reinit_completion(&priv->programming_complete);
-
ret = mpfs_auto_update_write_bitstream(fw_uploader, data, offset, size, written);
- if (ret) {
- err = FW_UPLOAD_ERR_RW_ERROR;
- goto out;
- }
+ if (ret)
+ return FW_UPLOAD_ERR_RW_ERROR;
- if (priv->cancel_request) {
- err = FW_UPLOAD_ERR_CANCELED;
- goto out;
- }
+ if (priv->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
if (mpfs_auto_update_is_bitstream_info(data, size))
- goto out;
+ return FW_UPLOAD_ERR_NONE;
ret = mpfs_auto_update_verify_image(fw_uploader);
if (ret)
- err = FW_UPLOAD_ERR_FW_INVALID;
+ return FW_UPLOAD_ERR_FW_INVALID;
-out:
- complete(&priv->programming_complete);
-
- return err;
+ return FW_UPLOAD_ERR_NONE;
}
static const struct fw_upload_ops mpfs_auto_update_ops = {
@@ -428,10 +402,10 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
return -EIO;
/*
- * Bit 5 of byte 1 is "UL_Auto Update" & if it is set, Auto Update is
+ * Bit 5 of byte 1 is "UL_IAP" & if it is set, Auto Update is
* not possible.
*/
- if (response_msg[1] & AUTO_UPDATE_FEATURE_ENABLED)
+ if ((((u8 *)response_msg)[1] & AUTO_UPDATE_FEATURE_ENABLED))
return -EPERM;
return 0;
@@ -461,8 +435,6 @@ static int mpfs_auto_update_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"The current bitstream does not support auto-update\n");
- init_completion(&priv->programming_complete);
-
fw_uploader = firmware_upload_register(THIS_MODULE, dev, "mpfs-auto-update",
&mpfs_auto_update_ops, priv);
if (IS_ERR(fw_uploader))
@@ -486,7 +458,7 @@ static struct platform_driver mpfs_auto_update_driver = {
.name = "mpfs-auto-update",
},
.probe = mpfs_auto_update_probe,
- .remove_new = mpfs_auto_update_remove,
+ .remove = mpfs_auto_update_remove,
};
module_platform_driver(mpfs_auto_update_driver);
diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c
index a762302978de..2b79371c61c9 100644
--- a/drivers/firmware/mtk-adsp-ipc.c
+++ b/drivers/firmware/mtk-adsp-ipc.c
@@ -95,10 +95,9 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev)
adsp_chan->idx = i;
adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
if (IS_ERR(adsp_chan->ch)) {
- ret = PTR_ERR(adsp_chan->ch);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request mbox chan %s ret %d\n",
- adsp_mbox_ch_names[i], ret);
+ ret = dev_err_probe(dev, PTR_ERR(adsp_chan->ch),
+ "Failed to request mbox channel %s\n",
+ adsp_mbox_ch_names[i]);
for (j = 0; j < i; j++) {
adsp_chan = &adsp_ipc->chans[j];
@@ -133,7 +132,7 @@ static struct platform_driver mtk_adsp_ipc_driver = {
.name = "mtk-adsp-ipc",
},
.probe = mtk_adsp_ipc_probe,
- .remove_new = mtk_adsp_ipc_remove,
+ .remove = mtk_adsp_ipc_remove,
};
builtin_platform_driver(mtk_adsp_ipc_driver);
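
The mbox hunk above is a common consolidation: dev_err_probe() logs only when the error is not -EPROBE_DEFER, records the deferral reason for later inspection, and returns the error code, so the open-coded filter collapses to one call (name below stands in for adsp_mbox_ch_names[i]):

	/* before: filter -EPROBE_DEFER by hand */
	ret = PTR_ERR(adsp_chan->ch);
	if (ret != -EPROBE_DEFER)
		dev_err(dev, "Failed to request mbox chan %s ret %d\n", name, ret);

	/* after: log (or record the deferral reason) and return ret in one call */
	ret = dev_err_probe(dev, PTR_ERR(adsp_chan->ch),
			    "Failed to request mbox channel %s\n", name);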
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 2328ca58bba6..a1ebbe9b73b1 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -78,6 +78,7 @@ struct psci_0_1_function_ids get_psci_0_1_function_ids(void)
static u32 psci_cpu_suspend_feature;
static bool psci_system_reset2_supported;
+static bool psci_system_off2_hibernate_supported;
static inline bool psci_has_ext_power_state(void)
{
@@ -333,6 +334,36 @@ static void psci_sys_poweroff(void)
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
+#ifdef CONFIG_HIBERNATION
+static int psci_sys_hibernate(struct sys_off_data *data)
+{
+ /*
+ * If no hibernate type is specified, SYSTEM_OFF2 defaults to selecting
+ * HIBERNATE_OFF.
+ *
+ * There are hypervisors in the wild that do not align with the spec and
+ * reject calls that explicitly provide a hibernate type. For
+ * compatibility with these nonstandard implementations, pass 0 as the
+ * type.
+ */
+ if (system_entering_hibernation())
+ invoke_psci_fn(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2), 0, 0, 0);
+ return NOTIFY_DONE;
+}
+
+static int __init psci_hibernate_init(void)
+{
+ if (psci_system_off2_hibernate_supported) {
+ /* Higher priority than EFI shutdown, but only for hibernate */
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE + 2,
+ psci_sys_hibernate, NULL);
+ }
+ return 0;
+}
+subsys_initcall(psci_hibernate_init);
+#endif
+
static int psci_features(u32 psci_func_id)
{
return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
@@ -364,6 +395,7 @@ static const struct {
PSCI_ID_NATIVE(1_1, SYSTEM_RESET2),
PSCI_ID(1_1, MEM_PROTECT),
PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE),
+ PSCI_ID_NATIVE(1_3, SYSTEM_OFF2),
};
static int psci_debugfs_read(struct seq_file *s, void *data)
@@ -525,6 +557,18 @@ static void __init psci_init_system_reset2(void)
psci_system_reset2_supported = true;
}
+static void __init psci_init_system_off2(void)
+{
+ int ret;
+
+ ret = psci_features(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2));
+ if (ret < 0)
+ return;
+
+ if (ret & PSCI_1_3_OFF_TYPE_HIBERNATE_OFF)
+ psci_system_off2_hibernate_supported = true;
+}
+
static void __init psci_init_system_suspend(void)
{
int ret;
@@ -655,6 +699,7 @@ static int __init psci_probe(void)
psci_init_cpu_suspend();
psci_init_system_suspend();
psci_init_system_reset2();
+ psci_init_system_off2();
kvm_init_hyp_services();
}
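
The hibernate handler above relies on two properties of the sys-off chain: handlers run in descending priority order, and returning NOTIFY_DONE passes control to the next handler. Registering at SYS_OFF_PRIO_FIRMWARE + 2 therefore puts PSCI ahead of EFI's power-off handler, while the system_entering_hibernation() check keeps it from claiming ordinary poweroffs. A minimal sketch of a handler with that shape (names here are illustrative):

	static int hibernate_only_off(struct sys_off_data *data)
	{
		if (system_entering_hibernation())
			fire_firmware_off_call();	/* illustrative stand-in */

		return NOTIFY_DONE;	/* either way, let the chain continue */
	}

	register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
				 SYS_OFF_PRIO_FIRMWARE + 2,	/* above EFI */
				 hibernate_only_off, NULL);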
diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c
index 2b4c2826f572..574930729ddd 100644
--- a/drivers/firmware/qcom/qcom_scm-smc.c
+++ b/drivers/firmware/qcom/qcom_scm-smc.c
@@ -152,7 +152,6 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
struct qcom_scm_res *res, bool atomic)
{
- struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
int arglen = desc->arginfo & 0xf;
int i, ret;
void *args_virt __free(qcom_tzmem) = NULL;
@@ -173,6 +172,11 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
+ struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
+
+ if (!mempool)
+ return -EINVAL;
+
args_virt = qcom_tzmem_alloc(mempool,
SCM_SMC_N_EXT_ARGS * sizeof(u64),
flag);
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 10986cb11ec0..f0569bb9411f 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -112,6 +112,7 @@ enum qcom_scm_qseecom_tz_cmd_info {
};
#define QSEECOM_MAX_APP_NAME_SIZE 64
+#define SHMBRIDGE_RESULT_NOTSUPP 4
/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
@@ -216,6 +217,9 @@ static DEFINE_SPINLOCK(scm_query_lock);
struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
+ if (!qcom_scm_is_available())
+ return NULL;
+
return __scm->mempool;
}
@@ -545,7 +549,7 @@ static void qcom_scm_set_download_mode(u32 dload_mode)
} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
- } else {
+ } else if (dload_mode) {
dev_err(__scm->dev,
"No available mechanism for setting download mode\n");
}
@@ -903,6 +907,32 @@ int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
+#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK GENMASK(7, 0)
+
+bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+ QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);
+
+int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
+ .arginfo = QCOM_SCM_ARGS(4),
+ .args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
+ .args[1] = 0xffffffff,
+ .args[2] = 0xffffffff,
+ .args[3] = 0xffffffff,
+ .owner = ARM_SMCCC_OWNER_SIP
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);
+
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
struct qcom_scm_desc desc = {
@@ -1252,6 +1282,220 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
+bool qcom_scm_has_wrapped_key_support(void)
+{
+ return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_DERIVE_SW_SECRET) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_GENERATE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_PREPARE_ICE_KEY) &&
+ __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+ QCOM_SCM_ES_IMPORT_ICE_KEY);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_has_wrapped_key_support);
+
+/**
+ * qcom_scm_derive_sw_secret() - Derive software secret from wrapped key
+ * @eph_key: an ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes
+ * @sw_secret: output buffer for the software secret
+ * @sw_secret_size: size of the software secret to derive in bytes
+ *
+ * Derive a software secret from an ephemerally-wrapped key for software crypto
+ * operations. This is done by calling into the secure execution environment,
+ * which then calls into the hardware to unwrap and derive the secret.
+ *
+ * For more information on sw_secret, see the "Hardware-wrapped keys" section of
+ * Documentation/block/inline-encryption.rst.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
+ u8 *sw_secret, size_t sw_secret_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_DERIVE_SW_SECRET,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ eph_key_size,
+ GFP_KERNEL);
+ if (!eph_key_buf)
+ return -ENOMEM;
+
+ void *sw_secret_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ sw_secret_size,
+ GFP_KERNEL);
+ if (!sw_secret_buf)
+ return -ENOMEM;
+
+ memcpy(eph_key_buf, eph_key, eph_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(eph_key_buf);
+ desc.args[1] = eph_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(sw_secret_buf);
+ desc.args[3] = sw_secret_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(sw_secret, sw_secret_buf, sw_secret_size);
+
+ memzero_explicit(eph_key_buf, eph_key_size);
+ memzero_explicit(sw_secret_buf, sw_secret_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_derive_sw_secret);
+
+/**
+ * qcom_scm_generate_ice_key() - Generate a wrapped key for storage encryption
+ * @lt_key: output buffer for the long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Generate a key using the built-in HW module in the SoC. The resulting key is
+ * returned wrapped with the platform-specific Key Encryption Key.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_GENERATE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[1] = lt_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(lt_key, lt_key_buf, lt_key_size);
+
+ memzero_explicit(lt_key_buf, lt_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_generate_ice_key);
+
+/**
+ * qcom_scm_prepare_ice_key() - Re-wrap a key with the per-boot ephemeral key
+ * @lt_key: a long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes
+ * @eph_key: output buffer for the ephemerally-wrapped key
+ * @eph_key_size: size of @eph_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Given a long-term wrapped key, re-wrap it with the per-boot ephemeral key for
+ * added protection. The resulting key will only be valid for the current boot.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
+ u8 *eph_key, size_t eph_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_PREPARE_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ void *eph_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ eph_key_size,
+ GFP_KERNEL);
+ if (!eph_key_buf)
+ return -ENOMEM;
+
+ memcpy(lt_key_buf, lt_key, lt_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[1] = lt_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(eph_key_buf);
+ desc.args[3] = eph_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(eph_key, eph_key_buf, eph_key_size);
+
+ memzero_explicit(lt_key_buf, lt_key_size);
+ memzero_explicit(eph_key_buf, eph_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_prepare_ice_key);
+
+/**
+ * qcom_scm_import_ice_key() - Import key for storage encryption
+ * @raw_key: the raw key to import
+ * @raw_key_size: size of @raw_key in bytes
+ * @lt_key: output buffer for the long-term wrapped key
+ * @lt_key_size: size of @lt_key in bytes. Must be the exact wrapped key size
+ * used by the SoC.
+ *
+ * Import a raw key and return a long-term wrapped key. Uses the SoC's HWKM to
+ * wrap the raw key using the platform-specific Key Encryption Key.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
+ u8 *lt_key, size_t lt_key_size)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_ES,
+ .cmd = QCOM_SCM_ES_IMPORT_ICE_KEY,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+ int ret;
+
+ void *raw_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ raw_key_size,
+ GFP_KERNEL);
+ if (!raw_key_buf)
+ return -ENOMEM;
+
+ void *lt_key_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ lt_key_size,
+ GFP_KERNEL);
+ if (!lt_key_buf)
+ return -ENOMEM;
+
+ memcpy(raw_key_buf, raw_key, raw_key_size);
+ desc.args[0] = qcom_tzmem_to_phys(raw_key_buf);
+ desc.args[1] = raw_key_size;
+ desc.args[2] = qcom_tzmem_to_phys(lt_key_buf);
+ desc.args[3] = lt_key_size;
+
+ ret = qcom_scm_call(__scm->dev, &desc, NULL);
+ if (!ret)
+ memcpy(lt_key, lt_key_buf, lt_key_size);
+
+ memzero_explicit(raw_key_buf, raw_key_size);
+ memzero_explicit(lt_key_buf, lt_key_size);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_scm_import_ice_key);
+
/**
* qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
*
@@ -1361,6 +1605,8 @@ EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
int qcom_scm_shm_bridge_enable(void)
{
+ int ret;
+
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
@@ -1373,7 +1619,15 @@ int qcom_scm_shm_bridge_enable(void)
QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
return -EOPNOTSUPP;
- return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0];
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ if (ret)
+ return ret;
+
+ if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
+ return -EOPNOTSUPP;
+
+ return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
@@ -1731,14 +1985,23 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
* any potential issues with this, only allow validated machines for now.
*/
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
+ { .compatible = "asus,vivobook-s15" },
+ { .compatible = "dell,xps13-9345" },
+ { .compatible = "hp,omnibook-x14" },
+ { .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
+ { .compatible = "lenovo,yoga-slim7x" },
+ { .compatible = "microsoft,arcata", },
+ { .compatible = "microsoft,blackrock" },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
{ .compatible = "qcom,sc8180x-primus" },
+ { .compatible = "qcom,x1e001de-devkit" },
{ .compatible = "qcom,x1e80100-crd" },
{ .compatible = "qcom,x1e80100-qcp" },
+ { .compatible = "qcom,x1p42100-crd" },
{ }
};
@@ -1826,7 +2089,8 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
*/
bool qcom_scm_is_available(void)
{
- return !!READ_ONCE(__scm);
+ /* Paired with smp_store_release() in qcom_scm_probe */
+ return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
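
The acquire/release pair here is the classic pointer-publication idiom: the writer finishes initialising the object, then publishes the pointer with a release store; a reader that observes the pointer through an acquire load is guaranteed to also observe every write that preceded the release. A generic sketch (names illustrative):

	static struct foo *instance;

	static void publish(struct foo *f)
	{
		/* ... fully initialise *f first ... */
		smp_store_release(&instance, f);	/* publish */
	}

	static bool is_ready(void)
	{
		/* pairs with the release store above */
		return !!smp_load_acquire(&instance);
	}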
@@ -1983,18 +2247,22 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Let all above stores be available after this */
+ /* Paired with smp_load_acquire() in qcom_scm_is_available(). */
smp_store_release(&__scm, scm);
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
- if (irq != -ENXIO)
- return irq;
+ if (irq != -ENXIO) {
+ ret = irq;
+ goto err;
+ }
} else {
ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
IRQF_ONESHOT, "qcom-scm", __scm);
- if (ret < 0)
- return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ if (ret < 0) {
+ dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
+ goto err;
+ }
}
__get_convention();
@@ -2013,14 +2281,18 @@ static int qcom_scm_probe(struct platform_device *pdev)
qcom_scm_disable_sdi();
ret = of_reserved_mem_device_init(__scm->dev);
- if (ret && ret != -ENODEV)
- return dev_err_probe(__scm->dev, ret,
- "Failed to setup the reserved memory region for TZ mem\n");
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(__scm->dev, ret,
+ "Failed to setup the reserved memory region for TZ mem\n");
+ goto err;
+ }
ret = qcom_tzmem_enable(__scm->dev);
- if (ret)
- return dev_err_probe(__scm->dev, ret,
- "Failed to enable the TrustZone memory allocator\n");
+ if (ret) {
+ dev_err_probe(__scm->dev, ret,
+ "Failed to enable the TrustZone memory allocator\n");
+ goto err;
+ }
memset(&pool_config, 0, sizeof(pool_config));
pool_config.initial_size = 0;
@@ -2028,9 +2300,11 @@ static int qcom_scm_probe(struct platform_device *pdev)
pool_config.max_size = SZ_256K;
__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
- if (IS_ERR(__scm->mempool))
- return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
- "Failed to create the SCM memory pool\n");
+ if (IS_ERR(__scm->mempool)) {
+ dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+ "Failed to create the SCM memory pool\n");
+ goto err;
+ }
/*
* Initialize the QSEECOM interface.
@@ -2046,6 +2320,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
return 0;
+
+err:
+ /* Paired with smp_load_acquire() in qcom_scm_is_available(). */
+ smp_store_release(&__scm, NULL);
+
+ return ret;
}
static void qcom_scm_shutdown(struct platform_device *pdev)
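
Taken together, the new ICE entry points implement a provision-once, rewrap-per-boot flow: generate (or import) a long-term key wrapped by the hardware KEK, store that blob, and on each boot re-wrap it with the ephemeral key before programming it. A hedged usage sketch; the wrapped-key size is SoC-specific, so WRAPPED_KEY_SIZE below is only a placeholder:

	#define WRAPPED_KEY_SIZE 128	/* placeholder: use the SoC's real size */

	static int provision_ice_key(u8 *eph_key)
	{
		u8 lt_key[WRAPPED_KEY_SIZE];
		int ret;

		if (!qcom_scm_has_wrapped_key_support())
			return -EOPNOTSUPP;

		/* once: long-term key, wrapped by the platform KEK */
		ret = qcom_scm_generate_ice_key(lt_key, sizeof(lt_key));
		if (ret)
			return ret;

		/* each boot: re-wrap with the per-boot ephemeral key */
		return qcom_scm_prepare_ice_key(lt_key, sizeof(lt_key),
						eph_key, WRAPPED_KEY_SIZE);
	}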
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 685b8f59e7a6..097369d38b84 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -116,6 +116,7 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05
#define QCOM_SCM_MP_VIDEO_VAR 0x08
#define QCOM_SCM_MP_ASSIGN 0x16
+#define QCOM_SCM_MP_CP_SMMU_APERTURE_ID 0x1b
#define QCOM_SCM_MP_SHM_BRIDGE_ENABLE 0x1c
#define QCOM_SCM_MP_SHM_BRIDGE_DELETE 0x1d
#define QCOM_SCM_MP_SHM_BRIDGE_CREATE 0x1e
@@ -127,6 +128,10 @@ struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
#define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */
#define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03
#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04
+#define QCOM_SCM_ES_DERIVE_SW_SECRET 0x07
+#define QCOM_SCM_ES_GENERATE_ICE_KEY 0x08
+#define QCOM_SCM_ES_PREPARE_ICE_KEY 0x09
+#define QCOM_SCM_ES_IMPORT_ICE_KEY 0x0a
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_HDCP_INVOKE 0x01
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 85c525745b31..d58da3e4500a 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -757,7 +757,7 @@ MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
static struct platform_driver fw_cfg_sysfs_driver = {
.probe = fw_cfg_sysfs_probe,
- .remove_new = fw_cfg_sysfs_remove,
+ .remove = fw_cfg_sysfs_remove,
.driver = {
.name = "fw_cfg",
.of_match_table = fw_cfg_sysfs_mmio_match,
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 18cc34987108..7ecde6921a0a 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -406,7 +406,7 @@ static struct platform_driver rpi_firmware_driver = {
},
.probe = rpi_firmware_probe,
.shutdown = rpi_firmware_shutdown,
- .remove_new = rpi_firmware_remove,
+ .remove = rpi_firmware_remove,
};
module_platform_driver(rpi_firmware_driver);
diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c
index d670635914ec..a74600d9f2d7 100644
--- a/drivers/firmware/smccc/smccc.c
+++ b/drivers/firmware/smccc/smccc.c
@@ -16,7 +16,6 @@ static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
bool __ro_after_init smccc_trng_available = false;
-u64 __ro_after_init smccc_has_sve_hint = false;
s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED;
s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED;
@@ -28,9 +27,6 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
smccc_conduit = conduit;
smccc_trng_available = smccc_probe_trng();
- if (IS_ENABLED(CONFIG_ARM64_SVE) &&
- smccc_version >= ARM_SMCCC_VERSION_1_3)
- smccc_has_sve_hint = true;
if ((smccc_version >= ARM_SMCCC_VERSION_1_2) &&
(smccc_conduit != SMCCC_CONDUIT_NONE)) {
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index e20cee9c2d32..1ea39a0a76c7 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -802,7 +802,7 @@ static void stratix10_rsu_remove(struct platform_device *pdev)
static struct platform_driver stratix10_rsu_driver = {
.probe = stratix10_rsu_probe,
- .remove_new = stratix10_rsu_remove,
+ .remove = stratix10_rsu_remove,
.driver = {
.name = "stratix10-rsu",
.dev_groups = rsu_groups,
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 528f37417aea..3c52cb73237a 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -967,18 +967,15 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
/* first client will create kernel thread */
if (!chan->ctrl->task) {
chan->ctrl->task =
- kthread_create_on_node(svc_normal_to_secure_thread,
- (void *)chan->ctrl,
- cpu_to_node(cpu),
- "svc_smc_hvc_thread");
+ kthread_run_on_cpu(svc_normal_to_secure_thread,
+ (void *)chan->ctrl,
+ cpu, "svc_smc_hvc_thread");
if (IS_ERR(chan->ctrl->task)) {
dev_err(chan->ctrl->dev,
"failed to create svc_smc_hvc_thread\n");
kfree(p_data);
return -EINVAL;
}
- kthread_bind(chan->ctrl->task, cpu);
- wake_up_process(chan->ctrl->task);
}
pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
@@ -1271,7 +1268,7 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
static struct platform_driver stratix10_svc_driver = {
.probe = stratix10_svc_drv_probe,
- .remove_new = stratix10_svc_drv_remove,
+ .remove = stratix10_svc_drv_remove,
.driver = {
.name = "stratix10-svc",
.of_match_table = stratix10_svc_drv_match,
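
The kthread hunk above folds a three-step bring-up into one helper: kthread_run_on_cpu() creates the thread on the CPU's node, binds it to that CPU, and wakes it, removing the window in which the thread existed unbound:

	/* before: create, bind, wake as separate steps */
	task = kthread_create_on_node(fn, data, cpu_to_node(cpu), "svc_smc_hvc_thread");
	kthread_bind(task, cpu);
	wake_up_process(task);

	/* after: one call does all three */
	task = kthread_run_on_cpu(fn, data, cpu, "svc_smc_hvc_thread");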
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index a3df782fa687..7c5c03f274b9 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -79,6 +79,25 @@ void sysfb_disable(struct device *dev)
}
EXPORT_SYMBOL_GPL(sysfb_disable);
+/**
+ * sysfb_handles_screen_info() - reports if sysfb handles the global screen_info
+ *
+ * Callers can use sysfb_handles_screen_info() to determine whether the Generic
+ * System Framebuffers (sysfb) can handle the global screen_info data structure
+ * or not. Drivers might need this information to know if they have to set up the
+ * system framebuffer, or if they have to delegate this action to sysfb instead.
+ *
+ * Returns:
+ * True if sysfb handles the global screen_info data structure.
+ */
+bool sysfb_handles_screen_info(void)
+{
+ const struct screen_info *si = &screen_info;
+
+ return !!screen_info_video_type(si);
+}
+EXPORT_SYMBOL_GPL(sysfb_handles_screen_info);
+
#if defined(CONFIG_PCI)
static bool sysfb_pci_dev_is_enabled(struct pci_dev *pdev)
{
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 2bee6e918f81..c3a1dc344961 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -3,7 +3,6 @@
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*/
-#include <linux/cleanup.h>
#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
@@ -35,24 +34,29 @@ channel_to_ops(struct tegra_bpmp_channel *channel)
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
- struct device_node *np __free(device_node);
struct platform_device *pdev;
struct tegra_bpmp *bpmp;
+ struct device_node *np;
np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
if (!np)
return ERR_PTR(-ENOENT);
pdev = of_find_device_by_node(np);
- if (!pdev)
- return ERR_PTR(-ENODEV);
+ if (!pdev) {
+ bpmp = ERR_PTR(-ENODEV);
+ goto put;
+ }
bpmp = platform_get_drvdata(pdev);
if (!bpmp) {
+ bpmp = ERR_PTR(-EPROBE_DEFER);
put_device(&pdev->dev);
- return ERR_PTR(-EPROBE_DEFER);
+ goto put;
}
+put:
+ of_node_put(np);
return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 160968301b1f..806a975fff22 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2,13 +2,14 @@
/*
* Texas Instruments System Control Interface Protocol Driver
*
- * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/bitmap.h>
+#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
@@ -19,11 +20,14 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
#include <linux/property.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/suspend.h>
+#include <linux/sys_soc.h>
#include <linux/reboot.h>
#include "ti_sci.h"
@@ -98,6 +102,7 @@ struct ti_sci_desc {
* @minfo: Message info
* @node: list head
* @host_id: Host ID
+ * @fw_caps: FW/SoC low power capabilities
* @users: Number of users of this instance
*/
struct ti_sci_info {
@@ -114,6 +119,7 @@ struct ti_sci_info {
struct ti_sci_xfers_info minfo;
struct list_head node;
u8 host_id;
+ u64 fw_caps;
/* protected by ti_sci_list_mutex */
int users;
};
@@ -1651,6 +1657,364 @@ fail:
return ret;
}
+/**
+ * ti_sci_cmd_prepare_sleep() - Prepare system for system suspend
+ * @handle: pointer to TI SCI handle
+ * @mode: Low power mode to enter
+ * @ctx_lo: Low part of address for context save
+ * @ctx_hi: High part of address for context save
+ * @debug_flags: Debug flags to pass to firmware
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
+ u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_prepare_sleep *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ req = (struct ti_sci_msg_req_prepare_sleep *)xfer->xfer_buf;
+ req->mode = mode;
+ req->ctx_lo = ctx_lo;
+ req->ctx_hi = ctx_hi;
+ req->debug_flags = debug_flags;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to prepare sleep\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_msg_cmd_query_fw_caps() - Get the FW/SoC capabilities
+ * @handle: Pointer to TI SCI handle
+ * @fw_caps: Each bit in fw_caps indicates one FW/SoC capability
+ *
+ * Check if the firmware supports any optional low power modes.
+ * Old revisions of TIFS (< 08.04) will NACK the request, which results in
+ * -ENODEV being returned.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_msg_cmd_query_fw_caps(const struct ti_sci_handle *handle,
+ u64 *fw_caps)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_resp_query_fw_caps *resp;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_FW_CAPS,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_query_fw_caps *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to get capabilities\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (fw_caps)
+ *fw_caps = resp->fw_caps;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_set_io_isolation() - Enable IO isolation in LPM
+ * @handle: Pointer to TI SCI handle
+ * @state: The desired state of the IO isolation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_set_io_isolation(const struct ti_sci_handle *handle,
+ u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_set_io_isolation *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_IO_ISOLATION,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_set_io_isolation *)xfer->xfer_buf;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to set IO isolation\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_msg_cmd_lpm_wake_reason() - Get the wakeup source from LPM
+ * @handle: Pointer to TI SCI handle
+ * @source: The wakeup source that woke the SoC from LPM
+ * @timestamp: Timestamp of the wakeup event
+ * @pin: The pin that has triggered wake up
+ * @mode: The last entered low power mode
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_msg_cmd_lpm_wake_reason(const struct ti_sci_handle *handle,
+ u32 *source, u64 *timestamp, u8 *pin, u8 *mode)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_msg_resp_lpm_wake_reason *resp;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_WAKE_REASON,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_lpm_wake_reason *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to get wake reason\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (source)
+ *source = resp->wake_source;
+ if (timestamp)
+ *timestamp = resp->wake_timestamp;
+ if (pin)
+ *pin = resp->wake_pin;
+ if (mode)
+ *mode = resp->mode;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_set_device_constraint() - Set LPM constraint on behalf of a device
+ * @handle: pointer to TI SCI handle
+ * @id: Device identifier
+ * @state: The desired state of device constraint: set or clear
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_set_device_constraint(const struct ti_sci_handle *handle,
+ u32 id, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_lpm_set_device_constraint *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_lpm_set_device_constraint *)xfer->xfer_buf;
+ req->id = id;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to set device constraint\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_set_latency_constraint() - Set LPM resume latency constraint
+ * @handle: pointer to TI SCI handle
+ * @latency: maximum acceptable latency (in ms) to wake up from LPM
+ * @state: The desired state of latency constraint: set or clear
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_set_latency_constraint(const struct ti_sci_handle *handle,
+ u16 latency, u8 state)
+{
+ struct ti_sci_info *info;
+ struct ti_sci_msg_req_lpm_set_latency_constraint *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_lpm_set_latency_constraint *)xfer->xfer_buf;
+ req->latency = latency;
+ req->state = state;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to set device constraint\n");
+ ret = -ENODEV;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
struct ti_sci_info *info;
@@ -2793,6 +3157,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
struct ti_sci_core_ops *core_ops = &ops->core_ops;
struct ti_sci_dev_ops *dops = &ops->dev_ops;
struct ti_sci_clk_ops *cops = &ops->clk_ops;
+ struct ti_sci_pm_ops *pmops = &ops->pm_ops;
struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
@@ -2832,6 +3197,13 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
cops->set_freq = ti_sci_cmd_clk_set_freq;
cops->get_freq = ti_sci_cmd_clk_get_freq;
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
+ pr_debug("detected DM managed LPM in fw_caps\n");
+ pmops->lpm_wake_reason = ti_sci_msg_cmd_lpm_wake_reason;
+ pmops->set_device_constraint = ti_sci_cmd_set_device_constraint;
+ pmops->set_latency_constraint = ti_sci_cmd_set_latency_constraint;
+ }
+
rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
rm_core_ops->get_range_from_shost =
ti_sci_cmd_get_resource_range_from_shost;
@@ -3262,6 +3634,111 @@ static int tisci_reboot_handler(struct sys_off_data *data)
return NOTIFY_BAD;
}
+static int ti_sci_prepare_system_suspend(struct ti_sci_info *info)
+{
+ /*
+ * Map and validate the target Linux suspend state to TISCI LPM.
+ * Default is to let Device Manager select the low power mode.
+ */
+ switch (pm_suspend_target_state) {
+ case PM_SUSPEND_MEM:
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
+ /*
+			 * For the DM_MANAGED mode, the context is reserved
+			 * for internal use and can be 0.
+			 */
+ return ti_sci_cmd_prepare_sleep(&info->handle,
+ TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED,
+ 0, 0, 0);
+ } else {
+ /* DM Managed is not supported by the firmware. */
+ dev_err(info->dev, "Suspend to memory is not supported by the firmware\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ /*
+ * Do not fail if we don't have action to take for a
+ * specific suspend mode.
+ */
+ return 0;
+ }
+}
+
+static int __maybe_unused ti_sci_suspend(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ struct device *cpu_dev, *cpu_dev_max = NULL;
+ s32 val, cpu_lat = 0;
+ int i, ret;
+
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) {
+ for_each_possible_cpu(i) {
+ cpu_dev = get_cpu_device(i);
+ val = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_RESUME_LATENCY);
+ if (val != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) {
+ cpu_lat = max(cpu_lat, val);
+ cpu_dev_max = cpu_dev;
+ }
+ }
+ if (cpu_dev_max) {
+ dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u\n", __func__, cpu_lat);
+ ret = ti_sci_cmd_set_latency_constraint(&info->handle,
+ cpu_lat, TISCI_MSG_CONSTRAINT_SET);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = ti_sci_prepare_system_suspend(info);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __maybe_unused ti_sci_suspend_noirq(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+ u32 source;
+ u64 time;
+ u8 pin;
+ u8 mode;
+
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
+ if (ret)
+ return ret;
+
+ ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode);
+ /* Do not fail to resume on error as the wake reason is not critical */
+ if (!ret)
+ dev_info(dev, "ti_sci: wakeup source:0x%x, pin:0x%x, mode:0x%x\n",
+ source, pin, mode);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ti_sci_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ti_sci_suspend,
+ .suspend_noirq = ti_sci_suspend_noirq,
+ .resume_noirq = ti_sci_resume_noirq,
+#endif
+};
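
For context, a consumer reaches these new ops through its TI SCI handle once ti_sci_setup_ops() has populated them. A minimal sketch, assuming the pm_ops names added by this series and the existing ti_sci_get_by_phandle() helper; the "ti,sci" property name and device ID 42 are hypothetical:

	/* Hedged sketch: set an LPM constraint from a consumer driver. */
	#include <linux/soc/ti/ti_sci_protocol.h>

	static int example_set_lpm_constraint(struct device *dev)
	{
		const struct ti_sci_handle *handle;

		/* Look up the TI SCI controller via a DT phandle property */
		handle = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		/* Ask the DM not to power off or reset device 42 in LPM */
		return handle->ops.pm_ops.set_device_constraint(handle, 42,
						TISCI_MSG_CONSTRAINT_SET);
	}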
+
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
.default_host_id = 2,
@@ -3390,6 +3867,13 @@ static int ti_sci_probe(struct platform_device *pdev)
goto out;
}
+ ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
+ dev_dbg(dev, "Detected firmware capabilities: %s%s%s\n",
+ info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : ""
+ );
+
ti_sci_setup_ops(info);
ret = devm_register_restart_handler(dev, tisci_reboot_handler, info);
@@ -3421,8 +3905,9 @@ static struct platform_driver ti_sci_driver = {
.probe = ti_sci_probe,
.driver = {
.name = "ti-sci",
- .of_match_table = of_match_ptr(ti_sci_of_match),
+ .of_match_table = ti_sci_of_match,
.suppress_bind_attrs = true,
+ .pm = &ti_sci_pm_ops,
},
};
module_platform_driver(ti_sci_driver);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 5846c60220f5..053387d7baa0 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -6,7 +6,7 @@
* The system works in a message response protocol
* See: https://software-dl.ti.com/tisci/esd/latest/index.html for details
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __TI_SCI_H
@@ -19,6 +19,7 @@
#define TI_SCI_MSG_WAKE_REASON 0x0003
#define TI_SCI_MSG_GOODBYE 0x0004
#define TI_SCI_MSG_SYS_RESET 0x0005
+#define TI_SCI_MSG_QUERY_FW_CAPS 0x0022
/* Device requests */
#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200
@@ -35,6 +36,13 @@
#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+/* Low Power Mode Requests */
+#define TI_SCI_MSG_PREPARE_SLEEP 0x0300
+#define TI_SCI_MSG_LPM_WAKE_REASON 0x0306
+#define TI_SCI_MSG_SET_IO_ISOLATION 0x0307
+#define TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT 0x0309
+#define TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT 0x030A
+
/* Resource Management Requests */
#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
@@ -133,6 +141,27 @@ struct ti_sci_msg_req_reboot {
} __packed;
/**
+ * struct ti_sci_msg_resp_query_fw_caps - Response for query firmware caps
+ * @hdr: Generic header
+ * @fw_caps: Each bit in fw_caps indicates one FW/SoC capability
+ * MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported)
+ * MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
+ * MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM
+ *
+ * Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
+ * providing the currently available SoC/firmware capabilities. SoCs that
+ * don't support low power modes return only the MSG_FLAG_CAPS_GENERIC
+ * capability.
+ */
+struct ti_sci_msg_resp_query_fw_caps {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0)
+#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
+#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5)
+#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
+ u64 fw_caps;
+} __packed;
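
To make the bit layout concrete, here is a short sketch of decoding a fw_caps word with the flags defined above (the sample value is invented):

	u64 caps = MSG_FLAG_CAPS_GENERIC | MSG_FLAG_CAPS_LPM_DM_MANAGED;

	if (caps & MSG_FLAG_CAPS_LPM_DM_MANAGED)
		pr_info("DM-managed low power modes available\n");
	if (caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO)
		pr_info("Partial-IO low power mode available\n");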
+
+/**
* struct ti_sci_msg_req_set_device_state - Set the desired state of the device
* @hdr: Generic header
* @id: Indicates which device to modify
@@ -545,6 +574,118 @@ struct ti_sci_msg_resp_get_clock_freq {
u64 freq_hz;
} __packed;
+/**
+ * struct ti_sci_msg_req_prepare_sleep - Request for TI_SCI_MSG_PREPARE_SLEEP.
+ *
+ * @hdr: TISCI header to provide ACK/NAK flags to the host.
+ * @mode: Low power mode to enter.
+ * @ctx_lo: Low 32 bits of the physical address to use for context save.
+ * @ctx_hi: High 32 bits of the physical address to use for context save.
+ * @debug_flags: Flags that can be set to halt the sequence during suspend or
+ *               resume to allow JTAG connection and debug.
+ *
+ * This message is used as the first step of entering a low power mode. It
+ * allows configurable information, including which state to enter, to be
+ * easily shared from the application, as this is a non-secure message and
+ * can therefore be sent by anyone.
+ */
+struct ti_sci_msg_req_prepare_sleep {
+ struct ti_sci_msg_hdr hdr;
+
+#define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd
+ u8 mode;
+ u32 ctx_lo;
+ u32 ctx_hi;
+ u32 debug_flags;
+} __packed;
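
The two context words map onto a single 64-bit physical address. A minimal sketch of filling the request, for illustration only: the buffer address is hypothetical, the header is normally filled in by ti_sci_get_one_xfer(), and the DM-managed path above simply passes 0 for both halves, so the split shown here applies to modes that take a real context buffer (the mode value is a placeholder):

	dma_addr_t ctx = 0x9e800000;	/* hypothetical context save buffer */
	struct ti_sci_msg_req_prepare_sleep req = {
		.mode	= TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED,
		.ctx_lo	= lower_32_bits(ctx),	/* 0 for DM_MANAGED, as above */
		.ctx_hi	= upper_32_bits(ctx),
		.debug_flags = 0,
	};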
+
+/**
+ * struct ti_sci_msg_req_set_io_isolation - Request for TI_SCI_MSG_SET_IO_ISOLATION.
+ *
+ * @hdr: Generic header
+ * @state: The desired state of the IO isolation.
+ *
+ * This message is used to enable/disable IO isolation for low power modes.
+ * The response is a generic ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_io_isolation {
+ struct ti_sci_msg_hdr hdr;
+ u8 state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_lpm_wake_reason - Response for TI_SCI_MSG_LPM_WAKE_REASON.
+ *
+ * @hdr: Generic header.
+ * @wake_source: The wakeup source that woke the SoC from LPM.
+ * @wake_timestamp: Timestamp at which the SoC woke.
+ * @wake_pin: The pin that has triggered wake up.
+ * @mode: The last entered low power mode.
+ * @rsvd: Reserved for future use.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_LPM_WAKE_REASON,
+ * used to query the wakeup source, pin, and last entered low power mode.
+ */
+struct ti_sci_msg_resp_lpm_wake_reason {
+ struct ti_sci_msg_hdr hdr;
+ u32 wake_source;
+ u64 wake_timestamp;
+ u8 wake_pin;
+ u8 mode;
+ u32 rsvd[2];
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_lpm_set_device_constraint - Request for
+ * TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT.
+ *
+ * @hdr: TISCI header to provide ACK/NAK flags to the host.
+ * @id: Device ID of device whose constraint has to be modified.
+ * @state: The desired state of device constraint: set or clear.
+ * @rsvd: Reserved for future use.
+ *
+ * This message is used by the host to set a constraint on a device. It can
+ * be sent anytime after boot and before the prepare sleep message. Any
+ * device can set a constraint on the low power mode that the SoC can enter.
+ * It allows configurable information to be easily shared from the
+ * application, as this is a non-secure message and can therefore be sent by
+ * anyone. By setting a constraint, the device ensures that it will not be
+ * powered off or reset in the selected mode. Note: the device's exclusivity
+ * flag is honored; if some other host already holds a constraint on this
+ * device ID, a NACK is returned.
+ */
+struct ti_sci_msg_req_lpm_set_device_constraint {
+ struct ti_sci_msg_hdr hdr;
+ u32 id;
+ u8 state;
+ u32 rsvd[2];
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_lpm_set_latency_constraint - Request for
+ * TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT.
+ *
+ * @hdr: TISCI header to provide ACK/NAK flags to the host.
+ * @latency: The maximum acceptable latency to wake up from low power mode
+ *           in milliseconds. The deeper the state, the higher the latency.
+ * @state: The desired state of wakeup latency constraint: set or clear.
+ * @rsvd: Reserved for future use.
+ *
+ * This message is used by the host to set the wakeup latency from low power
+ * mode. It can be sent anytime after boot and before the prepare sleep
+ * message, and can be sent again after the current low power mode is exited.
+ * Any device can set a constraint on the low power mode that the SoC can
+ * enter. It allows configurable information to be easily shared from the
+ * application, as this is a non-secure message and can therefore be sent by
+ * anyone. By setting a wakeup latency constraint, the host ensures that the
+ * resume time from the selected low power mode will be less than the
+ * constraint value.
+ */
+struct ti_sci_msg_req_lpm_set_latency_constraint {
+ struct ti_sci_msg_hdr hdr;
+ u16 latency;
+ u8 state;
+ u32 rsvd;
+} __packed;
+
#define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff
/**
diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c
index f3bc0d427825..47fe6261f5a3 100644
--- a/drivers/firmware/turris-mox-rwtm.c
+++ b/drivers/firmware/turris-mox-rwtm.c
@@ -61,6 +61,27 @@ enum mbox_cmd {
MBOX_CMD_OTP_WRITE = 8,
};
+/**
+ * struct mox_rwtm - driver private data structure
+ * @mbox_client: rWTM mailbox client
+ * @mbox: rWTM mailbox channel
+ * @hwrng: RNG driver structure
+ * @reply: last mailbox reply, filled in receive callback
+ * @buf: DMA buffer
+ * @buf_phys: physical address of the DMA buffer
+ * @busy: mutex to protect mailbox command execution
+ * @cmd_done: command done completion
+ * @has_board_info: whether board information is present
+ * @serial_number: serial number of the device
+ * @board_version: board version / revision of the device
+ * @ram_size: RAM size of the device
+ * @mac_address1: first MAC address of the device
+ * @mac_address2: second MAC address of the device
+ * @has_pubkey: whether board ECDSA public key is present
+ * @pubkey: board ECDSA public key
+ * @last_sig: last ECDSA signature generated with board ECDSA private key
+ * @last_sig_done: whether the last ECDSA signing is complete
+ */
struct mox_rwtm {
struct mbox_client mbox_client;
struct mbox_chan *mbox;
@@ -74,13 +95,11 @@ struct mox_rwtm {
struct mutex busy;
struct completion cmd_done;
- /* board information */
bool has_board_info;
u64 serial_number;
int board_version, ram_size;
u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN];
- /* public key burned in eFuse */
bool has_pubkey;
u8 pubkey[135];
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 8528850af889..22853ae0efdf 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -31,13 +31,51 @@ static char debugfs_buf[PAGE_SIZE];
#define PM_API(id) {id, #id, strlen(#id)}
static struct pm_api_info pm_api_list[] = {
+ PM_API(PM_FORCE_POWERDOWN),
+ PM_API(PM_REQUEST_WAKEUP),
+ PM_API(PM_SYSTEM_SHUTDOWN),
+ PM_API(PM_REQUEST_NODE),
+ PM_API(PM_RELEASE_NODE),
+ PM_API(PM_SET_REQUIREMENT),
PM_API(PM_GET_API_VERSION),
+ PM_API(PM_REGISTER_NOTIFIER),
+ PM_API(PM_RESET_ASSERT),
+ PM_API(PM_RESET_GET_STATUS),
+ PM_API(PM_GET_CHIPID),
+ PM_API(PM_PINCTRL_SET_FUNCTION),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_GET),
+ PM_API(PM_PINCTRL_CONFIG_PARAM_SET),
+ PM_API(PM_IOCTL),
+ PM_API(PM_CLOCK_ENABLE),
+ PM_API(PM_CLOCK_DISABLE),
+ PM_API(PM_CLOCK_GETSTATE),
+ PM_API(PM_CLOCK_SETDIVIDER),
+ PM_API(PM_CLOCK_GETDIVIDER),
+ PM_API(PM_CLOCK_SETPARENT),
+ PM_API(PM_CLOCK_GETPARENT),
PM_API(PM_QUERY_DATA),
};
static struct dentry *firmware_debugfs_root;
/**
+ * zynqmp_pm_ioctl - PM IOCTL for device control and configs
+ * @node: Node ID of the device
+ * @ioctl: ID of the requested IOCTL
+ * @arg1: Argument 1 of requested IOCTL call
+ * @arg2: Argument 2 of requested IOCTL call
+ * @arg3: Argument 3 of requested IOCTL call
+ * @out: Returned output value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_ioctl(const u32 node, const u32 ioctl, const u32 arg1,
+ const u32 arg2, const u32 arg3, u32 *out)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, out, 5, node, ioctl, arg1, arg2, arg3);
+}
+
+/**
* zynqmp_pm_argument_value() - Extract argument value from a PM-API request
* @arg: Entered PM-API argument in string format
*
@@ -95,6 +133,128 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
+ case PM_FORCE_POWERDOWN:
+ ret = zynqmp_pm_force_pwrdwn(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_REQUEST_WAKEUP:
+ ret = zynqmp_pm_request_wake(pm_api_arg[0],
+ pm_api_arg[1], pm_api_arg[2],
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_NO);
+ break;
+ case PM_SYSTEM_SHUTDOWN:
+ ret = zynqmp_pm_system_shutdown(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_REQUEST_NODE:
+ ret = zynqmp_pm_request_node(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ pm_api_arg[2] ? pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_RELEASE_NODE:
+ ret = zynqmp_pm_release_node(pm_api_arg[0]);
+ break;
+ case PM_SET_REQUIREMENT:
+ ret = zynqmp_pm_set_requirement(pm_api_arg[0],
+ pm_api_arg[1] ? pm_api_arg[1] :
+ ZYNQMP_PM_CAPABILITY_CONTEXT,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ? pm_api_arg[3] :
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ break;
+ case PM_REGISTER_NOTIFIER:
+ ret = zynqmp_pm_register_notifier(pm_api_arg[0],
+ pm_api_arg[1] ?
+ pm_api_arg[1] : 0,
+ pm_api_arg[2] ?
+ pm_api_arg[2] : 0,
+ pm_api_arg[3] ?
+ pm_api_arg[3] : 0);
+ break;
+ case PM_RESET_ASSERT:
+ ret = zynqmp_pm_reset_assert(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_RESET_GET_STATUS:
+ ret = zynqmp_pm_reset_get_status(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Reset status: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_GET_CHIPID:
+ ret = zynqmp_pm_get_chipid(&pm_api_ret[0], &pm_api_ret[1]);
+ if (!ret)
+ sprintf(debugfs_buf, "Idcode: %#x, Version:%#x\n",
+ pm_api_ret[0], pm_api_ret[1]);
+ break;
+ case PM_PINCTRL_SET_FUNCTION:
+ ret = zynqmp_pm_pinctrl_set_function(pm_api_arg[0],
+ pm_api_arg[1]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_GET:
+ ret = zynqmp_pm_pinctrl_get_config(pm_api_arg[0], pm_api_arg[1],
+ &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Pin: %llu, Param: %llu, Value: %u\n",
+ pm_api_arg[0], pm_api_arg[1],
+ pm_api_ret[0]);
+ break;
+ case PM_PINCTRL_CONFIG_PARAM_SET:
+ ret = zynqmp_pm_pinctrl_set_config(pm_api_arg[0],
+ pm_api_arg[1],
+ pm_api_arg[2]);
+ break;
+ case PM_IOCTL:
+ ret = zynqmp_pm_ioctl(pm_api_arg[0], pm_api_arg[1],
+ pm_api_arg[2], pm_api_arg[3],
+ pm_api_arg[4], &pm_api_ret[0]);
+ if (!ret && (pm_api_arg[1] == IOCTL_GET_RPU_OPER_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_MODE ||
+ pm_api_arg[1] == IOCTL_GET_PLL_FRAC_DATA ||
+ pm_api_arg[1] == IOCTL_READ_GGS ||
+ pm_api_arg[1] == IOCTL_READ_PGGS ||
+ pm_api_arg[1] == IOCTL_READ_REG))
+ sprintf(debugfs_buf, "IOCTL return value: %u\n",
+ pm_api_ret[1]);
+ if (!ret && pm_api_arg[1] == IOCTL_GET_QOS)
+ sprintf(debugfs_buf, "Default QoS: %u\nCurrent QoS: %u\n",
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
+ case PM_CLOCK_ENABLE:
+ ret = zynqmp_pm_clock_enable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_DISABLE:
+ ret = zynqmp_pm_clock_disable(pm_api_arg[0]);
+ break;
+ case PM_CLOCK_GETSTATE:
+ ret = zynqmp_pm_clock_getstate(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Clock state: %u\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETDIVIDER:
+ ret = zynqmp_pm_clock_setdivider(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETDIVIDER:
+ ret = zynqmp_pm_clock_getdivider(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf, "Divider Value: %d\n",
+ pm_api_ret[0]);
+ break;
+ case PM_CLOCK_SETPARENT:
+ ret = zynqmp_pm_clock_setparent(pm_api_arg[0], pm_api_arg[1]);
+ break;
+ case PM_CLOCK_GETPARENT:
+ ret = zynqmp_pm_clock_getparent(pm_api_arg[0], &pm_api_ret[0]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "Clock parent Index: %u\n", pm_api_ret[0]);
+ break;
case PM_QUERY_DATA:
qdata.qid = pm_api_arg[0];
qdata.arg1 = pm_api_arg[1];
@@ -150,7 +310,7 @@ static ssize_t zynqmp_pm_debugfs_api_write(struct file *file,
char *kern_buff, *tmp_buff;
char *pm_api_req;
u32 pm_id = 0;
- u64 pm_api_arg[4] = {0, 0, 0, 0};
+ u64 pm_api_arg[5] = {0, 0, 0, 0, 0};
/* Return values from PM APIs calls */
u32 pm_api_ret[4] = {0, 0, 0, 0};
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index add8acf66a9c..720fa8b5d8e9 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -3,7 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2022 Xilinx, Inc.
- * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -46,6 +46,7 @@ static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
static u32 query_features[FEATURE_PAYLOAD_SIZE];
+static u32 sip_svc_version;
static struct platform_device *em_dev;
/**
@@ -151,6 +152,9 @@ static noinline int do_fw_call_smc(u32 *ret_payload, u32 num_args, ...)
ret_payload[1] = upper_32_bits(res.a0);
ret_payload[2] = lower_32_bits(res.a1);
ret_payload[3] = upper_32_bits(res.a1);
+ ret_payload[4] = lower_32_bits(res.a2);
+ ret_payload[5] = upper_32_bits(res.a2);
+ ret_payload[6] = lower_32_bits(res.a3);
}
return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
@@ -191,6 +195,9 @@ static noinline int do_fw_call_hvc(u32 *ret_payload, u32 num_args, ...)
ret_payload[1] = upper_32_bits(res.a0);
ret_payload[2] = lower_32_bits(res.a1);
ret_payload[3] = upper_32_bits(res.a1);
+ ret_payload[4] = lower_32_bits(res.a2);
+ ret_payload[5] = upper_32_bits(res.a2);
+ ret_payload[6] = lower_32_bits(res.a3);
}
return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
@@ -218,11 +225,14 @@ static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
* Feature check of TF-A APIs is done in the TF-A layer and it expects for
* MODULE_ID_MASK bits of SMC's arg[0] to be the same as PM_MODULE_ID.
*/
- if (module_id == TF_A_MODULE_ID)
+ if (module_id == TF_A_MODULE_ID) {
module_id = PM_MODULE_ID;
+ smc_arg[1] = api_id;
+ } else {
+ smc_arg[1] = (api_id & API_ID_MASK);
+ }
smc_arg[0] = PM_SIP_SVC | FIELD_PREP(MODULE_ID_MASK, module_id) | feature_check_api_id;
- smc_arg[1] = (api_id & API_ID_MASK);
ret = do_fw_call(ret_payload, 2, smc_arg[0], smc_arg[1]);
if (ret)
@@ -332,6 +342,70 @@ int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
/**
+ * zynqmp_pm_invoke_fw_fn() - Invoke the system-level platform management layer
+ * caller function depending on the configuration
+ * @pm_api_id: Requested PM-API call
+ * @ret_payload: Returned value array
+ * @num_args: Number of arguments to requested PM-API call
+ *
+ * Invoke platform management function for SMC or HVC call, depending on
+ * configuration.
+ * Following SMC Calling Convention (SMCCC) for SMC64:
+ * PM Function Identifier,
+ * PM_SIP_SVC + PASS_THROUGH_FW_CMD_ID =
+ *	((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) |
+ *	((SMC_64) << FUNCID_CC_SHIFT) |
+ *	((SIP_START) << FUNCID_OEN_SHIFT) |
+ *	(PASS_THROUGH_FW_CMD_ID))
+ *
+ * PM_SIP_SVC - Registered ZynqMP SIP Service Call.
+ * PASS_THROUGH_FW_CMD_ID - Fixed SiP SVC call ID for FW specific calls.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...)
+{
+	/*
+	 * SMC arguments; the SiP service call function identifier
+	 * must stay in the x0 register.
+	 */
+ u64 smc_arg[SMC_ARG_CNT_64];
+ int ret, i;
+ va_list arg_list;
+ u32 args[SMC_ARG_CNT_32] = {0};
+ u32 module_id;
+
+ if (num_args > SMC_ARG_CNT_32)
+ return -EINVAL;
+
+	/* Check if the API is supported before consuming the variadic args */
+	ret = zynqmp_pm_feature(pm_api_id);
+	if (ret < 0)
+		return ret;
+
+	va_start(arg_list, num_args);
+
+	for (i = 0; i < num_args; i++)
+		args[i] = va_arg(arg_list, u32);
+
+	va_end(arg_list);
+
+ module_id = FIELD_GET(PLM_MODULE_ID_MASK, pm_api_id);
+
+ if (module_id == 0)
+ module_id = XPM_MODULE_ID;
+
+ smc_arg[0] = PM_SIP_SVC | PASS_THROUGH_FW_CMD_ID;
+ smc_arg[1] = ((u64)args[0] << 32U) | FIELD_PREP(PLM_MODULE_ID_MASK, module_id) |
+ (pm_api_id & API_ID_MASK);
+ for (i = 1; i < (SMC_ARG_CNT_64 - 1); i++)
+ smc_arg[i + 1] = ((u64)args[(i * 2)] << 32U) | args[(i * 2) - 1];
+
+ return do_fw_call(ret_payload, 8, smc_arg[0], smc_arg[1], smc_arg[2], smc_arg[3],
+ smc_arg[4], smc_arg[5], smc_arg[6], smc_arg[7]);
+}
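
The loop above folds the 32-bit payload into 64-bit SMC registers two arguments at a time: args[0] shares x1 with the module/API identifier, and each further register carries one odd-indexed argument in its low word and the following even-indexed one in its high word. A standalone sketch of the same arithmetic, runnable outside the kernel (all values invented):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t args[13] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint64_t x[8] = { 0 };
		uint32_t id_word = 0x00020022;	/* hypothetical module/API id */
		int i;

		/* Mirror the kernel's pass-through packing loop */
		x[1] = ((uint64_t)args[0] << 32) | id_word;
		for (i = 1; i < 7; i++)
			x[i + 1] = ((uint64_t)args[i * 2] << 32) | args[i * 2 - 1];

		for (i = 1; i < 8; i++)
			printf("x%d = 0x%016llx\n", i, (unsigned long long)x[i]);
		return 0;
	}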
+
+/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
* caller function depending on the configuration
* @pm_api_id: Requested PM-API call
@@ -489,6 +563,35 @@ int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
EXPORT_SYMBOL_GPL(zynqmp_pm_get_family_info);
/**
+ * zynqmp_pm_get_sip_svc_version() - Get SiP service call version
+ * @version: Returned version value
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_get_sip_svc_version(u32 *version)
+{
+ struct arm_smccc_res res;
+ u64 args[SMC_ARG_CNT_64] = {0};
+
+ if (!version)
+ return -EINVAL;
+
+	/* Return the cached SiP SVC version if it was already queried */
+ if (sip_svc_version > 0) {
+ *version = sip_svc_version;
+ return 0;
+ }
+
+ args[0] = GET_SIP_SVC_VERSION;
+
+ arm_smccc_smc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res);
+
+ *version = ((lower_32_bits(res.a0) << 16U) | lower_32_bits(res.a1));
+
+ return zynqmp_pm_ret_code(XST_PM_SUCCESS);
+}
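
The packed word places the major version in the upper 16 bits and the minor in the lower 16, assuming res.a0/res.a1 carry major/minor per the SMCCC version-call convention. Decoding is then a one-liner:

	pr_info("SiP SVC v%u.%u\n", sip_svc_version >> 16,
		sip_svc_version & 0xffff);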
+
+/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
* @version: Returned version value
*
@@ -552,10 +655,34 @@ static int get_set_conduit_method(struct device_node *np)
*/
int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
{
- int ret;
+ int ret, i = 0;
+ u32 ret_payload[PAYLOAD_ARG_CNT] = {0};
+
+ if (sip_svc_version >= SIP_SVC_PASSTHROUGH_VERSION) {
+ ret = zynqmp_pm_invoke_fw_fn(PM_QUERY_DATA, ret_payload, 4,
+ qdata.qid, qdata.arg1,
+ qdata.arg2, qdata.arg3);
+ /* To support backward compatibility */
+ if (!ret && !ret_payload[0]) {
+			/*
+			 * TF-A returns its status word at index 0, but the
+			 * clock-name and pinctrl-function-name queries expect
+			 * their data to start at index 0, so skip the status
+			 * word and return the data there instead.
+			 */
+ if (qdata.qid == PM_QID_CLOCK_GET_NAME ||
+ qdata.qid == PM_QID_PINCTRL_GET_FUNCTION_NAME)
+ i = 1;
+
+ for (; i < PAYLOAD_ARG_CNT; i++, out++)
+ *out = ret_payload[i];
+
+ return ret;
+ }
+ }
- ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid, qdata.arg1, qdata.arg2,
- qdata.arg3);
+ ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid,
+ qdata.arg1, qdata.arg2, qdata.arg3);
/*
* For clock name query, all bytes in SMC response are clock name
@@ -920,7 +1047,7 @@ int zynqmp_pm_set_boot_health_status(u32 value)
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+int zynqmp_pm_reset_assert(const u32 reset,
const enum zynqmp_pm_reset_action assert_flag)
{
return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, NULL, 2, reset, assert_flag);
@@ -934,7 +1061,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
+int zynqmp_pm_reset_get_status(const u32 reset, u32 *status)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -1118,8 +1245,11 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
if (pm_family_code == ZYNQMP_FAMILY_CODE &&
param == PM_PINCTRL_CONFIG_TRI_STATE) {
ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET);
- if (ret < PM_PINCTRL_PARAM_SET_VERSION)
+ if (ret < PM_PINCTRL_PARAM_SET_VERSION) {
+ pr_warn("The requested pinctrl feature is not supported in the current firmware.\n"
+ "Expected firmware version is 2023.1 and above for this feature to work.\r\n");
return -EOPNOTSUPP;
+ }
}
return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, NULL, 3, pin, param, value);
@@ -1887,6 +2017,11 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /* Get SiP SVC version number */
+ ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version);
+ if (ret)
+ return ret;
+
ret = do_feature_check_call(PM_FEATURE_CHECK);
if (ret >= 0 && ((ret & FIRMWARE_VERSION_MASK) >= PM_API_VERSION_1))
feature_check_enabled = true;
@@ -1983,6 +2118,6 @@ static struct platform_driver zynqmp_firmware_driver = {
.dev_groups = zynqmp_firmware_groups,
},
.probe = zynqmp_firmware_probe,
- .remove_new = zynqmp_firmware_remove,
+ .remove = zynqmp_firmware_remove,
};
module_platform_driver(zynqmp_firmware_driver);
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index f4de3fea0b2d..e41492988dd6 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -152,7 +152,7 @@ MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
static struct platform_driver altera_fpga_driver = {
.probe = alt_fpga_bridge_probe,
- .remove_new = alt_fpga_bridge_remove,
+ .remove = alt_fpga_bridge_remove,
.driver = {
.name = "altera_fpga2sdram_bridge",
.of_match_table = of_match_ptr(altera_fpga_of_match),
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index 44061cb16f87..594693ff786e 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -262,7 +262,7 @@ static void altera_freeze_br_remove(struct platform_device *pdev)
static struct platform_driver altera_freeze_br_driver = {
.probe = altera_freeze_br_probe,
- .remove_new = altera_freeze_br_remove,
+ .remove = altera_freeze_br_remove,
.driver = {
.name = "altera_freeze_br",
.of_match_table = altera_freeze_br_of_match,
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index 6f8e24be19c6..f2f1250689cb 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -205,7 +205,7 @@ MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
static struct platform_driver alt_fpga_bridge_driver = {
.probe = alt_fpga_bridge_probe,
- .remove_new = alt_fpga_bridge_remove,
+ .remove = alt_fpga_bridge_remove,
.driver = {
.name = "altera_hps2fpga_bridge",
.of_match_table = of_match_ptr(altera_fpga_of_match),
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 02b60fde0430..5aa7b8884374 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -16,26 +16,26 @@
#include "dfl-afu.h"
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
afu->dma_regions = RB_ROOT;
}
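
Regions added later live in this rbtree keyed by IOVA, and afu_dma_region_find() walks it testing whether a node fully contains the queried [iova, iova + size) window. A minimal sketch of that containment test with hypothetical names (the driver's actual check is dma_region_check_iova()):

	/* Does [r_iova, r_iova + r_len) cover [iova, iova + size)? */
	static bool region_contains(u64 r_iova, u64 r_len, u64 iova, u64 size)
	{
		return iova >= r_iova && iova + size <= r_iova + r_len;
	}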
/**
* afu_dma_pin_pages - pin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be pinned
*
* Pin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
+static int afu_dma_pin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
int npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
int ret, pinned;
ret = account_locked_vm(current->mm, npages, true);
@@ -73,17 +73,17 @@ unlock_vm:
/**
* afu_dma_unpin_pages - unpin pages of given dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma memory region to be unpinned
*
* Unpin all the pages of given dfl_afu_dma_region.
* Return 0 for success or negative error code.
*/
-static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
+static void afu_dma_unpin_pages(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
long npages = region->length >> PAGE_SHIFT;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
unpin_user_pages(region->pages, npages);
kfree(region->pages);
@@ -133,20 +133,20 @@ static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
/**
* afu_dma_region_add - add given dma region to rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be added
*
* Return 0 for success, -EEXIST if dma region has already been added.
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
+static int afu_dma_region_add(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node **new, *parent = NULL;
- dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "add region (iova = %llx)\n",
(unsigned long long)region->iova);
new = &afu->dma_regions.rb_node;
@@ -177,50 +177,50 @@ static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
/**
* afu_dma_region_remove - remove given dma region from rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @region: dma region to be removed
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
+static void afu_dma_region_remove(struct dfl_feature_dev_data *fdata,
struct dfl_afu_dma_region *region)
{
struct dfl_afu *afu;
- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);
- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
rb_erase(&region->node, &afu->dma_regions);
}
/**
* afu_dma_region_destroy - destroy all regions in rbtree
- * @pdata: feature device platform data
+ * @fdata: feature dev data
*
- * Needs to be called with pdata->lock heold.
+ * Needs to be called with fdata->lock held.
*/
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = rb_first(&afu->dma_regions);
struct dfl_afu_dma_region *region;
while (node) {
region = container_of(node, struct dfl_afu_dma_region, node);
- dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
+ dev_dbg(&fdata->dev->dev, "del region (iova = %llx)\n",
(unsigned long long)region->iova);
rb_erase(node, &afu->dma_regions);
if (region->iova)
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length,
DMA_BIDIRECTIONAL);
if (region->pages)
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
node = rb_next(node);
kfree(region);
@@ -229,7 +229,7 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
/**
* afu_dma_region_find - find the dma region from rbtree based on iova and size
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma memory area
* @size: size of the dma memory area
*
@@ -239,14 +239,14 @@ void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
* [@iova, @iova+size)
* If nothing is matched returns NULL.
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
+afu_dma_region_find(struct dfl_feature_dev_data *fdata, u64 iova, u64 size)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct rb_node *node = afu->dma_regions.rb_node;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
while (node) {
struct dfl_afu_dma_region *region;
@@ -276,20 +276,20 @@ afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
/**
* afu_dma_region_find_iova - find the dma region from rbtree by iova
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: address of the dma region
*
- * Needs to be called with pdata->lock held.
+ * Needs to be called with fdata->lock held.
*/
static struct dfl_afu_dma_region *
-afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
+afu_dma_region_find_iova(struct dfl_feature_dev_data *fdata, u64 iova)
{
- return afu_dma_region_find(pdata, iova, 0);
+ return afu_dma_region_find(fdata, iova, 0);
}
/**
* afu_dma_map_region - map memory region for dma
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @user_addr: address of the memory region
* @length: size of the memory region
* @iova: pointer of iova address
@@ -298,9 +298,10 @@ afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
* of the memory region via @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_dma_region *region;
int ret;
@@ -323,47 +324,47 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
region->length = length;
/* Pin the user memory region */
- ret = afu_dma_pin_pages(pdata, region);
+ ret = afu_dma_pin_pages(fdata, region);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to pin memory region\n");
+ dev_err(dev, "failed to pin memory region\n");
goto free_region;
}
/* Only accept continuous pages, return error else */
if (!afu_dma_check_continuous_pages(region)) {
- dev_err(&pdata->dev->dev, "pages are not continuous\n");
+ dev_err(dev, "pages are not continuous\n");
ret = -EINVAL;
goto unpin_pages;
}
/* As pages are continuous then start to do DMA mapping */
- region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
+ region->iova = dma_map_page(dfl_fpga_fdata_to_parent(fdata),
region->pages[0], 0,
region->length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
- dev_err(&pdata->dev->dev, "failed to map for dma\n");
+ if (dma_mapping_error(dfl_fpga_fdata_to_parent(fdata), region->iova)) {
+ dev_err(dev, "failed to map for dma\n");
ret = -EFAULT;
goto unpin_pages;
}
*iova = region->iova;
- mutex_lock(&pdata->lock);
- ret = afu_dma_region_add(pdata, region);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = afu_dma_region_add(fdata, region);
+ mutex_unlock(&fdata->lock);
if (ret) {
- dev_err(&pdata->dev->dev, "failed to add dma region\n");
+ dev_err(dev, "failed to add dma region\n");
goto unmap_dma;
}
return 0;
unmap_dma:
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
free_region:
kfree(region);
return ret;
@@ -371,34 +372,34 @@ free_region:
/**
* afu_dma_unmap_region - unmap dma memory region
- * @pdata: feature device platform data
+ * @fdata: feature dev data
* @iova: dma address of the region
*
* Unmap dma memory region based on @iova.
* Return 0 for success, otherwise error code.
*/
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova)
{
struct dfl_afu_dma_region *region;
- mutex_lock(&pdata->lock);
- region = afu_dma_region_find_iova(pdata, iova);
+ mutex_lock(&fdata->lock);
+ region = afu_dma_region_find_iova(fdata, iova);
if (!region) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EINVAL;
}
if (region->in_use) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}
- afu_dma_region_remove(pdata, region);
- mutex_unlock(&pdata->lock);
+ afu_dma_region_remove(fdata, region);
+ mutex_unlock(&fdata->lock);
- dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
+ dma_unmap_page(dfl_fpga_fdata_to_parent(fdata),
region->iova, region->length, DMA_BIDIRECTIONAL);
- afu_dma_unpin_pages(pdata, region);
+ afu_dma_unpin_pages(fdata, region);
kfree(region);
return 0;
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index ab7be6217368..0f392d1f6d45 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -28,37 +28,36 @@
#define ERROR_MASK GENMASK_ULL(63, 0)
/* mask or unmask port errors by the error mask register. */
-static void __afu_port_err_mask(struct device *dev, bool mask)
+static void __afu_port_err_mask(struct dfl_feature_dev_data *fdata, bool mask)
{
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
}
static void afu_port_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
- mutex_lock(&pdata->lock);
- __afu_port_err_mask(dev, mask);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ __afu_port_err_mask(fdata, mask);
+ mutex_unlock(&fdata->lock);
}
/* clear port errors. */
static int afu_port_err_clear(struct device *dev, u64 err)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
- struct platform_device *pdev = to_platform_device(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base_err, *base_hdr;
int enable_ret = 0, ret = -EBUSY;
u64 v;
- base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
- base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base_err = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
+ base_hdr = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
/*
* clear Port Errors
@@ -80,12 +79,12 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}
/* Halt Port by keeping Port in reset */
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
goto done;
/* Mask all errors */
- __afu_port_err_mask(dev, true);
+ __afu_port_err_mask(fdata, true);
/* Clear errors if err input matches with current port errors.*/
v = readq(base_err + PORT_ERROR);
@@ -102,28 +101,28 @@ static int afu_port_err_clear(struct device *dev, u64 err)
}
/* Clear mask */
- __afu_port_err_mask(dev, false);
+ __afu_port_err_mask(fdata, false);
/* Enable the Port by clearing the reset */
- enable_ret = __afu_port_enable(pdev);
+ enable_ret = __afu_port_enable(fdata);
done:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return enable_ret ? enable_ret : ret;
}
static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -146,15 +145,15 @@ static DEVICE_ATTR_RW(errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 error;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
error = readq(base + PORT_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
@@ -164,16 +163,16 @@ static ssize_t first_malformed_req_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 req0, req1;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
req0 = readq(base + PORT_MALFORMED_REQ0);
req1 = readq(base + PORT_MALFORMED_REQ1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%016llx%016llx\n",
(unsigned long long)req1, (unsigned long long)req0);
@@ -191,12 +190,14 @@ static umode_t port_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_ERROR))
return 0;
return attr->mode;
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 6b97c073849e..3bf8e7338dbe 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -26,7 +26,7 @@
/**
* __afu_port_enable - enable a port by clear reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Enable Port by clear the port soft reset bit, which is set by default.
* The AFU is unable to respond to any MMIO access while in reset.
@@ -35,18 +35,17 @@
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_enable(struct platform_device *pdev)
+int __afu_port_enable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;
- WARN_ON(!pdata->disable_count);
+ WARN_ON(!fdata->disable_count);
- if (--pdata->disable_count != 0)
+ if (--fdata->disable_count != 0)
return 0;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
/* Clear port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -60,7 +59,8 @@ int __afu_port_enable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
!(v & PORT_CTRL_SFTRST_ACK),
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to enable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to enable device\n");
return -ETIMEDOUT;
}
@@ -69,22 +69,21 @@ int __afu_port_enable(struct platform_device *pdev)
/**
* __afu_port_disable - disable a port by hold reset
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
*
* Disable Port by setting the port soft reset bit, it puts the port into reset.
*
* The caller needs to hold lock for protection.
*/
-int __afu_port_disable(struct platform_device *pdev)
+int __afu_port_disable(struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
u64 v;
- if (pdata->disable_count++ != 0)
+ if (fdata->disable_count++ != 0)
return 0;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
/* Set port soft reset */
v = readq(base + PORT_HDR_CTRL);
@@ -99,7 +98,8 @@ int __afu_port_disable(struct platform_device *pdev)
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
- dev_err(&pdev->dev, "timeout, failure to disable device\n");
+ dev_err(fdata->dfl_cdev->parent,
+ "timeout, failure to disable device\n");
return -ETIMEDOUT;
}
@@ -118,34 +118,34 @@ int __afu_port_disable(struct platform_device *pdev)
* (disabled). Any attempts on MMIO access to AFU while in reset, will
* result errors reported via port error reporting sub feature (if present).
*/
-static int __port_reset(struct platform_device *pdev)
+static int __port_reset(struct dfl_feature_dev_data *fdata)
{
int ret;
- ret = __afu_port_disable(pdev);
+ ret = __afu_port_disable(fdata);
if (ret)
return ret;
- return __afu_port_enable(pdev);
+ return __afu_port_enable(fdata);
}
static int port_reset(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
int ret;
- mutex_lock(&pdata->lock);
- ret = __port_reset(pdev);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = __port_reset(fdata);
+ mutex_unlock(&fdata->lock);
return ret;
}
-static int port_get_id(struct platform_device *pdev)
+static int port_get_id(struct dfl_feature_dev_data *fdata)
{
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}
@@ -153,7 +153,8 @@ static int port_get_id(struct platform_device *pdev)
static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- int id = port_get_id(to_platform_device(dev));
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
+ int id = port_get_id(fdata);
return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
@@ -162,15 +163,15 @@ static DEVICE_ATTR_RO(id);
static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}
@@ -179,7 +180,7 @@ static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool ltr;
u64 v;
@@ -187,14 +188,14 @@ ltr_store(struct device *dev, struct device_attribute *attr,
if (kstrtobool(buf, &ltr))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_CTRL);
v &= ~PORT_CTRL_LATENCY;
v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
writeq(v, base + PORT_HDR_CTRL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -203,15 +204,15 @@ static DEVICE_ATTR_RW(ltr);
static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}
@@ -220,18 +221,18 @@ static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;
if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -241,15 +242,15 @@ static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}
@@ -258,18 +259,18 @@ static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
bool clear;
if (kstrtobool(buf, &clear) || !clear)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -278,15 +279,15 @@ static DEVICE_ATTR_RW(ap2_event);
static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + PORT_HDR_STS);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
@@ -296,18 +297,18 @@ static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freq_cmd;
void __iomem *base;
if (kstrtou64(buf, 0, &userclk_freq_cmd))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -317,18 +318,18 @@ static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntr_cmd;
void __iomem *base;
if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -338,15 +339,15 @@ static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqsts;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
@@ -356,15 +357,15 @@ static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
u64 userclk_freqcntrsts;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n",
(unsigned long long)userclk_freqcntrsts);
@@ -388,10 +389,12 @@ static umode_t port_hdr_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
umode_t mode = attr->mode;
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
+ fdata = to_dfl_feature_dev_data(dev);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);
if (dfl_feature_revision(base) > 0) {
/*
@@ -456,21 +459,21 @@ static const struct dfl_feature_ops port_hdr_ops = {
static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 guidl, guidh;
- base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
+ base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_AFU);
- mutex_lock(&pdata->lock);
- if (pdata->disable_count) {
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ if (fdata->disable_count) {
+ mutex_unlock(&fdata->lock);
return -EBUSY;
}
guidl = readq(base + GUID_L);
guidh = readq(base + GUID_H);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
@@ -485,12 +488,14 @@ static umode_t port_afu_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
+ if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_AFU))
return 0;
return attr->mode;
@@ -504,9 +509,10 @@ static const struct attribute_group port_afu_group = {
static int port_afu_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];
- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_AFU,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -525,9 +531,10 @@ static const struct dfl_feature_ops port_afu_ops = {
static int port_stp_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct resource *res = &pdev->resource[feature->resource_index];
- return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
+ return afu_mmio_region_add(fdata,
DFL_PORT_REGION_INDEX_STP,
resource_size(res), res->start,
DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
@@ -595,22 +602,18 @@ static struct dfl_feature_driver port_feature_drvs[] = {
static int afu_open(struct inode *inode, struct file *filp)
{
- struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
+ struct platform_device *fdev = fdata->dev;
int ret;
- pdata = dev_get_platdata(&fdev->dev);
- if (WARN_ON(!pdata))
- return -ENODEV;
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
+ dfl_feature_dev_use_count(fdata));
filp->private_data = fdev;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -618,29 +621,29 @@ static int afu_open(struct inode *inode, struct file *filp)
static int afu_release(struct inode *inode, struct file *filp)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
- if (!dfl_feature_dev_use_count(pdata)) {
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata)) {
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- __port_reset(pdev);
- afu_dma_region_destroy(pdata);
+ __port_reset(fdata);
+ afu_dma_region_destroy(fdata);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
}
-static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+static long afu_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
/* No extension support for now */
@@ -648,7 +651,7 @@ static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
}
static long
-afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_get_info(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_info info;
struct dfl_afu *afu;
@@ -662,12 +665,12 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
if (info.argsz < minsz)
return -EINVAL;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
info.flags = 0;
info.num_regions = afu->num_regions;
info.num_umsgs = afu->num_umsgs;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
@@ -675,7 +678,7 @@ afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
return 0;
}
-static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
+static long afu_ioctl_get_region_info(struct dfl_feature_dev_data *fdata,
void __user *arg)
{
struct dfl_fpga_port_region_info rinfo;
@@ -691,7 +694,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
if (rinfo.argsz < minsz || rinfo.padding)
return -EINVAL;
- ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
+ ret = afu_mmio_region_get_by_index(fdata, rinfo.index, &region);
if (ret)
return ret;
@@ -706,7 +709,7 @@ static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
}
static long
-afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_dma_map(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_dma_map map;
unsigned long minsz;
@@ -720,16 +723,16 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
if (map.argsz < minsz || map.flags)
return -EINVAL;
- ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
+ ret = afu_dma_map_region(fdata, map.user_addr, map.length, &map.iova);
if (ret)
return ret;
if (copy_to_user(arg, &map, sizeof(map))) {
- afu_dma_unmap_region(pdata, map.iova);
+ afu_dma_unmap_region(fdata, map.iova);
return -EFAULT;
}
- dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
+ dev_dbg(&fdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
(unsigned long long)map.user_addr,
(unsigned long long)map.length,
(unsigned long long)map.iova);
@@ -738,7 +741,7 @@ afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
}
static long
-afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
+afu_ioctl_dma_unmap(struct dfl_feature_dev_data *fdata, void __user *arg)
{
struct dfl_fpga_port_dma_unmap unmap;
unsigned long minsz;
@@ -751,33 +754,33 @@ afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
if (unmap.argsz < minsz || unmap.flags)
return -EINVAL;
- return afu_dma_unmap_region(pdata, unmap.iova);
+ return afu_dma_unmap_region(fdata, unmap.iova);
}
static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
struct dfl_feature *f;
long ret;
dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
switch (cmd) {
case DFL_FPGA_GET_API_VERSION:
return DFL_FPGA_API_VERSION;
case DFL_FPGA_CHECK_EXTENSION:
- return afu_ioctl_check_extension(pdata, arg);
+ return afu_ioctl_check_extension(fdata, arg);
case DFL_FPGA_PORT_GET_INFO:
- return afu_ioctl_get_info(pdata, (void __user *)arg);
+ return afu_ioctl_get_info(fdata, (void __user *)arg);
case DFL_FPGA_PORT_GET_REGION_INFO:
- return afu_ioctl_get_region_info(pdata, (void __user *)arg);
+ return afu_ioctl_get_region_info(fdata, (void __user *)arg);
case DFL_FPGA_PORT_DMA_MAP:
- return afu_ioctl_dma_map(pdata, (void __user *)arg);
+ return afu_ioctl_dma_map(fdata, (void __user *)arg);
case DFL_FPGA_PORT_DMA_UNMAP:
- return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
+ return afu_ioctl_dma_unmap(fdata, (void __user *)arg);
default:
/*
* Let the sub-feature's ioctl function handle the cmd.
@@ -785,7 +788,7 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* Sub-feature's ioctl returns -ENODEV when the cmd is not
* handled in this sub feature, and returns 0 or another
* error code if the cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f)
+ dfl_fpga_dev_for_each_feature(fdata, f)
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -805,8 +808,8 @@ static const struct vm_operations_struct afu_vma_ops = {
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct platform_device *pdev = filp->private_data;
- struct dfl_feature_platform_data *pdata;
u64 size = vma->vm_end - vma->vm_start;
+ struct dfl_feature_dev_data *fdata;
struct dfl_afu_mmio_region region;
u64 offset;
int ret;
@@ -814,10 +817,10 @@ static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- pdata = dev_get_platdata(&pdev->dev);
+ fdata = to_dfl_feature_dev_data(&pdev->dev);
offset = vma->vm_pgoff << PAGE_SHIFT;
- ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
+ ret = afu_mmio_region_get_by_offset(fdata, offset, size, &region);
if (ret)
return ret;
@@ -851,46 +854,45 @@ static const struct file_operations afu_fops = {
static int afu_dev_init(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_afu *afu;
afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
if (!afu)
return -ENOMEM;
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, afu);
- afu_mmio_region_init(pdata);
- afu_dma_region_init(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, afu);
+ afu_mmio_region_init(fdata);
+ afu_dma_region_init(fdata);
+ mutex_unlock(&fdata->lock);
return 0;
}
static int afu_dev_destroy(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- afu_mmio_region_destroy(pdata);
- afu_dma_region_destroy(pdata);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ afu_mmio_region_destroy(fdata);
+ afu_dma_region_destroy(fdata);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
return 0;
}
-static int port_enable_set(struct platform_device *pdev, bool enable)
+static int port_enable_set(struct dfl_feature_dev_data *fdata, bool enable)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret;
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
if (enable)
- ret = __afu_port_enable(pdev);
+ ret = __afu_port_enable(fdata);
else
- ret = __afu_port_disable(pdev);
- mutex_unlock(&pdata->lock);
+ ret = __afu_port_disable(fdata);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -947,12 +949,12 @@ static const struct attribute_group *afu_dev_groups[] = {
};
static struct platform_driver afu_driver = {
- .driver = {
- .name = DFL_FPGA_FEATURE_DEV_PORT,
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_PORT,
.dev_groups = afu_dev_groups,
},
- .probe = afu_probe,
- .remove_new = afu_remove,
+ .probe = afu_probe,
+ .remove = afu_remove,
};
static int __init afu_init(void)
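/*
 * Editor's sketch (not part of the patch): the shape of the conversion
 * repeated throughout this series, shown as one hypothetical sysfs
 * handler. It assumes the dfl.h definitions of struct
 * dfl_feature_dev_data and to_dfl_feature_dev_data(), which are
 * introduced outside the hunks shown here.
 */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* was: struct dfl_feature_platform_data *pdata = dev_get_platdata(dev); */
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 v;

	/* feature lookups now take fdata instead of the struct device */
	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);	/* the lock also moved into fdata */
	v = readq(base + PORT_HDR_CTRL);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)v);
}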
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
index 2e7b41629406..b11a5b21e666 100644
--- a/drivers/fpga/dfl-afu-region.c
+++ b/drivers/fpga/dfl-afu-region.c
@@ -12,11 +12,11 @@
/**
* afu_mmio_region_init - init function for afu mmio region support
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data.
*/
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
INIT_LIST_HEAD(&afu->regions);
}
@@ -39,7 +39,7 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
/**
* afu_mmio_region_add - add a mmio region to given feature dev.
*
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data.
* @region_index: region index.
* @region_size: region size.
* @phys: physical address of this region.
@@ -47,14 +47,15 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags)
{
+ struct device *dev = &fdata->dev->dev;
struct dfl_afu_mmio_region *region;
struct dfl_afu *afu;
int ret = 0;
- region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
+ region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;
@@ -63,13 +64,13 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
region->phys = phys;
region->flags = flags;
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ afu = dfl_fpga_fdata_get_private(fdata);
/* check if @region_index already exists */
if (get_region_by_index(afu, region_index)) {
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
ret = -EEXIST;
goto exit;
}
@@ -80,37 +81,37 @@ int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
afu->region_cur_offset += region_size;
afu->num_regions++;
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
exit:
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(dev, region);
return ret;
}
/**
* afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data.
*/
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata)
{
- struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_afu *afu = dfl_fpga_fdata_get_private(fdata);
struct dfl_afu_mmio_region *tmp, *region;
list_for_each_entry_safe(region, tmp, &afu->regions, node)
- devm_kfree(&pdata->dev->dev, region);
+ devm_kfree(&fdata->dev->dev, region);
}
/**
* afu_mmio_region_get_by_index - find an afu region by index.
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data.
* @region_index: region index.
* @pregion: ptr to region for result.
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion)
{
@@ -118,8 +119,8 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
region = get_region_by_index(afu, region_index);
if (!region) {
ret = -EINVAL;
@@ -127,14 +128,14 @@ int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
}
*pregion = *region;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
/**
* afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
*
- * @pdata: afu platform device's pdata.
+ * @fdata: afu feature dev data.
* @offset: region offset from start of the device fd.
* @size: region size.
* @pregion: ptr to region for result.
@@ -144,7 +145,7 @@ exit:
*
* Return: 0 on success, negative error code otherwise.
*/
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion)
{
@@ -152,8 +153,8 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
struct dfl_afu *afu;
int ret = 0;
- mutex_lock(&pdata->lock);
- afu = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ afu = dfl_fpga_fdata_get_private(fdata);
for_each_region(region, afu)
if (region->offset <= offset &&
region->offset + region->size >= offset + size) {
@@ -162,6 +163,6 @@ int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
}
ret = -EINVAL;
exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
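/*
 * Editor's sketch (not part of the patch): the containment test behind
 * afu_mmio_region_get_by_offset() above. A request [offset, offset + size)
 * matches only when it fits entirely inside a single region; this is a
 * hypothetical stand-alone helper for illustration.
 */
static bool region_covers(u64 region_offset, u64 region_size,
			  u64 offset, u64 size)
{
	return region_offset <= offset &&
	       region_offset + region_size >= offset + size;
}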
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index 7bef3e300aa2..03be4f0969c7 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -76,27 +76,27 @@ struct dfl_afu {
struct rb_root dma_regions;
};
-/* hold pdata->lock when call __afu_port_enable/disable */
-int __afu_port_enable(struct platform_device *pdev);
-int __afu_port_disable(struct platform_device *pdev);
+/* hold fdata->lock when calling __afu_port_enable/disable */
+int __afu_port_enable(struct dfl_feature_dev_data *fdata);
+int __afu_port_disable(struct dfl_feature_dev_data *fdata);
-void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_init(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_add(struct dfl_feature_dev_data *fdata,
u32 region_index, u64 region_size, u64 phys, u32 flags);
-void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
+void afu_mmio_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_mmio_region_get_by_index(struct dfl_feature_dev_data *fdata,
u32 region_index,
struct dfl_afu_mmio_region *pregion);
-int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
+int afu_mmio_region_get_by_offset(struct dfl_feature_dev_data *fdata,
u64 offset, u64 size,
struct dfl_afu_mmio_region *pregion);
-void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
-void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
-int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
+void afu_dma_region_init(struct dfl_feature_dev_data *fdata);
+void afu_dma_region_destroy(struct dfl_feature_dev_data *fdata);
+int afu_dma_map_region(struct dfl_feature_dev_data *fdata,
u64 user_addr, u64 length, u64 *iova);
-int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
+int afu_dma_unmap_region(struct dfl_feature_dev_data *fdata, u64 iova);
struct dfl_afu_dma_region *
-afu_dma_region_find(struct dfl_feature_platform_data *pdata,
+afu_dma_region_find(struct dfl_feature_dev_data *fdata,
u64 iova, u64 size);
extern const struct dfl_feature_ops port_err_ops;
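/*
 * Editor's sketch (not part of the patch): per the comment above,
 * fdata->lock is owned by the caller around __afu_port_enable/disable(),
 * exactly as port_enable_set() in dfl-afu-main.c does. Hypothetical
 * helper showing the expected caller pattern:
 */
static int example_port_cycle(struct dfl_feature_dev_data *fdata)
{
	int ret;

	mutex_lock(&fdata->lock);
	ret = __afu_port_disable(fdata);
	if (!ret)
		ret = __afu_port_enable(fdata);
	mutex_unlock(&fdata->lock);

	return ret;
}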
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 0b01b3895277..28b0f9d062ac 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -22,34 +22,34 @@
struct fme_br_priv {
struct dfl_fme_br_pdata *pdata;
struct dfl_fpga_port_ops *port_ops;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
};
static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
{
struct fme_br_priv *priv = bridge->priv;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *port_fdata;
struct dfl_fpga_port_ops *ops;
- if (!priv->port_pdev) {
- port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
- &priv->pdata->port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ if (!priv->port_fdata) {
+ port_fdata = dfl_fpga_cdev_find_port_data(priv->pdata->cdev,
+ &priv->pdata->port_id,
+ dfl_fpga_check_port_id);
+ if (!port_fdata)
return -ENODEV;
- priv->port_pdev = port_pdev;
+ priv->port_fdata = port_fdata;
}
- if (priv->port_pdev && !priv->port_ops) {
- ops = dfl_fpga_port_ops_get(priv->port_pdev);
+ if (priv->port_fdata && !priv->port_ops) {
+ ops = dfl_fpga_port_ops_get(priv->port_fdata);
if (!ops || !ops->enable_set)
return -ENOENT;
priv->port_ops = ops;
}
- return priv->port_ops->enable_set(priv->port_pdev, enable);
+ return priv->port_ops->enable_set(priv->port_fdata, enable);
}
static const struct fpga_bridge_ops fme_bridge_ops = {
@@ -85,18 +85,16 @@ static void fme_br_remove(struct platform_device *pdev)
fpga_bridge_unregister(br);
- if (priv->port_pdev)
- put_device(&priv->port_pdev->dev);
if (priv->port_ops)
dfl_fpga_port_ops_put(priv->port_ops);
}
static struct platform_driver fme_br_driver = {
- .driver = {
- .name = DFL_FPGA_FME_BRIDGE,
+ .driver = {
+ .name = DFL_FPGA_FME_BRIDGE,
},
- .probe = fme_br_probe,
- .remove_new = fme_br_remove,
+ .probe = fme_br_probe,
+ .remove = fme_br_remove,
};
module_platform_driver(fme_br_driver);
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index 51c2892ec06d..f00d949efe69 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -42,15 +42,15 @@
static ssize_t pcie0_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE0_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -59,7 +59,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -67,9 +67,9 @@ static ssize_t pcie0_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);
v = readq(base + PCIE0_ERROR);
@@ -79,7 +79,7 @@ static ssize_t pcie0_errors_store(struct device *dev,
ret = -EINVAL;
writeq(0ULL, base + PCIE0_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);
@@ -87,15 +87,15 @@ static DEVICE_ATTR_RW(pcie0_errors);
static ssize_t pcie1_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + PCIE1_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -104,7 +104,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
int ret = 0;
u64 v, val;
@@ -112,9 +112,9 @@ static ssize_t pcie1_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);
v = readq(base + PCIE1_ERROR);
@@ -124,7 +124,7 @@ static ssize_t pcie1_errors_store(struct device *dev,
ret = -EINVAL;
writeq(0ULL, base + PCIE1_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);
@@ -132,9 +132,10 @@ static DEVICE_ATTR_RW(pcie1_errors);
static ssize_t nonfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_NONFAT_ERROR));
@@ -144,9 +145,10 @@ static DEVICE_ATTR_RO(nonfatal_errors);
static ssize_t catfatal_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
return sprintf(buf, "0x%llx\n",
(unsigned long long)readq(base + RAS_CATFAT_ERROR));
@@ -156,15 +158,15 @@ static DEVICE_ATTR_RO(catfatal_errors);
static ssize_t inject_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n",
(unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
@@ -174,7 +176,7 @@ static ssize_t inject_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u8 inject_error;
u64 v;
@@ -185,14 +187,14 @@ static ssize_t inject_errors_store(struct device *dev,
if (inject_error & ~INJECT_ERROR_MASK)
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
v = readq(base + RAS_ERROR_INJECT);
v &= ~INJECT_ERROR_MASK;
v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
writeq(v, base + RAS_ERROR_INJECT);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return count;
}
@@ -201,15 +203,15 @@ static DEVICE_ATTR_RW(inject_errors);
static ssize_t fme_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -218,7 +220,7 @@ static ssize_t fme_errors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v, val;
int ret = 0;
@@ -226,9 +228,9 @@ static ssize_t fme_errors_store(struct device *dev,
if (kstrtou64(buf, 0, &val))
return -EINVAL;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);
v = readq(base + FME_ERROR);
@@ -240,7 +242,7 @@ static ssize_t fme_errors_store(struct device *dev,
/* Workaround: disable MBP_ERROR if feature revision is 0 */
writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
base + FME_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);
@@ -248,15 +250,15 @@ static DEVICE_ATTR_RW(fme_errors);
static ssize_t first_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_FIRST_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -265,15 +267,15 @@ static DEVICE_ATTR_RO(first_error);
static ssize_t next_error_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 value;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
value = readq(base + FME_NEXT_ERROR);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
@@ -295,12 +297,14 @@ static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature_dev_data *fdata;
+ fdata = to_dfl_feature_dev_data(dev);
/*
* sysfs entries are visible only if related private feature is
* enumerated.
*/
- if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
+ if (!dfl_get_feature_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR))
return 0;
return attr->mode;
@@ -314,12 +318,12 @@ const struct attribute_group fme_global_err_group = {
static void fme_err_mask(struct device *dev, bool mask)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
/* Workaround: keep MBP_ERROR always masked if revision is 0 */
if (dfl_feature_revision(base))
@@ -332,7 +336,7 @@ static void fme_err_mask(struct device *dev, bool mask)
writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
}
static int fme_global_err_init(struct platform_device *pdev,
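/*
 * Editor's sketch (not part of the patch): the is_visible idiom the
 * hunks above converge on - resolve fdata from the kobject, then hide
 * the whole sysfs group unless the private feature was enumerated.
 */
static umode_t example_attrs_visible(struct kobject *kobj,
				     struct attribute *attr, int n)
{
	struct dfl_feature_dev_data *fdata =
		to_dfl_feature_dev_data(kobj_to_dev(kobj));

	if (!dfl_get_feature_by_id(fdata, FME_FEATURE_ID_GLOBAL_ERR))
		return 0;

	return attr->mode;
}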
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 864924f68f5e..8aca2fb20e87 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -28,10 +28,11 @@
static ssize_t ports_num_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -47,10 +48,11 @@ static DEVICE_ATTR_RO(ports_num);
static ssize_t bitstream_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_BITSTREAM_ID);
@@ -65,10 +67,11 @@ static DEVICE_ATTR_RO(bitstream_id);
static ssize_t bitstream_metadata_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_BITSTREAM_MD);
@@ -79,10 +82,11 @@ static DEVICE_ATTR_RO(bitstream_metadata);
static ssize_t cache_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -94,10 +98,11 @@ static DEVICE_ATTR_RO(cache_size);
static ssize_t fabric_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -109,10 +114,11 @@ static DEVICE_ATTR_RO(fabric_version);
static ssize_t socket_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_CAP);
@@ -135,10 +141,10 @@ static const struct attribute_group fme_hdr_group = {
.attrs = fme_hdr_attrs,
};
-static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
+static long fme_hdr_ioctl_release_port(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
int port_id;
if (get_user(port_id, (int __user *)arg))
@@ -147,10 +153,10 @@ static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
return dfl_fpga_cdev_release_port(cdev, port_id);
}
-static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
+static long fme_hdr_ioctl_assign_port(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
- struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
+ struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
int port_id;
if (get_user(port_id, (int __user *)arg))
@@ -163,13 +169,13 @@ static long fme_hdr_ioctl(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned int cmd, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
switch (cmd) {
case DFL_FPGA_FME_PORT_RELEASE:
- return fme_hdr_ioctl_release_port(pdata, arg);
+ return fme_hdr_ioctl_release_port(fdata, arg);
case DFL_FPGA_FME_PORT_ASSIGN:
- return fme_hdr_ioctl_assign_port(pdata, arg);
+ return fme_hdr_ioctl_assign_port(fdata, arg);
}
return -ENODEV;
@@ -411,14 +417,14 @@ static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev->parent);
struct dfl_feature *feature = dev_get_drvdata(dev);
int ret = 0;
u64 v;
val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
switch (attr) {
case hwmon_power_max:
@@ -438,7 +444,7 @@ static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
break;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
@@ -589,7 +595,7 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
},
};
-static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
+static long fme_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
unsigned long arg)
{
/* No extension support for now */
@@ -598,49 +604,46 @@ static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
static int fme_open(struct inode *inode, struct file *filp)
{
- struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
+ struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
+ struct platform_device *fdev = fdata->dev;
int ret;
- if (WARN_ON(!pdata))
- return -ENODEV;
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
if (!ret) {
dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
- dfl_feature_dev_use_count(pdata));
- filp->private_data = pdata;
+ dfl_feature_dev_use_count(fdata));
+ filp->private_data = fdata;
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
static int fme_release(struct inode *inode, struct file *filp)
{
- struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = filp->private_data;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *feature;
dev_dbg(&pdev->dev, "Device File Release\n");
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
- if (!dfl_feature_dev_use_count(pdata))
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ if (!dfl_feature_dev_use_count(fdata))
+ dfl_fpga_dev_for_each_feature(fdata, feature)
dfl_fpga_set_irq_triggers(feature, 0,
feature->nr_irqs, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
}
static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = filp->private_data;
- struct platform_device *pdev = pdata->dev;
+ struct dfl_feature_dev_data *fdata = filp->private_data;
+ struct platform_device *pdev = fdata->dev;
struct dfl_feature *f;
long ret;
@@ -650,7 +653,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case DFL_FPGA_GET_API_VERSION:
return DFL_FPGA_API_VERSION;
case DFL_FPGA_CHECK_EXTENSION:
- return fme_ioctl_check_extension(pdata, arg);
+ return fme_ioctl_check_extension(fdata, arg);
default:
/*
* Let the sub-feature's ioctl function handle the cmd.
@@ -658,7 +661,7 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* Sub-feature's ioctl returns -ENODEV when the cmd is not
* handled in this sub feature, and returns 0 or another
* error code if the cmd is handled.
*/
- dfl_fpga_dev_for_each_feature(pdata, f) {
+ dfl_fpga_dev_for_each_feature(fdata, f) {
if (f->ops && f->ops->ioctl) {
ret = f->ops->ioctl(pdev, f, cmd, arg);
if (ret != -ENODEV)
@@ -672,27 +675,27 @@ static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static int fme_dev_init(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme *fme;
fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
if (!fme)
return -ENOMEM;
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, fme);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, fme);
+ mutex_unlock(&fdata->lock);
return 0;
}
static void fme_dev_destroy(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
- dfl_fpga_pdata_set_private(pdata, NULL);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_fpga_fdata_set_private(fdata, NULL);
+ mutex_unlock(&fdata->lock);
}
static const struct file_operations fme_fops = {
@@ -742,12 +745,12 @@ static const struct attribute_group *fme_dev_groups[] = {
};
static struct platform_driver fme_driver = {
- .driver = {
- .name = DFL_FPGA_FEATURE_DEV_FME,
+ .driver = {
+ .name = DFL_FPGA_FEATURE_DEV_FME,
.dev_groups = fme_dev_groups,
},
- .probe = fme_probe,
- .remove_new = fme_remove,
+ .probe = fme_probe,
+ .remove = fme_remove,
};
module_platform_driver(fme_driver);
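/*
 * Editor's sketch (not part of the patch): the open() flow now shared by
 * the FME and AFU char devices - fdata is resolved straight from the
 * inode via dfl_fpga_inode_to_feature_dev_data() (introduced outside
 * these hunks), which is why the old WARN_ON(!pdata) check goes away.
 * The AFU variant stashes fdata->dev in private_data instead of fdata.
 */
static int example_open(struct inode *inode, struct file *filp)
{
	struct dfl_feature_dev_data *fdata =
		dfl_fpga_inode_to_feature_dev_data(inode);
	int ret;

	mutex_lock(&fdata->lock);
	ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
	if (!ret)
		filp->private_data = fdata;
	mutex_unlock(&fdata->lock);

	return ret;
}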
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index cdcf6dea4cc9..b878b260af38 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -65,7 +65,7 @@ static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
void __user *argp = (void __user *)arg;
struct dfl_fpga_fme_port_pr port_pr;
struct fpga_image_info *info;
@@ -87,8 +87,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
return -EINVAL;
/* get fme header region */
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
/* check port id */
v = readq(fme_hdr + FME_HDR_CAP);
@@ -123,8 +122,8 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
- mutex_lock(&pdata->lock);
- fme = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ fme = dfl_fpga_fdata_get_private(fdata);
/* fme device has been unregistered. */
if (!fme) {
ret = -EINVAL;
@@ -156,7 +155,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg)
put_device(&region->dev);
unlock_exit:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
free_exit:
vfree(buf);
return ret;
@@ -164,16 +163,16 @@ free_exit:
/**
* dfl_fme_create_mgr - create fpga mgr platform device as child device
+ * @fdata: fme feature dev data
* @feature: sub feature info
- * @pdata: fme platform_device's pdata
*
* Return: mgr platform device if successful, and error code otherwise.
*/
static struct platform_device *
-dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_mgr(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *mgr, *fme = pdata->dev;
+ struct platform_device *mgr, *fme = fdata->dev;
struct dfl_fme_mgr_pdata mgr_pdata;
int ret = -ENOMEM;
@@ -209,11 +208,11 @@ create_mgr_err:
/**
* dfl_fme_destroy_mgr - destroy fpga mgr platform device
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_mgr(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
platform_device_unregister(priv->mgr);
}
@@ -221,15 +220,15 @@ static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_bridge - create fme fpga bridge platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @port_id: port id for the bridge to be created.
*
* Return: bridge platform device if successful, and error code otherwise.
*/
static struct dfl_fme_bridge *
-dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
+dfl_fme_create_bridge(struct dfl_feature_dev_data *fdata, int port_id)
{
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_br_pdata br_pdata;
struct dfl_fme_bridge *fme_br;
int ret = -ENOMEM;
@@ -238,7 +237,7 @@ dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
if (!fme_br)
return ERR_PTR(ret);
- br_pdata.cdev = pdata->dfl_cdev;
+ br_pdata.cdev = fdata->dfl_cdev;
br_pdata.port_id = port_id;
fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
@@ -274,11 +273,11 @@ static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
/**
* dfl_fme_destroy_bridges - destroy all fpga bridge platform device
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_bridges(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_bridge *fbridge, *tmp;
list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
@@ -290,7 +289,7 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
/**
* dfl_fme_create_region - create fpga region platform device as child
*
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
* @mgr: mgr platform device needed for region
* @br: br platform device needed for region
* @port_id: port id
@@ -298,12 +297,12 @@ static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
* Return: fme region if successful, and error code otherwise.
*/
static struct dfl_fme_region *
-dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
+dfl_fme_create_region(struct dfl_feature_dev_data *fdata,
struct platform_device *mgr,
struct platform_device *br, int port_id)
{
struct dfl_fme_region_pdata region_pdata;
- struct device *dev = &pdata->dev->dev;
+ struct device *dev = &fdata->dev->dev;
struct dfl_fme_region *fme_region;
int ret = -ENOMEM;
@@ -353,11 +352,11 @@ static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
/**
* dfl_fme_destroy_regions - destroy all fme regions
- * @pdata: fme platform device's pdata
+ * @fdata: fme feature dev data
*/
-static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
+static void dfl_fme_destroy_regions(struct dfl_feature_dev_data *fdata)
{
- struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
+ struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
struct dfl_fme_region *fme_region, *tmp;
list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
@@ -369,7 +368,7 @@ static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
static int pr_mgmt_init(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fme_region *fme_region;
struct dfl_fme_bridge *fme_br;
struct platform_device *mgr;
@@ -378,18 +377,17 @@ static int pr_mgmt_init(struct platform_device *pdev,
int ret = -ENODEV, i = 0;
u64 fme_cap, port_offset;
- fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
- FME_FEATURE_ID_HEADER);
+ fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
- mutex_lock(&pdata->lock);
- priv = dfl_fpga_pdata_get_private(pdata);
+ mutex_lock(&fdata->lock);
+ priv = dfl_fpga_fdata_get_private(fdata);
/* Initialize the region and bridge sub device list */
INIT_LIST_HEAD(&priv->region_list);
INIT_LIST_HEAD(&priv->bridge_list);
/* Create fpga mgr platform device */
- mgr = dfl_fme_create_mgr(pdata, feature);
+ mgr = dfl_fme_create_mgr(fdata, feature);
if (IS_ERR(mgr)) {
dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
goto unlock;
@@ -405,7 +403,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
continue;
/* Create bridge for each port */
- fme_br = dfl_fme_create_bridge(pdata, i);
+ fme_br = dfl_fme_create_bridge(fdata, i);
if (IS_ERR(fme_br)) {
ret = PTR_ERR(fme_br);
goto destroy_region;
@@ -414,7 +412,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_br->node, &priv->bridge_list);
/* Create region for each port */
- fme_region = dfl_fme_create_region(pdata, mgr,
+ fme_region = dfl_fme_create_region(fdata, mgr,
fme_br->br, i);
if (IS_ERR(fme_region)) {
ret = PTR_ERR(fme_region);
@@ -423,30 +421,30 @@ static int pr_mgmt_init(struct platform_device *pdev,
list_add(&fme_region->node, &priv->region_list);
}
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return 0;
destroy_region:
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
unlock:
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
return ret;
}
static void pr_mgmt_uinit(struct platform_device *pdev,
struct dfl_feature *feature)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
- dfl_fme_destroy_regions(pdata);
- dfl_fme_destroy_bridges(pdata);
- dfl_fme_destroy_mgr(pdata);
- mutex_unlock(&pdata->lock);
+ dfl_fme_destroy_regions(fdata);
+ dfl_fme_destroy_bridges(fdata);
+ dfl_fme_destroy_mgr(fdata);
+ mutex_unlock(&fdata->lock);
}
static long fme_pr_ioctl(struct platform_device *pdev,
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 71616f8b4982..c6cd63063c82 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -71,11 +71,11 @@ static void fme_region_remove(struct platform_device *pdev)
}
static struct platform_driver fme_region_driver = {
- .driver = {
- .name = DFL_FPGA_FME_REGION,
+ .driver = {
+ .name = DFL_FPGA_FME_REGION,
},
- .probe = fme_region_probe,
- .remove_new = fme_region_remove,
+ .probe = fme_region_probe,
+ .remove = fme_region_remove,
};
module_platform_driver(fme_region_driver);
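/*
 * Editor's note (not part of the patch): the .remove_new -> .remove
 * renames in this series assume a platform core where .remove already
 * carries the void-returning signature that .remove_new was the
 * transitional name for. Hypothetical minimal driver for illustration:
 */
static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* teardown only; no error can be returned from remove */
}

static struct platform_driver example_driver = {
	.driver	= {
		.name = "dfl-example",		/* hypothetical name */
	},
	.probe	= example_probe,
	.remove	= example_remove,		/* void return, was .remove_new */
};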
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 80cac3a5f976..602807d6afcc 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -39,14 +39,6 @@ struct cci_drvdata {
struct dfl_fpga_cdev *cdev; /* container device */
};
-static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
-{
- if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
- return NULL;
-
- return pcim_iomap_table(pcidev)[0];
-}
-
static int cci_pci_alloc_irq(struct pci_dev *pcidev)
{
int ret, nvec = pci_msix_vec_count(pcidev);
@@ -235,9 +227,9 @@ static int find_dfls_by_default(struct pci_dev *pcidev,
u64 v;
/* start to find Device Feature List from Bar 0 */
- base = cci_pci_ioremap_bar0(pcidev);
- if (!base)
- return -ENOMEM;
+ base = pcim_iomap_region(pcidev, 0, DRV_NAME);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
/*
* PF device has FME and Ports/AFUs, and VF device only has one
@@ -296,7 +288,7 @@ static int find_dfls_by_default(struct pci_dev *pcidev,
}
/* release I/O mappings for next step enumeration */
- pcim_iounmap_regions(pcidev, BIT(0));
+ pcim_iounmap_region(pcidev, 0);
return ret;
}
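/*
 * Editor's sketch (not part of the patch): pcim_iomap_region() replaces
 * the open-coded cci_pci_ioremap_bar0() helper. Note it reports failure
 * as an ERR_PTR-encoded pointer rather than NULL, and pairs with
 * pcim_iounmap_region() for the early unmap before enumeration.
 * DRV_NAME is assumed to be the macro already defined in dfl-pci.c.
 */
static int example_map_bar0(struct pci_dev *pcidev)
{
	void __iomem *base;

	base = pcim_iomap_region(pcidev, 0, DRV_NAME);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... walk the DFLs found at base ... */

	pcim_iounmap_region(pcidev, 0);
	return 0;
}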
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index c406b949026f..7022657243c0 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -119,17 +119,6 @@ static void dfl_id_free(enum dfl_id_type type, int id)
mutex_unlock(&dfl_id_mutex);
}
-static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
- if (!strcmp(dfl_devs[i].name, pdev->name))
- return i;
-
- return DFL_ID_MAX;
-}
-
static enum dfl_id_type dfh_id_to_type(u16 id)
{
int i;
@@ -156,12 +145,12 @@ static LIST_HEAD(dfl_port_ops_list);
/**
* dfl_fpga_port_ops_get - get matched port ops from the global list
- * @pdev: platform device to match with associated port ops.
+ * @fdata: feature dev data to match with associated port ops.
* Return: matched port ops on success, NULL otherwise.
*
* Please note that you must call dfl_fpga_port_ops_put() after using the port_ops.
*/
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata)
{
struct dfl_fpga_port_ops *ops = NULL;
@@ -171,7 +160,7 @@ struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
list_for_each_entry(ops, &dfl_port_ops_list, node) {
/* match port_ops using the name of platform device */
- if (!strcmp(pdev->name, ops->name)) {
+ if (!strcmp(fdata->pdev_name, ops->name)) {
if (!try_module_get(ops->owner))
ops = NULL;
goto done;
@@ -222,27 +211,26 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
/**
* dfl_fpga_check_port_id - check the port id
- * @pdev: port platform device.
+ * @fdata: port feature dev data.
* @pport_id: port id to compare.
*
* Return: 1 if port device matches with given port id, otherwise 0.
*/
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct dfl_fpga_port_ops *port_ops;
- if (pdata->id != FEATURE_DEV_ID_UNUSED)
- return pdata->id == *(int *)pport_id;
+ if (fdata->id != FEATURE_DEV_ID_UNUSED)
+ return fdata->id == *(int *)pport_id;
- port_ops = dfl_fpga_port_ops_get(pdev);
+ port_ops = dfl_fpga_port_ops_get(fdata);
if (!port_ops || !port_ops->get_id)
return 0;
- pdata->id = port_ops->get_id(pdev);
+ fdata->id = port_ops->get_id(fdata);
dfl_fpga_port_ops_put(port_ops);
- return pdata->id == *(int *)pport_id;
+ return fdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
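As a usage note, dfl_fpga_check_port_id() is designed to be passed as the match callback of the port-lookup helpers converted later in this patch; a hedged sketch with a hypothetical wrapper name:

/* Find a port's enumeration data by id; locking is handled inside the
 * dfl_fpga_cdev_find_port_data() wrapper defined in dfl.h below. */
static struct dfl_feature_dev_data *demo_find_port(struct dfl_fpga_cdev *cdev,
						   int port_id)
{
	return dfl_fpga_cdev_find_port_data(cdev, &port_id,
					    dfl_fpga_check_port_id);
}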
@@ -351,10 +339,10 @@ static void release_dfl_dev(struct device *dev)
}
static struct dfl_device *
-dfl_dev_add(struct dfl_feature_platform_data *pdata,
+dfl_dev_add(struct dfl_feature_dev_data *fdata,
struct dfl_feature *feature)
{
- struct platform_device *pdev = pdata->dev;
+ struct platform_device *pdev = fdata->dev;
struct resource *parent_res;
struct dfl_device *ddev;
int id, i, ret;
@@ -380,11 +368,11 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata,
if (ret)
goto put_dev;
- ddev->type = feature_dev_id_type(pdev);
+ ddev->type = fdata->type;
ddev->feature_id = feature->id;
ddev->revision = feature->revision;
ddev->dfh_version = feature->dfh_version;
- ddev->cdev = pdata->dfl_cdev;
+ ddev->cdev = fdata->dfl_cdev;
if (feature->param_size) {
ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL);
if (!ddev->params) {
@@ -435,11 +423,11 @@ put_dev:
return ERR_PTR(ret);
}
-static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
+static void dfl_devs_remove(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ddev) {
device_unregister(&feature->ddev->dev);
feature->ddev = NULL;
@@ -447,13 +435,13 @@ static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
}
}
-static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
+static int dfl_devs_add(struct dfl_feature_dev_data *fdata)
{
struct dfl_feature *feature;
struct dfl_device *ddev;
int ret;
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ioaddr)
continue;
@@ -462,7 +450,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
goto err;
}
- ddev = dfl_dev_add(pdata, feature);
+ ddev = dfl_dev_add(fdata, feature);
if (IS_ERR(ddev)) {
ret = PTR_ERR(ddev);
goto err;
@@ -474,7 +462,7 @@ static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
return 0;
err:
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
return ret;
}
@@ -504,12 +492,12 @@ EXPORT_SYMBOL(dfl_driver_unregister);
*/
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_feature *feature;
- dfl_devs_remove(pdata);
+ dfl_devs_remove(fdata);
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (feature->ops) {
if (feature->ops->uinit)
feature->ops->uinit(pdev, feature);
@@ -520,7 +508,6 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
static int dfl_feature_instance_init(struct platform_device *pdev,
- struct dfl_feature_platform_data *pdata,
struct dfl_feature *feature,
struct dfl_feature_driver *drv)
{
@@ -579,16 +566,15 @@ static bool dfl_feature_drv_match(struct dfl_feature *feature,
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
struct dfl_feature_driver *feature_drvs)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_feature_driver *drv = feature_drvs;
struct dfl_feature *feature;
int ret;
while (drv->ops) {
- dfl_fpga_dev_for_each_feature(pdata, feature) {
+ dfl_fpga_dev_for_each_feature(fdata, feature) {
if (dfl_feature_drv_match(feature, drv)) {
- ret = dfl_feature_instance_init(pdev, pdata,
- feature, drv);
+ ret = dfl_feature_instance_init(pdev, feature, drv);
if (ret)
goto exit;
}
@@ -596,7 +582,7 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev,
drv++;
}
- ret = dfl_devs_add(pdata);
+ ret = dfl_devs_add(fdata);
if (ret)
goto exit;
@@ -695,7 +681,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
* @nr_irqs: number of irqs for all feature devices.
* @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
* this device.
- * @feature_dev: current feature device.
+ * @type: the current FIU type.
* @ioaddr: header register region address of current FIU in enumeration.
* @start: register resource start of current FIU.
* @len: max register resource length of current FIU.
@@ -708,7 +694,7 @@ struct build_feature_devs_info {
unsigned int nr_irqs;
int *irq_table;
- struct platform_device *feature_dev;
+ enum dfl_id_type type;
void __iomem *ioaddr;
resource_size_t start;
resource_size_t len;
@@ -743,50 +729,62 @@ struct dfl_feature_info {
u64 params[];
};
-static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
- struct platform_device *port)
+static void dfl_fpga_cdev_add_port_data(struct dfl_fpga_cdev *cdev,
+ struct dfl_feature_dev_data *fdata)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
-
mutex_lock(&cdev->lock);
- list_add(&pdata->node, &cdev->port_dev_list);
- get_device(&pdata->dev->dev);
+ list_add(&fdata->node, &cdev->port_dev_list);
mutex_unlock(&cdev->lock);
}
-/*
- * register current feature device, it is called when we need to switch to
- * another feature parsing or we have parsed all features on given device
- * feature list.
- */
-static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+static void dfl_id_free_action(void *arg)
+{
+ struct dfl_feature_dev_data *fdata = arg;
+
+ dfl_id_free(fdata->type, fdata->pdev_id);
+}
+
+static struct dfl_feature_dev_data *
+binfo_create_feature_dev_data(struct build_feature_devs_info *binfo)
{
- struct platform_device *fdev = binfo->feature_dev;
- struct dfl_feature_platform_data *pdata;
+ enum dfl_id_type type = binfo->type;
struct dfl_feature_info *finfo, *p;
- enum dfl_id_type type;
+ struct dfl_feature_dev_data *fdata;
int ret, index = 0, res_idx = 0;
- type = feature_dev_id_type(fdev);
if (WARN_ON_ONCE(type >= DFL_ID_MAX))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- /*
- * we do not need to care for the memory which is associated with
- * the platform device. After calling platform_device_unregister(),
- * it will be automatically freed by device's release() callback,
- * platform_device_release().
- */
- pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ fdata = devm_kzalloc(binfo->dev, sizeof(*fdata), GFP_KERNEL);
+ if (!fdata)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->features = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->features), GFP_KERNEL);
+ if (!fdata->features)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->resources = devm_kcalloc(binfo->dev, binfo->feature_num,
+ sizeof(*fdata->resources), GFP_KERNEL);
+ if (!fdata->resources)
+ return ERR_PTR(-ENOMEM);
+
+ fdata->type = type;
- pdata->dev = fdev;
- pdata->num = binfo->feature_num;
- pdata->dfl_cdev = binfo->cdev;
- pdata->id = FEATURE_DEV_ID_UNUSED;
- mutex_init(&pdata->lock);
- lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
+ fdata->pdev_id = dfl_id_alloc(type, binfo->dev);
+ if (fdata->pdev_id < 0)
+ return ERR_PTR(fdata->pdev_id);
+
+ ret = devm_add_action_or_reset(binfo->dev, dfl_id_free_action, fdata);
+ if (ret)
+ return ERR_PTR(ret);
+
+ fdata->pdev_name = dfl_devs[type].name;
+ fdata->num = binfo->feature_num;
+ fdata->dfl_cdev = binfo->cdev;
+ fdata->id = FEATURE_DEV_ID_UNUSED;
+ mutex_init(&fdata->lock);
+ lockdep_set_class_and_name(&fdata->lock, &dfl_pdata_keys[type],
dfl_pdata_key_strings[type]);
/*
@@ -795,25 +793,15 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
* works properly for port device.
* and it should always be 0 for fme device.
*/
- WARN_ON(pdata->disable_count);
-
- fdev->dev.platform_data = pdata;
-
- /* each sub feature has one MMIO resource */
- fdev->num_resources = binfo->feature_num;
- fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
- GFP_KERNEL);
- if (!fdev->resource)
- return -ENOMEM;
+ WARN_ON(fdata->disable_count);
/* fill features and resource information for feature dev */
list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- struct dfl_feature *feature = &pdata->features[index++];
+ struct dfl_feature *feature = &fdata->features[index++];
struct dfl_feature_irq_ctx *ctx;
unsigned int i;
/* save resource information for each feature */
- feature->dev = fdev;
feature->id = finfo->fid;
feature->revision = finfo->revision;
feature->dfh_version = finfo->dfh_version;
@@ -823,7 +811,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
finfo->params, finfo->param_size,
GFP_KERNEL);
if (!feature->params)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
feature->param_size = finfo->param_size;
}
@@ -840,17 +828,17 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
devm_ioremap_resource(binfo->dev,
&finfo->mmio_res);
if (IS_ERR(feature->ioaddr))
- return PTR_ERR(feature->ioaddr);
+ return ERR_CAST(feature->ioaddr);
} else {
feature->resource_index = res_idx;
- fdev->resource[res_idx++] = finfo->mmio_res;
+ fdata->resources[res_idx++] = finfo->mmio_res;
}
if (finfo->nr_irqs) {
ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < finfo->nr_irqs; i++)
ctx[i].irq =
@@ -864,55 +852,94 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
kfree(finfo);
}
- ret = platform_device_add(binfo->feature_dev);
- if (!ret) {
- if (type == PORT_ID)
- dfl_fpga_cdev_add_port_dev(binfo->cdev,
- binfo->feature_dev);
- else
- binfo->cdev->fme_dev =
- get_device(&binfo->feature_dev->dev);
- /*
- * reset it to avoid build_info_free() freeing their resource.
- *
- * The resource of successfully registered feature devices
- * will be freed by platform_device_unregister(). See the
- * comments in build_info_create_dev().
- */
- binfo->feature_dev = NULL;
- }
+ fdata->resource_num = res_idx;
- return ret;
+ return fdata;
}
-static int
-build_info_create_dev(struct build_feature_devs_info *binfo,
- enum dfl_id_type type)
+/*
+ * Register the current feature device. This is called when we need to switch
+ * to parsing another feature, or when we have parsed all features on the
+ * given device feature list.
+ */
+static int feature_dev_register(struct dfl_feature_dev_data *fdata)
{
+ struct dfl_feature_platform_data pdata = {};
struct platform_device *fdev;
+ struct dfl_feature *feature;
+ int ret;
- if (type >= DFL_ID_MAX)
- return -EINVAL;
-
- /*
- * we use -ENODEV as the initialization indicator which indicates
- * whether the id need to be reclaimed
- */
- fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
+ fdev = platform_device_alloc(fdata->pdev_name, fdata->pdev_id);
if (!fdev)
return -ENOMEM;
- binfo->feature_dev = fdev;
- binfo->feature_num = 0;
+ fdata->dev = fdev;
- INIT_LIST_HEAD(&binfo->sub_features);
+ fdev->dev.parent = &fdata->dfl_cdev->region->dev;
+ fdev->dev.devt = dfl_get_devt(dfl_devs[fdata->type].devt_type, fdev->id);
- fdev->id = dfl_id_alloc(type, &fdev->dev);
- if (fdev->id < 0)
- return fdev->id;
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = fdev;
+
+ ret = platform_device_add_resources(fdev, fdata->resources,
+ fdata->resource_num);
+ if (ret)
+ goto err_put_dev;
+
+ pdata.fdata = fdata;
+ ret = platform_device_add_data(fdev, &pdata, sizeof(pdata));
+ if (ret)
+ goto err_put_dev;
+
+ ret = platform_device_add(fdev);
+ if (ret)
+ goto err_put_dev;
+
+ return 0;
+
+err_put_dev:
+ platform_device_put(fdev);
+
+ fdata->dev = NULL;
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = NULL;
+
+ return ret;
+}
+
+static void feature_dev_unregister(struct dfl_feature_dev_data *fdata)
+{
+ struct dfl_feature *feature;
+
+ platform_device_unregister(fdata->dev);
+
+ fdata->dev = NULL;
+
+ dfl_fpga_dev_for_each_feature(fdata, feature)
+ feature->dev = NULL;
+}
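feature_dev_register() above follows the usual platform-device lifecycle contract: after platform_device_alloc(), every failure up to and including platform_device_add() is unwound with a single platform_device_put(), while a successfully added device must instead be torn down with platform_device_unregister(), as feature_dev_unregister() does. A condensed sketch of that contract, demo_* names hypothetical:

#include <linux/platform_device.h>

static struct platform_device *demo_register(const char *name, int id)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(name, id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	ret = platform_device_add(pdev);
	if (ret) {
		/* Valid after a failed add; drops the only reference. */
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	/* After a successful add, use platform_device_unregister(). */
	return pdev;
}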
+
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct dfl_feature_dev_data *fdata;
+ int ret;
+
+ fdata = binfo_create_feature_dev_data(binfo);
+ if (IS_ERR(fdata))
+ return PTR_ERR(fdata);
+
+ ret = feature_dev_register(fdata);
+ if (ret)
+ return ret;
+
+ if (binfo->type == PORT_ID)
+ dfl_fpga_cdev_add_port_data(binfo->cdev, fdata);
+ else
+ binfo->cdev->fme_dev = get_device(&fdata->dev->dev);
- fdev->dev.parent = &binfo->cdev->region->dev;
- fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
+ /* reset the binfo for next FIU */
+ binfo->type = DFL_ID_MAX;
return 0;
}
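The dfl_id_free_action() hook registered in binfo_create_feature_dev_data() relies on devm_add_action_or_reset(): the action runs on driver detach, and if registration itself fails the action is invoked immediately, which is why none of the error paths above call dfl_id_free() by hand. A generic sketch of the idiom, with a hypothetical IDA:

#include <linux/device.h>
#include <linux/idr.h>

static DEFINE_IDA(demo_ida);

static void demo_id_release(void *data)
{
	ida_free(&demo_ida, *(int *)data);
}

static int demo_alloc_id(struct device *dev, int *id)
{
	*id = ida_alloc(&demo_ida, GFP_KERNEL);
	if (*id < 0)
		return *id;

	/* On failure this calls demo_id_release() before returning. */
	return devm_add_action_or_reset(dev, demo_id_release, id);
}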
@@ -921,22 +948,11 @@ static void build_info_free(struct build_feature_devs_info *binfo)
{
struct dfl_feature_info *finfo, *p;
- /*
- * it is a valid id, free it. See comments in
- * build_info_create_dev()
- */
- if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
- dfl_id_free(feature_dev_id_type(binfo->feature_dev),
- binfo->feature_dev->id);
-
- list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
- list_del(&finfo->node);
- kfree(finfo);
- }
+ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
+ list_del(&finfo->node);
+ kfree(finfo);
}
- platform_device_put(binfo->feature_dev);
-
devm_kfree(binfo->dev, binfo);
}
@@ -1025,7 +1041,7 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
* Instead, features with interrupt functionality provide
* the information in feature specific registers.
*/
- type = feature_dev_id_type(binfo->feature_dev);
+ type = binfo->type;
if (type == PORT_ID) {
switch (fid) {
case PORT_FEATURE_ID_UINT:
@@ -1217,7 +1233,7 @@ static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
}
-#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
+#define is_feature_dev_detected(binfo) ((binfo)->type != DFL_ID_MAX)
static int parse_feature_afu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
@@ -1227,12 +1243,11 @@ static int parse_feature_afu(struct build_feature_devs_info *binfo,
return -EINVAL;
}
- switch (feature_dev_id_type(binfo->feature_dev)) {
+ switch (binfo->type) {
case PORT_ID:
return parse_feature_port_afu(binfo, ofst);
default:
- dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
- binfo->feature_dev->name);
+ dev_info(binfo->dev, "AFU belonging to FIU is not supported yet.\n");
}
return 0;
@@ -1273,6 +1288,7 @@ static void build_info_complete(struct build_feature_devs_info *binfo)
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
resource_size_t ofst)
{
+ enum dfl_id_type type;
int ret = 0;
u32 offset;
u16 id;
@@ -1294,10 +1310,13 @@ static int parse_feature_fiu(struct build_feature_devs_info *binfo,
v = readq(binfo->ioaddr + DFH);
id = FIELD_GET(DFH_ID, v);
- /* create platform device for dfl feature dev */
- ret = build_info_create_dev(binfo, dfh_id_to_type(id));
- if (ret)
- return ret;
+ type = dfh_id_to_type(id);
+ if (type >= DFL_ID_MAX)
+ return -EINVAL;
+
+ binfo->type = type;
+ binfo->feature_num = 0;
+ INIT_LIST_HEAD(&binfo->sub_features);
ret = create_feature_instance(binfo, 0, 0, 0);
if (ret)
@@ -1515,13 +1534,9 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
static int remove_feature_dev(struct device *dev, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
- enum dfl_id_type type = feature_dev_id_type(pdev);
- int id = pdev->id;
-
- platform_device_unregister(pdev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
- dfl_id_free(type, id);
+ feature_dev_unregister(fdata);
return 0;
}
@@ -1573,6 +1588,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
goto unregister_region_exit;
}
+ binfo->type = DFL_ID_MAX;
binfo->dev = info->dev;
binfo->cdev = cdev;
@@ -1614,25 +1630,10 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
*/
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata, *ptmp;
-
mutex_lock(&cdev->lock);
if (cdev->fme_dev)
put_device(cdev->fme_dev);
- list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
- struct platform_device *port_dev = pdata->dev;
-
- /* remove released ports */
- if (!device_is_registered(&port_dev->dev)) {
- dfl_id_free(feature_dev_id_type(port_dev),
- port_dev->id);
- platform_device_put(port_dev);
- }
-
- list_del(&pdata->node);
- put_device(&port_dev->dev);
- }
mutex_unlock(&cdev->lock);
remove_feature_devs(cdev);
@@ -1643,7 +1644,7 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
/**
- * __dfl_fpga_cdev_find_port - find a port under given container device
+ * __dfl_fpga_cdev_find_port_data - find a port under given container device
*
* @cdev: container device
* @data: data passed to match function
@@ -1656,23 +1657,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
*
* NOTE: you will need to drop the device reference with put_device() after use.
*/
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *))
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_dev;
-
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- port_dev = pdata->dev;
+ struct dfl_feature_dev_data *fdata;
- if (match(port_dev, data) && get_device(&port_dev->dev))
- return port_dev;
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (match(fdata, data))
+ return fdata;
}
return NULL;
}
-EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
+EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port_data);
static int __init dfl_fpga_init(void)
{
@@ -1706,33 +1704,28 @@ static int __init dfl_fpga_init(void)
*/
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;
mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;
- if (!device_is_registered(&port_pdev->dev)) {
+ if (!fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}
- pdata = dev_get_platdata(&port_pdev->dev);
-
- mutex_lock(&pdata->lock);
- ret = dfl_feature_dev_use_begin(pdata, true);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ ret = dfl_feature_dev_use_begin(fdata, true);
+ mutex_unlock(&fdata->lock);
if (ret)
- goto put_dev_exit;
+ goto unlock_exit;
- platform_device_del(port_pdev);
+ feature_dev_unregister(fdata);
cdev->released_port_num++;
-put_dev_exit:
- put_device(&port_pdev->dev);
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1752,34 +1745,29 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
*/
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
- struct dfl_feature_platform_data *pdata;
- struct platform_device *port_pdev;
+ struct dfl_feature_dev_data *fdata;
int ret = -ENODEV;
mutex_lock(&cdev->lock);
- port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
- dfl_fpga_check_port_id);
- if (!port_pdev)
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
+ dfl_fpga_check_port_id);
+ if (!fdata)
goto unlock_exit;
- if (device_is_registered(&port_pdev->dev)) {
+ if (fdata->dev) {
ret = -EBUSY;
- goto put_dev_exit;
+ goto unlock_exit;
}
- ret = platform_device_add(port_pdev);
+ ret = feature_dev_register(fdata);
if (ret)
- goto put_dev_exit;
-
- pdata = dev_get_platdata(&port_pdev->dev);
+ goto unlock_exit;
- mutex_lock(&pdata->lock);
- dfl_feature_dev_use_end(pdata);
- mutex_unlock(&pdata->lock);
+ mutex_lock(&fdata->lock);
+ dfl_feature_dev_use_end(fdata);
+ mutex_unlock(&fdata->lock);
cdev->released_port_num--;
-put_dev_exit:
- put_device(&port_pdev->dev);
unlock_exit:
mutex_unlock(&cdev->lock);
return ret;
@@ -1789,10 +1777,11 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
static void config_port_access_mode(struct device *fme_dev, int port_id,
bool is_vf)
{
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(fme_dev);
void __iomem *base;
u64 v;
- base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
+ base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);
v = readq(base + FME_HDR_PORT_OFST(port_id));
@@ -1816,14 +1805,14 @@ static void config_port_access_mode(struct device *fme_dev, int port_id,
*/
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
mutex_lock(&cdev->lock);
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;
- config_port_pf_mode(cdev->fme_dev, pdata->id);
+ config_port_pf_mode(cdev->fme_dev, fdata->id);
}
mutex_unlock(&cdev->lock);
}
@@ -1842,7 +1831,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
*/
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
- struct dfl_feature_platform_data *pdata;
+ struct dfl_feature_dev_data *fdata;
int ret = 0;
mutex_lock(&cdev->lock);
@@ -1856,11 +1845,11 @@ int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
goto done;
}
- list_for_each_entry(pdata, &cdev->port_dev_list, node) {
- if (device_is_registered(&pdata->dev->dev))
+ list_for_each_entry(fdata, &cdev->port_dev_list, node) {
+ if (fdata->dev)
continue;
- config_port_vf_mode(cdev->fme_dev, pdata->id);
+ config_port_vf_mode(cdev->fme_dev, fdata->id);
}
done:
mutex_unlock(&cdev->lock);
@@ -1993,7 +1982,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
struct dfl_feature *feature,
unsigned long arg)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
struct dfl_fpga_irq_set hdr;
s32 *fds;
long ret;
@@ -2013,9 +2002,9 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
if (IS_ERR(fds))
return PTR_ERR(fds);
- mutex_lock(&pdata->lock);
+ mutex_lock(&fdata->lock);
ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
- mutex_unlock(&pdata->lock);
+ mutex_unlock(&fdata->lock);
kfree(fds);
return ret;
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 5063d73b0d82..95539f1213cb 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -17,6 +17,7 @@
#include <linux/bitfield.h>
#include <linux/cdev.h>
#include <linux/delay.h>
+#include <linux/dfl.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
@@ -206,6 +207,8 @@
#define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */
#define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */
+struct dfl_feature_dev_data;
+
/**
* struct dfl_fpga_port_ops - port ops
*
@@ -219,15 +222,15 @@ struct dfl_fpga_port_ops {
const char *name;
struct module *owner;
struct list_head node;
- int (*get_id)(struct platform_device *pdev);
- int (*enable_set)(struct platform_device *pdev, bool enable);
+ int (*get_id)(struct dfl_feature_dev_data *fdata);
+ int (*enable_set)(struct dfl_feature_dev_data *fdata, bool enable);
};
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
-struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
+struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata);
void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
-int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
+int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id);
/**
* struct dfl_feature_id - dfl private feature id
@@ -300,26 +303,32 @@ struct dfl_feature {
#define FEATURE_DEV_ID_UNUSED (-1)
/**
- * struct dfl_feature_platform_data - platform data for feature devices
+ * struct dfl_feature_dev_data - dfl enumeration data for dfl feature dev.
*
- * @node: node to link feature devs to container device's port_dev_list.
- * @lock: mutex to protect platform data.
- * @cdev: cdev of feature dev.
- * @dev: ptr to platform device linked with this platform data.
+ * @node: node to link the data structure to container device's port_dev_list.
+ * @lock: mutex to protect feature dev data.
+ * @dev: ptr to the feature's platform device linked with this structure.
+ * @type: type of DFL FIU for the feature dev. See enum dfl_id_type.
+ * @pdev_id: platform device id for the feature dev.
+ * @pdev_name: platform device name for the feature dev.
* @dfl_cdev: ptr to container device.
- * @id: id used for this feature device.
+ * @id: id used for the feature device.
* @disable_count: count for port disable.
* @excl_open: set on feature device exclusive open.
* @open_count: count for feature device open.
* @num: number for sub features.
* @private: ptr to feature dev private data.
- * @features: sub features of this feature dev.
+ * @features: sub features for the feature dev.
+ * @resource_num: number of resources for the feature dev.
+ * @resources: resources for the feature dev.
*/
-struct dfl_feature_platform_data {
+struct dfl_feature_dev_data {
struct list_head node;
struct mutex lock;
- struct cdev cdev;
struct platform_device *dev;
+ enum dfl_id_type type;
+ int pdev_id;
+ const char *pdev_name;
struct dfl_fpga_cdev *dfl_cdev;
int id;
unsigned int disable_count;
@@ -327,55 +336,68 @@ struct dfl_feature_platform_data {
int open_count;
void *private;
int num;
- struct dfl_feature features[];
+ struct dfl_feature *features;
+ int resource_num;
+ struct resource *resources;
+};
+
+/**
+ * struct dfl_feature_platform_data - platform data for feature devices
+ *
+ * @cdev: cdev of feature dev.
+ * @fdata: dfl enumeration data for the dfl feature device.
+ */
+struct dfl_feature_platform_data {
+ struct cdev cdev;
+ struct dfl_feature_dev_data *fdata;
};
static inline
-int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+int dfl_feature_dev_use_begin(struct dfl_feature_dev_data *fdata,
bool excl)
{
- if (pdata->excl_open)
+ if (fdata->excl_open)
return -EBUSY;
if (excl) {
- if (pdata->open_count)
+ if (fdata->open_count)
return -EBUSY;
- pdata->excl_open = true;
+ fdata->excl_open = true;
}
- pdata->open_count++;
+ fdata->open_count++;
return 0;
}
static inline
-void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
+void dfl_feature_dev_use_end(struct dfl_feature_dev_data *fdata)
{
- pdata->excl_open = false;
+ fdata->excl_open = false;
- if (WARN_ON(pdata->open_count <= 0))
+ if (WARN_ON(fdata->open_count <= 0))
return;
- pdata->open_count--;
+ fdata->open_count--;
}
static inline
-int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+int dfl_feature_dev_use_count(struct dfl_feature_dev_data *fdata)
{
- return pdata->open_count;
+ return fdata->open_count;
}
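The three inlines above form the open-count protocol for feature devices; callers are expected to take fdata->lock around both ends, as dfl_fpga_cdev_release_port() and dfl_fpga_cdev_assign_port() do earlier in this patch. A hedged sketch of the paired usage, demo_* names hypothetical:

static int demo_open(struct dfl_feature_dev_data *fdata, bool excl)
{
	int ret;

	mutex_lock(&fdata->lock);
	ret = dfl_feature_dev_use_begin(fdata, excl);
	mutex_unlock(&fdata->lock);

	return ret;
}

static void demo_release(struct dfl_feature_dev_data *fdata)
{
	mutex_lock(&fdata->lock);
	dfl_feature_dev_use_end(fdata);
	mutex_unlock(&fdata->lock);
}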
static inline
-void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
+void dfl_fpga_fdata_set_private(struct dfl_feature_dev_data *fdata,
void *private)
{
- pdata->private = private;
+ fdata->private = private;
}
static inline
-void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
+void *dfl_fpga_fdata_get_private(struct dfl_feature_dev_data *fdata)
{
- return pdata->private;
+ return fdata->private;
}
struct dfl_feature_ops {
@@ -398,37 +420,36 @@ int dfl_fpga_dev_ops_register(struct platform_device *pdev,
struct module *owner);
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev);
-static inline
-struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
+static inline struct dfl_feature_dev_data *
+dfl_fpga_inode_to_feature_dev_data(struct inode *inode)
{
struct dfl_feature_platform_data *pdata;
pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
cdev);
- return pdata->dev;
+ return pdata->fdata;
}
-#define dfl_fpga_dev_for_each_feature(pdata, feature) \
- for ((feature) = (pdata)->features; \
- (feature) < (pdata)->features + (pdata)->num; (feature)++)
+#define dfl_fpga_dev_for_each_feature(fdata, feature) \
+ for ((feature) = (fdata)->features; \
+ (feature) < (fdata)->features + (fdata)->num; (feature)++)
-static inline
-struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u16 id)
+static inline struct dfl_feature *
+dfl_get_feature_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
struct dfl_feature *feature;
- dfl_fpga_dev_for_each_feature(pdata, feature)
+ dfl_fpga_dev_for_each_feature(fdata, feature)
if (feature->id == id)
return feature;
return NULL;
}
-static inline
-void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
+static inline void __iomem *
+dfl_get_feature_ioaddr_by_id(struct dfl_feature_dev_data *fdata, u16 id)
{
- struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
+ struct dfl_feature *feature = dfl_get_feature_by_id(fdata, id);
if (feature && feature->ioaddr)
return feature->ioaddr;
@@ -437,10 +458,18 @@ void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u16 id)
return NULL;
}
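With the new signature, callers resolve the enumeration data first, via the to_dfl_feature_dev_data() helper added just below, and then look up a sub-feature's MMIO base by id, as the config_port_access_mode() hunk earlier in this patch does. A hedged sketch reusing the FME header constants from this file:

static u64 demo_read_port_offset(struct device *fme_dev, int port_id)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(fme_dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

	return readq(base + FME_HDR_PORT_OFST(port_id));
}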
+static inline struct dfl_feature_dev_data *
+to_dfl_feature_dev_data(struct device *dev)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
+
+ return pdata->fdata;
+}
+
static inline
-struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
+struct device *dfl_fpga_fdata_to_parent(struct dfl_feature_dev_data *fdata)
{
- return pdata->dev->dev.parent->parent;
+ return fdata->dev->dev.parent->parent;
}
static inline bool dfl_feature_is_fme(void __iomem *base)
@@ -522,26 +551,21 @@ struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);
-/*
- * need to drop the device reference with put_device() after use port platform
- * device returned by __dfl_fpga_cdev_find_port and dfl_fpga_cdev_find_port
- * functions.
- */
-struct platform_device *
-__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *));
+struct dfl_feature_dev_data *
+__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *));
-static inline struct platform_device *
-dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
- int (*match)(struct platform_device *, void *))
+static inline struct dfl_feature_dev_data *
+dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
+ int (*match)(struct dfl_feature_dev_data *, void *))
{
- struct platform_device *pdev;
+ struct dfl_feature_dev_data *fdata;
mutex_lock(&cdev->lock);
- pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
+ fdata = __dfl_fpga_cdev_find_port_data(cdev, data, match);
mutex_unlock(&cdev->lock);
- return pdev;
+ return fdata;
}
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id);
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 7ac9f9f5af12..10f678b9ed36 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -759,7 +759,7 @@ MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
static struct platform_driver intel_m10bmc_sec_driver = {
.probe = m10bmc_sec_probe,
- .remove_new = m10bmc_sec_remove,
+ .remove = m10bmc_sec_remove,
.driver = {
.name = "intel-m10bmc-sec-update",
.dev_groups = m10bmc_sec_attr_groups,
@@ -771,4 +771,4 @@ module_platform_driver(intel_m10bmc_sec_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX10 BMC Secure Update");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);
+MODULE_IMPORT_NS("INTEL_M10_BMC_CORE");
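The quoting here reflects the treewide conversion of module symbol namespaces from bare identifiers to string literals; the exporting and importing sides must use the same string. A minimal sketch with a hypothetical namespace:

/* Exporter */
int demo_func(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(demo_func, "DEMO_NS");

/* Consumer module */
MODULE_IMPORT_NS("DEMO_NS");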
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 8526a5a86f0c..43db4bb77138 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -436,7 +436,7 @@ static void of_fpga_region_remove(struct platform_device *pdev)
static struct platform_driver of_fpga_region_driver = {
.probe = of_fpga_region_probe,
- .remove_new = of_fpga_region_remove,
+ .remove = of_fpga_region_remove,
.driver = {
.name = "of-fpga-region",
.of_match_table = of_match_ptr(fpga_region_of_match),
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index 4c03513b8f03..0165a3c86932 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -535,7 +535,7 @@ MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match);
static struct platform_driver socfpga_a10_fpga_driver = {
.probe = socfpga_a10_fpga_probe,
- .remove_new = socfpga_a10_fpga_remove,
+ .remove = socfpga_a10_fpga_remove,
.driver = {
.name = "socfpga_a10_fpga_manager",
.of_match_table = socfpga_a10_fpga_of_match,
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 2c0def7d7cbb..0a295ccf1644 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -455,7 +455,7 @@ MODULE_DEVICE_TABLE(of, s10_of_match);
static struct platform_driver s10_driver = {
.probe = s10_probe,
- .remove_new = s10_remove,
+ .remove = s10_remove,
.driver = {
.name = "Stratix10 SoC FPGA manager",
.of_match_table = of_match_ptr(s10_of_match),
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index 788dd2f63a65..822751fad18a 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -162,7 +162,7 @@ static void xlnx_pr_decoupler_remove(struct platform_device *pdev)
static struct platform_driver xlnx_pr_decoupler_driver = {
.probe = xlnx_pr_decoupler_probe,
- .remove_new = xlnx_pr_decoupler_remove,
+ .remove = xlnx_pr_decoupler_remove,
.driver = {
.name = "xlnx_pr_decoupler",
.of_match_table = xlnx_pr_decoupler_of_match,
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 4db3d80e10b0..f7e08f7ea9ef 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -642,7 +642,7 @@ MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);
static struct platform_driver zynq_fpga_driver = {
.probe = zynq_fpga_probe,
- .remove_new = zynq_fpga_remove,
+ .remove = zynq_fpga_remove,
.driver = {
.name = "zynq_fpga_manager",
.of_match_table = of_match_ptr(zynq_fpga_of_match),
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
index 6f5e1bdf7e40..bff897f77fe5 100644
--- a/drivers/fsi/fsi-master-aspeed.c
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -666,7 +666,7 @@ static struct platform_driver fsi_master_aspeed_driver = {
.of_match_table = fsi_master_aspeed_match,
},
.probe = fsi_master_aspeed_probe,
- .remove_new = fsi_master_aspeed_remove,
+ .remove = fsi_master_aspeed_remove,
};
module_platform_driver(fsi_master_aspeed_driver);
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index a4c37ff8edd6..9f2fd444ceb6 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -1434,7 +1434,7 @@ static struct platform_driver fsi_master_acf = {
.of_match_table = fsi_master_acf_match,
},
.probe = fsi_master_acf_probe,
- .remove_new = fsi_master_acf_remove,
+ .remove = fsi_master_acf_remove,
};
module_platform_driver(fsi_master_acf);
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
index f761344f4873..69de0b5b9cbd 100644
--- a/drivers/fsi/fsi-master-gpio.c
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -888,7 +888,7 @@ static struct platform_driver fsi_master_gpio_driver = {
.of_match_table = fsi_master_gpio_match,
},
.probe = fsi_master_gpio_probe,
- .remove_new = fsi_master_gpio_remove,
+ .remove = fsi_master_gpio_remove,
};
module_platform_driver(fsi_master_gpio_driver);
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index a6d4c8f123a5..d3e6bf37878a 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -740,7 +740,7 @@ static struct platform_driver occ_driver = {
.of_match_table = occ_match,
},
.probe = occ_probe,
- .remove_new = occ_remove,
+ .remove = occ_remove,
};
static int occ_init(void)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d93cd4f722b4..98b4d1633b25 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -70,8 +70,7 @@ config GPIO_SYSFS
ioctl() operations instead.
config GPIO_CDEV
- bool
- prompt "Character device (/dev/gpiochipN) support" if EXPERT
+ bool "Character device (/dev/gpiochipN) support" if EXPERT
default y
help
Say Y here to add the character device /dev/gpiochipN interface
@@ -149,9 +148,7 @@ config GPIO_74XX_MMIO
config GPIO_ALTERA
tristate "Altera GPIO"
- depends on OF_GPIO
select GPIOLIB_IRQCHIP
- select OF_GPIO_MM_GPIOCHIP
help
Say Y or M here to build support for the Altera PIO device.
@@ -243,7 +240,7 @@ config GPIO_CLPS711X
config GPIO_DAVINCI
tristate "TI Davinci/Keystone GPIO support"
default y if ARCH_DAVINCI
- depends on (ARM || ARM64) && (ARCH_DAVINCI || ARCH_KEYSTONE || ARCH_K3)
+ depends on ((ARM || ARM64) && (ARCH_DAVINCI || ARCH_KEYSTONE || ARCH_K3)) || COMPILE_TEST
help
Say yes here to enable GPIO support for TI Davinci/Keystone SoCs.
@@ -341,7 +338,7 @@ config GPIO_GRANITERAPIDS
config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
- depends on OF_GPIO
+ depends on OF || COMPILE_TEST
select GPIO_GENERIC
select IRQ_DOMAIN
help
@@ -486,9 +483,9 @@ config GPIO_MT7621
Say yes here to support the Mediatek MT7621 SoC GPIO device.
config GPIO_MVEBU
- def_bool y
- depends on PLAT_ORION || ARCH_MVEBU
- depends on OF_GPIO
+ bool "Marvell Orion and EBU GPIO support" if COMPILE_TEST
+ depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
+ default PLAT_ORION || ARCH_MVEBU
select GENERIC_IRQ_CHIP
select REGMAP_MMIO
@@ -533,9 +530,9 @@ config GPIO_OCTEON
family of SOCs.
config GPIO_OMAP
- tristate "TI OMAP GPIO support" if ARCH_OMAP2PLUS || COMPILE_TEST
+ tristate "TI OMAP GPIO support"
+ depends on ARCH_OMAP || COMPILE_TEST
default y if ARCH_OMAP
- depends on ARM
select GENERIC_IRQ_CHIP
select GPIOLIB_IRQCHIP
help
@@ -549,6 +546,12 @@ config GPIO_PL061
help
Say yes here to support the PrimeCell PL061 GPIO device.
+config GPIO_POLARFIRE_SOC
+ bool "Microchip FPGA GPIO support"
+ select REGMAP_MMIO
+ help
+ Say yes here to support the GPIO controllers on Microchip FPGAs.
+
config GPIO_PXA
bool "PXA GPIO support"
depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
@@ -714,13 +717,13 @@ config GPIO_TEGRA
config GPIO_TEGRA186
tristate "NVIDIA Tegra186 GPIO support"
- default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC
- depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST
+ default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
+ depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || COMPILE_TEST
depends on OF_GPIO
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
help
- Say yes here to support GPIO pins on NVIDIA Tegra186 SoCs.
+ Say yes here to support GPIO pins on NVIDIA Tegra186, 194 and 234 SoCs.
config GPIO_TS4800
tristate "TS-4800 DIO blocks and compatibles"
@@ -796,7 +799,6 @@ config GPIO_XGENE_SB
config GPIO_XILINX
tristate "Xilinx GPIO support"
select GPIOLIB_IRQCHIP
- depends on OF_GPIO
help
Say yes here to support the Xilinx FPGA GPIO device.
@@ -1287,6 +1289,16 @@ config GPIO_BD9571MWV
This driver can also be built as a module. If so, the module
will be called gpio-bd9571mwv.
+config GPIO_CGBC
+ tristate "Congatec Board Controller GPIO support"
+ depends on MFD_CGBC
+ help
+ Select this option to enable GPIO support for the Congatec Board
+ Controller.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-cgbc.
+
config GPIO_CROS_EC
tristate "ChromeOS EC GPIO support"
depends on CROS_EC
@@ -1844,6 +1856,13 @@ config GPIO_VIPERBOARD
River Tech's viperboard.h for detailed meaning
of the module parameters.
+config GPIO_MPSSE
+ tristate "FTDI MPSSE GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ GPIO driver for FTDI's MPSSE interface. The pins can be used for
+ input and output. Each MPSSE provides 16 I/O pins.
+
endmenu
menu "Virtual GPIO drivers"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 1429e8c0229b..af3ba4d81b58 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CADENCE) += gpio-cadence.o
+obj-$(CONFIG_GPIO_CGBC) += gpio-cgbc.o
obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
obj-$(CONFIG_GPIO_SNPS_CREG) += gpio-creg-snps.o
obj-$(CONFIG_GPIO_CROS_EC) += gpio-cros-ec.o
@@ -114,6 +115,7 @@ obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
+obj-$(CONFIG_GPIO_MPSSE) += gpio-mpsse.o
obj-$(CONFIG_GPIO_MSC313) += gpio-msc313.o
obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
@@ -133,6 +135,7 @@ obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PMIC_EIC_SPRD) += gpio-pmic-eic-sprd.o
+obj-$(CONFIG_GPIO_POLARFIRE_SOC) += gpio-mpfs.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 189c3abe7e79..942d1cd2bd3c 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -61,8 +61,8 @@ Work items:
- Change all consumer drivers that #include <linux/of_gpio.h> to
#include <linux/gpio/consumer.h> and stop doing custom parsing of the
- GPIO lines from the device tree. This can be tricky and often ivolves
- changing boardfiles, etc.
+ GPIO lines from the device tree. This can be tricky and often involves
+ changing board files, etc.
- Pull semantics for legacy device tree (OF) GPIO lookups into
gpiolib-of.c: in some cases subsystems are doing custom flags and
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 4df9becaf349..cf5a50102d49 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -22,7 +22,7 @@
#include "gpio-i8255.h"
-MODULE_IMPORT_NS(I8255);
+MODULE_IMPORT_NS("I8255");
#define DIO48E_EXTENT 16
#define MAX_NUM_DIO48E max_num_isa_dev(DIO48E_EXTENT)
@@ -339,4 +339,4 @@ module_isa_driver_with_irq(dio48e_driver, num_dio48e, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-DIO-48E GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(I8254);
+MODULE_IMPORT_NS("I8254");
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index f03ccd0f534c..ffe7e1cb6b23 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -126,4 +126,4 @@ module_isa_driver_with_irq(idio_16_driver, num_idio_16, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(GPIO_IDIO_16);
+MODULE_IMPORT_NS("GPIO_IDIO_16");
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 753e7be039e4..fca6cd2eb1dd 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -143,24 +143,17 @@ static int gen_74x164_probe(struct spi_device *spi)
chip->gpio_chip.parent = &spi->dev;
chip->gpio_chip.owner = THIS_MODULE;
- mutex_init(&chip->lock);
+ ret = devm_mutex_init(&spi->dev, &chip->lock);
+ if (ret)
+ return ret;
ret = __gen_74x164_write_config(chip);
- if (ret) {
- dev_err(&spi->dev, "Failed writing: %d\n", ret);
- goto exit_destroy;
- }
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Config write failed\n");
gpiod_set_value_cansleep(chip->gpiod_oe, 1);
- ret = gpiochip_add_data(&chip->gpio_chip, chip);
- if (!ret)
- return 0;
-
-exit_destroy:
- mutex_destroy(&chip->lock);
-
- return ret;
+ return devm_gpiochip_add_data(&spi->dev, &chip->gpio_chip, chip);
}
static void gen_74x164_remove(struct spi_device *spi)
@@ -168,8 +161,6 @@ static void gen_74x164_remove(struct spi_device *spi)
struct gen_74x164_chip *chip = spi_get_drvdata(spi);
gpiod_set_value_cansleep(chip->gpiod_oe, 0);
- gpiochip_remove(&chip->gpio_chip);
- mutex_destroy(&chip->lock);
}
static const struct spi_device_id gen_74x164_spi_ids[] = {
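The gen_74x164 conversion above leans on two devres idioms: devm_mutex_init() ties mutex_destroy() to the device lifetime, and devm_gpiochip_add_data() makes the explicit gpiochip_remove() in the remove path unnecessary. A condensed probe sketch under those assumptions, with hypothetical demo_* names and a stubbed config write:

#include <linux/gpio/driver.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>

struct demo_chip {
	struct mutex lock;
	struct gpio_chip gpio_chip;
};

static int demo_write_config(struct demo_chip *chip)
{
	return 0;	/* stand-in for the real register write */
}

static int demo_probe(struct spi_device *spi)
{
	struct demo_chip *chip;
	int ret;

	chip = devm_kzalloc(&spi->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	/* mutex_destroy() now runs automatically on driver detach. */
	ret = devm_mutex_init(&spi->dev, &chip->lock);
	if (ret)
		return ret;

	ret = demo_write_config(chip);
	if (ret)
		return dev_err_probe(&spi->dev, ret, "Config write failed\n");

	/* gpiochip_remove() is likewise handled by devres. */
	return devm_gpiochip_add_data(&spi->dev, &chip->gpio_chip, chip);
}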
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 38e0fff9afe7..65f41cc3eafc 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -65,11 +65,11 @@ static int aggr_parse(struct gpio_aggregator *aggr)
{
char *args = skip_spaces(aggr->args);
char *name, *offsets, *p;
- unsigned long *bitmap;
unsigned int i, n = 0;
int error = 0;
- bitmap = bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
+ unsigned long *bitmap __free(bitmap) =
+ bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL);
if (!bitmap)
return -ENOMEM;
@@ -82,7 +82,7 @@ static int aggr_parse(struct gpio_aggregator *aggr)
/* Named GPIO line */
error = aggr_add_gpio(aggr, name, U16_MAX, &n);
if (error)
- goto free_bitmap;
+ return error;
name = offsets;
continue;
@@ -92,13 +92,13 @@ static int aggr_parse(struct gpio_aggregator *aggr)
error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS);
if (error) {
pr_err("Cannot parse %s: %d\n", offsets, error);
- goto free_bitmap;
+ return error;
}
for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) {
error = aggr_add_gpio(aggr, name, i, &n);
if (error)
- goto free_bitmap;
+ return error;
}
args = next_arg(args, &name, &p);
@@ -106,12 +106,10 @@ static int aggr_parse(struct gpio_aggregator *aggr)
if (!n) {
pr_err("No GPIOs specified\n");
- error = -EINVAL;
+ return -EINVAL;
}
-free_bitmap:
- bitmap_free(bitmap);
- return error;
+ return 0;
}
static ssize_t new_device_store(struct device_driver *driver, const char *buf,
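The __free(bitmap) annotation above comes from <linux/cleanup.h>: the cleanup routine bound to the variable (bitmap_free(), via the DEFINE_FREE(bitmap, ...) helper in <linux/bitmap.h>) runs automatically when the variable leaves scope, which is what lets every early return in aggr_parse() drop its goto. A hedged sketch of the idiom:

#include <linux/bitmap.h>
#include <linux/cleanup.h>

static int demo_count_bits(const char *list, unsigned int nbits)
{
	int error;

	unsigned long *bitmap __free(bitmap) =
		bitmap_alloc(nbits, GFP_KERNEL);
	if (!bitmap)
		return -ENOMEM;

	error = bitmap_parselist(list, bitmap, nbits);
	if (error)
		return error;	/* bitmap_free() runs here automatically */

	return bitmap_weight(bitmap, nbits);
}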
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index c2edfbb231fc..17ab039c7413 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -4,11 +4,19 @@
* Based on gpio-mpc8xxx.c
*/
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/gpio/driver.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/gpio/driver.h>
#define ALTERA_GPIO_MAX_NGPIO 32
#define ALTERA_GPIO_DATA 0x0
@@ -18,56 +26,52 @@
/**
* struct altera_gpio_chip
-* @mmchip : memory mapped chip structure.
+* @gc : GPIO chip structure.
+* @regs : memory mapped IO address for the controller registers.
* @gpio_lock : synchronization lock so that new irq/set/get requests
* will be blocked until the current one completes.
* @interrupt_trigger : specifies the hardware configured IRQ trigger type
* (rising, falling, both, high)
-* @mapped_irq : kernel mapped irq number.
*/
struct altera_gpio_chip {
- struct of_mm_gpio_chip mmchip;
+ struct gpio_chip gc;
+ void __iomem *regs;
raw_spinlock_t gpio_lock;
int interrupt_trigger;
- int mapped_irq;
};
static void altera_gpio_irq_unmask(struct irq_data *d)
{
- struct altera_gpio_chip *altera_gc;
- struct of_mm_gpio_chip *mm_gc;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
u32 intmask;
- altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- mm_gc = &altera_gc->mmchip;
- gpiochip_enable_irq(&mm_gc->gc, irqd_to_hwirq(d));
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
- intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ intmask = readl(altera_gc->regs + ALTERA_GPIO_IRQ_MASK);
/* Set ALTERA_GPIO_IRQ_MASK bit to unmask */
intmask |= BIT(irqd_to_hwirq(d));
- writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ writel(intmask, altera_gc->regs + ALTERA_GPIO_IRQ_MASK);
raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
}
static void altera_gpio_irq_mask(struct irq_data *d)
{
- struct altera_gpio_chip *altera_gc;
- struct of_mm_gpio_chip *mm_gc;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
u32 intmask;
- altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- mm_gc = &altera_gc->mmchip;
-
raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
- intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ intmask = readl(altera_gc->regs + ALTERA_GPIO_IRQ_MASK);
/* Clear ALTERA_GPIO_IRQ_MASK bit to mask */
intmask &= ~BIT(irqd_to_hwirq(d));
- writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ writel(intmask, altera_gc->regs + ALTERA_GPIO_IRQ_MASK);
raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
- gpiochip_disable_irq(&mm_gc->gc, irqd_to_hwirq(d));
+
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
/*
@@ -77,9 +81,8 @@ static void altera_gpio_irq_mask(struct irq_data *d)
static int altera_gpio_irq_set_type(struct irq_data *d,
unsigned int type)
{
- struct altera_gpio_chip *altera_gc;
-
- altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
if (type == IRQ_TYPE_NONE) {
irq_set_handler_locked(d, handle_bad_irq);
@@ -105,49 +108,39 @@ static unsigned int altera_gpio_irq_startup(struct irq_data *d)
static int altera_gpio_get(struct gpio_chip *gc, unsigned offset)
{
- struct of_mm_gpio_chip *mm_gc;
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
- mm_gc = to_of_mm_gpio_chip(gc);
-
- return !!(readl(mm_gc->regs + ALTERA_GPIO_DATA) & BIT(offset));
+ return !!(readl(altera_gc->regs + ALTERA_GPIO_DATA) & BIT(offset));
}
static void altera_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
- struct of_mm_gpio_chip *mm_gc;
- struct altera_gpio_chip *chip;
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
unsigned int data_reg;
- mm_gc = to_of_mm_gpio_chip(gc);
- chip = gpiochip_get_data(gc);
-
- raw_spin_lock_irqsave(&chip->gpio_lock, flags);
- data_reg = readl(mm_gc->regs + ALTERA_GPIO_DATA);
+ raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
+ data_reg = readl(altera_gc->regs + ALTERA_GPIO_DATA);
if (value)
data_reg |= BIT(offset);
else
data_reg &= ~BIT(offset);
- writel(data_reg, mm_gc->regs + ALTERA_GPIO_DATA);
- raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ writel(data_reg, altera_gc->regs + ALTERA_GPIO_DATA);
+ raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
}
static int altera_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
- struct of_mm_gpio_chip *mm_gc;
- struct altera_gpio_chip *chip;
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
unsigned int gpio_ddr;
- mm_gc = to_of_mm_gpio_chip(gc);
- chip = gpiochip_get_data(gc);
-
- raw_spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
/* Set pin as input, assumes software controlled IP */
- gpio_ddr = readl(mm_gc->regs + ALTERA_GPIO_DIR);
+ gpio_ddr = readl(altera_gc->regs + ALTERA_GPIO_DIR);
gpio_ddr &= ~BIT(offset);
- writel(gpio_ddr, mm_gc->regs + ALTERA_GPIO_DIR);
- raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ writel(gpio_ddr, altera_gc->regs + ALTERA_GPIO_DIR);
+ raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
return 0;
}
@@ -155,53 +148,46 @@ static int altera_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
static int altera_gpio_direction_output(struct gpio_chip *gc,
unsigned offset, int value)
{
- struct of_mm_gpio_chip *mm_gc;
- struct altera_gpio_chip *chip;
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
unsigned int data_reg, gpio_ddr;
- mm_gc = to_of_mm_gpio_chip(gc);
- chip = gpiochip_get_data(gc);
-
- raw_spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
/* Sets the GPIO value */
- data_reg = readl(mm_gc->regs + ALTERA_GPIO_DATA);
+ data_reg = readl(altera_gc->regs + ALTERA_GPIO_DATA);
if (value)
data_reg |= BIT(offset);
else
data_reg &= ~BIT(offset);
- writel(data_reg, mm_gc->regs + ALTERA_GPIO_DATA);
+ writel(data_reg, altera_gc->regs + ALTERA_GPIO_DATA);
/* Set pin as output, assumes software controlled IP */
- gpio_ddr = readl(mm_gc->regs + ALTERA_GPIO_DIR);
+ gpio_ddr = readl(altera_gc->regs + ALTERA_GPIO_DIR);
gpio_ddr |= BIT(offset);
- writel(gpio_ddr, mm_gc->regs + ALTERA_GPIO_DIR);
- raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ writel(gpio_ddr, altera_gc->regs + ALTERA_GPIO_DIR);
+ raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
return 0;
}
static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
{
- struct altera_gpio_chip *altera_gc;
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
+ struct irq_domain *irqdomain = gc->irq.domain;
struct irq_chip *chip;
- struct of_mm_gpio_chip *mm_gc;
- struct irq_domain *irqdomain;
unsigned long status;
int i;
- altera_gc = gpiochip_get_data(irq_desc_get_handler_data(desc));
chip = irq_desc_get_chip(desc);
- mm_gc = &altera_gc->mmchip;
- irqdomain = altera_gc->mmchip.gc.irq.domain;
chained_irq_enter(chip, desc);
while ((status =
- (readl(mm_gc->regs + ALTERA_GPIO_EDGE_CAP) &
- readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK)))) {
- writel(status, mm_gc->regs + ALTERA_GPIO_EDGE_CAP);
- for_each_set_bit(i, &status, mm_gc->gc.ngpio)
+ (readl(altera_gc->regs + ALTERA_GPIO_EDGE_CAP) &
+ readl(altera_gc->regs + ALTERA_GPIO_IRQ_MASK)))) {
+ writel(status, altera_gc->regs + ALTERA_GPIO_EDGE_CAP);
+ for_each_set_bit(i, &status, gc->ngpio)
generic_handle_domain_irq(irqdomain, i);
}
@@ -210,24 +196,21 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
{
- struct altera_gpio_chip *altera_gc;
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
+ struct irq_domain *irqdomain = gc->irq.domain;
struct irq_chip *chip;
- struct of_mm_gpio_chip *mm_gc;
- struct irq_domain *irqdomain;
unsigned long status;
int i;
- altera_gc = gpiochip_get_data(irq_desc_get_handler_data(desc));
chip = irq_desc_get_chip(desc);
- mm_gc = &altera_gc->mmchip;
- irqdomain = altera_gc->mmchip.gc.irq.domain;
chained_irq_enter(chip, desc);
- status = readl(mm_gc->regs + ALTERA_GPIO_DATA);
- status &= readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
+ status = readl(altera_gc->regs + ALTERA_GPIO_DATA);
+ status &= readl(altera_gc->regs + ALTERA_GPIO_IRQ_MASK);
- for_each_set_bit(i, &status, mm_gc->gc.ngpio)
+ for_each_set_bit(i, &status, gc->ngpio)
generic_handle_domain_irq(irqdomain, i);
chained_irq_exit(chip, desc);
@@ -246,10 +229,11 @@ static const struct irq_chip altera_gpio_irq_chip = {
static int altera_gpio_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
int reg, ret;
struct altera_gpio_chip *altera_gc;
struct gpio_irq_chip *girq;
+ int mapped_irq;
altera_gc = devm_kzalloc(&pdev->dev, sizeof(*altera_gc), GFP_KERNEL);
if (!altera_gc)
@@ -257,39 +241,47 @@ static int altera_gpio_probe(struct platform_device *pdev)
raw_spin_lock_init(&altera_gc->gpio_lock);
- if (of_property_read_u32(node, "altr,ngpio", &reg))
+ if (device_property_read_u32(dev, "altr,ngpio", &reg))
/* By default assume maximum ngpio */
- altera_gc->mmchip.gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
+ altera_gc->gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
else
- altera_gc->mmchip.gc.ngpio = reg;
+ altera_gc->gc.ngpio = reg;
- if (altera_gc->mmchip.gc.ngpio > ALTERA_GPIO_MAX_NGPIO) {
+ if (altera_gc->gc.ngpio > ALTERA_GPIO_MAX_NGPIO) {
dev_warn(&pdev->dev,
"ngpio is greater than %d, defaulting to %d\n",
ALTERA_GPIO_MAX_NGPIO, ALTERA_GPIO_MAX_NGPIO);
- altera_gc->mmchip.gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
+ altera_gc->gc.ngpio = ALTERA_GPIO_MAX_NGPIO;
}
- altera_gc->mmchip.gc.direction_input = altera_gpio_direction_input;
- altera_gc->mmchip.gc.direction_output = altera_gpio_direction_output;
- altera_gc->mmchip.gc.get = altera_gpio_get;
- altera_gc->mmchip.gc.set = altera_gpio_set;
- altera_gc->mmchip.gc.owner = THIS_MODULE;
- altera_gc->mmchip.gc.parent = &pdev->dev;
+ altera_gc->gc.direction_input = altera_gpio_direction_input;
+ altera_gc->gc.direction_output = altera_gpio_direction_output;
+ altera_gc->gc.get = altera_gpio_get;
+ altera_gc->gc.set = altera_gpio_set;
+ altera_gc->gc.owner = THIS_MODULE;
+ altera_gc->gc.parent = &pdev->dev;
+ altera_gc->gc.base = -1;
- altera_gc->mapped_irq = platform_get_irq_optional(pdev, 0);
+ altera_gc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
+ if (!altera_gc->gc.label)
+ return -ENOMEM;
+
+ altera_gc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(altera_gc->regs))
+ return dev_err_probe(dev, PTR_ERR(altera_gc->regs), "failed to ioremap memory resource\n");
- if (altera_gc->mapped_irq < 0)
+ mapped_irq = platform_get_irq_optional(pdev, 0);
+ if (mapped_irq < 0)
goto skip_irq;
- if (of_property_read_u32(node, "altr,interrupt-type", &reg)) {
+ if (device_property_read_u32(dev, "altr,interrupt-type", &reg)) {
dev_err(&pdev->dev,
"altr,interrupt-type value not set in device tree\n");
return -EINVAL;
}
altera_gc->interrupt_trigger = reg;
- girq = &altera_gc->mmchip.gc.irq;
+ girq = &altera_gc->gc.irq;
gpio_irq_chip_set_chip(girq, &altera_gpio_irq_chip);
if (altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
@@ -303,27 +295,18 @@ static int altera_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
- girq->parents[0] = altera_gc->mapped_irq;
+ girq->parents[0] = mapped_irq;
skip_irq:
- ret = of_mm_gpiochip_add_data(node, &altera_gc->mmchip, altera_gc);
+ ret = devm_gpiochip_add_data(dev, &altera_gc->gc, altera_gc);
if (ret) {
dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
return ret;
}
- platform_set_drvdata(pdev, altera_gc);
-
return 0;
}
-static void altera_gpio_remove(struct platform_device *pdev)
-{
- struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev);
-
- of_mm_gpiochip_remove(&altera_gc->mmchip);
-}
-
static const struct of_device_id altera_gpio_of_match[] = {
{ .compatible = "altr,pio-1.0", },
{},
@@ -336,7 +319,6 @@ static struct platform_driver altera_gpio_driver = {
.of_match_table = altera_gpio_of_match,
},
.probe = altera_gpio_probe,
- .remove_new = altera_gpio_remove,
};
static int __init altera_gpio_init(void)
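The altera conversion above replaces the of_mm_gpio_chip wrapper with a plain gpio_chip plus devm-managed resources, which is why the driver loses its remove callback and drvdata handling. A minimal sketch of that pattern follows; the foo_* names are illustrative placeholders, not part of this driver:

struct foo_gpio {
        struct gpio_chip gc;
        void __iomem *regs;
};

static int foo_gpio_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct foo_gpio *priv;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* devm maps the registers and unmaps them again on unbind */
        priv->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->regs))
                return PTR_ERR(priv->regs);

        priv->gc.parent = dev;
        priv->gc.base = -1;

        /* devm also unregisters the chip, so no .remove is needed */
        return devm_gpiochip_add_data(dev, &priv->gc, priv);
}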
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index 0a2ea9db4682..b70036587d9c 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -106,7 +106,7 @@ static int pt_gpio_probe(struct platform_device *pdev)
pt_gpio->gc.free = pt_gpio_free;
pt_gpio->gc.ngpio = (uintptr_t)device_get_match_data(dev);
- ret = gpiochip_add_data(&pt_gpio->gc, pt_gpio);
+ ret = devm_gpiochip_add_data(dev, &pt_gpio->gc, pt_gpio);
if (ret) {
dev_err(dev, "Failed to register GPIO lib\n");
return ret;
@@ -122,13 +122,6 @@ static int pt_gpio_probe(struct platform_device *pdev)
return ret;
}
-static void pt_gpio_remove(struct platform_device *pdev)
-{
- struct pt_gpio_chip *pt_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&pt_gpio->gc);
-}
-
static const struct acpi_device_id pt_gpio_acpi_match[] = {
{ "AMDF030", PT_TOTAL_GPIO },
{ "AMDIF030", PT_TOTAL_GPIO },
@@ -143,7 +136,6 @@ static struct platform_driver pt_gpio_driver = {
.acpi_match_table = ACPI_PTR(pt_gpio_acpi_match),
},
.probe = pt_gpio_probe,
- .remove_new = pt_gpio_remove,
};
module_platform_driver(pt_gpio_driver);
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 72755fee6478..34eb26298e32 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -420,7 +420,7 @@ static void aspeed_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
int offset;
irqd_to_aspeed_sgpio_data(d, &gpio, &bank, &bit, &offset);
- seq_printf(p, dev_name(gpio->dev));
+ seq_puts(p, dev_name(gpio->dev));
}
static const struct irq_chip aspeed_sgpio_irq_chip = {
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 04c03402db6d..40c1bd80f8b0 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -30,6 +30,27 @@
#include <linux/gpio/consumer.h>
#include "gpiolib.h"
+/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
+
+#define GPIO_G7_IRQ_STS_BASE 0x100
+#define GPIO_G7_IRQ_STS_OFFSET(x) (GPIO_G7_IRQ_STS_BASE + (x) * 0x4)
+#define GPIO_G7_CTRL_REG_BASE 0x180
+#define GPIO_G7_CTRL_REG_OFFSET(x) (GPIO_G7_CTRL_REG_BASE + (x) * 0x4)
+#define GPIO_G7_CTRL_OUT_DATA BIT(0)
+#define GPIO_G7_CTRL_DIR BIT(1)
+#define GPIO_G7_CTRL_IRQ_EN BIT(2)
+#define GPIO_G7_CTRL_IRQ_TYPE0 BIT(3)
+#define GPIO_G7_CTRL_IRQ_TYPE1 BIT(4)
+#define GPIO_G7_CTRL_IRQ_TYPE2 BIT(5)
+#define GPIO_G7_CTRL_RST_TOLERANCE BIT(6)
+#define GPIO_G7_CTRL_DEBOUNCE_SEL1 BIT(7)
+#define GPIO_G7_CTRL_DEBOUNCE_SEL2 BIT(8)
+#define GPIO_G7_CTRL_INPUT_MASK BIT(9)
+#define GPIO_G7_CTRL_IRQ_STS BIT(12)
+#define GPIO_G7_CTRL_IN_DATA BIT(13)
+
struct aspeed_bank_props {
unsigned int bank;
u32 input;
@@ -39,6 +60,10 @@ struct aspeed_bank_props {
struct aspeed_gpio_config {
unsigned int nr_gpios;
const struct aspeed_bank_props *props;
+ const struct aspeed_gpio_llops *llops;
+ const int *debounce_timers_array;
+ int debounce_timers_num;
+ bool require_dcache;
};
/*
@@ -77,7 +102,6 @@ struct aspeed_gpio_bank {
uint16_t debounce_regs;
uint16_t tolerance_regs;
uint16_t cmdsrc_regs;
- const char names[4][3];
};
/*
@@ -92,6 +116,22 @@ struct aspeed_gpio_bank {
*/
static const int debounce_timers[4] = { 0x00, 0x50, 0x54, 0x58 };
+static const int g7_debounce_timers[4] = { 0x00, 0x00, 0x04, 0x08 };
+
+/*
+ * The debounce timers array configures the debounce timer settings. Here is how it works:
+ * Array value: offset used to configure the debounce timer.
+ * Array index: corresponds to the debounce setting registers.
+ * The array follows this pattern for the debounce setting registers:
+ * Index 0: no debounce timer is set;
+ * the value is irrelevant (don't care).
+ * Index 1: debounce setting #2 is 1 and debounce setting #1 is 0.
+ * Value: offset for configuring debounce timer 0 (g4: 0x50, g7: 0x00)
+ * Index 2: debounce setting #2 is 0 and debounce setting #1 is 1.
+ * Value: offset for configuring debounce timer 1 (g4: 0x54, g7: 0x04)
+ * Index 3: debounce setting #2 is 1 and debounce setting #1 is 1.
+ * Value: offset for configuring debounce timer 2 (g4: 0x58, g7: 0x08)
+ */
static const struct aspeed_gpio_copro_ops *copro_ops;
static void *copro_data;
@@ -104,7 +144,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0040,
.tolerance_regs = 0x001c,
.cmdsrc_regs = 0x0060,
- .names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
@@ -113,7 +152,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0048,
.tolerance_regs = 0x003c,
.cmdsrc_regs = 0x0068,
- .names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
@@ -122,7 +160,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x00b0,
.tolerance_regs = 0x00ac,
.cmdsrc_regs = 0x0090,
- .names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
@@ -131,7 +168,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0100,
.tolerance_regs = 0x00fc,
.cmdsrc_regs = 0x00e0,
- .names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
@@ -140,7 +176,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0130,
.tolerance_regs = 0x012c,
.cmdsrc_regs = 0x0110,
- .names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
@@ -149,7 +184,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0160,
.tolerance_regs = 0x015c,
.cmdsrc_regs = 0x0140,
- .names = { "U", "V", "W", "X" },
},
{
.val_regs = 0x01E0,
@@ -158,7 +192,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x0190,
.tolerance_regs = 0x018c,
.cmdsrc_regs = 0x0170,
- .names = { "Y", "Z", "AA", "AB" },
},
{
.val_regs = 0x01e8,
@@ -167,7 +200,6 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.debounce_regs = 0x01c0,
.tolerance_regs = 0x01bc,
.cmdsrc_regs = 0x01a0,
- .names = { "AC", "", "", "" },
},
};
@@ -187,6 +219,19 @@ enum aspeed_gpio_reg {
reg_cmdsrc1,
};
+struct aspeed_gpio_llops {
+ void (*reg_bit_set)(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg, bool val);
+ bool (*reg_bit_get)(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg);
+ int (*reg_bank_get)(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg);
+ void (*privilege_ctrl)(struct aspeed_gpio *gpio, unsigned int offset, int owner);
+ void (*privilege_init)(struct aspeed_gpio *gpio);
+ bool (*copro_request)(struct aspeed_gpio *gpio, unsigned int offset);
+ void (*copro_release)(struct aspeed_gpio *gpio, unsigned int offset);
+};
+
#define GPIO_VAL_VALUE 0x00
#define GPIO_VAL_DIR 0x04
@@ -207,9 +252,9 @@ enum aspeed_gpio_reg {
#define GPIO_CMDSRC_RESERVED 3
/* This will be resolved at compile time */
-static inline void __iomem *bank_reg(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- const enum aspeed_gpio_reg reg)
+static void __iomem *aspeed_gpio_g4_bank_reg(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ const enum aspeed_gpio_reg reg)
{
switch (reg) {
case reg_val:
@@ -242,14 +287,43 @@ static inline void __iomem *bank_reg(struct aspeed_gpio *gpio,
BUG();
}
+static u32 aspeed_gpio_g7_reg_mask(const enum aspeed_gpio_reg reg)
+{
+ switch (reg) {
+ case reg_val:
+ return GPIO_G7_CTRL_OUT_DATA;
+ case reg_dir:
+ return GPIO_G7_CTRL_DIR;
+ case reg_irq_enable:
+ return GPIO_G7_CTRL_IRQ_EN;
+ case reg_irq_type0:
+ return GPIO_G7_CTRL_IRQ_TYPE0;
+ case reg_irq_type1:
+ return GPIO_G7_CTRL_IRQ_TYPE1;
+ case reg_irq_type2:
+ return GPIO_G7_CTRL_IRQ_TYPE2;
+ case reg_tolerance:
+ return GPIO_G7_CTRL_RST_TOLERANCE;
+ case reg_debounce_sel1:
+ return GPIO_G7_CTRL_DEBOUNCE_SEL1;
+ case reg_debounce_sel2:
+ return GPIO_G7_CTRL_DEBOUNCE_SEL2;
+ case reg_rdata:
+ return GPIO_G7_CTRL_OUT_DATA;
+ case reg_irq_status:
+ return GPIO_G7_CTRL_IRQ_STS;
+ case reg_cmdsrc0:
+ case reg_cmdsrc1:
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
#define GPIO_BANK(x) ((x) >> 5)
#define GPIO_OFFSET(x) ((x) & 0x1f)
#define GPIO_BIT(x) BIT(GPIO_OFFSET(x))
-#define _GPIO_SET_DEBOUNCE(t, o, i) ((!!((t) & BIT(i))) << GPIO_OFFSET(o))
-#define GPIO_SET_DEBOUNCE1(t, o) _GPIO_SET_DEBOUNCE(t, o, 1)
-#define GPIO_SET_DEBOUNCE2(t, o) _GPIO_SET_DEBOUNCE(t, o, 0)
-
static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
{
unsigned int bank = GPIO_BANK(offset);
@@ -280,11 +354,11 @@ static inline const struct aspeed_bank_props *find_bank_props(
static inline bool have_gpio(struct aspeed_gpio *gpio, unsigned int offset)
{
const struct aspeed_bank_props *props = find_bank_props(gpio, offset);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- unsigned int group = GPIO_OFFSET(offset) / 8;
- return bank->names[group][0] != '\0' &&
- (!props || ((props->input | props->output) & GPIO_BIT(offset)));
+ if (offset >= gpio->chip.ngpio)
+ return false;
+
+ return (!props || ((props->input | props->output) & GPIO_BIT(offset)));
}
static inline bool have_input(struct aspeed_gpio *gpio, unsigned int offset)
@@ -304,108 +378,49 @@ static inline bool have_output(struct aspeed_gpio *gpio, unsigned int offset)
return !props || (props->output & GPIO_BIT(offset));
}
-static void aspeed_gpio_change_cmd_source(struct aspeed_gpio *gpio,
- const struct aspeed_gpio_bank *bank,
- int bindex, int cmdsrc)
+static void aspeed_gpio_change_cmd_source(struct aspeed_gpio *gpio, unsigned int offset, int cmdsrc)
{
- void __iomem *c0 = bank_reg(gpio, bank, reg_cmdsrc0);
- void __iomem *c1 = bank_reg(gpio, bank, reg_cmdsrc1);
- u32 bit, reg;
-
- /*
- * Each register controls 4 banks, so take the bottom 2
- * bits of the bank index, and use them to select the
- * right control bit (0, 8, 16 or 24).
- */
- bit = BIT((bindex & 3) << 3);
-
- /* Source 1 first to avoid illegal 11 combination */
- reg = ioread32(c1);
- if (cmdsrc & 2)
- reg |= bit;
- else
- reg &= ~bit;
- iowrite32(reg, c1);
-
- /* Then Source 0 */
- reg = ioread32(c0);
- if (cmdsrc & 1)
- reg |= bit;
- else
- reg &= ~bit;
- iowrite32(reg, c0);
+ if (gpio->config->llops->privilege_ctrl)
+ gpio->config->llops->privilege_ctrl(gpio, offset, cmdsrc);
}
static bool aspeed_gpio_copro_request(struct aspeed_gpio *gpio,
unsigned int offset)
{
- const struct aspeed_gpio_bank *bank = to_bank(offset);
-
- if (!copro_ops || !gpio->cf_copro_bankmap)
- return false;
- if (!gpio->cf_copro_bankmap[offset >> 3])
- return false;
- if (!copro_ops->request_access)
- return false;
-
- /* Pause the coprocessor */
- copro_ops->request_access(copro_data);
-
- /* Change command source back to ARM */
- aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3, GPIO_CMDSRC_ARM);
-
- /* Update cache */
- gpio->dcache[GPIO_BANK(offset)] = ioread32(bank_reg(gpio, bank, reg_rdata));
+ if (gpio->config->llops->copro_request)
+ return gpio->config->llops->copro_request(gpio, offset);
- return true;
+ return false;
}
static void aspeed_gpio_copro_release(struct aspeed_gpio *gpio,
unsigned int offset)
{
- const struct aspeed_gpio_bank *bank = to_bank(offset);
-
- if (!copro_ops || !gpio->cf_copro_bankmap)
- return;
- if (!gpio->cf_copro_bankmap[offset >> 3])
- return;
- if (!copro_ops->release_access)
- return;
-
- /* Change command source back to ColdFire */
- aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3,
- GPIO_CMDSRC_COLDFIRE);
+ if (gpio->config->llops->copro_release)
+ gpio->config->llops->copro_release(gpio, offset);
+}
- /* Restart the coprocessor */
- copro_ops->release_access(copro_data);
+static bool aspeed_gpio_support_copro(struct aspeed_gpio *gpio)
+{
+ return gpio->config->llops->copro_request && gpio->config->llops->copro_release &&
+ gpio->config->llops->privilege_ctrl && gpio->config->llops->privilege_init;
}
static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- return !!(ioread32(bank_reg(gpio, bank, reg_val)) & GPIO_BIT(offset));
+ return gpio->config->llops->reg_bit_get(gpio, offset, reg_val);
}
static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
int val)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- void __iomem *addr;
- u32 reg;
-
- addr = bank_reg(gpio, bank, reg_val);
- reg = gpio->dcache[GPIO_BANK(offset)];
- if (val)
- reg |= GPIO_BIT(offset);
- else
- reg &= ~GPIO_BIT(offset);
- gpio->dcache[GPIO_BANK(offset)] = reg;
-
- iowrite32(reg, addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_val, val);
+ /* Flush write */
+ gpio->config->llops->reg_bit_get(gpio, offset, reg_val);
}
static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
@@ -413,7 +428,7 @@ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
unsigned long flags;
- bool copro;
+ bool copro = false;
raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
@@ -428,22 +443,16 @@ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- void __iomem *addr = bank_reg(gpio, bank, reg_dir);
unsigned long flags;
- bool copro;
- u32 reg;
+ bool copro = false;
if (!have_input(gpio, offset))
return -ENOTSUPP;
raw_spin_lock_irqsave(&gpio->lock, flags);
- reg = ioread32(addr);
- reg &= ~GPIO_BIT(offset);
-
copro = aspeed_gpio_copro_request(gpio, offset);
- iowrite32(reg, addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_dir, 0);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
@@ -456,23 +465,17 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
unsigned int offset, int val)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- void __iomem *addr = bank_reg(gpio, bank, reg_dir);
unsigned long flags;
- bool copro;
- u32 reg;
+ bool copro = false;
if (!have_output(gpio, offset))
return -ENOTSUPP;
raw_spin_lock_irqsave(&gpio->lock, flags);
- reg = ioread32(addr);
- reg |= GPIO_BIT(offset);
-
copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
- iowrite32(reg, addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_dir, 1);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
@@ -484,7 +487,6 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
unsigned long flags;
u32 val;
@@ -496,7 +498,7 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
raw_spin_lock_irqsave(&gpio->lock, flags);
- val = ioread32(bank_reg(gpio, bank, reg_dir)) & GPIO_BIT(offset);
+ val = gpio->config->llops->reg_bit_get(gpio, offset, reg_dir);
raw_spin_unlock_irqrestore(&gpio->lock, flags);
@@ -505,8 +507,7 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
struct aspeed_gpio **gpio,
- const struct aspeed_gpio_bank **bank,
- u32 *bit, int *offset)
+ int *offset)
{
struct aspeed_gpio *internal;
@@ -519,32 +520,25 @@ static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
return -ENOTSUPP;
*gpio = internal;
- *bank = to_bank(*offset);
- *bit = GPIO_BIT(*offset);
return 0;
}
static void aspeed_gpio_irq_ack(struct irq_data *d)
{
- const struct aspeed_gpio_bank *bank;
struct aspeed_gpio *gpio;
unsigned long flags;
- void __iomem *status_addr;
int rc, offset;
- bool copro;
- u32 bit;
+ bool copro = false;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset);
if (rc)
return;
- status_addr = bank_reg(gpio, bank, reg_irq_status);
-
raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
- iowrite32(bit, status_addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_status, 1);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
@@ -553,20 +547,15 @@ static void aspeed_gpio_irq_ack(struct irq_data *d)
static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
{
- const struct aspeed_gpio_bank *bank;
struct aspeed_gpio *gpio;
unsigned long flags;
- u32 reg, bit;
- void __iomem *addr;
int rc, offset;
- bool copro;
+ bool copro = false;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset);
if (rc)
return;
- addr = bank_reg(gpio, bank, reg_irq_enable);
-
/* Unmasking the IRQ */
if (set)
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
@@ -574,12 +563,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
- reg = ioread32(addr);
- if (set)
- reg |= bit;
- else
- reg &= ~bit;
- iowrite32(reg, addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_enable, set);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
@@ -605,34 +589,31 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
u32 type0 = 0;
u32 type1 = 0;
u32 type2 = 0;
- u32 bit, reg;
- const struct aspeed_gpio_bank *bank;
irq_flow_handler_t handler;
struct aspeed_gpio *gpio;
unsigned long flags;
- void __iomem *addr;
int rc, offset;
- bool copro;
+ bool copro = false;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset);
if (rc)
return -EINVAL;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_BOTH:
- type2 |= bit;
+ type2 = 1;
fallthrough;
case IRQ_TYPE_EDGE_RISING:
- type0 |= bit;
+ type0 = 1;
fallthrough;
case IRQ_TYPE_EDGE_FALLING:
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
- type0 |= bit;
+ type0 = 1;
fallthrough;
case IRQ_TYPE_LEVEL_LOW:
- type1 |= bit;
+ type1 = 1;
handler = handle_level_irq;
break;
default:
@@ -642,20 +623,9 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
- addr = bank_reg(gpio, bank, reg_irq_type0);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type0;
- iowrite32(reg, addr);
-
- addr = bank_reg(gpio, bank, reg_irq_type1);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type1;
- iowrite32(reg, addr);
-
- addr = bank_reg(gpio, bank, reg_irq_type2);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type2;
- iowrite32(reg, addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type0, type0);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type1, type1);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type2, type2);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
@@ -670,7 +640,6 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *ic = irq_desc_get_chip(desc);
- struct aspeed_gpio *data = gpiochip_get_data(gc);
unsigned int i, p, banks;
unsigned long reg;
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
@@ -679,9 +648,7 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
banks = DIV_ROUND_UP(gpio->chip.ngpio, 32);
for (i = 0; i < banks; i++) {
- const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
-
- reg = ioread32(bank_reg(data, bank, reg_irq_status));
+ reg = gpio->config->llops->reg_bank_get(gpio, i * 32, reg_irq_status);
for_each_set_bit(p, &reg, 32)
generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
@@ -720,23 +687,12 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
unsigned long flags;
- void __iomem *treg;
- bool copro;
- u32 val;
-
- treg = bank_reg(gpio, to_bank(offset), reg_tolerance);
+ bool copro = false;
raw_spin_lock_irqsave(&gpio->lock, flags);
copro = aspeed_gpio_copro_request(gpio, offset);
- val = readl(treg);
-
- if (enable)
- val |= GPIO_BIT(offset);
- else
- val &= ~GPIO_BIT(offset);
-
- writel(val, treg);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_tolerance, enable);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
@@ -830,21 +786,11 @@ static inline bool timer_allocation_registered(struct aspeed_gpio *gpio,
static void configure_timer(struct aspeed_gpio *gpio, unsigned int offset,
unsigned int timer)
{
- const struct aspeed_gpio_bank *bank = to_bank(offset);
- const u32 mask = GPIO_BIT(offset);
- void __iomem *addr;
- u32 val;
-
/* Note: Debounce timer isn't under control of the command
* source registers, so no need to sync with the coprocessor
*/
- addr = bank_reg(gpio, bank, reg_debounce_sel1);
- val = ioread32(addr);
- iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE1(timer, offset), addr);
-
- addr = bank_reg(gpio, bank, reg_debounce_sel2);
- val = ioread32(addr);
- iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE2(timer, offset), addr);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_debounce_sel1, !!(timer & BIT(1)));
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_debounce_sel2, !!(timer & BIT(0)));
}
static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -875,15 +821,15 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
}
/* Try to find a timer already configured for the debounce period */
- for (i = 1; i < ARRAY_SIZE(debounce_timers); i++) {
+ for (i = 1; i < gpio->config->debounce_timers_num; i++) {
u32 cycles;
- cycles = ioread32(gpio->base + debounce_timers[i]);
+ cycles = ioread32(gpio->base + gpio->config->debounce_timers_array[i]);
if (requested_cycles == cycles)
break;
}
- if (i == ARRAY_SIZE(debounce_timers)) {
+ if (i == gpio->config->debounce_timers_num) {
int j;
/*
@@ -897,8 +843,8 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
if (j == ARRAY_SIZE(gpio->timer_users)) {
dev_warn(chip->parent,
- "Debounce timers exhausted, cannot debounce for period %luus\n",
- usecs);
+ "Debounce timers exhausted, cannot debounce for period %luus\n",
+ usecs);
rc = -EPERM;
@@ -914,7 +860,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
i = j;
- iowrite32(requested_cycles, gpio->base + debounce_timers[i]);
+ iowrite32(requested_cycles, gpio->base + gpio->config->debounce_timers_array[i]);
}
if (WARN(i == 0, "Cannot register index of disabled timer\n")) {
@@ -1017,6 +963,9 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
const struct aspeed_gpio_bank *bank = to_bank(offset);
unsigned long flags;
+ if (!aspeed_gpio_support_copro(gpio))
+ return -EOPNOTSUPP;
+
if (!gpio->cf_copro_bankmap)
gpio->cf_copro_bankmap = kzalloc(gpio->chip.ngpio >> 3, GFP_KERNEL);
if (!gpio->cf_copro_bankmap)
@@ -1036,7 +985,7 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
/* Switch command source */
if (gpio->cf_copro_bankmap[bindex] == 1)
- aspeed_gpio_change_cmd_source(gpio, bank, bindex,
+ aspeed_gpio_change_cmd_source(gpio, offset,
GPIO_CMDSRC_COLDFIRE);
if (vreg_offset)
@@ -1060,9 +1009,11 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
struct gpio_chip *chip = gpiod_to_chip(desc);
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
- const struct aspeed_gpio_bank *bank = to_bank(offset);
unsigned long flags;
+ if (!aspeed_gpio_support_copro(gpio))
+ return -EOPNOTSUPP;
+
if (!gpio->cf_copro_bankmap)
return -ENXIO;
@@ -1081,7 +1032,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
/* Switch command source */
if (gpio->cf_copro_bankmap[bindex] == 0)
- aspeed_gpio_change_cmd_source(gpio, bank, bindex,
+ aspeed_gpio_change_cmd_source(gpio, offset,
GPIO_CMDSRC_ARM);
bail:
raw_spin_unlock_irqrestore(&gpio->lock, flags);
@@ -1091,16 +1042,14 @@ EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio);
static void aspeed_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
- const struct aspeed_gpio_bank *bank;
struct aspeed_gpio *gpio;
- u32 bit;
int rc, offset;
- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset);
+ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset);
if (rc)
return;
- seq_printf(p, dev_name(gpio->dev));
+ seq_puts(p, dev_name(gpio->dev));
}
static const struct irq_chip aspeed_gpio_irq_chip = {
@@ -1113,6 +1062,173 @@ static const struct irq_chip aspeed_gpio_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
+static void aspeed_g4_reg_bit_set(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg, bool val)
+{
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = aspeed_gpio_g4_bank_reg(gpio, bank, reg);
+ u32 temp;
+
+ if (reg == reg_val)
+ temp = gpio->dcache[GPIO_BANK(offset)];
+ else
+ temp = ioread32(addr);
+
+ if (val)
+ temp |= GPIO_BIT(offset);
+ else
+ temp &= ~GPIO_BIT(offset);
+
+ if (reg == reg_val)
+ gpio->dcache[GPIO_BANK(offset)] = temp;
+ iowrite32(temp, addr);
+}
+
+static bool aspeed_g4_reg_bit_get(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg)
+{
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = aspeed_gpio_g4_bank_reg(gpio, bank, reg);
+
+ return !!(ioread32(addr) & GPIO_BIT(offset));
+}
+
+static int aspeed_g4_reg_bank_get(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg)
+{
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ void __iomem *addr = aspeed_gpio_g4_bank_reg(gpio, bank, reg);
+
+ if (reg == reg_rdata || reg == reg_irq_status)
+ return ioread32(addr);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void aspeed_g4_privilege_ctrl(struct aspeed_gpio *gpio, unsigned int offset, int cmdsrc)
+{
+ /*
+ * The command source register is only valid in bits 0, 8, 16, and 24, so we use
+ * (offset & ~(0x7)) to ensure that reg_bit_set always targets a valid bit.
+ */
+ /* Source 1 first to avoid illegal 11 combination */
+ aspeed_g4_reg_bit_set(gpio, offset & ~(0x7), reg_cmdsrc1, !!(cmdsrc & BIT(1)));
+ /* Then Source 0 */
+ aspeed_g4_reg_bit_set(gpio, offset & ~(0x7), reg_cmdsrc0, !!(cmdsrc & BIT(0)));
+}
+
+static void aspeed_g4_privilege_init(struct aspeed_gpio *gpio)
+{
+ u32 i;
+
+ /* Switch all command sources to the ARM by default */
+ for (i = 0; i < DIV_ROUND_UP(gpio->chip.ngpio, 32); i++) {
+ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 0, GPIO_CMDSRC_ARM);
+ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 8, GPIO_CMDSRC_ARM);
+ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 16, GPIO_CMDSRC_ARM);
+ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 24, GPIO_CMDSRC_ARM);
+ }
+}
+
+static bool aspeed_g4_copro_request(struct aspeed_gpio *gpio, unsigned int offset)
+{
+ if (!copro_ops || !gpio->cf_copro_bankmap)
+ return false;
+ if (!gpio->cf_copro_bankmap[offset >> 3])
+ return false;
+ if (!copro_ops->request_access)
+ return false;
+
+ /* Pause the coprocessor */
+ copro_ops->request_access(copro_data);
+
+ /* Change command source back to ARM */
+ aspeed_g4_privilege_ctrl(gpio, offset, GPIO_CMDSRC_ARM);
+
+ /* Update cache */
+ gpio->dcache[GPIO_BANK(offset)] = aspeed_g4_reg_bank_get(gpio, offset, reg_rdata);
+
+ return true;
+}
+
+static void aspeed_g4_copro_release(struct aspeed_gpio *gpio, unsigned int offset)
+{
+ if (!copro_ops || !gpio->cf_copro_bankmap)
+ return;
+ if (!gpio->cf_copro_bankmap[offset >> 3])
+ return;
+ if (!copro_ops->release_access)
+ return;
+
+ /* Change command source back to ColdFire */
+ aspeed_g4_privilege_ctrl(gpio, offset, GPIO_CMDSRC_COLDFIRE);
+
+ /* Restart the coprocessor */
+ copro_ops->release_access(copro_data);
+}
+
+static const struct aspeed_gpio_llops aspeed_g4_llops = {
+ .reg_bit_set = aspeed_g4_reg_bit_set,
+ .reg_bit_get = aspeed_g4_reg_bit_get,
+ .reg_bank_get = aspeed_g4_reg_bank_get,
+ .privilege_ctrl = aspeed_g4_privilege_ctrl,
+ .privilege_init = aspeed_g4_privilege_init,
+ .copro_request = aspeed_g4_copro_request,
+ .copro_release = aspeed_g4_copro_release,
+};
+
+static void aspeed_g7_reg_bit_set(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg, bool val)
+{
+ u32 mask = aspeed_gpio_g7_reg_mask(reg);
+ void __iomem *addr = gpio->base + GPIO_G7_CTRL_REG_OFFSET(offset);
+ u32 write_val;
+
+ if (mask) {
+ write_val = (ioread32(addr) & ~(mask)) | field_prep(mask, val);
+ iowrite32(write_val, addr);
+ }
+}
+
+static bool aspeed_g7_reg_bit_get(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg)
+{
+ u32 mask = aspeed_gpio_g7_reg_mask(reg);
+ void __iomem *addr;
+
+ addr = gpio->base + GPIO_G7_CTRL_REG_OFFSET(offset);
+ if (reg == reg_val)
+ mask = GPIO_G7_CTRL_IN_DATA;
+
+ if (mask)
+ return field_get(mask, ioread32(addr));
+ else
+ return 0;
+}
+
+static int aspeed_g7_reg_bank_get(struct aspeed_gpio *gpio, unsigned int offset,
+ const enum aspeed_gpio_reg reg)
+{
+ void __iomem *addr;
+
+ if (reg == reg_irq_status) {
+ addr = gpio->base + GPIO_G7_IRQ_STS_OFFSET(offset >> 5);
+ return ioread32(addr);
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct aspeed_gpio_llops aspeed_g7_llops = {
+ .reg_bit_set = aspeed_g7_reg_bit_set,
+ .reg_bit_get = aspeed_g7_reg_bit_get,
+ .reg_bank_get = aspeed_g7_reg_bank_get,
+ .privilege_ctrl = NULL,
+ .privilege_init = NULL,
+ .copro_request = NULL,
+ .copro_release = NULL,
+};
+
/*
* Any banks not specified in a struct aspeed_bank_props array are assumed to
* have the properties:
@@ -1129,7 +1245,14 @@ static const struct aspeed_bank_props ast2400_bank_props[] = {
static const struct aspeed_gpio_config ast2400_config =
/* 220 for simplicity, really 216 with two 4-GPIO holes, four at end */
- { .nr_gpios = 220, .props = ast2400_bank_props, };
+ {
+ .nr_gpios = 220,
+ .props = ast2400_bank_props,
+ .llops = &aspeed_g4_llops,
+ .debounce_timers_array = debounce_timers,
+ .debounce_timers_num = ARRAY_SIZE(debounce_timers),
+ .require_dcache = true,
+ };
static const struct aspeed_bank_props ast2500_bank_props[] = {
/* input output */
@@ -1141,7 +1264,14 @@ static const struct aspeed_bank_props ast2500_bank_props[] = {
static const struct aspeed_gpio_config ast2500_config =
/* 232 for simplicity, actual number is 228 (4-GPIO hole in GPIOAB) */
- { .nr_gpios = 232, .props = ast2500_bank_props, };
+ {
+ .nr_gpios = 232,
+ .props = ast2500_bank_props,
+ .llops = &aspeed_g4_llops,
+ .debounce_timers_array = debounce_timers,
+ .debounce_timers_num = ARRAY_SIZE(debounce_timers),
+ .require_dcache = true,
+ };
static const struct aspeed_bank_props ast2600_bank_props[] = {
/* input output */
@@ -1157,17 +1287,48 @@ static const struct aspeed_gpio_config ast2600_config =
* We expect ngpio to be set in the device tree and this is a fallback
* option.
*/
- { .nr_gpios = 208, .props = ast2600_bank_props, };
+ {
+ .nr_gpios = 208,
+ .props = ast2600_bank_props,
+ .llops = &aspeed_g4_llops,
+ .debounce_timers_array = debounce_timers,
+ .debounce_timers_num = ARRAY_SIZE(debounce_timers),
+ .require_dcache = true,
+ };
+
+static const struct aspeed_bank_props ast2700_bank_props[] = {
+ /* input output */
+ { 1, 0x0fffffff, 0x0fffffff }, /* E/F/G/H, 4-GPIO hole */
+ { 6, 0x00ffffff, 0x00ff0000 }, /* Y/Z/AA */
+ {},
+};
+
+static const struct aspeed_gpio_config ast2700_config =
+ /*
+ * ast2700 has two controllers, one with 212 GPIOs and one with 16 GPIOs.
+ * 216 for simplicity, actual number is 212 (4-GPIO hole in GPIOH).
+ * We expect ngpio to be set in the device tree and this is a fallback
+ * option.
+ */
+ {
+ .nr_gpios = 216,
+ .props = ast2700_bank_props,
+ .llops = &aspeed_g7_llops,
+ .debounce_timers_array = g7_debounce_timers,
+ .debounce_timers_num = ARRAY_SIZE(g7_debounce_timers),
+ .require_dcache = false,
+ };
static const struct of_device_id aspeed_gpio_of_table[] = {
{ .compatible = "aspeed,ast2400-gpio", .data = &ast2400_config, },
{ .compatible = "aspeed,ast2500-gpio", .data = &ast2500_config, },
{ .compatible = "aspeed,ast2600-gpio", .data = &ast2600_config, },
+ { .compatible = "aspeed,ast2700-gpio", .data = &ast2700_config, },
{}
};
MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table);
-static int __init aspeed_gpio_probe(struct platform_device *pdev)
+static int aspeed_gpio_probe(struct platform_device *pdev)
{
const struct of_device_id *gpio_id;
struct gpio_irq_chip *girq;
@@ -1191,7 +1352,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
if (!gpio_id)
return -EINVAL;
- gpio->clk = of_clk_get(pdev->dev.of_node, 0);
+ gpio->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(gpio->clk)) {
dev_warn(&pdev->dev,
"Failed to get clock from devicetree, debouncing disabled\n");
@@ -1200,6 +1361,10 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->config = gpio_id->data;
+ if (!gpio->config->llops->reg_bit_set || !gpio->config->llops->reg_bit_get ||
+ !gpio->config->llops->reg_bank_get)
+ return -EINVAL;
+
gpio->chip.parent = &pdev->dev;
err = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpio);
gpio->chip.ngpio = (u16) ngpio;
@@ -1216,27 +1381,23 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
- /* Allocate a cache of the output registers */
- banks = DIV_ROUND_UP(gpio->chip.ngpio, 32);
- gpio->dcache = devm_kcalloc(&pdev->dev,
- banks, sizeof(u32), GFP_KERNEL);
- if (!gpio->dcache)
- return -ENOMEM;
-
- /*
- * Populate it with initial values read from the HW and switch
- * all command sources to the ARM by default
- */
- for (i = 0; i < banks; i++) {
- const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i];
- void __iomem *addr = bank_reg(gpio, bank, reg_rdata);
- gpio->dcache[i] = ioread32(addr);
- aspeed_gpio_change_cmd_source(gpio, bank, 0, GPIO_CMDSRC_ARM);
- aspeed_gpio_change_cmd_source(gpio, bank, 1, GPIO_CMDSRC_ARM);
- aspeed_gpio_change_cmd_source(gpio, bank, 2, GPIO_CMDSRC_ARM);
- aspeed_gpio_change_cmd_source(gpio, bank, 3, GPIO_CMDSRC_ARM);
+ if (gpio->config->require_dcache) {
+ /* Allocate a cache of the output registers */
+ banks = DIV_ROUND_UP(gpio->chip.ngpio, 32);
+ gpio->dcache = devm_kcalloc(&pdev->dev, banks, sizeof(u32), GFP_KERNEL);
+ if (!gpio->dcache)
+ return -ENOMEM;
+ /*
+ * Populate it with initial values read from the HW
+ */
+ for (i = 0; i < banks; i++)
+ gpio->dcache[i] =
+ gpio->config->llops->reg_bank_get(gpio, (i << 5), reg_rdata);
}
+ if (gpio->config->llops->privilege_init)
+ gpio->config->llops->privilege_init(gpio);
+
/* Set up an irqchip */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -1268,13 +1429,14 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
}
static struct platform_driver aspeed_gpio_driver = {
+ .probe = aspeed_gpio_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_gpio_of_table,
},
};
-module_platform_driver_probe(aspeed_gpio_driver, aspeed_gpio_probe);
+module_platform_driver(aspeed_gpio_driver);
MODULE_DESCRIPTION("Aspeed GPIO Driver");
MODULE_LICENSE("GPL");
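The aspeed rework funnels every register access through a per-generation aspeed_gpio_llops table picked via the matched config, so the AST2700 (G7) per-pin control registers and the older banked layout coexist behind one interface. A reduced sketch of the dispatch idea, with illustrative foo_* names standing in for the real structures:

struct foo_gpio;

struct foo_llops {
        void (*bit_set)(struct foo_gpio *g, unsigned int offset, bool val);
        bool (*bit_get)(struct foo_gpio *g, unsigned int offset);
};

struct foo_config {
        const struct foo_llops *llops;  /* chosen per compatible string */
};

struct foo_gpio {
        void __iomem *base;
        const struct foo_config *config;
};

static void foo_set_value(struct foo_gpio *g, unsigned int offset, bool val)
{
        /* Callers never touch registers directly; the variant table does. */
        g->config->llops->bit_set(g, offset, val);
}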
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 5762e517338e..491b529d25f8 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -751,7 +751,7 @@ static struct platform_driver brcmstb_gpio_driver = {
.pm = &brcmstb_gpio_pm_ops,
},
.probe = brcmstb_gpio_probe,
- .remove_new = brcmstb_gpio_remove,
+ .remove = brcmstb_gpio_remove,
.shutdown = brcmstb_gpio_shutdown,
};
module_platform_driver(brcmstb_gpio_driver);
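This and the later .remove_new hunks are mechanical renames: platform drivers temporarily used .remove_new while the remove callback migrated from returning int to returning void, and the field has since been renamed back to .remove. A sketch with a hypothetical foo driver:

static int foo_probe(struct platform_device *pdev)
{
        return 0;
}

static void foo_remove(struct platform_device *pdev)
{
        /* teardown that cannot fail; the callback now returns void */
}

static struct platform_driver foo_driver = {
        .driver = { .name = "foo" },
        .probe  = foo_probe,
        .remove = foo_remove,   /* spelled .remove_new during the transition */
};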
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index 1b8ffd0ddab6..e9dd2564c54f 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -277,7 +277,7 @@ static struct platform_driver cdns_gpio_driver = {
.of_match_table = cdns_of_ids,
},
.probe = cdns_gpio_probe,
- .remove_new = cdns_gpio_remove,
+ .remove = cdns_gpio_remove,
};
module_platform_driver(cdns_gpio_driver);
diff --git a/drivers/gpio/gpio-cgbc.c b/drivers/gpio/gpio-cgbc.c
new file mode 100644
index 000000000000..9213faa11522
--- /dev/null
+++ b/drivers/gpio/gpio-cgbc.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Congatec Board Controller GPIO driver
+ *
+ * Copyright (C) 2024 Bootlin
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/mfd/cgbc.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#define CGBC_GPIO_NGPIO 14
+
+#define CGBC_GPIO_CMD_GET 0x64
+#define CGBC_GPIO_CMD_SET 0x65
+#define CGBC_GPIO_CMD_DIR_GET 0x66
+#define CGBC_GPIO_CMD_DIR_SET 0x67
+
+struct cgbc_gpio_data {
+ struct gpio_chip chip;
+ struct cgbc_device_data *cgbc;
+ struct mutex lock;
+};
+
+static int cgbc_gpio_cmd(struct cgbc_device_data *cgbc,
+ u8 cmd0, u8 cmd1, u8 cmd2, u8 *value)
+{
+ u8 cmd[3] = {cmd0, cmd1, cmd2};
+
+ return cgbc_command(cgbc, cmd, sizeof(cmd), value, 1, NULL);
+}
+
+static int cgbc_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ struct cgbc_device_data *cgbc = gpio->cgbc;
+ int ret;
+ u8 val;
+
+ scoped_guard(mutex, &gpio->lock)
+ ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_GET, (offset > 7) ? 1 : 0, 0, &val);
+
+ offset %= 8;
+
+ if (ret)
+ return ret;
+ else
+ return (int)(val & (u8)BIT(offset));
+}
+
+static void __cgbc_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ struct cgbc_device_data *cgbc = gpio->cgbc;
+ u8 val;
+ int ret;
+
+ ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_GET, (offset > 7) ? 1 : 0, 0, &val);
+ if (ret)
+ return;
+
+ if (value)
+ val |= BIT(offset % 8);
+ else
+ val &= ~(BIT(offset % 8));
+
+ cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_SET, (offset > 7) ? 1 : 0, val, &val);
+}
+
+static void cgbc_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+
+ scoped_guard(mutex, &gpio->lock)
+ __cgbc_gpio_set(chip, offset, value);
+}
+
+static int cgbc_gpio_direction_set(struct gpio_chip *chip,
+ unsigned int offset, int direction)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ struct cgbc_device_data *cgbc = gpio->cgbc;
+ int ret;
+ u8 val;
+
+ ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_DIR_GET, (offset > 7) ? 1 : 0, 0, &val);
+ if (ret)
+ goto end;
+
+ if (direction == GPIO_LINE_DIRECTION_IN)
+ val &= ~(BIT(offset % 8));
+ else
+ val |= BIT(offset % 8);
+
+ ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_DIR_SET, (offset > 7) ? 1 : 0, val, &val);
+
+end:
+ return ret;
+}
+
+static int cgbc_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(mutex)(&gpio->lock);
+ return cgbc_gpio_direction_set(chip, offset, GPIO_LINE_DIRECTION_IN);
+}
+
+static int cgbc_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(mutex)(&gpio->lock);
+
+ __cgbc_gpio_set(chip, offset, value);
+ return cgbc_gpio_direction_set(chip, offset, GPIO_LINE_DIRECTION_OUT);
+}
+
+static int cgbc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ struct cgbc_device_data *cgbc = gpio->cgbc;
+ int ret;
+ u8 val;
+
+ scoped_guard(mutex, &gpio->lock)
+ ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_DIR_GET, (offset > 7) ? 1 : 0, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (val & BIT(offset % 8))
+ return GPIO_LINE_DIRECTION_OUT;
+ else
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int cgbc_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cgbc_device_data *cgbc = dev_get_drvdata(dev->parent);
+ struct cgbc_gpio_data *gpio;
+ struct gpio_chip *chip;
+ int ret;
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->cgbc = cgbc;
+
+ platform_set_drvdata(pdev, gpio);
+
+ chip = &gpio->chip;
+ chip->label = dev_name(&pdev->dev);
+ chip->owner = THIS_MODULE;
+ chip->parent = dev;
+ chip->base = -1;
+ chip->direction_input = cgbc_gpio_direction_input;
+ chip->direction_output = cgbc_gpio_direction_output;
+ chip->get_direction = cgbc_gpio_get_direction;
+ chip->get = cgbc_gpio_get;
+ chip->set = cgbc_gpio_set;
+ chip->ngpio = CGBC_GPIO_NGPIO;
+
+ ret = devm_mutex_init(dev, &gpio->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_gpiochip_add_data(dev, chip, gpio);
+ if (ret)
+ return dev_err_probe(dev, ret, "Could not register GPIO chip\n");
+
+ return 0;
+}
+
+static struct platform_driver cgbc_gpio_driver = {
+ .driver = {
+ .name = "cgbc-gpio",
+ },
+ .probe = cgbc_gpio_probe,
+};
+
+module_platform_driver(cgbc_gpio_driver);
+
+MODULE_DESCRIPTION("Congatec Board Controller GPIO Driver");
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cgbc-gpio");
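The new cgbc driver leans on the cleanup.h guard helpers: guard(mutex)(&m) holds the lock until the enclosing scope ends, while scoped_guard(mutex, &m) limits it to the statement or block that follows. A small sketch assuming only the mutex and cleanup headers:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int foo_read(struct mutex *lock, int *out)
{
        scoped_guard(mutex, lock) {
                /* the lock is held only inside this block */
                *out = 1;
        }
        /* the lock is already dropped here, on every path out of the block */
        return 0;
}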
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 76b58c70b257..8c033e8cf3c9 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -15,7 +15,6 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -159,14 +158,13 @@ static int davinci_gpio_probe(struct platform_device *pdev)
unsigned int ngpio, nbank, nirq, gpio_unbanked;
struct davinci_gpio_controller *chips;
struct device *dev = &pdev->dev;
- struct device_node *dn = dev_of_node(dev);
/*
* The gpio banks conceptually expose a segmented bitmap,
* and "ngpio" is one more than the largest zero-based
* bit index that's valid.
*/
- ret = of_property_read_u32(dn, "ti,ngpio", &ngpio);
+ ret = device_property_read_u32(dev, "ti,ngpio", &ngpio);
if (ret)
return dev_err_probe(dev, ret, "Failed to get the number of GPIOs\n");
if (ngpio == 0)
@@ -177,8 +175,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
* interrupts is equal to number of gpios else all are banked so
* number of interrupts is equal to number of banks(each with 16 gpios)
*/
- ret = of_property_read_u32(dn, "ti,davinci-gpio-unbanked",
- &gpio_unbanked);
+ ret = device_property_read_u32(dev, "ti,davinci-gpio-unbanked",
+ &gpio_unbanked);
if (ret)
return dev_err_probe(dev, ret, "Failed to get the unbanked GPIOs property\n");
@@ -662,7 +660,7 @@ static struct platform_driver davinci_gpio_driver = {
.driver = {
.name = "davinci_gpio",
.pm = pm_sleep_ptr(&davinci_gpio_dev_pm_ops),
- .of_match_table = of_match_ptr(davinci_gpio_ids),
+ .of_match_table = davinci_gpio_ids,
},
};
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 7ead1f51128a..596da59d4b13 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -512,7 +512,7 @@ static void dln2_gpio_remove(struct platform_device *pdev)
static struct platform_driver dln2_gpio_driver = {
.driver.name = "dln2-gpio",
.probe = dln2_gpio_probe,
- .remove_new = dln2_gpio_remove,
+ .remove = dln2_gpio_remove,
};
module_platform_driver(dln2_gpio_driver);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 798235791f70..43b667b41f5d 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -571,7 +571,6 @@ static void dwapb_get_irq(struct device *dev, struct fwnode_handle *fwnode,
static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
{
- struct fwnode_handle *fwnode;
struct dwapb_platform_data *pdata;
struct dwapb_port_property *pp;
int nports;
@@ -592,7 +591,7 @@ static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
pdata->nports = nports;
i = 0;
- device_for_each_child_node(dev, fwnode) {
+ device_for_each_child_node_scoped(dev, fwnode) {
pp = &pdata->properties[i++];
pp->fwnode = fwnode;
@@ -600,7 +599,6 @@ static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
pp->idx >= DWAPB_MAX_PORTS) {
dev_err(dev,
"missing/invalid port index for port%d\n", i);
- fwnode_handle_put(fwnode);
return ERR_PTR(-EINVAL);
}
@@ -694,6 +692,7 @@ static const struct acpi_device_id dwapb_acpi_match[] = {
{"HISI0181", GPIO_REG_OFFSET_V1},
{"APMC0D07", GPIO_REG_OFFSET_V1},
{"APMC0D81", GPIO_REG_OFFSET_V2},
+ {"FUJI200A", GPIO_REG_OFFSET_V1},
{ }
};
MODULE_DEVICE_TABLE(acpi, dwapb_acpi_match);
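The dwapb change swaps the plain child-node walk for the _scoped iterator, whose loop variable owns the fwnode reference and releases it automatically, so the manual fwnode_handle_put() on the error path becomes unnecessary. A sketch with a hypothetical validity check:

static int foo_count_children(struct device *dev)
{
        int n = 0;

        device_for_each_child_node_scoped(dev, child) {
                if (!fwnode_property_present(child, "reg"))
                        return -EINVAL; /* no leak: the child ref is auto-released */
                n++;
        }

        return n;
}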
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 2dd0e46c42ad..d4bf8d187e16 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -10,8 +10,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
/* EIC registers definition */
@@ -617,7 +617,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
u16 num_banks = 0;
int ret, i;
- pdata = of_device_get_match_data(dev);
+ pdata = device_get_match_data(dev);
if (!pdata) {
dev_err(dev, "No matching driver data found.\n");
return -EINVAL;
diff --git a/drivers/gpio/gpio-elkhartlake.c b/drivers/gpio/gpio-elkhartlake.c
index 887c0fe99d39..95de52d2cc63 100644
--- a/drivers/gpio/gpio-elkhartlake.c
+++ b/drivers/gpio/gpio-elkhartlake.c
@@ -75,4 +75,4 @@ MODULE_AUTHOR("Pandith N <pandith.n@intel.com>");
MODULE_AUTHOR("Raag Jadav <raag.jadav@intel.com>");
MODULE_DESCRIPTION("Intel Elkhart Lake PSE GPIO driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(GPIO_TANGIER);
+MODULE_IMPORT_NS("GPIO_TANGIER");
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index ab798c848215..58d2464c07bc 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -249,7 +249,7 @@ static void ep93xx_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- seq_printf(p, dev_name(gc->parent));
+ seq_puts(p, dev_name(gc->parent));
}
static const struct irq_chip gpio_eic_irq_chip = {
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 5170fe7599cd..d5909a4f0433 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -99,11 +99,13 @@ static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset);
unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
+ unsigned int bit_value = value ? BIT(bit) : 0;
- if (value)
- regmap_set_bits(exar_gpio->regmap, addr, BIT(bit));
- else
- regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
+ /*
+ * regmap_write_bits() forces value to be written when an external
+ * pull up/down might otherwise indicate value was already set.
+ */
+ regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value);
}
static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
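The exar fix matters because regmap_update_bits()-style helpers skip the bus write when the register already reads back the target value; regmap_write_bits() forces the write, which is what you want when an external pull up/down can make a read alias the stale state. A one-function sketch with hypothetical names:

static void foo_set_line(struct regmap *map, unsigned int reg,
                         unsigned int bit, int value)
{
        /* Always perform the write, even if the read-back already matches. */
        regmap_write_bits(map, reg, BIT(bit), value ? BIT(bit) : 0);
}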
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 97d345b59352..c35eaa2851d8 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -253,18 +253,13 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- g->clk = devm_clk_get(dev, NULL);
- if (!IS_ERR(g->clk)) {
- ret = clk_prepare_enable(g->clk);
- if (ret)
- return ret;
- } else if (PTR_ERR(g->clk) == -EPROBE_DEFER) {
+ g->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(g->clk) && PTR_ERR(g->clk) == -EPROBE_DEFER)
/*
* Percolate deferrals, for anything else,
* just live without the clocking.
*/
return PTR_ERR(g->clk);
- }
ret = bgpio_init(&g->gc, dev, 4,
g->base + GPIO_DATA_IN,
@@ -273,10 +268,9 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
g->base + GPIO_DIR,
NULL,
0);
- if (ret) {
- dev_err(dev, "unable to init generic GPIO\n");
- goto dis_clk;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
+
g->gc.label = dev_name(dev);
g->gc.base = -1;
g->gc.parent = dev;
@@ -293,10 +287,9 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
- if (!girq->parents) {
- ret = -ENOMEM;
- goto dis_clk;
- }
+ if (!girq->parents)
+ return -ENOMEM;
+
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
girq->parents[0] = irq;
@@ -309,26 +302,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
/* Clear any use of debounce */
writel(0x0, g->base + GPIO_DEBOUNCE_EN);
- ret = devm_gpiochip_add_data(dev, &g->gc, g);
- if (ret)
- goto dis_clk;
-
- platform_set_drvdata(pdev, g);
- dev_info(dev, "FTGPIO010 @%p registered\n", g->base);
-
- return 0;
-
-dis_clk:
- clk_disable_unprepare(g->clk);
-
- return ret;
-}
-
-static void ftgpio_gpio_remove(struct platform_device *pdev)
-{
- struct ftgpio_gpio *g = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(g->clk);
+ return devm_gpiochip_add_data(dev, &g->gc, g);
}
static const struct of_device_id ftgpio_gpio_of_match[] = {
@@ -350,6 +324,5 @@ static struct platform_driver ftgpio_gpio_driver = {
.of_match_table = ftgpio_gpio_of_match,
},
.probe = ftgpio_gpio_probe,
- .remove_new = ftgpio_gpio_remove,
};
builtin_platform_driver(ftgpio_gpio_driver);
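The ftgpio conversion rests on devm_clk_get_enabled(), which combines clk_get() and clk_prepare_enable() and registers the matching disable/unprepare/put as a devres action, eliminating both the error-path label and the remove callback. A minimal sketch:

static int foo_probe(struct platform_device *pdev)
{
        struct clk *clk;

        clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk); /* ftgpio instead tolerates a missing clock */

        /* clk is prepared and enabled; devm undoes all of it on unbind */
        return 0;
}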
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index 43d823a56e59..fb7c510bf2fa 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -18,7 +18,7 @@
#include "gpio-i8255.h"
-MODULE_IMPORT_NS(I8255);
+MODULE_IMPORT_NS("I8255");
#define GPIOMM_EXTENT 8
#define MAX_NUM_GPIOMM max_num_isa_dev(GPIOMM_EXTENT)
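The MODULE_IMPORT_NS hunks here and in gpio-elkhartlake.c follow a tree-wide change: symbol namespace names are now written as string literals rather than bare identifiers, for example:

MODULE_IMPORT_NS("I8255");      /* previously: MODULE_IMPORT_NS(I8255) */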
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index f2e911a3d2ca..ad6a045fd3d2 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -32,12 +32,14 @@
#define GNR_PINS_PER_REG 32
#define GNR_NUM_REGS DIV_ROUND_UP(GNR_NUM_PINS, GNR_PINS_PER_REG)
-#define GNR_CFG_BAR 0x00
+#define GNR_CFG_PADBAR 0x00
#define GNR_CFG_LOCK_OFFSET 0x04
-#define GNR_GPI_STATUS_OFFSET 0x20
+#define GNR_GPI_STATUS_OFFSET 0x14
#define GNR_GPI_ENABLE_OFFSET 0x24
-#define GNR_CFG_DW_RX_MASK GENMASK(25, 22)
+#define GNR_CFG_DW_HOSTSW_MODE BIT(27)
+#define GNR_CFG_DW_RX_MASK GENMASK(23, 22)
+#define GNR_CFG_DW_INTSEL_MASK GENMASK(21, 14)
#define GNR_CFG_DW_RX_DISABLE FIELD_PREP(GNR_CFG_DW_RX_MASK, 2)
#define GNR_CFG_DW_RX_EDGE FIELD_PREP(GNR_CFG_DW_RX_MASK, 1)
#define GNR_CFG_DW_RX_LEVEL FIELD_PREP(GNR_CFG_DW_RX_MASK, 0)
@@ -50,6 +52,7 @@
* struct gnr_gpio - Intel Granite Rapids-D vGPIO driver state
* @gc: GPIO controller interface
* @reg_base: base address of the GPIO registers
+ * @pad_base: base address of the vGPIO pad configuration registers
* @ro_bitmap: bitmap of read-only pins
* @lock: guard the registers
* @pad_backup: backup of the register state for suspend
@@ -57,6 +60,7 @@
struct gnr_gpio {
struct gpio_chip gc;
void __iomem *reg_base;
+ void __iomem *pad_base;
DECLARE_BITMAP(ro_bitmap, GNR_NUM_PINS);
raw_spinlock_t lock;
u32 pad_backup[];
@@ -65,7 +69,7 @@ struct gnr_gpio {
static void __iomem *gnr_gpio_get_padcfg_addr(const struct gnr_gpio *priv,
unsigned int gpio)
{
- return priv->reg_base + gpio * sizeof(u32);
+ return priv->pad_base + gpio * sizeof(u32);
}
static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
@@ -88,6 +92,20 @@ static int gnr_gpio_configure_line(struct gpio_chip *gc, unsigned int gpio,
return 0;
}
+static int gnr_gpio_request(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct gnr_gpio *priv = gpiochip_get_data(gc);
+ u32 dw;
+
+ dw = readl(gnr_gpio_get_padcfg_addr(priv, gpio));
+ if (!(dw & GNR_CFG_DW_HOSTSW_MODE)) {
+ dev_warn(gc->parent, "GPIO %u is not owned by host", gpio);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static int gnr_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
const struct gnr_gpio *priv = gpiochip_get_data(gc);
@@ -139,6 +157,7 @@ static int gnr_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio, in
static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE,
+ .request = gnr_gpio_request,
.get = gnr_gpio_get,
.set = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction,
@@ -166,7 +185,7 @@ static void gnr_gpio_irq_ack(struct irq_data *d)
guard(raw_spinlock_irqsave)(&priv->lock);
reg = readl(addr);
- reg &= ~BIT(bit_idx);
+ reg |= BIT(bit_idx);
writel(reg, addr);
}
@@ -209,10 +228,18 @@ static void gnr_gpio_irq_unmask(struct irq_data *d)
static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- irq_hw_number_t pin = irqd_to_hwirq(d);
- u32 mask = GNR_CFG_DW_RX_MASK;
+ struct gnr_gpio *priv = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 reg;
u32 set;
+ /* Allow interrupts only if Interrupt Select field is non-zero */
+ reg = readl(gnr_gpio_get_padcfg_addr(priv, hwirq));
+ if (!(reg & GNR_CFG_DW_INTSEL_MASK)) {
+ dev_dbg(gc->parent, "GPIO %lu cannot be used as IRQ", hwirq);
+ return -EPERM;
+ }
+
/* Falling edge and level low triggers not supported by the GPIO controller */
switch (type) {
case IRQ_TYPE_NONE:
@@ -230,10 +257,11 @@ static int gnr_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- return gnr_gpio_configure_line(gc, pin, mask, set);
+ return gnr_gpio_configure_line(gc, hwirq, GNR_CFG_DW_RX_MASK, set);
}
static const struct irq_chip gnr_gpio_irq_chip = {
+ .name = "gpio-graniterapids",
.irq_ack = gnr_gpio_irq_ack,
.irq_mask = gnr_gpio_irq_mask,
.irq_unmask = gnr_gpio_irq_unmask,
@@ -291,6 +319,7 @@ static int gnr_gpio_probe(struct platform_device *pdev)
struct gnr_gpio *priv;
void __iomem *regs;
int irq, ret;
+ u32 offset;
priv = devm_kzalloc(dev, struct_size(priv, pad_backup, num_backup_pins), GFP_KERNEL);
if (!priv)
@@ -302,6 +331,10 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
+ priv->reg_base = regs;
+ offset = readl(priv->reg_base + GNR_CFG_PADBAR);
+ priv->pad_base = priv->reg_base + offset;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -311,8 +344,6 @@ static int gnr_gpio_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to request interrupt\n");
- priv->reg_base = regs + readl(regs + GNR_CFG_BAR);
-
gnr_gpio_init_pin_ro_bits(dev, priv->reg_base + GNR_CFG_LOCK_OFFSET,
priv->ro_bitmap);
@@ -324,7 +355,6 @@ static int gnr_gpio_probe(struct platform_device *pdev)
girq = &priv->gc.irq;
gpio_irq_chip_set_chip(girq, &gnr_gpio_irq_chip);
- girq->chip->name = dev_name(dev);
girq->parent_handler = NULL;
girq->num_parents = 0;
girq->parents = NULL;
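
The Granite Rapids-D fix above narrows the RX trigger field to bits 23:22 and starts decoding the interrupt-select field at bits 21:14, all through GENMASK()/FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>. A minimal sketch of that pack/unpack pattern, reusing the field layout from the hunk (the helper names are illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define CFG_DW_RX_MASK		GENMASK(23, 22)	/* RX trigger mode */
#define CFG_DW_INTSEL_MASK	GENMASK(21, 14)	/* interrupt select */

/* Extract the 8-bit interrupt-select value from a pad config DWORD. */
static inline u32 cfg_dw_intsel(u32 dw)
{
	return FIELD_GET(CFG_DW_INTSEL_MASK, dw);
}

/* Replace the RX mode (0 = level, 1 = edge, 2 = disabled) in a DWORD. */
static inline u32 cfg_dw_set_rx(u32 dw, u32 mode)
{
	dw &= ~CFG_DW_RX_MASK;
	return dw | FIELD_PREP(CFG_DW_RX_MASK, mode);
}

This is why gnr_gpio_irq_set_type() can refuse pins whose INTSEL field reads zero: FIELD_GET() yields the raw selector, and 0 means the pad is not routed to an interrupt line.
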
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 017c7170eb57..169f33c41c59 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -16,20 +16,20 @@
* Contributors: Andreas Larsson <andreas@gaisler.com>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/gpio/driver.h>
-#include <linux/slab.h>
+#include <linux/bitops.h>
#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
-#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#define GRGPIO_MAX_NGPIO 32
@@ -318,6 +318,13 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
}
+static void grgpio_irq_domain_remove(void *data)
+{
+ struct irq_domain *domain = data;
+
+ irq_domain_remove(domain);
+}
+
static const struct irq_domain_ops grgpio_irq_domain_ops = {
.map = grgpio_irq_map,
.unmap = grgpio_irq_unmap,
@@ -328,6 +335,7 @@ static const struct irq_domain_ops grgpio_irq_domain_ops = {
static int grgpio_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct device *dev = &ofdev->dev;
void __iomem *regs;
struct gpio_chip *gc;
struct grgpio_priv *priv;
@@ -337,7 +345,7 @@ static int grgpio_probe(struct platform_device *ofdev)
int size;
int i;
- priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -346,28 +354,31 @@ static int grgpio_probe(struct platform_device *ofdev)
return PTR_ERR(regs);
gc = &priv->gc;
- err = bgpio_init(gc, &ofdev->dev, 4, regs + GRGPIO_DATA,
+ err = bgpio_init(gc, dev, 4, regs + GRGPIO_DATA,
regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
BGPIOF_BIG_ENDIAN_BYTE_ORDER);
if (err) {
- dev_err(&ofdev->dev, "bgpio_init() failed\n");
+ dev_err(dev, "bgpio_init() failed\n");
return err;
}
priv->regs = regs;
priv->imask = gc->read_reg(regs + GRGPIO_IMASK);
- priv->dev = &ofdev->dev;
+ priv->dev = dev;
gc->owner = THIS_MODULE;
gc->to_irq = grgpio_to_irq;
- gc->label = devm_kasprintf(&ofdev->dev, GFP_KERNEL, "%pOF", np);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
gc->base = -1;
err = of_property_read_u32(np, "nbits", &prop);
if (err || prop <= 0 || prop > GRGPIO_MAX_NGPIO) {
gc->ngpio = GRGPIO_MAX_NGPIO;
- dev_dbg(&ofdev->dev,
- "No or invalid nbits property: assume %d\n", gc->ngpio);
+ dev_dbg(dev, "No or invalid nbits property: assume %d\n",
+ gc->ngpio);
} else {
gc->ngpio = prop;
}
@@ -379,7 +390,7 @@ static int grgpio_probe(struct platform_device *ofdev)
irqmap = (s32 *)of_get_property(np, "irqmap", &size);
if (irqmap) {
if (size < gc->ngpio) {
- dev_err(&ofdev->dev,
+ dev_err(dev,
"irqmap shorter than ngpio (%d < %d)\n",
size, gc->ngpio);
return -EINVAL;
@@ -389,10 +400,15 @@ static int grgpio_probe(struct platform_device *ofdev)
&grgpio_irq_domain_ops,
priv);
if (!priv->domain) {
- dev_err(&ofdev->dev, "Could not add irq domain\n");
+ dev_err(dev, "Could not add irq domain\n");
return -EINVAL;
}
+ err = devm_add_action_or_reset(dev, grgpio_irq_domain_remove,
+ priv->domain);
+ if (err)
+ return err;
+
for (i = 0; i < gc->ngpio; i++) {
struct grgpio_lirq *lirq;
int ret;
@@ -415,32 +431,18 @@ static int grgpio_probe(struct platform_device *ofdev)
}
}
- platform_set_drvdata(ofdev, priv);
-
- err = gpiochip_add_data(gc, priv);
+ err = devm_gpiochip_add_data(dev, gc, priv);
if (err) {
- dev_err(&ofdev->dev, "Could not add gpiochip\n");
- if (priv->domain)
- irq_domain_remove(priv->domain);
+ dev_err(dev, "Could not add gpiochip\n");
return err;
}
- dev_info(&ofdev->dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
+ dev_info(dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");
return 0;
}
-static void grgpio_remove(struct platform_device *ofdev)
-{
- struct grgpio_priv *priv = platform_get_drvdata(ofdev);
-
- gpiochip_remove(&priv->gc);
-
- if (priv->domain)
- irq_domain_remove(priv->domain);
-}
-
static const struct of_device_id grgpio_match[] = {
{.name = "GAISLER_GPIO"},
{.name = "01_01a"},
@@ -455,7 +457,6 @@ static struct platform_driver grgpio_driver = {
.of_match_table = grgpio_match,
},
.probe = grgpio_probe,
- .remove_new = grgpio_remove,
};
module_platform_driver(grgpio_driver);
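
The grgpio conversion hinges on devm_add_action_or_reset(): the irq domain is not itself devres-managed, so a one-line callback is registered to dispose of it when the device unbinds, which is what lets the explicit remove() callback disappear. A generic sketch of the pattern (names are illustrative):

#include <linux/device.h>
#include <linux/irqdomain.h>

static void demo_domain_remove(void *data)
{
	irq_domain_remove(data);	/* runs at unbind, in reverse order */
}

static int demo_attach_domain(struct device *dev, struct irq_domain *domain)
{
	/* On allocation failure the callback fires immediately. */
	return devm_add_action_or_reset(dev, demo_domain_remove, domain);
}

Combined with devm_gpiochip_add_data(), every probe error path collapses to a plain return.
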
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index 1bcfc1835dae..0580f6712bea 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -210,7 +210,7 @@ static void hlwd_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- seq_printf(p, dev_name(hlwd->dev));
+ seq_puts(p, dev_name(hlwd->dev));
}
static const struct irq_chip hlwd_gpio_irq_chip = {
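
This one-liner (repeated for several drivers below) closes a format-string hazard: handing a runtime string to seq_printf() as the format means any '%' in the device name gets parsed as a conversion specifier. A minimal illustration of the safe form:

#include <linux/seq_file.h>

static void demo_print_chip(struct seq_file *p, const char *name)
{
	/*
	 * seq_printf(p, name) would interpret a '%' inside the device
	 * name; seq_puts() copies the string literally and is cheaper.
	 */
	seq_puts(p, name);
}
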
diff --git a/drivers/gpio/gpio-i8255.c b/drivers/gpio/gpio-i8255.c
index 64ab80fc4a1e..953018bfa2b1 100644
--- a/drivers/gpio/gpio-i8255.c
+++ b/drivers/gpio/gpio-i8255.c
@@ -134,7 +134,7 @@ int devm_i8255_regmap_register(struct device *const dev,
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
-EXPORT_SYMBOL_NS_GPL(devm_i8255_regmap_register, I8255);
+EXPORT_SYMBOL_NS_GPL(devm_i8255_regmap_register, "I8255");
MODULE_AUTHOR("William Breathitt Gray");
MODULE_DESCRIPTION("Intel 8255 Programmable Peripheral Interface");
diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c
index 53b1eb876a12..0103be977c66 100644
--- a/drivers/gpio/gpio-idio-16.c
+++ b/drivers/gpio/gpio-idio-16.c
@@ -3,6 +3,9 @@
* GPIO library for the ACCES IDIO-16 family
* Copyright (C) 2022 William Breathitt Gray
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
+
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -14,8 +17,6 @@
#include "gpio-idio-16.h"
-#define DEFAULT_SYMBOL_NAMESPACE GPIO_IDIO_16
-
#define IDIO_16_DAT_BASE 0x0
#define IDIO_16_OUT_BASE IDIO_16_DAT_BASE
#define IDIO_16_IN_BASE (IDIO_16_DAT_BASE + 1)
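
The I8255 and IDIO-16 hunks are part of the tree-wide move to string-literal symbol namespaces: EXPORT_SYMBOL_NS_GPL(), MODULE_IMPORT_NS() and DEFAULT_SYMBOL_NAMESPACE now take a quoted string instead of a bare token, and the DEFAULT_SYMBOL_NAMESPACE define has to precede the includes so the export macros see it. A minimal sketch with a hypothetical namespace:

/* Must come before the headers that expand EXPORT_SYMBOL*(). */
#define DEFAULT_SYMBOL_NAMESPACE "DEMO_NS"

#include <linux/export.h>
#include <linux/module.h>

int demo_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(demo_helper, "DEMO_NS");	/* explicit variant */

/* A consumer module would declare: MODULE_IMPORT_NS("DEMO_NS"); */
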
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index dfec9fbfc7a9..817ecb12d550 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -82,9 +82,9 @@ static int ljca_gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
int ret;
mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
packet->item[0].index = gpio_id;
packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id];
- packet->num = 1;
ret = ljca_transfer(ljca_gpio->ljca, LJCA_GPIO_CONFIG, (u8 *)packet,
struct_size(packet, item, packet->num), NULL, 0);
@@ -420,8 +420,14 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
if (!ljca_gpio->connect_mode)
return -ENOMEM;
- mutex_init(&ljca_gpio->irq_lock);
- mutex_init(&ljca_gpio->trans_lock);
+ ret = devm_mutex_init(&auxdev->dev, &ljca_gpio->irq_lock);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(&auxdev->dev, &ljca_gpio->trans_lock);
+ if (ret)
+ return ret;
+
ljca_gpio->gc.direction_input = ljca_gpio_direction_input;
ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
ljca_gpio->gc.get_direction = ljca_gpio_get_direction;
@@ -453,11 +459,8 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
INIT_WORK(&ljca_gpio->work, ljca_gpio_async);
ret = gpiochip_add_data(&ljca_gpio->gc, ljca_gpio);
- if (ret) {
+ if (ret)
ljca_unregister_event_cb(ljca);
- mutex_destroy(&ljca_gpio->irq_lock);
- mutex_destroy(&ljca_gpio->trans_lock);
- }
return ret;
}
@@ -469,8 +472,6 @@ static void ljca_gpio_remove(struct auxiliary_device *auxdev)
gpiochip_remove(&ljca_gpio->gc);
ljca_unregister_event_cb(ljca_gpio->ljca);
cancel_work_sync(&ljca_gpio->work);
- mutex_destroy(&ljca_gpio->irq_lock);
- mutex_destroy(&ljca_gpio->trans_lock);
}
static const struct auxiliary_device_id ljca_gpio_id_table[] = {
@@ -491,4 +492,4 @@ MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_AUTHOR("Lixu Zhang <lixu.zhang@intel.com>");
MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-GPIO driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LJCA);
+MODULE_IMPORT_NS("LJCA");
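
Besides the namespace change, the LJCA driver switches to devm_mutex_init(), which registers mutex destruction with devres so neither the probe error path nor remove() has to call mutex_destroy() by hand. The shape of the pattern, reduced to essentials (struct and function names are illustrative):

#include <linux/device.h>
#include <linux/mutex.h>

struct demo_priv {
	struct mutex lock;
};

static int demo_probe(struct device *dev, struct demo_priv *priv)
{
	int ret;

	/* The mutex is destroyed automatically when dev detaches. */
	ret = devm_mutex_init(dev, &priv->lock);
	if (ret)
		return ret;

	return 0;
}
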
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index 6749d4dd6d64..7f4d78fd800e 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -237,9 +237,9 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data2 = {
.label = "ls2k2000_gpio",
.mode = BIT_CTRL_MODE,
- .conf_offset = 0x84,
- .in_offset = 0x88,
- .out_offset = 0x80,
+ .conf_offset = 0x4,
+ .in_offset = 0x8,
+ .out_offset = 0x0,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls3a5000_data = {
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index e7c0ef6e54fa..2cf9fb4637a2 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -388,7 +388,7 @@ MODULE_DEVICE_TABLE(of, lpc18xx_gpio_match);
static struct platform_driver lpc18xx_gpio_driver = {
.probe = lpc18xx_gpio_probe,
- .remove_new = lpc18xx_gpio_remove,
+ .remove = lpc18xx_gpio_remove,
.driver = {
.name = "lpc18xx-gpio",
.of_match_table = lpc18xx_gpio_match,
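
The .remove_new to .remove renames sprinkled through this series finish the platform-bus migration to the void-returning remove callback: .remove_new was the transitional field name, and .remove now carries the same prototype. A minimal sketch:

#include <linux/platform_device.h>

static void demo_remove(struct platform_device *pdev)
{
	/* void return: a remove callback has no way to fail */
}

static struct platform_driver demo_driver = {
	.driver	= {
		.name = "demo",
	},
	.remove = demo_remove,	/* was .remove_new during the transition */
};
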
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 701795b9d329..e688c13c8cc3 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -165,7 +165,10 @@ int __max730x_probe(struct max7301 *ts)
pdata = dev_get_platdata(dev);
- mutex_init(&ts->lock);
+ ret = devm_mutex_init(ts->dev, &ts->lock);
+ if (ret)
+ return ret;
+
dev_set_drvdata(dev, ts);
/* Power up the chip and disable IRQ output */
@@ -206,17 +209,11 @@ int __max730x_probe(struct max7301 *ts)
int offset = (i - 1) * 4 + j;
ret = max7301_direction_input(&ts->chip, offset);
if (ret)
- goto exit_destroy;
+ return ret;
}
}
- ret = gpiochip_add_data(&ts->chip, ts);
- if (!ret)
- return ret;
-
-exit_destroy:
- mutex_destroy(&ts->lock);
- return ret;
+ return devm_gpiochip_add_data(ts->dev, &ts->chip, ts);
}
EXPORT_SYMBOL_GPL(__max730x_probe);
@@ -226,8 +223,6 @@ void __max730x_remove(struct device *dev)
/* Power down the chip and disable IRQ output */
ts->write(dev, 0x04, 0x00);
- gpiochip_remove(&ts->chip);
- mutex_destroy(&ts->lock);
}
EXPORT_SYMBOL_GPL(__max730x_remove);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index ccbb63c21d6f..7ee891ef6905 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -145,8 +145,6 @@ static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
irq = platform_get_irq(to_platform_device(gc->parent), index);
if (irq < 0)
return irq;
- if (irq == 0)
- break;
if (irq_get_irq_data(irq)->hwirq == offset)
return irq;
}
@@ -227,7 +225,7 @@ static struct platform_driver mb86s70_gpio_driver = {
.acpi_match_table = ACPI_PTR(mb86s70_gpio_acpi_ids),
},
.probe = mb86s70_gpio_probe,
- .remove_new = mb86s70_gpio_remove,
+ .remove = mb86s70_gpio_remove,
};
module_platform_driver(mb86s70_gpio_driver);
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a035a9bcb57c..ebe5da4933bc 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -127,6 +127,13 @@ static int men_z127_set_config(struct gpio_chip *gc, unsigned offset,
return -ENOTSUPP;
}
+static void men_z127_release_mem(void *data)
+{
+ struct resource *res = data;
+
+ mcb_release_mem(res);
+}
+
static int men_z127_probe(struct mcb_device *mdev,
const struct mcb_device_id *id)
{
@@ -140,17 +147,19 @@ static int men_z127_probe(struct mcb_device *mdev,
return -ENOMEM;
men_z127_gpio->mem = mcb_request_mem(mdev, dev_name(dev));
- if (IS_ERR(men_z127_gpio->mem)) {
- dev_err(dev, "failed to request device memory");
- return PTR_ERR(men_z127_gpio->mem);
- }
+ if (IS_ERR(men_z127_gpio->mem))
+ return dev_err_probe(dev, PTR_ERR(men_z127_gpio->mem),
+ "failed to request device memory");
- men_z127_gpio->reg_base = ioremap(men_z127_gpio->mem->start,
- resource_size(men_z127_gpio->mem));
- if (men_z127_gpio->reg_base == NULL) {
- ret = -ENXIO;
- goto err_release;
- }
+ ret = devm_add_action_or_reset(dev, men_z127_release_mem,
+ men_z127_gpio->mem);
+ if (ret)
+ return ret;
+
+ men_z127_gpio->reg_base = devm_ioremap(dev, men_z127_gpio->mem->start,
+ resource_size(men_z127_gpio->mem));
+ if (men_z127_gpio->reg_base == NULL)
+ return -ENXIO;
mcb_set_drvdata(mdev, men_z127_gpio);
@@ -161,34 +170,16 @@ static int men_z127_probe(struct mcb_device *mdev,
men_z127_gpio->reg_base + MEN_Z127_GPIODR,
NULL, 0);
if (ret)
- goto err_unmap;
+ return ret;
men_z127_gpio->gc.set_config = men_z127_set_config;
- ret = gpiochip_add_data(&men_z127_gpio->gc, men_z127_gpio);
- if (ret) {
- dev_err(dev, "failed to register MEN 16Z127 GPIO controller");
- goto err_unmap;
- }
-
- dev_info(dev, "MEN 16Z127 GPIO driver registered");
+ ret = devm_gpiochip_add_data(dev, &men_z127_gpio->gc, men_z127_gpio);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register MEN 16Z127 GPIO controller");
return 0;
-
-err_unmap:
- iounmap(men_z127_gpio->reg_base);
-err_release:
- mcb_release_mem(men_z127_gpio->mem);
- return ret;
-}
-
-static void men_z127_remove(struct mcb_device *mdev)
-{
- struct men_z127_gpio *men_z127_gpio = mcb_get_drvdata(mdev);
-
- gpiochip_remove(&men_z127_gpio->gc);
- iounmap(men_z127_gpio->reg_base);
- mcb_release_mem(men_z127_gpio->mem);
}
static const struct mcb_device_id men_z127_ids[] = {
@@ -202,7 +193,6 @@ static struct mcb_driver men_z127_driver = {
.name = "z127-gpio",
},
.probe = men_z127_probe,
- .remove = men_z127_remove,
.id_table = men_z127_ids,
};
module_mcb_driver(men_z127_driver);
@@ -211,4 +201,4 @@ MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z127");
-MODULE_IMPORT_NS(MCB);
+MODULE_IMPORT_NS("MCB");
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 421d7e3a6c66..4335a5d8e4f6 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -78,24 +78,25 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
if (retval)
return retval;
- retval = pcim_iomap_regions(pdev, BIT(1) | BIT(0), pci_name(pdev));
- if (retval)
- return dev_err_probe(dev, retval, "I/O memory mapping error\n");
-
- base = pcim_iomap_table(pdev)[1];
+ base = pcim_iomap_region(pdev, 1, pci_name(pdev));
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "I/O memory mapping error\n");
irq_base = readl(base + 0 * sizeof(u32));
gpio_base = readl(base + 1 * sizeof(u32));
/* Release the IO mapping, since we already get the info from BAR1 */
- pcim_iounmap_regions(pdev, BIT(1));
+ pcim_iounmap_region(pdev, 1);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
- priv->reg_base = pcim_iomap_table(pdev)[0];
+ priv->reg_base = pcim_iomap_region(pdev, 0, pci_name(pdev));
+ if (IS_ERR(priv->reg_base))
+ return dev_err_probe(dev, PTR_ERR(priv->reg_base),
+ "I/O memory mapping error\n");
priv->pin_info.pin_ranges = mrfld_gpio_ranges;
priv->pin_info.nranges = ARRAY_SIZE(mrfld_gpio_ranges);
@@ -141,4 +142,4 @@ module_pci_driver(mrfld_gpio_driver);
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_DESCRIPTION("Intel Merrifield SoC GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(GPIO_TANGIER);
+MODULE_IMPORT_NS("GPIO_TANGIER");
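
Merrifield moves from the pcim_iomap_regions()/pcim_iomap_table() pair to pcim_iomap_region(), which requests and maps a single BAR and hands back the mapping (or an ERR_PTR) directly, so the table lookup disappears. A sketch of the new call under those assumptions:

#include <linux/pci.h>

static void __iomem *demo_map_bar(struct pci_dev *pdev, int bar)
{
	void __iomem *base;

	base = pcim_iomap_region(pdev, bar, pci_name(pdev));
	if (IS_ERR(base))
		return base;	/* caller distinguishes with IS_ERR() */

	/*
	 * pcim_iounmap_region(pdev, bar) releases the BAR early, as the
	 * driver above does once it has read irq_base and gpio_base.
	 */
	return base;
}
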
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 6abe01bc39c3..6f3dda6b635f 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -331,7 +331,7 @@ static void mlxbf2_gpio_irq_print_chip(struct irq_data *irqd,
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
- seq_printf(p, dev_name(gs->dev));
+ seq_puts(p, dev_name(gs->dev));
}
static const struct irq_chip mlxbf2_gpio_irq_chip = {
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index e855c68c981b..14ae25783438 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -136,7 +136,7 @@ MODULE_DEVICE_TABLE(of, ltq_mm_match);
static struct platform_driver ltq_mm_driver = {
.probe = ltq_mm_probe,
- .remove_new = ltq_mm_remove,
+ .remove = ltq_mm_remove,
.driver = {
.name = "gpio-mm-ltq",
.of_match_table = ltq_mm_match,
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index a199dce3394a..091d96f2d682 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -183,7 +183,7 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
.of_match_table = mpc52xx_wkup_gpiochip_match,
},
.probe = mpc52xx_wkup_gpiochip_probe,
- .remove_new = mpc52xx_gpiochip_remove,
+ .remove = mpc52xx_gpiochip_remove,
};
/*
@@ -336,7 +336,7 @@ static struct platform_driver mpc52xx_simple_gpiochip_driver = {
.of_match_table = mpc52xx_simple_gpiochip_match,
},
.probe = mpc52xx_simple_gpiochip_probe,
- .remove_new = mpc52xx_gpiochip_remove,
+ .remove = mpc52xx_gpiochip_remove,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 685ec31db409..0cd4c36ae8aa 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -15,7 +15,6 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -286,6 +285,7 @@ static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = {
};
static const struct of_device_id mpc8xxx_gpio_ids[] = {
+ { .compatible = "fsl,mpc8314-gpio", },
{ .compatible = "fsl,mpc8349-gpio", },
{ .compatible = "fsl,mpc8572-gpio", .data = &mpc8572_gpio_devtype, },
{ .compatible = "fsl,mpc8610-gpio", },
@@ -300,14 +300,14 @@ static const struct of_device_id mpc8xxx_gpio_ids[] = {
static int mpc8xxx_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct mpc8xxx_gpio_chip *mpc8xxx_gc;
- struct gpio_chip *gc;
const struct mpc8xxx_gpio_devtype *devtype = NULL;
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc;
+ struct device *dev = &pdev->dev;
struct fwnode_handle *fwnode;
+ struct gpio_chip *gc;
int ret;
- mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
+ mpc8xxx_gc = devm_kzalloc(dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
if (!mpc8xxx_gc)
return -ENOMEM;
@@ -320,32 +320,28 @@ static int mpc8xxx_probe(struct platform_device *pdev)
return PTR_ERR(mpc8xxx_gc->regs);
gc = &mpc8xxx_gc->gc;
- gc->parent = &pdev->dev;
-
- if (device_property_read_bool(&pdev->dev, "little-endian")) {
- ret = bgpio_init(gc, &pdev->dev, 4,
- mpc8xxx_gc->regs + GPIO_DAT,
- NULL, NULL,
- mpc8xxx_gc->regs + GPIO_DIR, NULL,
- BGPIOF_BIG_ENDIAN);
+ gc->parent = dev;
+
+ if (device_property_read_bool(dev, "little-endian")) {
+ ret = bgpio_init(gc, dev, 4, mpc8xxx_gc->regs + GPIO_DAT,
+ NULL, NULL, mpc8xxx_gc->regs + GPIO_DIR,
+ NULL, BGPIOF_BIG_ENDIAN);
if (ret)
return ret;
- dev_dbg(&pdev->dev, "GPIO registers are LITTLE endian\n");
+ dev_dbg(dev, "GPIO registers are LITTLE endian\n");
} else {
- ret = bgpio_init(gc, &pdev->dev, 4,
- mpc8xxx_gc->regs + GPIO_DAT,
- NULL, NULL,
- mpc8xxx_gc->regs + GPIO_DIR, NULL,
- BGPIOF_BIG_ENDIAN
+ ret = bgpio_init(gc, dev, 4, mpc8xxx_gc->regs + GPIO_DAT,
+ NULL, NULL, mpc8xxx_gc->regs + GPIO_DIR,
+ NULL, BGPIOF_BIG_ENDIAN
| BGPIOF_BIG_ENDIAN_BYTE_ORDER);
if (ret)
return ret;
- dev_dbg(&pdev->dev, "GPIO registers are BIG endian\n");
+ dev_dbg(dev, "GPIO registers are BIG endian\n");
}
mpc8xxx_gc->direction_output = gc->direction_output;
- devtype = device_get_match_data(&pdev->dev);
+ devtype = device_get_match_data(dev);
if (!devtype)
devtype = &mpc8xxx_gpio_devtype_default;
@@ -370,10 +366,10 @@ static int mpc8xxx_probe(struct platform_device *pdev)
* associated input enable must be set (GPIOxGPIE[IEn]=1) to propagate
* the port value to the GPIO Data Register.
*/
- fwnode = dev_fwnode(&pdev->dev);
- if (of_device_is_compatible(np, "fsl,qoriq-gpio") ||
- of_device_is_compatible(np, "fsl,ls1028a-gpio") ||
- of_device_is_compatible(np, "fsl,ls1088a-gpio") ||
+ fwnode = dev_fwnode(dev);
+ if (device_is_compatible(dev, "fsl,qoriq-gpio") ||
+ device_is_compatible(dev, "fsl,ls1028a-gpio") ||
+ device_is_compatible(dev, "fsl,ls1088a-gpio") ||
is_acpi_node(fwnode)) {
gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
/* Also, latch state of GPIOs configured as output by bootloader. */
@@ -381,9 +377,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
}
- ret = devm_gpiochip_add_data(&pdev->dev, gc, mpc8xxx_gc);
+ ret = devm_gpiochip_add_data(dev, gc, mpc8xxx_gc);
if (ret) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"GPIO chip registration failed with status %d\n", ret);
return ret;
}
@@ -404,18 +400,17 @@ static int mpc8xxx_probe(struct platform_device *pdev)
gc->write_reg(mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR, 0);
- ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn,
+ ret = devm_request_irq(dev, mpc8xxx_gc->irqn,
mpc8xxx_gpio_irq_cascade,
IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade",
mpc8xxx_gc);
if (ret) {
- dev_err(&pdev->dev,
- "failed to devm_request_irq(%d), ret = %d\n",
+ dev_err(dev, "failed to devm_request_irq(%d), ret = %d\n",
mpc8xxx_gc->irqn, ret);
goto err;
}
- device_init_wakeup(&pdev->dev, true);
+ device_init_wakeup(dev, true);
return 0;
err:
@@ -466,7 +461,7 @@ MODULE_DEVICE_TABLE(acpi, gpio_acpi_ids);
static struct platform_driver mpc8xxx_plat_driver = {
.probe = mpc8xxx_probe,
- .remove_new = mpc8xxx_remove,
+ .remove = mpc8xxx_remove,
.driver = {
.name = "gpio-mpc8xxx",
.of_match_table = mpc8xxx_gpio_ids,
diff --git a/drivers/gpio/gpio-mpfs.c b/drivers/gpio/gpio-mpfs.c
new file mode 100644
index 000000000000..561a961c97a6
--- /dev/null
+++ b/drivers/gpio/gpio-mpfs.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: (GPL-2.0)
+/*
+ * Microchip PolarFire SoC (MPFS) GPIO controller driver
+ *
+ * Copyright (c) 2018-2024 Microchip Technology Inc. and its subsidiaries
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#define MPFS_GPIO_CTRL(i) (0x4 * (i))
+#define MPFS_MAX_NUM_GPIO 32
+#define MPFS_GPIO_EN_INT 3
+#define MPFS_GPIO_EN_OUT_BUF BIT(2)
+#define MPFS_GPIO_EN_IN BIT(1)
+#define MPFS_GPIO_EN_OUT BIT(0)
+#define MPFS_GPIO_DIR_MASK GENMASK(2, 0)
+
+#define MPFS_GPIO_TYPE_INT_EDGE_BOTH 0x80
+#define MPFS_GPIO_TYPE_INT_EDGE_NEG 0x60
+#define MPFS_GPIO_TYPE_INT_EDGE_POS 0x40
+#define MPFS_GPIO_TYPE_INT_LEVEL_LOW 0x20
+#define MPFS_GPIO_TYPE_INT_LEVEL_HIGH 0x00
+#define MPFS_GPIO_TYPE_INT_MASK GENMASK(7, 5)
+#define MPFS_IRQ_REG 0x80
+
+#define MPFS_INP_REG 0x84
+#define COREGPIO_INP_REG 0x90
+#define MPFS_OUTP_REG 0x88
+#define COREGPIO_OUTP_REG 0xA0
+
+struct mpfs_gpio_reg_offsets {
+ u8 inp;
+ u8 outp;
+};
+
+struct mpfs_gpio_chip {
+ struct regmap *regs;
+ const struct mpfs_gpio_reg_offsets *offsets;
+ struct gpio_chip gc;
+};
+
+static const struct regmap_config mpfs_gpio_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static int mpfs_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio_index)
+{
+ struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+
+ regmap_update_bits(mpfs_gpio->regs, MPFS_GPIO_CTRL(gpio_index),
+ MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_IN);
+
+ return 0;
+}
+
+static int mpfs_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio_index, int value)
+{
+ struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+
+ regmap_update_bits(mpfs_gpio->regs, MPFS_GPIO_CTRL(gpio_index),
+ MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_OUT | MPFS_GPIO_EN_OUT_BUF);
+ regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index),
+ value << gpio_index);
+
+ return 0;
+}
+
+static int mpfs_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int gpio_index)
+{
+ struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+ unsigned int gpio_cfg;
+
+ regmap_read(mpfs_gpio->regs, MPFS_GPIO_CTRL(gpio_index), &gpio_cfg);
+ if (gpio_cfg & MPFS_GPIO_EN_IN)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int mpfs_gpio_get(struct gpio_chip *gc, unsigned int gpio_index)
+{
+ struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+
+ if (mpfs_gpio_get_direction(gc, gpio_index) == GPIO_LINE_DIRECTION_OUT)
+ return regmap_test_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index));
+ else
+ return regmap_test_bits(mpfs_gpio->regs, mpfs_gpio->offsets->inp, BIT(gpio_index));
+}
+
+static void mpfs_gpio_set(struct gpio_chip *gc, unsigned int gpio_index, int value)
+{
+ struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+
+ mpfs_gpio_get(gc, gpio_index);
+
+ regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index),
+ value << gpio_index);
+
+ mpfs_gpio_get(gc, gpio_index);
+}
+
+static int mpfs_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpfs_gpio_chip *mpfs_gpio;
+ struct clk *clk;
+ void __iomem *base;
+ u32 ngpios;
+
+ mpfs_gpio = devm_kzalloc(dev, sizeof(*mpfs_gpio), GFP_KERNEL);
+ if (!mpfs_gpio)
+ return -ENOMEM;
+
+ mpfs_gpio->offsets = device_get_match_data(&pdev->dev);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "failed to ioremap memory resource\n");
+
+ mpfs_gpio->regs = devm_regmap_init_mmio(dev, base, &mpfs_gpio_regmap_config);
+ if (IS_ERR(mpfs_gpio->regs))
+ return dev_err_probe(dev, PTR_ERR(mpfs_gpio->regs),
+ "failed to initialise regmap\n");
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get and enable clock\n");
+
+ ngpios = MPFS_MAX_NUM_GPIO;
+ device_property_read_u32(dev, "ngpios", &ngpios);
+ if (ngpios > MPFS_MAX_NUM_GPIO)
+ ngpios = MPFS_MAX_NUM_GPIO;
+
+ mpfs_gpio->gc.direction_input = mpfs_gpio_direction_input;
+ mpfs_gpio->gc.direction_output = mpfs_gpio_direction_output;
+ mpfs_gpio->gc.get_direction = mpfs_gpio_get_direction;
+ mpfs_gpio->gc.get = mpfs_gpio_get;
+ mpfs_gpio->gc.set = mpfs_gpio_set;
+ mpfs_gpio->gc.base = -1;
+ mpfs_gpio->gc.ngpio = ngpios;
+ mpfs_gpio->gc.label = dev_name(dev);
+ mpfs_gpio->gc.parent = dev;
+ mpfs_gpio->gc.owner = THIS_MODULE;
+
+ return devm_gpiochip_add_data(dev, &mpfs_gpio->gc, mpfs_gpio);
+}
+
+static const struct mpfs_gpio_reg_offsets mpfs_reg_offsets = {
+ .inp = MPFS_INP_REG,
+ .outp = MPFS_OUTP_REG,
+};
+
+static const struct mpfs_gpio_reg_offsets coregpio_reg_offsets = {
+ .inp = COREGPIO_INP_REG,
+ .outp = COREGPIO_OUTP_REG,
+};
+
+static const struct of_device_id mpfs_gpio_of_ids[] = {
+ {
+ .compatible = "microchip,mpfs-gpio",
+ .data = &mpfs_reg_offsets,
+ }, {
+ .compatible = "microchip,coregpio-rtl-v3",
+ .data = &coregpio_reg_offsets,
+ },
+ { /* end of list */ }
+};
+
+static struct platform_driver mpfs_gpio_driver = {
+ .probe = mpfs_gpio_probe,
+ .driver = {
+ .name = "microchip,mpfs-gpio",
+ .of_match_table = mpfs_gpio_of_ids,
+ },
+};
+builtin_platform_driver(mpfs_gpio_driver);
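
The new PolarFire SoC driver routes all register access through an MMIO regmap, so every per-pin operation becomes a masked read-modify-write via regmap_update_bits(). A reduced sketch of the setup and a single-bit update (register names and offsets are illustrative):

#include <linux/bits.h>
#include <linux/regmap.h>

static const struct regmap_config demo_regmap_cfg = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
};

/* Set or clear one bit of a register without touching its neighbours. */
static int demo_set_bit(struct regmap *map, unsigned int reg,
			unsigned int bit, bool on)
{
	return regmap_update_bits(map, reg, BIT(bit), on ? BIT(bit) : 0);
}

The map itself would come from devm_regmap_init_mmio(dev, base, &demo_regmap_cfg), exactly as the probe function above obtains it.
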
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
new file mode 100644
index 000000000000..3ea32c5e33d1
--- /dev/null
+++ b/drivers/gpio/gpio-mpsse.c
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * FTDI MPSSE GPIO support
+ *
+ * Based on code by Anatolij Gustschin
+ *
+ * Copyright (C) 2024 Mary Strodl <mstrodl@csh.rit.edu>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/gpio/driver.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+
+struct mpsse_priv {
+ struct gpio_chip gpio;
+ struct usb_device *udev; /* USB device encompassing all MPSSEs */
+ struct usb_interface *intf; /* USB interface for this MPSSE */
+ u8 intf_id; /* USB interface number for this MPSSE */
+ struct work_struct irq_work; /* polling work thread */
+ struct mutex irq_mutex; /* lock over irq_data */
+ atomic_t irq_type[16]; /* pin -> edge detection type */
+ atomic_t irq_enabled;
+ int id;
+
+ u8 gpio_outputs[2]; /* Output states for GPIOs [L, H] */
+ u8 gpio_dir[2]; /* Directions for GPIOs [L, H] */
+
+ u8 *bulk_in_buf; /* Extra recv buffer to grab status bytes */
+
+ struct usb_endpoint_descriptor *bulk_in;
+ struct usb_endpoint_descriptor *bulk_out;
+
+ struct mutex io_mutex; /* sync I/O with disconnect */
+};
+
+struct bulk_desc {
+ bool tx; /* direction of bulk transfer */
+ u8 *data; /* input (tx) or output (rx) */
+ int len; /* length of `data` if tx, or of data to read if rx */
+ int len_actual; /* Length successfully transferred */
+ int timeout;
+};
+
+static const struct usb_device_id gpio_mpsse_table[] = {
+ { USB_DEVICE(0x0c52, 0xa064) }, /* SeaLevel Systems, Inc. */
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gpio_mpsse_table);
+
+static DEFINE_IDA(gpio_mpsse_ida);
+
+/* MPSSE commands */
+#define SET_BITS_CMD 0x80
+#define GET_BITS_CMD 0x81
+
+#define SET_BITMODE_REQUEST 0x0B
+#define MODE_MPSSE (2 << 8)
+#define MODE_RESET 0
+
+/* Arbitrarily decided. This could probably be much less */
+#define MPSSE_WRITE_TIMEOUT 5000
+#define MPSSE_READ_TIMEOUT 5000
+
+/* 1 millisecond, also pretty arbitrary */
+#define MPSSE_POLL_INTERVAL 1000
+
+static int mpsse_bulk_xfer(struct usb_interface *intf, struct bulk_desc *desc)
+{
+ struct mpsse_priv *priv = usb_get_intfdata(intf);
+ struct usb_device *udev = priv->udev;
+ unsigned int pipe;
+ int ret;
+
+ if (desc->tx)
+ pipe = usb_sndbulkpipe(udev, priv->bulk_out->bEndpointAddress);
+ else
+ pipe = usb_rcvbulkpipe(udev, priv->bulk_in->bEndpointAddress);
+
+ ret = usb_bulk_msg(udev, pipe, desc->data, desc->len,
+ &desc->len_actual, desc->timeout);
+ if (ret)
+ dev_dbg(&udev->dev, "mpsse: bulk transfer failed: %d\n", ret);
+
+ return ret;
+}
+
+static int mpsse_write(struct usb_interface *intf,
+ u8 *buf, size_t len)
+{
+ int ret;
+ struct bulk_desc desc;
+
+ desc.len_actual = 0;
+ desc.tx = true;
+ desc.data = buf;
+ desc.len = len;
+ desc.timeout = MPSSE_WRITE_TIMEOUT;
+
+ ret = mpsse_bulk_xfer(intf, &desc);
+
+ return ret;
+}
+
+static int mpsse_read(struct usb_interface *intf, u8 *buf, size_t len)
+{
+ int ret;
+ struct bulk_desc desc;
+ struct mpsse_priv *priv = usb_get_intfdata(intf);
+
+ desc.len_actual = 0;
+ desc.tx = false;
+ desc.data = priv->bulk_in_buf;
+ /* Device sends 2 additional status bytes, read len + 2 */
+ desc.len = min_t(size_t, len + 2, usb_endpoint_maxp(priv->bulk_in));
+ desc.timeout = MPSSE_READ_TIMEOUT;
+
+ ret = mpsse_bulk_xfer(intf, &desc);
+ if (ret)
+ return ret;
+
+ /* Did we get enough data? */
+ if (desc.len_actual < desc.len)
+ return -EIO;
+
+ memcpy(buf, desc.data + 2, desc.len_actual - 2);
+
+ return ret;
+}
+
+static int gpio_mpsse_set_bank(struct mpsse_priv *priv, u8 bank)
+{
+ int ret;
+ u8 tx_buf[3] = {
+ SET_BITS_CMD | (bank << 1),
+ priv->gpio_outputs[bank],
+ priv->gpio_dir[bank],
+ };
+
+ ret = mpsse_write(priv->intf, tx_buf, 3);
+
+ return ret;
+}
+
+static int gpio_mpsse_get_bank(struct mpsse_priv *priv, u8 bank)
+{
+ int ret;
+ u8 buf = GET_BITS_CMD | (bank << 1);
+
+ ret = mpsse_write(priv->intf, &buf, 1);
+ if (ret)
+ return ret;
+
+ ret = mpsse_read(priv->intf, &buf, 1);
+ if (ret)
+ return ret;
+
+ return buf;
+}
+
+static void gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long i, bank, bank_mask, bank_bits;
+ int ret;
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ guard(mutex)(&priv->io_mutex);
+ for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
+ bank = i / 8;
+
+ if (bank_mask) {
+ bank_bits = bitmap_get_value8(bits, i);
+ /* Zero out pins we want to change */
+ priv->gpio_outputs[bank] &= ~bank_mask;
+ /* Set pins we care about */
+ priv->gpio_outputs[bank] |= bank_bits & bank_mask;
+
+ ret = gpio_mpsse_set_bank(priv, bank);
+ if (ret)
+ dev_err(&priv->intf->dev,
+ "Couldn't set values for bank %ld!\n",
+ bank);
+ }
+ }
+}
+
+static int gpio_mpsse_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long i, bank, bank_mask;
+ int ret;
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ guard(mutex)(&priv->io_mutex);
+ for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
+ bank = i / 8;
+
+ if (bank_mask) {
+ ret = gpio_mpsse_get_bank(priv, bank);
+ if (ret < 0)
+ return ret;
+
+ bitmap_set_value8(bits, ret & bank_mask, i);
+ }
+ }
+
+ return 0;
+}
+
+static int gpio_mpsse_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ int err;
+ unsigned long mask = 0, bits = 0;
+
+ __set_bit(offset, &mask);
+ err = gpio_mpsse_get_multiple(chip, &mask, &bits);
+ if (err)
+ return err;
+
+ /* bits holds the raw masked value, not 0/1, so normalize it */
+ if (bits)
+ return 1;
+ else
+ return 0;
+}
+
+static void gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long mask = 0, bits = 0;
+
+ __set_bit(offset, &mask);
+ if (value)
+ __set_bit(offset, &bits);
+
+ gpio_mpsse_set_multiple(chip, &mask, &bits);
+}
+
+static int gpio_mpsse_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+ int bank = (offset & 8) >> 3;
+ int bank_offset = offset & 7;
+
+ scoped_guard(mutex, &priv->io_mutex)
+ priv->gpio_dir[bank] |= BIT(bank_offset);
+
+ gpio_mpsse_gpio_set(chip, offset, value);
+
+ return 0;
+}
+
+static int gpio_mpsse_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+ int bank = (offset & 8) >> 3;
+ int bank_offset = offset & 7;
+
+ guard(mutex)(&priv->io_mutex);
+ priv->gpio_dir[bank] &= ~BIT(bank_offset);
+ gpio_mpsse_set_bank(priv, bank);
+
+ return 0;
+}
+
+static int gpio_mpsse_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ int ret;
+ int bank = (offset & 8) >> 3;
+ int bank_offset = offset & 7;
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ guard(mutex)(&priv->io_mutex);
+ /* MPSSE directions are inverted */
+ if (priv->gpio_dir[bank] & BIT(bank_offset))
+ ret = GPIO_LINE_DIRECTION_OUT;
+ else
+ ret = GPIO_LINE_DIRECTION_IN;
+
+ return ret;
+}
+
+static void gpio_mpsse_poll(struct work_struct *work)
+{
+ unsigned long pin_mask, pin_states, flags;
+ int irq_enabled, offset, err, value, fire_irq,
+ irq, old_value[16], irq_type[16];
+ struct mpsse_priv *priv = container_of(work, struct mpsse_priv,
+ irq_work);
+
+ for (offset = 0; offset < priv->gpio.ngpio; ++offset)
+ old_value[offset] = -1;
+
+ while ((irq_enabled = atomic_read(&priv->irq_enabled))) {
+ usleep_range(MPSSE_POLL_INTERVAL, MPSSE_POLL_INTERVAL + 1000);
+ /* Cleanup will trigger at the end of the loop */
+ guard(mutex)(&priv->irq_mutex);
+
+ pin_mask = 0;
+ pin_states = 0;
+ for (offset = 0; offset < priv->gpio.ngpio; ++offset) {
+ irq_type[offset] = atomic_read(&priv->irq_type[offset]);
+ if (irq_type[offset] != IRQ_TYPE_NONE &&
+ irq_enabled & BIT(offset))
+ pin_mask |= BIT(offset);
+ else
+ old_value[offset] = -1;
+ }
+
+ err = gpio_mpsse_get_multiple(&priv->gpio, &pin_mask,
+ &pin_states);
+ if (err) {
+ dev_err_ratelimited(&priv->intf->dev,
+ "Error polling!\n");
+ continue;
+ }
+
+ /* Check each value */
+ for (offset = 0; offset < priv->gpio.ngpio; ++offset) {
+ if (old_value[offset] == -1)
+ continue;
+
+ fire_irq = 0;
+ value = pin_states & BIT(offset);
+
+ switch (irq_type[offset]) {
+ case IRQ_TYPE_EDGE_RISING:
+ fire_irq = value > old_value[offset];
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ fire_irq = value < old_value[offset];
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ fire_irq = value != old_value[offset];
+ break;
+ }
+ if (!fire_irq)
+ continue;
+
+ irq = irq_find_mapping(priv->gpio.irq.domain,
+ offset);
+ local_irq_save(flags);
+ generic_handle_irq(irq);
+ local_irq_disable();
+ local_irq_restore(flags);
+ }
+
+ /* Sync back values so we can refer to them next tick */
+ for (offset = 0; offset < priv->gpio.ngpio; ++offset)
+ if (irq_type[offset] != IRQ_TYPE_NONE &&
+ irq_enabled & BIT(offset))
+ old_value[offset] = pin_states & BIT(offset);
+ }
+}
+
+static int gpio_mpsse_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+ int offset;
+ struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
+
+ offset = irqd->hwirq;
+ atomic_set(&priv->irq_type[offset], type & IRQ_TYPE_EDGE_BOTH);
+
+ return 0;
+}
+
+static void gpio_mpsse_irq_disable(struct irq_data *irqd)
+{
+ struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
+
+ atomic_and(~BIT(irqd->hwirq), &priv->irq_enabled);
+ gpiochip_disable_irq(&priv->gpio, irqd->hwirq);
+}
+
+static void gpio_mpsse_irq_enable(struct irq_data *irqd)
+{
+ struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
+
+ gpiochip_enable_irq(&priv->gpio, irqd->hwirq);
+ /* If no-one else was using the IRQ, enable it */
+ if (!atomic_fetch_or(BIT(irqd->hwirq), &priv->irq_enabled)) {
+ INIT_WORK(&priv->irq_work, gpio_mpsse_poll);
+ schedule_work(&priv->irq_work);
+ }
+}
+
+static const struct irq_chip gpio_mpsse_irq_chip = {
+ .name = "gpio-mpsse-irq",
+ .irq_enable = gpio_mpsse_irq_enable,
+ .irq_disable = gpio_mpsse_irq_disable,
+ .irq_set_type = gpio_mpsse_set_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void gpio_mpsse_ida_remove(void *data)
+{
+ struct mpsse_priv *priv = data;
+
+ ida_free(&gpio_mpsse_ida, priv->id);
+}
+
+static int gpio_mpsse_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct mpsse_priv *priv;
+ struct device *dev;
+ int err;
+
+ dev = &interface->dev;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->udev = usb_get_dev(interface_to_usbdev(interface));
+ priv->intf = interface;
+ priv->intf_id = interface->cur_altsetting->desc.bInterfaceNumber;
+
+ priv->id = ida_alloc(&gpio_mpsse_ida, GFP_KERNEL);
+ if (priv->id < 0)
+ return priv->id;
+
+ err = devm_add_action_or_reset(dev, gpio_mpsse_ida_remove, priv);
+ if (err)
+ return err;
+
+ err = devm_mutex_init(dev, &priv->io_mutex);
+ if (err)
+ return err;
+
+ err = devm_mutex_init(dev, &priv->irq_mutex);
+ if (err)
+ return err;
+
+ priv->gpio.label = devm_kasprintf(dev, GFP_KERNEL,
+ "gpio-mpsse.%d.%d",
+ priv->id, priv->intf_id);
+ if (!priv->gpio.label)
+ return -ENOMEM;
+
+ priv->gpio.owner = THIS_MODULE;
+ priv->gpio.parent = interface->usb_dev;
+ priv->gpio.get_direction = gpio_mpsse_get_direction;
+ priv->gpio.direction_input = gpio_mpsse_direction_input;
+ priv->gpio.direction_output = gpio_mpsse_direction_output;
+ priv->gpio.get = gpio_mpsse_gpio_get;
+ priv->gpio.set = gpio_mpsse_gpio_set;
+ priv->gpio.get_multiple = gpio_mpsse_get_multiple;
+ priv->gpio.set_multiple = gpio_mpsse_set_multiple;
+ priv->gpio.base = -1;
+ priv->gpio.ngpio = 16;
+ priv->gpio.offset = priv->intf_id * priv->gpio.ngpio;
+ priv->gpio.can_sleep = 1;
+
+ err = usb_find_common_endpoints(interface->cur_altsetting,
+ &priv->bulk_in, &priv->bulk_out,
+ NULL, NULL);
+ if (err)
+ return err;
+
+ priv->bulk_in_buf = devm_kmalloc(dev, usb_endpoint_maxp(priv->bulk_in),
+ GFP_KERNEL);
+ if (!priv->bulk_in_buf)
+ return -ENOMEM;
+
+ usb_set_intfdata(interface, priv);
+
+ /* Reset mode, needed to correctly enter MPSSE mode */
+ err = usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+ SET_BITMODE_REQUEST,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ MODE_RESET, priv->intf_id + 1, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (err)
+ return err;
+
+ /* Enter MPSSE mode */
+ err = usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+ SET_BITMODE_REQUEST,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ MODE_MPSSE, priv->intf_id + 1, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (err)
+ return err;
+
+ gpio_irq_chip_set_chip(&priv->gpio.irq, &gpio_mpsse_irq_chip);
+
+ priv->gpio.irq.parent_handler = NULL;
+ priv->gpio.irq.num_parents = 0;
+ priv->gpio.irq.parents = NULL;
+ priv->gpio.irq.default_type = IRQ_TYPE_NONE;
+ priv->gpio.irq.handler = handle_simple_irq;
+
+ err = devm_gpiochip_add_data(dev, &priv->gpio, priv);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void gpio_mpsse_disconnect(struct usb_interface *intf)
+{
+ struct mpsse_priv *priv = usb_get_intfdata(intf);
+
+ priv->intf = NULL;
+ usb_set_intfdata(intf, NULL);
+ usb_put_dev(priv->udev);
+}
+
+static struct usb_driver gpio_mpsse_driver = {
+ .name = "gpio-mpsse",
+ .probe = gpio_mpsse_probe,
+ .disconnect = gpio_mpsse_disconnect,
+ .id_table = gpio_mpsse_table,
+};
+
+module_usb_driver(gpio_mpsse_driver);
+
+MODULE_AUTHOR("Mary Strodl <mstrodl@csh.rit.edu>");
+MODULE_DESCRIPTION("MPSSE GPIO driver");
+MODULE_LICENSE("GPL");
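
gpio-mpsse is written around the scope-based locking helpers from <linux/cleanup.h>: guard(mutex)(&m) holds the lock until the enclosing scope ends, and scoped_guard(mutex, &m) confines it to one statement or block, so none of the functions above need an unlock on every exit path. A minimal sketch:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_state;

static int demo_read(void)
{
	guard(mutex)(&demo_lock);	/* dropped when the function returns */
	return demo_state;
}

static void demo_write(int v)
{
	scoped_guard(mutex, &demo_lock)	/* dropped after this statement */
		demo_state = v;
}
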
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 8cfd3a89c018..5ffb332e9849 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -794,8 +794,8 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
u32 set;
if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
- int ret = of_property_read_u32(dev->of_node,
- "marvell,pwm-offset", &offset);
+ int ret = device_property_read_u32(dev, "marvell,pwm-offset",
+ &offset);
if (ret < 0)
return 0;
} else {
@@ -1106,7 +1106,7 @@ static int mvebu_gpio_probe_syscon(struct platform_device *pdev,
if (IS_ERR(mvchip->regs))
return PTR_ERR(mvchip->regs);
- if (of_property_read_u32(pdev->dev.of_node, "offset", &mvchip->offset))
+ if (device_property_read_u32(&pdev->dev, "offset", &mvchip->offset))
return -EINVAL;
return 0;
@@ -1147,7 +1147,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mvchip);
- if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
+ if (device_property_read_u32(&pdev->dev, "ngpios", &ngpios)) {
dev_err(&pdev->dev, "Missing ngpios OF property\n");
return -ENODEV;
}
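
The mvebu hunks swap of_property_read_u32() for device_property_read_u32(), which resolves the same property through DT, ACPI _DSD or software nodes alike, so the driver no longer reaches into dev->of_node. The idiom, isolated:

#include <linux/property.h>
#include <linux/types.h>

static u32 demo_get_ngpios(struct device *dev)
{
	u32 ngpios = 32;	/* fallback when the property is absent */

	/* Firmware-agnostic: works for DT, ACPI and swnode backends. */
	device_property_read_u32(dev, "ngpios", &ngpios);
	return ngpios;
}
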
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 4cb455b2bdee..619b6fb9d833 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -490,8 +490,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->gc.request = mxc_gpio_request;
port->gc.free = mxc_gpio_free;
port->gc.to_irq = mxc_gpio_to_irq;
- port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
- pdev->id * 32;
+ port->gc.base = of_alias_get_id(np, "gpio") * 32;
err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
if (err)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 76d5d87e9681..54c4bfdccf56 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -715,7 +715,7 @@ static void omap_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
- seq_printf(p, dev_name(bank->dev));
+ seq_puts(p, dev_name(bank->dev));
}
static const struct irq_chip omap_gpio_irq_chip = {
@@ -1557,7 +1557,7 @@ static const struct dev_pm_ops gpio_pm_ops = {
static struct platform_driver omap_gpio_driver = {
.probe = omap_gpio_probe,
- .remove_new = omap_gpio_remove,
+ .remove = omap_gpio_remove,
.driver = {
.name = "omap_gpio",
.pm = &gpio_pm_ops,
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 3f2d33ee20cc..d63c1030e6ac 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -815,7 +815,7 @@ static void pca953x_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- seq_printf(p, dev_name(gc->parent));
+ seq_puts(p, dev_name(gc->parent));
}
static const struct irq_chip pca953x_irq_chip = {
@@ -841,25 +841,6 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(trigger, MAX_LINE);
int ret;
- if (chip->driver_data & PCA_PCAL) {
- /* Read the current interrupt status from the device */
- ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
- if (ret)
- return false;
-
- /* Check latched inputs and clear interrupt status */
- ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
- if (ret)
- return false;
-
- /* Apply filter for rising/falling edge selection */
- bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
-
- bitmap_and(pending, new_stat, trigger, gc->ngpio);
-
- return !bitmap_empty(pending, gc->ngpio);
- }
-
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
@@ -1088,7 +1069,8 @@ static int pca953x_probe(struct i2c_client *client)
*/
reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(reset_gpio))
- return PTR_ERR(reset_gpio);
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Failed to get reset gpio\n");
}
chip->client = client;
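
The pca953x change promotes the reset-GPIO error to dev_err_probe(), which logs and returns in one step, stays silent for -EPROBE_DEFER, and still records the deferral reason for debugfs. The idiom on its own:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

static int demo_get_reset(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		/* Quiet on -EPROBE_DEFER, loud on anything else. */
		return dev_err_probe(dev, PTR_ERR(reset),
				     "Failed to get reset gpio\n");

	return 0;
}
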
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 44c0a21b1d1d..476cea1b5ed7 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -70,24 +70,17 @@ static int idio_16_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct device *const dev = &pdev->dev;
int err;
const size_t pci_bar_index = 2;
- const char *const name = pci_name(pdev);
struct idio_16_regmap_config config = {};
void __iomem *regs;
struct regmap *map;
err = pcim_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device (%d)\n", err);
- return err;
- }
-
- err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
- if (err) {
- dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
- regs = pcim_iomap_table(pdev)[pci_bar_index];
+ regs = pcim_iomap_region(pdev, pci_bar_index, pci_name(pdev));
+ if (IS_ERR(regs))
+ return dev_err_probe(dev, PTR_ERR(regs), "Unable to map PCI I/O addresses\n");
map = devm_regmap_init_mmio(dev, regs, &idio_16_regmap_config);
if (IS_ERR(map))
@@ -119,4 +112,4 @@ module_pci_driver(idio_16_driver);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES PCI-IDIO-16 GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(GPIO_IDIO_16);
+MODULE_IMPORT_NS("GPIO_IDIO_16");
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index 7f7f95ad4343..80c0ba0afa67 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -305,19 +305,16 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct regmap_irq_chip_data *chip_data;
err = pcim_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device (%d)\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
- err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name);
- if (err) {
- dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
- return err;
- }
+ pex8311_regs = pcim_iomap_region(pdev, pci_plx_bar_index, "pex8311");
+ if (IS_ERR(pex8311_regs))
+ return dev_err_probe(dev, PTR_ERR(pex8311_regs), "Unable to map PEX 8311 I/O addresses\n");
- pex8311_regs = pcim_iomap_table(pdev)[pci_plx_bar_index];
- idio_24_regs = pcim_iomap_table(pdev)[pci_bar_index];
+ idio_24_regs = pcim_iomap_region(pdev, pci_bar_index, name);
+ if (IS_ERR(idio_24_regs))
+ return dev_err_probe(dev, PTR_ERR(idio_24_regs), "Unable to map PCIe-IDIO-24 I/O addresses\n");
intcsr_map = devm_regmap_init_mmio(dev, pex8311_regs, &pex8311_intcsr_regmap_config);
if (IS_ERR(intcsr_map))
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index a211a02d4b4a..1c273727ffa3 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -291,7 +291,7 @@ static void pl061_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- seq_printf(p, dev_name(gc->parent));
+ seq_puts(p, dev_name(gc->parent));
}
static const struct irq_chip pl061_irq_chip = {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 6159fda38d5d..2ecee3269a0c 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -657,7 +657,7 @@ static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, gpio_rcar_resume);
static struct platform_driver gpio_rcar_device_driver = {
.probe = gpio_rcar_probe,
- .remove_new = gpio_rcar_remove,
+ .remove = gpio_rcar_remove,
.driver = {
.name = "gpio_rcar",
.pm = &gpio_rcar_pm_ops,
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 71684dee2ca5..05f8781b5204 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -262,6 +262,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->label = config->label ?: dev_name(config->parent);
chip->can_sleep = regmap_might_sleep(config->regmap);
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
chip->set = gpio_regmap_set_with_clear;
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 365ab947983c..01a3b3dac58b 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -26,9 +26,16 @@
#include "../pinctrl/core.h"
#include "../pinctrl/pinctrl-rockchip.h"
+/*
+ * Version ID Register
+ * Bits [31:24] - Major Version
+ * Bits [23:16] - Minor Version
+ * Bits [15:0] - Revision Number
+ */
#define GPIO_TYPE_V1 (0) /* GPIO Version ID reserved */
-#define GPIO_TYPE_V2 (0x01000C2B) /* GPIO Version ID 0x01000C2B */
-#define GPIO_TYPE_V2_1 (0x0101157C) /* GPIO Version ID 0x0101157C */
+#define GPIO_TYPE_V2 (0x01000C2B)
+#define GPIO_TYPE_V2_1 (0x0101157C)
+#define GPIO_TYPE_V2_2 (0x010219C8)
static const struct rockchip_gpio_regs gpio_regs_v1 = {
.port_dr = 0x00,
@@ -602,7 +609,7 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
* files which don't set the "gpio-ranges" property or systems that
* utilize ACPI the driver has to call gpiochip_add_pin_range().
*/
- if (!of_property_read_bool(bank->of_node, "gpio-ranges")) {
+ if (!of_property_present(bank->of_node, "gpio-ranges")) {
struct device_node *pctlnp = of_get_parent(bank->of_node);
struct pinctrl_dev *pctldev = NULL;
@@ -661,8 +668,10 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank)
clk_prepare_enable(bank->clk);
id = readl(bank->reg_base + gpio_regs_v2.version_id);
- /* If not gpio v2, that is default to v1. */
- if (id == GPIO_TYPE_V2 || id == GPIO_TYPE_V2_1) {
+ switch (id) {
+ case GPIO_TYPE_V2:
+ case GPIO_TYPE_V2_1:
+ case GPIO_TYPE_V2_2:
bank->gpio_regs = &gpio_regs_v2;
bank->gpio_type = GPIO_TYPE_V2;
bank->db_clk = of_clk_get(bank->of_node, 1);
@@ -671,9 +680,14 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank)
clk_disable_unprepare(bank->clk);
return -EINVAL;
}
- } else {
+ break;
+ case GPIO_TYPE_V1:
bank->gpio_regs = &gpio_regs_v1;
bank->gpio_type = GPIO_TYPE_V1;
+ break;
+ default:
+ dev_err(bank->dev, "unsupported version ID: 0x%08x\n", id);
+ return -ENODEV;
}
return 0;
@@ -795,7 +809,7 @@ static const struct of_device_id rockchip_gpio_match[] = {
static struct platform_driver rockchip_gpio_driver = {
.probe = rockchip_gpio_probe,
- .remove_new = rockchip_gpio_remove,
+ .remove = rockchip_gpio_remove,
.driver = {
.name = "rockchip-gpio",
.of_match_table = rockchip_gpio_match,
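
With the version-ID layout now documented (major in bits 31:24, minor in 23:16, revision in 15:0), the switch over raw IDs is the natural dispatch. A sketch of decoding the fields, assuming GENMASK()/FIELD_GET() (the helper name is illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define GPIO_VER_MAJOR	GENMASK(31, 24)
#define GPIO_VER_MINOR	GENMASK(23, 16)
#define GPIO_VER_REV	GENMASK(15, 0)

/* 0x0101157C decodes to major 1, minor 1, revision 0x157c, i.e. v2.1. */
static void demo_decode_version(u32 id, u32 *major, u32 *minor, u32 *rev)
{
	*major = FIELD_GET(GPIO_VER_MAJOR, id);
	*minor = FIELD_GET(GPIO_VER_MINOR, id);
	*rev = FIELD_GET(GPIO_VER_REV, id);
}
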
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index dcca1d7f173e..b6c230fab840 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -413,11 +413,6 @@ static int gpio_sim_setup_sysfs(struct gpio_sim_chip *chip)
return devm_add_action_or_reset(dev, gpio_sim_sysfs_remove, chip);
}
-static int gpio_sim_dev_match_fwnode(struct device *dev, void *data)
-{
- return device_match_fwnode(dev, data);
-}
-
static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
{
struct gpio_sim_chip *chip;
@@ -503,7 +498,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
if (ret)
return ret;
- chip->dev = device_find_child(dev, swnode, gpio_sim_dev_match_fwnode);
+ chip->dev = device_find_child(dev, swnode, device_match_fwnode);
if (!chip->dev)
return -ENODEV;
@@ -520,15 +515,12 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
static int gpio_sim_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct fwnode_handle *swnode;
int ret;
- device_for_each_child_node(dev, swnode) {
+ device_for_each_child_node_scoped(dev, swnode) {
ret = gpio_sim_add_bank(swnode, dev);
- if (ret) {
- fwnode_handle_put(swnode);
+ if (ret)
return ret;
- }
}
return 0;
@@ -1030,6 +1022,33 @@ static void gpio_sim_device_deactivate(struct gpio_sim_device *dev)
dev->pdev = NULL;
}
+static void
+gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
+{
+ struct configfs_subsystem *subsys = dev->group.cg_subsys;
+ struct gpio_sim_bank *bank;
+ struct gpio_sim_line *line;
+ struct config_item *item;
+
+ /*
+ * The device only needs to depend on leaf entries. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(bank, &dev->bank_list, siblings) {
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ item = line->hog ? &line->hog->item
+ : &line->group.cg_item;
+
+ if (lock)
+ WARN_ON(configfs_depend_item_unlocked(subsys,
+ item));
+ else
+ configfs_undepend_item_unlocked(item);
+ }
+ }
+}
+
static ssize_t
gpio_sim_device_config_live_store(struct config_item *item,
const char *page, size_t count)
@@ -1042,14 +1061,24 @@ gpio_sim_device_config_live_store(struct config_item *item,
if (ret)
return ret;
- guard(mutex)(&dev->lock);
+ if (live)
+ gpio_sim_device_lockup_configfs(dev, true);
- if (live == gpio_sim_device_is_live(dev))
- ret = -EPERM;
- else if (live)
- ret = gpio_sim_device_activate(dev);
- else
- gpio_sim_device_deactivate(dev);
+ scoped_guard(mutex, &dev->lock) {
+ if (live == gpio_sim_device_is_live(dev))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_sim_device_activate(dev);
+ else
+ gpio_sim_device_deactivate(dev);
+ }
+
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret)
+ gpio_sim_device_lockup_configfs(dev, false);
return ret ?: count;
}
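
gpio-sim's probe loop moves to device_for_each_child_node_scoped(), whose scoped child handle is put automatically on every way out of the loop, removing the manual fwnode_handle_put() in the error branch. A sketch of the shape (demo_add_bank is a hypothetical helper):

#include <linux/property.h>

static int demo_add_bank(struct fwnode_handle *node, struct device *dev)
{
	return 0;	/* per-bank setup would live here */
}

static int demo_add_banks(struct device *dev)
{
	int ret;

	device_for_each_child_node_scoped(dev, child) {
		ret = demo_add_bank(child, dev);
		if (ret)
			return ret;	/* no fwnode_handle_put() needed */
	}

	return 0;
}
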
diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c
index 07e0d7180579..8cf3b171c599 100644
--- a/drivers/gpio/gpio-sloppy-logic-analyzer.c
+++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c
@@ -234,7 +234,9 @@ static int gpio_la_poll_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- devm_mutex_init(dev, &priv->blob_lock);
+ ret = devm_mutex_init(dev, &priv->blob_lock);
+ if (ret)
+ return ret;
fops_buf_size_set(priv, GPIO_LA_DEFAULT_BUF_SIZE);
@@ -311,7 +313,7 @@ MODULE_DEVICE_TABLE(of, gpio_la_poll_of_match);
static struct platform_driver gpio_la_poll_device_driver = {
.probe = gpio_la_poll_probe,
- .remove_new = gpio_la_poll_remove,
+ .remove = gpio_la_poll_remove,
.driver = {
.name = GPIO_LA_NAME,
.of_match_table = gpio_la_poll_of_match,
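The gpio-sloppy-logic-analyzer fix above treats devm_mutex_init() as fallible, which it is: the devres registration behind it can fail with -ENOMEM, and with mutex debugging enabled a real destructor is registered too. A hedged sketch of the checking pattern, demo names hypothetical:

    static int demo_probe(struct device *dev, struct mutex *lock)
    {
    	int ret;

    	ret = devm_mutex_init(dev, lock);	/* can fail; do not ignore */
    	if (ret)
    		return ret;

    	return 0;
    }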
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index 4b29abafecf6..a415e6d36173 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -459,7 +459,7 @@ int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, GPIO_TANGIER);
+EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, "GPIO_TANGIER");
static int tng_gpio_suspend(struct device *dev)
{
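The EXPORT_SYMBOL_NS_GPL() change follows the tree-wide conversion of symbol namespaces from bare identifiers to string literals; importers use the same quoted form. Illustrative fragment (not a complete module), assuming that conversion is in place in this tree:

    /* exporting module */
    EXPORT_SYMBOL_NS_GPL(devm_tng_gpio_probe, "GPIO_TANGIER");

    /* consuming module */
    MODULE_IMPORT_NS("GPIO_TANGIER");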
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index e8c1485b9c73..b6335cde455f 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -235,7 +235,7 @@ MODULE_DEVICE_TABLE(of, tb10x_gpio_dt_ids);
static struct platform_driver tb10x_gpio_driver = {
.probe = tb10x_gpio_probe,
- .remove_new = tb10x_gpio_remove,
+ .remove = tb10x_gpio_remove,
.driver = {
.name = "tb10x-gpio",
.of_match_table = tb10x_gpio_dt_ids,
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 6d3a39a03f58..9ad286adf263 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -600,7 +600,7 @@ static void tegra_gpio_irq_print_chip(struct irq_data *d, struct seq_file *s)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- seq_printf(s, dev_name(chip->parent));
+ seq_puts(s, dev_name(chip->parent));
}
static const struct irq_chip tegra_gpio_irq_chip = {
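This and the similar seq_printf() fixes below address a format-string hazard: passing a non-literal string as the format means a device name containing '%' would be parsed as conversion specifiers. seq_puts() copies the string verbatim and is cheaper as well. Illustrative fragment:

    seq_printf(s, dev_name(chip->parent));        /* bad: name may contain '%' */
    seq_puts(s, dev_name(chip->parent));          /* good */
    seq_printf(s, "%s", dev_name(chip->parent));  /* equivalent, but slower */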
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 1ecb733a5e88..6895b65c86af 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -610,7 +610,7 @@ static void tegra186_irq_print_chip(struct irq_data *data, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- seq_printf(p, dev_name(gc->parent));
+ seq_puts(p, dev_name(gc->parent));
}
static const struct irq_chip tegra186_gpio_irq_chip = {
diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c
index cd1f17041f8c..526640c39a11 100644
--- a/drivers/gpio/gpio-tps65219.c
+++ b/drivers/gpio/gpio-tps65219.c
@@ -15,8 +15,6 @@
#define TPS65219_GPIO0_DIR_MASK BIT(3)
#define TPS65219_GPIO0_OFFSET 2
#define TPS65219_GPIO0_IDX 0
-#define TPS65219_GPIO_DIR_IN 1
-#define TPS65219_GPIO_DIR_OUT 0
struct tps65219_gpio {
struct gpio_chip gpio_chip;
@@ -61,7 +59,7 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset)
* status bit.
*/
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_OUT)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT)
return -ENOTSUPP;
return ret;
@@ -124,10 +122,10 @@ static int tps65219_gpio_direction_input(struct gpio_chip *gc, unsigned int offs
return -ENOTSUPP;
}
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_IN)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_IN)
return 0;
- return tps65219_gpio_change_direction(gc, offset, TPS65219_GPIO_DIR_IN);
+ return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_IN);
}
static int tps65219_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
@@ -136,10 +134,10 @@ static int tps65219_gpio_direction_output(struct gpio_chip *gc, unsigned int off
if (offset != TPS65219_GPIO0_IDX)
return 0;
- if (tps65219_gpio_get_direction(gc, offset) == TPS65219_GPIO_DIR_OUT)
+ if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT)
return 0;
- return tps65219_gpio_change_direction(gc, offset, TPS65219_GPIO_DIR_OUT);
+ return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_OUT);
}
static const struct gpio_chip tps65219_template_chip = {
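The tps65219 cleanup works because the driver-private direction macros happened to use the same values as the gpiolib constants that gpio_chip::get_direction() callbacks are expected to return, so comparing against GPIO_LINE_DIRECTION_* is a drop-in replacement. For illustration, the values as currently defined in include/linux/gpio/driver.h:

    #define GPIO_LINE_DIRECTION_IN   1
    #define GPIO_LINE_DIRECTION_OUT  0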
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index f2e7e8754d95..18f523a15b3c 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -29,18 +29,22 @@
#define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */
#define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */
-#define TQMX86_GPII_NONE 0
-#define TQMX86_GPII_FALLING BIT(0)
-#define TQMX86_GPII_RISING BIT(1)
-/* Stored in irq_type as a trigger type, but not actually valid as a register
- * value, so the name doesn't use "GPII"
+/*
+ * NONE, FALLING and RISING use the same bit patterns that can be programmed to
+ * the GPII register (after passing them to the TQMX86_GPII_ macros to shift
+ * them to the right position)
*/
-#define TQMX86_INT_BOTH (BIT(0) | BIT(1))
-#define TQMX86_GPII_MASK (BIT(0) | BIT(1))
-#define TQMX86_GPII_BITS 2
+#define TQMX86_INT_TRIG_NONE 0
+#define TQMX86_INT_TRIG_FALLING BIT(0)
+#define TQMX86_INT_TRIG_RISING BIT(1)
+#define TQMX86_INT_TRIG_BOTH (BIT(0) | BIT(1))
+#define TQMX86_INT_TRIG_MASK (BIT(0) | BIT(1))
/* Stored in irq_type with GPII bits */
#define TQMX86_INT_UNMASKED BIT(2)
+#define TQMX86_GPIIC_CONFIG(i, v) ((v) << (2 * (i)))
+#define TQMX86_GPIIC_MASK(i) TQMX86_GPIIC_CONFIG(i, TQMX86_INT_TRIG_MASK)
+
struct tqmx86_gpio_data {
struct gpio_chip chip;
void __iomem *io_base;
@@ -48,7 +52,7 @@ struct tqmx86_gpio_data {
/* Lock must be held for accessing output and irq_type fields */
raw_spinlock_t spinlock;
DECLARE_BITMAP(output, TQMX86_NGPIO);
- u8 irq_type[TQMX86_NGPI];
+ u8 irq_type[TQMX86_NGPIO];
};
static u8 tqmx86_gpio_read(struct tqmx86_gpio_data *gd, unsigned int reg)
@@ -62,6 +66,18 @@ static void tqmx86_gpio_write(struct tqmx86_gpio_data *gd, u8 val,
iowrite8(val, gd->io_base + reg);
}
+static void tqmx86_gpio_clrsetbits(struct tqmx86_gpio_data *gpio,
+ u8 clr, u8 set, unsigned int reg)
+ __must_hold(&gpio->spinlock)
+{
+ u8 val = tqmx86_gpio_read(gpio, reg);
+
+ val &= ~clr;
+ val |= set;
+
+ tqmx86_gpio_write(gpio, val, reg);
+}
+
static int tqmx86_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
@@ -69,127 +85,137 @@ static int tqmx86_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(tqmx86_gpio_read(gpio, TQMX86_GPIOD) & BIT(offset));
}
+static void _tqmx86_gpio_set(struct tqmx86_gpio_data *gpio, unsigned int offset,
+ int value)
+ __must_hold(&gpio->spinlock)
+{
+ __assign_bit(offset, gpio->output, value);
+ tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
+}
+
static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
- unsigned long flags;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- __assign_bit(offset, gpio->output, value);
- tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ _tqmx86_gpio_set(gpio, offset, value);
}
static int tqmx86_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
- /* Direction cannot be changed. Validate is an input. */
- if (BIT(offset) & TQMX86_DIR_INPUT_MASK)
- return 0;
- else
- return -EINVAL;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ tqmx86_gpio_clrsetbits(gpio, BIT(offset), 0, TQMX86_GPIODD);
+
+ return 0;
}
static int tqmx86_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset,
int value)
{
- /* Direction cannot be changed, validate is an output */
- if (BIT(offset) & TQMX86_DIR_INPUT_MASK)
- return -EINVAL;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ _tqmx86_gpio_set(gpio, offset, value);
+ tqmx86_gpio_clrsetbits(gpio, 0, BIT(offset), TQMX86_GPIODD);
- tqmx86_gpio_set(chip, offset, value);
return 0;
}
static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
- if (TQMX86_DIR_INPUT_MASK & BIT(offset))
- return GPIO_LINE_DIRECTION_IN;
+ struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
+ u8 val;
+
+ val = tqmx86_gpio_read(gpio, TQMX86_GPIODD);
+
+ if (val & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
- return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
}
-static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
+static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int hwirq)
__must_hold(&gpio->spinlock)
{
- u8 type = TQMX86_GPII_NONE, gpiic;
+ u8 type = TQMX86_INT_TRIG_NONE;
+ int gpiic_irq = hwirq - TQMX86_NGPO;
- if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
- type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
+ if (gpio->irq_type[hwirq] & TQMX86_INT_UNMASKED) {
+ type = gpio->irq_type[hwirq] & TQMX86_INT_TRIG_MASK;
- if (type == TQMX86_INT_BOTH)
- type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
- ? TQMX86_GPII_FALLING
- : TQMX86_GPII_RISING;
+ if (type == TQMX86_INT_TRIG_BOTH)
+ type = tqmx86_gpio_get(&gpio->chip, hwirq)
+ ? TQMX86_INT_TRIG_FALLING
+ : TQMX86_INT_TRIG_RISING;
}
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
- gpiic |= type << (offset * TQMX86_GPII_BITS);
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ tqmx86_gpio_clrsetbits(gpio,
+ TQMX86_GPIIC_MASK(gpiic_irq),
+ TQMX86_GPIIC_CONFIG(gpiic_irq, type),
+ TQMX86_GPIIC);
}
static void tqmx86_gpio_irq_mask(struct irq_data *data)
{
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ scoped_guard(raw_spinlock_irqsave, &gpio->spinlock) {
+ gpio->irq_type[data->hwirq] &= ~TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
+ }
gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
}
static void tqmx86_gpio_irq_unmask(struct irq_data *data)
{
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned long flags;
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ gpio->irq_type[data->hwirq] |= TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
}
static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
- unsigned int offset = (data->hwirq - TQMX86_NGPO);
unsigned int edge_type = type & IRQF_TRIGGER_MASK;
- unsigned long flags;
u8 new_type;
switch (edge_type) {
case IRQ_TYPE_EDGE_RISING:
- new_type = TQMX86_GPII_RISING;
+ new_type = TQMX86_INT_TRIG_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
- new_type = TQMX86_GPII_FALLING;
+ new_type = TQMX86_INT_TRIG_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
- new_type = TQMX86_INT_BOTH;
+ new_type = TQMX86_INT_TRIG_BOTH;
break;
default:
return -EINVAL; /* not supported */
}
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
- gpio->irq_type[offset] |= new_type;
- tqmx86_gpio_irq_config(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->spinlock);
+
+ gpio->irq_type[data->hwirq] &= ~TQMX86_INT_TRIG_MASK;
+ gpio->irq_type[data->hwirq] |= new_type;
+ tqmx86_gpio_irq_config(gpio, data->hwirq);
return 0;
}
@@ -199,8 +225,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned long irq_bits, flags;
- int i;
+ unsigned long irq_bits;
+ int i, hwirq;
u8 irq_status;
chained_irq_enter(irq_chip, desc);
@@ -210,32 +236,38 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
irq_bits = irq_status;
- raw_spin_lock_irqsave(&gpio->spinlock, flags);
- for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
- /*
- * Edge-both triggers are implemented by flipping the edge
- * trigger after each interrupt, as the controller only supports
- * either rising or falling edge triggers, but not both.
- *
- * Internally, the TQMx86 GPIO controller has separate status
- * registers for rising and falling edge interrupts. GPIIC
- * configures which bits from which register are visible in the
- * interrupt status register GPIIS and defines what triggers the
- * parent IRQ line. Writing to GPIIS always clears both rising
- * and falling interrupt flags internally, regardless of the
- * currently configured trigger.
- *
- * In consequence, we can cleanly implement the edge-both
- * trigger in software by first clearing the interrupt and then
- * setting the new trigger based on the current GPIO input in
- * tqmx86_gpio_irq_config() - even if an edge arrives between
- * reading the input and setting the trigger, we will have a new
- * interrupt pending.
- */
- if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
- tqmx86_gpio_irq_config(gpio, i);
+ scoped_guard(raw_spinlock_irqsave, &gpio->spinlock) {
+ for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
+ hwirq = i + TQMX86_NGPO;
+
+ /*
+ * Edge-both triggers are implemented by flipping the
+ * edge trigger after each interrupt, as the controller
+ * only supports either rising or falling edge triggers,
+ * but not both.
+ *
+ * Internally, the TQMx86 GPIO controller has separate
+ * status registers for rising and falling edge
+ * interrupts. GPIIC configures which bits from which
+ * register are visible in the interrupt status register
+ * GPIIS and defines what triggers the parent IRQ line.
+ * Writing to GPIIS always clears both rising and
+ * falling interrupt flags internally, regardless of the
+ * currently configured trigger.
+ *
+ * In consequence, we can cleanly implement the
+ * edge-both trigger in software by first clearing the
+ * interrupt and then setting the new trigger based on
+ * the current GPIO input in tqmx86_gpio_irq_config() -
+ * even if an edge arrives between reading the input and
+ * setting the trigger, we will have a new interrupt
+ * pending.
+ */
+ if ((gpio->irq_type[hwirq] & TQMX86_INT_TRIG_MASK) ==
+ TQMX86_INT_TRIG_BOTH)
+ tqmx86_gpio_irq_config(gpio, hwirq);
+ }
}
- raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
generic_handle_domain_irq(gpio->chip.irq.domain,
@@ -275,7 +307,7 @@ static void tqmx86_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- seq_printf(p, gc->label);
+ seq_puts(p, gc->label);
}
static const struct irq_chip tqmx86_gpio_irq_chip = {
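Several tqmx86 hunks funnel register updates through the new tqmx86_gpio_clrsetbits() helper. A read-modify-write of an I/O register is not atomic by itself, so the __must_hold() annotation lets sparse verify that every caller already owns the spinlock. A minimal sketch of the pattern, demo_* names hypothetical:

    static void demo_clrsetbits(void __iomem *reg, u8 clr, u8 set)
    {
    	u8 val = ioread8(reg);	/* read ... */

    	val &= ~clr;		/* ... clear the requested bits ... */
    	val |= set;		/* ... then set the new ones ... */

    	iowrite8(val, reg);	/* ... write back, all under one lock */
    }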
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 0f6397b77c9d..5c806140fdf0 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -8,8 +8,8 @@
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
-#include <linux/of.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#define DEFAULT_PIN_NUMBER 32
@@ -142,7 +142,7 @@ static int ts4900_gpio_probe(struct i2c_client *client)
u32 ngpio;
int ret;
- if (of_property_read_u32(client->dev.of_node, "ngpios", &ngpio))
+ if (device_property_read_u32(&client->dev, "ngpios", &ngpio))
ngpio = DEFAULT_PIN_NUMBER;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
@@ -153,7 +153,7 @@ static int ts4900_gpio_probe(struct i2c_client *client)
priv->gpio_chip.label = "ts4900-gpio";
priv->gpio_chip.ngpio = ngpio;
priv->gpio_chip.parent = &client->dev;
- priv->input_bit = (uintptr_t)of_device_get_match_data(&client->dev);
+ priv->input_bit = (uintptr_t)device_get_match_data(&client->dev);
priv->regmap = devm_regmap_init_i2c(client, &ts4900_regmap_config);
if (IS_ERR(priv->regmap)) {
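Switching ts4900 from the of_* calls to device_property_read_u32()/device_get_match_data() makes the driver firmware-agnostic: the same calls resolve properties from devicetree, ACPI or software nodes. A minimal sketch, demo names hypothetical:

    #include <linux/property.h>

    static u32 demo_ngpio(struct device *dev)
    {
    	u32 ngpio;

    	/* Works regardless of whether the node came from DT or ACPI. */
    	if (device_property_read_u32(dev, "ngpios", &ngpio))
    		ngpio = 32;	/* fall back to a default */

    	return ngpio;
    }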
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index 90f8e9e9915e..61cbec5c06a7 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -433,7 +433,7 @@ static struct platform_driver ts5500_dio_driver = {
.name = "ts5500-dio",
},
.probe = ts5500_dio_probe,
- .remove_new = ts5500_dio_remove,
+ .remove = ts5500_dio_remove,
.id_table = ts5500_dio_ids,
};
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index 6c3fbf382dba..b9171bf66168 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -22,7 +22,7 @@
static int twl6040gpo_get(struct gpio_chip *chip, unsigned offset)
{
- struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent);
+ struct twl6040 *twl6040 = gpiochip_get_data(chip);
int ret = 0;
ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL);
@@ -46,7 +46,7 @@ static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset,
static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent);
+ struct twl6040 *twl6040 = gpiochip_get_data(chip);
int ret;
u8 gpoctl;
@@ -91,7 +91,7 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
twl6040gpo_chip.parent = &pdev->dev;
- ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, twl6040);
if (ret < 0) {
dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret);
twl6040gpo_chip.ngpio = 0;
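The twl6040 change replaces reaching through chip->parent->parent for drvdata, which is fragile against device-hierarchy changes, with the data pointer that gpiolib itself carries for the chip. A minimal sketch of the pairing; struct demo_priv and demo_read_bit() are hypothetical, and the probe line is a loose fragment:

    static int demo_get(struct gpio_chip *chip, unsigned int offset)
    {
    	/* Returns whatever was passed to devm_gpiochip_add_data(). */
    	struct demo_priv *priv = gpiochip_get_data(chip);

    	return demo_read_bit(priv, offset);
    }

    /* at probe time: */
    ret = devm_gpiochip_add_data(dev, &priv->chip, priv);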
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index da99ba13e82d..d738da8718f9 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -481,7 +481,7 @@ MODULE_DEVICE_TABLE(of, uniphier_gpio_match);
static struct platform_driver uniphier_gpio_driver = {
.probe = uniphier_gpio_probe,
- .remove_new = uniphier_gpio_remove,
+ .remove = uniphier_gpio_remove,
.driver = {
.name = "uniphier-gpio",
.of_match_table = uniphier_gpio_match,
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 27eff741fe9a..c4f34a347cb6 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -15,10 +15,9 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#define VF610_GPIO_PER_PORT 32
@@ -297,7 +296,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
- port->sdata = of_device_get_match_data(dev);
+ port->sdata = device_get_match_data(dev);
dual_base = port->sdata->have_dual_base;
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index 91b6352c957c..e89f299f2140 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -1410,7 +1410,7 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
size_t num_entries = gpio_virtuser_get_lookup_count(dev);
struct gpio_virtuser_lookup_entry *entry;
struct gpio_virtuser_lookup *lookup;
- unsigned int i = 0;
+ unsigned int i = 0, idx;
lockdep_assert_held(&dev->lock);
@@ -1424,12 +1424,12 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
return -ENOMEM;
list_for_each_entry(lookup, &dev->lookup_list, siblings) {
+ idx = 0;
list_for_each_entry(entry, &lookup->entry_list, siblings) {
- table->table[i] =
+ table->table[i++] =
GPIO_LOOKUP_IDX(entry->key,
entry->offset < 0 ? U16_MAX : entry->offset,
- lookup->con_id, i, entry->flags);
- i++;
+ lookup->con_id, idx++, entry->flags);
}
}
@@ -1439,6 +1439,15 @@ gpio_virtuser_make_lookup_table(struct gpio_virtuser_device *dev)
return 0;
}
+static void
+gpio_virtuser_remove_lookup_table(struct gpio_virtuser_device *dev)
+{
+ gpiod_remove_lookup_table(dev->lookup_table);
+ kfree(dev->lookup_table->dev_id);
+ kfree(dev->lookup_table);
+ dev->lookup_table = NULL;
+}
+
static struct fwnode_handle *
gpio_virtuser_make_device_swnode(struct gpio_virtuser_device *dev)
{
@@ -1487,10 +1496,8 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
pdevinfo.fwnode = swnode;
ret = gpio_virtuser_make_lookup_table(dev);
- if (ret) {
- fwnode_remove_software_node(swnode);
- return ret;
- }
+ if (ret)
+ goto err_remove_swnode;
reinit_completion(&dev->probe_completion);
dev->driver_bound = false;
@@ -1498,23 +1505,31 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
- fwnode_remove_software_node(swnode);
- return PTR_ERR(pdev);
+ goto err_remove_lookup_table;
}
wait_for_completion(&dev->probe_completion);
bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
if (!dev->driver_bound) {
- platform_device_unregister(pdev);
- fwnode_remove_software_node(swnode);
- return -ENXIO;
+ ret = -ENXIO;
+ goto err_unregister_pdev;
}
dev->pdev = pdev;
return 0;
+
+err_unregister_pdev:
+ platform_device_unregister(pdev);
+err_remove_lookup_table:
+ gpio_virtuser_remove_lookup_table(dev);
+err_remove_swnode:
+ fwnode_remove_software_node(swnode);
+
+ return ret;
}
static void
@@ -1526,10 +1541,33 @@ gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev)
swnode = dev_fwnode(&dev->pdev->dev);
platform_device_unregister(dev->pdev);
+ gpio_virtuser_remove_lookup_table(dev);
fwnode_remove_software_node(swnode);
dev->pdev = NULL;
- gpiod_remove_lookup_table(dev->lookup_table);
- kfree(dev->lookup_table);
+}
+
+static void
+gpio_virtuser_device_lockup_configfs(struct gpio_virtuser_device *dev, bool lock)
+{
+ struct configfs_subsystem *subsys = dev->group.cg_subsys;
+ struct gpio_virtuser_lookup_entry *entry;
+ struct gpio_virtuser_lookup *lookup;
+
+ /*
+ * The device only needs to depend on leaf lookup entries. This is
+ * sufficient to lock up all the configfs entries that the
+ * instantiated, alive device depends on.
+ */
+ list_for_each_entry(lookup, &dev->lookup_list, siblings) {
+ list_for_each_entry(entry, &lookup->entry_list, siblings) {
+ if (lock)
+ WARN_ON(configfs_depend_item_unlocked(
+ subsys, &entry->group.cg_item));
+ else
+ configfs_undepend_item_unlocked(
+ &entry->group.cg_item);
+ }
+ }
}
static ssize_t
@@ -1544,15 +1582,24 @@ gpio_virtuser_device_config_live_store(struct config_item *item,
if (ret)
return ret;
- guard(mutex)(&dev->lock);
+ if (live)
+ gpio_virtuser_device_lockup_configfs(dev, true);
- if (live == gpio_virtuser_device_is_live(dev))
- return -EPERM;
+ scoped_guard(mutex, &dev->lock) {
+ if (live == gpio_virtuser_device_is_live(dev))
+ ret = -EPERM;
+ else if (live)
+ ret = gpio_virtuser_device_activate(dev);
+ else
+ gpio_virtuser_device_deactivate(dev);
+ }
- if (live)
- ret = gpio_virtuser_device_activate(dev);
- else
- gpio_virtuser_device_deactivate(dev);
+ /*
+ * Undepend is required only if device disablement (live == 0)
+ * succeeds or if device enablement (live == 1) fails.
+ */
+ if (live == !!ret)
+ gpio_virtuser_device_lockup_configfs(dev, false);
return ret ?: count;
}
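The gpio-virtuser activation path above is restructured into the canonical goto-unwind shape: each error label undoes exactly the steps that succeeded before the failure, in reverse order, and the lookup table (including its dev_id string) is now freed on every exit path. A generic sketch of the shape, all demo_* names hypothetical:

    static int demo_activate(struct demo_dev *dd)
    {
    	int ret;

    	ret = demo_step_a(dd);
    	if (ret)
    		return ret;

    	ret = demo_step_b(dd);
    	if (ret)
    		goto err_undo_a;

    	ret = demo_step_c(dd);
    	if (ret)
    		goto err_undo_b;

    	return 0;

    err_undo_b:
    	demo_undo_b(dd);
    err_undo_a:
    	demo_undo_a(dd);
    	return ret;
    }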
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index ebc71ecdb6cf..5bd965c18a46 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -142,7 +142,7 @@ static void visconti_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct visconti_gpio *priv = gpiochip_get_data(gc);
- seq_printf(p, dev_name(priv->dev));
+ seq_puts(p, dev_name(priv->dev));
}
static const struct irq_chip visconti_gpio_irq_chip = {
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index bd5befa807c3..48b829733b15 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -8,20 +8,22 @@
* Quan Nguyen <qnguyen@apm.com>.
*/
-#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
#include <linux/gpio/driver.h>
-#include <linux/acpi.h>
#include "gpiolib-acpi.h"
-/* Common property names */
-#define XGENE_NIRQ_PROPERTY "apm,nr-irqs"
-#define XGENE_NGPIO_PROPERTY "apm,nr-gpios"
-#define XGENE_IRQ_START_PROPERTY "apm,irq-start"
-
#define XGENE_DFLT_MAX_NGPIO 22
#define XGENE_DFLT_MAX_NIRQ 6
#define XGENE_DFLT_IRQ_START_PIN 8
@@ -252,18 +254,17 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
/* Retrieve start irq pin, use default if property not found */
priv->irq_start = XGENE_DFLT_IRQ_START_PIN;
- if (!device_property_read_u32(&pdev->dev,
- XGENE_IRQ_START_PROPERTY, &val32))
+ if (!device_property_read_u32(&pdev->dev, "apm,irq-start", &val32))
priv->irq_start = val32;
/* Retrieve number irqs, use default if property not found */
priv->nirq = XGENE_DFLT_MAX_NIRQ;
- if (!device_property_read_u32(&pdev->dev, XGENE_NIRQ_PROPERTY, &val32))
+ if (!device_property_read_u32(&pdev->dev, "apm,nr-irqs", &val32))
priv->nirq = val32;
/* Retrieve number gpio, use default if property not found */
priv->gc.ngpio = XGENE_DFLT_MAX_NGPIO;
- if (!device_property_read_u32(&pdev->dev, XGENE_NGPIO_PROPERTY, &val32))
+ if (!device_property_read_u32(&pdev->dev, "apm,nr-gpios", &val32))
priv->gc.ngpio = val32;
dev_info(&pdev->dev, "Support %d gpios, %d irqs start from pin %d\n",
@@ -305,27 +306,25 @@ static void xgene_gpio_sb_remove(struct platform_device *pdev)
}
static const struct of_device_id xgene_gpio_sb_of_match[] = {
- {.compatible = "apm,xgene-gpio-sb", },
- {},
+ { .compatible = "apm,xgene-gpio-sb" },
+ {}
};
MODULE_DEVICE_TABLE(of, xgene_gpio_sb_of_match);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_gpio_sb_acpi_match[] = {
- {"APMC0D15", 0},
- {},
+ { "APMC0D15" },
+ {}
};
MODULE_DEVICE_TABLE(acpi, xgene_gpio_sb_acpi_match);
-#endif
static struct platform_driver xgene_gpio_sb_driver = {
.driver = {
.name = "xgene-gpio-sb",
.of_match_table = xgene_gpio_sb_of_match,
- .acpi_match_table = ACPI_PTR(xgene_gpio_sb_acpi_match),
- },
+ .acpi_match_table = xgene_gpio_sb_acpi_match,
+ },
.probe = xgene_gpio_sb_probe,
- .remove_new = xgene_gpio_sb_remove,
+ .remove = xgene_gpio_sb_remove,
};
module_platform_driver(xgene_gpio_sb_driver);
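Dropping the #ifdef CONFIG_ACPI block and ACPI_PTR() in gpio-xgene-sb is safe because struct acpi_device_id lives in <linux/mod_devicetable.h> and is defined whether or not ACPI is enabled; with CONFIG_ACPI=n the table is simply never matched. Illustrative fragment, demo names hypothetical:

    static const struct acpi_device_id demo_acpi_match[] = {
    	{ "APMC0D15" },
    	{}
    };
    MODULE_DEVICE_TABLE(acpi, demo_acpi_match);

    static struct platform_driver demo_driver = {
    	.driver = {
    		.name = "demo",
    		.acpi_match_table = demo_acpi_match,	/* no ACPI_PTR() */
    	},
    };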
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index d445eea03687..93544e98ccbd 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -198,7 +198,7 @@ static void iproc_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
- seq_printf(p, dev_name(chip->dev));
+ seq_puts(p, dev_name(chip->dev));
}
static const struct irq_chip iproc_gpio_irq_chip = {
@@ -316,7 +316,7 @@ static struct platform_driver bcm_iproc_gpio_driver = {
.of_match_table = bcm_iproc_gpio_of_match,
},
.probe = iproc_gpio_probe,
- .remove_new = iproc_gpio_remove,
+ .remove = iproc_gpio_remove,
};
module_platform_driver(bcm_iproc_gpio_driver);
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index afcf432a1573..792d94c49077 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -15,9 +15,9 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/slab.h>
/* Register Offset Definitions */
@@ -65,7 +65,7 @@ struct xgpio_instance {
DECLARE_BITMAP(state, 64);
DECLARE_BITMAP(last_irq_read, 64);
DECLARE_BITMAP(dir, 64);
- spinlock_t gpio_lock; /* For serializing operations */
+ raw_spinlock_t gpio_lock; /* For serializing operations */
int irq;
DECLARE_BITMAP(enable, 64);
DECLARE_BITMAP(rising_edge, 64);
@@ -179,14 +179,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
__assign_bit(bit, chip->state, val);
xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -210,7 +210,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
@@ -218,7 +218,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_copy(chip->state, state, 64);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -236,13 +236,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
__set_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -265,7 +265,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
__assign_bit(bit, chip->state, val);
@@ -275,7 +275,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
__clear_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -398,7 +398,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
int bit = xgpio_to_bit(chip, irq_offset);
u32 mask = BIT(bit / 32), temp;
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__clear_bit(bit, chip->enable);
@@ -408,7 +408,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
temp &= ~mask;
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
gpiochip_disable_irq(&chip->gc, irq_offset);
}
@@ -428,7 +428,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
gpiochip_enable_irq(&chip->gc, irq_offset);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__set_bit(bit, chip->enable);
@@ -447,7 +447,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -512,7 +512,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
- spin_lock(&chip->gpio_lock);
+ raw_spin_lock(&chip->gpio_lock);
xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
@@ -529,7 +529,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
bitmap_copy(chip->last_irq_read, all, 64);
bitmap_or(all, rising, falling, 64);
- spin_unlock(&chip->gpio_lock);
+ raw_spin_unlock(&chip->gpio_lock);
dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
@@ -561,9 +561,9 @@ static const struct irq_chip xgpio_irq_chip = {
*/
static int xgpio_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct xgpio_instance *chip;
int status = 0;
- struct device_node *np = pdev->dev.of_node;
u32 is_dual = 0;
u32 width[2];
u32 state[2];
@@ -571,14 +571,14 @@ static int xgpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
u32 temp;
- chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(pdev, chip);
/* First, check if the device is dual-channel */
- of_property_read_u32(np, "xlnx,is-dual", &is_dual);
+ device_property_read_u32(dev, "xlnx,is-dual", &is_dual);
/* Setup defaults */
memset32(width, 0, ARRAY_SIZE(width));
@@ -586,14 +586,14 @@ static int xgpio_probe(struct platform_device *pdev)
memset32(dir, 0xFFFFFFFF, ARRAY_SIZE(dir));
/* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default", &state[0]);
- of_property_read_u32(np, "xlnx,dout-default-2", &state[1]);
+ device_property_read_u32(dev, "xlnx,dout-default", &state[0]);
+ device_property_read_u32(dev, "xlnx,dout-default-2", &state[1]);
bitmap_from_arr32(chip->state, state, 64);
/* Update GPIO direction shadow register with default value */
- of_property_read_u32(np, "xlnx,tri-default", &dir[0]);
- of_property_read_u32(np, "xlnx,tri-default-2", &dir[1]);
+ device_property_read_u32(dev, "xlnx,tri-default", &dir[0]);
+ device_property_read_u32(dev, "xlnx,tri-default-2", &dir[1]);
bitmap_from_arr32(chip->dir, dir, 64);
@@ -601,13 +601,13 @@ static int xgpio_probe(struct platform_device *pdev)
* Check device node and parent device node for device width
* and assume default width of 32
*/
- if (of_property_read_u32(np, "xlnx,gpio-width", &width[0]))
+ if (device_property_read_u32(dev, "xlnx,gpio-width", &width[0]))
width[0] = 32;
if (width[0] > 32)
return -EINVAL;
- if (is_dual && of_property_read_u32(np, "xlnx,gpio2-width", &width[1]))
+ if (is_dual && device_property_read_u32(dev, "xlnx,gpio2-width", &width[1]))
width[1] = 32;
if (width[1] > 32)
@@ -620,11 +620,11 @@ static int xgpio_probe(struct platform_device *pdev)
bitmap_set(chip->hw_map, 0, width[0]);
bitmap_set(chip->hw_map, 32, width[1]);
- spin_lock_init(&chip->gpio_lock);
+ raw_spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
- chip->gc.parent = &pdev->dev;
+ chip->gc.parent = dev;
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
chip->gc.get = xgpio_get;
@@ -633,21 +633,21 @@ static int xgpio_probe(struct platform_device *pdev)
chip->gc.free = xgpio_free;
chip->gc.set_multiple = xgpio_set_multiple;
- chip->gc.label = dev_name(&pdev->dev);
+ chip->gc.label = dev_name(dev);
chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs)) {
- dev_err(&pdev->dev, "failed to ioremap memory resource\n");
+ dev_err(dev, "failed to ioremap memory resource\n");
return PTR_ERR(chip->regs);
}
- chip->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ chip->clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(chip->clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(chip->clk), "input clock not found.\n");
+ return dev_err_probe(dev, PTR_ERR(chip->clk), "input clock not found.\n");
- pm_runtime_get_noresume(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
xgpio_save_regs(chip);
@@ -667,8 +667,7 @@ static int xgpio_probe(struct platform_device *pdev)
gpio_irq_chip_set_chip(girq, &xgpio_irq_chip);
girq->parent_handler = xgpio_irqhandler;
girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, 1,
- sizeof(*girq->parents),
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents) {
status = -ENOMEM;
@@ -679,18 +678,18 @@ static int xgpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
skip_irq:
- status = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
+ status = devm_gpiochip_add_data(dev, &chip->gc, chip);
if (status) {
- dev_err(&pdev->dev, "failed to add GPIO chip\n");
+ dev_err(dev, "failed to add GPIO chip\n");
goto err_pm_put;
}
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put(dev);
return 0;
err_pm_put:
- pm_runtime_disable(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
return status;
}
@@ -703,7 +702,7 @@ MODULE_DEVICE_TABLE(of, xgpio_of_match);
static struct platform_driver xgpio_plat_driver = {
.probe = xgpio_probe,
- .remove_new = xgpio_remove,
+ .remove = xgpio_remove,
.driver = {
.name = "gpio-xilinx",
.of_match_table = xgpio_of_match,
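The gpio-xilinx lock conversion matters on PREEMPT_RT, where plain spinlock_t becomes a sleeping lock: this lock is taken from irq_chip callbacks and the chained handler, i.e. genuine atomic context, so it must stay a raw spinlock. Minimal sketch, demo names hypothetical:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_lock);	/* never sleeps, even on RT */

    static void demo_irq_path(void)
    {
    	unsigned long flags;

    	raw_spin_lock_irqsave(&demo_lock, flags);
    	/* update shadow registers / hardware state */
    	raw_spin_unlock_irqrestore(&demo_lock, flags);
    }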
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index 2de61337ad3b..d7230fd83f5d 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -169,6 +170,7 @@ static const struct gpio_chip zevio_gpio_chip = {
/* Initialization */
static int zevio_gpio_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct zevio_gpio *controller;
int status, i;
@@ -180,6 +182,10 @@ static int zevio_gpio_probe(struct platform_device *pdev)
controller->chip = zevio_gpio_chip;
controller->chip.parent = &pdev->dev;
+ controller->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
+ if (!controller->chip.label)
+ return -ENOMEM;
+
controller->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(controller->regs))
return dev_err_probe(&pdev->dev, PTR_ERR(controller->regs),
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 1a42336dfc1d..be81fa2b17ab 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -1023,7 +1023,7 @@ static struct platform_driver zynq_gpio_driver = {
.of_match_table = zynq_gpio_of_match,
},
.probe = zynq_gpio_probe,
- .remove_new = zynq_gpio_remove,
+ .remove = zynq_gpio_remove,
};
module_platform_driver(zynq_gpio_driver);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 78ecd56123a3..1f9fe50bba00 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1315,9 +1315,8 @@ acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip,
static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
{
struct gpio_chip *chip = achip->chip;
- struct fwnode_handle *fwnode;
- device_for_each_child_node(chip->parent, fwnode) {
+ device_for_each_child_node_scoped(chip->parent, fwnode) {
unsigned long lflags;
enum gpiod_flags dflags;
struct gpio_desc *desc;
@@ -1335,7 +1334,6 @@ static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
ret = gpiod_hog(desc, name, lflags, dflags);
if (ret) {
dev_err(chip->parent, "Failed to hog GPIO\n");
- fwnode_handle_put(fwnode);
return;
}
}
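device_for_each_child_node_scoped() attaches a cleanup to the iterator variable so the fwnode reference is dropped on every way out of the loop, which is why the explicit fwnode_handle_put() on the error path above can go away. Minimal sketch, demo_handle() hypothetical:

    static void demo_scan(struct device *dev)
    {
    	device_for_each_child_node_scoped(dev, child) {
    		if (demo_handle(child))
    			return;	/* no reference leak despite early return */
    	}
    }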
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 78c9d9ed3d68..40f76a90fd7d 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -16,16 +16,15 @@
#include <linux/hte.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
-#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
-#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
@@ -143,18 +142,22 @@ static int linehandle_validate_flags(u32 flags)
static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
- assign_bit(FLAG_ACTIVE_LOW, flagsp,
+ unsigned long flags = READ_ONCE(*flagsp);
+
+ assign_bit(FLAG_ACTIVE_LOW, &flags,
lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
- assign_bit(FLAG_OPEN_DRAIN, flagsp,
+ assign_bit(FLAG_OPEN_DRAIN, &flags,
lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
- assign_bit(FLAG_OPEN_SOURCE, flagsp,
+ assign_bit(FLAG_OPEN_SOURCE, &flags,
lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
- assign_bit(FLAG_PULL_UP, flagsp,
+ assign_bit(FLAG_PULL_UP, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
- assign_bit(FLAG_PULL_DOWN, flagsp,
+ assign_bit(FLAG_PULL_DOWN, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
- assign_bit(FLAG_BIAS_DISABLE, flagsp,
+ assign_bit(FLAG_BIAS_DISABLE, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
+
+ WRITE_ONCE(*flagsp, flags);
}
static long linehandle_set_config(struct linehandle_state *lh,
@@ -184,11 +187,11 @@ static long linehandle_set_config(struct linehandle_state *lh,
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
int val = !!gcnf.default_values[i];
- ret = gpiod_direction_output(desc, val);
+ ret = gpiod_direction_output_nonotify(desc, val);
if (ret)
return ret;
} else {
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
if (ret)
return ret;
}
@@ -359,11 +362,11 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
int val = !!handlereq.default_values[i];
- ret = gpiod_direction_output(desc, val);
+ ret = gpiod_direction_output_nonotify(desc, val);
if (ret)
goto out_free_lh;
} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
if (ret)
goto out_free_lh;
}
@@ -417,7 +420,6 @@ out_free_lh:
/**
* struct line - contains the state of a requested line
- * @node: to store the object in supinfo_tree if supplemental
* @desc: the GPIO descriptor for this line.
* @req: the corresponding line request
* @irq: the interrupt triggered in response to events on this GPIO
@@ -430,7 +432,6 @@ out_free_lh:
* @line_seqno: the seqno for the current edge event in the sequence of
* events for this line.
* @work: the worker that implements software debouncing
- * @debounce_period_us: the debounce period in microseconds
* @sw_debounced: flag indicating if the software debouncer is active
* @level: the current debounced physical level of the line
* @hdesc: the Hardware Timestamp Engine (HTE) descriptor
@@ -439,7 +440,6 @@ out_free_lh:
* @last_seqno: the last sequence number before debounce period expires
*/
struct line {
- struct rb_node node;
struct gpio_desc *desc;
/*
* -- edge detector specific fields --
@@ -450,7 +450,7 @@ struct line {
* The flags for the active edge detector configuration.
*
* edflags is set by linereq_create(), linereq_free(), and
- * linereq_set_config_unlocked(), which are themselves mutually
+ * linereq_set_config(), which are themselves mutually
* exclusive, and is accessed by edge_irq_thread(),
* process_hw_ts_thread() and debounce_work_func(),
* which can all live with a slightly stale value.
@@ -474,15 +474,6 @@ struct line {
*/
struct delayed_work work;
/*
- * debounce_period_us is accessed by debounce_irq_handler() and
- * process_hw_ts() which are disabled when modified by
- * debounce_setup(), edge_detector_setup() or edge_detector_stop()
- * or can live with a stale version when updated by
- * edge_detector_update().
- * The modifying functions are themselves mutually exclusive.
- */
- unsigned int debounce_period_us;
- /*
* sw_debounce is accessed by linereq_set_config(), which is the
* only setter, and linereq_get_values(), which can live with a
* slightly stale value.
@@ -514,17 +505,6 @@ struct line {
#endif /* CONFIG_HTE */
};
-/*
- * a rbtree of the struct lines containing supplemental info.
- * Used to populate gpio_v2_line_info with cdev specific fields not contained
- * in the struct gpio_desc.
- * A line is determined to contain supplemental information by
- * line_has_supinfo().
- */
-static struct rb_root supinfo_tree = RB_ROOT;
-/* covers supinfo_tree */
-static DEFINE_SPINLOCK(supinfo_lock);
-
/**
* struct linereq - contains the state of a userspace line request
* @gdev: the GPIO device the line request pertains to
@@ -538,8 +518,7 @@ static DEFINE_SPINLOCK(supinfo_lock);
* this line request. Note that this is not used when @num_lines is 1, as
* the line_seqno is then the same and is cheaper to calculate.
* @config_mutex: mutex for serializing ioctl() calls to ensure consistency
- * of configuration, particularly multi-step accesses to desc flags and
- * changes to supinfo status.
+ * of configuration, particularly multi-step accesses to desc flags.
* @lines: the lines held by this line request, with @num_lines elements.
*/
struct linereq {
@@ -555,103 +534,6 @@ struct linereq {
struct line lines[] __counted_by(num_lines);
};
-static void supinfo_insert(struct line *line)
-{
- struct rb_node **new = &(supinfo_tree.rb_node), *parent = NULL;
- struct line *entry;
-
- guard(spinlock)(&supinfo_lock);
-
- while (*new) {
- entry = container_of(*new, struct line, node);
-
- parent = *new;
- if (line->desc < entry->desc) {
- new = &((*new)->rb_left);
- } else if (line->desc > entry->desc) {
- new = &((*new)->rb_right);
- } else {
- /* this should never happen */
- WARN(1, "duplicate line inserted");
- return;
- }
- }
-
- rb_link_node(&line->node, parent, new);
- rb_insert_color(&line->node, &supinfo_tree);
-}
-
-static void supinfo_erase(struct line *line)
-{
- guard(spinlock)(&supinfo_lock);
-
- rb_erase(&line->node, &supinfo_tree);
-}
-
-static struct line *supinfo_find(struct gpio_desc *desc)
-{
- struct rb_node *node = supinfo_tree.rb_node;
- struct line *line;
-
- while (node) {
- line = container_of(node, struct line, node);
- if (desc < line->desc)
- node = node->rb_left;
- else if (desc > line->desc)
- node = node->rb_right;
- else
- return line;
- }
- return NULL;
-}
-
-static void supinfo_to_lineinfo(struct gpio_desc *desc,
- struct gpio_v2_line_info *info)
-{
- struct gpio_v2_line_attribute *attr;
- struct line *line;
-
- guard(spinlock)(&supinfo_lock);
-
- line = supinfo_find(desc);
- if (!line)
- return;
-
- attr = &info->attrs[info->num_attrs];
- attr->id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
- attr->debounce_period_us = READ_ONCE(line->debounce_period_us);
- info->num_attrs++;
-}
-
-static inline bool line_has_supinfo(struct line *line)
-{
- return READ_ONCE(line->debounce_period_us);
-}
-
-/*
- * Checks line_has_supinfo() before and after the change to avoid unnecessary
- * supinfo_tree access.
- * Called indirectly by linereq_create() or linereq_set_config() so line
- * is already protected from concurrent changes.
- */
-static void line_set_debounce_period(struct line *line,
- unsigned int debounce_period_us)
-{
- bool was_suppl = line_has_supinfo(line);
-
- WRITE_ONCE(line->debounce_period_us, debounce_period_us);
-
- /* if supinfo status is unchanged then we're done */
- if (line_has_supinfo(line) == was_suppl)
- return;
-
- /* supinfo status has changed, so update the tree */
- if (was_suppl)
- supinfo_erase(line);
- else
- supinfo_insert(line);
-}
-
#define GPIO_V2_LINE_BIAS_FLAGS \
(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
@@ -819,7 +701,7 @@ static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
line->total_discard_seq++;
line->last_seqno = ts->seq;
mod_delayed_work(system_wq, &line->work,
- usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
+ usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
} else {
if (unlikely(ts->seq < line->line_seqno))
return HTE_CB_HANDLED;
@@ -960,7 +842,7 @@ static irqreturn_t debounce_irq_handler(int irq, void *p)
struct line *line = p;
mod_delayed_work(system_wq, &line->work,
- usecs_to_jiffies(READ_ONCE(line->debounce_period_us)));
+ usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
return IRQ_HANDLED;
}
@@ -1040,12 +922,13 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
int ret, level, irq;
char *label;
- /* try hardware */
- ret = gpiod_set_debounce(line->desc, debounce_period_us);
- if (!ret) {
- line_set_debounce_period(line, debounce_period_us);
- return ret;
- }
+ /*
+ * Try hardware. Skip gpiod_set_config() to avoid emitting two
+ * CHANGED_CONFIG line state events.
+ */
+ ret = gpio_do_set_config(line->desc,
+ pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE,
+ debounce_period_us));
if (ret != -ENOTSUPP)
return ret;
@@ -1128,7 +1011,8 @@ static void edge_detector_stop(struct line *line)
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
WRITE_ONCE(line->edflags, 0);
- line_set_debounce_period(line, 0);
+ if (line->desc)
+ WRITE_ONCE(line->desc->debounce_period_us, 0);
/* do not change line->level - see comment in debounced_value() */
}
@@ -1161,7 +1045,7 @@ static int edge_detector_setup(struct line *line,
ret = debounce_setup(line, debounce_period_us);
if (ret)
return ret;
- line_set_debounce_period(line, debounce_period_us);
+ WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
}
/* detection disabled or sw debouncer will provide edge detection */
@@ -1209,12 +1093,12 @@ static int edge_detector_update(struct line *line,
gpio_v2_line_config_debounce_period(lc, line_idx);
if ((active_edflags == edflags) &&
- (READ_ONCE(line->debounce_period_us) == debounce_period_us))
+ (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
return 0;
/* sw debounced and still will be...*/
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
- line_set_debounce_period(line, debounce_period_us);
+ WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
/*
* ensure event fifo is initialised if edge detection
* is now enabled.
@@ -1331,7 +1215,7 @@ static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
return -EINVAL;
- if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
+ if (!mem_is_zero(lc->padding, sizeof(lc->padding)))
return -EINVAL;
for (i = 0; i < num_lines; i++) {
@@ -1348,38 +1232,42 @@ static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
return 0;
}
-static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
+static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
unsigned long *flagsp)
{
- assign_bit(FLAG_ACTIVE_LOW, flagsp,
- flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
+ unsigned long flags = READ_ONCE(*flagsp);
+
+ assign_bit(FLAG_ACTIVE_LOW, &flags,
+ lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
- if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
- set_bit(FLAG_IS_OUT, flagsp);
- else if (flags & GPIO_V2_LINE_FLAG_INPUT)
- clear_bit(FLAG_IS_OUT, flagsp);
+ if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
+ set_bit(FLAG_IS_OUT, &flags);
+ else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
+ clear_bit(FLAG_IS_OUT, &flags);
- assign_bit(FLAG_EDGE_RISING, flagsp,
- flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
- assign_bit(FLAG_EDGE_FALLING, flagsp,
- flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
+ assign_bit(FLAG_EDGE_RISING, &flags,
+ lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
+ assign_bit(FLAG_EDGE_FALLING, &flags,
+ lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
- assign_bit(FLAG_OPEN_DRAIN, flagsp,
- flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
- assign_bit(FLAG_OPEN_SOURCE, flagsp,
- flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
+ assign_bit(FLAG_OPEN_DRAIN, &flags,
+ lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
+ assign_bit(FLAG_OPEN_SOURCE, &flags,
+ lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
- assign_bit(FLAG_PULL_UP, flagsp,
- flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
- assign_bit(FLAG_PULL_DOWN, flagsp,
- flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
- assign_bit(FLAG_BIAS_DISABLE, flagsp,
- flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
+ assign_bit(FLAG_PULL_UP, &flags,
+ lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
+ assign_bit(FLAG_PULL_DOWN, &flags,
+ lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
+ assign_bit(FLAG_BIAS_DISABLE, &flags,
+ lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
- assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
- flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
- assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
- flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
+ assign_bit(FLAG_EVENT_CLOCK_REALTIME, &flags,
+ lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
+ assign_bit(FLAG_EVENT_CLOCK_HTE, &flags,
+ lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
+
+ WRITE_ONCE(*flagsp, flags);
}
static long linereq_get_values(struct linereq *lr, void __user *ip)
@@ -1546,11 +1434,11 @@ static long linereq_set_config(struct linereq *lr, void __user *ip)
int val = gpio_v2_line_config_output_value(&lc, i);
edge_detector_stop(line);
- ret = gpiod_direction_output(desc, val);
+ ret = gpiod_direction_output_nonotify(desc, val);
if (ret)
return ret;
} else {
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
if (ret)
return ret;
@@ -1669,7 +1557,6 @@ static ssize_t linereq_read(struct file *file, char __user *buf,
static void linereq_free(struct linereq *lr)
{
- struct line *line;
unsigned int i;
if (lr->device_unregistered_nb.notifier_call)
@@ -1677,14 +1564,10 @@ static void linereq_free(struct linereq *lr)
&lr->device_unregistered_nb);
for (i = 0; i < lr->num_lines; i++) {
- line = &lr->lines[i];
- if (!line->desc)
- continue;
-
- edge_detector_stop(line);
- if (line_has_supinfo(line))
- supinfo_erase(line);
- gpiod_free(line->desc);
+ if (lr->lines[i].desc) {
+ edge_detector_stop(&lr->lines[i]);
+ gpiod_free(lr->lines[i].desc);
+ }
}
kfifo_free(&lr->events);
kfree(lr->label);
@@ -1746,7 +1629,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
return -EINVAL;
- if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
+ if (!mem_is_zero(ulr.padding, sizeof(ulr.padding)))
return -EINVAL;
lc = &ulr.config;
@@ -1818,11 +1701,11 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
int val = gpio_v2_line_config_output_value(lc, i);
- ret = gpiod_direction_output(desc, val);
+ ret = gpiod_direction_output_nonotify(desc, val);
if (ret)
goto out_free_linereq;
} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
if (ret)
goto out_free_linereq;
@@ -2353,8 +2236,9 @@ static void gpio_v2_line_info_changed_to_v1(
#endif /* CONFIG_GPIO_CDEV_V1 */
static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
- struct gpio_v2_line_info *info)
+ struct gpio_v2_line_info *info, bool atomic)
{
+ u32 debounce_period_us;
unsigned long dflags;
const char *label;
@@ -2391,12 +2275,14 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
*/
if (test_bit(FLAG_REQUESTED, &dflags) ||
test_bit(FLAG_IS_HOGGED, &dflags) ||
- test_bit(FLAG_USED_AS_IRQ, &dflags) ||
test_bit(FLAG_EXPORT, &dflags) ||
test_bit(FLAG_SYSFS, &dflags) ||
- !gpiochip_line_is_valid(guard.gc, info->offset) ||
- !pinctrl_gpio_can_use_line(guard.gc, info->offset))
+ !gpiochip_line_is_valid(guard.gc, info->offset)) {
info->flags |= GPIO_V2_LINE_FLAG_USED;
+ } else if (!atomic) {
+ if (!pinctrl_gpio_can_use_line(guard.gc, info->offset))
+ info->flags |= GPIO_V2_LINE_FLAG_USED;
+ }
if (test_bit(FLAG_IS_OUT, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
@@ -2427,6 +2313,14 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
+
+ debounce_period_us = READ_ONCE(desc->debounce_period_us);
+ if (debounce_period_us) {
+ info->attrs[info->num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
+ info->attrs[info->num_attrs].debounce_period_us =
+ debounce_period_us;
+ info->num_attrs++;
+ }
}
struct gpio_chardev_data {
@@ -2439,6 +2333,7 @@ struct gpio_chardev_data {
#ifdef CONFIG_GPIO_CDEV_V1
atomic_t watch_abi_version;
#endif
+ struct file *fp;
};
static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
@@ -2494,7 +2389,7 @@ static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
return -EBUSY;
}
- gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+ gpio_desc_to_lineinfo(desc, &lineinfo_v2, false);
gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
@@ -2516,7 +2411,7 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
+ if (!mem_is_zero(lineinfo.padding, sizeof(lineinfo.padding)))
return -EINVAL;
desc = gpio_device_get_desc(cdev->gdev, lineinfo.offset);
@@ -2531,8 +2426,7 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
return -EBUSY;
}
- gpio_desc_to_lineinfo(desc, &lineinfo);
- supinfo_to_lineinfo(desc, &lineinfo);
+ gpio_desc_to_lineinfo(desc, &lineinfo, false);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
if (watch)
@@ -2609,29 +2503,86 @@ static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
}
#endif
+struct lineinfo_changed_ctx {
+ struct work_struct work;
+ struct gpio_v2_line_info_changed chg;
+ struct gpio_device *gdev;
+ struct gpio_chardev_data *cdev;
+};
+
+static void lineinfo_changed_func(struct work_struct *work)
+{
+ struct lineinfo_changed_ctx *ctx =
+ container_of(work, struct lineinfo_changed_ctx, work);
+ struct gpio_chip *gc;
+ int ret;
+
+ if (!(ctx->chg.info.flags & GPIO_V2_LINE_FLAG_USED)) {
+ /*
+ * If nobody set the USED flag earlier, check with pinctrl now.
+ * This is done late because pinctrl_gpio_can_use_line() may
+ * sleep. Pin functions are in general much more static and
+ * while this isn't 100% bullet-proof, it's good enough for most
+ * cases.
+ */
+ scoped_guard(srcu, &ctx->gdev->srcu) {
+ gc = srcu_dereference(ctx->gdev->chip, &ctx->gdev->srcu);
+ if (gc &&
+ !pinctrl_gpio_can_use_line(gc, ctx->chg.info.offset))
+ ctx->chg.info.flags |= GPIO_V2_LINE_FLAG_USED;
+ }
+ }
+
+ ret = kfifo_in_spinlocked(&ctx->cdev->events, &ctx->chg, 1,
+ &ctx->cdev->wait.lock);
+ if (ret)
+ wake_up_poll(&ctx->cdev->wait, EPOLLIN);
+ else
+ pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
+
+ gpio_device_put(ctx->gdev);
+ fput(ctx->cdev->fp);
+ kfree(ctx);
+}
+
static int lineinfo_changed_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct gpio_chardev_data *cdev =
container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
- struct gpio_v2_line_info_changed chg;
+ struct lineinfo_changed_ctx *ctx;
struct gpio_desc *desc = data;
- int ret;
if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
return NOTIFY_DONE;
- memset(&chg, 0, sizeof(chg));
- chg.event_type = action;
- chg.timestamp_ns = ktime_get_ns();
- gpio_desc_to_lineinfo(desc, &chg.info);
- supinfo_to_lineinfo(desc, &chg.info);
+ /*
+ * If this is called from atomic context (for instance: with a spinlock
+ * taken by the atomic notifier chain), any sleeping calls must be done
+ * outside of this function in process context of the dedicated
+ * workqueue.
+ *
+ * Let's gather as much info as possible from the descriptor and
+ * postpone just the call to pinctrl_gpio_can_use_line() until the work
+ * is executed.
+ */
- ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
- if (ret)
- wake_up_poll(&cdev->wait, EPOLLIN);
- else
- pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
+ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx) {
+ pr_err("Failed to allocate memory for line info notification\n");
+ return NOTIFY_DONE;
+ }
+
+ ctx->chg.event_type = action;
+ ctx->chg.timestamp_ns = ktime_get_ns();
+ gpio_desc_to_lineinfo(desc, &ctx->chg.info, true);
+ /* Keep the GPIO device alive until we emit the event. */
+ ctx->gdev = gpio_device_get(desc->gdev);
+ ctx->cdev = cdev;
+ /* Keep the file descriptor alive too. */
+ get_file(ctx->cdev->fp);
+
+ INIT_WORK(&ctx->work, lineinfo_changed_func);
+ queue_work(ctx->gdev->line_state_wq, &ctx->work);
return NOTIFY_OK;
}
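The notifier callback above may now run in atomic context, so it only snapshots the line info (with the new atomic argument to gpio_desc_to_lineinfo()), pins the GPIO device and the chardev's struct file so neither is freed while the work is pending, and defers the sleeping pinctrl lookup and the kfifo wakeup to lineinfo_changed_func(). A generic sketch of this defer-to-workqueue pattern; the demo_* names are illustrative, not part of gpiolib:

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ctx {
        struct work_struct work;
        int snapshot;                   /* data captured while atomic */
};

static void demo_work_fn(struct work_struct *work)
{
        struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

        /* sleeping calls are allowed here */
        pr_info("deferred value: %d\n", ctx->snapshot);
        kfree(ctx);
}

static int demo_notify(int value)       /* may be called in atomic context */
{
        struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);

        if (!ctx)
                return -ENOMEM;

        ctx->snapshot = value;
        INIT_WORK(&ctx->work, demo_work_fn);
        queue_work(system_wq, &ctx->work);

        return 0;
}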
@@ -2778,8 +2729,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
cdev->gdev = gpio_device_get(gdev);
cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
- ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ ret = atomic_notifier_chain_register(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
if (ret)
goto out_free_bitmap;
@@ -2791,6 +2742,7 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
goto out_unregister_line_notifier;
file->private_data = cdev;
+ cdev->fp = file;
ret = nonseekable_open(inode, file);
if (ret)
@@ -2802,8 +2754,8 @@ out_unregister_device_notifier:
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
out_unregister_line_notifier:
- blocking_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ atomic_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
out_free_bitmap:
gpio_device_put(gdev);
bitmap_free(cdev->watched_lines);
@@ -2827,8 +2779,8 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
- blocking_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ atomic_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
bitmap_free(cdev->watched_lines);
gpio_device_put(gdev);
kfree(cdev);
@@ -2857,6 +2809,11 @@ int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
gdev->chrdev.owner = THIS_MODULE;
gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
+ gdev->line_state_wq = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
+ dev_name(&gdev->dev));
+ if (!gdev->line_state_wq)
+ return -ENOMEM;
+
ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
if (ret)
return ret;
@@ -2873,6 +2830,7 @@ int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
+ destroy_workqueue(gdev->line_state_wq);
cdev_device_del(&gdev->chrdev, &gdev->dev);
blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
}
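Note: alloc_ordered_workqueue() creates a workqueue that executes at most one item at a time, in queueing order, so deferred line-state events reach the event kfifo in the order they were generated; WQ_HIGHPRI keeps notification latency low. destroy_workqueue() drains any still-pending items before the character device goes away.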
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 28f1046fb670..aeae6df8bec9 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -46,9 +46,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
if (err)
return err;
- if (flags & GPIOF_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-
if (flags & GPIOF_IN)
err = gpiod_direction_input(desc);
else
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 880f1efcaca5..2e537ee979f3 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -337,7 +337,7 @@ static void of_gpio_flags_quirks(const struct device_node *np,
* to determine if the flags should have inverted semantics.
*/
if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
- of_property_read_bool(np, "cs-gpios")) {
+ of_property_present(np, "cs-gpios")) {
u32 cs;
int ret;
diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c
index 2b2dd7e92211..f21dbc28cf2c 100644
--- a/drivers/gpio/gpiolib-swnode.c
+++ b/drivers/gpio/gpiolib-swnode.c
@@ -64,7 +64,7 @@ struct gpio_desc *swnode_find_gpio(struct fwnode_handle *fwnode,
struct fwnode_reference_args args;
struct gpio_desc *desc;
char propname[32]; /* 32 is max size of property name */
- int ret;
+ int ret = 0;
swnode = to_software_node(fwnode);
if (!swnode)
@@ -141,7 +141,7 @@ int swnode_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
const struct software_node swnode_gpio_undefined = {
.name = GPIOLIB_SWNODE_UNDEFINED_NAME,
};
-EXPORT_SYMBOL_NS_GPL(swnode_gpio_undefined, GPIO_SWNODE);
+EXPORT_SYMBOL_NS_GPL(swnode_gpio_undefined, "GPIO_SWNODE");
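This follows the tree-wide conversion of the symbol-namespace macros from bare identifiers to string literals. A consumer module would import the namespace the same way, roughly:

MODULE_IMPORT_NS("GPIO_SWNODE");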
static int __init swnode_gpio_init(void)
{
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 17ed229412af..1acfa43bf1ab 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -21,6 +21,8 @@
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <uapi/linux/gpio.h>
+
#include "gpiolib.h"
#include "gpiolib-sysfs.h"
@@ -77,12 +79,10 @@ static ssize_t direction_show(struct device *dev,
struct gpio_desc *desc = data->desc;
int value;
- mutex_lock(&data->mutex);
-
- gpiod_get_direction(desc);
- value = !!test_bit(FLAG_IS_OUT, &desc->flags);
-
- mutex_unlock(&data->mutex);
+ scoped_guard(mutex, &data->mutex) {
+ gpiod_get_direction(desc);
+ value = !!test_bit(FLAG_IS_OUT, &desc->flags);
+ }
return sysfs_emit(buf, "%s\n", value ? "out" : "in");
}
@@ -94,7 +94,7 @@ static ssize_t direction_store(struct device *dev,
struct gpio_desc *desc = data->desc;
ssize_t status;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
if (sysfs_streq(buf, "high"))
status = gpiod_direction_output_raw(desc, 1);
@@ -105,8 +105,6 @@ static ssize_t direction_store(struct device *dev,
else
status = -EINVAL;
- mutex_unlock(&data->mutex);
-
return status ? : size;
}
static DEVICE_ATTR_RW(direction);
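The sysfs handlers are converted to the scope-based lock guards from <linux/cleanup.h>: guard(mutex)(&m) holds the mutex until the enclosing function returns, while scoped_guard(mutex, &m) holds it only for the attached statement or block, eliminating the unlock-on-every-exit-path boilerplate. A minimal sketch of both forms; my_lock and my_count are illustrative:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);
static int my_count;

static int demo_set(int val)
{
        guard(mutex)(&my_lock);         /* unlocked automatically on return */

        if (val < 0)
                return -EINVAL;         /* no explicit unlock needed */

        my_count = val;
        return 0;
}

static int demo_get(void)
{
        int val;

        scoped_guard(mutex, &my_lock)   /* held only for this statement */
                val = my_count;

        return val;
}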
@@ -118,11 +116,8 @@ static ssize_t value_show(struct device *dev,
struct gpio_desc *desc = data->desc;
ssize_t status;
- mutex_lock(&data->mutex);
-
- status = gpiod_get_value_cansleep(desc);
-
- mutex_unlock(&data->mutex);
+ scoped_guard(mutex, &data->mutex)
+ status = gpiod_get_value_cansleep(desc);
if (status < 0)
return status;
@@ -140,18 +135,17 @@ static ssize_t value_store(struct device *dev,
status = kstrtol(buf, 0, &value);
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
- if (!test_bit(FLAG_IS_OUT, &desc->flags)) {
- status = -EPERM;
- } else if (status == 0) {
- gpiod_set_value_cansleep(desc, value);
- status = size;
- }
+ if (!test_bit(FLAG_IS_OUT, &desc->flags))
+ return -EPERM;
+
+ if (status)
+ return status;
- mutex_unlock(&data->mutex);
+ gpiod_set_value_cansleep(desc, value);
- return status;
+ return size;
}
static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
@@ -185,12 +179,16 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
return -ENODEV;
irq_flags = IRQF_SHARED;
- if (flags & GPIO_IRQF_TRIGGER_FALLING)
+ if (flags & GPIO_IRQF_TRIGGER_FALLING) {
irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
- if (flags & GPIO_IRQF_TRIGGER_RISING)
+ set_bit(FLAG_EDGE_FALLING, &desc->flags);
+ }
+ if (flags & GPIO_IRQF_TRIGGER_RISING) {
irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+ set_bit(FLAG_EDGE_RISING, &desc->flags);
+ }
/*
* FIXME: This should be done in the irq_request_resources callback
@@ -216,6 +214,8 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
err_unlock:
gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
err_put_kn:
+ clear_bit(FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(FLAG_EDGE_FALLING, &desc->flags);
sysfs_put(data->value_kn);
return ret;
@@ -237,6 +237,8 @@ static void gpio_sysfs_free_irq(struct device *dev)
data->irq_flags = 0;
free_irq(data->irq, data);
gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
+ clear_bit(FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(FLAG_EDGE_FALLING, &desc->flags);
sysfs_put(data->value_kn);
}
@@ -253,11 +255,8 @@ static ssize_t edge_show(struct device *dev,
struct gpiod_data *data = dev_get_drvdata(dev);
int flags;
- mutex_lock(&data->mutex);
-
- flags = data->irq_flags;
-
- mutex_unlock(&data->mutex);
+ scoped_guard(mutex, &data->mutex)
+ flags = data->irq_flags;
if (flags >= ARRAY_SIZE(trigger_names))
return 0;
@@ -276,26 +275,24 @@ static ssize_t edge_store(struct device *dev,
if (flags < 0)
return flags;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
- if (flags == data->irq_flags) {
- status = size;
- goto out_unlock;
- }
+ if (flags == data->irq_flags)
+ return size;
if (data->irq_flags)
gpio_sysfs_free_irq(dev);
- if (flags) {
- status = gpio_sysfs_request_irq(dev, flags);
- if (!status)
- status = size;
- }
+ if (!flags)
+ return size;
-out_unlock:
- mutex_unlock(&data->mutex);
+ status = gpio_sysfs_request_irq(dev, flags);
+ if (status)
+ return status;
- return status;
+ gpiod_line_state_notify(data->desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return size;
}
static DEVICE_ATTR_RW(edge);
@@ -320,6 +317,8 @@ static int gpio_sysfs_set_active_low(struct device *dev, int value)
status = gpio_sysfs_request_irq(dev, flags);
}
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
return status;
}
@@ -330,11 +329,8 @@ static ssize_t active_low_show(struct device *dev,
struct gpio_desc *desc = data->desc;
int value;
- mutex_lock(&data->mutex);
-
- value = !!test_bit(FLAG_ACTIVE_LOW, &desc->flags);
-
- mutex_unlock(&data->mutex);
+ scoped_guard(mutex, &data->mutex)
+ value = !!test_bit(FLAG_ACTIVE_LOW, &desc->flags);
return sysfs_emit(buf, "%d\n", value);
}
@@ -350,13 +346,9 @@ static ssize_t active_low_store(struct device *dev,
if (status)
return status;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
- status = gpio_sysfs_set_active_low(dev, value);
-
- mutex_unlock(&data->mutex);
-
- return status ? : size;
+ return gpio_sysfs_set_active_low(dev, value) ?: size;
}
static DEVICE_ATTR_RW(active_low);
@@ -463,7 +455,7 @@ static ssize_t export_store(const struct class *class,
desc = gpio_to_desc(gpio);
/* reject invalid GPIOs */
if (!desc) {
- pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ pr_debug_ratelimited("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
@@ -473,7 +465,7 @@ static ssize_t export_store(const struct class *class,
offset = gpio_chip_hwgpio(desc);
if (!gpiochip_line_is_valid(guard.gc, offset)) {
- pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
+ pr_debug_ratelimited("%s: GPIO %ld masked\n", __func__, gpio);
return -EINVAL;
}
@@ -493,10 +485,12 @@ static ssize_t export_store(const struct class *class,
}
status = gpiod_export(desc, true);
- if (status < 0)
+ if (status < 0) {
gpiod_free(desc);
- else
+ } else {
set_bit(FLAG_SYSFS, &desc->flags);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
+ }
done:
if (status)
@@ -520,7 +514,7 @@ static ssize_t unexport_store(const struct class *class,
desc = gpio_to_desc(gpio);
/* reject bogus commands (gpiod_unexport() ignores them) */
if (!desc) {
- pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ pr_debug_ratelimited("%s: invalid GPIO %ld\n", __func__, gpio);
return -EINVAL;
}
@@ -549,12 +543,11 @@ static struct attribute *gpio_class_attrs[] = {
};
ATTRIBUTE_GROUPS(gpio_class);
-static struct class gpio_class = {
+static const struct class gpio_class = {
.name = "gpio",
- .class_groups = gpio_class_groups,
+ .class_groups = gpio_class_groups,
};
-
/**
* gpiod_export - export a GPIO through sysfs
* @desc: GPIO to make available, already requested
@@ -573,11 +566,10 @@ static struct class gpio_class = {
*/
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
- const char *ioname = NULL;
struct gpio_device *gdev;
struct gpiod_data *data;
struct device *dev;
- int status, offset;
+ int status;
/* can't export until sysfs is available ... */
if (!class_is_registered(&gpio_class)) {
@@ -599,24 +591,24 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
gdev = desc->gdev;
- mutex_lock(&sysfs_lock);
+ guard(mutex)(&sysfs_lock);
/* check if chip is being removed */
if (!gdev->mockdev) {
status = -ENODEV;
- goto err_unlock;
+ goto err_clear_bit;
}
if (!test_bit(FLAG_REQUESTED, &desc->flags)) {
gpiod_dbg(desc, "%s: unavailable (not requested)\n", __func__);
status = -EPERM;
- goto err_unlock;
+ goto err_clear_bit;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
status = -ENOMEM;
- goto err_unlock;
+ goto err_clear_bit;
}
data->desc = desc;
@@ -626,26 +618,19 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
else
data->direction_can_change = false;
- offset = gpio_chip_hwgpio(desc);
- if (guard.gc->names && guard.gc->names[offset])
- ioname = guard.gc->names[offset];
-
dev = device_create_with_groups(&gpio_class, &gdev->dev,
MKDEV(0, 0), data, gpio_groups,
- ioname ? ioname : "gpio%u",
- desc_to_gpio(desc));
+ "gpio%u", desc_to_gpio(desc));
if (IS_ERR(dev)) {
status = PTR_ERR(dev);
goto err_free_data;
}
- mutex_unlock(&sysfs_lock);
return 0;
err_free_data:
kfree(data);
-err_unlock:
- mutex_unlock(&sysfs_lock);
+err_clear_bit:
clear_bit(FLAG_EXPORT, &desc->flags);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
@@ -709,36 +694,28 @@ void gpiod_unexport(struct gpio_desc *desc)
return;
}
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags))
- goto err_unlock;
-
- dev = class_find_device(&gpio_class, NULL, desc, match_export);
- if (!dev)
- goto err_unlock;
-
- data = dev_get_drvdata(dev);
+ scoped_guard(mutex, &sysfs_lock) {
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ return;
- clear_bit(FLAG_EXPORT, &desc->flags);
+ dev = class_find_device(&gpio_class, NULL, desc, match_export);
+ if (!dev)
+ return;
- device_unregister(dev);
+ data = dev_get_drvdata(dev);
+ clear_bit(FLAG_EXPORT, &desc->flags);
+ device_unregister(dev);
- /*
- * Release irq after deregistration to prevent race with edge_store.
- */
- if (data->irq_flags)
- gpio_sysfs_free_irq(dev);
-
- mutex_unlock(&sysfs_lock);
+ /*
+ * Release irq after deregistration to prevent race with
+ * edge_store.
+ */
+ if (data->irq_flags)
+ gpio_sysfs_free_irq(dev);
+ }
put_device(dev);
kfree(data);
-
- return;
-
-err_unlock:
- mutex_unlock(&sysfs_lock);
}
EXPORT_SYMBOL_GPL(gpiod_unexport);
@@ -779,9 +756,8 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
if (IS_ERR(dev))
return PTR_ERR(dev);
- mutex_lock(&sysfs_lock);
+ guard(mutex)(&sysfs_lock);
gdev->mockdev = dev;
- mutex_unlock(&sysfs_lock);
return 0;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d5952ab7752c..679ed764cb14 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -14,6 +14,7 @@
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
@@ -23,7 +24,6 @@
#include <linux/pinctrl/consumer.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/string.h>
@@ -713,6 +713,45 @@ bool gpiochip_line_is_valid(const struct gpio_chip *gc,
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
+static void gpiod_free_irqs(struct gpio_desc *desc)
+{
+ int irq = gpiod_to_irq(desc);
+ struct irq_desc *irqd = irq_to_desc(irq);
+ void *cookie;
+
+ for (;;) {
+ /*
+ * Make sure the action doesn't go away while we're
+ * dereferencing it. Retrieve and store the cookie value.
+ * If the irq is freed after we release the lock, that's
+ * alright - the underlying maple tree lookup will return NULL
+ * and nothing will happen in free_irq().
+ */
+ scoped_guard(mutex, &irqd->request_mutex) {
+ if (!irq_desc_has_action(irqd))
+ return;
+
+ cookie = irqd->action->dev_id;
+ }
+
+ free_irq(irq, cookie);
+ }
+}
+
+/*
+ * The chip is going away but there may be users who had requested interrupts
+ * on its GPIO lines who have no idea about its removal and have no way of
+ * being notified about it. We need to free any interrupts still in use here or
+ * we'll leak memory and resources (like procfs files).
+ */
+static void gpiochip_free_remaining_irqs(struct gpio_chip *gc)
+{
+ struct gpio_desc *desc;
+
+ for_each_gpio_desc_with_flag(gc, desc, FLAG_USED_AS_IRQ)
+ gpiod_free_irqs(desc);
+}
+
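free_irq() removes only the action whose dev_id matches the given cookie, and a shared line may carry several actions, so gpiod_free_irqs() loops until irq_desc_has_action() reports the descriptor empty. For context, a hypothetical consumer whose leaked request this cleanup would reclaim (the demo_* names are not from this patch):

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

/*
 * If this caller never issues free_irq() before the GPIO chip is
 * removed, gpiochip_free_remaining_irqs() now releases the action.
 */
static int demo_attach(struct gpio_desc *desc, void *dev_id)
{
        int irq = gpiod_to_irq(desc);

        if (irq < 0)
                return irq;

        return request_irq(irq, demo_handler, IRQF_TRIGGER_RISING,
                           "demo", dev_id);
}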
static void gpiodev_release(struct device *dev)
{
struct gpio_device *gdev = to_gpio_device(dev);
@@ -986,10 +1025,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
}
}
- for (desc_index = 0; desc_index < gc->ngpio; desc_index++)
- gdev->descs[desc_index].gdev = gdev;
-
- BLOCKING_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
+ ATOMIC_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
ret = init_srcu_struct(&gdev->srcu);
@@ -1018,6 +1054,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
for (desc_index = 0; desc_index < gc->ngpio; desc_index++) {
struct gpio_desc *desc = &gdev->descs[desc_index];
+ desc->gdev = gdev;
+
if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
assign_bit(FLAG_IS_OUT,
&desc->flags, !gc->get_direction(gc, desc_index));
@@ -1125,6 +1163,7 @@ void gpiochip_remove(struct gpio_chip *gc)
/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
gpiochip_sysfs_unregister(gdev);
gpiochip_free_hogs(gc);
+ gpiochip_free_remaining_irqs(gc);
scoped_guard(mutex, &gpio_devices_lock)
list_del_rcu(&gdev->list);
@@ -1183,11 +1222,6 @@ struct gpio_device *gpio_device_find(const void *data,
struct gpio_device *gdev;
struct gpio_chip *gc;
- /*
- * Not yet but in the future the spinlock below will become a mutex.
- * Annotate this function before anyone tries to use it in interrupt
- * context like it happened with gpiochip_find().
- */
might_sleep();
guard(srcu)(&gpio_devices_srcu);
@@ -2392,8 +2426,10 @@ static void gpiod_free_commit(struct gpio_desc *desc)
#endif
desc_set_label(desc, NULL);
WRITE_ONCE(desc->flags, flags);
-
- gpiod_line_state_notify(desc, GPIOLINE_CHANGED_RELEASED);
+#ifdef CONFIG_GPIO_CDEV
+ WRITE_ONCE(desc->debounce_period_us, 0);
+#endif
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_RELEASED);
}
}
@@ -2492,6 +2528,8 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
return ERR_PTR(ret);
}
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
+
return desc;
}
EXPORT_SYMBOL_GPL(gpiochip_request_own_desc);
@@ -2520,13 +2558,28 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
* rely on gpio_request() having been called beforehand.
*/
-static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset,
- unsigned long config)
+int gpio_do_set_config(struct gpio_desc *desc, unsigned long config)
{
- if (!gc->set_config)
+ int ret;
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
+ if (!guard.gc->set_config)
return -ENOTSUPP;
- return gc->set_config(gc, offset, config);
+ ret = guard.gc->set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+#ifdef CONFIG_GPIO_CDEV
+ /*
+ * Special case - if we're setting debounce period, we need to store
+ * it in the descriptor in case user-space wants to know it.
+ */
+ if (!ret && pinconf_to_config_param(config) == PIN_CONFIG_INPUT_DEBOUNCE)
+ WRITE_ONCE(desc->debounce_period_us,
+ pinconf_to_config_argument(config));
+#endif
+ return ret;
}
static int gpio_set_config_with_argument(struct gpio_desc *desc,
@@ -2535,12 +2588,8 @@ static int gpio_set_config_with_argument(struct gpio_desc *desc,
{
unsigned long config;
- CLASS(gpio_chip_guard, guard)(desc);
- if (!guard.gc)
- return -ENODEV;
-
config = pinconf_to_config_packed(mode, argument);
- return gpio_do_set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+ return gpio_do_set_config(desc, config);
}
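gpio_do_set_config() now resolves the chip itself and inspects the packed pinconf value so the debounce period can be cached in the descriptor. For reference, the generic pinconf encoding keeps the parameter in the low 8 bits and the argument in the remaining bits; a sketch of the round trip (not from this patch):

#include <linux/bug.h>
#include <linux/pinctrl/pinconf-generic.h>

static void demo_pack(void)
{
        /* 5000 us of debounce, packed and unpacked again */
        unsigned long cfg =
                pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 5000);

        WARN_ON(pinconf_to_config_param(cfg) != PIN_CONFIG_INPUT_DEBOUNCE);
        WARN_ON(pinconf_to_config_argument(cfg) != 5000);
}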
static int gpio_set_config_with_argument_optional(struct gpio_desc *desc,
@@ -2615,9 +2664,15 @@ static int gpio_set_bias(struct gpio_desc *desc)
*/
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
{
- return gpio_set_config_with_argument_optional(desc,
- PIN_CONFIG_INPUT_DEBOUNCE,
- debounce);
+ int ret;
+
+ ret = gpio_set_config_with_argument_optional(desc,
+ PIN_CONFIG_INPUT_DEBOUNCE,
+ debounce);
+ if (!ret)
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return ret;
}
/**
@@ -2632,10 +2687,22 @@ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
*/
int gpiod_direction_input(struct gpio_desc *desc)
{
- int ret = 0;
+ int ret;
VALIDATE_DESC(desc);
+ ret = gpiod_direction_input_nonotify(desc);
+ if (ret == 0)
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gpiod_direction_input);
+
+int gpiod_direction_input_nonotify(struct gpio_desc *desc)
+{
+ int ret = 0;
+
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
return -ENODEV;
@@ -2678,7 +2745,6 @@ int gpiod_direction_input(struct gpio_desc *desc)
return ret;
}
-EXPORT_SYMBOL_GPL(gpiod_direction_input);
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
@@ -2740,8 +2806,15 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
*/
int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
+ int ret;
+
VALIDATE_DESC(desc);
- return gpiod_direction_output_raw_commit(desc, value);
+
+ ret = gpiod_direction_output_raw_commit(desc, value);
+ if (ret == 0)
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
@@ -2760,11 +2833,23 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
- unsigned long flags;
int ret;
VALIDATE_DESC(desc);
+ ret = gpiod_direction_output_nonotify(desc, value);
+ if (ret == 0)
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gpiod_direction_output);
+
+int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value)
+{
+ unsigned long flags;
+ int ret;
+
flags = READ_ONCE(desc->flags);
if (test_bit(FLAG_ACTIVE_LOW, &flags))
@@ -2788,7 +2873,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
goto set_output_value;
/* Emulate open drain by not actively driving the line high */
if (value) {
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
goto set_output_flag;
}
} else if (test_bit(FLAG_OPEN_SOURCE, &flags)) {
@@ -2797,7 +2882,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
goto set_output_value;
/* Emulate open source by not actively driving the line low */
if (!value) {
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
goto set_output_flag;
}
} else {
@@ -2821,7 +2906,6 @@ set_output_flag:
set_bit(FLAG_IS_OUT, &desc->flags);
return ret;
}
-EXPORT_SYMBOL_GPL(gpiod_direction_output);
/**
* gpiod_enable_hw_timestamp_ns - Enable hardware timestamp in nanoseconds.
@@ -2900,13 +2984,30 @@ EXPORT_SYMBOL_GPL(gpiod_disable_hw_timestamp_ns);
*/
int gpiod_set_config(struct gpio_desc *desc, unsigned long config)
{
+ int ret;
+
VALIDATE_DESC(desc);
- CLASS(gpio_chip_guard, guard)(desc);
- if (!guard.gc)
- return -ENODEV;
+ ret = gpio_do_set_config(desc, config);
+ if (!ret) {
+ /* These are the only options we notify the userspace about. */
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ case PIN_CONFIG_DRIVE_OPEN_SOURCE:
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ gpiod_line_state_notify(desc,
+ GPIO_V2_LINE_CHANGED_CONFIG);
+ break;
+ default:
+ break;
+ }
+ }
- return gpio_do_set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiod_set_config);
@@ -2973,6 +3074,7 @@ void gpiod_toggle_active_low(struct gpio_desc *desc)
{
VALIDATE_DESC_VOID(desc);
change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
}
EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
@@ -3617,9 +3719,15 @@ EXPORT_SYMBOL_GPL(gpiod_cansleep);
*/
int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
{
+ int ret;
+
VALIDATE_DESC(desc);
- return desc_set_label(desc, name);
+ ret = desc_set_label(desc, name);
+ if (ret == 0)
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
@@ -4047,8 +4155,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action)
{
- blocking_notifier_call_chain(&desc->gdev->line_state_notifier,
- action, desc);
+ atomic_notifier_call_chain(&desc->gdev->line_state_notifier,
+ action, desc);
}
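With the chain converted to an atomic notifier, gpiod_line_state_notify() no longer sleeps and may be invoked with spinlocks held or from IRQ context; the sleeping side now lives entirely in the cdev work handler. A hypothetical call site this enables (gpiod_line_state_notify() is gpiolib-internal; demo_reconfigure() is illustrative):

#include <linux/spinlock.h>
#include <uapi/linux/gpio.h>

static void demo_reconfigure(struct gpio_desc *desc, spinlock_t *lock)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        /*
         * Safe now: atomic_notifier_call_chain() only takes an RCU
         * read lock around the registered callbacks.
         */
        gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
        spin_unlock_irqrestore(lock, flags);
}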
/**
@@ -4325,7 +4433,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
return ERR_PTR(ret);
}
- gpiod_line_state_notify(desc, GPIOLINE_CHANGED_REQUESTED);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
return desc;
}
@@ -4497,10 +4605,10 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* Process flags */
if (dflags & GPIOD_FLAGS_BIT_DIR_OUT)
- ret = gpiod_direction_output(desc,
+ ret = gpiod_direction_output_nonotify(desc,
!!(dflags & GPIOD_FLAGS_BIT_DIR_VAL));
else
- ret = gpiod_direction_input(desc);
+ ret = gpiod_direction_input_nonotify(desc);
return ret;
}
@@ -4926,6 +5034,8 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
return NULL;
s->private = priv;
+ if (*pos > 0)
+ priv->newline = true;
priv->idx = srcu_read_lock(&gpio_devices_srcu);
list_for_each_entry_srcu(gdev, &gpio_devices, list,
@@ -4965,19 +5075,19 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
struct gpio_chip *gc;
struct device *parent;
+ if (priv->newline)
+ seq_putc(s, '\n');
+
guard(srcu)(&gdev->srcu);
gc = srcu_dereference(gdev->chip, &gdev->srcu);
if (!gc) {
- seq_printf(s, "%s%s: (dangling chip)",
- priv->newline ? "\n" : "",
- dev_name(&gdev->dev));
+ seq_printf(s, "%s: (dangling chip)\n", dev_name(&gdev->dev));
return 0;
}
- seq_printf(s, "%s%s: GPIOs %u-%u", priv->newline ? "\n" : "",
- dev_name(&gdev->dev),
- gdev->base, gdev->base + gdev->ngpio - 1);
+ seq_printf(s, "%s: GPIOs %u-%u", dev_name(&gdev->dev), gdev->base,
+ gdev->base + gdev->ngpio - 1);
parent = gc->parent;
if (parent)
seq_printf(s, ", parent: %s/%s",
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 067197d61d57..83690f72f7e5 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/srcu.h>
+#include <linux/workqueue.h>
#define GPIOCHIP_NAME "gpiochip"
@@ -44,6 +45,8 @@
* @list: links gpio_device:s together for traversal
* @line_state_notifier: used to notify subscribers about lines being
* requested, released or reconfigured
+ * @line_state_wq: used to emit line state events from a separate thread in
+ * process context
* @device_notifier: used to notify character device wait queues about the GPIO
* device being unregistered
* @srcu: protects the pointer to the underlying GPIO chip
@@ -69,7 +72,8 @@ struct gpio_device {
const char *label;
void *data;
struct list_head list;
- struct blocking_notifier_head line_state_notifier;
+ struct atomic_notifier_head line_state_notifier;
+ struct workqueue_struct *line_state_wq;
struct blocking_notifier_head device_notifier;
struct srcu_struct srcu;
@@ -151,6 +155,8 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
+int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value);
+int gpiod_direction_input_nonotify(struct gpio_desc *desc);
struct gpio_desc_label {
struct rcu_head rh;
@@ -165,6 +171,7 @@ struct gpio_desc_label {
* @label: Name of the consumer
* @name: Line name
* @hog: Pointer to the device node that hogs this line (if any)
+ * @debounce_period_us: Debounce period in microseconds
*
* These are obtained using gpiod_get() and are preferable to the old
* integer-based handles.
@@ -202,6 +209,10 @@ struct gpio_desc {
#ifdef CONFIG_OF_DYNAMIC
struct device_node *hog;
#endif
+#ifdef CONFIG_GPIO_CDEV
+ /* debounce period in microseconds */
+ unsigned int debounce_period_us;
+#endif
};
#define gpiod_not_found(desc) (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
@@ -249,6 +260,7 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
const char *label,
bool platform_lookup_allowed);
+int gpio_do_set_config(struct gpio_desc *desc, unsigned long config);
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags);
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1cb5a4f19293..fbef3f471bd0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,9 +9,6 @@ menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
select DRM_PANEL_ORIENTATION_QUIRKS
- select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
- select FB_CORE if DRM_FBDEV_EMULATION
- select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select HDMI
select I2C
select DMA_SHARED_BUFFER
@@ -102,13 +99,19 @@ config DRM_KUNIT_TEST
config DRM_KMS_HELPER
tristate
depends on DRM
+ select FB_CORE if DRM_FBDEV_EMULATION
help
CRTC helpers for KMS drivers.
+config DRM_DRAW
+ bool
+ depends on DRM
+
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
depends on DRM
select FONT_SUPPORT
+ select DRM_DRAW
help
Enable a drm panic handler, which will display a user-friendly message
when a kernel panic occurs. It's useful when using a user-space
@@ -152,6 +155,7 @@ config DRM_PANIC_SCREEN
config DRM_PANIC_SCREEN_QR_CODE
bool "Add a panic screen with a QR code"
depends on DRM_PANIC && RUST
+ select ZLIB_DEFLATE
help
This option adds a QR code generator, and a panic screen with a QR
code. The QR code will contain the last lines of kmsg and other debug
@@ -210,46 +214,16 @@ config DRM_DEBUG_MODESET_LOCK
If in doubt, say "N".
-config DRM_FBDEV_EMULATION
- bool "Enable legacy fbdev support for your modesetting driver"
+config DRM_CLIENT
+ bool
depends on DRM
- select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
- default FB
- help
- Choose this option if you have a need for the legacy fbdev
- support. Note that this support also provides the linux console
- support on top of your modesetting driver.
-
- If in doubt, say "Y".
-
-config DRM_FBDEV_OVERALLOC
- int "Overallocation of the fbdev buffer"
- depends on DRM_FBDEV_EMULATION
- default 100
help
- Defines the fbdev buffer overallocation in percent. Default
- is 100. Typical values for double buffering will be 200,
- triple buffering 300.
+ Enables support for DRM clients. DRM drivers that need
+ struct drm_client_dev and its interfaces should select this
+ option. Drivers that support the default clients should
+ select DRM_CLIENT_SELECTION instead.
-config DRM_FBDEV_LEAK_PHYS_SMEM
- bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
- depends on DRM_FBDEV_EMULATION && EXPERT
- default n
- help
- In order to keep user-space compatibility, we want in certain
- use-cases to keep leaking the fbdev physical address to the
- user-space program handling the fbdev buffer.
- This affects, not only, Amlogic, Allwinner or Rockchip devices
- with ARM Mali GPUs using an userspace Blob.
- This option is not supported by upstream developers and should be
- removed as soon as possible and be considered as a broken and
- legacy behaviour from a modern fbdev device driver.
-
- Please send any bug reports when using this to your proprietary
- software vendor that requires this.
-
- If in doubt, say "N" or spread the word to your closed source
- library vendor.
+source "drivers/gpu/drm/clients/Kconfig"
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
@@ -320,19 +294,27 @@ config DRM_TTM_HELPER
tristate
depends on DRM
select DRM_TTM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
+ select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Helpers for ttm-based gem objects
config DRM_GEM_DMA_HELPER
tristate
depends on DRM
- select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
+ select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM DMA helper functions
config DRM_GEM_SHMEM_HELPER
tristate
depends on DRM && MMU
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
+ select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM shmem helper functions
@@ -472,6 +454,7 @@ source "drivers/gpu/drm/imagination/Kconfig"
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
depends on DRM && PCI && MMU && HYPERV
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
help
@@ -485,6 +468,10 @@ config DRM_HYPERV
config DRM_EXPORT_FOR_TESTS
bool
+# Separate option as not all DRM drivers use it
+config DRM_PANEL_BACKLIGHT_QUIRKS
+ tristate
+
config DRM_LIB_RANDOM
bool
default n
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 784229d4504d..19fb370fbc56 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -34,15 +34,12 @@ endif
subdir-ccflags-$(CONFIG_DRM_WERROR) += -Werror
drm-y := \
- drm_aperture.o \
drm_atomic.o \
drm_atomic_uapi.o \
drm_auth.o \
drm_blend.o \
drm_bridge.o \
drm_cache.o \
- drm_client.o \
- drm_client_modeset.o \
drm_color_mgmt.o \
drm_connector.o \
drm_crtc.o \
@@ -68,6 +65,7 @@ drm-y := \
drm_prime.o \
drm_print.o \
drm_property.o \
+ drm_rect.o \
drm_syncobj.o \
drm_sysfs.o \
drm_trace_points.o \
@@ -75,6 +73,10 @@ drm-y := \
drm_vblank_work.o \
drm_vma_manager.o \
drm_writeback.o
+drm-$(CONFIG_DRM_CLIENT) += \
+ drm_client.o \
+ drm_client_event.o \
+ drm_client_modeset.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -89,10 +91,12 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
drm_privacy_screen_x86.o
drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
drm-$(CONFIG_DRM_PANIC) += drm_panic.o
+drm-$(CONFIG_DRM_DRAW) += drm_draw.o
drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
+obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
#
# Memory-management helpers
@@ -140,7 +144,6 @@ drm_kms_helper-y := \
drm_modeset_helper.o \
drm_plane_helper.o \
drm_probe_helper.o \
- drm_rect.o \
drm_self_refresh_helper.o \
drm_simple_kms_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
@@ -156,6 +159,7 @@ obj-y += tests/
obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-y += arm/
+obj-y += clients/
obj-y += display/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 0051fb1b437f..1a11cab741ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -5,7 +5,10 @@ config DRM_AMDGPU
depends on DRM && PCI && MMU
depends on !UML
select FW_LOADER
+ select DRM_CLIENT
+ select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_DSC_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
@@ -23,6 +26,7 @@ config DRM_AMDGPU
select DRM_BUDDY
select DRM_SUBALLOC_HELPER
select DRM_EXEC
+ select DRM_PANEL_BACKLIGHT_QUIRKS
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c7b18c52825d..5b21674b07fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2017-2024 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
@@ -105,7 +105,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o umc_v12_0.o
+ umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o umc_v12_0.o umc_v8_14.o
# add IH block
amdgpu-y += \
@@ -200,6 +200,7 @@ amdgpu-y += \
vcn_v4_0_3.o \
vcn_v4_0_5.o \
vcn_v5_0_0.o \
+ vcn_v5_0_1.o \
amdgpu_jpeg.o \
jpeg_v1_0.o \
jpeg_v2_0.o \
@@ -208,7 +209,8 @@ amdgpu-y += \
jpeg_v4_0.o \
jpeg_v4_0_3.o \
jpeg_v4_0_5.o \
- jpeg_v5_0_0.o
+ jpeg_v5_0_0.o \
+ jpeg_v5_0_1.o
# add VPE block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index b0f95a7649bf..e13fbd974141 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -85,16 +85,9 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
-
- if (r) {
- dev_err(adev->dev,
- "suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -246,7 +239,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
dev_err(adev->dev, "Failed to get BIF handle\n");
return -EINVAL;
}
- r = cmn_block->version->funcs->resume(adev);
+ r = amdgpu_ip_block_resume(cmn_block);
if (r)
return r;
@@ -282,15 +275,10 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type ==
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- dev_err(adev->dev,
- "resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
- }
- adev->ip_blocks[i].status.hw = true;
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
+ return r;
}
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -304,7 +292,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(
- (void *)adev);
+ &adev->ip_blocks[i]);
if (r) {
dev_err(adev->dev,
"late_init of IP block <%s> failed %d after reset\n",
@@ -342,8 +330,12 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
}
list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ amdgpu_set_init_level(tmp_adev,
+ AMDGPU_INIT_LEVEL_RESET_RECOVERY);
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
+ /*TBD: Ideally should clear only GFX, SDMA blocks*/
+ amdgpu_ras_clear_err_state(tmp_adev);
r = aldebaran_mode2_restore_ip(tmp_adev);
if (r)
goto end;
@@ -387,6 +379,8 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
tmp_adev);
if (!r) {
+ amdgpu_set_init_level(tmp_adev,
+ AMDGPU_INIT_LEVEL_DEFAULT);
amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
r = amdgpu_ib_ring_tests(tmp_adev);
@@ -417,6 +411,7 @@ static struct amdgpu_reset_handler aldebaran_mode2_handler = {
static struct amdgpu_reset_handler
*aldebaran_rst_handlers[AMDGPU_RESET_MAX_HANDLERS] = {
&aldebaran_mode2_handler,
+ &xgmi_reset_on_init_handler,
};
int aldebaran_reset_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9b1e0ede05a4..69895fccb474 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -118,7 +118,7 @@
#define MAX_GPU_INSTANCE 64
-#define GFX_SLICE_PERIOD msecs_to_jiffies(250)
+#define GFX_SLICE_PERIOD_MS 250
struct amdgpu_gpu_instance {
struct amdgpu_device *adev;
@@ -131,10 +131,6 @@ struct amdgpu_mgpu_info {
uint32_t num_gpu;
uint32_t num_dgpu;
uint32_t num_apu;
-
- /* delayed reset_func for XGMI configuration if necessary */
- struct delayed_work delayed_reset_work;
- bool pending_reset;
};
enum amdgpu_ss {
@@ -303,6 +299,12 @@ extern int amdgpu_wbrf;
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)
+/* reset mask */
+#define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */
+#define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */
+#define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */
+#define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */
+
/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
@@ -365,8 +367,11 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
-bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
#define AMDGPU_MAX_IP_NUM 16
@@ -389,6 +394,7 @@ struct amdgpu_ip_block_version {
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
+ struct amdgpu_device *adev;
};
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
@@ -563,6 +569,7 @@ enum amd_reset_method {
AMD_RESET_METHOD_MODE2,
AMD_RESET_METHOD_BACO,
AMD_RESET_METHOD_PCI,
+ AMD_RESET_METHOD_ON_INIT,
};
struct amdgpu_video_codec_info {
@@ -821,6 +828,25 @@ struct amdgpu_mqd {
struct amdgpu_mqd_prop *p);
};
+/*
+ * Custom Init levels could be defined for different situations where a full
+ * initialization of all hardware blocks is not expected. Sample cases are
+ * custom init sequences after resume from S0i3/S3, reset on initialization,
+ * partial reset of blocks etc. Levels are described in the corresponding
+ * struct definitions - amdgpu_init_default, amdgpu_init_minimal_xgmi.
+ */
+enum amdgpu_init_lvl_id {
+ AMDGPU_INIT_LEVEL_DEFAULT,
+ AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+ AMDGPU_INIT_LEVEL_RESET_RECOVERY,
+};
+
+struct amdgpu_init_level {
+ enum amdgpu_init_lvl_id level;
+ uint32_t hwini_ip_block_mask;
+};
+
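Each init level carries a bitmask selecting which IP block types go through hardware init at that level. A hypothetical mask check, assuming hwini_ip_block_mask is indexed by enum amd_ip_block_type (the helper name is illustrative):

static bool demo_ip_in_hwini(struct amdgpu_device *adev,
                             enum amd_ip_block_type type)
{
        return adev->init_lvl->hwini_ip_block_mask & BIT(type);
}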
#define AMDGPU_RESET_MAGIC_NUM 64
#define AMDGPU_MAX_DF_PERFMONS 4
struct amdgpu_reset_domain;
@@ -854,6 +880,7 @@ struct amdgpu_device {
bool need_swiotlb;
bool accel_working;
struct notifier_block acpi_nb;
+ struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct debugfs_blob_wrapper debugfs_vbios_blob;
struct debugfs_blob_wrapper debugfs_discovery_blob;
@@ -1092,8 +1119,6 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
- /* indicate amdgpu suspension status */
- bool suspend_complete;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
@@ -1150,7 +1175,6 @@ struct amdgpu_device {
struct work_struct reset_work;
- bool job_hang;
bool dc_enabled;
/* Mask of active clusters */
uint32_t aid_mask;
@@ -1166,6 +1190,8 @@ struct amdgpu_device {
bool enforce_isolation[MAX_XCP];
/* Added this mutex for cleaner shader isolation between GFX and compute processes */
struct mutex enforce_isolation_mutex;
+
+ struct amdgpu_init_level *init_lvl;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1261,6 +1287,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context);
+int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context);
+
int emu_soc_asic_init(struct amdgpu_device *adev);
/*
@@ -1443,6 +1471,8 @@ struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
struct dma_fence *gang);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
+ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
+ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
@@ -1450,23 +1480,15 @@ void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
-bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
-static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
-#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void);
-#else
-static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
-#endif
-
/*
* KMS
*/
@@ -1619,4 +1641,6 @@ extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
extern const struct attribute_group amdgpu_flash_attr_group;
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+ enum amdgpu_init_lvl_id lvl);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 2ca127173135..9d6345146495 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -158,7 +158,7 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
return -EINVAL;
}
- if (start + count >= max_count)
+ if (start + count > max_count)
return -EINVAL;
count = min_t(int, count, max_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
index 5ef6b745f222..f3289d289913 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -71,6 +71,11 @@ struct ras_query_context;
#define ACA_ERROR_CE_MASK BIT_MASK(ACA_ERROR_TYPE_CE)
#define ACA_ERROR_DEFERRED_MASK BIT_MASK(ACA_ERROR_TYPE_DEFERRED)
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+
enum aca_reg_idx {
ACA_REG_IDX_CTL = 0,
ACA_REG_IDX_STATUS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index bf6c4a0d0525..deb0785350e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -98,9 +98,9 @@ enum {
ACP_TILE_DSP2,
};
-static int acp_sw_init(void *handle)
+static int acp_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->acp.parent = adev->dev;
@@ -112,9 +112,9 @@ static int acp_sw_init(void *handle)
return 0;
}
-static int acp_sw_fini(void *handle)
+static int acp_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->acp.cgs_device)
amdgpu_cgs_destroy_device(adev->acp.cgs_device);
@@ -140,7 +140,7 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
* 2. power off the acp tiles
* 3. check and enter ulv state
*/
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
@@ -157,7 +157,7 @@ static int acp_poweron(struct generic_pm_domain *genpd)
* 2. turn on acp clock
* 3. power on acp tiles
*/
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -219,10 +219,10 @@ static const struct dmi_system_id acp_quirk_table[] = {
/**
* acp_hw_init - start and test ACP block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int acp_hw_init(void *handle)
+static int acp_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
u64 acp_base;
@@ -230,19 +230,13 @@ static int acp_hw_init(void *handle)
u32 count = 0;
struct i2s_platform_data *i2s_pdata = NULL;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- const struct amdgpu_ip_block *ip_block =
- amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
-
- if (!ip_block)
- return -EINVAL;
+ struct amdgpu_device *adev = ip_block->adev;
r = amd_acp_hw_init(adev->acp.cgs_device,
ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
if (r == -ENODEV) {
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
} else if (r) {
return r;
@@ -503,18 +497,18 @@ failure:
/**
* acp_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int acp_hw_fini(void *handle)
+static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
{
u32 val = 0;
u32 count = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* return early if no ACP */
if (!adev->acp.acp_genpd) {
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
@@ -565,28 +559,23 @@ static int acp_hw_fini(void *handle)
return 0;
}
-static int acp_suspend(void *handle)
+static int acp_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* power up on suspend */
if (!adev->acp.acp_cell)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
-static int acp_resume(void *handle)
+static int acp_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* power down again on resume */
if (!adev->acp.acp_cell)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
- return 0;
-}
-
-static int acp_early_init(void *handle)
-{
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
@@ -595,37 +584,25 @@ static bool acp_is_idle(void *handle)
return true;
}
-static int acp_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int acp_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int acp_set_clockgating_state(void *handle,
+static int acp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int acp_set_powergating_state(void *handle,
+static int acp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable, 0);
return 0;
}
static const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
- .early_init = acp_early_init,
- .late_init = NULL,
.sw_init = acp_sw_init,
.sw_fini = acp_sw_fini,
.hw_init = acp_hw_init,
@@ -633,12 +610,8 @@ static const struct amd_ip_funcs acp_ip_funcs = {
.suspend = acp_suspend,
.resume = acp_resume,
.is_idle = acp_is_idle,
- .wait_for_idle = acp_wait_for_idle,
- .soft_reset = acp_soft_reset,
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version acp_ip_block = {
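Note: the acp.c changes are part of a tree-wide conversion of the amd_ip_funcs callbacks from an opaque void *handle to a struct amdgpu_ip_block * parameter, which gives each callback its device (ip_block->adev) and version info without a lookup; hooks that were empty stubs (early_init, wait_for_idle, soft_reset) are dropped outright, as the core skips optional callbacks left NULL.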
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index f85ace0384d2..b8d4e07d2043 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
struct acpi_buffer *params)
{
acpi_status status;
+ union acpi_object *obj;
union acpi_object atif_arg_elements[2];
struct acpi_object_list atif_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
&buffer);
+ obj = (union acpi_object *)buffer.pointer;
- /* Fail only if calling the method fails and ATIF is supported */
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ /* Fail if calling the method fails */
+ if (ACPI_FAILURE(status)) {
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
- kfree(buffer.pointer);
+ kfree(obj);
return NULL;
}
- return buffer.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
+ obj->type);
+ kfree(obj);
+ return NULL;
+ }
+
+ return obj;
}
/**
@@ -791,6 +800,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
return -EIO;
}
+ kfree(info);
return 0;
}
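Note: the added kfree(info) appears to plug a leak on the success path; the ACPI object obtained earlier in this function was previously freed only on its error paths.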
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 4f08b153cb66..2c1b38c5cfc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -368,7 +368,7 @@ void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
- amdgpu_bo_reserve(*bo, true);
+ (void)amdgpu_bo_reserve(*bo, true);
amdgpu_bo_kunmap(*bo);
amdgpu_bo_unpin(*bo);
amdgpu_bo_unreserve(*bo);
@@ -715,8 +715,9 @@ err:
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
- ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
+ if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
+ (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
amdgpu_gfx_off_ctrl(adev, idle);
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
@@ -724,7 +725,9 @@ void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
/* Disable GFXOFF and PG. Temporary workaround
* to fix an issue with some compute applications on GFX9.
*/
- adev->ip_blocks[AMD_IP_BLOCK_TYPE_GFX].version->funcs->set_powergating_state((void *)adev, state);
+ struct amdgpu_ip_block *gfx_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (gfx_block != NULL)
+ gfx_block->version->funcs->set_powergating_state((void *)gfx_block, state);
}
amdgpu_dpm_switch_power_profile(adev,
PP_SMC_POWER_PROFILE_COMPUTE,
@@ -834,6 +837,9 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
+ return 0;
+
ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
if (!ring_funcs)
return -ENOMEM;
@@ -858,8 +864,14 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
- if (kiq_ring->sched.ready && !adev->job_hang)
- r = amdgpu_ring_test_helper(kiq_ring);
+ /* Submit unmap queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * it to ensure that the unmap-queues packet submitted above was
+ * processed successfully before returning.
+ */
+ r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
@@ -889,3 +901,27 @@ int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
return kgd2kfd_start_sched(adev->kfd.dev, node_id);
}
+
+/* check if there are KFD queues active */
+bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return false;
+
+ return kgd2kfd_compute_active(adev->kfd.dev, node_id);
+}
+
+/* Config CGTT_SQ_CLK_CTRL */
+int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable)
+{
+ int r;
+
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ r = psp_config_sq_perfmon(&adev->psp, xcp_id, core_override_enable,
+ reg_override_enable, perfmon_override_enable);
+
+ return r;
+}
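
Read together, the two amdgpu_amdkfd_unmap_hiq() hunks above change the flow to: bail out early while the KIQ cannot make progress, then unconditionally commit the unmap packet and confirm it was consumed. A condensed sketch of the resulting sequence (assembled from the hunks, not the complete function):

        if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
                return 0;

        /* ring_funcs allocation and setup elided */

        spin_lock(&kiq->ring_lock);
        kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
        /* push the packet to the hardware */
        amdgpu_ring_commit(kiq_ring);
        /* scratch-register ring test: passes only once the KIQ drained it */
        r = amdgpu_ring_test_helper(kiq_ring);
        spin_unlock(&kiq->ring_lock);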
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index f9d119448442..8af67f18500a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -266,6 +266,10 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
u32 inst);
int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
+int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
+bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id);
+
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
@@ -428,6 +432,10 @@ int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
+bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault);
+
#else
static inline int kgd2kfd_init(void)
{
@@ -508,5 +516,17 @@ static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
+
+static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return false;
+}
+
+static inline bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault)
+{
+ return false;
+}
+
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 9435af2e6bdc..9abf29b58ac7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -299,7 +299,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
if (r)
goto out;
} else {
- drm_sched_start(&ring->sched);
+ drm_sched_start(&ring->sched, 0);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 3bc0cbf45bc5..441568163e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -944,9 +944,7 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
*
* @adev: Handle of device whose registers are to be read
* @queue_idx: Index of queue in the queue-map bit-field
- * @wave_cnt: Output parameter updated with number of waves in flight
- * @vmid: Output parameter updated with VMID of queue whose wave count
- * is being collected
+ * @queue_cnt: Stores the wave count and doorbell offset for an active queue
* @inst: xcc's instance number on a multi-XCC setup
*/
static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
@@ -1133,8 +1131,7 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
uint32_t low, high;
uint64_t queue_addr = 0;
- if (!adev->debug_exp_resets &&
- !adev->gfx.num_gfx_rings)
+ if (!amdgpu_gpu_recovery)
return 0;
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
@@ -1185,6 +1182,9 @@ uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
uint32_t low, high, pipe_reset_data = 0;
uint64_t queue_addr = 0;
+ if (!amdgpu_gpu_recovery)
+ return 0;
+
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ce5ca304dba9..1e998f972c30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -730,7 +730,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
return;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
@@ -779,7 +779,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
}
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -989,7 +989,7 @@ unwind:
if (!attachment[i])
continue;
if (attachment[i]->bo_va) {
- amdgpu_bo_reserve(bo[i], true);
+ (void)amdgpu_bo_reserve(bo[i], true);
if (--attachment[i]->bo_va->ref_count == 0)
amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
amdgpu_bo_unreserve(bo[i]);
@@ -1259,11 +1259,11 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
return -EBUSY;
}
- amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+ (void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
- amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
+ (void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ (void)amdgpu_sync_fence(sync, bo_va->last_pt_update);
return 0;
}
@@ -1439,8 +1439,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
list_add_tail(&vm->vm_list_node,
&(vm->process_info->vm_list_head));
vm->process_info->n_vms++;
-
- *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
+ if (ef)
+ *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
mutex_unlock(&vm->process_info->lock);
return 0;
@@ -2352,7 +2352,7 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
{
struct amdgpu_bo *bo = mem->bo;
- amdgpu_bo_reserve(bo, true);
+ (void)amdgpu_bo_reserve(bo, true);
amdgpu_bo_kunmap(bo);
amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
@@ -2524,11 +2524,14 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
/* First eviction, stop the queues */
r = kgd2kfd_quiesce_mm(mni->mm,
KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
- if (r)
+
+ if (r && r != -ESRCH)
pr_err("Failed to quiesce KFD\n");
- queue_delayed_work(system_freezable_wq,
- &process_info->restore_userptr_work,
- msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
+
+ if (r != -ESRCH)
+ queue_delayed_work(system_freezable_wq,
+ &process_info->restore_userptr_work,
+ msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
mutex_unlock(&process_info->notifier_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 0c8975ac5af9..093141ad6ed0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1145,8 +1145,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
return 0;
}
-void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
- u32 eng_clock, u32 mem_clock)
+int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+ u32 eng_clock, u32 mem_clock)
{
SET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
@@ -1161,8 +1161,8 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
if (mem_clock)
args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
- sizeof(args));
+ return amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 0811474e8fd3..0e16432d9a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -163,8 +163,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
-void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
- u32 eng_clock, u32 mem_clock);
+int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+ u32 eng_clock, u32 mem_clock);
bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 375f02002579..3893e6fc2f03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -89,18 +89,6 @@ bool amdgpu_is_atpx_hybrid(void)
return amdgpu_atpx_priv.atpx.is_hybrid;
}
-bool amdgpu_atpx_dgpu_req_power_for_displays(void)
-{
- return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
-}
-
-#if defined(CONFIG_ACPI)
-void *amdgpu_atpx_get_dhandle(void)
-{
- return amdgpu_atpx_priv.dhandle;
-}
-#endif
-
/**
* amdgpu_atpx_call - call an ATPX method
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 45affc02548c..423fd2eebe1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -47,35 +47,37 @@
/* Check if current bios is an ATOM BIOS.
* Return true if it is ATOM BIOS. Otherwise, return false.
*/
-static bool check_atom_bios(uint8_t *bios, size_t size)
+static bool check_atom_bios(struct amdgpu_device *adev, size_t size)
{
uint16_t tmp, bios_header_start;
+ uint8_t *bios = adev->bios;
if (!bios || size < 0x49) {
- DRM_INFO("vbios mem is null or mem size is wrong\n");
+ dev_dbg(adev->dev, "VBIOS mem is null or mem size is wrong\n");
return false;
}
if (!AMD_IS_VALID_VBIOS(bios)) {
- DRM_INFO("BIOS signature incorrect %x %x\n", bios[0], bios[1]);
+ dev_dbg(adev->dev, "VBIOS signature incorrect %x %x\n", bios[0],
+ bios[1]);
return false;
}
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
- DRM_INFO("Can't locate bios header\n");
+ dev_dbg(adev->dev, "Can't locate VBIOS header\n");
return false;
}
tmp = bios_header_start + 4;
if (size < tmp) {
- DRM_INFO("BIOS header is broken\n");
+ dev_dbg(adev->dev, "VBIOS header is broken\n");
return false;
}
if (!memcmp(bios + tmp, "ATOM", 4) ||
!memcmp(bios + tmp, "MOTA", 4)) {
- DRM_DEBUG("ATOMBIOS detected\n");
+ dev_dbg(adev->dev, "ATOMBIOS detected\n");
return true;
}
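
For orientation, check_atom_bios() walks a fixed layout: a two-byte option-ROM signature up front, a little-endian offset to the ATOM header at 0x48/0x49, and an "ATOM"/"MOTA" tag four bytes into that header. A standalone sketch of the same walk (it assumes AMD_IS_VALID_VBIOS() is the usual 0x55 0xAA signature test; the offsets follow the checks above):

static bool looks_like_atom_bios(const u8 *bios, size_t size)
{
        u16 hdr;

        if (!bios || size < 0x4a || bios[0] != 0x55 || bios[1] != 0xaa)
                return false;

        hdr = bios[0x48] | (bios[0x49] << 8);   /* header start, LE */
        if (!hdr || size < (size_t)hdr + 8)     /* room for the 4-byte tag */
                return false;

        return !memcmp(bios + hdr + 4, "ATOM", 4) ||
               !memcmp(bios + hdr + 4, "MOTA", 4);
}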
@@ -118,7 +120,7 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, size);
iounmap(bios);
- if (!check_atom_bios(adev->bios, size)) {
+ if (!check_atom_bios(adev, size)) {
kfree(adev->bios);
return false;
}
@@ -146,7 +148,7 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, size);
pci_unmap_rom(adev->pdev, bios);
- if (!check_atom_bios(adev->bios, size)) {
+ if (!check_atom_bios(adev, size)) {
kfree(adev->bios);
return false;
}
@@ -186,7 +188,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
/* read complete BIOS */
amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
- if (!check_atom_bios(adev->bios, len)) {
+ if (!check_atom_bios(adev, len)) {
kfree(adev->bios);
return false;
}
@@ -216,7 +218,7 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, romlen);
iounmap(bios);
- if (!check_atom_bios(adev->bios, romlen))
+ if (!check_atom_bios(adev, romlen))
goto free_bios;
adev->bios_size = romlen;
@@ -324,7 +326,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
break;
}
- if (!check_atom_bios(adev->bios, size)) {
+ if (!check_atom_bios(adev, size)) {
kfree(adev->bios);
return false;
}
@@ -389,7 +391,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
vhdr->ImageLength,
GFP_KERNEL);
- if (!check_atom_bios(adev->bios, vhdr->ImageLength)) {
+ if (!check_atom_bios(adev, vhdr->ImageLength)) {
kfree(adev->bios);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 16153d275d7a..68bce6a6d09d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -414,7 +414,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "%s", fw_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw,
+ AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
amdgpu_ucode_release(&adev->pm.fw);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1e475eb01417..5cc5f59e3018 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
/* Only a single BO list is allowed to simplify handling. */
if (p->bo_list)
- ret = -EINVAL;
+ goto free_partial_kdata;
ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)
@@ -1105,7 +1105,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
* We can't use gang submit with reserved VMIDs when the VM changes
* can't be invalidated by more than one engine at the same time.
*/
- if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
+ if (p->gang_size > 1 && !adev->vm_manager.concurrent_flush) {
for (i = 0; i < p->gang_size; ++i) {
struct drm_sched_entity *entity = p->entities[i];
struct drm_gpu_scheduler *sched = entity->rq->sched;
@@ -1189,7 +1189,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (!bo)
continue;
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(bo, false);
}
}
@@ -1801,13 +1801,18 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
return -EINVAL;
+ /* Make sure VRAM is allocated contiguously */
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
- for (i = 0; i < (*bo)->placement.num_placement; i++)
- (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
- r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
- if (r)
- return r;
+ if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
+ !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
+
+ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
+ for (i = 0; i < (*bo)->placement.num_placement; i++)
+ (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
+ if (r)
+ return r;
+ }
return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index cbef720de779..49ca8c814455 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -402,7 +402,7 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
int r;
uint32_t *data, x;
- if (size & 0x3 || *pos & 0x3)
+ if (size > 4096 || size & 0x3 || *pos & 0x3)
return -EINVAL;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
@@ -1648,7 +1648,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
ent = debugfs_create_file(debugfs_regs_names[i],
- S_IFREG | 0444, root,
+ S_IFREG | 0400, root,
adev, debugfs_regs[i]);
if (!i && !IS_ERR_OR_NULL(ent))
i_size_write(ent->d_inode, adev->rmmio_size);
@@ -2095,16 +2095,22 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog)
amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm);
+ amdgpu_debugfs_vcn_sched_mask_init(adev);
+ amdgpu_debugfs_jpeg_sched_mask_init(adev);
+ amdgpu_debugfs_gfx_sched_mask_init(adev);
+ amdgpu_debugfs_compute_sched_mask_init(adev);
+ amdgpu_debugfs_sdma_sched_mask_init(adev);
+
amdgpu_ras_debugfs_create_all(adev);
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
- debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
+ debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
&amdgpu_evict_vram_fops);
- debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
+ debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev,
&amdgpu_evict_gtt_fops);
- debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
+ debugfs_create_file("amdgpu_test_ib", 0400, root, adev,
&amdgpu_debugfs_test_ib_fops);
debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
&amdgpu_debugfs_vm_info_fops);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 5ac59b62020c..824f9da5b6ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -203,6 +203,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
struct amdgpu_coredump_info *coredump = data;
struct drm_print_iterator iter;
struct amdgpu_vm_fault_info *fault_info;
+ struct amdgpu_ip_block *ip_block;
int ver;
iter.data = buffer;
@@ -282,13 +283,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
/* dump the ip state for each ip */
drm_printf(&p, "IP Dump\n");
for (int i = 0; i < coredump->adev->num_ip_blocks; i++) {
- if (coredump->adev->ip_blocks[i].version->funcs->print_ip_state) {
- drm_printf(&p, "IP: %s\n",
- coredump->adev->ip_blocks[i]
- .version->funcs->name);
- coredump->adev->ip_blocks[i]
- .version->funcs->print_ip_state(
- (void *)coredump->adev, &p);
+ ip_block = &coredump->adev->ip_blocks[i];
+ if (ip_block->version->funcs->print_ip_state) {
+ drm_printf(&p, "IP: %s\n", ip_block->version->funcs->name);
+ ip_block->version->funcs->print_ip_state(ip_block, &p);
drm_printf(&p, "\n");
}
}
@@ -345,11 +343,10 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
coredump->skip_vram_check = skip_vram_check;
coredump->reset_vram_lost = vram_lost;
- if (job && job->vm) {
- struct amdgpu_vm *vm = job->vm;
+ if (job && job->pasid) {
struct amdgpu_task_info *ti;
- ti = amdgpu_vm_get_task_info_vm(vm);
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
if (ti) {
coredump->reset_task_info = *ti;
amdgpu_vm_put_task_info(ti);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c2394c8b4d6b..d100bb7a137c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,8 @@
* Alex Deucher
* Jerome Glisse
*/
+
+#include <linux/aperture.h>
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
@@ -35,10 +37,9 @@
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
@@ -144,15 +145,70 @@ const char *amdgpu_asic_name[] = {
"LAST",
};
+#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
+/*
+ * Default init level where all blocks are expected to be initialized. This is
+ * the level of initialization expected by default and also after a full reset
+ * of the device.
+ */
+struct amdgpu_init_level amdgpu_init_default = {
+ .level = AMDGPU_INIT_LEVEL_DEFAULT,
+ .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+struct amdgpu_init_level amdgpu_init_recovery = {
+ .level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
+ .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+/*
+ * Minimal blocks needed to be initialized before a XGMI hive can be reset. This
+ * is used for cases like reset on initialization where the entire hive needs to
+ * be reset before first use.
+ */
+struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
+ .level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+ .hwini_ip_block_mask =
+ BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
+ BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
+ BIT(AMD_IP_BLOCK_TYPE_PSP)
+};
+
+static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
+ enum amd_ip_block_type block)
+{
+ return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
+}
+
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+ enum amdgpu_init_lvl_id lvl)
+{
+ switch (lvl) {
+ case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
+ adev->init_lvl = &amdgpu_init_minimal_xgmi;
+ break;
+ case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
+ adev->init_lvl = &amdgpu_init_recovery;
+ break;
+ case AMDGPU_INIT_LEVEL_DEFAULT:
+ fallthrough;
+ default:
+ adev->init_lvl = &amdgpu_init_default;
+ break;
+ }
+}
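
The whole level mechanism reduces to one mask test, as amdgpu_ip_member_of_hwini() above shows: a block's hardware init runs only if BIT(type) is set in the active level's hwini_ip_block_mask. A standalone illustration (hwini_allows is a hypothetical helper mirroring the inline above):

static bool hwini_allows(const struct amdgpu_init_level *lvl,
                         enum amd_ip_block_type block)
{
        return (lvl->hwini_ip_block_mask & BIT(block)) != 0;
}

/*
 * hwini_allows(&amdgpu_init_minimal_xgmi, AMD_IP_BLOCK_TYPE_GMC) -> true
 * hwini_allows(&amdgpu_init_minimal_xgmi, AMD_IP_BLOCK_TYPE_GFX) -> false
 * hwini_allows(&amdgpu_init_default, any block)                  -> true
 */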
+
static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data);
/**
* DOC: pcie_replay_count
*
* The amdgpu driver provides a sysfs API for reporting the total number
- * of PCIe replays (NAKs)
+ * of PCIe replays (NAKs).
* The file pcie_replay_count is used for this and returns the total
- * number of replays as a sum of the NAKs generated and NAKs received
+ * number of replays as a sum of the NAKs generated and NAKs received.
*/
static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
@@ -227,6 +283,42 @@ void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
}
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ if (ip_block->version->funcs->suspend) {
+ r = ip_block->version->funcs->suspend(ip_block);
+ if (r) {
+ dev_err(ip_block->adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ return r;
+ }
+ }
+
+ ip_block->status.hw = false;
+ return 0;
+}
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ if (ip_block->version->funcs->resume) {
+ r = ip_block->version->funcs->resume(ip_block);
+ if (r) {
+ dev_err(ip_block->adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ return r;
+ }
+ }
+
+ ip_block->status.hw = true;
+ return 0;
+}
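
With these two wrappers, the open-coded suspend/resume loops later in this file collapse to a single call per block; the wrapper owns both the error message and the status.hw bookkeeping. A typical caller, in the shape the later hunks adopt:

        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
                if (r)
                        return r;
        }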
+
/**
* DOC: board_info
*
@@ -327,6 +419,9 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ return false;
+
if (adev->has_pr3 ||
((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
return true;
@@ -339,8 +434,8 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
* @dev: drm_device pointer
*
* Return:
- * 1 if the device supporte BACO;
- * 3 if the device support MACO (only works if BACO is supported)
+ * 1 if the device supports BACO;
+ * 3 if the device supports MACO (only works if BACO is supported)
* otherwise return 0.
*/
int amdgpu_device_supports_baco(struct drm_device *dev)
@@ -487,7 +582,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
}
/**
- * amdgpu_device_aper_access - access vram by vram aperature
+ * amdgpu_device_aper_access - access vram by vram aperture
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
@@ -578,7 +673,7 @@ bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
* here is that the GPU reset is not running on another thread in parallel.
*
* For this we trylock the read side of the reset semaphore, if that succeeds
- * we know that the reset is not running in paralell.
+ * we know that the reset is not running in parallel.
*
* If the trylock fails we assert that we are either already holding the read
* side of the lock or are the reset thread itself and hold the write side of
@@ -1309,6 +1404,7 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
amdgpu_psp_wait_for_bootloader(adev);
ret = amdgpu_atomfirmware_asic_init(adev, true);
@@ -1643,7 +1739,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
uint32_t fw_ver;
err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
- /* force vPost if error occured */
+ /* force vPost if error occurred */
if (err)
return true;
@@ -1655,7 +1751,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
}
/* Don't post if we need to reset whole hive on init */
- if (adev->gmc.xgmi.pending_reset)
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
return false;
if (adev->has_hw_reset) {
@@ -2075,7 +2171,7 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
- (void *)adev, state);
+ &adev->ip_blocks[i], state);
if (r)
DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -2109,7 +2205,7 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
- (void *)adev, state);
+ &adev->ip_blocks[i], state);
if (r)
DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -2159,9 +2255,12 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type == block_type) {
- r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
- if (r)
- return r;
+ if (adev->ip_blocks[i].version->funcs->wait_for_idle) {
+ r = adev->ip_blocks[i].version->funcs->wait_for_idle(
+ &adev->ip_blocks[i]);
+ if (r)
+ return r;
+ }
break;
}
}
@@ -2170,26 +2269,24 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_ip_is_idle - is the hardware IP idle
+ * amdgpu_device_ip_is_valid - is the hardware IP enabled
*
* @adev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
*
- * Check if the hardware IP is idle or not.
- * Returns true if it the IP is idle, false if not.
+ * Check if the hardware IP is enabled or not.
+ * Returns true if the IP is enabled, false if not.
*/
-bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
if (adev->ip_blocks[i].version->type == block_type)
- return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
+ return adev->ip_blocks[i].status.valid;
}
- return true;
+ return false;
}
@@ -2268,8 +2365,10 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
break;
}
- DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
- ip_block_version->funcs->name);
+ dev_info(adev->dev, "detected ip block number %d <%s>\n",
+ adev->num_ip_blocks, ip_block_version->funcs->name);
+
+ adev->ip_blocks[adev->num_ip_blocks].adev = adev;
adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
@@ -2285,7 +2384,7 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
* the module parameter virtual_display. This feature provides a virtual
* display hardware on headless boards or in virtualized environments.
* This function parses and validates the configuration string specified by
- * the user and configues the virtual display configuration (number of
+ * the user and configures the virtual display configuration (number of
* virtual connectors, crtcs, etc.) specified.
*/
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
@@ -2348,7 +2447,7 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
*
* Parses the asic configuration parameters specified in the gpu info
- * firmware and makes them availale to the driver for use in configuring
+ * firmware and makes them available to the driver for use in configuring
* the asic.
* Returns 0 on success, -EINVAL on failure.
*/
@@ -2389,6 +2488,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_gpu_info.bin", chip_name);
if (err) {
dev_err(adev->dev,
@@ -2408,7 +2508,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
/*
- * Should be droped when DAL no longer needs it.
+ * Should be dropped when DAL no longer needs it.
*/
if (adev->asic_type == CHIP_NAVI12)
goto parse_soc_bounding_box;
@@ -2566,25 +2666,25 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ ip_block = &adev->ip_blocks[i];
+
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_WARN("disabled ip block: %d <%s>\n",
i, adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
- } else {
- if (adev->ip_blocks[i].version->funcs->early_init) {
- r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
- if (r == -ENOENT) {
- adev->ip_blocks[i].status.valid = false;
- } else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- total = false;
- } else {
- adev->ip_blocks[i].status.valid = true;
- }
+ } else if (ip_block->version->funcs->early_init) {
+ r = ip_block->version->funcs->early_init(ip_block);
+ if (r == -ENOENT) {
+ adev->ip_blocks[i].status.valid = false;
+ } else if (r) {
+ DRM_ERROR("early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ total = false;
} else {
adev->ip_blocks[i].status.valid = true;
}
+ } else {
+ adev->ip_blocks[i].status.valid = true;
}
/* get the vbios after the asic_funcs are set up */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
@@ -2633,10 +2733,13 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hw)
continue;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
(amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -2658,7 +2761,10 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hw)
continue;
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -2681,6 +2787,10 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
continue;
+ if (!amdgpu_ip_member_of_hwini(adev,
+ AMD_IP_BLOCK_TYPE_PSP))
+ break;
+
if (!adev->ip_blocks[i].status.sw)
continue;
@@ -2689,22 +2799,18 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
break;
if (amdgpu_in_reset(adev) || adev->in_suspend) {
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
} else {
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
return r;
}
+ adev->ip_blocks[i].status.hw = true;
}
-
- adev->ip_blocks[i].status.hw = true;
break;
}
}
@@ -2786,6 +2892,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
*/
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
+ bool init_badpage;
int i, r;
r = amdgpu_ras_init(adev);
@@ -2795,17 +2902,23 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
- r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
- if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- goto init_failed;
+ if (adev->ip_blocks[i].version->funcs->sw_init) {
+ r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
+ if (r) {
+ DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ goto init_failed;
+ }
}
adev->ip_blocks[i].status.sw = true;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
+
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
/* need to do common hw init early so everything is set up for gmc */
- r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
goto init_failed;
@@ -2822,7 +2935,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
goto init_failed;
}
- r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r);
goto init_failed;
@@ -2895,7 +3008,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
* Note: theoretically, this should be called before all vram allocations
* to protect retired pages from being reused
*/
- r = amdgpu_ras_recovery_init(adev);
+ init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
+ r = amdgpu_ras_recovery_init(adev, init_badpage);
if (r)
goto init_failed;
@@ -2935,7 +3049,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
/* Don't init kfd if whole hive need to be reset during init */
- if (!adev->gmc.xgmi.pending_reset) {
+ if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
kgd2kfd_init_zone_device(adev);
amdgpu_amdkfd_device_init(adev);
}
@@ -2954,7 +3068,7 @@ init_failed:
*
* Writes a reset magic value to the gart pointer in VRAM. The driver calls
* this function before a GPU reset. If the value is retained after a
- * GPU reset, VRAM has not been lost. Some GPU resets may destry VRAM contents.
+ * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
*/
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
@@ -3030,7 +3144,7 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
state);
if (r) {
DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
@@ -3067,7 +3181,7 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
- r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
state);
if (r) {
DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
@@ -3135,7 +3249,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->funcs->late_init) {
- r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
if (r) {
DRM_ERROR("late_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -3151,7 +3265,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
return r;
}
- if (!amdgpu_in_reset(adev))
+ if (!amdgpu_reset_in_recovery(adev))
amdgpu_ras_set_error_query_ready(adev, true);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
@@ -3206,6 +3320,25 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
return 0;
}
+static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ if (!ip_block->version->funcs->hw_fini) {
+ DRM_ERROR("hw_fini of IP block <%s> not defined\n",
+ ip_block->version->funcs->name);
+ } else {
+ r = ip_block->version->funcs->hw_fini(ip_block);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ }
+ }
+
+ ip_block->status.hw = false;
+}
+
/**
* amdgpu_device_smu_fini_early - smu hw_fini wrapper
*
@@ -3215,7 +3348,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
*/
static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
{
- int i, r;
+ int i;
if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
return;
@@ -3224,13 +3357,7 @@ static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
- adev->ip_blocks[i].status.hw = false;
+ amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
break;
}
}
@@ -3244,7 +3371,7 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].version->funcs->early_fini)
continue;
- r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
if (r) {
DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
@@ -3256,21 +3383,14 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
amdgpu_amdkfd_suspend(adev, false);
- /* Workaroud for ASICs need to disable SMC first */
+ /* Workaround for ASICs that need to disable SMC first */
amdgpu_device_smu_fini_early(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.hw)
continue;
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
-
- adev->ip_blocks[i].status.hw = false;
+ amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
}
if (amdgpu_sriov_vf(adev)) {
@@ -3316,12 +3436,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_ib_pool_fini(adev);
amdgpu_seq64_fini(adev);
}
-
- r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ if (adev->ip_blocks[i].version->funcs->sw_fini) {
+ r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
}
adev->ip_blocks[i].status.sw = false;
adev->ip_blocks[i].status.valid = false;
@@ -3331,7 +3452,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.late_initialized)
continue;
if (adev->ip_blocks[i].version->funcs->late_fini)
- adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+ adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
adev->ip_blocks[i].status.late_initialized = false;
}
@@ -3364,7 +3485,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
WARN_ON_ONCE(adev->gfx.gfx_off_state);
WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
- if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
}
@@ -3403,15 +3524,9 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
continue;
/* XXX handle errors */
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
- /* XXX handle errors */
- if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -3449,14 +3564,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
/* skip unnecessary suspend if the blocks are not initialized yet */
- if (adev->gmc.xgmi.pending_reset &&
- !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
- adev->ip_blocks[i].status.hw = false;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
continue;
- }
/* skip suspend of gfx/mes and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
@@ -3490,13 +3600,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
continue;
/* XXX handle errors */
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
- /* XXX handle errors */
- if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
adev->ip_blocks[i].status.hw = false;
+
/* handle putting the SMC in the appropriate state */
if (!amdgpu_sriov_vf(adev)) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
@@ -3570,10 +3676,12 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
!block->status.valid)
continue;
- r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
- if (r)
+ r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
+ if (r) {
+ dev_err(adev->dev, "RE-INIT-early: %s failed\n",
+ block->version->funcs->name);
return r;
+ }
block->status.hw = true;
}
}
@@ -3583,7 +3691,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
- int i, r;
+ struct amdgpu_ip_block *block;
+ int i, r = 0;
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
@@ -3598,30 +3707,28 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
- int j;
- struct amdgpu_ip_block *block;
-
- for (j = 0; j < adev->num_ip_blocks; j++) {
- block = &adev->ip_blocks[j];
+ block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
- if (block->version->type != ip_order[i] ||
- !block->status.valid ||
- block->status.hw)
- continue;
+ if (!block)
+ continue;
- if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
- r = block->version->funcs->resume(adev);
- else
- r = block->version->funcs->hw_init(adev);
+ if (block->status.valid && !block->status.hw) {
+ if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = amdgpu_ip_block_resume(block);
+ } else {
+ r = block->version->funcs->hw_init(block);
+ }
- DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
- if (r)
- return r;
+ if (r) {
+ dev_err(adev->dev, "RE-INIT-late: %s failed\n",
+ block->version->funcs->name);
+ break;
+ }
block->status.hw = true;
}
}
- return 0;
+ return r;
}
/**
@@ -3648,13 +3755,9 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
- adev->ip_blocks[i].status.hw = true;
}
}
@@ -3666,7 +3769,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
*
- * First resume function for hardware IPs. The list of all the hardware
+ * Second resume function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the resume callbacks are run for
* all blocks except COMMON, GMC, and IH. resume puts the hardware into a
* functional state after a suspend and updates the software state as
@@ -3684,15 +3787,42 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Third resume function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the resume callbacks are run
+ * only for DCE. Resume puts the hardware into a functional state after a suspend
+ * and updates the software state as necessary. This function is also used
+ * for restoring the GPU after a GPU reset.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
+ return r;
}
- adev->ip_blocks[i].status.hw = true;
}
return 0;
@@ -3727,6 +3857,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ if (r)
+ return r;
+
+ amdgpu_fence_driver_hw_init(adev);
+
+ r = amdgpu_device_ip_resume_phase3(adev);
+
return r;
}
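
With phase3 in place, resume orders display last: phase1 (COMMON/GMC/IH, plus PSP on SR-IOV), phase2 (everything except DCE and PSP), fence hardware re-init, then phase3 (DCE only). A sketch of the tail of amdgpu_device_ip_resume() under that reading (the earlier phase calls and firmware/GART steps sit outside the visible context):

        r = amdgpu_device_ip_resume_phase2(adev);
        if (r)
                return r;

        if (adev->mman.buffer_funcs_ring->sched.ready)
                amdgpu_ttm_set_buffer_funcs_status(adev, true);

        /* fences must be rearmed before the display block resumes */
        amdgpu_fence_driver_hw_init(adev);

        r = amdgpu_device_ip_resume_phase3(adev);
        return r;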
@@ -4149,7 +4286,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* for throttling interrupt) = 60 seconds.
*/
ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1);
+
ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -4173,7 +4313,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/*
* Reset domain needs to be present early, before XGMI hive discovered
- * (if any) and intitialized to use reset sem and in_gpu reset flag
+ * (if any) and initialized to use reset sem and in_gpu reset flag
* early on during init and before calling to RREG32.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
@@ -4193,13 +4333,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_set_mcbp(adev);
+ /*
+ * Start in the default mode, where all blocks are expected to be
+ * initialized. At present, sw init of the blocks must complete
+ * before the need for a different level can be detected.
+ */
+ amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
return r;
/* Get rid of things like offb */
- r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
+ r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
if (r)
return r;
@@ -4265,20 +4411,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
if (adev->gmc.xgmi.num_physical_nodes) {
dev_info(adev->dev, "Pending hive reset.\n");
- adev->gmc.xgmi.pending_reset = true;
- /* Only need to init necessary block for SMU to handle the reset */
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
- if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
- DRM_DEBUG("IP %s disabled for hw_init.\n",
- adev->ip_blocks[i].version->funcs->name);
- adev->ip_blocks[i].status.hw = true;
- }
- }
+ amdgpu_set_init_level(adev,
+ AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
} else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
!amdgpu_device_has_display_hardware(adev)) {
r = psp_gpu_reset(adev);
@@ -4386,7 +4520,7 @@ fence_driver_init:
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
- if (!adev->gmc.xgmi.pending_reset) {
+ if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
r = amdgpu_device_ip_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
@@ -4436,6 +4570,7 @@ fence_driver_init:
amdgpu_fru_sysfs_init(adev);
amdgpu_reg_state_sysfs_init(adev);
+ amdgpu_xcp_cfg_sysfs_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4463,12 +4598,16 @@ fence_driver_init:
if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- if (adev->gmc.xgmi.pending_reset)
- queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
- msecs_to_jiffies(AMDGPU_RESUME_MS));
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+ amdgpu_xgmi_reset_on_init(adev);
amdgpu_device_check_iommu_direct_map(adev);
+ adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
+ r = register_pm_notifier(&adev->pm_nb);
+ if (r)
+ goto failed;
+
return 0;
release_ras_con:
@@ -4533,6 +4672,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
drain_workqueue(adev->mman.bdev.wq);
adev->shutdown = true;
+ unregister_pm_notifier(&adev->pm_nb);
+
/* make sure IB test finished before entering exclusive mode
* to avoid preemption on IB test
*/
@@ -4559,6 +4700,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_fru_sysfs_fini(adev);
amdgpu_reg_state_sysfs_fini(adev);
+ amdgpu_xcp_cfg_sysfs_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -4584,8 +4726,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
int idx;
bool px;
- amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
+ amdgpu_fence_driver_sw_fini(adev);
amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
adev->accel_working = false;
dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
@@ -4650,8 +4792,8 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
{
int ret;
- /* No need to evict vram on APUs for suspend to ram or s2idle */
- if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
+ /* No need to evict vram on APUs unless going to S4 */
+ if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
return 0;
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
@@ -4664,6 +4806,41 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* Suspend & resume.
*/
/**
+ * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
+ * @nb: notifier block
+ * @mode: suspend mode
+ * @data: data
+ *
+ * This function is called when the system is about to suspend or hibernate.
+ * It is used to evict resources from the device before the system goes to
+ * sleep while there is still access to swap.
+ */
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data)
+{
+ struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
+ int r;
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ adev->in_s4 = true;
+ fallthrough;
+ case PM_SUSPEND_PREPARE:
+ r = amdgpu_device_evict_resources(adev);
+ /*
+ * This is considered non-fatal at this time because
+ * amdgpu_device_prepare() will also evict resources and treat a failure there as fatal.
+ * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
+ */
+ if (r)
+ drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
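The notifier only takes effect once it is wired into the PM chain; later hunks in this file do exactly that in amdgpu_device_init() and undo it in amdgpu_device_fini_hw(). In sketch form:

        /* amdgpu_device_init(): register once the device is usable */
        adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
        r = register_pm_notifier(&adev->pm_nb);
        if (r)
                goto failed;

        /* amdgpu_device_fini_hw(): unhook before teardown proceeds */
        unregister_pm_notifier(&adev->pm_nb);
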
+/**
* amdgpu_device_prepare - prepare for device suspend
*
* @dev: drm dev pointer
@@ -4694,7 +4871,7 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
continue;
- r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
+ r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
if (r)
goto unprepare;
}
@@ -4702,7 +4879,7 @@ int amdgpu_device_prepare(struct drm_device *dev)
return 0;
unprepare:
- adev->in_s0ix = adev->in_s3 = false;
+ adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
return r;
}
@@ -4711,13 +4888,13 @@ unprepare:
* amdgpu_device_suspend - initiate device suspend
*
* @dev: drm dev pointer
- * @fbcon : notify the fbdev of suspend
+ * @notify_clients: notify in-kernel DRM clients
*
* Puts the hw in the suspend state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
-int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
@@ -4737,8 +4914,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
- if (fbcon)
- drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+ if (notify_clients)
+ drm_client_dev_suspend(adev_to_drm(adev), false);
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -4773,13 +4950,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
* amdgpu_device_resume - initiate device resume
*
* @dev: drm dev pointer
- * @fbcon : notify the fbdev of resume
+ * @notify_clients: notify in-kernel DRM clients
*
* Bring the hw back to operating state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
@@ -4809,7 +4986,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
goto exit;
}
- amdgpu_fence_driver_hw_init(adev);
if (!adev->in_s0ix) {
r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
@@ -4835,8 +5011,8 @@ exit:
/* Make sure IB tests flushed */
flush_delayed_work(&adev->delayed_init_work);
- if (fbcon)
- drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
+ if (notify_clients)
+ drm_client_dev_resume(adev_to_drm(adev), false);
amdgpu_ras_resume(adev);
@@ -4898,7 +5074,8 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->funcs->check_soft_reset)
adev->ip_blocks[i].status.hang =
- adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+ adev->ip_blocks[i].version->funcs->check_soft_reset(
+ &adev->ip_blocks[i]);
if (adev->ip_blocks[i].status.hang) {
dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
@@ -4927,7 +5104,7 @@ static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->pre_soft_reset) {
- r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -4989,7 +5166,7 @@ static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->soft_reset) {
- r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -5018,7 +5195,7 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->post_soft_reset)
- r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -5053,7 +5230,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
- amdgpu_ras_set_fed(adev, false);
+ amdgpu_ras_clear_err_state(adev);
amdgpu_irq_gpu_reset_resume_helper(adev);
/* some sw clean up VF needs to do before recover */
@@ -5103,20 +5280,25 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
amdgpu_ras_resume(adev);
+
+ amdgpu_virt_ras_telemetry_post_reset(adev);
+
return 0;
}
/**
- * amdgpu_device_has_job_running - check if there is any job in mirror list
+ * amdgpu_device_has_job_running - check if there is any unfinished job
*
* @adev: amdgpu_device pointer
*
- * check if there is any job in mirror list
+ * Check if there is any job running on the device when the guest driver
+ * receives an FLR notification from the host driver. If there are still jobs
+ * running, the guest driver will not respond to the FLR reset. Instead, it
+ * lets the job hit the timeout and then issues the reset request itself.
*/
bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
{
int i;
- struct drm_sched_job *job;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -5124,11 +5306,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
if (!amdgpu_ring_sched_ready(ring))
continue;
- spin_lock(&ring->sched.job_list_lock);
- job = list_first_entry_or_null(&ring->sched.pending_list,
- struct drm_sched_job, list);
- spin_unlock(&ring->sched.job_list_lock);
- if (job)
+ if (amdgpu_fence_count_emitted(ring))
return true;
}
return false;
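The rework above replaces peeking at the scheduler's pending list with amdgpu_fence_count_emitted(). Conceptually (a simplified sketch assuming the fence_drv field names; the real helper also processes outstanding fences first), the count is the gap between the last emitted and the last signalled fence sequence numbers:

/* A ring is busy if fences were emitted but not yet signalled. */
static unsigned int fence_count_emitted_sketch(struct amdgpu_ring *ring)
{
	u32 signalled = atomic_read(&ring->fence_drv.last_seq);
	u32 emitted = READ_ONCE(ring->fence_drv.sync_seq);

	return emitted - signalled;	/* u32 math handles wraparound; 0 => idle */
}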
@@ -5309,7 +5487,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
for (i = 0; i < tmp_adev->num_ip_blocks; i++)
if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
tmp_adev->ip_blocks[i].version->funcs
- ->dump_ip_state((void *)tmp_adev);
+ ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
}
@@ -5325,76 +5503,35 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
return r;
}
-int amdgpu_do_asic_reset(struct list_head *device_list_handle,
- struct amdgpu_reset_context *reset_context)
+int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
{
- struct amdgpu_device *tmp_adev = NULL;
- bool need_full_reset, skip_hw_reset, vram_lost = false;
- int r = 0;
+ struct list_head *device_list_handle;
+ bool full_reset, vram_lost = false;
+ struct amdgpu_device *tmp_adev;
+ int r, init_level;
- /* Try reset handler method first */
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
+ device_list_handle = reset_context->reset_device_list;
- reset_context->reset_device_list = device_list_handle;
- r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
- /* If reset handler not implemented, continue; otherwise return */
- if (r == -EOPNOTSUPP)
- r = 0;
- else
- return r;
+ if (!device_list_handle)
+ return -EINVAL;
- /* Reset handler not implemented, use the default method */
- need_full_reset =
- test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
- skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+ full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
- /*
- * ASIC reset has to be done on all XGMI hive nodes ASAP
- * to allow proper links negotiation in FW (within 1 sec)
+	/*
+	 * If this is a reset on init, use the default init level; otherwise
+	 * keep the level at recovery level.
*/
- if (!skip_hw_reset && need_full_reset) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- /* For XGMI run all resets in parallel to speed up the process */
- if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- tmp_adev->gmc.xgmi.pending_reset = false;
- if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
- r = -EALREADY;
- } else
- r = amdgpu_asic_reset(tmp_adev);
-
- if (r) {
- dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
- r, adev_to_drm(tmp_adev)->unique);
- goto out;
- }
- }
-
- /* For XGMI wait for all resets to complete before proceed */
- if (!r) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
- flush_work(&tmp_adev->xgmi_reset_work);
- r = tmp_adev->asic_reset_res;
- if (r)
- break;
- }
- }
- }
- }
-
- if (!r && amdgpu_ras_intr_triggered()) {
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
- }
-
- amdgpu_ras_intr_cleared();
- }
+ if (reset_context->method == AMD_RESET_METHOD_ON_INIT)
+ init_level = AMDGPU_INIT_LEVEL_DEFAULT;
+ else
+ init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY;
+ r = 0;
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- if (need_full_reset) {
+ amdgpu_set_init_level(tmp_adev, init_level);
+ if (full_reset) {
/* post card */
- amdgpu_ras_set_fed(tmp_adev, false);
+ amdgpu_ras_clear_err_state(tmp_adev);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
dev_warn(tmp_adev->dev, "asic atom init failed!");
@@ -5431,6 +5568,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
+ r = amdgpu_device_ip_resume_phase3(tmp_adev);
+ if (r)
+ goto out;
+
if (vram_lost)
amdgpu_device_fill_reset_magic(tmp_adev);
@@ -5448,7 +5589,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
if (r)
goto out;
- drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
+ drm_client_dev_resume(adev_to_drm(tmp_adev), false);
/*
* The GPU enters bad state once faulty pages
@@ -5478,11 +5619,13 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
out:
if (!r) {
+			/* IP init is complete now, set level back to default */
+ amdgpu_set_init_level(tmp_adev,
+ AMDGPU_INIT_LEVEL_DEFAULT);
amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
r = amdgpu_ib_ring_tests(tmp_adev);
if (r) {
dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
- need_full_reset = true;
r = -EAGAIN;
goto end;
}
@@ -5493,10 +5636,85 @@ out:
}
end:
- if (need_full_reset)
+ return r;
+}
+
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ bool need_full_reset, skip_hw_reset;
+ int r = 0;
+
+ /* Try reset handler method first */
+ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+ reset_list);
+
+ reset_context->reset_device_list = device_list_handle;
+ r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
+ /* If reset handler not implemented, continue; otherwise return */
+ if (r == -EOPNOTSUPP)
+ r = 0;
+ else
+ return r;
+
+ /* Reset handler not implemented, use the default method */
+ need_full_reset =
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+
+ /*
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
+ * to allow proper links negotiation in FW (within 1 sec)
+ */
+ if (!skip_hw_reset && need_full_reset) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_unbound_wq,
+ &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
+ r, adev_to_drm(tmp_adev)->unique);
+ goto out;
+ }
+ }
+
+ /* For XGMI wait for all resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ reset_list) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+ }
+
+ if (!r && amdgpu_ras_intr_triggered()) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ amdgpu_ras_reset_error_count(tmp_adev,
+ AMDGPU_RAS_BLOCK__MMHUB);
+ }
+
+ amdgpu_ras_intr_cleared();
+ }
+
+ r = amdgpu_device_reinit_after_reset(reset_context);
+ if (r == -EAGAIN)
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
else
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
+out:
return r;
}
@@ -5647,6 +5865,18 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
/*
+ * If it reaches here because of hang/timeout and a RAS error is
+ * detected at the same time, let RAS recovery take care of it.
+ */
+ if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
+ !amdgpu_sriov_vf(adev) &&
+ reset_context->src != AMDGPU_RESET_SRC_RAS) {
+ dev_dbg(adev->dev,
+			"GPU recovery from source: %d yielding to RAS error recovery handling",
+ reset_context->src);
+ return 0;
+ }
+ /*
* Special case: RAS triggered and full reset isn't supported
*/
need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
@@ -5729,12 +5959,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
/*
- * Mark these ASICs to be reseted as untracked first
+ * Mark these ASICs to be reset as untracked first
* And add them back after reset completed
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
+ drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
/* disable ras on ALL IPs */
if (!need_emergency_restart &&
@@ -5824,7 +6054,7 @@ skip_hw_reset:
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched);
+ drm_sched_start(&ring->sched, 0);
}
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
@@ -5928,19 +6158,56 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
}
/**
+ * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
+ *
+ * @adev: amdgpu_device pointer
+ * @speed: pointer to the speed of the link
+ * @width: pointer to the width of the link
+ *
+ * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
+ * AMD dGPU, which may sit behind a virtual upstream bridge internal to the card.
+ */
+static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ struct pci_dev *parent = adev->pdev;
+
+ if (!speed || !width)
+ return;
+
+ parent = pci_upstream_bridge(parent);
+ if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ while ((parent = pci_upstream_bridge(parent))) {
+ if (parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ }
+ }
+ } else {
+ /* use the device itself */
+ *speed = pcie_get_speed_cap(adev->pdev);
+ *width = pcie_get_width_cap(adev->pdev);
+ }
+}
+
+/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
*
* @adev: amdgpu_device pointer
*
- * Fetchs and stores in the driver the PCIE capabilities (gen speed
+ * Fetches and stores in the driver the PCIE capabilities (gen speed
* and lanes) of the slot the device is in. Handles APUs and
* virtualized environments where PCIE config space may not be available.
*/
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
- struct pci_dev *pdev;
enum pci_bus_speed speed_cap, platform_speed_cap;
- enum pcie_link_width platform_link_width;
+ enum pcie_link_width platform_link_width, link_width;
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -5962,11 +6229,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
&platform_link_width);
+ amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
if (adev->pm.pcie_gen_mask == 0) {
/* asic caps */
- pdev = adev->pdev;
- speed_cap = pcie_get_speed_cap(pdev);
if (speed_cap == PCI_SPEED_UNKNOWN) {
adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
@@ -6022,51 +6288,103 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
}
}
if (adev->pm.pcie_mlw_mask == 0) {
+ /* asic caps */
+ if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
+ } else {
+ switch (link_width) {
+ case PCIE_LNK_X32:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X16:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X12:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X8:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X4:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X2:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X1:
+ adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* platform caps */
if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
} else {
switch (platform_link_width) {
case PCIE_LNK_X32:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X16:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X12:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X8:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X4:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X2:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case PCIE_LNK_X1:
- adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+ adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
@@ -6092,6 +6410,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
bool p2p_access =
!adev->gmc.xgmi.connected_to_cpu &&
!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
+ if (!p2p_access)
+ dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
+ pci_name(peer_adev->pdev));
bool is_large_bar = adev->gmc.visible_vram_size &&
adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
@@ -6331,7 +6652,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
if (!amdgpu_ring_sched_ready(ring))
continue;
- drm_sched_start(&ring->sched);
+ drm_sched_start(&ring->sched, 0);
}
amdgpu_device_unset_mp1_state(adev);
@@ -6344,6 +6665,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
struct amdgpu_device *adev = drm_to_adev(dev);
int r;
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
r = pci_save_state(pdev);
if (!r) {
kfree(adev->pci_state);
@@ -6604,3 +6928,47 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
}
return ret;
}
+
+ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
+{
+ ssize_t size = 0;
+
+ if (!ring || !ring->adev)
+ return size;
+
+ if (amdgpu_device_should_recover_gpu(ring->adev))
+ size |= AMDGPU_RESET_TYPE_FULL;
+
+ if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
+ !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
+ size |= AMDGPU_RESET_TYPE_SOFT_RESET;
+
+ return size;
+}
+
+ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
+{
+ ssize_t size = 0;
+
+ if (supported_reset == 0) {
+ size += sysfs_emit_at(buf, size, "unsupported");
+ size += sysfs_emit_at(buf, size, "\n");
+		return size;
+	}
+
+ if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
+ size += sysfs_emit_at(buf, size, "soft ");
+
+ if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
+ size += sysfs_emit_at(buf, size, "queue ");
+
+ if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
+ size += sysfs_emit_at(buf, size, "pipe ");
+
+ if (supported_reset & AMDGPU_RESET_TYPE_FULL)
+ size += sysfs_emit_at(buf, size, "full ");
+
+ size += sysfs_emit_at(buf, size, "\n");
+ return size;
+}
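amdgpu_show_reset_mask() formats the supported-reset bitmask for the per-IP reset_mask sysfs files. A hedged sketch of a show() callback built on it (the gfx_supported_reset field name is an assumption for illustration):

static ssize_t gfx_reset_mask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	/* emits e.g. "soft queue pipe full\n" depending on the mask */
	return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
}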
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 4bd61c169ca8..949d74eff294 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -104,7 +104,9 @@
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
+#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
+#include "jpeg_v5_0_1.h"
#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
@@ -1340,7 +1342,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
*/
if (adev->vcn.num_vcn_inst <
AMDGPU_MAX_VCN_INSTANCES) {
- adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
ip->revision & 0xc0;
adev->vcn.num_vcn_inst++;
adev->vcn.inst_mask |=
@@ -1705,7 +1707,7 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
* so this won't overflow.
*/
for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
- adev->vcn.vcn_codec_disable_mask[v] =
+ adev->vcn.inst[v].vcn_codec_disable_mask =
le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
}
break;
@@ -1723,45 +1725,85 @@ union nps_info {
struct nps_info_v1_0 v1;
};
+static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
+ union nps_info *nps_data)
+{
+ uint64_t vram_size, pos, offset;
+ struct nps_info_header *nhdr;
+ struct binary_header bhdr;
+ uint16_t checksum;
+
+ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+ pos = vram_size - DISCOVERY_TMR_OFFSET;
+ amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
+
+ offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
+ checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
+
+ amdgpu_device_vram_access(adev, (pos + offset), nps_data,
+ sizeof(*nps_data), false);
+
+ nhdr = (struct nps_info_header *)(nps_data);
+ if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
+ le32_to_cpu(nhdr->size_bytes),
+ checksum)) {
+ dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
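The refresh path re-reads the NPS table directly from the reserved VRAM region and re-verifies its checksum. The discovery checksum appears to be a plain byte-wise sum compared against the 16-bit value in the binary header; a sketch under that assumption (see amdgpu_discovery_verify_checksum() for the real check):

static uint16_t nps_checksum_sketch(const uint8_t *data, uint32_t size)
{
	uint16_t sum = 0;

	/* sum every byte of the table body */
	while (size--)
		sum += *data++;
	return sum;	/* compare against bhdr.table_list[NPS_INFO].checksum */
}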
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
uint32_t *nps_type,
struct amdgpu_gmc_memrange **ranges,
- int *range_cnt)
+ int *range_cnt, bool refresh)
{
struct amdgpu_gmc_memrange *mem_ranges;
struct binary_header *bhdr;
union nps_info *nps_info;
+ union nps_info nps_data;
u16 offset;
- int i;
+ int i, r;
if (!nps_type || !range_cnt || !ranges)
return -EINVAL;
- if (!adev->mman.discovery_bin) {
- dev_err(adev->dev,
- "fetch mem range failed, ip discovery uninitialized\n");
- return -EINVAL;
- }
+ if (refresh) {
+ r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
+ if (r)
+ return r;
+ nps_info = &nps_data;
+ } else {
+ if (!adev->mman.discovery_bin) {
+ dev_err(adev->dev,
+ "fetch mem range failed, ip discovery uninitialized\n");
+ return -EINVAL;
+ }
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
- if (!offset)
- return -ENOENT;
+ if (!offset)
+ return -ENOENT;
- /* If verification fails, return as if NPS table doesn't exist */
- if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
- return -ENOENT;
+ /* If verification fails, return as if NPS table doesn't exist */
+ if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
+ return -ENOENT;
- nps_info = (union nps_info *)(adev->mman.discovery_bin + offset);
+ nps_info =
+ (union nps_info *)(adev->mman.discovery_bin + offset);
+ }
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
case 1:
+ mem_ranges = kvcalloc(nps_info->v1.count,
+ sizeof(*mem_ranges),
+ GFP_KERNEL);
+ if (!mem_ranges)
+ return -ENOMEM;
*nps_type = nps_info->v1.nps_type;
*range_cnt = nps_info->v1.count;
- mem_ranges = kvzalloc(
- *range_cnt * sizeof(struct amdgpu_gmc_memrange),
- GFP_KERNEL);
for (i = 0; i < *range_cnt; i++) {
mem_ranges[i].base_address =
nps_info->v1.instance_info[i].base_address;
@@ -1796,6 +1838,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -1850,6 +1893,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -1973,6 +2017,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
case IP_VERSION(14, 0, 0):
case IP_VERSION(14, 0, 1):
@@ -2144,6 +2189,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
break;
case IP_VERSION(10, 1, 10):
@@ -2198,6 +2244,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(4, 4, 2):
case IP_VERSION(4, 4, 5):
+ case IP_VERSION(4, 4, 4):
amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
break;
case IP_VERSION(5, 0, 0):
@@ -2321,6 +2368,10 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
break;
+ case IP_VERSION(5, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
+ break;
default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
@@ -2365,6 +2416,7 @@ static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
aqua_vanjaram_init_soc_config(adev);
break;
default:
@@ -2492,6 +2544,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
+ adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
} else {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
@@ -2508,6 +2561,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
+ adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
}
break;
case CHIP_VEGA20:
@@ -2610,6 +2664,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
adev->family = AMDGPU_FAMILY_AI;
break;
case IP_VERSION(9, 1, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index f5d36525ec3e..b44d56465c5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -33,6 +33,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
uint32_t *nps_type,
struct amdgpu_gmc_memrange **ranges,
- int *range_cnt);
+ int *range_cnt, bool refresh);
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b119d27271c1..35c778426a7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -33,6 +33,7 @@
#include "soc15_common.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
+#include "bif/bif_4_1_d.h"
#include <asm/div64.h>
#include <linux/pci.h>
@@ -1788,3 +1789,82 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
return 0;
}
+/* panic_abo is set in amdgpu_display_get_scanout_buffer() and only used in
+ * amdgpu_display_set_pixel(). Both are called from the panic handler, and
+ * protected by the drm_panic spinlock.
+ */
+static struct amdgpu_bo *panic_abo;
+
+/* Use the indirect MMIO aperture to write each pixel to GPU VRAM.
+ * This is a simplified version of amdgpu_device_mm_access().
+ */
+static void amdgpu_display_set_pixel(struct drm_scanout_buffer *sb,
+ unsigned int x,
+ unsigned int y,
+ u32 color)
+{
+ struct amdgpu_res_cursor cursor;
+ unsigned long offset;
+ struct amdgpu_bo *abo = panic_abo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ uint32_t tmp;
+
+ offset = x * 4 + y * sb->pitch[0];
+ amdgpu_res_first(abo->tbo.resource, offset, 4, &cursor);
+
+ tmp = cursor.start >> 31;
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t) cursor.start) | 0x80000000);
+ if (tmp != 0xffffffff)
+ WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+ WREG32_NO_KIQ(mmMM_DATA, color);
+}
+
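The pixel writer uses the MM_INDEX/MM_INDEX_HI/MM_DATA indirect aperture: the low 31 bits of the VRAM offset go into MM_INDEX together with bit 31 as the aperture-select flag, the remaining high bits go into MM_INDEX_HI, and the payload moves through MM_DATA. A read through the same window would look like this sketch (same register macros assumed):

static u32 vram_read32_sketch(struct amdgpu_device *adev, u64 offset)
{
	WREG32_NO_KIQ(mmMM_INDEX, lower_32_bits(offset) | 0x80000000);
	WREG32_NO_KIQ(mmMM_INDEX_HI, offset >> 31);
	return RREG32_NO_KIQ(mmMM_DATA);
}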
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct amdgpu_bo *abo;
+ struct drm_framebuffer *fb = plane->state->fb;
+
+ if (!fb)
+ return -EINVAL;
+
+ DRM_DEBUG_KMS("Framebuffer %dx%d %p4cc\n", fb->width, fb->height, &fb->format->format);
+
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
+ if (!abo)
+ return -EINVAL;
+
+ sb->width = fb->width;
+ sb->height = fb->height;
+ /* Use the generic linear format, because tiling will be disabled in panic_flush() */
+ sb->format = drm_format_info(fb->format->format);
+ if (!sb->format)
+ return -EINVAL;
+
+ sb->pitch[0] = fb->pitches[0];
+
+ if (abo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) {
+ if (abo->tbo.resource->mem_type != TTM_PL_VRAM) {
+ drm_warn(plane->dev, "amdgpu panic, framebuffer not in VRAM\n");
+ return -EINVAL;
+ }
+		/* Only handle 32-bit formats, to simplify MMIO access */
+ if (fb->format->cpp[0] != 4) {
+ drm_warn(plane->dev, "amdgpu panic, pixel format is not 32bits\n");
+ return -EINVAL;
+ }
+ sb->set_pixel = amdgpu_display_set_pixel;
+ panic_abo = abo;
+ return 0;
+ }
+ if (!abo->kmap.virtual &&
+ ttm_bo_kmap(&abo->tbo, 0, PFN_UP(abo->tbo.base.size), &abo->kmap)) {
+ drm_warn(plane->dev, "amdgpu bo map failed, panic won't be displayed\n");
+ return -ENOMEM;
+ }
+ if (abo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK)
+ iosys_map_set_vaddr_iomem(&sb->map[0], abo->kmap.virtual);
+ else
+ iosys_map_set_vaddr(&sb->map[0], abo->kmap.virtual);
+
+ return 0;
+}
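amdgpu_display_get_scanout_buffer() is the drm_panic entry point; a driver exposes it through its plane helpers. A sketch of the wiring (the helper struct here is illustrative; .get_scanout_buffer is the real drm_panic hook in struct drm_plane_helper_funcs):

static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
	/* ... the plane's atomic check/update callbacks ... */
	.get_scanout_buffer = amdgpu_display_get_scanout_buffer,
};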
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 9d19940f73c8..dfa0d642ac16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -23,6 +23,8 @@
#ifndef __AMDGPU_DISPLAY_H__
#define __AMDGPU_DISPLAY_H__
+#include <drm/drm_panic.h>
+
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
@@ -49,4 +51,7 @@ amdgpu_lookup_format_info(u32 format, uint64_t modifier);
int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 8e81a83d37d8..9f627caedc3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -36,6 +36,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
#include <drm/amdgpu_drm.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
@@ -60,6 +61,8 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
+ amdgpu_vm_bo_update_shared(bo);
+
return 0;
}
@@ -345,7 +348,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
/* FIXME: This should be after the "if", but needs a fix to make sure
* DMABuf imports are initialized in the right VM list.
*/
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(bo, false);
if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 81d9877c8735..dce9323fb410 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -23,6 +23,7 @@
*/
#include <drm/amdgpu_drm.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem.h>
@@ -118,9 +119,10 @@
* - 3.57.0 - Compute tunneling on GFX10+
* - 3.58.0 - Add GFX12 DCC support
* - 3.59.0 - Cleared VRAM
+ * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 59
+#define KMS_DRIVER_MINOR 60
#define KMS_DRIVER_PATCHLEVEL 0
/*
@@ -231,8 +233,6 @@ int amdgpu_wbrf = -1;
int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
-static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
-
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
"DRM_UT_DRIVER",
@@ -247,9 +247,6 @@ DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
- .delayed_reset_work = __DELAYED_WORK_INITIALIZER(
- mgpu_info.delayed_reset_work,
- amdgpu_drv_delayed_reset_work_handler, 0),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;
@@ -284,7 +281,7 @@ module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
/**
* DOC: gttsize (int)
* Restrict the size of GTT domain (for userspace use) in MiB for testing.
- * The default is -1 (Use 1/2 RAM, minimum value is 3GB).
+ * The default is -1 (Use value specified by TTM).
*/
MODULE_PARM_DESC(gttsize, "Size of the GTT userspace domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
@@ -403,7 +400,7 @@ module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
* the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
*/
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
-module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+module_param_named_unsafe(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
/**
* DOC: bapm (int)
@@ -461,7 +458,7 @@ module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
* Enable experimental hw support (1 = enable). The default is 0 (disabled).
*/
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
-module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+module_param_named_unsafe(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
/**
* DOC: dc (int)
@@ -572,14 +569,14 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
* Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
*/
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
-module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+module_param_named_unsafe(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
* DOC: emu_mode (int)
* Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
*/
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
-module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
+module_param_named_unsafe(emu_mode, amdgpu_emu_mode, int, 0444);
/**
* DOC: ras_enable (int)
@@ -734,7 +731,7 @@ module_param_named(noretry, amdgpu_noretry, int, 0644);
*/
MODULE_PARM_DESC(force_asic_type,
"A non negative value used to specify the asic type for all supported GPUs");
-module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+module_param_named_unsafe(force_asic_type, amdgpu_force_asic_type, int, 0444);
/**
* DOC: use_xgmi_p2p (int)
@@ -753,7 +750,7 @@ module_param_named(use_xgmi_p2p, amdgpu_use_xgmi_p2p, int, 0444);
* assigns queues to HQDs.
*/
int sched_policy = KFD_SCHED_POLICY_HWS;
-module_param(sched_policy, int, 0444);
+module_param_unsafe(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
@@ -803,7 +800,7 @@ MODULE_PARM_DESC(send_sigterm,
* Setting 1 enables halt on hang.
*/
int halt_if_hws_hang;
-module_param(halt_if_hws_hang, int, 0644);
+module_param_unsafe(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
/**
@@ -812,7 +809,7 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
* check says. Default value: false (rely on MEC2 firmware version check).
*/
bool hws_gws_support;
-module_param(hws_gws_support, bool, 0444);
+module_param_unsafe(hws_gws_support, bool, 0444);
MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
/**
@@ -845,7 +842,7 @@ MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = defa
*/
int amdgpu_no_queue_eviction_on_vm_fault;
MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
-module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
+module_param_named_unsafe(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
#endif
/**
@@ -853,7 +850,7 @@ module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm
*/
int amdgpu_mtype_local;
MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)");
-module_param_named(mtype_local, amdgpu_mtype_local, int, 0444);
+module_param_named_unsafe(mtype_local, amdgpu_mtype_local, int, 0444);
/**
* DOC: pcie_p2p (bool)
@@ -892,7 +889,7 @@ module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);
* the ABM algorithm, with 1 being the least reduction and 4 being the most
* reduction.
*
- * Defaults to -1, or disabled. Userspace can only override this level after
+ * Defaults to -1, or auto. Userspace can only override this level after
* boot if it's set to auto.
*/
int amdgpu_dm_abm_level = -1;
@@ -957,7 +954,7 @@ module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
-module_param_named(reset_method, amdgpu_reset_method, int, 0644);
+module_param_named_unsafe(reset_method, amdgpu_reset_method, int, 0644);
/**
* DOC: bad_page_threshold (int) Bad page threshold is specifies the
@@ -1053,7 +1050,7 @@ module_param_named(seamless, amdgpu_seamless, int, 0444);
* - 0x4: Disable GPU soft recovery, always do a full reset
*/
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
-module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
+module_param_named_unsafe(debug_mask, amdgpu_debug_mask, uint, 0444);
/**
* DOC: agp (int)
@@ -2365,11 +2362,15 @@ retry_init:
*/
if (adev->mode_info.mode_config_initialized &&
!list_empty(&adev_to_drm(adev)->mode_config.connector_list)) {
+ const struct drm_format_info *format;
+
/* select 8 bpp console on low vram cards */
if (adev->gmc.real_vram_size <= (32*1024*1024))
- drm_fbdev_ttm_setup(adev_to_drm(adev), 8);
+ format = drm_format_info(DRM_FORMAT_C8);
else
- drm_fbdev_ttm_setup(adev_to_drm(adev), 32);
+ format = NULL;
+
+ drm_client_setup(adev_to_drm(adev), format);
}
ret = amdgpu_debugfs_init(adev);
@@ -2434,6 +2435,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct amdgpu_device *adev = drm_to_adev(dev);
amdgpu_xcp_dev_unplug(adev);
+ amdgpu_gmc_prepare_nps_mode_change(adev);
drm_dev_unplug(dev);
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
@@ -2472,82 +2474,6 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
adev->mp1_state = PP_MP1_STATE_NONE;
}
-/**
- * amdgpu_drv_delayed_reset_work_handler - work handler for reset
- *
- * @work: work_struct.
- */
-static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
-{
- struct list_head device_list;
- struct amdgpu_device *adev;
- int i, r;
- struct amdgpu_reset_context reset_context;
-
- memset(&reset_context, 0, sizeof(reset_context));
-
- mutex_lock(&mgpu_info.mutex);
- if (mgpu_info.pending_reset == true) {
- mutex_unlock(&mgpu_info.mutex);
- return;
- }
- mgpu_info.pending_reset = true;
- mutex_unlock(&mgpu_info.mutex);
-
- /* Use a common context, just need to make sure full reset is done */
- reset_context.method = AMD_RESET_METHOD_NONE;
- set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
-
- for (i = 0; i < mgpu_info.num_dgpu; i++) {
- adev = mgpu_info.gpu_ins[i].adev;
- reset_context.reset_req_dev = adev;
- r = amdgpu_device_pre_asic_reset(adev, &reset_context);
- if (r) {
- dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
- r, adev_to_drm(adev)->unique);
- }
- if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work))
- r = -EALREADY;
- }
- for (i = 0; i < mgpu_info.num_dgpu; i++) {
- adev = mgpu_info.gpu_ins[i].adev;
- flush_work(&adev->xgmi_reset_work);
- adev->gmc.xgmi.pending_reset = false;
- }
-
- /* reset function will rebuild the xgmi hive info , clear it now */
- for (i = 0; i < mgpu_info.num_dgpu; i++)
- amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev);
-
- INIT_LIST_HEAD(&device_list);
-
- for (i = 0; i < mgpu_info.num_dgpu; i++)
- list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list);
-
- /* unregister the GPU first, reset function will add them back */
- list_for_each_entry(adev, &device_list, reset_list)
- amdgpu_unregister_gpu_instance(adev);
-
- /* Use a common context, just need to make sure full reset is done */
- set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
- set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
- r = amdgpu_do_asic_reset(&device_list, &reset_context);
-
- if (r) {
- DRM_ERROR("reinit gpus failure");
- return;
- }
- for (i = 0; i < mgpu_info.num_dgpu; i++) {
- adev = mgpu_info.gpu_ins[i].adev;
- if (!adev->kfd.init_complete) {
- kgd2kfd_init_zone_device(adev);
- amdgpu_amdkfd_device_init(adev);
- amdgpu_amdkfd_drm_client_create(adev);
- }
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
- }
-}
-
static int amdgpu_pmops_prepare(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
@@ -2580,7 +2506,6 @@ static int amdgpu_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- adev->suspend_complete = false;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
@@ -2595,7 +2520,6 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- adev->suspend_complete = true;
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);
@@ -2629,7 +2553,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
- adev->in_s4 = true;
r = amdgpu_device_suspend(drm_dev, true);
adev->in_s4 = false;
if (r)
@@ -2982,6 +2905,7 @@ static const struct drm_driver amdgpu_kms_driver = {
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
+ DRM_FBDEV_TTM_DRIVER_OPS,
.fops = &amdgpu_driver_kms_fops,
.release = &amdgpu_driver_release_kms,
#ifdef CONFIG_PROC_FS
@@ -2992,7 +2916,6 @@ static const struct drm_driver amdgpu_kms_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
@@ -3008,6 +2931,7 @@ const struct drm_driver amdgpu_partition_driver = {
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
+ DRM_FBDEV_TTM_DRIVER_OPS,
.fops = &amdgpu_driver_kms_fops,
.release = &amdgpu_driver_release_kms,
@@ -3015,7 +2939,6 @@ const struct drm_driver amdgpu_partition_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
@@ -3068,6 +2991,12 @@ static int __init amdgpu_init(void)
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
amdgpu_amdkfd_init();
+ if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) {
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+		pr_crit("Overdrive is enabled, please disable it before reporting any bugs unrelated to overdrive.\n");
+ }
+
/* let modprobe override vga console setting */
return pci_register_driver(&amdgpu_kms_pci_driver);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index 5bc2cb661af7..2d86cc6f7f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -40,7 +40,6 @@
#define DRIVER_NAME "amdgpu"
#define DRIVER_DESC "AMD GPU"
-#define DRIVER_DATE "20150101"
extern const struct drm_driver amdgpu_partition_driver;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
index 35fee3e8cde2..8cd69836dd99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
@@ -200,7 +200,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
dev_err_ratelimited(&i2c_adap->dev,
"maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d",
eeprom_addr, buf_size,
- read ? "read" : "write", EEPROM_OFFSET_SIZE);
+ str_read_write(read), EEPROM_OFFSET_SIZE);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index c7df7fa3459f..91d638098889 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
@@ -59,20 +60,20 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_mem_stats stats;
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
ktime_t usage[AMDGPU_HW_IP_NUM];
- unsigned int hw_ip;
- int ret;
-
- memset(&stats, 0, sizeof(stats));
-
- ret = amdgpu_bo_reserve(vm->root.bo, false);
- if (ret)
- return;
-
- amdgpu_vm_get_memory(vm, &stats);
- amdgpu_bo_unreserve(vm->root.bo);
+ const char *pl_name[] = {
+ [TTM_PL_VRAM] = "vram",
+ [TTM_PL_TT] = "gtt",
+ [TTM_PL_SYSTEM] = "cpu",
+ [AMDGPU_PL_GDS] = "gds",
+ [AMDGPU_PL_GWS] = "gws",
+ [AMDGPU_PL_OA] = "oa",
+ [AMDGPU_PL_DOORBELL] = "doorbell",
+ };
+ unsigned int hw_ip, i;
+ amdgpu_vm_get_memory(vm, stats);
amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
/*
@@ -82,24 +83,35 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
*/
drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
- drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
- drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
- drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
- drm_printf(p, "amd-memory-visible-vram:\t%llu KiB\n",
- stats.visible_vram/1024UL);
+
+ for (i = 0; i < ARRAY_SIZE(pl_name); i++) {
+ if (!pl_name[i])
+ continue;
+
+ drm_print_memory_stats(p,
+ &stats[i].drm,
+ DRM_GEM_OBJECT_RESIDENT |
+ DRM_GEM_OBJECT_PURGEABLE,
+ pl_name[i]);
+ }
+
+ /* Legacy amdgpu keys, alias to drm-resident-memory-: */
+ drm_printf(p, "drm-memory-vram:\t%llu KiB\n",
+ stats[TTM_PL_VRAM].drm.resident/1024UL);
+ drm_printf(p, "drm-memory-gtt: \t%llu KiB\n",
+ stats[TTM_PL_TT].drm.resident/1024UL);
+ drm_printf(p, "drm-memory-cpu: \t%llu KiB\n",
+ stats[TTM_PL_SYSTEM].drm.resident/1024UL);
+
+ /* Amdgpu specific memory accounting keys: */
drm_printf(p, "amd-evicted-vram:\t%llu KiB\n",
- stats.evicted_vram/1024UL);
- drm_printf(p, "amd-evicted-visible-vram:\t%llu KiB\n",
- stats.evicted_visible_vram/1024UL);
+ stats[TTM_PL_VRAM].evicted/1024UL);
drm_printf(p, "amd-requested-vram:\t%llu KiB\n",
- stats.requested_vram/1024UL);
- drm_printf(p, "amd-requested-visible-vram:\t%llu KiB\n",
- stats.requested_visible_vram/1024UL);
+ (stats[TTM_PL_VRAM].drm.shared +
+ stats[TTM_PL_VRAM].drm.private) / 1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
- stats.requested_gtt/1024UL);
- drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
- drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
- drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
+ (stats[TTM_PL_TT].drm.shared +
+ stats[TTM_PL_TT].drm.private) / 1024UL);
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
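With the switch to drm_print_memory_stats(), each fdinfo now carries the standard per-placement drm-* keys next to the legacy amdgpu ones; a hypothetical excerpt for one client might read:

pasid:	32770
drm-total-vram:	262144 KiB
drm-shared-vram:	0 KiB
drm-resident-vram:	262144 KiB
drm-memory-vram:	262144 KiB
drm-memory-gtt: 	8192 KiB
amd-evicted-vram:	0 KiB
amd-requested-vram:	262144 KiB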
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index ceb5163480f4..09c9194d5bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -384,7 +384,7 @@ int amdgpu_fru_sysfs_init(struct amdgpu_device *adev)
void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev)
{
- if (!is_fru_eeprom_supported(adev, NULL) || !adev->fru_info)
+ if (!adev->fru_info)
return;
sysfs_remove_files(&adev->dev->kobj, amdgpu_fru_attributes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
index bc58dca18035..98f3196599ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -32,7 +32,7 @@ struct amdgpu_fru_info {
char product_name[AMDGPU_PRODUCT_NAME_LEN];
char serial[20];
char manufacturer_name[32];
- char fru_id[32];
+ char fru_id[50];
};
int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 2d4b67175b55..328a1b963548 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -122,6 +122,10 @@ static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
+ return 0;
+
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
return 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 256b95232de5..b2033f8352f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -78,8 +78,9 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
if (adev->dummy_page_addr)
return 0;
- adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ adev->dummy_page_addr = dma_map_page_attrs(&adev->pdev->dev, dummy_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
adev->dummy_page_addr = 0;
@@ -99,8 +100,9 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
if (!adev->dummy_page_addr)
return;
- dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page_attrs(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
adev->dummy_page_addr = 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 1a5df8b94661..69429df09477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -42,6 +42,7 @@
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
@@ -87,10 +88,8 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
- if (aobj) {
- amdgpu_hmm_unregister(aobj);
- ttm_bo_put(&aobj->tbo);
- }
+ amdgpu_hmm_unregister(aobj);
+ ttm_bo_put(&aobj->tbo);
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
@@ -179,6 +178,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
if (r)
return r;
+ amdgpu_vm_bo_update_shared(abo);
bo_va = amdgpu_vm_bo_find(vm, abo);
if (!bo_va)
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
@@ -252,6 +252,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
amdgpu_vm_bo_del(adev, bo_va);
+ amdgpu_vm_bo_update_shared(bo);
if (!amdgpu_vm_ready(vm))
goto out_unlock;
@@ -839,7 +840,6 @@ error:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
@@ -899,7 +899,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
- amdgpu_vm_bo_invalidate(adev, robj, true);
+ amdgpu_vm_bo_invalidate(robj, true);
amdgpu_bo_unreserve(robj);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 83e54697f0ee..784b03abb3a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -87,16 +87,6 @@ int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
return bit;
}
-void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
- int *me, int *pipe, int *queue)
-{
- *queue = bit % adev->gfx.me.num_queue_per_pipe;
- *pipe = (bit / adev->gfx.me.num_queue_per_pipe)
- % adev->gfx.me.num_pipe_per_me;
- *me = (bit / adev->gfx.me.num_queue_per_pipe)
- / adev->gfx.me.num_pipe_per_me;
-}
-
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
int me, int pipe, int queue)
{
@@ -415,7 +405,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
}
/* prepare MQD backup */
- kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
+ kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
if (!kiq->mqd_backup) {
dev_warn(adev->dev,
"no memory to create MQD backup for ring %s\n", ring->name);
@@ -438,7 +428,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
ring->mqd_size = mqd_size;
/* prepare MQD backup */
- adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
+ adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.me.mqd_backup[i]) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
return -ENOMEM;
@@ -462,7 +452,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
ring->mqd_size = mqd_size;
/* prepare MQD backup */
- adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
+ adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.mec.mqd_backup[j]) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
return -ENOMEM;
@@ -525,6 +515,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
+ return 0;
+
spin_lock(&kiq->ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
adev->gfx.num_compute_rings)) {
@@ -538,20 +531,15 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
&adev->gfx.compute_ring[j],
RESET_QUEUES, 0, 0);
}
-
- /**
- * This is workaround: only skip kiq_ring test
- * during ras recovery in suspend stage for gfx9.4.3
+ /* Submit unmap queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * this to ensure that the unmap-queues packet submitted above was
+ * processed successfully before returning.
*/
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- amdgpu_ras_in_recovery(adev)) {
- spin_unlock(&kiq->ring_lock);
- return 0;
- }
+ r = amdgpu_ring_test_helper(kiq_ring);
- if (kiq_ring->sched.ready && !adev->job_hang)
- r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
return r;
@@ -579,8 +567,11 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- spin_lock(&kiq->ring_lock);
+ if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
+ return 0;
+
if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ spin_lock(&kiq->ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
adev->gfx.num_gfx_rings)) {
spin_unlock(&kiq->ring_lock);
@@ -593,11 +584,17 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
&adev->gfx.gfx_ring[j],
PREEMPT_QUEUES, 0, 0);
}
- }
+ /* Submit unmap queue packet */
+ amdgpu_ring_commit(kiq_ring);
- if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
+ /*
+ * Ring test will do a basic scratch register change check.
+ * Just run this to ensure that the unmap-queues packet submitted
+ * above was processed successfully before returning.
+ */
r = amdgpu_ring_test_helper(kiq_ring);
- spin_unlock(&kiq->ring_lock);
+ spin_unlock(&kiq->ring_lock);
+ }
return r;
}
@@ -702,7 +699,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
kiq->pmf->kiq_map_queues(kiq_ring,
&adev->gfx.compute_ring[j]);
}
-
+ /* Submit map queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * this to ensure that the map-queues packet submitted above was
+ * processed successfully before returning.
+ */
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
@@ -753,7 +756,13 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
&adev->gfx.gfx_ring[j]);
}
}
-
+ /* Submit map queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * this to ensure that the map-queues packet submitted above was
+ * processed successfully before returning.
+ */
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
@@ -797,7 +806,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
/* If going to s2idle, no need to wait */
if (adev->in_s0ix) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
- AMD_IP_BLOCK_TYPE_GFX, true))
+ AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
} else {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
@@ -809,7 +818,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
if (adev->gfx.gfx_off_state &&
- !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+ !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
adev->gfx.gfx_off_state = false;
if (adev->gfx.funcs->init_spm_golden) {
@@ -895,6 +904,9 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
if (r)
return r;
+ if (amdgpu_sriov_vf(adev))
+ return r;
+
if (adev->gfx.cp_ecc_error_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
@@ -1363,35 +1375,35 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
return count;
}
+static const char *xcp_desc[] = {
+ [AMDGPU_SPX_PARTITION_MODE] = "SPX",
+ [AMDGPU_DPX_PARTITION_MODE] = "DPX",
+ [AMDGPU_TPX_PARTITION_MODE] = "TPX",
+ [AMDGPU_QPX_PARTITION_MODE] = "QPX",
+ [AMDGPU_CPX_PARTITION_MODE] = "CPX",
+};
+
static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
struct device_attribute *addr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- char *supported_partition;
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ int size = 0, mode;
+ char *sep = "";
- /* TBD */
- switch (NUM_XCC(adev->gfx.xcc_mask)) {
- case 8:
- supported_partition = "SPX, DPX, QPX, CPX";
- break;
- case 6:
- supported_partition = "SPX, TPX, CPX";
- break;
- case 4:
- supported_partition = "SPX, DPX, CPX";
- break;
- /* this seems only existing in emulation phase */
- case 2:
- supported_partition = "SPX, CPX";
- break;
- default:
- supported_partition = "Not supported";
- break;
+ if (!xcp_mgr || !xcp_mgr->avail_xcp_modes)
+ return sysfs_emit(buf, "Not supported\n");
+
+ for_each_inst(mode, xcp_mgr->avail_xcp_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
+ sep = ", ";
}
- return sysfs_emit(buf, "%s\n", supported_partition);
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
}
static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
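The rewritten show() callback builds a comma-separated list with sysfs_emit_at() from the XCP manager's mode mask instead of hard-coding strings per XCC count. A small userspace sketch that reads and splits that list, assuming the attribute sits under a card0 device directory:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumed path; the attribute is created on the PCI device. */
	const char *p = "/sys/class/drm/card0/device/available_compute_partition";
	char line[128];
	FILE *f = fopen(p, "r");

	if (!f) {
		perror(p);
		return 1;
	}
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	line[strcspn(line, "\n")] = '\0';
	for (char *tok = strtok(line, ", "); tok; tok = strtok(NULL, ", "))
		printf("supported mode: %s\n", tok);
	return 0;
}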
@@ -1472,6 +1484,24 @@ static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
return 0;
}
+/**
+ * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer containing the input data
+ * @count: The size of the input data
+ *
+ * Provides the sysfs interface to manually run a cleaner shader, which is
+ * used to clear the GPU state between different tasks. Writing a value to the
+ * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
+ * The value written corresponds to the partition index on multi-partition
+ * devices. On single-partition devices, the value should be '0'.
+ *
+ * The cleaner shader clears the Local Data Store (LDS) and General Purpose
+ * Registers (GPRs) to ensure data isolation between GPU workloads.
+ *
+ * Return: The number of bytes written to the sysfs file.
+ */
static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -1520,6 +1550,19 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
return count;
}
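Per the kernel-doc above, the file takes the index of the partition whose state should be cleaned. A minimal userspace sketch (root required; the card0 path is an assumption):

#include <stdio.h>

int main(void)
{
	/* Assumed sysfs path; "0" targets the only partition on
	 * single-partition devices, as the kernel-doc describes. */
	FILE *f = fopen("/sys/class/drm/card0/device/run_cleaner_shader", "w");

	if (!f) {
		perror("run_cleaner_shader");
		return 1;
	}
	fputs("0", f);
	fclose(f);
	return 0;
}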
+/**
+ * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer to store the output data
+ *
+ * Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
+ * feature for each GPU partition. Reading from the 'enforce_isolation'
+ * sysfs file returns the isolation settings for all partitions, where '0'
+ * indicates disabled and '1' indicates enabled.
+ *
+ * Return: The number of bytes read from the sysfs file.
+ */
static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1543,6 +1586,20 @@ static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
return size;
}
+/**
+ * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer containing the input data
+ * @count: The size of the input data
+ *
+ * This function allows control over the 'enforce_isolation' feature, which
+ * serializes access to the graphics engine. Writing '1' or '0' to the
+ * 'enforce_isolation' sysfs file enables or disables process isolation for
+ * each partition. The input should specify the setting for all partitions.
+ *
+ * Return: The number of bytes written to the sysfs file.
+ */
static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -1586,9 +1643,11 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
if (adev->enforce_isolation[i] && !partition_values[i]) {
/* Going from enabled to disabled */
amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
+ amdgpu_mes_set_enforce_isolation(adev, i, false);
} else if (!adev->enforce_isolation[i] && partition_values[i]) {
/* Going from disabled to enabled */
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
+ amdgpu_mes_set_enforce_isolation(adev, i, true);
}
adev->enforce_isolation[i] = partition_values[i];
}
@@ -1598,6 +1657,32 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
return count;
}
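Reading enforce_isolation returns one 0/1 value per partition and the store handler expects the same layout back. A sketch for a single-partition device, again assuming a card0 path:

#include <stdio.h>

int main(void)
{
	const char *p = "/sys/class/drm/card0/device/enforce_isolation";
	char buf[64];
	FILE *f = fopen(p, "r");

	if (!f) {
		perror(p);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current: %s", buf);	/* one 0/1 per partition */
	fclose(f);

	f = fopen(p, "w");
	if (!f)
		return 1;
	fputs("1", f);	/* enable; multi-partition boards take N values */
	fclose(f);
	return 0;
}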
+static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
+}
+
+static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
+}
+
static DEVICE_ATTR(run_cleaner_shader, 0200,
NULL, amdgpu_gfx_set_run_cleaner_shader);
@@ -1611,48 +1696,138 @@ static DEVICE_ATTR(current_compute_partition, 0644,
static DEVICE_ATTR(available_compute_partition, 0444,
amdgpu_gfx_get_available_compute_partition, NULL);
+static DEVICE_ATTR(gfx_reset_mask, 0444,
+ amdgpu_gfx_get_gfx_reset_mask, NULL);
-int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
+static DEVICE_ATTR(compute_reset_mask, 0444,
+ amdgpu_gfx_get_compute_reset_mask, NULL);
+
+static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
{
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ bool xcp_switch_supported;
int r;
+ if (!xcp_mgr)
+ return 0;
+
+ xcp_switch_supported =
+ (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
+
+ if (!xcp_switch_supported)
+ dev_attr_current_compute_partition.attr.mode &=
+ ~(S_IWUSR | S_IWGRP | S_IWOTH);
+
r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
if (r)
return r;
- r = device_create_file(adev->dev, &dev_attr_available_compute_partition);
+ if (xcp_switch_supported)
+ r = device_create_file(adev->dev,
+ &dev_attr_available_compute_partition);
return r;
}
-void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
+static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev)
{
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ bool xcp_switch_supported;
+
+ if (!xcp_mgr)
+ return;
+
+ xcp_switch_supported =
+ (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
device_remove_file(adev->dev, &dev_attr_current_compute_partition);
- device_remove_file(adev->dev, &dev_attr_available_compute_partition);
+
+ if (xcp_switch_supported)
+ device_remove_file(adev->dev,
+ &dev_attr_available_compute_partition);
}
-int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
+static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
{
int r;
- if (!amdgpu_sriov_vf(adev)) {
- r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
+ r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
+ if (r)
+ return r;
+ if (adev->gfx.enable_cleaner_shader)
+ r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
+
+ return r;
+}
+
+static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_enforce_isolation);
+ if (adev->gfx.enable_cleaner_shader)
+ device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
+}
+
+static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (!amdgpu_gpu_recovery)
+ return r;
+
+ if (adev->gfx.num_gfx_rings) {
+ r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
if (r)
return r;
}
- r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
- if (r)
+ if (adev->gfx.num_compute_rings) {
+ r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_gpu_recovery)
+ return;
+
+ if (adev->gfx.num_gfx_rings)
+ device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
+
+ if (adev->gfx.num_compute_rings)
+ device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
+}
+
+int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_gfx_sysfs_xcp_init(adev);
+ if (r) {
+ dev_err(adev->dev, "failed to create xcp sysfs files");
return r;
+ }
- return 0;
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ dev_err(adev->dev, "failed to create isolation sysfs files");
+
+ r = amdgpu_gfx_sysfs_reset_mask_init(adev);
+ if (r)
+ dev_err(adev->dev, "failed to create reset mask sysfs files");
+
+ return r;
}
-void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
+void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
{
- if (!amdgpu_sriov_vf(adev))
- device_remove_file(adev->dev, &dev_attr_enforce_isolation);
- device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
+ if (adev->dev->kobj.sd) {
+ amdgpu_gfx_sysfs_xcp_fini(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+ amdgpu_gfx_sysfs_reset_mask_fini(adev);
+ }
}
int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
@@ -1740,7 +1915,7 @@ static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
if (adev->gfx.kfd_sch_req_count[idx] == 0 &&
adev->gfx.kfd_sch_inactive[idx]) {
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
- GFX_SLICE_PERIOD);
+ msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
}
} else {
if (adev->gfx.kfd_sch_req_count[idx] == 0) {
@@ -1795,8 +1970,9 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
}
if (fences) {
+ /* we've already had our timeslice, so let's wrap this up */
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
- GFX_SLICE_PERIOD);
+ msecs_to_jiffies(1));
} else {
/* Tell KFD to resume the runqueue */
if (adev->kfd.init_complete) {
@@ -1809,10 +1985,76 @@ void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
mutex_unlock(&adev->enforce_isolation_mutex);
}
+/**
+ * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
+ * @adev: amdgpu_device pointer
+ * @idx: Index of the GPU partition
+ *
+ * When kernel submissions come in, the jobs are given a time slice and once
+ * that time slice is up, if there are KFD user queues active, kernel
+ * submissions are blocked until KFD has had its time slice. Once the KFD time
+ * slice is up, KFD user queues are preempted and kernel submissions are
+ * unblocked and allowed to run again.
+ */
+static void
+amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
+ u32 idx)
+{
+ unsigned long cjiffies;
+ bool wait = false;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx]) {
+ /* set the initial values if nothing is set */
+ if (!adev->gfx.enforce_isolation_jiffies[idx]) {
+ adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
+ adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
+ }
+ /* Make sure KFD gets a chance to run */
+ if (amdgpu_amdkfd_compute_active(adev, idx)) {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) {
+ cjiffies -= adev->gfx.enforce_isolation_jiffies[idx];
+ if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) {
+ /* if our time is up, let KGD work drain before scheduling more */
+ wait = true;
+ /* reset the timer period */
+ adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
+ } else {
+ /* set the timer period to what's left in our time slice */
+ adev->gfx.enforce_isolation_time[idx] =
+ GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies);
+ }
+ } else {
+ /* if jiffies wrap around we will just wait a little longer */
+ adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
+ }
+ } else {
+ /* if there is no KFD work, then set the full slice period */
+ adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
+ adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
+ }
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (wait)
+ msleep(GFX_SLICE_PERIOD_MS);
+}
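The helper above tracks a per-partition slice start in jiffies, sleeps a full slice once kernel work has run past GFX_SLICE_PERIOD_MS while KFD queues are active, and otherwise shrinks the next work period to whatever remains of the slice. The same arithmetic in plain milliseconds, with a time_after()-style wrap-safe comparison (250 ms is a stand-in for the real period):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SLICE_MS 250u	/* stand-in for GFX_SLICE_PERIOD_MS */

/* time_after(a, b): true if a is later than b, tolerant of wraparound. */
static bool time_after_u32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

/* How long the next enforce-isolation work period should be. */
static uint32_t next_period_ms(uint32_t now, uint32_t slice_start)
{
	if (!time_after_u32(now, slice_start))
		return SLICE_MS;	/* wrapped: just wait a full slice */

	uint32_t used = now - slice_start;

	return used >= SLICE_MS ? SLICE_MS : SLICE_MS - used;
}

int main(void)
{
	printf("%u\n", next_period_ms(1000, 900));   /* 100 ms used -> 150 left */
	printf("%u\n", next_period_ms(2000, 1000));  /* slice expired -> 250 */
	return 0;
}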
+
+/**
+ * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
+ * @ring: Pointer to the amdgpu_ring structure
+ *
+ * Ring begin_use helper implementation for gfx which serializes access to the
+ * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
+ * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
+ * each get a time slice when both are active.
+ */
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -1825,18 +2067,34 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
if (idx >= MAX_XCP)
return;
+ /* Don't submit more work until KFD has had some time */
+ amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
+
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
}
+/**
+ * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
+ * @ring: Pointer to the amdgpu_ring structure
+ *
+ * Ring end_use helper implementation for gfx which serializes access to the
+ * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
+ * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
+ * each get a time slice when both are active.
+ */
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -1852,7 +2110,151 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+}
+
+/*
+ * debugfs to enable/disable gfx job submission to a specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (val & (1 << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ /* make the sched.ready flag updates visible across SMP immediately */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
+ amdgpu_debugfs_gfx_sched_mask_get,
+ amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (!(adev->gfx.num_gfx_rings > 1))
+ return;
+ sprintf(name, "amdgpu_gfx_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_gfx_sched_mask_fops);
+#endif
+}
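The resulting debugfs file reads and writes the mask as hex, and the set handler rejects values with no in-range bits. A userspace read-modify-write sketch (root required; the dri/0 minor is an assumption):

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_gfx_sched_mask";
	uint64_t mask;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%" SCNx64, &mask) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	mask &= ~(1ULL << 1);	/* stop submissions to gfx ring 1 */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "%llx\n", (unsigned long long)mask);
	fclose(f);
	return 0;
}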
+
+/*
+ * debugfs to enable/disable compute job submission to a specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << adev->gfx.num_compute_rings) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ ring = &adev->gfx.compute_ring[i];
+ if (val & (1 << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+
+ /* make the sched.ready flag updates visible across SMP immediately */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
+ amdgpu_debugfs_compute_sched_mask_get,
+ amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (!(adev->gfx.num_compute_rings > 1))
+ return;
+ sprintf(name, "amdgpu_compute_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_compute_sched_mask_fops);
+#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 5644e10a86a9..8b5bd63b5773 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -424,6 +424,8 @@ struct amdgpu_gfx {
/* reset mask */
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
+ uint32_t gfx_supported_reset;
+ uint32_t compute_supported_reset;
/* gfx off */
bool gfx_off_state; /* true: enabled, false: disabled */
@@ -472,6 +474,8 @@ struct amdgpu_gfx {
struct mutex kfd_sch_mutex;
u64 kfd_sch_req_count[MAX_XCP];
bool kfd_sch_inactive[MAX_XCP];
+ unsigned long enforce_isolation_jiffies[MAX_XCP];
+ unsigned long enforce_isolation_time[MAX_XCP];
};
struct amdgpu_gfx_ras_reg_entry {
@@ -540,8 +544,6 @@ bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
int pipe, int queue);
-void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
- int *me, int *pipe, int *queue);
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
@@ -579,11 +581,11 @@ void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev);
void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
unsigned int cleaner_shader_size,
const void *cleaner_shader_ptr);
-int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev);
-void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev);
void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
+void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 17a19d49d30a..1c19a65e6553 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -1065,18 +1065,6 @@ uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
}
-/**
- * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
- * from CPU's view
- *
- * @adev: amdgpu_device pointer
- * @bo: amdgpu buffer object
- */
-uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
-{
- return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
-}
-
int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
{
struct amdgpu_bo *vram_bo = NULL;
@@ -1130,6 +1118,79 @@ release_buffer:
return ret;
}
+static const char *nps_desc[] = {
+ [AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
+ [AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
+ [AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
+ [AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
+ [AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
+ [AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
+};
+
+static ssize_t available_memory_partition_show(struct device *dev,
+ struct device_attribute *addr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int size = 0, mode;
+ char *sep = "";
+
+ for_each_inst(mode, adev->gmc.supported_nps_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
+ sep = ", ";
+ }
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t current_memory_partition_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ enum amdgpu_memory_partition mode;
+ struct amdgpu_hive_info *hive;
+ int i;
+
+ mode = UNKNOWN_MEMORY_PARTITION_MODE;
+ for_each_inst(i, adev->gmc.supported_nps_modes) {
+ if (!strncasecmp(nps_desc[i], buf, strlen(nps_desc[i]))) {
+ mode = i;
+ break;
+ }
+ }
+
+ if (mode == UNKNOWN_MEMORY_PARTITION_MODE)
+ return -EINVAL;
+
+ if (mode == adev->gmc.gmc_funcs->query_mem_partition_mode(adev)) {
+ dev_info(
+ adev->dev,
+ "requested NPS mode is same as current NPS mode, skipping\n");
+ return count;
+ }
+
+ /* If device is part of hive, all devices in the hive should request the
+ * same mode. Hence store the requested mode in hive.
+ */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ atomic_set(&hive->requested_nps_mode, mode);
+ amdgpu_put_xgmi_hive(hive);
+ } else {
+ adev->gmc.requested_nps_mode = mode;
+ }
+
+ dev_info(
+ adev->dev,
+ "NPS mode change requested, please remove and reload the driver\n");
+
+ return count;
+}
+
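The store handler matches the written string case-insensitively against the mode table by prefix. The same lookup as a standalone program, with the table reduced to name/enum-value pairs taken from the modes above:

#include <stdio.h>
#include <string.h>
#include <strings.h>

static const struct { const char *name; int mode; } nps_desc[] = {
	{ "NPS1", 1 }, { "NPS2", 2 }, { "NPS3", 3 },
	{ "NPS4", 4 }, { "NPS6", 6 }, { "NPS8", 8 },
};

/* Mirror of the store handler's lookup: prefix match, case-insensitive. */
static int parse_nps(const char *buf)
{
	for (size_t i = 0; i < sizeof(nps_desc) / sizeof(nps_desc[0]); i++)
		if (!strncasecmp(nps_desc[i].name, buf, strlen(nps_desc[i].name)))
			return nps_desc[i].mode;
	return -1;	/* the kernel returns -EINVAL here */
}

int main(void)
{
	printf("%d\n", parse_nps("nps4\n"));	/* 4 */
	printf("%d\n", parse_nps("NPS9"));	/* -1 */
	return 0;
}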
static ssize_t current_memory_partition_show(
struct device *dev, struct device_attribute *addr, char *buf)
{
@@ -1138,53 +1199,65 @@ static ssize_t current_memory_partition_show(
enum amdgpu_memory_partition mode;
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
- switch (mode) {
- case AMDGPU_NPS1_PARTITION_MODE:
- return sysfs_emit(buf, "NPS1\n");
- case AMDGPU_NPS2_PARTITION_MODE:
- return sysfs_emit(buf, "NPS2\n");
- case AMDGPU_NPS3_PARTITION_MODE:
- return sysfs_emit(buf, "NPS3\n");
- case AMDGPU_NPS4_PARTITION_MODE:
- return sysfs_emit(buf, "NPS4\n");
- case AMDGPU_NPS6_PARTITION_MODE:
- return sysfs_emit(buf, "NPS6\n");
- case AMDGPU_NPS8_PARTITION_MODE:
- return sysfs_emit(buf, "NPS8\n");
- default:
+ if ((mode >= ARRAY_SIZE(nps_desc)) ||
+ (BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
return sysfs_emit(buf, "UNKNOWN\n");
- }
+
+ return sysfs_emit(buf, "%s\n", nps_desc[mode]);
}
-static DEVICE_ATTR_RO(current_memory_partition);
+static DEVICE_ATTR_RW(current_memory_partition);
+static DEVICE_ATTR_RO(available_memory_partition);
int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev)
{
+ bool nps_switch_support;
+ int r = 0;
+
if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
return 0;
+ nps_switch_support = (hweight32(adev->gmc.supported_nps_modes &
+ AMDGPU_ALL_NPS_MASK) > 1);
+ if (!nps_switch_support)
+ dev_attr_current_memory_partition.attr.mode &=
+ ~(S_IWUSR | S_IWGRP | S_IWOTH);
+ else
+ r = device_create_file(adev->dev,
+ &dev_attr_available_memory_partition);
+
+ if (r)
+ return r;
+
return device_create_file(adev->dev,
&dev_attr_current_memory_partition);
}
void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev)
{
+ if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
+ return;
+
device_remove_file(adev->dev, &dev_attr_current_memory_partition);
+ device_remove_file(adev->dev, &dev_attr_available_memory_partition);
}
int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info *mem_ranges,
- int exp_ranges)
+ uint8_t *exp_ranges)
{
struct amdgpu_gmc_memrange *ranges;
int range_cnt, ret, i, j;
uint32_t nps_type;
+ bool refresh;
- if (!mem_ranges)
+ if (!mem_ranges || !exp_ranges)
return -EINVAL;
+ refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
+ (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS);
ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges,
- &range_cnt);
+ &range_cnt, refresh);
if (ret)
return ret;
@@ -1192,16 +1265,16 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
/* TODO: For now, expect ranges and partition count to be the same.
* Adjust if there are holes expected in any NPS domain.
*/
- if (range_cnt != exp_ranges) {
+ if (*exp_ranges && (range_cnt != *exp_ranges)) {
dev_warn(
adev->dev,
"NPS config mismatch - expected ranges: %d discovery - nps mode: %d, nps ranges: %d",
- exp_ranges, nps_type, range_cnt);
+ *exp_ranges, nps_type, range_cnt);
ret = -EINVAL;
goto err;
}
- for (i = 0; i < exp_ranges; ++i) {
+ for (i = 0; i < range_cnt; ++i) {
if (ranges[i].base_address >= ranges[i].limit_address) {
dev_warn(
adev->dev,
@@ -1242,8 +1315,81 @@ int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
ranges[i].limit_address - ranges[i].base_address + 1;
}
+ if (!*exp_ranges)
+ *exp_ranges = range_cnt;
err:
kfree(ranges);
return ret;
}
+
+int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
+ int nps_mode)
+{
+ /* Not supported on VF devices and APUs */
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return -EOPNOTSUPP;
+
+ if (!adev->psp.funcs) {
+ dev_err(adev->dev,
+ "PSP interface not available for nps mode change request");
+ return -EINVAL;
+ }
+
+ return psp_memory_partition(&adev->psp, nps_mode);
+}
+
+static inline bool amdgpu_gmc_need_nps_switch_req(struct amdgpu_device *adev,
+ int req_nps_mode,
+ int cur_nps_mode)
+{
+ return (((BIT(req_nps_mode) & adev->gmc.supported_nps_modes) ==
+ BIT(req_nps_mode)) &&
+ req_nps_mode != cur_nps_mode);
+}
+
+void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev)
+{
+ int req_nps_mode, cur_nps_mode, r;
+ struct amdgpu_hive_info *hive;
+
+ if (amdgpu_sriov_vf(adev) || !adev->gmc.supported_nps_modes ||
+ !adev->gmc.gmc_funcs->request_mem_partition_mode)
+ return;
+
+ cur_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ req_nps_mode = atomic_read(&hive->requested_nps_mode);
+ if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode,
+ cur_nps_mode)) {
+ amdgpu_put_xgmi_hive(hive);
+ return;
+ }
+ r = amdgpu_xgmi_request_nps_change(adev, hive, req_nps_mode);
+ amdgpu_put_xgmi_hive(hive);
+ goto out;
+ }
+
+ req_nps_mode = adev->gmc.requested_nps_mode;
+ if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, cur_nps_mode))
+ return;
+
+ /* even if this fails, we should let the driver unload without blocking */
+ r = adev->gmc.gmc_funcs->request_mem_partition_mode(adev, req_nps_mode);
+out:
+ if (r)
+ dev_err(adev->dev, "NPS mode change request failed\n");
+ else
+ dev_info(
+ adev->dev,
+ "NPS mode change request done, reload driver to complete the change\n");
+}
+
+bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
+{
+ if (adev->gmc.gmc_funcs->need_reset_on_init)
+ return adev->gmc.gmc_funcs->need_reset_on_init(adev);
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 4d951a1baefa..459a30fe239f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -73,6 +73,13 @@ enum amdgpu_memory_partition {
AMDGPU_NPS8_PARTITION_MODE = 8,
};
+#define AMDGPU_ALL_NPS_MASK \
+ (BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS2_PARTITION_MODE) | \
+ BIT(AMDGPU_NPS3_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE) | \
+ BIT(AMDGPU_NPS6_PARTITION_MODE) | BIT(AMDGPU_NPS8_PARTITION_MODE))
+
+#define AMDGPU_GMC_INIT_RESET_NPS BIT(0)
+
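A mode number is treated as valid only when its bit falls inside AMDGPU_ALL_NPS_MASK, which is how current_memory_partition_show() filters bogus values. The same predicate standalone, using the enum values from this header (NPS1..NPS4, NPS6, NPS8):

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Values from the enum in this header. */
#define ALL_NPS_MASK \
	(BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(6) | BIT(8))

static int nps_mode_valid(unsigned int mode)
{
	/* Same shape as the check in current_memory_partition_show(). */
	return mode < 32 && (BIT(mode) & ALL_NPS_MASK) == BIT(mode);
}

int main(void)
{
	printf("NPS4 valid: %d\n", nps_mode_valid(4));	/* 1 */
	printf("NPS5 valid: %d\n", nps_mode_valid(5));	/* 0 */
	return 0;
}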
/*
* GMC page fault information
*/
@@ -161,6 +168,10 @@ struct amdgpu_gmc_funcs {
enum amdgpu_memory_partition (*query_mem_partition_mode)(
struct amdgpu_device *adev);
+ /* Request NPS mode */
+ int (*request_mem_partition_mode)(struct amdgpu_device *adev,
+ int nps_mode);
+ bool (*need_reset_on_init)(struct amdgpu_device *adev);
};
struct amdgpu_xgmi_ras {
@@ -182,7 +193,6 @@ struct amdgpu_xgmi {
bool supported;
struct ras_common_if *ras_if;
bool connected_to_cpu;
- bool pending_reset;
struct amdgpu_xgmi_ras *ras;
};
@@ -305,6 +315,9 @@ struct amdgpu_gmc {
struct amdgpu_mem_partition_info *mem_partitions;
uint8_t num_mem_partitions;
const struct amdgpu_gmc_funcs *gmc_funcs;
+ enum amdgpu_memory_partition requested_nps_mode;
+ uint32_t supported_nps_modes;
+ uint32_t reset_flags;
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
@@ -447,13 +460,17 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
-uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
int amdgpu_gmc_vram_checking(struct amdgpu_device *adev);
int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev);
void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev);
int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
struct amdgpu_mem_partition_info *mem_ranges,
- int exp_ranges);
+ uint8_t *exp_ranges);
+
+int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
+ int nps_mode);
+void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev);
+bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 00d6211e0fbf..f0765ccde668 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -225,15 +225,6 @@ void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
kfree(i2c);
}
-/* Add the default buses */
-void amdgpu_i2c_init(struct amdgpu_device *adev)
-{
- if (amdgpu_hw_i2c)
- DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
-
- amdgpu_atombios_i2c_init(adev);
-}
-
/* remove all the buses */
void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
@@ -247,22 +238,6 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
}
}
-/* Add additional buses */
-void amdgpu_i2c_add(struct amdgpu_device *adev,
- const struct amdgpu_i2c_bus_rec *rec,
- const char *name)
-{
- struct drm_device *dev = adev_to_drm(adev);
- int i;
-
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (!adev->i2c_bus[i]) {
- adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name);
- return;
- }
- }
-}
-
/* looks up bus based on id */
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
index 63c2ff7499e1..21e3d1dad0a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
@@ -28,11 +28,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
const struct amdgpu_i2c_bus_rec *rec,
const char *name);
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
-void amdgpu_i2c_init(struct amdgpu_device *adev);
void amdgpu_i2c_fini(struct amdgpu_device *adev);
-void amdgpu_i2c_add(struct amdgpu_device *adev,
- const struct amdgpu_i2c_bus_rec *rec,
- const char *name);
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
const struct amdgpu_i2c_bus_rec *i2c_bus);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 8b512dc28df8..2ea98ec60220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -89,16 +89,14 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
/**
* amdgpu_ib_free - free an IB (Indirect Buffer)
*
- * @adev: amdgpu_device pointer
* @ib: IB object to free
* @f: the fence the SA bo needs to wait on for the IB allocation
*
* Free an IB (all asics).
*/
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f)
+void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f)
{
- amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
+ amdgpu_sa_bo_free(&ib->sa_bo, f);
}
/**
@@ -193,8 +191,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
- (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
- amdgpu_vm_need_pipeline_sync(ring, job))) {
+ need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
+
need_pipe_sync = true;
if (tmp)
@@ -299,7 +297,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_patch_cond_exec(ring, cond_exec);
ring->current_ctx = fence_ctx;
- if (vm && ring->funcs->emit_switch_buffer)
+ if (job && ring->funcs->emit_switch_buffer)
amdgpu_ring_emit_switch_buffer(ring);
if (ring->funcs->emit_wave_limit &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 92d27d32de41..8e712a11aba5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -342,15 +342,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
* @ring: ring we want to submit job to
* @job: job who wants to use the VMID
* @id: resulting VMID
- * @fence: fence to wait for if no id could be grabbed
*
* Try to reuse a VMID for this submission.
*/
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_job *job,
- struct amdgpu_vmid **id,
- struct dma_fence **fence)
+ struct amdgpu_vmid **id)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
@@ -429,7 +427,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r || !id)
goto error;
} else {
- r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
+ r = amdgpu_vmid_grab_used(vm, ring, job, &id);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f3b0aaf3ebc6..901f8b12c672 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -298,3 +298,9 @@ uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
dw2 = le32_to_cpu(ih->ring[ring_index + 2]);
return dw1 | ((u64)(dw2 & 0xffff) << 32);
}
+
+const char *amdgpu_ih_ring_name(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+{
+ return ih == &adev->irq.ih ? "ih" : ih == &adev->irq.ih_soft ? "sw ih" :
+ ih == &adev->irq.ih1 ? "ih1" : ih == &adev->irq.ih2 ? "ih2" : "unknown";
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 508f02eb0cf8..7d4395a5d8ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -110,4 +110,5 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
signed int offset);
+const char *amdgpu_ih_ring_name(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 4766e99dd98f..732744488b03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -33,33 +33,17 @@
#include "isp_v4_1_0.h"
#include "isp_v4_1_1.h"
-static int isp_sw_init(void *handle)
-{
- return 0;
-}
-
-static int isp_sw_fini(void *handle)
-{
- return 0;
-}
-
/**
* isp_hw_init - start and test isp block
*
- * @handle: handle for amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int isp_hw_init(void *handle)
+static int isp_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_isp *isp = &adev->isp;
- const struct amdgpu_ip_block *ip_block =
- amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ISP);
-
- if (!ip_block)
- return -EINVAL;
-
if (isp->funcs->hw_init != NULL)
return isp->funcs->hw_init(isp);
@@ -69,13 +53,12 @@ static int isp_hw_init(void *handle)
/**
* isp_hw_fini - stop the hardware block
*
- * @handle: handle for amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int isp_hw_fini(void *handle)
+static int isp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_isp *isp = &adev->isp;
+ struct amdgpu_isp *isp = &ip_block->adev->isp;
if (isp->funcs->hw_fini != NULL)
return isp->funcs->hw_fini(isp);
@@ -83,16 +66,6 @@ static int isp_hw_fini(void *handle)
return -ENODEV;
}
-static int isp_suspend(void *handle)
-{
- return 0;
-}
-
-static int isp_resume(void *handle)
-{
- return 0;
-}
-
static int isp_load_fw_by_psp(struct amdgpu_device *adev)
{
const struct common_firmware_header *hdr;
@@ -104,7 +77,8 @@ static int isp_load_fw_by_psp(struct amdgpu_device *adev)
sizeof(ucode_prefix));
/* read isp fw */
- r = amdgpu_ucode_request(adev, &adev->isp.fw, "amdgpu/%s.bin", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->isp.fw, AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s.bin", ucode_prefix);
if (r) {
amdgpu_ucode_release(&adev->isp.fw);
return r;
@@ -122,9 +96,10 @@ static int isp_load_fw_by_psp(struct amdgpu_device *adev)
return r;
}
-static int isp_early_init(void *handle)
+static int isp_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_isp *isp = &adev->isp;
switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
@@ -154,23 +129,13 @@ static bool isp_is_idle(void *handle)
return true;
}
-static int isp_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int isp_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int isp_set_clockgating_state(void *handle,
+static int isp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int isp_set_powergating_state(void *handle,
+static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -179,16 +144,9 @@ static int isp_set_powergating_state(void *handle,
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
- .late_init = NULL,
- .sw_init = isp_sw_init,
- .sw_fini = isp_sw_fini,
.hw_init = isp_hw_init,
.hw_fini = isp_hw_fini,
- .suspend = isp_suspend,
- .resume = isp_resume,
.is_idle = isp_is_idle,
- .wait_for_idle = isp_wait_for_idle,
- .soft_reset = isp_soft_reset,
.set_clockgating_state = isp_set_clockgating_state,
.set_powergating_state = isp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 16f2605ac50b..100f04475943 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -42,7 +42,7 @@ static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].version->funcs->dump_ip_state)
adev->ip_blocks[i].version->funcs
- ->dump_ip_state((void *)adev);
+ ->dump_ip_state((void *)&adev->ip_blocks[i]);
dev_info(adev->dev, "Dumping IP State Completed\n");
amdgpu_coredump(adev, true, false, job);
@@ -102,8 +102,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
return DRM_GPU_SCHED_STAT_ENODEV;
}
- adev->job_hang = true;
-
/*
* Do the coredump immediately after a job timeout to get a very
* close dump/snapshot/representation of GPU's current error status
@@ -137,6 +135,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
/* attempt a per ring reset */
if (amdgpu_gpu_recovery &&
ring->funcs->reset) {
+ dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name);
/* stop the scheduler, but don't mess with the
* bad job yet because if ring reset fails
* we'll fall back to full GPU reset.
@@ -149,9 +148,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
atomic_inc(&ring->adev->gpu_reset_counter);
amdgpu_fence_driver_force_completion(ring);
if (amdgpu_ring_sched_ready(ring))
- drm_sched_start(&ring->sched);
+ drm_sched_start(&ring->sched, 0);
goto exit;
}
+ dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
}
if (amdgpu_device_should_recover_gpu(ring->adev)) {
@@ -179,7 +179,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
- adev->job_hang = false;
drm_dev_exit(idx);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -195,11 +194,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!*job)
return -ENOMEM;
- /*
- * Initialize the scheduler to at least some ring so that we always
- * have a pointer to adev.
- */
- (*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
@@ -253,7 +247,6 @@ void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct dma_fence *f;
unsigned i;
@@ -266,7 +259,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
f = NULL;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(ring->adev, &job->ibs[i], f);
+ amdgpu_ib_free(&job->ibs[i], f);
}
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
@@ -356,15 +349,22 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
if (r)
goto error;
- if (!fence && job->gang_submit)
+ if (job->gang_submit)
fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
- while (!fence && job->vm && !job->vmid) {
+ if (!fence && job->vm && !job->vmid) {
r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
if (r) {
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
+ /*
+ * The VM structure might be released after the VMID is
+ * assigned, we had multiple problems with people trying to use
+ * the VM pointer so better set it to NULL.
+ */
+ if (!fence)
+ job->vm = NULL;
}
return fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 6df99cb00d9a..b6d2eb049f54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -47,7 +47,7 @@ int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
adev->jpeg.indirect_sram = true;
for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
- if (adev->jpeg.harvest_config & (1 << i))
+ if (adev->jpeg.harvest_config & (1U << i))
continue;
if (adev->jpeg.indirect_sram) {
@@ -73,7 +73,7 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (adev->jpeg.harvest_config & (1 << i))
+ if (adev->jpeg.harvest_config & (1U << i))
continue;
amdgpu_bo_free_kernel(
@@ -110,7 +110,7 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
unsigned int i, j;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (adev->jpeg.harvest_config & (1 << i))
+ if (adev->jpeg.harvest_config & (1U << i))
continue;
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
@@ -342,3 +342,113 @@ int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
return psp_execute_ip_fw_load(&adev->psp, &ucode);
}
+
+/*
+ * debugfs to enable/disable jpeg job submission to a specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i, j;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << (adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings)) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (val & (1 << ((i * adev->jpeg.num_jpeg_rings) + j)))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ }
+ /* make the sched.ready flag updates visible across SMP immediately */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_jpeg_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i, j;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (ring->sched.ready)
+ mask |= 1ULL << ((i * adev->jpeg.num_jpeg_rings) + j);
+ }
+ }
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_jpeg_sched_mask_fops,
+ amdgpu_debugfs_jpeg_sched_mask_get,
+ amdgpu_debugfs_jpeg_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (!(adev->jpeg.num_jpeg_inst > 1) && !(adev->jpeg.num_jpeg_rings > 1))
+ return;
+ sprintf(name, "amdgpu_jpeg_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_jpeg_sched_mask_fops);
+#endif
+}
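The JPEG mask flattens (instance, ring) pairs into bit i * num_jpeg_rings + j, and the all-ones mask is built from the product of the two counts. A tiny demo of the layout (instance/ring counts are example values):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int num_inst = 2, num_rings = 3;	/* example sizes */
	uint64_t mask = (1ULL << (num_inst * num_rings)) - 1;

	printf("full mask: 0x%llx\n", (unsigned long long)mask);
	for (unsigned int i = 0; i < num_inst; i++)
		for (unsigned int j = 0; j < num_rings; j++)
			printf("inst %u ring %u -> bit %u\n",
			       i, j, i * num_rings + j);
	return 0;
}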
+
+static ssize_t amdgpu_get_jpeg_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->jpeg.supported_reset);
+}
+
+static DEVICE_ATTR(jpeg_reset_mask, 0444,
+ amdgpu_get_jpeg_reset_mask, NULL);
+
+int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->jpeg.num_jpeg_inst) {
+ r = device_create_file(adev->dev, &dev_attr_jpeg_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (adev->dev->kobj.sd) {
+ if (adev->jpeg.num_jpeg_inst)
+ device_remove_file(adev->dev, &dev_attr_jpeg_reset_mask);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index f9cdd873ac9b..d9cb343a8708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -27,7 +27,8 @@
#include "amdgpu_ras.h"
#define AMDGPU_MAX_JPEG_INSTANCES 4
-#define AMDGPU_MAX_JPEG_RINGS 8
+#define AMDGPU_MAX_JPEG_RINGS 10
+#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
@@ -128,6 +129,7 @@ struct amdgpu_jpeg {
uint16_t inst_mask;
uint8_t num_inst_per_aid;
bool indirect_sram;
+ uint32_t supported_reset;
};
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
@@ -149,5 +151,8 @@ int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
enum AMDGPU_UCODE_ID ucode_id);
+void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev);
+int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev);
+void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 016a6f6c4267..98528ee94c15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -846,7 +846,7 @@ out:
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
uint64_t vm_size;
- uint32_t pcie_gen_mask;
+ uint32_t pcie_gen_mask, pcie_width_mask;
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info)
@@ -934,15 +934,18 @@ out:
dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
- pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
+ pcie_gen_mask = adev->pm.pcie_gen_mask &
+ (adev->pm.pcie_gen_mask >> CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT);
+ pcie_width_mask = adev->pm.pcie_mlw_mask &
+ (adev->pm.pcie_mlw_mask >> CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT);
dev_info->pcie_gen = fls(pcie_gen_mask);
dev_info->pcie_num_lanes =
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
- adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 18ee60378727..3ca03b5e0f91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -348,6 +348,24 @@ static bool amdgpu_mca_bank_should_update(struct amdgpu_device *adev, enum amdgp
return ret;
}
+static bool amdgpu_mca_bank_should_dump(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
+ struct mca_bank_entry *entry)
+{
+ bool ret;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ ret = amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]);
+ break;
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ default:
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set,
struct ras_query_context *qctx)
{
@@ -373,7 +391,8 @@ static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mc
amdgpu_mca_bank_set_add_entry(mca_set, &entry);
- amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx);
+ if (amdgpu_mca_bank_should_dump(adev, type, &entry))
+ amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 10b61ff63802..32b27a1658e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -104,7 +104,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
return 0;
r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
@@ -192,17 +192,6 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
}
- r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
- if (r) {
- dev_err(adev->dev,
- "(%d) read_val_offs alloc failed\n", r);
- goto error;
- }
- adev->mes.read_val_gpu_addr =
- adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
- adev->mes.read_val_ptr =
- (uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];
-
r = amdgpu_mes_doorbell_init(adev);
if (r)
goto error;
@@ -223,8 +212,6 @@ error:
amdgpu_device_wb_free(adev,
adev->mes.query_status_fence_offs[i]);
}
- if (adev->mes.read_val_ptr)
- amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
idr_destroy(&adev->mes.pasid_idr);
idr_destroy(&adev->mes.gang_id_idr);
@@ -249,8 +236,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
amdgpu_device_wb_free(adev,
adev->mes.query_status_fence_offs[i]);
}
- if (adev->mes.read_val_ptr)
- amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
amdgpu_mes_doorbell_free(adev);
@@ -905,7 +890,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
queue_input.me_id = ring->me;
queue_input.pipe_id = ring->pipe;
queue_input.queue_id = ring->queue;
- queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
queue_input.wptr_addr = ring->wptr_gpu_addr;
queue_input.vmid = vmid;
queue_input.use_mmio = use_mmio;
@@ -921,10 +906,19 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
struct mes_misc_op_input op_input;
int r, val = 0;
+ uint32_t addr_offset = 0;
+ uint64_t read_val_gpu_addr;
+ uint32_t *read_val_ptr;
+ if (amdgpu_device_wb_get(adev, &addr_offset)) {
+ DRM_ERROR("critical bug! too many mes readers\n");
+ goto error;
+ }
+ read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
+ read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
op_input.op = MES_MISC_OP_READ_REG;
op_input.read_reg.reg_offset = reg;
- op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;
+ op_input.read_reg.buffer_addr = read_val_gpu_addr;
if (!adev->mes.funcs->misc_op) {
DRM_ERROR("mes rreg is not supported!\n");
@@ -935,9 +929,11 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
if (r)
DRM_ERROR("failed to read reg (0x%x)\n", reg);
else
- val = *(adev->mes.read_val_ptr);
+ val = *(read_val_ptr);
error:
+ if (addr_offset)
+ amdgpu_device_wb_free(adev, addr_offset);
return val;
}
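With this hunk, amdgpu_mes_rreg() no longer depends on the single shared read_val writeback slot removed above; every call claims its own slot, points the MES read at that slot's GPU address, and releases it on exit, so concurrent readers cannot clobber each other. The scoped-allocation shape, reduced to its essentials (a sketch, not a drop-in; error handling simplified):

    u32 offset, val = 0;
    u64 gpu_addr;
    u32 *cpu_ptr;

    if (amdgpu_device_wb_get(adev, &offset))
            return 0;                        /* no free writeback slot */
    gpu_addr = adev->wb.gpu_addr + (offset * 4);
    cpu_ptr  = (u32 *)&adev->wb.wb[offset];

    /* ... submit MES_MISC_OP_READ_REG with buffer_addr = gpu_addr ... */

    val = *cpu_ptr;                          /* firmware wrote the value here */
    amdgpu_device_wb_free(adev, offset);     /* slot lives only for this read */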
@@ -1203,8 +1199,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
- if (r)
+ if (r) {
+ amdgpu_mes_unlock(&adev->mes);
goto clean_up_memory;
+ }
amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
@@ -1237,7 +1235,6 @@ clean_up_ring:
amdgpu_ring_fini(ring);
clean_up_memory:
kfree(ring);
- amdgpu_mes_unlock(&adev->mes);
return r;
}
@@ -1593,6 +1590,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
char ucode_prefix[30];
char fw_name[50];
bool need_retry = false;
+ u32 *ucode_ptr;
int r;
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
@@ -1612,10 +1610,12 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
}
- r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mes.bin", ucode_prefix);
}
@@ -1630,6 +1630,10 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
adev->mes.data_start_addr[pipe] =
le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
+ ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
+ sizeof(union amdgpu_firmware_header));
+ adev->mes.fw_version[pipe] =
+ le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
int ucode, ucode_data;
@@ -1676,6 +1680,29 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
return is_supported;
}
+/* FIXME: node_id will be used to identify the correct MES instance in the future */
+int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
+{
+ struct mes_misc_op_input op_input = {0};
+ int r;
+
+ op_input.op = MES_MISC_OP_CHANGE_CONFIG;
+ op_input.change_config.option.limit_single_process = enable ? 1 : 0;
+
+ if (!adev->mes.funcs->misc_op) {
+ dev_err(adev->dev, "mes change config is not supported!\n");
+ r = -EINVAL;
+ goto error;
+ }
+
+ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ if (r)
+ dev_err(adev->dev, "failed to change_config.\n");
+
+error:
+ return r;
+}
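A caller would toggle the single-process limit like this (a hedged usage sketch; node_id 0 is arbitrary here, and per the FIXME above it is not consumed yet):

    /* enable enforce isolation on the first MES instance */
    r = amdgpu_mes_set_enforce_isolation(adev, 0, true);
    if (r)
            dev_warn(adev->dev, "enforce isolation not applied (%d)\n", r);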
+
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 96788c0f42f1..2df2444ee892 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -40,6 +40,7 @@
#define AMDGPU_MES_VERSION_MASK 0x00000fff
#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
+#define AMDGPU_MES_MSCRATCH_SIZE 0x40000
enum amdgpu_mes_priority_level {
AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
@@ -75,6 +76,7 @@ struct amdgpu_mes {
uint32_t sched_version;
uint32_t kiq_version;
+ uint32_t fw_version[AMDGPU_MAX_MES_PIPES];
bool enable_legacy_queue_map;
uint32_t total_max_queue;
@@ -119,9 +121,6 @@ struct amdgpu_mes {
uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES];
uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES];
- uint32_t read_val_offs;
- uint64_t read_val_gpu_addr;
- uint32_t *read_val_ptr;
uint32_t saved_flags;
@@ -310,6 +309,7 @@ enum mes_misc_opcode {
MES_MISC_OP_WRM_REG_WAIT,
MES_MISC_OP_WRM_REG_WR_WAIT,
MES_MISC_OP_SET_SHADER_DEBUGGER,
+ MES_MISC_OP_CHANGE_CONFIG,
};
struct mes_misc_op_input {
@@ -348,6 +348,21 @@ struct mes_misc_op_input {
uint32_t tcp_watch_cntl[4];
uint32_t trap_en;
} set_shader_debugger;
+
+ struct {
+ union {
+ struct {
+ uint32_t limit_single_process : 1;
+ uint32_t enable_hws_logging_buffer : 1;
+ uint32_t reserved : 30;
+ };
+ uint32_t all;
+ } option;
+ struct {
+ uint32_t tdr_level;
+ uint32_t tdr_delay;
+ } tdr_config;
+ } change_config;
};
};
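The option union above gives callers two views of the same 32-bit word: individual flag bits or the packed value. A standalone check of that packing, assuming the usual LSB-first bitfield layout (bitfield order is ABI dependent, so this is illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    union config_option {
            struct {
                    uint32_t limit_single_process : 1;
                    uint32_t enable_hws_logging_buffer : 1;
                    uint32_t reserved : 30;
            };
            uint32_t all;
    };

    int main(void)
    {
            union config_option opt = { .all = 0 };

            opt.limit_single_process = 1;    /* bit 0 on common ABIs */
            printf("all = 0x%x\n", opt.all); /* typically prints all = 0x1 */
            return 0;
    }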
@@ -518,4 +533,7 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
}
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
+
+int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
+
#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index f61d117b0caf..79c2f807b9fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -101,6 +101,7 @@ struct amdgpu_nbio_funcs {
int (*get_compute_partition_mode)(struct amdgpu_device *adev);
u32 (*get_memory_partition_mode)(struct amdgpu_device *adev,
u32 *supp_modes);
+ bool (*is_nps_switch_requested)(struct amdgpu_device *adev);
u64 (*get_pcie_replay_count)(struct amdgpu_device *adev);
void (*set_reg_remap)(struct amdgpu_device *adev);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 44819cdba7fb..96f4b8904e9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -40,6 +40,8 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_vram_mgr.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_dma_buf.h"
/**
* DOC: amdgpu_object
@@ -161,7 +163,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
* When GTT is just an alternative to VRAM make sure that we
* only use it as fallback and still try to fill up VRAM first.
*/
- if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+ if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+ !(adev->flags & AMD_IS_APU))
places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}
@@ -322,6 +325,9 @@ error_free:
*
* Allocates and pins a BO for kernel internal use.
*
+ * This function is exported to allow the V4L2 isp device
+ * external to drm device to create and access the kernel BO.
+ *
* Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
*
* Returns:
@@ -345,6 +351,76 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
return 0;
}
+EXPORT_SYMBOL(amdgpu_bo_create_kernel);
+
+/**
+ * amdgpu_bo_create_isp_user - create user BO for isp
+ *
+ * @adev: amdgpu device object
+ * @dma_buf: DMABUF handle for isp buffer
+ * @domain: where to place it
+ * @bo: used to initialize BOs in structures
+ * @gpu_addr: GPU addr of the pinned BO
+ *
+ * Imports the isp DMABUF to allocate and pin a user BO for isp internal use. It
+ * performs a GART allocation to generate a gpu_addr for the BO, making it
+ * accessible through the GART aperture for the ISP HW.
+ *
+ * This function is exported to allow the V4L2 isp device external to drm device
+ * to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dma_buf, u32 domain, struct amdgpu_bo **bo,
+ u64 *gpu_addr)
+
+{
+ struct drm_gem_object *gem_obj;
+ int r;
+
+ gem_obj = amdgpu_gem_prime_import(&adev->ddev, dma_buf);
+ *bo = gem_to_amdgpu_bo(gem_obj);
+ if (!(*bo)) {
+ dev_err(adev->dev, "failed to get valid isp user bo\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_bo_reserve(*bo, false);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to reserve isp user bo\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_pin(*bo, domain);
+ if (r) {
+ dev_err(adev->dev, "(%d) isp user bo pin failed\n", r);
+ goto error_unreserve;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(*bo)->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", *bo);
+ goto error_unpin;
+ }
+
+ if (!WARN_ON(!gpu_addr))
+ *gpu_addr = amdgpu_bo_gpu_offset(*bo);
+
+ amdgpu_bo_unreserve(*bo);
+
+ return 0;
+
+error_unpin:
+ amdgpu_bo_unpin(*bo);
+error_unreserve:
+ amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unref(bo);
+
+ return r;
+}
+EXPORT_SYMBOL(amdgpu_bo_create_isp_user);
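From the V4L2 side, the expected lifecycle would be: resolve the dma-buf fd, import and pin through amdgpu_bo_create_isp_user(), program the ISP with the returned GART address, and release with amdgpu_bo_free_isp_user(). A hedged sketch under those assumptions (the helper name, fd source, and domain choice are illustrative):

    static int demo_isp_map(struct amdgpu_device *adev, int fd,
                            struct amdgpu_bo **bo, u64 *gpu_addr)
    {
            struct dma_buf *dbuf = dma_buf_get(fd); /* fd from userspace */
            int r;

            if (IS_ERR(dbuf))
                    return PTR_ERR(dbuf);

            r = amdgpu_bo_create_isp_user(adev, dbuf, AMDGPU_GEM_DOMAIN_GTT,
                                          bo, gpu_addr);
            dma_buf_put(dbuf);      /* the import holds its own reference */
            return r;
    }

    /* ... program ISP DMA with *gpu_addr; on teardown:
     * amdgpu_bo_free_isp_user(*bo);
     */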
/**
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
@@ -421,6 +497,9 @@ error:
* @cpu_addr: pointer to where the BO's CPU memory space address was stored
*
* unmaps and unpin a BO for kernel internal use.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to drm device to free the kernel BO.
*/
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr)
@@ -445,6 +524,30 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (cpu_addr)
*cpu_addr = NULL;
}
+EXPORT_SYMBOL(amdgpu_bo_free_kernel);
+
+/**
+ * amdgpu_bo_free_isp_user - free BO for isp use
+ *
+ * @bo: amdgpu isp user BO to free
+ *
+ * unpin and unref BO for isp internal use.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to drm device to free the isp user BO.
+ */
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
+{
+ if (bo == NULL)
+ return;
+
+ if (amdgpu_bo_reserve(bo, true) == 0) {
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+ }
+ amdgpu_bo_unref(&bo);
+}
+EXPORT_SYMBOL(amdgpu_bo_free_isp_user);
/* Validate bo size is bit bigger than the request domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
@@ -1148,7 +1251,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_resource *old_mem = bo->resource;
struct amdgpu_bo *abo;
@@ -1156,7 +1258,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = ttm_to_amdgpu_bo(bo);
- amdgpu_vm_bo_invalidate(adev, abo, evict);
+ amdgpu_vm_bo_move(abo, new_mem, evict);
amdgpu_bo_kunmap(abo);
@@ -1169,58 +1271,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
old_mem ? old_mem->mem_type : -1);
}
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
- struct amdgpu_mem_stats *stats)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_resource *res = bo->tbo.resource;
- uint64_t size = amdgpu_bo_size(bo);
- struct drm_gem_object *obj;
- bool shared;
-
- /* Abort if the BO doesn't currently have a backing store */
- if (!res)
- return;
-
- obj = &bo->tbo.base;
- shared = drm_gem_object_is_shared_for_memory_stats(obj);
-
- switch (res->mem_type) {
- case TTM_PL_VRAM:
- stats->vram += size;
- if (amdgpu_res_cpu_visible(adev, res))
- stats->visible_vram += size;
- if (shared)
- stats->vram_shared += size;
- break;
- case TTM_PL_TT:
- stats->gtt += size;
- if (shared)
- stats->gtt_shared += size;
- break;
- case TTM_PL_SYSTEM:
- default:
- stats->cpu += size;
- if (shared)
- stats->cpu_shared += size;
- break;
- }
-
- if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
- stats->requested_vram += size;
- if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- stats->requested_visible_vram += size;
-
- if (res->mem_type != TTM_PL_VRAM) {
- stats->evicted_vram += size;
- if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- stats->evicted_visible_vram += size;
- }
- } else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
- stats->requested_gtt += size;
- }
-}
-
/**
* amdgpu_bo_release_notify - notification about a BO being released
* @bo: pointer to a buffer object
@@ -1436,6 +1486,45 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_mem_stats_placement - bo placement for memory accounting
+ * @bo: the buffer object we should look at
+ *
+ * A BO can have multiple preferred placements; to avoid double counting, we
+ * file it under a single placement for memory stats.
+ * Luckily, if we take the highest set bit in preferred_domains the result is
+ * quite sensible.
+ *
+ * Returns:
+ * Which of the placements should the BO be accounted under.
+ */
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
+{
+ uint32_t domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
+
+ if (!domain)
+ return TTM_PL_SYSTEM;
+
+ switch (rounddown_pow_of_two(domain)) {
+ case AMDGPU_GEM_DOMAIN_CPU:
+ return TTM_PL_SYSTEM;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ return TTM_PL_TT;
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ return TTM_PL_VRAM;
+ case AMDGPU_GEM_DOMAIN_GDS:
+ return AMDGPU_PL_GDS;
+ case AMDGPU_GEM_DOMAIN_GWS:
+ return AMDGPU_PL_GWS;
+ case AMDGPU_GEM_DOMAIN_OA:
+ return AMDGPU_PL_OA;
+ case AMDGPU_GEM_DOMAIN_DOORBELL:
+ return AMDGPU_PL_DOORBELL;
+ default:
+ return TTM_PL_SYSTEM;
+ }
+}
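rounddown_pow_of_two() on a non-empty mask isolates the highest set bit, which keeps the switch above well defined even for multi-domain BOs: a BO preferring VRAM|GTT (0x4 | 0x2 = 0x6) rounds down to 0x4 and is accounted as VRAM. A standalone illustration of the bit trick (the domain bit values mirror the AMDGPU_GEM_DOMAIN_* flags used here):

    #include <stdio.h>

    /* keep only the highest set bit, like rounddown_pow_of_two() on a mask */
    static unsigned int highest_bit(unsigned int x)
    {
            while (x & (x - 1))
                    x &= x - 1;     /* clear the lowest set bit until one remains */
            return x;
    }

    int main(void)
    {
            unsigned int GTT = 0x2, VRAM = 0x4;

            printf("0x%x -> 0x%x\n", VRAM | GTT, highest_bit(VRAM | GTT));
            /* prints: 0x6 -> 0x4, i.e. accounted under VRAM */
            return 0;
    }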
+
+/**
* amdgpu_bo_get_preferred_domain - get preferred domain
* @adev: amdgpu device object
* @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 717e47b46167..375448627f7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -139,33 +139,6 @@ struct amdgpu_bo_vm {
struct amdgpu_vm_bo_base entries[];
};
-struct amdgpu_mem_stats {
- /* current VRAM usage, includes visible VRAM */
- uint64_t vram;
- /* current shared VRAM usage, includes visible VRAM */
- uint64_t vram_shared;
- /* current visible VRAM usage */
- uint64_t visible_vram;
- /* current GTT usage */
- uint64_t gtt;
- /* current shared GTT usage */
- uint64_t gtt_shared;
- /* current system memory usage */
- uint64_t cpu;
- /* current shared system memory usage */
- uint64_t cpu_shared;
- /* sum of evicted buffers, includes visible VRAM */
- uint64_t evicted_vram;
- /* sum of evicted buffers due to CPU access */
- uint64_t evicted_visible_vram;
- /* how much userspace asked for, includes vis.VRAM */
- uint64_t requested_vram;
- /* how much userspace asked for */
- uint64_t requested_visible_vram;
- /* how much userspace asked for */
- uint64_t requested_gtt;
-};
-
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
@@ -287,6 +260,10 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dbuf, u32 domain,
+ struct amdgpu_bo **bo,
+ u64 *gpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
uint64_t offset, uint64_t size,
struct amdgpu_bo **bo_ptr, void **cpu_addr);
@@ -298,6 +275,7 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
@@ -327,8 +305,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
- struct amdgpu_mem_stats *stats);
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain);
@@ -363,8 +340,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct drm_suballoc **sa_bo,
unsigned int size);
-void amdgpu_sa_bo_free(struct amdgpu_device *adev,
- struct drm_suballoc **sa_bo,
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo,
struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
index e8adfd0a570a..34b5e22b44e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -137,7 +137,8 @@ void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev)
if (ret)
return;
- device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used);
+ if (adev->dev->kobj.sd)
+ device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 0b28b2cf1517..babe94ade247 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -159,9 +159,9 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
return ret;
}
-static int psp_early_init(void *handle)
+static int psp_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
psp->autoload_supported = true;
@@ -208,6 +208,7 @@ static int psp_early_init(void *handle)
psp->boot_time_tmr = false;
fallthrough;
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = false;
@@ -359,6 +360,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
int i;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
return false;
@@ -421,9 +423,9 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
return ret;
}
-static int psp_sw_init(void *handle)
+static int psp_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
int ret;
struct psp_runtime_boot_cfg_entry boot_cfg_entry;
@@ -527,9 +529,9 @@ failed1:
return ret;
}
-static int psp_sw_fini(void *handle)
+static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd = psp->cmd;
@@ -639,6 +641,8 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "AUTOLOAD_RLC";
case GFX_CMD_ID_BOOT_CFG:
return "BOOT_CFG";
+ case GFX_CMD_ID_CONFIG_SQ_PERFMON:
+ return "CONFIG_SQ_PERFMON";
default:
return "UNKNOWN CMD";
}
@@ -868,6 +872,7 @@ static bool psp_skip_tmr(struct psp_context *psp)
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return true;
default:
@@ -1043,6 +1048,31 @@ static int psp_rl_load(struct amdgpu_device *adev)
return ret;
}
+int psp_memory_partition(struct psp_context *psp, int mode)
+{
+ struct psp_gfx_cmd_resp *cmd;
+ int ret;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
+ cmd->cmd.cmd_memory_part.mode = mode;
+
+ dev_info(psp->adev->dev,
+ "Requesting %d memory partition change through PSP", mode);
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "PSP request failed to change to NPS%d mode\n", mode);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
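A hedged usage sketch: the mode constant comes from enum amdgpu_memory_partition (AMDGPU_NPS1_PARTITION_MODE is referenced elsewhere in this series; AMDGPU_NPS4_PARTITION_MODE is assumed to exist alongside it), and the new mode is expected to take effect only after a subsequent reinitialization, since this command merely queues the request with PSP:

    /* queue a framebuffer NPS4 switch request with PSP */
    r = psp_memory_partition(&adev->psp, AMDGPU_NPS4_PARTITION_MODE);
    if (r)
            dev_err(adev->dev, "NPS mode change request failed (%d)\n", r);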
+
int psp_spatial_partition(struct psp_context *psp, int mode)
{
struct psp_gfx_cmd_resp *cmd;
@@ -1807,6 +1837,9 @@ int psp_ras_initialize(struct psp_context *psp)
ras_cmd->ras_in_message.init_flags.xcc_mask =
adev->gfx.xcc_mask;
ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ ras_cmd->ras_in_message.init_flags.nps_mode =
+ adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
ret = psp_ta_load(psp, &psp->ras_context.context);
@@ -2234,7 +2267,8 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return -EINVAL;
if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
- ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
return -EINVAL;
ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
@@ -2264,6 +2298,19 @@ bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
}
}
+bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return false;
+
+ if (psp->funcs && psp->funcs->is_reload_needed)
+ return psp->funcs->is_reload_needed(psp);
+
+ return false;
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -2342,6 +2389,15 @@ static int psp_hw_start(struct psp_context *psp)
}
}
+ if ((is_psp_fw_valid(psp->spdm_drv)) &&
+ (psp->funcs->bootloader_load_spdm_drv != NULL)) {
+ ret = psp_bootloader_load_spdm_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load spdm_drv failed!\n");
+ return ret;
+ }
+ }
+
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
@@ -2958,16 +3014,13 @@ failed:
return ret;
}
-static int psp_hw_init(void *handle)
+static int psp_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
mutex_lock(&adev->firmware.mutex);
- /*
- * This sequence is just used on hw_init only once, no need on
- * resume.
- */
+
ret = amdgpu_ucode_init_bo(adev);
if (ret)
goto failed;
@@ -2987,9 +3040,9 @@ failed:
return -EINVAL;
}
-static int psp_hw_fini(void *handle)
+static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
if (psp->ta_fw) {
@@ -3011,10 +3064,10 @@ static int psp_hw_fini(void *handle)
return 0;
}
-static int psp_suspend(void *handle)
+static int psp_suspend(struct amdgpu_ip_block *ip_block)
{
int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
if (adev->gmc.xgmi.num_physical_nodes > 1 &&
@@ -3074,10 +3127,10 @@ out:
return ret;
}
-static int psp_resume(void *handle)
+static int psp_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
dev_info(adev->dev, "PSP is resuming...\n");
@@ -3092,6 +3145,10 @@ static int psp_resume(void *handle)
mutex_lock(&adev->firmware.mutex);
+ ret = amdgpu_ucode_init_bo(adev);
+ if (ret)
+ goto failed;
+
ret = psp_hw_start(psp);
if (ret)
goto failed;
@@ -3246,7 +3303,8 @@ int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
const struct psp_firmware_header_v1_0 *asd_hdr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, "amdgpu/%s_asd.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_asd.bin", chip_name);
if (err)
goto out;
@@ -3268,7 +3326,8 @@ int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, "amdgpu/%s_toc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_toc.bin", chip_name);
if (err)
goto out;
@@ -3364,6 +3423,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ipkeymgr_drv.start_addr = ucode_start_addr;
break;
+ case PSP_FW_TYPE_PSP_SPDM_DRV:
+ psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->spdm_drv.start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;
@@ -3431,7 +3496,8 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
uint8_t *ucode_array_start_addr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
@@ -3523,6 +3589,36 @@ out:
return err;
}
+static bool is_ta_fw_applicable(struct psp_context *psp,
+ const struct psp_fw_bin_desc *desc)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t fw_version;
+
+ switch (desc->fw_type) {
+ case TA_FW_TYPE_PSP_XGMI:
+ case TA_FW_TYPE_PSP_XGMI_AUX:
+ /* for now, AUX TA only exists on 13.0.6 ta bin,
+ * from v20.00.0x.14
+ */
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
+ IP_VERSION(13, 0, 6)) {
+ fw_version = le32_to_cpu(desc->fw_version);
+
+ if (adev->flags & AMD_IS_APU &&
+ (fw_version & 0xff) >= 0x14)
+ return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
+ else
+ return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
static int parse_ta_bin_descriptor(struct psp_context *psp,
const struct psp_fw_bin_desc *desc,
const struct ta_firmware_header_v2_0 *ta_hdr)
@@ -3532,6 +3628,9 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
if (!psp || !desc || !ta_hdr)
return -EINVAL;
+ if (!is_ta_fw_applicable(psp, desc))
+ return 0;
+
ucode_start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(desc->offset_bytes) +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
@@ -3544,6 +3643,7 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
psp->asd_context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_XGMI:
+ case TA_FW_TYPE_PSP_XGMI_AUX:
psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
@@ -3673,7 +3773,8 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
struct amdgpu_device *adev = psp->adev;
int err;
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
@@ -3708,7 +3809,8 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, "amdgpu/%s_cap.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s_cap.bin", chip_name);
if (err) {
if (err == -ENODEV) {
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
@@ -3736,13 +3838,49 @@ out:
return err;
}
-static int psp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
+int psp_config_sq_perfmon(struct psp_context *psp,
+ uint32_t xcp_id, bool core_override_enable,
+ bool reg_override_enable, bool perfmon_override_enable)
+{
+ struct psp_gfx_cmd_resp *cmd;
+ int ret;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (xcp_id > MAX_XCP) {
+ dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
+ return -EINVAL;
+ }
+
+ if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
+ dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
+ return -EINVAL;
+ }
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
+ cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
+ cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
+ cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
+ cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (ret)
+ dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
+ xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
+
+ release_psp_cmd_buf(psp);
+ return ret;
+}
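A hedged usage example, enabling only the perfmon override on XCP 0 (the parameter choice is illustrative):

    r = psp_config_sq_perfmon(&adev->psp, 0 /* xcp_id */,
                              false /* core */, false /* reg */,
                              true /* perfmon */);
    if (r)
            dev_warn(adev->dev, "sq perfmon config rejected (%d)\n", r);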
+
+static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
return 0;
}
-static int psp_set_powergating_state(void *handle,
+static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3754,10 +3892,12 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_ip_block *ip_block;
uint32_t fw_ver;
int ret;
- if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
dev_info(adev->dev, "PSP block is not ready yet\n.");
return -EBUSY;
}
@@ -3786,8 +3926,10 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
struct amdgpu_bo *fw_buf_bo = NULL;
uint64_t fw_pri_mc_addr;
void *fw_pri_cpu_addr;
+ struct amdgpu_ip_block *ip_block;
- if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
dev_err(adev->dev, "PSP block is not ready yet.");
return -EBUSY;
}
@@ -3795,7 +3937,8 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
if (!drm_dev_enter(ddev, &idx))
return -ENODEV;
- ret = amdgpu_ucode_request(adev, &usbc_pd_fw, "amdgpu/%s", buf);
+ ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s", buf);
if (ret)
goto fail;
@@ -3999,7 +4142,7 @@ static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribu
}
static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
int idx)
{
struct device *dev = kobj_to_dev(kobj);
@@ -4019,17 +4162,12 @@ const struct attribute_group amdgpu_flash_attr_group = {
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
- .late_init = NULL,
.sw_init = psp_sw_init,
.sw_fini = psp_sw_fini,
.hw_init = psp_hw_init,
.hw_fini = psp_hw_fini,
.suspend = psp_suspend,
.resume = psp_resume,
- .is_idle = NULL,
- .check_soft_reset = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
.set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index e8abbbcb4326..8d5acc415d38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -80,6 +80,7 @@ enum psp_bootloader_cmd {
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
+ PSP_BL__LOAD_SPDMDRV = 0x20000000,
};
enum psp_ring_type {
@@ -120,6 +121,7 @@ struct psp_funcs {
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
int (*bootloader_load_ras_drv)(struct psp_context *psp);
int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp);
+ int (*bootloader_load_spdm_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_create)(struct psp_context *psp,
enum psp_ring_type ring_type);
@@ -139,6 +141,7 @@ struct psp_funcs {
int (*fatal_error_recovery_quirk)(struct psp_context *psp);
bool (*get_ras_capability)(struct psp_context *psp);
bool (*is_aux_sos_load_required)(struct psp_context *psp);
+ bool (*is_reload_needed)(struct psp_context *psp);
};
struct ta_funcs {
@@ -342,6 +345,7 @@ struct psp_context {
struct psp_bin_desc dbg_drv;
struct psp_bin_desc ras_drv;
struct psp_bin_desc ipkeymgr_drv;
+ struct psp_bin_desc spdm_drv;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -433,6 +437,9 @@ struct amdgpu_psp_funcs {
#define psp_bootloader_load_ipkeymgr_drv(psp) \
((psp)->funcs->bootloader_load_ipkeymgr_drv ? \
(psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0)
+#define psp_bootloader_load_spdm_drv(psp) \
+ ((psp)->funcs->bootloader_load_spdm_drv ? \
+ (psp)->funcs->bootloader_load_spdm_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
@@ -552,9 +559,15 @@ int psp_load_fw_list(struct psp_context *psp,
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
int psp_spatial_partition(struct psp_context *psp, int mode);
+int psp_memory_partition(struct psp_context *psp, int mode);
int is_psp_fw_valid(struct psp_bin_desc bin);
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
+
+int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
+bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 1a1395c5fff1..f0924aa3f4e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -36,6 +36,7 @@
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
+#include "nbif_v6_3_1.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
@@ -192,7 +193,7 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
- err_data.err_addr_cnt);
+ err_data.err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, NULL);
}
@@ -1214,6 +1215,42 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
}
}
+static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
+ struct ras_query_if *query_if,
+ struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
+{
+ unsigned long new_ue, new_ce, new_de;
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
+ const char *blk_name = get_ras_block_str(&query_if->head);
+ u64 event_id = qctx->evid.event_id;
+
+ new_ce = err_data->ce_count - obj->err_data.ce_count;
+ new_ue = err_data->ue_count - obj->err_data.ue_count;
+ new_de = err_data->de_count - obj->err_data.de_count;
+
+ if (new_ce) {
+ RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
+ "detected in %s block\n",
+ new_ce,
+ blk_name);
+ }
+
+ if (new_ue) {
+ RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
+ "detected in %s block\n",
+ new_ue,
+ blk_name);
+ }
+
+ if (new_de) {
+ RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
+ "detected in %s block\n",
+ new_de,
+ blk_name);
+ }
+}
+
static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
struct ras_err_node *err_node;
@@ -1237,6 +1274,15 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
}
}
+static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
+ struct ras_err_data *err_data)
+{
+ /* Host reports absolute counts */
+ obj->err_data.ue_count = err_data->ue_count;
+ obj->err_data.ce_count = err_data->ce_count;
+ obj->err_data.de_count = err_data->de_count;
+}
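Because the host reports running totals rather than increments, the report path subtracts the cached VF counts before logging: if the cached CE count is 7 and the host now reports 10, three new correctable errors are logged and the cache becomes 10. A standalone sketch of that bookkeeping:

    #include <stdio.h>

    struct counts { unsigned long ce, ue, de; };

    static void report_and_update(struct counts *cached, const struct counts *host)
    {
            /* host values are absolute; log only the delta, then resync */
            printf("new ce=%lu ue=%lu de=%lu\n",
                   host->ce - cached->ce,
                   host->ue - cached->ue,
                   host->de - cached->de);
            *cached = *host;
    }

    int main(void)
    {
            struct counts cached = { 7, 0, 1 }, host = { 10, 0, 1 };

            report_and_update(&cached, &host);      /* prints: new ce=3 ue=0 de=0 */
            return 0;
    }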
+
static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
struct ras_common_if head;
@@ -1253,7 +1299,7 @@ int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
struct ras_manager *obj;
/* in resume phase, no need to create aca fs node */
- if (adev->in_suspend || amdgpu_in_reset(adev))
+ if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
return 0;
obj = get_ras_manager(adev, blk);
@@ -1323,7 +1369,9 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
return -EINVAL;
- if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
+ if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
+ return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
+ } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
amdgpu_ras_get_ecc_info(adev, err_data);
} else {
@@ -1405,14 +1453,22 @@ static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
if (ret)
goto out_fini_err_data;
- amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
+ if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
+ amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
+ amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
+ } else {
+ /* Host provides absolute error counts. First generate the report
+ * using the previous VF internal count against new host count.
+ * then update the VF internal count.
+ */
+ amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
+ amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
+ }
info->ue_count = obj->err_data.ue_count;
info->ce_count = obj->err_data.ce_count;
info->de_count = obj->err_data.de_count;
- amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
-
out_fini_err_data:
amdgpu_ras_error_data_fini(&err_data);
@@ -1960,6 +2016,7 @@ static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = true;
break;
@@ -2101,6 +2158,16 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
/* Fatal error events are handled on host side */
if (amdgpu_sriov_vf(adev))
return;
+ /**
+ * If the current interrupt is caused by a non-fatal RAS error, skip
+ * the check for a fatal error. For fatal errors, the FED status of all
+ * devices in the XGMI hive gets set when the first device receives the
+ * fatal error interrupt. The error propagates to the other devices as
+ * well, so make sure to ack the interrupt regardless of FED status.
+ */
+ if (!amdgpu_ras_get_fed_status(adev) &&
+ amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
+ return;
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_controller_intr_no_bifring)
@@ -2130,6 +2197,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
if (ret)
return;
+ amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
/* both query_poison_status and handle_poison_consumption are optional,
* but at least one of them should be implemented if we need poison
* consumption handler
@@ -2605,6 +2673,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
reset_context.src = AMDGPU_RESET_SRC_RAS;
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
/* Perform full reset in fatal error mode */
if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
@@ -2661,40 +2730,203 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps,
+ struct ras_err_data *err_data)
+{
+ struct ta_ras_query_address_input addr_in;
+ uint32_t socket = 0;
+ int ret = 0;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
+ socket = adev->smuio.funcs->get_socket_id(adev);
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = bps->address;
+ addr_in.ma.socket_id = socket;
+ addr_in.ma.ch_inst = bps->mem_channel;
+ /* tell RAS TA the node instance is not used */
+ addr_in.ma.node_inst = TA_RAS_INV_NODE;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
+ &addr_in, NULL, false);
+
+ return ret;
+}
+
+static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps,
+ struct ras_err_data *err_data)
+{
+ struct ta_ras_query_address_input addr_in;
+ uint32_t die_id, socket = 0;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
+ socket = adev->smuio.funcs->get_socket_id(adev);
+
+ /* although the die id is derived from the PA in nps1 mode, the id is
+ * applicable to any nps mode
+ */
+ if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
+ die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
+ bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
+ else
+ return -EINVAL;
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = bps->address;
+ addr_in.ma.ch_inst = bps->mem_channel;
+ addr_in.ma.umc_inst = bps->mcumc_id;
+ addr_in.ma.node_inst = die_id;
+ addr_in.ma.socket_id = socket;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ return adev->umc.ras->convert_ras_err_addr(adev, err_data,
+ &addr_in, NULL, false);
+ else
+ return -EINVAL;
+}
+
/* it deal with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- struct eeprom_table_record *bps, int pages)
+ struct eeprom_table_record *bps, int pages, bool from_rom)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
+ struct ras_err_data err_data;
+ struct eeprom_table_record *err_rec;
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras_context.ras->eeprom_control;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
int ret = 0;
- uint32_t i;
+ uint32_t i, j, loop_cnt = 1;
+ bool find_pages_per_pa = false;
if (!con || !con->eh_data || !bps || pages <= 0)
return 0;
+ if (from_rom) {
+ err_data.err_addr =
+ kcalloc(adev->umc.retire_unit,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ err_rec = err_data.err_addr;
+ loop_cnt = adev->umc.retire_unit;
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ }
+
mutex_lock(&con->recovery_lock);
data = con->eh_data;
- if (!data)
- goto out;
+ if (!data) {
+ /* Returning 0 as the absence of eh_data is acceptable */
+ goto free;
+ }
for (i = 0; i < pages; i++) {
- if (amdgpu_ras_check_bad_page_unlock(con,
- bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
- continue;
+ if (from_rom &&
+ control->rec_type == AMDGPU_RAS_EEPROM_REC_MCA) {
+ if (!find_pages_per_pa) {
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
+ if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
+ /* may be an old RAS TA; use the PA to find the pages in
+ * one row
+ */
+ if (amdgpu_umc_pages_in_a_row(adev, &err_data,
+ bps[i].retired_page <<
+ AMDGPU_GPU_PAGE_SHIFT)) {
+ ret = -EINVAL;
+ goto free;
+ } else {
+ find_pages_per_pa = true;
+ }
+ } else {
+ /* unsupported cases */
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+ }
+ } else {
+ if (amdgpu_umc_pages_in_a_row(adev, &err_data,
+ bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ }
+ } else {
+ if (from_rom && !find_pages_per_pa) {
+ if (bps[i].retired_page & UMC_CHANNEL_IDX_V2) {
+ /* bad page in any NPS mode in eeprom */
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
+ ret = -EINVAL;
+ goto free;
+ }
+ } else {
+ /* legacy bad page in eeprom, generated only in
+ * NPS1 mode
+ */
+ if (amdgpu_ras_mca2pa(adev, &bps[i], &err_data)) {
+ /* old RAS TA or ASICs which don't support
+ * converting an address via the MCA address
+ */
+ if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
+ find_pages_per_pa = true;
+ err_rec = &bps[i];
+ loop_cnt = 1;
+ } else {
+ /* non-nps1 mode, old RAS TA
+ * can't support it
+ */
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+ }
+ }
- if (!data->space_left &&
- amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
- ret = -ENOMEM;
- goto out;
+ if (!find_pages_per_pa)
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ err_rec = &bps[i];
+ }
}
- amdgpu_ras_reserve_page(adev, bps[i].retired_page);
+ for (j = 0; j < loop_cnt; j++) {
+ if (amdgpu_ras_check_bad_page_unlock(con,
+ err_rec[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
+ if (!data->space_left &&
+ amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ amdgpu_ras_reserve_page(adev, err_rec[j].retired_page);
- memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
- data->count++;
- data->space_left--;
+ memcpy(&data->bps[data->count], &(err_rec[j]),
+ sizeof(struct eeprom_table_record));
+ data->count++;
+ data->space_left--;
+ }
}
+
+free:
+ if (from_rom)
+ kfree(err_data.err_addr);
out:
mutex_unlock(&con->recovery_lock);
@@ -2712,7 +2944,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
- int save_count;
+ int save_count, unit_num, bad_page_num, i;
if (!con || !con->eh_data) {
if (new_cnt)
@@ -2724,19 +2956,32 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- save_count = data->count - control->ras_num_recs;
+ bad_page_num = control->ras_num_bad_pages;
+ save_count = data->count - bad_page_num;
mutex_unlock(&con->recovery_lock);
+ unit_num = save_count / adev->umc.retire_unit;
if (new_cnt)
- *new_cnt = save_count / adev->umc.retire_unit;
+ *new_cnt = unit_num;
/* only new entries are saved */
if (save_count > 0) {
- if (amdgpu_ras_eeprom_append(control,
- &data->bps[control->ras_num_recs],
- save_count)) {
- dev_err(adev->dev, "Failed to save EEPROM table data!");
- return -EIO;
+ if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA) {
+ if (amdgpu_ras_eeprom_append(control,
+ &data->bps[control->ras_num_recs],
+ save_count)) {
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
+ return -EIO;
+ }
+ } else {
+ for (i = 0; i < unit_num; i++) {
+ if (amdgpu_ras_eeprom_append(control,
+ &data->bps[bad_page_num + i * adev->umc.retire_unit],
+ 1)) {
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
+ return -EIO;
+ }
+ }
}
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
@@ -2765,11 +3010,32 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
return -ENOMEM;
ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
- if (ret)
+ if (ret) {
dev_err(adev->dev, "Failed to load EEPROM table records!");
- else
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
+ } else {
+ if (control->ras_num_recs > 1 &&
+ adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ if ((bps[0].address == bps[1].address) &&
+ (bps[0].mem_channel == bps[1].mem_channel))
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
+ else
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
+ }
+
+ ret = amdgpu_ras_eeprom_check(control);
+ if (ret)
+ goto out;
+
+ /* HW not usable */
+ if (amdgpu_ras_is_rma(adev)) {
+ ret = -EHWPOISON;
+ goto out;
+ }
+
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ }
+out:
kfree(bps);
return ret;
}
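The rec_type probe above works because retiring one row in PA format writes adev->umc.retire_unit records that share the same MCA address and channel, while MCA format stores one record per row, so consecutive records differ. A minimal sketch of the same test, with the record reduced to the two compared fields:

    #include <stdbool.h>

    struct rec {
            unsigned long long address;
            unsigned char mem_channel;
    };

    /* true if the table stores expanded physical-address records */
    static bool table_is_pa_format(const struct rec *bps, int nrecs)
    {
            return nrecs > 1 &&
                   bps[0].address == bps[1].address &&
                   bps[0].mem_channel == bps[1].mem_channel;
    }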
@@ -3146,7 +3412,47 @@ static int amdgpu_ras_page_retirement_thread(void *param)
return 0;
}
-int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
+ int ret;
+
+ if (!con || amdgpu_sriov_vf(adev))
+ return 0;
+
+ control = &con->eeprom_control;
+ ret = amdgpu_ras_eeprom_init(control);
+ if (ret)
+ return ret;
+
+ if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
+
+ /* default status is MCA storage */
+ if (control->ras_num_recs <= 1 &&
+ adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
+
+ if (control->ras_num_recs) {
+ ret = amdgpu_ras_load_bad_pages(adev);
+ if (ret)
+ return ret;
+
+ amdgpu_dpm_send_hbm_bad_pages_num(
+ adev, control->ras_num_bad_pages);
+
+ if (con->update_channel_flag == true) {
+ amdgpu_dpm_send_hbm_bad_channel_flag(
+ adev, control->bad_channel_bitmap);
+ con->update_channel_flag = false;
+ }
+ }
+
+ return ret;
+}
+
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data **data;
@@ -3181,31 +3487,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
- /* Todo: During test the SMU might fail to read the eeprom through I2C
- * when the GPU is pending on XGMI reset during probe time
- * (Mostly after second bus reset), skip it now
- */
- if (adev->gmc.xgmi.pending_reset)
- return 0;
- ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
- /*
- * This calling fails when is_rma is true or
- * ret != 0.
- */
- if (amdgpu_ras_is_rma(adev) || ret)
- goto free;
-
- if (con->eeprom_control.ras_num_recs) {
- ret = amdgpu_ras_load_bad_pages(adev);
+ if (init_bp_info) {
+ ret = amdgpu_ras_init_badpage_info(adev);
if (ret)
goto free;
-
- amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
-
- if (con->update_channel_flag == true) {
- amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
- con->update_channel_flag = false;
- }
}
mutex_init(&con->page_rsv_lock);
@@ -3296,6 +3581,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
return true;
default:
@@ -3308,7 +3594,9 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(14, 0, 3):
return true;
default:
return false;
@@ -3438,6 +3726,11 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
if (!amdgpu_ras_asic_supported(adev))
return;
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_get_ras_capability(adev))
+ goto init_ras_enabled_flag;
+ }
+
/* query ras capability from psp */
if (amdgpu_psp_get_ras_capability(&adev->psp))
goto init_ras_enabled_flag;
@@ -3535,7 +3828,7 @@ static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
/* init event manager with node 0 on xgmi system */
- if (!amdgpu_in_reset(adev)) {
+ if (!amdgpu_reset_in_recovery(adev)) {
if (!hive || adev->gmc.xgmi.node_id == 0)
ras_event_mgr_init(ras->event_mgr);
}
@@ -3554,6 +3847,7 @@ static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
break;
@@ -3629,7 +3923,19 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
* check DF RAS */
adev->nbio.ras = &nbio_v4_3_ras;
break;
+ case IP_VERSION(6, 3, 1):
+ if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
+ /* unlike other generations of nbio ras,
+ * nbif v6_3_1 only supports a fatal error interrupt
+ * to inform software that the DF is frozen due to a
+ * system fatal error event. The driver should not
+ * enable nbio ras in such a case. Instead,
+ * check DF RAS
+ */
+ adev->nbio.ras = &nbif_v6_3_1_ras;
+ break;
case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
if (!adev->gmc.is_app_apu)
adev->nbio.ras = &nbio_v7_9_ras;
break;
@@ -3750,7 +4056,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
if (r) {
- if (adev->in_suspend || amdgpu_in_reset(adev)) {
+ if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
/* in resume phase, if fail to enable ras,
* clean up all ras fs nodes, and disable ras */
goto cleanup;
@@ -3762,7 +4068,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
amdgpu_persistent_edc_harvesting(adev, ras_block);
/* in resume phase, no need to create ras fs node */
- if (adev->in_suspend || amdgpu_in_reset(adev))
+ if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
return 0;
ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
@@ -3892,7 +4198,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
amdgpu_ras_event_mgr_init(adev);
if (amdgpu_ras_aca_is_supported(adev)) {
- if (amdgpu_in_reset(adev)) {
+ if (amdgpu_reset_in_recovery(adev)) {
if (amdgpu_aca_is_enabled(adev))
r = amdgpu_aca_reset(adev);
else
@@ -3910,7 +4216,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
}
/* Guest side doesn't need init ras feature */
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
return 0;
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
@@ -4008,7 +4314,7 @@ bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
if (!ras)
return false;
- return atomic_read(&ras->fed);
+ return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
}
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
@@ -4016,8 +4322,48 @@ void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
struct amdgpu_ras *ras;
ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ if (status)
+ set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+ else
+ clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+ }
+}
+
+void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras)
+ ras->ras_err_state = 0;
+}
+
+void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
if (ras)
- atomic_set(&ras->fed, !!status);
+ set_bit(block, &ras->ras_err_state);
+}
+
+bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ if (block == AMDGPU_RAS_BLOCK__ANY)
+ return (ras->ras_err_state != 0);
+ else
+ return test_bit(block, &ras->ras_err_state) ||
+ test_bit(AMDGPU_RAS_BLOCK__LAST,
+ &ras->ras_err_state);
+ }
+
+ return false;
}
static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
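The hunks above replace the single atomic fed flag with a per-block ras_err_state bitmask, where the AMDGPU_RAS_BLOCK__LAST bit doubles as the global fatal-error flag. A minimal userspace sketch of that bookkeeping, assuming a hypothetical block count of 16 and using plain bit operations in place of the kernel's set_bit()/test_bit() helpers:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for the amdgpu enum values */
#define BLOCK_LAST 16   /* plays the role of AMDGPU_RAS_BLOCK__LAST */
#define BLOCK_ANY  (-1) /* plays the role of AMDGPU_RAS_BLOCK__ANY */

static unsigned long err_state; /* plays the role of ras->ras_err_state */

static void set_fed(bool status)
{
	if (status)
		err_state |= 1UL << BLOCK_LAST;    /* fatal error: the LAST bit */
	else
		err_state &= ~(1UL << BLOCK_LAST);
}

static void set_err_poison(int block)
{
	err_state |= 1UL << block;                 /* per-block poison bit */
}

static bool is_err_state(int block)
{
	if (block == BLOCK_ANY)
		return err_state != 0;
	/* a block is in error if its own bit or the global fatal bit is set */
	return (err_state & (1UL << block)) ||
	       (err_state & (1UL << BLOCK_LAST));
}

int main(void)
{
	set_err_poison(3);  /* poison on block 3 only */
	printf("block 3: %d, block 4: %d, any: %d\n",
	       is_err_state(3), is_err_state(4), is_err_state(BLOCK_ANY));
	set_fed(true);      /* a fatal error flips every per-block query */
	printf("block 4 after fed: %d\n", is_err_state(4));
	return 0;
}

Keeping the fatal bit in the same word lets a single test cover both "this block is poisoned" and "everything is suspect".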
@@ -4294,8 +4640,27 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
}
- if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
+ if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ int hive_ras_recovery = 0;
+
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+		/* In the case of multiple GPUs, after one GPU has started
+		 * resetting all GPUs on the hive, the other GPUs do not
+		 * need to trigger a GPU reset again.
+		 */
+ if (!hive_ras_recovery)
+ amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
+ else
+ atomic_set(&ras->in_recovery, 0);
+ } else {
+ flush_work(&ras->recovery_work);
amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
+ }
+
return 0;
}
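The recovery scheduling above can be modeled with C11 atomics: the first caller to flip in_recovery from 0 to 1 owns the decision, and it hands the claim back when the hive is already mid-recovery. A sketch under those assumptions, with a plain bool standing in for the hive->ras_recovery lookup and a printf standing in for amdgpu_reset_domain_schedule():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int in_recovery;    /* plays the role of ras->in_recovery */
static bool hive_recovering;      /* plays the role of hive->ras_recovery */

static void schedule_recovery(void)
{
	printf("recovery work scheduled\n");
}

static void reset_gpu(void)
{
	int expected = 0;

	if (atomic_compare_exchange_strong(&in_recovery, &expected, 1)) {
		/* first claimant: schedule unless the hive is already resetting */
		if (!hive_recovering)
			schedule_recovery();
		else
			atomic_store(&in_recovery, 0); /* give the claim back */
	} else {
		/* already recovering: the kernel path flushes the pending
		 * work and re-schedules; here we just note it */
		printf("recovery already in flight\n");
	}
}

int main(void)
{
	reset_gpu();    /* schedules */
	reset_gpu();    /* already in flight */
	return 0;
}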
@@ -4358,11 +4723,14 @@ bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
return false;
}
- if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
+ if (amdgpu_sriov_vf(adev)) {
+ *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
+ } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
*error_query_mode =
(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
- else
+ } else {
*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
+ }
return true;
}
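amdgpu_ras_get_error_query_mode() now resolves to one of three modes, with the SR-IOV check taking precedence over the firmware-assisted path. A small sketch of that precedence with stubbed predicates (the names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum query_mode { DIRECT_QUERY, FIRMWARE_QUERY, VIRT_COUNT_QUERY };

static enum query_mode pick_mode(bool is_vf, bool fw_assisted, bool debug_mode)
{
	if (is_vf)
		return VIRT_COUNT_QUERY;   /* guests read host-provided telemetry */
	if (fw_assisted)
		return debug_mode ? DIRECT_QUERY : FIRMWARE_QUERY;
	return DIRECT_QUERY;
}

int main(void)
{
	printf("vf: %d\n", pick_mode(true, true, false));
	printf("bare metal, smu assisted: %d\n", pick_mode(false, true, false));
	printf("bare metal, no smu: %d\n", pick_mode(false, false, false));
	return 0;
}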
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 669720a9c60a..82db986c36a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -99,7 +99,8 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__IH,
AMDGPU_RAS_BLOCK__MPIO,
- AMDGPU_RAS_BLOCK__LAST
+ AMDGPU_RAS_BLOCK__LAST,
+ AMDGPU_RAS_BLOCK__ANY = -1
};
enum amdgpu_ras_mca_block {
@@ -365,6 +366,7 @@ enum amdgpu_ras_error_query_mode {
AMDGPU_RAS_INVALID_ERROR_QUERY = 0,
AMDGPU_RAS_DIRECT_ERROR_QUERY = 1,
AMDGPU_RAS_FIRMWARE_ERROR_QUERY = 2,
+ AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY = 3,
};
/* ras error status register fields */
@@ -481,6 +483,8 @@ struct ras_ecc_err {
uint64_t ipid;
uint64_t addr;
uint64_t pa_pfn;
+ /* save global channel index across all UMC instances */
+ uint32_t channel_idx;
struct ras_err_pages err_pages;
};
@@ -557,8 +561,8 @@ struct amdgpu_ras {
struct ras_ecc_log_info umc_ecc_log;
struct delayed_work page_retirement_dwork;
- /* Fatal error detected flag */
- atomic_t fed;
+ /* ras errors detected */
+ unsigned long ras_err_state;
/* RAS event manager */
struct ras_event_manager __event_mgr;
@@ -736,8 +740,8 @@ struct amdgpu_ras_block_hw_ops {
* 8: feature disable
*/
-
-int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
+int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev);
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info);
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
@@ -749,7 +753,7 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- struct eeprom_table_record *bps, int pages);
+ struct eeprom_table_record *bps, int pages, bool from_rom);
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
unsigned long *new_cnt);
@@ -951,6 +955,10 @@ ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *a
void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
+void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+void amdgpu_ras_clear_err_state(struct amdgpu_device *adev);
+bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block);
u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type);
int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index f28f6b4ba765..52c16bfeccaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -470,9 +470,10 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
res = __write_table_ras_info(control);
control->ras_num_recs = 0;
+ control->ras_num_bad_pages = 0;
control->ras_fri = 0;
- amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_recs);
+ amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages);
control->bad_channel_bitmap = 0;
amdgpu_dpm_send_hbm_bad_channel_flag(adev, control->bad_channel_bitmap);
@@ -559,7 +560,7 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
if (amdgpu_bad_page_threshold == -1) {
dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
- con->eeprom_control.ras_num_recs, con->bad_page_cnt_threshold);
+ con->eeprom_control.ras_num_bad_pages, con->bad_page_cnt_threshold);
dev_warn(adev->dev,
"But GPU can be operated due to bad_page_threshold = -1.\n");
return false;
@@ -621,6 +622,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
const u32 num)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(to_amdgpu_device(control));
+ struct amdgpu_device *adev = to_amdgpu_device(control);
u32 a, b, i;
u8 *buf, *pp;
int res;
@@ -723,6 +725,12 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
control->ras_num_recs = 1 + (control->ras_max_record_count + b
- control->ras_fri)
% control->ras_max_record_count;
+
+ if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
+ control->ras_num_bad_pages = control->ras_num_recs;
+ else
+ control->ras_num_bad_pages =
+ control->ras_num_recs * adev->umc.retire_unit;
Out:
kfree(buf);
return res;
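The record-to-page accounting added here is plain arithmetic over the record type: a PA record retires one page, while an MCA record retires a whole unit of pages. A sketch assuming a hypothetical retire_unit of 16 (matching the 1:16 fan-out described in the UMC header later in this patch):

#include <stdint.h>
#include <stdio.h>

enum rec_type { REC_PA, REC_MCA }; /* mirrors amdgpu_ras_eeprom_rec_type */

/* one PA record retires one page; one MCA record retires a whole unit */
static uint32_t bad_pages(enum rec_type t, uint32_t num_recs,
			  uint32_t retire_unit)
{
	return (t == REC_PA) ? num_recs : num_recs * retire_unit;
}

int main(void)
{
	printf("PA:  %u records -> %u pages\n", 10u, bad_pages(REC_PA, 10, 16));
	printf("MCA: %u records -> %u pages\n", 10u, bad_pages(REC_MCA, 10, 16));
	return 0;
}

This is why the threshold checks below now compare ras_num_bad_pages, not ras_num_recs, against bad_page_cnt_threshold.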
@@ -740,10 +748,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
/* Modify the header if it exceeds.
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->ras_num_recs >= ras->bad_page_cnt_threshold) {
+ control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) {
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
if (control->tbl_hdr.version == RAS_TABLE_VER_V2_1) {
control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
@@ -798,9 +806,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
*/
if (amdgpu_bad_page_threshold != 0 &&
control->tbl_hdr.version == RAS_TABLE_VER_V2_1 &&
- control->ras_num_recs < ras->bad_page_cnt_threshold)
+ control->ras_num_bad_pages < ras->bad_page_cnt_threshold)
control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
- control->ras_num_recs) * 100) /
+ control->ras_num_bad_pages) * 100) /
ras->bad_page_cnt_threshold;
/* Recalc the checksum.
@@ -841,7 +849,7 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
const u32 num)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
+ int res, i;
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -855,6 +863,10 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
return -EINVAL;
}
+ /* set the new channel index flag */
+ for (i = 0; i < num; i++)
+ record[i].retired_page |= UMC_CHANNEL_IDX_V2;
+
mutex_lock(&control->ras_tbl_mutex);
res = amdgpu_ras_eeprom_append_table(control, record, num);
@@ -864,6 +876,11 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
amdgpu_ras_debugfs_set_ret_size(control);
mutex_unlock(&control->ras_tbl_mutex);
+
+ /* clear channel index flag, the flag is only saved on eeprom */
+ for (i = 0; i < num; i++)
+ record[i].retired_page &= ~UMC_CHANNEL_IDX_V2;
+
return res;
}
@@ -1373,9 +1390,35 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
}
control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ return 0;
+}
+
+int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int res;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ /* Verify i2c adapter is initialized */
+ if (!adev->pm.ras_eeprom_i2c_bus || !adev->pm.ras_eeprom_i2c_bus->algo)
+ return -ENOENT;
+
+ if (!__get_eeprom_i2c_addr(adev, control))
+ return -EINVAL;
+
+ if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA)
+ control->ras_num_bad_pages = control->ras_num_recs;
+ else
+ control->ras_num_bad_pages =
+ control->ras_num_recs * adev->umc.retire_unit;
+
if (hdr->header == RAS_TABLE_HDR_VAL) {
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
- control->ras_num_recs);
+ control->ras_num_bad_pages);
if (hdr->version == RAS_TABLE_VER_V2_1) {
res = __read_table_ras_info(control);
@@ -1390,9 +1433,9 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
/* Warn if we are at 90% of the threshold or above
*/
- if (10 * control->ras_num_recs >= 9 * ras->bad_page_cnt_threshold)
+ if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold)
dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
- control->ras_num_recs,
+ control->ras_num_bad_pages,
ras->bad_page_cnt_threshold);
} else if (hdr->header == RAS_TABLE_HDR_BAD &&
amdgpu_bad_page_threshold != 0) {
@@ -1403,10 +1446,12 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
}
res = __verify_ras_table_checksum(control);
- if (res)
- DRM_ERROR("RAS Table incorrect checksum or error:%d\n",
+ if (res) {
+ dev_err(adev->dev, "RAS Table incorrect checksum or error:%d\n",
res);
- if (ras->bad_page_cnt_threshold > control->ras_num_recs) {
+ return -EINVAL;
+ }
+ if (ras->bad_page_cnt_threshold > control->ras_num_bad_pages) {
/* This means that the threshold was increased since
* the last time the system was booted, and now,
* ras->bad_page_cnt_threshold - control->num_recs > 0,
@@ -1416,13 +1461,13 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
dev_info(adev->dev,
"records:%d threshold:%d, resetting "
"RAS table header signature",
- control->ras_num_recs,
+ control->ras_num_bad_pages,
ras->bad_page_cnt_threshold);
res = amdgpu_ras_eeprom_correct_header_tag(control,
RAS_TABLE_HDR_VAL);
} else {
dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
if (amdgpu_bad_page_threshold == -1) {
dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -1.");
res = 0;
@@ -1431,7 +1476,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
dev_err(adev->dev,
"RAS records:%d exceed threshold:%d, "
"GPU will not be initialized. Replace this GPU or increase the threshold",
- control->ras_num_recs, ras->bad_page_cnt_threshold);
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
}
}
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index b9ebda577797..81d55cb7b397 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -43,6 +43,19 @@ enum amdgpu_ras_eeprom_err_type {
AMDGPU_RAS_EEPROM_ERR_COUNT,
};
+/*
+ * One UMC MCA address can map to multiple physical addresses (PA),
+ * e.g. 1:16. We use eeprom_table_record.address to store the MCA
+ * address and eeprom_table_record.retired_page to save the PA.
+ *
+ * AMDGPU_RAS_EEPROM_REC_PA: one record stores one PA
+ * AMDGPU_RAS_EEPROM_REC_MCA: one record stores one MCA address
+ */
+enum amdgpu_ras_eeprom_rec_type {
+ AMDGPU_RAS_EEPROM_REC_PA,
+ AMDGPU_RAS_EEPROM_REC_MCA,
+};
+
struct amdgpu_ras_eeprom_table_header {
uint32_t header;
uint32_t version;
@@ -82,6 +95,11 @@ struct amdgpu_ras_eeprom_control {
*/
u32 ras_num_recs;
+	/* the number of bad pages is ras_num_recs or
+ * ras_num_recs * umc.retire_unit
+ */
+ u32 ras_num_bad_pages;
+
/* First record index to read, 0-based.
* Range is [0, num_recs-1]. This is
* an absolute index, starting right after
@@ -102,6 +120,7 @@ struct amdgpu_ras_eeprom_control {
/* Record the channels on which bad pages occurred
*/
u32 bad_channel_bitmap;
+ enum amdgpu_ras_eeprom_rec_type rec_type;
};
/*
@@ -145,6 +164,8 @@ uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *co
void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 66c1a868c0e1..dabfbdf6f1ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -26,6 +26,156 @@
#include "sienna_cichlid.h"
#include "smu_v13_0_10.h"
+static int amdgpu_reset_xgmi_reset_on_init_suspend(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+ /* displays are handled in phase1 */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+ continue;
+
+ /* XXX handle errors */
+ amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+	/* The VCN FW shared region is in the framebuffer; some flags in
+	 * that region are initialized during sw_init. Make sure the
+	 * region is backed up.
+	 */
+ amdgpu_vcn_save_vcpu_bo(adev);
+
+ return 0;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_prep_hwctxt(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev;
+ int r;
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ amdgpu_unregister_gpu_instance(tmp_adev);
+ r = amdgpu_reset_xgmi_reset_on_init_suspend(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "xgmi reset on init: prepare for reset failed");
+ return r;
+ }
+ }
+
+	return 0;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_restore_hwctxt(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
+ int r;
+
+ r = amdgpu_device_reinit_after_reset(reset_context);
+ if (r)
+ return r;
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ if (!tmp_adev->kfd.init_complete) {
+ kgd2kfd_init_zone_device(tmp_adev);
+ amdgpu_amdkfd_device_init(tmp_adev);
+ amdgpu_amdkfd_drm_client_create(tmp_adev);
+ }
+ }
+
+ return r;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_perform_reset(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
+ int r;
+
+ dev_dbg(adev->dev, "xgmi roi - hw reset\n");
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ mutex_lock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset =
+ amdgpu_asic_reset_method(adev);
+ }
+ r = 0;
+ /* Mode1 reset needs to be triggered on all devices together */
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "xgmi reset on init: reset failed with error, %d",
+ r);
+ break;
+ }
+ }
+
+	/* For XGMI wait for all resets to complete before proceeding */
+ if (!r) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
+ }
+
+ return r;
+}
+
+int amdgpu_reset_do_xgmi_reset_on_init(
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *adev;
+ int r;
+
+ if (!reset_device_list || list_empty(reset_device_list) ||
+ list_is_singular(reset_device_list))
+ return -EINVAL;
+
+ adev = list_first_entry(reset_device_list, struct amdgpu_device,
+ reset_list);
+ r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
+ if (r)
+ return r;
+
+ r = amdgpu_reset_perform_reset(adev, reset_context);
+
+ return r;
+}
+
+struct amdgpu_reset_handler xgmi_reset_on_init_handler = {
+ .reset_method = AMD_RESET_METHOD_ON_INIT,
+ .prepare_env = NULL,
+ .prepare_hwcontext = amdgpu_reset_xgmi_reset_on_init_prep_hwctxt,
+ .perform_reset = amdgpu_reset_xgmi_reset_on_init_perform_reset,
+ .restore_hwcontext = amdgpu_reset_xgmi_reset_on_init_restore_hwctxt,
+ .restore_env = NULL,
+ .do_reset = NULL,
+};
+
int amdgpu_reset_init(struct amdgpu_device *adev)
{
int ret = 0;
@@ -33,6 +183,7 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = aldebaran_reset_init(adev);
break;
@@ -56,6 +207,7 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
ret = aldebaran_reset_fini(adev);
break;
@@ -192,3 +344,8 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
strscpy(buf, "unknown", len);
}
}
+
+bool amdgpu_reset_in_recovery(struct amdgpu_device *adev)
+{
+ return (adev->init_lvl->level == AMDGPU_INIT_LEVEL_RESET_RECOVERY);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 1cb920abc2fe..4d9b9701139b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -153,4 +153,11 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \
(handler = (*reset_ctl->reset_handlers)[i]); \
++i)
+
+extern struct amdgpu_reset_handler xgmi_reset_on_init_handler;
+int amdgpu_reset_do_xgmi_reset_on_init(
+ struct amdgpu_reset_context *reset_context);
+
+bool amdgpu_reset_in_recovery(struct amdgpu_device *adev);
+
#endif
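xgmi_reset_on_init_handler fills in the driver's stage-callback shape: prepare, perform, restore, each stage optional. A self-contained sketch of that dispatch pattern, with hypothetical stage functions in place of the amdgpu callbacks:

#include <stdio.h>

struct reset_handler {
	int (*prepare_hwcontext)(void *ctx);
	int (*perform_reset)(void *ctx);
	int (*restore_hwcontext)(void *ctx);
};

static int prep(void *ctx)    { (void)ctx; printf("prepare\n"); return 0; }
static int perform(void *ctx) { (void)ctx; printf("reset\n");   return 0; }
static int restore(void *ctx) { (void)ctx; printf("restore\n"); return 0; }

static const struct reset_handler on_init_handler = {
	.prepare_hwcontext = prep,
	.perform_reset     = perform,
	.restore_hwcontext = restore,
};

/* run the stages in order, stopping at the first failure */
static int do_reset(const struct reset_handler *h, void *ctx)
{
	int r;

	r = h->prepare_hwcontext(ctx);
	if (r)
		return r;
	r = h->perform_reset(ctx);
	if (r)
		return r;
	return h->restore_hwcontext(ctx);
}

int main(void)
{
	return do_reset(&on_init_handler, NULL);
}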
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 690976665cf6..a6e28fe3f8d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -108,10 +108,22 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
*/
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- int i;
+ uint32_t occupied, chunk1, chunk2;
- for (i = 0; i < count; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ occupied = ring->wptr & ring->buf_mask;
+ chunk1 = ring->buf_mask + 1 - occupied;
+ chunk1 = (chunk1 >= count) ? count : chunk1;
+ chunk2 = count - chunk1;
+
+ if (chunk1)
+ memset32(&ring->ring[occupied], ring->funcs->nop, chunk1);
+
+ if (chunk2)
+ memset32(ring->ring, ring->funcs->nop, chunk2);
+
+ ring->wptr += count;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw -= count;
}
/**
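The rewritten amdgpu_ring_insert_nop() replaces a per-dword write loop with at most two bulk fills, split where the ring buffer wraps. A userspace sketch of the same index math, assuming a power-of-two ring of 8 entries and providing a local memset32() in place of the kernel helper:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8 /* must be a power of two */

static uint32_t ring[RING_SIZE];
static uint32_t wptr;
static const uint32_t buf_mask = RING_SIZE - 1;

/* local stand-in for the kernel's memset32() */
static void memset32(uint32_t *dst, uint32_t v, size_t n)
{
	while (n--)
		*dst++ = v;
}

static void insert_nop(uint32_t nop, uint32_t count)
{
	uint32_t occupied = wptr & buf_mask;
	uint32_t chunk1 = buf_mask + 1 - occupied; /* room before the wrap */
	uint32_t chunk2;

	chunk1 = (chunk1 >= count) ? count : chunk1;
	chunk2 = count - chunk1;                   /* remainder after wrap */

	if (chunk1)
		memset32(&ring[occupied], nop, chunk1);
	if (chunk2)
		memset32(ring, nop, chunk2);

	wptr += count;
}

int main(void)
{
	wptr = 6; /* force a wrap: 2 slots left before the end, 3 needed */
	insert_nop(0xdeadbeef, 3);
	for (int i = 0; i < RING_SIZE; i++)
		printf("ring[%d] = 0x%08x\n", i, ring[i]);
	return 0;
}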
@@ -141,6 +153,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
uint32_t count;
+ if (ring->count_dw < 0)
+ DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+
/* We pad to match fetch size */
count = ring->funcs->align_mask + 1 -
(ring->wptr & ring->funcs->align_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index f93f51002201..dee5a1b4e572 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -246,7 +246,7 @@ struct amdgpu_ring {
struct drm_gpu_scheduler sched;
struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
+ uint32_t *ring;
unsigned rptr_offs;
u64 rptr_gpu_addr;
volatile u32 *rptr_cpu_addr;
@@ -288,7 +288,7 @@ struct amdgpu_ring {
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
unsigned int set_q_mode_offs;
- volatile u32 *set_q_mode_ptr;
+ u32 *set_q_mode_ptr;
u64 set_q_mode_token;
unsigned vm_hub;
unsigned vm_inv_eng;
@@ -377,8 +377,6 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
- if (ring->count_dw <= 0)
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
ring->ring[ring->wptr++ & ring->buf_mask] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
@@ -388,13 +386,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
void *src, int count_dw)
{
unsigned occupied, chunk1, chunk2;
- void *dst;
-
- if (unlikely(ring->count_dw < count_dw))
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
occupied = ring->wptr & ring->buf_mask;
- dst = (void *)&ring->ring[occupied];
chunk1 = ring->buf_mask + 1 - occupied;
chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
chunk2 = count_dw - chunk1;
@@ -402,12 +395,11 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
chunk2 <<= 2;
if (chunk1)
- memcpy(dst, src, chunk1);
+ memcpy(&ring->ring[occupied], src, chunk1);
if (chunk2) {
src += chunk1;
- dst = (void *)ring->ring;
- memcpy(dst, src, chunk2);
+ memcpy(ring->ring, src, chunk2);
}
ring->wptr += count_dw;
@@ -470,8 +462,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size,
enum amdgpu_ib_pool_type pool,
struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f);
+void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ibs, struct amdgpu_job *job,
struct dma_fence **f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 10df731998b2..39070b2a4c04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -93,8 +93,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
return 0;
}
-void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo,
- struct dma_fence *fence)
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo, struct dma_fence *fence)
{
if (sa_bo == NULL || *sa_bo == NULL) {
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index b0a8abc7a8ec..341beec59537 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -35,21 +35,19 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
int32_t priority)
{
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx_mgr *mgr;
struct amdgpu_ctx *ctx;
uint32_t id;
int r;
- if (!fd_file(f))
+ if (fd_empty(f))
return -EINVAL;
r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
- if (r) {
- fdput(f);
+ if (r)
return r;
- }
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
@@ -57,7 +55,6 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
amdgpu_ctx_priority_override(ctx, priority);
mutex_unlock(&mgr->lock);
- fdput(f);
return 0;
}
@@ -66,31 +63,25 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
unsigned ctx_id,
int32_t priority)
{
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
int r;
- if (!fd_file(f))
+ if (fd_empty(f))
return -EINVAL;
r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
- if (r) {
- fdput(f);
+ if (r)
return r;
- }
ctx = amdgpu_ctx_get(fpriv, ctx_id);
- if (!ctx) {
- fdput(f);
+ if (!ctx)
return -EINVAL;
- }
amdgpu_ctx_priority_override(ctx, priority);
amdgpu_ctx_put(ctx);
- fdput(f);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 183a976ba29d..174badca27e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -219,9 +219,11 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
if (instance == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s.bin", ucode_prefix);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s%d.bin", ucode_prefix, instance);
if (err)
goto out;
@@ -261,6 +263,8 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
IP_VERSION(4, 4, 2) ||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 4, 4) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
IP_VERSION(4, 4, 5)) &&
adev->firmware.load_type ==
AMDGPU_FW_LOAD_PSP &&
@@ -343,3 +347,116 @@ int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
return 0;
}
+
+/*
+ * debugfs to enable/disable sdma job submission to a specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = BIT_ULL(adev->sdma.num_instances) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; ++i) {
+ ring = &adev->sdma.instance[i].ring;
+ if (val & BIT_ULL(i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+	/* make the updated sched.ready flags visible to other CPUs */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->sdma.num_instances; ++i) {
+ ring = &adev->sdma.instance[i].ring;
+ if (ring->sched.ready)
+ mask |= 1 << i;
+ }
+
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_sdma_sched_mask_fops,
+ amdgpu_debugfs_sdma_sched_mask_get,
+ amdgpu_debugfs_sdma_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (!(adev->sdma.num_instances > 1))
+ return;
+ sprintf(name, "amdgpu_sdma_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_sdma_sched_mask_fops);
+#endif
+}
+
+static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset);
+}
+
+static DEVICE_ATTR(sdma_reset_mask, 0444,
+ amdgpu_get_sdma_reset_mask, NULL);
+
+int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (!amdgpu_gpu_recovery)
+ return r;
+
+ if (adev->sdma.num_instances) {
+ r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_gpu_recovery)
+ return;
+
+ if (adev->dev->kobj.sd) {
+ if (adev->sdma.num_instances)
+ device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 087ce0f6fa07..5f60736051d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -107,6 +107,7 @@ struct amdgpu_sdma {
struct amdgpu_irq_src doorbell_invalid_irq;
struct amdgpu_irq_src pool_timeout_irq;
struct amdgpu_irq_src srbm_write_irq;
+ struct amdgpu_irq_src ctxt_empty_irq;
int num_instances;
uint32_t sdma_mask;
@@ -116,6 +117,7 @@ struct amdgpu_sdma {
struct ras_common_if *ras_if;
struct amdgpu_sdma_ras *ras;
uint32_t *ip_dump;
+ uint32_t supported_reset;
};
/*
@@ -175,5 +177,7 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance,
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
bool duplicate);
int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
-
+void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev);
+int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev);
+void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 74adb983ab03..01ae2f88dec8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -61,7 +61,7 @@
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
#define AMDGPU_TTM_VRAM_MAX_DW_READ ((size_t)128)
@@ -309,7 +309,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
mutex_lock(&adev->mman.gtt_window_lock);
while (src_mm.remaining) {
uint64_t from, to, cur_size, tiling_flags;
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_compress_disable;
struct dma_fence *next;
/* Never copy more than 256MiB at once to avoid a timeout */
@@ -340,9 +340,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
+ write_compress_disable =
+ AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
- AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
+ AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
+ AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
+ write_compress_disable));
}
r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
@@ -812,7 +816,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
/* Map SG to device */
r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
if (r)
- goto release_sg;
+ goto release_sg_table;
/* convert SG to linear array of pages and dma addresses */
drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
@@ -820,6 +824,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
return 0;
+release_sg_table:
+ sg_free_table(ttm->sg);
release_sg:
kfree(ttm->sg);
ttm->sg = NULL;
@@ -1760,7 +1766,8 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
if (!adev->bios &&
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
reserve_size = max(reserve_size, (uint32_t)280 << 20);
else if (!reserve_size)
reserve_size = DISCOVERY_TMR_OFFSET;
@@ -1849,6 +1856,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
+ dma_set_max_seg_size(adev->dev, UINT_MAX);
/* No others user of address space so set it to 0 */
r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
@@ -2062,6 +2070,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 138d80017f35..208b7d1d8a27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -26,14 +26,15 @@
#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
+#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
-#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
+#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
@@ -118,6 +119,8 @@ struct amdgpu_copy_mem {
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK 0x07
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT 8
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK 0x3f
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK 0x1
#define AMDGPU_COPY_FLAGS_SET(field, value) \
(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
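The new WRITE_COMPRESS_DISABLE field slots into the existing shift-and-mask packing scheme. A compact sketch using the two fields whose shift and mask both appear in the hunk above; the macro names are local renames and the GET helper is illustrative, not copied from the header:

#include <stdint.h>
#include <stdio.h>

#define COPY_FLAGS_DATA_FORMAT_SHIFT            8
#define COPY_FLAGS_DATA_FORMAT_MASK             0x3f
#define COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
#define COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK  0x1

#define COPY_FLAGS_SET(field, value) \
	(((uint32_t)(value) & COPY_FLAGS_##field##_MASK) << \
	 COPY_FLAGS_##field##_SHIFT)

/* illustrative inverse of the SET macro */
#define COPY_FLAGS_GET(flags, field) \
	(((flags) >> COPY_FLAGS_##field##_SHIFT) & \
	 COPY_FLAGS_##field##_MASK)

int main(void)
{
	uint32_t flags = COPY_FLAGS_SET(DATA_FORMAT, 0x2a) |
			 COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE, 1);

	printf("flags = 0x%08x\n", flags);
	printf("data_format = 0x%x, wcd = %u\n",
	       COPY_FLAGS_GET(flags, DATA_FORMAT),
	       COPY_FLAGS_GET(flags, WRITE_COMPRESS_DISABLE));
	return 0;
}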
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 4c7b53648a50..cf700824b960 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -1434,6 +1434,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
*
* @adev: amdgpu device
* @fw: pointer to load firmware to
+ * @required: whether the firmware is required
* @fmt: firmware name format string
* @...: variable arguments
*
@@ -1442,7 +1443,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
* the error code to -ENODEV, so that early_init functions will fail to load.
*/
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fmt, ...)
+ enum amdgpu_ucode_required required, const char *fmt, ...)
{
char fname[AMDGPU_UCODE_NAME_MAX];
va_list ap;
@@ -1456,16 +1457,24 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
return -EOVERFLOW;
}
- r = request_firmware(fw, fname, adev->dev);
+ if (required == AMDGPU_UCODE_REQUIRED)
+ r = request_firmware(fw, fname, adev->dev);
+ else {
+ r = firmware_request_nowarn(fw, fname, adev->dev);
+ if (r)
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fname);
+ }
if (r)
return -ENODEV;
r = amdgpu_ucode_validate(*fw);
- if (r) {
+ if (r)
+ /*
+		 * amdgpu_ucode_request() should be paired with amdgpu_ucode_release()
+		 * regardless of success or failure; amdgpu_ucode_release() takes care
+		 * of releasing the firmware, so avoid a redundant release here.
+ */
dev_dbg(adev->dev, "\"%s\" failed to validate\n", fname);
- release_firmware(*fw);
- *fw = NULL;
- }
return r;
}
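The required/optional split changes only how a missing file is handled: required firmware keeps the loud request path, optional firmware uses the quiet variant and logs an informational line. A sketch of that call pattern with a hypothetical stub loader in place of request_firmware()/firmware_request_nowarn():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum ucode_required { UCODE_OPTIONAL, UCODE_REQUIRED };

/* hypothetical stub: pretend only "required.bin" exists */
static int load_blob(const char *name, bool warn)
{
	if (strcmp(name, "required.bin") == 0)
		return 0;
	if (warn)
		fprintf(stderr, "error: firmware \"%s\" missing\n", name);
	return -1;
}

static int ucode_request(const char *name, enum ucode_required required)
{
	int r;

	if (required == UCODE_REQUIRED) {
		r = load_blob(name, true);     /* loud on failure */
	} else {
		r = load_blob(name, false);    /* quiet variant */
		if (r)
			printf("Optional firmware \"%s\" was not found\n",
			       name);
	}
	return r ? -1 : 0;  /* the kernel maps this failure to -ENODEV */
}

int main(void)
{
	ucode_request("required.bin", UCODE_REQUIRED);
	ucode_request("extras.bin", UCODE_OPTIONAL);
	return 0;
}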
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 4e23419b92d4..4eedd92f000b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -126,6 +126,7 @@ enum psp_fw_type {
PSP_FW_TYPE_PSP_DBG_DRV,
PSP_FW_TYPE_PSP_RAS_DRV,
PSP_FW_TYPE_PSP_IPKEYMGR_DRV,
+ PSP_FW_TYPE_PSP_SPDM_DRV,
PSP_FW_TYPE_MAX_INDEX,
};
@@ -163,6 +164,7 @@ enum ta_fw_type {
TA_FW_TYPE_PSP_DTM,
TA_FW_TYPE_PSP_RAP,
TA_FW_TYPE_PSP_SECUREDISPLAY,
+ TA_FW_TYPE_PSP_XGMI_AUX,
TA_FW_TYPE_MAX_INDEX,
};
@@ -550,6 +552,11 @@ enum amdgpu_firmware_load_type {
AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO,
};
+enum amdgpu_ucode_required {
+ AMDGPU_UCODE_OPTIONAL,
+ AMDGPU_UCODE_REQUIRED,
+};
+
/* conform to smu_ucode_xfer_cz.h */
#define AMDGPU_SDMA0_UCODE_LOADED 0x00000001
#define AMDGPU_SDMA1_UCODE_LOADED 0x00000002
@@ -603,9 +610,9 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
-__printf(3, 4)
+__printf(4, 5)
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
- const char *fmt, ...);
+ enum amdgpu_ucode_required required, const char *fmt, ...);
void amdgpu_ucode_release(const struct firmware **fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index bb7b9b2eaac1..eafe20d8fe0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -78,7 +78,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
- err_data.err_addr_cnt);
+ err_data.err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, NULL);
}
@@ -166,10 +166,11 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if ((amdgpu_bad_page_threshold != 0) &&
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
- err_data->err_addr_cnt);
+ err_data->err_addr_cnt, false);
amdgpu_ras_save_bad_pages(adev, &err_count);
- amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
+ amdgpu_dpm_send_hbm_bad_pages_num(adev,
+ con->eeprom_control.ras_num_bad_pages);
if (con->update_channel_flag == true) {
amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
@@ -318,6 +319,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
if (r)
return r;
+ if (amdgpu_sriov_vf(adev))
+ return r;
+
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
if (r)
@@ -441,3 +445,77 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t pa_addr)
+{
+ struct ta_ras_query_address_output addr_out;
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ addr_out.pa.pa = pa_addr;
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ return adev->umc.ras->convert_ras_err_addr(adev, err_data, NULL,
+ &addr_out, false);
+ else
+ return -EINVAL;
+}
+
+int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len)
+{
+ int i, ret;
+ struct ras_err_data err_data;
+
+ err_data.err_addr = kcalloc(adev->umc.retire_unit,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n");
+ return 0;
+ }
+
+ ret = amdgpu_umc_pages_in_a_row(adev, &err_data, pa_addr);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ if (i >= len)
+ goto out;
+
+ pfns[i] = err_data.err_addr[i].retired_page;
+ }
+ ret = i;
+
+out:
+ kfree(err_data.err_addr);
+ return ret;
+}
+
+int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ struct ta_ras_query_address_output *addr_out, bool dump_addr)
+{
+ struct ta_ras_query_address_input addr_in;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch;
+ addr_in.ma.umc_inst = umc;
+ addr_in.ma.node_inst = node;
+ addr_in.ma.socket_id = socket;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ ret = adev->umc.ras->convert_ras_err_addr(adev, NULL, &addr_in,
+ addr_out, dump_addr);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index ce4179db2a6d..a4a7e61817aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -54,6 +54,22 @@
/* Page retirement tag */
#define UMC_ECC_NEW_DETECTED_TAG 0x1
+/*
+ * A flag to indicate v2 of the channel index stored in eeprom.
+ *
+ * v1 (legacy way): store the channel index within a UMC instance in eeprom;
+ *     range in UMC v12: 0 ~ 7
+ * v2: store the global channel index in eeprom;
+ *     range in UMC v12: 0 ~ 127
+ *
+ * NOTE: it would be better to store this in eeprom_table_record.mem_channel,
+ * but mem_channel only has 8 bits and the channel number may increase in
+ * the future, so we save it in eeprom_table_record.retired_page instead.
+ * retired_page is unused in v2; we rely on eeprom_table_record.address
+ * rather than retired_page. Only 48 bits are saved on eeprom, so bit 47
+ * is used here.
+ */
+#define UMC_CHANNEL_IDX_V2 BIT_ULL(47)
typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
uint32_t umc_inst, uint32_t ch_inst, void *data);
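Because only 48 address bits are persisted, bit 47 of retired_page is free to act as a format flag. A small sketch of tagging the bit before an eeprom write and stripping it afterwards, as amdgpu_ras_eeprom_append() does earlier in this patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CHANNEL_IDX_V2 (1ULL << 47) /* mirrors UMC_CHANNEL_IDX_V2 */

int main(void)
{
	uint64_t retired_page = 0x123456789aULL;

	retired_page |= CHANNEL_IDX_V2;  /* set before writing to eeprom */
	printf("on eeprom: 0x%012" PRIx64 " (v2: %d)\n",
	       retired_page, !!(retired_page & CHANNEL_IDX_V2));

	retired_page &= ~CHANNEL_IDX_V2; /* clear again after the write */
	printf("in memory: 0x%012" PRIx64 "\n", retired_page);
	return 0;
}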
@@ -70,6 +86,13 @@ struct amdgpu_umc_ras {
enum amdgpu_mca_error_type type, void *ras_error_status);
int (*update_ecc_status)(struct amdgpu_device *adev,
uint64_t status, uint64_t ipid, uint64_t addr);
+ int (*convert_ras_err_addr)(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out,
+ bool dump_addr);
+ uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
+ uint64_t mca_addr, uint64_t retired_page);
};
struct amdgpu_umc_funcs {
@@ -134,4 +157,12 @@ int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
void *ras_error_status);
+int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t pa_addr);
+int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len);
+int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ struct ta_ras_query_address_output *addr_out, bool dump_addr);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
index 6162582d0aa2..dde15c6a96e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -587,7 +587,8 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
break;
}
- r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (r) {
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
@@ -765,9 +766,9 @@ static int umsch_mm_init(struct amdgpu_device *adev)
}
-static int umsch_mm_early_init(void *handle)
+static int umsch_mm_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
@@ -784,9 +785,9 @@ static int umsch_mm_early_init(void *handle)
return 0;
}
-static int umsch_mm_late_init(void *handle)
+static int umsch_mm_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
return 0;
@@ -794,9 +795,9 @@ static int umsch_mm_late_init(void *handle)
return umsch_mm_test(adev);
}
-static int umsch_mm_sw_init(void *handle)
+static int umsch_mm_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = umsch_mm_init(adev);
@@ -815,9 +816,9 @@ static int umsch_mm_sw_init(void *handle)
return 0;
}
-static int umsch_mm_sw_fini(void *handle)
+static int umsch_mm_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
release_firmware(adev->umsch_mm.fw);
adev->umsch_mm.fw = NULL;
@@ -839,9 +840,9 @@ static int umsch_mm_sw_fini(void *handle)
return 0;
}
-static int umsch_mm_hw_init(void *handle)
+static int umsch_mm_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = umsch_mm_load_microcode(&adev->umsch_mm);
@@ -857,9 +858,9 @@ static int umsch_mm_hw_init(void *handle)
return 0;
}
-static int umsch_mm_hw_fini(void *handle)
+static int umsch_mm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
umsch_mm_ring_stop(&adev->umsch_mm);
@@ -873,18 +874,14 @@ static int umsch_mm_hw_fini(void *handle)
return 0;
}
-static int umsch_mm_suspend(void *handle)
+static int umsch_mm_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return umsch_mm_hw_fini(adev);
+ return umsch_mm_hw_fini(ip_block);
}
-static int umsch_mm_resume(void *handle)
+static int umsch_mm_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return umsch_mm_hw_init(adev);
+ return umsch_mm_hw_init(ip_block);
}
void amdgpu_umsch_fwlog_init(struct amdgpu_umsch_mm *umsch_mm)
@@ -997,8 +994,6 @@ static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = {
.hw_fini = umsch_mm_hw_fini,
.suspend = umsch_mm_suspend,
.resume = umsch_mm_resume,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 31fd30dcd593..74758b5ffc6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -260,7 +260,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->uvd.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
@@ -551,6 +551,8 @@ static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
for (i = 0; i < abo->placement.num_placement; ++i) {
abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+ if (abo->placements[i].mem_type == TTM_PL_VRAM)
+ abo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 74fdbf71d95b..b9060bcd4806 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -158,7 +158,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return -EINVAL;
}
- r = amdgpu_ucode_request(adev, &adev->vce.fw, "%s", fw_name);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
@@ -214,15 +214,15 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
drm_sched_entity_destroy(&adev->vce.entity);
- amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
- (void **)&adev->vce.cpu_addr);
-
for (i = 0; i < adev->vce.num_rings; i++)
amdgpu_ring_fini(&adev->vce.ring[i]);
amdgpu_ucode_release(&adev->vce.fw);
mutex_destroy(&adev->vce.idle_mutex);
+ amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
+ (void **)&adev->vce.cpu_addr);
+
return 0;
}
@@ -503,7 +503,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_job_submit_direct(job, ring, &f);
- amdgpu_ib_free(ring->adev, &ib_msg, f);
+ amdgpu_ib_free(&ib_msg, f);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 43f44cc201cb..83faf6e6788a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2024 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -62,6 +62,7 @@
#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin"
#define FIRMWARE_VCN4_0_6_1 "amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
+#define FIRMWARE_VCN5_0_1 "amdgpu/vcn_5_0_1.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -88,6 +89,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
+MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -99,11 +101,15 @@ int amdgpu_vcn_early_init(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (i == 1 && amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
- r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s_%d.bin", ucode_prefix, i);
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_%d.bin", ucode_prefix, i);
else
- r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], "amdgpu/%s.bin", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (r) {
- amdgpu_ucode_release(&adev->vcn.fw[i]);
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
return r;
}
}
@@ -151,7 +157,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
adev->vcn.using_unified_queue =
amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
- hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[0].fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -270,7 +276,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
- amdgpu_ucode_release(&adev->vcn.fw[j]);
+ amdgpu_ucode_release(&adev->vcn.inst[j].fw);
}
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
@@ -282,7 +288,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
bool ret = false;
- int vcn_config = adev->vcn.vcn_config[vcn_instance];
+ int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;
if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
ret = true;
@@ -294,21 +300,12 @@ bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type t
return ret;
}
-int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
{
unsigned int size;
void *ptr;
int i, idx;
- bool in_ras_intr = amdgpu_ras_intr_triggered();
-
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
- /* err_event_athub will corrupt VCPU buffer, so we need to
- * restore fw data and clear buffer in amdgpu_vcn_resume() */
- if (in_ras_intr)
- return 0;
-
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -327,9 +324,24 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
drm_dev_exit(idx);
}
}
+
return 0;
}
+int amdgpu_vcn_suspend(struct amdgpu_device *adev)
+{
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ /* err_event_athub will corrupt VCPU buffer, so we need to
+ * restore fw data and clear buffer in amdgpu_vcn_resume() */
+ if (in_ras_intr)
+ return 0;
+
+ return amdgpu_vcn_save_vcpu_bo(adev);
+}
+
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
unsigned int size;
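Factoring amdgpu_vcn_save_vcpu_bo() out of suspend lets the XGMI on-init reset path (in amdgpu_reset.c above) back up the VCPU buffer without the suspend-only checks. A sketch of the resulting call shape, with hypothetical stubs for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

static void cancel_idle_work(void)   { printf("idle work cancelled\n"); }
static bool ras_intr_triggered(void) { return false; }
static int  save_vcpu_bo(void)       { printf("vcpu bo saved\n"); return 0; }

static int vcn_suspend(void)
{
	cancel_idle_work();
	/* a RAS ATHUB interrupt corrupts the buffer anyway; resume will
	 * restore the fw data, so skip the backup in that case */
	if (ras_intr_triggered())
		return 0;
	return save_vcpu_bo();
}

int main(void)
{
	return vcn_suspend();
}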
@@ -356,12 +368,12 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
const struct common_firmware_header *hdr;
unsigned int offset;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(adev->vcn.inst[i].cpu_addr,
- adev->vcn.fw[i]->data + offset,
+ adev->vcn.inst[i].fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
@@ -574,7 +586,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -585,7 +597,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
err:
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
return r;
}
@@ -767,7 +779,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -778,7 +790,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
err:
- amdgpu_ib_free(adev, ib_msg, f);
+ amdgpu_ib_free(ib_msg, f);
return r;
}
@@ -1008,7 +1020,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
error:
- amdgpu_ib_free(adev, &ib, fence);
+ amdgpu_ib_free(&ib, fence);
dma_fence_put(fence);
return r;
@@ -1019,7 +1031,8 @@ int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
long r;
- if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
+ (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
if (r)
goto error;
@@ -1057,7 +1070,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
/* currently only support 2 FW instances */
if (i >= 2) {
dev_info(adev->dev, "More then 2 VCN FW instances!\n");
@@ -1065,12 +1078,14 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
}
idx = AMDGPU_UCODE_ID_VCN + i;
adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
+ adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
- IP_VERSION(4, 0, 3))
+ IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, UVD_HWIP, 0) ==
+ IP_VERSION(5, 0, 1))
break;
}
}
@@ -1277,3 +1292,108 @@ int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
return psp_execute_ip_fw_load(&adev->psp, &ucode);
}
+
+static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
+}
+
+static DEVICE_ATTR(vcn_reset_mask, 0444,
+ amdgpu_get_vcn_reset_mask, NULL);
+
+int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->vcn.num_vcn_inst) {
+ r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (adev->dev->kobj.sd) {
+ if (adev->vcn.num_vcn_inst)
+ device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
+ }
+}
+
+/*
+ * debugfs to enable/disable vcn job submission to a specific core or
+ * instance. It is created only if the queue type is unified.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (val & (1ULL << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ /* publish the sched.ready flag update so it takes effect immediately across SMP */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
+ amdgpu_debugfs_vcn_sched_mask_get,
+ amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
+#endif
+
+void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.using_unified_queue)
+ return;
+ sprintf(name, "amdgpu_vcn_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_vcn_sched_mask_fops);
+#endif
+}
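
For reference, not part of the patch itself: the mask handled by this debugfs file is a plain bitfield, where bit i gates job submission to VCN instance i's unified encode ring, and a value with no valid instance bit is rejected with -EINVAL. A minimal userspace sketch, assuming the usual DRI debugfs layout (the /sys/kernel/debug/dri/0 path below is a placeholder for the actual card index):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; the real location depends on the card index. */
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_vcn_sched_mask";
	char buf[32];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Read the current mask: one bit per VCN instance, printed as hex. */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("current mask: %s", buf);
	}
	/* Route submissions to instance 0 only; writing a mask with no
	 * valid instance bit set would fail with -EINVAL. */
	if (write(fd, "0x1", 3) != 3)
		perror("write");
	close(fd);
	return 0;
}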
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 2a1f3dbb14d3..adaf4388ad28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2024 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -163,20 +163,30 @@
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
uint32_t internal_reg_offset, addr; \
- bool video_range, aon_range; \
+ bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
addr <<= 2; \
video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) && \
((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600))))); \
+ video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS + 0x2600))))); \
aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) && \
((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600))))); \
+ aon1_range = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS + 0x600))))); \
if (video_range) \
internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) + \
(VCN_VID_IP_ADDRESS)); \
else if (aon_range) \
internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) + \
(VCN_AON_IP_ADDRESS)); \
+ else if (video1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS) + \
+ (VCN_VID_IP_ADDRESS)); \
+ else if (aon1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS) + \
+ (VCN_AON_IP_ADDRESS)); \
else \
internal_reg_offset = (0xFFFFF & addr); \
\
@@ -297,6 +307,9 @@ struct amdgpu_vcn_inst {
atomic_t dpg_enc_submission_cnt;
struct amdgpu_vcn_fw_shared fw_shared;
uint8_t aid_id;
+ const struct firmware *fw; /* VCN firmware */
+ uint8_t vcn_config;
+ uint32_t vcn_codec_disable_mask;
};
struct amdgpu_vcn_ras {
@@ -306,15 +319,12 @@ struct amdgpu_vcn_ras {
struct amdgpu_vcn {
unsigned fw_version;
struct delayed_work idle_work;
- const struct firmware *fw[AMDGPU_MAX_VCN_INSTANCES]; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
bool indirect_sram;
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
- uint8_t vcn_config[AMDGPU_MAX_VCN_INSTANCES];
- uint32_t vcn_codec_disable_mask[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
struct mutex vcn_pg_lock;
struct mutex vcn1_jpeg1_workaround;
@@ -333,6 +343,8 @@ struct amdgpu_vcn {
/* IP reg dump */
uint32_t *ip_dump;
+
+ uint32_t supported_reset;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -518,5 +530,9 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
enum AMDGPU_UCODE_ID ucode_id);
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev);
+int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev);
+void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
#endif
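
A hedged illustration of what the extended SOC24_DPG_MODE_OFFSET macro computes, rewritten as a stand-alone function: the input is the byte address after the macro's addr <<= 2 shift, and each of the four windows (video, AON, and their VCN1 twins) is translated into the shared internal range. The base constants below are placeholders for the real VCN_*/VCN1_* defines, so only the range logic should be read as authoritative:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bases; the real values come from the VCN register headers. */
#define VID_SOC_BASE   0x20000u  /* stands in for VCN_VID_SOC_ADDRESS  */
#define AON_SOC_BASE   0x30000u  /* stands in for VCN_AON_SOC_ADDRESS  */
#define VID1_SOC_BASE  0x48000u  /* stands in for VCN1_VID_SOC_ADDRESS */
#define AON1_SOC_BASE  0x58000u  /* stands in for VCN1_AON_SOC_ADDRESS */
#define VID_IP_BASE    0x00000u  /* stands in for VCN_VID_IP_ADDRESS   */
#define AON_IP_BASE    0x10000u  /* stands in for VCN_AON_IP_ADDRESS   */

static uint32_t dpg_mode_offset(uint32_t addr)
{
	uint32_t a = addr & 0xFFFFF; /* only the low 20 bits are decoded */

	if (a >= VID_SOC_BASE && a < VID_SOC_BASE + 0x2600)
		return a - VID_SOC_BASE + VID_IP_BASE;   /* video window */
	if (a >= AON_SOC_BASE && a < AON_SOC_BASE + 0x600)
		return a - AON_SOC_BASE + AON_IP_BASE;   /* always-on window */
	if (a >= VID1_SOC_BASE && a < VID1_SOC_BASE + 0x2600)
		return a - VID1_SOC_BASE + VID_IP_BASE;  /* second video window */
	if (a >= AON1_SOC_BASE && a < AON1_SOC_BASE + 0x600)
		return a - AON1_SOC_BASE + AON_IP_BASE;  /* second AON window */
	return a; /* outside every window: pass the offset through */
}

int main(void)
{
	/* An address in the second video window lands in the same internal
	 * range as the first one. */
	printf("0x%x\n", dpg_mode_offset(VID1_SOC_BASE + 0x40));
	return 0;
}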
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index b6397d3229e1..0af469ec6fcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -523,6 +523,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
adev->unique_id =
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
+ adev->virt.ras_en_caps.all = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_en_caps.all;
+ adev->virt.ras_telemetry_en_caps.all =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_telemetry_en_caps.all;
break;
default:
dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version);
@@ -703,6 +706,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
} else if (adev->mman.drv_vram_usage_va) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
@@ -710,6 +715,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
}
amdgpu_virt_read_pf2vf_data(adev);
@@ -1144,3 +1151,183 @@ bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
return xnack_mode;
}
+
+bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_sriov_ras_caps_en(adev))
+ return false;
+
+ if (adev->virt.ras_en_caps.bits.block_umc)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__UMC);
+ if (adev->virt.ras_en_caps.bits.block_sdma)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SDMA);
+ if (adev->virt.ras_en_caps.bits.block_gfx)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__GFX);
+ if (adev->virt.ras_en_caps.bits.block_mmhub)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MMHUB);
+ if (adev->virt.ras_en_caps.bits.block_athub)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__ATHUB);
+ if (adev->virt.ras_en_caps.bits.block_pcie_bif)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__PCIE_BIF);
+ if (adev->virt.ras_en_caps.bits.block_hdp)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__HDP);
+ if (adev->virt.ras_en_caps.bits.block_xgmi_wafl)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__XGMI_WAFL);
+ if (adev->virt.ras_en_caps.bits.block_df)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__DF);
+ if (adev->virt.ras_en_caps.bits.block_smn)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SMN);
+ if (adev->virt.ras_en_caps.bits.block_sem)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SEM);
+ if (adev->virt.ras_en_caps.bits.block_mp0)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP0);
+ if (adev->virt.ras_en_caps.bits.block_mp1)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP1);
+ if (adev->virt.ras_en_caps.bits.block_fuse)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__FUSE);
+ if (adev->virt.ras_en_caps.bits.block_mca)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MCA);
+ if (adev->virt.ras_en_caps.bits.block_vcn)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__VCN);
+ if (adev->virt.ras_en_caps.bits.block_jpeg)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__JPEG);
+ if (adev->virt.ras_en_caps.bits.block_ih)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__IH);
+ if (adev->virt.ras_en_caps.bits.block_mpio)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MPIO);
+
+ if (adev->virt.ras_en_caps.bits.poison_propogation_mode)
+ con->poison_supported = true; /* Poison is handled by host */
+
+ return true;
+}
+
+static inline enum amd_sriov_ras_telemetry_gpu_block
+amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block block) {
+ switch (block) {
+ case AMDGPU_RAS_BLOCK__UMC:
+ return RAS_TELEMETRY_GPU_BLOCK_UMC;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ return RAS_TELEMETRY_GPU_BLOCK_SDMA;
+ case AMDGPU_RAS_BLOCK__GFX:
+ return RAS_TELEMETRY_GPU_BLOCK_GFX;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ return RAS_TELEMETRY_GPU_BLOCK_MMHUB;
+ case AMDGPU_RAS_BLOCK__ATHUB:
+ return RAS_TELEMETRY_GPU_BLOCK_ATHUB;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ return RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF;
+ case AMDGPU_RAS_BLOCK__HDP:
+ return RAS_TELEMETRY_GPU_BLOCK_HDP;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ return RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL;
+ case AMDGPU_RAS_BLOCK__DF:
+ return RAS_TELEMETRY_GPU_BLOCK_DF;
+ case AMDGPU_RAS_BLOCK__SMN:
+ return RAS_TELEMETRY_GPU_BLOCK_SMN;
+ case AMDGPU_RAS_BLOCK__SEM:
+ return RAS_TELEMETRY_GPU_BLOCK_SEM;
+ case AMDGPU_RAS_BLOCK__MP0:
+ return RAS_TELEMETRY_GPU_BLOCK_MP0;
+ case AMDGPU_RAS_BLOCK__MP1:
+ return RAS_TELEMETRY_GPU_BLOCK_MP1;
+ case AMDGPU_RAS_BLOCK__FUSE:
+ return RAS_TELEMETRY_GPU_BLOCK_FUSE;
+ case AMDGPU_RAS_BLOCK__MCA:
+ return RAS_TELEMETRY_GPU_BLOCK_MCA;
+ case AMDGPU_RAS_BLOCK__VCN:
+ return RAS_TELEMETRY_GPU_BLOCK_VCN;
+ case AMDGPU_RAS_BLOCK__JPEG:
+ return RAS_TELEMETRY_GPU_BLOCK_JPEG;
+ case AMDGPU_RAS_BLOCK__IH:
+ return RAS_TELEMETRY_GPU_BLOCK_IH;
+ case AMDGPU_RAS_BLOCK__MPIO:
+ return RAS_TELEMETRY_GPU_BLOCK_MPIO;
+ default:
+ dev_err(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block);
+ return RAS_TELEMETRY_GPU_BLOCK_COUNT;
+ }
+}
+
+static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry)
+{
+ struct amd_sriov_ras_telemetry_error_count *tmp = NULL;
+ uint32_t checksum, used_size;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return 0;
+
+ tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+ goto out;
+
+ memcpy(&adev->virt.count_cache, tmp,
+ min(used_size, sizeof(adev->virt.count_cache)));
+out:
+ kfree(tmp);
+
+ return 0;
+}
+
+static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bool force_update)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ /* Host allows 15 ras telemetry requests per 60 seconds, after which the
+ * Host will ignore incoming guest messages. Ratelimit the guest messages
+ * to prevent guest self-DoS.
+ */
+ if (__ratelimit(&adev->virt.ras_telemetry_rs) || force_update) {
+ if (!virt->ops->req_ras_err_count(adev))
+ amdgpu_virt_cache_host_error_counts(adev,
+ adev->virt.fw_reserve.ras_telemetry);
+ }
+
+ return 0;
+}
+
+/* Bypass ACA interface and query ECC counts directly from host */
+int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
+ struct ras_err_data *err_data)
+{
+ enum amd_sriov_ras_telemetry_gpu_block sriov_block;
+
+ sriov_block = amdgpu_ras_block_to_sriov(adev, block);
+
+ if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
+ !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
+ return -EOPNOTSUPP;
+
+ /* Host access may be lost during reset; just return the last cached data. */
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_req_ras_err_count_internal(adev, false);
+ up_read(&adev->reset_domain->sem);
+ }
+
+ err_data->ue_count = adev->virt.count_cache.block[sriov_block].ue_count;
+ err_data->ce_count = adev->virt.count_cache.block[sriov_block].ce_count;
+ err_data->de_count = adev->virt.count_cache.block[sriov_block].de_count;
+
+ return 0;
+}
+
+int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
+{
+ unsigned long ue_count, ce_count;
+
+ if (amdgpu_sriov_ras_telemetry_en(adev)) {
+ amdgpu_virt_req_ras_err_count_internal(adev, true);
+ amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL);
+ }
+
+ return 0;
+}
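
The cache update in amdgpu_virt_cache_host_error_counts() follows a snapshot/validate/commit pattern: copy the host-shared buffer, verify the checksum on the private copy, and only then commit it to count_cache. Snapshotting first matters because the host can rewrite the shared buffer at any time; validating the private copy closes that window. A hedged stand-alone sketch of the pattern, where the byte-sum below is a toy stand-in for amd_sriov_msg_checksum():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Toy checksum standing in for amd_sriov_msg_checksum(). */
static uint32_t toy_checksum(const void *buf, uint32_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

struct cache { uint8_t data[64]; };

/* Returns 0 even on a bad checksum, which simply leaves the cache
 * untouched, mirroring the "goto out" path in the driver. */
static int cache_host_data(struct cache *cache, const void *shared,
			   uint32_t used_size, uint32_t checksum)
{
	void *tmp;

	if (used_size > sizeof(cache->data)) /* size sanity check first */
		return 0;

	tmp = malloc(used_size);             /* kmemdup() analog */
	if (!tmp)
		return -1;
	memcpy(tmp, shared, used_size);      /* snapshot the shared buffer */

	if (checksum == toy_checksum(tmp, used_size))
		memcpy(cache->data, tmp, used_size); /* commit */

	free(tmp);
	return 0;
}

int main(void)
{
	struct cache c = { {0} };
	uint8_t shared[4] = { 1, 2, 3, 4 };

	return cache_host_data(&c, shared, sizeof(shared),
			       toy_checksum(shared, sizeof(shared)));
}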
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b650a2032c42..5381b8d596e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -95,6 +95,7 @@ struct amdgpu_virt_ops {
void (*ras_poison_handler)(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
+ int (*req_ras_err_count)(struct amdgpu_device *adev);
};
/*
@@ -103,6 +104,7 @@ struct amdgpu_virt_ops {
struct amdgpu_virt_fw_reserve {
struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
+ void *ras_telemetry;
unsigned int checksum_key;
};
@@ -136,6 +138,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7),
/* MES info */
AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
+ AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
+ AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -276,6 +280,12 @@ struct amdgpu_virt {
uint32_t autoload_ucode_id;
struct mutex rlcg_reg_lock;
+
+ union amd_sriov_ras_caps ras_en_caps;
+ union amd_sriov_ras_caps ras_telemetry_en_caps;
+
+ struct ratelimit_state ras_telemetry_rs;
+ struct amd_sriov_ras_telemetry_error_count count_cache;
};
struct amdgpu_video_codec_info;
@@ -320,6 +330,15 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_vf_mmio_access_protection(adev) \
((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
+#define amdgpu_sriov_ras_caps_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CAPS)
+
+#define amdgpu_sriov_ras_telemetry_en(adev) \
+(((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_TELEMETRY) && (adev)->virt.fw_reserve.ras_telemetry)
+
+#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \
+(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk))
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
@@ -383,4 +402,8 @@ bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
u32 acc_flags, u32 hwip,
bool write, u32 *rlcg_flag);
u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id);
+bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev);
+int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
+ struct ras_err_data *err_data);
+int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index d4c2afafbb73..03308261f894 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -493,10 +493,10 @@ const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static int amdgpu_vkms_sw_init(void *handle)
+static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
@@ -536,9 +536,9 @@ static int amdgpu_vkms_sw_init(void *handle)
return 0;
}
-static int amdgpu_vkms_sw_fini(void *handle)
+static int amdgpu_vkms_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i = 0;
for (i = 0; i < adev->mode_info.num_crtc; i++)
@@ -555,9 +555,9 @@ static int amdgpu_vkms_sw_fini(void *handle)
return 0;
}
-static int amdgpu_vkms_hw_init(void *handle)
+static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -600,31 +600,31 @@ static int amdgpu_vkms_hw_init(void *handle)
return 0;
}
-static int amdgpu_vkms_hw_fini(void *handle)
+static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
-static int amdgpu_vkms_suspend(void *handle)
+static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = drm_mode_config_helper_suspend(adev_to_drm(adev));
if (r)
return r;
- return amdgpu_vkms_hw_fini(handle);
+
+ return 0;
}
-static int amdgpu_vkms_resume(void *handle)
+static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_vkms_hw_init(handle);
+ r = amdgpu_vkms_hw_init(ip_block);
if (r)
return r;
- return drm_mode_config_helper_resume(adev_to_drm(adev));
+ return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev));
}
static bool amdgpu_vkms_is_idle(void *handle)
@@ -632,23 +632,13 @@ static bool amdgpu_vkms_is_idle(void *handle)
return true;
}
-static int amdgpu_vkms_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int amdgpu_vkms_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int amdgpu_vkms_set_clockgating_state(void *handle,
+static int amdgpu_vkms_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int amdgpu_vkms_set_powergating_state(void *handle,
+static int amdgpu_vkms_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -656,8 +646,6 @@ static int amdgpu_vkms_set_powergating_state(void *handle,
static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
.name = "amdgpu_vkms",
- .early_init = NULL,
- .late_init = NULL,
.sw_init = amdgpu_vkms_sw_init,
.sw_fini = amdgpu_vkms_sw_fini,
.hw_init = amdgpu_vkms_hw_init,
@@ -665,12 +653,8 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
.suspend = amdgpu_vkms_suspend,
.resume = amdgpu_vkms_resume,
.is_idle = amdgpu_vkms_is_idle,
- .wait_for_idle = amdgpu_vkms_wait_for_idle,
- .soft_reset = amdgpu_vkms_soft_reset,
.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
.set_powergating_state = amdgpu_vkms_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6005280f5f38..5c07777d3239 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -36,6 +36,7 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"
+#include "amdgpu_vm.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
@@ -311,6 +312,111 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
}
/**
+ * amdgpu_vm_update_shared - helper to update shared memory stat
+ * @base: base structure for tracking BO usage in a VM
+ *
+ * Takes the vm status_lock and updates the shared memory stat. If the basic
+ * stat changed (e.g. the buffer was moved), amdgpu_vm_update_stats needs to
+ * be called as well.
+ */
+static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
+{
+ struct amdgpu_vm *vm = base->vm;
+ struct amdgpu_bo *bo = base->bo;
+ uint64_t size = amdgpu_bo_size(bo);
+ uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
+ bool shared;
+
+ spin_lock(&vm->status_lock);
+ shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
+ if (base->shared != shared) {
+ base->shared = shared;
+ if (shared) {
+ vm->stats[bo_memtype].drm.shared += size;
+ vm->stats[bo_memtype].drm.private -= size;
+ } else {
+ vm->stats[bo_memtype].drm.shared -= size;
+ vm->stats[bo_memtype].drm.private += size;
+ }
+ }
+ spin_unlock(&vm->status_lock);
+}
+
+/**
+ * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
+ * @bo: amdgpu buffer object
+ *
+ * Update the per-VM stats for every VM the BO belongs to, moving the size
+ * from private to shared or vice versa if needed.
+ */
+void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
+{
+ struct amdgpu_vm_bo_base *base;
+
+ for (base = bo->vm_bo; base; base = base->next)
+ amdgpu_vm_update_shared(base);
+}
+
+/**
+ * amdgpu_vm_update_stats_locked - helper to update normal memory stat
+ * @base: base structure for tracking BO usage in a VM
+ * @res: the ttm_resource to use for the purpose of accounting, may or may not
+ * be bo->tbo.resource
+ * @sign: if we should add (+1) or subtract (-1) from the stat
+ *
+ * The caller needs to hold the vm status_lock. Useful when multiple updates
+ * need to happen at the same time.
+ */
+static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *res, int sign)
+{
+ struct amdgpu_vm *vm = base->vm;
+ struct amdgpu_bo *bo = base->bo;
+ int64_t size = sign * amdgpu_bo_size(bo);
+ uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
+
+ /* For drm-total- and drm-shared-, BOs are accounted by their preferred
+ * placement, see also amdgpu_bo_mem_stats_placement.
+ */
+ if (base->shared)
+ vm->stats[bo_memtype].drm.shared += size;
+ else
+ vm->stats[bo_memtype].drm.private += size;
+
+ if (res && res->mem_type < __AMDGPU_PL_NUM) {
+ uint32_t res_memtype = res->mem_type;
+
+ vm->stats[res_memtype].drm.resident += size;
+ /* A BO only counts as purgeable if it is resident,
+ * since otherwise there's nothing to purge.
+ */
+ if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
+ vm->stats[res_memtype].drm.purgeable += size;
+ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
+ vm->stats[bo_memtype].evicted += size;
+ }
+}
+
+/**
+ * amdgpu_vm_update_stats - helper to update normal memory stat
+ * @base: base structure for tracking BO usage in a VM
+ * @res: the ttm_resource to use for the purpose of accounting, may or may not
+ * be bo->tbo.resource
+ * @sign: if we should add (+1) or subtract (-1) from the stat
+ *
+ * Updates the basic memory stat when a BO is added/deleted/moved.
+ */
+void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *res, int sign)
+{
+ struct amdgpu_vm *vm = base->vm;
+
+ spin_lock(&vm->status_lock);
+ amdgpu_vm_update_stats_locked(base, res, sign);
+ spin_unlock(&vm->status_lock);
+}
+
+/**
* amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
*
* @base: base structure for tracking BO usage in a VM
@@ -333,6 +439,11 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
+ spin_lock(&vm->status_lock);
+ base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
+ amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
+ spin_unlock(&vm->status_lock);
+
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
@@ -674,12 +785,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
- if (adev->gfx.enable_cleaner_shader &&
- ring->funcs->emit_cleaner_shader &&
- job->enforce_isolation)
- ring->funcs->emit_cleaner_shader(ring);
-
- if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
+ if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
+ !(job->enforce_isolation && !job->vmid))
return 0;
amdgpu_ring_ib_begin(ring);
@@ -690,6 +797,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);
+ if (adev->gfx.enable_cleaner_shader &&
+ ring->funcs->emit_cleaner_shader &&
+ job->enforce_isolation)
+ ring->funcs->emit_cleaner_shader(ring);
+
if (vm_flush_needed) {
trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
@@ -1082,51 +1194,11 @@ error_free:
return r;
}
-static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
- struct amdgpu_mem_stats *stats)
-{
- struct amdgpu_vm *vm = bo_va->base.vm;
- struct amdgpu_bo *bo = bo_va->base.bo;
-
- if (!bo)
- return;
-
- /*
- * For now ignore BOs which are currently locked and potentially
- * changing their location.
- */
- if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
- !dma_resv_trylock(bo->tbo.base.resv))
- return;
-
- amdgpu_bo_get_memory(bo, stats);
- if (!amdgpu_vm_is_bo_always_valid(vm, bo))
- dma_resv_unlock(bo->tbo.base.resv);
-}
-
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
- struct amdgpu_mem_stats *stats)
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
- struct amdgpu_bo_va *bo_va, *tmp;
-
spin_lock(&vm->status_lock);
- list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
-
- list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
- amdgpu_vm_bo_get_memory(bo_va, stats);
+ memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
spin_unlock(&vm->status_lock);
}
@@ -1159,7 +1231,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
int r;
amdgpu_sync_create(&sync);
- if (clear || !bo) {
+ if (clear) {
mem = NULL;
/* Implicitly sync to command submissions in the same VM before
@@ -1174,6 +1246,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (r)
goto error_free;
}
+ } else if (!bo) {
+ mem = NULL;
+
+ /* PRT map operations don't need to sync to anything. */
} else {
struct drm_gem_object *obj = &bo->tbo.base;
@@ -1259,10 +1335,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
* next command submission.
*/
if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
- uint32_t mem_type = bo->tbo.resource->mem_type;
-
- if (!(bo->preferred_domains &
- amdgpu_mem_type_to_domain(mem_type)))
+ if (bo->tbo.resource &&
+ !(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
amdgpu_vm_bo_evicted(&bo_va->base);
else
amdgpu_vm_bo_idle(&bo_va->base);
@@ -2069,6 +2144,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
if (*base != &bo_va->base)
continue;
+ amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
*base = bo_va->base.next;
break;
}
@@ -2137,14 +2213,12 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
/**
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
- * @adev: amdgpu_device pointer
* @bo: amdgpu buffer object
* @evicted: is the BO evicted
*
* Mark @bo as invalid.
*/
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo, bool evicted)
+void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
{
struct amdgpu_vm_bo_base *bo_base;
@@ -2170,6 +2244,32 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_bo_move - handle BO move
+ *
+ * @bo: amdgpu buffer object
+ * @new_mem: the new placement of the BO move
+ * @evicted: is the BO evicted
+ *
+ * Update the memory stats for the new placement and mark @bo as invalid.
+ */
+void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
+ bool evicted)
+{
+ struct amdgpu_vm_bo_base *bo_base;
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ spin_lock(&vm->status_lock);
+ amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
+ amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
+ spin_unlock(&vm->status_lock);
+ }
+
+ amdgpu_vm_bo_invalidate(bo, evicted);
+}
+
+/**
* amdgpu_vm_get_block_size - calculate VM page table size as power of two
*
* @vm_size: VM size
@@ -2588,6 +2688,16 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->is_compute_context = false;
}
+static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
+{
+ for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
+ if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
+ vm->stats[i].evicted == 0))
+ return false;
+ }
+ return true;
+}
+
/**
* amdgpu_vm_fini - tear down a vm instance
*
@@ -2611,7 +2721,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
- amdgpu_vm_put_task_info(vm->task_info);
amdgpu_vm_set_pasid(adev, vm, 0);
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
@@ -2660,6 +2769,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
+
+ if (!amdgpu_vm_stats_is_zero(vm)) {
+ struct amdgpu_task_info *ti = vm->task_info;
+
+ dev_warn(adev->dev,
+ "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
+ ti->process_name, ti->pid, ti->task_name, ti->tgid);
+ }
+
+ amdgpu_vm_put_task_info(vm->task_info);
}
/**
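
The statistics rework above hinges on one signed helper: +1 accounts a BO into a placement, -1 removes it, and a move is a -1 against the old resource followed by a +1 against the new one, both under a single status_lock hold so readers never see a size counted twice or dropped. A hedged toy model of that bookkeeping, with placements and fields heavily simplified:

#include <stdint.h>
#include <stdio.h>

enum { PL_VRAM, PL_GTT, PL_NUM };

struct toy_stats { int64_t resident[PL_NUM]; };

/* sign is +1 when a BO arrives in a placement, -1 when it leaves,
 * like the sign parameter of amdgpu_vm_update_stats(). */
static void update_stats(struct toy_stats *s, int placement,
			 uint64_t size, int sign)
{
	s->resident[placement] += sign * (int64_t)size;
}

/* A move is two signed updates; the driver performs both under one
 * status_lock hold (cf. amdgpu_vm_bo_move()). */
static void move_bo(struct toy_stats *s, uint64_t size, int from, int to)
{
	update_stats(s, from, size, -1);
	update_stats(s, to, size, +1);
}

int main(void)
{
	struct toy_stats s = { {0} };

	update_stats(&s, PL_GTT, 4096, +1); /* BO created in GTT  */
	move_bo(&s, 4096, PL_GTT, PL_VRAM); /* then moved to VRAM */
	printf("vram=%lld gtt=%lld\n",
	       (long long)s.resident[PL_VRAM],
	       (long long)s.resident[PL_GTT]);
	return 0;
}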
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 52dd7cdfdc81..a3e128e373bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -35,6 +35,7 @@
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"
+#include "amdgpu_ttm.h"
struct drm_exec;
@@ -42,7 +43,6 @@ struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
struct amdgpu_bo_vm;
-struct amdgpu_mem_stats;
/*
* GPUVM handling
@@ -203,9 +203,13 @@ struct amdgpu_vm_bo_base {
/* protected by bo being reserved */
struct amdgpu_vm_bo_base *next;
- /* protected by spinlock */
+ /* protected by vm status_lock */
struct list_head vm_status;
+ /* whether the BO is counted as shared in mem stats;
+ * protected by vm status_lock */
+ bool shared;
+
/* protected by the BO being reserved */
bool moved;
};
@@ -322,6 +326,13 @@ struct amdgpu_vm_fault_info {
unsigned int vmhub;
};
+struct amdgpu_mem_stats {
+ struct drm_memory_stats drm;
+
+ /* buffers that requested this placement but are currently evicted */
+ uint64_t evicted;
+};
+
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
@@ -336,6 +347,9 @@ struct amdgpu_vm {
/* Lock to protect vm_bo add/del/move on all lists of vm */
spinlock_t status_lock;
+ /* Memory statistics for this vm, protected by status_lock */
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+
/* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
@@ -515,8 +529,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo, bool evicted);
+void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted);
+void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *new_res, int sign);
+void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo);
+void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
+ bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
@@ -567,7 +585,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
- struct amdgpu_mem_stats *stats);
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]);
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index f78a0434a48f..b0bf21682115 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -537,6 +537,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
if (!entry->bo)
return;
+ amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);
entry->bo->vm_bo = NULL;
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 5acd20ff5979..121ee17b522b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -236,7 +236,8 @@ int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
int ret;
amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
- ret = amdgpu_ucode_request(adev, &adev->vpe.fw, "amdgpu/%s.bin", fw_prefix);
+ ret = amdgpu_ucode_request(adev, &adev->vpe.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", fw_prefix);
if (ret)
goto out;
@@ -295,9 +296,9 @@ int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe)
return 0;
}
-static int vpe_early_init(void *handle)
+static int vpe_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
@@ -356,9 +357,9 @@ static int vpe_common_init(struct amdgpu_vpe *vpe)
return 0;
}
-static int vpe_sw_init(void *handle)
+static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
int ret;
@@ -377,18 +378,26 @@ static int vpe_sw_init(void *handle)
ret = vpe_init_microcode(vpe);
if (ret)
goto out;
+
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->vpe.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
+ ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
+ if (ret)
+ goto out;
out:
return ret;
}
-static int vpe_sw_fini(void *handle)
+static int vpe_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
release_firmware(vpe->fw);
vpe->fw = NULL;
+ amdgpu_vpe_sysfs_reset_mask_fini(adev);
vpe_ring_fini(vpe);
amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj,
@@ -398,9 +407,9 @@ static int vpe_sw_fini(void *handle)
return 0;
}
-static int vpe_hw_init(void *handle)
+static int vpe_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
int ret;
@@ -421,9 +430,9 @@ static int vpe_hw_init(void *handle)
return 0;
}
-static int vpe_hw_fini(void *handle)
+static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
vpe_ring_stop(vpe);
@@ -434,20 +443,18 @@ static int vpe_hw_fini(void *handle)
return 0;
}
-static int vpe_suspend(void *handle)
+static int vpe_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vpe.idle_work);
- return vpe_hw_fini(adev);
+ return vpe_hw_fini(ip_block);
}
-static int vpe_resume(void *handle)
+static int vpe_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vpe_hw_init(adev);
+ return vpe_hw_init(ip_block);
}
static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@@ -640,16 +647,16 @@ static int vpe_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
-static int vpe_set_clockgating_state(void *handle,
+static int vpe_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int vpe_set_powergating_state(void *handle,
+static int vpe_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
if (!adev->pm.dpm_enabled)
@@ -827,7 +834,7 @@ static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout)
ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -867,6 +874,43 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring)
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
+static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset);
+}
+
+static DEVICE_ATTR(vpe_reset_mask, 0444,
+ amdgpu_get_vpe_reset_mask, NULL);
+
+int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->vpe.num_instances) {
+ r = device_create_file(adev->dev, &dev_attr_vpe_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (adev->dev->kobj.sd) {
+ if (adev->vpe.num_instances)
+ device_remove_file(adev->dev, &dev_attr_vpe_reset_mask);
+ }
+}
+
static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.type = AMDGPU_RING_TYPE_VPE,
.align_mask = 0xf,
@@ -908,14 +952,12 @@ static void vpe_set_ring_funcs(struct amdgpu_device *adev)
const struct amd_ip_funcs vpe_ip_funcs = {
.name = "vpe_v6_1",
.early_init = vpe_early_init,
- .late_init = NULL,
.sw_init = vpe_sw_init,
.sw_fini = vpe_sw_fini,
.hw_init = vpe_hw_init,
.hw_fini = vpe_hw_fini,
.suspend = vpe_suspend,
.resume = vpe_resume,
- .soft_reset = NULL,
.set_clockgating_state = vpe_set_clockgating_state,
.set_powergating_state = vpe_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
index 231d86d0953e..695da740a97e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
@@ -79,6 +79,7 @@ struct amdgpu_vpe {
uint32_t num_instances;
bool collaborate_mode;
+ uint32_t supported_reset;
};
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev);
@@ -86,6 +87,8 @@ int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe);
int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe);
int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe);
int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe);
+void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev);
#define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0)
#define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? (vpe)->funcs->ring_start((vpe)) : 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 7d26a962f811..ff5e52025266 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -567,7 +567,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
else
remaining_size -= size;
}
- mutex_unlock(&mgr->lock);
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
@@ -584,6 +583,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
(u64)vres->base.size,
&vres->blocks);
}
+ mutex_unlock(&mgr->lock);
vres->base.start = 0;
size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
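
The amdgpu_vram_mgr_new() hunk above is purely a lock-scope fix: the contiguous-DCC trim walks and edits the same buddy block list as the allocation loop, so mutex_unlock() moves below it. A hedged, self-contained sketch of the pattern, with a toy manager standing in for the buddy allocator:

#include <pthread.h>
#include <stdio.h>

struct toy_mgr {
	pthread_mutex_t lock;
	int free_blocks;
};

/* Both the allocation step and the trim step mutate shared manager
 * state, so the lock is released only after the trim, mirroring the
 * moved mutex_unlock(&mgr->lock) in the patch. */
static int alloc_and_trim(struct toy_mgr *mgr, int want, int trim_to)
{
	int got;

	pthread_mutex_lock(&mgr->lock);
	got = want <= mgr->free_blocks ? want : mgr->free_blocks;
	mgr->free_blocks -= got;          /* allocation loop */
	if (got > trim_to) {              /* contiguous-DCC style trim */
		mgr->free_blocks += got - trim_to;
		got = trim_to;
	}
	pthread_mutex_unlock(&mgr->lock); /* unlock after all list mutation */
	return got;
}

int main(void)
{
	struct toy_mgr mgr = { PTHREAD_MUTEX_INITIALIZER, 10 };

	printf("%d\n", alloc_and_trim(&mgr, 8, 6)); /* prints 6 */
	return 0;
}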
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index a6d456ec6aeb..23b6f7a4aa4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -427,9 +427,298 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
return;
sched = entity->entity.rq->sched;
- if (sched->ready) {
+ if (drm_sched_wqueue_ready(sched)) {
ring = to_amdgpu_ring(entity->entity.rq->sched);
atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
}
}
+#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \
+ static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \
+ struct amdgpu_xcp_res_details *xcp_res, char *buf) \
+ { \
+ return sysfs_emit(buf, "%d\n", xcp_res->_name); \
+ }
+
+struct amdgpu_xcp_res_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf);
+};
+
+#define XCP_CFG_SYSFS_RES_ATTR(_name) \
+ struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = 0400 }, \
+ .show = amdgpu_xcp_res_sysfs_##_name##_show, \
+ }
+
+XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst)
+XCP_CFG_SYSFS_RES_ATTR(num_inst);
+XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared)
+XCP_CFG_SYSFS_RES_ATTR(num_shared);
+
+#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr
+
+static struct attribute *xcp_cfg_res_sysfs_attrs[] = {
+ &XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst),
+ &XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL
+};
+
+static const char *xcp_desc[] = {
+ [AMDGPU_SPX_PARTITION_MODE] = "SPX",
+ [AMDGPU_DPX_PARTITION_MODE] = "DPX",
+ [AMDGPU_TPX_PARTITION_MODE] = "TPX",
+ [AMDGPU_QPX_PARTITION_MODE] = "QPX",
+ [AMDGPU_CPX_PARTITION_MODE] = "CPX",
+};
+
+static const char *nps_desc[] = {
+ [UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN",
+ [AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
+ [AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
+ [AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
+ [AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
+ [AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
+ [AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
+};
+
+ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs);
+
+#define to_xcp_attr(x) \
+ container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr)
+#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj)
+
+static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_res_sysfs_attribute *attribute;
+ struct amdgpu_xcp_res_details *xcp_res;
+
+ attribute = to_xcp_attr(attr);
+ xcp_res = to_xcp_res(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(xcp_res, buf);
+}
+
+static const struct sysfs_ops xcp_cfg_res_sysfs_ops = {
+ .show = xcp_cfg_res_sysfs_attr_show,
+};
+
+static const struct kobj_type xcp_cfg_res_sysfs_ktype = {
+ .sysfs_ops = &xcp_cfg_res_sysfs_ops,
+ .default_groups = xcp_cfg_res_sysfs_groups,
+};
+
+const char *xcp_res_names[] = {
+ [AMDGPU_XCP_RES_XCC] = "xcc",
+ [AMDGPU_XCP_RES_DMA] = "dma",
+ [AMDGPU_XCP_RES_DEC] = "dec",
+ [AMDGPU_XCP_RES_JPEG] = "jpeg",
+};
+
+static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info)
+ return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg);
+
+ return -EOPNOTSUPP;
+}
+
+#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj)
+static ssize_t supported_xcp_configs_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+ struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr;
+ int size = 0, mode;
+ char *sep = "";
+
+ if (!xcp_mgr || !xcp_mgr->supp_xcp_modes)
+ return sysfs_emit(buf, "Not supported\n");
+
+ for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
+ sep = ", ";
+ }
+
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t supported_nps_configs_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+ int size = 0, mode;
+ char *sep = "";
+
+ if (!xcp_cfg || !xcp_cfg->compatible_nps_modes)
+ return sysfs_emit(buf, "Not supported\n");
+
+ for_each_inst(mode, xcp_cfg->compatible_nps_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
+ sep = ", ";
+ }
+
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t xcp_config_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+
+ return sysfs_emit(buf, "%s\n",
+ amdgpu_gfx_compute_mode_desc(xcp_cfg->mode));
+}
+
+static ssize_t xcp_config_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+ int mode, r;
+
+ if (!strncasecmp("SPX", buf, strlen("SPX")))
+ mode = AMDGPU_SPX_PARTITION_MODE;
+ else if (!strncasecmp("DPX", buf, strlen("DPX")))
+ mode = AMDGPU_DPX_PARTITION_MODE;
+ else if (!strncasecmp("TPX", buf, strlen("TPX")))
+ mode = AMDGPU_TPX_PARTITION_MODE;
+ else if (!strncasecmp("QPX", buf, strlen("QPX")))
+ mode = AMDGPU_QPX_PARTITION_MODE;
+ else if (!strncasecmp("CPX", buf, strlen("CPX")))
+ mode = AMDGPU_CPX_PARTITION_MODE;
+ else
+ return -EINVAL;
+
+ r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
+
+ if (r)
+ return r;
+
+ xcp_cfg->mode = mode;
+ return size;
+}
+
+static struct kobj_attribute xcp_cfg_sysfs_mode =
+ __ATTR_RW_MODE(xcp_config, 0644);
+
+static void xcp_cfg_sysfs_release(struct kobject *kobj)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+
+ kfree(xcp_cfg);
+}
+
+static const struct kobj_type xcp_cfg_sysfs_ktype = {
+ .release = xcp_cfg_sysfs_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static struct kobj_attribute supp_part_sysfs_mode =
+ __ATTR_RO(supported_xcp_configs);
+
+static struct kobj_attribute supp_nps_sysfs_mode =
+ __ATTR_RO(supported_nps_configs);
+
+static const struct attribute *xcp_attrs[] = {
+ &supp_part_sysfs_mode.attr,
+ &xcp_cfg_sysfs_mode.attr,
+ NULL,
+};
+
+void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_xcp_res_details *xcp_res;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ int i, r, j, rid, mode;
+
+ if (!adev->xcp_mgr)
+ return;
+
+ xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL);
+ if (!xcp_cfg)
+ return;
+ xcp_cfg->xcp_mgr = adev->xcp_mgr;
+
+ r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype,
+ &adev->dev->kobj, "compute_partition_config");
+ if (r)
+ goto err1;
+
+ r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs);
+ if (r)
+ goto err1;
+
+ if (adev->gmc.supported_nps_modes != 0) {
+ r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ if (r) {
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+ goto err1;
+ }
+ }
+
+ mode = (xcp_cfg->xcp_mgr->mode ==
+ AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ?
+ AMDGPU_SPX_PARTITION_MODE :
+ xcp_cfg->xcp_mgr->mode;
+ r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
+ if (r) {
+ sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+ goto err1;
+ }
+
+ xcp_cfg->mode = mode;
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ xcp_res = &xcp_cfg->xcp_res[i];
+ rid = xcp_res->id;
+ r = kobject_init_and_add(&xcp_res->kobj,
+ &xcp_cfg_res_sysfs_ktype,
+ &xcp_cfg->kobj, "%s",
+ xcp_res_names[rid]);
+ if (r)
+ goto err;
+ }
+
+ adev->xcp_mgr->xcp_cfg = xcp_cfg;
+ return;
+err:
+ for (j = 0; j < i; j++) {
+ xcp_res = &xcp_cfg->xcp_res[i];
+ kobject_put(&xcp_res->kobj);
+ }
+
+ sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+err1:
+ kobject_put(&xcp_cfg->kobj);
+}
+
+void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_xcp_res_details *xcp_res;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ int i;
+
+ if (!adev->xcp_mgr)
+ return;
+
+ xcp_cfg = adev->xcp_mgr->xcp_cfg;
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ xcp_res = &xcp_cfg->xcp_res[i];
+ kobject_put(&xcp_res->kobj);
+ }
+
+ sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+ kobject_put(&xcp_cfg->kobj);
+}
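
The XCP config sysfs code relies on the standard embedded-kobject idiom: a show callback recovers the owning structure with container_of() from the embedded member (see to_xcp_attr/to_xcp_res above), and the ktype's release frees the container. A hedged userspace miniature of that recover-the-container pattern:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_attr { const char *name; };

struct toy_res {
	int num_inst;
	struct toy_attr attr; /* embedded, like kobj in amdgpu_xcp_res_details */
};

/* Mirrors xcp_cfg_res_sysfs_attr_show(): recover the container from the
 * embedded member, then format one of its fields. */
static int show(struct toy_attr *attr, char *buf, size_t len)
{
	struct toy_res *res = container_of(attr, struct toy_res, attr);

	return snprintf(buf, len, "%d\n", res->num_inst);
}

int main(void)
{
	struct toy_res r = { .num_inst = 4, .attr = { "num_inst" } };
	char buf[16];

	show(&r.attr, buf, sizeof(buf));
	printf("%s", buf);
	return 0;
}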
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 32775260556f..b63f53242c57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -56,6 +56,30 @@ enum AMDGPU_XCP_STATE {
AMDGPU_XCP_RESUME,
};
+enum amdgpu_xcp_res_id {
+ AMDGPU_XCP_RES_XCC,
+ AMDGPU_XCP_RES_DMA,
+ AMDGPU_XCP_RES_DEC,
+ AMDGPU_XCP_RES_JPEG,
+ AMDGPU_XCP_RES_MAX,
+};
+
+struct amdgpu_xcp_res_details {
+ enum amdgpu_xcp_res_id id;
+ u8 num_inst;
+ u8 num_shared;
+ struct kobject kobj;
+};
+
+struct amdgpu_xcp_cfg {
+ u8 mode;
+ struct amdgpu_xcp_res_details xcp_res[AMDGPU_XCP_RES_MAX];
+ u8 num_res;
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ struct kobject kobj;
+ u16 compatible_nps_modes;
+};
+
struct amdgpu_xcp_ip_funcs {
int (*prepare_suspend)(void *handle, uint32_t inst_mask);
int (*suspend)(void *handle, uint32_t inst_mask);
@@ -97,6 +121,9 @@ struct amdgpu_xcp_mgr {
/* Used to determine KFD memory size limits per XCP */
unsigned int num_xcp_per_mem_partition;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ uint32_t supp_xcp_modes;
+ uint32_t avail_xcp_modes;
};
struct amdgpu_xcp_mgr_funcs {
@@ -108,7 +135,9 @@ struct amdgpu_xcp_mgr_funcs {
struct amdgpu_xcp_ip *ip);
int (*get_xcp_mem_id)(struct amdgpu_xcp_mgr *xcp_mgr,
struct amdgpu_xcp *xcp, uint8_t *mem_id);
-
+ int (*get_xcp_res_info)(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg);
int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
@@ -146,6 +175,9 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity);
+void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev);
+
#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \
((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
(adev)->xcp_mgr->funcs->select_scheds ? \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 7de449fae1e3..74b4349e345a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -40,6 +40,11 @@
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK 0x12200218
+#define XGMI_STATE_DISABLE 0xD1
+#define XGMI_STATE_LS0 0x81
+#define XGMI_LINK_ACTIVE 1
+#define XGMI_LINK_INACTIVE 0
+
static DEFINE_MUTEX(xgmi_mutex);
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
@@ -289,6 +294,42 @@ static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
};
+static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num)
+{
+ const u32 smnpcs_xgmi3x16_pcs_state_hist1 = 0x11a00070;
+ const int xgmi_inst = 2;
+ u32 link_inst;
+ u64 addr;
+
+ link_inst = global_link_num % xgmi_inst;
+
+ addr = (smnpcs_xgmi3x16_pcs_state_hist1 | (link_inst << 20)) +
+ adev->asic_funcs->encode_ext_smn_addressing(global_link_num / xgmi_inst);
+
+ return RREG32_PCIE_EXT(addr);
+}
+
+int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num)
+{
+ u32 xgmi_state_reg_val;
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ xgmi_state_reg_val = xgmi_v6_4_get_link_status(adev, global_link_num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_DISABLE)
+ return -ENOLINK;
+
+ if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_LS0)
+ return XGMI_LINK_ACTIVE;
+
+ return XGMI_LINK_INACTIVE;
+}
+
/**
* DOC: AMDGPU XGMI Support
*
@@ -667,6 +708,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
task_barrier_init(&hive->tb);
hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
hive->hi_req_gpu = NULL;
+ atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);
/*
* hive pstate on boot is high in vega20 so we have to go to low
@@ -800,6 +842,23 @@ int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
return -EINVAL;
}
+bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev)
+{
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ /* Sharing should always be enabled for non-SRIOV. */
+ if (!amdgpu_sriov_vf(adev))
+ return true;
+
+ for (i = 0 ; i < top->num_nodes; ++i)
+ if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
+ return !!top->nodes[i].is_sharing_enabled;
+
+ return false;
+}
+
/*
* Devices that support extended data require the entire hive to initialize with
* the shared memory buffer flag set.
@@ -860,8 +919,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
if (!adev->gmc.xgmi.supported)
return 0;
- if (!adev->gmc.xgmi.pending_reset &&
- amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
ret = psp_xgmi_initialize(&adev->psp, false, true);
if (ret) {
dev_err(adev->dev,
@@ -907,8 +965,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
task_barrier_add_task(&hive->tb);
- if (!adev->gmc.xgmi.pending_reset &&
- amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
/* update node list for other device in the hive */
if (tmp_adev != adev) {
@@ -985,7 +1042,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
}
}
- if (!ret && !adev->gmc.xgmi.pending_reset)
+ if (!ret)
ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
exit_unlock:
@@ -1500,3 +1557,117 @@ int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
return 0;
}
+
+static void amdgpu_xgmi_reset_on_init_work(struct work_struct *work)
+{
+ struct amdgpu_hive_info *hive =
+ container_of(work, struct amdgpu_hive_info, reset_on_init_work);
+ struct amdgpu_reset_context reset_context;
+ struct amdgpu_device *tmp_adev;
+ struct list_head device_list;
+ int r;
+
+ mutex_lock(&hive->hive_lock);
+
+ INIT_LIST_HEAD(&device_list);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+
+ tmp_adev = list_first_entry(&device_list, struct amdgpu_device,
+ reset_list);
+ amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+
+ reset_context.method = AMD_RESET_METHOD_ON_INIT;
+ reset_context.reset_req_dev = tmp_adev;
+ reset_context.hive = hive;
+ reset_context.reset_device_list = &device_list;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
+ amdgpu_reset_do_xgmi_reset_on_init(&reset_context);
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ r = amdgpu_ras_init_badpage_info(tmp_adev);
+ if (r && r != -EHWPOISON)
+ dev_err(tmp_adev->dev,
+ "error during bad page data initialization");
+ }
+}
+
+static void amdgpu_xgmi_schedule_reset_on_init(struct amdgpu_hive_info *hive)
+{
+ INIT_WORK(&hive->reset_on_init_work, amdgpu_xgmi_reset_on_init_work);
+ amdgpu_reset_domain_schedule(hive->reset_domain,
+ &hive->reset_on_init_work);
+}
+
+int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive;
+ bool reset_scheduled;
+ int num_devs;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive)
+ return -EINVAL;
+
+ mutex_lock(&hive->hive_lock);
+ num_devs = atomic_read(&hive->number_devices);
+ reset_scheduled = false;
+ if (num_devs == adev->gmc.xgmi.num_physical_nodes) {
+ amdgpu_xgmi_schedule_reset_on_init(hive);
+ reset_scheduled = true;
+ }
+
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+
+ if (reset_scheduled)
+ flush_work(&hive->reset_on_init_work);
+
+ return 0;
+}
+
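Two details of the scheduling path above are worth spelling out: only the device that completes the hive (num_devs == num_physical_nodes) schedules the work, and flush_work() runs only after hive_lock is dropped, because the work item itself takes hive_lock and flushing while still holding the mutex would deadlock. Schematically:

/* Illustrative ordering; the work item acquires hive_lock itself:
 *
 *	mutex_lock(&hive->hive_lock);
 *	...decide whether to schedule the work...
 *	mutex_unlock(&hive->hive_lock);		<-- must come first
 *	flush_work(&hive->reset_on_init_work);	<-- then wait for it
 */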
+int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive,
+ int req_nps_mode)
+{
+ struct amdgpu_device *tmp_adev;
+ int cur_nps_mode, r;
+
+ /* This is expected to be called only during driver unload. The
+ * request needs to be placed only once for all devices in the hive. If
+ * one of them fails, revert the request for the devices that already
+ * succeeded. After placing the request, set the hive mode to UNKNOWN so
+ * that the other devices don't place the request again.
+ */
+ mutex_lock(&hive->hive_lock);
+ if (atomic_read(&hive->requested_nps_mode) ==
+ UNKNOWN_MEMORY_PARTITION_MODE) {
+ dev_dbg(adev->dev, "Unexpected entry for hive NPS change");
+ mutex_unlock(&hive->hive_lock);
+ return 0;
+ }
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ r = adev->gmc.gmc_funcs->request_mem_partition_mode(
+ tmp_adev, req_nps_mode);
+ if (r)
+ break;
+ }
+ if (r) {
+ /* Request back current mode if one of the requests failed */
+ cur_nps_mode =
+ adev->gmc.gmc_funcs->query_mem_partition_mode(tmp_adev);
+ list_for_each_entry_continue_reverse(
+ tmp_adev, &hive->device_list, gmc.xgmi.head)
+ adev->gmc.gmc_funcs->request_mem_partition_mode(
+ tmp_adev, cur_nps_mode);
+ }
+ /* Set to UNKNOWN so that other devices don't place the request again */
+ atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);
+ mutex_unlock(&hive->hive_lock);
+
+ return r;
+}
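The revert path is the standard all-or-nothing list idiom: list_for_each_entry_continue_reverse() resumes from the entry that failed and walks backwards, touching exactly the entries that already succeeded. The same pattern in isolation (generic sketch, do_request() is a stand-in):

/* Generic all-or-nothing pass over a list (illustrative sketch). */
int r = 0;

list_for_each_entry(item, &head, node) {
	r = do_request(item, new_mode);
	if (r)
		break;		/* 'item' now points at the failure */
}
if (r) {
	/* revisit only the entries before the failed one */
	list_for_each_entry_continue_reverse(item, &head, node)
		do_request(item, old_mode);
}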
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index a3bfc16de6d4..d1282b4c6348 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -45,6 +45,8 @@ struct amdgpu_hive_info {
struct amdgpu_reset_domain *reset_domain;
atomic_t ras_recovery;
struct ras_event_manager event_mgr;
+ struct work_struct reset_on_init_work;
+ atomic_t requested_nps_mode;
};
struct amdgpu_pcs_ras_field {
@@ -64,6 +66,8 @@ int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
+bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev);
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
@@ -75,5 +79,12 @@ static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
}
int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev);
+
+int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive,
+ int req_nps_mode);
+int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev,
+ int global_link_num);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 6e9eeaeb3de1..b4f9c2f4e92c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -28,17 +28,21 @@
#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
-
+#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
+#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2
+#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64
/*
* layout
- * 0 64KB 65KB 66KB
- * | VBIOS | PF2VF | VF2PF | Bad Page | ...
- * | 64KB | 1KB | 1KB |
+ * 0 64KB 65KB 66KB 68KB 132KB
+ * | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
+ * | 64KB | 1KB | 1KB | 2KB | 64KB | ...
*/
+
#define AMD_SRIOV_MSG_SIZE_KB 1
#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB)
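Expanding the macros confirms the layout drawing: PF2VF starts at 64 KB, VF2PF at 64 + 1 = 65 KB, the bad-page block at 65 + 1 = 66 KB, and the telemetry region at 66 + 2 = 68 KB, ending at 68 + 64 = 132 KB. A build-time self-check would look like this (illustrative only, not part of the patch):

/* Illustrative checks of the layout comment above. */
static_assert(AMD_SRIOV_MSG_PF2VF_OFFSET_KB == 64);
static_assert(AMD_SRIOV_MSG_VF2PF_OFFSET_KB == 65);
static_assert(AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB == 66);
static_assert(AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB == 68);
static_assert(AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB +
	      AMD_SRIOV_RAS_TELEMETRY_SIZE_KB == 132);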
/*
* PF2VF history log:
@@ -86,30 +90,59 @@ enum amd_sriov_ucode_engine_id {
union amd_sriov_msg_feature_flags {
struct {
- uint32_t error_log_collect : 1;
- uint32_t host_load_ucodes : 1;
- uint32_t host_flr_vramlost : 1;
- uint32_t mm_bw_management : 1;
- uint32_t pp_one_vf_mode : 1;
- uint32_t reg_indirect_acc : 1;
- uint32_t av1_support : 1;
- uint32_t vcn_rb_decouple : 1;
- uint32_t mes_info_enable : 1;
- uint32_t reserved : 23;
+ uint32_t error_log_collect : 1;
+ uint32_t host_load_ucodes : 1;
+ uint32_t host_flr_vramlost : 1;
+ uint32_t mm_bw_management : 1;
+ uint32_t pp_one_vf_mode : 1;
+ uint32_t reg_indirect_acc : 1;
+ uint32_t av1_support : 1;
+ uint32_t vcn_rb_decouple : 1;
+ uint32_t mes_info_dump_enable : 1;
+ uint32_t ras_caps : 1;
+ uint32_t ras_telemetry : 1;
+ uint32_t reserved : 21;
} flags;
uint32_t all;
};
union amd_sriov_reg_access_flags {
struct {
- uint32_t vf_reg_access_ih : 1;
- uint32_t vf_reg_access_mmhub : 1;
- uint32_t vf_reg_access_gc : 1;
- uint32_t reserved : 29;
+ uint32_t vf_reg_access_ih : 1;
+ uint32_t vf_reg_access_mmhub : 1;
+ uint32_t vf_reg_access_gc : 1;
+ uint32_t reserved : 29;
} flags;
uint32_t all;
};
+union amd_sriov_ras_caps {
+ struct {
+ uint64_t block_umc : 1;
+ uint64_t block_sdma : 1;
+ uint64_t block_gfx : 1;
+ uint64_t block_mmhub : 1;
+ uint64_t block_athub : 1;
+ uint64_t block_pcie_bif : 1;
+ uint64_t block_hdp : 1;
+ uint64_t block_xgmi_wafl : 1;
+ uint64_t block_df : 1;
+ uint64_t block_smn : 1;
+ uint64_t block_sem : 1;
+ uint64_t block_mp0 : 1;
+ uint64_t block_mp1 : 1;
+ uint64_t block_fuse : 1;
+ uint64_t block_mca : 1;
+ uint64_t block_vcn : 1;
+ uint64_t block_jpeg : 1;
+ uint64_t block_ih : 1;
+ uint64_t block_mpio : 1;
+ uint64_t poison_propogation_mode : 1;
+ uint64_t reserved : 44;
+ } bits;
+ uint64_t all;
+};
+
union amd_sriov_msg_os_info {
struct {
uint32_t windows : 1;
@@ -158,7 +191,7 @@ struct amd_sriov_msg_pf2vf_info_header {
uint32_t reserved[2];
};
-#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (49)
+#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (55)
struct amd_sriov_msg_pf2vf_info {
/* header contains size and version */
struct amd_sriov_msg_pf2vf_info_header header;
@@ -211,6 +244,12 @@ struct amd_sriov_msg_pf2vf_info {
uint32_t pcie_atomic_ops_support_flags;
/* Portion of GPU memory occupied by VF. MAX value is 65535, but set to uint32_t to maintain alignment with reserved size */
uint32_t gpu_capacity;
+ /* vf bdf on host pci tree for debug only */
+ uint32_t bdf_on_host;
+ uint32_t more_bp; /* reserved for future use */
+ union amd_sriov_ras_caps ras_en_caps;
+ union amd_sriov_ras_caps ras_telemetry_en_caps;
+
/* reserved */
uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
} __packed;
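FILLED_SIZE is counted in 32-bit words, so the bump from 49 to 55 matches the additions exactly: bdf_on_host (1) + more_bp (1) + ras_en_caps (2, a 64-bit union) + ras_telemetry_en_caps (2). The reserved[] tail shrinks by the same six words, keeping the overall struct size unchanged:

/* Illustrative word count for the new fields. */
static_assert(AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE == 49 + 1 + 1 + 2 + 2);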
@@ -283,8 +322,12 @@ enum amd_sriov_mailbox_request_message {
MB_REQ_MSG_REL_GPU_FINI_ACCESS,
MB_REQ_MSG_REQ_GPU_RESET_ACCESS,
MB_REQ_MSG_REQ_GPU_INIT_DATA,
+ MB_REQ_MSG_PSP_VF_CMD_RELAY,
MB_REQ_MSG_LOG_VF_ERROR = 200,
+ MB_REQ_MSG_READY_TO_RESET = 201,
+ MB_REQ_MSG_RAS_POISON = 202,
+ MB_REQ_RAS_ERROR_COUNT = 203,
};
/* mailbox message send from host to guest */
@@ -297,10 +340,60 @@ enum amd_sriov_mailbox_response_message {
MB_RES_MSG_FAIL,
MB_RES_MSG_QUERY_ALIVE,
MB_RES_MSG_GPU_INIT_DATA_READY,
+ MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
MB_RES_MSG_TEXT_MESSAGE = 255
};
+enum amd_sriov_ras_telemetry_gpu_block {
+ RAS_TELEMETRY_GPU_BLOCK_UMC = 0,
+ RAS_TELEMETRY_GPU_BLOCK_SDMA = 1,
+ RAS_TELEMETRY_GPU_BLOCK_GFX = 2,
+ RAS_TELEMETRY_GPU_BLOCK_MMHUB = 3,
+ RAS_TELEMETRY_GPU_BLOCK_ATHUB = 4,
+ RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF = 5,
+ RAS_TELEMETRY_GPU_BLOCK_HDP = 6,
+ RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL = 7,
+ RAS_TELEMETRY_GPU_BLOCK_DF = 8,
+ RAS_TELEMETRY_GPU_BLOCK_SMN = 9,
+ RAS_TELEMETRY_GPU_BLOCK_SEM = 10,
+ RAS_TELEMETRY_GPU_BLOCK_MP0 = 11,
+ RAS_TELEMETRY_GPU_BLOCK_MP1 = 12,
+ RAS_TELEMETRY_GPU_BLOCK_FUSE = 13,
+ RAS_TELEMETRY_GPU_BLOCK_MCA = 14,
+ RAS_TELEMETRY_GPU_BLOCK_VCN = 15,
+ RAS_TELEMETRY_GPU_BLOCK_JPEG = 16,
+ RAS_TELEMETRY_GPU_BLOCK_IH = 17,
+ RAS_TELEMETRY_GPU_BLOCK_MPIO = 18,
+ RAS_TELEMETRY_GPU_BLOCK_COUNT = 19,
+};
+
+struct amd_sriov_ras_telemetry_header {
+ uint32_t checksum;
+ uint32_t used_size;
+ uint32_t reserved[2];
+};
+
+struct amd_sriov_ras_telemetry_error_count {
+ struct {
+ uint32_t ce_count;
+ uint32_t ue_count;
+ uint32_t de_count;
+ uint32_t ce_overflow_count;
+ uint32_t ue_overflow_count;
+ uint32_t de_overflow_count;
+ uint32_t reserved[6];
+ } block[RAS_TELEMETRY_GPU_BLOCK_COUNT];
+};
+
+struct amdsriov_ras_telemetry {
+ struct amd_sriov_ras_telemetry_header header;
+
+ union {
+ struct amd_sriov_ras_telemetry_error_count error_count;
+ } body;
+};
+
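A guest-side consumer of the telemetry region would validate the header and then index body.block[] by the enum above. A hypothetical reader that totals uncorrectable errors:

/* Hypothetical consumer: sum UE counts across all RAS blocks. */
static uint64_t example_total_ue(const struct amdsriov_ras_telemetry *t)
{
	const struct amd_sriov_ras_telemetry_error_count *ec =
		&t->body.error_count;
	uint64_t total = 0;
	int i;

	for (i = 0; i < RAS_TELEMETRY_GPU_BLOCK_COUNT; i++)
		total += ec->block[i].ue_count;

	return total;
}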
/* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */
enum amd_sriov_gpu_init_data_version {
GPU_INIT_DATA_READY_V1 = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 5e8833e4fed2..e157d6d857b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -447,6 +447,72 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
return 0;
}
+static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int max_res[AMDGPU_XCP_RES_MAX] = {};
+ bool res_lt_xcp;
+ int num_xcp, i;
+ u16 nps_modes;
+
+ if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ return -EINVAL;
+
+ max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
+ max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
+ max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
+ max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
+
+ switch (mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ num_xcp = 1;
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ break;
+ case AMDGPU_DPX_PARTITION_MODE:
+ num_xcp = 2;
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ break;
+ case AMDGPU_TPX_PARTITION_MODE:
+ num_xcp = 3;
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ case AMDGPU_QPX_PARTITION_MODE:
+ num_xcp = 4;
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ case AMDGPU_CPX_PARTITION_MODE:
+ num_xcp = NUM_XCC(adev->gfx.xcc_mask);
+ nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xcp_cfg->compatible_nps_modes =
+ (adev->gmc.supported_nps_modes & nps_modes);
+ xcp_cfg->num_res = ARRAY_SIZE(max_res);
+
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ res_lt_xcp = max_res[i] < num_xcp;
+ xcp_cfg->xcp_res[i].id = i;
+ xcp_cfg->xcp_res[i].num_inst =
+ res_lt_xcp ? 1 : max_res[i] / num_xcp;
+ xcp_cfg->xcp_res[i].num_inst =
+ i == AMDGPU_XCP_RES_JPEG ?
+ xcp_cfg->xcp_res[i].num_inst *
+ adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
+ xcp_cfg->xcp_res[i].num_shared =
+ res_lt_xcp ? num_xcp / max_res[i] : 1;
+ }
+
+ return 0;
+}
+
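Concrete numbers make the split logic above clearer. Assume a hypothetical part with 8 XCCs and 4 VCN instances:

/* QPX: num_xcp = 4
 *   XCC: 8 >= 4 -> num_inst = 8 / 4 = 2, num_shared = 1
 *   VCN: 4 >= 4 -> num_inst = 4 / 4 = 1, num_shared = 1
 * CPX: num_xcp = 8
 *   XCC: 8 >= 8 -> num_inst = 1, num_shared = 1
 *   VCN: 4 <  8 -> num_inst = 1, num_shared = 8 / 4 = 2
 * JPEG additionally multiplies num_inst by num_jpeg_rings.
 */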
static enum amdgpu_gfx_partition
__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
@@ -482,7 +548,7 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
case AMDGPU_SPX_PARTITION_MODE:
return adev->gmc.num_mem_partitions == 1 && num_xcc > 0;
case AMDGPU_DPX_PARTITION_MODE:
- return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0;
+ return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0;
case AMDGPU_TPX_PARTITION_MODE:
return (adev->gmc.num_mem_partitions == 1 ||
adev->gmc.num_mem_partitions == 3) &&
@@ -530,6 +596,57 @@ static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr,
return ret;
}
+static void
+__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ xcp_mgr->supp_xcp_modes = 0;
+
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
+ case 8:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_QPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 6:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_TPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 4:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ /* this seems to exist only in the emulation phase */
+ case 2:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 1:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ int mode;
+
+ xcp_mgr->avail_xcp_modes = 0;
+
+ for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
+ if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
+ xcp_mgr->avail_xcp_modes |= BIT(mode);
+ }
+}
+
static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
int mode, int *num_xcps)
{
@@ -578,6 +695,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags);
+ if (!ret)
+ __aqua_vanjaram_update_available_partition_mode(xcp_mgr);
unlock:
if (flags & AMDGPU_XCP_OPS_KFD)
amdgpu_amdkfd_unlock_kfd(adev);
@@ -656,9 +775,11 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
.switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
.query_partition_mode = &aqua_vanjaram_query_partition_mode,
.get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
+ .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
.get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
.select_scheds = &aqua_vanjaram_select_scheds,
- .update_partition_sched_list = &aqua_vanjaram_update_partition_sched_list
+ .update_partition_sched_list =
+ &aqua_vanjaram_update_partition_sched_list
};
static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
@@ -673,6 +794,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
if (ret)
return ret;
+ __aqua_vanjaram_update_supported_modes(adev->xcp_mgr);
/* TODO: Default memory node affinity init */
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index cf1d5d462b67..08d6787893b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1985,9 +1985,9 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.query_video_codecs = &cik_query_video_codecs,
};
-static int cik_common_early_init(void *handle)
+static int cik_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->smc_rreg = &cik_smc_rreg;
adev->smc_wreg = &cik_smc_wreg;
@@ -2124,19 +2124,9 @@ static int cik_common_early_init(void *handle)
return 0;
}
-static int cik_common_sw_init(void *handle)
+static int cik_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
-
-static int cik_common_sw_fini(void *handle)
-{
- return 0;
-}
-
-static int cik_common_hw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* move the golden regs per IP block */
cik_init_golden_registers(adev);
@@ -2148,23 +2138,14 @@ static int cik_common_hw_init(void *handle)
return 0;
}
-static int cik_common_hw_fini(void *handle)
+static int cik_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
-static int cik_common_suspend(void *handle)
+static int cik_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_common_hw_fini(adev);
-}
-
-static int cik_common_resume(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_common_hw_init(adev);
+ return cik_common_hw_init(ip_block);
}
static bool cik_common_is_idle(void *handle)
@@ -2172,24 +2153,21 @@ static bool cik_common_is_idle(void *handle)
return true;
}
-static int cik_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-static int cik_common_soft_reset(void *handle)
+
+static int cik_common_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* XXX hard reset?? */
return 0;
}
-static int cik_common_set_clockgating_state(void *handle,
+static int cik_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int cik_common_set_powergating_state(void *handle,
+static int cik_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -2198,20 +2176,13 @@ static int cik_common_set_powergating_state(void *handle,
static const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init,
- .late_init = NULL,
- .sw_init = cik_common_sw_init,
- .sw_fini = cik_common_sw_fini,
.hw_init = cik_common_hw_init,
.hw_fini = cik_common_hw_fini,
- .suspend = cik_common_suspend,
.resume = cik_common_resume,
.is_idle = cik_common_is_idle,
- .wait_for_idle = cik_common_wait_for_idle,
.soft_reset = cik_common_soft_reset,
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
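From here on, the series applies one mechanical conversion to every IP block: the amd_ip_funcs callbacks receive a struct amdgpu_ip_block * instead of an opaque void *handle, the device is reached through ip_block->adev instead of a cast, and callbacks that were empty or explicitly NULL are dropped, since the core treats missing callbacks as no-ops. The shape of the change (foo_start() is a stand-in):

/* Before: */
static int foo_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return foo_start(adev);
}

/* After: */
static int foo_hw_init(struct amdgpu_ip_block *ip_block)
{
	return foo_start(ip_block->adev);
}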
static const struct amdgpu_ip_block_version cik_common_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 576baa9dbb0e..444563486769 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -283,9 +283,9 @@ static void cik_ih_set_rptr(struct amdgpu_device *adev,
WREG32(mmIH_RB_RPTR, ih->rptr);
}
-static int cik_ih_early_init(void *handle)
+static int cik_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -297,10 +297,10 @@ static int cik_ih_early_init(void *handle)
return 0;
}
-static int cik_ih_sw_init(void *handle)
+static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
@@ -311,9 +311,9 @@ static int cik_ih_sw_init(void *handle)
return r;
}
-static int cik_ih_sw_fini(void *handle)
+static int cik_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
@@ -321,34 +321,28 @@ static int cik_ih_sw_fini(void *handle)
return 0;
}
-static int cik_ih_hw_init(void *handle)
+static int cik_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return cik_ih_irq_init(adev);
}
-static int cik_ih_hw_fini(void *handle)
+static int cik_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cik_ih_irq_disable(adev);
+ cik_ih_irq_disable(ip_block->adev);
return 0;
}
-static int cik_ih_suspend(void *handle)
+static int cik_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_ih_hw_fini(adev);
+ return cik_ih_hw_fini(ip_block);
}
-static int cik_ih_resume(void *handle)
+static int cik_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_ih_hw_init(adev);
+ return cik_ih_hw_init(ip_block);
}
static bool cik_ih_is_idle(void *handle)
@@ -362,11 +356,11 @@ static bool cik_ih_is_idle(void *handle)
return true;
}
-static int cik_ih_wait_for_idle(void *handle)
+static int cik_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -378,9 +372,9 @@ static int cik_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int cik_ih_soft_reset(void *handle)
+static int cik_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -408,13 +402,13 @@ static int cik_ih_soft_reset(void *handle)
return 0;
}
-static int cik_ih_set_clockgating_state(void *handle,
+static int cik_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int cik_ih_set_powergating_state(void *handle,
+static int cik_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -423,7 +417,6 @@ static int cik_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs cik_ih_ip_funcs = {
.name = "cik_ih",
.early_init = cik_ih_early_init,
- .late_init = NULL,
.sw_init = cik_ih_sw_init,
.sw_fini = cik_ih_sw_fini,
.hw_init = cik_ih_hw_init,
@@ -435,8 +428,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
.soft_reset = cik_ih_soft_reset,
.set_clockgating_state = cik_ih_set_clockgating_state,
.set_powergating_state = cik_ih_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs cik_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 952737de9411..d9bd8f3f17e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -54,7 +54,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
-static int cik_sdma_soft_reset(void *handle);
+static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
@@ -133,9 +133,11 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
@@ -696,7 +698,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -918,9 +920,9 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
}
}
-static int cik_sdma_early_init(void *handle)
+static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
@@ -937,10 +939,10 @@ static int cik_sdma_early_init(void *handle)
return 0;
}
-static int cik_sdma_sw_init(void *handle)
+static int cik_sdma_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r, i;
/* SDMA trap event */
@@ -977,9 +979,9 @@ static int cik_sdma_sw_init(void *handle)
return r;
}
-static int cik_sdma_sw_fini(void *handle)
+static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -989,10 +991,10 @@ static int cik_sdma_sw_fini(void *handle)
return 0;
}
-static int cik_sdma_hw_init(void *handle)
+static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = cik_sdma_start(adev);
if (r)
@@ -1001,9 +1003,9 @@ static int cik_sdma_hw_init(void *handle)
return r;
}
-static int cik_sdma_hw_fini(void *handle)
+static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cik_ctx_switch_enable(adev, false);
cik_sdma_enable(adev, false);
@@ -1011,20 +1013,16 @@ static int cik_sdma_hw_fini(void *handle)
return 0;
}
-static int cik_sdma_suspend(void *handle)
+static int cik_sdma_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_sdma_hw_fini(adev);
+ return cik_sdma_hw_fini(ip_block);
}
-static int cik_sdma_resume(void *handle)
+static int cik_sdma_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cik_sdma_soft_reset(handle);
+ cik_sdma_soft_reset(ip_block);
- return cik_sdma_hw_init(adev);
+ return cik_sdma_hw_init(ip_block);
}
static bool cik_sdma_is_idle(void *handle)
@@ -1039,11 +1037,11 @@ static bool cik_sdma_is_idle(void *handle)
return true;
}
-static int cik_sdma_wait_for_idle(void *handle)
+static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -1056,10 +1054,10 @@ static int cik_sdma_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int cik_sdma_soft_reset(void *handle)
+static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp;
/* sdma0 */
@@ -1193,11 +1191,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int cik_sdma_set_clockgating_state(void *handle,
+static int cik_sdma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -1208,7 +1206,7 @@ static int cik_sdma_set_clockgating_state(void *handle,
return 0;
}
-static int cik_sdma_set_powergating_state(void *handle,
+static int cik_sdma_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1217,7 +1215,6 @@ static int cik_sdma_set_powergating_state(void *handle,
static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init,
- .late_init = NULL,
.sw_init = cik_sdma_sw_init,
.sw_fini = cik_sdma_sw_fini,
.hw_init = cik_sdma_hw_init,
@@ -1229,8 +1226,6 @@ static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.soft_reset = cik_sdma_soft_reset,
.set_clockgating_state = cik_sdma_set_clockgating_state,
.set_powergating_state = cik_sdma_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 072643787384..82586b76aeda 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -274,9 +274,9 @@ static void cz_ih_set_rptr(struct amdgpu_device *adev,
WREG32(mmIH_RB_RPTR, ih->rptr);
}
-static int cz_ih_early_init(void *handle)
+static int cz_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -288,10 +288,10 @@ static int cz_ih_early_init(void *handle)
return 0;
}
-static int cz_ih_sw_init(void *handle)
+static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
@@ -302,9 +302,9 @@ static int cz_ih_sw_init(void *handle)
return r;
}
-static int cz_ih_sw_fini(void *handle)
+static int cz_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
@@ -312,10 +312,10 @@ static int cz_ih_sw_fini(void *handle)
return 0;
}
-static int cz_ih_hw_init(void *handle)
+static int cz_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = cz_ih_irq_init(adev);
if (r)
@@ -324,27 +324,21 @@ static int cz_ih_hw_init(void *handle)
return 0;
}
-static int cz_ih_hw_fini(void *handle)
+static int cz_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cz_ih_irq_disable(adev);
+ cz_ih_irq_disable(ip_block->adev);
return 0;
}
-static int cz_ih_suspend(void *handle)
+static int cz_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cz_ih_hw_fini(adev);
+ return cz_ih_hw_fini(ip_block);
}
-static int cz_ih_resume(void *handle)
+static int cz_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cz_ih_hw_init(adev);
+ return cz_ih_hw_init(ip_block);
}
static bool cz_ih_is_idle(void *handle)
@@ -358,11 +352,11 @@ static bool cz_ih_is_idle(void *handle)
return true;
}
-static int cz_ih_wait_for_idle(void *handle)
+static int cz_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -374,10 +368,10 @@ static int cz_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int cz_ih_soft_reset(void *handle)
+static int cz_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -404,14 +398,14 @@ static int cz_ih_soft_reset(void *handle)
return 0;
}
-static int cz_ih_set_clockgating_state(void *handle,
+static int cz_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
// TODO
return 0;
}
-static int cz_ih_set_powergating_state(void *handle,
+static int cz_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
// TODO
@@ -421,7 +415,6 @@ static int cz_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init,
- .late_init = NULL,
.sw_init = cz_ih_sw_init,
.sw_fini = cz_ih_sw_fini,
.hw_init = cz_ih_hw_init,
@@ -433,8 +426,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
.soft_reset = cz_ih_soft_reset,
.set_clockgating_state = cz_ih_set_clockgating_state,
.set_powergating_state = cz_ih_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs cz_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 70c1399f738d..c5e3d2251b18 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2687,6 +2687,32 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v10_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+
+}
+
+static const struct drm_plane_helper_funcs dce_v10_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v10_0_panic_flush,
+};
+
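These two hooks add drm_panic support to the legacy DCE paths: get_scanout_buffer (here the generic amdgpu_display_get_scanout_buffer) hands the panic handler a CPU-addressable view of the front buffer, and panic_flush clears the GRPH array (tiling) mode so the handler's linear pixel writes scan out correctly even if the framebuffer was tiled. The assumed call order in the panic path, roughly:

/* Rough drm_panic sequence (assumed, simplified):
 *	plane->helper_private->get_scanout_buffer(plane, &sb);
 *	... draw the panic screen into sb.map ...
 *	plane->helper_private->panic_flush(plane);
 */

The same pair is wired up for DCE 11, 6 and 8 below; only the tiling-field mask differs on DCE 6.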
static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2734,13 +2760,14 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v10_0_drm_primary_plane_helper_funcs);
return 0;
}
-static int dce_v10_0_early_init(void *handle)
+static int dce_v10_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
@@ -2765,10 +2792,10 @@ static int dce_v10_0_early_init(void *handle)
return 0;
}
-static int dce_v10_0_sw_init(void *handle)
+static int dce_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
@@ -2844,9 +2871,9 @@ static int dce_v10_0_sw_init(void *handle)
return 0;
}
-static int dce_v10_0_sw_fini(void *handle)
+static int dce_v10_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
drm_edid_free(adev->mode_info.bios_hardcoded_edid);
@@ -2862,10 +2889,10 @@ static int dce_v10_0_sw_fini(void *handle)
return 0;
}
-static int dce_v10_0_hw_init(void *handle)
+static int dce_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v10_0_init_golden_registers(adev);
@@ -2887,10 +2914,10 @@ static int dce_v10_0_hw_init(void *handle)
return 0;
}
-static int dce_v10_0_hw_fini(void *handle)
+static int dce_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v10_0_hpd_fini(adev);
@@ -2905,9 +2932,9 @@ static int dce_v10_0_hw_fini(void *handle)
return 0;
}
-static int dce_v10_0_suspend(void *handle)
+static int dce_v10_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_display_suspend_helper(adev);
@@ -2917,18 +2944,18 @@ static int dce_v10_0_suspend(void *handle)
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v10_0_hw_fini(handle);
+ return dce_v10_0_hw_fini(ip_block);
}
-static int dce_v10_0_resume(void *handle)
+static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
adev->mode_info.bl_level);
- ret = dce_v10_0_hw_init(handle);
+ ret = dce_v10_0_hw_init(ip_block);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
@@ -2948,22 +2975,17 @@ static bool dce_v10_0_is_idle(void *handle)
return true;
}
-static int dce_v10_0_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static bool dce_v10_0_check_soft_reset(void *handle)
+static bool dce_v10_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return dce_v10_0_is_display_hung(adev);
}
-static int dce_v10_0_soft_reset(void *handle)
+static int dce_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (dce_v10_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
@@ -3307,13 +3329,13 @@ static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v10_0_set_clockgating_state(void *handle,
+static int dce_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v10_0_set_powergating_state(void *handle,
+static int dce_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3322,7 +3344,6 @@ static int dce_v10_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.name = "dce_v10_0",
.early_init = dce_v10_0_early_init,
- .late_init = NULL,
.sw_init = dce_v10_0_sw_init,
.sw_fini = dce_v10_0_sw_fini,
.hw_init = dce_v10_0_hw_init,
@@ -3330,13 +3351,10 @@ static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.suspend = dce_v10_0_suspend,
.resume = dce_v10_0_resume,
.is_idle = dce_v10_0_is_idle,
- .wait_for_idle = dce_v10_0_wait_for_idle,
.check_soft_reset = dce_v10_0_check_soft_reset,
.soft_reset = dce_v10_0_soft_reset,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
.set_powergating_state = dce_v10_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index f154c24499c8..ea42a4472bf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2800,6 +2800,32 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v11_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+
+}
+
+static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v11_0_panic_flush,
+};
+
static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2847,13 +2873,14 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs);
return 0;
}
-static int dce_v11_0_early_init(void *handle)
+static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
@@ -2891,10 +2918,10 @@ static int dce_v11_0_early_init(void *handle)
return 0;
}
-static int dce_v11_0_sw_init(void *handle)
+static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
@@ -2971,9 +2998,9 @@ static int dce_v11_0_sw_init(void *handle)
return 0;
}
-static int dce_v11_0_sw_fini(void *handle)
+static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
drm_edid_free(adev->mode_info.bios_hardcoded_edid);
@@ -2989,10 +3016,10 @@ static int dce_v11_0_sw_fini(void *handle)
return 0;
}
-static int dce_v11_0_hw_init(void *handle)
+static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v11_0_init_golden_registers(adev);
@@ -3025,10 +3052,10 @@ static int dce_v11_0_hw_init(void *handle)
return 0;
}
-static int dce_v11_0_hw_fini(void *handle)
+static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v11_0_hpd_fini(adev);
@@ -3043,9 +3070,9 @@ static int dce_v11_0_hw_fini(void *handle)
return 0;
}
-static int dce_v11_0_suspend(void *handle)
+static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_display_suspend_helper(adev);
@@ -3055,18 +3082,18 @@ static int dce_v11_0_suspend(void *handle)
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v11_0_hw_fini(handle);
+ return dce_v11_0_hw_fini(ip_block);
}
-static int dce_v11_0_resume(void *handle)
+static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
adev->mode_info.bl_level);
- ret = dce_v11_0_hw_init(handle);
+ ret = dce_v11_0_hw_init(ip_block);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
@@ -3086,15 +3113,10 @@ static bool dce_v11_0_is_idle(void *handle)
return true;
}
-static int dce_v11_0_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int dce_v11_0_soft_reset(void *handle)
+static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (dce_v11_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
@@ -3439,13 +3461,13 @@ static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v11_0_set_clockgating_state(void *handle,
+static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v11_0_set_powergating_state(void *handle,
+static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3454,7 +3476,6 @@ static int dce_v11_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.name = "dce_v11_0",
.early_init = dce_v11_0_early_init,
- .late_init = NULL,
.sw_init = dce_v11_0_sw_init,
.sw_fini = dce_v11_0_sw_fini,
.hw_init = dce_v11_0_hw_init,
@@ -3462,12 +3483,9 @@ static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.suspend = dce_v11_0_suspend,
.resume = dce_v11_0_resume,
.is_idle = dce_v11_0_is_idle,
- .wait_for_idle = dce_v11_0_wait_for_idle,
.soft_reset = dce_v11_0_soft_reset,
.set_clockgating_state = dce_v11_0_set_clockgating_state,
.set_powergating_state = dce_v11_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index a7fcb135827f..915804a6a1d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2602,6 +2602,32 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v6_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_ARRAY_MODE(0x7);
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+
+}
+
+static const struct drm_plane_helper_funcs dce_v6_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v6_0_panic_flush,
+};
+
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2629,13 +2655,14 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v6_0_drm_primary_plane_helper_funcs);
return 0;
}
-static int dce_v6_0_early_init(void *handle)
+static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
@@ -2664,11 +2691,11 @@ static int dce_v6_0_early_init(void *handle)
return 0;
}
-static int dce_v6_0_sw_init(void *handle)
+static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
bool ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
@@ -2743,9 +2770,9 @@ static int dce_v6_0_sw_init(void *handle)
return r;
}
-static int dce_v6_0_sw_fini(void *handle)
+static int dce_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
drm_edid_free(adev->mode_info.bios_hardcoded_edid);
@@ -2760,10 +2787,10 @@ static int dce_v6_0_sw_fini(void *handle)
return 0;
}
-static int dce_v6_0_hw_init(void *handle)
+static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* disable vga render */
dce_v6_0_set_vga_render_state(adev, false);
@@ -2783,10 +2810,10 @@ static int dce_v6_0_hw_init(void *handle)
return 0;
}
-static int dce_v6_0_hw_fini(void *handle)
+static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v6_0_hpd_fini(adev);
@@ -2801,9 +2828,9 @@ static int dce_v6_0_hw_fini(void *handle)
return 0;
}
-static int dce_v6_0_suspend(void *handle)
+static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_display_suspend_helper(adev);
@@ -2812,18 +2839,18 @@ static int dce_v6_0_suspend(void *handle)
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v6_0_hw_fini(handle);
+ return dce_v6_0_hw_fini(ip_block);
}
-static int dce_v6_0_resume(void *handle)
+static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
adev->mode_info.bl_level);
- ret = dce_v6_0_hw_init(handle);
+ ret = dce_v6_0_hw_init(ip_block);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
@@ -2843,12 +2870,7 @@ static bool dce_v6_0_is_idle(void *handle)
return true;
}
-static int dce_v6_0_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int dce_v6_0_soft_reset(void *handle)
+static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
return 0;
@@ -3129,13 +3151,13 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
}
-static int dce_v6_0_set_clockgating_state(void *handle,
+static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v6_0_set_powergating_state(void *handle,
+static int dce_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3144,7 +3166,6 @@ static int dce_v6_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.name = "dce_v6_0",
.early_init = dce_v6_0_early_init,
- .late_init = NULL,
.sw_init = dce_v6_0_sw_init,
.sw_fini = dce_v6_0_sw_fini,
.hw_init = dce_v6_0_hw_init,
@@ -3152,12 +3173,9 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.suspend = dce_v6_0_suspend,
.resume = dce_v6_0_resume,
.is_idle = dce_v6_0_is_idle,
- .wait_for_idle = dce_v6_0_wait_for_idle,
.soft_reset = dce_v6_0_soft_reset,
.set_clockgating_state = dce_v6_0_set_clockgating_state,
.set_powergating_state = dce_v6_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static void
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 77ac3f114d24..f2edc0fece5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2613,6 +2613,31 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
+static void dce_v8_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v8_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v8_0_panic_flush,
+};
+
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -2640,13 +2665,14 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v8_0_drm_primary_plane_helper_funcs);
return 0;
}
-static int dce_v8_0_early_init(void *handle)
+static int dce_v8_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
@@ -2680,10 +2706,10 @@ static int dce_v8_0_early_init(void *handle)
return 0;
}
-static int dce_v8_0_sw_init(void *handle)
+static int dce_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
@@ -2764,9 +2790,9 @@ static int dce_v8_0_sw_init(void *handle)
return 0;
}
-static int dce_v8_0_sw_fini(void *handle)
+static int dce_v8_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
drm_edid_free(adev->mode_info.bios_hardcoded_edid);
@@ -2782,10 +2808,10 @@ static int dce_v8_0_sw_fini(void *handle)
return 0;
}
-static int dce_v8_0_hw_init(void *handle)
+static int dce_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* disable vga render */
dce_v8_0_set_vga_render_state(adev, false);
@@ -2805,10 +2831,10 @@ static int dce_v8_0_hw_init(void *handle)
return 0;
}
-static int dce_v8_0_hw_fini(void *handle)
+static int dce_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v8_0_hpd_fini(adev);
@@ -2823,9 +2849,9 @@ static int dce_v8_0_hw_fini(void *handle)
return 0;
}
-static int dce_v8_0_suspend(void *handle)
+static int dce_v8_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_display_suspend_helper(adev);
@@ -2835,18 +2861,18 @@ static int dce_v8_0_suspend(void *handle)
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
- return dce_v8_0_hw_fini(handle);
+ return dce_v8_0_hw_fini(ip_block);
}
-static int dce_v8_0_resume(void *handle)
+static int dce_v8_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
adev->mode_info.bl_level);
- ret = dce_v8_0_hw_init(handle);
+ ret = dce_v8_0_hw_init(ip_block);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
@@ -2866,15 +2892,10 @@ static bool dce_v8_0_is_idle(void *handle)
return true;
}
-static int dce_v8_0_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int dce_v8_0_soft_reset(void *handle)
+static int dce_v8_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (dce_v8_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
@@ -3217,13 +3238,13 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
}
-static int dce_v8_0_set_clockgating_state(void *handle,
+static int dce_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v8_0_set_powergating_state(void *handle,
+static int dce_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3232,7 +3253,6 @@ static int dce_v8_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.name = "dce_v8_0",
.early_init = dce_v8_0_early_init,
- .late_init = NULL,
.sw_init = dce_v8_0_sw_init,
.sw_fini = dce_v8_0_sw_fini,
.hw_init = dce_v8_0_hw_init,
@@ -3240,12 +3260,9 @@ static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.suspend = dce_v8_0_suspend,
.resume = dce_v8_0_resume,
.is_idle = dce_v8_0_is_idle,
- .wait_for_idle = dce_v8_0_wait_for_idle,
.soft_reset = dce_v8_0_soft_reset,
.set_clockgating_state = dce_v8_0_set_clockgating_state,
.set_powergating_state = dce_v8_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static void
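[Editor's note] The dominant change in this series is mechanical: each amd_ip_funcs callback moves from an opaque `void *handle` to a typed `struct amdgpu_ip_block *`, dropping the cast. A minimal sketch of the before/after pattern, with "foo" as a hypothetical IP block rather than a real driver:

	/* Before: opaque handle, cast required */
	static int foo_hw_init_old(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;

		dev_dbg(adev->dev, "foo hw_init\n");
		return 0;
	}

	/* After: typed IP-block pointer, no cast; per-block state such as
	 * ip_block->version is also reachable when needed.
	 */
	static int foo_hw_init(struct amdgpu_ip_block *ip_block)
	{
		struct amdgpu_device *adev = ip_block->adev;

		dev_dbg(adev->dev, "foo hw_init\n");
		return 0;
	}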
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 483a441b46aa..621aeca53880 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -254,8 +254,8 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev)
static void df_v3_6_sw_fini(struct amdgpu_device *adev)
{
-
- device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
+ if (adev->dev->kobj.sd)
+ device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
}
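[Editor's note] The df_v3_6 hunk guards sysfs teardown: once the device has been unregistered, its sysfs directory entry (`kobj.sd`) is gone and removing the attribute again is pointless. A hedged sketch of the guard, with `dev_attr_example` as a hypothetical attribute:

	static void example_sw_fini(struct amdgpu_device *adev)
	{
		/* Only remove the attribute while the sysfs dirent exists */
		if (adev->dev->kobj.sd)
			device_remove_file(adev->dev, &dev_attr_example);
	}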
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 45ed97038df0..5ba263fe5512 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -45,6 +45,7 @@
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
+#include "gfx_v10_0_cleaner_shader.h"
#include "nbio_v2_3.h"
/*
@@ -3673,17 +3674,23 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid);
-static int gfx_v10_0_set_powergating_state(void *handle,
+static int gfx_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ u64 shader_mc_addr;
+
+ /* Cleaner shader MC address */
+ shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
+
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
+ amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
@@ -4030,7 +4037,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -4132,18 +4139,21 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp%s.bin", ucode_prefix, wks);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me%s.bin", ucode_prefix, wks);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce%s.bin", ucode_prefix, wks);
if (err)
goto out;
@@ -4167,6 +4177,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec%s.bin", ucode_prefix, wks);
if (err)
goto out;
@@ -4174,6 +4185,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2%s.bin", ucode_prefix, wks);
if (!err) {
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
@@ -4683,11 +4695,11 @@ static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
}
}
-static int gfx_v10_0_sw_init(void *handle)
+static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id = 0;
int xcc_id = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(10, 1, 10):
@@ -4726,6 +4738,28 @@ static int gfx_v10_0_sw_init(void *handle)
adev->gfx.mec.num_queue_per_pipe = 8;
break;
}
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ adev->gfx.cleaner_shader_ptr = gfx_10_3_0_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_3_0_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 64 &&
+ adev->gfx.pfp_fw_version >= 100 &&
+ adev->gfx.mec_fw_version >= 122) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
/* KIQ event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
@@ -4814,6 +4848,11 @@ static int gfx_v10_0_sw_init(void *handle)
}
}
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
if (r) {
@@ -4842,6 +4881,10 @@ static int gfx_v10_0_sw_init(void *handle)
gfx_v10_0_alloc_ip_dump(adev);
+ r = amdgpu_gfx_sysfs_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -4866,10 +4909,10 @@ static void gfx_v10_0_me_fini(struct amdgpu_device *adev)
(void **)&adev->gfx.me.me_fw_ptr);
}
-static int gfx_v10_0_sw_fini(void *handle)
+static int gfx_v10_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4881,6 +4924,8 @@ static int gfx_v10_0_sw_fini(void *handle)
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
amdgpu_gfx_kiq_fini(adev, 0);
+ amdgpu_gfx_cleaner_shader_sw_fini(adev);
+
gfx_v10_0_pfp_fini(adev);
gfx_v10_0_ce_fini(adev);
gfx_v10_0_me_fini(adev);
@@ -4891,6 +4936,7 @@ static int gfx_v10_0_sw_fini(void *handle)
gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev);
gfx_v10_0_free_microcode(adev);
+ amdgpu_gfx_sysfs_fini(adev);
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
@@ -5929,7 +5975,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
else
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
- if (adev->job_hang && !enable)
+ if (amdgpu_in_reset(adev) && !enable)
return 0;
for (i = 0; i < adev->usec_timeout; i++) {
@@ -6374,7 +6420,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
@@ -6412,7 +6458,7 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
- /* Set the wb address wether it's enabled or not */
+ /* Set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
@@ -6576,17 +6622,13 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp | 0x80);
break;
default:
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
break;
}
}
@@ -7366,14 +7408,17 @@ static void gfx_v10_0_disable_gpa_mode(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCPG_PSP_DEBUG, data);
}
-static int gfx_v10_0_hw_init(void *handle)
+static int gfx_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!amdgpu_emu_mode)
gfx_v10_0_init_golden_registers(adev);
+ amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
+ adev->gfx.cleaner_shader_ptr);
+
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
/**
* For gfx 10, rlc firmware loading relies on smu firmware is
@@ -7418,9 +7463,9 @@ static int gfx_v10_0_hw_init(void *handle)
return r;
}
-static int gfx_v10_0_hw_fini(void *handle)
+static int gfx_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -7431,7 +7476,7 @@ static int gfx_v10_0_hw_fini(void *handle)
* otherwise the gfxoff disallowing will be failed to set.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1))
- gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE);
+ gfx_v10_0_set_powergating_state(ip_block, AMD_PG_STATE_UNGATE);
if (!adev->no_hw_access) {
if (amdgpu_async_gfx_ring) {
@@ -7456,14 +7501,14 @@ static int gfx_v10_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v10_0_suspend(void *handle)
+static int gfx_v10_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v10_0_hw_fini(handle);
+ return gfx_v10_0_hw_fini(ip_block);
}
-static int gfx_v10_0_resume(void *handle)
+static int gfx_v10_0_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v10_0_hw_init(handle);
+ return gfx_v10_0_hw_init(ip_block);
}
static bool gfx_v10_0_is_idle(void *handle)
@@ -7477,11 +7522,11 @@ static bool gfx_v10_0_is_idle(void *handle)
return true;
}
-static int gfx_v10_0_wait_for_idle(void *handle)
+static int gfx_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -7495,11 +7540,11 @@ static int gfx_v10_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v10_0_soft_reset(void *handle)
+static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 grbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* GRBM_STATUS */
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
@@ -7678,9 +7723,9 @@ static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static int gfx_v10_0_early_init(void *handle)
+static int gfx_v10_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.funcs = &gfx_v10_0_gfx_funcs;
@@ -7722,9 +7767,9 @@ static int gfx_v10_0_early_init(void *handle)
return gfx_v10_0_init_microcode(adev);
}
-static int gfx_v10_0_late_init(void *handle)
+static int gfx_v10_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -8319,10 +8364,10 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
-static int gfx_v10_0_set_powergating_state(void *handle,
+static int gfx_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -8357,10 +8402,10 @@ static int gfx_v10_0_set_powergating_state(void *handle,
return 0;
}
-static int gfx_v10_0_set_clockgating_state(void *handle,
+static int gfx_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -9402,8 +9447,6 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
- int i;
-
/* Header itself is a NOP packet */
if (num_nop == 1) {
amdgpu_ring_write(ring, ring->funcs->nop);
@@ -9414,8 +9457,7 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
 /* Header is at index 0, followed by num_nop - 1 NOP packets */
- for (i = 1; i < num_nop; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ amdgpu_ring_insert_nop(ring, num_nop - 1);
}
static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
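[Editor's note] The insert_nop rework leans on the PACKET3_NOP count field: a single header dword announces the filler that follows, so the per-dword loop can be replaced by the generic amdgpu_ring_insert_nop() filler. A standalone model of the count arithmetic mirrored from the code above, illustrative only:

	static unsigned int nop_header_count(unsigned int num_nop)
	{
		/* For num_nop >= 2: header at index 0, then num_nop - 1
		 * filler dwords; the header's count field is num_nop - 2,
		 * capped at 0x3ffe.
		 */
		unsigned int body = num_nop - 2;

		return body < 0x3ffe ? body : 0x3ffe;
	}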
@@ -9568,9 +9610,9 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
return amdgpu_ring_test_ring(ring);
}
-static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
+static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
@@ -9632,9 +9674,9 @@ static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
}
}
-static void gfx_v10_ip_dump(void *handle)
+static void gfx_v10_ip_dump(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
@@ -9699,6 +9741,13 @@ static void gfx_v10_ip_dump(void *handle)
amdgpu_gfx_off_ctrl(adev, true);
}
+static void gfx_v10_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+{
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
+
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.name = "gfx_v10_0",
.early_init = gfx_v10_0_early_init,
@@ -9749,7 +9798,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
- 8, /* gfx_v10_0_emit_mem_sync */
+ 8 + /* gfx_v10_0_emit_mem_sync */
+ 2, /* gfx_v10_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
.emit_fence = gfx_v10_0_ring_emit_fence,
@@ -9772,6 +9822,9 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kgq,
+ .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -9791,7 +9844,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v10_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
- 8, /* gfx_v10_0_emit_mem_sync */
+ 8 + /* gfx_v10_0_emit_mem_sync */
+ 2, /* gfx_v10_0_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
.emit_fence = gfx_v10_0_ring_emit_fence,
@@ -9809,6 +9863,9 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
.reset = gfx_v10_0_reset_kcq,
+ .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
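[Editor's note] With a cleaner shader allocated, the two SET_RESOURCES dwords that previously carried the (unused) GWS mask now carry the shader's MC address, pre-shifted right by 8 bits, implying the buffer is at least 256-byte aligned. A small sketch of the packing using the kernel's lower/upper_32_bits helpers:

	static void kiq_pack_cleaner_addr(u64 gpu_addr, u32 *lo, u32 *hi)
	{
		u64 shader_mc_addr = gpu_addr >> 8;	/* 256-byte units */

		*lo = lower_32_bits(shader_mc_addr);
		*hi = upper_32_bits(shader_mc_addr);
	}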
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
new file mode 100644
index 000000000000..663c2572d440
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Define the cleaner shader gfx_10_3_0 */
+static const u32 gfx_10_3_0_cleaner_shader_hex[] = {
+ 0xb0804004, 0xbf8a0000,
+ 0xbe8203b8, 0xbefc0380,
+ 0x7e008480, 0x7e028480,
+ 0x7e048480, 0x7e068480,
+ 0x7e088480, 0x7e0a8480,
+ 0x7e0c8480, 0x7e0e8480,
+ 0xbefc0302, 0x80828802,
+ 0xbf84fff5, 0xbe8203ff,
+ 0x80000000, 0x87020002,
+ 0xbf840012, 0xbefe03c1,
+ 0xbeff03c1, 0xd7650001,
+ 0x0001007f, 0xd7660001,
+ 0x0002027e, 0x16020288,
+ 0xbe8203bf, 0xbefc03c1,
+ 0xd9382000, 0x00020201,
+ 0xd9386040, 0x00040401,
+ 0xd70f6a01, 0x000202ff,
+ 0x00000400, 0x80828102,
+ 0xbf84fff7, 0xbefc03ff,
+ 0x00000068, 0xbe803080,
+ 0xbe813080, 0xbe823080,
+ 0xbe833080, 0x80fc847c,
+ 0xbf84fffa, 0xbeea0480,
+ 0xbeec0480, 0xbeee0480,
+ 0xbef00480, 0xbef20480,
+ 0xbef40480, 0xbef60480,
+ 0xbef80480, 0xbefa0480,
+ 0xbf810000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm
new file mode 100644
index 000000000000..0e1c246166c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
+// To turn this shader program on for compilation, change this to main and the lower shader main to main_1.
+
+// GFX10.3 : Clear SGPRs, VGPRs and LDS
+// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot
+// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD
+// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS)
+// It takes 2 workgroups to use all of LDS: one on each CU of the WGP
+// Each wave clears SGPRs 0 - 107
+// Each wave clears VGPRs 0 - 63
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+
+
+shader main
+ asic(GFX10)
+ type(CS)
+ wave_size(32)
+// Note: original source code from SQ team
+
+//
+// Create 32 waves in a threadgroup (CS waves)
+// Each allocates 64 VGPRs
+// The workgroup allocates all of LDS (64kbytes)
+//
+// Takes about 2500 clocks to run.
+// (theoretical fastest = 1024 clks vgpr + 640 clks lds = 1660 clks)
+//
+ S_BARRIER
+ s_mov_b32 s2, 0x00000038 // Loop 64/8=8 times (loop unrolled for performance)
+ s_mov_b32 m0, 0
+ //
+ // CLEAR VGPRs
+ //
+label_0005:
+ v_movreld_b32 v0, 0
+ v_movreld_b32 v1, 0
+ v_movreld_b32 v2, 0
+ v_movreld_b32 v3, 0
+ v_movreld_b32 v4, 0
+ v_movreld_b32 v5, 0
+ v_movreld_b32 v6, 0
+ v_movreld_b32 v7, 0
+ s_mov_b32 m0, s2
+ s_sub_u32 s2, s2, 8
+ s_cbranch_scc0 label_0005
+ //
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s0 // sgpr0 carries the tg_size (first_wave) term, since in the ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+ s_cbranch_scc0 label_0023 // Clean LDS only if it's the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8byte)
+ s_mov_b32 s2, 0x0000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+
+ //
+ // CLEAR SGPRs
+ //
+label_0023:
+ s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+
+ s_endpgm
+
+end
+
+
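[Editor's note] For reference, the VGPR-clearing loop at the top of the shader can be modeled in plain C. The asm seeds m0 with 0, then reloads it from s2 (0x38, counting down by 8) at the end of each pass, so the eight passes visit m0 in {0x00, 0x38, 0x30, ..., 0x08}; this model walks the same set in descending order:

	static void model_vgpr_clear(unsigned int vgpr[64])
	{
		/* Each pass zeroes VGPRs m0+0 .. m0+7; eight passes cover
		 * v0..v63 (64 / 8 = 8, matching the unrolled asm loop).
		 */
		for (int m0 = 0x38; ; m0 -= 8) {
			for (int n = 0; n < 8; n++)
				vgpr[m0 + n] = 0;	/* v_movreld_b32 vN, 0 */
			if (m0 == 0)
				break;
		}
	}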
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index d3e8be82a172..56c06b72a70a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -46,6 +46,7 @@
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
+#include "gfx_v11_0_cleaner_shader.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
@@ -293,14 +294,20 @@ static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ u64 shader_mc_addr;
+
+ /* Cleaner shader MC address */
+ shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
+
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
+ amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
@@ -483,8 +490,6 @@ static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
- int i;
-
/* Header itself is a NOP packet */
if (num_nop == 1) {
amdgpu_ring_write(ring, ring->funcs->nop);
@@ -495,8 +500,7 @@ static void gfx_v11_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
 /* Header is at index 0, followed by num_nop - 1 NOP packets */
- for (i = 1; i < num_nop; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ amdgpu_ring_insert_nop(ring, num_nop - 1);
}
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
@@ -611,7 +615,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
if (!ring->is_mes_queue)
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
if (!ring->is_mes_queue)
@@ -635,6 +639,7 @@ static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *
int err = 0;
err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_toc.bin", ucode_prefix);
if (err)
goto out;
@@ -684,6 +689,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", ucode_prefix);
if (err)
goto out;
@@ -701,6 +707,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", ucode_prefix);
if (err)
goto out;
@@ -716,9 +723,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) &&
adev->pdev->revision == 0xCE)
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/gc_11_0_0_rlc_1.bin");
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
@@ -731,6 +740,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", ucode_prefix);
if (err)
goto out;
@@ -1536,11 +1546,11 @@ static void gfx_v11_0_alloc_ip_dump(struct amdgpu_device *adev)
}
}
-static int gfx_v11_0_sw_init(void *handle)
+static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id = 0;
int xcc_id = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
@@ -1575,6 +1585,29 @@ static int gfx_v11_0_sw_init(void *handle)
break;
}
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 2280 &&
+ adev->gfx.pfp_fw_version >= 2370 &&
+ adev->gfx.mec_fw_version >= 2450 &&
+ adev->mes.fw_version[0] >= 99) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
+
/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) &&
amdgpu_sriov_is_pp_one_vf(adev))
@@ -1666,6 +1699,24 @@ static int gfx_v11_0_sw_init(void *handle)
}
}
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ if ((adev->gfx.me_fw_version >= 2280) &&
+ (adev->gfx.mec_fw_version >= 2410)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
+ break;
+ default:
+ break;
+ }
+
if (!adev->enable_mes_kiq) {
r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
if (r) {
@@ -1700,6 +1751,10 @@ static int gfx_v11_0_sw_init(void *handle)
gfx_v11_0_alloc_ip_dump(adev);
+ r = amdgpu_gfx_sysfs_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1732,10 +1787,10 @@ static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
(void **)&adev->gfx.rlc.rlc_autoload_ptr);
}
-static int gfx_v11_0_sw_fini(void *handle)
+static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -1749,6 +1804,8 @@ static int gfx_v11_0_sw_fini(void *handle)
amdgpu_gfx_kiq_fini(adev, 0);
}
+ amdgpu_gfx_cleaner_shader_sw_fini(adev);
+
gfx_v11_0_pfp_fini(adev);
gfx_v11_0_me_fini(adev);
gfx_v11_0_rlc_fini(adev);
@@ -1759,6 +1816,8 @@ static int gfx_v11_0_sw_fini(void *handle)
gfx_v11_0_free_microcode(adev);
+ amdgpu_gfx_sysfs_fini(adev);
+
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
kfree(adev->gfx.ip_dump_gfx_queues);
@@ -1832,6 +1891,7 @@ static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
+ u32 rb_bitmap_per_sa;
u32 rb_bitmap_width_per_sa;
u32 max_sa;
u32 active_sa_bitmap;
@@ -1849,9 +1909,11 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se;
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
+
for (i = 0; i < max_sa; i++) {
if (active_sa_bitmap & (1 << i))
- active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
}
active_rb_bitmap &= global_active_rb_bitmap;
@@ -1893,8 +1955,10 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- /* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ /*
+ * Initialize all compute VMIDs to have no GDS, GWS, or OA
+ * access. These should be enabled by FW for target VMIDs.
+ */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
@@ -3555,7 +3619,7 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
@@ -3593,7 +3657,7 @@ static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
- /* Set the wb address wether it's enabled or not */
+ /* Set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
@@ -3863,9 +3927,7 @@ static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
@@ -4568,10 +4630,13 @@ static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
}
-static int gfx_v11_0_hw_init(void *handle)
+static int gfx_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+
+ amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
+ adev->gfx.cleaner_shader_ptr);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
if (adev->gfx.imu.funcs) {
@@ -4665,9 +4730,9 @@ static int gfx_v11_0_hw_init(void *handle)
return r;
}
-static int gfx_v11_0_hw_fini(void *handle)
+static int gfx_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4703,14 +4768,14 @@ static int gfx_v11_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v11_0_suspend(void *handle)
+static int gfx_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v11_0_hw_fini(handle);
+ return gfx_v11_0_hw_fini(ip_block);
}
-static int gfx_v11_0_resume(void *handle)
+static int gfx_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v11_0_hw_init(handle);
+ return gfx_v11_0_hw_init(ip_block);
}
static bool gfx_v11_0_is_idle(void *handle)
@@ -4724,11 +4789,11 @@ static bool gfx_v11_0_is_idle(void *handle)
return true;
}
-static int gfx_v11_0_wait_for_idle(void *handle)
+static int gfx_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -4774,12 +4839,12 @@ int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v11_0_soft_reset(void *handle)
+static int gfx_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 grbm_soft_reset = 0;
u32 tmp;
int r, i, j, k;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
@@ -4905,10 +4970,10 @@ static int gfx_v11_0_soft_reset(void *handle)
return gfx_v11_0_cp_resume(adev);
}
-static bool gfx_v11_0_check_soft_reset(void *handle)
+static bool gfx_v11_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
int i, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
long tmo = msecs_to_jiffies(1000);
@@ -4929,12 +4994,13 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
return false;
}
-static int gfx_v11_0_post_soft_reset(void *handle)
+static int gfx_v11_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
+ struct amdgpu_device *adev = ip_block->adev;
/**
* GFX soft reset will impact MES; MES needs to be resumed after a GFX soft reset
*/
- return amdgpu_mes_resume((struct amdgpu_device *)handle);
+ return amdgpu_mes_resume(adev);
}
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
@@ -4995,9 +5061,9 @@ static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static int gfx_v11_0_early_init(void *handle)
+static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
@@ -5018,9 +5084,9 @@ static int gfx_v11_0_early_init(void *handle)
return gfx_v11_0_init_microcode(adev);
}
-static int gfx_v11_0_late_init(void *handle)
+static int gfx_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -5399,10 +5465,10 @@ static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
-static int gfx_v11_0_set_powergating_state(void *handle,
+static int gfx_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -5435,10 +5501,10 @@ static int gfx_v11_0_set_powergating_state(void *handle,
return 0;
}
-static int gfx_v11_0_set_clockgating_state(void *handle,
+static int gfx_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -6587,30 +6653,14 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int i, r = 0;
+ int r = 0;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
- WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
-
- /* make sure dequeue is complete*/
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
- break;
- udelay(1);
- }
- if (i >= adev->usec_timeout)
- r = -ETIMEDOUT;
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
if (r) {
- dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ dev_err(adev->dev, "reset via MMIO failed %d\n", r);
return r;
}
@@ -6639,9 +6689,9 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
return amdgpu_ring_test_ring(ring);
}
-static void gfx_v11_ip_print(void *handle, struct drm_printer *p)
+static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
@@ -6703,9 +6753,9 @@ static void gfx_v11_ip_print(void *handle, struct drm_printer *p)
}
}
-static void gfx_v11_ip_dump(void *handle)
+static void gfx_v11_ip_dump(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_11_0);
@@ -6769,6 +6819,13 @@ static void gfx_v11_ip_dump(void *handle)
amdgpu_gfx_off_ctrl(adev, true);
}
+static void gfx_v11_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
+{
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
+
static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
.name = "gfx_v11_0",
.early_init = gfx_v11_0_early_init,
@@ -6818,7 +6875,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
5 + /* HDP_INVL */
22 + /* SET_Q_PREEMPTION_MODE */
8 + 8 + /* FENCE x2 */
- 8, /* gfx_v11_0_emit_mem_sync */
+ 8 + /* gfx_v11_0_emit_mem_sync */
+ 2, /* gfx_v11_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
.emit_fence = gfx_v11_0_ring_emit_fence,
@@ -6841,6 +6899,9 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kgq,
+ .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
@@ -6861,7 +6922,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v11_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
- 8, /* gfx_v11_0_emit_mem_sync */
+ 8 + /* gfx_v11_0_emit_mem_sync */
+ 2, /* gfx_v11_0_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
.emit_ib = gfx_v11_0_ring_emit_ib_compute,
.emit_fence = gfx_v11_0_ring_emit_fence,
@@ -6879,6 +6941,9 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
.reset = gfx_v11_0_reset_kcq,
+ .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm
new file mode 100644
index 000000000000..9b90b66368c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
+// To turn this shader program on for compilation, change this to main and the lower shader main to main_1.
+
+// Navi3 : Clear SGPRs, VGPRs and LDS
+// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot
+// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD
+// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS)
+// It takes 2 workgroups to use all of LDS: one on each CU of the WGP
+// Each wave clears SGPRs 0 - 107
+// Each wave clears VGPRs 0 - 63
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+
+shader main
+ asic(GFX11)
+ type(CS)
+ wave_size(32)
+// Note: original source code from SQ team
+
+// Takes about 2500 clocks to run.
+// (theoretical fastest = 1024 clks vgpr + 640 clks lds = 1660 clks)
+//
+ S_BARRIER
+
+ //
+ // CLEAR VGPRs
+ //
+ s_mov_b32 m0, 0x00000058 // Loop 96/8=12 times (loop unrolled for performance)
+
+label_0005:
+ v_movreld_b32 v0, 0
+ v_movreld_b32 v1, 0
+ v_movreld_b32 v2, 0
+ v_movreld_b32 v3, 0
+ v_movreld_b32 v4, 0
+ v_movreld_b32 v5, 0
+ v_movreld_b32 v6, 0
+ v_movreld_b32 v7, 0
+ s_sub_u32 m0, m0, 8
+ s_cbranch_scc0 label_0005
+ //
+ //
+
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s0 // sgpr0 carries the tg_size (first_wave) term, since in the ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+ s_cbranch_scc0 label_0023 // Clean LDS only if it's the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8byte)
+ s_mov_b32 s2, 0x0000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+ //
+ // CLEAR SGPRs
+ //
+label_0023:
+ s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+
+ s_endpgm
+
+end
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h
new file mode 100644
index 000000000000..3218cc04f543
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Define the cleaner shader gfx_11_0_3 */
+static const u32 gfx_11_0_3_cleaner_shader_hex[] = {
+ 0xb0804006, 0xbe8200ff,
+ 0x00000058, 0xbefd0080,
+ 0x7e008480, 0x7e028480,
+ 0x7e048480, 0x7e068480,
+ 0x7e088480, 0x7e0a8480,
+ 0x7e0c8480, 0x7e0e8480,
+ 0xbefd0002, 0x80828802,
+ 0xbfa1fff5, 0xbe8200ff,
+ 0x80000000, 0x8b020002,
+ 0xbfa10012, 0xbefe00c1,
+ 0xbeff00c1, 0xd71f0001,
+ 0x0001007f, 0xd7200001,
+ 0x0002027e, 0x16020288,
+ 0xbe8200bf, 0xbefd00c1,
+ 0xd9382000, 0x00020201,
+ 0xd9386040, 0x00040401,
+ 0xd7006a01, 0x000202ff,
+ 0x00000400, 0x80828102,
+ 0xbfa1fff7, 0xbefd00ff,
+ 0x00000068, 0xbe804280,
+ 0xbe814280, 0xbe824280,
+ 0xbe834280, 0x80fd847d,
+ 0xbfa1fffa, 0xbeea0180,
+ 0xbeec0180, 0xbeee0180,
+ 0xbef00180, 0xbef20180,
+ 0xbef40180, 0xbef60180,
+ 0xbef80180, 0xbefa0180,
+ 0xbfb00000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 47b47d21f464..2523221a2519 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -513,7 +513,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
if (!ring->is_mes_queue)
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
if (!ring->is_mes_queue)
@@ -537,6 +537,7 @@ static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char *
int err = 0;
err = amdgpu_ucode_request(adev, &adev->psp.toc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_toc.bin", ucode_prefix);
if (err)
goto out;
@@ -566,6 +567,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", ucode_prefix);
if (err)
goto out;
@@ -573,6 +575,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", ucode_prefix);
if (err)
goto out;
@@ -581,6 +584,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", ucode_prefix);
if (err)
goto out;
@@ -593,6 +597,7 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", ucode_prefix);
if (err)
goto out;
@@ -1319,12 +1324,12 @@ static void gfx_v12_0_alloc_ip_dump(struct amdgpu_device *adev)
}
}
-static int gfx_v12_0_sw_init(void *handle)
+static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id = 0;
unsigned num_compute_rings;
int xcc_id = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
@@ -1346,6 +1351,20 @@ static int gfx_v12_0_sw_init(void *handle)
break;
}
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if (adev->gfx.me_fw_version >= 2480 &&
+ adev->gfx.pfp_fw_version >= 2530 &&
+ adev->gfx.mec_fw_version >= 2680 &&
+ adev->mes.fw_version[0] >= 100)
+ adev->gfx.enable_cleaner_shader = true;
+ break;
+ default:
+ adev->gfx.enable_cleaner_shader = false;
+ break;
+ }
+
/* recalculate compute rings to use based on hardware configuration */
num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe) / 2;
@@ -1431,6 +1450,20 @@ static int gfx_v12_0_sw_init(void *handle)
}
}
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ if ((adev->gfx.me_fw_version >= 2660) &&
+ (adev->gfx.mec_fw_version >= 2920)) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ }
+ }
+
if (!adev->enable_mes_kiq) {
r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0);
if (r) {
@@ -1460,6 +1493,10 @@ static int gfx_v12_0_sw_init(void *handle)
gfx_v12_0_alloc_ip_dump(adev);
+ r = amdgpu_gfx_sysfs_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1492,10 +1529,10 @@ static void gfx_v12_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
(void **)&adev->gfx.rlc.rlc_autoload_ptr);
}
-static int gfx_v12_0_sw_fini(void *handle)
+static int gfx_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -1519,6 +1556,8 @@ static int gfx_v12_0_sw_fini(void *handle)
gfx_v12_0_free_microcode(adev);
+ amdgpu_gfx_sysfs_fini(adev);
+
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
kfree(adev->gfx.ip_dump_gfx_queues);
@@ -1592,6 +1631,7 @@ static u32 gfx_v12_0_get_rb_active_bitmap(struct amdgpu_device *adev)
static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
{
+ u32 rb_bitmap_per_sa;
u32 rb_bitmap_width_per_sa;
u32 max_sa;
u32 active_sa_bitmap;
@@ -1609,12 +1649,14 @@ static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se;
rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ rb_bitmap_per_sa = amdgpu_gfx_create_bitmask(rb_bitmap_width_per_sa);
+
for (i = 0; i < max_sa; i++) {
if (active_sa_bitmap & (1 << i))
- active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
+ active_rb_bitmap |= (rb_bitmap_per_sa << (i * rb_bitmap_width_per_sa));
}
- active_rb_bitmap |= global_active_rb_bitmap;
+ active_rb_bitmap &= global_active_rb_bitmap;
adev->gfx.config.backend_enable_mask = active_rb_bitmap;
adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
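[Editor's note] Note the sign of the fix in setup_rb above: the per-SA RB bitmap must be masked (&=) with the globally active bitmap rather than merged (|=), or RBs harvested at the global level would still be reported as enabled. A toy illustration with made-up bitmaps:

	#include <stdio.h>

	int main(void)
	{
		unsigned int per_sa_rb = 0x0f;	/* RBs implied by SA layout */
		unsigned int global_rb = 0x07;	/* globally active (one harvested) */

		printf("merged (|): 0x%x\n", per_sa_rb | global_rb);	/* 0xf: wrong */
		printf("masked (&): 0x%x\n", per_sa_rb & global_rb);	/* 0x7: right */
		return 0;
	}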
@@ -2601,7 +2643,7 @@ static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
@@ -2814,9 +2856,7 @@ static void gfx_v12_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v12_0_cp_set_doorbell_range(struct amdgpu_device *adev)
@@ -3513,10 +3553,10 @@ static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev)
}
}
-static int gfx_v12_0_hw_init(void *handle)
+static int gfx_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
@@ -3603,9 +3643,9 @@ static int gfx_v12_0_hw_init(void *handle)
return r;
}
-static int gfx_v12_0_hw_fini(void *handle)
+static int gfx_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
@@ -3643,14 +3683,14 @@ static int gfx_v12_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v12_0_suspend(void *handle)
+static int gfx_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v12_0_hw_fini(handle);
+ return gfx_v12_0_hw_fini(ip_block);
}
-static int gfx_v12_0_resume(void *handle)
+static int gfx_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v12_0_hw_init(handle);
+ return gfx_v12_0_hw_init(ip_block);
}
static bool gfx_v12_0_is_idle(void *handle)
@@ -3664,11 +3704,11 @@ static bool gfx_v12_0_is_idle(void *handle)
return true;
}
-static int gfx_v12_0_wait_for_idle(void *handle)
+static int gfx_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -3695,9 +3735,9 @@ static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev)
return clock;
}
-static int gfx_v12_0_early_init(void *handle)
+static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.funcs = &gfx_v12_0_gfx_funcs;
@@ -3717,9 +3757,9 @@ static int gfx_v12_0_early_init(void *handle)
return gfx_v12_0_init_microcode(adev);
}
-static int gfx_v12_0_late_init(void *handle)
+static int gfx_v12_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -3846,10 +3886,10 @@ static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable)
}
#endif
-static int gfx_v12_0_set_powergating_state(void *handle,
+static int gfx_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -3981,17 +4021,6 @@ static void gfx_v12_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
-
- data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
- data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
-
- /* Some ASICs only have one SDMA instance, not need to configure SDMA1 */
- if (adev->sdma.num_instances > 1) {
- data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
- data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
- WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
- }
}
}
@@ -4097,15 +4126,15 @@ static int gfx_v12_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v12_0_set_clockgating_state(void *handle,
+static int gfx_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->ip_versions[GC_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
gfx_v12_0_update_gfx_clock_gating(adev,
@@ -5022,8 +5051,6 @@ static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring)
static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
- int i;
-
/* Header itself is a NOP packet */
if (num_nop == 1) {
amdgpu_ring_write(ring, ring->funcs->nop);
@@ -5034,13 +5061,19 @@ static void gfx_v12_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
/* Header is at index 0, followed by num_nop - 1 NOP packets */
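+	/* e.g. num_nop == 8: PACKET3(NOP, 6) header plus 7 filler dwords,
+	 * 8 dwords total, since a PM4 count of N means N + 1 payload dwords.
+	 */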
- for (i = 1; i < num_nop; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ amdgpu_ring_insert_nop(ring, num_nop - 1);
}
-static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
+static void gfx_v12_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* Emit the cleaner shader */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
+ amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
+}
+
+static void gfx_v12_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
@@ -5102,9 +5135,9 @@ static void gfx_v12_ip_print(void *handle, struct drm_printer *p)
}
}
-static void gfx_v12_ip_dump(void *handle)
+static void gfx_v12_ip_dump(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_12_0);
@@ -5211,24 +5244,16 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- int r, i;
+ int r;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- mutex_lock(&adev->srbm_mutex);
- soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
- WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
- break;
- udelay(1);
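+	/* Hand the reset to MES via its legacy-queue path (MMIO-based) */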
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
+ if (r) {
+ dev_err(adev->dev, "reset via MMIO failed %d\n", r);
+ return r;
}
- soc24_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0)) {
@@ -5297,7 +5322,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
- 8, /* gfx_v12_0_emit_mem_sync */
+ 8 + /* gfx_v12_0_emit_mem_sync */
+ 2, /* gfx_v12_0_ring_emit_cleaner_shader */
.emit_ib_size = 4, /* gfx_v12_0_ring_emit_ib_gfx */
.emit_ib = gfx_v12_0_ring_emit_ib_gfx,
.emit_fence = gfx_v12_0_ring_emit_fence,
@@ -5318,6 +5344,9 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kgq,
+ .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
@@ -5336,7 +5365,8 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v12_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v12_0_ring_emit_fence x3 for user fence, vm fence */
- 8, /* gfx_v12_0_emit_mem_sync */
+ 8 + /* gfx_v12_0_emit_mem_sync */
+ 2, /* gfx_v12_0_ring_emit_cleaner_shader */
.emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */
.emit_ib = gfx_v12_0_ring_emit_ib_compute,
.emit_fence = gfx_v12_0_ring_emit_fence,
@@ -5353,6 +5383,9 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
.reset = gfx_v12_0_reset_kcq,
+ .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
+ .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
+ .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h
index bcc9c72ccbde..f7184b2dc4e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h
@@ -26,4 +26,6 @@
extern const struct amdgpu_ip_block_version gfx_v12_0_ip_block;
+int gfx_v12_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 564f0b9336b6..f26e2cdec07a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -337,6 +337,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
@@ -345,6 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
@@ -353,6 +355,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
@@ -361,6 +364,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -1906,7 +1910,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
error:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
}
@@ -3023,9 +3027,9 @@ static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
.start = gfx_v6_0_rlc_start
};
-static int gfx_v6_0_early_init(void *handle)
+static int gfx_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.xcc_mask = 1;
adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
@@ -3039,10 +3043,10 @@ static int gfx_v6_0_early_init(void *handle)
return 0;
}
-static int gfx_v6_0_sw_init(void *handle)
+static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r;
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
@@ -3107,10 +3111,10 @@ static int gfx_v6_0_sw_init(void *handle)
return r;
}
-static int gfx_v6_0_sw_fini(void *handle)
+static int gfx_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -3122,10 +3126,10 @@ static int gfx_v6_0_sw_fini(void *handle)
return 0;
}
-static int gfx_v6_0_hw_init(void *handle)
+static int gfx_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gfx_v6_0_constants_init(adev);
@@ -3142,9 +3146,9 @@ static int gfx_v6_0_hw_init(void *handle)
return r;
}
-static int gfx_v6_0_hw_fini(void *handle)
+static int gfx_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gfx_v6_0_cp_enable(adev, false);
adev->gfx.rlc.funcs->stop(adev);
@@ -3153,18 +3157,14 @@ static int gfx_v6_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v6_0_suspend(void *handle)
+static int gfx_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return gfx_v6_0_hw_fini(adev);
+ return gfx_v6_0_hw_fini(ip_block);
}
-static int gfx_v6_0_resume(void *handle)
+static int gfx_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return gfx_v6_0_hw_init(adev);
+ return gfx_v6_0_hw_init(ip_block);
}
static bool gfx_v6_0_is_idle(void *handle)
@@ -3177,24 +3177,19 @@ static bool gfx_v6_0_is_idle(void *handle)
return true;
}
-static int gfx_v6_0_wait_for_idle(void *handle)
+static int gfx_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v6_0_is_idle(handle))
+ if (gfx_v6_0_is_idle(adev))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
-static int gfx_v6_0_soft_reset(void *handle)
-{
- return 0;
-}
-
static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@@ -3382,11 +3377,11 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v6_0_set_clockgating_state(void *handle,
+static int gfx_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -3404,11 +3399,11 @@ static int gfx_v6_0_set_clockgating_state(void *handle,
return 0;
}
-static int gfx_v6_0_set_powergating_state(void *handle,
+static int gfx_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
gate = true;
@@ -3444,7 +3439,6 @@ static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.name = "gfx_v6_0",
.early_init = gfx_v6_0_early_init,
- .late_init = NULL,
.sw_init = gfx_v6_0_sw_init,
.sw_fini = gfx_v6_0_sw_fini,
.hw_init = gfx_v6_0_hw_init,
@@ -3453,11 +3447,8 @@ static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.resume = gfx_v6_0_resume,
.is_idle = gfx_v6_0_is_idle,
.wait_for_idle = gfx_v6_0_wait_for_idle,
- .soft_reset = gfx_v6_0_soft_reset,
.set_clockgating_state = gfx_v6_0_set_clockgating_state,
.set_powergating_state = gfx_v6_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index f146806c4633..84745b2453ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -934,33 +934,39 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
}
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
if (adev->asic_type == CHIP_KAVERI) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
if (err)
goto out;
}
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
out:
if (err) {
@@ -2324,7 +2330,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
error:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
}
@@ -2559,7 +2565,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -2876,7 +2882,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
wb_gpu_addr = ring->rptr_gpu_addr;
mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_rptr_report_addr_hi =
@@ -4134,9 +4140,9 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
.update_spm_vmid = gfx_v7_0_update_spm_vmid
};
-static int gfx_v7_0_early_init(void *handle)
+static int gfx_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.xcc_mask = 1;
adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
@@ -4151,9 +4157,9 @@ static int gfx_v7_0_early_init(void *handle)
return 0;
}
-static int gfx_v7_0_late_init(void *handle)
+static int gfx_v7_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -4343,10 +4349,10 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
return 0;
}
-static int gfx_v7_0_sw_init(void *handle)
+static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j, k, r, ring_id;
switch (adev->asic_type) {
@@ -4439,9 +4445,9 @@ static int gfx_v7_0_sw_init(void *handle)
return r;
}
-static int gfx_v7_0_sw_fini(void *handle)
+static int gfx_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
@@ -4465,10 +4471,10 @@ static int gfx_v7_0_sw_fini(void *handle)
return 0;
}
-static int gfx_v7_0_hw_init(void *handle)
+static int gfx_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gfx_v7_0_constants_init(adev);
@@ -4486,9 +4492,9 @@ static int gfx_v7_0_hw_init(void *handle)
return r;
}
-static int gfx_v7_0_hw_fini(void *handle)
+static int gfx_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4499,18 +4505,14 @@ static int gfx_v7_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v7_0_suspend(void *handle)
+static int gfx_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return gfx_v7_0_hw_fini(adev);
+ return gfx_v7_0_hw_fini(ip_block);
}
-static int gfx_v7_0_resume(void *handle)
+static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return gfx_v7_0_hw_init(adev);
+ return gfx_v7_0_hw_init(ip_block);
}
static bool gfx_v7_0_is_idle(void *handle)
@@ -4523,11 +4525,11 @@ static bool gfx_v7_0_is_idle(void *handle)
return true;
}
-static int gfx_v7_0_wait_for_idle(void *handle)
+static int gfx_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -4540,11 +4542,11 @@ static int gfx_v7_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v7_0_soft_reset(void *handle)
+static int gfx_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* GRBM_STATUS */
tmp = RREG32(mmGRBM_STATUS);
@@ -4850,11 +4852,11 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v7_0_set_clockgating_state(void *handle,
+static int gfx_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -4873,11 +4875,11 @@ static int gfx_v7_0_set_clockgating_state(void *handle,
return 0;
}
-static int gfx_v7_0_set_powergating_state(void *handle,
+static int gfx_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
gate = true;
@@ -5009,8 +5011,6 @@ static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.soft_reset = gfx_v7_0_soft_reset,
.set_clockgating_state = gfx_v7_0_set_clockgating_state,
.set_powergating_state = gfx_v7_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index bc8295812cc8..6a025438f9d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -914,7 +914,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -982,13 +982,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_pfp_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
}
if (err)
@@ -999,13 +1002,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_me_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
}
if (err)
@@ -1017,13 +1023,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_ce_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
}
if (err)
@@ -1044,6 +1053,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->virt.chained_ib_support = false;
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -1093,13 +1103,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_mec_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
}
if (err)
@@ -1112,13 +1125,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
(adev->asic_type != CHIP_TOPAZ)) {
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_OPTIONAL,
"amdgpu/%s_mec2_2.bin", chip_name);
if (err == -ENODEV) {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
}
} else {
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
}
if (!err) {
@@ -1640,7 +1656,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
RREG32(sec_ded_counter_registers[i]);
fail:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
@@ -1894,12 +1910,12 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
-static int gfx_v8_0_sw_init(void *handle)
+static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id;
int xcc_id = 0;
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (adev->asic_type) {
case CHIP_TONGA:
@@ -2037,9 +2053,9 @@ static int gfx_v8_0_sw_init(void *handle)
return 0;
}
-static int gfx_v8_0_sw_fini(void *handle)
+static int gfx_v8_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
@@ -4260,7 +4276,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -4304,9 +4320,7 @@ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32(mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32(mmRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32(mmRLC_CP_SCHEDULERS, tmp);
+ WREG32(mmRLC_CP_SCHEDULERS, tmp | 0x80);
}
static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
@@ -4783,10 +4797,10 @@ static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
gfx_v8_0_cp_compute_enable(adev, enable);
}
-static int gfx_v8_0_hw_init(void *handle)
+static int gfx_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gfx_v8_0_init_golden_registers(adev);
gfx_v8_0_constants_init(adev);
@@ -4823,6 +4837,13 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
+ /* Submit unmap queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+	 * The ring test does a basic scratch register change check. Run it
+	 * here to ensure the unmap queue packets submitted above were
+	 * processed successfully before returning.
+ */
r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ disable failed\n");
@@ -4865,13 +4886,13 @@ static int gfx_v8_0_wait_for_rlc_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v8_0_wait_for_idle(void *handle)
+static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v8_0_is_idle(handle))
+ if (gfx_v8_0_is_idle(adev))
return 0;
udelay(1);
@@ -4879,9 +4900,9 @@ static int gfx_v8_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v8_0_hw_fini(void *handle)
+static int gfx_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4897,8 +4918,9 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
+
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
- if (!gfx_v8_0_wait_for_idle(adev))
+ if (!gfx_v8_0_wait_for_idle(ip_block))
gfx_v8_0_cp_enable(adev, false);
else
pr_err("cp is busy, skip halt cp\n");
@@ -4911,19 +4933,19 @@ static int gfx_v8_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v8_0_suspend(void *handle)
+static int gfx_v8_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v8_0_hw_fini(handle);
+ return gfx_v8_0_hw_fini(ip_block);
}
-static int gfx_v8_0_resume(void *handle)
+static int gfx_v8_0_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v8_0_hw_init(handle);
+ return gfx_v8_0_hw_init(ip_block);
}
-static bool gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
@@ -4983,9 +5005,9 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
}
}
-static int gfx_v8_0_pre_soft_reset(void *handle)
+static int gfx_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
@@ -5024,9 +5046,9 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
return 0;
}
-static int gfx_v8_0_soft_reset(void *handle)
+static int gfx_v8_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
@@ -5086,9 +5108,9 @@ static int gfx_v8_0_soft_reset(void *handle)
return 0;
}
-static int gfx_v8_0_post_soft_reset(void *handle)
+static int gfx_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
@@ -5254,9 +5276,9 @@ static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
};
-static int gfx_v8_0_early_init(void *handle)
+static int gfx_v8_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.xcc_mask = 1;
adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
@@ -5271,9 +5293,9 @@ static int gfx_v8_0_early_init(void *handle)
return 0;
}
-static int gfx_v8_0_late_init(void *handle)
+static int gfx_v8_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -5313,7 +5335,7 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM))
/* Send msg to SMU via Powerplay */
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable, 0);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
@@ -5359,10 +5381,10 @@ static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
}
}
-static int gfx_v8_0_set_powergating_state(void *handle,
+static int gfx_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -5617,8 +5639,6 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t temp, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
@@ -5712,8 +5732,6 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5723,8 +5741,6 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
@@ -5805,12 +5821,12 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
}
gfx_v8_0_wait_for_rlc_serdes(adev);
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
* === MGCG + MGLS + TS(CG/LS) ===
@@ -5824,6 +5840,8 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
}
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5974,10 +5992,10 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v8_0_set_clockgating_state(void *handle,
+static int gfx_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -6947,8 +6965,6 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
.get_clockgating_state = gfx_v8_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 23f0573ae47b..fa572b40989e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1243,7 +1243,7 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -1429,18 +1429,21 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
int err;
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_pfp.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_me.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_ce.bin", chip_name);
if (err)
goto out;
@@ -1476,6 +1479,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc_am4.bin", chip_name);
else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
(smu_version >= 0x41e2b))
@@ -1483,9 +1487,11 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
*SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
*/
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_kicker_rlc.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -1518,9 +1524,11 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
- "amdgpu/%s_sjt_mec.bin", chip_name);
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sjt_mec.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
@@ -1531,9 +1539,11 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sjt_mec2.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_mec2.bin", chip_name);
if (!err) {
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
@@ -2198,12 +2208,12 @@ static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
}
}
-static int gfx_v9_0_sw_init(void *handle)
+static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id;
int xcc_id = 0;
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
unsigned int hw_prio;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
@@ -2223,6 +2233,18 @@ static int gfx_v9_0_sw_init(void *handle)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 2):
+ adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
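+		/* MEC fw 88 is assumed to be the first 9.4.2 release able to run the cleaner shader */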
+ if (adev->gfx.mec_fw_version >= 88) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
default:
adev->gfx.enable_cleaner_shader = false;
break;
@@ -2362,6 +2384,12 @@ static int gfx_v9_0_sw_init(void *handle)
}
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
@@ -2390,7 +2418,7 @@ static int gfx_v9_0_sw_init(void *handle)
gfx_v9_0_alloc_ip_dump(adev);
- r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ r = amdgpu_gfx_sysfs_init(adev);
if (r)
return r;
@@ -2398,10 +2426,10 @@ static int gfx_v9_0_sw_init(void *handle)
}
-static int gfx_v9_0_sw_fini(void *handle)
+static int gfx_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
@@ -2418,6 +2446,8 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
amdgpu_gfx_kiq_fini(adev, 0);
+ amdgpu_gfx_cleaner_shader_sw_fini(adev);
+
gfx_v9_0_mec_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
@@ -2429,7 +2459,7 @@ static int gfx_v9_0_sw_fini(void *handle)
}
gfx_v9_0_free_microcode(adev);
- amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+ amdgpu_gfx_sysfs_fini(adev);
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
@@ -3184,6 +3214,15 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
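+	/* On halt (enable == false), also invalidate the CE/PFP/ME
+	 * instruction caches and reset both pipes of each front end,
+	 * not just assert the halt bits.
+	 */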
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_INVALIDATE_ICACHE, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_INVALIDATE_ICACHE, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_INVALIDATE_ICACHE, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE0_RESET, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE1_RESET, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
@@ -3265,8 +3304,8 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
* confirmed that the APU gfx10/gfx11 doesn't need such an update.
*/
if (adev->flags & AMD_IS_APU &&
- adev->in_s3 && !adev->suspend_complete) {
- DRM_INFO(" Will skip the CSB packet resubmit\n");
+ adev->in_s3 && !pm_resume_via_firmware()) {
+ DRM_INFO("Will skip the CSB packet resubmit\n");
return 0;
}
r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
@@ -3346,7 +3385,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
@@ -3393,7 +3432,15 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
} else {
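+		/* Besides halting ME1/ME2, invalidate the MEC icache and reset every pipe */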
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
- (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
+ CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+ CP_MEC_CNTL__MEC_ME2_HALT_MASK));
adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
@@ -3451,9 +3498,7 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
@@ -3914,6 +3959,10 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
return r;
}
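+	/* Make sure CP gfx and compute are halted before KIQ is brought back up */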
+ if (adev->gfx.num_gfx_rings)
+ gfx_v9_0_cp_gfx_enable(adev, false);
+ gfx_v9_0_cp_compute_enable(adev, false);
+
r = gfx_v9_0_kiq_resume(adev);
if (r)
return r;
@@ -3970,10 +4019,10 @@ static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
gfx_v9_0_cp_compute_enable(adev, enable);
}
-static int gfx_v9_0_hw_init(void *handle)
+static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
adev->gfx.cleaner_shader_ptr);
@@ -3999,9 +4048,9 @@ static int gfx_v9_0_hw_init(void *handle)
return r;
}
-static int gfx_v9_0_hw_fini(void *handle)
+static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
@@ -4051,14 +4100,14 @@ static int gfx_v9_0_hw_fini(void *handle)
return 0;
}
-static int gfx_v9_0_suspend(void *handle)
+static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v9_0_hw_fini(handle);
+ return gfx_v9_0_hw_fini(ip_block);
}
-static int gfx_v9_0_resume(void *handle)
+static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v9_0_hw_init(handle);
+ return gfx_v9_0_hw_init(ip_block);
}
static bool gfx_v9_0_is_idle(void *handle)
@@ -4072,24 +4121,24 @@ static bool gfx_v9_0_is_idle(void *handle)
return true;
}
-static int gfx_v9_0_wait_for_idle(void *handle)
+static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v9_0_is_idle(handle))
+ if (gfx_v9_0_is_idle(adev))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
-static int gfx_v9_0_soft_reset(void *handle)
+static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 grbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* GRBM_STATUS */
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
@@ -4739,15 +4788,15 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
fail:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
return r;
}
-static int gfx_v9_0_early_init(void *handle)
+static int gfx_v9_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
@@ -4771,9 +4820,9 @@ static int gfx_v9_0_early_init(void *handle)
return gfx_v9_0_init_microcode(adev);
}
-static int gfx_v9_0_ecc_late_init(void *handle)
+static int gfx_v9_0_ecc_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
/*
@@ -4805,9 +4854,9 @@ static int gfx_v9_0_ecc_late_init(void *handle)
return 0;
}
-static int gfx_v9_0_late_init(void *handle)
+static int gfx_v9_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -4822,7 +4871,7 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_ecc_late_init(handle);
+ r = gfx_v9_0_ecc_late_init(ip_block);
if (r)
return r;
@@ -4915,8 +4964,6 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t data, def;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -4981,8 +5028,6 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
@@ -4993,8 +5038,6 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
if (!adev->gfx.num_gfx_rings)
return;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
/* Enable 3D CGCG/CGLS */
if (enable) {
/* write cmd to clear cgcg/cgls ov */
@@ -5036,8 +5079,6 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5045,8 +5086,6 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t def, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
@@ -5088,13 +5127,12 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
-
- amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS
* === MGCG + MGLS ===
@@ -5114,6 +5152,7 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === MGCG + MGLS === */
gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
}
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5191,10 +5230,10 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
-static int gfx_v9_0_set_powergating_state(void *handle,
+static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
@@ -5236,10 +5275,10 @@ static int gfx_v9_0_set_powergating_state(void *handle,
return 0;
}
-static int gfx_v9_0_set_clockgating_state(void *handle,
+static int gfx_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -7167,8 +7206,6 @@ static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
- int i;
-
/* Header itself is a NOP packet */
if (num_nop == 1) {
amdgpu_ring_write(ring, ring->funcs->nop);
@@ -7179,8 +7216,7 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
/* Header is at index 0, followed by num_nop - 1 NOP packets */
- for (i = 1; i < num_nop; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ amdgpu_ring_insert_nop(ring, num_nop - 1);
}
static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
@@ -7237,10 +7273,6 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
- if (!adev->debug_exp_resets &&
- !adev->gfx.num_gfx_rings)
- return -EINVAL;
-
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -7316,9 +7348,9 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
return amdgpu_ring_test_ring(ring);
}
-static void gfx_v9_ip_print(void *handle, struct drm_printer *p)
+static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
@@ -7356,9 +7388,9 @@ static void gfx_v9_ip_print(void *handle, struct drm_printer *p)
}
-static void gfx_v9_ip_dump(void *handle)
+static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
index 36c0292b5110..0b6bd09b7529 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright 2024 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,3 +24,45 @@
static const u32 __maybe_unused gfx_9_0_cleaner_shader_hex[] = {
/* Add the cleaner shader code here */
};
+
+/* Define the cleaner shader gfx_9_4_2 */
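+/* Raw dwords, presumably assembled from gfx_v9_4_2_cleaner_shader.asm added later in this patch */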
+static const u32 gfx_9_4_2_cleaner_shader_hex[] = {
+ 0xbf068100, 0xbf84003b,
+ 0xbf8a0000, 0xb07c0000,
+ 0xbe8200ff, 0x00000078,
+ 0xbf110802, 0x7e000280,
+ 0x7e020280, 0x7e040280,
+ 0x7e060280, 0x7e080280,
+ 0x7e0a0280, 0x7e0c0280,
+ 0x7e0e0280, 0x80828802,
+ 0xbe803202, 0xbf84fff5,
+ 0xbf9c0000, 0xbe8200ff,
+ 0x80000000, 0x86020102,
+ 0xbf840011, 0xbefe00c1,
+ 0xbeff00c1, 0xd28c0001,
+ 0x0001007f, 0xd28d0001,
+ 0x0002027e, 0x10020288,
+ 0xbe8200bf, 0xbefc00c1,
+ 0xd89c2000, 0x00020201,
+ 0xd89c6040, 0x00040401,
+ 0x320202ff, 0x00000400,
+ 0x80828102, 0xbf84fff8,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea0180,
+ 0xbeec0180, 0xbeee0180,
+ 0xbef00180, 0xbef20180,
+ 0xbef40180, 0xbef60180,
+ 0xbef80180, 0xbefa0180,
+ 0xbf810000, 0xbf8d0001,
+ 0xbefc00ff, 0x0000005c,
+ 0xbf800000, 0xbe802c80,
+ 0xbe812c80, 0xbe822c80,
+ 0xbe832c80, 0x80fc847c,
+ 0xbf84fffa, 0xbee60080,
+ 0xbee70080, 0xbeea01ff,
+ 0x000000ee, 0xbf810000,
+};
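The table above is just the assembled shader dumped as dwords, two per line. A throwaway userspace formatter along these lines (illustrative only; the input filename is hypothetical) reproduces the layout from a raw shader binary on a little-endian host:

#include <stdint.h>
#include <stdio.h>

/* Print a raw shader binary as the two-dwords-per-line hex table above. */
int main(void)
{
	FILE *f = fopen("cleaner_shader.bin", "rb"); /* hypothetical input */
	uint32_t w[2];
	size_t n;

	if (!f)
		return 1;
	while ((n = fread(w, sizeof(w[0]), 2, f)) > 0) {
		if (n == 2)
			printf("\t0x%08x, 0x%08x,\n", w[0], w[1]);
		else
			printf("\t0x%08x,\n", w[0]);
	}
	fclose(f);
	return 0;
}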
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 3f4fd2f08163..d81449f9d822 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -412,7 +412,7 @@ static int gfx_v9_4_2_run_shader(struct amdgpu_device *adev,
r = amdgpu_ib_schedule(ring, 1, ib, NULL, fence_ptr);
if (r) {
dev_err(adev->dev, "ib submit failed (%d).\n", r);
- amdgpu_ib_free(adev, ib, NULL);
+ amdgpu_ib_free(ib, NULL);
}
return r;
}
@@ -611,16 +611,16 @@ static int gfx_v9_4_2_do_sgprs_init(struct amdgpu_device *adev)
}
disp2_failed:
- amdgpu_ib_free(adev, &disp_ibs[2], NULL);
+ amdgpu_ib_free(&disp_ibs[2], NULL);
dma_fence_put(fences[2]);
disp1_failed:
- amdgpu_ib_free(adev, &disp_ibs[1], NULL);
+ amdgpu_ib_free(&disp_ibs[1], NULL);
dma_fence_put(fences[1]);
disp0_failed:
- amdgpu_ib_free(adev, &disp_ibs[0], NULL);
+ amdgpu_ib_free(&disp_ibs[0], NULL);
dma_fence_put(fences[0]);
pro_end:
- amdgpu_ib_free(adev, &wb_ib, NULL);
+ amdgpu_ib_free(&wb_ib, NULL);
if (r)
dev_info(adev->dev, "Init SGPRS Failed\n");
@@ -687,10 +687,10 @@ static int gfx_v9_4_2_do_vgprs_init(struct amdgpu_device *adev)
}
disp_failed:
- amdgpu_ib_free(adev, &disp_ib, NULL);
+ amdgpu_ib_free(&disp_ib, NULL);
dma_fence_put(fence);
pro_end:
- amdgpu_ib_free(adev, &wb_ib, NULL);
+ amdgpu_ib_free(&wb_ib, NULL);
if (r)
dev_info(adev->dev, "Init VGPRS Failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
new file mode 100644
index 000000000000..35b8cf9070bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 192-dword cleaner shader.
+// To turn this shader program on for compilation, change this to main and rename the lower shader main to main_1.
+
+// MI200 : Clear SGPRs, VGPRs and LDS
+// Uses two kernels launched separately:
+// 1. Clean VGPRs, LDS, and lower SGPRs
+// Launches one workgroup per CU, each workgroup with 4x wave64 per SIMD in the CU
+// Waves are "wave64" and have 128 VGPRs each, which uses all 512 VGPRs per SIMD
+// Waves in the workgroup share the 64KB of LDS
+// Each wave clears SGPRs 0 - 95. Because there are 4 waves/SIMD, this is physical SGPRs 0-383
+// Each wave clears 128 VGPRs, so all 512 in the SIMD
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible not all SGPRs get cleared.
+// 2. Clean remaining SGPRs
+// Launches a workgroup with 24 waves per workgroup, yielding 6 waves per SIMD in each CU
+// Waves are allocating 96 SGPRs
+// CP sets up SPI_RESOURCE_RESERVE_* registers to prevent these waves from allocating SGPRs 0-223.
+// As such, these 6 waves per SIMD are allocated physical SGPRs 224-799
+// Barriers do not work for >16 waves per workgroup, so we cannot start with S_BARRIER
+// Instead, the shader starts with an S_SETHALT 1. Once all waves are launched, CP will send an unhalt command
+// The shader then clears all SGPRs allocated to it, cleaning out physical SGPRs 224-799
+
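+// Worked coverage check (illustrative, derived from the figures above):
+// kernel 1: 4 waves/SIMD x 96 SGPRs = physical SGPRs 0-383 cleared;
+// kernel 2: 6 waves/SIMD x 96 SGPRs = 576 SGPRs at physical 224-799.
+// Together the two kernels therefore cover physical SGPRs 0-799.
+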
+shader main
+ asic(MI200)
+ type(CS)
+ wave_size(64)
+// Note: original source code from SQ team
+
+// (theoretical fastest = ~512 clks vgpr + 1536 lds + ~128 sgpr = ~2176 clks)
+
+ s_cmp_eq_u32 s0, 1 // If bit0 of sgpr0 is set, clear VGPRs and LDS, as FW sets COMPUTE_USER_DATA_3
+ s_cbranch_scc0 label_0023 // Clean VGPRs and LDS if sgpr0 of the wave is set, scc = (s0 == 1)
+ S_BARRIER
+
+ s_movk_i32 m0, 0x0000
+ s_mov_b32 s2, 0x00000078 // Loop 128/8=16 times (loop unrolled for performance)
+ //
+ // CLEAR VGPRs
+ //
+ s_set_gpr_idx_on s2, 0x8 // enable Dest VGPR indexing
+label_0005:
+ v_mov_b32 v0, 0
+ v_mov_b32 v1, 0
+ v_mov_b32 v2, 0
+ v_mov_b32 v3, 0
+ v_mov_b32 v4, 0
+ v_mov_b32 v5, 0
+ v_mov_b32 v6, 0
+ v_mov_b32 v7, 0
+ s_sub_u32 s2, s2, 8
+ s_set_gpr_idx_idx s2
+ s_cbranch_scc0 label_0005
+ s_set_gpr_idx_off
+
+ //
+ //
+
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s1 // sgpr1 has the tg_size (first_wave) term, as in the ucode only COMPUTE_PGM_RSRC2.tg_size_en is set
+ s_cbranch_scc0 label_clean_sgpr_1 // Clean LDS only if this is the first wave of the threadgroup/workgroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8 bytes)
+ s_mov_b32 s2, 0x0000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each first wave of a workgroup clears its 64KB block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+ //
+ // CLEAR SGPRs
+ //
+label_clean_sgpr_1:
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0 //clear vcc
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+s_endpgm
+
+label_0023:
+
+ s_sethalt 1
+
+ s_mov_b32 m0, 0x0000005c // Loop 96/4=24 times (loop unrolled for performance)
+ s_nop 0
+label_sgpr_loop1:
+
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop1
+
+ //clear vcc, flat scratch
+ s_mov_b32 flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ s_mov_b32 flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 vcc, 0xee //clear vcc
+
+s_endpgm
+end
+
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index c100845409f7..2ba185875baa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -43,8 +43,12 @@
MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_5_0_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -52,10 +56,6 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
-#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
-#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
-#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
-
#define XCC_REG_RANGE_0_LOW 0x2000 /* XCC gfxdec0 lower Bound */
#define XCC_REG_RANGE_0_HIGH 0x3400 /* XCC gfxdec0 upper Bound */
#define XCC_REG_RANGE_1_LOW 0xA000 /* XCC gfxdec1 lower Bound */
@@ -349,13 +349,17 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
GOLDEN_GB_ADDR_CONFIG);
- /* Golden settings applied by driver for ASIC with rev_id 0 */
- if (adev->rev_id == 0) {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
- REDUCE_FIFO_DEPTH_BY_2, 2);
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
} else {
- WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
- SPARE, 0x1);
+ /* Golden settings applied by driver for ASIC with rev_id 0 */
+ if (adev->rev_id == 0) {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
+ REDUCE_FIFO_DEPTH_BY_2, 2);
+ } else {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
+ SPARE, 0x1);
+ }
}
}
}
@@ -499,7 +503,7 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err2:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
@@ -543,6 +547,7 @@ static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_rlc.bin", chip_name);
if (err)
goto out;
@@ -574,8 +579,19 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
{
int err;
- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
- "amdgpu/%s_mec.bin", chip_name);
+ if (amdgpu_sriov_vf(adev)) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sjt_mec.bin", chip_name);
+
+ if (err)
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mec.bin", chip_name);
+ } else
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mec.bin", chip_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
@@ -929,6 +945,7 @@ static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1049,10 +1066,10 @@ static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
}
}
-static int gfx_v9_4_3_sw_init(void *handle)
+static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
{
int i, j, k, r, ring_id, xcc_id, num_xcc;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
@@ -1157,6 +1174,19 @@ static int gfx_v9_4_3_sw_init(void *handle)
return r;
}
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ if (adev->gfx.mec_fw_version >= 155) {
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
+ }
+ break;
+ default:
+ break;
+ }
r = gfx_v9_4_3_gpu_early_init(adev);
if (r)
return r;
@@ -1165,26 +1195,19 @@ static int gfx_v9_4_3_sw_init(void *handle)
if (r)
return r;
-
- if (!amdgpu_sriov_vf(adev)) {
- r = amdgpu_gfx_sysfs_init(adev);
- if (r)
- return r;
- }
-
- gfx_v9_4_3_alloc_ip_dump(adev);
-
- r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ r = amdgpu_gfx_sysfs_init(adev);
if (r)
return r;
+ gfx_v9_4_3_alloc_ip_dump(adev);
+
return 0;
}
-static int gfx_v9_4_3_sw_fini(void *handle)
+static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, num_xcc;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
@@ -1201,9 +1224,7 @@ static int gfx_v9_4_3_sw_fini(void *handle)
gfx_v9_4_3_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
gfx_v9_4_3_free_microcode(adev);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_gfx_sysfs_fini(adev);
- amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+ amdgpu_gfx_sysfs_fini(adev);
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_compute_queues);
@@ -1247,8 +1268,10 @@ static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
mutex_unlock(&adev->srbm_mutex);
- /* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ /*
+ * Initialize all compute VMIDs to have no GDS, GWS, or OA
+ * access. These should be enabled by FW for target VMIDs.
+ */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
@@ -1773,9 +1796,7 @@ static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
@@ -2343,10 +2364,10 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
}
-static int gfx_v9_4_3_hw_init(void *handle)
+static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
adev->gfx.cleaner_shader_ptr);
@@ -2367,9 +2388,9 @@ static int gfx_v9_4_3_hw_init(void *handle)
return r;
}
-static int gfx_v9_4_3_hw_fini(void *handle)
+static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, num_xcc;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
@@ -2384,14 +2405,14 @@ static int gfx_v9_4_3_hw_fini(void *handle)
return 0;
}
-static int gfx_v9_4_3_suspend(void *handle)
+static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block)
{
- return gfx_v9_4_3_hw_fini(handle);
+ return gfx_v9_4_3_hw_fini(ip_block);
}
-static int gfx_v9_4_3_resume(void *handle)
+static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
{
- return gfx_v9_4_3_hw_init(handle);
+ return gfx_v9_4_3_hw_init(ip_block);
}
static bool gfx_v9_4_3_is_idle(void *handle)
@@ -2408,24 +2429,24 @@ static bool gfx_v9_4_3_is_idle(void *handle)
return true;
}
-static int gfx_v9_4_3_wait_for_idle(void *handle)
+static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gfx_v9_4_3_is_idle(handle))
+ if (gfx_v9_4_3_is_idle(adev))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
-static int gfx_v9_4_3_soft_reset(void *handle)
+static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 grbm_soft_reset = 0;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* GRBM_STATUS */
tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
@@ -2509,9 +2530,9 @@ static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
-static int gfx_v9_4_3_early_init(void *handle)
+static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
AMDGPU_MAX_COMPUTE_RINGS);
@@ -2527,9 +2548,9 @@ static int gfx_v9_4_3_early_init(void *handle)
return gfx_v9_4_3_init_microcode(adev);
}
-static int gfx_v9_4_3_late_init(void *handle)
+static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
@@ -2758,16 +2779,16 @@ static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
};
-static int gfx_v9_4_3_set_powergating_state(void *handle,
+static int gfx_v9_4_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
}
-static int gfx_v9_4_3_set_clockgating_state(void *handle,
+static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, num_xcc;
if (amdgpu_sriov_vf(adev))
@@ -3056,9 +3077,6 @@ static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
uint32_t value = 0;
- if (!adev->debug_exp_resets)
- return;
-
value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
@@ -3574,9 +3592,6 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int r;
- if (!adev->debug_exp_resets)
- return -EINVAL;
-
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -4567,8 +4582,6 @@ static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
- int i;
-
/* Header itself is a NOP packet */
if (num_nop == 1) {
amdgpu_ring_write(ring, ring->funcs->nop);
@@ -4579,13 +4592,12 @@ static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_no
amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
/* Header is at index 0, followed by num_nops - 1 NOP packet's */
- for (i = 1; i < num_nop; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
+ amdgpu_ring_insert_nop(ring, num_nop - 1);
}
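A quick worked example of the padding logic above: for num_nop = 8 the ring receives a single PACKET3(PACKET3_NOP, 6) header (the PM4 count field encodes one fewer than the number of payload dwords, so 6 covers seven of them), and amdgpu_ring_insert_nop(ring, 7) then writes those seven filler dwords — eight dwords in total, exactly the requested padding.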
-static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
+static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k;
uint32_t xcc_id, xcc_offset, inst_offset;
uint32_t num_xcc, reg, num_inst;
@@ -4643,9 +4655,9 @@ static void gfx_v9_4_3_ip_print(void *handle, struct drm_printer *p)
}
}
-static void gfx_v9_4_3_ip_dump(void *handle)
+static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t i, j, k;
uint32_t num_xcc, reg, num_inst;
uint32_t xcc_id, xcc_offset, inst_offset;
@@ -4656,7 +4668,6 @@ static void gfx_v9_4_3_ip_dump(void *handle)
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
- amdgpu_gfx_off_ctrl(adev, false);
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
xcc_offset = xcc_id * reg_count;
for (i = 0; i < reg_count; i++)
@@ -4664,7 +4675,6 @@ static void gfx_v9_4_3_ip_dump(void *handle)
RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
GET_INST(GC, xcc_id)));
}
- amdgpu_gfx_off_ctrl(adev, true);
/* dump compute queue registers for all instances */
if (!adev->gfx.ip_dump_compute_queues)
@@ -4673,7 +4683,6 @@ static void gfx_v9_4_3_ip_dump(void *handle)
num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
- amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->srbm_mutex);
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
xcc_offset = xcc_id * reg_count * num_inst;
@@ -4700,7 +4709,6 @@ static void gfx_v9_4_3_ip_dump(void *handle)
}
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_off_ctrl(adev, true);
}
static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
@@ -4863,6 +4871,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
/* 9.4.3 removed all the GDS internal memory,
* only support GWS opcode in kernel, like barrier
* semaphore.etc */
@@ -4876,6 +4885,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
/* deprecated for 9.4.3, no usage at all */
adev->gds.gds_compute_max_wave_id = 0;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index ed8e130c7d19..5470cef7e9bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -368,7 +368,9 @@ static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
amdgpu_ip_version(adev, GC_HWIP, 0) ==
IP_VERSION(9, 4, 3) ||
amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 4));
+ IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(9, 5, 0));
WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9784a2892185..9bedca9a79c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -175,7 +175,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
addr, entry->client_id,
soc15_ih_clientid_name[entry->client_id]);
- if (!amdgpu_sriov_vf(adev))
+ /* Only print L2 fault status if the status register could be read and
+ * contains useful information
+ */
+ if (status != 0)
hub->vmhub_funcs->print_l2_protection_fault_status(adev,
status);
@@ -630,9 +633,9 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
}
-static int gmc_v10_0_early_init(void *handle)
+static int gmc_v10_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v10_0_set_mmhub_funcs(adev);
gmc_v10_0_set_gfxhub_funcs(adev);
@@ -651,9 +654,9 @@ static int gmc_v10_0_early_init(void *handle)
return 0;
}
-static int gmc_v10_0_late_init(void *handle)
+static int gmc_v10_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
@@ -769,10 +772,10 @@ static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
-static int gmc_v10_0_sw_init(void *handle)
+static int gmc_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->gfxhub.funcs->init(adev);
@@ -920,9 +923,9 @@ static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
amdgpu_gart_table_vram_free(adev);
}
-static int gmc_v10_0_sw_fini(void *handle)
+static int gmc_v10_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_vm_manager_fini(adev);
gmc_v10_0_gart_fini(adev);
@@ -985,9 +988,9 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v10_0_hw_init(void *handle)
+static int gmc_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;
@@ -1032,9 +1035,9 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v10_0_hw_fini(void *handle)
+static int gmc_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v10_0_gart_disable(adev);
@@ -1053,25 +1056,22 @@ static int gmc_v10_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v10_0_suspend(void *handle)
+static int gmc_v10_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v10_0_hw_fini(adev);
+ gmc_v10_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v10_0_resume(void *handle)
+static int gmc_v10_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v10_0_hw_init(adev);
+ r = gmc_v10_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -1082,22 +1082,17 @@ static bool gmc_v10_0_is_idle(void *handle)
return true;
}
-static int gmc_v10_0_wait_for_idle(void *handle)
+static int gmc_v10_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* There is no need to wait for MC idle in GMC v10.*/
return 0;
}
-static int gmc_v10_0_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int gmc_v10_0_set_clockgating_state(void *handle,
+static int gmc_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* The issue mmhub can't disconnect from DF with MMHUB clock gating being disabled
@@ -1136,7 +1131,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
athub_v2_0_get_clockgating(adev, flags);
}
-static int gmc_v10_0_set_powergating_state(void *handle,
+static int gmc_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1154,7 +1149,6 @@ const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
.resume = gmc_v10_0_resume,
.is_idle = gmc_v10_0_is_idle,
.wait_for_idle = gmc_v10_0_wait_for_idle,
- .soft_reset = gmc_v10_0_soft_reset,
.set_clockgating_state = gmc_v10_0_set_clockgating_state,
.set_powergating_state = gmc_v10_0_set_powergating_state,
.get_clockgating_state = gmc_v10_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 2797fd84432b..72751ab4c766 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -144,7 +144,10 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
addr, entry->client_id);
- if (!amdgpu_sriov_vf(adev))
+ /* Only print L2 fault status if the status register could be read and
+ * contains useful information
+ */
+ if (status != 0)
hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
}
@@ -601,9 +604,9 @@ static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
}
}
-static int gmc_v11_0_early_init(void *handle)
+static int gmc_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v11_0_set_gfxhub_funcs(adev);
gmc_v11_0_set_mmhub_funcs(adev);
@@ -622,9 +625,9 @@ static int gmc_v11_0_early_init(void *handle)
return 0;
}
-static int gmc_v11_0_late_init(void *handle)
+static int gmc_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
@@ -729,10 +732,10 @@ static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
-static int gmc_v11_0_sw_init(void *handle)
+static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->init(adev);
@@ -849,9 +852,9 @@ static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
amdgpu_gart_table_vram_free(adev);
}
-static int gmc_v11_0_sw_fini(void *handle)
+static int gmc_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_vm_manager_fini(adev);
gmc_v11_0_gart_fini(adev);
@@ -908,9 +911,9 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v11_0_hw_init(void *handle)
+static int gmc_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;
@@ -940,9 +943,9 @@ static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v11_0_hw_fini(void *handle)
+static int gmc_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
@@ -961,25 +964,22 @@ static int gmc_v11_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v11_0_suspend(void *handle)
+static int gmc_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v11_0_hw_fini(adev);
+ gmc_v11_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v11_0_resume(void *handle)
+static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v11_0_hw_init(adev);
+ r = gmc_v11_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -990,22 +990,17 @@ static bool gmc_v11_0_is_idle(void *handle)
return true;
}
-static int gmc_v11_0_wait_for_idle(void *handle)
+static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* There is no need to wait for MC idle in GMC v11.*/
return 0;
}
-static int gmc_v11_0_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int gmc_v11_0_set_clockgating_state(void *handle,
+static int gmc_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
@@ -1023,7 +1018,7 @@ static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
athub_v3_0_get_clockgating(adev, flags);
}
-static int gmc_v11_0_set_powergating_state(void *handle,
+static int gmc_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1041,7 +1036,6 @@ const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
.resume = gmc_v11_0_resume,
.is_idle = gmc_v11_0_is_idle,
.wait_for_idle = gmc_v11_0_wait_for_idle,
- .soft_reset = gmc_v11_0_soft_reset,
.set_clockgating_state = gmc_v11_0_set_clockgating_state,
.set_powergating_state = gmc_v11_0_set_powergating_state,
.get_clockgating_state = gmc_v11_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index edcb5351f8cc..b749f1c3f6a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -40,7 +40,7 @@
#include "gfxhub_v12_0.h"
#include "mmhub_v4_1_0.h"
#include "athub_v4_1_0.h"
-
+#include "umc_v8_14.h"
static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
@@ -137,7 +137,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
addr, entry->client_id);
- if (!amdgpu_sriov_vf(adev))
+ /* Only print L2 fault status if the status register could be read and
+ * contains useful information
+ */
+ if (status != 0)
hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
}
@@ -578,6 +581,18 @@ static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev)
{
+ switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
+ case IP_VERSION(8, 14, 0):
+ adev->umc.channel_inst_num = UMC_V8_14_CHANNEL_INSTANCE_NUM;
+ adev->umc.umc_inst_num = UMC_V8_14_UMC_INSTANCE_NUM(adev);
+ adev->umc.node_inst_num = 0;
+ adev->umc.max_ras_err_cnt_per_query = UMC_V8_14_TOTAL_CHANNEL_NUM(adev);
+ adev->umc.channel_offs = UMC_V8_14_PER_CHANNEL_OFFSET;
+ adev->umc.ras = &umc_v8_14_ras;
+ break;
+ default:
+ break;
+ }
}
@@ -604,9 +619,9 @@ static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev)
}
}
-static int gmc_v12_0_early_init(void *handle)
+static int gmc_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v12_0_set_gfxhub_funcs(adev);
gmc_v12_0_set_mmhub_funcs(adev);
@@ -624,9 +639,9 @@ static int gmc_v12_0_early_init(void *handle)
return 0;
}
-static int gmc_v12_0_late_init(void *handle)
+static int gmc_v12_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
@@ -731,10 +746,10 @@ static int gmc_v12_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
-static int gmc_v12_0_sw_init(void *handle)
+static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->init(adev);
@@ -826,6 +841,10 @@ static int gmc_v12_0_sw_init(void *handle)
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -841,9 +860,9 @@ static void gmc_v12_0_gart_fini(struct amdgpu_device *adev)
amdgpu_gart_table_vram_free(adev);
}
-static int gmc_v12_0_sw_fini(void *handle)
+static int gmc_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_vm_manager_fini(adev);
gmc_v12_0_gart_fini(adev);
@@ -894,10 +913,10 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v12_0_hw_init(void *handle)
+static int gmc_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* The sequence of these two function calls matters.*/
gmc_v12_0_init_golden_registers(adev);
@@ -924,9 +943,9 @@ static void gmc_v12_0_gart_disable(struct amdgpu_device *adev)
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v12_0_hw_fini(void *handle)
+static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
@@ -945,25 +964,22 @@ static int gmc_v12_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v12_0_suspend(void *handle)
+static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v12_0_hw_fini(adev);
+ gmc_v12_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v12_0_resume(void *handle)
+static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v12_0_hw_init(adev);
+ r = gmc_v12_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -974,22 +990,17 @@ static bool gmc_v12_0_is_idle(void *handle)
return true;
}
-static int gmc_v12_0_wait_for_idle(void *handle)
+static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* There is no need to wait for MC idle in GMC v11.*/
return 0;
}
-static int gmc_v12_0_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int gmc_v12_0_set_clockgating_state(void *handle,
+static int gmc_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
@@ -1007,7 +1018,7 @@ static void gmc_v12_0_get_clockgating_state(void *handle, u64 *flags)
athub_v4_1_0_get_clockgating(adev, flags);
}
-static int gmc_v12_0_set_powergating_state(void *handle,
+static int gmc_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1025,7 +1036,6 @@ const struct amd_ip_funcs gmc_v12_0_ip_funcs = {
.resume = gmc_v12_0_resume,
.is_idle = gmc_v12_0_is_idle,
.wait_for_idle = gmc_v12_0_wait_for_idle,
- .soft_reset = gmc_v12_0_soft_reset,
.set_clockgating_state = gmc_v12_0_set_clockgating_state,
.set_powergating_state = gmc_v12_0_set_powergating_state,
.get_clockgating_state = gmc_v12_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index d36725666b54..2245dda92021 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -43,7 +43,7 @@
static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
-static int gmc_v6_0_wait_for_idle(void *handle);
+static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
@@ -64,8 +64,13 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin");
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
+ struct amdgpu_ip_block *ip_block;
- gmc_v6_0_wait_for_idle((void *)adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ gmc_v6_0_wait_for_idle(ip_block);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -126,7 +131,8 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
chip_name = "si58";
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mc.bin", chip_name);
if (err) {
dev_err(adev->dev,
"si_mc: Failed to load firmware \"%s_mc.bin\"\n",
@@ -213,6 +219,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
int i, j;
+ struct amdgpu_ip_block *ip_block;
+
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x6) {
@@ -224,7 +232,11 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (gmc_v6_0_wait_for_idle((void *)adev))
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ if (gmc_v6_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
if (adev->mode_info.num_crtc) {
@@ -251,7 +263,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
- if (gmc_v6_0_wait_for_idle((void *)adev))
+ if (gmc_v6_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
@@ -762,9 +774,9 @@ static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
}
}
-static int gmc_v6_0_early_init(void *handle)
+static int gmc_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v6_0_set_gmc_funcs(adev);
gmc_v6_0_set_irq_funcs(adev);
@@ -772,9 +784,9 @@ static int gmc_v6_0_early_init(void *handle)
return 0;
}
-static int gmc_v6_0_late_init(void *handle)
+static int gmc_v6_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
@@ -799,10 +811,10 @@ static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
-static int gmc_v6_0_sw_init(void *handle)
+static int gmc_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
@@ -876,9 +888,9 @@ static int gmc_v6_0_sw_init(void *handle)
return 0;
}
-static int gmc_v6_0_sw_fini(void *handle)
+static int gmc_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -889,10 +901,10 @@ static int gmc_v6_0_sw_fini(void *handle)
return 0;
}
-static int gmc_v6_0_hw_init(void *handle)
+static int gmc_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v6_0_mc_program(adev);
@@ -914,9 +926,9 @@ static int gmc_v6_0_hw_init(void *handle)
return 0;
}
-static int gmc_v6_0_hw_fini(void *handle)
+static int gmc_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v6_0_gart_disable(adev);
@@ -924,21 +936,19 @@ static int gmc_v6_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v6_0_suspend(void *handle)
+static int gmc_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v6_0_hw_fini(adev);
+ gmc_v6_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v6_0_resume(void *handle)
+static int gmc_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- r = gmc_v6_0_hw_init(adev);
+ r = gmc_v6_0_hw_init(ip_block);
if (r)
return r;
@@ -950,6 +960,7 @@ static int gmc_v6_0_resume(void *handle)
static bool gmc_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
@@ -959,13 +970,13 @@ static bool gmc_v6_0_is_idle(void *handle)
return true;
}
-static int gmc_v6_0_wait_for_idle(void *handle)
+static int gmc_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (gmc_v6_0_is_idle(handle))
+ if (gmc_v6_0_is_idle(adev))
return 0;
udelay(1);
}
@@ -973,9 +984,10 @@ static int gmc_v6_0_wait_for_idle(void *handle)
}
-static int gmc_v6_0_soft_reset(void *handle)
+static int gmc_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -992,7 +1004,8 @@ static int gmc_v6_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v6_0_mc_stop(adev);
- if (gmc_v6_0_wait_for_idle(adev))
+
+ if (gmc_v6_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
tmp = RREG32(mmSRBM_SOFT_RESET);
@@ -1082,13 +1095,13 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v6_0_set_clockgating_state(void *handle,
+static int gmc_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int gmc_v6_0_set_powergating_state(void *handle,
+static int gmc_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1109,8 +1122,6 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.soft_reset = gmc_v6_0_soft_reset,
.set_clockgating_state = gmc_v6_0_set_clockgating_state,
.set_powergating_state = gmc_v6_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 994432fb57ea..9aac4b1101e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -52,7 +52,7 @@
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
-static int gmc_v7_0_wait_for_idle(void *handle);
+static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
@@ -87,9 +87,14 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
+ struct amdgpu_ip_block *ip_block;
u32 blackout;
- gmc_v7_0_wait_for_idle((void *)adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ gmc_v7_0_wait_for_idle(ip_block);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -152,7 +157,8 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mc.bin", chip_name);
if (err) {
pr_err("cik_mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
amdgpu_ucode_release(&adev->gmc.fw);
@@ -251,9 +257,14 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
+ struct amdgpu_ip_block *ip_block;
u32 tmp;
int i, j;
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x6) {
WREG32((0xb05 + j), 0x00000000);
@@ -264,7 +275,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (gmc_v7_0_wait_for_idle((void *)adev))
+ if (gmc_v7_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
if (adev->mode_info.num_crtc) {
@@ -288,7 +299,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
- if (gmc_v7_0_wait_for_idle((void *)adev))
+ if (gmc_v7_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
@@ -921,9 +932,9 @@ static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
}
}
-static int gmc_v7_0_early_init(void *handle)
+static int gmc_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v7_0_set_gmc_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
@@ -940,9 +951,9 @@ static int gmc_v7_0_early_init(void *handle)
return 0;
}
-static int gmc_v7_0_late_init(void *handle)
+static int gmc_v7_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
@@ -968,10 +979,10 @@ static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
return size;
}
-static int gmc_v7_0_sw_init(void *handle)
+static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
@@ -1060,9 +1071,9 @@ static int gmc_v7_0_sw_init(void *handle)
return 0;
}
-static int gmc_v7_0_sw_fini(void *handle)
+static int gmc_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1074,10 +1085,10 @@ static int gmc_v7_0_sw_fini(void *handle)
return 0;
}
-static int gmc_v7_0_hw_init(void *handle)
+static int gmc_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v7_0_init_golden_registers(adev);
@@ -1101,9 +1112,9 @@ static int gmc_v7_0_hw_init(void *handle)
return 0;
}
-static int gmc_v7_0_hw_fini(void *handle)
+static int gmc_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v7_0_gart_disable(adev);
@@ -1111,25 +1122,22 @@ static int gmc_v7_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v7_0_suspend(void *handle)
+static int gmc_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v7_0_hw_fini(adev);
+ gmc_v7_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v7_0_resume(void *handle)
+static int gmc_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v7_0_hw_init(adev);
+ r = gmc_v7_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -1146,11 +1154,11 @@ static bool gmc_v7_0_is_idle(void *handle)
return true;
}
-static int gmc_v7_0_wait_for_idle(void *handle)
+static int gmc_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -1167,9 +1175,9 @@ static int gmc_v7_0_wait_for_idle(void *handle)
}
-static int gmc_v7_0_soft_reset(void *handle)
+static int gmc_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1186,7 +1194,7 @@ static int gmc_v7_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v7_0_mc_stop(adev);
- if (gmc_v7_0_wait_for_idle((void *)adev))
+ if (gmc_v7_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
tmp = RREG32(mmSRBM_SOFT_RESET);
@@ -1310,11 +1318,11 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v7_0_set_clockgating_state(void *handle,
+static int gmc_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -1330,7 +1338,7 @@ static int gmc_v7_0_set_clockgating_state(void *handle,
return 0;
}
-static int gmc_v7_0_set_powergating_state(void *handle,
+static int gmc_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1351,8 +1359,6 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.soft_reset = gmc_v7_0_soft_reset,
.set_clockgating_state = gmc_v7_0_set_clockgating_state,
.set_powergating_state = gmc_v7_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 86488c052f82..d06585207c33 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -53,7 +53,7 @@
static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
-static int gmc_v8_0_wait_for_idle(void *handle);
+static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
@@ -170,8 +170,13 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
+ struct amdgpu_ip_block *ip_block;
- gmc_v8_0_wait_for_idle(adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ gmc_v8_0_wait_for_idle(ip_block);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -254,7 +259,8 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
return -EINVAL;
}
- err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mc.bin", chip_name);
if (err) {
pr_err("mc: Failed to load firmware \"%s_mc.bin\"\n", chip_name);
amdgpu_ucode_release(&adev->gmc.fw);
@@ -426,6 +432,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
+ struct amdgpu_ip_block *ip_block;
u32 tmp;
int i, j;
@@ -439,7 +446,11 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (gmc_v8_0_wait_for_idle((void *)adev))
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (!ip_block)
+ return;
+
+ if (gmc_v8_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
if (adev->mode_info.num_crtc) {
@@ -474,7 +485,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
- if (gmc_v8_0_wait_for_idle((void *)adev))
+ if (gmc_v8_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
@@ -1027,9 +1038,9 @@ static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
}
}
-static int gmc_v8_0_early_init(void *handle)
+static int gmc_v8_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v8_0_set_gmc_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
@@ -1046,9 +1057,9 @@ static int gmc_v8_0_early_init(void *handle)
return 0;
}
-static int gmc_v8_0_late_init(void *handle)
+static int gmc_v8_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
@@ -1076,10 +1087,10 @@ static unsigned int gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
#define mmMC_SEQ_MISC0_FIJI 0xA71
-static int gmc_v8_0_sw_init(void *handle)
+static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
@@ -1173,9 +1184,9 @@ static int gmc_v8_0_sw_init(void *handle)
return 0;
}
-static int gmc_v8_0_sw_fini(void *handle)
+static int gmc_v8_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1187,10 +1198,10 @@ static int gmc_v8_0_sw_fini(void *handle)
return 0;
}
-static int gmc_v8_0_hw_init(void *handle)
+static int gmc_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v8_0_init_golden_registers(adev);
@@ -1222,9 +1233,9 @@ static int gmc_v8_0_hw_init(void *handle)
return 0;
}
-static int gmc_v8_0_hw_fini(void *handle)
+static int gmc_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v8_0_gart_disable(adev);
@@ -1232,25 +1243,22 @@ static int gmc_v8_0_hw_fini(void *handle)
return 0;
}
-static int gmc_v8_0_suspend(void *handle)
+static int gmc_v8_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- gmc_v8_0_hw_fini(adev);
+ gmc_v8_0_hw_fini(ip_block);
return 0;
}
-static int gmc_v8_0_resume(void *handle)
+static int gmc_v8_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v8_0_hw_init(adev);
+ r = gmc_v8_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -1267,11 +1275,11 @@ static bool gmc_v8_0_is_idle(void *handle)
return true;
}
-static int gmc_v8_0_wait_for_idle(void *handle)
+static int gmc_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned int i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -1289,10 +1297,10 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
-static bool gmc_v8_0_check_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
@@ -1316,23 +1324,23 @@ static bool gmc_v8_0_check_soft_reset(void *handle)
return false;
}
-static int gmc_v8_0_pre_soft_reset(void *handle)
+static int gmc_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->gmc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_stop(adev);
- if (gmc_v8_0_wait_for_idle(adev))
+ if (gmc_v8_0_wait_for_idle(ip_block))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
return 0;
}
-static int gmc_v8_0_soft_reset(void *handle)
+static int gmc_v8_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset;
if (!adev->gmc.srbm_soft_reset)
@@ -1361,9 +1369,9 @@ static int gmc_v8_0_soft_reset(void *handle)
return 0;
}
-static int gmc_v8_0_post_soft_reset(void *handle)
+static int gmc_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->gmc.srbm_soft_reset)
return 0;
@@ -1651,10 +1659,10 @@ static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
}
}
-static int gmc_v8_0_set_clockgating_state(void *handle,
+static int gmc_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1672,7 +1680,7 @@ static int gmc_v8_0_set_clockgating_state(void *handle,
return 0;
}
-static int gmc_v8_0_set_powergating_state(void *handle,
+static int gmc_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1715,8 +1723,6 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
.get_clockgating_state = gmc_v8_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
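[Editor's note] The gmc_v8_0 hunks above all follow one mechanical pattern: every amd_ip_funcs callback that previously took an opaque `void *handle` (which was always the amdgpu_device) now receives the `struct amdgpu_ip_block *` it belongs to and reaches the device via `ip_block->adev`. Callers that only had a device, such as gmc_v8_0_mc_program, now look the block up with amdgpu_device_ip_get_ip_block first. Below is a minimal, standalone userspace model of the before/after — the `device` and `ip_block` types here are simplified stand-ins, not the kernel structs:

#include <stdio.h>

struct device { const char *name; };
struct ip_block { struct device *adev; };   /* models struct amdgpu_ip_block */

/* Old style: opaque handle, cast required and unchecked at every call site. */
static int hw_init_old(void *handle)
{
        struct device *adev = (struct device *)handle;
        printf("init %s (old)\n", adev->name);
        return 0;
}

/* New style: typed ip_block, device reached through ->adev. */
static int hw_init_new(struct ip_block *ip_block)
{
        struct device *adev = ip_block->adev;
        printf("init %s (new)\n", adev->name);
        return 0;
}

int main(void)
{
        struct device dev = { "gmc" };
        struct ip_block blk = { &dev };

        hw_init_old(&dev);   /* caller must know what the handle really is */
        hw_init_new(&blk);   /* mistakes are now caught by the compiler */
        return 0;
}

The gain is exactly what the model shows: the compiler now type-checks the callback argument, and per-block state can live on the ip_block instead of being rediscovered from the device.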
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c76ac0dfe572..291549765c38 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -623,6 +623,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
}
}
+ if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault))
+ return 1;
+
if (!printk_ratelimit())
return 0;
@@ -645,7 +648,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
soc15_ih_clientid_name[entry->client_id]);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n",
node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
@@ -672,6 +676,12 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
(amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)))
return 0;
+ /* Only print L2 fault status if the status register could be read and
+ * contains useful information
+ */
+ if (!status)
+ return 0;
+
if (!amdgpu_sriov_vf(adev))
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -789,7 +799,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
return false;
return ((vmhub == AMDGPU_MMHUB0(0) ||
@@ -1124,16 +1135,21 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
- bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
- bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
+ bool is_vram = bo->tbo.resource &&
+ bo->tbo.resource->mem_type == TTM_PL_VRAM;
+ bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+ AMDGPU_GEM_CREATE_EXT_COHERENT);
bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
struct amdgpu_vm *vm = mapping->bo_va->base.vm;
unsigned int mtype_local, mtype;
+ uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
bool snoop = false;
bool is_local;
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ dma_resv_assert_held(bo->tbo.base.resv);
+
+ switch (gc_ip_version) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
if (is_vram) {
@@ -1147,10 +1163,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
/* FIXME: is this still needed? Or does
* amdgpu_ttm_tt_pde_flags already handle this?
*/
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 2) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) ==
- IP_VERSION(9, 4, 3)) &&
+ if (gc_ip_version == IP_VERSION(9, 4, 2) &&
adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
@@ -1174,6 +1187,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
/* Only local VRAM BOs or system memory on non-NUMA APUs
* can be assumed to be local in their entirety. Choose
* MTYPE_NC as safe fallback for all system memory BOs on
@@ -1198,7 +1212,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
if (uncached) {
mtype = MTYPE_UC;
} else if (ext_coherent) {
- if (adev->rev_id)
+ if (gc_ip_version == IP_VERSION(9, 5, 0) || adev->rev_id)
mtype = is_local ? MTYPE_CC : MTYPE_UC;
else
mtype = MTYPE_UC;
@@ -1208,10 +1222,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
/* dGPU */
if (is_local)
mtype = mtype_local;
- else if (is_vram)
- mtype = MTYPE_NC;
- else
+ else if (gc_ip_version < IP_VERSION(9, 5, 0) && !is_vram)
mtype = MTYPE_UC;
+ else
+ mtype = MTYPE_NC;
}
break;
@@ -1251,9 +1265,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if (bo && bo->tbo.resource)
- gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
- mapping, flags);
+ if ((*flags & AMDGPU_PTE_VALID) && bo)
+ gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -1266,7 +1279,8 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
* memory can use more efficient MTYPEs.
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 5, 0))
return;
/* Only direct-mapped memory allows us to determine the NUMA node from
@@ -1387,14 +1401,44 @@ gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
}
static enum amdgpu_memory_partition
+gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev)
+{
+ switch (adev->gmc.num_mem_partitions) {
+ case 0:
+ return UNKNOWN_MEMORY_PARTITION_MODE;
+ case 1:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ case 2:
+ return AMDGPU_NPS2_PARTITION_MODE;
+ case 4:
+ return AMDGPU_NPS4_PARTITION_MODE;
+ default:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ }
+}
+
+static enum amdgpu_memory_partition
gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev))
- return AMDGPU_NPS1_PARTITION_MODE;
+ return gmc_v9_0_query_vf_memory_partition(adev);
return gmc_v9_0_get_memory_partition(adev, NULL);
}
+static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev)
+{
+ if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested &&
+ adev->nbio.funcs->is_nps_switch_requested(adev)) {
+ adev->gmc.reset_flags |= AMDGPU_GMC_INIT_RESET_NPS;
+ return true;
+ }
+
+ return false;
+}
+
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
@@ -1406,6 +1450,8 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
.query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
+ .request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
+ .need_reset_on_init = &gmc_v9_0_need_reset_on_init,
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -1499,6 +1545,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
adev->mmhub.ras = &mmhub_v1_7_ras;
break;
case IP_VERSION(1, 8, 0):
+ case IP_VERSION(1, 8, 1):
adev->mmhub.ras = &mmhub_v1_8_ras;
break;
default:
@@ -1510,7 +1557,8 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
else
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1545,9 +1593,31 @@ static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
adev->gmc.xgmi.ras = &xgmi_ras;
}
-static int gmc_v9_0_early_init(void *handle)
+static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->gmc.supported_nps_modes = 0;
+
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return;
+
+ /*TODO: Check PSP version also which supports NPS switch. Otherwise keep
+ * supported modes as 0.
+ */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ adev->gmc.supported_nps_modes =
+ BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ default:
+ break;
+ }
+}
+
+static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
/*
* 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
@@ -1556,7 +1626,8 @@ static int gmc_v9_0_early_init(void *handle)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.xgmi.supported = true;
if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) {
@@ -1601,9 +1672,9 @@ static int gmc_v9_0_early_init(void *handle)
return 0;
}
-static int gmc_v9_0_late_init(void *handle)
+static int gmc_v9_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
@@ -1729,6 +1800,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
default:
adev->gmc.gart_size = 512ULL << 20;
break;
@@ -1900,6 +1972,8 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
switch (mode) {
case UNKNOWN_MEMORY_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 0;
+ break;
case AMDGPU_NPS1_PARTITION_MODE:
adev->gmc.num_mem_partitions = 1;
break;
@@ -1919,7 +1993,7 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
/* Use NPS range info, if populated */
r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
- adev->gmc.num_mem_partitions);
+ &adev->gmc.num_mem_partitions);
if (!r) {
l = 0;
for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
@@ -1929,6 +2003,11 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
}
} else {
+ if (!adev->gmc.num_mem_partitions) {
+ dev_err(adev->dev,
+ "Not able to detect NPS mode, fall back to NPS1");
+ adev->gmc.num_mem_partitions = 1;
+ }
/* Fallback to sw based calculation */
size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
size /= adev->gmc.num_mem_partitions;
@@ -1987,10 +2066,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
adev->gmc.vram_width = 128 * 64;
}
-static int gmc_v9_0_sw_init(void *handle)
+static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
unsigned long inst_mask = adev->aid_mask;
adev->gfxhub.funcs->init(adev);
@@ -2000,7 +2079,8 @@ static int gmc_v9_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
gmc_v9_4_3_init_vram_info(adev);
} else if (!adev->bios) {
if (adev->flags & AMD_IS_APU) {
@@ -2084,6 +2164,7 @@ static int gmc_v9_0_sw_init(void *handle)
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
NUM_XCC(adev->gfx.xcc_mask));
@@ -2150,7 +2231,8 @@ static int gmc_v9_0_sw_init(void *handle)
amdgpu_gmc_get_vbios_allocations(adev);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) {
r = gmc_v9_0_init_mem_ranges(adev);
if (r)
return r;
@@ -2165,6 +2247,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
+ gmc_v9_0_init_nps_details(adev);
/*
* number of VMs
* VMID 0 is reserved for System
@@ -2179,7 +2262,8 @@ static int gmc_v9_0_sw_init(void *handle)
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) ?
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) ?
3 :
8;
@@ -2192,18 +2276,20 @@ static int gmc_v9_0_sw_init(void *handle)
return r;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
amdgpu_gmc_sysfs_init(adev);
return 0;
}
-static int gmc_v9_0_sw_fini(void *handle)
+static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
amdgpu_gmc_sysfs_fini(adev);
amdgpu_gmc_ras_fini(adev);
@@ -2308,9 +2394,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v9_0_hw_init(void *handle)
+static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool value;
int i, r;
@@ -2393,9 +2479,9 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
adev->mmhub.funcs->gart_disable(adev);
}
-static int gmc_v9_0_hw_fini(void *handle)
+static int gmc_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
gmc_v9_0_gart_disable(adev);
@@ -2413,32 +2499,44 @@ static int gmc_v9_0_hw_fini(void *handle)
if (adev->mmhub.funcs->update_power_gating)
adev->mmhub.funcs->update_power_gating(adev, false);
- amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ /*
+ * For minimal init, late_init is not called, hence VM fault/RAS irqs
+ * are not enabled.
+ */
+ if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
+ amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
- if (adev->gmc.ecc_irq.funcs &&
- amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
- amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ if (adev->gmc.ecc_irq.funcs &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ }
return 0;
}
-static int gmc_v9_0_suspend(void *handle)
+static int gmc_v9_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return gmc_v9_0_hw_fini(adev);
+ return gmc_v9_0_hw_fini(ip_block);
}
-static int gmc_v9_0_resume(void *handle)
+static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block)
{
+ struct amdgpu_device *adev = ip_block->adev;
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = gmc_v9_0_hw_init(adev);
+ /* If a reset is done for NPS mode switch, read the memory range
+ * information again.
+ */
+ if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) {
+ gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+ adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS;
+ }
+
+ r = gmc_v9_0_hw_init(ip_block);
if (r)
return r;
- amdgpu_vmid_reset_all(adev);
+ amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}
@@ -2449,22 +2547,22 @@ static bool gmc_v9_0_is_idle(void *handle)
return true;
}
-static int gmc_v9_0_wait_for_idle(void *handle)
+static int gmc_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* There is no need to wait for MC idle in GMC v9.*/
return 0;
}
-static int gmc_v9_0_soft_reset(void *handle)
+static int gmc_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* XXX for emulation.*/
return 0;
}
-static int gmc_v9_0_set_clockgating_state(void *handle,
+static int gmc_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->set_clockgating(adev, state);
@@ -2482,7 +2580,7 @@ static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
athub_v1_0_get_clockgating(adev, flags);
}
-static int gmc_v9_0_set_powergating_state(void *handle,
+static int gmc_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
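[Editor's note] Among the gmc_v9_0 changes above, the NPS (memory partitioning) plumbing is the most substantive: on virtual functions the partition mode is no longer hardwired to NPS1 but inferred from the number of memory partitions actually detected. A standalone sketch of that mapping follows; the enum names model amdgpu_memory_partition and are illustrative, not the kernel's definitions:

#include <stdio.h>

enum mem_partition_mode {       /* models enum amdgpu_memory_partition */
        MODE_UNKNOWN,
        MODE_NPS1,
        MODE_NPS2,
        MODE_NPS4,
};

/* Mirror of the new VF query: partition count -> mode, NPS1 as fallback. */
static enum mem_partition_mode query_vf_mode(unsigned int num_partitions)
{
        switch (num_partitions) {
        case 0:
                return MODE_UNKNOWN;    /* ranges not detected yet */
        case 1:
                return MODE_NPS1;
        case 2:
                return MODE_NPS2;
        case 4:
                return MODE_NPS4;
        default:
                return MODE_NPS1;       /* conservative fallback */
        }
}

int main(void)
{
        for (unsigned int n = 0; n <= 4; n++)
                printf("%u partition(s) -> mode %d\n", n, query_vf_mode(n));
        return 0;
}

The resume-path hunk completes the picture: when a reset was performed specifically to switch NPS mode (AMDGPU_GMC_INIT_RESET_NPS), the software memory ranges are rebuilt before hw_init so the new partition layout is visible immediately.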
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index e019249883fb..194026e9be33 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -40,10 +40,12 @@
static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
+ RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ }
}
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
@@ -54,11 +56,13 @@ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5))
return;
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
- else
+ RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
+ } else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+ }
}
static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev,
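[Editor's note] Every hdp_v*_flush_hdp hunk in this patch adds the same single line: a read-back of the register that was just written. MMIO writes over PCIe are posted, so the CPU can race ahead while the write is still buffered; reading the same register stalls until the write has actually reached the device. A userspace model of the idea, with a volatile variable standing in for an MMIO register (the kernel uses WREG32/RREG32, which this deliberately does not reproduce):

#include <stdio.h>

static volatile unsigned int fake_mmio_reg;  /* stands in for an MMIO register */

static void reg_write(volatile unsigned int *reg, unsigned int val)
{
        *reg = val;     /* on a real bus this write is posted (buffered) */
}

static unsigned int reg_read(volatile unsigned int *reg)
{
        return *reg;
}

static void flush_hdp_model(void)
{
        reg_write(&fake_mmio_reg, 0);
        /* models the added posting read: on real MMIO, this read cannot
         * complete until the preceding write has landed on the device */
        (void)reg_read(&fake_mmio_reg);
}

int main(void)
{
        flush_hdp_model();
        printf("flush issued and confirmed\n");
        return 0;
}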
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index ed7facacf2fe..d3962d469088 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -31,10 +31,12 @@
static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
+ RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ }
}
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
@@ -42,6 +44,7 @@ static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+ RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
} else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index 29c3484ae1f1..f52552c5fa27 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -31,13 +31,15 @@
static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
- else
+ RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
+ }
}
static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index 33736d361dd0..6948fe9956ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -34,10 +34,12 @@
static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
+ RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ }
}
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 1c99bb09e2a1..63820329f67e 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -31,10 +31,12 @@
static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (!ring || !ring->funcs->emit_wreg)
+ if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- else
+ RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+ }
}
static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 07984f7c3ae7..8ac3d3282268 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -273,9 +273,9 @@ static void iceland_ih_set_rptr(struct amdgpu_device *adev,
WREG32(mmIH_RB_RPTR, ih->rptr);
}
-static int iceland_ih_early_init(void *handle)
+static int iceland_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -287,10 +287,10 @@ static int iceland_ih_early_init(void *handle)
return 0;
}
-static int iceland_ih_sw_init(void *handle)
+static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
@@ -301,9 +301,9 @@ static int iceland_ih_sw_init(void *handle)
return r;
}
-static int iceland_ih_sw_fini(void *handle)
+static int iceland_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
@@ -311,34 +311,28 @@ static int iceland_ih_sw_fini(void *handle)
return 0;
}
-static int iceland_ih_hw_init(void *handle)
+static int iceland_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return iceland_ih_irq_init(adev);
}
-static int iceland_ih_hw_fini(void *handle)
+static int iceland_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- iceland_ih_irq_disable(adev);
+ iceland_ih_irq_disable(ip_block->adev);
return 0;
}
-static int iceland_ih_suspend(void *handle)
+static int iceland_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return iceland_ih_hw_fini(adev);
+ return iceland_ih_hw_fini(ip_block);
}
-static int iceland_ih_resume(void *handle)
+static int iceland_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return iceland_ih_hw_init(adev);
+ return iceland_ih_hw_init(ip_block);
}
static bool iceland_ih_is_idle(void *handle)
@@ -352,11 +346,11 @@ static bool iceland_ih_is_idle(void *handle)
return true;
}
-static int iceland_ih_wait_for_idle(void *handle)
+static int iceland_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -368,10 +362,10 @@ static int iceland_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int iceland_ih_soft_reset(void *handle)
+static int iceland_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -398,13 +392,13 @@ static int iceland_ih_soft_reset(void *handle)
return 0;
}
-static int iceland_ih_set_clockgating_state(void *handle,
+static int iceland_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int iceland_ih_set_powergating_state(void *handle,
+static int iceland_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -413,7 +407,6 @@ static int iceland_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.name = "iceland_ih",
.early_init = iceland_ih_early_init,
- .late_init = NULL,
.sw_init = iceland_ih_sw_init,
.sw_fini = iceland_ih_sw_fini,
.hw_init = iceland_ih_hw_init,
@@ -425,8 +418,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.soft_reset = iceland_ih_soft_reset,
.set_clockgating_state = iceland_ih_set_clockgating_state,
.set_powergating_state = iceland_ih_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
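[Editor's note] iceland_ih_wait_for_idle keeps its bounded polling loop; only the parameter changes. The loop shape — re-read a status word until a busy bit clears, give up after a timeout with -ETIMEDOUT — is the generic pattern used throughout these blocks. A standalone model (the busy bit and the fake status source are illustrative):

#include <stdio.h>
#include <errno.h>

#define SRBM_IH_BUSY (1u << 0)          /* illustrative busy bit */

static unsigned int read_status(int iter)
{
        /* pretend the block goes idle after a few polls */
        return iter < 3 ? SRBM_IH_BUSY : 0;
}

static int wait_for_idle(int usec_timeout)
{
        for (int i = 0; i < usec_timeout; i++) {
                if (!(read_status(i) & SRBM_IH_BUSY))
                        return 0;       /* idle */
                /* the real loop delays ~1us per iteration here */
        }
        return -ETIMEDOUT;
}

int main(void)
{
        printf("wait_for_idle -> %d\n", wait_for_idle(100));
        return 0;
}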
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 18a761d6ef33..f8a485164437 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -559,19 +559,19 @@ static void ih_v6_0_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &ih_v6_0_self_irq_funcs;
}
-static int ih_v6_0_early_init(void *handle)
+static int ih_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v6_0_set_interrupt_funcs(adev);
ih_v6_0_set_self_irq_funcs(adev);
return 0;
}
-static int ih_v6_0_sw_init(void *handle)
+static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool use_bus_addr;
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
@@ -614,19 +614,19 @@ static int ih_v6_0_sw_init(void *handle)
return r;
}
-static int ih_v6_0_sw_fini(void *handle)
+static int ih_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int ih_v6_0_hw_init(void *handle)
+static int ih_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = ih_v6_0_irq_init(adev);
if (r)
@@ -635,27 +635,21 @@ static int ih_v6_0_hw_init(void *handle)
return 0;
}
-static int ih_v6_0_hw_fini(void *handle)
+static int ih_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ih_v6_0_irq_disable(adev);
+ ih_v6_0_irq_disable(ip_block->adev);
return 0;
}
-static int ih_v6_0_suspend(void *handle)
+static int ih_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v6_0_hw_fini(adev);
+ return ih_v6_0_hw_fini(ip_block);
}
-static int ih_v6_0_resume(void *handle)
+static int ih_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v6_0_hw_init(adev);
+ return ih_v6_0_hw_init(ip_block);
}
static bool ih_v6_0_is_idle(void *handle)
@@ -664,13 +658,13 @@ static bool ih_v6_0_is_idle(void *handle)
return true;
}
-static int ih_v6_0_wait_for_idle(void *handle)
+static int ih_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int ih_v6_0_soft_reset(void *handle)
+static int ih_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
@@ -699,10 +693,10 @@ static void ih_v6_0_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int ih_v6_0_set_clockgating_state(void *handle,
+static int ih_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v6_0_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -762,10 +756,10 @@ static void ih_v6_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
-static int ih_v6_0_set_powergating_state(void *handle,
+static int ih_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
@@ -785,7 +779,6 @@ static void ih_v6_0_get_clockgating_state(void *handle, u64 *flags)
static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
.name = "ih_v6_0",
.early_init = ih_v6_0_early_init,
- .late_init = NULL,
.sw_init = ih_v6_0_sw_init,
.sw_fini = ih_v6_0_sw_fini,
.hw_init = ih_v6_0_hw_init,
@@ -798,8 +791,6 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
.set_clockgating_state = ih_v6_0_set_clockgating_state,
.set_powergating_state = ih_v6_0_set_powergating_state,
.get_clockgating_state = ih_v6_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 2e0469feca1e..dd0042efceec 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -532,9 +532,9 @@ static void ih_v6_1_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &ih_v6_1_self_irq_funcs;
}
-static int ih_v6_1_early_init(void *handle)
+static int ih_v6_1_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -547,10 +547,10 @@ static int ih_v6_1_early_init(void *handle)
return 0;
}
-static int ih_v6_1_sw_init(void *handle)
+static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool use_bus_addr;
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
@@ -593,19 +593,19 @@ static int ih_v6_1_sw_init(void *handle)
return r;
}
-static int ih_v6_1_sw_fini(void *handle)
+static int ih_v6_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int ih_v6_1_hw_init(void *handle)
+static int ih_v6_1_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = ih_v6_1_irq_init(adev);
if (r)
@@ -614,27 +614,21 @@ static int ih_v6_1_hw_init(void *handle)
return 0;
}
-static int ih_v6_1_hw_fini(void *handle)
+static int ih_v6_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ih_v6_1_irq_disable(adev);
+ ih_v6_1_irq_disable(ip_block->adev);
return 0;
}
-static int ih_v6_1_suspend(void *handle)
+static int ih_v6_1_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v6_1_hw_fini(adev);
+ return ih_v6_1_hw_fini(ip_block);
}
-static int ih_v6_1_resume(void *handle)
+static int ih_v6_1_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v6_1_hw_init(adev);
+ return ih_v6_1_hw_init(ip_block);
}
static bool ih_v6_1_is_idle(void *handle)
@@ -643,13 +637,13 @@ static bool ih_v6_1_is_idle(void *handle)
return true;
}
-static int ih_v6_1_wait_for_idle(void *handle)
+static int ih_v6_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int ih_v6_1_soft_reset(void *handle)
+static int ih_v6_1_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
@@ -680,10 +674,10 @@ static void ih_v6_1_update_clockgating_state(struct amdgpu_device *adev,
return;
}
-static int ih_v6_1_set_clockgating_state(void *handle,
+static int ih_v6_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v6_1_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -743,10 +737,10 @@ static void ih_v6_1_update_ih_mem_power_gating(struct amdgpu_device *adev,
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
-static int ih_v6_1_set_powergating_state(void *handle,
+static int ih_v6_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
@@ -768,7 +762,6 @@ static void ih_v6_1_get_clockgating_state(void *handle, u64 *flags)
static const struct amd_ip_funcs ih_v6_1_ip_funcs = {
.name = "ih_v6_1",
.early_init = ih_v6_1_early_init,
- .late_init = NULL,
.sw_init = ih_v6_1_sw_init,
.sw_fini = ih_v6_1_sw_fini,
.hw_init = ih_v6_1_hw_init,
@@ -781,8 +774,6 @@ static const struct amd_ip_funcs ih_v6_1_ip_funcs = {
.set_clockgating_state = ih_v6_1_set_clockgating_state,
.set_powergating_state = ih_v6_1_set_powergating_state,
.get_clockgating_state = ih_v6_1_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 6852081fcff2..8f9b15c171f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -528,19 +528,19 @@ static void ih_v7_0_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &ih_v7_0_self_irq_funcs;
}
-static int ih_v7_0_early_init(void *handle)
+static int ih_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v7_0_set_interrupt_funcs(adev);
ih_v7_0_set_self_irq_funcs(adev);
return 0;
}
-static int ih_v7_0_sw_init(void *handle)
+static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool use_bus_addr;
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
@@ -583,19 +583,19 @@ static int ih_v7_0_sw_init(void *handle)
return r;
}
-static int ih_v7_0_sw_fini(void *handle)
+static int ih_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int ih_v7_0_hw_init(void *handle)
+static int ih_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = ih_v7_0_irq_init(adev);
if (r)
@@ -604,27 +604,21 @@ static int ih_v7_0_hw_init(void *handle)
return 0;
}
-static int ih_v7_0_hw_fini(void *handle)
+static int ih_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ih_v7_0_irq_disable(adev);
+ ih_v7_0_irq_disable(ip_block->adev);
return 0;
}
-static int ih_v7_0_suspend(void *handle)
+static int ih_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v7_0_hw_fini(adev);
+ return ih_v7_0_hw_fini(ip_block);
}
-static int ih_v7_0_resume(void *handle)
+static int ih_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return ih_v7_0_hw_init(adev);
+ return ih_v7_0_hw_init(ip_block);
}
static bool ih_v7_0_is_idle(void *handle)
@@ -633,13 +627,13 @@ static bool ih_v7_0_is_idle(void *handle)
return true;
}
-static int ih_v7_0_wait_for_idle(void *handle)
+static int ih_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int ih_v7_0_soft_reset(void *handle)
+static int ih_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
@@ -670,10 +664,10 @@ static void ih_v7_0_update_clockgating_state(struct amdgpu_device *adev,
return;
}
-static int ih_v7_0_set_clockgating_state(void *handle,
+static int ih_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ih_v7_0_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -733,10 +727,10 @@ static void ih_v7_0_update_ih_mem_power_gating(struct amdgpu_device *adev,
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
-static int ih_v7_0_set_powergating_state(void *handle,
+static int ih_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
@@ -758,7 +752,6 @@ static void ih_v7_0_get_clockgating_state(void *handle, u64 *flags)
static const struct amd_ip_funcs ih_v7_0_ip_funcs = {
.name = "ih_v7_0",
.early_init = ih_v7_0_early_init,
- .late_init = NULL,
.sw_init = ih_v7_0_sw_init,
.sw_fini = ih_v7_0_sw_fini,
.hw_init = ih_v7_0_hw_init,
@@ -771,8 +764,6 @@ static const struct amd_ip_funcs ih_v7_0_ip_funcs = {
.set_clockgating_state = ih_v7_0_set_clockgating_state,
.set_powergating_state = ih_v7_0_set_powergating_state,
.get_clockgating_state = ih_v7_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs ih_v7_0_funcs = {
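[Editor's note] ih_v6_0, ih_v6_1 and ih_v7_0 share the same set_powergating_state body: translate the requested state into a boolean and only touch the IH SRAM power-gating control when the device advertises AMD_PG_SUPPORT_IH_SRAM_PG. A minimal model of that guard — the capability bit and update helper are stand-ins, not kernel symbols:

#include <stdio.h>
#include <stdbool.h>

#define PG_SUPPORT_IH_SRAM (1u << 0)    /* illustrative capability flag */

enum pg_state { PG_STATE_UNGATE, PG_STATE_GATE };

static void update_ih_mem_power_gating(bool enable)
{
        printf("IH SRAM power gating %s\n", enable ? "enabled" : "disabled");
}

static int set_powergating_state(unsigned int pg_flags, enum pg_state state)
{
        bool enable = (state == PG_STATE_GATE);

        if (pg_flags & PG_SUPPORT_IH_SRAM)   /* only if the ASIC supports it */
                update_ih_mem_power_gating(enable);

        return 0;                            /* unsupported -> silent no-op */
}

int main(void)
{
        set_powergating_state(PG_SUPPORT_IH_SRAM, PG_STATE_GATE);
        set_powergating_state(0, PG_STATE_GATE);
        return 0;
}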
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index d4f72e47ae9e..aeca5c08ea2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -50,7 +50,8 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
index 1341f0292031..df898dbb746e 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c
@@ -47,7 +47,8 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
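[Editor's note] The imu_v11_0 and imu_v12_0 hunks, like the gmc_v8_0 one earlier in this patch, thread a new required/optional argument through amdgpu_ucode_request so each caller states whether a missing firmware file is fatal. A standalone model of an API shaped like that — the names and semantics here are illustrative, not the kernel's actual implementation:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

enum fw_need { FW_OPTIONAL, FW_REQUIRED };   /* models AMDGPU_UCODE_* flags */

static bool fw_exists(const char *name)
{
        /* pretend only the IMU firmware file is present */
        return name && name[0] == 'i';
}

static int ucode_request(const char **fw, enum fw_need need, const char *name)
{
        if (!fw_exists(name)) {
                *fw = NULL;
                /* missing required firmware is an error; optional is not */
                return need == FW_REQUIRED ? -ENOENT : 0;
        }
        *fw = name;
        return 0;
}

int main(void)
{
        const char *fw;

        printf("imu required: %d\n", ucode_request(&fw, FW_REQUIRED, "imu.bin"));
        printf("mc required:  %d\n", ucode_request(&fw, FW_REQUIRED, "mc.bin"));
        printf("mc optional:  %d\n", ucode_request(&fw, FW_OPTIONAL, "mc.bin"));
        return 0;
}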
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 6e0e88076224..03b8b7cd5229 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -458,13 +458,13 @@ static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev,
/**
* jpeg_v1_0_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-int jpeg_v1_0_early_init(void *handle)
+int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
@@ -478,12 +478,12 @@ int jpeg_v1_0_early_init(void *handle)
/**
* jpeg_v1_0_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-int jpeg_v1_0_sw_init(void *handle)
+int jpeg_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r;
@@ -509,13 +509,13 @@ int jpeg_v1_0_sw_init(void *handle)
/**
* jpeg_v1_0_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG free up sw allocation
*/
-void jpeg_v1_0_sw_fini(void *handle)
+void jpeg_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_ring_fini(adev->jpeg.inst->ring_dec);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
index 9654d22e0376..097328635083 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
@@ -24,9 +24,9 @@
#ifndef __JPEG_V1_0_H__
#define __JPEG_V1_0_H__
-int jpeg_v1_0_early_init(void *handle);
-int jpeg_v1_0_sw_init(void *handle);
-void jpeg_v1_0_sw_fini(void *handle);
+int jpeg_v1_0_early_init(struct amdgpu_ip_block *ip_block);
+int jpeg_v1_0_sw_init(struct amdgpu_ip_block *ip_block);
+void jpeg_v1_0_sw_fini(struct amdgpu_ip_block *ip_block);
void jpeg_v1_0_start(struct amdgpu_device *adev, int mode);
#define JPEG_V1_REG_RANGE_START 0x8000
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 41c0f8750dc1..7c9251c03815 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -35,19 +35,19 @@
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v2_0_set_powergating_state(void *handle,
+static int jpeg_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
/**
* jpeg_v2_0_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v2_0_early_init(void *handle)
+static int jpeg_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
@@ -61,13 +61,13 @@ static int jpeg_v2_0_early_init(void *handle)
/**
* jpeg_v2_0_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v2_0_sw_init(void *handle)
+static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r;
@@ -104,14 +104,14 @@ static int jpeg_v2_0_sw_init(void *handle)
/**
* jpeg_v2_0_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v2_0_sw_fini(void *handle)
+static int jpeg_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_jpeg_suspend(adev);
if (r)
@@ -125,12 +125,12 @@ static int jpeg_v2_0_sw_fini(void *handle)
/**
* jpeg_v2_0_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v2_0_hw_init(void *handle)
+static int jpeg_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -142,19 +142,19 @@ static int jpeg_v2_0_hw_init(void *handle)
/**
* jpeg_v2_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v2_0_hw_fini(void *handle)
+static int jpeg_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
- jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v2_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -162,20 +162,19 @@ static int jpeg_v2_0_hw_fini(void *handle)
/**
* jpeg_v2_0_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v2_0_suspend(void *handle)
+static int jpeg_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v2_0_hw_fini(adev);
+ r = jpeg_v2_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -183,20 +182,19 @@ static int jpeg_v2_0_suspend(void *handle)
/**
* jpeg_v2_0_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v2_0_resume(void *handle)
+static int jpeg_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v2_0_hw_init(adev);
+ r = jpeg_v2_0_hw_init(ip_block);
return r;
}
@@ -666,9 +664,9 @@ static bool jpeg_v2_0_is_idle(void *handle)
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v2_0_wait_for_idle(void *handle)
+static int jpeg_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
@@ -677,14 +675,14 @@ static int jpeg_v2_0_wait_for_idle(void *handle)
return ret;
}
-static int jpeg_v2_0_set_clockgating_state(void *handle,
+static int jpeg_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
- if (!jpeg_v2_0_is_idle(handle))
+ if (!jpeg_v2_0_is_idle(adev))
return -EBUSY;
jpeg_v2_0_enable_clock_gating(adev);
} else {
@@ -694,10 +692,10 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v2_0_set_powergating_state(void *handle,
+static int jpeg_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->jpeg.cur_state)
@@ -744,7 +742,6 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.name = "jpeg_v2_0",
.early_init = jpeg_v2_0_early_init,
- .late_init = NULL,
.sw_init = jpeg_v2_0_sw_init,
.sw_fini = jpeg_v2_0_sw_fini,
.hw_init = jpeg_v2_0_hw_init,
@@ -753,14 +750,8 @@ static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.resume = jpeg_v2_0_resume,
.is_idle = jpeg_v2_0_is_idle,
.wait_for_idle = jpeg_v2_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_0_set_clockgating_state,
.set_powergating_state = jpeg_v2_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
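[Editor's note] Besides the ip_block conversion, the jpeg_v2_0 (and jpeg_v2_5) hw_fini hunks carry a genuine bug fix: they previously cancelled the VCN idle worker (adev->vcn.idle_work) instead of the JPEG one, then the code power-gates the block if it is still running. A small model of the corrected teardown ordering, with simplified stand-in types:

#include <stdio.h>
#include <stdbool.h>

struct block {
        bool idle_work_pending;  /* models the block's delayed idle worker */
        bool gated;              /* models cur_state == AMD_PG_STATE_GATE */
        bool busy;               /* models a nonzero UVD_JRBC_STATUS */
};

static void cancel_idle_work(struct block *b)
{
        /* must target *this* block's worker -- the bug was cancelling VCN's */
        b->idle_work_pending = false;
}

static void hw_fini(struct block *jpeg)
{
        cancel_idle_work(jpeg);          /* stop the worker racing with fini */
        if (!jpeg->gated && jpeg->busy)
                jpeg->gated = true;      /* force the gate before teardown */
}

int main(void)
{
        struct block jpeg = { .idle_work_pending = true, .busy = true };

        hw_fini(&jpeg);
        printf("pending=%d gated=%d\n", jpeg.idle_work_pending, jpeg.gated);
        return 0;
}

Cancelling the wrong worker left the JPEG idle handler free to run concurrently with hardware teardown; targeting adev->jpeg.idle_work closes that race.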
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index eedb9a829d95..11f6af2646e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -38,7 +38,7 @@
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v2_5_set_powergating_state(void *handle,
+static int jpeg_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev);
@@ -50,13 +50,13 @@ static int amdgpu_ih_clientid_jpeg[] = {
/**
* jpeg_v2_5_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v2_5_early_init(void *handle)
+static int jpeg_v2_5_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 harvest;
int i;
@@ -81,15 +81,15 @@ static int jpeg_v2_5_early_init(void *handle)
/**
* jpeg_v2_5_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v2_5_sw_init(void *handle)
+static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
@@ -153,14 +153,14 @@ static int jpeg_v2_5_sw_init(void *handle)
/**
* jpeg_v2_5_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v2_5_sw_fini(void *handle)
+static int jpeg_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_jpeg_suspend(adev);
if (r)
@@ -174,12 +174,12 @@ static int jpeg_v2_5_sw_fini(void *handle)
/**
* jpeg_v2_5_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v2_5_hw_init(void *handle)
+static int jpeg_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
@@ -202,16 +202,16 @@ static int jpeg_v2_5_hw_init(void *handle)
/**
* jpeg_v2_5_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v2_5_hw_fini(void *handle)
+static int jpeg_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
@@ -219,7 +219,7 @@ static int jpeg_v2_5_hw_fini(void *handle)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
- jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
@@ -231,20 +231,19 @@ static int jpeg_v2_5_hw_fini(void *handle)
/**
* jpeg_v2_5_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v2_5_suspend(void *handle)
+static int jpeg_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v2_5_hw_fini(adev);
+ r = jpeg_v2_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -252,20 +251,19 @@ static int jpeg_v2_5_suspend(void *handle)
/**
* jpeg_v2_5_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v2_5_resume(void *handle)
+static int jpeg_v2_5_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v2_5_hw_init(adev);
+ r = jpeg_v2_5_hw_init(ip_block);
return r;
}
@@ -501,9 +499,9 @@ static bool jpeg_v2_5_is_idle(void *handle)
return ret;
}
-static int jpeg_v2_5_wait_for_idle(void *handle)
+static int jpeg_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -520,10 +518,10 @@ static int jpeg_v2_5_wait_for_idle(void *handle)
return 0;
}
-static int jpeg_v2_5_set_clockgating_state(void *handle,
+static int jpeg_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
@@ -532,7 +530,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
continue;
if (enable) {
- if (!jpeg_v2_5_is_idle(handle))
+ if (!jpeg_v2_5_is_idle(adev))
return -EBUSY;
jpeg_v2_5_enable_clock_gating(adev, i);
} else {
@@ -543,10 +541,10 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v2_5_set_powergating_state(void *handle,
+static int jpeg_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->jpeg.cur_state)
@@ -615,7 +613,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.name = "jpeg_v2_5",
.early_init = jpeg_v2_5_early_init,
- .late_init = NULL,
.sw_init = jpeg_v2_5_sw_init,
.sw_fini = jpeg_v2_5_sw_fini,
.hw_init = jpeg_v2_5_hw_init,
@@ -624,20 +621,13 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.resume = jpeg_v2_5_resume,
.is_idle = jpeg_v2_5_is_idle,
.wait_for_idle = jpeg_v2_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
.name = "jpeg_v2_6",
.early_init = jpeg_v2_5_early_init,
- .late_init = NULL,
.sw_init = jpeg_v2_5_sw_init,
.sw_fini = jpeg_v2_5_sw_fini,
.hw_init = jpeg_v2_5_hw_init,
@@ -646,14 +636,8 @@ static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
.resume = jpeg_v2_5_resume,
.is_idle = jpeg_v2_5_is_idle,
.wait_for_idle = jpeg_v2_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_5_set_clockgating_state,
.set_powergating_state = jpeg_v2_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
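Besides the signature conversion, the hw_fini hunks carry a real bugfix: JPEG teardown was cancelling VCN's delayed idle worker (`adev->vcn.idle_work`) rather than its own `adev->jpeg.idle_work`, so the JPEG idle handler could still fire during or after hw_fini. A toy userspace model of that bug class (in the kernel, cancel_delayed_work_sync() additionally waits for an already-running instance to finish):

#include <stdio.h>
#include <stdbool.h>

/* toy model: each IP block owns its own deferred "idle" task */
struct delayed_work { bool pending; };
struct vcn_block  { struct delayed_work idle_work; };
struct jpeg_block { struct delayed_work idle_work; };
struct device {
	struct vcn_block vcn;
	struct jpeg_block jpeg;
};

static void cancel_delayed_work_sync(struct delayed_work *w)
{
	w->pending = false;	/* the real helper also synchronizes */
}

int main(void)
{
	struct device dev = { .vcn.idle_work.pending = true,
			      .jpeg.idle_work.pending = true };

	/* the pre-fix code cancelled the VCN worker from JPEG's hw_fini... */
	cancel_delayed_work_sync(&dev.vcn.idle_work);
	printf("jpeg idle work still pending: %d\n", dev.jpeg.idle_work.pending);

	/* ...the fix cancels JPEG's own worker */
	cancel_delayed_work_sync(&dev.jpeg.idle_work);
	printf("jpeg idle work still pending: %d\n", dev.jpeg.idle_work.pending);
	return 0;
}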
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index b1e7fd25afbc..4eca65ea9053 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -36,19 +36,19 @@
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v3_0_set_powergating_state(void *handle,
+static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
/**
* jpeg_v3_0_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v3_0_early_init(void *handle)
+static int jpeg_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 harvest;
@@ -75,13 +75,13 @@ static int jpeg_v3_0_early_init(void *handle)
/**
* jpeg_v3_0_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v3_0_sw_init(void *handle)
+static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r;
@@ -118,13 +118,13 @@ static int jpeg_v3_0_sw_init(void *handle)
/**
* jpeg_v3_0_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v3_0_sw_fini(void *handle)
+static int jpeg_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_jpeg_suspend(adev);
@@ -139,12 +139,12 @@ static int jpeg_v3_0_sw_fini(void *handle)
/**
* jpeg_v3_0_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v3_0_hw_init(void *handle)
+static int jpeg_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
@@ -156,19 +156,19 @@ static int jpeg_v3_0_hw_init(void *handle)
/**
* jpeg_v3_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v3_0_hw_fini(void *handle)
+static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
- jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -176,20 +176,19 @@ static int jpeg_v3_0_hw_fini(void *handle)
/**
* jpeg_v3_0_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v3_0_suspend(void *handle)
+static int jpeg_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v3_0_hw_fini(adev);
+ r = jpeg_v3_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -197,20 +196,19 @@ static int jpeg_v3_0_suspend(void *handle)
/**
* jpeg_v3_0_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v3_0_resume(void *handle)
+static int jpeg_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v3_0_hw_init(adev);
+ r = jpeg_v3_0_hw_init(ip_block);
return r;
}
@@ -459,23 +457,23 @@ static bool jpeg_v3_0_is_idle(void *handle)
return ret;
}
-static int jpeg_v3_0_wait_for_idle(void *handle)
+static int jpeg_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v3_0_set_clockgating_state(void *handle,
+static int jpeg_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v3_0_is_idle(handle))
+ if (!jpeg_v3_0_is_idle(adev))
return -EBUSY;
jpeg_v3_0_enable_clock_gating(adev);
} else {
@@ -485,10 +483,10 @@ static int jpeg_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v3_0_set_powergating_state(void *handle,
+static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if(state == adev->jpeg.cur_state)
@@ -535,7 +533,6 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.name = "jpeg_v3_0",
.early_init = jpeg_v3_0_early_init,
- .late_init = NULL,
.sw_init = jpeg_v3_0_sw_init,
.sw_fini = jpeg_v3_0_sw_fini,
.hw_init = jpeg_v3_0_hw_init,
@@ -544,14 +541,8 @@ static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.resume = jpeg_v3_0_resume,
.is_idle = jpeg_v3_0_is_idle,
.wait_for_idle = jpeg_v3_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
.set_powergating_state = jpeg_v3_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
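jpeg_v3_0_wait_for_idle above reduces to a single masked register poll: SOC15_WAIT_ON_RREG spins until UVD_JRBC_STATUS reports the RB_JOB_DONE bits and returns an error on timeout. A hedged standalone sketch of that poll-with-mask pattern (the mask value, retry count, and simulated hardware progress are all made up for the sketch):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* toy register that eventually reports the ring job done */
static uint32_t jrbc_status;
static uint32_t read_reg(void) { return jrbc_status; }

/* minimal analogue of SOC15_WAIT_ON_RREG: poll until
 * (reg & mask) == expect, or give up after a bounded number of tries */
static int wait_on_reg(uint32_t expect, uint32_t mask, int tries)
{
	while (tries--) {
		if ((read_reg() & mask) == expect)
			return 0;
		jrbc_status |= 0x1;	/* stand-in for hardware progress */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	const uint32_t RB_JOB_DONE_MASK = 0x1;	/* assumed value for the sketch */

	printf("wait: %d\n", wait_on_reg(RB_JOB_DONE_MASK, RB_JOB_DONE_MASK, 10));
	return 0;
}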
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index 6c5c1a68a9b7..0aef1f64afd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -39,7 +39,7 @@
static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev);
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v4_0_set_powergating_state(void *handle,
+static int jpeg_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);
@@ -48,13 +48,13 @@ static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
/**
* jpeg_v4_0_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v4_0_early_init(void *handle)
+static int jpeg_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->jpeg.num_jpeg_inst = 1;
@@ -70,13 +70,13 @@ static int jpeg_v4_0_early_init(void *handle)
/**
* jpeg_v4_0_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v4_0_sw_init(void *handle)
+static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r;
@@ -123,6 +123,12 @@ static int jpeg_v4_0_sw_init(void *handle)
r = amdgpu_jpeg_ras_sw_init(adev);
if (r)
return r;
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
return 0;
}
@@ -130,19 +136,20 @@ static int jpeg_v4_0_sw_init(void *handle)
/**
* jpeg_v4_0_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v4_0_sw_fini(void *handle)
+static int jpeg_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_jpeg_suspend(adev);
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -151,12 +158,12 @@ static int jpeg_v4_0_sw_fini(void *handle)
/**
* jpeg_v4_0_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v4_0_hw_init(void *handle)
+static int jpeg_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
@@ -187,19 +194,19 @@ static int jpeg_v4_0_hw_init(void *handle)
/**
* jpeg_v4_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v4_0_hw_fini(void *handle)
+static int jpeg_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
- jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v4_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
@@ -210,20 +217,19 @@ static int jpeg_v4_0_hw_fini(void *handle)
/**
* jpeg_v4_0_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v4_0_suspend(void *handle)
+static int jpeg_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v4_0_hw_fini(adev);
+ r = jpeg_v4_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -231,20 +237,19 @@ static int jpeg_v4_0_suspend(void *handle)
/**
* jpeg_v4_0_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v4_0_resume(void *handle)
+static int jpeg_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v4_0_hw_init(adev);
+ r = jpeg_v4_0_hw_init(ip_block);
return r;
}
@@ -621,23 +626,23 @@ static bool jpeg_v4_0_is_idle(void *handle)
return ret;
}
-static int jpeg_v4_0_wait_for_idle(void *handle)
+static int jpeg_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v4_0_set_clockgating_state(void *handle,
+static int jpeg_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
- if (!jpeg_v4_0_is_idle(handle))
+ if (!jpeg_v4_0_is_idle(adev))
return -EBUSY;
jpeg_v4_0_enable_clock_gating(adev);
} else {
@@ -647,10 +652,10 @@ static int jpeg_v4_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v4_0_set_powergating_state(void *handle,
+static int jpeg_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev)) {
@@ -702,7 +707,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.name = "jpeg_v4_0",
.early_init = jpeg_v4_0_early_init,
- .late_init = NULL,
.sw_init = jpeg_v4_0_sw_init,
.sw_fini = jpeg_v4_0_sw_fini,
.hw_init = jpeg_v4_0_hw_init,
@@ -711,14 +715,8 @@ static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.resume = jpeg_v4_0_resume,
.is_idle = jpeg_v4_0_is_idle,
.wait_for_idle = jpeg_v4_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
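The jpeg_v4_0 sw_init/sw_fini hunks add a paired feature: sw_init derives `adev->jpeg.supported_reset` from the ring's soft/full reset capabilities and registers a sysfs attribute exposing it, and sw_fini removes that attribute before the remaining teardown. A sketch of the capability-mask idea under assumed bit values (the real bits live in amdgpu_reset.h, and per-queue reset stays disabled per the TODO in the diff):

#include <stdio.h>
#include <stdint.h>

/* assumed bit layout purely for the sketch */
#define RESET_TYPE_FULL       (1u << 0)
#define RESET_TYPE_SOFT_RESET (1u << 1)
#define RESET_TYPE_PER_QUEUE  (1u << 2)

/* analogue of amdgpu_get_soft_full_reset_mask(): report which reset
 * scopes a ring supports, before FW gains per-queue reset */
static uint32_t get_soft_full_reset_mask(int fw_has_queue_reset)
{
	uint32_t mask = RESET_TYPE_FULL | RESET_TYPE_SOFT_RESET;

	if (fw_has_queue_reset)		/* left as a TODO in the diff */
		mask |= RESET_TYPE_PER_QUEUE;
	return mask;
}

int main(void)
{
	printf("supported_reset = 0x%x\n", get_soft_full_reset_mask(0));
	return 0;
}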
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 86958cb2c2ab..88f9771c1686 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -43,7 +43,7 @@ enum jpeg_engin_status {
static void jpeg_v4_0_3_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v4_0_3_set_powergating_state(void *handle,
+static int jpeg_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring);
@@ -68,15 +68,15 @@ static inline bool jpeg_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
/**
* jpeg_v4_0_3_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v4_0_3_early_init(void *handle)
+static int jpeg_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
+ adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
jpeg_v4_0_3_set_dec_ring_funcs(adev);
jpeg_v4_0_3_set_irq_funcs(adev);
@@ -88,13 +88,13 @@ static int jpeg_v4_0_3_early_init(void *handle)
/**
* jpeg_v4_0_3_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v4_0_3_sw_init(void *handle)
+static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, j, r, jpeg_inst;
@@ -159,25 +159,33 @@ static int jpeg_v4_0_3_sw_init(void *handle)
}
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
/**
* jpeg_v4_0_3_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v4_0_3_sw_fini(void *handle)
+static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_jpeg_suspend(adev);
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -299,12 +307,12 @@ static int jpeg_v4_0_3_start_sriov(struct amdgpu_device *adev)
/**
* jpeg_v4_0_3_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v4_0_3_hw_init(void *handle)
+static int jpeg_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, j, r, jpeg_inst;
@@ -313,7 +321,7 @@ static int jpeg_v4_0_3_hw_init(void *handle)
if (r)
return r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
ring->wptr = 0;
@@ -358,20 +366,20 @@ static int jpeg_v4_0_3_hw_init(void *handle)
/**
* jpeg_v4_0_3_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v4_0_3_hw_fini(void *handle)
+static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
cancel_delayed_work_sync(&adev->jpeg.idle_work);
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
return ret;
@@ -380,20 +388,19 @@ static int jpeg_v4_0_3_hw_fini(void *handle)
/**
* jpeg_v4_0_3_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v4_0_3_suspend(void *handle)
+static int jpeg_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v4_0_3_hw_fini(adev);
+ r = jpeg_v4_0_3_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -401,20 +408,19 @@ static int jpeg_v4_0_3_suspend(void *handle)
/**
* jpeg_v4_0_3_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v4_0_3_resume(void *handle)
+static int jpeg_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v4_0_3_hw_init(adev);
+ r = jpeg_v4_0_3_hw_init(ip_block);
return r;
}
@@ -674,11 +680,12 @@ void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
- }
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x80004000);
+ amdgpu_ring_write(ring,
+ PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
+ 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80004000);
+ }
}
/**
@@ -694,11 +701,12 @@ void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x62a04);
- }
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x00004000);
+ amdgpu_ring_write(ring,
+ PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, 0,
+ 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x00004000);
+ }
}
/**
@@ -743,14 +751,6 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x3fbc);
-
- amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
- 0, 0, PACKETJ_TYPE0));
- amdgpu_ring_write(ring, 0x1);
-
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
amdgpu_ring_write(ring, 0);
@@ -929,9 +929,9 @@ static bool jpeg_v4_0_3_is_idle(void *handle)
return ret;
}
-static int jpeg_v4_0_3_wait_for_idle(void *handle)
+static int jpeg_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
int i, j;
@@ -949,16 +949,16 @@ static int jpeg_v4_0_3_wait_for_idle(void *handle)
return ret;
}
-static int jpeg_v4_0_3_set_clockgating_state(void *handle,
+static int jpeg_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (enable) {
- if (!jpeg_v4_0_3_is_idle(handle))
+ if (!jpeg_v4_0_3_is_idle(adev))
return -EBUSY;
jpeg_v4_0_3_enable_clock_gating(adev, i);
} else {
@@ -968,10 +968,10 @@ static int jpeg_v4_0_3_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v4_0_3_set_powergating_state(void *handle,
+static int jpeg_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev)) {
@@ -1058,7 +1058,6 @@ static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
.name = "jpeg_v4_0_3",
.early_init = jpeg_v4_0_3_early_init,
- .late_init = NULL,
.sw_init = jpeg_v4_0_3_sw_init,
.sw_fini = jpeg_v4_0_3_sw_fini,
.hw_init = jpeg_v4_0_3_hw_init,
@@ -1067,14 +1066,8 @@ static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = {
.resume = jpeg_v4_0_3_resume,
.is_idle = jpeg_v4_0_3_is_idle,
.wait_for_idle = jpeg_v4_0_3_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_3_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_3_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
@@ -1088,7 +1081,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
8 + /* jpeg_v4_0_3_dec_ring_emit_vm_flush */
- 22 + 22 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */
+ 18 + 18 + /* jpeg_v4_0_3_dec_ring_emit_fence x2 vm fence */
8 + 16,
.emit_ib_size = 22, /* jpeg_v4_0_3_dec_ring_emit_ib */
.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
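The emit_frame_size change above follows directly from the fence hunk: each amdgpu_ring_write() pushes one dword, and the deleted block was two PACKETJ headers plus two payload dwords, so the per-fence budget drops from 22 to 18 dwords. A one-line check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int old_fence_dw = 22;
	int removed_writes = 4;		/* 2 x (PACKETJ header + data) */
	int new_fence_dw = old_fence_dw - removed_writes;

	printf("per-fence: %d dwords, x2 vm fence: %d\n",
	       new_fence_dw, 2 * new_fence_dw);	/* 18 and 36 */
	return 0;
}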
@@ -1238,9 +1231,95 @@ static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
.reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
};
+static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* reference to the smu driver i/f (interface) header file */
+static int jpeg_v4_0_3_err_codes[] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31
+};
+
+static bool jpeg_v4_0_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ jpeg_v4_0_3_err_codes,
+ ARRAY_SIZE(jpeg_v4_0_3_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops jpeg_v4_0_3_aca_bank_ops = {
+ .aca_bank_parser = jpeg_v4_0_3_aca_bank_parser,
+ .aca_bank_is_valid = jpeg_v4_0_3_aca_bank_is_valid,
+};
+
+static const struct aca_info jpeg_v4_0_3_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &jpeg_v4_0_3_aca_bank_ops,
+};
+
+static int jpeg_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v4_0_3_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
static struct amdgpu_jpeg_ras jpeg_v4_0_3_ras = {
.ras_block = {
.hw_ops = &jpeg_v4_0_3_ras_hw_ops,
+ .ras_late_init = jpeg_v4_0_3_ras_late_init,
},
};
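The new ACA (RAS error bank) plumbing filters banks in two steps: mask bit 0 off the IPID instance-low field with GENMASK(31, 1) and match it against the AID0 SMU instance, then accept only the sixteen JPEG[0-7][S|D] error codes. A standalone sketch of both checks (GENMASK reimplemented for userspace; the instance value below is made up):

#include <stdio.h>
#include <stdint.h>

/* userspace stand-in for the kernel's GENMASK(h, l) macro */
#define GENMASK_U32(h, l) \
	((~0u >> (31 - (h))) & (~0u << (l)))

static int code_in_list(int code, const int *list, int n)
{
	for (int i = 0; i < n; i++)
		if (list[i] == code)
			return 1;
	return 0;
}

int main(void)
{
	/* JPEG[0-7][S|D] error codes from the diff */
	static const int jpeg_err_codes[] = {
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
	};
	uint32_t instlo = 0x1235;		/* made-up IPID low word */

	instlo &= GENMASK_U32(31, 1);		/* drop bit 0, as the parser does */
	printf("instlo masked: 0x%x\n", instlo);
	printf("code 20 is a JPEG code: %d\n",
	       code_in_list(20, jpeg_err_codes, 16));
	return 0;
}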
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 44eeed445ea9..6b3656984957 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -48,7 +48,7 @@
static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v4_0_5_set_powergating_state(void *handle,
+static int jpeg_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);
@@ -61,13 +61,13 @@ static int amdgpu_ih_clientid_jpeg[] = {
/**
* jpeg_v4_0_5_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v4_0_5_early_init(void *handle)
+static int jpeg_v4_0_5_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
case IP_VERSION(4, 0, 5):
@@ -94,13 +94,13 @@ static int jpeg_v4_0_5_early_init(void *handle)
/**
* jpeg_v4_0_5_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v4_0_5_sw_init(void *handle)
+static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r, i;
@@ -153,25 +153,33 @@ static int jpeg_v4_0_5_sw_init(void *handle)
adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH);
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
/**
* jpeg_v4_0_5_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v4_0_5_sw_fini(void *handle)
+static int jpeg_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_jpeg_suspend(adev);
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -180,12 +188,12 @@ static int jpeg_v4_0_5_sw_fini(void *handle)
/**
* jpeg_v4_0_5_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v4_0_5_hw_init(void *handle)
+static int jpeg_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r = 0;
@@ -210,16 +218,16 @@ static int jpeg_v4_0_5_hw_init(void *handle)
/**
* jpeg_v4_0_5_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v4_0_5_hw_fini(void *handle)
+static int jpeg_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
@@ -228,7 +236,7 @@ static int jpeg_v4_0_5_hw_fini(void *handle)
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS))
- jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v4_0_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
return 0;
@@ -237,20 +245,19 @@ static int jpeg_v4_0_5_hw_fini(void *handle)
/**
* jpeg_v4_0_5_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v4_0_5_suspend(void *handle)
+static int jpeg_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v4_0_5_hw_fini(adev);
+ r = jpeg_v4_0_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -258,20 +265,19 @@ static int jpeg_v4_0_5_suspend(void *handle)
/**
* jpeg_v4_0_5_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v4_0_5_resume(void *handle)
+static int jpeg_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v4_0_5_hw_init(adev);
+ r = jpeg_v4_0_5_hw_init(ip_block);
return r;
}
@@ -637,9 +643,9 @@ static bool jpeg_v4_0_5_is_idle(void *handle)
return ret;
}
-static int jpeg_v4_0_5_wait_for_idle(void *handle)
+static int jpeg_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -654,10 +660,10 @@ static int jpeg_v4_0_5_wait_for_idle(void *handle)
return 0;
}
-static int jpeg_v4_0_5_set_clockgating_state(void *handle,
+static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
@@ -666,7 +672,7 @@ static int jpeg_v4_0_5_set_clockgating_state(void *handle,
continue;
if (enable) {
- if (!jpeg_v4_0_5_is_idle(handle))
+ if (!jpeg_v4_0_5_is_idle(adev))
return -EBUSY;
jpeg_v4_0_5_enable_clock_gating(adev, i);
@@ -678,10 +684,10 @@ static int jpeg_v4_0_5_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v4_0_5_set_powergating_state(void *handle,
+static int jpeg_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev)) {
@@ -743,7 +749,6 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.name = "jpeg_v4_0_5",
.early_init = jpeg_v4_0_5_early_init,
- .late_init = NULL,
.sw_init = jpeg_v4_0_5_sw_init,
.sw_fini = jpeg_v4_0_5_sw_fini,
.hw_init = jpeg_v4_0_5_hw_init,
@@ -752,14 +757,8 @@ static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
.resume = jpeg_v4_0_5_resume,
.is_idle = jpeg_v4_0_5_is_idle,
.wait_for_idle = jpeg_v4_0_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
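Every multi-instance loop in these files guards on `adev->jpeg.harvest_config & (1 << i)` so that fused-off (harvested) JPEG instances are skipped before any register access. The idiom in isolation, with an assumed harvest mask:

#include <stdio.h>

int main(void)
{
	/* harvest_config marks fused-off instances; bit i set => skip inst i */
	unsigned int harvest_config = 0x2;	/* assume instance 1 is harvested */
	int num_inst = 2;

	for (int i = 0; i < num_inst; ++i) {
		if (harvest_config & (1u << i))
			continue;
		printf("programming jpeg instance %d\n", i);
	}
	return 0;
}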
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index d662aa841f97..d5cf0f2799d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -31,24 +31,24 @@
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
-#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "jpeg_v5_0_0.h"
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
-static int jpeg_v5_0_0_set_powergating_state(void *handle,
+static int jpeg_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
/**
* jpeg_v5_0_0_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int jpeg_v5_0_0_early_init(void *handle)
+static int jpeg_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
@@ -62,19 +62,19 @@ static int jpeg_v5_0_0_early_init(void *handle)
/**
* jpeg_v5_0_0_sw_init - sw init for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int jpeg_v5_0_0_sw_init(void *handle)
+static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r;
/* JPEG TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
- VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
+ VCN_5_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
if (r)
return r;
@@ -100,25 +100,32 @@ static int jpeg_v5_0_0_sw_init(void *handle)
adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->jpeg.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
+ r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
return 0;
}
/**
* jpeg_v5_0_0_sw_fini - sw fini for JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* JPEG suspend and free up sw allocation
*/
-static int jpeg_v5_0_0_sw_fini(void *handle)
+static int jpeg_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_jpeg_suspend(adev);
if (r)
return r;
+ amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);
return r;
@@ -127,12 +134,12 @@ static int jpeg_v5_0_0_sw_fini(void *handle)
/**
* jpeg_v5_0_0_hw_init - start and test JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int jpeg_v5_0_0_hw_init(void *handle)
+static int jpeg_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
@@ -153,19 +160,19 @@ static int jpeg_v5_0_0_hw_init(void *handle)
/**
* jpeg_v5_0_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the JPEG block, mark ring as not ready any more
*/
-static int jpeg_v5_0_0_hw_fini(void *handle)
+static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
- jpeg_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ jpeg_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -173,20 +180,19 @@ static int jpeg_v5_0_0_hw_fini(void *handle)
/**
* jpeg_v5_0_0_suspend - suspend JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend JPEG block
*/
-static int jpeg_v5_0_0_suspend(void *handle)
+static int jpeg_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = jpeg_v5_0_0_hw_fini(adev);
+ r = jpeg_v5_0_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_jpeg_suspend(adev);
+ r = amdgpu_jpeg_suspend(ip_block->adev);
return r;
}
@@ -194,20 +200,19 @@ static int jpeg_v5_0_0_suspend(void *handle)
/**
* jpeg_v5_0_0_resume - resume JPEG block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init JPEG block
*/
-static int jpeg_v5_0_0_resume(void *handle)
+static int jpeg_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_jpeg_resume(adev);
+ r = amdgpu_jpeg_resume(ip_block->adev);
if (r)
return r;
- r = jpeg_v5_0_0_hw_init(adev);
+ r = jpeg_v5_0_0_hw_init(ip_block);
return r;
}
@@ -546,23 +551,23 @@ static bool jpeg_v5_0_0_is_idle(void *handle)
return ret;
}
-static int jpeg_v5_0_0_wait_for_idle(void *handle)
+static int jpeg_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
-static int jpeg_v5_0_0_set_clockgating_state(void *handle,
+static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (enable) {
- if (!jpeg_v5_0_0_is_idle(handle))
+ if (!jpeg_v5_0_0_is_idle(adev))
return -EBUSY;
jpeg_v5_0_0_enable_clock_gating(adev);
} else {
@@ -572,10 +577,10 @@ static int jpeg_v5_0_0_set_clockgating_state(void *handle,
return 0;
}
-static int jpeg_v5_0_0_set_powergating_state(void *handle,
+static int jpeg_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->jpeg.cur_state)
@@ -607,7 +612,7 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
DRM_DEBUG("IH: JPEG TRAP\n");
switch (entry->src_id) {
- case VCN_4_0__SRCID__JPEG_DECODE:
+ case VCN_5_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
@@ -622,7 +627,6 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.name = "jpeg_v5_0_0",
.early_init = jpeg_v5_0_0_early_init,
- .late_init = NULL,
.sw_init = jpeg_v5_0_0_sw_init,
.sw_fini = jpeg_v5_0_0_sw_fini,
.hw_init = jpeg_v5_0_0_hw_init,
@@ -631,14 +635,8 @@ static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
.resume = jpeg_v5_0_0_resume,
.is_idle = jpeg_v5_0_0_is_idle,
.wait_for_idle = jpeg_v5_0_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = jpeg_v5_0_0_set_clockgating_state,
.set_powergating_state = jpeg_v5_0_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
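jpeg_v5_0_0 also migrates its interrupt sources from the 4.0 to the 5.0 header (VCN_4_0__SRCID__JPEG_DECODE becomes VCN_5_0__SRCID__JPEG_DECODE), and the new jpeg_v5_0_1 file below registers ten per-ring decode sources from the same header. Interrupt dispatch is a plain src_id switch; a sketch with assumed numeric values (the real ones come from ivsrcid/vcn/irqsrcs_vcn_5_0.h):

#include <stdio.h>

/* assumed values purely for the sketch */
enum { JPEG_DECODE = 73, JPEG1_DECODE = 74, JPEG2_DECODE = 75 };

static int ring_from_srcid(unsigned int src_id)
{
	switch (src_id) {
	case JPEG_DECODE:  return 0;
	case JPEG1_DECODE: return 1;
	case JPEG2_DECODE: return 2;
	default:           return -1;	/* unhandled source */
	}
}

int main(void)
{
	printf("src %d -> ring %d\n", JPEG1_DECODE, ring_from_srcid(JPEG1_DECODE));
	return 0;
}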
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
new file mode 100644
index 000000000000..40d4c32a8c2a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -0,0 +1,708 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2014-2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "jpeg_v4_0_3.h"
+#include "jpeg_v5_0_1.h"
+
+#include "vcn/vcn_5_0_0_offset.h"
+#include "vcn/vcn_5_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+
+static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev);
+static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
+static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);
+
+static int amdgpu_ih_srcid_jpeg[] = {
+ VCN_5_0__SRCID__JPEG_DECODE,
+ VCN_5_0__SRCID__JPEG1_DECODE,
+ VCN_5_0__SRCID__JPEG2_DECODE,
+ VCN_5_0__SRCID__JPEG3_DECODE,
+ VCN_5_0__SRCID__JPEG4_DECODE,
+ VCN_5_0__SRCID__JPEG5_DECODE,
+ VCN_5_0__SRCID__JPEG6_DECODE,
+ VCN_5_0__SRCID__JPEG7_DECODE,
+ VCN_5_0__SRCID__JPEG8_DECODE,
+ VCN_5_0__SRCID__JPEG9_DECODE,
+};
+
+static int jpeg_v5_0_1_core_reg_offset(u32 pipe)
+{
+ if (pipe <= AMDGPU_MAX_JPEG_RINGS_4_0_3)
+ return ((0x40 * pipe) - 0xc80);
+ else
+ return ((0x40 * pipe) - 0x440);
+}
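A standalone check of the per-pipe stride above: JRBC cores sit 0x40 registers apart, with the aperture split at AMDGPU_MAX_JPEG_RINGS_4_0_3 (assumed to be 8 here, matching the v4.0.3 code this file reuses). The offsets are relative to a base register and can legitimately come out negative:

#include <stdio.h>

#define MAX_RINGS_4_0_3 8	/* assumed value for the sketch */

static int core_reg_offset(int pipe)
{
	if (pipe <= MAX_RINGS_4_0_3)
		return (0x40 * pipe) - 0xc80;
	return (0x40 * pipe) - 0x440;
}

int main(void)
{
	for (int pipe = 1; pipe < 10; pipe++)
		printf("pipe %d -> offset %d\n", pipe, core_reg_offset(pipe));
	return 0;
}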
+
+/**
+ * jpeg_v5_0_1_early_init - set function pointers
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Set ring and irq function pointers
+ */
+static int jpeg_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ if (!adev->jpeg.num_jpeg_inst || adev->jpeg.num_jpeg_inst > AMDGPU_MAX_JPEG_INSTANCES)
+ return -ENOENT;
+
+ adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
+ jpeg_v5_0_1_set_dec_ring_funcs(adev);
+ jpeg_v5_0_1_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_sw_init - sw init for JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Load firmware and sw initialization
+ */
+static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, j, r, jpeg_inst;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ /* JPEG TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ amdgpu_ih_srcid_jpeg[j], &adev->jpeg.inst->irq);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_jpeg_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ ring->use_doorbell = false;
+ ring->vm_hub = AMDGPU_MMHUB0(adev->jpeg.inst[i].aid_id);
+ if (!amdgpu_sriov_vf(adev)) {
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 1 + j + 11 * jpeg_inst;
+ } else {
+ if (j < 4)
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 4 + j + 32 * jpeg_inst;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 8 + j + 32 * jpeg_inst;
+ }
+ sprintf(ring->name, "jpeg_dec_%d.%d", adev->jpeg.inst[i].aid_id, j);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ return r;
+
+ adev->jpeg.internal.jpeg_pitch[j] =
+ regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET;
+ adev->jpeg.inst[i].external.jpeg_pitch[j] =
+ SOC15_REG_OFFSET1(JPEG, jpeg_inst, regUVD_JRBC_SCRATCH0,
+ (j ? jpeg_v5_0_1_core_reg_offset(j) : 0));
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_sw_fini - sw fini for JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * JPEG suspend and free up sw allocation
+ */
+static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_jpeg_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_sw_fini(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v5_0_1_hw_init - start and test JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ */
+static int jpeg_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, j, r, jpeg_inst;
+
+ if (amdgpu_sriov_vf(adev)) {
+ /* jpeg_v5_0_1_start_sriov(adev); */
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ jpeg_v5_0_1_dec_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ }
+ return 0;
+ }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+ ring = adev->jpeg.inst[i].ring_dec;
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 11 * jpeg_inst,
+ adev->jpeg.inst[i].aid_id);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (ring->use_doorbell)
+ WREG32_SOC15_OFFSET(VCN, GET_INST(VCN, i), regVCN_JPEG_DB_CTRL,
+ (ring->pipe ? (ring->pipe - 0x15) : 0),
+ ring->doorbell_index <<
+ VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
+ VCN_JPEG_DB_CTRL__EN_MASK);
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_hw_fini - stop the hardware block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Stop the JPEG block, mark ring as not ready any more
+ */
+static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret = 0;
+
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+
+ return ret;
+}
+
+/**
+ * jpeg_v5_0_1_suspend - suspend JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * HW fini and suspend JPEG block
+ */
+static int jpeg_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = jpeg_v5_0_1_hw_fini(ip_block);
+ if (r)
+ return r;
+
+ r = amdgpu_jpeg_suspend(adev);
+
+ return r;
+}
+
+/**
+ * jpeg_v5_0_1_resume - resume JPEG block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Resume firmware and hw init JPEG block
+ */
+static int jpeg_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_jpeg_resume(adev);
+ if (r)
+ return r;
+
+ r = jpeg_v5_0_1_hw_init(ip_block);
+
+ return r;
+}
+
+static int jpeg_v5_0_1_disable_antihang(struct amdgpu_device *adev, int inst_idx)
+{
+ int jpeg_inst;
+
+ jpeg_inst = GET_INST(JPEG, inst_idx);
+ /* disable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ /* keep the JPEG in static PG mode */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS), 0,
+ ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
+
+ return 0;
+}
+
+static int jpeg_v5_0_1_enable_antihang(struct amdgpu_device *adev, int inst_idx)
+{
+ int jpeg_inst;
+
+ jpeg_inst = GET_INST(JPEG, inst_idx);
+ /* enable anti hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JPEG_POWER_STATUS),
+ UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
+ ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_start - start JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the JPEG block
+ */
+static int jpeg_v5_0_1_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i, j, jpeg_inst, r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+
+ /* disable antihang */
+ r = jpeg_v5_0_1_disable_antihang(adev, i);
+ if (r)
+ return r;
+
+ /* MJPEG global tiling registers */
+ WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* enable JMI channel */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL), 0,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
+ u32 reg, data, mask;
+
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+
+ /* enable System Interrupt for JRBC */
+ reg = SOC15_REG_OFFSET(JPEG, jpeg_inst, regJPEG_SYS_INT_EN);
+ if (j < AMDGPU_MAX_JPEG_RINGS_4_0_3) {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << j;
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << j);
+ WREG32_P(reg, data, mask);
+ } else {
+ data = JPEG_SYS_INT_EN__DJRBC0_MASK << (j + 12);
+ mask = ~(JPEG_SYS_INT_EN__DJRBC0_MASK << (j + 12));
+ WREG32_P(reg, data, mask);
+ }
+
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_VMID,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset,
+ (0x00000001L | 0x00000002L));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
+ reg_offset, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
+ reg_offset, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_RPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_WPTR,
+ reg_offset, 0);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_CNTL,
+ reg_offset, 0x00000002L);
+ WREG32_SOC15_OFFSET(JPEG, jpeg_inst,
+ regUVD_JRBC_RB_SIZE,
+ reg_offset, ring->ring_size / 4);
+ ring->wptr = RREG32_SOC15_OFFSET(JPEG, jpeg_inst, regUVD_JRBC_RB_WPTR,
+ reg_offset);
+ }
+ }
+
+ return 0;
+}
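
jpeg_v5_0_1_start() programs each JRBC ring in a fixed order: VMID, hold the ring via RB_CNTL, base address, zeroed read/write pointers, release RB_CNTL, ring size in dwords, and finally a read-back of the initial write pointer. A minimal user-space model of that order, with reg_write()/reg_read() stubs standing in for the WREG32/RREG32 SOC15 helpers (the 0x1/0x2 RB_CNTL values are carried over from the code above without interpreting the bits):

#include <stdint.h>

static uint32_t regs[8];
static void reg_write(unsigned int r, uint32_t v) { regs[r] = v; }
static uint32_t reg_read(unsigned int r) { return regs[r]; }

enum { RB_VMID, RB_CNTL, RB_BAR_LO, RB_BAR_HI, RB_RPTR, RB_WPTR, RB_SIZE };

static uint32_t jrbc_ring_setup(uint64_t gpu_addr, uint32_t ring_bytes)
{
	reg_write(RB_VMID, 0);			/* kernel VMID */
	reg_write(RB_CNTL, 0x1 | 0x2);		/* hold ring while programming */
	reg_write(RB_BAR_LO, (uint32_t)gpu_addr);
	reg_write(RB_BAR_HI, (uint32_t)(gpu_addr >> 32));
	reg_write(RB_RPTR, 0);
	reg_write(RB_WPTR, 0);
	reg_write(RB_CNTL, 0x2);		/* release the ring */
	reg_write(RB_SIZE, ring_bytes / 4);	/* size in dwords */
	return reg_read(RB_WPTR);		/* cache initial wptr */
}
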
+
+/**
+ * jpeg_v5_0_1_stop - stop JPEG block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the JPEG block
+ */
+static int jpeg_v5_0_1_stop(struct amdgpu_device *adev)
+{
+ int i, jpeg_inst, r;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ jpeg_inst = GET_INST(JPEG, i);
+ /* reset JMI */
+ WREG32_P(SOC15_REG_OFFSET(JPEG, jpeg_inst, regUVD_JMI_CNTL),
+ UVD_JMI_CNTL__SOFT_RESET_MASK,
+ ~UVD_JMI_CNTL__SOFT_RESET_MASK);
+
+ /* enable antihang */
+ r = jpeg_v5_0_1_enable_antihang(adev, i);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * jpeg_v5_0_1_dec_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t jpeg_v5_0_1_dec_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_RPTR,
+ ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
+}
+
+/**
+ * jpeg_v5_0_1_dec_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t jpeg_v5_0_1_dec_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+
+ return RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me), regUVD_JRBC_RB_WPTR,
+ ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0);
+}
+
+/**
+ * jpeg_v5_0_1_dec_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, ring->me),
+ regUVD_JRBC_RB_WPTR,
+ (ring->pipe ? jpeg_v5_0_1_core_reg_offset(ring->pipe) : 0),
+ lower_32_bits(ring->wptr));
+ }
+}
+
+static bool jpeg_v5_0_1_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool ret = true;
+ int i, j;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
+
+ ret &= ((RREG32_SOC15_OFFSET(JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC_STATUS, reg_offset) &
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ }
+ }
+
+ return ret;
+}
+
+static int jpeg_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ int reg_offset = (j ? jpeg_v5_0_1_core_reg_offset(j) : 0);
+
+ ret |= SOC15_WAIT_ON_RREG_OFFSET(JPEG, GET_INST(JPEG, i),
+ regUVD_JRBC_STATUS, reg_offset,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
+ UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
+ }
+ }
+ return ret;
+}
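
Both checks accumulate per-ring results, so the accumulator's identity element matters: the boolean AND in is_idle() must start from true (one busy ring then makes the whole result false), while the error accumulation in wait_for_idle() must start from 0 and OR in any -ETIMEDOUT. A self-contained sketch of the idle half, with job_done() standing in for the JRBC status read:

#include <stdbool.h>

/* stand-in for (RREG32(...) & RB_JOB_DONE_MASK) == RB_JOB_DONE_MASK */
static bool job_done(unsigned int inst, unsigned int ring)
{
	(void)inst; (void)ring;
	return true;
}

static bool jpeg_all_idle(unsigned int num_inst, unsigned int num_rings)
{
	bool idle = true;	/* identity element for &= */
	unsigned int i, j;

	for (i = 0; i < num_inst; i++)
		for (j = 0; j < num_rings; j++)
			idle &= job_done(i, j);
	return idle;
}
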
+
+static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool enable = (state == AMD_CG_STATE_GATE);
+ int i;
+
+ if (!enable)
+ return 0;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (!jpeg_v5_0_1_is_idle(adev))
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret;
+
+ if (state == adev->jpeg.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = jpeg_v5_0_1_stop(adev);
+ else
+ ret = jpeg_v5_0_1_start(adev);
+
+ if (!ret)
+ adev->jpeg.cur_state = state;
+
+ return ret;
+}
+
+static int jpeg_v5_0_1_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ return 0;
+}
+
+static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ u32 i, inst;
+
+ i = node_id_to_phys_map[entry->node_id];
+ DRM_DEV_DEBUG(adev->dev, "IH: JPEG TRAP\n");
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; ++inst)
+ if (adev->jpeg.inst[inst].aid_id == i)
+ break;
+
+ if (inst >= adev->jpeg.num_jpeg_inst) {
+ dev_WARN_ONCE(adev->dev, 1,
+ "Interrupt received for unknown JPEG instance %d",
+ entry->node_id);
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case VCN_5_0__SRCID__JPEG_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[0]);
+ break;
+ case VCN_5_0__SRCID__JPEG1_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[1]);
+ break;
+ case VCN_5_0__SRCID__JPEG2_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[2]);
+ break;
+ case VCN_5_0__SRCID__JPEG3_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[3]);
+ break;
+ case VCN_5_0__SRCID__JPEG4_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[4]);
+ break;
+ case VCN_5_0__SRCID__JPEG5_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[5]);
+ break;
+ case VCN_5_0__SRCID__JPEG6_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[6]);
+ break;
+ case VCN_5_0__SRCID__JPEG7_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[7]);
+ break;
+ case VCN_5_0__SRCID__JPEG8_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[8]);
+ break;
+ case VCN_5_0__SRCID__JPEG9_DECODE:
+ amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[9]);
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
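
The switch maps the ten decode SRCIDs onto ring_dec[0..9] one case at a time. If the VCN 5.0 header defines these SRCIDs as numerically consecutive values (an assumption worth verifying against the irqsrcs header before relying on it), the fan-out reduces to an index computation:

/* Hypothetical compression of the switch above; only valid if the
 * JPEG*_DECODE SRCIDs are consecutive. */
u32 idx = entry->src_id - VCN_5_0__SRCID__JPEG_DECODE;

if (idx < adev->jpeg.num_jpeg_rings)
	amdgpu_fence_process(&adev->jpeg.inst[inst].ring_dec[idx]);
else
	DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
		      entry->src_id, entry->src_data[0]);
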
+
+static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = {
+ .name = "jpeg_v5_0_1",
+ .early_init = jpeg_v5_0_1_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v5_0_1_sw_init,
+ .sw_fini = jpeg_v5_0_1_sw_fini,
+ .hw_init = jpeg_v5_0_1_hw_init,
+ .hw_fini = jpeg_v5_0_1_hw_fini,
+ .suspend = jpeg_v5_0_1_suspend,
+ .resume = jpeg_v5_0_1_resume,
+ .is_idle = jpeg_v5_0_1_is_idle,
+ .wait_for_idle = jpeg_v5_0_1_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v5_0_1_set_clockgating_state,
+ .set_powergating_state = jpeg_v5_0_1_set_powergating_state,
+ .dump_ip_state = NULL,
+ .print_ip_state = NULL,
+};
+
+static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
+ .get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
+ .set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v5_0_1_dec_ring_emit_vm_flush */
+ 22 + 22 + /* jpeg_v5_0_1_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v5_0_1_dec_ring_emit_ib */
+ .emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
+ .emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v4_0_3_dec_ring_nop,
+ .insert_start = jpeg_v4_0_3_dec_ring_insert_start,
+ .insert_end = jpeg_v4_0_3_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void jpeg_v5_0_1_set_dec_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, j, jpeg_inst;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ adev->jpeg.inst[i].ring_dec[j].funcs = &jpeg_v5_0_1_dec_ring_vm_funcs;
+ adev->jpeg.inst[i].ring_dec[j].me = i;
+ adev->jpeg.inst[i].ring_dec[j].pipe = j;
+ }
+ jpeg_inst = GET_INST(JPEG, i);
+ adev->jpeg.inst[i].aid_id =
+ jpeg_inst / adev->jpeg.num_inst_per_aid;
+ }
+}
+
+static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_irq_funcs = {
+ .set = jpeg_v5_0_1_set_interrupt_state,
+ .process = jpeg_v5_0_1_process_interrupt,
+};
+
+static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i)
+ adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
+
+ adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 5,
+ .minor = 0,
+ .rev = 1,
+ .funcs = &jpeg_v5_0_1_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
new file mode 100644
index 000000000000..8ce146c00bb6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __JPEG_V5_0_1_H__
+#define __JPEG_V5_0_1_H__
+
+extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
+
+#endif /* __JPEG_V5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 231a3d490ea8..65f389eb65e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -55,8 +55,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_2_mes1.bin");
-static int mes_v11_0_hw_init(void *handle);
-static int mes_v11_0_hw_fini(void *handle);
+static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block);
+static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
@@ -366,7 +366,7 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
uint32_t queue_id, uint32_t vmid)
{
struct amdgpu_device *adev = mes->adev;
- uint32_t value;
+ uint32_t value, reg;
int i, r = 0;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
@@ -424,6 +424,31 @@ static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_typ
}
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_SDMA) {
+ dev_info(adev->dev, "reset sdma queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ switch (me_id) {
+ case 1:
+ reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ);
+ break;
+ case 0:
+ default:
+ reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ);
+ break;
+ }
+
+ value = 1 << queue_id;
+ WREG32(reg, value);
+ /* wait for queue reset done */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32(reg) & value))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on sdma queue reset done\n");
+ r = -ETIMEDOUT;
+ }
}
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
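
The SDMA branch added here is a textbook write-then-poll reset: set the per-queue request bit, then spin until the hardware clears it or the timeout expires. A self-contained model of the idiom (reg_read()/reg_write(), udelay() and USEC_TIMEOUT are stand-ins, not driver API; the model "hardware" clears the bit after a few polls):

#include <stdint.h>

#define USEC_TIMEOUT 100000		/* models adev->usec_timeout */

static uint32_t queue_reset_reg;	/* models SDMA*_QUEUE_RESET_REQ */

static void reg_write(uint32_t v) { queue_reset_reg = v; }

static uint32_t reg_read(void)
{
	static int polls;

	if (++polls > 3)		/* pretend HW finishes after 3 polls */
		queue_reset_reg = 0;
	return queue_reset_reg;
}

static void udelay(unsigned int us) { (void)us; }

static int sdma_queue_reset(unsigned int queue_id)
{
	uint32_t value = 1u << queue_id;
	int i;

	reg_write(value);			/* request the reset */
	for (i = 0; i < USEC_TIMEOUT; i++) {
		if (!(reg_read() & value))	/* HW cleared the bit: done */
			return 0;
		udelay(1);
	}
	return -1;				/* -ETIMEDOUT in the driver */
}
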
@@ -619,6 +644,18 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
break;
+ case MES_MISC_OP_CHANGE_CONFIG:
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
+ dev_err(mes->adev->dev, "MES FW version must be at least 0x63 to support the limit single process feature.\n");
+ return -EINVAL;
+ }
+ misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
+ misc_pkt.change_config.opcode =
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS;
+ misc_pkt.change_config.option.bits.limit_single_process =
+ input->change_config.option.limit_single_process;
+ break;
+
default:
DRM_ERROR("unsupported misc op (%d) \n", input->op);
return -EINVAL;
@@ -683,6 +720,9 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->event_log_gpu_addr;
}
+ if (enforce_isolation)
+ mes_set_hw_res_pkt.limit_single_process = 1;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
@@ -883,6 +923,16 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
uint32_t pipe, data = 0;
if (enable) {
+ if (amdgpu_mes_log_enable) {
+ WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO,
+ lower_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE));
+ WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI,
+ upper_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE));
+ dev_info(adev->dev, "Set up CP MES MSCRATCH address: 0x%x 0x%x\n",
+ RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI),
+ RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO));
+ }
+
data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL,
@@ -1336,16 +1386,16 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
return 0;
}
-static int mes_v11_0_sw_init(void *handle)
+static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
adev->mes.funcs = &mes_v11_0_funcs;
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;
- adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
+ adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE;
r = amdgpu_mes_init(adev);
if (r)
@@ -1377,9 +1427,9 @@ static int mes_v11_0_sw_init(void *handle)
return 0;
}
-static int mes_v11_0_sw_fini(void *handle)
+static int mes_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
@@ -1455,9 +1505,7 @@ static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static void mes_v11_0_kiq_clear(struct amdgpu_device *adev)
@@ -1473,6 +1521,7 @@ static void mes_v11_0_kiq_clear(struct amdgpu_device *adev)
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
{
int r = 0;
+ struct amdgpu_ip_block *ip_block;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
@@ -1496,6 +1545,12 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
+ if (unlikely(!ip_block)) {
+ dev_err(adev->dev, "Failed to get MES handle\n");
+ return -EINVAL;
+ }
+
r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;
@@ -1506,7 +1561,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
adev->mes.enable_legacy_queue_map = false;
if (adev->mes.enable_legacy_queue_map) {
- r = mes_v11_0_hw_init(adev);
+ r = mes_v11_0_hw_init(ip_block);
if (r)
goto failure;
}
@@ -1514,7 +1569,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
return r;
failure:
- mes_v11_0_hw_fini(adev);
+ mes_v11_0_hw_fini(ip_block);
return r;
}
@@ -1535,10 +1590,10 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
return 0;
}
-static int mes_v11_0_hw_init(void *handle)
+static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->mes.ring[0].sched.ready)
goto out;
@@ -1590,13 +1645,13 @@ out:
return 0;
failure:
- mes_v11_0_hw_fini(adev);
+ mes_v11_0_hw_fini(ip_block);
return r;
}
-static int mes_v11_0_hw_fini(void *handle)
+static int mes_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_is_mes_info_enable(adev)) {
amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr,
&adev->mes.resource_1_addr);
@@ -1604,33 +1659,31 @@ static int mes_v11_0_hw_fini(void *handle)
return 0;
}
-static int mes_v11_0_suspend(void *handle)
+static int mes_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_mes_suspend(adev);
+ r = amdgpu_mes_suspend(ip_block->adev);
if (r)
return r;
- return mes_v11_0_hw_fini(adev);
+ return mes_v11_0_hw_fini(ip_block);
}
-static int mes_v11_0_resume(void *handle)
+static int mes_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = mes_v11_0_hw_init(adev);
+ r = mes_v11_0_hw_init(ip_block);
if (r)
return r;
- return amdgpu_mes_resume(adev);
+ return amdgpu_mes_resume(ip_block->adev);
}
-static int mes_v11_0_early_init(void *handle)
+static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
@@ -1644,9 +1697,9 @@ static int mes_v11_0_early_init(void *handle)
return 0;
}
-static int mes_v11_0_late_init(void *handle)
+static int mes_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* it's only intended for use in mes_self_test case, not for s0ix and reset */
if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
@@ -1666,8 +1719,6 @@ static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.hw_fini = mes_v11_0_hw_fini,
.suspend = mes_v11_0_suspend,
.resume = mes_v11_0_resume,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version mes_v11_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 8d27421689c9..901e924e69ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
+#include "gfx_v12_0.h"
#include "soc15_common.h"
#include "soc21.h"
#include "gc/gc_12_0_0_offset.h"
@@ -39,8 +40,8 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_uni_mes.bin");
-static int mes_v12_0_hw_init(void *handle);
-static int mes_v12_0_hw_fini(void *handle);
+static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block);
+static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block);
static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
@@ -350,6 +351,132 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
+int gfx_v12_0_request_gfx_index_mutex(struct amdgpu_device *adev,
+ bool req)
+{
+ u32 i, tmp, val;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ /* Request with MeId=2, PipeId=0 */
+ tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
+ tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
+ WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);
+
+ val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
+ if (req) {
+ if (val == tmp)
+ break;
+ } else {
+ tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
+ REQUEST, 1);
+
+ /* unlocked or locked by firmware */
+ if (val != tmp)
+ break;
+ }
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ return -EINVAL;
+
+ return 0;
+}
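
gfx_v12_0_request_gfx_index_mutex() claims or releases a mutex that lives in a register: it writes REQUEST plus a CLIENTID and polls the readback, treating an exact match as "acquired" and a mismatch against the still-requesting value as "released or held by firmware". The expected pairing around GRBM_GFX_INDEX programming, sketched under the assumption that callers check the acquire result (the reset path below does not):

mutex_lock(&adev->gfx.reset_sem_mutex);
if (!gfx_v12_0_request_gfx_index_mutex(adev, true)) {
	/* ... program GRBM_GFX_INDEX and dependent registers ... */
	gfx_v12_0_request_gfx_index_mutex(adev, false);
}
mutex_unlock(&adev->gfx.reset_sem_mutex);
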
+
+static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = mes->adev;
+ uint32_t value, reg;
+ int i, r = 0;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_GFX) {
+ dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
+ me_id, pipe_id, queue_id, vmid);
+
+ mutex_lock(&adev->gfx.reset_sem_mutex);
+ gfx_v12_0_request_gfx_index_mutex(adev, true);
+ /* all se allow writes */
+ WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
+ (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+ value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (pipe_id == 0)
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
+ else
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
+ WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
+ gfx_v12_0_request_gfx_index_mutex(adev, false);
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
+
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_SDMA) {
+ dev_info(adev->dev, "reset sdma queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ switch (me_id) {
+ case 1:
+ reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ);
+ break;
+ case 0:
+ default:
+ reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ);
+ break;
+ }
+
+ value = 1 << queue_id;
+ WREG32(reg, value);
+ /* wait for queue reset done */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32(reg) & value))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on sdma queue reset done\n");
+ r = -ETIMEDOUT;
+ }
+ }
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
+
static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input)
{
@@ -531,6 +658,14 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
break;
+ case MES_MISC_OP_CHANGE_CONFIG:
+ misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
+ misc_pkt.change_config.opcode =
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS;
+ misc_pkt.change_config.option.bits.limit_single_process =
+ input->change_config.option.limit_single_process;
+ break;
+
default:
DRM_ERROR("unsupported misc op (%d) \n", input->op);
return -EINVAL;
@@ -550,7 +685,7 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER;
mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
- mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;
+ mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa;
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
@@ -621,9 +756,13 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
- mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+ mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr +
+ pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
}
+ if (enforce_isolation)
+ mes_set_hw_res_pkt.limit_single_process = 1;
+
return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
@@ -710,6 +849,11 @@ static int mes_v12_0_reset_legacy_queue(struct amdgpu_mes *mes,
union MESAPI__RESET mes_reset_queue_pkt;
int pipe;
+ if (input->use_mmio)
+ return mes_v12_0_reset_queue_mmio(mes, input->queue_type,
+ input->me_id, input->pipe_id,
+ input->queue_id, input->vmid);
+
memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
@@ -840,29 +984,50 @@ static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
uint32_t pipe, data = 0;
if (enable) {
- data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
- WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
-
mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
soc21_grbm_select(adev, 3, pipe, 0, 0);
+ if (amdgpu_mes_log_enable) {
+ u32 log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE;
+ /* In case uni mes is not enabled, only program for pipe 0 */
+ if (adev->mes.event_log_size >= (pipe + 1) * log_size) {
+ WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO,
+ lower_32_bits(adev->mes.event_log_gpu_addr +
+ pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE));
+ WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI,
+ upper_32_bits(adev->mes.event_log_gpu_addr +
+ pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE));
+ dev_info(adev->dev, "Set up CP MES MSCRATCH address: 0x%x 0x%x\n",
+ RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI),
+ RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO));
+ }
+ }
+
+ data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
+ if (pipe == 0)
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
+ else
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
+ WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
lower_32_bits(ucode_addr));
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
upper_32_bits(ucode_addr));
+
+ /* unhalt MES and activate one pipe per loop iteration */
+ data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
+ if (pipe)
+ data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
+ dev_info(adev->dev, "program CP_MES_CNTL : 0x%x\n", data);
+
+ WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
+
}
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- /* unhalt MES and activate pipe0 */
- data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
- data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
- WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
-
if (amdgpu_emu_mode)
msleep(100);
else if (adev->enable_uni_mes)
@@ -1326,9 +1491,9 @@ static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
return 0;
}
-static int mes_v12_0_sw_init(void *handle)
+static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
adev->mes.funcs = &mes_v12_0_funcs;
@@ -1336,8 +1501,9 @@ static int mes_v12_0_sw_init(void *handle)
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
adev->mes.enable_legacy_queue_map = true;
- adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
-
+ adev->mes.event_log_size = adev->enable_uni_mes ?
+ (AMDGPU_MAX_MES_PIPES * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE)) :
+ (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
r = amdgpu_mes_init(adev);
if (r)
return r;
@@ -1362,9 +1528,9 @@ static int mes_v12_0_sw_init(void *handle)
return 0;
}
-static int mes_v12_0_sw_fini(void *handle)
+static int mes_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
@@ -1444,14 +1610,13 @@ static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring)
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
- tmp |= 0x80;
- WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
+ WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
{
int r = 0;
+ struct amdgpu_ip_block *ip_block;
if (adev->enable_uni_mes)
mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]);
@@ -1479,6 +1644,12 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v12_0_enable(adev, true);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
+ if (unlikely(!ip_block)) {
+ dev_err(adev->dev, "Failed to get MES handle\n");
+ return -EINVAL;
+ }
+
r = mes_v12_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;
@@ -1492,7 +1663,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
}
if (adev->mes.enable_legacy_queue_map) {
- r = mes_v12_0_hw_init(adev);
+ r = mes_v12_0_hw_init(ip_block);
if (r)
goto failure;
}
@@ -1500,7 +1671,7 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
return r;
failure:
- mes_v12_0_hw_fini(adev);
+ mes_v12_0_hw_fini(ip_block);
return r;
}
@@ -1522,10 +1693,10 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev)
return 0;
}
-static int mes_v12_0_hw_init(void *handle)
+static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->mes.ring[0].sched.ready)
goto out;
@@ -1584,42 +1755,40 @@ out:
return 0;
failure:
- mes_v12_0_hw_fini(adev);
+ mes_v12_0_hw_fini(ip_block);
return r;
}
-static int mes_v12_0_hw_fini(void *handle)
+static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
-static int mes_v12_0_suspend(void *handle)
+static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_mes_suspend(adev);
+ r = amdgpu_mes_suspend(ip_block->adev);
if (r)
return r;
- return mes_v12_0_hw_fini(adev);
+ return mes_v12_0_hw_fini(ip_block);
}
-static int mes_v12_0_resume(void *handle)
+static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = mes_v12_0_hw_init(adev);
+ r = mes_v12_0_hw_init(ip_block);
if (r)
return r;
- return amdgpu_mes_resume(adev);
+ return amdgpu_mes_resume(ip_block->adev);
}
-static int mes_v12_0_early_init(void *handle)
+static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
@@ -1631,9 +1800,9 @@ static int mes_v12_0_early_init(void *handle)
return 0;
}
-static int mes_v12_0_late_init(void *handle)
+static int mes_v12_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* it's only intended for use in mes_self_test case, not for s0ix and reset */
if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index e3ddd22aa172..243eabda0607 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -229,6 +229,52 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
0);
}
+static void mmhub_v1_0_init_saw(struct amdgpu_device *adev)
+{
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+ uint32_t tmp;
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ lower_32_bits(pt_base >> 12));
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ upper_32_bits(pt_base >> 12));
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+
+ /* VM_9_X_REGISTER_VM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 */
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+
+ /* Program SAW CONTEXT0 CNTL */
+ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL);
+ tmp |= 1 << CONTEXT0_CNTL_ENABLE_OFFSET;
+ tmp &= ~(3 << CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET);
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXT0_CNTL, tmp);
+
+ /* Disable all Contexts except Context0 */
+ tmp = 0xfffe;
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CONTEXTS_DISABLE, tmp);
+
+ /* Program SAW CNTL4 */
+ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4);
+ tmp |= 1 << VMC_TAP_PDE_REQUEST_SNOOP_OFFSET;
+ tmp |= 1 << VMC_TAP_PTE_REQUEST_SNOOP_OFFSET;
+ WREG32_SOC15(MMHUB, 0, mmVM_L2_SAW_CNTL4, tmp);
+}
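
All the SAW address programming above follows one split: the byte address is converted to a 4 KiB page frame number, whose low 32 bits land in the *_LO32 register and whose remainder lands in *_HI32 (hence the paired `>> 12` and `>> 44` shifts). A small self-contained restatement of that arithmetic:

#include <stdint.h>

static void split_pfn(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	uint64_t pfn = addr >> 12;	/* 4 KiB pages */

	*lo = (uint32_t)pfn;
	*hi = (uint32_t)(pfn >> 32);	/* equals addr >> 44 */
}
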
+
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
@@ -283,6 +329,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
i * hub->ctx_addr_distance,
upper_32_bits(adev->vm_manager.max_pfn - 1));
}
+
+ if (amdgpu_ip_version(adev, ISP_HWIP, 0))
+ mmhub_v1_0_init_saw(adev);
}
static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
@@ -307,7 +356,7 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GMC,
- enable);
+ enable, 0);
}
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index b01bb759d0f4..e646e5cef0a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -33,7 +33,6 @@
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
-#define mmSMNAID_AID0_MCA_SMU 0x03b30400
static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
index 0fbc3be81f14..f2ab5001b492 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
@@ -108,7 +108,7 @@ mmhub_v4_1_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
dev_err(adev->dev,
"MMVM_L2_PROTECTION_FAULT_STATUS_LO32:0x%08X\n",
status);
- switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
mmhub_cid = mmhub_client_ids_v4_1_0[cid][rw];
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index f47bd7ada4d7..4dcb72d1bdda 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -61,15 +61,18 @@ static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
enum idh_event event)
{
+ int r = 0;
u32 reg;
reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
- if (reg != event)
+ if (reg == IDH_FAIL)
+ r = -EINVAL;
+ else if (reg != event)
return -ENOENT;
xgpu_nv_mailbox_send_ack(adev);
- return 0;
+ return r;
}
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
@@ -178,6 +181,9 @@ send_request:
if (data1 != 0)
event = IDH_RAS_POISON_READY;
break;
+ case IDH_REQ_RAS_ERROR_COUNT:
+ event = IDH_RAS_ERROR_COUNT_READY;
+ break;
default:
break;
}
@@ -456,6 +462,11 @@ static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}
+static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -466,4 +477,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.trans_msg = xgpu_nv_mailbox_trans_msg,
.ras_poison_handler = xgpu_nv_ras_poison_handler,
.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
+ .req_ras_err_count = xgpu_nv_req_ras_err_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 1d099ffb3a5a..9d61d76e1bf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -40,6 +40,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
IDH_RAS_POISON = 202,
+ IDH_REQ_RAS_ERROR_COUNT = 203,
};
enum idh_event {
@@ -54,6 +55,8 @@ enum idh_event {
IDH_RAS_POISON_READY,
IDH_PF_SOFT_FLR_NOTIFICATION,
IDH_RAS_ERROR_DETECTED,
+ IDH_RAS_ERROR_COUNT_READY = 11,
+
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index b281462093f1..62cdfe10e6f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -434,9 +434,8 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
* this should allow us to catch up.
*/
tmp = (wptr + 32) & ih->ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow "
- "(0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, tmp);
+ dev_warn(adev->dev, "%s ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ amdgpu_ih_ring_name(adev, ih), wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
@@ -542,19 +541,19 @@ static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
}
-static int navi10_ih_early_init(void *handle)
+static int navi10_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
navi10_ih_set_interrupt_funcs(adev);
navi10_ih_set_self_irq_funcs(adev);
return 0;
}
-static int navi10_ih_sw_init(void *handle)
+static int navi10_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool use_bus_addr;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
@@ -593,43 +592,37 @@ static int navi10_ih_sw_init(void *handle)
return r;
}
-static int navi10_ih_sw_fini(void *handle)
+static int navi10_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int navi10_ih_hw_init(void *handle)
+static int navi10_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return navi10_ih_irq_init(adev);
}
-static int navi10_ih_hw_fini(void *handle)
+static int navi10_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- navi10_ih_irq_disable(adev);
+ navi10_ih_irq_disable(ip_block->adev);
return 0;
}
-static int navi10_ih_suspend(void *handle)
+static int navi10_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return navi10_ih_hw_fini(adev);
+ return navi10_ih_hw_fini(ip_block);
}
-static int navi10_ih_resume(void *handle)
+static int navi10_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return navi10_ih_hw_init(adev);
+ return navi10_ih_hw_init(ip_block);
}
static bool navi10_ih_is_idle(void *handle)
@@ -638,13 +631,13 @@ static bool navi10_ih_is_idle(void *handle)
return true;
}
-static int navi10_ih_wait_for_idle(void *handle)
+static int navi10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int navi10_ih_soft_reset(void *handle)
+static int navi10_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
@@ -673,17 +666,17 @@ static void navi10_ih_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int navi10_ih_set_clockgating_state(void *handle,
+static int navi10_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
navi10_ih_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
return 0;
}
-static int navi10_ih_set_powergating_state(void *handle,
+static int navi10_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -700,7 +693,6 @@ static void navi10_ih_get_clockgating_state(void *handle, u64 *flags)
static const struct amd_ip_funcs navi10_ih_ip_funcs = {
.name = "navi10_ih",
.early_init = navi10_ih_early_init,
- .late_init = NULL,
.sw_init = navi10_ih_sw_init,
.sw_fini = navi10_ih_sw_fini,
.hw_init = navi10_ih_hw_init,
@@ -713,8 +705,6 @@ static const struct amd_ip_funcs navi10_ih_ip_funcs = {
.set_clockgating_state = navi10_ih_set_clockgating_state,
.set_powergating_state = navi10_ih_set_powergating_state,
.get_clockgating_state = navi10_ih_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs navi10_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
index a5b60c9a2418..c88284ff92d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
@@ -68,6 +68,7 @@
#define SDMA_SUBOP_POLL_REG_WRITE_MEM 1
#define SDMA_SUBOP_POLL_DBIT_WRITE_MEM 2
#define SDMA_SUBOP_POLL_MEM_VERIFY 3
+#define SDMA_SUBOP_VM_INVALIDATION 4
#define HEADER_AGENT_DISPATCH 4
#define HEADER_BARRIER 5
#define SDMA_OP_AQL_COPY 0
@@ -4041,6 +4042,69 @@
/*
+** Definitions for SDMA_PKT_VM_INVALIDATION packet
+*/
+
+/*define for HEADER word*/
+/*define for op field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_op_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_op_mask 0x000000FF
+#define SDMA_PKT_VM_INVALIDATION_HEADER_op_shift 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_op_shift)
+
+/*define for sub_op field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask 0x000000FF
+#define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift 8
+#define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift)
+
+/*define for gfx_eng_id field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_mask 0x0000001F
+#define SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_shift 16
+#define SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_gfx_eng_id_shift)
+
+/*define for mm_eng_id field*/
+#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_offset 0
+#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_mask 0x0000001F
+#define SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_shift 24
+#define SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(x) (((x) & SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_mm_eng_id_shift)
+
+/*define for INVALIDATEREQ word*/
+/*define for invalidatereq field*/
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_offset 1
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask 0xFFFFFFFF
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift 0
+#define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(x) (((x) & SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask) << SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift)
+
+/*define for ADDRESSRANGELO word*/
+/*define for addressrangelo field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_offset 2
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask 0xFFFFFFFF
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift 0
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_ADDRESSRANGELO(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift)
+
+/*define for ADDRESSRANGEHI word*/
+/*define for invalidateack field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_offset 3
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask 0x0000FFFF
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift 0
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift)
+
+/*define for addressrangehi field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_offset 3
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask 0x0000001F
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift 16
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift)
+
+/*define for reserved field*/
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_offset 3
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask 0x000001FF
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift 23
+#define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_RESERVED(x) (((x) & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift)
+
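These macros build packet dwords field by field: mask the value, then shift it into place, so a complete word is just an OR of the field builders. Composing the first dword of a VM invalidation packet would be expected to look like the sketch below; SDMA_OP_POLL_REGMEM is chosen only because SDMA_SUBOP_VM_INVALIDATION is defined in the poll-regmem sub-op group above, and the zero engine IDs are illustrative, so confirm both against the SDMA code before reuse:

uint32_t header = SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) |
		  SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) |
		  SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(0) |
		  SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0);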
+
+/*
** Definitions for SDMA_PKT_ATOMIC packet
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
index 39919e0892c1..c92875ceb31f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c
@@ -28,6 +28,7 @@
#include "nbif/nbif_6_3_1_sh_mask.h"
#include "pcie/pcie_6_1_0_offset.h"
#include "pcie/pcie_6_1_0_sh_mask.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
static void nbif_v6_3_1_remap_hdp_registers(struct amdgpu_device *adev)
@@ -518,3 +519,83 @@ const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs = {
.get_rom_offset = nbif_v6_3_1_get_rom_offset,
.set_reg_remap = nbif_v6_3_1_set_reg_remap,
};
+
+static int nbif_v6_3_1_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* The ras_controller_irq enablement should be done in the psp bl when it
+ * tries to enable the ras feature. The driver only needs to set the correct
+ * interrupt vector for the bare-metal and sriov use cases respectively.
+ */
+ uint32_t bif_doorbell_int_cntl;
+
+ bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+ bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 0 : 1);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
+
+ return 0;
+}
+
+static int nbif_v6_3_1_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for err_event_athub_irq should be written
+ * to the bif ring. Since the bif ring is not enabled, just leave the
+ * process callback as a dummy one.
+ */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbif_v6_3_1_ras_err_event_athub_irq_funcs = {
+ .set = nbif_v6_3_1_set_ras_err_event_athub_irq_state,
+ .process = nbif_v6_3_1_process_err_event_athub_irq,
+};
+
+static void nbif_v6_3_1_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_int_cntl;
+
+ bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_int_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* the driver has to clear the interrupt status when the bif ring is disabled */
+ bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
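
With the bif ring disabled, the ATHUB error path above is check-clear-dispatch: read the doorbell interrupt control register, and only if the status field reports an error event, write the clear bit back and hand off to the global RAS ISR. The same flow in miniature, with reg_read()/reg_write() and illustrative bit positions as stand-ins:

#include <stdint.h>

#define ATHUB_ERR_STATUS	(1u << 0)	/* illustrative bit positions */
#define ATHUB_ERR_CLEAR		(1u << 1)

static uint32_t doorbell_int_cntl = ATHUB_ERR_STATUS;

static uint32_t reg_read(void) { return doorbell_int_cntl; }
static void reg_write(uint32_t v) { doorbell_int_cntl = v; }
static void global_ras_isr(void) { /* models amdgpu_ras_global_ras_isr() */ }

static void handle_athub_err_event(void)
{
	uint32_t cntl = reg_read();

	if (cntl & ATHUB_ERR_STATUS) {
		reg_write(cntl | ATHUB_ERR_CLEAR);	/* ack the event */
		global_ras_isr();			/* kick recovery */
	}
}
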
+
+static int nbif_v6_3_1_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbif_v6_3_1_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt
+ * nbif v6_3_1 uses the same irq source as nbio v7_4
+ */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+
+ return r;
+}
+
+struct amdgpu_nbio_ras nbif_v6_3_1_ras = {
+ .handle_ras_err_event_athub_intr_no_bifring =
+ nbif_v6_3_1_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_err_event_athub_interrupt =
+ nbif_v6_3_1_init_ras_err_event_athub_interrupt,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
index b7f2e0d88905..9ac4831d39e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h
@@ -29,5 +29,6 @@
extern const struct nbio_hdp_flush_reg nbif_v6_3_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbif_v6_3_1_funcs;
extern const struct amdgpu_nbio_funcs nbif_v6_3_1_sriov_funcs;
+extern struct amdgpu_nbio_ras nbif_v6_3_1_ras;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index b1b57dcc5a73..d1032e9992b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -271,8 +271,19 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
+#define regRCC_DEV0_EPF6_STRAP4 0xd304
+#define regRCC_DEV0_EPF6_STRAP4_BASE_IDX 5
+
static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
+ uint32_t data;
+
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ case IP_VERSION(2, 5, 0):
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF6_STRAP4) & ~BIT(23);
+ WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF6_STRAP4, data);
+ break;
+ }
}
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
index 7a9adfda5814..41421da63a08 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
@@ -275,6 +275,15 @@ static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ case IP_VERSION(7, 11, 0):
+ case IP_VERSION(7, 11, 1):
+ case IP_VERSION(7, 11, 2):
+ case IP_VERSION(7, 11, 3):
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
+ WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
+ break;
+ }
}
static void nbio_v7_11_update_medium_grain_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 8d80df94bd8b..a26a9be58eac 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -414,8 +414,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
*/
- amdgpu_ras_set_fed(adev, true);
- amdgpu_ras_reset_gpu(adev);
+ amdgpu_ras_global_ras_isr(adev);
}
amdgpu_ras_error_data_fini(&err_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index fb37e354a9d5..3fb6d2aa7e3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -247,6 +247,12 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ case IP_VERSION(7, 7, 0):
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
+ WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
+ break;
+ }
}
static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index d1bd79bbae53..8a0a63ac88d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -401,6 +401,17 @@ static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev)
return px;
}
+static bool nbio_v7_9_is_nps_switch_requested(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS);
+ tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS,
+ CHANGE_STATUE);
+
+ /* 0x8 - NPS switch requested */
+ return (tmp == 0x8);
+}
static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev,
u32 *supp_modes)
{
@@ -508,6 +519,7 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.remap_hdp_registers = nbio_v7_9_remap_hdp_registers,
.get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode,
.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
+ .is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested,
.init_registers = nbio_v7_9_init_registers,
.get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
.set_reg_remap = nbio_v7_9_set_reg_remap,
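nbio_v7_9_is_nps_switch_requested reads BIF_BX_PF0_PARTITION_MEM_STATUS, extracts the CHANGE_STATUE field (the spelling comes from the register headers), and reports a pending NPS switch when it reads 0x8. A standalone sketch of the mask-and-shift that REG_GET_FIELD performs; the shift and mask values below are assumptions, since the real ones live in the generated asic_reg headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout -- the real shift/mask are assumptions here. */
#define CHANGE_STATUE__SHIFT	0
#define CHANGE_STATUE_MASK	0x0000000fU

/* Models what REG_GET_FIELD() expands to: mask, then shift down. */
static uint32_t reg_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static bool is_nps_switch_requested(uint32_t mem_status)
{
	/* 0x8 - NPS switch requested (comment carried from the patch) */
	return reg_get_field(mem_status, CHANGE_STATUE_MASK,
			     CHANGE_STATUE__SHIFT) == 0x8;
}

int main(void)
{
	printf("%d\n", is_nps_switch_requested(0x8));	/* 1 */
	printf("%d\n", is_nps_switch_requested(0x2));	/* 0 */
	return 0;
}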
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 4938e6b340e9..47db483c3516 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -67,8 +67,8 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
/* Navi */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs nv_video_codecs_encode = {
@@ -94,8 +94,8 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = {
/* Sienna Cichlid */
static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs sc_video_codecs_encode = {
@@ -136,8 +136,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = {
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
@@ -634,9 +634,9 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = {
.query_video_codecs = &nv_query_video_codecs,
};
-static int nv_common_early_init(void *handle)
+static int nv_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
adev->smc_rreg = NULL;
@@ -944,9 +944,9 @@ static int nv_common_early_init(void *handle)
return 0;
}
-static int nv_common_late_init(void *handle)
+static int nv_common_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
@@ -973,9 +973,9 @@ static int nv_common_late_init(void *handle)
return 0;
}
-static int nv_common_sw_init(void *handle)
+static int nv_common_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_add_irq_id(adev);
@@ -983,14 +983,9 @@ static int nv_common_sw_init(void *handle)
return 0;
}
-static int nv_common_sw_fini(void *handle)
+static int nv_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
-
-static int nv_common_hw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->nbio.funcs->apply_lc_spc_mode_wa)
adev->nbio.funcs->apply_lc_spc_mode_wa(adev);
@@ -1014,9 +1009,9 @@ static int nv_common_hw_init(void *handle)
return 0;
}
-static int nv_common_hw_fini(void *handle)
+static int nv_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because nv_enable_doorbell_aperture
@@ -1029,18 +1024,14 @@ static int nv_common_hw_fini(void *handle)
return 0;
}
-static int nv_common_suspend(void *handle)
+static int nv_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return nv_common_hw_fini(adev);
+ return nv_common_hw_fini(ip_block);
}
-static int nv_common_resume(void *handle)
+static int nv_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return nv_common_hw_init(adev);
+ return nv_common_hw_init(ip_block);
}
static bool nv_common_is_idle(void *handle)
@@ -1048,20 +1039,10 @@ static bool nv_common_is_idle(void *handle)
return true;
}
-static int nv_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int nv_common_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int nv_common_set_clockgating_state(void *handle,
+static int nv_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1089,7 +1070,7 @@ static int nv_common_set_clockgating_state(void *handle,
return 0;
}
-static int nv_common_set_powergating_state(void *handle,
+static int nv_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* TODO */
@@ -1115,17 +1096,12 @@ static const struct amd_ip_funcs nv_common_ip_funcs = {
.early_init = nv_common_early_init,
.late_init = nv_common_late_init,
.sw_init = nv_common_sw_init,
- .sw_fini = nv_common_sw_fini,
.hw_init = nv_common_hw_init,
.hw_fini = nv_common_hw_fini,
.suspend = nv_common_suspend,
.resume = nv_common_resume,
.is_idle = nv_common_is_idle,
- .wait_for_idle = nv_common_wait_for_idle,
- .soft_reset = nv_common_soft_reset,
.set_clockgating_state = nv_common_set_clockgating_state,
.set_powergating_state = nv_common_set_powergating_state,
.get_clockgating_state = nv_common_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
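The nv.c hunks above, and every sdma hunk that follows, are one mechanical migration: the amd_ip_funcs callbacks take a typed struct amdgpu_ip_block * instead of an opaque void *handle, so the cast to amdgpu_device disappears. A compilable before/after sketch with stand-in struct definitions (the real structures are far larger):

#include <stdio.h>

struct amdgpu_device { int usec_timeout; };		/* stand-in */
struct amdgpu_ip_block { struct amdgpu_device *adev; };	/* stand-in */

/* Old style: an opaque handle that every callback had to cast. */
static int early_init_old(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return adev->usec_timeout > 0 ? 0 : -1;
}

/* New style: the IP block itself is passed; adev is reached type-safely. */
static int early_init_new(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return adev->usec_timeout > 0 ? 0 : -1;
}

int main(void)
{
	struct amdgpu_device dev = { .usec_timeout = 100000 };
	struct amdgpu_ip_block blk = { .adev = &dev };

	printf("%d %d\n", early_init_old(&dev), early_init_new(&blk));
	return 0;
}

Dropping the empty sw_fini/wait_for_idle/soft_reset stubs and the explicit NULL dump_ip_state/print_ip_state entries goes hand in hand with this: the IP-block core is expected to skip callbacks that are left unset.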
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 37b5ddd6f13b..f4a91b126c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -103,6 +103,10 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */
GFX_CMD_ID_BOOT_CFG = 0x00000022, /* Boot Config */
GFX_CMD_ID_SRIOV_SPATIAL_PART = 0x00000027, /* Configure spatial partitioning mode */
+ /* IDs of performance monitoring/profiling */
+ GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */
+ /* Dynamic memory partitioning (NPS mode change) */
+ GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */
};
/* PSP boot config sub-commands */
@@ -351,6 +355,20 @@ struct psp_gfx_cmd_sriov_spatial_part {
uint32_t override_this_aid;
};
+/* Structure for SQ performance monitoring/profiling enable/disable */
+struct psp_gfx_cmd_config_sq_perfmon {
+ uint32_t gfx_xcp_mask;
+ uint8_t core_override;
+ uint8_t reg_override;
+ uint8_t perfmon_override;
+ uint8_t reserved[5];
+};
+
+struct psp_gfx_cmd_fb_memory_part {
+ uint32_t mode; /* requested NPS mode */
+ uint32_t resvd;
+};
+
/* All GFX ring buffer commands. */
union psp_gfx_commands
{
@@ -365,6 +383,8 @@ union psp_gfx_commands
struct psp_gfx_cmd_load_toc cmd_load_toc;
struct psp_gfx_cmd_boot_cfg boot_cfg;
struct psp_gfx_cmd_sriov_spatial_part cmd_spatial_part;
+ struct psp_gfx_cmd_config_sq_perfmon config_sq_perfmon;
+ struct psp_gfx_cmd_fb_memory_part cmd_memory_part;
};
struct psp_gfx_uresp_reserved
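The two new PSP GFX commands are fixed-layout messages placed in the psp_gfx_commands union. A sketch of how a caller might fill the NPS-mode payload; the struct and command ID are copied from the hunk above, while the fill helper itself is purely illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GFX_CMD_ID_FB_NPS_MODE 0x00000048	/* value from the hunk above */

struct psp_gfx_cmd_fb_memory_part {
	uint32_t mode;		/* requested NPS mode */
	uint32_t resvd;
};

/* Hypothetical helper: builds the payload a PSP caller would place in
 * the command union before ringing the PSP. */
static void build_nps_cmd(struct psp_gfx_cmd_fb_memory_part *cmd,
			  uint32_t nps_mode)
{
	memset(cmd, 0, sizeof(*cmd));	/* keep the reserved word zeroed */
	cmd->mode = nps_mode;
}

int main(void)
{
	struct psp_gfx_cmd_fb_memory_part cmd;

	build_nps_cmd(&cmd, 4);		/* e.g. request NPS4 */
	printf("cmd 0x%x, mode %u\n", GFX_CMD_ID_FB_NPS_MODE, cmd.mode);
	return 0;
}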
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 51e470e8d67d..cc621064610f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -51,6 +51,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_12_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_12_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_14_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_14_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_14_0_0_toc.bin");
@@ -122,6 +124,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
@@ -177,6 +180,7 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
retry_cnt =
((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))) ?
PSP_VMBX_POLLING_LIMIT :
10;
@@ -203,6 +207,7 @@ static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
int ret;
if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
ret = psp_v13_0_wait_for_vmbx_ready(psp);
if (ret)
@@ -288,6 +293,11 @@ static int psp_v13_0_bootloader_load_ras_drv(struct psp_context *psp)
return psp_v13_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
}
+static int psp_v13_0_bootloader_load_spdm_drv(struct psp_context *psp)
+{
+ return psp_v13_0_bootloader_load_component(psp, &psp->spdm_drv, PSP_BL__LOAD_SPDMDRV);
+}
+
static inline void psp_v13_0_init_sos_version(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -798,6 +808,7 @@ static bool psp_v13_0_get_ras_capability(struct psp_context *psp)
return false;
if ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) &&
(!(adev->flags & AMD_IS_APU))) {
reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127);
@@ -823,6 +834,30 @@ static bool psp_v13_0_is_aux_sos_load_required(struct psp_context *psp)
return (pmfw_ver < 0x557300);
}
+static bool psp_v13_0_is_reload_needed(struct psp_context *psp)
+{
+ uint32_t ucode_ver;
+
+ if (!psp_v13_0_is_sos_alive(psp))
+ return false;
+
+ /* Restrict reload support only to specific IP versions */
+ switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ /* TOS version read from microcode header */
+ ucode_ver = psp->sos.fw_version;
+ /* Read TOS version from hardware */
+ psp_v13_0_init_sos_version(psp);
+ return (ucode_ver != psp->sos.fw_version);
+ default:
+ return false;
+ }
+
+ return false;
+}
+
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
.wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -833,6 +868,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.bootloader_load_intf_drv = psp_v13_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v13_0_bootloader_load_dbg_drv,
.bootloader_load_ras_drv = psp_v13_0_bootloader_load_ras_drv,
+ .bootloader_load_spdm_drv = psp_v13_0_bootloader_load_spdm_drv,
.bootloader_load_sos = psp_v13_0_bootloader_load_sos,
.ring_create = psp_v13_0_ring_create,
.ring_stop = psp_v13_0_ring_stop,
@@ -847,6 +883,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
.get_ras_capability = psp_v13_0_get_ras_capability,
.is_aux_sos_load_required = psp_v13_0_is_aux_sos_load_required,
+ .is_reload_needed = psp_v13_0_is_reload_needed,
};
void psp_v13_0_set_psp_funcs(struct psp_context *psp)
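psp_v13_0_is_reload_needed caches the TOS version parsed from the microcode header, then lets psp_v13_0_init_sos_version overwrite psp->sos.fw_version with the value the hardware reports; a mismatch means the SOS must be reloaded. The ordering matters: the header value has to be read before the hardware refresh clobbers it. The decision itself reduces to a guarded comparison, sketched standalone with invented version numbers (the IP_VERSION() encoding mirrors the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's IP_VERSION(): major << 16 | minor << 8 | rev */
#define IP_VERSION(ver, rel, step) (((ver) << 16) | ((rel) << 8) | (step))

static bool is_reload_needed(uint32_t mp0_version,
			     uint32_t ucode_ver, uint32_t hw_ver)
{
	switch (mp0_version) {
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		/* reload only when disk and hardware TOS versions differ */
		return ucode_ver != hw_ver;
	default:
		return false;
	}
}

int main(void)
{
	/* version numbers here are invented for the demo */
	printf("%d\n", is_reload_needed(IP_VERSION(13, 0, 6),
					0x00360056, 0x00360055));	/* 1 */
	printf("%d\n", is_reload_needed(IP_VERSION(13, 0, 10),
					0x00360056, 0x00360055));	/* 0 */
	return 0;
}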
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 725392522267..135c5099bfb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -145,9 +145,11 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
@@ -631,7 +633,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -807,9 +809,9 @@ static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-static int sdma_v2_4_early_init(void *handle)
+static int sdma_v2_4_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
@@ -826,11 +828,11 @@ static int sdma_v2_4_early_init(void *handle)
return 0;
}
-static int sdma_v2_4_sw_init(void *handle)
+static int sdma_v2_4_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
@@ -866,9 +868,9 @@ static int sdma_v2_4_sw_init(void *handle)
return r;
}
-static int sdma_v2_4_sw_fini(void *handle)
+static int sdma_v2_4_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -878,10 +880,10 @@ static int sdma_v2_4_sw_fini(void *handle)
return 0;
}
-static int sdma_v2_4_hw_init(void *handle)
+static int sdma_v2_4_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v2_4_init_golden_registers(adev);
@@ -892,27 +894,21 @@ static int sdma_v2_4_hw_init(void *handle)
return r;
}
-static int sdma_v2_4_hw_fini(void *handle)
+static int sdma_v2_4_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- sdma_v2_4_enable(adev, false);
+ sdma_v2_4_enable(ip_block->adev, false);
return 0;
}
-static int sdma_v2_4_suspend(void *handle)
+static int sdma_v2_4_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v2_4_hw_fini(adev);
+ return sdma_v2_4_hw_fini(ip_block);
}
-static int sdma_v2_4_resume(void *handle)
+static int sdma_v2_4_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v2_4_hw_init(adev);
+ return sdma_v2_4_hw_init(ip_block);
}
static bool sdma_v2_4_is_idle(void *handle)
@@ -927,11 +923,11 @@ static bool sdma_v2_4_is_idle(void *handle)
return true;
}
-static int sdma_v2_4_wait_for_idle(void *handle)
+static int sdma_v2_4_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -944,10 +940,10 @@ static int sdma_v2_4_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v2_4_soft_reset(void *handle)
+static int sdma_v2_4_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
@@ -1086,14 +1082,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int sdma_v2_4_set_clockgating_state(void *handle,
+static int sdma_v2_4_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
/* XXX handled via the smc on VI */
return 0;
}
-static int sdma_v2_4_set_powergating_state(void *handle,
+static int sdma_v2_4_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1102,7 +1098,6 @@ static int sdma_v2_4_set_powergating_state(void *handle,
static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
- .late_init = NULL,
.sw_init = sdma_v2_4_sw_init,
.sw_fini = sdma_v2_4_sw_fini,
.hw_init = sdma_v2_4_hw_init,
@@ -1114,8 +1109,6 @@ static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.soft_reset = sdma_v2_4_soft_reset,
.set_clockgating_state = sdma_v2_4_set_clockgating_state,
.set_powergating_state = sdma_v2_4_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
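sdma_v2_4_wait_for_idle, like the v3_0 and v4_0 variants below, polls the SRBM_STATUS2 busy bits once per microsecond up to adev->usec_timeout and returns -ETIMEDOUT if the engines never go idle. The skeleton of that loop, standalone with a stubbed register read:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define SDMA_BUSY_MASK 0x3U	/* stand-in for the two SRBM_STATUS2 busy bits */

static int busy_polls = 5;	/* pretend hardware: busy for 5 reads */

/* Stub for RREG32(mmSRBM_STATUS2). */
static uint32_t read_status(void)
{
	return busy_polls-- > 0 ? SDMA_BUSY_MASK : 0;
}

static int wait_for_idle(int usec_timeout)
{
	int i;

	for (i = 0; i < usec_timeout; i++) {
		if (!(read_status() & SDMA_BUSY_MASK))
			return 0;	/* both engines idle */
		/* the driver udelay(1)s between polls; omitted here */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("%d\n", wait_for_idle(100000));	/* 0 */
	return 0;
}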
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index e65194fe94af..c611328671ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -305,9 +305,11 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma.bin", chip_name);
else
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
"amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
@@ -904,7 +906,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
else
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1080,9 +1082,9 @@ static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-static int sdma_v3_0_early_init(void *handle)
+static int sdma_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
switch (adev->asic_type) {
@@ -1106,11 +1108,11 @@ static int sdma_v3_0_early_init(void *handle)
return 0;
}
-static int sdma_v3_0_sw_init(void *handle)
+static int sdma_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
@@ -1152,9 +1154,9 @@ static int sdma_v3_0_sw_init(void *handle)
return r;
}
-static int sdma_v3_0_sw_fini(void *handle)
+static int sdma_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -1164,10 +1166,10 @@ static int sdma_v3_0_sw_fini(void *handle)
return 0;
}
-static int sdma_v3_0_hw_init(void *handle)
+static int sdma_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v3_0_init_golden_registers(adev);
@@ -1178,9 +1180,9 @@ static int sdma_v3_0_hw_init(void *handle)
return r;
}
-static int sdma_v3_0_hw_fini(void *handle)
+static int sdma_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
@@ -1188,18 +1190,14 @@ static int sdma_v3_0_hw_fini(void *handle)
return 0;
}
-static int sdma_v3_0_suspend(void *handle)
+static int sdma_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v3_0_hw_fini(adev);
+ return sdma_v3_0_hw_fini(ip_block);
}
-static int sdma_v3_0_resume(void *handle)
+static int sdma_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v3_0_hw_init(adev);
+ return sdma_v3_0_hw_init(ip_block);
}
static bool sdma_v3_0_is_idle(void *handle)
@@ -1214,11 +1212,11 @@ static bool sdma_v3_0_is_idle(void *handle)
return true;
}
-static int sdma_v3_0_wait_for_idle(void *handle)
+static int sdma_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -1231,9 +1229,9 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static bool sdma_v3_0_check_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS2);
@@ -1252,9 +1250,9 @@ static bool sdma_v3_0_check_soft_reset(void *handle)
}
}
-static int sdma_v3_0_pre_soft_reset(void *handle)
+static int sdma_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
if (!adev->sdma.srbm_soft_reset)
@@ -1271,9 +1269,9 @@ static int sdma_v3_0_pre_soft_reset(void *handle)
return 0;
}
-static int sdma_v3_0_post_soft_reset(void *handle)
+static int sdma_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
if (!adev->sdma.srbm_soft_reset)
@@ -1290,9 +1288,9 @@ static int sdma_v3_0_post_soft_reset(void *handle)
return 0;
}
-static int sdma_v3_0_soft_reset(void *handle)
+static int sdma_v3_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp;
@@ -1487,10 +1485,10 @@ static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
}
}
-static int sdma_v3_0_set_clockgating_state(void *handle,
+static int sdma_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1510,7 +1508,7 @@ static int sdma_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v3_0_set_powergating_state(void *handle,
+static int sdma_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1538,7 +1536,6 @@ static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags)
static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
- .late_init = NULL,
.sw_init = sdma_v3_0_sw_init,
.sw_fini = sdma_v3_0_sw_fini,
.hw_init = sdma_v3_0_hw_init,
@@ -1554,8 +1551,6 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
.get_clockgating_state = sdma_v3_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 23ef4eb36b40..b48d9c0b2e1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1565,7 +1565,7 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1751,9 +1751,9 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
}
}
-static int sdma_v4_0_early_init(void *handle)
+static int sdma_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = sdma_v4_0_init_microcode(adev);
@@ -1780,9 +1780,9 @@ static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry);
-static int sdma_v4_0_late_init(void *handle)
+static int sdma_v4_0_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v4_0_setup_ulv(adev);
@@ -1792,11 +1792,11 @@ static int sdma_v4_0_late_init(void *handle)
return 0;
}
-static int sdma_v4_0_sw_init(void *handle)
+static int sdma_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
uint32_t *ptr;
@@ -1929,9 +1929,9 @@ static int sdma_v4_0_sw_init(void *handle)
return r;
}
-static int sdma_v4_0_sw_fini(void *handle)
+static int sdma_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1951,12 +1951,12 @@ static int sdma_v4_0_sw_fini(void *handle)
return 0;
}
-static int sdma_v4_0_hw_init(void *handle)
+static int sdma_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->flags & AMD_IS_APU)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false, 0);
if (!amdgpu_sriov_vf(adev))
sdma_v4_0_init_golden_registers(adev);
@@ -1964,9 +1964,9 @@ static int sdma_v4_0_hw_init(void *handle)
return sdma_v4_0_start(adev);
}
-static int sdma_v4_0_hw_fini(void *handle)
+static int sdma_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
if (amdgpu_sriov_vf(adev))
@@ -1983,14 +1983,14 @@ static int sdma_v4_0_hw_fini(void *handle)
sdma_v4_0_enable(adev, false);
if (adev->flags & AMD_IS_APU)
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true, 0);
return 0;
}
-static int sdma_v4_0_suspend(void *handle)
+static int sdma_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* SMU saves SDMA state for us */
if (adev->in_s0ix) {
@@ -1998,12 +1998,12 @@ static int sdma_v4_0_suspend(void *handle)
return 0;
}
- return sdma_v4_0_hw_fini(adev);
+ return sdma_v4_0_hw_fini(ip_block);
}
-static int sdma_v4_0_resume(void *handle)
+static int sdma_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* SMU restores SDMA state for us */
if (adev->in_s0ix) {
@@ -2012,7 +2012,7 @@ static int sdma_v4_0_resume(void *handle)
return 0;
}
- return sdma_v4_0_hw_init(adev);
+ return sdma_v4_0_hw_init(ip_block);
}
static bool sdma_v4_0_is_idle(void *handle)
@@ -2030,11 +2030,11 @@ static bool sdma_v4_0_is_idle(void *handle)
return true;
}
-static int sdma_v4_0_wait_for_idle(void *handle)
+static int sdma_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i, j;
u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
for (j = 0; j < adev->sdma.num_instances; j++) {
@@ -2049,7 +2049,7 @@ static int sdma_v4_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v4_0_soft_reset(void *handle)
+static int sdma_v4_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
@@ -2297,10 +2297,10 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
}
}
-static int sdma_v4_0_set_clockgating_state(void *handle,
+static int sdma_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -2312,10 +2312,10 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v4_0_set_powergating_state(void *handle,
+static int sdma_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
@@ -2350,9 +2350,9 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
-static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
uint32_t instance_offset;
@@ -2371,9 +2371,9 @@ static void sdma_v4_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v4_0_dump_ip_state(void *handle)
+static void sdma_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_0);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index c77889040760..5e0066cd6c51 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -189,6 +189,7 @@ static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 4) ||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) {
ret = amdgpu_sdma_init_microcode(adev, 0, true);
break;
@@ -667,11 +668,12 @@ static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
*
* @adev: amdgpu_device pointer
* @i: instance to resume
+ * @restore: used to restore wptr when restarting
*
* Set up the gfx DMA ring buffers and enable them.
* Returns 0 for success, error for failure.
*/
-static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
+static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
{
struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
@@ -698,16 +700,24 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
- ring->wptr = 0;
+ if (!restore)
+ ring->wptr = 0;
/* before programing wptr to a less value, need set minor_ptr_update first */
WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
/* Initialize the ring buffer's read and write pointers */
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
- WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
- WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+ if (restore) {
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ } else {
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+ }
doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
@@ -755,11 +765,12 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
*
* @adev: amdgpu_device pointer
* @i: instance to resume
+ * @restore: whether a restore is needed
*
* Set up the page DMA ring buffers and enable them.
* Returns 0 for success, error for failure.
*/
-static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
+static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
{
struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
@@ -775,10 +786,17 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
- WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
- WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
- WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
+ if (restore) {
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, upper_32_bits(ring->wptr << 2));
+ } else {
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
+ WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
+ }
/* set the wb address whether it's enabled or not */
WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
@@ -792,7 +810,8 @@ static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
- ring->wptr = 0;
+ if (!restore)
+ ring->wptr = 0;
/* before programing wptr to a less value, need set minor_ptr_update first */
WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);
@@ -911,12 +930,13 @@ static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @inst_mask: mask of dma engine instances to be enabled
+ * @restore: whether a restore is needed
*
* Set up the DMA engines and enable them.
* Returns 0 for success, error for failure.
*/
static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
- uint32_t inst_mask)
+ uint32_t inst_mask, bool restore)
{
struct amdgpu_ring *ring;
uint32_t tmp_mask;
@@ -927,7 +947,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
sdma_v4_4_2_inst_enable(adev, false, inst_mask);
} else {
/* bypass sdma microcode loading on Gopher */
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
+ if (!restore && adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
adev->sdma.instance[0].fw) {
r = sdma_v4_4_2_inst_load_microcode(adev, inst_mask);
if (r)
@@ -946,17 +966,19 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
uint32_t temp;
WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
- sdma_v4_4_2_gfx_resume(adev, i);
+ sdma_v4_4_2_gfx_resume(adev, i, restore);
if (adev->sdma.has_page_queue)
- sdma_v4_4_2_page_resume(adev, i);
+ sdma_v4_4_2_page_resume(adev, i, restore);
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
- /* enable context empty interrupt during initialization */
- temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
- WREG32_SDMA(i, regSDMA_CNTL, temp);
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
+ /* enable context empty interrupt during initialization */
+ temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
+ }
if (!amdgpu_sriov_vf(adev)) {
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
/* unhalt engine */
@@ -1110,7 +1132,7 @@ static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -1290,9 +1312,9 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
}
}
-static int sdma_v4_4_2_early_init(void *handle)
+static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = sdma_v4_4_2_init_microcode(adev);
@@ -1318,9 +1340,9 @@ static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
#endif
-static int sdma_v4_4_2_late_init(void *handle)
+static int sdma_v4_4_2_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
#if 0
struct ras_ih_if ih_info = {
.cb = sdma_v4_4_2_process_ras_data_cb,
@@ -1332,11 +1354,11 @@ static int sdma_v4_4_2_late_init(void *handle)
return 0;
}
-static int sdma_v4_4_2_sw_init(void *handle)
+static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 aid_id;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
uint32_t *ptr;
@@ -1384,6 +1406,12 @@ static int sdma_v4_4_2_sw_init(void *handle)
&adev->sdma.srbm_write_irq);
if (r)
return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_CTXEMPTY,
+ &adev->sdma.ctxt_empty_irq);
+ if (r)
+ return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1430,6 +1458,10 @@ static int sdma_v4_4_2_sw_init(void *handle)
}
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->sdma.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
+
if (amdgpu_sdma_ras_sw_init(adev)) {
dev_err(adev->dev, "fail to initialize sdma ras block\n");
return -EINVAL;
@@ -1442,12 +1474,16 @@ static int sdma_v4_4_2_sw_init(void *handle)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ r = amdgpu_sdma_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return r;
}
-static int sdma_v4_4_2_sw_fini(void *handle)
+static int sdma_v4_4_2_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1456,7 +1492,9 @@ static int sdma_v4_4_2_sw_fini(void *handle)
amdgpu_ring_fini(&adev->sdma.instance[i].page);
}
+ amdgpu_sdma_sysfs_reset_mask_fini(adev);
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 4) ||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5))
amdgpu_sdma_destroy_inst_ctx(adev, true);
else
@@ -1467,24 +1505,24 @@ static int sdma_v4_4_2_sw_fini(void *handle)
return 0;
}
-static int sdma_v4_4_2_hw_init(void *handle)
+static int sdma_v4_4_2_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t inst_mask;
inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
if (!amdgpu_sriov_vf(adev))
sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
- r = sdma_v4_4_2_inst_start(adev, inst_mask);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
return r;
}
-static int sdma_v4_4_2_hw_fini(void *handle)
+static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t inst_mask;
int i;
@@ -1505,24 +1543,22 @@ static int sdma_v4_4_2_hw_fini(void *handle)
return 0;
}
-static int sdma_v4_4_2_set_clockgating_state(void *handle,
+static int sdma_v4_4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
-static int sdma_v4_4_2_suspend(void *handle)
+static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_in_reset(adev))
- sdma_v4_4_2_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ sdma_v4_4_2_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
- return sdma_v4_4_2_hw_fini(adev);
+ return sdma_v4_4_2_hw_fini(ip_block);
}
-static int sdma_v4_4_2_resume(void *handle)
+static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v4_4_2_hw_init(adev);
+ return sdma_v4_4_2_hw_init(ip_block);
}
static bool sdma_v4_4_2_is_idle(void *handle)
@@ -1540,11 +1576,11 @@ static bool sdma_v4_4_2_is_idle(void *handle)
return true;
}
-static int sdma_v4_4_2_wait_for_idle(void *handle)
+static int sdma_v4_4_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i, j;
u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
for (j = 0; j < adev->sdma.num_instances; j++) {
@@ -1559,13 +1595,49 @@ static int sdma_v4_4_2_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v4_4_2_soft_reset(void *handle)
+static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
}
+static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r;
+ u32 inst_mask;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ /* stop queue */
+ inst_mask = 1 << ring->me;
+ sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_4_2_inst_page_stop(adev, inst_mask);
+
+ r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, ring->me));
+ if (r)
+ return r;
+
+ udelay(50);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32_SDMA(ring->me, regSDMA_F32_CNTL), SDMA_F32_CNTL, HALT))
+ break;
+ udelay(1);
+ }
+
+ if (i == adev->usec_timeout) {
+ dev_err(adev->dev, "timed out waiting for SDMA%d unhalt after reset\n",
+ ring->me);
+ return -ETIMEDOUT;
+ }
+
+ return sdma_v4_4_2_inst_start(adev, inst_mask, true);
+}
+
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1748,6 +1820,16 @@ static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_4_2_process_ctxt_empty_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* There is nothing useful to be done here, kept only for debugging */
+ dev_dbg_ratelimited(adev->dev, "SDMA context empty interrupt\n");
+ sdma_v4_4_2_print_iv_entry(adev, entry);
+ return 0;
+}
+
static void sdma_v4_4_2_inst_update_medium_grain_light_sleep(
struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
{
@@ -1814,10 +1896,10 @@ static void sdma_v4_4_2_inst_update_medium_grain_clock_gating(
}
}
-static int sdma_v4_4_2_set_clockgating_state(void *handle,
+static int sdma_v4_4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t inst_mask;
if (amdgpu_sriov_vf(adev))
@@ -1832,7 +1914,7 @@ static int sdma_v4_4_2_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v4_4_2_set_powergating_state(void *handle,
+static int sdma_v4_4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1857,9 +1939,9 @@ static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
-static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v4_4_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
uint32_t instance_offset;
@@ -1878,9 +1960,9 @@ static void sdma_v4_4_2_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v4_4_2_dump_ip_state(void *handle)
+static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
@@ -1888,7 +1970,6 @@ static void sdma_v4_4_2_dump_ip_state(void *handle)
if (!adev->sdma.ip_dump)
return;
- amdgpu_gfx_off_ctrl(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
instance_offset = i * reg_count;
for (j = 0; j < reg_count; j++)
@@ -1896,7 +1977,6 @@ static void sdma_v4_4_2_dump_ip_state(void *handle)
RREG32(sdma_v4_4_2_get_reg_offset(adev, i,
sdma_reg_list_4_4_2[j].reg_offset));
}
- amdgpu_gfx_off_ctrl(adev, true);
}
const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
@@ -1948,6 +2028,7 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = sdma_v4_4_2_reset_queue,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
@@ -2031,6 +2112,10 @@ static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
.process = sdma_v4_4_2_process_srbm_write_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ctxt_empty_irq_funcs = {
+ .process = sdma_v4_4_2_process_ctxt_empty_irq,
+};
+
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
@@ -2039,6 +2124,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
+ adev->sdma.ctxt_empty_irq.num_types = adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
@@ -2047,6 +2133,7 @@ static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
+ adev->sdma.ctxt_empty_irq.funcs = &sdma_v4_4_2_ctxt_empty_irq_funcs;
}
/**
@@ -2160,7 +2247,7 @@ static int sdma_v4_4_2_xcp_resume(void *handle, uint32_t inst_mask)
if (!amdgpu_sriov_vf(adev))
sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
- r = sdma_v4_4_2_inst_start(adev, inst_mask);
+ r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
return r;
}
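The new sdma_v4_4_2_reset_queue follows a fixed sequence: stop the gfx (and page) queue of the affected instance, have the SMU reset the engine via amdgpu_dpm_reset_sdma, poll until the HALT bit clears, then restart the instance with restore=true so the cached wptr is reprogrammed instead of zeroed. A standalone sketch of that control flow; all hardware accessors here are stand-ins, not the driver's API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int halt_polls = 3;	/* pretend hardware: halted for 3 reads */

/* Stub for reading the HALT field of SDMA_F32_CNTL. */
static bool halt_bit_set(void)
{
	return halt_polls-- > 0;
}

/* Stubs standing in for queue stop and the SMU-assisted engine reset. */
static void stop_queues(void) { }
static int dpm_reset_sdma(void) { return 0; }

static int reset_queue(int usec_timeout)
{
	int i, r;

	stop_queues();			/* 1. quiesce gfx (and page) queues */
	r = dpm_reset_sdma();		/* 2. firmware resets the engine */
	if (r)
		return r;

	for (i = 0; i < usec_timeout; i++) {	/* 3. wait for unhalt */
		if (!halt_bit_set())
			break;
		/* the driver udelay(1)s between polls */
	}
	if (i == usec_timeout)
		return -ETIMEDOUT;

	/* 4. restart with restore=true so wptr/rptr are reprogrammed */
	return 0;
}

int main(void)
{
	printf("%d\n", reset_queue(100000));	/* 0 */
	return 0;
}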
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 3e48ea38385d..b764550834a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -705,14 +705,16 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
}
/**
- * sdma_v5_0_gfx_resume - setup and start the async dma engines
+ * sdma_v5_0_gfx_resume_instance - start/restart a certain sdma engine
*
* @adev: amdgpu_device pointer
+ * @i: instance
+ * @restore: used to restore wptr when restarting
*
- * Set up the gfx DMA ring buffers and enable them (NAVI10).
- * Returns 0 for success, error for failure.
+ * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored.
+ * Returns 0 for success.
*/
-static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
+static int sdma_v5_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
@@ -722,142 +724,163 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
u32 temp;
u32 wptr_poll_cntl;
u64 wptr_gpu_addr;
- int i, r;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
+ ring = &adev->sdma.instance[i].ring;
- if (!amdgpu_sriov_vf(adev))
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ if (restore) {
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ } else {
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
-
- /* setup the wptr shadow polling */
- wptr_gpu_addr = ring->wptr_gpu_addr;
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
- mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
- SDMA0_GFX_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
- wptr_poll_cntl);
-
- /* set the wb address whether it's enabled or not */
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
- upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
- lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
-
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
-
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
- ring->gpu_addr >> 8);
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
- ring->gpu_addr >> 40);
-
+ }
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = ring->wptr_gpu_addr;
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
+ mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
+ wptr_poll_cntl);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
+ lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
+ ring->gpu_addr >> 8);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
+ ring->gpu_addr >> 40);
+
+ if (!restore)
ring->wptr = 0;
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+ /* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
- lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
- upper_32_bits(ring->wptr << 2));
- }
+ if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
+ lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
+ upper_32_bits(ring->wptr << 2));
+ }
- doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
- mmSDMA0_GFX_DOORBELL_OFFSET));
+ doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
+ doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
+ mmSDMA0_GFX_DOORBELL_OFFSET));
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
- OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
- }
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
- doorbell_offset);
+ if (ring->use_doorbell) {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ } else {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
+ }
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
+ doorbell_offset);
- adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index, 20);
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index, 20);
- if (amdgpu_sriov_vf(adev))
- sdma_v5_0_ring_set_wptr(ring);
+ if (amdgpu_sriov_vf(adev))
+ sdma_v5_0_ring_set_wptr(ring);
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+ /* set minor_ptr_update to 0 after wptr programmed */
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
- if (!amdgpu_sriov_vf(adev)) {
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
- /* enable MCBP */
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
- }
+ if (!amdgpu_sriov_vf(adev)) {
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+ /* enable MCBP */
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+ }
- if (!amdgpu_sriov_vf(adev)) {
- /* unhalt engine */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
- }
+ if (!amdgpu_sriov_vf(adev)) {
+ /* unhalt engine */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
+ }
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
- /* enable DMA IBs */
- WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ /* enable DMA IBs */
+ WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v5_0_ctx_switch_enable(adev, true);
- sdma_v5_0_enable(adev, true);
- }
+ if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
+ sdma_v5_0_ctx_switch_enable(adev, true);
+ sdma_v5_0_enable(adev, true);
+ }
+
+ return amdgpu_ring_test_helper(ring);
+}
+
+/**
+ * sdma_v5_0_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them (NAVI10).
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
+{
+ int i, r;
- r = amdgpu_ring_test_helper(ring);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = sdma_v5_0_gfx_resume_instance(adev, i, false);
if (r)
return r;
}
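A note on the restore path in the hunk above: programming RB_RPTR and RB_WPTR to the same saved wptr makes the ring read back as empty, so the engine replays no stale packets after a queue reset, and the scheduler re-emits any unfinished jobs afterwards. A minimal, self-contained sketch of the occupancy arithmetic this relies on (not kernel code; the ring size is assumed to be a power of two, as the order_base_2() sizing above guarantees):

#include <stdint.h>

/* Pending dwords in a power-of-two ring: rptr == wptr reads back as empty. */
static uint32_t ring_pending_dw(uint32_t wptr, uint32_t rptr, uint32_t size_dw)
{
	return (wptr - rptr) & (size_dw - 1);
}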
@@ -1171,7 +1194,7 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1366,9 +1389,9 @@ static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
-static int sdma_v5_0_early_init(void *handle)
+static int sdma_v5_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = sdma_v5_0_init_microcode(adev);
@@ -1385,11 +1408,11 @@ static int sdma_v5_0_early_init(void *handle)
}
-static int sdma_v5_0_sw_init(void *handle)
+static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
uint32_t *ptr;
@@ -1429,6 +1452,19 @@ static int sdma_v5_0_sw_init(void *handle)
return r;
}
+ adev->sdma.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
+ if (adev->sdma.instance[0].fw_version >= 35)
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ default:
+ break;
+ }
+
/* Allocate memory for SDMA IP Dump buffer */
ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
if (ptr)
@@ -1436,17 +1472,22 @@ static int sdma_v5_0_sw_init(void *handle)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ r = amdgpu_sdma_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return r;
}
-static int sdma_v5_0_sw_fini(void *handle)
+static int sdma_v5_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ amdgpu_sdma_sysfs_reset_mask_fini(adev);
amdgpu_sdma_destroy_inst_ctx(adev, false);
kfree(adev->sdma.ip_dump);
@@ -1454,10 +1495,10 @@ static int sdma_v5_0_sw_fini(void *handle)
return 0;
}
-static int sdma_v5_0_hw_init(void *handle)
+static int sdma_v5_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
sdma_v5_0_init_golden_registers(adev);
@@ -1466,9 +1507,9 @@ static int sdma_v5_0_hw_init(void *handle)
return r;
}
-static int sdma_v5_0_hw_fini(void *handle)
+static int sdma_v5_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1479,18 +1520,14 @@ static int sdma_v5_0_hw_fini(void *handle)
return 0;
}
-static int sdma_v5_0_suspend(void *handle)
+static int sdma_v5_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v5_0_hw_fini(adev);
+ return sdma_v5_0_hw_fini(ip_block);
}
-static int sdma_v5_0_resume(void *handle)
+static int sdma_v5_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v5_0_hw_init(adev);
+ return sdma_v5_0_hw_init(ip_block);
}
static bool sdma_v5_0_is_idle(void *handle)
@@ -1508,11 +1545,11 @@ static bool sdma_v5_0_is_idle(void *handle)
return true;
}
-static int sdma_v5_0_wait_for_idle(void *handle)
+static int sdma_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 sdma0, sdma1;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
@@ -1525,13 +1562,100 @@ static int sdma_v5_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v5_0_soft_reset(void *handle)
+static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
return 0;
}
+static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r;
+ u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring)
+ break;
+ }
+
+ if (i == adev->sdma.num_instances) {
+ DRM_ERROR("sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+ /* stop queue */
+ ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+
+ rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+
+ /* engine stop: set SDMAx_F32_CNTL.HALT and the SDMAx_FREEZE freeze bit to 1 */
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+
+ for (j = 0; j < adev->usec_timeout; j++) {
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1)
+ break;
+ udelay(1);
+ }
+
+ /* check that the sdma copy engine is fully idle if FROZEN was not observed */
+ if (j == adev->usec_timeout) {
+ stat1_reg = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG));
+ if ((stat1_reg & 0x3FF) != 0x3FF) {
+ DRM_ERROR("cannot soft reset as sdma not idle\n");
+ r = -ETIMEDOUT;
+ goto err0;
+ }
+ }
+
+ f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+
+ cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+
+ /* soft reset: clear SDMA_GFX_PREEMPT.IB_PREEMPT, then set mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 */
+ preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
+ preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
+
+ soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
+
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+
+ udelay(50);
+
+ soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+
+ /* unfreeze */
+ freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+
+ r = sdma_v5_0_gfx_resume_instance(adev, i, true);
+
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
+
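The reset sequence above picks the per-instance reset bit with a double shift. A small sketch of that arithmetic, with an illustrative shift value (the real constant comes from the GRBM_SOFT_RESET register headers):

#include <stdint.h>

#define SOFT_RESET_SDMA0_SHIFT 20	/* illustrative value, not the real header constant */

/* Instance i's SOFT_RESET_SDMAx bit sits i positions above SDMA0's. */
static uint32_t sdma_soft_reset_mask(unsigned int i)
{
	return 1u << SOFT_RESET_SDMA0_SHIFT << i;
}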
static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
@@ -1729,10 +1853,10 @@ static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
}
-static int sdma_v5_0_set_clockgating_state(void *handle,
+static int sdma_v5_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1753,7 +1877,7 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v5_0_set_powergating_state(void *handle,
+static int sdma_v5_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1778,9 +1902,9 @@ static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
-static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v5_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
uint32_t instance_offset;
@@ -1799,9 +1923,9 @@ static void sdma_v5_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v5_0_dump_ip_state(void *handle)
+static void sdma_v5_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_0);
@@ -1823,7 +1947,6 @@ static void sdma_v5_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
.name = "sdma_v5_0",
.early_init = sdma_v5_0_early_init,
- .late_init = NULL,
.sw_init = sdma_v5_0_sw_init,
.sw_fini = sdma_v5_0_sw_fini,
.hw_init = sdma_v5_0_hw_init,
@@ -1874,6 +1997,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
.preempt_ib = sdma_v5_0_ring_preempt_ib,
+ .reset = sdma_v5_0_reset_queue,
};
static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
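The sw_init hunk above only advertises AMDGPU_RESET_TYPE_PER_QUEUE when the SDMA firmware is new enough to honor a queue-level reset. A sketch of that gating pattern, with the threshold taken from the hunk (the helper name is hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* Per-queue reset needs SDMA firmware >= 35 on the 5.0.x parts listed above. */
static bool sdma_v5_0_fw_has_queue_reset(uint32_t fw_version)
{
	return fw_version >= 35;
}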
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index bc9b240a3488..b1818e87889a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -522,14 +522,17 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
}
/**
- * sdma_v5_2_gfx_resume - setup and start the async dma engines
+ * sdma_v5_2_gfx_resume_instance - start/restart a given sdma engine
*
* @adev: amdgpu_device pointer
+ * @i: instance
+ * @restore: whether to restore the saved wptr on restart
*
- * Set up the gfx DMA ring buffers and enable them.
- * Returns 0 for success, error for failure.
+ * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored.
+ * Returns 0 for success, error for failure.
*/
-static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
+static int sdma_v5_2_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
@@ -539,139 +542,161 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
u32 temp;
u32 wptr_poll_cntl;
u64 wptr_gpu_addr;
- int i, r;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
+ ring = &adev->sdma.instance[i].ring;
- if (!amdgpu_sriov_vf(adev))
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ if (restore) {
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ } else {
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+ }
- /* setup the wptr shadow polling */
- wptr_gpu_addr = ring->wptr_gpu_addr;
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i,
- mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
- SDMA0_GFX_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
- wptr_poll_cntl);
-
- /* set the wb address whether it's enabled or not */
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
- upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
- lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
-
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
-
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
-
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = ring->wptr_gpu_addr;
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i,
+ mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
+ wptr_poll_cntl);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
+ lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+
+ if (!restore)
ring->wptr = 0;
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+ /* before programming wptr to a lesser value, need to set minor_ptr_update first */
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
- }
+ if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ }
- doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
+ doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
+ doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
- OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
- }
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
+ if (ring->use_doorbell) {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ } else {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
+ }
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
- if (amdgpu_sriov_vf(adev))
- sdma_v5_2_ring_set_wptr(ring);
+ if (amdgpu_sriov_vf(adev))
+ sdma_v5_2_ring_set_wptr(ring);
- /* set minor_ptr_update to 0 after wptr programed */
+ /* set minor_ptr_update to 0 after wptr programmed */
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
- /* SRIOV VF has no control of any of registers below */
- if (!amdgpu_sriov_vf(adev)) {
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
- /* enable MCBP */
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
- (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
- SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
-
- /* unhalt engine */
- temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
- }
+ /* SRIOV VF has no control of any of registers below */
+ if (!amdgpu_sriov_vf(adev)) {
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+ /* enable MCBP */
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
+ SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+
+ /* unhalt engine */
+ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
+ }
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
- /* enable DMA IBs */
- WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ /* enable DMA IBs */
+ WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v5_2_ctx_switch_enable(adev, true);
- sdma_v5_2_enable(adev, true);
- }
+ if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
+ sdma_v5_2_ctx_switch_enable(adev, true);
+ sdma_v5_2_enable(adev, true);
+ }
+
+ return amdgpu_ring_test_helper(ring);
+}
- r = amdgpu_ring_test_helper(ring);
+/**
+ * sdma_v5_2_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = sdma_v5_2_gfx_resume_instance(adev, i, false);
if (r)
return r;
}
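The resume sequence above is almost entirely read-modify-write field updates. A self-contained sketch of the idiom behind REG_SET_FIELD (mask and shift are passed explicitly here; the real macro derives them from the generated register headers):

#include <stdint.h>

/* Clear the field's bits, then OR in the new value at the field's offset. */
static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}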
@@ -736,9 +761,9 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v5_2_soft_reset(void *handle)
+static int sdma_v5_2_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 grbm_soft_reset;
u32 tmp;
int i;
@@ -778,6 +803,7 @@ static int sdma_v5_2_soft_reset(void *handle)
static int sdma_v5_2_start(struct amdgpu_device *adev)
{
int r = 0;
+ struct amdgpu_ip_block *ip_block;
if (amdgpu_sriov_vf(adev)) {
sdma_v5_2_ctx_switch_enable(adev, false);
@@ -798,7 +824,11 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000);
}
- sdma_v5_2_soft_reset(adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SDMA);
+ if (!ip_block)
+ return -EINVAL;
+
+ sdma_v5_2_soft_reset(ip_block);
/* unhalt the MEs */
sdma_v5_2_enable(adev, true);
/* enable sdma ring preemption */
@@ -1020,7 +1050,7 @@ static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1180,7 +1210,28 @@ static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
- amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
+ uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
+
+ /* Update the PD address for this VMID. */
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
+ (hub->ctx_addr_distance * vmid),
+ lower_32_bits(pd_addr));
+ amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
+ (hub->ctx_addr_distance * vmid),
+ upper_32_bits(pd_addr));
+
+ /* Trigger invalidation. */
+ amdgpu_ring_write(ring,
+ SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) |
+ SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f));
+ amdgpu_ring_write(ring, req);
+ amdgpu_ring_write(ring, 0xFFFFFFFF);
+ amdgpu_ring_write(ring,
+ SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) |
+ SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F));
}
static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring,
@@ -1216,9 +1267,9 @@ static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
-static int sdma_v5_2_early_init(void *handle)
+static int sdma_v5_2_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_sdma_init_microcode(adev, 0, true);
@@ -1268,11 +1319,11 @@ static unsigned sdma_v5_2_seq_to_trap_id(int seq_num)
return -EINVAL;
}
-static int sdma_v5_2_sw_init(void *handle)
+static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
uint32_t *ptr;
@@ -1306,6 +1357,24 @@ static int sdma_v5_2_sw_init(void *handle)
return r;
}
+ adev->sdma.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 4):
+ if (adev->sdma.instance[0].fw_version >= 76)
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ case IP_VERSION(5, 2, 5):
+ if (adev->sdma.instance[0].fw_version >= 34)
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ default:
+ break;
+ }
+
/* Allocate memory for SDMA IP Dump buffer */
ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
if (ptr)
@@ -1313,17 +1382,22 @@ static int sdma_v5_2_sw_init(void *handle)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ r = amdgpu_sdma_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return r;
}
-static int sdma_v5_2_sw_fini(void *handle)
+static int sdma_v5_2_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ amdgpu_sdma_sysfs_reset_mask_fini(adev);
amdgpu_sdma_destroy_inst_ctx(adev, true);
kfree(adev->sdma.ip_dump);
@@ -1331,16 +1405,16 @@ static int sdma_v5_2_sw_fini(void *handle)
return 0;
}
-static int sdma_v5_2_hw_init(void *handle)
+static int sdma_v5_2_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return sdma_v5_2_start(adev);
}
-static int sdma_v5_2_hw_fini(void *handle)
+static int sdma_v5_2_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1351,18 +1425,14 @@ static int sdma_v5_2_hw_fini(void *handle)
return 0;
}
-static int sdma_v5_2_suspend(void *handle)
+static int sdma_v5_2_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v5_2_hw_fini(adev);
+ return sdma_v5_2_hw_fini(ip_block);
}
-static int sdma_v5_2_resume(void *handle)
+static int sdma_v5_2_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v5_2_hw_init(adev);
+ return sdma_v5_2_hw_init(ip_block);
}
static bool sdma_v5_2_is_idle(void *handle)
@@ -1380,11 +1450,11 @@ static bool sdma_v5_2_is_idle(void *handle)
return true;
}
-static int sdma_v5_2_wait_for_idle(void *handle)
+static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 sdma0, sdma1, sdma2, sdma3;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
@@ -1399,6 +1469,96 @@ static int sdma_v5_2_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
+static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, j, r;
+ u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring)
+ break;
+ }
+
+ if (i == adev->sdma.num_instances) {
+ DRM_ERROR("sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+ /* stop queue */
+ ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+
+ rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+
+ /* engine stop: set SDMAx_F32_CNTL.HALT and the SDMAx_FREEZE freeze bit to 1 */
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+
+ for (j = 0; j < adev->usec_timeout; j++) {
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+
+ if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1)
+ break;
+ udelay(1);
+ }
+
+ /* check that the sdma copy engine is fully idle if FROZEN was not observed */
+ if (j == adev->usec_timeout) {
+ stat1_reg = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG));
+ if ((stat1_reg & 0x3FF) != 0x3FF) {
+ DRM_ERROR("cannot soft reset as sdma not idle\n");
+ r = -ETIMEDOUT;
+ goto err0;
+ }
+ }
+
+ f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+
+ cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+
+ /* soft reset: clear SDMA_GFX_PREEMPT.IB_PREEMPT, then set mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 */
+ preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
+ preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
+
+ soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
+
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+
+ udelay(50);
+
+ soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
+
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+
+ /* unfreeze and unhalt */
+ freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+
+ r = sdma_v5_2_gfx_resume_instance(adev, i, true);
+
+err0:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
+
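The freeze handshake in the function above follows a common poll-with-timeout shape: request FREEZE, then wait for the engine to latch FROZEN, falling back to the idleness check only if the timeout expires. A stand-alone sketch (the register layout and bit positions are hypothetical; the kernel code goes through RREG32/WREG32 and udelay()):

#include <stdbool.h>
#include <stdint.h>

struct freeze_reg { volatile uint32_t v; };	/* bit 0: FREEZE request, bit 1: FROZEN status */

static bool freeze_and_wait(struct freeze_reg *reg, unsigned int timeout_us)
{
	unsigned int t;

	reg->v |= 1u;			/* request FREEZE */
	for (t = 0; t < timeout_us; t++) {
		if (reg->v & 2u)	/* engine reports FROZEN */
			return true;
		/* udelay(1) in the kernel code */
	}
	return false;			/* caller then checks engine idleness */
}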
static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
@@ -1652,10 +1812,10 @@ static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
}
-static int sdma_v5_2_set_clockgating_state(void *handle,
+static int sdma_v5_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1681,7 +1841,7 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
return 0;
}
-static int sdma_v5_2_set_powergating_state(void *handle,
+static int sdma_v5_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1736,9 +1896,9 @@ static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
amdgpu_gfx_off_ctrl(adev, true);
}
-static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v5_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
uint32_t instance_offset;
@@ -1757,9 +1917,9 @@ static void sdma_v5_2_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v5_2_dump_ip_state(void *handle)
+static void sdma_v5_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_5_2);
@@ -1781,7 +1941,6 @@ static void sdma_v5_2_dump_ip_state(void *handle)
static const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.name = "sdma_v5_2",
.early_init = sdma_v5_2_early_init,
- .late_init = NULL,
.sw_init = sdma_v5_2_sw_init,
.sw_fini = sdma_v5_2_sw_fini,
.hw_init = sdma_v5_2_hw_init,
@@ -1834,6 +1993,7 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v5_2_ring_init_cond_exec,
.preempt_ib = sdma_v5_2_ring_preempt_ib,
+ .reset = sdma_v5_2_reset_queue,
};
static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 208a1fa9d4e7..1a023b45f0be 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -469,14 +469,16 @@ static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable)
}
/**
- * sdma_v6_0_gfx_resume - setup and start the async dma engines
+ * sdma_v6_0_gfx_resume_instance - start/restart a given sdma engine
*
* @adev: amdgpu_device pointer
+ * @i: instance
+ * @restore: whether to restore the saved wptr on restart
*
- * Set up the gfx DMA ring buffers and enable them.
- * Returns 0 for success, error for failure.
+ * Set up the gfx DMA ring buffers and enable them. On restart, the saved wptr and rptr are restored.
+ * Returns 0 for success, error for failure.
*/
-static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
+static int sdma_v6_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
@@ -485,132 +487,152 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
u32 doorbell_offset;
u32 temp;
u64 wptr_gpu_addr;
- int i, r;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
- if (!amdgpu_sriov_vf(adev))
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ ring = &adev->sdma.instance[i].ring;
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ if (restore) {
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ } else {
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);
+ }
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = ring->wptr_gpu_addr;
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
+ lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
+
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+
+ if (!restore)
+ ring->wptr = 0;
- /* setup the wptr shadow polling */
- wptr_gpu_addr = ring->wptr_gpu_addr;
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
-
- /* set the wb address whether it's enabled or not */
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
- upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
- lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
-
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
+ /* before programming wptr to a lesser value, need to set minor_ptr_update first */
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+ if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ }
- ring->wptr = 0;
+ doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
+ doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
+ if (ring->use_doorbell) {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ } else {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
+ }
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
- }
+ if (i == 0)
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
- doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
- doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
+ if (amdgpu_sriov_vf(adev))
+ sdma_v6_0_ring_set_wptr(ring);
+
+ /* set minor_ptr_update to 0 after wptr programmed */
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
+
+ /* Set up sdma hang watchdog */
+ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
+ /* 100ms per unit */
+ temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
+ max(adev->usec_timeout/100000, 1));
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
+ SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
- OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
- }
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
-
- if (i == 0)
- adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
-
- if (amdgpu_sriov_vf(adev))
- sdma_v6_0_ring_set_wptr(ring);
-
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
-
- /* Set up sdma hang watchdog */
- temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
- /* 100ms per unit */
- temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
- max(adev->usec_timeout/100000, 1));
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
- (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
- SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);
-
- if (!amdgpu_sriov_vf(adev)) {
- /* unhalt engine */
- temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp);
- }
+ if (!amdgpu_sriov_vf(adev)) {
+ /* unhalt engine */
+ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp);
+ }
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
- ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
- /* enable DMA IBs */
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
+ /* enable DMA IBs */
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
+
+ if (amdgpu_sriov_vf(adev))
+ sdma_v6_0_enable(adev, true);
+
+ return amdgpu_ring_test_helper(ring);
+}
- if (amdgpu_sriov_vf(adev))
- sdma_v6_0_enable(adev, true);
+/**
+ * sdma_v6_0_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
+{
+ int i, r;
- r = amdgpu_ring_test_helper(ring);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = sdma_v6_0_gfx_resume_instance(adev, i, false);
if (r)
return r;
}
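One recurring detail in the resume code: ring->wptr counts dwords, while the RB_WPTR registers take a byte offset, hence the << 2 before the value is split across the LO/HI register pair. A sketch of the split (mirroring what the kernel's lower_32_bits()/upper_32_bits() helpers do):

#include <stdint.h>

static uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

/* Convert a dword-indexed wptr to the byte offset the hardware expects. */
static uint64_t wptr_to_bytes(uint64_t wptr_dw)
{
	return wptr_dw << 2;
}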
@@ -733,9 +755,9 @@ static int sdma_v6_0_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v6_0_soft_reset(void *handle)
+static int sdma_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp;
int i;
@@ -769,9 +791,9 @@ static int sdma_v6_0_soft_reset(void *handle)
return sdma_v6_0_start(adev);
}
-static bool sdma_v6_0_check_soft_reset(void *handle)
+static bool sdma_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
long tmo = msecs_to_jiffies(1000);
@@ -1041,7 +1063,7 @@ static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1272,9 +1294,9 @@ static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev)
}
}
-static int sdma_v6_0_early_init(void *handle)
+static int sdma_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_sdma_init_microcode(adev, 0, true);
@@ -1291,11 +1313,11 @@ static int sdma_v6_0_early_init(void *handle)
return 0;
}
-static int sdma_v6_0_sw_init(void *handle)
+static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
uint32_t *ptr;
@@ -1328,6 +1350,19 @@ static int sdma_v6_0_sw_init(void *handle)
return r;
}
+ adev->sdma.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
+ if (adev->sdma.instance[0].fw_version >= 21)
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ default:
+ break;
+ }
+
if (amdgpu_sdma_ras_sw_init(adev)) {
dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
return -EINVAL;
@@ -1340,17 +1375,22 @@ static int sdma_v6_0_sw_init(void *handle)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
+ r = amdgpu_sdma_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return r;
}
-static int sdma_v6_0_sw_fini(void *handle)
+static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ amdgpu_sdma_sysfs_reset_mask_fini(adev);
amdgpu_sdma_destroy_inst_ctx(adev, true);
kfree(adev->sdma.ip_dump);
@@ -1358,16 +1398,16 @@ static int sdma_v6_0_sw_fini(void *handle)
return 0;
}
-static int sdma_v6_0_hw_init(void *handle)
+static int sdma_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return sdma_v6_0_start(adev);
}
-static int sdma_v6_0_hw_fini(void *handle)
+static int sdma_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1378,18 +1418,14 @@ static int sdma_v6_0_hw_fini(void *handle)
return 0;
}
-static int sdma_v6_0_suspend(void *handle)
+static int sdma_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v6_0_hw_fini(adev);
+ return sdma_v6_0_hw_fini(ip_block);
}
-static int sdma_v6_0_resume(void *handle)
+static int sdma_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v6_0_hw_init(adev);
+ return sdma_v6_0_hw_init(ip_block);
}
static bool sdma_v6_0_is_idle(void *handle)
@@ -1407,11 +1443,11 @@ static bool sdma_v6_0_is_idle(void *handle)
return true;
}
-static int sdma_v6_0_wait_for_idle(void *handle)
+static int sdma_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 sdma0, sdma1;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));
@@ -1469,6 +1505,31 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
return r;
}
+static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring)
+ break;
+ }
+
+ if (i == adev->sdma.num_instances) {
+ DRM_ERROR("sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
+ if (r)
+ return r;
+
+ return sdma_v6_0_gfx_resume_instance(adev, i, true);
+}
+
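
sdma_v6_0_reset_queue above relies on a linear scan to map the ring back to its owning SDMA instance before asking MES to reset the legacy queue and re-priming that one instance with restore set. The same scan reappears in sdma_v7_0_reset_queue further down; isolated as a hypothetical helper, the idiom is:

/* Sketch only, not part of the patch: map a ring back to its SDMA
 * instance index, or -EINVAL if the ring is not an SDMA gfx ring. */
static int sdma_ring_to_instance(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (ring == &adev->sdma.instance[i].ring)
			return i;

	return -EINVAL;
}
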
static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1540,13 +1601,13 @@ static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int sdma_v6_0_set_clockgating_state(void *handle,
+static int sdma_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int sdma_v6_0_set_powergating_state(void *handle,
+static int sdma_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1556,9 +1617,9 @@ static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
}
-static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v6_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
uint32_t instance_offset;
@@ -1577,9 +1638,9 @@ static void sdma_v6_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v6_0_dump_ip_state(void *handle)
+static void sdma_v6_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_6_0);
@@ -1601,7 +1662,6 @@ static void sdma_v6_0_dump_ip_state(void *handle)
const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
.name = "sdma_v6_0",
.early_init = sdma_v6_0_early_init,
- .late_init = NULL,
.sw_init = sdma_v6_0_sw_init,
.sw_fini = sdma_v6_0_sw_fini,
.hw_init = sdma_v6_0_hw_init,
@@ -1652,6 +1712,7 @@ static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
.emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v6_0_ring_init_cond_exec,
.preempt_ib = sdma_v6_0_ring_preempt_ib,
+ .reset = sdma_v6_0_reset_queue,
};
static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1726,7 +1787,7 @@ static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint64_t dst_offset,
uint32_t byte_count)
{
- ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index a8763496aed3..7e10e94624e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -51,6 +51,12 @@ MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
+/* define for compression field for sdma7 */
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_offset 0
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask 0x00000001
+#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift 16
+#define SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift)
+
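
Plugging numbers into the new macro: compress_mask keeps only bit 0 of the argument and compress_shift moves it to bit 16 of the packet header dword, which sdma_v7_0_emit_fill_buffer at the bottom of this file ORs into the constant-fill header as COMPRESS(1). A standalone check (userspace C, for illustration only):

#include <assert.h>

#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask  0x00000001
#define SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift 16
#define SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(x) \
	(((x) & SDMA_PKT_CONSTANT_FILL_HEADER_compress_mask) << \
	 SDMA_PKT_CONSTANT_FILL_HEADER_compress_shift)

int main(void)
{
	/* compress=1 sets bit 16 of the header dword */
	assert(SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(1) == 0x00010000u);
	/* values outside the 1-bit mask are dropped */
	assert(SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(2) == 0);
	return 0;
}
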
static const struct amdgpu_hwip_reg_entry sdma_reg_list_7_0[] = {
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS_REG),
SOC15_REG_ENTRY_STR(GC, 0, regSDMA0_STATUS1_REG),
@@ -484,162 +490,185 @@ static void sdma_v7_0_enable(struct amdgpu_device *adev, bool enable)
}
/**
- * sdma_v7_0_gfx_resume - setup and start the async dma engines
+ * sdma_v7_0_gfx_resume_instance - start/restart a specific sdma engine
*
* @adev: amdgpu_device pointer
+ * @i: instance
+ * @restore: whether to restore the wptr when restarting
*
- * Set up the gfx DMA ring buffers and enable them.
- * Returns 0 for success, error for failure.
+ * Set up the gfx DMA ring buffers and enable them. On restart, the wptr and rptr are restored.
+ * Returns 0 for success, error for failure.
*/
-static int sdma_v7_0_gfx_resume(struct amdgpu_device *adev)
+static int sdma_v7_0_gfx_resume_instance(struct amdgpu_device *adev, int i, bool restore)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
u32 doorbell;
u32 doorbell_offset;
- u32 tmp;
+ u32 temp;
u64 wptr_gpu_addr;
- int i, r;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
+ int r;
- //if (!amdgpu_sriov_vf(adev))
- // WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ ring = &adev->sdma.instance[i].ring;
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ if (restore) {
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ } else {
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);
+ }
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = ring->wptr_gpu_addr;
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
+ lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev))
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
+ else
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
- /* setup the wptr shadow polling */
- wptr_gpu_addr = ring->wptr_gpu_addr;
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
-
- /* set the wb address whether it's enabled or not */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
- upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
- lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
-
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- if (amdgpu_sriov_vf(adev))
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
- else
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, MCU_WPTR_POLL_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, MCU_WPTR_POLL_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
+ if (!restore)
ring->wptr = 0;
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
+ /* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
- }
+ if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ }
- doorbell = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
- doorbell_offset = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
+ doorbell = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
+ doorbell_offset = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
- OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
- }
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
-
- if (i == 0)
- adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
-
- if (amdgpu_sriov_vf(adev))
- sdma_v7_0_ring_set_wptr(ring);
-
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
-
- /* Set up sdma hang watchdog */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
- /* 100ms per unit */
- tmp = REG_SET_FIELD(tmp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
- max(adev->usec_timeout/100000, 1));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), tmp);
-
- /* Set up RESP_MODE to non-copy addresses */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
- tmp = REG_SET_FIELD(tmp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- tmp = REG_SET_FIELD(tmp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), tmp);
-
- /* program default cache read and write policy */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- tmp &= 0xFF0FFF;
- tmp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
- (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), tmp);
-
- if (!amdgpu_sriov_vf(adev)) {
- /* unhalt engine */
- tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL));
- tmp = REG_SET_FIELD(tmp, SDMA0_MCU_CNTL, HALT, 0);
- tmp = REG_SET_FIELD(tmp, SDMA0_MCU_CNTL, RESET, 0);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL), tmp);
- }
+ if (ring->use_doorbell) {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ } else {
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
+ }
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+ if (i == 0)
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
- ib_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev))
+ sdma_v7_0_ring_set_wptr(ring);
+
+ /* set minor_ptr_update to 0 after wptr programed */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
+
+ /* Set up sdma hang watchdog */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
+ /* 100ms per unit */
+ temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
+ max(adev->usec_timeout/100000, 1));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* unhalt engine */
+ temp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_MCU_CNTL, HALT, 0);
+ temp = REG_SET_FIELD(temp, SDMA0_MCU_CNTL, RESET, 0);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL), temp);
+ }
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
+
+ ib_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
- /* enable DMA IBs */
- WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
+ /* enable DMA IBs */
+ WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
+ ring->sched.ready = true;
- ring->sched.ready = true;
+ if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
+ sdma_v7_0_ctx_switch_enable(adev, true);
+ sdma_v7_0_enable(adev, true);
+ }
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v7_0_ctx_switch_enable(adev, true);
- sdma_v7_0_enable(adev, true);
- }
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ ring->sched.ready = false;
- r = amdgpu_ring_test_helper(ring);
- if (r) {
- ring->sched.ready = false;
- return r;
- }
+ return r;
+}
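
In the restore path above, both RPTR and WPTR are reprogrammed from the saved ring->wptr, so a reset queue resumes where it hung instead of replaying from slot 0. Ring pointers are kept as dword indices while the registers take byte offsets, hence the << 2. A sketch of the conversion (assumption: ring->wptr holds a dword index, as the shift implies):

/* dword index -> byte offset, split across the 32-bit register pair */
u64 wptr_bytes = ring->wptr << 2;
u32 wptr_lo = lower_32_bits(wptr_bytes);
u32 wptr_hi = upper_32_bits(wptr_bytes);
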
+/**
+ * sdma_v7_0_gfx_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them.
+ * Returns 0 for success, error for failure.
+ */
+static int sdma_v7_0_gfx_resume(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = sdma_v7_0_gfx_resume_instance(adev, i, false);
+ if (r)
+ return r;
}
return 0;
}
/**
@@ -747,9 +776,9 @@ static int sdma_v7_0_load_microcode(struct amdgpu_device *adev)
return 0;
}
-static int sdma_v7_0_soft_reset(void *handle)
+static int sdma_v7_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp;
int i;
@@ -783,9 +812,9 @@ static int sdma_v7_0_soft_reset(void *handle)
return sdma_v7_0_start(adev);
}
-static bool sdma_v7_0_check_soft_reset(void *handle)
+static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
long tmo = msecs_to_jiffies(1000);
@@ -800,6 +829,31 @@ static bool sdma_v7_0_check_soft_reset(void *handle)
return false;
}
+static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring)
+ break;
+ }
+
+ if (i == adev->sdma.num_instances) {
+ DRM_ERROR("sdma instance not found\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true);
+ if (r)
+ return r;
+
+ return sdma_v7_0_gfx_resume_instance(adev, i, true);
+}
+
/**
* sdma_v7_0_start - setup and start the async dma engines
*
@@ -1054,7 +1108,7 @@ static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
@@ -1253,9 +1307,9 @@ static void sdma_v7_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
-static int sdma_v7_0_early_init(void *handle)
+static int sdma_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_sdma_init_microcode(adev, 0, true);
@@ -1273,11 +1327,11 @@ static int sdma_v7_0_early_init(void *handle)
return 0;
}
-static int sdma_v7_0_sw_init(void *handle)
+static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
uint32_t *ptr;
@@ -1310,6 +1364,13 @@ static int sdma_v7_0_sw_init(void *handle)
return r;
}
+ adev->sdma.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
+ adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ r = amdgpu_sdma_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
/* Allocate memory for SDMA IP Dump buffer */
ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
if (ptr)
@@ -1320,14 +1381,15 @@ static int sdma_v7_0_sw_init(void *handle)
return r;
}
-static int sdma_v7_0_sw_fini(void *handle)
+static int sdma_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ amdgpu_sdma_sysfs_reset_mask_fini(adev);
amdgpu_sdma_destroy_inst_ctx(adev, true);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
@@ -1338,16 +1400,16 @@ static int sdma_v7_0_sw_fini(void *handle)
return 0;
}
-static int sdma_v7_0_hw_init(void *handle)
+static int sdma_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return sdma_v7_0_start(adev);
}
-static int sdma_v7_0_hw_fini(void *handle)
+static int sdma_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1358,18 +1420,14 @@ static int sdma_v7_0_hw_fini(void *handle)
return 0;
}
-static int sdma_v7_0_suspend(void *handle)
+static int sdma_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v7_0_hw_fini(adev);
+ return sdma_v7_0_hw_fini(ip_block);
}
-static int sdma_v7_0_resume(void *handle)
+static int sdma_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return sdma_v7_0_hw_init(adev);
+ return sdma_v7_0_hw_init(ip_block);
}
static bool sdma_v7_0_is_idle(void *handle)
@@ -1387,11 +1445,11 @@ static bool sdma_v7_0_is_idle(void *handle)
return true;
}
-static int sdma_v7_0_wait_for_idle(void *handle)
+static int sdma_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 sdma0, sdma1;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v7_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));
@@ -1522,13 +1580,13 @@ static int sdma_v7_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int sdma_v7_0_set_clockgating_state(void *handle,
+static int sdma_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int sdma_v7_0_set_powergating_state(void *handle,
+static int sdma_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1538,9 +1596,9 @@ static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags)
{
}
-static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p)
+static void sdma_v7_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
uint32_t instance_offset;
@@ -1559,9 +1617,9 @@ static void sdma_v7_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void sdma_v7_0_dump_ip_state(void *handle)
+static void sdma_v7_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t instance_offset;
uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_7_0);
@@ -1634,6 +1692,7 @@ static const struct amdgpu_ring_funcs sdma_v7_0_ring_funcs = {
.emit_reg_write_reg_wait = sdma_v7_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v7_0_ring_init_cond_exec,
.preempt_ib = sdma_v7_0_ring_preempt_ib,
+ .reset = sdma_v7_0_reset_queue,
};
static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1682,11 +1741,12 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint32_t byte_count,
uint32_t copy_flags)
{
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_cm;
max_com = AMDGPU_COPY_FLAGS_GET(copy_flags, MAX_COMPRESSED);
data_format = AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT);
num_type = AMDGPU_COPY_FLAGS_GET(copy_flags, NUMBER_TYPE);
+ write_cm = AMDGPU_COPY_FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE) ? 2 : 1;
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
@@ -1703,7 +1763,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
if ((copy_flags & (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)))
ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(data_format) | SDMA_DCC_NUM_TYPE(num_type) |
((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
- ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
+ ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(write_cm) : 0) |
SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
else
ib->ptr[ib->length_dw++] = 0;
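
The one-line change above lets a caller force uncompressed writes: WRITE_CM now carries 2 when WRITE_COMPRESS_DISABLE is set alongside WRITE_COMPRESSED, and 1 otherwise. Reading 2 as "disable compression" is an inference from the READ_CM(2) usage for READ_DECOMPRESSED in the same expression; the flag names and the 1/2 encodings come from this hunk:

/* WRITE_COMPRESSED clear                  -> WRITE_CM field omitted (0)
 * WRITE_COMPRESSED set, DISABLE clear     -> SDMA_DCC_WRITE_CM(1)
 * WRITE_COMPRESSED set, DISABLE set       -> SDMA_DCC_WRITE_CM(2)
 */
write_cm = AMDGPU_COPY_FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE) ? 2 : 1;
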
@@ -1724,7 +1784,8 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint64_t dst_offset,
uint32_t byte_count)
{
- ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
+ ib->ptr[ib->length_dw++] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) |
+ SDMA_PKT_CONSTANT_FILL_HEADER_COMPRESS(1);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 85235470e872..77ef7da2e4fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -2022,9 +2022,9 @@ static uint32_t si_get_rev_id(struct amdgpu_device *adev)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
-static int si_common_early_init(void *handle)
+static int si_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->smc_rreg = &si_smc_rreg;
adev->smc_wreg = &si_smc_wreg;
@@ -2148,17 +2148,6 @@ static int si_common_early_init(void *handle)
return 0;
}
-static int si_common_sw_init(void *handle)
-{
- return 0;
-}
-
-static int si_common_sw_fini(void *handle)
-{
- return 0;
-}
-
-
static void si_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -2633,9 +2622,9 @@ static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev)
pcie_set_readrq(adev->pdev, 512);
}
-static int si_common_hw_init(void *handle)
+static int si_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
si_fix_pci_max_read_req_size(adev);
si_init_golden_registers(adev);
@@ -2645,23 +2634,14 @@ static int si_common_hw_init(void *handle)
return 0;
}
-static int si_common_hw_fini(void *handle)
+static int si_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
-static int si_common_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_common_hw_fini(adev);
-}
-
-static int si_common_resume(void *handle)
+static int si_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_common_hw_init(adev);
+ return si_common_hw_init(ip_block);
}
static bool si_common_is_idle(void *handle)
@@ -2669,23 +2649,13 @@ static bool si_common_is_idle(void *handle)
return true;
}
-static int si_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int si_common_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int si_common_set_clockgating_state(void *handle,
+static int si_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int si_common_set_powergating_state(void *handle,
+static int si_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -2694,20 +2664,12 @@ static int si_common_set_powergating_state(void *handle,
static const struct amd_ip_funcs si_common_ip_funcs = {
.name = "si_common",
.early_init = si_common_early_init,
- .late_init = NULL,
- .sw_init = si_common_sw_init,
- .sw_fini = si_common_sw_fini,
.hw_init = si_common_hw_init,
.hw_fini = si_common_hw_fini,
- .suspend = si_common_suspend,
.resume = si_common_resume,
.is_idle = si_common_is_idle,
- .wait_for_idle = si_common_wait_for_idle,
- .soft_reset = si_common_soft_reset,
.set_clockgating_state = si_common_set_clockgating_state,
.set_powergating_state = si_common_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version si_common_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 11db5b755832..dbd78d5345a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -286,7 +286,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = -EINVAL;
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
@@ -457,9 +457,9 @@ static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-static int si_dma_early_init(void *handle)
+static int si_dma_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->sdma.num_instances = 2;
@@ -471,11 +471,11 @@ static int si_dma_early_init(void *handle)
return 0;
}
-static int si_dma_sw_init(void *handle)
+static int si_dma_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* DMA0 trap event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
@@ -506,9 +506,9 @@ static int si_dma_sw_init(void *handle)
return r;
}
-static int si_dma_sw_fini(void *handle)
+static int si_dma_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -517,39 +517,34 @@ static int si_dma_sw_fini(void *handle)
return 0;
}
-static int si_dma_hw_init(void *handle)
+static int si_dma_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return si_dma_start(adev);
}
-static int si_dma_hw_fini(void *handle)
+static int si_dma_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- si_dma_stop(adev);
+ si_dma_stop(ip_block->adev);
return 0;
}
-static int si_dma_suspend(void *handle)
+static int si_dma_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_dma_hw_fini(adev);
+ return si_dma_hw_fini(ip_block);
}
-static int si_dma_resume(void *handle)
+static int si_dma_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_dma_hw_init(adev);
+ return si_dma_hw_init(ip_block);
}
static bool si_dma_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
u32 tmp = RREG32(SRBM_STATUS2);
if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
@@ -558,20 +553,20 @@ static bool si_dma_is_idle(void *handle)
return true;
}
-static int si_dma_wait_for_idle(void *handle)
+static int si_dma_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (si_dma_is_idle(handle))
+ if (si_dma_is_idle(adev))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
-static int si_dma_soft_reset(void *handle)
+static int si_dma_soft_reset(struct amdgpu_ip_block *ip_block)
{
DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
return 0;
@@ -634,13 +629,13 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
-static int si_dma_set_clockgating_state(void *handle,
+static int si_dma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
u32 orig, data, offset;
int i;
bool enable;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
enable = (state == AMD_CG_STATE_GATE);
@@ -677,12 +672,12 @@ static int si_dma_set_clockgating_state(void *handle,
return 0;
}
-static int si_dma_set_powergating_state(void *handle,
+static int si_dma_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
WREG32(DMA_PGFSM_WRITE, 0x00002000);
WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
@@ -696,7 +691,6 @@ static int si_dma_set_powergating_state(void *handle,
static const struct amd_ip_funcs si_dma_ip_funcs = {
.name = "si_dma",
.early_init = si_dma_early_init,
- .late_init = NULL,
.sw_init = si_dma_sw_init,
.sw_fini = si_dma_sw_fini,
.hw_init = si_dma_hw_init,
@@ -708,8 +702,6 @@ static const struct amd_ip_funcs si_dma_ip_funcs = {
.soft_reset = si_dma_soft_reset,
.set_clockgating_state = si_dma_set_clockgating_state,
.set_powergating_state = si_dma_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 5237395e4fab..a32b6243c1f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -156,19 +156,19 @@ static void si_ih_set_rptr(struct amdgpu_device *adev,
WREG32(IH_RB_RPTR, ih->rptr);
}
-static int si_ih_early_init(void *handle)
+static int si_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
si_ih_set_interrupt_funcs(adev);
return 0;
}
-static int si_ih_sw_init(void *handle)
+static int si_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
@@ -177,43 +177,37 @@ static int si_ih_sw_init(void *handle)
return amdgpu_irq_init(adev);
}
-static int si_ih_sw_fini(void *handle)
+static int si_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int si_ih_hw_init(void *handle)
+static int si_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return si_ih_irq_init(adev);
}
-static int si_ih_hw_fini(void *handle)
+static int si_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- si_ih_irq_disable(adev);
+ si_ih_irq_disable(ip_block->adev);
return 0;
}
-static int si_ih_suspend(void *handle)
+static int si_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_ih_hw_fini(adev);
+ return si_ih_hw_fini(ip_block);
}
-static int si_ih_resume(void *handle)
+static int si_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return si_ih_hw_init(adev);
+ return si_ih_hw_init(ip_block);
}
static bool si_ih_is_idle(void *handle)
@@ -227,22 +221,22 @@ static bool si_ih_is_idle(void *handle)
return true;
}
-static int si_ih_wait_for_idle(void *handle)
+static int si_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (si_ih_is_idle(handle))
+ if (si_ih_is_idle(adev))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
-static int si_ih_soft_reset(void *handle)
+static int si_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(SRBM_STATUS);
@@ -269,13 +263,13 @@ static int si_ih_soft_reset(void *handle)
return 0;
}
-static int si_ih_set_clockgating_state(void *handle,
+static int si_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int si_ih_set_powergating_state(void *handle,
+static int si_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -284,7 +278,6 @@ static int si_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs si_ih_ip_funcs = {
.name = "si_ih",
.early_init = si_ih_early_init,
- .late_init = NULL,
.sw_init = si_ih_sw_init,
.sw_fini = si_ih_sw_fini,
.hw_init = si_ih_hw_init,
@@ -296,8 +289,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
.soft_reset = si_ih_soft_reset,
.set_clockgating_state = si_ih_set_clockgating_state,
.set_powergating_state = si_ih_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs si_ih_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
index 481217c32d85..2594467bdd87 100644
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -81,15 +81,9 @@ static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
-
- if (r) {
- dev_err(adev->dev,
- "suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
- adev->ip_blocks[i].status.hw = false;
}
return 0;
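
The helper amdgpu_ip_block_suspend() replaces the open-coded suspend-call/log-failure/mark-not-hw sequence deleted above. Its body is not part of this diff; reconstructed from the removed lines, it plausibly looks like the sketch below:

/* Reconstructed sketch, not quoted from amdgpu */
int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block)
{
	int r = ip_block->version->funcs->suspend(ip_block);

	if (r) {
		/* the removed code logged the failing block's name here */
		return r;
	}
	ip_block->status.hw = false;

	return 0;
}
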
@@ -175,15 +169,9 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- dev_err(adev->dev,
- "resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = true;
}
}
@@ -193,15 +181,9 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type ==
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- dev_err(adev->dev,
- "resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = true;
}
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -213,7 +195,7 @@ static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(
- (void *)adev);
+ &adev->ip_blocks[i]);
if (r) {
dev_err(adev->dev,
"late_init of IP block <%s> failed %d after reset\n",
@@ -238,6 +220,7 @@ sienna_cichlid_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
int r;
struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle;
+ amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_RESET_RECOVERY);
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
r = sienna_cichlid_mode2_restore_ip(tmp_adev);
@@ -255,6 +238,7 @@ sienna_cichlid_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+ amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_DEFAULT);
r = amdgpu_ib_ring_tests(tmp_adev);
if (r) {
dev_err(tmp_adev->dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
index 0af648931df5..70569ea906bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
@@ -80,15 +80,9 @@ static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_MES))
continue;
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
-
- if (r) {
- dev_err(adev->dev,
- "suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
- adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -186,15 +180,9 @@ static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type ==
AMD_IP_BLOCK_TYPE_SDMA))
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- dev_err(adev->dev,
- "resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
-
- adev->ip_blocks[i].status.hw = true;
}
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -208,7 +196,7 @@ static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->funcs->late_init) {
r = adev->ip_blocks[i].version->funcs->late_init(
- (void *)adev);
+ &adev->ip_blocks[i]);
if (r) {
dev_err(adev->dev,
"late_init of IP block <%s> failed %d after reset\n",
@@ -233,6 +221,7 @@ smu_v13_0_10_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
int r;
struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle;
+ amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_RESET_RECOVERY);
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
r = smu_v13_0_10_mode2_restore_ip(tmp_adev);
@@ -246,6 +235,7 @@ smu_v13_0_10_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+ amdgpu_set_init_level(tmp_adev, AMDGPU_INIT_LEVEL_DEFAULT);
r = amdgpu_ib_ring_tests(tmp_adev);
if (r) {
dev_err(tmp_adev->dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8d16dacdc172..a59b4c36cad7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -90,8 +90,8 @@ static const struct amd_ip_funcs soc15_common_ip_funcs;
/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_encode =
@@ -171,6 +171,24 @@ static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = {
.codec_array = NULL,
};
+static const struct amdgpu_video_codecs vcn_5_0_1_video_codecs_encode_vcn0 = {
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+static const struct amdgpu_video_codec_info vcn_5_0_1_video_codecs_decode_array_vcn0[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs vcn_5_0_1_video_codecs_decode_vcn0 = {
+ .codec_count = ARRAY_SIZE(vcn_5_0_1_video_codecs_decode_array_vcn0),
+ .codec_array = vcn_5_0_1_video_codecs_decode_array_vcn0,
+};
+
static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
@@ -209,6 +227,12 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
else
*codecs = &vcn_4_0_3_video_codecs_decode;
return 0;
+ case IP_VERSION(5, 0, 1):
+ if (encode)
+ *codecs = &vcn_5_0_1_video_codecs_encode_vcn0;
+ else
+ *codecs = &vcn_5_0_1_video_codecs_decode_vcn0;
+ return 0;
default:
return -EINVAL;
}
@@ -327,6 +351,7 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) ||
amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
return 10000;
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) ||
@@ -556,6 +581,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
break;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
/* Use gpu_recovery param to target a reset method.
* Enable triggering of GPU reset only if specified
* by module parameter.
@@ -578,20 +604,16 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
{
- u32 sol_reg;
-
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-
/* Will reset for the following suspend abort cases.
- * 1) Only reset limit on APU side, dGPU hasn't checked yet.
- * 2) S3 suspend abort and TOS already launched.
+ * 1) Only reset on the APU side; the dGPU side hasn't been checked yet.
+ * 2) S3 suspend aborted during a normal S3 suspend or while
+ * running the PM core test.
*/
if (adev->flags & AMD_IS_APU && adev->in_s3 &&
- !adev->suspend_complete &&
- sol_reg)
+ !pm_resume_via_firmware())
return true;
-
- return false;
+ else
+ return false;
}
static int soc15_asic_reset(struct amdgpu_device *adev)
@@ -601,11 +623,17 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
* successfully. So now, temporarily enable it for the
* S3 suspend abort case.
*/
- if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
- (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
- !soc15_need_reset_on_resume(adev))
+
+ if ((adev->apu_flags & AMD_APU_IS_PICASSO ||
+ !(adev->apu_flags & AMD_APU_IS_RAVEN)) &&
+ soc15_need_reset_on_resume(adev))
+ goto asic_reset;
+
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+ (adev->apu_flags & AMD_APU_IS_RAVEN2))
return 0;
+asic_reset:
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_PCI:
dev_info(adev->dev, "PCI reset\n");
@@ -829,6 +857,10 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_RENOIR)
return true;
+ if (amdgpu_gmc_need_reset_on_init(adev))
+ return true;
+ if (amdgpu_psp_tos_reload_needed(adev))
+ return true;
/* Just return false for soc15 GPUs. Reset does not seem to
* be necessary.
*/
@@ -929,9 +961,9 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
.get_reg_state = &aqua_vanjaram_get_reg_state,
};
-static int soc15_common_early_init(void *handle)
+static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
adev->smc_rreg = NULL;
@@ -1171,6 +1203,7 @@ static int soc15_common_early_init(void *handle)
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
adev->asic_funcs = &aqua_vanjaram_asic_funcs;
adev->cg_flags =
AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG |
@@ -1198,9 +1231,9 @@ static int soc15_common_early_init(void *handle)
return 0;
}
-static int soc15_common_late_init(void *handle)
+static int soc15_common_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
@@ -1213,9 +1246,9 @@ static int soc15_common_late_init(void *handle)
return 0;
}
-static int soc15_common_sw_init(void *handle)
+static int soc15_common_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_add_irq_id(adev);
@@ -1227,9 +1260,9 @@ static int soc15_common_sw_init(void *handle)
return 0;
}
-static int soc15_common_sw_fini(void *handle)
+static int soc15_common_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->df.funcs &&
adev->df.funcs->sw_fini)
@@ -1251,9 +1284,9 @@ static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
}
}
-static int soc15_common_hw_init(void *handle)
+static int soc15_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* enable aspm */
soc15_program_aspm(adev);
@@ -1280,9 +1313,9 @@ static int soc15_common_hw_init(void *handle)
return 0;
}
-static int soc15_common_hw_fini(void *handle)
+static int soc15_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because soc15_enable_doorbell_aperture
@@ -1295,7 +1328,12 @@ static int soc15_common_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
+ /*
+ * For minimal init, late_init is not called, hence RAS irqs are not
+ * enabled.
+ */
if ((!amdgpu_sriov_vf(adev)) &&
+ (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
adev->nbio.ras_if &&
amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
if (adev->nbio.ras &&
@@ -1309,22 +1347,20 @@ static int soc15_common_hw_fini(void *handle)
return 0;
}
-static int soc15_common_suspend(void *handle)
+static int soc15_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return soc15_common_hw_fini(adev);
+ return soc15_common_hw_fini(ip_block);
}
-static int soc15_common_resume(void *handle)
+static int soc15_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (soc15_need_reset_on_resume(adev)) {
dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
soc15_asic_reset(adev);
}
- return soc15_common_hw_init(adev);
+ return soc15_common_hw_init(ip_block);
}
static bool soc15_common_is_idle(void *handle)
@@ -1332,16 +1368,6 @@ static bool soc15_common_is_idle(void *handle)
return true;
}
-static int soc15_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int soc15_common_soft_reset(void *handle)
-{
- return 0;
-}
-
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
uint32_t def, data;
@@ -1386,10 +1412,10 @@ static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
-static int soc15_common_set_clockgating_state(void *handle,
+static int soc15_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1454,6 +1480,7 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) &&
(amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) &&
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 12)) &&
(amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14))) {
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
@@ -1474,7 +1501,7 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
adev->df.funcs->get_clockgating_state(adev, flags);
}
-static int soc15_common_set_powergating_state(void *handle,
+static int soc15_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* todo */
@@ -1492,11 +1519,7 @@ static const struct amd_ip_funcs soc15_common_ip_funcs = {
.suspend = soc15_common_suspend,
.resume = soc15_common_resume,
.is_idle = soc15_common_is_idle,
- .wait_for_idle = soc15_common_wait_for_idle,
- .soft_reset = soc15_common_soft_reset,
.set_clockgating_state = soc15_common_set_clockgating_state,
.set_powergating_state = soc15_common_set_powergating_state,
.get_clockgating_state= soc15_common_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index d30ad7d56def..62ad67d0b598 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -49,13 +49,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
@@ -96,14 +96,14 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = {
/* SRIOV SOC21, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
@@ -556,9 +556,9 @@ static const struct amdgpu_asic_funcs soc21_asic_funcs = {
.update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};
-static int soc21_common_early_init(void *handle)
+static int soc21_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
adev->smc_rreg = NULL;
@@ -794,9 +794,9 @@ static int soc21_common_early_init(void *handle)
return 0;
}
-static int soc21_common_late_init(void *handle)
+static int soc21_common_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
@@ -832,9 +832,9 @@ static int soc21_common_late_init(void *handle)
return 0;
}
-static int soc21_common_sw_init(void *handle)
+static int soc21_common_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_add_irq_id(adev);
@@ -842,14 +842,9 @@ static int soc21_common_sw_init(void *handle)
return 0;
}
-static int soc21_common_sw_fini(void *handle)
-{
- return 0;
-}
-
-static int soc21_common_hw_init(void *handle)
+static int soc21_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* enable aspm */
soc21_program_aspm(adev);
@@ -867,9 +862,9 @@ static int soc21_common_hw_init(void *handle)
return 0;
}
-static int soc21_common_hw_fini(void *handle)
+static int soc21_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because soc21_enable_doorbell_aperture
@@ -890,11 +885,9 @@ static int soc21_common_hw_fini(void *handle)
return 0;
}
-static int soc21_common_suspend(void *handle)
+static int soc21_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return soc21_common_hw_fini(adev);
+ return soc21_common_hw_fini(ip_block);
}
static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
@@ -904,9 +897,10 @@ static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
/* Will reset for the following suspend abort cases.
* 1) Only reset dGPU side.
* 2) S3 suspend got aborted and TOS is active.
+ * In dGPU suspend abort cases, the SOL value
+ * remains zero at this resume point.
*/
- if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
- !adev->suspend_complete) {
+ if (!(adev->flags & AMD_IS_APU) && adev->in_s3) {
sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
msleep(100);
sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
@@ -917,16 +911,16 @@ static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
return false;
}
-static int soc21_common_resume(void *handle)
+static int soc21_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (soc21_need_reset_on_resume(adev)) {
dev_info(adev->dev, "S3 suspend aborted, resetting...");
soc21_asic_reset(adev);
}
- return soc21_common_hw_init(adev);
+ return soc21_common_hw_init(ip_block);
}
static bool soc21_common_is_idle(void *handle)
@@ -934,20 +928,10 @@ static bool soc21_common_is_idle(void *handle)
return true;
}
-static int soc21_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int soc21_common_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int soc21_common_set_clockgating_state(void *handle,
+static int soc21_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(4, 3, 0):
@@ -970,10 +954,10 @@ static int soc21_common_set_clockgating_state(void *handle,
return 0;
}
-static int soc21_common_set_powergating_state(void *handle,
+static int soc21_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
case IP_VERSION(6, 0, 0):
@@ -1002,17 +986,12 @@ static const struct amd_ip_funcs soc21_common_ip_funcs = {
.early_init = soc21_common_early_init,
.late_init = soc21_common_late_init,
.sw_init = soc21_common_sw_init,
- .sw_fini = soc21_common_sw_fini,
.hw_init = soc21_common_hw_init,
.hw_fini = soc21_common_hw_fini,
.suspend = soc21_common_suspend,
.resume = soc21_common_resume,
.is_idle = soc21_common_is_idle,
- .wait_for_idle = soc21_common_wait_for_idle,
- .soft_reset = soc21_common_soft_reset,
.set_clockgating_state = soc21_common_set_clockgating_state,
.set_powergating_state = soc21_common_set_powergating_state,
.get_clockgating_state = soc21_common_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index fd4c3d4f8387..6b8e078ee7c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -48,7 +48,7 @@
static const struct amd_ip_funcs soc24_common_ip_funcs;
static const struct amdgpu_video_codec_info vcn_5_0_0_video_codecs_encode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
@@ -363,9 +363,9 @@ static const struct amdgpu_asic_funcs soc24_asic_funcs = {
.update_umd_stable_pstate = &soc24_update_umd_stable_pstate,
};
-static int soc24_common_early_init(void *handle)
+static int soc24_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->nbio.funcs->set_reg_remap(adev);
adev->smc_rreg = NULL;
@@ -440,12 +440,22 @@ static int soc24_common_early_init(void *handle)
return 0;
}
-static int soc24_common_late_init(void *handle)
+static int soc24_common_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
+ } else {
+ if (adev->nbio.ras &&
+ adev->nbio.ras_err_event_athub_irq.funcs)
+ /* There is no need to fail GPU late init
+ * if enabling the athub_err_event interrupt fails:
+ * nbif v6_3_1 only supports fatal error handling,
+ * so just enable the interrupt directly.
+ */
+ amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
/* Enable selfring doorbell aperture late because doorbell BAR
* aperture will change if resize BAR successfully in gmc sw_init.
@@ -455,9 +465,9 @@ static int soc24_common_late_init(void *handle)
return 0;
}
-static int soc24_common_sw_init(void *handle)
+static int soc24_common_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_nv_mailbox_add_irq_id(adev);
@@ -465,14 +475,9 @@ static int soc24_common_sw_init(void *handle)
return 0;
}
-static int soc24_common_sw_fini(void *handle)
-{
- return 0;
-}
-
-static int soc24_common_hw_init(void *handle)
+static int soc24_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* enable aspm */
soc24_program_aspm(adev);
@@ -494,9 +499,9 @@ static int soc24_common_hw_init(void *handle)
return 0;
}
-static int soc24_common_hw_fini(void *handle)
+static int soc24_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because soc21_enable_doorbell_aperture
@@ -506,24 +511,25 @@ static int soc24_common_hw_fini(void *handle)
adev->nbio.funcs->enable_doorbell_aperture(adev, false);
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_put_irq(adev);
+ } else {
+ if (adev->nbio.ras &&
+ adev->nbio.ras_err_event_athub_irq.funcs)
+ amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
return 0;
}
-static int soc24_common_suspend(void *handle)
+static int soc24_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return soc24_common_hw_fini(adev);
+ return soc24_common_hw_fini(ip_block);
}
-static int soc24_common_resume(void *handle)
+static int soc24_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return soc24_common_hw_init(adev);
+ return soc24_common_hw_init(ip_block);
}
static bool soc24_common_is_idle(void *handle)
@@ -531,20 +537,10 @@ static bool soc24_common_is_idle(void *handle)
return true;
}
-static int soc24_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int soc24_common_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int soc24_common_set_clockgating_state(void *handle,
+static int soc24_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(6, 3, 1):
@@ -561,10 +557,10 @@ static int soc24_common_set_clockgating_state(void *handle,
return 0;
}
-static int soc24_common_set_powergating_state(void *handle,
+static int soc24_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
case IP_VERSION(7, 0, 0):
@@ -595,14 +591,11 @@ static const struct amd_ip_funcs soc24_common_ip_funcs = {
.early_init = soc24_common_early_init,
.late_init = soc24_common_late_init,
.sw_init = soc24_common_sw_init,
- .sw_fini = soc24_common_sw_fini,
.hw_init = soc24_common_hw_init,
.hw_fini = soc24_common_hw_fini,
.suspend = soc24_common_suspend,
.resume = soc24_common_resume,
.is_idle = soc24_common_is_idle,
- .wait_for_idle = soc24_common_wait_for_idle,
- .soft_reset = soc24_common_soft_reset,
.set_clockgating_state = soc24_common_set_clockgating_state,
.set_powergating_state = soc24_common_set_powergating_state,
.get_clockgating_state = soc24_common_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 3ac56a9645eb..64891f099366 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -30,6 +30,9 @@
#define RSP_ID_MASK (1U << 31)
#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+/* invalid node instance value */
+#define TA_RAS_INV_NODE 0xffff
+
/* RAS related enumerations */
/**********************************************************/
enum ras_command {
@@ -113,6 +116,14 @@ enum ta_ras_address_type {
TA_RAS_PA_TO_MCA,
};
+enum ta_ras_nps_mode {
+ TA_RAS_UNKNOWN_MODE = 0,
+ TA_RAS_NPS1_MODE = 1,
+ TA_RAS_NPS2_MODE = 2,
+ TA_RAS_NPS4_MODE = 4,
+ TA_RAS_NPS8_MODE = 8,
+};
+
/* Input/output structures for RAS commands */
/**********************************************************/
@@ -139,6 +150,7 @@ struct ta_ras_init_flags {
uint8_t dgpu_mode;
uint16_t xcc_mask;
uint8_t channel_dis_num;
+ uint8_t nps_mode;
};
struct ta_ras_mca_addr {
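Annotation: the new ta_ras_nps_mode enumerators are deliberately equal to the partition count they describe (NPS1=1, NPS2=2, NPS4=4, NPS8=8), so a driver-to-TA translation can be a checked cast. A standalone sketch under that assumption — the helper name and the simplified amdgpu-side enum are invented for illustration:

#include <stdio.h>

/* The ta_ras_nps_mode values come from ta_ras_if.h; the amdgpu-side
 * enum here is a simplified stand-in. */
enum amdgpu_memory_partition { NPS1 = 1, NPS2 = 2, NPS4 = 4, NPS8 = 8 };
enum ta_ras_nps_mode {
	TA_RAS_UNKNOWN_MODE = 0,
	TA_RAS_NPS1_MODE = 1,
	TA_RAS_NPS2_MODE = 2,
	TA_RAS_NPS4_MODE = 4,
	TA_RAS_NPS8_MODE = 8,
};

/* Because each enumerator equals its partition count, translation is
 * a cast plus a range check. */
static enum ta_ras_nps_mode to_ta_nps(enum amdgpu_memory_partition m)
{
	switch (m) {
	case NPS1: case NPS2: case NPS4: case NPS8:
		return (enum ta_ras_nps_mode)m;
	default:
		return TA_RAS_UNKNOWN_MODE;
	}
}

int main(void)
{
	printf("NPS4 -> %d\n", to_ta_nps(NPS4));
	return 0;
}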
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
index 00d8bdb8254f..9ec2e03d41c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -31,10 +31,12 @@
* Secure Display Command ID
*/
enum ta_securedisplay_command {
- /* Query whether TA is responding used only for validation purpose */
+ /* Query whether the TA is responding. It is used only for validation purposes */
TA_SECUREDISPLAY_COMMAND__QUERY_TA = 1,
/* Send region of Interest and CRC value to I2C */
TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC = 2,
+ /* V2 command to send multiple regions of interest and CRC values to I2C */
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2 = 3,
/* Maximum Command ID */
TA_SECUREDISPLAY_COMMAND__MAX_ID = 0x7FFFFFFF,
};
@@ -83,6 +85,8 @@ enum ta_securedisplay_ta_query_cmd_ret {
enum ta_securedisplay_buffer_size {
/* 15 bytes = 8 byte (ROI) + 6 byte(CRC) + 1 byte(phy_id) */
TA_SECUREDISPLAY_I2C_BUFFER_SIZE = 15,
+ /* 16 bytes = 8 byte (ROI) + 6 byte(CRC) + 1 byte(phy_id) + 1 byte(roi_idx) */
+ TA_SECUREDISPLAY_V2_I2C_BUFFER_SIZE = 16,
};
/** Input/output structures for Secure Display commands */
@@ -95,7 +99,15 @@ enum ta_securedisplay_buffer_size {
* Physical ID to determine which DIO scratch register should be used to get ROI
*/
struct ta_securedisplay_send_roi_crc_input {
- uint32_t phy_id; /* Physical ID */
+ /* Physical ID */
+ uint32_t phy_id;
+};
+
+struct ta_securedisplay_send_roi_crc_v2_input {
+ /* Physical ID */
+ uint32_t phy_id;
+ /* Region of interest index */
+ uint8_t roi_idx;
};
/** @union ta_securedisplay_cmd_input
@@ -104,6 +116,8 @@ struct ta_securedisplay_send_roi_crc_input {
union ta_securedisplay_cmd_input {
/* send ROI and CRC input buffer format */
struct ta_securedisplay_send_roi_crc_input send_roi_crc;
+ /* send ROI and CRC input buffer format, v2 adds a ROI index */
+ struct ta_securedisplay_send_roi_crc_v2_input send_roi_crc_v2;
uint32_t reserved[4];
};
@@ -128,6 +142,10 @@ struct ta_securedisplay_send_roi_crc_output {
uint8_t reserved;
};
+struct ta_securedisplay_send_roi_crc_v2_output {
+ uint8_t i2c_buf[TA_SECUREDISPLAY_V2_I2C_BUFFER_SIZE]; /* I2C buffer */
+};
+
/** @union ta_securedisplay_cmd_output
* Output buffer
*/
@@ -136,6 +154,8 @@ union ta_securedisplay_cmd_output {
struct ta_securedisplay_query_ta_output query_ta;
/* Send ROI CRC output buffer format used only for validation purpose */
struct ta_securedisplay_send_roi_crc_output send_roi_crc;
+ /* Send ROI CRC V2 output buffer format, used only for validation purposes */
+ struct ta_securedisplay_send_roi_crc_v2_output send_roi_crc_v2;
uint32_t reserved[4];
};
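Annotation: the v2 buffer size follows directly from the header comments — the v1 payload (8-byte ROI + 6-byte CRC + 1-byte phy_id = 15 bytes) grows by one roi_idx byte to 16. A standalone sketch restating that arithmetic; the macro names here are ad hoc, only the byte counts come from the header:

#include <stdio.h>

/* Byte budget taken from the header comments above. */
#define ROI_BYTES	8
#define CRC_BYTES	6
#define PHY_ID_BYTES	1
#define ROI_IDX_BYTES	1

#define V1_I2C_BUFFER_SIZE (ROI_BYTES + CRC_BYTES + PHY_ID_BYTES)
#define V2_I2C_BUFFER_SIZE (V1_I2C_BUFFER_SIZE + ROI_IDX_BYTES)

int main(void)
{
	/* Matches TA_SECUREDISPLAY_I2C_BUFFER_SIZE (15) and the new
	 * TA_SECUREDISPLAY_V2_I2C_BUFFER_SIZE (16). */
	printf("v1: %d bytes, v2: %d bytes\n",
	       V1_I2C_BUFFER_SIZE, V2_I2C_BUFFER_SIZE);
	return 0;
}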
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 24d49d813607..0968e551f7b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -283,9 +283,9 @@ static void tonga_ih_set_rptr(struct amdgpu_device *adev,
}
}
-static int tonga_ih_early_init(void *handle)
+static int tonga_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -297,10 +297,10 @@ static int tonga_ih_early_init(void *handle)
return 0;
}
-static int tonga_ih_sw_init(void *handle)
+static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true);
if (r)
@@ -314,9 +314,9 @@ static int tonga_ih_sw_init(void *handle)
return r;
}
-static int tonga_ih_sw_fini(void *handle)
+static int tonga_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
@@ -324,10 +324,10 @@ static int tonga_ih_sw_fini(void *handle)
return 0;
}
-static int tonga_ih_hw_init(void *handle)
+static int tonga_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = tonga_ih_irq_init(adev);
if (r)
@@ -336,27 +336,21 @@ static int tonga_ih_hw_init(void *handle)
return 0;
}
-static int tonga_ih_hw_fini(void *handle)
+static int tonga_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- tonga_ih_irq_disable(adev);
+ tonga_ih_irq_disable(ip_block->adev);
return 0;
}
-static int tonga_ih_suspend(void *handle)
+static int tonga_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return tonga_ih_hw_fini(adev);
+ return tonga_ih_hw_fini(ip_block);
}
-static int tonga_ih_resume(void *handle)
+static int tonga_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return tonga_ih_hw_init(adev);
+ return tonga_ih_hw_init(ip_block);
}
static bool tonga_ih_is_idle(void *handle)
@@ -370,11 +364,11 @@ static bool tonga_ih_is_idle(void *handle)
return true;
}
-static int tonga_ih_wait_for_idle(void *handle)
+static int tonga_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -386,9 +380,9 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static bool tonga_ih_check_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -405,29 +399,27 @@ static bool tonga_ih_check_soft_reset(void *handle)
}
}
-static int tonga_ih_pre_soft_reset(void *handle)
+static int tonga_ih_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->irq.srbm_soft_reset)
+ if (!ip_block->adev->irq.srbm_soft_reset)
return 0;
- return tonga_ih_hw_fini(adev);
+ return tonga_ih_hw_fini(ip_block);
}
-static int tonga_ih_post_soft_reset(void *handle)
+static int tonga_ih_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->irq.srbm_soft_reset)
return 0;
- return tonga_ih_hw_init(adev);
+ return tonga_ih_hw_init(ip_block);
}
-static int tonga_ih_soft_reset(void *handle)
+static int tonga_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset;
if (!adev->irq.srbm_soft_reset)
@@ -456,13 +448,13 @@ static int tonga_ih_soft_reset(void *handle)
return 0;
}
-static int tonga_ih_set_clockgating_state(void *handle,
+static int tonga_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int tonga_ih_set_powergating_state(void *handle,
+static int tonga_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -471,7 +463,6 @@ static int tonga_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.name = "tonga_ih",
.early_init = tonga_ih_early_init,
- .late_init = NULL,
.sw_init = tonga_ih_sw_init,
.sw_fini = tonga_ih_sw_fini,
.hw_init = tonga_ih_hw_init,
@@ -486,8 +477,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.post_soft_reset = tonga_ih_post_soft_reset,
.set_clockgating_state = tonga_ih_set_clockgating_state,
.set_powergating_state = tonga_ih_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
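Annotation: tonga_ih keeps the full soft-reset hook set, and the ordering contract among those hooks is easy to lose in the signature churn: check_soft_reset records whether the block needs resetting, pre_soft_reset quiesces it (hw_fini), soft_reset pulses the SRBM bits, and post_soft_reset brings it back (hw_init). A toy standalone model of that sequence — the driver core, not this file, is what actually drives it:

#include <stdbool.h>
#include <stdio.h>

static bool srbm_soft_reset;

static bool check_soft_reset(void)
{
	srbm_soft_reset = true;	/* pretend the status register flagged IH */
	return srbm_soft_reset;
}

static int pre_soft_reset(void)
{
	if (!srbm_soft_reset)
		return 0;
	printf("hw_fini: quiesce the ring before reset\n");
	return 0;
}

static int soft_reset(void)
{
	if (!srbm_soft_reset)
		return 0;
	printf("pulse SRBM_SOFT_RESET\n");
	return 0;
}

static int post_soft_reset(void)
{
	if (!srbm_soft_reset)
		return 0;
	printf("hw_init: reprogram and re-enable the ring\n");
	return 0;
}

int main(void)
{
	if (check_soft_reset()) {
		pre_soft_reset();
		soft_reset();
		post_soft_reset();
	}
	return 0;
}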
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index 1a8ea834efa6..a7b9c358a2d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -173,156 +173,96 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
umc_v12_0_reset_error_count(adev);
}
-static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
+static int umc_v12_0_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
- struct ta_ras_query_address_input *addr_in)
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out,
+ bool dump_addr)
{
- uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column, err_addr;
- struct ta_ras_query_address_output addr_out;
+ uint32_t col, col_lower, row, row_lower, bank;
+ uint32_t channel_index = 0, umc_inst = 0;
+ uint32_t i, loop_bits[UMC_V12_0_RETIRE_LOOP_BITS];
+ uint64_t soc_pa, column, err_addr;
+ struct ta_ras_query_address_output addr_out_tmp;
+ struct ta_ras_query_address_output *paddr_out;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ int ret = 0;
+
+ if (!addr_out)
+ paddr_out = &addr_out_tmp;
+ else
+ paddr_out = addr_out;
- err_addr = addr_in->ma.err_addr;
- addr_in->addr_type = TA_RAS_MCA_TO_PA;
- if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
- dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
- err_addr);
+ err_addr = bank = 0;
+ if (addr_in) {
+ err_addr = addr_in->ma.err_addr;
+ addr_in->addr_type = TA_RAS_MCA_TO_PA;
+ ret = psp_ras_query_address(&adev->psp, addr_in, paddr_out);
+ if (ret) {
+ dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
+ err_addr);
- return;
- }
+ goto out;
+ }
- soc_pa = addr_out.pa.pa;
- bank = addr_out.pa.bank;
- channel_index = addr_out.pa.channel_idx;
-
- col = (err_addr >> 1) & 0x1fULL;
- row = (err_addr >> 10) & 0x3fffULL;
- row_xor = row ^ (0x1ULL << 13);
- /* clear [C3 C2] in soc physical address */
- soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
- /* clear [C4] in soc physical address */
- soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
-
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
- retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
- /* include column bit 0 and 1 */
- col &= 0x3;
- col |= (column << 2);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, addr_in->ma.umc_inst);
-
- /* shift R13 bit */
- retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row_xor, col, bank, channel_index);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, addr_in->ma.umc_inst);
+ bank = paddr_out->pa.bank;
+ /* no need to care about umc inst if addr_in is NULL */
+ umc_inst = addr_in->ma.umc_inst;
}
-}
-static void umc_v12_0_dump_addr_info(struct amdgpu_device *adev,
- struct ta_ras_query_address_output *addr_out,
- uint64_t err_addr)
-{
- uint32_t col, row, row_xor, bank, channel_index;
- uint64_t soc_pa, retired_page, column;
-
- soc_pa = addr_out->pa.pa;
- bank = addr_out->pa.bank;
- channel_index = addr_out->pa.channel_idx;
-
- col = (err_addr >> 1) & 0x1fULL;
- row = (err_addr >> 10) & 0x3fffULL;
- row_xor = row ^ (0x1ULL << 13);
- /* clear [C3 C2] in soc physical address */
- soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
- /* clear [C4] in soc physical address */
- soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
-
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
- retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
- /* include column bit 0 and 1 */
- col &= 0x3;
- col |= (column << 2);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row, col, bank, channel_index);
-
- /* shift R13 bit */
- retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
- dev_info(adev->dev,
- "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
- retired_page, row_xor, col, bank, channel_index);
- }
-}
+ loop_bits[0] = UMC_V12_0_PA_C2_BIT;
+ loop_bits[1] = UMC_V12_0_PA_C3_BIT;
+ loop_bits[2] = UMC_V12_0_PA_C4_BIT;
+ loop_bits[3] = UMC_V12_0_PA_R13_BIT;
-static int umc_v12_0_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
- uint64_t pa_addr, uint64_t *pfns, int len)
-{
- uint64_t soc_pa, retired_page, column;
- uint32_t pos = 0;
-
- soc_pa = pa_addr;
- /* clear [C3 C2] in soc physical address */
- soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
- /* clear [C4] in soc physical address */
- soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
-
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
- retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
-
- if (pos >= len)
- return 0;
- pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-
- /* shift R13 bit */
- retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
-
- if (pos >= len)
- return 0;
- pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ /* other nps modes are taken as nps1 */
+ if (nps == AMDGPU_NPS4_PARTITION_MODE) {
+ loop_bits[0] = UMC_V12_0_PA_CH4_BIT;
+ loop_bits[1] = UMC_V12_0_PA_CH5_BIT;
+ loop_bits[2] = UMC_V12_0_PA_B0_BIT;
+ loop_bits[3] = UMC_V12_0_PA_R11_BIT;
}
- return pos;
-}
-
-static int umc_v12_0_convert_mca_to_addr(struct amdgpu_device *adev,
- uint64_t err_addr, uint32_t ch, uint32_t umc,
- uint32_t node, uint32_t socket,
- uint64_t *addr, bool dump_addr)
-{
- struct ta_ras_query_address_input addr_in;
- struct ta_ras_query_address_output addr_out;
-
- memset(&addr_in, 0, sizeof(addr_in));
- addr_in.ma.err_addr = err_addr;
- addr_in.ma.ch_inst = ch;
- addr_in.ma.umc_inst = umc;
- addr_in.ma.node_inst = node;
- addr_in.ma.socket_id = socket;
- addr_in.addr_type = TA_RAS_MCA_TO_PA;
- if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) {
- dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
- err_addr);
- return -EINVAL;
+ soc_pa = paddr_out->pa.pa;
+ channel_index = paddr_out->pa.channel_idx;
+ /* clear loop bits in soc physical address */
+ for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
+ soc_pa &= ~BIT_ULL(loop_bits[i]);
+
+ paddr_out->pa.pa = soc_pa;
+ /* get column bit 0 and 1 in mca address */
+ col_lower = (err_addr >> 1) & 0x3ULL;
+ /* MA_R13_BIT will be handled later */
+ row_lower = (err_addr >> UMC_V12_0_MA_R0_BIT) & 0x1fffULL;
+
+ if (!err_data && !dump_addr)
+ goto out;
+
+ /* loop for all possibilities of retired bits */
+ for (column = 0; column < UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; column++) {
+ soc_pa = paddr_out->pa.pa;
+ for (i = 0; i < UMC_V12_0_RETIRE_LOOP_BITS; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << loop_bits[i]);
+
+ col = ((column & 0x7) << 2) | col_lower;
+ /* add row bit 13 */
+ row = ((column >> 3) << 13) | row_lower;
+
+ if (dump_addr)
+ dev_info(adev->dev,
+ "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ soc_pa, row, col, bank, channel_index);
+
+ if (err_data)
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ soc_pa, channel_index, umc_inst);
}
- if (dump_addr)
- umc_v12_0_dump_addr_info(adev, &addr_out, err_addr);
-
- *addr = addr_out.pa.pa;
-
- return 0;
+out:
+ return ret;
}
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
@@ -374,7 +314,7 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
addr_in.ma.umc_inst = umc_inst;
addr_in.ma.node_inst = node_inst;
- umc_v12_0_convert_error_address(adev, err_data, &addr_in);
+ umc_v12_0_convert_error_address(adev, err_data, &addr_in, NULL, true);
}
/* clear umc status */
@@ -526,6 +466,9 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
uint64_t err_addr, pa_addr = 0;
struct ras_ecc_err *ecc_err;
+ struct ta_ras_query_address_output addr_out;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ uint32_t shift_bit = UMC_V12_0_PA_C4_BIT;
int count, ret, i;
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
@@ -552,10 +495,10 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
MCA_IPID_2_UMC_CH(ipid),
err_addr);
- ret = umc_v12_0_convert_mca_to_addr(adev,
+ ret = amdgpu_umc_mca_to_addr(adev,
err_addr, MCA_IPID_2_UMC_CH(ipid),
MCA_IPID_2_UMC_INST(ipid), MCA_IPID_2_DIE_ID(ipid),
- MCA_IPID_2_SOCKET_ID(ipid), &pa_addr, true);
+ MCA_IPID_2_SOCKET_ID(ipid), &addr_out, true);
if (ret)
return ret;
@@ -563,14 +506,21 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
if (!ecc_err)
return -ENOMEM;
+ pa_addr = addr_out.pa.pa;
ecc_err->status = status;
ecc_err->ipid = ipid;
ecc_err->addr = addr;
- ecc_err->pa_pfn = UMC_V12_ADDR_MASK_BAD_COLS(pa_addr) >> AMDGPU_GPU_PAGE_SHIFT;
+ ecc_err->pa_pfn = pa_addr >> AMDGPU_GPU_PAGE_SHIFT;
+ ecc_err->channel_idx = addr_out.pa.channel_idx;
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ if (nps == AMDGPU_NPS4_PARTITION_MODE)
+ shift_bit = UMC_V12_0_PA_B0_BIT;
/* If converted pa_pfn is 0, use pa C4 pfn. */
if (!ecc_err->pa_pfn)
- ecc_err->pa_pfn = BIT_ULL(UMC_V12_0_PA_C4_BIT) >> AMDGPU_GPU_PAGE_SHIFT;
+ ecc_err->pa_pfn = BIT_ULL(shift_bit) >> AMDGPU_GPU_PAGE_SHIFT;
ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
if (ret) {
@@ -586,7 +536,7 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
con->umc_ecc_log.de_queried_count++;
memset(page_pfn, 0, sizeof(page_pfn));
- count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
pa_addr,
page_pfn, ARRAY_SIZE(page_pfn));
if (count <= 0) {
@@ -629,7 +579,7 @@ static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
return -EINVAL;
memset(page_pfn, 0, sizeof(page_pfn));
- count = umc_v12_0_lookup_bad_pages_in_a_row(adev,
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
ecc_err->pa_pfn << AMDGPU_GPU_PAGE_SHIFT,
page_pfn, ARRAY_SIZE(page_pfn));
@@ -637,7 +587,7 @@ static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
ret = amdgpu_umc_fill_error_record(err_data,
ecc_err->addr,
page_pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
- MCA_IPID_2_UMC_CH(ecc_err->ipid),
+ ecc_err->channel_idx,
MCA_IPID_2_UMC_INST(ecc_err->ipid));
if (ret)
break;
@@ -676,6 +626,31 @@ static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
mutex_unlock(&con->umc_ecc_log.lock);
}
+static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev,
+ uint64_t mca_addr, uint64_t retired_page)
+{
+ uint32_t die = 0;
+
+ /* we only calculate die id for nps1 mode right now */
+ die += ((((retired_page >> 12) & 0x1ULL)^
+ ((retired_page >> 20) & 0x1ULL) ^
+ ((retired_page >> 27) & 0x1ULL) ^
+ ((retired_page >> 34) & 0x1ULL) ^
+ ((retired_page >> 41) & 0x1ULL)) << 0);
+
+ /* the original PA_C4 and PA_R13 may be cleared in retired_page, so
+ * get them from mca_addr.
+ */
+ die += ((((retired_page >> 13) & 0x1ULL) ^
+ ((mca_addr >> 5) & 0x1ULL) ^
+ ((retired_page >> 28) & 0x1ULL) ^
+ ((mca_addr >> 23) & 0x1ULL) ^
+ ((retired_page >> 42) & 0x1ULL)) << 1);
+ die &= 3;
+
+ return die;
+}
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
@@ -686,5 +661,7 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
.update_ecc_status = umc_v12_0_update_ecc_status,
+ .convert_ras_err_addr = umc_v12_0_convert_error_address,
+ .get_die_id_from_pa = umc_v12_0_get_die_id,
};
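Annotation: the umc_v12_0 rework replaces three near-identical hard-coded loops over [C4 C3 C2] (plus the R13 toggle) with a single table of four loop bits, which is what lets NPS4 swap in a different bit set (CH4, CH5, B0, R11) without duplicating the walk. With 4 loop bits the walk visits 2^4 = 16 candidate pages, matching UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL. A standalone sketch of the expansion; the base address is made up, the bit positions are the NPS1 values from umc_v12_0.h:

#include <stdint.h>
#include <stdio.h>

#define PA_C2_BIT	15
#define PA_C3_BIT	16
#define PA_C4_BIT	21
#define PA_R13_BIT	35
#define RETIRE_LOOP_BITS 4

int main(void)
{
	const uint32_t loop_bits[RETIRE_LOOP_BITS] = {
		PA_C2_BIT, PA_C3_BIT, PA_C4_BIT, PA_R13_BIT,
	};
	uint64_t base_pa = 0x123400000ULL;	/* made-up translated address */
	uint64_t column;
	uint32_t i;

	/* Clear the loop bits first so the ORs below enumerate every
	 * combination exactly once. */
	for (i = 0; i < RETIRE_LOOP_BITS; i++)
		base_pa &= ~(1ULL << loop_bits[i]);

	/* 4 loop bits -> 2^4 = 16 candidate pages sharing one bad row. */
	for (column = 0; column < (1ULL << RETIRE_LOOP_BITS); column++) {
		uint64_t pa = base_pa;

		for (i = 0; i < RETIRE_LOOP_BITS; i++)
			pa |= ((column >> i) & 0x1ULL) << loop_bits[i];
		printf("retire candidate %2llu: 0x%llx\n",
		       (unsigned long long)column, (unsigned long long)pa);
	}
	return 0;
}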
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
index be5598d76c1d..9298018d938f 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
@@ -55,12 +55,24 @@
#define UMC_V12_0_NA_MAP_PA_NUM 8
/* R13 bit shift should be considered, double the number */
#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
+/* C2, C3, C4 and R13: four address bits looped over in page retirement */
+#define UMC_V12_0_RETIRE_LOOP_BITS 4
/* column bits in SOC physical address */
#define UMC_V12_0_PA_C2_BIT 15
+#define UMC_V12_0_PA_C3_BIT 16
#define UMC_V12_0_PA_C4_BIT 21
/* row bits in SOC physical address */
+#define UMC_V12_0_PA_R0_BIT 22
+#define UMC_V12_0_PA_R11_BIT 33
#define UMC_V12_0_PA_R13_BIT 35
+/* channel bit in SOC physical address */
+#define UMC_V12_0_PA_CH4_BIT 12
+#define UMC_V12_0_PA_CH5_BIT 13
+/* bank bit in SOC physical address */
+#define UMC_V12_0_PA_B0_BIT 19
+/* row bits in MCA address */
+#define UMC_V12_0_MA_R0_BIT 10
#define MCA_UMC_HWID_V12_0 0x96
#define MCA_UMC_MCATYPE_V12_0 0x0
@@ -81,11 +93,6 @@
(((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
-#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
- ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
- (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
- (0x1ULL << UMC_V12_0_PA_R13_BIT)))
-
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_14.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.c
new file mode 100644
index 000000000000..eaca10a3c4a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v8_14.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
+#include "amdgpu.h"
+#include "umc/umc_8_14_0_offset.h"
+#include "umc/umc_8_14_0_sh_mask.h"
+
+static inline uint32_t get_umc_v8_14_reg_offset(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_offs * ch_inst + UMC_V8_14_INST_DIST * umc_inst;
+}
+
+static int umc_v8_14_clear_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ uint32_t ecc_err_cnt_addr;
+ uint32_t umc_reg_offset =
+ get_umc_v8_14_reg_offset(adev, umc_inst, ch_inst);
+
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ /* clear error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V8_14_CE_CNT_INIT);
+
+ return 0;
+}
+
+static void umc_v8_14_clear_error_count(struct amdgpu_device *adev)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_14_clear_error_count_per_channel, NULL);
+}
+
+static void umc_v8_14_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+
+ /* UMC 8_14 registers */
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_GeccErrCnt, GeccErrCnt) -
+ UMC_V8_14_CE_CNT_INIT);
+}
+
+static void umc_v8_14_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+ /* UMC 8_14 registers */
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_GeccErrCnt, GeccUnCorrErrCnt) -
+ UMC_V8_14_CE_CNT_INIT);
+}
+
+static int umc_v8_14_query_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)data;
+ uint32_t umc_reg_offset =
+ get_umc_v8_14_reg_offset(adev, umc_inst, ch_inst);
+
+ umc_v8_14_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+ umc_v8_14_query_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+
+ return 0;
+}
+
+static void umc_v8_14_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_14_query_error_count_per_channel, ras_error_status);
+
+ umc_v8_14_clear_error_count(adev);
+}
+
+static int umc_v8_14_err_cnt_init_per_channel(struct amdgpu_device *adev,
+ uint32_t node_inst, uint32_t umc_inst,
+ uint32_t ch_inst, void *data)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt_addr;
+ uint32_t umc_reg_offset =
+ get_umc_v8_14_reg_offset(adev, umc_inst, ch_inst);
+
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_GeccErrCnt);
+
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+
+ /* set ce error interrupt type to APIC based interrupt */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_GeccErrCntSel,
+ GeccErrInt, 0x1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+ /* set error count to initial value */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_14_CE_CNT_INIT);
+
+ return 0;
+}
+
+static void umc_v8_14_err_cnt_init(struct amdgpu_device *adev)
+{
+ amdgpu_umc_loop_channels(adev,
+ umc_v8_14_err_cnt_init_per_channel, NULL);
+}
+
+const struct amdgpu_ras_block_hw_ops umc_v8_14_ras_hw_ops = {
+ .query_ras_error_count = umc_v8_14_query_ras_error_count,
+};
+
+struct amdgpu_umc_ras umc_v8_14_ras = {
+ .ras_block = {
+ .hw_ops = &umc_v8_14_ras_hw_ops,
+ },
+ .err_cnt_init = umc_v8_14_err_cnt_init,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_14.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.h
new file mode 100644
index 000000000000..20a258f0017a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_14.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V8_14_H__
+#define __UMC_V8_14_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+/* number of umc channel instances with memory map register access */
+#define UMC_V8_14_CHANNEL_INSTANCE_NUM 2
+/* number of umc instances with memory map register access */
+#define UMC_V8_14_UMC_INSTANCE_NUM(adev) ((adev)->umc.node_inst_num)
+
+/* Total channel instances for all available umc nodes */
+#define UMC_V8_14_TOTAL_CHANNEL_NUM(adev) \
+ (UMC_V8_14_CHANNEL_INSTANCE_NUM * (adev)->gmc.num_umc)
+
+/* UMC register per channel offset */
+#define UMC_V8_14_PER_CHANNEL_OFFSET 0x400
+
+#define UMC_V8_14_INST_DIST 0x40000
+
+/* EccErrCnt max value */
+#define UMC_V8_14_CE_CNT_MAX 0xffff
+/* umc ce interrupt threshold */
+#define UMC_V8_14_CE_INT_THRESHOLD 0xffff
+/* umc ce count initial value */
+#define UMC_V8_14_CE_CNT_INIT (UMC_V8_14_CE_CNT_MAX - UMC_V8_14_CE_INT_THRESHOLD)
+
+extern struct amdgpu_umc_ras umc_v8_14_ras;
+#endif
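Annotation: the CE_CNT_INIT definition encodes a common counter trick: pre-load the hardware counter with MAX - THRESHOLD so it saturates (and can raise the CE interrupt) after THRESHOLD correctable errors, then subtract the preload when reading counts back — which is why the query and clear paths in umc_v8_14.c subtract or write UMC_V8_14_CE_CNT_INIT. A standalone sketch of the arithmetic; note that with both constants set to 0xffff the preload is 0, i.e. the interrupt fires only at the full 16-bit count:

#include <stdint.h>
#include <stdio.h>

#define CE_CNT_MAX	 0xffff
#define CE_INT_THRESHOLD 0xffff
#define CE_CNT_INIT	 (CE_CNT_MAX - CE_INT_THRESHOLD)

int main(void)
{
	uint32_t hw_counter = CE_CNT_INIT;	/* value written at init/clear */

	hw_counter += 5;	/* five correctable errors land */
	/* The query path reports errors relative to the preload. */
	printf("reported CE count: %u\n", hw_counter - CE_CNT_INIT);
	return 0;
}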
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 805d6662c88b..5830e799c0a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -531,9 +531,9 @@ static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev)
}
-static int uvd_v3_1_early_init(void *handle)
+static int uvd_v3_1_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->uvd.num_uvd_inst = 1;
uvd_v3_1_set_ring_funcs(adev);
@@ -542,10 +542,10 @@ static int uvd_v3_1_early_init(void *handle)
return 0;
}
-static int uvd_v3_1_sw_init(void *handle)
+static int uvd_v3_1_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
void *ptr;
uint32_t ucode_len;
@@ -580,10 +580,10 @@ static int uvd_v3_1_sw_init(void *handle)
return r;
}
-static int uvd_v3_1_sw_fini(void *handle)
+static int uvd_v3_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -621,13 +621,13 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
/**
* uvd_v3_1_hw_init - start and test UVD block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v3_1_hw_init(void *handle)
+static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int r;
@@ -688,13 +688,13 @@ done:
/**
* uvd_v3_1_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v3_1_hw_fini(void *handle)
+static int uvd_v3_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -704,17 +704,17 @@ static int uvd_v3_1_hw_fini(void *handle)
return 0;
}
-static int uvd_v3_1_prepare_suspend(void *handle)
+static int uvd_v3_1_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return amdgpu_uvd_prepare_suspend(adev);
}
-static int uvd_v3_1_suspend(void *handle)
+static int uvd_v3_1_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -740,23 +740,22 @@ static int uvd_v3_1_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = uvd_v3_1_hw_fini(adev);
+ r = uvd_v3_1_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v3_1_resume(void *handle)
+static int uvd_v3_1_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v3_1_hw_init(adev);
+ return uvd_v3_1_hw_init(ip_block);
}
static bool uvd_v3_1_is_idle(void *handle)
@@ -766,10 +765,10 @@ static bool uvd_v3_1_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
-static int uvd_v3_1_wait_for_idle(void *handle)
+static int uvd_v3_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
@@ -778,9 +777,9 @@ static int uvd_v3_1_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int uvd_v3_1_soft_reset(void *handle)
+static int uvd_v3_1_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uvd_v3_1_stop(adev);
@@ -791,13 +790,13 @@ static int uvd_v3_1_soft_reset(void *handle)
return uvd_v3_1_start(adev);
}
-static int uvd_v3_1_set_clockgating_state(void *handle,
+static int uvd_v3_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int uvd_v3_1_set_powergating_state(void *handle,
+static int uvd_v3_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -806,7 +805,6 @@ static int uvd_v3_1_set_powergating_state(void *handle,
static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
.name = "uvd_v3_1",
.early_init = uvd_v3_1_early_init,
- .late_init = NULL,
.sw_init = uvd_v3_1_sw_init,
.sw_fini = uvd_v3_1_sw_fini,
.hw_init = uvd_v3_1_hw_init,
@@ -819,8 +817,6 @@ static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
.soft_reset = uvd_v3_1_soft_reset,
.set_clockgating_state = uvd_v3_1_set_clockgating_state,
.set_powergating_state = uvd_v3_1_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version uvd_v3_1_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 3f19c606f4de..f93079e09215 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -44,7 +44,7 @@ static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
-static int uvd_v4_2_set_clockgating_state(void *handle,
+static int uvd_v4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
bool sw_mode);
@@ -90,9 +90,9 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
-static int uvd_v4_2_early_init(void *handle)
+static int uvd_v4_2_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->uvd.num_uvd_inst = 1;
uvd_v4_2_set_ring_funcs(adev);
@@ -101,10 +101,10 @@ static int uvd_v4_2_early_init(void *handle)
return 0;
}
-static int uvd_v4_2_sw_init(void *handle)
+static int uvd_v4_2_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
/* UVD TRAP */
@@ -130,10 +130,10 @@ static int uvd_v4_2_sw_init(void *handle)
return r;
}
-static int uvd_v4_2_sw_fini(void *handle)
+static int uvd_v4_2_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -147,13 +147,13 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
/**
* uvd_v4_2_hw_init - start and test UVD block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v4_2_hw_init(void *handle)
+static int uvd_v4_2_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int r;
@@ -202,13 +202,13 @@ done:
/**
* uvd_v4_2_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v4_2_hw_fini(void *handle)
+static int uvd_v4_2_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -218,17 +218,17 @@ static int uvd_v4_2_hw_fini(void *handle)
return 0;
}
-static int uvd_v4_2_prepare_suspend(void *handle)
+static int uvd_v4_2_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return amdgpu_uvd_prepare_suspend(adev);
}
-static int uvd_v4_2_suspend(void *handle)
+static int uvd_v4_2_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -254,23 +254,22 @@ static int uvd_v4_2_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = uvd_v4_2_hw_fini(adev);
+ r = uvd_v4_2_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v4_2_resume(void *handle)
+static int uvd_v4_2_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v4_2_hw_init(adev);
+ return uvd_v4_2_hw_init(ip_block);
}
/**
@@ -666,10 +665,10 @@ static bool uvd_v4_2_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
-static int uvd_v4_2_wait_for_idle(void *handle)
+static int uvd_v4_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
@@ -678,9 +677,9 @@ static int uvd_v4_2_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int uvd_v4_2_soft_reset(void *handle)
+static int uvd_v4_2_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uvd_v4_2_stop(adev);
@@ -709,13 +708,13 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int uvd_v4_2_set_clockgating_state(void *handle,
+static int uvd_v4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int uvd_v4_2_set_powergating_state(void *handle,
+static int uvd_v4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
@@ -725,7 +724,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
@@ -756,7 +755,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init,
- .late_init = NULL,
.sw_init = uvd_v4_2_sw_init,
.sw_fini = uvd_v4_2_sw_fini,
.hw_init = uvd_v4_2_hw_init,
@@ -769,8 +767,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.soft_reset = uvd_v4_2_soft_reset,
.set_clockgating_state = uvd_v4_2_set_clockgating_state,
.set_powergating_state = uvd_v4_2_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index efd903c21d48..050a0f309390 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -42,7 +42,7 @@ static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
-static int uvd_v5_0_set_clockgating_state(void *handle,
+static int uvd_v5_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
@@ -88,9 +88,9 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
-static int uvd_v5_0_early_init(void *handle)
+static int uvd_v5_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->uvd.num_uvd_inst = 1;
uvd_v5_0_set_ring_funcs(adev);
@@ -99,10 +99,10 @@ static int uvd_v5_0_early_init(void *handle)
return 0;
}
-static int uvd_v5_0_sw_init(void *handle)
+static int uvd_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
/* UVD TRAP */
@@ -128,10 +128,10 @@ static int uvd_v5_0_sw_init(void *handle)
return r;
}
-static int uvd_v5_0_sw_fini(void *handle)
+static int uvd_v5_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -143,19 +143,19 @@ static int uvd_v5_0_sw_fini(void *handle)
/**
* uvd_v5_0_hw_init - start and test UVD block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v5_0_hw_init(void *handle)
+static int uvd_v5_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v5_0_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);
r = amdgpu_ring_test_helper(ring);
@@ -200,13 +200,13 @@ done:
/**
* uvd_v5_0_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v5_0_hw_fini(void *handle)
+static int uvd_v5_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -216,17 +216,17 @@ static int uvd_v5_0_hw_fini(void *handle)
return 0;
}
-static int uvd_v5_0_prepare_suspend(void *handle)
+static int uvd_v5_0_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return amdgpu_uvd_prepare_suspend(adev);
}
-static int uvd_v5_0_suspend(void *handle)
+static int uvd_v5_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -252,23 +252,22 @@ static int uvd_v5_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = uvd_v5_0_hw_fini(adev);
+ r = uvd_v5_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v5_0_resume(void *handle)
+static int uvd_v5_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v5_0_hw_init(adev);
+ return uvd_v5_0_hw_init(ip_block);
}
/**
@@ -588,10 +587,10 @@ static bool uvd_v5_0_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
-static int uvd_v5_0_wait_for_idle(void *handle)
+static int uvd_v5_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
@@ -600,9 +599,9 @@ static int uvd_v5_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int uvd_v5_0_soft_reset(void *handle)
+static int uvd_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
uvd_v5_0_stop(adev);
@@ -791,15 +790,15 @@ static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
}
}
-static int uvd_v5_0_set_clockgating_state(void *handle,
+static int uvd_v5_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
- if (uvd_v5_0_wait_for_idle(handle))
+ if (uvd_v5_0_wait_for_idle(ip_block))
return -EBUSY;
uvd_v5_0_enable_clock_gating(adev, true);
@@ -813,7 +812,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
return 0;
}
-static int uvd_v5_0_set_powergating_state(void *handle,
+static int uvd_v5_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
@@ -823,7 +822,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
if (state == AMD_PG_STATE_GATE) {
@@ -863,7 +862,6 @@ out:
static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init,
- .late_init = NULL,
.sw_init = uvd_v5_0_sw_init,
.sw_fini = uvd_v5_0_sw_fini,
.hw_init = uvd_v5_0_hw_init,
@@ -877,8 +875,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.set_clockgating_state = uvd_v5_0_set_clockgating_state,
.set_powergating_state = uvd_v5_0_set_powergating_state,
.get_clockgating_state = uvd_v5_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
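[Note: suspend and resume now forward the ip_block itself (uvd_v5_0_hw_fini(ip_block), uvd_v5_0_hw_init(ip_block)) where they previously passed adev into a "void *handle" parameter, which the compiler could not check. With the typed signature a mismatched argument becomes a build error. A sketch of the converted call chain, with hypothetical example_* names:

	static int example_suspend(struct amdgpu_ip_block *ip_block)
	{
		int r;

		r = example_hw_fini(ip_block);	/* passing adev here would no longer compile */
		if (r)
			return r;

		return amdgpu_uvd_suspend(ip_block->adev);
	}
]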
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 495de5068455..d9d036ee51fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -48,7 +48,7 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
-static int uvd_v6_0_set_clockgating_state(void *handle,
+static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
@@ -354,9 +354,9 @@ error:
return r;
}
-static int uvd_v6_0_early_init(void *handle)
+static int uvd_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->uvd.num_uvd_inst = 1;
if (!(adev->flags & AMD_IS_APU) &&
@@ -375,11 +375,11 @@ static int uvd_v6_0_early_init(void *handle)
return 0;
}
-static int uvd_v6_0_sw_init(void *handle)
+static int uvd_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
@@ -435,10 +435,10 @@ static int uvd_v6_0_sw_init(void *handle)
return r;
}
-static int uvd_v6_0_sw_fini(void *handle)
+static int uvd_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_uvd_suspend(adev);
if (r)
@@ -455,19 +455,19 @@ static int uvd_v6_0_sw_fini(void *handle)
/**
* uvd_v6_0_hw_init - start and test UVD block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v6_0_hw_init(void *handle)
+static int uvd_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int i, r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
+ uvd_v6_0_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
uvd_v6_0_enable_mgcg(adev, true);
r = amdgpu_ring_test_helper(ring);
@@ -524,13 +524,13 @@ done:
/**
* uvd_v6_0_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v6_0_hw_fini(void *handle)
+static int uvd_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -540,17 +540,17 @@ static int uvd_v6_0_hw_fini(void *handle)
return 0;
}
-static int uvd_v6_0_prepare_suspend(void *handle)
+static int uvd_v6_0_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return amdgpu_uvd_prepare_suspend(adev);
}
-static int uvd_v6_0_suspend(void *handle)
+static int uvd_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -576,23 +576,22 @@ static int uvd_v6_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = uvd_v6_0_hw_fini(adev);
+ r = uvd_v6_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v6_0_resume(void *handle)
+static int uvd_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v6_0_hw_init(adev);
+ return uvd_v6_0_hw_init(ip_block);
}
/**
@@ -1151,22 +1150,22 @@ static bool uvd_v6_0_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
-static int uvd_v6_0_wait_for_idle(void *handle)
+static int uvd_v6_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- if (uvd_v6_0_is_idle(handle))
+ if (uvd_v6_0_is_idle(adev))
return 0;
}
return -ETIMEDOUT;
}
#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static bool uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1184,9 +1183,9 @@ static bool uvd_v6_0_check_soft_reset(void *handle)
}
}
-static int uvd_v6_0_pre_soft_reset(void *handle)
+static int uvd_v6_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->uvd.inst->srbm_soft_reset)
return 0;
@@ -1195,9 +1194,9 @@ static int uvd_v6_0_pre_soft_reset(void *handle)
return 0;
}
-static int uvd_v6_0_soft_reset(void *handle)
+static int uvd_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset;
if (!adev->uvd.inst->srbm_soft_reset)
@@ -1226,9 +1225,9 @@ static int uvd_v6_0_soft_reset(void *handle)
return 0;
}
-static int uvd_v6_0_post_soft_reset(void *handle)
+static int uvd_v6_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->uvd.inst->srbm_soft_reset)
return 0;
@@ -1451,15 +1450,15 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
}
}
-static int uvd_v6_0_set_clockgating_state(void *handle,
+static int uvd_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
- if (uvd_v6_0_wait_for_idle(handle))
+ if (uvd_v6_0_wait_for_idle(ip_block))
return -EBUSY;
uvd_v6_0_enable_clock_gating(adev, true);
/* enable HW gates because UVD is idle */
@@ -1472,7 +1471,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
return 0;
}
-static int uvd_v6_0_set_powergating_state(void *handle,
+static int uvd_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
@@ -1482,7 +1481,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
@@ -1528,7 +1527,6 @@ out:
static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init,
- .late_init = NULL,
.sw_init = uvd_v6_0_sw_init,
.sw_fini = uvd_v6_0_sw_fini,
.hw_init = uvd_v6_0_hw_init,
@@ -1545,8 +1543,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
.get_clockgating_state = uvd_v6_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
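[Note: uvd_v6_0 additionally converts the soft-reset path (check_soft_reset, pre_soft_reset, soft_reset, post_soft_reset) and wait_for_idle. is_idle() still takes "void *handle" at this point in the series, so its internal call site switches from uvd_v6_0_is_idle(handle) to uvd_v6_0_is_idle(adev). The polling shape shared by the wait_for_idle callbacks, as a sketch (adev->usec_timeout bounds the loop, as in the driver; example_* names are hypothetical):

	static int example_wait_for_idle(struct amdgpu_ip_block *ip_block)
	{
		struct amdgpu_device *adev = ip_block->adev;
		unsigned int i;

		for (i = 0; i < adev->usec_timeout; i++) {
			if (example_is_idle(adev))	/* is_idle keeps its device argument */
				return 0;
		}
		return -ETIMEDOUT;
	}
]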
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 6068b784dc69..9d237b5937fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -361,9 +361,9 @@ error:
return r;
}
-static int uvd_v7_0_early_init(void *handle)
+static int uvd_v7_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->asic_type == CHIP_VEGA20) {
u32 harvest;
@@ -395,12 +395,12 @@ static int uvd_v7_0_early_init(void *handle)
return 0;
}
-static int uvd_v7_0_sw_init(void *handle)
+static int uvd_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
@@ -487,10 +487,10 @@ static int uvd_v7_0_sw_init(void *handle)
return r;
}
-static int uvd_v7_0_sw_fini(void *handle)
+static int uvd_v7_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, j, r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_virt_free_mm_table(adev);
@@ -510,13 +510,13 @@ static int uvd_v7_0_sw_fini(void *handle)
/**
* uvd_v7_0_hw_init - start and test UVD block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int uvd_v7_0_hw_init(void *handle)
+static int uvd_v7_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
uint32_t tmp;
int i, j, r;
@@ -588,13 +588,13 @@ done:
/**
* uvd_v7_0_hw_fini - stop the hardware block
*
- * @handle: handle used to pass amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the UVD block, mark ring as not ready any more
*/
-static int uvd_v7_0_hw_fini(void *handle)
+static int uvd_v7_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->uvd.idle_work);
@@ -608,17 +608,17 @@ static int uvd_v7_0_hw_fini(void *handle)
return 0;
}
-static int uvd_v7_0_prepare_suspend(void *handle)
+static int uvd_v7_0_prepare_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return amdgpu_uvd_prepare_suspend(adev);
}
-static int uvd_v7_0_suspend(void *handle)
+static int uvd_v7_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -644,23 +644,22 @@ static int uvd_v7_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = uvd_v7_0_hw_fini(adev);
+ r = uvd_v7_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
-static int uvd_v7_0_resume(void *handle)
+static int uvd_v7_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
+ r = amdgpu_uvd_resume(ip_block->adev);
if (r)
return r;
- return uvd_v7_0_hw_init(adev);
+ return uvd_v7_0_hw_init(ip_block);
}
/**
@@ -1289,7 +1288,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
unsigned i;
/* No patching necessary for the first instance */
@@ -1463,104 +1462,6 @@ static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-#if 0
-static bool uvd_v7_0_is_idle(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
-}
-
-static int uvd_v7_0_wait_for_idle(void *handle)
-{
- unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (uvd_v7_0_is_idle(handle))
- return 0;
- }
- return -ETIMEDOUT;
-}
-
-#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static bool uvd_v7_0_check_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 srbm_soft_reset = 0;
- u32 tmp = RREG32(mmSRBM_STATUS);
-
- if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
- REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
- (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
- AMDGPU_UVD_STATUS_BUSY_MASK))
- srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
- SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
-
- if (srbm_soft_reset) {
- adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
- return true;
- } else {
- adev->uvd.inst[ring->me].srbm_soft_reset = 0;
- return false;
- }
-}
-
-static int uvd_v7_0_pre_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->uvd.inst[ring->me].srbm_soft_reset)
- return 0;
-
- uvd_v7_0_stop(adev);
- return 0;
-}
-
-static int uvd_v7_0_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 srbm_soft_reset;
-
- if (!adev->uvd.inst[ring->me].srbm_soft_reset)
- return 0;
- srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
-
- if (srbm_soft_reset) {
- u32 tmp;
-
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- /* Wait a little for things to settle down */
- udelay(50);
- }
-
- return 0;
-}
-
-static int uvd_v7_0_post_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->uvd.inst[ring->me].srbm_soft_reset)
- return 0;
-
- mdelay(5);
-
- return uvd_v7_0_start(adev);
-}
-#endif
-
static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -1610,172 +1511,7 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-#if 0
-static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
-{
- uint32_t data, data1, data2, suvd_flags;
-
- data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
- data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
- data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
-
- data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
- UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
-
- suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
- UVD_SUVD_CGC_GATE__SIT_MASK |
- UVD_SUVD_CGC_GATE__SMP_MASK |
- UVD_SUVD_CGC_GATE__SCM_MASK |
- UVD_SUVD_CGC_GATE__SDB_MASK;
-
- data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
- (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
- (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
-
- data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
- UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
- UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
- UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
- UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
- UVD_CGC_CTRL__SYS_MODE_MASK |
- UVD_CGC_CTRL__UDEC_MODE_MASK |
- UVD_CGC_CTRL__MPEG2_MODE_MASK |
- UVD_CGC_CTRL__REGS_MODE_MASK |
- UVD_CGC_CTRL__RBC_MODE_MASK |
- UVD_CGC_CTRL__LMI_MC_MODE_MASK |
- UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
- UVD_CGC_CTRL__IDCT_MODE_MASK |
- UVD_CGC_CTRL__MPRD_MODE_MASK |
- UVD_CGC_CTRL__MPC_MODE_MASK |
- UVD_CGC_CTRL__LBSI_MODE_MASK |
- UVD_CGC_CTRL__LRBBM_MODE_MASK |
- UVD_CGC_CTRL__WCB_MODE_MASK |
- UVD_CGC_CTRL__VCPU_MODE_MASK |
- UVD_CGC_CTRL__JPEG_MODE_MASK |
- UVD_CGC_CTRL__JPEG2_MODE_MASK |
- UVD_CGC_CTRL__SCPU_MODE_MASK);
- data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
- data1 |= suvd_flags;
-
- WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
- WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
- WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
- WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
-}
-
-static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
-{
- uint32_t data, data1, cgc_flags, suvd_flags;
-
- data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
- data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
-
- cgc_flags = UVD_CGC_GATE__SYS_MASK |
- UVD_CGC_GATE__UDEC_MASK |
- UVD_CGC_GATE__MPEG2_MASK |
- UVD_CGC_GATE__RBC_MASK |
- UVD_CGC_GATE__LMI_MC_MASK |
- UVD_CGC_GATE__IDCT_MASK |
- UVD_CGC_GATE__MPRD_MASK |
- UVD_CGC_GATE__MPC_MASK |
- UVD_CGC_GATE__LBSI_MASK |
- UVD_CGC_GATE__LRBBM_MASK |
- UVD_CGC_GATE__UDEC_RE_MASK |
- UVD_CGC_GATE__UDEC_CM_MASK |
- UVD_CGC_GATE__UDEC_IT_MASK |
- UVD_CGC_GATE__UDEC_DB_MASK |
- UVD_CGC_GATE__UDEC_MP_MASK |
- UVD_CGC_GATE__WCB_MASK |
- UVD_CGC_GATE__VCPU_MASK |
- UVD_CGC_GATE__SCPU_MASK |
- UVD_CGC_GATE__JPEG_MASK |
- UVD_CGC_GATE__JPEG2_MASK;
-
- suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
- UVD_SUVD_CGC_GATE__SIT_MASK |
- UVD_SUVD_CGC_GATE__SMP_MASK |
- UVD_SUVD_CGC_GATE__SCM_MASK |
- UVD_SUVD_CGC_GATE__SDB_MASK;
-
- data |= cgc_flags;
- data1 |= suvd_flags;
-
- WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
- WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
-}
-
-static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
- if (enable)
- tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
- else
- tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
- GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
-
- WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
-
-static int uvd_v7_0_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE);
-
- uvd_v7_0_set_bypass_mode(adev, enable);
-
- if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
- return 0;
-
- if (enable) {
- /* disable HW gating and enable Sw gating */
- uvd_v7_0_set_sw_clock_gating(adev);
- } else {
- /* wait for STATUS to clear */
- if (uvd_v7_0_wait_for_idle(handle))
- return -EBUSY;
-
- /* enable HW gates because UVD is idle */
- /* uvd_v7_0_set_hw_clock_gating(adev); */
- }
-
- return 0;
-}
-
-static int uvd_v7_0_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- /* This doesn't actually powergate the UVD block.
- * That's done in the dpm code via the SMC. This
- * just re-inits the block as necessary. The actual
- * gating still happens in the dpm code. We should
- * revisit this when there is a cleaner line between
- * the smc and the hw blocks
- */
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
- return 0;
-
- WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
-
- if (state == AMD_PG_STATE_GATE) {
- uvd_v7_0_stop(adev);
- return 0;
- } else {
- return uvd_v7_0_start(adev);
- }
-}
-#endif
-
-static int uvd_v7_0_set_clockgating_state(void *handle,
+static int uvd_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
/* needed for driver unload*/
@@ -1785,7 +1521,6 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
.name = "uvd_v7_0",
.early_init = uvd_v7_0_early_init,
- .late_init = NULL,
.sw_init = uvd_v7_0_sw_init,
.sw_fini = uvd_v7_0_sw_fini,
.hw_init = uvd_v7_0_hw_init,
@@ -1793,12 +1528,6 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
.prepare_suspend = uvd_v7_0_prepare_suspend,
.suspend = uvd_v7_0_suspend,
.resume = uvd_v7_0_resume,
- .is_idle = NULL /* uvd_v7_0_is_idle */,
- .wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
- .check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
- .pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
- .soft_reset = NULL /* uvd_v7_0_soft_reset */,
- .post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
.set_clockgating_state = uvd_v7_0_set_clockgating_state,
.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};
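[Note: for uvd_v7_0 the series deletes the "#if 0" blocks outright rather than converting them; those bodies referenced ring->me without any "ring" in scope and could never have compiled, so nothing working is lost. The CS-patching hunk also replaces the open-coded to_amdgpu_ring(job->base.sched) with the amdgpu_job_ring(job) helper, the driver's accessor for a job's ring. A sketch of a hypothetical caller using it (not the helper's definition):

	static int example_patch_cs(struct amdgpu_cs_parser *p,
				    struct amdgpu_job *job,
				    struct amdgpu_ib *ib)
	{
		struct amdgpu_ring *ring = amdgpu_job_ring(job);

		return example_patch_for_ring(p, ring, ib);	/* hypothetical helper */
	}
]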
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 66fada199bda..c633b7ff2943 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -208,13 +208,13 @@ static bool vce_v2_0_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}
-static int vce_v2_0_wait_for_idle(void *handle)
+static int vce_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
unsigned i;
for (i = 0; i < adev->usec_timeout; i++) {
- if (vce_v2_0_is_idle(handle))
+ if (vce_v2_0_is_idle(adev))
return 0;
}
return -ETIMEDOUT;
@@ -274,15 +274,21 @@ static int vce_v2_0_start(struct amdgpu_device *adev)
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
+ struct amdgpu_ip_block *ip_block;
int i;
int status;
+
if (vce_v2_0_lmi_clean(adev)) {
DRM_INFO("vce is not idle \n");
return 0;
}
- if (vce_v2_0_wait_for_idle(adev)) {
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
+ if (!ip_block)
+ return -EINVAL;
+
+ if (vce_v2_0_wait_for_idle(ip_block)) {
DRM_INFO("VCE is busy, Can't set clock gating");
return 0;
}
@@ -398,9 +404,9 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
}
}
-static int vce_v2_0_early_init(void *handle)
+static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->vce.num_rings = 2;
@@ -410,11 +416,11 @@ static int vce_v2_0_early_init(void *handle)
return 0;
}
-static int vce_v2_0_sw_init(void *handle)
+static int vce_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* VCE */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
@@ -444,10 +450,10 @@ static int vce_v2_0_sw_init(void *handle)
return r;
}
-static int vce_v2_0_sw_fini(void *handle)
+static int vce_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_vce_suspend(adev);
if (r)
@@ -456,10 +462,10 @@ static int vce_v2_0_sw_fini(void *handle)
return amdgpu_vce_sw_fini(adev);
}
-static int vce_v2_0_hw_init(void *handle)
+static int vce_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
vce_v2_0_enable_mgcg(adev, true, false);
@@ -475,19 +481,17 @@ static int vce_v2_0_hw_init(void *handle)
return 0;
}
-static int vce_v2_0_hw_fini(void *handle)
+static int vce_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cancel_delayed_work_sync(&adev->vce.idle_work);
+ cancel_delayed_work_sync(&ip_block->adev->vce.idle_work);
return 0;
}
-static int vce_v2_0_suspend(void *handle)
+static int vce_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
@@ -513,28 +517,27 @@ static int vce_v2_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = vce_v2_0_hw_fini(adev);
+ r = vce_v2_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_vce_suspend(adev);
}
-static int vce_v2_0_resume(void *handle)
+static int vce_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vce_resume(adev);
+ r = amdgpu_vce_resume(ip_block->adev);
if (r)
return r;
- return vce_v2_0_hw_init(adev);
+ return vce_v2_0_hw_init(ip_block);
}
-static int vce_v2_0_soft_reset(void *handle)
+static int vce_v2_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
mdelay(5);
@@ -575,13 +578,13 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int vce_v2_0_set_clockgating_state(void *handle,
+static int vce_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
bool sw_cg = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE) {
gate = true;
@@ -593,7 +596,7 @@ static int vce_v2_0_set_clockgating_state(void *handle,
return 0;
}
-static int vce_v2_0_set_powergating_state(void *handle,
+static int vce_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
@@ -603,7 +606,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
return vce_v2_0_stop(adev);
@@ -614,7 +617,6 @@ static int vce_v2_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.name = "vce_v2_0",
.early_init = vce_v2_0_early_init,
- .late_init = NULL,
.sw_init = vce_v2_0_sw_init,
.sw_fini = vce_v2_0_sw_fini,
.hw_init = vce_v2_0_hw_init,
@@ -626,8 +628,6 @@ static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.soft_reset = vce_v2_0_soft_reset,
.set_clockgating_state = vce_v2_0_set_clockgating_state,
.set_powergating_state = vce_v2_0_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
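[Note: vce_v2_0_stop() is not itself an ip-funcs callback, so after the conversion it has no ip_block argument of its own; it looks one up from the device with amdgpu_device_ip_get_ip_block() before calling the now-typed wait_for_idle. The same lookup pattern, as a sketch for any non-callback helper that needs an ip_block (example_* names are hypothetical):

	static int example_stop(struct amdgpu_device *adev)
	{
		struct amdgpu_ip_block *ip_block;

		ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
		if (!ip_block)
			return -EINVAL;

		return example_wait_for_idle(ip_block);
	}
]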
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 4bfba2931b08..f8bddcd19b68 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -64,8 +64,8 @@
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vce_v3_0_wait_for_idle(void *handle);
-static int vce_v3_0_set_clockgating_state(void *handle,
+static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block);
+static int vce_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
/**
* vce_v3_0_ring_get_rptr - get read pointer
@@ -396,9 +396,9 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
}
}
-static int vce_v3_0_early_init(void *handle)
+static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
@@ -415,9 +415,9 @@ static int vce_v3_0_early_init(void *handle)
return 0;
}
-static int vce_v3_0_sw_init(void *handle)
+static int vce_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int r, i;
@@ -453,10 +453,10 @@ static int vce_v3_0_sw_init(void *handle)
return r;
}
-static int vce_v3_0_sw_fini(void *handle)
+static int vce_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_vce_suspend(adev);
if (r)
@@ -465,10 +465,10 @@ static int vce_v3_0_sw_fini(void *handle)
return amdgpu_vce_sw_fini(adev);
}
-static int vce_v3_0_hw_init(void *handle)
+static int vce_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vce_v3_0_override_vce_clock_gating(adev, true);
@@ -485,25 +485,25 @@ static int vce_v3_0_hw_init(void *handle)
return 0;
}
-static int vce_v3_0_hw_fini(void *handle)
+static int vce_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vce.idle_work);
- r = vce_v3_0_wait_for_idle(handle);
+ r = vce_v3_0_wait_for_idle(ip_block);
if (r)
return r;
vce_v3_0_stop(adev);
- return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+ return vce_v3_0_set_clockgating_state(ip_block, AMD_CG_STATE_GATE);
}
-static int vce_v3_0_suspend(void *handle)
+static int vce_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/*
* Proper cleanups before halting the HW engine:
@@ -528,23 +528,22 @@ static int vce_v3_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = vce_v3_0_hw_fini(adev);
+ r = vce_v3_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_vce_suspend(adev);
}
-static int vce_v3_0_resume(void *handle)
+static int vce_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vce_resume(adev);
+ r = amdgpu_vce_resume(ip_block->adev);
if (r)
return r;
- return vce_v3_0_hw_init(adev);
+ return vce_v3_0_hw_init(ip_block);
}
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
@@ -609,13 +608,13 @@ static bool vce_v3_0_is_idle(void *handle)
return !(RREG32(mmSRBM_STATUS2) & mask);
}
-static int vce_v3_0_wait_for_idle(void *handle)
+static int vce_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++)
- if (vce_v3_0_is_idle(handle))
+ if (vce_v3_0_is_idle(adev))
return 0;
return -ETIMEDOUT;
@@ -627,9 +626,9 @@ static int vce_v3_0_wait_for_idle(void *handle)
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
-static bool vce_v3_0_check_soft_reset(void *handle)
+static bool vce_v3_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
/* According to VCE team , we should use VCE_STATUS instead
@@ -668,9 +667,9 @@ static bool vce_v3_0_check_soft_reset(void *handle)
}
}
-static int vce_v3_0_soft_reset(void *handle)
+static int vce_v3_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset;
if (!adev->vce.srbm_soft_reset)
@@ -699,29 +698,29 @@ static int vce_v3_0_soft_reset(void *handle)
return 0;
}
-static int vce_v3_0_pre_soft_reset(void *handle)
+static int vce_v3_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
- return vce_v3_0_suspend(adev);
+ return vce_v3_0_suspend(ip_block);
}
-static int vce_v3_0_post_soft_reset(void *handle)
+static int vce_v3_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
- return vce_v3_0_resume(adev);
+ return vce_v3_0_resume(ip_block);
}
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -761,10 +760,10 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int vce_v3_0_set_clockgating_state(void *handle,
+static int vce_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
@@ -802,7 +801,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int vce_v3_0_set_powergating_state(void *handle,
+static int vce_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
@@ -812,7 +811,7 @@ static int vce_v3_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret = 0;
if (state == AMD_PG_STATE_GATE) {
@@ -897,7 +896,6 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
- .late_init = NULL,
.sw_init = vce_v3_0_sw_init,
.sw_fini = vce_v3_0_sw_fini,
.hw_init = vce_v3_0_hw_init,
@@ -913,8 +911,6 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
.get_clockgating_state = vce_v3_0_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
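[Note: in vce_v3_0 the forward declarations change along with the definitions, and hw_fini now threads the ip_block through wait_for_idle and set_clockgating_state instead of handing adev to both. pre_soft_reset and post_soft_reset likewise delegate to the converted suspend/resume. A sketch of that delegation, with hypothetical example_* names:

	static int example_pre_soft_reset(struct amdgpu_ip_block *ip_block)
	{
		if (!ip_block->adev->vce.srbm_soft_reset)
			return 0;

		mdelay(5);

		return example_suspend(ip_block);	/* was example_suspend(adev) */
	}
]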
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 0748bf44c880..335bda64ff5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -407,9 +407,9 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
return 0;
}
-static int vce_v4_0_early_init(void *handle)
+static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */
adev->vce.num_rings = 1;
@@ -422,9 +422,9 @@ static int vce_v4_0_early_init(void *handle)
return 0;
}
-static int vce_v4_0_sw_init(void *handle)
+static int vce_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
unsigned size;
@@ -493,10 +493,10 @@ static int vce_v4_0_sw_init(void *handle)
return r;
}
-static int vce_v4_0_sw_fini(void *handle)
+static int vce_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* free MM table */
amdgpu_virt_free_mm_table(adev);
@@ -513,10 +513,10 @@ static int vce_v4_0_sw_fini(void *handle)
return amdgpu_vce_sw_fini(adev);
}
-static int vce_v4_0_hw_init(void *handle)
+static int vce_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
r = vce_v4_0_sriov_start(adev);
@@ -536,14 +536,14 @@ static int vce_v4_0_hw_init(void *handle)
return 0;
}
-static int vce_v4_0_hw_fini(void *handle)
+static int vce_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vce.idle_work);
if (!amdgpu_sriov_vf(adev)) {
- /* vce_v4_0_wait_for_idle(handle); */
+ /* vce_v4_0_wait_for_idle(ip_block); */
vce_v4_0_stop(adev);
} else {
/* full access mode, so don't touch any VCE register */
@@ -553,9 +553,9 @@ static int vce_v4_0_hw_fini(void *handle)
return 0;
}
-static int vce_v4_0_suspend(void *handle)
+static int vce_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r, idx;
if (adev->vce.vcpu_bo == NULL)
@@ -594,16 +594,16 @@ static int vce_v4_0_suspend(void *handle)
AMD_CG_STATE_GATE);
}
- r = vce_v4_0_hw_fini(adev);
+ r = vce_v4_0_hw_fini(ip_block);
if (r)
return r;
return amdgpu_vce_suspend(adev);
}
-static int vce_v4_0_resume(void *handle)
+static int vce_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r, idx;
if (adev->vce.vcpu_bo == NULL)
@@ -624,7 +624,7 @@ static int vce_v4_0_resume(void *handle)
return r;
}
- return vce_v4_0_hw_init(adev);
+ return vce_v4_0_hw_init(ip_block);
}
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
@@ -684,281 +684,14 @@ static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
-static int vce_v4_0_set_clockgating_state(void *handle,
+static int vce_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
/* needed for driver unload*/
return 0;
}
-#if 0
-static bool vce_v4_0_is_idle(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 mask = 0;
-
- mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
- mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
-
- return !(RREG32(mmSRBM_STATUS2) & mask);
-}
-
-static int vce_v4_0_wait_for_idle(void *handle)
-{
- unsigned i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- for (i = 0; i < adev->usec_timeout; i++)
- if (vce_v4_0_is_idle(handle))
- return 0;
-
- return -ETIMEDOUT;
-}
-
-#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */
-#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */
-#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */
-#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
- VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
-
-static bool vce_v4_0_check_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 srbm_soft_reset = 0;
-
- /* According to VCE team , we should use VCE_STATUS instead
- * SRBM_STATUS.VCE_BUSY bit for busy status checking.
- * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
- * instance's registers are accessed
- * (0 for 1st instance, 10 for 2nd instance).
- *
- *VCE_STATUS
- *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 | |FW_LOADED|JOB |
- *|----+----+-----------+----+----+----+----------+---------+----|
- *|bit8|bit7| bit6 |bit5|bit4|bit3| bit2 | bit1 |bit0|
- *
- * VCE team suggest use bit 3--bit 6 for busy status check
- */
- mutex_lock(&adev->grbm_idx_mutex);
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
- if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
- srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
- srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
- }
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
- if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
- srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
- srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
- }
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- if (srbm_soft_reset) {
- adev->vce.srbm_soft_reset = srbm_soft_reset;
- return true;
- } else {
- adev->vce.srbm_soft_reset = 0;
- return false;
- }
-}
-
-static int vce_v4_0_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 srbm_soft_reset;
-
- if (!adev->vce.srbm_soft_reset)
- return 0;
- srbm_soft_reset = adev->vce.srbm_soft_reset;
-
- if (srbm_soft_reset) {
- u32 tmp;
-
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- /* Wait a little for things to settle down */
- udelay(50);
- }
-
- return 0;
-}
-
-static int vce_v4_0_pre_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->vce.srbm_soft_reset)
- return 0;
-
- mdelay(5);
-
- return vce_v4_0_suspend(adev);
-}
-
-
-static int vce_v4_0_post_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!adev->vce.srbm_soft_reset)
- return 0;
-
- mdelay(5);
-
- return vce_v4_0_resume(adev);
-}
-
-static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
-{
- u32 tmp, data;
-
- tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
- if (override)
- data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
- else
- data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
-
- if (tmp != data)
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
-}
-
-static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
- bool gated)
-{
- u32 data;
-
- /* Set Override to disable Clock Gating */
- vce_v4_0_override_vce_clock_gating(adev, true);
-
- /* This function enables MGCG which is controlled by firmware.
- With the clocks in the gated state the core is still
- accessible but the firmware will throttle the clocks on the
- fly as necessary.
- */
- if (gated) {
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
- data |= 0x1ff;
- data &= ~0xef0000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
- data |= 0x3ff000;
- data &= ~0xffc00000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
- data |= 0x2;
- data &= ~0x00010000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
- data |= 0x37f;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
- data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
- 0x8;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
- } else {
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
- data &= ~0x80010;
- data |= 0xe70008;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
- data |= 0xffc00000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
- data |= 0x10000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
- data &= ~0xffc00000;
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
-
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
- data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
- VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
- 0x8);
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
- }
- vce_v4_0_override_vce_clock_gating(adev, false);
-}
-
-static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
-
- if (enable)
- tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
- else
- tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
-
- WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
-}
-
-static int vce_v4_0_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE);
- int i;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_TONGA) ||
- (adev->asic_type == CHIP_FIJI))
- vce_v4_0_set_bypass_mode(adev, enable);
-
- if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
- return 0;
-
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < 2; i++) {
- /* Program VCE Instance 0 or 1 if not harvested */
- if (adev->vce.harvest_config & (1 << i))
- continue;
-
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
-
- if (enable) {
- /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
- uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A);
- data &= ~(0xf | 0xff0);
- data |= ((0x0 << 0) | (0x04 << 4));
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A, data);
-
- /* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
- data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING);
- data &= ~(0xf | 0xff0);
- data |= ((0x0 << 0) | (0x04 << 4));
- WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING, data);
- }
-
- vce_v4_0_set_vce_sw_clock_gating(adev, enable);
- }
-
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return 0;
-}
-#endif
-
-static int vce_v4_0_set_powergating_state(void *handle,
+static int vce_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
@@ -968,7 +701,7 @@ static int vce_v4_0_set_powergating_state(void *handle,
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_PG_STATE_GATE)
return vce_v4_0_stop(adev);
@@ -1076,19 +809,12 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
const struct amd_ip_funcs vce_v4_0_ip_funcs = {
.name = "vce_v4_0",
.early_init = vce_v4_0_early_init,
- .late_init = NULL,
.sw_init = vce_v4_0_sw_init,
.sw_fini = vce_v4_0_sw_fini,
.hw_init = vce_v4_0_hw_init,
.hw_fini = vce_v4_0_hw_fini,
.suspend = vce_v4_0_suspend,
.resume = vce_v4_0_resume,
- .is_idle = NULL /* vce_v4_0_is_idle */,
- .wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
- .check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
- .pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
- .soft_reset = NULL /* vce_v4_0_soft_reset */,
- .post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
.set_clockgating_state = vce_v4_0_set_clockgating_state,
.set_powergating_state = vce_v4_0_set_powergating_state,
};
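[Note: the vce_v4_0 "#if 0" removal also takes broken code with it; the deleted check_soft_reset and set_clockgating_state bodies contained unbalanced parentheses in their RREG32(SOC15_REG_OFFSET(...)) calls and would not have compiled if ever enabled. The trimmed ip_funcs tables across this series rely on designated-initializer semantics, under which omitted members are zero-initialized, so spelling out NULL callbacks adds nothing. Hypothetical illustration:

	static const struct amd_ip_funcs example_ip_funcs = {
		.name = "example",
		.early_init = example_early_init,	/* hypothetical callback */
		/* .late_init, .is_idle, ... left unset: implicitly NULL */
	};
]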
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index ecdfbfefd66a..5ea96c983517 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -85,7 +85,8 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
+static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -95,14 +96,14 @@ static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
/**
* vcn_v1_0_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v1_0_early_init(void *handle)
+static int vcn_v1_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->vcn.num_enc_rings = 2;
@@ -110,7 +111,7 @@ static int vcn_v1_0_early_init(void *handle)
vcn_v1_0_set_enc_ring_funcs(adev);
vcn_v1_0_set_irq_funcs(adev);
- jpeg_v1_0_early_init(handle);
+ jpeg_v1_0_early_init(ip_block);
return amdgpu_vcn_early_init(adev);
}
@@ -118,17 +119,17 @@ static int vcn_v1_0_early_init(void *handle)
/**
* vcn_v1_0_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v1_0_sw_init(void *handle)
+static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
uint32_t *ptr;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -197,7 +198,7 @@ static int vcn_v1_0_sw_init(void *handle)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
}
- r = jpeg_v1_0_sw_init(handle);
+ r = jpeg_v1_0_sw_init(ip_block);
/* Allocate memory for VCN IP Dump buffer */
ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
@@ -213,20 +214,20 @@ static int vcn_v1_0_sw_init(void *handle)
/**
* vcn_v1_0_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v1_0_sw_fini(void *handle)
+static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
- jpeg_v1_0_sw_fini(handle);
+ jpeg_v1_0_sw_fini(ip_block);
r = amdgpu_vcn_sw_fini(adev);
@@ -238,13 +239,13 @@ static int vcn_v1_0_sw_fini(void *handle)
/**
* vcn_v1_0_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v1_0_hw_init(void *handle)
+static int vcn_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i, r;
@@ -268,20 +269,20 @@ static int vcn_v1_0_hw_init(void *handle)
/**
* vcn_v1_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v1_0_hw_fini(void *handle)
+static int vcn_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
- vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v1_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
return 0;
@@ -290,23 +291,23 @@ static int vcn_v1_0_hw_fini(void *handle)
/**
* vcn_v1_0_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v1_0_suspend(void *handle)
+static int vcn_v1_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool idle_work_unexecuted;
idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
if (idle_work_unexecuted) {
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ amdgpu_dpm_enable_vcn(adev, false, 0);
}
- r = vcn_v1_0_hw_fini(adev);
+ r = vcn_v1_0_hw_fini(ip_block);
if (r)
return r;
@@ -318,20 +319,19 @@ static int vcn_v1_0_suspend(void *handle)
/**
* vcn_v1_0_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v1_0_resume(void *handle)
+static int vcn_v1_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v1_0_hw_init(adev);
+ r = vcn_v1_0_hw_init(ip_block);
return r;
}
@@ -345,7 +345,7 @@ static int vcn_v1_0_resume(void *handle)
*/
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -412,7 +412,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1384,9 +1384,9 @@ static bool vcn_v1_0_is_idle(void *handle)
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
-static int vcn_v1_0_wait_for_idle(void *handle)
+static int vcn_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
@@ -1395,15 +1395,15 @@ static int vcn_v1_0_wait_for_idle(void *handle)
return ret;
}
-static int vcn_v1_0_set_clockgating_state(void *handle,
+static int vcn_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v1_0_is_idle(handle))
+ if (!vcn_v1_0_is_idle(adev))
return -EBUSY;
vcn_v1_0_enable_clock_gating(adev);
} else {
@@ -1800,7 +1800,7 @@ static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun
}
}
-static int vcn_v1_0_set_powergating_state(void *handle,
+static int vcn_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
@@ -1811,7 +1811,7 @@ static int vcn_v1_0_set_powergating_state(void *handle,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == adev->vcn.cur_state)
return 0;
@@ -1857,7 +1857,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ amdgpu_dpm_enable_vcn(adev, false, 0);
else
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
@@ -1887,7 +1887,7 @@ void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ amdgpu_dpm_enable_vcn(adev, true, 0);
else
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
@@ -1925,9 +1925,9 @@ void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
}
-static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v1_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_1_0);
uint32_t inst_off, is_powered;
@@ -1957,9 +1957,9 @@ static void vcn_v1_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v1_0_dump_ip_state(void *handle)
+static void vcn_v1_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -1988,7 +1988,6 @@ static void vcn_v1_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
- .late_init = NULL,
.sw_init = vcn_v1_0_sw_init,
.sw_fini = vcn_v1_0_sw_fini,
.hw_init = vcn_v1_0_hw_init,
@@ -1997,10 +1996,6 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.resume = vcn_v1_0_resume,
.is_idle = vcn_v1_0_is_idle,
.wait_for_idle = vcn_v1_0_wait_for_idle,
- .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
- .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
- .soft_reset = NULL /* vcn_v1_0_soft_reset */,
- .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
.set_clockgating_state = vcn_v1_0_set_clockgating_state,
.set_powergating_state = vcn_v1_0_set_powergating_state,
.dump_ip_state = vcn_v1_0_dump_ip_state,
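The vcn_v1_0 hunks above set the pattern that every later VCN file in this series repeats: each amd_ip_funcs callback trades its untyped void *handle parameter for a typed struct amdgpu_ip_block * and reaches the device through ip_block->adev, so the per-callback cast disappears and the never-used NULL members drop out of the funcs table. A minimal stand-alone sketch of the before/after shape; the struct definitions here are simplified stand-ins, not the kernel's real amdgpu.h types:

#include <stdio.h>

/* Stand-ins for the kernel structures; the real definitions live in amdgpu.h */
struct amdgpu_device {
        int num_enc_rings;
};

struct amdgpu_ip_block {
        struct amdgpu_device *adev;
};

/* Old shape: an opaque handle that every callback casts back itself */
static int early_init_old(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->num_enc_rings = 2;
        return 0;
}

/* New shape: the parameter is typed and the cast disappears */
static int early_init_new(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        adev->num_enc_rings = 2;
        return 0;
}

int main(void)
{
        struct amdgpu_device dev = { 0 };
        struct amdgpu_ip_block block = { .adev = &dev };

        early_init_old(&dev);
        early_init_new(&block);
        printf("enc rings: %d\n", dev.num_enc_rings);
        return 0;
}

With the typed parameter the compiler can reject a mismatched handler outright, which the old void * convention silently accepted.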
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index bfd067e2d2f1..e42cfc731ad8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -92,7 +92,7 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_0_set_powergating_state(void *handle,
+static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -100,14 +100,14 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
* vcn_v2_0_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v2_0_early_init(void *handle)
+static int vcn_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
adev->vcn.num_enc_rings = 1;
@@ -124,17 +124,17 @@ static int vcn_v2_0_early_init(void *handle)
/**
* vcn_v2_0_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v2_0_sw_init(void *handle)
+static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
uint32_t *ptr;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
volatile struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
@@ -237,14 +237,14 @@ static int vcn_v2_0_sw_init(void *handle)
/**
* vcn_v2_0_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v2_0_sw_fini(void *handle)
+static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r, idx;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -268,13 +268,13 @@ static int vcn_v2_0_sw_fini(void *handle)
/**
* vcn_v2_0_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v2_0_hw_init(void *handle)
+static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i, r;
@@ -305,20 +305,20 @@ static int vcn_v2_0_hw_init(void *handle)
/**
* vcn_v2_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v2_0_hw_fini(void *handle)
+static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
- vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v2_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -326,20 +326,19 @@ static int vcn_v2_0_hw_fini(void *handle)
/**
* vcn_v2_0_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v2_0_suspend(void *handle)
+static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v2_0_hw_fini(adev);
+ r = vcn_v2_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -347,20 +346,19 @@ static int vcn_v2_0_suspend(void *handle)
/**
* vcn_v2_0_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v2_0_resume(void *handle)
+static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v2_0_hw_init(adev);
+ r = vcn_v2_0_hw_init(ip_block);
return r;
}
@@ -374,7 +372,7 @@ static int vcn_v2_0_resume(void *handle)
*/
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
if (amdgpu_sriov_vf(adev))
@@ -430,7 +428,7 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -980,7 +978,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
int i, j, r;
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ amdgpu_dpm_enable_vcn(adev, true, 0);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
return vcn_v2_0_start_dpg_mode(adev, adev->vcn.indirect_sram);
@@ -1237,7 +1235,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
power_off:
if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ amdgpu_dpm_enable_vcn(adev, false, 0);
return 0;
}
@@ -1326,9 +1324,9 @@ static bool vcn_v2_0_is_idle(void *handle)
return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
-static int vcn_v2_0_wait_for_idle(void *handle)
+static int vcn_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
@@ -1337,10 +1335,10 @@ static int vcn_v2_0_wait_for_idle(void *handle)
return ret;
}
-static int vcn_v2_0_set_clockgating_state(void *handle,
+static int vcn_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
@@ -1348,7 +1346,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
if (enable) {
/* wait for STATUS to clear */
- if (!vcn_v2_0_is_idle(handle))
+ if (!vcn_v2_0_is_idle(adev))
return -EBUSY;
vcn_v2_0_enable_clock_gating(adev);
} else {
@@ -1798,7 +1796,7 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
}
-static int vcn_v2_0_set_powergating_state(void *handle,
+static int vcn_v2_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCN block.
@@ -1809,7 +1807,7 @@ static int vcn_v2_0_set_powergating_state(void *handle,
* the smc and the hw blocks
*/
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
@@ -1922,7 +1920,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
init_table += header->vcn_table_offset;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
@@ -2034,9 +2032,9 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
-static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
uint32_t inst_off, is_powered;
@@ -2066,9 +2064,9 @@ static void vcn_v2_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v2_0_dump_ip_state(void *handle)
+static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -2097,7 +2095,6 @@ static void vcn_v2_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
- .late_init = NULL,
.sw_init = vcn_v2_0_sw_init,
.sw_fini = vcn_v2_0_sw_fini,
.hw_init = vcn_v2_0_hw_init,
@@ -2106,10 +2103,6 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.resume = vcn_v2_0_resume,
.is_idle = vcn_v2_0_is_idle,
.wait_for_idle = vcn_v2_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_v2_0_set_powergating_state,
.dump_ip_state = vcn_v2_0_dump_ip_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 04e9e806e318..b518202955ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -95,7 +95,7 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_5_set_powergating_state(void *handle,
+static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -110,14 +110,14 @@ static int amdgpu_ih_clientid_vcns[] = {
/**
* vcn_v2_5_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v2_5_early_init(void *handle)
+static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = 2;
@@ -151,17 +151,17 @@ static int vcn_v2_5_early_init(void *handle)
/**
* vcn_v2_5_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v2_5_sw_init(void *handle)
+static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
uint32_t *ptr;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
if (adev->vcn.harvest_config & (1 << j))
@@ -295,14 +295,14 @@ static int vcn_v2_5_sw_init(void *handle)
/**
* vcn_v2_5_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v2_5_sw_fini(void *handle)
+static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, r, idx;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
volatile struct amdgpu_fw_shared *fw_shared;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -333,13 +333,13 @@ static int vcn_v2_5_sw_fini(void *handle)
/**
* vcn_v2_5_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v2_5_hw_init(void *handle)
+static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, j, r = 0;
@@ -381,13 +381,13 @@ static int vcn_v2_5_hw_init(void *handle)
/**
* vcn_v2_5_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v2_5_hw_fini(void *handle)
+static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -399,7 +399,7 @@ static int vcn_v2_5_hw_fini(void *handle)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
- vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
@@ -411,20 +411,19 @@ static int vcn_v2_5_hw_fini(void *handle)
/**
* vcn_v2_5_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v2_5_suspend(void *handle)
+static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v2_5_hw_fini(adev);
+ r = vcn_v2_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -432,20 +431,19 @@ static int vcn_v2_5_suspend(void *handle)
/**
* vcn_v2_5_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v2_5_resume(void *handle)
+static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v2_5_hw_init(adev);
+ r = vcn_v2_5_hw_init(ip_block);
return r;
}
@@ -467,7 +465,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -516,7 +514,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1014,8 +1012,10 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
uint32_t rb_bufsz, tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1287,7 +1287,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
@@ -1487,8 +1487,10 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1780,15 +1782,16 @@ static bool vcn_v2_5_is_idle(void *handle)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
return ret;
}
-static int vcn_v2_5_wait_for_idle(void *handle)
+static int vcn_v2_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1803,17 +1806,17 @@ static int vcn_v2_5_wait_for_idle(void *handle)
return ret;
}
-static int vcn_v2_5_set_clockgating_state(void *handle,
+static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
return 0;
if (enable) {
- if (!vcn_v2_5_is_idle(handle))
+ if (!vcn_v2_5_is_idle(adev))
return -EBUSY;
vcn_v2_5_enable_clock_gating(adev);
} else {
@@ -1823,10 +1826,10 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
return 0;
}
-static int vcn_v2_5_set_powergating_state(void *handle,
+static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (amdgpu_sriov_vf(adev))
@@ -1926,9 +1929,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
uint32_t inst_off, is_powered;
@@ -1958,9 +1961,9 @@ static void vcn_v2_5_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v2_5_dump_ip_state(void *handle)
+static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -1989,7 +1992,6 @@ static void vcn_v2_5_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
- .late_init = NULL,
.sw_init = vcn_v2_5_sw_init,
.sw_fini = vcn_v2_5_sw_fini,
.hw_init = vcn_v2_5_hw_init,
@@ -1998,10 +2000,6 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.resume = vcn_v2_5_resume,
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
.dump_ip_state = vcn_v2_5_dump_ip_state,
@@ -2011,7 +2009,6 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.name = "vcn_v2_6",
.early_init = vcn_v2_5_early_init,
- .late_init = NULL,
.sw_init = vcn_v2_5_sw_init,
.sw_fini = vcn_v2_5_sw_fini,
.hw_init = vcn_v2_5_hw_init,
@@ -2020,10 +2017,6 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.resume = vcn_v2_5_resume,
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
.dump_ip_state = vcn_v2_5_dump_ip_state,
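Besides the signature conversion, the vcn_v2_5 hunks replace the UVD-era helper amdgpu_dpm_enable_uvd(), which gated the whole block at once, with amdgpu_dpm_enable_vcn(), which takes an instance index, so the start and stop paths now loop over every instance. A hedged sketch of that control flow; the struct layout is trimmed and the stub below only logs the request instead of calling into the SMU:

#include <stdbool.h>
#include <stdio.h>

/* Trimmed stand-ins for the kernel structures the loop touches */
struct amdgpu_device {
        struct { bool dpm_enabled; } pm;
        struct { int num_vcn_inst; } vcn;
};

/* Stub for the per-instance DPM call named in the diff; the real helper
 * forwards the gating request for exactly one VCN instance. */
static void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable,
                                  int inst)
{
        printf("VCN inst %d -> %s\n", inst, enable ? "ungate" : "gate");
}

/* Mirrors the loops the diff adds to vcn_v2_5_start()/vcn_v2_5_stop() */
static void vcn_power_all(struct amdgpu_device *adev, bool enable)
{
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_vcn(adev, enable, i);
        }
}

int main(void)
{
        struct amdgpu_device dev = {
                .pm.dpm_enabled = true,
                .vcn.num_vcn_inst = 2,
        };

        vcn_power_all(&dev, true);
        vcn_power_all(&dev, false);
        return 0;
}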
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 65dd68b32280..63ddd4cca910 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -105,7 +105,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v3_0_set_powergating_state(void *handle,
+static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -116,14 +116,14 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
/**
* vcn_v3_0_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v3_0_early_init(void *handle)
+static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
@@ -153,18 +153,18 @@ static int vcn_v3_0_early_init(void *handle)
/**
* vcn_v3_0_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v3_0_sw_init(void *handle)
+static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
uint32_t *ptr;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -299,13 +299,13 @@ static int vcn_v3_0_sw_init(void *handle)
/**
* vcn_v3_0_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v3_0_sw_fini(void *handle)
+static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r, idx;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -338,13 +338,13 @@ static int vcn_v3_0_sw_fini(void *handle)
/**
* vcn_v3_0_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v3_0_hw_init(void *handle)
+static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, j, r;
@@ -413,13 +413,13 @@ static int vcn_v3_0_hw_init(void *handle)
/**
* vcn_v3_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v3_0_hw_fini(void *handle)
+static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -430,9 +430,9 @@ static int vcn_v3_0_hw_fini(void *handle)
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
-                       (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
-                       RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
-                               vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+                           (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+                            RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
+                               vcn_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
}
@@ -443,20 +443,19 @@ static int vcn_v3_0_hw_fini(void *handle)
/**
* vcn_v3_0_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v3_0_suspend(void *handle)
+static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v3_0_hw_fini(adev);
+ r = vcn_v3_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -464,20 +463,19 @@ static int vcn_v3_0_suspend(void *handle)
/**
* vcn_v3_0_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v3_0_resume(void *handle)
+static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v3_0_hw_init(adev);
+ r = vcn_v3_0_hw_init(ip_block);
return r;
}
@@ -492,7 +490,7 @@ static int vcn_v3_0_resume(void *handle)
*/
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -542,7 +540,7 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1143,8 +1141,10 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
uint32_t rb_bufsz, tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1375,7 +1375,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
@@ -1634,8 +1634,10 @@ static int vcn_v3_0_stop(struct amdgpu_device *adev)
vcn_v3_0_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -2116,9 +2118,9 @@ static bool vcn_v3_0_is_idle(void *handle)
return ret;
}
-static int vcn_v3_0_wait_for_idle(void *handle)
+static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2134,10 +2136,10 @@ static int vcn_v3_0_wait_for_idle(void *handle)
return ret;
}
-static int vcn_v3_0_set_clockgating_state(void *handle,
+static int vcn_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -2157,10 +2159,10 @@ static int vcn_v3_0_set_clockgating_state(void *handle,
return 0;
}
-static int vcn_v3_0_set_powergating_state(void *handle,
+static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
/* for SRIOV, guest should not control VCN Power-gating
@@ -2251,9 +2253,9 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
uint32_t inst_off;
@@ -2284,9 +2286,9 @@ static void vcn_v3_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v3_0_dump_ip_state(void *handle)
+static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -2315,7 +2317,6 @@ static void vcn_v3_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
- .late_init = NULL,
.sw_init = vcn_v3_0_sw_init,
.sw_fini = vcn_v3_0_sw_fini,
.hw_init = vcn_v3_0_hw_init,
@@ -2324,10 +2325,6 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.resume = vcn_v3_0_resume,
.is_idle = vcn_v3_0_is_idle,
.wait_for_idle = vcn_v3_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_v3_0_set_powergating_state,
.dump_ip_state = vcn_v3_0_dump_ip_state,
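A second mechanical change runs through all the files above and below: the firmware handle moves from the device-level array adev->vcn.fw[i] into the per-instance state as adev->vcn.inst[i].fw, so everything an instance owns lives in one struct. The sketch below reproduces only the sizing arithmetic from the mc_resume hunks (page-aligned image plus a 4-byte tail); the types and the page-align macro body are illustrative stand-ins:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_ALIGN(a) \
        (((a) + AMDGPU_GPU_PAGE_SIZE - 1) & ~(uint64_t)(AMDGPU_GPU_PAGE_SIZE - 1))

/* Placeholder types: the firmware handle now sits inside the
 * per-instance struct instead of a parallel array on the device. */
struct firmware { size_t size; };

struct amdgpu_vcn_inst { const struct firmware *fw; };

struct amdgpu_device {
        struct { struct amdgpu_vcn_inst inst[4]; } vcn;
};

/* Matches the expression in the mc_resume hunks: the cache window is
 * the page-aligned firmware image plus a 4-byte tail. */
static uint64_t vcn_fw_cache_size(struct amdgpu_device *adev, int inst)
{
        return AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4);
}

int main(void)
{
        struct firmware fw = { .size = 123456 };
        struct amdgpu_device dev = { .vcn.inst[0].fw = &fw };

        printf("cache window: %llu bytes\n",
               (unsigned long long)vcn_fw_cache_size(&dev, 0));
        return 0;
}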
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 26c6f10a8c8f..00551d6f0370 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -96,7 +96,7 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_set_powergating_state(void *handle,
+static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -106,14 +106,14 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
/**
* vcn_v4_0_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v4_0_early_init(void *handle)
+static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
if (amdgpu_sriov_vf(adev)) {
@@ -164,14 +164,14 @@ static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
/**
* vcn_v4_0_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v4_0_sw_init(void *handle)
+static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
uint32_t *ptr;
@@ -225,6 +225,10 @@ static int vcn_v4_0_sw_init(void *handle)
vcn_v4_0_fw_shared_init(adev, i);
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -247,19 +251,23 @@ static int vcn_v4_0_sw_init(void *handle)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
/**
* vcn_v4_0_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v4_0_sw_fini(void *handle)
+static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r, idx;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -284,6 +292,7 @@ static int vcn_v4_0_sw_fini(void *handle)
if (r)
return r;
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
r = amdgpu_vcn_sw_fini(adev);
kfree(adev->vcn.ip_dump);
@@ -294,13 +303,13 @@ static int vcn_v4_0_sw_fini(void *handle)
/**
* vcn_v4_0_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v4_0_hw_init(void *handle)
+static int vcn_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
@@ -341,13 +350,13 @@ static int vcn_v4_0_hw_init(void *handle)
/**
* vcn_v4_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v4_0_hw_fini(void *handle)
+static int vcn_v4_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -357,9 +366,9 @@ static int vcn_v4_0_hw_fini(void *handle)
continue;
if (!amdgpu_sriov_vf(adev)) {
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
-                       (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
-                       RREG32_SOC15(VCN, i, regUVD_STATUS))) {
-                               vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+                           (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+                            RREG32_SOC15(VCN, i, regUVD_STATUS))) {
+                               vcn_v4_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
@@ -372,20 +381,19 @@ static int vcn_v4_0_hw_fini(void *handle)
/**
* vcn_v4_0_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v4_0_suspend(void *handle)
+static int vcn_v4_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v4_0_hw_fini(adev);
+ r = vcn_v4_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -393,20 +401,19 @@ static int vcn_v4_0_suspend(void *handle)
/**
* vcn_v4_0_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v4_0_resume(void *handle)
+static int vcn_v4_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v4_0_hw_init(adev);
+ r = vcn_v4_0_hw_init(ip_block);
return r;
}
@@ -424,7 +431,7 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -484,7 +491,7 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
{
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -1090,8 +1097,10 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
uint32_t tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1334,7 +1343,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
@@ -1616,8 +1625,10 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
vcn_v4_0_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1975,13 +1986,13 @@ static bool vcn_v4_0_is_idle(void *handle)
/**
* vcn_v4_0_wait_for_idle - wait for VCN block idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Wait for VCN block idle
*/
-static int vcn_v4_0_wait_for_idle(void *handle)
+static int vcn_v4_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -2000,14 +2011,15 @@ static int vcn_v4_0_wait_for_idle(void *handle)
/**
* vcn_v4_0_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+static int vcn_v4_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -2030,14 +2042,15 @@ static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_sta
/**
* vcn_v4_0_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state)
+static int vcn_v4_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
/* for SRIOV, guest should not control VCN Power-gating
@@ -2158,9 +2171,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
uint32_t inst_off, is_powered;
@@ -2190,9 +2203,9 @@ static void vcn_v4_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v4_0_dump_ip_state(void *handle)
+static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -2222,7 +2235,6 @@ static void vcn_v4_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
- .late_init = NULL,
.sw_init = vcn_v4_0_sw_init,
.sw_fini = vcn_v4_0_sw_fini,
.hw_init = vcn_v4_0_hw_init,
@@ -2231,10 +2243,6 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.resume = vcn_v4_0_resume,
.is_idle = vcn_v4_0_is_idle,
.wait_for_idle = vcn_v4_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_v4_0_set_powergating_state,
.dump_ip_state = vcn_v4_0_dump_ip_state,
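vcn_v4_0 (and vcn_v4_0_3 below) additionally grow a supported-reset mask: sw_init derives it from the first encode ring with amdgpu_get_soft_full_reset_mask() and publishes it through amdgpu_vcn_sysfs_reset_mask_init(), and sw_fini removes it again before freeing software state. The sketch below shows only the init/fini pairing and the bitmask idea; the bit values and both helper bodies are placeholders, not the kernel's implementations:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bits; the kernel defines its own reset-type flags */
#define RESET_TYPE_SOFT (1u << 0)
#define RESET_TYPE_FULL (1u << 1)

struct vcn_block {
        uint32_t supported_reset;
        int sysfs_registered;
};

/* Stand-in for amdgpu_get_soft_full_reset_mask(): report which reset
 * scopes the block can service. */
static uint32_t get_soft_full_reset_mask(void)
{
        return RESET_TYPE_SOFT | RESET_TYPE_FULL;
}

static int sysfs_reset_mask_init(struct vcn_block *vcn)
{
        vcn->sysfs_registered = 1; /* real code creates a sysfs attribute */
        return 0;
}

static void sysfs_reset_mask_fini(struct vcn_block *vcn)
{
        vcn->sysfs_registered = 0; /* real code removes the attribute */
}

int main(void)
{
        struct vcn_block vcn = { 0 };

        /* sw_init: derive the mask, then expose it */
        vcn.supported_reset = get_soft_full_reset_mask();
        if (sysfs_reset_mask_init(&vcn))
                return 1;
        printf("supported_reset = 0x%x\n", vcn.supported_reset);

        /* sw_fini: remove the sysfs entry before freeing sw state */
        sysfs_reset_mask_fini(&vcn);
        return 0;
}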
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 0fda70336300..ecdc027f8220 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -87,7 +87,7 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_3_set_powergating_state(void *handle,
+static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -95,16 +95,23 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
int inst_idx, bool indirect);
+
+static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
+{
+ return (amdgpu_sriov_vf(adev) ||
+ (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)));
+}
+
/**
* vcn_v4_0_3_early_init - set function pointers
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
*/
-static int vcn_v4_0_3_early_init(void *handle)
+static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -116,16 +123,30 @@ static int vcn_v4_0_3_early_init(void *handle)
return amdgpu_vcn_early_init(adev);
}
+static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
+
+ return 0;
+}
+
/**
* vcn_v4_0_3_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v4_0_3_sw_init(void *handle)
+static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
@@ -148,8 +169,6 @@ static int vcn_v4_0_3_sw_init(void *handle)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
-
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -172,14 +191,13 @@ static int vcn_v4_0_3_sw_init(void *handle)
if (r)
return r;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
- fw_shared->sq.is_enabled = true;
-
- if (amdgpu_vcnfw_log)
- amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ vcn_v4_0_3_fw_shared_init(adev, i);
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -206,19 +224,23 @@ static int vcn_v4_0_3_sw_init(void *handle)
adev->vcn.ip_dump = ptr;
}
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
/**
* vcn_v4_0_3_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v4_0_3_sw_fini(void *handle)
+static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r, idx;
if (drm_dev_enter(&adev->ddev, &idx)) {
@@ -239,6 +261,7 @@ static int vcn_v4_0_3_sw_fini(void *handle)
if (r)
return r;
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
r = amdgpu_vcn_sw_fini(adev);
kfree(adev->vcn.ip_dump);
@@ -249,13 +272,13 @@ static int vcn_v4_0_3_sw_fini(void *handle)
/**
* vcn_v4_0_3_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v4_0_3_hw_init(void *handle)
+static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
@@ -273,6 +296,8 @@ static int vcn_v4_0_3_hw_init(void *handle)
}
} else {
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -296,6 +321,11 @@ static int vcn_v4_0_3_hw_init(void *handle)
regVCN_RB1_DB_CTRL);
}
+ /* Re-init fw_shared when RAS fatal error occurred */
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ if (!fw_shared->sq.is_enabled)
+ vcn_v4_0_3_fw_shared_init(adev, i);
+
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
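The hunk above is the payoff of the new vcn_v4_0_3_fw_shared_init() helper: a RAS fatal error can leave the firmware-shared region zeroed, which hw_init detects by sq.is_enabled reading back as 0, and it simply re-runs the init. A self-contained sketch of that recovery check; the flag value and struct layout are trimmed placeholders, and the real code also converts present_flag_0 with cpu_to_le32():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FW_SHARED_FLAG_0_UNIFIED_QUEUE (1u << 2) /* placeholder value */

/* Trimmed view of the shared region; the real layout has more fields */
struct vcn4_fw_shared {
        uint32_t present_flag_0;
        struct { uint8_t is_enabled; } sq;
};

static void fw_shared_init(struct vcn4_fw_shared *fw_shared)
{
        fw_shared->present_flag_0 = FW_SHARED_FLAG_0_UNIFIED_QUEUE;
        fw_shared->sq.is_enabled = 1;
}

int main(void)
{
        struct vcn4_fw_shared shared;

        fw_shared_init(&shared);

        /* a RAS fatal error leaves the region zeroed after reset... */
        memset(&shared, 0, sizeof(shared));

        /* ...so hw_init re-initializes it when the enable flag is gone */
        if (!shared.sq.is_enabled)
                fw_shared_init(&shared);

        printf("sq.is_enabled = %u\n", shared.sq.is_enabled);
        return 0;
}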
@@ -308,18 +338,18 @@ static int vcn_v4_0_3_hw_init(void *handle)
/**
* vcn_v4_0_3_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block, mark ring as not ready any more
*/
-static int vcn_v4_0_3_hw_fini(void *handle)
+static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
- vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
return 0;
}
@@ -327,20 +357,19 @@ static int vcn_v4_0_3_hw_fini(void *handle)
/**
* vcn_v4_0_3_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v4_0_3_suspend(void *handle)
+static int vcn_v4_0_3_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = vcn_v4_0_3_hw_fini(adev);
+ r = vcn_v4_0_3_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -348,20 +377,19 @@ static int vcn_v4_0_3_suspend(void *handle)
/**
* vcn_v4_0_3_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v4_0_3_resume(void *handle)
+static int vcn_v4_0_3_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v4_0_3_hw_init(adev);
+ r = vcn_v4_0_3_hw_init(ip_block);
return r;
}
@@ -379,7 +407,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
vcn_inst = GET_INST(VCN, inst_idx);
@@ -454,7 +482,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -929,6 +957,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
vcn_inst = GET_INST(VCN, i);
+ vcn_v4_0_3_fw_shared_init(adev, vcn_inst);
+
memset(&header, 0, sizeof(struct mmsch_v4_0_3_init_header));
header.version = MMSCH_VERSION;
header.total_size = sizeof(struct mmsch_v4_0_3_init_header) >> 2;
@@ -941,7 +971,7 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
@@ -1093,8 +1123,10 @@ static int vcn_v4_0_3_start(struct amdgpu_device *adev)
int i, j, k, r, vcn_inst;
uint32_t tmp;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -1367,8 +1399,10 @@ static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
vcn_v4_0_3_enable_clock_gating(adev, i);
}
Done:
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
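
The start/stop hunks also replace the device-wide amdgpu_dpm_enable_uvd(adev, bool) toggle with amdgpu_dpm_enable_vcn(adev, bool, inst), one call per VCN instance. A small sketch of the per-instance shape; the helper and instance count are illustrative, not the real DPM interface:

#include <stdbool.h>
#include <stdio.h>

#define NUM_VCN_INST 4	/* illustrative instance count */

/* Stand-in for the per-instance DPM call. */
static void dpm_enable_vcn(bool enable, int inst)
{
	printf("VCN%d power %s\n", inst, enable ? "on" : "off");
}

static void vcn_power_all(bool enable)
{
	int i;

	/* One call per instance instead of one device-wide toggle, so
	 * instances can later be gated independently. */
	for (i = 0; i < NUM_VCN_INST; ++i)
		dpm_enable_vcn(enable, i);
}

int main(void)
{
	vcn_power_all(true);
	vcn_power_all(false);
	return 0;
}
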
@@ -1430,8 +1464,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
- /* For VF, only local offsets should be used */
- if (amdgpu_sriov_vf(ring->adev))
+ /* Use normalized offsets when required */
+ if (vcn_v4_0_3_normalizn_reqd(ring->adev))
reg = NORMALIZE_VCN_REG_OFFSET(reg);
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
@@ -1442,8 +1476,8 @@ static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t
static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
- /* For VF, only local offsets should be used */
- if (amdgpu_sriov_vf(ring->adev))
+ /* Use normalized offsets when required */
+ if (vcn_v4_0_3_normalizn_reqd(ring->adev))
reg = NORMALIZE_VCN_REG_OFFSET(reg);
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
@@ -1567,13 +1601,13 @@ static bool vcn_v4_0_3_is_idle(void *handle)
/**
* vcn_v4_0_3_wait_for_idle - wait for VCN block idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Wait for VCN block idle
*/
-static int vcn_v4_0_3_wait_for_idle(void *handle)
+static int vcn_v4_0_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1588,15 +1622,15 @@ static int vcn_v4_0_3_wait_for_idle(void *handle)
/**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v4_0_3_set_clockgating_state(void *handle,
+static int vcn_v4_0_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -1616,15 +1650,15 @@ static int vcn_v4_0_3_set_clockgating_state(void *handle,
/**
* vcn_v4_0_3_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v4_0_3_set_powergating_state(void *handle,
+static int vcn_v4_0_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
/* for SRIOV, guest should not control VCN Power-gating
@@ -1733,9 +1767,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}
-static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
uint32_t inst_off, is_powered;
@@ -1765,9 +1799,9 @@ static void vcn_v4_0_3_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v4_0_3_dump_ip_state(void *handle)
+static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off, inst_id;
@@ -1798,7 +1832,6 @@ static void vcn_v4_0_3_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
- .late_init = NULL,
.sw_init = vcn_v4_0_3_sw_init,
.sw_fini = vcn_v4_0_3_sw_fini,
.hw_init = vcn_v4_0_3_hw_init,
@@ -1807,10 +1840,6 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.resume = vcn_v4_0_3_resume,
.is_idle = vcn_v4_0_3_is_idle,
.wait_for_idle = vcn_v4_0_3_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_v4_0_3_set_powergating_state,
.dump_ip_state = vcn_v4_0_3_dump_ip_state,
@@ -1888,9 +1917,94 @@ static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
};
+static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_bank_info info;
+ u64 misc0;
+ int ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ misc0 = bank->regs[ACA_REG_IDX_MISC0];
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+ 1ULL);
+ break;
+ case ACA_SMU_TYPE_CE:
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE,
+ ACA_REG__MISC0__ERRCNT(misc0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
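+
In the parser above, an uncorrectable (UE) bank is logged with a fixed count of 1ULL, while a correctable (CE) bank is logged with the error count carried in its MISC0 register. A sketch of extracting such a count from a 64-bit MISC0 value; the field position and width are illustrative stand-ins, not the real ACA register layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field placement; the real layout is in the ACA headers. */
#define MISC0_ERRCNT_SHIFT	32
#define MISC0_ERRCNT_MASK	0xfffULL

static uint64_t misc0_errcnt(uint64_t misc0)
{
	return (misc0 >> MISC0_ERRCNT_SHIFT) & MISC0_ERRCNT_MASK;
}

int main(void)
{
	uint64_t misc0 = (7ULL << MISC0_ERRCNT_SHIFT) | 0x5;

	printf("correctable errors in bank: %llu\n",
	       (unsigned long long)misc0_errcnt(misc0));
	return 0;
}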
+
+/* reference to the SMU driver interface header file */
+static int vcn_v4_0_3_err_codes[] = {
+ 14, 15, /* VCN */
+};
+
+static bool vcn_v4_0_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ u32 instlo;
+
+ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+ instlo &= GENMASK(31, 1);
+
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ if (aca_bank_check_error_codes(handle->adev, bank,
+ vcn_v4_0_3_err_codes,
+ ARRAY_SIZE(vcn_v4_0_3_err_codes)))
+ return false;
+
+ return true;
+}
+
+static const struct aca_bank_ops vcn_v4_0_3_aca_bank_ops = {
+ .aca_bank_parser = vcn_v4_0_3_aca_bank_parser,
+ .aca_bank_is_valid = vcn_v4_0_3_aca_bank_is_valid,
+};
+
+static const struct aca_info vcn_v4_0_3_aca_info = {
+ .hwip = ACA_HWIP_TYPE_SMU,
+ .mask = ACA_ERROR_UE_MASK,
+ .bank_ops = &vcn_v4_0_3_aca_bank_ops,
+};
+
+static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
+ &vcn_v4_0_3_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
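+
vcn_v4_0_3_ras_late_init() above uses the standard two-step-with-unwind idiom: run the generic RAS late init, then bind the ACA handle, and tear the first step back down if the second fails, so the block never stays half-registered. The same control flow in a self-contained sketch with stand-in init, bind, and fini steps:

#include <stdio.h>

static int generic_late_init(void)  { return 0; }
static int bind_aca(void)           { return -1; /* force the unwind */ }
static void generic_late_fini(void) { puts("unwound late init"); }

static int ras_late_init(void)
{
	int r;

	r = generic_late_init();
	if (r)
		return r;

	r = bind_aca();
	if (r)
		goto late_fini;

	return 0;

late_fini:
	/* Undo step one so a failed bind leaves no partial state. */
	generic_late_fini();
	return r;
}

int main(void)
{
	return ras_late_init() ? 1 : 0;
}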
+
static struct amdgpu_vcn_ras vcn_v4_0_3_ras = {
.ras_block = {
.hw_ops = &vcn_v4_0_3_ras_hw_ops,
+ .ras_late_init = vcn_v4_0_3_ras_late_init,
},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 9d4f5352a62c..23d3c16c9d9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -95,7 +95,7 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v4_0_5_set_powergating_state(void *handle,
+static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -104,14 +104,14 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
* vcn_v4_0_5_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v4_0_5_early_init(void *handle)
+static int vcn_v4_0_5_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -124,14 +124,14 @@ static int vcn_v4_0_5_early_init(void *handle)
/**
* vcn_v4_0_5_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v4_0_5_sw_init(void *handle)
+static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
uint32_t *ptr;
@@ -220,13 +220,13 @@ static int vcn_v4_0_5_sw_init(void *handle)
/**
* vcn_v4_0_5_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v4_0_5_sw_fini(void *handle)
+static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r, idx;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -261,13 +261,13 @@ static int vcn_v4_0_5_sw_fini(void *handle)
/**
* vcn_v4_0_5_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v4_0_5_hw_init(void *handle)
+static int vcn_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
@@ -291,13 +291,13 @@ static int vcn_v4_0_5_hw_init(void *handle)
/**
* vcn_v4_0_5_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block and mark the ring as no longer ready
*/
-static int vcn_v4_0_5_hw_fini(void *handle)
+static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -309,7 +309,7 @@ static int vcn_v4_0_5_hw_fini(void *handle)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v4_0_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
}
@@ -320,20 +320,19 @@ static int vcn_v4_0_5_hw_fini(void *handle)
/**
* vcn_v4_0_5_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v4_0_5_suspend(void *handle)
+static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v4_0_5_hw_fini(adev);
+ r = vcn_v4_0_5_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -341,20 +340,19 @@ static int vcn_v4_0_5_suspend(void *handle)
/**
* vcn_v4_0_5_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v4_0_5_resume(void *handle)
+static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v4_0_5_hw_init(adev);
+ r = vcn_v4_0_5_hw_init(ip_block);
return r;
}
@@ -372,7 +370,7 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -433,7 +431,7 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -1002,8 +1000,10 @@ static int vcn_v4_0_5_start(struct amdgpu_device *adev)
uint32_t tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1279,8 +1279,10 @@ static int vcn_v4_0_5_stop(struct amdgpu_device *adev)
vcn_v4_0_5_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1469,13 +1471,13 @@ static bool vcn_v4_0_5_is_idle(void *handle)
/**
* vcn_v4_0_5_wait_for_idle - wait for VCN block idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Wait for VCN block idle
*/
-static int vcn_v4_0_5_wait_for_idle(void *handle)
+static int vcn_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1494,14 +1496,15 @@ static int vcn_v4_0_5_wait_for_idle(void *handle)
/**
* vcn_v4_0_5_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v4_0_5_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
@@ -1524,14 +1527,15 @@ static int vcn_v4_0_5_set_clockgating_state(void *handle, enum amd_clockgating_s
/**
* vcn_v4_0_5_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_state state)
+static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->vcn.cur_state)
@@ -1616,9 +1620,9 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p)
+static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
uint32_t inst_off, is_powered;
@@ -1648,9 +1652,9 @@ static void vcn_v4_0_5_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v4_0_5_dump_ip_state(void *handle)
+static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -1680,7 +1684,6 @@ static void vcn_v4_0_5_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
- .late_init = NULL,
.sw_init = vcn_v4_0_5_sw_init,
.sw_fini = vcn_v4_0_5_sw_fini,
.hw_init = vcn_v4_0_5_hw_init,
@@ -1689,10 +1692,6 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.resume = vcn_v4_0_5_resume,
.is_idle = vcn_v4_0_5_is_idle,
.wait_for_idle = vcn_v4_0_5_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_v4_0_5_set_powergating_state,
.dump_ip_state = vcn_v4_0_5_dump_ip_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index c305386358b4..b6d78381ebfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -32,7 +32,7 @@
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
-#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_0.h"
#include <drm/drm_drv.h>
@@ -78,7 +78,7 @@ static int amdgpu_ih_clientid_vcns[] = {
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v5_0_0_set_powergating_state(void *handle,
+static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
@@ -87,14 +87,14 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
* vcn_v5_0_0_early_init - set function pointers and load microcode
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
-static int vcn_v5_0_0_early_init(void *handle)
+static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -105,20 +105,33 @@ static int vcn_v5_0_0_early_init(void *handle)
return amdgpu_vcn_early_init(adev);
}
+void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
+{
+ uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
+ uint32_t *ptr;
+
+ /* Allocate memory for VCN IP Dump buffer */
+ ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
+ if (!ptr) {
+ DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
+ adev->vcn.ip_dump = NULL;
+ } else {
+ adev->vcn.ip_dump = ptr;
+ }
+}
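+
Hoisting the allocation into vcn_v5_0_0_alloc_ip_dump() lets the new VCN 5.0.1 code below reuse it. The helper keeps using kcalloc(), which zeroes the buffer and guards the count * element-size multiply against overflow. The userspace analogue with calloc():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative sizes; the real ones come from vcn_reg_list_5_0. */
#define NUM_INST	4
#define REG_COUNT	64

int main(void)
{
	/* calloc(), like kcalloc(), returns zeroed memory and checks
	 * the count * size multiply for overflow. */
	uint32_t *dump = calloc(NUM_INST * REG_COUNT, sizeof(*dump));

	if (!dump) {
		fprintf(stderr, "Failed to allocate memory for IP dump\n");
		return 1;
	}

	printf("dump buffer holds %d words\n", NUM_INST * REG_COUNT);
	free(dump);
	return 0;
}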
+
/**
* vcn_v5_0_0_sw_init - sw init for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Load firmware and sw initialization
*/
-static int vcn_v5_0_0_sw_init(void *handle)
+static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t *ptr;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -140,13 +153,13 @@ static int vcn_v5_0_0_sw_init(void *handle)
/* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
- VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
+ VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
if (r)
return r;
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
- VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
+ VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
if (r)
return r;
@@ -170,30 +183,32 @@ static int vcn_v5_0_0_sw_init(void *handle)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ vcn_v5_0_0_alloc_ip_dump(adev);
+
+ r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ if (r)
+ return r;
+
return 0;
}
/**
* vcn_v5_0_0_sw_fini - sw fini for VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* VCN suspend and free up sw allocation
*/
-static int vcn_v5_0_0_sw_fini(void *handle)
+static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, r, idx;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -215,6 +230,7 @@ static int vcn_v5_0_0_sw_fini(void *handle)
if (r)
return r;
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
r = amdgpu_vcn_sw_fini(adev);
kfree(adev->vcn.ip_dump);
@@ -225,13 +241,13 @@ static int vcn_v5_0_0_sw_fini(void *handle)
/**
* vcn_v5_0_0_hw_init - start and test VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
-static int vcn_v5_0_0_hw_init(void *handle)
+static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r;
@@ -255,13 +271,13 @@ static int vcn_v5_0_0_hw_init(void *handle)
/**
* vcn_v5_0_0_hw_fini - stop the hardware block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Stop the VCN block and mark the ring as no longer ready
*/
-static int vcn_v5_0_0_hw_fini(void *handle)
+static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
@@ -273,7 +289,7 @@ static int vcn_v5_0_0_hw_fini(void *handle)
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
- vcn_v5_0_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ vcn_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
}
}
@@ -284,20 +300,19 @@ static int vcn_v5_0_0_hw_fini(void *handle)
/**
* vcn_v5_0_0_suspend - suspend VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* HW fini and suspend VCN block
*/
-static int vcn_v5_0_0_suspend(void *handle)
+static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = vcn_v5_0_0_hw_fini(adev);
+ r = vcn_v5_0_0_hw_fini(ip_block);
if (r)
return r;
- r = amdgpu_vcn_suspend(adev);
+ r = amdgpu_vcn_suspend(ip_block->adev);
return r;
}
@@ -305,20 +320,19 @@ static int vcn_v5_0_0_suspend(void *handle)
/**
* vcn_v5_0_0_resume - resume VCN block
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Resume firmware and hw init VCN block
*/
-static int vcn_v5_0_0_resume(void *handle)
+static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_vcn_resume(adev);
+ r = amdgpu_vcn_resume(ip_block->adev);
if (r)
return r;
- r = vcn_v5_0_0_hw_init(adev);
+ r = vcn_v5_0_0_hw_init(ip_block);
return r;
}
@@ -336,7 +350,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -397,7 +411,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -763,8 +777,10 @@ static int vcn_v5_0_0_start(struct amdgpu_device *adev)
uint32_t tmp;
int i, j, k, r;
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
+ }
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -1010,8 +1026,10 @@ static int vcn_v5_0_0_stop(struct amdgpu_device *adev)
vcn_v5_0_0_enable_static_power_gating(adev, i);
}
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_uvd(adev, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
+ }
return 0;
}
@@ -1196,13 +1214,13 @@ static bool vcn_v5_0_0_is_idle(void *handle)
/**
* vcn_v5_0_0_wait_for_idle - wait for VCN block idle
*
- * @handle: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Wait for VCN block idle
*/
-static int vcn_v5_0_0_wait_for_idle(void *handle)
+static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1221,14 +1239,15 @@ static int vcn_v5_0_0_wait_for_idle(void *handle)
/**
* vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: clock gating state
*
* Set VCN block clockgating state
*/
-static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
+static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
@@ -1251,14 +1270,15 @@ static int vcn_v5_0_0_set_clockgating_state(void *handle, enum amd_clockgating_s
/**
* vcn_v5_0_0_set_powergating_state - set VCN block powergating state
*
- * @handle: amdgpu_device pointer
+ * @ip_block: amdgpu_ip_block pointer
* @state: power gating state
*
* Set VCN block powergating state
*/
-static int vcn_v5_0_0_set_powergating_state(void *handle, enum amd_powergating_state state)
+static int vcn_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
if (state == adev->vcn.cur_state)
@@ -1304,10 +1324,10 @@ static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgp
DRM_DEBUG("IH: VCN TRAP\n");
switch (entry->src_id) {
- case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+ case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
break;
- case VCN_4_0__SRCID_UVD_POISON:
+ case VCN_5_0__SRCID_UVD_POISON:
amdgpu_vcn_process_poison_irq(adev, source, entry);
break;
default:
@@ -1343,9 +1363,10 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p)
+void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
+ struct drm_printer *p)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
uint32_t inst_off, is_powered;
@@ -1375,9 +1396,9 @@ static void vcn_v5_0_print_ip_state(void *handle, struct drm_printer *p)
}
}
-static void vcn_v5_0_dump_ip_state(void *handle)
+void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i, j;
bool is_powered;
uint32_t inst_off;
@@ -1406,7 +1427,6 @@ static void vcn_v5_0_dump_ip_state(void *handle)
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
- .late_init = NULL,
.sw_init = vcn_v5_0_0_sw_init,
.sw_fini = vcn_v5_0_0_sw_fini,
.hw_init = vcn_v5_0_0_hw_init,
@@ -1415,14 +1435,10 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.resume = vcn_v5_0_0_resume,
.is_idle = vcn_v5_0_0_is_idle,
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
- .check_soft_reset = NULL,
- .pre_soft_reset = NULL,
- .soft_reset = NULL,
- .post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_v5_0_0_set_powergating_state,
- .dump_ip_state = vcn_v5_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_print_ip_state,
+ .dump_ip_state = vcn_v5_0_0_dump_ip_state,
+ .print_ip_state = vcn_v5_0_0_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
index 51bbccd4360f..b8927652bc50 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -32,6 +32,11 @@
#define VCN_VID_IP_ADDRESS 0x0
#define VCN_AON_IP_ADDRESS 0x30000
+void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev);
+void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
+ struct drm_printer *p);
+void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block);
+
extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
new file mode 100644
index 000000000000..8b463c977d08
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -0,0 +1,1118 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "soc15_hw_ip.h"
+#include "vcn_v2_0.h"
+
+#include "vcn/vcn_5_0_0_offset.h"
+#include "vcn/vcn_5_0_0_sh_mask.h"
+#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
+#include "vcn_v5_0_0.h"
+#include "vcn_v5_0_1.h"
+
+#include <drm/drm_drv.h>
+
+static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
+static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);
+
+/**
+ * vcn_v5_0_1_early_init - set function pointers and load microcode
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Set ring and irq function pointers
+ * Load microcode from filesystem
+ */
+static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ /* re-use enc ring as unified ring */
+ adev->vcn.num_enc_rings = 1;
+
+ vcn_v5_0_1_set_unified_ring_funcs(adev);
+ vcn_v5_0_1_set_irq_funcs(adev);
+
+ return amdgpu_vcn_early_init(adev);
+}
+
+/**
+ * vcn_v5_0_1_sw_init - sw init for VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Load firmware and sw initialization
+ */
+static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, r, vcn_inst;
+
+ r = amdgpu_vcn_sw_init(adev);
+ if (r)
+ return r;
+
+ amdgpu_vcn_setup_ucode(adev);
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ /* VCN UNIFIED TRAP */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+ VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+
+ vcn_inst = GET_INST(VCN, i);
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ ring->use_doorbell = true;
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;
+
+ ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
+ sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
+
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score);
+ if (r)
+ return r;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = true;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ }
+
+ /* TODO: Add queue reset mask when FW fully supports it */
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ vcn_v5_0_0_alloc_ip_dump(adev);
+
+ return amdgpu_vcn_sysfs_reset_mask_init(adev);
+}
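+
Each instance's unified ring gets its own doorbell in sw_init above: the shared base index is shifted left by one (converting the index into 32-bit doorbell slots), and each VCN instance then takes a stride of nine slots. The arithmetic, with a made-up base value:

#include <stdio.h>

int main(void)
{
	/* Made-up base; the real one is adev->doorbell_index.vcn.vcn_ring0_1. */
	unsigned int vcn_ring0_1 = 0x80;
	unsigned int inst;

	for (inst = 0; inst < 4; inst++)
		printf("VCN inst %u -> doorbell slot 0x%x\n", inst,
		       (vcn_ring0_1 << 1) + 9 * inst);
	return 0;
}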
+
+/**
+ * vcn_v5_0_1_sw_fini - sw fini for VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * VCN suspend and free up sw allocation
+ */
+static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, r, idx;
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ fw_shared->sq.is_enabled = 0;
+ }
+
+ drm_dev_exit(idx);
+ }
+
+ r = amdgpu_vcn_suspend(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_sw_fini(adev);
+
+ amdgpu_vcn_sysfs_reset_mask_fini(adev);
+
+ kfree(adev->vcn.ip_dump);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_1_hw_init - start and test VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int i, r, vcn_inst;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 9 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_hw_fini - stop the hardware block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Stop the VCN block and mark the ring as no longer ready
+ */
+static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_suspend - suspend VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * HW fini and suspend VCN block
+ */
+static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = vcn_v5_0_1_hw_fini(ip_block);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_suspend(adev);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_1_resume - resume VCN block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Resume firmware and hw init VCN block
+ */
+static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vcn_resume(adev);
+ if (r)
+ return r;
+
+ r = vcn_v5_0_1_hw_init(ip_block);
+
+ return r;
+}
+
+/**
+ * vcn_v5_0_1_mc_resume - memory controller programming
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Let the VCN memory controller know its offsets
+ */
+static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
+{
+ uint32_t offset, size, vcn_inst;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ vcn_inst = GET_INST(VCN, inst);
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr));
+ offset = size;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);
+
+ /* cache window 1: stack */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+
+ /* cache window 2: context */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+}
+
+/**
+ * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Let the VCN memory controller know its offsets in dpg mode
+ */
+static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ uint32_t offset, size;
+ const struct common_firmware_header *hdr;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
+ size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ /* cache window 0: fw */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (!indirect) {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
+ inst_idx].tmr_mc_addr_lo), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
+ inst_idx].tmr_mc_addr_hi), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ }
+ offset = 0;
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
+ offset = size;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+ }
+
+ if (!indirect)
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ else
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+
+ /* cache window 1: stack */
+ if (!indirect) {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ } else {
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ }
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+
+ /* cache window 2: context */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+
+ /* non-cache window */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+}
+
+/**
+ * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Disable clock gating for VCN block
+ */
+static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+}
+
+/**
+ * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
+ *
+ * @adev: amdgpu_device pointer
+ * @inst: instance number
+ *
+ * Enable clock gating for VCN block
+ */
+static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_device *adev, int inst)
+{
+}
+
+/**
+ * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ * @indirect: indirectly write sram
+ *
+ * Start VCN block with dpg mode
+ */
+static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_ring *ring;
+ int vcn_inst;
+ uint32_t tmp;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+
+ /* disable register anti-hang mechanism */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
+ ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* enable dynamic power gating mode */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
+ tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
+
+ if (indirect) {
+ adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
+ (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+ /* Use dummy register 0xDEADBEEF to pass the AID selection to PSP FW */
+ WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF,
+ adev->vcn.inst[inst_idx].aid_id, 0, true);
+ }
+
+ /* enable VCPU clock */
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* disable master interrupt */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__CRC_RESET_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ 0x00100000L);
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
+
+ vcn_v5_0_1_mc_resume_dpg_mode(adev, inst_idx, indirect);
+
+ tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+ tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
+
+ /* enable LMI MC and UMC channels */
+ tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);
+
+ /* enable master interrupt */
+ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
+ VCN, 0, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
+ if (indirect)
+ amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+
+ ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+
+ return 0;
+}
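+
The trailing RREG32_SOC15 of regVCN_RB1_DB_CTRL above is deliberate: MMIO writes are posted, and reading the same register back forces the doorbell-control write to land before the ring is used. The idiom in a self-contained sketch, with an in-memory array standing in for a mapped register BAR:

#include <stdint.h>
#include <stdio.h>

static void reg_write_flush(volatile uint32_t *regs, unsigned int idx,
			    uint32_t val)
{
	regs[idx] = val;	/* posted write */
	(void)regs[idx];	/* read back: forces the write to complete */
}

int main(void)
{
	static volatile uint32_t fake_bar[16];

	reg_write_flush(fake_bar, 3, 0x1u);
	printf("DB_CTRL now 0x%x\n", (unsigned int)fake_bar[3]);
	return 0;
}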
+
+/**
+ * vcn_v5_0_1_start - VCN start
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Start VCN block
+ */
+static int vcn_v5_0_1_start(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_ring *ring;
+ uint32_t tmp;
+ int i, j, k, r, vcn_inst;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ r = vcn_v5_0_1_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+ continue;
+ }
+
+ vcn_inst = GET_INST(VCN, i);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ vcn_v5_0_1_mc_resume(adev, i);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(100);
+ if (amdgpu_emu_mode == 1)
+ msleep(20);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
+ r = 0;
+ break;
+ }
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
+ }
+ }
+
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
+
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
+
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);
+
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+ }
+
+ return 0;
+}
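+
The boot handshake in vcn_v5_0_1_start() is a bounded poll wrapped in a retry loop: up to ten attempts, each polling UVD_STATUS for the ready bit, with the VCPU pulsed through block reset between attempts. The control flow, reduced to a self-contained sketch (the probe and reset hooks are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static int polls_seen;

static bool vcpu_ready(void) { return ++polls_seen > 150; }
static void vcpu_reset(void) { puts("pulse VCPU block reset"); }

static int wait_for_boot(void)
{
	int attempt, poll;

	for (attempt = 0; attempt < 10; ++attempt) {
		for (poll = 0; poll < 100; ++poll)
			if (vcpu_ready())
				return 0;
		/* Not up yet: reset the VCPU and give it another window. */
		vcpu_reset();
	}
	return -1;	/* give up, as the driver does with dev_err() */
}

int main(void)
{
	return wait_for_boot() ? 1 : 0;
}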
+
+/**
+ * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
+ *
+ * @adev: amdgpu_device pointer
+ * @inst_idx: instance number index
+ *
+ * Stop VCN block with dpg mode
+ */
+static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+{
+ uint32_t tmp;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, inst_idx);
+
+ /* Wait for power status to be 1 */
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
+
+ /* disable dynamic power gating mode */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+}
+
+/**
+ * vcn_v5_0_1_stop - VCN stop
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop VCN block
+ */
+static int vcn_v5_0_1_stop(struct amdgpu_device *adev)
+{
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ uint32_t tmp;
+ int i, r = 0, vcn_inst;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ vcn_inst = GET_INST(VCN, i);
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v5_0_1_stop_dpg_mode(adev, i);
+ continue;
+ }
+
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ return r;
+
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ return r;
+
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
+
+ /* clear status */
+ WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
+ }
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
+ return 0;
+}
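
The soft-reset step at the end asserts LMI_UMC_SOFT_RESET and then LMI_SOFT_RESET as two separate read-modify-write cycles rather than one combined write, presumably so the UMC-facing path is already in reset before the LMI core follows. A compact sketch of that sequential assertion, with a fresh read before each write (accessors are stubs):

/* Assert reset bits one at a time, re-reading the register between
 * writes; a sketch of the UVD_SOFT_RESET sequence with stub accessors. */
#include <stdint.h>

static uint32_t soft_reset_reg;
static uint32_t reg_read(void) { return soft_reset_reg; }
static void reg_write(uint32_t v) { soft_reset_reg = v; }

static void assert_in_order(const uint32_t *bits, int n)
{
	for (int i = 0; i < n; i++) {
		uint32_t tmp = reg_read();  /* fresh read each step */

		tmp |= bits[i];
		reg_write(tmp);
	}
}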
+
+/**
+ * vcn_v5_0_1_unified_ring_get_rptr - get unified read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified read pointer
+ */
+static uint64_t vcn_v5_0_1_unified_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
+}
+
+/**
+ * vcn_v5_0_1_unified_ring_get_wptr - get unified write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware unified write pointer
+ */
+static uint64_t vcn_v5_0_1_unified_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ if (ring->use_doorbell)
+ return *ring->wptr_cpu_addr;
+ else
+ return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR);
+}
+
+/**
+ * vcn_v5_0_1_unified_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
+ DRM_ERROR("wrong ring id is identified in %s", __func__);
+
+ if (ring->use_doorbell) {
+ *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ } else {
+ WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ }
+}
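
The doorbell branch above publishes the new write pointer to the CPU-visible copy before ringing the doorbell, so the firmware can never observe a doorbell that points past the value it announces. The ordering in isolation (a sketch; wdoorbell32() is a stub for the real WDOORBELL32 MMIO write):

/* Publish-then-notify ordering for a doorbell-driven ring. */
#include <stdint.h>

struct enc_ring {
	uint64_t wptr;
	volatile uint32_t *wptr_cpu_addr;  /* visible to the firmware */
	uint32_t doorbell_index;
};

static void wdoorbell32(uint32_t index, uint32_t val)
{
	(void)index; (void)val;            /* MMIO write in real life */
}

static void commit_wptr(struct enc_ring *ring)
{
	*ring->wptr_cpu_addr = (uint32_t)ring->wptr;  /* publish first */
	wdoorbell32(ring->doorbell_index, (uint32_t)ring->wptr);
}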
+
+static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_ENC,
+ .align_mask = 0x3f,
+ .nop = VCN_ENC_CMD_NO_OP,
+ .get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
+ .get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
+ .set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
+ .emit_ib = vcn_v2_0_enc_ring_emit_ib,
+ .emit_fence = vcn_v2_0_enc_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_enc_ring_test_ring,
+ .test_ib = amdgpu_vcn_unified_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = vcn_v2_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+/**
+ * vcn_v5_0_1_set_unified_ring_funcs - set unified ring functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set unified ring functions
+ */
+static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, vcn_inst;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_1_unified_ring_vm_funcs;
+ adev->vcn.inst[i].ring_enc[0].me = i;
+ vcn_inst = GET_INST(VCN, i);
+ adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid;
+ }
+}
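
The aid_id assignment above groups physical VCN instances onto AIDs by integer division. A worked example under assumed values (the phys[] table stands in for GET_INST(VCN, i) and num_inst_per_aid is hypothetical, not read from hardware):

/* aid_id = phys_inst / num_inst_per_aid, on made-up values. */
#include <stdio.h>

int main(void)
{
	int phys[] = { 0, 1, 2, 3 };
	int num_inst_per_aid = 2;  /* assumed; product-specific */

	for (int i = 0; i < 4; i++)
		printf("vcn%d -> aid%d\n", i, phys[i] / num_inst_per_aid);
	return 0;
}

With two instances per AID the four logical instances pair up as aid 0, 0, 1, 1; with one per AID they map one-to-one.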
+
+/**
+ * vcn_v5_0_1_is_idle - check whether VCN block is idle
+ *
+ * @handle: amdgpu_device pointer
+ *
+ * Check whether VCN block is idle
+ */
+static bool vcn_v5_0_1_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret = 1;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE);
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_1_wait_for_idle - wait for VCN block idle
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Wait for VCN block idle
+ */
+static int vcn_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, ret = 0;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE,
+ UVD_STATUS__IDLE);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_1_set_clockgating_state - set VCN block clockgating state
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ * @state: clock gating state
+ *
+ * Set VCN block clockgating state
+ */
+static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool enable = state == AMD_CG_STATE_GATE;
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (enable) {
+ if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
+ return -EBUSY;
+ vcn_v5_0_1_enable_clock_gating(adev, i);
+ } else {
+ vcn_v5_0_1_disable_clock_gating(adev, i);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * vcn_v5_0_1_set_powergating_state - set VCN block powergating state
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret;
+
+ if (state == adev->vcn.cur_state)
+ return 0;
+
+ if (state == AMD_PG_STATE_GATE)
+ ret = vcn_v5_0_1_stop(adev);
+ else
+ ret = vcn_v5_0_1_start(adev);
+
+ if (!ret)
+ adev->vcn.cur_state = state;
+
+ return ret;
+}
+
+/**
+ * vcn_v5_0_1_process_interrupt - process VCN block interrupt
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @entry: interrupt entry from clients and sources
+ *
+ * Process VCN block interrupt
+ */
+static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t i, inst;
+
+ i = node_id_to_phys_map[entry->node_id];
+
+ DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
+ if (adev->vcn.inst[inst].aid_id == i)
+ break;
+ if (inst >= adev->vcn.num_vcn_inst) {
+ dev_WARN_ONCE(adev->dev, 1,
+ "Interrupt received for unknown VCN instance %d",
+ entry->node_id);
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
+ amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
+ break;
+ default:
+ DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
+ .process = vcn_v5_0_1_process_interrupt,
+};
+
+/**
+ * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set VCN block interrupt irq functions
+ */
+static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+ adev->vcn.inst->irq.num_types++;
+ adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;
+}
+
+static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
+ .name = "vcn_v5_0_1",
+ .early_init = vcn_v5_0_1_early_init,
+ .late_init = NULL,
+ .sw_init = vcn_v5_0_1_sw_init,
+ .sw_fini = vcn_v5_0_1_sw_fini,
+ .hw_init = vcn_v5_0_1_hw_init,
+ .hw_fini = vcn_v5_0_1_hw_fini,
+ .suspend = vcn_v5_0_1_suspend,
+ .resume = vcn_v5_0_1_resume,
+ .is_idle = vcn_v5_0_1_is_idle,
+ .wait_for_idle = vcn_v5_0_1_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
+ .set_powergating_state = vcn_v5_0_1_set_powergating_state,
+ .dump_ip_state = vcn_v5_0_0_dump_ip_state,
+ .print_ip_state = vcn_v5_0_0_print_ip_state,
+};
+
+const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VCN,
+ .major = 5,
+ .minor = 0,
+ .rev = 1,
+ .funcs = &vcn_v5_0_1_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
new file mode 100644
index 000000000000..82ac709f44bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VCN_v5_0_1_H__
+#define __VCN_v5_0_1_H__
+
+extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;
+
+#endif /* __VCN_v5_0_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index bf68e18e3824..98fc6941159e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -364,9 +364,8 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
* this should allow us to catch up.
*/
tmp = (wptr + 32) & ih->ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow "
- "(0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, tmp);
+ dev_warn_ratelimited(adev->dev, "%s ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ amdgpu_ih_ring_name(adev, ih), wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
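
The switch to dev_warn_ratelimited() matters because an overflowing IH ring can trip this path on every interrupt, and an unthrottled warning would flood the log exactly when the system is already struggling. The _ratelimited printk helpers expand to roughly the following (a sketch of the include/linux/ratelimit.h convention, not the macro verbatim):

/* One static ratelimit state per call site: allow a burst of messages
 * per interval, then silently drop repeats. */
#include <linux/device.h>
#include <linux/ratelimit.h>

static void warn_overflow(struct device *dev)
{
	static DEFINE_RATELIMIT_STATE(overflow_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (__ratelimit(&overflow_rs))
		dev_warn(dev, "IH ring buffer overflow\n");
}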
@@ -472,18 +471,18 @@ static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs;
}
-static int vega10_ih_early_init(void *handle)
+static int vega10_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega10_ih_set_interrupt_funcs(adev);
vega10_ih_set_self_irq_funcs(adev);
return 0;
}
-static int vega10_ih_sw_init(void *handle)
+static int vega10_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
@@ -525,43 +524,35 @@ static int vega10_ih_sw_init(void *handle)
return r;
}
-static int vega10_ih_sw_fini(void *handle)
+static int vega10_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int vega10_ih_hw_init(void *handle)
+static int vega10_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega10_ih_irq_init(adev);
+ return vega10_ih_irq_init(ip_block->adev);
}
-static int vega10_ih_hw_fini(void *handle)
+static int vega10_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- vega10_ih_irq_disable(adev);
+ vega10_ih_irq_disable(ip_block->adev);
return 0;
}
-static int vega10_ih_suspend(void *handle)
+static int vega10_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega10_ih_hw_fini(adev);
+ return vega10_ih_hw_fini(ip_block);
}
-static int vega10_ih_resume(void *handle)
+static int vega10_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega10_ih_hw_init(adev);
+ return vega10_ih_hw_init(ip_block);
}
static bool vega10_ih_is_idle(void *handle)
@@ -570,13 +561,13 @@ static bool vega10_ih_is_idle(void *handle)
return true;
}
-static int vega10_ih_wait_for_idle(void *handle)
+static int vega10_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int vega10_ih_soft_reset(void *handle)
+static int vega10_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
@@ -613,10 +604,10 @@ static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int vega10_ih_set_clockgating_state(void *handle,
+static int vega10_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega10_ih_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -624,7 +615,7 @@ static int vega10_ih_set_clockgating_state(void *handle,
}
-static int vega10_ih_set_powergating_state(void *handle,
+static int vega10_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -633,7 +624,6 @@ static int vega10_ih_set_powergating_state(void *handle,
const struct amd_ip_funcs vega10_ih_ip_funcs = {
.name = "vega10_ih",
.early_init = vega10_ih_early_init,
- .late_init = NULL,
.sw_init = vega10_ih_sw_init,
.sw_fini = vega10_ih_sw_fini,
.hw_init = vega10_ih_hw_init,
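
The mechanical change running through this file is the IP-callback signature migration: hooks that used to take an opaque void *handle (always an amdgpu_device) now receive the amdgpu_ip_block itself and reach the device through ip_block->adev. The shape of the conversion, compressed onto one hypothetical hook (the foo_* names are illustrative):

/* Before: opaque handle, cast at every call site. */
static int foo_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return foo_irq_init(adev);
}

/* After: typed ip_block, no cast, and per-block state within reach. */
static int foo_hw_init(struct amdgpu_ip_block *ip_block)
{
	return foo_irq_init(ip_block->adev);
}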
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index ac439f0565e3..e9e3b2ed4b7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -114,6 +114,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
+ if (enable) {
+ /* Unset the CLEAR_OVERFLOW bit to make sure the next step
+ * switches the bit from 0 to 1
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Clear RB_OVERFLOW bit */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ }
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ }
+
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
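
The three WPTR_OVERFLOW_CLEAR writes added above encode an edge-triggered clear: the hardware latches the clear on a 0-to-1 transition, so the bit is first forced low, then raised to clear RB_OVERFLOW, then lowered again so the next overflow still has an edge to use. The toggle in isolation (bit position assumed, and reg_write() stands in for both the WREG32_NO_KIQ and psp_reg_program paths):

/* Edge-triggered status clear via a 0 -> 1 -> 0 toggle. */
#include <stdint.h>

#define WPTR_OVERFLOW_CLEAR (1u << 31)  /* position assumed */

static void reg_write(uint32_t v) { (void)v; /* MMIO in real life */ }

static void clear_overflow(uint32_t tmp)
{
	tmp &= ~WPTR_OVERFLOW_CLEAR;  /* guarantee a low starting level */
	reg_write(tmp);
	tmp |= WPTR_OVERFLOW_CLEAR;   /* rising edge clears RB_OVERFLOW */
	reg_write(tmp);
	tmp &= ~WPTR_OVERFLOW_CLEAR;  /* drop it again to re-arm */
	reg_write(tmp);
}

In the hunk above the final low value is not a dedicated third write; it is folded into the function's normal IH_RB_CNTL update that follows.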
@@ -339,6 +366,7 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
/* Enable IH Retry CAM */
if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0) ||
amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 4) ||
amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 5))
WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN,
ENABLE, 1);
@@ -416,9 +444,8 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
* this should allow us to catch up.
*/
tmp = (wptr + 32) & ih->ptr_mask;
- dev_warn(adev->dev, "IH ring buffer overflow "
- "(0x%08X, 0x%08X, 0x%08X)\n",
- wptr, ih->rptr, tmp);
+ dev_warn_ratelimited(adev->dev, "%s ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ amdgpu_ih_ring_name(adev, ih), wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
@@ -526,18 +553,18 @@ static void vega20_ih_set_self_irq_funcs(struct amdgpu_device *adev)
adev->irq.self_irq.funcs = &vega20_ih_self_irq_funcs;
}
-static int vega20_ih_early_init(void *handle)
+static int vega20_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega20_ih_set_interrupt_funcs(adev);
vega20_ih_set_self_irq_funcs(adev);
return 0;
}
-static int vega20_ih_sw_init(void *handle)
+static int vega20_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
bool use_bus_addr = true;
int r;
@@ -586,19 +613,19 @@ static int vega20_ih_sw_init(void *handle)
return r;
}
-static int vega20_ih_sw_fini(void *handle)
+static int vega20_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_irq_fini_sw(adev);
return 0;
}
-static int vega20_ih_hw_init(void *handle)
+static int vega20_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = vega20_ih_irq_init(adev);
if (r)
@@ -607,27 +634,21 @@ static int vega20_ih_hw_init(void *handle)
return 0;
}
-static int vega20_ih_hw_fini(void *handle)
+static int vega20_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- vega20_ih_irq_disable(adev);
+ vega20_ih_irq_disable(ip_block->adev);
return 0;
}
-static int vega20_ih_suspend(void *handle)
+static int vega20_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega20_ih_hw_fini(adev);
+ return vega20_ih_hw_fini(ip_block);
}
-static int vega20_ih_resume(void *handle)
+static int vega20_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vega20_ih_hw_init(adev);
+ return vega20_ih_hw_init(ip_block);
}
static bool vega20_ih_is_idle(void *handle)
@@ -636,13 +657,13 @@ static bool vega20_ih_is_idle(void *handle)
return true;
}
-static int vega20_ih_wait_for_idle(void *handle)
+static int vega20_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* todo */
return -ETIMEDOUT;
}
-static int vega20_ih_soft_reset(void *handle)
+static int vega20_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* todo */
@@ -676,10 +697,10 @@ static void vega20_ih_update_clockgating_state(struct amdgpu_device *adev,
}
}
-static int vega20_ih_set_clockgating_state(void *handle,
+static int vega20_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
vega20_ih_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
@@ -687,7 +708,7 @@ static int vega20_ih_set_clockgating_state(void *handle,
}
-static int vega20_ih_set_powergating_state(void *handle,
+static int vega20_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -696,7 +717,6 @@ static int vega20_ih_set_powergating_state(void *handle,
const struct amd_ip_funcs vega20_ih_ip_funcs = {
.name = "vega20_ih",
.early_init = vega20_ih_early_init,
- .late_init = NULL,
.sw_init = vega20_ih_sw_init,
.sw_fini = vega20_ih_sw_fini,
.hw_init = vega20_ih_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index d39c670f6220..06615f160331 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -136,15 +136,15 @@ static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[]
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
.max_width = 4096,
- .max_height = 2304,
- .max_pixels_per_frame = 4096 * 2304,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
.max_level = 0,
},
};
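
The raised caps follow directly from the arithmetic: 4096 * 2304 = 9,437,184 pixels per frame before this change, and 4096 * 4096 = 16,777,216 after, so Polaris now advertises square-4K H.264/AVC and HEVC encode, plus any other geometry that fits within the same per-frame pixel budget.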
@@ -1455,9 +1455,9 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
#define CZ_REV_BRISTOL(rev) \
((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
-static int vi_common_early_init(void *handle)
+static int vi_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->flags & AMD_IS_APU) {
adev->smc_rreg = &cz_smc_rreg;
@@ -1679,9 +1679,9 @@ static int vi_common_early_init(void *handle)
return 0;
}
-static int vi_common_late_init(void *handle)
+static int vi_common_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_vi_mailbox_get_irq(adev);
@@ -1689,9 +1689,9 @@ static int vi_common_late_init(void *handle)
return 0;
}
-static int vi_common_sw_init(void *handle)
+static int vi_common_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
xgpu_vi_mailbox_add_irq_id(adev);
@@ -1699,14 +1699,9 @@ static int vi_common_sw_init(void *handle)
return 0;
}
-static int vi_common_sw_fini(void *handle)
-{
- return 0;
-}
-
-static int vi_common_hw_init(void *handle)
+static int vi_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* move the golden regs per IP block */
vi_init_golden_registers(adev);
@@ -1718,9 +1713,9 @@ static int vi_common_hw_init(void *handle)
return 0;
}
-static int vi_common_hw_fini(void *handle)
+static int vi_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* enable the doorbell aperture */
vi_enable_doorbell_aperture(adev, false);
@@ -1731,18 +1726,14 @@ static int vi_common_hw_fini(void *handle)
return 0;
}
-static int vi_common_suspend(void *handle)
+static int vi_common_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vi_common_hw_fini(adev);
+ return vi_common_hw_fini(ip_block);
}
-static int vi_common_resume(void *handle)
+static int vi_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return vi_common_hw_init(adev);
+ return vi_common_hw_init(ip_block);
}
static bool vi_common_is_idle(void *handle)
@@ -1750,16 +1741,6 @@ static bool vi_common_is_idle(void *handle)
return true;
}
-static int vi_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int vi_common_soft_reset(void *handle)
-{
- return 0;
-}
-
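
Deleting the empty sw_fini/wait_for_idle/soft_reset stubs here (and the explicit NULL assignments further down) leans on two C facts: designated initializers zero every unnamed member, and the IP dispatch checks a hook pointer before calling it, so an absent callback is simply a no-op. A self-contained sketch of that convention (all names hypothetical):

/* Optional-callback dispatch: members omitted from a designated
 * initializer are NULL and the caller skips them. */
#include <stddef.h>

struct ip_funcs {
	int (*hw_init)(void *ctx);
	int (*soft_reset)(void *ctx);  /* optional */
};

static int my_hw_init(void *ctx) { (void)ctx; return 0; }

static const struct ip_funcs my_funcs = {
	.hw_init = my_hw_init,         /* .soft_reset left NULL */
};

static int run_soft_reset(const struct ip_funcs *f, void *ctx)
{
	if (!f->soft_reset)            /* not implemented: nothing to do */
		return 0;
	return f->soft_reset(ctx);
}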
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
@@ -1964,10 +1945,10 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
return 0;
}
-static int vi_common_set_clockgating_state(void *handle,
+static int vi_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev))
return 0;
@@ -2007,7 +1988,7 @@ static int vi_common_set_clockgating_state(void *handle,
return 0;
}
-static int vi_common_set_powergating_state(void *handle,
+static int vi_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -2047,19 +2028,14 @@ static const struct amd_ip_funcs vi_common_ip_funcs = {
.early_init = vi_common_early_init,
.late_init = vi_common_late_init,
.sw_init = vi_common_sw_init,
- .sw_fini = vi_common_sw_fini,
.hw_init = vi_common_hw_init,
.hw_fini = vi_common_hw_fini,
.suspend = vi_common_suspend,
.resume = vi_common_resume,
.is_idle = vi_common_is_idle,
- .wait_for_idle = vi_common_wait_for_idle,
- .soft_reset = vi_common_soft_reset,
.set_clockgating_state = vi_common_set_clockgating_state,
.set_powergating_state = vi_common_set_powergating_state,
.get_clockgating_state = vi_common_get_clockgating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
static const struct amdgpu_ip_block_version vi_common_ip_block =
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 02f7ba8c93cd..984f0e705078 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -274,7 +274,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf820258,
+ 0xbf820001, 0xbf820259,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -390,141 +390,98 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
- 0xbef600ff, 0x01000000,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf85004d, 0xbe840080,
- 0xd2890000, 0x00000900,
- 0x80048104, 0xd2890001,
- 0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
- 0x80048104, 0xd2890003,
- 0x00000900, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000902,
+ 0xd2890000, 0x00000901,
0x80048104, 0xd2890001,
- 0x00000902, 0x80048104,
- 0xd2890002, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
0x80048104, 0xd2890003,
- 0x00000902, 0x80048104,
+ 0x00000901, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000903, 0x80048104,
- 0xd2890001, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
0x80048104, 0xd2890002,
- 0x00000903, 0x80048104,
- 0xd2890003, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbf820008,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf840063, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf84005f, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
- 0x867aff78, 0x00400000,
- 0xbf850003, 0xb8faf803,
- 0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
- 0xd2890000, 0x00000900,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
0x80048104, 0xd2890001,
- 0x00000900, 0x80048104,
- 0xd2890002, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
0x80048104, 0xd2890003,
- 0x00000900, 0x80048104,
+ 0x00000903, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000901, 0x80048104,
- 0xd2890001, 0x00000901,
- 0x80048104, 0xd2890002,
- 0x00000901, 0x80048104,
- 0xd2890003, 0x00000901,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb2a05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840063,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf84005f,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -544,137 +501,181 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xbf8200c7,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x04000000,
- 0xbf84001e, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf840019,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2a05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xbf9c0000, 0xe0524000,
- 0x6e1d0000, 0xe0524100,
- 0x6e1d0100, 0xe0524200,
- 0x6e1d0200, 0xe0524300,
- 0x6e1d0300, 0xbf8c0f70,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbf8200c7, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001e,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf840019, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbefe00c1, 0xbeff00c1,
0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
+ 0xb8ef2a05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xbf9c0000,
+ 0xe0524000, 0x6e1d0000,
+ 0xe0524100, 0x6e1d0100,
+ 0xe0524200, 0x6e1d0200,
+ 0xe0524300, 0x6e1d0300,
+ 0xbf8c0f70, 0xb8f82a05,
0x80788178, 0x8e788a78,
0xb8ee1605, 0x806e816e,
0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe0070,
- 0xbeff0071, 0x866f7bff,
- 0x000003ff, 0xb96f4803,
- 0x866f7bff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2a05,
- 0x806e816e, 0x8e6e8a6e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x866f7bff, 0x000003ff,
+ 0xb96f4803, 0x866f7bff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2a05, 0x806e816e,
+ 0x8e6e8a6e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
@@ -1302,7 +1303,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
- 0xbf820001, 0xbf8202d4,
+ 0xbf820001, 0xbf8202d5,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -1419,99 +1420,37 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -1530,31 +1469,50 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2a05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -1574,215 +1532,259 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xbefc0080,
- 0xbf11017c, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850059,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbefc0080, 0xbf11017c,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850059, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xbe840080,
+ 0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
+ 0x00000900, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
+ 0xd2890000, 0x00000902,
0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
+ 0x00000902, 0x80048104,
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffa9, 0xbf9c0000,
- 0xbf820016, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffeb,
- 0xbf9c0000, 0xbf8200e3,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x04000000,
- 0xbf84001f, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf84001a,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x8078ff78,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xe0510000, 0x781d0000,
- 0xe0510100, 0x781d0000,
- 0x807cff7c, 0x00000200,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7c, 0xbf85fff6,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffa9,
+ 0xbf9c0000, 0xbf820016,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffeb, 0xbf9c0000,
+ 0xbf8200e3, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x04000000, 0xbf84001f,
0xbefe00c1, 0xbeff00c1,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf84001a, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xb8ef2a05, 0x806f816f,
- 0x8e6f826f, 0x806fff6f,
- 0x00008000, 0xbef80080,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
- 0xbf11087c, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xbefc0080,
- 0xbf11087c, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbefc0080, 0xe0510000,
+ 0x781d0000, 0xe0510100,
+ 0x781d0000, 0x807cff7c,
+ 0x00000200, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85fff6, 0xbefe00c1,
+ 0xbeff00c1, 0xbef600ff,
+ 0x01000000, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x806fff6f, 0x00008000,
+ 0xbef80080, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2a05, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
- 0xbf820001, 0xbf8202df,
+ 0xbf820001, 0xbf8202e0,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -1899,99 +1901,37 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -2010,31 +1950,50 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2b05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -2054,51 +2013,31 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xb8fb2985,
- 0x807b817b, 0x8e7b837b,
- 0xb8fa2b05, 0x807a817a,
- 0x8e7a827a, 0x80fb7a7b,
- 0x867b7b7b, 0xbf84007a,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
0x807bff7b, 0x00001000,
- 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850059, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xbe840080,
+ 0xbf850051, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -2137,139 +2076,203 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffa9,
- 0xbf9c0000, 0xbf820016,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffeb, 0xbf9c0000,
- 0xbf8200ee, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x866eff7f,
- 0x04000000, 0xbf84001f,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200ee,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
0xbefe00c1, 0xbeff00c1,
- 0xb8ef4306, 0x866fc16f,
- 0xbf84001a, 0x8e6f866f,
- 0x8e6f826f, 0xbef6006f,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2b05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb8ef2985, 0x806f816f,
- 0x8e6f836f, 0xb8f92b05,
- 0x80798179, 0x8e798279,
- 0x80ef796f, 0x866f6f6f,
- 0xbf84001a, 0x806fff6f,
- 0x00008000, 0xbefc0080,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2985, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b77,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b77, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
@@ -3151,7 +3154,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
};
static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
- 0xbf820001, 0xbf8202db,
+ 0xbf820001, 0xbf8202dc,
0xb8f8f802, 0x8978ff78,
0x00020006, 0xb8fbf803,
0x866eff78, 0x00002000,
@@ -3266,99 +3269,37 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0xbefe007c, 0xbefc0070,
0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fb1605,
- 0x807b817b, 0x8e7b847b,
- 0x8e76827b, 0xbef600ff,
- 0x01000000, 0xbef20174,
- 0x80747074, 0x82758075,
- 0xbefc0080, 0xbf800000,
- 0xbe802b00, 0xbe822b02,
- 0xbe842b04, 0xbe862b06,
- 0xbe882b08, 0xbe8a2b0a,
- 0xbe8c2b0c, 0xbe8e2b0e,
- 0xc06b003a, 0x00000000,
- 0xbf8cc07f, 0xc06b013a,
- 0x00000010, 0xbf8cc07f,
- 0xc06b023a, 0x00000020,
- 0xbf8cc07f, 0xc06b033a,
- 0x00000030, 0xbf8cc07f,
- 0x8074c074, 0x82758075,
- 0x807c907c, 0xbf0a7b7c,
- 0xbf85ffe7, 0xbef40172,
- 0xbef00080, 0xbefe00c1,
- 0xbeff00c1, 0xbee80080,
- 0xbee90080, 0xbef600ff,
- 0x01000000, 0x867aff78,
- 0x00400000, 0xbf850003,
- 0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf85004d,
- 0xbe840080, 0xd2890000,
- 0x00000900, 0x80048104,
- 0xd2890001, 0x00000900,
- 0x80048104, 0xd2890002,
- 0x00000900, 0x80048104,
- 0xd2890003, 0x00000900,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000901,
- 0x80048104, 0xd2890001,
- 0x00000901, 0x80048104,
- 0xd2890002, 0x00000901,
- 0x80048104, 0xd2890003,
- 0x00000901, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0xbf820008, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0xbefe00c1,
- 0xbeff00c1, 0xb8fb4306,
- 0x867bc17b, 0xbf840064,
- 0xbf8a0000, 0x867aff6f,
- 0x04000000, 0xbf840060,
- 0x8e7b867b, 0x8e7b827b,
- 0xbef6007b, 0xb8f02985,
- 0x80708170, 0x8e708a70,
- 0x8e708170, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850030, 0x24040682,
- 0xd86e4000, 0x00000002,
- 0xbf8cc07f, 0xbe840080,
+ 0xbf85004d, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -3377,31 +3318,50 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x80048104, 0xc069003a,
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
- 0xbf84ffee, 0x680404ff,
- 0x00000200, 0xd0c9006a,
- 0x0000f702, 0xbf87ffd2,
- 0xbf820015, 0xd1060002,
- 0x00011103, 0x7e0602ff,
- 0x00000200, 0xbefc00ff,
- 0x00010000, 0xbe800077,
- 0x8677ff77, 0xff7fffff,
- 0x8777ff77, 0x00058000,
- 0xd8ec0000, 0x00000002,
- 0xbf8cc07f, 0xe0765000,
- 0x701d0002, 0x68040702,
- 0xd0c9006a, 0x0000f702,
- 0xbf87fff7, 0xbef70000,
- 0xbef000ff, 0x00000400,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
0xbefe00c1, 0xbeff00c1,
- 0xb8fb2b05, 0x807b817b,
- 0x8e7b827b, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf84006d,
- 0xbf11017c, 0x807bff7b,
- 0x00001000, 0x867aff78,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
0x00400000, 0xbf850003,
0xb8faf803, 0x897a7aff,
- 0x10000000, 0xbf850051,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
0xbe840080, 0xd2890000,
0x00000900, 0x80048104,
0xd2890001, 0x00000900,
@@ -3421,51 +3381,31 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0xc069003a, 0x00000070,
0xbf8cc07f, 0x80709070,
0xbf06c004, 0xbf84ffee,
- 0xbe840080, 0xd2890000,
- 0x00000902, 0x80048104,
- 0xd2890001, 0x00000902,
- 0x80048104, 0xd2890002,
- 0x00000902, 0x80048104,
- 0xd2890003, 0x00000902,
- 0x80048104, 0xc069003a,
- 0x00000070, 0xbf8cc07f,
- 0x80709070, 0xbf06c004,
- 0xbf84ffee, 0xbe840080,
- 0xd2890000, 0x00000903,
- 0x80048104, 0xd2890001,
- 0x00000903, 0x80048104,
- 0xd2890002, 0x00000903,
- 0x80048104, 0xd2890003,
- 0x00000903, 0x80048104,
- 0xc069003a, 0x00000070,
- 0xbf8cc07f, 0x80709070,
- 0xbf06c004, 0xbf84ffee,
- 0x807c847c, 0xbf0a7b7c,
- 0xbf85ffb1, 0xbf9c0000,
- 0xbf820012, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x701d0000, 0xe0724100,
- 0x701d0100, 0xe0724200,
- 0x701d0200, 0xe0724300,
- 0x701d0300, 0x807c847c,
- 0x8070ff70, 0x00000400,
- 0xbf0a7b7c, 0xbf85ffef,
- 0xbf9c0000, 0xb8fb2985,
- 0x807b817b, 0x8e7b837b,
- 0xb8fa2b05, 0x807a817a,
- 0x8e7a827a, 0x80fb7a7b,
- 0x867b7b7b, 0xbf84007a,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
0x807bff7b, 0x00001000,
- 0xbefc0080, 0xbf11017c,
0x867aff78, 0x00400000,
0xbf850003, 0xb8faf803,
0x897a7aff, 0x10000000,
- 0xbf850059, 0xd3d84000,
- 0x18000100, 0xd3d84001,
- 0x18000101, 0xd3d84002,
- 0x18000102, 0xd3d84003,
- 0x18000103, 0xbe840080,
+ 0xbf850051, 0xbe840080,
0xd2890000, 0x00000900,
0x80048104, 0xd2890001,
0x00000900, 0x80048104,
@@ -3504,139 +3444,203 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x00000070, 0xbf8cc07f,
0x80709070, 0xbf06c004,
0xbf84ffee, 0x807c847c,
- 0xbf0a7b7c, 0xbf85ffa9,
- 0xbf9c0000, 0xbf820016,
- 0xd3d84000, 0x18000100,
- 0xd3d84001, 0x18000101,
- 0xd3d84002, 0x18000102,
- 0xd3d84003, 0x18000103,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x701d0000,
0xe0724100, 0x701d0100,
0xe0724200, 0x701d0200,
0xe0724300, 0x701d0300,
0x807c847c, 0x8070ff70,
0x00000400, 0xbf0a7b7c,
- 0xbf85ffeb, 0xbf9c0000,
- 0xbf8200ee, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x866eff7f,
- 0x04000000, 0xbf84001f,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200ee,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
0xbefe00c1, 0xbeff00c1,
- 0xb8ef4306, 0x866fc16f,
- 0xbf84001a, 0x8e6f866f,
- 0x8e6f826f, 0xbef6006f,
- 0xb8f82985, 0x80788178,
- 0x8e788a78, 0x8e788178,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbefe00c1,
- 0xbeff00c1, 0xbef600ff,
- 0x01000000, 0xb8ef2b05,
- 0x806f816f, 0x8e6f826f,
- 0x806fff6f, 0x00008000,
- 0xbef80080, 0xbeee0078,
- 0x8078ff78, 0x00000400,
- 0xbefc0084, 0xbf11087c,
- 0xe0524000, 0x781d0000,
- 0xe0524100, 0x781d0100,
- 0xe0524200, 0x781d0200,
- 0xe0524300, 0x781d0300,
- 0xbf8c0f70, 0x7e000300,
- 0x7e020301, 0x7e040302,
- 0x7e060303, 0x807c847c,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7c, 0xbf85ffee,
- 0xb8ef2985, 0x806f816f,
- 0x8e6f836f, 0xb8f92b05,
- 0x80798179, 0x8e798279,
- 0x80ef796f, 0x866f6f6f,
- 0xbf84001a, 0x806fff6f,
- 0x00008000, 0xbefc0080,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
0xbf11087c, 0xe0524000,
0x781d0000, 0xe0524100,
0x781d0100, 0xe0524200,
0x781d0200, 0xe0524300,
0x781d0300, 0xbf8c0f70,
- 0xd3d94000, 0x18000100,
- 0xd3d94001, 0x18000101,
- 0xd3d94002, 0x18000102,
- 0xd3d94003, 0x18000103,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0x807c847c, 0x8078ff78,
0x00000400, 0xbf0a6f7c,
- 0xbf85ffea, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
- 0xbf8c0f70, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
- 0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82985,
- 0x80788178, 0x8e788a78,
- 0x8e788178, 0xb8ee1605,
- 0x806e816e, 0x8e6e866e,
- 0x80786e78, 0xbef60084,
- 0xbef600ff, 0x01000000,
- 0xc0211bfa, 0x00000078,
- 0x80788478, 0xc0211b3a,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
0x00000078, 0x80788478,
- 0xc0211b7a, 0x00000078,
- 0x80788478, 0xc0211c3a,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
0x00000078, 0x80788478,
- 0xc0211c7a, 0x00000078,
- 0x80788478, 0xc0211eba,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
0x00000078, 0x80788478,
- 0xc0211efa, 0x00000078,
- 0x80788478, 0xc0211a3a,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
0x00000078, 0x80788478,
- 0xc0211a7a, 0x00000078,
- 0x80788478, 0xc0211cfa,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
0x00000078, 0x80788478,
- 0xbf8cc07f, 0xbefc006f,
- 0xbefe0070, 0xbeff0071,
- 0x866f7bff, 0x000003ff,
- 0xb96f4803, 0x866f7bff,
- 0xfffff800, 0x8f6f8b6f,
- 0xb96fa2c3, 0xb973f801,
- 0xb8ee2985, 0x806e816e,
- 0x8e6e8a6e, 0x8e6e816e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc00b1c37, 0x00000050,
- 0xc00b1d37, 0x00000060,
- 0xc0031e77, 0x00000074,
- 0xbf8cc07f, 0x8f6e8b79,
- 0x866eff6e, 0x001f8000,
- 0xb96ef807, 0x866dff6d,
- 0x0000ffff, 0x86fe7e7e,
- 0x86ea6a6a, 0x8f6e837a,
- 0xb96ee0c2, 0xbf800002,
- 0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf9b0000,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b79, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx12_hex[] = {
@@ -4122,3 +4126,487 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
+
+static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
+ 0xbf820001, 0xbf8202ca,
+ 0xb8f8f802, 0x8978ff78,
+ 0x00020006, 0xb8fbf803,
+ 0x866eff78, 0x00002000,
+ 0xbf840009, 0x866eff6d,
+ 0x00ff0000, 0xbf85001a,
+ 0x866eff7b, 0x00000400,
+ 0xbf850051, 0xbf8e0010,
+ 0xb8fbf803, 0xbf82fffa,
+ 0x866eff7b, 0x03c00900,
+ 0xbf850011, 0x866eff7b,
+ 0x000071ff, 0xbf840008,
+ 0x866fff7b, 0x00007080,
+ 0xbf840001, 0xbeee1a87,
+ 0xb8eff801, 0x8e6e8c6e,
+ 0x866e6f6e, 0xbf850006,
+ 0x866eff6d, 0x00ff0000,
+ 0xbf850003, 0x866eff7b,
+ 0x00000400, 0xbf85003a,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8979ff79, 0xfc000000,
+ 0x87797a79, 0xba7ff807,
+ 0x00000000, 0xb8faf812,
+ 0xb8fbf813, 0x8efa887a,
+ 0xbf0d8f7b, 0xbf840002,
+ 0x877bff7b, 0xffff0000,
+ 0xc0031bbd, 0x00000010,
+ 0xbf8cc07f, 0x8e6e976e,
+ 0x8979ff79, 0x00800000,
+ 0x87796e79, 0xc0071bbd,
+ 0x00000000, 0xbf8cc07f,
+ 0xc0071ebd, 0x00000008,
+ 0xbf8cc07f, 0x86ee6e6e,
+ 0xbf840001, 0xbe801d6e,
+ 0x866eff6d, 0x01ff0000,
+ 0xbf850005, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbf820005,
+ 0x866eff6d, 0x01000000,
+ 0xbf850002, 0x806c846c,
+ 0x826d806d, 0x866dff6d,
+ 0x0000ffff, 0x8f7a8b79,
+ 0x867aff7a, 0x001f8000,
+ 0xb97af807, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8378,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9780002, 0xbe801f6c,
+ 0x866dff6d, 0x0000ffff,
+ 0xbefa0080, 0xb97a0283,
+ 0xb8faf807, 0x867aff7a,
+ 0x001f8000, 0x8e7a8b7a,
+ 0x8979ff79, 0xfc000000,
+ 0x87797a79, 0xba7ff807,
+ 0x00000000, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x877a8478,
+ 0xb97af802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8fa2985,
+ 0x807a817a, 0x8e7a8a7a,
+ 0x8e7a817a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611b3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611b7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611bba, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611bfa,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611e3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8fbf803,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611efa, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611a3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xb8f1f801,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbf108080,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf85004d, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb5306, 0x867bc17b,
+ 0xbf840052, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf84004e, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02985, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85001d,
+ 0x24040682, 0xd86c0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x680404ff,
+ 0x00000100, 0xd0c9006a,
+ 0x0000f702, 0xbf87ffe5,
+ 0xbf820016, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x701d0002, 0x68040702,
+ 0xd0c9006a, 0x0000f702,
+ 0xbefe016a, 0xbf87fff6,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2b05,
+ 0x807b817b, 0x8e7b827b,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a7b7c,
+ 0xbf84006d, 0xbf11017c,
+ 0x807bff7b, 0x00001000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf850051, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0x807c847c,
+ 0xbf0a7b7c, 0xbf85ffb1,
+ 0xbf9c0000, 0xbf820012,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0x807c847c, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xb8fb2985, 0x807b817b,
+ 0x8e7b837b, 0xb8fa2b05,
+ 0x807a817a, 0x8e7a827a,
+ 0x80fb7a7b, 0x867b7b7b,
+ 0xbf84007a, 0x807bff7b,
+ 0x00001000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf8200f4,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x04000000,
+ 0xbf840025, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef5306,
+ 0x866fc16f, 0xbf840020,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82985,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0xe0510200, 0x781d0000,
+ 0xe0510300, 0x781d0000,
+ 0xe0510400, 0x781d0000,
+ 0x807cff7c, 0x00000500,
+ 0x8078ff78, 0x00000500,
+ 0xbf0a6f7c, 0xbf85fff0,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbef600ff, 0x01000000,
+ 0xb8ef2b05, 0x806f816f,
+ 0x8e6f826f, 0x806fff6f,
+ 0x00008000, 0xbef80080,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbefc0084,
+ 0xbf11087c, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffee, 0xb8ef2985,
+ 0x806f816f, 0x8e6f836f,
+ 0xb8f92b05, 0x80798179,
+ 0x8e798279, 0x80ef796f,
+ 0x866f6f6f, 0xbf84001a,
+ 0x806fff6f, 0x00008000,
+ 0xbefc0080, 0xbf11087c,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffea,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xbf8c0f70,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb8f82985, 0x80788178,
+ 0x8e788a78, 0x8e788178,
+ 0xb8ee1605, 0x806e816e,
+ 0x8e6e866e, 0x80786e78,
+ 0xbef60084, 0xbef600ff,
+ 0x01000000, 0xc0211bfa,
+ 0x00000078, 0x80788478,
+ 0xc0211b3a, 0x00000078,
+ 0x80788478, 0xc0211b7a,
+ 0x00000078, 0x80788478,
+ 0xc0211c3a, 0x00000078,
+ 0x80788478, 0xc0211c7a,
+ 0x00000078, 0x80788478,
+ 0xc0211eba, 0x00000078,
+ 0x80788478, 0xc0211efa,
+ 0x00000078, 0x80788478,
+ 0xc0211a3a, 0x00000078,
+ 0x80788478, 0xc0211a7a,
+ 0x00000078, 0x80788478,
+ 0xc0211cfa, 0x00000078,
+ 0x80788478, 0xbf8cc07f,
+ 0xbefc006f, 0xbefe0070,
+ 0xbeff0071, 0x866f7bff,
+ 0x000003ff, 0xb96f4803,
+ 0x866f7bff, 0xfffff800,
+ 0x8f6f8b6f, 0xb96fa2c3,
+ 0xb973f801, 0xb8ee2985,
+ 0x806e816e, 0x8e6e8a6e,
+ 0x8e6e816e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc00b1c37,
+ 0x00000050, 0xc00b1d37,
+ 0x00000060, 0xc0031e77,
+ 0x00000074, 0xbf8cc07f,
+ 0x8f6e8b79, 0x866eff6e,
+ 0x001f8000, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0xbe801f6c,
+ 0xbf9b0000, 0x00000000,
+};
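
Each cwsr_trap_*_hex array in this header is the sp3-assembled machine code of the matching trap handler, one 32-bit instruction word per element, so the driver can copy the image into the CWSR save area at device init. As a rough sketch of how the new gfx9_5_0 image can be wired up, assuming the kfd_cwsr_init()-style per-ASIC selection amdkfd already uses in kfd_device.c (the function name and the exact IP_VERSION checks below are illustrative, not part of this patch):

	/* Sketch only: models the existing per-ASIC selection pattern. */
	static void cwsr_select_sketch(struct kfd_dev *kfd)
	{
		if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 5, 0)) {
			/* Image added by this patch. */
			kfd->cwsr_isa = cwsr_trap_gfx9_5_0_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_5_0_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) {
			kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
		}
	}
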
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 44772eec9ef4..96fbb16ceb21 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -34,41 +34,24 @@
* cpp -DASIC_FAMILY=CHIP_PLUM_BONITO cwsr_trap_handler_gfx10.asm -P -o gfx11.sp3
* sp3 gfx11.sp3 -hex gfx11.hex
*
- * gfx12:
- * cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx10.asm -P -o gfx12.sp3
- * sp3 gfx12.sp3 -hex gfx12.hex
*/
#define CHIP_NAVI10 26
#define CHIP_SIENNA_CICHLID 30
#define CHIP_PLUM_BONITO 36
-#define CHIP_GFX12 37
#define NO_SQC_STORE (ASIC_FAMILY >= CHIP_SIENNA_CICHLID)
#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
-#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO && ASIC_FAMILY < CHIP_GFX12)
+#define SW_SA_TRAP (ASIC_FAMILY == CHIP_PLUM_BONITO)
#define SAVE_AFTER_XNACK_ERROR (HAVE_XNACK && !NO_SQC_STORE) // workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
-#if ASIC_FAMILY < CHIP_GFX12
#define S_COHERENCE glc:1
#define V_COHERENCE slc:1 glc:1
#define S_WAITCNT_0 s_waitcnt 0
-#else
-#define S_COHERENCE scope:SCOPE_SYS
-#define V_COHERENCE scope:SCOPE_SYS
-#define S_WAITCNT_0 s_wait_idle
-
-#define HW_REG_SHADER_FLAT_SCRATCH_LO HW_REG_WAVE_SCRATCH_BASE_LO
-#define HW_REG_SHADER_FLAT_SCRATCH_HI HW_REG_WAVE_SCRATCH_BASE_HI
-#define HW_REG_GPR_ALLOC HW_REG_WAVE_GPR_ALLOC
-#define HW_REG_LDS_ALLOC HW_REG_WAVE_LDS_ALLOC
-#define HW_REG_MODE HW_REG_WAVE_MODE
-#endif
-#if ASIC_FAMILY < CHIP_GFX12
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
@@ -81,21 +64,6 @@ var S_STATUS_ALWAYS_CLEAR_MASK = SQ_WAVE_STATUS_SPI_PRIO_MASK|SQ_WAVE_STATUS_E
var S_STATUS_HALT_MASK = SQ_WAVE_STATUS_HALT_MASK
var S_SAVE_PC_HI_TRAP_ID_MASK = 0x00FF0000
var S_SAVE_PC_HI_HT_MASK = 0x01000000
-#else
-var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
-var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
-var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK = 0xC00
-var SQ_WAVE_STATE_PRIV_HALT_MASK = 0x4000
-var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK = 0x8000
-var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT = 15
-var SQ_WAVE_STATUS_WAVE64_SHIFT = 29
-var SQ_WAVE_STATUS_WAVE64_SIZE = 1
-var SQ_WAVE_LDS_ALLOC_GRANULARITY = 9
-var S_STATUS_HWREG = HW_REG_WAVE_STATE_PRIV
-var S_STATUS_ALWAYS_CLEAR_MASK = SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
-var S_STATUS_HALT_MASK = SQ_WAVE_STATE_PRIV_HALT_MASK
-var S_SAVE_PC_HI_TRAP_ID_MASK = 0xF0000000
-#endif
var SQ_WAVE_STATUS_NO_VGPRS_SHIFT = 24
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
@@ -110,7 +78,6 @@ var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12
#endif
-#if ASIC_FAMILY < CHIP_GFX12
var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
var SQ_WAVE_TRAPSTS_EXCP_MASK = 0x1FF
var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
@@ -161,39 +128,6 @@ var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
var S_TRAPSTS_HWREG = HW_REG_TRAPSTS
var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_TRAPSTS_SAVECTX_MASK
var S_TRAPSTS_SAVE_CONTEXT_SHIFT = SQ_WAVE_TRAPSTS_SAVECTX_SHIFT
-#else
-var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK = 0xF
-var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK = 0x10
-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT = 5
-var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20
-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40
-var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6
-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80
-var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8
-var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200
-var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK = 0x800
-var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK = 0x80
-var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK = 0x200
-
-var S_TRAPSTS_HWREG = HW_REG_WAVE_EXCP_FLAG_PRIV
-var S_TRAPSTS_SAVE_CONTEXT_MASK = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
-var S_TRAPSTS_SAVE_CONTEXT_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
-var S_TRAPSTS_NON_MASKABLE_EXCP_MASK = SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\
- SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
-var S_TRAPSTS_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
-var S_TRAPSTS_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
-var S_TRAPSTS_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
-var S_TRAPSTS_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
-var S_TRAPSTS_RESTORE_PART_3_SIZE = 32 - S_TRAPSTS_RESTORE_PART_3_SHIFT
-var BARRIER_STATE_SIGNAL_OFFSET = 16
-var BARRIER_STATE_VALID_OFFSET = 0
-#endif
// bits [31:24] unused by SPI debug data
var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
@@ -305,11 +239,7 @@ L_TRAP_NO_BARRIER:
L_HALTED:
// Host trap may occur while wave is halted.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
-#else
- s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
-#endif
s_cbranch_scc1 L_FETCH_2ND_TRAP
L_CHECK_SAVE:
@@ -336,7 +266,6 @@ L_NOT_HALTED:
// Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
// Maskable exceptions only cause the wave to enter the trap handler if
// their respective bit in mode.excp_en is set.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCP_MASK|SQ_WAVE_TRAPSTS_EXCP_HI_MASK
s_cbranch_scc0 L_CHECK_TRAP_ID
@@ -349,17 +278,6 @@ L_NOT_ADDR_WATCH:
s_lshl_b32 ttmp2, ttmp2, SQ_WAVE_MODE_EXCP_EN_SHIFT
s_and_b32 ttmp2, ttmp2, ttmp3
s_cbranch_scc1 L_FETCH_2ND_TRAP
-#else
- s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- s_and_b32 ttmp3, s_save_trapsts, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
- s_cbranch_scc0 L_NOT_ADDR_WATCH
- s_or_b32 ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
-
-L_NOT_ADDR_WATCH:
- s_getreg_b32 ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
- s_and_b32 ttmp2, ttmp3, ttmp2
- s_cbranch_scc1 L_FETCH_2ND_TRAP
-#endif
L_CHECK_TRAP_ID:
// Check trap_id != 0
@@ -369,13 +287,8 @@ L_CHECK_TRAP_ID:
#if SINGLE_STEP_MISSED_WORKAROUND
// Prioritize single step exception over context save.
// Second-level trap will halt wave and RFE, re-entering for SAVECTX.
-#if ASIC_FAMILY < CHIP_GFX12
s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
-#else
- // WAVE_TRAP_CTRL is already in ttmp3.
- s_and_b32 ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
-#endif
s_cbranch_scc1 L_FETCH_2ND_TRAP
#endif
@@ -425,12 +338,7 @@ L_NO_NEXT_TRAP:
s_cbranch_scc1 L_TRAP_CASE
// Host trap will not cause trap re-entry.
-#if ASIC_FAMILY < CHIP_GFX12
s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_HT_MASK
-#else
- s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
- s_and_b32 ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
-#endif
s_cbranch_scc1 L_EXIT_TRAP
s_or_b32 s_save_status, s_save_status, S_STATUS_HALT_MASK
@@ -457,16 +365,7 @@ L_EXIT_TRAP:
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
-#if ASIC_FAMILY < CHIP_GFX12
s_setreg_b32 hwreg(S_STATUS_HWREG), s_save_status
-#else
- // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
- // Only restore fields which the trap handler changes.
- s_lshr_b32 s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_SCC_SHIFT
- s_setreg_b32 hwreg(S_STATUS_HWREG, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
- SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_status
-#endif
-
s_rfe_b64 [ttmp0, ttmp1]
L_SAVE:
@@ -478,14 +377,6 @@ L_SAVE:
s_endpgm
L_HAVE_VGPRS:
#endif
-#if ASIC_FAMILY >= CHIP_GFX12
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- s_bitcmp1_b32 s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
- s_cbranch_scc0 L_HAVE_VGPRS
- s_endpgm
-L_HAVE_VGPRS:
-#endif
-
s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
s_mov_b32 s_save_tmp, 0
s_setreg_b32 hwreg(S_TRAPSTS_HWREG, S_TRAPSTS_SAVE_CONTEXT_SHIFT, 1), s_save_tmp //clear saveCtx bit
@@ -671,19 +562,6 @@ L_SAVE_HWREG:
s_mov_b32 m0, 0x0 //Next lane of v2 to write to
#endif
-#if ASIC_FAMILY >= CHIP_GFX12
- // Ensure no further changes to barrier or LDS state.
- // STATE_PRIV.BARRIER_COMPLETE may change up to this point.
- s_barrier_signal -2
- s_barrier_wait -2
-
- // Re-read final state of BARRIER_COMPLETE field for save.
- s_getreg_b32 s_save_tmp, hwreg(S_STATUS_HWREG)
- s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
- s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
- s_or_b32 s_save_status, s_save_status, s_save_tmp
-#endif
-
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
@@ -707,21 +585,6 @@ L_SAVE_HWREG:
s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI)
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-#if ASIC_FAMILY >= CHIP_GFX12
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
- write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
-
- s_get_barrier_state s_save_tmp, -1
- s_wait_kmcnt (0)
- write_hwreg_to_mem(s_save_tmp, s_save_buf_rsrc0, s_save_mem_offset)
-#endif
-
#if NO_SQC_STORE
// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
s_mov_b32 exec_lo, 0xFFFF
@@ -814,9 +677,7 @@ L_SAVE_LDS_NORMAL:
s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_DONE
-#if ASIC_FAMILY < CHIP_GFX12
s_barrier //LDS is used? wait for other waves in the same TG
-#endif
s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
s_cbranch_scc0 L_SAVE_LDS_DONE
@@ -1081,11 +942,6 @@ L_RESTORE:
s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
-#if ASIC_FAMILY >= CHIP_GFX12
- // Save s_restore_spi_init_hi for later use.
- s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
-#endif
-
//determine it is wave32 or wave64
get_wave_size2(s_restore_size)
@@ -1320,9 +1176,7 @@ L_RESTORE_SGPR:
// s_barrier with MODE.DEBUG_EN=1, STATUS.PRIV=1 incorrectly asserts debug exception.
// Clear DEBUG_EN before and restore MODE after the barrier.
s_setreg_imm32_b32 hwreg(HW_REG_MODE), 0
-#if ASIC_FAMILY < CHIP_GFX12
 s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG
-#endif
/* restore HW registers */
L_RESTORE_HWREG:
@@ -1334,11 +1188,6 @@ L_RESTORE_HWREG:
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
-#if ASIC_FAMILY >= CHIP_GFX12
- // Restore s_restore_spi_init_hi before the saved value gets clobbered.
- s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
-#endif
-
read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
@@ -1358,44 +1207,6 @@ L_RESTORE_HWREG:
s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch
-#if ASIC_FAMILY >= CHIP_GFX12
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
- s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
-
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
- s_setreg_b32 hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
-
- // Only the first wave needs to restore the workgroup barrier.
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
-
- // Skip over WAVE_STATUS, since there is no state to restore from it
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 4
-
- read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
- S_WAITCNT_0
-
- s_bitcmp1_b32 s_restore_tmp, BARRIER_STATE_VALID_OFFSET
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
-
- // extract the saved signal count from s_restore_tmp
- s_lshr_b32 s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
-
- // We need to call s_barrier_signal repeatedly to restore the signal
- // count of the work group barrier. The member count is already
- // initialized with the number of waves in the work group.
-L_BARRIER_RESTORE_LOOP:
- s_and_b32 s_restore_tmp, s_restore_tmp, s_restore_tmp
- s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
- s_barrier_signal -1
- s_add_i32 s_restore_tmp, s_restore_tmp, -1
- s_branch L_BARRIER_RESTORE_LOOP
-
-L_SKIP_BARRIER_RESTORE:
-#endif
-
s_mov_b32 m0, s_restore_m0
s_mov_b32 exec_lo, s_restore_exec_lo
s_mov_b32 exec_hi, s_restore_exec_hi
@@ -1453,13 +1264,6 @@ L_RETURN_WITHOUT_PRIV:
	s_setreg_b32	hwreg(S_STATUS_HWREG), s_restore_status		// SCC is included, which was changed by the previous SALU
-#if ASIC_FAMILY >= CHIP_GFX12
- // Make barrier and LDS state visible to all waves in the group.
- // STATE_PRIV.BARRIER_COMPLETE may change after this point.
- s_barrier_signal -2
- s_barrier_wait -2
-#endif
-
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
@@ -1598,11 +1402,7 @@ function get_hwreg_size_bytes
end
function get_wave_size2(s_reg)
-#if ASIC_FAMILY < CHIP_GFX12
s_getreg_b32 s_reg, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
-#else
- s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
-#endif
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
end
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
new file mode 100644
index 000000000000..1740e98c6719
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
@@ -0,0 +1,1126 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* To compile this assembly code:
+ *
+ * gfx12:
+ * cpp -DASIC_FAMILY=CHIP_GFX12 cwsr_trap_handler_gfx12.asm -P -o gfx12.sp3
+ * sp3 gfx12.sp3 -hex gfx12.hex
+ */
+
+#define CHIP_GFX12 37
+
+#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
+
+var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
+var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
+var SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK = 0xC00
+var SQ_WAVE_STATE_PRIV_HALT_MASK = 0x4000
+var SQ_WAVE_STATE_PRIV_POISON_ERR_MASK = 0x8000
+var SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT = 15
+var SQ_WAVE_STATUS_WAVE64_SHIFT = 29
+var SQ_WAVE_STATUS_WAVE64_SIZE = 1
+var SQ_WAVE_STATUS_NO_VGPRS_SHIFT = 24
+var SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK = SQ_WAVE_STATE_PRIV_SYS_PRIO_MASK|SQ_WAVE_STATE_PRIV_POISON_ERR_MASK
+var S_SAVE_PC_HI_TRAP_ID_MASK = 0xF0000000
+
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 8
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4
+var SQ_WAVE_LDS_ALLOC_GRANULARITY = 9
+
+var SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK = 0xF
+var SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK = 0x10
+var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT = 5
+var SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK = 0x20
+var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK = 0x40
+var SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT = 6
+var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK = 0x80
+var SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT = 7
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK = 0x100
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT = 8
+var SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK = 0x200
+var SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK = 0x800
+var SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK = 0x80
+var SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK = 0x200
+
+var SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK= SQ_WAVE_EXCP_FLAG_PRIV_MEM_VIOL_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_WAVE_END_MASK |\
+ SQ_WAVE_EXCP_FLAG_PRIV_TRAP_AFTER_INST_MASK
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE = SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_ILLEGAL_INST_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT = SQ_WAVE_EXCP_FLAG_PRIV_WAVE_START_SHIFT
+var SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE = 32 - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT
+var BARRIER_STATE_SIGNAL_OFFSET = 16
+var BARRIER_STATE_VALID_OFFSET = 0
+
+var TTMP11_DEBUG_TRAP_ENABLED_SHIFT = 23
+var TTMP11_DEBUG_TRAP_ENABLED_MASK = 0x800000
+
+// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
+// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
+var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000
+var S_SAVE_BUF_RSRC_WORD3_MISC = 0x10807FAC
+var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+
+var S_SAVE_PC_HI_FIRST_WAVE_MASK = 0x80000000
+var S_SAVE_PC_HI_FIRST_WAVE_SHIFT = 31
+
+var s_sgpr_save_num = 108
+
+var s_save_spi_init_lo = exec_lo
+var s_save_spi_init_hi = exec_hi
+var s_save_pc_lo = ttmp0
+var s_save_pc_hi = ttmp1
+var s_save_exec_lo = ttmp2
+var s_save_exec_hi = ttmp3
+var s_save_state_priv = ttmp12
+var s_save_excp_flag_priv = ttmp15
+var s_save_xnack_mask = s_save_excp_flag_priv
+var s_wave_size = ttmp7
+var s_save_buf_rsrc0 = ttmp8
+var s_save_buf_rsrc1 = ttmp9
+var s_save_buf_rsrc2 = ttmp10
+var s_save_buf_rsrc3 = ttmp11
+var s_save_mem_offset = ttmp4
+var s_save_alloc_size = s_save_excp_flag_priv
+var s_save_tmp = ttmp14
+var s_save_m0 = ttmp5
+var s_save_ttmps_lo = s_save_tmp
+var s_save_ttmps_hi = s_save_excp_flag_priv
+
+var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
+
+var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+var S_WAVE_SIZE = 25
+
+var s_restore_spi_init_lo = exec_lo
+var s_restore_spi_init_hi = exec_hi
+var s_restore_mem_offset = ttmp12
+var s_restore_alloc_size = ttmp3
+var s_restore_tmp = ttmp2
+var s_restore_mem_offset_save = s_restore_tmp
+var s_restore_m0 = s_restore_alloc_size
+var s_restore_mode = ttmp7
+var s_restore_flat_scratch = s_restore_tmp
+var s_restore_pc_lo = ttmp0
+var s_restore_pc_hi = ttmp1
+var s_restore_exec_lo = ttmp4
+var s_restore_exec_hi = ttmp5
+var s_restore_state_priv = ttmp14
+var s_restore_excp_flag_priv = ttmp15
+var s_restore_xnack_mask = ttmp13
+var s_restore_buf_rsrc0 = ttmp8
+var s_restore_buf_rsrc1 = ttmp9
+var s_restore_buf_rsrc2 = ttmp10
+var s_restore_buf_rsrc3 = ttmp11
+var s_restore_size = ttmp6
+var s_restore_ttmps_lo = s_restore_tmp
+var s_restore_ttmps_hi = s_restore_alloc_size
+var s_restore_spi_init_hi_save = s_restore_exec_hi
+
+shader main
+ asic(DEFAULT)
+ type(CS)
+ wave_size(32)
+
+ s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
+
+L_JUMP_TO_RESTORE:
+ s_branch L_RESTORE
+
+L_SKIP_RESTORE:
+ s_getreg_b32 s_save_state_priv, hwreg(HW_REG_WAVE_STATE_PRIV) //save STATUS since we will change SCC
+
+ // Clear SPI_PRIO: do not save with elevated priority.
+ // Clear ECC_ERR: prevents SQC store and triggers FATAL_HALT if setreg'd.
+ s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_ALWAYS_CLEAR_MASK
+
+ s_getreg_b32 s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+
+ s_and_b32 ttmp2, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
+ s_cbranch_scc0 L_NOT_HALTED
+
+L_HALTED:
+ // Host trap may occur while wave is halted.
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_SAVE:
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
+ s_cbranch_scc1 L_SAVE
+
+ // Wave is halted but neither host trap nor SAVECTX is raised.
+ // Caused by instruction fetch memory violation.
+ // Spin wait until context saved to prevent interrupt storm.
+ s_sleep 0x10
+ s_getreg_b32 s_save_excp_flag_priv, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ s_branch L_CHECK_SAVE
+
+L_NOT_HALTED:
+ // Let second-level handle non-SAVECTX exception or trap.
+ // Any concurrent SAVECTX will be handled upon re-entry once halted.
+
+ // Check non-maskable exceptions. memory_violation, illegal_instruction
+ // and xnack_error exceptions always cause the wave to enter the trap
+ // handler.
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_NON_MASKABLE_EXCP_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+ // Check for maskable exceptions in trapsts.excp and trapsts.excp_hi.
+ // Maskable exceptions only cause the wave to enter the trap handler if
+ // their respective bit in mode.excp_en is set.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ s_and_b32 ttmp3, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_ADDR_WATCH_MASK
+ s_cbranch_scc0 L_NOT_ADDR_WATCH
+ s_or_b32 ttmp2, ttmp2, SQ_WAVE_TRAP_CTRL_ADDR_WATCH_MASK
+
+L_NOT_ADDR_WATCH:
+ s_getreg_b32 ttmp3, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ s_and_b32 ttmp2, ttmp3, ttmp2
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+L_CHECK_TRAP_ID:
+ // Check trap_id != 0
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+
+#if SINGLE_STEP_MISSED_WORKAROUND
+ // Prioritize single step exception over context save.
+ // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+ // WAVE_TRAP_CTRL is already in ttmp3.
+ s_and_b32 ttmp3, ttmp3, SQ_WAVE_TRAP_CTRL_TRAP_AFTER_INST_MASK
+ s_cbranch_scc1 L_FETCH_2ND_TRAP
+#endif
+
+ s_and_b32 ttmp2, s_save_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_MASK
+ s_cbranch_scc1 L_SAVE
+
+L_FETCH_2ND_TRAP:
+ // Read second-level TBA/TMA from first-level TMA and jump if available.
+ // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
+ // ttmp12 holds SQ_WAVE_STATUS
+ s_sendmsg_rtn_b64 [ttmp14, ttmp15], sendmsg(MSG_RTN_GET_TMA)
+ s_wait_idle
+ s_lshl_b64 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+
+ s_bitcmp1_b32 ttmp15, 0xF
+ s_cbranch_scc0 L_NO_SIGN_EXTEND_TMA
+ s_or_b32 ttmp15, ttmp15, 0xFFFF0000
+L_NO_SIGN_EXTEND_TMA:
+
+ s_load_dword ttmp2, [ttmp14, ttmp15], 0x10 scope:SCOPE_SYS // debug trap enabled flag
+ s_wait_idle
+ s_lshl_b32 ttmp2, ttmp2, TTMP11_DEBUG_TRAP_ENABLED_SHIFT
+ s_andn2_b32 ttmp11, ttmp11, TTMP11_DEBUG_TRAP_ENABLED_MASK
+ s_or_b32 ttmp11, ttmp11, ttmp2
+
+ s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 scope:SCOPE_SYS // second-level TBA
+ s_wait_idle
+ s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 scope:SCOPE_SYS // second-level TMA
+ s_wait_idle
+
+ s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
+	s_cbranch_scc0	L_NO_NEXT_TRAP						// second-level trap handler has not been set
+ s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
+
+L_NO_NEXT_TRAP:
+ // If not caused by trap then halt wave to prevent re-entry.
+ s_and_b32 ttmp2, s_save_pc_hi, S_SAVE_PC_HI_TRAP_ID_MASK
+ s_cbranch_scc1 L_TRAP_CASE
+
+ // Host trap will not cause trap re-entry.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_EXCP_FLAG_PRIV_HOST_TRAP_MASK
+ s_cbranch_scc1 L_EXIT_TRAP
+ s_or_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_HALT_MASK
+
+ // If the PC points to S_ENDPGM then context save will fail if STATE_PRIV.HALT is set.
+ // Rewind the PC to prevent this from occurring.
+ s_sub_u32 ttmp0, ttmp0, 0x8
+ s_subb_u32 ttmp1, ttmp1, 0x0
+
+ s_branch L_EXIT_TRAP
+
+L_TRAP_CASE:
+ // Advance past trap instruction to prevent re-entry.
+ s_add_u32 ttmp0, ttmp0, 0x4
+ s_addc_u32 ttmp1, ttmp1, 0x0
+
+L_EXIT_TRAP:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+
+ // Restore SQ_WAVE_STATUS.
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+ // STATE_PRIV.BARRIER_COMPLETE may have changed since we read it.
+ // Only restore fields which the trap handler changes.
+ s_lshr_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_SCC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_STATE_PRIV, SQ_WAVE_STATE_PRIV_SCC_SHIFT, \
+ SQ_WAVE_STATE_PRIV_POISON_ERR_SHIFT - SQ_WAVE_STATE_PRIV_SCC_SHIFT + 1), s_save_state_priv
+
+ s_rfe_b64 [ttmp0, ttmp1]
+
+L_SAVE:
+ // If VGPRs have been deallocated then terminate the wavefront.
+ // It has no remaining program to run and cannot save without VGPRs.
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+ s_bitcmp1_b32 s_save_tmp, SQ_WAVE_STATUS_NO_VGPRS_SHIFT
+ s_cbranch_scc0 L_HAVE_VGPRS
+ s_endpgm
+L_HAVE_VGPRS:
+
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+ s_mov_b32 s_save_tmp, 0
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_SAVE_CONTEXT_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+	/* inform SPI of readiness and wait for SPI's go signal */
+ s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
+ s_mov_b32 s_save_exec_hi, exec_hi
+ s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+
+ s_sendmsg_rtn_b64 [exec_lo, exec_hi], sendmsg(MSG_RTN_SAVE_WAVE)
+ s_wait_idle
+
+ // Save first_wave flag so we can clear high bits of save address.
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_lshl_b32 s_save_tmp, s_save_tmp, (S_SAVE_PC_HI_FIRST_WAVE_SHIFT - S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT)
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+
+ // Trap temporaries must be saved via VGPR but all VGPRs are in use.
+ // There is no ttmp space to hold the resource constant for VGPR save.
+ // Save v0 by itself since it requires only two SGPRs.
+ s_mov_b32 s_save_ttmps_lo, exec_lo
+ s_and_b32 s_save_ttmps_hi, exec_hi, 0xFFFF
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] scope:SCOPE_SYS
+ v_mov_b32 v0, 0x0
+ s_mov_b32 exec_lo, s_save_ttmps_lo
+ s_mov_b32 exec_hi, s_save_ttmps_hi
+
+ // Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
+ get_wave_size2(s_save_ttmps_hi)
+ get_vgpr_size_bytes(s_save_ttmps_lo, s_save_ttmps_hi)
+ get_svgpr_size_bytes(s_save_ttmps_hi)
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi
+ s_and_b32 s_save_ttmps_hi, s_save_spi_init_hi, 0xFFFF
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, get_sgpr_size_bytes()
+ s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
+ s_addc_u32 s_save_ttmps_hi, s_save_ttmps_hi, 0x0
+
+ v_writelane_b32 v0, ttmp4, 0x4
+ v_writelane_b32 v0, ttmp5, 0x5
+ v_writelane_b32 v0, ttmp6, 0x6
+ v_writelane_b32 v0, ttmp7, 0x7
+ v_writelane_b32 v0, ttmp8, 0x8
+ v_writelane_b32 v0, ttmp9, 0x9
+ v_writelane_b32 v0, ttmp10, 0xA
+ v_writelane_b32 v0, ttmp11, 0xB
+ v_writelane_b32 v0, ttmp13, 0xD
+ v_writelane_b32 v0, exec_lo, 0xE
+ v_writelane_b32 v0, exec_hi, 0xF
+
+ s_mov_b32 exec_lo, 0x3FFF
+ s_mov_b32 exec_hi, 0x0
+ global_store_dword_addtid v0, [s_save_ttmps_lo, s_save_ttmps_hi] offset:0x40 scope:SCOPE_SYS
+ v_readlane_b32 ttmp14, v0, 0xE
+ v_readlane_b32 ttmp15, v0, 0xF
+ s_mov_b32 exec_lo, ttmp14
+ s_mov_b32 exec_hi, ttmp15
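+	// Annotation (not in the original source): v0 lanes 4..0xB now hold
+	// ttmp4..ttmp11 and lane 0xD holds ttmp13; lanes 0xE/0xF only stash
+	// exec_lo/exec_hi across the store and are read back above, since the
+	// exec mask 0x3FFF stores just lanes 0..0xD at offset 0x40.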
+
+	/* setup Resource Constants */
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+	s_mov_b32	s_save_buf_rsrc2, 0					//NUM_RECORDS initial value = 0 (in bytes), although not necessarily initialized
+ s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
+
+ s_mov_b32 s_save_m0, m0
+
+ /* global mem offset */
+ s_mov_b32 s_save_mem_offset, 0x0
+ get_wave_size2(s_wave_size)
+
+ /* save first 4 VGPRs, needed for SGPR save */
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_4VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_4VGPR_WAVE32
+L_ENABLE_SAVE_4VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ s_branch L_SAVE_4VGPR_WAVE64
+L_SAVE_4VGPR_WAVE32:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
+ s_branch L_SAVE_HWREG
+
+L_SAVE_4VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
+
+ /* save HW registers */
+
+L_SAVE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource
+ v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource
+ v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store
+ s_mov_b32 m0, 0x0 //Next lane of v2 to write to
+
+ // Ensure no further changes to barrier or LDS state.
+ // STATE_PRIV.BARRIER_COMPLETE may change up to this point.
+ s_barrier_signal -2
+ s_barrier_wait -2
+
+ // Re-read final state of BARRIER_COMPLETE field for save.
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATE_PRIV)
+ s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+ s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
+ s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp
+
+ write_hwreg_to_v2(s_save_m0)
+ write_hwreg_to_v2(s_save_pc_lo)
+ s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ write_hwreg_to_v2(s_save_tmp)
+ write_hwreg_to_v2(s_save_exec_lo)
+ write_hwreg_to_v2(s_save_exec_hi)
+ write_hwreg_to_v2(s_save_state_priv)
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
+ write_hwreg_to_v2(s_save_tmp)
+
+ write_hwreg_to_v2(s_save_xnack_mask)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_MODE)
+ write_hwreg_to_v2(s_save_m0)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
+ write_hwreg_to_v2(s_save_m0)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
+ write_hwreg_to_v2(s_save_m0)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ write_hwreg_to_v2(s_save_m0)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ write_hwreg_to_v2(s_save_m0)
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
+ write_hwreg_to_v2(s_save_tmp)
+
+ s_get_barrier_state s_save_tmp, -1
+ s_wait_kmcnt (0)
+ write_hwreg_to_v2(s_save_tmp)
+
+ // Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
+ s_mov_b32 exec_lo, 0xFFFF
+ s_mov_b32 exec_hi, 0x0
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
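+	// Annotation: lanes 0..m0-1 of v2 now hold the HWREG values in save
+	// order, and the single 16-lane store above writes them out as one
+	// contiguous dword array at the HWREG offset computed above.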
+
+ // Write SGPRs with 32 VGPR lanes. This works in wave32 and wave64 mode.
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+
+ /* save SGPRs */
+	// Save SGPRs before the LDS save so that s0 to s4 can be used during the LDS save...
+
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_mov_b32 ttmp13, 0x0 //next VGPR lane to copy SGPR into
+
+ s_mov_b32 m0, 0x0 //SGPR initial index value =0
+ s_nop 0x0 //Manually inserted wait states
+L_SAVE_SGPR_LOOP:
+ // SGPR is allocated in 16 SGPR granularity
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
+ s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
+
+ write_16sgpr_to_v2(s0)
+
+ s_cmp_eq_u32 ttmp13, 0x20 //have 32 VGPR lanes filled?
+ s_cbranch_scc0 L_SAVE_SGPR_SKIP_TCP_STORE
+
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80
+ s_mov_b32 ttmp13, 0x0
+ v_mov_b32 v2, 0x0
+L_SAVE_SGPR_SKIP_TCP_STORE:
+
+ s_add_u32 m0, m0, 16 //next sgpr index
+ s_cmp_lt_u32 m0, 96 //scc = (m0 < first 96 SGPR) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SGPR_LOOP //first 96 SGPR save is complete?
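+	// Annotation: s_movrels_b64 s0, s0 reads s[m0+0:m0+1], so stepping m0
+	// by 16 slides the fixed s0..s15 window across the whole SGPR file;
+	// write_16sgpr_to_v2 packs each window into v2 lanes and a TCP store
+	// flushes v2 once 32 lanes are full.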
+
+	//save the remaining 12 SGPRs
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ write_12sgpr_to_v2(s0)
+
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+ /* save LDS */
+
+L_SAVE_LDS:
+ // Change EXEC to all threads...
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_LDS_NORMAL
+L_ENABLE_SAVE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_LDS_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
+	s_cbranch_scc0	L_SAVE_LDS_DONE						//no lds used? jump to L_SAVE_LDS_DONE
+
+ s_and_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_SAVE_LDS_DONE
+
+	// first wave does the LDS save
+
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
+ s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+	//load byte addresses 0..63*4 (lane_id * 4) into VGPR v0
+ v_mbcnt_lo_u32_b32 v0, -1, 0
+ v_mbcnt_hi_u32_b32 v0, -1, v0
+ v_mul_u32_u24 v0, 4, v0
+
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_SAVE_LDS_W64
+
+L_SAVE_LDS_W32:
+ s_mov_b32 s3, 128
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W32:
+ ds_read_b32 v1, v0
+ s_wait_idle
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+	s_add_u32	m0, m0, s3						//every store covers 128 bytes (32 lanes x 4B)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
+ v_add_nc_u32 v0, v0, 128 //mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete?
+
+ s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_W64:
+ s_mov_b32 s3, 256
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W64:
+ ds_read_b32 v1, v0
+ s_wait_idle
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+
+	s_add_u32	m0, m0, s3						//every store covers 256 bytes (64 lanes x 4B)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3
+ v_add_nc_u32 v0, v0, 256 //mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete?
+
+L_SAVE_LDS_DONE:
+	/* save VGPRs - the remaining VGPRs */
+L_SAVE_VGPR:
+ // VGPR SR memory offset: 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI
+	s_mov_b32	s_save_mem_offset, (0+128*4)				// for the remaining VGPRs
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_VGPR_NORMAL
+L_ENABLE_SAVE_VGPR_EXEC_HI:
+	s_mov_b32	s_save_mem_offset, (0+256*4)				// for the remaining VGPRs
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_VGPR_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+	//determine whether it is wave32 or wave64
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_SAVE_VGPR_WAVE64
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_END
+
+L_SAVE_VGPR_W32_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:128*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128*4 //every buffer_store_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W32_LOOP //VGPR save is complete?
+
+ s_branch L_SAVE_VGPR_END
+
+L_SAVE_VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_SHARED_VGPR
+
+L_SAVE_VGPR_W64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS offset:256*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP //VGPR save is complete?
+
+L_SAVE_SHARED_VGPR:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+	s_cbranch_scc0	L_SAVE_VGPR_END						//no shared_vgpr used? jump to L_SAVE_VGPR_END
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+	//m0 now holds the normal VGPR count; add the shared_vgpr count to it to get the total count.
+	//the shared_vgpr save starts from index m0
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+
+L_SAVE_SHARED_VGPR_WAVE64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete?
+
+L_SAVE_VGPR_END:
+ s_branch L_END_PGM
+
+L_RESTORE:
+	/* Setup Resource Constants */
+ s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
+ s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
+ s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
+
+ // Save s_restore_spi_init_hi for later use.
+ s_mov_b32 s_restore_spi_init_hi_save, s_restore_spi_init_hi
+
+	//determine whether it is wave32 or wave64
+ get_wave_size2(s_restore_size)
+
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_RESTORE_VGPR
+
+ /* restore LDS */
+L_RESTORE_LDS:
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_LDS_NORMAL
+L_ENABLE_RESTORE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_LDS_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, SQ_WAVE_LDS_ALLOC_GRANULARITY
+ s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64
+
+L_RESTORE_LDS_LOOP_W32:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_wait_idle
+ ds_store_addtid_b32 v0
+	s_add_u32	m0, m0, 128						// 128 bytes per pass (32 lanes x 4B)
+	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128		//mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete?
+ s_branch L_RESTORE_VGPR
+
+L_RESTORE_LDS_LOOP_W64:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset
+ s_wait_idle
+ ds_store_addtid_b32 v0
+	s_add_u32	m0, m0, 256						// 256 bytes per pass (64 lanes x 4B)
+	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256		//mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete?
+
+ /* restore VGPRs */
+L_RESTORE_VGPR:
+ // VGPR SR memory offset : 0
+ s_mov_b32 s_restore_mem_offset, 0x0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_VGPR_NORMAL
+L_ENABLE_RESTORE_VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_VGPR_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+	//determine whether it is wave32 or wave64
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset		// restore starts with v4; v0..v3 are restored last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SGPR
+
+L_RESTORE_VGPR_WAVE32_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:128*3
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4 //every buffer_load_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete?
+
+ /* VGPR restore on v0 */
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:128*3
+ s_wait_idle
+
+ s_branch L_RESTORE_SGPR
+
+L_RESTORE_VGPR_WAVE64:
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset		// restore starts with v4; v0..v3 are restored last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_cmp_lt_u32 m0, s_restore_alloc_size
+ s_cbranch_scc0 L_RESTORE_SHARED_VGPR
+
+L_RESTORE_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS offset:256*3
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+L_RESTORE_SHARED_VGPR:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+ s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used?
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+	//m0 now holds the normal VGPR count; add the shared_vgpr count to it to get the total count.
+	//the shared_vgpr restore starts from index m0
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset scope:SCOPE_SYS
+ s_wait_idle
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+	s_mov_b32	exec_hi, 0xFFFFFFFF					//restore exec_hi before restoring v0
+
+ /* VGPR restore on v0 */
+L_RESTORE_V0:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save scope:SCOPE_SYS offset:256*3
+ s_wait_idle
+
+ /* restore SGPRs */
+	//will be 4+8+16*6 = 108 SGPRs
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+L_RESTORE_SGPR:
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+	s_sub_u32	s_restore_mem_offset, s_restore_mem_offset, 20*4	//s108..s127 are not saved
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_mov_b32 m0, s_sgpr_save_num
+
+ read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+	s_sub_u32	m0, m0, 4						// m0 = 104: restore s[104:107]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+
+ read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+	s_sub_u32	m0, m0, 8						// m0 = 96: restore s[96:103]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+
+ L_RESTORE_SGPR_LOOP:
+ read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+	s_sub_u32	m0, m0, 16						// step m0 down by 16: restore s[m0:m0+15]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+ s_movreld_b64 s8, s8
+ s_movreld_b64 s10, s10
+ s_movreld_b64 s12, s12
+ s_movreld_b64 s14, s14
+
+	s_cmp_eq_u32	m0, 0							//scc = (m0 == 0) ? 1 : 0
+ s_cbranch_scc0 L_RESTORE_SGPR_LOOP
+
+ // s_barrier with STATE_PRIV.TRAP_AFTER_INST=1, STATUS.PRIV=1 incorrectly asserts debug exception.
+ // Clear DEBUG_EN before and restore MODE after the barrier.
+ s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE), 0
+
+ /* restore HW registers */
+L_RESTORE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // Restore s_restore_spi_init_hi before the saved value gets clobbered.
+ s_mov_b32 s_restore_spi_init_hi, s_restore_spi_init_hi_save
+
+ read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_state_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_excp_flag_priv, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_SCRATCH_BASE_LO), s_restore_flat_scratch
+
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_SCRATCH_BASE_HI), s_restore_flat_scratch
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_USER), s_restore_tmp
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+ s_setreg_b32 hwreg(HW_REG_WAVE_TRAP_CTRL), s_restore_tmp
+
+ // Only the first wave needs to restore the workgroup barrier.
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+
+ // Skip over WAVE_STATUS, since there is no state to restore from it
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 4
+
+ read_hwreg_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_wait_idle
+
+ s_bitcmp1_b32 s_restore_tmp, BARRIER_STATE_VALID_OFFSET
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+
+ // extract the saved signal count from s_restore_tmp
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, BARRIER_STATE_SIGNAL_OFFSET
+
+ // We need to call s_barrier_signal repeatedly to restore the signal
+ // count of the work group barrier. The member count is already
+ // initialized with the number of waves in the work group.
+L_BARRIER_RESTORE_LOOP:
+ s_and_b32 s_restore_tmp, s_restore_tmp, s_restore_tmp
+ s_cbranch_scc0 L_SKIP_BARRIER_RESTORE
+ s_barrier_signal -1
+ s_add_i32 s_restore_tmp, s_restore_tmp, -1
+ s_branch L_BARRIER_RESTORE_LOOP
+
+L_SKIP_BARRIER_RESTORE:
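+	// Annotation: pseudocode of the loop above, with the signal count
+	// already extracted into s_restore_tmp:
+	//	while (count != 0) { s_barrier_signal(-1); count--; }
+	// Each signal bumps the barrier's signal count; the member count was
+	// already re-initialized by hardware from the workgroup's wave count.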
+
+ s_mov_b32 m0, s_restore_m0
+ s_mov_b32 exec_lo, s_restore_exec_lo
+ s_mov_b32 exec_hi, s_restore_exec_hi
+
+ // EXCP_FLAG_PRIV.SAVE_CONTEXT and HOST_TRAP may have changed.
+ // Only restore the other fields to avoid clobbering them.
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, 0, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_1_SIZE), s_restore_excp_flag_priv
+ s_lshr_b32 s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SIZE), s_restore_excp_flag_priv
+ s_lshr_b32 s_restore_excp_flag_priv, s_restore_excp_flag_priv, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT - SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_2_SHIFT
+ s_setreg_b32 hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SHIFT, SQ_WAVE_EXCP_FLAG_PRIV_RESTORE_PART_3_SIZE), s_restore_excp_flag_priv
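+	// Annotation: per the RESTORE_PART_* definitions, bits [4:0] (part 1),
+	// bit 6 (part 2) and bits [31:8] (part 3) are restored; bit 5
+	// (SAVE_CONTEXT) and bit 7 (HOST_TRAP) are deliberately skipped since
+	// they may have changed while the wave was saved.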
+
+ s_setreg_b32 hwreg(HW_REG_WAVE_MODE), s_restore_mode
+
+ // Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
+ // ttmp SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)+0x40
+ get_vgpr_size_bytes(s_restore_ttmps_lo, s_restore_size)
+ get_svgpr_size_bytes(s_restore_ttmps_hi)
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, get_sgpr_size_bytes()
+ s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
+ s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
+ s_and_b32 s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
+ s_load_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x50 scope:SCOPE_SYS
+ s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x60 scope:SCOPE_SYS
+ s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 scope:SCOPE_SYS
+ s_wait_idle
+
+ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+	s_setreg_b32	hwreg(HW_REG_WAVE_STATE_PRIV), s_restore_state_priv	// SCC is included, which was changed by the previous SALU
+
+ // Make barrier and LDS state visible to all waves in the group.
+ // STATE_PRIV.BARRIER_COMPLETE may change after this point.
+ s_barrier_signal -2
+ s_barrier_wait -2
+
+ s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
+
+L_END_PGM:
+ s_endpgm_saved
+end
+
+function write_hwreg_to_v2(s)
+ // Copy into VGPR for later TCP store.
+ v_writelane_b32 v2, s, m0
+ s_add_u32 m0, m0, 0x1
+end
+
+
+function write_16sgpr_to_v2(s)
+ // Copy into VGPR for later TCP store.
+ for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
+ v_writelane_b32 v2, s[sgpr_idx], ttmp13
+ s_add_u32 ttmp13, ttmp13, 0x1
+ end
+end
+
+function write_12sgpr_to_v2(s)
+ // Copy into VGPR for later TCP store.
+ for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
+ v_writelane_b32 v2, s[sgpr_idx], ttmp13
+ s_add_u32 ttmp13, ttmp13, 0x1
+ end
+end
+
+function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_load_dword s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+end
+
+function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*16
+ s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*8
+ s_buffer_load_dwordx8 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*4
+ s_buffer_load_dwordx4 s, s_rsrc, s_mem_offset scope:SCOPE_SYS
+end
+
+function get_vgpr_size_bytes(s_vgpr_size_byte, s_size)
+ s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_WAVE_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1
+ s_bitcmp1_b32 s_size, S_WAVE_SIZE
+ s_cbranch_scc1 L_ENABLE_SHIFT_W64
+	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+7)	//size in bytes = (vgpr_size + 1) * 4 VGPRs * 32 lanes * 4 bytes (non-zero value)
+	s_branch	L_SHIFT_DONE
+L_ENABLE_SHIFT_W64:
+	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+8)	//size in bytes = (vgpr_size + 1) * 4 VGPRs * 64 lanes * 4 bytes (non-zero value)
+L_SHIFT_DONE:
+end
+
+function get_svgpr_size_bytes(s_svgpr_size_byte)
+ s_getreg_b32 s_svgpr_size_byte, hwreg(HW_REG_WAVE_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+	s_lshl_b32	s_svgpr_size_byte, s_svgpr_size_byte, (3+7)	//size in bytes = shared_vgpr_size * 8 VGPRs * 32 lanes * 4 bytes
+end
+
+function get_sgpr_size_bytes
+ return 512
+end
+
+function get_hwreg_size_bytes
+ return 128
+end
+
+function get_wave_size2(s_reg)
+ s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
+ s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
+end
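
The save-area layout that the new gfx12 handler computes piecewise can be
summarized host-side. A minimal sketch, assuming the sizes encoded in the
helpers above (512 bytes of SGPRs, 128 bytes of HWREGs, shared VGPRs only in
wave64); the function names here are illustrative, not part of the kernel
sources:

	#include <stdint.h>

	/* Bytes of ordinary VGPR state: (vgpr_size_field + 1) * 4 registers,
	 * 32 or 64 lanes, 4 bytes per lane -- mirrors get_vgpr_size_bytes(). */
	static uint32_t vgpr_bytes(uint32_t vgpr_size_field, int wave64)
	{
		return (vgpr_size_field + 1) * 4 * (wave64 ? 64 : 32) * 4;
	}

	/* Byte offset of the LDS block: VGPRs, then shared VGPRs, then SGPRs,
	 * then HWREGs -- the sum formed before L_SAVE_LDS / L_RESTORE_LDS. */
	static uint32_t lds_offset(uint32_t vgpr_field, uint32_t svgpr_field,
				   int wave64)
	{
		uint32_t off = vgpr_bytes(vgpr_field, wave64);

		off += svgpr_field * 8 * 32 * 4; /* get_svgpr_size_bytes(): << (3+7) */
		off += 512;                      /* get_sgpr_size_bytes() */
		off += 128;                      /* get_hwreg_size_bytes() */
		return off;
	}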
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index bb26338204f4..6869e07a2fff 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -37,17 +37,28 @@
* gc_9_4_3:
* cpp -DASIC_FAMILY=GC_9_4_3 cwsr_trap_handler_gfx9.asm -P -o gc_9_4_3.sp3
* sp3 gc_9_4_3.sp3 -hex gc_9_4_3.hex
+ *
+ * gc_9_5_0:
+ * cpp -DASIC_FAMILY=GC_9_5_0 cwsr_trap_handler_gfx9.asm -P -o gc_9_5_0.sp3
+ * sp3 gc_9_5_0.sp3 -hex gc_9_5_0.hex
*/
#define CHIP_VEGAM 18
#define CHIP_ARCTURUS 23
#define CHIP_ALDEBARAN 25
#define CHIP_GC_9_4_3 26
+#define CHIP_GC_9_5_0 27
var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency
var SAVE_AFTER_XNACK_ERROR = 1 //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
var SINGLE_STEP_MISSED_WORKAROUND = (ASIC_FAMILY <= CHIP_ALDEBARAN) //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
+#if ASIC_FAMILY < CHIP_GC_9_4_3
+#define VMEM_MODIFIERS slc:1 glc:1
+#else
+#define VMEM_MODIFIERS sc0:1 nt:1
+#endif
+
/**************************************************************************/
/* variables */
/**************************************************************************/
@@ -62,7 +73,13 @@ var SQ_WAVE_STATUS_ALLOW_REPLAY_MASK = 0x400000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+#if ASIC_FAMILY >= CHIP_GC_9_5_0
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 11
+var LDS_RESTORE_GRANULARITY_BYTES = 1280
+#else
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var LDS_RESTORE_GRANULARITY_BYTES = 512
+#endif
var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 3 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits
var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
@@ -430,7 +447,9 @@ L_SAVE:
s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
-
+ // Clear VSKIP state now that MODE.VSKIP has been saved.
+ // If user shader set it then vector instructions would be skipped.
+ s_setvskip 0,0
/* the first wave in the threadgroup */
	s_and_b32	s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK	// extract first wave bit
@@ -557,12 +576,21 @@ if SAVE_AFTER_XNACK_ERROR
v_lshlrev_b32 v2, 2, v3
L_SAVE_LDS_LOOP_SQC:
+#if ASIC_FAMILY < CHIP_GC_9_5_0
ds_read2_b32 v[0:1], v2 offset0:0 offset1:0x40
s_waitcnt lgkmcnt(0)
-
write_vgprs_to_mem_with_sqc(v0, 2, s_save_buf_rsrc0, s_save_mem_offset)
v_add_u32 v2, 0x200, v2
+#else
+	// gfx950 needs to save in multiples of 256 bytes.
+ ds_read_b32 v0, v2
+ s_waitcnt lgkmcnt(0)
+ write_vgprs_to_mem_with_sqc(v0, 1, s_save_buf_rsrc0, s_save_mem_offset)
+
+ v_add_u32 v2, 0x100, v2
+#endif
+
v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
s_cbranch_vccnz L_SAVE_LDS_LOOP_SQC
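
A note on the stride change above: with the usual 64-lane wave on gfx9,
write_vgprs_to_mem_with_sqc(v0, 1, ...) covers 64 lanes x 4 bytes = 256 (0x100)
bytes per pass, which is why the gfx950 path advances v2 by 0x100 while the
older two-dword-per-lane path advances it by 0x200.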
@@ -581,11 +609,14 @@ end
L_SAVE_LDS_LOOP_VECTOR:
ds_read_b64 v[0:1], v2 //x =LDS[a], byte address
s_waitcnt lgkmcnt(0)
- buffer_store_dwordx2 v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset offen:1 glc:1 slc:1
+ buffer_store_dwordx2 v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset VMEM_MODIFIERS offen:1
// s_waitcnt vmcnt(0)
// v_add_u32 v2, vcc[0:1], v2, v3
v_add_u32 v2, v2, v3
v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
+#if ASIC_FAMILY >= CHIP_GC_9_5_0
+ s_mov_b64 exec, vcc
+#endif
s_cbranch_vccnz L_SAVE_LDS_LOOP_VECTOR
// restore rsrc3
@@ -748,8 +779,13 @@ L_RESTORE:
L_RESTORE_LDS_LOOP:
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
- s_add_u32 m0, m0, 256*2 // 128 DW
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
+#if ASIC_FAMILY >= CHIP_GC_9_5_0
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:512 // third 64DW
+	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:768 // fourth 64DW
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:1024 // fifth 64DW
+#endif
+ s_add_u32 m0, m0, LDS_RESTORE_GRANULARITY_BYTES // 128/320 DW
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, LDS_RESTORE_GRANULARITY_BYTES //mem offset increased by 128/320 DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
s_cbranch_scc1 L_RESTORE_LDS_LOOP //LDS restore is complete?
@@ -979,17 +1015,17 @@ L_TCP_STORE_CHECK_DONE:
end
function write_4vgprs_to_mem(s_rsrc, s_mem_offset)
- buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1
- buffer_store_dword v1, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256
- buffer_store_dword v2, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*2
- buffer_store_dword v3, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*3
+ buffer_store_dword v0, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS
+ buffer_store_dword v1, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256
+ buffer_store_dword v2, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*2
+ buffer_store_dword v3, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*3
end
function read_4vgprs_from_mem(s_rsrc, s_mem_offset)
- buffer_load_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1
- buffer_load_dword v1, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256
- buffer_load_dword v2, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*2
- buffer_load_dword v3, v0, s_rsrc, s_mem_offset slc:1 glc:1 offset:256*3
+ buffer_load_dword v0, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS
+ buffer_load_dword v1, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256
+ buffer_load_dword v2, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*2
+ buffer_load_dword v3, v0, s_rsrc, s_mem_offset VMEM_MODIFIERS offset:256*3
s_waitcnt vmcnt(0)
end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 9044bdb38cf4..065d87841459 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -365,7 +365,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
+ err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id,
NULL, NULL, NULL, &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
@@ -1148,7 +1148,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
size >>= 1;
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
+ atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
}
mutex_unlock(&p->mutex);
@@ -1219,7 +1219,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
+ atomic64_sub(size, &pdd->vram_usage);
err_unlock:
err_pdd:
@@ -2347,7 +2347,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
bo_bucket->restored_offset = offset;
/* Update the VRAM usage count */
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
+ atomic64_add(bo_bucket->size, &pdd->vram_usage);
}
return 0;
}
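
The three kfd_chardev.c hunks above close a lost-update race on
pdd->vram_usage (the field's change to an atomic64_t type presumably lives in
a hunk outside this excerpt). A minimal before/after sketch of one site:

	/* Before: a plain read-modify-write; two concurrent ioctls can read
	 * the same old value and one of the two updates is lost. */
	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));

	/* After: the read-modify-write is a single atomic operation. */
	atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);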
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 48caecf7e72e..693469c18c60 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -28,6 +28,7 @@
#include "kfd_topology.h"
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_xgmi.h"
/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
* GPU processor ID are expressed with Bit[31]=1.
@@ -1422,6 +1423,7 @@ err:
static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
+ bool cache_line_size_missing,
struct kfd_gpu_cache_info *pcache_info)
{
struct amdgpu_device *adev = kdev->adev;
@@ -1436,6 +1438,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_tcp_per_wpg / 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_tcp_cache_line_size;
+ if (cache_line_size_missing && !pcache_info[i].cache_line_size)
+ pcache_info[i].cache_line_size = 128;
i++;
}
/* Scalar L1 Instruction Cache per SQC */
@@ -1448,6 +1452,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_instruction_cache_line_size;
+ if (cache_line_size_missing && !pcache_info[i].cache_line_size)
+ pcache_info[i].cache_line_size = 128;
i++;
}
/* Scalar L1 Data Cache per SQC */
@@ -1459,6 +1465,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_sqc_per_wgp * 2;
pcache_info[i].cache_line_size = adev->gfx.config.gc_scalar_data_cache_line_size;
+ if (cache_line_size_missing && !pcache_info[i].cache_line_size)
+ pcache_info[i].cache_line_size = 64;
i++;
}
/* GL1 Data Cache per SA */
@@ -1471,7 +1479,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
- pcache_info[i].cache_line_size = 0;
+ if (cache_line_size_missing)
+ pcache_info[i].cache_line_size = 128;
i++;
}
/* L2 Data Cache per GPU (Total Tex Cache) */
@@ -1483,6 +1492,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
pcache_info[i].cache_line_size = adev->gfx.config.gc_tcc_cache_line_size;
+ if (cache_line_size_missing && !pcache_info[i].cache_line_size)
+ pcache_info[i].cache_line_size = 128;
i++;
}
/* L3 Data Cache per GPU */
@@ -1493,7 +1504,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
- pcache_info[i].cache_line_size = 0;
+ pcache_info[i].cache_line_size = 64;
i++;
}
return i;
@@ -1509,6 +1520,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_tcp_size_per_cu) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu;
pcache_info[i].cache_level = 1;
+ /* Cache line size is not available in IP discovery for gc943, gc944 */
+ pcache_info[i].cache_line_size = 128;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1520,6 +1533,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
pcache_info[i].cache_size =
adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
+ pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1530,6 +1544,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
+ pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1540,6 +1555,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_tcc_size) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size;
pcache_info[i].cache_level = 2;
+ pcache_info[i].cache_line_size = 128;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1550,6 +1566,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gmc.mall_size) {
pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
pcache_info[i].cache_level = 3;
+ pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1562,6 +1579,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
{
int num_of_cache_types = 0;
+ bool cache_line_size_missing = false;
switch (kdev->adev->asic_type) {
case CHIP_KAVERI:
@@ -1621,6 +1639,7 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd,
*pcache_info);
@@ -1685,10 +1704,17 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
case IP_VERSION(11, 5, 2):
+ /* Cache line size is not available in IP discovery for gc11;
+ * have kfd_fill_gpu_cache_info_from_gfx_config hard-code it.
+ */
+ cache_line_size_missing = true;
+ fallthrough;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
num_of_cache_types =
- kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd, *pcache_info);
+ kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd,
+ cache_line_size_missing,
+ *pcache_info);
break;
default:
*pcache_info = dummy_cache_info;
@@ -2329,6 +2355,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
continue;
if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id)
continue;
+ if (!amdgpu_xgmi_get_is_sharing_enabled(kdev->adev, peer_dev->gpu->adev))
+ continue;
sub_type_hdr = (typeof(sub_type_hdr))(
(char *)sub_type_hdr +
sizeof(struct crat_subtype_iolink));
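
The kfd_crat.c hunks above keep whatever cache line size IP discovery reports and only substitute a hard-coded default (128 bytes for the vector/instruction/GL1/L2 caches, 64 for scalar data and L3) when the field is missing. A small sketch of that fallback pattern, with hypothetical names:

#include <stdio.h>

struct cache_info {
	unsigned int cache_line_size;
};

/* Prefer the value reported by IP discovery; fall back to a known
 * per-ASIC default only when discovery left the field at zero. */
static void fill_cache_line_size(struct cache_info *ci, unsigned int reported,
				 int discovery_missing, unsigned int fallback)
{
	ci->cache_line_size = reported;
	if (discovery_missing && !ci->cache_line_size)
		ci->cache_line_size = fallback;
}

int main(void)
{
	struct cache_info ci;

	fill_cache_line_size(&ci, 0, 1, 128);
	printf("%u\n", ci.cache_line_size); /* prints the 128-byte fallback */
	return 0;
}
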
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index 312dfa84f29f..a8abc3091801 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -350,10 +350,27 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
{
uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
uint32_t flags = pdd->process->dbg_flags;
+ struct amdgpu_device *adev = pdd->dev->adev;
+ int r;
if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
return 0;
+ if (!pdd->proc_ctx_cpu_ptr) {
+ r = amdgpu_amdkfd_alloc_gtt_mem(adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate process context bo\n");
+ return r;
+ }
+ memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+ }
+
return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
pdd->watch_points, flags, sq_trap_en);
}
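
kfd_dbg_set_mes_debug_mode() above now allocates the MES process-context BO on first use rather than unconditionally at process-device creation (the matching removal from kfd_create_process_device_data() appears further down in kfd_process.c). A userspace sketch of the lazy-init pattern, with illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct proc_ctx {
	void *cpu_ptr;
};

#define PROC_CTX_SIZE 4096 /* stands in for AMDGPU_MES_PROC_CTX_SIZE */

/* Allocate and zero the context buffer the first time it is needed;
 * later callers see cpu_ptr already set and skip the allocation. */
static int proc_ctx_get(struct proc_ctx *ctx)
{
	if (ctx->cpu_ptr)
		return 0;
	ctx->cpu_ptr = malloc(PROC_CTX_SIZE);
	if (!ctx->cpu_ptr)
		return -1; /* the driver returns the amdgpu_amdkfd_alloc_gtt_mem() error */
	memset(ctx->cpu_ptr, 0, PROC_CTX_SIZE);
	return 0;
}

int main(void)
{
	struct proc_ctx ctx = { 0 };
	int first = proc_ctx_get(&ctx);
	int second = proc_ctx_get(&ctx); /* no second allocation */

	printf("%d %d\n", first, second); /* 0 0 */
	free(ctx.cpu_ptr);
	return 0;
}

Processes that never create a MES queue now pay nothing for the context buffer.
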
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
index 924d0fd85dfb..27aa1a5b120f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h
@@ -79,6 +79,7 @@ static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev)
return (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0) ||
KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0));
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index fad1c8f2bc83..a29374c86405 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -85,6 +85,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(4, 4, 0):/* ALDEBARAN */
case IP_VERSION(4, 4, 2):
case IP_VERSION(4, 4, 5):
+ case IP_VERSION(4, 4, 4):
case IP_VERSION(5, 0, 0):/* NAVI10 */
case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
case IP_VERSION(5, 0, 2):/* NAVI14 */
@@ -152,6 +153,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
break;
case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
case IP_VERSION(9, 4, 4): /* GC 9.4.4 */
+ case IP_VERSION(9, 5, 0): /* GC 9.5.0 */
kfd->device_info.event_interrupt_class =
&event_interrupt_class_v9_4_3;
break;
@@ -235,6 +237,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
*/
kfd->device_info.needs_pci_atomics = true;
kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
+ } else if (gc_version < IP_VERSION(13, 0, 0)) {
+ kfd->device_info.needs_pci_atomics = true;
+ kfd->device_info.no_atomic_fw_version = 2090;
} else {
kfd->device_info.needs_pci_atomics = true;
}
@@ -353,6 +358,10 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 90402;
f2g = &gc_9_4_3_kfd2kgd;
break;
+ case IP_VERSION(9, 5, 0):
+ gfx_target_version = 90500;
+ f2g = &gc_9_4_3_kfd2kgd;
+ break;
/* Navi10 */
case IP_VERSION(10, 1, 10):
gfx_target_version = 100100;
@@ -512,6 +521,10 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
> KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
+ } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 5, 0)) {
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_5_0_hex) > PAGE_SIZE);
+ kfd->cwsr_isa = cwsr_trap_gfx9_5_0_hex;
+ kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_5_0_hex);
} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex)
> KFD_CWSR_TMA_OFFSET);
@@ -534,7 +547,8 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
kfd->cwsr_isa = cwsr_trap_gfx11_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
} else {
- BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex)
+ > KFD_CWSR_TMA_OFFSET);
kfd->cwsr_isa = cwsr_trap_gfx12_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex);
}
@@ -563,6 +577,7 @@ static int kfd_gws_init(struct kfd_node *node)
&& kfd->mec2_fw_version >= 0x28) ||
(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(node) == IP_VERSION(9, 4, 4)) ||
+ (KFD_GC_VERSION(node) == IP_VERSION(9, 5, 0)) ||
(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
&& kfd->mec2_fw_version >= 0x6b) ||
@@ -634,6 +649,14 @@ static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
struct kfd_node *knode;
unsigned int i;
+ /*
+ * flush_workqueue ensures that there are no outstanding
+ * work-queue items that will access the interrupt ring. New work items
+ * can't be created because we stopped interrupt handling above.
+ */
+ flush_workqueue(kfd->ih_wq);
+ destroy_workqueue(kfd->ih_wq);
+
for (i = 0; i < num_nodes; i++) {
knode = kfd->nodes[i];
device_queue_manager_uninit(knode->dqm);
@@ -729,14 +752,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
- /* For GFX9.4.3, we need special handling for VMIDs depending on
- * partition mode.
+ /* For multi-partition capable GPUs, we need special handling for VMIDs
+ * depending on partition mode.
* In CPX mode, the VMID range needs to be shared between XCDs.
* Additionally, there are 13 VMIDs (3-15) available for KFD. To
* divide them equally, we change starting VMID to 4 and not use
* VMID 3.
- * If the VMID range changes for GFX9.4.3, then this code MUST be
- * revisited.
+ * If the VMID range changes for multi-partition capable GPUs, then
+ * this code MUST be revisited.
*/
if (kfd->adev->xcp_mgr) {
partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
@@ -801,14 +824,12 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
/*
- * For GFX9.4.3, the KFD abstracts all partitions within a socket as
- * xGMI connected in the topology so assign a unique hive id per
- * device based on the pci device location if device is in PCIe mode.
+ * For multi-partition capable GPUs, the KFD abstracts all partitions
+ * within a socket as xGMI connected in the topology so assign a unique
+ * hive id per device based on the pci device location if device is in
+ * PCIe mode.
*/
- if (!kfd->hive_id &&
- (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4)) &&
- kfd->num_nodes > 1)
+ if (!kfd->hive_id && kfd->num_nodes > 1)
kfd->hive_id = pci_dev_id(kfd->adev->pdev);
kfd->noretry = kfd->adev->gmc.noretry;
@@ -846,12 +867,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
}
- if ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4)) &&
- partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+ if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
kfd->num_nodes != 1) {
- /* For GFX9.4.3 and CPX mode, first XCD gets VMID range
- * 4-9 and second XCD gets VMID range 10-15.
+ /* For multi-partition capable GPUs and CPX mode, first
+ * XCD gets VMID range 4-9 and second XCD gets VMID
+ * range 10-15.
*/
node->vm_info.first_vmid_kfd = (i%2 == 0) ?
@@ -875,8 +895,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
amdgpu_amdkfd_get_local_mem_info(kfd->adev,
&node->local_mem_info, node->xcp);
- if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 4))
+ if (kfd->adev->xcp_mgr)
kfd_setup_interrupt_bitmap(node, i);
/* Initialize the KFD node */
@@ -1055,21 +1074,6 @@ static int kfd_resume(struct kfd_node *node)
return err;
}
-static inline void kfd_queue_work(struct workqueue_struct *wq,
- struct work_struct *work)
-{
- int cpu, new_cpu;
-
- cpu = new_cpu = smp_processor_id();
- do {
- new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
- if (cpu_to_node(new_cpu) == numa_node_id())
- break;
- } while (cpu != new_cpu);
-
- queue_work_on(new_cpu, wq, work);
-}
-
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
@@ -1095,7 +1099,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
patched_ihre, &is_patched)
&& enqueue_ih_ring_entry(node,
is_patched ? patched_ihre : ih_ring_entry)) {
- kfd_queue_work(node->ih_wq, &node->interrupt_work);
+ queue_work(node->kfd->ih_wq, &node->interrupt_work);
spin_unlock_irqrestore(&node->interrupt_lock, flags);
return;
}
@@ -1392,6 +1396,13 @@ void kfd_dec_compute_active(struct kfd_node *node)
WARN_ONCE(count < 0, "Compute profile ref. count error");
}
+static bool kfd_compute_active(struct kfd_node *node)
+{
+ if (atomic_read(&node->kfd->compute_profile))
+ return true;
+ return false;
+}
+
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
/*
@@ -1485,6 +1496,91 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
return node->dqm->ops.halt(node->dqm);
}
+bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
+{
+ struct kfd_node *node;
+
+ if (!kfd->init_complete)
+ return false;
+
+ if (node_id >= kfd->num_nodes) {
+ dev_warn(kfd->adev->dev, "Invalid node ID: %u exceeds %u\n",
+ node_id, kfd->num_nodes - 1);
+ return false;
+ }
+
+ node = kfd->nodes[node_id];
+
+ return kfd_compute_active(node);
+}
+
+/**
+ * kgd2kfd_vmfault_fast_path() - KFD vm page fault interrupt handling fast path for gmc v9
+ * @adev: amdgpu device
+ * @entry: vm fault interrupt vector
+ * @retry_fault: if this is retry fault
+ *
+ * retry fault -
+ * with CAM enabled, adev primary ring
+ * | gmc_v9_0_process_interrupt()
+ * adev soft_ring
+ * | gmc_v9_0_process_interrupt() worker failed to recover page fault
+ * KFD node ih_fifo
+ * | KFD interrupt_wq worker
+ * kfd_signal_vm_fault_event
+ *
+ * without CAM, adev primary ring1
+ * | gmc_v9_0_process_interrupt worker failed to recover page fault
+ * KFD node ih_fifo
+ * | KFD interrupt_wq worker
+ * kfd_signal_vm_fault_event
+ *
+ * no-retry fault -
+ * adev primary ring
+ * | gmc_v9_0_process_interrupt()
+ * KFD node ih_fifo
+ * | KFD interrupt_wq worker
+ * kfd_signal_vm_fault_event
+ *
+ * fast path - After kfd_signal_vm_fault_event, gmc_v9_0_process_interrupt drops further page
+ * faults of the same process and does not copy the interrupt to the KFD node ih_fifo.
+ * With the gdb debugger enabled, retry faults must be converted to no-retry faults for
+ * the debugger, so the fast path cannot be used.
+ *
+ * Return:
+ * true - use the fast path to handle this fault
+ * false - use normal path to handle it
+ */
+bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault)
+{
+ struct kfd_process *p;
+ u32 cam_index;
+
+ if (entry->ih == &adev->irq.ih_soft || entry->ih == &adev->irq.ih1) {
+ p = kfd_lookup_process_by_pasid(entry->pasid);
+ if (!p)
+ return true;
+
+ if (p->gpu_page_fault && !p->debug_trap_enabled) {
+ if (retry_fault && adev->irq.retry_cam_enabled) {
+ cam_index = entry->src_data[2] & 0x3ff;
+ WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
+ }
+
+ kfd_unref_process(p);
+ return true;
+ }
+
+ /*
+ * This is the first page fault; set the flag and then signal user space
+ */
+ p->gpu_page_fault = true;
+ kfd_unref_process(p);
+ }
+ return false;
+}
+
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
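
The decision logic of kgd2kfd_vmfault_fast_path() above, reduced to plain C: the first fault of a process is delivered to user space, repeated faults from the same process are dropped on the fast path, and a process with the debug trap enabled never takes it. Types and names below are simplified stand-ins, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct proc {
	bool gpu_page_fault;     /* set once the first fault was signalled */
	bool debug_trap_enabled; /* gdb needs every fault on the slow path */
};

static bool take_fast_path(struct proc *p)
{
	if (!p)
		return true; /* process already gone: nothing to signal, drop */
	if (p->gpu_page_fault && !p->debug_trap_enabled)
		return true; /* already signalled once: drop the duplicate */
	p->gpu_page_fault = true; /* first fault: route to user space */
	return false;
}

int main(void)
{
	struct proc p = { 0 };
	int first = take_fast_path(&p);
	int second = take_fast_path(&p);

	printf("%d %d\n", first, second); /* 0 then 1 */
	return 0;
}
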
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 648f40091aa3..d4593374e7a1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -202,9 +202,26 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
int r, queue_type;
uint64_t wptr_addr_off;
+ if (!dqm->sched_running || dqm->sched_halt)
+ return 0;
if (!down_read_trylock(&adev->reset_domain->sem))
return -EIO;
+ if (!pdd->proc_ctx_cpu_ptr) {
+ r = amdgpu_amdkfd_alloc_gtt_mem(adev,
+ AMDGPU_MES_PROC_CTX_SIZE,
+ &pdd->proc_ctx_bo,
+ &pdd->proc_ctx_gpu_addr,
+ &pdd->proc_ctx_cpu_ptr,
+ false);
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate process context bo\n");
+ return r;
+ }
+ memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+ }
+
memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
queue_input.process_id = qpd->pqm->process->pasid;
queue_input.page_table_base_addr = qpd->page_table_base;
@@ -270,6 +287,8 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
int r;
struct mes_remove_queue_input queue_input;
+ if (!dqm->sched_running || dqm->sched_halt)
+ return 0;
if (!down_read_trylock(&adev->reset_domain->sem))
return -EIO;
@@ -292,7 +311,7 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
return r;
}
-static int remove_all_queues_mes(struct device_queue_manager *dqm)
+static int remove_all_kfd_queues_mes(struct device_queue_manager *dqm)
{
struct device_process_node *cur;
struct device *dev = dqm->dev->adev->dev;
@@ -319,6 +338,33 @@ static int remove_all_queues_mes(struct device_queue_manager *dqm)
return retval;
}
+static int add_all_kfd_queues_mes(struct device_queue_manager *dqm)
+{
+ struct device_process_node *cur;
+ struct device *dev = dqm->dev->adev->dev;
+ struct qcm_process_device *qpd;
+ struct queue *q;
+ int retval = 0;
+
+ list_for_each_entry(cur, &dqm->queues, list) {
+ qpd = cur->qpd;
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (!q->properties.is_active)
+ continue;
+ retval = add_queue_mes(dqm, q, qpd);
+ if (retval) {
+ dev_err(dev, "%s: Failed to add queue %d for dev %d\n",
+ __func__,
+ q->properties.queue_id,
+ dqm->dev->id);
+ return retval;
+ }
+ }
+ }
+
+ return retval;
+}
+
static int suspend_all_queues_mes(struct device_queue_manager *dqm)
{
struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
@@ -1742,7 +1788,7 @@ static int halt_cpsch(struct device_queue_manager *dqm)
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD, false);
else
- ret = remove_all_queues_mes(dqm);
+ ret = remove_all_kfd_queues_mes(dqm);
}
dqm->sched_halt = true;
dqm_unlock(dqm);
@@ -1768,6 +1814,9 @@ static int unhalt_cpsch(struct device_queue_manager *dqm)
ret = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
0, USE_DEFAULT_GRACE_PERIOD);
+ else
+ ret = add_all_kfd_queues_mes(dqm);
+
dqm_unlock(dqm);
return ret;
@@ -1867,7 +1916,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
if (!dqm->dev->kfd->shared_resources.enable_mes)
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
else
- remove_all_queues_mes(dqm);
+ remove_all_kfd_queues_mes(dqm);
dqm->sched_running = false;
@@ -2048,7 +2097,7 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
{
unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
struct device *dev = dqm->dev->adev->dev;
- uint64_t *fence_addr = dqm->fence_addr;
+ uint64_t *fence_addr = dqm->fence_addr;
while (*fence_addr != fence_value) {
/* Fatal err detected, this response won't come */
@@ -2254,6 +2303,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
goto out;
*dqm->fence_addr = KFD_FENCE_INIT;
+ mb();
pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
KFD_FENCE_COMPLETED);
/* should be timed out */
@@ -2275,9 +2325,9 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
+ while (halt_if_hws_hang)
+ schedule();
if (reset_queues_on_hws_hang(dqm)) {
- while (halt_if_hws_hang)
- schedule();
dqm->is_hws_hang = true;
kfd_hws_hang(dqm);
retval = -ETIME;
@@ -2338,6 +2388,9 @@ static int wait_on_destroy_queue(struct device_queue_manager *dqm,
q->process);
int ret = 0;
+ if (WARN_ON(!pdd))
+ return ret;
+
if (pdd->qpd.is_debug)
return ret;
@@ -3173,7 +3226,7 @@ struct copy_context_work_handler_workarea {
struct kfd_process *p;
};
-static void copy_context_work_handler (struct work_struct *work)
+static void copy_context_work_handler(struct work_struct *work)
{
struct copy_context_work_handler_workarea *workarea;
struct mqd_manager *mqd_mgr;
@@ -3200,6 +3253,9 @@ static void copy_context_work_handler (struct work_struct *work)
struct qcm_process_device *qpd = &pdd->qpd;
list_for_each_entry(q, &qpd->queues_list, list) {
+ if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE)
+ continue;
+
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
/* We ignore the return value from get_wave_state
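
unhalt_cpsch() above gains add_all_kfd_queues_mes() as the mirror image of remove_all_kfd_queues_mes() in halt_cpsch(): halting pulls every KFD queue out of the MES scheduler, and unhalting walks the same process/queue lists and re-adds the active ones. A sketch of that restore loop, with illustrative types:

struct queue {
	int is_active;
	struct queue *next;
};

/* Re-add every queue that was active when the scheduler was halted;
 * stop on the first failure, as the driver does. */
static int restore_queues(struct queue *head, int (*add_queue)(struct queue *))
{
	struct queue *q;
	int r;

	for (q = head; q; q = q->next) {
		if (!q->is_active)
			continue; /* inactive queues stay unmapped */
		r = add_queue(q);
		if (r)
			return r;
	}
	return 0;
}
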
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 210bcc048f4c..67137e674f1d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -64,7 +64,8 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0))
qpd->sh_mem_config |=
(1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index ea3792249209..d075f24e5f9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -748,6 +748,16 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint64_t *slots = page_slots(p->signal_page);
uint32_t id;
+ /*
+ * If the id is valid but the slot is not signaled, the GPU may have signaled the
+ * same event twice before the driver had a chance to process the first interrupt;
+ * the signal slot is auto-reset after set_event wakes up user space. Drop the
+ * second event, as the application only needs to be woken once.
+ */
+ if ((valid_id_bits > 31 || (1U << valid_id_bits) >= KFD_SIGNAL_EVENT_LIMIT) &&
+ partial_id < KFD_SIGNAL_EVENT_LIMIT && slots[partial_id] == UNSIGNALED_EVENT_SLOT)
+ goto out_unlock;
+
if (valid_id_bits)
pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
partial_id, valid_id_bits);
@@ -776,6 +786,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
}
}
+out_unlock:
rcu_read_unlock();
kfd_unref_process(p);
}
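
A plain-C sketch of the duplicate-signal check added above, with illustrative constants standing in for KFD_SIGNAL_EVENT_LIMIT and UNSIGNALED_EVENT_SLOT:

#include <stdbool.h>
#include <stdint.h>

#define SLOT_LIMIT  4096u   /* stands in for KFD_SIGNAL_EVENT_LIMIT */
#define UNSIGNALED  (~0ull) /* stands in for UNSIGNALED_EVENT_SLOT */

/* True when the interrupt carries a fully valid event ID whose slot is
 * no longer signalled: the first interrupt was already processed and
 * woke user space, so this second one is a duplicate and can be dropped. */
static bool drop_duplicate_signal(unsigned int valid_id_bits, uint32_t partial_id,
				  const uint64_t *slots)
{
	bool id_fully_valid = valid_id_bits > 31 ||
			      (1u << valid_id_bits) >= SLOT_LIMIT;

	return id_fully_valid && partial_id < SLOT_LIMIT &&
	       slots[partial_id] == UNSIGNALED;
}
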
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index d46a13156ee9..0cb5c582ce7d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -184,6 +184,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
} else {
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
+ amdgpu_ras_set_err_poison(dev->adev, AMDGPU_RAS_BLOCK__GFX);
break;
case SOC15_IH_CLIENTID_VMC:
case SOC15_IH_CLIENTID_VMC1:
@@ -213,6 +214,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
} else {
reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
}
+ amdgpu_ras_set_err_poison(dev->adev, AMDGPU_RAS_BLOCK__SDMA);
break;
default:
dev_warn(dev->adev->dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 9b6b6e882593..783c2f5a04e4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -46,7 +46,7 @@
#include <linux/kfifo.h>
#include "kfd_priv.h"
-#define KFD_IH_NUM_ENTRIES 8192
+#define KFD_IH_NUM_ENTRIES 16384
static void interrupt_wq(struct work_struct *);
@@ -62,11 +62,14 @@ int kfd_interrupt_init(struct kfd_node *node)
return r;
}
- node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
- if (unlikely(!node->ih_wq)) {
- kfifo_free(&node->ih_fifo);
- dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
- return -ENOMEM;
+ if (!node->kfd->ih_wq) {
+ node->kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI | WQ_UNBOUND,
+ node->kfd->num_nodes);
+ if (unlikely(!node->kfd->ih_wq)) {
+ kfifo_free(&node->ih_fifo);
+ dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
+ return -ENOMEM;
+ }
}
spin_lock_init(&node->interrupt_lock);
@@ -96,16 +99,6 @@ void kfd_interrupt_exit(struct kfd_node *node)
spin_lock_irqsave(&node->interrupt_lock, flags);
node->interrupts_active = false;
spin_unlock_irqrestore(&node->interrupt_lock, flags);
-
- /*
- * flush_work ensures that there are no outstanding
- * work-queue items that will access interrupt_ring. New work items
- * can't be created because we stopped interrupt handling above.
- */
- flush_workqueue(node->ih_wq);
-
- destroy_workqueue(node->ih_wq);
-
kfifo_free(&node->ih_fifo);
}
@@ -114,55 +107,48 @@ void kfd_interrupt_exit(struct kfd_node *node)
*/
bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry)
{
- int count;
-
- count = kfifo_in(&node->ih_fifo, ih_ring_entry,
- node->kfd->device_info.ih_ring_entry_size);
- if (count != node->kfd->device_info.ih_ring_entry_size) {
- dev_dbg_ratelimited(node->adev->dev,
- "Interrupt ring overflow, dropping interrupt %d\n",
- count);
+ if (kfifo_is_full(&node->ih_fifo)) {
+ dev_warn_ratelimited(node->adev->dev, "KFD node %d ih_fifo overflow\n",
+ node->node_id);
return false;
}
+ kfifo_in(&node->ih_fifo, ih_ring_entry, node->kfd->device_info.ih_ring_entry_size);
return true;
}
/*
* Assumption: single reader/writer. This function is not re-entrant
*/
-static bool dequeue_ih_ring_entry(struct kfd_node *node, void *ih_ring_entry)
+static bool dequeue_ih_ring_entry(struct kfd_node *node, u32 **ih_ring_entry)
{
int count;
- count = kfifo_out(&node->ih_fifo, ih_ring_entry,
- node->kfd->device_info.ih_ring_entry_size);
-
- WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size);
+ if (kfifo_is_empty(&node->ih_fifo))
+ return false;
+ count = kfifo_out_linear_ptr(&node->ih_fifo, ih_ring_entry,
+ node->kfd->device_info.ih_ring_entry_size);
+ WARN_ON(count != node->kfd->device_info.ih_ring_entry_size);
return count == node->kfd->device_info.ih_ring_entry_size;
}
static void interrupt_wq(struct work_struct *work)
{
- struct kfd_node *dev = container_of(work, struct kfd_node,
- interrupt_work);
- uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
+ struct kfd_node *dev = container_of(work, struct kfd_node, interrupt_work);
+ uint32_t *ih_ring_entry;
unsigned long start_jiffies = jiffies;
- if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
- dev_err_once(dev->adev->dev, "Ring entry too small\n");
- return;
- }
-
- while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
+ while (dequeue_ih_ring_entry(dev, &ih_ring_entry)) {
dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev,
ih_ring_entry);
+ kfifo_skip_count(&dev->ih_fifo, dev->kfd->device_info.ih_ring_entry_size);
+
if (time_is_before_jiffies(start_jiffies + HZ)) {
/* If we spent more than a second processing signals,
* reschedule the worker to avoid soft-lockup warnings
*/
- queue_work(dev->ih_wq, &dev->interrupt_work);
+ queue_work(dev->kfd->ih_wq, &dev->interrupt_work);
break;
}
}
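
The interrupt worker above switches from copying each entry out of the kfifo to processing it in place: dequeue_ih_ring_entry() now returns a pointer into the fifo (kfifo_out_linear_ptr) and interrupt_wq() releases the entry afterwards (kfifo_skip_count), saving one copy per interrupt. A plain-C ring-buffer sketch of the peek-then-skip pattern, assuming the entry size divides the buffer size so entries never wrap:

#include <stddef.h>

struct ring {
	unsigned char buf[4096];
	size_t in, out; /* free-running indexes into a power-of-two buffer */
	size_t entry;   /* fixed entry size, divides sizeof(buf) */
};

/* Peek at the next entry in place; NULL when the ring is empty. */
static void *ring_peek(struct ring *r)
{
	if (r->in == r->out)
		return NULL;
	return &r->buf[r->out % sizeof(r->buf)];
}

/* Release the entry only after it has been fully processed. */
static void ring_skip(struct ring *r)
{
	r->out += r->entry;
}
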
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 4843dcb9a5f7..2b0a830f5b29 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -125,7 +125,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
memset(kq->pq_kernel_addr, 0, queue_size);
memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel));
- memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel));
+ memset(kq->wptr_kernel, 0, dev->kfd->device_info.doorbell_size);
prop.queue_size = queue_size;
prop.is_interop = false;
@@ -306,12 +306,17 @@ int kq_submit_packet(struct kernel_queue *kq)
if (amdgpu_amdkfd_is_fed(kq->dev->adev))
return -EIO;
+ /* Make sure ring buffer is updated before wptr updated */
+ mb();
+
if (kq->dev->kfd->device_info.doorbell_size == 8) {
*kq->wptr64_kernel = kq->pending_wptr64;
+ mb(); /* Make sure wptr updated before ring doorbell */
write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
kq->pending_wptr64);
} else {
*kq->wptr_kernel = kq->pending_wptr;
+ mb(); /* Make sure wptr updated before ring doorbell */
write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
kq->pending_wptr);
}
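
kq_submit_packet() above adds two barriers: one so the ring-buffer contents are globally visible before the write pointer moves, and one so the write pointer is visible before the doorbell write that lets the GPU start fetching. The same ordering expressed in userspace C11 atomics, as a sketch of the idea rather than the MMIO code:

#include <stdatomic.h>
#include <stdint.h>

static uint32_t ring[256];
static _Atomic uint32_t wptr;
static _Atomic uint32_t doorbell;

static void submit(uint32_t slot, uint32_t packet)
{
	ring[slot % 256] = packet;                 /* 1. fill the ring buffer */
	atomic_thread_fence(memory_order_release); /* mb(): ring before wptr */
	atomic_store_explicit(&wptr, slot + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release); /* mb(): wptr before doorbell */
	atomic_store_explicit(&doorbell, slot + 1, memory_order_relaxed);
}
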
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 8ee3d07ffbdf..d05d199b5e44 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -278,10 +278,11 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t ttm_res_offset)
{
- uint64_t npages = migrate->cpages;
+ uint64_t npages = migrate->npages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
+ uint64_t mpages = 0;
dma_addr_t *src;
uint64_t *dst;
uint64_t i, j;
@@ -295,18 +296,20 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
- for (i = j = 0; i < npages; i++) {
+ for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
struct page *spage;
- dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-
+ if (migrate->src[i] & MIGRATE_PFN_MIGRATE) {
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+ mpages++;
+ }
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
r = dma_mapping_error(dev, src[i]);
if (r) {
dev_err(dev, "%s: fail %d dma_map_page\n",
@@ -353,9 +356,12 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
out_free_vram_pages:
if (r) {
pr_debug("failed %d to copy memory to vram\n", r);
- while (i--) {
+ for (i = 0; i < npages && mpages; i++) {
+ if (!dst[i])
+ continue;
svm_migrate_put_vram_page(adev, dst[i]);
migrate->dst[i] = 0;
+ mpages--;
}
}
@@ -445,14 +451,13 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
mpages, cpages, migrate.npages);
- kfd_smi_event_migration_end(node, p->lead_thread->pid,
- start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- 0, node->id, trigger);
-
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
+ kfd_smi_event_migration_end(node, p->lead_thread->pid,
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT,
+ 0, node->id, trigger, r);
out:
if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
@@ -630,7 +635,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
goto out_oom;
}
- dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
r = dma_mapping_error(dev, dst[i]);
if (r) {
dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
@@ -751,14 +756,13 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- kfd_smi_event_migration_end(node, p->lead_thread->pid,
- start >> PAGE_SHIFT, end >> PAGE_SHIFT,
- node->id, 0, trigger);
-
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
+ kfd_smi_event_migration_end(node, p->lead_thread->pid,
+ start >> PAGE_SHIFT, end >> PAGE_SHIFT,
+ node->id, 0, trigger, r);
out:
if (!r && cpages) {
mpages = cpages - upages;
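
svm_migrate_copy_to_vram() above now iterates over migrate->npages instead of migrate->cpages and assigns a destination VRAM page only when the core mm marked the source page migratable, stopping once all collected pages are paired. A sketch of that pairing loop; the flag value and allocator callback are illustrative:

#include <stdint.h>

#define MIGRATE_OK (1ull << 0) /* stands in for MIGRATE_PFN_MIGRATE */

static uint64_t pair_dst_pages(const uint64_t *src, uint64_t *dst,
			       uint64_t npages, uint64_t cpages,
			       uint64_t (*alloc_vram_pfn)(void))
{
	uint64_t i, mpages = 0;

	for (i = 0; i < npages && mpages < cpages; i++) {
		if (!(src[i] & MIGRATE_OK))
			continue;          /* not migratable: dst[i] stays 0 */
		dst[i] = alloc_vram_pfn(); /* cursor.start + (j << PAGE_SHIFT) in the driver */
		mpages++;
	}
	return mpages;
}
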
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 84e8ea3a8a0c..ff417d5361c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -78,7 +78,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0)) {
m->compute_static_thread_mgmt_se4 = se_mask[4];
m->compute_static_thread_mgmt_se5 = se_mask[5];
m->compute_static_thread_mgmt_se6 = se_mask[6];
@@ -301,7 +302,8 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_ctx_save_control = 0;
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0))
update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
@@ -885,7 +887,8 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) {
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
@@ -909,8 +912,10 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
+ mqd->check_preemption_failed = check_preemption_failed;
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) {
mqd->init_mqd = init_mqd_hiq_v9_4_3;
mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 37930629edc5..4984b41cd372 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -28,6 +28,10 @@
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
+#define OVER_SUBSCRIPTION_PROCESS_COUNT (1 << 0)
+#define OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT (1 << 1)
+#define OVER_SUBSCRIPTION_GWS_QUEUE_COUNT (1 << 2)
+
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
unsigned int buffer_size_bytes)
{
@@ -40,7 +44,7 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
- bool *over_subscription)
+ int *over_subscription)
{
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
@@ -58,17 +62,20 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
- *over_subscription = false;
+ *over_subscription = 0;
if (node->max_proc_per_quantum > 1)
max_proc_per_quantum = node->max_proc_per_quantum;
- if ((process_count > max_proc_per_quantum) ||
- compute_queue_count > get_cp_queues_num(pm->dqm) ||
- gws_queue_count > 1) {
- *over_subscription = true;
+ if (process_count > max_proc_per_quantum)
+ *over_subscription |= OVER_SUBSCRIPTION_PROCESS_COUNT;
+ if (compute_queue_count > get_cp_queues_num(pm->dqm))
+ *over_subscription |= OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT;
+ if (gws_queue_count > 1)
+ *over_subscription |= OVER_SUBSCRIPTION_GWS_QUEUE_COUNT;
+
+ if (*over_subscription)
dev_dbg(dev, "Over subscribed runlist\n");
- }
map_queue_size = pm->pmf->map_queues_size;
/* calculate run list ib allocation size */
@@ -89,7 +96,7 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
unsigned int **rl_buffer,
uint64_t *rl_gpu_buffer,
unsigned int *rl_buffer_size,
- bool *is_over_subscription)
+ int *is_over_subscription)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
@@ -134,7 +141,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct qcm_process_device *qpd;
struct queue *q;
struct kernel_queue *kq;
- bool is_over_subscription;
+ int is_over_subscription;
rl_wptr = retval = processes_mapped = 0;
@@ -213,15 +220,20 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
if (is_over_subscription) {
if (!pm->is_over_subscription)
- dev_warn(
- dev,
- "Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
+ dev_warn(dev, "Runlist is getting oversubscribed due to%s%s%s. Expect reduced ROCm performance.\n",
+ is_over_subscription & OVER_SUBSCRIPTION_PROCESS_COUNT ?
+ " too many processes." : "",
+ is_over_subscription & OVER_SUBSCRIPTION_COMPUTE_QUEUE_COUNT ?
+ " too many queues." : "",
+ is_over_subscription & OVER_SUBSCRIPTION_GWS_QUEUE_COUNT ?
+ " multiple processes using cooperative launch." : "");
+
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
alloc_size_bytes / sizeof(uint32_t),
true);
}
- pm->is_over_subscription = is_over_subscription;
+ pm->is_over_subscription = !!is_over_subscription;
for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
pr_debug("0x%2X ", rl_buffer[i]);
@@ -248,7 +260,8 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
default:
if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) ||
KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 5, 0))
pm->pmf = &kfd_aldebaran_pm_funcs;
else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
pm->pmf = &kfd_v9_pm_funcs;
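
The packet-manager hunks above turn the single oversubscription boolean into a bitmask of reasons so the warning can name which limit was exceeded. A standalone sketch of the pattern; the thresholds here are illustrative, not the driver's:

#include <stdio.h>

#define OVER_PROCESS_COUNT       (1 << 0)
#define OVER_COMPUTE_QUEUE_COUNT (1 << 1)
#define OVER_GWS_QUEUE_COUNT     (1 << 2)

/* Collect every reason instead of a single yes/no. */
static int oversubscription_reasons(int procs, int queues, int gws_queues)
{
	int why = 0;

	if (procs > 8)
		why |= OVER_PROCESS_COUNT;
	if (queues > 24)
		why |= OVER_COMPUTE_QUEUE_COUNT;
	if (gws_queues > 1)
		why |= OVER_GWS_QUEUE_COUNT;
	return why;
}

int main(void)
{
	int why = oversubscription_reasons(9, 30, 0);

	if (why)
		printf("oversubscribed due to%s%s%s\n",
		       why & OVER_PROCESS_COUNT ? " too many processes" : "",
		       why & OVER_COMPUTE_QUEUE_COUNT ? " too many queues" : "",
		       why & OVER_GWS_QUEUE_COUNT ? " cooperative launch" : "");
	return 0;
}
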
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d6530febabad..d8cd913aa772 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -32,7 +32,7 @@
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
-#include <linux/kfd_ioctl.h>
+#include <uapi/linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
@@ -207,7 +207,8 @@ enum cache_policy {
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) || \
- (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)))
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) || \
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)))
struct kfd_node;
@@ -273,7 +274,6 @@ struct kfd_node {
/* Interrupts */
struct kfifo ih_fifo;
- struct workqueue_struct *ih_wq;
struct work_struct interrupt_work;
spinlock_t interrupt_lock;
@@ -366,6 +366,8 @@ struct kfd_dev {
struct kfd_node *nodes[MAX_KFD_NODES];
unsigned int num_nodes;
+ struct workqueue_struct *ih_wq;
+
/* Kernel doorbells for KFD device */
struct amdgpu_bo *doorbells;
@@ -775,7 +777,7 @@ struct kfd_process_device {
enum kfd_pdd_bound bound;
/* VRAM usage */
- uint64_t vram_usage;
+ atomic64_t vram_usage;
struct attribute attr_vram;
char vram_filename[MAX_SYSFS_FILENAME_LEN];
@@ -1002,6 +1004,9 @@ struct kfd_process {
struct semaphore runtime_enable_sema;
bool is_runtime_retry;
struct kfd_runtime_info runtime_info;
+
+ /* if gpu page fault sent to KFD */
+ bool gpu_page_fault;
};
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
@@ -1150,7 +1155,8 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
uint32_t i;
if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0))
return dev->nodes[0];
for (i = 0; i < dev->num_nodes; i++)
@@ -1347,7 +1353,6 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_node *dev,
- struct file *f,
struct queue_properties *properties,
unsigned int *qid,
const struct kfd_criu_queue_priv_data *q_data,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d07acf1b2f93..083f83c94531 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -271,11 +271,9 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
struct kfd_process *proc = NULL;
struct kfd_process_device *pdd = NULL;
int i;
- struct kfd_cu_occupancy cu_occupancy[AMDGPU_MAX_QUEUES];
+ struct kfd_cu_occupancy *cu_occupancy;
u32 queue_format;
- memset(cu_occupancy, 0x0, sizeof(cu_occupancy));
-
pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
dev = pdd->dev;
if (dev->kfd2kgd->get_cu_occupancy == NULL)
@@ -293,6 +291,10 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
wave_cnt = 0;
max_waves_per_cu = 0;
+ cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy), GFP_KERNEL);
+ if (!cu_occupancy)
+ return -ENOMEM;
+
/*
* For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition.
* For AQL queues, because of cooperative dispatch we multiply the wave count
@@ -318,6 +320,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
/* Translate wave count to number of compute units */
cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
+ kfree(cu_occupancy);
return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}
@@ -332,14 +335,14 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
} else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
- return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
+ return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
} else if (strncmp(attr->name, "sdma_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_sdma);
struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
- INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
- kfd_sdma_activity_worker);
+ INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work,
+ kfd_sdma_activity_worker);
sdma_activity_work_handler.pdd = pdd;
sdma_activity_work_handler.sdma_activity_counter = 0;
@@ -347,6 +350,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
schedule_work(&sdma_activity_work_handler.sdma_activity_work);
flush_work(&sdma_activity_work_handler.sdma_activity_work);
+ destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work);
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(sdma_activity_work_handler.sdma_activity_counter)/
@@ -850,8 +854,10 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
goto out;
}
- /* A prior open of /dev/kfd could have already created the process. */
- process = find_process(thread, false);
+ /* A prior open of /dev/kfd could have already created the process.
+ * find_process takes a kref on the process in this case.
+ */
+ process = find_process(thread, true);
if (process) {
pr_debug("Process already found\n");
} else {
@@ -899,8 +905,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
init_waitqueue_head(&process->wait_irq_drain);
}
out:
- if (!IS_ERR(process))
- kref_get(&process->ref);
mutex_unlock(&kfd_processes_mutex);
mmput(thread->mm);
@@ -1072,7 +1076,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
kfd_free_process_doorbells(pdd->dev->kfd, pdd);
- if (pdd->dev->kfd->shared_resources.enable_mes)
+ if (pdd->dev->kfd->shared_resources.enable_mes &&
+ pdd->proc_ctx_cpu_ptr)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
&pdd->proc_ctx_bo);
/*
@@ -1155,7 +1160,8 @@ static void kfd_process_wq_release(struct work_struct *work)
*/
synchronize_rcu();
ef = rcu_access_pointer(p->ef);
- dma_fence_signal(ef);
+ if (ef)
+ dma_fence_signal(ef);
kfd_process_remove_sysfs(p);
@@ -1186,10 +1192,8 @@ static void kfd_process_ref_release(struct kref *ref)
static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
- int idx = srcu_read_lock(&kfd_processes_srcu);
- struct kfd_process *p = find_process_by_mm(mm);
-
- srcu_read_unlock(&kfd_processes_srcu, idx);
+ /* This increments p->ref counter if kfd process p exists */
+ struct kfd_process *p = kfd_lookup_process_by_mm(mm);
return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}
@@ -1606,7 +1610,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
- int retval = 0;
if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
return NULL;
@@ -1625,26 +1628,11 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
- pdd->vram_usage = 0;
+ atomic64_set(&pdd->vram_usage, 0);
pdd->sdma_past_activity_counter = 0;
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);
- if (dev->kfd->shared_resources.enable_mes) {
- retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
- AMDGPU_MES_PROC_CTX_SIZE,
- &pdd->proc_ctx_bo,
- &pdd->proc_ctx_gpu_addr,
- &pdd->proc_ctx_cpu_ptr,
- false);
- if (retval) {
- dev_err(dev->adev->dev,
- "failed to allocate process context bo\n");
- goto err_free_pdd;
- }
- memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
- }
-
p->pdds[p->n_pdds++] = pdd;
if (kfd_dbg_is_per_vmid_supported(pdd->dev))
pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
@@ -1656,10 +1644,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
idr_init(&pdd->alloc_idr);
return pdd;
-
-err_free_pdd:
- kfree(pdd);
- return NULL;
}
/**
@@ -1702,12 +1686,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
&p->kgd_process_info,
- &ef);
+ p->ef ? NULL : &ef);
if (ret) {
dev_err(dev->adev->dev, "Failed to create process VM object\n");
return ret;
}
- RCU_INIT_POINTER(p->ef, ef);
+
+ if (!p->ef)
+ RCU_INIT_POINTER(p->ef, ef);
+
pdd->drm_priv = drm_file->private_data;
ret = kfd_process_device_reserve_ib_mem(pdd);
@@ -2141,10 +2128,11 @@ int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
irq_drain_fence[3] = pdd->process->pasid;
/*
- * For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
+ * For GFX 9.4.3/9.5.0, also send the NodeId in IH cookie DW[3]
*/
if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4)) {
+ KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 5, 0)) {
node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
irq_drain_fence[3] |= node_id << 16;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 01b960b15274..bcddd989c7f3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -86,9 +86,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
if (pdd->already_dequeued)
return;
-
+ /* The MES context flush needs to filter out the case in which the
+ * KFD process was created without ever setting up the MES context
+ * and queue needed for creating a compute queue.
+ */
dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
- if (dev->kfd->shared_resources.enable_mes &&
+ if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&
down_read_trylock(&dev->adev->reset_domain->sem)) {
amdgpu_mes_flush_shader_debugger(dev->adev,
pdd->proc_ctx_gpu_addr);
@@ -131,8 +134,9 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
if (!gws && pdd->qpd.num_gws == 0)
return -EINVAL;
- if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
- KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
+ if ((KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 5, 0)) &&
!dev->kfd->shared_resources.enable_mes) {
if (gws)
ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
@@ -197,6 +201,7 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
if (pqn->q->gws) {
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 4) &&
+ KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 5, 0) &&
!dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(
pqm->process->kgd_process_info, pqn->q->gws);
@@ -212,13 +217,17 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
void pqm_uninit(struct process_queue_manager *pqm)
{
struct process_queue_node *pqn, *next;
- struct kfd_process_device *pdd;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
if (pqn->q) {
- pdd = kfd_get_process_device_data(pqn->q->device, pqm->process);
- kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
- kfd_queue_release_buffers(pdd, &pqn->q->properties);
+ struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device,
+ pqm->process);
+ if (pdd) {
+ kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
+ kfd_queue_release_buffers(pdd, &pqn->q->properties);
+ } else {
+ WARN_ON(!pdd);
+ }
pqm_clean_queue_resource(pqm, pqn);
}
@@ -235,7 +244,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_node *dev, struct queue **q,
struct queue_properties *q_properties,
- struct file *f, unsigned int qid)
+ unsigned int qid)
{
int retval;
@@ -300,7 +309,6 @@ cleanup:
int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_node *dev,
- struct file *f,
struct queue_properties *properties,
unsigned int *qid,
const struct kfd_criu_queue_priv_data *q_data,
@@ -317,11 +325,12 @@ int pqm_create_queue(struct process_queue_manager *pqm,
unsigned int max_queues = 127; /* HWS limit */
/*
- * On GFX 9.4.3, increase the number of queues that
- * can be created to 255. No HWS limit on GFX 9.4.3.
+ * On GFX 9.4.3/9.5.0, increase the number of queues that
+ * can be created to 255. No HWS limit on GFX 9.4.3/9.5.0.
*/
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0))
max_queues = 255;
q = NULL;
@@ -374,7 +383,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
* allocate_sdma_queue() in create_queue() has the
* corresponding check logic.
*/
- retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -395,7 +404,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -1029,8 +1038,7 @@ int kfd_criu_restore_queue(struct kfd_process *p,
print_queue_properties(&qp);
- ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, q_data, mqd, ctl_stack,
- NULL);
+ ret = pqm_create_queue(&p->pqm, pdd->dev, &qp, &queue_id, q_data, mqd, ctl_stack, NULL);
if (ret) {
pr_err("Failed to create new queue err:%d\n", ret);
goto exit;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index ad29634f8b44..ecccd7adbab4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -394,7 +394,8 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
if ((gfxv / 100 * 100) == 90400 || /* GFX_VERSION_AQUA_VANJARAM */
gfxv == 90010 || /* GFX_VERSION_ALDEBARAN */
- gfxv == 90008) /* GFX_VERSION_ARCTURUS */
+ gfxv == 90008 || /* GFX_VERSION_ARCTURUS */
+ gfxv == 90500)
vgpr_size = 0x80000;
else if (gfxv == 110000 || /* GFX_VERSION_PLUM_BONITO */
gfxv == 110001 || /* GFX_VERSION_WHEAT_NAS */
@@ -405,9 +406,10 @@ static u32 kfd_get_vgpr_size_per_cu(u32 gfxv)
return vgpr_size;
}
-#define WG_CONTEXT_DATA_SIZE_PER_CU(gfxv) \
+#define WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props) \
(kfd_get_vgpr_size_per_cu(gfxv) + SGPR_SIZE_PER_CU +\
- LDS_SIZE_PER_CU + HWREG_SIZE_PER_CU)
+ (((gfxv) == 90500) ? (props->lds_size_in_kb << 10) : LDS_SIZE_PER_CU) +\
+ HWREG_SIZE_PER_CU)
#define CNTL_STACK_BYTES_PER_WAVE(gfxv) \
((gfxv) >= 100100 ? 12 : 8) /* GFX_VERSION_NAVI10*/
@@ -431,7 +433,7 @@ void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev)
min(cu_num * 40, props->array_count / props->simd_arrays_per_engine * 512)
: cu_num * 32;
- wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv), PAGE_SIZE);
+ wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props), PAGE_SIZE);
ctl_stack_size = wave_num * CNTL_STACK_BYTES_PER_WAVE(gfxv) + 8;
ctl_stack_size = ALIGN(SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER + ctl_stack_size,
PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index de8b9abf7afc..9b8169761ec5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -44,7 +44,7 @@ struct kfd_smi_client {
bool suser;
};
-#define MAX_KFIFO_SIZE 1024
+#define KFD_MAX_KFIFO_SIZE 8192
static __poll_t kfd_smi_ev_poll(struct file *, struct poll_table_struct *);
static ssize_t kfd_smi_ev_read(struct file *, char __user *, size_t, loff_t *);
@@ -86,7 +86,7 @@ static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user,
struct kfd_smi_client *client = filep->private_data;
unsigned char *buf;
- size = min_t(size_t, size, MAX_KFIFO_SIZE);
+ size = min_t(size_t, size, KFD_MAX_KFIFO_SIZE);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -292,12 +292,13 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
- uint32_t from, uint32_t to, uint32_t trigger)
+ uint32_t from, uint32_t to, uint32_t trigger,
+ int error_code)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END,
KFD_EVENT_FMT_MIGRATE_END(
ktime_get_boottime_ns(), pid, start, end - start,
- from, to, trigger));
+ from, to, trigger, error_code));
}
void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
@@ -354,7 +355,7 @@ int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
return -ENOMEM;
INIT_LIST_HEAD(&client->list);
- ret = kfifo_alloc(&client->fifo, MAX_KFIFO_SIZE, GFP_KERNEL);
+ ret = kfifo_alloc(&client->fifo, KFD_MAX_KFIFO_SIZE, GFP_KERNEL);
if (ret) {
kfree(client);
return ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
index 85010b8307f8..503bff13d815 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h
@@ -44,7 +44,8 @@ void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
uint32_t trigger);
void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
- uint32_t from, uint32_t to, uint32_t trigger);
+ uint32_t from, uint32_t to, uint32_t trigger,
+ int error_code);
void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
uint32_t trigger);
void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 04e746923697..bd3e20d981e0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -405,6 +405,27 @@ static void svm_range_bo_release(struct kref *kref)
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
+
+ if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ struct mm_struct *mm;
+
+ mm = svm_bo->eviction_fence->mm;
+ /*
+ * A forked child process can still hold a reference on the svm_bo device
+ * pages, so the svm_bo could be released after the parent process is gone.
+ */
+ p = kfd_lookup_process_by_mm(mm);
+ if (p) {
+ pdd = kfd_get_process_device_data(svm_bo->node, p);
+ if (pdd)
+ atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
+ kfd_unref_process(p);
+ }
+ mmput(mm);
+ }
+
if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
/* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
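mmget_not_zero() is a conditional grab: it pins the mm only if its user count has not already dropped to zero, which is why the accounting block above can run safely even after the owning process may have exited. A generic sketch of that try-get pattern using C11 atomics (the struct and function names are hypothetical):

/* Generic "get only if still alive" refcount, mirroring what
 * mmget_not_zero() does for mm_users; illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int users;
};

static bool obj_get_not_zero(struct obj *o)
{
	int c = atomic_load(&o->users);

	while (c != 0) {
		/* Only bump the count if nobody dropped it to zero
		 * between the load and this compare-exchange. */
		if (atomic_compare_exchange_weak(&o->users, &c, c + 1))
			return true;
	}
	return false;
}

static void obj_put(struct obj *o)
{
	atomic_fetch_sub(&o->users, 1); /* caller frees at zero */
}

int main(void)
{
	struct obj o = { 1 };

	if (obj_get_not_zero(&o))
		obj_put(&o);
	return 0;
}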
@@ -532,6 +553,7 @@ int
svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear)
{
+ struct kfd_process_device *pdd;
struct amdgpu_bo_param bp;
struct svm_range_bo *svm_bo;
struct amdgpu_bo_user *ubo;
@@ -623,6 +645,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
list_add(&prange->svm_bo_list, &svm_bo->range_list);
spin_unlock(&svm_bo->list_lock);
+ pdd = svm_range_get_pdd_by_node(prange, node);
+ if (pdd)
+ atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
+
return 0;
reserve_bo_failed:
@@ -1169,6 +1195,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
struct kfd_node *bo_node;
uint32_t flags = prange->flags;
uint32_t mapping_flags = 0;
+ uint32_t gc_ip_version = KFD_GC_VERSION(node);
uint64_t pte_flags;
bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
@@ -1178,7 +1205,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (domain == SVM_RANGE_VRAM_DOMAIN)
bo_node = prange->svm_bo->node;
- switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) {
+ switch (gc_ip_version) {
case IP_VERSION(9, 4, 1):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_node == node) {
@@ -1215,8 +1242,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
break;
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
if (ext_coherent)
- mtype_local = node->adev->rev_id ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_UC;
+ mtype_local = (gc_ip_version < IP_VERSION(9, 5, 0) && !node->adev->rev_id) ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_CC;
else
mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -1231,9 +1260,13 @@ svm_range_get_pte_flags(struct kfd_node *node,
*/
else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
mapping_flags |= AMDGPU_VM_MTYPE_NC;
- /* PCIe P2P or extended system scope coherence */
- else
+ /* PCIe P2P on GPUs pre-9.5.0 */
+ else if (gc_ip_version < IP_VERSION(9, 5, 0) &&
+ !svm_nodes_in_same_hive(bo_node, node))
mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ /* Other remote memory */
+ else
+ mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the APU */
} else if (node->adev->flags & AMD_IS_APU) {
/* On NUMA systems, locality is determined per-page
@@ -1245,7 +1278,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the dGPU */
} else {
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ if (gc_ip_version < IP_VERSION(9, 5, 0))
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
}
break;
case IP_VERSION(12, 0, 0):
@@ -1273,7 +1309,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
pte_flags = AMDGPU_PTE_VALID;
pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
- if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
+ if (gc_ip_version >= IP_VERSION(12, 0, 0))
pte_flags |= AMDGPU_PTE_IS_PTE;
pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
@@ -3085,8 +3121,6 @@ retry_write_locked:
start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
if (prange->actual_loc != 0 || best_loc != 0) {
- migration = true;
-
if (best_loc) {
r = svm_migrate_to_vram(prange, best_loc, start, last,
mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
@@ -3109,7 +3143,9 @@ retry_write_locked:
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
r, svms, start, last);
- goto out_unlock_range;
+ goto out_migrate_fail;
+ } else {
+ migration = true;
}
}
@@ -3119,6 +3155,7 @@ retry_write_locked:
pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
r, svms, start, last);
+out_migrate_fail:
kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
migration);
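The reordering above makes `migration` report what actually happened: it is set only after a migration succeeds, and the new out_migrate_fail label still lets the SMI page-fault-end event fire with migration == false on failure. A compact sketch of that flag-on-success pattern:

/* Report an action flag only when the action succeeded; the
 * failure path still emits the end event. Sketch only. */
#include <stdbool.h>
#include <stdio.h>

static int do_migrate(int fail) { return fail ? -1 : 0; }

static int handle_fault(int fail)
{
	bool migration = false;
	int r;

	r = do_migrate(fail);
	if (r)
		goto out_migrate_fail;
	migration = true;

	/* ... map the range to GPUs here ... */

out_migrate_fail:
	printf("fault end, migration=%d\n", migration);
	return r;
}

int main(void)
{
	handle_fault(0);
	handle_fault(1);
	return 0;
}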
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 3871591c9aec..ceb9fb475ef1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1714,7 +1714,8 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
pcache->cacheline_size = pcache_info[cache_type].cache_line_size;
if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 4))
+ KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 4) ||
+ KFD_GC_VERSION(knode) == IP_VERSION(9, 5, 0))
mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
else
mode = UNKNOWN_MEMORY_PARTITION_MODE;
@@ -1776,7 +1777,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
struct amdgpu_cu_info *cu_info = &kdev->adev->gfx.cu_info;
struct amdgpu_gfx_config *gfx_info = &kdev->adev->gfx.config;
int gpu_processor_id;
- struct kfd_cache_properties *props_ext;
+ struct kfd_cache_properties *props_ext = NULL;
int num_of_entries = 0;
int num_of_cache_types = 0;
struct kfd_gpu_cache_info cache_info[KFD_MAX_CACHE_TYPES];
@@ -1998,6 +1999,8 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2))
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
+
+ dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index df17e79c45c7..abd3b6564373 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -7,20 +7,23 @@ menu "Display Engine Configuration"
config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
- depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64
+ depends on BROKEN || !CC_IS_CLANG || ARM64 || LOONGARCH || RISCV || SPARC64 || X86_64
+ select CEC_CORE
+ select CEC_NOTIFIER
select SND_HDA_COMPONENT if SND_HDA_CORE
# !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752
- select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || RISCV))
+ select DRM_AMD_DC_FP if ARCH_HAS_KERNEL_FPU_SUPPORT && !(CC_IS_CLANG && (ARM64 || LOONGARCH || RISCV))
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
- calculate_bandwidth() is presently broken on all !(X86_64 || SPARC64 || ARM64)
- architectures built with Clang (all released versions), whereby the stack
- frame gets blown up to well over 5k. This would cause an immediate kernel
- panic on most architectures. We'll revert this when the following bug report
- has been resolved: https://github.com/llvm/llvm-project/issues/41896.
+ calculate_bandwidth() is presently broken on all !(X86_64 || SPARC64 ||
+ ARM64 || LOONGARCH || RISCV) architectures built with Clang (all released
+ versions), whereby the stack frame gets blown up to well over 5k. This
+ would cause an immediate kernel panic on most architectures. We'll revert
+ this when the following bug report has been resolved:
+ https://github.com/llvm/llvm-project/issues/41896.
config DRM_AMD_DC_FP
def_bool n
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 60c617fcc97e..ac3fd81fecef 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -93,10 +93,12 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_utils.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <media/cec-notifier.h>
#include <acpi/video.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -320,18 +322,18 @@ static bool dm_is_idle(void *handle)
return true;
}
-static int dm_wait_for_idle(void *handle)
+static int dm_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX todo */
return 0;
}
-static bool dm_check_soft_reset(void *handle)
+static bool dm_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
return false;
}
-static int dm_soft_reset(void *handle)
+static int dm_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* XXX todo */
return 0;
@@ -955,20 +957,20 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
}
}
-static int dm_set_clockgating_state(void *handle,
+static int dm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dm_set_powergating_state(void *handle,
+static int dm_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
}
/* Prototypes of private functions */
-static int dm_early_init(void *handle);
+static int dm_early_init(struct amdgpu_ip_block *ip_block);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -1036,8 +1038,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
continue;
*enabled = true;
+ mutex_lock(&connector->eld_mutex);
ret = drm_eld_size(connector->eld);
memcpy(buf, connector->eld, min(max_bytes, ret));
+ mutex_unlock(&connector->eld_mutex);
break;
}
@@ -1307,6 +1311,29 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
adev->dm.dmcub_fw_version);
+ /* Keep sanity checks off if the DMUB firmware is new enough:
+ * DCN31 >= 4.0.59.0
+ * DCN314 >= 8.0.16.0
+ * Otherwise, turn sanity checks on.
+ */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ if (adev->dm.dmcub_fw_version &&
+ adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
+ adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
+ adev->dm.dc->debug.sanity_checks = true;
+ break;
+ case IP_VERSION(3, 1, 4):
+ if (adev->dm.dmcub_fw_version &&
+ adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
+ adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
+ adev->dm.dc->debug.sanity_checks = true;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
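The version-range gate above works because a packed major/minor/revision value compares as a plain integer. The exact layout of DMUB_FW_VERSION() lives in the DMUB headers; the packing below is an assumption for illustration only:

/* Hypothetical major.minor.revision packing so ranges compare as
 * plain integers; the real layout is defined by the DMUB headers. */
#include <stdint.h>
#include <stdio.h>

#define FW_VERSION(maj, min, rev) \
	((((uint32_t)(maj) & 0xFF) << 24) | \
	 (((uint32_t)(min) & 0xFF) << 16) | \
	  ((uint32_t)(rev) & 0xFFFF))

int main(void)
{
	uint32_t fw = FW_VERSION(4, 0, 36);

	/* Sanity checks stay on for [4.0.0, 4.0.59) on DCN31. */
	if (fw >= FW_VERSION(4, 0, 0) && fw < FW_VERSION(4, 0, 59))
		printf("sanity checks on\n");
	else
		printf("sanity checks off\n");
	return 0;
}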
@@ -1696,6 +1723,26 @@ dm_allocate_gpu_mem(
return da->cpu_ptr;
}
+void
+dm_free_gpu_mem(
+ struct amdgpu_device *adev,
+ enum dc_gpu_mem_alloc_type type,
+ void *pvMem)
+{
+ struct dal_allocation *da;
+
+ /* walk the da list in DM */
+ list_for_each_entry(da, &adev->dm.da_list, list) {
+ if (pvMem == da->cpu_ptr) {
+ amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
+ list_del(&da->list);
+ kfree(da);
+ break;
+ }
+ }
+}
+
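dm_free_gpu_mem() finds the tracking entry whose CPU pointer matches, frees the BO, and unlinks the node. The same walk/match/unlink shape in portable C with a singly linked list (all names are hypothetical):

/* Free a tracked allocation by its CPU pointer: find, unlink,
 * release. Plain-C sketch of the dal_allocation list walk. */
#include <stdlib.h>

struct alloc_node {
	void *cpu_ptr;
	struct alloc_node *next;
};

static void free_tracked(struct alloc_node **head, void *cpu_ptr)
{
	struct alloc_node **pp = head;

	while (*pp) {
		if ((*pp)->cpu_ptr == cpu_ptr) {
			struct alloc_node *victim = *pp;

			*pp = victim->next;    /* unlink */
			free(victim->cpu_ptr); /* stands in for the bo free */
			free(victim);
			return;
		}
		pp = &(*pp)->next;
	}
}

int main(void)
{
	struct alloc_node *head = NULL;

	free_tracked(&head, NULL); /* no-op on an empty list */
	return 0;
}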
static enum dmub_status
dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
enum dmub_gpint_command command_code,
@@ -1762,16 +1809,20 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
/* Send the chunk */
ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
if (ret != DMUB_STATUS_OK)
- /* No need to free bb here since it shall be done in dm_sw_fini() */
- return NULL;
+ goto free_bb;
}
/* Now ask DMUB to copy the bb */
ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
if (ret != DMUB_STATUS_OK)
- return NULL;
+ goto free_bb;
return bb;
+
+free_bb:
+ dm_free_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, (void *) bb);
+ return NULL;
}
static enum dmub_ips_disable_type dm_get_default_ips_mode(
@@ -1886,7 +1937,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
else
init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
} else {
- init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(2, 0, 3))
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display == 1);
+ else
+ init_data.flags.gpu_vm_support =
+ (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
}
adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
@@ -1979,6 +2034,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP)
+ adev->dm.dc->debug.force_disable_subvp = true;
+
if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
adev->dm.dc->debug.using_dml2 = true;
adev->dm.dc->debug.using_dml21 = true;
@@ -2101,9 +2159,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
- if (!adev->dm.secure_display_ctxs)
+ amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctx.crtc_ctx)
DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1))
+ adev->dm.secure_display_ctx.support_mul_roi = true;
+
#endif
DRM_DEBUG_DRIVER("KMS initialized.\n");
@@ -2115,9 +2177,9 @@ error:
return -EINVAL;
}
-static int amdgpu_dm_early_fini(void *handle)
+static int amdgpu_dm_early_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_dm_audio_fini(adev);
@@ -2146,15 +2208,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- if (adev->dm.secure_display_ctxs) {
+ if (adev->dm.secure_display_ctx.crtc_ctx) {
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->dm.secure_display_ctxs[i].crtc) {
- flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
- flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ if (adev->dm.secure_display_ctx.crtc_ctx[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctx.crtc_ctx[i].forward_roi_work);
}
}
- kfree(adev->dm.secure_display_ctxs);
- adev->dm.secure_display_ctxs = NULL;
+ kfree(adev->dm.secure_display_ctx.crtc_ctx);
+ adev->dm.secure_display_ctx.crtc_ctx = NULL;
}
#endif
if (adev->dm.hdcp_workqueue) {
@@ -2287,7 +2349,8 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, "%s", fw_name_dmcu);
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name_dmcu);
if (r == -ENODEV) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
@@ -2509,9 +2572,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return 0;
}
-static int dm_sw_init(void *handle)
+static int dm_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
@@ -2531,9 +2594,9 @@ static int dm_sw_init(void *handle)
return load_dmcu_fw(adev);
}
-static int dm_sw_fini(void *handle)
+static int dm_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct dal_allocation *da;
list_for_each_entry(da, &adev->dm.da_list, list) {
@@ -2541,11 +2604,11 @@ static int dm_sw_fini(void *handle)
amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
list_del(&da->list);
kfree(da);
+ adev->dm.bb_from_dmub = NULL;
break;
}
}
- adev->dm.bb_from_dmub = NULL;
kfree(adev->dm.dmub_fb_info);
adev->dm.dmub_fb_info = NULL;
@@ -2598,9 +2661,9 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
return ret;
}
-static int dm_late_init(void *handle)
+static int dm_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct dmcu_iram_parameters params;
unsigned int linear_lut[16];
@@ -2695,6 +2758,48 @@ out_fail:
mutex_unlock(&mgr->lock);
}
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct cec_notifier *n = aconnector->notifier;
+
+ if (!n)
+ return;
+
+ cec_notifier_phys_addr_invalidate(n);
+}
+
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct cec_notifier *n = aconnector->notifier;
+
+ if (!n)
+ return;
+
+ cec_notifier_set_phys_addr(n,
+ connector->display_info.source_physical_address);
+}
+
+static void s3_handle_hdmi_cec(struct drm_device *ddev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(ddev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (suspend)
+ hdmi_cec_unset_edid(aconnector);
+ else
+ hdmi_cec_set_edid(aconnector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
@@ -2790,7 +2895,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
/**
* dm_hw_init() - Initialize DC device
- * @handle: The base driver device containing the amdgpu_dm device.
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Initialize the &struct amdgpu_display_manager device. This involves calling
* the initializers of each DM component, then populating the struct with them.
@@ -2808,9 +2913,9 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
* - Vblank support
* - Debug FS entries, if enabled
*/
-static int dm_hw_init(void *handle)
+static int dm_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r;
/* Create DAL display manager */
@@ -2824,15 +2929,15 @@ static int dm_hw_init(void *handle)
/**
* dm_hw_fini() - Teardown DC device
- * @handle: The base driver device containing the amdgpu_dm device.
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
* Teardown components within &struct amdgpu_display_manager that require
* cleanup. This involves cleaning up the DRM device, DC, and any modules that
* were loaded. Also flush IRQ workqueues and disable them.
*/
-static int dm_hw_fini(void *handle)
+static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
amdgpu_dm_hpd_fini(adev);
@@ -2936,9 +3041,9 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
}
}
-static int dm_suspend(void *handle)
+static int dm_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
@@ -2966,16 +3071,19 @@ static int dm_suspend(void *handle)
if (IS_ERR(adev->dm.cached_state))
return PTR_ERR(adev->dm.cached_state);
+ s3_handle_hdmi_cec(adev_to_drm(adev), true);
+
s3_handle_mst(adev_to_drm(adev), true);
amdgpu_dm_irq_suspend(adev);
hpd_rx_irq_work_suspend(dm);
- if (adev->dm.dc->caps.ips_support)
- dc_allow_idle_optimizations(adev->dm.dc, true);
-
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+
+ if (dm->dc->caps.ips_support && adev->in_s0ix)
+ dc_allow_idle_optimizations(dm->dc, true);
+
dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
return 0;
@@ -3124,9 +3232,9 @@ cleanup:
kfree(bundle);
}
-static int dm_resume(void *handle)
+static int dm_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct drm_device *ddev = adev_to_drm(adev);
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
@@ -3141,8 +3249,7 @@ static int dm_resume(void *handle)
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
- int i, r, j, ret;
- bool need_hotplug = false;
+ int i, r, j;
struct dc_commit_streams_params commit_params = {};
if (dm->dc->caps.ips_support) {
@@ -3238,6 +3345,8 @@ static int dm_resume(void *handle)
*/
amdgpu_dm_irq_resume_early(adev);
+ s3_handle_hdmi_cec(ddev, false);
+
/* On resume we need to rewrite the MSTM control bits to enable MST*/
s3_handle_mst(ddev, false);
@@ -3331,23 +3440,16 @@ static int dm_resume(void *handle)
aconnector->mst_root)
continue;
- ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
-
- if (ret < 0) {
- dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
- aconnector->dc_link);
- need_hotplug = true;
- }
+ drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
}
drm_connector_list_iter_end(&iter);
- if (need_hotplug)
- drm_kms_helper_hotplug_event(ddev);
-
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
+ drm_kms_helper_hotplug_event(ddev);
+
return 0;
}
@@ -3378,8 +3480,6 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.soft_reset = dm_soft_reset,
.set_clockgating_state = dm_set_clockgating_state,
.set_powergating_state = dm_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version dm_ip_block = {
@@ -3415,6 +3515,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct drm_luminance_range_info *luminance_range;
+ int min_input_signal_override;
if (aconnector->bl_idx == -1 ||
aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
@@ -3439,6 +3540,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->aux_support = false;
else if (amdgpu_backlight == 1)
caps->aux_support = true;
+ if (caps->aux_support)
+ aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX;
luminance_range = &conn_base->display_info.luminance_range;
@@ -3449,6 +3552,10 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->aux_min_input_signal = 0;
caps->aux_max_input_signal = 512;
}
+
+ min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
+ if (min_input_signal_override >= 0)
+ caps->min_input_signal = min_input_signal_override;
}
void amdgpu_dm_update_connector_after_detect(
@@ -3494,7 +3601,7 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
amdgpu_dm_update_freesync_caps(connector,
- aconnector->edid);
+ aconnector->drm_edid);
} else {
amdgpu_dm_update_freesync_caps(connector, NULL);
if (!aconnector->dc_sink) {
@@ -3553,18 +3660,21 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
- aconnector->edid = NULL;
+ aconnector->drm_edid = NULL;
+ hdmi_cec_unset_edid(aconnector);
if (aconnector->dc_link->aux_mode) {
- drm_dp_cec_unset_edid(
- &aconnector->dm_dp_aux.aux);
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
}
} else {
- aconnector->edid =
- (struct edid *)sink->dc_edid.raw_edid;
+ const struct edid *edid = (const struct edid *)sink->dc_edid.raw_edid;
+ aconnector->drm_edid = drm_edid_alloc(edid, sink->dc_edid.length);
+ drm_edid_connector_update(connector, aconnector->drm_edid);
+
+ hdmi_cec_set_edid(aconnector);
if (aconnector->dc_link->aux_mode)
- drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
- aconnector->edid);
+ drm_dp_cec_attach(&aconnector->dm_dp_aux.aux,
+ connector->display_info.source_physical_address);
}
if (!aconnector->timing_requested) {
@@ -3575,17 +3685,17 @@ void amdgpu_dm_update_connector_after_detect(
"failed to create aconnector->requested_timing\n");
}
- drm_connector_update_edid_property(connector, aconnector->edid);
- amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
+ amdgpu_dm_update_freesync_caps(connector, aconnector->drm_edid);
update_connector_ext_caps(aconnector);
} else {
+ hdmi_cec_unset_edid(aconnector);
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
- drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->edid = NULL;
+ drm_edid_free(aconnector->drm_edid);
+ aconnector->drm_edid = NULL;
kfree(aconnector->timing_requested);
aconnector->timing_requested = NULL;
/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
@@ -4621,7 +4731,12 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
if (!rc)
DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
} else {
- rc = dc_link_set_backlight_level(link, brightness, 0);
+ struct set_backlight_level_params backlight_level_params = { 0 };
+
+ backlight_level_params.backlight_pwm_u16_16 = brightness;
+ backlight_level_params.transition_time_in_ms = 0;
+
+ rc = dc_link_set_backlight_level(link, &backlight_level_params);
if (!rc)
DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
}
@@ -5176,15 +5291,20 @@ static ssize_t s3_debug_store(struct device *device,
int s3_state;
struct drm_device *drm_dev = dev_get_drvdata(device);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_ip_block *ip_block;
+
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE);
+ if (!ip_block)
+ return -EINVAL;
ret = kstrtoint(buf, 0, &s3_state);
if (ret == 0) {
if (s3_state) {
- dm_resume(adev);
+ dm_resume(ip_block);
drm_kms_helper_hotplug_event(adev_to_drm(adev));
} else
- dm_suspend(adev);
+ dm_suspend(ip_block);
}
return ret == 0 ? count : 0;
@@ -5252,13 +5372,14 @@ static int dm_init_microcode(struct amdgpu_device *adev)
/* ASIC doesn't support DMUB. */
return 0;
}
- r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, "%s", fw_name_dmub);
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name_dmub);
return r;
}
-static int dm_early_init(void *handle)
+static int dm_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
struct atom_context *ctx = mode_info->atom_context;
int index = GetIndexIntoMasterTable(DATA, Object_Header);
@@ -5468,8 +5589,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const u64 tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc)
+ bool tmz_surface)
{
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
@@ -5568,7 +5688,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
&plane_info->tiling_info,
&plane_info->plane_size,
&plane_info->dcc, address,
- tmz_surface, force_disable_dcc);
+ tmz_surface);
if (ret)
return ret;
@@ -5589,7 +5709,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_scaling_info scaling_info;
struct dc_plane_info plane_info;
int ret;
- bool force_disable_dcc = false;
ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
if (ret)
@@ -5600,13 +5719,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->clip_rect = scaling_info.clip_rect;
dc_plane_state->scaling_quality = scaling_info.scaling_quality;
- force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
ret = fill_dc_plane_info_and_addr(adev, plane_state,
afb->tiling_flags,
&plane_info,
&dc_plane_state->address,
- afb->tmz_surface,
- force_disable_dcc);
+ afb->tmz_surface);
if (ret)
return ret;
@@ -6761,7 +6878,7 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+ aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
@@ -6988,6 +7105,7 @@ static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
if (amdgpu_dm_should_create_sysfs(amdgpu_dm_connector))
sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+ cec_notifier_conn_unregister(amdgpu_dm_connector->notifier);
drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
@@ -7121,32 +7239,24 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
- struct edid *edid;
- struct i2c_adapter *ddc;
-
- if (dc_link && dc_link->aux_mode)
- ddc = &aconnector->dm_dp_aux.aux.ddc;
- else
- ddc = &aconnector->i2c->base;
+ const struct drm_edid *drm_edid;
- /*
- * Note: drm_get_edid gets edid in the following order:
- * 1) override EDID if set via edid_override debugfs,
- * 2) firmware EDID if set via edid_firmware module parameter
- * 3) regular DDC read.
- */
- edid = drm_get_edid(connector, ddc);
- if (!edid) {
+ drm_edid = drm_edid_read(connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
}
- aconnector->edid = edid;
-
+ aconnector->drm_edid = drm_edid;
/* Update emulated (virtual) sink's EDID */
if (dc_em_sink && dc_link) {
+ // FIXME: Get rid of drm_edid_raw()
+ const struct edid *edid = drm_edid_raw(drm_edid);
+
memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
- memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+ memmove(dc_em_sink->dc_edid.raw_edid, edid,
+ (edid->extensions + 1) * EDID_LENGTH);
dm_helpers_parse_edid_caps(
dc_link,
&dc_em_sink->dc_edid,
@@ -7176,36 +7286,26 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
- struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
- struct edid *edid;
- struct i2c_adapter *ddc;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
- if (dc_link->aux_mode)
- ddc = &aconnector->dm_dp_aux.aux.ddc;
- else
- ddc = &aconnector->i2c->base;
-
- /*
- * Note: drm_get_edid gets edid in the following order:
- * 1) override EDID if set via edid_override debugfs,
- * 2) firmware EDID if set via edid_firmware module parameter
- * 3) regular DDC read.
- */
- edid = drm_get_edid(connector, ddc);
- if (!edid) {
+ drm_edid = drm_edid_read(connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
}
- if (drm_detect_hdmi_monitor(edid))
+ if (connector->display_info.is_hdmi)
init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
- aconnector->edid = edid;
+ aconnector->drm_edid = drm_edid;
+ edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
aconnector->dc_em_sink = dc_link_add_remote_sink(
aconnector->dc_link,
(uint8_t *)edid,
@@ -7312,10 +7412,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
enum dc_status dc_result = DC_OK;
+ uint8_t bpc_limit = 6;
if (!dm_state)
return NULL;
+ if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ bpc_limit = 8;
+
do {
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
@@ -7336,11 +7441,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
- dc_result,
+ dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
+ dc_color_depth_to_str(stream->timing.display_color_depth),
dc_status_to_str(dc_result));
dc_stream_release(stream);
@@ -7348,10 +7454,13 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
requested_bpc -= 2; /* lower bpc to retry validation */
}
- } while (stream == NULL && requested_bpc >= 6);
+ } while (stream == NULL && requested_bpc >= bpc_limit);
- if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
- DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+ if ((dc_result == DC_FAIL_ENC_VALIDATE ||
+ dc_result == DC_EXCEED_DONGLE_CAP) &&
+ !aconnector->force_yuv420_output) {
+ DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
+ __func__, __LINE__);
aconnector->force_yuv420_output = true;
stream = create_validate_stream_for_sink(aconnector, drm_mode,
@@ -7892,16 +8001,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
}
static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
- struct edid *edid)
+ const struct drm_edid *drm_edid)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (edid) {
+ if (drm_edid) {
/* empty probed_modes */
INIT_LIST_HEAD(&connector->probed_modes);
amdgpu_dm_connector->num_modes =
- drm_add_edid_modes(connector, edid);
+ drm_edid_connector_add_modes(connector);
/* sorting the probed modes before calling function
* amdgpu_dm_get_native_mode() since EDID can have
@@ -7915,10 +8024,10 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
amdgpu_dm_get_native_mode(connector);
/* Freesync capabilities are reset by calling
- * drm_add_edid_modes() and need to be
+ * drm_edid_connector_add_modes() and need to be
* restored here.
*/
- amdgpu_dm_update_freesync_caps(connector, edid);
+ amdgpu_dm_update_freesync_caps(connector, drm_edid);
} else {
amdgpu_dm_connector->num_modes = 0;
}
@@ -8014,12 +8123,12 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
}
static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
- struct edid *edid)
+ const struct drm_edid *drm_edid)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (!(amdgpu_freesync_vid_mode && edid))
+ if (!(amdgpu_freesync_vid_mode && drm_edid))
return;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -8032,24 +8141,24 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct drm_encoder *encoder;
- struct edid *edid = amdgpu_dm_connector->edid;
+ const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid;
struct dc_link_settings *verified_link_cap =
&amdgpu_dm_connector->dc_link->verified_link_cap;
const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
encoder = amdgpu_dm_connector_to_encoder(connector);
- if (!drm_edid_is_valid(edid)) {
+ if (!drm_edid) {
amdgpu_dm_connector->num_modes =
drm_add_modes_noedid(connector, 640, 480);
if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
amdgpu_dm_connector->num_modes +=
drm_add_modes_noedid(connector, 1920, 1080);
} else {
- amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
if (encoder)
amdgpu_dm_connector_add_common_modes(encoder, connector);
- amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
}
amdgpu_dm_fbc_init(connector);
@@ -8233,6 +8342,27 @@ create_i2c(struct ddc_service *ddc_service,
return i2c;
}
+int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector)
+{
+ struct cec_connector_info conn_info;
+ struct drm_device *ddev = aconnector->base.dev;
+ struct device *hdmi_dev = ddev->dev;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_HDMI_CEC) {
+ drm_info(ddev, "HDMI-CEC feature masked\n");
+ return -EINVAL;
+ }
+
+ cec_fill_conn_info_from_drm(&conn_info, &aconnector->base);
+ aconnector->notifier =
+ cec_notifier_conn_register(hdmi_dev, NULL, &conn_info);
+ if (!aconnector->notifier) {
+ drm_err(ddev, "Failed to create cec notifier\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
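The notifier registered here later receives the sink's CEC physical address parsed from the EDID (see hdmi_cec_set_edid() above). A CEC physical address is a 16-bit value holding four 4-bit levels (a.b.c.d); a small sketch of that encoding:

/* CEC physical addresses pack four 4-bit levels into 16 bits;
 * 0x1000 is "1.0.0.0", 0xFFFF means invalid/unknown. */
#include <stdint.h>
#include <stdio.h>

static void print_cec_phys_addr(uint16_t pa)
{
	printf("%u.%u.%u.%u\n",
	       (pa >> 12) & 0xF, (pa >> 8) & 0xF,
	       (pa >> 4) & 0xF, pa & 0xF);
}

int main(void)
{
	print_cec_phys_addr(0x1000); /* device on HDMI input 1 */
	print_cec_phys_addr(0x1200); /* behind a repeater on port 1.2 */
	return 0;
}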
/*
* Note: this function assumes that dc_link_detect() was called for the
@@ -8296,6 +8426,10 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ amdgpu_dm_initialize_hdmi_connector(aconnector);
+
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
@@ -8355,16 +8489,6 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
{
- /*
- * We have no guarantee that the frontend index maps to the same
- * backend index - some even map to more than one.
- *
- * TODO: Use a different interrupt or check DC itself for the mapping.
- */
- int irq_type =
- amdgpu_display_crtc_idx_to_irq_type(
- adev,
- acrtc->crtc_id);
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@@ -8373,7 +8497,8 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
if (amdgpu_ip_version(adev, DCE_HWIP, 0) <
IP_VERSION(3, 5, 0) ||
acrtc_state->stream->link->psr_settings.psr_version <
- DC_PSR_VERSION_UNSUPPORTED) {
+ DC_PSR_VERSION_UNSUPPORTED ||
+ !(adev->flags & AMD_IS_APU)) {
timing = &acrtc_state->stream->timing;
/* at least 2 frames */
@@ -8389,28 +8514,7 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
-
- amdgpu_irq_get(
- adev,
- &adev->pageflip_irq,
- irq_type);
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_get(
- adev,
- &adev->vline0_irq,
- irq_type);
-#endif
} else {
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- amdgpu_irq_put(
- adev,
- &adev->vline0_irq,
- irq_type);
-#endif
- amdgpu_irq_put(
- adev,
- &adev->pageflip_irq,
- irq_type);
drm_crtc_vblank_off(&acrtc->base);
}
}
@@ -8873,6 +8977,58 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
}
}
+static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
+ const struct dm_crtc_state *acrtc_state,
+ const u64 current_ts)
+{
+ struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+ struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
+
+ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if (pr->config.replay_supported && !pr->replay_feature_enabled)
+ amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+ else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !psr->psr_feature_enabled)
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ }
+
+ /* Decrement skip count when SR is enabled and we're doing fast updates. */
+ if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ (psr->psr_feature_enabled || pr->config.replay_supported)) {
+ if (aconn->sr_skip_count > 0)
+ aconn->sr_skip_count--;
+
+ /* Allow SR when skip count is 0. */
+ acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
+
+ /*
+ * If sink supports PSR SU/Panel Replay, there is no need to rely on
+ * a vblank event disable request to enable PSR/RP. PSR SU/RP
+ * can be enabled immediately once the OS demonstrates an
+ * adequate number of fast atomic commits to notify KMD
+ * of update events. See `vblank_control_worker()`.
+ */
+ if (!vrr_active &&
+ acrtc_attach->dm_irq_params.allow_sr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
+ if (pr->replay_feature_enabled && !pr->replay_allow_active)
+ amdgpu_dm_replay_enable(acrtc_state->stream, true);
+ if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
+ !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ }
+ } else {
+ acrtc_attach->dm_irq_params.allow_sr_entry = false;
+ }
+}
+
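The consolidated helper gates self-refresh entry on three things: fast-update-only streams (the skip counter must be exhausted), no VRR, and at least 500 ms since the last PSR dirty-rect change. A condensed sketch of that gate, with timestamps in nanoseconds and all names hypothetical:

/* Self-refresh entry gate: sketch of the conditions the helper
 * above checks; 500000000 ns == 500 ms of dirty-rect quiet time. */
#include <stdbool.h>
#include <stdint.h>

struct sr_state {
	int skip_count;         /* fast updates left before SR may engage */
	bool vrr_active;
	uint64_t last_dirty_ns; /* last PSR dirty-rect change */
};

static bool sr_entry_allowed(struct sr_state *s, uint64_t now_ns)
{
	if (s->skip_count > 0) {
		s->skip_count--; /* one more fast update observed */
		return false;
	}
	if (s->vrr_active)
		return false;
	return (now_ns - s->last_dirty_ns) > 500000000ull;
}

int main(void)
{
	struct sr_state s = { 1, false, 0 };

	sr_entry_allowed(&s, 600000000ull); /* burns the skip count */
	return sr_entry_allowed(&s, 600000000ull) ? 0 : 1;
}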
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
@@ -9001,7 +9157,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
afb->tiling_flags,
&bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address,
- afb->tmz_surface, false);
+ afb->tmz_surface);
drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
@@ -9026,7 +9182,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* during the PSR-SU was disabled.
*/
if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
- acrtc_attach->dm_irq_params.allow_psr_entry &&
+ acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
@@ -9035,7 +9191,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
timestamp_ns;
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
mutex_unlock(&dm->dc_lock);
}
}
@@ -9201,9 +9357,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
- if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
+ if (acrtc_state->stream->link->replay_settings.replay_allow_active)
+ amdgpu_dm_replay_disable(acrtc_state->stream);
+ if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
+ }
mutex_unlock(&dm->dc_lock);
/*
@@ -9244,57 +9403,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dm_update_pflip_irq_state(drm_to_adev(dev),
acrtc_attach);
- if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
- if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
- !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
- struct amdgpu_dm_connector *aconn =
- (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
- amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
- } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
- !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
-
- struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
- acrtc_state->stream->dm_stream_context;
-
- if (!aconn->disallow_edp_enter_psr)
- amdgpu_dm_link_setup_psr(acrtc_state->stream);
- }
- }
-
- /* Decrement skip count when PSR is enabled and we're doing fast updates. */
- if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
- acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
- struct amdgpu_dm_connector *aconn =
- (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
-
- if (aconn->psr_skip_count > 0)
- aconn->psr_skip_count--;
-
- /* Allow PSR when skip count is 0. */
- acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
-
- /*
- * If sink supports PSR SU, there is no need to rely on
- * a vblank event disable request to enable PSR. PSR SU
- * can be enabled immediately once OS demonstrates an
- * adequate number of fast atomic commits to notify KMD
- * of update events. See `vblank_control_worker()`.
- */
- if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
- acrtc_attach->dm_irq_params.allow_psr_entry &&
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
-#endif
- !acrtc_state->stream->link->psr_settings.psr_allow_active &&
- !aconn->disallow_edp_enter_psr &&
- (timestamp_ns -
- acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
- 500000000)
- amdgpu_dm_psr_enable(acrtc_state->stream);
- } else {
- acrtc_attach->dm_irq_params.allow_psr_entry = false;
- }
-
+ amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
mutex_unlock(&dm->dc_lock);
}
@@ -9427,6 +9536,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
bool mode_set_reset_required = false;
u32 i;
struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
+ bool set_backlight_level = false;
/* Disable writeback */
for_each_old_connector_in_state(state, connector, old_con_state, i) {
@@ -9546,6 +9656,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
acrtc->hw_mode = new_crtc_state->mode;
crtc->hwmode = new_crtc_state->mode;
mode_set_reset_required = true;
+ set_backlight_level = true;
} else if (modereset_required(new_crtc_state)) {
drm_dbg_atomic(dev,
"Atomic commit: RESET. crtc id %d:[%p]\n",
@@ -9573,7 +9684,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
WARN_ON(!dc_commit_streams(dm->dc, &params));
/* Allow idle optimization when vblank count is 0 for display off */
- if (dm->active_vblank_irq_count == 0)
+ if ((dm->active_vblank_irq_count == 0) && amdgpu_dm_is_headless(dm->adev))
dc_allow_idle_optimizations(dm->dc, true);
mutex_unlock(&dm->dc_lock);
@@ -9597,6 +9708,19 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
acrtc->otg_inst = status->primary_otg_inst;
}
}
+
+ /* During boot-up and resume, the DC layer resets the panel brightness
+ * to fix a flicker issue. That leaves dm->actual_brightness out of sync
+ * with the current panel brightness level (dm->brightness holds the
+ * correct level), so set the backlight level from dm->brightness after
+ * the mode is set.
+ */
+ if (set_backlight_level) {
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i])
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+ }
}
static void dm_set_writeback(struct amdgpu_display_manager *dm,
@@ -9996,14 +10120,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
+ uint8_t cnt;
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- acrtc->dm_irq_params.window_param.update_win = true;
-
- /**
- * It takes 2 frames for HW to stably generate CRC when
- * resuming from suspend, so we set skip_frame_cnt 2.
- */
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
+ for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
+ if (acrtc->dm_irq_params.window_param[cnt].enable) {
+ acrtc->dm_irq_params.window_param[cnt].update_win = true;
+
+ /**
+ * It takes 2 frames for HW to stably generate CRC when
+ * resuming from suspend, so we set skip_frame_cnt to 2.
+ */
+ acrtc->dm_irq_params.window_param[cnt].skip_frame_cnt = 2;
+ }
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
@@ -10104,6 +10233,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (i = 0; i < crtc_disable_count; i++)
pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
+
+ trace_amdgpu_dm_atomic_commit_tail_finish(state);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
@@ -11089,8 +11220,8 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
int plane_src_w, plane_src_h;
dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
- *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
- *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+ *out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0;
+ *out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0;
}
/*
@@ -11344,6 +11475,30 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
return 0;
}
+static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state, *old_plane_state;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ new_plane_state = drm_atomic_get_plane_state(state, plane);
+ old_plane_state = drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(new_plane_state) || IS_ERR(old_plane_state)) {
+ DRM_ERROR("Failed to get plane state for plane %s\n", plane->name);
+ return false;
+ }
+
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
+ return true;
+ }
+
+ return false;
+}
+
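amdgpu_dm_crtc_mem_type_changed() replaces the global lock_and_validation_needed side effect with a per-CRTC scan: async flips are now rejected only when a plane on that CRTC actually moved between memory domains. The shape of that scan in plain C (types and names hypothetical):

/* Reject a fast path only if some plane's backing memory type
 * changed between old and new state; illustrative scan. */
#include <stdbool.h>

struct plane_state { int mem_type; bool has_fb; };

static bool mem_type_changed(const struct plane_state *olds,
			     const struct plane_state *news,
			     int nplanes)
{
	for (int i = 0; i < nplanes; i++) {
		if (olds[i].has_fb && news[i].has_fb &&
		    olds[i].mem_type != news[i].mem_type)
			return true;
	}
	return false;
}

int main(void)
{
	struct plane_state olds[] = { { 0, true } };
	struct plane_state news[] = { { 1, true } };

	return mem_type_changed(olds, news, 1) ? 0 : 1;
}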
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
*
@@ -11541,10 +11696,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
- if (old_plane_state->fb && new_plane_state->fb &&
- get_mem_type(old_plane_state->fb) !=
- get_mem_type(new_plane_state->fb))
- lock_and_validation_needed = true;
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
@@ -11839,9 +11990,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/*
* Only allow async flips for fast updates that don't change
- * the FB pitch, the DCC state, rotation, etc.
+ * the FB pitch, the DCC state, rotation, mem_type, etc.
*/
- if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ if (new_crtc_state->async_flip &&
+ (lock_and_validation_needed ||
+ amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
@@ -12004,7 +12157,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
}
static void parse_edid_displayid_vrr(struct drm_connector *connector,
- struct edid *edid)
+ const struct edid *edid)
{
u8 *edid_ext = NULL;
int i;
@@ -12047,7 +12200,7 @@ static void parse_edid_displayid_vrr(struct drm_connector *connector,
}
static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
- struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ const struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
u8 *edid_ext = NULL;
int i;
@@ -12063,7 +12216,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
break;
}
- while (j < EDID_LENGTH) {
+ while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
@@ -12082,7 +12235,8 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
}
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
- struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+ const struct edid *edid,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
u8 *edid_ext = NULL;
int i;
@@ -12116,7 +12270,7 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
* amdgpu_dm_update_freesync_caps - Update Freesync capabilities
*
* @connector: Connector to query.
- * @edid: EDID from monitor
+ * @drm_edid: DRM EDID from monitor
*
* Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep
* track of some of the display information in the internal data struct used by
@@ -12124,19 +12278,16 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
* FreeSync parameters.
*/
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
- struct edid *edid)
+ const struct drm_edid *drm_edid)
{
int i = 0;
- struct detailed_timing *timing;
- struct detailed_non_pixel *data;
- struct detailed_data_monitor_range *range;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = NULL;
struct dc_sink *sink;
-
struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
+ const struct edid *edid;
bool freesync_capable = false;
enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
@@ -12149,13 +12300,13 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector->dc_sink :
amdgpu_dm_connector->dc_em_sink;
- if (!edid || !sink) {
+ drm_edid_connector_update(connector, drm_edid);
+
+ if (!drm_edid || !sink) {
dm_con_state = to_dm_connector_state(connector->state);
amdgpu_dm_connector->min_vfreq = 0;
amdgpu_dm_connector->max_vfreq = 0;
- connector->display_info.monitor_range.min_vfreq = 0;
- connector->display_info.monitor_range.max_vfreq = 0;
freesync_capable = false;
goto update;
@@ -12166,6 +12317,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
+ edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
+
/* Some eDP panels only have the refresh rate range info in DisplayID */
if ((connector->display_info.monitor_range.min_vfreq == 0 ||
connector->display_info.monitor_range.max_vfreq == 0))
@@ -12173,67 +12326,14 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
sink->sink_signal == SIGNAL_TYPE_EDP)) {
- bool edid_check_required = false;
-
if (amdgpu_dm_connector->dc_link &&
amdgpu_dm_connector->dc_link->dpcd_caps.allow_invalid_MSA_timing_param) {
- if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
- amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
- amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
- if (amdgpu_dm_connector->max_vfreq -
- amdgpu_dm_connector->min_vfreq > 10)
- freesync_capable = true;
- } else {
- edid_check_required = edid->version > 1 ||
- (edid->version == 1 &&
- edid->revision > 1);
- }
- }
-
- if (edid_check_required) {
- for (i = 0; i < 4; i++) {
-
- timing = &edid->detailed_timings[i];
- data = &timing->data.other_data;
- range = &data->data.range;
- /*
- * Check if monitor has continuous frequency mode
- */
- if (data->type != EDID_DETAIL_MONITOR_RANGE)
- continue;
- /*
- * Check for flag range limits only. If flag == 1 then
- * no additional timing information provided.
- * Default GTF, GTF Secondary curve and CVT are not
- * supported
- */
- if (range->flags != 1)
- continue;
-
- connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
- connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
-
- if (edid->revision >= 4) {
- if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
- connector->display_info.monitor_range.min_vfreq += 255;
- if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
- connector->display_info.monitor_range.max_vfreq += 255;
- }
-
- amdgpu_dm_connector->min_vfreq =
- connector->display_info.monitor_range.min_vfreq;
- amdgpu_dm_connector->max_vfreq =
- connector->display_info.monitor_range.max_vfreq;
-
- break;
- }
-
- if (amdgpu_dm_connector->max_vfreq -
- amdgpu_dm_connector->min_vfreq > 10) {
-
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
freesync_capable = true;
- }
}
+
parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (vsdb_info.replay_mode) {
@@ -12242,12 +12342,9 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
}
- } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ } else if (drm_edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (i >= 0 && vsdb_info.freesync_supported) {
- timing = &edid->detailed_timings[i];
- data = &timing->data.other_data;
-
amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
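[Editorial sketch, not part of the patch: the hunk above makes the DP/eDP path trust the refresh-rate range that drm_edid_connector_update() has already parsed into display_info, and reports FreeSync only when that range spans more than 10 Hz. A minimal standalone version of the check, with illustrative names:]

static bool vfreq_range_supports_vrr(unsigned int min_vfreq,
				     unsigned int max_vfreq)
{
	/* A refresh-rate span of 10 Hz or less is too narrow for VRR. */
	return max_vfreq > min_vfreq && (max_vfreq - min_vfreq) > 10;
}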
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 15d4690c74d6..d2703ca7dff3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -541,12 +541,12 @@ struct amdgpu_display_manager {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
- * @secure_display_ctxs:
+ * @secure_display_ctx:
*
- * Store the ROI information and the work_struct to command dmub and psp for
- * all crtcs.
+ * Store secure display relevant info, e.g. the ROI information,
+ * the work_struct to command dmub, etc.
*/
- struct secure_display_context *secure_display_ctxs;
+ struct secure_display_context secure_display_ctx;
#endif
/**
* @hpd_rx_offload_wq:
@@ -671,9 +671,11 @@ struct amdgpu_dm_connector {
uint32_t connector_id;
int bl_idx;
+ struct cec_notifier *notifier;
+
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
- struct edid *edid;
+ const struct drm_edid *drm_edid;
/* shared with amdgpu */
struct amdgpu_hpd hpd;
@@ -697,6 +699,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ uint32_t mst_local_bw;
+ uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
@@ -727,7 +731,7 @@ struct amdgpu_dm_connector {
/* Cached display modes */
struct drm_display_mode freesync_vid_base;
- int psr_skip_count;
+ int sr_skip_count;
bool disallow_edp_enter_psr;
/* Record progress status of mst*/
@@ -951,7 +955,7 @@ void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector);
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
- struct edid *edid);
+ const struct drm_edid *drm_edid);
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
@@ -1004,7 +1008,14 @@ void *dm_allocate_gpu_mem(struct amdgpu_device *adev,
enum dc_gpu_mem_alloc_type type,
size_t size,
long long *addr);
+void dm_free_gpu_mem(struct amdgpu_device *adev,
+ enum dc_gpu_mem_alloc_type type,
+ void *addr);
bool amdgpu_dm_is_headless(struct amdgpu_device *adev);
+void hdmi_cec_set_edid(struct amdgpu_dm_connector *aconnector);
+void hdmi_cec_unset_edid(struct amdgpu_dm_connector *aconnector);
+int amdgpu_dm_initialize_hdmi_connector(struct amdgpu_dm_connector *aconnector);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index f936a35fa9eb..033bd817d871 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -30,6 +30,7 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
+#include "amdgpu_dm_psr.h"
static const char *const pipe_crc_sources[] = {
"none",
@@ -83,45 +84,274 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+static void update_phy_id_mapping(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *sort_connector[AMDGPU_DM_MAX_CRTC] = {NULL};
+ struct drm_connector_list_iter iter;
+ uint8_t idx = 0, idx_2 = 0, connector_cnt = 0;
+
+ dm->secure_display_ctx.phy_mapping_updated = false;
+
+ mutex_lock(&ddev->mode_config.mutex);
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->status != connector_status_connected)
+ continue;
+
+ if (idx >= AMDGPU_DM_MAX_CRTC) {
+ DRM_WARN("%s connected connectors exceed max crtc\n", __func__);
+ mutex_unlock(&ddev->mode_config.mutex);
+ return;
+ }
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ sort_connector[idx] = aconnector;
+ idx++;
+ connector_cnt++;
+ }
+ drm_connector_list_iter_end(&iter);
+
+ /* sort connectors by link_enc_hw_instance first */
+ for (idx = connector_cnt; idx > 1 ; idx--) {
+ for (idx_2 = 0; idx_2 < (idx - 1); idx_2++) {
+ if (sort_connector[idx_2]->dc_link->link_enc_hw_inst >
+ sort_connector[idx_2 + 1]->dc_link->link_enc_hw_inst)
+ swap(sort_connector[idx_2], sort_connector[idx_2 + 1]);
+ }
+ }
+
+ /*
+ * Sort mst connectors by RAD. mst connectors with the same enc_hw_instance are already
+ * sorted together above.
+ */
+ for (idx = 0; idx < connector_cnt; /*Do nothing*/) {
+ if (sort_connector[idx]->mst_root) {
+ uint8_t i, j, k;
+ uint8_t mst_con_cnt = 1;
+
+ for (idx_2 = (idx + 1); idx_2 < connector_cnt; idx_2++) {
+ if (sort_connector[idx_2]->mst_root == sort_connector[idx]->mst_root)
+ mst_con_cnt++;
+ else
+ break;
+ }
+
+ for (i = mst_con_cnt; i > 1; i--) {
+ for (j = idx; j < (idx + i - 2); j++) {
+ int mstb_lct = sort_connector[j]->mst_output_port->parent->lct;
+ int next_mstb_lct = sort_connector[j + 1]->mst_output_port->parent->lct;
+ u8 *rad;
+ u8 *next_rad;
+ bool swap = false;
+
+ /* Sort by mst tree depth first. Then compare RAD if depth is the same */
+ if (mstb_lct > next_mstb_lct) {
+ swap = true;
+ } else if (mstb_lct == next_mstb_lct) {
+ if (mstb_lct == 1) {
+ if (sort_connector[j]->mst_output_port->port_num > sort_connector[j + 1]->mst_output_port->port_num)
+ swap = true;
+ } else if (mstb_lct > 1) {
+ rad = sort_connector[j]->mst_output_port->parent->rad;
+ next_rad = sort_connector[j + 1]->mst_output_port->parent->rad;
+
+ for (k = 0; k < mstb_lct - 1; k++) {
+ int shift = (k % 2) ? 0 : 4;
+ int port_num = (rad[k / 2] >> shift) & 0xf;
+ int next_port_num = (next_rad[k / 2] >> shift) & 0xf;
+
+ if (port_num > next_port_num) {
+ swap = true;
+ break;
+ }
+ }
+ } else {
+ DRM_ERROR("MST LCT shouldn't be set as < 1");
+ mutex_unlock(&ddev->mode_config.mutex);
+ return;
+ }
+ }
+
+ if (swap)
+ swap(sort_connector[j], sort_connector[j + 1]);
+ }
+ }
+
+ idx += mst_con_cnt;
+ } else {
+ idx++;
+ }
+ }
+
+ /* Sorting complete. Assign the relevant result to dm->secure_display_ctx.phy_id_mapping[] */
+ memset(dm->secure_display_ctx.phy_id_mapping, 0, sizeof(dm->secure_display_ctx.phy_id_mapping));
+ for (idx = 0; idx < connector_cnt; idx++) {
+ aconnector = sort_connector[idx];
+
+ dm->secure_display_ctx.phy_id_mapping[idx].assigned = true;
+ dm->secure_display_ctx.phy_id_mapping[idx].is_mst = false;
+ dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst = aconnector->dc_link->link_enc_hw_inst;
+
+ if (sort_connector[idx]->mst_root) {
+ dm->secure_display_ctx.phy_id_mapping[idx].is_mst = true;
+ dm->secure_display_ctx.phy_id_mapping[idx].lct = aconnector->mst_output_port->parent->lct;
+ dm->secure_display_ctx.phy_id_mapping[idx].port_num = aconnector->mst_output_port->port_num;
+ memcpy(dm->secure_display_ctx.phy_id_mapping[idx].rad,
+ aconnector->mst_output_port->parent->rad, sizeof(aconnector->mst_output_port->parent->rad));
+ }
+ }
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ dm->secure_display_ctx.phy_id_mapping_cnt = connector_cnt;
+ dm->secure_display_ctx.phy_mapping_updated = true;
+}
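[Editorial sketch, not part of the patch: the bubble sort above orders MST siblings by walking their Relative Address (RAD), which packs one 4-bit port number per hop, two hops per byte, with even hops in the high nibble. The unpacking step, as a hypothetical helper (assuming the kernel u8 type):]

static inline int rad_port_num(const u8 *rad, int hop)
{
	int shift = (hop % 2) ? 0 : 4;	/* even hop -> high nibble */

	return (rad[hop / 2] >> shift) & 0xf;
}

[Two connectors at the same tree depth then compare rad_port_num() hop by hop until the port numbers first differ.]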
+
+static bool get_phy_id(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector, uint8_t *phy_id)
+{
+ int idx, idx_2;
+ bool found = false;
+
+ /*
+ * Assume secure display starts after all connectors are probed. The connection
+ * config is static as well.
+ */
+ if (!dm->secure_display_ctx.phy_mapping_updated) {
+ DRM_WARN("%s Should update the phy id table before get it's value", __func__);
+ return false;
+ }
+
+ for (idx = 0; idx < dm->secure_display_ctx.phy_id_mapping_cnt; idx++) {
+ if (!dm->secure_display_ctx.phy_id_mapping[idx].assigned) {
+ DRM_ERROR("phy_id_mapping[%d] should be assigned", idx);
+ return false;
+ }
+
+ if (aconnector->dc_link->link_enc_hw_inst ==
+ dm->secure_display_ctx.phy_id_mapping[idx].enc_hw_inst) {
+ if (!dm->secure_display_ctx.phy_id_mapping[idx].is_mst) {
+ found = true;
+ goto out;
+ } else {
+ /* Could be caused by wrongly passing the mst root connector */
+ if (!aconnector->mst_output_port) {
+ DRM_ERROR("%s Check mst case but connector without a port assigned", __func__);
+ return false;
+ }
+
+ if (aconnector->mst_root &&
+ aconnector->mst_root->mst_mgr.mst_primary == NULL) {
+ DRM_WARN("%s pass in a stale mst connector", __func__);
+ }
+
+ if (aconnector->mst_output_port->parent->lct == dm->secure_display_ctx.phy_id_mapping[idx].lct &&
+ aconnector->mst_output_port->port_num == dm->secure_display_ctx.phy_id_mapping[idx].port_num) {
+ if (aconnector->mst_output_port->parent->lct == 1) {
+ found = true;
+ goto out;
+ } else if (aconnector->mst_output_port->parent->lct > 1) {
+ /* Check RAD */
+ for (idx_2 = 0; idx_2 < aconnector->mst_output_port->parent->lct - 1; idx_2++) {
+ int shift = (idx_2 % 2) ? 0 : 4;
+ int port_num = (aconnector->mst_output_port->parent->rad[idx_2 / 2] >> shift) & 0xf;
+ int port_num2 = (dm->secure_display_ctx.phy_id_mapping[idx].rad[idx_2 / 2] >> shift) & 0xf;
+
+ if (port_num != port_num2)
+ break;
+ }
+
+ if (idx_2 == aconnector->mst_output_port->parent->lct - 1) {
+ found = true;
+ goto out;
+ }
+ } else {
+ DRM_ERROR("lCT should be >= 1");
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+out:
+ if (found) {
+ DRM_DEBUG_DRIVER("Associated secure display PHY ID as %d", idx);
+ *phy_id = idx;
+ } else {
+ DRM_WARN("Can't find associated phy ID");
+ return false;
+ }
+
+ return true;
+}
+
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_dm_connector *aconnector;
bool was_activated;
+ uint8_t phy_id;
+ unsigned long flags;
+ int i;
- spin_lock_irq(&drm_dev->event_lock);
- was_activated = acrtc->dm_irq_params.window_param.activated;
- acrtc->dm_irq_params.window_param.x_start = 0;
- acrtc->dm_irq_params.window_param.y_start = 0;
- acrtc->dm_irq_params.window_param.x_end = 0;
- acrtc->dm_irq_params.window_param.y_end = 0;
- acrtc->dm_irq_params.window_param.activated = false;
- acrtc->dm_irq_params.window_param.update_win = false;
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- spin_unlock_irq(&drm_dev->event_lock);
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ was_activated = acrtc->dm_irq_params.crc_window_activated;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ acrtc->dm_irq_params.window_param[i].x_start = 0;
+ acrtc->dm_irq_params.window_param[i].y_start = 0;
+ acrtc->dm_irq_params.window_param[i].x_end = 0;
+ acrtc->dm_irq_params.window_param[i].y_end = 0;
+ acrtc->dm_irq_params.window_param[i].enable = false;
+ acrtc->dm_irq_params.window_param[i].update_win = false;
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 0;
+ }
+ acrtc->dm_irq_params.crc_window_activated = false;
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
/* Disable secure_display if it was enabled */
- if (was_activated) {
+ if (was_activated && dm->secure_display_ctx.op_mode == LEGACY_MODE) {
/* stop ROI update on this crtc */
- flush_work(&dm->secure_display_ctxs[crtc->index].notify_ta_work);
- flush_work(&dm->secure_display_ctxs[crtc->index].forward_roi_work);
- dc_stream_forward_crc_window(stream, NULL, true);
+ flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].notify_ta_work);
+ flush_work(&dm->secure_display_ctx.crtc_ctx[crtc->index].forward_roi_work);
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (aconnector && get_phy_id(dm, aconnector, &phy_id)) {
+ if (dm->secure_display_ctx.support_mul_roi)
+ dc_stream_forward_multiple_crc_window(stream, NULL, phy_id, true);
+ else
+ dc_stream_forward_crc_window(stream, NULL, phy_id, true);
+ } else {
+ DRM_DEBUG_DRIVER("%s Can't find matching phy id", __func__);
+ }
}
}
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
- struct secure_display_context *secure_display_ctx;
+ struct secure_display_crtc_context *crtc_ctx;
struct psp_context *psp;
struct ta_securedisplay_cmd *securedisplay_cmd;
struct drm_crtc *crtc;
struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector;
uint8_t phy_inst;
+ struct amdgpu_display_manager *dm;
+ struct crc_data crc_cpy[MAX_CRC_WINDOW_NUM];
+ unsigned long flags;
+ uint8_t roi_idx = 0;
int ret;
+ int i;
- secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
- crtc = secure_display_ctx->crtc;
+ crtc_ctx = container_of(work, struct secure_display_crtc_context, notify_ta_work);
+ crtc = crtc_ctx->crtc;
if (!crtc)
return;
@@ -133,21 +363,50 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
return;
}
+ dm = &drm_to_adev(crtc->dev)->dm;
stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
- phy_inst = stream->link->link_enc_hw_inst;
-
- /* need lock for multiple crtcs to use the command buffer */
- mutex_lock(&psp->securedisplay_context.mutex);
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ if (!aconnector)
+ return;
- psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
- TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ mutex_lock(&crtc->dev->mode_config.mutex);
+ if (!get_phy_id(dm, aconnector, &phy_inst)) {
+ DRM_WARN("%s Can't find mapping phy id!", __func__);
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+ return;
+ }
+ mutex_unlock(&crtc->dev->mode_config.mutex);
- securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ memcpy(crc_cpy, crtc_ctx->crc_info.crc, sizeof(struct crc_data) * MAX_CRC_WINDOW_NUM);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ /* need lock for multiple crtcs to use the command buffer */
+ mutex_lock(&psp->securedisplay_context.mutex);
/* PSP TA is expected to finish data transmission over I2C within current frame,
* even if there are up to 4 crtcs requesting to send in this frame.
*/
- ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ if (dm->secure_display_ctx.support_mul_roi) {
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.phy_id = phy_inst;
+
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ if (crc_cpy[i].crc_ready)
+ roi_idx |= 1 << i;
+ }
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc_v2.roi_idx = roi_idx;
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2);
+ } else {
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ }
if (!ret) {
if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
@@ -160,22 +419,47 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
- struct secure_display_context *secure_display_ctx;
+ struct secure_display_crtc_context *crtc_ctx;
struct amdgpu_display_manager *dm;
struct drm_crtc *crtc;
struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector;
+ struct crc_window roi_cpy[MAX_CRC_WINDOW_NUM];
+ unsigned long flags;
+ uint8_t phy_id;
- secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work);
- crtc = secure_display_ctx->crtc;
+ crtc_ctx = container_of(work, struct secure_display_crtc_context, forward_roi_work);
+ crtc = crtc_ctx->crtc;
if (!crtc)
return;
dm = &drm_to_adev(crtc->dev)->dm;
stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
+ aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+ if (!aconnector)
+ return;
+
+ mutex_lock(&crtc->dev->mode_config.mutex);
+ if (!get_phy_id(dm, aconnector, &phy_id)) {
+ DRM_WARN("%s Can't find mapping phy id!", __func__);
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+ return;
+ }
+ mutex_unlock(&crtc->dev->mode_config.mutex);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ memcpy(roi_cpy, crtc_ctx->roi, sizeof(struct crc_window) * MAX_CRC_WINDOW_NUM);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
mutex_lock(&dm->dc_lock);
- dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false);
+ if (dm->secure_display_ctx.support_mul_roi)
+ dc_stream_forward_multiple_crc_window(stream, roi_cpy,
+ phy_id, false);
+ else
+ dc_stream_forward_crc_window(stream, &roi_cpy[0].rect,
+ phy_id, false);
mutex_unlock(&dm->dc_lock);
}
@@ -186,7 +470,7 @@ bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
bool ret = false;
spin_lock_irq(&drm_dev->event_lock);
- ret = acrtc->dm_irq_params.window_param.activated;
+ ret = acrtc->dm_irq_params.crc_window_activated;
spin_unlock_irq(&drm_dev->event_lock);
return ret;
@@ -224,10 +508,14 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
+ /* For PSR1, check that the panel has exited PSR */
+ if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ amdgpu_dm_psr_wait_disable(stream_state);
+
/* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!dc_stream_configure_crc(stream_state->ctx->dc,
- stream_state, NULL, enable, enable)) {
+ stream_state, NULL, enable, enable, 0, true)) {
ret = -EINVAL;
goto unlock;
}
@@ -258,6 +546,10 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
struct drm_crtc_commit *commit;
struct dm_crtc_state *crtc_state;
struct drm_device *drm_dev = crtc->dev;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+#endif
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct drm_dp_aux *aux = NULL;
bool enable = false;
@@ -357,6 +649,17 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
+ /*
+ * Reading the CRC requires the vblank interrupt handler to be
+ * enabled. Keep a reference until CRC capture stops.
+ */
+ enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+ }
+
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
@@ -367,16 +670,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- /*
- * Reading the CRC requires the vblank interrupt handler to be
- * enabled. Keep a reference until CRC capture stops.
- */
- enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
if (!enabled && enable) {
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto cleanup;
-
if (dm_is_crc_source_dprx(source)) {
if (drm_dp_start_crc(aux, crtc)) {
DRM_DEBUG_DRIVER("dp start crc failed\n");
@@ -402,6 +696,13 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
/* Reset crc_skipped on dm state */
crtc_state->crc_skip_count = 0;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Initialize phy id mapping table for secure display */
+ if (dm->secure_display_ctx.op_mode == LEGACY_MODE &&
+ !dm->secure_display_ctx.phy_mapping_updated)
+ update_phy_id_mapping(adev);
+#endif
+
cleanup:
if (commit)
drm_crtc_commit_put(commit);
@@ -456,7 +757,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
}
if (dm_is_crc_source_crtc(cur_crc_src)) {
- if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, 0,
&crcs[0], &crcs[1], &crcs[2]))
return;
@@ -472,8 +773,17 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
enum amdgpu_dm_pipe_crc_source cur_crc_src;
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_device *adev = NULL;
- struct secure_display_context *secure_display_ctx = NULL;
+ struct secure_display_crtc_context *crtc_ctx = NULL;
+ bool reset_crc_frame_count[MAX_CRC_WINDOW_NUM] = {false};
+ uint32_t crc_r[MAX_CRC_WINDOW_NUM] = {0};
+ uint32_t crc_g[MAX_CRC_WINDOW_NUM] = {0};
+ uint32_t crc_b[MAX_CRC_WINDOW_NUM] = {0};
unsigned long flags1;
+ bool forward_roi_change = false;
+ bool notify_ta = false;
+ bool all_crc_ready = true;
+ struct dc_stream_state *stream_state;
+ int i;
if (crtc == NULL)
return;
@@ -481,78 +791,160 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
acrtc = to_amdgpu_crtc(crtc);
adev = drm_to_adev(crtc->dev);
drm_dev = crtc->dev;
+ stream_state = to_dm_crtc_state(crtc->state)->stream;
spin_lock_irqsave(&drm_dev->event_lock, flags1);
cur_crc_src = acrtc->dm_irq_params.crc_src;
/* Early return if CRC capture is not enabled. */
if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
- !dm_is_crc_source_crtc(cur_crc_src))
- goto cleanup;
-
- if (!acrtc->dm_irq_params.window_param.activated)
- goto cleanup;
+ !dm_is_crc_source_crtc(cur_crc_src)) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ return;
+ }
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
+ if (!acrtc->dm_irq_params.crc_window_activated) {
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+ return;
}
- secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id];
- if (WARN_ON(secure_display_ctx->crtc != crtc)) {
- /* We have set the crtc when creating secure_display_context,
+ crtc_ctx = &adev->dm.secure_display_ctx.crtc_ctx[acrtc->crtc_id];
+ if (WARN_ON(crtc_ctx->crtc != crtc)) {
+ /* We have set the crtc when creating secure_display_crtc_context,
* don't expect it to be changed here.
*/
- secure_display_ctx->crtc = crtc;
+ crtc_ctx->crtc = crtc;
}
- if (acrtc->dm_irq_params.window_param.update_win) {
- /* prepare work for dmub to update ROI */
- secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start;
- secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start;
- secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end -
- acrtc->dm_irq_params.window_param.x_start;
- secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end -
- acrtc->dm_irq_params.window_param.y_start;
- schedule_work(&secure_display_ctx->forward_roi_work);
-
- acrtc->dm_irq_params.window_param.update_win = false;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ struct crc_params crc_window = {
+ .windowa_x_start = acrtc->dm_irq_params.window_param[i].x_start,
+ .windowa_y_start = acrtc->dm_irq_params.window_param[i].y_start,
+ .windowa_x_end = acrtc->dm_irq_params.window_param[i].x_end,
+ .windowa_y_end = acrtc->dm_irq_params.window_param[i].y_end,
+ .windowb_x_start = acrtc->dm_irq_params.window_param[i].x_start,
+ .windowb_y_start = acrtc->dm_irq_params.window_param[i].y_start,
+ .windowb_x_end = acrtc->dm_irq_params.window_param[i].x_end,
+ .windowb_y_end = acrtc->dm_irq_params.window_param[i].y_end,
+ };
+
+ crtc_ctx->roi[i].enable = acrtc->dm_irq_params.window_param[i].enable;
+
+ if (!acrtc->dm_irq_params.window_param[i].enable) {
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ continue;
+ }
- /* Statically skip 1 frame, because we may need to wait below things
- * before sending ROI to dmub:
- * 1. We defer the work by using system workqueue.
- * 2. We may need to wait for dc_lock before accessing dmub.
- */
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;
+ if (acrtc->dm_irq_params.window_param[i].skip_frame_cnt) {
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt -= 1;
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ continue;
+ }
- } else {
- /* prepare work for psp to read ROI/CRC and send to I2C */
- schedule_work(&secure_display_ctx->notify_ta_work);
+ if (acrtc->dm_irq_params.window_param[i].update_win) {
+ crtc_ctx->roi[i].rect.x = crc_window.windowa_x_start;
+ crtc_ctx->roi[i].rect.y = crc_window.windowa_y_start;
+ crtc_ctx->roi[i].rect.width = crc_window.windowa_x_end -
+ crc_window.windowa_x_start;
+ crtc_ctx->roi[i].rect.height = crc_window.windowa_y_end -
+ crc_window.windowa_y_start;
+
+ if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
+ /* forward task to dmub to update ROI */
+ forward_roi_change = true;
+ else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
+ /* update ROI via dm*/
+ dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+ &crc_window, true, true, i, false);
+
+ reset_crc_frame_count[i] = true;
+
+ acrtc->dm_irq_params.window_param[i].update_win = false;
+
+ /* Statically skip 1 frame, because we may need to wait for the things
+ * below before sending ROI to dmub:
+ * 1. We defer the work by using system workqueue.
+ * 2. We may need to wait for dc_lock before accessing dmub.
+ */
+ acrtc->dm_irq_params.window_param[i].skip_frame_cnt = 1;
+ crtc_ctx->crc_info.crc[i].crc_ready = false;
+ } else {
+ if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, i,
+ &crc_r[i], &crc_g[i], &crc_b[i]))
+ DRM_ERROR("Secure Display: fail to get crc from engine %d\n", i);
+
+ if (adev->dm.secure_display_ctx.op_mode == LEGACY_MODE)
+ /* forward task to psp to read ROI/CRC and output via I2C */
+ notify_ta = true;
+ else if (adev->dm.secure_display_ctx.op_mode == DISPLAY_CRC_MODE)
+ /* Avoid the ROI window getting changed; keep overwriting. */
+ dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
+ &crc_window, true, true, i, false);
+
+ /* crc ready for psp to read out */
+ crtc_ctx->crc_info.crc[i].crc_ready = true;
+ }
}
-cleanup:
spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
+
+ if (forward_roi_change)
+ schedule_work(&crtc_ctx->forward_roi_work);
+
+ if (notify_ta)
+ schedule_work(&crtc_ctx->notify_ta_work);
+
+ spin_lock_irqsave(&crtc_ctx->crc_info.lock, flags1);
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ crtc_ctx->crc_info.crc[i].crc_R = crc_r[i];
+ crtc_ctx->crc_info.crc[i].crc_G = crc_g[i];
+ crtc_ctx->crc_info.crc[i].crc_B = crc_b[i];
+
+ if (!crtc_ctx->roi[i].enable) {
+ crtc_ctx->crc_info.crc[i].frame_count = 0;
+ continue;
+ }
+
+ if (!crtc_ctx->crc_info.crc[i].crc_ready)
+ all_crc_ready = false;
+
+ if (reset_crc_frame_count[i] || crtc_ctx->crc_info.crc[i].frame_count == UINT_MAX)
+ /* Reset the reference frame count after the user updates the ROI
+ * or it reaches the maximum value.
+ */
+ crtc_ctx->crc_info.crc[i].frame_count = 0;
+ else
+ crtc_ctx->crc_info.crc[i].frame_count += 1;
+ }
+ spin_unlock_irqrestore(&crtc_ctx->crc_info.lock, flags1);
+
+ if (all_crc_ready)
+ complete_all(&crtc_ctx->crc_info.completion);
}
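[Editorial sketch, not part of the patch: the handler above publishes per-window CRCs under crc_info.lock and signals crc_info.completion once every enabled window is ready. A hedged sketch of what a consumer could look like; the function name and timeout are illustrative:]

static int read_all_window_crcs(struct secure_display_crtc_context *ctx,
				struct crc_data out[MAX_CRC_WINDOW_NUM])
{
	unsigned long flags;

	/* woken by complete_all() in the vblank-time IRQ handler */
	if (!wait_for_completion_timeout(&ctx->crc_info.completion,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	spin_lock_irqsave(&ctx->crc_info.lock, flags);
	memcpy(out, ctx->crc_info.crc, sizeof(ctx->crc_info.crc));
	spin_unlock_irqrestore(&ctx->crc_info.lock, flags);

	return 0;
}

[A repeated reader would presumably need reinit_completion() before re-arming.]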
-struct secure_display_context *
-amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
+void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
- struct secure_display_context *secure_display_ctxs = NULL;
+ struct secure_display_crtc_context *crtc_ctx = NULL;
int i;
- secure_display_ctxs = kcalloc(adev->mode_info.num_crtc,
- sizeof(struct secure_display_context),
+ crtc_ctx = kcalloc(adev->mode_info.num_crtc,
+ sizeof(struct secure_display_crtc_context),
GFP_KERNEL);
- if (!secure_display_ctxs)
- return NULL;
+ if (!crtc_ctx) {
+ adev->dm.secure_display_ctx.crtc_ctx = NULL;
+ return;
+ }
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window);
- INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
- secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base;
+ INIT_WORK(&crtc_ctx[i].forward_roi_work, amdgpu_dm_forward_crc_window);
+ INIT_WORK(&crtc_ctx[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+ crtc_ctx[i].crtc = &adev->mode_info.crtcs[i]->base;
+ spin_lock_init(&crtc_ctx[i].crc_info.lock);
}
- return secure_display_ctxs;
+ adev->dm.secure_display_ctx.crtc_ctx = crtc_ctx;
+
+ adev->dm.secure_display_ctx.op_mode = DISPLAY_CRC_MODE;
}
#endif
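[Editorial sketch, not part of the patch: the constructor above follows the usual per-CRTC pattern, one kcalloc for all contexts followed by per-entry INIT_WORK and spin_lock_init. A generic version under illustrative names:]

struct my_crtc_ctx {
	struct work_struct work;
	spinlock_t lock;
};

static struct my_crtc_ctx *alloc_crtc_ctxs(int num_crtc,
					   void (*fn)(struct work_struct *))
{
	struct my_crtc_ctx *ctx = kcalloc(num_crtc, sizeof(*ctx), GFP_KERNEL);
	int i;

	if (!ctx)
		return NULL;

	for (i = 0; i < num_crtc; i++) {
		INIT_WORK(&ctx[i].work, fn);	/* deferred to a workqueue */
		spin_lock_init(&ctx[i].lock);	/* guards shared CRC data */
	}
	return ctx;
}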
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 748e80ef40d0..3da056c8d20b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -40,20 +40,53 @@ enum amdgpu_dm_pipe_crc_source {
};
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+#define MAX_CRTC 6
+
+enum secure_display_mode {
+ /* via dmub + psp */
+ LEGACY_MODE = 0,
+ /* driver directly */
+ DISPLAY_CRC_MODE,
+ SECURE_DISPLAY_MODE_MAX,
+};
+
+struct phy_id_mapping {
+ bool assigned;
+ bool is_mst;
+ uint8_t enc_hw_inst;
+ u8 lct;
+ u8 port_num;
+ u8 rad[8];
+};
+
+struct crc_data {
+ uint32_t crc_R;
+ uint32_t crc_G;
+ uint32_t crc_B;
+ uint32_t frame_count;
+ bool crc_ready;
+};
+
+struct crc_info {
+ struct crc_data crc[MAX_CRC_WINDOW_NUM];
+ struct completion completion;
+ spinlock_t lock;
+};
+
struct crc_window_param {
uint16_t x_start;
uint16_t y_start;
uint16_t x_end;
uint16_t y_end;
/* CRC window is activated or not */
- bool activated;
+ bool enable;
/* Update crc window during vertical blank or not */
bool update_win;
/* skip reading/writing for few frames */
int skip_frame_cnt;
};
-struct secure_display_context {
+struct secure_display_crtc_context {
/* work to notify PSP TA*/
struct work_struct notify_ta_work;
@@ -63,7 +96,20 @@ struct secure_display_context {
struct drm_crtc *crtc;
/* Region of Interest (ROI) */
- struct rect rect;
+ struct crc_window roi[MAX_CRC_WINDOW_NUM];
+
+ struct crc_info crc_info;
+};
+
+struct secure_display_context {
+
+ struct secure_display_crtc_context *crtc_ctx;
+ /* Whether dmub supports multiple ROI settings */
+ bool support_mul_roi;
+ enum secure_display_mode op_mode;
+ bool phy_mapping_updated;
+ int phy_id_mapping_cnt;
+ struct phy_id_mapping phy_id_mapping[MAX_CRTC];
};
#endif
@@ -95,8 +141,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
-struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(
- struct amdgpu_device *adev);
+void amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev);
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index a2cf2c066a76..36a830a7440f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -35,8 +35,8 @@
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_debugfs.h"
-#define HPD_DETECTION_PERIOD_uS 5000000
-#define HPD_DETECTION_TIME_uS 1000
+#define HPD_DETECTION_PERIOD_uS 2000000
+#define HPD_DETECTION_TIME_uS 100000
void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
@@ -93,7 +93,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -142,7 +142,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
+ amdgpu_dm_psr_disable(vblank_work->stream, false);
} else if (link->psr_settings.psr_feature_enabled &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
@@ -154,6 +154,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
amdgpu_dm_psr_enable(vblank_work->stream);
if (dm->idle_workqueue &&
+ (dm->dc->config.disable_ips == DMUB_IPS_ENABLE) &&
dm->dc->idle_optimizations_allowed &&
dm->idle_workqueue->enable &&
!dm->idle_workqueue->running)
@@ -251,10 +252,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
else if (dm->active_vblank_irq_count)
dm->active_vblank_irq_count--;
- if (dm->active_vblank_irq_count > 0) {
- DRM_DEBUG_KMS("Allow idle optimizations (MALL): false\n");
+ if (dm->active_vblank_irq_count > 0)
dc_allow_idle_optimizations(dm->dc, false);
- }
/*
* Control PSR based on vblank requirements from OS
@@ -266,17 +265,14 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
* where the SU region is the full hactive*vactive region. See
* fill_dc_dirty_rects().
*/
- if (vblank_work->stream && vblank_work->stream->link) {
+ if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) {
amdgpu_dm_crtc_set_panel_sr_feature(
vblank_work, vblank_work->enable,
- vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
- vblank_work->stream->link->replay_settings.replay_feature_enabled);
+ vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0) {
- DRM_DEBUG_KMS("Allow idle optimizations (MALL): true\n");
+ if (dm->active_vblank_irq_count == 0)
dc_allow_idle_optimizations(dm->dc, true);
- }
mutex_unlock(&dm->dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 17e948753f59..c1212947a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -37,7 +37,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state);
int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index db56b0aa5454..049046c60462 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -25,6 +25,7 @@
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
+#include <media/cec-notifier.h>
#include "dc.h"
#include "amdgpu.h"
@@ -258,7 +259,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
struct dc_link *link = connector->dc_link;
struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
struct dc *dc = (struct dc *)link->dc;
- struct dc_link_settings prefer_link_settings;
+ struct dc_link_settings prefer_link_settings = {0};
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
/* 0: lane_count; 1: link_rate */
@@ -389,7 +390,7 @@ static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf,
struct dc_link *link = aconnector->dc_link;
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
struct dc *dc = (struct dc *)link->dc;
- struct dc_link_settings prefer_link_settings;
+ struct dc_link_settings prefer_link_settings = {0};
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
/* 0: lane_count; 1: link_rate */
@@ -613,7 +614,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
uint32_t wr_buf_size = 40;
long param[3];
bool use_prefer_link_setting;
- struct link_training_settings link_lane_settings;
+ struct link_training_settings link_lane_settings = {0};
int max_param_num = 3;
uint8_t param_nums = 0;
int r = 0;
@@ -768,7 +769,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
- struct link_training_settings link_training_settings;
+ struct link_training_settings link_training_settings = {0};
int i;
if (size == 0)
@@ -902,9 +903,10 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
{
struct amdgpu_device *adev = m->private;
struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ struct dmub_fw_meta_info *fw_meta_info = NULL;
struct dmub_debugfs_trace_entry *entries;
uint8_t *tbuf_base;
- uint32_t tbuf_size, max_entries, num_entries, i;
+ uint32_t tbuf_size, max_entries, num_entries, first_entry, i;
if (!fb_info)
return 0;
@@ -913,20 +915,42 @@ static int dmub_tracebuffer_show(struct seq_file *m, void *data)
if (!tbuf_base)
return 0;
- tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
+ if (adev->dm.dmub_srv)
+ fw_meta_info = &adev->dm.dmub_srv->meta_info;
+
+ tbuf_size = fw_meta_info ? fw_meta_info->trace_buffer_size :
+ DMUB_TRACE_BUFFER_SIZE;
max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
sizeof(struct dmub_debugfs_trace_entry);
num_entries =
((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
+ /* DMCUB tracebuffer is a ring. If it rolled over, print a hint that
+ * entries are being overwritten.
+ */
+ if (num_entries > max_entries)
+ seq_printf(m, "...\n");
+
+ first_entry = num_entries % max_entries;
num_entries = min(num_entries, max_entries);
entries = (struct dmub_debugfs_trace_entry
*)(tbuf_base +
sizeof(struct dmub_debugfs_trace_header));
- for (i = 0; i < num_entries; ++i) {
+ /* To print entries chronologically, go from the first entry to the
+ * top of the buffer, then from the base of the buffer to the first entry.
+ */
+ for (i = first_entry; i < num_entries; ++i) {
+ struct dmub_debugfs_trace_entry *entry = &entries[i];
+
+ seq_printf(m,
+ "trace_code=%u tick_count=%u param0=%u param1=%u\n",
+ entry->trace_code, entry->tick_count, entry->param0,
+ entry->param1);
+ }
+ for (i = 0; i < first_entry; ++i) {
struct dmub_debugfs_trace_entry *entry = &entries[i];
seq_printf(m,
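[Editorial sketch, not part of the patch: the hunk above is the standard chronological dump of a wrapped ring. With a monotonically increasing entry count and a fixed capacity, the oldest surviving entry sits at entry_count % capacity. Standalone, with illustrative names:]

static void dump_ring_in_order(const struct dmub_debugfs_trace_entry *ring,
			       u32 entry_count, u32 capacity)
{
	u32 first = entry_count % capacity;
	u32 n = min(entry_count, capacity);
	u32 i;

	for (i = first; i < n; i++)	/* oldest: 'first' to top of ring */
		pr_info("trace_code=%u\n", ring[i].trace_code);
	for (i = 0; i < first; i++)	/* newer: base of ring to 'first' */
		pr_info("trace_code=%u\n", ring[i].trace_code);
}

[Before the ring wraps, first == entry_count, so the first loop is empty and entries print from the base, which matches the hunk's behavior.]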
@@ -1529,7 +1553,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -1543,8 +1566,6 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -1558,10 +1579,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_clock_en);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -1719,7 +1739,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -1733,8 +1752,6 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -1748,10 +1765,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_slice_width);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -1907,7 +1923,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -1921,8 +1936,6 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -1936,10 +1949,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_slice_height);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2091,7 +2103,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -2105,8 +2116,6 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -2120,10 +2129,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_bits_per_pixel);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2270,7 +2278,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -2284,8 +2291,6 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -2299,10 +2304,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_pic_width);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2328,7 +2332,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -2342,8 +2345,6 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -2357,10 +2358,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_pic_height);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2401,7 +2401,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -2415,8 +2414,6 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -2430,10 +2427,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_chunk_size);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2474,7 +2470,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
char *rd_buf = NULL;
- char *rd_buf_ptr = NULL;
struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
struct display_stream_compressor *dsc;
struct dcn_dsc_state dsc_state = {0};
@@ -2488,8 +2483,6 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
if (!rd_buf)
return -ENOMEM;
- rd_buf_ptr = rd_buf;
-
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream &&
@@ -2503,10 +2496,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
- snprintf(rd_buf_ptr, str_len,
+ snprintf(rd_buf, str_len,
"%d\n",
dsc_state.dsc_slice_bpg_offset);
- rd_buf_ptr += str_len;
while (size) {
if (*pos >= rd_buf_size)
@@ -2857,6 +2849,67 @@ static int is_dpia_link_show(struct seq_file *m, void *data)
return 0;
}
+/**
+ * hdmi_cec_state_show - Read out the HDMI-CEC feature status
+ * @m: sequence file.
+ * @data: unused.
+ *
+ * Return: 0 on success
+ */
+static int hdmi_cec_state_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ seq_printf(m, "%s:%d\n", connector->name, connector->base.id);
+ seq_printf(m, "HDMI-CEC status: %d\n", aconnector->notifier ? 1 : 0);
+
+ return 0;
+}
+
+/**
+ * hdmi_cec_state_write - Enable/Disable HDMI-CEC feature from driver side
+ * @f: file structure.
+ * @buf: userspace buffer. Set to '1' to enable; '0' to disable the CEC feature.
+ * @size: size of buffer from userspace.
+ * @pos: unused.
+ *
+ * Return: size on success, error code on failure
+ */
+static ssize_t hdmi_cec_state_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int ret;
+ bool enable;
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ struct drm_device *ddev = aconnector->base.dev;
+
+ if (size == 0)
+ return -EINVAL;
+
+ ret = kstrtobool_from_user(buf, size, &enable);
+ if (ret) {
+ drm_dbg_driver(ddev, "invalid user data!\n");
+ return ret;
+ }
+
+ if (enable) {
+ if (aconnector->notifier)
+ return -EINVAL;
+ ret = amdgpu_dm_initialize_hdmi_connector(aconnector);
+ if (ret)
+ return ret;
+ hdmi_cec_set_edid(aconnector);
+ } else {
+ if (!aconnector->notifier)
+ return -EINVAL;
+ cec_notifier_conn_unregister(aconnector->notifier);
+ aconnector->notifier = NULL;
+ }
+
+ return size;
+}
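[Editorial sketch, not part of the patch: hdmi_cec_state_write() above follows the common debugfs boolean-write shape. A minimal sketch of the pattern, not tied to this driver:]

static ssize_t bool_knob_write(struct file *f, const char __user *buf,
			       size_t size, loff_t *pos)
{
	bool enable;
	int ret;

	if (size == 0)
		return -EINVAL;

	/* accepts "0"/"1", "y"/"n", "on"/"off" from userspace */
	ret = kstrtobool_from_user(buf, size, &enable);
	if (ret)
		return ret;

	/* ... apply 'enable' to the feature here ... */

	return size;	/* report the whole write as consumed */
}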
+
DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
@@ -2869,6 +2922,7 @@ DEFINE_SHOW_ATTRIBUTE(psr_capability);
DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status);
DEFINE_SHOW_ATTRIBUTE(is_dpia_link);
+DEFINE_SHOW_STORE_ATTRIBUTE(hdmi_cec_state);
static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
.owner = THIS_MODULE,
@@ -3004,7 +3058,8 @@ static const struct {
char *name;
const struct file_operations *fops;
} hdmi_debugfs_entries[] = {
- {"hdcp_sink_capability", &hdcp_sink_capability_fops}
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops},
+ {"hdmi_cec_state", &hdmi_cec_state_fops}
};
/*
@@ -3489,8 +3544,8 @@ static int crc_win_x_start_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.x_start = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].x_start = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3506,7 +3561,7 @@ static int crc_win_x_start_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.x_start;
+ *val = acrtc->dm_irq_params.window_param[0].x_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3526,8 +3581,8 @@ static int crc_win_y_start_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.y_start = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].y_start = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3543,7 +3598,7 @@ static int crc_win_y_start_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.y_start;
+ *val = acrtc->dm_irq_params.window_param[0].y_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3562,8 +3617,8 @@ static int crc_win_x_end_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.x_end = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].x_end = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3579,7 +3634,7 @@ static int crc_win_x_end_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.x_end;
+ *val = acrtc->dm_irq_params.window_param[0].x_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3598,8 +3653,8 @@ static int crc_win_y_end_set(void *data, u64 val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- acrtc->dm_irq_params.window_param.y_end = (uint16_t) val;
- acrtc->dm_irq_params.window_param.update_win = false;
+ acrtc->dm_irq_params.window_param[0].y_end = (uint16_t) val;
+ acrtc->dm_irq_params.window_param[0].update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3615,7 +3670,7 @@ static int crc_win_y_end_get(void *data, u64 *val)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
- *val = acrtc->dm_irq_params.window_param.y_end;
+ *val = acrtc->dm_irq_params.window_param[0].y_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
@@ -3638,13 +3693,14 @@ static int crc_win_update_set(void *data, u64 val)
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
- acrtc->dm_irq_params.window_param.activated = true;
- acrtc->dm_irq_params.window_param.update_win = true;
- acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
+ acrtc->dm_irq_params.window_param[0].enable = true;
+ acrtc->dm_irq_params.window_param[0].update_win = true;
+ acrtc->dm_irq_params.window_param[0].skip_frame_cnt = 0;
+ acrtc->dm_irq_params.crc_window_activated = true;
spin_unlock_irq(&adev_to_drm(adev)->event_lock);
mutex_unlock(&adev->dm.dc_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 069e0195e50a..fbd80d8545a8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -23,6 +23,8 @@
*
*/
+#include <acpi/video.h>
+
#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/i2c.h>
@@ -44,6 +46,7 @@
#include "dm_helpers.h"
#include "ddc_service_types.h"
+#include "clk_mgr.h"
static u32 edid_extract_panel_id(struct edid *edid)
{
@@ -641,6 +644,8 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
// write rc data
memmove(rc_data, data, length);
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
+ if (ret < 0)
+ goto err;
}
// write rc offset
@@ -649,20 +654,21 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));
+ if (ret < 0)
+ goto err;
// write rc length
rc_length[0] = (unsigned char) length & 0xFF;
rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));
+ if (ret < 0)
+ goto err;
// write rc cmd
rc_cmd = cmd | 0x80;
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
-
- if (ret < 0) {
- DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
- return false;
- }
+ if (ret < 0)
+ goto err;
// poll until active is 0
for (i = 0; i < 10; i++) {
@@ -685,6 +691,10 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
drm_dbg_dp(aux->drm_dev, "success = %d\n", success);
return success;
+
+err:
+ DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
+ return false;
}
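[Editorial sketch, not part of the patch: the refactor above collapses repeated "if (ret < 0) { log; return false; }" checks after each DPCD write into a single err label, so the write sequence logs its failure in one place. The shape, standalone, with illustrative offsets and values:]

static bool write_sequence(struct drm_dp_aux *aux)
{
	int ret;

	ret = drm_dp_dpcd_writeb(aux, 0x2c0 /* illustrative */, 0x01);
	if (ret < 0)
		goto err;

	ret = drm_dp_dpcd_writeb(aux, 0x2c1 /* illustrative */, 0x02);
	if (ret < 0)
		goto err;

	return true;

err:
	DRM_ERROR("%s: dpcd write failed, err = %d\n", __func__, ret);
	return false;
}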
static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
@@ -875,6 +885,12 @@ bool dm_helpers_dp_write_dsc_enable(
return ret;
}
+bool dm_helpers_dp_write_hblank_reduction(struct dc_context *ctx, const struct dc_stream_state *stream)
+{
+ // TODO
+ return false;
+}
+
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
bool dp_sink_present;
@@ -891,6 +907,67 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link)
return dp_sink_present;
}
+static int
+dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
+{
+ struct drm_connector *connector = data;
+ struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
+ unsigned char start = block * EDID_LENGTH;
+ struct edid *edid;
+ int r;
+
+ if (!acpidev)
+ return -ENODEV;
+
+ /* fetch the entire edid from BIOS */
+ r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, (void *)&edid);
+ if (r < 0) {
+ drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r);
+ return r;
+ }
+ if (len > r || start > r || start + len > r) {
+ r = -EINVAL;
+ goto cleanup;
+ }
+
+ /* sanity check */
+ if (edid->revision < 4 || !(edid->input & DRM_EDID_INPUT_DIGITAL) ||
+ (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_UNDEF) {
+ r = -EINVAL;
+ goto cleanup;
+ }
+
+ memcpy(buf, (void *)edid + start, len);
+ r = 0;
+
+cleanup:
+ kfree(edid);
+
+ return r;
+}
+
+static const struct drm_edid *
+dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID)
+ return NULL;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ break;
+ default:
+ return NULL;
+ }
+
+ if (connector->force == DRM_FORCE_OFF)
+ return NULL;
+
+ return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector);
+}
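[Editorial sketch, not part of the patch: drm_edid_read_custom() drives any block-based EDID source through a small callback, which is what dm_helpers_probe_acpi_edid() implements above. The callback contract, sketched with a hypothetical in-memory blob as context:]

static int blob_read_block(void *data, u8 *buf, unsigned int block, size_t len)
{
	const u8 *blob = data;	/* complete EDID already in memory */

	memcpy(buf, blob + block * EDID_LENGTH, len);
	return 0;		/* 0 on success, negative errno on failure */
}

/* usage: drm_edid_read_custom(connector, blob_read_block, (void *)blob); */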
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
@@ -901,7 +978,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct i2c_adapter *ddc;
int retry = 3;
enum dc_edid_status edid_status;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
if (link->aux_mode)
ddc = &aconnector->dm_dp_aux.aux.ddc;
@@ -912,26 +990,31 @@ enum dc_edid_status dm_helpers_read_local_edid(
* do check sum and retry to make sure read correct edid.
*/
do {
-
- edid = drm_get_edid(&aconnector->base, ddc);
+ drm_edid = dm_helpers_read_acpi_edid(aconnector);
+ if (drm_edid)
+ drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name);
+ else
+ drm_edid = drm_edid_read_ddc(connector, ddc);
+ drm_edid_connector_update(connector, drm_edid);
/* DP Compliance Test 4.2.2.6 */
if (link->aux_mode && connector->edid_corrupt)
drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
- if (!edid && connector->edid_corrupt) {
+ if (!drm_edid && connector->edid_corrupt) {
connector->edid_corrupt = false;
return EDID_BAD_CHECKSUM;
}
- if (!edid)
+ if (!drm_edid)
return EDID_NO_RESPONSE;
+ edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
/* We don't need the original edid anymore */
- kfree(edid);
+ drm_edid_free(drm_edid);
edid_status = dm_helpers_parse_edid_caps(
link,
@@ -1054,17 +1137,8 @@ void dm_helpers_free_gpu_mem(
void *pvMem)
{
struct amdgpu_device *adev = ctx->driver_context;
- struct dal_allocation *da;
-
- /* walk the da list in DM */
- list_for_each_entry(da, &adev->dm.da_list, list) {
- if (pvMem == da->cpu_ptr) {
- amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
- list_del(&da->list);
- kfree(da);
- break;
- }
- }
+
+ dm_free_gpu_mem(adev, type, pvMem);
}
bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
@@ -1121,6 +1195,8 @@ bool dm_helpers_dp_handle_test_pattern_request(
struct pipe_ctx *pipe_ctx = NULL;
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_device *dev = aconnector->base.dev;
+ struct dc_state *dc_state = ctx->dc->current_state;
+ struct clk_mgr *clk_mgr = ctx->dc->clk_mgr;
int i;
for (i = 0; i < MAX_PIPES; i++) {
@@ -1221,6 +1297,16 @@ bool dm_helpers_dp_handle_test_pattern_request(
pipe_ctx->stream->test_pattern.type = test_pattern;
pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;
+	/* Temp W/A for compliance test failures: disable P-state switching and
+	 * force the memory clock to its maximum while the test pattern is active.
+	 */
+ dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+ dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ?
+ clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz;
+ dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ ctx->dc->clk_mgr->funcs->update_clocks(
+ ctx->dc->clk_mgr,
+ dc_state,
+ false);
+
dc_link_dp_set_test_pattern(
(struct dc_link *) link,
test_pattern,
@@ -1301,4 +1387,4 @@ bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream
{
// TODO
return false;
-}
\ No newline at end of file
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 5c9303241aeb..6c9de834455b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -33,13 +33,15 @@ struct dm_irq_params {
struct mod_vrr_params vrr_params;
struct dc_stream_state *stream;
int active_planes;
- bool allow_psr_entry;
+ bool allow_sr_entry;
struct mod_freesync_config freesync_config;
#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source crc_src;
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
- struct crc_window_param window_param;
+ struct crc_window_param window_param[MAX_CRC_WINDOW_NUM];
+	/* Whether at least one CRC window is activated */
+ bool crc_window_activated;
#endif
#endif
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index a08e8a0b696c..07e744da7bf4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -129,7 +129,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
dc_sink_release(aconnector->dc_sink);
}
- kfree(aconnector->edid);
+ drm_edid_free(aconnector->drm_edid);
drm_connector_cleanup(connector);
drm_dp_mst_put_port_malloc(aconnector->mst_output_port);
@@ -155,6 +155,17 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
return 0;
}
+
+static inline void
+amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->drm_edid = NULL;
+ aconnector->dsc_aux = NULL;
+ aconnector->mst_output_port->passthrough_aux = NULL;
+ aconnector->mst_local_bw = 0;
+ aconnector->vc_full_pbn = 0;
+}
+
static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
@@ -182,9 +193,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
- aconnector->edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -302,16 +311,18 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!aconnector)
return drm_add_edid_modes(connector, NULL);
- if (!aconnector->edid) {
- struct edid *edid;
+ if (!aconnector->drm_edid) {
+ const struct drm_edid *drm_edid;
- edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);
+ drm_edid = drm_dp_mst_edid_read(connector,
+ &aconnector->mst_root->mst_mgr,
+ aconnector->mst_output_port);
- if (!edid) {
+ if (!drm_edid) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID, false);
- drm_connector_update_edid_property(
+ drm_edid_connector_update(
&aconnector->base,
NULL);
@@ -345,7 +356,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return ret;
}
- aconnector->edid = edid;
+ aconnector->drm_edid = drm_edid;
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID, true);
}
@@ -360,10 +371,13 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ const struct edid *edid;
+
+ edid = drm_edid_raw(aconnector->drm_edid); // FIXME: Get rid of drm_edid_raw()
dc_sink = dc_link_add_remote_sink(
aconnector->dc_link,
- (uint8_t *)aconnector->edid,
- (aconnector->edid->extensions + 1) * EDID_LENGTH,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
&init_params);
if (!dc_sink) {
@@ -405,7 +419,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
- connector, aconnector->edid);
+ connector, aconnector->drm_edid);
#if defined(CONFIG_DRM_AMD_DC_FP)
if (!validate_dsc_caps_on_connector(aconnector))
@@ -419,10 +433,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
}
}
- drm_connector_update_edid_property(
- &aconnector->base, aconnector->edid);
+ drm_edid_connector_update(&aconnector->base, aconnector->drm_edid);
- ret = drm_add_edid_modes(connector, aconnector->edid);
+ ret = drm_edid_connector_add_modes(connector);
return ret;
}
@@ -500,9 +513,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -586,11 +597,12 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_PROBE, true);
- if (drm_connector_init(
+ if (drm_connector_dynamic_init(
dev,
connector,
&dm_dp_mst_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort)) {
+ DRM_MODE_CONNECTOR_DisplayPort,
+ NULL)) {
kfree(aconnector);
return NULL;
}
@@ -1120,6 +1132,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int i, k, ret;
bool debugfs_overwrite = false;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ struct drm_connector_state *new_conn_state;
memset(params, 0, sizeof(params));
@@ -1127,7 +1140,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return PTR_ERR(mst_state);
/* Set up params */
- DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count);
+ DRM_DEBUG_DRIVER("%s: MST_DSC Try to set up params from %d streams\n", __func__, dc_state->stream_count);
for (i = 0; i < dc_state->stream_count; i++) {
struct dc_dsc_policy dsc_policy = {0};
@@ -1143,6 +1156,14 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (!aconnector->mst_output_port)
continue;
+ new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
+
+ if (!new_conn_state) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC Skip the stream 0x%p with invalid new_conn_state\n",
+ __func__, __LINE__, stream);
+ continue;
+ }
+
stream->timing.flags.DSC = 0;
params[count].timing = &stream->timing;
@@ -1175,6 +1196,8 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
count++;
}
+ DRM_DEBUG_DRIVER("%s: MST_DSC Params set up for %d streams\n", __func__, count);
+
if (count == 0) {
ASSERT(0);
return 0;
@@ -1302,7 +1325,7 @@ static bool is_dsc_need_re_compute(
continue;
aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
- if (!aconnector || !aconnector->dsc_aux)
+ if (!aconnector)
continue;
stream_on_link[new_stream_on_link_num] = aconnector;
@@ -1673,16 +1696,16 @@ clean_exit:
return ret;
}
-static unsigned int kbps_from_pbn(unsigned int pbn)
+static uint32_t kbps_from_pbn(unsigned int pbn)
{
- unsigned int kbps = pbn;
+ uint64_t kbps = (uint64_t)pbn;
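+	/* widen to 64 bits before scaling so pbn * 8 * 54 * (1000000 / PEAK_FACTOR_X1000) cannot overflow 32 bits */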
kbps *= (1000000 / PEAK_FACTOR_X1000);
kbps *= 8;
kbps *= 54;
kbps /= 64;
- return kbps;
+ return (uint32_t)kbps;
}
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
@@ -1804,9 +1827,18 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct drm_dp_mst_port *immediate_upstream_port = NULL;
uint32_t end_link_bw = 0;
- /*Get last DP link BW capability*/
- if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
- if (stream_kbps > end_link_bw) {
+	/* Get the last DP link's BW capability; the mode must be supported by a legacy peer */
+ if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
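+		/* Re-read the end-link BW only when the port's full_pbn changed; otherwise use the cached value */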
+ if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
+ dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
+ aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
+ aconnector->mst_local_bw = end_link_bw;
+ } else {
+ end_link_bw = aconnector->mst_local_bw;
+ }
+
+ if (end_link_bw > 0 && stream_kbps > end_link_bw) {
DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
@@ -1820,11 +1852,15 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (immediate_upstream_port) {
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
- if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
- "Max dsc compression can't fit into MST available bw\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
- }
+ } else {
+		/* For topology LCT 1 case - only one mstb */
+ virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
+ }
+
+ if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+		DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
+			"Max dsc compression can't fit into MST available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 495e3cd70426..774cc3f4f3fd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -176,7 +177,7 @@ static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier
return AMD_FMT_MOD_GET(TILE, modifier);
}
-static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
+static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(struct dc_tiling_info *tiling_info,
uint64_t tiling_flags)
{
/* Fill GFX8 params */
@@ -189,6 +190,7 @@ static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_inf
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+ tiling_info->gfxversion = DcGfxVersion8;
/* XXX fix me for VI */
tiling_info->gfx8.num_banks = num_banks;
tiling_info->gfx8.array_mode =
@@ -209,7 +211,7 @@ static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_inf
}
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
- union dc_tiling_info *tiling_info)
+ struct dc_tiling_info *tiling_info)
{
/* Fill GFX9 params */
tiling_info->gfx9.num_pipes =
@@ -230,7 +232,7 @@ static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgp
}
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
uint64_t modifier)
{
unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
@@ -260,7 +262,7 @@ static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amd
static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
- const union dc_tiling_info *tiling_info,
+ const struct dc_tiling_info *tiling_info,
const struct dc_plane_dcc_param *dcc,
const struct dc_plane_address *address,
const struct plane_size *plane_size)
@@ -307,18 +309,18 @@ static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdg
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address,
- const bool force_disable_dcc)
+ struct dc_plane_address *address)
{
const uint64_t modifier = afb->base.modifier;
int ret = 0;
amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ tiling_info->gfxversion = DcGfxVersion9;
- if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
+ if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
uint64_t dcc_address = afb->address + afb->base.offsets[1];
bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
@@ -358,10 +360,9 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address,
- const bool force_disable_dcc)
+ struct dc_plane_address *address)
{
const uint64_t modifier = afb->base.modifier;
int ret = 0;
@@ -370,8 +371,9 @@ static int amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(struct amd
amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
+ tiling_info->gfxversion = DcGfxAddr3;
- if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
+ if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
int max_compressed_block = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
dcc->enable = 1;
@@ -835,12 +837,11 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc)
+ bool tmz_surface)
{
const struct drm_framebuffer *fb = &afb->base;
int ret;
@@ -900,16 +901,14 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
ret = amdgpu_dm_plane_fill_gfx12_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
- address,
- force_disable_dcc);
+ address);
if (ret)
return ret;
} else if (adev->family >= AMDGPU_FAMILY_AI) {
ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
- address,
- force_disable_dcc);
+ address);
if (ret)
return ret;
} else {
@@ -1000,14 +999,13 @@ static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state =
dm_plane_state_new->dc_state;
- bool force_disable_dcc = !plane_state->dcc.enable;
amdgpu_dm_plane_fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
afb->tiling_flags,
&plane_state->tiling_info, &plane_state->plane_size,
&plane_state->dcc, &plane_state->address,
- afb->tmz_surface, force_disable_dcc);
+ afb->tmz_surface);
}
return 0;
@@ -1421,6 +1419,20 @@ static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
amdgpu_dm_plane_handle_cursor_update(plane, old_state);
}
+static void amdgpu_dm_plane_panic_flush(struct drm_plane *plane)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane->state);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct dc_plane_state *dc_plane_state;
+
+ if (!dm_plane_state || !dm_plane_state->dc_state)
+ return;
+
+ dc_plane_state = dm_plane_state->dc_state;
+
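+	/* the second argument flags a non-linear (modifier-based) framebuffer */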
+ dc_plane_force_update_for_panic(dc_plane_state, fb->modifier ? true : false);
+}
+
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
@@ -1429,6 +1441,16 @@ static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.atomic_async_update = amdgpu_dm_plane_atomic_async_update
};
+static const struct drm_plane_helper_funcs dm_primary_plane_helper_funcs = {
+ .prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
+ .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
+ .atomic_check = amdgpu_dm_plane_atomic_check,
+ .atomic_async_check = amdgpu_dm_plane_atomic_async_check,
+ .atomic_async_update = amdgpu_dm_plane_atomic_async_update,
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = amdgpu_dm_plane_panic_flush,
+};
+
static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
{
struct dm_plane_state *amdgpu_state = NULL;
@@ -1855,7 +1877,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
plane->type != DRM_PLANE_TYPE_CURSOR)
drm_plane_enable_fb_damage_clips(plane);
- drm_plane_helper_add(plane, &dm_plane_helper_funcs);
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(plane, &dm_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(plane, &dm_plane_helper_funcs);
#ifdef AMD_PRIVATE_COLOR
dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 6498359bff6f..615d2ab2b803 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -47,12 +47,11 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
- bool tmz_surface,
- bool force_disable_dcc);
+ bool tmz_surface);
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f40240aafe98..45858bf1523d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -201,14 +201,13 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*
* Return: true if success
*/
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait)
{
- unsigned int power_opt = 0;
bool psr_enable = false;
DRM_DEBUG_DRIVER("Disabling psr...\n");
- return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
+ return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL);
}
/*
@@ -251,3 +250,33 @@ bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
return allow_active;
}
+
+/**
+ * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR
+ * @stream: stream state attached to the eDP link
+ *
+ * Waits for a max of 500ms for the eDP panel to exit PSR.
+ *
+ * Return: true if panel exited PSR, false otherwise.
+ */
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream)
+{
+ enum dc_psr_state psr_state = PSR_STATE0;
+ struct dc_link *link = stream->link;
+ int retry_count;
+
+ if (link == NULL)
+ return false;
+
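+	/* PSR_STATE0 means the panel is fully out of PSR; poll in 500us steps (1000 * 500us = 500ms) */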
+	for (retry_count = 0; retry_count < 1000; retry_count++) {
+ dc_link_get_psr_state(link, &psr_state);
+ if (psr_state == PSR_STATE0)
+ break;
+ udelay(500);
+ }
+
+ if (retry_count == 1000)
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index cd2d45c2b5ef..e2366321a3c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -34,8 +34,9 @@
void amdgpu_dm_set_psr_caps(struct dc_link *link);
void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream);
#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 0d8498ab9b23..a62f6c51301c 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -3088,11 +3088,12 @@ static enum bp_result construct_integrated_info(
info->ext_disp_conn_info.path[i].ext_encoder_obj_id.id,
info->ext_disp_conn_info.path[i].caps
);
- if (info->ext_disp_conn_info.path[i].caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
- DC_LOG_BIOS("BIOS EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ if ((info->ext_disp_conn_info.path[i].caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ DC_LOG_BIOS("BIOS AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
else if (bp->base.ctx->dc->config.force_bios_fixed_vs) {
- info->ext_disp_conn_info.path[i].caps |= EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
- DC_LOG_BIOS("driver forced EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
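+			/* clear any other external-chip bits before forcing FIXED_VS */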
+ info->ext_disp_conn_info.path[i].caps &= ~AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ info->ext_disp_conn_info.path[i].caps |= AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
+ DC_LOG_BIOS("driver forced AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
}
}
// Log the Checksum and Voltage Swing
@@ -3122,7 +3123,7 @@ static enum bp_result bios_parser_get_vram_info(
struct dc_vram_info *info)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- static enum bp_result result = BP_RESULT_BADBIOSTABLE;
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
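+	/* must not be static: a cached failure would be returned on every later call */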
struct atom_common_table_header *header;
struct atom_data_revision revision;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
index adc710fe4a45..8d2cf95ae739 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -78,10 +78,3 @@ void bios_set_scratch_critical_state(
uint32_t critial_state = state ? 1 : 0;
REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critial_state);
}
-
-uint32_t bios_get_vga_enabled_displays(
- struct dc_bios *bios)
-{
- return REG_READ(BIOS_SCRATCH_3) & 0XFFFF;
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
index e1b4a40a353d..ab162f2fe577 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -34,7 +34,6 @@ uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset,
bool bios_is_accelerated_mode(struct dc_bios *bios);
void bios_set_scratch_acc_mode_change(struct dc_bios *bios, uint32_t state);
void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
-uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios);
#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index ab1132bc896a..d9955c5d2e5e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -174,7 +174,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN32)
###############################################################################
# DCN35
###############################################################################
-CLK_MGR_DCN35 = dcn35_smu.o dcn35_clk_mgr.o
+CLK_MGR_DCN35 = dcn35_smu.o dcn351_clk_mgr.o dcn35_clk_mgr.o
AMD_DAL_CLK_MGR_DCN35 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn35/,$(CLK_MGR_DCN35))
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 0e243f4344d0..4c3e58c730b1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -355,8 +355,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
BREAK_TO_DEBUGGER();
return NULL;
}
+ if (ctx->dce_version == DCN_VERSION_3_51)
+ dcn351_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ else
+ dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index 7920f6f1aa62..76c612ecfe3c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -34,8 +34,8 @@
#include "dm_services.h"
#include "cyan_skillfish_ip_offset.h"
-#include "dcn/dcn_2_0_3_offset.h"
-#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dcn/dcn_2_0_1_offset.h"
+#include "dcn/dcn_2_0_1_sh_mask.h"
#include "clk/clk_11_0_1_offset.h"
#include "clk/clk_11_0_1_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index e93df3d6222e..bc123f1884da 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -50,12 +50,13 @@
#include "link.h"
#include "logger_types.h"
+
+#include "yellow_carp_offset.h"
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
-#include "yellow_carp_offset.h"
-
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 29eff386505a..91d872d6d392 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -53,9 +53,6 @@
#include "logger_types.h"
-#undef DC_LOGGER
-#define DC_LOGGER \
- clk_mgr->base.base.ctx->logger
#define MAX_INSTANCE 7
@@ -77,6 +74,9 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
{ { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } },
{ { 0x0001B400, 0x0242E000, 0, 0, 0, 0, 0, 0 } } } };
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
new file mode 100644
index 000000000000..6a6ae618650b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "dcn35_clk_mgr.h"
+
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define mmCLK1_CLK_PLL_REQ 0x16E37
+
+#define mmCLK1_CLK0_DFS_CNTL 0x16E69
+#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
+#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
+#define mmCLK1_CLK3_DFS_CNTL 0x16E72
+#define mmCLK1_CLK4_DFS_CNTL 0x16E75
+#define mmCLK1_CLK5_DFS_CNTL 0x16E78
+
+#define mmCLK1_CLK0_CURRENT_CNT 0x16EFC
+#define mmCLK1_CLK1_CURRENT_CNT 0x16EFD
+#define mmCLK1_CLK2_CURRENT_CNT 0x16EFE
+#define mmCLK1_CLK3_CURRENT_CNT 0x16EFF
+#define mmCLK1_CLK4_CURRENT_CNT 0x16F00
+#define mmCLK1_CLK5_CURRENT_CNT 0x16F01
+
+#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
+#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
+#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
+#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
+#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
+#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7
+
+#define mmCLK1_CLK0_DS_CNTL 0x16E83
+#define mmCLK1_CLK1_DS_CNTL 0x16E8C
+#define mmCLK1_CLK2_DS_CNTL 0x16E95
+#define mmCLK1_CLK3_DS_CNTL 0x16E9E
+#define mmCLK1_CLK4_DS_CNTL 0x16EA7
+#define mmCLK1_CLK5_DS_CNTL 0x16EB0
+
+#define mmCLK1_CLK0_ALLOW_DS 0x16E84
+#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
+#define mmCLK1_CLK2_ALLOW_DS 0x16E96
+#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
+#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
+#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
+
+#define mmCLK5_spll_field_8 0x1B04B
+#define mmDENTIST_DISPCLK_CNTL 0x0124
+#define regDENTIST_DISPCLK_CNTL 0x0064
+#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+
+// DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+
+#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
+#define REG(reg) \
+ (clk_mgr->regs->reg)
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+
+#define CLK_SR_DCN35(reg_name)\
+ .reg_name = mm ## reg_name
+
+static const struct clk_mgr_registers clk_mgr_regs_dcn351 = {
+ CLK_REG_LIST_DCN35()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift_dcn351 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
+};
+
+static const struct clk_mgr_mask clk_mgr_mask_dcn351 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
+};
+
+#define TO_CLK_MGR_DCN35(clk_mgr)\
+ container_of(clk_mgr, struct clk_mgr_dcn35, base)
+
+void dcn351_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_dcn35 *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+	/* DCN3.51 clock registers live at different offsets; use its own reg tables */
+ clk_mgr->base.regs = &clk_mgr_regs_dcn351;
+ clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn351;
+ clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn351;
+
+ dcn35_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index b46a3afe48ca..1648226586e2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -36,15 +36,11 @@
#include "dcn20/dcn20_clk_mgr.h"
-
-
#include "reg_helper.h"
#include "core_types.h"
#include "dcn35_smu.h"
#include "dm_helpers.h"
-/* TODO: remove this include once we ported over remaining clk mgr functions*/
-#include "dcn30/dcn30_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
@@ -55,34 +51,102 @@
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
-#define regCLK1_CLK_PLL_REQ 0x0237
-#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define mmCLK1_CLK_PLL_REQ 0x16E37
+
+#define mmCLK1_CLK0_DFS_CNTL 0x16E69
+#define mmCLK1_CLK1_DFS_CNTL 0x16E6C
+#define mmCLK1_CLK2_DFS_CNTL 0x16E6F
+#define mmCLK1_CLK3_DFS_CNTL 0x16E72
+#define mmCLK1_CLK4_DFS_CNTL 0x16E75
+#define mmCLK1_CLK5_DFS_CNTL 0x16E78
+
+#define mmCLK1_CLK0_CURRENT_CNT 0x16EFB
+#define mmCLK1_CLK1_CURRENT_CNT 0x16EFC
+#define mmCLK1_CLK2_CURRENT_CNT 0x16EFD
+#define mmCLK1_CLK3_CURRENT_CNT 0x16EFE
+#define mmCLK1_CLK4_CURRENT_CNT 0x16EFF
+#define mmCLK1_CLK5_CURRENT_CNT 0x16F00
+
+#define mmCLK1_CLK0_BYPASS_CNTL 0x16E8A
+#define mmCLK1_CLK1_BYPASS_CNTL 0x16E93
+#define mmCLK1_CLK2_BYPASS_CNTL 0x16E9C
+#define mmCLK1_CLK3_BYPASS_CNTL 0x16EA5
+#define mmCLK1_CLK4_BYPASS_CNTL 0x16EAE
+#define mmCLK1_CLK5_BYPASS_CNTL 0x16EB7
+
+#define mmCLK1_CLK0_DS_CNTL 0x16E83
+#define mmCLK1_CLK1_DS_CNTL 0x16E8C
+#define mmCLK1_CLK2_DS_CNTL 0x16E95
+#define mmCLK1_CLK3_DS_CNTL 0x16E9E
+#define mmCLK1_CLK4_DS_CNTL 0x16EA7
+#define mmCLK1_CLK5_DS_CNTL 0x16EB0
+
+#define mmCLK1_CLK0_ALLOW_DS 0x16E84
+#define mmCLK1_CLK1_ALLOW_DS 0x16E8D
+#define mmCLK1_CLK2_ALLOW_DS 0x16E96
+#define mmCLK1_CLK3_ALLOW_DS 0x16E9F
+#define mmCLK1_CLK4_ALLOW_DS 0x16EA8
+#define mmCLK1_CLK5_ALLOW_DS 0x16EB1
+
+#define mmCLK5_spll_field_8 0x1B24B
+#define mmDENTIST_DISPCLK_CNTL 0x0124
+#define regDENTIST_DISPCLK_CNTL 0x0064
+#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+// DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+
+#define CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
-#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
-#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
-#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
-#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
-#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
-#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
-#define regCLK1_CLK2_BYPASS_CNTL 0x029c
-#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+#define REG(reg) \
+ (clk_mgr->regs->reg)
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
-#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
-#define regCLK5_0_CLK5_spll_field_8 0x464b
-#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+#define BASE(seg) BASE_INNER(seg)
-#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
-#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+#define SR(reg_name)\
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
-#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+#define CLK_SR_DCN35(reg_name)\
+ .reg_name = mm ## reg_name
-#define REG(reg_name) \
- (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+static const struct clk_mgr_registers clk_mgr_regs_dcn35 = {
+ CLK_REG_LIST_DCN35()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift_dcn35 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
+};
+
+static const struct clk_mgr_mask clk_mgr_mask_dcn35 = {
+ CLK_COMMON_MASK_SH_LIST_DCN32(_MASK)
+};
#define TO_CLK_MGR_DCN35(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn35, base)
@@ -132,6 +196,8 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+ struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dccg *dccg = clk_mgr_internal->dccg;
struct pipe_ctx *pipe = safe_to_lower
? &context->res_ctx.pipe_ctx[i]
: &dc->current_state->res_ctx.pipe_ctx[i];
@@ -148,8 +214,21 @@ static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *
new_pipe->stream_res.stream_enc &&
new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled &&
new_pipe->stream_res.stream_enc->funcs->is_fifo_enabled(new_pipe->stream_res.stream_enc);
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
- !pipe->stream->link_enc) && !stream_changed_otg_dig_on) {
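+		/* Skip the workaround when a 128b/132b (HPO) stream stays active on this pipe across the transition */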
+		bool has_active_hpo = false;
+
+		if (old_pipe->stream && new_pipe->stream && old_pipe->stream == new_pipe->stream) {
+			has_active_hpo = dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(old_pipe) &&
+				dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(new_pipe);
+		}
+
+		if (!has_active_hpo && !dccg->ctx->dc->link_srv->dp_is_128b_132b_signal(pipe) &&
+		    (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+		     !pipe->stream->link_enc) && !stream_changed_otg_dig_on)) {
/* This w/a should not trigger when we have a dig active */
if (disable) {
if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
@@ -257,11 +336,11 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
uint32_t host_router_bw_kbps[MAX_HOST_ROUTERS_NUM] = { 0 };
int i;
-
for (i = 0; i < context->stream_count; ++i) {
const struct dc_stream_state *stream = context->streams[i];
const struct dc_link *link = stream->link;
- uint8_t lowest_dpia_index = 0, hr_index = 0;
+ uint8_t lowest_dpia_index = 0;
+ unsigned int hr_index = 0;
if (!link)
continue;
@@ -271,6 +350,8 @@ static void dcn35_notify_host_router_bw(struct clk_mgr *clk_mgr_base, struct dc_
continue;
hr_index = (link->link_index - lowest_dpia_index) / 2;
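+		/* guard against links that would index past the host-router array */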
+ if (hr_index >= MAX_HOST_ROUTERS_NUM)
+ continue;
host_router_bw_kbps[hr_index] += dc_bandwidth_in_kbps_from_timing(
&stream->timing, dc_link_get_highest_encoding_format(link));
}
@@ -320,6 +401,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk)
dcn35_smu_set_dtbclk(clk_mgr, false);
+
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
/* check that we're not already in lower */
@@ -337,11 +419,17 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
- dcn35_smu_set_dtbclk(clk_mgr, true);
- clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ int actual_dtbclk = 0;
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
- clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ dcn35_smu_set_dtbclk(clk_mgr, true);
+
+ actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
+
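+		/* only latch the new ref clock and enable state once hardware reports a non-zero DTBCLK */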
+ if (actual_dtbclk) {
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
}
/* check that we're not already in D0 */
@@ -434,7 +522,6 @@ static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
struct fixed31_32 pll_req;
unsigned int fbmult_frac_val = 0;
unsigned int fbmult_int_val = 0;
- struct dc_context *ctx = clk_mgr->base.ctx;
/*
* Register value of fbmult is in 8.16 format, we are converting to 314.32
@@ -494,22 +581,20 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc_context *ctx = clk_mgr->base.ctx;
+
uint32_t ssc_enable;
- REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+ ssc_enable = REG_READ(CLK5_spll_field_8) & CLK5_spll_field_8__spll_ssc_en_MASK;
- return ssc_enable == 1;
+ return ssc_enable != 0;
}
static void init_clk_states(struct clk_mgr *clk_mgr)
{
- struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
- if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
- clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
clk_mgr->clks.p_state_change_support = true;
clk_mgr->clks.prev_p_state_change_support = true;
@@ -520,6 +605,7 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+
init_clk_states(clk_mgr);
// to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk
@@ -614,6 +700,7 @@ static struct wm_table lpddr5_wm_table = {
};
static DpmClocks_t_dcn35 dummy_clocks;
+static DpmClocks_t_dcn351 dummy_clocks_dcn351;
static struct dcn35_watermarks dummy_wms = { 0 };
@@ -624,10 +711,10 @@ static struct dcn35_ss_info_table ss_info_table = {
static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
{
- struct dc_context *ctx = clk_mgr->base.ctx;
- uint32_t clock_source;
+ uint32_t clock_source = 0;
+
+ clock_source = REG_READ(CLK1_CLK2_BYPASS_CNTL) & CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK;
- REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
// If it's DFS mode, clock_source is 0.
if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
@@ -737,6 +824,22 @@ static void dcn35_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}
+static void dcn351_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
+ struct dcn351_smu_dpm_clks *smu_dpm_clks)
+{
+ DpmClocks_t_dcn351 *table = smu_dpm_clks->dpm_clks;
+
+ if (!clk_mgr->smu_ver)
+ return;
+ if (!table || smu_dpm_clks->mc_address.quad_part == 0)
+ return;
+ memset(table, 0, sizeof(*table));
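+	/* hand the GART address of the buffer to the SMU, then ask it to copy its DPM table into DRAM */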
+ dcn35_smu_set_dram_addr_high(clk_mgr,
+ smu_dpm_clks->mc_address.high_part);
+ dcn35_smu_set_dram_addr_low(clk_mgr,
+ smu_dpm_clks->mc_address.low_part);
+ dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
+}
static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
uint32_t max = 0;
@@ -975,11 +1078,8 @@ static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- bool ips_supported = true;
-
- ips_supported = dcn35_smu_get_ips_supported(clk_mgr) ? true : false;
- return ips_supported;
+ return dcn35_smu_get_ips_supported(clk_mgr) ? true : false;
}
static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
@@ -1078,6 +1178,57 @@ struct clk_mgr_funcs dcn35_fpga_funcs = {
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
};
+static void translate_to_DpmClocks_t_dcn35(struct dcn351_smu_dpm_clks *smu_dpm_clks_a,
+ struct dcn35_smu_dpm_clks *smu_dpm_clks_b)
+{
+	/* translate between the two table layouts, copying only the clock tables DC needs */
+ uint8_t i;
+
+ if (smu_dpm_clks_a == NULL || smu_dpm_clks_b == NULL ||
+ smu_dpm_clks_a->dpm_clks == NULL || smu_dpm_clks_b->dpm_clks == NULL)
+ return;
+
+ for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++)
+ smu_dpm_clks_b->dpm_clks->DcfClocks[i] = smu_dpm_clks_a->dpm_clks->DcfClocks[i];
+
+ for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++)
+ smu_dpm_clks_b->dpm_clks->DispClocks[i] = smu_dpm_clks_a->dpm_clks->DispClocks[i];
+
+ for (i = 0; i < NUM_DPPCLK_DPM_LEVELS; i++)
+ smu_dpm_clks_b->dpm_clks->DppClocks[i] = smu_dpm_clks_a->dpm_clks->DppClocks[i];
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ smu_dpm_clks_b->dpm_clks->FclkClocks_Freq[i] = smu_dpm_clks_a->dpm_clks->FclkClocks_Freq[i];
+ smu_dpm_clks_b->dpm_clks->FclkClocks_Voltage[i] = smu_dpm_clks_a->dpm_clks->FclkClocks_Voltage[i];
+ }
+ for (i = 0; i < NUM_MEM_PSTATE_LEVELS; i++) {
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].MemClk =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].MemClk;
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].UClk =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].UClk;
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].Voltage =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].Voltage;
+ smu_dpm_clks_b->dpm_clks->MemPstateTable[i].WckRatio =
+ smu_dpm_clks_a->dpm_clks->MemPstateTable[i].WckRatio;
+ }
+ smu_dpm_clks_b->dpm_clks->MaxGfxClk = smu_dpm_clks_a->dpm_clks->MaxGfxClk;
+ smu_dpm_clks_b->dpm_clks->MinGfxClk = smu_dpm_clks_a->dpm_clks->MinGfxClk;
+ smu_dpm_clks_b->dpm_clks->NumDcfClkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumDcfClkLevelsEnabled;
+ smu_dpm_clks_b->dpm_clks->NumDispClkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumDispClkLevelsEnabled;
+ smu_dpm_clks_b->dpm_clks->NumFclkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumFclkLevelsEnabled;
+ smu_dpm_clks_b->dpm_clks->NumMemPstatesEnabled =
+ smu_dpm_clks_a->dpm_clks->NumMemPstatesEnabled;
+ smu_dpm_clks_b->dpm_clks->NumSocClkLevelsEnabled =
+ smu_dpm_clks_a->dpm_clks->NumSocClkLevelsEnabled;
+
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
+ smu_dpm_clks_b->dpm_clks->SocClocks[i] = smu_dpm_clks_a->dpm_clks->SocClocks[i];
+ smu_dpm_clks_b->dpm_clks->SocVoltage[i] = smu_dpm_clks_a->dpm_clks->SocVoltage[i];
+ }
+}
void dcn35_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn35 *clk_mgr,
@@ -1085,6 +1236,7 @@ void dcn35_clk_mgr_construct(
struct dccg *dccg)
{
struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
+ struct dcn351_smu_dpm_clks smu_dpm_clks_dcn351 = { 0 };
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn35_funcs;
@@ -1097,6 +1249,12 @@ void dcn35_clk_mgr_construct(
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000;
+ if (ctx->dce_version == DCN_VERSION_3_5) {
+ clk_mgr->base.regs = &clk_mgr_regs_dcn35;
+ clk_mgr->base.clk_mgr_shift = &clk_mgr_shift_dcn35;
+ clk_mgr->base.clk_mgr_mask = &clk_mgr_mask_dcn35;
+ }
+
clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
@@ -1115,14 +1273,24 @@ void dcn35_clk_mgr_construct(
DC_MEM_ALLOC_TYPE_GART,
sizeof(DpmClocks_t_dcn35),
&smu_dpm_clks.mc_address.quad_part);
-
if (smu_dpm_clks.dpm_clks == NULL) {
smu_dpm_clks.dpm_clks = &dummy_clocks;
smu_dpm_clks.mc_address.quad_part = 0;
}
-
ASSERT(smu_dpm_clks.dpm_clks);
+ if (ctx->dce_version == DCN_VERSION_3_51) {
+ smu_dpm_clks_dcn351.dpm_clks = (DpmClocks_t_dcn351 *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.base.ctx,
+ DC_MEM_ALLOC_TYPE_GART,
+ sizeof(DpmClocks_t_dcn351),
+ &smu_dpm_clks_dcn351.mc_address.quad_part);
+ if (smu_dpm_clks_dcn351.dpm_clks == NULL) {
+ smu_dpm_clks_dcn351.dpm_clks = &dummy_clocks_dcn351;
+ smu_dpm_clks_dcn351.mc_address.quad_part = 0;
+ }
+ }
+
clk_mgr->base.smu_ver = dcn35_smu_get_smu_version(&clk_mgr->base);
if (clk_mgr->base.smu_ver)
@@ -1151,7 +1319,11 @@ void dcn35_clk_mgr_construct(
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
int i;
- dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ if (ctx->dce_version == DCN_VERSION_3_51) {
+ dcn351_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks_dcn351);
+ translate_to_DpmClocks_t_dcn35(&smu_dpm_clks_dcn351, &smu_dpm_clks);
+		} else {
+			dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+		}
DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
"NumDispClkLevelsEnabled: %d\n"
"NumSocClkLevelsEnabled: %d\n"
@@ -1212,6 +1384,10 @@ void dcn35_clk_mgr_construct(
dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART,
smu_dpm_clks.dpm_clks);
+ if (smu_dpm_clks_dcn351.dpm_clks && smu_dpm_clks_dcn351.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART,
+ smu_dpm_clks_dcn351.dpm_clks);
+
if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
bool ips_support = false;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
index 1203dc605b12..a12a9bf90806 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h
@@ -60,4 +60,8 @@ void dcn35_clk_mgr_construct(struct dc_context *ctx,
void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+void dcn351_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_dcn35 *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg);
#endif //__DCN35_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
index 3fae13c73934..ab9d21ba0c43 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h
@@ -126,18 +126,31 @@ typedef struct {
uint32_t MaxGfxClk;
} DpmClocks_t_dcn35;
-
-// Throttler Status Bitmask
-
-
-
-
-
-
-
-
-
-
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
+ uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+ uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+ uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; // Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t Vcn0ClkLevelsEnabled; // Applies to both Vclk0 and Dclk0
+ uint8_t Vcn1ClkLevelsEnabled; // Applies to both Vclk1 and Dclk1
+ uint8_t VpeClkLevelsEnabled;
+ uint8_t NumMemPstatesEnabled;
+ uint8_t NumFclkLevelsEnabled;
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks_t_dcn351;
#define TABLE_BIOS_IF 0 // Called by BIOS
#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS
@@ -163,6 +176,10 @@ struct dcn35_smu_dpm_clks {
union large_integer mc_address;
};
+struct dcn351_smu_dpm_clks {
+ DpmClocks_t_dcn351 *dpm_clks;
+ union large_integer mc_address;
+};
/* TODO: taken from vgh, may not be correct */
struct display_idle_optimization {
unsigned int df_request_disabled : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
index dbfdd3487da5..2e0d34fd7512 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h
@@ -43,7 +43,9 @@
#define DALSMC_MSG_ActiveUclkFclk 0x18
#define DALSMC_MSG_IdleUclkFclk 0x19
#define DALSMC_MSG_SetUclkPstateAllow 0x1A
-#define DALSMC_Message_Count 0x1B
+#define DALSMC_MSG_SubvpUclkFclk 0x1B
+#define DALSMC_MSG_GetNumUmcChannels 0x1C
+#define DALSMC_Message_Count 0x1D
typedef enum {
FCLK_SWITCH_DISALLOW,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 8cfc5f435937..8082bb877611 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -141,6 +141,20 @@ static bool dcn401_is_ppclk_idle_dpm_enabled(struct clk_mgr_internal *clk_mgr, P
return ppclk_idle_dpm_enabled;
}
+static bool dcn401_is_df_throttle_opt_enabled(struct clk_mgr_internal *clk_mgr)
+{
+ bool is_df_throttle_opt_enabled = false;
+
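+	/* only GC 12.0.1 A0 with SMU firmware >= 0x663500 supports the DF throttle optimization */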
+ if (ASICREV_IS_GC_12_0_1_A0(clk_mgr->base.ctx->asic_id.hw_internal_rev) &&
+ clk_mgr->smu_ver >= 0x663500) {
+ is_df_throttle_opt_enabled = !clk_mgr->base.ctx->dc->debug.force_subvp_df_throttle;
+ }
+
+ is_df_throttle_opt_enabled &= clk_mgr->smu_present;
+
+ return is_df_throttle_opt_enabled;
+}
+
/* Query SMU for all clock states for a particular clock */
static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0,
unsigned int *num_levels)
@@ -614,207 +628,6 @@ static void dcn401_update_clocks_update_dentist(
}
-static void dcn401_update_clocks_legacy(struct clk_mgr *clk_mgr_base,
- struct dc_state *context,
- bool safe_to_lower)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
- struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
- bool update_dppclk = false;
- bool update_dispclk = false;
- bool enter_display_off = false;
- bool dpp_clock_lowered = false;
- struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- bool force_reset = false;
- bool update_uclk = false, update_fclk = false;
- bool p_state_change_support;
- bool fclk_p_state_change_support;
- int total_plane_count;
-
- if (dc->work_arounds.skip_clock_update)
- return;
-
- if (clk_mgr_base->clks.dispclk_khz == 0 ||
- (dc->debug.force_clock_mode & 0x1)) {
- /* This is from resume or boot up, if forced_clock cfg option used,
- * we bypass program dispclk and DPPCLK, but need set them for S3.
- */
- force_reset = true;
-
- dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
-
- /* Force_clock_mode 0x1: force reset the clock even it is the same clock
- * as long as it is in Passive level.
- */
- }
- display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
-
- if (display_count == 0)
- enter_display_off = true;
-
- if (clk_mgr->smu_present) {
- if (enter_display_off == safe_to_lower)
- dcn401_smu_set_num_of_displays(clk_mgr, display_count);
-
- clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
-
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
-
- if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
- clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
-
- /* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW */
- if (clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- dcn401_smu_send_fclk_pstate_message(clk_mgr, true);
- }
- }
-
- if (dc->debug.force_min_dcfclk_mhz > 0)
- new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
- new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
-
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
- clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DCFCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCFCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_khz));
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
- clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DCFCLK))
- dcn401_smu_set_min_deep_sleep_dcef_clk(clk_mgr, khz_to_mhz_ceil(clk_mgr_base->clks.dcfclk_deep_sleep_khz));
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
- /* We don't actually care about socclk, don't notify SMU of hard min */
- clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
-
- clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
-
- if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
- clk_mgr_base->clks.num_ways < new_clocks->num_ways) {
- clk_mgr_base->clks.num_ways = new_clocks->num_ways;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
- }
-
-
- p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.prev_p_state_change_support)) {
- clk_mgr_base->clks.p_state_change_support = p_state_change_support;
- clk_mgr_base->clks.fw_based_mclk_switching = p_state_change_support && new_clocks->fw_based_mclk_switching;
-
- /* to disable P-State switching, set UCLK min = max */
- if (!clk_mgr_base->clks.p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
- }
-
- /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
- if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
- update_fclk = true;
- }
-
- if (!clk_mgr_base->clks.fclk_p_state_change_support &&
- update_fclk &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_FCLK)) {
- /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
- dcn401_smu_send_fclk_pstate_message(clk_mgr, false);
- }
-
- /* Always update saved value, even if new value not set due to P-State switching unsupported */
- if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
- clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
- update_uclk = true;
- }
-
- /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
- if (clk_mgr_base->clks.p_state_change_support &&
- (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
-
- if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
- clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
- clk_mgr_base->clks.num_ways = new_clocks->num_ways;
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_UCLK))
- dcn401_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
- }
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
- if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
- dpp_clock_lowered = true;
-
- clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
- clk_mgr_base->clks.actual_dppclk_khz = new_clocks->dppclk_khz;
-
- if (clk_mgr->smu_present && !dpp_clock_lowered && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK))
- clk_mgr_base->clks.actual_dppclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DPPCLK, clk_mgr_base->clks.dppclk_khz);
- update_dppclk = true;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
-
- if (clk_mgr->smu_present && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK))
- clk_mgr_base->clks.actual_dispclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DISPCLK, clk_mgr_base->clks.dispclk_khz);
-
- update_dispclk = true;
- }
-
- if (!new_clocks->dtbclk_en && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DTBCLK)) {
- new_clocks->ref_dtbclk_khz = clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
- }
-
- /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
- if (!dc->debug.disable_dtb_ref_clk_switch &&
- should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DTBCLK)) {
- /* DCCG requires KHz precision for DTBCLK */
- clk_mgr_base->clks.ref_dtbclk_khz =
- dcn401_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
-
- dcn401_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
- }
-
- if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
- if (dpp_clock_lowered) {
- /* if clock is being lowered, increase DTO before lowering refclk */
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context,
- safe_to_lower, clk_mgr_base->clks.dppclk_khz);
- dcn401_update_clocks_update_dentist(clk_mgr, context);
- if (clk_mgr->smu_present && dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK)) {
- clk_mgr_base->clks.actual_dppclk_khz = dcn401_set_hard_min_by_freq_optimized(clk_mgr, PPCLK_DPPCLK,
- clk_mgr_base->clks.dppclk_khz);
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower,
- clk_mgr_base->clks.actual_dppclk_khz);
- }
-
- } else {
- /* if clock is being raised, increase refclk before lowering DTO */
- if (update_dppclk || update_dispclk)
- dcn401_update_clocks_update_dentist(clk_mgr, context);
- /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
- * that we do not lower dto when it is not safe to lower. We do not need to
- * compare the current and new dppclk before calling this function.
- */
- dcn401_update_clocks_update_dpp_dto(clk_mgr, context,
- safe_to_lower, clk_mgr_base->clks.actual_dppclk_khz);
- }
- }
-
- if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
- /*update dmcu for wait_loop count*/
- dmcu->funcs->set_psr_wait_loop(dmcu,
- clk_mgr_base->clks.dispclk_khz / 1000 / 7);
-}
-
static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned int num_steps)
{
struct clk_mgr_internal *clk_mgr_internal = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -869,6 +682,12 @@ static void dcn401_execute_block_sequence(struct clk_mgr *clk_mgr_base, unsigned
params->update_idle_hardmin_params.uclk_mhz,
params->update_idle_hardmin_params.fclk_mhz);
break;
+ case CLK_MGR401_UPDATE_SUBVP_HARDMINS:
+ dcn401_smu_set_subvp_uclk_fclk_hardmin(
+ clk_mgr_internal,
+ params->update_idle_hardmin_params.uclk_mhz,
+ params->update_idle_hardmin_params.fclk_mhz);
+ break;
case CLK_MGR401_UPDATE_DEEP_SLEEP_DCFCLK:
dcn401_smu_set_min_deep_sleep_dcef_clk(
clk_mgr_internal,
@@ -945,15 +764,21 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
bool update_active_uclk = false;
bool update_idle_fclk = false;
bool update_idle_uclk = false;
+ bool update_subvp_prefetch_dramclk = false;
+ bool update_subvp_prefetch_fclk = false;
bool is_idle_dpm_enabled = dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) &&
dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK) &&
dcn401_is_ppclk_idle_dpm_enabled(clk_mgr_internal, PPCLK_UCLK) &&
dcn401_is_ppclk_idle_dpm_enabled(clk_mgr_internal, PPCLK_FCLK);
+ bool is_df_throttle_opt_enabled = is_idle_dpm_enabled &&
+ dcn401_is_df_throttle_opt_enabled(clk_mgr_internal);
int total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
int active_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz);
int active_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.fclk_khz);
int idle_uclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_dramclk_khz);
int idle_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.idle_fclk_khz);
+ int subvp_prefetch_dramclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_dramclk_khz);
+ int subvp_prefetch_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_fclk_khz);
unsigned int num_steps = 0;
@@ -982,15 +807,15 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
update_active_fclk = true;
update_idle_fclk = true;
- /* To enable FCLK P-state switching, send PSTATE_SUPPORTED message to PMFW */
- if (clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
- block_sequence[num_steps].params.update_pstate_support_params.support = true;
- block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
- num_steps++;
- }
- }
+ /* To enable FCLK P-state switching we would send the PSTATE_SUPPORTED message to PMFW, but that message is not supported on DCN401, so the sequence below stays disabled. */
+ // if (clk_mgr_base->clks.fclk_p_state_change_support) {
+ // /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
+ // if (dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
+ // block_sequence[num_steps].params.update_pstate_support_params.support = true;
+ // block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
+ // num_steps++;
+ // }
+ // }
}
if (!clk_mgr_base->clks.fclk_p_state_change_support && dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
@@ -1109,6 +934,12 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
}
}
+ if (should_set_clock(safe_to_lower, new_clocks->subvp_prefetch_dramclk_khz, clk_mgr_base->clks.subvp_prefetch_dramclk_khz)) {
+ clk_mgr_base->clks.subvp_prefetch_dramclk_khz = new_clocks->subvp_prefetch_dramclk_khz;
+ update_subvp_prefetch_dramclk = true;
+ subvp_prefetch_dramclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_dramclk_khz);
+ }
+
/* FCLK */
/* Always update saved value, even if new value not set due to P-State switching unsupported */
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
@@ -1129,6 +960,12 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
}
}
+ if (should_set_clock(safe_to_lower, new_clocks->subvp_prefetch_fclk_khz, clk_mgr_base->clks.subvp_prefetch_fclk_khz)) {
+ clk_mgr_base->clks.subvp_prefetch_fclk_khz = new_clocks->subvp_prefetch_fclk_khz;
+ update_subvp_prefetch_fclk = true;
+ subvp_prefetch_fclk_mhz = khz_to_mhz_ceil(clk_mgr_base->clks.subvp_prefetch_fclk_khz);
+ }
+
/* When idle DPM is enabled, need to send active and idle hardmins separately */
/* CLK_MGR401_UPDATE_ACTIVE_HARDMINS */
if ((update_active_uclk || update_active_fclk) && is_idle_dpm_enabled) {
@@ -1146,6 +983,14 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
num_steps++;
}
+ /* CLK_MGR401_UPDATE_SUBVP_HARDMINS */
+ if ((update_subvp_prefetch_dramclk || update_subvp_prefetch_fclk) && is_df_throttle_opt_enabled) {
+ block_sequence[num_steps].params.update_idle_hardmin_params.uclk_mhz = subvp_prefetch_dramclk_mhz;
+ block_sequence[num_steps].params.update_idle_hardmin_params.fclk_mhz = subvp_prefetch_fclk_mhz;
+ block_sequence[num_steps].func = CLK_MGR401_UPDATE_SUBVP_HARDMINS;
+ num_steps++;
+ }
+
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (update_active_uclk || update_idle_uclk) {
if (!is_idle_dpm_enabled) {
@@ -1178,14 +1023,14 @@ static unsigned int dcn401_build_update_bandwidth_clocks_sequence(
// (*num_steps)++;
// }
- /* disable FCLK P-State support if needed */
- if (!fclk_p_state_change_support &&
- should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_prev_p_state_change_support) &&
- dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
- block_sequence[num_steps].params.update_pstate_support_params.support = false;
- block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
- num_steps++;
- }
+ /* disable FCLK P-State support if needed (message not supported on DCN401) */
+ // if (!fclk_p_state_change_support &&
+ // should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_prev_p_state_change_support) &&
+ // dcn401_is_ppclk_dpm_enabled(clk_mgr_internal, PPCLK_FCLK)) {
+ // block_sequence[num_steps].params.update_pstate_support_params.support = false;
+ // block_sequence[num_steps].func = CLK_MGR401_UPDATE_FCLK_PSTATE_SUPPORT;
+ // num_steps++;
+ // }
}
if (new_clocks->fw_based_mclk_switching != clk_mgr_base->clks.fw_based_mclk_switching &&
@@ -1366,11 +1211,6 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
unsigned int num_steps = 0;
- if (dc->debug.enable_legacy_clock_update) {
- dcn401_update_clocks_legacy(clk_mgr_base, context, safe_to_lower);
- return;
- }
-
/* build bandwidth related clocks update sequence */
num_steps = dcn401_build_update_bandwidth_clocks_sequence(clk_mgr_base,
context,
@@ -1505,6 +1345,20 @@ static void dcn401_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool curren
dcn401_execute_block_sequence(clk_mgr_base, num_steps);
}
+static int dcn401_get_hard_min_memclk(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->base.ctx->dc->current_state->bw_ctx.bw.dcn.clk.dramclk_khz;
+}
+
+static int dcn401_get_hard_min_fclk(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ return clk_mgr->base.ctx->dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz;
+}
+
/* Get current memclk states, update bounding box */
static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
@@ -1549,6 +1403,15 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
if (clk_mgr->dpm_present && !num_levels)
clk_mgr->dpm_present = false;
+ clk_mgr_base->bw_params->num_channels = dcn401_smu_get_num_of_umc_channels(clk_mgr);
+ if (clk_mgr_base->ctx->dc_bios) {
+ /* use BIOS values if none provided by PMFW */
+ if (clk_mgr_base->bw_params->num_channels == 0) {
+ clk_mgr_base->bw_params->num_channels = clk_mgr_base->ctx->dc_bios->vram_info.num_chans;
+ }
+ clk_mgr_base->bw_params->dram_channel_width_bytes = clk_mgr_base->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+ }
+
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
@@ -1638,6 +1501,8 @@ static struct clk_mgr_funcs dcn401_funcs = {
.enable_pme_wa = dcn401_enable_pme_wa,
.is_smu_present = dcn401_is_smu_present,
.get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist,
+ .get_hard_min_memclk = dcn401_get_hard_min_memclk,
+ .get_hard_min_fclk = dcn401_get_hard_min_fclk,
};
struct clk_mgr_internal *dcn401_clk_mgr_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
index 8b0461992b22..6c9ae5ca2c7e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h
@@ -90,6 +90,7 @@ enum dcn401_clk_mgr_block_sequence_func {
CLK_MGR401_UPDATE_DTBCLK_DTO,
CLK_MGR401_UPDATE_DENTIST,
CLK_MGR401_UPDATE_PSR_WAIT_LOOP,
+ CLK_MGR401_UPDATE_SUBVP_HARDMINS,
};
struct dcn401_clk_mgr_block_sequence {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
index 7700477d019b..21c35528f61f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
@@ -21,6 +21,14 @@
#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); }
+/* temporary defines */
+#ifndef DALSMC_MSG_SubvpUclkFclk
+#define DALSMC_MSG_SubvpUclkFclk 0x1B
+#endif
+#ifndef DALSMC_MSG_GetNumUmcChannels
+#define DALSMC_MSG_GetNumUmcChannels 0x1C
+#endif
+
/*
* Function to be used instead of REG_WAIT macro because the wait ends when
* the register is NOT EQUAL to zero, and because the translation in msg_if.h
@@ -296,6 +304,24 @@ bool dcn401_smu_set_active_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
return success;
}
+bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
+ uint16_t uclk_freq_mhz,
+ uint16_t fclk_freq_mhz)
+{
+ uint32_t response = 0;
+ bool success;
+
+ /* bits 15:0 for uclk, bits 31:16 for fclk */
+ uint32_t param = (fclk_freq_mhz << 16) | uclk_freq_mhz;
+
+ smu_print("SMU Set active hardmin by freq: uclk_freq_mhz = %d MHz, fclk_freq_mhz = %d MHz\n", uclk_freq_mhz, fclk_freq_mhz);
+
+ success = dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SubvpUclkFclk, param, &response);
+
+ return success;
+}
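/*
 * The parameter word built above packs uclk into bits 15:0 and fclk into
 * bits 31:16. A matching decode, shown only to make the layout explicit
 * (the helper itself is hypothetical):
 */
static inline void decode_uclk_fclk_param(uint32_t param,
					  uint16_t *uclk_mhz,
					  uint16_t *fclk_mhz)
{
	*uclk_mhz = param & 0xFFFF;		/* bits 15:0 */
	*fclk_mhz = (param >> 16) & 0xFFFF;	/* bits 31:16 */
}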
+
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz)
{
smu_print("SMU Set min deep sleep dcef clk: freq_mhz = %d MHz\n", freq_mhz);
@@ -311,3 +337,14 @@ void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t n
dcn401_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_NumOfDisplays, num_displays, NULL);
}
+
+unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr)
+{
+ unsigned int response = 0;
+
+ dcn401_smu_send_msg_with_param(clk_mgr, DALSMC_MSG_GetNumUmcChannels, 0, &response);
+
+ smu_print("SMU Get Num UMC Channels: num_umc_channels = %d\n", response);
+
+ return response;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
index 651fb8d62864..e02eb1294b37 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
@@ -23,7 +23,11 @@ bool dcn401_smu_set_idle_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
bool dcn401_smu_set_active_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
uint16_t uclk_freq_mhz,
uint16_t fclk_freq_mhz);
+bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
+ uint16_t uclk_freq_mhz,
+ uint16_t fclk_freq_mhz);
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
+unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr);
#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5c39390ecbd5..f84e795e35f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -579,7 +579,7 @@ dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
- struct rect *rect, bool is_stop)
+ struct rect *rect, uint8_t phy_id, bool is_stop)
{
struct dmcu *dmcu;
struct dc_dmub_srv *dmub_srv;
@@ -598,7 +598,7 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
if (i == MAX_PIPES)
return false;
- mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
+ mux_mapping.phy_output_num = phy_id;
mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
dmcu = dc->res_pool->dmcu;
@@ -615,25 +615,89 @@ dc_stream_forward_crc_window(struct dc_stream_state *stream,
return true;
}
+
+static void
+dc_stream_forward_dmub_multiple_crc_window(struct dc_dmub_srv *dmub_srv,
+ struct crc_window *window, struct otg_phy_mux *mux_mapping, bool stop)
+{
+ int i;
+ union dmub_rb_cmd cmd = {0};
+
+ cmd.secure_display.mul_roi_ctl.phy_id = mux_mapping->phy_output_num;
+ cmd.secure_display.mul_roi_ctl.otg_id = mux_mapping->otg_output_num;
+
+ cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
+
+ if (stop) {
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE;
+ } else {
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY;
+ for (i = 0; i < MAX_CRC_WINDOW_NUM; i++) {
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_start = window[i].rect.x;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_start = window[i].rect.y;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].x_end = window[i].rect.x + window[i].rect.width;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].y_end = window[i].rect.y + window[i].rect.height;
+ cmd.secure_display.mul_roi_ctl.roi_ctl[i].enable = window[i].enable;
+ }
+ }
+
+ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+bool
+dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
+ struct crc_window *window, uint8_t phy_id, bool stop)
+{
+ struct dc_dmub_srv *dmub_srv;
+ struct otg_phy_mux mux_mapping;
+ struct pipe_ctx *pipe;
+ int i;
+ struct dc *dc = stream->ctx->dc;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
+ break;
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return false;
+
+ mux_mapping.phy_output_num = phy_id;
+ mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
+
+ dmub_srv = dc->ctx->dmub_srv;
+
+ /* forward to dmub only; no dmcu support */
+ if (dmub_srv)
+ dc_stream_forward_dmub_multiple_crc_window(dmub_srv, window, &mux_mapping, stop);
+ else
+ return false;
+
+ return true;
+}
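/*
 * Illustrative caller for the new API above; the window geometry and phy_id
 * are arbitrary, and only dc_stream_forward_multiple_crc_window() is real
 * here. stop == false notifies DMUB of the active ROI set.
 */
static bool forward_two_roi_windows(struct dc_stream_state *stream, uint8_t phy_id)
{
	struct crc_window windows[MAX_CRC_WINDOW_NUM] = {0};

	windows[0].rect = (struct rect){ .x = 0, .y = 0, .width = 256, .height = 256 };
	windows[0].enable = true;
	windows[1].rect = (struct rect){ .x = 512, .y = 512, .width = 128, .height = 128 };
	windows[1].enable = true;

	return dc_stream_forward_multiple_crc_window(stream, windows, phy_id, false);
}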
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
* dc_stream_configure_crc() - Configure CRC capture for the given stream.
* @dc: DC Object
* @stream: The stream to configure CRC on.
- * @enable: Enable CRC if true, disable otherwise.
* @crc_window: CRC window (x/y start/end) information
+ * @enable: Enable CRC if true, disable otherwise.
* @continuous: Capture CRC on every frame if true. Otherwise, only capture
* once.
+ * @idx: Index of the CRC engine instance to capture on
+ * @reset: Reset the CRC engine before configuring it
*
- * By default, only CRC0 is configured, and the entire frame is used to
- * calculate the CRC.
+ * By default, the entire frame is used to calculate the CRC.
*
* Return: %false if the stream is not found or CRC capture is not supported;
* %true if the stream has been configured.
*/
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
- struct crc_params *crc_window, bool enable, bool continuous)
+ struct crc_params *crc_window, bool enable, bool continuous,
+ uint8_t idx, bool reset)
{
struct pipe_ctx *pipe;
struct crc_params param;
@@ -677,6 +741,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
param.continuous_mode = continuous;
param.enable = enable;
+ param.crc_eng_inst = idx;
+ param.reset = reset;
+
tg = pipe->stream_res.tg;
/* Only call if supported */
@@ -691,6 +758,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
*
* @dc: DC object.
* @stream: The DC stream state of the stream to get CRCs from.
+ * @idx: Index of the CRC engine to read the CRC from
* @r_cr: CRC value for the red component.
* @g_y: CRC value for the green component.
* @b_cb: CRC value for the blue component.
@@ -700,7 +768,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* Return:
* %false if stream is not found, or if CRCs are not enabled.
*/
-bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
+bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
int i;
@@ -721,7 +789,7 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
tg = pipe->stream_res.tg;
if (tg->funcs->get_crc)
- return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
+ return tg->funcs->get_crc(tg, idx, r_cr, g_y, b_cb);
DC_LOG_WARNING("CRC capture not supported.");
return false;
}
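/*
 * A sketch of driving the per-engine CRC API end to end. The engine index is
 * arbitrary, error handling is elided, and a NULL crc_window is assumed to
 * select the full frame, as the kernel-doc above implies.
 */
static void sample_crc_on_engine(struct dc *dc, struct dc_stream_state *stream,
				 uint8_t eng_inst)
{
	uint32_t r_cr = 0, g_y = 0, b_cb = 0;

	/* whole-frame CRC, continuous capture, reset the engine first */
	if (!dc_stream_configure_crc(dc, stream, NULL, true, true, eng_inst, true))
		return;

	if (dc_stream_get_crc(dc, stream, eng_inst, &r_cr, &g_y, &b_cb))
		DC_LOG_DEBUG("CRC[%d] = %08x %08x %08x\n", eng_inst, r_cr, g_y, b_cb);
}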
@@ -1157,6 +1225,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR)
+ get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else {
if (dc->ctx->dce_version < DCN_VERSION_2_0)
color_space_to_black_color(
@@ -1171,6 +1241,8 @@ static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *conte
get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_VABC)
+ get_vabc_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
}
}
}
@@ -1233,16 +1305,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
*/
if (is_phantom) {
if (tg->funcs->enable_crtc) {
- int main_pipe_width = 0, main_pipe_height = 0;
- struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
-
- if (old_paired_stream) {
- main_pipe_width = old_paired_stream->dst.width;
- main_pipe_height = old_paired_stream->dst.height;
- }
-
- if (dc->hwss.blank_phantom)
- dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
+ if (dc->hwseq->funcs.blank_pixel_data)
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
tg->funcs->enable_crtc(tg);
}
}
@@ -1437,6 +1501,7 @@ void dc_hardware_init(struct dc *dc)
detect_edp_presence(dc);
if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
dc->hwss.init_hw(dc);
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
}
void dc_init_callbacks(struct dc *dc,
@@ -1876,6 +1941,41 @@ void dc_z10_save_init(struct dc *dc)
dc->hwss.z10_save_init(dc);
}
+/* Sets a pipe unlock order based on the change in DET allocation and stores it in dc scratch memory.
+ * This prevents over-allocation of DET during the unlock process,
+ * e.g. a 2-pipe config with different streams and a max of 20 DET segments:
+ * Before: After:
+ * - Pipe0: 10 DET segments - Pipe0: 12 DET segments
+ * - Pipe1: 10 DET segments - Pipe1: 8 DET segments
+ * If Pipe0 gets updated first, 22 DET segments will momentarily be allocated.
+ */
+static void determine_pipe_unlock_order(struct dc *dc, struct dc_state *context)
+{
+ unsigned int i = 0;
+ struct pipe_ctx *pipe = NULL;
+ struct timing_generator *tg = NULL;
+
+ if (!dc->config.set_pipe_unlock_order)
+ return;
+
+ memset(dc->scratch.pipes_to_unlock_first, 0, sizeof(dc->scratch.pipes_to_unlock_first));
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ continue;
+ }
+
+ if (resource_calculate_det_for_stream(context, pipe) <
+ resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i])) {
+ dc->scratch.pipes_to_unlock_first[i] = true;
+ }
+ }
+}
+
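/*
 * Worked example for the comment above, using its numbers (the helper is
 * hypothetical): whichever pipe commits first, the other still holds its old
 * DET allocation, so the transient peak is first_new + other_old.
 */
static unsigned int det_peak_if_updated_first(unsigned int first_new_segments,
					      unsigned int other_old_segments)
{
	return first_new_segments + other_old_segments;
}
/*
 * det_peak_if_updated_first(12, 10) == 22 > 20: Pipe0 first breaks the budget.
 * det_peak_if_updated_first(8, 10) == 18 <= 20: Pipe1 (shrinking) unlocks first.
 */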
/**
* dc_commit_state_no_check - Apply context to the hardware
*
@@ -1974,6 +2074,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
}
+ determine_pipe_unlock_order(dc, context);
/* Program all planes within new context*/
if (dc->res_pool->funcs->prepare_mcache_programming)
dc->res_pool->funcs->prepare_mcache_programming(dc, context);
@@ -2032,7 +2133,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (context->stream_count > get_seamless_boot_stream_count(context) ||
+ if (get_seamless_boot_stream_count(context) == 0 ||
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
hwss_wait_for_no_pipes_pending(dc, context);
@@ -2122,6 +2223,11 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
struct dc_stream_state *stream = params->streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
+ /* revalidate streams */
+ res = dc_validate_stream(dc, stream);
+ if (res != DC_OK)
+ return res;
+
dc_stream_log(dc, stream);
set[i].stream = stream;
@@ -2156,6 +2262,14 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
context->power_source = params->power_source;
res = dc_validate_with_context(dc, set, params->stream_count, context, false);
+
+ /*
+ * Only update link encoder to stream assignment after bandwidth validation passed.
+ */
+ if (res == DC_OK && dc->res_pool->funcs->link_encs_assign)
+ dc->res_pool->funcs->link_encs_assign(
+ dc, context, context->streams, context->stream_count);
+
if (res != DC_OK) {
BREAK_TO_DEBUGGER();
goto fail;
@@ -2448,7 +2562,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
- sizeof(union dc_tiling_info)) != 0) {
+ sizeof(struct dc_tiling_info)) != 0) {
update_flags->bits.swizzle_change = 1;
elevate_update_type(&update_type, UPDATE_TYPE_MED);
@@ -2477,41 +2591,35 @@ static enum surface_update_type get_scaling_info_update_type(
if (!u->scaling_info)
return UPDATE_TYPE_FAST;
- if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
+ if (u->scaling_info->src_rect.width != u->surface->src_rect.width
+ || u->scaling_info->src_rect.height != u->surface->src_rect.height
+ || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
+ || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
+ || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
|| u->scaling_info->scaling_quality.integer_scaling !=
- u->surface->scaling_quality.integer_scaling
- ) {
+ u->surface->scaling_quality.integer_scaling) {
update_flags->bits.scaling_change = 1;
+ if (u->scaling_info->src_rect.width > u->surface->src_rect.width
+ || u->scaling_info->src_rect.height > u->surface->src_rect.height)
+ /* Making src rect bigger requires a bandwidth change */
+ update_flags->bits.clock_change = 1;
+
if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
/* Making dst rect smaller requires a bandwidth change */
update_flags->bits.bandwidth_change = 1;
- }
-
- if (u->scaling_info->src_rect.width != u->surface->src_rect.width
- || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
- update_flags->bits.scaling_change = 1;
- if (u->scaling_info->src_rect.width > u->surface->src_rect.width
- || u->scaling_info->src_rect.height > u->surface->src_rect.height)
- /* Making src rect bigger requires a bandwidth change */
- update_flags->bits.clock_change = 1;
+ if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+ (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
+ u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
+ /* Changing clip size of a large surface may result in MPC slice count change */
+ update_flags->bits.bandwidth_change = 1;
}
- if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
- (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
- u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
- /* Changing clip size of a large surface may result in MPC slice count change */
- update_flags->bits.bandwidth_change = 1;
-
- if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
- u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
- update_flags->bits.clip_size_change = 1;
-
if (u->scaling_info->src_rect.x != u->surface->src_rect.x
|| u->scaling_info->src_rect.y != u->surface->src_rect.y
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
@@ -2520,13 +2628,13 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
update_flags->bits.position_change = 1;
+ /* process every update flag before returning */
if (update_flags->bits.clock_change
|| update_flags->bits.bandwidth_change
|| update_flags->bits.scaling_change)
return UPDATE_TYPE_FULL;
- if (update_flags->bits.position_change ||
- update_flags->bits.clip_size_change)
+ if (update_flags->bits.position_change)
return UPDATE_TYPE_MED;
return UPDATE_TYPE_FAST;
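/*
 * Concrete reading of the restructured checks above (made-up geometry):
 * growing src from 1920x1080 to 2560x1440 with dst unchanged sets
 * scaling_change and, because the src rect grew, clock_change as well, so
 * the update is classified UPDATE_TYPE_FULL. Moving only src_rect.x/y sets
 * position_change alone, which now maps to UPDATE_TYPE_MED.
 */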
@@ -2617,7 +2725,8 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
elevate_update_type(&overall_type, type);
}
- if (update_flags->bits.lut_3d) {
+ if (update_flags->bits.lut_3d &&
+ u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
type = UPDATE_TYPE_FULL;
elevate_update_type(&overall_type, type);
}
@@ -2637,6 +2746,29 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
return overall_type;
}
+/* May need to flip the desktop plane when the MPO plane receives a flip but the
+ * desktop plane does not, while both planes are flip_immediate.
+ */
+static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count)
+{
+ bool has_flip_immediate_plane = false;
+ int i;
+
+ for (i = 0; i < surface_count; i++) {
+ if (updates[i].surface->flip_immediate) {
+ has_flip_immediate_plane = true;
+ break;
+ }
+ }
+
+ if (has_flip_immediate_plane && surface_count > 1) {
+ for (i = 0; i < surface_count; i++) {
+ if (updates[i].surface->flip_immediate)
+ updates[i].surface->update_flags.bits.addr_update = 1;
+ }
+ }
+}
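/*
 * Scenario the helper above exists for (sketch; the on-stack plane states are
 * purely illustrative): both planes are flip_immediate, but only the MPO
 * plane carries an address update this call.
 */
static void example_force_flip(struct dc *dc)
{
	struct dc_surface_update updates[2] = {0};
	struct dc_plane_state desktop = {0}, mpo = {0};

	desktop.flip_immediate = true;
	mpo.flip_immediate = true;
	mpo.update_flags.bits.addr_update = 1;	/* only the MPO plane flipped */
	updates[0].surface = &desktop;
	updates[1].surface = &mpo;

	force_immediate_gsl_plane_flip(dc, updates, 2);
	/* desktop.update_flags.bits.addr_update is now 1 as well */
}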
+
static enum surface_update_type check_update_surfaces_for_stream(
struct dc *dc,
struct dc_surface_update *updates,
@@ -2699,6 +2831,9 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->scaler_sharpener_update)
su_flags->bits.scaler_sharpener = 1;
+ if (stream_update->sharpening_required)
+ su_flags->bits.sharpening_required = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -2870,10 +3005,20 @@ static void copy_surface_update_to_plane(
sizeof(struct dc_transfer_func_distributed_points));
}
- if (srf_update->func_shaper)
+ if (srf_update->cm2_params) {
+ surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting;
+ surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable;
+ surface->mcm_luts = srf_update->cm2_params->cm2_luts;
+ }
+
+ if (srf_update->func_shaper) {
memcpy(&surface->in_shaper_func, srf_update->func_shaper,
sizeof(surface->in_shaper_func));
+ if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER)
+ surface->mcm_luts.shaper = &surface->in_shaper_func;
+ }
+
if (srf_update->lut3d_func)
memcpy(&surface->lut3d_func, srf_update->lut3d_func,
sizeof(surface->lut3d_func));
@@ -2886,10 +3031,17 @@ static void copy_surface_update_to_plane(
surface->sdr_white_level_nits =
srf_update->sdr_white_level_nits;
- if (srf_update->blend_tf)
+ if (srf_update->blend_tf) {
memcpy(&surface->blend_tf, srf_update->blend_tf,
sizeof(surface->blend_tf));
+ if (surface->mcm_lut1d_enable)
+ surface->mcm_luts.lut1d_func = &surface->blend_tf;
+ }
+
+ if (srf_update->cm2_params || srf_update->blend_tf)
+ surface->lut_bank_a = !surface->lut_bank_a;
+
if (srf_update->input_csc_color_matrix)
surface->input_csc_color_matrix =
*srf_update->input_csc_color_matrix;
@@ -2901,14 +3053,14 @@ static void copy_surface_update_to_plane(
if (srf_update->gamut_remap_matrix)
surface->gamut_remap_matrix =
*srf_update->gamut_remap_matrix;
- if (srf_update->cm2_params) {
- surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting;
- surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable;
- surface->mcm_luts = srf_update->cm2_params->cm2_luts;
- }
+
if (srf_update->cursor_csc_color_matrix)
surface->cursor_csc_color_matrix =
*srf_update->cursor_csc_color_matrix;
+
+ if (srf_update->bias_and_scale.bias_and_scale_valid)
+ surface->bias_and_scale =
+ srf_update->bias_and_scale;
}
static void copy_stream_update_to_stream(struct dc *dc,
@@ -3037,6 +3189,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
}
if (update->scaler_sharpener_update)
stream->scaler_sharpener_update = *update->scaler_sharpener_update;
+ if (update->sharpening_required)
+ stream->sharpening_required = *update->sharpening_required;
}
static void backup_planes_and_stream_state(
@@ -3066,7 +3220,10 @@ static void restore_planes_and_stream_state(
return;
for (i = 0; i < status->plane_count; i++) {
+ /* refcount will always be valid, restore everything else */
+ struct kref refcount = status->plane_states[i]->refcount;
*status->plane_states[i] = scratch->plane_states[i];
+ status->plane_states[i]->refcount = refcount;
}
*stream = scratch->stream_state;
}
@@ -3153,6 +3310,11 @@ static bool update_planes_and_stream_state(struct dc *dc,
context = dc->current_state;
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
+ /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream,
+ * e.g. the desktop and MPO planes are both flip_immediate but only the MPO plane received a flip.
+ * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
+ */
+ force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
if (update_type == UPDATE_TYPE_FULL)
backup_planes_and_stream_state(&dc->scratch.current_state, stream);
@@ -3225,8 +3387,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
if (update_type != UPDATE_TYPE_MED)
continue;
- if (surface->update_flags.bits.clip_size_change ||
- surface->update_flags.bits.position_change) {
+ if (surface->update_flags.bits.position_change) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -3625,6 +3786,10 @@ static void commit_planes_for_stream_fast(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
bool should_offload_fams2_flip = false;
+ bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
+
+ if (should_lock_all_pipes)
+ determine_pipe_unlock_order(dc, context);
if (dc->debug.fams2_config.bits.enable &&
dc->debug.fams2_config.bits.enable_offload_flip &&
@@ -3677,13 +3842,14 @@ static void commit_planes_for_stream_fast(struct dc *dc,
if (!pipe_ctx->plane_state)
continue;
- if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
+
pipe_ctx->plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
- dc->hwss.program_triplebuffer != NULL &&
- !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
- /*triple buffer for VUpdate only*/
+ dc->hwss.program_triplebuffer != NULL &&
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+ /*triple buffer for VUpdate only*/
pipe_ctx->plane_state->triplebuffer_flips = true;
}
}
@@ -3742,6 +3908,8 @@ static void commit_planes_for_stream(struct dc *dc,
bool subvp_curr_use = false;
uint8_t current_stream_mask = 0;
+ if (should_lock_all_pipes)
+ determine_pipe_unlock_order(dc, context);
// Once we apply the new subvp context to hardware it won't be in the
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
@@ -3749,7 +3917,7 @@ static void commit_planes_for_stream(struct dc *dc,
dc_exit_ips_for_hw_access(dc);
dc_z10_restore(dc);
- if (update_type == UPDATE_TYPE_FULL)
+ if (update_type == UPDATE_TYPE_FULL && dc->optimized_required)
hwss_process_outstanding_hw_updates(dc, dc->current_state);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -3776,6 +3944,9 @@ static void commit_planes_for_stream(struct dc *dc,
context_clock_trace(dc, context);
}
+ if (update_type == UPDATE_TYPE_FULL)
+ hwss_wait_for_outstanding_hw_updates(dc, dc->current_state);
+
top_pipe_to_program = resource_get_otg_master_for_stream(
&context->res_ctx,
stream);
@@ -3920,19 +4091,20 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (!pipe_ctx->plane_state)
continue;
- if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
- dc->hwss.program_triplebuffer != NULL &&
- !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
- /*triple buffer for VUpdate only*/
- pipe_ctx->plane_state->triplebuffer_flips = true;
+ dc->hwss.program_triplebuffer != NULL &&
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+ /*triple buffer for VUpdate only*/
+ pipe_ctx->plane_state->triplebuffer_flips = true;
}
}
if (update_type == UPDATE_TYPE_FULL) {
/* force vsync flip when reconfiguring pipes to prevent underflow */
plane_state->flip_immediate = false;
+ plane_state->triplebuffer_flips = false;
}
}
@@ -3953,7 +4125,6 @@ static void commit_planes_for_stream(struct dc *dc,
continue;
ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
-
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
@@ -4028,7 +4199,7 @@ static void commit_planes_for_stream(struct dc *dc,
/*program triple buffer after lock based on flip type*/
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
- /*only enable triplebuffer for fast_update*/
+ /*only enable triplebuffer for fast_update*/
dc->hwss.program_triplebuffer(
dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
}
@@ -4418,7 +4589,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
struct pipe_split_policy_backup policy;
struct dc_state *intermediate_context;
struct dc_state *old_current_state = dc->current_state;
- struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+ struct dc_surface_update srf_updates[MAX_SURFACES] = {0};
int surface_count;
/*
@@ -4777,6 +4948,11 @@ static bool update_planes_and_stream_v1(struct dc *dc,
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
+ /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream,
+ * e.g. the desktop and MPO planes are both flip_immediate but only the MPO plane received a flip.
+ * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
+ */
+ force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
if (update_type >= UPDATE_TYPE_FULL) {
@@ -5065,11 +5241,26 @@ static bool update_planes_and_stream_v3(struct dc *dc,
return true;
}
+static void clear_update_flags(struct dc_surface_update *srf_updates,
+ int surface_count, struct dc_stream_state *stream)
+{
+ int i;
+
+ if (stream)
+ stream->update_flags.raw = 0;
+
+ for (i = 0; i < surface_count; i++)
+ if (srf_updates[i].surface)
+ srf_updates[i].surface->update_flags.raw = 0;
+}
+
bool dc_update_planes_and_stream(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update)
{
+ bool ret = false;
+
dc_exit_ips_for_hw_access(dc);
/*
* update planes and stream version 3 separates FULL and FAST updates
@@ -5086,10 +5277,16 @@ bool dc_update_planes_and_stream(struct dc *dc,
* features as they are now transparent to the new sequence.
*/
if (dc->ctx->dce_version >= DCN_VERSION_4_01)
- return update_planes_and_stream_v3(dc, srf_updates,
+ ret = update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
- return update_planes_and_stream_v2(dc, srf_updates,
+ else
+ ret = update_planes_and_stream_v2(dc, srf_updates,
surface_count, stream, stream_update);
+
+ if (ret)
+ clear_update_flags(srf_updates, surface_count, stream);
+
+ return ret;
}
void dc_commit_updates_for_stream(struct dc *dc,
@@ -5099,6 +5296,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_stream_update *stream_update,
struct dc_state *state)
{
+ bool ret = false;
+
dc_exit_ips_for_hw_access(dc);
/* TODO: Since change commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
@@ -5106,17 +5305,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
* the new sequence for all ASICs.
*/
if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
- update_planes_and_stream_v3(dc, srf_updates, surface_count,
+ ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
- return;
- }
- if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
- update_planes_and_stream_v2(dc, srf_updates, surface_count,
+ } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
- return;
- }
- update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
- stream_update, state);
+ } else
+ ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+ stream_update, state);
+
+ if (ret)
+ clear_update_flags(srf_updates, surface_count, stream);
}
uint8_t dc_get_current_stream_count(struct dc *dc)
@@ -5187,11 +5386,9 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc->vm_pa_config.valid) {
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
}
-
break;
default:
ASSERT(dc->current_state->stream_count == 0);
-
dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
dc_state_destruct(dc->current_state);
@@ -5315,8 +5512,15 @@ bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips)
void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
{
- if (dc->debug.disable_idle_power_optimizations)
+ int idle_fclk_khz = 0, idle_dramclk_khz = 0, i = 0;
+ enum mall_stream_type subvp_pipe_type[MAX_PIPES] = {0};
+ struct pipe_ctx *pipe = NULL;
+ struct dc_state *context = dc->current_state;
+
+ if (dc->debug.disable_idle_power_optimizations) {
+ DC_LOG_DEBUG("%s: disabled\n", __func__);
return;
+ }
if (allow != dc->idle_optimizations_allowed)
DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
@@ -5333,8 +5537,27 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
return;
if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL &&
- dc->hwss.apply_idle_power_optimizations(dc, allow))
+ dc->hwss.apply_idle_power_optimizations(dc, allow)) {
dc->idle_optimizations_allowed = allow;
+ DC_LOG_DEBUG("%s: %s\n", __func__, allow ? "enabled" : "disabled");
+ }
+
+ // log idle clocks and sub vp pipe types at idle optimization time
+ if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_fclk)
+ idle_fclk_khz = dc->clk_mgr->funcs->get_hard_min_fclk(dc->clk_mgr);
+
+ if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->get_hard_min_memclk)
+ idle_dramclk_khz = dc->clk_mgr->funcs->get_hard_min_memclk(dc->clk_mgr);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
+ }
+
+ DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
+ __func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
+ subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);
+
}
void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
@@ -5932,7 +6155,7 @@ void dc_query_current_properties(struct dc *dc, struct dc_current_properties *pr
bool subvp_sw_cursor_req = false;
for (i = 0; i < dc->current_state->stream_count; i++) {
- if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
+ if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) {
subvp_sw_cursor_req = true;
break;
}
@@ -5976,7 +6199,30 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
{
struct dc_power_profile profile = { 0 };
- profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;
+ struct dc *dc;
+
+ profile.power_level = !context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ if (!context->clk_mgr || !context->clk_mgr->ctx || !context->clk_mgr->ctx->dc)
+ return profile;
+
+ dc = context->clk_mgr->ctx->dc;
+
+ if (dc->res_pool->funcs->get_power_profile)
+ profile.power_level = dc->res_pool->funcs->get_power_profile(context);
return profile;
}
+
+/**
+ * dc_get_det_buffer_size_from_state() - Extract the detile (DET) buffer size from a dc state
+ * @context: dc state to query
+ *
+ * Called when the DM wants to log the detile buffer size from a dc_state.
+ *
+ * Return: the DET buffer size, or 0 if the resource pool does not report one.
+ */
+unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
+{
+ struct dc *dc = context->clk_mgr->ctx->dc;
+
+ if (dc->res_pool->funcs->get_det_buffer_size)
+ return dc->res_pool->funcs->get_det_buffer_size(context);
+ else
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 801cdbc8117d..af1ea5792560 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -46,11 +46,6 @@
DC_LOG_IF_TRACE(__VA_ARGS__); \
} while (0)
-#define TIMING_TRACE(...) do {\
- if (dc->debug.timing_trace) \
- DC_LOG_SYNC(__VA_ARGS__); \
-} while (0)
-
#define CLOCK_TRACE(...) do {\
if (dc->debug.clock_trace) \
DC_LOG_BANDWIDTH_CALCS(__VA_ARGS__); \
@@ -306,43 +301,6 @@ void post_surface_trace(struct dc *dc)
}
-void context_timing_trace(
- struct dc *dc,
- struct resource_context *res_ctx)
-{
- int i;
- int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0};
- struct crtc_position position;
- unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
- DC_LOGGER_INIT(dc->ctx->logger);
-
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
- /* get_position() returns CRTC vertical/horizontal counter
- * hence not applicable for underlay pipe
- */
- if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
- continue;
-
- pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position);
- h_pos[i] = position.horizontal_count;
- v_pos[i] = position.vertical_count;
- }
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-
- if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
- continue;
-
- TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n",
- pipe_ctx->stream_res.tg->inst,
- pipe_ctx->stream->timing.h_total,
- pipe_ctx->stream->timing.v_total,
- h_pos[i], v_pos[i]);
- }
-}
-
void context_clock_trace(
struct dc *dc,
struct dc_state *context)
@@ -434,3 +392,43 @@ char *dc_status_to_str(enum dc_status status)
return "Unexpected status error";
}
+
+char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding)
+{
+ switch (pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ return "RGB";
+ case PIXEL_ENCODING_YCBCR422:
+ return "YUV422";
+ case PIXEL_ENCODING_YCBCR444:
+ return "YUV444";
+ case PIXEL_ENCODING_YCBCR420:
+ return "YUV420";
+ default:
+ return "Unknown";
+ }
+}
+
+char *dc_color_depth_to_str(enum dc_color_depth color_depth)
+{
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ return "6-bpc";
+ case COLOR_DEPTH_888:
+ return "8-bpc";
+ case COLOR_DEPTH_101010:
+ return "10-bpc";
+ case COLOR_DEPTH_121212:
+ return "12-bpc";
+ case COLOR_DEPTH_141414:
+ return "14-bpc";
+ case COLOR_DEPTH_161616:
+ return "16-bpc";
+ case COLOR_DEPTH_999:
+ return "9-bpc";
+ case COLOR_DEPTH_111111:
+ return "11-bpc";
+ default:
+ return "Unknown";
+ }
+}
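/*
 * Typical call site for the two helpers above (sketch; assumes a
 * struct dc_stream_state *stream is in scope):
 *
 *	DC_LOG_DEBUG("stream format: %s, %s\n",
 *		     dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
 *		     dc_color_depth_to_str(stream->timing.display_color_depth));
 */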
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 7ee2be8f82c4..6eb9bae3af91 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -312,11 +312,11 @@ void get_mpctree_visual_confirm_color(
{
const struct tg_color pipe_colors[6] = {
{MAX_TG_COLOR_VALUE, 0, 0}, /* red */
- {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE / 4, 0}, /* orange */
{MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* yellow */
{0, MAX_TG_COLOR_VALUE, 0}, /* green */
+ {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* cyan */
{0, 0, MAX_TG_COLOR_VALUE}, /* blue */
- {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, /* purple */
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* magenta */
};
struct pipe_ctx *top_pipe = pipe_ctx;
@@ -425,6 +425,44 @@ void get_hdr_visual_confirm_color(
}
}
+/* Visual Confirm color definition for VABC */
+void get_vabc_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+ struct dc_link *edp_link = NULL;
+
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link) {
+ if (pipe_ctx->stream->link->connector_signal == SIGNAL_TYPE_EDP)
+ edp_link = pipe_ctx->stream->link;
+ }
+
+ if (edp_link) {
+ switch (edp_link->backlight_control_type) {
+ case BACKLIGHT_CONTROL_PWM:
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ break;
+ case BACKLIGHT_CONTROL_AMD_AUX:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = 0;
+ break;
+ case BACKLIGHT_CONTROL_VESA_AUX:
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ }
+ } else {
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ }
+}
+
void get_subvp_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
@@ -497,6 +535,23 @@ void get_mclk_switch_visual_confirm_color(
}
}
+void get_cursor_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ if (pipe_ctx->stream && pipe_ctx->stream->cursor_position.enable) {
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ } else {
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ }
+}
+
void set_p_state_switch_method(
struct dc *dc,
struct dc_state *context,
@@ -881,6 +936,9 @@ void hwss_setup_dpp(union block_sequence_params *params)
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ if (!plane_state)
+ return;
+
if (dpp && dpp->funcs->dpp_setup) {
// program the input csc
dpp->funcs->dpp_setup(dpp,
@@ -1071,8 +1129,13 @@ void hwss_wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_con
if (!pipe_ctx->stream)
continue;
- if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
- pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+ /* For full updates we must wait for all double buffer updates, not just DRR updates. This
+ * is particularly important for minimal transitions. Only check OTG_MASTER pipes, as
+ * non-OTG-master pipes share the same OTG as their OTG master.
+ */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && dc->hwss.wait_for_all_pending_updates) {
+ dc->hwss.wait_for_all_pending_updates(pipe_ctx);
+ }
hubp = pipe_ctx->plane_res.hubp;
if (!hubp)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index dfdfe22d9e85..c1b79b379447 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -125,6 +125,14 @@ uint32_t dc_link_bandwidth_kbps(
return link->dc->link_srv->dp_link_bandwidth_kbps(link, link_settings);
}
+uint32_t dc_link_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params)
+{
+ return link->dc->link_srv->dp_required_hblank_size_bytes(link,
+ audio_params);
+}
+
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
{
dc->link_srv->get_cur_res_map(dc, map);
@@ -430,11 +438,10 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link,
}
bool dc_link_set_backlight_level(const struct dc_link *link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
+ struct set_backlight_level_params *backlight_level_params)
{
return link->dc->link_srv->edp_set_backlight_level(link,
- backlight_pwm_u16_16, frame_ramp);
+ backlight_level_params);
}
bool dc_link_set_backlight_level_nits(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index c7599c40d4be..520a34a42827 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -765,25 +765,6 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
-/*
- * This is a preliminary vp size calculation to allow us to check taps support.
- * The result is completely overridden afterwards.
- */
-static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
-{
- struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
-
- data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width));
- data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height));
- data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width));
- data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height));
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
- swap(data->viewport.width, data->viewport.height);
- swap(data->viewport_c.width, data->viewport_c.height);
- }
-}
-
static struct rect intersect_rec(const struct rect *r0, const struct rect *r1)
{
struct rect rec;
@@ -1468,6 +1449,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
const struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx);
+ struct scaling_taps temp = {0};
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -1519,14 +1501,16 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
res = spl_calculate_scaler_params(spl_in, spl_out);
// Convert respective out params from SPL to scaler data
translate_SPL_out_params_to_pipe_ctx(pipe_ctx, spl_out);
+
+ /* Ignore scaler failure if pipe context plane is phantom plane */
+ if (!res && plane_state->is_phantom)
+ res = true;
} else {
#endif
/* depends on h_active */
calculate_recout(pipe_ctx);
/* depends on pixel format */
calculate_scaling_ratios(pipe_ctx);
- /* depends on scaling ratios and recout, does not calculate offset yet */
- calculate_viewport_size(pipe_ctx);
/*
* LB calculations depend on vp size, h/v_active and scaling ratios
@@ -1547,6 +1531,24 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
+ // Get tap values using a 100x100 dummy viewport for maximum scaling quality; they are
+ // overridden below if a different scaling quality is required
+ pipe_ctx->plane_res.scl_data.viewport.width = 100;
+ pipe_ctx->plane_res.scl_data.viewport.height = 100;
+ pipe_ctx->plane_res.scl_data.viewport_c.width = 100;
+ pipe_ctx->plane_res.scl_data.viewport_c.height = 100;
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ temp = pipe_ctx->plane_res.scl_data.taps;
+
+ calculate_inits_and_viewports(pipe_ctx);
+
if (pipe_ctx->plane_res.xfm != NULL)
res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
@@ -1573,11 +1575,14 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
&plane_state->scaling_quality);
}
- /*
- * Depends on recout, scaling ratios, h_active and taps
- * May need to re-check lb size after this in some obscure scenario
- */
- if (res)
+ /* Ignore scaler failure if pipe context plane is phantom plane */
+ if (!res && plane_state->is_phantom)
+ res = true;
+
+ if (res && (pipe_ctx->plane_res.scl_data.taps.v_taps != temp.v_taps ||
+ pipe_ctx->plane_res.scl_data.taps.h_taps != temp.h_taps ||
+ pipe_ctx->plane_res.scl_data.taps.v_taps_c != temp.v_taps_c ||
+ pipe_ctx->plane_res.scl_data.taps.h_taps_c != temp.h_taps_c))
calculate_inits_and_viewports(pipe_ctx);
/*
@@ -2089,7 +2094,8 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master,
count = resource_get_odm_slice_count(otg_master);
h_active = timing->h_addressable +
timing->h_border_left +
- timing->h_border_right;
+ timing->h_border_right +
+ otg_master->hblank_borrow;
width = h_active / count;
if (otg_master->stream_res.tg)
@@ -4022,6 +4028,41 @@ fail:
}
/**
+ * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context.
+ * @pipe_ctx: Pointer to the pipe context structure.
+ *
+ * This function calculates the horizontal blanking borrow value for a given pipe context based on the
+ * display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are not
+ * evenly divisible by the DSC slice count, it sets hblank_borrow to the padding needed to round
+ * hactive up to a whole number of slices. If the remaining blanking (h_total minus hactive minus
+ * hblank_borrow) would fall below 32 pixels, it resets hblank_borrow to 0.
+ */
+static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t hactive;
+ uint32_t ceil_slice_width;
+ struct dc_stream_state *stream = NULL;
+
+ if (!pipe_ctx)
+ return;
+
+ stream = pipe_ctx->stream;
+
+ if (stream->timing.flags.DSC) {
+ hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+
+ /* If the determined slice count does not divide hactive evenly, assume hblank borrow is needed for padding */
+ if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) {
+ ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1;
+ pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive;
+
+ if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32)
+ pipe_ctx->hblank_borrow = 0;
+ }
+ }
+}
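To sanity-check the padding arithmetic above with hypothetical numbers (the timing values are illustrative, not taken from any real mode):

/* Hypothetical: hactive = 3840, num_slices_h = 7.
 *   3840 % 7 = 4            -> borrowing is needed
 *   ceil_slice_width = (3840 / 7) + 1 = 549
 *   hblank_borrow    = 549 * 7 - 3840 = 3
 * With h_total = 3866 the remaining blank would be
 *   3866 - 3840 - 3 = 23 < 32, so hblank_borrow is reset to 0.
 */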
+
+/**
* dc_validate_global_state() - Determine if hardware can support a given state
*
* @dc: dc struct for this driver
@@ -4059,6 +4100,10 @@ enum dc_status dc_validate_global_state(
if (pipe_ctx->stream != stream)
continue;
+ /* Decide whether hblank borrow is needed and save it in pipe_ctx */
+ if (dc->debug.enable_hblank_borrow)
+ decide_hblank_borrow(pipe_ctx);
+
if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
@@ -4094,14 +4139,6 @@ enum dc_status dc_validate_global_state(
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE;
- /*
- * Only update link encoder to stream assignment after bandwidth validation passed.
- * TODO: Split out assignment and validation.
- */
- if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
- dc->res_pool->funcs->link_encs_assign(
- dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
-
return result;
}
@@ -4441,7 +4478,7 @@ static void set_hfvs_info_packet(
static void adaptive_sync_override_dp_info_packets_sdp_line_num(
const struct dc_crtc_timing *timing,
struct enc_sdp_line_num *sdp_line_num,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+ unsigned int vstartup_start)
{
uint32_t asic_blank_start = 0;
uint32_t asic_blank_end = 0;
@@ -4456,8 +4493,8 @@ static void adaptive_sync_override_dp_info_packets_sdp_line_num(
asic_blank_end = (asic_blank_start - tg->v_border_bottom -
tg->v_addressable - tg->v_border_top);
- if (pipe_dlg_param->vstartup_start > asic_blank_end) {
- v_update = (tg->v_total - (pipe_dlg_param->vstartup_start - asic_blank_end));
+ if (vstartup_start > asic_blank_end) {
+ v_update = (tg->v_total - (vstartup_start - asic_blank_end));
sdp_line_num->adaptive_sync_line_num_valid = true;
sdp_line_num->adaptive_sync_line_num = (tg->v_total - v_update - 1);
} else {
@@ -4470,7 +4507,7 @@ static void set_adaptive_sync_info_packet(
struct dc_info_packet *info_packet,
const struct dc_stream_state *stream,
struct encoder_info_frame *info_frame,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+ unsigned int vstartup_start)
{
if (!stream->adaptive_sync_infopacket.valid)
return;
@@ -4478,7 +4515,7 @@ static void set_adaptive_sync_info_packet(
adaptive_sync_override_dp_info_packets_sdp_line_num(
&stream->timing,
&info_frame->sdp_line_num,
- pipe_dlg_param);
+ vstartup_start);
*info_packet = stream->adaptive_sync_infopacket;
}
@@ -4511,6 +4548,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
{
enum signal_type signal = SIGNAL_TYPE_NONE;
struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+ unsigned int vstartup_start = 0;
/* default all packets to invalid */
info->avi.valid = false;
@@ -4524,6 +4562,9 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->adaptive_sync.valid = false;
signal = pipe_ctx->stream->signal;
+ if (pipe_ctx->stream->ctx->dc->res_pool->funcs->get_vstartup_for_pipe)
+ vstartup_start = pipe_ctx->stream->ctx->dc->res_pool->funcs->get_vstartup_for_pipe(pipe_ctx);
+
/* HDMi and DP have different info packets*/
if (dc_is_hdmi_signal(signal)) {
set_avi_info_frame(&info->avi, pipe_ctx);
@@ -4545,7 +4586,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_adaptive_sync_info_packet(&info->adaptive_sync,
pipe_ctx->stream,
info,
- &pipe_ctx->pipe_dlg_param);
+ vstartup_start);
}
patch_gamut_packet_checksum(&info->gamut);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 2597e3fd562b..1b2cce127981 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -265,6 +265,9 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state)
dc_state_copy_internal(new_state, src_state);
#ifdef CONFIG_DRM_AMD_DC_FP
+ new_state->bw_ctx.dml2 = NULL;
+ new_state->bw_ctx.dml2_dc_power_source = NULL;
+
if (src_state->bw_ctx.dml2 &&
!dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
dc_state_release(new_state);
@@ -480,9 +483,9 @@ bool dc_state_add_plane(
if (stream_status == NULL) {
dm_error("Existing stream not found; failed to attach surface!\n");
goto out;
- } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ } else if (stream_status->plane_count == MAX_SURFACES) {
dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
- plane_state, MAX_SURFACE_NUM);
+ plane_state, MAX_SURFACES);
goto out;
} else if (!otg_master_pipe) {
goto out;
@@ -597,7 +600,7 @@ bool dc_state_rem_all_planes_for_stream(
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < state->stream_count; i++)
if (state->streams[i] == stream) {
@@ -872,7 +875,7 @@ bool dc_state_rem_all_phantom_planes_for_stream(
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < state->stream_count; i++)
if (state->streams[i] == phantom_stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 9a406d74c0dd..e8134c47fe0d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -37,6 +37,8 @@
#define DC_LOGGER dc->ctx->logger
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+#ifndef MAX
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#endif
@@ -292,7 +294,9 @@ bool dc_stream_set_cursor_attributes(
* 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz
* 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz
*/
- if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384) {
+ if (dc->debug.allow_sw_cursor_fallback &&
+ attributes->height * attributes->width * 4 > 16384 &&
+ !stream->hw_cursor_req) {
if (check_subvp_sw_cursor_fallback_req(dc, stream))
return false;
}
@@ -421,7 +425,6 @@ bool dc_stream_program_cursor_position(
/* apply/update visual confirm */
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HW_CURSOR) {
/* update software state */
- uint32_t color_value = MAX_TG_COLOR_VALUE;
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -429,15 +432,7 @@ bool dc_stream_program_cursor_position(
/* adjust visual confirm color for all pipes with current stream */
if (stream == pipe_ctx->stream) {
- if (stream->cursor_position.enable) {
- pipe_ctx->visual_confirm_color.color_r_cr = color_value;
- pipe_ctx->visual_confirm_color.color_g_y = 0;
- pipe_ctx->visual_confirm_color.color_b_cb = 0;
- } else {
- pipe_ctx->visual_confirm_color.color_r_cr = 0;
- pipe_ctx->visual_confirm_color.color_g_y = 0;
- pipe_ctx->visual_confirm_color.color_b_cb = color_value;
- }
+ get_cursor_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
/* programming hardware */
if (pipe_ctx->plane_state)
@@ -612,17 +607,6 @@ bool dc_stream_remove_writeback(struct dc *dc,
return true;
}
-bool dc_stream_warmup_writeback(struct dc *dc,
- int num_dwb,
- struct dc_writeback_info *wb_info)
-{
- dc_exit_ips_for_hw_access(dc);
-
- if (dc->hwss.mmhubbub_warmup)
- return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
- else
- return false;
-}
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
{
uint8_t i;
@@ -819,12 +803,12 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
stream->dst.height,
stream->output_color_space);
DC_LOG_DC(
- "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
+ "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixel_encoding:%s, color_depth:%s\n",
stream->timing.pix_clk_100hz / 10,
stream->timing.h_total,
stream->timing.v_total,
- stream->timing.pixel_encoding,
- stream->timing.display_color_depth);
+ dc_pixel_encoding_to_str(stream->timing.pixel_encoding),
+ dc_color_depth_to_str(stream->timing.display_color_depth));
DC_LOG_DC(
"\tlink: %d\n",
stream->link->link_index);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ccbb15f1638c..f3471d45b312 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -83,13 +83,6 @@ uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane
/*******************************************************************************
* Public functions
******************************************************************************/
-void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
- uint32_t controller_id)
-{
- plane_state->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1;
- /*register_flip_interrupt(surface);*/
-}
-
struct dc_plane_state *dc_create_plane_state(const struct dc *dc)
{
struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
@@ -277,4 +270,50 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut)
kref_get(&lut->refcount);
}
+void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
+ bool clear_tiling)
+{
+ struct dc *dc;
+ int i;
+
+ if (!plane_state)
+ return;
+
+ dc = plane_state->ctx->dc;
+ if (!dc || !dc->current_state)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx)
+ continue;
+
+ if (dc->ctx->dce_version >= DCE_VERSION_MAX) {
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+ /* if framebuffer is tiled, disable tiling */
+ if (clear_tiling && hubp->funcs->hubp_clear_tiling)
+ hubp->funcs->hubp_clear_tiling(hubp);
+
+ /* force page flip to see the new content of the framebuffer */
+ hubp->funcs->hubp_program_surface_flip_and_addr(hubp,
+ &plane_state->address,
+ true);
+ } else {
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ if (!mi)
+ continue;
+ /* if framebuffer is tiled, disable tiling */
+ if (clear_tiling && mi->funcs->mem_input_clear_tiling)
+ mi->funcs->mem_input_clear_tiling(mi);
+
+ /* force page flip to see the new content of the framebuffer */
+ mi->funcs->mem_input_program_surface_flip_and_addr(mi,
+ &plane_state->address,
+ true);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3992ad73165b..053481ab69ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,9 +55,9 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.301"
+#define DC_VER "3.2.316"
-#define MAX_SURFACES 3
+#define MAX_SURFACES 4
#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
@@ -225,6 +225,11 @@ struct dc_dmub_caps {
bool subvp_psr;
bool gecc_enable;
uint8_t fams_ver;
+ bool aux_backlight_support;
+};
+
+struct dc_scl_caps {
+ bool sharpener_support;
};
struct dc_caps {
@@ -285,6 +290,7 @@ struct dc_caps {
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
uint32_t max_v_total;
+ bool vtotal_limited_by_fp2;
uint32_t max_disp_clock_khz_at_vmin;
uint8_t subvp_drr_vblank_start_margin_us;
bool cursor_not_scaled;
@@ -292,6 +298,7 @@ struct dc_caps {
bool sequential_ono;
/* Conservative limit for DCC cases which require ODM4:1 to support*/
uint32_t dcc_plane_width_limit;
+ struct dc_scl_caps scl_caps;
};
struct dc_bug_wa {
@@ -456,6 +463,7 @@ struct dc_config {
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
unsigned int disable_ips_in_vpb;
+ bool disable_ips_in_dpms_off;
bool usb4_bw_alloc_support;
bool allow_0_dtb_clk;
bool use_assr_psp_message;
@@ -463,6 +471,8 @@ struct dc_config {
unsigned int enable_fpo_flicker_detection;
bool disable_hbr_audio_dp2;
bool consolidated_dpia_dp_lt;
+ bool set_pipe_unlock_order;
+ bool enable_dpia_pre_training;
};
enum visual_confirm {
@@ -479,6 +489,7 @@ enum visual_confirm {
VISUAL_CONFIRM_MCLK_SWITCH = 16,
VISUAL_CONFIRM_FAMS2 = 19,
VISUAL_CONFIRM_HW_CURSOR = 20,
+ VISUAL_CONFIRM_VABC = 21,
};
enum dc_psr_power_opts {
@@ -620,6 +631,8 @@ struct dc_clocks {
int bw_dispclk_khz;
int idle_dramclk_khz;
int idle_fclk_khz;
+ int subvp_prefetch_dramclk_khz;
+ int subvp_prefetch_fclk_khz;
};
struct dc_bw_validation_profile {
@@ -764,7 +777,8 @@ union dpia_debug_options {
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */
- uint32_t reserved:25;
+ uint32_t enable_dpia_pre_training:1; /* bit 7 */
+ uint32_t reserved:24;
} bits;
uint32_t raw;
};
@@ -862,7 +876,6 @@ struct dc_debug_options {
bool sanity_checks;
bool max_disp_clk;
bool surface_trace;
- bool timing_trace;
bool clock_trace;
bool validation_trace;
bool bandwidth_calcs_trace;
@@ -1048,8 +1061,8 @@ struct dc_debug_options {
bool dml21_force_pstate_method;
uint32_t dml21_force_pstate_method_values[MAX_PIPES];
uint32_t dml21_disable_pstate_method_mask;
+ union fw_assisted_mclk_switch_version fams_version;
union dmub_fams2_global_feature_config fams2_config;
- bool enable_legacy_clock_update;
unsigned int force_cositing;
unsigned int disable_spl;
unsigned int force_easf;
@@ -1061,6 +1074,9 @@ struct dc_debug_options {
unsigned int sharpen_policy;
unsigned int scale_to_sharpness_policy;
bool skip_full_updated_if_possible;
+ unsigned int enable_oled_edp_power_up_opt;
+ bool enable_hblank_borrow;
+ bool force_subvp_df_throttle;
};
@@ -1253,7 +1269,6 @@ union surface_update_flags {
uint32_t rotation_change:1;
uint32_t swizzle_change:1;
uint32_t scaling_change:1;
- uint32_t clip_size_change: 1;
uint32_t position_change:1;
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
@@ -1292,7 +1307,7 @@ struct dc_plane_state {
struct rect clip_rect;
struct plane_size plane_size;
- union dc_tiling_info tiling_info;
+ struct dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
@@ -1355,6 +1370,7 @@ struct dc_plane_state {
enum mpcc_movable_cm_location mcm_location;
struct dc_csc_transform cursor_csc_color_matrix;
bool adaptive_sharpness_en;
+ int adaptive_sharpness_policy;
int sharpness_level;
enum linear_light_scaling linear_light_scaling;
unsigned int sdr_white_level_nits;
@@ -1362,7 +1378,7 @@ struct dc_plane_state {
struct dc_plane_info {
struct plane_size plane_size;
- union dc_tiling_info tiling_info;
+ struct dc_tiling_info tiling_info;
struct dc_plane_dcc_param dcc;
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
@@ -1389,7 +1405,7 @@ struct dc_scratch_space {
* store current value in plane states so we can still recover
* a valid current state during dc update.
*/
- struct dc_plane_state plane_states[MAX_SURFACE_NUM];
+ struct dc_plane_state plane_states[MAX_SURFACES];
struct dc_stream_state stream_state;
};
@@ -1461,6 +1477,7 @@ struct dc {
struct dc_scratch_space current_state;
struct dc_scratch_space new_state;
struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack
+ bool pipes_to_unlock_first[MAX_PIPES]; /* Any of the pipes indicated here should be unlocked first */
} scratch;
struct dml2_configuration_options dml2_options;
@@ -1513,9 +1530,10 @@ struct dc_surface_update {
* change cm2_params.component_settings: Full update
* change cm2_params.cm2_luts: Fast update
*/
- struct dc_cm2_parameters *cm2_params;
+ const struct dc_cm2_parameters *cm2_params;
const struct dc_csc_transform *cursor_csc_color_matrix;
unsigned int sdr_white_level_nits;
+ struct dc_bias_and_scale bias_and_scale;
};
/*
@@ -1770,7 +1788,6 @@ struct dc_link {
bool dongle_mode_timing_override;
bool blank_stream_on_ocs_change;
bool read_dpcd204h_on_irq_hpd;
- bool disable_assr_for_uhbr;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -1786,6 +1803,7 @@ struct dc_link {
// BW ALLOCATON USB4 ONLY
struct dc_dpia_bw_alloc dpia_bw_alloc_config;
bool skip_implict_edp_power_control;
+ enum backlight_control_type backlight_control_type;
};
/* Return an enumerated dc_link.
@@ -2009,6 +2027,24 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting);
+struct dp_audio_bandwidth_params {
+ const struct dc_crtc_timing *crtc_timing;
+ enum dp_link_encoding link_encoding;
+ uint32_t channel_count;
+ uint32_t sample_rate_hz;
+};
+
+/* The function calculates the minimum size of hblank (in bytes) needed to
+ * support the specified channel count and sample rate combination, given the
+ * link encoding and timing to be used. This calculation is not supported
+ * for 8b/10b SST.
+ *
+ * return - min hblank size in bytes, 0 if 8b/10b SST.
+ */
+uint32_t dc_link_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
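As a usage sketch (assuming the dp_link_encoding enumerators and a valid stream pointer; the parameter values are illustrative only):

struct dp_audio_bandwidth_params audio_params = {
	.crtc_timing = &stream->timing,
	.link_encoding = DP_128b_132b_ENCODING, /* query unsupported for 8b/10b SST */
	.channel_count = 2,
	.sample_rate_hz = 48000,
};
uint32_t min_hblank_bytes =
	dc_link_required_hblank_size_bytes(stream->link, &audio_params);
/* 0 means the combination could not be evaluated (8b/10b SST) */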
/* The function takes a snapshot of current link resource allocation state
* @dc: pointer to dc of the dm calling this
* @map: a dc link resource snapshot defined internally to dc.
@@ -2203,8 +2239,7 @@ void dc_link_edp_panel_backlight_power_on(struct dc_link *link,
* and 16 bit fractional, where 1.0 is max backlight value.
*/
bool dc_link_set_backlight_level(const struct dc_link *dc_link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
+ struct set_backlight_level_params *backlight_level_params);
/* Set/get nits-based backlight level of an embedded panel (eDP, LVDS). */
bool dc_link_set_backlight_level_nits(struct dc_link *link,
@@ -2369,6 +2404,13 @@ struct dc_sink_dsc_caps {
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
+struct dc_sink_hblank_expansion_caps {
+ // 'true' if these are the virtual DPCD's HBlank expansion caps (the device immediately
+ // upstream of the sink in an MST topology), 'false' if they are the sink's own caps
+ bool is_virtual_dpcd_hblank_expansion;
+ struct hblank_expansion_dpcd_caps dpcd_caps;
+};
+
struct dc_sink_fec_caps {
bool is_rx_fec_supported;
bool is_topology_fec_supported;
@@ -2395,6 +2437,7 @@ struct dc_sink {
struct scdc_caps scdc_caps;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;
+ struct dc_sink_hblank_expansion_caps hblank_expansion_caps;
bool is_vsc_sdp_colorimetry_supported;
@@ -2543,6 +2586,8 @@ struct dc_power_profile {
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context);
+unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
+
/* DSC Interfaces */
#include "dc_dsc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 1e7de0f03290..44ff9abe2880 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -519,7 +519,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
union dmub_rb_cmd cmd = { 0 };
unsigned int panel_inst = 0;
- if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst))
+ if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) &&
+ dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
return;
memset(&cmd, 0, sizeof(cmd));
@@ -1012,7 +1013,6 @@ static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
r2 = test_pipe->plane_res.scl_data.recout;
r2_r = r2.x + r2.width;
r2_b = r2.y + r2.height;
- split_pipe = test_pipe;
/**
* There is another half plane on same layer because of
@@ -1245,7 +1245,7 @@ static int count_active_streams(const struct dc *dc)
for (i = 0; i < dc->current_state->stream_count; ++i) {
struct dc_stream_state *stream = dc->current_state->streams[i];
- if (stream && !stream->dpms_off)
+ if (stream && (!stream->dpms_off || dc->config.disable_ips_in_dpms_off))
count += 1;
}
@@ -1294,6 +1294,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
memset(&new_signals, 0, sizeof(new_signals));
+ new_signals.bits.allow_idle = 1; /* always set */
+
if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
new_signals.bits.allow_pg = 1;
@@ -1389,7 +1391,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
*/
dc_dmub_srv->needs_idle_wake = false;
- if (prev_driver_signals.bits.allow_ips2 &&
+ if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
(!dc->debug.optimize_ips_handshake ||
ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
DC_LOG_IPS(
@@ -1450,7 +1452,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
}
dc_dmub_srv_notify_idle(dc, false);
- if (prev_driver_signals.bits.allow_ips1) {
+ if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
DC_LOG_IPS(
"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
ips_fw->signals.bits.ips1_commit,
@@ -1692,10 +1694,10 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
{
uint8_t num_cmds = 1;
uint32_t i;
- union dmub_rb_cmd cmd[MAX_STREAMS + 1];
+ union dmub_rb_cmd cmd[2 * MAX_STREAMS + 1];
struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;
- memset(cmd, 0, sizeof(union dmub_rb_cmd) * (MAX_STREAMS + 1));
+ memset(cmd, 0, sizeof(union dmub_rb_cmd) * (2 * MAX_STREAMS + 1));
/* fill in generic command header */
global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
@@ -1712,17 +1714,26 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
/* construct per-stream configs */
for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
- struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config;
+ struct dmub_rb_cmd_fams2 *stream_base_cmd = &cmd[i+1].fams2_config;
+ struct dmub_rb_cmd_fams2 *stream_sub_state_cmd = &cmd[i+1+context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config;
/* configure command header */
- stream_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
- stream_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
- stream_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
- stream_cmd->header.multi_cmd_pending = 1;
- /* copy stream static state */
- memcpy(&stream_cmd->config.stream,
- &context->bw_ctx.bw.dcn.fams2_stream_params[i],
- sizeof(struct dmub_fams2_stream_static_state));
+ stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
+ stream_base_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_base_cmd->header.multi_cmd_pending = 1;
+ stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
+ stream_sub_state_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_sub_state_cmd->header.multi_cmd_pending = 1;
+ /* copy stream static base state */
+ memcpy(&stream_base_cmd->config,
+ &context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
+ sizeof(union dmub_cmd_fams2_config));
+ /* copy stream static sub state */
+ memcpy(&stream_sub_state_cmd->config,
+ &context->bw_ctx.bw.dcn.fams2_stream_sub_params[i],
+ sizeof(union dmub_cmd_fams2_config));
}
}
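With N = fams2_global_config.num_streams, the command array is now laid out as follows; the last sub-state entry has multi_cmd_pending cleared so DMUB treats the batch as complete:

/*
 * cmd[0]          global config          multi_cmd_pending = 1
 * cmd[1 .. N]     per-stream base state  multi_cmd_pending = 1
 * cmd[N+1 .. 2N]  per-stream sub state   last entry cleared to 0
 */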
@@ -1733,8 +1744,8 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc,
if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
/* set multi pending for global, and unset for last stream cmd */
global_cmd->header.multi_cmd_pending = 1;
- cmd[context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
- num_cmds += context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
+ cmd[2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
+ num_cmds += 2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
}
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
@@ -1862,3 +1873,81 @@ void dc_dmub_srv_fams2_passthrough_flip(
dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT);
}
}
+
+bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement)
+{
+ bool result;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY,
+ start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return result;
+}
+
+void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output)
+{
+ uint32_t i;
+ enum dmub_gpint_command command_code;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return;
+
+ switch (output->ips_mode) {
+ case DMUB_IPS_MODE_IPS1_MAX:
+ command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER;
+ break;
+ case DMUB_IPS_MODE_IPS2:
+ command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER;
+ break;
+ case DMUB_IPS_MODE_IPS1_RCG:
+ command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER;
+ break;
+ case DMUB_IPS_MODE_IPS1_ONO2_ON:
+ command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER;
+ break;
+ default:
+ command_code = DMUB_GPINT__INVALID_COMMAND;
+ break;
+ }
+
+ if (command_code == DMUB_GPINT__INVALID_COMMAND)
+ return;
+
+ // send gpint commands and wait for ack
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
+ (uint16_t)(output->ips_mode),
+ &output->residency_percent, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->residency_percent = 0;
+
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
+ (uint16_t)(output->ips_mode),
+ &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->entry_counter = 0;
+
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO,
+ (uint16_t)(output->ips_mode),
+ &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->total_active_time_us[0] = 0;
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI,
+ (uint16_t)(output->ips_mode),
+ &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->total_active_time_us[1] = 0;
+
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO,
+ (uint16_t)(output->ips_mode),
+ &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->total_inactive_time_us[0] = 0;
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI,
+ (uint16_t)(output->ips_mode),
+ &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->total_inactive_time_us[1] = 0;
+
+ // NUM_IPS_HISTOGRAM_BUCKETS = 16
+ for (i = 0; i < 16; i++)
+ if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i],
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ output->histogram[i] = 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 42f0cb672d8b..10b48198b7a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -209,4 +209,43 @@ void dc_dmub_srv_fams2_passthrough_flip(
struct dc_stream_state *stream,
struct dc_surface_update *srf_updates,
int surface_count);
+
+/**
+ * struct ips_residency_info - struct containing info from dmub_ips_residency_stats
+ *
+ * @ips_mode: The IPS mode that the following stats pertain to
+ * @residency_percent: The percentage of time spent in given IPS mode in millipercent
+ * @entry_counter: The number of entries made in to this IPS state
+ * @total_active_time_us: uint32_t array of length 2 representing time in the given IPS mode
+ * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits.
+ * @total_inactive_time_us: uint32_t array of length 2 representing time outside the given IPS mode
+ * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits.
+ * @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c
+ */
+struct ips_residency_info {
+ enum dmub_ips_mode ips_mode;
+ unsigned int residency_percent;
+ unsigned int entry_counter;
+ unsigned int total_active_time_us[2];
+ unsigned int total_inactive_time_us[2];
+ unsigned int histogram[16];
+};
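Because the durations are split into 32-bit halves, a consumer would reassemble them roughly like this (a sketch, not an existing helper):

static inline uint64_t ips_duration_us(const unsigned int half[2])
{
	/* index 0 holds the lower 32 bits, index 1 the upper 32 bits */
	return ((uint64_t)half[1] << 32) | half[0];
}

/* e.g. ips_duration_us(info.total_active_time_us) */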
+
+/**
+ * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status
+ *
+ * @dc_dmub_srv: The DC DMUB service pointer
+ * @start_measurement: Describes whether to start or stop measurement
+ *
+ * Return: true if GPINT was sent successfully, false otherwise
+ */
+bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement);
+
+/**
+ * void dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info
+ *
+ * @dc_dmub_srv: The DC DMUB service pointer
+ * @output: Output struct to copy the residency info to
+ */
+void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 41bd95e9177a..94ce8fe74481 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -969,6 +969,21 @@ union dp_sink_video_fallback_formats {
uint8_t raw;
};
+union dp_receive_port0_cap {
+ struct {
+ uint8_t RESERVED :1;
+ uint8_t LOCAL_EDID_PRESENT :1;
+ uint8_t ASSOCIATED_TO_PRECEDING_PORT:1;
+ uint8_t HBLANK_EXPANSION_CAPABLE :1;
+ uint8_t BUFFER_SIZE_UNIT :1;
+ uint8_t BUFFER_SIZE_PER_PORT :1;
+ uint8_t HBLANK_REDUCTION_CAPABLE :1;
+ uint8_t RESERVED2:1;
+ uint8_t BUFFER_SIZE:8;
+ } bits;
+ uint8_t raw[2];
+};
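A sketch of decoding these two DPCD bytes after an AUX read (the source buffer dpcd_buf and its offsets are assumptions for illustration):

union dp_receive_port0_cap port0_cap;

port0_cap.raw[0] = dpcd_buf[0]; /* capability bits */
port0_cap.raw[1] = dpcd_buf[1]; /* BUFFER_SIZE byte */

bool can_expand_hblank = port0_cap.bits.HBLANK_EXPANSION_CAPABLE;
uint32_t raw_buffer_size = port0_cap.bits.BUFFER_SIZE;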
+
union dpcd_max_uncompressed_pixel_rate_cap {
struct {
uint16_t max_uncompressed_pixel_rate_cap :15;
@@ -1166,6 +1181,7 @@ struct dpcd_caps {
int8_t branch_dev_name[6];
int8_t branch_hw_revision;
int8_t branch_fw_revision[2];
+ int8_t branch_vendor_specific_data[4];
bool allow_invalid_MSA_timing_param;
bool panel_mode_edp;
@@ -1191,6 +1207,8 @@ struct dpcd_caps {
struct edp_psr_info psr_info;
struct replay_info pr_info;
+ uint16_t edp_oled_emission_rate;
+ union dp_receive_port0_cap receive_port0_cap;
};
union dpcd_sink_ext_caps {
@@ -1204,7 +1222,7 @@ union dpcd_sink_ext_caps {
uint8_t oled : 1;
uint8_t reserved_2 : 1;
uint8_t miniled : 1;
- uint8_t reserved : 1;
+ uint8_t emission_output : 1;
} bits;
uint8_t raw;
};
@@ -1358,6 +1376,9 @@ struct dp_trace {
#ifndef DP_TUNNELING_IRQ
#define DP_TUNNELING_IRQ (1 << 5)
#endif
+#ifndef DP_BRANCH_VENDOR_SPECIFIC_START
+#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
+#endif
/** USB4 DPCD BW Allocation Registers Chapter 10.7 **/
#ifndef DP_TUNNELING_CAPABILITIES
#define DP_TUNNELING_CAPABILITIES 0xE000D /* 1.4a */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 9014c2409817..9d18f1c08079 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -94,6 +94,11 @@ uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
const int num_slices_h,
const bool is_dp);
+void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
+ const struct dsc_dec_dpcd_caps *dsc_sink_caps);
+void dc_dsc_dump_encoder_caps(const struct display_stream_compressor *dsc,
+ const struct dc_crtc_timing *timing);
+
/* TODO - Hardware/specs limitation should be owned by dc dsc and returned to DM,
* and DM can choose to OVERRIDE the limitation on CASE BY CASE basis.
* Hardware/specs limitation should not be writable by DM.
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index c10567ec1c81..5ac55601a6da 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -341,89 +341,101 @@ enum swizzle_mode_addr3_values {
DC_ADDR3_SW_UNKNOWN = DC_ADDR3_SW_MAX
};
-union dc_tiling_info {
-
- struct {
- /* Specifies the number of memory banks for tiling
- * purposes.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 2,4,8,16
- */
- unsigned int num_banks;
- /* Specifies the number of tiles in the x direction
- * to be incorporated into the same bank.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 1,2,4,8
- */
- unsigned int bank_width;
- unsigned int bank_width_c;
- /* Specifies the number of tiles in the y direction to
- * be incorporated into the same bank.
- * Only applies to 2D and 3D tiling modes.
- * POSSIBLE VALUES: 1,2,4,8
- */
- unsigned int bank_height;
- unsigned int bank_height_c;
- /* Specifies the macro tile aspect ratio. Only applies
- * to 2D and 3D tiling modes.
- */
- unsigned int tile_aspect;
- unsigned int tile_aspect_c;
- /* Specifies the number of bytes that will be stored
- * contiguously for each tile.
- * If the tile data requires more storage than this
- * amount, it is split into multiple slices.
- * This field must not be larger than
- * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
- * Only applies to 2D and 3D tiling modes.
- * For color render targets, TILE_SPLIT >= 256B.
- */
- enum tile_split_values tile_split;
- enum tile_split_values tile_split_c;
- /* Specifies the addressing within a tile.
- * 0x0 - DISPLAY_MICRO_TILING
- * 0x1 - THIN_MICRO_TILING
- * 0x2 - DEPTH_MICRO_TILING
- * 0x3 - ROTATED_MICRO_TILING
- */
- enum tile_mode_values tile_mode;
- enum tile_mode_values tile_mode_c;
- /* Specifies the number of pipes and how they are
- * interleaved in the surface.
- * Refer to memory addressing document for complete
- * details and constraints.
- */
- unsigned int pipe_config;
- /* Specifies the tiling mode of the surface.
- * THIN tiles use an 8x8x1 tile size.
- * THICK tiles use an 8x8x4 tile size.
- * 2D tiling modes rotate banks for successive Z slices
- * 3D tiling modes rotate pipes and banks for Z slices
- * Refer to memory addressing document for complete
- * details and constraints.
- */
- enum array_mode_values array_mode;
- } gfx8;
+enum dc_gfxversion {
+ DcGfxVersion7 = 0,
+ DcGfxVersion8,
+ DcGfxVersion9,
+ DcGfxVersion10,
+ DcGfxVersion11,
+ DcGfxAddr3,
+ DcGfxVersionUnknown
+};
+
+ struct dc_tiling_info {
+ unsigned int gfxversion; // Specifies which part of the union to use. Must use a dc_gfxversion enum value
+ union {
+ struct {
+ /* Specifies the number of memory banks for tiling
+ * purposes.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 2,4,8,16
+ */
+ unsigned int num_banks;
+ /* Specifies the number of tiles in the x direction
+ * to be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_width;
+ unsigned int bank_width_c;
+ /* Specifies the number of tiles in the y direction to
+ * be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_height;
+ unsigned int bank_height_c;
+ /* Specifies the macro tile aspect ratio. Only applies
+ * to 2D and 3D tiling modes.
+ */
+ unsigned int tile_aspect;
+ unsigned int tile_aspect_c;
+ /* Specifies the number of bytes that will be stored
+ * contiguously for each tile.
+ * If the tile data requires more storage than this
+ * amount, it is split into multiple slices.
+ * This field must not be larger than
+ * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
+ * Only applies to 2D and 3D tiling modes.
+ * For color render targets, TILE_SPLIT >= 256B.
+ */
+ enum tile_split_values tile_split;
+ enum tile_split_values tile_split_c;
+ /* Specifies the addressing within a tile.
+ * 0x0 - DISPLAY_MICRO_TILING
+ * 0x1 - THIN_MICRO_TILING
+ * 0x2 - DEPTH_MICRO_TILING
+ * 0x3 - ROTATED_MICRO_TILING
+ */
+ enum tile_mode_values tile_mode;
+ enum tile_mode_values tile_mode_c;
+ /* Specifies the number of pipes and how they are
+ * interleaved in the surface.
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ unsigned int pipe_config;
+ /* Specifies the tiling mode of the surface.
+ * THIN tiles use an 8x8x1 tile size.
+ * THICK tiles use an 8x8x4 tile size.
+ * 2D tiling modes rotate banks for successive Z slices
+ * 3D tiling modes rotate pipes and banks for Z slices
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ enum array_mode_values array_mode;
+ } gfx8;
- struct {
- enum swizzle_mode_values swizzle;
- unsigned int num_pipes;
- unsigned int max_compressed_frags;
- unsigned int pipe_interleave;
-
- unsigned int num_banks;
- unsigned int num_shader_engines;
- unsigned int num_rb_per_se;
- bool shaderEnable;
-
- bool meta_linear;
- bool rb_aligned;
- bool pipe_aligned;
- unsigned int num_pkrs;
- } gfx9;/*gfx9, gfx10 and above*/
- struct {
- enum swizzle_mode_addr3_values swizzle;
- } gfx_addr3;/*gfx with addr3 and above*/
+ struct {
+ enum swizzle_mode_values swizzle;
+ unsigned int num_pipes;
+ unsigned int max_compressed_frags;
+ unsigned int pipe_interleave;
+
+ unsigned int num_banks;
+ unsigned int num_shader_engines;
+ unsigned int num_rb_per_se;
+ bool shaderEnable;
+
+ bool meta_linear;
+ bool rb_aligned;
+ bool pipe_aligned;
+ unsigned int num_pkrs;
+ } gfx9;/*gfx9, gfx10 and above*/
+ struct {
+ enum swizzle_mode_addr3_values swizzle;
+ } gfx_addr3;/*gfx with addr3 and above*/
+ };
};
/* Rotation angle */
@@ -975,6 +987,9 @@ struct dc_crtc_timing {
struct dc_crtc_timing_flags flags;
uint32_t dsc_fixed_bits_per_pixel_x16; /* DSC target bitrate in 1/16 of bpp (e.g. 128 -> 8bpp) */
struct dc_dsc_config dsc_cfg;
+
+ /* The number of pixels that HBlank has been expanded by from the original EDID timing. */
+ uint32_t expanded_hblank;
};
enum trigger_delay {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index 44afcd989224..fabcefeda288 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -26,7 +26,6 @@
#ifndef _DC_PLANE_H_
#define _DC_PLANE_H_
-#include "dc.h"
#include "dc_hw_types.h"
struct dc_plane_state *dc_create_plane_state(const struct dc *dc);
@@ -35,4 +34,7 @@ const struct dc_plane_status *dc_plane_get_status(
void dc_plane_state_retain(struct dc_plane_state *plane_state);
void dc_plane_state_release(struct dc_plane_state *plane_state);
+void dc_plane_force_update_for_panic(struct dc_plane_state *plane_state,
+ bool clear_tiling);
+
#endif /* _DC_PLANE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 603552dbd771..3518eb1b8cd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -8,13 +8,13 @@
#include "dcn32/dcn32_dpp.h"
#include "dcn401/dcn401_dpp.h"
-static struct spl_funcs dcn2_spl_funcs = {
+static struct spl_callbacks dcn2_spl_callbacks = {
.spl_calc_lb_num_partitions = dscl2_spl_calc_lb_num_partitions,
};
-static struct spl_funcs dcn32_spl_funcs = {
+static struct spl_callbacks dcn32_spl_callbacks = {
.spl_calc_lb_num_partitions = dscl32_spl_calc_lb_num_partitions,
};
-static struct spl_funcs dcn401_spl_funcs = {
+static struct spl_callbacks dcn401_spl_callbacks = {
.spl_calc_lb_num_partitions = dscl401_spl_calc_lb_num_partitions,
};
static void populate_splrect_from_rect(struct spl_rect *spl_rect, const struct rect *rect)
@@ -38,6 +38,7 @@ static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality,
spl_scaling_quality->h_taps = scaling_quality->h_taps;
spl_scaling_quality->v_taps_c = scaling_quality->v_taps_c;
spl_scaling_quality->v_taps = scaling_quality->v_taps;
+ spl_scaling_quality->integer_scaling = scaling_quality->integer_scaling;
}
static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality,
const struct spl_taps *spl_scaling_quality)
@@ -63,6 +64,13 @@ static void populate_inits_from_splinits(struct scl_inits *inits,
inits->h_c = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int_c, spl_inits->h_filter_init_frac_c >> 5, 0, 19);
inits->v_c = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int_c, spl_inits->v_filter_init_frac_c >> 5, 0, 19);
}
+static void populate_splformat_from_format(enum spl_pixel_format *spl_pixel_format, const enum pixel_format pixel_format)
+{
+ if (pixel_format < PIXEL_FORMAT_INVALID)
+ *spl_pixel_format = (enum spl_pixel_format)pixel_format;
+ else
+ *spl_pixel_format = SPL_PIXEL_FORMAT_INVALID;
+}
/// @brief Translate SPL input parameters from pipe context
/// @param pipe_ctx
/// @param spl_in
@@ -76,19 +84,19 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
// This is used to determine the vtap support
switch (plane_state->ctx->dce_version) {
case DCN_VERSION_2_0:
- spl_in->funcs = &dcn2_spl_funcs;
+ spl_in->callbacks = dcn2_spl_callbacks;
break;
case DCN_VERSION_3_2:
- spl_in->funcs = &dcn32_spl_funcs;
+ spl_in->callbacks = dcn32_spl_callbacks;
break;
case DCN_VERSION_4_01:
- spl_in->funcs = &dcn401_spl_funcs;
+ spl_in->callbacks = dcn401_spl_callbacks;
break;
default:
- spl_in->funcs = &dcn2_spl_funcs;
+ spl_in->callbacks = dcn2_spl_callbacks;
}
// Make format field from spl_in point to plane_res scl_data format
- spl_in->basic_in.format = (enum spl_pixel_format)pipe_ctx->plane_res.scl_data.format;
+ populate_splformat_from_format(&spl_in->basic_in.format, pipe_ctx->plane_res.scl_data.format);
// Make view_format from basic_out point to view_format from stream
spl_in->basic_out.view_format = (enum spl_view_3d)stream->view_format;
// Populate spl input basic input clip rect from plane state clip rect
@@ -107,19 +115,21 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->basic_in.horizontal_mirror = plane_state->horizontal_mirror;
// Calculate horizontal splits and split index
- spl_in->basic_in.mpc_combine_h = resource_get_mpc_slice_count(pipe_ctx);
+ spl_in->basic_in.num_h_slices_recout_width_align.use_recout_width_aligned = false;
+ spl_in->basic_in.num_h_slices_recout_width_align.num_slices_recout_width.mpc_num_h_slices =
+ resource_get_mpc_slice_count(pipe_ctx);
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
- spl_in->basic_in.mpc_combine_v = 0;
+ spl_in->basic_in.mpc_h_slice_index = 0;
else
- spl_in->basic_in.mpc_combine_v = resource_get_mpc_slice_index(pipe_ctx);
+ spl_in->basic_in.mpc_h_slice_index = resource_get_mpc_slice_index(pipe_ctx);
populate_splrect_from_rect(&spl_in->basic_out.odm_slice_rect, &odm_slice_src);
spl_in->basic_out.odm_combine_factor = 0;
spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx);
// Make spl input basic out info output_size width point to stream h active
spl_in->basic_out.output_size.width =
- stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow;
// Make spl input basic out info output_size height point to v active
spl_in->basic_out.output_size.height =
stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
@@ -187,14 +197,14 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active;
spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active;
- spl_in->debug.sharpen_policy = (enum sharpen_policy)pipe_ctx->stream->ctx->dc->debug.sharpen_policy;
+ spl_in->sharpen_policy = (enum sharpen_policy)plane_state->adaptive_sharpness_policy;
spl_in->debug.scale_to_sharpness_policy =
(enum scale_to_sharpness_policy)pipe_ctx->stream->ctx->dc->debug.scale_to_sharpness_policy;
/* Check if it is stream is in fullscreen and if its HDR.
* Use this to determine sharpness levels
*/
- spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream);
+ spl_in->is_fullscreen = pipe_ctx->stream->sharpening_required;
spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream);
spl_in->sdr_white_level_nits = plane_state->sdr_white_level_nits;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h
index caa45db50232..db1e63a7d460 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_state.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_state.h
@@ -26,7 +26,6 @@
#ifndef _DC_STATE_H_
#define _DC_STATE_H_
-#include "dc.h"
#include "inc/core_status.h"
struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 14ea47eda0c8..3e303c7808fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -56,7 +56,7 @@ struct dc_stream_status {
int plane_count;
int audio_inst;
struct timing_sync_info timing_sync_info;
- struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+ struct dc_plane_state *plane_states[MAX_SURFACES];
bool is_abm_supported;
struct mall_stream_config mall_stream_config;
bool fpo_in_use;
@@ -143,6 +143,7 @@ union stream_update_flags {
uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
uint32_t scaler_sharpener : 1;
+ uint32_t sharpening_required : 1;
} bits;
uint32_t raw;
@@ -310,6 +311,7 @@ struct dc_stream_state {
struct luminance_data lumin_data;
bool scaler_sharpener_update;
+ bool sharpening_required;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -356,6 +358,7 @@ struct dc_stream_update {
struct dc_cursor_position *cursor_position;
bool *hw_cursor_req;
bool *scaler_sharpener_update;
+ bool *sharpening_required;
};
bool dc_is_stream_unchanged(
@@ -444,10 +447,6 @@ enum dc_status dc_stream_add_dsc_to_resource(struct dc *dc,
struct dc_state *state,
struct dc_stream_state *stream);
-bool dc_stream_warmup_writeback(struct dc *dc,
- int num_dwb,
- struct dc_writeback_info *wb_info);
-
bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream);
bool dc_stream_set_dynamic_metadata(struct dc *dc,
@@ -538,17 +537,26 @@ bool dc_stream_get_crtc_position(struct dc *dc,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_crc_window(struct dc_stream_state *stream,
struct rect *rect,
+ uint8_t phy_id,
bool is_stop);
+
+bool dc_stream_forward_multiple_crc_window(struct dc_stream_state *stream,
+ struct crc_window *window,
+ uint8_t phy_id,
+ bool stop);
#endif
bool dc_stream_configure_crc(struct dc *dc,
struct dc_stream_state *stream,
struct crc_params *crc_window,
bool enable,
- bool continuous);
+ bool continuous,
+ uint8_t idx,
+ bool reset);
bool dc_stream_get_crc(struct dc *dc,
struct dc_stream_state *stream,
+ uint8_t idx,
uint32_t *r_cr,
uint32_t *g_y,
uint32_t *b_cb);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 6d7989b751e2..0c2aa91f0a11 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -76,7 +76,6 @@ struct dc_perf_trace {
unsigned long last_entry_write;
};
-#define MAX_SURFACE_NUM 6
#define NUM_PIXEL_FORMATS 10
enum tiling_mode {
@@ -179,6 +178,9 @@ struct dc_panel_patch {
unsigned int mst_start_top_delay;
unsigned int remove_sink_ext_caps;
unsigned int disable_colorimetry;
+ uint8_t blankstream_before_otg_off;
+ bool oled_optimize_display_on;
+ unsigned int force_mst_blocked_discovery;
};
struct dc_edid_caps {
@@ -872,6 +874,14 @@ struct dsc_dec_dpcd_caps {
bool is_dp; /* Decoded format */
};
+struct hblank_expansion_dpcd_caps {
+ bool expansion_supported;
+ bool reduction_supported;
+ bool buffer_unit_bytes; /* True: buffer size in bytes. False: buffer size in pixels */
+ bool buffer_per_port; /* True: buffer size per port. False: buffer size per lane */
+ uint32_t buffer_size; /* Add 1 to value and multiply by 32 */
+};
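The buffer_size field follows the usual DPCD encoding: a raw value v represents (v + 1) * 32 units, where buffer_unit_bytes selects bytes vs. pixels. A hypothetical decoding helper:

static inline uint32_t hblank_buffer_units(uint32_t raw_buffer_size)
{
	/* raw 0 -> 32 units, raw 1 -> 64 units, ... */
	return (raw_buffer_size + 1) * 32;
}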
+
struct dc_golden_table {
uint16_t dc_golden_table_ver;
uint32_t aux_dphy_rx_control0_val;
@@ -922,11 +932,24 @@ struct display_endpoint_id {
enum display_endpoint_type ep_type;
};
+enum backlight_control_type {
+ BACKLIGHT_CONTROL_PWM = 0,
+ BACKLIGHT_CONTROL_VESA_AUX = 1,
+ BACKLIGHT_CONTROL_AMD_AUX = 2,
+};
+
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+#define MAX_CRC_WINDOW_NUM 2
+
struct otg_phy_mux {
uint8_t phy_output_num;
uint8_t otg_output_num;
};
+
+struct crc_window {
+ struct rect rect;
+ bool enable;
+};
#endif
enum dc_detect_reason {
@@ -1043,10 +1066,13 @@ enum replay_FW_Message_type {
union replay_error_status {
struct {
- unsigned char STATE_TRANSITION_ERROR :1;
- unsigned char LINK_CRC_ERROR :1;
- unsigned char DESYNC_ERROR :1;
- unsigned char RESERVED :5;
+ unsigned int STATE_TRANSITION_ERROR :1;
+ unsigned int LINK_CRC_ERROR :1;
+ unsigned int DESYNC_ERROR :1;
+ unsigned int RESERVED_3 :1;
+ unsigned int LOW_RR_INCORRECT_VTOTAL :1;
+ unsigned int NO_DOUBLED_RR :1;
+ unsigned int RESERVED_6_7 :2;
} bits;
unsigned char raw;
};
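Note that the bitfield members are widened to unsigned int while raw stays an unsigned char, so raw only aliases the low byte of the flags, which is where all defined bits live. A small predicate built on the layout as declared (a sketch; which bits count as fatal is an illustrative choice, not taken from the patch):

static bool replay_has_fatal_error(union replay_error_status status)
{
	/* Bits 0-2 are the original failures; bits 4-5
	 * (LOW_RR_INCORRECT_VTOTAL, NO_DOUBLED_RR) are the new
	 * low-refresh-rate conditions. */
	return status.bits.STATE_TRANSITION_ERROR ||
	       status.bits.LINK_CRC_ERROR ||
	       status.bits.DESYNC_ERROR;
}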
@@ -1093,6 +1119,8 @@ struct replay_config {
union replay_error_status replay_error_status;
/* Replay Low Hz enable Options */
union replay_low_refresh_rate_enable_options low_rr_enable_options;
+ /* Replay coasting vtotal is within low refresh rate range. */
+ bool low_rr_activated;
};
/* Replay feature flags*/
@@ -1117,10 +1145,12 @@ struct replay_settings {
uint32_t defer_update_coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
uint32_t link_off_frame_count;
- /* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
- uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+ /* Replay pseudo vtotal for low refresh rate */
+ uint16_t low_rr_full_screen_video_pseudo_vtotal;
/* Replay last pseudo vtotal set to DMUB */
uint16_t last_pseudo_vtotal;
+ /* Replay desync error */
+ uint32_t replay_desync_error_fail_count;
};
/* To split out "global" and "per-panel" config settings.
@@ -1295,4 +1325,31 @@ struct dc_commit_streams_params {
enum dc_power_source_type power_source;
};
+struct set_backlight_level_params {
+ /* backlight in pwm */
+ uint32_t backlight_pwm_u16_16;
+ /* brightness ramping */
+ uint32_t frame_ramp;
+ /* backlight control type
+ * 0: PWM backlight control
+ * 1: VESA AUX backlight control
+ * 2: AMD AUX backlight control
+ */
+ enum backlight_control_type control_type;
+ /* backlight in millinits */
+ uint32_t backlight_millinits;
+ /* transition time in ms */
+ uint32_t transition_time_in_ms;
+ /* minimum luminance in nits */
+ uint32_t min_luminance;
+ /* maximum luminance in nits */
+ uint32_t max_luminance;
+ /* minimum backlight in pwm */
+ uint32_t min_backlight_pwm;
+ /* maximum backlight in pwm */
+ uint32_t max_backlight_pwm;
+ /* AUX HW instance */
+ uint8_t aux_inst;
+};
+
#endif /* DC_TYPES_H_ */
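The new parameter block gathers every input the backlight paths need into one struct instead of a growing argument list. A sketch of filling it for plain PWM control (the field values and the 16.16 fixed-point convention are assumptions for illustration, not taken from the patch):

struct set_backlight_level_params params = {
	.control_type = BACKLIGHT_CONTROL_PWM,
	/* assumed u16.16 fixed point: integer part in the high 16 bits */
	.backlight_pwm_u16_16 = 0x8000,	/* ~50% duty, if 1.0 == 0x10000 */
	.frame_ramp = 0,		/* apply immediately, no ramping */
};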
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index 838d72eaa87f..b363f5360818 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -1392,10 +1392,10 @@ static void dccg35_set_dtbclk_dto(
/* The recommended programming sequence to enable DTBCLK DTO to generate
* valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should
- * be set only after DTO is enabled
+ * be set only after DTO is enabled.
+ * PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the
+ * programming is handled in program_pix_clk() regardless, so it can be removed from here.
*/
- REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
- PIPE_DTO_SRC_SEL[params->otg_inst], 2);
} else {
switch (params->otg_inst) {
case 0:
@@ -1412,9 +1412,12 @@ static void dccg35_set_dtbclk_dto(
break;
}
- REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
- DTBCLK_DTO_ENABLE[params->otg_inst], 0,
- PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);
+ /**
+ * PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the
+ * programming is handled in program_pix_clk() regardless, so it can be removed from here.
+ */
+ REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
+ DTBCLK_DTO_ENABLE[params->otg_inst], 0);
REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 0b889004509a..d3e46c3cfa57 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -580,9 +580,6 @@ static void dccg401_set_dpstreamclk(
int otg_inst,
int dp_hpo_inst)
{
- /* set the dtbclk_p source */
- dccg401_set_dtbclk_p_src(dccg, src, otg_inst);
-
/* enabled to select one of the DTBCLKs for pipe */
if (src == REFCLK)
dccg401_disable_dpstreamclk(dccg, dp_hpo_inst);
@@ -805,33 +802,6 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- switch (link_enc_inst) {
- case 0:
- REG_UPDATE(SYMCLKA_CLOCK_ENABLE,
- SYMCLKA_CLOCK_ENABLE, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1);
- break;
- case 1:
- REG_UPDATE(SYMCLKB_CLOCK_ENABLE,
- SYMCLKB_CLOCK_ENABLE, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1);
- break;
- case 2:
- REG_UPDATE(SYMCLKC_CLOCK_ENABLE,
- SYMCLKC_CLOCK_ENABLE, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1);
- break;
- case 3:
- REG_UPDATE(SYMCLKD_CLOCK_ENABLE,
- SYMCLKD_CLOCK_ENABLE, 1);
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1);
- break;
- }
-
switch (stream_enc_inst) {
case 0:
REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE,
@@ -864,37 +834,8 @@ static void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst
}
}
-/*get other front end connected to this backend*/
-static uint8_t dccg401_get_number_enabled_symclk_fe_connected_to_be(struct dccg *dccg, uint32_t link_enc_inst)
-{
- uint8_t num_enabled_symclk_fe = 0;
- uint32_t fe_clk_en[4] = {0}, be_clk_sel[4] = {0};
- struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- uint8_t i;
-
- REG_GET_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, &fe_clk_en[0],
- SYMCLKA_FE_SRC_SEL, &be_clk_sel[0]);
-
- REG_GET_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, &fe_clk_en[1],
- SYMCLKB_FE_SRC_SEL, &be_clk_sel[1]);
-
- REG_GET_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, &fe_clk_en[2],
- SYMCLKC_FE_SRC_SEL, &be_clk_sel[2]);
-
- REG_GET_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, &fe_clk_en[3],
- SYMCLKD_FE_SRC_SEL, &be_clk_sel[3]);
-
- for (i = 0; i < ARRAY_SIZE(fe_clk_en); i++) {
- if (fe_clk_en[i] && be_clk_sel[i] == link_enc_inst)
- num_enabled_symclk_fe++;
- }
-
- return num_enabled_symclk_fe;
-}
-
static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
{
- uint8_t num_enabled_symclk_fe = 0;
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
switch (stream_enc_inst) {
@@ -919,31 +860,6 @@ static void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_ins
SYMCLKD_FE_SRC_SEL, 0);
break;
}
-
- /*check other enabled symclk fe connected to this be */
- num_enabled_symclk_fe = dccg401_get_number_enabled_symclk_fe_connected_to_be(dccg, link_enc_inst);
- /*only turn off backend clk if other front ends attached to this backend are all off,
- for mst, only turn off the backend if this is the last front end*/
- if (num_enabled_symclk_fe == 0) {
- switch (link_enc_inst) {
- case 0:
- REG_UPDATE(SYMCLKA_CLOCK_ENABLE,
- SYMCLKA_CLOCK_ENABLE, 0);
- break;
- case 1:
- REG_UPDATE(SYMCLKB_CLOCK_ENABLE,
- SYMCLKB_CLOCK_ENABLE, 0);
- break;
- case 2:
- REG_UPDATE(SYMCLKC_CLOCK_ENABLE,
- SYMCLKC_CLOCK_ENABLE, 0);
- break;
- case 3:
- REG_UPDATE(SYMCLKD_CLOCK_ENABLE,
- SYMCLKD_CLOCK_ENABLE, 0);
- break;
- }
- }
}
static const struct dccg_funcs dccg401_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index b700608e4240..077337698e0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1105,6 +1105,9 @@ static bool dcn401_program_pix_clk(
&dto_params);
} else {
+ if (pll_settings->actual_pix_clk_100hz > 6000000UL)
+ return false;
+
/* disables DP DTO when provided with TMDS signal type */
clock_source->ctx->dc->res_pool->dccg->funcs->set_dp_dto(
clock_source->ctx->dc->res_pool->dccg,
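actual_pix_clk_100hz is in units of 100 Hz, so the new early return rejects anything above 6,000,000 * 100 Hz = 600 MHz before the TMDS path programs the DTO. The same bound in isolation (sketch only; the 600 MHz reading is an interpretation of the constant, not stated in the patch):

static inline bool tmds_pix_clk_in_range(unsigned long actual_pix_clk_100hz)
{
	/* 6,000,000 hundred-Hz units == 600 MHz upper bound */
	return actual_pix_clk_100hz <= 6000000UL;
}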
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index f5e1d9caee4c..1c2009e38aa1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -98,7 +98,7 @@ static enum mi_bits_per_pixel get_mi_bpp(
}
static enum mi_tiling_format get_mi_tiling(
- union dc_tiling_info *tiling_info)
+ struct dc_tiling_info *tiling_info)
{
switch (tiling_info->gfx8.array_mode) {
case DC_ARRAY_1D_TILED_THIN1:
@@ -133,7 +133,7 @@ static bool is_vert_scan(enum dc_rotation_angle rotation)
static void dce_mi_program_pte_vm(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation)
{
struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
@@ -430,7 +430,7 @@ static void dce120_mi_program_display_marks(struct mem_input *mi,
}
static void program_tiling(
- struct dce_mem_input *dce_mi, const union dc_tiling_info *info)
+ struct dce_mem_input *dce_mi, const struct dc_tiling_info *info)
{
if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */
REG_UPDATE_6(GRPH_CONTROL,
@@ -481,7 +481,6 @@ static void program_tiling(
}
}
-
static void program_size_and_rotation(
struct dce_mem_input *dce_mi,
enum dc_rotation_angle rotation,
@@ -627,10 +626,31 @@ static void program_grph_pixel_format(
GRPH_PRESCALE_B_SIGN, sign);
}
+static void dce_mi_clear_tiling(
+ struct mem_input *mi)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+
+ if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */
+ REG_UPDATE(GRPH_CONTROL,
+ GRPH_SW_MODE, DC_SW_LINEAR);
+ }
+
+ if (dce_mi->masks->GRPH_MICRO_TILE_MODE) { /* GFX8 */
+ REG_UPDATE(GRPH_CONTROL,
+ GRPH_ARRAY_MODE, DC_SW_LINEAR);
+ }
+
+ if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX6 but reuses gfx8 struct */
+ REG_UPDATE(GRPH_CONTROL,
+ GRPH_ARRAY_MODE, DC_SW_LINEAR);
+ }
+}
+
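dce_mi_clear_tiling forces the surface back to DC_SW_LINEAR through whichever GRPH_CONTROL field the generation's masks expose (GFX9, GFX8, or GFX6 reusing the gfx8 struct), and it is wired into every DCE mem_input function table below as the new mem_input_clear_tiling hook. Callers would typically guard the optional hook (a sketch of the usual pattern, not code from this patch):

	if (mi->funcs->mem_input_clear_tiling)
		mi->funcs->mem_input_clear_tiling(mi);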
static void dce_mi_program_surface_config(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -650,7 +670,7 @@ static void dce_mi_program_surface_config(
static void dce60_mi_program_surface_config(
struct mem_input *mi,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation, /* not used in DCE6 */
struct dc_plane_dcc_param *dcc,
@@ -884,7 +904,8 @@ static const struct mem_input_funcs dce_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -897,7 +918,8 @@ static const struct mem_input_funcs dce60_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce60_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
#endif
@@ -910,7 +932,8 @@ static const struct mem_input_funcs dce112_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
static const struct mem_input_funcs dce120_mi_funcs = {
@@ -922,7 +945,8 @@ static const struct mem_input_funcs dce120_mi_funcs = {
.mem_input_program_pte_vm = dce_mi_program_pte_vm,
.mem_input_program_surface_config =
dce_mi_program_surface_config,
- .mem_input_is_flip_pending = dce_mi_is_flip_pending
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending,
+ .mem_input_clear_tiling = dce_mi_clear_tiling,
};
void dce_mem_input_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 5c2825bc9a87..d199e4ed2e59 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -277,7 +277,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
uint32_t misc1 = 0;
uint32_t h_blank;
uint32_t h_back_porch;
- uint8_t synchronous_clock = 0; /* asynchronous mode */
uint8_t colorimetry_bpc;
uint8_t dynamic_range_rgb = 0; /*full range*/
uint8_t dynamic_range_ycbcr = 1; /*bt709*/
@@ -380,7 +379,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
}
- misc0 = misc0 | synchronous_clock;
misc0 = colorimetry_bpc << 5;
if (REG(DP_MSA_TIMING_PARAM1)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index cae18f8c1c9a..88c75c243bf8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -390,8 +390,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
- else
- link->psr_settings.force_ffu_mode = 0;
+
copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index db7557a1c613..2c43c2422638 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -76,7 +76,6 @@ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MAS
mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C,
value);
- temp = 0;
value = 0;
temp = address.low_part >>
UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT;
@@ -112,7 +111,6 @@ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MAS
mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L,
value);
- temp = 0;
value = 0;
temp = address.low_part >>
UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT;
@@ -164,7 +162,7 @@ static void enable(struct dce_mem_input *mem_input110)
static void program_tiling(
struct dce_mem_input *mem_input110,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
uint32_t value = 0;
@@ -525,7 +523,7 @@ static const unsigned int dvmm_Hw_Setting_Linear[4][9] = {
/* Helper to get table entry from surface info */
static const unsigned int *get_dvmm_hw_setting(
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum surface_pixel_format format,
bool chroma)
{
@@ -565,7 +563,7 @@ static const unsigned int *get_dvmm_hw_setting(
static void dce_mem_input_v_program_pte_vm(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation)
{
struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
@@ -638,7 +636,7 @@ static void dce_mem_input_v_program_pte_vm(
static void dce_mem_input_v_program_surface_config(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index fa422a8cbced..61b0807693fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -2127,70 +2127,131 @@ bool dce110_configure_crc(struct timing_generator *tg,
cntl_addr = CRTC_REG(mmCRTC_CRC_CNTL);
- /* First, disable CRC before we configure it. */
- dm_write_reg(tg->ctx, cntl_addr, 0);
+ if (!params->enable || params->reset)
+ /* First, disable CRC before we configure it. */
+ dm_write_reg(tg->ctx, cntl_addr, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
- set_reg_field_value(value, params->windowa_x_start,
- CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_START);
- set_reg_field_value(value, params->windowa_x_end,
- CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window A y axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
- set_reg_field_value(value, params->windowa_y_start,
- CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_START);
- set_reg_field_value(value, params->windowa_y_end,
- CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window B x axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
- set_reg_field_value(value, params->windowb_x_start,
- CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_START);
- set_reg_field_value(value, params->windowb_x_end,
- CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Window B y axis start and end. */
- value = 0;
- addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
- set_reg_field_value(value, params->windowb_y_start,
- CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_START);
- set_reg_field_value(value, params->windowb_y_end,
- CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_END);
- dm_write_reg(tg->ctx, addr, value);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- value = 0;
- set_reg_field_value(value, params->continuous_mode ? 1 : 0,
- CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
- set_reg_field_value(value, params->selection,
- CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
- set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
- dm_write_reg(tg->ctx, cntl_addr, value);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_X_CONTROL);
+ set_reg_field_value(value, params->windowa_x_start,
+ CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_START);
+ set_reg_field_value(value, params->windowa_x_end,
+ CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window A y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWA_Y_CONTROL);
+ set_reg_field_value(value, params->windowa_y_start,
+ CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_START);
+ set_reg_field_value(value, params->windowa_y_end,
+ CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_X_CONTROL);
+ set_reg_field_value(value, params->windowb_x_start,
+ CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_START);
+ set_reg_field_value(value, params->windowb_x_end,
+ CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC0_WINDOWB_Y_CONTROL);
+ set_reg_field_value(value, params->windowb_y_start,
+ CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_START);
+ set_reg_field_value(value, params->windowb_y_end,
+ CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Set crc mode and selection, and enable. */
+ value = 0;
+ set_reg_field_value(value, params->continuous_mode ? 1 : 0,
+ CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
+ set_reg_field_value(value, params->selection,
+ CRTC_CRC_CNTL, CRTC_CRC0_SELECT);
+ set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
+ dm_write_reg(tg->ctx, cntl_addr, value);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWA_X_CONTROL);
+ set_reg_field_value(value, params->windowa_x_start,
+ CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_START);
+ set_reg_field_value(value, params->windowa_x_end,
+ CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window A y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWA_Y_CONTROL);
+ set_reg_field_value(value, params->windowa_y_start,
+ CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_START);
+ set_reg_field_value(value, params->windowa_y_end,
+ CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B x axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWB_X_CONTROL);
+ set_reg_field_value(value, params->windowb_x_start,
+ CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_START);
+ set_reg_field_value(value, params->windowb_x_end,
+ CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Window B y axis start and end. */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_CRC1_WINDOWB_Y_CONTROL);
+ set_reg_field_value(value, params->windowb_y_start,
+ CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_START);
+ set_reg_field_value(value, params->windowb_y_end,
+ CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_END);
+ dm_write_reg(tg->ctx, addr, value);
+
+ /* Set crc mode and selection, and enable. */
+ value = 0;
+ set_reg_field_value(value, params->continuous_mode ? 1 : 0,
+ CRTC_CRC_CNTL, CRTC_CRC_CONT_EN);
+ set_reg_field_value(value, params->selection,
+ CRTC_CRC_CNTL, CRTC_CRC1_SELECT);
+ set_reg_field_value(value, 1, CRTC_CRC_CNTL, CRTC_CRC_EN);
+ dm_write_reg(tg->ctx, cntl_addr, value);
+ break;
+ default:
+ return false;
+ }
return true;
}
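Behavioral note: the control register is now cleared only when disabling or when an explicit reset is requested, rather than unconditionally before every configuration, so programming CRC engine 1 no longer wipes engine 0's windows; the DCE12 variant below applies the same gating.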
-bool dce110_get_crc(struct timing_generator *tg,
+bool dce110_get_crc(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t addr = 0;
@@ -2206,14 +2267,30 @@ bool dce110_get_crc(struct timing_generator *tg,
if (!field)
return false;
- addr = CRTC_REG(mmCRTC_CRC0_DATA_RG);
- value = dm_read_reg(tg->ctx, addr);
- *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR);
- *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y);
+ switch (idx) {
+ case 0:
+ addr = CRTC_REG(mmCRTC_CRC0_DATA_RG);
+ value = dm_read_reg(tg->ctx, addr);
+ *r_cr = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_R_CR);
+ *g_y = get_reg_field_value(value, CRTC_CRC0_DATA_RG, CRC0_G_Y);
- addr = CRTC_REG(mmCRTC_CRC0_DATA_B);
- value = dm_read_reg(tg->ctx, addr);
- *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB);
+ addr = CRTC_REG(mmCRTC_CRC0_DATA_B);
+ value = dm_read_reg(tg->ctx, addr);
+ *b_cb = get_reg_field_value(value, CRTC_CRC0_DATA_B, CRC0_B_CB);
+ break;
+ case 1:
+ addr = CRTC_REG(mmCRTC_CRC1_DATA_RG);
+ value = dm_read_reg(tg->ctx, addr);
+ *r_cr = get_reg_field_value(value, CRTC_CRC1_DATA_RG, CRC1_R_CR);
+ *g_y = get_reg_field_value(value, CRTC_CRC1_DATA_RG, CRC1_G_Y);
+
+ addr = CRTC_REG(mmCRTC_CRC1_DATA_B);
+ value = dm_read_reg(tg->ctx, addr);
+ *b_cb = get_reg_field_value(value, CRTC_CRC1_DATA_B, CRC1_B_CB);
+ break;
+ default:
+ return false;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
index ee4de740aceb..e4f5cad64f32 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -286,7 +286,7 @@ bool dce110_arm_vert_intr(
bool dce110_configure_crc(struct timing_generator *tg,
const struct crc_params *params);
-bool dce110_get_crc(struct timing_generator *tg,
+bool dce110_get_crc(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
bool dce110_is_two_pixels_per_container(const struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index fcf59348eb62..31c4f44ceaac 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -1100,45 +1100,79 @@ static bool dce120_configure_crc(struct timing_generator *tg,
if (!dce120_is_tg_enabled(tg))
return false;
- /* First, disable CRC before we configure it. */
- dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL,
- tg110->offsets.crtc, 0);
+ if (!params->enable || params->reset)
+ /* First, disable CRC before we configure it. */
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC_CNTL,
+ tg110->offsets.crtc, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_X_CONTROL,
- CRTC_CRC0_WINDOWA_X_START, params->windowa_x_start,
- CRTC_CRC0_WINDOWA_X_END, params->windowa_x_end);
-
- /* Window A y axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL,
- CRTC_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- CRTC_CRC0_WINDOWA_Y_END, params->windowa_y_end);
-
- /* Window B x axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_X_CONTROL,
- CRTC_CRC0_WINDOWB_X_START, params->windowb_x_start,
- CRTC_CRC0_WINDOWB_X_END, params->windowb_x_end);
-
- /* Window B y axis start and end. */
- CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL,
- CRTC_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- CRTC_CRC0_WINDOWB_Y_END, params->windowb_y_end);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
- CRTC_CRC_EN, params->continuous_mode ? 1 : 0,
- CRTC_CRC0_SELECT, params->selection,
- CRTC_CRC_EN, 1);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_X_CONTROL,
+ CRTC_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ CRTC_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL,
+ CRTC_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ CRTC_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_X_CONTROL,
+ CRTC_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ CRTC_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL,
+ CRTC_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ CRTC_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable. */
+ CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
+ CRTC_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ CRTC_CRC0_SELECT, params->selection,
+ CRTC_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWA_X_CONTROL,
+ CRTC_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ CRTC_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWA_Y_CONTROL,
+ CRTC_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ CRTC_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWB_X_CONTROL,
+ CRTC_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ CRTC_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_CRC1_WINDOWB_Y_CONTROL,
+ CRTC_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ CRTC_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable. */
+ CRTC_REG_UPDATE_3(CRTC0_CRTC_CRC_CNTL,
+ CRTC_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ CRTC_CRC1_SELECT, params->selection,
+ CRTC_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
-static bool dce120_get_crc(struct timing_generator *tg, uint32_t *r_cr,
- uint32_t *g_y, uint32_t *b_cb)
+static bool dce120_get_crc(struct timing_generator *tg, uint8_t idx,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
uint32_t value, field;
@@ -1151,14 +1185,30 @@ static bool dce120_get_crc(struct timing_generator *tg, uint32_t *r_cr,
if (!field)
return false;
- value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_RG,
- tg110->offsets.crtc);
- *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_R_CR);
- *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_G_Y);
+ switch (idx) {
+ case 0:
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_RG,
+ tg110->offsets.crtc);
+ *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_R_CR);
+ *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_RG, CRC0_G_Y);
- value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_B,
- tg110->offsets.crtc);
- *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_B, CRC0_B_CB);
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC0_DATA_B,
+ tg110->offsets.crtc);
+ *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC0_DATA_B, CRC0_B_CB);
+ break;
+ case 1:
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC1_DATA_RG,
+ tg110->offsets.crtc);
+ *r_cr = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_RG, CRC1_R_CR);
+ *g_y = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_RG, CRC1_G_Y);
+
+ value = dm_read_reg_soc15(tg->ctx, mmCRTC0_CRTC_CRC1_DATA_B,
+ tg110->offsets.crtc);
+ *b_cb = get_reg_field_value(value, CRTC0_CRTC_CRC1_DATA_B, CRC1_B_CB);
+ break;
+ default:
+ return false;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
index 8db9f7514466..889f314cac65 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_resource.c
@@ -717,7 +717,7 @@ static struct link_encoder *dce60_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index eaed5d1c398a..dcd2cdfe91eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -365,23 +365,18 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
region_start = -MAX_LOW_POINT;
region_end = NUMBER_REGIONS - MAX_LOW_POINT;
} else {
- /* 11 segments
- * segment is from 2^-10 to 2^1
+ /* 13 segments
+ * segment is from 2^-12 to 2^0
* There are less than 256 points, for optimization
*/
- seg_distr[0] = 3;
- seg_distr[1] = 4;
- seg_distr[2] = 4;
- seg_distr[3] = 4;
- seg_distr[4] = 4;
- seg_distr[5] = 4;
- seg_distr[6] = 4;
- seg_distr[7] = 4;
- seg_distr[8] = 4;
- seg_distr[9] = 4;
- seg_distr[10] = 1;
-
- region_start = -10;
+ const uint8_t SEG_COUNT = 12;
+
+ for (i = 0; i < SEG_COUNT; i++)
+ seg_distr[i] = 4;
+
+ seg_distr[SEG_COUNT] = 1;
+
+ region_start = -SEG_COUNT;
region_end = 1;
}
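The point budget behind the comment works out as 12 regions of 2^4 = 16 points plus a final region of 2^1 = 2 points, i.e. 12 * 16 + 2 = 194 hardware points, safely under the 256-point limit. The same accounting as a standalone sketch:

static unsigned int count_hw_points(void)
{
	const unsigned int seg_count = 12;
	unsigned int seg_distr[13];
	unsigned int i, hw_points = 0;

	for (i = 0; i < seg_count; i++)
		seg_distr[i] = 4;		/* 2^4 = 16 points each */
	seg_distr[seg_count] = 1;		/* final region: 2 points */

	for (i = 0; i <= seg_count; i++)
		hw_points += 1u << seg_distr[i];

	return hw_points;			/* 12 * 16 + 2 = 194 */
}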
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 05df502a54f2..88cf47a5ea75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -46,7 +46,7 @@
#include "clk_mgr.h"
__printf(3, 4)
-unsigned int snprintf_count(char *pbuf, unsigned int bufsize, char *fmt, ...)
+unsigned int snprintf_count(char *pbuf, unsigned int bufsize, const char *fmt, ...)
{
int ret_vsnprintf;
unsigned int chars_printed;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
index f31f0e3abfc0..0690c346f2c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -140,23 +140,18 @@ bool cm3_helper_translate_curve_to_hw_format(
region_start = -MAX_LOW_POINT;
region_end = NUMBER_REGIONS - MAX_LOW_POINT;
} else {
- /* 11 segments
- * segment is from 2^-10 to 2^0
+ /* 13 segments
+ * segment is from 2^-12 to 2^0
* There are less than 256 points, for optimization
*/
- seg_distr[0] = 3;
- seg_distr[1] = 4;
- seg_distr[2] = 4;
- seg_distr[3] = 4;
- seg_distr[4] = 4;
- seg_distr[5] = 4;
- seg_distr[6] = 4;
- seg_distr[7] = 4;
- seg_distr[8] = 4;
- seg_distr[9] = 4;
- seg_distr[10] = 1;
-
- region_start = -10;
+ const uint8_t SEG_COUNT = 12;
+
+ for (i = 0; i < SEG_COUNT; i++)
+ seg_distr[i] = 4;
+
+ seg_distr[SEG_COUNT] = 1;
+
+ region_start = -SEG_COUNT;
region_end = 1;
}
@@ -285,157 +280,6 @@ bool cm3_helper_translate_curve_to_hw_format(
return true;
}
-#define NUM_DEGAMMA_REGIONS 12
-
-
-bool cm3_helper_translate_curve_to_degamma_hw_format(
- const struct dc_transfer_func *output_tf,
- struct pwl_params *lut_params)
-{
- struct curve_points3 *corner_points;
- struct pwl_result_data *rgb_resulted;
- struct pwl_result_data *rgb;
- struct pwl_result_data *rgb_plus_1;
-
- int32_t region_start, region_end;
- int32_t i;
- uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
-
- if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
- return false;
-
- corner_points = lut_params->corner_points;
- rgb_resulted = lut_params->rgb_resulted;
- hw_points = 0;
-
- memset(lut_params, 0, sizeof(struct pwl_params));
- memset(seg_distr, 0, sizeof(seg_distr));
-
- region_start = -NUM_DEGAMMA_REGIONS;
- region_end = 0;
-
-
- for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
- seg_distr[i] = -1;
- /* 12 segments
- * segments are from 2^-12 to 0
- */
- for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++)
- seg_distr[i] = 4;
-
- for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
- if (seg_distr[k] != -1)
- hw_points += (1 << seg_distr[k]);
- }
-
- j = 0;
- for (k = 0; k < (region_end - region_start); k++) {
- increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
- start_index = (region_start + k + MAX_LOW_POINT) *
- NUMBER_SW_SEGMENTS;
- for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
- i += increment) {
- if (j == hw_points - 1)
- break;
- if (i >= TRANSFER_FUNC_POINTS)
- return false;
- rgb_resulted[j].red = output_tf->tf_pts.red[i];
- rgb_resulted[j].green = output_tf->tf_pts.green[i];
- rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
- j++;
- }
- }
-
- /* last point */
- start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
- rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
- rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
- rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
-
- corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
- dc_fixpt_from_int(region_start));
- corner_points[0].green.x = corner_points[0].red.x;
- corner_points[0].blue.x = corner_points[0].red.x;
- corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
- dc_fixpt_from_int(region_end));
- corner_points[1].green.x = corner_points[1].red.x;
- corner_points[1].blue.x = corner_points[1].red.x;
-
- corner_points[0].red.y = rgb_resulted[0].red;
- corner_points[0].green.y = rgb_resulted[0].green;
- corner_points[0].blue.y = rgb_resulted[0].blue;
-
- /* see comment above, m_arrPoints[1].y should be the Y value for the
- * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
- */
- corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
- corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
- corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
- corner_points[1].red.slope = dc_fixpt_zero;
- corner_points[1].green.slope = dc_fixpt_zero;
- corner_points[1].blue.slope = dc_fixpt_zero;
-
- if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
- /* for PQ, we want to have a straight line from last HW X point,
- * and the slope to be such that we hit 1.0 at 10000 nits.
- */
- const struct fixed31_32 end_value =
- dc_fixpt_from_int(125);
-
- corner_points[1].red.slope = dc_fixpt_div(
- dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
- dc_fixpt_sub(end_value, corner_points[1].red.x));
- corner_points[1].green.slope = dc_fixpt_div(
- dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
- dc_fixpt_sub(end_value, corner_points[1].green.x));
- corner_points[1].blue.slope = dc_fixpt_div(
- dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
- dc_fixpt_sub(end_value, corner_points[1].blue.x));
- }
-
- lut_params->hw_points_num = hw_points;
-
- k = 0;
- for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
- if (seg_distr[k] != -1) {
- lut_params->arr_curve_points[k].segments_num =
- seg_distr[k];
- lut_params->arr_curve_points[i].offset =
- lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
- }
- k++;
- }
-
- if (seg_distr[k] != -1)
- lut_params->arr_curve_points[k].segments_num = seg_distr[k];
-
- rgb = rgb_resulted;
- rgb_plus_1 = rgb_resulted + 1;
-
- i = 1;
- while (i != hw_points + 1) {
- if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
- rgb_plus_1->red = rgb->red;
- if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
- rgb_plus_1->green = rgb->green;
- if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
- rgb_plus_1->blue = rgb->blue;
-
- rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
- rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
- rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
-
- ++rgb_plus_1;
- ++rgb;
- ++i;
- }
- cm3_helper_convert_to_custom_float(rgb_resulted,
- lut_params->corner_points,
- hw_points, false);
-
- return true;
-}
-
bool cm3_helper_convert_to_custom_float(
struct pwl_result_data *rgb_resulted,
struct curve_points3 *corner_points,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 573898984726..f9961a6446f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -168,31 +168,33 @@ void dcn31_panel_cntl_construct(
struct dcn31_panel_cntl *dcn31_panel_cntl,
const struct panel_cntl_init_data *init_data)
{
- uint8_t pwrseq_inst = 0xF;
dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
dcn31_panel_cntl->base.ctx = init_data->ctx;
dcn31_panel_cntl->base.inst = init_data->inst;
- switch (init_data->eng_id) {
- case ENGINE_ID_DIGA:
- pwrseq_inst = 0;
- break;
- case ENGINE_ID_DIGB:
- pwrseq_inst = 1;
- break;
- default:
- DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
- ASSERT(false);
- break;
- }
-
- if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1)
+ if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1) {
//If supported, power sequencer mapping shall follow the DIG instance
+ uint8_t pwrseq_inst = 0xF;
+
+ switch (init_data->eng_id) {
+ case ENGINE_ID_DIGA:
+ pwrseq_inst = 0;
+ break;
+ case ENGINE_ID_DIGB:
+ pwrseq_inst = 1;
+ break;
+ default:
+ DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
+ ASSERT(false);
+ break;
+ }
+
dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
- else
+ } else {
/* If not supported, pwrseq will be assigned in order,
* so first pwrseq will be assigned to first panel instance (legacy behavior)
*/
dcn31_panel_cntl->base.pwrseq_inst = dcn31_panel_cntl->base.inst;
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
index f496e952ceec..d01a8b8f9595 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
@@ -255,7 +255,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
uint32_t misc1 = 0;
uint32_t h_blank;
uint32_t h_back_porch;
- uint8_t synchronous_clock = 0; /* asynchronous mode */
uint8_t colorimetry_bpc;
uint8_t dp_pixel_encoding = 0;
uint8_t dp_component_depth = 0;
@@ -362,7 +361,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
break;
}
- misc0 = misc0 | synchronous_clock;
misc0 = colorimetry_bpc << 5;
switch (output_color_space) {
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
index b2cea59ba5d4..9a92f73d5b7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
@@ -653,8 +653,9 @@ void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_lin
if (!query_dp_alt_from_dmub(enc, &cmd))
return;
- if (cmd.query_dp_alt.data.is_usb &&
- cmd.query_dp_alt.data.is_dp4 == 0)
+ if (cmd.query_dp_alt.data.is_dp_alt_disable == 0 &&
+ cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
return;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index 5b343f745cf3..ae81451a3a72 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
@@ -83,6 +83,15 @@ void enc314_disable_fifo(struct stream_encoder *enc)
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
}
+static bool enc314_is_fifo_enabled(struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t reset_val;
+
+ REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
+ return (reset_val != 0);
+}
+
void enc314_dp_set_odm_combine(
struct stream_encoder *enc,
bool odm_combine)
@@ -468,6 +477,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
.enable_fifo = enc314_enable_fifo,
.disable_fifo = enc314_disable_fifo,
+ .is_fifo_enabled = enc314_is_fifo_enabled,
.set_input_mode = enc314_set_dig_input_mode,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
index d4a3e811aa39..ea0c9a9d0bd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c
@@ -28,6 +28,7 @@
#include "link_encoder.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn35_dio_link_encoder.h"
+#include "dc_dmub_srv.h"
#define CTX \
enc10->base.ctx
#define DC_LOGGER \
@@ -159,6 +160,8 @@ static const struct link_encoder_funcs dcn35_link_enc_funcs = {
.is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn31_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
+ .enable_dpia_output = dcn35_link_encoder_enable_dpia_output,
+ .disable_dpia_output = dcn35_link_encoder_disable_dpia_output,
};
void dcn35_link_encoder_construct(
@@ -265,3 +268,80 @@ void dcn35_link_encoder_construct(
enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
}
+
+/* DPIA equivalent of link_transmitter_control. */
+static bool link_dpia_control(struct dc_context *dc_ctx,
+ struct dmub_cmd_dig_dpia_control_data *dpia_control)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.dig1_dpia_control.header.type = DMUB_CMD__DPIA;
+ cmd.dig1_dpia_control.header.sub_type =
+ DMUB_CMD__DPIA_DIG1_DPIA_CONTROL;
+ cmd.dig1_dpia_control.header.payload_bytes =
+ sizeof(cmd.dig1_dpia_control) -
+ sizeof(cmd.dig1_dpia_control.header);
+
+ cmd.dig1_dpia_control.dpia_control = *dpia_control;
+
+ dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
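link_dpia_control follows the standard DMUB mailbox pattern: zero the command union, fill in type and sub-type, and report the payload size as the command struct minus its header. A hypothetical helper macro (not in the patch) makes the size rule explicit:

#define DMUB_CMD_PAYLOAD_BYTES(cmd_member) \
	(sizeof(cmd_member) - sizeof((cmd_member).header))

/* i.e. the assignment above is equivalent to:
 * cmd.dig1_dpia_control.header.payload_bytes =
 *	DMUB_CMD_PAYLOAD_BYTES(cmd.dig1_dpia_control);
 */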
+static void link_encoder_disable(struct dcn10_link_encoder *enc10)
+{
+ /* reset training complete */
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+}
+
+void dcn35_link_encoder_enable_dpia_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = digmode;
+ dpia_control.lanenum = (uint8_t)link_settings->lane_count;
+ dpia_control.symclk_10khz = link_settings->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ / 10;
+ /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin unused by DPIA. */
+ dpia_control.hpdsel = 6;
+ dpia_control.dpia_id = dpia_id;
+ dpia_control.fec_rdy = fec_rdy;
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+}
+
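symclk_10khz is the DP link-bandwidth code times the 27 MHz link reference (LINK_RATE_REF_FREQ_IN_KHZ is 27000 in this codebase), scaled to 10 kHz units. For HBR2, code 0x14 (decimal 20), that is 20 * 27000 / 10 = 54000, i.e. a 540 MHz symbol clock. As a standalone sketch:

static uint32_t dpia_symclk_10khz(uint32_t link_rate_code)
{
	/* e.g. HBR2: 20 * 27000 / 10 = 54000 ten-kHz units = 540 MHz */
	return link_rate_code * 27000u / 10u;
}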
+void dcn35_link_encoder_disable_dpia_output(
+ struct link_encoder *enc,
+ uint8_t dpia_id,
+ uint8_t digmode)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+
+ if (enc->funcs->is_dig_enabled && !enc->funcs->is_dig_enabled(enc))
+ return;
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_DISABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = digmode;
+ dpia_control.dpia_id = dpia_id;
+
+ DC_LOG_DEBUG("%s: DPIA(%d) - enc_id(%d)\n", __func__, dpia_control.dpia_id, dpia_control.enc_id);
+ link_dpia_control(enc->ctx, &dpia_control);
+
+ link_encoder_disable(enc10);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
index d546a3676304..f9d4221f4b43 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h
@@ -144,4 +144,22 @@ bool dcn35_is_dig_enabled(struct link_encoder *enc);
enum signal_type dcn35_get_dig_mode(struct link_encoder *enc);
void dcn35_link_encoder_setup(struct link_encoder *enc, enum signal_type signal);
+/*
+ * Enable DP transmitter and its encoder for dpia port.
+ */
+void dcn35_link_encoder_enable_dpia_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy);
+
+/*
+ * Disable transmitter and its encoder for dpia port.
+ */
+void dcn35_link_encoder_disable_dpia_output(
+ struct link_encoder *enc,
+ uint8_t dpia_id,
+ uint8_t digmode);
+
#endif /* __DC_LINK_ENCODER__DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index 0a27e0942a12..098c2a01a850 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -447,7 +447,6 @@ void enc401_stream_encoder_dp_set_stream_attribute(
uint32_t misc1 = 0;
uint32_t h_blank;
uint32_t h_back_porch;
- uint8_t synchronous_clock = 0; /* asynchronous mode */
uint8_t colorimetry_bpc;
uint8_t dp_pixel_encoding = 0;
uint8_t dp_component_depth = 0;
@@ -603,7 +602,6 @@ void enc401_stream_encoder_dp_set_stream_attribute(
break;
}
- misc0 = misc0 | synchronous_clock;
misc0 = colorimetry_bpc << 5;
switch (output_color_space) {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 2e4a46f1b499..5efddd48d5c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -158,6 +158,11 @@ bool dm_helpers_dp_write_dsc_enable(
const struct dc_stream_state *stream,
bool enable
);
+
+bool dm_helpers_dp_write_hblank_reduction(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream);
+
bool dm_helpers_is_dp_sink_present(
struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 9405c47ee2a9..f81e5a4e1d6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -143,7 +143,7 @@ void generic_reg_wait(const struct dc_context *ctx,
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
const char *func_name, int line);
-unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...);
+unsigned int snprintf_count(char *pBuf, unsigned int bufSize, const char *fmt, ...);
/* These macros need to be used with soc15 registers in order to retrieve
* the actual offset.
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 46f9c05de16e..e1d500633dfa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -29,11 +29,15 @@ dml_ccflags := $(CC_FLAGS_FPU)
dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
-ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
-frame_warn_flag := -Wframe-larger-than=3072
-else
-frame_warn_flag := -Wframe-larger-than=2048
-endif
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ frame_warn_limit := 3072
+ else
+ frame_warn_limit := 2048
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
endif
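In plain terms: the -Wframe-larger-than override is now applied only when the tree-wide CONFIG_FRAME_WARN is stricter (numerically smaller) than what these FPU-heavy DML files need. For example, CONFIG_FRAME_WARN=1024 is raised to 2048 (3072 under KASAN/KCSAN), while CONFIG_FRAME_WARN=4096 is left untouched.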
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index 39525721c976..f1235bf9a596 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1312,138 +1312,6 @@ bool dcn_validate_bandwidth(
return false;
}
-static unsigned int dcn_find_normalized_clock_vdd_Level(
- const struct dc *dc,
- enum dm_pp_clock_type clocks_type,
- int clocks_in_khz)
-{
- int vdd_level = dcn_bw_v_min0p65;
-
- if (clocks_in_khz == 0)/*todo some clock not in the considerations*/
- return vdd_level;
-
- switch (clocks_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
- if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
-
- case DM_PP_CLOCK_TYPE_DPPCLK:
- if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
-
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- {
- unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
-
- if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- }
- break;
-
- case DM_PP_CLOCK_TYPE_DCFCLK:
- if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
- vdd_level = dcn_bw_v_max0p91;
- BREAK_TO_DEBUGGER();
- } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
- vdd_level = dcn_bw_v_max0p9;
- } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
- vdd_level = dcn_bw_v_nom0p8;
- } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) {
- vdd_level = dcn_bw_v_mid0p72;
- } else
- vdd_level = dcn_bw_v_min0p65;
- break;
-
- default:
- break;
- }
- return vdd_level;
-}
-
-unsigned int dcn_find_dcfclk_suits_all(
- const struct dc *dc,
- struct dc_clocks *clocks)
-{
- unsigned vdd_level, vdd_level_temp;
- unsigned dcf_clk;
-
- /*find a common supported voltage level*/
- vdd_level = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz);
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz);
-
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz);
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
-
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz);
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
- vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
- dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz);
-
- /*find that level conresponding dcfclk*/
- vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
- if (vdd_level == dcn_bw_v_max0p91) {
- BREAK_TO_DEBUGGER();
- dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
- } else if (vdd_level == dcn_bw_v_max0p9)
- dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
- else if (vdd_level == dcn_bw_v_nom0p8)
- dcf_clk = dc->dcn_soc->dcfclkv_nom0p8*1000;
- else if (vdd_level == dcn_bw_v_mid0p72)
- dcf_clk = dc->dcn_soc->dcfclkv_mid0p72*1000;
- else
- dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000;
-
- DC_LOG_BANDWIDTH_CALCS("\tdcf_clk for voltage = %d\n", dcf_clk);
- return dcf_clk;
-}
-
void dcn_bw_update_from_pplib_fclks(
struct dc *dc,
struct dm_pp_clock_levels_with_voltage *fclks)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index 565f3c492477..0c8c4a080c50 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -785,12 +785,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBW = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 9d6675ecc5f1..c935903b68e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -845,12 +845,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBW = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 4fce64a030b6..390c1a77fda6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -443,8 +443,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -491,8 +489,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
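
Same theme as the prefetch-schedule hunks: log2_blk_height/log2_blk_width and log2_meta_row_height/meta_row_width_ub are unconditionally recomputed a few lines further down (the "calculate upper bound of the meta_row_width" step), so the zero pre-initializations were dead stores. The identical change lands in the 20v2/21/30/31/314 RQ/DLG calculators below. An assumed minimal repro of the pattern a static analyzer (e.g. clang's deadcode.DeadStores checker) flags:

    static int toy_log2_width(int bytes)
    {
        int width = 0;        /* dead: overwritten before any read */

        width = 8 * bytes;    /* the real computation follows immediately */
        return width;
    }
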
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 3fa9a5da02f6..843d6004258c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -443,8 +443,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -491,8 +489,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index eb3ed965e48b..cd8cca651419 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -1049,12 +1049,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 9e1c18b90805..5718000627b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -435,8 +435,6 @@ static void get_meta_and_pte_attr(
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -485,8 +483,6 @@ static void get_meta_and_pte_attr(
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 1c10ba4dcdde..cee1b351e105 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -1280,12 +1280,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
@@ -1775,15 +1772,6 @@ static unsigned int CalculateVMAndRowBytes(
*PixelPTEReqWidth = 32768.0 / BytePerPixel;
*PTERequestSize = 64;
FractionOfPTEReturnDrop = 0;
- } else if (MacroTileSizeBytes == 4096) {
- PixelPTEReqHeightPTEs = 1;
- *PixelPTEReqHeight = MacroTileHeight;
- *PixelPTEReqWidth = 8 * *MacroTileWidth;
- *PTERequestSize = 64;
- if (ScanDirection != dm_vert)
- FractionOfPTEReturnDrop = 0;
- else
- FractionOfPTEReturnDrop = 7.0 / 8;
} else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
PixelPTEReqHeightPTEs = 16;
*PixelPTEReqHeight = 16 * BlockHeight256Bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index b28fcc8608ff..8d4873f80df0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -392,8 +392,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
blk_bytes = surf_linear ?
256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double)blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -464,8 +462,6 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
- log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
@@ -1566,6 +1562,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
+
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int)(dst_y_per_row_vblank * (double)htotal
* ref_freq_to_pix_freq / (double)dpte_groups_per_row_ub_l);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 2b275e680379..f567a9023682 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -1444,12 +1444,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index b57b095cd4a8..c46bda2141ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -413,8 +413,6 @@ static void get_meta_and_pte_attr(
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -481,8 +479,6 @@ static void get_meta_and_pte_attr(
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index debfa31583a6..5865e8fa2d8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -1461,12 +1461,9 @@ static bool CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index 61b3bebf24c9..b7d2a0caec11 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -501,8 +501,6 @@ static void get_meta_and_pte_attr(
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes = surf_linear ? 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
- log2_blk_height = 0;
- log2_blk_width = 0;
// remember log rule
// "+" in log is multiply
@@ -569,8 +567,6 @@ static void get_meta_and_pte_attr(
log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height;
meta_req_width = 1 << log2_meta_req_width;
meta_req_height = 1 << log2_meta_req_height;
- log2_meta_row_height = 0;
- meta_row_width_ub = 0;
// the dimensions of a meta row are meta_row_width x meta_row_height in elements.
// calculate upper bound of the meta_row_width
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index d92fb428ee96..0748ef36a16a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1595,6 +1595,7 @@ double dml32_TruncToValidBPP(
unsigned int NonDSCBPP0;
unsigned int NonDSCBPP1;
unsigned int NonDSCBPP2;
+ unsigned int NonDSCBPP3 = BPP_INVALID;
if (Format == dm_420) {
NonDSCBPP0 = 12;
@@ -1603,6 +1604,7 @@ double dml32_TruncToValidBPP(
MinDSCBPP = 6;
MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
} else if (Format == dm_444) {
+ NonDSCBPP3 = 18;
NonDSCBPP0 = 24;
NonDSCBPP1 = 30;
NonDSCBPP2 = 36;
@@ -1667,6 +1669,8 @@ double dml32_TruncToValidBPP(
return NonDSCBPP1;
else if (MaxLinkBPP >= NonDSCBPP0)
return 16.0;
+ else if ((Output == dm_dp2p0 || Output == dm_dp) && NonDSCBPP3 != BPP_INVALID && MaxLinkBPP >= NonDSCBPP3)
+ return NonDSCBPP3; // Special case to allow 6bpc RGB for DP connections.
else
return BPP_INVALID;
}
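
The dml32_TruncToValidBPP() change introduces NonDSCBPP3 = 18 (6 bpc x 3 components) for dm_444 and, per the added comment, tries it only on DP/DP2.0 outputs as a last resort before declaring the mode unsupportable. A hedged sketch of the non-DSC fall-through order after the change (assumed simplification; the real function also handles DSC and the other pixel formats, and -1 stands in for BPP_INVALID here):

    static double toy_trunc_444_bpp(double max_link_bpp, bool is_dp)
    {
        if (max_link_bpp >= 36) return 36;
        if (max_link_bpp >= 30) return 30;
        if (max_link_bpp >= 24) return 24;
        if (is_dp && max_link_bpp >= 18)
            return 18;          /* new: 6bpc RGB allowed on DP connections */
        return -1;              /* no valid non-DSC bpp fits the link */
    }
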
@@ -4097,12 +4101,9 @@ bool dml32_CalculatePrefetchSchedule(
if (MyError) {
*PrefetchBandwidth = 0;
- TimeForFetchingMetaPTE = 0;
- TimeForFetchingRowInVBlank = 0;
*DestinationLinesToRequestVMInVBlank = 0;
*DestinationLinesToRequestRowInVBlank = 0;
*DestinationLinesForPrefetch = 0;
- LinesToRequestPrefetchPixelData = 0;
*VRatioPrefetchY = 0;
*VRatioPrefetchC = 0;
*RequiredPrefetchPixDataBWLuma = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index beed7adbbd43..47d785204f29 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 1,
+ .do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
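
Setting do_urgent_latency_adjustment = 0 and zeroing the fabric clock reference disables the fabric-clock-dependent urgent-latency bump for DCN 3.5. A hedged sketch of how DML consumes these fields in the dcn3x urgent-latency path (assumed shape; the in-tree expression may differ — either override alone already makes the term vanish):

    static double toy_urgent_latency(double base_us, int do_adj,
                                     double comp_us, double ref_mhz,
                                     double fclk_mhz)
    {
        double scale;

        if (!do_adj || fclk_mhz <= 0)
            return base_us;        /* adjustment disabled, as in the new BB */
        scale = ref_mhz / fclk_mhz - 1;
        if (scale < 0)
            scale = 0;             /* only penalize fclk below the reference */
        return base_us + comp_us * scale;
    }
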
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index a201dbb743d7..d9e63c4fdd95 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -204,8 +204,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
.num_states = 8,
.sr_exit_time_us = 28.0,
.sr_enter_plus_exit_time_us = 30.0,
- .sr_exit_z8_time_us = 250.0,
- .sr_enter_plus_exit_z8_time_us = 350.0,
+ .sr_exit_z8_time_us = 263.0,
+ .sr_enter_plus_exit_z8_time_us = 363.0,
.fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index d8bfc85e5dcd..88dc2b97e7bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -559,12 +559,11 @@ static void get_surf_rq_param(
const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
- bool mode_422 = 0;
unsigned int vp_width = 0;
unsigned int vp_height = 0;
unsigned int data_pitch = 0;
unsigned int meta_pitch = 0;
- unsigned int ppe = mode_422 ? 2 : 1;
+ unsigned int ppe = 1;
bool surf_linear;
bool surf_vert;
unsigned int bytes_per_element;
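
mode_422 was a local initialized to 0 and never set, so the ternary could only ever yield 1; the change just folds the constant away. Sketch of the simplification:

    static unsigned int toy_ppe(void)
    {
        /* bool mode_422 = false;  ppe = mode_422 ? 2 : 1;  -> always 1 */
        return 1;  /* the 4:2:2 dual-pixel case was unreachable here */
    }
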
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index 072bd0539605..6b2ab4ec2b5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -66,11 +66,15 @@ static inline double dml_max5(double a, double b, double c, double d, double e)
static inline double dml_ceil(double a, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_ceil2(a, granularity);
}
static inline double dml_floor(double a, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_floor2(a, granularity);
}
@@ -114,11 +118,15 @@ static inline double dml_ceil_2(double f)
static inline double dml_ceil_ex(double x, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_ceil2(x, granularity);
}
static inline double dml_floor_ex(double x, double granularity)
{
+ if (granularity == 0)
+ return 0;
return (double) dcn_bw_floor2(x, granularity);
}
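
dcn_bw_ceil2()/dcn_bw_floor2() presumably divide by the granularity to snap to a grid, so a zero granularity would divide by zero and propagate NaN/Inf through the bandwidth math; the new guards short-circuit to 0 instead. Assumed shape of the helper being guarded (userspace sketch with libm; the in-tree implementation may differ in detail):

    #include <math.h>

    static double toy_ceil2(double a, double granularity)
    {
        if (granularity == 0)
            return 0;                        /* mirrors the new guard */
        return ceil(a / granularity) * granularity;
    }
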
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index c4378e620cbf..21fd466dba26 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -28,11 +28,19 @@ dml2_ccflags := $(CC_FLAGS_FPU)
dml2_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
-ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
-frame_warn_flag := -Wframe-larger-than=3072
-else
-frame_warn_flag := -Wframe-larger-than=2048
-endif
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
+ else
+ frame_warn_limit := 2048
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
endif
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
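
The reworked logic picks a larger frame budget when KASAN/KCSAN instrumentation inflates stack frames (4096 for clang under COMPILE_TEST, 3072 otherwise, 2048 uninstrumented), and the test-lt check passes -Wframe-larger-than only when that budget exceeds the global CONFIG_FRAME_WARN, so the per-directory override can loosen a stricter global limit but never tighten a looser one. An illustration (not from the tree) of the kind of frame the warning polices — DML keeps large scratch structures on the stack:

    struct toy_scratch { double rows[300]; };  /* 2400 bytes */

    double toy_mode_check(int n)
    {
        struct toy_scratch s = { { 0 } };      /* frame exceeds 2048 bytes here */

        for (int i = 0; i < n && i < 300; i++)
            s.rows[i] = i * 0.5;
        return s.rows[0];
    }
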
@@ -73,9 +81,8 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top_mcache.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
@@ -94,9 +101,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top_mcache.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
@@ -113,9 +119,8 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_r
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_rcflags)
-DML21 := src/dml2_top/dml_top.o
-DML21 += src/dml2_top/dml_top_mcache.o
-DML21 += src/dml2_top/dml2_top_optimization.o
+DML21 := src/dml2_top/dml2_top_interfaces.o
+DML21 += src/dml2_top/dml2_top_soc15.o
DML21 += src/inc/dml2_debug.o
DML21 += src/dml2_core/dml2_core_dcn4.o
DML21 += src/dml2_core/dml2_core_factory.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index d851c081e376..84a2de9a76d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -1222,6 +1222,7 @@ static dml_bool_t CalculatePrefetchSchedule(struct display_mode_lib_scratch_st *
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + dml_max(p->TWait + p->TCalc, *p->Tdmdl)) / s->LineTime - (*p->DSTYAfterScaler + (dml_float_t) *p->DSTXAfterScaler / (dml_float_t)p->myPipe->HTotal);
+ s->dst_y_prefetch_equ = dml_min(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);
@@ -1735,7 +1736,7 @@ static void CalculateBytePerPixelAndBlockSizes(
#endif
} // CalculateBytePerPixelAndBlockSizes
-static dml_float_t CalculateTWait(
+static noinline_for_stack dml_float_t CalculateTWait(
dml_uint_t PrefetchMode,
enum dml_use_mall_for_pstate_change_mode UseMALLForPStateChange,
dml_bool_t SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
@@ -4457,7 +4458,7 @@ static void CalculateSwathWidth(
}
} // CalculateSwathWidth
-static dml_float_t CalculateExtraLatency(
+static noinline_for_stack dml_float_t CalculateExtraLatency(
dml_uint_t RoundTripPingLatencyCycles,
dml_uint_t ReorderingBytes,
dml_float_t DCFCLK,
@@ -5914,7 +5915,7 @@ static dml_uint_t DSCDelayRequirement(
return DSCDelayRequirement_val;
}
-static dml_bool_t CalculateVActiveBandwithSupport(dml_uint_t NumberOfActiveSurfaces,
+static noinline_for_stack dml_bool_t CalculateVActiveBandwithSupport(dml_uint_t NumberOfActiveSurfaces,
dml_float_t ReturnBW,
dml_bool_t NotUrgentLatencyHiding[],
dml_float_t ReadBandwidthLuma[],
@@ -6018,7 +6019,7 @@ static void CalculatePrefetchBandwithSupport(
#endif
}
-static dml_float_t CalculateBandwidthAvailableForImmediateFlip(
+static noinline_for_stack dml_float_t CalculateBandwidthAvailableForImmediateFlip(
dml_uint_t NumberOfActiveSurfaces,
dml_float_t ReturnBW,
dml_float_t ReadBandwidthLuma[],
@@ -6212,7 +6213,7 @@ static dml_uint_t CalculateMaxVStartup(
return max_vstartup_lines;
}
-static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *mode_lib,
+static noinline_for_stack void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *mode_lib,
struct CalculatePrefetchSchedule_params_st *CalculatePrefetchSchedule_params,
dml_uint_t j,
dml_uint_t k)
@@ -6264,7 +6265,7 @@ static void set_calculate_prefetch_schedule_params(struct display_mode_lib_st *m
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
}
-static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
+static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
{
struct dml_core_mode_support_locals_st *s = &mode_lib->scratch.dml_core_mode_support_locals;
struct CalculatePrefetchSchedule_params_st *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
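
noinline_for_stack (a kernel alias for noinline from include/linux/compiler_types.h) keeps these large helpers as real calls, so their locals are not inlined and merged into the already sizeable frames of dml_core_mode_support()/dml_prefetch_check() — the same frame budget the Makefile's -Wframe-larger-than change above is policing. Sketch of the effect, with the macro redefined locally so it compiles outside the kernel:

    #define noinline_for_stack __attribute__((__noinline__))

    static noinline_for_stack double toy_helper(void)
    {
        double scratch[128] = { 0 };  /* stays in toy_helper's own frame */
        return scratch[0];
    }

    /* Without the attribute, the compiler may inline both calls and
     * sum both scratch arrays into toy_caller's frame. */
    double toy_caller(void) { return toy_helper() + toy_helper(); }
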
@@ -6300,9 +6301,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.meta_row_bandwidth_this_state,
mode_lib->ms.dpte_row_bandwidth_this_state,
mode_lib->ms.NoOfDPPThisState,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor);
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j]);
s->VMDataOnlyReturnBWPerState = dml_get_return_bw_mbps_vm_only(
&mode_lib->ms.soc,
@@ -6433,7 +6434,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
/* Output */
&mode_lib->ms.UrgentBurstFactorCursorPre[k],
&mode_lib->ms.UrgentBurstFactorLumaPre[k],
- &mode_lib->ms.UrgentBurstFactorChroma[k],
+ &mode_lib->ms.UrgentBurstFactorChromaPre[k],
&mode_lib->ms.NotUrgentLatencyHidingPre[k]);
mode_lib->ms.cursor_bw_pre[k] = mode_lib->ms.cache_display_cfg.plane.NumberOfCursors[k] * mode_lib->ms.cache_display_cfg.plane.CursorWidth[k] *
@@ -6457,9 +6458,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cursor_bw_pre,
mode_lib->ms.prefetch_vmrow_bw,
mode_lib->ms.NoOfDPPThisState,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j],
mode_lib->ms.UrgentBurstFactorLumaPre,
mode_lib->ms.UrgentBurstFactorChromaPre,
mode_lib->ms.UrgentBurstFactorCursorPre,
@@ -6516,9 +6517,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cursor_bw,
mode_lib->ms.cursor_bw_pre,
mode_lib->ms.NoOfDPPThisState,
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j],
mode_lib->ms.UrgentBurstFactorLumaPre,
mode_lib->ms.UrgentBurstFactorChromaPre,
mode_lib->ms.UrgentBurstFactorCursorPre);
@@ -6585,9 +6586,9 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cursor_bw_pre,
mode_lib->ms.prefetch_vmrow_bw,
mode_lib->ms.NoOfDPP[j], // VBA_ERROR DPPPerSurface is not assigned at this point, should use NoOfDpp here
- mode_lib->ms.UrgentBurstFactorLuma,
- mode_lib->ms.UrgentBurstFactorChroma,
- mode_lib->ms.UrgentBurstFactorCursor,
+ mode_lib->ms.UrgentBurstFactorLuma[j],
+ mode_lib->ms.UrgentBurstFactorChroma[j],
+ mode_lib->ms.UrgentBurstFactorCursor[j],
mode_lib->ms.UrgentBurstFactorLumaPre,
mode_lib->ms.UrgentBurstFactorChromaPre,
mode_lib->ms.UrgentBurstFactorCursorPre,
@@ -7808,9 +7809,9 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
mode_lib->ms.DETBufferSizeYThisState[k],
mode_lib->ms.DETBufferSizeCThisState[k],
/* Output */
- &mode_lib->ms.UrgentBurstFactorCursor[k],
- &mode_lib->ms.UrgentBurstFactorLuma[k],
- &mode_lib->ms.UrgentBurstFactorChroma[k],
+ &mode_lib->ms.UrgentBurstFactorCursor[j][k],
+ &mode_lib->ms.UrgentBurstFactorLuma[j][k],
+ &mode_lib->ms.UrgentBurstFactorChroma[j][k],
&mode_lib->ms.NotUrgentLatencyHiding[k]);
}
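
Mode support evaluates every plane k at each of two performance states j, and the urgent-burst factors are now stored per (state, plane) — see the struct hunk further down, where UrgentBurstFactorLuma/Chroma/Cursor become [2][__DML_NUM_PLANES__] — with call sites passing the row for the state under evaluation instead of one shared row. The dml_prefetch_check output-list hunk also fixes a slot mix-up: the chroma pre-prefetch factor was being written into UrgentBurstFactorChroma rather than UrgentBurstFactorChromaPre. Indexing sketch (dimensions from the struct hunk; toy sizes):

    #define TOY_NUM_STATES 2
    #define TOY_NUM_PLANES 6

    double UrgentBurstFactorLuma[TOY_NUM_STATES][TOY_NUM_PLANES];

    void toy_eval_state(int j)
    {
        for (int k = 0; k < TOY_NUM_PLANES; k++)
            UrgentBurstFactorLuma[j][k] = 1.0 + 0.01 * k;  /* per state, per plane */
        /* consumers now read UrgentBurstFactorLuma[j], not a shared row */
    }
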
@@ -8317,7 +8318,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
if (clk_cfg->dcfclk_option != dml_use_override_freq)
locals->Dcfclk = mode_lib->ms.DCFCLK;
else
- locals->Dcfclk = clk_cfg->dcfclk_freq_mhz;
+ locals->Dcfclk = clk_cfg->dcfclk_mhz;
#ifdef __DML_VBA_DEBUG__
dml_print_dml_policy(&mode_lib->ms.policy);
@@ -8370,7 +8371,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
if (clk_cfg->dispclk_option == dml_use_required_freq)
locals->Dispclk = locals->Dispclk_calculated;
else if (clk_cfg->dispclk_option == dml_use_override_freq)
- locals->Dispclk = clk_cfg->dispclk_freq_mhz;
+ locals->Dispclk = clk_cfg->dispclk_mhz;
else
locals->Dispclk = mode_lib->ms.state.dispclk_mhz;
#ifdef __DML_VBA_DEBUG__
@@ -8411,7 +8412,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
if (clk_cfg->dppclk_option[k] == dml_use_required_freq)
locals->Dppclk[k] = locals->Dppclk_calculated[k];
else if (clk_cfg->dppclk_option[k] == dml_use_override_freq)
- locals->Dppclk[k] = clk_cfg->dppclk_freq_mhz[k];
+ locals->Dppclk[k] = clk_cfg->dppclk_mhz[k];
else
locals->Dppclk[k] = mode_lib->ms.state.dppclk_mhz;
#ifdef __DML_VBA_DEBUG__
@@ -9189,6 +9190,8 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
&locals->FractionOfUrgentBandwidth,
&s->dummy_boolean[0]); // dml_bool_t *PrefetchBandwidthSupport
+
+
if (s->VRatioPrefetchMoreThanMax != false || s->DestinationLineTimesForPrefetchLessThan2 != false) {
dml_print("DML::%s: VRatioPrefetchMoreThanMax = %u\n", __func__, s->VRatioPrefetchMoreThanMax);
dml_print("DML::%s: DestinationLineTimesForPrefetchLessThan2 = %u\n", __func__, s->DestinationLineTimesForPrefetchLessThan2);
@@ -9203,6 +9206,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
}
}
+
if (locals->PrefetchModeSupported == true && mode_lib->ms.support.ImmediateFlipSupport == true) {
locals->BandwidthAvailableForImmediateFlip = CalculateBandwidthAvailableForImmediateFlip(
mode_lib->ms.num_active_planes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
index f951936bb579..dd3f43181a6e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
@@ -28,6 +28,7 @@
#define __DISPLAY_MODE_CORE_STRUCT_H__
#include "display_mode_lib_defines.h"
+#include "dml_top_display_cfg_types.h"
enum dml_project_id {
dml_project_invalid = 0,
@@ -49,7 +50,9 @@ enum dml_use_mall_for_pstate_change_mode {
dml_use_mall_pstate_change_disable = 0,
dml_use_mall_pstate_change_full_frame = 1,
dml_use_mall_pstate_change_sub_viewport = 2,
- dml_use_mall_pstate_change_phantom_pipe = 3
+ dml_use_mall_pstate_change_phantom_pipe = 3,
+ dml_use_mall_pstate_change_phantom_pipe_no_data_return = 4,
+ dml_use_mall_pstate_change_imall = 5
};
enum dml_use_mall_for_static_screen_mode {
dml_use_mall_static_screen_disable = 0,
@@ -171,7 +174,11 @@ enum dml_swizzle_mode {
dml_sw_256kb_z_x = 28,
dml_sw_256kb_s_x = 29,
dml_sw_256kb_d_x = 30,
- dml_sw_256kb_r_x = 31
+ dml_sw_256kb_r_x = 31,
+ dml_sw_256b_2d = 32,
+ dml_sw_4kb_2d = 33,
+ dml_sw_64kb_2d = 34,
+ dml_sw_256kb_2d = 35
};
enum dml_lb_depth {
dml_lb_6 = 0,
@@ -223,24 +230,28 @@ enum dml_mpc_use_policy {
dml_mpc_disabled = 0,
dml_mpc_as_possible = 1,
dml_mpc_as_needed_for_voltage = 2,
- dml_mpc_as_needed_for_pstate_and_voltage = 3
+ dml_mpc_as_needed_for_pstate_and_voltage = 3,
+ dml_mpc_as_needed = 4,
+ dml_mpc_2to1 = 5
};
enum dml_odm_use_policy {
dml_odm_use_policy_bypass = 0,
dml_odm_use_policy_combine_as_needed = 1,
dml_odm_use_policy_combine_2to1 = 2,
- dml_odm_use_policy_combine_4to1 = 3,
- dml_odm_use_policy_split_1to2 = 4,
- dml_odm_use_policy_mso_1to2 = 5,
- dml_odm_use_policy_mso_1to4 = 6
+ dml_odm_use_policy_combine_3to1 = 3,
+ dml_odm_use_policy_combine_4to1 = 4,
+ dml_odm_use_policy_split_1to2 = 5,
+ dml_odm_use_policy_mso_1to2 = 6,
+ dml_odm_use_policy_mso_1to4 = 7
};
enum dml_odm_mode {
dml_odm_mode_bypass = 0,
dml_odm_mode_combine_2to1 = 1,
- dml_odm_mode_combine_4to1 = 2,
- dml_odm_mode_split_1to2 = 3,
- dml_odm_mode_mso_1to2 = 4,
- dml_odm_mode_mso_1to4 = 5
+ dml_odm_mode_combine_3to1 = 2,
+ dml_odm_mode_combine_4to1 = 3,
+ dml_odm_mode_split_1to2 = 4,
+ dml_odm_mode_mso_1to2 = 5,
+ dml_odm_mode_mso_1to4 = 6
};
enum dml_writeback_configuration {
dml_whole_buffer_for_single_stream_no_interleave = 0,
@@ -289,6 +300,17 @@ struct soc_state_bounding_box_st {
dml_float_t fclk_change_latency_us;
dml_float_t usr_retraining_latency_us;
dml_bool_t use_ideal_dram_bw_strobe;
+ dml_float_t g6_temp_read_blackout_us;
+
+ struct {
+ dml_uint_t urgent_ramp_uclk_cycles;
+ dml_uint_t trip_to_memory_uclk_cycles;
+ dml_uint_t meta_trip_to_memory_uclk_cycles;
+ dml_uint_t maximum_latency_when_urgent_uclk_cycles;
+ dml_uint_t average_latency_when_urgent_uclk_cycles;
+ dml_uint_t maximum_latency_when_non_urgent_uclk_cycles;
+ dml_uint_t average_latency_when_non_urgent_uclk_cycles;
+ } dml_dcn401_uclk_dpm_dependent_soc_qos_params;
};
struct soc_bounding_box_st {
@@ -297,7 +319,7 @@ struct soc_bounding_box_st {
dml_float_t pcierefclk_mhz;
dml_float_t refclk_mhz;
dml_float_t amclk_mhz;
- dml_float_t max_outstanding_reqs;
+ dml_uint_t max_outstanding_reqs;
dml_float_t pct_ideal_sdp_bw_after_urgent;
dml_float_t pct_ideal_fabric_bw_after_urgent;
dml_float_t pct_ideal_dram_bw_after_urgent_pixel_only;
@@ -308,6 +330,16 @@ struct soc_bounding_box_st {
dml_float_t max_avg_fabric_bw_use_normal_percent;
dml_float_t max_avg_dram_bw_use_normal_percent;
dml_float_t max_avg_dram_bw_use_normal_strobe_percent;
+
+ dml_float_t svp_prefetch_pct_ideal_sdp_bw_after_urgent;
+ dml_float_t svp_prefetch_pct_ideal_fabric_bw_after_urgent;
+ dml_float_t svp_prefetch_pct_ideal_dram_bw_after_urgent_pixel_only;
+ dml_float_t svp_prefetch_pct_ideal_dram_bw_after_urgent_pixel_and_vm;
+ dml_float_t svp_prefetch_pct_ideal_dram_bw_after_urgent_vm_only;
+ dml_float_t svp_prefetch_max_avg_sdp_bw_use_normal_percent;
+ dml_float_t svp_prefetch_max_avg_fabric_bw_use_normal_percent;
+ dml_float_t svp_prefetch_max_avg_dram_bw_use_normal_percent;
+
dml_uint_t round_trip_ping_latency_dcfclk_cycles;
dml_uint_t urgent_out_of_order_return_per_channel_pixel_only_bytes;
dml_uint_t urgent_out_of_order_return_per_channel_pixel_and_vm_bytes;
@@ -324,6 +356,26 @@ struct soc_bounding_box_st {
dml_uint_t mall_allocated_for_dcn_mbytes;
dml_float_t dispclk_dppclk_vco_speed_mhz;
dml_bool_t do_urgent_latency_adjustment;
+
+ dml_uint_t mem_word_bytes;
+ dml_uint_t num_dcc_mcaches;
+ dml_uint_t mcache_size_bytes;
+ dml_uint_t mcache_line_size_bytes;
+
+ struct {
+ dml_bool_t UseNewDCN401SOCParameters;
+ dml_uint_t df_qos_response_time_fclk_cycles;
+ dml_uint_t max_round_trip_to_furthest_cs_fclk_cycles;
+ dml_uint_t mall_overhead_fclk_cycles;
+ dml_uint_t meta_trip_adder_fclk_cycles;
+ dml_uint_t average_transport_distance_fclk_cycles;
+ dml_float_t umc_urgent_ramp_latency_margin;
+ dml_float_t umc_max_latency_margin;
+ dml_float_t umc_average_latency_margin;
+ dml_float_t fabric_max_transport_latency_margin;
+ dml_float_t fabric_average_transport_latency_margin;
+ } dml_dcn401_soc_qos_params;
+
};
struct ip_params_st {
@@ -515,6 +567,10 @@ struct dml_plane_cfg_st {
dml_uint_t CursorWidth[__DML_NUM_PLANES__];
dml_uint_t CursorBPP[__DML_NUM_PLANES__];
+ dml_bool_t setup_for_tdlut[__DML_NUM_PLANES__];
+ enum dml2_tdlut_addressing_mode tdlut_addressing_mode[__DML_NUM_PLANES__];
+ enum dml2_tdlut_width_mode tdlut_width_mode[__DML_NUM_PLANES__];
+
enum dml_use_mall_for_static_screen_mode UseMALLForStaticScreen[__DML_NUM_PLANES__];
enum dml_use_mall_for_pstate_change_mode UseMALLForPStateChange[__DML_NUM_PLANES__];
@@ -604,6 +660,17 @@ struct dml_hw_resource_st {
dml_float_t DLGRefClkFreqMHz; /// <brief DLG Global Reference timer
};
+/// @brief To control the clk usage for model programming
+struct dml_clk_cfg_st {
+ enum dml_clk_cfg_policy dcfclk_option; ///< brief Use for mode_program; user can select between use the min require clk req as calculated by DML or use the test-specific freq
+ enum dml_clk_cfg_policy dispclk_option; ///< brief Use for mode_program; user can select between use the min require clk req as calculated by DML or use the test-specific freq
+ enum dml_clk_cfg_policy dppclk_option[__DML_NUM_PLANES__];
+
+ dml_float_t dcfclk_mhz;
+ dml_float_t dispclk_mhz;
+ dml_float_t dppclk_mhz[__DML_NUM_PLANES__];
+}; // dml_clk_cfg_st
+
/// @brief DML display configuration.
/// Describe how to display a surface in multi-plane setup and output to different output and writeback using the specified timgin
struct dml_display_cfg_st {
@@ -616,19 +683,9 @@ struct dml_display_cfg_st {
unsigned int num_timings;
struct dml_hw_resource_st hw; //< brief for mode programming
+ struct dml_clk_cfg_st clk_overrides; //< brief for mode programming clk override
}; // dml_display_cfg_st
-/// @brief To control the clk usage for model programming
-struct dml_clk_cfg_st {
- enum dml_clk_cfg_policy dcfclk_option; ///< brief Use for mode_program; user can select between use the min require clk req as calculated by DML or use the test-specific freq
- enum dml_clk_cfg_policy dispclk_option; ///< brief Use for mode_program; user can select between use the min require clk req as calculated by DML or use the test-specific freq
- enum dml_clk_cfg_policy dppclk_option[__DML_NUM_PLANES__];
-
- dml_float_t dcfclk_freq_mhz;
- dml_float_t dispclk_freq_mhz;
- dml_float_t dppclk_freq_mhz[__DML_NUM_PLANES__];
-}; // dml_clk_cfg_st
-
/// @brief DML mode evaluation and programming policy
/// Those knobs that affect mode support and mode programming
struct dml_mode_eval_policy_st {
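
dml_clk_cfg_st moves above dml_display_cfg_st because the latter now embeds it by value as clk_overrides; C requires a complete type for a by-value member, while a forward declaration only suffices for pointers. Minimal illustration:

    struct clk_cfg;                        /* forward declaration */

    struct display_cfg_bad {
        /* struct clk_cfg overrides; */    /* error: field has incomplete type */
        struct clk_cfg *overrides_ptr;     /* a pointer member would be fine */
    };

    struct clk_cfg { double dcfclk_mhz; };

    struct display_cfg_good {
        struct clk_cfg overrides;          /* OK: definition precedes use */
    };
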
@@ -884,11 +941,11 @@ struct mode_support_st {
dml_uint_t meta_row_height[__DML_NUM_PLANES__];
dml_uint_t meta_row_height_chroma[__DML_NUM_PLANES__];
dml_float_t UrgLatency;
- dml_float_t UrgentBurstFactorCursor[__DML_NUM_PLANES__];
+ dml_float_t UrgentBurstFactorCursor[2][__DML_NUM_PLANES__];
dml_float_t UrgentBurstFactorCursorPre[__DML_NUM_PLANES__];
- dml_float_t UrgentBurstFactorLuma[__DML_NUM_PLANES__];
+ dml_float_t UrgentBurstFactorLuma[2][__DML_NUM_PLANES__];
dml_float_t UrgentBurstFactorLumaPre[__DML_NUM_PLANES__];
- dml_float_t UrgentBurstFactorChroma[__DML_NUM_PLANES__];
+ dml_float_t UrgentBurstFactorChroma[2][__DML_NUM_PLANES__];
dml_float_t UrgentBurstFactorChromaPre[__DML_NUM_PLANES__];
dml_float_t MaximumSwathWidthInLineBufferLuma;
dml_float_t MaximumSwathWidthInLineBufferChroma;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
index c247aee89caf..89890c88fd66 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
@@ -690,12 +690,12 @@ __DML_DLL_EXPORT__ void dml_print_clk_cfg(const struct dml_clk_cfg_st *clk_cfg)
dml_print("DML: clk_cfg: dcfclk_option = %d\n", clk_cfg->dcfclk_option);
dml_print("DML: clk_cfg: dispclk_option = %d\n", clk_cfg->dispclk_option);
- dml_print("DML: clk_cfg: dcfclk_freq_mhz = %f\n", clk_cfg->dcfclk_freq_mhz);
- dml_print("DML: clk_cfg: dispclk_freq_mhz = %f\n", clk_cfg->dispclk_freq_mhz);
+ dml_print("DML: clk_cfg: dcfclk_mhz = %f\n", clk_cfg->dcfclk_mhz);
+ dml_print("DML: clk_cfg: dispclk_mhz = %f\n", clk_cfg->dispclk_mhz);
for (dml_uint_t i = 0; i < DCN_DML__NUM_PLANE; i++) {
dml_print("DML: clk_cfg: i=%d, dppclk_option = %d\n", i, clk_cfg->dppclk_option[i]);
- dml_print("DML: clk_cfg: i=%d, dppclk_freq_mhz = %f\n", i, clk_cfg->dppclk_freq_mhz[i]);
+ dml_print("DML: clk_cfg: i=%d, dppclk_mhz = %f\n", i, clk_cfg->dppclk_mhz[i]);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 8697eac1e1f7..0c8ec30ea672 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -10,7 +10,6 @@
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
#include "bounding_boxes/dcn4_soc_bb.h"
-#include "bounding_boxes/dcn3_soc_bb.h"
static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
@@ -20,10 +19,6 @@ static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_
const struct dml2_soc_qos_parameters *qos_params;
switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
- soc_bb = &dml2_socbb_dcn31;
- qos_params = &dml_dcn31_soc_qos_params;
- break;
case DCN_VERSION_4_01:
default:
if (config->bb_from_dmub)
@@ -60,9 +55,6 @@ static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_ini
const struct dml2_ip_capabilities *ip_caps;
switch (in_dc->ctx->dce_version) {
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
- ip_caps = &dml2_dcn31_max_ip_caps;
- break;
case DCN_VERSION_4_01:
default:
ip_caps = &dml2_dcn401_max_ip_caps;
@@ -302,12 +294,17 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
(in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns + 9) / 10;
- if (in_dc->ctx->dc_bios->vram_info.num_chans) {
+ if (dc_bw_params->num_channels) {
+ dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
+ dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
+ } else if (in_dc->ctx->dc_bios->vram_info.num_chans) {
dml_clk_table->dram_config.channel_count = in_dc->ctx->dc_bios->vram_info.num_chans;
dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
}
- if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
+ if (dc_bw_params->dram_channel_width_bytes) {
+ dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
+ } else if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
}
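
The DRAM channel count and width overrides now prefer the dc_bw_params values (populated at runtime, presumably from firmware) and fall back to the BIOS vram_info only when those are zero; if neither is set, the SoC bounding-box default is left untouched. Preference-order sketch (hypothetical helper):

    static unsigned int toy_pick_channels(unsigned int bw_params_chans,
                                          unsigned int bios_chans,
                                          unsigned int soc_default)
    {
        if (bw_params_chans)
            return bw_params_chans;   /* runtime bw_params win */
        if (bios_chans)
            return bios_chans;        /* BIOS vram_info second */
        return soc_default;           /* otherwise keep the BB default */
    }
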
@@ -339,11 +336,22 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
// }
}
+static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
+{
+ unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;
+
+ if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
+ max_hw_v_total -= stream->timing.v_front_porch + 1;
+ }
+
+ return max_hw_v_total;
+}
+
static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
struct dc_stream_state *stream,
struct dml2_context *dml_ctx)
{
- unsigned int hblank_start, vblank_start;
+ unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
@@ -371,11 +379,23 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
- stream->timing.v_border_top - stream->timing.v_border_bottom;
timing->drr_config.enabled = stream->ignore_msa_timing_param;
- timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
timing->drr_config.drr_active_variable = stream->vrr_active_variable;
timing->drr_config.drr_active_fixed = stream->vrr_active_fixed;
timing->drr_config.disallowed = !stream->allow_freesync;
+ /* limit min refresh rate to DC cap */
+ min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
+ if (stream->ctx->dc->caps.max_v_total != 0) {
+ min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
+ (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
+ }
+
+ if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
+ timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
+ } else {
+ timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
+ }
+
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
timing->drr_config.max_instant_vtotal_delta = dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase(stream, false);
@@ -422,6 +442,21 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
timing->vblank_nom = timing->v_total - timing->v_active;
}
+/**
+ * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration
+ * based on the pipe context.
+ * @timing: Pointer to the dml2_timing_cfg structure to be adjusted.
+ * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value.
+ *
+ * This function modifies the horizontal active and blank end timings by adding and subtracting
+ * the horizontal blanking borrow value from the pipe context, respectively.
+ */
+static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe)
+{
+ timing->h_active += pipe->hblank_borrow;
+ timing->h_blank_end -= pipe->hblank_borrow;
+}
+
static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output,
struct dc_stream_state *stream, const struct pipe_ctx *pipe)
{
@@ -683,11 +718,21 @@ static void populate_dml21_surface_config_from_plane_state(
surface->dcc.informative.fraction_of_zero_size_request_plane1 = plane_state->dcc.independent_64b_blks_c;
surface->dcc.plane0.pitch = plane_state->dcc.meta_pitch;
surface->dcc.plane1.pitch = plane_state->dcc.meta_pitch_c;
- if (in_dc->ctx->dce_version < DCN_VERSION_4_01) {
- /* needed for N-1 testing */
+
+ // Update swizzle / array mode based on the gfx_format
+ switch (plane_state->tiling_info.gfxversion) {
+ case DcGfxVersion7:
+ case DcGfxVersion8:
+ // Placeholder for programming the array_mode
+ break;
+ case DcGfxVersion9:
+ case DcGfxVersion10:
+ case DcGfxVersion11:
surface->tiling = gfx9_to_dml2_swizzle_mode(plane_state->tiling_info.gfx9.swizzle);
- } else {
+ break;
+ case DcGfxAddr3:
surface->tiling = gfx_addr3_to_dml2_swizzle_mode(plane_state->tiling_info.gfx_addr3.swizzle);
+ break;
}
}
@@ -709,6 +754,7 @@ static const struct scaler_data *get_scaler_data_for_plane(
temp_pipe->plane_state = pipe->plane_state;
temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
temp_pipe->stream_res = pipe->stream_res;
+ temp_pipe->hblank_borrow = pipe->hblank_borrow;
dml_ctx->config.callbacks.build_scaling_params(temp_pipe);
break;
}
@@ -859,7 +905,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->immediate_flip = plane_state->flip_immediate;
plane->composition.rect_out_height_spans_vactive =
- plane_state->dst_rect.height >= stream->timing.v_addressable &&
+ plane_state->dst_rect.height >= stream->src.height &&
stream->dst.height >= stream->timing.v_addressable;
}
@@ -971,8 +1017,9 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_streams++;
- ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
+ adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]);
@@ -995,7 +1042,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
- ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_surface_config_from_plane_state(in_dc, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->stream_status[stream_index].plane_states[plane_index]);
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
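
Both ASSERT fixes in this file tighten <= to <: disp_cfg_stream_location and disp_cfg_plane_location index arrays sized __DML2_WRAPPER_MAX_STREAMS_PLANES__, so an index equal to the size is already one past the end, and the old assertion would have let it pass. Off-by-one sketch:

    #define TOY_MAX 6
    int slots[TOY_MAX];

    int toy_store(int idx, int val)
    {
        if (idx < 0 || idx >= TOY_MAX)  /* 'idx <= TOY_MAX' would admit idx == 6 */
            return -1;
        slots[idx] = val;               /* valid indexes are 0..TOY_MAX-1 */
        return 0;
    }
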
@@ -1036,28 +1083,9 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.p_state_change_support = in_ctx->v21.mode_programming.programming->uclk_pstate_supported;
context->bw_ctx.bw.dcn.clk.dtbclk_en = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz > 0;
context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.dtbrefclk_khz;
-}
-
-void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx)
-{
- struct dml2_core_internal_display_mode_lib *mode_lib = &in_ctx->v21.dml_init.dml2_instance->core_instance.clean_me_up.mode_lib;
- double refclk_freq_in_mhz = (in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz > 0) ? (double)in_ctx->v21.display_config.overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
-
- if (reg_set_idx >= DML2_DCHUB_WATERMARK_SET_NUM) {
- /* invalid register set index */
- return;
- }
-
- /* convert to legacy format (time in ns) */
- watermark->urgent_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->pte_meta_urgent_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.cstate_enter_plus_exit_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].sr_enter / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.cstate_exit_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].sr_exit / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.pstate_change_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].uclk_pstate / refclk_freq_in_mhz) * 1000.0;
- watermark->urgent_latency_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].urgent / refclk_freq_in_mhz) * 1000.0;
- watermark->cstate_pstate.fclk_pstate_change_ns = ((double)in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].fclk_pstate / refclk_freq_in_mhz) * 1000.0;
- watermark->frac_urg_bw_flip = in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].frac_urg_bw_flip;
- watermark->frac_urg_bw_nom = in_ctx->v21.mode_programming.programming->global_regs.wm_regs[reg_set_idx].frac_urg_bw_nom;
+ context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz;
+ context->bw_ctx.bw.dcn.clk.subvp_prefetch_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz;
+ context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
@@ -1103,53 +1131,6 @@ void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_se
}
}
-
-void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming)
-{
- unsigned int hactive, vactive, hblank_start, vblank_start, hblank_end, vblank_end;
- struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
- union dml2_global_sync_programming *global_sync = &stream_programming->global_sync;
-
- hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right;
- vactive = timing->v_addressable + timing->v_border_bottom + timing->v_border_top;
- hblank_start = pipe_ctx->stream->timing.h_total - pipe_ctx->stream->timing.h_front_porch;
- vblank_start = pipe_ctx->stream->timing.v_total - pipe_ctx->stream->timing.v_front_porch;
-
- hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right;
- vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
-
- if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
- /* phantom has its own global sync */
- global_sync = &stream_programming->phantom_stream.global_sync;
- }
-
- pipe_ctx->pipe_dlg_param.vstartup_start = global_sync->dcn4x.vstartup_lines;
- pipe_ctx->pipe_dlg_param.vupdate_offset = global_sync->dcn4x.vupdate_offset_pixels;
- pipe_ctx->pipe_dlg_param.vupdate_width = global_sync->dcn4x.vupdate_vupdate_width_pixels;
- pipe_ctx->pipe_dlg_param.vready_offset = global_sync->dcn4x.vready_offset_pixels;
- pipe_ctx->pipe_dlg_param.pstate_keepout = global_sync->dcn4x.pstate_keepout_start_lines;
-
- pipe_ctx->pipe_dlg_param.otg_inst = pipe_ctx->stream_res.tg->inst;
-
- pipe_ctx->pipe_dlg_param.hactive = hactive;
- pipe_ctx->pipe_dlg_param.vactive = vactive;
- pipe_ctx->pipe_dlg_param.htotal = pipe_ctx->stream->timing.h_total;
- pipe_ctx->pipe_dlg_param.vtotal = pipe_ctx->stream->timing.v_total;
- pipe_ctx->pipe_dlg_param.hblank_end = hblank_end;
- pipe_ctx->pipe_dlg_param.vblank_end = vblank_end;
- pipe_ctx->pipe_dlg_param.hblank_start = hblank_start;
- pipe_ctx->pipe_dlg_param.vblank_start = vblank_start;
- pipe_ctx->pipe_dlg_param.vfront_porch = pipe_ctx->stream->timing.v_front_porch;
- pipe_ctx->pipe_dlg_param.pixel_rate_mhz = pipe_ctx->stream->timing.pix_clk_100hz / 10000.00;
- pipe_ctx->pipe_dlg_param.refresh_rate = ((timing->pix_clk_100hz * 100) / timing->h_total) / timing->v_total;
- pipe_ctx->pipe_dlg_param.vtotal_max = pipe_ctx->stream->adjust.v_total_max;
- pipe_ctx->pipe_dlg_param.vtotal_min = pipe_ctx->stream->adjust.v_total_min;
- pipe_ctx->pipe_dlg_param.recout_height = pipe_ctx->plane_res.scl_data.recout.height;
- pipe_ctx->pipe_dlg_param.recout_width = pipe_ctx->plane_res.scl_data.recout.width;
- pipe_ctx->pipe_dlg_param.full_recout_height = pipe_ctx->plane_res.scl_data.recout.height;
- pipe_ctx->pipe_dlg_param.full_recout_width = pipe_ctx->plane_res.scl_data.recout.width;
-}
-
void dml21_map_hw_resources(struct dml2_context *dml_ctx)
{
unsigned int i = 0;
@@ -1185,22 +1166,22 @@ void dml21_set_dc_p_state_type(
bool sub_vp_enabled)
{
switch (stream_programming->uclk_pstate_method) {
- case dml2_uclk_pstate_support_method_vactive:
- case dml2_uclk_pstate_support_method_fw_vactive_drr:
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
break;
- case dml2_uclk_pstate_support_method_vblank:
- case dml2_uclk_pstate_support_method_fw_vblank_drr:
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
if (sub_vp_enabled)
pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
else
pipe_ctx->p_state_type = P_STATE_V_BLANK;
break;
- case dml2_uclk_pstate_support_method_fw_subvp_phantom:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
pipe_ctx->p_state_type = P_STATE_SUB_VP;
break;
- case dml2_uclk_pstate_support_method_fw_drr:
+ case dml2_pstate_method_fw_drr:
if (sub_vp_enabled)
pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
index 476a7f6e4875..069b939c672a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
@@ -21,8 +21,6 @@ void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_
void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc);
bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
-void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming);
-void dml21_extract_legacy_watermark_set(const struct dc *in_dc, struct dcn_watermarks *watermark, enum dml2_dchub_watermark_reg_set_index reg_set_idx, struct dml2_context *in_ctx);
void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx);
void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index 51d491bffa32..1e56d995cd0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -142,108 +142,21 @@ int dml21_find_dc_pipes_for_plane(const struct dc *in_dc,
return num_pipes;
}
-
-void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- struct pipe_ctx *out)
+void dml21_pipe_populate_global_sync(struct dml2_context *dml_ctx,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dml2_per_stream_programming *stream_programming)
{
- memset(&out->rq_regs, 0, sizeof(out->rq_regs));
- out->rq_regs.rq_regs_l.chunk_size = rq_regs->rq_regs_l.chunk_size;
- out->rq_regs.rq_regs_l.min_chunk_size = rq_regs->rq_regs_l.min_chunk_size;
- //out->rq_regs.rq_regs_l.meta_chunk_size = rq_regs->rq_regs_l.meta_chunk_size;
- //out->rq_regs.rq_regs_l.min_meta_chunk_size = rq_regs->rq_regs_l.min_meta_chunk_size;
- out->rq_regs.rq_regs_l.dpte_group_size = rq_regs->rq_regs_l.dpte_group_size;
- out->rq_regs.rq_regs_l.mpte_group_size = rq_regs->rq_regs_l.mpte_group_size;
- out->rq_regs.rq_regs_l.swath_height = rq_regs->rq_regs_l.swath_height;
- out->rq_regs.rq_regs_l.pte_row_height_linear = rq_regs->rq_regs_l.pte_row_height_linear;
-
- out->rq_regs.rq_regs_c.chunk_size = rq_regs->rq_regs_c.chunk_size;
- out->rq_regs.rq_regs_c.min_chunk_size = rq_regs->rq_regs_c.min_chunk_size;
- //out->rq_regs.rq_regs_c.meta_chunk_size = rq_regs->rq_regs_c.meta_chunk_size;
- //out->rq_regs.rq_regs_c.min_meta_chunk_size = rq_regs->rq_regs_c.min_meta_chunk_size;
- out->rq_regs.rq_regs_c.dpte_group_size = rq_regs->rq_regs_c.dpte_group_size;
- out->rq_regs.rq_regs_c.mpte_group_size = rq_regs->rq_regs_c.mpte_group_size;
- out->rq_regs.rq_regs_c.swath_height = rq_regs->rq_regs_c.swath_height;
- out->rq_regs.rq_regs_c.pte_row_height_linear = rq_regs->rq_regs_c.pte_row_height_linear;
-
- out->rq_regs.drq_expansion_mode = rq_regs->drq_expansion_mode;
- out->rq_regs.prq_expansion_mode = rq_regs->prq_expansion_mode;
- //out->rq_regs.mrq_expansion_mode = rq_regs->mrq_expansion_mode;
- out->rq_regs.crq_expansion_mode = rq_regs->crq_expansion_mode;
- out->rq_regs.plane1_base_address = rq_regs->plane1_base_address;
- out->unbounded_req = rq_regs->unbounded_request_enabled;
-
- memset(&out->dlg_regs, 0, sizeof(out->dlg_regs));
- out->dlg_regs.refcyc_h_blank_end = disp_dlg_regs->refcyc_h_blank_end;
- out->dlg_regs.dlg_vblank_end = disp_dlg_regs->dlg_vblank_end;
- out->dlg_regs.min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
- out->dlg_regs.refcyc_per_htotal = disp_dlg_regs->refcyc_per_htotal;
- out->dlg_regs.refcyc_x_after_scaler = disp_dlg_regs->refcyc_x_after_scaler;
- out->dlg_regs.dst_y_after_scaler = disp_dlg_regs->dst_y_after_scaler;
- out->dlg_regs.dst_y_prefetch = disp_dlg_regs->dst_y_prefetch;
- out->dlg_regs.dst_y_per_vm_vblank = disp_dlg_regs->dst_y_per_vm_vblank;
- out->dlg_regs.dst_y_per_row_vblank = disp_dlg_regs->dst_y_per_row_vblank;
- out->dlg_regs.dst_y_per_vm_flip = disp_dlg_regs->dst_y_per_vm_flip;
- out->dlg_regs.dst_y_per_row_flip = disp_dlg_regs->dst_y_per_row_flip;
- out->dlg_regs.ref_freq_to_pix_freq = disp_dlg_regs->ref_freq_to_pix_freq;
- out->dlg_regs.vratio_prefetch = disp_dlg_regs->vratio_prefetch;
- out->dlg_regs.vratio_prefetch_c = disp_dlg_regs->vratio_prefetch_c;
- out->dlg_regs.refcyc_per_tdlut_group = disp_dlg_regs->refcyc_per_tdlut_group;
- out->dlg_regs.refcyc_per_pte_group_vblank_l = disp_dlg_regs->refcyc_per_pte_group_vblank_l;
- out->dlg_regs.refcyc_per_pte_group_vblank_c = disp_dlg_regs->refcyc_per_pte_group_vblank_c;
- //out->dlg_regs.refcyc_per_meta_chunk_vblank_l = disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;
- //out->dlg_regs.refcyc_per_meta_chunk_vblank_c = disp_dlg_regs->refcyc_per_meta_chunk_vblank_c;
- out->dlg_regs.refcyc_per_pte_group_flip_l = disp_dlg_regs->refcyc_per_pte_group_flip_l;
- out->dlg_regs.refcyc_per_pte_group_flip_c = disp_dlg_regs->refcyc_per_pte_group_flip_c;
- //out->dlg_regs.refcyc_per_meta_chunk_flip_l = disp_dlg_regs->refcyc_per_meta_chunk_flip_l;
- //out->dlg_regs.refcyc_per_meta_chunk_flip_c = disp_dlg_regs->refcyc_per_meta_chunk_flip_c;
- out->dlg_regs.dst_y_per_pte_row_nom_l = disp_dlg_regs->dst_y_per_pte_row_nom_l;
- out->dlg_regs.dst_y_per_pte_row_nom_c = disp_dlg_regs->dst_y_per_pte_row_nom_c;
- out->dlg_regs.refcyc_per_pte_group_nom_l = disp_dlg_regs->refcyc_per_pte_group_nom_l;
- out->dlg_regs.refcyc_per_pte_group_nom_c = disp_dlg_regs->refcyc_per_pte_group_nom_c;
- //out->dlg_regs.dst_y_per_meta_row_nom_l = disp_dlg_regs->dst_y_per_meta_row_nom_l;
- //out->dlg_regs.dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_c;
- //out->dlg_regs.refcyc_per_meta_chunk_nom_l = disp_dlg_regs->refcyc_per_meta_chunk_nom_l;
- //out->dlg_regs.refcyc_per_meta_chunk_nom_c = disp_dlg_regs->refcyc_per_meta_chunk_nom_c;
- out->dlg_regs.refcyc_per_line_delivery_pre_l = disp_dlg_regs->refcyc_per_line_delivery_pre_l;
- out->dlg_regs.refcyc_per_line_delivery_pre_c = disp_dlg_regs->refcyc_per_line_delivery_pre_c;
- out->dlg_regs.refcyc_per_line_delivery_l = disp_dlg_regs->refcyc_per_line_delivery_l;
- out->dlg_regs.refcyc_per_line_delivery_c = disp_dlg_regs->refcyc_per_line_delivery_c;
- out->dlg_regs.refcyc_per_vm_group_vblank = disp_dlg_regs->refcyc_per_vm_group_vblank;
- out->dlg_regs.refcyc_per_vm_group_flip = disp_dlg_regs->refcyc_per_vm_group_flip;
- out->dlg_regs.refcyc_per_vm_req_vblank = disp_dlg_regs->refcyc_per_vm_req_vblank;
- out->dlg_regs.refcyc_per_vm_req_flip = disp_dlg_regs->refcyc_per_vm_req_flip;
- out->dlg_regs.dst_y_offset_cur0 = disp_dlg_regs->dst_y_offset_cur0;
- out->dlg_regs.chunk_hdl_adjust_cur0 = disp_dlg_regs->chunk_hdl_adjust_cur0;
- //out->dlg_regs.dst_y_offset_cur1 = disp_dlg_regs->dst_y_offset_cur1;
- //out->dlg_regs.chunk_hdl_adjust_cur1 = disp_dlg_regs->chunk_hdl_adjust_cur1;
- out->dlg_regs.vready_after_vcount0 = disp_dlg_regs->vready_after_vcount0;
- out->dlg_regs.dst_y_delta_drq_limit = disp_dlg_regs->dst_y_delta_drq_limit;
- out->dlg_regs.refcyc_per_vm_dmdata = disp_dlg_regs->refcyc_per_vm_dmdata;
- out->dlg_regs.dmdata_dl_delta = disp_dlg_regs->dmdata_dl_delta;
-
- memset(&out->ttu_regs, 0, sizeof(out->ttu_regs));
- out->ttu_regs.qos_level_low_wm = disp_ttu_regs->qos_level_low_wm;
- out->ttu_regs.qos_level_high_wm = disp_ttu_regs->qos_level_high_wm;
- out->ttu_regs.min_ttu_vblank = disp_ttu_regs->min_ttu_vblank;
- out->ttu_regs.qos_level_flip = disp_ttu_regs->qos_level_flip;
- out->ttu_regs.refcyc_per_req_delivery_l = disp_ttu_regs->refcyc_per_req_delivery_l;
- out->ttu_regs.refcyc_per_req_delivery_c = disp_ttu_regs->refcyc_per_req_delivery_c;
- out->ttu_regs.refcyc_per_req_delivery_cur0 = disp_ttu_regs->refcyc_per_req_delivery_cur0;
- //out->ttu_regs.refcyc_per_req_delivery_cur1 = disp_ttu_regs->refcyc_per_req_delivery_cur1;
- out->ttu_regs.refcyc_per_req_delivery_pre_l = disp_ttu_regs->refcyc_per_req_delivery_pre_l;
- out->ttu_regs.refcyc_per_req_delivery_pre_c = disp_ttu_regs->refcyc_per_req_delivery_pre_c;
- out->ttu_regs.refcyc_per_req_delivery_pre_cur0 = disp_ttu_regs->refcyc_per_req_delivery_pre_cur0;
- //out->ttu_regs.refcyc_per_req_delivery_pre_cur1 = disp_ttu_regs->refcyc_per_req_delivery_pre_cur1;
- out->ttu_regs.qos_level_fixed_l = disp_ttu_regs->qos_level_fixed_l;
- out->ttu_regs.qos_level_fixed_c = disp_ttu_regs->qos_level_fixed_c;
- out->ttu_regs.qos_level_fixed_cur0 = disp_ttu_regs->qos_level_fixed_cur0;
- //out->ttu_regs.qos_level_fixed_cur1 = disp_ttu_regs->qos_level_fixed_cur1;
- out->ttu_regs.qos_ramp_disable_l = disp_ttu_regs->qos_ramp_disable_l;
- out->ttu_regs.qos_ramp_disable_c = disp_ttu_regs->qos_ramp_disable_c;
- out->ttu_regs.qos_ramp_disable_cur0 = disp_ttu_regs->qos_ramp_disable_cur0;
- //out->ttu_regs.qos_ramp_disable_cur1 = disp_ttu_regs->qos_ramp_disable_cur1;
+ union dml2_global_sync_programming *global_sync = &stream_programming->global_sync;
+
+ if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
+ /* phantom has its own global sync */
+ global_sync = &stream_programming->phantom_stream.global_sync;
+ }
+
+ memcpy(&pipe_ctx->global_sync,
+ global_sync,
+ sizeof(union dml2_global_sync_programming));
}
void dml21_populate_mall_allocation_size(struct dc_state *context,
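Note on the new helper above: the per-register copying of the removed dml21_update_pipe_ctx_dchub_regs() path is replaced by a single memcpy of a global-sync union, with the phantom stream's copy selected when the pipe is a SubVP phantom. A minimal sketch of that selection pattern, using simplified stand-in types (the real dml2/dc structures are far larger):

/* Simplified stand-in types; the real definitions live in the dml2 headers. */
#include <string.h>

union global_sync { unsigned int regs[8]; };

struct stream_prog {
	union global_sync global_sync;
	struct { union global_sync global_sync; } phantom_stream;
};

/* Copy the appropriate global-sync block into the pipe context. */
static void populate_global_sync(union global_sync *dst,
				 const struct stream_prog *prog,
				 int is_phantom)
{
	const union global_sync *src = is_phantom ?
		&prog->phantom_stream.global_sync : &prog->global_sync;

	memcpy(dst, src, sizeof(*src));
}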
@@ -301,28 +214,16 @@ void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *contex
{
unsigned int pipe_reg_index = 0;
- dml21_populate_pipe_ctx_dlg_params(dml_ctx, context, pipe_ctx, stream_prog);
+ dml21_pipe_populate_global_sync(dml_ctx, context, pipe_ctx, stream_prog);
find_pipe_regs_idx(dml_ctx, pipe_ctx, &pipe_reg_index);
if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
memcpy(&pipe_ctx->hubp_regs, pln_prog->phantom_plane.pipe_regs[pipe_reg_index], sizeof(struct dml2_dchub_per_pipe_register_set));
pipe_ctx->unbounded_req = false;
-
- /* legacy only, should be removed later */
- dml21_update_pipe_ctx_dchub_regs(&pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->rq_regs,
- &pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->dlg_regs,
- &pln_prog->phantom_plane.pipe_regs[pipe_reg_index]->ttu_regs, pipe_ctx);
-
pipe_ctx->det_buffer_size_kb = 0;
} else {
memcpy(&pipe_ctx->hubp_regs, pln_prog->pipe_regs[pipe_reg_index], sizeof(struct dml2_dchub_per_pipe_register_set));
pipe_ctx->unbounded_req = pln_prog->pipe_regs[pipe_reg_index]->rq_regs.unbounded_request_enabled;
-
- /* legacy only, should be removed later */
- dml21_update_pipe_ctx_dchub_regs(&pln_prog->pipe_regs[pipe_reg_index]->rq_regs,
- &pln_prog->pipe_regs[pipe_reg_index]->dlg_regs,
- &pln_prog->pipe_regs[pipe_reg_index]->ttu_regs, pipe_ctx);
-
pipe_ctx->det_buffer_size_kb = pln_prog->pipe_regs[pipe_reg_index]->det_size * 64;
}
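The non-phantom branch above also derives det_buffer_size_kb from the per-pipe register set. Assuming det_size counts 64 KB DET segments (the usual DCN granularity; the unit is not restated in this hunk), the conversion is a single multiply:

/* Hypothetical helper: convert DET segments to kilobytes, assuming
 * each segment is 64 KB as on recent DCN parts. */
static unsigned int det_segments_to_kb(unsigned int det_size)
{
	return det_size * 64;	/* e.g. det_size = 6 -> 384 KB */
}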
@@ -482,7 +383,8 @@ void dml21_build_fams2_programming(const struct dc *dc,
unsigned int num_fams2_streams = 0;
/* reset fams2 data */
- memset(&context->bw_ctx.bw.dcn.fams2_stream_params, 0, sizeof(struct dmub_fams2_stream_static_state) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
+ memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES);
memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config));
if (dml_ctx->v21.mode_programming.programming->fams2_required) {
@@ -490,8 +392,10 @@ void dml21_build_fams2_programming(const struct dc *dc,
int dml_stream_idx;
struct dc_stream_state *phantom_stream;
struct dc_stream_status *phantom_status;
+ enum fams2_stream_type type = 0;
- struct dmub_fams2_stream_static_state *static_state = &context->bw_ctx.bw.dcn.fams2_stream_params[num_fams2_streams];
+ union dmub_cmd_fams2_config *static_base_state = &context->bw_ctx.bw.dcn.fams2_stream_base_params[num_fams2_streams];
+ union dmub_cmd_fams2_config *static_sub_state = &context->bw_ctx.bw.dcn.fams2_stream_sub_params[num_fams2_streams];
struct dc_stream_state *stream = context->streams[i];
@@ -508,28 +412,38 @@ void dml21_build_fams2_programming(const struct dc *dc,
}
/* copy static state from PMO */
- memcpy(static_state,
- &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_params,
- sizeof(struct dmub_fams2_stream_static_state));
-
- /* get information from context */
- static_state->num_planes = context->stream_status[i].plane_count;
- static_state->otg_inst = context->stream_status[i].primary_otg_inst;
-
- /* populate pipe masks for planes */
- for (j = 0; j < context->stream_status[i].plane_count; j++) {
- for (k = 0; k < dc->res_pool->pipe_count; k++) {
- if (context->res_ctx.pipe_ctx[k].stream &&
- context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
- context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
- static_state->pipe_mask |= (1 << k);
- static_state->plane_pipe_masks[j] |= (1 << k);
+ memcpy(static_base_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params,
+ sizeof(union dmub_cmd_fams2_config));
+ memcpy(static_sub_state,
+ &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params,
+ sizeof(union dmub_cmd_fams2_config));
+
+ switch (dc->debug.fams_version.minor) {
+ case 1:
+ default:
+ type = static_base_state->stream_v1.base.type;
+
+ /* get information from context */
+ static_base_state->stream_v1.base.num_planes = context->stream_status[i].plane_count;
+ static_base_state->stream_v1.base.otg_inst = context->stream_status[i].primary_otg_inst;
+
+ /* populate pipe masks for planes */
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (context->res_ctx.pipe_ctx[k].stream &&
+ context->res_ctx.pipe_ctx[k].stream->stream_id == stream->stream_id &&
+ context->res_ctx.pipe_ctx[k].plane_state == context->stream_status[i].plane_states[j]) {
+ static_base_state->stream_v1.base.pipe_mask |= (1 << k);
+ static_base_state->stream_v1.base.plane_pipe_masks[j] |= (1 << k);
+ }
}
}
}
+
/* get per method programming */
- switch (static_state->type) {
+ switch (type) {
case FAMS2_STREAM_TYPE_VBLANK:
case FAMS2_STREAM_TYPE_VACTIVE:
case FAMS2_STREAM_TYPE_DRR:
@@ -543,16 +457,27 @@ void dml21_build_fams2_programming(const struct dc *dc,
/* phantom status should always be present */
ASSERT(phantom_status);
- static_state->sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
+ if (!phantom_status)
+ break;
- /* populate pipe masks for phantom planes */
- for (j = 0; j < phantom_status->plane_count; j++) {
- for (k = 0; k < dc->res_pool->pipe_count; k++) {
- if (context->res_ctx.pipe_ctx[k].stream &&
- context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
- context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
- static_state->sub_state.subvp.phantom_pipe_mask |= (1 << k);
- static_state->sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
+ switch (dc->debug.fams_version.minor) {
+ case 1:
+ default:
+ static_sub_state->stream_v1.sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst;
+
+ /* populate pipe masks for phantom planes */
+ for (j = 0; j < phantom_status->plane_count; j++) {
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (context->res_ctx.pipe_ctx[k].stream &&
+ context->res_ctx.pipe_ctx[k].stream->stream_id == phantom_stream->stream_id &&
+ context->res_ctx.pipe_ctx[k].plane_state == phantom_status->plane_states[j]) {
+ switch (dc->debug.fams_version.minor) {
+ case 1:
+ default:
+ static_sub_state->stream_v1.sub_state.subvp.phantom_pipe_mask |= (1 << k);
+ static_sub_state->stream_v1.sub_state.subvp.phantom_plane_pipe_masks[j] |= (1 << k);
+ }
+ }
}
}
}
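The loops above encode pipe membership as bitmasks: bit k is set when hardware pipe k drives the stream, and each plane additionally records which pipes scan it out. A self-contained sketch of the same accumulation, with hypothetical flat arrays standing in for the dc_state bookkeeping (masks are assumed zeroed beforehand, as the memsets earlier in the function guarantee in the driver):

#define MAX_PIPES  8
#define MAX_PLANES 8

struct pipe { int stream_id; int plane_id; };

/* Build a per-stream pipe mask and per-plane pipe masks, mirroring the
 * (1 << k) accumulation in dml21_build_fams2_programming(). */
static void build_pipe_masks(const struct pipe pipes[MAX_PIPES],
			     int stream_id,
			     const int plane_ids[], int plane_count,
			     unsigned int *pipe_mask,
			     unsigned int plane_pipe_masks[MAX_PLANES])
{
	for (int j = 0; j < plane_count; j++) {
		for (int k = 0; k < MAX_PIPES; k++) {
			if (pipes[k].stream_id == stream_id &&
			    pipes[k].plane_id == plane_ids[j]) {
				*pipe_mask |= 1u << k;
				plane_pipe_masks[j] |= 1u << k;
			}
		}
	}
}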
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
index d5153fbac921..4bff52eaaef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
@@ -18,10 +18,10 @@ struct dml2_display_ttu_regs;
int dml21_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
int dml21_find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int plane_id);
bool dml21_get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane, unsigned int *plane_id);
-void dml21_update_pipe_ctx_dchub_regs(struct dml2_display_rq_regs *rq_regs,
- struct dml2_display_dlg_regs *disp_dlg_regs,
- struct dml2_display_ttu_regs *disp_ttu_regs,
- struct pipe_ctx *out);
+void dml21_pipe_populate_global_sync(struct dml2_context *dml_ctx,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dml2_per_stream_programming *stream_programming);
void dml21_populate_mall_allocation_size(struct dc_state *context,
struct dml2_context *in_ctx,
struct dml2_per_plane_programming *pln_prog,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index d35dd507cb9f..fb80ba9287b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -13,11 +13,11 @@
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
- *dml_ctx = (struct dml2_context *)kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ *dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
if (!(*dml_ctx))
return false;
- (*dml_ctx)->v21.dml_init.dml2_instance = (struct dml2_instance *)kzalloc(sizeof(struct dml2_instance), GFP_KERNEL);
+ (*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL);
if (!((*dml_ctx)->v21.dml_init.dml2_instance))
return false;
@@ -27,7 +27,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
(*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config;
(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
- (*dml_ctx)->v21.mode_programming.programming = (struct dml2_display_cfg_programming *)kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL);
+ (*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL);
if (!((*dml_ctx)->v21.mode_programming.programming))
return false;
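The hunks above drop the explicit casts on kzalloc(). In C, void * converts implicitly to any object pointer type, and kernel coding style discourages the cast because it adds noise and can hide a missing prototype. A user-space sketch of the same idiom, with calloc standing in for kzalloc:

#include <stdlib.h>

struct ctx { int x; };

static struct ctx *alloc_ctx(void)
{
	/* Preferred: no cast needed, void * converts implicitly. */
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;	/* mirrors the 'return false' paths above */
	return c;
}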
@@ -75,7 +75,6 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, co
{
switch (in_dc->ctx->dce_version) {
case DCN_VERSION_4_01:
- case DCN_VERSION_3_2: // TODO : Temporary for N-1 validation. Remove this after N-1 validation phase is complete.
(*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
break;
default:
@@ -233,13 +232,6 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml21_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml_ctx, in_dc->res_pool->pipe_count);
dml21_copy_clocks_to_dc_state(dml_ctx, context);
dml21_extract_watermark_sets(in_dc, &context->bw_ctx.bw.dcn.watermarks, dml_ctx);
- if (in_dc->ctx->dce_version == DCN_VERSION_3_2) {
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.a, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.b, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.c, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- dml21_extract_legacy_watermark_set(in_dc, &context->bw_ctx.bw.dcn.watermarks.d, DML2_DCHUB_WATERMARK_SET_A, dml_ctx);
- }
-
dml21_build_fams2_programming(in_dc, context, dml_ctx);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
deleted file mode 100644
index d82c681a5402..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn3_soc_bb.h
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- * Copyright 2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DML_DML_DCN3_SOC_BB__
-#define __DML_DML_DCN3_SOC_BB__
-
-#include "dml_top_soc_parameter_types.h"
-
-static const struct dml2_soc_qos_parameters dml_dcn31_soc_qos_params = {
- .derate_table = {
- .system_active_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .system_active_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .dcn_mall_prefetch_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .dcn_mall_prefetch_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .system_idle_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 100,
- },
- },
- .writeback = {
- .base_latency_us = 12,
- .scaling_factor_us = 0,
- .scaling_factor_mhz = 0,
- },
- .qos_params = {
- .dcn4x = {
- .df_qos_response_time_fclk_cycles = 300,
- .max_round_trip_to_furthest_cs_fclk_cycles = 350,
- .mall_overhead_fclk_cycles = 50,
- .meta_trip_adder_fclk_cycles = 36,
- .average_transport_distance_fclk_cycles = 257,
- .umc_urgent_ramp_latency_margin = 50,
- .umc_max_latency_margin = 30,
- .umc_average_latency_margin = 20,
- .fabric_max_transport_latency_margin = 20,
- .fabric_average_transport_latency_margin = 10,
-
- .per_uclk_dpm_params = {
- {
- .minimum_uclk_khz = 97,
- .urgent_ramp_uclk_cycles = 472,
- .trip_to_memory_uclk_cycles = 827,
- .meta_trip_to_memory_uclk_cycles = 827,
- .maximum_latency_when_urgent_uclk_cycles = 72,
- .average_latency_when_urgent_uclk_cycles = 61,
- .maximum_latency_when_non_urgent_uclk_cycles = 827,
- .average_latency_when_non_urgent_uclk_cycles = 118,
- },
- {
- .minimum_uclk_khz = 435,
- .urgent_ramp_uclk_cycles = 546,
- .trip_to_memory_uclk_cycles = 848,
- .meta_trip_to_memory_uclk_cycles = 848,
- .maximum_latency_when_urgent_uclk_cycles = 146,
- .average_latency_when_urgent_uclk_cycles = 90,
- .maximum_latency_when_non_urgent_uclk_cycles = 848,
- .average_latency_when_non_urgent_uclk_cycles = 135,
- },
- {
- .minimum_uclk_khz = 731,
- .urgent_ramp_uclk_cycles = 632,
- .trip_to_memory_uclk_cycles = 874,
- .meta_trip_to_memory_uclk_cycles = 874,
- .maximum_latency_when_urgent_uclk_cycles = 232,
- .average_latency_when_urgent_uclk_cycles = 124,
- .maximum_latency_when_non_urgent_uclk_cycles = 874,
- .average_latency_when_non_urgent_uclk_cycles = 155,
- },
- {
- .minimum_uclk_khz = 1187,
- .urgent_ramp_uclk_cycles = 716,
- .trip_to_memory_uclk_cycles = 902,
- .meta_trip_to_memory_uclk_cycles = 902,
- .maximum_latency_when_urgent_uclk_cycles = 316,
- .average_latency_when_urgent_uclk_cycles = 160,
- .maximum_latency_when_non_urgent_uclk_cycles = 902,
- .average_latency_when_non_urgent_uclk_cycles = 177,
- },
- },
- },
- },
- .qos_type = dml2_qos_param_type_dcn4x,
-};
-
-static const struct dml2_soc_bb dml2_socbb_dcn31 = {
- .clk_table = {
- .uclk = {
- .clk_values_khz = {97000, 435000, 731000, 1187000},
- .num_clk_values = 4,
- },
- .fclk = {
- .clk_values_khz = {300000, 2500000},
- .num_clk_values = 2,
- },
- .dcfclk = {
- .clk_values_khz = {200000, 1800000},
- .num_clk_values = 2,
- },
- .dispclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .dppclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .dtbclk = {
- .clk_values_khz = {100000, 2000000},
- .num_clk_values = 2,
- },
- .phyclk = {
- .clk_values_khz = {810000, 810000},
- .num_clk_values = 2,
- },
- .socclk = {
- .clk_values_khz = {300000, 1600000},
- .num_clk_values = 2,
- },
- .dscclk = {
- .clk_values_khz = {666667, 666667},
- .num_clk_values = 2,
- },
- .phyclk_d18 = {
- .clk_values_khz = {625000, 625000},
- .num_clk_values = 2,
- },
- .phyclk_d32 = {
- .clk_values_khz = {2000000, 2000000},
- .num_clk_values = 2,
- },
- .dram_config = {
- .channel_width_bytes = 2,
- .channel_count = 16,
- .transactions_per_clock = 16,
- },
- },
-
- .qos_parameters = {
- .derate_table = {
- .system_active_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .system_active_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .dcn_mall_prefetch_urgent = {
- .dram_derate_percent_pixel = 22,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 76,
- .dcfclk_derate_percent = 100,
- },
- .dcn_mall_prefetch_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 75,
- },
- .system_idle_average = {
- .dram_derate_percent_pixel = 17,
- .dram_derate_percent_vm = 0,
- .dram_derate_percent_pixel_and_vm = 0,
- .fclk_derate_percent = 57,
- .dcfclk_derate_percent = 100,
- },
- },
- .writeback = {
- .base_latency_us = 0,
- .scaling_factor_us = 0,
- .scaling_factor_mhz = 0,
- },
- .qos_params = {
- .dcn4x = {
- .df_qos_response_time_fclk_cycles = 300,
- .max_round_trip_to_furthest_cs_fclk_cycles = 350,
- .mall_overhead_fclk_cycles = 50,
- .meta_trip_adder_fclk_cycles = 36,
- .average_transport_distance_fclk_cycles = 260,
- .umc_urgent_ramp_latency_margin = 50,
- .umc_max_latency_margin = 30,
- .umc_average_latency_margin = 20,
- .fabric_max_transport_latency_margin = 20,
- .fabric_average_transport_latency_margin = 10,
-
- .per_uclk_dpm_params = {
- {
- // State 1
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 472,
- .trip_to_memory_uclk_cycles = 827,
- .meta_trip_to_memory_uclk_cycles = 827,
- .maximum_latency_when_urgent_uclk_cycles = 72,
- .average_latency_when_urgent_uclk_cycles = 72,
- .maximum_latency_when_non_urgent_uclk_cycles = 827,
- .average_latency_when_non_urgent_uclk_cycles = 117,
- },
- {
- // State 2
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 546,
- .trip_to_memory_uclk_cycles = 848,
- .meta_trip_to_memory_uclk_cycles = 848,
- .maximum_latency_when_urgent_uclk_cycles = 146,
- .average_latency_when_urgent_uclk_cycles = 146,
- .maximum_latency_when_non_urgent_uclk_cycles = 848,
- .average_latency_when_non_urgent_uclk_cycles = 133,
- },
- {
- // State 3
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 564,
- .trip_to_memory_uclk_cycles = 853,
- .meta_trip_to_memory_uclk_cycles = 853,
- .maximum_latency_when_urgent_uclk_cycles = 164,
- .average_latency_when_urgent_uclk_cycles = 164,
- .maximum_latency_when_non_urgent_uclk_cycles = 853,
- .average_latency_when_non_urgent_uclk_cycles = 136,
- },
- {
- // State 4
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 613,
- .trip_to_memory_uclk_cycles = 869,
- .meta_trip_to_memory_uclk_cycles = 869,
- .maximum_latency_when_urgent_uclk_cycles = 213,
- .average_latency_when_urgent_uclk_cycles = 213,
- .maximum_latency_when_non_urgent_uclk_cycles = 869,
- .average_latency_when_non_urgent_uclk_cycles = 149,
- },
- {
- // State 5
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 632,
- .trip_to_memory_uclk_cycles = 874,
- .meta_trip_to_memory_uclk_cycles = 874,
- .maximum_latency_when_urgent_uclk_cycles = 232,
- .average_latency_when_urgent_uclk_cycles = 232,
- .maximum_latency_when_non_urgent_uclk_cycles = 874,
- .average_latency_when_non_urgent_uclk_cycles = 153,
- },
- {
- // State 6
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 665,
- .trip_to_memory_uclk_cycles = 885,
- .meta_trip_to_memory_uclk_cycles = 885,
- .maximum_latency_when_urgent_uclk_cycles = 265,
- .average_latency_when_urgent_uclk_cycles = 265,
- .maximum_latency_when_non_urgent_uclk_cycles = 885,
- .average_latency_when_non_urgent_uclk_cycles = 161,
- },
- {
- // State 7
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 689,
- .trip_to_memory_uclk_cycles = 895,
- .meta_trip_to_memory_uclk_cycles = 895,
- .maximum_latency_when_urgent_uclk_cycles = 289,
- .average_latency_when_urgent_uclk_cycles = 289,
- .maximum_latency_when_non_urgent_uclk_cycles = 895,
- .average_latency_when_non_urgent_uclk_cycles = 167,
- },
- {
- // State 8
- .minimum_uclk_khz = 0,
- .urgent_ramp_uclk_cycles = 716,
- .trip_to_memory_uclk_cycles = 902,
- .meta_trip_to_memory_uclk_cycles = 902,
- .maximum_latency_when_urgent_uclk_cycles = 316,
- .average_latency_when_urgent_uclk_cycles = 316,
- .maximum_latency_when_non_urgent_uclk_cycles = 902,
- .average_latency_when_non_urgent_uclk_cycles = 174,
- },
- },
- },
- },
- .qos_type = dml2_qos_param_type_dcn4x,
- },
-
- .power_management_parameters = {
- .dram_clk_change_blackout_us = 400,
- .fclk_change_blackout_us = 0,
- .g7_ppt_blackout_us = 0,
- .stutter_enter_plus_exit_latency_us = 50,
- .stutter_exit_latency_us = 43,
- .z8_stutter_enter_plus_exit_latency_us = 0,
- .z8_stutter_exit_latency_us = 0,
- },
-
- .vmin_limit = {
- .dispclk_khz = 600 * 1000,
- },
-
- .dprefclk_mhz = 700,
- .xtalclk_mhz = 100,
- .pcie_refclk_mhz = 100,
- .dchub_refclk_mhz = 50,
- .mall_allocated_for_dcn_mbytes = 64,
- .max_outstanding_reqs = 512,
- .fabric_datapath_to_dcn_data_return_bytes = 64,
- .return_bus_width_bytes = 64,
- .hostvm_min_page_size_kbytes = 0,
- .gpuvm_min_page_size_kbytes = 256,
- .phy_downspread_percent = 0,
- .dcn_downspread_percent = 0,
- .dispclk_dppclk_vco_speed_mhz = 4500,
- .do_urgent_latency_adjustment = 0,
- .mem_word_bytes = 32,
- .num_dcc_mcaches = 8,
- .mcache_size_bytes = 2048,
- .mcache_line_size_bytes = 32,
- .max_fclk_for_uclk_dpm_khz = 1250 * 1000,
-};
-
-static const struct dml2_ip_capabilities dml2_dcn31_max_ip_caps = {
- .pipe_count = 4,
- .otg_count = 4,
- .num_dsc = 4,
- .max_num_dp2p0_streams = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_dp2p0_outputs = 4,
- .rob_buffer_size_kbytes = 192,
- .config_return_buffer_size_in_kbytes = 1152,
- .meta_fifo_size_in_kentries = 22,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .subvp_drr_scheduling_margin_us = 100,
- .subvp_prefetch_end_to_mall_start_us = 15,
- .subvp_fw_processing_delay = 15,
-
- .fams2 = {
- .max_allow_delay_us = 100 * 1000,
- .scheduling_delay_us = 50,
- .vertical_interrupt_ack_delay_us = 18,
- .allow_programming_delay_us = 18,
- .min_allow_width_us = 20,
- .subvp_df_throttle_delay_us = 100,
- .subvp_programming_delay_us = 18,
- .subvp_prefetch_to_mall_delay_us = 18,
- .drr_programming_delay_us = 18,
- },
-};
-
-#endif /* __DML_DML_DCN3_SOC_BB__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
index 8ef7977841de..793e1c038efd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
@@ -344,6 +344,7 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
.config_return_buffer_segment_size_in_kbytes = 64,
.meta_fifo_size_in_kentries = 22,
.compressed_buffer_segment_size_in_kbytes = 64,
+ .cursor_buffer_size = 24,
.max_flip_time_us = 80,
.max_flip_time_lines = 32,
.hostvm_mode = 0,
@@ -354,7 +355,7 @@ static const struct dml2_ip_capabilities dml2_dcn401_max_ip_caps = {
.fams2 = {
.max_allow_delay_us = 100 * 1000,
- .scheduling_delay_us = 125,
+ .scheduling_delay_us = 550,
.vertical_interrupt_ack_delay_us = 40,
.allow_programming_delay_us = 18,
.min_allow_width_us = 20,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index 83fc15bf13cf..25b607e7b726 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -88,6 +88,7 @@ struct dml2_display_arb_regs {
uint32_t sdpif_request_rate_limit;
uint32_t allow_sdpif_rate_limit_when_cstate_req;
uint32_t dcfclk_deep_sleep_hysteresis;
+ uint32_t pstate_stall_threshold;
};
struct dml2_cursor_dlg_regs{
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
index b132f676a68d..5e1ab6d97640 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
@@ -10,9 +10,10 @@
#define DML2_MAX_PLANES 8
#define DML2_MAX_DCN_PIPES 8
#define DML2_MAX_MCACHES 8 // assume plane is going to be supported by a max of 8 mcaches
+#define DML2_MAX_WRITEBACK 3
enum dml2_swizzle_mode {
- dml2_sw_linear,
+ dml2_sw_linear, // SW_LINEAR accepts a 256-byte-aligned pitch, or a 128-byte-aligned pitch when DCC is not enabled
dml2_sw_256b_2d,
dml2_sw_4kb_2d,
dml2_sw_64kb_2d,
@@ -24,7 +25,8 @@ enum dml2_swizzle_mode {
dml2_gfx11_sw_64kb_d_x,
dml2_gfx11_sw_64kb_r_x,
dml2_gfx11_sw_256kb_d_x,
- dml2_gfx11_sw_256kb_r_x
+ dml2_gfx11_sw_256kb_r_x,
+
};
enum dml2_source_format_class {
@@ -38,7 +40,13 @@ enum dml2_source_format_class {
dml2_rgbe_alpha = 9,
dml2_rgbe = 10,
dml2_mono_8 = 11,
- dml2_mono_16 = 12
+ dml2_mono_16 = 12,
+ dml2_422_planar_8 = 13,
+ dml2_422_planar_10 = 14,
+ dml2_422_planar_12 = 15,
+ dml2_422_packed_8 = 16,
+ dml2_422_packed_10 = 17,
+ dml2_422_packed_12 = 18
};
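With six 4:2:2 variants added above, code that branches on subsampling class has to recognize them; the dml_is_420() change later in this patch simply returns 0 for each. If a matching predicate were wanted, it could mirror the same pattern — a hypothetical sketch assuming this header's enum, not something the patch adds:

#include <stdbool.h>

/* Hypothetical helper, mirroring the dml_is_420() pattern: true only
 * for the 4:2:2 source formats introduced above. */
static bool dml_is_422(enum dml2_source_format_class f)
{
	switch (f) {
	case dml2_422_planar_8:
	case dml2_422_planar_10:
	case dml2_422_planar_12:
	case dml2_422_packed_8:
	case dml2_422_packed_10:
	case dml2_422_packed_12:
		return true;
	default:
		return false;
	}
}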
enum dml2_rotation_angle {
@@ -121,15 +129,6 @@ enum dml2_dsc_enable_option {
dml2_dsc_enable_if_necessary = 2
};
-enum dml2_pstate_support_method {
- dml2_pstate_method_uninitialized,
- dml2_pstate_method_not_supported,
- dml2_pstate_method_vactive,
- dml2_pstate_method_vblank,
- dml2_pstate_method_svp,
- dml2_pstate_method_drr
-};
-
enum dml2_tdlut_addressing_mode {
dml2_tdlut_sw_linear = 0,
dml2_tdlut_simple_linear = 1
@@ -287,22 +286,23 @@ struct dml2_link_output_cfg {
bool validate_output; // Do not validate the link configuration for this display stream.
};
-struct dml2_writeback_cfg {
- bool enable;
+struct dml2_writeback_info {
enum dml2_source_format_class pixel_format;
- unsigned int active_writebacks_per_surface;
+ unsigned long input_width;
+ unsigned long input_height;
+ unsigned long output_width;
+ unsigned long output_height;
+ unsigned long v_taps;
+ unsigned long h_taps;
+ unsigned long v_taps_chroma;
+ unsigned long h_taps_chroma;
+ double h_ratio;
+ double v_ratio;
+};
- struct {
- bool enabled;
- unsigned long input_width;
- unsigned long input_height;
- unsigned long output_width;
- unsigned long output_height;
- unsigned long v_taps;
- unsigned long h_taps;
- double h_ratio;
- double v_ratio;
- } scaling_info;
+struct dml2_writeback_cfg {
+ unsigned int active_writebacks_per_stream;
+ struct dml2_writeback_info writeback_stream[DML2_MAX_WRITEBACK];
};
struct dml2_plane_parameters {
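The reshaped writeback config above replaces the single implicit scaling_info with an array of dml2_writeback_info, so a stream can carry up to DML2_MAX_WRITEBACK writebacks. A minimal population sketch, assuming only the field layout shown in this hunk (the pixel-format enumerator is an assumed value, for illustration only):

/* Describe a single 1080p -> 720p writeback, assuming the struct
 * layout introduced above. */
static void setup_one_writeback(struct dml2_writeback_cfg *wb)
{
	struct dml2_writeback_info *info = &wb->writeback_stream[0];

	wb->active_writebacks_per_stream = 1;
	info->pixel_format = dml2_444_32;	/* assumed enumerator */
	info->input_width = 1920;
	info->input_height = 1080;
	info->output_width = 1280;
	info->output_height = 720;
	info->h_taps = 4;
	info->v_taps = 4;
	info->h_ratio = 1920.0 / 1280.0;
	info->v_ratio = 1080.0 / 720.0;
}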
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index ebd8abe894a9..5f0bc42d1d2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -167,11 +167,13 @@ struct dml2_ip_capabilities {
unsigned int max_num_dp2p0_streams;
unsigned int max_num_hdmi_frl_outputs;
unsigned int max_num_dp2p0_outputs;
+ unsigned int max_num_wb;
unsigned int rob_buffer_size_kbytes;
unsigned int config_return_buffer_size_in_kbytes;
unsigned int config_return_buffer_segment_size_in_kbytes;
unsigned int meta_fifo_size_in_kentries;
unsigned int compressed_buffer_segment_size_in_kbytes;
+ unsigned int cursor_buffer_size;
unsigned int max_flip_time_us;
unsigned int max_flip_time_lines;
unsigned int hostvm_mode;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index eeb96c455658..d2d053f2354d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -26,20 +26,14 @@ enum dml2_project_id {
dml2_project_dcn4x_stage2_auto_drr_svp = 3,
};
-enum dml2_dram_clock_change_support {
- dml2_dram_clock_change_vactive = 0,
- dml2_dram_clock_change_vblank = 1,
- dml2_dram_clock_change_vblank_and_vactive = 2,
- dml2_dram_clock_change_drr = 3,
- dml2_dram_clock_change_mall_svp = 4,
- dml2_dram_clock_change_mall_full_frame = 6,
- dml2_dram_clock_change_unsupported = 7
-};
-
-enum dml2_fclock_change_support {
- dml2_fclock_change_vactive = 0,
- dml2_fclock_change_vblank = 1,
- dml2_fclock_change_unsupported = 2
+enum dml2_pstate_change_support {
+ dml2_pstate_change_vactive = 0,
+ dml2_pstate_change_vblank = 1,
+ dml2_pstate_change_vblank_and_vactive = 2,
+ dml2_pstate_change_drr = 3,
+ dml2_pstate_change_mall_svp = 4,
+ dml2_pstate_change_mall_full_frame = 6,
+ dml2_pstate_change_unsupported = 7
};
enum dml2_output_type_and_rate__type {
@@ -202,24 +196,23 @@ struct dml2_mcache_surface_allocation {
} informative;
};
-enum dml2_uclk_pstate_support_method {
- dml2_uclk_pstate_support_method_not_supported = 0,
- /* hw */
- dml2_uclk_pstate_support_method_vactive = 1,
- dml2_uclk_pstate_support_method_vblank = 2,
- dml2_uclk_pstate_support_method_reserved_hw = 5,
- /* fw */
- dml2_uclk_pstate_support_method_fw_subvp_phantom = 6,
- dml2_uclk_pstate_support_method_reserved_fw = 10,
- /* fw w/drr */
- dml2_uclk_pstate_support_method_fw_vactive_drr = 11,
- dml2_uclk_pstate_support_method_fw_vblank_drr = 12,
- dml2_uclk_pstate_support_method_fw_subvp_phantom_drr = 13,
- dml2_uclk_pstate_support_method_reserved_fw_drr_fixed = 20,
- dml2_uclk_pstate_support_method_fw_drr = 21,
- dml2_uclk_pstate_support_method_reserved_fw_drr_var = 22,
-
- dml2_uclk_pstate_support_method_count
+enum dml2_pstate_method {
+ dml2_pstate_method_na = 0,
+ /* hw exclusive modes */
+ dml2_pstate_method_vactive = 1,
+ dml2_pstate_method_vblank = 2,
+ dml2_pstate_method_reserved_hw = 5,
+ /* fw assisted exclusive modes */
+ dml2_pstate_method_fw_svp = 6,
+ dml2_pstate_method_reserved_fw = 10,
+ /* fw assisted modes requiring drr modulation */
+ dml2_pstate_method_fw_vactive_drr = 11,
+ dml2_pstate_method_fw_vblank_drr = 12,
+ dml2_pstate_method_fw_svp_drr = 13,
+ dml2_pstate_method_reserved_fw_drr_clamped = 20,
+ dml2_pstate_method_fw_drr = 21,
+ dml2_pstate_method_reserved_fw_drr_var = 22,
+ dml2_pstate_method_count
};
struct dml2_per_plane_programming {
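The renamed enum above encodes its grouping in the values: 1-5 are hardware-exclusive modes, 6-10 firmware-assisted, and 11-22 firmware-assisted with DRR modulation. Hypothetical range-based predicates built on that layout (an inference from the comments and values above, not an API this patch adds):

#include <stdbool.h>

/* Hypothetical classification helpers based on the value ranges in
 * enum dml2_pstate_method above. */
static bool pstate_method_is_fw_assisted(enum dml2_pstate_method m)
{
	return m >= dml2_pstate_method_fw_svp &&
	       m <= dml2_pstate_method_reserved_fw_drr_var;
}

static bool pstate_method_needs_drr(enum dml2_pstate_method m)
{
	return m >= dml2_pstate_method_fw_vactive_drr &&
	       m <= dml2_pstate_method_reserved_fw_drr_var;
}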
@@ -241,7 +234,7 @@ struct dml2_per_plane_programming {
// If a stream is using odm split, then this value is always 1
unsigned int num_dpps_required;
- enum dml2_uclk_pstate_support_method uclk_pstate_support_method;
+ enum dml2_pstate_method uclk_pstate_support_method;
// MALL size requirements for MALL SS and SubVP
unsigned int surface_size_mall_bytes;
@@ -281,7 +274,7 @@ struct dml2_per_stream_programming {
unsigned int num_odms_required;
- enum dml2_uclk_pstate_support_method uclk_pstate_method;
+ enum dml2_pstate_method uclk_pstate_method;
struct {
bool enabled;
@@ -289,7 +282,8 @@ struct dml2_per_stream_programming {
union dml2_global_sync_programming global_sync;
} phantom_stream;
- struct dmub_fams2_stream_static_state fams2_params;
+ union dmub_cmd_fams2_config fams2_base_params;
+ union dmub_cmd_fams2_config fams2_sub_params;
};
//-----------------
@@ -339,7 +333,7 @@ struct dml2_mode_support_info {
bool DCCMetaBufferSizeNotExceeded;
bool TotalVerticalActiveBandwidthSupport;
bool VActiveBandwidthSupport;
- enum dml2_fclock_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
bool USRRetrainingSupport;
bool PrefetchSupported;
bool DynamicMetadataSupported;
@@ -361,6 +355,7 @@ struct dml2_mode_support_info {
unsigned int AlignedYPitch[DML2_MAX_PLANES];
unsigned int AlignedCPitch[DML2_MAX_PLANES];
bool g6_temp_read_support;
+ bool temp_read_or_ppt_support;
}; // dml2_mode_support_info
struct dml2_display_cfg_programming {
@@ -392,6 +387,11 @@ struct dml2_display_cfg_programming {
unsigned long fclk_khz;
unsigned long dcfclk_khz;
} svp_prefetch;
+ struct {
+ unsigned long uclk_khz;
+ unsigned long fclk_khz;
+ unsigned long dcfclk_khz;
+ } svp_prefetch_no_throttle;
unsigned long deepsleep_dcfclk_khz;
unsigned long dispclk_khz;
@@ -444,7 +444,7 @@ struct dml2_display_cfg_programming {
double pstate_change_us;
double fclk_pstate_change_us;
double usr_retraining_us;
- double g6_temp_read_watermark_us;
+ double temp_read_or_ppt_watermark_us;
} watermarks;
struct {
@@ -653,6 +653,7 @@ struct dml2_display_cfg_programming {
double DisplayPipeLineDeliveryTimeLumaPrefetch[DML2_MAX_PLANES];
double DisplayPipeLineDeliveryTimeChromaPrefetch[DML2_MAX_PLANES];
+ double WritebackRequiredBandwidth;
double WritebackAllowDRAMClockChangeEndPosition[DML2_MAX_PLANES];
double WritebackAllowFCLKChangeEndPosition[DML2_MAX_PLANES];
double DSCCLK_calculated[DML2_MAX_PLANES];
@@ -662,6 +663,7 @@ struct dml2_display_cfg_programming {
double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
unsigned int PrefetchMode[DML2_MAX_PLANES]; // LEGACY_ONLY
bool ROBUrgencyAvoidance;
+ double LowestPrefetchMargin;
} misc;
struct dml2_mode_support_info mode_support_info;
@@ -675,6 +677,7 @@ struct dml2_display_cfg_programming {
bool failed_mcache_validation;
bool failed_dpmm;
bool failed_mode_programming;
+ bool failed_map_watermarks;
} informative;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index 0aa4e4d343b0..d68b4567e218 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -9,7 +9,7 @@
#include "dml2_debug.h"
#include "lib_float_math.h"
-static const struct dml2_core_ip_params core_dcn4_ip_caps_base = {
+struct dml2_core_ip_params core_dcn4_ip_caps_base = {
// Hardcoded values for DCN3x
.vblank_nom_default_us = 668,
.remote_iommu_outstanding_translations = 256,
@@ -90,6 +90,7 @@ static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *i
ip_caps->config_return_buffer_segment_size_in_kbytes = ip_params->config_return_buffer_segment_size_in_kbytes;
ip_caps->meta_fifo_size_in_kentries = ip_params->meta_fifo_size_in_kentries;
ip_caps->compressed_buffer_segment_size_in_kbytes = ip_params->compressed_buffer_segment_size_in_kbytes;
+ ip_caps->cursor_buffer_size = ip_params->cursor_buffer_size;
ip_caps->max_flip_time_us = ip_params->max_flip_time_us;
ip_caps->max_flip_time_lines = ip_params->max_flip_time_lines;
ip_caps->hostvm_mode = ip_params->hostvm_mode;
@@ -114,6 +115,7 @@ static void patch_ip_params_with_ip_caps(struct dml2_core_ip_params *ip_params,
ip_params->config_return_buffer_segment_size_in_kbytes = ip_caps->config_return_buffer_segment_size_in_kbytes;
ip_params->meta_fifo_size_in_kentries = ip_caps->meta_fifo_size_in_kentries;
ip_params->compressed_buffer_segment_size_in_kbytes = ip_caps->compressed_buffer_segment_size_in_kbytes;
+ ip_params->cursor_buffer_size = ip_caps->cursor_buffer_size;
ip_params->max_flip_time_us = ip_caps->max_flip_time_us;
ip_params->max_flip_time_lines = ip_caps->max_flip_time_lines;
ip_params->hostvm_mode = ip_caps->hostvm_mode;
@@ -159,6 +161,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters
phantom->timing.v_total = meta->v_total;
phantom->timing.v_active = meta->v_active;
phantom->timing.v_front_porch = meta->v_front_porch;
+ phantom->timing.v_blank_end = phantom->timing.v_total - phantom->timing.v_front_porch - phantom->timing.v_active;
phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active;
phantom->timing.drr_config.enabled = false;
}
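The added v_blank_end assignment keeps the phantom timing self-consistent: v_blank_end is the part of the blank that precedes active video (sync plus back porch), i.e. v_total minus front porch minus active lines, while vblank_nom remains the whole blank. A worked example with illustrative numbers:

static void phantom_timing_example(void)
{
	/* Illustrative numbers only. */
	unsigned int v_total = 2200, v_active = 2160, v_front_porch = 8;
	unsigned int vblank_nom  = v_total - v_active;                 /* 40 */
	unsigned int v_blank_end = v_total - v_front_porch - v_active; /* 32: sync + back porch */

	(void)vblank_nom;
	(void)v_blank_end;
}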
@@ -315,28 +318,9 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
// Setup the appropriate p-state strategy
if (display_cfg->stage3.performed && display_cfg->stage3.success) {
- switch (display_cfg->stage3.pstate_switch_modes[plane_index]) {
- case dml2_uclk_pstate_support_method_vactive:
- case dml2_uclk_pstate_support_method_vblank:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom:
- case dml2_uclk_pstate_support_method_fw_drr:
- case dml2_uclk_pstate_support_method_fw_vactive_drr:
- case dml2_uclk_pstate_support_method_fw_vblank_drr:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
- programming->plane_programming[plane_index].uclk_pstate_support_method = display_cfg->stage3.pstate_switch_modes[plane_index];
- break;
- case dml2_uclk_pstate_support_method_reserved_hw:
- case dml2_uclk_pstate_support_method_reserved_fw:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_fixed:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_var:
- case dml2_uclk_pstate_support_method_not_supported:
- case dml2_uclk_pstate_support_method_count:
- default:
- programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
- break;
- }
+ programming->plane_programming[plane_index].uclk_pstate_support_method = display_cfg->stage3.pstate_switch_modes[plane_index];
} else {
- programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
+ programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_na;
}
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
@@ -359,7 +343,8 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
/* unconditionally populate fams2 params */
dml2_core_calcs_get_stream_fams2_programming(&core->clean_me_up.mode_lib,
display_cfg,
- &programming->stream_programming[main_plane->stream_index].fams2_params,
+ &programming->stream_programming[main_plane->stream_index].fams2_base_params,
+ &programming->stream_programming[main_plane->stream_index].fams2_sub_params,
programming->stream_programming[main_plane->stream_index].uclk_pstate_method,
plane_index);
@@ -571,18 +556,18 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out
in_out->programming->plane_programming[plane_index].num_dpps_required = core->clean_me_up.mode_lib.mp.NoOfDPP[plane_index];
if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_main_pipe)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_fw_svp;
else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_fw_svp;
else if (in_out->programming->display_config.plane_descriptors[plane_index].overrides.legacy_svp_config == dml2_svp_mode_override_phantom_pipe_no_data_return)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_fw_svp;
else {
if (core->clean_me_up.mode_lib.mp.MaxActiveDRAMClockChangeLatencySupported[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vactive;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_vactive;
else if (core->clean_me_up.mode_lib.mp.TWait[plane_index] >= core->clean_me_up.mode_lib.soc.power_management_parameters.dram_clk_change_blackout_us)
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_vblank;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_vblank;
else
- in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_uclk_pstate_support_method_not_supported;
+ in_out->programming->plane_programming[plane_index].uclk_pstate_support_method = dml2_pstate_method_na;
}
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &in_out->programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
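The rewritten ladder above maps directly onto the new enum: SubVP overrides force fw_svp; otherwise vactive wins if the plane can hide a full DRAM-clock-change blackout while active, vblank if TWait covers it, and na when nothing does. Condensed into a stand-in sketch with the same ordering (parameter names are illustrative, latencies in microseconds):

#include <stdbool.h>

/* Stand-in sketch of the method selection above. */
static enum dml2_pstate_method pick_uclk_pstate_method(bool svp_override,
		double max_active_latency_hiding_us,
		double twait_us,
		double dram_blackout_us)
{
	if (svp_override)
		return dml2_pstate_method_fw_svp;
	if (max_active_latency_hiding_us >= dram_blackout_us)
		return dml2_pstate_method_vactive;
	if (twait_us >= dram_blackout_us)
		return dml2_pstate_method_vblank;
	return dml2_pstate_method_na;
}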
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 3ea54fd52e46..8ed49a9df378 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -11,6 +11,10 @@
#define DML2_MAX_FMT_420_BUFFER_WIDTH 4096
#define DML_MAX_NUM_OF_SLICES_PER_DSC 4
+#define DML_MAX_COMPRESSION_RATIO 4
+//#define DML_MODE_SUPPORT_USE_DPM_DRAM_BW
+//#define DML_GLOBAL_PREFETCH_CHECK
+#define ALLOW_SDPIF_RATE_LIMIT_PRE_CSTATE
const char *dml2_core_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type)
{
@@ -131,9 +135,9 @@ static void dml2_print_mode_support_info(const struct dml2_core_internal_mode_su
dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
- if (!fail_only || support->PTEBufferSizeNotExceeded == 1)
+ if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
- if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1)
+ if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
@@ -314,12 +318,11 @@ dml_get_var_func(meta_trip_memory_us, double, mode_lib->mp.MetaTripToMemory);
dml_get_var_func(wm_fclk_change, double, mode_lib->mp.Watermark.FCLKChangeWatermark);
dml_get_var_func(wm_usr_retraining, double, mode_lib->mp.Watermark.USRRetrainingWatermark);
-dml_get_var_func(wm_g6_temp_read, double, mode_lib->mp.Watermark.g6_temp_read_watermark_us);
+dml_get_var_func(wm_temp_read_or_ppt, double, mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us);
dml_get_var_func(wm_dram_clock_change, double, mode_lib->mp.Watermark.DRAMClockChangeWatermark);
dml_get_var_func(fraction_of_urgent_bandwidth, double, mode_lib->mp.FractionOfUrgentBandwidth);
dml_get_var_func(fraction_of_urgent_bandwidth_imm_flip, double, mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip);
dml_get_var_func(fraction_of_urgent_bandwidth_mall, double, mode_lib->mp.FractionOfUrgentBandwidthMALL);
-dml_get_var_func(urgent_latency, double, mode_lib->mp.UrgentLatency);
dml_get_var_func(wm_writeback_dram_clock_change, double, mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
dml_get_var_func(wm_writeback_fclk_change, double, mode_lib->mp.Watermark.WritebackFCLKChangeWatermark);
dml_get_var_func(stutter_efficiency, double, mode_lib->mp.StutterEfficiency);
@@ -354,7 +357,9 @@ dml_get_var_func(svp_prefetch_urg_bw_available_sdp, double, mode_lib->mp.urg_ban
dml_get_var_func(svp_prefetch_urg_bw_available_dram, double, mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_svp_prefetch][dml2_core_internal_bw_dram]);
dml_get_var_func(svp_prefetch_urg_bw_available_dram_vm_only, double, mode_lib->mp.urg_bandwidth_available_vm_only[dml2_core_internal_soc_state_svp_prefetch]);
+dml_get_var_func(urgent_latency, double, mode_lib->mp.UrgentLatency);
dml_get_var_func(max_urgent_latency_us, double, mode_lib->ms.support.max_urgent_latency_us);
+dml_get_var_func(max_non_urgent_latency_us, double, mode_lib->ms.support.max_non_urgent_latency_us);
dml_get_var_func(avg_non_urgent_latency_us, double, mode_lib->ms.support.avg_non_urgent_latency_us);
dml_get_var_func(avg_urgent_latency_us, double, mode_lib->ms.support.avg_urgent_latency_us);
@@ -465,6 +470,24 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
case dml2_420_12:
val = 1;
break;
+ case dml2_422_planar_8:
+ val = 0;
+ break;
+ case dml2_422_planar_10:
+ val = 0;
+ break;
+ case dml2_422_planar_12:
+ val = 0;
+ break;
+ case dml2_422_packed_8:
+ val = 0;
+ break;
+ case dml2_422_packed_10:
+ val = 0;
+ break;
+ case dml2_422_packed_12:
+ val = 0;
+ break;
case dml2_rgbe_alpha:
val = 0;
break;
@@ -486,32 +509,31 @@ static bool dml_is_420(enum dml2_source_format_class source_format)
static unsigned int dml_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode)
{
- switch (sw_mode) {
- case (dml2_sw_linear):
- return 256; break;
- case (dml2_sw_256b_2d):
- return 256; break;
- case (dml2_sw_4kb_2d):
- return 4096; break;
- case (dml2_sw_64kb_2d):
- return 65536; break;
- case (dml2_sw_256kb_2d):
- return 262144; break;
- case (dml2_gfx11_sw_linear):
- return 256; break;
- case (dml2_gfx11_sw_64kb_d):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_t):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_x):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_r_x):
- return 65536; break;
- case (dml2_gfx11_sw_256kb_d_x):
- return 262144; break;
- case (dml2_gfx11_sw_256kb_r_x):
- return 262144; break;
- default:
+ if (sw_mode == dml2_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_sw_256b_2d)
+ return 256;
+ else if (sw_mode == dml2_sw_4kb_2d)
+ return 4096;
+ else if (sw_mode == dml2_sw_64kb_2d)
+ return 65536;
+ else if (sw_mode == dml2_sw_256kb_2d)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_t)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_r_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_256kb_d_x)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
+ return 262144;
+ else {
DML2_ASSERT(0);
return 256;
}
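The rewrite above flattens the old switch into an if/else chain with an identical mapping. The same data could also be expressed as a designated-initializer table — a style alternative, not what the patch does, and unlike the chain it drops the DML2_ASSERT on unknown modes; it assumes the enumerators above keep small non-negative values:

/* Equivalent table-driven mapping of swizzle mode to tile block size. */
static const unsigned int tile_block_size_bytes[] = {
	[dml2_sw_linear]          = 256,
	[dml2_sw_256b_2d]         = 256,
	[dml2_sw_4kb_2d]          = 4096,
	[dml2_sw_64kb_2d]         = 65536,
	[dml2_sw_256kb_2d]        = 262144,
	[dml2_gfx11_sw_linear]    = 256,
	[dml2_gfx11_sw_64kb_d]    = 65536,
	[dml2_gfx11_sw_64kb_d_t]  = 65536,
	[dml2_gfx11_sw_64kb_d_x]  = 65536,
	[dml2_gfx11_sw_64kb_r_x]  = 65536,
	[dml2_gfx11_sw_256kb_d_x] = 262144,
	[dml2_gfx11_sw_256kb_r_x] = 262144,
};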
@@ -578,8 +600,8 @@ static void CalculateBytePerPixelAndBlockSizes(
{
*BytePerPixelDETY = 0;
*BytePerPixelDETC = 0;
- *BytePerPixelY = 0;
- *BytePerPixelC = 0;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 1;
if (SourcePixelFormat == dml2_444_64) {
*BytePerPixelDETY = 8;
@@ -819,7 +841,7 @@ static void CalculateSwathWidth(
// Output
unsigned int req_per_swath_ub_l[],
unsigned int req_per_swath_ub_c[],
- unsigned int SwathWidthSingleDPPY[],
+ unsigned int SwathWidthSingleDPPY[], // post-rotated plane width
unsigned int SwathWidthSingleDPPC[],
unsigned int SwathWidthY[], // per-pipe
unsigned int SwathWidthC[], // per-pipe
@@ -1402,7 +1424,6 @@ static unsigned int dscceComputeDelay(
// N422/N420 operate at 2 pixels per clock
unsigned int pixelsPerClock, padding_pixels, ssm_group_priming_delay, ssm_pipeline_delay, obsm_pipeline_delay, slice_padded_pixels, ixd_plus_padding, ixd_plus_padding_groups, cycles_per_group, group_delay, pipeline_delay, pixels, additional_group_delay, lines_to_reach_ixd, groups_to_reach_ixd, slice_width_groups, initial_xmit_delay, number_of_lines_to_reach_ixd, slice_width_modified;
-
if (pixelFormat == dml2_420)
pixelsPerClock = 2;
// #all other modes operate at 1 pixel per clock
@@ -1427,7 +1448,6 @@ static unsigned int dscceComputeDelay(
}
}
-
//sub-stream multiplexer balance fifo priming delay in groups as per dsc standard
if (bpc == 8)
ssm_group_priming_delay = 83;
@@ -1446,9 +1466,6 @@ static unsigned int dscceComputeDelay(
//determine number of padded pixels in the last group of a slice line, computed as
slice_padded_pixels = 3 * slice_width_groups - slice_width_modified;
-
-
-
//determine integer number of complete slice lines required to reach initial transmit delay without ssm delay considered
number_of_lines_to_reach_ixd = initial_xmit_delay / slice_width_modified;
@@ -1462,7 +1479,6 @@ static unsigned int dscceComputeDelay(
//number of groups required for a slice to reach initial transmit delay is the sum of the padded initial transmit delay plus the ssm group priming delay
groups_to_reach_ixd = ixd_plus_padding_groups + ssm_group_priming_delay;
-
//number of lines required to reach padded initial transmit delay in groups in slices to the left of the last horizontal slice
//needs to be rounded up as a complete slice lines are buffered prior to initial transmit delay being reached in the last horizontal slice
lines_to_reach_ixd = (groups_to_reach_ixd + slice_width_groups - 1) / slice_width_groups; //round up lines to reach ixd to next
@@ -1505,7 +1521,6 @@ static unsigned int dscceComputeDelay(
return pixels;
}
-
//updated in dcn4
static unsigned int dscComputeDelay(enum dml2_output_format_class pixelFormat, enum dml2_output_encoder_class Output)
{
@@ -2089,7 +2104,6 @@ static void CalculateDCCConfiguration(
yuv420 = 1;
else
yuv420 = 0;
-
horz_div_l = 1;
horz_div_c = 1;
vert_div_l = 1;
@@ -2560,8 +2574,7 @@ static void calculate_mcache_setting(
if (*p->num_mcaches_l) {
l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
}
-
- if (l->is_dual_plane && *p->num_mcaches_c) {
+ if (l->is_dual_plane) {
l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c;
if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
@@ -2681,12 +2694,12 @@ static double dml_get_return_bandwidth_available(
bool is_avg_bw,
bool is_hvm_en,
bool is_hvm_only,
- double dcflk_mhz,
+ double dcfclk_mhz,
double fclk_mhz,
double dram_bw_mbps)
{
double return_bw_mbps = 0.;
- double ideal_sdp_bandwidth = (double)soc->return_bus_width_bytes * dcflk_mhz;
+ double ideal_sdp_bandwidth = (double)soc->return_bus_width_bytes * dcfclk_mhz;
double ideal_fabric_bandwidth = fclk_mhz * (double)soc->fabric_datapath_to_dcn_data_return_bytes;
double ideal_dram_bandwidth = dram_bw_mbps; //dram_speed_mts * soc->clk_table.dram_config.channel_count * soc->clk_table.dram_config.channel_width_bytes;
@@ -2752,7 +2765,7 @@ static double dml_get_return_bandwidth_available(
dml2_printf("DML::%s: is_hvm_only = %u\n", __func__, is_hvm_only);
dml2_printf("DML::%s: state_type = %s\n", __func__, dml2_core_internal_soc_state_type_str(state_type));
dml2_printf("DML::%s: bw_type = %s\n", __func__, dml2_core_internal_bw_type_str(bw_type));
- dml2_printf("DML::%s: dcflk_mhz = %f\n", __func__, dcflk_mhz);
+ dml2_printf("DML::%s: dcfclk_mhz = %f\n", __func__, dcfclk_mhz);
dml2_printf("DML::%s: fclk_mhz = %f\n", __func__, fclk_mhz);
dml2_printf("DML::%s: ideal_sdp_bandwidth = %f\n", __func__, ideal_sdp_bandwidth);
dml2_printf("DML::%s: ideal_fabric_bandwidth = %f\n", __func__, ideal_fabric_bandwidth);
@@ -2765,7 +2778,7 @@ static double dml_get_return_bandwidth_available(
return return_bw_mbps;
}
-static void calculate_bandwidth_available(
+static noinline_for_stack void calculate_bandwidth_available(
double avg_bandwidth_available_min[dml2_core_internal_soc_state_max],
double avg_bandwidth_available[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
double urg_bandwidth_available_min[dml2_core_internal_soc_state_max], // min between SDP and DRAM
@@ -3515,10 +3528,9 @@ static void CalculateUrgentBurstFactor(
dml2_printf("DML::%s: UrgentBurstFactorChroma = %f\n", __func__, *UrgentBurstFactorChroma);
dml2_printf("DML::%s: NotEnoughUrgentLatencyHiding = %d\n", __func__, *NotEnoughUrgentLatencyHiding);
#endif
-
}
-static void CalculateDCFCLKDeepSleep(
+static void CalculateDCFCLKDeepSleepTdlut(
const struct dml2_display_cfg *display_cfg,
unsigned int NumberOfActiveSurfaces,
unsigned int BytePerPixelY[],
@@ -3533,6 +3545,10 @@ static void CalculateDCFCLKDeepSleep(
double ReadBandwidthChroma[],
unsigned int ReturnBusWidth,
+ double dispclk,
+ unsigned int tdlut_bytes_to_deliver[],
+ double prefetch_swath_time_us[],
+
// Output
double *DCFClkDeepSleep)
{
@@ -3567,6 +3583,22 @@ static void CalculateDCFCLKDeepSleep(
}
DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], pixel_rate_mhz / 16);
+ // adjust for 3dlut delivery time
+ if (display_cfg->plane_descriptors[k].tdlut.setup_for_tdlut && tdlut_bytes_to_deliver[k] > 0) {
+ double tdlut_required_deepsleep_dcfclk = (double) tdlut_bytes_to_deliver[k] / 64.0 / prefetch_swath_time_us[k];
+
+ dml2_printf("DML::%s: k=%d, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
+ dml2_printf("DML::%s: k=%d, tdlut_bytes_to_deliver = %d\n", __func__, k, tdlut_bytes_to_deliver[k]);
+ dml2_printf("DML::%s: k=%d, prefetch_swath_time_us = %f\n", __func__, k, prefetch_swath_time_us[k]);
+ dml2_printf("DML::%s: k=%d, tdlut_required_deepsleep_dcfclk = %f\n", __func__, k, tdlut_required_deepsleep_dcfclk);
+
+ // increase the deepsleep dcfclk to match the original dispclk throughput rate
+ if (tdlut_required_deepsleep_dcfclk > DCFClkDeepSleepPerSurface[k]) {
+ DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], tdlut_required_deepsleep_dcfclk);
+ DCFClkDeepSleepPerSurface[k] = math_max2(DCFClkDeepSleepPerSurface[k], dispclk / 4.0);
+ }
+ }
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%u, PixelClock = %f\n", __func__, k, pixel_rate_mhz);
dml2_printf("DML::%s: k=%u, DCFClkDeepSleepPerSurface = %f\n", __func__, k, DCFClkDeepSleepPerSurface[k]);
@@ -3589,9 +3621,56 @@ static void CalculateDCFCLKDeepSleep(
for (unsigned int k = 0; k < NumberOfActiveSurfaces; ++k) {
*DCFClkDeepSleep = math_max2(*DCFClkDeepSleep, DCFClkDeepSleepPerSurface[k]);
}
+
dml2_printf("DML::%s: DCFClkDeepSleep = %f (final)\n", __func__, *DCFClkDeepSleep);
}
+static noinline_for_stack void CalculateDCFCLKDeepSleep(
+ const struct dml2_display_cfg *display_cfg,
+ unsigned int NumberOfActiveSurfaces,
+ unsigned int BytePerPixelY[],
+ unsigned int BytePerPixelC[],
+ unsigned int SwathWidthY[],
+ unsigned int SwathWidthC[],
+ unsigned int DPPPerSurface[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double Dppclk[],
+ double ReadBandwidthLuma[],
+ double ReadBandwidthChroma[],
+ unsigned int ReturnBusWidth,
+
+ // Output
+ double *DCFClkDeepSleep)
+{
+ double zero_double[DML2_MAX_PLANES];
+ unsigned int zero_integer[DML2_MAX_PLANES];
+
+ memset(zero_double, 0, DML2_MAX_PLANES * sizeof(double));
+ memset(zero_integer, 0, DML2_MAX_PLANES * sizeof(unsigned int));
+
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ NumberOfActiveSurfaces,
+ BytePerPixelY,
+ BytePerPixelC,
+ SwathWidthY,
+ SwathWidthC,
+ DPPPerSurface,
+ PSCL_THROUGHPUT,
+ PSCL_THROUGHPUT_CHROMA,
+ Dppclk,
+ ReadBandwidthLuma,
+ ReadBandwidthChroma,
+ ReturnBusWidth,
+ 0,
+ zero_integer, //tdlut_bytes_to_deliver,
+ zero_double, //prefetch_swath_time_us,
+
+ // Output
+ DCFClkDeepSleep);
+}
+
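A sketch of the 3D LUT deep-sleep DCFCLK adjustment introduced above: the clock must be high enough to deliver the outstanding tdlut bytes at 64 bytes per cycle within the prefetch swath time, and is bumped to at least dispclk/4 when that bound takes effect. Input numbers are hypothetical.

#include <stdio.h>

static double max2(double a, double b) { return a > b ? a : b; }

int main(void)
{
	double dcfclk_deepsleep_mhz = 300.0;         /* baseline per-surface requirement */
	double dispclk_mhz = 1400.0;
	unsigned int tdlut_bytes_to_deliver = 65536; /* hypothetical */
	double prefetch_swath_time_us = 2.5;         /* hypothetical */

	/* 64 bytes are delivered per dcfclk cycle */
	double tdlut_required = (double)tdlut_bytes_to_deliver / 64.0 / prefetch_swath_time_us;
	if (tdlut_required > dcfclk_deepsleep_mhz) {
		dcfclk_deepsleep_mhz = max2(dcfclk_deepsleep_mhz, tdlut_required);
		dcfclk_deepsleep_mhz = max2(dcfclk_deepsleep_mhz, dispclk_mhz / 4.0);
	}
	printf("deepsleep dcfclk = %f MHz\n", dcfclk_deepsleep_mhz);
	return 0;
}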
static double CalculateWriteBackDelay(
enum dml2_source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -3815,8 +3894,8 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
p->SwathHeightC[k] = MaximumSwathHeightC[k] / 2;
RoundedUpSwathSizeBytesY[k] = p->full_swath_bytes_l[k] / 2;
RoundedUpSwathSizeBytesC[k] = p->full_swath_bytes_c[k] / 2;
- p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
- p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
+ p->request_size_bytes_luma[k] = ((p->BytePerPixY[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
+ p->request_size_bytes_chroma[k] = ((p->BytePerPixC[k] == 2) == dml_is_vertical_rotation(p->display_cfg->plane_descriptors[k].composition.rotation_angle)) ? 128 : 64;
}
if (p->SwathHeightC[k] == 0)
@@ -3886,6 +3965,10 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
#endif
*p->hw_debug5 = false;
+#ifdef ALLOW_SDPIF_RATE_LIMIT_PRE_CSTATE
+ if (p->NumberOfActiveSurfaces > 1)
+ *p->hw_debug5 = true;
+#else
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
if (!(p->mrq_present) && (!(*p->UnboundedRequestEnabled)) && (TotalActiveDPP == 1)
&& p->display_cfg->plane_descriptors[k].surface.dcc.enable
@@ -3901,6 +3984,7 @@ static void CalculateSwathAndDETConfiguration(struct dml2_core_internal_scratch
dml2_printf("DML::%s: k=%u hw_debug5 = %u\n", __func__, k, *p->hw_debug5);
#endif
}
+#endif
}
static enum dml2_odm_mode DecideODMMode(unsigned int HActive,
@@ -4058,7 +4142,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
return true;
}
-static void CalculateODMMode(
+static noinline_for_stack void CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
enum dml2_output_format_class OutFormat,
@@ -4155,7 +4239,7 @@ static void CalculateODMMode(
#endif
}
-static void CalculateOutputLink(
+static noinline_for_stack void CalculateOutputLink(
struct dml2_core_internal_scratch *s,
double PHYCLK,
double PHYCLKD18,
@@ -4586,6 +4670,7 @@ static void calculate_tdlut_setting(
*p->tdlut_groups_per_2row_ub = 0;
*p->tdlut_opt_time = 0;
*p->tdlut_drain_time = 0;
+ *p->tdlut_bytes_to_deliver = 0;
*p->tdlut_bytes_per_group = 0;
*p->tdlut_pte_bytes_per_frame = 0;
*p->tdlut_bytes_per_frame = 0;
@@ -4654,6 +4739,7 @@ static void calculate_tdlut_setting(
*p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
*p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
+ *p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0);
}
#ifdef __DML_VBA_DEBUG__
@@ -4674,6 +4760,7 @@ static void calculate_tdlut_setting(
dml2_printf("DML::%s: tdlut_delivery_cycles = %u\n", __func__, tdlut_delivery_cycles);
dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, *p->tdlut_opt_time);
dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, *p->tdlut_drain_time);
+ dml2_printf("DML::%s: tdlut_bytes_to_deliver = %d\n", __func__, *p->tdlut_bytes_to_deliver);
dml2_printf("DML::%s: tdlut_groups_per_2row_ub = %d\n", __func__, *p->tdlut_groups_per_2row_ub);
#endif
}
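A sketch of the split computed above: at a given drain rate, the frame minus a cursor-buffer-sized tail is fetched during the "optimal" window, while the tail, which is what the new tdlut_bytes_to_deliver output reports, drains during the prefetch window. Sizes and drain rate are hypothetical.

#include <stdio.h>

int main(void)
{
	double tdlut_bytes_per_frame = 73728.0;  /* hypothetical */
	double cursor_buffer_size_kb = 16.0;     /* KB, hypothetical */
	double tdlut_drain_rate = 9600.0;        /* bytes/us, hypothetical */

	double tail_bytes = cursor_buffer_size_kb * 1024.0;
	double tdlut_opt_time = (tdlut_bytes_per_frame - tail_bytes) / tdlut_drain_rate;
	double tdlut_drain_time = tail_bytes / tdlut_drain_rate;
	unsigned int tdlut_bytes_to_deliver = (unsigned int)tail_bytes;

	printf("opt=%f us drain=%f us deliver=%u bytes\n",
	       tdlut_opt_time, tdlut_drain_time, tdlut_bytes_to_deliver);
	return 0;
}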
@@ -5063,20 +5150,18 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->trip_to_mem = 0.0;
*p->Tvm_trips = 0.0;
*p->Tr0_trips = 0.0;
- s->Tvm_no_trip_oto = 0.0;
- s->Tr0_no_trip_oto = 0.0;
s->Tvm_trips_rounded = 0.0;
s->Tr0_trips_rounded = 0.0;
s->max_Tsw = 0.0;
s->Lsw_oto = 0.0;
- s->Tpre_rounded = 0.0;
+ *p->Tpre_rounded = 0.0;
s->prefetch_bw_equ = 0.0;
s->Tvm_equ = 0.0;
s->Tr0_equ = 0.0;
s->Tdmbf = 0.0;
s->Tdmec = 0.0;
s->Tdmsks = 0.0;
- s->prefetch_sw_bytes = 0.0;
+ *p->prefetch_sw_bytes = 0.0;
s->prefetch_bw_pr = 0.0;
s->bytes_pp = 0.0;
s->dep_bytes = 0.0;
@@ -5201,6 +5286,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: setup_for_tdlut = %u\n", __func__, p->setup_for_tdlut);
dml2_printf("DML::%s: tdlut_opt_time = %f\n", __func__, p->tdlut_opt_time);
dml2_printf("DML::%s: tdlut_pte_bytes_per_frame = %u\n", __func__, p->tdlut_pte_bytes_per_frame);
+ dml2_printf("DML::%s: tdlut_drain_time = %f\n", __func__, p->tdlut_drain_time);
#endif
if (p->OutputFormat == dml2_420 || (p->myPipe->InterlaceEnable && p->myPipe->ProgressiveToInterlaceUnitInOPP))
@@ -5271,23 +5357,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->bytes_pp = p->myPipe->BytePerPixelY + p->myPipe->BytePerPixelC;
}
- s->prefetch_bw_pr = s->bytes_pp * p->myPipe->PixelClock / (double)p->myPipe->DPPPerSurface;
- if (p->myPipe->VRatio < 1.0)
- s->prefetch_bw_pr = p->myPipe->VRatio * s->prefetch_bw_pr;
- s->max_Tsw = (math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) * s->LineTime);
-
- s->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
- s->prefetch_bw_pr = s->prefetch_bw_pr * p->mall_prefetch_sdp_overhead_factor;
- s->prefetch_sw_bytes = s->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
- s->prefetch_bw_oto = math_max2(s->prefetch_bw_pr, s->prefetch_sw_bytes / s->max_Tsw);
-
- s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__;
- s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0);
- s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime);
-
- s->min_Lsw_equ = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_EQU__;
- s->min_Lsw_equ = math_max2(s->min_Lsw_equ, 2.0);
- s->min_Lsw_equ = math_max2(s->min_Lsw_equ, p->tdlut_drain_time / s->LineTime);
+ *p->prefetch_sw_bytes = p->PrefetchSourceLinesY * p->swath_width_luma_ub * p->myPipe->BytePerPixelY + p->PrefetchSourceLinesC * p->swath_width_chroma_ub * p->myPipe->BytePerPixelC;
+ *p->prefetch_sw_bytes = *p->prefetch_sw_bytes * p->mall_prefetch_sdp_overhead_factor;
vm_bytes = p->vm_bytes; // vm_bytes is dpde0_bytes_per_frame_ub_l + dpde0_bytes_per_frame_ub_c + 2*extra_dpde_bytes;
extra_tdpe_bytes = (unsigned int)math_max2(0, (p->display_cfg->gpuvm_max_page_table_levels - 1) * 128);
@@ -5296,57 +5367,103 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
vm_bytes = vm_bytes + p->tdlut_pte_bytes_per_frame + (p->display_cfg->gpuvm_enable ? extra_tdpe_bytes : 0);
tdlut_row_bytes = (unsigned long) math_ceil2(p->tdlut_bytes_per_frame/2.0, 1.0);
+
+ s->min_Lsw_oto = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_OTO__;
+ s->min_Lsw_oto = math_max2(s->min_Lsw_oto, p->tdlut_drain_time / s->LineTime);
+ s->min_Lsw_oto = math_max2(s->min_Lsw_oto, 2.0);
+
+ // use vactive swath bw for prefetch oto and also cap prefetch_bw_oto to max_vratio_oto
+ // Note: in prefetch calculation, accounting is done mostly per-pipe.
+ // vactive swath bw represents the per-surface (aka per dml plane) bw to move vratio_l/c lines of bytes_l/c per line time
+ s->per_pipe_vactive_sw_bw = p->vactive_sw_bw_l / (double)p->myPipe->DPPPerSurface;
+
+ // one-to-one prefetch bw as one line of bytes per line time (as per vratio_pre_l/c = 1)
+ s->prefetch_bw_oto = (p->swath_width_luma_ub * p->myPipe->BytePerPixelY) / s->LineTime;
+
+ if (p->myPipe->BytePerPixelC > 0) {
+ s->per_pipe_vactive_sw_bw += p->vactive_sw_bw_c / (double)p->myPipe->DPPPerSurface;
+ s->prefetch_bw_oto += (p->swath_width_chroma_ub * p->myPipe->BytePerPixelC) / s->LineTime;
+ }
+
+ s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor;
+
+ s->prefetch_bw_oto = math_min2(s->prefetch_bw_oto, *p->prefetch_sw_bytes/(s->min_Lsw_oto*s->LineTime));
+
+ s->Lsw_oto = math_ceil2(4.0 * *p->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, 1.0) / 4.0;
+
s->prefetch_bw_oto = math_max3(s->prefetch_bw_oto,
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
- s->Lsw_oto = math_ceil2(4.0 * math_max2(s->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, s->min_Lsw_oto), 1.0) / 4.0;
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
+ dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
+ dml2_printf("DML::%s: per_pipe_vactive_sw_bw = %f\n", __func__, s->per_pipe_vactive_sw_bw);
+#endif
if (p->display_cfg->gpuvm_enable == true) {
- s->Tvm_no_trip_oto = math_max2(
+ s->Tvm_oto = math_max3(
+ *p->Tvm_trips,
*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto,
s->LineTime / 4.0);
- s->Tvm_oto = math_max2(
- *p->Tvm_trips,
- s->Tvm_no_trip_oto);
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
- s->Tvm_no_trip_oto = s->Tvm_trips_rounded;
s->Tvm_oto = s->Tvm_trips_rounded;
}
if ((p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable)) {
- s->Tr0_no_trip_oto = math_max2(
+ s->Tr0_oto = math_max3(
+ *p->Tr0_trips,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
- s->Tr0_oto = math_max2(
- *p->Tr0_trips,
- s->Tr0_no_trip_oto);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
- } else {
- s->Tr0_no_trip_oto = (s->LineTime - s->Tvm_oto) / 4.0;
- s->Tr0_oto = s->Tr0_no_trip_oto;
- }
+ } else
+ s->Tr0_oto = s->LineTime / 4.0;
s->Tvm_oto_lines = math_ceil2(4.0 * s->Tvm_oto / s->LineTime, 1) / 4.0;
s->Tr0_oto_lines = math_ceil2(4.0 * s->Tr0_oto / s->LineTime, 1) / 4.0;
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ dml2_printf("DML::%s: impacted_Tpre = %f\n", __func__, p->impacted_dst_y_pre);
+ if (p->impacted_dst_y_pre > 0) {
+ dml2_printf("DML::%s: dst_y_prefetch_oto = %f\n", __func__, s->dst_y_prefetch_oto);
+ s->dst_y_prefetch_oto = math_max2(s->dst_y_prefetch_oto, p->impacted_dst_y_pre);
+ dml2_printf("DML::%s: dst_y_prefetch_oto = %f (impacted)\n", __func__, s->dst_y_prefetch_oto);
+ }
+#endif
+ *p->Tpre_oto = s->dst_y_prefetch_oto * s->LineTime;
+
//To (time for delay after scaler) in line time
Lo = (unsigned int)(*p->DSTYAfterScaler + (double)*p->DSTXAfterScaler / (double)p->myPipe->HTotal);
+ s->min_Lsw_equ = math_max2(p->PrefetchSourceLinesY, p->PrefetchSourceLinesC) / __DML2_CALCS_MAX_VRATIO_PRE_EQU__;
+ s->min_Lsw_equ = math_max2(s->min_Lsw_equ, p->tdlut_drain_time / s->LineTime);
+ s->min_Lsw_equ = math_max2(s->min_Lsw_equ, 2.0);
//Tpre_equ in line time
if (p->DynamicMetadataVMEnabled && p->DynamicMetadataEnable)
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, *p->Tvm_trips) + s->TWait_p) / s->LineTime - Lo;
else
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + math_max2(p->TCalc, p->ExtraLatencyPrefetch) + s->TWait_p) / s->LineTime - Lo;
+
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ s->dst_y_prefetch_equ_impacted = math_max2(p->impacted_dst_y_pre, s->dst_y_prefetch_equ);
+
+ s->dst_y_prefetch_equ_impacted = math_min2(s->dst_y_prefetch_equ_impacted, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
+
+ if (s->dst_y_prefetch_equ_impacted > s->dst_y_prefetch_equ)
+ s->dst_y_prefetch_equ -= s->dst_y_prefetch_equ_impacted - s->dst_y_prefetch_equ;
+#endif
+
s->dst_y_prefetch_equ = math_min2(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
@@ -5364,7 +5481,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: BytePerPixelC = %u\n", __func__, p->myPipe->BytePerPixelC);
dml2_printf("DML::%s: PrefetchSourceLinesC = %f\n", __func__, p->PrefetchSourceLinesC);
dml2_printf("DML::%s: swath_width_chroma_ub = %u\n", __func__, p->swath_width_chroma_ub);
- dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, s->prefetch_sw_bytes);
+ dml2_printf("DML::%s: prefetch_sw_bytes = %f\n", __func__, *p->prefetch_sw_bytes);
dml2_printf("DML::%s: max_Tsw = %f\n", __func__, s->max_Tsw);
dml2_printf("DML::%s: bytes_pp = %f\n", __func__, s->bytes_pp);
dml2_printf("DML::%s: vm_bytes = %u\n", __func__, vm_bytes);
@@ -5388,7 +5505,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
#endif
double Tpre = s->dst_y_prefetch_equ * s->LineTime;
s->dst_y_prefetch_equ = math_floor2(4.0 * (s->dst_y_prefetch_equ + 0.125), 1) / 4.0;
- s->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
+ *p->Tpre_rounded = s->dst_y_prefetch_equ * s->LineTime;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: dst_y_prefetch_equ: %f (after round)\n", __func__, s->dst_y_prefetch_equ);
@@ -5414,7 +5531,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: vm_bytes: %f (hvm inefficiency scaled)\n", __func__, vm_bytes*p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: row_bytes: %f (hvm inefficiency scaled, 1 row)\n", __func__, p->PixelPTEBytesPerRow*p->HostVMInefficiencyFactor+p->meta_row_bytes+tdlut_row_bytes);
dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, s->Tpre_rounded, (s->Tpre_rounded - Tpre));
+ dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
#endif
@@ -5428,78 +5545,85 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
// Tpre_rounded is Tpre rounding to 2-bit fraction
// Tvm_trips_rounded is Tvm_trips ceiling to 1/4 line time
// Tr0_trips_rounded is Tr0_trips ceiling to 1/4 line time
- // So that means prefetch bw calculated can be higher since the total time availabe for prefetch is less
- bool min_Lsw_equ_ok = s->Tpre_rounded >= s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded + s->min_Lsw_equ*s->LineTime;
+ // So that means prefetch bw calculated can be higher since the total time available for prefetch is less
+ bool min_Lsw_equ_ok = *p->Tpre_rounded >= s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded + s->min_Lsw_equ*s->LineTime;
+ bool tpre_gt_req_latency = true;
+#if 0
+ // Check that Tpre_rounded is big enough if all of the stages of the prefetch are time constrained.
+ // The terms Tvm_trips_rounded and Tr0_trips_rounded represent the min time constraints for the VM and row stages.
+ // Normally, these terms cover the overall time constraint for Tpre >= (Tex + max{Ttrip, Turg}), but if these terms are at their minimum, an explicit check is necessary.
+ tpre_gt_req_latency = *p->Tpre_rounded > (math_max2(p->Turg, s->trip_to_mem) + p->ExtraLatencyPrefetch);
+#endif
- if (s->dst_y_prefetch_equ > 1 && min_Lsw_equ_ok) {
+ if (s->dst_y_prefetch_equ > 1 && min_Lsw_equ_ok && tpre_gt_req_latency) {
s->prefetch_bw1 = 0.;
s->prefetch_bw2 = 0.;
s->prefetch_bw3 = 0.;
s->prefetch_bw4 = 0.;
// prefetch_bw1: VM + 2*R0 + SW
- if (s->Tpre_rounded - *p->Tno_bw > 0) {
+ if (*p->Tpre_rounded - *p->Tno_bw > 0) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor
+ 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)
- + s->prefetch_sw_bytes)
- / (s->Tpre_rounded - *p->Tno_bw);
- s->Tsw_est1 = s->prefetch_sw_bytes / s->prefetch_bw1;
+ + *p->prefetch_sw_bytes)
+ / (*p->Tpre_rounded - *p->Tno_bw);
+ s->Tsw_est1 = *p->prefetch_sw_bytes / s->prefetch_bw1;
} else
s->prefetch_bw1 = 0;
dml2_printf("DML::%s: prefetch_bw1: %f\n", __func__, s->prefetch_bw1);
- if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
+ if ((s->Tsw_est1 < s->min_Lsw_equ * s->LineTime) && (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw > 0)) {
s->prefetch_bw1 = (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) /
- (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
+ (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: vm and 2 rows bytes = %f\n", __func__, (vm_bytes * p->HostVMInefficiencyFactor + 2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)));
- dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, s->Tpre_rounded);
+ dml2_printf("DML::%s: Tpre_rounded = %f\n", __func__, *p->Tpre_rounded);
dml2_printf("DML::%s: minus term = %f\n", __func__, s->min_Lsw_equ * s->LineTime + 0.75 * s->LineTime + *p->Tno_bw);
dml2_printf("DML::%s: min_Lsw_equ = %f\n", __func__, s->min_Lsw_equ);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
dml2_printf("DML::%s: Tno_bw = %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
+ dml2_printf("DML::%s: Time to fetch vm and 2 rows = %f\n", __func__, (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.75 * s->LineTime - *p->Tno_bw));
dml2_printf("DML::%s: prefetch_bw1: %f (updated)\n", __func__, s->prefetch_bw1);
#endif
}
// prefetch_bw2: VM + SW
- if (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded > 0) {
- s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded);
- s->Tsw_est2 = s->prefetch_sw_bytes / s->prefetch_bw2;
+ if (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded > 0) {
+ s->prefetch_bw2 = (vm_bytes * p->HostVMInefficiencyFactor + *p->prefetch_sw_bytes) /
+ (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded);
+ s->Tsw_est2 = *p->prefetch_sw_bytes / s->prefetch_bw2;
} else
s->prefetch_bw2 = 0;
dml2_printf("DML::%s: prefetch_bw2: %f\n", __func__, s->prefetch_bw2);
- if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
- s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (s->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
+ if ((s->Tsw_est2 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime) > 0)) {
+ s->prefetch_bw2 = vm_bytes * p->HostVMInefficiencyFactor / (*p->Tpre_rounded - *p->Tno_bw - 2.0 * s->Tr0_trips_rounded - s->min_Lsw_equ * s->LineTime - 0.25 * s->LineTime);
dml2_printf("DML::%s: prefetch_bw2: %f (updated)\n", __func__, s->prefetch_bw2);
}
// prefetch_bw3: 2*R0 + SW
- if (s->Tpre_rounded - s->Tvm_trips_rounded > 0) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + s->prefetch_sw_bytes) /
- (s->Tpre_rounded - s->Tvm_trips_rounded);
- s->Tsw_est3 = s->prefetch_sw_bytes / s->prefetch_bw3;
+ if (*p->Tpre_rounded - s->Tvm_trips_rounded > 0) {
+ s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) + *p->prefetch_sw_bytes) /
+ (*p->Tpre_rounded - s->Tvm_trips_rounded);
+ s->Tsw_est3 = *p->prefetch_sw_bytes / s->prefetch_bw3;
} else
s->prefetch_bw3 = 0;
dml2_printf("DML::%s: prefetch_bw3: %f\n", __func__, s->prefetch_bw3);
- if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
- s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (s->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
+ if ((s->Tsw_est3 < s->min_Lsw_equ * s->LineTime) && ((*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded) > 0)) {
+ s->prefetch_bw3 = (2 * (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes)) / (*p->Tpre_rounded - s->min_Lsw_equ * s->LineTime - 0.5 * s->LineTime - s->Tvm_trips_rounded);
dml2_printf("DML::%s: prefetch_bw3: %f (updated)\n", __func__, s->prefetch_bw3);
}
// prefetch_bw4: SW
- if (s->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded > 0)
- s->prefetch_bw4 = s->prefetch_sw_bytes / (s->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded);
+ if (*p->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded > 0)
+ s->prefetch_bw4 = *p->prefetch_sw_bytes / (*p->Tpre_rounded - s->Tvm_trips_rounded - 2 * s->Tr0_trips_rounded);
else
s->prefetch_bw4 = 0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tno_bw: %f\n", __func__, *p->Tno_bw);
- dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, s->Tpre_rounded, (s->Tpre_rounded - Tpre));
+ dml2_printf("DML::%s: Tpre=%f Tpre_rounded: %f, delta=%f\n", __func__, Tpre, *p->Tpre_rounded, (*p->Tpre_rounded - Tpre));
dml2_printf("DML::%s: Tvm_trips=%f Tvm_trips_rounded: %f, delta=%f\n", __func__, *p->Tvm_trips, s->Tvm_trips_rounded, (s->Tvm_trips_rounded - *p->Tvm_trips));
dml2_printf("DML::%s: Tr0_trips=%f Tr0_trips_rounded: %f, delta=%f\n", __func__, *p->Tr0_trips, s->Tr0_trips_rounded, (s->Tr0_trips_rounded - *p->Tr0_trips));
dml2_printf("DML::%s: Tsw_est1: %f\n", __func__, s->Tsw_est1);
@@ -5611,9 +5735,6 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
- // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
- s->Lsw_equ = s->dst_y_prefetch_equ - math_ceil2(4.0 * (s->Tvm_equ + 2 * s->Tr0_equ) / s->LineTime, 1.0) / 4.0;
-
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
*p->dst_y_prefetch = s->dst_y_prefetch_oto;
@@ -5622,31 +5743,33 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- s->dst_y_per_vm_no_trip_vblank = math_ceil2(4.0 * s->Tvm_no_trip_oto / s->LineTime, 1.0) / 4.0;
- s->dst_y_per_row_no_trip_vblank = math_ceil2(4.0 * s->Tr0_no_trip_oto / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
+
} else {
*p->dst_y_prefetch = s->dst_y_prefetch_equ;
+
+ if (s->dst_y_prefetch_equ < s->dst_y_prefetch_equ_impacted)
+ *p->dst_y_prefetch = s->dst_y_prefetch_equ_impacted;
+
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
- s->dst_y_per_vm_no_trip_vblank = *p->dst_y_per_vm_vblank;
- s->dst_y_per_row_no_trip_vblank = *p->dst_y_per_row_vblank;
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
- /* take worst case Lsw to calculate bandwidth requirement regardless of schedule */
- s->LinesToRequestPrefetchPixelData = math_min2(s->Lsw_equ, s->Lsw_oto); // Lsw
+ // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
+ s->LinesToRequestPrefetchPixelData = *p->dst_y_prefetch - *p->dst_y_per_vm_vblank - 2 * *p->dst_y_per_row_vblank; // Lsw
s->cursor_prefetch_bytes = (unsigned int)math_max2(p->cursor_bytes_per_chunk, 4 * p->cursor_bytes_per_line);
*p->prefetch_cursor_bw = p->num_cursors * s->cursor_prefetch_bytes / (s->LinesToRequestPrefetchPixelData * s->LineTime);
+ *p->prefetch_swath_time_us = (s->LinesToRequestPrefetchPixelData * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: TimeForFetchingVM = %f\n", __func__, s->TimeForFetchingVM);
@@ -5657,6 +5780,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: dst_y_per_row_vblank = %f\n", __func__, *p->dst_y_per_row_vblank);
dml2_printf("DML::%s: LinesToRequestPrefetchPixelData = %f\n", __func__, s->LinesToRequestPrefetchPixelData);
dml2_printf("DML::%s: PrefetchSourceLinesY = %f\n", __func__, p->PrefetchSourceLinesY);
+ dml2_printf("DML::%s: prefetch_swath_time_us = %f\n", __func__, *p->prefetch_swath_time_us);
dml2_printf("DML::%s: cursor_bytes_per_chunk = %d\n", __func__, p->cursor_bytes_per_chunk);
dml2_printf("DML::%s: cursor_bytes_per_line = %d\n", __func__, p->cursor_bytes_per_line);
@@ -5743,8 +5867,10 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
} else {
dml2_printf("DML::%s: No time to prefetch! dst_y_prefetch_equ = %f (should be > 1)\n", __func__, s->dst_y_prefetch_equ);
- dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
- __func__, min_Lsw_equ_ok, s->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
+ dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded (%f) should be >= Tvm_trips_rounded (%f) + 2.0*Tr0_trips_rounded (%f) + min_Tsw_equ (%f)\n",
+ __func__, min_Lsw_equ_ok, *p->Tpre_rounded, s->Tvm_trips_rounded, 2.0*s->Tr0_trips_rounded, s->min_Lsw_equ*s->LineTime);
+ dml2_printf("DML::%s: No time to prefetch! min_Lsw_equ_ok = %d, Tpre_rounded+Tvm_trips_rounded+2.0*Tr0_trips_rounded+min_Tsw_equ (%f) should be > \n",
+ __func__, tpre_gt_req_latency, (s->min_Lsw_equ*s->LineTime + s->Tvm_trips_rounded + 2.0*s->Tr0_trips_rounded), p->Turg, s->trip_to_mem, p->ExtraLatencyPrefetch);
s->NoTimeToPrefetch = true;
s->TimeForFetchingVM = 0;
s->TimeForFetchingRowInVBlank = 0;
@@ -5763,13 +5889,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (vm_bytes == 0) {
prefetch_vm_bw = 0;
- } else if (s->dst_y_per_vm_no_trip_vblank > 0) {
+ } else if (*p->dst_y_per_vm_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
- prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (s->dst_y_per_vm_no_trip_vblank * s->LineTime);
+ prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
@@ -5781,8 +5907,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
prefetch_row_bw = 0;
- } else if (s->dst_y_per_row_no_trip_vblank > 0) {
- prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (s->dst_y_per_row_no_trip_vblank * s->LineTime);
+ } else if (*p->dst_y_per_row_vblank > 0) {
+ prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
@@ -5822,6 +5948,171 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
return s->NoTimeToPrefetch;
}
+static unsigned int get_num_lb_source_lines(unsigned int max_line_buffer_lines,
+ unsigned int line_buffer_size_bits,
+ unsigned int num_pipes,
+ unsigned int vp_width,
+ unsigned int vp_height,
+ double h_ratio,
+ enum dml2_rotation_angle rotation_angle)
+{
+ unsigned int num_lb_source_lines = 0;
+ double lb_bit_per_pixel = 57.0;
+ unsigned recin_width = vp_width/num_pipes;
+
+ if (dml_is_vertical_rotation(rotation_angle))
+ recin_width = vp_height/num_pipes;
+
+ num_lb_source_lines = (unsigned int) math_min2((double) max_line_buffer_lines,
+ math_floor2(line_buffer_size_bits / lb_bit_per_pixel / (recin_width / math_max2(h_ratio, 1.0)), 1.0));
+
+ return num_lb_source_lines;
+}
+
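A quick usage sketch of the helper above, assuming the fixed 57 bits/pixel line-buffer cost it hard-codes; all inputs are hypothetical.

#include <stdio.h>
#include <math.h>

static unsigned int num_lb_lines(unsigned int max_lines, unsigned int lb_bits,
				 unsigned int num_pipes, unsigned int vp_width,
				 double h_ratio)
{
	double lb_bit_per_pixel = 57.0;
	double recin_width = (double)vp_width / num_pipes;
	double lines = floor(lb_bits / lb_bit_per_pixel / (recin_width / fmax(h_ratio, 1.0)));
	return (unsigned int)fmin((double)max_lines, lines);
}

int main(void)
{
	/* e.g. 32 max lines, 1.5 Mbit line buffer, 2 pipes, 3840-wide viewport, no downscale */
	printf("lb source lines = %u\n", num_lb_lines(32, 1572864, 2, 3840, 1.0));
	return 0;
}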
+static unsigned int find_max_impact_plane(unsigned int this_plane_idx, unsigned int num_planes, unsigned int Trpd_dcfclk_cycles[])
+{
+ int max_value = -1;
+ int max_idx = -1;
+ for (unsigned int i = 0; i < num_planes; i++) {
+ if (i != this_plane_idx && (int) Trpd_dcfclk_cycles[i] > max_value) {
+ max_value = Trpd_dcfclk_cycles[i];
+ max_idx = i;
+ }
+ }
+ if (max_idx <= 0) {
+ dml2_assert(max_idx >= 0);
+ max_idx = this_plane_idx;
+ }
+
+ return max_idx;
+}
+
+static double calculate_impacted_Tsw(unsigned int exclude_plane_idx, unsigned int num_planes, double *prefetch_swath_bytes, double bw_mbps)
+{
+ double sum = 0.;
+ for (unsigned int i = 0; i < num_planes; i++) {
+ if (i != exclude_plane_idx) {
+ sum += prefetch_swath_bytes[i];
+ }
+ }
+ return sum / bw_mbps;
+}
+
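How the two helpers combine, per the loop further below: for plane i, the plane with the largest return-path cycle count dominates the delay, and the remaining planes' swath bytes compete for the estimated urgent bandwidth. A toy run with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned int trpd_cycles[3] = { 4000, 9000, 6000 }; /* hypothetical per-plane cycles */
	double swath_bytes[3] = { 1.0e6, 2.0e6, 1.5e6 };
	double dcfclk_mhz = 1200.0, urg_bw_mbps = 50000.0;
	unsigned int i = 0, k = 1; /* plane 1 has the max cycles among planes != 0 */

	double impacted_us = trpd_cycles[k] / dcfclk_mhz;   /* time plane k hogs the return path */
	double sum = 0.0;
	for (unsigned int j = 0; j < 3; j++)
		if (j != k)
			sum += swath_bytes[j];
	impacted_us += sum / urg_bw_mbps;                   /* remaining planes share the urgent bw */

	printf("plane %u impacted prefetch time = %f us\n", i, impacted_us);
	return 0;
}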
+// a global check against the aggregate effect of the per plane prefetch schedule
+static noinline_for_stack bool CheckGlobalPrefetchAdmissibility(struct dml2_core_internal_scratch *scratch,
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *p)
+{
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals *s = &scratch->CheckGlobalPrefetchAdmissibility_locals;
+ unsigned int i, k;
+
+ memset(s, 0, sizeof(struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals));
+
+ *p->recalc_prefetch_schedule = 0;
+ s->prefetch_global_check_passed = 1;
+ // worst case if the rob and cdb are fully hogged
+ s->max_Trpd_dcfclk_cycles = (unsigned int) math_ceil2((p->rob_buffer_size_kbytes*1024 + p->compressed_buffer_size_kbytes*DML_MAX_COMPRESSION_RATIO*1024)/64.0, 1.0);
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: num_active_planes = %d\n", __func__, p->num_active_planes);
+ dml2_printf("DML::%s: rob_buffer_size_kbytes = %d\n", __func__, p->rob_buffer_size_kbytes);
+ dml2_printf("DML::%s: compressed_buffer_size_kbytes = %d\n", __func__, p->compressed_buffer_size_kbytes);
+ dml2_printf("DML::%s: estimated_urg_bandwidth_required_mbps = %f\n", __func__, p->estimated_urg_bandwidth_required_mbps);
+ dml2_printf("DML::%s: estimated_dcfclk_mhz = %f\n", __func__, p->estimated_dcfclk_mhz);
+ dml2_printf("DML::%s: max_Trpd_dcfclk_cycles = %u\n", __func__, s->max_Trpd_dcfclk_cycles);
+#endif
+
+ // calculate the return impact from each plane, request is 256B per dcfclk
+ for (i = 0; i < p->num_active_planes; i++) {
+ s->src_detile_buf_size_bytes_l[i] = p->detile_buffer_size_bytes_l[i];
+ s->src_detile_buf_size_bytes_c[i] = p->detile_buffer_size_bytes_c[i];
+ s->src_swath_bytes_l[i] = p->full_swath_bytes_l[i];
+ s->src_swath_bytes_c[i] = p->full_swath_bytes_c[i];
+
+ if (p->pixel_format[i] == dml2_420_10) {
+ s->src_detile_buf_size_bytes_l[i] = (unsigned int) (s->src_detile_buf_size_bytes_l[i] * 1.5);
+ s->src_detile_buf_size_bytes_c[i] = (unsigned int) (s->src_detile_buf_size_bytes_c[i] * 1.5);
+ s->src_swath_bytes_l[i] = (unsigned int) (s->src_swath_bytes_l[i] * 1.5);
+ s->src_swath_bytes_c[i] = (unsigned int) (s->src_swath_bytes_c[i] * 1.5);
+ }
+
+ s->burst_bytes_to_fill_det = (unsigned int) (math_floor2(s->src_detile_buf_size_bytes_l[i] / p->chunk_bytes_l, 1) * p->chunk_bytes_l);
+ s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_l[i] / p->swath_height_l[i], 1) * s->src_swath_bytes_l[i]);
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u pixel_format = %d\n", __func__, i, p->pixel_format[i]);
+ dml2_printf("DML::%s: i=%u chunk_bytes_l = %d\n", __func__, i, p->chunk_bytes_l);
+ dml2_printf("DML::%s: i=%u lb_source_lines_l = %d\n", __func__, i, p->lb_source_lines_l[i]);
+ dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_l=%d\n", __func__, i, s->src_detile_buf_size_bytes_l[i]);
+ dml2_printf("DML::%s: i=%u src_swath_bytes_l=%d\n", __func__, i, s->src_swath_bytes_l[i]);
+ dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d (luma)\n", __func__, i, s->burst_bytes_to_fill_det);
+#endif
+
+ if (s->src_swath_bytes_c[i] > 0) { // dual_plane
+ s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(s->src_detile_buf_size_bytes_c[i] / p->chunk_bytes_c, 1) * p->chunk_bytes_c);
+
+ if (p->pixel_format[i] == dml2_422_planar_8 || p->pixel_format[i] == dml2_422_planar_10 || p->pixel_format[i] == dml2_422_planar_12) {
+ s->burst_bytes_to_fill_det += (unsigned int) (math_floor2(p->lb_source_lines_c[i] / p->swath_height_c[i], 1) * s->src_swath_bytes_c[i]);
+ }
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u chunk_bytes_c = %d\n", __func__, i, p->chunk_bytes_c);
+ dml2_printf("DML::%s: i=%u lb_source_lines_c = %d\n", __func__, i, p->lb_source_lines_c[i]);
+ dml2_printf("DML::%s: i=%u src_detile_buf_size_bytes_c=%d\n", __func__, i, s->src_detile_buf_size_bytes_c[i]);
+ dml2_printf("DML::%s: i=%u src_swath_bytes_c=%d\n", __func__, i, s->src_swath_bytes_c[i]);
+#endif
+ }
+
+ s->time_to_fill_det_us = (double) s->burst_bytes_to_fill_det / (256 * p->estimated_dcfclk_mhz); // fill time assumes a full burst at the request rate
+ s->accumulated_return_path_dcfclk_cycles[i] = (unsigned int) math_ceil2(((DML_MAX_COMPRESSION_RATIO-1) * 64 * p->estimated_dcfclk_mhz) * s->time_to_fill_det_us / 64.0, 1.0); //for 64B per DCFClk
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u burst_bytes_to_fill_det=%d\n", __func__, i, s->burst_bytes_to_fill_det);
+ dml2_printf("DML::%s: i=%u time_to_fill_det_us=%f\n", __func__, i, s->time_to_fill_det_us);
+ dml2_printf("DML::%s: i=%u accumulated_return_path_dcfclk_cycles=%u\n", __func__, i, s->accumulated_return_path_dcfclk_cycles[i]);
+#endif
+ // clamp to the worst-case delay, i.e. one that occupies the full rob+cdb
+ if (s->accumulated_return_path_dcfclk_cycles[i] > s->max_Trpd_dcfclk_cycles)
+ s->accumulated_return_path_dcfclk_cycles[i] = s->max_Trpd_dcfclk_cycles;
+ }
+
+ // Figure out the impacted prefetch time for each plane
+ // if impacted_Tpre is > equ bw Tpre, we need to fail the prefetch schedule as we need a higher state to support the bw
+ for (i = 0; i < p->num_active_planes; i++) {
+ k = find_max_impact_plane(i, p->num_active_planes, s->accumulated_return_path_dcfclk_cycles); // plane k causes most impact to plane i
+ // the rest of the planes (except for k) compete for bw
+ p->impacted_dst_y_pre[i] = s->accumulated_return_path_dcfclk_cycles[k]/p->estimated_dcfclk_mhz;
+ p->impacted_dst_y_pre[i] += calculate_impacted_Tsw(k, p->num_active_planes, p->prefetch_sw_bytes, p->estimated_urg_bandwidth_required_mbps);
+ p->impacted_dst_y_pre[i] = math_ceil2(p->impacted_dst_y_pre[i] / p->line_time[i], 0.25);
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u impacted_Tpre=%f (k=%u)\n", __func__, i, p->impacted_dst_y_pre[i], k);
+#endif
+ }
+
+ if (p->Tpre_rounded != NULL && p->Tpre_oto != NULL) {
+ for (i = 0; i < p->num_active_planes; i++) {
+ if (p->impacted_dst_y_pre[i] > p->dst_y_prefetch[i]) {
+ s->prefetch_global_check_passed = 0;
+ *p->recalc_prefetch_schedule = 1;
+ }
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: i=%u Tpre_rounded=%f\n", __func__, i, p->Tpre_rounded[i]);
+ dml2_printf("DML::%s: i=%u Tpre_oto=%f\n", __func__, i, p->Tpre_oto[i]);
+#endif
+ }
+ } else {
+ // likely a mode programming call; assume support and no recalc - not used anyway
+ s->prefetch_global_check_passed = 1;
+ *p->recalc_prefetch_schedule = 0;
+ }
+
+#ifdef __DML_VBA_DEBUG__
+ dml2_printf("DML::%s: prefetch_global_check_passed=%u\n", __func__, s->prefetch_global_check_passed);
+ dml2_printf("DML::%s: recalc_prefetch_schedule=%u\n", __func__, *p->recalc_prefetch_schedule);
+#endif
+
+ return s->prefetch_global_check_passed;
+}
+
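Putting it together: the check above clamps each plane's return-path delay to the worst case where the whole ROB plus the compression-ratio-scaled compressed buffer is in flight, then fails the schedule if any plane's impacted Tpre exceeds what its prefetch schedule achieved. A sketch of just the clamp and the pass/fail decision; the compression ratio and buffer sizes are assumed values.

#include <stdio.h>
#include <math.h>

#define MAX_COMPRESSION_RATIO 4.0 /* assumed; the kernel uses DML_MAX_COMPRESSION_RATIO */

int main(void)
{
	double rob_kb = 128.0, cdb_kb = 64.0, dcfclk_mhz = 1200.0;
	double impacted_dst_y_pre[2] = { 40.0, 70.0 }; /* lines, hypothetical */
	double dst_y_prefetch[2] = { 50.0, 60.0 };
	int recalc = 0, passed = 1;

	/* worst case: rob + compressed buffer fully occupied, drained 64B per dcfclk */
	double max_trpd_cycles = ceil((rob_kb * 1024 + cdb_kb * MAX_COMPRESSION_RATIO * 1024) / 64.0);
	printf("max Trpd = %f cycles (%f us)\n", max_trpd_cycles, max_trpd_cycles / dcfclk_mhz);

	for (int i = 0; i < 2; i++) {
		if (impacted_dst_y_pre[i] > dst_y_prefetch[i]) {
			passed = 0;
			recalc = 1; /* redo the per-plane schedules with the impacted floor */
		}
	}
	printf("passed=%d recalc=%d\n", passed, recalc);
	return 0;
}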
static void calculate_peak_bandwidth_required(
struct dml2_core_internal_scratch *s,
struct dml2_core_calcs_calculate_peak_bandwidth_required_params *p)
@@ -6040,7 +6331,7 @@ static void check_urgent_bandwidth_support(
double *frac_urg_bandwidth_nom,
double *frac_urg_bandwidth_mall,
bool *vactive_bandwidth_support_ok, // vactive ok
- bool *bandwidth_support_ok, // max of vm, prefetch, vactive all ok
+ bool *bandwidth_support_ok,// max of vm, prefetch, vactive all ok
unsigned int mall_allocated_for_dcn_mbytes,
double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max],
@@ -6110,7 +6401,6 @@ static void check_urgent_bandwidth_support(
}
}
#endif
-
}
static double get_bandwidth_available_for_immediate_flip(enum dml2_core_internal_soc_state_type eval_state,
@@ -6432,7 +6722,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
p->Watermark->Z8StutterExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
p->Watermark->Z8StutterEnterPlusExitWatermark += p->mmSOCParameters.max_urgent_latency_us + p->mmSOCParameters.df_response_time_us;
}
- p->Watermark->g6_temp_read_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
+ p->Watermark->temp_read_or_ppt_watermark_us = p->mmSOCParameters.g6_temp_read_blackout_us + p->Watermark->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: UrgentLatency = %f\n", __func__, p->mmSOCParameters.UrgentLatency);
@@ -6448,12 +6738,12 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
dml2_printf("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->StutterEnterPlusExitWatermark);
dml2_printf("DML::%s: Z8StutterExitWatermark = %f\n", __func__, p->Watermark->Z8StutterExitWatermark);
dml2_printf("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n", __func__, p->Watermark->Z8StutterEnterPlusExitWatermark);
- dml2_printf("DML::%s: g6_temp_read_watermark_us = %f\n", __func__, p->Watermark->g6_temp_read_watermark_us);
+ dml2_printf("DML::%s: temp_read_or_ppt_watermark_us = %f\n", __func__, p->Watermark->temp_read_or_ppt_watermark_us);
#endif
s->TotalActiveWriteback = 0;
for (unsigned int k = 0; k < p->NumberOfActiveSurfaces; ++k) {
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
s->TotalActiveWriteback = s->TotalActiveWriteback + 1;
}
}
@@ -6516,7 +6806,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->LBLatencyHidingSourceLinesC[k] = (unsigned int)(math_min2((double)p->MaxLineBufferLines, math_floor2((double)p->LineBufferSize / LBBitPerPixel / ((double)p->SwathWidthC[k] / math_max2(h_ratio_c, 1.0)), 1)) - (v_taps_c - 1));
#ifdef __DML_VBA_DEBUG__
- dml2_printf("DML::%s: k=%u, MaxLineBufferLines= %u\n", __func__, k, p->MaxLineBufferLines);
+ dml2_printf("DML::%s: k=%u, MaxLineBufferLines = %u\n", __func__, k, p->MaxLineBufferLines);
dml2_printf("DML::%s: k=%u, LineBufferSize = %u\n", __func__, k, p->LineBufferSize);
dml2_printf("DML::%s: k=%u, LBBitPerPixel = %u\n", __func__, k, LBBitPerPixel);
dml2_printf("DML::%s: k=%u, HRatio = %f\n", __func__, k, h_ratio);
@@ -6557,7 +6847,7 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
s->ActiveDRAMClockChangeLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->DRAMClockChangeWatermark;
s->ActiveFCLKChangeLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->FCLKChangeWatermark;
s->USRRetrainingLatencyMargin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->USRRetrainingWatermark;
- s->g6_temp_read_latency_margin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->g6_temp_read_watermark_us;
+ s->g6_temp_read_latency_margin[k] = s->ActiveClockChangeLatencyHiding - p->Watermark->temp_read_or_ppt_watermark_us;
if (p->VActiveLatencyHidingMargin)
p->VActiveLatencyHidingMargin[k] = s->ActiveDRAMClockChangeLatencyMargin[k];
@@ -6565,9 +6855,12 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
if (p->VActiveLatencyHidingUs)
p->VActiveLatencyHidingUs[k] = s->ActiveClockChangeLatencyHiding;
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
- s->WritebackLatencyHiding = (double)p->WritebackInterfaceBufferSize * 1024.0 / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height * (double)h_total / pixel_clock_mhz) * 4.0);
- if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_64) {
+ if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
+ s->WritebackLatencyHiding = (double)p->WritebackInterfaceBufferSize * 1024.0
+ / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height
+ * (double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width
+ / ((double)p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height * (double)h_total / pixel_clock_mhz) * 4.0);
+ if (p->display_cfg->stream_descriptors[p->display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format == dml2_444_64) {
s->WritebackLatencyHiding = s->WritebackLatencyHiding / 2;
}
s->WritebackDRAMClockChangeLatencyMargin = s->WritebackLatencyHiding - p->Watermark->WritebackDRAMClockChangeWatermark;
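The writeback rework above indexes writeback_stream[0] instead of the old single scaling_info; the hiding term keeps its shape: buffer bytes divided by the writeback output rate, halved for the 64bpp format. A sketch with hypothetical timing:

#include <stdio.h>

int main(void)
{
	double wb_buffer_kb = 12.0;
	double out_h = 1080, out_w = 1920, in_h = 1080; /* hypothetical writeback_stream[0] */
	double h_total = 2200, pixel_clock_mhz = 148.5;
	int is_444_64 = 0;

	/* buffer bytes / (output pixels per us * 4 bytes per pixel) */
	double line_time_us = h_total / pixel_clock_mhz;
	double hiding_us = wb_buffer_kb * 1024.0 /
			   (out_h * out_w / (in_h * line_time_us) * 4.0);
	if (is_444_64)
		hiding_us /= 2; /* 8 bytes per pixel instead of 4 */
	printf("writeback latency hiding = %f us\n", hiding_us);
	return 0;
}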
@@ -6582,36 +6875,36 @@ static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
uclk_pstate_change_strategy = p->display_cfg->plane_descriptors[k].overrides.uclk_pstate_change_strategy;
reserved_vblank_time_us = (double)p->display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns / 1000;
- p->FCLKChangeSupport[k] = dml2_fclock_change_unsupported;
+ p->FCLKChangeSupport[k] = dml2_pstate_change_unsupported;
if (s->ActiveFCLKChangeLatencyMargin[k] > 0)
- p->FCLKChangeSupport[k] = dml2_fclock_change_vactive;
+ p->FCLKChangeSupport[k] = dml2_pstate_change_vactive;
else if (reserved_vblank_time_us >= p->mmSOCParameters.FCLKChangeLatency)
- p->FCLKChangeSupport[k] = dml2_fclock_change_vblank;
+ p->FCLKChangeSupport[k] = dml2_pstate_change_vblank;
- if (p->FCLKChangeSupport[k] == dml2_fclock_change_unsupported)
+ if (p->FCLKChangeSupport[k] == dml2_pstate_change_unsupported)
*p->global_fclk_change_supported = false;
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_unsupported;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_unsupported;
if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_auto) {
if (p->display_cfg->overrides.all_streams_blanked ||
(s->ActiveDRAMClockChangeLatencyMargin[k] > 0 && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency))
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank_and_vactive;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vblank_and_vactive;
else if (s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vactive;
else if (reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vblank;
} else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_vactive && s->ActiveDRAMClockChangeLatencyMargin[k] > 0)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vactive;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vactive;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_vblank && reserved_vblank_time_us >= p->mmSOCParameters.DRAMClockChangeLatency)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_vblank;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_vblank;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_drr)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_drr;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_drr;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_svp)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_mall_svp;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_mall_svp;
else if (uclk_pstate_change_strategy == dml2_uclk_pstate_change_strategy_force_mall_full_frame)
- p->DRAMClockChangeSupport[k] = dml2_dram_clock_change_mall_full_frame;
+ p->DRAMClockChangeSupport[k] = dml2_pstate_change_mall_full_frame;
- if (p->DRAMClockChangeSupport[k] == dml2_dram_clock_change_unsupported)
+ if (p->DRAMClockChangeSupport[k] == dml2_pstate_change_unsupported)
*p->global_dram_clock_change_supported = false;
s->dst_y_pstate = (unsigned int)(math_ceil2((p->mmSOCParameters.DRAMClockChangeLatency + p->mmSOCParameters.UrgentLatency) / (h_total / pixel_clock_mhz), 1));
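The renames above fold the separate FCLK- and DRAM-clock-change enums into one dml2_pstate_change_* set; the decision logic is unchanged. A reduced sketch of the auto-strategy branch, with hypothetical latencies and a simplified local enum:

#include <stdio.h>

enum pstate { UNSUPPORTED, VACTIVE, VBLANK, VBLANK_AND_VACTIVE };

int main(void)
{
	double active_margin_us = 1.2;      /* latency hiding minus watermark */
	double reserved_vblank_us = 400.0;  /* from plane overrides */
	double dram_change_latency_us = 350.0;
	enum pstate support = UNSUPPORTED;

	if (active_margin_us > 0 && reserved_vblank_us >= dram_change_latency_us)
		support = VBLANK_AND_VACTIVE;
	else if (active_margin_us > 0)
		support = VACTIVE;
	else if (reserved_vblank_us >= dram_change_latency_us)
		support = VBLANK;

	printf("dram clock change support = %d\n", support);
	return 0;
}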
@@ -6719,7 +7012,7 @@ static void calculate_bytes_to_fetch_required_to_hide_latency(
}
}
-static void calculate_vactive_det_fill_latency(
+static noinline_for_stack void calculate_vactive_det_fill_latency(
const struct dml2_display_cfg *display_cfg,
unsigned int num_active_planes,
unsigned int bytes_required_l[],
@@ -6909,8 +7202,7 @@ struct dml2_core_internal_g6_temp_read_blackouts_table {
} entries[DML_MAX_CLK_TABLE_SIZE];
};
-static const struct dml2_core_internal_g6_temp_read_blackouts_table
- core_dcn4_g6_temp_read_blackout_table = {
+struct dml2_core_internal_g6_temp_read_blackouts_table core_dcn4_g6_temp_read_blackout_table = {
.entries = {
{
.uclk_khz = 96000,
@@ -7030,6 +7322,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
struct dml2_core_calcs_CalculateVMRowAndSwath_params *CalculateVMRowAndSwath_params = &mode_lib->scratch.CalculateVMRowAndSwath_params;
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
+#endif
struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
struct dml2_core_calcs_calculate_peak_bandwidth_required_params *calculate_peak_bandwidth_params = &mode_lib->scratch.calculate_peak_bandwidth_params;
@@ -7077,12 +7372,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; k++)
dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
-
- // dml2_printf_dml_policy(&mode_lib->ms.policy);
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, mode_lib->ms.num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, mode_lib->ms.num_active_planes);
#endif
CalculateMaxDETAndMinCompressedBufferSize(
@@ -7177,8 +7466,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- mode_lib->ms.SurfaceReadBandwidthLuma[k] = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- mode_lib->ms.SurfaceReadBandwidthChroma[k] = mode_lib->ms.SwathWidthCSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ mode_lib->ms.vactive_sw_bw_l[k] = mode_lib->ms.SwathWidthYSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelY[k], 1.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ mode_lib->ms.vactive_sw_bw_c[k] = mode_lib->ms.SwathWidthCSingleDPP[k] * math_ceil2(mode_lib->ms.BytePerPixelC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
mode_lib->ms.cursor_bw[k] = display_cfg->plane_descriptors[k].cursor.num_cursors * display_cfg->plane_descriptors[k].cursor.cursor_width *
display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
@@ -7188,35 +7477,35 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
old_ReadBandwidthChroma = mode_lib->ms.SwathWidthYSingleDPP[k] / 2 * math_ceil2(mode_lib->ms.BytePerPixelInDETC[k], 2.0) / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio / 2.0;
dml2_printf("DML::%s: k=%u, old_ReadBandwidthLuma = %f\n", __func__, k, old_ReadBandwidthLuma);
dml2_printf("DML::%s: k=%u, old_ReadBandwidthChroma = %f\n", __func__, k, old_ReadBandwidthChroma);
- dml2_printf("DML::%s: k=%u, ReadBandwidthLuma = %f\n", __func__, k, mode_lib->ms.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthChroma = %f\n", __func__, k, mode_lib->ms.SurfaceReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->ms.vactive_sw_bw_c[k]);
#endif
}
// Writeback bandwidth
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_64) {
- mode_lib->ms.WriteBandwidth[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width
- / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0 && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format == dml2_444_64) {
+ mode_lib->ms.WriteBandwidth[k][0] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height
+ * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width
+ / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height
* display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total
/ ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 8.0;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- mode_lib->ms.WriteBandwidth[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height
- * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width
- / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height
+ } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
+ mode_lib->ms.WriteBandwidth[k][0] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height
+ * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width
+ / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height
* display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total
/ ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 4.0;
} else {
- mode_lib->ms.WriteBandwidth[k] = 0.0;
+ mode_lib->ms.WriteBandwidth[k][0] = 0.0;
}
}
/*Writeback Latency support check*/
mode_lib->ms.support.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true &&
- (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / ((double)mode_lib->soc.qos_parameters.writeback.base_latency_us))) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0 &&
+ (mode_lib->ms.WriteBandwidth[k][0] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / ((double)mode_lib->soc.qos_parameters.writeback.base_latency_us))) {
mode_lib->ms.support.WritebackLatencySupport = false;
}
}
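The writeback path above computes bandwidth as output pixels per effective input line time, at 8 bytes/pixel for dml2_444_64 and 4 bytes/pixel for other formats, then requires that the writeback interface buffer cover the base latency at that rate. A condensed sketch with assumed standalone names:

#include <stdbool.h>

static double writeback_bw(double out_w, double out_h, double in_h,
                           double h_total, double pixel_clock_khz, bool is_444_64)
{
        double line_time_us = h_total / (pixel_clock_khz / 1000.0);

        return out_h * out_w / (in_h * line_time_us) * (is_444_64 ? 8.0 : 4.0);
}

static bool writeback_latency_ok(double wb_bw, double buf_size_kbytes,
                                 double base_latency_us)
{
        /* the interface buffer must absorb wb_bw for the full base latency */
        return wb_bw <= buf_size_kbytes * 1024.0 / base_latency_us;
}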
@@ -7225,19 +7514,19 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
/* Writeback Scale Ratio and Taps Support Check */
mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio > mode_lib->ip.writeback_max_hscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio > mode_lib->ip.writeback_max_vscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio < mode_lib->ip.writeback_min_hscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio < mode_lib->ip.writeback_min_vscl_ratio
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps > (unsigned int) mode_lib->ip.writeback_max_hscl_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps > (unsigned int) mode_lib->ip.writeback_max_vscl_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps
- || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps
- || (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps > 2.0 && ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps % 2) == 1))) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio > mode_lib->ip.writeback_max_hscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio > mode_lib->ip.writeback_max_vscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio < mode_lib->ip.writeback_min_hscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio < mode_lib->ip.writeback_min_vscl_ratio
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps > (unsigned int) mode_lib->ip.writeback_max_hscl_taps
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps > (unsigned int) mode_lib->ip.writeback_max_vscl_taps
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps
+ || display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio > (unsigned int)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps
+ || (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps > 2.0 && ((display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps % 2) == 1))) {
mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = false;
}
- if (2.0 * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps - 1) * 57 > mode_lib->ip.writeback_line_buffer_buffer_size) {
+ if (2.0 * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height * (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps - 1) * 57 > mode_lib->ip.writeback_line_buffer_buffer_size) {
mode_lib->ms.support.WritebackScaleRatioAndTapsSupport = false;
}
}
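The scale-ratio and taps limits above reduce to a single predicate: ratios within the scaler's min/max range, taps within the hardware maximums, ratios no larger than the tap counts, no odd horizontal tap count above 2, and a line buffer able to hold (v_taps - 1) output lines. A condensed sketch (assumed standalone names; the 57-byte constant is carried over verbatim from the check):

#include <stdbool.h>

static bool wb_scale_taps_ok(double h_ratio, double v_ratio,
                             unsigned int h_taps, unsigned int v_taps,
                             double min_hscl, double max_hscl,
                             double min_vscl, double max_vscl,
                             unsigned int max_h_taps, unsigned int max_v_taps,
                             double out_height, unsigned int line_buf_size)
{
        if (h_ratio > max_hscl || h_ratio < min_hscl ||
            v_ratio > max_vscl || v_ratio < min_vscl ||
            h_taps > max_h_taps || v_taps > max_v_taps ||
            h_ratio > h_taps || v_ratio > v_taps ||
            (h_taps > 2 && (h_taps % 2) == 1))
                return false;

        /* line buffer capacity check, 57 bytes per element as in the diff */
        return 2.0 * out_height * (v_taps - 1) * 57 <= line_buf_size;
}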
@@ -7417,8 +7706,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateSwathAndDETConfiguration_params->nomDETInKByte = mode_lib->ms.NomDETInKByte;
CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSegmentSizeInkByte = mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
CalculateSwathAndDETConfiguration_params->CompressedBufferSegmentSizeInkByte = mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->ms.SurfaceReadBandwidthLuma;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->ms.SurfaceReadBandwidthChroma;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->ms.vactive_sw_bw_l;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->ms.vactive_sw_bw_c;
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthLuma = mode_lib->ms.MaximumSwathWidthLuma;
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthChroma = mode_lib->ms.MaximumSwathWidthChroma;
CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightY = mode_lib->ms.Read256BlockHeightY;
@@ -7665,16 +7954,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
//DISPCLK/DPPCLK
mode_lib->ms.WritebackRequiredDISPCLK = 0;
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->ms.WritebackRequiredDISPCLK = math_max2(mode_lib->ms.WritebackRequiredDISPCLK,
- CalculateWriteBackDISPCLK(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
+ CalculateWriteBackDISPCLK(display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total,
mode_lib->ip.writeback_line_buffer_buffer_size));
}
@@ -7706,7 +7995,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if (!s->stream_visited[display_cfg->plane_descriptors[k].stream_index]) {
s->stream_visited[display_cfg->plane_descriptors[k].stream_index] = 1;
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true)
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0)
s->TotalNumberOfActiveWriteback = s->TotalNumberOfActiveWriteback + 1;
s->TotalNumberOfActiveOTG = s->TotalNumberOfActiveOTG + 1;
@@ -8250,23 +8539,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.PSCL_FACTOR,
mode_lib->ms.PSCL_FACTOR_CHROMA,
mode_lib->ms.RequiredDPPCLK,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
mode_lib->soc.return_bus_width_bytes,
/* Output */
&mode_lib->ms.dcfclk_deepsleep);
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->ms.WritebackDelayTime[k] = mode_lib->soc.qos_parameters.writeback.base_latency_us + CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->ms.RequiredDISPCLK;
} else {
mode_lib->ms.WritebackDelayTime[k] = 0.0;
@@ -8343,7 +8632,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: mode_lib->ms.DCFCLK = %f\n", __func__, mode_lib->ms.DCFCLK);
dml2_printf("DML::%s: mode_lib->ms.FabricClock = %f\n", __func__, mode_lib->ms.FabricClock);
dml2_printf("DML::%s: mode_lib->ms.uclk_freq_mhz = %f\n", __func__, mode_lib->ms.uclk_freq_mhz);
- dml2_printf("DML::%s: urgent latency tolerance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
+ dml2_printf("DML::%s: urgent latency tolarance = %f\n", __func__, ((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024 / (mode_lib->ms.DCFCLK * mode_lib->soc.return_bus_width_bytes)));
#endif
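The debug value printed above has a simple closed form: the time the return buffer can absorb data beyond one pixel chunk at the DCFCLK return rate. Restated as a hypothetical helper (kbytes, MHz and bytes/cycle in; microseconds out):

static double urgent_latency_tolerance_us(double rob_kbytes, double chunk_kbytes,
                                          double dcfclk_mhz, double bus_width_bytes)
{
        return (rob_kbytes - chunk_kbytes) * 1024.0 / (dcfclk_mhz * bus_width_bytes);
}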
mode_lib->ms.support.OutstandingRequestsSupport = true;
@@ -8361,6 +8650,13 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
+ mode_lib->soc.qos_parameters.qos_params.dcn4x.average_transport_distance_fclk_cycles / mode_lib->ms.FabricClock)
* (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_average_transport_latency_margin / 100.0);
+ mode_lib->ms.support.max_non_urgent_latency_us
+ = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->ms.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
+ / mode_lib->ms.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->ms.FabricClock
+ + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->ms.FabricClock
+ * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
+
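The new max_non_urgent_latency_us term sums a UCLK-cycle latency and two fabric-cycle components, with the UCLK and round-trip parts inflated by their margin percentages. As a hypothetical standalone helper (cycles divided by MHz gives microseconds):

static double max_non_urgent_latency_us(double max_lat_uclk_cycles, double uclk_mhz,
                                        double umc_margin_pct,
                                        double mall_overhead_fclk_cycles,
                                        double round_trip_fclk_cycles,
                                        double fclk_mhz, double fabric_margin_pct)
{
        return max_lat_uclk_cycles / uclk_mhz * (1.0 + umc_margin_pct / 100.0)
             + mall_overhead_fclk_cycles / fclk_mhz
             + round_trip_fclk_cycles / fclk_mhz * (1.0 + fabric_margin_pct / 100.0);
}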
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
@@ -8402,7 +8698,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
memset(calculate_mcache_setting_params, 0, sizeof(struct dml2_core_calcs_calculate_mcache_setting_params));
- if (mode_lib->soc.mall_allocated_for_dcn_mbytes == 0 || mode_lib->ip.dcn_mrq_present) {
+ if (mode_lib->soc.mcache_size_bytes == 0 || mode_lib->ip.dcn_mrq_present) {
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.mall_prefetch_sdp_overhead_factor[k] = 1.0;
mode_lib->ms.mall_prefetch_dram_overhead_factor[k] = 1.0;
@@ -8509,8 +8805,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
display_cfg->hostvm_enable,
mode_lib->ms.MaxDCFCLK,
mode_lib->ms.MaxFabricClock,
+#ifdef DML_MODE_SUPPORT_USE_DPM_DRAM_BW
+ mode_lib->ms.dram_bw_mbps);
+#else
mode_lib->ms.max_dram_bw_mbps);
-
+#endif
// Average BW support check
calculate_avg_bandwidth_required(
@@ -8518,8 +8817,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
// input
display_cfg,
mode_lib->ms.num_active_planes,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
mode_lib->ms.cursor_bw,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
@@ -8589,6 +8888,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
@@ -8632,9 +8932,32 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
&mode_lib->ms.ExtraLatency_sr,
&mode_lib->ms.ExtraLatencyPrefetch);
- {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++)
+ s->impacted_dst_y_pre[k] = 0;
+
+ s->recalc_prefetch_schedule = 0;
+ s->recalc_prefetch_done = 0;
+ do {
mode_lib->ms.support.PrefetchSupported = true;
+
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->ms.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
struct dml2_core_internal_DmlPipe *myPipe = &s->myPipe;
mode_lib->ms.TWait[k] = CalculateTWait(
@@ -8724,6 +9047,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->ms.meta_row_bytes[k];
CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->ms.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->ms.vactive_sw_bw_c[k];
// output
CalculatePrefetchSchedule_params->DSTXAfterScaler = &s->DSTXAfterScaler[k];
@@ -8752,6 +9078,10 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculatePrefetchSchedule_params->VUpdateWidthPix = &s->dummy_integer[1];
CalculatePrefetchSchedule_params->VReadyOffsetPix = &s->dummy_integer[2];
CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->ms.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->prefetch_swath_time_us[k];
mode_lib->ms.NoTimeForPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
@@ -8760,6 +9090,27 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: k=%d, dst_y_per_row_vblank = %f\n", __func__, k, *CalculatePrefetchSchedule_params->dst_y_per_row_vblank);
} // for k num_planes
+ CalculateDCFCLKDeepSleepTdlut(
+ display_cfg,
+ mode_lib->ms.num_active_planes,
+ mode_lib->ms.BytePerPixelY,
+ mode_lib->ms.BytePerPixelC,
+ mode_lib->ms.SwathWidthY,
+ mode_lib->ms.SwathWidthC,
+ mode_lib->ms.NoOfDPP,
+ mode_lib->ms.PSCL_FACTOR,
+ mode_lib->ms.PSCL_FACTOR_CHROMA,
+ mode_lib->ms.RequiredDPPCLK,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
+ mode_lib->soc.return_bus_width_bytes,
+ mode_lib->ms.RequiredDISPCLK,
+ s->tdlut_bytes_to_deliver,
+ s->prefetch_swath_time_us,
+
+ /* Output */
+ &mode_lib->ms.dcfclk_deepsleep);
+
for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
if (mode_lib->ms.dst_y_prefetch[k] < 2.0
|| mode_lib->ms.LinesForVM[k] >= 32.0
@@ -8783,7 +9134,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
mode_lib->ms.support.VRatioInPrefetchSupported = true;
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
if (mode_lib->ms.VRatioPreY[k] > __DML2_CALCS_MAX_VRATIO_PRE__ ||
mode_lib->ms.VRatioPreC[k] > __DML2_CALCS_MAX_VRATIO_PRE__) {
mode_lib->ms.support.VRatioInPrefetchSupported = false;
@@ -8793,10 +9144,14 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
}
+ mode_lib->ms.support.PrefetchSupported &= mode_lib->ms.support.VRatioInPrefetchSupported;
+
+ // By default, do not recalc prefetch schedule
+ s->recalc_prefetch_schedule = 0;
+
// Only do urg vs prefetch bandwidth check, flip schedule check, power saving feature support check IF the Prefetch Schedule Check is ok
if (mode_lib->ms.support.PrefetchSupported) {
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- double line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
// Calculate Urgent burst factor for prefetch
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%d, Calling CalculateUrgentBurstFactor (for prefetch)\n", __func__, k);
@@ -8809,7 +9164,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.swath_width_chroma_ub[k],
mode_lib->ms.SwathHeightY[k],
mode_lib->ms.SwathHeightC[k],
- line_time_us,
+ s->line_times[k],
mode_lib->ms.UrgLatency,
mode_lib->ms.VRatioPreY[k],
mode_lib->ms.VRatioPreC[k],
@@ -8846,8 +9201,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.SurfaceReadBandwidthChroma;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
@@ -8893,127 +9248,164 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
}
}
+#ifdef DML_GLOBAL_PREFETCH_CHECK
+ if (mode_lib->ms.support.PrefetchSupported && mode_lib->ms.num_active_planes > 1 && s->recalc_prefetch_done == 0) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = mode_lib->ms.num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->ms.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->ms.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->ms.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->ms.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->ms.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = s->Tpre_rounded;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = s->Tpre_oto;
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->ms.support.urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->ms.dst_y_prefetch;
+ if (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps < 10 * 1024)
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = 10 * 1024;
+
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = (CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps / (double) mode_lib->soc.return_bus_width_bytes) /
+ ((double)mode_lib->soc.qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100.0);
+
+ // if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre; prefetch should then be possible
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->recalc_prefetch_schedule;
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ mode_lib->ms.support.PrefetchSupported = CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params);
+ s->recalc_prefetch_done = 1;
+ s->recalc_prefetch_schedule = 1;
+ }
+#endif
+ } // prefetch schedule ok, do urg bw and flip schedule
+ } while (s->recalc_prefetch_schedule);
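Under DML_GLOBAL_PREFETCH_CHECK the loop above makes at most two passes: the first computes the prefetch schedules and, for multi-plane configs, runs CheckGlobalPrefetchAdmissibility, which estimates the achievable DCFCLK from the clamped urgent SDP bandwidth (bandwidth / return bus width / urgent derate) and may report impacted dst_y_pre values; recalc_prefetch_done then latches so the second pass only reruns the schedules. A stub-level sketch of that control flow, runnable on its own (all names and stub bodies hypothetical):

#include <stdbool.h>
#include <stdio.h>

static void compute_prefetch_schedules(void) { puts("schedule pass"); }
static bool check_global_prefetch_admissibility(int *recalc) { *recalc = 1; return true; }

int main(void)
{
        bool supported = true;
        int num_planes = 2, recalc = 0, done = 0;

        do {
                compute_prefetch_schedules();
                recalc = 0;
                if (supported && num_planes > 1 && !done) {
                        supported = check_global_prefetch_admissibility(&recalc);
                        done = 1;    /* latch: the admissibility check runs once */
                        recalc = 1;  /* force exactly one rescheduling pass */
                }
        } while (recalc);

        return supported ? 0 : 1;
}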
- // Both prefetch schedule and BW okay
- if (mode_lib->ms.support.PrefetchSupported == true && mode_lib->ms.support.VRatioInPrefetchSupported == true) {
- mode_lib->ms.BandwidthAvailableForImmediateFlip =
- get_bandwidth_available_for_immediate_flip(
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
- mode_lib->ms.support.urg_bandwidth_available);
-
- mode_lib->ms.TotImmediateFlipBytes = 0;
- for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip) {
- s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
- s->HostVMInefficiencyFactor,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.meta_row_bytes[k]);
- } else {
- s->per_pipe_flip_bytes[k] = 0;
- }
- mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
+ // Flip Schedule
+ // Both prefetch schedule and BW okay
+ if (mode_lib->ms.support.PrefetchSupported == true) {
+ mode_lib->ms.BandwidthAvailableForImmediateFlip =
+ get_bandwidth_available_for_immediate_flip(
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_qual, // no flip
+ mode_lib->ms.support.urg_bandwidth_available);
- }
+ mode_lib->ms.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip) {
+ s->per_pipe_flip_bytes[k] = get_pipe_flip_bytes(
+ s->HostVMInefficiencyFactor,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.meta_row_bytes[k]);
+ } else {
+ s->per_pipe_flip_bytes[k] = 0;
+ }
+ mode_lib->ms.TotImmediateFlipBytes += s->per_pipe_flip_bytes[k] * mode_lib->ms.NoOfDPP[k];
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- CalculateFlipSchedule(
- &mode_lib->scratch,
- display_cfg->plane_descriptors[k].immediate_flip,
- 1, // use_lb_flip_bw
- s->HostVMInefficiencyFactor,
- s->Tvm_trips_flip[k],
- s->Tr0_trips_flip[k],
- s->Tvm_trips_flip_rounded[k],
- s->Tr0_trips_flip_rounded[k],
- display_cfg->gpuvm_enable,
- mode_lib->ms.vm_bytes[k],
- mode_lib->ms.DPTEBytesPerRow[k],
- mode_lib->ms.BandwidthAvailableForImmediateFlip,
- mode_lib->ms.TotImmediateFlipBytes,
- display_cfg->plane_descriptors[k].pixel_format,
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
- display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
- display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
- mode_lib->ms.Tno_bw_flip[k],
- mode_lib->ms.dpte_row_height[k],
- mode_lib->ms.dpte_row_height_chroma[k],
- mode_lib->ms.use_one_row_for_frame_flip[k],
- mode_lib->ip.max_flip_time_us,
- mode_lib->ip.max_flip_time_lines,
- s->per_pipe_flip_bytes[k],
- mode_lib->ms.meta_row_bytes[k],
- s->meta_row_height_luma[k],
- s->meta_row_height_chroma[k],
- mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
-
- /* Output */
- &mode_lib->ms.dst_y_per_vm_flip[k],
- &mode_lib->ms.dst_y_per_row_flip[k],
- &mode_lib->ms.final_flip_bw[k],
- &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
- }
+ }
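The accumulation above sums immediate-flip traffic across planes, weighting each plane's per-pipe byte count by its DPP count. In isolation (hypothetical standalone form):

#include <stddef.h>

static unsigned long total_immediate_flip_bytes(const unsigned long *per_pipe_bytes,
                                                const unsigned int *num_dpp,
                                                size_t num_planes)
{
        unsigned long total = 0;

        for (size_t k = 0; k < num_planes; k++)
                total += per_pipe_bytes[k] * num_dpp[k];

        return total;
}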
- calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
- calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
- calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
- calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
- calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
-
- calculate_peak_bandwidth_params->display_cfg = display_cfg;
- calculate_peak_bandwidth_params->inc_flip_bw = 1;
- calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
- calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
- calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
- calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
- calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
-
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.SurfaceReadBandwidthChroma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
- calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
- calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
- calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
- calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
- calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
- calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
- calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
- calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
- calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
- calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
- calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
- calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
-
- calculate_peak_bandwidth_required(
- &mode_lib->scratch,
- calculate_peak_bandwidth_params);
-
- calculate_immediate_flip_bandwidth_support(
- &s->dummy_single[0], // double* frac_urg_bandwidth_flip
- &mode_lib->ms.support.ImmediateFlipSupport,
-
- dml2_core_internal_soc_state_sys_active,
- mode_lib->ms.support.urg_bandwidth_required_flip,
- mode_lib->ms.support.non_urg_bandwidth_required_flip,
- mode_lib->ms.support.urg_bandwidth_available);
-
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
- if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
- mode_lib->ms.support.ImmediateFlipSupport = false;
- }
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
+ CalculateFlipSchedule(
+ &mode_lib->scratch,
+ display_cfg->plane_descriptors[k].immediate_flip,
+ 1, // use_lb_flip_bw
+ s->HostVMInefficiencyFactor,
+ s->Tvm_trips_flip[k],
+ s->Tr0_trips_flip[k],
+ s->Tvm_trips_flip_rounded[k],
+ s->Tr0_trips_flip_rounded[k],
+ display_cfg->gpuvm_enable,
+ mode_lib->ms.vm_bytes[k],
+ mode_lib->ms.DPTEBytesPerRow[k],
+ mode_lib->ms.BandwidthAvailableForImmediateFlip,
+ mode_lib->ms.TotImmediateFlipBytes,
+ display_cfg->plane_descriptors[k].pixel_format,
+ (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)),
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
+ mode_lib->ms.Tno_bw_flip[k],
+ mode_lib->ms.dpte_row_height[k],
+ mode_lib->ms.dpte_row_height_chroma[k],
+ mode_lib->ms.use_one_row_for_frame_flip[k],
+ mode_lib->ip.max_flip_time_us,
+ mode_lib->ip.max_flip_time_lines,
+ s->per_pipe_flip_bytes[k],
+ mode_lib->ms.meta_row_bytes[k],
+ s->meta_row_height_luma[k],
+ s->meta_row_height_chroma[k],
+ mode_lib->ip.dcn_mrq_present && display_cfg->plane_descriptors[k].surface.dcc.enable,
+
+ /* Output */
+ &mode_lib->ms.dst_y_per_vm_flip[k],
+ &mode_lib->ms.dst_y_per_row_flip[k],
+ &mode_lib->ms.final_flip_bw[k],
+ &mode_lib->ms.ImmediateFlipSupportedForPipe[k]);
+ }
+
+ calculate_peak_bandwidth_params->urg_vactive_bandwidth_required = s->dummy_bw;
+ calculate_peak_bandwidth_params->urg_bandwidth_required = mode_lib->ms.support.urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->urg_bandwidth_required_qual = s->dummy_bw;
+ calculate_peak_bandwidth_params->non_urg_bandwidth_required = mode_lib->ms.support.non_urg_bandwidth_required_flip;
+ calculate_peak_bandwidth_params->surface_avg_vactive_required_bw = s->surface_dummy_bw;
+ calculate_peak_bandwidth_params->surface_peak_required_bw = mode_lib->ms.surface_peak_required_bw;
+
+ calculate_peak_bandwidth_params->display_cfg = display_cfg;
+ calculate_peak_bandwidth_params->inc_flip_bw = 1;
+ calculate_peak_bandwidth_params->num_active_planes = mode_lib->ms.num_active_planes;
+ calculate_peak_bandwidth_params->num_of_dpp = mode_lib->ms.NoOfDPP;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_nom_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p0 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p0;
+ calculate_peak_bandwidth_params->dcc_dram_bw_pref_overhead_factor_p1 = mode_lib->ms.dcc_dram_bw_pref_overhead_factor_p1;
+ calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->ms.mall_prefetch_sdp_overhead_factor;
+ calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->ms.mall_prefetch_dram_overhead_factor;
+
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->ms.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
+ calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
+ calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
+ calculate_peak_bandwidth_params->dpte_row_bw = mode_lib->ms.dpte_row_bw;
+ calculate_peak_bandwidth_params->meta_row_bw = mode_lib->ms.meta_row_bw;
+ calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->ms.prefetch_cursor_bw;
+ calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->ms.prefetch_vmrow_bw;
+ calculate_peak_bandwidth_params->flip_bw = mode_lib->ms.final_flip_bw;
+ calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->ms.UrgentBurstFactorLuma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->ms.UrgentBurstFactorChroma;
+ calculate_peak_bandwidth_params->urgent_burst_factor_cursor = mode_lib->ms.UrgentBurstFactorCursor;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_l = mode_lib->ms.UrgentBurstFactorLumaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_c = mode_lib->ms.UrgentBurstFactorChromaPre;
+ calculate_peak_bandwidth_params->urgent_burst_factor_prefetch_cursor = mode_lib->ms.UrgentBurstFactorCursorPre;
+
+ calculate_peak_bandwidth_required(
+ &mode_lib->scratch,
+ calculate_peak_bandwidth_params);
+
+ calculate_immediate_flip_bandwidth_support(
+ &s->dummy_single[0], // double* frac_urg_bandwidth_flip
+ &mode_lib->ms.support.ImmediateFlipSupport,
- } else { // if prefetch not support, assume iflip is not supported too
+ dml2_core_internal_soc_state_sys_active,
+ mode_lib->ms.support.urg_bandwidth_required_flip,
+ mode_lib->ms.support.non_urg_bandwidth_required_flip,
+ mode_lib->ms.support.urg_bandwidth_available);
+
+ for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ if (display_cfg->plane_descriptors[k].immediate_flip == true && mode_lib->ms.ImmediateFlipSupportedForPipe[k] == false)
mode_lib->ms.support.ImmediateFlipSupport = false;
- }
- } // prefetch schedule
+ }
+
+ } else { // if prefetch is not supported, assume iflip is not supported too
+ mode_lib->ms.support.ImmediateFlipSupport = false;
}
s->mSOCParameters.UrgentLatency = mode_lib->ms.UrgLatency;
@@ -9110,8 +9502,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
s->pstate_bytes_required_c,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
- mode_lib->ms.SurfaceReadBandwidthLuma,
- mode_lib->ms.SurfaceReadBandwidthChroma,
+ mode_lib->ms.vactive_sw_bw_l,
+ mode_lib->ms.vactive_sw_bw_c,
mode_lib->ms.surface_avg_vactive_required_bw,
mode_lib->ms.surface_peak_required_bw,
/* outputs */
@@ -9181,12 +9573,12 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
dml2_printf("DML::%s: ModeSupport = %u\n", __func__, mode_lib->ms.support.ModeSupport);
dml2_printf("DML::%s: ImmediateFlipSupport = %u\n", __func__, mode_lib->ms.support.ImmediateFlipSupport);
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.MPCCombineEnable[k] = mode_lib->ms.MPCCombine[k];
mode_lib->ms.support.DPPPerSurface[k] = mode_lib->ms.NoOfDPP[k];
}
- for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
+ for (k = 0; k < mode_lib->ms.num_active_planes; k++) {
mode_lib->ms.support.ODMMode[k] = mode_lib->ms.ODMMode[k];
mode_lib->ms.support.DSCEnabled[k] = mode_lib->ms.RequiresDSC[k];
mode_lib->ms.support.FECEnabled[k] = mode_lib->ms.RequiresFEC[k];
@@ -9223,7 +9615,7 @@ unsigned int dml2_core_calcs_mode_support_ex(struct dml2_core_calcs_mode_support
dml2_printf("DML::%s: is_mode_support = %u (min_clk_index=%d)\n", __func__, result, in_out_params->min_clk_index);
for (unsigned int k = 0; k < in_out_params->in_display_cfg->num_planes; k++)
- dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
+ dml2_printf("DML::%s: plane_%d: reserved_vblank_time_ns = %u\n", __func__, k, in_out_params->in_display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns);
dml2_printf("DML::%s: ------------- DONE ----------\n", __func__);
@@ -9876,7 +10268,7 @@ static void CalculateStutterEfficiency(struct dml2_core_internal_scratch *scratc
if (!dml_is_phantom_pipe(&p->display_cfg->plane_descriptors[k])) {
if (!l->stream_visited[p->display_cfg->plane_descriptors[k].stream_index]) {
- if (p->display_cfg->stream_descriptors[k].writeback.enable)
+ if (p->display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0)
l->TotalActiveWriteback = l->TotalActiveWriteback + 1;
if (TotalNumberOfActiveOTG == 0) { // first otg
@@ -9978,6 +10370,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params *CalculateSwathAndDETConfiguration_params = &mode_lib->scratch.CalculateSwathAndDETConfiguration_params;
struct dml2_core_calcs_CalculateStutterEfficiency_params *CalculateStutterEfficiency_params = &mode_lib->scratch.CalculateStutterEfficiency_params;
struct dml2_core_calcs_CalculatePrefetchSchedule_params *CalculatePrefetchSchedule_params = &mode_lib->scratch.CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params *CheckGlobalPrefetchAdmissibility_params = &mode_lib->scratch.CheckGlobalPrefetchAdmissibility_params;
struct dml2_core_calcs_calculate_mcache_setting_params *calculate_mcache_setting_params = &mode_lib->scratch.calculate_mcache_setting_params;
struct dml2_core_calcs_calculate_tdlut_setting_params *calculate_tdlut_setting_params = &mode_lib->scratch.calculate_tdlut_setting_params;
struct dml2_core_shared_CalculateMetaAndPTETimes_params *CalculateMetaAndPTETimes_params = &mode_lib->scratch.CalculateMetaAndPTETimes_params;
@@ -10069,12 +10462,6 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_assert(s->SOCCLK > 0);
#ifdef __DML_VBA_DEBUG__
- // dml2_printf_dml_display_cfg_timing(&display_cfg->timing, s->num_active_planes);
- // dml2_printf_dml_display_cfg_plane(&display_cfg->plane, s->num_active_planes);
- // dml2_printf_dml_display_cfg_surface(&display_cfg->surface, s->num_active_planes);
- // dml2_printf_dml_display_cfg_output(&display_cfg->output, s->num_active_planes);
- // dml2_printf_dml_display_cfg_hw_resource(&display_cfg->hw, s->num_active_planes);
-
dml2_printf("DML::%s: num_active_planes = %u\n", __func__, s->num_active_planes);
dml2_printf("DML::%s: num_active_pipes = %u\n", __func__, mode_lib->mp.num_active_pipes);
dml2_printf("DML::%s: Dcfclk = %f\n", __func__, mode_lib->mp.Dcfclk);
@@ -10192,10 +10579,10 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
mode_lib->mp.cursor_bw[k] = display_cfg->plane_descriptors[k].cursor.num_cursors * display_cfg->plane_descriptors[k].cursor.cursor_width * display_cfg->plane_descriptors[k].cursor.cursor_bpp / 8.0 /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000));
- mode_lib->mp.SurfaceReadBandwidthLuma[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
- mode_lib->mp.SurfaceReadBandwidthChroma[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
- dml2_printf("DML::%s: ReadBandwidthSurfaceLuma[%i] = %fBps\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: ReadBandwidthSurfaceChroma[%i] = %fBps\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
+ mode_lib->mp.vactive_sw_bw_l[k] = mode_lib->mp.SwathWidthSingleDPPY[k] * mode_lib->mp.BytePerPixelY[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio;
+ mode_lib->mp.vactive_sw_bw_c[k] = mode_lib->mp.SwathWidthSingleDPPC[k] * mode_lib->mp.BytePerPixelC[k] / (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio;
+ dml2_printf("DML::%s: vactive_sw_bw_l[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: vactive_sw_bw_c[%i] = %fBps\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
}
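The mode-programming loop above computes cursor bandwidth as one cursor line of bytes per display line time; the vactive_sw_bw_l/vactive_sw_bw_c expressions are the same per-plane surface read bandwidth used in mode support. A hypothetical standalone form of the cursor term:

static double cursor_bw(double num_cursors, double cursor_width_px,
                        double cursor_bpp, double h_total, double pixel_clock_khz)
{
        double line_time_us = h_total / (pixel_clock_khz / 1000.0);

        return num_cursors * cursor_width_px * cursor_bpp / 8.0 / line_time_us;
}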
CalculateSwathAndDETConfiguration_params->display_cfg = display_cfg;
@@ -10211,8 +10598,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateSwathAndDETConfiguration_params->nomDETInKByte = s->NomDETInKByte;
CalculateSwathAndDETConfiguration_params->ConfigReturnBufferSegmentSizeInkByte = mode_lib->ip.config_return_buffer_segment_size_in_kbytes;
CalculateSwathAndDETConfiguration_params->CompressedBufferSegmentSizeInkByte = mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->mp.SurfaceReadBandwidthLuma;
- CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->mp.SurfaceReadBandwidthChroma;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthLuma = mode_lib->mp.vactive_sw_bw_l;
+ CalculateSwathAndDETConfiguration_params->ReadBandwidthChroma = mode_lib->mp.vactive_sw_bw_c;
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthLuma = s->dummy_single_array[0];
CalculateSwathAndDETConfiguration_params->MaximumSwathWidthChroma = s->dummy_single_array[1];
CalculateSwathAndDETConfiguration_params->Read256BytesBlockHeightY = mode_lib->mp.Read256BlockHeightY;
@@ -10533,8 +10920,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_tdlut_setting_params->tdlut_groups_per_2row_ub = &s->tdlut_groups_per_2row_ub[k];
calculate_tdlut_setting_params->tdlut_opt_time = &s->tdlut_opt_time[k];
calculate_tdlut_setting_params->tdlut_drain_time = &s->tdlut_drain_time[k];
+ calculate_tdlut_setting_params->tdlut_bytes_to_deliver = &s->tdlut_bytes_to_deliver[k];
calculate_tdlut_setting_params->tdlut_bytes_per_group = &s->tdlut_bytes_per_group[k];
-
calculate_tdlut_setting(&mode_lib->scratch, calculate_tdlut_setting_params);
}
@@ -10577,17 +10964,17 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.TCalc = 24.0 / mode_lib->mp.DCFCLKDeepSleep;
for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->mp.WritebackDelay[k] =
mode_lib->soc.qos_parameters.writeback.base_latency_us
+ CalculateWriteBackDelay(
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.h_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_ratio,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.v_taps,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height,
- display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].pixel_format,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].h_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_ratio,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].v_taps,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_width,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].output_height,
+ display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.writeback_stream[0].input_height,
display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total) / mode_lib->mp.Dispclk;
} else
mode_lib->mp.WritebackDelay[k] = 0;
@@ -10673,10 +11060,25 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
for (k = 0; k < s->num_active_planes; ++k) {
bool cursor_not_enough_urgent_latency_hiding = 0;
- double line_time_us = 0.0;
-
- line_time_us = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
+ s->line_times[k] = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000);
+
+ s->pixel_format[k] = display_cfg->plane_descriptors[k].pixel_format;
+
+ s->lb_source_lines_l[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->mp.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane0.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane0.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
+ s->lb_source_lines_c[k] = get_num_lb_source_lines(mode_lib->ip.max_line_buffer_lines, mode_lib->ip.line_buffer_size_bits,
+ mode_lib->mp.NoOfDPP[k],
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.width,
+ display_cfg->plane_descriptors[k].composition.viewport.plane1.height,
+ display_cfg->plane_descriptors[k].composition.scaler_info.plane1.h_ratio,
+ display_cfg->plane_descriptors[k].composition.rotation_angle);
+
if (display_cfg->plane_descriptors[k].cursor.num_cursors > 0) {
calculate_cursor_req_attributes(
display_cfg->plane_descriptors[k].cursor.cursor_width,
@@ -10693,7 +11095,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
display_cfg->plane_descriptors[k].cursor.cursor_width,
s->cursor_bytes_per_chunk[k],
s->cursor_lines_per_chunk[k],
- line_time_us,
+ s->line_times[k],
mode_lib->mp.UrgentLatency,
// output
@@ -10708,7 +11110,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.swath_width_chroma_ub[k],
mode_lib->mp.SwathHeightY[k],
mode_lib->mp.SwathHeightC[k],
- line_time_us,
+ s->line_times[k],
mode_lib->mp.UrgentLatency,
display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio,
display_cfg->plane_descriptors[k].composition.scaler_info.plane1.v_ratio,
@@ -10746,6 +11148,35 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_printf("DML::%s: immediate_flip_required = %u\n", __func__, s->immediate_flip_required);
#endif
+ if (s->num_active_planes > 1) {
+ CheckGlobalPrefetchAdmissibility_params->num_active_planes = s->num_active_planes;
+ CheckGlobalPrefetchAdmissibility_params->pixel_format = s->pixel_format;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_l = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->chunk_bytes_c = mode_lib->ip.pixel_chunk_size_kbytes * 1024;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_l = s->lb_source_lines_l;
+ CheckGlobalPrefetchAdmissibility_params->lb_source_lines_c = s->lb_source_lines_c;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_l = mode_lib->mp.SwathHeightY;
+ CheckGlobalPrefetchAdmissibility_params->swath_height_c = mode_lib->mp.SwathHeightC;
+ CheckGlobalPrefetchAdmissibility_params->rob_buffer_size_kbytes = mode_lib->ip.rob_buffer_size_kbytes;
+ CheckGlobalPrefetchAdmissibility_params->compressed_buffer_size_kbytes = mode_lib->mp.CompressedBufferSizeInkByte;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_l = mode_lib->mp.DETBufferSizeY;
+ CheckGlobalPrefetchAdmissibility_params->detile_buffer_size_bytes_c = mode_lib->mp.DETBufferSizeC;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_l = s->full_swath_bytes_l;
+ CheckGlobalPrefetchAdmissibility_params->full_swath_bytes_c = s->full_swath_bytes_c;
+ CheckGlobalPrefetchAdmissibility_params->prefetch_sw_bytes = s->prefetch_sw_bytes;
+ CheckGlobalPrefetchAdmissibility_params->Tpre_rounded = 0; // don't care
+ CheckGlobalPrefetchAdmissibility_params->Tpre_oto = 0; // don't care
+ CheckGlobalPrefetchAdmissibility_params->estimated_urg_bandwidth_required_mbps = mode_lib->mp.urg_bandwidth_available[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp];
+ CheckGlobalPrefetchAdmissibility_params->estimated_dcfclk_mhz = mode_lib->mp.Dcfclk;
+ CheckGlobalPrefetchAdmissibility_params->line_time = s->line_times;
+ CheckGlobalPrefetchAdmissibility_params->dst_y_prefetch = mode_lib->mp.dst_y_prefetch;
+
+ // if recalc_prefetch_schedule is set, recalculate the prefetch schedule with the new impacted_Tpre; prefetch should then be possible
+ CheckGlobalPrefetchAdmissibility_params->recalc_prefetch_schedule = &s->dummy_boolean[0];
+ CheckGlobalPrefetchAdmissibility_params->impacted_dst_y_pre = s->impacted_dst_y_pre;
+ CheckGlobalPrefetchAdmissibility(&mode_lib->scratch, CheckGlobalPrefetchAdmissibility_params); // don't care about the check output for mode programming
+ }
+
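The block above only fills in CheckGlobalPrefetchAdmissibility_params; conceptually, the check asks whether every active plane can stream its prefetch bytes through the shared return path within its Tpre window. A minimal sketch of that idea in plain C — hypothetical helper name and units (bytes per microsecond), not the actual DML routine:

#include <stdbool.h>

/*
 * Sketch: each plane must move prefetch_sw_bytes[i] within tpre_us[i],
 * so the summed per-plane rates must fit in the shared urgent bandwidth.
 */
static bool global_prefetch_fits(const double *prefetch_sw_bytes,
                                 const double *tpre_us,
                                 unsigned int num_planes,
                                 double avail_bw_bytes_per_us)
{
        double required_bw = 0.0;
        unsigned int i;

        for (i = 0; i < num_planes; i++) {
                if (tpre_us[i] <= 0.0)
                        return false; /* no window to prefetch in */
                required_bw += prefetch_sw_bytes[i] / tpre_us[i];
        }
        return required_bw <= avail_bw_bytes_per_us;
}

The real routine additionally reports impacted_dst_y_pre per plane and sets recalc_prefetch_schedule so callers can retry with adjusted prefetch windows; here the result is intentionally ignored, as the comment above notes.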
{
s->DestinationLineTimesForPrefetchLessThan2 = false;
s->VRatioPrefetchMoreThanMax = false;
@@ -10757,11 +11188,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_printf("DML::%s: k=%d MaxVStartupLines = %u\n", __func__, k, s->MaxVStartupLines[k]);
mode_lib->mp.TWait[k] = CalculateTWait(
- display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
- mode_lib->mp.UrgentLatency,
- mode_lib->mp.TripToMemory,
- !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
- get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
+ display_cfg->plane_descriptors[k].overrides.reserved_vblank_time_ns,
+ mode_lib->mp.UrgentLatency,
+ mode_lib->mp.TripToMemory,
+ !dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
myPipe->Dppclk = mode_lib->mp.Dppclk[k];
myPipe->Dispclk = mode_lib->mp.Dispclk;
@@ -10842,6 +11273,9 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->mrq_present = mode_lib->ip.dcn_mrq_present;
CalculatePrefetchSchedule_params->meta_row_bytes = mode_lib->mp.meta_row_bytes[k];
CalculatePrefetchSchedule_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor[k];
+ CalculatePrefetchSchedule_params->impacted_dst_y_pre = s->impacted_dst_y_pre[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_l = mode_lib->mp.vactive_sw_bw_l[k];
+ CalculatePrefetchSchedule_params->vactive_sw_bw_c = mode_lib->mp.vactive_sw_bw_c[k];
// output
CalculatePrefetchSchedule_params->DSTXAfterScaler = &mode_lib->mp.DSTXAfterScaler[k];
@@ -10870,9 +11304,18 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->VUpdateWidthPix = &mode_lib->mp.VUpdateWidthPix[k];
CalculatePrefetchSchedule_params->VReadyOffsetPix = &mode_lib->mp.VReadyOffsetPix[k];
CalculatePrefetchSchedule_params->prefetch_cursor_bw = &mode_lib->mp.prefetch_cursor_bw[k];
+ CalculatePrefetchSchedule_params->prefetch_sw_bytes = &s->prefetch_sw_bytes[k];
+ CalculatePrefetchSchedule_params->Tpre_rounded = &s->Tpre_rounded[k];
+ CalculatePrefetchSchedule_params->Tpre_oto = &s->Tpre_oto[k];
+ CalculatePrefetchSchedule_params->prefetch_swath_time_us = &s->dummy_single[0];
mode_lib->mp.NoTimeToPrefetch[k] = CalculatePrefetchSchedule(&mode_lib->scratch, CalculatePrefetchSchedule_params);
+ if (s->impacted_dst_y_pre[k] > 0)
+ mode_lib->mp.impacted_prefetch_margin_us[k] = (mode_lib->mp.dst_y_prefetch[k] - s->impacted_dst_y_pre[k]) * s->line_times[k];
+ else
+ mode_lib->mp.impacted_prefetch_margin_us[k] = 0;
+
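Worked example with hypothetical numbers: at a 15 us line time (e.g. h_total = 2250 with a 150 MHz pixel clock), dst_y_prefetch = 40 lines and impacted_dst_y_pre = 36 lines give a margin of (40 - 36) * 15 = 60 us; planes that are not impacted keep a margin of 0.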
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%0u NoTimeToPrefetch=%0d\n", __func__, k, mode_lib->mp.NoTimeToPrefetch[k]);
#endif
@@ -10950,8 +11393,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
dml2_printf("DML::%s: k=%0u VRatioY=%f\n", __func__, k, display_cfg->plane_descriptors[k].composition.scaler_info.plane0.v_ratio);
dml2_printf("DML::%s: k=%0u prefetch_vmrow_bw=%f\n", __func__, k, mode_lib->mp.prefetch_vmrow_bw[k]);
- dml2_printf("DML::%s: k=%0u ReadBandwidthSurfaceLuma=%f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%0u ReadBandwidthSurfaceChroma=%f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%0u vactive_sw_bw_l=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: k=%0u vactive_sw_bw_c=%f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
dml2_printf("DML::%s: k=%0u cursor_bw=%f\n", __func__, k, mode_lib->mp.cursor_bw[k]);
dml2_printf("DML::%s: k=%0u dpte_row_bw=%f\n", __func__, k, mode_lib->mp.dpte_row_bw[k]);
dml2_printf("DML::%s: k=%0u meta_row_bw=%f\n", __func__, k, mode_lib->mp.meta_row_bw[k]);
@@ -10982,8 +11425,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor;
calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->mp.mall_prefetch_dram_overhead_factor;
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.SurfaceReadBandwidthChroma;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
@@ -11114,8 +11557,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->mall_prefetch_sdp_overhead_factor = mode_lib->mp.mall_prefetch_sdp_overhead_factor;
calculate_peak_bandwidth_params->mall_prefetch_dram_overhead_factor = mode_lib->mp.mall_prefetch_dram_overhead_factor;
- calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.SurfaceReadBandwidthLuma;
- calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.SurfaceReadBandwidthChroma;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_l = mode_lib->mp.vactive_sw_bw_l;
+ calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
@@ -11232,8 +11675,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
s->mmSOCParameters.USRRetrainingLatency = 0;
s->mmSOCParameters.SMNLatency = 0;
s->mmSOCParameters.g6_temp_read_blackout_us = get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index);
- s->mmSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->ms.uclk_freq_mhz, mode_lib->ms.FabricClock, in_out_params->min_clk_index);
- s->mmSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->ms.FabricClock;
+ s->mmSOCParameters.max_urgent_latency_us = get_max_urgent_latency_us(&mode_lib->soc.qos_parameters.qos_params.dcn4x, mode_lib->mp.uclk_freq_mhz, mode_lib->mp.FabricClock, in_out_params->min_clk_index);
+ s->mmSOCParameters.df_response_time_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.df_qos_response_time_fclk_cycles / mode_lib->mp.FabricClock;
s->mmSOCParameters.qos_type = mode_lib->soc.qos_parameters.qos_type;
CalculateWatermarks_params->display_cfg = display_cfg;
@@ -11283,7 +11726,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(&mode_lib->scratch, CalculateWatermarks_params);
for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
+ if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.active_writebacks_per_stream > 0) {
mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k] = math_max2(0, mode_lib->mp.VStartupMin[k] * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000) - mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark);
mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k] = math_max2(0, mode_lib->mp.VStartupMin[k] * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total /
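Worked example with hypothetical numbers: VStartupMin = 20 lines at a 15 us line time gives 300 us; with a 50 us WritebackDRAMClockChangeWatermark the allow end position is 300 - 50 = 250 us, and math_max2 clamps any negative result to 0.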
@@ -11469,25 +11912,25 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
//Maximum Bandwidth Used
s->TotalWRBandwidth = 0;
- s->WRBandwidth = 0;
- for (k = 0; k < s->num_active_planes; ++k) {
- if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.pixel_format == dml2_444_32) {
- s->WRBandwidth = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width /
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 4;
- } else if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true) {
- s->WRBandwidth = display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_height * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.output_width /
- (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.h_total * display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.scaling_info.input_height / ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000)) * 8;
+ for (k = 0; k < display_cfg->num_streams; ++k) {
+ s->WRBandwidth = 0;
+ if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) {
+ s->WRBandwidth = display_cfg->stream_descriptors[k].writeback.writeback_stream[0].output_height
+ * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].output_width /
+ (display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height
+ / ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000))
+ * (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0);
+ s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
}
- s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth;
}
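Worked example with hypothetical numbers: a 1920x1080 writeback in dml2_444_32 (4 bytes per pixel) on a stream with h_total = 2200, input_height = 1080 and a 148,500 kHz pixel clock spans 2200 * 1080 / 148.5 = 16,000 us of input scan time, so WRBandwidth = 1920 * 1080 / 16,000 * 4 = 518.4 bytes/us (about 518 MB/s); any other writeback pixel format takes the 8.0 multiplier instead.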
mode_lib->mp.TotalDataReadBandwidth = 0;
for (k = 0; k < s->num_active_planes; ++k) {
- mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.SurfaceReadBandwidthLuma[k] + mode_lib->mp.SurfaceReadBandwidthChroma[k];
+ mode_lib->mp.TotalDataReadBandwidth = mode_lib->mp.TotalDataReadBandwidth + mode_lib->mp.vactive_sw_bw_l[k] + mode_lib->mp.vactive_sw_bw_c[k];
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: k=%u, TotalDataReadBandwidth = %f\n", __func__, k, mode_lib->mp.TotalDataReadBandwidth);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceLuma = %f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthLuma[k]);
- dml2_printf("DML::%s: k=%u, ReadBandwidthSurfaceChroma = %f\n", __func__, k, mode_lib->mp.SurfaceReadBandwidthChroma[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_l = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_l[k]);
+ dml2_printf("DML::%s: k=%u, vactive_sw_bw_c = %f\n", __func__, k, mode_lib->mp.vactive_sw_bw_c[k]);
#endif
}
@@ -11524,8 +11967,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculateStutterEfficiency_params->BlockWidth256BytesC = mode_lib->mp.Read256BlockWidthC;
CalculateStutterEfficiency_params->DCCYMaxUncompressedBlock = mode_lib->mp.DCCYMaxUncompressedBlock;
CalculateStutterEfficiency_params->DCCCMaxUncompressedBlock = mode_lib->mp.DCCCMaxUncompressedBlock;
- CalculateStutterEfficiency_params->ReadBandwidthSurfaceLuma = mode_lib->mp.SurfaceReadBandwidthLuma;
- CalculateStutterEfficiency_params->ReadBandwidthSurfaceChroma = mode_lib->mp.SurfaceReadBandwidthChroma;
+ CalculateStutterEfficiency_params->ReadBandwidthSurfaceLuma = mode_lib->mp.vactive_sw_bw_l;
+ CalculateStutterEfficiency_params->ReadBandwidthSurfaceChroma = mode_lib->mp.vactive_sw_bw_c;
CalculateStutterEfficiency_params->dpte_row_bw = mode_lib->mp.dpte_row_bw;
CalculateStutterEfficiency_params->meta_row_bw = mode_lib->mp.meta_row_bw;
CalculateStutterEfficiency_params->rob_alloc_compressed = mode_lib->ip.dcn_mrq_present;
@@ -11736,7 +12179,7 @@ static void rq_dlg_get_wm_regs(const struct dml2_display_cfg *display_cfg, const
wm_regs->fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
wm_regs->sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
wm_regs->sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
+ wm_regs->temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
wm_regs->uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
wm_regs->urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
wm_regs->usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
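These watermark registers hold times converted to refclk cycles: with a hypothetical 100 MHz refclk_freq_in_mhz, a 4.0 us urgent watermark is programmed as 4.0 * 100 = 400 cycles.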
@@ -12236,6 +12679,8 @@ static void rq_dlg_get_dlg_reg(
static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_arb_regs *arb_param)
{
+ double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
+
arb_param->max_req_outstanding = mode_lib->soc.max_outstanding_reqs;
arb_param->min_req_outstanding = mode_lib->soc.max_outstanding_reqs; // turn off the sat level feature if this is set to max
arb_param->sdpif_request_rate_limit = (3 * mode_lib->ip.words_per_channel * mode_lib->soc.clk_table.dram_config.channel_count) / 4;
@@ -12247,6 +12692,7 @@ static void rq_dlg_get_arb_params(const struct dml2_display_cfg *display_cfg, co
arb_param->compbuf_size = mode_lib->mp.CompressedBufferSizeInkByte / mode_lib->ip.compressed_buffer_segment_size_in_kbytes;
arb_param->allow_sdpif_rate_limit_when_cstate_req = dml_get_hw_debug5(mode_lib);
arb_param->dcfclk_deep_sleep_hysteresis = dml_get_dcfclk_deep_sleep_hysteresis(mode_lib);
+ arb_param->pstate_stall_threshold = (unsigned int)(mode_lib->ip_caps.fams2.max_allow_delay_us * refclk_freq_in_mhz);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: max_req_outstanding = %d\n", __func__, arb_param->max_req_outstanding);
@@ -12312,14 +12758,18 @@ void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_interna
void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib,
const struct display_configuation_with_meta *display_cfg,
- struct dmub_fams2_stream_static_state *fams2_programming,
- enum dml2_uclk_pstate_support_method pstate_method,
+ union dmub_cmd_fams2_config *fams2_base_programming,
+ union dmub_cmd_fams2_config *fams2_sub_programming,
+ enum dml2_pstate_method pstate_method,
int plane_index)
{
const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index];
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index];
const struct dml2_fams2_meta *stream_fams2_meta = &display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index];
+ struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base;
+ union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state;
+
unsigned int i;
if (display_cfg->display_config.overrides.all_streams_blanked) {
@@ -12328,110 +12778,110 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
}
/* from display configuration */
- fams2_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
- fams2_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
- fams2_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
+ base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
+ base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch);
- fams2_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_descriptor->timing.v_active);
- fams2_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
+ base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
/* from meta */
- fams2_programming->otg_vline_time_ns =
+ base_programming->otg_vline_time_ns =
(unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0);
- fams2_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
- fams2_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
- fams2_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
- fams2_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
+ base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
+ base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
+ base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_fams2_meta->method_drr.programming_delay_otg_vlines);
- fams2_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
- fams2_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
+ base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
+ base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
/* from core */
- fams2_programming->config.bits.min_ttu_vblank_usable = true;
+ base_programming->config.bits.min_ttu_vblank_usable = true;
for (i = 0; i < display_cfg->display_config.num_planes; i++) {
/* check if all planes support p-state in blank */
if (display_cfg->display_config.plane_descriptors[i].stream_index == plane_descriptor->stream_index &&
mode_lib->mp.MinTTUVBlank[i] <= mode_lib->mp.Watermark.DRAMClockChangeWatermark) {
- fams2_programming->config.bits.min_ttu_vblank_usable = false;
+ base_programming->config.bits.min_ttu_vblank_usable = false;
break;
}
}
switch (pstate_method) {
- case dml2_uclk_pstate_support_method_vactive:
- case dml2_uclk_pstate_support_method_fw_vactive_drr:
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
/* legacy vactive */
- fams2_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
- fams2_programming->sub_state.legacy.vactive_det_fill_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
- fams2_programming->config.bits.clamp_vtotal_min = true;
+ base_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
+ sub_programming->legacy.vactive_det_fill_delay_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
+ base_programming->config.bits.clamp_vtotal_min = true;
break;
- case dml2_uclk_pstate_support_method_vblank:
- case dml2_uclk_pstate_support_method_fw_vblank_drr:
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
/* legacy vblank */
- fams2_programming->type = FAMS2_STREAM_TYPE_VBLANK;
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
- fams2_programming->config.bits.clamp_vtotal_min = true;
+ base_programming->type = FAMS2_STREAM_TYPE_VBLANK;
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
+ base_programming->config.bits.clamp_vtotal_min = true;
break;
- case dml2_uclk_pstate_support_method_fw_drr:
+ case dml2_pstate_method_fw_drr:
/* drr */
- fams2_programming->type = FAMS2_STREAM_TYPE_DRR;
- fams2_programming->sub_state.drr.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
- fams2_programming->sub_state.drr.nom_stretched_vtotal =
- (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
+ base_programming->type = FAMS2_STREAM_TYPE_DRR;
+ sub_programming->drr.programming_delay_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
+ sub_programming->drr.nom_stretched_vtotal =
+ (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
/* drr only clamps to vtotal min for single display */
- fams2_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
- fams2_programming->sub_state.drr.only_stretch_if_required = true;
+ base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
+ sub_programming->drr.only_stretch_if_required = true;
break;
- case dml2_uclk_pstate_support_method_fw_subvp_phantom:
- case dml2_uclk_pstate_support_method_fw_subvp_phantom_drr:
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
/* subvp */
- fams2_programming->type = FAMS2_STREAM_TYPE_SUBVP;
- fams2_programming->sub_state.subvp.vratio_numerator =
- (uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
- fams2_programming->sub_state.subvp.vratio_denominator = 1000;
- fams2_programming->sub_state.subvp.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
- fams2_programming->sub_state.subvp.prefetch_to_mall_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
- fams2_programming->sub_state.subvp.phantom_vtotal =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
- fams2_programming->sub_state.subvp.phantom_vactive =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
- fams2_programming->sub_state.subvp.config.bits.is_multi_planar =
- plane_descriptor->surface.plane1.height > 0;
- fams2_programming->sub_state.subvp.config.bits.is_yuv420 =
- plane_descriptor->pixel_format == dml2_420_8 ||
- plane_descriptor->pixel_format == dml2_420_10 ||
- plane_descriptor->pixel_format == dml2_420_12;
-
- fams2_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
- fams2_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
- fams2_programming->config.bits.clamp_vtotal_min = true;
+ base_programming->type = FAMS2_STREAM_TYPE_SUBVP;
+ sub_programming->subvp.vratio_numerator =
+ (uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
+ sub_programming->subvp.vratio_denominator = 1000;
+ sub_programming->subvp.programming_delay_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
+ sub_programming->subvp.prefetch_to_mall_otg_vlines =
+ (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
+ sub_programming->subvp.phantom_vtotal =
+ (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
+ sub_programming->subvp.phantom_vactive =
+ (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
+ sub_programming->subvp.config.bits.is_multi_planar =
+ plane_descriptor->surface.plane1.height > 0;
+ sub_programming->subvp.config.bits.is_yuv420 =
+ plane_descriptor->pixel_format == dml2_420_8 ||
+ plane_descriptor->pixel_format == dml2_420_10 ||
+ plane_descriptor->pixel_format == dml2_420_12;
+
+ base_programming->allow_start_otg_vline =
+ (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
+ base_programming->allow_end_otg_vline =
+ (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
+ base_programming->config.bits.clamp_vtotal_min = true;
break;
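Note the fixed-point encoding above: vratio_numerator is (uint16_t)(v_ratio * 1000.0) over a constant denominator of 1000, so a vertical scale of 1.25, for example, is carried to DMUB as 1250/1000.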
- case dml2_uclk_pstate_support_method_reserved_hw:
- case dml2_uclk_pstate_support_method_reserved_fw:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_fixed:
- case dml2_uclk_pstate_support_method_reserved_fw_drr_var:
- case dml2_uclk_pstate_support_method_not_supported:
- case dml2_uclk_pstate_support_method_count:
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_na:
+ case dml2_pstate_method_count:
default:
/* this should never happen */
break;
@@ -12560,6 +13010,8 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState;
out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize;
out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits;
+ out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.temp_read_or_ppt_support;
+ out->informative.mode_support_info.g6_temp_read_support = mode_lib->ms.support.g6_temp_read_support;
out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots;
out->informative.mode_support_info.NotEnoughDSCUnits = mode_lib->ms.support.NotEnoughDSCUnits;
@@ -12653,7 +13105,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.watermarks.pstate_change_us = dml_get_wm_dram_clock_change(mode_lib);
out->informative.watermarks.fclk_pstate_change_us = dml_get_wm_fclk_change(mode_lib);
out->informative.watermarks.usr_retraining_us = dml_get_wm_usr_retraining(mode_lib);
- out->informative.watermarks.g6_temp_read_watermark_us = dml_get_wm_g6_temp_read(mode_lib);
+ out->informative.watermarks.temp_read_or_ppt_watermark_us = dml_get_wm_temp_read_or_ppt(mode_lib);
out->informative.mall.total_surface_size_in_mall_bytes = 0;
for (k = 0; k < out->display_config.num_planes; ++k)
@@ -12736,6 +13188,8 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.qos.max_active_fclk_change_latency_supported = dml_get_fclk_change_latency(mode_lib);
+ out->informative.misc.LowestPrefetchMargin = 10 * 1000 * 1000;
+
for (k = 0; k < out->display_config.num_planes; k++) {
if ((out->display_config.plane_descriptors->overrides.reserved_vblank_time_ns >= 1000.0 * mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us)
@@ -12815,6 +13269,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k];
out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k];
+ out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0;
out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k];
out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k];
out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k];
@@ -12822,6 +13277,9 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.misc.PTE_BUFFER_MODE[k] = mode_lib->mp.PTE_BUFFER_MODE[k];
out->informative.misc.DSCDelay[k] = mode_lib->mp.DSCDelay[k];
out->informative.misc.MaxActiveDRAMClockChangeLatencySupported[k] = mode_lib->mp.MaxActiveDRAMClockChangeLatencySupported[k];
+
+ if (mode_lib->mp.impacted_prefetch_margin_us[k] < out->informative.misc.LowestPrefetchMargin)
+ out->informative.misc.LowestPrefetchMargin = mode_lib->mp.impacted_prefetch_margin_us[k];
}
// For this DV informative layer, all pipes in the same plane will just use the same id
@@ -12844,16 +13302,11 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.non_optimized_mcache_allocation[k].global_mcache_ids_plane1[n] = k;
}
}
-
- out->informative.qos.max_non_urgent_latency_us = mode_lib->soc.qos_parameters.qos_params.dcn4x.per_uclk_dpm_params[mode_lib->mp.qos_param_index].maximum_latency_when_non_urgent_uclk_cycles
- / mode_lib->mp.uclk_freq_mhz * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.umc_max_latency_margin / 100.0)
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.mall_overhead_fclk_cycles / mode_lib->mp.FabricClock
- + mode_lib->soc.qos_parameters.qos_params.dcn4x.max_round_trip_to_furthest_cs_fclk_cycles / mode_lib->mp.FabricClock
- * (1 + mode_lib->soc.qos_parameters.qos_params.dcn4x.fabric_max_transport_latency_margin / 100.0);
+ out->informative.qos.max_non_urgent_latency_us = dml_get_max_non_urgent_latency_us(mode_lib);
if (mode_lib->soc.qos_parameters.qos_type == dml2_qos_param_type_dcn4x) {
if (((mode_lib->ip.rob_buffer_size_kbytes - mode_lib->ip.pixel_chunk_size_kbytes) * 1024
- / mode_lib->mp.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
+ / mode_lib->ms.support.non_urg_bandwidth_required[dml2_core_internal_soc_state_sys_active][dml2_core_internal_bw_sdp]) >= out->informative.qos.max_non_urgent_latency_us) {
out->informative.misc.ROBUrgencyAvoidance = true;
} else {
out->informative.misc.ROBUrgencyAvoidance = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
index df2d1550a14b..27ef0e096b25 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
@@ -28,7 +28,7 @@ void dml2_core_calcs_get_plane_support_info(const struct dml2_display_cfg *displ
void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mode_lib *mode_lib, struct dml2_display_cfg_programming *out);
void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index);
void dml2_core_calcs_get_mall_allocation(struct dml2_core_internal_display_mode_lib *mode_lib, unsigned int *out, int pipe_index);
-void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_fams2_stream_static_state *fams2_programming, enum dml2_uclk_pstate_support_method pstate_method, int plane_index);
+void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, union dmub_cmd_fams2_config *fams2_base_programming, union dmub_cmd_fams2_config *fams2_sub_programming, enum dml2_pstate_method pstate_method, int plane_index);
void dml2_core_calcs_get_global_fams2_programming(const struct dml2_core_internal_display_mode_lib *mode_lib, const struct display_configuation_with_meta *display_cfg, struct dmub_cmd_fams2_global_config *fams2_global_config);
void dml2_core_calcs_get_dpte_row_height(unsigned int *dpte_row_height, struct dml2_core_internal_display_mode_lib *mode_lib, bool is_plane1, enum dml2_source_format_class SourcePixelFormat, enum dml2_swizzle_mode SurfaceTiling, enum dml2_rotation_angle ScanDirection, unsigned int pitch, unsigned int GPUVMMinPageSizeKBytes);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index cbdfbd5a0bde..23c0fca5515f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -201,7 +201,7 @@ struct dml2_core_internal_watermarks {
double Z8StutterExitWatermark;
double Z8StutterEnterPlusExitWatermark;
double USRRetrainingWatermark;
- double g6_temp_read_watermark_us;
+ double temp_read_or_ppt_watermark_us;
};
struct dml2_core_internal_mode_support_info {
@@ -252,8 +252,8 @@ struct dml2_core_internal_mode_support_info {
bool PTEBufferSizeNotExceeded;
bool DCCMetaBufferSizeNotExceeded;
- enum dml2_dram_clock_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
- enum dml2_fclock_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
bool USRRetrainingSupport;
@@ -318,12 +318,15 @@ struct dml2_core_internal_mode_support_info {
bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max];
double max_urgent_latency_us;
+ double max_non_urgent_latency_us;
double avg_non_urgent_latency_us;
double avg_urgent_latency_us;
+ double df_response_time_us;
bool incorrect_imall_usage;
bool g6_temp_read_support;
+ bool temp_read_or_ppt_support;
struct dml2_core_internal_watermarks watermarks;
};
@@ -378,8 +381,8 @@ struct dml2_core_internal_mode_support {
unsigned int DETBufferSizeC[DML2_MAX_PLANES];
unsigned int SwathHeightY[DML2_MAX_PLANES];
unsigned int SwathHeightC[DML2_MAX_PLANES];
- unsigned int SwathWidthY[DML2_MAX_PLANES];
- unsigned int SwathWidthC[DML2_MAX_PLANES];
+ unsigned int SwathWidthY[DML2_MAX_PLANES]; // per-pipe
+ unsigned int SwathWidthC[DML2_MAX_PLANES]; // per-pipe
// ----------------------------------
// Intermediates/Informational
@@ -476,9 +479,9 @@ struct dml2_core_internal_mode_support {
// Bandwidth Related Info
double BandwidthAvailableForImmediateFlip;
- double SurfaceReadBandwidthLuma[DML2_MAX_PLANES]; // no dcc overhead, for the plane
- double SurfaceReadBandwidthChroma[DML2_MAX_PLANES];
- double WriteBandwidth[DML2_MAX_PLANES];
+ double vactive_sw_bw_l[DML2_MAX_PLANES]; // no dcc overhead, for the plane
+ double vactive_sw_bw_c[DML2_MAX_PLANES];
+ double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK];
double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES];
double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES];
double cursor_bw[DML2_MAX_PLANES];
@@ -539,7 +542,7 @@ struct dml2_core_internal_mode_program {
unsigned int qos_param_index; // to access the uclk dependent dpm table
unsigned int active_min_uclk_dpm_index; // to access the min_clk table
double FabricClock; /// <brief Basically just the clock freq at the min (or given) state
- double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
+ //double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting
double dram_bw_mbps;
double uclk_freq_mhz;
unsigned int NoOfDPP[DML2_MAX_PLANES];
@@ -562,14 +565,14 @@ struct dml2_core_internal_mode_program {
double BytePerPixelInDETC[DML2_MAX_PLANES];
unsigned int BytePerPixelY[DML2_MAX_PLANES];
unsigned int BytePerPixelC[DML2_MAX_PLANES];
- unsigned int SwathWidthY[DML2_MAX_PLANES];
- unsigned int SwathWidthC[DML2_MAX_PLANES];
+ unsigned int SwathWidthY[DML2_MAX_PLANES]; // per-pipe
+ unsigned int SwathWidthC[DML2_MAX_PLANES]; // per-pipe
unsigned int req_per_swath_ub_l[DML2_MAX_PLANES];
unsigned int req_per_swath_ub_c[DML2_MAX_PLANES];
unsigned int SwathWidthSingleDPPY[DML2_MAX_PLANES];
unsigned int SwathWidthSingleDPPC[DML2_MAX_PLANES];
- double SurfaceReadBandwidthLuma[DML2_MAX_PLANES];
- double SurfaceReadBandwidthChroma[DML2_MAX_PLANES];
+ double vactive_sw_bw_l[DML2_MAX_PLANES];
+ double vactive_sw_bw_c[DML2_MAX_PLANES];
double excess_vactive_fill_bw_l[DML2_MAX_PLANES];
double excess_vactive_fill_bw_c[DML2_MAX_PLANES];
@@ -797,8 +800,9 @@ struct dml2_core_internal_mode_program {
double MaxActiveFCLKChangeLatencySupported;
bool USRRetrainingSupport;
bool g6_temp_read_support;
- enum dml2_fclock_change_support FCLKChangeSupport[DML2_MAX_PLANES];
- enum dml2_dram_clock_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
+ bool temp_read_or_ppt_support;
+ enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
@@ -846,6 +850,8 @@ struct dml2_core_internal_mode_program {
bool mall_comb_mcache_l[DML2_MAX_PLANES];
bool mall_comb_mcache_c[DML2_MAX_PLANES];
bool lc_comb_mcache[DML2_MAX_PLANES];
+
+ double impacted_prefetch_margin_us[DML2_MAX_PLANES];
};
struct dml2_core_internal_SOCParametersList {
@@ -862,6 +868,7 @@ struct dml2_core_internal_SOCParametersList {
double USRRetrainingLatency;
double SMNLatency;
double g6_temp_read_blackout_us;
+ double temp_read_or_ppt_blackout_us;
double max_urgent_latency_us;
double df_response_time_us;
enum dml2_qos_param_type qos_type;
@@ -951,6 +958,7 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
double tdlut_opt_time[DML2_MAX_PLANES];
double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_to_deliver[DML2_MAX_PLANES];
unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
@@ -961,6 +969,18 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+
+ double prefetch_sw_bytes[DML2_MAX_PLANES];
+ double Tpre_rounded[DML2_MAX_PLANES];
+ double Tpre_oto[DML2_MAX_PLANES];
+ bool recalc_prefetch_schedule;
+ bool recalc_prefetch_done;
+ double impacted_dst_y_pre[DML2_MAX_PLANES];
+ double line_times[DML2_MAX_PLANES];
+ enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_l[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_c[DML2_MAX_PLANES];
+ double prefetch_swath_time_us[DML2_MAX_PLANES];
};
struct dml2_core_calcs_mode_programming_locals {
@@ -1024,6 +1044,7 @@ struct dml2_core_calcs_mode_programming_locals {
unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES];
double tdlut_opt_time[DML2_MAX_PLANES];
double tdlut_drain_time[DML2_MAX_PLANES];
+ unsigned int tdlut_bytes_to_deliver[DML2_MAX_PLANES];
unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES];
unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
@@ -1041,6 +1062,16 @@ struct dml2_core_calcs_mode_programming_locals {
unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+
+ double prefetch_sw_bytes[DML2_MAX_PLANES];
+ double Tpre_rounded[DML2_MAX_PLANES];
+ double Tpre_oto[DML2_MAX_PLANES];
+ bool recalc_prefetch_schedule;
+ double impacted_dst_y_pre[DML2_MAX_PLANES];
+ double line_times[DML2_MAX_PLANES];
+ enum dml2_source_format_class pixel_format[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_l[DML2_MAX_PLANES];
+ unsigned int lb_source_lines_c[DML2_MAX_PLANES];
};
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals {
@@ -1048,6 +1079,7 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_local
double ActiveFCLKChangeLatencyMargin[DML2_MAX_PLANES];
double USRRetrainingLatencyMargin[DML2_MAX_PLANES];
double g6_temp_read_latency_margin[DML2_MAX_PLANES];
+ double temp_read_or_ppt_latency_margin[DML2_MAX_PLANES];
double EffectiveLBLatencyHidingY;
double EffectiveLBLatencyHidingC;
@@ -1185,17 +1217,14 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double LineTime;
double dst_y_prefetch_equ;
double prefetch_bw_oto;
+ double per_pipe_vactive_sw_bw;
double Tvm_oto;
double Tr0_oto;
- double Tvm_no_trip_oto;
- double Tr0_no_trip_oto;
double Tvm_oto_lines;
double Tr0_oto_lines;
double dst_y_prefetch_oto;
double TimeForFetchingVM;
double TimeForFetchingRowInVBlank;
- double dst_y_per_vm_no_trip_vblank;
- double dst_y_per_row_no_trip_vblank;
double LinesToRequestPrefetchPixelData;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
@@ -1203,15 +1232,12 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double Tr0_trips_rounded;
double max_Tsw;
double Lsw_oto;
- double Lsw_equ;
- double Tpre_rounded;
double prefetch_bw_equ;
double Tvm_equ;
double Tr0_equ;
double Tdmbf;
double Tdmec;
double Tdmsks;
- double prefetch_sw_bytes;
double total_row_bytes;
double prefetch_bw_pr;
double bytes_pp;
@@ -1225,6 +1251,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double prefetch_bw2;
double prefetch_bw3;
double prefetch_bw4;
+ double dst_y_prefetch_equ_impacted;
double TWait_p;
unsigned int cursor_prefetch_bytes;
@@ -1545,17 +1572,18 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
// Output
struct dml2_core_internal_watermarks *Watermark;
- enum dml2_dram_clock_change_support *DRAMClockChangeSupport;
+ enum dml2_pstate_change_support *DRAMClockChangeSupport;
bool *global_dram_clock_change_supported;
double *MaxActiveDRAMClockChangeLatencySupported;
unsigned int *SubViewportLinesNeededInMALL;
- enum dml2_fclock_change_support *FCLKChangeSupport;
+ enum dml2_pstate_change_support *FCLKChangeSupport;
bool *global_fclk_change_supported;
double *MaxActiveFCLKChangeLatencySupported;
bool *USRRetrainingSupport;
double *VActiveLatencyHidingMargin;
double *VActiveLatencyHidingUs;
bool *g6_temp_read_support;
+ bool *temp_read_or_ppt_support;
};
@@ -1727,8 +1755,8 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double PrefetchSourceLinesC;
unsigned int VInitPreFillC;
unsigned int MaxNumSwathC;
- unsigned int swath_width_luma_ub;
- unsigned int swath_width_chroma_ub;
+ unsigned int swath_width_luma_ub; // per-pipe
+ unsigned int swath_width_chroma_ub; // per-pipe
unsigned int SwathHeightY;
unsigned int SwathHeightC;
double TWait;
@@ -1750,6 +1778,10 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
unsigned int meta_row_bytes;
double mall_prefetch_sdp_overhead_factor;
+ double impacted_dst_y_pre;
+ double vactive_sw_bw_l; // per surface bw
+ double vactive_sw_bw_c; // per surface bw
+
// output
unsigned int *DSTXAfterScaler;
unsigned int *DSTYAfterScaler;
@@ -1767,6 +1799,8 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double *Tdmdl_vm;
double *Tdmdl;
double *TSetup;
+ double *Tpre_rounded;
+ double *Tpre_oto;
double *Tvm_trips;
double *Tr0_trips;
double *Tvm_trips_flip;
@@ -1777,6 +1811,48 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
unsigned int *VUpdateWidthPix;
unsigned int *VReadyOffsetPix;
double *prefetch_cursor_bw;
+ double *prefetch_sw_bytes;
+ double *prefetch_swath_time_us;
+};
+
+struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params {
+ unsigned int num_active_planes;
+ enum dml2_source_format_class *pixel_format;
+ unsigned int rob_buffer_size_kbytes;
+ unsigned int compressed_buffer_size_kbytes;
+ unsigned int chunk_bytes_l; // same for all planes
+ unsigned int chunk_bytes_c;
+ unsigned int *detile_buffer_size_bytes_l;
+ unsigned int *detile_buffer_size_bytes_c;
+ unsigned int *full_swath_bytes_l;
+ unsigned int *full_swath_bytes_c;
+ unsigned int *lb_source_lines_l;
+ unsigned int *lb_source_lines_c;
+ unsigned int *swath_height_l;
+ unsigned int *swath_height_c;
+ double *prefetch_sw_bytes;
+ double *Tpre_rounded;
+ double *Tpre_oto;
+ double estimated_dcfclk_mhz;
+ double estimated_urg_bandwidth_required_mbps;
+ double *line_time;
+ double *dst_y_prefetch;
+
+ // output
+ bool *recalc_prefetch_schedule;
+ double *impacted_dst_y_pre;
+};
+
+struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals {
+ unsigned int max_Trpd_dcfclk_cycles;
+ unsigned int burst_bytes_to_fill_det;
+ double time_to_fill_det_us;
+ unsigned int accumulated_return_path_dcfclk_cycles[DML2_MAX_PLANES];
+ bool prefetch_global_check_passed;
+ unsigned int src_swath_bytes_l[DML2_MAX_PLANES];
+ unsigned int src_swath_bytes_c[DML2_MAX_PLANES];
+ unsigned int src_detile_buf_size_bytes_l[DML2_MAX_PLANES];
+ unsigned int src_detile_buf_size_bytes_c[DML2_MAX_PLANES];
};
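Note: like the other *_locals and *_params structures in this header, these live inside dml2_core_internal_scratch (added to the scratch struct further down) rather than on the stack; with several DML2_MAX_PLANES-sized arrays per structure, on-stack allocation in kernel context would likely be prohibitively large.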
struct dml2_core_calcs_calculate_mcache_row_bytes_params {
@@ -1921,6 +1997,7 @@ struct dml2_core_calcs_calculate_tdlut_setting_params {
unsigned int *tdlut_groups_per_2row_ub;
double *tdlut_opt_time;
double *tdlut_drain_time;
+ unsigned int *tdlut_bytes_to_deliver;
unsigned int *tdlut_bytes_per_group;
};
@@ -2004,6 +2081,7 @@ struct dml2_core_internal_scratch {
struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_locals;
struct dml2_core_calcs_CalculateVMRowAndSwath_locals CalculateVMRowAndSwath_locals;
struct dml2_core_calcs_CalculatePrefetchSchedule_locals CalculatePrefetchSchedule_locals;
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_locals CheckGlobalPrefetchAdmissibility_locals;
struct dml2_core_shared_CalculateSwathAndDETConfiguration_locals CalculateSwathAndDETConfiguration_locals;
struct dml2_core_shared_TruncToValidBPP_locals TruncToValidBPP_locals;
struct dml2_core_shared_CalculateDETBufferSize_locals CalculateDETBufferSize_locals;
@@ -2019,6 +2097,7 @@ struct dml2_core_internal_scratch {
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params CalculateSwathAndDETConfiguration_params;
struct dml2_core_calcs_CalculateStutterEfficiency_params CalculateStutterEfficiency_params;
struct dml2_core_calcs_CalculatePrefetchSchedule_params CalculatePrefetchSchedule_params;
+ struct dml2_core_calcs_CheckGlobalPrefetchAdmissibility_params CheckGlobalPrefetchAdmissibility_params;
struct dml2_core_calcs_calculate_mcache_setting_params calculate_mcache_setting_params;
struct dml2_core_calcs_calculate_tdlut_setting_params calculate_tdlut_setting_params;
struct dml2_core_shared_calculate_vm_and_row_bytes_params calculate_vm_and_row_bytes_params;
@@ -2038,7 +2117,6 @@ struct dml2_core_internal_display_mode_lib {
// Used to hold input; intermediate and output of the calculations
struct dml2_core_internal_mode_support ms; // struct for mode support
struct dml2_core_internal_mode_program mp; // struct for mode programming
-
// Available overridable calculators for core_shared.
// if null, core_shared will use default calculators.
struct dml2_core_shared_calculation_funcs funcs;
@@ -2051,7 +2129,6 @@ struct dml2_core_calcs_mode_support_ex {
const struct dml2_display_cfg *in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
-
//unsigned int in_state_index;
struct dml2_core_internal_mode_support_info *out_evaluation_info;
};
@@ -2064,9 +2141,7 @@ struct dml2_core_calcs_mode_programming_ex {
const struct dml2_mcg_min_clock_table *min_clk_table;
const struct core_display_cfg_support_info *cfg_support_info;
int min_clk_index;
-
struct dml2_display_cfg_programming *programming;
-
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
index ab229e1598ae..456b3f8a6d38 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
@@ -63,6 +63,150 @@ bool dml2_core_utils_is_420(enum dml2_source_format_class source_format)
case dml2_mono_16:
val = 0;
break;
+ case dml2_422_planar_8:
+ val = 0;
+ break;
+ case dml2_422_planar_10:
+ val = 0;
+ break;
+ case dml2_422_planar_12:
+ val = 0;
+ break;
+ case dml2_422_packed_8:
+ val = 0;
+ break;
+ case dml2_422_packed_10:
+ val = 0;
+ break;
+ case dml2_422_packed_12:
+ val = 0;
+ break;
+ default:
+ DML2_ASSERT(0);
+ break;
+ }
+ return val;
+}
+
+bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format)
+{
+ bool val = false;
+
+ switch (source_format) {
+ case dml2_444_8:
+ val = 0;
+ break;
+ case dml2_444_16:
+ val = 0;
+ break;
+ case dml2_444_32:
+ val = 0;
+ break;
+ case dml2_444_64:
+ val = 0;
+ break;
+ case dml2_420_8:
+ val = 0;
+ break;
+ case dml2_420_10:
+ val = 0;
+ break;
+ case dml2_420_12:
+ val = 0;
+ break;
+ case dml2_rgbe_alpha:
+ val = 0;
+ break;
+ case dml2_rgbe:
+ val = 0;
+ break;
+ case dml2_mono_8:
+ val = 0;
+ break;
+ case dml2_mono_16:
+ val = 0;
+ break;
+ case dml2_422_planar_8:
+ val = 1;
+ break;
+ case dml2_422_planar_10:
+ val = 1;
+ break;
+ case dml2_422_planar_12:
+ val = 1;
+ break;
+ case dml2_422_packed_8:
+ val = 0;
+ break;
+ case dml2_422_packed_10:
+ val = 0;
+ break;
+ case dml2_422_packed_12:
+ val = 0;
+ break;
+ default:
+ DML2_ASSERT(0);
+ break;
+ }
+ return val;
+}
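The exhaustive switch above keeps the assert-on-unknown-format behaviour at the cost of verbosity. A behaviourally similar compact form — a sketch reusing the enum values from this diff, which silently returns false for unknown formats instead of asserting:

static bool is_422_planar_sketch(enum dml2_source_format_class source_format)
{
        switch (source_format) {
        case dml2_422_planar_8:
        case dml2_422_planar_10:
        case dml2_422_planar_12:
                return true;    /* the only 4:2:2 planar formats */
        default:
                return false;
        }
}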
+
+bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format)
+{
+ bool val = false;
+
+ switch (source_format) {
+ case dml2_444_8:
+ val = 0;
+ break;
+ case dml2_444_16:
+ val = 0;
+ break;
+ case dml2_444_32:
+ val = 0;
+ break;
+ case dml2_444_64:
+ val = 0;
+ break;
+ case dml2_420_8:
+ val = 0;
+ break;
+ case dml2_420_10:
+ val = 0;
+ break;
+ case dml2_420_12:
+ val = 0;
+ break;
+ case dml2_rgbe_alpha:
+ val = 0;
+ break;
+ case dml2_rgbe:
+ val = 0;
+ break;
+ case dml2_mono_8:
+ val = 0;
+ break;
+ case dml2_mono_16:
+ val = 0;
+ break;
+ case dml2_422_planar_8:
+ val = 0;
+ break;
+ case dml2_422_planar_10:
+ val = 0;
+ break;
+ case dml2_422_planar_12:
+ val = 0;
+ break;
+ case dml2_422_packed_8:
+ val = 1;
+ break;
+ case dml2_422_packed_10:
+ val = 1;
+ break;
+ case dml2_422_packed_12:
+ val = 1;
+ break;
default:
DML2_ASSERT(0);
break;
@@ -154,9 +298,9 @@ void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mod
dml2_printf("DML: support: DynamicMetadataSupported = %d\n", support->DynamicMetadataSupported);
if (!fail_only || support->VRatioInPrefetchSupported == 0)
dml2_printf("DML: support: VRatioInPrefetchSupported = %d\n", support->VRatioInPrefetchSupported);
- if (!fail_only || support->PTEBufferSizeNotExceeded == 1)
+ if (!fail_only || support->PTEBufferSizeNotExceeded == 0)
dml2_printf("DML: support: PTEBufferSizeNotExceeded = %d\n", support->PTEBufferSizeNotExceeded);
- if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 1)
+ if (!fail_only || support->DCCMetaBufferSizeNotExceeded == 0)
dml2_printf("DML: support: DCCMetaBufferSizeNotExceeded = %d\n", support->DCCMetaBufferSizeNotExceeded);
if (!fail_only || support->ExceededMALLSize == 1)
dml2_printf("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
@@ -280,39 +424,49 @@ bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_c
return is_phantom;
}
-unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode)
-{
- switch (sw_mode) {
- case (dml2_sw_linear):
- return 256; break;
- case (dml2_sw_256b_2d):
- return 256; break;
- case (dml2_sw_4kb_2d):
- return 4096; break;
- case (dml2_sw_64kb_2d):
- return 65536; break;
- case (dml2_sw_256kb_2d):
- return 262144; break;
- case (dml2_gfx11_sw_linear):
- return 256; break;
- case (dml2_gfx11_sw_64kb_d):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_t):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_d_x):
- return 65536; break;
- case (dml2_gfx11_sw_64kb_r_x):
- return 65536; break;
- case (dml2_gfx11_sw_256kb_d_x):
- return 262144; break;
- case (dml2_gfx11_sw_256kb_r_x):
- return 262144; break;
- default:
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
+{
+
+ if (sw_mode == dml2_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_sw_256b_2d)
+ return 256;
+ else if (sw_mode == dml2_sw_4kb_2d)
+ return 4096;
+ else if (sw_mode == dml2_sw_64kb_2d)
+ return 65536;
+ else if (sw_mode == dml2_sw_256kb_2d)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_linear)
+ return 256;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_t)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_d_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_64kb_r_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx11_sw_256kb_d_x)
+ return 262144;
+ else if (sw_mode == dml2_gfx11_sw_256kb_r_x)
+ return 262144;
+ else {
DML2_ASSERT(0);
return 256;
}
}
+bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
+{
+ return (byte_per_pixel != 2);
+}
+
+bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
+{
+ return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
+}
+
bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan)
{
@@ -325,7 +479,6 @@ bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan)
return is_vert;
}
-
int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
{
int unsigned version = 0;
@@ -334,17 +487,17 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_sw_256b_2d ||
sw_mode == dml2_sw_4kb_2d ||
sw_mode == dml2_sw_64kb_2d ||
- sw_mode == dml2_sw_256kb_2d) {
+ sw_mode == dml2_sw_256kb_2d)
version = 12;
- } else if (sw_mode == dml2_gfx11_sw_linear ||
+ else if (sw_mode == dml2_gfx11_sw_linear ||
sw_mode == dml2_gfx11_sw_64kb_d ||
sw_mode == dml2_gfx11_sw_64kb_d_t ||
sw_mode == dml2_gfx11_sw_64kb_d_x ||
sw_mode == dml2_gfx11_sw_64kb_r_x ||
sw_mode == dml2_gfx11_sw_256kb_d_x ||
- sw_mode == dml2_gfx11_sw_256kb_r_x) {
+ sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
- } else {
+ else {
dml2_printf("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
DML2_ASSERT(0);
}
@@ -403,7 +556,7 @@ bool dml2_core_utils_is_dual_plane(enum dml2_source_format_class source_format)
{
bool ret_val = 0;
- if ((source_format == dml2_420_12) || (source_format == dml2_420_8) || (source_format == dml2_420_10) || (source_format == dml2_rgbe_alpha))
+ if (dml2_core_utils_is_420(source_format) || dml2_core_utils_is_422_planar(source_format) || (source_format == dml2_rgbe_alpha))
ret_val = 1;
return ret_val;
@@ -425,6 +578,7 @@ static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters
phantom->timing.v_total = meta->v_total;
phantom->timing.v_active = meta->v_active;
phantom->timing.v_front_porch = meta->v_front_porch;
+ phantom->timing.v_blank_end = phantom->timing.v_total - phantom->timing.v_front_porch - phantom->timing.v_active;
phantom->timing.vblank_nom = phantom->timing.v_total - phantom->timing.v_active;
phantom->timing.drr_config.enabled = false;
}
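
The derived phantom vblank fields follow directly from the copied timing. With illustrative numbers only (v_total = 1125, v_active = 1080, v_front_porch = 4):

	v_blank_end = 1125 - 4 - 1080 = 41 lines
	vblank_nom  = 1125 - 1080     = 45 lines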
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
index a5cc6a07167a..95f0d017add4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
@@ -11,6 +11,8 @@
double dml2_core_utils_div_rem(double dividend, unsigned int divisor, unsigned int *remainder);
const char *dml2_core_utils_internal_bw_type_str(enum dml2_core_internal_bw_type bw_type);
bool dml2_core_utils_is_420(enum dml2_source_format_class source_format);
+bool dml2_core_utils_is_422_planar(enum dml2_source_format_class source_format);
+bool dml2_core_utils_is_422_packed(enum dml2_source_format_class source_format);
void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mode_support_info *support, bool fail_only);
const char *dml2_core_utils_internal_soc_state_type_str(enum dml2_core_internal_soc_state_type dml2_core_internal_soc_state_type);
void dml2_core_utils_get_stream_output_bpp(double *out_bpp, const struct dml2_display_cfg *display_cfg);
@@ -18,8 +20,10 @@ unsigned int dml2_core_utils_round_to_multiple(unsigned int num, unsigned int mu
unsigned int dml2_core_util_get_num_active_pipes(int unsigned num_planes, const struct core_display_cfg_support_info *cfg_support_info);
void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_info *cfg_support_info, unsigned int *pipe_plane);
bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
-unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode);
+unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
+bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan);
+bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode);
int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode);
unsigned int dml2_core_utils_get_qos_param_index(unsigned long uclk_freq_khz, const struct dml2_dcn4_uclk_dpm_dependent_qos_params *per_uclk_dpm_params);
unsigned int dml2_core_utils_get_active_min_uclk_dpm_index(unsigned long uclk_freq_khz, const struct dml2_soc_state_table *clk_table);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 8869ea089312..fc77fb34a19a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -96,6 +96,7 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm
double min_uclk_latency;
const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
+ /* assumes DF throttling is enabled */
min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.dcn_mall_prefetch_average.dram_derate_percent_pixel / 100);
@@ -125,6 +126,37 @@ static void calculate_svp_prefetch_minimums(struct dml2_dpmm_map_mode_to_soc_dpm
in_out->programming->min_clocks.dcn4x.svp_prefetch.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
in_out->programming->min_clocks.dcn4x.svp_prefetch.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
in_out->programming->min_clocks.dcn4x.svp_prefetch.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
+
+ /* assumes DF throttling is disabled */
+ min_uclk_avg = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.average_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
+ min_uclk_avg = (double)min_uclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dram_derate_percent_pixel / 100);
+
+ min_uclk_urgent = dram_bw_kbps_to_uclk_khz(mode_support_result->global.svp_prefetch.urgent_bw_dram_kbps, &in_out->soc_bb->clk_table.dram_config);
+ min_uclk_urgent = (double)min_uclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dram_derate_percent_pixel / 100);
+
+ min_uclk_bw = min_uclk_urgent > min_uclk_avg ? min_uclk_urgent : min_uclk_avg;
+
+ min_fclk_avg = (double)mode_support_result->global.svp_prefetch.average_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
+ min_fclk_avg = (double)min_fclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.fclk_derate_percent / 100);
+
+ min_fclk_urgent = (double)mode_support_result->global.svp_prefetch.urgent_bw_sdp_kbps / in_out->soc_bb->fabric_datapath_to_dcn_data_return_bytes;
+ min_fclk_urgent = (double)min_fclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.fclk_derate_percent / 100);
+
+ min_fclk_bw = min_fclk_urgent > min_fclk_avg ? min_fclk_urgent : min_fclk_avg;
+
+ min_dcfclk_avg = (double)mode_support_result->global.svp_prefetch.average_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
+ min_dcfclk_avg = (double)min_dcfclk_avg / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_average.dcfclk_derate_percent / 100);
+
+ min_dcfclk_urgent = (double)mode_support_result->global.svp_prefetch.urgent_bw_sdp_kbps / in_out->soc_bb->return_bus_width_bytes;
+ min_dcfclk_urgent = (double)min_dcfclk_urgent / ((double)in_out->soc_bb->qos_parameters.derate_table.system_active_urgent.dcfclk_derate_percent / 100);
+
+ min_dcfclk_bw = min_dcfclk_urgent > min_dcfclk_avg ? min_dcfclk_urgent : min_dcfclk_avg;
+
+ get_minimum_clocks_for_latency(in_out, &min_uclk_latency, &min_fclk_latency, &min_dcfclk_latency);
+
+ in_out->programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz = dml_round_up(min_uclk_bw > min_uclk_latency ? min_uclk_bw : min_uclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz = dml_round_up(min_fclk_bw > min_fclk_latency ? min_fclk_bw : min_fclk_latency);
+ in_out->programming->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz = dml_round_up(min_dcfclk_bw > min_dcfclk_latency ? min_dcfclk_bw : min_dcfclk_latency);
}
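
For reference, the per-domain pattern repeated above, isolated as a standalone sketch; the function and parameter names are illustrative only, and bytes_per_clk stands in for the bandwidth-to-clock conversion (dram_bw_kbps_to_uclk_khz() in the uclk case):

static double min_clock_khz(double avg_bw_kbps, double urgent_bw_kbps,
		double avg_derate_pct, double urgent_derate_pct,
		double bytes_per_clk, double latency_floor_khz)
{
	/* derate average and urgent bandwidth requirements independently */
	double min_avg = (avg_bw_kbps / bytes_per_clk) / (avg_derate_pct / 100.0);
	double min_urgent = (urgent_bw_kbps / bytes_per_clk) / (urgent_derate_pct / 100.0);
	/* the bandwidth floor is the larger of the two */
	double min_bw = min_urgent > min_avg ? min_urgent : min_avg;
	/* the final minimum must also satisfy the latency-derived floor */
	return min_bw > latency_floor_khz ? min_bw : latency_floor_khz;
}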
static void calculate_idle_minimums(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out)
@@ -272,6 +304,17 @@ static bool map_soc_min_clocks_to_dpm_fine_grained(struct dml2_display_cfg_progr
if (result)
result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.idle.uclk_khz, &state_table->uclk);
+ /* these clocks are optional, so they can fail to map, in which case map all to 0 */
+ if (result) {
+ if (!round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz, &state_table->dcfclk) ||
+ !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz, &state_table->fclk) ||
+ !round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz, &state_table->uclk)) {
+ display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.dcfclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz = 0;
+ display_cfg->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz = 0;
+ }
+ }
+
return result;
}
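
The fallback above is deliberately all-or-nothing: if any one of the three optional clocks cannot be mapped to a DPM state, all three are zeroed so consumers see a consistent triple. A hypothetical helper capturing that shape:

static void zero_if_any_unmapped(unsigned long *clks[], const bool mapped[], int n)
{
	int i;
	bool all_ok = true;

	for (i = 0; i < n; i++)
		all_ok &= mapped[i];
	if (!all_ok)
		for (i = 0; i < n; i++)
			*clks[i] = 0; /* consistent "not available" marker */
}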
@@ -374,11 +417,11 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo
static bool are_timings_trivially_synchronizable(struct dml2_display_cfg *display_config, int mask)
{
- unsigned char i;
+ unsigned int i;
bool identical = true;
bool contains_drr = false;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
// Create a remap array to enable simple iteration through only masked stream indices
for (i = 0; i < display_config->num_streams; i++) {
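
The widening from unsigned char to unsigned int keeps these indices in the natural type for the bit tests and array math that follow. The remap build itself, as a sketch (hypothetical helper; assumes mask is a plain stream bitfield):

static unsigned int build_remap_array(unsigned int mask, unsigned int num_streams,
		unsigned int remap[], unsigned int max_entries)
{
	unsigned int i, n = 0;

	for (i = 0; i < num_streams && n < max_entries; i++)
		if (mask & (1u << i))
			remap[n++] = i; /* collect only the masked stream indices */
	return n;
}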
@@ -413,10 +456,10 @@ static bool are_timings_trivially_synchronizable(struct dml2_display_cfg *displa
static int find_smallest_idle_time_in_vblank_us(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out, int mask)
{
- unsigned char i;
+ unsigned int i;
int min_idle_us = 0;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
const struct dml2_core_mode_support_result *mode_support_result = &in_out->display_cfg->mode_support_result;
// Create a remap array to enable simple iteration through only masked stream indices
@@ -711,7 +754,7 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
@@ -725,7 +768,7 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
- dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.g6_temp_read_watermark_us * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B].usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index a31db5742675..e763c8e45da8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -195,11 +195,11 @@ static int count_planes_with_stream_index(const struct dml2_display_cfg *display
static bool are_timings_trivially_synchronizable(struct display_configuation_with_meta *display_config, int mask)
{
- unsigned char i;
+ unsigned int i;
bool identical = true;
bool contains_drr = false;
- unsigned char remap_array[DML2_MAX_PLANES];
- unsigned char remap_array_size = 0;
+ unsigned int remap_array[DML2_MAX_PLANES];
+ unsigned int remap_array_size = 0;
// Create a remap array to enable simple iteration through only masked stream indices
for (i = 0; i < display_config->display_config.num_streams; i++) {
@@ -347,8 +347,12 @@ static int find_highest_odm_load_stream_index(
int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
for (i = 0; i < display_config->num_streams; i++) {
- odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
+ if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
+ odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
/ mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
+ else
+ odm_load = 0;
+
if (odm_load > highest_odm_load) {
highest_odm_load_index = i;
highest_odm_load = odm_load;
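
odms_used can legitimately be zero before ODM assignment, so the load must default instead of dividing; the same guard is applied to the dml2_pmo_dcn4_fams2.c copy of this function later in the patch. The pattern in isolation:

static int per_odm_pixel_load(int pixel_clock_khz, int odms_used)
{
	/* avoid a division by zero when no ODM segments are assigned yet */
	return odms_used > 0 ? pixel_clock_khz / odms_used : 0;
}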
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 1cf9015e854a..a3324f7b9ba6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -8,36 +8,37 @@
#include "dml2_pmo_dcn4_fams2.h"
static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
+static const double MIN_BLANK_STUTTER_FACTOR = 3.0;
static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
// VActive Preferred
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then SVP
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Finally VBlank, but allow base clocks for latency to increase
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
*/
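
Each table entry pairs a per-stream method tuple with an allow_state_increase flag, and entries are tried in list order, so cheaper strategies must come first. A hypothetical extra single-display entry (not part of the patch) would look like:

	// SVP without permitting higher clock states
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = false,
	},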
@@ -48,56 +49,56 @@ static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1
static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = {
// VActive only is preferred
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then VActive + VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then VBlank only
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then SVP + VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = false,
},
// Then SVP + DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then SVP + SVP
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_fw_svp, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then DRR + VActive
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Then DRR + DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
// Finally VBlank, but allow base clocks for latency to increase
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
.allow_state_increase = true,
},
*/
@@ -108,32 +109,32 @@ static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2
static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = {
// All VActive
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na },
.allow_state_increase = true,
},
// VActive + 1 VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na },
.allow_state_increase = false,
},
// All VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
.allow_state_increase = false,
},
// All DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na },
.allow_state_increase = true,
},
// All VBlank, with state increase allowed
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_na },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
.allow_state_increase = true,
},
*/
@@ -144,32 +145,32 @@ static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3
static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = {
// All VActive
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive },
.allow_state_increase = true,
},
// VActive + 1 VBlank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vactive, dml2_pmo_pstate_strategy_vblank },
+ .per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank },
.allow_state_increase = false,
},
// All Vblank
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
.allow_state_increase = false,
},
// All DRR
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr, dml2_pmo_pstate_strategy_fw_drr },
+ .per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr },
.allow_state_increase = true,
},
// All VBlank, with state increase allowed
/*
{
- .per_stream_pstate_method = { dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank, dml2_pmo_pstate_strategy_vblank },
+ .per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
.allow_state_increase = true,
},
*/
@@ -354,29 +355,30 @@ bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_o
return result;
}
-static enum dml2_pmo_pstate_method convert_strategy_to_drr_variant(const enum dml2_pmo_pstate_method base_strategy)
+static enum dml2_pstate_method convert_strategy_to_drr_variant(const enum dml2_pstate_method base_strategy)
{
- enum dml2_pmo_pstate_method variant_strategy = 0;
+ enum dml2_pstate_method variant_strategy = 0;
switch (base_strategy) {
- case dml2_pmo_pstate_strategy_vactive:
- variant_strategy = dml2_pmo_pstate_strategy_fw_vactive_drr;
+ case dml2_pstate_method_vactive:
+ variant_strategy = dml2_pstate_method_fw_vactive_drr;
break;
- case dml2_pmo_pstate_strategy_vblank:
- variant_strategy = dml2_pmo_pstate_strategy_fw_vblank_drr;
+ case dml2_pstate_method_vblank:
+ variant_strategy = dml2_pstate_method_fw_vblank_drr;
break;
- case dml2_pmo_pstate_strategy_fw_svp:
- variant_strategy = dml2_pmo_pstate_strategy_fw_svp_drr;
+ case dml2_pstate_method_fw_svp:
+ variant_strategy = dml2_pstate_method_fw_svp_drr;
break;
- case dml2_pmo_pstate_strategy_fw_vactive_drr:
- case dml2_pmo_pstate_strategy_fw_vblank_drr:
- case dml2_pmo_pstate_strategy_fw_svp_drr:
- case dml2_pmo_pstate_strategy_fw_drr:
- case dml2_pmo_pstate_strategy_reserved_hw:
- case dml2_pmo_pstate_strategy_reserved_fw:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_clamped:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_var:
- case dml2_pmo_pstate_strategy_na:
+ case dml2_pstate_method_fw_vactive_drr:
+ case dml2_pstate_method_fw_vblank_drr:
+ case dml2_pstate_method_fw_svp_drr:
+ case dml2_pstate_method_fw_drr:
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_count:
+ case dml2_pstate_method_na:
default:
/* no variant for this mode */
variant_strategy = base_strategy;
@@ -418,23 +420,22 @@ static unsigned int get_num_expanded_strategies(
static void insert_strategy_into_expanded_list(
const struct dml2_pmo_pstate_strategy *per_stream_pstate_strategy,
- int stream_count,
- struct dml2_pmo_init_data *init_data)
+ const int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
- struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL;
-
- expanded_strategy_list = get_expanded_strategy_list(init_data, stream_count);
+ if (expanded_strategy_list && num_expanded_strategies) {
+ memcpy(&expanded_strategy_list[*num_expanded_strategies], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
- if (expanded_strategy_list) {
- memcpy(&expanded_strategy_list[init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
-
- init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1]++;
+ (*num_expanded_strategies)++;
}
}
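
With the destination list and counter now passed explicitly, the expansion helpers can fill any caller-owned buffer instead of reaching into pmo->init_data. A minimal usage sketch (buffer size arbitrary):

static void demo_insert(const struct dml2_pmo_pstate_strategy *s, int stream_count)
{
	struct dml2_pmo_pstate_strategy dst[8];
	unsigned int n = 0;

	insert_strategy_into_expanded_list(s, stream_count, dst, &n);
	/* n is now 1 and dst[0] holds a copy of *s */
}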
-static void expand_base_strategy(struct dml2_pmo_instance *pmo,
+static void expand_base_strategy(
const struct dml2_pmo_pstate_strategy *base_strategy,
- unsigned int stream_count)
+ const unsigned int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
bool skip_to_next_stream;
bool expanded_strategy_added;
@@ -472,7 +473,7 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
if (i >= stream_count - 1) {
/* insert into strategy list */
- insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, &pmo->init_data);
+ insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, expanded_strategy_list, num_expanded_strategies);
expanded_strategy_added = true;
} else {
/* skip to next stream */
@@ -511,9 +512,9 @@ static void expand_base_strategy(struct dml2_pmo_instance *pmo,
static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_strategy,
const struct dml2_pmo_pstate_strategy *variant_strategy,
- unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
- unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
- unsigned int stream_count)
+ const unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
+ const unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
+ const unsigned int stream_count)
{
bool valid = true;
unsigned int i;
@@ -521,7 +522,7 @@ static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_
/* check all restrictions are met */
for (i = 0; i < stream_count; i++) {
/* vblank + vblank_drr variants are invalid */
- if (base_strategy->per_stream_pstate_method[i] == dml2_pmo_pstate_strategy_vblank &&
+ if (base_strategy->per_stream_pstate_method[i] == dml2_pstate_method_vblank &&
((num_streams_per_base_method[i] > 0 && num_streams_per_variant_method[i] > 0) ||
num_streams_per_variant_method[i] > 1)) {
valid = false;
@@ -532,9 +533,12 @@ static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_
return valid;
}
-static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
+static void expand_variant_strategy(
const struct dml2_pmo_pstate_strategy *base_strategy,
- unsigned int stream_count)
+ const unsigned int stream_count,
+ const bool should_permute,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
bool variant_found;
unsigned int i, j;
@@ -543,7 +547,7 @@ static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
- enum dml2_pmo_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
+ enum dml2_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
struct dml2_pmo_pstate_strategy variant_strategy = { 0 };
/* determine number of displays per method */
@@ -584,7 +588,13 @@ static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
}
if (variant_found && is_variant_method_valid(base_strategy, &variant_strategy, num_streams_per_base_method, num_streams_per_variant_method, stream_count)) {
- expand_base_strategy(pmo, &variant_strategy, stream_count);
+ if (should_permute) {
+ /* permutations are permitted, proceed to expand */
+ expand_base_strategy(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
+ } else {
+ /* no permutations allowed, so add to list now */
+ insert_strategy_into_expanded_list(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
+ }
}
/* rollback to earliest method with bases remaining */
@@ -611,18 +621,19 @@ static void expand_variant_strategy(struct dml2_pmo_instance *pmo,
}
}
-static void expand_base_strategies(
- struct dml2_pmo_instance *pmo,
- const struct dml2_pmo_pstate_strategy *base_strategies_list,
- const unsigned int num_base_strategies,
- unsigned int stream_count)
+void pmo_dcn4_fams2_expand_base_pstate_strategies(
+ const struct dml2_pmo_pstate_strategy *base_strategies_list,
+ const unsigned int num_base_strategies,
+ const unsigned int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies)
{
unsigned int i;
/* expand every explicit base strategy (except all DRR) */
for (i = 0; i < num_base_strategies; i++) {
- expand_base_strategy(pmo, &base_strategies_list[i], stream_count);
- expand_variant_strategy(pmo, &base_strategies_list[i], stream_count);
+ expand_base_strategy(&base_strategies_list[i], stream_count, expanded_strategy_list, num_expanded_strategies);
+ expand_variant_strategy(&base_strategies_list[i], stream_count, true, expanded_strategy_list, num_expanded_strategies);
}
}
@@ -651,25 +662,45 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_1_display, base_strategy_list_1_display_size, 1);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_1_display,
+ base_strategy_list_1_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 2:
DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_2_display, base_strategy_list_2_display_size, 2);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_2_display,
+ base_strategy_list_2_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 3:
DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_3_display, base_strategy_list_3_display_size, 3);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_3_display,
+ base_strategy_list_3_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
case 4:
DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
/* populate list */
- expand_base_strategies(pmo, base_strategy_list_4_display, base_strategy_list_4_display_size, 4);
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_strategy_list_4_display,
+ base_strategy_list_4_display_size,
+ i,
+ pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display,
+ &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
break;
}
}
@@ -782,8 +813,12 @@ static int find_highest_odm_load_stream_index(
int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
for (i = 0; i < display_config->num_streams; i++) {
- odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
+ if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
+ odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
/ mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
+ else
+ odm_load = 0;
+
if (odm_load > highest_odm_load) {
highest_odm_load_index = i;
highest_odm_load = odm_load;
@@ -940,11 +975,8 @@ static void build_synchronized_timing_groups(
/* find synchronizable timing groups */
for (j = i + 1; j < display_config->display_config.num_streams; j++) {
if (memcmp(master_timing,
- &display_config->display_config.stream_descriptors[j].timing,
- sizeof(struct dml2_timing_cfg)) == 0 &&
- display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder &&
- (display_config->display_config.stream_descriptors[i].output.output_encoder != dml2_hdmi || //hdmi requires formats match
- display_config->display_config.stream_descriptors[i].output.output_format == display_config->display_config.stream_descriptors[j].output.output_format)) {
+ &display_config->display_config.stream_descriptors[j].timing,
+ sizeof(struct dml2_timing_cfg)) == 0) {
set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
set_bit_in_bitfield(&stream_mapped_mask, j);
}
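
set_bit_in_bitfield() and is_bit_set_in_bitfield() are defined elsewhere in dml2; their assumed semantics, sketched with suffixed names to avoid claiming the real definitions:

static void set_bit_in_bitfield_sketch(unsigned int *bitfield, unsigned int bit)
{
	*bitfield |= 1u << bit;
}

static bool is_bit_set_in_bitfield_sketch(unsigned int bitfield, unsigned int bit)
{
	return (bitfield & (1u << bit)) != 0;
}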
@@ -958,7 +990,7 @@ static bool all_timings_support_vactive(const struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_config,
unsigned int mask)
{
- unsigned char i;
+ unsigned int i;
bool valid = true;
// Create a remap array to enable simple iteration through only masked stream indices
@@ -1007,7 +1039,7 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_config,
unsigned int mask)
{
- unsigned char i;
+ unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
const struct dml2_stream_parameters *stream_descriptor;
const struct dml2_fams2_meta *stream_fams2_meta;
@@ -1049,7 +1081,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
const struct dml2_plane_parameters *plane_descriptor;
const struct dml2_fams2_meta *stream_fams2_meta;
unsigned int microschedule_vlines;
- unsigned char i;
+ unsigned int i;
unsigned int num_planes_per_stream[DML2_MAX_PLANES] = { 0 };
@@ -1105,24 +1137,73 @@ static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *ps
scratch->pmo_dcn4.num_pstate_candidates++;
}
-static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pmo_pstate_method method)
+static enum dml2_pstate_method uclk_pstate_strategy_override_to_pstate_method(const enum dml2_uclk_pstate_change_strategy override_strategy)
{
- unsigned char i;
- enum dml2_uclk_pstate_change_strategy matching_strategy = (enum dml2_uclk_pstate_change_strategy) dml2_pmo_pstate_strategy_na;
+ enum dml2_pstate_method method = dml2_pstate_method_na;
+
+ switch (override_strategy) {
+ case dml2_uclk_pstate_change_strategy_force_vactive:
+ method = dml2_pstate_method_vactive;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_vblank:
+ method = dml2_pstate_method_vblank;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_drr:
+ method = dml2_pstate_method_fw_drr;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_mall_svp:
+ method = dml2_pstate_method_fw_svp;
+ break;
+ case dml2_uclk_pstate_change_strategy_force_mall_full_frame:
+ case dml2_uclk_pstate_change_strategy_auto:
+ default:
+ method = dml2_pstate_method_na;
+ }
+
+ return method;
+}
+
+static enum dml2_uclk_pstate_change_strategy pstate_method_to_uclk_pstate_strategy_override(const enum dml2_pstate_method method)
+{
+ enum dml2_uclk_pstate_change_strategy override_strategy = dml2_uclk_pstate_change_strategy_auto;
+
+ switch (method) {
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
+ break;
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
+ break;
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
+ break;
+ case dml2_pstate_method_fw_drr:
+ override_strategy = dml2_uclk_pstate_change_strategy_force_drr;
+ break;
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_count:
+ case dml2_pstate_method_na:
+ default:
+ override_strategy = dml2_uclk_pstate_change_strategy_auto;
+ }
+
+ return override_strategy;
+}
- if (method == dml2_pmo_pstate_strategy_vactive || method == dml2_pmo_pstate_strategy_fw_vactive_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
- else if (method == dml2_pmo_pstate_strategy_vblank || method == dml2_pmo_pstate_strategy_fw_vblank_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
- else if (method == dml2_pmo_pstate_strategy_fw_svp)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
- else if (method == dml2_pmo_pstate_strategy_fw_drr)
- matching_strategy = dml2_uclk_pstate_change_strategy_force_drr;
+static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pstate_method method)
+{
+ unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(plane_mask, i)) {
if (display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto &&
- display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != matching_strategy)
+ display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != pstate_method_to_uclk_pstate_strategy_override(method))
return false;
}
}
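
The two conversion helpers replace the old open-coded ladder; mapping an override to a method and back is the identity on the four force_* values, while force_mall_full_frame and auto collapse to na. A small sketch:

static void demo_round_trip(void)
{
	enum dml2_pstate_method m =
		uclk_pstate_strategy_override_to_pstate_method(
			dml2_uclk_pstate_change_strategy_force_drr);
	enum dml2_uclk_pstate_change_strategy s =
		pstate_method_to_uclk_pstate_strategy_override(m);

	/* m == dml2_pstate_method_fw_drr and s == the original force_drr */
	(void)m;
	(void)s;
}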
@@ -1148,32 +1229,33 @@ static void build_method_scheduling_params(
static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
struct dml2_pmo_instance *pmo,
- enum dml2_pmo_pstate_method stream_pstate_method,
+ enum dml2_pstate_method stream_pstate_method,
int stream_idx)
{
struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
switch (stream_pstate_method) {
- case dml2_pmo_pstate_strategy_vactive:
- case dml2_pmo_pstate_strategy_fw_vactive_drr:
+ case dml2_pstate_method_vactive:
+ case dml2_pstate_method_fw_vactive_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
break;
- case dml2_pmo_pstate_strategy_vblank:
- case dml2_pmo_pstate_strategy_fw_vblank_drr:
+ case dml2_pstate_method_vblank:
+ case dml2_pstate_method_fw_vblank_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common;
break;
- case dml2_pmo_pstate_strategy_fw_svp:
- case dml2_pmo_pstate_strategy_fw_svp_drr:
+ case dml2_pstate_method_fw_svp:
+ case dml2_pstate_method_fw_svp_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common;
break;
- case dml2_pmo_pstate_strategy_fw_drr:
+ case dml2_pstate_method_fw_drr:
stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common;
break;
- case dml2_pmo_pstate_strategy_reserved_hw:
- case dml2_pmo_pstate_strategy_reserved_fw:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_clamped:
- case dml2_pmo_pstate_strategy_reserved_fw_drr_var:
- case dml2_pmo_pstate_strategy_na:
+ case dml2_pstate_method_reserved_hw:
+ case dml2_pstate_method_reserved_fw:
+ case dml2_pstate_method_reserved_fw_drr_clamped:
+ case dml2_pstate_method_reserved_fw_drr_var:
+ case dml2_pstate_method_count:
+ case dml2_pstate_method_na:
default:
stream_method_fams2_meta = NULL;
}
@@ -1214,7 +1296,7 @@ static bool is_timing_group_schedulable(
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
if (!stream_method_fams2_meta)
- return false;
+ continue;
if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
@@ -1294,7 +1376,7 @@ static bool is_config_schedulable(
if (j_disallow_us < jp1_disallow_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
- s->pmo_dcn4.sorted_group_gtl_disallow_index[j+1]);
+ s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]);
swapped = true;
}
}
@@ -1353,7 +1435,7 @@ static bool is_config_schedulable(
if (j_period_us < jp1_period_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
- s->pmo_dcn4.sorted_group_gtl_period_index[j+1]);
+ s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]);
swapped = true;
}
}
@@ -1412,7 +1494,7 @@ static bool is_config_schedulable(
static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo,
const struct display_configuation_with_meta *display_cfg,
- const enum dml2_pmo_pstate_method stream_pstate_method,
+ const enum dml2_pstate_method stream_pstate_method,
unsigned int stream_index)
{
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[stream_index];
@@ -1467,7 +1549,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
{
struct dml2_pmo_scratch *s = &pmo->scratch;
- unsigned char stream_index = 0;
+ unsigned int stream_index = 0;
unsigned int svp_count = 0;
unsigned int svp_stream_mask = 0;
@@ -1493,19 +1575,19 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
strategy_matches_drr_requirements &=
stream_matches_drr_policy(pmo, display_cfg, pstate_strategy->per_stream_pstate_method[stream_index], stream_index);
- if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
- pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
svp_count++;
set_bit_in_bitfield(&svp_stream_mask, stream_index);
- } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
drr_count++;
set_bit_in_bitfield(&drr_stream_mask, stream_index);
- } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive ||
- pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
vactive_count++;
set_bit_in_bitfield(&vactive_stream_mask, stream_index);
- } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank ||
- pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
+ pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
vblank_count++;
set_bit_in_bitfield(&vblank_stream_mask, stream_index);
}
@@ -1531,7 +1613,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
{
- unsigned char i;
+ unsigned int i;
int min_vactive_margin_us = 0xFFFFFFF;
for (i = 0; i < DML2_MAX_PLANES; i++) {
@@ -1624,7 +1706,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
/* for single stream, guarantee at least an instant of allow */
stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
math_max2(0.0,
- timing->v_active - stream_fams2_meta->min_allow_width_otg_vlines - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
+ timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
} else {
/* for multi stream, bound to a max fill time defined by IP caps */
stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
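
Clamping the allow width to at least one vline keeps the subtraction from erasing the guaranteed instant of allow. With illustrative numbers (v_active = 1080, min_allow_width_otg_vlines = 0 clamped to 1, dram_clk_change_blackout = 900 vlines):

	max_vactive_det_fill_delay = floor(max(0, 1080 - 1 - 900)) = 179 vlines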
@@ -1737,8 +1819,10 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
struct display_configuation_with_meta *display_config;
const struct dml2_plane_parameters *plane_descriptor;
const struct dml2_pmo_pstate_strategy *strategy_list = NULL;
+ struct dml2_pmo_pstate_strategy override_base_strategy = { 0 };
unsigned int strategy_list_size = 0;
- unsigned char plane_index, stream_index, i;
+ unsigned int plane_index, stream_index, i;
+ bool build_override_strategy = true;
state->performed = true;
in_out->base_display_config->stage3.min_clk_index_for_latency = in_out->base_display_config->stage1.min_clk_index_for_latency;
@@ -1762,7 +1846,11 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
set_bit_in_bitfield(&s->pmo_dcn4.stream_plane_mask[plane_descriptor->stream_index], plane_index);
- state->pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive;
+ state->pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
+
+ build_override_strategy &= plane_descriptor->overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto;
+ override_base_strategy.per_stream_pstate_method[plane_descriptor->stream_index] =
+ uclk_pstate_strategy_override_to_pstate_method(plane_descriptor->overrides.uclk_pstate_change_strategy);
}
// Figure out which streams can do vactive, and also build up implicit SVP and FAMS2 meta
@@ -1780,13 +1868,30 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
/* get synchronized timing groups */
build_synchronized_timing_groups(pmo, display_config);
- strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
- if (!strategy_list)
- return false;
-
- strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
+ if (build_override_strategy) {
+ /* build expanded override strategy list (no permutations) */
+ override_base_strategy.allow_state_increase = true;
+ s->pmo_dcn4.num_expanded_override_strategies = 0;
+ insert_strategy_into_expanded_list(&override_base_strategy,
+ display_config->display_config.num_streams,
+ s->pmo_dcn4.expanded_override_strategy_list,
+ &s->pmo_dcn4.num_expanded_override_strategies);
+ expand_variant_strategy(&override_base_strategy,
+ display_config->display_config.num_streams,
+ false,
+ s->pmo_dcn4.expanded_override_strategy_list,
+ &s->pmo_dcn4.num_expanded_override_strategies);
+
+ /* use override strategy list */
+ strategy_list = s->pmo_dcn4.expanded_override_strategy_list;
+ strategy_list_size = s->pmo_dcn4.num_expanded_override_strategies;
+ } else {
+ /* use predefined strategy list */
+ strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
+ strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
+ }
- if (strategy_list_size == 0)
+ if (!strategy_list || strategy_list_size == 0)
return false;
s->pmo_dcn4.num_pstate_candidates = 0;
@@ -1798,6 +1903,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
}
if (s->pmo_dcn4.num_pstate_candidates > 0) {
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates - 1].allow_state_increase = true;
s->pmo_dcn4.cur_pstate_candidate = -1;
return true;
} else {
@@ -1830,7 +1936,7 @@ static void reset_display_configuration(struct display_configuation_with_meta *d
// Reset strategy to auto
plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_auto;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_not_supported;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_na;
}
}
@@ -1838,7 +1944,7 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1847,7 +1953,7 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr;
}
}
@@ -1859,13 +1965,13 @@ static void setup_planes_for_svp_by_mask(struct display_configuation_with_meta *
{
struct dml2_pmo_scratch *scratch = &pmo->scratch;
- unsigned char plane_index;
+ unsigned int plane_index;
int stream_index = -1;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_subvp_phantom;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp;
}
}
@@ -1882,13 +1988,13 @@ static void setup_planes_for_svp_drr_by_mask(struct display_configuation_with_me
{
struct dml2_pmo_scratch *scratch = &pmo->scratch;
- unsigned char plane_index;
+ unsigned int plane_index;
int stream_index = -1;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_subvp_phantom_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp_drr;
}
}
@@ -1903,7 +2009,7 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1913,7 +2019,7 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
plane->overrides.reserved_vblank_time_ns = (long)math_max2(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000.0,
plane->overrides.reserved_vblank_time_ns);
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vblank;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank;
}
}
@@ -1923,7 +2029,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
struct dml2_plane_parameters *plane;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1931,7 +2037,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with
plane = &display_config->display_config.plane_descriptors[plane_index];
plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_vblank_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr;
}
}
}
@@ -1940,14 +2046,14 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
unsigned int stream_index;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_vactive;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
@@ -1961,14 +2067,14 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit
struct dml2_pmo_instance *pmo,
int plane_mask)
{
- unsigned char plane_index;
+ unsigned int plane_index;
unsigned int stream_index;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
- display_config->stage3.pstate_switch_modes[plane_index] = dml2_uclk_pstate_support_method_fw_vactive_drr;
+ display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
@@ -1990,26 +2096,26 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
- if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) {
+ if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
success = false;
break;
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive) {
setup_planes_for_vactive_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank) {
setup_planes_for_vblank_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp) {
fams2_required = true;
setup_planes_for_svp_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
fams2_required = true;
setup_planes_for_vactive_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
fams2_required = true;
setup_planes_for_vblank_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
fams2_required = true;
setup_planes_for_svp_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
- } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
+ } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
fams2_required = true;
setup_planes_for_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
}
@@ -2029,7 +2135,7 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask)
{
int min_time_us = 0xFFFFFF;
- unsigned char plane_index = 0;
+ unsigned int plane_index = 0;
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
@@ -2064,34 +2170,34 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
- if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vactive ||
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vactive_drr) {
+ if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_vblank ||
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_vblank_drr) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
REQUIRED_RESERVED_TIME ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp ||
- s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_svp_drr) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
+ s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_fw_drr) {
- if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pmo_pstate_strategy_fw_drr) ||
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
+ if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pstate_method_fw_drr) ||
get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) {
p_state_supported = false;
break;
}
- } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pmo_pstate_strategy_na) {
+ } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
p_state_supported = false;
break;
}
@@ -2139,6 +2245,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
struct dml2_pmo_instance *pmo = in_out->instance;
bool stutter_period_meets_z8_eco = true;
bool z8_stutter_optimization_too_expensive = false;
+ bool stutter_optimization_too_expensive = false;
double line_time_us, vblank_nom_time_us;
unsigned int i;
@@ -2160,10 +2267,15 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / (in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000;
vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom;
- if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) {
+ if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
z8_stutter_optimization_too_expensive = true;
break;
}
+
+ if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
+ stutter_optimization_too_expensive = true;
+ break;
+ }
}
pmo->scratch.pmo_dcn4.num_stutter_candidates = 0;
@@ -2179,7 +2291,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false;
}
- if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
+ if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
pmo->scratch.pmo_dcn4.num_stutter_candidates++;
}
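The two gating checks added above share one pattern: a stream only remains a stutter candidate if its nominal vblank time comfortably exceeds the relevant entry/exit latency. A minimal standalone sketch of that arithmetic, assuming MIN_BLANK_STUTTER_FACTOR is a plain multiplier; the helper name and parameter set below are illustrative, not the driver's API:

/* Hedged sketch of the per-stream stutter viability test. */
static bool vblank_covers_stutter_latency(unsigned long h_total,
					  unsigned long pixel_clock_khz,
					  unsigned long vblank_nom_lines,
					  double latency_us, double factor)
{
	/* line time in us = h_total / pixel clock (Hz) * 1e6 */
	double line_time_us = (double)h_total / (pixel_clock_khz * 1000.0) * 1000000.0;
	double vblank_nom_time_us = line_time_us * vblank_nom_lines;

	/* stutter is only worthwhile if the blank comfortably exceeds the latency */
	return vblank_nom_time_us >= latency_us * factor;
}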
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
index 0c25bd3e9ac0..6baab7ad6ecc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
@@ -23,4 +23,11 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
bool pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in_out);
bool pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out *in_out);
+void pmo_dcn4_fams2_expand_base_pstate_strategies(
+ const struct dml2_pmo_pstate_strategy *base_strategies_list,
+ const unsigned int num_base_strategies,
+ const unsigned int stream_count,
+ struct dml2_pmo_pstate_strategy *expanded_strategy_list,
+ unsigned int *num_expanded_strategies);
+
#endif
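The header now exports the strategy expander so other PMO variants can reuse the DCN4 base tables. A hedged usage sketch follows; the base table name, its size, and the destination capacity are assumptions, not symbols from this patch:

/* Expand the per-stream base strategies into the full candidate list for a
 * two-stream topology. All names except the expander itself are assumed. */
struct dml2_pmo_pstate_strategy expanded[MAX_EXPANDED_STRATEGIES]; /* hypothetical capacity */
unsigned int num_expanded = 0;

pmo_dcn4_fams2_expand_base_pstate_strategies(
	base_strategies_2_streams,	/* assumed base table */
	num_base_strategies_2_streams,	/* assumed table size */
	2,				/* stream_count */
	expanded, &num_expanded);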
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
index add51d41a515..7ed0242a4b33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -72,7 +72,6 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
out->init_for_stutter = pmo_dcn4_fams2_init_for_stutter;
out->test_for_stutter = pmo_dcn4_fams2_test_for_stutter;
out->optimize_for_stutter = pmo_dcn4_fams2_optimize_for_stutter;
-
result = true;
break;
case dml2_project_invalid:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
new file mode 100644
index 000000000000..f88931ccbc5e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml_top.h"
+#include "dml2_internal_shared_types.h"
+#include "dml2_top_soc15.h"
+
+unsigned int dml2_get_instance_size_bytes(void)
+{
+ return sizeof(struct dml2_instance);
+}
+
+bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
+{
+ switch (in_out->options.project_id) {
+ case dml2_project_dcn4x_stage1:
+ case dml2_project_dcn4x_stage2:
+ case dml2_project_dcn4x_stage2_auto_drr_svp:
+ return dml2_top_soc15_initialize_instance(in_out);
+ case dml2_project_invalid:
+ default:
+ return false;
+ }
+}
+
+bool dml2_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
+{
+ if (!in_out->dml2_instance->funcs.check_mode_supported)
+ return false;
+
+ return in_out->dml2_instance->funcs.check_mode_supported(in_out);
+}
+
+bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_out)
+{
+ if (!in_out->dml2_instance->funcs.build_mode_programming)
+ return false;
+
+ return in_out->dml2_instance->funcs.build_mode_programming(in_out);
+}
+
+bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out)
+{
+ if (!in_out->dml2_instance->funcs.build_mcache_programming)
+ return false;
+
+ return in_out->dml2_instance->funcs.build_mcache_programming(in_out);
+}
+
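dml2_top_interfaces.c is a thin dispatcher: dml2_initialize_instance() selects a backend by project ID, and the other entry points forward through the function pointers installed at initialization, returning false when a hook is absent. A hedged usage sketch, with allocation done userspace-style purely for illustration; any field not visible in this file is an assumption:

/* Allocate an opaque instance, initialize it for a DCN4x project, then call
 * the generic entry points; error handling is elided. */
struct dml2_initialize_instance_in_out init = { 0 };
struct dml2_check_mode_supported_in_out check = { 0 };

init.dml2_instance = malloc(dml2_get_instance_size_bytes()); /* assumed field name */
init.options.project_id = dml2_project_dcn4x_stage2;

if (dml2_initialize_instance(&init)) {
	check.dml2_instance = init.dml2_instance;
	/* ... populate the display configuration ... */
	(void)dml2_check_mode_supported(&check);
}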
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
new file mode 100644
index 000000000000..5e14d85821e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml2_top_legacy.h"
+#include "dml2_top_soc15.h"
+#include "dml2_core_factory.h"
+#include "dml2_pmo_factory.h"
+#include "display_mode_core_structs.h"
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
new file mode 100644
index 000000000000..14d0ae03dce6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DML2_TOP_LEGACY_H__
+#define __DML2_TOP_LEGACY_H__
+#include "dml2_internal_shared_types.h"
+bool dml2_top_legacy_initialize_instance(struct dml2_initialize_instance_in_out *in_out);
+#endif /* __DML2_TOP_LEGACY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
deleted file mode 100644
index d0e026d981b5..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
+++ /dev/null
@@ -1,307 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_top_optimization.h"
-#include "dml2_internal_shared_types.h"
-#include "dml_top_mcache.h"
-
-static void copy_display_configuration_with_meta(struct display_configuation_with_meta *dst, const struct display_configuation_with_meta *src)
-{
- memcpy(dst, src, sizeof(struct display_configuation_with_meta));
-}
-
-bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
-
- state->performed = true;
-
- return true;
-}
-
-bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
-
- return state->min_clk_index_for_latency == 0;
-}
-
-bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params)
-{
- bool result = false;
-
- if (params->display_config->stage1.min_clk_index_for_latency > 0) {
- copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
- params->optimized_display_config->stage1.min_clk_index_for_latency--;
- result = true;
- }
-
- return result;
-}
-
-bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
- bool mcache_success = false;
- bool result = false;
-
- memset(l, 0, sizeof(struct dml2_optimization_test_function_locals));
-
- l->test_mcache.calc_mcache_count_params.dml2_instance = params->dml;
- l->test_mcache.calc_mcache_count_params.display_config = &params->display_config->display_config;
- l->test_mcache.calc_mcache_count_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
-
- result = dml2_top_mcache_calc_mcache_count_and_offsets(&l->test_mcache.calc_mcache_count_params); // use core to get the basic mcache_allocations
-
- if (result) {
- l->test_mcache.assign_global_mcache_ids_params.allocations = params->display_config->stage2.mcache_allocations;
- l->test_mcache.assign_global_mcache_ids_params.num_allocations = params->display_config->display_config.num_planes;
-
- dml2_top_mcache_assign_global_mcache_ids(&l->test_mcache.assign_global_mcache_ids_params);
-
- l->test_mcache.validate_admissibility_params.dml2_instance = params->dml;
- l->test_mcache.validate_admissibility_params.display_cfg = &params->display_config->display_config;
- l->test_mcache.validate_admissibility_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
- l->test_mcache.validate_admissibility_params.cfg_support_info = &params->display_config->mode_support_result.cfg_support_info;
-
- mcache_success = dml2_top_mcache_validate_admissability(&l->test_mcache.validate_admissibility_params); // also find the shift to make mcache allocation works
-
- memcpy(params->display_config->stage2.per_plane_mcache_support, l->test_mcache.validate_admissibility_params.per_plane_status, sizeof(bool) * DML2_MAX_PLANES);
- }
-
- return mcache_success;
-}
-
-bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
- bool optimize_success = false;
-
- if (params->last_candidate_supported == false)
- return false;
-
- copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
-
- l->optimize_mcache.optimize_mcache_params.instance = &params->dml->pmo_instance;
- l->optimize_mcache.optimize_mcache_params.dcc_mcache_supported = params->display_config->stage2.per_plane_mcache_support;
- l->optimize_mcache.optimize_mcache_params.display_config = &params->display_config->display_config;
- l->optimize_mcache.optimize_mcache_params.optimized_display_cfg = &params->optimized_display_config->display_config;
- l->optimize_mcache.optimize_mcache_params.cfg_support_info = &params->optimized_display_config->mode_support_result.cfg_support_info;
-
- optimize_success = params->dml->pmo_instance.optimize_dcc_mcache(&l->optimize_mcache.optimize_mcache_params);
-
- return optimize_success;
-}
-
-bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_init_function_locals *l = params->locals;
-
- l->vmin.init_params.instance = &params->dml->pmo_instance;
- l->vmin.init_params.base_display_config = params->display_config;
- return params->dml->pmo_instance.init_for_vmin(&l->vmin.init_params);
-}
-
-bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
-
- l->test_vmin.pmo_test_vmin_params.instance = &params->dml->pmo_instance;
- l->test_vmin.pmo_test_vmin_params.display_config = params->display_config;
- l->test_vmin.pmo_test_vmin_params.vmin_limits = &params->dml->soc_bbox.vmin_limit;
- return params->dml->pmo_instance.test_for_vmin(&l->test_vmin.pmo_test_vmin_params);
-}
-
-bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
-
- if (params->last_candidate_supported == false)
- return false;
-
- l->optimize_vmin.pmo_optimize_vmin_params.instance = &params->dml->pmo_instance;
- l->optimize_vmin.pmo_optimize_vmin_params.base_display_config = params->display_config;
- l->optimize_vmin.pmo_optimize_vmin_params.optimized_display_config = params->optimized_display_config;
- return params->dml->pmo_instance.optimize_for_vmin(&l->optimize_vmin.pmo_optimize_vmin_params);
-}
-
-bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
-{
- bool test_passed = false;
- bool optimize_succeeded = true;
- bool candidate_validation_passed = true;
- struct optimization_init_function_params init_params = { 0 };
- struct optimization_test_function_params test_params = { 0 };
- struct optimization_optimize_function_params optimize_params = { 0 };
-
- if (!params->dml ||
- !params->optimize_function ||
- !params->test_function ||
- !params->display_config ||
- !params->optimized_display_config)
- return false;
-
- copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
-
- init_params.locals = &l->init_function_locals;
- init_params.dml = params->dml;
- init_params.display_config = &l->cur_candidate_display_cfg;
-
- if (params->init_function && !params->init_function(&init_params))
- return false;
-
- test_params.locals = &l->test_function_locals;
- test_params.dml = params->dml;
- test_params.display_config = &l->cur_candidate_display_cfg;
-
- test_passed = params->test_function(&test_params);
-
- while (!test_passed && optimize_succeeded) {
- memset(&optimize_params, 0, sizeof(struct optimization_optimize_function_params));
-
- optimize_params.locals = &l->optimize_function_locals;
- optimize_params.dml = params->dml;
- optimize_params.display_config = &l->cur_candidate_display_cfg;
- optimize_params.optimized_display_config = &l->next_candidate_display_cfg;
- optimize_params.last_candidate_supported = candidate_validation_passed;
-
- optimize_succeeded = params->optimize_function(&optimize_params);
-
- if (optimize_succeeded) {
- l->mode_support_params.instance = &params->dml->core_instance;
- l->mode_support_params.display_cfg = &l->next_candidate_display_cfg;
- l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
-
- if (l->next_candidate_display_cfg.stage3.performed)
- l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage3.min_clk_index_for_latency;
- else
- l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage1.min_clk_index_for_latency;
-
- candidate_validation_passed = params->dml->core_instance.mode_support(&l->mode_support_params);
-
- l->next_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
- }
-
- if (optimize_succeeded && candidate_validation_passed) {
- memset(&test_params, 0, sizeof(struct optimization_test_function_params));
- test_params.locals = &l->test_function_locals;
- test_params.dml = params->dml;
- test_params.display_config = &l->next_candidate_display_cfg;
- test_passed = params->test_function(&test_params);
-
- copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, &l->next_candidate_display_cfg);
-
- // If optimization is not all or nothing, then store partial progress in output
- if (!params->all_or_nothing)
- copy_display_configuration_with_meta(params->optimized_display_config, &l->next_candidate_display_cfg);
- }
- }
-
- if (test_passed)
- copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
-
- return test_passed;
-}
-
-bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
-{
- int highest_state, lowest_state, cur_state;
- bool supported = false;
-
- if (!params->dml ||
- !params->optimize_function ||
- !params->test_function ||
- !params->display_config ||
- !params->optimized_display_config)
- return false;
-
- copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
- highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency;
- lowest_state = 0;
-
- while (highest_state > lowest_state) {
- cur_state = (highest_state + lowest_state) / 2;
-
- l->mode_support_params.instance = &params->dml->core_instance;
- l->mode_support_params.display_cfg = &l->cur_candidate_display_cfg;
- l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
- l->mode_support_params.min_clk_index = cur_state;
-
- supported = params->dml->core_instance.mode_support(&l->mode_support_params);
-
- if (supported) {
- l->cur_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
- highest_state = cur_state;
- } else {
- lowest_state = cur_state + 1;
- }
- }
- l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency = lowest_state;
-
- copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
-
- return true;
-}
-
-bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_init_function_locals *l = params->locals;
-
- l->uclk_pstate.init_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.init_params.base_display_config = params->display_config;
-
- return params->dml->pmo_instance.init_for_uclk_pstate(&l->uclk_pstate.init_params);
-}
-
-bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
-
- l->uclk_pstate.test_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.test_params.base_display_config = params->display_config;
-
- return params->dml->pmo_instance.test_for_uclk_pstate(&l->uclk_pstate.test_params);
-}
-
-bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
-
- l->uclk_pstate.optimize_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.optimize_params.base_display_config = params->display_config;
- l->uclk_pstate.optimize_params.optimized_display_config = params->optimized_display_config;
- l->uclk_pstate.optimize_params.last_candidate_failed = !params->last_candidate_supported;
-
- return params->dml->pmo_instance.optimize_for_uclk_pstate(&l->uclk_pstate.optimize_params);
-}
-
-bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params)
-{
- struct dml2_optimization_init_function_locals *l = params->locals;
-
- l->uclk_pstate.init_params.instance = &params->dml->pmo_instance;
- l->uclk_pstate.init_params.base_display_config = params->display_config;
-
- return params->dml->pmo_instance.init_for_stutter(&l->stutter.stutter_params);
-}
-
-bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params)
-{
- struct dml2_optimization_test_function_locals *l = params->locals;
-
- l->stutter.stutter_params.instance = &params->dml->pmo_instance;
- l->stutter.stutter_params.base_display_config = params->display_config;
- return params->dml->pmo_instance.test_for_stutter(&l->stutter.stutter_params);
-}
-
-bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params)
-{
- struct dml2_optimization_optimize_function_locals *l = params->locals;
-
- l->stutter.stutter_params.instance = &params->dml->pmo_instance;
- l->stutter.stutter_params.base_display_config = params->display_config;
- l->stutter.stutter_params.optimized_display_config = params->optimized_display_config;
- l->stutter.stutter_params.last_candidate_failed = !params->last_candidate_supported;
- return params->dml->pmo_instance.optimize_for_stutter(&l->stutter.stutter_params);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
deleted file mode 100644
index 9f22ab33eab1..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#ifndef __DML2_TOP_OPTIMIZATION_H__
-#define __DML2_TOP_OPTIMIZATION_H__
-
-#include "dml2_external_lib_deps.h"
-#include "dml2_internal_shared_types.h"
-
-bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params);
-bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params);
-
-bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params);
-
-bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params);
-bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params);
-bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params);
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
new file mode 100644
index 000000000000..a8f58f8448e4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dml2_top_soc15.h"
+#include "dml2_mcg_factory.h"
+#include "dml2_dpmm_factory.h"
+#include "dml2_core_factory.h"
+#include "dml2_pmo_factory.h"
+#include "lib_float_math.h"
+#include "dml2_debug.h"
+
+static void setup_unoptimized_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
+{
+ memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
+ out->stage1.min_clk_index_for_latency = dml->min_clk_table.dram_bw_table.num_entries - 1; //dml->min_clk_table.clean_me_up.soc_bb.num_states - 1;
+}
+
+static void setup_speculative_display_config_with_meta(const struct dml2_instance *dml, struct display_configuation_with_meta *out, const struct dml2_display_cfg *display_config)
+{
+ memcpy(&out->display_config, display_config, sizeof(struct dml2_display_cfg));
+ out->stage1.min_clk_index_for_latency = 0;
+}
+
+static void copy_display_configuration_with_meta(struct display_configuation_with_meta *dst, const struct display_configuation_with_meta *src)
+{
+ memcpy(dst, src, sizeof(struct display_configuation_with_meta));
+}
+
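+// The min-clk-for-latency phase walks the DRAM clock states downward: init
+// marks the stage as performed, the test passes once the lowest state
+// (index 0) is reached, and each optimize step proposes the next lower index.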
+static bool dml2_top_optimization_init_function_min_clk_for_latency(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
+
+ state->performed = true;
+
+ return true;
+}
+
+static bool dml2_top_optimization_test_function_min_clk_for_latency(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_stage1_state *state = &params->display_config->stage1;
+
+ return state->min_clk_index_for_latency == 0;
+}
+
+static bool dml2_top_optimization_optimize_function_min_clk_for_latency(const struct optimization_optimize_function_params *params)
+{
+ bool result = false;
+
+ if (params->display_config->stage1.min_clk_index_for_latency > 0) {
+ copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
+ params->optimized_display_config->stage1.min_clk_index_for_latency--;
+ result = true;
+ }
+
+ return result;
+}
+
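+// The mcache test runs the full pipeline on the current candidate: use the
+// core to compute per-plane mcache counts and offsets, assign global mcache
+// IDs, then validate that every plane's viewport split is admissible.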
+static bool dml2_top_optimization_test_function_mcache(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+ bool mcache_success = false;
+ bool result = false;
+
+ memset(l, 0, sizeof(struct dml2_optimization_test_function_locals));
+
+ l->test_mcache.calc_mcache_count_params.dml2_instance = params->dml;
+ l->test_mcache.calc_mcache_count_params.display_config = &params->display_config->display_config;
+ l->test_mcache.calc_mcache_count_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
+
+ result = dml2_top_mcache_calc_mcache_count_and_offsets(&l->test_mcache.calc_mcache_count_params); // use core to get the basic mcache_allocations
+
+ if (result) {
+ l->test_mcache.assign_global_mcache_ids_params.allocations = params->display_config->stage2.mcache_allocations;
+ l->test_mcache.assign_global_mcache_ids_params.num_allocations = params->display_config->display_config.num_planes;
+
+ dml2_top_mcache_assign_global_mcache_ids(&l->test_mcache.assign_global_mcache_ids_params);
+
+ l->test_mcache.validate_admissibility_params.dml2_instance = params->dml;
+ l->test_mcache.validate_admissibility_params.display_cfg = &params->display_config->display_config;
+ l->test_mcache.validate_admissibility_params.mcache_allocations = params->display_config->stage2.mcache_allocations;
+ l->test_mcache.validate_admissibility_params.cfg_support_info = &params->display_config->mode_support_result.cfg_support_info;
+
+ mcache_success = dml2_top_mcache_validate_admissability(&l->test_mcache.validate_admissibility_params); // also find the shift that makes the mcache allocation work
+
+ memcpy(params->display_config->stage2.per_plane_mcache_support, l->test_mcache.validate_admissibility_params.per_plane_status, sizeof(bool) * DML2_MAX_PLANES);
+ }
+
+ return mcache_success;
+}
+
+static bool dml2_top_optimization_optimize_function_mcache(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+ bool optimize_success = false;
+
+ if (params->last_candidate_supported == false)
+ return false;
+
+ copy_display_configuration_with_meta(params->optimized_display_config, params->display_config);
+
+ l->optimize_mcache.optimize_mcache_params.instance = &params->dml->pmo_instance;
+ l->optimize_mcache.optimize_mcache_params.dcc_mcache_supported = params->display_config->stage2.per_plane_mcache_support;
+ l->optimize_mcache.optimize_mcache_params.display_config = &params->display_config->display_config;
+ l->optimize_mcache.optimize_mcache_params.optimized_display_cfg = &params->optimized_display_config->display_config;
+ l->optimize_mcache.optimize_mcache_params.cfg_support_info = &params->optimized_display_config->mode_support_result.cfg_support_info;
+
+ optimize_success = params->dml->pmo_instance.optimize_dcc_mcache(&l->optimize_mcache.optimize_mcache_params);
+
+ return optimize_success;
+}
+
+static bool dml2_top_optimization_init_function_vmin(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_init_function_locals *l = params->locals;
+
+ l->vmin.init_params.instance = &params->dml->pmo_instance;
+ l->vmin.init_params.base_display_config = params->display_config;
+ return params->dml->pmo_instance.init_for_vmin(&l->vmin.init_params);
+}
+
+static bool dml2_top_optimization_test_function_vmin(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+
+ l->test_vmin.pmo_test_vmin_params.instance = &params->dml->pmo_instance;
+ l->test_vmin.pmo_test_vmin_params.display_config = params->display_config;
+ l->test_vmin.pmo_test_vmin_params.vmin_limits = &params->dml->soc_bbox.vmin_limit;
+ return params->dml->pmo_instance.test_for_vmin(&l->test_vmin.pmo_test_vmin_params);
+}
+
+static bool dml2_top_optimization_optimize_function_vmin(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+
+ if (params->last_candidate_supported == false)
+ return false;
+
+ l->optimize_vmin.pmo_optimize_vmin_params.instance = &params->dml->pmo_instance;
+ l->optimize_vmin.pmo_optimize_vmin_params.base_display_config = params->display_config;
+ l->optimize_vmin.pmo_optimize_vmin_params.optimized_display_config = params->optimized_display_config;
+ return params->dml->pmo_instance.optimize_for_vmin(&l->optimize_vmin.pmo_optimize_vmin_params);
+}
+
+static bool dml2_top_optimization_init_function_uclk_pstate(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_init_function_locals *l = params->locals;
+
+ l->uclk_pstate.init_params.instance = &params->dml->pmo_instance;
+ l->uclk_pstate.init_params.base_display_config = params->display_config;
+
+ return params->dml->pmo_instance.init_for_uclk_pstate(&l->uclk_pstate.init_params);
+}
+
+static bool dml2_top_optimization_test_function_uclk_pstate(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+
+ l->uclk_pstate.test_params.instance = &params->dml->pmo_instance;
+ l->uclk_pstate.test_params.base_display_config = params->display_config;
+
+ return params->dml->pmo_instance.test_for_uclk_pstate(&l->uclk_pstate.test_params);
+}
+
+static bool dml2_top_optimization_optimize_function_uclk_pstate(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+
+ l->uclk_pstate.optimize_params.instance = &params->dml->pmo_instance;
+ l->uclk_pstate.optimize_params.base_display_config = params->display_config;
+ l->uclk_pstate.optimize_params.optimized_display_config = params->optimized_display_config;
+ l->uclk_pstate.optimize_params.last_candidate_failed = !params->last_candidate_supported;
+
+ return params->dml->pmo_instance.optimize_for_uclk_pstate(&l->uclk_pstate.optimize_params);
+}
+
+static bool dml2_top_optimization_init_function_stutter(const struct optimization_init_function_params *params)
+{
+ struct dml2_optimization_init_function_locals *l = params->locals;
+
+ l->stutter.stutter_params.instance = &params->dml->pmo_instance;
+ l->stutter.stutter_params.base_display_config = params->display_config;
+
+ return params->dml->pmo_instance.init_for_stutter(&l->stutter.stutter_params);
+}
+
+static bool dml2_top_optimization_test_function_stutter(const struct optimization_test_function_params *params)
+{
+ struct dml2_optimization_test_function_locals *l = params->locals;
+
+ l->stutter.stutter_params.instance = &params->dml->pmo_instance;
+ l->stutter.stutter_params.base_display_config = params->display_config;
+ return params->dml->pmo_instance.test_for_stutter(&l->stutter.stutter_params);
+}
+
+static bool dml2_top_optimization_optimize_function_stutter(const struct optimization_optimize_function_params *params)
+{
+ struct dml2_optimization_optimize_function_locals *l = params->locals;
+
+ l->stutter.stutter_params.instance = &params->dml->pmo_instance;
+ l->stutter.stutter_params.base_display_config = params->display_config;
+ l->stutter.stutter_params.optimized_display_config = params->optimized_display_config;
+ l->stutter.stutter_params.last_candidate_failed = !params->last_candidate_supported;
+ return params->dml->pmo_instance.optimize_for_stutter(&l->stutter.stutter_params);
+}
+
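+// Generic optimization loop: starting from a tested candidate, repeatedly ask
+// the phase's optimize function for a new candidate, re-run mode support on
+// it, and re-test until the test passes or no further candidate exists.
+// Unless the phase is all-or-nothing, partial progress is kept in the output.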
+static bool dml2_top_optimization_perform_optimization_phase(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
+{
+ bool test_passed = false;
+ bool optimize_succeeded = true;
+ bool candidate_validation_passed = true;
+ struct optimization_init_function_params init_params = { 0 };
+ struct optimization_test_function_params test_params = { 0 };
+ struct optimization_optimize_function_params optimize_params = { 0 };
+
+ if (!params->dml ||
+ !params->optimize_function ||
+ !params->test_function ||
+ !params->display_config ||
+ !params->optimized_display_config)
+ return false;
+
+ copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
+
+ init_params.locals = &l->init_function_locals;
+ init_params.dml = params->dml;
+ init_params.display_config = &l->cur_candidate_display_cfg;
+
+ if (params->init_function && !params->init_function(&init_params))
+ return false;
+
+ test_params.locals = &l->test_function_locals;
+ test_params.dml = params->dml;
+ test_params.display_config = &l->cur_candidate_display_cfg;
+
+ test_passed = params->test_function(&test_params);
+
+ while (!test_passed && optimize_succeeded) {
+ memset(&optimize_params, 0, sizeof(struct optimization_optimize_function_params));
+
+ optimize_params.locals = &l->optimize_function_locals;
+ optimize_params.dml = params->dml;
+ optimize_params.display_config = &l->cur_candidate_display_cfg;
+ optimize_params.optimized_display_config = &l->next_candidate_display_cfg;
+ optimize_params.last_candidate_supported = candidate_validation_passed;
+
+ optimize_succeeded = params->optimize_function(&optimize_params);
+
+ if (optimize_succeeded) {
+ l->mode_support_params.instance = &params->dml->core_instance;
+ l->mode_support_params.display_cfg = &l->next_candidate_display_cfg;
+ l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
+
+ if (l->next_candidate_display_cfg.stage3.performed)
+ l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage3.min_clk_index_for_latency;
+ else
+ l->mode_support_params.min_clk_index = l->next_candidate_display_cfg.stage1.min_clk_index_for_latency;
+ candidate_validation_passed = params->dml->core_instance.mode_support(&l->mode_support_params);
+ l->next_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
+ }
+
+ if (optimize_succeeded && candidate_validation_passed) {
+ memset(&test_params, 0, sizeof(struct optimization_test_function_params));
+ test_params.locals = &l->test_function_locals;
+ test_params.dml = params->dml;
+ test_params.display_config = &l->next_candidate_display_cfg;
+ test_passed = params->test_function(&test_params);
+
+ copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, &l->next_candidate_display_cfg);
+
+ // If optimization is not all or nothing, then store partial progress in output
+ if (!params->all_or_nothing)
+ copy_display_configuration_with_meta(params->optimized_display_config, &l->next_candidate_display_cfg);
+ }
+ }
+
+ if (test_passed)
+ copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
+
+ return test_passed;
+}
+
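+// Phase 1 binary-searches the minimum clock state index: if mode support
+// passes at the midpoint the upper bound moves down, otherwise the lower
+// bound moves up, converging on the lowest supported state.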
+static bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization_phase_locals *l, const struct optimization_phase_params *params)
+{
+ int highest_state, lowest_state, cur_state;
+ bool supported = false;
+
+ if (!params->dml ||
+ !params->optimize_function ||
+ !params->test_function ||
+ !params->display_config ||
+ !params->optimized_display_config)
+ return false;
+
+ copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
+ highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency;
+ lowest_state = 0;
+
+ while (highest_state > lowest_state) {
+ cur_state = (highest_state + lowest_state) / 2;
+
+ l->mode_support_params.instance = &params->dml->core_instance;
+ l->mode_support_params.display_cfg = &l->cur_candidate_display_cfg;
+ l->mode_support_params.min_clk_table = &params->dml->min_clk_table;
+ l->mode_support_params.min_clk_index = cur_state;
+ supported = params->dml->core_instance.mode_support(&l->mode_support_params);
+
+ if (supported) {
+ l->cur_candidate_display_cfg.mode_support_result = l->mode_support_params.mode_support_result;
+ highest_state = cur_state;
+ } else {
+ lowest_state = cur_state + 1;
+ }
+ }
+ l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency = lowest_state;
+
+ copy_display_configuration_with_meta(params->optimized_display_config, &l->cur_candidate_display_cfg);
+
+ return true;
+}
+
+/*
+* Takes an input set of mcache boundaries and finds the appropriate setting of cache programming.
+* Returns true if a valid set of programming can be made, and false otherwise. "Valid" means
+* that the horizontal viewport does not span more than 2 cache slices.
+*
+* It can also optionally apply a constant shift to all the cache boundaries.
+*/
+static const uint32_t MCACHE_ID_UNASSIGNED = 0xF;
+static const uint32_t SPLIT_LOCATION_UNDEFINED = 0xFFFF;
+
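+// Example: with boundaries {512, 1024} and shift 0, a viewport spanning
+// x = 400..700 starts in slice 0 and ends in slice 1; the slices are
+// adjacent, so the split is valid with *first_offset = 0, *second_offset = 1.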
+static bool calculate_first_second_splitting(const int *mcache_boundaries, int num_boundaries, int shift,
+ int pipe_h_vp_start, int pipe_h_vp_end, int *first_offset, int *second_offset)
+{
+ const int MAX_VP = 0xFFFFFF;
+ int left_cache_id;
+ int right_cache_id;
+ int range_start;
+ int range_end;
+ bool success = false;
+
+ if (num_boundaries <= 1) {
+ if (first_offset && second_offset) {
+ *first_offset = 0;
+ *second_offset = -1;
+ }
+ success = true;
+ return success;
+ } else {
+ range_start = 0;
+ for (left_cache_id = 0; left_cache_id < num_boundaries; left_cache_id++) {
+ range_end = mcache_boundaries[left_cache_id] - shift - 1;
+
+ if (range_start <= pipe_h_vp_start && pipe_h_vp_start <= range_end)
+ break;
+
+ range_start = range_end + 1;
+ }
+
+ range_end = MAX_VP;
+ for (right_cache_id = num_boundaries - 1; right_cache_id >= -1; right_cache_id--) {
+ if (right_cache_id >= 0)
+ range_start = mcache_boundaries[right_cache_id] - shift;
+ else
+ range_start = 0;
+
+ if (range_start <= pipe_h_vp_end && pipe_h_vp_end <= range_end) {
+ break;
+ }
+ range_end = range_start - 1;
+ }
+ right_cache_id = (right_cache_id + 1) % num_boundaries;
+
+ if (right_cache_id == left_cache_id) {
+ if (first_offset && second_offset) {
+ *first_offset = left_cache_id;
+ *second_offset = -1;
+ }
+ success = true;
+ } else if (right_cache_id == (left_cache_id + 1) % num_boundaries) {
+ if (first_offset && second_offset) {
+ *first_offset = left_cache_id;
+ *second_offset = right_cache_id;
+ }
+ success = true;
+ }
+ }
+
+ return success;
+}
+
+/*
+* For a given set of pipe start/end x positions, checks whether they can support the input mcache splitting.
+* It also attempts to "optimize" by finding a shift if the default 0 shift does not work.
+*/
+static bool find_shift_for_valid_cache_id_assignment(int *mcache_boundaries, unsigned int num_boundaries,
+ int *pipe_vp_startx, int *pipe_vp_endx, unsigned int pipe_count, int shift_granularity, int *shift)
+{
+ int max_shift = 0xFFFF;
+ unsigned int pipe_index;
+ unsigned int i, slice_width;
+ bool success = false;
+
+ for (i = 0; i < num_boundaries; i++) {
+ if (i == 0)
+ slice_width = mcache_boundaries[i];
+ else
+ slice_width = mcache_boundaries[i] - mcache_boundaries[i - 1];
+
+ if (max_shift > (int)slice_width) {
+ max_shift = slice_width;
+ }
+ }
+
+ for (*shift = 0; *shift <= max_shift; *shift += shift_granularity) {
+ success = true;
+ for (pipe_index = 0; pipe_index < pipe_count; pipe_index++) {
+ if (!calculate_first_second_splitting(mcache_boundaries, num_boundaries, *shift,
+ pipe_vp_startx[pipe_index], pipe_vp_endx[pipe_index], NULL, NULL)) {
+ success = false;
+ break;
+ }
+ }
+ if (success)
+ break;
+ }
+
+ return success;
+}
+
+/*
+* Counts the number of elements inside input array within the given span length.
+* Formally: the size of the largest subset of the array whose largest and
+* smallest elements differ by no more than the span.
+*/
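+// Example: for array {0, 10, 20, 30} and span 15, the largest subset whose
+// extremes differ by at most 15 has two elements (e.g. {0, 10}), so the
+// function returns 2.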
+static unsigned int count_elements_in_span(int *array, unsigned int array_size, unsigned int span)
+{
+ unsigned int i;
+ unsigned int span_start_value;
+ unsigned int span_start_index;
+ unsigned int greatest_element_count;
+
+ if (array_size == 0)
+ return 1;
+
+ if (span == 0)
+ return array_size > 0 ? 1 : 0;
+
+ span_start_value = 0;
+ span_start_index = 0;
+ greatest_element_count = 0;
+
+ while (span_start_index < array_size) {
+ for (i = span_start_index; i < array_size; i++) {
+ if (array[i] - span_start_value <= span) {
+ if (i - span_start_index + 1 > greatest_element_count) {
+ greatest_element_count = i - span_start_index + 1;
+ }
+ } else
+ break;
+ }
+
+ span_start_index++;
+
+ if (span_start_index < array_size) {
+ span_start_value = array[span_start_index - 1] + 1;
+ }
+ }
+
+ return greatest_element_count;
+}
+
+static bool calculate_h_split_for_scaling_transform(int full_vp_width, int h_active, int num_pipes,
+ enum dml2_scaling_transform scaling_transform, int *pipe_vp_x_start, int *pipe_vp_x_end)
+{
+ int i, slice_width;
+ const char MAX_SCL_VP_OVERLAP = 3;
+ bool success = false;
+
+ switch (scaling_transform) {
+ case dml2_scaling_transform_centered:
+ case dml2_scaling_transform_aspect_ratio:
+ case dml2_scaling_transform_fullscreen:
+ slice_width = full_vp_width / num_pipes;
+ for (i = 0; i < num_pipes; i++) {
+ pipe_vp_x_start[i] = i * slice_width;
+ pipe_vp_x_end[i] = (i + 1) * slice_width - 1;
+
+ if (pipe_vp_x_start[i] < MAX_SCL_VP_OVERLAP)
+ pipe_vp_x_start[i] = 0;
+ else
+ pipe_vp_x_start[i] -= MAX_SCL_VP_OVERLAP;
+
+ if (pipe_vp_x_end[i] > full_vp_width - MAX_SCL_VP_OVERLAP - 1)
+ pipe_vp_x_end[i] = full_vp_width - 1;
+ else
+ pipe_vp_x_end[i] += MAX_SCL_VP_OVERLAP;
+ }
+ success = true;
+ break;
+ case dml2_scaling_transform_explicit:
+ default:
+ success = false;
+ break;
+ }
+
+ return success;
+}
+
+bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
+ struct dml2_top_mcache_validate_admissability_locals *l = &dml->scratch.mcache_validate_admissability_locals;
+
+ const int MAX_PIXEL_OVERLAP = 6;
+ int max_per_pipe_vp_p0 = 0;
+ int max_per_pipe_vp_p1 = 0;
+ int temp, p0shift, p1shift;
+ unsigned int plane_index = 0;
+ unsigned int i;
+ unsigned int odm_combine_factor;
+ unsigned int mpc_combine_factor;
+ unsigned int num_dpps;
+ unsigned int num_boundaries;
+ enum dml2_scaling_transform scaling_transform;
+ const struct dml2_plane_parameters *plane;
+ const struct dml2_stream_parameters *stream;
+
+ bool p0pass = false;
+ bool p1pass = false;
+ bool all_pass = true;
+
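+ // For each DCC-enabled plane: derive the widest viewport a single pipe can
+ // see under the ODM/MPC split, check that no pipe viewport needs more than
+ // two mcache slices, and for stationary viewports retry with a shifted set
+ // of boundaries before declaring the plane inadmissible.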
+ for (plane_index = 0; plane_index < params->display_cfg->num_planes; plane_index++) {
+ if (!params->display_cfg->plane_descriptors[plane_index].surface.dcc.enable)
+ continue;
+
+ plane = &params->display_cfg->plane_descriptors[plane_index];
+ stream = &params->display_cfg->stream_descriptors[plane->stream_index];
+
+ num_dpps = odm_combine_factor = params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
+
+ if (odm_combine_factor == 1)
+ num_dpps = mpc_combine_factor = (unsigned int)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
+ else
+ mpc_combine_factor = 1;
+
+ if (odm_combine_factor > 1) {
+ max_per_pipe_vp_p0 = plane->surface.plane0.width;
+ temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane0.h_ratio * stream->timing.h_active / odm_combine_factor);
+
+ if (temp < max_per_pipe_vp_p0)
+ max_per_pipe_vp_p0 = temp;
+
+ max_per_pipe_vp_p1 = plane->surface.plane1.width;
+ temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane1.h_ratio * stream->timing.h_active / odm_combine_factor);
+
+ if (temp < max_per_pipe_vp_p1)
+ max_per_pipe_vp_p1 = temp;
+ } else {
+ max_per_pipe_vp_p0 = plane->surface.plane0.width / mpc_combine_factor;
+ max_per_pipe_vp_p1 = plane->surface.plane1.width / mpc_combine_factor;
+ }
+
+ max_per_pipe_vp_p0 += 2 * MAX_PIXEL_OVERLAP;
+ max_per_pipe_vp_p1 += MAX_PIXEL_OVERLAP;
+
+ p0shift = 0;
+ p1shift = 0;
+
+ // The last element in the unshifted boundary array will always be the first pixel outside the
+ // plane, which means there's no mcache associated with it, hence the -1
+ num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane0 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane0 - 1;
+ if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
+ num_boundaries, max_per_pipe_vp_p0) <= 1) && (num_boundaries <= num_dpps)) {
+ p0pass = true;
+ }
+ num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane1 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane1 - 1;
+ if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
+ num_boundaries, max_per_pipe_vp_p1) <= 1) && (num_boundaries <= num_dpps)) {
+ p1pass = true;
+ }
+
+ if (!p0pass || !p1pass) {
+ if (odm_combine_factor > 1) {
+ num_dpps = odm_combine_factor;
+ scaling_transform = plane->composition.scaling_transform;
+ } else {
+ num_dpps = mpc_combine_factor;
+ scaling_transform = dml2_scaling_transform_fullscreen;
+ }
+
+ if (!p0pass) {
+ if (plane->composition.viewport.stationary) {
+ calculate_h_split_for_scaling_transform(plane->surface.plane0.width,
+ stream->timing.h_active, num_dpps, scaling_transform,
+ &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index]);
+ p0pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
+ params->mcache_allocations[plane_index].num_mcaches_plane0,
+ &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index], num_dpps,
+ params->mcache_allocations[plane_index].shift_granularity.p0, &p0shift);
+ }
+ }
+ if (!p1pass) {
+ if (plane->composition.viewport.stationary) {
+ calculate_h_split_for_scaling_transform(plane->surface.plane1.width,
+ stream->timing.h_active, num_dpps, scaling_transform,
+ &l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index]);
+ p1pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
+ params->mcache_allocations[plane_index].num_mcaches_plane1,
+ &l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index], num_dpps,
+ params->mcache_allocations[plane_index].shift_granularity.p1, &p1shift);
+ }
+ }
+ }
+
+ if (p0pass && p1pass) {
+ for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane0; i++) {
+ params->mcache_allocations[plane_index].mcache_x_offsets_plane0[i] -= p0shift;
+ }
+ for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane1; i++) {
+ params->mcache_allocations[plane_index].mcache_x_offsets_plane1[i] -= p1shift;
+ }
+ }
+
+ params->per_plane_status[plane_index] = p0pass && p1pass;
+ all_pass &= p0pass && p1pass;
+ }
+
+ return all_pass;
+}
+
+static void reset_mcache_allocations(struct dml2_hubp_pipe_mcache_regs *per_plane_pipe_mcache_regs)
+{
+ // Initialize all entries to special valid MCache ID and special valid split coordinate
+ per_plane_pipe_mcache_regs->main.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p0.split_location = SPLIT_LOCATION_UNDEFINED;
+
+ per_plane_pipe_mcache_regs->mall.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p0.split_location = SPLIT_LOCATION_UNDEFINED;
+
+ per_plane_pipe_mcache_regs->main.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->main.p1.split_location = SPLIT_LOCATION_UNDEFINED;
+
+ per_plane_pipe_mcache_regs->mall.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
+ per_plane_pipe_mcache_regs->mall.p1.split_location = SPLIT_LOCATION_UNDEFINED;
+}
+
+void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params)
+{
+ int i;
+ unsigned int j;
+ int next_unused_cache_id = 0;
+
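+	// Hand out globally unique mcache IDs from a single ascending counter shared by all planes.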
+ for (i = 0; i < params->num_allocations; i++) {
+ if (!params->allocations[i].valid)
+ continue;
+
+ for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
+ params->allocations[i].global_mcache_ids_plane0[j] = next_unused_cache_id++;
+ }
+ for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
+ params->allocations[i].global_mcache_ids_plane1[j] = next_unused_cache_id++;
+ }
+
+ // The "psuedo-last" slice is always wrapped around
+ params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0] =
+ params->allocations[i].global_mcache_ids_plane0[0];
+ params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1] =
+ params->allocations[i].global_mcache_ids_plane1[0];
+
+ // If we need dedicated caches for mall requesting, then we assign them here.
+ if (params->allocations[i].requires_dedicated_mall_mcache) {
+ for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
+ params->allocations[i].global_mcache_ids_mall_plane0[j] = next_unused_cache_id++;
+ }
+ for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
+ params->allocations[i].global_mcache_ids_mall_plane1[j] = next_unused_cache_id++;
+ }
+
+ // The "psuedo-last" slice is always wrapped around
+ params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0] =
+ params->allocations[i].global_mcache_ids_mall_plane0[0];
+ params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1] =
+ params->allocations[i].global_mcache_ids_mall_plane1[0];
+ }
+
+		// If P0 and P1 share their last slice, the largest mcache IDs of p0 and p1 must match.
+		// Since mcache IDs are assigned in ascending order, set the largest mcache ID of p1
+		// to the largest mcache ID of p0.
+ if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
+ params->allocations[i].last_slice_sharing.plane0_plane1) {
+ params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
+ params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
+ }
+
+		// If dedicated MALL caches are required, handle their last-slice sharing as well
+ if (params->allocations[i].requires_dedicated_mall_mcache) {
+ if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
+ params->allocations[i].last_slice_sharing.plane0_plane1) {
+ params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
+ params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1];
+ }
+			// If mall_comb_mcache_p0 is set, the largest mcache ID for MALL p0 can match regular read p0
+ if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p0) {
+ params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1] =
+ params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
+ }
+			// If mall_comb_mcache_p1 is set, the largest mcache ID for MALL p1 can match regular
+			// read p1 (which can itself match regular read p0 if plane0_plane1 is also set)
+ if (params->allocations[i].num_mcaches_plane1 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p1) {
+ params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
+ params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1];
+ }
+ }
+
+		// Without dedicated MALL mcaches, the MALL assignments are identical to the regular read ones
+ if (!params->allocations[i].requires_dedicated_mall_mcache) {
+ memcpy(params->allocations[i].global_mcache_ids_mall_plane0, params->allocations[i].global_mcache_ids_plane0,
+ sizeof(params->allocations[i].global_mcache_ids_mall_plane0));
+ memcpy(params->allocations[i].global_mcache_ids_mall_plane1, params->allocations[i].global_mcache_ids_plane1,
+ sizeof(params->allocations[i].global_mcache_ids_mall_plane1));
+ }
+ }
+}
+
+bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
+ struct dml2_top_mcache_verify_mcache_size_locals *l = &dml->scratch.mcache_verify_mcache_size_locals;
+
+ unsigned int total_mcaches_required;
+ unsigned int i;
+ bool result = false;
+
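+	// An SoC without DCC mcaches has nothing to allocate or validate.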
+ if (dml->soc_bbox.num_dcc_mcaches == 0) {
+ return true;
+ }
+
+ total_mcaches_required = 0;
+ l->calc_mcache_params.instance = &dml->core_instance;
+ for (i = 0; i < params->display_config->num_planes; i++) {
+ if (!params->display_config->plane_descriptors[i].surface.dcc.enable) {
+ memset(&params->mcache_allocations[i], 0, sizeof(struct dml2_mcache_surface_allocation));
+ continue;
+ }
+
+ l->calc_mcache_params.plane_descriptor = &params->display_config->plane_descriptors[i];
+ l->calc_mcache_params.mcache_allocation = &params->mcache_allocations[i];
+ l->calc_mcache_params.plane_index = i;
+
+ if (!dml->core_instance.calculate_mcache_allocation(&l->calc_mcache_params)) {
+ result = false;
+ break;
+ }
+
+ if (params->mcache_allocations[i].valid) {
+ total_mcaches_required += params->mcache_allocations[i].num_mcaches_plane0 + params->mcache_allocations[i].num_mcaches_plane1;
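+			// A shared last slice is counted once per plane above; remove the double count.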
+ if (params->mcache_allocations[i].last_slice_sharing.plane0_plane1)
+ total_mcaches_required--;
+ }
+ }
+ dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
+
+	result = total_mcaches_required <= dml->soc_bbox.num_dcc_mcaches;
+
+ return result;
+}
+
+static bool dml2_top_soc15_check_mode_supported(struct dml2_check_mode_supported_in_out *in_out)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
+ struct dml2_check_mode_supported_locals *l = &dml->scratch.check_mode_supported_locals;
+ struct dml2_display_cfg_programming *dpmm_programming = &dml->dpmm_instance.dpmm_scratch.programming;
+
+ bool result = false;
+ bool mcache_success = false;
+ memset(dpmm_programming, 0, sizeof(struct dml2_display_cfg_programming));
+
+ setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
+
+ l->mode_support_params.instance = &dml->core_instance;
+ l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_support_params.min_clk_table = &dml->min_clk_table;
+ l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
+ result = dml->core_instance.mode_support(&l->mode_support_params);
+ l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
+
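+	/* Mode support passed; run the mcache optimization phase on the base config. */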
+ if (result) {
+ struct optimization_phase_params mcache_phase = {
+ .dml = dml,
+ .display_config = &l->base_display_config_with_meta,
+ .test_function = dml2_top_optimization_test_function_mcache,
+ .optimize_function = dml2_top_optimization_optimize_function_mcache,
+ .optimized_display_config = &l->optimized_display_config_with_meta,
+ .all_or_nothing = false,
+ };
+ mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &mcache_phase);
+ }
+
+ /*
+ * Call DPMM to map all requirements to minimum clock state
+ */
+ if (result) {
+ l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
+ l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
+ l->dppm_map_mode_params.programming = dpmm_programming;
+ l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
+ l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
+ result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
+ }
+
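+	/* The mode is reported as supported only if the mcache phase succeeded;
+	 * the overall result additionally requires the DPMM mapping to have passed.
+	 */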
+ in_out->is_supported = mcache_success;
+ result = result && in_out->is_supported;
+
+ return result;
+}
+
+static bool dml2_top_soc15_build_mode_programming(struct dml2_build_mode_programming_in_out *in_out)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
+ struct dml2_build_mode_programming_locals *l = &dml->scratch.build_mode_programming_locals;
+
+ bool result = false;
+ bool mcache_success = false;
+ bool uclk_pstate_success = false;
+ bool vmin_success = false;
+ bool stutter_success = false;
+ unsigned int i;
+
+ memset(l, 0, sizeof(struct dml2_build_mode_programming_locals));
+ memset(in_out->programming, 0, sizeof(struct dml2_display_cfg_programming));
+
+ memcpy(&in_out->programming->display_config, in_out->display_config, sizeof(struct dml2_display_cfg));
+
+ setup_speculative_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
+
+ l->mode_support_params.instance = &dml->core_instance;
+ l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_support_params.min_clk_table = &dml->min_clk_table;
+ l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
+ result = dml->core_instance.mode_support(&l->mode_support_params);
+
+ l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
+
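+	/*
+	 * The speculative config failed mode support; fall back to the unoptimized
+	 * config, re-run mode support, and only then run phase 1 to find the
+	 * minimum clocks that satisfy latency.
+	 */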
+ if (!result) {
+ setup_unoptimized_display_config_with_meta(dml, &l->base_display_config_with_meta, in_out->display_config);
+
+ l->mode_support_params.instance = &dml->core_instance;
+ l->mode_support_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_support_params.min_clk_table = &dml->min_clk_table;
+ l->mode_support_params.min_clk_index = l->base_display_config_with_meta.stage1.min_clk_index_for_latency;
+ result = dml->core_instance.mode_support(&l->mode_support_params);
+ l->base_display_config_with_meta.mode_support_result = l->mode_support_params.mode_support_result;
+
+ if (!result) {
+ l->informative_params.instance = &dml->core_instance;
+ l->informative_params.programming = in_out->programming;
+ l->informative_params.mode_is_supported = false;
+ dml->core_instance.populate_informative(&l->informative_params);
+
+ return false;
+ }
+
+ /*
+ * Phase 1: Determine minimum clocks to satisfy latency requirements for this mode
+ */
+ memset(&l->min_clock_for_latency_phase, 0, sizeof(struct optimization_phase_params));
+ l->min_clock_for_latency_phase.dml = dml;
+ l->min_clock_for_latency_phase.display_config = &l->base_display_config_with_meta;
+ l->min_clock_for_latency_phase.init_function = dml2_top_optimization_init_function_min_clk_for_latency;
+ l->min_clock_for_latency_phase.test_function = dml2_top_optimization_test_function_min_clk_for_latency;
+ l->min_clock_for_latency_phase.optimize_function = dml2_top_optimization_optimize_function_min_clk_for_latency;
+ l->min_clock_for_latency_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->min_clock_for_latency_phase.all_or_nothing = false;
+
+ dml2_top_optimization_perform_optimization_phase_1(&l->optimization_phase_locals, &l->min_clock_for_latency_phase);
+
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ }
+
+ /*
+ * Phase 2: Satisfy DCC mcache requirements
+ */
+ memset(&l->mcache_phase, 0, sizeof(struct optimization_phase_params));
+ l->mcache_phase.dml = dml;
+ l->mcache_phase.display_config = &l->base_display_config_with_meta;
+ l->mcache_phase.test_function = dml2_top_optimization_test_function_mcache;
+ l->mcache_phase.optimize_function = dml2_top_optimization_optimize_function_mcache;
+ l->mcache_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->mcache_phase.all_or_nothing = true;
+
+ mcache_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->mcache_phase);
+
+ if (!mcache_success) {
+ l->informative_params.instance = &dml->core_instance;
+ l->informative_params.programming = in_out->programming;
+ l->informative_params.mode_is_supported = false;
+
+ dml->core_instance.populate_informative(&l->informative_params);
+
+ in_out->programming->informative.failed_mcache_validation = true;
+ return false;
+ }
+
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+
+ /*
+ * Phase 3: Optimize for Pstate
+ */
+ memset(&l->uclk_pstate_phase, 0, sizeof(struct optimization_phase_params));
+ l->uclk_pstate_phase.dml = dml;
+ l->uclk_pstate_phase.display_config = &l->base_display_config_with_meta;
+ l->uclk_pstate_phase.init_function = dml2_top_optimization_init_function_uclk_pstate;
+ l->uclk_pstate_phase.test_function = dml2_top_optimization_test_function_uclk_pstate;
+ l->uclk_pstate_phase.optimize_function = dml2_top_optimization_optimize_function_uclk_pstate;
+ l->uclk_pstate_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->uclk_pstate_phase.all_or_nothing = true;
+
+ uclk_pstate_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->uclk_pstate_phase);
+
+ if (uclk_pstate_success) {
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ l->base_display_config_with_meta.stage3.success = true;
+ }
+
+ /*
+ * Phase 4: Optimize for Vmin
+ */
+ memset(&l->vmin_phase, 0, sizeof(struct optimization_phase_params));
+ l->vmin_phase.dml = dml;
+ l->vmin_phase.display_config = &l->base_display_config_with_meta;
+ l->vmin_phase.init_function = dml2_top_optimization_init_function_vmin;
+ l->vmin_phase.test_function = dml2_top_optimization_test_function_vmin;
+ l->vmin_phase.optimize_function = dml2_top_optimization_optimize_function_vmin;
+ l->vmin_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->vmin_phase.all_or_nothing = false;
+
+ vmin_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->vmin_phase);
+
+ if (l->optimized_display_config_with_meta.stage4.performed) {
+ /*
+ * when performed is true, optimization has applied to
+ * optimized_display_config_with_meta and it has passed mode
+ * support. However it may or may not pass the test function to
+ * reach actual Vmin. As long as voltage is optimized even if it
+ * doesn't reach Vmin level, there is still power benefit so in
+ * this case we will still copy this optimization into base
+ * display config.
+ */
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ l->base_display_config_with_meta.stage4.success = vmin_success;
+ }
+
+ /*
+ * Phase 5: Optimize for Stutter
+ */
+ memset(&l->stutter_phase, 0, sizeof(struct optimization_phase_params));
+ l->stutter_phase.dml = dml;
+ l->stutter_phase.display_config = &l->base_display_config_with_meta;
+ l->stutter_phase.init_function = dml2_top_optimization_init_function_stutter;
+ l->stutter_phase.test_function = dml2_top_optimization_test_function_stutter;
+ l->stutter_phase.optimize_function = dml2_top_optimization_optimize_function_stutter;
+ l->stutter_phase.optimized_display_config = &l->optimized_display_config_with_meta;
+ l->stutter_phase.all_or_nothing = true;
+
+ stutter_success = dml2_top_optimization_perform_optimization_phase(&l->optimization_phase_locals, &l->stutter_phase);
+
+ if (stutter_success) {
+ memcpy(&l->base_display_config_with_meta, &l->optimized_display_config_with_meta, sizeof(struct display_configuation_with_meta));
+ l->base_display_config_with_meta.stage5.success = true;
+ }
+
+ /*
+ * Populate mcache programming
+ */
+ for (i = 0; i < in_out->display_config->num_planes; i++) {
+ in_out->programming->plane_programming[i].mcache_allocation = l->base_display_config_with_meta.stage2.mcache_allocations[i];
+ }
+
+ /*
+ * Call DPMM to map all requirements to minimum clock state
+ */
+ if (result) {
+ l->dppm_map_mode_params.min_clk_table = &dml->min_clk_table;
+ l->dppm_map_mode_params.display_cfg = &l->base_display_config_with_meta;
+ l->dppm_map_mode_params.programming = in_out->programming;
+ l->dppm_map_mode_params.soc_bb = &dml->soc_bbox;
+ l->dppm_map_mode_params.ip = &dml->core_instance.clean_me_up.mode_lib.ip;
+ result = dml->dpmm_instance.map_mode_to_soc_dpm(&l->dppm_map_mode_params);
+ if (!result)
+ in_out->programming->informative.failed_dpmm = true;
+ }
+
+ if (result) {
+ l->mode_programming_params.instance = &dml->core_instance;
+ l->mode_programming_params.display_cfg = &l->base_display_config_with_meta;
+ l->mode_programming_params.cfg_support_info = &l->base_display_config_with_meta.mode_support_result.cfg_support_info;
+ l->mode_programming_params.programming = in_out->programming;
+ result = dml->core_instance.mode_programming(&l->mode_programming_params);
+ if (!result)
+ in_out->programming->informative.failed_mode_programming = true;
+ }
+
+ if (result) {
+ l->dppm_map_watermarks_params.core = &dml->core_instance;
+ l->dppm_map_watermarks_params.display_cfg = &l->base_display_config_with_meta;
+ l->dppm_map_watermarks_params.programming = in_out->programming;
+ result = dml->dpmm_instance.map_watermarks(&l->dppm_map_watermarks_params);
+ }
+
+ l->informative_params.instance = &dml->core_instance;
+ l->informative_params.programming = in_out->programming;
+ l->informative_params.mode_is_supported = result;
+
+ dml->core_instance.populate_informative(&l->informative_params);
+
+ return result;
+}
+
+bool dml2_top_soc15_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params)
+{
+ bool success = true;
+ int config_index, pipe_index;
+ int first_offset, second_offset;
+ int free_per_plane_reg_index = 0;
+
+ memset(params->per_plane_pipe_mcache_regs, 0, DML2_MAX_PLANES * DML2_MAX_DCN_PIPES * sizeof(struct dml2_hubp_pipe_mcache_regs *));
+
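+	// For each (plane configuration, pipe) pair, map the pipe's horizontal viewport
+	// onto at most two mcache slices and program the first/second IDs plus the split
+	// location for both the main and MALL register sets.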
+ for (config_index = 0; config_index < params->num_configurations; config_index++) {
+ for (pipe_index = 0; pipe_index < params->mcache_configurations[config_index].num_pipes; pipe_index++) {
+ // Allocate storage for the mcache regs
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index] = &params->mcache_regs_set[free_per_plane_reg_index++];
+
+ reset_mcache_allocations(params->per_plane_pipe_mcache_regs[config_index][pipe_index]);
+
+ if (params->mcache_configurations[config_index].plane_descriptor->surface.dcc.enable) {
+ // P0 always enabled
+ if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0,
+ params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane0,
+ 0,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start +
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_width - 1,
+ &first_offset, &second_offset)) {
+ success = false;
+ break;
+ }
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[first_offset];
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[first_offset];
+
+ if (second_offset >= 0) {
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
+ }
+
+ // Populate P1 if enabled
+ if (params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1_enabled) {
+ if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1,
+ params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane1,
+ 0,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start,
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start +
+ params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_width - 1,
+ &first_offset, &second_offset)) {
+ success = false;
+ break;
+ }
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[first_offset];
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_first =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[first_offset];
+
+ if (second_offset >= 0) {
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
+
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_second =
+ params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[second_offset];
+ params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.split_location =
+ params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
+ }
+ }
+ }
+ }
+ }
+
+ return success;
+}
+
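+// Top-level vtable for SoC15-class DML2; installed on the instance at the end of
+// dml2_top_soc15_initialize_instance().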
+static const struct dml2_top_funcs soc15_funcs = {
+ .check_mode_supported = dml2_top_soc15_check_mode_supported,
+ .build_mode_programming = dml2_top_soc15_build_mode_programming,
+ .build_mcache_programming = dml2_top_soc15_build_mcache_programming,
+};
+
+bool dml2_top_soc15_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
+{
+ struct dml2_instance *dml = (struct dml2_instance *)in_out->dml2_instance;
+ struct dml2_initialize_instance_locals *l = &dml->scratch.initialize_instance_locals;
+ struct dml2_core_initialize_in_out core_init_params = { 0 };
+ struct dml2_mcg_build_min_clock_table_params_in_out mcg_build_min_clk_params = { 0 };
+ struct dml2_pmo_initialize_in_out pmo_init_params = { 0 };
+ bool result = false;
+
+ memset(l, 0, sizeof(struct dml2_initialize_instance_locals));
+ memset(dml, 0, sizeof(struct dml2_instance));
+
+ memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
+ memcpy(&dml->soc_bbox, &in_out->soc_bb, sizeof(struct dml2_soc_bb));
+
+ dml->project_id = in_out->options.project_id;
+ dml->pmo_options = in_out->options.pmo_options;
+
+ // Initialize All Components
+ result = dml2_mcg_create(in_out->options.project_id, &dml->mcg_instance);
+
+ if (result)
+ result = dml2_dpmm_create(in_out->options.project_id, &dml->dpmm_instance);
+
+ if (result)
+ result = dml2_core_create(in_out->options.project_id, &dml->core_instance);
+
+ if (result) {
+ mcg_build_min_clk_params.soc_bb = &in_out->soc_bb;
+ mcg_build_min_clk_params.min_clk_table = &dml->min_clk_table;
+ result = dml->mcg_instance.build_min_clock_table(&mcg_build_min_clk_params);
+ }
+
+ if (result) {
+ core_init_params.project_id = in_out->options.project_id;
+ core_init_params.instance = &dml->core_instance;
+ core_init_params.minimum_clock_table = &dml->min_clk_table;
+ core_init_params.explicit_ip_bb = in_out->overrides.explicit_ip_bb;
+ core_init_params.explicit_ip_bb_size = in_out->overrides.explicit_ip_bb_size;
+ core_init_params.ip_caps = &in_out->ip_caps;
+ core_init_params.soc_bb = &in_out->soc_bb;
+ result = dml->core_instance.initialize(&core_init_params);
+
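+		/* Note: when an explicit IP BB is supplied, core initialization may have
+		 * updated the IP caps, so the local copy is refreshed below (assumption
+		 * based on the re-copy; the core contract is not shown here).
+		 */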
+ if (core_init_params.explicit_ip_bb && core_init_params.explicit_ip_bb_size > 0) {
+ memcpy(&dml->ip_caps, &in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
+ }
+ }
+
+ if (result)
+ result = dml2_pmo_create(in_out->options.project_id, &dml->pmo_instance);
+
+ if (result) {
+ pmo_init_params.instance = &dml->pmo_instance;
+ pmo_init_params.soc_bb = &dml->soc_bbox;
+ pmo_init_params.ip_caps = &dml->ip_caps;
+ pmo_init_params.mcg_clock_table_size = dml->min_clk_table.dram_bw_table.num_entries;
+ pmo_init_params.options = &dml->pmo_options;
+ dml->pmo_instance.initialize(&pmo_init_params);
+ }
+ dml->funcs = soc15_funcs;
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
index 7b1f6f7143d0..53bd8602f9ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
@@ -2,22 +2,13 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#ifndef __DML_TOP_MCACHE_H__
-#define __DML_TOP_MCACHE_H__
-
-#include "dml2_external_lib_deps.h"
-#include "dml_top_display_cfg_types.h"
-#include "dml_top_types.h"
+#ifndef __DML2_TOP_SOC15_H__
+#define __DML2_TOP_SOC15_H__
#include "dml2_internal_shared_types.h"
+bool dml2_top_soc15_initialize_instance(struct dml2_initialize_instance_in_out *in_out);
bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params);
-
void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params);
-
bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params);
-
-bool dml2_top_mcache_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params);
-
-bool dml2_top_mcache_unit_test(void);
-
-#endif
+bool dml2_top_soc15_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params);
+#endif /* __DML2_TOP_SOC15_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
deleted file mode 100644
index a342ebfbe4e7..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml_top_mcache.c
+++ /dev/null
@@ -1,549 +0,0 @@
-// SPDX-License-Identifier: MIT
-//
-// Copyright 2024 Advanced Micro Devices, Inc.
-
-#include "dml2_debug.h"
-
-#include "dml_top_mcache.h"
-#include "lib_float_math.h"
-
-#include "dml2_internal_shared_types.h"
-
-/*
-* Takes an input set of mcache boundaries and finds the appropriate setting of cache programming.
-* Returns true if a valid set of programming can be made, and false otherwise. "Valid" means
-* that the horizontal viewport does not span more than 2 cache slices.
-*
-* It optionally also can apply a constant shift to all the cache boundaries.
-*/
-static const uint32_t MCACHE_ID_UNASSIGNED = 0xF;
-static const uint32_t SPLIT_LOCATION_UNDEFINED = 0xFFFF;
-
-static bool calculate_first_second_splitting(const int *mcache_boundaries, int num_boundaries, int shift,
- int pipe_h_vp_start, int pipe_h_vp_end, int *first_offset, int *second_offset)
-{
- const int MAX_VP = 0xFFFFFF;
- int left_cache_id;
- int right_cache_id;
- int range_start;
- int range_end;
- bool success = false;
-
- if (num_boundaries <= 1) {
- if (first_offset && second_offset) {
- *first_offset = 0;
- *second_offset = -1;
- }
- success = true;
- return success;
- } else {
- range_start = 0;
- for (left_cache_id = 0; left_cache_id < num_boundaries; left_cache_id++) {
- range_end = mcache_boundaries[left_cache_id] - shift - 1;
-
- if (range_start <= pipe_h_vp_start && pipe_h_vp_start <= range_end)
- break;
-
- range_start = range_end + 1;
- }
-
- range_end = MAX_VP;
- for (right_cache_id = num_boundaries - 1; right_cache_id >= -1; right_cache_id--) {
- if (right_cache_id >= 0)
- range_start = mcache_boundaries[right_cache_id] - shift;
- else
- range_start = 0;
-
- if (range_start <= pipe_h_vp_end && pipe_h_vp_end <= range_end) {
- break;
- }
- range_end = range_start - 1;
- }
- right_cache_id = (right_cache_id + 1) % num_boundaries;
-
- if (right_cache_id == left_cache_id) {
- if (first_offset && second_offset) {
- *first_offset = left_cache_id;
- *second_offset = -1;
- }
- success = true;
- } else if (right_cache_id == (left_cache_id + 1) % num_boundaries) {
- if (first_offset && second_offset) {
- *first_offset = left_cache_id;
- *second_offset = right_cache_id;
- }
- success = true;
- }
- }
-
- return success;
-}
-
-/*
-* For a given set of pipe start/end x positions, checks to see it can support the input mcache splitting.
-* It also attempts to "optimize" by finding a shift if the default 0 shift does not work.
-*/
-static bool find_shift_for_valid_cache_id_assignment(int *mcache_boundaries, unsigned int num_boundaries,
- int *pipe_vp_startx, int *pipe_vp_endx, unsigned int pipe_count, int shift_granularity, int *shift)
-{
- int max_shift = 0xFFFF;
- unsigned int pipe_index;
- unsigned int i, slice_width;
- bool success = false;
-
- for (i = 0; i < num_boundaries; i++) {
- if (i == 0)
- slice_width = mcache_boundaries[i];
- else
- slice_width = mcache_boundaries[i] - mcache_boundaries[i - 1];
-
- if (max_shift > (int)slice_width) {
- max_shift = slice_width;
- }
- }
-
- for (*shift = 0; *shift <= max_shift; *shift += shift_granularity) {
- success = true;
- for (pipe_index = 0; pipe_index < pipe_count; pipe_index++) {
- if (!calculate_first_second_splitting(mcache_boundaries, num_boundaries, *shift,
- pipe_vp_startx[pipe_index], pipe_vp_endx[pipe_index], 0, 0)) {
- success = false;
- break;
- }
- }
- if (success)
- break;
- }
-
- return success;
-}
-
-/*
-* Counts the number of elements inside input array within the given span length.
-* Formally, what is the size of the largest subset of the array where the largest and smallest element
-* differ no more than the span.
-*/
-static unsigned int count_elements_in_span(int *array, unsigned int array_size, unsigned int span)
-{
- unsigned int i;
- unsigned int span_start_value;
- unsigned int span_start_index;
- unsigned int greatest_element_count;
-
- if (array_size == 0)
- return 1;
-
- if (span == 0)
- return array_size > 0 ? 1 : 0;
-
- span_start_value = 0;
- span_start_index = 0;
- greatest_element_count = 0;
-
- while (span_start_index < array_size) {
- for (i = span_start_index; i < array_size; i++) {
- if (array[i] - span_start_value <= span) {
- if (i - span_start_index + 1 > greatest_element_count) {
- greatest_element_count = i - span_start_index + 1;
- }
- } else
- break;
- }
-
- span_start_index++;
-
- if (span_start_index < array_size) {
- span_start_value = array[span_start_index - 1] + 1;
- }
- }
-
- return greatest_element_count;
-}
-
-static bool calculate_h_split_for_scaling_transform(int full_vp_width, int h_active, int num_pipes,
- enum dml2_scaling_transform scaling_transform, int *pipe_vp_x_start, int *pipe_vp_x_end)
-{
- int i, slice_width;
- const char MAX_SCL_VP_OVERLAP = 3;
- bool success = false;
-
- switch (scaling_transform) {
- case dml2_scaling_transform_centered:
- case dml2_scaling_transform_aspect_ratio:
- case dml2_scaling_transform_fullscreen:
- slice_width = full_vp_width / num_pipes;
- for (i = 0; i < num_pipes; i++) {
- pipe_vp_x_start[i] = i * slice_width;
- pipe_vp_x_end[i] = (i + 1) * slice_width - 1;
-
- if (pipe_vp_x_start[i] < MAX_SCL_VP_OVERLAP)
- pipe_vp_x_start[i] = 0;
- else
- pipe_vp_x_start[i] -= MAX_SCL_VP_OVERLAP;
-
- if (pipe_vp_x_end[i] > full_vp_width - MAX_SCL_VP_OVERLAP - 1)
- pipe_vp_x_end[i] = full_vp_width - 1;
- else
- pipe_vp_x_end[i] += MAX_SCL_VP_OVERLAP;
- }
- break;
- case dml2_scaling_transform_explicit:
- default:
- success = false;
- break;
- }
-
- return success;
-}
-
-bool dml2_top_mcache_validate_admissability(struct top_mcache_validate_admissability_in_out *params)
-{
- struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
- struct dml2_top_mcache_validate_admissability_locals *l = &dml->scratch.mcache_validate_admissability_locals;
-
- const int MAX_PIXEL_OVERLAP = 6;
- int max_per_pipe_vp_p0 = 0;
- int max_per_pipe_vp_p1 = 0;
- int temp, p0shift, p1shift;
- unsigned int plane_index = 0;
- unsigned int i;
- unsigned int odm_combine_factor;
- unsigned int mpc_combine_factor;
- unsigned int num_dpps;
- unsigned int num_boundaries;
- enum dml2_scaling_transform scaling_transform;
- const struct dml2_plane_parameters *plane;
- const struct dml2_stream_parameters *stream;
-
- bool p0pass = false;
- bool p1pass = false;
- bool all_pass = true;
-
- for (plane_index = 0; plane_index < params->display_cfg->num_planes; plane_index++) {
- if (!params->display_cfg->plane_descriptors[plane_index].surface.dcc.enable)
- continue;
-
- plane = &params->display_cfg->plane_descriptors[plane_index];
- stream = &params->display_cfg->stream_descriptors[plane->stream_index];
-
- num_dpps = odm_combine_factor = params->cfg_support_info->stream_support_info[plane->stream_index].odms_used;
-
- if (odm_combine_factor == 1)
- num_dpps = mpc_combine_factor = (unsigned int)params->cfg_support_info->plane_support_info[plane_index].dpps_used;
- else
- mpc_combine_factor = 1;
-
- if (odm_combine_factor > 1) {
- max_per_pipe_vp_p0 = plane->surface.plane0.width;
- temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane0.h_ratio * stream->timing.h_active / odm_combine_factor);
-
- if (temp < max_per_pipe_vp_p0)
- max_per_pipe_vp_p0 = temp;
-
- max_per_pipe_vp_p1 = plane->surface.plane1.width;
- temp = (unsigned int)math_ceil(plane->composition.scaler_info.plane1.h_ratio * stream->timing.h_active / odm_combine_factor);
-
- if (temp < max_per_pipe_vp_p1)
- max_per_pipe_vp_p1 = temp;
- } else {
- max_per_pipe_vp_p0 = plane->surface.plane0.width / mpc_combine_factor;
- max_per_pipe_vp_p1 = plane->surface.plane1.width / mpc_combine_factor;
- }
-
- max_per_pipe_vp_p0 += 2 * MAX_PIXEL_OVERLAP;
- max_per_pipe_vp_p1 += MAX_PIXEL_OVERLAP;
-
- p0shift = 0;
- p1shift = 0;
-
- // The last element in the unshifted boundary array will always be the first pixel outside the
- // plane, which means theres no mcache associated with it, so -1
- num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane0 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane0 - 1;
- if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
- num_boundaries, max_per_pipe_vp_p0) <= 1) && (num_boundaries <= num_dpps)) {
- p0pass = true;
- }
- num_boundaries = params->mcache_allocations[plane_index].num_mcaches_plane1 == 0 ? 0 : params->mcache_allocations[plane_index].num_mcaches_plane1 - 1;
- if ((count_elements_in_span(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
- num_boundaries, max_per_pipe_vp_p1) <= 1) && (num_boundaries <= num_dpps)) {
- p1pass = true;
- }
-
- if (!p0pass || !p1pass) {
- if (odm_combine_factor > 1) {
- num_dpps = odm_combine_factor;
- scaling_transform = plane->composition.scaling_transform;
- } else {
- num_dpps = mpc_combine_factor;
- scaling_transform = dml2_scaling_transform_fullscreen;
- }
-
- if (!p0pass) {
- if (plane->composition.viewport.stationary) {
- calculate_h_split_for_scaling_transform(plane->surface.plane0.width,
- stream->timing.h_active, num_dpps, scaling_transform,
- &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index]);
- p0pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane0,
- params->mcache_allocations[plane_index].num_mcaches_plane0,
- &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index], num_dpps,
- params->mcache_allocations[plane_index].shift_granularity.p0, &p0shift);
- }
- }
- if (!p1pass) {
- if (plane->composition.viewport.stationary) {
- calculate_h_split_for_scaling_transform(plane->surface.plane1.width,
- stream->timing.h_active, num_dpps, scaling_transform,
- &l->plane0.pipe_vp_startx[plane_index], &l->plane0.pipe_vp_endx[plane_index]);
- p1pass = find_shift_for_valid_cache_id_assignment(params->mcache_allocations[plane_index].mcache_x_offsets_plane1,
- params->mcache_allocations[plane_index].num_mcaches_plane1,
- &l->plane1.pipe_vp_startx[plane_index], &l->plane1.pipe_vp_endx[plane_index], num_dpps,
- params->mcache_allocations[plane_index].shift_granularity.p1, &p1shift);
- }
- }
- }
-
- if (p0pass && p1pass) {
- for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane0; i++) {
- params->mcache_allocations[plane_index].mcache_x_offsets_plane0[i] -= p0shift;
- }
- for (i = 0; i < params->mcache_allocations[plane_index].num_mcaches_plane1; i++) {
- params->mcache_allocations[plane_index].mcache_x_offsets_plane1[i] -= p1shift;
- }
- }
-
- params->per_plane_status[plane_index] = p0pass && p1pass;
- all_pass &= p0pass && p1pass;
- }
-
- return all_pass;
-}
-
-static void reset_mcache_allocations(struct dml2_hubp_pipe_mcache_regs *per_plane_pipe_mcache_regs)
-{
- // Initialize all entries to special valid MCache ID and special valid split coordinate
- per_plane_pipe_mcache_regs->main.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p0.split_location = SPLIT_LOCATION_UNDEFINED;
-
- per_plane_pipe_mcache_regs->mall.p0.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p0.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p0.split_location = SPLIT_LOCATION_UNDEFINED;
-
- per_plane_pipe_mcache_regs->main.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->main.p1.split_location = SPLIT_LOCATION_UNDEFINED;
-
- per_plane_pipe_mcache_regs->mall.p1.mcache_id_first = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p1.mcache_id_second = MCACHE_ID_UNASSIGNED;
- per_plane_pipe_mcache_regs->mall.p1.split_location = SPLIT_LOCATION_UNDEFINED;
-}
-
-bool dml2_top_mcache_build_mcache_programming(struct dml2_build_mcache_programming_in_out *params)
-{
- bool success = true;
- int config_index, pipe_index;
- int first_offset, second_offset;
- int free_per_plane_reg_index = 0;
-
- memset(params->per_plane_pipe_mcache_regs, 0, DML2_MAX_PLANES * DML2_MAX_DCN_PIPES * sizeof(struct dml2_hubp_pipe_mcache_regs *));
-
- for (config_index = 0; config_index < params->num_configurations; config_index++) {
- for (pipe_index = 0; pipe_index < params->mcache_configurations[config_index].num_pipes; pipe_index++) {
- // Allocate storage for the mcache regs
- params->per_plane_pipe_mcache_regs[config_index][pipe_index] = &params->mcache_regs_set[free_per_plane_reg_index++];
-
- reset_mcache_allocations(params->per_plane_pipe_mcache_regs[config_index][pipe_index]);
-
- if (params->mcache_configurations[config_index].plane_descriptor->surface.dcc.enable) {
- // P0 always enabled
- if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0,
- params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane0,
- 0,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_x_start +
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane0.viewport_width - 1,
- &first_offset, &second_offset)) {
- success = false;
- break;
- }
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[first_offset];
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[first_offset];
-
- if (second_offset >= 0) {
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane0[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p0.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane0[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p0.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane0[first_offset] - 1;
- }
-
- // Populate P1 if enabled
- if (params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1_enabled) {
- if (!calculate_first_second_splitting(params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1,
- params->mcache_configurations[config_index].mcache_allocation->num_mcaches_plane1,
- 0,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start,
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_x_start +
- params->mcache_configurations[config_index].pipe_configurations[pipe_index].plane1.viewport_width - 1,
- &first_offset, &second_offset)) {
- success = false;
- break;
- }
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[first_offset];
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_first =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[first_offset];
-
- if (second_offset >= 0) {
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_plane1[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->main.p1.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
-
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.mcache_id_second =
- params->mcache_configurations[config_index].mcache_allocation->global_mcache_ids_mall_plane1[second_offset];
- params->per_plane_pipe_mcache_regs[config_index][pipe_index]->mall.p1.split_location =
- params->mcache_configurations[config_index].mcache_allocation->mcache_x_offsets_plane1[first_offset] - 1;
- }
- }
- }
- }
- }
-
- return success;
-}
-
-void dml2_top_mcache_assign_global_mcache_ids(struct top_mcache_assign_global_mcache_ids_in_out *params)
-{
- int i;
- unsigned int j;
- int next_unused_cache_id = 0;
-
- for (i = 0; i < params->num_allocations; i++) {
- if (!params->allocations[i].valid)
- continue;
-
- for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
- params->allocations[i].global_mcache_ids_plane0[j] = next_unused_cache_id++;
- }
- for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
- params->allocations[i].global_mcache_ids_plane1[j] = next_unused_cache_id++;
- }
-
- // The "psuedo-last" slice is always wrapped around
- params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0] =
- params->allocations[i].global_mcache_ids_plane0[0];
- params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1] =
- params->allocations[i].global_mcache_ids_plane1[0];
-
- // If we need dedicated caches for mall requesting, then we assign them here.
- if (params->allocations[i].requires_dedicated_mall_mcache) {
- for (j = 0; j < params->allocations[i].num_mcaches_plane0; j++) {
- params->allocations[i].global_mcache_ids_mall_plane0[j] = next_unused_cache_id++;
- }
- for (j = 0; j < params->allocations[i].num_mcaches_plane1; j++) {
- params->allocations[i].global_mcache_ids_mall_plane1[j] = next_unused_cache_id++;
- }
-
- // The "psuedo-last" slice is always wrapped around
- params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0] =
- params->allocations[i].global_mcache_ids_mall_plane0[0];
- params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1] =
- params->allocations[i].global_mcache_ids_mall_plane1[0];
- }
-
- // If P0 and P1 are sharing caches, then it means the largest mcache IDs for p0 and p1 can be the same
- // since mcache IDs are always ascending, then it means the largest mcacheID of p1 should be the
- // largest mcacheID of P0
- if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
- params->allocations[i].last_slice_sharing.plane0_plane1) {
- params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
- params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
- }
-
- // If we need dedicated caches handle last slice sharing
- if (params->allocations[i].requires_dedicated_mall_mcache) {
- if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].num_mcaches_plane1 > 0 &&
- params->allocations[i].last_slice_sharing.plane0_plane1) {
- params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
- params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1];
- }
- // If mall_comb_mcache_l is set then it means that largest mcache ID for MALL p0 can be same as regular read p0
- if (params->allocations[i].num_mcaches_plane0 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p0) {
- params->allocations[i].global_mcache_ids_mall_plane0[params->allocations[i].num_mcaches_plane0 - 1] =
- params->allocations[i].global_mcache_ids_plane0[params->allocations[i].num_mcaches_plane0 - 1];
- }
- // If mall_comb_mcache_c is set then it means that largest mcache ID for MALL p1 can be same as regular
- // read p1 (which can be same as regular read p0 if plane0_plane1 is also set)
- if (params->allocations[i].num_mcaches_plane1 > 0 && params->allocations[i].last_slice_sharing.mall_comb_mcache_p1) {
- params->allocations[i].global_mcache_ids_mall_plane1[params->allocations[i].num_mcaches_plane1 - 1] =
- params->allocations[i].global_mcache_ids_plane1[params->allocations[i].num_mcaches_plane1 - 1];
- }
- }
-
- // If you don't need dedicated mall mcaches, the mall mcache assignments are identical to the normal requesting
- if (!params->allocations[i].requires_dedicated_mall_mcache) {
- memcpy(params->allocations[i].global_mcache_ids_mall_plane0, params->allocations[i].global_mcache_ids_plane0,
- sizeof(params->allocations[i].global_mcache_ids_mall_plane0));
- memcpy(params->allocations[i].global_mcache_ids_mall_plane1, params->allocations[i].global_mcache_ids_plane1,
- sizeof(params->allocations[i].global_mcache_ids_mall_plane1));
- }
- }
-}
-
-bool dml2_top_mcache_calc_mcache_count_and_offsets(struct top_mcache_calc_mcache_count_and_offsets_in_out *params)
-{
- struct dml2_instance *dml = (struct dml2_instance *)params->dml2_instance;
- struct dml2_top_mcache_verify_mcache_size_locals *l = &dml->scratch.mcache_verify_mcache_size_locals;
-
- unsigned int total_mcaches_required;
- unsigned int i;
- bool result = false;
-
- if (dml->soc_bbox.num_dcc_mcaches == 0) {
- return true;
- }
-
- total_mcaches_required = 0;
- l->calc_mcache_params.instance = &dml->core_instance;
- for (i = 0; i < params->display_config->num_planes; i++) {
- if (!params->display_config->plane_descriptors[i].surface.dcc.enable) {
- memset(&params->mcache_allocations[i], 0, sizeof(struct dml2_mcache_surface_allocation));
- continue;
- }
-
- l->calc_mcache_params.plane_descriptor = &params->display_config->plane_descriptors[i];
- l->calc_mcache_params.mcache_allocation = &params->mcache_allocations[i];
- l->calc_mcache_params.plane_index = i;
-
- if (!dml->core_instance.calculate_mcache_allocation(&l->calc_mcache_params)) {
- result = false;
- break;
- }
-
- if (params->mcache_allocations[i].valid) {
- total_mcaches_required += params->mcache_allocations[i].num_mcaches_plane0 + params->mcache_allocations[i].num_mcaches_plane1;
- if (params->mcache_allocations[i].last_slice_sharing.plane0_plane1)
- total_mcaches_required--;
- }
- }
- dml2_printf("DML_CORE_DCN3::%s: plane_%d, total_mcaches_required=%d\n", __func__, i, total_mcaches_required);
-
- if (total_mcaches_required > dml->soc_bbox.num_dcc_mcaches) {
- result = false;
- } else {
- result = true;
- }
-
- return result;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
index e9b8e10695ae..f95c7ff56f15 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.c
@@ -4,6 +4,11 @@
#include "dml2_debug.h"
+int dml2_log_internal(const char *format, ...)
+{
+ return 0;
+}
+
int dml2_printf(const char *format, ...)
{
#ifdef _DEBUG
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
index d51a1b6c62f2..a27792b56f7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
@@ -8,9 +8,53 @@
#ifdef _DEBUG
#define DML2_ASSERT(condition) dml2_assert(condition)
#else
-#define DML2_ASSERT(condition)
+#define DML2_ASSERT(condition) ((void)0)
+#endif
+/*
+ * DML_LOG_FATAL - fatal errors for unrecoverable DML states until a restart.
+ * DML_LOG_ERROR - unexpected but recoverable failures inside DML
+ * DML_LOG_WARN - unexpected inputs or events to DML
+ * DML_LOG_INFO - high level tracing of DML interfaces
+ * DML_LOG_DEBUG - detailed tracing of DML internal components
+ * DML_LOG_VERBOSE - detailed tracing of DML calculation procedure
+ */
+#if !defined(DML_LOG_LEVEL)
+#if defined(_DEBUG) && defined(_DEBUG_PRINTS)
+/* for backward compatibility with old macros */
+#define DML_LOG_LEVEL 5
+#else
+#define DML_LOG_LEVEL 0
+#endif
+#endif
+
+#define DML_LOG_FATAL(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#if DML_LOG_LEVEL >= 1
+#define DML_LOG_ERROR(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_ERROR(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 2
+#define DML_LOG_WARN(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_WARN(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 3
+#define DML_LOG_INFO(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_INFO(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 4
+#define DML_LOG_DEBUG(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_DEBUG(fmt, ...) ((void)0)
+#endif
+#if DML_LOG_LEVEL >= 5
+#define DML_LOG_VERBOSE(fmt, ...) dml2_log_internal(fmt, ## __VA_ARGS__)
+#else
+#define DML_LOG_VERBOSE(fmt, ...) ((void)0)
#endif
+int dml2_log_internal(const char *format, ...);
int dml2_printf(const char *format, ...);
void dml2_assert(int condition);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
index aeac9f159fa5..7fb6026bcb49 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
@@ -8,7 +8,6 @@
#include "dml2_external_lib_deps.h"
#include "dml_top_types.h"
#include "dml2_core_shared_types.h"
-
/*
* DML2 MCG Types and Interfaces
*/
@@ -63,7 +62,6 @@ struct dml2_mcg_build_min_clock_table_params_in_out {
*/
struct dml2_mcg_min_clock_table *min_clk_table;
};
-
struct dml2_mcg_instance {
bool (*build_min_clock_table)(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
bool (*unit_test)(void);
@@ -81,7 +79,6 @@ struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out {
struct dml2_soc_bb *soc_bb;
struct dml2_mcg_min_clock_table *min_clk_table;
const struct display_configuation_with_meta *display_cfg;
-
struct {
bool perform_pseudo_map;
struct dml2_core_internal_soc_bb *soc_bb;
@@ -309,7 +306,7 @@ struct dml2_optimization_stage3_state {
// The pstate support mode for each plane
// The number of valid elements == display_cfg.num_planes
// The indexing of pstate_switch_modes matches plane_descriptors[]
- enum dml2_uclk_pstate_support_method pstate_switch_modes[DML2_MAX_PLANES];
+ enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
// Meta-data for implicit SVP generation, indexed by stream index
struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
@@ -356,6 +353,10 @@ struct display_configuation_with_meta {
struct dml2_optimization_stage5_state stage5;
};
+struct dml2_pmo_pstate_strategy {
+ enum dml2_pstate_method per_stream_pstate_method[DML2_MAX_PLANES];
+ bool allow_state_increase;
+};
struct dml2_core_mode_support_in_out {
/*
* Inputs
@@ -365,7 +366,6 @@ struct dml2_core_mode_support_in_out {
struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
-
/*
* Outputs
*/
@@ -395,7 +395,6 @@ struct dml2_core_mode_programming_in_out {
struct dml2_core_instance *instance;
const struct display_configuation_with_meta *display_cfg;
const struct core_display_cfg_support_info *cfg_support_info;
-
/*
* Outputs (also Input the clk freq are also from programming struct)
*/
@@ -445,6 +444,7 @@ struct dml2_core_internal_state_intermediates {
struct dml2_core_mode_support_locals {
struct dml2_core_calcs_mode_support_ex mode_support_ex_params;
struct dml2_display_cfg svp_expanded_display_cfg;
+ struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params;
};
struct dml2_core_mode_programming_locals {
@@ -600,34 +600,11 @@ struct dml2_pmo_optimize_for_stutter_in_out {
struct display_configuation_with_meta *optimized_display_config;
};
-enum dml2_pmo_pstate_method {
- dml2_pmo_pstate_strategy_na = 0,
- /* hw exclusive modes */
- dml2_pmo_pstate_strategy_vactive = 1,
- dml2_pmo_pstate_strategy_vblank = 2,
- dml2_pmo_pstate_strategy_reserved_hw = 5,
- /* fw assisted exclusive modes */
- dml2_pmo_pstate_strategy_fw_svp = 6,
- dml2_pmo_pstate_strategy_reserved_fw = 10,
- /* fw assisted modes requiring drr modulation */
- dml2_pmo_pstate_strategy_fw_vactive_drr = 11,
- dml2_pmo_pstate_strategy_fw_vblank_drr = 12,
- dml2_pmo_pstate_strategy_fw_svp_drr = 13,
- dml2_pmo_pstate_strategy_reserved_fw_drr_clamped = 20,
- dml2_pmo_pstate_strategy_fw_drr = 21,
- dml2_pmo_pstate_strategy_reserved_fw_drr_var = 22,
-};
-
-struct dml2_pmo_pstate_strategy {
- enum dml2_pmo_pstate_method per_stream_pstate_method[DML2_MAX_PLANES];
- bool allow_state_increase;
-};
-
-#define PMO_NO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw - dml2_pmo_pstate_strategy_na + 1)) - 1) << dml2_pmo_pstate_strategy_na)
-#define PMO_DRR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
-#define PMO_DRR_CLAMPED_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_clamped - dml2_pmo_pstate_strategy_fw_vactive_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_vactive_drr)
-#define PMO_DRR_VAR_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_drr + 1)) - 1) << dml2_pmo_pstate_strategy_fw_drr)
-#define PMO_FW_STRATEGY_MASK (((1 << (dml2_pmo_pstate_strategy_reserved_fw_drr_var - dml2_pmo_pstate_strategy_fw_svp + 1)) - 1) << dml2_pmo_pstate_strategy_fw_svp)
+#define PMO_NO_DRR_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw - dml2_pstate_method_na + 1)) - 1) << dml2_pstate_method_na)
+#define PMO_DRR_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_var - dml2_pstate_method_fw_vactive_drr + 1)) - 1) << dml2_pstate_method_fw_vactive_drr)
+#define PMO_DRR_CLAMPED_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_clamped - dml2_pstate_method_fw_vactive_drr + 1)) - 1) << dml2_pstate_method_fw_vactive_drr)
+#define PMO_DRR_VAR_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_var - dml2_pstate_method_fw_drr + 1)) - 1) << dml2_pstate_method_fw_drr)
+#define PMO_FW_STRATEGY_MASK (((1 << (dml2_pstate_method_reserved_fw_drr_var - dml2_pstate_method_fw_svp + 1)) - 1) << dml2_pstate_method_fw_svp)
#define PMO_DCN4_MAX_DISPLAYS 4
#define PMO_DCN4_MAX_NUM_VARIANTS 2
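The five range masks above depend on dml2_pstate_method keeping its enumerators contiguous: a mask for the closed range [lo, hi] is built as (((1 << (hi - lo + 1)) - 1) << lo). A minimal standalone sketch of the construction and a membership test, with stand-in enumerator values:

#include <assert.h>
#include <stdio.h>

/* Stand-in values mirroring the contiguous enum layout the masks assume. */
enum { method_na = 0, method_vactive = 1, method_vblank = 2, method_reserved_fw = 10 };

#define RANGE_MASK(lo, hi) ((((1u << ((hi) - (lo) + 1)) - 1u)) << (lo))

int main(void)
{
	unsigned int no_drr_mask = RANGE_MASK(method_na, method_reserved_fw); /* bits 0..10 */

	assert(no_drr_mask == 0x7ffu);
	/* Membership test: set a bit at the method's position, AND with the mask. */
	assert((1u << method_vblank) & no_drr_mask);
	printf("mask=0x%x\n", no_drr_mask);
	return 0;
}

A method is classified by testing its one-hot bit against the mask, which is presumably how the PMO consumes these groupings.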
@@ -645,6 +622,8 @@ struct dml2_pmo_scratch {
int stream_mask;
} pmo_dcn3;
struct {
+ struct dml2_pmo_pstate_strategy expanded_override_strategy_list[2 * 2 * 2 * 2];
+ unsigned int num_expanded_override_strategies;
struct dml2_pmo_pstate_strategy pstate_strategy_candidates[DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE];
int num_pstate_candidates;
int cur_pstate_candidate;
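The new expanded_override_strategy_list is sized 2 * 2 * 2 * 2, matching PMO_DCN4_MAX_NUM_VARIANTS raised to PMO_DCN4_MAX_DISPLAYS. Assuming each stream independently picks one of two variants (an assumption about semantics, not taken from the patch), the expansion enumerates all 2^n combinations, as in this standalone sketch:

#include <stdio.h>

#define MAX_STREAMS 4

/* Hypothetical two-way variant per stream; 2^MAX_STREAMS expansions total. */
static void expand(int num_streams, int variants[][MAX_STREAMS], int *count)
{
	for (int combo = 0; combo < (1 << num_streams); combo++) {
		for (int s = 0; s < num_streams; s++)
			variants[*count][s] = (combo >> s) & 1; /* 0 or 1 = variant pick */
		(*count)++;
	}
}

int main(void)
{
	int variants[1 << MAX_STREAMS][MAX_STREAMS];
	int count = 0;

	expand(MAX_STREAMS, variants, &count);
	printf("%d expanded strategies\n", count); /* 16 == 2*2*2*2 */
	return 0;
}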
@@ -706,7 +685,6 @@ struct dml2_pmo_instance {
int mpc_combine_limit;
int odm_combine_limit;
int mcg_clock_table_size;
-
union {
struct {
struct {
@@ -963,7 +941,13 @@ struct dml2_top_mcache_validate_admissability_locals {
struct dml2_top_display_cfg_support_info {
const struct dml2_display_cfg *display_config;
struct core_display_cfg_support_info core_info;
- enum dml2_pstate_support_method per_plane_pstate_method[DML2_MAX_PLANES];
+};
+
+struct dml2_top_funcs {
+ bool (*check_mode_supported)(struct dml2_check_mode_supported_in_out *in_out);
+ bool (*build_mode_programming)(struct dml2_build_mode_programming_in_out *in_out);
+ bool (*build_mcache_programming)(struct dml2_build_mcache_programming_in_out *in_out);
+ bool (*unit_test)(void);
};
struct dml2_instance {
@@ -978,8 +962,8 @@ struct dml2_instance {
struct dml2_ip_capabilities ip_caps;
struct dml2_mcg_min_clock_table min_clk_table;
-
struct dml2_pmo_options pmo_options;
+ struct dml2_top_funcs funcs;
struct {
struct dml2_initialize_instance_locals initialize_instance_locals;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 6eccf0241d85..1ed21c1b86a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -258,12 +258,25 @@ static unsigned int find_preferred_pipe_candidates(const struct dc_state *existi
* However this condition comes with a caveat. We need to ignore pipes that will
* require a change in OPP but still have the same stream id. For example during
* an MPC to ODM transition.
+ *
+ * Add a check to avoid selecting the head pipe itself, using the dc resource
+ * helper resource_get_primary_dpp_pipe() and comparing the pipe index.
*/
if (existing_state) {
for (i = 0; i < pipe_count; i++) {
if (existing_state->res_ctx.pipe_ctx[i].stream && existing_state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) {
+ struct pipe_ctx *head_pipe =
+ resource_is_pipe_type(&existing_state->res_ctx.pipe_ctx[i], DPP_PIPE) ?
+ resource_get_primary_dpp_pipe(&existing_state->res_ctx.pipe_ctx[i]) :
+ NULL;
+
+ // always exclude the head pipe from selection
+ if (head_pipe && head_pipe->pipe_idx == i)
+ continue;
if (existing_state->res_ctx.pipe_ctx[i].plane_res.hubp &&
- existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i)
+ existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i &&
+ (existing_state->res_ctx.pipe_ctx[i].prev_odm_pipe ||
+ existing_state->res_ctx.pipe_ctx[i].next_odm_pipe))
continue;
preferred_pipe_candidates[num_preferred_candidates++] = i;
@@ -292,6 +305,14 @@ static unsigned int find_last_resort_pipe_candidates(const struct dc_state *exis
*/
if (existing_state) {
for (i = 0; i < pipe_count; i++) {
+ struct pipe_ctx *head_pipe =
+ resource_is_pipe_type(&existing_state->res_ctx.pipe_ctx[i], DPP_PIPE) ?
+ resource_get_primary_dpp_pipe(&existing_state->res_ctx.pipe_ctx[i]) :
+ NULL;
+
+ // always exclude the head pipe from selection
+ if (head_pipe && head_pipe->pipe_idx == i)
+ continue;
if ((existing_state->res_ctx.pipe_ctx[i].plane_res.hubp &&
existing_state->res_ctx.pipe_ctx[i].plane_res.hubp->opp_id != i) ||
existing_state->res_ctx.pipe_ctx[i].stream_res.tg)
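Both candidate scans now apply the same head-pipe guard before the existing OPP/TG filters. A toy standalone model of that guard (names and fields here are stand-ins, not the dc types):

#include <stdbool.h>
#include <stdio.h>

struct toy_pipe {
	int pipe_idx;
	bool is_dpp;
	int head_idx; /* index of the head (primary DPP) pipe, -1 if none */
};

/* Pipe i must be skipped from candidacy when it is its own head pipe. */
static bool skip_as_head_pipe(const struct toy_pipe *pipes, int i)
{
	if (!pipes[i].is_dpp || pipes[i].head_idx < 0)
		return false;
	return pipes[i].head_idx == i;
}

int main(void)
{
	struct toy_pipe pipes[2] = {
		{ .pipe_idx = 0, .is_dpp = true, .head_idx = 0 }, /* head: skipped */
		{ .pipe_idx = 1, .is_dpp = true, .head_idx = 0 }, /* secondary: kept */
	};

	for (int i = 0; i < 2; i++)
		printf("pipe %d: %s\n", i, skip_as_head_pipe(pipes, i) ? "skip" : "candidate");
	return 0;
}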
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 3d29169dd6bb..6b3b8803e0ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -813,7 +813,7 @@ static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struc
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+ struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
index 11c904ae2958..c4c52173ef22 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
@@ -303,6 +303,7 @@ void build_unoptimized_policy_settings(enum dml_project_id project, struct dml_m
if (project == dml_project_dcn35 ||
project == dml_project_dcn351) {
policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false;
+ policy->EnhancedPrefetchScheduleAccelerationFinal = 0;
policy->AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter_if_possible; /*new*/
policy->UseOnlyMaxPrefetchModes = 1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index bde4250853b1..b8a34abaf519 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -553,13 +553,53 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
}
- dml2_policy_build_synthetic_soc_states(s, p);
- if (dml2->v20.dml_core_ctx.project == dml_project_dcn35) {
- // Override last out_state with data from last in_state
- // This will ensure that out_state contains max fclk
- memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
- &p->in_states->state_array[p->in_states->num_states - 1],
- sizeof(struct soc_state_bounding_box_st));
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0,
+ max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0, max_socclk_mhz = 0;
+
+ for (i = 0; i < p->in_states->num_states; i++) {
+ if (p->in_states->state_array[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = (int)p->in_states->state_array[i].dcfclk_mhz;
+ if (p->in_states->state_array[i].fabricclk_mhz > max_fclk_mhz)
+ max_fclk_mhz = (int)p->in_states->state_array[i].fabricclk_mhz;
+ if (p->in_states->state_array[i].socclk_mhz > max_socclk_mhz)
+ max_socclk_mhz = (int)p->in_states->state_array[i].socclk_mhz;
+ if (p->in_states->state_array[i].dram_speed_mts > max_uclk_mhz)
+ max_uclk_mhz = (int)p->in_states->state_array[i].dram_speed_mts;
+ if (p->in_states->state_array[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = (int)p->in_states->state_array[i].dispclk_mhz;
+ if (p->in_states->state_array[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = (int)p->in_states->state_array[i].dppclk_mhz;
+ if (p->in_states->state_array[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = (int)p->in_states->state_array[i].phyclk_mhz;
+ if (p->in_states->state_array[i].dtbclk_mhz > max_dtbclk_mhz)
+ max_dtbclk_mhz = (int)p->in_states->state_array[i].dtbclk_mhz;
+ }
+
+ for (i = 0; i < p->in_states->num_states; i++) {
+ /* Independent states - including base (unlisted) parameters from state 0. */
+ p->out_states->state_array[i] = p->in_states->state_array[0];
+
+ p->out_states->state_array[i].dispclk_mhz = max_dispclk_mhz;
+ p->out_states->state_array[i].dppclk_mhz = max_dppclk_mhz;
+ p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz;
+ p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz;
+
+ p->out_states->state_array[i].dscclk_mhz = max_dispclk_mhz / 3.0;
+
+ /* Dependent states. */
+ p->out_states->state_array[i].dram_speed_mts = p->in_states->state_array[i].dram_speed_mts;
+ p->out_states->state_array[i].fabricclk_mhz = p->in_states->state_array[i].fabricclk_mhz;
+ p->out_states->state_array[i].socclk_mhz = p->in_states->state_array[i].socclk_mhz;
+ p->out_states->state_array[i].dcfclk_mhz = p->in_states->state_array[i].dcfclk_mhz;
+ }
+
+ p->out_states->num_states = p->in_states->num_states;
+ } else {
+ dml2_policy_build_synthetic_soc_states(s, p);
}
}
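For DCN35/DCN351 the loop pins the "independent" clocks (dispclk, dppclk, dtbclk, phyclk, and the derived dscclk) to their maxima across all input states, while the "dependent" clocks (uclk, fclk, socclk, dcfclk) still track each input state. A standalone sketch of the transform over a reduced state struct:

#include <stdio.h>

struct state { int dispclk, dppclk, fclk, uclk; };

static void build_out_states(const struct state *in, struct state *out, int n)
{
	int max_disp = 0, max_dpp = 0;

	for (int i = 0; i < n; i++) {
		if (in[i].dispclk > max_disp) max_disp = in[i].dispclk;
		if (in[i].dppclk > max_dpp) max_dpp = in[i].dppclk;
	}
	for (int i = 0; i < n; i++) {
		out[i] = in[0];            /* base (unlisted) parameters from state 0 */
		out[i].dispclk = max_disp; /* independent: pinned to the max */
		out[i].dppclk = max_dpp;
		out[i].fclk = in[i].fclk;  /* dependent: tracks the input state */
		out[i].uclk = in[i].uclk;
	}
}

int main(void)
{
	struct state in[2] = { { 600, 600, 1000, 1600 }, { 1200, 1200, 2000, 3200 } };
	struct state out[2];

	build_out_states(in, out, 2);
	printf("state0: dispclk=%d fclk=%d\n", out[0].dispclk, out[0].fclk); /* 1200, 1000 */
	return 0;
}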
@@ -746,7 +786,7 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
out->OutputEncoder[location] = dml_dp;
- if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
+ if (location < MAX_HPO_DP2_ENCODERS && dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0;
break;
case SIGNAL_TYPE_EDP:
@@ -1303,7 +1343,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_timings++;
- ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2);
@@ -1343,7 +1383,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_surfaces++;
- ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
populate_dml_plane_cfg_from_plane_state(
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 866b0abcff1b..68b882d28195 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -209,8 +209,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe
p->cur_display_config->output.OutputEncoder[0], p->cur_mode_support_info->DSCEnabled[0]) - 1;
if (odms_needed <= unused_dpps) {
- unused_dpps -= odms_needed;
-
if (odms_needed == 1) {
p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_2to1;
optimization_done = true;
@@ -533,14 +531,21 @@ static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct d
static bool call_dml_mode_support_and_programming(struct dc_state *context)
{
unsigned int result = 0;
- unsigned int min_state;
+ unsigned int min_state = 0;
int min_state_for_g6_temp_read = 0;
+
+ if (!context)
+ return false;
+
struct dml2_context *dml2 = context->bw_ctx.dml2;
struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
- min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
+ if (!context->streams[0]->sink->link->dc->caps.is_apu) {
+ min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);
- ASSERT(min_state_for_g6_temp_read >= 0);
+ ASSERT(min_state_for_g6_temp_read >= 0);
+ }
if (!dml2->config.use_native_pstate_optimization) {
result = optimize_pstate_with_svp_and_drr(dml2, context);
@@ -551,14 +556,20 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context)
/* When trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid min_state value, causing crashes later on.
* Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly.
*/
- if (min_state_for_g6_temp_read >= 0)
- min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
- else
- min_state = s->mode_support_params.out_lowest_state_idx;
-
- if (result)
- result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
+ if (!context->streams[0]->sink->link->dc->caps.is_apu) {
+ if (min_state_for_g6_temp_read >= 0)
+ min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
+ else
+ min_state = s->mode_support_params.out_lowest_state_idx;
+ }
+ if (result) {
+ if (!context->streams[0]->sink->link->dc->caps.is_apu) {
+ result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);
+ } else {
+ result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);
+ }
+ }
return result;
}
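On the non-APU path the selection reduces to: take the larger of the g6 temp-read floor and DML's lowest supported state index, unless the floor is invalid (-1). A minimal standalone sketch:

#include <assert.h>

/* Returns the state index to program, given the lowest index DML reported
 * supported and the g6 temp-read floor (-1 when invalid, e.g. some FRL modes).
 */
static unsigned int pick_min_state(unsigned int lowest_supported, int g6_floor)
{
	if (g6_floor >= 0)
		return (unsigned int)g6_floor > lowest_supported ?
				(unsigned int)g6_floor : lowest_supported;
	return lowest_supported;
}

int main(void)
{
	assert(pick_min_state(2, 5) == 5);  /* floor raises the state */
	assert(pick_min_state(2, -1) == 2); /* invalid floor: DML result wins */
	return 0;
}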
@@ -687,6 +698,8 @@ static bool dml2_validate_only(struct dc_state *context)
build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);
map_dc_state_into_dml_display_cfg(dml2, context, &dml2->v20.scratch.cur_display_config);
+ if (!dml2->config.skip_hw_state_mapping)
+ dml2_apply_det_buffer_allocation_policy(dml2, &dml2->v20.scratch.cur_display_config);
result = pack_and_call_dml_mode_support_ex(dml2,
&dml2->v20.scratch.cur_display_config,
@@ -734,11 +747,10 @@ static inline struct dml2_context *dml2_allocate_memory(void)
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
- dml21_reinit(in_dc, dml2, config);
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
+ dml21_reinit(in_dc, dml2, config);
return;
- }
+ }
// Store config options
(*dml2)->config = *config;
@@ -773,10 +785,8 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01))
return dml21_create(in_dc, dml2, config);
- }
// Allocate Mode Lib Ctx
*dml2 = dml2_allocate_memory();
@@ -844,8 +854,7 @@ void dml2_reinit(const struct dc *in_dc,
const struct dml2_configuration_options *config,
struct dml2_context **dml2)
{
- // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) {
dml21_reinit(in_dc, dml2, config);
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
index 377ef6d01ae5..00d22e542469 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
@@ -427,18 +427,6 @@ void dml_rq_dlg_get_dlg_reg(dml_display_dlg_regs_st *disp_dlg_regs,
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
- // hack for FPGA
- /* NOTE: We dont have getenv defined in driver and it does not make any sense in the driver */
- /*char* fpga_env = getenv("FPGA_FPDIV");
- if(fpga_env !=NULL)
- {
- if(disp_dlg_regs->vratio_prefetch >= (dml_uint_t)dml_pow(2, 22))
- {
- disp_dlg_regs->vratio_prefetch = (dml_uint_t)dml_pow(2, 22)-1;
- dml_print("FPGA msg: vratio_prefetch exceed the max value, the register field is [21:0]\n");
- }
- }*/
-
disp_dlg_regs->refcyc_per_vm_group_vblank = (dml_uint_t)(dml_get_refcyc_per_vm_group_vblank_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_group_flip = (dml_uint_t)(dml_get_refcyc_per_vm_group_flip_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz);
disp_dlg_regs->refcyc_per_vm_req_vblank = (dml_uint_t)(dml_get_refcyc_per_vm_req_vblank_in_us(mode_lib, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10));
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index e1da48b05d00..75fb77bca83b 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -194,6 +194,9 @@ void dpp_reset(struct dpp *dpp_base)
dpp->filter_h = NULL;
dpp->filter_v = NULL;
+ memset(&dpp_base->pos, 0, sizeof(dpp_base->pos));
+ memset(&dpp_base->att, 0, sizeof(dpp_base->att));
+
memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
}
@@ -480,10 +483,11 @@ void dpp1_set_cursor_position(
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
- REG_UPDATE(CURSOR0_CONTROL,
- CUR0_ENABLE, cur_en);
+ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ }
}
void dpp1_cnv_set_optional_cursor_attributes(
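This hunk, like the dpp401 and hubp2 cursor hunks below, gates the MMIO write on a cached shadow of the enable bit, so unchanged cursor state no longer touches CURSOR0_CONTROL. The pattern in isolation, with REG_UPDATE stood in by a plain function:

#include <stdbool.h>
#include <stdio.h>

static unsigned int mmio_cursor_control; /* stand-in for the real register */

static void reg_update_cur_enable(unsigned int en)
{
	mmio_cursor_control = (mmio_cursor_control & ~1u) | (en & 1u);
	printf("MMIO write: CUR0_ENABLE=%u\n", en);
}

struct cursor_shadow { unsigned int cur0_enable; };

static void set_cursor_enable(struct cursor_shadow *shadow, bool visible)
{
	unsigned int cur_en = visible ? 1 : 0;

	if (shadow->cur0_enable != cur_en) { /* skip redundant MMIO writes */
		reg_update_cur_enable(cur_en);
		shadow->cur0_enable = cur_en;
	}
}

int main(void)
{
	struct cursor_shadow s = { 0 };

	set_cursor_enable(&s, true);  /* writes */
	set_cursor_enable(&s, true);  /* cached: no write */
	set_cursor_enable(&s, false); /* writes */
	return 0;
}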
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
index cd1706d301e7..f09cba8e29cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
@@ -690,6 +690,7 @@ struct dcn20_dpp {
int lb_memory_size;
int lb_bits_per_entry;
bool is_write_to_ram_a_safe;
+ bool dispclk_r_gate_disable;
struct scaler_data scl_data;
struct pwl_params pwl_data;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
index b110f35ef66b..f236824126e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
@@ -572,6 +572,7 @@ struct dcn3_dpp {
int lb_memory_size;
int lb_bits_per_entry;
bool is_write_to_ram_a_safe;
+ bool dispclk_r_gate_disable;
struct scaler_data scl_data;
struct pwl_params pwl_data;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index 8473c694bfdc..62b7012cda43 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -50,13 +50,21 @@ void dpp35_dppclk_control(
DPPCLK_RATE_CONTROL, dppclk_div,
DPP_CLOCK_ENABLE, 1);
else
- REG_UPDATE_2(DPP_CONTROL,
+ if (dpp->dispclk_r_gate_disable)
+ REG_UPDATE_2(DPP_CONTROL,
DPP_CLOCK_ENABLE, 1,
DISPCLK_R_GATE_DISABLE, 1);
+ else
+ REG_UPDATE(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, 1);
} else
- REG_UPDATE_2(DPP_CONTROL,
+ if (dpp->dispclk_r_gate_disable)
+ REG_UPDATE_2(DPP_CONTROL,
DPP_CLOCK_ENABLE, 0,
DISPCLK_R_GATE_DISABLE, 0);
+ else
+ REG_UPDATE(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, 0);
}
void dpp35_program_bias_and_scale_fcnv(
@@ -128,6 +136,10 @@ bool dpp35_construct(
(const struct dcn3_dpp_mask *)(tf_mask));
dpp->base.funcs = &dcn35_dpp_funcs;
+
+ // w/a for cursor memory stuck in light sleep (LS): program DISPCLK_R_GATE_DISABLE; limit the w/a to some ASIC revs
+ if (dpp->base.ctx->asic_id.hw_internal_rev <= 0x10)
+ dpp->dispclk_r_gate_disable = true;
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 3b6ca7974e18..1236e0f9a256 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -154,9 +154,11 @@ void dpp401_set_cursor_position(
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
uint32_t cur_en = pos->enable ? 1 : 0;
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
+ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ }
}
void dpp401_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 5105fd580017..2f92e7d4981b 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -1091,7 +1091,8 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
/* ISHARP_DELTA_LUT */
dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
dpp->scl_data.dscl_prog_data.sharpness_level = scl_data->dscl_prog_data.sharpness_level;
- dpp->scl_data.dscl_prog_data.isharp_delta = scl_data->dscl_prog_data.isharp_delta;
+ memcpy(dpp->scl_data.dscl_prog_data.isharp_delta, scl_data->dscl_prog_data.isharp_delta,
+ sizeof(uint32_t) * ISHARP_LUT_TABLE_SIZE);
if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
return;
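Replacing the pointer assignment with memcpy makes the cached scl_data own a snapshot of the isharp LUT, so the memcmp early-out above compares contents rather than aliasing the caller's buffer. A standalone illustration of the difference (names hypothetical):

#include <stdio.h>
#include <string.h>

#define LUT_SIZE 4

struct prog_data {
	const unsigned int *lut_ref;      /* aliases caller memory */
	unsigned int lut_copy[LUT_SIZE];  /* owns a snapshot */
};

int main(void)
{
	unsigned int lut[LUT_SIZE] = { 1, 2, 3, 4 };
	struct prog_data cached = { 0 };

	cached.lut_ref = lut;                      /* pointer assignment */
	memcpy(cached.lut_copy, lut, sizeof(lut)); /* deep copy */

	lut[0] = 99; /* caller mutates its buffer afterwards */
	printf("ref sees %u, copy kept %u\n",
	       cached.lut_ref[0], cached.lut_copy[0]); /* 99 vs 1 */
	return 0;
}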
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index ebd5df1a36e8..11535922b5ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -30,6 +30,9 @@
#include "rc_calc.h"
#include "fixed31_32.h"
+#define DC_LOGGER \
+ dsc->ctx->logger
+
/* This module's internal functions */
/* default DSC policy target bitrate limit is 16bpp */
@@ -480,6 +483,48 @@ bool dc_dsc_compute_bandwidth_range(
return is_dsc_possible;
}
+void dc_dsc_dump_encoder_caps(const struct display_stream_compressor *dsc,
+ const struct dc_crtc_timing *timing)
+{
+ struct dsc_enc_caps dsc_enc_caps;
+
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
+
+ DC_LOG_DSC("dsc encoder caps:");
+ DC_LOG_DSC("\tdsc_version 0x%x", dsc_enc_caps.dsc_version);
+ DC_LOG_DSC("\tslice_caps 0x%x", dsc_enc_caps.slice_caps.raw);
+ DC_LOG_DSC("\tlb_bit_depth %d", dsc_enc_caps.lb_bit_depth);
+ DC_LOG_DSC("\tis_block_pred_supported %d", dsc_enc_caps.is_block_pred_supported);
+ DC_LOG_DSC("\tcolor_formats 0x%x", dsc_enc_caps.color_formats.raw);
+ DC_LOG_DSC("\tcolor_depth 0x%x", dsc_enc_caps.color_depth.raw);
+ DC_LOG_DSC("\tmax_total_throughput_mps %d", dsc_enc_caps.max_total_throughput_mps);
+ DC_LOG_DSC("\tmax_slice_width %d", dsc_enc_caps.max_slice_width);
+ DC_LOG_DSC("\tbpp_increment_div %d", dsc_enc_caps.bpp_increment_div);
+}
+
+void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc,
+ const struct dsc_dec_dpcd_caps *dsc_sink_caps)
+{
+ DC_LOG_DSC("dsc decoder caps:");
+ DC_LOG_DSC("\tis_dsc_supported %d", dsc_sink_caps->is_dsc_supported);
+ DC_LOG_DSC("\tdsc_version 0x%x", dsc_sink_caps->dsc_version);
+ DC_LOG_DSC("\trc_buffer_size %d", dsc_sink_caps->rc_buffer_size);
+ DC_LOG_DSC("\tslice_caps1 0x%x", dsc_sink_caps->slice_caps1.raw);
+ DC_LOG_DSC("\tslice_caps2 0x%x", dsc_sink_caps->slice_caps2.raw);
+ DC_LOG_DSC("\tlb_bit_depth %d", dsc_sink_caps->lb_bit_depth);
+ DC_LOG_DSC("\tis_block_pred_supported %d", dsc_sink_caps->is_block_pred_supported);
+ DC_LOG_DSC("\tedp_max_bits_per_pixel %d", dsc_sink_caps->edp_max_bits_per_pixel);
+ DC_LOG_DSC("\tcolor_formats 0x%x", dsc_sink_caps->color_formats.raw);
+ DC_LOG_DSC("\tthroughput_mode_0_mps %d", dsc_sink_caps->throughput_mode_0_mps);
+ DC_LOG_DSC("\tthroughput_mode_1_mps %d", dsc_sink_caps->throughput_mode_1_mps);
+ DC_LOG_DSC("\tmax_slice_width %d", dsc_sink_caps->max_slice_width);
+ DC_LOG_DSC("\tbpp_increment_div %d", dsc_sink_caps->bpp_increment_div);
+ DC_LOG_DSC("\tbranch_overall_throughput_0_mps %d", dsc_sink_caps->branch_overall_throughput_0_mps);
+ DC_LOG_DSC("\tbranch_overall_throughput_1_mps %d", dsc_sink_caps->branch_overall_throughput_1_mps);
+ DC_LOG_DSC("\tbranch_max_line_width %d", dsc_sink_caps->branch_max_line_width);
+ DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp);
+}
+
static void get_dsc_enc_caps(
const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
@@ -1093,14 +1138,11 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
- // Final decission: can we do DSC or not?
- if (is_dsc_possible) {
- // Fill out the rest of DSC settings
- dsc_cfg->block_pred_enable = dsc_common_caps.is_block_pred_supported;
- dsc_cfg->linebuf_depth = dsc_common_caps.lb_bit_depth;
- dsc_cfg->version_minor = (dsc_common_caps.dsc_version & 0xf0) >> 4;
- dsc_cfg->is_dp = dsc_sink_caps->is_dp;
- }
+ /* Fill out the rest of DSC settings */
+ dsc_cfg->block_pred_enable = dsc_common_caps.is_block_pred_supported;
+ dsc_cfg->linebuf_depth = dsc_common_caps.lb_bit_depth;
+ dsc_cfg->version_minor = (dsc_common_caps.dsc_version & 0xf0) >> 4;
+ dsc_cfg->is_dp = dsc_sink_caps->is_dp;
done:
if (!is_dsc_possible)
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h
index bd98b327a6c7..b86347c9b038 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h
@@ -63,10 +63,6 @@ bool cm3_helper_translate_curve_to_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint);
-bool cm3_helper_translate_curve_to_degamma_hw_format(
- const struct dc_transfer_func *output_tf,
- struct pwl_params *lut_params);
-
bool cm3_helper_convert_to_custom_float(
struct pwl_result_data *rgb_resulted,
struct curve_points3 *corner_points,
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
index fae98cf52020..bc058f682438 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
@@ -270,16 +270,3 @@ void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
dwbc30->dwbc_shift = dwbc_shift;
dwbc30->dwbc_mask = dwbc_mask;
}
-
-void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay)
-{
- struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
-
- /*
- * Set maximum delay of host read access to DWBSCL LUT or OGAM LUT if there are no
- * idle cycles in HW pipeline (in number of clock cycles times 4)
- */
- REG_UPDATE(DWB_HOST_READ_CONTROL, DWB_HOST_READ_RATE_CONTROL, host_read_delay);
-
- DC_LOG_DWB("%s dwb3_rate_control at inst = %d", __func__, dwbc->inst);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
index 0f3f7c5fbaec..7f053f49ec6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h
@@ -914,7 +914,6 @@ bool dwb3_ogam_set_input_transfer_func(
struct dwbc *dwbc,
const struct dc_transfer_func *in_transfer_func_dwb_ogam);
-void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index f344478e9bd4..b099989d9364 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -443,7 +443,6 @@ struct gpio *dal_gpio_create_irq(
case GPIO_ID_GPIO_PAD:
break;
default:
- id = GPIO_ID_HPD;
ASSERT_CRITICAL(false);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h
index a1e2cde9c4cc..9fbd45c7dfef 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h
@@ -198,6 +198,9 @@ struct dcn_hubbub_registers {
uint32_t DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B;
uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_A;
uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_B;
+ uint32_t DCHUBBUB_TIMEOUT_DETECTION_CTRL1;
+ uint32_t DCHUBBUB_TIMEOUT_DETECTION_CTRL2;
+ uint32_t DCHUBBUB_CTRL_STATUS;
};
#define HUBBUB_REG_FIELD_LIST_DCN32(type) \
@@ -313,7 +316,17 @@ struct dcn_hubbub_registers {
type DCN_VM_ERROR_VMID;\
type DCN_VM_ERROR_TABLE_LEVEL;\
type DCN_VM_ERROR_PIPE;\
- type DCN_VM_ERROR_INTERRUPT_STATUS
+ type DCN_VM_ERROR_INTERRUPT_STATUS;\
+ type DCHUBBUB_TIMEOUT_ERROR_STATUS;\
+ type DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD;\
+ type DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD;\
+ type DCHUBBUB_TIMEOUT_DETECTION_EN;\
+ type DCHUBBUB_TIMEOUT_TIMER_RESET;\
+ type ROB_UNDERFLOW_STATUS;\
+ type ROB_OVERFLOW_STATUS;\
+ type ROB_OVERFLOW_CLEAR;\
+ type DCHUBBUB_HW_DEBUG;\
+ type CSTATE_SWATH_CHK_GOOD_MODE
#define HUBBUB_STUTTER_REG_FIELD_LIST(type) \
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h
index 036bb3e6c957..46d8f5c70750 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h
@@ -96,6 +96,7 @@ struct dcn20_hubbub {
unsigned int det1_size;
unsigned int det2_size;
unsigned int det3_size;
+ bool allow_sdpif_rate_limit_when_cstate_req;
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index fe741100c0f8..d347bb06577a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -129,7 +129,8 @@ bool hubbub3_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
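The same safe_to_lower gate is applied in hubbub31, hubbub32, and hubbub35 below: a disallow (stutter disabled) is programmed immediately, while re-allowing self-refresh waits until lowering is safe. The gate in isolation:

#include <stdbool.h>
#include <stdio.h>

static void program_allow_self_refresh(bool allow)
{
	printf("allow_self_refresh <- %d\n", allow);
}

/* Apply "disallow" immediately; apply "allow" only once lowering is safe. */
static void update_self_refresh(bool safe_to_lower, bool disable_stutter)
{
	if (safe_to_lower || disable_stutter)
		program_allow_self_refresh(!disable_stutter);
}

int main(void)
{
	update_self_refresh(false, true);  /* disallow: programmed now */
	update_self_refresh(false, false); /* allow: deferred, nothing written */
	update_self_refresh(true, false);  /* allow: programmed once safe */
	return 0;
}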
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index 7fb5523f9722..b98505b240a7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -750,7 +750,8 @@ static bool hubbub31_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 5264dc26cce1..32a6be543105 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -786,7 +786,8 @@ static bool hubbub32_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 5eb3da8d5206..dce7269959ce 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -326,7 +326,8 @@ static bool hubbub35_program_watermarks(
DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);/*hw delta*/
REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 37d26fa0b6fb..92fab471b183 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1192,6 +1192,37 @@ static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst)
}
}
+static bool dcn401_program_arbiter(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ bool wm_pending = false;
+ uint32_t temp;
+
+ /* request backpressure and outstanding return threshold (unused) */
+ //REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD, arb_regs->req_stall_threshold);
+
+ /* P-State stall threshold */
+ REG_UPDATE(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD, arb_regs->pstate_stall_threshold);
+
+ if (safe_to_lower || arb_regs->allow_sdpif_rate_limit_when_cstate_req > hubbub2->allow_sdpif_rate_limit_when_cstate_req) {
+ hubbub2->allow_sdpif_rate_limit_when_cstate_req = arb_regs->allow_sdpif_rate_limit_when_cstate_req;
+
+ /* only update the required bits */
+ REG_GET(DCHUBBUB_CTRL_STATUS, DCHUBBUB_HW_DEBUG, &temp);
+ if (hubbub2->allow_sdpif_rate_limit_when_cstate_req) {
+ temp |= (1 << 5);
+ } else {
+ temp &= ~(1 << 5);
+ }
+ REG_UPDATE(DCHUBBUB_CTRL_STATUS, DCHUBBUB_HW_DEBUG, temp);
+ } else {
+ wm_pending = true;
+ }
+
+ return wm_pending;
+}
+
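The HW_DEBUG update above is a read-modify-write, so only bit 5 changes and the neighbouring debug bits survive; the bit position is copied from the hunk, while its meaning (SDPIF rate limit on c-state request) is inferred from the surrounding names. The same RMW step standalone:

#include <assert.h>

#define SDPIF_RATE_LIMIT_BIT (1u << 5)

static unsigned int rmw_rate_limit(unsigned int hw_debug, int enable)
{
	if (enable)
		hw_debug |= SDPIF_RATE_LIMIT_BIT;  /* set only bit 5 */
	else
		hw_debug &= ~SDPIF_RATE_LIMIT_BIT; /* clear only bit 5 */
	return hw_debug;
}

int main(void)
{
	assert(rmw_rate_limit(0x41u, 1) == 0x61u); /* other bits preserved */
	assert(rmw_rate_limit(0x61u, 0) == 0x41u);
	return 0;
}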
static const struct hubbub_funcs hubbub4_01_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -1215,6 +1246,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.program_det_segments = dcn401_program_det_segments,
.program_compbuf_segments = dcn401_program_compbuf_segments,
.wait_for_det_update = dcn401_wait_for_det_update,
+ .program_arbiter = dcn401_program_arbiter,
};
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h
index f35f19ba3e18..b1d9ea9d1c3d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h
@@ -123,8 +123,17 @@
HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\
HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\
- HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh)
-
+ HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_ERROR_STATUS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL1, DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_DETECTION_EN, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_TIMEOUT_DETECTION_CTRL2, DCHUBBUB_TIMEOUT_TIMER_RESET, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CTRL_STATUS, ROB_UNDERFLOW_STATUS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CTRL_STATUS, ROB_OVERFLOW_STATUS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CTRL_STATUS, ROB_OVERFLOW_CLEAR, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CTRL_STATUS, DCHUBBUB_HW_DEBUG, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CTRL_STATUS, CSTATE_SWATH_CHK_GOOD_MODE, mask_sh)
bool hubbub401_program_urgent_watermarks(
struct hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index 22ac2b7e49ae..9b026600b90e 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -140,7 +140,7 @@ void hubp1_vready_workaround(struct hubp *hubp,
void hubp1_program_tiling(
struct hubp *hubp,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -518,6 +518,20 @@ bool hubp1_program_surface_flip_and_addr(
return true;
}
+void hubp1_clear_tiling(struct hubp *hubp)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, 0,
+ SECONDARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_IND_64B_BLK, 0);
+}
+
void hubp1_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size independent_64b_blks)
{
@@ -532,10 +546,16 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable,
SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
}
+void hubp_reset(struct hubp *hubp)
+{
+ memset(&hubp->pos, 0, sizeof(hubp->pos));
+ memset(&hubp->att, 0, sizeof(hubp->att));
+}
+
void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -1337,8 +1357,9 @@ static void hubp1_wait_pipe_read_start(struct hubp *hubp)
void hubp1_init(struct hubp *hubp)
{
- //do nothing
+ hubp_reset(hubp);
}
+
static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
@@ -1351,6 +1372,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_hubp_blank_en = hubp1_set_hubp_blank_en,
.set_cursor_attributes = hubp1_cursor_set_attributes,
@@ -1363,6 +1385,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
.hubp_init = hubp1_init,
+ .hubp_clear_tiling = hubp1_clear_tiling,
.dmdata_set_attributes = NULL,
.dmdata_load = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index 69119b2fdce2..c7765e6f09e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -706,7 +706,7 @@ struct dcn10_hubp {
void hubp1_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -739,13 +739,15 @@ void hubp1_program_rotation(
void hubp1_program_tiling(
struct hubp *hubp,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp1_dcc_control(struct hubp *hubp,
bool enable,
enum hubp_ind_block_size independent_64b_blks);
+void hubp_reset(struct hubp *hubp);
+
bool hubp1_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
@@ -794,4 +796,6 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset);
void hubp1_set_flip_int(struct hubp *hubp);
+void hubp1_clear_tiling(struct hubp *hubp);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 0637e4c552d8..91259b896e03 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -310,7 +310,7 @@ void hubp2_setup_interdependent(
*/
static void hubp2_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
REG_UPDATE_3(DCSURF_ADDR_CONFIG,
@@ -406,6 +406,20 @@ void hubp2_program_rotation(
H_MIRROR_EN, mirror);
}
+void hubp2_clear_tiling(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, 0,
+ SECONDARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_IND_64B_BLK, 0);
+}
+
void hubp2_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size independent_64b_blks)
{
@@ -536,7 +550,7 @@ void hubp2_program_pixel_format(
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -1044,11 +1058,13 @@ void hubp2_cursor_set_position(
if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
+ REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
+ }
REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, pos->x,
@@ -1660,6 +1676,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp2_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -1676,6 +1693,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
+ .hubp_clear_tiling = hubp2_clear_tiling,
};
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index 18e194507e36..6968087a3605 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -382,7 +382,7 @@ void hubp2_program_pixel_format(
void hubp2_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -409,6 +409,8 @@ void hubp2_read_state_common(struct hubp *hubp);
void hubp2_read_state(struct hubp *hubp);
+void hubp2_clear_tiling(struct hubp *hubp);
+
#endif /* __DC_MEM_INPUT_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
index cd2bfcc51276..ec88ee424a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c
@@ -42,7 +42,7 @@
static void hubp201_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -121,6 +121,7 @@ static struct hubp_funcs dcn201_hubp_funcs = {
.set_cursor_position = hubp1_cursor_set_position,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
@@ -131,6 +132,7 @@ static struct hubp_funcs dcn201_hubp_funcs = {
.hubp_clear_underflow = hubp1_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
+ .hubp_clear_tiling = hubp1_clear_tiling,
};
bool dcn201_hubp_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
index e13d69a22c1c..e2740482e1cf 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
@@ -811,6 +811,8 @@ static void hubp21_init(struct hubp *hubp)
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+
+ hubp_reset(hubp);
}
static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
@@ -823,6 +825,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp21_set_vm_system_aperture_settings,
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = hubp21_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
@@ -837,6 +840,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_init = hubp21_init,
.validate_dml_output = hubp21_validate_dml_output,
.hubp_set_flip_int = hubp1_set_flip_int,
+ .hubp_clear_tiling = hubp1_clear_tiling,
};
bool hubp21_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 60a64d290352..0da70b50e86d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -318,7 +318,7 @@ bool hubp3_program_surface_flip_and_addr(
void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
REG_UPDATE_4(DCSURF_ADDR_CONFIG,
@@ -334,6 +334,22 @@ void hubp3_program_tiling(
}
+void hubp3_clear_tiling(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_6(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ PRIMARY_SURFACE_DCC_IND_BLK, 0,
+ PRIMARY_SURFACE_DCC_IND_BLK_C, 0,
+ SECONDARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_IND_BLK, 0,
+ SECONDARY_SURFACE_DCC_IND_BLK_C, 0);
+}
+
void hubp3_dcc_control(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size)
{
@@ -395,7 +411,7 @@ void hubp3_dmdata_set_attributes(
void hubp3_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -483,6 +499,10 @@ void hubp3_init(struct hubp *hubp)
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
+
+ hubp_reset(hubp);
}
static struct hubp_funcs dcn30_hubp_funcs = {
@@ -497,6 +517,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -512,6 +533,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index b010531a7fe8..b7d7adf0b58c 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -264,7 +264,7 @@ bool hubp3_program_surface_flip_and_addr(
void hubp3_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -280,7 +280,7 @@ void hubp3_setup(
void hubp3_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp3_dcc_control(struct hubp *hubp, bool enable,
@@ -297,6 +297,8 @@ void hubp3_read_state(struct hubp *hubp);
void hubp3_init(struct hubp *hubp);
+void hubp3_clear_tiling(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 8394e8c06919..c2900c79a2d3 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -79,6 +79,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -96,6 +97,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index ca5b4b28a664..f3a21c623f44 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -168,6 +168,8 @@ void hubp32_init(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
}
static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
@@ -181,6 +183,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp32_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -201,7 +204,8 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_force_cursor_pstate_disallow = hubp32_update_force_cursor_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp32_update_mall_sel,
- .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
+ .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index d1f05b82b3dd..5661d7a80d54 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -172,7 +172,7 @@ void hubp35_program_pixel_format(
void hubp35_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -199,6 +199,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.dcc_control = hubp3_dcc_control,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp2_cursor_set_position,
@@ -216,6 +217,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank_value,
+ .hubp_clear_tiling = hubp3_clear_tiling,
};
bool hubp35_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
index 586b43aa5834..d913f80b3130 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h
@@ -65,7 +65,7 @@ void hubp35_program_pixel_format(
void hubp35_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index b1ebf5053b4f..5ed195377a6c 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -40,7 +40,7 @@
#define FN(reg_name, field_name) \
hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
-static void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
+void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
const struct dc_plane_address address)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -49,14 +49,14 @@ static void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
REG_WRITE(HUBP_3DLUT_ADDRESS_LOW, address.lut3d.addr.low_part);
}
-static void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group)
+void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_DLG_PARAM, REFCYC_PER_3DLUT_GROUP, refcyc_per_3dlut_group);
}
-static void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable)
+void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -72,28 +72,28 @@ int hubp401_get_3dlut_fl_done(struct hubp *hubp)
return ret;
}
-static void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode)
+void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_ADDRESSING_MODE, addr_mode);
}
-static void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width)
+void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
}
-static void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 1 : 0);
}
-static void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
+void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r)
@@ -106,21 +106,21 @@ static void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
HUBP_3DLUT_CROSSBAR_SELECT_CR_R, bit_slice_cr_r);
}
-static void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale)
+void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE, HUBP0_3DLUT_FL_BIAS, bias, HUBP0_3DLUT_FL_SCALE, scale);
}
-static void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode)
+void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_MODE, mode);
}
-static void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format)
+void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -141,34 +141,48 @@ void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor
void hubp401_init(struct hubp *hubp)
{
- //For now nothing to do, HUBPREQ_DEBUG_DB register is removed on DCN4x.
+ hubp_reset(hubp);
}
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing)
{
- uint32_t value = 0;
+ unsigned int vstartup_lines = pipe_global_sync->dcn4x.vstartup_lines;
+ unsigned int vupdate_offset_pixels = pipe_global_sync->dcn4x.vupdate_offset_pixels;
+ unsigned int vupdate_width_pixels = pipe_global_sync->dcn4x.vupdate_vupdate_width_pixels;
+ unsigned int vready_offset_pixels = pipe_global_sync->dcn4x.vready_offset_pixels;
+ unsigned int htotal = timing->h_total;
+ unsigned int vblank_start = 0;
+ unsigned int vblank_end = 0;
+ unsigned int pixel_width = 0;
+ uint32_t reg_value = 0;
+ bool is_vready_at_or_after_vsync = false;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
/*
* if (VSTARTUP_START - (VREADY_OFFSET+VUPDATE_WIDTH+VUPDATE_OFFSET)/htotal) <= OTG_V_BLANK_END
* Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 1
* else
* Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
- if (pipe_dest->htotal != 0) {
- if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
- + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
- value = 1;
- } else
- value = 0;
+ if (htotal != 0) {
+ vblank_start = timing->v_total - timing->v_front_porch;
+ vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
+ pixel_width = vready_offset_pixels + vupdate_width_pixels + vupdate_offset_pixels;
+
+ is_vready_at_or_after_vsync = (vstartup_lines - pixel_width / htotal) <= vblank_end;
+
+ if (is_vready_at_or_after_vsync)
+ reg_value = 1;
}
- REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
+ REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, reg_value);
}
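
For reference, the reworked condition can be evaluated numerically. A standalone sketch with made-up timing values (not taken from any real mode) that mirrors the arithmetic in the new function body:

    #include <stdio.h>

    /* Worked example of the HUBP_VREADY_AT_OR_AFTER_VSYNC condition above,
     * using invented timing values. */
    int main(void)
    {
            unsigned int v_total = 1125, v_front_porch = 4;
            unsigned int v_addressable = 1080, v_border_top = 0, v_border_bottom = 0;
            unsigned int htotal = 2200;
            unsigned int vstartup_lines = 86;
            unsigned int vready_offset_pixels = 0;
            unsigned int vupdate_offset_pixels = 400;
            unsigned int vupdate_width_pixels = 20;

            unsigned int vblank_start = v_total - v_front_porch;      /* 1121 */
            unsigned int vblank_end = vblank_start - v_addressable
                            - v_border_top - v_border_bottom;         /* 41 */
            unsigned int pixel_width = vready_offset_pixels
                            + vupdate_width_pixels + vupdate_offset_pixels;

            int vready_at_or_after_vsync =
                    (vstartup_lines - pixel_width / htotal) <= vblank_end;

            printf("vblank_end=%u lhs=%u reg=%d\n", vblank_end,
                   vstartup_lines - pixel_width / htotal,
                   vready_at_or_after_vsync);
            return 0;
    }

With these sample numbers pixel_width / htotal truncates to 0, the left-hand side stays 86 > 41, and the register is left at 0.
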
void hubp401_program_requestor(
struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs)
+ struct dml2_display_rq_regs *rq_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -196,8 +210,8 @@ void hubp401_program_requestor(
void hubp401_program_deadline(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+ struct dml2_display_dlg_regs *dlg_attr,
+ struct dml2_display_ttu_regs *ttu_attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -294,66 +308,64 @@ void hubp401_program_deadline(
void hubp401_setup(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
- struct _vcs_dpi_display_rq_regs_st *rq_regs,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing)
{
/* otg is locked when this func is called. Registers are double buffered.
 * Disabling the requestors is not needed.
 */
- hubp401_vready_at_or_After_vsync(hubp, pipe_dest);
- hubp401_program_requestor(hubp, rq_regs);
- hubp401_program_deadline(hubp, dlg_attr, ttu_attr);
+ hubp401_vready_at_or_After_vsync(hubp, pipe_global_sync, timing);
+ hubp401_program_requestor(hubp, &pipe_regs->rq_regs);
+ hubp401_program_deadline(hubp, &pipe_regs->dlg_regs, &pipe_regs->ttu_regs);
}
void hubp401_setup_interdependent(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+ struct dml2_dchub_per_pipe_register_set *pipe_regs)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_SET_2(PREFETCH_SETTINGS, 0,
- DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
- VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+ DST_Y_PREFETCH, pipe_regs->dlg_regs.dst_y_prefetch,
+ VRATIO_PREFETCH, pipe_regs->dlg_regs.vratio_prefetch);
REG_SET(PREFETCH_SETTINGS_C, 0,
- VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+ VRATIO_PREFETCH_C, pipe_regs->dlg_regs.vratio_prefetch_c);
REG_SET_2(VBLANK_PARAMETERS_0, 0,
- DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
- DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+ DST_Y_PER_VM_VBLANK, pipe_regs->dlg_regs.dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, pipe_regs->dlg_regs.dst_y_per_row_vblank);
REG_SET_2(FLIP_PARAMETERS_0, 0,
- DST_Y_PER_VM_FLIP, dlg_attr->dst_y_per_vm_flip,
- DST_Y_PER_ROW_FLIP, dlg_attr->dst_y_per_row_flip);
+ DST_Y_PER_VM_FLIP, pipe_regs->dlg_regs.dst_y_per_vm_flip,
+ DST_Y_PER_ROW_FLIP, pipe_regs->dlg_regs.dst_y_per_row_flip);
REG_SET(VBLANK_PARAMETERS_3, 0,
- REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+ REFCYC_PER_META_CHUNK_VBLANK_L, pipe_regs->dlg_regs.refcyc_per_meta_chunk_vblank_l);
REG_SET(VBLANK_PARAMETERS_4, 0,
- REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+ REFCYC_PER_META_CHUNK_VBLANK_C, pipe_regs->dlg_regs.refcyc_per_meta_chunk_vblank_c);
REG_SET(FLIP_PARAMETERS_2, 0,
- REFCYC_PER_META_CHUNK_FLIP_L, dlg_attr->refcyc_per_meta_chunk_flip_l);
+ REFCYC_PER_META_CHUNK_FLIP_L, pipe_regs->dlg_regs.refcyc_per_meta_chunk_flip_l);
REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
- REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
- REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+ REFCYC_PER_LINE_DELIVERY_PRE_L, pipe_regs->dlg_regs.refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, pipe_regs->dlg_regs.refcyc_per_line_delivery_pre_c);
REG_SET(DCN_SURF0_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_l);
+ pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_l);
REG_SET(DCN_SURF1_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_c);
+ pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_c);
REG_SET(DCN_CUR0_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+ REFCYC_PER_REQ_DELIVERY_PRE, pipe_regs->ttu_regs.refcyc_per_req_delivery_pre_cur0);
REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
- MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
- QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+ MIN_TTU_VBLANK, pipe_regs->ttu_regs.min_ttu_vblank,
+ QoS_LEVEL_FLIP, pipe_regs->ttu_regs.qos_level_flip);
}
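
The hunk above collapses the separate dlg/ttu register parameters into one dml2_dchub_per_pipe_register_set, so callees reach the members they need through a single aggregate. A hedged sketch of that consolidation, with illustrative types only:

    #include <stdio.h>

    /* Sketch: pass one per-pipe aggregate instead of parallel register blocks. */
    struct dlg_regs { unsigned dst_y_prefetch; };
    struct ttu_regs { unsigned min_ttu_vblank; };
    struct rq_regs  { unsigned swath_height; };

    struct per_pipe_register_set {
            struct dlg_regs dlg_regs;
            struct ttu_regs ttu_regs;
            struct rq_regs rq_regs;
    };

    static void setup_interdependent(const struct per_pipe_register_set *pipe_regs)
    {
            /* one parameter instead of two; fields reached via the aggregate */
            printf("DST_Y_PREFETCH=%u MIN_TTU_VBLANK=%u\n",
                   pipe_regs->dlg_regs.dst_y_prefetch,
                   pipe_regs->ttu_regs.min_ttu_vblank);
    }

    int main(void)
    {
            struct per_pipe_register_set regs = {
                    .dlg_regs = { .dst_y_prefetch = 32 },
                    .ttu_regs = { .min_ttu_vblank = 120 },
            };
            setup_interdependent(&regs);
            return 0;
    }
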
@@ -508,6 +520,18 @@ bool hubp401_program_surface_flip_and_addr(
return true;
}
+void hubp401_clear_tiling(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, 0);
+ REG_UPDATE(DCSURF_TILING_CONFIG, SW_MODE, DC_SW_LINEAR);
+
+ REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, 0,
+ SECONDARY_SURFACE_DCC_EN, 0);
+}
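
hubp401_clear_tiling above drops the surface back to linear (SW_MODE = DC_SW_LINEAR, swath height 0) and disables DCC on both surfaces via REG_UPDATE-style writes. Conceptually those macros do a masked read-modify-write; a standalone sketch of that concept, with an invented field layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative read-modify-write update, the idea behind the driver's
     * REG_UPDATE macros. Mask/shift values here are invented. */
    static uint32_t reg_update(uint32_t reg, uint32_t mask, unsigned shift,
                               uint32_t value)
    {
            return (reg & ~mask) | ((value << shift) & mask);
    }

    int main(void)
    {
            uint32_t tiling_config = 0x0000001C; /* pretend SW_MODE field = 7 */
            uint32_t sw_mode_mask = 0x0000001C;  /* bits [4:2], invented */

            /* DC_SW_LINEAR is 0 in this sketch, clearing the field. */
            tiling_config = reg_update(tiling_config, sw_mode_mask, 2, 0);
            printf("DCSURF_TILING_CONFIG = 0x%08X\n", (unsigned)tiling_config);
            return 0;
    }
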
+
void hubp401_dcc_control(struct hubp *hubp,
struct dc_plane_dcc_param *dcc)
{
@@ -520,7 +544,7 @@ void hubp401_dcc_control(struct hubp *hubp,
void hubp401_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
/* DCSURF_ADDR_CONFIG still shows up in reg spec, but does not need to be programmed for DCN4x
@@ -568,7 +592,7 @@ void hubp401_program_size(
void hubp401_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -718,11 +742,13 @@ void hubp401_cursor_set_position(
dc_fixpt_from_int(dst_x_offset),
param->h_scale_ratio));
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
+ }
REG_SET_2(CURSOR_POSITION, 0,
CURSOR_X_POSITION, x_pos,
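
The cursor hunk above gates both the attribute re-program and the CURSOR_CONTROL write on a change of the cached enable bit, so repeated position updates no longer touch the enable register. A minimal standalone sketch of that gating (simulated state, not the real registers):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch: only write the enable register (and possibly re-program
     * attributes) when the cached enable bit actually changes. */
    struct cur_state { bool cur_enable; unsigned writes; };

    static void set_cursor(struct cur_state *s, bool cur_en, bool addr_is_zero)
    {
            if (s->cur_enable != cur_en) {
                    if (cur_en && addr_is_zero)
                            printf("re-program cursor attributes\n");
                    s->cur_enable = cur_en;
                    s->writes++;              /* one CURSOR_CONTROL write */
            }
    }

    int main(void)
    {
            struct cur_state s = { false, 0 };
            set_cursor(&s, true, true);   /* 0 -> 1: attributes + one write */
            set_cursor(&s, true, false);  /* no change: register untouched */
            set_cursor(&s, false, false); /* 1 -> 0: one write */
            printf("CURSOR_CONTROL writes: %u\n", s.writes); /* prints 2 */
            return 0;
    }
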
@@ -969,11 +995,12 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_surface_flip_and_addr = hubp401_program_surface_flip_and_addr,
.hubp_program_surface_config = hubp401_program_surface_config,
.hubp_is_flip_pending = hubp2_is_flip_pending,
- .hubp_setup = hubp401_setup,
- .hubp_setup_interdependent = hubp401_setup_interdependent,
+ .hubp_setup2 = hubp401_setup,
+ .hubp_setup_interdependent2 = hubp401_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
.set_blank_regs = hubp2_set_blank_regs,
+ .hubp_reset = hubp_reset,
.mem_program_viewport = hubp401_set_viewport,
.set_cursor_attributes = hubp32_cursor_set_attributes,
.set_cursor_position = hubp401_cursor_set_position,
@@ -1004,7 +1031,8 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_width = hubp401_program_3dlut_fl_width,
.hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
- .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done
+ .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
+ .hubp_clear_tiling = hubp2_clear_tiling,
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index e52fdb5b0cd0..6e1d4c90ddd4 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -256,29 +256,15 @@
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
-void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
-
-void hubp401_program_requestor(
- struct hubp *hubp,
- struct _vcs_dpi_display_rq_regs_st *rq_regs);
-
-void hubp401_program_deadline(
- struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
-
void hubp401_setup(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
- struct _vcs_dpi_display_rq_regs_st *rq_regs,
- struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
void hubp401_setup_interdependent(
struct hubp *hubp,
- struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
- struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
+ struct dml2_dchub_per_pipe_register_set *pipe_regs);
bool hubp401_program_surface_flip_and_addr(
struct hubp *hubp,
@@ -290,7 +276,7 @@ void hubp401_dcc_control(struct hubp *hubp,
void hubp401_program_tiling(
struct dcn20_hubp *hubp2,
- const union dc_tiling_info *info,
+ const struct dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
void hubp401_program_size(
@@ -302,7 +288,7 @@ void hubp401_program_size(
void hubp401_program_surface_config(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -340,4 +326,42 @@ int hubp401_get_3dlut_fl_done(struct hubp *hubp);
void hubp401_set_unbounded_requesting(struct hubp *hubp, bool enable);
+void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale);
+
+void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
+
+void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled);
+
+void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
+
+void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
+
+void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable);
+
+void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group);
+
+void hubp401_program_3dlut_fl_addr(struct hubp *hubp, const struct dc_plane_address address);
+
+void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format);
+
+void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
+
+void hubp401_clear_tiling(struct hubp *hubp);
+
+void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
+
+void hubp401_program_requestor(
+ struct hubp *hubp,
+ struct dml2_display_rq_regs *rq_regs);
+
+void hubp401_program_deadline(
+ struct hubp *hubp,
+ struct dml2_display_dlg_regs *dlg_attr,
+ struct dml2_display_ttu_regs *ttu_attr);
+
#endif /* __DC_HUBP_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 4fbed0298adf..81f4c386c287 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1039,7 +1039,8 @@ void dce110_edp_backlight_control(
link_transmitter_control(ctx->dc_bios, &cntl);
if (enable && link->dpcd_sink_ext_caps.bits.oled &&
- !link->dc->config.edp_no_power_sequencing) {
+ !link->dc->config.edp_no_power_sequencing &&
+ !link->local_sink->edid_caps.panel_patch.oled_optimize_display_on) {
post_T7_delay += link->panel_config.pps.extra_post_t7_ms;
msleep(post_T7_delay);
}
@@ -3142,9 +3143,10 @@ static void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
}
bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
+ struct set_backlight_level_params *backlight_level_params)
{
+ uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16;
+ uint32_t frame_ramp = backlight_level_params->frame_ramp;
struct dc_link *link = pipe_ctx->stream->link;
struct dc *dc = link->ctx->dc;
struct abm *abm = pipe_ctx->stream_res.abm;
@@ -3315,7 +3317,7 @@ void dce110_disable_link_output(struct dc_link *link,
* from enable/disable link output and only call edp panel control
* in enable_link_dp and disable_link_dp once.
*/
- if (dmcu != NULL && dmcu->funcs->lock_phy)
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
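
The one-line change above fixes a guard that tested funcs->lock_phy before calling funcs->unlock_phy, which dereferences a NULL pointer whenever the two hooks diverge; the same fix appears again for dcn314_disable_link_output further below. A minimal illustration with invented types:

    #include <stdio.h>

    /* Guard a call through funcs->unlock_phy by testing unlock_phy itself,
     * not the unrelated lock_phy pointer. */
    struct demo_funcs {
            void (*lock_phy)(void);
            void (*unlock_phy)(void);
    };

    int main(void)
    {
            struct demo_funcs funcs = { .lock_phy = 0, .unlock_phy = 0 };

            /* Buggy form: if (funcs.lock_phy) funcs.unlock_phy();
             * would call NULL whenever only one hook is provided. */
            if (funcs.unlock_phy)
                    funcs.unlock_phy();
            else
                    printf("unlock_phy not provided, skipped safely\n");
            return 0;
    }
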
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index ed3cc3648e8e..06789ac3a224 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -88,8 +88,7 @@ void dce110_edp_wait_for_hpd_ready(
bool power_up);
bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
+ struct set_backlight_level_params *params);
void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
void dce110_set_pipe(struct pipe_ctx *pipe_ctx);
void dce110_disable_link_output(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index a6a1db5ba8ba..44e405e9bc97 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -1286,6 +1286,7 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
@@ -1447,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -3453,7 +3455,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
r2 = test_pipe->plane_res.scl_data.recout;
r2_r = r2.x + r2.width;
r2_b = r2.y + r2.height;
- split_pipe = test_pipe;
/**
* There is another half plane on same layer because of
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index a80c08582932..a5e18ab72394 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1288,7 +1288,7 @@ static void dcn20_power_on_plane_resources(
}
}
-static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
+void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
//if (dc->debug.sanity_checks) {
@@ -1458,12 +1458,16 @@ void dcn20_pipe_control_lock(
} else {
if (lock)
pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
- else
- pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+ else {
+ if (dc->hwseq->funcs.perform_3dlut_wa_unlock)
+ dc->hwseq->funcs.perform_3dlut_wa_unlock(pipe);
+ else
+ pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+ }
}
}
-static void dcn20_detect_pipe_changes(struct dc_state *old_state,
+void dcn20_detect_pipe_changes(struct dc_state *old_state,
struct dc_state *new_state,
struct pipe_ctx *old_pipe,
struct pipe_ctx *new_pipe)
@@ -1651,7 +1655,7 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
}
}
-static void dcn20_update_dchubp_dpp(
+void dcn20_update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -1674,25 +1678,41 @@ static void dcn20_update_dchubp_dpp(
* VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
* VTG has a 1:1 mapping with OTG. Each pipe HUBP will select which VTG
*/
+
if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
- hubp->funcs->hubp_setup(
- hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs,
- &pipe_ctx->rq_regs,
- &pipe_ctx->pipe_dlg_param);
+ if (hubp->funcs->hubp_setup2) {
+ hubp->funcs->hubp_setup2(
+ hubp,
+ &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync,
+ &pipe_ctx->stream->timing);
+ } else {
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
}
if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
- if (pipe_ctx->update_flags.bits.hubp_interdependent)
- hubp->funcs->hubp_setup_interdependent(
- hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs);
+ if (pipe_ctx->update_flags.bits.hubp_interdependent) {
+ if (hubp->funcs->hubp_setup_interdependent2) {
+ hubp->funcs->hubp_setup_interdependent2(
+ hubp,
+ &pipe_ctx->hubp_regs);
+ } else {
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+ }
+ }
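
The hunk above dispatches on whether the ASIC provides the DML2-era hubp_setup2/hubp_setup_interdependent2 hooks, falling back to the legacy signatures otherwise. A hedged standalone sketch of that versioned-callback pattern, with invented register types:

    #include <stdio.h>

    /* Sketch: newer ASICs expose a "v2" hook taking a consolidated register
     * set; older ones keep the legacy signature, and the caller prefers
     * whichever exists. */
    struct regs_v1 { int dlg, ttu, rq; };
    struct regs_v2 { struct regs_v1 all; int global_sync; };

    struct hubp_sketch {
            void (*setup)(struct regs_v1 *r);   /* legacy */
            void (*setup2)(struct regs_v2 *r);  /* DML2-era replacement */
    };

    static void setup2_impl(struct regs_v2 *r)
    {
            (void)r;
            printf("v2 path\n");
    }

    int main(void)
    {
            struct hubp_sketch hubp = { .setup2 = setup2_impl };
            struct regs_v2 regs = { {0, 0, 0}, 0 };

            if (hubp.setup2)
                    hubp.setup2(&regs);        /* preferred when present */
            else if (hubp.setup)
                    hubp.setup(&regs.all);     /* fall back to old contract */
            return 0;
    }
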
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->update_flags.bits.plane_changed ||
@@ -1732,7 +1752,6 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.scaler ||
plane_state->update_flags.bits.scaling_change ||
plane_state->update_flags.bits.position_change ||
- plane_state->update_flags.bits.clip_size_change ||
plane_state->update_flags.bits.per_pixel_alpha_change ||
pipe_ctx->stream->update_flags.bits.scaling) {
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
@@ -1745,7 +1764,6 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.viewport ||
(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
- (context == dc->current_state && plane_state->update_flags.bits.clip_size_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
hubp->funcs->mem_program_viewport(
@@ -1754,10 +1772,9 @@ static void dcn20_update_dchubp_dpp(
&pipe_ctx->plane_res.scl_data.viewport_c);
viewport_changed = true;
}
- if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
- hubp->funcs->hubp_program_mcache_id_and_split_coordinate(
- hubp,
- &pipe_ctx->mcache_regs);
+
+ if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, &pipe_ctx->mcache_regs);
/* Any updates are handled in dc interface, just need to apply existing for plane enable */
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
@@ -1836,7 +1853,7 @@ static void dcn20_update_dchubp_dpp(
hubp->funcs->phantom_hubp_post_enable(hubp);
}
-static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+static int dcn20_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
{
struct pipe_ctx *other_pipe;
int vready_offset = pipe->pipe_dlg_param.vready_offset;
@@ -1862,6 +1879,30 @@ static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
return vready_offset;
}
+static void dcn20_program_tg(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dce_hwseq *hws)
+{
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn20_calculate_vready_offset_for_group(pipe_ctx),
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width,
+ pipe_ctx->pipe_dlg_param.pstate_keepout);
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+}
+
static void dcn20_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -1872,33 +1913,17 @@ static void dcn20_program_pipe(
/* Only need to unblank on top pipe */
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.odm ||
- pipe_ctx->stream->update_flags.bits.abm_level)
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
hws->funcs.blank_pixel_data(dc, pipe_ctx,
- !pipe_ctx->plane_state ||
- !pipe_ctx->plane_state->visible);
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
}
/* Only update TG on top pipe */
if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
- && !pipe_ctx->prev_odm_pipe) {
- pipe_ctx->stream_res.tg->funcs->program_global_sync(
- pipe_ctx->stream_res.tg,
- calculate_vready_offset_for_group(pipe_ctx),
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width,
- pipe_ctx->pipe_dlg_param.pstate_keepout);
-
- if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
-
- pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
-
- if (hws->funcs.setup_vupdate_interrupt)
- hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
- }
+ && !pipe_ctx->prev_odm_pipe)
+ dcn20_program_tg(dc, pipe_ctx, context, hws);
if (pipe_ctx->update_flags.bits.odm)
hws->funcs.update_odm(dc, context, pipe_ctx);
@@ -1923,28 +1948,28 @@ static void dcn20_program_pipe(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
}
- if (pipe_ctx->update_flags.raw ||
- (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
- pipe_ctx->stream->update_flags.raw)
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw))
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->plane_state->update_flags.bits.hdr_mult))
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult))
hws->funcs.set_hdr_multiplier(pipe_ctx);
if (hws->funcs.populate_mcm_luts) {
if (pipe_ctx->plane_state) {
hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
- pipe_ctx->plane_state->lut_bank_a);
+ pipe_ctx->plane_state->lut_bank_a);
pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
}
}
if (pipe_ctx->plane_state &&
- (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change ||
- pipe_ctx->plane_state->update_flags.bits.lut_3d ||
- pipe_ctx->update_flags.bits.enable))
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable))
hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
/* dcn10_translate_regamma_to_hw_format takes 750us to finish
@@ -1952,10 +1977,10 @@ static void dcn20_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf ||
+ (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->update_flags.bits.output_tf_change))
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -1964,7 +1989,7 @@ static void dcn20_program_pipe(
* causes a different pipe to be chosen to odm combine with.
*/
if (pipe_ctx->update_flags.bits.enable
- || pipe_ctx->update_flags.bits.opp_changed) {
+ || pipe_ctx->update_flags.bits.opp_changed) {
pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
pipe_ctx->stream_res.opp,
@@ -1994,14 +2019,14 @@ static void dcn20_program_pipe(
memset(&params, 0, sizeof(params));
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
dc->hwss.set_disp_pattern_generator(dc,
- pipe_ctx,
- pipe_ctx->stream_res.test_pattern_params.test_pattern,
- pipe_ctx->stream_res.test_pattern_params.color_space,
- pipe_ctx->stream_res.test_pattern_params.color_depth,
- NULL,
- pipe_ctx->stream_res.test_pattern_params.width,
- pipe_ctx->stream_res.test_pattern_params.height,
- pipe_ctx->stream_res.test_pattern_params.offset);
+ pipe_ctx,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ NULL,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
}
}
@@ -2010,11 +2035,12 @@ void dcn20_program_front_end_for_ctx(
struct dc_state *context)
{
int i;
- struct dce_hwseq *hws = dc->hwseq;
- DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
- struct pipe_ctx *pipe;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct pipe_ctx *pipe = NULL;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
@@ -2027,7 +2053,7 @@ void dcn20_program_front_end_for_ctx(
ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe, pipe->plane_state->triplebuffer_flips);
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
}
}
}
@@ -2042,14 +2068,14 @@ void dcn20_program_front_end_for_ctx(
if (prev_hubp_count == 0 && hubp_count > 0) {
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, true, false);
+ dc->res_pool->hubbub, true, false);
udelay(500);
}
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
- &context->res_ctx.pipe_ctx[i]);
+ &context->res_ctx.pipe_ctx[i]);
/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
* buffer updates properly)
@@ -2057,22 +2083,16 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
- dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
if (tg->funcs->enable_crtc) {
- if (dc->hwss.blank_phantom) {
- int main_pipe_width = 0, main_pipe_height = 0;
- struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream);
+ if (dc->hwseq->funcs.blank_pixel_data)
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
- if (phantom_stream) {
- main_pipe_width = phantom_stream->dst.width;
- main_pipe_height = phantom_stream->dst.height;
- }
-
- dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
- }
tg->funcs->enable_crtc(tg);
}
}
@@ -2080,15 +2100,15 @@ void dcn20_program_front_end_for_ctx(
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
- && !context->res_ctx.pipe_ctx[i].top_pipe
- && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
- && context->res_ctx.pipe_ctx[i].stream)
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
- || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
struct hubbub *hubbub = dc->res_pool->hubbub;
/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
@@ -2098,13 +2118,18 @@ void dcn20_program_front_end_for_ctx(
* DET allocation.
*/
if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) {
+ (context->res_ctx.pipe_ctx[i].plane_state &&
+ dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i])
+ == SUBVP_PHANTOM))) {
if (hubbub->funcs->program_det_size)
- hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ hubbub->funcs->program_det_size(hubbub,
+ dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
if (dc->res_pool->hubbub->funcs->program_det_segments)
- dc->res_pool->hubbub->funcs->program_det_segments(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
}
- hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -2112,9 +2137,9 @@ void dcn20_program_front_end_for_ctx(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
if (resource_is_pipe_type(pipe, OTG_MASTER) &&
- !resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->update_flags.bits.odm &&
- hws->funcs.update_odm)
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
hws->funcs.update_odm(dc, context, pipe);
}
@@ -2132,25 +2157,28 @@ void dcn20_program_front_end_for_ctx(
else {
/* Don't program phantom pipes in the regular front end programming sequence.
* There is an MPO transition case where a pipe being used by a video plane is
- * transitioned directly to be a phantom pipe when closing the MPO video. However
- * the phantom pipe will program a new HUBP_VTG_SEL (update takes place right away),
- * but the MPO still exists until the double buffered update of the main pipe so we
- * will get a frame of underflow if the phantom pipe is programmed here.
+ * transitioned directly to be a phantom pipe when closing the MPO video.
+ * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
+ * right away) but the MPO still exists until the double buffered update of the
+ * main pipe so we will get a frame of underflow if the phantom pipe is
+ * programmed here.
*/
- if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
+ if (pipe->stream &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
dcn20_program_pipe(dc, pipe, context);
}
pipe = pipe->bottom_pipe;
}
}
+
/* Program secondary blending tree and writeback pipes */
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->top_pipe && !pipe->prev_odm_pipe
- && pipe->stream && pipe->stream->num_wb_info > 0
- && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
- || pipe->stream->update_flags.raw)
- && hws->funcs.program_all_writeback_pipes_in_tree)
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
/* Avoid underflow by check of pipe line read when adding 2nd plane. */
@@ -2169,7 +2197,7 @@ void dcn20_program_front_end_for_ctx(
* buffered pending status clear and reset opp head pipe's none double buffered
* registers to their initial state.
*/
-static void post_unlock_reset_opp(struct dc *dc,
+void dcn20_post_unlock_reset_opp(struct dc *dc,
struct pipe_ctx *opp_head)
{
struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
@@ -2206,16 +2234,17 @@ void dcn20_post_unlock_program_front_end(
struct dc *dc,
struct dc_state *context)
{
- int i;
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_US = 100000;
+ // Timeout for pipe enable
+ unsigned int timeout_us = 100000;
unsigned int polling_interval_us = 1;
struct dce_hwseq *hwseq = dc->hwseq;
+ int i;
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
- !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
- post_unlock_reset_opp(dc,
- &dc->current_state->res_ctx.pipe_ctx[i]);
+ !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
+ dcn20_post_unlock_reset_opp(dc,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
@@ -2231,11 +2260,12 @@ void dcn20_post_unlock_program_front_end(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
- dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
- && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
udelay(polling_interval_us);
}
}
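
The hunk above replaces the TIMEOUT_FOR_PIPE_ENABLE_US constant with a local timeout_us and keeps polling flip-pending at a fixed interval until it clears or the 100 ms budget is exhausted. A standalone sketch of that bounded poll, with the hardware flag simulated:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned fake_pending = 3; /* simulated flag, clears after 3 polls */

    static bool flip_pending(void)
    {
            return fake_pending ? fake_pending-- : false;
    }

    int main(void)
    {
            unsigned int timeout_us = 100000;
            unsigned int polling_interval_us = 1;
            unsigned int j;

            for (j = 0; j < timeout_us / polling_interval_us && flip_pending(); j++)
                    ;   /* in the driver this body is udelay(polling_interval_us) */

            printf("flip settled after %u polls\n", j); /* prints 3 */
            return 0;
    }
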
@@ -2249,15 +2279,14 @@ void dcn20_post_unlock_program_front_end(
* before we've transitioned to 2:1 or 4:1
*/
if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
- resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
- dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
int j = 0;
struct timing_generator *tg = pipe->stream_res.tg;
-
- if (tg->funcs->get_double_buffer_pending) {
- for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
- && tg->funcs->get_double_buffer_pending(tg); j++)
+ if (tg->funcs->get_optc_double_buffer_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
udelay(polling_interval_us);
}
}
@@ -2265,7 +2294,7 @@ void dcn20_post_unlock_program_front_end(
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, false, false);
+ dc->res_pool->hubbub, false, false);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -2296,11 +2325,11 @@ void dcn20_post_unlock_program_front_end(
return;
/* P-State support transitions:
- * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
- * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
- * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
- * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
- * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
+ * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
+ * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
+ * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
+ * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
+ * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
*/
if (hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
@@ -2315,12 +2344,11 @@ void dcn20_post_unlock_program_front_end(
if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
-
/* WA for stutter underflow during MPO transitions when adding 2nd plane */
if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
if (dc->current_state->stream_status[0].plane_count == 1 &&
- context->stream_status[0].plane_count > 1) {
+ context->stream_status[0].plane_count > 1) {
struct timing_generator *tg = dc->res_pool->timing_generators[0];
@@ -2468,7 +2496,7 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
- calculate_vready_offset_for_group(pipe_ctx),
+ dcn20_calculate_vready_offset_for_group(pipe_ctx),
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width,
@@ -2771,7 +2799,6 @@ void dcn20_reset_back_end_for_pipe(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
- int i;
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
@@ -2838,19 +2865,16 @@ void dcn20_reset_back_end_for_pipe(
}
}
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
- break;
-
- if (i == dc->res_pool->pipe_count)
- return;
-
/*
* In case of a dangling plane, setting this to NULL unconditionally
* causes failures during reset hw ctx where, if stream is NULL,
* it is expected that the pipe_ctx pointers to pipes and plane are NULL.
*/
pipe_ctx->stream = NULL;
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->next_odm_pipe = NULL;
+ pipe_ctx->prev_odm_pipe = NULL;
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index 5c874f7b0683..9d1ad3b29ca5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -154,6 +154,21 @@ void dcn20_setup_gsl_group_as_lock(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool enable);
-
+void dcn20_detect_pipe_changes(
+ struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
+void dcn20_enable_plane(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn20_post_unlock_reset_opp(
+ struct dc *dc,
+ struct pipe_ctx *opp_head);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 1ea95f8d4cbc..61efb15572ff 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -137,7 +137,7 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->dpms_off = true;
}
-static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst,
+bool dcn21_dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst,
uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst)
{
union dmub_rb_cmd cmd;
@@ -199,7 +199,7 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
panel_cntl->inst, panel_cntl->pwrseq_inst);
} else {
- dmub_abm_set_pipe(abm,
+ dcn21_dmub_abm_set_pipe(abm,
otg_inst,
SET_ABM_PIPE_IMMEDIATELY_DISABLE,
panel_cntl->inst,
@@ -234,7 +234,7 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
panel_cntl->inst,
panel_cntl->pwrseq_inst);
} else {
- dmub_abm_set_pipe(abm, otg_inst,
+ dcn21_dmub_abm_set_pipe(abm, otg_inst,
SET_ABM_PIPE_NORMAL,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
@@ -242,14 +242,15 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
}
bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
+ struct set_backlight_level_params *backlight_level_params)
{
struct dc_context *dc = pipe_ctx->stream->ctx;
struct abm *abm = pipe_ctx->stream_res.abm;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
uint32_t otg_inst;
+ uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16;
+ uint32_t frame_ramp = backlight_level_params->frame_ramp;
if (!abm || !tg || !panel_cntl)
return false;
@@ -257,7 +258,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
otg_inst = tg->inst;
if (dc->dc->res_pool->dmcu) {
- dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
+ dce110_set_backlight_level(pipe_ctx, backlight_level_params);
return true;
}
@@ -268,7 +269,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
} else {
- dmub_abm_set_pipe(abm,
+ dcn21_dmub_abm_set_pipe(abm,
otg_inst,
SET_ABM_PIPE_NORMAL,
panel_cntl->inst,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h
index 9cee9bdb8de9..f72a27ac1bf1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h
@@ -47,11 +47,12 @@ void dcn21_optimize_pwr_state(
void dcn21_PLAT_58856_wa(struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+bool dcn21_dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst,
+ uint32_t option, uint32_t panel_inst, uint32_t pwrseq_inst);
void dcn21_set_pipe(struct pipe_ctx *pipe_ctx);
void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
+ struct set_backlight_level_params *params);
bool dcn21_is_abm_supported(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index bded33575493..e89ebfda4873 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -245,6 +245,7 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
bool result = false;
int acquired_rmu = 0;
@@ -283,8 +284,14 @@ static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d,
stream->lut3d_func->state.bits.rmu_mux_num);
+ if (!result)
+ DC_LOG_ERROR("%s: program_3dlut failed\n", __func__);
+
result = mpc->funcs->program_shaper(mpc, shaper_lut,
stream->lut3d_func->state.bits.rmu_mux_num);
+ if (!result)
+ DC_LOG_ERROR("%s: program_shaper failed\n", __func__);
+
} else {
// loop through the available mux and release the requested mpcc_id
mpc->funcs->release_rmu(mpc, mpcc_id);
@@ -486,7 +493,6 @@ bool dcn30_mmhubbub_warmup(
}
/*following is the original: warmup each DWB's mcif buffer*/
for (i = 0; i < num_dwb; i++) {
- dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst];
/*warmup is for VM mode only*/
if (wb_info[i].mcif_buf_params.p_vmid == 0)
@@ -1185,3 +1191,30 @@ void dcn30_prepare_bandwidth(struct dc *dc,
if (!dc->clk_mgr->clks.fw_based_mclk_switching)
dc_dmub_srv_p_state_delegate(dc, false, context);
}
+
+void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ bool pending_updates = false;
+ unsigned int i;
+
+ if (tg && tg->funcs->is_tg_enabled(tg)) {
+ // Poll for 100ms maximum
+ for (i = 0; i < 100000; i++) {
+ pending_updates = false;
+ if (tg->funcs->get_optc_double_buffer_pending)
+ pending_updates |= tg->funcs->get_optc_double_buffer_pending(tg);
+
+ if (tg->funcs->get_otg_double_buffer_pending)
+ pending_updates |= tg->funcs->get_otg_double_buffer_pending(tg);
+
+ if (tg->funcs->get_pipe_update_pending && pipe_ctx->plane_state)
+ pending_updates |= tg->funcs->get_pipe_update_pending(tg);
+
+ if (!pending_updates)
+ break;
+
+ udelay(1);
+ }
+ }
+}
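
dcn30_wait_for_all_pending_updates above folds three optional pending queries (OPTC, OTG and per-pipe double-buffer state) into one flag per iteration and polls for up to 100 ms. A hedged sketch of that aggregation step in isolation, with the hooks simulated:

    #include <stdbool.h>
    #include <stdio.h>

    static bool optc_pending(void) { return false; }
    static bool otg_pending(void)  { return true; }  /* pretend OTG lags */

    typedef bool (*query_t)(void);

    int main(void)
    {
            query_t get_optc = optc_pending;
            query_t get_otg = otg_pending;
            query_t get_pipe = 0;           /* hook absent on this ASIC */
            bool pending = false;

            if (get_optc)
                    pending |= get_optc();
            if (get_otg)
                    pending |= get_otg();
            if (get_pipe)
                    pending |= get_pipe();

            printf("pending updates: %s\n", pending ? "yes" : "no");
            return 0;
    }
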
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index 6a153e7ce910..4b90b781c4f2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -96,4 +96,6 @@ void dcn30_set_hubp_blank(const struct dc *dc,
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 2a8dc40d2847..c32764aef884 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -86,7 +86,6 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -108,7 +107,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
- .is_abm_supported = dcn21_is_abm_supported
+ .is_abm_supported = dcn21_is_abm_supported,
+ .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index 93e49d87a67c..dcb27cdbce73 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
@@ -86,7 +86,6 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -107,6 +106,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
+ .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
};
static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
index 0bca48ccbfa2..a6e0115a53ee 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
@@ -23,8 +23,8 @@
*
*/
-#ifndef __DC_DCN30_INIT_H__
-#define __DC_DCN30_INIT_H__
+#ifndef __DC_DCN301_INIT_H__
+#define __DC_DCN301_INIT_H__
struct dc;
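
The guard rename above matters because dcn301_init.h previously reused __DC_DCN30_INIT_H__, presumably the same macro as dcn30_init.h; when both headers reach one translation unit, the second body is silently skipped. A compilable illustration of the failure mode:

    #include <stdio.h>

    /* contents of a hypothetical a.h */
    #ifndef SHARED_GUARD_H
    #define SHARED_GUARD_H
    struct a { int x; };
    #endif

    /* contents of a hypothetical b.h wrongly reusing the same guard:
     * the whole block is skipped because a.h already defined it */
    #ifndef SHARED_GUARD_H
    #define SHARED_GUARD_H
    struct b { int y; };   /* silently never declared */
    #endif

    int main(void)
    {
            struct a ok = { 1 };
            /* struct b bb;  -- would fail: the type was guarded out above */
            printf("%d\n", ok.x);
            return 0;
    }
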
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 3d4b31bd9946..03ba01f4ace1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -47,9 +47,11 @@
#include "dce/dmub_outbox.h"
#include "link.h"
#include "dcn10/dcn10_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
#include "dce/dce_i2c_hw.h"
+#include "dce/dmub_abm_lcd.h"
#define DC_LOGGER_INIT(logger)
@@ -517,10 +519,18 @@ static void dcn31_reset_back_end_for_pipe(
dc->hwss.set_abm_immediate_disable(pipe_ctx);
+ link = pipe_ctx->stream->link;
+
+ if ((!pipe_ctx->stream->dpms_off || link->link_status.link_active) &&
+ (link->connector_signal == SIGNAL_TYPE_EDP))
+ dc->hwss.blank_stream(pipe_ctx);
+
pipe_ctx->stream_res.tg->funcs->set_dsc_config(
pipe_ctx->stream_res.tg,
OPTC_DSC_DISABLED, 0, 0);
+
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
+
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
@@ -532,7 +542,6 @@ static void dcn31_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
- link = pipe_ctx->stream->link;
/* DPMS may already disable or */
/* dpms_off status is incorrect due to fastboot
* feature. When system resume from S4 with second
@@ -633,3 +642,51 @@ void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
triggers, params->num_frames);
}
+
+static void dmub_abm_set_backlight(struct dc_context *dc,
+ struct set_backlight_level_params *backlight_level_params, uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+ cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = backlight_level_params->frame_ramp;
+ cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_level_params->backlight_pwm_u16_16;
+ cmd.abm_set_backlight.abm_set_backlight_data.backlight_control_type =
+ (enum dmub_backlight_control_type) backlight_level_params->control_type;
+ cmd.abm_set_backlight.abm_set_backlight_data.min_luminance = backlight_level_params->min_luminance;
+ cmd.abm_set_backlight.abm_set_backlight_data.max_luminance = backlight_level_params->max_luminance;
+ cmd.abm_set_backlight.abm_set_backlight_data.min_backlight_pwm = backlight_level_params->min_backlight_pwm;
+ cmd.abm_set_backlight.abm_set_backlight_data.max_backlight_pwm = backlight_level_params->max_backlight_pwm;
+ cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ struct set_backlight_level_params *backlight_level_params)
+{
+ struct dc_context *dc = pipe_ctx->stream->ctx;
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+ uint32_t otg_inst;
+
+ if (!abm || !tg || !panel_cntl)
+ return false;
+
+ otg_inst = tg->inst;
+
+ dcn21_dmub_abm_set_pipe(abm,
+ otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
+
+ dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
+
+ return true;
+}
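
dmub_abm_set_backlight above follows the driver's usual DMUB command shape: zero the command union, fill the header (type, sub-type, payload size) and the payload, then hand it to firmware and wait. A standalone sketch of that fill-and-send pattern; the structures are stand-ins, not the real dmub_rb_cmd layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_header { uint8_t type, sub_type; uint16_t payload_bytes; };
    struct demo_backlight_data { uint32_t user_level, frame_ramp, panel_mask; };
    struct demo_cmd {
            struct demo_header header;
            struct demo_backlight_data data;
    };

    static void wake_and_execute(const struct demo_cmd *cmd)
    {
            printf("send type=%u sub=%u bytes=%u level=%u\n",
                   cmd->header.type, cmd->header.sub_type,
                   cmd->header.payload_bytes, cmd->data.user_level);
    }

    int main(void)
    {
            struct demo_cmd cmd;

            memset(&cmd, 0, sizeof(cmd));     /* no stale fields reach the FW */
            cmd.header.type = 1;              /* e.g. an ABM command class */
            cmd.header.sub_type = 2;          /* e.g. "set backlight" */
            cmd.header.payload_bytes = sizeof(struct demo_backlight_data);
            cmd.data.user_level = 0x8000;     /* 16.16 fixed-point backlight */
            cmd.data.panel_mask = 1u << 0;    /* panel instance 0 */

            wake_and_execute(&cmd);
            return 0;
    }
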
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
index b8bc939da155..0d09aa8cfb65 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
@@ -51,6 +51,8 @@ int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_
void dcn31_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
+bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ struct set_backlight_level_params *params);
bool dcn31_is_abm_supported(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
@@ -59,5 +61,4 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
int num_pipes, const struct dc_static_screen_params *params);
-
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 56f3c70d4b55..fb2ffb637931 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -89,7 +89,6 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
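
Dropping .mmhubbub_warmup from this designated initializer leaves the member NULL (C zero-initializes unnamed fields), so core code must test the pointer before calling through it. A minimal sketch of that ops-table convention, with hypothetical names:

#include <stdio.h>

struct hw_funcs {
	void (*warmup)(void);   /* optional hook, may be NULL */
	void (*enable)(void);
};

static void enable_impl(void) { puts("enable"); }

/* .warmup intentionally omitted: designated initializers zero it */
static const struct hw_funcs funcs = { .enable = enable_impl };

int main(void)
{
	if (funcs.warmup)  /* callers must NULL-check optional hooks */
		funcs.warmup();
	funcs.enable();
	return 0;
}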
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 4e93eeedfc1b..be26c925fdfa 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -162,6 +162,8 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
int opp_inst[MAX_PIPES] = {0};
int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
+ struct mpc *mpc = dc->res_pool->mpc;
+ int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -174,6 +176,16 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ if (mpc->funcs->set_out_rate_control) {
+ for (i = 0; i < opp_cnt; ++i) {
+ mpc->funcs->set_out_rate_control(
+ mpc, opp_inst[i],
+ false,
+ 0,
+ NULL);
+ }
+ }
+
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -355,6 +367,20 @@ void dcn314_calculate_pix_rate_divider(
}
}
+static bool dcn314_is_pipe_dig_fifo_on(struct pipe_ctx *pipe)
+{
+ return pipe && pipe->stream
+ // Check dig's otg instance.
+ && pipe->stream_res.stream_enc
+ && pipe->stream_res.stream_enc->funcs->dig_source_otg
+ && pipe->stream_res.tg->inst == pipe->stream_res.stream_enc->funcs->dig_source_otg(pipe->stream_res.stream_enc)
+ && pipe->stream->link && pipe->stream->link->link_enc
+ && pipe->stream->link->link_enc->funcs->is_dig_enabled
+ && pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc)
+ && pipe->stream_res.stream_enc->funcs->is_fifo_enabled
+ && pipe->stream_res.stream_enc->funcs->is_fifo_enabled(pipe->stream_res.stream_enc);
+}
+
void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context, unsigned int current_pipe_idx)
{
unsigned int i;
@@ -371,7 +397,11 @@ void dcn314_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) &&
+ !pipe->stream->apply_seamless_boot_optimization &&
+ !pipe->stream->apply_edp_fast_boot_optimization) {
+ if (dcn314_is_pipe_dig_fifo_on(pipe))
+ continue;
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
otg_disabled[i] = true;
@@ -478,7 +508,7 @@ void dcn314_disable_link_output(struct dc_link *link,
* from enable/disable link output and only call edp panel control
* in enable_link_dp and disable_link_dp once.
*/
- if (dmcu != NULL && dmcu->funcs->lock_phy)
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
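
The one-line fix above resolves a guard/call mismatch: the old code tested dmcu->funcs->lock_phy but then invoked unlock_phy, so an implementation providing only one of the pair could be skipped or dereference NULL. The safe pattern is to test exactly the pointer being called; a sketch with hypothetical names:

#include <stddef.h>
#include <stdio.h>

struct dmcu_ops {
	void (*lock_phy)(void);
	void (*unlock_phy)(void);
};

static void do_unlock(const struct dmcu_ops *ops)
{
	/* guard with the same member that is invoked */
	if (ops && ops->unlock_phy)
		ops->unlock_phy();
}

static void unlock_impl(void) { puts("phy unlocked"); }

int main(void)
{
	struct dmcu_ops ops = { .lock_phy = NULL, .unlock_phy = unlock_impl };

	do_unlock(&ops); /* safe even though lock_phy is NULL */
	return 0;
}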
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index 68e6de6b5758..21ef03a76229 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -91,7 +91,6 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 2e8c9f738259..ee4de9ddfef4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -439,6 +439,7 @@ bool dcn32_set_mpc_shaper_3dlut(
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
bool result = false;
@@ -458,13 +459,13 @@ bool dcn32_set_mpc_shaper_3dlut(
if (stream->lut3d_func &&
stream->lut3d_func->state.bits.initialized == 1) {
- result = mpc->funcs->program_3dlut(mpc,
- &stream->lut3d_func->lut_3d,
- mpcc_id);
+ result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d, mpcc_id);
+ if (!result)
+ DC_LOG_ERROR("%s: program_3dlut failed\n", __func__);
- result = mpc->funcs->program_shaper(mpc,
- shaper_lut,
- mpcc_id);
+ result = mpc->funcs->program_shaper(mpc, shaper_lut, mpcc_id);
+ if (!result)
+ DC_LOG_ERROR("%s: program_shaper failed\n", __func__);
}
return result;
@@ -984,6 +985,7 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
+ dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support;
/* for DCN401 testing only */
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
@@ -1048,7 +1050,8 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
@@ -1396,12 +1399,12 @@ void dcn32_disable_link_output(struct dc_link *link,
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
-
- if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_backlight_control &&
- !link->skip_implict_edp_power_control)
- link->dc->hwss.edp_power_control(link, false);
- else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ /*
+ * Add the logic to extract BOTH power up and power down sequences
+ * from enable/disable link output and only call edp panel control
+ * in enable_link_dp and disable_link_dp once.
+ */
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
@@ -1698,52 +1701,6 @@ void dcn32_init_blank(
hws->funcs.wait_for_blank_complete(opp);
}
-void dcn32_blank_phantom(struct dc *dc,
- struct timing_generator *tg,
- int width,
- int height)
-{
- struct dce_hwseq *hws = dc->hwseq;
- enum dc_color_space color_space;
- struct tg_color black_color = {0};
- struct output_pixel_processor *opp = NULL;
- uint32_t num_opps, opp_id_src0, opp_id_src1;
- uint32_t otg_active_width, otg_active_height;
- uint32_t i;
-
- /* program opp dpg blank color */
- color_space = COLOR_SPACE_SRGB;
- color_space_to_black_color(dc, color_space, &black_color);
-
- otg_active_width = width;
- otg_active_height = height;
-
- /* get the OPTC source */
- tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
- ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
-
- for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
- if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
- opp = dc->res_pool->opps[i];
- break;
- }
- }
-
- if (opp && opp->funcs->opp_set_disp_pattern_generator)
- opp->funcs->opp_set_disp_pattern_generator(
- opp,
- CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
- COLOR_DEPTH_UNDEFINED,
- &black_color,
- otg_active_width,
- otg_active_height,
- 0);
-
- if (tg->funcs->is_tg_enabled(tg))
- hws->funcs.wait_for_blank_complete(opp);
-}
-
/* Phantom stream IDs can change often, but can be identical between contexts.
* This function checks whether the streams are identical to avoid
* redundant pipe transitions.
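
The dsc_cfg.pic_width change earlier in this file folds pipe_ctx->hblank_borrow into the width that gets divided across ODM slices. As a worked example with illustrative numbers (not from the patch): h_addressable = 3840, hblank_borrow = 16, no borders, and opp_cnt = 2 gives (3840 + 16) / 2 = 1928 pixels per DSC instance:

#include <stdio.h>

/* Illustrative recomputation of the per-ODM-slice DSC picture width,
 * mirroring the expression in the hunk above; the values are made up. */
int main(void)
{
	int h_addressable = 3840, hblank_borrow = 16;
	int h_border_left = 0, h_border_right = 0;
	int opp_cnt = 2; /* 2:1 ODM combine */

	int pic_width = (h_addressable + hblank_borrow +
			 h_border_left + h_border_right) / opp_cnt;

	printf("DSC pic_width per slice: %d\n", pic_width); /* 1928 */
	return 0;
}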
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index cac4a08b92a4..0303a5953673 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -119,11 +119,6 @@ void dcn32_init_blank(
struct dc *dc,
struct timing_generator *tg);
-void dcn32_blank_phantom(struct dc *dc,
- struct timing_generator *tg,
- int width,
- int height);
-
bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
const struct dc_state *cur_ctx,
const struct dc_state *new_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 3422b564ae98..e4d149eff10f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -87,7 +87,6 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -98,7 +97,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
.does_plane_fit_in_mall = NULL,
- .set_backlight_level = dcn21_set_backlight_level,
+ .set_backlight_level = dcn31_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
@@ -117,10 +116,10 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
.update_dsc_pg = dcn32_update_dsc_pg,
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
- .blank_phantom = dcn32_blank_phantom,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
+ .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index bd309dbdf7b2..b907ad1acedd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -236,7 +236,8 @@ void dcn35_init_hw(struct dc *dc)
}
hws->funcs.init_pipes(dc, dc->current_state);
- if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
@@ -309,6 +310,7 @@ void dcn35_init_hw(struct dc *dc)
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
+ dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support;
}
if (dc->res_pool->pg_cntl) {
@@ -425,6 +427,8 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
int opp_inst[MAX_PIPES] = {0};
int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
+ struct mpc *mpc = dc->res_pool->mpc;
+ int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -437,6 +441,16 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ if (mpc->funcs->set_out_rate_control) {
+ for (i = 0; i < opp_cnt; ++i) {
+ mpc->funcs->set_out_rate_control(
+ mpc, opp_inst[i],
+ false,
+ 0,
+ NULL);
+ }
+ }
+
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -787,6 +801,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
/* Disable on the current state so the new one isn't cleared. */
pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -841,6 +856,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
uint32_t num_opps = 0;
uint32_t opp_id_src0 = OPP_ID_INVALID;
uint32_t opp_id_src1 = OPP_ID_INVALID;
+ uint32_t optc_dsc_state = 0;
// Step 1: To find out which OPTC is running & OPTC DSC is ON
// We can't use res_pool->res_cap->num_timing_generator to check
@@ -849,7 +865,6 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
// Some ASICs may have fewer display pipes fused than the default setting.
// In the dcnxx_resource_construct function, the driver obtains the real information.
for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
- uint32_t optc_dsc_state = 0;
struct timing_generator *tg = dc->res_pool->timing_generators[i];
if (tg->funcs->is_tg_enabled(tg)) {
@@ -864,15 +879,18 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
}
}
- // Step 2: To power down DSC but skip DSC of running OPTC
+ // Step 2: To power down DSC but skip DSC of running OPTC
for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
struct dcn_dsc_state s = {0};
- dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
+ /* avoid reading DSC state when it is not in use as it may be power gated */
+ if (optc_dsc_state) {
+ dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
- if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
- s.dsc_clock_en && s.dsc_fw_en)
- continue;
+ if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
+ s.dsc_clock_en && s.dsc_fw_en)
+ continue;
+ }
pg_cntl->funcs->dsc_pg_control(pg_cntl, dc->res_pool->dscs[i]->inst, false);
}
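
The guard added above skips dsc_read_state() entirely when Step 1 found no enabled OTG with DSC active, since the patch notes an unused DSC instance "may be power gated" and its registers cannot be read safely. The general shape of the guard, sketched with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct dsc_state { bool clock_en; };

/* Hypothetical stand-in for a register read that is only legal
 * while the block is powered. */
static void dsc_read_state(int inst, struct dsc_state *s)
{
	s->clock_en = (inst == 0);
	printf("read dsc%d state\n", inst);
}

int main(void)
{
	bool any_optc_dsc_on = false; /* as discovered in "Step 1" above */
	struct dsc_state s = { 0 };
	int i;

	for (i = 0; i < 4; i++) {
		/* skip the read entirely when the block may be gated */
		if (any_optc_dsc_on)
			dsc_read_state(i, &s);
		/* ...power-gate the instance here... */
	}
	return 0;
}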
@@ -940,6 +958,7 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
/* TODO: need to support both cases */
hubp->power_gated = true;
+ hubp->funcs->hubp_reset(hubp);
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream = NULL;
@@ -1016,8 +1035,13 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
- if (pipe_ctx->stream_res.dsc)
+ if (pipe_ctx->stream_res.dsc) {
update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
+ if (dc->caps.sequential_ono) {
+ update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
+ update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
+ }
+ }
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
@@ -1575,3 +1599,37 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
return false;
}
+
+/*
+ * Set powerup to true for every pipe to match pre-OS configuration.
+ */
+static void dcn35_calc_blocks_to_ungate_for_hw_release(struct dc *dc, struct pg_block_update *update_state)
+{
+ int i = 0, j = 0;
+
+ memset(update_state, 0, sizeof(struct pg_block_update));
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
+ update_state->pg_pipe_res_update[j][i] = true;
+
+ update_state->pg_res_update[PG_HPO] = true;
+ update_state->pg_res_update[PG_DWB] = true;
+}
+
+/*
+ * Power up all gated blocks to restore the pre-OS environment.
+ * Reuse the hwss functions and the existing PG & RCG flags to decide
+ * the power-up sequence.
+ */
+void dcn35_hardware_release(struct dc *dc)
+{
+ struct pg_block_update pg_update_state;
+
+ dcn35_calc_blocks_to_ungate_for_hw_release(dc, &pg_update_state);
+
+ if (dc->hwss.root_clock_control)
+ dc->hwss.root_clock_control(dc, &pg_update_state, true);
+ /* power up the required HW blocks */
+ if (dc->hwss.hw_block_power_up)
+ dc->hwss.hw_block_power_up(dc, &pg_update_state);
+}
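
dcn35_calc_blocks_to_ungate_for_hw_release() marks every (resource, pipe) pair for power-up by zeroing the struct and then setting the whole 2-D flag matrix. A standalone sketch of the same fill pattern; the struct layout and sizes are simplified assumptions:

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#define NUM_RES   4   /* simplified stand-in for PG_HW_PIPE_RESOURCES_NUM_ELEMENT */
#define NUM_PIPES 4

struct pg_update { bool pipe_res[NUM_RES][NUM_PIPES]; };

int main(void)
{
	struct pg_update u;
	int i, j;

	memset(&u, 0, sizeof(u));            /* start from "no change" */
	for (i = 0; i < NUM_PIPES; i++)      /* then request power-up   */
		for (j = 0; j < NUM_RES; j++)
			u.pipe_res[j][i] = true; /* everywhere */

	printf("all %d flags set\n", NUM_RES * NUM_PIPES);
	return 0;
}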
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index e27b3609020f..0b1d6f608edd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -99,4 +99,6 @@ void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+void dcn35_hardware_release(struct dc *dc);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 2bbf1fef94fd..c7acaf97974c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -92,7 +92,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -101,7 +100,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .set_backlight_level = dcn21_set_backlight_level,
+ .set_backlight_level = dcn31_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
@@ -123,7 +122,11 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
- .program_outstanding_updates = dcn32_program_outstanding_updates,
+ .hardware_release = dcn35_hardware_release,
+ .detect_pipe_changes = dcn20_detect_pipe_changes,
+ .enable_plane = dcn20_enable_plane,
+ .update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index d00822e8daa5..4f73e7f551ac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -91,7 +91,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -100,7 +99,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
- .set_backlight_level = dcn21_set_backlight_level,
+ .set_backlight_level = dcn31_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
@@ -122,7 +121,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.root_clock_control = dcn35_root_clock_control,
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
- .program_outstanding_updates = dcn32_program_outstanding_updates,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
};
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 0b743669f23b..555a9f590cd7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -3,6 +3,7 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dm_services.h"
+#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
@@ -126,91 +127,6 @@ void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
}
-struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc, uint8_t region)
-{
- struct dce_hwseq *hws = dc->hwseq;
- struct ips_ono_region_state state = {0, 0};
-
- switch (region) {
- case 0:
- /* dccg, dio, dcio */
- REG_GET_2(DOMAIN22_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 1:
- /* dchubbub, dchvm, dchubbubmem */
- REG_GET_2(DOMAIN23_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 2:
- /* mpc, opp, optc, dwb */
- REG_GET_2(DOMAIN24_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 3:
- /* hpo */
- REG_GET_2(DOMAIN25_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 4:
- /* dchubp0, dpp0 */
- REG_GET_2(DOMAIN0_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 5:
- /* dsc0 */
- REG_GET_2(DOMAIN16_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 6:
- /* dchubp1, dpp1 */
- REG_GET_2(DOMAIN1_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 7:
- /* dsc1 */
- REG_GET_2(DOMAIN17_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 8:
- /* dchubp2, dpp2 */
- REG_GET_2(DOMAIN2_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 9:
- /* dsc2 */
- REG_GET_2(DOMAIN18_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 10:
- /* dchubp3, dpp3 */
- REG_GET_2(DOMAIN3_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- case 11:
- /* dsc3 */
- REG_GET_2(DOMAIN19_PG_STATUS,
- DOMAIN_DESIRED_PWR_STATE, &state.desire_pwr_state,
- DOMAIN_PGFSM_PWR_STATUS, &state.current_pwr_state);
- break;
- default:
- break;
- }
-
- return state;
-}
-
void dcn401_init_hw(struct dc *dc)
{
struct abm **abms = dc->res_pool->multiple_abms;
@@ -435,7 +351,8 @@ void dcn401_init_hw(struct dc *dc)
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
- dc->debug.fams2_config.bits.enable &= dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver == 2;
+ dc->debug.fams2_config.bits.enable &=
+ dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support
if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
@@ -506,7 +423,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
/* 1D LUT */
- if (mcm_luts.lut1d_func && lut3d_xable != MCM_LUT_DISABLE) {
+ if (mcm_luts.lut1d_func) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
@@ -521,7 +438,7 @@ void dcn401_populate_mcm_luts(struct dc *dc,
mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
}
if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable, lut_bank_a, mpcc_id);
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
}
/* Shaper */
@@ -669,11 +586,17 @@ bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
- struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
+ struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
+ struct mpc *mpc = dc->res_pool->mpc;
bool result;
const struct pwl_params *lut_params = NULL;
bool rval;
+ if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
+ dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
+ return true;
+ }
+
mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
// 1D LUT
@@ -814,7 +737,8 @@ enum dc_status dcn401_enable_stream_timing(
int opp_cnt = 1;
int opp_inst[MAX_PIPES] = {0};
struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
- bool manual_mode;
+ struct dc_crtc_timing patched_crtc_timing = stream->timing;
+ bool manual_mode = false;
unsigned int tmds_div = PIXEL_RATE_DIV_NA;
unsigned int unused_div = PIXEL_RATE_DIV_NA;
int odm_slice_width;
@@ -844,6 +768,13 @@ enum dc_status dcn401_enable_stream_timing(
odm_slice_width, last_odm_slice_width);
}
+ /* set DTBCLK_P */
+ if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) {
+ if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
+ dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst);
+ }
+ }
+
/* HW program guide assume display already disable
* by unplug sequence. OTG assume stop.
*/
@@ -861,16 +792,20 @@ enum dc_status dcn401_enable_stream_timing(
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
+ /* if we are borrowing from hblank, h_addressable needs to be adjusted */
+ if (dc->debug.enable_hblank_borrow)
+ patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
+
pipe_ctx->stream_res.tg->funcs->program_timing(
- pipe_ctx->stream_res.tg,
- &stream->timing,
- pipe_ctx->pipe_dlg_param.vready_offset,
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width,
- pipe_ctx->pipe_dlg_param.pstate_keepout,
- pipe_ctx->stream->signal,
- true);
+ pipe_ctx->stream_res.tg,
+ &patched_crtc_timing,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines,
+ pipe_ctx->stream->signal,
+ true);
for (i = 0; i < opp_cnt; i++) {
opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
@@ -1004,8 +939,6 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
} else {
- /* need to set DTBCLK_P source to DPREFCLK for DP8B10B */
- dccg->funcs->set_dtbclk_p_src(dccg, DPREFCLK, tg->inst);
dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
@@ -1063,7 +996,6 @@ static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
r2 = test_pipe->plane_res.scl_data.recout;
r2_r = r2.x + r2.width;
r2_b = r2.y + r2.height;
- split_pipe = test_pipe;
/**
* There is another half plane on same layer because of
@@ -1097,6 +1029,58 @@ void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct
}
}
+static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding)
+{
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
+ uint8_t i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+ pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ link_encoding,
+ &pipe_ctx->pll_settings);
+ break;
+ }
+ }
+}
+
+void dcn401_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control &&
+ !link->skip_implict_edp_power_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) {
+ disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ } else {
+ link_hwss->disable_link_output(link, link_res, signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+ }
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control &&
+ !link->skip_implict_edp_power_control)
+ link->dc->hwss.edp_power_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+}
+
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -1426,6 +1410,10 @@ void dcn401_prepare_bandwidth(struct dc *dc,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
+ /* update timeout thresholds */
+ if (hubbub->funcs->program_arbiter) {
+ dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
+ }
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_segments) {
@@ -1467,6 +1455,10 @@ void dcn401_optimize_bandwidth(
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
+ /* update timeout thresholds */
+ if (hubbub->funcs->program_arbiter) {
+ hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, true);
+ }
if (dc->clk_mgr->dc_mode_softmax_enabled)
if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
@@ -1669,7 +1661,7 @@ void dcn401_hardware_release(struct dc *dc)
}
}
-void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
+void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
{
struct pipe_ctx *opp_heads[MAX_PIPES];
struct pipe_ctx *dpp_pipes[MAX_PIPES];
@@ -1695,6 +1687,9 @@ void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context,
hubbub->funcs->wait_for_det_update)
hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
}
+ } else {
+ if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update)
+ hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst);
}
}
}
@@ -1705,7 +1700,6 @@ void dcn401_interdependent_update_lock(struct dc *dc,
unsigned int i = 0;
struct pipe_ctx *pipe = NULL;
struct timing_generator *tg = NULL;
- bool pipe_unlocked[MAX_PIPES] = {0};
if (lock) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1719,48 +1713,91 @@ void dcn401_interdependent_update_lock(struct dc *dc,
dc->hwss.pipe_control_lock(dc, pipe, true);
}
} else {
- /* Unlock pipes based on the change in DET allocation instead of pipe index
- * Prevents over allocation of DET during unlock process
- * e.g. 2 pipe config with different streams with a max of 20 DET segments
- * Before: After:
- * - Pipe0: 10 DET segments - Pipe0: 12 DET segments
- * - Pipe1: 10 DET segments - Pipe1: 8 DET segments
- * If Pipe0 gets updated first, 22 DET segments will be allocated
- */
+ /* Free the DET currently in use first and let those pipes update, then unlock the remaining pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg;
- int current_pipe_idx = i;
if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
!tg->funcs->is_tg_enabled(tg) ||
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
- pipe_unlocked[i] = true;
continue;
}
- // If the same stream exists in old context, ensure the OTG_MASTER pipes for the same stream get compared
- struct pipe_ctx *old_otg_master = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, pipe->stream);
-
- if (old_otg_master)
- current_pipe_idx = old_otg_master->pipe_idx;
- if (resource_calculate_det_for_stream(context, pipe) <
- resource_calculate_det_for_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[current_pipe_idx])) {
+ if (dc->scratch.pipes_to_unlock_first[i]) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
dc->hwss.pipe_control_lock(dc, pipe, false);
- pipe_unlocked[i] = true;
- dcn401_wait_for_det_buffer_update(dc, context, pipe);
+ /* Assumes the pipe of the same index in current_state is also an OTG_MASTER pipe */
+ dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe);
}
}
+ /* Unlocking the rest of the pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (pipe_unlocked[i])
+ if (dc->scratch.pipes_to_unlock_first[i])
continue;
+
pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ continue;
+ }
+
dc->hwss.pipe_control_lock(dc, pipe, false);
}
}
}
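
The comment removed above explained why the unlock order matters: with a 20-segment DET budget, a two-pipe config moving from 10/10 to 12/8 segments would transiently allocate 22 if the growing pipe unlocked first. The rewritten loop therefore unlocks the pipes flagged in dc->scratch.pipes_to_unlock_first (the shrinking ones), waits for their DET update, then unlocks the rest. A toy model of the rule, using the numbers from the removed comment:

#include <stdio.h>

/* Toy model: unlock shrinking pipes first so the transient DET
 * allocation never exceeds the budget (20 segments, 10/10 -> 12/8). */
int main(void)
{
	int before[2] = { 10, 10 }, after[2] = { 12, 8 };
	int in_use = before[0] + before[1];
	int i;

	/* pass 1: pipes whose DET shrinks */
	for (i = 0; i < 2; i++)
		if (after[i] < before[i]) {
			in_use += after[i] - before[i];
			printf("unlock pipe%d first, in_use=%d\n", i, in_use);
		}
	/* pass 2: the rest */
	for (i = 0; i < 2; i++)
		if (after[i] >= before[i]) {
			in_use += after[i] - before[i];
			printf("unlock pipe%d, in_use=%d\n", i, in_use);
		}
	return 0; /* peak stays at 20, never 22 */
}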
+void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
+{
+ /* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that
+ * HUBP will properly fetch 3DLUT contents after unlock.
+ *
+ * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
+ * of whether OTG lock is currently being held or not.
+ */
+ struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
+ struct pipe_ctx *odm_pipe, *mpc_pipe;
+ int i, wa_pipe_ct = 0;
+
+ for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
+ for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
+ if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src
+ == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
+ && mpc_pipe->plane_state->mcm_shaper_3dlut_setting
+ == DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) {
+ wa_pipes[wa_pipe_ct++] = mpc_pipe;
+ }
+ }
+ }
+
+ if (wa_pipe_ct > 0) {
+ if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
+ pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);
+
+ for (i = 0; i < wa_pipe_ct; ++i) {
+ if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
+ wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
+ }
+
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
+ pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);
+
+ for (i = 0; i < wa_pipe_ct; ++i) {
+ if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
+ wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
+ }
+
+ if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
+ pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
+ } else {
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ }
+}
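
The workaround brackets the OTG unlock inside a vupdate keepout window and asserts hubp_enable_3dlut_fl both before and after the unlock, so a VREADY that fires around the unlock cannot leave the 3DLUT fetch disabled. A sketch of the bracketing order only; all names are illustrative stand-ins:

#include <stdio.h>

static void set_keepout(int on) { printf("keepout=%d\n", on); }
static void enable_lut_fetch(void) { puts("3dlut fetch enabled"); }
static void unlock_otg(void) { puts("otg unlocked"); }

int main(void)
{
	set_keepout(1);
	enable_lut_fetch();  /* before the unlock */
	unlock_otg();
	enable_lut_fetch();  /* and again after, in case VREADY hit */
	set_keepout(0);
	return 0;
}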
+
void dcn401_program_outstanding_updates(struct dc *dc,
struct dc_state *context)
{
@@ -1770,3 +1807,852 @@ void dcn401_program_outstanding_updates(struct dc *dc,
if (hubbub->funcs->program_compbuf_segments)
hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
}
+
+void dcn401_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+ if (pipe_ctx->stream_res.stream_enc == NULL) {
+ pipe_ctx->stream = NULL;
+ return;
+ }
+
+ /* DPMS may already be disabled, or the dpms_off status may be
+ * incorrect due to the fastboot feature: when the system resumes
+ * from S4 with a second screen only, dpms_off is true but the
+ * VBIOS has lit up eDP, so check the link status too.
+ */
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ dc->link_srv->set_dpms_off(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+ /* free acquired resources */
+ if (pipe_ctx->stream_res.audio) {
+ /*disable az_endpoint*/
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ /*free audio*/
+ if (dc->caps.dynamic_audio == true) {
+ /* we have to dynamically arbitrate the audio endpoints */
+ /* we free the resource; need to reset is_audio_acquired */
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.audio, false);
+ pipe_ctx->stream_res.audio = NULL;
+ }
+ }
+
+ /* By the caller's loop, the parent pipe (pipe0) is reset last.
+ * The back end is shared by all pipes and is disabled only when
+ * the parent pipe is disabled.
+ */
+ if (pipe_ctx->top_pipe == NULL) {
+
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+
+ pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
+ if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
+ pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
+ if (pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, NULL);
+ /* TODO - convert symclk_ref_cnts for otg to a bit map to solve
+ * the case where the same symclk is shared across multiple otg
+ * instances
+ */
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
+ link->phy_state.symclk_ref_cnts.otg = 0;
+ if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
+ link_hwss->disable_link_output(link,
+ &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+ }
+
+ /* reset DTBCLK_P */
+ if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
+ dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
+ }
+
+/*
+ * In case of a dangling plane, setting this to NULL unconditionally
+ * causes failures during reset hw ctx where, if stream is NULL,
+ * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
+ */
+ pipe_ctx->stream = NULL;
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->next_odm_pipe = NULL;
+ pipe_ctx->prev_odm_pipe = NULL;
+ DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
+ pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
+}
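
For HDMI/TMDS the teardown above zeroes the OTG symclk reference count and, if the PHY had been parked in the "symclk on, TX off" state, finally disables the link output. A much-simplified sketch of that state handoff; the enum and flow are reduced for illustration:

#include <stdio.h>

enum symclk_state { SYMCLK_ON_TX_OFF, SYMCLK_OFF_TX_OFF };

int main(void)
{
	int otg_refs = 1;
	enum symclk_state state = SYMCLK_ON_TX_OFF;

	otg_refs = 0; /* TMDS back end being reset drops the last OTG ref */
	if (state == SYMCLK_ON_TX_OFF) {
		puts("disable_link_output");
		state = SYMCLK_OFF_TX_OFF;
	}
	printf("refs=%d state=%d\n", otg_refs, (int)state);
	return 0;
}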
+
+void dcn401_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Reset Back End*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ if (hws->funcs.reset_back_end_for_pipe)
+ hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+}
+
+static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
+{
+ struct pipe_ctx *other_pipe;
+ unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;
+
+ /* Always use the largest vready_offset of all connected pipes */
+ for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+ for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
+ if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
+ vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
+ }
+
+ return vready_offset;
+}
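
dcn401_calculate_vready_offset_for_group() walks all four linkage directions (bottom/top/next-ODM/prev-ODM) and keeps the maximum vready offset so every pipe in the group is programmed consistently. A reduced sketch of the same max-scan over one singly linked direction:

#include <stdio.h>

struct pipe { unsigned int vready; struct pipe *next; };

/* Take the largest vready offset of all connected pipes,
 * mirroring one direction of the scan above. */
static unsigned int max_vready(struct pipe *p, unsigned int seed)
{
	for (; p; p = p->next)
		if (p->vready > seed)
			seed = p->vready;
	return seed;
}

int main(void)
{
	struct pipe c = { 12, NULL }, b = { 40, &c }, a = { 25, &b };

	printf("group vready: %u\n", max_vready(&a, 0)); /* 40 */
	return 0;
}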
+
+static void dcn401_program_tg(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dce_hwseq *hws)
+{
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+}
+
+static void dcn401_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx,
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
+ }
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe)
+ dcn401_program_tg(dc, pipe_ctx, context, hws);
+
+ if (pipe_ctx->update_flags.bits.odm)
+ hws->funcs.update_odm(dc, context, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable) {
+ if (hws->funcs.enable_plane)
+ hws->funcs.enable_plane(dc, pipe_ctx, context);
+ else
+ dc->hwss.enable_plane(dc, pipe_ctx, context);
+
+ if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+ dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
+ }
+
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size)
+ dc->res_pool->hubbub->funcs->program_det_size(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ if (dc->res_pool->hubbub->funcs->program_det_segments)
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+ }
+
+ if (pipe_ctx->update_flags.raw ||
+ (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
+ pipe_ctx->stream->update_flags.raw)
+ dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult))
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
+
+ if (hws->funcs.populate_mcm_luts) {
+ if (pipe_ctx->plane_state) {
+ hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
+ pipe_ctx->plane_state->lut_bank_a);
+ pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
+ }
+ }
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable))
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish
+ * only do gamma programming for powering on, internal memcmp to avoid
+ * updating on slave planes
+ */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf ||
+ (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
+
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where
+ * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen to odm combine with.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
+ }
+
+ /* Set ABM pipe after other pipe configurations done */
+ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
+ if (pipe_ctx->stream_res.abm) {
+ dc->hwss.set_pipe(pipe_ctx);
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
+ pipe_ctx->stream->abm_level);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.test_pattern_changed) {
+ struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
+ struct bit_depth_reduction_params params;
+
+ memset(&params, 0, sizeof(params));
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ dc->hwss.set_disp_pattern_generator(dc,
+ pipe_ctx,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ NULL,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
+ }
+}
+
+void dcn401_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ unsigned int prev_hubp_count = 0;
+ unsigned int hubp_count = 0;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct pipe_ctx *pipe = NULL;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (resource_is_pipe_topology_changed(dc->current_state, context))
+ resource_log_pipe_topology_update(dc, context);
+
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ if (pipe->plane_state->triplebuffer_flips)
+ BREAK_TO_DEBUGGER();
+
+ /*turn off triple buffer for full update*/
+ dc->hwss.program_triplebuffer(
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
+ }
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ prev_hubp_count++;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ hubp_count++;
+ }
+
+ if (prev_hubp_count == 0 && hubp_count > 0) {
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, false);
+ udelay(500);
+ }
+
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+
+ /* When disabling phantom pipes, turn on phantom OTG first (so we can get double
+ * buffer updates properly)
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
+ dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+ if (tg->funcs->enable_crtc) {
+ if (dc->hwseq->funcs.blank_pixel_data)
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);
+
+ tg->funcs->enable_crtc(tg);
+ }
+ }
+ }
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+
+
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ /* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
+ * then we want to do the programming here (effectively it's being disabled). If we do
+ * the programming later the DET won't be updated until the OTG for the phantom pipe is
+ * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
+ * DET allocation.
+ */
+ if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
+ (context->res_ctx.pipe_ctx[i].plane_state &&
+ dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
+ SUBVP_PHANTOM))) {
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub,
+ dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ if (dc->res_pool->hubbub->funcs->program_det_segments)
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ }
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
+ }
+
+ /* update ODM for blanked OTG master pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+
+ /*
+ * Program all updated pipes, order matters for mpcc setup. Start with
+ * top pipe and program all pipes that follow in order
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ if (hws->funcs.program_pipe)
+ hws->funcs.program_pipe(dc, pipe, context);
+ else {
+ /* Don't program phantom pipes in the regular front end programming sequence.
+ * There is an MPO transition case where a pipe being used by a video plane is
+ * transitioned directly to be a phantom pipe when closing the MPO video.
+ * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
+ * right away) but the MPO still exists until the double buffered update of the
+ * main pipe so we will get a frame of underflow if the phantom pipe is
+ * programmed here.
+ */
+ if (pipe->stream &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
+ dcn401_program_pipe(dc, pipe, context);
+ }
+
+ pipe = pipe->bottom_pipe;
+ }
+ }
+
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe
+ && pipe->stream && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->stream->update_flags.raw)
+ && hws->funcs.program_all_writeback_pipes_in_tree)
+ hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+
+ /* Avoid underflow by check of pipe line read when adding 2nd plane. */
+ if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
+ !pipe->top_pipe &&
+ pipe->stream &&
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
+ }
+ }
+}
+
+void dcn401_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ // Timeout for pipe enable
+ unsigned int timeout_us = 100000;
+ unsigned int polling_interval_us = 1;
+ struct dce_hwseq *hwseq = dc->hwseq;
+ int i;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
+ !resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
+ dc->hwss.post_unlock_reset_opp(dc,
+ &dc->current_state->res_ctx.pipe_ctx[i]);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+
+ /*
+ * If we are enabling a pipe, we need to wait for pending clear as this is a critical
+ * part of the enable operation otherwise, DM may request an immediate flip which
+ * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
+ * is unsupported on DCN.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ // Don't check flip pending on phantom pipes
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ udelay(polling_interval_us);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* When going from a smaller ODM slice count to a larger one, we must
+ * ensure the double buffer update completes before we return, so that
+ * DISPCLK is not reduced before we've transitioned to 2:1 or 4:1.
+ */
+ if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
+ resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
+ int j = 0;
+ struct timing_generator *tg = pipe->stream_res.tg;
+
+ if (tg->funcs->get_optc_double_buffer_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ udelay(polling_interval_us);
+ }
+ }
+ }
+
+ if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, false, false);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ /* Program phantom pipe here to prevent a frame of underflow in the MPO transition
+ * case (if a pipe being used for a video plane transitions to a phantom pipe, it
+ * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
+ * programming sequence).
+ */
+ while (pipe) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ /* When turning on the phantom pipe we want to run through the
+ * entire enable sequence, so apply all the "enable" flags.
+ */
+ if (dc->hwss.apply_update_flags_for_phantom)
+ dc->hwss.apply_update_flags_for_phantom(pipe);
+ if (dc->hwss.update_phantom_vp_position)
+ dc->hwss.update_phantom_vp_position(dc, context, pipe);
+ dcn401_program_pipe(dc, pipe, context);
+ }
+ pipe = pipe->bottom_pipe;
+ }
+ }
+ }
+
+ if (!hwseq)
+ return;
+
+ /* P-State support transitions:
+ * Natural -> FPO: P-State disabled in prepare, force disallow anytime is safe
+ * FPO -> Natural: Unforce anytime after FW disable is safe (P-State will assert naturally)
+ * Unsupported -> FPO: P-State enabled in optimize, force disallow anytime is safe
+ * FPO -> Unsupported: P-State disabled in prepare, unforce disallow anytime is safe
+ * FPO <-> SubVP: Force disallow is maintained on the FPO / SubVP pipes
+ */
+ if (hwseq->funcs.update_force_pstate)
+ hwseq->funcs.update_force_pstate(dc, context);
+
+ /* Only program the MALL registers after all the main and phantom pipes
+ * have been programmed.
+ */
+ if (hwseq->funcs.program_mall_pipe_config)
+ hwseq->funcs.program_mall_pipe_config(dc, context);
+
+ /* WA to apply WM setting */
+ if (hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
+
+ /* WA for stutter underflow during MPO transitions when adding 2nd plane */
+ if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+ if (dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
+ tg->funcs->get_frame_count(tg);
+ }
+ }
+}
+
+bool dcn401_update_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* recalculate DML parameters */
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
+ return false;
+
+ /* apply updated bandwidth parameters */
+ dc->hwss.prepare_bandwidth(dc, context);
+
+ /* update hubp configs for all pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe == NULL) {
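+ /* Blank pixel data when no pipe in this tree has a visible plane. */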
+ bool blank = !is_pipe_tree_visible(pipe_ctx);
+
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
+
+ if (pipe_ctx->prev_odm_pipe == NULL)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
+
+ if (hws->funcs.setup_vupdate_interrupt)
+ hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+ }
+
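+ /* Reprogram the per-pipe DML2 register set (DLG/TTU/RQ) and global sync. */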
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
+ pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
+ pipe_ctx->plane_res.hubp,
+ &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync,
+ &pipe_ctx->stream->timing);
+ }
+
+ return true;
+}
+
+void dcn401_detect_pipe_changes(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe)
+{
+ bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
+ bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;
+
+ unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
+ unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
+ unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
+ unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
+ unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
+ unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
+ unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
+ unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
+
+ new_pipe->update_flags.raw = 0;
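+ /* Start with all update flags cleared; each detected delta below sets its bit. */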
+
+ /* If a non-phantom pipe is being transitioned to a phantom pipe,
+ * set disable and return immediately. This is because the pipe
+ * that was previously in use must be fully disabled before we
+ * can "enable" it as a phantom pipe (since the OTG will certainly
+ * be different). The post_unlock sequence will set the correct
+ * update flags to enable the phantom pipe.
+ */
+ if (old_pipe->plane_state && !old_is_phantom &&
+ new_pipe->plane_state && new_is_phantom) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+ resource_is_odm_topology_changed(new_pipe, old_pipe))
+ /* Detect odm changes */
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
+ return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.unbounded_req = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ new_pipe->update_flags.bits.det_size = 1;
+ if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
+ new_pipe->stream_res.test_pattern_params.width != 0 &&
+ new_pipe->stream_res.test_pattern_params.height != 0)
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+
+ /* For SubVP we need to unconditionally enable because any phantom pipes are
+ * always removed and then newly added on every full update whenever SubVP is in use.
+ * The remove-add sequence of the phantom pipe always results in the pipe
+ * being blanked in enable_stream_timing (DPG).
+ */
+ if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
+ new_pipe->update_flags.bits.enable = 1;
+
+ /* Phantom pipes are effectively disabled; if the pipe was previously a phantom
+ * we have to enable it.
+ */
+ if (old_pipe->plane_state && old_is_phantom &&
+ new_pipe->plane_state && !new_is_phantom)
+ new_pipe->update_flags.bits.enable = 1;
+
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
+
+ /* Detect plane change */
+ if (old_pipe->plane_state != new_pipe->plane_state)
+ new_pipe->update_flags.bits.plane_changed = 1;
+
+ /* Detect top-pipe-only changes */
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
+ /* Detect global sync changes */
+ if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
+ || (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
+ || (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
+ || (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+
+ if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
+ new_pipe->update_flags.bits.det_size = 1;
+
+ /*
+ * Detect opp / tg change, only set on change, not on enable.
+ * Assume mpcc inst = pipe index; if not, this code needs to be updated,
+ * since mpcc is what is affected by these. In fact, all of our sequence
+ * currently makes this assumption in how a hubp reset is matched to the
+ * same index mpcc reset.
+ */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /*
+ * Detect mpcc blending changes; only dpp inst and opp matter here, since
+ * mpccs getting removed/inserted update connected ones during their own
+ * programming.
+ */
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
+ struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
+ struct dml2_display_rq_regs old_rq_regs = old_pipe->hubp_regs.rq_regs;
+ struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
+ struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
+ struct dml2_display_rq_regs *new_rq_regs = &new_pipe->hubp_regs.rq_regs;
+
+ /* Detect pipe interdependent updates */
+ if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
+ || (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
+ || (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
+ || (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
+ || (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
+ || (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
+ || (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
+ || (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
+ || (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
+ || (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
+ || (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
+ || (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
+ || (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
+ new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
+ || (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
+ || (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
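+ /* Sync the interdependent fields into the old copies so the memcmp below
+ * only catches the remaining, non-interdependent differences.
+ */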
+ old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
+ old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
+ old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
+ old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
+ old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
+ old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
+ old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
+ old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
+ old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
+ old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
+ old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
+ old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
+ old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
+ old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
+ memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
+ memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ }
+
+ if (old_pipe->unbounded_req != new_pipe->unbounded_req)
+ new_pipe->update_flags.bits.unbounded_req = 1;
+
+ if (memcmp(&old_pipe->stream_res.test_pattern_params,
+ &new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index a27e62081685..17cea748789e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -55,12 +55,14 @@ void dcn401_populate_mcm_luts(struct dc *dc,
bool lut_bank_a);
void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
+void dcn401_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx);
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable);
-struct ips_ono_region_state dcn401_read_ono_state(struct dc *dc,
- uint8_t region);
void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
const struct pipe_ctx *top_pipe_to_program);
@@ -81,7 +83,23 @@ void dcn401_hardware_release(struct dc *dc);
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
struct pipe_ctx *otg_master);
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
-void dcn401_wait_for_det_buffer_update(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
+void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
void dcn401_program_outstanding_updates(struct dc *dc, struct dc_state *context);
+void dcn401_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+void dcn401_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context);
+void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx);
+void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context);
+void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context);
+bool dcn401_update_bandwidth(struct dc *dc, struct dc_state *context);
+void dcn401_detect_pipe_changes(
+ struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index a2ca07235c83..44cb376f97c1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -17,9 +17,9 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.init_hw = dcn401_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
- .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .program_front_end_for_ctx = dcn401_program_front_end_for_ctx,
.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
- .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .post_unlock_program_front_end = dcn401_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -42,7 +42,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn401_prepare_bandwidth,
.optimize_bandwidth = dcn401_optimize_bandwidth,
- .update_bandwidth = dcn20_update_bandwidth,
+ .update_bandwidth = dcn401_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn31_set_static_screen_control,
@@ -66,7 +66,6 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_writeback = dcn30_enable_writeback,
.disable_writeback = dcn30_disable_writeback,
.update_writeback = dcn30_update_writeback,
- .mmhubbub_warmup = dcn30_mmhubbub_warmup,
.dmdata_status_done = dcn20_dmdata_status_done,
.program_dmdata_engine = dcn30_program_dmdata_engine,
.set_dmdata_attributes = dcn20_set_dmdata_attributes,
@@ -77,14 +76,14 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn401_apply_idle_power_optimizations,
.does_plane_fit_in_mall = NULL,
- .set_backlight_level = dcn21_set_backlight_level,
+ .set_backlight_level = dcn31_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn401_hardware_release,
.set_pipe = dcn21_set_pipe,
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
- .disable_link_output = dcn32_disable_link_output,
+ .disable_link_output = dcn401_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.enable_phantom_streams = dcn32_enable_phantom_streams,
@@ -93,13 +92,17 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
.update_dsc_pg = dcn32_update_dsc_pg,
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
- .blank_phantom = dcn32_blank_phantom,
.wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
.fams2_global_control_lock = dcn401_fams2_global_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
.fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
.program_outstanding_updates = dcn401_program_outstanding_updates,
+ .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .detect_pipe_changes = dcn401_detect_pipe_changes,
+ .enable_plane = dcn20_enable_plane,
+ .update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
@@ -111,7 +114,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
- .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap,
.enable_stream_timing = dcn401_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
@@ -136,8 +139,9 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = NULL,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
- .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
+ .reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe,
.populate_mcm_luts = NULL,
+ .perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock,
};
void dcn401_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index ac9205625623..a7d66cfd93c9 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -194,7 +194,6 @@ enum block_sequence_func {
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
-
};
struct block_sequence {
@@ -331,10 +330,6 @@ struct hw_sequencer_funcs {
void (*disable_writeback)(struct dc *dc,
unsigned int dwb_pipe_inst);
- bool (*mmhubbub_warmup)(struct dc *dc,
- unsigned int num_dwb,
- struct dc_writeback_info *wb_info);
-
/* Clock Related */
enum dc_status (*set_clock)(struct dc *dc,
enum dc_clock_type clock_type,
@@ -365,8 +360,7 @@ struct hw_sequencer_funcs {
void (*clear_status_bits)(struct dc *dc, unsigned int mask);
bool (*set_backlight_level)(struct pipe_ctx *pipe_ctx,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
+ struct set_backlight_level_params *params);
void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx);
@@ -462,6 +456,19 @@ struct hw_sequencer_funcs {
void (*program_outstanding_updates)(struct dc *dc,
struct dc_state *context);
void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
+ void (*wait_for_all_pending_updates)(const struct pipe_ctx *pipe_ctx);
+ void (*detect_pipe_changes)(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe);
+ void (*enable_plane)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+ void (*update_dchubp_dpp)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+ void (*post_unlock_reset_opp)(struct dc *dc,
+ struct pipe_ctx *opp_head);
};
void color_space_to_black_color(
@@ -489,11 +496,12 @@ void get_hdr_visual_confirm_color(
void get_mpctree_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
+void get_vabc_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
void get_subvp_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
-
void get_fams2_visual_confirm_color(
struct dc *dc,
struct dc_state *context,
@@ -504,6 +512,10 @@ void get_mclk_switch_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_cursor_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
void set_p_state_switch_method(
struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 0ac675456979..22a5d4a03c98 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -182,6 +182,7 @@ struct hwseq_private_funcs {
struct pipe_ctx *pipe_ctx,
struct dc_cm2_func_luts mcm_luts,
bool lut_bank_a);
+ void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index fa5edd03d004..b5afd8c3103d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -60,5 +60,7 @@ enum dc_status {
};
char *dc_status_to_str(enum dc_status status);
+char *dc_pixel_encoding_to_str(enum dc_pixel_encoding pixel_encoding);
+char *dc_color_depth_to_str(enum dc_color_depth color_depth);
#endif /* _CORE_STATUS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index bfb8b8502d20..d558efc6e12f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -45,9 +45,6 @@
#define MAX_SVP_PHANTOM_STREAMS 2
#define MAX_SVP_PHANTOM_PLANES 2
-void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
- uint32_t controller_id);
-
#include "grph_object_id.h"
#include "link_encoder.h"
#include "stream_encoder.h"
@@ -215,6 +212,12 @@ struct resource_funcs {
void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
+ /*
+ * Get indicator of power from a context that went through full validation
+ */
+ int (*get_power_profile)(const struct dc_state *context);
+ unsigned int (*get_det_buffer_size)(const struct dc_state *context);
+ unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx);
};
struct audio_support{
@@ -463,6 +466,7 @@ struct pipe_ctx {
unsigned int surface_size_in_mall_bytes;
struct dml2_dchub_per_pipe_register_set hubp_regs;
struct dml2_hubp_pipe_mcache_regs mcache_regs;
+ union dml2_global_sync_programming global_sync;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
@@ -473,6 +477,8 @@ struct pipe_ctx {
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
uint8_t subvp_index;
struct pixel_rate_divider pixel_rate_divider;
+ /* pixels borrowed from hblank to hactive */
+ uint8_t hblank_borrow;
};
/* Data used for dynamic link encoder assignment.
@@ -535,7 +541,8 @@ struct dcn_bw_output {
bool legacy_svp_drr_stream_index_valid;
struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
struct dmub_cmd_fams2_global_config fams2_global_config;
- struct dmub_fams2_stream_static_state fams2_stream_params[DML2_MAX_PLANES];
+ union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES];
+ union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES];
struct dml2_display_arb_regs arb_regs;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 55529c5f471c..d19a595c2be4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -624,10 +624,6 @@ bool dcn_validate_bandwidth(
struct dc_state *context,
bool fast_validate);
-unsigned int dcn_find_dcfclk_suits_all(
- const struct dc *dc,
- struct dc_clocks *clocks);
-
void dcn_get_soc_clks(
struct dc *dc,
int *min_fclk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 2d06067ff36d..c14d64687a3d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -306,6 +306,9 @@ struct clk_mgr_funcs {
*/
void (*set_hard_min_memclk)(struct clk_mgr *clk_mgr, bool current_mode);
+ int (*get_hard_min_memclk)(struct clk_mgr *clk_mgr);
+ int (*get_hard_min_fclk)(struct clk_mgr *clk_mgr);
+
/* Send message to PMFW to set hard max memclk frequency to highest DPM */
void (*set_hard_max_memclk)(struct clk_mgr *clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index c2dd061892f4..7a1ca1e98059 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -166,6 +166,41 @@ enum dentist_divider_range {
CLK_SR_DCN32(CLK1_CLK4_CURRENT_CNT), \
CLK_SR_DCN32(CLK4_CLK0_CURRENT_CNT)
+#define CLK_REG_LIST_DCN35() \
+ CLK_SR_DCN35(CLK1_CLK_PLL_REQ), \
+ CLK_SR_DCN35(CLK1_CLK0_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK1_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK2_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK3_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK4_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK5_DFS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK0_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK1_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK2_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK3_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK4_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK5_CURRENT_CNT), \
+ CLK_SR_DCN35(CLK1_CLK0_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK1_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK2_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK3_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK4_BYPASS_CNTL),\
+ CLK_SR_DCN35(CLK1_CLK5_BYPASS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK0_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK1_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK2_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK3_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK4_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK5_DS_CNTL), \
+ CLK_SR_DCN35(CLK1_CLK0_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK1_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK2_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK3_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK4_ALLOW_DS), \
+ CLK_SR_DCN35(CLK1_CLK5_ALLOW_DS), \
+ CLK_SR_DCN35(CLK5_spll_field_8), \
+ SR(DENTIST_DISPCLK_CNTL), \
+
#define CLK_COMMON_MASK_SH_LIST_DCN32(mask_sh) \
CLK_COMMON_MASK_SH_LIST_DCN20_BASE(mask_sh),\
CLK_SF(CLK1_CLK_PLL_REQ, FbMult_int, mask_sh),\
@@ -236,6 +271,7 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK2_DFS_CNTL;
uint32_t CLK1_CLK3_DFS_CNTL;
uint32_t CLK1_CLK4_DFS_CNTL;
+ uint32_t CLK1_CLK5_DFS_CNTL;
uint32_t CLK2_CLK2_DFS_CNTL;
uint32_t CLK1_CLK0_CURRENT_CNT;
@@ -243,11 +279,34 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK2_CURRENT_CNT;
uint32_t CLK1_CLK3_CURRENT_CNT;
uint32_t CLK1_CLK4_CURRENT_CNT;
+ uint32_t CLK1_CLK5_CURRENT_CNT;
uint32_t CLK0_CLK0_DFS_CNTL;
uint32_t CLK0_CLK1_DFS_CNTL;
uint32_t CLK0_CLK3_DFS_CNTL;
uint32_t CLK0_CLK4_DFS_CNTL;
+ uint32_t CLK1_CLK0_BYPASS_CNTL;
+ uint32_t CLK1_CLK1_BYPASS_CNTL;
+ uint32_t CLK1_CLK2_BYPASS_CNTL;
+ uint32_t CLK1_CLK3_BYPASS_CNTL;
+ uint32_t CLK1_CLK4_BYPASS_CNTL;
+ uint32_t CLK1_CLK5_BYPASS_CNTL;
+
+ uint32_t CLK1_CLK0_DS_CNTL;
+ uint32_t CLK1_CLK1_DS_CNTL;
+ uint32_t CLK1_CLK2_DS_CNTL;
+ uint32_t CLK1_CLK3_DS_CNTL;
+ uint32_t CLK1_CLK4_DS_CNTL;
+ uint32_t CLK1_CLK5_DS_CNTL;
+
+ uint32_t CLK1_CLK0_ALLOW_DS;
+ uint32_t CLK1_CLK1_ALLOW_DS;
+ uint32_t CLK1_CLK2_ALLOW_DS;
+ uint32_t CLK1_CLK3_ALLOW_DS;
+ uint32_t CLK1_CLK4_ALLOW_DS;
+ uint32_t CLK1_CLK5_ALLOW_DS;
+ uint32_t CLK5_spll_field_8;
+
};
struct clk_mgr_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 67c32401893e..52b745667ef7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -228,6 +228,7 @@ struct hubbub_funcs {
void (*program_det_segments)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg);
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
+ bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 16580d624278..b610beb075d5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -42,6 +42,7 @@
#include "cursor_reg_cache.h"
#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2/dml21/inc/dml_top_types.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -144,14 +145,26 @@ struct hubp_funcs {
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ void (*hubp_setup2)(
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *pipe_regs,
+ union dml2_global_sync_programming *pipe_global_sync,
+ struct dc_crtc_timing *timing);
+
void (*hubp_setup_interdependent)(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+ void (*hubp_setup_interdependent2)(
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *pipe_regs);
+
void (*dcc_control)(struct hubp *hubp, bool enable,
enum hubp_ind_block_size blk_size);
+ void (*hubp_reset)(struct hubp *hubp);
+
void (*mem_program_viewport)(
struct hubp *hubp,
const struct rect *viewport,
@@ -165,7 +178,7 @@ struct hubp_funcs {
void (*hubp_program_pte_vm)(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation);
void (*hubp_set_vm_system_aperture_settings)(
@@ -179,7 +192,7 @@ struct hubp_funcs {
void (*hubp_program_surface_config)(
struct hubp *hubp,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -275,6 +288,7 @@ struct hubp_funcs {
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
+ void (*hubp_clear_tiling)(struct hubp *hubp);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index af9183f5d69b..08c16ba52a51 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -168,6 +168,14 @@ struct link_encoder_funcs {
struct link_encoder *enc,
enum encoder_type_select sel,
uint32_t hpo_inst);
+ void (*enable_dpia_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t dpia_id,
+ uint8_t digmode,
+ uint8_t fec_rdy);
+ void (*disable_dpia_output)(struct link_encoder *link_enc,
+ uint8_t dpia_id,
+ uint8_t digmode);
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index a8b44f398ce6..42fbc70f7056 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -150,7 +150,7 @@ struct mem_input_funcs {
void (*mem_input_program_pte_vm)(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
enum dc_rotation_angle rotation);
void (*mem_input_set_vm_system_aperture_settings)(
@@ -164,7 +164,7 @@ struct mem_input_funcs {
void (*mem_input_program_surface_config)(
struct mem_input *mem_input,
enum surface_pixel_format format,
- union dc_tiling_info *tiling_info,
+ struct dc_tiling_info *tiling_info,
struct plane_size *plane_size,
enum dc_rotation_angle rotation,
struct dc_plane_dcc_param *dcc,
@@ -187,6 +187,8 @@ struct mem_input_funcs {
const struct dc_cursor_position *pos,
const struct dc_cursor_mi_param *param);
+ void (*mem_input_clear_tiling)(
+ struct mem_input *mem_input);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
index 03cbcbb36f1c..6fdc9809280c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
@@ -210,7 +210,7 @@ void optc1_enable_crtc_reset(struct timing_generator *optc,
bool optc1_configure_crc(struct timing_generator *optc, const struct crc_params *params);
-bool optc1_get_crc(struct timing_generator *optc,
+bool optc1_get_crc(struct timing_generator *optc, uint8_t idx,
uint32_t *r_cr,
uint32_t *g_y,
uint32_t *b_cb);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 3d4c8bd42b49..9885cb3c310f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -141,6 +141,9 @@ struct crc_params {
bool continuous_mode;
bool enable;
+
+ uint8_t crc_eng_inst;
+ bool reset;
};
/**
@@ -291,7 +294,7 @@ struct timing_generator_funcs {
* @get_crc: Get CRCs for the given timing generator. Return false if
* CRCs are not enabled (via configure_crc).
*/
- bool (*get_crc)(struct timing_generator *tg,
+ bool (*get_crc)(struct timing_generator *tg, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
void (*program_manual_trigger)(struct timing_generator *optc);
@@ -342,7 +345,11 @@ struct timing_generator_funcs {
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
- bool (*get_double_buffer_pending)(struct timing_generator *tg);
+ bool (*get_optc_double_buffer_pending)(struct timing_generator *tg);
+ bool (*get_otg_double_buffer_pending)(struct timing_generator *tg);
+ bool (*get_pipe_update_pending)(struct timing_generator *tg);
+ void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable);
+ bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 72a8479e1f2d..fd1f9d3db039 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -148,6 +148,10 @@ struct link_service {
const struct dc_stream_state *stream,
const unsigned int num_streams);
+ uint32_t (*dp_required_hblank_size_bytes)(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
/*************************** DPMS *************************************/
void (*set_dpms_on)(struct dc_state *state, struct pipe_ctx *pipe_ctx);
@@ -248,8 +252,7 @@ struct link_service {
uint32_t *backlight_millinits_avg,
uint32_t *backlight_millinits_peak);
bool (*edp_set_backlight_level)(const struct dc_link *link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
+ struct set_backlight_level_params *backlight_level_params);
bool (*edp_set_backlight_level_nits)(struct dc_link *link,
bool isHDR,
uint32_t backlight_millinits,
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 4fb9cd6708d5..1d61d475d36f 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -30,8 +30,8 @@
#include "../dce110/irq_service_dce110.h"
#include "irq_service_dcn201.h"
-#include "dcn/dcn_2_0_3_offset.h"
-#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dcn/dcn_2_0_1_offset.h"
+#include "dcn/dcn_2_0_1_sh_mask.h"
#include "cyan_skillfish_ip_offset.h"
#include "soc15_hw_ip.h"
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index ff8fe1a94965..96febabf464a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -251,7 +251,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
dp_fixed_vs_pe_read_lane_adjust(
link,
@@ -646,7 +646,7 @@ bool dp_set_test_pattern(
if (IS_DP_PHY_PATTERN(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
dp_fixed_vs_pe_set_retimer_lane_settings(
link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 3e47a6735912..06faa461067b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -164,7 +164,9 @@ void disable_dio_link_output(struct dc_link *link,
{
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
- link_enc->funcs->disable_output(link_enc, signal);
+ if (link_enc != NULL)
+ link_enc->funcs->disable_output(link_enc, signal);
+
link->dc->link_srv->dp_trace_source_sequence(link,
DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
index 348ea4cb832d..a6d1d7641ab4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
@@ -187,7 +187,7 @@ static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = {
bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link)
{
- return (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
+ return ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN);
}
const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 6499807af72a..36adf95744fe 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
@@ -77,17 +77,74 @@ static void set_dio_dpia_lane_settings(struct dc_link *link,
{
}
+static void enable_dpia_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
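+ /* Use the DPIA-specific enable when pre-training is enabled and the hook
+ * exists; otherwise fall back to the generic DP SST/MST output enables.
+ */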
+ if (link_enc != NULL) {
+ if (link->dc->config.enable_dpia_pre_training && link_enc->funcs->enable_dpia_output) {
+ uint8_t fec_rdy = link->dc->link_srv->dp_should_enable_fec(link);
+ uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
+
+ link_enc->funcs->enable_dpia_output(
+ link_enc,
+ link_settings,
+ link->ddc_hw_inst,
+ digmode,
+ fec_rdy);
+ } else {
+ if (dc_is_dp_sst_signal(signal))
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ else
+ link_enc->funcs->enable_dp_mst_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ }
+
+ }
+
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+}
+
+static void disable_dpia_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ if (link_enc != NULL) {
+ if (link->dc->config.enable_dpia_pre_training && link_enc->funcs->disable_dpia_output) {
+ uint8_t digmode = dc_is_dp_sst_signal(signal) ? DIG_SST_MODE : DIG_MST_MODE;
+
+ link_enc->funcs->disable_dpia_output(link_enc, link->ddc_hw_inst, digmode);
+ } else
+ link_enc->funcs->disable_output(link_enc, signal);
+ }
+
+ link->dc->link_srv->dp_trace_source_sequence(link,
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+}
+
static const struct link_hwss dpia_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
.setup_stream_attribute = setup_dio_stream_attribute,
- .disable_link_output = disable_dio_link_output,
+ .disable_link_output = disable_dpia_link_output,
.setup_audio_output = setup_dio_audio_output,
.enable_audio_packet = enable_dio_audio_packet,
.disable_audio_packet = disable_dio_audio_packet,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
- .enable_dp_link_output = enable_dio_dp_link_output,
+ .enable_dp_link_output = enable_dpia_link_output,
.set_dp_link_test_pattern = set_dio_dpia_link_test_pattern,
.set_dp_lane_settings = set_dio_dpia_lane_settings,
.update_stream_allocation_table = update_dpia_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
index ad16ec5d9bb7..259e0f4775e1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
@@ -27,6 +27,9 @@
#include "link_hwss.h"
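+/* DIG encoder stream mode values passed to the DPIA enable/disable hooks */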
+#define DIG_SST_MODE 0
+#define DIG_MST_MODE 5
+
const struct link_hwss *get_dpia_link_hwss(void);
bool can_use_dpia_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index d21ee9d12d26..550e1a098fa2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -48,6 +48,9 @@
#include "dm_helpers.h"
#include "clk_mgr.h"
+ // Offset DPCD 050Eh == 0x5A
+#define MST_HUB_ID_0x5A 0x5A
+
#define DC_LOGGER \
link->ctx->logger
#define DC_LOGGER_INIT(logger)
@@ -692,6 +695,15 @@ static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link)
link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
!link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
link->wa_flags.dpia_mst_dsc_always_on = true;
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->type == dc_connection_mst_branch &&
+ link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ link->dpcd_caps.branch_vendor_specific_data[2] == MST_HUB_ID_0x5A &&
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+ !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) {
+ link->wa_flags.dpia_mst_dsc_always_on = true;
+ }
}
static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
@@ -817,7 +829,8 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
if (link->dc->debug.skip_detection_link_training ||
dc_is_embedded_signal(link->local_sink->sink_signal) ||
- link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)) {
destrictive = false;
} else if (link_dp_get_encoding_format(&max_link_cap) ==
DP_8b_10b_ENCODING) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index c4e03482ba9a..ec7de9c01fab 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -772,6 +772,20 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
return result;
}
+static bool dp_set_hblank_reduction_on_rx(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ bool result = false;
+
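+ /* Virtual signals have no physical RX to program, so report success. */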
+ if (dc_is_virtual_signal(stream->signal))
+ result = true;
+ else
+ result = dm_helpers_dp_write_hblank_reduction(dc->ctx, stream);
+ return result;
+}
+
/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
* i.e. after dp_enable_dsc_on_rx() had been called
*/
@@ -808,7 +822,8 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
@@ -1952,11 +1967,15 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
if (stream->phy_pix_clk > 340000)
is_over_340mhz = true;
+ if (dc_is_tmds_signal(stream->signal) && stream->phy_pix_clk > 6000000UL) {
+ ASSERT(false);
+ return;
+ }
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
/* DP159, Retimer settings */
eng_id = pipe_ctx->stream_res.stream_enc->id;
@@ -1967,7 +1986,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
write_i2c_default_retimer_setting(pipe_ctx,
is_vga_mode, is_over_340mhz);
}
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ } else if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
/* PI3EQX1204, Redriver settings */
write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
}
@@ -2023,7 +2042,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
int lt_attempts = LINK_TRAINING_ATTEMPTS;
// Increase retry count if attempting DP1.x on FIXED_VS link
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
lt_attempts = 10;
@@ -2038,7 +2057,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
/* Train with fallback when enabling DPIA link. Conventional links are
* trained with fallback during sink detection.
*/
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)
do_fallback = true;
/*
@@ -2082,6 +2102,9 @@ static enum dc_status enable_link_dp(struct dc_state *state,
if (link_settings->link_rate == LINK_RATE_LOW)
skip_video_pattern = false;
+ if (stream->sink_patches.oled_optimize_display_on)
+ set_default_brightness_aux(link);
+
if (perform_link_training_with_retries(link_settings,
skip_video_pattern,
lt_attempts,
@@ -2105,10 +2128,14 @@ static enum dc_status enable_link_dp(struct dc_state *state,
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
- set_default_brightness_aux(link);
- if (link->dpcd_sink_ext_caps.bits.oled == 1)
- msleep(bl_oled_enable_delay);
- edp_backlight_enable_aux(link, true);
+ if (!stream->sink_patches.oled_optimize_display_on) {
+ set_default_brightness_aux(link);
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ msleep(bl_oled_enable_delay);
+ edp_backlight_enable_aux(link, true);
+ } else {
+ edp_backlight_enable_aux(link, true);
+ }
}
return status;
@@ -2367,13 +2394,13 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
unsigned short masked_chip_caps = link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
//Need to inform that sink is going to use legacy HDMI mode.
write_scdc_data(
link->ddc,
165000,//vbios only handles 165Mhz.
false);
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
/* DP159, Retimer settings */
if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
write_i2c_retimer_setting(pipe_ctx,
@@ -2381,7 +2408,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
else
write_i2c_default_retimer_setting(pipe_ctx,
false, false);
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ } else if (masked_chip_caps == AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
/* PI3EQX1204, Redriver settings */
write_i2c_redriver_setting(pipe_ctx, false);
}
@@ -2521,6 +2548,15 @@ void link_set_dpms_on(
if (pipe_ctx->stream->dpms_off)
return;
+ /* For a DP tunneling link, a pending HPD means that we have a race condition between
+ * processing the current link and processing the pending HPD. If we enable the link now,
+ * we may end up with a link that is not actually connected to a sink, so we skip
+ * enabling the link in this case.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->is_hpd_pending) {
+ DC_LOG_DEBUG("%s, Link%d HPD is pending, skip enabling it.\n", __func__, link->link_index);
+ return;
+ }
+
/* Have to setup DSC before DIG FE and BE are connected (which happens before the
* link training). This is to make sure the bandwidth sent to DIG BE won't be
* bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
@@ -2586,6 +2622,9 @@ void link_set_dpms_on(
}
}
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_set_hblank_reduction_on_rx(pipe_ctx);
+
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
allocate_usb4_bandwidth(pipe_ctx->stream);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 5e1b5ab9fbc6..a7877d57a00f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -101,6 +101,7 @@ static void construct_link_service_validation(struct link_service *link_srv)
link_srv->validate_mode_timing = link_validate_mode_timing;
link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps;
link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth;
+ link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes;
}
/* link dpms owns the programming sequence of stream's dpms state associated
@@ -698,7 +699,7 @@ static bool construct_phy(struct dc_link *link,
link->chip_caps);
}
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+ if ((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
link->bios_forced_drive_settings.VOLTAGE_SWING =
(bios->integrated_info->ext_disp_conn_info.fixdpvoltageswing & 0x3);
link->bios_forced_drive_settings.PRE_EMPHASIS =
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 60f15a9ba7a5..29606fda029d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -409,3 +409,182 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
}
+
+struct dp_audio_layout_config {
+ uint8_t layouts_per_sample_denom;
+ uint8_t symbols_per_layout;
+ uint8_t max_layouts_per_audio_sdp;
+};
+
+static void get_audio_layout_config(
+ uint32_t channel_count,
+ enum dp_link_encoding encoding,
+ struct dp_audio_layout_config *output)
+{
+ memset(output, 0, sizeof(struct dp_audio_layout_config));
+
+ /* Assuming L-PCM audio. Current implementation uses max 1 layout per SDP,
+ * with each layout being the same size (8ch layout).
+ */
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 40;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ if (channel_count == 2) {
+ output->layouts_per_sample_denom = 4;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ } else if (channel_count == 8 || channel_count == 6) {
+ output->layouts_per_sample_denom = 1;
+ output->symbols_per_layout = 10;
+ output->max_layouts_per_audio_sdp = 1;
+ }
+ }
+}
+
+static uint32_t get_av_stream_map_lane_count(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t av_stream_map_lane_count = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (!is_mst)
+ av_stream_map_lane_count = lane_count;
+ else
+ av_stream_map_lane_count = 4;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ av_stream_map_lane_count = 4;
+ }
+
+ ASSERT(av_stream_map_lane_count != 0);
+
+ return av_stream_map_lane_count;
+}
+
+static uint32_t get_audio_sdp_overhead(
+ enum dp_link_encoding encoding,
+ enum dc_lane_count lane_count,
+ bool is_mst)
+{
+ uint32_t audio_sdp_overhead = 0;
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ if (is_mst)
+ audio_sdp_overhead = 16; /* 4 * 2 + 8 */
+ else
+ audio_sdp_overhead = lane_count * 2 + 8;
+ } else if (encoding == DP_128b_132b_ENCODING) {
+ audio_sdp_overhead = 10; /* 4 x 2.5 */
+ }
+
+ ASSERT(audio_sdp_overhead != 0);
+
+ return audio_sdp_overhead;
+}
+
+/* The current calculation is only applicable to 8b/10b MST and 128b/132b SST/MST.
+ */
+static uint32_t calculate_overhead_hblank_bw_in_symbols(
+ uint32_t max_slice_h)
+{
+ uint32_t overhead_hblank_bw = 0; /* in stream symbols */
+
+ overhead_hblank_bw += max_slice_h * 4; /* EOC overhead */
+ overhead_hblank_bw += 12; /* Main link overhead (VBID, BS/BE) */
+
+ return overhead_hblank_bw;
+}
+
+uint32_t dp_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params)
+{
+ /* Main logic from dce_audio is duplicated here, with the main
+ * difference being:
+ * - Pre-determined lane count of 4
+ * - Assumed 16 dsc slices for worst case
+ * - Assumed SDP split disabled for worst case
+ * TODO: Unify logic from dce_audio to prevent duplicated logic.
+ */
+
+ const struct dc_crtc_timing *timing = audio_params->crtc_timing;
+ const uint32_t channel_count = audio_params->channel_count;
+ const uint32_t sample_rate_hz = audio_params->sample_rate_hz;
+ const enum dp_link_encoding link_encoding = audio_params->link_encoding;
+
+ // 8b/10b MST and 128b/132b are always 4 logical lanes.
+ const uint32_t lane_count = 4;
+ const bool is_mst = (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT);
+ // Maximum slice count is with ODM 4:1, 4 slices per DSC
+ const uint32_t max_slices_h = 16;
+
+ const uint32_t av_stream_map_lane_count = get_av_stream_map_lane_count(
+ link_encoding, lane_count, is_mst);
+ const uint32_t audio_sdp_overhead = get_audio_sdp_overhead(
+ link_encoding, lane_count, is_mst);
+ struct dp_audio_layout_config layout_config;
+
+ if (link_encoding == DP_8b_10b_ENCODING && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
+ return 0;
+
+ get_audio_layout_config(
+ channel_count, link_encoding, &layout_config);
+
+ /* DP spec recommends a safety margin between 1.05 and 1.1 to prevent sample under-run */
+ struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100);
+ struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction(
+ timing->pix_clk_100hz, (long long)timing->h_total * 10);
+ struct fixed31_32 samples_per_line;
+ struct fixed31_32 layouts_per_line;
+ struct fixed31_32 symbols_per_sdp_max_layout;
+ struct fixed31_32 remainder;
+ uint32_t num_sdp_with_max_layouts;
+ uint32_t required_symbols_per_hblank;
+ uint32_t required_bytes_per_hblank = 0;
+
+ samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000);
+ samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz);
+ layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config.layouts_per_sample_denom);
+ // HBlank expansion usage assumes SDP split disabled to allow for worst case.
+ layouts_per_line = dc_fixpt_from_int(dc_fixpt_ceil(layouts_per_line));
+
+ num_sdp_with_max_layouts = dc_fixpt_floor(
+ dc_fixpt_div_int(layouts_per_line, layout_config.max_layouts_per_audio_sdp));
+ symbols_per_sdp_max_layout = dc_fixpt_from_int(
+ layout_config.max_layouts_per_audio_sdp * layout_config.symbols_per_layout);
+ symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead);
+ symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin);
+ required_symbols_per_hblank = num_sdp_with_max_layouts;
+ required_symbols_per_hblank *= ((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+
+ if (num_sdp_with_max_layouts != dc_fixpt_ceil(
+ dc_fixpt_div_int(layouts_per_line, layout_config.max_layouts_per_audio_sdp))) {
+ remainder = dc_fixpt_sub_int(layouts_per_line,
+ num_sdp_with_max_layouts * layout_config.max_layouts_per_audio_sdp);
+ remainder = dc_fixpt_mul_int(remainder, layout_config.symbols_per_layout);
+ remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead);
+ remainder = dc_fixpt_mul(remainder, audio_sdp_margin);
+ required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) /
+ av_stream_map_lane_count) * av_stream_map_lane_count;
+ }
+
+ required_symbols_per_hblank += calculate_overhead_hblank_bw_in_symbols(max_slices_h);
+
+ if (link_encoding == DP_8b_10b_ENCODING)
+ required_bytes_per_hblank = required_symbols_per_hblank; // 8 bits per 8b/10b symbol
+ else if (link_encoding == DP_128b_132b_ENCODING)
+ required_bytes_per_hblank = required_symbols_per_hblank * 4; // 32 bits per 128b/132b symbol
+
+ return required_bytes_per_hblank;
+}
+
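The fixed-point sequence above is easier to follow in plain arithmetic. Below is a minimal, hypothetical sketch of the same worst-case estimate as standalone C, using doubles instead of the dc_fixpt helpers; the timing values are invented, and only the constants mirror the driver's stated assumptions (4 logical lanes, 16 DSC slices, one 8-channel layout per SDP, 1.1 margin).

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: worst-case audio HBlank bytes for 8ch/48 kHz over
 * 128b/132b, mirroring dp_required_hblank_size_bytes() with doubles. */
int main(void)
{
        const double pix_clk_khz = 533250.0;    /* invented high-refresh timing */
        const double h_total = 4400.0;
        const double sample_rate_khz = 48.0;
        const double margin = 1.10;             /* DP spec 1.05-1.1 safety margin */
        const uint32_t symbols_per_layout = 10; /* 128b/132b, 8ch layout */
        const uint32_t sdp_overhead = 10;       /* 4 lanes x 2.5 symbols */
        const uint32_t lanes = 4;               /* logical lanes, fixed */
        const uint32_t max_slices_h = 16;       /* ODM 4:1, 4 slices per DSC */

        double line_freq_khz = pix_clk_khz / h_total;
        uint32_t layouts_per_line = (uint32_t)ceil(sample_rate_khz / line_freq_khz);
        /* one layout per SDP, padded up to a whole 4-lane symbol group */
        uint32_t per_sdp = (uint32_t)ceil((symbols_per_layout + sdp_overhead) * margin);
        uint32_t symbols = layouts_per_line * (((per_sdp + lanes) / lanes) * lanes);

        symbols += max_slices_h * 4 + 12;       /* EOC + VBID/BS/BE overhead */
        printf("required hblank bytes: %u\n", symbols * 4); /* 32 bits per symbol */
        return 0;
}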
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index 595fb05946e9..bf398c49c3e8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -37,4 +37,9 @@ uint32_t dp_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_settings);
+
+uint32_t dp_required_hblank_size_bytes(
+ const struct dc_link *link,
+ struct dp_audio_bandwidth_params *audio_params);
+
#endif /* __LINK_VALIDATION_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index d6d5bbf2108c..267180e7bc48 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -505,7 +505,7 @@ bool try_to_configure_aux_timeout(struct ddc_service *ddc,
bool result = false;
struct ddc *ddc_pin = ddc->ddc_pin;
- if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((ddc->link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
!ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
ddc->ctx->dce_version == DCN_VERSION_3_1) {
/* Fixed VS workaround for AUX timeout */
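A note on the chip_caps change repeated throughout this series: AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN is treated as a value within the EXT_CHIP_MASK field rather than a standalone flag bit, so a bare bitwise AND could match unrelated field values that happen to share set bits. A hedged sketch of the distinction, as a hypothetical helper that is not part of the series:

/* Field-style caps: isolate the chip field first, then compare values.
 * A bare (chip_caps & DP_FIXED_VS_EN) test would also fire for any other
 * field value whose bit pattern overlaps DP_FIXED_VS_EN. */
static inline bool is_fixed_vs_retimer(uint32_t chip_caps)
{
        return (chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) ==
               AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
}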
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index d78c8ec4de79..44c3023a7731 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -51,9 +51,10 @@
#include "dc_dmub_srv.h"
#include "gpio_service_interface.h"
+#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+
#define DC_LOGGER \
link->ctx->logger
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
#ifndef MAX
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
@@ -1207,6 +1208,13 @@ static void get_active_converter_info(
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
}
+
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_VENDOR_SPECIFIC_START,
+ (uint8_t *)link->dpcd_caps.branch_vendor_specific_data,
+ sizeof(link->dpcd_caps.branch_vendor_specific_data));
+
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
union dp_dfp_cap_ext dfp_cap_ext;
@@ -1546,7 +1554,7 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
/* If this chip cap is set, at least one retimer must exist in the chain
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
(dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
/* If you see this message consistently, either the host platform has FIXED_VS flag
* incorrectly configured or the sink device is returning an invalid count.
@@ -1624,9 +1632,6 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.lttpr_caps.phy_repeater_cnt));
}
- /* Read DP tunneling information. */
- status = dpcd_get_tunneling_device_data(link);
-
dpcd_set_source_specific_data(link);
/* Sink may need to configure internals based on vendor, so allow some
* time before proceeding with possibly vendor specific transactions
@@ -1699,7 +1704,7 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
if (status != DC_OK)
- dm_error("%s: Read DPRX caps data failed.\n", __func__);
+ dm_error("%s: Read DPRX feature list failed.\n", __func__);
/* AdaptiveSyncCapability */
dpcd_dprx_data = 0;
@@ -1714,15 +1719,13 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.adaptive_sync_caps.dp_adap_sync_caps.raw = dpcd_dprx_data;
if (status != DC_OK)
- dm_error("%s: Read DPRX caps data failed. Addr:%#x\n",
+ dm_error("%s: Read DPRX feature list_1 failed. Addr:%#x\n",
__func__, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1);
}
-
else {
link->dpcd_caps.dprx_feature.raw = 0;
}
-
/* Error condition checking...
* It is impossible for Sink to report Max Lane Count = 0.
* It is possible for Sink to report Max Link Rate = 0, if it is
@@ -1776,6 +1779,11 @@ static bool retrieve_link_cap(struct dc_link *link)
link->test_pattern_enabled = false;
link->compliance_test_state.raw = 0;
+ link->dpcd_caps.receive_port0_cap.raw[0] =
+ dpcd_data[DP_RECEIVE_PORT_0_CAP_0 - DP_DPCD_REV];
+ link->dpcd_caps.receive_port0_cap.raw[1] =
+ dpcd_data[DP_RECEIVE_PORT_0_BUFFER_SIZE - DP_DPCD_REV];
+
/* read sink count */
core_link_read_dpcd(link,
DP_SINK_COUNT,
@@ -1842,6 +1850,9 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_FEC_CAPABILITY,
&link->dpcd_caps.fec_cap.raw,
sizeof(link->dpcd_caps.fec_cap.raw));
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_read_dpcd (DP_FEC_CAPABILITY) failed\n", __func__, __LINE__);
+
status = core_link_read_dpcd(
link,
DP_DSC_SUPPORT,
@@ -1864,6 +1875,9 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_read_dpcd (DP_DSC_BRANCH_OVERALL_THROUGHPUT_0) failed\n", __func__, __LINE__);
+
DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index);
DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x",
link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0);
@@ -1900,6 +1914,7 @@ static bool retrieve_link_cap(struct dc_link *link)
if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index);
+ /* Read 128b/132b supported link rates */
core_link_read_dpcd(link,
DP_128B132B_SUPPORTED_LINK_RATES,
&link->dpcd_caps.dp_128b_132b_supported_link_rates.raw,
@@ -1947,6 +1962,13 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw,
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
+ /* Read DP tunneling information. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ status = dpcd_get_tunneling_device_data(link);
+ if (status != DC_OK)
+ dm_error("%s: Read DP tunneling device data failed.\n", __func__);
+ }
+
retrieve_cable_id(link);
dpcd_write_cable_id_to_dprx(link);
@@ -2055,6 +2077,14 @@ void detect_edp_sink_caps(struct dc_link *link)
core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE,
&link->dpcd_caps.pr_info.max_deviation_line,
sizeof(link->dpcd_caps.pr_info.max_deviation_line));
+
+ /*
+ * OLED Emission Rate info
+ */
+ if (link->dpcd_sink_ext_caps.bits.emission_output)
+ core_link_read_dpcd(link, DP_SINK_EMISSION_RATE,
+ (uint8_t *)&link->dpcd_caps.edp_oled_emission_rate,
+ sizeof(link->dpcd_caps.edp_oled_emission_rate));
}
bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
@@ -2282,6 +2312,14 @@ bool dp_verify_link_cap_with_retries(
} else {
link->verified_link_cap = last_verified_link_cap;
}
+
+ /* For a DP tunneling link, a pending HPD means there is a race between processing
+ * the current link and the pending HPD. Since training has failed, just break
+ * the loop so that the pending HPD has a chance to be processed.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->is_hpd_pending)
+ break;
+
fsleep(10 * 1000);
}
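For orientation, the loop this hunk modifies roughly takes the shape below; this is a hedged sketch only, with most of dp_verify_link_cap_with_retries elided and try_verify_link_cap a stand-in name:

/* Sketch: each attempt verifies link caps; on a USB4 DPIA link a pending
 * HPD aborts the remaining retries so the HPD handler can run sooner. */
for (attempt = 0; attempt < attempts; attempt++) {
        if (try_verify_link_cap(link, known_limit_link_setting)) /* hypothetical */
                break;
        link->verified_link_cap = last_verified_link_cap;
        if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->is_hpd_pending)
                break;  /* give the pending HPD a chance to be processed */
        fsleep(10 * 1000);
}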
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 6af42ba9885c..0d123e647652 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -59,12 +59,18 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
dpcd_dp_tun_data,
sizeof(dpcd_dp_tun_data));
+ if (status != DC_OK)
+ goto err;
+
status = core_link_read_dpcd(
link,
DP_USB4_ROUTER_TOPOLOGY_ID,
dpcd_topology_data,
sizeof(dpcd_topology_data));
+ if (status != DC_OK)
+ goto err;
+
link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
@@ -75,6 +81,7 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
+err:
return status;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 96bf135b6f05..a08403c022ea 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -221,20 +221,12 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_error_status.raw,
sizeof(replay_error_status.raw));
- link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR =
- replay_error_status.bits.LINK_CRC_ERROR;
- link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR =
- replay_configuration.bits.DESYNC_ERROR_STATUS;
- link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR =
- replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS;
-
- if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR ||
- link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR ||
- link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) {
+ if (replay_error_status.bits.LINK_CRC_ERROR ||
+ replay_configuration.bits.DESYNC_ERROR_STATUS ||
+ replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS) {
bool allow_active;
- if (link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR)
- link->replay_settings.config.received_desync_error_hpd = 1;
+ link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
if (link->replay_settings.config.force_disable_desync_error_check)
return;
@@ -247,6 +239,9 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_configuration.raw,
sizeof(replay_configuration.raw));
+ /* Update desync error counter */
+ link->replay_settings.replay_desync_error_fail_count++;
+
/* Acknowledge and clear error bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -418,7 +413,8 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ !link->dc->config.enable_dpia_pre_training)
link->skip_fallback_on_link_loss = true;
device_service_clear.bits.AUTOMATED_TEST = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index bafa52a0165a..2c73ac87cd66 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -104,7 +104,7 @@ void dp_set_hw_lane_settings(
// Don't return here if using FIXED_VS link HWSS and encoding is 128b/132b
if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
!is_immediate_downstream(link, offset) &&
- (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
+ (!((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) ||
link_dp_get_encoding_format(&link_settings->link_settings) == DP_8b_10b_ENCODING))
return;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 27b881f947e8..88d4288cde0f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -272,7 +272,7 @@ void dp_wait_for_training_aux_rd_interval(
struct dc_link *link,
uint32_t wait_in_micro_secs)
{
- fsleep(wait_in_micro_secs);
+ usleep_range_state(wait_in_micro_secs, wait_in_micro_secs, TASK_UNINTERRUPTIBLE);
DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
__func__,
@@ -739,7 +739,7 @@ void override_training_settings(
if (overrides->ffe_preset != NULL)
lt_settings->ffe_preset = overrides->ffe_preset;
/* Override HW lane settings with BIOS forced values if present */
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
@@ -1107,9 +1107,13 @@ enum dc_status dpcd_set_link_settings(
status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_DOWNSPREAD_CTRL) failed\n", __func__, __LINE__);
status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, 1);
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LANE_COUNT_SET) failed\n", __func__, __LINE__);
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
lt_settings->link_settings.use_link_rate_set == true) {
@@ -1125,12 +1129,19 @@ enum dc_status dpcd_set_link_settings(
supported_link_rates, sizeof(supported_link_rates));
}
status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_BW_SET) failed\n", __func__, __LINE__);
+
status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
&lt_settings->link_settings.link_rate_set, 1);
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_RATE_SET) failed\n", __func__, __LINE__);
} else {
rate = get_dpcd_link_rate(&lt_settings->link_settings);
status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+ if (status != DC_OK)
+ DC_LOG_ERROR("%s:%d: core_link_write_dpcd (DP_LINK_BW_SET) failed\n", __func__, __LINE__);
}
if (rate) {
@@ -1563,7 +1574,7 @@ enum link_training_result dp_perform_link_training(
* Per DP specs starting from here, DPTX device shall not issue
* Non-LT AUX transactions inside training mode.
*/
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
+ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index b5cf75975fff..ccf8096dde29 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -412,7 +412,6 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
/* 5. check CR done*/
if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_SUCCESS;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 3aa05a2be6c0..e0e3bb865359 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -157,31 +157,13 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
uint32_t backlight_millinits,
uint32_t transition_time_in_ms)
{
- struct dpcd_source_backlight_set dpcd_backlight_set;
- uint8_t backlight_control = isHDR ? 1 : 0;
-
if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
return false;
- // OLEDs have no PWM, they can only use AUX
- if (link->dpcd_sink_ext_caps.bits.oled == 1)
- backlight_control = 1;
-
- *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
- *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
-
-
- if (!link->dpcd_caps.panel_luminance_control) {
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
- (uint8_t *)(&dpcd_backlight_set),
- sizeof(dpcd_backlight_set)) != DC_OK)
- return false;
-
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
- &backlight_control, 1) != DC_OK)
- return false;
- } else {
+ // use internal backlight control if dmub capabilities are not present
+ if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX &&
+ !link->dc->caps.dmub_caps.aux_backlight_support) {
uint8_t backlight_enable = 0;
struct target_luminance_value *target_luminance = NULL;
@@ -205,6 +187,24 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
(uint8_t *)(target_luminance),
sizeof(struct target_luminance_value)) != DC_OK)
return false;
+ } else if (link->backlight_control_type == BACKLIGHT_CONTROL_AMD_AUX) {
+ struct dpcd_source_backlight_set dpcd_backlight_set;
+ *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+ *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+ uint8_t backlight_control = isHDR ? 1 : 0;
+ // OLEDs have no PWM, they can only use AUX
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ backlight_control = 1;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)(&dpcd_backlight_set),
+ sizeof(dpcd_backlight_set)) != DC_OK)
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_control, 1) != DC_OK)
+ return false;
}
return true;
@@ -519,11 +519,11 @@ static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
}
bool edp_set_backlight_level(const struct dc_link *link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
+ struct set_backlight_level_params *backlight_level_params)
{
struct dc *dc = link->ctx->dc;
-
+ uint32_t backlight_pwm_u16_16 = backlight_level_params->backlight_pwm_u16_16;
+ uint32_t frame_ramp = backlight_level_params->frame_ramp;
DC_LOGGER_INIT(link->ctx->logger);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
@@ -544,10 +544,11 @@ bool edp_set_backlight_level(const struct dc_link *link,
return false;
}
+ backlight_level_params->frame_ramp = frame_ramp;
+
dc->hwss.set_backlight_level(
pipe_ctx,
- backlight_pwm_u16_16,
- frame_ramp);
+ backlight_level_params);
}
return true;
}
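The signature change above moves callers from two scalars to a parameter struct, so new knobs can ride along without another signature bump. A hedged sketch of what a caller now looks like; the field names come from this series, the values are invented:

/* Sketch: populate set_backlight_level_params once and pass it through. */
struct set_backlight_level_params params = {
        .backlight_pwm_u16_16 = 0x10000,  /* 100% in u16.16 fixed point */
        .frame_ramp = 0,                  /* no animated transition */
};

edp_set_backlight_level(link, &params);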
@@ -940,8 +941,7 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
struct replay_context replay_context = { 0 };
unsigned int lineTimeInNs = 0;
-
- union replay_enable_and_configuration replay_config;
+ union replay_enable_and_configuration replay_config = { 0 };
union dpcd_alpm_configuration alpm_config;
@@ -1168,9 +1168,6 @@ static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link,
link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
if (link_res->hpo_dp_link_enc) {
- if (link->wa_flags.disable_assr_for_uhbr)
- return;
-
link_enc_index = link_res->hpo_dp_link_enc->inst;
use_hpo_dp_link_enc = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 30dc8c24c008..bcfa6ac5d4e7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -36,8 +36,7 @@ bool edp_get_backlight_level_nits(struct dc_link *link,
uint32_t *backlight_millinits_avg,
uint32_t *backlight_millinits_peak);
bool edp_set_backlight_level(const struct dc_link *link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp);
+ struct set_backlight_level_params *backlight_level_params);
bool edp_set_backlight_level_nits(struct dc_link *link,
bool isHDR,
uint32_t backlight_millinits,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
index fe26fde12eeb..85298b8a1b5e 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
@@ -110,6 +110,23 @@ void mpc3_disable_dwb_mux(
MPC_DWB0_MUX, 0xf);
}
+void mpc3_set_out_rate_control(
+ struct mpc *mpc,
+ int opp_id,
+ bool enable,
+ bool rate_2x_mode,
+ struct mpc_dwb_flow_control *flow_control)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ /* Always disable mpc out rate and flow control.
+ * MPC flow rate control is not needed for DCN30 and above.
+ */
+ REG_UPDATE_2(MUX[opp_id],
+ MPC_OUT_RATE_CONTROL_DISABLE, 1,
+ MPC_OUT_RATE_CONTROL, 0);
+}
+
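mpc3_set_out_rate_control ignores its enable/rate/flow-control arguments by design: DCN3.0 and above always disable MPC output rate control, but the hook keeps the mpc_funcs interface uniform so shared code can call it unconditionally. A hedged caller-side sketch:

/* Sketch: shared HWSS code can program the hook without knowing whether
 * the generation actually honors flow control (DCN30+ forces it off). */
if (mpc->funcs->set_out_rate_control)
        mpc->funcs->set_out_rate_control(mpc, opp_id, false, false, NULL);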
enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
{
/*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
@@ -1519,6 +1536,7 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
+ .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc3_program_shaper,
.acquire_rmu = mpcc3_acquire_rmu,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
index ce93003dae01..103f29900a2c 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
@@ -1085,6 +1085,13 @@ bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id);
+void mpc3_set_out_rate_control(
+ struct mpc *mpc,
+ int opp_id,
+ bool enable,
+ bool rate_2x_mode,
+ struct mpc_dwb_flow_control *flow_control);
+
void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 097d06023e64..19d5ebc6763c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
@@ -302,7 +302,6 @@ void optc1_program_timing(
/* Enable stereo - only when we need to pack 3D frame. Other types
* of stereo handled in explicit call
*/
-
if (optc->funcs->is_two_pixels_per_container(&patched_crtc_timing) || optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
@@ -1471,37 +1470,71 @@ bool optc1_configure_crc(struct timing_generator *optc,
if (!optc1_is_tg_enabled(optc))
return false;
- REG_WRITE(OTG_CRC_CNTL, 0);
+ if (!params->enable || params->reset)
+ REG_WRITE(OTG_CRC_CNTL, 0);
if (!params->enable)
return true;
/* Program frame boundaries */
- /* Window A x axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
- OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
- OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
-
- /* Window A y axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
- OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
-
- /* Window B x axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
- OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
- OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
-
- /* Window B y axis start and end. */
- REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
- OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
-
- /* Set crc mode and selection, and enable. Only using CRC0*/
- REG_UPDATE_3(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1);
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable.*/
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_X_CONTROL,
+ OTG_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_Y_CONTROL,
+ OTG_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_X_CONTROL,
+ OTG_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_Y_CONTROL,
+ OTG_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set crc mode and selection, and enable.*/
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
@@ -1510,6 +1543,7 @@ bool optc1_configure_crc(struct timing_generator *optc,
* optc1_get_crc - Capture CRC result per component
*
* @optc: timing_generator instance.
+ * @idx: index of crc engine to get CRC from
* @r_cr: 16-bit primary CRC signature for red data.
* @g_y: 16-bit primary CRC signature for green data.
* @b_cb: 16-bit primary CRC signature for blue data.
@@ -1521,7 +1555,7 @@ bool optc1_configure_crc(struct timing_generator *optc,
* If CRC is disabled, return false; otherwise, return true, and the CRC
* results in the parameters.
*/
-bool optc1_get_crc(struct timing_generator *optc,
+bool optc1_get_crc(struct timing_generator *optc, uint8_t idx,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t field = 0;
@@ -1533,14 +1567,30 @@ bool optc1_get_crc(struct timing_generator *optc,
if (!field)
return false;
- /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
- REG_GET_2(OTG_CRC0_DATA_RG,
- CRC0_R_CR, r_cr,
- CRC0_G_Y, g_y);
+ switch (idx) {
+ case 0:
+ /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
+ REG_GET_2(OTG_CRC0_DATA_RG,
+ CRC0_R_CR, r_cr,
+ CRC0_G_Y, g_y);
- /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
- REG_GET(OTG_CRC0_DATA_B,
- CRC0_B_CB, b_cb);
+ /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
+ REG_GET(OTG_CRC0_DATA_B,
+ CRC0_B_CB, b_cb);
+ break;
+ case 1:
+ /* OTG_CRC1_DATA_RG has the CRC16 results for the red and green component */
+ REG_GET_2(OTG_CRC1_DATA_RG,
+ CRC1_R_CR, r_cr,
+ CRC1_G_Y, g_y);
+
+ /* OTG_CRC1_DATA_B has the CRC16 results for the blue component */
+ REG_GET(OTG_CRC1_DATA_B,
+ CRC1_B_CB, b_cb);
+ break;
+ default:
+ return false;
+ }
return true;
}
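With a second CRC engine exposed, capture setup and readback are now paired by engine instance. A hedged usage sketch; the crc_params fields follow this series, while the window and selection values are illustrative only:

/* Sketch: program CRC engine 1 over window A and read back one frame's
 * per-component signatures. */
struct crc_params params = {
        .enable = true,
        .continuous_mode = true,
        .crc_eng_inst = 1,            /* second engine, new in this series */
        .windowa_x_start = 0, .windowa_x_end = 1920,
        .windowa_y_start = 0, .windowa_y_end = 1080,
};
uint32_t r_cr, g_y, b_cb;

if (tg->funcs->configure_crc(tg, &params) &&
    optc1_get_crc(tg, params.crc_eng_inst, &r_cr, &g_y, &b_cb))
        DC_LOG_DEBUG("CRC1: %04x %04x %04x\n", r_cr, g_y, b_cb);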
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index b7a57f98553d..159172178d51 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -86,6 +86,12 @@
SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_DATA_RG, OTG, inst),\
+ SRI(OTG_CRC1_DATA_B, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWA_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWA_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWB_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC1_WINDOWB_Y_CONTROL, OTG, inst),\
SR(GSL_SOURCE_SELECT),\
SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst)
@@ -202,6 +208,7 @@ struct dcn_optc_registers {
uint32_t OPTC_CLOCK_CONTROL;
uint32_t OPTC_WIDTH_CONTROL2;
uint32_t OTG_PSTATE_REGISTER;
+ uint32_t OTG_PIPE_UPDATE_STATUS;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -314,6 +321,7 @@ struct dcn_optc_registers {
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC1_SELECT, mask_sh),\
SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
@@ -326,6 +334,17 @@ struct dcn_optc_registers {
SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_RG, CRC1_R_CR, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_RG, CRC1_G_Y, mask_sh),\
+ SF(OTG0_OTG_CRC1_DATA_B, CRC1_B_CB, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_END, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\
SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
@@ -481,6 +500,7 @@ struct dcn_optc_registers {
type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\
type OTG_CRC_CONT_EN;\
type OTG_CRC0_SELECT;\
+ type OTG_CRC1_SELECT;\
type OTG_CRC_EN;\
type CRC0_R_CR;\
type CRC0_G_Y;\
@@ -566,6 +586,12 @@ struct dcn_optc_registers {
type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;\
type OPTC_DOUBLE_BUFFER_PENDING;\
+#define TG_REG_FIELD_LIST_DCN2_0(type) \
+ type OTG_FLIP_PENDING;\
+ type OTG_DC_REG_UPDATE_PENDING;\
+ type OTG_CURSOR_UPDATE_PENDING;\
+ type OTG_VUPDATE_KEEPOUT_STATUS;\
+
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
@@ -600,6 +626,7 @@ struct dcn_optc_registers {
struct dcn_optc_shift {
TG_REG_FIELD_LIST(uint8_t)
+ TG_REG_FIELD_LIST_DCN2_0(uint8_t)
TG_REG_FIELD_LIST_DCN3_2(uint8_t)
TG_REG_FIELD_LIST_DCN3_5(uint8_t)
TG_REG_FIELD_LIST_DCN401(uint8_t)
@@ -607,6 +634,7 @@ struct dcn_optc_shift {
struct dcn_optc_mask {
TG_REG_FIELD_LIST(uint32_t)
+ TG_REG_FIELD_LIST_DCN2_0(uint32_t)
TG_REG_FIELD_LIST_DCN3_2(uint32_t)
TG_REG_FIELD_LIST_DCN3_5(uint32_t)
TG_REG_FIELD_LIST_DCN401(uint32_t)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h
index 364034b19028..928e110b95fb 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h
@@ -43,7 +43,8 @@
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
SR(DWB_SOURCE_SELECT),\
SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst), \
- SRI(OTG_DRR_CONTROL, OTG, inst)
+ SRI(OTG_DRR_CONTROL, OTG, inst),\
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
#define TG_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
@@ -53,6 +54,10 @@
SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\
SF(OTG0_OTG_GLOBAL_CONTROL2, DIG_UPDATE_LOCATION, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\
SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
index abcd03d78668..4c95c0958612 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
@@ -271,6 +271,48 @@ void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
optc1->opp_count = opp_cnt;
}
+/* OPTC status register that indicates an OPTC double-buffer update is pending */
+bool optc3_get_optc_double_buffer_pending(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t update_pending = 0;
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_DOUBLE_BUFFER_PENDING,
+ &update_pending);
+
+ return (update_pending == 1);
+}
+
+/* OTG status register that indicates OTG update is pending */
+bool optc3_get_otg_update_pending(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t update_pending = 0;
+
+ REG_GET(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_UPDATE_PENDING,
+ &update_pending);
+
+ return (update_pending == 1);
+}
+
+/* OTG status register that indicates surface update is pending */
+bool optc3_get_pipe_update_pending(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t flip_pending = 0;
+ uint32_t dc_update_pending = 0;
+
+ REG_GET_2(OTG_PIPE_UPDATE_STATUS,
+ OTG_FLIP_PENDING,
+ &flip_pending,
+ OTG_DC_REG_UPDATE_PENDING,
+ &dc_update_pending);
+
+ return (flip_pending == 1 || dc_update_pending == 1);
+}
+
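These three predicates split what used to be a single double-buffer query into OPTC, OTG, and per-pipe pending state. A hedged polling sketch of how a caller might drain them before reprogramming; the timeout policy is invented:

/* Sketch: spin until no OPTC/OTG/pipe update is still latched, so a
 * subsequent programming burst cannot tear against an in-flight update. */
for (i = 0; i < 100; i++) {
        if (!tg->funcs->get_optc_double_buffer_pending(tg) &&
            !tg->funcs->get_otg_double_buffer_pending(tg) &&
            !tg->funcs->get_pipe_update_pending(tg))
                break;
        udelay(10);
}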
/**
* optc3_set_timing_double_buffer() - DRR double buffering control
*
@@ -375,6 +417,9 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_hw_timing = optc1_get_hw_timing,
.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
+ .get_otg_double_buffer_pending = optc3_get_otg_update_pending,
+ .get_pipe_update_pending = optc3_get_pipe_update_pending,
};
void dcn30_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h
index bda974d432ea..e2303f9eaf13 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h
@@ -109,7 +109,8 @@
SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
- SR(DWB_SOURCE_SELECT)
+ SR(DWB_SOURCE_SELECT),\
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
#define DCN30_VTOTAL_REGS_SF(mask_sh)
@@ -209,6 +210,7 @@
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_DOUBLE_BUFFER_PENDING, mask_sh),\
SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
@@ -319,7 +321,11 @@
SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\
- SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh)
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
void dcn30_timing_generator_init(struct optc *optc1);
@@ -356,4 +362,7 @@ void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc);
void optc3_tg_init(struct timing_generator *optc);
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
+bool optc3_get_optc_double_buffer_pending(struct timing_generator *optc);
+bool optc3_get_otg_update_pending(struct timing_generator *optc);
+bool optc3_get_pipe_update_pending(struct timing_generator *optc);
#endif /* __DC_OPTC_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
index 1a22ae89fb55..d7a45ef2d01b 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
@@ -169,6 +169,9 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_hw_timing = optc1_get_hw_timing,
.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
+ .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
+ .get_otg_double_buffer_pending = optc3_get_otg_update_pending,
+ .get_pipe_update_pending = optc3_get_pipe_update_pending,
};
void dcn301_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
index 30b81a448ce2..fbbe86d00c2e 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
@@ -99,7 +99,8 @@
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
SRI(OTG_CRC_CNTL2, OTG, inst),\
SR(DWB_SOURCE_SELECT),\
- SRI(OTG_DRR_CONTROL, OTG, inst)
+ SRI(OTG_DRR_CONTROL, OTG, inst),\
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
#define OPTC_COMMON_MASK_SH_LIST_DCN3_1(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
@@ -254,7 +255,11 @@
SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\
SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\
SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\
- SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
void dcn31_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
index 99c098e76116..0ff72b97b465 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
@@ -98,7 +98,8 @@
SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
- SRI(OTG_DRR_CONTROL, OTG, inst)
+ SRI(OTG_DRR_CONTROL, OTG, inst),\
+ SRI(OTG_PIPE_UPDATE_STATUS, OTG, inst)
#define OPTC_COMMON_MASK_SH_LIST_DCN3_14(mask_sh)\
SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
@@ -248,7 +249,11 @@
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
- SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh),\
void dcn314_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 00094f0e8470..c217f653b3c8 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -297,18 +297,6 @@ static void optc32_set_drr(
optc32_setup_manual_trigger(optc);
}
-bool optc32_get_double_buffer_pending(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t update_pending = 0;
-
- REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
- OPTC_DOUBLE_BUFFER_PENDING,
- &update_pending);
-
- return (update_pending == 1);
-}
-
static struct timing_generator_funcs dcn32_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -373,7 +361,9 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
- .get_double_buffer_pending = optc32_get_double_buffer_pending,
+ .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
+ .get_otg_double_buffer_pending = optc3_get_otg_update_pending,
+ .get_pipe_update_pending = optc3_get_pipe_update_pending,
};
void dcn32_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 665d7c52f67c..0b0964a9da74 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -177,7 +177,11 @@
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
- SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
+ SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
void dcn32_timing_generator_init(struct optc *optc1);
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
@@ -185,6 +189,5 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
-bool optc32_get_double_buffer_pending(struct timing_generator *optc);
#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index dfa9364fe5a6..d21e82b927d0 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -183,34 +183,87 @@ static bool optc35_configure_crc(struct timing_generator *optc,
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ /* Cannot configure crc on a CRTC that is disabled */
if (!optc1_is_tg_enabled(optc))
return false;
- REG_WRITE(OTG_CRC_CNTL, 0);
+
+ if (!params->enable || params->reset)
+ REG_WRITE(OTG_CRC_CNTL, 0);
+
if (!params->enable)
return true;
- REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
- OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
- OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
- OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
- OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
- OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
- OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
- REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
- OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
- OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
- if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0) {
- REG_UPDATE_4(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1,
- OTG_CRC_WINDOW_DB_EN, 1);
- } else
- REG_UPDATE_3(OTG_CRC_CNTL,
- OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
- OTG_CRC0_SELECT, params->selection,
- OTG_CRC_EN, 1);
+
+ /* Program frame boundaries */
+ switch (params->crc_eng_inst) {
+ case 0:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0)
+ REG_UPDATE_4(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1,
+ OTG_CRC_WINDOW_DB_EN, 1);
+ else
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ case 1:
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_X_CONTROL,
+ OTG_CRC1_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC1_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWA_Y_CONTROL,
+ OTG_CRC1_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC1_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_X_CONTROL,
+ OTG_CRC1_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC1_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC1_WINDOWB_Y_CONTROL,
+ OTG_CRC1_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC1_WINDOWB_Y_END, params->windowb_y_end);
+
+ if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0)
+ REG_UPDATE_4(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1,
+ OTG_CRC_WINDOW_DB_EN, 1);
+ else
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC1_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+ break;
+ default:
+ return false;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
index d077e2392379..be749ab41dce 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
@@ -67,7 +67,11 @@
SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\
SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\
SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\
- SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh)
+ SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
void dcn35_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index a5d6a7dca554..338a0cad23a5 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -315,7 +315,7 @@ void optc401_set_drr(
struct drr_params amended_params = { 0 };
bool program_manual_trigger = false;
- if (dc->caps.dmub_caps.fams_ver >= 2 && dc->debug.fams2_config.bits.enable) {
+ if (dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver && dc->debug.fams2_config.bits.enable) {
if (params != NULL &&
params->vertical_total_max > 0 &&
params->vertical_total_min > 0) {
@@ -380,7 +380,7 @@ void optc401_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, i
{
struct dc *dc = optc->ctx->dc;
- if (dc->caps.dmub_caps.fams_ver >= 2 && dc->debug.fams2_config.bits.enable) {
+ if (dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver && dc->debug.fams2_config.bits.enable) {
/* FAMS2 */
dc_dmub_srv_fams2_drr_update(dc, optc->inst,
vtotal_min,
@@ -430,6 +430,35 @@ static void optc401_program_global_sync(
REG_UPDATE(OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, pstate_keepout);
}
+static void optc401_set_vupdate_keepout(struct timing_generator *tg, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, optc1->vready_offset + 10,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, enable);
+
+ return;
+}
+
+static bool optc401_wait_update_lock_status(struct timing_generator *tg, bool locked)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+ uint32_t lock_status = 0;
+
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, locked,
+ 1, 150000);
+
+ REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &lock_status);
+
+ if (lock_status != locked)
+ return false;
+
+ return true;
+}
+
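The two new hooks pair naturally: the keepout blocks VUPDATE around the lock window, and the wait confirms the lock actually latched before registers are touched. A hedged ordering sketch, with all error handling and the actual register programming elided:

/* Sketch: guard a locked programming window on DCN4.01. */
tg->funcs->set_vupdate_keepout(tg, true);
tg->funcs->lock(tg);
if (!tg->funcs->wait_update_lock_status(tg, true))
        DC_LOG_ERROR("update lock did not latch\n");
/* ... program double-buffered registers ... */
tg->funcs->unlock(tg);
tg->funcs->set_vupdate_keepout(tg, false);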
static struct timing_generator_funcs dcn401_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -493,7 +522,11 @@ static struct timing_generator_funcs dcn401_tg_funcs = {
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
- .get_double_buffer_pending = optc32_get_double_buffer_pending,
+ .get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
+ .get_otg_double_buffer_pending = optc3_get_otg_update_pending,
+ .get_pipe_update_pending = optc3_get_pipe_update_pending,
+ .set_vupdate_keepout = optc401_set_vupdate_keepout,
+ .wait_update_lock_status = optc401_wait_update_lock_status,
};
void dcn401_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
index bb13a645802d..1be89571986f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h
@@ -159,7 +159,11 @@
SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_KEEPOUT_START, mask_sh),\
SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_EXTEND, mask_sh),\
SF(OTG0_OTG_PSTATE_REGISTER, OTG_UNBLANK, mask_sh),\
- SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh)
+ SF(OTG0_OTG_PSTATE_REGISTER, OTG_PSTATE_ALLOW_WIDTH_MIN, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_FLIP_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_DC_REG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_CURSOR_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_PIPE_UPDATE_STATUS, OTG_VUPDATE_KEEPOUT_STATUS, mask_sh)
void dcn401_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index 53a5f4cb648c..e698543ec937 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -623,7 +623,7 @@ static struct link_encoder *dce100_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
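The same one-line hardening repeats in every link_encoder_create() variant below (dce110 through dcn401): the caller-supplied enc_init_data->hpd_source indexes the fixed link_enc_hpd_regs[] table, so values past the table length are now rejected before the array is dereferenced. A self-contained sketch of the guard, with an illustrative four-entry table (ARRAY_SIZE is the standard kernel macro, reproduced here for a userspace build):

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* illustrative table; the driver's link_enc_hpd_regs[] is per-ASIC */
static const unsigned int hpd_regs[4] = { 0x10, 0x14, 0x18, 0x1c };

static const unsigned int *hpd_regs_for(size_t hpd_source)
{
	if (hpd_source >= ARRAY_SIZE(hpd_regs))
		return NULL;	/* reject before indexing, as the patch does */
	return &hpd_regs[hpd_source];
}

int main(void)
{
	printf("%p %p\n", (const void *)hpd_regs_for(1),
			  (const void *)hpd_regs_for(9));	/* second is NULL */
	return 0;
}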
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index 91da5cf85b69..035c6cfdaee5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -668,7 +668,7 @@ static struct link_encoder *dce110_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 162856c523e4..480a50967385 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -629,7 +629,7 @@ static struct link_encoder *dce112_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 621825a51f46..c63c59623433 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -706,7 +706,7 @@ static struct link_encoder *dce120_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index a73d3c6ef425..3d5113f010bb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -723,7 +723,7 @@ static struct link_encoder *dce80_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110)
+ if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index 563c5eec83ff..e92f14d50adb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -533,7 +533,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = false,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
/* raven smu doesn't allow 0 disp clk,
@@ -560,18 +559,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = false,
};
-static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = false,
- .force_abm_enable = false,
- .timing_trace = true,
- .clock_trace = true,
- .disable_stutter = true,
- .disable_pplib_clock_request = true,
- .disable_pplib_wm_range = true,
- .underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_legacy_fast_update = true,
-};
-
static void dcn10_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN10_DPP(*dpp));
@@ -751,7 +738,7 @@ static struct link_encoder *dcn10_link_encoder_create(
kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc10)
+ if (!enc10 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -1271,6 +1258,11 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
return NULL;
}
+unsigned int dcn10_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx)
+{
+ return pipe_ctx->pipe_dlg_param.vstartup_start;
+}
+
static const struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
@@ -1285,7 +1277,8 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
.patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1400,8 +1393,6 @@ static bool dcn10_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else
- dc->debug = debug_defaults_diags;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
index bf8e33cd8147..7bc1be53e800 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
@@ -51,6 +51,7 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
const struct resource_pool *pool,
struct dc_stream_state *stream);
+unsigned int dcn10_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx);
#endif /* __DC_RESOURCE_DCN10_H__ */
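dcn10_get_vstartup_for_pipe() gives the pre-DCN4 pools a common way to expose VSTARTUP, which older DML code keeps in pipe_dlg_param.vstartup_start while DCN401 (later in this series) reads global_sync.dcn4x.vstartup_lines. A sketch of how such a per-ASIC getter hides the layout difference behind the resource_funcs hook; the struct shapes here are illustrative, not the real dc headers:

#include <stdio.h>

struct pipe_ctx {
	unsigned int vstartup_start;	/* pre-DCN4 DML location */
	unsigned int vstartup_lines;	/* DCN4.x DML location */
};

struct resource_funcs {
	unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx);
};

static unsigned int pre_dcn4_get(struct pipe_ctx *p) { return p->vstartup_start; }
static unsigned int dcn401_get(struct pipe_ctx *p) { return p->vstartup_lines; }

int main(void)
{
	struct pipe_ctx p = { .vstartup_start = 26, .vstartup_lines = 32 };
	struct resource_funcs pools[2] = { { pre_dcn4_get }, { dcn401_get } };

	/* callers need not know which DML generation filled the pipe ctx */
	printf("%u %u\n", pools[0].get_vstartup_for_pipe(&p),
	       pools[1].get_vstartup_for_pipe(&p));
	return 0;
}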
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index eea2b3b307cd..5c6dc710e96c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -706,7 +706,6 @@ static const struct resource_caps res_cap_nv14 = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = false,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
@@ -920,7 +919,7 @@ struct link_encoder *dcn20_link_encoder_create(
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -1510,41 +1509,12 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->prev_odm_pipe = prev_odm_pipe;
if (prev_odm_pipe->plane_state) {
- struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
- int new_width;
-
- /* HACTIVE halved for odm combine */
- sd->h_active /= 2;
- /* Calculate new vp and recout for left pipe */
- /* Need at least 16 pixels width per side */
- if (sd->recout.x + 16 >= sd->h_active)
- return false;
- new_width = sd->h_active - sd->recout.x;
- sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->recout.width - new_width));
- sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->recout.width - new_width));
- sd->recout.width = new_width;
-
- /* Calculate new vp and recout for right pipe */
- sd = &next_odm_pipe->plane_res.scl_data;
- /* HACTIVE halved for odm combine */
- sd->h_active /= 2;
- /* Need at least 16 pixels width per side */
- if (new_width <= 16)
- return false;
- new_width = sd->recout.width + sd->recout.x - sd->h_active;
- sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->recout.width - new_width));
- sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->recout.width - new_width));
- sd->recout.width = new_width;
- sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz, sd->h_active - sd->recout.x));
- sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
- sd->ratios.horz_c, sd->h_active - sd->recout.x));
- sd->recout.x = 0;
+ if (!resource_build_scaling_params(prev_odm_pipe) ||
+ !resource_build_scaling_params(next_odm_pipe)) {
+ return false;
+ }
}
+
if (!next_odm_pipe->top_pipe)
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
else
@@ -2133,6 +2103,7 @@ bool dcn20_fast_validate_bw(
ASSERT(0);
}
}
+
/* Actual dsc count per stream dsc validation*/
if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
@@ -2258,7 +2229,8 @@ static const struct resource_funcs dcn20_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index fc54483b9104..43fa2cb117f3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -59,8 +59,8 @@
#include "cyan_skillfish_ip_offset.h"
-#include "dcn/dcn_2_0_3_offset.h"
-#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dcn/dcn_2_0_1_offset.h"
+#include "dcn/dcn_2_0_1_sh_mask.h"
#include "dpcs/dpcs_2_0_3_offset.h"
#include "dpcs/dpcs_2_0_3_sh_mask.h"
@@ -600,7 +600,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
@@ -797,7 +796,7 @@ static struct link_encoder *dcn201_link_encoder_create(
kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC);
struct dcn10_link_encoder *enc10;
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
enc10 = &enc20->enc10;
@@ -1080,7 +1079,8 @@ static struct resource_funcs dcn201_res_pool_funcs = {
.populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
- .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn201_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 347e6aaea582..2615c36d5ffe 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -610,7 +610,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = false,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
@@ -1298,7 +1297,7 @@ static struct link_encoder *dcn21_link_encoder_create(
kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc21)
+ if (!enc21 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -1379,6 +1378,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
.get_panel_config_defaults = dcn21_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn21_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 5040a4c6ed18..13202ce30d66 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -711,7 +711,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true, //No DMCU on DCN30
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
@@ -927,7 +926,7 @@ static struct link_encoder *dcn30_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn30_link_encoder_construct(enc20,
@@ -2251,6 +2250,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
.update_bw_bounding_box = dcn30_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn30_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
#define CTX ctx
@@ -2354,6 +2354,7 @@ static bool dcn30_resource_construct(
dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* read VBIOS LTTPR caps */
{
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 7d04739c3ba1..121a86a59833 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -671,9 +671,9 @@ static const struct dc_plane_cap plane_cap = {
/* 2.79:1 downscaling ratio: 1000/2.79 = 358.4 */
.max_downscale_factor = {
- .argb8888 = 167,
- .nv12 = 167,
- .fp16 = 167
+ .argb8888 = 358,
+ .nv12 = 358,
+ .fp16 = 358
},
64,
64
@@ -682,7 +682,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_dpp_power_gate = false,
.disable_hubp_power_gate = false,
@@ -694,7 +693,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 7680,/*upto 8K*/
+ .max_downscale_src_width = 4096, /* up to true 4K */
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
@@ -883,7 +882,7 @@ static struct link_encoder *dcn301_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn301_link_encoder_construct(enc20,
@@ -1401,7 +1400,8 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn301_update_bw_bounding_box,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn301_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 5791b5cc2875..012c5fd52cb1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -81,7 +81,6 @@
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
@@ -893,7 +892,7 @@ static struct link_encoder *dcn302_link_encoder_create(
{
struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature,
@@ -1152,6 +1151,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
.update_bw_bounding_box = dcn302_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn302_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct dc_cap_funcs cap_funcs = {
@@ -1234,6 +1234,7 @@ static bool dcn302_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 63f0f882c861..a8d0b4686f9a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -82,7 +82,6 @@
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID,
@@ -839,7 +838,7 @@ static struct link_encoder *dcn303_link_encoder_create(
{
struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature,
@@ -1097,6 +1096,7 @@ static struct resource_funcs dcn303_res_pool_funcs = {
.update_bw_bounding_box = dcn303_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn303_get_panel_config_defaults,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct dc_cap_funcs cap_funcs = {
@@ -1179,6 +1179,7 @@ static bool dcn303_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index ac8cb20e2e3b..911bd60d4fbc 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -858,7 +858,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
@@ -869,7 +868,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.max_downscale_src_width = 4096, /* up to true 4K */
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
- .sanity_checks = true,
+ .sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
@@ -1093,7 +1092,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn31_link_encoder_construct(enc20,
@@ -1721,6 +1720,12 @@ int dcn31_populate_dml_pipes_from_context(
return pipe_cnt;
}
+unsigned int dcn31_get_det_buffer_size(
+ const struct dc_state *context)
+{
+ return context->bw_ctx.dml.ip.det_buffer_size_kbytes;
+}
+
void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1843,6 +1848,8 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.update_bw_bounding_box = dcn31_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn31_get_panel_config_defaults,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index 901436591ed4..551ad912f7be 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
@@ -63,6 +63,9 @@ struct resource_pool *dcn31_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
+unsigned int dcn31_get_det_buffer_size(
+ const struct dc_state *context);
+
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 169924d0a839..e3ba105034f8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -876,7 +876,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.replay_skip_crtc_disabled = true,
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_dpp_power_gate = false,
.disable_hubp_power_gate = false,
@@ -889,7 +888,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.max_downscale_src_width = 4096, /* up to true 4K */
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
- .sanity_checks = true,
+ .sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
@@ -1149,7 +1148,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn31_link_encoder_construct(enc20,
@@ -1778,6 +1777,8 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 3f4b9dba4112..14acef036b5a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -858,7 +858,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true, /* hw does not support it */
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
@@ -1091,7 +1090,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn31_link_encoder_construct(enc20,
@@ -1812,6 +1811,11 @@ static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_confi
*panel_config = panel_config_defaults;
}
+static int dcn315_get_power_profile(const struct dc_state *context)
+{
+ return !context->bw_ctx.bw.dcn.clk.p_state_change_support;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1840,6 +1844,9 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.update_bw_bounding_box = dcn315_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
+ .get_power_profile = dcn315_get_power_profile,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 5fd52c5fcee4..568094827212 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -853,7 +853,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = true, /* hw does not support it */
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
@@ -1085,7 +1084,7 @@ static struct link_encoder *dcn31_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
dcn31_link_encoder_construct(enc20,
@@ -1720,6 +1719,8 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.update_bw_bounding_box = dcn316_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn316_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index a124ad9bd108..664302876019 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -689,7 +689,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_AVOID, // Due to CRB, MPC split is no longer needed
@@ -1039,7 +1038,7 @@ static struct link_encoder *dcn32_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
#undef REG_STRUCT
@@ -1990,6 +1989,10 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
return 0;
}
+ if (dc->caps.max_cab_allocation_bytes == 0) {
+ return 0xffffffff;
+ }
+
/* add 2 lines for worst case alignment */
cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
@@ -2063,6 +2066,7 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -2186,6 +2190,7 @@ static bool dcn32_resource_construct(
dc->caps.dmcub_support = true;
dc->caps.seamless_odm = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -2800,6 +2805,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head(
free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx];
free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx];
free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst;
+ free_pipe->hblank_borrow = otg_master->hblank_borrow;
if (free_pipe->stream->timing.flags.DSC == 1) {
dcn20_acquire_dsc(free_pipe->stream->ctx->dc,
&new_ctx->res_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 7901792afb7b..86c6e5e8c42e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -1054,7 +1054,8 @@ unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned
SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \
SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
- SRI_ARR(OTG_DRR_CONTROL, OTG, inst)
+ SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst)
/* HUBP */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 827a94f84f10..38d76434683e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -686,7 +686,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_AVOID,
@@ -1035,7 +1034,7 @@ static struct link_encoder *dcn321_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
#undef REG_STRUCT
@@ -1625,6 +1624,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.add_phantom_pipes = dcn32_add_phantom_pipes,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1743,6 +1743,7 @@ static bool dcn321_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 893a9d9ee870..8ee3d99ea2aa 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -712,7 +712,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_AVOID,
@@ -1074,7 +1073,7 @@ static struct link_encoder *dcn35_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
#undef REG_STRUCT
@@ -1753,6 +1752,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
return out;
}
+enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+ plane_state->tiling_info.gfxversion = DcGfxVersion9;
+ dcn20_patch_unknown_plane_state(plane_state);
+ return DC_OK;
+}
+
static struct resource_funcs dcn35_res_pool_funcs = {
.destroy = dcn35_destroy_resource_pool,
@@ -1776,9 +1782,11 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn35_resource_construct(
@@ -1850,6 +1858,7 @@ static bool dcn35_resource_construct(
dc->caps.zstate_support = true;
dc->caps.ips_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index f97bb4cb3761..9d03a55d90cf 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -35,6 +35,7 @@
extern struct _vcs_dpi_ip_params_st dcn3_5_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc;
+enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state);
struct dcn35_resource_pool {
struct resource_pool base;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 70abd32ce2ad..14f7c3acdc96 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -692,7 +692,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_AVOID,
@@ -1054,7 +1053,7 @@ static struct link_encoder *dcn35_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
#undef REG_STRUCT
@@ -1755,9 +1754,11 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn351_update_bw_bounding_box_fpu,
- .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .patch_unknown_plane_state = dcn35_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
+ .get_det_buffer_size = dcn31_get_det_buffer_size,
+ .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe
};
static bool dcn351_resource_construct(
@@ -1829,6 +1830,7 @@ static bool dcn351_resource_construct(
dc->caps.zstate_support = true;
dc->caps.ips_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 9d56fbdcd06a..c1ebc6b1c937 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -685,7 +685,6 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_dmcu = true,
.force_abm_enable = false,
- .timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_AVOID,
@@ -727,6 +726,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_unbounded_requesting = false,
.enable_legacy_fast_update = false,
.dcc_meta_propagation_delay_us = 10,
+ .fams_version = {
+ .minor = 1,
+ .major = 2,
+ }, //v2.1
.fams2_config = {
.bits = {
.enable = true,
@@ -734,7 +737,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_stall_recovery = true,
}
},
- .force_cositing = CHROMA_COSITING_TOPLEFT + 1,
+ .force_cositing = CHROMA_COSITING_NONE + 1,
};
static struct dce_aux *dcn401_aux_engine_create(
@@ -1032,7 +1035,7 @@ static struct link_encoder *dcn401_link_encoder_create(
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
- if (!enc20)
+ if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
#undef REG_STRUCT
@@ -1294,6 +1297,29 @@ static struct hpo_dp_link_encoder *dcn401_hpo_dp_link_encoder_create(
return &hpo_dp_enc31->base;
}
+static unsigned int dcn401_calc_num_avail_chans_for_mall(struct dc *dc, unsigned int num_chans)
+{
+ unsigned int num_available_chans = 1;
+
+ /* channels for MALL must be a power of 2 */
+ while (num_chans > 1) {
+ num_available_chans = (num_available_chans << 1);
+ num_chans = (num_chans >> 1);
+ }
+
+ /* cannot be odd */
+ num_available_chans &= ~1;
+
+ /* clamp to max available channels for MALL per ASIC */
+ if (ASICREV_IS_GC_12_0_0_A0(dc->ctx->asic_id.hw_internal_rev)) {
+ num_available_chans = num_available_chans > 16 ? 16 : num_available_chans;
+ } else if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev)) {
+ num_available_chans = num_available_chans > 8 ? 8 : num_available_chans;
+ }
+
+ return num_available_chans;
+}
+
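dcn401_calc_num_avail_chans_for_mall() rounds the channel count down to a power of two (the shift loop yields the largest power of two not exceeding num_chans), masks off the low bit (which only matters for the degenerate single-channel case, forcing it to zero), then clamps to 16 or 8 channels depending on the GC 12.0 revision. A stand-alone check of the rounding step only, with the low-bit mask and the per-ASIC clamp left out:

#include <assert.h>
#include <stdio.h>

/* largest power of two <= num_chans, as in the loop above */
static unsigned int round_down_pow2(unsigned int num_chans)
{
	unsigned int avail = 1;

	while (num_chans > 1) {
		avail <<= 1;		/* 12 -> 6 -> 3 -> 1 yields 8 */
		num_chans >>= 1;
	}
	return avail;
}

int main(void)
{
	assert(round_down_pow2(12) == 8);
	assert(round_down_pow2(16) == 16);
	assert(round_down_pow2(1) == 1);	/* driver then masks this to 0 */
	printf("ok\n");
	return 0;
}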
static struct dce_hwseq *dcn401_hwseq_create(
struct dc_context *ctx)
{
@@ -1579,7 +1605,8 @@ static void dcn401_destroy_resource_pool(struct resource_pool **pool)
}
static struct dc_cap_funcs cap_funcs = {
- .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+ .get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
+ .get_subvp_en = dcn32_subvp_in_use,
};
static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -1588,6 +1615,14 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options));
+ /* re-calculate the available MALL size if required */
+ if (bw_params->num_channels > 0) {
+ dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall(
+ dc, bw_params->num_channels) *
+ dc->caps.mall_size_per_mem_channel * 1024 * 1024;
+ dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
+ }
+
DC_FP_START();
dcn401_update_bw_bounding_box_fpu(dc, bw_params);
@@ -1605,6 +1640,7 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
+ plane_state->tiling_info.gfxversion = DcGfxAddr3;
plane_state->tiling_info.gfx_addr3.swizzle = DC_ADDR3_SW_64KB_2D;
return DC_OK;
}
@@ -1688,6 +1724,27 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
}
}
+static int dcn401_get_power_profile(const struct dc_state *context)
+{
+ int uclk_mhz = context->bw_ctx.bw.dcn.clk.dramclk_khz / 1000;
+ int dpm_level = 0;
+
+ for (int i = 0; i < context->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
+ if (context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz == 0 ||
+ uclk_mhz < context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
+ break;
+ if (uclk_mhz > context->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
+ dpm_level++;
+ }
+
+ return dpm_level;
+}
+
+static unsigned int dcn401_get_vstartup_for_pipe(struct pipe_ctx *pipe_ctx)
+{
+ return pipe_ctx->global_sync.dcn4x.vstartup_lines;
+}
+
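dcn401_get_power_profile() above converts the committed UCLK into a memclk DPM level: it walks the clock table in ascending order, stops at an empty entry or the first level at or above the current clock, and counts the levels strictly below it. A self-contained trace of the same loop, with an illustrative three-level table:

#include <stdio.h>

static int dpm_level_for_uclk(const int *memclk_mhz, int n, int uclk_mhz)
{
	int dpm_level = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* empty entry or uclk below this level: table walk is done */
		if (memclk_mhz[i] == 0 || uclk_mhz < memclk_mhz[i])
			break;
		if (uclk_mhz > memclk_mhz[i])
			dpm_level++;
	}
	return dpm_level;
}

int main(void)
{
	const int table[3] = { 400, 800, 1200 };

	/* 800 MHz sits exactly on level 1: one level (400) is below it */
	printf("%d\n", dpm_level_for_uclk(table, 3, 800));
	return 0;
}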
static struct resource_funcs dcn401_res_pool_funcs = {
.destroy = dcn401_destroy_resource_pool,
.link_enc_create = dcn401_link_encoder_create,
@@ -1714,6 +1771,8 @@ static struct resource_funcs dcn401_res_pool_funcs = {
.prepare_mcache_programming = dcn401_prepare_mcache_programming,
.build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params,
.calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes,
+ .get_power_profile = dcn401_get_power_profile,
+ .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@ -1795,14 +1854,12 @@ static bool dcn401_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
- /* total size = mall per channel * num channels * 1024 * 1024 */
- dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
/* Calculate the available MALL space */
- dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
+ dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall(
dc, dc->ctx->dc_bios->vram_info.num_chans) *
dc->caps.mall_size_per_mem_channel * 1024 * 1024;
dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
@@ -1826,6 +1883,7 @@ static bool dcn401_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
+ dc->caps.vtotal_limited_by_fp2 = true;
if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev))
dc->caps.dcc_plane_width_limit = 7680;
@@ -1867,6 +1925,7 @@ static bool dcn401_resource_construct(
dc->config.prefer_easf = true;
dc->config.dc_mode_clk_limit_support = true;
dc->config.enable_windowed_mpo_odm = true;
+ dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
@@ -2132,6 +2191,7 @@ static bool dcn401_resource_construct(
/* SPL */
spl_init_easf_filter_coeffs();
spl_init_blur_scale_coeffs();
+ dc->caps.scl_caps.sharpener_support = true;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 514d1ce20df9..19568c359669 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -536,8 +536,9 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
SRI_ARR(OPTC_WIDTH_CONTROL2, ODM, inst), \
SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
- SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
- SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst)
+ SRI_ARR(OTG_DRR_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_PSTATE_REGISTER, OTG, inst), \
+ SRI_ARR(OTG_PIPE_UPDATE_STATUS, OTG, inst)
/* HUBBUB */
#define HUBBUB_REG_LIST_DCN4_01_RI(id) \
@@ -609,7 +610,10 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SR(DCHUBBUB_CLOCK_CNTL), \
SR(DCHUBBUB_SDPIF_CFG0), \
SR(DCHUBBUB_SDPIF_CFG1), \
- SR(DCHUBBUB_MEM_PWR_MODE_CTRL)
+ SR(DCHUBBUB_MEM_PWR_MODE_CTRL), \
+ SR(DCHUBBUB_TIMEOUT_DETECTION_CTRL1), \
+ SR(DCHUBBUB_TIMEOUT_DETECTION_CTRL2), \
+ SR(DCHUBBUB_CTRL_STATUS)
/* DCCG */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
index 014e8a296f0c..38a9a0d68058 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
@@ -11,6 +11,41 @@
#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19))
#define MIN_VIEWPORT_SIZE 12
+static bool spl_is_yuv420(enum spl_pixel_format format)
+{
+ if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
+ (format <= SPL_PIXEL_FORMAT_420BPP10))
+ return true;
+
+ return false;
+}
+
+static bool spl_is_rgb8(enum spl_pixel_format format)
+{
+ if (format == SPL_PIXEL_FORMAT_ARGB8888)
+ return true;
+
+ return false;
+}
+
+static bool spl_is_video_format(enum spl_pixel_format format)
+{
+ if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
+ && format <= SPL_PIXEL_FORMAT_VIDEO_END)
+ return true;
+ else
+ return false;
+}
+
+static bool spl_is_subsampled_format(enum spl_pixel_format format)
+{
+ if (format >= SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN
+ && format <= SPL_PIXEL_FORMAT_SUBSAMPLED_END)
+ return true;
+ else
+ return false;
+}
+
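The four spl_is_*() predicates moved up here so they are visible to the whole file; all of them assume the spl_pixel_format enum keeps each family inside a contiguous [.._BEGIN, .._END] range. An illustrative mirror of that idiom (the enum values are invented for the sketch, not the real SPL definitions):

enum fmt {
	FMT_ARGB8888,
	FMT_VIDEO_BEGIN,
	FMT_420BPP8 = FMT_VIDEO_BEGIN,
	FMT_420BPP10,
	FMT_VIDEO_END = FMT_420BPP10,
};

static int is_video(enum fmt f)
{
	/* a new format added outside the range silently breaks this test,
	 * so new entries must land between BEGIN and END */
	return f >= FMT_VIDEO_BEGIN && f <= FMT_VIDEO_END;
}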
static struct spl_rect intersect_rec(const struct spl_rect *r0, const struct spl_rect *r1)
{
struct spl_rect rec;
@@ -99,7 +134,7 @@ static struct spl_rect calculate_plane_rec_in_timing_active(
*
* recout_x = 128 + round(plane_x * 2304 / 1920)
* recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x
- * recout_y = 0 + round(plane_y * 1440 / 1280)
+ * recout_y = 0 + round(plane_y * 1440 / 1200)
* recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y
*
* NOTE: fixed point division is not error free. To reduce errors
@@ -137,15 +172,32 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
struct spl_in *spl_in,
struct spl_rect *plane_clip_rec)
{
- int mpc_slice_count = spl_in->basic_in.mpc_combine_h;
- int mpc_slice_idx = spl_in->basic_in.mpc_combine_v;
+ bool use_recout_width_aligned =
+ spl_in->basic_in.num_h_slices_recout_width_align.use_recout_width_aligned;
+ int mpc_slice_count =
+ spl_in->basic_in.num_h_slices_recout_width_align.num_slices_recout_width.mpc_num_h_slices;
+ int recout_width_align =
+ spl_in->basic_in.num_h_slices_recout_width_align.num_slices_recout_width.mpc_recout_width_align;
+ int mpc_slice_idx = spl_in->basic_in.mpc_h_slice_index;
int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
struct spl_rect mpc_rec;
- mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
- mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
- mpc_rec.height = plane_clip_rec->height;
- mpc_rec.y = plane_clip_rec->y;
+ if (use_recout_width_aligned) {
+ mpc_rec.width = recout_width_align;
+ if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) {
+ mpc_rec.width = plane_clip_rec->width % recout_width_align;
+ mpc_rec.x = plane_clip_rec->x + recout_width_align * mpc_slice_idx;
+ } else
+ mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+
+ } else {
+ mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
+ mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
+ mpc_rec.height = plane_clip_rec->height;
+ mpc_rec.y = plane_clip_rec->y;
+ }
SPL_ASSERT(mpc_slice_count == 1 ||
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE ||
mpc_rec.width % 2 == 0);
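With use_recout_width_aligned set, slices are cut at a fixed recout_width_align instead of an even division of the plane width: every slice but the last is exactly the alignment wide, and the last one takes the remainder. A small numeric check of that branch (slice() is an illustrative reduction of the code above, not the driver function):

#include <stdio.h>

static void slice(int w, int x, int align, int idx, int *sw, int *sx)
{
	*sx = x + align * idx;
	/* full columns first, remainder in the final slice */
	*sw = (align * (idx + 1) > w) ? (w % align) : align;
}

int main(void)
{
	int sw, sx;

	slice(1000, 0, 512, 0, &sw, &sx);	/* -> 512 @ 0   */
	printf("%d@%d ", sw, sx);
	slice(1000, 0, 512, 1, &sw, &sx);	/* -> 488 @ 512 */
	printf("%d@%d\n", sw, sx);
	return 0;
}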
@@ -391,8 +443,7 @@ static void spl_calculate_scaling_ratios(struct spl_in *spl_in,
spl_scratch->scl_data.ratios.horz_c = spl_scratch->scl_data.ratios.horz;
spl_scratch->scl_data.ratios.vert_c = spl_scratch->scl_data.ratios.vert;
- if (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
- || spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) {
+ if (spl_is_yuv420(spl_in->basic_in.format)) {
spl_scratch->scl_data.ratios.horz_c.value /= 2;
spl_scratch->scl_data.ratios.vert_c.value /= 2;
}
@@ -529,23 +580,6 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
*vp_offset = src_size - *vp_offset - *vp_size;
}
-static bool spl_is_yuv420(enum spl_pixel_format format)
-{
- if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
- (format <= SPL_PIXEL_FORMAT_420BPP10))
- return true;
-
- return false;
-}
-
-static bool spl_is_rgb8(enum spl_pixel_format format)
-{
- if (format == SPL_PIXEL_FORMAT_ARGB8888)
- return true;
-
- return false;
-}
-
/*Calculate inits and viewport */
static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
struct spl_scratch *spl_scratch)
@@ -556,8 +590,7 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
struct spl_rect recout_clip_in_recout_dst;
struct spl_rect overlap_in_active_timing;
struct spl_rect odm_slice = calculate_odm_slice_in_timing_active(spl_in);
- int vpc_div = (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
- || spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ int vpc_div = spl_is_subsampled_format(spl_in->basic_in.format) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
struct spl_fixed31_32 init_adj_h = spl_fixpt_zero;
struct spl_fixed31_32 init_adj_v = spl_fixpt_zero;
@@ -585,12 +618,7 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
&flip_vert_scan_dir,
&flip_horz_scan_dir);
- if (orthogonal_rotation) {
- spl_swap(src.width, src.height);
- spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
- }
-
- if (spl_is_yuv420(spl_in->basic_in.format)) {
+ if (spl_is_subsampled_format(spl_in->basic_in.format)) {
/* this gives the direction of the cositing (negative will move
* left, right otherwise)
*/
@@ -598,15 +626,15 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
switch (spl_in->basic_in.cositing) {
- case CHROMA_COSITING_LEFT:
- init_adj_h = spl_fixpt_zero;
+ case CHROMA_COSITING_TOPLEFT:
+ init_adj_h = spl_fixpt_from_fraction(sign, 4);
init_adj_v = spl_fixpt_from_fraction(sign, 4);
break;
- case CHROMA_COSITING_NONE:
+ case CHROMA_COSITING_LEFT:
init_adj_h = spl_fixpt_from_fraction(sign, 4);
- init_adj_v = spl_fixpt_from_fraction(sign, 4);
+ init_adj_v = spl_fixpt_zero;
break;
- case CHROMA_COSITING_TOPLEFT:
+ case CHROMA_COSITING_NONE:
default:
init_adj_h = spl_fixpt_zero;
init_adj_v = spl_fixpt_zero;
@@ -614,6 +642,12 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
}
}
+ if (orthogonal_rotation) {
+ spl_swap(src.width, src.height);
+ spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
+ spl_swap(init_adj_h, init_adj_v);
+ }
+
spl_calculate_init_and_vp(
flip_horz_scan_dir,
recout_clip_in_recout_dst.x,
@@ -678,7 +712,7 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
* since 3d is special and needs to calculate vp as if there is no recout offset
* This may break with rotation, good thing we aren't mixing hw rotation and 3d
*/
- if (spl_in->basic_in.mpc_combine_v) {
+ if (spl_in->basic_in.mpc_h_slice_index) {
SPL_ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 ||
(spl_in->basic_out.view_format != SPL_VIEW_3D_TOP_AND_BOTTOM &&
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE));
@@ -698,24 +732,6 @@ static void spl_clamp_viewport(struct spl_rect *viewport)
viewport->width = MIN_VIEWPORT_SIZE;
}
-static bool spl_dscl_is_420_format(enum spl_pixel_format format)
-{
- if (format == SPL_PIXEL_FORMAT_420BPP8 ||
- format == SPL_PIXEL_FORMAT_420BPP10)
- return true;
- else
- return false;
-}
-
-static bool spl_dscl_is_video_format(enum spl_pixel_format format)
-{
- if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
- && format <= SPL_PIXEL_FORMAT_VIDEO_END)
- return true;
- else
- return false;
-}
-
static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
const struct spl_scaler_data *data,
bool enable_isharp, bool enable_easf)
@@ -732,21 +748,20 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
&& !enable_isharp)
return SCL_MODE_SCALING_444_BYPASS;
- if (!spl_dscl_is_420_format(pixel_format)) {
- if (spl_dscl_is_video_format(pixel_format))
+ if (!spl_is_subsampled_format(pixel_format)) {
+ if (spl_is_video_format(pixel_format))
return SCL_MODE_SCALING_444_YCBCR_ENABLE;
else
return SCL_MODE_SCALING_444_RGB_ENABLE;
}
- /* Bypass YUV if at 1:1 with no ISHARP or if doing 2:1 YUV
- * downscale without EASF
+ /*
+ * Bypass YUV if Y is 1:1 with no ISHARP
+ * Do not bypass UV at 1:1 for cositing to be applied
*/
- if ((!enable_isharp) && (!enable_easf)) {
+ if (!enable_isharp) {
if (data->ratios.horz.value == one && data->ratios.vert.value == one)
return SCL_MODE_SCALING_420_LUMA_BYPASS;
- if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
- return SCL_MODE_SCALING_420_CHROMA_BYPASS;
}
return SCL_MODE_SCALING_420_YCBCR_ENABLE;
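After this change, only the luma path may bypass scaling, and only at 1:1 with ISHARP off; the chroma 1:1 bypass is removed so the cositing phase adjustments computed earlier always reach the chroma scaler. A sketch of the resulting decision for subsampled sources (the enum names are shorthand, not the driver's):

enum mode { LUMA_BYPASS, YCBCR_ENABLE };

static enum mode pick_mode(int luma_ratio_is_1to1, int isharp_en)
{
	/* chroma bypass is gone: UV is always scaled so cositing applies */
	if (!isharp_en && luma_ratio_is_1to1)
		return LUMA_BYPASS;
	return YCBCR_ENABLE;
}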
@@ -757,7 +772,7 @@ static bool spl_choose_lls_policy(enum spl_pixel_format format,
enum spl_transfer_func_predefined tf_predefined_type,
enum linear_light_scaling *lls_pref)
{
- if (spl_is_yuv420(format)) {
+ if (spl_is_video_format(format)) {
*lls_pref = LLS_PREF_NO;
if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
(tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
@@ -816,7 +831,7 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
/* Check if video is in fullscreen mode */
static bool spl_is_video_fullscreen(struct spl_in *spl_in)
{
- if (spl_is_yuv420(spl_in->basic_in.format) && spl_in->is_fullscreen)
+ if (spl_is_video_format(spl_in->basic_in.format) && spl_in->is_fullscreen)
return true;
return false;
}
@@ -847,14 +862,14 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
* Apply sharpness to RGB and YUV (NV12/P010)
* surfaces based on policy setting
*/
- if (!spl_is_yuv420(spl_in->basic_in.format) &&
- (spl_in->debug.sharpen_policy == SHARPEN_YUV))
+ if (!spl_is_video_format(spl_in->basic_in.format) &&
+ (spl_in->sharpen_policy == SHARPEN_YUV))
return enable_isharp;
- else if ((spl_is_yuv420(spl_in->basic_in.format) && !fullscreen) &&
- (spl_in->debug.sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV))
+ else if ((spl_is_video_format(spl_in->basic_in.format) && !fullscreen) &&
+ (spl_in->sharpen_policy == SHARPEN_RGB_FULLSCREEN_YUV))
return enable_isharp;
else if (!spl_in->is_fullscreen &&
- spl_in->debug.sharpen_policy == SHARPEN_FULLSCREEN_ALL)
+ spl_in->sharpen_policy == SHARPEN_FULLSCREEN_ALL)
return enable_isharp;
/*
@@ -868,6 +883,60 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
return enable_isharp;
}
+/* Calculate number of tap with adaptive scaling off */
+static void spl_get_taps_non_adaptive_scaler(
+ struct spl_scratch *spl_scratch, const struct spl_taps *in_taps)
+{
+ if (in_taps->h_taps == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1)
+ spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.horz), 8);
+ else
+ spl_scratch->scl_data.taps.h_taps = 4;
+ } else
+ spl_scratch->scl_data.taps.h_taps = in_taps->h_taps;
+
+ if (in_taps->v_taps == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1)
+ spl_scratch->scl_data.taps.v_taps = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.vert), 8);
+ else
+ spl_scratch->scl_data.taps.v_taps = 4;
+ } else
+ spl_scratch->scl_data.taps.v_taps = in_taps->v_taps;
+
+ if (in_taps->v_taps_c == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1)
+ spl_scratch->scl_data.taps.v_taps_c = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.vert_c), 8);
+ else
+ spl_scratch->scl_data.taps.v_taps_c = 4;
+ } else
+ spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c;
+
+ if (in_taps->h_taps_c == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1)
+ spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.horz_c), 8);
+ else
+ spl_scratch->scl_data.taps.h_taps_c = 4;
+ } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ /* Only 1 and even h_taps_c are supported by hw */
+ spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1;
+ else
+ spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
+ spl_scratch->scl_data.taps.h_taps = 1;
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
+ spl_scratch->scl_data.taps.v_taps = 1;
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+ spl_scratch->scl_data.taps.h_taps_c = 1;
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+ spl_scratch->scl_data.taps.v_taps_c = 1;
+}
+
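spl_get_taps_non_adaptive_scaler() factors out the tap rule that used to be open-coded in the skip_easf branch below: with adaptive scaling off, downscales get min(2 * ceil(ratio), 8) taps, upscales get 4, and an exact 1:1 ratio collapses to 1 tap (bypass). A stand-alone check using a ratio scaled by 100 in place of the driver's fixed-point type:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

static int taps_for_ratio_x100(int ratio_x100)
{
	int ceil_ratio = (ratio_x100 + 99) / 100;

	if (ratio_x100 == 100)
		return 1;			/* identity: bypass */
	if (ceil_ratio > 1)
		return min_int(2 * ceil_ratio, 8);	/* downscale */
	return 4;				/* upscale */
}

int main(void)
{
	printf("%d %d %d\n", taps_for_ratio_x100(320),	/* 3.2:1 -> 8 */
			     taps_for_ratio_x100(100),	/* 1:1   -> 1 */
			     taps_for_ratio_x100(50));	/* 0.5:1 -> 4 */
	return 0;
}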
/* Calculate optimal number of taps */
static bool spl_get_optimal_number_of_taps(
int max_downscale_src_width, struct spl_in *spl_in, struct spl_scratch *spl_scratch,
@@ -879,11 +948,26 @@ static bool spl_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
bool skip_easf = false;
+ bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
- spl_scratch->scl_data.viewport.width > max_downscale_src_width)
+ spl_scratch->scl_data.viewport.width > max_downscale_src_width) {
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ *enable_isharp = false;
return false;
+ }
+
+ /* Disable adaptive scaler and sharpener when integer scaling is enabled */
+ if (spl_in->scaling_quality.integer_scaling) {
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ *enable_isharp = false;
+ return true;
+ }
/* Check if we are using EASF or not */
skip_easf = enable_easf(spl_in, spl_scratch);
@@ -893,44 +977,10 @@ static bool spl_get_optimal_number_of_taps(
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
* taps = 4 for upscaling
*/
- if (skip_easf) {
- if (in_taps->h_taps == 0) {
- if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1)
- spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil(
- spl_scratch->scl_data.ratios.horz), 8);
- else
- spl_scratch->scl_data.taps.h_taps = 4;
- } else
- spl_scratch->scl_data.taps.h_taps = in_taps->h_taps;
- if (in_taps->v_taps == 0) {
- if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1)
- spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
- spl_scratch->scl_data.ratios.vert, 2)), 8);
- else
- spl_scratch->scl_data.taps.v_taps = 4;
- } else
- spl_scratch->scl_data.taps.v_taps = in_taps->v_taps;
- if (in_taps->v_taps_c == 0) {
- if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1)
- spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
- spl_scratch->scl_data.ratios.vert_c, 2)), 8);
- else
- spl_scratch->scl_data.taps.v_taps_c = 4;
- } else
- spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c;
- if (in_taps->h_taps_c == 0) {
- if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1)
- spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil(
- spl_scratch->scl_data.ratios.horz_c), 8);
- else
- spl_scratch->scl_data.taps.h_taps_c = 4;
- } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
- /* Only 1 and even h_taps_c are supported by hw */
- spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1;
- else
- spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c;
- } else {
- if (spl_is_yuv420(spl_in->basic_in.format)) {
+ if (skip_easf)
+ spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
+ else {
+ if (spl_is_video_format(spl_in->basic_in.format)) {
spl_scratch->scl_data.taps.h_taps = 6;
spl_scratch->scl_data.taps.v_taps = 6;
spl_scratch->scl_data.taps.h_taps_c = 4;
@@ -948,13 +998,12 @@ static bool spl_get_optimal_number_of_taps(
min_taps_c = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c);
/* Use LB_MEMORY_CONFIG_3 for 4:2:0 */
- if ((spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8)
- || (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10))
+ if (spl_is_yuv420(spl_in->basic_in.format))
lb_config = LB_MEMORY_CONFIG_3;
else
lb_config = LB_MEMORY_CONFIG_0;
// Determine max vtap support by calculating how much line buffer can fit
- spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data,
+ spl_in->callbacks.spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data,
lb_config, &num_part_y, &num_part_c);
/* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2)
@@ -1005,13 +1054,11 @@ static bool spl_get_optimal_number_of_taps(
if (spl_scratch->scl_data.taps.h_taps_c == 5)
spl_scratch->scl_data.taps.h_taps_c = 4;
- if (spl_is_yuv420(spl_in->basic_in.format)) {
- if ((spl_scratch->scl_data.taps.h_taps <= 4) ||
- (spl_scratch->scl_data.taps.h_taps_c <= 3)) {
+ if (spl_is_video_format(spl_in->basic_in.format)) {
+ if (spl_scratch->scl_data.taps.h_taps <= 4) {
*enable_easf_v = false;
*enable_easf_h = false;
- } else if ((spl_scratch->scl_data.taps.v_taps <= 3) ||
- (spl_scratch->scl_data.taps.v_taps_c <= 3)) {
+ } else if (spl_scratch->scl_data.taps.v_taps <= 3) {
*enable_easf_v = false;
*enable_easf_h = true;
} else {
@@ -1040,10 +1087,9 @@ static bool spl_get_optimal_number_of_taps(
/* Sharpener requires scaler to be enabled, including for 1:1
* Check if ISHARP can be enabled
- * If ISHARP is not enabled, for 1:1, set taps to 1 and disable
- * EASF
- * For case of 2:1 YUV where chroma is 1:1, set taps to 1 if
- * EASF is not enabled
+ * If ISHARP is not enabled, set taps to 1 if the ratio is 1:1,
+ * except for the chroma taps: keep their previous values so
+ * cositing is still handled
*/
*enable_isharp = spl_get_isharp_en(spl_in, spl_scratch);
@@ -1053,20 +1099,28 @@ static bool spl_get_optimal_number_of_taps(
spl_scratch->scl_data.taps.h_taps = 1;
spl_scratch->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled)
spl_scratch->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled)
spl_scratch->scl_data.taps.v_taps_c = 1;
*enable_easf_v = false;
*enable_easf_h = false;
} else {
if ((!*enable_easf_h) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)))
+ spl_scratch->scl_data.taps.h_taps = 1;
+
+ if ((!*enable_easf_v) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert)))
+ spl_scratch->scl_data.taps.v_taps = 1;
+
+ if ((!*enable_easf_h) && !is_subsampled &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)))
spl_scratch->scl_data.taps.h_taps_c = 1;
- if ((!*enable_easf_v) &&
+ if ((!*enable_easf_v) && !is_subsampled &&
(IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)))
spl_scratch->scl_data.taps.v_taps_c = 1;
}
@@ -1077,8 +1131,7 @@ static bool spl_get_optimal_number_of_taps(
static void spl_set_black_color_data(enum spl_pixel_format format,
struct scl_black_color *scl_black_color)
{
- bool ycbcr = format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
- && format <= SPL_PIXEL_FORMAT_VIDEO_END;
+ bool ycbcr = spl_is_video_format(format);
if (ycbcr) {
scl_black_color->offset_rgb_y = BLACK_OFFSET_RGB_Y;
scl_black_color->offset_rgb_cbcr = BLACK_OFFSET_CBCR;
@@ -1545,7 +1598,7 @@ static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *s
0x0; // fp1.5.10, C3 coefficient
}
- if (spl_is_yuv420(format)) { /* TODO: 0 = RGB, 1 = YUV */
+ if (spl_is_subsampled_format(format)) { /* TODO: 0 = RGB, 1 = YUV */
dscl_prog_data->easf_matrix_mode = 1;
/*
* 2-bit, BF3 chroma mode correction calculation mode
@@ -1590,7 +1643,8 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness,
scale_to_sharpness_policy);
- dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup);
+ memcpy(dscl_prog_data->isharp_delta, spl_get_pregen_filter_isharp_1D_lut(setup),
+ sizeof(uint32_t) * ISHARP_LUT_TABLE_SIZE);
dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level;
dscl_prog_data->isharp_en = 1; // ISHARP_EN
@@ -1711,6 +1765,32 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
spl_set_blur_scale_data(dscl_prog_data, data);
}
+/* Calculate recout, scaling ratio, and viewport, then get optimal number of taps */
+static bool spl_calculate_number_of_taps(struct spl_in *spl_in, struct spl_scratch *spl_scratch, struct spl_out *spl_out,
+ bool *enable_easf_v, bool *enable_easf_h, bool *enable_isharp)
+{
+ bool res = false;
+
+ memset(spl_scratch, 0, sizeof(struct spl_scratch));
+ spl_scratch->scl_data.h_active = spl_in->h_active;
+ spl_scratch->scl_data.v_active = spl_in->v_active;
+
+ // All SPL calls
+ /* recout calculation */
+ /* depends on h_active */
+ spl_calculate_recout(spl_in, spl_scratch, spl_out);
+ /* depends on pixel format */
+ spl_calculate_scaling_ratios(spl_in, spl_scratch, spl_out);
+ /* depends on scaling ratios and recout, does not calculate offset yet */
+ spl_calculate_viewport_size(spl_in, spl_scratch);
+
+ res = spl_get_optimal_number_of_taps(
+ spl_in->basic_out.max_downscale_src_width, spl_in,
+ spl_scratch, &spl_in->scaling_quality, enable_easf_v,
+ enable_easf_h, enable_isharp);
+ return res;
+}
+
/* Calculate scaler parameters */
bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
{
@@ -1725,23 +1805,9 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
bool enable_isharp = false;
const struct spl_scaler_data *data = &spl_scratch.scl_data;
- memset(&spl_scratch, 0, sizeof(struct spl_scratch));
- spl_scratch.scl_data.h_active = spl_in->h_active;
- spl_scratch.scl_data.v_active = spl_in->v_active;
-
- // All SPL calls
- /* recout calculation */
- /* depends on h_active */
- spl_calculate_recout(spl_in, &spl_scratch, spl_out);
- /* depends on pixel format */
- spl_calculate_scaling_ratios(spl_in, &spl_scratch, spl_out);
- /* depends on scaling ratios and recout, does not calculate offset yet */
- spl_calculate_viewport_size(spl_in, &spl_scratch);
+ res = spl_calculate_number_of_taps(spl_in, &spl_scratch, spl_out,
+ &enable_easf_v, &enable_easf_h, &enable_isharp);
- res = spl_get_optimal_number_of_taps(
- spl_in->basic_out.max_downscale_src_width, spl_in,
- &spl_scratch, &spl_in->scaling_quality, &enable_easf_v,
- &enable_easf_h, &enable_isharp);
/*
* Depends on recout, scaling ratios, h_active and taps
* May need to re-check lb size after this in some obscure scenario
@@ -1753,12 +1819,12 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
// Clamp
spl_clamp_viewport(&spl_scratch.scl_data.viewport);
- if (!res)
- return res;
-
// Save all calculated parameters in dscl_prog_data structure to program hw registers
spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp);
+ if (!res)
+ return res;
+
if (spl_in->lls_pref == LLS_PREF_YES) {
if (spl_in->is_hdr_on)
setup = HDR_L;
@@ -1789,3 +1855,20 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
return res;
}
+
+/* External interface to get number of taps only */
+bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out)
+{
+ bool res = false;
+ bool enable_easf_v = false;
+ bool enable_easf_h = false;
+ bool enable_isharp = false;
+ struct spl_scratch spl_scratch;
+ struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data;
+ const struct spl_scaler_data *data = &spl_scratch.scl_data;
+
+ res = spl_calculate_number_of_taps(spl_in, &spl_scratch, spl_out,
+ &enable_easf_v, &enable_easf_h, &enable_isharp);
+ spl_set_taps_data(dscl_prog_data, data);
+ return res;
+}
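
A minimal usage sketch for the new entry point, assuming spl_in and spl_out are populated the same way as for spl_calculate_scaler_params() (the wrapper name is hypothetical):

/* Hypothetical caller: ask SPL only for tap counts, without
 * programming the full scaler state. */
static bool query_scaler_taps(struct spl_in *in, struct spl_out *out)
{
	/* Computes recout, ratios and viewport internally, then fills
	 * out->dscl_prog_data with tap data only. */
	if (!spl_get_number_of_taps(in, out))
		return false;	/* downscale limit exceeded */
	return true;
}
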
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
index 205e59a2a8ee..02a2d6725ed5 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.h
@@ -13,4 +13,6 @@
bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out);
+bool spl_get_number_of_taps(struct spl_in *spl_in, struct spl_out *spl_out);
+
#endif /* __DC_SPL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
index afcc66206ca2..89af91e19b6c 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
@@ -7,7 +7,6 @@
#include "dc_spl_types.h"
-#define ISHARP_LUT_TABLE_SIZE 32
const uint32_t *spl_get_filter_isharp_1D_lut_0(void);
const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void);
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
index 2a74ff5fdfdb..467af9dd90de 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
@@ -5,10 +5,8 @@
#ifndef __DC_SPL_TYPES_H__
#define __DC_SPL_TYPES_H__
+#include "spl_debug.h"
#include "spl_os_types.h" // swap
-#ifndef SPL_ASSERT
-#define SPL_ASSERT(_bool) ((void *)0)
-#endif
#include "spl_fixpt31_32.h" // fixed31_32 and related functions
#include "spl_custom_float.h" // custom float and related functions
@@ -65,13 +63,13 @@ enum spl_pixel_format {
SPL_PIXEL_FORMAT_420BPP8,
SPL_PIXEL_FORMAT_420BPP10,
/*end of pixel format definition*/
- SPL_PIXEL_FORMAT_INVALID,
- SPL_PIXEL_FORMAT_422BPP8,
- SPL_PIXEL_FORMAT_422BPP10,
SPL_PIXEL_FORMAT_GRPH_BEGIN = SPL_PIXEL_FORMAT_INDEX8,
SPL_PIXEL_FORMAT_GRPH_END = SPL_PIXEL_FORMAT_FP16,
+ SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
+ SPL_PIXEL_FORMAT_SUBSAMPLED_END = SPL_PIXEL_FORMAT_420BPP10,
SPL_PIXEL_FORMAT_VIDEO_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
SPL_PIXEL_FORMAT_VIDEO_END = SPL_PIXEL_FORMAT_420BPP10,
+ SPL_PIXEL_FORMAT_INVALID,
SPL_PIXEL_FORMAT_UNKNOWN
};
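
The predicates used throughout this patch presumably reduce to range checks over the aliases above; a sketch under that assumption (the actual helpers live elsewhere in dc_spl.c):

static bool spl_is_subsampled_format(enum spl_pixel_format format)
{
	return format >= SPL_PIXEL_FORMAT_SUBSAMPLED_BEGIN &&
	       format <= SPL_PIXEL_FORMAT_SUBSAMPLED_END;
}

static bool spl_is_video_format(enum spl_pixel_format format)
{
	return format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN &&
	       format <= SPL_PIXEL_FORMAT_VIDEO_END;
}
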
@@ -252,6 +250,7 @@ enum isharp_en {
ISHARP_DISABLE,
ISHARP_ENABLE
};
+#define ISHARP_LUT_TABLE_SIZE 32
// Below struct holds values that can be directly used to program
// hardware registers. No conversion/clamping is required
struct dscl_prog_data {
@@ -402,7 +401,7 @@ struct dscl_prog_data {
uint32_t isharp_nl_en; // ISHARP_NL_EN ? TODO:check this
struct isharp_lba isharp_lba; // ISHARP_LBA
struct isharp_fmt isharp_fmt; // ISHARP_FMT
- const uint32_t *isharp_delta;
+ uint32_t isharp_delta[ISHARP_LUT_TABLE_SIZE];
struct isharp_nldelta_sclip isharp_nldelta_sclip; // ISHARP_NLDELTA_SCLIP
/* blur and scale filter */
const uint16_t *filter_blur_scale_v;
@@ -437,8 +436,14 @@ struct basic_in {
struct spl_rect clip_rect; // Clip rect
enum spl_rotation_angle rotation; // Rotation
bool horizontal_mirror; // Horizontal mirror
- int mpc_combine_h; // MPC Horizontal Combine Factor (split_count)
- int mpc_combine_v; // MPC Vertical Combine Factor (split_idx)
+ struct { // previously mpc_combine_h (split_count)
+ bool use_recout_width_aligned;
+ union {
+ int mpc_num_h_slices;
+ int mpc_recout_width_align;
+ } num_slices_recout_width;
+ } num_h_slices_recout_width_align;
+ int mpc_h_slice_index; // previously mpc_combine_v (split_idx)
// Inputs for adaptive scaler - TODO
enum spl_transfer_func_type tf_type; /* Transfer function type */
enum spl_transfer_func_predefined tf_predefined_type; /* Transfer function predefined type */
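
A hypothetical caller-side sketch of the reworked MPC combine fields: a pipe selects either a horizontal slice count or a recout width alignment, never both (the values are examples):

static void example_set_two_h_slices(struct basic_in *in)
{
	in->num_h_slices_recout_width_align.use_recout_width_aligned = false;
	in->num_h_slices_recout_width_align.num_slices_recout_width.mpc_num_h_slices = 2;
	in->mpc_h_slice_index = 0;	/* this pipe handles slice 0 of 2 */
}
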
@@ -498,7 +503,7 @@ enum scale_to_sharpness_policy {
SCALE_TO_SHARPNESS_ADJ_YUV = 1,
SCALE_TO_SHARPNESS_ADJ_ALL = 2
};
-struct spl_funcs {
+struct spl_callbacks {
void (*spl_calc_lb_num_partitions)
(bool alpha_en,
const struct spl_scaler_data *scl_data,
@@ -510,7 +515,6 @@ struct spl_funcs {
struct spl_debug {
int visual_confirm_base_offset;
int visual_confirm_dpp_offset;
- enum sharpen_policy sharpen_policy;
enum scale_to_sharpness_policy scale_to_sharpness_policy;
};
@@ -520,7 +524,7 @@ struct spl_in {
// Basic slice information
int odm_slice_index; // ODM Slice Index using get_odm_split_index
struct spl_taps scaling_quality; // Explicit Scaling Quality
- struct spl_funcs *funcs;
+ struct spl_callbacks callbacks;
// Inputs for isharp and EASF
struct adaptive_sharpness adaptive_sharpness; // Adaptive Sharpness
enum linear_light_scaling lls_pref; // Linear Light Scaling
@@ -532,6 +536,7 @@ struct spl_in {
int h_active;
int v_active;
int sdr_white_level_nits;
+ enum sharpen_policy sharpen_policy;
};
// end of SPL inputs
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
index 5696dafd0894..a6f6132df241 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
@@ -5,21 +5,26 @@
#ifndef SPL_DEBUG_H
#define SPL_DEBUG_H
-#ifdef SPL_ASSERT
-#undef SPL_ASSERT
-#endif
-#define SPL_ASSERT(b)
+#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
+#define SPL_ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) { \
+ kgdb_breakpoint(); \
+ } \
+} while (0)
+#else
+#define SPL_ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) { \
+ ; \
+ } \
+} while (0)
+#endif /* CONFIG_HAVE_KGDB || CONFIG_KGDB */
-#define SPL_ASSERT_CRITICAL(expr) do {if (expr)/* Do nothing */; } while (0)
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+#define SPL_ASSERT(expr) SPL_ASSERT_CRITICAL(expr)
+#else
+#define SPL_ASSERT(expr) WARN_ON(!(expr))
+#endif /* CONFIG_DEBUG_KERNEL_DC */
-#ifdef SPL_DALMSG
-#undef SPL_DALMSG
-#endif
-#define SPL_DALMSG(b)
-
-#ifdef SPL_DAL_ASSERT_MSG
-#undef SPL_DAL_ASSERT_MSG
-#endif
-#define SPL_DAL_ASSERT_MSG(b, m)
+#define SPL_BREAK_TO_DEBUGGER() SPL_ASSERT(0)
#endif // SPL_DEBUG_H
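
A short sketch of the reworked assert behavior (WARN_ON and kgdb_breakpoint() are the standard kernel primitives; the function itself is illustrative):

static void example_check(long long value)
{
	/* Warns with a backtrace when false, instead of compiling away
	 * as the old stub did. */
	SPL_ASSERT(value != 0);

	/* Same warning, plus a trap into kgdb when CONFIG_HAVE_KGDB or
	 * CONFIG_KGDB is enabled. */
	SPL_ASSERT_CRITICAL(value > 0);
}
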
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
index a95565df5487..131f1e3949d3 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
@@ -22,14 +22,14 @@ static inline unsigned long long abs_i64(
* result = dividend / divisor
* *remainder = dividend % divisor
*/
-static inline unsigned long long complete_integer_division_u64(
+static inline unsigned long long spl_complete_integer_division_u64(
unsigned long long dividend,
unsigned long long divisor,
unsigned long long *remainder)
{
unsigned long long result;
- ASSERT(divisor);
+ SPL_ASSERT(divisor);
result = spl_div64_u64_rem(dividend, divisor, remainder);
@@ -60,10 +60,10 @@ struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long den
/* determine integer part */
- unsigned long long res_value = complete_integer_division_u64(
+ unsigned long long res_value = spl_complete_integer_division_u64(
arg1_value, arg2_value, &remainder);
- ASSERT(res_value <= LONG_MAX);
+ SPL_ASSERT(res_value <= (unsigned long long)LONG_MAX);
/* determine fractional part */
{
@@ -85,7 +85,7 @@ struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long den
{
unsigned long long summand = (remainder << 1) >= arg2_value;
- ASSERT(res_value <= LLONG_MAX - summand);
+ SPL_ASSERT(res_value <= (unsigned long long)LLONG_MAX - summand);
res_value += summand;
}
@@ -118,19 +118,19 @@ struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed
res.value = arg1_int * arg2_int;
- ASSERT(res.value <= (long long)LONG_MAX);
+ SPL_ASSERT(res.value <= (long long)LONG_MAX);
res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
tmp = arg1_int * arg2_fra;
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
tmp = arg2_int * arg1_fra;
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
@@ -139,7 +139,7 @@ struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed
tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
(tmp >= (unsigned long long)spl_fixpt_half.value);
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
@@ -163,17 +163,17 @@ struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg)
res.value = arg_int * arg_int;
- ASSERT(res.value <= (long long)LONG_MAX);
+ SPL_ASSERT(res.value <= (long long)LONG_MAX);
res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
tmp = arg_int * arg_fra;
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
@@ -182,7 +182,7 @@ struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg)
tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
(tmp >= (unsigned long long)spl_fixpt_half.value);
- ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+ SPL_ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
@@ -196,7 +196,7 @@ struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
* Good idea to use Newton's method
*/
- ASSERT(arg.value);
+ SPL_ASSERT(arg.value);
return spl_fixpt_from_fraction(
spl_fixpt_one.value,
@@ -286,7 +286,7 @@ struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg)
*
* Calculated as Taylor series.
*/
-static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg)
+static struct spl_fixed31_32 spl_fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg)
{
unsigned int n = 9;
@@ -295,7 +295,7 @@ static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed3
n + 1);
/* TODO find correct res */
- ASSERT(spl_fixpt_lt(arg, spl_fixpt_one));
+ SPL_ASSERT(spl_fixpt_lt(arg, spl_fixpt_one));
do
res = spl_fixpt_add(
@@ -337,22 +337,22 @@ struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg)
spl_fixpt_ln2,
m));
- ASSERT(m != 0);
+ SPL_ASSERT(m != 0);
- ASSERT(spl_fixpt_lt(
+ SPL_ASSERT(spl_fixpt_lt(
spl_fixpt_abs(r),
spl_fixpt_one));
if (m > 0)
return spl_fixpt_shl(
- fixed31_32_exp_from_taylor_series(r),
+ spl_fixed31_32_exp_from_taylor_series(r),
(unsigned char)m);
else
return spl_fixpt_div_int(
- fixed31_32_exp_from_taylor_series(r),
+ spl_fixed31_32_exp_from_taylor_series(r),
1LL << -m);
} else if (arg.value != 0)
- return fixed31_32_exp_from_taylor_series(arg);
+ return spl_fixed31_32_exp_from_taylor_series(arg);
else
return spl_fixpt_one;
}
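
For context on the renamed helper: it performs the usual range reduction exp(x) = 2^m * exp(r), with m = round(x / ln 2) and r = x - m * ln 2, so |r| < 1 and the Taylor expansion used by spl_fixed31_32_exp_from_taylor_series (n = 9 terms) converges quickly; the spl_fixpt_shl() and spl_fixpt_div_int() branches then apply the 2^m factor for positive and negative m respectively.
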
@@ -364,7 +364,7 @@ struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg)
struct spl_fixed31_32 error;
- ASSERT(arg.value > 0);
+ SPL_ASSERT(arg.value > 0);
/* TODO if arg is negative, return NaN */
/* TODO if arg is zero, return -INF */
@@ -396,7 +396,7 @@ struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg)
* part in 32 bits. It is used in hw programming (scaler)
*/
-static inline unsigned int ux_dy(
+static inline unsigned int spl_ux_dy(
long long value,
unsigned int integer_bits,
unsigned int fractional_bits)
@@ -415,13 +415,13 @@ static inline unsigned int ux_dy(
return result | fractional_part;
}
-static inline unsigned int clamp_ux_dy(
+static inline unsigned int spl_clamp_ux_dy(
long long value,
unsigned int integer_bits,
unsigned int fractional_bits,
unsigned int min_clamp)
{
- unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits);
+ unsigned int truncated_val = spl_ux_dy(value, integer_bits, fractional_bits);
if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
return (1 << (integer_bits + fractional_bits)) - 1;
@@ -433,40 +433,40 @@ static inline unsigned int clamp_ux_dy(
unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg)
{
- return ux_dy(arg.value, 4, 19);
+ return spl_ux_dy(arg.value, 4, 19);
}
unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg)
{
- return ux_dy(arg.value, 3, 19);
+ return spl_ux_dy(arg.value, 3, 19);
}
unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg)
{
- return ux_dy(arg.value, 2, 19);
+ return spl_ux_dy(arg.value, 2, 19);
}
unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg)
{
- return ux_dy(arg.value, 0, 19);
+ return spl_ux_dy(arg.value, 0, 19);
}
unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg)
{
- return clamp_ux_dy(arg.value, 0, 14, 1);
+ return spl_clamp_ux_dy(arg.value, 0, 14, 1);
}
unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg)
{
- return clamp_ux_dy(arg.value, 0, 10, 1);
+ return spl_clamp_ux_dy(arg.value, 0, 10, 1);
}
int spl_fixpt_s4d19(struct spl_fixed31_32 arg)
{
if (arg.value < 0)
- return -(int)ux_dy(spl_fixpt_abs(arg).value, 4, 19);
+ return -(int)spl_ux_dy(spl_fixpt_abs(arg).value, 4, 19);
else
- return ux_dy(arg.value, 4, 19);
+ return spl_ux_dy(arg.value, 4, 19);
}
struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value,
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
index 8a045e2f8699..ed2647f9a099 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
@@ -5,11 +5,8 @@
#ifndef __SPL_FIXED31_32_H__
#define __SPL_FIXED31_32_H__
-#include "os_types.h"
+#include "spl_debug.h"
#include "spl_os_types.h" // swap
-#ifndef ASSERT
-#define ASSERT(_bool) ((void *)0)
-#endif
#ifndef LLONG_MAX
#define LLONG_MAX 9223372036854775807ll
@@ -194,7 +191,7 @@ static inline struct spl_fixed31_32 spl_fixpt_clamp(
*/
static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift)
{
- ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+ SPL_ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
arg.value = arg.value << shift;
@@ -231,7 +228,7 @@ static inline struct spl_fixed31_32 spl_fixpt_add(struct spl_fixed31_32 arg1, st
{
struct spl_fixed31_32 res;
- ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+ SPL_ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
res.value = arg1.value + arg2.value;
@@ -256,7 +253,7 @@ static inline struct spl_fixed31_32 spl_fixpt_sub(struct spl_fixed31_32 arg1, st
{
struct spl_fixed31_32 res;
- ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+ SPL_ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
res.value = arg1.value - arg2.value;
@@ -448,7 +445,7 @@ static inline int spl_fixpt_round(struct spl_fixed31_32 arg)
const long long summand = spl_fixpt_half.value;
- ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+ SPL_ASSERT(LLONG_MAX - (long long)arg_value >= summand);
arg_value += summand;
@@ -469,7 +466,7 @@ static inline int spl_fixpt_ceil(struct spl_fixed31_32 arg)
const long long summand = spl_fixpt_one.value -
spl_fixpt_epsilon.value;
- ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+ SPL_ASSERT(LLONG_MAX - (long long)arg_value >= summand);
arg_value += summand;
@@ -504,7 +501,7 @@ static inline struct spl_fixed31_32 spl_fixpt_truncate(struct spl_fixed31_32 arg
bool negative = arg.value < 0;
if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) {
- ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ SPL_ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART);
return arg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
index 709706ed4f2c..2e6ba71960ac 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
@@ -6,6 +6,8 @@
#ifndef _SPL_OS_TYPES_H_
#define _SPL_OS_TYPES_H_
+#include "spl_debug.h"
+
#include <linux/slab.h>
#include <linux/kgdb.h>
#include <linux/kref.h>
@@ -18,7 +20,6 @@
* general debug capabilities
*
*/
-#define SPL_BREAK_TO_DEBUGGER() ASSERT(0)
static inline uint64_t spl_div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index fe5b6f7a3eb1..4b3ccbca0da2 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -69,6 +69,9 @@
#define DMUB_PC_SNAPSHOT_COUNT 10
+/* Default tracebuffer size if meta is absent. */
+#define DMUB_TRACE_BUFFER_SIZE (64 * 1024)
+
/* Forward declarations */
struct dmub_srv;
struct dmub_srv_common_regs;
@@ -301,6 +304,7 @@ struct dmub_srv_hw_params {
bool disallow_phy_access;
bool disable_sldo_opt;
bool enable_non_transparent_setconfig;
+ bool lower_hbr3_phy_ssc;
};
/**
@@ -570,6 +574,14 @@ struct dmub_notification {
};
};
+/* enum dmub_ips_mode - IPS mode identifier */
+enum dmub_ips_mode {
+ DMUB_IPS_MODE_IPS1_MAX = 0,
+ DMUB_IPS_MODE_IPS2,
+ DMUB_IPS_MODE_IPS1_RCG,
+ DMUB_IPS_MODE_IPS1_ONO2_ON
+};
+
/**
* DMUB firmware version helper macro - useful for checking if the version
* of a firmware to know if feature or functionality is supported or present.
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index ebcf68bfae2b..d0fe324cb537 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -170,6 +170,11 @@
#pragma pack(push, 1)
#define ABM_NUM_OF_ACE_SEGMENTS 5
+/**
+ * Debug FW state offset
+ */
+#define DMUB_DEBUG_FW_STATE_OFFSET 0x300
+
union abm_flags {
struct {
/**
@@ -426,7 +431,68 @@ union replay_debug_flags {
*/
uint32_t enable_ips_residency_profiling : 1;
- uint32_t reserved : 20;
+ /**
+ * 0x1000 (bit 12)
+ * @enable_coasting_vtotal_check: Enable Coasting_vtotal_check
+ */
+ uint32_t enable_coasting_vtotal_check : 1;
+ /**
+ * 0x2000 (bit 13)
+ * @enable_visual_confirm_debug: Enable Visual Confirm Debug
+ */
+ uint32_t enable_visual_confirm_debug : 1;
+
+ uint32_t reserved : 18;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+/**
+ * Flags record error state.
+ */
+union replay_visual_confirm_error_state_flags {
+ struct {
+ /**
+ * 0x1 (bit 0) - Desync Error flag.
+ */
+ uint32_t desync_error : 1;
+
+ /**
+ * 0x2 (bit 1) - State Transition Error flag.
+ */
+ uint32_t state_transition_error : 1;
+
+ /**
+ * 0x4 (bit 2) - Crc Error flag
+ */
+ uint32_t crc_error : 1;
+
+ /**
+ * 0x8 (bit 3) - Reserved
+ */
+ uint32_t reserved_3 : 1;
+
+ /**
+ * 0x10 (bit 4) - Incorrect coasting vtotal checking; use the new
+ * debug flag to control the DPCD write.
+ */
+ uint32_t incorrect_vtotal_in_static_screen : 1;
+
+ /**
+ * 0x20 (bit 5) - No doubled Refresh Rate.
+ */
+ uint32_t no_double_rr : 1;
+
+ /**
+ * Reserved bits 6-7
+ */
+ uint32_t reserved_6_7 : 2;
+
+ /**
+ * Reserved bits 8-31
+ */
+ uint32_t reserved_9_31 : 24;
} bitfields;
uint32_t u32All;
@@ -470,11 +536,23 @@ union replay_hw_flags {
* Use TPS3 signal when restore main link.
*/
uint32_t force_wakeup_by_tps3 : 1;
+ /**
+ * @is_alpm_initialized: Indicates whether ALPM is initialized
+ */
+ uint32_t is_alpm_initialized : 1;
} bitfields;
uint32_t u32All;
};
+union fw_assisted_mclk_switch_version {
+ struct {
+ uint8_t minor : 5;
+ uint8_t major : 3;
+ };
+ uint8_t ver;
+};
+
/**
* DMUB feature capabilities.
* After DMUB init, driver will query FW capabilities prior to enabling certain features.
@@ -490,6 +568,7 @@ struct dmub_feature_caps {
uint8_t gecc_enable;
uint8_t replay_supported;
uint8_t replay_reserved[3];
+ uint8_t abm_aux_backlight_support;
};
struct dmub_visual_confirm_color {
@@ -689,7 +768,8 @@ union dmub_fw_boot_options {
uint32_t ips_disable: 3; /* options to disable ips support*/
uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */
uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */
- uint32_t reserved : 7; /**< reserved */
+ uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */
+ uint32_t reserved : 6; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -721,6 +801,7 @@ enum dmub_shared_state_feature_id {
DMUB_SHARED_SHARE_FEATURE__INVALID = 0,
DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
+ DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3,
DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
};
@@ -747,7 +828,8 @@ union dmub_shared_state_ips_driver_signals {
 uint32_t allow_ips1 : 1; /**< 1 if IPS1 is allowed */
 uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
- uint32_t reserved_bits : 28; /**< Reversed bits */
+ uint32_t allow_idle: 1; /**< 1 if driver is allowing idle */
+ uint32_t reserved_bits : 27; /**< Reserved bits */
} bits;
uint32_t all;
};
@@ -757,6 +839,14 @@ union dmub_shared_state_ips_driver_signals {
*/
#define DMUB_SHARED_STATE__IPS_FW_VERSION 1
+struct dmub_shared_state_debug_setup {
+ union {
+ struct {
+ uint32_t exclude_points[62];
+ } profile_mode;
+ };
+};
+
/**
* struct dmub_shared_state_ips_fw - Firmware state for IPS.
*/
@@ -809,6 +899,7 @@ struct dmub_shared_state_feature_block {
struct dmub_shared_state_feature_common common; /**< Generic data */
struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
+ struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */
} data; /**< Shared state data. */
}; /* 256-bytes, fixed */
@@ -1051,11 +1142,110 @@ enum dmub_gpint_command {
DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD3 = 119,
/**
+ * DESC: Set IPS residency measurement
+ * ARGS: 0 - Disable ips measurement
+ * 1 - Enable ips measurement
+ */
+ DMUB_GPINT__IPS_RESIDENCY = 121,
+
+ /**
* DESC: Enable measurements for various task duration
* ARGS: 0 - Disable measurement
* 1 - Enable measurement
*/
DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123,
+
+ /**
+ * DESC: Gets IPS residency in microseconds
+ * ARGS: 0 - Return IPS1 residency
+ * 1 - Return IPS2 residency
+ * 2 - Return IPS1_RCG residency
+ * 3 - Return IPS1_ONO2_ON residency
+ * RETURN: Total residency in microseconds - lower 32 bits
+ */
+ DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO = 124,
+
+ /**
+ * DESC: Gets IPS1 histogram counts
+ * ARGS: Bucket index
+ * RETURN: Total count for the bucket
+ */
+ DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER = 125,
+
+ /**
+ * DESC: Gets IPS2 histogram counts
+ * ARGS: Bucket index
+ * RETURN: Total count for the bucket
+ */
+ DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER = 126,
+
+ /**
+ * DESC: Gets IPS residency
+ * ARGS: 0 - Return IPS1 residency
+ * 1 - Return IPS2 residency
+ * 2 - Return IPS1_RCG residency
+ * 3 - Return IPS1_ONO2_ON residency
+ * RETURN: Total residency in milli-percent.
+ */
+ DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT = 127,
+
+ /**
+ * DESC: Gets IPS1_RCG histogram counts
+ * ARGS: Bucket index
+ * RETURN: Total count for the bucket
+ */
+ DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER = 128,
+
+ /**
+ * DESC: Gets IPS1_ONO2_ON histogram counts
+ * ARGS: Bucket index
+ * RETURN: Total count for the bucket
+ */
+ DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER = 129,
+
+ /**
+ * DESC: Gets IPS entry counter during residency measurement
+ * ARGS: 0 - Return IPS1 entry counts
+ * 1 - Return IPS2 entry counts
+ * 2 - Return IPS1_RCG entry counts
+ * 3 - Return IPS1_ONO2_ON entry counts
+ * RETURN: Entry counter for selected IPS mode
+ */
+ DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER = 130,
+
+ /**
+ * DESC: Gets IPS inactive residency in microseconds
+ * ARGS: 0 - Return IPS1_MAX residency
+ * 1 - Return IPS2 residency
+ * 2 - Return IPS1_RCG residency
+ * 3 - Return IPS1_ONO2_ON residency
+ * RETURN: Total inactive residency in microseconds - lower 32 bits
+ */
+ DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO = 131,
+
+ /**
+ * DESC: Gets IPS inactive residency in microseconds
+ * ARGS: 0 - Return IPS1_MAX residency
+ * 1 - Return IPS2 residency
+ * 2 - Return IPS1_RCG residency
+ * 3 - Return IPS1_ONO2_ON residency
+ * RETURN: Total inactive residency in microseconds - upper 32 bits
+ */
+ DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI = 132,
+
+ /**
+ * DESC: Gets IPS residency in microseconds
+ * ARGS: 0 - Return IPS1 residency
+ * 1 - Return IPS2 residency
+ * 2 - Return IPS1_RCG residency
+ * 3 - Return IPS1_ONO2_ON residency
+ * RETURN: Total residency in microseconds - upper 32 bits
+ */
+ DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI = 133,
+ /**
+ * DESC: Setup debug configs.
+ */
+ DMUB_GPINT__SETUP_DEBUG_MODE = 136,
};
/**
@@ -1306,9 +1496,10 @@ enum dmub_out_cmd_type {
/* DMUB_CMD__DPIA command sub-types. */
enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0,
- DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1,
+ DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, // will be replaced by DPIA_SET_CONFIG_REQUEST
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
DMUB_CMD__DPIA_SET_TPS_NOTIFICATION = 3,
+ DMUB_CMD__DPIA_SET_CONFIG_REQUEST = 4,
};
/* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. */
@@ -1705,52 +1896,11 @@ enum fams2_stream_type {
FAMS2_STREAM_TYPE_SUBVP = 4,
};
-/* dynamic stream state */
-struct dmub_fams2_legacy_stream_dynamic_state {
- uint8_t force_allow_at_vblank;
- uint8_t pad[3];
-};
-
-struct dmub_fams2_subvp_stream_dynamic_state {
- uint16_t viewport_start_hubp_vline;
- uint16_t viewport_height_hubp_vlines;
- uint16_t viewport_start_c_hubp_vline;
- uint16_t viewport_height_c_hubp_vlines;
- uint16_t phantom_viewport_height_hubp_vlines;
- uint16_t phantom_viewport_height_c_hubp_vlines;
- uint16_t microschedule_start_otg_vline;
- uint16_t mall_start_otg_vline;
- uint16_t mall_start_hubp_vline;
- uint16_t mall_start_c_hubp_vline;
- uint8_t force_allow_at_vblank_only;
- uint8_t pad[3];
-};
-
-struct dmub_fams2_drr_stream_dynamic_state {
- uint16_t stretched_vtotal;
- uint8_t use_cur_vtotal;
- uint8_t pad;
-};
-
-struct dmub_fams2_stream_dynamic_state {
- uint64_t ref_tick;
- uint32_t cur_vtotal;
- uint16_t adjusted_allow_end_otg_vline;
- uint8_t pad[2];
- struct dmub_optc_position ref_otg_pos;
- struct dmub_optc_position target_otg_pos;
- union {
- struct dmub_fams2_legacy_stream_dynamic_state legacy;
- struct dmub_fams2_subvp_stream_dynamic_state subvp;
- struct dmub_fams2_drr_stream_dynamic_state drr;
- } sub_state;
-};
-
/* static stream state */
struct dmub_fams2_legacy_stream_static_state {
uint8_t vactive_det_fill_delay_otg_vlines;
uint8_t programming_delay_otg_vlines;
-};
+}; //v0
struct dmub_fams2_subvp_stream_static_state {
uint16_t vratio_numerator;
@@ -1769,14 +1919,59 @@ struct dmub_fams2_subvp_stream_static_state {
uint8_t phantom_otg_inst;
uint8_t phantom_pipe_mask;
uint8_t phantom_plane_pipe_masks[DMUB_MAX_PHANTOM_PLANES]; // phantom pipe mask per plane (for flip passthrough)
-};
+}; //v0
struct dmub_fams2_drr_stream_static_state {
uint16_t nom_stretched_vtotal;
uint8_t programming_delay_otg_vlines;
uint8_t only_stretch_if_required;
uint8_t pad[2];
-};
+}; //v0
+
+struct dmub_fams2_cmd_legacy_stream_static_state {
+ uint16_t vactive_det_fill_delay_otg_vlines;
+ uint16_t programming_delay_otg_vlines;
+}; //v1
+
+struct dmub_fams2_cmd_subvp_stream_static_state {
+ uint16_t vratio_numerator;
+ uint16_t vratio_denominator;
+ uint16_t phantom_vtotal;
+ uint16_t phantom_vactive;
+ uint16_t programming_delay_otg_vlines;
+ uint16_t prefetch_to_mall_otg_vlines;
+ union {
+ struct {
+ uint8_t is_multi_planar : 1;
+ uint8_t is_yuv420 : 1;
+ } bits;
+ uint8_t all;
+ } config;
+ uint8_t phantom_otg_inst;
+ uint8_t phantom_pipe_mask;
+ uint8_t pad0;
+ uint8_t phantom_plane_pipe_masks[DMUB_MAX_PHANTOM_PLANES]; // phantom pipe mask per plane (for flip passthrough)
+ uint8_t pad1[4 - (DMUB_MAX_PHANTOM_PLANES % 4)];
+}; //v1
+
+struct dmub_fams2_cmd_drr_stream_static_state {
+ uint16_t nom_stretched_vtotal;
+ uint16_t programming_delay_otg_vlines;
+ uint8_t only_stretch_if_required;
+ uint8_t pad[3];
+}; //v1
+
+union dmub_fams2_stream_static_sub_state {
+ struct dmub_fams2_legacy_stream_static_state legacy;
+ struct dmub_fams2_subvp_stream_static_state subvp;
+ struct dmub_fams2_drr_stream_static_state drr;
+}; //v0
+
+union dmub_fams2_cmd_stream_static_sub_state {
+ struct dmub_fams2_cmd_legacy_stream_static_state legacy;
+ struct dmub_fams2_cmd_subvp_stream_static_state subvp;
+ struct dmub_fams2_cmd_drr_stream_static_state drr;
+}; //v1
struct dmub_fams2_stream_static_state {
enum fams2_stream_type type;
@@ -1806,13 +2001,45 @@ struct dmub_fams2_stream_static_state {
uint8_t pipe_mask; // pipe mask for the whole config
uint8_t num_planes;
uint8_t plane_pipe_masks[DMUB_MAX_PLANES]; // pipe mask per plane (for flip passthrough)
- uint8_t pad[DMUB_MAX_PLANES % 4];
+ uint8_t pad[4 - (DMUB_MAX_PLANES % 4)];
+ union dmub_fams2_stream_static_sub_state sub_state;
+}; //v0
+
+struct dmub_fams2_cmd_stream_static_base_state {
+ enum fams2_stream_type type;
+ uint32_t otg_vline_time_ns;
+ uint32_t otg_vline_time_ticks;
+ uint16_t htotal;
+ uint16_t vtotal; // nominal vtotal
+ uint16_t vblank_start;
+ uint16_t vblank_end;
+ uint16_t max_vtotal;
+ uint16_t allow_start_otg_vline;
+ uint16_t allow_end_otg_vline;
+ uint16_t drr_keepout_otg_vline; // after this vline, vtotal cannot be changed
+ uint16_t scheduling_delay_otg_vlines; // min time to budget for ready to microschedule start
+ uint16_t contention_delay_otg_vlines; // time to budget for contention on execution
+ uint16_t vline_int_ack_delay_otg_vlines; // min time to budget for vertical interrupt firing
+ uint16_t allow_to_target_delay_otg_vlines; // time from allow vline to target vline
union {
- struct dmub_fams2_legacy_stream_static_state legacy;
- struct dmub_fams2_subvp_stream_static_state subvp;
- struct dmub_fams2_drr_stream_static_state drr;
- } sub_state;
-};
+ struct {
+ uint8_t is_drr : 1; // stream is DRR enabled
+ uint8_t clamp_vtotal_min : 1; // clamp vtotal to min instead of nominal
+ uint8_t min_ttu_vblank_usable : 1; // if min ttu vblank is above wm, no force pstate is needed in blank
+ } bits;
+ uint8_t all;
+ } config;
+ uint8_t otg_inst;
+ uint8_t pipe_mask; // pipe mask for the whole config
+ uint8_t num_planes;
+ uint8_t plane_pipe_masks[DMUB_MAX_PLANES]; // pipe mask per plane (for flip passthrough)
+ uint8_t pad[4 - (DMUB_MAX_PLANES % 4)];
+}; //v1
+
+struct dmub_fams2_stream_static_state_v1 {
+ struct dmub_fams2_cmd_stream_static_base_state base;
+ union dmub_fams2_cmd_stream_static_sub_state sub_state;
+}; //v1
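
The padding change from DMUB_MAX_PLANES % 4 to 4 - (DMUB_MAX_PLANES % 4) makes the mask-plus-pad span a multiple of 4 for any plane count: with 5 planes the old form gave 5 + 1 = 6 bytes (misaligned), the new form 5 + 3 = 8. A compile-time check of that invariant, assuming DMUB_MAX_PLANES is a plain integer macro as elsewhere in this header:

_Static_assert((DMUB_MAX_PLANES + (4 - (DMUB_MAX_PLANES % 4))) % 4 == 0,
	       "plane pipe masks plus pad must be 32-bit aligned");
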
/**
* enum dmub_fams2_allow_delay_check_mode - macroscheduler mode for breaking on excessive
@@ -1852,7 +2079,11 @@ struct dmub_cmd_fams2_global_config {
union dmub_cmd_fams2_config {
struct dmub_cmd_fams2_global_config global;
- struct dmub_fams2_stream_static_state stream;
+ struct dmub_fams2_stream_static_state stream; //v0
+ union {
+ struct dmub_fams2_cmd_stream_static_base_state base;
+ union dmub_fams2_cmd_stream_static_sub_state sub_state;
+ } stream_v1; //v1
};
/**
@@ -2097,7 +2328,7 @@ struct dmub_rb_cmd_dig1_dpia_control {
};
/**
- * SET_CONFIG Command Payload
+ * SET_CONFIG Command Payload (deprecated)
*/
struct set_config_cmd_payload {
uint8_t msg_type; /* set config message type */
@@ -2105,7 +2336,7 @@ struct set_config_cmd_payload {
};
/**
- * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command.
+ * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. (deprecated)
*/
struct dmub_cmd_set_config_control_data {
struct set_config_cmd_payload cmd_pkt;
@@ -2114,6 +2345,17 @@ struct dmub_cmd_set_config_control_data {
};
/**
+ * SET_CONFIG Request Command Payload
+ */
+struct set_config_request_cmd_payload {
+ uint8_t instance; /* DPIA instance */
+ uint8_t immed_status; /* Immediate status returned in case of error */
+ uint8_t msg_type; /* set config message type */
+ uint8_t reserved;
+ uint32_t msg_data; /* set config message data */
+};
+
+/**
* DMUB command structure for SET_CONFIG command.
*/
struct dmub_rb_cmd_set_config_access {
@@ -2122,6 +2364,14 @@ struct dmub_rb_cmd_set_config_access {
};
/**
+ * DMUB command structure for SET_CONFIG request command.
+ */
+struct dmub_rb_cmd_set_config_request {
+ struct dmub_cmd_header header; /* header */
+ struct set_config_request_cmd_payload payload; /* set config request payload */
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command.
*/
struct dmub_cmd_mst_alloc_slots_control_data {
@@ -3455,6 +3705,8 @@ enum dmub_cmd_replay_general_subtype {
*/
REPLAY_GENERAL_CMD_DISABLED_ADAPTIVE_SYNC_SDP,
REPLAY_GENERAL_CMD_DISABLED_DESYNC_ERROR_DETECTION,
+ REPLAY_GENERAL_CMD_UPDATE_ERROR_STATUS,
+ REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
};
/**
@@ -4290,6 +4542,24 @@ struct dmub_rb_cmd_abm_set_pipe {
};
/**
+ * Type of backlight control method to be used by ABM module
+ */
+enum dmub_backlight_control_type {
+ /**
+ * PWM Backlight control
+ */
+ DMU_BACKLIGHT_CONTROL_PWM = 0,
+ /**
+ * VESA Aux-based backlight control
+ */
+ DMU_BACKLIGHT_CONTROL_VESA_AUX = 1,
+ /**
+ * AMD DPCD Aux-based backlight control
+ */
+ DMU_BACKLIGHT_CONTROL_AMD_AUX = 2,
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__ABM_SET_BACKLIGHT command.
*/
struct dmub_cmd_abm_set_backlight_data {
@@ -4316,9 +4586,42 @@ struct dmub_cmd_abm_set_backlight_data {
uint8_t panel_mask;
/**
+ * AUX HW Instance.
+ */
+ uint8_t aux_inst;
+
+ /**
* Explicit padding to 4 byte boundary.
*/
- uint8_t pad[2];
+ uint8_t pad[1];
+
+ /**
+ * Backlight control type.
+ * Value 0 is PWM backlight control.
+ * Value 1 is VAUX backlight control.
+ * Value 2 is AMD DPCD AUX backlight control.
+ */
+ enum dmub_backlight_control_type backlight_control_type;
+
+ /**
+ * Minimum luminance in nits.
+ */
+ uint32_t min_luminance;
+
+ /**
+ * Maximum luminance in nits.
+ */
+ uint32_t max_luminance;
+
+ /**
+ * Minimum backlight in pwm.
+ */
+ uint32_t min_backlight_pwm;
+
+ /**
+ * Maximum backlight in pwm.
+ */
+ uint32_t max_backlight_pwm;
};
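
An illustrative fill of the extended backlight data; the values are made up, while the control-type enum and field names are from this patch:

static void example_fill_backlight(struct dmub_cmd_abm_set_backlight_data *d)
{
	d->backlight_control_type = DMU_BACKLIGHT_CONTROL_AMD_AUX;
	d->aux_inst = 0;
	d->min_luminance = 0;		/* nits */
	d->max_luminance = 500;		/* nits */
	d->min_backlight_pwm = 0x0000;
	d->max_backlight_pwm = 0xFFFF;
}
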
/**
@@ -5022,7 +5325,34 @@ struct dmub_rb_cmd_get_usbc_cable_id {
enum dmub_cmd_secure_display_type {
DMUB_CMD__SECURE_DISPLAY_TEST_CMD = 0, /* test command to only check if inbox message works */
DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE,
- DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY
+ DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY,
+ DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_STOP_UPDATE,
+ DMUB_CMD__SECURE_DISPLAY_MULTIPLE_CRC_WIN_NOTIFY
+};
+
+#define MAX_ROI_NUM 2
+
+struct dmub_cmd_roi_info {
+ uint16_t x_start;
+ uint16_t x_end;
+ uint16_t y_start;
+ uint16_t y_end;
+ uint8_t otg_id;
+ uint8_t phy_id;
+};
+
+struct dmub_cmd_roi_window_ctl {
+ uint16_t x_start;
+ uint16_t x_end;
+ uint16_t y_start;
+ uint16_t y_end;
+ bool enable;
+};
+
+struct dmub_cmd_roi_ctl_info {
+ uint8_t otg_id;
+ uint8_t phy_id;
+ struct dmub_cmd_roi_window_ctl roi_ctl[MAX_ROI_NUM];
};
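
A sketch of programming the new multi-window ROI control: one enabled 256x256 window on OTG0/PHY0 with the second slot disabled (coordinates are examples):

static void example_fill_roi(struct dmub_cmd_roi_ctl_info *ctl)
{
	ctl->otg_id = 0;
	ctl->phy_id = 0;
	ctl->roi_ctl[0].x_start = 0;
	ctl->roi_ctl[0].x_end = 256;
	ctl->roi_ctl[0].y_start = 0;
	ctl->roi_ctl[0].y_end = 256;
	ctl->roi_ctl[0].enable = true;
	ctl->roi_ctl[1].enable = false;	/* second window unused */
}
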
/**
@@ -5033,14 +5363,8 @@ struct dmub_rb_cmd_secure_display {
/**
* Data passed from driver to dmub firmware.
*/
- struct dmub_cmd_roi_info {
- uint16_t x_start;
- uint16_t x_end;
- uint16_t y_start;
- uint16_t y_end;
- uint8_t otg_id;
- uint8_t phy_id;
- } roi_info;
+ struct dmub_cmd_roi_info roi_info;
+ struct dmub_cmd_roi_ctl_info mul_roi_ctl;
};
/**
@@ -5318,7 +5642,11 @@ union dmub_rb_cmd {
/**
* Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command.
*/
- struct dmub_rb_cmd_set_config_access set_config_access;
+ struct dmub_rb_cmd_set_config_access set_config_access; // (deprecated)
+ /**
+ * Definition of a DMUB_CMD__DPIA_SET_CONFIG_REQUEST command.
+ */
+ struct dmub_rb_cmd_set_config_request set_config_request;
/**
* Definition of a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command.
*/
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 2ccad79053c5..e5e77bd3c31e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -426,6 +426,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.ips_sequential_ono = params->ips_sequential_ono;
boot_options.bits.disable_sldo_opt = params->disable_sldo_opt;
boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig;
+ boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -463,7 +464,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub)
void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
+ uint32_t is_dmub_enabled, is_soft_reset;
uint32_t is_traceport_enabled, is_cw6_enabled;
if (!dmub || !diag_data)
@@ -513,9 +514,6 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
diag_data->is_dmcub_soft_reset = is_soft_reset;
- REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- diag_data->is_dmcub_secure_reset = is_sec_reset;
-
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
diag_data->is_traceport_en = is_traceport_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index db16066bc893..15ea216e903d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -61,10 +61,6 @@
/* Default state size if meta is absent. */
#define DMUB_FW_STATE_SIZE (64 * 1024)
-/* Default tracebuffer size if meta is absent. */
-#define DMUB_TRACE_BUFFER_SIZE (64 * 1024)
-
-
/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (1024)
@@ -497,6 +493,7 @@ enum dmub_status
const struct dmub_fw_meta_info *fw_info;
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+ uint32_t shared_state_size = DMUB_FW_HEADER_SHARED_STATE_SIZE;
uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 };
if (!dmub->sw_init)
@@ -514,6 +511,7 @@ enum dmub_status
fw_state_size = fw_info->fw_region_size;
trace_buffer_size = fw_info->trace_buffer_size;
+ shared_state_size = fw_info->shared_state_size;
/**
* If DM didn't fill in a version, then fill it in based on
@@ -534,7 +532,7 @@ enum dmub_status
window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
- window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE;
+ window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);
out->fb_size =
dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index aee5170f5fb2..de8f3cfed6c8 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -164,18 +164,19 @@ enum dpcd_psr_sink_states {
PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
};
-#define DP_SOURCE_SEQUENCE 0x30c
-#define DP_SOURCE_TABLE_REVISION 0x310
-#define DP_SOURCE_PAYLOAD_SIZE 0x311
-#define DP_SOURCE_SINK_CAP 0x317
-#define DP_SOURCE_BACKLIGHT_LEVEL 0x320
-#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326
-#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E
-#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F
-#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340
+#define DP_SOURCE_SEQUENCE 0x30C
+#define DP_SOURCE_TABLE_REVISION 0x310
+#define DP_SOURCE_PAYLOAD_SIZE 0x311
+#define DP_SOURCE_SINK_CAP 0x317
+#define DP_SOURCE_BACKLIGHT_LEVEL 0x320
+#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326
+#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E
+#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F
+#define DP_SOURCE_MINIMUM_HBLANK_SUPPORTED 0x340
#define DP_SINK_PR_REPLAY_STATUS 0x378
#define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379
#define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A
+#define DP_SINK_EMISSION_RATE 0x37E
/* Remove once drm_dp_helper.h is updated upstream */
#ifndef DP_TOTAL_LTTPR_CNT
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 02c23b04d34b..058f882d5bdd 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -52,10 +52,6 @@ void update_surface_trace(
void post_surface_trace(struct dc *dc);
-void context_timing_trace(
- struct dc *dc,
- struct resource_context *res_ctx);
-
void context_clock_trace(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index a48d564d1660..4d68c1c6e210 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -61,11 +61,13 @@
#define DC_LOG_ALL_TF_CHANNELS(...) pr_debug("[GAMMA]:"__VA_ARGS__)
#define DC_LOG_DSC(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__)
-#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__)
#define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__)
#define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__)
#define DC_LOG_IPS(...) pr_debug("[IPS]: "__VA_ARGS__)
+#define DC_LOG_MALL(...) pr_debug("[MALL]:"__VA_ARGS__)
+#define DC_LOG_REGISTER_READ(...) pr_debug("[REGISTER_READ]: "__VA_ARGS__)
+#define DC_LOG_REGISTER_WRITE(...) pr_debug("[REGISTER_WRITE]: "__VA_ARGS__)
struct dc_log_buffer_ctx {
char *buf;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 3699e633801d..a71df052cf25 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1399,71 +1399,6 @@ static void scale_gamma_dx(struct pwl_float_data *pwl_rgb,
pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
}
-/* todo: all these scale_gamma functions are inherently the same but
- * take different structures as params or different format for ramp
- * values. We could probably implement it in a more generic fashion
- */
-static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
- const struct regamma_ramp *ramp,
- struct dividers dividers)
-{
- unsigned short max_driver = 0xFFFF;
- unsigned short max_os = 0xFF00;
- unsigned short scaler = max_os;
- uint32_t i;
- struct pwl_float_data *rgb = pwl_rgb;
- struct pwl_float_data *rgb_last = rgb + GAMMA_RGB_256_ENTRIES - 1;
-
- i = 0;
- do {
- if (ramp->gamma[i] > max_os ||
- ramp->gamma[i + 256] > max_os ||
- ramp->gamma[i + 512] > max_os) {
- scaler = max_driver;
- break;
- }
- i++;
- } while (i != GAMMA_RGB_256_ENTRIES);
-
- i = 0;
- do {
- rgb->r = dc_fixpt_from_fraction(
- ramp->gamma[i], scaler);
- rgb->g = dc_fixpt_from_fraction(
- ramp->gamma[i + 256], scaler);
- rgb->b = dc_fixpt_from_fraction(
- ramp->gamma[i + 512], scaler);
-
- ++rgb;
- ++i;
- } while (i != GAMMA_RGB_256_ENTRIES);
-
- rgb->r = dc_fixpt_mul(rgb_last->r,
- dividers.divider1);
- rgb->g = dc_fixpt_mul(rgb_last->g,
- dividers.divider1);
- rgb->b = dc_fixpt_mul(rgb_last->b,
- dividers.divider1);
-
- ++rgb;
-
- rgb->r = dc_fixpt_mul(rgb_last->r,
- dividers.divider2);
- rgb->g = dc_fixpt_mul(rgb_last->g,
- dividers.divider2);
- rgb->b = dc_fixpt_mul(rgb_last->b,
- dividers.divider2);
-
- ++rgb;
-
- rgb->r = dc_fixpt_mul(rgb_last->r,
- dividers.divider3);
- rgb->g = dc_fixpt_mul(rgb_last->g,
- dividers.divider3);
- rgb->b = dc_fixpt_mul(rgb_last->b,
- dividers.divider3);
-}
-
/*
* RS3+ color transform DDI - 1D LUT adjustment is composed with regamma here
* Input is evenly distributed in the output color space as specified in
@@ -1663,106 +1598,6 @@ static bool calculate_interpolated_hardware_curve(
return true;
}
-/* The "old" interpolation uses a complicated scheme to build an array of
- * coefficients while also using an array of 0-255 normalized to 0-1
- * Then there's another loop using both of the above + new scaled user ramp
- * and we concatenate them. It also searches for points of interpolation and
- * uses enums for positions.
- *
- * This function uses a different approach:
- * user ramp is always applied on X with 0/255, 1/255, 2/255, ..., 255/255
- * To find index for hwX , we notice the following:
- * i/255 <= hwX < (i+1)/255 <=> i <= 255*hwX < i+1
- * See apply_lut_1d which is the same principle, but on 4K entry 1D LUT
- *
- * Once the index is known, combined Y is simply:
- * user_ramp(index) + (hwX-index/255)*(user_ramp(index+1) - user_ramp(index)
- *
- * We should switch to this method in all cases, it's simpler and faster
- * ToDo one day - for now this only applies to ADL regamma to avoid regression
- * for regular use cases (sRGB and PQ)
- */
-static void interpolate_user_regamma(uint32_t hw_points_num,
- struct pwl_float_data *rgb_user,
- bool apply_degamma,
- struct dc_transfer_func_distributed_points *tf_pts)
-{
- uint32_t i;
- uint32_t color = 0;
- int32_t index;
- int32_t index_next;
- struct fixed31_32 *tf_point;
- struct fixed31_32 hw_x;
- struct fixed31_32 norm_factor =
- dc_fixpt_from_int(255);
- struct fixed31_32 norm_x;
- struct fixed31_32 index_f;
- struct fixed31_32 lut1;
- struct fixed31_32 lut2;
- struct fixed31_32 delta_lut;
- struct fixed31_32 delta_index;
- const struct fixed31_32 one = dc_fixpt_from_int(1);
-
- i = 0;
- /* fixed_pt library has problems handling too small values */
- while (i != 32) {
- tf_pts->red[i] = dc_fixpt_zero;
- tf_pts->green[i] = dc_fixpt_zero;
- tf_pts->blue[i] = dc_fixpt_zero;
- ++i;
- }
- while (i <= hw_points_num + 1) {
- for (color = 0; color < 3; color++) {
- if (color == 0)
- tf_point = &tf_pts->red[i];
- else if (color == 1)
- tf_point = &tf_pts->green[i];
- else
- tf_point = &tf_pts->blue[i];
-
- if (apply_degamma) {
- if (color == 0)
- hw_x = coordinates_x[i].regamma_y_red;
- else if (color == 1)
- hw_x = coordinates_x[i].regamma_y_green;
- else
- hw_x = coordinates_x[i].regamma_y_blue;
- } else
- hw_x = coordinates_x[i].x;
-
- if (dc_fixpt_le(one, hw_x))
- hw_x = one;
-
- norm_x = dc_fixpt_mul(norm_factor, hw_x);
- index = dc_fixpt_floor(norm_x);
- if (index < 0 || index > 255)
- continue;
-
- index_f = dc_fixpt_from_int(index);
- index_next = (index == 255) ? index : index + 1;
-
- if (color == 0) {
- lut1 = rgb_user[index].r;
- lut2 = rgb_user[index_next].r;
- } else if (color == 1) {
- lut1 = rgb_user[index].g;
- lut2 = rgb_user[index_next].g;
- } else {
- lut1 = rgb_user[index].b;
- lut2 = rgb_user[index_next].b;
- }
-
- // we have everything now, so interpolate
- delta_lut = dc_fixpt_sub(lut2, lut1);
- delta_index = dc_fixpt_sub(norm_x, index_f);
-
- *tf_point = dc_fixpt_add(lut1,
- dc_fixpt_mul(delta_index, delta_lut));
- }
- ++i;
- }
-}
-
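/* For illustration, the removed interpolation above reduces to a plain linear
 * interpolation over a 256-entry user ramp. A minimal sketch in plain floats
 * rather than the driver's fixed31_32 type (lerp_user_ramp_sketch, lut256 and
 * hw_x are hypothetical names, not part of the driver API):
 */
static float lerp_user_ramp_sketch(const float lut256[256], float hw_x)
{
	float norm_x = 255.0f * hw_x;	/* i <= 255*hw_x < i+1 */
	int index = (int)norm_x;	/* floor, for hw_x in [0, 1] */
	int next = (index == 255) ? index : index + 1;

	if (index < 0 || index > 255)
		return 0.0f;
	return lut256[index] +
	       (norm_x - (float)index) * (lut256[next] - lut256[index]);
}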
static void build_new_custom_resulted_curve(
uint32_t hw_points_num,
struct dc_transfer_func_distributed_points *tf_pts)
@@ -1784,29 +1619,6 @@ static void build_new_custom_resulted_curve(
}
}
-static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma,
- uint32_t hw_points_num, struct calculate_buffer *cal_buffer)
-{
- uint32_t i;
-
- struct gamma_coefficients coeff;
- struct pwl_float_data_ex *rgb = rgb_regamma;
- const struct hw_x_point *coord_x = coordinates_x;
-
- build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
-
- i = 0;
- while (i != hw_points_num + 1) {
- rgb->r = translate_from_linear_space_ex(
- coord_x->x, &coeff, 0, cal_buffer);
- rgb->g = rgb->r;
- rgb->b = rgb->r;
- ++coord_x;
- ++rgb;
- ++i;
- }
-}
-
static bool map_regamma_hw_to_x_user(
const struct dc_gamma *ramp,
struct pixel_gamma_point *coeff128,
@@ -1855,125 +1667,6 @@ static bool map_regamma_hw_to_x_user(
#define _EXTRA_POINTS 3
-bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
- const struct regamma_lut *regamma,
- struct calculate_buffer *cal_buffer,
- const struct dc_gamma *ramp)
-{
- struct gamma_coefficients coeff;
- const struct hw_x_point *coord_x = coordinates_x;
- uint32_t i = 0;
-
- do {
- coeff.a0[i] = dc_fixpt_from_fraction(
- regamma->coeff.A0[i], 10000000);
- coeff.a1[i] = dc_fixpt_from_fraction(
- regamma->coeff.A1[i], 1000);
- coeff.a2[i] = dc_fixpt_from_fraction(
- regamma->coeff.A2[i], 1000);
- coeff.a3[i] = dc_fixpt_from_fraction(
- regamma->coeff.A3[i], 1000);
- coeff.user_gamma[i] = dc_fixpt_from_fraction(
- regamma->coeff.gamma[i], 1000);
-
- ++i;
- } while (i != 3);
-
- i = 0;
- /* fixed_pt library has problems handling very small values */
- while (i != 32) {
- output_tf->tf_pts.red[i] = dc_fixpt_zero;
- output_tf->tf_pts.green[i] = dc_fixpt_zero;
- output_tf->tf_pts.blue[i] = dc_fixpt_zero;
- ++coord_x;
- ++i;
- }
- while (i != MAX_HW_POINTS + 1) {
- output_tf->tf_pts.red[i] = translate_from_linear_space_ex(
- coord_x->x, &coeff, 0, cal_buffer);
- output_tf->tf_pts.green[i] = translate_from_linear_space_ex(
- coord_x->x, &coeff, 1, cal_buffer);
- output_tf->tf_pts.blue[i] = translate_from_linear_space_ex(
- coord_x->x, &coeff, 2, cal_buffer);
- ++coord_x;
- ++i;
- }
-
- if (ramp && ramp->type == GAMMA_CS_TFM_1D)
- apply_lut_1d(ramp, MAX_HW_POINTS, &output_tf->tf_pts);
-
- // this function just clamps output to 0-1
- build_new_custom_resulted_curve(MAX_HW_POINTS, &output_tf->tf_pts);
- output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
-
- return true;
-}
-
-bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
- const struct regamma_lut *regamma,
- struct calculate_buffer *cal_buffer,
- const struct dc_gamma *ramp)
-{
- struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
- struct dividers dividers;
-
- struct pwl_float_data *rgb_user = NULL;
- struct pwl_float_data_ex *rgb_regamma = NULL;
- bool ret = false;
-
- if (regamma == NULL)
- return false;
-
- output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
-
- rgb_user = kcalloc(GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS,
- sizeof(*rgb_user),
- GFP_KERNEL);
- if (!rgb_user)
- goto rgb_user_alloc_fail;
-
- rgb_regamma = kcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_regamma),
- GFP_KERNEL);
- if (!rgb_regamma)
- goto rgb_regamma_alloc_fail;
-
- dividers.divider1 = dc_fixpt_from_fraction(3, 2);
- dividers.divider2 = dc_fixpt_from_int(2);
- dividers.divider3 = dc_fixpt_from_fraction(5, 2);
-
- scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers);
-
- if (regamma->flags.bits.applyDegamma == 1) {
- apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS, cal_buffer);
- copy_rgb_regamma_to_coordinates_x(coordinates_x,
- MAX_HW_POINTS, rgb_regamma);
- }
-
- interpolate_user_regamma(MAX_HW_POINTS, rgb_user,
- regamma->flags.bits.applyDegamma, tf_pts);
-
- // no custom HDR curves!
- tf_pts->end_exponent = 0;
- tf_pts->x_point_at_y1_red = 1;
- tf_pts->x_point_at_y1_green = 1;
- tf_pts->x_point_at_y1_blue = 1;
-
- if (ramp && ramp->type == GAMMA_CS_TFM_1D)
- apply_lut_1d(ramp, MAX_HW_POINTS, &output_tf->tf_pts);
-
- // this function just clamps output to 0-1
- build_new_custom_resulted_curve(MAX_HW_POINTS, tf_pts);
-
- ret = true;
-
- kfree(rgb_regamma);
-rgb_regamma_alloc_fail:
- kfree(rgb_user);
-rgb_user_alloc_fail:
- return ret;
-}
-
bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
struct dc_transfer_func *input_tf,
const struct dc_gamma *ramp, bool map_user_ramp)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index ee5c466613de..97e55278940e 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -115,15 +115,4 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp);
-bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
- const struct regamma_lut *regamma,
- struct calculate_buffer *cal_buffer,
- const struct dc_gamma *ramp);
-
-bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
- const struct regamma_lut *regamma,
- struct calculate_buffer *cal_buffer,
- const struct dc_gamma *ramp);
-
-
#endif /* COLOR_MOD_COLOR_GAMMA_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index bbd259cea4f4..2b3964529539 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -48,6 +48,7 @@
#define VSYNCS_BETWEEN_FLIP_THRESHOLD 2
#define FREESYNC_CONSEC_FLIP_AFTER_VSYNC 5
#define FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US 500
+#define MICRO_HZ_TO_HZ(x) ((x) / 1000000)
struct core_freesync {
struct mod_freesync public;
@@ -121,6 +122,17 @@ static unsigned int calc_duration_in_us_from_v_total(
return duration_in_us;
}
+static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
+{
+ unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;
+
+ if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
+ max_hw_v_total -= stream->timing.v_front_porch + 1;
+ }
+
+ return max_hw_v_total;
+}
+
unsigned int mod_freesync_calc_v_total_from_refresh(
const struct dc_stream_state *stream,
unsigned int refresh_in_uhz)
@@ -128,13 +140,26 @@ unsigned int mod_freesync_calc_v_total_from_refresh(
unsigned int v_total;
unsigned int frame_duration_in_ns;
+ if (refresh_in_uhz == 0)
+ return stream->timing.v_total;
+
frame_duration_in_ns =
((unsigned int)(div64_u64((1000000000ULL * 1000000),
refresh_in_uhz)));
- v_total = div64_u64(div64_u64(((unsigned long long)(
- frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
- stream->timing.h_total) + 500000, 1000000);
+ if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) {
+ /* When the target refresh rate is the minimum panel refresh rate,
+ * round down the vtotal value to avoid stretching vblank over
+ * the panel's vtotal boundary.
+ */
+ v_total = div64_u64(div64_u64(((unsigned long long)(
+ frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
+ stream->timing.h_total), 1000000);
+ } else {
+ v_total = div64_u64(div64_u64(((unsigned long long)(
+ frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)),
+ stream->timing.h_total) + 500000, 1000000);
+ }
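/* For illustration, with CTA 1080p60-style timing (pix_clk_100hz = 1485000,
 * h_total = 2200) and refresh_in_uhz = 60000000, the rounded branch gives:
 *
 *   frame_duration_in_ns = (1000000000 * 1000000) / 60000000 = 16666666
 *   v_total = (16666666 * 148500 / 2200 + 500000) / 1000000 = 1125
 *
 * i.e. the nominal vtotal; the round-down branch simply drops the +500000
 * bias so a request at the panel minimum never lands one line past it.
 * (These numbers are illustrative only, not taken from the patch.)
 */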
/* v_total cannot be less than nominal */
if (v_total < stream->timing.v_total) {
@@ -1002,7 +1027,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
if (stream->ctx->dc->caps.max_v_total != 0 && stream->timing.h_total != 0) {
min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
- (stream->timing.h_total * (long long)stream->ctx->dc->caps.max_v_total));
+ (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
}
/* Limit minimum refresh rate to what can be supported by hardware */
min_refresh_in_uhz = min_hardware_refresh_in_uhz > in_config->min_refresh_in_uhz ?
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index c996365e84b0..1d41dd58f6bc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -27,6 +27,11 @@
#include "hdcp.h"
+static inline uint16_t get_hdmi_rxstatus_msg_size(const uint8_t rxstatus[2])
+{
+ return HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rxstatus[1]) << 8 | rxstatus[0];
+}
+
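/* For illustration: rxstatus is a little-endian pair, with the message size
 * split across rxstatus[0] (low byte) and the MSG_SZ_HI bits of rxstatus[1].
 * Assuming the HI macro masks the low two bits, rxstatus = {0x34, 0x02}
 * would decode as (0x02 << 8) | 0x34 = 0x0234 = 564 bytes. A sketch with the
 * masking written out (hypothetical helper, not part of the patch):
 */
static uint16_t rxstatus_msg_size_sketch(const uint8_t rxstatus[2])
{
	return (uint16_t)(((rxstatus[1] & 0x03) << 8) | rxstatus[0]);
}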
static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp *hdcp)
{
uint8_t is_ready = 0;
@@ -35,8 +40,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp
is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0;
else
is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) &&
- (HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
- hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0;
+ get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus) != 0) ? 1 : 0;
return is_ready ? MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY;
}
@@ -84,15 +88,13 @@ static inline enum mod_hdcp_status check_link_integrity_failure_dp(
static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
- uint16_t size;
if (is_dp_hdcp(hdcp)) {
status = MOD_HDCP_STATUS_SUCCESS;
} else {
status = mod_hdcp_read_rxstatus(hdcp);
if (status == MOD_HDCP_STATUS_SUCCESS) {
- size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
- hdcp->auth.msg.hdcp2.rxstatus[0];
+ const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus);
status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_cert)) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING;
@@ -104,7 +106,6 @@ static enum mod_hdcp_status check_ake_cert_available(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
- uint8_t size;
status = mod_hdcp_read_rxstatus(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS)
@@ -115,8 +116,7 @@ static enum mod_hdcp_status check_h_prime_available(struct mod_hdcp *hdcp)
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING;
} else {
- size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
- hdcp->auth.msg.hdcp2.rxstatus[0];
+ const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus);
status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING;
@@ -128,7 +128,6 @@ out:
static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
- uint8_t size;
status = mod_hdcp_read_rxstatus(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS)
@@ -139,8 +138,7 @@ static enum mod_hdcp_status check_pairing_info_available(struct mod_hdcp *hdcp)
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING;
} else {
- size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
- hdcp->auth.msg.hdcp2.rxstatus[0];
+ const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus);
status = (size == sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING;
@@ -152,7 +150,6 @@ out:
static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_FAILURE;
- uint8_t size;
uint16_t max_wait = 20; // units of ms
uint16_t num_polls = 5;
uint16_t wait_time = max_wait / num_polls;
@@ -167,8 +164,7 @@ static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp)
if (status != MOD_HDCP_STATUS_SUCCESS)
break;
- size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
- hdcp->auth.msg.hdcp2.rxstatus[0];
+ const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus);
status = (size == sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING;
@@ -181,7 +177,6 @@ static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
- uint8_t size;
if (is_dp_hdcp(hdcp)) {
status = MOD_HDCP_STATUS_INVALID_OPERATION;
@@ -189,8 +184,7 @@ static enum mod_hdcp_status check_stream_ready_available(struct mod_hdcp *hdcp)
status = mod_hdcp_read_rxstatus(hdcp);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- size = HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
- hdcp->auth.msg.hdcp2.rxstatus[0];
+ const uint16_t size = get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus);
status = (size == sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING;
@@ -249,8 +243,7 @@ static uint8_t process_rxstatus(struct mod_hdcp *hdcp,
sizeof(hdcp->auth.msg.hdcp2.rx_id_list);
else
hdcp->auth.msg.hdcp2.rx_id_list_size =
- HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
- hdcp->auth.msg.hdcp2.rxstatus[0];
+ get_hdmi_rxstatus_msg_size(hdcp->auth.msg.hdcp2.rxstatus);
}
out:
return (*status == MOD_HDCP_STATUS_SUCCESS);
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 3cd52e7a9c77..29ccd3532d13 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
isPSRSUSupported = false;
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
isPSRSUSupported = false;
+ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01)
+ isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}
@@ -994,9 +996,9 @@ void set_replay_coasting_vtotal(struct dc_link *link,
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
-void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
+void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
{
- link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal;
+ link->replay_settings.low_rr_full_screen_video_pseudo_vtotal = vtotal;
}
void calculate_replay_link_off_frame_count(struct dc_link *link,
@@ -1037,3 +1039,8 @@ bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_back
memcpy(caps->data_points, custom_backlight_profiles[config_no].data_points, data_points_size);
return true;
}
+
+void reset_replay_dsync_error_count(struct dc_link *link)
+{
+ link->replay_settings.replay_desync_error_fail_count = 0;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index cac302e8fa10..758a8aa31fbe 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -62,7 +62,7 @@ void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
uint32_t vtotal);
void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
enum replay_coasting_vtotal_type type);
-void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
+void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
@@ -78,4 +78,5 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
bool fill_custom_backlight_caps(unsigned int config_no,
struct dm_acpi_atif_backlight_caps *caps);
+void reset_replay_dsync_error_count(struct dc_link *link);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index a1ece3eecdf5..a08611cb8041 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -49,6 +49,17 @@
| CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
+
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1 0x00000001
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 0x00000002
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 0x00000004
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 0x00000008
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 0x00000010
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 0x00000020
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 0x00000040
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_MASK 0x0000FFFF
+#define CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_SHIFT 0
+
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 0x00040000
@@ -56,6 +67,7 @@
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 0x00100000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 0x00200000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000
+#define CAIL_PCIE_LINK_WIDTH_SUPPORT_MASK 0xFFFF0000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16
/* 1/2/4/8/16 lanes */
@@ -65,4 +77,10 @@
| CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \
| CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+#define AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 \
+ | CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16)
+
#endif
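/* For illustration: the new ASIC-side width flags occupy the low 16 bits and
 * the existing platform-side flags keep the high 16 bits, so both fit in one
 * 32-bit capability word. A sketch of splitting such a word (caps is a
 * hypothetical variable):
 */
static inline uint16_t asic_pcie_widths(uint32_t caps)
{
	return (caps & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_MASK) >>
	       CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_SHIFT;	/* low half */
}

static inline uint16_t platform_pcie_widths(uint32_t caps)
{
	return (caps & CAIL_PCIE_LINK_WIDTH_SUPPORT_MASK) >>
	       CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT;	/* high half */
}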
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 3f91926a50e9..6dccee403a3d 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -28,6 +28,8 @@
#define AMD_MAX_USEC_TIMEOUT 1000000 /* 1000 ms */
+struct amdgpu_ip_block;
+
/*
* Chip flags
@@ -337,6 +339,21 @@ enum DC_DEBUG_MASK {
* @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time.
*/
DC_FORCE_IPS_ENABLE = 0x4000,
+ /**
+ * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for
+ * eDP display from ACPI _DDC method.
+ */
+ DC_DISABLE_ACPI_EDID = 0x8000,
+
+ /**
+ * @DC_DISABLE_HDMI_CEC: If set, disable HDMI-CEC feature in amdgpu driver.
+ */
+ DC_DISABLE_HDMI_CEC = 0x10000,
+
+ /**
+ * @DC_DISABLE_SUBVP: If set, disable DCN Sub-Viewport feature in amdgpu driver.
+ */
+ DC_DISABLE_SUBVP = 0x20000,
};
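/* For illustration, DC_DEBUG_MASK bits are normally supplied through the
 * amdgpu.dcdebugmask module parameter; assuming that path, the new bits
 * would be exercised like (illustrative command lines, not from the patch):
 *
 *   modprobe amdgpu dcdebugmask=0x10000   # DC_DISABLE_HDMI_CEC
 *   modprobe amdgpu dcdebugmask=0x30000   # ... plus DC_DISABLE_SUBVP
 */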
enum amd_dpm_forced_level;
@@ -377,30 +394,30 @@ enum amd_dpm_forced_level;
*/
struct amd_ip_funcs {
char *name;
- int (*early_init)(void *handle);
- int (*late_init)(void *handle);
- int (*sw_init)(void *handle);
- int (*sw_fini)(void *handle);
- int (*early_fini)(void *handle);
- int (*hw_init)(void *handle);
- int (*hw_fini)(void *handle);
- void (*late_fini)(void *handle);
- int (*prepare_suspend)(void *handle);
- int (*suspend)(void *handle);
- int (*resume)(void *handle);
+ int (*early_init)(struct amdgpu_ip_block *ip_block);
+ int (*late_init)(struct amdgpu_ip_block *ip_block);
+ int (*sw_init)(struct amdgpu_ip_block *ip_block);
+ int (*sw_fini)(struct amdgpu_ip_block *ip_block);
+ int (*early_fini)(struct amdgpu_ip_block *ip_block);
+ int (*hw_init)(struct amdgpu_ip_block *ip_block);
+ int (*hw_fini)(struct amdgpu_ip_block *ip_block);
+ void (*late_fini)(struct amdgpu_ip_block *ip_block);
+ int (*prepare_suspend)(struct amdgpu_ip_block *ip_block);
+ int (*suspend)(struct amdgpu_ip_block *ip_block);
+ int (*resume)(struct amdgpu_ip_block *ip_block);
bool (*is_idle)(void *handle);
- int (*wait_for_idle)(void *handle);
- bool (*check_soft_reset)(void *handle);
- int (*pre_soft_reset)(void *handle);
- int (*soft_reset)(void *handle);
- int (*post_soft_reset)(void *handle);
- int (*set_clockgating_state)(void *handle,
+ int (*wait_for_idle)(struct amdgpu_ip_block *ip_block);
+ bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block);
+ int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block);
+ int (*soft_reset)(struct amdgpu_ip_block *ip_block);
+ int (*post_soft_reset)(struct amdgpu_ip_block *ip_block);
+ int (*set_clockgating_state)(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state);
- int (*set_powergating_state)(void *handle,
+ int (*set_powergating_state)(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state);
void (*get_clockgating_state)(void *handle, u64 *flags);
- void (*dump_ip_state)(void *handle);
- void (*print_ip_state)(void *handle, struct drm_printer *p);
+ void (*dump_ip_state)(struct amdgpu_ip_block *ip_block);
+ void (*print_ip_state)(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
};
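/* For illustration, an IP block hook after this refactor receives the typed
 * ip_block handle instead of a bare void pointer; a minimal sketch, assuming
 * the handle carries an adev back-pointer (foo_hw_init is hypothetical):
 */
static int foo_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;	/* typed, no cast */

	(void)adev;	/* hardware bring-up would go here */
	return 0;
}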
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h
index cae1a7e74323..73c5dd5e83d4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h
@@ -19,8 +19,8 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _dcn_2_0_3_OFFSET_HEADER
-#define _dcn_2_0_3_OFFSET_HEADER
+#ifndef _dcn_2_0_1_OFFSET_HEADER
+#define _dcn_2_0_1_OFFSET_HEADER
// addressBlock: dce_dc_dccg_dccg_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h
index ca1e1eb39256..290d807800a6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h
@@ -18,8 +18,8 @@
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef _dcn_2_0_3_SH_MASK_HEADER
-#define _dcn_2_0_3_SH_MASK_HEADER
+#ifndef _dcn_2_0_1_SH_MASK_HEADER
+#define _dcn_2_0_1_SH_MASK_HEADER
// addressBlock: dce_dc_dccg_dccg_dispdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
index f42a276499cd..5d9d5fea6e06 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
@@ -6199,10 +6199,12 @@
#define DCHUBBUB_CTRL_STATUS__ROB_UNDERFLOW_STATUS__SHIFT 0x1
#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS__SHIFT 0x2
#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR__SHIFT 0x3
+#define DCHUBBUB_CTRL_STATUS__DCHUBBUB_HW_DEBUG__SHIFT 0x4
#define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE__SHIFT 0x1f
#define DCHUBBUB_CTRL_STATUS__ROB_UNDERFLOW_STATUS_MASK 0x00000002L
#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_STATUS_MASK 0x00000004L
#define DCHUBBUB_CTRL_STATUS__ROB_OVERFLOW_CLEAR_MASK 0x00000008L
+#define DCHUBBUB_CTRL_STATUS__DCHUBBUB_HW_DEBUG_MASK 0x3FFFFFF0L
#define DCHUBBUB_CTRL_STATUS__CSTATE_SWATH_CHK_GOOD_MODE_MASK 0x80000000L
//DCHUBBUB_TIMEOUT_DETECTION_CTRL1
#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
index 2c3ce243861a..380e44230bda 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h
@@ -1232,6 +1232,29 @@
#define mmMC_VM_MX_L1_PERFCOUNTER_HI 0x059d
#define mmMC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 0
+// Stand Alone Walker (SAW) Registers
+#define VMC_TAP_PDE_REQUEST_SNOOP_OFFSET 8
+#define VMC_TAP_PTE_REQUEST_SNOOP_OFFSET 11
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x0606
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x0607
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x0608
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x0609
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x060a
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x060b
+#define mmVM_L2_SAW_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define mmVM_L2_SAW_CONTEXT0_CNTL 0x0604
+#define mmVM_L2_SAW_CONTEXT0_CNTL_BASE_IDX 0
+#define CONTEXT0_CNTL_ENABLE_OFFSET 0
+#define CONTEXT0_CNTL_PAGE_TABLE_DEPTH_OFFSET 1
+#define mmVM_L2_SAW_CONTEXTS_DISABLE 0x060c
+#define mmVM_L2_SAW_CONTEXTS_DISABLE_BASE_IDX 0
+#define mmVM_L2_SAW_CNTL4 0x0603
+#define mmVM_L2_SAW_CNTL4_BASE_IDX 0
// addressBlock: mmhub_utcl2_atcl2dec
// base address: 0x69900
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
index 5ebe4cb40f9d..c38a01742d6f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
@@ -7571,6 +7571,8 @@
// base address: 0x10100000
#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0 0xd000
#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_BASE_IDX 5
+#define regRCC_DEV0_EPF5_STRAP4 0xd284
+#define regRCC_DEV0_EPF5_STRAP4_BASE_IDX 5
// addressBlock: nbio_nbif0_bif_rst_bif_rst_regblk
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h
index eb8c556d9c93..3b96f1e5a180 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h
@@ -50665,6 +50665,19 @@
#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D1_SUPPORT_DEV0_F0_MASK 0x40000000L
#define RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_D2_SUPPORT_DEV0_F0_MASK 0x80000000L
+//RCC_DEV0_EPF5_STRAP4
+#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F5__SHIFT 0x14
+#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_EN_DEV0_F5__SHIFT 0x15
+#define RCC_DEV0_EPF5_STRAP4__STRAP_FLR_EN_DEV0_F5__SHIFT 0x16
+#define RCC_DEV0_EPF5_STRAP4__STRAP_PME_SUPPORT_DEV0_F5__SHIFT 0x17
+#define RCC_DEV0_EPF5_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F5__SHIFT 0x1c
+#define RCC_DEV0_EPF5_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F5__SHIFT 0x1f
+#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_64BIT_EN_DEV0_F5_MASK 0x00100000L
+#define RCC_DEV0_EPF5_STRAP4__STRAP_ATOMIC_EN_DEV0_F5_MASK 0x00200000L
+#define RCC_DEV0_EPF5_STRAP4__STRAP_FLR_EN_DEV0_F5_MASK 0x00400000L
+#define RCC_DEV0_EPF5_STRAP4__STRAP_PME_SUPPORT_DEV0_F5_MASK 0x0F800000L
+#define RCC_DEV0_EPF5_STRAP4__STRAP_INTERRUPT_PIN_DEV0_F5_MASK 0x70000000L
+#define RCC_DEV0_EPF5_STRAP4__STRAP_AUXPWR_SUPPORT_DEV0_F5_MASK 0x80000000L
// addressBlock: nbio_nbif0_bif_rst_bif_rst_regblk
//HARD_RST_CTRL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h
new file mode 100644
index 000000000000..0e8f12728d5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_8_14_0_OFFSET_HEADER
+#define _umc_8_14_0_OFFSET_HEADER
+
+#define regUMCCH0_GeccErrCntSel 0x0328
+#define regUMCCH0_GeccErrCntSel_BASE_IDX 0
+#define regUMCCH0_GeccErrCnt 0x0329
+#define regUMCCH0_GeccErrCnt_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h
new file mode 100644
index 000000000000..5d723b5d9b87
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _umc_8_14_0_SH_MASK_HEADER
+#define _umc_8_14_0_SH_MASK_HEADER
+
+//UMCCH0_GeccErrCntSel
+#define UMCCH0_GeccErrCntSel__GeccErrInt__SHIFT 0xc
+#define UMCCH0_GeccErrCntSel__GeccErrCntEn__SHIFT 0xf
+#define UMCCH0_GeccErrCntSel__PoisonCntEn__SHIFT 0x10
+#define UMCCH0_GeccErrCntSel__GeccErrInt_MASK 0x00003000L
+#define UMCCH0_GeccErrCntSel__GeccErrCntEn_MASK 0x00008000L
+#define UMCCH0_GeccErrCntSel__PoisonCntEn_MASK 0x00030000L
+//UMCCH0_GeccErrCnt
+#define UMCCH0_GeccErrCnt__GeccErrCnt__SHIFT 0x0
+#define UMCCH0_GeccErrCnt__GeccUnCorrErrCnt__SHIFT 0x10
+#define UMCCH0_GeccErrCnt__GeccErrCnt_MASK 0x0000FFFFL
+#define UMCCH0_GeccErrCnt__GeccUnCorrErrCnt_MASK 0xFFFF0000L
+
+#endif
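/* For illustration, these are conventional mask/shift pairs; extracting the
 * correctable-error count from a raw register value looks like this sketch
 * (val is a hypothetical value, e.g. from an RREG32-style read):
 */
static inline uint32_t gecc_corr_err_count(uint32_t val)
{
	return (val & UMCCH0_GeccErrCnt__GeccErrCnt_MASK) >>
	       UMCCH0_GeccErrCnt__GeccErrCnt__SHIFT;
}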
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index b0fc22383e28..0160d65f3f5e 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1300,12 +1300,17 @@ struct atom_ext_display_path
//usCaps
enum ext_display_path_cap_def {
- EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE = 0x0001,
- EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = 0x0002,
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007C,
- EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x01 << 2), //PI redriver chip
- EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x02 << 2), //TI retimer chip
- EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 = (0x03 << 2) //Parade DP->HDMI reconverter chip
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007E,
+ AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007E,
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = (0x01 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x02 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__DP_EARLY_8B10B_TPS2 = (0x03 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x04 << 1),
+ AMD_EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 = (0x06 << 1),
+ EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = (0x07 << 1),
+ EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x08 << 1), //PI redriver chip
+ EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x09 << 1), //TI retimer chip
+ EXT_DISPLAY_PATH_CAPS__AMD_INTERNAL = (0x0a << 1), //AMD internal customer chip placeholder
};
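/* For illustration: under the new encoding the external-chip ID sits in bits
 * [6:1] of usCaps, so decoding is mask-then-compare (caps is a hypothetical
 * variable holding atom_ext_display_path.usCaps):
 *
 *   if ((caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) ==
 *       AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
 *           handle the fixed voltage-swing retimer path;
 */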
struct atom_external_display_connection_info
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
new file mode 100644
index 000000000000..64b553e7de1a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __IRQSRCS_VCN_5_0_H__
+#define __IRQSRCS_VCN_5_0_H__
+
+#define VCN_5_0__SRCID__UVD_TRAP 114 // 0x72 UVD_TRAP
+#define VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE 119 // 0x77 Encoder General Purpose
+#define VCN_5_0__SRCID__UVD_ENC_LOW_LATENCY 120 // 0x78 Encoder Low Latency
+#define VCN_5_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT 124 // 0x7c UVD system message interrupt
+#define VCN_5_0__SRCID__JPEG_ENCODE 151 // 0x97 JRBC Encode interrupt
+#define VCN_5_0__SRCID__JPEG_DECODE 153 // 0x99 JRBC Decode interrupt
+#define VCN_5_0__SRCID__JPEG1_DECODE 149 // 0x95 JRBC1 Decode interrupt
+#define VCN_5_0__SRCID__JPEG2_DECODE 151 // 0x97 JRBC2 Decode interrupt
+#define VCN_5_0__SRCID__JPEG3_DECODE 171 // 0xab JRBC3 Decode interrupt
+#define VCN_5_0__SRCID__JPEG4_DECODE 172 // 0xac JRBC4 Decode interrupt
+#define VCN_5_0__SRCID__JPEG5_DECODE 173 // 0xad JRBC5 Decode interrupt
+#define VCN_5_0__SRCID__JPEG6_DECODE 174 // 0xae JRBC6 Decode interrupt
+#define VCN_5_0__SRCID__JPEG7_DECODE 175 // 0xaf JRBC7 Decode interrupt
+#define VCN_5_0__SRCID__JPEG8_DECODE 177 // 0xb1 JRBC8 Decode interrupt
+#define VCN_5_0__SRCID__JPEG9_DECODE 178 // 0xb2 JRBC9 Decode interrupt
+
+#define VCN_5_0__SRCID_UVD_POISON 160
+#define VCN_5_0__SRCID_DJPEG0_POISON 161
+#define VCN_5_0__SRCID_EJPEG0_POISON 162
+#endif
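/* For illustration, SRCIDs like these are consumed when wiring up interrupt
 * sources; a typical registration, assuming the usual amdgpu_irq_add_id()
 * pattern (the client and source arguments are illustrative, not taken from
 * a real VCN 5.0 init function):
 *
 *   r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
 *                         VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
 *                         &adev->vcn.inst[i].irq);
 */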
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 19a48d98830a..9189dcb65188 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -119,6 +119,8 @@ enum pp_clock_type {
OD_ACOUSTIC_TARGET,
OD_FAN_TARGET_TEMPERATURE,
OD_FAN_MINIMUM_PWM,
+ OD_FAN_ZERO_RPM_ENABLE,
+ OD_FAN_ZERO_RPM_STOP_TEMP,
};
enum amd_pp_sensors {
@@ -162,6 +164,7 @@ enum amd_pp_task {
};
enum PP_SMC_POWER_PROFILE {
+ PP_SMC_POWER_PROFILE_UNKNOWN = -1,
PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,
@@ -199,6 +202,8 @@ enum PP_OD_DPM_TABLE_COMMAND {
PP_OD_EDIT_ACOUSTIC_TARGET,
PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
PP_OD_EDIT_FAN_MINIMUM_PWM,
+ PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
+ PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
};
struct pp_states_info {
@@ -336,7 +341,8 @@ enum pp_policy_soc_pstate {
#define MAX_CLKS 4
#define NUM_VCN 4
#define NUM_JPEG_ENG 32
-
+#define MAX_XCC 8
+#define NUM_XCP 8
struct seq_file;
enum amd_pp_clock_type;
struct amd_pp_simple_clock_info;
@@ -350,6 +356,26 @@ struct pp_smu_wm_range_sets;
struct pp_smu_nv_clock_table;
struct dpm_clocks;
+struct amdgpu_xcp_metrics {
+ /* Utilization Instantaneous (%) */
+ uint32_t gfx_busy_inst[MAX_XCC];
+ uint16_t jpeg_busy[NUM_JPEG_ENG];
+ uint16_t vcn_busy[NUM_VCN];
+ /* Utilization Accumulated (%) */
+ uint64_t gfx_busy_acc[MAX_XCC];
+};
+
+struct amdgpu_xcp_metrics_v1_1 {
+ /* Utilization Instantaneous (%) */
+ uint32_t gfx_busy_inst[MAX_XCC];
+ uint16_t jpeg_busy[NUM_JPEG_ENG];
+ uint16_t vcn_busy[NUM_VCN];
+ /* Utilization Accumulated (%) */
+ uint64_t gfx_busy_acc[MAX_XCC];
+ /* Total App Clock Counter Accumulated */
+ uint64_t gfx_below_host_limit_acc[MAX_XCC];
+};
+
struct amd_pm_funcs {
/* export for dpm on ci and si */
int (*pre_set_power_state)(void *handle);
@@ -395,7 +421,9 @@ struct amd_pm_funcs {
int (*load_firmware)(void *handle);
int (*wait_for_fw_loading_complete)(void *handle);
int (*set_powergating_by_smu)(void *handle,
- uint32_t block_type, bool gate);
+ uint32_t block_type,
+ bool gate,
+ int inst);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
int (*set_power_limit)(void *handle, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit,
@@ -872,6 +900,196 @@ struct gpu_metrics_v1_5 {
uint16_t padding;
};
+struct gpu_metrics_v1_6 {
+ struct metrics_table_header common_header;
+
+ /* Temperature (Celsius) */
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrsoc;
+
+ /* Power (Watts) */
+ uint16_t curr_socket_power;
+
+ /* Utilization (%) */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+
+ /* Energy (in 15.259 uJ = 2^-16 J units) */
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Accumulation cycle counter */
+ uint32_t accumulation_counter;
+
+ /* Accumulated throttler residencies */
+ uint32_t prochot_residency_acc;
+ uint32_t ppt_residency_acc;
+ uint32_t socket_thm_residency_acc;
+ uint32_t vr_thm_residency_acc;
+ uint32_t hbm_thm_residency_acc;
+
+ /* Clock Lock Status. Each bit corresponds to clock instance */
+ uint32_t gfxclk_lock_status;
+
+ /* Link width (number of lanes) and speed (in 0.1 GT/s) */
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+
+ /* XGMI bus width and bitrate (in Gbps) */
+ uint16_t xgmi_link_width;
+ uint16_t xgmi_link_speed;
+
+ /* Utilization Accumulated (%) */
+ uint32_t gfx_activity_acc;
+ uint32_t mem_activity_acc;
+
+ /* PCIE accumulated bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_acc;
+
+ /* PCIE instantaneous bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_inst;
+
+ /* PCIE L0 to recovery state transition accumulated count */
+ uint64_t pcie_l0_to_recov_count_acc;
+
+ /* PCIE replay accumulated count */
+ uint64_t pcie_replay_count_acc;
+
+ /* PCIE replay rollover accumulated count */
+ uint64_t pcie_replay_rover_count_acc;
+
+ /* PCIE NAK sent accumulated count */
+ uint32_t pcie_nak_sent_count_acc;
+
+ /* PCIE NAK received accumulated count */
+ uint32_t pcie_nak_rcvd_count_acc;
+
+ /* XGMI accumulated data transfer size (KiloBytes) */
+ uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS];
+ uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS];
+
+ /* PMFW attached timestamp (10ns resolution) */
+ uint64_t firmware_timestamp;
+
+ /* Current clocks (MHz) */
+ uint16_t current_gfxclk[MAX_GFX_CLKS];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+
+ /* Number of current partition */
+ uint16_t num_partition;
+
+ /* XCP metrics stats */
+ struct amdgpu_xcp_metrics xcp_stats[NUM_XCP];
+
+ /* PCIE other end recovery counter */
+ uint32_t pcie_lc_perf_other_end_recovery;
+};
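/* For illustration, energy_accumulator counts in 2^-16 J (~15.259 uJ) steps,
 * so a reading of 6553600 is 6553600 / 65536 = 100 J. A conversion sketch:
 */
static inline uint64_t energy_acc_to_microjoules(uint64_t acc)
{
	return (acc * 1000000ULL) >> 16;	/* acc * 2^-16 J, in uJ */
}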
+
+struct gpu_metrics_v1_7 {
+ struct metrics_table_header common_header;
+
+ /* Temperature (Celsius) */
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrsoc;
+
+ /* Power (Watts) */
+ uint16_t curr_socket_power;
+
+ /* Utilization (%) */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+
+ /* VRAM max bandwidth (in GB/sec) at max memory clock */
+ uint64_t mem_max_bandwidth;
+
+ /* Energy (in 15.259 uJ = 2^-16 J units) */
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Accumulation cycle counter */
+ uint32_t accumulation_counter;
+
+ /* Accumulated throttler residencies */
+ uint32_t prochot_residency_acc;
+ uint32_t ppt_residency_acc;
+ uint32_t socket_thm_residency_acc;
+ uint32_t vr_thm_residency_acc;
+ uint32_t hbm_thm_residency_acc;
+
+ /* Clock Lock Status. Each bit corresponds to clock instance */
+ uint32_t gfxclk_lock_status;
+
+ /* Link width (number of lanes) and speed (in 0.1 GT/s) */
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+
+ /* XGMI bus width and bitrate (in Gbps) */
+ uint16_t xgmi_link_width;
+ uint16_t xgmi_link_speed;
+
+ /* Utilization Accumulated (%) */
+ uint32_t gfx_activity_acc;
+ uint32_t mem_activity_acc;
+
+ /* PCIE accumulated bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_acc;
+
+ /* PCIE instantaneous bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_inst;
+
+ /* PCIE L0 to recovery state transition accumulated count */
+ uint64_t pcie_l0_to_recov_count_acc;
+
+ /* PCIE replay accumulated count */
+ uint64_t pcie_replay_count_acc;
+
+ /* PCIE replay rollover accumulated count */
+ uint64_t pcie_replay_rover_count_acc;
+
+ /* PCIE NAK sent accumulated count */
+ uint32_t pcie_nak_sent_count_acc;
+
+ /* PCIE NAK received accumulated count */
+ uint32_t pcie_nak_rcvd_count_acc;
+
+ /* XGMI accumulated data transfer size (KiloBytes) */
+ uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS];
+ uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS];
+
+ /* XGMI link status (active/inactive) */
+ uint16_t xgmi_link_status[NUM_XGMI_LINKS];
+
+ uint16_t padding;
+
+ /* PMFW attached timestamp (10ns resolution) */
+ uint64_t firmware_timestamp;
+
+ /* Current clocks (Mhz) */
+ uint16_t current_gfxclk[MAX_GFX_CLKS];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+
+ /* Number of current partition */
+ uint16_t num_partition;
+
+ /* XCP metrics stats */
+ struct amdgpu_xcp_metrics_v1_1 xcp_stats[NUM_XCP];
+
+ /* PCIE other end recovery counter */
+ uint32_t pcie_lc_perf_other_end_recovery;
+};
+
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 21ceafce1f9b..eb46cb10c24d 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -230,13 +230,23 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t disable_add_queue_wptr_mc_addr : 1;
uint32_t enable_mes_event_int_logging : 1;
uint32_t enable_reg_active_poll : 1;
- uint32_t reserved : 21;
+ uint32_t use_disable_queue_in_legacy_uq_preemption : 1;
+ uint32_t send_write_data : 1;
+ uint32_t os_tdr_timeout_override : 1;
+ uint32_t use_rs64mem_for_proc_gang_ctx : 1;
+ uint32_t use_add_queue_unmap_flag_addr : 1;
+ uint32_t enable_mes_sch_stb_log : 1;
+ uint32_t limit_single_process : 1;
+ uint32_t is_strix_tmz_wa_enabled : 1;
+ uint32_t reserved : 13;
};
uint32_t uint32_t_all;
};
uint32_t oversubscription_timer;
uint64_t doorbell_info;
uint64_t event_intr_history_gpu_mc_ptr;
+ uint64_t timestamp;
+ uint32_t os_tdr_timeout_in_sec;
};
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
@@ -563,6 +573,11 @@ enum MESAPI_MISC_OPCODE {
MESAPI_MISC__READ_REG,
MESAPI_MISC__WAIT_REG_MEM,
MESAPI_MISC__SET_SHADER_DEBUGGER,
+ MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE,
+ MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES,
+ MESAPI_MISC__CHANGE_CONFIG,
+ MESAPI_MISC__LAUNCH_CLEANER_SHADER,
+
MESAPI_MISC__MAX,
};
@@ -617,6 +632,31 @@ struct SET_SHADER_DEBUGGER {
uint32_t trap_en;
};
+enum MESAPI_MISC__CHANGE_CONFIG_OPTION {
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0,
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1,
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2,
+
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F
+};
+
+struct CHANGE_CONFIG {
+ enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode;
+ union {
+ struct {
+ uint32_t limit_single_process : 1;
+ uint32_t enable_hws_logging_buffer : 1;
+ uint32_t reserved : 31;
+ } bits;
+ uint32_t all;
+ } option;
+
+ struct {
+ uint32_t tdr_level;
+ uint32_t tdr_delay;
+ } tdr_config;
+};
+
union MESAPI__MISC {
struct {
union MES_API_HEADER header;
@@ -631,6 +671,7 @@ union MESAPI__MISC {
struct WAIT_REG_MEM wait_reg_mem;
struct SET_SHADER_DEBUGGER set_shader_debugger;
enum MES_AMD_PRIORITY_LEVEL queue_sch_level;
+ struct CHANGE_CONFIG change_config;
uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS];
};
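/* For illustration, a MESAPI_MISC__CHANGE_CONFIG request is an ordinary MISC
 * packet carrying the new payload; a sketch of filling one (misc is a
 * hypothetical zero-initialized union MESAPI__MISC):
 *
 *   misc.opcode = MESAPI_MISC__CHANGE_CONFIG;
 *   misc.change_config.opcode =
 *           MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS;
 *   misc.change_config.option.bits.limit_single_process = 1;
 */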
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index 101e2fe962c6..c9b2ca5cf75f 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -643,6 +643,10 @@ enum MESAPI_MISC_OPCODE {
MESAPI_MISC__SET_SHADER_DEBUGGER,
MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE,
MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES,
+ MESAPI_MISC__QUERY_HUNG_ENGINE_ID,
+ MESAPI_MISC__CHANGE_CONFIG,
+ MESAPI_MISC__LAUNCH_CLEANER_SHADER,
+ MESAPI_MISC__SETUP_MES_DBGEXT,
MESAPI_MISC__MAX,
};
@@ -713,6 +717,31 @@ struct SET_GANG_SUBMIT {
uint32_t slave_gang_context_array_index;
};
+enum MESAPI_MISC__CHANGE_CONFIG_OPTION {
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0,
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1,
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2,
+
+ MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F
+};
+
+struct CHANGE_CONFIG {
+ enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode;
+ union {
+ struct {
+ uint32_t limit_single_process : 1;
+ uint32_t enable_hws_logging_buffer : 1;
+ uint32_t reserved : 30;
+ } bits;
+ uint32_t all;
+ } option;
+
+ struct {
+ uint32_t tdr_level;
+ uint32_t tdr_delay;
+ } tdr_config;
+};
+
union MESAPI__MISC {
struct {
union MES_API_HEADER header;
@@ -726,7 +755,7 @@ union MESAPI__MISC {
struct WAIT_REG_MEM wait_reg_mem;
struct SET_SHADER_DEBUGGER set_shader_debugger;
enum MES_AMD_PRIORITY_LEVEL queue_sch_level;
-
+ struct CHANGE_CONFIG change_config;
uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS];
};
uint64_t timestamp;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 9dc82f4d7c93..6a9e26905edf 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -70,13 +70,18 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
return ret;
}
-int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
+int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
+ uint32_t block_type,
+ bool gate,
+ int inst)
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
+ bool is_vcn = (block_type == AMD_IP_BLOCK_TYPE_UVD || block_type == AMD_IP_BLOCK_TYPE_VCN);
- if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
+ if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
+ (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
dev_dbg(adev->dev, "IP block%d already in the target %s state!",
block_type, gate ? "gate" : "ungate");
return 0;
@@ -88,7 +93,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE:
case AMD_IP_BLOCK_TYPE_GFX:
- case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
case AMD_IP_BLOCK_TYPE_JPEG:
case AMD_IP_BLOCK_TYPE_GMC:
@@ -96,7 +100,12 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_VPE:
if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
+ (adev)->powerplay.pp_handle, block_type, gate, 0));
+ break;
+ case AMD_IP_BLOCK_TYPE_VCN:
+ if (pp_funcs && pp_funcs->set_powergating_by_smu)
+ ret = (pp_funcs->set_powergating_by_smu(
+ (adev)->powerplay.pp_handle, block_type, gate, inst));
break;
default:
break;
@@ -566,7 +575,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
return;
}
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+}
+
+void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
+{
+ int ret = 0;
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
if (ret)
DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -591,7 +610,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
return;
}
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -601,7 +620,7 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
enable ? "enable" : "disable", ret);
@@ -611,7 +630,7 @@ void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable);
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
if (ret)
DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
enable ? "enable" : "disable", ret);
@@ -700,6 +719,21 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
return ret;
}
+int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_sdma(smu, inst_mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
@@ -953,6 +987,24 @@ enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device
return level;
}
+static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
+{
+ /* enter UMD Pstate */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+}
+
+static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
+{
+ /* exit UMD Pstate */
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+}
+
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
enum amd_dpm_forced_level level)
{
@@ -973,6 +1025,10 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
if (current_level == level)
return 0;
+ if (!(current_level & profile_mode_mask) &&
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
+ return -EINVAL;
+
if (adev->asic_type == CHIP_RAVEN) {
if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
@@ -984,35 +1040,25 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
}
}
- if (!(current_level & profile_mode_mask) &&
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
- return -EINVAL;
-
- if (!(current_level & profile_mode_mask) &&
- (level & profile_mode_mask)) {
- /* enter UMD Pstate */
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_UNGATE);
- amdgpu_device_ip_set_clockgating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- } else if ((current_level & profile_mode_mask) &&
- !(level & profile_mode_mask)) {
- /* exit UMD Pstate */
- amdgpu_device_ip_set_clockgating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
- }
+ if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
+ amdgpu_dpm_enter_umd_state(adev);
+ else if ((current_level & profile_mode_mask) &&
+ !(level & profile_mode_mask))
+ amdgpu_dpm_exit_umd_state(adev);
mutex_lock(&adev->pm.mutex);
if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
level)) {
mutex_unlock(&adev->pm.mutex);
+ /* Setting the new level failed; restore the previous UMD state */
+ if (!(current_level & profile_mode_mask) &&
+ (level & profile_mode_mask))
+ amdgpu_dpm_exit_umd_state(adev);
+ else if ((current_level & profile_mode_mask) &&
+ !(level & profile_mode_mask))
+ amdgpu_dpm_enter_umd_state(adev);
+
return -EINVAL;
}
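/* For illustration, the new inst argument selects the VCN instance while all
 * other block types pass 0; assuming a valid adev pointer:
 *
 *   amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN,
 *                                     true, 1);	// gate VCN instance 1
 *   amdgpu_dpm_enable_vcn(adev, false, 0);	// gate instance 0 via helper
 */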
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index d5d6ab484e5a..e8ae7681bf0a 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -145,15 +145,12 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
amdgpu_dpm_get_current_power_state(adev, &pm);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%s\n",
@@ -185,11 +182,9 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
amdgpu_dpm_set_power_state(adev, state);
@@ -273,15 +268,12 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
level = amdgpu_dpm_get_performance_level(adev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%s\n",
@@ -336,11 +328,9 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
if (amdgpu_dpm_force_performance_level(adev, level)) {
@@ -374,16 +364,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
@@ -412,17 +399,14 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
amdgpu_dpm_get_current_power_state(adev, &pm);
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
@@ -485,11 +469,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_get_pp_num_states(adev, &data);
if (ret)
@@ -544,15 +526,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (size <= 0)
@@ -580,11 +559,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
@@ -808,11 +785,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
type,
@@ -865,11 +840,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
@@ -888,7 +861,6 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -929,11 +901,9 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
if (ret)
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
@@ -960,17 +930,14 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_get_ppfeature_status(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -1029,11 +996,9 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
if (ret == -ENOENT)
@@ -1042,7 +1007,6 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -1102,11 +1066,9 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
if (ret)
return ret;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_force_clock_level(adev, type, mask);
@@ -1283,15 +1245,12 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
value = amdgpu_dpm_get_sclk_od(adev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%d\n", value);
@@ -1317,11 +1276,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
if (ret)
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
@@ -1345,15 +1302,12 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
value = amdgpu_dpm_get_mclk_od(adev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%d\n", value);
@@ -1379,11 +1333,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
if (ret)
return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
@@ -1409,7 +1361,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
* create a custom set of heuristics, write a string of numbers to the file
* starting with the number of the custom profile along with a setting
* for each heuristic parameter. Due to differences across asic families
- * the heuristic parameters vary from family to family.
+ * the heuristic parameters vary from family to family. Additionally,
+ * you can apply the custom heuristics to different clock domains. Each
+ * clock domain is considered a distinct operation, so if you modify the
+ * gfxclk heuristics and then the memclk heuristics, all of the
+ * custom heuristics will be retained until you switch to another profile.
*
*/
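
Purely as an illustration of the interface this comment documents, a userspace sketch that writes a custom profile; the profile index and every heuristic value below are placeholders, since the parameter list differs per ASIC family:

#include <stdio.h>

int main(void)
{
	/* Path and numbers are hypothetical; read the file first to see
	 * the profile numbering and parameter layout for your ASIC. */
	FILE *f = fopen("/sys/class/drm/card0/device/pp_power_profile_mode", "w");

	if (!f)
		return 1;
	/* "<custom profile index> <heuristic parameters ...>" */
	fprintf(f, "6 70 60 1 3\n");
	return fclose(f);
}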
@@ -1427,17 +1383,14 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_get_power_profile_mode(adev, buf);
if (size <= 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -1492,11 +1445,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
parameter[parameter_size] = profile_mode;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
@@ -1520,16 +1471,13 @@ static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
+ r = pm_runtime_get_if_active(adev->dev);
+ if (r <= 0)
+ return r ?: -EPERM;
/* get the sensor value */
r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1639,15 +1587,12 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
if (!adev->asic_funcs->get_pcie_usage)
return -ENODATA;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return sysfs_emit(buf, "%llu %llu %i\n",
@@ -1770,11 +1715,9 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
if (!ret)
@@ -1782,7 +1725,6 @@ static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
else
size = sysfs_emit(buf, "failed to get thermal limit\n");
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -1807,14 +1749,14 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
return -EINVAL;
}
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
if (ret) {
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
dev_err(dev, "failed to update thermal limit\n");
return ret;
}
@@ -1849,15 +1791,12 @@ static ssize_t amdgpu_get_pm_metrics(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -1890,11 +1829,9 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(ddev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
@@ -1906,7 +1843,6 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
memcpy(buf, gpu_metrics, size);
out:
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return size;
@@ -2008,11 +1944,9 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- r = pm_runtime_get_sync(ddev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ r = pm_runtime_resume_and_get(ddev->dev);
+ if (r < 0)
return r;
- }
r = kstrtoint(buf, 10, &bias);
if (r)
@@ -2335,11 +2269,9 @@ static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
policy_attr =
container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
+ ret = pm_runtime_resume_and_get(ddev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
@@ -2772,15 +2704,12 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
@@ -2817,11 +2746,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
else
return -EINVAL;
- ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ ret = pm_runtime_resume_and_get(adev->dev);
+ if (ret < 0)
return ret;
- }
ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
@@ -2866,11 +2793,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if (err)
return err;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = pm_runtime_resume_and_get(adev->dev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
if (err)
@@ -2907,15 +2832,12 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return err;
- }
+ err = pm_runtime_get_if_active(adev->dev);
+ if (err <= 0)
+ return err ?: -EPERM;
err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
@@ -2937,15 +2859,12 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return err;
- }
+ err = pm_runtime_get_if_active(adev->dev);
+ if (err <= 0)
+ return err ?: -EPERM;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
@@ -3001,15 +2920,12 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return err;
- }
+ err = pm_runtime_get_if_active(adev->dev);
+ if (err <= 0)
+ return err ?: -EPERM;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (err)
@@ -3036,11 +2952,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
if (err)
return err;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = pm_runtime_resume_and_get(adev->dev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
if (err)
@@ -3076,15 +2990,12 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (ret)
@@ -3119,11 +3030,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = pm_runtime_resume_and_get(adev->dev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
@@ -3248,11 +3157,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return r;
- }
+ r = pm_runtime_get_if_active(adev->dev);
+ if (r <= 0)
+ return r ?: -EPERM;
r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
@@ -3262,7 +3169,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
else
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return size;
@@ -3339,11 +3245,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
value = value / 1000000; /* convert to Watt */
value |= limit_type << 24;
- err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
- if (err < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ err = pm_runtime_resume_and_get(adev->dev);
+ if (err < 0)
return err;
- }
err = amdgpu_dpm_set_power_limit(adev, value);
@@ -3787,17 +3691,14 @@ static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- ret = pm_runtime_get_sync(adev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(adev->dev);
- return ret;
- }
+ ret = pm_runtime_get_if_active(adev->dev);
+ if (ret <= 0)
+ return ret ?: -EPERM;
size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
if (size == 0)
size = sysfs_emit(buf, "\n");
- pm_runtime_mark_last_busy(adev->dev);
pm_runtime_put_autosuspend(adev->dev);
return size;
@@ -3879,23 +3780,23 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
if (ret)
return ret;
- ret = pm_runtime_get_sync(adev->dev);
+ ret = pm_runtime_resume_and_get(adev->dev);
if (ret < 0)
- goto err_out0;
+ return ret;
ret = amdgpu_dpm_odn_edit_dpm_table(adev,
cmd_type,
parameter,
parameter_size);
if (ret)
- goto err_out1;
+ goto err_out;
if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
ret = amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_READJUST_POWER_STATE,
NULL);
if (ret)
- goto err_out1;
+ goto err_out;
}
pm_runtime_mark_last_busy(adev->dev);
@@ -3903,9 +3804,8 @@ amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
return count;
-err_out1:
+err_out:
pm_runtime_mark_last_busy(adev->dev);
-err_out0:
pm_runtime_put_autosuspend(adev->dev);
return ret;
@@ -4213,6 +4113,117 @@ static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
return umode;
}
+/**
+ * DOC: fan_zero_rpm_enable
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * zero RPM feature.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges if changable.
+ *
+ * Writing an integer to the file, change the setting accordingly.
+ *
+ * When you have finished the editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file to
+ * reset it.
+ */
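
A hypothetical userspace sequence matching the stage-then-commit flow described above; the sysfs path is illustrative (the node is expected under the gpu_od hierarchy this file populates):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Write a single token to an overdrive node; one write() per token,
 * since each write is parsed as a separate edit command. */
static int od_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	const char *node = "/sys/class/drm/card0/device/gpu_od/fan_ctrl/"
			   "fan_zero_rpm_enable";

	od_write(node, "1");	/* stage: enable zero RPM */
	od_write(node, "c");	/* commit the staged change */
	return 0;
}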
+static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
+}
+
+static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+ PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
+ buf,
+ count);
+}
+
+static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
+{
+ umode_t umode = 0000;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
+ umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
+ umode |= S_IWUSR;
+
+ return umode;
+}
+
+/**
+ * DOC: fan_zero_rpm_stop_temperature
+ *
+ * The amdgpu driver provides a sysfs API for checking and adjusting the
+ * zero RPM stop temperature feature.
+ *
+ * Reading back the file shows you the current setting and the permitted
+ * ranges if changeable.
+ *
+ * Writing an integer to the file changes the setting accordingly.
+ *
+ * When you have finished the editing, write "c" (commit) to the file to commit
+ * your changes.
+ *
+ * If you want to reset to the default value, write "r" (reset) to the file to
+ * reset it.
+ *
+ * This setting works only if the Zero RPM setting is enabled. It adjusts the
+ * temperature below which the fan can stop.
+ */
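
Adjusting the stop temperature follows the same stage-then-commit pattern; reusing the hypothetical od_write() helper from the previous sketch, with a placeholder path and value:

	const char *node = "/sys/class/drm/card0/device/gpu_od/fan_ctrl/"
			   "fan_zero_rpm_stop_temperature";

	/* Takes effect only while fan_zero_rpm_enable is set. */
	od_write(node, "50");	/* stage: fan may stop below 50 degC */
	od_write(node, "c");	/* commit the staged value */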
+static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
+}
+
+static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
+ struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
+
+ return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
+ PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
+ buf,
+ count);
+}
+
+static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
+{
+ umode_t umode = 0000;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
+ umode |= S_IRUSR | S_IRGRP | S_IROTH;
+
+ if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
+ umode |= S_IWUSR;
+
+ return umode;
+}
+
static struct od_feature_set amdgpu_od_set = {
.containers = {
[0] = {
@@ -4258,6 +4269,22 @@ static struct od_feature_set amdgpu_od_set = {
.store = fan_minimum_pwm_store,
},
},
+ [5] = {
+ .name = "fan_zero_rpm_enable",
+ .ops = {
+ .is_visible = fan_zero_rpm_enable_visible,
+ .show = fan_zero_rpm_enable_show,
+ .store = fan_zero_rpm_enable_store,
+ },
+ },
+ [6] = {
+ .name = "fan_zero_rpm_stop_temperature",
+ .ops = {
+ .is_visible = fan_zero_rpm_stop_temp_visible,
+ .show = fan_zero_rpm_stop_temp_show,
+ .store = fan_zero_rpm_stop_temp_store,
+ },
+ },
},
},
},
@@ -4758,11 +4785,9 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- r = pm_runtime_get_sync(dev->dev);
- if (r < 0) {
- pm_runtime_put_autosuspend(dev->dev);
+ r = pm_runtime_resume_and_get(dev->dev);
+ if (r < 0)
return r;
- }
if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
r = amdgpu_debugfs_pm_info_pp(m, adev);
@@ -4777,7 +4802,6 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
seq_printf(m, "\n");
out:
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index f5bf41f21c41..1f5ac7e0230d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -328,6 +328,10 @@ struct config_table_setting
#define OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET BIT(7)
#define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE BIT(8)
#define OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET BIT(9)
+#define OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE BIT(10)
+#define OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET BIT(11)
+#define OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE BIT(12)
+#define OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET BIT(13)
struct amdgpu_pm {
struct mutex mutex;
@@ -393,7 +397,7 @@ int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit
int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit);
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
- uint32_t block_type, bool gate);
+ uint32_t block_type, bool gate, int inst);
extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
@@ -442,6 +446,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable);
@@ -597,5 +602,6 @@ int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
int policy_level);
ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
enum pp_pm_policy p_type, char *buf);
+int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index e8b6989a40f3..67a8e22b1126 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -2954,9 +2954,9 @@ static int kv_dpm_get_temp(void *handle)
return actual_temp;
}
-static int kv_dpm_early_init(void *handle)
+static int kv_dpm_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->powerplay.pp_funcs = &kv_dpm_funcs;
adev->powerplay.pp_handle = adev;
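
The remaining hunks in this file, si_dpm.c, and amd_powerplay.c repeat one mechanical conversion: each amd_ip_funcs callback now takes its struct amdgpu_ip_block instance instead of an opaque handle. Condensed, with illustrative names (foo_early_init and foo_init are placeholders):

/* before: opaque handle, cast required */
static int foo_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return foo_init(adev);
}

/* after: the IP-block instance is passed directly, so the device (and
 * any per-instance state) is reachable without a cast */
static int foo_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return foo_init(adev);
}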
@@ -2965,10 +2965,10 @@ static int kv_dpm_early_init(void *handle)
return 0;
}
-static int kv_dpm_late_init(void *handle)
+static int kv_dpm_late_init(struct amdgpu_ip_block *ip_block)
{
/* powerdown unused blocks for now */
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->pm.dpm_enabled)
return 0;
@@ -2979,11 +2979,10 @@ static int kv_dpm_late_init(void *handle)
return 0;
}
-static int kv_dpm_sw_init(void *handle)
+static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
+ struct amdgpu_device *adev = ip_block->adev;
ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
&adev->pm.dpm.thermal.irq);
if (ret)
@@ -3024,9 +3023,9 @@ dpm_failed:
return ret;
}
-static int kv_dpm_sw_fini(void *handle)
+static int kv_dpm_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
flush_work(&adev->pm.dpm.thermal.work);
@@ -3035,10 +3034,10 @@ static int kv_dpm_sw_fini(void *handle)
return 0;
}
-static int kv_dpm_hw_init(void *handle)
+static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!amdgpu_dpm)
return 0;
@@ -3053,9 +3052,9 @@ static int kv_dpm_hw_init(void *handle)
return ret;
}
-static int kv_dpm_hw_fini(void *handle)
+static int kv_dpm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled)
kv_dpm_disable(adev);
@@ -3063,9 +3062,9 @@ static int kv_dpm_hw_fini(void *handle)
return 0;
}
-static int kv_dpm_suspend(void *handle)
+static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled) {
/* disable dpm */
@@ -3076,10 +3075,10 @@ static int kv_dpm_suspend(void *handle)
return 0;
}
-static int kv_dpm_resume(void *handle)
+static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
@@ -3100,17 +3099,6 @@ static bool kv_dpm_is_idle(void *handle)
return true;
}
-static int kv_dpm_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-
-static int kv_dpm_soft_reset(void *handle)
-{
- return 0;
-}
-
static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -3189,13 +3177,13 @@ static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int kv_dpm_set_clockgating_state(void *handle,
+static int kv_dpm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int kv_dpm_set_powergating_state(void *handle,
+static int kv_dpm_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3288,7 +3276,9 @@ static int kv_dpm_read_sensor(void *handle, int idx,
}
static int kv_set_powergating_by_smu(void *handle,
- uint32_t block_type, bool gate)
+ uint32_t block_type,
+ bool gate,
+ int inst)
{
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
@@ -3314,12 +3304,8 @@ static const struct amd_ip_funcs kv_dpm_ip_funcs = {
.suspend = kv_dpm_suspend,
.resume = kv_dpm_resume,
.is_idle = kv_dpm_is_idle,
- .wait_for_idle = kv_dpm_wait_for_idle,
- .soft_reset = kv_dpm_soft_reset,
.set_clockgating_state = kv_dpm_set_clockgating_state,
.set_powergating_state = kv_dpm_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version kv_smu_ip_block = {
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index a1baa13ab2c2..a87dcf0974bc 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -4755,13 +4755,15 @@ static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
u32 dram_timing;
u32 dram_timing2;
u32 burst_time;
+ int ret;
arb_regs->mc_arb_rfsh_rate =
(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
- amdgpu_atombios_set_engine_dram_timings(adev,
- pl->sclk,
- pl->mclk);
+ ret = amdgpu_atombios_set_engine_dram_timings(adev, pl->sclk,
+ pl->mclk);
+ if (ret)
+ return ret;
dram_timing = RREG32(MC_ARB_DRAM_TIMING);
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
@@ -7619,10 +7621,10 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
return 0;
}
-static int si_dpm_late_init(void *handle)
+static int si_dpm_late_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!adev->pm.dpm_enabled)
return 0;
@@ -7707,7 +7709,8 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s_smc.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_smc.bin", chip_name);
if (err) {
DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s_smc.bin\"\n",
err, chip_name);
@@ -7716,10 +7719,10 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
return err;
}
-static int si_dpm_sw_init(void *handle)
+static int si_dpm_sw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
if (ret)
@@ -7763,9 +7766,9 @@ dpm_failed:
return ret;
}
-static int si_dpm_sw_fini(void *handle)
+static int si_dpm_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
flush_work(&adev->pm.dpm.thermal.work);
@@ -7774,11 +7777,11 @@ static int si_dpm_sw_fini(void *handle)
return 0;
}
-static int si_dpm_hw_init(void *handle)
+static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (!amdgpu_dpm)
return 0;
@@ -7793,9 +7796,9 @@ static int si_dpm_hw_init(void *handle)
return ret;
}
-static int si_dpm_hw_fini(void *handle)
+static int si_dpm_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled)
si_dpm_disable(adev);
@@ -7803,9 +7806,9 @@ static int si_dpm_hw_fini(void *handle)
return 0;
}
-static int si_dpm_suspend(void *handle)
+static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled) {
/* disable dpm */
@@ -7816,10 +7819,10 @@ static int si_dpm_suspend(void *handle)
return 0;
}
-static int si_dpm_resume(void *handle)
+static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
@@ -7841,24 +7844,19 @@ static bool si_dpm_is_idle(void *handle)
return true;
}
-static int si_dpm_wait_for_idle(void *handle)
+static int si_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
/* XXX */
return 0;
}
-static int si_dpm_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int si_dpm_set_clockgating_state(void *handle,
+static int si_dpm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int si_dpm_set_powergating_state(void *handle,
+static int si_dpm_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -7928,10 +7926,10 @@ static void si_dpm_print_power_state(void *handle,
amdgpu_dpm_print_ps_status(adev, rps);
}
-static int si_dpm_early_init(void *handle)
+static int si_dpm_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->powerplay.pp_funcs = &si_dpm_funcs;
adev->powerplay.pp_handle = adev;
@@ -8047,11 +8045,8 @@ static const struct amd_ip_funcs si_dpm_ip_funcs = {
.resume = si_dpm_resume,
.is_idle = si_dpm_is_idle,
.wait_for_idle = si_dpm_wait_for_idle,
- .soft_reset = si_dpm_soft_reset,
.set_clockgating_state = si_dpm_set_clockgating_state,
.set_powergating_state = si_dpm_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version si_smu_ip_block =
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index a71c6117d7e5..686345f75f26 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -75,11 +75,10 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev)
hwmgr = NULL;
}
-static int pp_early_init(void *handle)
+static int pp_early_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = handle;
-
+ struct amdgpu_device *adev = ip_block->adev;
ret = amd_powerplay_create(adev);
if (ret != 0)
@@ -131,9 +130,9 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work)
orderly_poweroff(true);
}
-static int pp_sw_init(void *handle)
+static int pp_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
@@ -148,9 +147,9 @@ static int pp_sw_init(void *handle)
return ret;
}
-static int pp_sw_fini(void *handle)
+static int pp_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
hwmgr_sw_fini(hwmgr);
@@ -160,10 +159,10 @@ static int pp_sw_fini(void *handle)
return 0;
}
-static int pp_hw_init(void *handle)
+static int pp_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret = 0;
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
ret = hwmgr_hw_init(hwmgr);
@@ -174,10 +173,9 @@ static int pp_hw_init(void *handle)
return ret;
}
-static int pp_hw_fini(void *handle)
+static int pp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;
cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
@@ -217,9 +215,9 @@ static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
}
}
-static int pp_late_init(void *handle)
+static int pp_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
if (hwmgr && hwmgr->pm_en)
@@ -231,9 +229,9 @@ static int pp_late_init(void *handle)
return 0;
}
-static void pp_late_fini(void *handle)
+static void pp_late_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->pm.smu_prv_buffer)
amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
@@ -246,25 +244,15 @@ static bool pp_is_idle(void *handle)
return false;
}
-static int pp_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int pp_sw_reset(void *handle)
-{
- return 0;
-}
-
-static int pp_set_powergating_state(void *handle,
+static int pp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
}
-static int pp_suspend(void *handle)
+static int pp_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);
@@ -272,15 +260,14 @@ static int pp_suspend(void *handle)
return hwmgr_suspend(hwmgr);
}
-static int pp_resume(void *handle)
+static int pp_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;
return hwmgr_resume(hwmgr);
}
-static int pp_set_clockgating_state(void *handle,
+static int pp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
@@ -298,12 +285,8 @@ static const struct amd_ip_funcs pp_ip_funcs = {
.suspend = pp_suspend,
.resume = pp_resume,
.is_idle = pp_is_idle,
- .wait_for_idle = pp_wait_for_idle,
- .soft_reset = pp_sw_reset,
.set_clockgating_state = pp_set_clockgating_state,
.set_powergating_state = pp_set_powergating_state,
- .dump_ip_state = NULL,
- .print_ip_state = NULL,
};
const struct amdgpu_ip_block_version pp_smu_ip_block =
@@ -1244,7 +1227,9 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate)
}
static int pp_set_powergating_by_smu(void *handle,
- uint32_t block_type, bool gate)
+ uint32_t block_type,
+ bool gate,
+ int inst)
{
int ret = 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index b56298d9da98..4bd92fd782be 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -28,7 +28,6 @@
#include "ppatomctrl.h"
#include "atombios.h"
#include "cgs_common.h"
-#include "ppevvmath.h"
#define MEM_ID_MASK 0xff000000
#define MEM_ID_SHIFT 24
@@ -677,433 +676,6 @@ bool atomctrl_get_pp_assign_pin(
return bRet;
}
-int atomctrl_calculate_voltage_evv_on_sclk(
- struct pp_hwmgr *hwmgr,
- uint8_t voltage_type,
- uint32_t sclk,
- uint16_t virtual_voltage_Id,
- uint16_t *voltage,
- uint16_t dpm_level,
- bool debug)
-{
- ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
- struct amdgpu_device *adev = hwmgr->adev;
- EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
- EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
- EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
- EFUSE_LOGISTIC_FUNC_PARAM sKt_Beta_fuse;
- EFUSE_LOGISTIC_FUNC_PARAM sKv_m_fuse;
- EFUSE_LOGISTIC_FUNC_PARAM sKv_b_fuse;
- EFUSE_INPUT_PARAMETER sInput_FuseValues;
- READ_EFUSE_VALUE_PARAMETER sOutput_FuseValues;
-
- uint32_t ul_RO_fused, ul_CACb_fused, ul_CACm_fused, ul_Kt_Beta_fused, ul_Kv_m_fused, ul_Kv_b_fused;
- fInt fSM_A0, fSM_A1, fSM_A2, fSM_A3, fSM_A4, fSM_A5, fSM_A6, fSM_A7;
- fInt fMargin_RO_a, fMargin_RO_b, fMargin_RO_c, fMargin_fixed, fMargin_FMAX_mean, fMargin_Plat_mean, fMargin_FMAX_sigma, fMargin_Plat_sigma, fMargin_DC_sigma;
- fInt fLkg_FT, repeat;
- fInt fMicro_FMAX, fMicro_CR, fSigma_FMAX, fSigma_CR, fSigma_DC, fDC_SCLK, fSquared_Sigma_DC, fSquared_Sigma_CR, fSquared_Sigma_FMAX;
- fInt fRLL_LoadLine, fDerateTDP, fVDDC_base, fA_Term, fC_Term, fB_Term, fRO_DC_margin;
- fInt fRO_fused, fCACm_fused, fCACb_fused, fKv_m_fused, fKv_b_fused, fKt_Beta_fused, fFT_Lkg_V0NORM;
- fInt fSclk_margin, fSclk, fEVV_V;
- fInt fV_min, fV_max, fT_prod, fLKG_Factor, fT_FT, fV_FT, fV_x, fTDP_Power, fTDP_Power_right, fTDP_Power_left, fTDP_Current, fV_NL;
- uint32_t ul_FT_Lkg_V0NORM;
- fInt fLn_MaxDivMin, fMin, fAverage, fRange;
- fInt fRoots[2];
- fInt fStepSize = GetScaledFraction(625, 100000);
-
- int result;
-
- getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
- smu_atom_get_data_table(hwmgr->adev,
- GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
- NULL, NULL, NULL);
-
- if (!getASICProfilingInfo)
- return -1;
-
- if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
- (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
- getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
- return -1;
-
- /*-----------------------------------------------------------
- *GETTING MULTI-STEP PARAMETERS RELATED TO CURRENT DPM LEVEL
- *-----------------------------------------------------------
- */
- fRLL_LoadLine = Divide(getASICProfilingInfo->ulLoadLineSlop, 1000);
-
- switch (dpm_level) {
- case 1:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
- break;
- case 2:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
- break;
- case 3:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
- break;
- case 4:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
- break;
- case 5:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
- break;
- case 6:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
- break;
- case 7:
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
- break;
- default:
- pr_err("DPM Level not supported\n");
- fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
- }
-
- /*-------------------------
- * DECODING FUSE VALUES
- * ------------------------
- */
- /*Decode RO_Fused*/
- sRO_fuse = getASICProfilingInfo->sRoFuse;
-
- sInput_FuseValues.usEfuseIndex = sRO_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sRO_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sRO_fuse.ucEfuseLength;
-
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- /* Finally, the actual fuse value */
- ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
- fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
- fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
-
- sCACm_fuse = getASICProfilingInfo->sCACm;
-
- sInput_FuseValues.usEfuseIndex = sCACm_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sCACm_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sCACm_fuse.ucEfuseLength;
-
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
- fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
-
- fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
-
- sCACb_fuse = getASICProfilingInfo->sCACb;
-
- sInput_FuseValues.usEfuseIndex = sCACb_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sCACb_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
- fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
-
- fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
-
- sKt_Beta_fuse = getASICProfilingInfo->sKt_b;
-
- sInput_FuseValues.usEfuseIndex = sKt_Beta_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sKt_Beta_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sKt_Beta_fuse.ucEfuseLength;
-
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
- fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
-
- fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
- fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
-
- sKv_m_fuse = getASICProfilingInfo->sKv_m;
-
- sInput_FuseValues.usEfuseIndex = sKv_m_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sKv_m_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sKv_m_fuse.ucEfuseLength;
-
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
- if (result)
- return result;
-
- ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
- fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
- fRange = fMultiply(fRange, ConvertToFraction(-1));
-
- fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
- fAverage, fRange, sKv_m_fuse.ucEfuseLength);
-
- sKv_b_fuse = getASICProfilingInfo->sKv_b;
-
- sInput_FuseValues.usEfuseIndex = sKv_b_fuse.usEfuseIndex;
- sInput_FuseValues.ucBitShift = sKv_b_fuse.ucEfuseBitLSB;
- sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
- fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
-
- fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
- fAverage, fRange, sKv_b_fuse.ucEfuseLength);
-
- /* Decoding the Leakage - No special struct container */
- /*
- * usLkgEuseIndex=56
- * ucLkgEfuseBitLSB=6
- * ucLkgEfuseLength=10
- * ulLkgEncodeLn_MaxDivMin=69077
- * ulLkgEncodeMax=1000000
- * ulLkgEncodeMin=1000
- * ulEfuseLogisticAlpha=13
- */
-
- sInput_FuseValues.usEfuseIndex = getASICProfilingInfo->usLkgEuseIndex;
- sInput_FuseValues.ucBitShift = getASICProfilingInfo->ucLkgEfuseBitLSB;
- sInput_FuseValues.ucBitLength = getASICProfilingInfo->ucLkgEfuseLength;
-
- sOutput_FuseValues.sEfuse = sInput_FuseValues;
-
- result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
- GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
-
- if (result)
- return result;
-
- ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
- fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
- fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
-
- fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
- fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
- fLkg_FT = fFT_Lkg_V0NORM;
-
- /*-------------------------------------------
- * PART 2 - Grabbing all required values
- *-------------------------------------------
- */
- fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
- fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
- fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
- fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
- fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
- fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
- fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
- fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
- ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
-
- fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
- fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
- fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
-
- fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
-
- fMargin_FMAX_mean = GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
- fMargin_Plat_mean = GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
- fMargin_FMAX_sigma = GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
- fMargin_Plat_sigma = GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
-
- fMargin_DC_sigma = GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
- fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
-
- fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
- fCACb_fused = fDivide(fCACb_fused, ConvertToFraction(100));
- fKt_Beta_fused = fDivide(fKt_Beta_fused, ConvertToFraction(100));
- fKv_m_fused = fNegate(fDivide(fKv_m_fused, ConvertToFraction(100)));
- fKv_b_fused = fDivide(fKv_b_fused, ConvertToFraction(10));
-
- fSclk = GetScaledFraction(sclk, 100);
-
- fV_max = fDivide(GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
- fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
- fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
- fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
- fV_FT = fDivide(GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
- fV_min = fDivide(GetScaledFraction(
- le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
-
- /*-----------------------
- * PART 3
- *-----------------------
- */
-
- fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
- fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
- fC_Term = fAdd(fMargin_RO_c,
- fAdd(fMultiply(fSM_A0, fLkg_FT),
- fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
- fAdd(fMultiply(fSM_A3, fSclk),
- fSubtract(fSM_A7, fRO_fused)))));
-
- fVDDC_base = fSubtract(fRO_fused,
- fSubtract(fMargin_RO_c,
- fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
- fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
-
- repeat = fSubtract(fVDDC_base,
- fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
-
- fRO_DC_margin = fAdd(fMultiply(fMargin_RO_a,
- fGetSquare(repeat)),
- fAdd(fMultiply(fMargin_RO_b, repeat),
- fMargin_RO_c));
-
- fDC_SCLK = fSubtract(fRO_fused,
- fSubtract(fRO_DC_margin,
- fSubtract(fSM_A3,
- fMultiply(fSM_A2, repeat))));
- fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
-
- fSigma_DC = fSubtract(fSclk, fDC_SCLK);
-
- fMicro_FMAX = fMultiply(fSclk, fMargin_FMAX_mean);
- fMicro_CR = fMultiply(fSclk, fMargin_Plat_mean);
- fSigma_FMAX = fMultiply(fSclk, fMargin_FMAX_sigma);
- fSigma_CR = fMultiply(fSclk, fMargin_Plat_sigma);
-
- fSquared_Sigma_DC = fGetSquare(fSigma_DC);
- fSquared_Sigma_CR = fGetSquare(fSigma_CR);
- fSquared_Sigma_FMAX = fGetSquare(fSigma_FMAX);
-
- fSclk_margin = fAdd(fMicro_FMAX,
- fAdd(fMicro_CR,
- fAdd(fMargin_fixed,
- fSqrt(fAdd(fSquared_Sigma_FMAX,
- fAdd(fSquared_Sigma_DC, fSquared_Sigma_CR))))));
- /*
- fA_Term = fSM_A4 * (fSclk + fSclk_margin) + fSM_A5;
- fB_Term = fSM_A2 * (fSclk + fSclk_margin) + fSM_A6;
- fC_Term = fRO_DC_margin + fSM_A0 * fLkg_FT + fSM_A1 * fLkg_FT * (fSclk + fSclk_margin) + fSM_A3 * (fSclk + fSclk_margin) + fSM_A7 - fRO_fused;
- */
-
- fA_Term = fAdd(fMultiply(fSM_A4, fAdd(fSclk, fSclk_margin)), fSM_A5);
- fB_Term = fAdd(fMultiply(fSM_A2, fAdd(fSclk, fSclk_margin)), fSM_A6);
- fC_Term = fAdd(fRO_DC_margin,
- fAdd(fMultiply(fSM_A0, fLkg_FT),
- fAdd(fMultiply(fMultiply(fSM_A1, fLkg_FT),
- fAdd(fSclk, fSclk_margin)),
- fAdd(fMultiply(fSM_A3,
- fAdd(fSclk, fSclk_margin)),
- fSubtract(fSM_A7, fRO_fused)))));
-
- SolveQuadracticEqn(fA_Term, fB_Term, fC_Term, fRoots);
-
- if (GreaterThan(fRoots[0], fRoots[1]))
- fEVV_V = fRoots[1];
- else
- fEVV_V = fRoots[0];
-
- if (GreaterThan(fV_min, fEVV_V))
- fEVV_V = fV_min;
- else if (GreaterThan(fEVV_V, fV_max))
- fEVV_V = fSubtract(fV_max, fStepSize);
-
- fEVV_V = fRoundUpByStepSize(fEVV_V, fStepSize, 0);
-
- /*-----------------
- * PART 4
- *-----------------
- */
-
- fV_x = fV_min;
-
- while (GreaterThan(fAdd(fV_max, fStepSize), fV_x)) {
- fTDP_Power_left = fMultiply(fMultiply(fMultiply(fAdd(
- fMultiply(fCACm_fused, fV_x), fCACb_fused), fSclk),
- fGetSquare(fV_x)), fDerateTDP);
-
- fTDP_Power_right = fMultiply(fFT_Lkg_V0NORM, fMultiply(fLKG_Factor,
- fMultiply(fExponential(fMultiply(fAdd(fMultiply(fKv_m_fused,
- fT_prod), fKv_b_fused), fV_x)), fV_x)));
- fTDP_Power_right = fMultiply(fTDP_Power_right, fExponential(fMultiply(
- fKt_Beta_fused, fT_prod)));
- fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
- fAdd(fMultiply(fKv_m_fused, fT_prod), fKv_b_fused), fV_FT)));
- fTDP_Power_right = fDivide(fTDP_Power_right, fExponential(fMultiply(
- fKt_Beta_fused, fT_FT)));
-
- fTDP_Power = fAdd(fTDP_Power_left, fTDP_Power_right);
-
- fTDP_Current = fDivide(fTDP_Power, fV_x);
-
- fV_NL = fAdd(fV_x, fDivide(fMultiply(fTDP_Current, fRLL_LoadLine),
- ConvertToFraction(10)));
-
- fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
-
- if (GreaterThan(fV_max, fV_NL) &&
- (GreaterThan(fV_NL, fEVV_V) ||
- Equal(fV_NL, fEVV_V))) {
- fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
-
- *voltage = (uint16_t)fV_NL.partial.real;
- break;
- } else
- fV_x = fAdd(fV_x, fStepSize);
- }
-
- return result;
-}
-
/**
* atomctrl_get_voltage_evv_on_sclk: gets voltage via call to ATOM COMMAND table.
* @hwmgr: input: pointer to hwManager
@@ -1420,6 +992,8 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
GetIndexIntoMasterTable(DATA, SMU_Info),
&size, &frev, &crev);
+ if (!psmu_info)
+ return -EINVAL;
for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
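The two lines added in this hunk stop the entry loop from dereferencing a NULL SMU_Info table when the ATOM lookup fails. A minimal sketch of the same guard, using a hypothetical lookup_table() and struct fw_table rather than the driver's real types:

#include <errno.h>
#include <stddef.h>

/* Hypothetical stand-ins for the ATOM table types and lookup above. */
struct fw_table {
	unsigned char num_entries;
	unsigned int entry[8];
};

struct fw_table *lookup_table(int index);	/* may return NULL */

static int copy_entries(int index, unsigned int *out, int max)
{
	struct fw_table *tbl = lookup_table(index);
	int i;

	if (!tbl)		/* lookup failed: bail out before the loop */
		return -EINVAL;

	for (i = 0; i < tbl->num_entries && i < max; i++)
		out[i] = tbl->entry[i];

	return 0;
}

Validating the pointer before trusting the firmware-provided count keeps a missing or truncated BIOS table from turning into an out-of-bounds walk.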
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
index 1f987e846628..22b0ac12df97 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h
@@ -316,8 +316,6 @@ extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
pp_atomctrl_clock_dividers_kong *dividers);
extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
uint16_t end_index, uint32_t *efuse);
-extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
- uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug);
extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_ai *dividers);
extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
uint8_t level);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
deleted file mode 100644
index 409aeec6baa9..000000000000
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppevvmath.h
+++ /dev/null
@@ -1,561 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <asm/div64.h>
-
-enum ppevvmath_constants {
-	/* We multiply all original integers by 2^SHIFT_AMOUNT to get the fInt representation */
- SHIFT_AMOUNT = 16,
-
- /* Change this value to change the number of decimal places in the final output - 5 is a good default */
- PRECISION = 5,
-
- SHIFTED_2 = (2 << SHIFT_AMOUNT),
-
- /* 32767 - Might change in the future */
- MAX = (1 << (SHIFT_AMOUNT - 1)) - 1,
-};
-
-/* -------------------------------------------------------------------------------
- * NEW TYPE - fINT
- * -------------------------------------------------------------------------------
- * A variable of type fInt can be accessed in 3 ways using the dot (.) operator
- * fInt A;
- * A.full => The full number as it is. Generally not easy to read
- * A.partial.real => Only the integer portion
- * A.partial.decimal => Only the fractional portion
- */
-typedef union _fInt {
- int full;
- struct _partial {
- unsigned int decimal: SHIFT_AMOUNT; /*Needs to always be unsigned*/
- int real: 32 - SHIFT_AMOUNT;
- } partial;
-} fInt;
-
-/* -------------------------------------------------------------------------------
- * Function Declarations
- * -------------------------------------------------------------------------------
- */
-static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */
-static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */
-static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */
-static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */
-
-static fInt fNegate(fInt); /* Returns -1 * input fInt value */
-static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */
-static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */
-static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */
-static fInt fDivide (fInt A, fInt B); /* Returns A/B */
-static fInt fGetSquare(fInt); /* Returns the square of a fInt number */
-static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */
-
-static int uAbs(int); /* Returns the Absolute value of the Int */
-static int uPow(int base, int exponent); /* Returns base^exponent as an INT */
-
-static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
-static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */
-static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */
-
-static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */
-static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */
-
-/* Fuse decoding functions
- * -------------------------------------------------------------------------------------
- */
-static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
-static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
-static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
-
-/* Internal Support Functions - Use these ONLY for testing or adding to internal functions
- * -------------------------------------------------------------------------------------
- * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons.
- */
-static fInt Divide (int, int); /* Divide two INTs and return result as FINT */
-static fInt fNegate(fInt);
-
-static int uGetScaledDecimal (fInt); /* Internal function */
-static int GetReal (fInt A); /* Internal function */
-
-/* -------------------------------------------------------------------------------------
- * TROUBLESHOOTING INFORMATION
- * -------------------------------------------------------------------------------------
- * 1) ConvertToFraction - InputOutOfRangeException: Only accepts numbers smaller than MAX (default: 32767)
- * 2) fAdd - OutputOutOfRangeException: Output bigger than MAX (default: 32767)
- * 3) fMultiply - OutputOutOfRangeException:
- * 4) fGetSquare - OutputOutOfRangeException:
- * 5) fDivide - DivideByZeroException
- * 6) fSqrt - NegativeSquareRootException: Input cannot be a negative number
- */
-
-/* -------------------------------------------------------------------------------------
- * START OF CODE
- * -------------------------------------------------------------------------------------
- */
-static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
-{
- uint32_t i;
- bool bNegated = false;
-
- fInt fPositiveOne = ConvertToFraction(1);
- fInt fZERO = ConvertToFraction(0);
-
- fInt lower_bound = Divide(78, 10000);
- fInt solution = fPositiveOne; /*Starting off with baseline of 1 */
- fInt error_term;
-
- static const uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
- static const uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
-
- if (GreaterThan(fZERO, exponent)) {
- exponent = fNegate(exponent);
- bNegated = true;
- }
-
- while (GreaterThan(exponent, lower_bound)) {
- for (i = 0; i < 11; i++) {
- if (GreaterThan(exponent, GetScaledFraction(k_array[i], 10000))) {
- exponent = fSubtract(exponent, GetScaledFraction(k_array[i], 10000));
- solution = fMultiply(solution, GetScaledFraction(expk_array[i], 10000));
- }
- }
- }
-
- error_term = fAdd(fPositiveOne, exponent);
-
- solution = fMultiply(solution, error_term);
-
- if (bNegated)
- solution = fDivide(fPositiveOne, solution);
-
- return solution;
-}
-
-static fInt fNaturalLog(fInt value)
-{
- uint32_t i;
- fInt upper_bound = Divide(8, 1000);
- fInt fNegativeOne = ConvertToFraction(-1);
- fInt solution = ConvertToFraction(0); /*Starting off with baseline of 0 */
- fInt error_term;
-
- static const uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
- static const uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
-
- while (GreaterThan(fAdd(value, fNegativeOne), upper_bound)) {
- for (i = 0; i < 10; i++) {
- if (GreaterThan(value, GetScaledFraction(k_array[i], 10000))) {
- value = fDivide(value, GetScaledFraction(k_array[i], 10000));
- solution = fAdd(solution, GetScaledFraction(logk_array[i], 10000));
- }
- }
- }
-
- error_term = fAdd(fNegativeOne, value);
-
- return fAdd(solution, error_term);
-}
-
-static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
-{
- fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
- fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
-
- fInt f_decoded_value;
-
- f_decoded_value = fDivide(f_fuse_value, f_bit_max_value);
- f_decoded_value = fMultiply(f_decoded_value, f_range);
- f_decoded_value = fAdd(f_decoded_value, f_min);
-
- return f_decoded_value;
-}
-
-
-static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength)
-{
- fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
- fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
-
- fInt f_CONSTANT_NEG13 = ConvertToFraction(-13);
- fInt f_CONSTANT1 = ConvertToFraction(1);
-
- fInt f_decoded_value;
-
- f_decoded_value = fSubtract(fDivide(f_bit_max_value, f_fuse_value), f_CONSTANT1);
- f_decoded_value = fNaturalLog(f_decoded_value);
- f_decoded_value = fMultiply(f_decoded_value, fDivide(f_range, f_CONSTANT_NEG13));
- f_decoded_value = fAdd(f_decoded_value, f_average);
-
- return f_decoded_value;
-}
-
-static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength)
-{
- fInt fLeakage;
- fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
-
- fLeakage = fMultiply(ln_max_div_min, Convert_ULONG_ToFraction(leakageID_fuse));
- fLeakage = fDivide(fLeakage, f_bit_max_value);
- fLeakage = fExponential(fLeakage);
- fLeakage = fMultiply(fLeakage, f_min);
-
- return fLeakage;
-}
-
-static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */
-{
- fInt temp;
-
- if (X <= MAX)
- temp.full = (X << SHIFT_AMOUNT);
- else
- temp.full = 0;
-
- return temp;
-}
-
-static fInt fNegate(fInt X)
-{
- fInt CONSTANT_NEGONE = ConvertToFraction(-1);
- return fMultiply(X, CONSTANT_NEGONE);
-}
-
-static fInt Convert_ULONG_ToFraction(uint32_t X)
-{
- fInt temp;
-
- if (X <= MAX)
- temp.full = (X << SHIFT_AMOUNT);
- else
- temp.full = 0;
-
- return temp;
-}
-
-static fInt GetScaledFraction(int X, int factor)
-{
- int times_shifted, factor_shifted;
- bool bNEGATED;
- fInt fValue;
-
- times_shifted = 0;
- factor_shifted = 0;
- bNEGATED = false;
-
- if (X < 0) {
- X = -1*X;
- bNEGATED = true;
- }
-
- if (factor < 0) {
- factor = -1*factor;
- bNEGATED = !bNEGATED; /*If bNEGATED = true due to X < 0, this will cover the case of negative cancelling negative */
- }
-
- if ((X > MAX) || factor > MAX) {
- if ((X/factor) <= MAX) {
- while (X > MAX) {
- X = X >> 1;
- times_shifted++;
- }
-
- while (factor > MAX) {
- factor = factor >> 1;
- factor_shifted++;
- }
- } else {
- fValue.full = 0;
- return fValue;
- }
- }
-
- if (factor == 1)
- return ConvertToFraction(X);
-
- fValue = fDivide(ConvertToFraction(X * uPow(-1, bNEGATED)), ConvertToFraction(factor));
-
- fValue.full = fValue.full << times_shifted;
- fValue.full = fValue.full >> factor_shifted;
-
- return fValue;
-}
-
-/* Addition using two fInts */
-static fInt fAdd (fInt X, fInt Y)
-{
- fInt Sum;
-
- Sum.full = X.full + Y.full;
-
- return Sum;
-}
-
-/* Subtraction using two fInts */
-static fInt fSubtract (fInt X, fInt Y)
-{
- fInt Difference;
-
- Difference.full = X.full - Y.full;
-
- return Difference;
-}
-
-static bool Equal(fInt A, fInt B)
-{
- if (A.full == B.full)
- return true;
- else
- return false;
-}
-
-static bool GreaterThan(fInt A, fInt B)
-{
- if (A.full > B.full)
- return true;
- else
- return false;
-}
-
-static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
-{
- fInt Product;
- int64_t tempProduct;
-
- /*The following is for a very specific common case: Non-zero number with ONLY fractional portion*/
- /* TEMPORARILY DISABLED - CAN BE USED TO IMPROVE PRECISION
- bool X_LessThanOne, Y_LessThanOne;
-
- X_LessThanOne = (X.partial.real == 0 && X.partial.decimal != 0 && X.full >= 0);
- Y_LessThanOne = (Y.partial.real == 0 && Y.partial.decimal != 0 && Y.full >= 0);
-
- if (X_LessThanOne && Y_LessThanOne) {
- Product.full = X.full * Y.full;
- return Product
- }*/
-
- tempProduct = ((int64_t)X.full) * ((int64_t)Y.full); /*Q(16,16)*Q(16,16) = Q(32, 32) - Might become a negative number! */
- tempProduct = tempProduct >> 16; /*Remove lagging 16 bits - Will lose some precision from decimal; */
- Product.full = (int)tempProduct; /*The int64_t will lose the leading 16 bits that were part of the integer portion */
-
- return Product;
-}
-
-static fInt fDivide (fInt X, fInt Y)
-{
- fInt fZERO, fQuotient;
- int64_t longlongX, longlongY;
-
- fZERO = ConvertToFraction(0);
-
- if (Equal(Y, fZERO))
- return fZERO;
-
- longlongX = (int64_t)X.full;
- longlongY = (int64_t)Y.full;
-
- longlongX = longlongX << 16; /*Q(16,16) -> Q(32,32) */
-
-	longlongX = div64_s64(longlongX, longlongY); /*Q(32,32) divided by Q(16,16) = Q(16,16) Back to original format */
-
- fQuotient.full = (int)longlongX;
- return fQuotient;
-}
-
-static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/
-{
- fInt fullNumber, scaledDecimal, scaledReal;
-
- scaledReal.full = GetReal(A) * uPow(10, PRECISION-1); /* DOUBLE CHECK THISSSS!!! */
-
- scaledDecimal.full = uGetScaledDecimal(A);
-
- fullNumber = fAdd(scaledDecimal, scaledReal);
-
- return fullNumber.full;
-}
-
-static fInt fGetSquare(fInt A)
-{
- return fMultiply(A, A);
-}
-
-/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */
-static fInt fSqrt(fInt num)
-{
- fInt F_divide_Fprime, Fprime;
- fInt test;
- fInt twoShifted;
- int seed, counter, error;
- fInt x_new, x_old, C, y;
-
- fInt fZERO = ConvertToFraction(0);
-
- /* (0 > num) is the same as (num < 0), i.e., num is negative */
-
- if (GreaterThan(fZERO, num) || Equal(fZERO, num))
- return fZERO;
-
- C = num;
-
- if (num.partial.real > 3000)
- seed = 60;
- else if (num.partial.real > 1000)
- seed = 30;
- else if (num.partial.real > 100)
- seed = 10;
- else
- seed = 2;
-
- counter = 0;
-
- if (Equal(num, fZERO)) /*Square Root of Zero is zero */
- return fZERO;
-
- twoShifted = ConvertToFraction(2);
- x_new = ConvertToFraction(seed);
-
- do {
- counter++;
-
- x_old.full = x_new.full;
-
- test = fGetSquare(x_old); /*1.75*1.75 is reverting back to 1 when shifted down */
- y = fSubtract(test, C); /*y = f(x) = x^2 - C; */
-
- Fprime = fMultiply(twoShifted, x_old);
- F_divide_Fprime = fDivide(y, Fprime);
-
- x_new = fSubtract(x_old, F_divide_Fprime);
-
- error = ConvertBackToInteger(x_new) - ConvertBackToInteger(x_old);
-
-		if (counter > 20) /*20 is already way too many iterations. If we don't have an answer by then, we never will*/
- return x_new;
-
- } while (uAbs(error) > 0);
-
- return x_new;
-}
-
-static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
-{
- fInt *pRoots = &Roots[0];
- fInt temp, root_first, root_second;
- fInt f_CONSTANT10, f_CONSTANT100;
-
- f_CONSTANT100 = ConvertToFraction(100);
- f_CONSTANT10 = ConvertToFraction(10);
-
- while (GreaterThan(A, f_CONSTANT100) || GreaterThan(B, f_CONSTANT100) || GreaterThan(C, f_CONSTANT100)) {
- A = fDivide(A, f_CONSTANT10);
- B = fDivide(B, f_CONSTANT10);
- C = fDivide(C, f_CONSTANT10);
- }
-
- temp = fMultiply(ConvertToFraction(4), A); /* root = 4*A */
- temp = fMultiply(temp, C); /* root = 4*A*C */
- temp = fSubtract(fGetSquare(B), temp); /* root = b^2 - 4AC */
- temp = fSqrt(temp); /*root = Sqrt (b^2 - 4AC); */
-
-	root_first = fSubtract(fNegate(B), temp); /* -b - Sqrt(b^2 - 4AC) */
-	root_second = fAdd(fNegate(B), temp); /* -b + Sqrt(b^2 - 4AC) */
-
-	root_first = fDivide(root_first, ConvertToFraction(2)); /* [-b +- Sqrt(b^2 - 4AC)]/[2] */
-	root_first = fDivide(root_first, A); /* [-b +- Sqrt(b^2 - 4AC)]/[2*A] */
-
-	root_second = fDivide(root_second, ConvertToFraction(2)); /* [-b +- Sqrt(b^2 - 4AC)]/[2] */
-	root_second = fDivide(root_second, A); /* [-b +- Sqrt(b^2 - 4AC)]/[2*A] */
-
- *(pRoots + 0) = root_first;
- *(pRoots + 1) = root_second;
-}
-
-/* -----------------------------------------------------------------------------
- * SUPPORT FUNCTIONS
- * -----------------------------------------------------------------------------
- */
-
-/* Conversion Functions */
-static int GetReal (fInt A)
-{
- return (A.full >> SHIFT_AMOUNT);
-}
-
-static fInt Divide (int X, int Y)
-{
- fInt A, B, Quotient;
-
- A.full = X << SHIFT_AMOUNT;
- B.full = Y << SHIFT_AMOUNT;
-
- Quotient = fDivide(A, B);
-
- return Quotient;
-}
-
-static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */
-{
- int dec[PRECISION];
- int i, scaledDecimal = 0, tmp = A.partial.decimal;
-
- for (i = 0; i < PRECISION; i++) {
- dec[i] = tmp / (1 << SHIFT_AMOUNT);
- tmp = tmp - ((1 << SHIFT_AMOUNT)*dec[i]);
- tmp *= 10;
- scaledDecimal = scaledDecimal + dec[i]*uPow(10, PRECISION - 1 - i);
- }
-
- return scaledDecimal;
-}
-
-static int uPow(int base, int power)
-{
- if (power == 0)
- return 1;
- else
- return (base)*uPow(base, power - 1);
-}
-
-static int uAbs(int X)
-{
- if (X < 0)
- return (X * -1);
- else
- return X;
-}
-
-static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term)
-{
- fInt solution;
-
- solution = fDivide(A, fStepSize);
-	solution.partial.decimal = 0; /* All fractional digits change to 0 */
-
- if (error_term)
- solution.partial.real += 1; /*Error term of 1 added */
-
- solution = fMultiply(solution, fStepSize);
- solution = fAdd(solution, fStepSize);
-
- return solution;
-}
-
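The deleted ppevvmath.h implemented the Q16.16 fixed-point arithmetic (SHIFT_AMOUNT = 16) that the EVV voltage calculation removed above depended on. A minimal standalone sketch of the two core operations, assuming plain C with int64_t intermediates instead of the kernel's div64_s64 helper:

#include <stdint.h>

typedef int32_t q16_t;			/* Q16.16: 16 integer bits, 16 fraction bits */
#define Q16_ONE (1 << 16)

static q16_t q16_from_int(int x)
{
	return (q16_t)(x * Q16_ONE);	/* multiply rather than shift, so negative x is safe */
}

static q16_t q16_mul(q16_t a, q16_t b)
{
	/* Q16.16 * Q16.16 widens to Q32.32; drop the low 16 fraction bits */
	return (q16_t)(((int64_t)a * b) >> 16);
}

static q16_t q16_div(q16_t a, q16_t b)
{
	if (b == 0)
		return 0;		/* the removed fDivide also returned 0 here */
	/* pre-shift the dividend so the quotient lands back in Q16.16 */
	return (q16_t)(((int64_t)a << 16) / b);
}

For example, q16_mul(q16_from_int(3), q16_div(q16_from_int(1), q16_from_int(2))) evaluates to 0x18000, i.e. 1.5 in Q16.16, matching what the removed fMultiply/fDivide pair produced for 3 * (1/2).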
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
index 3007b054c873..776d58ea63ae 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
@@ -1120,13 +1120,14 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
if (0 != result)
- return result;
+ goto exit_safe_mode;
vega10_didt_set_mask(hwmgr, false);
+exit_safe_mode:
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
- return 0;
+ return result;
}
static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
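The vega10_powertune.c change above repairs an error path that returned while the GPU was still in RLC safe mode; the fix funnels both outcomes through one exit label so the unwind always runs and the original result code is preserved. A sketch of the pattern in isolation, with rlc_enter_safe_mode(), rlc_exit_safe_mode(), program_registers() and apply_mask() as hypothetical stand-ins:

void rlc_enter_safe_mode(void);
void rlc_exit_safe_mode(void);
int program_registers(void);
void apply_mask(void);

static int program_with_unwind(void)
{
	int result;

	rlc_enter_safe_mode();

	result = program_registers();	/* may fail */
	if (result)
		goto exit_safe_mode;	/* do not return here: unwind first */

	apply_mask();

exit_safe_mode:
	rlc_exit_safe_mode();		/* runs on both success and failure */
	return result;			/* 0 on success, error code otherwise */
}

Single-exit unwinding with goto is the usual kernel idiom for paired enter/exit state, since early returns are easy to add later without noticing the pairing.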
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
index 79c817752a33..2b446f8866ba 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
@@ -62,578 +62,6 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
return table_address;
}
-#if 0
-static void dump_pptable(PPTable_t *pptable)
-{
- int i;
-
- pr_info("Version = 0x%08x\n", pptable->Version);
-
- pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
- pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
-
- pr_info("SocketPowerLimitAc0 = %d\n", pptable->SocketPowerLimitAc0);
- pr_info("SocketPowerLimitAc0Tau = %d\n", pptable->SocketPowerLimitAc0Tau);
- pr_info("SocketPowerLimitAc1 = %d\n", pptable->SocketPowerLimitAc1);
- pr_info("SocketPowerLimitAc1Tau = %d\n", pptable->SocketPowerLimitAc1Tau);
- pr_info("SocketPowerLimitAc2 = %d\n", pptable->SocketPowerLimitAc2);
- pr_info("SocketPowerLimitAc2Tau = %d\n", pptable->SocketPowerLimitAc2Tau);
- pr_info("SocketPowerLimitAc3 = %d\n", pptable->SocketPowerLimitAc3);
- pr_info("SocketPowerLimitAc3Tau = %d\n", pptable->SocketPowerLimitAc3Tau);
- pr_info("SocketPowerLimitDc = %d\n", pptable->SocketPowerLimitDc);
- pr_info("SocketPowerLimitDcTau = %d\n", pptable->SocketPowerLimitDcTau);
- pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc);
- pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau);
- pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx);
- pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau);
-
- pr_info("TedgeLimit = %d\n", pptable->TedgeLimit);
- pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit);
- pr_info("ThbmLimit = %d\n", pptable->ThbmLimit);
- pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit);
- pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit);
- pr_info("Tliquid1Limit = %d\n", pptable->Tliquid1Limit);
- pr_info("Tliquid2Limit = %d\n", pptable->Tliquid2Limit);
- pr_info("TplxLimit = %d\n", pptable->TplxLimit);
- pr_info("FitLimit = %d\n", pptable->FitLimit);
-
- pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit);
- pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold);
-
- pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage);
- pr_info("padding8_limits = 0x%02x\n", pptable->padding8_limits);
- pr_info("Tvr_SocLimit = %d\n", pptable->Tvr_SocLimit);
-
- pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc);
- pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx);
-
- pr_info("UlvSmnclkDid = %d\n", pptable->UlvSmnclkDid);
- pr_info("UlvMp1clkDid = %d\n", pptable->UlvMp1clkDid);
- pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass);
- pr_info("Padding234 = 0x%02x\n", pptable->Padding234);
-
- pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx);
- pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc);
- pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx);
- pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc);
-
- pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx);
- pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc);
-
- pr_info("[PPCLK_GFXCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_GFXCLK].padding,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c);
-
- pr_info("[PPCLK_VCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK].padding,
- pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c);
-
- pr_info("[PPCLK_DCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK].padding,
- pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c);
-
- pr_info("[PPCLK_ECLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_ECLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_ECLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_ECLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_ECLK].padding,
- pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_ECLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_ECLK].SsCurve.c);
-
- pr_info("[PPCLK_SOCCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_SOCCLK].padding,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c);
-
- pr_info("[PPCLK_UCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_UCLK].padding,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c);
-
- pr_info("[PPCLK_DCEFCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_DCEFCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].padding,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCEFCLK].SsCurve.c);
-
- pr_info("[PPCLK_DISPCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_DISPCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DISPCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DISPCLK].padding,
- pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DISPCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DISPCLK].SsCurve.c);
-
- pr_info("[PPCLK_PIXCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_PIXCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_PIXCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_PIXCLK].padding,
- pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_PIXCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_PIXCLK].SsCurve.c);
-
- pr_info("[PPCLK_PHYCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_PHYCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_PHYCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_PHYCLK].padding,
- pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_PHYCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_PHYCLK].SsCurve.c);
-
- pr_info("[PPCLK_FCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n",
- pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_FCLK].padding,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c);
-
-
- pr_info("FreqTableGfx\n");
- for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]);
-
- pr_info("FreqTableVclk\n");
- for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]);
-
- pr_info("FreqTableDclk\n");
- for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]);
-
- pr_info("FreqTableEclk\n");
- for (i = 0; i < NUM_ECLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableEclk[i]);
-
- pr_info("FreqTableSocclk\n");
- for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]);
-
- pr_info("FreqTableUclk\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]);
-
- pr_info("FreqTableFclk\n");
- for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]);
-
- pr_info("FreqTableDcefclk\n");
- for (i = 0; i < NUM_DCEFCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDcefclk[i]);
-
- pr_info("FreqTableDispclk\n");
- for (i = 0; i < NUM_DISPCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDispclk[i]);
-
- pr_info("FreqTablePixclk\n");
- for (i = 0; i < NUM_PIXCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePixclk[i]);
-
- pr_info("FreqTablePhyclk\n");
- for (i = 0; i < NUM_PHYCLK_DPM_LEVELS; i++)
- pr_info(" .[%02d] = %d\n", i, pptable->FreqTablePhyclk[i]);
-
- pr_info("DcModeMaxFreq[PPCLK_GFXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]);
- pr_info("DcModeMaxFreq[PPCLK_VCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_VCLK]);
- pr_info("DcModeMaxFreq[PPCLK_DCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCLK]);
- pr_info("DcModeMaxFreq[PPCLK_ECLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_ECLK]);
- pr_info("DcModeMaxFreq[PPCLK_SOCCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]);
- pr_info("DcModeMaxFreq[PPCLK_UCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_UCLK]);
- pr_info("DcModeMaxFreq[PPCLK_DCEFCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DCEFCLK]);
- pr_info("DcModeMaxFreq[PPCLK_DISPCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_DISPCLK]);
- pr_info("DcModeMaxFreq[PPCLK_PIXCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PIXCLK]);
- pr_info("DcModeMaxFreq[PPCLK_PHYCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_PHYCLK]);
- pr_info("DcModeMaxFreq[PPCLK_FCLK] = %d\n", pptable->DcModeMaxFreq[PPCLK_FCLK]);
- pr_info("Padding8_Clks = %d\n", pptable->Padding8_Clks);
-
- pr_info("Mp0clkFreq\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]);
-
- pr_info("Mp0DpmVoltage\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]);
-
- pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
- pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate);
- pr_info("CksEnableFreq = 0x%x\n", pptable->CksEnableFreq);
- pr_info("Padding789 = 0x%x\n", pptable->Padding789);
- pr_info("CksVoltageOffset[a = 0x%08x b = 0x%08x c = 0x%08x]\n",
- pptable->CksVoltageOffset.a,
- pptable->CksVoltageOffset.b,
- pptable->CksVoltageOffset.c);
- pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]);
- pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]);
- pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]);
- pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]);
- pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq);
- pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource);
- pr_info("Padding456 = 0x%x\n", pptable->Padding456);
-
- pr_info("LowestUclkReservedForUlv = %d\n", pptable->LowestUclkReservedForUlv);
- pr_info("Padding8_Uclk[0] = 0x%x\n", pptable->Padding8_Uclk[0]);
- pr_info("Padding8_Uclk[1] = 0x%x\n", pptable->Padding8_Uclk[1]);
- pr_info("Padding8_Uclk[2] = 0x%x\n", pptable->Padding8_Uclk[2]);
-
- pr_info("PcieGenSpeed\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->PcieGenSpeed[i]);
-
- pr_info("PcieLaneCount\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->PcieLaneCount[i]);
-
- pr_info("LclkFreq\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->LclkFreq[i]);
-
- pr_info("EnableTdpm = %d\n", pptable->EnableTdpm);
- pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature);
- pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature);
- pr_info("GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit);
-
- pr_info("FanStopTemp = %d\n", pptable->FanStopTemp);
- pr_info("FanStartTemp = %d\n", pptable->FanStartTemp);
-
- pr_info("FanGainEdge = %d\n", pptable->FanGainEdge);
- pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot);
- pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid);
- pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx);
- pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc);
- pr_info("FanGainPlx = %d\n", pptable->FanGainPlx);
- pr_info("FanGainHbm = %d\n", pptable->FanGainHbm);
- pr_info("FanPwmMin = %d\n", pptable->FanPwmMin);
- pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm);
- pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm);
- pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm);
- pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature);
- pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk);
- pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable);
- pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev);
-
- pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta);
- pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta);
- pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta);
- pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved);
-
- pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
- pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
- pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]);
- pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]);
-
- pr_info("qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c);
- pr_info("qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c);
- pr_info("dBtcGbGfxCksOn{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxCksOn.a,
- pptable->dBtcGbGfxCksOn.b,
- pptable->dBtcGbGfxCksOn.c);
- pr_info("dBtcGbGfxCksOff{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxCksOff.a,
- pptable->dBtcGbGfxCksOff.b,
- pptable->dBtcGbGfxCksOff.c);
- pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxAfll.a,
- pptable->dBtcGbGfxAfll.b,
- pptable->dBtcGbGfxAfll.c);
- pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbSoc.a,
- pptable->dBtcGbSoc.b,
- pptable->dBtcGbSoc.c);
- pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
- pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
-
- pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
- pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
-
- pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
- pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
-
- pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
- pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
- pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
- pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
-
- pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
- pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
- pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
- pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
-
- pr_info("XgmiLinkSpeed\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]);
- pr_info("XgmiLinkWidth\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]);
- pr_info("XgmiFclkFreq\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]);
- pr_info("XgmiUclkFreq\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiUclkFreq[i]);
- pr_info("XgmiSocclkFreq\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiSocclkFreq[i]);
- pr_info("XgmiSocVoltage\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]);
-
- pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides);
- pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation0.a,
- pptable->ReservedEquation0.b,
- pptable->ReservedEquation0.c);
- pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation1.a,
- pptable->ReservedEquation1.b,
- pptable->ReservedEquation1.c);
- pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation2.a,
- pptable->ReservedEquation2.b,
- pptable->ReservedEquation2.c);
- pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation3.a,
- pptable->ReservedEquation3.b,
- pptable->ReservedEquation3.c);
-
- pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx);
- pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc);
-
- pr_info("MGpuFanBoostLimitRpm = %d\n", pptable->MGpuFanBoostLimitRpm);
- pr_info("padding16_Fan = %d\n", pptable->padding16_Fan);
-
-	pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0);
-	pr_info("FanGainVrMem1 = %d\n", pptable->FanGainVrMem1);
-
- pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
- pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
-
- for (i = 0; i < 11; i++)
- pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]);
-
- for (i = 0; i < 3; i++)
- pr_info("Padding32[%d] = 0x%x\n", i, pptable->Padding32[i]);
-
- pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx);
- pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc);
-
- pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
- pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
- pr_info("VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping);
- pr_info("VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping);
-
- pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
- pr_info("SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask);
- pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent);
- pr_info("Padding8_V = 0x%x\n", pptable->Padding8_V);
-
- pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
- pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset);
- pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
-
- pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
- pr_info("SocOffset = 0x%x\n", pptable->SocOffset);
- pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
-
- pr_info("Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent);
- pr_info("Mem0Offset = 0x%x\n", pptable->Mem0Offset);
- pr_info("Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0);
-
- pr_info("Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent);
- pr_info("Mem1Offset = 0x%x\n", pptable->Mem1Offset);
- pr_info("Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1);
-
- pr_info("AcDcGpio = %d\n", pptable->AcDcGpio);
- pr_info("AcDcPolarity = %d\n", pptable->AcDcPolarity);
- pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio);
- pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity);
-
- pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio);
- pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity);
- pr_info("Padding1 = 0x%x\n", pptable->Padding1);
- pr_info("Padding2 = 0x%x\n", pptable->Padding2);
-
- pr_info("LedPin0 = %d\n", pptable->LedPin0);
- pr_info("LedPin1 = %d\n", pptable->LedPin1);
- pr_info("LedPin2 = %d\n", pptable->LedPin2);
- pr_info("padding8_4 = 0x%x\n", pptable->padding8_4);
-
- pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled);
- pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent);
- pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq);
-
- pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled);
- pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent);
- pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq);
-
- pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled);
- pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent);
- pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq);
-
- pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled);
- pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent);
- pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq);
-
- for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
- pr_info("I2cControllers[%d]:\n", i);
- pr_info(" .Enabled = %d\n",
- pptable->I2cControllers[i].Enabled);
- pr_info(" .SlaveAddress = 0x%x\n",
- pptable->I2cControllers[i].SlaveAddress);
- pr_info(" .ControllerPort = %d\n",
- pptable->I2cControllers[i].ControllerPort);
- pr_info(" .ControllerName = %d\n",
- pptable->I2cControllers[i].ControllerName);
- pr_info(" .ThermalThrottler = %d\n",
- pptable->I2cControllers[i].ThermalThrottler);
- pr_info(" .I2cProtocol = %d\n",
- pptable->I2cControllers[i].I2cProtocol);
- pr_info(" .I2cSpeed = %d\n",
- pptable->I2cControllers[i].I2cSpeed);
- }
-
- for (i = 0; i < 10; i++)
- pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]);
-
- for (i = 0; i < 8; i++)
- pr_info("MmHubPadding[%d] = 0x%x\n", i, pptable->MmHubPadding[i]);
-}
-#endif
-
static int check_powerplay_tables(
struct pp_hwmgr *hwmgr,
const ATOM_Vega20_POWERPLAYTABLE *powerplay_table)
@@ -652,8 +80,6 @@ static int check_powerplay_tables(
return -EINVAL;
}
- //dump_pptable(&powerplay_table->smcPPTable);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 9118fcddbf11..227bf0e84a13 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -60,7 +60,7 @@ struct vi_dpm_level {
struct vi_dpm_table {
uint32_t count;
- struct vi_dpm_level dpm_level[] __counted_by(count);
+ struct vi_dpm_level dpm_level[];
};
#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
@@ -91,7 +91,7 @@ struct phm_set_power_state_input {
struct phm_clock_array {
uint32_t count;
- uint32_t values[] __counted_by(count);
+ uint32_t values[];
};
struct phm_clock_voltage_dependency_record {
@@ -123,7 +123,7 @@ struct phm_acpclock_voltage_dependency_record {
struct phm_clock_voltage_dependency_table {
uint32_t count;
- struct phm_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_clock_voltage_dependency_record entries[];
};
struct phm_phase_shedding_limits_record {
@@ -140,7 +140,7 @@ struct phm_uvd_clock_voltage_dependency_record {
struct phm_uvd_clock_voltage_dependency_table {
uint8_t count;
- struct phm_uvd_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_uvd_clock_voltage_dependency_record entries[];
};
struct phm_acp_clock_voltage_dependency_record {
@@ -150,7 +150,7 @@ struct phm_acp_clock_voltage_dependency_record {
struct phm_acp_clock_voltage_dependency_table {
uint32_t count;
- struct phm_acp_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_acp_clock_voltage_dependency_record entries[];
};
struct phm_vce_clock_voltage_dependency_record {
@@ -161,32 +161,32 @@ struct phm_vce_clock_voltage_dependency_record {
struct phm_phase_shedding_limits_table {
uint32_t count;
- struct phm_phase_shedding_limits_record entries[] __counted_by(count);
+ struct phm_phase_shedding_limits_record entries[];
};
struct phm_vceclock_voltage_dependency_table {
uint8_t count;
- struct phm_vceclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_vceclock_voltage_dependency_record entries[];
};
struct phm_uvdclock_voltage_dependency_table {
uint8_t count;
- struct phm_uvdclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_uvdclock_voltage_dependency_record entries[];
};
struct phm_samuclock_voltage_dependency_table {
uint8_t count;
- struct phm_samuclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_samuclock_voltage_dependency_record entries[];
};
struct phm_acpclock_voltage_dependency_table {
uint32_t count;
- struct phm_acpclock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_acpclock_voltage_dependency_record entries[];
};
struct phm_vce_clock_voltage_dependency_table {
uint8_t count;
- struct phm_vce_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_vce_clock_voltage_dependency_record entries[];
};
@@ -393,7 +393,7 @@ union phm_cac_leakage_record {
struct phm_cac_leakage_table {
uint32_t count;
- union phm_cac_leakage_record entries[] __counted_by(count);
+ union phm_cac_leakage_record entries[];
};
struct phm_samu_clock_voltage_dependency_record {
@@ -404,7 +404,7 @@ struct phm_samu_clock_voltage_dependency_record {
struct phm_samu_clock_voltage_dependency_table {
uint8_t count;
- struct phm_samu_clock_voltage_dependency_record entries[] __counted_by(count);
+ struct phm_samu_clock_voltage_dependency_record entries[];
};
struct phm_cac_tdp_table {
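The hwmgr.h hunk strips __counted_by() from these flexible-array members. The attribute promises the compiler and FORTIFY_SOURCE that the named field always holds the current bound of the array, which is only safe when the count is written before any element is accessed; presumably these powerplay tables are populated in an order that breaks that promise, so the runtime bounds checks would fire spuriously. A sketch of the contract, with a libc-style allocator standing in for the kernel allocation helpers:

#include <stdint.h>
#include <stdlib.h>

#ifndef __counted_by
#define __counted_by(m)		/* no-op where the attribute is unsupported */
#endif

struct clock_array {
	uint32_t count;
	uint32_t values[] __counted_by(count);
};

static struct clock_array *alloc_clocks(uint32_t n)
{
	struct clock_array *a = malloc(sizeof(*a) + n * sizeof(a->values[0]));

	if (!a)
		return NULL;

	a->count = n;	/* must be set before values[] is indexed; an access
			 * at index >= count is then a detectable overflow */
	return a;
}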
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
index b52ce135d84d..d3ff6a831ed5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
@@ -257,20 +257,18 @@ static int vega12_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t);
tools_size = 0x19000;
- if (tools_size) {
- ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- tools_size,
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
- &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
- &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
- if (ret)
- goto err1;
+ ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
+ tools_size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
+ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
+ &priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
+ if (ret)
+ goto err1;
- priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
- priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;
- }
+ priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
+ priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;
/* allocate space for AVFS Fuse table */
ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index bb3bc68dfc39..8ca793c222ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
+static void smu_power_profile_mode_get(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE profile_mode);
+static void smu_power_profile_mode_put(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE profile_mode);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -140,7 +144,8 @@ int smu_set_soft_freq_range(struct smu_context *smu,
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
min,
- max);
+ max,
+ false);
return ret;
}
@@ -233,7 +238,8 @@ static bool is_vcn_enabled(struct amdgpu_device *adev)
}
static int smu_dpm_set_vcn_enable(struct smu_context *smu,
- bool enable)
+ bool enable,
+ int inst)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -248,12 +254,12 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
if (!smu->ppt_funcs->dpm_set_vcn_enable)
return 0;
- if (atomic_read(&power_gate->vcn_gated) ^ enable)
+ if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
return 0;
- ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
+ ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
if (!ret)
- atomic_set(&power_gate->vcn_gated, !enable);
+ atomic_set(&power_gate->vcn_gated[inst], !enable);
return ret;
}
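This hunk converts the single vcn_gated flag into a per-instance array so each VCN instance can be gated independently; the XOR early-return keeps the call idempotent, and the atomic is only updated once the hardware call succeeds. A reduced sketch of that state machine, using C11 atomics and a hypothetical hw_set_vcn_enable() in place of the ppt_funcs hook:

#include <stdatomic.h>
#include <stdbool.h>

#define MAX_VCN_INST 4		/* stand-in for AMDGPU_MAX_VCN_INSTANCES */

static atomic_int vcn_gated[MAX_VCN_INST];	/* 1 = instance is power gated */

int hw_set_vcn_enable(bool enable, int inst);	/* hypothetical hardware call */

static int set_vcn_enable(bool enable, int inst)
{
	int ret;

	/* gated==1 with enable==1 (or gated==0 with enable==0) means there is
	 * work to do; any other combination is already in the requested state. */
	if (atomic_load(&vcn_gated[inst]) ^ enable)
		return 0;

	ret = hw_set_vcn_enable(enable, inst);
	if (!ret)
		atomic_store(&vcn_gated[inst], !enable);	/* commit on success only */

	return ret;
}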
@@ -340,8 +346,9 @@ static int smu_set_mall_enable(struct smu_context *smu)
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
* @handle: smu_context pointer
- * @block_type: the IP block to power gate/ungate
- * @gate: to power gate if true, ungate otherwise
+ * @block_type: the IP block to power gate/ungate
+ * @gate: to power gate if true, ungate otherwise
+ * @inst: the instance of the IP block to power gate/ungate
*
* This API uses no smu->mutex lock protection due to:
* 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce).
@@ -352,7 +359,8 @@ static int smu_set_mall_enable(struct smu_context *smu)
*/
static int smu_dpm_set_power_gate(void *handle,
uint32_t block_type,
- bool gate)
+ bool gate,
+ int inst)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -371,10 +379,10 @@ static int smu_dpm_set_power_gate(void *handle,
*/
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCN:
- ret = smu_dpm_set_vcn_enable(smu, !gate);
+ ret = smu_dpm_set_vcn_enable(smu, !gate, inst);
if (ret)
- dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
- gate ? "gate" : "ungate");
+ dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
+ gate ? "gate" : "ungate", inst);
break;
case AMD_IP_BLOCK_TYPE_GFX:
ret = smu_gfx_off_control(smu, gate);
@@ -549,7 +557,8 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20)
return false;
- if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0))
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
+ amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
return true;
return false;
@@ -718,6 +727,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
break;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
smu_v13_0_6_set_ppt_funcs(smu);
/* Enable pp_od_clk_voltage node */
smu->od_enabled = true;
@@ -741,9 +751,9 @@ static int smu_set_funcs(struct amdgpu_device *adev)
return 0;
}
-static int smu_early_init(void *handle)
+static int smu_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu;
int r;
@@ -758,6 +768,7 @@ static int smu_early_init(void *handle)
smu->smu_baco.platform_support = false;
smu->smu_baco.maco_support = false;
smu->user_dpm_profile.fan_mode = -1;
+ smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;
mutex_init(&smu->message_lock);
@@ -775,21 +786,25 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
- int vcn_gate, jpeg_gate;
+ int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
int ret = 0;
if (!smu->ppt_funcs->set_default_dpm_table)
return 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
- vcn_gate = atomic_read(&power_gate->vcn_gated);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
+ }
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
jpeg_gate = atomic_read(&power_gate->jpeg_gated);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
- ret = smu_dpm_set_vcn_enable(smu, true);
- if (ret)
- return ret;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ ret = smu_dpm_set_vcn_enable(smu, true, i);
+ if (ret)
+ return ret;
+ }
}
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
@@ -806,8 +821,10 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
- smu_dpm_set_vcn_enable(smu, !vcn_gate);
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
+ }
return ret;
}
@@ -825,9 +842,9 @@ static int smu_apply_default_config_table_settings(struct smu_context *smu)
return smu_set_config_table(smu, &adev->pm.config_table);
}
-static int smu_late_init(void *handle)
+static int smu_late_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
@@ -1234,11 +1251,34 @@ static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
}
}
-static int smu_sw_init(void *handle)
+static bool smu_is_workload_profile_available(struct smu_context *smu,
+ u32 profile)
+{
+ if (profile >= PP_SMC_POWER_PROFILE_COUNT)
+ return false;
+ return smu->workload_map && smu->workload_map[profile].valid_mapping;
+}
+
+static void smu_init_power_profile(struct smu_context *smu)
+{
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) {
+ if (smu->is_apu ||
+ !smu_is_workload_profile_available(
+ smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
+ smu->power_profile_mode =
+ PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ else
+ smu->power_profile_mode =
+ PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ }
+ smu_power_profile_mode_get(smu, smu->power_profile_mode);
+}
+
+static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret;
+ int i, ret;
smu->pool_size = adev->pm.smu_prv_buffer_size;
smu->smu_feature.feature_num = SMU_FEATURE_MAX;
@@ -1249,30 +1289,14 @@ static int smu_sw_init(void *handle)
INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
atomic64_set(&smu->throttle_int_counter, 0);
smu->watermarks_bitmap = 0;
- smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
- smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
- smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
- smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
- smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
- smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
- smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
- smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
-
- smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
- smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
- smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
- smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
- smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
- smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
- smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
+ smu_init_power_profile(smu);
smu->display_config = &adev->pm.pm_display_cfg;
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -1313,9 +1337,9 @@ static int smu_sw_init(void *handle)
return 0;
}
-static int smu_sw_fini(void *handle)
+static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
@@ -1325,6 +1349,11 @@ static int smu_sw_fini(void *handle)
return ret;
}
+ if (smu->custom_profile_params) {
+ kfree(smu->custom_profile_params);
+ smu->custom_profile_params = NULL;
+ }
+
smu_fini_microcode(smu);
return 0;
@@ -1682,7 +1711,9 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
- if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
+ pcie_gen = 4;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_gen = 3;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
pcie_gen = 2;
@@ -1695,7 +1726,9 @@ static int smu_smc_hw_setup(struct smu_context *smu)
* Bit 15:8: PCIE GEN, 0 to 4 corresponds to GEN1 to GEN5
* Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
*/
- if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
+ pcie_width = 7;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
pcie_width = 6;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
pcie_width = 5;
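The gen and width indexes selected above are packed into a single message argument using the bit layout documented in the comment (gen index in bits 15:8, width index in bits 7:0). A minimal sketch of that packing, with a hypothetical helper name:

	/* Hypothetical helper: pack the PCIe gen/width indexes into the
	 * argument layout described by the comment above. */
	static inline uint32_t example_encode_pcie_arg(uint32_t pcie_gen,
						       uint32_t pcie_width)
	{
		return (pcie_gen << 8) | (pcie_width & 0xff);
	}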
@@ -1786,10 +1819,10 @@ static int smu_start_smc_engine(struct smu_context *smu)
return ret;
}
-static int smu_hw_init(void *handle)
+static int smu_hw_init(struct amdgpu_ip_block *ip_block)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, ret;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
@@ -1814,7 +1847,8 @@ static int smu_hw_init(void *handle)
ret = smu_set_gfx_imu_enable(smu);
if (ret)
return ret;
- smu_dpm_set_vcn_enable(smu, true);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ smu_dpm_set_vcn_enable(smu, true, i);
smu_dpm_set_jpeg_enable(smu, true);
smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
@@ -2008,16 +2042,17 @@ static int smu_reset_mp1_state(struct smu_context *smu)
return ret;
}
-static int smu_hw_fini(void *handle)
+static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret;
+ int i, ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- smu_dpm_set_vcn_enable(smu, false);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ smu_dpm_set_vcn_enable(smu, false, i);
smu_dpm_set_jpeg_enable(smu, false);
smu_dpm_set_vpe_enable(smu, false);
smu_dpm_set_umsch_mm_enable(smu, false);
@@ -2041,9 +2076,9 @@ static int smu_hw_fini(void *handle)
return 0;
}
-static void smu_late_fini(void *handle)
+static void smu_late_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
kfree(smu);
@@ -2052,26 +2087,31 @@ static void smu_late_fini(void *handle)
static int smu_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ip_block *ip_block;
int ret;
- ret = smu_hw_fini(adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
+ if (!ip_block)
+ return -EINVAL;
+
+ ret = smu_hw_fini(ip_block);
if (ret)
return ret;
- ret = smu_hw_init(adev);
+ ret = smu_hw_init(ip_block);
if (ret)
return ret;
- ret = smu_late_init(adev);
+ ret = smu_late_init(ip_block);
if (ret)
return ret;
return 0;
}
-static int smu_suspend(void *handle)
+static int smu_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
uint64_t count;
@@ -2100,13 +2140,16 @@ static int smu_suspend(void *handle)
if (!ret)
adev->gfx.gfx_off_entrycount = count;
+ /* clear this on suspend so it will get reprogrammed on resume */
+ smu->workload_mask = 0;
+
return 0;
}
-static int smu_resume(void *handle)
+static int smu_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
@@ -2161,13 +2204,13 @@ static int smu_display_configuration_change(void *handle,
return 0;
}
-static int smu_set_clockgating_state(void *handle,
+static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int smu_set_powergating_state(void *handle,
+static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -2212,25 +2255,49 @@ static int smu_enable_umd_pstate(void *handle,
}
static int smu_bump_power_profile_mode(struct smu_context *smu,
- long *param,
- uint32_t param_size)
+ long *custom_params,
+ u32 custom_params_max_idx)
{
- int ret = 0;
+ u32 workload_mask = 0;
+ int i, ret = 0;
+
+ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
+ if (smu->workload_refcount[i])
+ workload_mask |= 1 << i;
+ }
+
+ if (smu->workload_mask == workload_mask)
+ return 0;
if (smu->ppt_funcs->set_power_profile_mode)
- ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+ ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
+ custom_params,
+ custom_params_max_idx);
+
+ if (!ret)
+ smu->workload_mask = workload_mask;
return ret;
}
+static void smu_power_profile_mode_get(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE profile_mode)
+{
+ smu->workload_refcount[profile_mode]++;
+}
+
+static void smu_power_profile_mode_put(struct smu_context *smu,
+ enum PP_SMC_POWER_PROFILE profile_mode)
+{
+ if (smu->workload_refcount[profile_mode])
+ smu->workload_refcount[profile_mode]--;
+}
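smu_bump_power_profile_mode() now derives the asic-agnostic mask purely from the per-profile refcounts: every profile with a non-zero refcount contributes its bit, and the backend is only messaged when the resulting mask differs from the one already programmed. The derivation in isolation, as a self-contained sketch:

	/* Sketch: derive a workload mask from per-profile refcounts,
	 * mirroring the loop at the top of smu_bump_power_profile_mode(). */
	static u32 example_mask_from_refcounts(const u32 *refcount, int count)
	{
		u32 mask = 0;
		int i;

		for (i = 0; i < count; i++)
			if (refcount[i])
				mask |= 1u << i;
		return mask;
	}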
+
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
- bool skip_display_settings,
- bool force_update)
+ bool skip_display_settings)
{
int ret = 0;
- int index = 0;
- long workload[1];
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (!skip_display_settings) {
@@ -2255,7 +2322,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
- if (force_update || smu_dpm_ctx->dpm_level != level) {
+ if (smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
dev_err(smu->adev->dev, "Failed to set performance level!");
@@ -2267,14 +2334,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
- smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
- index = fls(smu->workload_mask);
- index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload[0] = smu->workload_setting[index];
-
- if (force_update || smu->power_profile_mode != workload[0])
- smu_bump_power_profile_mode(smu, workload, 0);
- }
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+ smu_bump_power_profile_mode(smu, NULL, 0);
return ret;
}
@@ -2293,13 +2354,13 @@ static int smu_handle_task(struct smu_context *smu,
ret = smu_pre_display_config_changed(smu);
if (ret)
return ret;
- ret = smu_adjust_power_state_dynamic(smu, level, false, false);
+ ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
- ret = smu_adjust_power_state_dynamic(smu, level, true, true);
+ ret = smu_adjust_power_state_dynamic(smu, level, true);
break;
case AMD_PP_TASK_READJUST_POWER_STATE:
- ret = smu_adjust_power_state_dynamic(smu, level, true, false);
+ ret = smu_adjust_power_state_dynamic(smu, level, true);
break;
default:
break;
@@ -2321,12 +2382,11 @@ static int smu_handle_dpm_task(void *handle,
static int smu_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type,
- bool en)
+ bool enable)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- long workload[1];
- uint32_t index;
+ int ret;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -2334,21 +2394,21 @@ static int smu_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
- if (!en) {
- smu->workload_mask &= ~(1 << smu->workload_prority[type]);
- index = fls(smu->workload_mask);
- index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload[0] = smu->workload_setting[index];
- } else {
- smu->workload_mask |= (1 << smu->workload_prority[type]);
- index = fls(smu->workload_mask);
- index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload[0] = smu->workload_setting[index];
- }
-
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
- smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
- smu_bump_power_profile_mode(smu, workload, 0);
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ if (enable)
+ smu_power_profile_mode_get(smu, type);
+ else
+ smu_power_profile_mode_put(smu, type);
+ ret = smu_bump_power_profile_mode(smu, NULL, 0);
+ if (ret) {
+ if (enable)
+ smu_power_profile_mode_put(smu, type);
+ else
+ smu_power_profile_mode_get(smu, type);
+ return ret;
+ }
+ }
return 0;
}
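smu_switch_power_profile() takes the refcount before bumping and drops it again if the bump fails, so the refcounts always match what was actually programmed. The acquire/rollback idiom reduced to a sketch (assumes the same static helpers):

	/* Sketch of the acquire/rollback idiom used above. */
	static int example_enable_profile(struct smu_context *smu,
					  enum PP_SMC_POWER_PROFILE type)
	{
		int ret;

		smu_power_profile_mode_get(smu, type);
		ret = smu_bump_power_profile_mode(smu, NULL, 0);
		if (ret)
			smu_power_profile_mode_put(smu, type);	/* roll back */
		return ret;
	}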
@@ -2865,6 +2925,10 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
case OD_FAN_MINIMUM_PWM:
clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
+ case OD_FAN_ZERO_RPM_ENABLE:
+ clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break;
+ case OD_FAN_ZERO_RPM_STOP_TEMP:
+ clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break;
default:
clk_type = SMU_CLK_COUNT; break;
}
@@ -2928,9 +2992,10 @@ static int smu_read_sensor(void *handle,
int *size_arg)
{
struct smu_context *smu = handle;
+ struct amdgpu_device *adev = smu->adev;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
- int ret = 0;
+ int i, ret = 0;
uint32_t *size, size_val;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2976,7 +3041,13 @@ static int smu_read_sensor(void *handle,
*size = 4;
break;
case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
- *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
+ *(uint32_t *)data = 0;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
+ *(uint32_t *)data = 1;
+ break;
+ }
+ }
*size = 4;
break;
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
@@ -3036,12 +3107,35 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
+ bool custom = false;
+ int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
- return smu_bump_power_profile_mode(smu, param, param_size);
+ if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
+ custom = true;
+ /* clear frontend mask so custom changes propagate */
+ smu->workload_mask = 0;
+ }
+
+ if ((param[param_size] != smu->power_profile_mode) || custom) {
+ /* clear the old user preference */
+ smu_power_profile_mode_put(smu, smu->power_profile_mode);
+ /* set the new user preference */
+ smu_power_profile_mode_get(smu, param[param_size]);
+ ret = smu_bump_power_profile_mode(smu,
+ custom ? param : NULL,
+ custom ? param_size : 0);
+ if (ret)
+ smu_power_profile_mode_put(smu, param[param_size]);
+ else
+ /* store the user's preference */
+ smu->power_profile_mode = param[param_size];
+ }
+
+ return ret;
}
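In smu_set_power_profile_mode() the requested profile sits at param[param_size], and for PP_SMC_POWER_PROFILE_CUSTOM the preceding entries carry the backend's custom coefficients (a clock selector followed by nine values on arcturus, see below). A hedged sketch of building such a request; the values are illustrative only:

	/* Illustrative only: request the CUSTOM profile with a ten-entry
	 * coefficient block (selector in [0], coefficients in [1..9],
	 * profile id in [param_size]). */
	static int example_request_custom_profile(void *handle)
	{
		long param[11] = { 0 /* 0 = Gfxclk block */ };
		uint32_t param_size = 10;

		param[param_size] = PP_SMC_POWER_PROFILE_CUSTOM;
		return smu_set_power_profile_mode(handle, param, param_size);
	}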
static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
@@ -3821,3 +3915,13 @@ int smu_send_rma_reason(struct smu_context *smu)
return ret;
}
+
+int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma)
+ ret = smu->ppt_funcs->reset_sdma(smu, inst_mask);
+
+ return ret;
+}
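smu_reset_sdma() simply forwards to the backend when it implements ->reset_sdma. A hedged usage sketch; that bit n of inst_mask selects SDMA instance n is an assumption based on the mask-style parameter, not something this patch confirms:

	/* Hypothetical caller: soft reset SDMA instances 0 and 1
	 * (bit n == instance n is an assumption). */
	static int example_reset_first_two_sdma(struct smu_context *smu)
	{
		return smu_reset_sdma(smu, BIT(0) | BIT(1));
	}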
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b44a185d07e8..3630593bce61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -399,7 +399,7 @@ struct smu_dpm_context {
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
- atomic_t vcn_gated;
+ atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
atomic_t jpeg_gated;
atomic_t vpe_gated;
atomic_t umsch_mm_gated;
@@ -556,11 +556,13 @@ struct smu_context {
uint32_t hard_min_uclk_req_from_dal;
bool disable_uclk_switch;
+ /* asic agnostic workload mask */
uint32_t workload_mask;
- uint32_t workload_prority[WORKLOAD_POLICY_MAX];
- uint32_t workload_setting[WORKLOAD_POLICY_MAX];
+ /* default/user workload preference */
uint32_t power_profile_mode;
- uint32_t default_power_profile_mode;
+ uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
+ /* backend specific custom workload settings */
+ long *custom_profile_params;
bool pm_enabled;
bool is_apu;
@@ -731,15 +733,18 @@ struct pptable_funcs {
* @set_power_profile_mode: Set a power profile mode. Also used to
* create/set custom power profile modes.
* &input: Power profile mode parameters.
- * &size: Size of &input.
+ * &workload_mask: mask of workloads to enable
+ * &custom_params: custom profile parameters
+ * &custom_params_max_idx: max valid idx into custom_params
*/
- int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+ int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
+ long *custom_params, u32 custom_params_max_idx);
/**
* @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
* management.
*/
- int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable);
+ int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable, int inst);
/**
* @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power
@@ -859,11 +864,6 @@ struct pptable_funcs {
int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
/**
- * @dump_pptable: Print the power play table to the system log.
- */
- void (*dump_pptable)(struct smu_context *smu);
-
- /**
* @get_power_limit: Get the device's power limits.
*/
int (*get_power_limit)(struct smu_context *smu,
@@ -1260,7 +1260,8 @@ struct pptable_funcs {
* @set_soft_freq_limited_range: Set the soft frequency range of a clock
* domain in MHz.
*/
- int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+ int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max,
+ bool automatic);
/**
* @set_power_source: Notify the SMU of the current power source.
@@ -1372,6 +1373,11 @@ struct pptable_funcs {
int (*send_rma_reason)(struct smu_context *smu);
/**
+ * @reset_sdma: message SMU to soft reset the SDMA instances selected by inst_mask.
+ */
+ int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
+
+ /**
* @get_ecc_table: message SMU to get ECC INFO table.
*/
ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
@@ -1630,6 +1636,7 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
int smu_send_rma_reason(struct smu_context *smu);
+int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
index ee457a6f0813..c2fd0a4a13e5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
@@ -25,7 +25,7 @@
#define SMU14_DRIVER_IF_V14_0_H
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x18
+#define PPTABLE_VERSION 0x1B
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -145,7 +145,7 @@ typedef enum {
} FEATURE_BTC_e;
// Debug Overrides Bitmask
-#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001
+#define DEBUG_OVERRIDE_NOT_USE 0x00000001
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004
#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008
@@ -161,6 +161,7 @@ typedef enum {
#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000
#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000
#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000
+#define DEBUG_OVERRIDE_DFLL_BTC_FCW_LOG 0x00010000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
@@ -391,6 +392,21 @@ typedef struct {
EccInfo_t EccInfo[24];
} EccInfoTable_t;
+#define EPCS_HIGH_POWER 600
+#define EPCS_NORMAL_POWER 450
+#define EPCS_LOW_POWER 300
+#define EPCS_SHORTED_POWER 150
+#define EPCS_NO_BOOTUP 0
+
+typedef enum{
+ EPCS_SHORTED_LIMIT,
+ EPCS_LOW_POWER_LIMIT,
+ EPCS_NORMAL_POWER_LIMIT,
+ EPCS_HIGH_POWER_LIMIT,
+ EPCS_NOT_CONFIGURED,
+ EPCS_STATUS_COUNT,
+} EPCS_STATUS_e;
+
//D3HOT sequences
typedef enum {
BACO_SEQUENCE,
@@ -662,7 +678,7 @@ typedef enum {
} PP_GRTAVFS_FW_SEP_FUSE_e;
#define PP_NUM_RTAVFS_PWL_ZONES 5
-
+#define PP_NUM_PSM_DIDT_PWL_ZONES 3
// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3
// Slope Q1.7, Offset Q1.2
@@ -746,10 +762,10 @@ typedef struct {
uint16_t Padding;
//Frequency changes
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
- uint16_t UclkFmin; // MHz
- uint16_t UclkFmax; // MHz
+ int16_t GfxclkFoffset;
+ uint16_t Padding1;
+ uint16_t UclkFmin;
+ uint16_t UclkFmax;
uint16_t FclkFmin;
uint16_t FclkFmax;
@@ -770,19 +786,23 @@ typedef struct {
uint8_t MaxOpTemp;
uint8_t AdvancedOdModeEnabled;
- uint8_t Padding1[3];
+ uint8_t Padding2[3];
uint16_t GfxVoltageFullCtrlMode;
uint16_t SocVoltageFullCtrlMode;
uint16_t GfxclkFullCtrlMode;
uint16_t UclkFullCtrlMode;
uint16_t FclkFullCtrlMode;
- uint16_t Padding2;
+ uint16_t Padding3;
int16_t GfxEdc;
int16_t GfxPccLimitControl;
- uint32_t Spare[10];
+ uint16_t GfxclkFmaxVmax;
+ uint8_t GfxclkFmaxVmaxTemperature;
+ uint8_t Padding4[1];
+
+ uint32_t Spare[9];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
@@ -802,8 +822,8 @@ typedef struct {
uint16_t VddSocVmax;
//gfxclk
- int16_t GfxclkFmin; // MHz
- int16_t GfxclkFmax; // MHz
+ int16_t GfxclkFoffset;
+ uint16_t Padding;
//uclk
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -828,7 +848,7 @@ typedef struct {
uint8_t FanZeroRpmEnable;
//temperature
uint8_t MaxOpTemp;
- uint8_t Padding[2];
+ uint8_t Padding1[2];
//Full Ctrl
uint16_t GfxVoltageFullCtrlMode;
@@ -839,7 +859,7 @@ typedef struct {
//EDC
int16_t GfxEdc;
int16_t GfxPccLimitControl;
- int16_t Padding1;
+ int16_t Padding2;
uint32_t Spare[5];
} OverDriveLimits_t;
@@ -987,8 +1007,9 @@ typedef struct {
uint16_t BaseClockDc;
uint16_t GameClockDc;
uint16_t BoostClockDc;
-
- uint32_t Reserved[4];
+ uint16_t MaxReportedClock;
+ uint16_t Padding;
+ uint32_t Reserved[3];
} DriverReportedClocks_t;
typedef struct {
@@ -1132,7 +1153,7 @@ typedef struct {
uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz
uint16_t GfxclkAibFmax;
- uint16_t GfxclkFreqCap;
+ uint16_t GfxDpmPadding;
//GFX Idle Power Settings
uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz
@@ -1172,8 +1193,7 @@ typedef struct {
uint32_t DvoFmaxLowScaler; //Unitless float
// GFX DCS
- uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase
- uint16_t PaddingDcs;
+ uint32_t PaddingDcs;
uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.
@@ -1205,8 +1225,7 @@ typedef struct {
uint16_t DalDcModeMaxUclkFreq;
uint8_t PaddingsMem[2];
//FCLK Section
- uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 use FW calculated value
- uint16_t PaddingFclk;
+ uint32_t PaddingFclk;
// Link DPM Settings
uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
@@ -1215,12 +1234,19 @@ typedef struct {
// SECTION: VDD_GFX AVFS
uint8_t OverrideGfxAvfsFuses;
- uint8_t GfxAvfsPadding[3];
+ uint8_t GfxAvfsPadding[1];
+ uint16_t DroopGBStDev;
uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain
uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding
//uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
- uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
+
+ uint16_t PsmDidt_Vcross[PP_NUM_PSM_DIDT_PWL_ZONES-1];
+ uint32_t PsmDidt_StaticDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_StaticDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_A[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t PsmDidt_DynDroop_B[PP_NUM_PSM_DIDT_PWL_ZONES];
+ uint32_t spare_HwRtAvfsFuses[19];
uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
@@ -1246,11 +1272,7 @@ typedef struct {
uint32_t dGbV_dT_vmin;
uint32_t dGbV_dT_vmax;
- //Unused: PMFW-9370
- uint32_t V2F_vmin_range_low;
- uint32_t V2F_vmin_range_high;
- uint32_t V2F_vmax_range_low;
- uint32_t V2F_vmax_range_high;
+ uint32_t PaddingV2F[4];
AvfsDcBtcParams_t DcBtcGfxParams;
QuadraticInt_t SSCurve_GFX;
@@ -1327,18 +1349,18 @@ typedef struct {
uint16_t PsmDidtReleaseTimer;
uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog
// CAC EDC
- uint32_t Leakage_C0; // in IEEE float
- uint32_t Leakage_C1; // in IEEE float
- uint32_t Leakage_C2; // in IEEE float
- uint32_t Leakage_C3; // in IEEE float
- uint32_t Leakage_C4; // in IEEE float
- uint32_t Leakage_C5; // in IEEE float
- uint32_t GFX_CLK_SCALAR; // in IEEE float
- uint32_t GFX_CLK_INTERCEPT; // in IEEE float
- uint32_t GFX_CAC_M; // in IEEE float
- uint32_t GFX_CAC_B; // in IEEE float
- uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float
- uint32_t DynToTotalCacScalar; // in IEEE
+ uint32_t CacEdcCacLeakageC0;
+ uint32_t CacEdcCacLeakageC1;
+ uint32_t CacEdcCacLeakageC2;
+ uint32_t CacEdcCacLeakageC3;
+ uint32_t CacEdcCacLeakageC4;
+ uint32_t CacEdcCacLeakageC5;
+ uint32_t CacEdcGfxClkScalar;
+ uint32_t CacEdcGfxClkIntercept;
+ uint32_t CacEdcCac_m;
+ uint32_t CacEdcCac_b;
+ uint32_t CacEdcCurrLimitGuardband;
+ uint32_t CacEdcDynToTotalCacRatio;
// GFX EDC XVMIN
uint32_t XVmin_Gfx_EdcThreshScalar;
uint32_t XVmin_Gfx_EdcEnableFreq;
@@ -1467,7 +1489,7 @@ typedef struct {
uint8_t VddqOffEnabled;
uint8_t PaddingUmcFlags[2];
- uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
+ uint32_t Paddign1;
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
uint8_t FuseWritePowerMuxPresent;
@@ -1530,7 +1552,7 @@ typedef struct {
int16_t FuzzyFan_ErrorSetDelta;
int16_t FuzzyFan_ErrorRateSetDelta;
int16_t FuzzyFan_PwmSetDelta;
- uint16_t FuzzyFan_Reserved;
+ uint16_t FanPadding2;
uint16_t FwCtfLimit[TEMP_COUNT];
@@ -1547,9 +1569,10 @@ typedef struct {
uint16_t FanSpare[1];
uint8_t FanIntakeSensorSupport;
uint8_t FanIntakePadding;
- uint32_t FanAmbientPerfBoostThreshold;
uint32_t FanSpare2[12];
+ uint32_t ODFeatureCtrlMask;
+
uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix
uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
uint16_t TemperatureFwCtfLimit_Hynix;
@@ -1637,7 +1660,7 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
- uint16_t PCIeBusy ;
+ uint16_t AveragePCIeBusy ;
uint16_t dGPU_W_MAX ;
uint16_t padding ;
@@ -1665,12 +1688,12 @@ typedef struct {
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
- uint16_t Vcn0ActivityPercentage ;
+ uint16_t AverageVcn0ActivityPercentage;
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
uint16_t AverageSocketPower;
- uint16_t MovingAverageTotalBoardPower;
+ uint16_t AverageTotalBoardPower;
uint16_t AvgTemperature[TEMP_COUNT];
uint16_t AvgTemperatureFanIntake;
@@ -1684,7 +1707,8 @@ typedef struct {
uint8_t ThrottlingPercentage[THROTTLER_COUNT];
- uint8_t padding1[3];
+ uint8_t VmaxThrottlingPercentage;
+ uint8_t padding1[2];
//metrics for D3hot entry/exit and driver ARM msgs
uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
@@ -1693,7 +1717,7 @@ typedef struct {
uint16_t ApuSTAPMSmartShiftLimit;
uint16_t ApuSTAPMLimit;
- uint16_t MovingAvgApuSocketPower;
+ uint16_t AvgApuSocketPower;
uint16_t AverageUclkActivity_MAX;
@@ -1823,6 +1847,17 @@ typedef struct {
#define TABLE_TRANSFER_FAILED 0xFF
#define TABLE_TRANSFER_PENDING 0xAB
+#define TABLE_PPT_FAILED 0x100
+#define TABLE_TDC_FAILED 0x200
+#define TABLE_TEMP_FAILED 0x400
+#define TABLE_FAN_TARGET_TEMP_FAILED 0x800
+#define TABLE_FAN_STOP_TEMP_FAILED 0x1000
+#define TABLE_FAN_START_TEMP_FAILED 0x2000
+#define TABLE_FAN_PWM_MIN_FAILED 0x4000
+#define TABLE_ACOUSTIC_TARGET_RPM_FAILED 0x8000
+#define TABLE_ACOUSTIC_LIMIT_RPM_FAILED 0x10000
+#define TABLE_MGPU_ACOUSTIC_TARGET_RPM_FAILED 0x20000
+
// Table types
#define TABLE_PPTABLE 0
#define TABLE_COMBO_PPTABLE 1
@@ -1849,5 +1884,6 @@ typedef struct {
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define IH_INTERRUPT_CONTEXT_ID_DYNAMIC_TABLE 0xA
#endif
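The new TABLE_*_FAILED values extend the transfer status beyond the existing one-byte codes, with one bit per overdrive item so multiple rejections can be reported at once. A hedged sketch of decoding such a status word (the helper is hypothetical):

	/* Hypothetical decoder for the per-item failure bits above. */
	static void example_decode_table_status(struct amdgpu_device *adev,
						uint32_t status)
	{
		if (status & TABLE_PPT_FAILED)
			dev_warn(adev->dev, "PPT limit rejected\n");
		if (status & TABLE_FAN_PWM_MIN_FAILED)
			dev_warn(adev->dev, "fan minimum PWM rejected\n");
		/* ... the remaining TABLE_*_FAILED bits decode alike ... */
	}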
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 822c6425d90e..274b3e1cc4fb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -34,6 +34,8 @@
#define NUM_PCIE_BITRATES 4
#define NUM_XGMI_BITRATES 4
#define NUM_XGMI_WIDTHS 3
+#define NUM_SOC_P2S_TABLES 3
+#define NUM_TDP_GROUPS 4
typedef enum {
/*0*/ FEATURE_DATA_CALCULATION = 0,
@@ -80,8 +82,10 @@ typedef enum {
/*41*/ FEATURE_CXL_QOS = 41,
/*42*/ FEATURE_SOC_DC_RTC = 42,
/*43*/ FEATURE_GFX_DC_RTC = 43,
+/*44*/ FEATURE_DVM_MIN_PSM = 44,
+/*45*/ FEATURE_PRC = 45,
-/*44*/ NUM_FEATURES = 44
+/*46*/ NUM_FEATURES = 46
} FEATURE_LIST_e;
//enum for MPIO PCIe gen speed msgs
@@ -123,7 +127,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0xD
+#define SMU_METRICS_TABLE_VERSION 0xF
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -231,6 +235,12 @@ typedef struct __attribute__((packed, aligned(4))) {
// PER XCD ACTIVITY
uint32_t GfxBusy[8];
uint64_t GfxBusyAcc[8];
+
+ //PCIE BW Data and error count
+ uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated
+
+ //Total App Clock Counter
+ uint64_t GfxclkBelowHostLimitAcc[8];
} MetricsTableX_t;
typedef struct __attribute__((packed, aligned(4))) {
@@ -325,13 +335,14 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t JpegBusy[32];
} MetricsTableA_t;
-#define SMU_VF_METRICS_TABLE_VERSION 0x3
+#define SMU_VF_METRICS_TABLE_VERSION 0x5
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
uint64_t AccGfxclk_TargFreq;
uint64_t AccGfxRsmuDpm_Busy;
+ uint64_t AccGfxclkBelowHostLimit;
} VfMetricsTable_t;
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 41cb681927e2..147bfb12fd75 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -93,7 +93,8 @@
#define PPSMC_MSG_SelectPLPDMode 0x40
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SelectPstatePolicy 0x44
-#define PPSMC_Message_Count 0x45
+#define PPSMC_MSG_ResetSDMA 0x4D
+#define PPSMC_Message_Count 0x4E
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index e71a721c12b9..e4cd6a0d13da 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -275,7 +275,8 @@
__SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \
__SMU_DUMMY_MAP(SelectPstatePolicy), \
__SMU_DUMMY_MAP(MALLPowerController), \
- __SMU_DUMMY_MAP(MALLPowerState),
+ __SMU_DUMMY_MAP(MALLPowerState), \
+ __SMU_DUMMY_MAP(ResetSDMA),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -313,6 +314,8 @@ enum smu_clk_type {
SMU_OD_ACOUSTIC_TARGET,
SMU_OD_FAN_TARGET_TEMPERATURE,
SMU_OD_FAN_MINIMUM_PWM,
+ SMU_OD_FAN_ZERO_RPM_ENABLE,
+ SMU_OD_FAN_ZERO_RPM_STOP_TEMP,
SMU_CLK_COUNT,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index c2ab336bb530..ed8304d82831 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -255,7 +255,7 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
uint32_t *min, uint32_t *max);
int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool automatic);
int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
index 1ad2dff71090..0886d8cffbd0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
@@ -56,7 +56,7 @@ int smu_v12_0_set_default_dpm_tables(struct smu_context *smu);
int smu_v12_0_mode2_reset(struct smu_context *smu);
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool automatic);
int smu_v12_0_set_driver_table_location(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index e58220a7ee2f..8d4a96e23326 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -107,6 +107,7 @@ struct smu_13_0_dpm_context {
struct smu_13_0_dpm_tables dpm_tables;
uint32_t workload_policy_mask;
uint32_t dcef_min_ds_clk;
+ uint64_t caps;
};
enum smu_13_0_power_state {
@@ -219,7 +220,7 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
uint32_t *min, uint32_t *max);
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool automatic);
int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
@@ -255,7 +256,8 @@ int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
uint64_t event_arg);
int smu_v13_0_set_vcn_enable(struct smu_context *smu,
- bool enable);
+ bool enable,
+ int inst);
int smu_v13_0_set_jpeg_enable(struct smu_context *smu,
bool enable);
@@ -302,5 +304,7 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value);
+
+void smu_v13_0_interrupt_work(struct smu_context *smu);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
index 46b456590a08..29a4583db873 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
@@ -28,7 +28,7 @@
#define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
-#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x26
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E
#define FEATURE_MASK(feature) (1ULL << feature)
@@ -53,7 +53,7 @@
#define CTF_OFFSET_MEM 5
extern const int decoded_link_speed[5];
-extern const int decoded_link_width[7];
+extern const int decoded_link_width[8];
#define DECODE_GEN_SPEED(gen_speed_idx) (decoded_link_speed[gen_speed_idx])
#define DECODE_LANE_WIDTH(lane_width_idx) (decoded_link_width[lane_width_idx])
@@ -186,7 +186,7 @@ int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
uint32_t *min, uint32_t *max);
int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool automatic);
int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
@@ -210,7 +210,8 @@ int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
uint64_t event_arg);
int smu_v14_0_set_vcn_enable(struct smu_context *smu,
- bool enable);
+ bool enable,
+ int inst);
int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
bool enable);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index c0f6b59369b7..8aa61a9f7778 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1344,8 +1344,12 @@ static int arcturus_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (max_power_limit)
*max_power_limit = power_limit;
+ /**
+ * No lower bound is imposed on the limit. An unreasonably low limit
+ * will result in frequent throttling.
+ */
if (min_power_limit)
- *min_power_limit = power_limit;
+ *min_power_limit = 0;
return 0;
}
@@ -1441,98 +1445,120 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
return size;
}
-static int arcturus_set_power_profile_mode(struct smu_context *smu,
- long *input,
- uint32_t size)
+#define ARCTURUS_CUSTOM_PARAMS_COUNT 10
+#define ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT 2
+#define ARCTURUS_CUSTOM_PARAMS_SIZE (ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT * ARCTURUS_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int arcturus_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
- int workload_type = 0;
- uint32_t profile_mode = input[size];
- int ret = 0;
+ int ret, idx;
- if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor),
+ false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
}
+ idx = 0 * ARCTURUS_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor.Gfx_FPS = input[idx + 1];
+ activity_monitor.Gfx_UseRlcBusy = input[idx + 2];
+ activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
+ activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
+ activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
+ activity_monitor.Gfx_BoosterFreq = input[idx + 6];
+ activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
+ activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+ idx = 1 * ARCTURUS_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Uclk */
+ activity_monitor.Mem_FPS = input[idx + 1];
+ activity_monitor.Mem_UseRlcBusy = input[idx + 2];
+ activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
+ activity_monitor.Mem_MinActiveFreq = input[idx + 4];
+ activity_monitor.Mem_BoosterFreqType = input[idx + 5];
+ activity_monitor.Mem_BoosterFreq = input[idx + 6];
+ activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
+ activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
+ }
- if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
- (smu->smc_fw_version >= 0x360d00)) {
- if (size != 10)
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor),
+ true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor),
- false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
+ return ret;
+}
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor.Gfx_FPS = input[1];
- activity_monitor.Gfx_UseRlcBusy = input[2];
- activity_monitor.Gfx_MinActiveFreqType = input[3];
- activity_monitor.Gfx_MinActiveFreq = input[4];
- activity_monitor.Gfx_BoosterFreqType = input[5];
- activity_monitor.Gfx_BoosterFreq = input[6];
- activity_monitor.Gfx_PD_Data_limit_c = input[7];
- activity_monitor.Gfx_PD_Data_error_coeff = input[8];
- activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
- break;
- case 1: /* Uclk */
- activity_monitor.Mem_FPS = input[1];
- activity_monitor.Mem_UseRlcBusy = input[2];
- activity_monitor.Mem_MinActiveFreqType = input[3];
- activity_monitor.Mem_MinActiveFreq = input[4];
- activity_monitor.Mem_BoosterFreqType = input[5];
- activity_monitor.Mem_BoosterFreq = input[6];
- activity_monitor.Mem_PD_Data_limit_c = input[7];
- activity_monitor.Mem_PD_Data_error_coeff = input[8];
- activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
- break;
- default:
+static int arcturus_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int ret, idx = -1, i;
+
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
+
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (smu->smc_fw_version < 0x360d00)
return -EINVAL;
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params =
+ kzalloc(ARCTURUS_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
}
-
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor),
- true);
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != ARCTURUS_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * ARCTURUS_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = arcturus_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
- }
-
- /*
- * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
- * Not all profile modes are supported on arcturus.
- */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- profile_mode);
- if (workload_type < 0) {
- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
- return -EINVAL;
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, ARCTURUS_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetWorkloadMask,
- 1 << workload_type,
- NULL);
+ SMU_MSG_SetWorkloadMask,
+ backend_workload_mask,
+ NULL);
if (ret) {
- dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
- smu->power_profile_mode = profile_mode;
-
- return 0;
+ return ret;
}
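The cached buffer allocated above holds ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT blocks of ARCTURUS_CUSTOM_PARAMS_COUNT longs: entry 0 of each block marks it programmed and entries 1..9 carry the activity-monitor coefficients. A sketch of walking that layout (names from the code above; the pr_debug output is illustrative):

	/* Sketch: walk the cached custom-parameter layout. */
	static void example_walk_custom_params(const long *params)
	{
		const long *gfx = &params[0 * ARCTURUS_CUSTOM_PARAMS_COUNT];
		const long *mem = &params[1 * ARCTURUS_CUSTOM_PARAMS_COUNT];

		if (gfx[0])		/* Gfxclk block programmed */
			pr_debug("Gfx_FPS=%ld\n", gfx[1]);
		if (mem[0])		/* Uclk block programmed */
			pr_debug("Mem_FPS=%ld\n", mem[1]);
	}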
static int arcturus_set_performance_level(struct smu_context *smu,
@@ -1559,437 +1585,6 @@ static int arcturus_set_performance_level(struct smu_context *smu,
return smu_v11_0_set_performance_level(smu, level);
}
-static void arcturus_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *pptable = table_context->driver_pptable;
- int i;
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version);
-
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
-
- for (i = 0; i < PPT_THROTTLER_COUNT; i++) {
- dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = %d\n", i, pptable->SocketPowerLimitAc[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = %d\n", i, pptable->SocketPowerLimitAcTau[i]);
- }
-
- dev_info(smu->adev->dev, "TdcLimitSoc = %d\n", pptable->TdcLimitSoc);
- dev_info(smu->adev->dev, "TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau);
- dev_info(smu->adev->dev, "TdcLimitGfx = %d\n", pptable->TdcLimitGfx);
- dev_info(smu->adev->dev, "TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau);
-
- dev_info(smu->adev->dev, "TedgeLimit = %d\n", pptable->TedgeLimit);
- dev_info(smu->adev->dev, "ThotspotLimit = %d\n", pptable->ThotspotLimit);
- dev_info(smu->adev->dev, "TmemLimit = %d\n", pptable->TmemLimit);
- dev_info(smu->adev->dev, "Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit);
- dev_info(smu->adev->dev, "Tvr_memLimit = %d\n", pptable->Tvr_memLimit);
- dev_info(smu->adev->dev, "Tvr_socLimit = %d\n", pptable->Tvr_socLimit);
- dev_info(smu->adev->dev, "FitLimit = %d\n", pptable->FitLimit);
-
- dev_info(smu->adev->dev, "PpmPowerLimit = %d\n", pptable->PpmPowerLimit);
- dev_info(smu->adev->dev, "PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold);
-
- dev_info(smu->adev->dev, "ThrottlerControlMask = %d\n", pptable->ThrottlerControlMask);
-
- dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx);
- dev_info(smu->adev->dev, "UlvPadding = 0x%08x\n", pptable->UlvPadding);
-
- dev_info(smu->adev->dev, "UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass);
- dev_info(smu->adev->dev, "Padding234[0] = 0x%02x\n", pptable->Padding234[0]);
- dev_info(smu->adev->dev, "Padding234[1] = 0x%02x\n", pptable->Padding234[1]);
- dev_info(smu->adev->dev, "Padding234[2] = 0x%02x\n", pptable->Padding234[2]);
-
- dev_info(smu->adev->dev, "MinVoltageGfx = %d\n", pptable->MinVoltageGfx);
- dev_info(smu->adev->dev, "MinVoltageSoc = %d\n", pptable->MinVoltageSoc);
- dev_info(smu->adev->dev, "MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx);
- dev_info(smu->adev->dev, "MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc);
-
- dev_info(smu->adev->dev, "LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx);
- dev_info(smu->adev->dev, "LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc);
-
- dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_GFXCLK].padding,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_VCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK].padding,
- pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_VCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_VCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_DCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK].padding,
- pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_DCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_DCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_SOCCLK].padding,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_UCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_UCLK].padding,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_UCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_UCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_FCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_FCLK].padding,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_FCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_FCLK].Padding16);
-
-
- dev_info(smu->adev->dev, "FreqTableGfx\n");
- for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableGfx[i]);
-
- dev_info(smu->adev->dev, "FreqTableVclk\n");
- for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableVclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableDclk\n");
- for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableDclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableSocclk\n");
- for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableUclk\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableUclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableFclk\n");
- for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = %d\n", i, pptable->FreqTableFclk[i]);
-
- dev_info(smu->adev->dev, "Mp0clkFreq\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0clkFreq[i]);
-
- dev_info(smu->adev->dev, "Mp0DpmVoltage\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]);
-
- dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
- dev_info(smu->adev->dev, "GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate);
- dev_info(smu->adev->dev, "Padding567[0] = 0x%x\n", pptable->Padding567[0]);
- dev_info(smu->adev->dev, "Padding567[1] = 0x%x\n", pptable->Padding567[1]);
- dev_info(smu->adev->dev, "Padding567[2] = 0x%x\n", pptable->Padding567[2]);
- dev_info(smu->adev->dev, "Padding567[3] = 0x%x\n", pptable->Padding567[3]);
- dev_info(smu->adev->dev, "GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq);
- dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource);
- dev_info(smu->adev->dev, "Padding456 = 0x%x\n", pptable->Padding456);
-
- dev_info(smu->adev->dev, "EnableTdpm = %d\n", pptable->EnableTdpm);
- dev_info(smu->adev->dev, "TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature);
- dev_info(smu->adev->dev, "TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature);
- dev_info(smu->adev->dev, "GfxclkFreqHighTempLimit = %d\n", pptable->GfxclkFreqHighTempLimit);
-
- dev_info(smu->adev->dev, "FanStopTemp = %d\n", pptable->FanStopTemp);
- dev_info(smu->adev->dev, "FanStartTemp = %d\n", pptable->FanStartTemp);
-
- dev_info(smu->adev->dev, "FanGainEdge = %d\n", pptable->FanGainEdge);
- dev_info(smu->adev->dev, "FanGainHotspot = %d\n", pptable->FanGainHotspot);
- dev_info(smu->adev->dev, "FanGainVrGfx = %d\n", pptable->FanGainVrGfx);
- dev_info(smu->adev->dev, "FanGainVrSoc = %d\n", pptable->FanGainVrSoc);
- dev_info(smu->adev->dev, "FanGainVrMem = %d\n", pptable->FanGainVrMem);
- dev_info(smu->adev->dev, "FanGainHbm = %d\n", pptable->FanGainHbm);
-
- dev_info(smu->adev->dev, "FanPwmMin = %d\n", pptable->FanPwmMin);
- dev_info(smu->adev->dev, "FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm);
- dev_info(smu->adev->dev, "FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm);
- dev_info(smu->adev->dev, "FanMaximumRpm = %d\n", pptable->FanMaximumRpm);
- dev_info(smu->adev->dev, "FanTargetTemperature = %d\n", pptable->FanTargetTemperature);
- dev_info(smu->adev->dev, "FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk);
- dev_info(smu->adev->dev, "FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable);
- dev_info(smu->adev->dev, "FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev);
- dev_info(smu->adev->dev, "FanTempInputSelect = %d\n", pptable->FanTempInputSelect);
-
- dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved);
-
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]);
- dev_info(smu->adev->dev, "Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]);
-
- dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxPll.a,
- pptable->dBtcGbGfxPll.b,
- pptable->dBtcGbGfxPll.c);
- dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxAfll.a,
- pptable->dBtcGbGfxAfll.b,
- pptable->dBtcGbGfxAfll.c);
- dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbSoc.a,
- pptable->dBtcGbSoc.b,
- pptable->dBtcGbSoc.c);
-
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
-
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
-
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
-
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "XgmiDpmPstates\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiDpmPstates[i]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]);
-
- dev_info(smu->adev->dev, "VDDGFX_TVmin = %d\n", pptable->VDDGFX_TVmin);
- dev_info(smu->adev->dev, "VDDSOC_TVmin = %d\n", pptable->VDDSOC_TVmin);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = %d\n", pptable->VDDGFX_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = %d\n", pptable->VDDGFX_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = %d\n", pptable->VDDSOC_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = %d\n", pptable->VDDSOC_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = %d\n", pptable->VDDGFX_TVminHystersis);
- dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = %d\n", pptable->VDDSOC_TVminHystersis);
-
- dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides);
- dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation0.a,
- pptable->ReservedEquation0.b,
- pptable->ReservedEquation0.c);
- dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation1.a,
- pptable->ReservedEquation1.b,
- pptable->ReservedEquation1.c);
- dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation2.a,
- pptable->ReservedEquation2.b,
- pptable->ReservedEquation2.c);
- dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation3.a,
- pptable->ReservedEquation3.b,
- pptable->ReservedEquation3.c);
-
- dev_info(smu->adev->dev, "MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx);
- dev_info(smu->adev->dev, "PaddingUlv = %d\n", pptable->PaddingUlv);
-
- dev_info(smu->adev->dev, "TotalPowerConfig = %d\n", pptable->TotalPowerConfig);
- dev_info(smu->adev->dev, "TotalPowerSpare1 = %d\n", pptable->TotalPowerSpare1);
- dev_info(smu->adev->dev, "TotalPowerSpare2 = %d\n", pptable->TotalPowerSpare2);
-
- dev_info(smu->adev->dev, "PccThresholdLow = %d\n", pptable->PccThresholdLow);
- dev_info(smu->adev->dev, "PccThresholdHigh = %d\n", pptable->PccThresholdHigh);
-
- dev_info(smu->adev->dev, "Board Parameters:\n");
- dev_info(smu->adev->dev, "MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx);
- dev_info(smu->adev->dev, "MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc);
-
- dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
- dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
- dev_info(smu->adev->dev, "VddMemVrMapping = 0x%x\n", pptable->VddMemVrMapping);
- dev_info(smu->adev->dev, "BoardVrMapping = 0x%x\n", pptable->BoardVrMapping);
-
- dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent);
-
- dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
- dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset);
- dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
-
- dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
- dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset);
- dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
-
- dev_info(smu->adev->dev, "MemMaxCurrent = 0x%x\n", pptable->MemMaxCurrent);
- dev_info(smu->adev->dev, "MemOffset = 0x%x\n", pptable->MemOffset);
- dev_info(smu->adev->dev, "Padding_TelemetryMem = 0x%x\n", pptable->Padding_TelemetryMem);
-
- dev_info(smu->adev->dev, "BoardMaxCurrent = 0x%x\n", pptable->BoardMaxCurrent);
- dev_info(smu->adev->dev, "BoardOffset = 0x%x\n", pptable->BoardOffset);
- dev_info(smu->adev->dev, "Padding_TelemetryBoardInput = 0x%x\n", pptable->Padding_TelemetryBoardInput);
-
- dev_info(smu->adev->dev, "VR0HotGpio = %d\n", pptable->VR0HotGpio);
- dev_info(smu->adev->dev, "VR0HotPolarity = %d\n", pptable->VR0HotPolarity);
- dev_info(smu->adev->dev, "VR1HotGpio = %d\n", pptable->VR1HotGpio);
- dev_info(smu->adev->dev, "VR1HotPolarity = %d\n", pptable->VR1HotPolarity);
-
- dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq);
-
- dev_info(smu->adev->dev, "UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled);
- dev_info(smu->adev->dev, "UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent);
- dev_info(smu->adev->dev, "UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq);
-
- dev_info(smu->adev->dev, "FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled);
- dev_info(smu->adev->dev, "FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent);
- dev_info(smu->adev->dev, "FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq);
-
- dev_info(smu->adev->dev, "FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq);
-
- for (i = 0; i < NUM_I2C_CONTROLLERS; i++) {
- dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i);
- dev_info(smu->adev->dev, " .Enabled = %d\n",
- pptable->I2cControllers[i].Enabled);
- dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n",
- pptable->I2cControllers[i].SlaveAddress);
- dev_info(smu->adev->dev, " .ControllerPort = %d\n",
- pptable->I2cControllers[i].ControllerPort);
- dev_info(smu->adev->dev, " .ControllerName = %d\n",
- pptable->I2cControllers[i].ControllerName);
- dev_info(smu->adev->dev, " .ThermalThrottler = %d\n",
- pptable->I2cControllers[i].ThermalThrotter);
- dev_info(smu->adev->dev, " .I2cProtocol = %d\n",
- pptable->I2cControllers[i].I2cProtocol);
- dev_info(smu->adev->dev, " .Speed = %d\n",
- pptable->I2cControllers[i].Speed);
- }
-
- dev_info(smu->adev->dev, "MemoryChannelEnabled = %d\n", pptable->MemoryChannelEnabled);
- dev_info(smu->adev->dev, "DramBitWidth = %d\n", pptable->DramBitWidth);
-
- dev_info(smu->adev->dev, "TotalBoardPower = %d\n", pptable->TotalBoardPower);
-
- dev_info(smu->adev->dev, "XgmiLinkSpeed\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]);
- dev_info(smu->adev->dev, "XgmiLinkWidth\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]);
- dev_info(smu->adev->dev, "XgmiFclkFreq\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]);
- dev_info(smu->adev->dev, "XgmiSocVoltage\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]);
-
-}
-
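(The removal above drops the arcturus dump_pptable helper wholesale; the matching .dump_pptable hook is deleted from arcturus_ppt_funcs further down. As an aside, the hundreds of near-identical dev_info() lines being removed could have been generated from a single field-name macro; the helper below is purely hypothetical and is shown only to illustrate that pattern, it is not part of this patch:

	/* Hypothetical convenience macro, not in the kernel tree: stringify
	 * the field name once instead of repeating it in every format string. */
	#define PP_DUMP_FIELD(smu, pptable, field) \
		dev_info((smu)->adev->dev, #field " = 0x%x\n", (pptable)->field)

	/* usage sketch: PP_DUMP_FIELD(smu, pptable, FanPwmMin); */
)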
static bool arcturus_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
@@ -2002,7 +1597,9 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int arcturus_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable,
+ int inst)
{
int ret = 0;
@@ -2365,8 +1962,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
.set_performance_level = arcturus_set_performance_level,
- /* debug (internal used) */
- .dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
.is_dpm_running = arcturus_is_dpm_running,
.dpm_set_vcn_enable = arcturus_dpm_set_vcn_enable,
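(dpm_set_vcn_enable now takes an instance index, so callers can power-gate each VCN instance individually instead of every backend looping over instances internally; the sienna_cichlid hunk below shows such a loop being hoisted out. A minimal sketch of a per-instance caller, assuming the common SMU layer now iterates instances itself; the wrapper name and the loop are illustrative, not lifted from this patch:

	static int sketch_set_vcn_enable(struct smu_context *smu, bool enable)
	{
		struct amdgpu_device *adev = smu->adev;
		int i, ret;

		/* drive the per-instance callback once per VCN instance */
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, i);
			if (ret)
				return ret;
		}
		return 0;
	}
)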
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 16af1a329621..7fad5dfb39c4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1135,7 +1135,9 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
return 0;
}
-static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int navi10_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable,
+ int inst)
{
int ret = 0;
@@ -1689,7 +1691,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
if (ret)
return 0;
- ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return 0;
break;
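(smu_v11_0_set_soft_freq_limited_range() grows a fourth argument, passed as false at both converted call sites in this patch. The prototype below reconstructs the new signature; the argument name is assumed, since the diff only shows the value being passed:

	int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
						  enum smu_clk_type clk_type,
						  uint32_t min, uint32_t max,
						  bool automatic); /* name assumed */
)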
@@ -2004,87 +2006,122 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
return size;
}
-static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+#define NAVI10_CUSTOM_PARAMS_COUNT 10
+#define NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT 3
+#define NAVI10_CUSTOM_PARAMS_SIZE (NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT * NAVI10_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int navi10_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
- int workload_type, ret = 0;
+ int ret, idx;
- smu->power_profile_mode = input[size];
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor), false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
- if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
- return -EINVAL;
+ idx = 0 * NAVI10_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor.Gfx_FPS = input[idx + 1];
+ activity_monitor.Gfx_MinFreqStep = input[idx + 2];
+ activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
+ activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
+ activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
+ activity_monitor.Gfx_BoosterFreq = input[idx + 6];
+ activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
+ activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+ idx = 1 * NAVI10_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Socclk */
+ activity_monitor.Soc_FPS = input[idx + 1];
+ activity_monitor.Soc_MinFreqStep = input[idx + 2];
+ activity_monitor.Soc_MinActiveFreqType = input[idx + 3];
+ activity_monitor.Soc_MinActiveFreq = input[idx + 4];
+ activity_monitor.Soc_BoosterFreqType = input[idx + 5];
+ activity_monitor.Soc_BoosterFreq = input[idx + 6];
+ activity_monitor.Soc_PD_Data_limit_c = input[idx + 7];
+ activity_monitor.Soc_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor.Soc_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+ idx = 2 * NAVI10_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Memclk */
+ activity_monitor.Mem_FPS = input[idx + 1];
+ activity_monitor.Mem_MinFreqStep = input[idx + 2];
+ activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
+ activity_monitor.Mem_MinActiveFreq = input[idx + 4];
+ activity_monitor.Mem_BoosterFreqType = input[idx + 5];
+ activity_monitor.Mem_BoosterFreq = input[idx + 6];
+ activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
+ activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor), true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 10)
- return -EINVAL;
+ return ret;
+}
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor), false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
+static int navi10_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int ret, idx = -1, i;
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor.Gfx_FPS = input[1];
- activity_monitor.Gfx_MinFreqStep = input[2];
- activity_monitor.Gfx_MinActiveFreqType = input[3];
- activity_monitor.Gfx_MinActiveFreq = input[4];
- activity_monitor.Gfx_BoosterFreqType = input[5];
- activity_monitor.Gfx_BoosterFreq = input[6];
- activity_monitor.Gfx_PD_Data_limit_c = input[7];
- activity_monitor.Gfx_PD_Data_error_coeff = input[8];
- activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
- break;
- case 1: /* Socclk */
- activity_monitor.Soc_FPS = input[1];
- activity_monitor.Soc_MinFreqStep = input[2];
- activity_monitor.Soc_MinActiveFreqType = input[3];
- activity_monitor.Soc_MinActiveFreq = input[4];
- activity_monitor.Soc_BoosterFreqType = input[5];
- activity_monitor.Soc_BoosterFreq = input[6];
- activity_monitor.Soc_PD_Data_limit_c = input[7];
- activity_monitor.Soc_PD_Data_error_coeff = input[8];
- activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
- break;
- case 2: /* Memclk */
- activity_monitor.Mem_FPS = input[1];
- activity_monitor.Mem_MinFreqStep = input[2];
- activity_monitor.Mem_MinActiveFreqType = input[3];
- activity_monitor.Mem_MinActiveFreq = input[4];
- activity_monitor.Mem_BoosterFreqType = input[5];
- activity_monitor.Mem_BoosterFreq = input[6];
- activity_monitor.Mem_PD_Data_limit_c = input[7];
- activity_monitor.Mem_PD_Data_error_coeff = input[8];
- activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
- break;
- default:
- return -EINVAL;
- }
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor), true);
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params = kzalloc(NAVI10_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
+ }
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != NAVI10_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * NAVI10_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = navi10_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, NAVI10_CUSTOM_PARAMS_SIZE);
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
- if (workload_type < 0)
- return -EINVAL;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type, NULL);
- if (ret)
- dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+ backend_workload_mask, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
return ret;
}
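(The navi10 custom-profile cache is a single flat allocation of NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT * NAVI10_CUSTOM_PARAMS_COUNT longs. Reading the indexing above, element 0 of each ten-long block flags whether that clock's row carries user data, and elements 1..9 hold the coefficients. A layout sketch derived from the code above, not from any header:

	/* Layout implied by navi10_set_power_profile_mode_coeff()
	 * (reconstruction, not a definitive spec):
	 *
	 *   long params[3][10];
	 *   params[clk][0]     block-valid flag (set to 1 on user write)
	 *   params[clk][1..9]  FPS, MinFreqStep, MinActiveFreqType,
	 *                      MinActiveFreq, BoosterFreqType, BoosterFreq,
	 *                      PD_Data_limit_c, PD_Data_error_coeff,
	 *                      PD_Data_error_rate_coeff
	 *
	 * clk: 0 = Gfxclk, 1 = Socclk, 2 = Memclk */
)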
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 9c3c48297cba..19a25fdc2f5b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1152,22 +1152,20 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
return 0;
}
-static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable,
+ int inst)
{
struct amdgpu_device *adev = smu->adev;
- int i, ret = 0;
+ int ret = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- /* vcn dpm on is a prerequisite for vcn power gate messages */
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- 0x10000 * i, NULL);
- if (ret)
- return ret;
- }
+ if (adev->vcn.harvest_config & (1 << inst))
+ return ret;
+ /* vcn dpm on is a prerequisite for vcn power gate messages */
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ 0x10000 * inst, NULL);
}
return ret;
@@ -1469,7 +1467,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
if (ret)
goto forec_level_out;
- ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
goto forec_level_out;
break;
@@ -1706,90 +1704,126 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *buf)
return size;
}
-static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+#define SIENNA_CICHLID_CUSTOM_PARAMS_COUNT 10
+#define SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT 3
+#define SIENNA_CICHLID_CUSTOM_PARAMS_SIZE (SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int sienna_cichlid_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
- int workload_type, ret = 0;
+ int ret, idx;
- smu->power_profile_mode = input[size];
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external), false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
- if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
- return -EINVAL;
+ idx = 0 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor->Gfx_FPS = input[idx + 1];
+ activity_monitor->Gfx_MinFreqStep = input[idx + 2];
+ activity_monitor->Gfx_MinActiveFreqType = input[idx + 3];
+ activity_monitor->Gfx_MinActiveFreq = input[idx + 4];
+ activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
+ activity_monitor->Gfx_BoosterFreq = input[idx + 6];
+ activity_monitor->Gfx_PD_Data_limit_c = input[idx + 7];
+ activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+ idx = 1 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Socclk (stored in the Fclk_* fields of this ASIC's table) */
+ activity_monitor->Fclk_FPS = input[idx + 1];
+ activity_monitor->Fclk_MinFreqStep = input[idx + 2];
+ activity_monitor->Fclk_MinActiveFreqType = input[idx + 3];
+ activity_monitor->Fclk_MinActiveFreq = input[idx + 4];
+ activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
+ activity_monitor->Fclk_BoosterFreq = input[idx + 6];
+ activity_monitor->Fclk_PD_Data_limit_c = input[idx + 7];
+ activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 9];
+ }
+ idx = 2 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Memclk */
+ activity_monitor->Mem_FPS = input[idx + 1];
+ activity_monitor->Mem_MinFreqStep = input[idx + 2];
+ activity_monitor->Mem_MinActiveFreqType = input[idx + 3];
+ activity_monitor->Mem_MinActiveFreq = input[idx + 4];
+ activity_monitor->Mem_BoosterFreqType = input[idx + 5];
+ activity_monitor->Mem_BoosterFreq = input[idx + 6];
+ activity_monitor->Mem_PD_Data_limit_c = input[idx + 7];
+ activity_monitor->Mem_PD_Data_error_coeff = input[idx + 8];
+ activity_monitor->Mem_PD_Data_error_rate_coeff = input[idx + 9];
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 10)
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external), true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external), false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
+ return ret;
+}
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor->Gfx_FPS = input[1];
- activity_monitor->Gfx_MinFreqStep = input[2];
- activity_monitor->Gfx_MinActiveFreqType = input[3];
- activity_monitor->Gfx_MinActiveFreq = input[4];
- activity_monitor->Gfx_BoosterFreqType = input[5];
- activity_monitor->Gfx_BoosterFreq = input[6];
- activity_monitor->Gfx_PD_Data_limit_c = input[7];
- activity_monitor->Gfx_PD_Data_error_coeff = input[8];
- activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9];
- break;
- case 1: /* Socclk */
- activity_monitor->Fclk_FPS = input[1];
- activity_monitor->Fclk_MinFreqStep = input[2];
- activity_monitor->Fclk_MinActiveFreqType = input[3];
- activity_monitor->Fclk_MinActiveFreq = input[4];
- activity_monitor->Fclk_BoosterFreqType = input[5];
- activity_monitor->Fclk_BoosterFreq = input[6];
- activity_monitor->Fclk_PD_Data_limit_c = input[7];
- activity_monitor->Fclk_PD_Data_error_coeff = input[8];
- activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
- break;
- case 2: /* Memclk */
- activity_monitor->Mem_FPS = input[1];
- activity_monitor->Mem_MinFreqStep = input[2];
- activity_monitor->Mem_MinActiveFreqType = input[3];
- activity_monitor->Mem_MinActiveFreq = input[4];
- activity_monitor->Mem_BoosterFreqType = input[5];
- activity_monitor->Mem_BoosterFreq = input[6];
- activity_monitor->Mem_PD_Data_limit_c = input[7];
- activity_monitor->Mem_PD_Data_error_coeff = input[8];
- activity_monitor->Mem_PD_Data_error_rate_coeff = input[9];
- break;
- default:
- return -EINVAL;
- }
+static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int ret, idx = -1, i;
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external), true);
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
+
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params =
+ kzalloc(SIENNA_CICHLID_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
+ }
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != SIENNA_CICHLID_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = sienna_cichlid_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, SIENNA_CICHLID_CUSTOM_PARAMS_SIZE);
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
- if (workload_type < 0)
- return -EINVAL;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type, NULL);
- if (ret)
- dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
+ backend_workload_mask, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
return ret;
}
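(Both refactored setters rely on smu_cmn_get_backend_workload_mask() to translate the generic PP_SMC_POWER_PROFILE_* bitmask into the ASIC's WORKLOAD_PPLIB_*_BIT mask, replacing the removed single-profile smu_cmn_to_asic_specific_index() lookup. A hedged sketch of that translation, assuming the helper simply maps each set generic bit through CMN2ASIC_MAPPING_WORKLOAD the way the removed code converted one profile; this is a reconstruction, not the helper's actual body:

	static void sketch_backend_workload_mask(struct smu_context *smu,
						 u32 workload_mask,
						 u32 *backend_mask)
	{
		int profile, bit;

		*backend_mask = 0;
		for (profile = 0; profile <= PP_SMC_POWER_PROFILE_CUSTOM;
		     profile++) {
			if (!(workload_mask & (1 << profile)))
				continue;
			/* map the generic profile to the ASIC workload bit */
			bit = smu_cmn_to_asic_specific_index(smu,
					CMN2ASIC_MAPPING_WORKLOAD, profile);
			if (bit >= 0)
				*backend_mask |= 1 << bit;
		}
	}
)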
@@ -2493,1274 +2527,6 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu)
return val != 0x0;
}
-static void beige_goby_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_beige_goby_t *pptable = table_context->driver_pptable;
- int i;
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version);
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
-
- for (i = 0; i < PPT_THROTTLER_COUNT; i++) {
- dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]);
- }
-
- for (i = 0; i < TDC_THROTTLER_COUNT; i++) {
- dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]);
- dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]);
- }
-
- for (i = 0; i < TEMP_COUNT; i++) {
- dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]);
- }
-
- dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit);
- dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig);
- dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]);
- dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]);
- dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]);
-
- dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit);
- for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) {
- dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]);
- dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]);
- }
- dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask);
-
- dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask);
-
- dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc);
- dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx);
- dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx);
- dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc);
-
- dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin);
-
- dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold);
-
- dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx);
- dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc);
- dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", pptable->MaxVoltageGfx);
- dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc);
-
- dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx);
- dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc);
-
- dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin);
- dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis);
- dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis);
-
- dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_GFXCLK].Padding,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_SOCCLK].Padding,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_UCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_UCLK].Padding,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_UCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_UCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_FCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_FCLK].Padding,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_FCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_FCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK_0].Padding,
- pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin,
- pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK_0].Padding,
- pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin,
- pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK_1].Padding,
- pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin,
- pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK_1].Padding,
- pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin,
- pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16);
-
- dev_info(smu->adev->dev, "FreqTableGfx\n");
- for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]);
-
- dev_info(smu->adev->dev, "FreqTableVclk\n");
- for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableDclk\n");
- for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableSocclk\n");
- for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableUclk\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableFclk\n");
- for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]);
-
- dev_info(smu->adev->dev, "DcModeMaxFreq\n");
- dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]);
- dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]);
- dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]);
- dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]);
- dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]);
- dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]);
- dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]);
- dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]);
-
- dev_info(smu->adev->dev, "FreqTableUclkDiv\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]);
-
- dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq);
- dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding);
-
- dev_info(smu->adev->dev, "Mp0clkFreq\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]);
-
- dev_info(smu->adev->dev, "Mp0DpmVoltage\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]);
-
- dev_info(smu->adev->dev, "MemVddciVoltage\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]);
-
- dev_info(smu->adev->dev, "MemMvddVoltage\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]);
-
- dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry);
- dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit);
- dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
- dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource);
- dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding);
-
- dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask);
-
- dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask);
- dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask);
- dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]);
- dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow);
- dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]);
- dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt);
- dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt);
- dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt);
-
- dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage);
- dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime);
- dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime);
- dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum);
- dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis);
- dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout);
-
- dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]);
- dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]);
- dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]);
- dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]);
- dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]);
-
- dev_info(smu->adev->dev, "FlopsPerByteTable\n");
- for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]);
-
- dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv);
- dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]);
- dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]);
- dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]);
-
- dev_info(smu->adev->dev, "UclkDpmPstates\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]);
-
- dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n");
- dev_info(smu->adev->dev, " .Fmin = 0x%x\n",
- pptable->UclkDpmSrcFreqRange.Fmin);
- dev_info(smu->adev->dev, " .Fmax = 0x%x\n",
- pptable->UclkDpmSrcFreqRange.Fmax);
- dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n");
- dev_info(smu->adev->dev, " .Fmin = 0x%x\n",
- pptable->UclkDpmTargFreqRange.Fmin);
- dev_info(smu->adev->dev, " .Fmax = 0x%x\n",
- pptable->UclkDpmTargFreqRange.Fmax);
- dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq);
- dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding);
-
- dev_info(smu->adev->dev, "PcieGenSpeed\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]);
-
- dev_info(smu->adev->dev, "PcieLaneCount\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]);
-
- dev_info(smu->adev->dev, "LclkFreq\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]);
-
- dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp);
- dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp);
-
- dev_info(smu->adev->dev, "FanGain\n");
- for (i = 0; i < TEMP_COUNT; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]);
-
- dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin);
- dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm);
- dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm);
- dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm);
- dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm);
- dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature);
- dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk);
- dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16);
- dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect);
- dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding);
- dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable);
- dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev);
-
- dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved);
-
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect);
- dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs);
-
- dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c);
- dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c);
- dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxPll.a,
- pptable->dBtcGbGfxPll.b,
- pptable->dBtcGbGfxPll.c);
- dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxDfll.a,
- pptable->dBtcGbGfxDfll.b,
- pptable->dBtcGbGfxDfll.c);
- dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbSoc.a,
- pptable->dBtcGbSoc.b,
- pptable->dBtcGbSoc.c);
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
-
- dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n");
- for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) {
- dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n",
- i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]);
- dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n",
- i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]);
- }
-
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
-
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
-
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "XgmiDpmPstates\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]);
-
- dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides);
- dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation0.a,
- pptable->ReservedEquation0.b,
- pptable->ReservedEquation0.c);
- dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation1.a,
- pptable->ReservedEquation1.b,
- pptable->ReservedEquation1.c);
- dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation2.a,
- pptable->ReservedEquation2.b,
- pptable->ReservedEquation2.c);
- dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation3.a,
- pptable->ReservedEquation3.b,
- pptable->ReservedEquation3.c);
-
- dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]);
- dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]);
- dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]);
- dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]);
- dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]);
- dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]);
- dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]);
- dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]);
-
- dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]);
- dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]);
- dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]);
- dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]);
- dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]);
- dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]);
-
- for (i = 0; i < NUM_I2C_CONTROLLERS; i++) {
- dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i);
- dev_info(smu->adev->dev, " .Enabled = 0x%x\n",
- pptable->I2cControllers[i].Enabled);
- dev_info(smu->adev->dev, " .Speed = 0x%x\n",
- pptable->I2cControllers[i].Speed);
- dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n",
- pptable->I2cControllers[i].SlaveAddress);
- dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n",
- pptable->I2cControllers[i].ControllerPort);
- dev_info(smu->adev->dev, " .ControllerName = 0x%x\n",
- pptable->I2cControllers[i].ControllerName);
- dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n",
- pptable->I2cControllers[i].ThermalThrotter);
- dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n",
- pptable->I2cControllers[i].I2cProtocol);
- dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n",
- pptable->I2cControllers[i].PaddingConfig);
- }
-
- dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl);
- dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda);
- dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr);
- dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]);
-
- dev_info(smu->adev->dev, "Board Parameters:\n");
- dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
- dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
- dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping);
- dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping);
- dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask);
-
- dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
- dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset);
- dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
-
- dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
- dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset);
- dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
-
- dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent);
- dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset);
- dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0);
-
- dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent);
- dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset);
- dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1);
-
- dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio);
-
- dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio);
- dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity);
- dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio);
- dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity);
- dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio);
- dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity);
- dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio);
- dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity);
- dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0);
- dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1);
- dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2);
- dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask);
- dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie);
- dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError);
- dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]);
- dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]);
-
- dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq);
-
- dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", pptable->DfllGfxclkSpreadFreq);
-
- dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding);
- dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq);
-
- dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled);
- dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent);
- dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq);
-
- dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled);
- dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth);
- dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]);
- dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]);
- dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]);
-
- dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower);
- dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding);
-
- dev_info(smu->adev->dev, "XgmiLinkSpeed\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]);
- dev_info(smu->adev->dev, "XgmiLinkWidth\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]);
- dev_info(smu->adev->dev, "XgmiFclkFreq\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]);
- dev_info(smu->adev->dev, "XgmiSocVoltage\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]);
-
- dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled);
- dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled);
- dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]);
- dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]);
-
- dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]);
- dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]);
- dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]);
- dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]);
- dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]);
- dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]);
- dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]);
- dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]);
- dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]);
- dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]);
- dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]);
-
- dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]);
- dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]);
- dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]);
- dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]);
- dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]);
- dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]);
- dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]);
- dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]);
-}
-
-static void sienna_cichlid_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *pptable = table_context->driver_pptable;
- int i;
-
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
- IP_VERSION(11, 0, 13)) {
- beige_goby_dump_pptable(smu);
- return;
- }
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", pptable->Version);
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]);
-
- for (i = 0; i < PPT_THROTTLER_COUNT; i++) {
- dev_info(smu->adev->dev, "SocketPowerLimitAc[%d] = 0x%x\n", i, pptable->SocketPowerLimitAc[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitAcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitAcTau[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitDc[%d] = 0x%x\n", i, pptable->SocketPowerLimitDc[i]);
- dev_info(smu->adev->dev, "SocketPowerLimitDcTau[%d] = 0x%x\n", i, pptable->SocketPowerLimitDcTau[i]);
- }
-
- for (i = 0; i < TDC_THROTTLER_COUNT; i++) {
- dev_info(smu->adev->dev, "TdcLimit[%d] = 0x%x\n", i, pptable->TdcLimit[i]);
- dev_info(smu->adev->dev, "TdcLimitTau[%d] = 0x%x\n", i, pptable->TdcLimitTau[i]);
- }
-
- for (i = 0; i < TEMP_COUNT; i++) {
- dev_info(smu->adev->dev, "TemperatureLimit[%d] = 0x%x\n", i, pptable->TemperatureLimit[i]);
- }
-
- dev_info(smu->adev->dev, "FitLimit = 0x%x\n", pptable->FitLimit);
- dev_info(smu->adev->dev, "TotalPowerConfig = 0x%x\n", pptable->TotalPowerConfig);
- dev_info(smu->adev->dev, "TotalPowerPadding[0] = 0x%x\n", pptable->TotalPowerPadding[0]);
- dev_info(smu->adev->dev, "TotalPowerPadding[1] = 0x%x\n", pptable->TotalPowerPadding[1]);
- dev_info(smu->adev->dev, "TotalPowerPadding[2] = 0x%x\n", pptable->TotalPowerPadding[2]);
-
- dev_info(smu->adev->dev, "ApccPlusResidencyLimit = 0x%x\n", pptable->ApccPlusResidencyLimit);
- for (i = 0; i < NUM_SMNCLK_DPM_LEVELS; i++) {
- dev_info(smu->adev->dev, "SmnclkDpmFreq[%d] = 0x%x\n", i, pptable->SmnclkDpmFreq[i]);
- dev_info(smu->adev->dev, "SmnclkDpmVoltage[%d] = 0x%x\n", i, pptable->SmnclkDpmVoltage[i]);
- }
- dev_info(smu->adev->dev, "ThrottlerControlMask = 0x%x\n", pptable->ThrottlerControlMask);
-
- dev_info(smu->adev->dev, "FwDStateMask = 0x%x\n", pptable->FwDStateMask);
-
- dev_info(smu->adev->dev, "UlvVoltageOffsetSoc = 0x%x\n", pptable->UlvVoltageOffsetSoc);
- dev_info(smu->adev->dev, "UlvVoltageOffsetGfx = 0x%x\n", pptable->UlvVoltageOffsetGfx);
- dev_info(smu->adev->dev, "MinVoltageUlvGfx = 0x%x\n", pptable->MinVoltageUlvGfx);
- dev_info(smu->adev->dev, "MinVoltageUlvSoc = 0x%x\n", pptable->MinVoltageUlvSoc);
-
- dev_info(smu->adev->dev, "SocLIVmin = 0x%x\n", pptable->SocLIVmin);
- dev_info(smu->adev->dev, "PaddingLIVmin = 0x%x\n", pptable->PaddingLIVmin);
-
- dev_info(smu->adev->dev, "GceaLinkMgrIdleThreshold = 0x%x\n", pptable->GceaLinkMgrIdleThreshold);
- dev_info(smu->adev->dev, "paddingRlcUlvParams[0] = 0x%x\n", pptable->paddingRlcUlvParams[0]);
- dev_info(smu->adev->dev, "paddingRlcUlvParams[1] = 0x%x\n", pptable->paddingRlcUlvParams[1]);
- dev_info(smu->adev->dev, "paddingRlcUlvParams[2] = 0x%x\n", pptable->paddingRlcUlvParams[2]);
-
- dev_info(smu->adev->dev, "MinVoltageGfx = 0x%x\n", pptable->MinVoltageGfx);
- dev_info(smu->adev->dev, "MinVoltageSoc = 0x%x\n", pptable->MinVoltageSoc);
- dev_info(smu->adev->dev, "MaxVoltageGfx = 0x%x\n", pptable->MaxVoltageGfx);
- dev_info(smu->adev->dev, "MaxVoltageSoc = 0x%x\n", pptable->MaxVoltageSoc);
-
- dev_info(smu->adev->dev, "LoadLineResistanceGfx = 0x%x\n", pptable->LoadLineResistanceGfx);
- dev_info(smu->adev->dev, "LoadLineResistanceSoc = 0x%x\n", pptable->LoadLineResistanceSoc);
-
- dev_info(smu->adev->dev, "VDDGFX_TVmin = 0x%x\n", pptable->VDDGFX_TVmin);
- dev_info(smu->adev->dev, "VDDSOC_TVmin = 0x%x\n", pptable->VDDSOC_TVmin);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_HiTemp = 0x%x\n", pptable->VDDGFX_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDGFX_Vmin_LoTemp = 0x%x\n", pptable->VDDGFX_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_HiTemp = 0x%x\n", pptable->VDDSOC_Vmin_HiTemp);
- dev_info(smu->adev->dev, "VDDSOC_Vmin_LoTemp = 0x%x\n", pptable->VDDSOC_Vmin_LoTemp);
- dev_info(smu->adev->dev, "VDDGFX_TVminHystersis = 0x%x\n", pptable->VDDGFX_TVminHystersis);
- dev_info(smu->adev->dev, "VDDSOC_TVminHystersis = 0x%x\n", pptable->VDDSOC_TVminHystersis);
-
- dev_info(smu->adev->dev, "[PPCLK_GFXCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_GFXCLK].Padding,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_SOCCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_SOCCLK].Padding,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_UCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_UCLK].Padding,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_UCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_UCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_FCLK]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode,
- pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_FCLK].Padding,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_FCLK].SsFmin,
- pptable->DpmDescriptor[PPCLK_FCLK].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_DCLK_0]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_DCLK_0].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK_0].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK_0].Padding,
- pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK_0].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_DCLK_0].SsFmin,
- pptable->DpmDescriptor[PPCLK_DCLK_0].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_VCLK_0]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_VCLK_0].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK_0].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK_0].Padding,
- pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK_0].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_VCLK_0].SsFmin,
- pptable->DpmDescriptor[PPCLK_VCLK_0].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_DCLK_1]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_DCLK_1].VoltageMode,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_DCLK_1].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_DCLK_1].Padding,
- pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_DCLK_1].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_DCLK_1].SsFmin,
- pptable->DpmDescriptor[PPCLK_DCLK_1].Padding16);
-
- dev_info(smu->adev->dev, "[PPCLK_VCLK_1]\n"
- " .VoltageMode = 0x%02x\n"
- " .SnapToDiscrete = 0x%02x\n"
- " .NumDiscreteLevels = 0x%02x\n"
- " .padding = 0x%02x\n"
- " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n"
- " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n"
- " .SsFmin = 0x%04x\n"
- " .Padding_16 = 0x%04x\n",
- pptable->DpmDescriptor[PPCLK_VCLK_1].VoltageMode,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SnapToDiscrete,
- pptable->DpmDescriptor[PPCLK_VCLK_1].NumDiscreteLevels,
- pptable->DpmDescriptor[PPCLK_VCLK_1].Padding,
- pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.m,
- pptable->DpmDescriptor[PPCLK_VCLK_1].ConversionToAvfsClk.b,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.a,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.b,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsCurve.c,
- pptable->DpmDescriptor[PPCLK_VCLK_1].SsFmin,
- pptable->DpmDescriptor[PPCLK_VCLK_1].Padding16);
-
- dev_info(smu->adev->dev, "FreqTableGfx\n");
- for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableGfx[i]);
-
- dev_info(smu->adev->dev, "FreqTableVclk\n");
- for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableVclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableDclk\n");
- for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableDclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableSocclk\n");
- for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableSocclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableUclk\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableUclk[i]);
-
- dev_info(smu->adev->dev, "FreqTableFclk\n");
- for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%02d] = 0x%x\n", i, pptable->FreqTableFclk[i]);
-
- dev_info(smu->adev->dev, "DcModeMaxFreq\n");
- dev_info(smu->adev->dev, " .PPCLK_GFXCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_GFXCLK]);
- dev_info(smu->adev->dev, " .PPCLK_SOCCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_SOCCLK]);
- dev_info(smu->adev->dev, " .PPCLK_UCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_UCLK]);
- dev_info(smu->adev->dev, " .PPCLK_FCLK = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_FCLK]);
- dev_info(smu->adev->dev, " .PPCLK_DCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_0]);
- dev_info(smu->adev->dev, " .PPCLK_VCLK_0 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_0]);
- dev_info(smu->adev->dev, " .PPCLK_DCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_DCLK_1]);
- dev_info(smu->adev->dev, " .PPCLK_VCLK_1 = 0x%x\n", pptable->DcModeMaxFreq[PPCLK_VCLK_1]);
-
- dev_info(smu->adev->dev, "FreqTableUclkDiv\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FreqTableUclkDiv[i]);
-
- dev_info(smu->adev->dev, "FclkBoostFreq = 0x%x\n", pptable->FclkBoostFreq);
- dev_info(smu->adev->dev, "FclkParamPadding = 0x%x\n", pptable->FclkParamPadding);
-
- dev_info(smu->adev->dev, "Mp0clkFreq\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0clkFreq[i]);
-
- dev_info(smu->adev->dev, "Mp0DpmVoltage\n");
- for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->Mp0DpmVoltage[i]);
-
- dev_info(smu->adev->dev, "MemVddciVoltage\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemVddciVoltage[i]);
-
- dev_info(smu->adev->dev, "MemMvddVoltage\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->MemMvddVoltage[i]);
-
- dev_info(smu->adev->dev, "GfxclkFgfxoffEntry = 0x%x\n", pptable->GfxclkFgfxoffEntry);
- dev_info(smu->adev->dev, "GfxclkFinit = 0x%x\n", pptable->GfxclkFinit);
- dev_info(smu->adev->dev, "GfxclkFidle = 0x%x\n", pptable->GfxclkFidle);
- dev_info(smu->adev->dev, "GfxclkSource = 0x%x\n", pptable->GfxclkSource);
- dev_info(smu->adev->dev, "GfxclkPadding = 0x%x\n", pptable->GfxclkPadding);
-
- dev_info(smu->adev->dev, "GfxGpoSubFeatureMask = 0x%x\n", pptable->GfxGpoSubFeatureMask);
-
- dev_info(smu->adev->dev, "GfxGpoEnabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoEnabledWorkPolicyMask);
- dev_info(smu->adev->dev, "GfxGpoDisabledWorkPolicyMask = 0x%x\n", pptable->GfxGpoDisabledWorkPolicyMask);
- dev_info(smu->adev->dev, "GfxGpoPadding[0] = 0x%x\n", pptable->GfxGpoPadding[0]);
- dev_info(smu->adev->dev, "GfxGpoVotingAllow = 0x%x\n", pptable->GfxGpoVotingAllow);
- dev_info(smu->adev->dev, "GfxGpoPadding32[0] = 0x%x\n", pptable->GfxGpoPadding32[0]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[1] = 0x%x\n", pptable->GfxGpoPadding32[1]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[2] = 0x%x\n", pptable->GfxGpoPadding32[2]);
- dev_info(smu->adev->dev, "GfxGpoPadding32[3] = 0x%x\n", pptable->GfxGpoPadding32[3]);
- dev_info(smu->adev->dev, "GfxDcsFopt = 0x%x\n", pptable->GfxDcsFopt);
- dev_info(smu->adev->dev, "GfxDcsFclkFopt = 0x%x\n", pptable->GfxDcsFclkFopt);
- dev_info(smu->adev->dev, "GfxDcsUclkFopt = 0x%x\n", pptable->GfxDcsUclkFopt);
-
- dev_info(smu->adev->dev, "DcsGfxOffVoltage = 0x%x\n", pptable->DcsGfxOffVoltage);
- dev_info(smu->adev->dev, "DcsMinGfxOffTime = 0x%x\n", pptable->DcsMinGfxOffTime);
- dev_info(smu->adev->dev, "DcsMaxGfxOffTime = 0x%x\n", pptable->DcsMaxGfxOffTime);
- dev_info(smu->adev->dev, "DcsMinCreditAccum = 0x%x\n", pptable->DcsMinCreditAccum);
- dev_info(smu->adev->dev, "DcsExitHysteresis = 0x%x\n", pptable->DcsExitHysteresis);
- dev_info(smu->adev->dev, "DcsTimeout = 0x%x\n", pptable->DcsTimeout);
-
- dev_info(smu->adev->dev, "DcsParamPadding[0] = 0x%x\n", pptable->DcsParamPadding[0]);
- dev_info(smu->adev->dev, "DcsParamPadding[1] = 0x%x\n", pptable->DcsParamPadding[1]);
- dev_info(smu->adev->dev, "DcsParamPadding[2] = 0x%x\n", pptable->DcsParamPadding[2]);
- dev_info(smu->adev->dev, "DcsParamPadding[3] = 0x%x\n", pptable->DcsParamPadding[3]);
- dev_info(smu->adev->dev, "DcsParamPadding[4] = 0x%x\n", pptable->DcsParamPadding[4]);
-
- dev_info(smu->adev->dev, "FlopsPerByteTable\n");
- for (i = 0; i < RLC_PACE_TABLE_NUM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FlopsPerByteTable[i]);
-
- dev_info(smu->adev->dev, "LowestUclkReservedForUlv = 0x%x\n", pptable->LowestUclkReservedForUlv);
- dev_info(smu->adev->dev, "vddingMem[0] = 0x%x\n", pptable->PaddingMem[0]);
- dev_info(smu->adev->dev, "vddingMem[1] = 0x%x\n", pptable->PaddingMem[1]);
- dev_info(smu->adev->dev, "vddingMem[2] = 0x%x\n", pptable->PaddingMem[2]);
-
- dev_info(smu->adev->dev, "UclkDpmPstates\n");
- for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->UclkDpmPstates[i]);
-
- dev_info(smu->adev->dev, "UclkDpmSrcFreqRange\n");
- dev_info(smu->adev->dev, " .Fmin = 0x%x\n",
- pptable->UclkDpmSrcFreqRange.Fmin);
- dev_info(smu->adev->dev, " .Fmax = 0x%x\n",
- pptable->UclkDpmSrcFreqRange.Fmax);
- dev_info(smu->adev->dev, "UclkDpmTargFreqRange\n");
- dev_info(smu->adev->dev, " .Fmin = 0x%x\n",
- pptable->UclkDpmTargFreqRange.Fmin);
- dev_info(smu->adev->dev, " .Fmax = 0x%x\n",
- pptable->UclkDpmTargFreqRange.Fmax);
- dev_info(smu->adev->dev, "UclkDpmMidstepFreq = 0x%x\n", pptable->UclkDpmMidstepFreq);
- dev_info(smu->adev->dev, "UclkMidstepPadding = 0x%x\n", pptable->UclkMidstepPadding);
-
- dev_info(smu->adev->dev, "PcieGenSpeed\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieGenSpeed[i]);
-
- dev_info(smu->adev->dev, "PcieLaneCount\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->PcieLaneCount[i]);
-
- dev_info(smu->adev->dev, "LclkFreq\n");
- for (i = 0; i < NUM_LINK_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->LclkFreq[i]);
-
- dev_info(smu->adev->dev, "FanStopTemp = 0x%x\n", pptable->FanStopTemp);
- dev_info(smu->adev->dev, "FanStartTemp = 0x%x\n", pptable->FanStartTemp);
-
- dev_info(smu->adev->dev, "FanGain\n");
- for (i = 0; i < TEMP_COUNT; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->FanGain[i]);
-
- dev_info(smu->adev->dev, "FanPwmMin = 0x%x\n", pptable->FanPwmMin);
- dev_info(smu->adev->dev, "FanAcousticLimitRpm = 0x%x\n", pptable->FanAcousticLimitRpm);
- dev_info(smu->adev->dev, "FanThrottlingRpm = 0x%x\n", pptable->FanThrottlingRpm);
- dev_info(smu->adev->dev, "FanMaximumRpm = 0x%x\n", pptable->FanMaximumRpm);
- dev_info(smu->adev->dev, "MGpuFanBoostLimitRpm = 0x%x\n", pptable->MGpuFanBoostLimitRpm);
- dev_info(smu->adev->dev, "FanTargetTemperature = 0x%x\n", pptable->FanTargetTemperature);
- dev_info(smu->adev->dev, "FanTargetGfxclk = 0x%x\n", pptable->FanTargetGfxclk);
- dev_info(smu->adev->dev, "FanPadding16 = 0x%x\n", pptable->FanPadding16);
- dev_info(smu->adev->dev, "FanTempInputSelect = 0x%x\n", pptable->FanTempInputSelect);
- dev_info(smu->adev->dev, "FanPadding = 0x%x\n", pptable->FanPadding);
- dev_info(smu->adev->dev, "FanZeroRpmEnable = 0x%x\n", pptable->FanZeroRpmEnable);
- dev_info(smu->adev->dev, "FanTachEdgePerRev = 0x%x\n", pptable->FanTachEdgePerRev);
-
- dev_info(smu->adev->dev, "FuzzyFan_ErrorSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_ErrorRateSetDelta = 0x%x\n", pptable->FuzzyFan_ErrorRateSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_PwmSetDelta = 0x%x\n", pptable->FuzzyFan_PwmSetDelta);
- dev_info(smu->adev->dev, "FuzzyFan_Reserved = 0x%x\n", pptable->FuzzyFan_Reserved);
-
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "dBtcGbGfxDfllModelSelect = 0x%x\n", pptable->dBtcGbGfxDfllModelSelect);
- dev_info(smu->adev->dev, "Padding8_Avfs = 0x%x\n", pptable->Padding8_Avfs);
-
- dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_GFX].c);
- dev_info(smu->adev->dev, "qAvfsGb[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].a,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].b,
- pptable->qAvfsGb[AVFS_VOLTAGE_SOC].c);
- dev_info(smu->adev->dev, "dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxPll.a,
- pptable->dBtcGbGfxPll.b,
- pptable->dBtcGbGfxPll.c);
- dev_info(smu->adev->dev, "dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbGfxDfll.a,
- pptable->dBtcGbGfxDfll.b,
- pptable->dBtcGbGfxDfll.c);
- dev_info(smu->adev->dev, "dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->dBtcGbSoc.a,
- pptable->dBtcGbSoc.b,
- pptable->dBtcGbSoc.c);
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].m,
- pptable->qAgingGb[AVFS_VOLTAGE_GFX].b);
- dev_info(smu->adev->dev, "qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n",
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].m,
- pptable->qAgingGb[AVFS_VOLTAGE_SOC].b);
-
- dev_info(smu->adev->dev, "PiecewiseLinearDroopIntGfxDfll\n");
- for (i = 0; i < NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS; i++) {
- dev_info(smu->adev->dev, " Fset[%d] = 0x%x\n",
- i, pptable->PiecewiseLinearDroopIntGfxDfll.Fset[i]);
- dev_info(smu->adev->dev, " Vdroop[%d] = 0x%x\n",
- i, pptable->PiecewiseLinearDroopIntGfxDfll.Vdroop[i]);
- }
-
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c);
- dev_info(smu->adev->dev, "qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b,
- pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c);
-
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]);
- dev_info(smu->adev->dev, "Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]);
-
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_SOC]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]);
- dev_info(smu->adev->dev, "DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]);
-
- dev_info(smu->adev->dev, "XgmiDpmPstates\n");
- for (i = 0; i < NUM_XGMI_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiDpmPstates[i]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]);
- dev_info(smu->adev->dev, "XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]);
-
- dev_info(smu->adev->dev, "DebugOverrides = 0x%x\n", pptable->DebugOverrides);
- dev_info(smu->adev->dev, "ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation0.a,
- pptable->ReservedEquation0.b,
- pptable->ReservedEquation0.c);
- dev_info(smu->adev->dev, "ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation1.a,
- pptable->ReservedEquation1.b,
- pptable->ReservedEquation1.c);
- dev_info(smu->adev->dev, "ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation2.a,
- pptable->ReservedEquation2.b,
- pptable->ReservedEquation2.c);
- dev_info(smu->adev->dev, "ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n",
- pptable->ReservedEquation3.a,
- pptable->ReservedEquation3.b,
- pptable->ReservedEquation3.c);
-
- dev_info(smu->adev->dev, "SkuReserved[0] = 0x%x\n", pptable->SkuReserved[0]);
- dev_info(smu->adev->dev, "SkuReserved[1] = 0x%x\n", pptable->SkuReserved[1]);
- dev_info(smu->adev->dev, "SkuReserved[2] = 0x%x\n", pptable->SkuReserved[2]);
- dev_info(smu->adev->dev, "SkuReserved[3] = 0x%x\n", pptable->SkuReserved[3]);
- dev_info(smu->adev->dev, "SkuReserved[4] = 0x%x\n", pptable->SkuReserved[4]);
- dev_info(smu->adev->dev, "SkuReserved[5] = 0x%x\n", pptable->SkuReserved[5]);
- dev_info(smu->adev->dev, "SkuReserved[6] = 0x%x\n", pptable->SkuReserved[6]);
- dev_info(smu->adev->dev, "SkuReserved[7] = 0x%x\n", pptable->SkuReserved[7]);
-
- dev_info(smu->adev->dev, "GamingClk[0] = 0x%x\n", pptable->GamingClk[0]);
- dev_info(smu->adev->dev, "GamingClk[1] = 0x%x\n", pptable->GamingClk[1]);
- dev_info(smu->adev->dev, "GamingClk[2] = 0x%x\n", pptable->GamingClk[2]);
- dev_info(smu->adev->dev, "GamingClk[3] = 0x%x\n", pptable->GamingClk[3]);
- dev_info(smu->adev->dev, "GamingClk[4] = 0x%x\n", pptable->GamingClk[4]);
- dev_info(smu->adev->dev, "GamingClk[5] = 0x%x\n", pptable->GamingClk[5]);
-
- for (i = 0; i < NUM_I2C_CONTROLLERS; i++) {
- dev_info(smu->adev->dev, "I2cControllers[%d]:\n", i);
- dev_info(smu->adev->dev, " .Enabled = 0x%x\n",
- pptable->I2cControllers[i].Enabled);
- dev_info(smu->adev->dev, " .Speed = 0x%x\n",
- pptable->I2cControllers[i].Speed);
- dev_info(smu->adev->dev, " .SlaveAddress = 0x%x\n",
- pptable->I2cControllers[i].SlaveAddress);
- dev_info(smu->adev->dev, " .ControllerPort = 0x%x\n",
- pptable->I2cControllers[i].ControllerPort);
- dev_info(smu->adev->dev, " .ControllerName = 0x%x\n",
- pptable->I2cControllers[i].ControllerName);
- dev_info(smu->adev->dev, " .ThermalThrottler = 0x%x\n",
- pptable->I2cControllers[i].ThermalThrotter);
- dev_info(smu->adev->dev, " .I2cProtocol = 0x%x\n",
- pptable->I2cControllers[i].I2cProtocol);
- dev_info(smu->adev->dev, " .PaddingConfig = 0x%x\n",
- pptable->I2cControllers[i].PaddingConfig);
- }
-
- dev_info(smu->adev->dev, "GpioScl = 0x%x\n", pptable->GpioScl);
- dev_info(smu->adev->dev, "GpioSda = 0x%x\n", pptable->GpioSda);
- dev_info(smu->adev->dev, "FchUsbPdSlaveAddr = 0x%x\n", pptable->FchUsbPdSlaveAddr);
- dev_info(smu->adev->dev, "I2cSpare[0] = 0x%x\n", pptable->I2cSpare[0]);
-
- dev_info(smu->adev->dev, "Board Parameters:\n");
- dev_info(smu->adev->dev, "VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping);
- dev_info(smu->adev->dev, "VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping);
- dev_info(smu->adev->dev, "VddMem0VrMapping = 0x%x\n", pptable->VddMem0VrMapping);
- dev_info(smu->adev->dev, "VddMem1VrMapping = 0x%x\n", pptable->VddMem1VrMapping);
- dev_info(smu->adev->dev, "GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "SocUlvPhaseSheddingMask = 0x%x\n", pptable->SocUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "VddciUlvPhaseSheddingMask = 0x%x\n", pptable->VddciUlvPhaseSheddingMask);
- dev_info(smu->adev->dev, "MvddUlvPhaseSheddingMask = 0x%x\n", pptable->MvddUlvPhaseSheddingMask);
-
- dev_info(smu->adev->dev, "GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent);
- dev_info(smu->adev->dev, "GfxOffset = 0x%x\n", pptable->GfxOffset);
- dev_info(smu->adev->dev, "Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx);
-
- dev_info(smu->adev->dev, "SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent);
- dev_info(smu->adev->dev, "SocOffset = 0x%x\n", pptable->SocOffset);
- dev_info(smu->adev->dev, "Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc);
-
- dev_info(smu->adev->dev, "Mem0MaxCurrent = 0x%x\n", pptable->Mem0MaxCurrent);
- dev_info(smu->adev->dev, "Mem0Offset = 0x%x\n", pptable->Mem0Offset);
- dev_info(smu->adev->dev, "Padding_TelemetryMem0 = 0x%x\n", pptable->Padding_TelemetryMem0);
-
- dev_info(smu->adev->dev, "Mem1MaxCurrent = 0x%x\n", pptable->Mem1MaxCurrent);
- dev_info(smu->adev->dev, "Mem1Offset = 0x%x\n", pptable->Mem1Offset);
- dev_info(smu->adev->dev, "Padding_TelemetryMem1 = 0x%x\n", pptable->Padding_TelemetryMem1);
-
- dev_info(smu->adev->dev, "MvddRatio = 0x%x\n", pptable->MvddRatio);
-
- dev_info(smu->adev->dev, "AcDcGpio = 0x%x\n", pptable->AcDcGpio);
- dev_info(smu->adev->dev, "AcDcPolarity = 0x%x\n", pptable->AcDcPolarity);
- dev_info(smu->adev->dev, "VR0HotGpio = 0x%x\n", pptable->VR0HotGpio);
- dev_info(smu->adev->dev, "VR0HotPolarity = 0x%x\n", pptable->VR0HotPolarity);
- dev_info(smu->adev->dev, "VR1HotGpio = 0x%x\n", pptable->VR1HotGpio);
- dev_info(smu->adev->dev, "VR1HotPolarity = 0x%x\n", pptable->VR1HotPolarity);
- dev_info(smu->adev->dev, "GthrGpio = 0x%x\n", pptable->GthrGpio);
- dev_info(smu->adev->dev, "GthrPolarity = 0x%x\n", pptable->GthrPolarity);
- dev_info(smu->adev->dev, "LedPin0 = 0x%x\n", pptable->LedPin0);
- dev_info(smu->adev->dev, "LedPin1 = 0x%x\n", pptable->LedPin1);
- dev_info(smu->adev->dev, "LedPin2 = 0x%x\n", pptable->LedPin2);
- dev_info(smu->adev->dev, "LedEnableMask = 0x%x\n", pptable->LedEnableMask);
- dev_info(smu->adev->dev, "LedPcie = 0x%x\n", pptable->LedPcie);
- dev_info(smu->adev->dev, "LedError = 0x%x\n", pptable->LedError);
- dev_info(smu->adev->dev, "LedSpare1[0] = 0x%x\n", pptable->LedSpare1[0]);
- dev_info(smu->adev->dev, "LedSpare1[1] = 0x%x\n", pptable->LedSpare1[1]);
-
- dev_info(smu->adev->dev, "PllGfxclkSpreadEnabled = 0x%x\n", pptable->PllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "PllGfxclkSpreadPercent = 0x%x\n", pptable->PllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "PllGfxclkSpreadFreq = 0x%x\n", pptable->PllGfxclkSpreadFreq);
-
- dev_info(smu->adev->dev, "DfllGfxclkSpreadEnabled = 0x%x\n", pptable->DfllGfxclkSpreadEnabled);
- dev_info(smu->adev->dev, "DfllGfxclkSpreadPercent = 0x%x\n", pptable->DfllGfxclkSpreadPercent);
- dev_info(smu->adev->dev, "DfllGfxclkSpreadFreq = 0x%x\n", pptable->DfllGfxclkSpreadFreq);
-
- dev_info(smu->adev->dev, "UclkSpreadPadding = 0x%x\n", pptable->UclkSpreadPadding);
- dev_info(smu->adev->dev, "UclkSpreadFreq = 0x%x\n", pptable->UclkSpreadFreq);
-
- dev_info(smu->adev->dev, "FclkSpreadEnabled = 0x%x\n", pptable->FclkSpreadEnabled);
- dev_info(smu->adev->dev, "FclkSpreadPercent = 0x%x\n", pptable->FclkSpreadPercent);
- dev_info(smu->adev->dev, "FclkSpreadFreq = 0x%x\n", pptable->FclkSpreadFreq);
-
- dev_info(smu->adev->dev, "MemoryChannelEnabled = 0x%x\n", pptable->MemoryChannelEnabled);
- dev_info(smu->adev->dev, "DramBitWidth = 0x%x\n", pptable->DramBitWidth);
- dev_info(smu->adev->dev, "PaddingMem1[0] = 0x%x\n", pptable->PaddingMem1[0]);
- dev_info(smu->adev->dev, "PaddingMem1[1] = 0x%x\n", pptable->PaddingMem1[1]);
- dev_info(smu->adev->dev, "PaddingMem1[2] = 0x%x\n", pptable->PaddingMem1[2]);
-
- dev_info(smu->adev->dev, "TotalBoardPower = 0x%x\n", pptable->TotalBoardPower);
- dev_info(smu->adev->dev, "BoardPowerPadding = 0x%x\n", pptable->BoardPowerPadding);
-
- dev_info(smu->adev->dev, "XgmiLinkSpeed\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkSpeed[i]);
- dev_info(smu->adev->dev, "XgmiLinkWidth\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiLinkWidth[i]);
- dev_info(smu->adev->dev, "XgmiFclkFreq\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiFclkFreq[i]);
- dev_info(smu->adev->dev, "XgmiSocVoltage\n");
- for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++)
- dev_info(smu->adev->dev, " .[%d] = 0x%x\n", i, pptable->XgmiSocVoltage[i]);
-
- dev_info(smu->adev->dev, "HsrEnabled = 0x%x\n", pptable->HsrEnabled);
- dev_info(smu->adev->dev, "VddqOffEnabled = 0x%x\n", pptable->VddqOffEnabled);
- dev_info(smu->adev->dev, "PaddingUmcFlags[0] = 0x%x\n", pptable->PaddingUmcFlags[0]);
- dev_info(smu->adev->dev, "PaddingUmcFlags[1] = 0x%x\n", pptable->PaddingUmcFlags[1]);
-
- dev_info(smu->adev->dev, "BoardReserved[0] = 0x%x\n", pptable->BoardReserved[0]);
- dev_info(smu->adev->dev, "BoardReserved[1] = 0x%x\n", pptable->BoardReserved[1]);
- dev_info(smu->adev->dev, "BoardReserved[2] = 0x%x\n", pptable->BoardReserved[2]);
- dev_info(smu->adev->dev, "BoardReserved[3] = 0x%x\n", pptable->BoardReserved[3]);
- dev_info(smu->adev->dev, "BoardReserved[4] = 0x%x\n", pptable->BoardReserved[4]);
- dev_info(smu->adev->dev, "BoardReserved[5] = 0x%x\n", pptable->BoardReserved[5]);
- dev_info(smu->adev->dev, "BoardReserved[6] = 0x%x\n", pptable->BoardReserved[6]);
- dev_info(smu->adev->dev, "BoardReserved[7] = 0x%x\n", pptable->BoardReserved[7]);
- dev_info(smu->adev->dev, "BoardReserved[8] = 0x%x\n", pptable->BoardReserved[8]);
- dev_info(smu->adev->dev, "BoardReserved[9] = 0x%x\n", pptable->BoardReserved[9]);
- dev_info(smu->adev->dev, "BoardReserved[10] = 0x%x\n", pptable->BoardReserved[10]);
-
- dev_info(smu->adev->dev, "MmHubPadding[0] = 0x%x\n", pptable->MmHubPadding[0]);
- dev_info(smu->adev->dev, "MmHubPadding[1] = 0x%x\n", pptable->MmHubPadding[1]);
- dev_info(smu->adev->dev, "MmHubPadding[2] = 0x%x\n", pptable->MmHubPadding[2]);
- dev_info(smu->adev->dev, "MmHubPadding[3] = 0x%x\n", pptable->MmHubPadding[3]);
- dev_info(smu->adev->dev, "MmHubPadding[4] = 0x%x\n", pptable->MmHubPadding[4]);
- dev_info(smu->adev->dev, "MmHubPadding[5] = 0x%x\n", pptable->MmHubPadding[5]);
- dev_info(smu->adev->dev, "MmHubPadding[6] = 0x%x\n", pptable->MmHubPadding[6]);
- dev_info(smu->adev->dev, "MmHubPadding[7] = 0x%x\n", pptable->MmHubPadding[7]);
-}
-
static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
@@ -4397,7 +3163,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.display_disable_memory_clock_switch = sienna_cichlid_display_disable_memory_clock_switch,
.get_power_limit = sienna_cichlid_get_power_limit,
.update_pcie_parameters = sienna_cichlid_update_pcie_parameters,
- .dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
.fini_microcode = smu_v11_0_fini_microcode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 16fcd9dcd202..189c6a32b6bd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -105,7 +105,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
@@ -1616,7 +1617,8 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
break;
default:
if (!ras || !adev->ras_enabled ||
- adev->gmc.xgmi.pending_reset) {
+ (adev->init_lvl->level ==
+ AMDGPU_INIT_LEVEL_MINIMAL_XGMI)) {
if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
IP_VERSION(11, 0, 2)) {
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
@@ -1763,7 +1765,8 @@ failed:
int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t min,
- uint32_t max)
+ uint32_t max,
+ bool automatic)
{
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1778,7 +1781,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
return clk_id;
if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0xffff);
+ else
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
@@ -1786,7 +1792,10 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
}
if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0);
+ else
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
@@ -1854,6 +1863,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
uint32_t mclk_min = 0, mclk_max = 0;
uint32_t socclk_min = 0, socclk_max = 0;
int ret = 0;
+ bool auto_level = false;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1873,6 +1883,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
mclk_max = mem_table->max;
socclk_min = soc_table->min;
socclk_max = soc_table->max;
+ auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
@@ -1905,13 +1916,15 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
mclk_min = mclk_max = 0;
socclk_min = socclk_max = 0;
+ auto_level = false;
}
if (sclk_min && sclk_max) {
ret = smu_v11_0_set_soft_freq_limited_range(smu,
SMU_GFXCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1920,7 +1933,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
ret = smu_v11_0_set_soft_freq_limited_range(smu,
SMU_MCLK,
mclk_min,
- mclk_max);
+ mclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1929,7 +1943,8 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
ret = smu_v11_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ auto_level);
if (ret)
return ret;
}
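Across these smu_v11_0 hunks, the new "automatic" flag changes how the soft min/max limit is encoded into the SMU message parameter. A minimal sketch of the packing, assuming the 16-bit clock-id/frequency split shown in the hunks (the helper name is illustrative, not part of the patch):

	/* Pack a SetSoftMin/MaxByFreq parameter: clock id in the high
	 * 16 bits, frequency in the low 16 bits. "Automatic" asks the
	 * firmware for its default range by sending the widest possible
	 * value instead of a clamped frequency. */
	static uint32_t encode_soft_limit(int clk_id, uint32_t freq,
					  bool automatic, bool is_max)
	{
		uint16_t f = automatic ? (is_max ? 0xffff : 0x0000)
				       : (freq & 0xffff);

		return ((uint32_t)clk_id << 16) | f;
	}

Since auto_level is set only for AMD_DPM_FORCED_LEVEL_AUTO, the firmware rather than the driver picks the effective limits in the automatic case.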
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 22737b11b1bf..a55ea76d7399 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -242,7 +242,9 @@ static int vangogh_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2));
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3));
+ smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4));
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
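The sizing change above keeps gpu_metrics_table_size at the largest of the supported metrics layouts so a single allocation can back any version. An equivalent form (an alternative, not what the patch uses) with max3() from <linux/minmax.h>:

	smu_table->gpu_metrics_table_size = max3(sizeof(struct gpu_metrics_v2_2),
						 sizeof(struct gpu_metrics_v2_3),
						 sizeof(struct gpu_metrics_v2_4));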
@@ -459,7 +461,9 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
return smu_v11_0_init_smc_tables(smu);
}
-static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int vangogh_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable,
+ int inst)
{
int ret = 0;
@@ -1052,48 +1056,34 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
return size;
}
-static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+static int vangogh_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
{
- int workload_type, ret;
- uint32_t profile_mode = input[size];
+ u32 backend_workload_mask = 0;
+ int ret;
- if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
- return -EINVAL;
- }
-
- if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
- profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
- return 0;
-
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- profile_mode);
- if (workload_type < 0) {
- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
- profile_mode);
- return -EINVAL;
- }
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- 1 << workload_type,
- NULL);
+ backend_workload_mask,
+ NULL);
if (ret) {
- dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
- workload_type);
+ dev_err_once(smu->adev->dev, "Fail to set workload mask 0x%08x\n",
+ workload_mask);
return ret;
}
- smu->power_profile_mode = profile_mode;
-
- return 0;
+ return ret;
}
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max,
+ bool automatic)
{
int ret = 0;
@@ -1299,7 +1289,7 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -1335,7 +1325,7 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
@@ -1354,7 +1344,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq, false);
if (ret)
return ret;
@@ -1362,7 +1352,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq, false);
if (ret)
return ret;
@@ -1370,7 +1360,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq, false);
if (ret)
return ret;
@@ -1378,7 +1368,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
+ ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq, false);
if (ret)
return ret;
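vangogh_set_power_profile_mode() now receives a mask of profile bits instead of a single profile index, so the per-profile validation and the CMN2ASIC_MAPPING_WORKLOAD lookup move into the common helper. A hedged sketch of the resulting call pattern (the profile chosen here is only an example):

	u32 backend_workload_mask = 0;

	/* Translate PP_SMC_POWER_PROFILE_* bits into the firmware's
	 * WORKLOAD_PPLIB_*_BIT encoding; several profiles can be
	 * requested in one SMU message. */
	smu_cmn_get_backend_workload_mask(smu,
					  BIT(PP_SMC_POWER_PROFILE_FULLSCREEN3D),
					  &backend_workload_mask);
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
					      backend_workload_mask, NULL);

Note that the function no longer caches smu->power_profile_mode; tracking the active mask is presumably handled by the common layer after this refactor.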
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index cc0504b063fa..37d82a71a2d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -645,7 +645,9 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context
return pm_type;
}
-static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int renoir_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable,
+ int inst)
{
int ret = 0;
@@ -707,7 +709,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -740,7 +742,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
if (ret)
return ret;
- ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
}
@@ -862,44 +864,27 @@ static int renoir_force_clk_levels(struct smu_context *smu,
return ret;
}
-static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+static int renoir_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
{
- int workload_type, ret;
- uint32_t profile_mode = input[size];
+ int ret;
+ u32 backend_workload_mask = 0;
- if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
- return -EINVAL;
- }
-
- if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
- profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
- return 0;
-
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- profile_mode);
- if (workload_type < 0) {
- /*
- * TODO: If some case need switch to powersave/default power mode
- * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
- */
- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode);
- return -EINVAL;
- }
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- 1 << workload_type,
- NULL);
+ backend_workload_mask,
+ NULL);
if (ret) {
- dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
+ dev_err_once(smu->adev->dev, "Failed to set workload mask 0x08%x\n",
+ workload_mask);
return ret;
}
- smu->power_profile_mode = profile_mode;
-
- return 0;
+ return ret;
}
static int renoir_set_peak_clock_by_device(struct smu_context *smu)
@@ -911,7 +896,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
@@ -919,7 +904,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -961,13 +946,13 @@ static int renior_set_dpm_profile_freq(struct smu_context *smu,
}
if (sclk)
- ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk, false);
if (socclk)
- ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk, false);
if (fclk)
- ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk, false);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index ed15f5a0fd11..3d3cd546f0ad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -211,7 +211,7 @@ int smu_v12_0_mode2_reset(struct smu_context *smu)
}
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
+ uint32_t min, uint32_t max, bool automatic)
{
int ret = 0;
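The smu_v12_0 variant appears to gain the "automatic" parameter purely to keep the set_soft_freq_limited_range() signatures uniform across SMU generations; every renoir call site above passes false, so behavior there looks unchanged.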
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 2c35eb31475a..83163d7c7f00 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1297,9 +1297,10 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
}
static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max,
+ bool automatic)
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
@@ -1328,7 +1329,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
return 0;
ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
- min, max);
+ min, max, false);
if (!ret) {
pstate_table->gfxclk_pstate.curr.min = min;
pstate_table->gfxclk_pstate.curr.max = max;
@@ -1348,7 +1349,7 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
/* Restore default min/max clocks and enable determinism */
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+ ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
if (!ret) {
usleep_range(500, 1000);
ret = smu_cmn_send_smc_msg_with_param(smu,
@@ -1422,7 +1423,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
min_clk = dpm_context->dpm_tables.gfx_table.min;
max_clk = dpm_context->dpm_tables.gfx_table.max;
- return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+ return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
}
break;
case PP_OD_COMMIT_DPM_TABLE:
@@ -1441,7 +1442,7 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
min_clk = pstate_table->gfxclk_pstate.custom.min;
max_clk = pstate_table->gfxclk_pstate.custom.max;
- return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+ return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk, false);
}
break;
default:
@@ -1731,7 +1732,6 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
- gpu_metrics->average_mm_activity = 0;
/* Valid power data is available only from primary die */
if (aldebaran_is_primary(smu)) {
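Dropping the explicit average_mm_activity = 0 in aldebaran_get_gpu_metrics() is presumably intentional rather than cosmetic: the metrics buffer is pre-filled with 0xFF by smu_cmn_init_soft_gpu_metrics(), so leaving the field untouched reports it as "not available" on this ASIC instead of a misleading zero.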
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e17466cc1952..fbbdfa54f6a2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -103,7 +103,8 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
@@ -1320,11 +1321,11 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
return 0;
}
-static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
+void smu_v13_0_interrupt_work(struct smu_context *smu)
{
- return smu_cmn_send_smc_msg(smu,
- SMU_MSG_ReenableAcDcInterrupt,
- NULL);
+ smu_cmn_send_smc_msg(smu,
+ SMU_MSG_ReenableAcDcInterrupt,
+ NULL);
}
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
@@ -1377,12 +1378,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
switch (ctxid) {
case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
dev_dbg(adev->dev, "Switched to AC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(smu);
+ schedule_work(&smu->interrupt_work);
adev->pm.ac_power = true;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
dev_dbg(adev->dev, "Switched to DC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(smu);
+ schedule_work(&smu->interrupt_work);
adev->pm.ac_power = false;
break;
case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
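Replacing the direct smu_v13_0_ack_ac_dc_interrupt() call with schedule_work() moves the SMU message out of the interrupt handler: sending a message can sleep on the message lock, which is not allowed in IRQ context. A sketch of the worker this presumably pairs with (wiring and names assumed, not shown in this hunk):

	static void smu_interrupt_work_fn(struct work_struct *work)
	{
		struct smu_context *smu = container_of(work, struct smu_context,
						       interrupt_work);

		/* Runs in process context, where smu_v13_0_interrupt_work()
		 * can safely send SMU_MSG_ReenableAcDcInterrupt. */
		if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
			smu->ppt_funcs->interrupt_work(smu);
	}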
@@ -1608,7 +1609,8 @@ failed:
int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t min,
- uint32_t max)
+ uint32_t max,
+ bool automatic)
{
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1623,7 +1625,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
return clk_id;
if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0xffff);
+ else
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
@@ -1631,7 +1636,10 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
}
if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0);
+ else
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
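/*
 * With the new 'automatic' flag, the driver asks the firmware to pick the
 * range itself: the soft-max message carries 0xffff and the soft-min message
 * carries 0 in the low 16 bits instead of an explicit frequency. A
 * self-contained sketch of the 32-bit parameter encoding used above
 * (clock id in the high half, frequency in MHz in the low half):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t encode_soft_max(int clk_id, uint32_t max, int automatic)
{
	return automatic ? (uint32_t)((clk_id << 16) | 0xffff)
			 : (uint32_t)((clk_id << 16) | (max & 0xffff));
}

static uint32_t encode_soft_min(int clk_id, uint32_t min, int automatic)
{
	return automatic ? (uint32_t)(clk_id << 16)
			 : (uint32_t)((clk_id << 16) | (min & 0xffff));
}

int main(void)
{
	/* clock id 2, 1350/500 MHz, manual vs. automatic */
	printf("max manual: 0x%08x\n", encode_soft_max(2, 1350, 0));
	printf("max auto:   0x%08x\n", encode_soft_max(2, 1350, 1));
	printf("min auto:   0x%08x\n", encode_soft_min(2, 500, 1));
	return 0;
}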
@@ -1708,6 +1716,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
uint32_t dclk_min = 0, dclk_max = 0;
uint32_t fclk_min = 0, fclk_max = 0;
int ret = 0, i;
+ bool auto_level = false;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1739,6 +1748,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
dclk_max = dclk_table->max;
fclk_min = fclk_table->min;
fclk_max = fclk_table->max;
+ auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
@@ -1780,13 +1790,15 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
vclk_min = vclk_max = 0;
dclk_min = dclk_max = 0;
fclk_min = fclk_max = 0;
+ auto_level = false;
}
if (sclk_min && sclk_max) {
ret = smu_v13_0_set_soft_freq_limited_range(smu,
SMU_GFXCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1798,7 +1810,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
SMU_MCLK,
mclk_min,
- mclk_max);
+ mclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1810,7 +1823,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1825,7 +1839,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
i ? SMU_VCLK1 : SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1840,7 +1855,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
i ? SMU_DCLK1 : SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1852,7 +1868,8 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
SMU_FCLK,
fclk_min,
- fclk_max);
+ fclk_max,
+ auto_level);
if (ret)
return ret;
@@ -2088,21 +2105,18 @@ int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu)
}
int smu_v13_0_set_vcn_enable(struct smu_context *smu,
- bool enable)
+ bool enable,
+ int inst)
{
struct amdgpu_device *adev = smu->adev;
- int i, ret = 0;
+ int ret = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- i << 16U, NULL);
- if (ret)
- return ret;
- }
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ inst << 16U, NULL);
return ret;
}
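/*
 * smu_v13_0_set_vcn_enable() now takes an instance index instead of looping
 * over every VCN instance internally; harvested instances are skipped via
 * their bit in harvest_config and the instance is passed in the high half of
 * the message parameter. A small sketch of that per-instance gating (the
 * names harvest_config and power_up_vcn are illustrative):
 */
#include <stdint.h>
#include <stdio.h>

static int power_up_vcn(uint32_t harvest_config, int inst)
{
	if (harvest_config & (1u << inst))
		return 0; /* instance fused off: nothing to do */

	/* param mirrors 'inst << 16U' from the hunk above */
	printf("PowerUpVcn param=0x%08x\n", (uint32_t)inst << 16);
	return 0;
}

int main(void)
{
	uint32_t harvest = 1u << 1; /* instance 1 harvested */

	power_up_vcn(harvest, 0); /* sends the message */
	power_up_vcn(harvest, 1); /* silently skipped */
	return 0;
}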
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 1d024b122b0c..0551a3311217 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -107,6 +107,8 @@
#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
+#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11
+#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12
#define LINK_SPEED_MAX 3
@@ -736,19 +738,6 @@ static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static void smu_v13_0_0_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *pptable = table_context->driver_pptable;
- SkuTable_t *skutable = &pptable->SkuTable;
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version);
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]);
-}
-
static int smu_v13_0_0_system_features_control(struct smu_context *smu,
bool en)
{
@@ -1143,6 +1132,14 @@ static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
od_max_setting = overdrive_upperlimits->FanMinimumPwm;
break;
+ case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE:
+ od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable;
+ od_max_setting = overdrive_upperlimits->FanZeroRpmEnable;
+ break;
+ case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP:
+ od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp;
+ od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp;
+ break;
default:
od_min_setting = od_max_setting = INT_MAX;
break;
@@ -1463,6 +1460,42 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
min_value, max_value);
break;
+ case SMU_OD_FAN_ZERO_RPM_ENABLE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_ZERO_FAN_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanZeroRpmEnable);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_ZERO_RPM_STOP_TEMP:
+ if (!smu_v13_0_0_is_od_feature_supported(smu,
+ PP_OD_FEATURE_ZERO_FAN_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanZeroRpmStopTemp);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n",
+ min_value, max_value);
+ break;
+
case SMU_OD_RANGE:
if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
@@ -1560,6 +1593,16 @@ static int smu_v13_0_0_od_restore_table_single(struct smu_context *smu, long inp
od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
+ case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
+ od_table->OverDriveTable.FanZeroRpmEnable =
+ boot_overdrive_table->OverDriveTable.FanZeroRpmEnable;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
+ case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP:
+ od_table->OverDriveTable.FanZeroRpmStopTemp =
+ boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
default:
dev_info(adev->dev, "Invalid table index: %ld\n", input);
return -EINVAL;
@@ -1853,6 +1896,48 @@ static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu,
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
+ case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
+ dev_warn(adev->dev, "Zero RPM setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanZeroRpmEnable = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP:
+ if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
+ dev_warn(adev->dev, "Zero RPM setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_0_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanZeroRpmStopTemp = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
+
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size == 1) {
ret = smu_v13_0_0_od_restore_table_single(smu, input[0]);
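/*
 * Both new OD edit cases above follow the same shape: reject the edit when
 * the zero-RPM feature bit is absent, range-check the single input against
 * the firmware-provided [min, max] window, then stage the value and set
 * PP_OD_FEATURE_ZERO_FAN_BIT in FeatureCtrlMask so the commit step knows
 * what changed. A compact sketch of that validate-and-stage flow; the
 * helper name, struct, and bit position are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define ZERO_FAN_BIT (1u << 5) /* illustrative bit position */

struct od_state {
	int32_t zero_rpm_enable;
	uint32_t feature_ctrl_mask;
};

static int od_stage_zero_rpm(struct od_state *od, long input,
			     int32_t min, int32_t max, int supported)
{
	if (!supported)
		return -95; /* -EOPNOTSUPP */
	if (input < min || input > max) {
		fprintf(stderr, "value %ld outside [%d, %d]\n", input, min, max);
		return -22; /* -EINVAL */
	}
	od->zero_rpm_enable = (int32_t)input;
	od->feature_ctrl_mask |= ZERO_FAN_BIT; /* commit step picks this up */
	return 0;
}

int main(void)
{
	struct od_state od = { 0 };

	od_stage_zero_rpm(&od, 1, 0, 1, 1);
	printf("staged=%d mask=0x%x\n", od.zero_rpm_enable, od.feature_ctrl_mask);
	return 0;
}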
@@ -1975,7 +2060,8 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
clk_type,
min_freq,
- max_freq);
+ max_freq,
+ false);
break;
case SMU_DCEFCLK:
case SMU_PCIE:
@@ -2122,7 +2208,11 @@ static void smu_v13_0_0_set_supported_od_feature_mask(struct smu_context *smu)
OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
- OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET;
}
static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu)
@@ -2188,6 +2278,10 @@ static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu)
user_od_table_bak.OverDriveTable.FanTargetTemperature;
user_od_table->OverDriveTable.FanMinimumPwm =
user_od_table_bak.OverDriveTable.FanMinimumPwm;
+ user_od_table->OverDriveTable.FanZeroRpmEnable =
+ user_od_table_bak.OverDriveTable.FanZeroRpmEnable;
+ user_od_table->OverDriveTable.FanZeroRpmStopTemp =
+ user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp;
}
smu_v13_0_0_set_supported_od_feature_mask(smu);
@@ -2477,104 +2571,130 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
return size;
}
-static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
- long *input,
- uint32_t size)
+#define SMU_13_0_0_CUSTOM_PARAMS_COUNT 9
+#define SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT 2
+#define SMU_13_0_0_CUSTOM_PARAMS_SIZE (SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_0_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
- int workload_type, ret = 0;
- u32 workload_mask;
-
- smu->power_profile_mode = input[size];
+ int ret, idx;
- if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 9)
- return -EINVAL;
-
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
-
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor->Gfx_FPS = input[1];
- activity_monitor->Gfx_MinActiveFreqType = input[2];
- activity_monitor->Gfx_MinActiveFreq = input[3];
- activity_monitor->Gfx_BoosterFreqType = input[4];
- activity_monitor->Gfx_BoosterFreq = input[5];
- activity_monitor->Gfx_PD_Data_limit_c = input[6];
- activity_monitor->Gfx_PD_Data_error_coeff = input[7];
- activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
- break;
- case 1: /* Fclk */
- activity_monitor->Fclk_FPS = input[1];
- activity_monitor->Fclk_MinActiveFreqType = input[2];
- activity_monitor->Fclk_MinActiveFreq = input[3];
- activity_monitor->Fclk_BoosterFreqType = input[4];
- activity_monitor->Fclk_BoosterFreq = input[5];
- activity_monitor->Fclk_PD_Data_limit_c = input[6];
- activity_monitor->Fclk_PD_Data_error_coeff = input[7];
- activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
- break;
- default:
- return -EINVAL;
- }
+ idx = 0 * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor->Gfx_FPS = input[idx + 1];
+ activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
+ activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
+ activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
+ activity_monitor->Gfx_BoosterFreq = input[idx + 5];
+ activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
+ activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
+ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
+ }
+ idx = 1 * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Fclk */
+ activity_monitor->Fclk_FPS = input[idx + 1];
+ activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
+ activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
+ activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
+ activity_monitor->Fclk_BoosterFreq = input[idx + 5];
+ activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
+ activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
+ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
+ }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- true);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
- return ret;
- }
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
+ return ret;
+}
- if (workload_type < 0)
- return -EINVAL;
+static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int workload_type, ret, idx = -1, i;
- workload_mask = 1 << workload_type;
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
- if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7300))) ||
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
- smu->adev->pm.fw_version >= 0x00504500)) {
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- PP_SMC_POWER_PROFILE_POWERSAVING);
- if (workload_type >= 0)
- workload_mask |= 1 << workload_type;
+ if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) &&
+ ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500))) {
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ PP_SMC_POWER_PROFILE_POWERSAVING);
+ if (workload_type >= 0)
+ backend_workload_mask |= 1 << workload_type;
+ }
+
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params =
+ kzalloc(SMU_13_0_0_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
}
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != SMU_13_0_0_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = smu_v13_0_0_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
+ if (ret) {
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, SMU_13_0_0_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetWorkloadMask,
- workload_mask,
- NULL);
- if (!ret)
- smu->workload_mask = workload_mask;
+ SMU_MSG_SetWorkloadMask,
+ backend_workload_mask,
+ NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
return ret;
}
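/*
 * The refactor above replaces the old "one custom profile at a time" input
 * with a persistent per-clock parameter store: a flat array of
 * CLOCK_COUNT * PARAMS_COUNT longs where slot 0 of each row flags that the
 * row is populated. A sketch of the indexing scheme, assuming the same
 * 2 rows x 9 columns layout as the SMU_13_0_0_CUSTOM_PARAMS_* defines:
 */
#include <stdio.h>
#include <string.h>

#define PARAMS_COUNT 9
#define CLOCK_COUNT  2

static int store_custom_params(long *store, const long *input, int n)
{
	int idx;

	if (n != PARAMS_COUNT || input[0] >= CLOCK_COUNT)
		return -22; /* -EINVAL */

	idx = (int)input[0] * PARAMS_COUNT; /* row for Gfxclk (0) or Fclk (1) */
	store[idx] = 1;                     /* mark the row as populated */
	memcpy(&store[idx + 1], &input[1], (PARAMS_COUNT - 1) * sizeof(long));
	return idx; /* caller can clear store[idx] if the commit fails */
}

int main(void)
{
	long store[CLOCK_COUNT * PARAMS_COUNT] = { 0 };
	long input[PARAMS_COUNT] = { 1, 60, 1, 500, 1, 800, 5, 3, 2 };

	printf("stored at row offset %d\n", store_custom_params(store, input, 9));
	return 0;
}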
@@ -3026,7 +3146,6 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.i2c_init = smu_v13_0_0_i2c_control_init,
.i2c_fini = smu_v13_0_0_i2c_control_fini,
.is_dpm_running = smu_v13_0_0_is_dpm_running,
- .dump_pptable = smu_v13_0_0_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
.fini_microcode = smu_v13_0_fini_microcode,
@@ -3101,6 +3220,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check,
.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+ .interrupt_work = smu_v13_0_interrupt_work,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index 9c2c43bfed0b..f5db181ef489 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -193,7 +193,9 @@ static int smu_v13_0_5_system_features_control(struct smu_context *smu, bool en)
return ret;
}
-static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable,
+ int inst)
{
int ret = 0;
@@ -811,9 +813,10 @@ failed:
}
static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max,
+ bool automatic)
{
enum smu_message_type msg_set_min, msg_set_max;
uint32_t min_clk = min;
@@ -950,7 +953,7 @@ static int smu_v13_0_5_force_clk_levels(struct smu_context *smu,
if (ret)
goto force_level_out;
- ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
goto force_level_out;
break;
@@ -1046,9 +1049,10 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
if (sclk_min && sclk_max) {
ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
- SMU_SCLK,
- sclk_min,
- sclk_max);
+ SMU_SCLK,
+ sclk_min,
+ sclk_max,
+ false);
if (ret)
return ret;
@@ -1060,7 +1064,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ false);
if (ret)
return ret;
}
@@ -1069,7 +1074,8 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ false);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 55ed6247eb61..da7bd9227afe 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -96,12 +96,30 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
#define LINK_SPEED_MAX 4
-
#define SMU_13_0_6_DSCLK_THRESHOLD 140
#define MCA_BANK_IPID(_ip, _hwid, _type) \
[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
+#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
+
+enum smu_v13_0_6_caps {
+ SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(OTHER_END_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(HST_LIMIT_METRICS),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(PER_INST_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND),
+ SMU_CAP(SDMA_RESET),
+ SMU_CAP(ALL),
+};
+
struct mca_bank_ipid {
enum amdgpu_mca_ip ip;
uint16_t hwid;
@@ -176,6 +194,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0),
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SelectPstatePolicy, PPSMC_MSG_SelectPstatePolicy, 0),
+ MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
};
// clang-format on
@@ -253,7 +272,7 @@ struct PPTable_t {
#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x) ((x) & 0x3ff)
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
-#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\
+#define GET_METRIC_FIELD(field, flag) ((flag) ?\
(metrics_a->field) : (metrics_x->field))
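/*
 * The metric fields above are Q10 fixed point: the low 10 bits are the
 * fraction, and SMUQ10_ROUND() rounds to nearest by adding one when the
 * fraction is at least 0.5 (0x200). A standalone rendering of those macros
 * with two worked values either side of the rounding threshold:
 */
#include <stdint.h>
#include <stdio.h>

#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x)    ((x) & 0x3ff)
#define SMUQ10_ROUND(x)   (SMUQ10_TO_UINT(x) + (SMUQ10_FRAC(x) >= 0x200))

int main(void)
{
	uint32_t a = (1350u << 10) | 0x1ff; /* 1350.499... -> rounds down */
	uint32_t b = (1350u << 10) | 0x200; /* 1350.5      -> rounds up   */

	printf("%u %u\n", SMUQ10_ROUND(a), SMUQ10_ROUND(b)); /* 1350 1351 */
	return 0;
}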
struct smu_v13_0_6_dpm_map {
@@ -263,6 +282,162 @@ struct smu_v13_0_6_dpm_map {
uint32_t *freq_table;
};
+static inline void smu_v13_0_6_cap_set(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ dpm_context->caps |= BIT_ULL(cap);
+}
+
+static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ dpm_context->caps &= ~BIT_ULL(cap);
+}
+
+static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
+ enum smu_v13_0_6_caps cap)
+{
+ struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+ return !!(dpm_context->caps & BIT_ULL(cap));
+}
+
+static void smu_v13_0_14_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ uint32_t fw_ver = smu->smc_fw_version;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver >= 0x05550E00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
+ if (fw_ver >= 0x05551000)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if (fw_ver >= 0x05550B00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ if (fw_ver >= 0x5551200)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+}
+
+static void smu_v13_0_12_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ uint32_t fw_ver = smu->smc_fw_version;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver < 0x00561900)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
+
+ if (fw_ver >= 0x00561700)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+}
+
+static void smu_v13_0_6_init_caps(struct smu_context *smu)
+{
+ enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+ SMU_CAP(UNI_METRICS),
+ SMU_CAP(SET_UCLK_MAX),
+ SMU_CAP(DPM_POLICY),
+ SMU_CAP(PCIE_METRICS),
+ SMU_CAP(CTF_LIMIT),
+ SMU_CAP(MCA_DEBUG_MODE),
+ SMU_CAP(RMA_MSG),
+ SMU_CAP(ACA_SYND) };
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t fw_ver = smu->smc_fw_version;
+ uint32_t pgm = (fw_ver >> 24) & 0xFF;
+
+ for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+ smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
+ if (fw_ver < 0x552F00)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));
+ if (fw_ver < 0x554500)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(CTF_LIMIT));
+
+ if (adev->flags & AMD_IS_APU) {
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+
+ if (fw_ver <= 0x4556900)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(UNI_METRICS));
+ if (fw_ver >= 0x04556F00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ if (fw_ver >= 0x04556A00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ } else {
+ if (fw_ver >= 0x557600)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
+ if (fw_ver < 0x00556000)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
+ if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(SET_UCLK_MAX));
+ if (fw_ver < 0x556300)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
+ if (fw_ver < 0x554800)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(MCA_DEBUG_MODE));
+ if (fw_ver >= 0x556F00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
+ if (fw_ver < 0x00555a00)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
+ if (fw_ver < 0x00555600)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
+ if (pgm == 0 && fw_ver >= 0x557900)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
+ }
+ if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
+ ((pgm == 0) && (fw_ver >= 0x00557900)) ||
+ ((pgm == 4) && (fw_ver >= 0x4557000)))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+}
+
+static void smu_v13_0_x_init_caps(struct smu_context *smu)
+{
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ return smu_v13_0_12_init_caps(smu);
+ case IP_VERSION(13, 0, 14):
+ return smu_v13_0_14_init_caps(smu);
+ default:
+ return smu_v13_0_6_init_caps(smu);
+ }
+}
+
+static int smu_v13_0_6_check_fw_version(struct smu_context *smu)
+{
+ int r;
+
+ r = smu_v13_0_check_fw_version(smu);
+ /* Initialize caps flags once fw version is fetched */
+ if (!r)
+ smu_v13_0_x_init_caps(smu);
+
+ return r;
+}
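/*
 * The caps block above replaces scattered firmware-version checks with a
 * 64-bit capability mask initialized once, right after the firmware version
 * is fetched; callers then test a named bit instead of re-deriving version
 * thresholds. A minimal userspace model of the same mechanism (the enum
 * values are illustrative; the two thresholds are taken from the hunks):
 */
#include <stdint.h>
#include <stdio.h>

enum caps { CAP_DPM, CAP_SET_UCLK_MAX, CAP_SDMA_RESET };

static uint64_t caps;

static void cap_set(enum caps c)       { caps |= 1ull << c; }
static void cap_clear(enum caps c)     { caps &= ~(1ull << c); }
static int  cap_supported(enum caps c) { return !!(caps & (1ull << c)); }

static void init_caps(uint32_t fw_ver)
{
	cap_set(CAP_DPM);
	cap_set(CAP_SET_UCLK_MAX);
	if (fw_ver >= 0x5551200)   /* SDMA reset threshold (13.0.14 path) */
		cap_set(CAP_SDMA_RESET);
	if (fw_ver < 0x552F00)     /* too old for DPM */
		cap_clear(CAP_DPM);
}

int main(void)
{
	init_caps(0x5551300);
	printf("sdma reset: %d, dpm: %d\n",
	       cap_supported(CAP_SDMA_RESET), cap_supported(CAP_DPM));
	return 0;
}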
+
static int smu_v13_0_6_init_microcode(struct smu_context *smu)
{
const struct smc_firmware_header_v2_1 *v2_1;
@@ -287,7 +462,8 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix,
sizeof(ucode_prefix));
- ret = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ ret = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (ret)
goto out;
@@ -352,7 +528,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_7);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -583,7 +759,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- struct amdgpu_device *adev = smu->adev;
+ bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
int ret, i, retry = 100;
uint32_t table_version;
@@ -595,7 +771,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return ret;
/* Ensure that metrics have been updated */
- if (GET_METRIC_FIELD(AccumulationCounter))
+ if (GET_METRIC_FIELD(AccumulationCounter, flag))
break;
usleep_range(1000, 1100);
@@ -612,29 +788,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
table_version;
pptable->MaxSocketPowerLimit =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit, flag));
pptable->MaxGfxclkFrequency =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, flag));
pptable->MinGfxclkFrequency =
- SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, flag));
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable, flag)[i]);
pptable->UclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable, flag)[i]);
pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
- GET_METRIC_FIELD(SocclkFrequencyTable)[i]);
+ GET_METRIC_FIELD(SocclkFrequencyTable, flag)[i]);
pptable->VclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable, flag)[i]);
pptable->DclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable, flag)[i]);
pptable->LclkFrequencyTable[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable, flag)[i]);
}
/* use AID0 serial number by default */
- pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0];
+ pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID, flag)[0];
pptable->Init = true;
}
@@ -779,8 +955,7 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
smu_v13_0_6_setup_driver_pptable(smu);
/* DPM policy is not supported in older firmware versions */
- if (!(smu->adev->flags & AMD_IS_APU) &&
- (smu->smc_fw_version < 0x00556000)) {
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM_POLICY))) {
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
smu_dpm->dpm_policies->policy_mask &=
@@ -957,6 +1132,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
+ bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
@@ -969,52 +1145,52 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
switch (member) {
case METRICS_CURR_GFXCLK:
case METRICS_AVERAGE_GFXCLK:
- if (smu->smc_fw_version >= 0x552F00) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
xcc_id = GET_INST(GC, 0);
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag));
break;
case METRICS_CURR_VCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[0]);
break;
case METRICS_CURR_DCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[0]);
break;
case METRICS_CURR_FCLK:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency, flag));
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag));
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
break;
case METRICS_CURR_SOCKETPOWER:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8;
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
@@ -1656,7 +1832,7 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu)
static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
{
/* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
- if (smu->smc_fw_version < 0x554800)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(MCA_DEBUG_MODE)))
return 0;
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
@@ -1739,7 +1915,7 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
if (uclk_table->max != pstate_table->uclk_pstate.curr.max) {
/* Min UCLK is not expected to be changed */
ret = smu_v13_0_set_soft_freq_limited_range(
- smu, SMU_UCLK, 0, uclk_table->max);
+ smu, SMU_UCLK, 0, uclk_table->max, false);
if (ret)
return ret;
pstate_table->uclk_pstate.curr.max = uclk_table->max;
@@ -1758,7 +1934,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
+ uint32_t min, uint32_t max,
+ bool automatic)
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
@@ -1800,13 +1977,12 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
if (max == pstate_table->uclk_pstate.curr.max)
return 0;
/* For VF, only allowed in FW versions 85.102 or greater */
- if (amdgpu_sriov_vf(adev) &&
- ((smu->smc_fw_version < 0x556600) ||
- (adev->flags & AMD_IS_APU)))
+ if (!smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(SET_UCLK_MAX)))
return -EOPNOTSUPP;
/* Only max clock limiting is allowed for UCLK */
ret = smu_v13_0_set_soft_freq_limited_range(
- smu, SMU_UCLK, 0, max);
+ smu, SMU_UCLK, 0, max, false);
if (!ret)
pstate_table->uclk_pstate.curr.max = max;
}
@@ -1946,7 +2122,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
max_clk = dpm_context->dpm_tables.gfx_table.max;
ret = smu_v13_0_6_set_soft_freq_limited_range(
- smu, SMU_GFXCLK, min_clk, max_clk);
+ smu, SMU_GFXCLK, min_clk, max_clk, false);
if (ret)
return ret;
@@ -1954,7 +2130,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
min_clk = dpm_context->dpm_tables.uclk_table.min;
max_clk = dpm_context->dpm_tables.uclk_table.max;
ret = smu_v13_0_6_set_soft_freq_limited_range(
- smu, SMU_UCLK, min_clk, max_clk);
+ smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
return ret;
pstate_table->uclk_pstate.custom.max = 0;
@@ -1978,7 +2154,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
max_clk = pstate_table->gfxclk_pstate.custom.max;
ret = smu_v13_0_6_set_soft_freq_limited_range(
- smu, SMU_GFXCLK, min_clk, max_clk);
+ smu, SMU_GFXCLK, min_clk, max_clk, false);
if (ret)
return ret;
@@ -1989,7 +2165,7 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
min_clk = pstate_table->uclk_pstate.curr.min;
max_clk = pstate_table->uclk_pstate.custom.max;
return smu_v13_0_6_set_soft_freq_limited_range(
- smu, SMU_UCLK, min_clk, max_clk);
+ smu, SMU_UCLK, min_clk, max_clk, false);
}
break;
default:
@@ -2006,7 +2182,7 @@ static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
ret = smu_cmn_get_enabled_mask(smu, feature_mask);
- if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
+ if (ret == -EIO && !smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
*feature_mask = 0;
ret = 0;
}
@@ -2300,13 +2476,17 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_5 *gpu_metrics =
- (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_7 *gpu_metrics =
+ (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
+ bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
+ int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
- int ret = 0, xcc_id, inst, i, j;
MetricsTableX_t *metrics_x;
MetricsTableA_t *metrics_a;
+ struct amdgpu_xcp *xcp;
u16 link_width_level;
+ u32 inst_mask;
+ bool per_inst;
metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
@@ -2317,60 +2497,70 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
metrics_a = (MetricsTableA_t *)metrics_x;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5);
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 7);
gpu_metrics->temperature_hotspot =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, flag));
/* Individual HBM stack temperature is not reported */
gpu_metrics->temperature_mem =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature, flag));
/* Reports max temperature of all voltage rails */
gpu_metrics->temperature_vrsoc =
- SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature));
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature, flag));
gpu_metrics->average_gfx_activity =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy, flag));
gpu_metrics->average_umc_activity =
- SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization, flag));
+
+ gpu_metrics->mem_max_bandwidth =
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxDramBandwidth, flag));
gpu_metrics->curr_socket_power =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower, flag));
/* Energy counter reported in 15.259uJ (2^-16) units */
- gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc);
+ gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc, flag);
for (i = 0; i < MAX_GFX_CLKS; i++) {
xcc_id = GET_INST(GC, i);
if (xcc_id >= 0)
gpu_metrics->current_gfxclk[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
if (i < MAX_CLKS) {
gpu_metrics->current_socclk[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency, flag)[i]);
inst = GET_INST(VCN, i);
if (inst >= 0) {
gpu_metrics->current_vclk0[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency, flag)[inst]);
gpu_metrics->current_dclk0[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency, flag)[inst]);
}
}
}
- gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
+ gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, flag));
+
+ /* Total accumulated cycle counter */
+ gpu_metrics->accumulation_counter = GET_METRIC_FIELD(AccumulationCounter, flag);
- /* Throttle status is not reported through metrics now */
- gpu_metrics->throttle_status = 0;
+ /* Accumulated throttler residencies */
+ gpu_metrics->prochot_residency_acc = GET_METRIC_FIELD(ProchotResidencyAcc, flag);
+ gpu_metrics->ppt_residency_acc = GET_METRIC_FIELD(PptResidencyAcc, flag);
+ gpu_metrics->socket_thm_residency_acc = GET_METRIC_FIELD(SocketThmResidencyAcc, flag);
+ gpu_metrics->vr_thm_residency_acc = GET_METRIC_FIELD(VrThmResidencyAcc, flag);
+ gpu_metrics->hbm_thm_residency_acc = GET_METRIC_FIELD(HbmThmResidencyAcc, flag);
/* Clock Lock Status. Each bit corresponds to one GFXCLK instance */
- gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
+ gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak, flag) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
/* Check smu version: PCIe link speed and width are reported from the pmfw
 * metrics table for both pf and one vf on smu version 85.99.0 or higher;
 * otherwise they are reported only for the pf, from registers
*/
- if (smu->smc_fw_version >= 0x556300) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) {
gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
gpu_metrics->pcie_link_speed =
pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
@@ -2399,41 +2589,76 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
metrics_x->PCIeNAKSentCountAcc;
gpu_metrics->pcie_nak_rcvd_count_acc =
metrics_x->PCIeNAKReceivedCountAcc;
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS)))
+ gpu_metrics->pcie_lc_perf_other_end_recovery =
+ metrics_x->PCIeOtherEndRecoveryAcc;
+
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
- SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc));
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc, flag));
gpu_metrics->mem_activity_acc =
- SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc));
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, flag));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, flag)[i]);
gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, flag)[i]);
+ ret = amdgpu_get_xgmi_link_status(adev, i);
+ if (ret >= 0)
+ gpu_metrics->xgmi_link_status[i] = ret;
}
- for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- inst = GET_INST(JPEG, i);
- for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
- gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] =
- SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy)
- [(inst * adev->jpeg.num_jpeg_rings) + j]);
+ gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
+
+ per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
+
+ for_each_xcp(adev->xcp_mgr, xcp, i) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ /* Both JPEG and VCN have the same instances */
+ inst = GET_INST(VCN, k);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ gpu_metrics->xcp_stats[i].jpeg_busy
+ [(idx * adev->jpeg.num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, flag)
+ [(inst * adev->jpeg.num_jpeg_rings) + j]);
+ }
+ gpu_metrics->xcp_stats[i].vcn_busy[idx] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, flag)[inst]);
+ idx++;
+
}
- }
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- inst = GET_INST(VCN, i);
- gpu_metrics->vcn_activity[i] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]);
+ if (per_inst) {
+ amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ idx = 0;
+ for_each_inst(k, inst_mask) {
+ inst = GET_INST(GC, k);
+ gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
+ SMUQ10_ROUND(metrics_x->GfxBusy[inst]);
+ gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
+ SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
+
+ if (smu_v13_0_6_cap_supported(
+ smu, SMU_CAP(HST_LIMIT_METRICS)))
+ gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
+ SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc
+ [inst]);
+ idx++;
+ }
+ }
}
- gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth));
- gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate));
+ gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth, flag));
+ gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate, flag));
- gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp);
+ gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, flag);
*table = (void *)gpu_metrics;
kfree(metrics_x);
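/*
 * The metrics loop above is re-keyed from flat per-device arrays to
 * per-partition xcp_stats: for each XCP the driver fetches the VCN (and,
 * when per-instance metrics are supported, GFX) instance mask and walks its
 * set bits, mapping the logical index k to a dense per-partition slot. A
 * standalone sketch of that set-bit walk, the for_each_inst() equivalent:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t inst_mask = 0x0b; /* instances 0, 1, 3 in this partition */
	int idx = 0;               /* dense per-partition slot */

	for (uint32_t m = inst_mask; m; m &= m - 1) {
		int k = __builtin_ctz(m); /* next set bit = instance id */

		printf("xcp slot %d <- instance %d\n", idx, k);
		idx++;
	}
	return 0;
}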
@@ -2529,7 +2754,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
return -EINVAL;
/* Check smu version: the GetCtfLimit message is only supported on smu version 85.69 or higher */
- if (smu->smc_fw_version < 0x554500)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(CTF_LIMIT)))
return 0;
/* Get SOC Max operating temperature */
@@ -2631,11 +2856,10 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
int ret;
/* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
- if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(RMA_MSG)))
return 0;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
@@ -2647,6 +2871,23 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
+{
+ int ret = 0;
+
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET)))
+ return -EOPNOTSUPP;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_ResetSDMA, inst_mask, NULL);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "failed to send ResetSDMA event with mask 0x%x\n",
+ inst_mask);
+
+ return ret;
+}
+
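/*
 * smu_v13_0_6_reset_sdma() is the first user of SMU_CAP(SDMA_RESET): it
 * fails with -EOPNOTSUPP on firmware that lacks the message and otherwise
 * forwards a bitmask of SDMA instances to reset. A sketch of that gate;
 * the flag variable stands in for the caps-mask test.
 */
#include <stdint.h>
#include <stdio.h>

static int sdma_reset_supported; /* would come from the caps mask */

static int reset_sdma(uint32_t inst_mask)
{
	if (!sdma_reset_supported)
		return -95; /* -EOPNOTSUPP: fw lacks PPSMC_MSG_ResetSDMA */

	printf("ResetSDMA mask=0x%x\n", inst_mask); /* message send */
	return 0;
}

int main(void)
{
	printf("rc=%d\n", reset_sdma(1u << 0 | 1u << 2)); /* -95 */
	sdma_reset_supported = 1;
	printf("rc=%d\n", reset_sdma(1u << 0 | 1u << 2)); /* 0 */
	return 0;
}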
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -2957,7 +3198,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd
if (instlo != 0x03b30400)
return false;
- if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND))) {
errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
errcode &= 0xff;
} else {
@@ -2974,6 +3215,16 @@ static int mmhub_err_codes[] = {
CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE,
};
+static int vcn_err_codes[] = {
+ CODE_VIDD, CODE_VIDV,
+};
+static int jpeg_err_codes[] = {
+ CODE_JPEG0S, CODE_JPEG0D, CODE_JPEG1S, CODE_JPEG1D,
+ CODE_JPEG2S, CODE_JPEG2D, CODE_JPEG3S, CODE_JPEG3D,
+ CODE_JPEG4S, CODE_JPEG4D, CODE_JPEG5S, CODE_JPEG5D,
+ CODE_JPEG6S, CODE_JPEG6D, CODE_JPEG7S, CODE_JPEG7D,
+};
+
static const struct mca_ras_info mca_ras_table[] = {
{
.blkid = AMDGPU_RAS_BLOCK__UMC,
@@ -3002,6 +3253,20 @@ static const struct mca_ras_info mca_ras_table[] = {
.blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL,
.ip = AMDGPU_MCA_IP_PCS_XGMI,
.get_err_count = mca_pcs_xgmi_mca_get_err_count,
+ }, {
+ .blkid = AMDGPU_RAS_BLOCK__VCN,
+ .ip = AMDGPU_MCA_IP_SMU,
+ .err_code_array = vcn_err_codes,
+ .err_code_count = ARRAY_SIZE(vcn_err_codes),
+ .get_err_count = mca_smu_mca_get_err_count,
+ .bank_is_valid = mca_smu_bank_is_valid,
+ }, {
+ .blkid = AMDGPU_RAS_BLOCK__JPEG,
+ .ip = AMDGPU_MCA_IP_SMU,
+ .err_code_array = jpeg_err_codes,
+ .err_code_count = ARRAY_SIZE(jpeg_err_codes),
+ .get_err_count = mca_smu_mca_get_err_count,
+ .bank_is_valid = mca_smu_bank_is_valid,
},
};
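/*
 * The two new mca_ras_table entries route VCN and JPEG banks through the
 * generic SMU-IP handlers by listing the error codes each block owns; the
 * shared bank_is_valid/get_err_count callbacks then match a bank against
 * the block's err_code_array. A tiny sketch of that table-driven lookup
 * (the numeric codes here are illustrative placeholders):
 */
#include <stddef.h>
#include <stdio.h>

static const int vcn_codes[]  = { 10, 11 };
static const int jpeg_codes[] = { 20, 21, 22, 23 };

struct blk { const char *name; const int *codes; size_t n; };

static const struct blk blocks[] = {
	{ "vcn",  vcn_codes,  sizeof(vcn_codes) / sizeof(vcn_codes[0]) },
	{ "jpeg", jpeg_codes, sizeof(jpeg_codes) / sizeof(jpeg_codes[0]) },
};

static const char *owner_of(int errcode)
{
	for (size_t i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
		for (size_t j = 0; j < blocks[i].n; j++)
			if (blocks[i].codes[j] == errcode)
				return blocks[i].name;
	return "unclaimed";
}

int main(void)
{
	printf("errcode 21 -> %s\n", owner_of(21)); /* jpeg */
	return 0;
}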
@@ -3219,9 +3484,10 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
{
+ struct smu_context *smu = adev->powerplay.pp_handle;
int error_code;
- if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND)))
error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
else
error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
@@ -3259,7 +3525,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_6_check_fw_status,
/* pptable related */
- .check_fw_version = smu_v13_0_check_fw_version,
+ .check_fw_version = smu_v13_0_6_check_fw_version,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
@@ -3292,6 +3558,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.i2c_fini = smu_v13_0_6_i2c_control_fini,
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
+ .reset_sdma = smu_v13_0_6_reset_sdma,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index b891a5e0a396..55ef18517b0f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -83,6 +83,8 @@
#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
+#define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11
+#define PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP 12
#define LINK_SPEED_MAX 3
@@ -734,19 +736,6 @@ static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static void smu_v13_0_7_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *pptable = table_context->driver_pptable;
- SkuTable_t *skutable = &pptable->SkuTable;
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version);
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]);
-}
-
static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics)
{
uint32_t throttler_status = 0;
@@ -1132,6 +1121,14 @@ static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
od_max_setting = overdrive_upperlimits->FanMinimumPwm;
break;
+ case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE:
+ od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable;
+ od_max_setting = overdrive_upperlimits->FanZeroRpmEnable;
+ break;
+ case PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP:
+ od_min_setting = overdrive_lowerlimits->FanZeroRpmStopTemp;
+ od_max_setting = overdrive_upperlimits->FanZeroRpmStopTemp;
+ break;
default:
od_min_setting = od_max_setting = INT_MAX;
break;
@@ -1452,6 +1449,42 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
min_value, max_value);
break;
+ case SMU_OD_FAN_ZERO_RPM_ENABLE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_ZERO_FAN_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanZeroRpmEnable);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n",
+ min_value, max_value);
+ break;
+
+ case SMU_OD_FAN_ZERO_RPM_STOP_TEMP:
+ if (!smu_v13_0_7_is_od_feature_supported(smu,
+ PP_OD_FEATURE_ZERO_FAN_BIT))
+ break;
+
+ size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_STOP_TEMPERATURE:\n");
+ size += sysfs_emit_at(buf, size, "%d\n",
+ (int)od_table->OverDriveTable.FanZeroRpmStopTemp);
+
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP,
+ &min_value,
+ &max_value);
+ size += sysfs_emit_at(buf, size, "ZERO_RPM_STOP_TEMPERATURE: %u %u\n",
+ min_value, max_value);
+ break;
+
case SMU_OD_RANGE:
if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
@@ -1548,6 +1581,16 @@ static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long inp
od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
+ case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
+ od_table->OverDriveTable.FanZeroRpmEnable =
+ boot_overdrive_table->OverDriveTable.FanZeroRpmEnable;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
+ case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP:
+ od_table->OverDriveTable.FanZeroRpmStopTemp =
+ boot_overdrive_table->OverDriveTable.FanZeroRpmStopTemp;
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
default:
dev_info(adev->dev, "Invalid table index: %ld\n", input);
return -EINVAL;
@@ -1841,6 +1884,48 @@ static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu,
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
break;
+ case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
+ dev_warn(adev->dev, "Zero RPM setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanZeroRpmEnable = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
+
+ case PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP:
+ if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
+ dev_warn(adev->dev, "Zero RPM setting not supported!\n");
+ return -ENOTSUPP;
+ }
+
+ smu_v13_0_7_get_od_setting_limits(smu,
+ PP_OD_FEATURE_FAN_ZERO_RPM_STOP_TEMP,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "zero RPM stop temperature setting(%ld) must be within [%d, %d]!\n",
+ input[0], minimum, maximum);
+ return -EINVAL;
+ }
+
+ od_table->OverDriveTable.FanZeroRpmStopTemp = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
+ break;
+
case PP_OD_RESTORE_DEFAULT_TABLE:
if (size == 1) {
ret = smu_v13_0_7_od_restore_table_single(smu, input[0]);
@@ -1964,7 +2049,8 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
ret = smu_v13_0_set_soft_freq_limited_range(smu,
clk_type,
min_freq,
- max_freq);
+ max_freq,
+ false);
break;
case SMU_DCEFCLK:
case SMU_PCIE:
@@ -2061,6 +2147,8 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
+ gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
@@ -2106,7 +2194,11 @@ static void smu_v13_0_7_set_supported_od_feature_mask(struct smu_context *smu)
OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
- OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
+ OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE |
+ OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET;
}
static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu)
@@ -2172,6 +2264,10 @@ static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu)
user_od_table_bak.OverDriveTable.FanTargetTemperature;
user_od_table->OverDriveTable.FanMinimumPwm =
user_od_table_bak.OverDriveTable.FanMinimumPwm;
+ user_od_table->OverDriveTable.FanZeroRpmEnable =
+ user_od_table_bak.OverDriveTable.FanZeroRpmEnable;
+ user_od_table->OverDriveTable.FanZeroRpmStopTemp =
+ user_od_table_bak.OverDriveTable.FanZeroRpmStopTemp;
}
smu_v13_0_7_set_supported_od_feature_mask(smu);
@@ -2434,78 +2530,110 @@ out:
return result;
}
-static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+#define SMU_13_0_7_CUSTOM_PARAMS_COUNT 8
+#define SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT 2
+#define SMU_13_0_7_CUSTOM_PARAMS_SIZE (SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_7_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
- int workload_type, ret = 0;
+ int ret, idx;
- smu->power_profile_mode = input[size];
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external), false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
- if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
- return -EINVAL;
+ idx = 0 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor->Gfx_ActiveHystLimit = input[idx + 1];
+ activity_monitor->Gfx_IdleHystLimit = input[idx + 2];
+ activity_monitor->Gfx_FPS = input[idx + 3];
+ activity_monitor->Gfx_MinActiveFreqType = input[idx + 4];
+ activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
+ activity_monitor->Gfx_MinActiveFreq = input[idx + 6];
+ activity_monitor->Gfx_BoosterFreq = input[idx + 7];
+ }
+ idx = 1 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Fclk */
+ activity_monitor->Fclk_ActiveHystLimit = input[idx + 1];
+ activity_monitor->Fclk_IdleHystLimit = input[idx + 2];
+ activity_monitor->Fclk_FPS = input[idx + 3];
+ activity_monitor->Fclk_MinActiveFreqType = input[idx + 4];
+ activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
+ activity_monitor->Fclk_MinActiveFreq = input[idx + 6];
+ activity_monitor->Fclk_BoosterFreq = input[idx + 7];
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 8)
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external), true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external), false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
+ return ret;
+}
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor->Gfx_ActiveHystLimit = input[1];
- activity_monitor->Gfx_IdleHystLimit = input[2];
- activity_monitor->Gfx_FPS = input[3];
- activity_monitor->Gfx_MinActiveFreqType = input[4];
- activity_monitor->Gfx_BoosterFreqType = input[5];
- activity_monitor->Gfx_MinActiveFreq = input[6];
- activity_monitor->Gfx_BoosterFreq = input[7];
- break;
- case 1: /* Fclk */
- activity_monitor->Fclk_ActiveHystLimit = input[1];
- activity_monitor->Fclk_IdleHystLimit = input[2];
- activity_monitor->Fclk_FPS = input[3];
- activity_monitor->Fclk_MinActiveFreqType = input[4];
- activity_monitor->Fclk_BoosterFreqType = input[5];
- activity_monitor->Fclk_MinActiveFreq = input[6];
- activity_monitor->Fclk_BoosterFreq = input[7];
- break;
- default:
- return -EINVAL;
+static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int ret, idx = -1, i;
+
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
+
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params =
+ kzalloc(SMU_13_0_7_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
}
-
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external), true);
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != SMU_13_0_7_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = smu_v13_0_7_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, SMU_13_0_7_CUSTOM_PARAMS_SIZE);
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
- if (workload_type < 0)
- return -EINVAL;
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type, NULL);
+ backend_workload_mask, NULL);
- if (ret)
- dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
- else
- smu->workload_mask = (1 << workload_type);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
return ret;
}
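The rewritten profile handler stages custom parameters in a persistent buffer of SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT rows of SMU_13_0_7_CUSTOM_PARAMS_COUNT longs, where entry 0 of each row flags that the row holds valid data. A standalone sketch of that staging step (stage_custom_params is a made-up name):

	#include <stdlib.h>
	#include <string.h>

	#define PARAMS_COUNT	8	/* per clock block, cf. SMU_13_0_7_CUSTOM_PARAMS_COUNT */
	#define CLOCK_COUNT	2	/* Gfxclk and Fclk rows */

	/* row[0] picks the clock block; entries 1..7 carry the coefficients.
	 * The buffer is allocated lazily and survives across calls, matching
	 * smu->custom_profile_params in the hunk above. */
	static int stage_custom_params(long **store, const long *row)
	{
		long idx;

		if (!*store) {
			*store = calloc(CLOCK_COUNT * PARAMS_COUNT, sizeof(long));
			if (!*store)
				return -1;
		}
		if (row[0] < 0 || row[0] >= CLOCK_COUNT)
			return -1;
		idx = row[0] * PARAMS_COUNT;
		(*store)[idx] = 1;		/* mark this row as populated */
		memcpy(&(*store)[idx + 1], &row[1],
		       (PARAMS_COUNT - 1) * sizeof(long));
		return 0;
	}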
@@ -2605,7 +2733,6 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
.is_dpm_running = smu_v13_0_7_is_dpm_running,
- .dump_pptable = smu_v13_0_7_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
.fini_microcode = smu_v13_0_fini_microcode,
@@ -2670,6 +2797,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+ .interrupt_work = smu_v13_0_interrupt_work,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
@@ -2683,4 +2811,5 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = smu_v13_0_7_workload_map;
smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
smu_v13_0_set_smu_mailbox_registers(smu);
+ smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 260c339f89c5..73b4506ef5a8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -220,7 +220,9 @@ static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
return ret;
}
-static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable,
+ int inst)
{
int ret = 0;
@@ -945,9 +947,10 @@ failed:
}
static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min,
- uint32_t max)
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max,
+ bool automatic)
{
enum smu_message_type msg_set_min, msg_set_max;
uint32_t min_clk = min;
@@ -1134,7 +1137,7 @@ static int yellow_carp_force_clk_levels(struct smu_context *smu,
if (ret)
goto force_level_out;
- ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
goto force_level_out;
break;
@@ -1254,9 +1257,10 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
if (sclk_min && sclk_max) {
ret = yellow_carp_set_soft_freq_limited_range(smu,
- SMU_SCLK,
- sclk_min,
- sclk_max);
+ SMU_SCLK,
+ sclk_min,
+ sclk_max,
+ false);
if (ret)
return ret;
@@ -1266,18 +1270,20 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
if (fclk_min && fclk_max) {
ret = yellow_carp_set_soft_freq_limited_range(smu,
- SMU_FCLK,
- fclk_min,
- fclk_max);
+ SMU_FCLK,
+ fclk_min,
+ fclk_max,
+ false);
if (ret)
return ret;
}
if (socclk_min && socclk_max) {
ret = yellow_carp_set_soft_freq_limited_range(smu,
- SMU_SOCCLK,
- socclk_min,
- socclk_max);
+ SMU_SOCCLK,
+ socclk_min,
+ socclk_max,
+ false);
if (ret)
return ret;
}
@@ -1286,7 +1292,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
ret = yellow_carp_set_soft_freq_limited_range(smu,
SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ false);
if (ret)
return ret;
}
@@ -1295,7 +1302,8 @@ static int yellow_carp_set_performance_level(struct smu_context *smu,
ret = yellow_carp_set_soft_freq_limited_range(smu,
SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ false);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 865e916fc425..9b2f4fe1578b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -49,7 +49,7 @@
#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0
const int decoded_link_speed[5] = {1, 2, 3, 4, 5};
-const int decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
+const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32};
/*
* DO NOT use these for err/warn/info/debug messages.
* Use dev_err, dev_warn, dev_info and dev_dbg instead.
@@ -79,7 +79,8 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
@@ -1102,7 +1103,8 @@ failed:
int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t min,
- uint32_t max)
+ uint32_t max,
+ bool automatic)
{
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1117,7 +1119,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
return clk_id;
if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0xffff);
+ else
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
@@ -1125,7 +1130,10 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
}
if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ if (automatic)
+ param = (uint32_t)((clk_id << 16) | 0);
+ else
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
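The new automatic flag changes only how the 32-bit message parameter is packed: the clock id sits in the upper half, and the frequency bound in the lower half is forced wide open (0xffff for the max message, 0 for the min message) when automatic mode is requested. A sketch of just that packing:

	#include <stdint.h>

	static uint32_t pack_soft_freq(int clk_id, uint32_t freq,
				       int automatic, int is_max)
	{
		uint16_t bound;

		if (automatic)
			bound = is_max ? 0xffff : 0;	/* widest possible range */
		else
			bound = freq & 0xffff;
		return ((uint32_t)clk_id << 16) | bound;
	}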
@@ -1202,6 +1210,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
uint32_t dclk_min = 0, dclk_max = 0;
uint32_t fclk_min = 0, fclk_max = 0;
int ret = 0, i;
+ bool auto_level = false;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1233,6 +1242,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
dclk_max = dclk_table->max;
fclk_min = fclk_table->min;
fclk_max = fclk_table->max;
+ auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
@@ -1268,7 +1278,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
SMU_GFXCLK,
sclk_min,
- sclk_max);
+ sclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1280,7 +1291,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
SMU_MCLK,
mclk_min,
- mclk_max);
+ mclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1292,7 +1304,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
SMU_SOCCLK,
socclk_min,
- socclk_max);
+ socclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1307,7 +1320,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
i ? SMU_VCLK1 : SMU_VCLK,
vclk_min,
- vclk_max);
+ vclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1322,7 +1336,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
i ? SMU_DCLK1 : SMU_DCLK,
dclk_min,
- dclk_max);
+ dclk_max,
+ auto_level);
if (ret)
return ret;
}
@@ -1334,7 +1349,8 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
SMU_FCLK,
fclk_min,
- fclk_max);
+ fclk_max,
+ auto_level);
if (ret)
return ret;
@@ -1492,32 +1508,28 @@ int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
}
int smu_v14_0_set_vcn_enable(struct smu_context *smu,
- bool enable)
+ bool enable,
+ int inst)
{
struct amdgpu_device *adev = smu->adev;
- int i, ret = 0;
+ int ret = 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return ret;
- if (smu->is_apu) {
- if (i == 0)
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
- i << 16U, NULL);
- else if (i == 1)
- ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
- i << 16U, NULL);
- } else {
+ if (smu->is_apu) {
+ if (inst == 0)
ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
- SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
- i << 16U, NULL);
- }
-
- if (ret)
- return ret;
+ SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
+ inst << 16U, NULL);
+ else if (inst == 1)
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
+ inst << 16U, NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ inst << 16U, NULL);
}
return ret;
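smu_v14_0_set_vcn_enable now acts on a single instance instead of looping over all of them; the caller is expected to iterate. The message choice reduces to: APUs use a per-instance message, dGPUs one shared message, and the instance always rides in the upper half of the parameter. A compact sketch of that dispatch (the enum values are illustrative):

	enum vcn_msg { MSG_POWER_VCN0, MSG_POWER_VCN1, MSG_POWER_VCN };

	static enum vcn_msg pick_vcn_msg(int is_apu, int inst)
	{
		if (is_apu)
			return inst == 0 ? MSG_POWER_VCN0 : MSG_POWER_VCN1;
		return MSG_POWER_VCN;	/* dGPU: one message for all instances */
	}
	/* In both cases the message parameter is inst << 16. */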
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 8798ebfcea83..84f9b007b59f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1132,7 +1132,7 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, size = 0, ret = 0;
+ int i, idx, ret = 0, size = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
@@ -1168,7 +1168,8 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
break;
for (i = 0; i < count; i++) {
- ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+ ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, idx, &value);
if (ret)
break;
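The MCLK fix above suggests the firmware reports memory-clock levels in descending order (an inference from this hunk, not stated in it), so the print loop mirrors the index to keep the sysfs listing ascending:

	static int print_index(int clk_is_mclk, int i, int count)
	{
		return clk_is_mclk ? count - i - 1 : i;
	}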
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5899d01fa73d..5cad09c5f2ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -367,54 +367,6 @@ static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu)
return 0;
}
-#ifndef atom_smc_dpm_info_table_14_0_0
-struct atom_smc_dpm_info_table_14_0_0 {
- struct atom_common_table_header table_header;
- BoardTable_t BoardTable;
-};
-#endif
-
-static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *smc_pptable = table_context->driver_pptable;
- struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table;
- BoardTable_t *BoardTable = &smc_pptable->BoardTable;
- int index, ret;
-
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- smc_dpm_info);
-
- ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
- (uint8_t **)&smc_dpm_table);
- if (ret)
- return ret;
-
- memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));
-
- return 0;
-}
-
-#if 0
-static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
- void **table,
- uint32_t *size)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
- void *combo_pptable = smu_table->combo_pptable;
- int ret = 0;
-
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- *table = combo_pptable;
- *size = sizeof(struct smu_14_0_powerplay_table);
-
- return 0;
-}
-#endif
-
static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
void **table,
uint32_t *size)
@@ -436,16 +388,12 @@ static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
if (amdgpu_sriov_vf(smu->adev))
return 0;
- if (!adev->scpm_enabled)
- ret = smu_v14_0_setup_pptable(smu);
- else
- ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
+ ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
&smu_table->power_play_table,
&smu_table->power_play_table_size);
if (ret)
@@ -455,16 +403,6 @@ static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
if (ret)
return ret;
- /*
- * With SCPM enabled, the operation below will be handled
- * by PSP. Driver involvment is unnecessary and useless.
- */
- if (!adev->scpm_enabled) {
- ret = smu_v14_0_2_append_powerplay_table(smu);
- if (ret)
- return ret;
- }
-
ret = smu_v14_0_2_check_powerplay_table(smu);
if (ret)
return ret;
@@ -732,19 +670,6 @@ static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static void smu_v14_0_2_dump_pptable(struct smu_context *smu)
-{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *pptable = table_context->driver_pptable;
- PFE_Settings_t *PFEsettings = &pptable->PFE_Settings;
-
- dev_info(smu->adev->dev, "Dumped PPTable:\n");
-
- dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version);
- dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]);
- dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]);
-}
-
static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
{
uint32_t throttler_status = 0;
@@ -1077,12 +1002,9 @@ static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
switch (od_feature_bit) {
case PP_OD_FEATURE_GFXCLK_FMIN:
- od_min_setting = overdrive_lowerlimits->GfxclkFmin;
- od_max_setting = overdrive_upperlimits->GfxclkFmin;
- break;
case PP_OD_FEATURE_GFXCLK_FMAX:
- od_min_setting = overdrive_lowerlimits->GfxclkFmax;
- od_max_setting = overdrive_upperlimits->GfxclkFmax;
+ od_min_setting = overdrive_lowerlimits->GfxclkFoffset;
+ od_max_setting = overdrive_upperlimits->GfxclkFoffset;
break;
case PP_OD_FEATURE_UCLK_FMIN:
od_min_setting = overdrive_lowerlimits->UclkFmin;
@@ -1251,13 +1173,15 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
- (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
+ (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," :
+ (pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," : "",
(pcie_table->pcie_lane[i] == 1) ? "x1" :
(pcie_table->pcie_lane[i] == 2) ? "x2" :
(pcie_table->pcie_lane[i] == 3) ? "x4" :
(pcie_table->pcie_lane[i] == 4) ? "x8" :
(pcie_table->pcie_lane[i] == 5) ? "x12" :
- (pcie_table->pcie_lane[i] == 6) ? "x16" : "",
+ (pcie_table->pcie_lane[i] == 6) ? "x16" :
+ (pcie_table->pcie_lane[i] == 7) ? "x32" : "",
pcie_table->clk_freq[i],
(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
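The chained ternaries extended above are equivalent to two small lookup tables; a standalone sketch of the same decode, with values mirroring the strings in the hunk:

	static const char * const gen_str[] = {
		"2.5GT/s,", "5.0GT/s,", "8.0GT/s,", "16.0GT/s,", "32.0GT/s,",
	};
	static const char * const lane_str[] = {
		"", "x1", "x2", "x4", "x8", "x12", "x16", "x32",
	};

	static const char *decode_gen(unsigned int gen)
	{
		return gen < 5 ? gen_str[gen] : "";
	}

	static const char *decode_lane(unsigned int lane)
	{
		return (lane >= 1 && lane <= 7) ? lane_str[lane] : "";
	}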
@@ -1269,10 +1193,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
- size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
- od_table->OverDriveTable.GfxclkFmin,
- od_table->OverDriveTable.GfxclkFmax);
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ const OverDriveLimits_t * const overdrive_upperlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMax;
+ const OverDriveLimits_t * const overdrive_lowerlimits =
+ &pptable->SkuTable.OverDriveLimitsBasicMin;
+
+ size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
+ size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
+ overdrive_lowerlimits->GfxclkFoffset,
+ overdrive_upperlimits->GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1414,7 +1344,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_FMAX,
NULL,
&max_value);
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
}
@@ -1516,7 +1446,8 @@ static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
ret = smu_v14_0_set_soft_freq_limited_range(smu,
clk_type,
min_freq,
- max_freq);
+ max_freq,
+ false);
break;
case SMU_DCEFCLK:
case SMU_PCIE:
@@ -1534,15 +1465,35 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_14_0_pcie_table *pcie_table =
&dpm_context->dpm_tables.pcie_table;
+ int num_of_levels = pcie_table->num_of_link_levels;
uint32_t smu_pcie_arg;
int ret, i;
- for (i = 0; i < pcie_table->num_of_link_levels; i++) {
- if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ if (!num_of_levels)
+ return 0;
+
+ if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
+ pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
+
+ if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
+ pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
+
+ /* Force all levels to use the same settings */
+ for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
- if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
+ }
+ } else {
+ for (i = 0; i < num_of_levels; i++) {
+ if (pcie_table->pcie_gen[i] > pcie_gen_cap)
+ pcie_table->pcie_gen[i] = pcie_gen_cap;
+ if (pcie_table->pcie_lane[i] > pcie_width_cap)
+ pcie_table->pcie_lane[i] = pcie_width_cap;
+ }
+ }
+ for (i = 0; i < num_of_levels; i++) {
smu_pcie_arg = i << 16;
smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
smu_pcie_arg |= pcie_table->pcie_lane[i];
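The reworked capping logic has two regimes: with PCIe DPM masked off, every level is pinned to a single setting, the smaller of the platform cap and the table's top level; with DPM on, each level is merely clamped from above. A standalone sketch:

	static void cap_pcie_levels(int dpm_enabled, int n,
				    unsigned int *gen, unsigned int *lane,
				    unsigned int gen_cap, unsigned int width_cap)
	{
		int i;

		if (!dpm_enabled) {
			if (gen[n - 1] < gen_cap)
				gen_cap = gen[n - 1];
			if (lane[n - 1] < width_cap)
				width_cap = lane[n - 1];
			for (i = 0; i < n; i++) {	/* force one setting */
				gen[i] = gen_cap;
				lane[i] = width_cap;
			}
		} else {
			for (i = 0; i < n; i++) {	/* clamp from above */
				if (gen[i] > gen_cap)
					gen[i] = gen_cap;
				if (lane[i] > width_cap)
					lane[i] = width_cap;
			}
		}
	}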
@@ -1788,85 +1739,120 @@ static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
return size;
}
-static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
- long *input,
- uint32_t size)
+#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9
+#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2
+#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long))
+
+static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
- int workload_type, ret = 0;
+ int ret, idx;
- smu->power_profile_mode = input[size];
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ false);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
- if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
- return -EINVAL;
+ idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Gfxclk */
+ activity_monitor->Gfx_FPS = input[idx + 1];
+ activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
+ activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
+ activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
+ activity_monitor->Gfx_BoosterFreq = input[idx + 5];
+ activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
+ activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
+ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
+ }
+ idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
+ if (input[idx]) {
+ /* Fclk */
+ activity_monitor->Fclk_FPS = input[idx + 1];
+ activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
+ activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
+ activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
+ activity_monitor->Fclk_BoosterFreq = input[idx + 5];
+ activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
+ activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
+ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
}
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 9)
- return -EINVAL;
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT,
+ (void *)(&activity_monitor_external),
+ true);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
+ return ret;
+}
- switch (input[0]) {
- case 0: /* Gfxclk */
- activity_monitor->Gfx_FPS = input[1];
- activity_monitor->Gfx_MinActiveFreqType = input[2];
- activity_monitor->Gfx_MinActiveFreq = input[3];
- activity_monitor->Gfx_BoosterFreqType = input[4];
- activity_monitor->Gfx_BoosterFreq = input[5];
- activity_monitor->Gfx_PD_Data_limit_c = input[6];
- activity_monitor->Gfx_PD_Data_error_coeff = input[7];
- activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
- break;
- case 1: /* Fclk */
- activity_monitor->Fclk_FPS = input[1];
- activity_monitor->Fclk_MinActiveFreqType = input[2];
- activity_monitor->Fclk_MinActiveFreq = input[3];
- activity_monitor->Fclk_BoosterFreqType = input[4];
- activity_monitor->Fclk_BoosterFreq = input[5];
- activity_monitor->Fclk_PD_Data_limit_c = input[6];
- activity_monitor->Fclk_PD_Data_error_coeff = input[7];
- activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
- break;
- default:
- return -EINVAL;
- }
+static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
+ u32 workload_mask,
+ long *custom_params,
+ u32 custom_params_max_idx)
+{
+ u32 backend_workload_mask = 0;
+ int ret, idx = -1, i;
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- true);
+ smu_cmn_get_backend_workload_mask(smu, workload_mask,
+ &backend_workload_mask);
+
+ /* disable deep sleep if compute is enabled */
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
+ smu_v14_0_deep_sleep_control(smu, false);
+ else
+ smu_v14_0_deep_sleep_control(smu, true);
+
+ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
+ if (!smu->custom_profile_params) {
+ smu->custom_profile_params =
+ kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
+ if (!smu->custom_profile_params)
+ return -ENOMEM;
+ }
+ if (custom_params && custom_params_max_idx) {
+ if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT)
+ return -EINVAL;
+ if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT)
+ return -EINVAL;
+ idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
+ smu->custom_profile_params[idx] = 1;
+ for (i = 1; i < custom_params_max_idx; i++)
+ smu->custom_profile_params[idx + i] = custom_params[i];
+ }
+ ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
+ smu->custom_profile_params);
if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
return ret;
}
+ } else if (smu->custom_profile_params) {
+ memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE);
}
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- smu->power_profile_mode);
- if (workload_type < 0)
- return -EINVAL;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetWorkloadMask,
- 1 << workload_type,
- NULL);
- if (!ret)
- smu->workload_mask = 1 << workload_type;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ backend_workload_mask, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
+ workload_mask);
+ if (idx != -1)
+ smu->custom_profile_params[idx] = 0;
+ return ret;
+ }
return ret;
}
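One detail specific to the smu_v14_0_2 variant: deep sleep is disabled whenever the requested mask includes the compute profile, and re-enabled otherwise. A sketch of that single decision (PROFILE_COMPUTE's value here is illustrative, not the real enum):

	#include <stdint.h>

	#define PROFILE_COMPUTE 5	/* illustrative bit index */

	static int want_deep_sleep(uint32_t workload_mask)
	{
		return !(workload_mask & (1u << PROFILE_COMPUTE));
	}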
@@ -2110,7 +2096,7 @@ static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2))
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
FEATURE_PWR_GFX, NULL);
else
@@ -2158,7 +2144,7 @@ static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
- gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
+ gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
@@ -2217,8 +2203,7 @@ static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
- od_table->OverDriveTable.GfxclkFmax);
+ dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od_table->OverDriveTable.GfxclkFoffset);
dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
od_table->OverDriveTable.UclkFmax);
}
@@ -2309,10 +2294,8 @@ static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
memcpy(user_od_table,
boot_od_table,
sizeof(OverDriveTableExternal_t));
- user_od_table->OverDriveTable.GfxclkFmin =
- user_od_table_bak.OverDriveTable.GfxclkFmin;
- user_od_table->OverDriveTable.GfxclkFmax =
- user_od_table_bak.OverDriveTable.GfxclkFmax;
+ user_od_table->OverDriveTable.GfxclkFoffset =
+ user_od_table_bak.OverDriveTable.GfxclkFoffset;
user_od_table->OverDriveTable.UclkFmin =
user_od_table_bak.OverDriveTable.UclkFmin;
user_od_table->OverDriveTable.UclkFmax =
@@ -2441,22 +2424,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
switch (input[i]) {
- case 0:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFmin = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
-
case 1:
smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
@@ -2469,7 +2436,7 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
- od_table->OverDriveTable.GfxclkFmax = input[i + 1];
+ od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
@@ -2480,13 +2447,6 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
}
}
- if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
- dev_err(adev->dev,
- "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
- (uint32_t)od_table->OverDriveTable.GfxclkFmin,
- (uint32_t)od_table->OverDriveTable.GfxclkFmax);
- return -EINVAL;
- }
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
@@ -2806,7 +2766,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.i2c_init = smu_v14_0_2_i2c_control_init,
.i2c_fini = smu_v14_0_2_i2c_control_fini,
.is_dpm_running = smu_v14_0_2_is_dpm_running,
- .dump_pptable = smu_v14_0_2_dump_pptable,
.init_microcode = smu_v14_0_init_microcode,
.load_microcode = smu_v14_0_load_microcode,
.fini_microcode = smu_v14_0_fini_microcode,
@@ -2817,7 +2776,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.check_fw_status = smu_v14_0_check_fw_status,
.setup_pptable = smu_v14_0_2_setup_pptable,
.check_fw_version = smu_v14_0_check_fw_version,
- .write_pptable = smu_cmn_write_pptable,
.set_driver_table_location = smu_v14_0_set_driver_table_location,
.system_features_control = smu_v14_0_system_features_control,
.set_allowed_mask = smu_v14_0_set_allowed_mask,
@@ -2848,7 +2806,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.get_unique_id = smu_v14_0_2_get_unique_id,
.get_power_limit = smu_v14_0_2_get_power_limit,
.set_power_limit = smu_v14_0_2_set_power_limit,
- .set_power_source = smu_v14_0_set_power_source,
.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
.set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
.run_btc = smu_v14_0_run_btc,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 91ad434bcdae..9f55207ea9bc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -1078,6 +1078,12 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(1, 5):
structure_size = sizeof(struct gpu_metrics_v1_5);
break;
+ case METRICS_VERSION(1, 6):
+ structure_size = sizeof(struct gpu_metrics_v1_6);
+ break;
+ case METRICS_VERSION(1, 7):
+ structure_size = sizeof(struct gpu_metrics_v1_7);
+ break;
case METRICS_VERSION(2, 0):
structure_size = sizeof(struct gpu_metrics_v2_0);
break;
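smu_cmn_init_soft_gpu_metrics keys the structure size off a packed (frev, crev) pair; a standalone sketch of that dispatch for the two new cases, assuming METRICS_VERSION packs the format revision into the upper half (the macro body is an assumption, and the struct layouts are stand-ins):

	#include <stddef.h>
	#include <stdint.h>

	#define METRICS_VERSION(f, c) (((f) << 16) | (c))	/* assumed packing */

	struct gpu_metrics_v1_6 { char payload[128]; };	/* stand-in layouts */
	struct gpu_metrics_v1_7 { char payload[144]; };

	static size_t metrics_size(uint8_t frev, uint8_t crev)
	{
		switch (METRICS_VERSION(frev, crev)) {
		case METRICS_VERSION(1, 6):
			return sizeof(struct gpu_metrics_v1_6);
		case METRICS_VERSION(1, 7):
			return sizeof(struct gpu_metrics_v1_7);
		default:
			return 0;
		}
	}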
@@ -1215,3 +1221,28 @@ void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
{
policy->desc = &xgmi_plpd_policy_desc;
}
+
+void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
+ u32 workload_mask,
+ u32 *backend_workload_mask)
+{
+ int workload_type;
+ u32 profile_mode;
+
+ *backend_workload_mask = 0;
+
+ for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
+ if (!(workload_mask & (1 << profile_mode)))
+ continue;
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_WORKLOAD,
+ profile_mode);
+
+ if (workload_type < 0)
+ continue;
+
+ *backend_workload_mask |= 1 << workload_type;
+ }
+}
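The new helper translates a frontend profile mask into the firmware's workload mask bit by bit, silently skipping profiles the ASIC table cannot map. The same logic as a standalone function, with the mapping table passed in explicitly:

	#include <stdint.h>

	static uint32_t to_backend_mask(uint32_t frontend_mask,
					const int *asic_map, int count)
	{
		uint32_t backend = 0;
		int mode;

		for (mode = 0; mode < count; mode++) {
			if (!(frontend_mask & (1u << mode)))
				continue;
			if (asic_map[mode] < 0)	/* unmapped profile: skip */
				continue;
			backend |= 1u << asic_map[mode];
		}
		return backend;
	}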
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 1de685defe85..a020277dec3e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -147,5 +147,9 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);
+void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
+ u32 workload_mask,
+ u32 *backend_workload_mask);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 6f4d212607d7..c09ecf1a68a0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -78,7 +78,6 @@
#define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu)
#define smu_get_dpm_ultimate_freq(smu, param, min, max) smu_ppt_funcs(get_dpm_ultimate_freq, 0, smu, param, min, max)
#define smu_asic_set_performance_level(smu, level) smu_ppt_funcs(set_performance_level, -EINVAL, smu, level)
-#define smu_dump_pptable(smu) smu_ppt_funcs(dump_pptable, 0, smu)
#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap)
#define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src)
#define smu_i2c_init(smu) smu_ppt_funcs(i2c_init, 0, smu)
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index ddf20708370f..c901ac00c0c3 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -6,6 +6,7 @@ config DRM_HDLCD
tristate "ARM HDLCD"
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
help
@@ -27,6 +28,7 @@ config DRM_MALI_DISPLAY
tristate "ARM Mali Display Processor"
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
index 4acc4285a4eb..415c10a6374b 100644
--- a/drivers/gpu/drm/arm/display/Kconfig
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -3,6 +3,7 @@ config DRM_KOMEDA
tristate "ARM Komeda display driver"
depends on DRM && OF
depends on COMMON_CLK
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 55c3773befde..358c1512b087 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -9,7 +9,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_fbdev_dma.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
@@ -84,7 +84,7 @@ static int komeda_platform_probe(struct platform_device *pdev)
}
dev_set_drvdata(dev, mdrv);
- drm_fbdev_dma_setup(&mdrv->kms->base, 32);
+ drm_client_setup(&mdrv->kms->base, NULL);
return 0;
@@ -153,7 +153,7 @@ static const struct dev_pm_ops komeda_pm_ops = {
static struct platform_driver komeda_platform_driver = {
.probe = komeda_platform_probe,
- .remove_new = komeda_platform_remove,
+ .remove = komeda_platform_remove,
.shutdown = komeda_platform_shutdown,
.driver = {
.name = "komeda",
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index e5eb5d672bcd..6ed504099188 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
@@ -58,10 +59,10 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
static const struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &komeda_cma_fops,
.name = "komeda",
.desc = "Arm Komeda Display Processor driver",
- .date = "20181101",
.major = 0,
.minor = 1,
};
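The ARM display drivers below all follow the same three-step conversion that this komeda hunk shows: select DRM_CLIENT_SELECTION in Kconfig, add DRM_FBDEV_DMA_DRIVER_OPS to the struct drm_driver, and replace the explicit drm_fbdev_dma_setup(drm, 32) call with drm_client_setup(drm, NULL). A condensed sketch of the pattern as it appears in these hunks (not a complete driver):

	static const struct drm_driver example_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		DRM_GEM_DMA_DRIVER_OPS,
		DRM_FBDEV_DMA_DRIVER_OPS,	/* replaces the fbdev setup call */
		/* ... */
	};

	/* in probe/bind, after registration:
	 *	drm_client_setup(drm, NULL);	NULL = keep the preferred format */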
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index ebccb74306a7..f30b3d5eeca5 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -160,6 +160,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
&n_formats);
+ if (!formats) {
+ kfree(kwb_conn);
+ return -ENOMEM;
+ }
err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 32be9e370049..c3179d74f3f5 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -9,6 +9,7 @@
* ARM HDLCD Driver
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
@@ -21,7 +22,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
@@ -228,10 +229,10 @@ DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &fops,
.name = "hdlcd",
.desc = "ARM HDLCD Controller DRM",
- .date = "20151021",
.major = 1,
.minor = 0,
};
@@ -285,7 +286,7 @@ static int hdlcd_drm_bind(struct device *dev)
*/
if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) {
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
- drm_aperture_remove_framebuffers(&hdlcd_driver);
+ aperture_remove_all_conflicting_devices(hdlcd_driver.name);
}
drm_mode_config_reset(drm);
@@ -299,7 +300,7 @@ static int hdlcd_drm_bind(struct device *dev)
if (ret)
goto err_register;
- drm_fbdev_dma_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return 0;
@@ -403,7 +404,7 @@ static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume);
static struct platform_driver hdlcd_platform_driver = {
.probe = hdlcd_probe,
- .remove_new = hdlcd_remove,
+ .remove = hdlcd_remove,
.shutdown = hdlcd_shutdown,
.driver = {
.name = "hdlcd",
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 6682131d2910..e083021e9e99 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -16,6 +16,7 @@
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@@ -562,13 +563,13 @@ static void malidp_debugfs_init(struct drm_minor *minor)
static const struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = malidp_debugfs_init,
#endif
.fops = &fops,
.name = "mali-dp",
.desc = "ARM Mali Display Processor driver",
- .date = "20160106",
.major = 1,
.minor = 0,
};
@@ -852,7 +853,7 @@ static int malidp_bind(struct device *dev)
if (ret)
goto register_fail;
- drm_fbdev_dma_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return 0;
@@ -986,7 +987,7 @@ static const struct dev_pm_ops malidp_pm_ops = {
static struct platform_driver malidp_platform_driver = {
.probe = malidp_platform_probe,
- .remove_new = malidp_platform_remove,
+ .remove = malidp_platform_remove,
.shutdown = malidp_platform_shutdown,
.driver = {
.name = "mali-dp",
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
index e5597d7c9ae1..b22c891a670b 100644
--- a/drivers/gpu/drm/armada/Kconfig
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -2,6 +2,7 @@
config DRM_ARMADA
tristate "DRM support for Marvell Armada SoCs"
depends on DRM && HAVE_CLK && ARM && MMU
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
help
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index c78687c755a8..0900e4466ffb 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -1084,7 +1084,7 @@ MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);
struct platform_driver armada_lcd_platform_driver = {
.probe = armada_lcd_probe,
- .remove_new = armada_lcd_remove,
+ .remove = armada_lcd_remove,
.driver = {
.name = "armada-lcd",
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index c303e8c7ff6c..3c0ff221a43b 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -16,6 +16,8 @@ struct armada_crtc;
struct armada_gem_object;
struct clk;
struct drm_display_mode;
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
static inline void
armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
@@ -74,10 +76,13 @@ struct armada_private {
#define drm_to_armada_dev(dev) container_of(dev, struct armada_private, drm)
#if defined(CONFIG_DRM_FBDEV_EMULATION)
-void armada_fbdev_setup(struct drm_device *dev);
+int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
+ struct drm_fb_helper_surface_size *sizes);
+#define ARMADA_FBDEV_DRIVER_OPS \
+ .fbdev_probe = armada_fbdev_driver_fbdev_probe
#else
-static inline void armada_fbdev_setup(struct drm_device *dev)
-{ }
+#define ARMADA_FBDEV_DRIVER_OPS \
+ .fbdev_probe = NULL
#endif
int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index e51ecc4f7ef4..cae25ad66c74 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -3,6 +3,7 @@
* Copyright (C) 2012 Russell King
*/
+#include <linux/aperture.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
@@ -10,7 +11,7 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
@@ -39,11 +40,11 @@ DEFINE_DRM_GEM_FOPS(armada_drm_fops);
static const struct drm_driver armada_drm_driver = {
.gem_prime_import = armada_gem_prime_import,
.dumb_create = armada_gem_dumb_create,
+ ARMADA_FBDEV_DRIVER_OPS,
.major = 1,
.minor = 0,
.name = "armada-drm",
.desc = "Armada SoC DRM",
- .date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.num_ioctls = ARRAY_SIZE(armada_ioctls),
@@ -91,7 +92,7 @@ static int armada_drm_bind(struct device *dev)
}
/* Remove early framebuffers */
- ret = drm_aperture_remove_framebuffers(&armada_drm_driver);
+ ret = aperture_remove_all_conflicting_devices(armada_drm_driver.name);
if (ret) {
dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
__func__, ret);
@@ -137,7 +138,7 @@ static int armada_drm_bind(struct device *dev)
armada_drm_debugfs_init(priv->drm.primary);
#endif
- armada_fbdev_setup(&priv->drm);
+ drm_client_setup(&priv->drm, NULL);
return 0;
@@ -248,7 +249,7 @@ MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
static struct platform_driver armada_drm_platform_driver = {
.probe = armada_drm_probe,
- .remove_new = armada_drm_remove,
+ .remove = armada_drm_remove,
.shutdown = armada_drm_shutdown,
.driver = {
.name = "armada-drm",
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index d223176912b6..6ee7ce04ee71 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -39,8 +39,10 @@ static const struct fb_ops armada_fb_ops = {
.fb_destroy = armada_fbdev_fb_destroy,
};
-static int armada_fbdev_create(struct drm_fb_helper *fbh,
- struct drm_fb_helper_surface_size *sizes)
+static const struct drm_fb_helper_funcs armada_fbdev_helper_funcs;
+
+int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbh->dev;
struct drm_mode_fb_cmd2 mode;
@@ -98,6 +100,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
info->fix.smem_len = obj->obj.size;
info->screen_size = obj->obj.size;
info->screen_base = ptr;
+ fbh->funcs = &armada_fbdev_helper_funcs;
fbh->fb = &dfb->fb;
drm_fb_helper_fill_info(info, fbh, sizes);
@@ -112,109 +115,3 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
dfb->fb.funcs->destroy(&dfb->fb);
return ret;
}
-
-static int armada_fb_probe(struct drm_fb_helper *fbh,
- struct drm_fb_helper_surface_size *sizes)
-{
- int ret = 0;
-
- if (!fbh->fb) {
- ret = armada_fbdev_create(fbh, sizes);
- if (ret == 0)
- ret = 1;
- }
- return ret;
-}
-
-static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
- .fb_probe = armada_fb_probe,
-};
-
-/*
- * Fbdev client and struct drm_client_funcs
- */
-
-static void armada_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fbh = drm_fb_helper_from_client(client);
-
- if (fbh->info) {
- drm_fb_helper_unregister_info(fbh);
- } else {
- drm_client_release(&fbh->client);
- drm_fb_helper_unprepare(fbh);
- kfree(fbh);
- }
-}
-
-static int armada_fbdev_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
-
-static int armada_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fbh = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- int ret;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fbh);
- if (ret)
- goto err_drm_err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fbh);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fbh);
-err_drm_err:
- drm_err(dev, "armada: Failed to setup fbdev emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs armada_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = armada_fbdev_client_unregister,
- .restore = armada_fbdev_client_restore,
- .hotplug = armada_fbdev_client_hotplug,
-};
-
-void armada_fbdev_setup(struct drm_device *dev)
-{
- struct drm_fb_helper *fbh;
- int ret;
-
- drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
- drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
-
- fbh = kzalloc(sizeof(*fbh), GFP_KERNEL);
- if (!fbh)
- return;
- drm_fb_helper_prepare(dev, fbh, 32, &armada_fb_helper_funcs);
-
- ret = drm_client_init(dev, &fbh->client, "fbdev", &armada_fbdev_client_funcs);
- if (ret) {
- drm_err(dev, "Failed to register client: %d\n", ret);
- goto err_drm_client_init;
- }
-
- drm_client_register(&fbh->client);
-
- return;
-
-err_drm_client_init:
- drm_fb_helper_unprepare(fbh);
- kfree(fbh);
- return;
-}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 26d10065d534..1a1680d71486 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -15,7 +15,7 @@
#include "armada_gem.h"
#include "armada_ioctlP.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
index 8137c39b057b..6e68f20aac21 100644
--- a/drivers/gpu/drm/aspeed/Kconfig
+++ b/drivers/gpu/drm/aspeed/Kconfig
@@ -4,6 +4,7 @@ config DRM_ASPEED_GFX
depends on DRM && OF
depends on (COMPILE_TEST || ARCH_ASPEED)
depends on MMU
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DMA_CMA if HAVE_DMA_CONTIGUOUS
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index a7a6b70220eb..397e677a691c 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fbdev_dma.h>
@@ -247,10 +248,10 @@ DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver aspeed_gfx_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &fops,
.name = "aspeed-gfx-drm",
.desc = "ASPEED GFX DRM",
- .date = "20180319",
.major = 1,
.minor = 0,
};
@@ -339,7 +340,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_dma_setup(&priv->drm, 32);
+ drm_client_setup(&priv->drm, NULL);
return 0;
err_unload:
@@ -366,7 +367,7 @@ static void aspeed_gfx_shutdown(struct platform_device *pdev)
static struct platform_driver aspeed_gfx_platform_driver = {
.probe = aspeed_gfx_probe,
- .remove_new = aspeed_gfx_remove,
+ .remove = aspeed_gfx_remove,
.shutdown = aspeed_gfx_shutdown,
.driver = {
.name = "aspeed_gfx",
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index 563fa7a3b546..da0663542e8a 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -2,6 +2,7 @@
config DRM_AST
tristate "AST server chips"
depends on DRM && PCI && MMU
+ select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
select I2C
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 00b364f9a71e..b9eb67e3fa90 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -149,28 +149,22 @@ int ast_dp_launch(struct ast_device *ast)
return 0;
}
-static bool ast_dp_power_is_on(struct ast_device *ast)
+static bool ast_dp_get_phy_sleep(struct ast_device *ast)
{
- u8 vgacre3;
+ u8 vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3);
- vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3);
-
- return !(vgacre3 & AST_DP_PHY_SLEEP);
+ return (vgacre3 & AST_IO_VGACRE3_DP_PHY_SLEEP);
}
-static void ast_dp_power_on_off(struct drm_device *dev, bool on)
+static void ast_dp_set_phy_sleep(struct ast_device *ast, bool sleep)
{
- struct ast_device *ast = to_ast_device(dev);
- // Read and Turn off DP PHY sleep
- u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, AST_DP_VIDEO_ENABLE);
-
- // Turn on DP PHY sleep
- if (!on)
- bE3 |= AST_DP_PHY_SLEEP;
+ u8 vgacre3 = 0x00;
- // DP Power on/off
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_PHY_SLEEP, bE3);
+ if (sleep)
+ vgacre3 |= AST_IO_VGACRE3_DP_PHY_SLEEP;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe3, (u8)~AST_IO_VGACRE3_DP_PHY_SLEEP,
+ vgacre3);
msleep(50);
}
@@ -192,23 +186,39 @@ static void ast_dp_link_training(struct ast_device *ast)
drm_err(dev, "Link training failed\n");
}
-static void ast_dp_set_on_off(struct drm_device *dev, bool on)
+static bool __ast_dp_wait_enable(struct ast_device *ast, bool enabled)
{
- struct ast_device *ast = to_ast_device(dev);
- u8 video_on_off = on;
- u32 i = 0;
-
- // Video On/Off
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
-
- video_on_off <<= 4;
- while (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xDF,
- ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
- // wait 1 ms
- mdelay(1);
- if (++i > 200)
- break;
+ u8 vgacrdf_test = 0x00;
+ u8 vgacrdf;
+ unsigned int i;
+
+ if (enabled)
+ vgacrdf_test |= AST_IO_VGACRDF_DP_VIDEO_ENABLE;
+
+ for (i = 0; i < 1000; ++i) {
+ if (i)
+ mdelay(1);
+ vgacrdf = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xdf,
+ AST_IO_VGACRDF_DP_VIDEO_ENABLE);
+ if (vgacrdf == vgacrdf_test)
+ return true;
}
+
+ return false;
+}
+
+static void ast_dp_set_enable(struct ast_device *ast, bool enabled)
+{
+ struct drm_device *dev = &ast->base;
+ u8 vgacre3 = 0x00;
+
+ if (enabled)
+ vgacre3 |= AST_IO_VGACRE3_DP_VIDEO_ENABLE;
+
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xe3, (u8)~AST_IO_VGACRE3_DP_VIDEO_ENABLE,
+ vgacre3);
+
+ drm_WARN_ON(dev, !__ast_dp_wait_enable(ast, enabled));
}
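The rewritten wait helper polls the mirror bit in VGACRDF until it matches the requested video-enable state, sleeping 1 ms between reads and giving up after roughly a second. The same polling shape as a standalone sketch (register access is abstracted behind callbacks, and the mask value is a placeholder):

	#include <stdbool.h>

	static bool wait_for_state(bool enabled,
				   unsigned char (*read_masked)(void),
				   void (*sleep_1ms)(void),
				   unsigned char enable_mask)
	{
		unsigned char expect = enabled ? enable_mask : 0x00;
		unsigned int i;

		for (i = 0; i < 1000; ++i) {
			if (i)
				sleep_1ms();	/* no delay before first read */
			if (read_masked() == expect)
				return true;
		}
		return false;
	}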
static void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
@@ -317,26 +327,25 @@ static void ast_astdp_encoder_helper_atomic_mode_set(struct drm_encoder *encoder
static void ast_astdp_encoder_helper_atomic_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_device *dev = encoder->dev;
- struct ast_device *ast = to_ast_device(dev);
+ struct ast_device *ast = to_ast_device(encoder->dev);
struct ast_connector *ast_connector = &ast->output.astdp.connector;
if (ast_connector->physical_status == connector_status_connected) {
- ast_dp_power_on_off(dev, AST_DP_POWER_ON);
+ ast_dp_set_phy_sleep(ast, false);
ast_dp_link_training(ast);
ast_wait_for_vretrace(ast);
- ast_dp_set_on_off(dev, 1);
+ ast_dp_set_enable(ast, true);
}
}
static void ast_astdp_encoder_helper_atomic_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_device *dev = encoder->dev;
+ struct ast_device *ast = to_ast_device(encoder->dev);
- ast_dp_set_on_off(dev, 0);
- ast_dp_power_on_off(dev, AST_DP_POWER_OFF);
+ ast_dp_set_enable(ast, false);
+ ast_dp_set_phy_sleep(ast, true);
}
static const struct drm_encoder_helper_funcs ast_astdp_encoder_helper_funcs = {
@@ -383,22 +392,21 @@ static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector
bool force)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
- struct drm_device *dev = connector->dev;
struct ast_device *ast = to_ast_device(connector->dev);
enum drm_connector_status status = connector_status_disconnected;
- bool power_is_on;
+ bool phy_sleep;
mutex_lock(&ast->modeset_lock);
- power_is_on = ast_dp_power_is_on(ast);
- if (!power_is_on)
- ast_dp_power_on_off(dev, true);
+ phy_sleep = ast_dp_get_phy_sleep(ast);
+ if (phy_sleep)
+ ast_dp_set_phy_sleep(ast, false);
if (ast_astdp_is_connected(ast))
status = connector_status_connected;
- if (!power_is_on && status == connector_status_disconnected)
- ast_dp_power_on_off(dev, false);
+ if (phy_sleep && status == connector_status_disconnected)
+ ast_dp_set_phy_sleep(ast, true);
mutex_unlock(&ast->modeset_lock);
@@ -414,6 +422,10 @@ static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs
.detect_ctx = ast_astdp_connector_helper_detect_ctx,
};
+/*
+ * Output
+ */
+
static const struct drm_connector_funcs ast_astdp_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -422,34 +434,18 @@ static const struct drm_connector_funcs ast_astdp_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector *connector)
-{
- int ret;
-
- ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs);
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return 0;
-}
-
int ast_astdp_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.astdp.encoder;
- struct ast_connector *ast_connector = &ast->output.astdp.connector;
- struct drm_connector *connector = &ast_connector->base;
+ struct drm_encoder *encoder;
+ struct ast_connector *ast_connector;
+ struct drm_connector *connector;
int ret;
+ /* encoder */
+
+ encoder = &ast->output.astdp.encoder;
ret = drm_encoder_init(dev, encoder, &ast_astdp_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
@@ -458,9 +454,20 @@ int ast_astdp_output_init(struct ast_device *ast)
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = ast_astdp_connector_init(dev, connector);
+ /* connector */
+
+ ast_connector = &ast->output.astdp.connector;
+ connector = &ast_connector->base;
+ ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
if (ret)
return ret;
+ drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
ast_connector->physical_status = connector->status;
ret = drm_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index e4c636f45082..9e19d8c17730 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -21,9 +21,9 @@ static void ast_release_firmware(void *data)
ast->dp501_fw = NULL;
}
-static int ast_load_dp501_microcode(struct drm_device *dev)
+static int ast_load_dp501_microcode(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
+ struct drm_device *dev = &ast->base;
int ret;
ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
@@ -109,10 +109,10 @@ static bool wait_fw_ready(struct ast_device *ast)
}
#endif
-static bool ast_write_cmd(struct drm_device *dev, u8 data)
+static bool ast_write_cmd(struct ast_device *ast, u8 data)
{
- struct ast_device *ast = to_ast_device(dev);
int retry = 0;
+
if (wait_nack(ast)) {
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9a, 0x00, data);
@@ -131,10 +131,8 @@ static bool ast_write_cmd(struct drm_device *dev, u8 data)
return false;
}
-static bool ast_write_data(struct drm_device *dev, u8 data)
+static bool ast_write_data(struct ast_device *ast, u8 data)
{
- struct ast_device *ast = to_ast_device(dev);
-
if (wait_nack(ast)) {
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x9a, 0x00, data);
@@ -175,10 +173,10 @@ static void clear_cmd(struct ast_device *ast)
}
#endif
-static void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
+static void ast_set_dp501_video_output(struct ast_device *ast, u8 mode)
{
- ast_write_cmd(dev, 0x40);
- ast_write_data(dev, mode);
+ ast_write_cmd(ast, 0x40);
+ ast_write_data(ast, mode);
msleep(10);
}
@@ -188,9 +186,8 @@ static u32 get_fw_base(struct ast_device *ast)
return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
}
-bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
+bool ast_backup_fw(struct ast_device *ast, u8 *addr, u32 size)
{
- struct ast_device *ast = to_ast_device(dev);
u32 i, data;
u32 boot_address;
@@ -207,9 +204,8 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
return false;
}
-static bool ast_launch_m68k(struct drm_device *dev)
+static bool ast_launch_m68k(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
u32 i, data, len = 0;
u32 boot_address;
u8 *fw_addr = NULL;
@@ -226,7 +222,7 @@ static bool ast_launch_m68k(struct drm_device *dev)
len = 32*1024;
} else {
if (!ast->dp501_fw &&
- ast_load_dp501_microcode(dev) < 0)
+ ast_load_dp501_microcode(ast) < 0)
return false;
fw_addr = (u8 *)ast->dp501_fw->data;
@@ -348,9 +344,8 @@ static int ast_dp512_read_edid_block(void *data, u8 *buf, unsigned int block, si
return true;
}
-static bool ast_init_dvo(struct drm_device *dev)
+static bool ast_init_dvo(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
u8 jreg;
u32 data;
ast_write32(ast, 0xf004, 0x1e6e0000);
@@ -421,9 +416,8 @@ static bool ast_init_dvo(struct drm_device *dev)
}
-static void ast_init_analog(struct drm_device *dev)
+static void ast_init_analog(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
u32 data;
/*
@@ -448,28 +442,28 @@ static void ast_init_analog(struct drm_device *dev)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x00);
}
-void ast_init_3rdtx(struct drm_device *dev)
+void ast_init_3rdtx(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
- u8 jreg;
+ u8 vgacrd1;
if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
- switch (jreg & 0x0e) {
- case 0x04:
- ast_init_dvo(dev);
+ vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
+ AST_IO_VGACRD1_TX_TYPE_MASK);
+ switch (vgacrd1) {
+ case AST_IO_VGACRD1_TX_SIL164_VBIOS:
+ ast_init_dvo(ast);
break;
- case 0x08:
- ast_launch_m68k(dev);
+ case AST_IO_VGACRD1_TX_DP501_VBIOS:
+ ast_launch_m68k(ast);
break;
- case 0x0c:
- ast_init_dvo(dev);
+ case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
+ ast_init_dvo(ast);
break;
default:
- if (ast->tx_chip_types & BIT(AST_TX_SIL164))
- ast_init_dvo(dev);
+ if (ast->tx_chip == AST_TX_SIL164)
+ ast_init_dvo(ast);
else
- ast_init_analog(dev);
+ ast_init_analog(ast);
}
}
}
@@ -485,17 +479,17 @@ static const struct drm_encoder_funcs ast_dp501_encoder_funcs = {
static void ast_dp501_encoder_helper_atomic_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_device *dev = encoder->dev;
+ struct ast_device *ast = to_ast_device(encoder->dev);
- ast_set_dp501_video_output(dev, 1);
+ ast_set_dp501_video_output(ast, 1);
}
static void ast_dp501_encoder_helper_atomic_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
- struct drm_device *dev = encoder->dev;
+ struct ast_device *ast = to_ast_device(encoder->dev);
- ast_set_dp501_video_output(dev, 0);
+ ast_set_dp501_video_output(ast, 0);
}
static const struct drm_encoder_helper_funcs ast_dp501_encoder_helper_funcs = {
@@ -567,34 +561,22 @@ static const struct drm_connector_funcs ast_dp501_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector *connector)
-{
- int ret;
-
- ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret)
- return ret;
-
- drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs);
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
-
- return 0;
-}
+/*
+ * Output
+ */
int ast_dp501_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.dp501.encoder;
- struct ast_connector *ast_connector = &ast->output.dp501.connector;
- struct drm_connector *connector = &ast_connector->base;
+ struct drm_encoder *encoder;
+ struct ast_connector *ast_connector;
+ struct drm_connector *connector;
int ret;
+ /* encoder */
+
+ encoder = &ast->output.dp501.encoder;
ret = drm_encoder_init(dev, encoder, &ast_dp501_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
@@ -603,9 +585,20 @@ int ast_dp501_output_init(struct ast_device *ast)
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = ast_dp501_connector_init(dev, connector);
+ /* connector */
+
+ ast_connector = &ast->output.dp501.connector;
+ connector = &ast_connector->base;
+ ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
if (ret)
return ret;
+ drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
ast_connector->physical_status = connector->status;
ret = drm_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 3a908bb015fe..ff3bcdd1cff2 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -26,11 +26,12 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
@@ -59,12 +60,12 @@ static const struct drm_driver ast_driver = {
.fops = &ast_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- DRM_GEM_SHMEM_DRIVER_OPS
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
};
/*
@@ -279,7 +280,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct drm_device *drm;
bool need_post = false;
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, ast_driver.name);
if (ret)
return ret;
@@ -360,7 +361,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- drm_fbdev_shmem_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return 0;
}
@@ -396,7 +397,7 @@ static int ast_drm_thaw(struct drm_device *dev)
ast_enable_vga(ast->ioregs);
ast_open_key(ast->ioregs);
ast_enable_mmio(dev->dev, ast->ioregs);
- ast_post_gpu(dev);
+ ast_post_gpu(ast);
return drm_mode_config_helper_resume(dev);
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 91fe07cf7b07..6b4305ac07d4 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -43,7 +43,6 @@
#define DRIVER_NAME "ast"
#define DRIVER_DESC "AST"
-#define DRIVER_DATE "20120228"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
@@ -91,11 +90,6 @@ enum ast_tx_chip {
AST_TX_ASTDP,
};
-#define AST_TX_NONE_BIT BIT(AST_TX_NONE)
-#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164)
-#define AST_TX_DP501_BIT BIT(AST_TX_DP501)
-#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP)
-
enum ast_config_mode {
ast_use_p2a,
ast_use_dt,
@@ -187,10 +181,12 @@ struct ast_device {
struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
+ enum ast_tx_chip tx_chip;
+
struct ast_plane primary_plane;
struct ast_plane cursor_plane;
struct drm_crtc crtc;
- struct {
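+ /* Only one transmitter is active per device, so the outputs can share storage. */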
+ union {
struct {
struct drm_encoder encoder;
struct ast_connector connector;
@@ -211,7 +207,6 @@ struct ast_device {
bool support_wide_screen;
- unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */
u8 *dp501_fw_addr;
const struct firmware *dp501_fw; /* dp501 fw */
};
@@ -407,9 +402,6 @@ int ast_mode_config_init(struct ast_device *ast);
#define AST_DP501_LINKRATE 0xf014
#define AST_DP501_EDID_DATA 0xf020
-#define AST_DP_POWER_ON true
-#define AST_DP_POWER_OFF false
-
/*
* ASTDP resolution table:
* EX: ASTDP_A_B_C:
@@ -453,7 +445,7 @@ int ast_mode_config_init(struct ast_device *ast);
int ast_mm_init(struct ast_device *ast);
/* ast post */
-void ast_post_gpu(struct drm_device *dev);
+void ast_post_gpu(struct ast_device *ast);
u32 ast_mindwm(struct ast_device *ast, u32 r);
void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
void ast_patch_ahb_2500(void __iomem *regs);
@@ -462,8 +454,8 @@ int ast_vga_output_init(struct ast_device *ast);
int ast_sil164_output_init(struct ast_device *ast);
/* ast dp501 */
-bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
-void ast_init_3rdtx(struct drm_device *dev);
+bool ast_backup_fw(struct ast_device *ast, u8 *addr, u32 size);
+void ast_init_3rdtx(struct ast_device *ast);
int ast_dp501_output_init(struct ast_device *ast);
/* aspeed DP */
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index d836f2a4f9f3..bc37c65305d4 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -68,11 +68,33 @@ static void ast_detect_widescreen(struct ast_device *ast)
static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
{
+ static const char * const info_str[] = {
+ "analog VGA",
+ "Sil164 TMDS transmitter",
+ "DP501 DisplayPort transmitter",
+ "ASPEED DisplayPort transmitter",
+ };
+
struct drm_device *dev = &ast->base;
- u8 jreg;
+ u8 jreg, vgacrd1;
+
+ /*
+ * Several of the listed TX chips are not explicitly supported
+ * by the ast driver. If these exist in real-world devices, they
+ * are most likely reported as VGA or SIL164 outputs. We warn here
+ * to get bug reports for these devices. If none come in for some
+ * time, we can begin to fail device probing on these values.
+ */
+ vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK);
+ drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ITE66121_VBIOS,
+ "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_CH7003_VBIOS,
+ "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ drm_WARN(dev, vgacrd1 == AST_IO_VGACRD1_TX_ANX9807_VBIOS,
+ "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
/* Check 3rd Tx option (digital output afaik) */
- ast->tx_chip_types |= AST_TX_NONE_BIT;
+ ast->tx_chip = AST_TX_NONE;
/*
* VGACRA3 Enhanced Color Mode Register, check if DVO is already
@@ -85,7 +107,7 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
if (!need_post) {
jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
if (jreg & 0x80)
- ast->tx_chip_types = AST_TX_SIL164_BIT;
+ ast->tx_chip = AST_TX_SIL164;
}
if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) {
@@ -94,49 +116,42 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
* the SOC scratch register #1 bits 11:8 (interestingly marked
* as "reserved" in the spec)
*/
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
+ jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
+ AST_IO_VGACRD1_TX_TYPE_MASK);
switch (jreg) {
- case 0x04:
- ast->tx_chip_types = AST_TX_SIL164_BIT;
+ case AST_IO_VGACRD1_TX_SIL164_VBIOS:
+ ast->tx_chip = AST_TX_SIL164;
break;
- case 0x08:
+ case AST_IO_VGACRD1_TX_DP501_VBIOS:
ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL);
if (ast->dp501_fw_addr) {
/* backup firmware */
- if (ast_backup_fw(dev, ast->dp501_fw_addr, 32*1024)) {
+ if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) {
drmm_kfree(dev, ast->dp501_fw_addr);
ast->dp501_fw_addr = NULL;
}
}
fallthrough;
- case 0x0c:
- ast->tx_chip_types = AST_TX_DP501_BIT;
+ case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
+ ast->tx_chip = AST_TX_DP501;
}
} else if (IS_AST_GEN7(ast)) {
- if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xD1, TX_TYPE_MASK) ==
- ASTDP_DPMCU_TX) {
+ if (ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, AST_IO_VGACRD1_TX_TYPE_MASK) ==
+ AST_IO_VGACRD1_TX_ASTDP) {
int ret = ast_dp_launch(ast);
if (!ret)
- ast->tx_chip_types = AST_TX_ASTDP_BIT;
+ ast->tx_chip = AST_TX_ASTDP;
}
}
- /* Print stuff for diagnostic purposes */
- if (ast->tx_chip_types & AST_TX_NONE_BIT)
- drm_info(dev, "Using analog VGA\n");
- if (ast->tx_chip_types & AST_TX_SIL164_BIT)
- drm_info(dev, "Using Sil164 TMDS transmitter\n");
- if (ast->tx_chip_types & AST_TX_DP501_BIT)
- drm_info(dev, "Using DP501 DisplayPort transmitter\n");
- if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
- drm_info(dev, "Using ASPEED DisplayPort transmitter\n");
+ drm_info(dev, "Using %s\n", info_str[ast->tx_chip]);
}
-static int ast_get_dram_info(struct drm_device *dev)
+static int ast_get_dram_info(struct ast_device *ast)
{
+ struct drm_device *dev = &ast->base;
struct device_node *np = dev->dev->of_node;
- struct ast_device *ast = to_ast_device(dev);
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
@@ -278,7 +293,7 @@ struct drm_device *ast_device_create(struct pci_dev *pdev,
ast_detect_widescreen(ast);
ast_detect_tx_chip(ast, need_post);
- ret = ast_get_dram_info(dev);
+ ret = ast_get_dram_info(ast);
if (ret)
return ERR_PTR(ret);
@@ -286,7 +301,7 @@ struct drm_device *ast_device_create(struct pci_dev *pdev,
ast->mclk, ast->dram_type, ast->dram_bus_width);
if (need_post)
- ast_post_gpu(dev);
+ ast_post_gpu(ast);
ret = ast_mm_init(ast);
if (ret)
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index ed496fb32bf3..9d5321c81e68 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1287,9 +1287,9 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
.atomic_destroy_state = ast_crtc_atomic_destroy_state,
};
-static int ast_crtc_init(struct drm_device *dev)
+static int ast_crtc_init(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
+ struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
int ret;
@@ -1396,28 +1396,26 @@ int ast_mode_config_init(struct ast_device *ast)
if (ret)
return ret;
- ast_crtc_init(dev);
+ ret = ast_crtc_init(ast);
+ if (ret)
+ return ret;
- if (ast->tx_chip_types & AST_TX_NONE_BIT) {
+ switch (ast->tx_chip) {
+ case AST_TX_NONE:
ret = ast_vga_output_init(ast);
- if (ret)
- return ret;
- }
- if (ast->tx_chip_types & AST_TX_SIL164_BIT) {
+ break;
+ case AST_TX_SIL164:
ret = ast_sil164_output_init(ast);
- if (ret)
- return ret;
- }
- if (ast->tx_chip_types & AST_TX_DP501_BIT) {
+ break;
+ case AST_TX_DP501:
ret = ast_dp501_output_init(ast);
- if (ret)
- return ret;
- }
- if (ast->tx_chip_types & AST_TX_ASTDP_BIT) {
+ break;
+ case AST_TX_ASTDP:
ret = ast_astdp_output_init(ast);
- if (ret)
- return ret;
+ break;
}
+ if (ret)
+ return ret;
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 65755798ab94..364030f97571 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -34,16 +34,14 @@
#include "ast_dram_tables.h"
#include "ast_drv.h"
-static void ast_post_chip_2300(struct drm_device *dev);
-static void ast_post_chip_2500(struct drm_device *dev);
+static void ast_post_chip_2300(struct ast_device *ast);
+static void ast_post_chip_2500(struct ast_device *ast);
static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
-static void
-ast_set_def_ext_reg(struct drm_device *dev)
+static void ast_set_def_ext_reg(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
u8 i, index, reg;
const u8 *ext_reg_info;
@@ -252,9 +250,8 @@ cbr_start:
-static void ast_init_dram_reg(struct drm_device *dev)
+static void ast_init_dram_reg(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
u8 j;
u32 data, temp, i;
const struct ast_dramstruct *dram_reg_info;
@@ -343,26 +340,24 @@ static void ast_init_dram_reg(struct drm_device *dev)
} while ((j & 0x40) == 0);
}
-void ast_post_gpu(struct drm_device *dev)
+void ast_post_gpu(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
-
- ast_set_def_ext_reg(dev);
+ ast_set_def_ext_reg(ast);
if (IS_AST_GEN7(ast)) {
- if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
+ if (ast->tx_chip == AST_TX_ASTDP)
ast_dp_launch(ast);
} else if (ast->config_mode == ast_use_p2a) {
if (IS_AST_GEN6(ast))
- ast_post_chip_2500(dev);
+ ast_post_chip_2500(ast);
else if (IS_AST_GEN5(ast) || IS_AST_GEN4(ast))
- ast_post_chip_2300(dev);
+ ast_post_chip_2300(ast);
else
- ast_init_dram_reg(dev);
+ ast_init_dram_reg(ast);
- ast_init_3rdtx(dev);
+ ast_init_3rdtx(ast);
} else {
- if (ast->tx_chip_types & AST_TX_SIL164_BIT)
+ if (ast->tx_chip == AST_TX_SIL164)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); /* Enable DVO */
}
}
@@ -1569,9 +1564,8 @@ ddr2_init_start:
}
-static void ast_post_chip_2300(struct drm_device *dev)
+static void ast_post_chip_2300(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
struct ast2300_dram_param param;
u32 temp;
u8 reg;
@@ -2038,9 +2032,9 @@ void ast_patch_ahb_2500(void __iomem *regs)
__ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
}
-void ast_post_chip_2500(struct drm_device *dev)
+void ast_post_chip_2500(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
+ struct drm_device *dev = &ast->base;
u32 temp;
u8 reg;
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 040961cc1a19..2aadf07d135a 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -37,28 +37,29 @@
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
-#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5)
+#define AST_IO_VGACRD1_MCU_FW_EXECUTING BIT(5)
+/* Display Transmitter Type */
+#define AST_IO_VGACRD1_TX_TYPE_MASK GENMASK(3, 1)
+#define AST_IO_VGACRD1_NO_TX 0x00
+#define AST_IO_VGACRD1_TX_ITE66121_VBIOS 0x02
+#define AST_IO_VGACRD1_TX_SIL164_VBIOS 0x04
+#define AST_IO_VGACRD1_TX_CH7003_VBIOS 0x06
+#define AST_IO_VGACRD1_TX_DP501_VBIOS 0x08
+#define AST_IO_VGACRD1_TX_ANX9807_VBIOS 0x0a
+#define AST_IO_VGACRD1_TX_FW_EMBEDDED_FW 0x0c /* special case of DP501 */
+#define AST_IO_VGACRD1_TX_ASTDP 0x0e
+
#define AST_IO_VGACRD7_EDID_VALID_FLAG BIT(0)
#define AST_IO_VGACRDC_LINK_SUCCESS BIT(0)
#define AST_IO_VGACRDF_HPD BIT(0)
+#define AST_IO_VGACRDF_DP_VIDEO_ENABLE BIT(4) /* mirrors AST_IO_VGACRE3_DP_VIDEO_ENABLE */
+#define AST_IO_VGACRE3_DP_VIDEO_ENABLE BIT(0)
+#define AST_IO_VGACRE3_DP_PHY_SLEEP BIT(4)
#define AST_IO_VGACRE5_EDID_READ_DONE BIT(0)
#define AST_IO_VGAIR1_R (0x5A)
#define AST_IO_VGAIR1_VREFRESH BIT(3)
-/*
- * Display Transmitter Type
- */
-
-#define TX_TYPE_MASK GENMASK(3, 1)
-#define NO_TX (0 << 1)
-#define ITE66121_VBIOS_TX (1 << 1)
-#define SI164_VBIOS_TX (2 << 1)
-#define CH7003_VBIOS_TX (3 << 1)
-#define DP501_VBIOS_TX (4 << 1)
-#define ANX9807_VBIOS_TX (5 << 1)
-#define TX_FW_EMBEDDED_FW_TX (6 << 1)
-#define ASTDP_DPMCU_TX (7 << 1)
#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6)
//#define AST_VRAM_INIT_BY_BMC BIT(7)
@@ -68,18 +69,6 @@
* AST DisplayPort
*/
-/* Define for Soc scratched reg used on ASTDP */
-#define AST_DP_PHY_SLEEP BIT(4)
-#define AST_DP_VIDEO_ENABLE BIT(0)
-
-/*
- * CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE
- * Precondition: A. ~AST_DP_PHY_SLEEP &&
- * B. DP_HPD &&
- * C. DP_LINK_SUCCESS
- */
-#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4)
-
/*
* ASTDP setmode registers:
* CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp))
diff --git a/drivers/gpu/drm/ast/ast_sil164.c b/drivers/gpu/drm/ast/ast_sil164.c
index 496c7120e515..be01254dd48a 100644
--- a/drivers/gpu/drm/ast/ast_sil164.c
+++ b/drivers/gpu/drm/ast/ast_sil164.c
@@ -29,6 +29,8 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
if (ast_connector->physical_status == connector_status_connected) {
count = drm_connector_helper_get_modes(connector);
} else {
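+ /* No monitor connected: clear any stale EDID data from the connector. */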
+ drm_edid_connector_update(connector, NULL);
+
/*
* There's no EDID data without a connected monitor. Set BMC-
* compatible modes in this case. The XGA default resolution
@@ -71,52 +73,49 @@ static const struct drm_connector_funcs ast_sil164_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ast_sil164_connector_init(struct drm_device *dev, struct drm_connector *connector)
+/*
+ * Output
+ */
+
+int ast_sil164_output_init(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
struct i2c_adapter *ddc;
+ struct drm_encoder *encoder;
+ struct ast_connector *ast_connector;
+ struct drm_connector *connector;
int ret;
+ /* DDC */
+
ddc = ast_ddc_create(ast);
- if (IS_ERR(ddc)) {
- ret = PTR_ERR(ddc);
- drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
+ if (IS_ERR(ddc))
+ return PTR_ERR(ddc);
+
+ /* encoder */
+
+ encoder = &ast->output.sil164.encoder;
+ ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
return ret;
- }
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* connector */
+ ast_connector = &ast->output.sil164.connector;
+ connector = &ast_connector->base;
ret = drm_connector_init_with_ddc(dev, connector, &ast_sil164_connector_funcs,
DRM_MODE_CONNECTOR_DVII, ddc);
if (ret)
return ret;
-
drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
-
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
- return 0;
-}
-
-int ast_sil164_output_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.sil164.encoder;
- struct ast_connector *ast_connector = &ast->output.sil164.connector;
- struct drm_connector *connector = &ast_connector->base;
- int ret;
-
- ret = drm_encoder_init(dev, encoder, &ast_sil164_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_sil164_connector_init(dev, connector);
- if (ret)
- return ret;
ast_connector->physical_status = connector->status;
ret = drm_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/ast/ast_vga.c b/drivers/gpu/drm/ast/ast_vga.c
index 3e815da43fbd..abe0fff8485c 100644
--- a/drivers/gpu/drm/ast/ast_vga.c
+++ b/drivers/gpu/drm/ast/ast_vga.c
@@ -29,6 +29,8 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
if (ast_connector->physical_status == connector_status_connected) {
count = drm_connector_helper_get_modes(connector);
} else {
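+ /* No monitor connected: clear any stale EDID data from the connector. */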
+ drm_edid_connector_update(connector, NULL);
+
/*
* There's no EDID data without a connected monitor. Set BMC-
* compatible modes in this case. The XGA default resolution
@@ -71,52 +73,49 @@ static const struct drm_connector_funcs ast_vga_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ast_vga_connector_init(struct drm_device *dev, struct drm_connector *connector)
+/*
+ * Output
+ */
+
+int ast_vga_output_init(struct ast_device *ast)
{
- struct ast_device *ast = to_ast_device(dev);
+ struct drm_device *dev = &ast->base;
+ struct drm_crtc *crtc = &ast->crtc;
struct i2c_adapter *ddc;
+ struct drm_encoder *encoder;
+ struct ast_connector *ast_connector;
+ struct drm_connector *connector;
int ret;
+ /* DDC */
+
ddc = ast_ddc_create(ast);
- if (IS_ERR(ddc)) {
- ret = PTR_ERR(ddc);
- drm_err(dev, "failed to add DDC bus for connector; ret=%d\n", ret);
+ if (IS_ERR(ddc))
+ return PTR_ERR(ddc);
+
+ /* encoder */
+
+ encoder = &ast->output.vga.encoder;
+ ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret)
return ret;
- }
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* connector */
+ ast_connector = &ast->output.vga.connector;
+ connector = &ast_connector->base;
ret = drm_connector_init_with_ddc(dev, connector, &ast_vga_connector_funcs,
DRM_MODE_CONNECTOR_VGA, ddc);
if (ret)
return ret;
-
drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs);
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
-
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
- return 0;
-}
-
-int ast_vga_output_init(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_crtc *crtc = &ast->crtc;
- struct drm_encoder *encoder = &ast->output.vga.encoder;
- struct ast_connector *ast_connector = &ast->output.vga.connector;
- struct drm_connector *connector = &ast_connector->base;
- int ret;
-
- ret = drm_encoder_init(dev, encoder, &ast_vga_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- if (ret)
- return ret;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
-
- ret = ast_vga_connector_init(dev, connector);
- if (ret)
- return ret;
ast_connector->physical_status = connector->status;
ret = drm_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig
index 945f3aa7bb24..f8b9c91907d8 100644
--- a/drivers/gpu/drm/atmel-hlcdc/Kconfig
+++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig
@@ -2,6 +2,7 @@
config DRM_ATMEL_HLCDC
tristate "DRM Support for ATMEL HLCDC Display Controller"
depends on DRM && OF && COMMON_CLK && ((MFD_ATMEL_HLCDC && ARM) || COMPILE_TEST)
+ select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 9ce429f889ca..fa8ad94e431a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -16,10 +16,12 @@
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -840,10 +842,10 @@ DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &fops,
.name = "atmel-hlcdc",
.desc = "Atmel HLCD Controller DRM",
- .date = "20141504",
.major = 1,
.minor = 0,
};
@@ -865,7 +867,7 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_dma_setup(ddev, 24);
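+ /* DRM_FORMAT_RGB888 keeps the 24-bpp default previously passed to drm_fbdev_dma_setup() */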
+ drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB888);
return 0;
@@ -934,7 +936,7 @@ static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
static struct platform_driver atmel_hlcdc_dc_platform_driver = {
.probe = atmel_hlcdc_dc_drm_probe,
- .remove_new = atmel_hlcdc_dc_drm_remove,
+ .remove = atmel_hlcdc_dc_drm_remove,
.shutdown = atmel_hlcdc_dc_drm_shutdown,
.driver = {
.name = "atmel-hlcdc-display-controller",
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 3eb955333c80..6b4664d91faa 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -90,6 +90,17 @@ config DRM_FSL_LDB
help
Support for i.MX8MP DPI-to-LVDS on-SoC encoder.
+config DRM_ITE_IT6263
+ tristate "ITE IT6263 LVDS/HDMI bridge"
+ depends on OF
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ ITE IT6263 LVDS to HDMI bridge chip driver.
+
config DRM_ITE_IT6505
tristate "ITE IT6505 DisplayPort bridge"
depends on OF
@@ -140,6 +151,8 @@ config DRM_LONTIUM_LT9611
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
select DRM_MIPI_DSI
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
select REGMAP_I2C
help
Driver for Lontium LT9611 DSI to HDMI bridge
@@ -368,6 +381,13 @@ config DRM_TI_DLPC3433
It supports up to 720p resolution with 60 and 120 Hz refresh
rates.
+config DRM_TI_TDP158
+ tristate "TI TDP158 HDMI/TMDS bridge"
+ depends on OF
+ select DRM_PANEL_BRIDGE
+ help
+ Texas Instruments TDP158 HDMI/TMDS Bridge driver
+
config DRM_TI_TFP410
tristate "TI TFP410 DVI/HDMI bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 7df87b582dca..97304b429a53 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_FSL_LDB) += fsl-ldb.o
+obj-$(CONFIG_DRM_ITE_IT6263) += ite-it6263.o
obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o
obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o
obj-$(CONFIG_DRM_LONTIUM_LT9211) += lontium-lt9211.o
@@ -32,6 +33,7 @@ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_DLPC3433) += ti-dlpc3433.o
obj-$(CONFIG_DRM_TI_SN65DSI83) += ti-sn65dsi83.o
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
+obj-$(CONFIG_DRM_TI_TDP158) += ti-tdp158.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 61f4a38e7d2b..657bc3dd18df 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
ADV7511_AUDIO_CFG3_LEN_MASK, len);
regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
- regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ /* send current Audio infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
+
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), 0);
return 0;
}
@@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data)
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
BIT(7) | BIT(6), BIT(7));
/* use Audio infoframe updated info */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
BIT(5), 0);
+
/* enable SPDIF receiver */
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
@@ -204,7 +214,8 @@ static void audio_shutdown(struct device *dev, void *data)
}
static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index eb5919b38263..a13b3d8ab6ac 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1241,8 +1241,10 @@ static int adv7511_probe(struct i2c_client *i2c)
return ret;
ret = adv7511_init_regulators(adv7511);
- if (ret)
- return dev_err_probe(dev, ret, "failed to init regulators\n");
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init regulators\n");
+ goto err_of_node_put;
+ }
/*
* The power down GPIO is optional. If present, toggle it from active to
@@ -1363,6 +1365,8 @@ err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
uninit_regulators:
adv7511_uninit_regulators(adv7511);
+err_of_node_put:
+ of_node_put(adv7511->host_node);
return ret;
}
@@ -1371,6 +1375,8 @@ static void adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
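+ /* drop the DSI host node reference taken in adv7533_parse_dt() */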
+ of_node_put(adv7511->host_node);
+
adv7511_uninit_regulators(adv7511);
drm_bridge_remove(&adv7511->bridge);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 4481489aaf5e..122ad91e8a32 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -172,7 +172,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
- if (num_lanes < 1 || num_lanes > 4)
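+ /* the ADV7533/35 support only 2, 3 or 4 DSI data lanes */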
+ if (num_lanes < 2 || num_lanes > 4)
return -EINVAL;
adv->num_dsi_lanes = num_lanes;
@@ -181,8 +181,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
if (!adv->host_node)
return -ENODEV;
- of_node_put(adv->host_node);
-
adv->use_timing_gen = !of_property_read_bool(np,
"adi,disable-timing-generator");
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index b754947e3e00..83d711ee3a2e 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -793,7 +793,7 @@ static void anx6345_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id anx6345_id[] = {
- { "anx6345", 0 },
+ { "anx6345" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, anx6345_id);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index a2e9bb485c36..4be34d5c7a3b 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1952,7 +1952,8 @@ static void anx7625_audio_shutdown(struct device *dev, void *data)
}
static int anx7625_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
@@ -2002,8 +2003,10 @@ static int anx7625_audio_get_eld(struct device *dev, void *data,
memset(buf, 0, len);
} else {
dev_dbg(dev, "audio copy eld\n");
+ mutex_lock(&ctx->connector->eld_mutex);
memcpy(buf, ctx->connector->eld,
min(sizeof(ctx->connector->eld), len));
+ mutex_unlock(&ctx->connector->eld_mutex);
}
return 0;
@@ -2137,49 +2140,6 @@ static void hdcp_check_work_func(struct work_struct *work)
drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
}
-static int anx7625_connector_atomic_check(struct anx7625_data *ctx,
- struct drm_connector_state *state)
-{
- struct device *dev = ctx->dev;
- int cp;
-
- dev_dbg(dev, "hdcp state check\n");
- cp = state->content_protection;
-
- if (cp == ctx->hdcp_cp)
- return 0;
-
- if (cp == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
- if (ctx->dp_en) {
- dev_dbg(dev, "enable HDCP\n");
- anx7625_hdcp_enable(ctx);
-
- queue_delayed_work(ctx->hdcp_workqueue,
- &ctx->hdcp_work,
- msecs_to_jiffies(2000));
- }
- }
-
- if (cp == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- if (ctx->hdcp_cp != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- dev_err(dev, "current CP is not ENABLED\n");
- return -EINVAL;
- }
- anx7625_hdcp_disable(ctx);
- ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
- drm_hdcp_update_content_protection(ctx->connector,
- ctx->hdcp_cp);
- dev_dbg(dev, "update CP to UNDESIRE\n");
- }
-
- if (cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- dev_err(dev, "Userspace illegal set to PROTECTION ENABLE\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int anx7625_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@@ -2416,7 +2376,7 @@ static int anx7625_bridge_atomic_check(struct drm_bridge *bridge,
anx7625_bridge_mode_fixup(bridge, &crtc_state->mode,
&crtc_state->adjusted_mode);
- return anx7625_connector_atomic_check(ctx, conn_state);
+ return 0;
}
static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
@@ -2425,6 +2385,7 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
dev_dbg(dev, "drm atomic enable\n");
@@ -2439,6 +2400,22 @@ static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge,
_anx7625_hpd_polling(ctx, 5000 * 100);
anx7625_dp_start(ctx);
+
+ conn_state = drm_atomic_get_new_connector_state(state->base.state, connector);
+
+ if (WARN_ON(!conn_state))
+ return;
+
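+ /* Userspace asked for content protection: enable HDCP and verify in 2 s. */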
+ if (conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (ctx->dp_en) {
+ dev_dbg(dev, "enable HDCP\n");
+ anx7625_hdcp_enable(ctx);
+
+ queue_delayed_work(ctx->hdcp_workqueue,
+ &ctx->hdcp_work,
+ msecs_to_jiffies(2000));
+ }
+ }
}
static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -2449,6 +2426,17 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
dev_dbg(dev, "drm atomic disable\n");
+ flush_workqueue(ctx->hdcp_workqueue);
+
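+ /* Fall back to DESIRED so HDCP is re-enabled on the next atomic enable. */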
+ if (ctx->connector &&
+ ctx->hdcp_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ anx7625_hdcp_disable(ctx);
+ ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ drm_hdcp_update_content_protection(ctx->connector,
+ ctx->hdcp_cp);
+ dev_dbg(dev, "update CP to DESIRE\n");
+ }
+
ctx->connector = NULL;
anx7625_dp_stop(ctx);
@@ -2551,6 +2539,8 @@ static int __maybe_unused anx7625_runtime_pm_suspend(struct device *dev)
mutex_lock(&ctx->lock);
anx7625_stop_dp_work(ctx);
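+ /* an external monitor may change while powered off; drop the cached EDID */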
+ if (!ctx->pdata.panel_bridge)
+ anx7625_remove_edid(ctx);
anx7625_power_standby(ctx);
mutex_unlock(&ctx->lock);
@@ -2793,7 +2783,7 @@ static void anx7625_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id anx7625_id[] = {
- {"anx7625", 0},
+ { "anx7625" },
{}
};
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index b29980f95379..015983c015e5 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -58,9 +58,10 @@ int drm_aux_bridge_register(struct device *parent)
adev->id = ret;
adev->name = "aux_bridge";
adev->dev.parent = parent;
- adev->dev.of_node = of_node_get(parent->of_node);
adev->dev.release = drm_aux_bridge_release;
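+ /* reuse the parent's of_node; the device is marked as of_node_reused */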
+ device_set_of_node_from_dev(&adev->dev, parent);
+
ret = auxiliary_device_init(adev);
if (ret) {
ida_free(&drm_aux_bridge_ida, adev->id);
@@ -120,6 +121,10 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
data->bridge.funcs = &drm_aux_bridge_funcs;
data->bridge.of_node = data->dev->of_node;
+ /* passthrough data, allow everything */
+ data->bridge.interlace_allowed = true;
+ data->bridge.ycbcr_420_allowed = true;
+
return devm_drm_bridge_add(data->dev, &data->bridge);
}
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index 6886db2d9e00..48f297c78ee6 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -180,6 +180,10 @@ static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
data->bridge.ops = DRM_BRIDGE_OP_HPD;
data->bridge.type = id->driver_data;
+ /* passthrough data, allow everything */
+ data->bridge.interlace_allowed = true;
+ data->bridge.ycbcr_420_allowed = true;
+
auxiliary_set_drvdata(auxdev, data);
return devm_drm_bridge_add(data->dev, &data->bridge);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index 7457d38622b0..c7a0247e06ad 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -1300,7 +1300,7 @@ MODULE_DEVICE_TABLE(of, cdns_dsi_of_match);
static struct platform_driver cdns_dsi_platform_driver = {
.probe = cdns_dsi_drm_probe,
- .remove_new = cdns_dsi_drm_remove,
+ .remove = cdns_dsi_drm_remove,
.driver = {
.name = "cdns-dsi",
.of_match_table = cdns_dsi_of_match,
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 41f72d458487..d081850e3c03 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2656,7 +2656,7 @@ static struct platform_driver mhdp_driver = {
.of_match_table = mhdp_ids,
},
.probe = cdns_mhdp_probe,
- .remove_new = cdns_mhdp_remove,
+ .remove = cdns_mhdp_remove,
};
module_platform_driver(mhdp_driver);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
index 31832ba4017f..42248f179b69 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -500,34 +500,6 @@ static void cdns_mhdp_hdcp_prop_work(struct work_struct *work)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
-int cdns_mhdp_hdcp_set_lc(struct cdns_mhdp_device *mhdp, u8 *val)
-{
- int ret;
-
- mutex_lock(&mhdp->mbox_mutex);
- ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_GENERAL,
- HDCP_GENERAL_SET_LC_128,
- 16, val);
- mutex_unlock(&mhdp->mbox_mutex);
-
- return ret;
-}
-
-int
-cdns_mhdp_hdcp_set_public_key_param(struct cdns_mhdp_device *mhdp,
- struct cdns_hdcp_tx_public_key_param *val)
-{
- int ret;
-
- mutex_lock(&mhdp->mbox_mutex);
- ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
- HDCP2X_TX_SET_PUBLIC_KEY_PARAMS,
- sizeof(*val), (u8 *)val);
- mutex_unlock(&mhdp->mbox_mutex);
-
- return ret;
-}
-
int cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
{
int ret;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
index 334c0b8b0d4f..3b6ec9c3a8d8 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
@@ -82,9 +82,6 @@ struct cdns_hdcp_tx_public_key_param {
u8 E[DLP_E];
};
-int cdns_mhdp_hdcp_set_public_key_param(struct cdns_mhdp_device *mhdp,
- struct cdns_hdcp_tx_public_key_param *val);
-int cdns_mhdp_hdcp_set_lc(struct cdns_mhdp_device *mhdp, u8 *val);
int cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type);
int cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp);
void cdns_mhdp_hdcp_init(struct cdns_mhdp_device *mhdp);
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 9eecac457dcf..d47703559b0d 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -785,7 +785,7 @@ static struct mipi_dsi_driver chipone_dsi_driver = {
},
};
-static struct i2c_device_id chipone_i2c_id[] = {
+static const struct i2c_device_id chipone_i2c_id[] = {
{ "chipone,icn6211" },
{},
};
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index c83486cf6b15..da17f0978a79 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -597,7 +597,7 @@ static const struct of_device_id ch7033_dt_ids[] = {
MODULE_DEVICE_TABLE(of, ch7033_dt_ids);
static const struct i2c_device_id ch7033_ids[] = {
- { "ch7033", 0 },
+ { "ch7033" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ch7033_ids);
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index ab8e00baf3f1..72bc508d4e6e 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -270,6 +270,10 @@ static int display_connector_probe(struct platform_device *pdev)
/* All the supported connector types support interlaced modes. */
conn->bridge.interlace_allowed = true;
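+ /* HDMI and DisplayPort connectors can also carry YCbCr 4:2:0 */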
+ if (type == DRM_MODE_CONNECTOR_HDMIA ||
+ type == DRM_MODE_CONNECTOR_DisplayPort)
+ conn->bridge.ycbcr_420_allowed = true;
+
/* Get the optional connector label. */
of_property_read_string(pdev->dev.of_node, "label", &label);
@@ -423,7 +427,7 @@ MODULE_DEVICE_TABLE(of, display_connector_match);
static struct platform_driver display_connector_driver = {
.probe = display_connector_probe,
- .remove_new = display_connector_remove,
+ .remove = display_connector_remove,
.driver = {
.name = "display-connector",
.of_match_table = display_connector_match,
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 0e4bac7dd04f..0fc8a14fd800 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -393,7 +393,7 @@ MODULE_DEVICE_TABLE(of, fsl_ldb_match);
static struct platform_driver fsl_ldb_driver = {
.probe = fsl_ldb_probe,
- .remove_new = fsl_ldb_remove,
+ .remove = fsl_ldb_remove,
.driver = {
.name = "fsl-ldb",
.of_match_table = fsl_ldb_match,
diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
index 8dd89efa8ea7..9a480c6abb85 100644
--- a/drivers/gpu/drm/bridge/imx/Kconfig
+++ b/drivers/gpu/drm/bridge/imx/Kconfig
@@ -3,6 +3,16 @@ if ARCH_MXC || COMPILE_TEST
config DRM_IMX_LDB_HELPER
tristate
+config DRM_IMX_LEGACY_BRIDGE
+ tristate
+ depends on DRM_IMX
+ help
+ This is a DRM bridge implementation for the DRM i.MX IPUv3 driver
+ that uses of_get_drm_display_mode to acquire the display mode.
+
+ Newer designs should not use this bridge and should use a proper
+ panel driver instead.
+
config DRM_IMX8MP_DW_HDMI_BRIDGE
tristate "Freescale i.MX8MP HDMI-TX bridge support"
depends on OF
diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile
index edb0a7b71b30..dd5d48584806 100644
--- a/drivers/gpu/drm/bridge/imx/Makefile
+++ b/drivers/gpu/drm/bridge/imx/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o
+obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o
obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o
obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o
obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o
diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
new file mode 100644
index 000000000000..3ebf0b9866de
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale i.MX drm driver
+ *
+ * bridge driver for legacy DT bindings, utilizing the display-timings node
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/bridge/imx.h>
+
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+
+struct imx_legacy_bridge {
+ struct drm_bridge base;
+
+ struct drm_display_mode mode;
+ u32 bus_flags;
+};
+
+#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base)
+
+static int imx_legacy_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int imx_legacy_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct imx_legacy_bridge *imx_bridge = to_imx_legacy_bridge(bridge);
+ int ret;
+
+ ret = drm_connector_helper_get_modes_fixed(connector, &imx_bridge->mode);
+ if (ret)
+ return ret;
+
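+ /* propagate the bus flags parsed from the display-timings node */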
+ connector->display_info.bus_flags = imx_bridge->bus_flags;
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs imx_legacy_bridge_funcs = {
+ .attach = imx_legacy_bridge_attach,
+ .get_modes = imx_legacy_bridge_get_modes,
+};
+
+struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
+ struct device_node *np,
+ int type)
+{
+ struct imx_legacy_bridge *imx_bridge;
+ int ret;
+
+ imx_bridge = devm_kzalloc(dev, sizeof(*imx_bridge), GFP_KERNEL);
+ if (!imx_bridge)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_get_drm_display_mode(np,
+ &imx_bridge->mode,
+ &imx_bridge->bus_flags,
+ OF_USE_NATIVE_MODE);
+ if (ret)
+ return ERR_PTR(ret);
+
+ imx_bridge->mode.type |= DRM_MODE_TYPE_DRIVER;
+
+ imx_bridge->base.funcs = &imx_legacy_bridge_funcs;
+ imx_bridge->base.of_node = np;
+ imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
+ imx_bridge->base.type = type;
+
+ ret = devm_drm_bridge_add(dev, &imx_bridge->base);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &imx_bridge->base;
+}
+EXPORT_SYMBOL_GPL(devm_imx_drm_legacy_bridge);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Freescale i.MX DRM bridge driver for legacy DT bindings");
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index 073e64dc200c..0d1ac3edcab4 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -193,7 +193,7 @@ MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pvi_match);
static struct platform_driver imx8mp_hdmi_pvi_driver = {
.probe = imx8mp_hdmi_pvi_probe,
- .remove_new = imx8mp_hdmi_pvi_remove,
+ .remove = imx8mp_hdmi_pvi_remove,
.driver = {
.name = "imx-hdmi-pvi",
.of_match_table = imx8mp_hdmi_pvi_match,
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
index 13bc570c5473..1e7a789ec289 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
@@ -23,6 +23,7 @@ imx8mp_hdmi_mode_valid(struct dw_hdmi *dw_hdmi, void *data,
const struct drm_display_mode *mode)
{
struct imx8mp_hdmi *hdmi = (struct imx8mp_hdmi *)data;
+ long round_rate;
if (mode->clock < 13500)
return MODE_CLOCK_LOW;
@@ -30,8 +31,14 @@ imx8mp_hdmi_mode_valid(struct dw_hdmi *dw_hdmi, void *data,
if (mode->clock > 297000)
return MODE_CLOCK_HIGH;
- if (clk_round_rate(hdmi->pixclk, mode->clock * 1000) !=
- mode->clock * 1000)
+ round_rate = clk_round_rate(hdmi->pixclk, mode->clock * 1000);
+ /*
+ * The i.MX8MP pixel clock generator (fsl-samsung-hdmi) cannot
+ * generate all possible frequencies, so allow some tolerance to
+ * support more modes.
+ * Various standards (VESA, CEA-861) permit a 0.5% deviation;
+ * 0.5% = 5/1000, and mode->clock is in kHz, so the limit is
+ * mode->clock * 5 Hz.
+ */
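+ /* e.g. a 148500 kHz mode allows |round_rate - 148500000| <= 742500 Hz */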
+ if (abs(round_rate - mode->clock * 1000) > mode->clock * 5)
return MODE_CLOCK_RANGE;
/* We don't support double-clocked and Interlaced modes */
@@ -111,12 +118,12 @@ static void imx8mp_dw_hdmi_remove(struct platform_device *pdev)
dw_hdmi_remove(hdmi->dw_hdmi);
}
-static int __maybe_unused imx8mp_dw_hdmi_pm_suspend(struct device *dev)
+static int imx8mp_dw_hdmi_pm_suspend(struct device *dev)
{
return 0;
}
-static int __maybe_unused imx8mp_dw_hdmi_pm_resume(struct device *dev)
+static int imx8mp_dw_hdmi_pm_resume(struct device *dev)
{
struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);
@@ -126,8 +133,7 @@ static int __maybe_unused imx8mp_dw_hdmi_pm_resume(struct device *dev)
}
static const struct dev_pm_ops imx8mp_dw_hdmi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(imx8mp_dw_hdmi_pm_suspend,
- imx8mp_dw_hdmi_pm_resume)
+ SYSTEM_SLEEP_PM_OPS(imx8mp_dw_hdmi_pm_suspend, imx8mp_dw_hdmi_pm_resume)
};
static const struct of_device_id imx8mp_dw_hdmi_of_table[] = {
@@ -138,11 +144,11 @@ MODULE_DEVICE_TABLE(of, imx8mp_dw_hdmi_of_table);
static struct platform_driver imx8mp_dw_hdmi_platform_driver = {
.probe = imx8mp_dw_hdmi_probe,
- .remove_new = imx8mp_dw_hdmi_remove,
+ .remove = imx8mp_dw_hdmi_remove,
.driver = {
.name = "imx8mp-dw-hdmi-tx",
.of_match_table = imx8mp_dw_hdmi_of_table,
- .pm = &imx8mp_dw_hdmi_pm_ops,
+ .pm = pm_ptr(&imx8mp_dw_hdmi_pm_ops),
},
};
diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
index 21471a9a28b2..dd5823f04c70 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
@@ -542,12 +542,12 @@ static void imx8qm_ldb_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int __maybe_unused imx8qm_ldb_runtime_suspend(struct device *dev)
+static int imx8qm_ldb_runtime_suspend(struct device *dev)
{
return 0;
}
-static int __maybe_unused imx8qm_ldb_runtime_resume(struct device *dev)
+static int imx8qm_ldb_runtime_resume(struct device *dev)
{
struct imx8qm_ldb *imx8qm_ldb = dev_get_drvdata(dev);
struct ldb *ldb = &imx8qm_ldb->base;
@@ -559,8 +559,7 @@ static int __maybe_unused imx8qm_ldb_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops imx8qm_ldb_pm_ops = {
- SET_RUNTIME_PM_OPS(imx8qm_ldb_runtime_suspend,
- imx8qm_ldb_runtime_resume, NULL)
+ RUNTIME_PM_OPS(imx8qm_ldb_runtime_suspend, imx8qm_ldb_runtime_resume, NULL)
};
static const struct of_device_id imx8qm_ldb_dt_ids[] = {
@@ -571,9 +570,9 @@ MODULE_DEVICE_TABLE(of, imx8qm_ldb_dt_ids);
static struct platform_driver imx8qm_ldb_driver = {
.probe = imx8qm_ldb_probe,
- .remove_new = imx8qm_ldb_remove,
+ .remove = imx8qm_ldb_remove,
.driver = {
- .pm = &imx8qm_ldb_pm_ops,
+ .pm = pm_ptr(&imx8qm_ldb_pm_ops),
.name = DRIVER_NAME,
.of_match_table = imx8qm_ldb_dt_ids,
},
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 7984da9c0a35..7bce2305d676 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -678,12 +678,12 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int __maybe_unused imx8qxp_ldb_runtime_suspend(struct device *dev)
+static int imx8qxp_ldb_runtime_suspend(struct device *dev)
{
return 0;
}
-static int __maybe_unused imx8qxp_ldb_runtime_resume(struct device *dev)
+static int imx8qxp_ldb_runtime_resume(struct device *dev)
{
struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev);
struct ldb *ldb = &imx8qxp_ldb->base;
@@ -695,8 +695,7 @@ static int __maybe_unused imx8qxp_ldb_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops imx8qxp_ldb_pm_ops = {
- SET_RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend,
- imx8qxp_ldb_runtime_resume, NULL)
+ RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL)
};
static const struct of_device_id imx8qxp_ldb_dt_ids[] = {
@@ -707,9 +706,9 @@ MODULE_DEVICE_TABLE(of, imx8qxp_ldb_dt_ids);
static struct platform_driver imx8qxp_ldb_driver = {
.probe = imx8qxp_ldb_probe,
- .remove_new = imx8qxp_ldb_remove,
+ .remove = imx8qxp_ldb_remove,
.driver = {
- .pm = &imx8qxp_ldb_pm_ops,
+ .pm = pm_ptr(&imx8qxp_ldb_pm_ops),
.name = DRIVER_NAME,
.of_match_table = imx8qxp_ldb_dt_ids,
},
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index e6dbbdc87ce2..1812bd106261 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -371,7 +371,7 @@ static void imx8qxp_pc_bridge_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev)
+static int imx8qxp_pc_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct imx8qxp_pc *pc = platform_get_drvdata(pdev);
@@ -393,7 +393,7 @@ static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused imx8qxp_pc_runtime_resume(struct device *dev)
+static int imx8qxp_pc_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct imx8qxp_pc *pc = platform_get_drvdata(pdev);
@@ -415,8 +415,7 @@ static int __maybe_unused imx8qxp_pc_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops imx8qxp_pc_pm_ops = {
- SET_RUNTIME_PM_OPS(imx8qxp_pc_runtime_suspend,
- imx8qxp_pc_runtime_resume, NULL)
+ RUNTIME_PM_OPS(imx8qxp_pc_runtime_suspend, imx8qxp_pc_runtime_resume, NULL)
};
static const struct of_device_id imx8qxp_pc_dt_ids[] = {
@@ -428,9 +427,9 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pc_dt_ids);
static struct platform_driver imx8qxp_pc_bridge_driver = {
.probe = imx8qxp_pc_bridge_probe,
- .remove_new = imx8qxp_pc_bridge_remove,
+ .remove = imx8qxp_pc_bridge_remove,
.driver = {
- .pm = &imx8qxp_pc_pm_ops,
+ .pm = pm_ptr(&imx8qxp_pc_pm_ops),
.name = DRIVER_NAME,
.of_match_table = imx8qxp_pc_dt_ids,
},
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index 1d11cc1df43c..4b0715ed6f38 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -409,7 +409,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pixel_link_dt_ids);
static struct platform_driver imx8qxp_pixel_link_bridge_driver = {
.probe = imx8qxp_pixel_link_bridge_probe,
- .remove_new = imx8qxp_pixel_link_bridge_remove,
+ .remove = imx8qxp_pixel_link_bridge_remove,
.driver = {
.of_match_table = imx8qxp_pixel_link_dt_ids,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index fb7cf4369bb8..65cf3a6c8ec6 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -467,7 +467,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pxl2dpi_dt_ids);
static struct platform_driver imx8qxp_pxl2dpi_bridge_driver = {
.probe = imx8qxp_pxl2dpi_bridge_probe,
- .remove_new = imx8qxp_pxl2dpi_bridge_remove,
+ .remove = imx8qxp_pxl2dpi_bridge_remove,
.driver = {
.of_match_table = imx8qxp_pxl2dpi_dt_ids,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
index 2347f8dd632f..bea8346515b8 100644
--- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
@@ -904,7 +904,7 @@ MODULE_DEVICE_TABLE(of, imx93_dsi_dt_ids);
static struct platform_driver imx93_dsi_driver = {
.probe = imx93_dsi_probe,
- .remove_new = imx93_dsi_remove,
+ .remove = imx93_dsi_remove,
.driver = {
.of_match_table = imx93_dsi_dt_ids,
.name = "imx93_mipi_dsi",
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
new file mode 100644
index 000000000000..306b5e374b9e
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -0,0 +1,907 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+
+/* -----------------------------------------------------------------------------
+ * LVDS registers
+ */
+
+/* LVDS software reset registers */
+#define LVDS_REG_05 0x05
+#define REG_SOFT_P_RST BIT(1)
+
+/* LVDS system configuration registers */
+/* 0x0b */
+#define LVDS_REG_0B 0x0b
+#define REG_SSC_PCLK_RF BIT(0)
+#define REG_LVDS_IN_SWAP BIT(1)
+
+/* LVDS test pattern gen control registers */
+/* 0x2c */
+#define LVDS_REG_2C 0x2c
+#define REG_COL_DEP GENMASK(1, 0)
+#define BIT8 FIELD_PREP(REG_COL_DEP, 1)
+#define OUT_MAP BIT(4)
+#define VESA BIT(4)
+#define JEIDA 0
+#define REG_DESSC_ENB BIT(6)
+#define DMODE BIT(7)
+#define DISO BIT(7)
+#define SISO 0
+
+#define LVDS_REG_3C 0x3c
+#define LVDS_REG_3F 0x3f
+#define LVDS_REG_47 0x47
+#define LVDS_REG_48 0x48
+#define LVDS_REG_4F 0x4f
+#define LVDS_REG_52 0x52
+
+/* -----------------------------------------------------------------------------
+ * HDMI registers are separated into three banks:
+ * 1) HDMI register common bank: 0x00 ~ 0x2f
+ */
+
+/* HDMI general registers */
+#define HDMI_REG_SW_RST 0x04
+#define SOFTREF_RST BIT(5)
+#define SOFTA_RST BIT(4)
+#define SOFTV_RST BIT(3)
+#define AUD_RST BIT(2)
+#define HDCP_RST BIT(0)
+#define HDMI_RST_ALL (SOFTREF_RST | SOFTA_RST | SOFTV_RST | \
+ AUD_RST | HDCP_RST)
+
+#define HDMI_REG_SYS_STATUS 0x0e
+#define HPDETECT BIT(6)
+#define TXVIDSTABLE BIT(4)
+
+#define HDMI_REG_BANK_CTRL 0x0f
+#define REG_BANK_SEL BIT(0)
+
+/* HDMI System DDC control registers */
+#define HDMI_REG_DDC_MASTER_CTRL 0x10
+#define MASTER_SEL_HOST BIT(0)
+
+#define HDMI_REG_DDC_HEADER 0x11
+
+#define HDMI_REG_DDC_REQOFF 0x12
+#define HDMI_REG_DDC_REQCOUNT 0x13
+#define HDMI_REG_DDC_EDIDSEG 0x14
+
+#define HDMI_REG_DDC_CMD 0x15
+#define DDC_CMD_EDID_READ 0x3
+#define DDC_CMD_FIFO_CLR 0x9
+
+#define HDMI_REG_DDC_STATUS 0x16
+#define DDC_DONE BIT(7)
+#define DDC_NOACK BIT(5)
+#define DDC_WAITBUS BIT(4)
+#define DDC_ARBILOSE BIT(3)
+#define DDC_ERROR (DDC_NOACK | DDC_WAITBUS | DDC_ARBILOSE)
+
+#define HDMI_DDC_FIFO_BYTES 32
+#define HDMI_REG_DDC_READFIFO 0x17
+#define HDMI_REG_LVDS_PORT 0x1d /* LVDS input control I2C addr */
+#define HDMI_REG_LVDS_PORT_EN 0x1e
+#define LVDS_INPUT_CTRL_I2C_ADDR 0x33
+
+/* -----------------------------------------------------------------------------
+ * 2) HDMI register bank0: 0x30 ~ 0xff
+ */
+
+/* HDMI AFE registers */
+#define HDMI_REG_AFE_DRV_CTRL 0x61
+#define AFE_DRV_PWD BIT(5)
+#define AFE_DRV_RST BIT(4)
+
+#define HDMI_REG_AFE_XP_CTRL 0x62
+#define AFE_XP_GAINBIT BIT(7)
+#define AFE_XP_ER0 BIT(4)
+#define AFE_XP_RESETB BIT(3)
+
+#define HDMI_REG_AFE_ISW_CTRL 0x63
+
+#define HDMI_REG_AFE_IP_CTRL 0x64
+#define AFE_IP_GAINBIT BIT(7)
+#define AFE_IP_ER0 BIT(3)
+#define AFE_IP_RESETB BIT(2)
+
+/* HDMI input data format registers */
+#define HDMI_REG_INPUT_MODE 0x70
+#define IN_RGB 0x00
+
+/* HDMI general control registers */
+#define HDMI_REG_HDMI_MODE 0xc0
+#define TX_HDMI_MODE BIT(0)
+
+#define HDMI_REG_GCP 0xc1
+#define AVMUTE BIT(0)
+#define HDMI_COLOR_DEPTH GENMASK(6, 4)
+#define HDMI_COLOR_DEPTH_24 FIELD_PREP(HDMI_COLOR_DEPTH, 4)
+
+#define HDMI_REG_PKT_GENERAL_CTRL 0xc6
+#define HDMI_REG_AVI_INFOFRM_CTRL 0xcd
+#define ENABLE_PKT BIT(0)
+#define REPEAT_PKT BIT(1)
+
+/* -----------------------------------------------------------------------------
+ * 3) HDMI register bank1: 0x130 ~ 0x1ff (HDMI packet registers)
+ */
+
+/* AVI packet registers */
+#define HDMI_REG_AVI_DB1 0x158
+#define HDMI_REG_AVI_DB2 0x159
+#define HDMI_REG_AVI_DB3 0x15a
+#define HDMI_REG_AVI_DB4 0x15b
+#define HDMI_REG_AVI_DB5 0x15c
+#define HDMI_REG_AVI_CSUM 0x15d
+#define HDMI_REG_AVI_DB6 0x15e
+#define HDMI_REG_AVI_DB7 0x15f
+#define HDMI_REG_AVI_DB8 0x160
+#define HDMI_REG_AVI_DB9 0x161
+#define HDMI_REG_AVI_DB10 0x162
+#define HDMI_REG_AVI_DB11 0x163
+#define HDMI_REG_AVI_DB12 0x164
+#define HDMI_REG_AVI_DB13 0x165
+
+#define HDMI_AVI_DB_CHUNK1_SIZE (HDMI_REG_AVI_DB5 - HDMI_REG_AVI_DB1 + 1)
+#define HDMI_AVI_DB_CHUNK2_SIZE (HDMI_REG_AVI_DB13 - HDMI_REG_AVI_DB6 + 1)
+
+/* IT6263 data sheet Rev0.8: LVDS RX supports input clock rate up to 150MHz. */
+#define MAX_PIXEL_CLOCK_KHZ 150000
+
+/* IT6263 programming guide Ver0.90: PCLK_HIGH for TMDS clock over 80MHz. */
+#define HIGH_PIXEL_CLOCK_KHZ 80000
+
+/*
+ * IT6263 data sheet Rev0.8: HDMI TX supports link speeds of up to 2.25Gbps
+ * (link clock rate of 225MHz).
+ */
+#define MAX_HDMI_TMDS_CHAR_RATE_HZ 225000000
+
+struct it6263 {
+ struct device *dev;
+ struct i2c_client *hdmi_i2c;
+ struct i2c_client *lvds_i2c;
+ struct regmap *hdmi_regmap;
+ struct regmap *lvds_regmap;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ int lvds_data_mapping;
+ bool lvds_dual_link;
+ bool lvds_link12_swap;
+};
+
+static inline struct it6263 *bridge_to_it6263(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct it6263, bridge);
+}
+
+static bool it6263_hdmi_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case HDMI_REG_SW_RST:
+ case HDMI_REG_BANK_CTRL:
+ case HDMI_REG_DDC_MASTER_CTRL:
+ case HDMI_REG_DDC_HEADER:
+ case HDMI_REG_DDC_REQOFF:
+ case HDMI_REG_DDC_REQCOUNT:
+ case HDMI_REG_DDC_EDIDSEG:
+ case HDMI_REG_DDC_CMD:
+ case HDMI_REG_LVDS_PORT:
+ case HDMI_REG_LVDS_PORT_EN:
+ case HDMI_REG_AFE_DRV_CTRL:
+ case HDMI_REG_AFE_XP_CTRL:
+ case HDMI_REG_AFE_ISW_CTRL:
+ case HDMI_REG_AFE_IP_CTRL:
+ case HDMI_REG_INPUT_MODE:
+ case HDMI_REG_HDMI_MODE:
+ case HDMI_REG_GCP:
+ case HDMI_REG_PKT_GENERAL_CTRL:
+ case HDMI_REG_AVI_INFOFRM_CTRL:
+ case HDMI_REG_AVI_DB1:
+ case HDMI_REG_AVI_DB2:
+ case HDMI_REG_AVI_DB3:
+ case HDMI_REG_AVI_DB4:
+ case HDMI_REG_AVI_DB5:
+ case HDMI_REG_AVI_CSUM:
+ case HDMI_REG_AVI_DB6:
+ case HDMI_REG_AVI_DB7:
+ case HDMI_REG_AVI_DB8:
+ case HDMI_REG_AVI_DB9:
+ case HDMI_REG_AVI_DB10:
+ case HDMI_REG_AVI_DB11:
+ case HDMI_REG_AVI_DB12:
+ case HDMI_REG_AVI_DB13:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool it6263_hdmi_readable_reg(struct device *dev, unsigned int reg)
+{
+ if (it6263_hdmi_writeable_reg(dev, reg))
+ return true;
+
+ switch (reg) {
+ case HDMI_REG_SYS_STATUS:
+ case HDMI_REG_DDC_STATUS:
+ case HDMI_REG_DDC_READFIFO:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool it6263_hdmi_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case HDMI_REG_SW_RST:
+ case HDMI_REG_SYS_STATUS:
+ case HDMI_REG_DDC_STATUS:
+ case HDMI_REG_DDC_READFIFO:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_range_cfg it6263_hdmi_range_cfg = {
+ .range_min = 0x00,
+ .range_max = HDMI_REG_AVI_DB13,
+ .selector_reg = HDMI_REG_BANK_CTRL,
+ .selector_mask = REG_BANK_SEL,
+ .selector_shift = 0,
+ .window_start = 0x00,
+ .window_len = 0x100,
+};
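The range config gives the HDMI regmap a flat virtual address space: 0x00-0xff maps to bank 0 and 0x100-0x1ff to bank 1, with regmap toggling REG_BANK_SEL in HDMI_REG_BANK_CTRL before each wire access. A usage sketch, assuming a populated context struct like the one above (the function name is hypothetical):

static int it6263_read_avi_db1_example(struct it6263 *it, unsigned int *val)
{
	/* 0x158 lies past the 0x100-byte window, so regmap first writes 1
	 * to the selector bit at 0x0f, then reads offset 0x58 on the wire.
	 */
	return regmap_read(it->hdmi_regmap, HDMI_REG_AVI_DB1, val);
}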
+
+static const struct regmap_config it6263_hdmi_regmap_config = {
+ .name = "it6263-hdmi",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = it6263_hdmi_writeable_reg,
+ .readable_reg = it6263_hdmi_readable_reg,
+ .volatile_reg = it6263_hdmi_volatile_reg,
+ .max_register = HDMI_REG_AVI_DB13,
+ .ranges = &it6263_hdmi_range_cfg,
+ .num_ranges = 1,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static bool it6263_lvds_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LVDS_REG_05:
+ case LVDS_REG_0B:
+ case LVDS_REG_2C:
+ case LVDS_REG_3C:
+ case LVDS_REG_3F:
+ case LVDS_REG_47:
+ case LVDS_REG_48:
+ case LVDS_REG_4F:
+ case LVDS_REG_52:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool it6263_lvds_readable_reg(struct device *dev, unsigned int reg)
+{
+ return it6263_lvds_writeable_reg(dev, reg);
+}
+
+static bool it6263_lvds_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg == LVDS_REG_05;
+}
+
+static const struct regmap_config it6263_lvds_regmap_config = {
+ .name = "it6263-lvds",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = it6263_lvds_writeable_reg,
+ .readable_reg = it6263_lvds_readable_reg,
+ .volatile_reg = it6263_lvds_volatile_reg,
+ .max_register = LVDS_REG_52,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const char * const it6263_supplies[] = {
+ "ivdd", "ovdd", "txavcc18", "txavcc33", "pvcc1", "pvcc2",
+ "avcc", "anvdd", "apvdd"
+};
+
+static int it6263_parse_dt(struct it6263 *it)
+{
+ struct device *dev = it->dev;
+ struct device_node *port0, *port1;
+ int ret = 0;
+
+ it->lvds_data_mapping = drm_of_lvds_get_data_mapping(dev->of_node);
+ if (it->lvds_data_mapping < 0) {
+ dev_err(dev, "%pOF: invalid or missing %s DT property: %d\n",
+ dev->of_node, "data-mapping", it->lvds_data_mapping);
+ return it->lvds_data_mapping;
+ }
+
+ it->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0);
+ if (IS_ERR(it->next_bridge))
+ return dev_err_probe(dev, PTR_ERR(it->next_bridge),
+ "failed to get next bridge\n");
+
+ port0 = of_graph_get_port_by_id(dev->of_node, 0);
+ port1 = of_graph_get_port_by_id(dev->of_node, 1);
+ if (port0 && port1) {
+ int order;
+
+ it->lvds_dual_link = true;
+ order = drm_of_lvds_get_dual_link_pixel_order_sink(port0, port1);
+ if (order < 0) {
+ dev_err(dev,
+ "failed to get dual link pixel order: %d\n",
+ order);
+ ret = order;
+ } else if (order == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
+ it->lvds_link12_swap = true;
+ }
+ } else if (port1) {
+ ret = -EINVAL;
+ dev_err(dev, "single input LVDS port1 is not supported\n");
+ } else if (!port0) {
+ ret = -EINVAL;
+ dev_err(dev, "no input LVDS port\n");
+ }
+
+ of_node_put(port0);
+ of_node_put(port1);
+
+ return ret;
+}
+
+static inline void it6263_hw_reset(struct gpio_desc *reset_gpio)
+{
+ if (!reset_gpio)
+ return;
+
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ fsleep(1000);
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ /* The chip maker says the low pulse should be at least 40ms. */
+ fsleep(40000);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ /* additional time to wait for the high voltage to stabilize */
+ fsleep(5000);
+}
+
+static inline int it6263_lvds_set_i2c_addr(struct it6263 *it)
+{
+ int ret;
+
+ ret = regmap_write(it->hdmi_regmap, HDMI_REG_LVDS_PORT,
+ LVDS_INPUT_CTRL_I2C_ADDR << 1);
+ if (ret)
+ return ret;
+
+ return regmap_write(it->hdmi_regmap, HDMI_REG_LVDS_PORT_EN, BIT(0));
+}
+
+static inline void it6263_lvds_reset(struct it6263 *it)
+{
+ /* AFE PLL reset */
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, BIT(0), 0x0);
+ fsleep(1000);
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, BIT(0), BIT(0));
+
+ /* software pixel clock domain reset */
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_05, REG_SOFT_P_RST,
+ REG_SOFT_P_RST);
+ fsleep(1000);
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_05, REG_SOFT_P_RST, 0x0);
+ fsleep(10000);
+}
+
+static inline bool it6263_is_input_bus_fmt_valid(int input_fmt)
+{
+ switch (input_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ return true;
+ }
+ return false;
+}
+
+static inline void it6263_lvds_set_interface(struct it6263 *it)
+{
+ u8 fmt;
+
+ /* color depth */
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_COL_DEP, BIT8);
+
+ if (it->lvds_data_mapping == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG)
+ fmt = VESA;
+ else
+ fmt = JEIDA;
+
+ /* output mapping */
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, OUT_MAP, fmt);
+
+ if (it->lvds_dual_link) {
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, DISO);
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_52, BIT(1), BIT(1));
+ } else {
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, SISO);
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_52, BIT(1), 0);
+ }
+}
+
+static inline void it6263_lvds_set_afe(struct it6263 *it)
+{
+ regmap_write(it->lvds_regmap, LVDS_REG_3C, 0xaa);
+ regmap_write(it->lvds_regmap, LVDS_REG_3F, 0x02);
+ regmap_write(it->lvds_regmap, LVDS_REG_47, 0xaa);
+ regmap_write(it->lvds_regmap, LVDS_REG_48, 0x02);
+ regmap_write(it->lvds_regmap, LVDS_REG_4F, 0x11);
+
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_0B, REG_SSC_PCLK_RF,
+ REG_SSC_PCLK_RF);
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_3C, 0x07, 0);
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_DESSC_ENB,
+ REG_DESSC_ENB);
+}
+
+static inline void it6263_lvds_sys_cfg(struct it6263 *it)
+{
+ regmap_write_bits(it->lvds_regmap, LVDS_REG_0B, REG_LVDS_IN_SWAP,
+ it->lvds_link12_swap ? REG_LVDS_IN_SWAP : 0);
+}
+
+static inline void it6263_lvds_config(struct it6263 *it)
+{
+ it6263_lvds_reset(it);
+ it6263_lvds_set_interface(it);
+ it6263_lvds_set_afe(it);
+ it6263_lvds_sys_cfg(it);
+}
+
+static inline void it6263_hdmi_config(struct it6263 *it)
+{
+ regmap_write(it->hdmi_regmap, HDMI_REG_SW_RST, HDMI_RST_ALL);
+ regmap_write(it->hdmi_regmap, HDMI_REG_INPUT_MODE, IN_RGB);
+ regmap_write_bits(it->hdmi_regmap, HDMI_REG_GCP, HDMI_COLOR_DEPTH,
+ HDMI_COLOR_DEPTH_24);
+}
+
+static enum drm_connector_status it6263_detect(struct it6263 *it)
+{
+ unsigned int val;
+
+ regmap_read(it->hdmi_regmap, HDMI_REG_SYS_STATUS, &val);
+ if (val & HPDETECT)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static int it6263_read_edid(void *data, u8 *buf, unsigned int block, size_t len)
+{
+ struct it6263 *it = data;
+ struct regmap *regmap = it->hdmi_regmap;
+ unsigned int start = (block % 2) * EDID_LENGTH;
+ unsigned int segment = block >> 1;
+ unsigned int count, val;
+ int ret;
+
+ regmap_write(regmap, HDMI_REG_DDC_MASTER_CTRL, MASTER_SEL_HOST);
+ regmap_write(regmap, HDMI_REG_DDC_HEADER, DDC_ADDR << 1);
+ regmap_write(regmap, HDMI_REG_DDC_EDIDSEG, segment);
+
+ while (len) {
+ /* clear DDC FIFO */
+ regmap_write(regmap, HDMI_REG_DDC_CMD, DDC_CMD_FIFO_CLR);
+
+ ret = regmap_read_poll_timeout(regmap, HDMI_REG_DDC_STATUS,
+ val, val & DDC_DONE,
+ 2000, 10000);
+ if (ret) {
+ dev_err(it->dev, "failed to clear DDC FIFO:%d\n", ret);
+ return ret;
+ }
+
+ count = len > HDMI_DDC_FIFO_BYTES ? HDMI_DDC_FIFO_BYTES : len;
+
+ /* fire the read command */
+ regmap_write(regmap, HDMI_REG_DDC_REQOFF, start);
+ regmap_write(regmap, HDMI_REG_DDC_REQCOUNT, count);
+ regmap_write(regmap, HDMI_REG_DDC_CMD, DDC_CMD_EDID_READ);
+
+ start += count;
+ len -= count;
+
+ ret = regmap_read_poll_timeout(regmap, HDMI_REG_DDC_STATUS, val,
+ val & (DDC_DONE | DDC_ERROR),
+ 20000, 250000);
+ if (ret && !(val & DDC_ERROR)) {
+ dev_err(it->dev, "failed to read EDID:%d\n", ret);
+ return ret;
+ }
+
+ if (val & DDC_ERROR) {
+ dev_err(it->dev, "DDC error\n");
+ return -EIO;
+ }
+
+ /* cache to buffer */
+ for (; count > 0; count--) {
+ regmap_read(regmap, HDMI_REG_DDC_READFIFO, &val);
+ *(buf++) = val;
+ }
+ }
+
+ return 0;
+}
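The block-to-address mapping at the top of the function follows the usual E-DDC scheme: EDID blocks are 128 bytes and two blocks share one 256-byte segment. A standalone illustration in plain C:

#include <stdio.h>

#define EDID_LENGTH 128

int main(void)
{
	for (unsigned int block = 0; block < 4; block++)
		printf("block %u -> segment %u, offset %u\n",
		       block, block >> 1, (block % 2) * EDID_LENGTH);
	return 0;
}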
+
+static void
+it6263_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct it6263 *it = bridge_to_it6263(bridge);
+
+ regmap_write_bits(it->hdmi_regmap, HDMI_REG_GCP, AVMUTE, AVMUTE);
+ regmap_write(it->hdmi_regmap, HDMI_REG_PKT_GENERAL_CTRL, 0);
+ regmap_write(it->hdmi_regmap, HDMI_REG_AFE_DRV_CTRL,
+ AFE_DRV_RST | AFE_DRV_PWD);
+}
+
+static void
+it6263_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct drm_atomic_state *state = old_bridge_state->base.state;
+ struct it6263 *it = bridge_to_it6263(bridge);
+ const struct drm_crtc_state *crtc_state;
+ struct regmap *regmap = it->hdmi_regmap;
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
+ bool is_stable = false;
+ struct drm_crtc *crtc;
+ unsigned int val;
+ bool pclk_high;
+ int i, ret;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ mode = &crtc_state->adjusted_mode;
+
+ regmap_write(regmap, HDMI_REG_HDMI_MODE, TX_HDMI_MODE);
+
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
+
+ /* HDMI AFE setup */
+ pclk_high = mode->clock > HIGH_PIXEL_CLOCK_KHZ;
+ regmap_write(regmap, HDMI_REG_AFE_DRV_CTRL, AFE_DRV_RST);
+ if (pclk_high)
+ regmap_write(regmap, HDMI_REG_AFE_XP_CTRL,
+ AFE_XP_GAINBIT | AFE_XP_RESETB);
+ else
+ regmap_write(regmap, HDMI_REG_AFE_XP_CTRL,
+ AFE_XP_ER0 | AFE_XP_RESETB);
+ regmap_write(regmap, HDMI_REG_AFE_ISW_CTRL, 0x10);
+ if (pclk_high)
+ regmap_write(regmap, HDMI_REG_AFE_IP_CTRL,
+ AFE_IP_GAINBIT | AFE_IP_RESETB);
+ else
+ regmap_write(regmap, HDMI_REG_AFE_IP_CTRL,
+ AFE_IP_ER0 | AFE_IP_RESETB);
+
+ /* HDMI software video reset */
+ regmap_write_bits(regmap, HDMI_REG_SW_RST, SOFTV_RST, SOFTV_RST);
+ fsleep(1000);
+ regmap_write_bits(regmap, HDMI_REG_SW_RST, SOFTV_RST, 0);
+
+ /* reconfigure LVDS and retry several times in case the video is unstable */
+ for (i = 0; i < 3; i++) {
+ ret = regmap_read_poll_timeout(regmap, HDMI_REG_SYS_STATUS, val,
+ val & TXVIDSTABLE,
+ 20000, 500000);
+ if (!ret) {
+ is_stable = true;
+ break;
+ }
+
+ it6263_lvds_config(it);
+ }
+
+ if (!is_stable)
+ dev_warn(it->dev, "failed to wait for video stable\n");
+
+ /* HDMI AFE reset release and power up */
+ regmap_write(regmap, HDMI_REG_AFE_DRV_CTRL, 0);
+
+ regmap_write_bits(regmap, HDMI_REG_GCP, AVMUTE, 0);
+
+ regmap_write(regmap, HDMI_REG_PKT_GENERAL_CTRL, ENABLE_PKT | REPEAT_PKT);
+}
+
+static enum drm_mode_status
+it6263_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ unsigned long long rate;
+
+ rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
+ if (rate == 0)
+ return MODE_NOCLOCK;
+
+ return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, rate);
+}
+
+static int it6263_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct it6263 *it = bridge_to_it6263(bridge);
+ struct drm_connector *connector;
+ int ret;
+
+ ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge,
+ flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0)
+ return ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
+ connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(it->dev, "failed to initialize bridge connector: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_connector_attach_encoder(connector, bridge->encoder);
+
+ return 0;
+}
+
+static enum drm_connector_status it6263_bridge_detect(struct drm_bridge *bridge)
+{
+ struct it6263 *it = bridge_to_it6263(bridge);
+
+ return it6263_detect(it);
+}
+
+static const struct drm_edid *
+it6263_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct it6263 *it = bridge_to_it6263(bridge);
+
+ return drm_edid_read_custom(connector, it6263_read_edid, it);
+}
+
+static u32 *
+it6263_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct it6263 *it = bridge_to_it6263(bridge);
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ if (!it6263_is_input_bus_fmt_valid(it->lvds_data_mapping))
+ return NULL;
+
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = it->lvds_data_mapping;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static enum drm_mode_status
+it6263_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
+{
+ if (mode->clock > MAX_PIXEL_CLOCK_KHZ)
+ return MODE_CLOCK_HIGH;
+
+ if (tmds_rate > MAX_HDMI_TMDS_CHAR_RATE_HZ)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
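Both limits above are checked because they constrain different clocks: mode->clock is the LVDS input pixel clock, while the TMDS character rate also depends on the output format. For 8 bpc RGB the two are equal; deeper color depths scale the character rate. A standalone sketch of that relationship, using a hypothetical 1080p60 pixel clock:

#include <stdio.h>

int main(void)
{
	unsigned long long pixel_clock_hz = 148500000;	/* hypothetical */

	/* 8 bpc RGB: character rate == pixel clock;
	 * 10 and 12 bpc scale it by 5/4 and 3/2 respectively.
	 */
	printf("8 bpc:  %llu Hz\n", pixel_clock_hz);
	printf("10 bpc: %llu Hz\n", pixel_clock_hz * 5 / 4);
	printf("12 bpc: %llu Hz\n", pixel_clock_hz * 3 / 2);
	return 0;
}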
+
+static int it6263_hdmi_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
+{
+ struct it6263 *it = bridge_to_it6263(bridge);
+
+ if (type == HDMI_INFOFRAME_TYPE_AVI)
+ regmap_write(it->hdmi_regmap, HDMI_REG_AVI_INFOFRM_CTRL, 0);
+ else
+ dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type);
+
+ return 0;
+}
+
+static int it6263_hdmi_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct it6263 *it = bridge_to_it6263(bridge);
+ struct regmap *regmap = it->hdmi_regmap;
+
+ if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type);
+ return 0;
+ }
+
+ /* write the first AVI infoframe data byte chunk (DB1-DB5) */
+ regmap_bulk_write(regmap, HDMI_REG_AVI_DB1,
+ &buffer[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_AVI_DB_CHUNK1_SIZE);
+
+ /* write the second AVI infoframe data byte chunk (DB6-DB13) */
+ regmap_bulk_write(regmap, HDMI_REG_AVI_DB6,
+ &buffer[HDMI_INFOFRAME_HEADER_SIZE +
+ HDMI_AVI_DB_CHUNK1_SIZE],
+ HDMI_AVI_DB_CHUNK2_SIZE);
+
+ /* write checksum */
+ regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]);
+
+ regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL, ENABLE_PKT | REPEAT_PKT);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs it6263_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .attach = it6263_bridge_attach,
+ .mode_valid = it6263_bridge_mode_valid,
+ .atomic_disable = it6263_bridge_atomic_disable,
+ .atomic_enable = it6263_bridge_atomic_enable,
+ .detect = it6263_bridge_detect,
+ .edid_read = it6263_bridge_edid_read,
+ .atomic_get_input_bus_fmts = it6263_bridge_atomic_get_input_bus_fmts,
+ .hdmi_tmds_char_rate_valid = it6263_hdmi_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = it6263_hdmi_clear_infoframe,
+ .hdmi_write_infoframe = it6263_hdmi_write_infoframe,
+};
+
+static int it6263_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct gpio_desc *reset_gpio;
+ struct it6263 *it;
+ int ret;
+
+ it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
+ if (!it)
+ return -ENOMEM;
+
+ it->dev = dev;
+ it->hdmi_i2c = client;
+
+ it->hdmi_regmap = devm_regmap_init_i2c(client,
+ &it6263_hdmi_regmap_config);
+ if (IS_ERR(it->hdmi_regmap))
+ return dev_err_probe(dev, PTR_ERR(it->hdmi_regmap),
+ "failed to init I2C regmap for HDMI\n");
+
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "failed to get reset gpio\n");
+
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(it6263_supplies),
+ it6263_supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get power supplies\n");
+
+ ret = it6263_parse_dt(it);
+ if (ret)
+ return ret;
+
+ it6263_hw_reset(reset_gpio);
+
+ ret = it6263_lvds_set_i2c_addr(it);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set I2C addr\n");
+
+ it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter,
+ LVDS_INPUT_CTRL_I2C_ADDR);
+ if (IS_ERR(it->lvds_i2c))
+ return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
+ "failed to allocate I2C device for LVDS\n");
+
+ it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c,
+ &it6263_lvds_regmap_config);
+ if (IS_ERR(it->lvds_regmap))
+ return dev_err_probe(dev, PTR_ERR(it->lvds_regmap),
+ "failed to init I2C regmap for LVDS\n");
+
+ it6263_lvds_config(it);
+ it6263_hdmi_config(it);
+
+ i2c_set_clientdata(client, it);
+
+ it->bridge.funcs = &it6263_bridge_funcs;
+ it->bridge.of_node = dev->of_node;
+ /* IT6263 chip doesn't support HPD interrupt. */
+ it->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI;
+ it->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ it->bridge.vendor = "ITE";
+ it->bridge.product = "IT6263";
+
+ return devm_drm_bridge_add(dev, &it->bridge);
+}
+
+static const struct of_device_id it6263_of_match[] = {
+ { .compatible = "ite,it6263", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, it6263_of_match);
+
+static const struct i2c_device_id it6263_i2c_ids[] = {
+ { "it6263" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, it6263_i2c_ids);
+
+static struct i2c_driver it6263_driver = {
+ .probe = it6263_probe,
+ .driver = {
+ .name = "it6263",
+ .of_match_table = it6263_of_match,
+ },
+ .id_table = it6263_i2c_ids,
+};
+module_i2c_driver(it6263_driver);
+
+MODULE_DESCRIPTION("ITE Tech. Inc. IT6263 LVDS/HDMI bridge");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 87b8545fccc0..88ef76a37fe6 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -19,6 +19,7 @@
#include <linux/regulator/consumer.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/bitfield.h>
#include <crypto/hash.h>
@@ -126,6 +127,7 @@
#define REG_AUX_OUT_DATA0 0x27
#define REG_AUX_CMD_REQ 0x2B
+#define M_AUX_REQ_CMD 0x0F
#define AUX_BUSY BIT(5)
#define REG_AUX_DATA_0_7 0x2C
@@ -266,6 +268,18 @@
#define REG_SSC_CTRL1 0x189
#define REG_SSC_CTRL2 0x18A
+#define REG_AUX_USER_CTRL 0x190
+#define EN_USER_AUX BIT(0)
+#define USER_AUX_DONE BIT(1)
+#define AUX_EVENT BIT(4)
+
+#define REG_AUX_USER_DATA_REC 0x191
+#define M_AUX_IN_REC 0xF0
+#define M_AUX_OUT_REC 0x0F
+
+#define REG_AUX_USER_REPLY 0x19A
+#define REG_AUX_USER_RXB(n) (n + 0x19B)
+
#define RBR DP_LINK_BW_1_62
#define HBR DP_LINK_BW_2_7
#define HBR2 DP_LINK_BW_5_4
@@ -296,11 +310,13 @@
#define MAX_LANE_COUNT 4
#define MAX_LINK_RATE HBR
#define AUTO_TRAIN_RETRY 3
-#define MAX_HDCP_DOWN_STREAM_COUNT 10
+#define MAX_HDCP_DOWN_STREAM_COUNT 127
#define MAX_CR_LEVEL 0x03
#define MAX_EQ_LEVEL 0x03
#define AUX_WAIT_TIMEOUT_MS 15
-#define AUX_FIFO_MAX_SIZE 32
+#define AUX_FIFO_MAX_SIZE 16
+#define AUX_I2C_MAX_SIZE 4
+#define AUX_I2C_DEFER_RETRY 4
#define PIXEL_CLK_DELAY 1
#define PIXEL_CLK_INVERSE 0
#define ADJUST_PHASE_THRESHOLD 80000
@@ -323,7 +339,15 @@
enum aux_cmd_type {
CMD_AUX_NATIVE_READ = 0x0,
CMD_AUX_NATIVE_WRITE = 0x5,
+ CMD_AUX_GI2C_ADR = 0x08,
+ CMD_AUX_GI2C_READ = 0x09,
+ CMD_AUX_GI2C_WRITE = 0x0A,
CMD_AUX_I2C_EDID_READ = 0xB,
+ CMD_AUX_I2C_READ = 0x0D,
+ CMD_AUX_I2C_WRITE = 0x0C,
+
+ /* KSV read with AUX FIFO, extended from CMD_AUX_NATIVE_READ */
+ CMD_AUX_GET_KSV_LIST = 0x10,
};
enum aux_cmd_reply {
@@ -965,7 +989,8 @@ static ssize_t it6505_aux_operation(struct it6505 *it6505,
it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, AUX_USER_MODE);
aux_op_start:
- if (cmd == CMD_AUX_I2C_EDID_READ) {
+ /* HW AUX FIFO supports only the EDID and DPCD KSV FIFO areas */
+ if (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) {
/* AUX EDID FIFO has max length of AUX_FIFO_MAX_SIZE bytes. */
size = min_t(size_t, size, AUX_FIFO_MAX_SIZE);
/* Enable AUX FIFO read back and clear FIFO */
@@ -996,7 +1021,7 @@ aux_op_start:
size);
/* Aux Fire */
- it6505_write(it6505, REG_AUX_CMD_REQ, cmd);
+ it6505_write(it6505, REG_AUX_CMD_REQ, FIELD_GET(M_AUX_REQ_CMD, cmd));
ret = it6505_aux_wait(it6505);
if (ret < 0)
@@ -1030,7 +1055,7 @@ aux_op_start:
goto aux_op_start;
}
- if (cmd == CMD_AUX_I2C_EDID_READ) {
+ if (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) {
for (i = 0; i < size; i++) {
ret = it6505_read(it6505, REG_AUX_DATA_FIFO);
if (ret < 0)
@@ -1055,7 +1080,7 @@ aux_op_start:
ret = i;
aux_op_err:
- if (cmd == CMD_AUX_I2C_EDID_READ) {
+ if (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) {
/* clear AUX FIFO */
it6505_set_bits(it6505, REG_AUX_CTRL,
AUX_EN_FIFO_READ | CLR_EDID_FIFO,
@@ -1076,10 +1101,14 @@ static ssize_t it6505_aux_do_transfer(struct it6505 *it6505,
size_t size, enum aux_cmd_reply *reply)
{
int i, ret_size, ret = 0, request_size;
+ int fifo_max_size = (cmd == CMD_AUX_I2C_EDID_READ || cmd == CMD_AUX_GET_KSV_LIST) ?
+ AUX_FIFO_MAX_SIZE : 4;
mutex_lock(&it6505->aux_lock);
- for (i = 0; i < size; i += 4) {
- request_size = min((int)size - i, 4);
+ i = 0;
+ do {
+ request_size = min_t(int, (int)size - i, fifo_max_size);
+
ret_size = it6505_aux_operation(it6505, cmd, address + i,
buffer + i, request_size,
reply);
@@ -1088,14 +1117,170 @@ static ssize_t it6505_aux_do_transfer(struct it6505 *it6505,
goto aux_op_err;
}
+ i += request_size;
ret += ret_size;
- }
+ } while (i < size);
aux_op_err:
mutex_unlock(&it6505->aux_lock);
return ret;
}
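The rewritten loop sizes each chunk from the command type: EDID and KSV reads go through the 16-byte hardware AUX FIFO, everything else through the 4-byte data registers, and the do/while form issues at least one operation even for a zero-length request. A standalone sketch of the chunking, with hypothetical sizes:

#include <stdio.h>

#define AUX_FIFO_MAX_SIZE 16

int main(void)
{
	int size = 37, fifo_max_size = AUX_FIFO_MAX_SIZE;	/* hypothetical */
	int i = 0;

	do {
		int request_size = size - i < fifo_max_size ?
				   size - i : fifo_max_size;

		printf("transfer %d bytes at offset %d\n", request_size, i);
		i += request_size;
	} while (i < size);
	return 0;
}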
+static bool it6505_aux_i2c_reply_defer(u8 reply)
+{
+ if (reply == DP_AUX_NATIVE_REPLY_DEFER || reply == DP_AUX_I2C_REPLY_DEFER)
+ return true;
+ return false;
+}
+
+static bool it6505_aux_i2c_reply_nack(u8 reply)
+{
+ if (reply == DP_AUX_NATIVE_REPLY_NACK || reply == DP_AUX_I2C_REPLY_NACK)
+ return true;
+ return false;
+}
+
+static int it6505_aux_i2c_wait(struct it6505 *it6505, u8 *reply)
+{
+ int err = 0;
+ unsigned long timeout;
+ struct device *dev = it6505->dev;
+
+ timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
+
+ do {
+ if (it6505_read(it6505, REG_AUX_USER_CTRL) & AUX_EVENT)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "Timed out waiting AUX I2C, BUSY = %X\n",
+ it6505_aux_op_finished(it6505));
+ err = -ETIMEDOUT;
+ goto end_aux_i2c_wait;
+ }
+ usleep_range(300, 800);
+ } while (!it6505_aux_op_finished(it6505));
+
+ *reply = it6505_read(it6505, REG_AUX_USER_REPLY) >> 4;
+
+ if (*reply == 0)
+ goto end_aux_i2c_wait;
+
+ if (it6505_aux_i2c_reply_defer(*reply))
+ err = -EBUSY;
+ else if (it6505_aux_i2c_reply_nack(*reply))
+ err = -ENXIO;
+
+end_aux_i2c_wait:
+ it6505_set_bits(it6505, REG_AUX_USER_CTRL, USER_AUX_DONE, USER_AUX_DONE);
+ return err;
+}
+
+static int it6505_aux_i2c_readb(struct it6505 *it6505, u8 *buf, size_t size, u8 *reply)
+{
+ int ret, i;
+ int retry;
+
+ for (retry = 0; retry < AUX_I2C_DEFER_RETRY; retry++) {
+ it6505_write(it6505, REG_AUX_CMD_REQ, CMD_AUX_GI2C_READ);
+
+ ret = it6505_aux_i2c_wait(it6505, reply);
+ if (it6505_aux_i2c_reply_defer(*reply))
+ continue;
+ if (ret >= 0)
+ break;
+ }
+
+ for (i = 0; i < size; i++)
+ buf[i] = it6505_read(it6505, REG_AUX_USER_RXB(0 + i));
+
+ return size;
+}
+
+static int it6505_aux_i2c_writeb(struct it6505 *it6505, u8 *buf, size_t size, u8 *reply)
+{
+ int i, ret;
+ int retry;
+
+ for (i = 0; i < size; i++)
+ it6505_write(it6505, REG_AUX_OUT_DATA0 + i, buf[i]);
+
+ for (retry = 0; retry < AUX_I2C_DEFER_RETRY; retry++) {
+ it6505_write(it6505, REG_AUX_CMD_REQ, CMD_AUX_GI2C_WRITE);
+
+ ret = it6505_aux_i2c_wait(it6505, reply);
+ if (it6505_aux_i2c_reply_defer(*reply))
+ continue;
+ if (ret >= 0)
+ break;
+ }
+ return size;
+}
+
+static ssize_t it6505_aux_i2c_operation(struct it6505 *it6505,
+ struct drm_dp_aux_msg *msg)
+{
+ int ret;
+ ssize_t request_size, data_cnt = 0;
+ u8 *buffer = msg->buffer;
+
+ /* set AUX user mode */
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_USER_MODE | AUX_NO_SEGMENT_WR, AUX_USER_MODE);
+ it6505_set_bits(it6505, REG_AUX_USER_CTRL, EN_USER_AUX, EN_USER_AUX);
+ /* clear AUX FIFO */
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO);
+
+ it6505_set_bits(it6505, REG_AUX_CTRL,
+ AUX_EN_FIFO_READ | CLR_EDID_FIFO, 0x00);
+
+ it6505_write(it6505, REG_AUX_ADR_0_7, 0x00);
+ it6505_write(it6505, REG_AUX_ADR_8_15, msg->address << 1);
+
+ if (msg->size == 0) {
+ /* IIC Start/STOP dummy write */
+ it6505_write(it6505, REG_AUX_ADR_16_19, msg->request);
+ it6505_write(it6505, REG_AUX_CMD_REQ, CMD_AUX_GI2C_ADR);
+ ret = it6505_aux_i2c_wait(it6505, &msg->reply);
+ goto end_aux_i2c_transfer;
+ }
+
+ /* I2C data transfer */
+ data_cnt = 0;
+ do {
+ request_size = min_t(ssize_t, msg->size - data_cnt, AUX_I2C_MAX_SIZE);
+ it6505_write(it6505, REG_AUX_ADR_16_19,
+ msg->request | ((request_size - 1) << 4));
+ if ((msg->request & DP_AUX_I2C_READ) == DP_AUX_I2C_READ)
+ ret = it6505_aux_i2c_readb(it6505, &buffer[data_cnt],
+ request_size, &msg->reply);
+ else
+ ret = it6505_aux_i2c_writeb(it6505, &buffer[data_cnt],
+ request_size, &msg->reply);
+
+ if (ret < 0)
+ goto end_aux_i2c_transfer;
+
+ data_cnt += request_size;
+ } while (data_cnt < msg->size);
+ ret = data_cnt;
+end_aux_i2c_transfer:
+
+ it6505_set_bits(it6505, REG_AUX_USER_CTRL, EN_USER_AUX, 0);
+ it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, 0);
+ return ret;
+}
+
+static ssize_t it6505_aux_i2c_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct it6505 *it6505 = container_of(aux, struct it6505, aux);
+
+ guard(mutex)(&it6505->aux_lock);
+ return it6505_aux_i2c_operation(it6505, msg);
+}
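guard(mutex) comes from linux/cleanup.h: it takes the lock at the declaration and releases it automatically whenever the scope is left, on every return path, which is why the wrapper above needs no explicit unlock. A minimal sketch of the idiom, using a hypothetical lock and function:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);

static int foo_do_locked(int arg)
{
	guard(mutex)(&foo_lock);

	if (arg < 0)
		return -EINVAL;	/* mutex released automatically here... */

	return 0;		/* ...and here as well */
}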
+
static ssize_t it6505_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -1105,9 +1290,8 @@ static ssize_t it6505_aux_transfer(struct drm_dp_aux *aux,
int ret;
enum aux_cmd_reply reply;
- /* IT6505 doesn't support arbitrary I2C read / write. */
if (is_i2c)
- return -EINVAL;
+ return it6505_aux_i2c_transfer(aux, msg);
switch (msg->request) {
case DP_AUX_NATIVE_READ:
@@ -1178,6 +1362,37 @@ static int it6505_get_edid_block(void *data, u8 *buf, unsigned int block,
return 0;
}
+static int it6505_get_ksvlist(struct it6505 *it6505, u8 *buf, size_t len)
+{
+ struct device *dev = it6505->dev;
+ enum aux_cmd_reply reply;
+ int request_size, ret;
+ int i = 0;
+
+ do {
+ request_size = min_t(int, (int)len - i, 15);
+
+ ret = it6505_aux_do_transfer(it6505, CMD_AUX_GET_KSV_LIST,
+ DP_AUX_HDCP_KSV_FIFO,
+ buf + i, request_size, &reply);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "request_size = %d, ret =%d", request_size, ret);
+ if (ret < 0)
+ return ret;
+
+ i += request_size;
+ } while (i < len);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "ksv read cnt = %d down_stream_cnt=%d ", i, i / 5);
+
+ for (i = 0 ; i < len; i += 5) {
+ DRM_DEV_DEBUG_DRIVER(dev, "ksv[%d] = %02X%02X%02X%02X%02X",
+ i / 5, buf[i], buf[i + 1], buf[i + 2], buf[i + 3], buf[i + 4]);
+ }
+
+ return len;
+}
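The sizing here comes straight from HDCP 1.x: each KSV is 5 bytes and BINFO carries a 7-bit device count, which is why MAX_HDCP_DOWN_STREAM_COUNT was raised to 127 earlier in the patch; the 15-byte chunk keeps each FIFO read aligned to whole KSVs. A standalone sketch of the arithmetic, with a hypothetical device count:

#include <stdio.h>

#define DRM_HDCP_KSV_LEN 5
#define MAX_HDCP_DOWN_STREAM_COUNT 127

int main(void)
{
	int down_stream_count = 3;	/* hypothetical, from BINFO & 0x7f */

	if (down_stream_count <= MAX_HDCP_DOWN_STREAM_COUNT)
		printf("KSV FIFO bytes to read: %d\n",
		       down_stream_count * DRM_HDCP_KSV_LEN);
	return 0;
}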
+
static void it6505_variable_config(struct it6505 *it6505)
{
it6505->link_rate_bw_code = HBR;
@@ -1959,7 +2174,7 @@ static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
{
struct device *dev = it6505->dev;
u8 binfo[2];
- int down_stream_count, i, err, msg_count = 0;
+ int down_stream_count, err, msg_count = 0;
err = it6505_get_dpcd(it6505, DP_AUX_HDCP_BINFO, binfo,
ARRAY_SIZE(binfo));
@@ -1984,18 +2199,11 @@ static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
down_stream_count);
return 0;
}
+ err = it6505_get_ksvlist(it6505, sha1_input, down_stream_count * 5);
+ if (err < 0)
+ return err;
- for (i = 0; i < down_stream_count; i++) {
- err = it6505_get_dpcd(it6505, DP_AUX_HDCP_KSV_FIFO +
- (i % 3) * DRM_HDCP_KSV_LEN,
- sha1_input + msg_count,
- DRM_HDCP_KSV_LEN);
-
- if (err < 0)
- return err;
-
- msg_count += 5;
- }
+ msg_count += down_stream_count * 5;
it6505->hdcp_down_stream_count = down_stream_count;
sha1_input[msg_count++] = binfo[0];
@@ -2023,7 +2231,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
{
struct device *dev = it6505->dev;
u8 av[5][4], bv[5][4];
- int i, err;
+ int i, err, retry;
i = it6505_setup_sha1_input(it6505, it6505->sha1_input);
if (i <= 0) {
@@ -2032,22 +2240,28 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
}
it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av);
+ /* 1B-05: V' must be retried 3 times */
+ for (retry = 0; retry < 3; retry++) {
+ err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
+ sizeof(bv));
- err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
- sizeof(bv));
+ if (err < 0) {
+ dev_err(dev, "Read V' value Fail %d", retry);
+ continue;
+ }
- if (err < 0) {
- dev_err(dev, "Read V' value Fail");
- return false;
- }
+ for (i = 0; i < 5; i++) {
+ if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
+ bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
+ break;
- for (i = 0; i < 5; i++)
- if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] ||
- bv[i][1] != av[i][2] || bv[i][0] != av[i][3])
- return false;
+ DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d, %d", retry, i);
+ return true;
+ }
+ }
- DRM_DEV_DEBUG_DRIVER(dev, "V' all match!!");
- return true;
+ DRM_DEV_DEBUG_DRIVER(dev, "V' NOT match!! %d", retry);
+ return false;
}
static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
@@ -2055,12 +2269,13 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
struct it6505 *it6505 = container_of(work, struct it6505,
hdcp_wait_ksv_list);
struct device *dev = it6505->dev;
- unsigned int timeout = 5000;
- u8 bstatus = 0;
+ u8 bstatus;
bool ksv_list_check;
+ /* 1B-04: wait for the KSV list for 5s */
+ unsigned long timeout = jiffies +
+ msecs_to_jiffies(5000) + 1;
- timeout /= 20;
- while (timeout > 0) {
+ for (;;) {
if (!it6505_get_sink_hpd_status(it6505))
return;
@@ -2069,27 +2284,23 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work)
if (bstatus & DP_BSTATUS_READY)
break;
- msleep(20);
- timeout--;
- }
+ if (time_after(jiffies, timeout)) {
+ DRM_DEV_DEBUG_DRIVER(dev, "KSV list wait timeout");
+ goto timeout;
+ }
- if (timeout == 0) {
- DRM_DEV_DEBUG_DRIVER(dev, "timeout and ksv list wait failed");
- goto timeout;
+ msleep(20);
}
ksv_list_check = it6505_hdcp_part2_ksvlist_check(it6505);
DRM_DEV_DEBUG_DRIVER(dev, "ksv list ready, ksv list check %s",
ksv_list_check ? "pass" : "fail");
- if (ksv_list_check) {
- it6505_set_bits(it6505, REG_HDCP_TRIGGER,
- HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
+
+ if (ksv_list_check)
return;
- }
+
timeout:
- it6505_set_bits(it6505, REG_HDCP_TRIGGER,
- HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL,
- HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL);
+ it6505_start_hdcp(it6505);
}
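The rework above replaces a decrementing counter with the standard jiffies deadline idiom: compute an absolute deadline once, poll, and compare with time_after() so jiffies wraparound is handled; the extra "+ 1" tick guards against a partially elapsed first tick. A minimal sketch of the pattern, with a hypothetical readiness callback:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static bool foo_wait_ready(bool (*ready)(void), unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;

	for (;;) {
		if (ready())
			return true;
		if (time_after(jiffies, timeout))
			return false;
		msleep(20);
	}
}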
static void it6505_hdcp_work(struct work_struct *work)
@@ -2312,14 +2523,20 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
DRM_DEV_DEBUG_DRIVER(dev, "dp_irq_vector = 0x%02x", dp_irq_vector);
if (dp_irq_vector & DP_CP_IRQ) {
- it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
- HDCP_TRIGGER_CPIRQ);
-
bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS);
if (bstatus < 0)
return bstatus;
DRM_DEV_DEBUG_DRIVER(dev, "Bstatus = 0x%02x", bstatus);
+
+ /* Check BSTATUS when receiving CP_IRQ */
+ if (bstatus & DP_BSTATUS_R0_PRIME_READY &&
+ it6505->hdcp_status == HDCP_AUTH_GOING)
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ,
+ HDCP_TRIGGER_CPIRQ);
+ else if (bstatus & (DP_BSTATUS_REAUTH_REQ | DP_BSTATUS_LINK_FAILURE) &&
+ it6505->hdcp_status == HDCP_AUTH_DONE)
+ it6505_start_hdcp(it6505);
}
ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status);
@@ -2456,7 +2673,11 @@ static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
{
struct device *dev = it6505->dev;
- DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt");
+ DRM_DEV_DEBUG_DRIVER(dev, "HDCP repeater R0 event Interrupt");
+ /* 1B-01: HDCP encryption should start when R0' is ready */
+ it6505_set_bits(it6505, REG_HDCP_TRIGGER,
+ HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE);
+
schedule_work(&it6505->hdcp_wait_ksv_list);
}
@@ -2614,9 +2835,9 @@ static int it6505_poweron(struct it6505 *it6505)
/* time interval between OVDD and SYSRSTN at least be 10ms */
if (pdata->gpiod_reset) {
usleep_range(10000, 20000);
- gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
- usleep_range(1000, 2000);
gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
usleep_range(25000, 35000);
}
@@ -2647,7 +2868,7 @@ static int it6505_poweroff(struct it6505 *it6505)
disable_irq_nosync(it6505->irq);
if (pdata->gpiod_reset)
- gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
+ gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
if (pdata->pwr18) {
err = regulator_disable(pdata->pwr18);
@@ -3107,6 +3328,8 @@ static __maybe_unused int it6505_bridge_suspend(struct device *dev)
{
struct it6505 *it6505 = dev_get_drvdata(dev);
+ it6505_remove_edid(it6505);
+
return it6505_poweroff(it6505);
}
@@ -3133,7 +3356,7 @@ static int it6505_init_pdata(struct it6505 *it6505)
return PTR_ERR(pdata->ovdd);
}
- pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(pdata->gpiod_reset)) {
dev_err(dev, "gpiod_reset gpio not found");
return PTR_ERR(pdata->gpiod_reset);
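The polarity flips in this hunk follow the gpiod convention: values passed to gpiod_set_value_cansleep() are logical, so with the reset line marked GPIO_ACTIVE_LOW in the device tree, 1 means "assert reset" (pin driven low), and GPIOD_OUT_HIGH requests the chip held in reset at probe time. A sketch of the resulting reset pulse, assuming a hypothetical driver:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

static void foo_reset_pulse(struct gpio_desc *reset_gpio)
{
	gpiod_set_value_cansleep(reset_gpio, 1);	/* assert reset */
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(reset_gpio, 0);	/* release reset */
}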
@@ -3495,7 +3718,7 @@ static void it6505_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id it6505_id[] = {
- { "it6505", 0 },
+ { "it6505" },
{ }
};
@@ -3505,6 +3728,7 @@ static const struct of_device_id it6505_of_match[] = {
{ .compatible = "ite,it6505" },
{ }
};
+MODULE_DEVICE_TABLE(of, it6505_of_match);
static struct i2c_driver it6505_i2c_driver = {
.driver = {
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 925e42f46cd8..23edcde6b9a7 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -770,8 +770,6 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
mutex_lock(&ctx->lock);
- hdmi_avi_infoframe_init(&ctx->hdmi_avi_infoframe);
-
ret = drm_hdmi_avi_infoframe_from_display_mode(&ctx->hdmi_avi_infoframe, ctx->connector,
adjusted_mode);
if (ret) {
@@ -1452,8 +1450,10 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
dev_dbg(dev, "No connector present, passing empty EDID data");
memset(buf, 0, len);
} else {
+ mutex_lock(&ctx->connector->eld_mutex);
memcpy(buf, ctx->connector->eld,
min(sizeof(ctx->connector->eld), len));
+ mutex_unlock(&ctx->connector->eld_mutex);
}
mutex_unlock(&ctx->lock);
@@ -1466,7 +1466,6 @@ static const struct hdmi_codec_ops it66121_audio_codec_ops = {
.audio_shutdown = it66121_audio_shutdown,
.mute_stream = it66121_audio_mute,
.get_eld = it66121_audio_get_eld,
- .no_capture_mute = 1,
};
static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
@@ -1476,11 +1475,12 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
.i2s = 1, /* Only i2s support for now */
.spdif = 0,
.max_i2s_channels = 8,
+ .no_capture_mute = 1,
};
dev_dbg(dev, "%s\n", __func__);
- if (!of_property_read_bool(dev->of_node, "#sound-dai-cells")) {
+ if (!of_property_present(dev->of_node, "#sound-dai-cells")) {
dev_info(dev, "No \"#sound-dai-cells\", no audio\n");
return 0;
}
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index e265ab3c8c92..52da204f5740 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -815,8 +815,8 @@ static const struct of_device_id lt8912_dt_match[] = {
MODULE_DEVICE_TABLE(of, lt8912_dt_match);
static const struct i2c_device_id lt8912_id[] = {
- {"lt8912", 0},
- {},
+ { "lt8912" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, lt8912_id);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index c8881796fba4..999ddebb832d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -773,7 +773,7 @@ static void lt9211_remove(struct i2c_client *client)
drm_bridge_remove(&ctx->bridge);
}
-static struct i2c_device_id lt9211_id[] = {
+static const struct i2c_device_id lt9211_id[] = {
{ "lontium,lt9211" },
{},
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 73983f9b50cb..e650cd83fc8d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -23,6 +23,8 @@
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#define EDID_SEG_SIZE 256
#define EDID_LEN 32
@@ -43,7 +45,6 @@ struct lt9611 {
struct device_node *dsi1_node;
struct mipi_dsi_device *dsi0;
struct mipi_dsi_device *dsi1;
- struct platform_device *audio_pdev;
bool ac_mode;
@@ -333,49 +334,6 @@ end:
return temp;
}
-static void lt9611_hdmi_set_infoframes(struct lt9611 *lt9611,
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- union hdmi_infoframe infoframe;
- ssize_t len;
- u8 iframes = 0x0a; /* UD1 infoframe */
- u8 buf[32];
- int ret;
- int i;
-
- ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi,
- connector,
- mode);
- if (ret < 0)
- goto out;
-
- len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf));
- if (len < 0)
- goto out;
-
- for (i = 0; i < len; i++)
- regmap_write(lt9611->regmap, 0x8440 + i, buf[i]);
-
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
- connector,
- mode);
- if (ret < 0)
- goto out;
-
- len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf));
- if (len < 0)
- goto out;
-
- for (i = 0; i < len; i++)
- regmap_write(lt9611->regmap, 0x8474 + i, buf[i]);
-
- iframes |= 0x20;
-
-out:
- regmap_write(lt9611->regmap, 0x843d, iframes); /* UD1 infoframe */
-}
-
static void lt9611_hdmi_tx_digital(struct lt9611 *lt9611, bool is_hdmi)
{
if (is_hdmi)
@@ -719,7 +677,7 @@ lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
}
lt9611_mipi_input_analog(lt9611);
- lt9611_hdmi_set_infoframes(lt9611, connector, mode);
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
lt9611_hdmi_tx_digital(lt9611, connector->display_info.is_hdmi);
lt9611_hdmi_tx_phy(lt9611);
@@ -802,18 +760,10 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->hdisplay > 3840)
return MODE_BAD_HVALUE;
- if (mode->vdisplay > 2160)
- return MODE_BAD_VVALUE;
-
- if (mode->hdisplay == 3840 &&
- mode->vdisplay == 2160 &&
- drm_mode_vrefresh(mode) > 30)
- return MODE_CLOCK_HIGH;
-
if (mode->hdisplay > 2000 && !lt9611->dsi1_node)
return MODE_PANEL;
- else
- return MODE_OK;
+
+ return MODE_OK;
}
static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
@@ -887,6 +837,157 @@ lt9611_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
return input_fmts;
}
+/*
+ * Other working frames:
+ * - 0x01, 0x84df
+ * - 0x04, 0x84c0
+ */
+#define LT9611_INFOFRAME_AUDIO 0x02
+#define LT9611_INFOFRAME_AVI 0x08
+#define LT9611_INFOFRAME_SPD 0x10
+#define LT9611_INFOFRAME_VENDOR 0x20
+
+static int lt9611_hdmi_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ unsigned int mask;
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ mask = LT9611_INFOFRAME_AUDIO;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AVI:
+ mask = LT9611_INFOFRAME_AVI;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_SPD:
+ mask = LT9611_INFOFRAME_SPD;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ mask = LT9611_INFOFRAME_VENDOR;
+ break;
+
+ default:
+ drm_dbg_driver(lt9611->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ mask = 0;
+ break;
+ }
+
+ if (mask)
+ regmap_update_bits(lt9611->regmap, 0x843d, mask, 0);
+
+ return 0;
+}
+
+static int lt9611_hdmi_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ unsigned int mask, addr;
+ int i;
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ mask = LT9611_INFOFRAME_AUDIO;
+ addr = 0x84b2;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_AVI:
+ mask = LT9611_INFOFRAME_AVI;
+ addr = 0x8440;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_SPD:
+ mask = LT9611_INFOFRAME_SPD;
+ addr = 0x8493;
+ break;
+
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ mask = LT9611_INFOFRAME_VENDOR;
+ addr = 0x8474;
+ break;
+
+ default:
+ drm_dbg_driver(lt9611->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ mask = 0;
+ break;
+ }
+
+ if (mask) {
+ for (i = 0; i < len; i++)
+ regmap_write(lt9611->regmap, addr + i, buffer[i]);
+
+ regmap_update_bits(lt9611->regmap, 0x843d, mask, mask);
+ }
+
+ return 0;
+}
+
+static enum drm_mode_status
+lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
+{
+ /* 297 MHz for 4k@30 mode */
+ if (tmds_rate > 297000000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static int lt9611_hdmi_audio_startup(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ regmap_write(lt9611->regmap, 0x82d6, 0x8c);
+ regmap_write(lt9611->regmap, 0x82d7, 0x04);
+
+ regmap_write(lt9611->regmap, 0x8406, 0x08);
+ regmap_write(lt9611->regmap, 0x8407, 0x10);
+
+ regmap_write(lt9611->regmap, 0x8434, 0xd5);
+
+ return 0;
+}
+
+static int lt9611_hdmi_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ if (hparms->sample_rate == 48000)
+ regmap_write(lt9611->regmap, 0x840f, 0x2b);
+ else if (hparms->sample_rate == 96000)
+ regmap_write(lt9611->regmap, 0x840f, 0xab);
+ else
+ return -EINVAL;
+
+ regmap_write(lt9611->regmap, 0x8435, 0x00);
+ regmap_write(lt9611->regmap, 0x8436, 0x18);
+ regmap_write(lt9611->regmap, 0x8437, 0x00);
+
+ return drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &hparms->cea);
+}
+
+static void lt9611_hdmi_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
+
+ regmap_write(lt9611->regmap, 0x8406, 0x00);
+ regmap_write(lt9611->regmap, 0x8407, 0x00);
+}
+
static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.attach = lt9611_bridge_attach,
.mode_valid = lt9611_bridge_mode_valid,
@@ -902,6 +1003,14 @@ static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_get_input_bus_fmts = lt9611_atomic_get_input_bus_fmts,
+
+ .hdmi_tmds_char_rate_valid = lt9611_hdmi_tmds_char_rate_valid,
+ .hdmi_write_infoframe = lt9611_hdmi_write_infoframe,
+ .hdmi_clear_infoframe = lt9611_hdmi_clear_infoframe,
+
+ .hdmi_audio_startup = lt9611_hdmi_audio_startup,
+ .hdmi_audio_prepare = lt9611_hdmi_audio_prepare,
+ .hdmi_audio_shutdown = lt9611_hdmi_audio_shutdown,
};
static int lt9611_parse_dt(struct device *dev,
@@ -955,101 +1064,6 @@ static int lt9611_read_device_rev(struct lt9611 *lt9611)
return ret;
}
-static int lt9611_hdmi_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *fmt,
- struct hdmi_codec_params *hparms)
-{
- struct lt9611 *lt9611 = data;
-
- if (hparms->sample_rate == 48000)
- regmap_write(lt9611->regmap, 0x840f, 0x2b);
- else if (hparms->sample_rate == 96000)
- regmap_write(lt9611->regmap, 0x840f, 0xab);
- else
- return -EINVAL;
-
- regmap_write(lt9611->regmap, 0x8435, 0x00);
- regmap_write(lt9611->regmap, 0x8436, 0x18);
- regmap_write(lt9611->regmap, 0x8437, 0x00);
-
- return 0;
-}
-
-static int lt9611_audio_startup(struct device *dev, void *data)
-{
- struct lt9611 *lt9611 = data;
-
- regmap_write(lt9611->regmap, 0x82d6, 0x8c);
- regmap_write(lt9611->regmap, 0x82d7, 0x04);
-
- regmap_write(lt9611->regmap, 0x8406, 0x08);
- regmap_write(lt9611->regmap, 0x8407, 0x10);
-
- regmap_write(lt9611->regmap, 0x8434, 0xd5);
-
- return 0;
-}
-
-static void lt9611_audio_shutdown(struct device *dev, void *data)
-{
- struct lt9611 *lt9611 = data;
-
- regmap_write(lt9611->regmap, 0x8406, 0x00);
- regmap_write(lt9611->regmap, 0x8407, 0x00);
-}
-
-static int lt9611_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
-{
- struct of_endpoint of_ep;
- int ret;
-
- ret = of_graph_parse_endpoint(endpoint, &of_ep);
- if (ret < 0)
- return ret;
-
- /*
- * HDMI sound should be located as reg = <2>
- * Then, it is sound port 0
- */
- if (of_ep.port == 2)
- return 0;
-
- return -EINVAL;
-}
-
-static const struct hdmi_codec_ops lt9611_codec_ops = {
- .hw_params = lt9611_hdmi_hw_params,
- .audio_shutdown = lt9611_audio_shutdown,
- .audio_startup = lt9611_audio_startup,
- .get_dai_id = lt9611_hdmi_i2s_get_dai_id,
-};
-
-static struct hdmi_codec_pdata codec_data = {
- .ops = &lt9611_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
-};
-
-static int lt9611_audio_init(struct device *dev, struct lt9611 *lt9611)
-{
- codec_data.data = lt9611;
- lt9611->audio_pdev =
- platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data, sizeof(codec_data));
-
- return PTR_ERR_OR_ZERO(lt9611->audio_pdev);
-}
-
-static void lt9611_audio_exit(struct lt9611 *lt9611)
-{
- if (lt9611->audio_pdev) {
- platform_device_unregister(lt9611->audio_pdev);
- lt9611->audio_pdev = NULL;
- }
-}
-
static int lt9611_probe(struct i2c_client *client)
{
struct lt9611 *lt9611;
@@ -1113,11 +1127,20 @@ static int lt9611_probe(struct i2c_client *client)
i2c_set_clientdata(client, lt9611);
+ /* Disable Audio InfoFrame, enabled by default */
+ regmap_update_bits(lt9611->regmap, 0x843d, LT9611_INFOFRAME_AUDIO, 0);
+
lt9611->bridge.funcs = &lt9611_bridge_funcs;
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
- DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES;
+ DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
+ DRM_BRIDGE_OP_HDMI;
lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ lt9611->bridge.vendor = "Lontium";
+ lt9611->bridge.product = "LT9611";
+ lt9611->bridge.hdmi_audio_dev = dev;
+ lt9611->bridge.hdmi_audio_max_i2s_playback_channels = 8;
+ lt9611->bridge.hdmi_audio_dai_port = 2;
drm_bridge_add(&lt9611->bridge);
@@ -1139,10 +1162,6 @@ static int lt9611_probe(struct i2c_client *client)
lt9611_enable_hpd_interrupts(lt9611);
- ret = lt9611_audio_init(dev, lt9611);
- if (ret)
- goto err_remove_bridge;
-
return 0;
err_remove_bridge:
@@ -1163,7 +1182,6 @@ static void lt9611_remove(struct i2c_client *client)
struct lt9611 *lt9611 = i2c_get_clientdata(client);
disable_irq(client->irq);
- lt9611_audio_exit(lt9611);
drm_bridge_remove(&lt9611->bridge);
regulator_bulk_disable(ARRAY_SIZE(lt9611->supplies), lt9611->supplies);
@@ -1172,8 +1190,8 @@ static void lt9611_remove(struct i2c_client *client)
of_node_put(lt9611->dsi0_node);
}
-static struct i2c_device_id lt9611_id[] = {
- { "lontium,lt9611", 0 },
+static const struct i2c_device_id lt9611_id[] = {
+ { "lontium,lt9611" },
{}
};
MODULE_DEVICE_TABLE(i2c, lt9611_id);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index 4d1d40e1f1b4..f4c3ff1fdc69 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -522,7 +522,8 @@ static void lt9611uxc_audio_shutdown(struct device *dev, void *data)
}
static int lt9611uxc_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
@@ -913,8 +914,8 @@ static void lt9611uxc_remove(struct i2c_client *client)
of_node_put(lt9611uxc->dsi0_node);
}
-static struct i2c_device_id lt9611uxc_id[] = {
- { "lontium,lt9611uxc", 0 },
+static const struct i2c_device_id lt9611uxc_id[] = {
+ { "lontium,lt9611uxc" },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 991732c4b629..389af0233fcd 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -236,7 +236,7 @@ MODULE_DEVICE_TABLE(of, lvds_codec_match);
static struct platform_driver lvds_codec_driver = {
.probe = lvds_codec_probe,
- .remove_new = lvds_codec_remove,
+ .remove = lvds_codec_remove,
.driver = {
.name = "lvds-codec",
.of_match_table = lvds_codec_match,
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 37f1acf5c0f8..a3dcee62e7a5 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -318,8 +318,8 @@ static void stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
}
static const struct i2c_device_id stdp4028_ge_b850v3_fw_i2c_table[] = {
- {"stdp4028_ge_fw", 0},
- {},
+ { "stdp4028_ge_fw" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, stdp4028_ge_b850v3_fw_i2c_table);
@@ -365,8 +365,8 @@ static void stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
}
static const struct i2c_device_id stdp2690_ge_b850v3_fw_i2c_table[] = {
- {"stdp2690_ge_fw", 0},
- {},
+ { "stdp2690_ge_fw" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, stdp2690_ge_b850v3_fw_i2c_table);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 5f05647a3bea..1e5b2a37cb8c 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -1211,7 +1211,7 @@ static void nwl_dsi_remove(struct platform_device *pdev)
static struct platform_driver nwl_dsi_driver = {
.probe = nwl_dsi_probe,
- .remove_new = nwl_dsi_remove,
+ .remove = nwl_dsi_remove,
.driver = {
.of_match_table = nwl_dsi_dt_ids,
.name = DRV_NAME,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index e77aab965fcf..44e36ae66db4 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -319,8 +319,8 @@ static void ptn3460_remove(struct i2c_client *client)
}
static const struct i2c_device_id ptn3460_i2c_table[] = {
- {"ptn3460", 0},
- {},
+ { "ptn3460" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table);
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 430f8adebf9c..f8b4fb835765 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -2043,7 +2043,7 @@ void samsung_dsim_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(samsung_dsim_remove);
-static int __maybe_unused samsung_dsim_suspend(struct device *dev)
+static int samsung_dsim_suspend(struct device *dev)
{
struct samsung_dsim *dsi = dev_get_drvdata(dev);
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
@@ -2073,7 +2073,7 @@ static int __maybe_unused samsung_dsim_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused samsung_dsim_resume(struct device *dev)
+static int samsung_dsim_resume(struct device *dev)
{
struct samsung_dsim *dsi = dev_get_drvdata(dev);
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
@@ -2108,7 +2108,7 @@ err_clk:
}
const struct dev_pm_ops samsung_dsim_pm_ops = {
- SET_RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL)
+ RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
@@ -2139,10 +2139,10 @@ MODULE_DEVICE_TABLE(of, samsung_dsim_of_match);
static struct platform_driver samsung_dsim_driver = {
.probe = samsung_dsim_probe,
- .remove_new = samsung_dsim_remove,
+ .remove = samsung_dsim_remove,
.driver = {
.name = "samsung-dsim",
- .pm = &samsung_dsim_pm_ops,
+ .pm = pm_ptr(&samsung_dsim_pm_ops),
.of_match_table = samsung_dsim_of_match,
},
};
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 7f91b0db161e..bf2d1632b020 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -180,6 +180,8 @@ struct sii902x {
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
bool sink_is_hdmi;
+ u32 bus_width;
+
/*
* Mutex protects audio and video functions from interfering
* each other, by keeping their i2c command sequences atomic.
@@ -477,6 +479,8 @@ static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
u32 output_fmt,
unsigned int *num_input_fmts)
{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
u32 *input_fmts;
*num_input_fmts = 0;
@@ -485,7 +489,20 @@ static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
if (!input_fmts)
return NULL;
- input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ switch (sii902x->bus_width) {
+ case 16:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB565_1X16;
+ break;
+ case 18:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X18;
+ break;
+ case 24:
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ default:
+ return NULL;
+ }
+
*num_input_fmts = 1;
return input_fmts;
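The selected input format is keyed off the "bus-width" property of the bridge's input endpoint, read in probe() below with a fallback of 24; any other width makes format negotiation fail. A standalone model of the mapping, with placeholder constants standing in for the MEDIA_BUS_FMT_* codes:

#include <stdio.h>

enum { FMT_NONE, FMT_RGB565_1X16, FMT_RGB666_1X18, FMT_RGB888_1X24 };

static int fmt_for_bus_width(unsigned int width)
{
	switch (width) {
	case 16: return FMT_RGB565_1X16;
	case 18: return FMT_RGB666_1X18;
	case 24: return FMT_RGB888_1X24;
	default: return FMT_NONE;	/* negotiation returns NULL */
	}
}

int main(void)
{
	unsigned int widths[] = { 16, 18, 24, 30 };

	for (int i = 0; i < 4; i++)
		printf("bus-width %u -> fmt %d\n", widths[i],
		       fmt_for_bus_width(widths[i]));
	return 0;
}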
@@ -798,7 +815,8 @@ static int sii902x_audio_get_eld(struct device *dev, void *data,
}
static int sii902x_audio_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
@@ -823,7 +841,6 @@ static const struct hdmi_codec_ops sii902x_audio_codec_ops = {
.mute_stream = sii902x_audio_mute,
.get_eld = sii902x_audio_get_eld,
.get_dai_id = sii902x_audio_get_dai_id,
- .no_capture_mute = 1,
};
static int sii902x_audio_codec_init(struct sii902x *sii902x,
@@ -846,11 +863,12 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x,
.i2s = 1, /* Only i2s support for now. */
.spdif = 0,
.max_i2s_channels = 0,
+ .no_capture_mute = 1,
};
u8 lanes[4];
int num_lanes, i;
- if (!of_property_read_bool(dev->of_node, "#sound-dai-cells")) {
+ if (!of_property_present(dev->of_node, "#sound-dai-cells")) {
dev_dbg(dev, "%s: No \"#sound-dai-cells\", no audio\n",
__func__);
return 0;
@@ -1167,6 +1185,11 @@ static int sii902x_probe(struct i2c_client *client)
return PTR_ERR(sii902x->reset_gpio);
}
+ sii902x->bus_width = 24;
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
+ if (endpoint)
+ of_property_read_u32(endpoint, "bus-width", &sii902x->bus_width);
+
endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
if (endpoint) {
struct device_node *remote = of_graph_get_remote_port_parent(endpoint);
@@ -1217,8 +1240,8 @@ static const struct of_device_id sii902x_dt_ids[] = {
MODULE_DEVICE_TABLE(of, sii902x_dt_ids);
static const struct i2c_device_id sii902x_i2c_ids[] = {
- { "sii9022", 0 },
- { },
+ { "sii9022" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 0c74cdc07032..cd7837c9a6e0 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -945,8 +945,8 @@ static const struct of_device_id sii9234_dt_match[] = {
MODULE_DEVICE_TABLE(of, sii9234_dt_match);
static const struct i2c_device_id sii9234_id[] = {
- { "SII9234", 0 },
- { },
+ { "SII9234" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sii9234_id);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 26b8d137bce0..28a2e1ee04b2 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2368,8 +2368,8 @@ static const struct of_device_id sii8620_dt_match[] = {
MODULE_DEVICE_TABLE(of, sii8620_dt_match);
static const struct i2c_device_id sii8620_id[] = {
- { "sii8620", 0 },
- { },
+ { "sii8620" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sii8620_id);
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 15fc182d05ef..f3ab2f985f8c 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -46,8 +46,22 @@ config DRM_DW_HDMI_CEC
Support the CEC interface which is part of the Synopsys
Designware HDMI block.
+config DRM_DW_HDMI_QP
+ tristate
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_HELPER
+ select REGMAP_MMIO
+
config DRM_DW_MIPI_DSI
tristate
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
+
+config DRM_DW_MIPI_DSI2
+ tristate
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index ce715562e9e5..9dc376d220ad 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -5,4 +5,7 @@ obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o
obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
+obj-$(CONFIG_DRM_DW_HDMI_QP) += dw-hdmi-qp.o
+
obj-$(CONFIG_DRM_DW_MIPI_DSI) += dw-mipi-dsi.o
+obj-$(CONFIG_DRM_DW_MIPI_DSI2) += dw-mipi-dsi2.o
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 221e9a4edb40..cf1f66b7b192 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -645,7 +645,7 @@ static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend,
static struct platform_driver snd_dw_hdmi_driver = {
.probe = snd_dw_hdmi_probe,
- .remove_new = snd_dw_hdmi_remove,
+ .remove = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
.pm = PM_OPS,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index 673661160e54..9549dabde941 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -312,7 +312,7 @@ static void dw_hdmi_cec_remove(struct platform_device *pdev)
cec_unregister_adapter(cec->adap);
}
-static int __maybe_unused dw_hdmi_cec_resume(struct device *dev)
+static int dw_hdmi_cec_resume(struct device *dev)
{
struct dw_hdmi_cec *cec = dev_get_drvdata(dev);
@@ -328,7 +328,7 @@ static int __maybe_unused dw_hdmi_cec_resume(struct device *dev)
return 0;
}
-static int __maybe_unused dw_hdmi_cec_suspend(struct device *dev)
+static int dw_hdmi_cec_suspend(struct device *dev)
{
struct dw_hdmi_cec *cec = dev_get_drvdata(dev);
@@ -341,15 +341,15 @@ static int __maybe_unused dw_hdmi_cec_suspend(struct device *dev)
}
static const struct dev_pm_ops dw_hdmi_cec_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_cec_suspend, dw_hdmi_cec_resume)
+ SYSTEM_SLEEP_PM_OPS(dw_hdmi_cec_suspend, dw_hdmi_cec_resume)
};
static struct platform_driver dw_hdmi_cec_driver = {
.probe = dw_hdmi_cec_probe,
- .remove_new = dw_hdmi_cec_remove,
+ .remove = dw_hdmi_cec_remove,
.driver = {
.name = "dw-hdmi-cec",
- .pm = &dw_hdmi_cec_pm,
+ .pm = pm_ptr(&dw_hdmi_cec_pm),
},
};
module_platform_driver(dw_hdmi_cec_driver);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
index 423762da2ab4..ab18f9a3bf23 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
@@ -181,7 +181,7 @@ static void snd_dw_hdmi_remove(struct platform_device *pdev)
static struct platform_driver snd_dw_hdmi_driver = {
.probe = snd_dw_hdmi_probe,
- .remove_new = snd_dw_hdmi_remove,
+ .remove = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 26c187d20d97..2c903c9fe805 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -148,7 +148,8 @@ static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf,
}
static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint)
+ struct device_node *endpoint,
+ void *data)
{
struct of_endpoint of_ep;
int ret;
@@ -225,7 +226,7 @@ static void snd_dw_hdmi_remove(struct platform_device *pdev)
static struct platform_driver snd_dw_hdmi_driver = {
.probe = snd_dw_hdmi_probe,
- .remove_new = snd_dw_hdmi_remove,
+ .remove = snd_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
new file mode 100644
index 000000000000..b281cabfe992
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Algea Cao <algea.cao@rock-chips.com>
+ * Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+ */
+#include <linux/completion.h>
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/workqueue.h>
+
+#include <drm/bridge/dw_hdmi_qp.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modes.h>
+
+#include <sound/hdmi-codec.h>
+
+#include "dw-hdmi-qp.h"
+
+#define DDC_CI_ADDR 0x37
+#define DDC_SEGMENT_ADDR 0x30
+
+#define HDMI14_MAX_TMDSCLK 340000000
+
+#define SCRAMB_POLL_DELAY_MS 3000
+
+struct dw_hdmi_qp_i2c {
+ struct i2c_adapter adap;
+
+ struct mutex lock; /* used to serialize data transfers */
+ struct completion cmp;
+ u8 stat;
+
+ u8 slave_reg;
+ bool is_regaddr;
+ bool is_segment;
+};
+
+struct dw_hdmi_qp {
+ struct drm_bridge bridge;
+
+ struct device *dev;
+ struct dw_hdmi_qp_i2c *i2c;
+
+ struct {
+ const struct dw_hdmi_qp_phy_ops *ops;
+ void *data;
+ } phy;
+
+ struct regmap *regm;
+};
+
+static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val,
+ int offset)
+{
+ regmap_write(hdmi->regm, offset, val);
+}
+
+static unsigned int dw_hdmi_qp_read(struct dw_hdmi_qp *hdmi, int offset)
+{
+ unsigned int val = 0;
+
+ regmap_read(hdmi->regm, offset, &val);
+
+ return val;
+}
+
+static void dw_hdmi_qp_mod(struct dw_hdmi_qp *hdmi, unsigned int data,
+ unsigned int mask, unsigned int reg)
+{
+ regmap_update_bits(hdmi->regm, reg, mask, data);
+}
+
+static int dw_hdmi_qp_i2c_read(struct dw_hdmi_qp *hdmi,
+ unsigned char *buf, unsigned int length)
+{
+ struct dw_hdmi_qp_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ if (!i2c->is_regaddr) {
+ dev_dbg(hdmi->dev, "set read register address to 0\n");
+ i2c->slave_reg = 0x00;
+ i2c->is_regaddr = true;
+ }
+
+ while (length--) {
+ reinit_completion(&i2c->cmp);
+
+ dw_hdmi_qp_mod(hdmi, i2c->slave_reg++ << 12, I2CM_ADDR,
+ I2CM_INTERFACE_CONTROL0);
+
+ if (i2c->is_segment)
+ dw_hdmi_qp_mod(hdmi, I2CM_EXT_READ, I2CM_WR_MASK,
+ I2CM_INTERFACE_CONTROL0);
+ else
+ dw_hdmi_qp_mod(hdmi, I2CM_FM_READ, I2CM_WR_MASK,
+ I2CM_INTERFACE_CONTROL0);
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat) {
+ dev_err(hdmi->dev, "i2c read timed out\n");
+ dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
+ return -EAGAIN;
+ }
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & I2CM_NACK_RCVD_IRQ) {
+ dev_err(hdmi->dev, "i2c read error\n");
+ dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
+ return -EIO;
+ }
+
+ *buf++ = dw_hdmi_qp_read(hdmi, I2CM_INTERFACE_RDDATA_0_3) & 0xff;
+ dw_hdmi_qp_mod(hdmi, 0, I2CM_WR_MASK, I2CM_INTERFACE_CONTROL0);
+ }
+
+ i2c->is_segment = false;
+
+ return 0;
+}
+
+static int dw_hdmi_qp_i2c_write(struct dw_hdmi_qp *hdmi,
+ unsigned char *buf, unsigned int length)
+{
+ struct dw_hdmi_qp_i2c *i2c = hdmi->i2c;
+ int stat;
+
+ if (!i2c->is_regaddr) {
+ /* Use the first write byte as register address */
+ i2c->slave_reg = buf[0];
+ length--;
+ buf++;
+ i2c->is_regaddr = true;
+ }
+
+ while (length--) {
+ reinit_completion(&i2c->cmp);
+
+ dw_hdmi_qp_write(hdmi, *buf++, I2CM_INTERFACE_WRDATA_0_3);
+ dw_hdmi_qp_mod(hdmi, i2c->slave_reg++ << 12, I2CM_ADDR,
+ I2CM_INTERFACE_CONTROL0);
+ dw_hdmi_qp_mod(hdmi, I2CM_FM_WRITE, I2CM_WR_MASK,
+ I2CM_INTERFACE_CONTROL0);
+
+ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
+ if (!stat) {
+ dev_err(hdmi->dev, "i2c write timed out\n");
+ dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
+ return -EAGAIN;
+ }
+
+ /* Check for error condition on the bus */
+ if (i2c->stat & I2CM_NACK_RCVD_IRQ) {
+ dev_err(hdmi->dev, "i2c write nack!\n");
+ dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
+ return -EIO;
+ }
+
+ dw_hdmi_qp_mod(hdmi, 0, I2CM_WR_MASK, I2CM_INTERFACE_CONTROL0);
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_qp_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct dw_hdmi_qp *hdmi = i2c_get_adapdata(adap);
+ struct dw_hdmi_qp_i2c *i2c = hdmi->i2c;
+ u8 addr = msgs[0].addr;
+ int i, ret = 0;
+
+ if (addr == DDC_CI_ADDR)
+ /*
+ * The internal I2C controller does not support the multi-byte
+ * read and write operations needed for DDC/CI.
+ * FIXME: Blacklist the DDC/CI address until we filter out
+ * unsupported I2C operations.
+ */
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < num; i++) {
+ if (msgs[i].len == 0) {
+ dev_err(hdmi->dev,
+ "unsupported transfer %d/%d, no data\n",
+ i + 1, num);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ guard(mutex)(&i2c->lock);
+
+ /* Unmute DONE and ERROR interrupts */
+ dw_hdmi_qp_mod(hdmi, I2CM_NACK_RCVD_MASK_N | I2CM_OP_DONE_MASK_N,
+ I2CM_NACK_RCVD_MASK_N | I2CM_OP_DONE_MASK_N,
+ MAINUNIT_1_INT_MASK_N);
+
+ /* Set slave device address taken from the first I2C message */
+ if (addr == DDC_SEGMENT_ADDR && msgs[0].len == 1)
+ addr = DDC_ADDR;
+
+ dw_hdmi_qp_mod(hdmi, addr << 5, I2CM_SLVADDR, I2CM_INTERFACE_CONTROL0);
+
+ /* Set slave device register address on transfer */
+ i2c->is_regaddr = false;
+
+ /* Set segment pointer for I2C extended read mode operation */
+ i2c->is_segment = false;
+
+ for (i = 0; i < num; i++) {
+ if (msgs[i].addr == DDC_SEGMENT_ADDR && msgs[i].len == 1) {
+ i2c->is_segment = true;
+ dw_hdmi_qp_mod(hdmi, DDC_SEGMENT_ADDR, I2CM_SEG_ADDR,
+ I2CM_INTERFACE_CONTROL1);
+ dw_hdmi_qp_mod(hdmi, *msgs[i].buf << 7, I2CM_SEG_PTR,
+ I2CM_INTERFACE_CONTROL1);
+ } else {
+ if (msgs[i].flags & I2C_M_RD)
+ ret = dw_hdmi_qp_i2c_read(hdmi, msgs[i].buf,
+ msgs[i].len);
+ else
+ ret = dw_hdmi_qp_i2c_write(hdmi, msgs[i].buf,
+ msgs[i].len);
+ }
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Mute DONE and ERROR interrupts */
+ dw_hdmi_qp_mod(hdmi, 0, I2CM_OP_DONE_MASK_N | I2CM_NACK_RCVD_MASK_N,
+ MAINUNIT_1_INT_MASK_N);
+
+ return ret;
+}
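The DDC_SEGMENT_ADDR special case implements E-DDC segment addressing: a one-byte write to address 0x30 latches the segment pointer, and the following messages at the regular DDC address then reach EDID blocks beyond the first two. A hedged sketch of the message triplet a caller such as the DRM EDID code submits for block 2 (function name hypothetical):

static int read_edid_block2(struct i2c_adapter *adap, u8 *edid)
{
	u8 segment = 1;	/* selects EDID blocks 2 and 3 */
	u8 offset = 0;	/* start of the segment */
	struct i2c_msg msgs[] = {
		{ .addr = DDC_SEGMENT_ADDR, .flags = 0, .len = 1, .buf = &segment },
		{ .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = 128, .buf = edid },
	};

	/* i2c_transfer() returns the number of messages on success */
	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 3 ? 0 : -EIO;
}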
+
+static u32 dw_hdmi_qp_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm dw_hdmi_qp_algorithm = {
+ .master_xfer = dw_hdmi_qp_i2c_xfer,
+ .functionality = dw_hdmi_qp_i2c_func,
+};
+
+static struct i2c_adapter *dw_hdmi_qp_i2c_adapter(struct dw_hdmi_qp *hdmi)
+{
+ struct dw_hdmi_qp_i2c *i2c;
+ struct i2c_adapter *adap;
+ int ret;
+
+ i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&i2c->lock);
+ init_completion(&i2c->cmp);
+
+ adap = &i2c->adap;
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = hdmi->dev;
+ adap->algo = &dw_hdmi_qp_algorithm;
+ strscpy(adap->name, "DesignWare HDMI QP", sizeof(adap->name));
+
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = devm_i2c_add_adapter(hdmi->dev, adap);
+ if (ret) {
+ dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
+ devm_kfree(hdmi->dev, i2c);
+ return ERR_PTR(ret);
+ }
+
+ hdmi->i2c = i2c;
+ dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+
+ return adap;
+}
+
+static int dw_hdmi_qp_config_avi_infoframe(struct dw_hdmi_qp *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 val, i, j;
+
+ if (len != HDMI_INFOFRAME_SIZE(AVI)) {
+ dev_err(hdmi->dev, "failed to configure avi infoframe\n");
+ return -EINVAL;
+ }
+
+ /*
+ * DW HDMI QP IP uses a different byte format from standard AVI info
+ * frames, though generally the bits are in the correct bytes.
+ */
+ val = buffer[1] << 8 | buffer[2] << 16;
+ dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS0);
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ if (i * 4 + j >= 14)
+ break;
+ if (!j)
+ val = buffer[i * 4 + j + 3];
+ val |= buffer[i * 4 + j + 3] << (8 * j);
+ }
+
+ dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS1 + i * 4);
+ }
+
+ dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_FIELDRATE, PKTSCHED_PKT_CONFIG1);
+
+ dw_hdmi_qp_mod(hdmi, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN,
+ PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, PKTSCHED_PKT_EN);
+
+ return 0;
+}
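CONTENTS0 carries the version and length bytes shifted up by one byte; the packet type byte is implied by the register bank and dropped. The loop then packs the remaining 14 bytes (checksum plus 13 payload bytes) little-endian, four per 32-bit word, into CONTENTS1..4. A standalone model of the packing for an otherwise-zero AVI infoframe (type 0x82, version 2, length 13):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t buffer[17] = { 0x82, 0x02, 0x0d, 0x00 /* checksum */ };
	uint32_t val = buffer[1] << 8 | buffer[2] << 16;

	printf("CONTENTS0 = 0x%08x\n", val);	/* 0x000d0200 */

	for (int i = 0; i < 4; i++) {
		val = 0;
		for (int j = 0; j < 4 && i * 4 + j < 14; j++)
			val |= (uint32_t)buffer[i * 4 + j + 3] << (8 * j);
		printf("CONTENTS%d = 0x%08x\n", i + 1, val);
	}
	return 0;
}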
+
+static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi,
+ const u8 *buffer, size_t len)
+{
+ u32 val, i;
+
+ if (len != HDMI_INFOFRAME_SIZE(DRM)) {
+ dev_err(hdmi->dev, "failed to configure drm infoframe\n");
+ return -EINVAL;
+ }
+
+ dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN);
+
+ val = buffer[1] << 8 | buffer[2] << 16;
+ dw_hdmi_qp_write(hdmi, val, PKT_DRMI_CONTENTS0);
+
+ for (i = 0; i <= buffer[2]; i++) {
+ if (i % 4 == 0)
+ val = buffer[3 + i];
+ val |= buffer[3 + i] << ((i % 4) * 8);
+
+ if ((i % 4 == 3) || i == buffer[2])
+ dw_hdmi_qp_write(hdmi, val,
+ PKT_DRMI_CONTENTS1 + ((i / 4) * 4));
+ }
+
+ dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_FIELDRATE, PKTSCHED_PKT_CONFIG1);
+ dw_hdmi_qp_mod(hdmi, PKTSCHED_DRMI_TX_EN, PKTSCHED_DRMI_TX_EN,
+ PKTSCHED_PKT_EN);
+
+ return 0;
+}
+
+static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct dw_hdmi_qp *hdmi = bridge->driver_private;
+ struct drm_atomic_state *state = old_state->base.state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ unsigned int op_mode;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ if (connector->display_info.is_hdmi) {
+ dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n",
+ __func__, conn_state->hdmi.tmds_char_rate);
+ op_mode = 0;
+ } else {
+ dev_dbg(hdmi->dev, "%s mode=DVI\n", __func__);
+ op_mode = OPMODE_DVI;
+ }
+
+ hdmi->phy.ops->init(hdmi, hdmi->phy.data);
+
+ dw_hdmi_qp_mod(hdmi, HDCP2_BYPASS, HDCP2_BYPASS, HDCP2LOGIC_CONFIG0);
+ dw_hdmi_qp_mod(hdmi, op_mode, OPMODE_DVI, LINK_CONFIG0);
+
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
+}
+
+static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct dw_hdmi_qp *hdmi = bridge->driver_private;
+
+ hdmi->phy.ops->disable(hdmi, hdmi->phy.data);
+}
+
+static enum drm_connector_status
+dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge)
+{
+ struct dw_hdmi_qp *hdmi = bridge->driver_private;
+
+ return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+}
+
+static const struct drm_edid *
+dw_hdmi_qp_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_hdmi_qp *hdmi = bridge->driver_private;
+ const struct drm_edid *drm_edid;
+
+ drm_edid = drm_edid_read_ddc(connector, bridge->ddc);
+ if (!drm_edid)
+ dev_dbg(hdmi->dev, "failed to get edid\n");
+
+ return drm_edid;
+}
+
+static enum drm_mode_status
+dw_hdmi_qp_bridge_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long rate)
+{
+ struct dw_hdmi_qp *hdmi = bridge->driver_private;
+
+ if (rate > HDMI14_MAX_TMDSCLK) {
+ dev_dbg(hdmi->dev, "Unsupported TMDS char rate: %llu\n", rate);
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
+static int dw_hdmi_qp_bridge_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
+{
+ struct dw_hdmi_qp *hdmi = bridge->driver_private;
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN,
+ PKTSCHED_PKT_EN);
+ break;
+
+ case HDMI_INFOFRAME_TYPE_DRM:
+ dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN);
+ break;
+
+ default:
+ dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type);
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct dw_hdmi_qp *hdmi = bridge->driver_private;
+
+ dw_hdmi_qp_bridge_clear_infoframe(bridge, type);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return dw_hdmi_qp_config_avi_infoframe(hdmi, buffer, len);
+
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len);
+
+ default:
+ dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type);
+ return 0;
+ }
+}
+
+static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = dw_hdmi_qp_bridge_atomic_enable,
+ .atomic_disable = dw_hdmi_qp_bridge_atomic_disable,
+ .detect = dw_hdmi_qp_bridge_detect,
+ .edid_read = dw_hdmi_qp_bridge_edid_read,
+ .hdmi_tmds_char_rate_valid = dw_hdmi_qp_bridge_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe,
+ .hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe,
+};
+
+static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id)
+{
+ struct dw_hdmi_qp *hdmi = dev_id;
+ struct dw_hdmi_qp_i2c *i2c = hdmi->i2c;
+ u32 stat;
+
+ stat = dw_hdmi_qp_read(hdmi, MAINUNIT_1_INT_STATUS);
+
+ i2c->stat = stat & (I2CM_OP_DONE_IRQ | I2CM_READ_REQUEST_IRQ |
+ I2CM_NACK_RCVD_IRQ);
+
+ if (i2c->stat) {
+ dw_hdmi_qp_write(hdmi, i2c->stat, MAINUNIT_1_INT_CLEAR);
+ complete(&i2c->cmp);
+ }
+
+ if (stat)
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
+}
+
+static const struct regmap_config dw_hdmi_qp_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = EARCRX_1_INT_FORCE,
+};
+
+static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi)
+{
+ dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N);
+ dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N);
+ dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0);
+
+ /* Software reset */
+ dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
+
+ dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0);
+
+ dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0);
+
+ /* Clear DONE and ERROR interrupts */
+ dw_hdmi_qp_write(hdmi, I2CM_OP_DONE_CLEAR | I2CM_NACK_RCVD_CLEAR,
+ MAINUNIT_1_INT_CLEAR);
+
+ if (hdmi->phy.ops->setup_hpd)
+ hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data);
+}
+
+struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
+ struct drm_encoder *encoder,
+ const struct dw_hdmi_qp_plat_data *plat_data)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_hdmi_qp *hdmi;
+ void __iomem *regs;
+ int ret;
+
+ if (!plat_data->phy_ops || !plat_data->phy_ops->init ||
+ !plat_data->phy_ops->disable || !plat_data->phy_ops->read_hpd) {
+ dev_err(dev, "Missing platform PHY ops\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return ERR_PTR(-ENOMEM);
+
+ hdmi->dev = dev;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return ERR_CAST(regs);
+
+ hdmi->regm = devm_regmap_init_mmio(dev, regs, &dw_hdmi_qp_regmap_config);
+ if (IS_ERR(hdmi->regm)) {
+ dev_err(dev, "Failed to configure regmap\n");
+ return ERR_CAST(hdmi->regm);
+ }
+
+ hdmi->phy.ops = plat_data->phy_ops;
+ hdmi->phy.data = plat_data->phy_data;
+
+ dw_hdmi_qp_init_hw(hdmi);
+
+ ret = devm_request_threaded_irq(dev, plat_data->main_irq,
+ dw_hdmi_qp_main_hardirq, NULL,
+ IRQF_SHARED, dev_name(dev), hdmi);
+ if (ret)
+ return ERR_PTR(ret);
+
+ hdmi->bridge.driver_private = hdmi;
+ hdmi->bridge.funcs = &dw_hdmi_qp_bridge_funcs;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HPD;
+ hdmi->bridge.of_node = pdev->dev.of_node;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ hdmi->bridge.vendor = "Synopsys";
+ hdmi->bridge.product = "DW HDMI QP TX";
+
+ hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->bridge.ddc))
+ return ERR_CAST(hdmi->bridge.ddc);
+
+ ret = devm_drm_bridge_add(dev, &hdmi->bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return hdmi;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_qp_bind);
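dw_hdmi_qp_bind() is the entry point for SoC glue drivers; it insists on init/disable/read_hpd PHY ops and a main interrupt, and hands back the bridge handle the glue needs for dw_hdmi_qp_resume(). A hedged sketch of the glue side, where every my_* name is hypothetical and encoder/PHY setup is omitted:

static const struct dw_hdmi_qp_phy_ops my_phy_ops = {
	.init		= my_phy_init,
	.disable	= my_phy_disable,
	.read_hpd	= my_phy_read_hpd,
	.setup_hpd	= my_phy_setup_hpd,	/* optional, see dw_hdmi_qp_init_hw() */
};

static int my_glue_probe(struct platform_device *pdev)
{
	struct my_glue *glue = my_glue_init(pdev);	/* encoder + PHY setup */
	const struct dw_hdmi_qp_plat_data plat_data = {
		.phy_ops  = &my_phy_ops,
		.phy_data = glue,
		/* "main" as irq name is an assumption about the DT binding */
		.main_irq = platform_get_irq_byname(pdev, "main"),
	};
	struct dw_hdmi_qp *hdmi;

	hdmi = dw_hdmi_qp_bind(pdev, &glue->encoder, &plat_data);
	if (IS_ERR(hdmi))
		return PTR_ERR(hdmi);

	glue->hdmi = hdmi;	/* kept for dw_hdmi_qp_resume() */
	return 0;
}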
+
+void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi)
+{
+ dw_hdmi_qp_init_hw(hdmi);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_qp_resume);
+
+MODULE_AUTHOR("Algea Cao <algea.cao@rock-chips.com>");
+MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@collabora.com>");
+MODULE_DESCRIPTION("DW HDMI QP transmitter library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
new file mode 100644
index 000000000000..72987e6c4689
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Rockchip Electronics Co., Ltd.
+ * Author:
+ * Algea Cao <algea.cao@rock-chips.com>
+ */
+#ifndef __DW_HDMI_QP_H__
+#define __DW_HDMI_QP_H__
+
+#include <linux/bits.h>
+
+/* Main Unit Registers */
+#define CORE_ID 0x0
+#define VER_NUMBER 0x4
+#define VER_TYPE 0x8
+#define CONFIG_REG 0xc
+#define CONFIG_CEC BIT(28)
+#define CONFIG_AUD_UD BIT(23)
+#define CORE_TIMESTAMP_HHMM 0x14
+#define CORE_TIMESTAMP_MMDD 0x18
+#define CORE_TIMESTAMP_YYYY 0x1c
+/* Reset Manager Registers */
+#define GLOBAL_SWRESET_REQUEST 0x40
+#define EARCRX_CMDC_SWINIT_P BIT(27)
+#define AVP_DATAPATH_PACKET_AUDIO_SWINIT_P BIT(10)
+#define GLOBAL_SWDISABLE 0x44
+#define CEC_SWDISABLE BIT(17)
+#define AVP_DATAPATH_PACKET_AUDIO_SWDISABLE BIT(10)
+#define AVP_DATAPATH_VIDEO_SWDISABLE BIT(6)
+#define RESET_MANAGER_CONFIG0 0x48
+#define RESET_MANAGER_STATUS0 0x50
+#define RESET_MANAGER_STATUS1 0x54
+#define RESET_MANAGER_STATUS2 0x58
+/* Timer Base Registers */
+#define TIMER_BASE_CONFIG0 0x80
+#define TIMER_BASE_STATUS0 0x84
+/* CMU Registers */
+#define CMU_CONFIG0 0xa0
+#define CMU_CONFIG1 0xa4
+#define CMU_CONFIG2 0xa8
+#define CMU_CONFIG3 0xac
+#define CMU_STATUS 0xb0
+#define DISPLAY_CLK_MONITOR 0x3f
+#define DISPLAY_CLK_LOCKED 0x15
+#define EARC_BPCLK_OFF BIT(9)
+#define AUDCLK_OFF BIT(7)
+#define LINKQPCLK_OFF BIT(5)
+#define VIDQPCLK_OFF BIT(3)
+#define IPI_CLK_OFF BIT(1)
+#define CMU_IPI_CLK_FREQ 0xb4
+#define CMU_VIDQPCLK_FREQ 0xb8
+#define CMU_LINKQPCLK_FREQ 0xbc
+#define CMU_AUDQPCLK_FREQ 0xc0
+#define CMU_EARC_BPCLK_FREQ 0xc4
+/* I2CM Registers */
+#define I2CM_SM_SCL_CONFIG0 0xe0
+#define I2CM_FM_SCL_CONFIG0 0xe4
+#define I2CM_CONFIG0 0xe8
+#define I2CM_CONTROL0 0xec
+#define I2CM_STATUS0 0xf0
+#define I2CM_INTERFACE_CONTROL0 0xf4
+#define I2CM_ADDR 0xff000
+#define I2CM_SLVADDR 0xfe0
+#define I2CM_WR_MASK 0x1e
+#define I2CM_EXT_READ BIT(4)
+#define I2CM_SHORT_READ BIT(3)
+#define I2CM_FM_READ BIT(2)
+#define I2CM_FM_WRITE BIT(1)
+#define I2CM_FM_EN BIT(0)
+#define I2CM_INTERFACE_CONTROL1 0xf8
+#define I2CM_SEG_PTR 0x7f80
+#define I2CM_SEG_ADDR 0x7f
+#define I2CM_INTERFACE_WRDATA_0_3 0xfc
+#define I2CM_INTERFACE_WRDATA_4_7 0x100
+#define I2CM_INTERFACE_WRDATA_8_11 0x104
+#define I2CM_INTERFACE_WRDATA_12_15 0x108
+#define I2CM_INTERFACE_RDDATA_0_3 0x10c
+#define I2CM_INTERFACE_RDDATA_4_7 0x110
+#define I2CM_INTERFACE_RDDATA_8_11 0x114
+#define I2CM_INTERFACE_RDDATA_12_15 0x118
+/* SCDC Registers */
+#define SCDC_CONFIG0 0x140
+#define SCDC_I2C_FM_EN BIT(12)
+#define SCDC_UPD_FLAGS_AUTO_CLR BIT(6)
+#define SCDC_UPD_FLAGS_POLL_EN BIT(4)
+#define SCDC_CONTROL0 0x148
+#define SCDC_STATUS0 0x150
+#define STATUS_UPDATE BIT(0)
+#define FRL_START BIT(4)
+#define FLT_UPDATE BIT(5)
+/* FLT Registers */
+#define FLT_CONFIG0 0x160
+#define FLT_CONFIG1 0x164
+#define FLT_CONFIG2 0x168
+#define FLT_CONTROL0 0x170
+/* Main Unit 2 Registers */
+#define MAINUNIT_STATUS0 0x180
+/* Video Interface Registers */
+#define VIDEO_INTERFACE_CONFIG0 0x800
+#define VIDEO_INTERFACE_CONFIG1 0x804
+#define VIDEO_INTERFACE_CONFIG2 0x808
+#define VIDEO_INTERFACE_CONTROL0 0x80c
+#define VIDEO_INTERFACE_STATUS0 0x814
+/* Video Packing Registers */
+#define VIDEO_PACKING_CONFIG0 0x81c
+/* Audio Interface Registers */
+#define AUDIO_INTERFACE_CONFIG0 0x820
+#define AUD_IF_SEL_MSK 0x3
+#define AUD_IF_SPDIF 0x2
+#define AUD_IF_I2S 0x1
+#define AUD_IF_PAI 0x0
+#define AUD_FIFO_INIT_ON_OVF_MSK BIT(2)
+#define AUD_FIFO_INIT_ON_OVF_EN BIT(2)
+#define I2S_LINES_EN_MSK GENMASK(7, 4)
+#define I2S_LINES_EN(x) BIT((x) + 4)
+#define I2S_BPCUV_RCV_MSK BIT(12)
+#define I2S_BPCUV_RCV_EN BIT(12)
+#define I2S_BPCUV_RCV_DIS 0
+#define SPDIF_LINES_EN GENMASK(19, 16)
+#define AUD_FORMAT_MSK GENMASK(26, 24)
+#define AUD_3DOBA (0x7 << 24)
+#define AUD_3DASP (0x6 << 24)
+#define AUD_MSOBA (0x5 << 24)
+#define AUD_MSASP (0x4 << 24)
+#define AUD_HBR (0x3 << 24)
+#define AUD_DST (0x2 << 24)
+#define AUD_OBA (0x1 << 24)
+#define AUD_ASP (0x0 << 24)
+#define AUDIO_INTERFACE_CONFIG1 0x824
+#define AUDIO_INTERFACE_CONTROL0 0x82c
+#define AUDIO_FIFO_CLR_P BIT(0)
+#define AUDIO_INTERFACE_STATUS0 0x834
+/* Frame Composer Registers */
+#define FRAME_COMPOSER_CONFIG0 0x840
+#define FRAME_COMPOSER_CONFIG1 0x844
+#define FRAME_COMPOSER_CONFIG2 0x848
+#define FRAME_COMPOSER_CONFIG3 0x84c
+#define FRAME_COMPOSER_CONFIG4 0x850
+#define FRAME_COMPOSER_CONFIG5 0x854
+#define FRAME_COMPOSER_CONFIG6 0x858
+#define FRAME_COMPOSER_CONFIG7 0x85c
+#define FRAME_COMPOSER_CONFIG8 0x860
+#define FRAME_COMPOSER_CONFIG9 0x864
+#define FRAME_COMPOSER_CONTROL0 0x86c
+/* Video Monitor Registers */
+#define VIDEO_MONITOR_CONFIG0 0x880
+#define VIDEO_MONITOR_STATUS0 0x884
+#define VIDEO_MONITOR_STATUS1 0x888
+#define VIDEO_MONITOR_STATUS2 0x88c
+#define VIDEO_MONITOR_STATUS3 0x890
+#define VIDEO_MONITOR_STATUS4 0x894
+#define VIDEO_MONITOR_STATUS5 0x898
+#define VIDEO_MONITOR_STATUS6 0x89c
+/* HDCP2 Logic Registers */
+#define HDCP2LOGIC_CONFIG0 0x8e0
+#define HDCP2_BYPASS BIT(0)
+#define HDCP2LOGIC_ESM_GPIO_IN 0x8e4
+#define HDCP2LOGIC_ESM_GPIO_OUT 0x8e8
+/* HDCP14 Registers */
+#define HDCP14_CONFIG0 0x900
+#define HDCP14_CONFIG1 0x904
+#define HDCP14_CONFIG2 0x908
+#define HDCP14_CONFIG3 0x90c
+#define HDCP14_KEY_SEED 0x914
+#define HDCP14_KEY_H 0x918
+#define HDCP14_KEY_L 0x91c
+#define HDCP14_KEY_STATUS 0x920
+#define HDCP14_AKSV_H 0x924
+#define HDCP14_AKSV_L 0x928
+#define HDCP14_AN_H 0x92c
+#define HDCP14_AN_L 0x930
+#define HDCP14_STATUS0 0x934
+#define HDCP14_STATUS1 0x938
+/* Scrambler Registers */
+#define SCRAMB_CONFIG0 0x960
+/* Video Configuration Registers */
+#define LINK_CONFIG0 0x968
+#define OPMODE_FRL_4LANES BIT(8)
+#define OPMODE_DVI BIT(4)
+#define OPMODE_FRL BIT(0)
+/* TMDS FIFO Registers */
+#define TMDS_FIFO_CONFIG0 0x970
+#define TMDS_FIFO_CONTROL0 0x974
+/* FRL RSFEC Registers */
+#define FRL_RSFEC_CONFIG0 0xa20
+#define FRL_RSFEC_STATUS0 0xa30
+/* FRL Packetizer Registers */
+#define FRL_PKTZ_CONFIG0 0xa40
+#define FRL_PKTZ_CONTROL0 0xa44
+#define FRL_PKTZ_CONTROL1 0xa50
+#define FRL_PKTZ_STATUS1 0xa54
+/* Packet Scheduler Registers */
+#define PKTSCHED_CONFIG0 0xa80
+#define PKTSCHED_PRQUEUE0_CONFIG0 0xa84
+#define PKTSCHED_PRQUEUE1_CONFIG0 0xa88
+#define PKTSCHED_PRQUEUE2_CONFIG0 0xa8c
+#define PKTSCHED_PRQUEUE2_CONFIG1 0xa90
+#define PKTSCHED_PRQUEUE2_CONFIG2 0xa94
+#define PKTSCHED_PKT_CONFIG0 0xa98
+#define PKTSCHED_PKT_CONFIG1 0xa9c
+#define PKTSCHED_DRMI_FIELDRATE BIT(13)
+#define PKTSCHED_AVI_FIELDRATE BIT(12)
+#define PKTSCHED_PKT_CONFIG2 0xaa0
+#define PKTSCHED_PKT_CONFIG3 0xaa4
+#define PKTSCHED_PKT_EN 0xaa8
+#define PKTSCHED_DRMI_TX_EN BIT(17)
+#define PKTSCHED_AUDI_TX_EN BIT(15)
+#define PKTSCHED_AVI_TX_EN BIT(13)
+#define PKTSCHED_EMP_CVTEM_TX_EN BIT(10)
+#define PKTSCHED_AMD_TX_EN BIT(8)
+#define PKTSCHED_GCP_TX_EN BIT(3)
+#define PKTSCHED_AUDS_TX_EN BIT(2)
+#define PKTSCHED_ACR_TX_EN BIT(1)
+#define PKTSCHED_NULL_TX_EN BIT(0)
+#define PKTSCHED_PKT_CONTROL0 0xaac
+#define PKTSCHED_PKT_SEND 0xab0
+#define PKTSCHED_PKT_STATUS0 0xab4
+#define PKTSCHED_PKT_STATUS1 0xab8
+#define PKT_NULL_CONTENTS0 0xb00
+#define PKT_NULL_CONTENTS1 0xb04
+#define PKT_NULL_CONTENTS2 0xb08
+#define PKT_NULL_CONTENTS3 0xb0c
+#define PKT_NULL_CONTENTS4 0xb10
+#define PKT_NULL_CONTENTS5 0xb14
+#define PKT_NULL_CONTENTS6 0xb18
+#define PKT_NULL_CONTENTS7 0xb1c
+#define PKT_ACP_CONTENTS0 0xb20
+#define PKT_ACP_CONTENTS1 0xb24
+#define PKT_ACP_CONTENTS2 0xb28
+#define PKT_ACP_CONTENTS3 0xb2c
+#define PKT_ACP_CONTENTS4 0xb30
+#define PKT_ACP_CONTENTS5 0xb34
+#define PKT_ACP_CONTENTS6 0xb38
+#define PKT_ACP_CONTENTS7 0xb3c
+#define PKT_ISRC1_CONTENTS0 0xb40
+#define PKT_ISRC1_CONTENTS1 0xb44
+#define PKT_ISRC1_CONTENTS2 0xb48
+#define PKT_ISRC1_CONTENTS3 0xb4c
+#define PKT_ISRC1_CONTENTS4 0xb50
+#define PKT_ISRC1_CONTENTS5 0xb54
+#define PKT_ISRC1_CONTENTS6 0xb58
+#define PKT_ISRC1_CONTENTS7 0xb5c
+#define PKT_ISRC2_CONTENTS0 0xb60
+#define PKT_ISRC2_CONTENTS1 0xb64
+#define PKT_ISRC2_CONTENTS2 0xb68
+#define PKT_ISRC2_CONTENTS3 0xb6c
+#define PKT_ISRC2_CONTENTS4 0xb70
+#define PKT_ISRC2_CONTENTS5 0xb74
+#define PKT_ISRC2_CONTENTS6 0xb78
+#define PKT_ISRC2_CONTENTS7 0xb7c
+#define PKT_GMD_CONTENTS0 0xb80
+#define PKT_GMD_CONTENTS1 0xb84
+#define PKT_GMD_CONTENTS2 0xb88
+#define PKT_GMD_CONTENTS3 0xb8c
+#define PKT_GMD_CONTENTS4 0xb90
+#define PKT_GMD_CONTENTS5 0xb94
+#define PKT_GMD_CONTENTS6 0xb98
+#define PKT_GMD_CONTENTS7 0xb9c
+#define PKT_AMD_CONTENTS0 0xba0
+#define PKT_AMD_CONTENTS1 0xba4
+#define PKT_AMD_CONTENTS2 0xba8
+#define PKT_AMD_CONTENTS3 0xbac
+#define PKT_AMD_CONTENTS4 0xbb0
+#define PKT_AMD_CONTENTS5 0xbb4
+#define PKT_AMD_CONTENTS6 0xbb8
+#define PKT_AMD_CONTENTS7 0xbbc
+#define PKT_VSI_CONTENTS0 0xbc0
+#define PKT_VSI_CONTENTS1 0xbc4
+#define PKT_VSI_CONTENTS2 0xbc8
+#define PKT_VSI_CONTENTS3 0xbcc
+#define PKT_VSI_CONTENTS4 0xbd0
+#define PKT_VSI_CONTENTS5 0xbd4
+#define PKT_VSI_CONTENTS6 0xbd8
+#define PKT_VSI_CONTENTS7 0xbdc
+#define PKT_AVI_CONTENTS0 0xbe0
+#define HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT BIT(4)
+#define HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR 0x04
+#define HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR 0x08
+#define HDMI_FC_AVICONF2_IT_CONTENT_VALID 0x80
+#define PKT_AVI_CONTENTS1 0xbe4
+#define PKT_AVI_CONTENTS2 0xbe8
+#define PKT_AVI_CONTENTS3 0xbec
+#define PKT_AVI_CONTENTS4 0xbf0
+#define PKT_AVI_CONTENTS5 0xbf4
+#define PKT_AVI_CONTENTS6 0xbf8
+#define PKT_AVI_CONTENTS7 0xbfc
+#define PKT_SPDI_CONTENTS0 0xc00
+#define PKT_SPDI_CONTENTS1 0xc04
+#define PKT_SPDI_CONTENTS2 0xc08
+#define PKT_SPDI_CONTENTS3 0xc0c
+#define PKT_SPDI_CONTENTS4 0xc10
+#define PKT_SPDI_CONTENTS5 0xc14
+#define PKT_SPDI_CONTENTS6 0xc18
+#define PKT_SPDI_CONTENTS7 0xc1c
+#define PKT_AUDI_CONTENTS0 0xc20
+#define PKT_AUDI_CONTENTS1 0xc24
+#define PKT_AUDI_CONTENTS2 0xc28
+#define PKT_AUDI_CONTENTS3 0xc2c
+#define PKT_AUDI_CONTENTS4 0xc30
+#define PKT_AUDI_CONTENTS5 0xc34
+#define PKT_AUDI_CONTENTS6 0xc38
+#define PKT_AUDI_CONTENTS7 0xc3c
+#define PKT_NVI_CONTENTS0 0xc40
+#define PKT_NVI_CONTENTS1 0xc44
+#define PKT_NVI_CONTENTS2 0xc48
+#define PKT_NVI_CONTENTS3 0xc4c
+#define PKT_NVI_CONTENTS4 0xc50
+#define PKT_NVI_CONTENTS5 0xc54
+#define PKT_NVI_CONTENTS6 0xc58
+#define PKT_NVI_CONTENTS7 0xc5c
+#define PKT_DRMI_CONTENTS0 0xc60
+#define PKT_DRMI_CONTENTS1 0xc64
+#define PKT_DRMI_CONTENTS2 0xc68
+#define PKT_DRMI_CONTENTS3 0xc6c
+#define PKT_DRMI_CONTENTS4 0xc70
+#define PKT_DRMI_CONTENTS5 0xc74
+#define PKT_DRMI_CONTENTS6 0xc78
+#define PKT_DRMI_CONTENTS7 0xc7c
+#define PKT_GHDMI1_CONTENTS0 0xc80
+#define PKT_GHDMI1_CONTENTS1 0xc84
+#define PKT_GHDMI1_CONTENTS2 0xc88
+#define PKT_GHDMI1_CONTENTS3 0xc8c
+#define PKT_GHDMI1_CONTENTS4 0xc90
+#define PKT_GHDMI1_CONTENTS5 0xc94
+#define PKT_GHDMI1_CONTENTS6 0xc98
+#define PKT_GHDMI1_CONTENTS7 0xc9c
+#define PKT_GHDMI2_CONTENTS0 0xca0
+#define PKT_GHDMI2_CONTENTS1 0xca4
+#define PKT_GHDMI2_CONTENTS2 0xca8
+#define PKT_GHDMI2_CONTENTS3 0xcac
+#define PKT_GHDMI2_CONTENTS4 0xcb0
+#define PKT_GHDMI2_CONTENTS5 0xcb4
+#define PKT_GHDMI2_CONTENTS6 0xcb8
+#define PKT_GHDMI2_CONTENTS7 0xcbc
+/* EMP Packetizer Registers */
+#define PKT_EMP_CONFIG0 0xce0
+#define PKT_EMP_CONTROL0 0xcec
+#define PKT_EMP_CONTROL1 0xcf0
+#define PKT_EMP_CONTROL2 0xcf4
+#define PKT_EMP_VTEM_CONTENTS0 0xd00
+#define PKT_EMP_VTEM_CONTENTS1 0xd04
+#define PKT_EMP_VTEM_CONTENTS2 0xd08
+#define PKT_EMP_VTEM_CONTENTS3 0xd0c
+#define PKT_EMP_VTEM_CONTENTS4 0xd10
+#define PKT_EMP_VTEM_CONTENTS5 0xd14
+#define PKT_EMP_VTEM_CONTENTS6 0xd18
+#define PKT_EMP_VTEM_CONTENTS7 0xd1c
+#define PKT0_EMP_CVTEM_CONTENTS0 0xd20
+#define PKT0_EMP_CVTEM_CONTENTS1 0xd24
+#define PKT0_EMP_CVTEM_CONTENTS2 0xd28
+#define PKT0_EMP_CVTEM_CONTENTS3 0xd2c
+#define PKT0_EMP_CVTEM_CONTENTS4 0xd30
+#define PKT0_EMP_CVTEM_CONTENTS5 0xd34
+#define PKT0_EMP_CVTEM_CONTENTS6 0xd38
+#define PKT0_EMP_CVTEM_CONTENTS7 0xd3c
+#define PKT1_EMP_CVTEM_CONTENTS0 0xd40
+#define PKT1_EMP_CVTEM_CONTENTS1 0xd44
+#define PKT1_EMP_CVTEM_CONTENTS2 0xd48
+#define PKT1_EMP_CVTEM_CONTENTS3 0xd4c
+#define PKT1_EMP_CVTEM_CONTENTS4 0xd50
+#define PKT1_EMP_CVTEM_CONTENTS5 0xd54
+#define PKT1_EMP_CVTEM_CONTENTS6 0xd58
+#define PKT1_EMP_CVTEM_CONTENTS7 0xd5c
+#define PKT2_EMP_CVTEM_CONTENTS0 0xd60
+#define PKT2_EMP_CVTEM_CONTENTS1 0xd64
+#define PKT2_EMP_CVTEM_CONTENTS2 0xd68
+#define PKT2_EMP_CVTEM_CONTENTS3 0xd6c
+#define PKT2_EMP_CVTEM_CONTENTS4 0xd70
+#define PKT2_EMP_CVTEM_CONTENTS5 0xd74
+#define PKT2_EMP_CVTEM_CONTENTS6 0xd78
+#define PKT2_EMP_CVTEM_CONTENTS7 0xd7c
+#define PKT3_EMP_CVTEM_CONTENTS0 0xd80
+#define PKT3_EMP_CVTEM_CONTENTS1 0xd84
+#define PKT3_EMP_CVTEM_CONTENTS2 0xd88
+#define PKT3_EMP_CVTEM_CONTENTS3 0xd8c
+#define PKT3_EMP_CVTEM_CONTENTS4 0xd90
+#define PKT3_EMP_CVTEM_CONTENTS5 0xd94
+#define PKT3_EMP_CVTEM_CONTENTS6 0xd98
+#define PKT3_EMP_CVTEM_CONTENTS7 0xd9c
+#define PKT4_EMP_CVTEM_CONTENTS0 0xda0
+#define PKT4_EMP_CVTEM_CONTENTS1 0xda4
+#define PKT4_EMP_CVTEM_CONTENTS2 0xda8
+#define PKT4_EMP_CVTEM_CONTENTS3 0xdac
+#define PKT4_EMP_CVTEM_CONTENTS4 0xdb0
+#define PKT4_EMP_CVTEM_CONTENTS5 0xdb4
+#define PKT4_EMP_CVTEM_CONTENTS6 0xdb8
+#define PKT4_EMP_CVTEM_CONTENTS7 0xdbc
+#define PKT5_EMP_CVTEM_CONTENTS0 0xdc0
+#define PKT5_EMP_CVTEM_CONTENTS1 0xdc4
+#define PKT5_EMP_CVTEM_CONTENTS2 0xdc8
+#define PKT5_EMP_CVTEM_CONTENTS3 0xdcc
+#define PKT5_EMP_CVTEM_CONTENTS4 0xdd0
+#define PKT5_EMP_CVTEM_CONTENTS5 0xdd4
+#define PKT5_EMP_CVTEM_CONTENTS6 0xdd8
+#define PKT5_EMP_CVTEM_CONTENTS7 0xddc
+/* Audio Packetizer Registers */
+#define AUDPKT_CONTROL0 0xe20
+#define AUDPKT_PBIT_FORCE_EN_MASK BIT(12)
+#define AUDPKT_PBIT_FORCE_EN BIT(12)
+#define AUDPKT_CHSTATUS_OVR_EN_MASK BIT(0)
+#define AUDPKT_CHSTATUS_OVR_EN BIT(0)
+#define AUDPKT_CONTROL1 0xe24
+#define AUDPKT_ACR_CONTROL0 0xe40
+#define AUDPKT_ACR_N_VALUE 0xfffff
+#define AUDPKT_ACR_CONTROL1 0xe44
+#define AUDPKT_ACR_CTS_OVR_VAL_MSK GENMASK(23, 4)
+#define AUDPKT_ACR_CTS_OVR_VAL(x) ((x) << 4)
+#define AUDPKT_ACR_CTS_OVR_EN_MSK BIT(1)
+#define AUDPKT_ACR_CTS_OVR_EN BIT(1)
+#define AUDPKT_ACR_STATUS0 0xe4c
+#define AUDPKT_CHSTATUS_OVR0 0xe60
+#define AUDPKT_CHSTATUS_OVR1 0xe64
+/* IEC60958 Byte 3: Sampling frequency, bits 24 to 27 */
+#define AUDPKT_CHSTATUS_SR_MASK GENMASK(3, 0)
+#define AUDPKT_CHSTATUS_SR_22050 0x4
+#define AUDPKT_CHSTATUS_SR_24000 0x6
+#define AUDPKT_CHSTATUS_SR_32000 0x3
+#define AUDPKT_CHSTATUS_SR_44100 0x0
+#define AUDPKT_CHSTATUS_SR_48000 0x2
+#define AUDPKT_CHSTATUS_SR_88200 0x8
+#define AUDPKT_CHSTATUS_SR_96000 0xa
+#define AUDPKT_CHSTATUS_SR_176400 0xc
+#define AUDPKT_CHSTATUS_SR_192000 0xe
+#define AUDPKT_CHSTATUS_SR_768000 0x9
+#define AUDPKT_CHSTATUS_SR_NOT_INDICATED 0x1
+/* IEC60958 Byte 4: Original sampling frequency, bits 36 to 39 */
+#define AUDPKT_CHSTATUS_OSR_MASK GENMASK(15, 12)
+#define AUDPKT_CHSTATUS_OSR_8000 0x6
+#define AUDPKT_CHSTATUS_OSR_11025 0xa
+#define AUDPKT_CHSTATUS_OSR_12000 0x2
+#define AUDPKT_CHSTATUS_OSR_16000 0x8
+#define AUDPKT_CHSTATUS_OSR_22050 0xb
+#define AUDPKT_CHSTATUS_OSR_24000 0x9
+#define AUDPKT_CHSTATUS_OSR_32000 0xc
+#define AUDPKT_CHSTATUS_OSR_44100 0xf
+#define AUDPKT_CHSTATUS_OSR_48000 0xd
+#define AUDPKT_CHSTATUS_OSR_88200 0x7
+#define AUDPKT_CHSTATUS_OSR_96000 0x5
+#define AUDPKT_CHSTATUS_OSR_176400 0x3
+#define AUDPKT_CHSTATUS_OSR_192000 0x1
+#define AUDPKT_CHSTATUS_OSR_NOT_INDICATED 0x0
+#define AUDPKT_CHSTATUS_OVR2 0xe68
+#define AUDPKT_CHSTATUS_OVR3 0xe6c
+#define AUDPKT_CHSTATUS_OVR4 0xe70
+#define AUDPKT_CHSTATUS_OVR5 0xe74
+#define AUDPKT_CHSTATUS_OVR6 0xe78
+#define AUDPKT_CHSTATUS_OVR7 0xe7c
+#define AUDPKT_CHSTATUS_OVR8 0xe80
+#define AUDPKT_CHSTATUS_OVR9 0xe84
+#define AUDPKT_CHSTATUS_OVR10 0xe88
+#define AUDPKT_CHSTATUS_OVR11 0xe8c
+#define AUDPKT_CHSTATUS_OVR12 0xe90
+#define AUDPKT_CHSTATUS_OVR13 0xe94
+#define AUDPKT_CHSTATUS_OVR14 0xe98
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC0 0xea0
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC1 0xea4
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC2 0xea8
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC3 0xeac
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC4 0xeb0
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC5 0xeb4
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC6 0xeb8
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC7 0xebc
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC8 0xec0
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC9 0xec4
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC10 0xec8
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC11 0xecc
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC12 0xed0
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC13 0xed4
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC14 0xed8
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC15 0xedc
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC16 0xee0
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC17 0xee4
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC18 0xee8
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC19 0xeec
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC20 0xef0
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC21 0xef4
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC22 0xef8
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC23 0xefc
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC24 0xf00
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC25 0xf04
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC26 0xf08
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC27 0xf0c
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC28 0xf10
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC29 0xf14
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC30 0xf18
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC31 0xf1c
+#define AUDPKT_USRDATA_OVR_MSG_GENERIC32 0xf20
+#define AUDPKT_VBIT_OVR0 0xf24
+/* CEC Registers */
+#define CEC_TX_CONTROL 0x1000
+#define CEC_STATUS 0x1004
+#define CEC_CONFIG 0x1008
+#define CEC_ADDR 0x100c
+#define CEC_TX_COUNT 0x1020
+#define CEC_TX_DATA3_0 0x1024
+#define CEC_TX_DATA7_4 0x1028
+#define CEC_TX_DATA11_8 0x102c
+#define CEC_TX_DATA15_12 0x1030
+#define CEC_RX_COUNT_STATUS 0x1040
+#define CEC_RX_DATA3_0 0x1044
+#define CEC_RX_DATA7_4 0x1048
+#define CEC_RX_DATA11_8 0x104c
+#define CEC_RX_DATA15_12 0x1050
+#define CEC_LOCK_CONTROL 0x1054
+#define CEC_RXQUAL_BITTIME_CONFIG 0x1060
+#define CEC_RX_BITTIME_CONFIG 0x1064
+#define CEC_TX_BITTIME_CONFIG 0x1068
+/* eARC RX CMDC Registers */
+#define EARCRX_CMDC_CONFIG0 0x1800
+#define EARCRX_XACTREAD_STOP_CFG BIT(26)
+#define EARCRX_XACTREAD_RETRY_CFG BIT(25)
+#define EARCRX_CMDC_DSCVR_EARCVALID0_TO_DISC1 BIT(24)
+#define EARCRX_CMDC_XACT_RESTART_EN BIT(18)
+#define EARCRX_CMDC_CONFIG1 0x1804
+#define EARCRX_CMDC_CONTROL 0x1808
+#define EARCRX_CMDC_HEARTBEAT_LOSS_EN BIT(4)
+#define EARCRX_CMDC_DISCOVERY_EN BIT(3)
+#define EARCRX_CONNECTOR_HPD BIT(1)
+#define EARCRX_CMDC_WHITELIST0_CONFIG 0x180c
+#define EARCRX_CMDC_WHITELIST1_CONFIG 0x1810
+#define EARCRX_CMDC_WHITELIST2_CONFIG 0x1814
+#define EARCRX_CMDC_WHITELIST3_CONFIG 0x1818
+#define EARCRX_CMDC_STATUS 0x181c
+#define EARCRX_CMDC_XACT_INFO 0x1820
+#define EARCRX_CMDC_XACT_ACTION 0x1824
+#define EARCRX_CMDC_HEARTBEAT_RXSTAT_SE 0x1828
+#define EARCRX_CMDC_HEARTBEAT_STATUS 0x182c
+#define EARCRX_CMDC_XACT_WR0 0x1840
+#define EARCRX_CMDC_XACT_WR1 0x1844
+#define EARCRX_CMDC_XACT_WR2 0x1848
+#define EARCRX_CMDC_XACT_WR3 0x184c
+#define EARCRX_CMDC_XACT_WR4 0x1850
+#define EARCRX_CMDC_XACT_WR5 0x1854
+#define EARCRX_CMDC_XACT_WR6 0x1858
+#define EARCRX_CMDC_XACT_WR7 0x185c
+#define EARCRX_CMDC_XACT_WR8 0x1860
+#define EARCRX_CMDC_XACT_WR9 0x1864
+#define EARCRX_CMDC_XACT_WR10 0x1868
+#define EARCRX_CMDC_XACT_WR11 0x186c
+#define EARCRX_CMDC_XACT_WR12 0x1870
+#define EARCRX_CMDC_XACT_WR13 0x1874
+#define EARCRX_CMDC_XACT_WR14 0x1878
+#define EARCRX_CMDC_XACT_WR15 0x187c
+#define EARCRX_CMDC_XACT_WR16 0x1880
+#define EARCRX_CMDC_XACT_WR17 0x1884
+#define EARCRX_CMDC_XACT_WR18 0x1888
+#define EARCRX_CMDC_XACT_WR19 0x188c
+#define EARCRX_CMDC_XACT_WR20 0x1890
+#define EARCRX_CMDC_XACT_WR21 0x1894
+#define EARCRX_CMDC_XACT_WR22 0x1898
+#define EARCRX_CMDC_XACT_WR23 0x189c
+#define EARCRX_CMDC_XACT_WR24 0x18a0
+#define EARCRX_CMDC_XACT_WR25 0x18a4
+#define EARCRX_CMDC_XACT_WR26 0x18a8
+#define EARCRX_CMDC_XACT_WR27 0x18ac
+#define EARCRX_CMDC_XACT_WR28 0x18b0
+#define EARCRX_CMDC_XACT_WR29 0x18b4
+#define EARCRX_CMDC_XACT_WR30 0x18b8
+#define EARCRX_CMDC_XACT_WR31 0x18bc
+#define EARCRX_CMDC_XACT_WR32 0x18c0
+#define EARCRX_CMDC_XACT_WR33 0x18c4
+#define EARCRX_CMDC_XACT_WR34 0x18c8
+#define EARCRX_CMDC_XACT_WR35 0x18cc
+#define EARCRX_CMDC_XACT_WR36 0x18d0
+#define EARCRX_CMDC_XACT_WR37 0x18d4
+#define EARCRX_CMDC_XACT_WR38 0x18d8
+#define EARCRX_CMDC_XACT_WR39 0x18dc
+#define EARCRX_CMDC_XACT_WR40 0x18e0
+#define EARCRX_CMDC_XACT_WR41 0x18e4
+#define EARCRX_CMDC_XACT_WR42 0x18e8
+#define EARCRX_CMDC_XACT_WR43 0x18ec
+#define EARCRX_CMDC_XACT_WR44 0x18f0
+#define EARCRX_CMDC_XACT_WR45 0x18f4
+#define EARCRX_CMDC_XACT_WR46 0x18f8
+#define EARCRX_CMDC_XACT_WR47 0x18fc
+#define EARCRX_CMDC_XACT_WR48 0x1900
+#define EARCRX_CMDC_XACT_WR49 0x1904
+#define EARCRX_CMDC_XACT_WR50 0x1908
+#define EARCRX_CMDC_XACT_WR51 0x190c
+#define EARCRX_CMDC_XACT_WR52 0x1910
+#define EARCRX_CMDC_XACT_WR53 0x1914
+#define EARCRX_CMDC_XACT_WR54 0x1918
+#define EARCRX_CMDC_XACT_WR55 0x191c
+#define EARCRX_CMDC_XACT_WR56 0x1920
+#define EARCRX_CMDC_XACT_WR57 0x1924
+#define EARCRX_CMDC_XACT_WR58 0x1928
+#define EARCRX_CMDC_XACT_WR59 0x192c
+#define EARCRX_CMDC_XACT_WR60 0x1930
+#define EARCRX_CMDC_XACT_WR61 0x1934
+#define EARCRX_CMDC_XACT_WR62 0x1938
+#define EARCRX_CMDC_XACT_WR63 0x193c
+#define EARCRX_CMDC_XACT_WR64 0x1940
+#define EARCRX_CMDC_XACT_RD0 0x1960
+#define EARCRX_CMDC_XACT_RD1 0x1964
+#define EARCRX_CMDC_XACT_RD2 0x1968
+#define EARCRX_CMDC_XACT_RD3 0x196c
+#define EARCRX_CMDC_XACT_RD4 0x1970
+#define EARCRX_CMDC_XACT_RD5 0x1974
+#define EARCRX_CMDC_XACT_RD6 0x1978
+#define EARCRX_CMDC_XACT_RD7 0x197c
+#define EARCRX_CMDC_XACT_RD8 0x1980
+#define EARCRX_CMDC_XACT_RD9 0x1984
+#define EARCRX_CMDC_XACT_RD10 0x1988
+#define EARCRX_CMDC_XACT_RD11 0x198c
+#define EARCRX_CMDC_XACT_RD12 0x1990
+#define EARCRX_CMDC_XACT_RD13 0x1994
+#define EARCRX_CMDC_XACT_RD14 0x1998
+#define EARCRX_CMDC_XACT_RD15 0x199c
+#define EARCRX_CMDC_XACT_RD16 0x19a0
+#define EARCRX_CMDC_XACT_RD17 0x19a4
+#define EARCRX_CMDC_XACT_RD18 0x19a8
+#define EARCRX_CMDC_XACT_RD19 0x19ac
+#define EARCRX_CMDC_XACT_RD20 0x19b0
+#define EARCRX_CMDC_XACT_RD21 0x19b4
+#define EARCRX_CMDC_XACT_RD22 0x19b8
+#define EARCRX_CMDC_XACT_RD23 0x19bc
+#define EARCRX_CMDC_XACT_RD24 0x19c0
+#define EARCRX_CMDC_XACT_RD25 0x19c4
+#define EARCRX_CMDC_XACT_RD26 0x19c8
+#define EARCRX_CMDC_XACT_RD27 0x19cc
+#define EARCRX_CMDC_XACT_RD28 0x19d0
+#define EARCRX_CMDC_XACT_RD29 0x19d4
+#define EARCRX_CMDC_XACT_RD30 0x19d8
+#define EARCRX_CMDC_XACT_RD31 0x19dc
+#define EARCRX_CMDC_XACT_RD32 0x19e0
+#define EARCRX_CMDC_XACT_RD33 0x19e4
+#define EARCRX_CMDC_XACT_RD34 0x19e8
+#define EARCRX_CMDC_XACT_RD35 0x19ec
+#define EARCRX_CMDC_XACT_RD36 0x19f0
+#define EARCRX_CMDC_XACT_RD37 0x19f4
+#define EARCRX_CMDC_XACT_RD38 0x19f8
+#define EARCRX_CMDC_XACT_RD39 0x19fc
+#define EARCRX_CMDC_XACT_RD40 0x1a00
+#define EARCRX_CMDC_XACT_RD41 0x1a04
+#define EARCRX_CMDC_XACT_RD42 0x1a08
+#define EARCRX_CMDC_XACT_RD43 0x1a0c
+#define EARCRX_CMDC_XACT_RD44 0x1a10
+#define EARCRX_CMDC_XACT_RD45 0x1a14
+#define EARCRX_CMDC_XACT_RD46 0x1a18
+#define EARCRX_CMDC_XACT_RD47 0x1a1c
+#define EARCRX_CMDC_XACT_RD48 0x1a20
+#define EARCRX_CMDC_XACT_RD49 0x1a24
+#define EARCRX_CMDC_XACT_RD50 0x1a28
+#define EARCRX_CMDC_XACT_RD51 0x1a2c
+#define EARCRX_CMDC_XACT_RD52 0x1a30
+#define EARCRX_CMDC_XACT_RD53 0x1a34
+#define EARCRX_CMDC_XACT_RD54 0x1a38
+#define EARCRX_CMDC_XACT_RD55 0x1a3c
+#define EARCRX_CMDC_XACT_RD56 0x1a40
+#define EARCRX_CMDC_XACT_RD57 0x1a44
+#define EARCRX_CMDC_XACT_RD58 0x1a48
+#define EARCRX_CMDC_XACT_RD59 0x1a4c
+#define EARCRX_CMDC_XACT_RD60 0x1a50
+#define EARCRX_CMDC_XACT_RD61 0x1a54
+#define EARCRX_CMDC_XACT_RD62 0x1a58
+#define EARCRX_CMDC_XACT_RD63 0x1a5c
+#define EARCRX_CMDC_XACT_RD64 0x1a60
+#define EARCRX_CMDC_SYNC_CONFIG 0x1b00
+/* eARC RX DMAC Registers */
+#define EARCRX_DMAC_PHY_CONTROL 0x1c00
+#define EARCRX_DMAC_CONFIG 0x1c08
+#define EARCRX_DMAC_CONTROL0 0x1c0c
+#define EARCRX_DMAC_AUDIO_EN BIT(1)
+#define EARCRX_DMAC_EN BIT(0)
+#define EARCRX_DMAC_CONTROL1 0x1c10
+#define EARCRX_DMAC_STATUS 0x1c14
+#define EARCRX_DMAC_CHSTATUS0 0x1c18
+#define EARCRX_DMAC_CHSTATUS1 0x1c1c
+#define EARCRX_DMAC_CHSTATUS2 0x1c20
+#define EARCRX_DMAC_CHSTATUS3 0x1c24
+#define EARCRX_DMAC_CHSTATUS4 0x1c28
+#define EARCRX_DMAC_CHSTATUS5 0x1c2c
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC0 0x1c30
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC1 0x1c34
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC2 0x1c38
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC3 0x1c3c
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC4 0x1c40
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC5 0x1c44
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC6 0x1c48
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC7 0x1c4c
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC8 0x1c50
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC9 0x1c54
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC10 0x1c58
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC11 0x1c5c
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT0 0x1c60
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT1 0x1c64
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT2 0x1c68
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT3 0x1c6c
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT4 0x1c70
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT5 0x1c74
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT6 0x1c78
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT7 0x1c7c
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT8 0x1c80
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT9 0x1c84
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT10 0x1c88
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT11 0x1c8c
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT0 0x1c90
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT1 0x1c94
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT2 0x1c98
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT3 0x1c9c
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT4 0x1ca0
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT5 0x1ca4
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT6 0x1ca8
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT7 0x1cac
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT8 0x1cb0
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT9 0x1cb4
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT10 0x1cb8
+#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT11 0x1cbc
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC0 0x1cc0
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC1 0x1cc4
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC2 0x1cc8
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC3 0x1ccc
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC4 0x1cd0
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC5 0x1cd4
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC6 0x1cd8
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC7 0x1cdc
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC8 0x1ce0
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC9 0x1ce4
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC10 0x1ce8
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC11 0x1cec
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC12 0x1cf0
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC13 0x1cf4
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC14 0x1cf8
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC15 0x1cfc
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC16 0x1d00
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC17 0x1d04
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC18 0x1d08
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC19 0x1d0c
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC20 0x1d10
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC21 0x1d14
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC22 0x1d18
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC23 0x1d1c
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC24 0x1d20
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC25 0x1d24
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC26 0x1d28
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC27 0x1d2c
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC28 0x1d30
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC29 0x1d34
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC30 0x1d38
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC31 0x1d3c
+#define EARCRX_DMAC_USRDATA_MSG_GENERIC32 0x1d40
+#define EARCRX_DMAC_CHSTATUS_STREAMER0 0x1d44
+#define EARCRX_DMAC_CHSTATUS_STREAMER1 0x1d48
+#define EARCRX_DMAC_CHSTATUS_STREAMER2 0x1d4c
+#define EARCRX_DMAC_CHSTATUS_STREAMER3 0x1d50
+#define EARCRX_DMAC_CHSTATUS_STREAMER4 0x1d54
+#define EARCRX_DMAC_CHSTATUS_STREAMER5 0x1d58
+#define EARCRX_DMAC_CHSTATUS_STREAMER6 0x1d5c
+#define EARCRX_DMAC_CHSTATUS_STREAMER7 0x1d60
+#define EARCRX_DMAC_CHSTATUS_STREAMER8 0x1d64
+#define EARCRX_DMAC_CHSTATUS_STREAMER9 0x1d68
+#define EARCRX_DMAC_CHSTATUS_STREAMER10 0x1d6c
+#define EARCRX_DMAC_CHSTATUS_STREAMER11 0x1d70
+#define EARCRX_DMAC_CHSTATUS_STREAMER12 0x1d74
+#define EARCRX_DMAC_CHSTATUS_STREAMER13 0x1d78
+#define EARCRX_DMAC_CHSTATUS_STREAMER14 0x1d7c
+#define EARCRX_DMAC_USRDATA_STREAMER0 0x1d80
+/* Main Unit Interrupt Registers */
+#define MAIN_INTVEC_INDEX 0x3000
+#define MAINUNIT_0_INT_STATUS 0x3010
+#define MAINUNIT_0_INT_MASK_N 0x3014
+#define MAINUNIT_0_INT_CLEAR 0x3018
+#define MAINUNIT_0_INT_FORCE 0x301c
+#define MAINUNIT_1_INT_STATUS 0x3020
+#define FLT_EXIT_TO_LTSL_IRQ BIT(22)
+#define FLT_EXIT_TO_LTS4_IRQ BIT(21)
+#define FLT_EXIT_TO_LTSP_IRQ BIT(20)
+#define SCDC_NACK_RCVD_IRQ BIT(12)
+#define SCDC_RR_REPLY_STOP_IRQ BIT(11)
+#define SCDC_UPD_FLAGS_CLR_IRQ BIT(10)
+#define SCDC_UPD_FLAGS_CHG_IRQ BIT(9)
+#define SCDC_UPD_FLAGS_RD_IRQ BIT(8)
+#define I2CM_NACK_RCVD_IRQ BIT(2)
+#define I2CM_READ_REQUEST_IRQ BIT(1)
+#define I2CM_OP_DONE_IRQ BIT(0)
+#define MAINUNIT_1_INT_MASK_N 0x3024
+#define I2CM_NACK_RCVD_MASK_N BIT(2)
+#define I2CM_READ_REQUEST_MASK_N BIT(1)
+#define I2CM_OP_DONE_MASK_N BIT(0)
+#define MAINUNIT_1_INT_CLEAR 0x3028
+#define I2CM_NACK_RCVD_CLEAR BIT(2)
+#define I2CM_READ_REQUEST_CLEAR BIT(1)
+#define I2CM_OP_DONE_CLEAR BIT(0)
+#define MAINUNIT_1_INT_FORCE 0x302c
+/* AVPUNIT Interrupt Registers */
+#define AVP_INTVEC_INDEX 0x3800
+#define AVP_0_INT_STATUS 0x3810
+#define AVP_0_INT_MASK_N 0x3814
+#define AVP_0_INT_CLEAR 0x3818
+#define AVP_0_INT_FORCE 0x381c
+#define AVP_1_INT_STATUS 0x3820
+#define AVP_1_INT_MASK_N 0x3824
+#define HDCP14_AUTH_CHG_MASK_N BIT(6)
+#define AVP_1_INT_CLEAR 0x3828
+#define AVP_1_INT_FORCE 0x382c
+#define AVP_2_INT_STATUS 0x3830
+#define AVP_2_INT_MASK_N 0x3834
+#define AVP_2_INT_CLEAR 0x3838
+#define AVP_2_INT_FORCE 0x383c
+#define AVP_3_INT_STATUS 0x3840
+#define AVP_3_INT_MASK_N 0x3844
+#define AVP_3_INT_CLEAR 0x3848
+#define AVP_3_INT_FORCE 0x384c
+#define AVP_4_INT_STATUS 0x3850
+#define AVP_4_INT_MASK_N 0x3854
+#define AVP_4_INT_CLEAR 0x3858
+#define AVP_4_INT_FORCE 0x385c
+#define AVP_5_INT_STATUS 0x3860
+#define AVP_5_INT_MASK_N 0x3864
+#define AVP_5_INT_CLEAR 0x3868
+#define AVP_5_INT_FORCE 0x386c
+#define AVP_6_INT_STATUS 0x3870
+#define AVP_6_INT_MASK_N 0x3874
+#define AVP_6_INT_CLEAR 0x3878
+#define AVP_6_INT_FORCE 0x387c
+/* CEC Interrupt Registers */
+#define CEC_INT_STATUS 0x4000
+#define CEC_INT_MASK_N 0x4004
+#define CEC_INT_CLEAR 0x4008
+#define CEC_INT_FORCE 0x400c
+/* eARC RX Interrupt Registers */
+#define EARCRX_INTVEC_INDEX 0x4800
+#define EARCRX_0_INT_STATUS 0x4810
+#define EARCRX_CMDC_DISCOVERY_TIMEOUT_IRQ BIT(9)
+#define EARCRX_CMDC_DISCOVERY_DONE_IRQ BIT(8)
+#define EARCRX_0_INT_MASK_N 0x4814
+#define EARCRX_0_INT_CLEAR 0x4818
+#define EARCRX_0_INT_FORCE 0x481c
+#define EARCRX_1_INT_STATUS 0x4820
+#define EARCRX_1_INT_MASK_N 0x4824
+#define EARCRX_1_INT_CLEAR 0x4828
+#define EARCRX_1_INT_FORCE 0x482c
+
+#endif /* __DW_HDMI_QP_H__ */
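
Each interrupt block above follows the same four-register layout: a latched STATUS register, an active-high MASK_N enable register, a CLEAR register for acknowledging, and a FORCE register for software-injected interrupts. The sketch below is a minimal, self-contained illustration of that handshake; the toy register file, the rd()/wr() helpers, and the assumption that CLEAR behaves as write-one-to-clear are mine, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define MAINUNIT_1_INT_STATUS 0x3020
#define MAINUNIT_1_INT_MASK_N 0x3024
#define MAINUNIT_1_INT_CLEAR  0x3028
#define I2CM_OP_DONE_IRQ      (1u << 0)

/* Toy register file standing in for MMIO. */
static uint32_t regs[0x5000 / 4];

static uint32_t rd(uint32_t off) { return regs[off / 4]; }

static void wr(uint32_t off, uint32_t val)
{
	/* Model CLEAR as write-one-to-clear against STATUS (assumption). */
	if (off == MAINUNIT_1_INT_CLEAR)
		regs[MAINUNIT_1_INT_STATUS / 4] &= ~val;
	else
		regs[off / 4] = val;
}

int main(void)
{
	wr(MAINUNIT_1_INT_MASK_N, I2CM_OP_DONE_IRQ);          /* unmask one source */
	regs[MAINUNIT_1_INT_STATUS / 4] |= I2CM_OP_DONE_IRQ;  /* hw latches event */

	uint32_t pending = rd(MAINUNIT_1_INT_STATUS) & rd(MAINUNIT_1_INT_MASK_N);
	if (pending & I2CM_OP_DONE_IRQ) {
		wr(MAINUNIT_1_INT_CLEAR, I2CM_OP_DONE_IRQ);   /* ack the event */
		printf("handled, status now %#x\n", rd(MAINUNIT_1_INT_STATUS));
	}
	return 0;
}
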
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 0031f3c54882..996733ed2c00 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -3503,6 +3503,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ if (hdmi->version >= 0x200a)
+ hdmi->bridge.ycbcr_420_allowed = plat_data->ycbcr_420_allowed;
+
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
new file mode 100644
index 000000000000..d7569bf2d9c3
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2024, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Modified by Heiko Stuebner <heiko.stuebner@cherry.de>
+ * This generic Synopsys DesignWare MIPI DSI2 host driver is based on the
+ * Rockchip version from rockchip/dw-mipi-dsi2.c converted to use bridge APIs.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/bridge/dw_mipi_dsi2.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+
+#define DSI2_PWR_UP 0x000c
+#define RESET 0
+#define POWER_UP BIT(0)
+#define DSI2_SOFT_RESET 0x0010
+#define SYS_RSTN BIT(2)
+#define PHY_RSTN BIT(1)
+#define IPI_RSTN BIT(0)
+#define INT_ST_MAIN 0x0014
+#define DSI2_MODE_CTRL 0x0018
+#define DSI2_MODE_STATUS 0x001c
+#define DSI2_CORE_STATUS 0x0020
+#define PRI_RD_DATA_AVAIL BIT(26)
+#define PRI_FIFOS_NOT_EMPTY BIT(25)
+#define PRI_BUSY BIT(24)
+#define CRI_RD_DATA_AVAIL BIT(18)
+#define CRT_FIFOS_NOT_EMPTY BIT(17)
+#define CRI_BUSY BIT(16)
+#define IPI_FIFOS_NOT_EMPTY BIT(9)
+#define IPI_BUSY BIT(8)
+#define CORE_FIFOS_NOT_EMPTY BIT(1)
+#define CORE_BUSY BIT(0)
+#define MANUAL_MODE_CFG 0x0024
+#define MANUAL_MODE_EN BIT(0)
+#define DSI2_TIMEOUT_HSTX_CFG 0x0048
+#define TO_HSTX(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_HSTXRDY_CFG 0x004c
+#define TO_HSTXRDY(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPRX_CFG 0x0050
+#define TO_LPRXRDY(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPTXRDY_CFG 0x0054
+#define TO_LPTXRDY(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPTXTRIG_CFG 0x0058
+#define TO_LPTXTRIG(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_LPTXULPS_CFG 0x005c
+#define TO_LPTXULPS(x) FIELD_PREP(GENMASK(15, 0), x)
+#define DSI2_TIMEOUT_BTA_CFG 0x60
+#define TO_BTA(x) FIELD_PREP(GENMASK(15, 0), x)
+
+#define DSI2_PHY_MODE_CFG 0x0100
+#define PPI_WIDTH(x) FIELD_PREP(GENMASK(9, 8), x)
+#define PHY_LANES(x) FIELD_PREP(GENMASK(5, 4), (x) - 1)
+#define PHY_TYPE(x) FIELD_PREP(BIT(0), x)
+#define DSI2_PHY_CLK_CFG 0x0104
+#define PHY_LPTX_CLK_DIV(x) FIELD_PREP(GENMASK(12, 8), x)
+#define CLK_TYPE_MASK BIT(0)
+#define NON_CONTINUOUS_CLK BIT(0)
+#define CONTINUOUS_CLK 0
+#define DSI2_PHY_LP2HS_MAN_CFG 0x010c
+#define PHY_LP2HS_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+#define DSI2_PHY_HS2LP_MAN_CFG 0x0114
+#define PHY_HS2LP_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+#define DSI2_PHY_MAX_RD_T_MAN_CFG 0x011c
+#define PHY_MAX_RD_TIME(x) FIELD_PREP(GENMASK(26, 0), x)
+#define DSI2_PHY_ESC_CMD_T_MAN_CFG 0x0124
+#define PHY_ESC_CMD_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+#define DSI2_PHY_ESC_BYTE_T_MAN_CFG 0x012c
+#define PHY_ESC_BYTE_TIME(x) FIELD_PREP(GENMASK(28, 0), x)
+
+#define DSI2_PHY_IPI_RATIO_MAN_CFG 0x0134
+#define PHY_IPI_RATIO(x) FIELD_PREP(GENMASK(21, 0), x)
+#define DSI2_PHY_SYS_RATIO_MAN_CFG 0x013c
+#define PHY_SYS_RATIO(x) FIELD_PREP(GENMASK(16, 0), x)
+
+#define DSI2_DSI_GENERAL_CFG 0x0200
+#define BTA_EN BIT(1)
+#define EOTP_TX_EN BIT(0)
+#define DSI2_DSI_VCID_CFG 0x0204
+#define TX_VCID(x) FIELD_PREP(GENMASK(1, 0), x)
+#define DSI2_DSI_SCRAMBLING_CFG 0x0208
+#define SCRAMBLING_SEED(x) FIELD_PREP(GENMASK(31, 16), x)
+#define SCRAMBLING_EN BIT(0)
+#define DSI2_DSI_VID_TX_CFG 0x020c
+#define LPDT_DISPLAY_CMD_EN BIT(20)
+#define BLK_VFP_HS_EN BIT(14)
+#define BLK_VBP_HS_EN BIT(13)
+#define BLK_VSA_HS_EN BIT(12)
+#define BLK_HFP_HS_EN BIT(6)
+#define BLK_HBP_HS_EN BIT(5)
+#define BLK_HSA_HS_EN BIT(4)
+#define VID_MODE_TYPE(x) FIELD_PREP(GENMASK(1, 0), x)
+#define DSI2_CRI_TX_HDR 0x02c0
+#define CMD_TX_MODE(x) FIELD_PREP(BIT(24), x)
+#define DSI2_CRI_TX_PLD 0x02c4
+#define DSI2_CRI_RX_HDR 0x02c8
+#define DSI2_CRI_RX_PLD 0x02cc
+
+#define DSI2_IPI_COLOR_MAN_CFG 0x0300
+#define IPI_DEPTH(x) FIELD_PREP(GENMASK(7, 4), x)
+#define IPI_DEPTH_5_6_5_BITS 0x02
+#define IPI_DEPTH_6_BITS 0x03
+#define IPI_DEPTH_8_BITS 0x05
+#define IPI_DEPTH_10_BITS 0x06
+#define IPI_FORMAT(x) FIELD_PREP(GENMASK(3, 0), x)
+#define IPI_FORMAT_RGB 0x0
+#define IPI_FORMAT_DSC 0x0b
+#define DSI2_IPI_VID_HSA_MAN_CFG 0x0304
+#define VID_HSA_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_HBP_MAN_CFG 0x030c
+#define VID_HBP_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_HACT_MAN_CFG 0x0314
+#define VID_HACT_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_HLINE_MAN_CFG 0x031c
+#define VID_HLINE_TIME(x) FIELD_PREP(GENMASK(29, 0), x)
+#define DSI2_IPI_VID_VSA_MAN_CFG 0x0324
+#define VID_VSA_LINES(x) FIELD_PREP(GENMASK(9, 0), x)
+#define DSI2_IPI_VID_VBP_MAN_CFG 0x032c
+#define VID_VBP_LINES(x) FIELD_PREP(GENMASK(9, 0), x)
+#define DSI2_IPI_VID_VACT_MAN_CFG 0x0334
+#define VID_VACT_LINES(x) FIELD_PREP(GENMASK(13, 0), x)
+#define DSI2_IPI_VID_VFP_MAN_CFG 0x033c
+#define VID_VFP_LINES(x) FIELD_PREP(GENMASK(9, 0), x)
+#define DSI2_IPI_PIX_PKT_CFG 0x0344
+#define MAX_PIX_PKT(x) FIELD_PREP(GENMASK(15, 0), x)
+
+#define DSI2_INT_ST_PHY 0x0400
+#define DSI2_INT_MASK_PHY 0x0404
+#define DSI2_INT_ST_TO 0x0410
+#define DSI2_INT_MASK_TO 0x0414
+#define DSI2_INT_ST_ACK 0x0420
+#define DSI2_INT_MASK_ACK 0x0424
+#define DSI2_INT_ST_IPI 0x0430
+#define DSI2_INT_MASK_IPI 0x0434
+#define DSI2_INT_ST_FIFO 0x0440
+#define DSI2_INT_MASK_FIFO 0x0444
+#define DSI2_INT_ST_PRI 0x0450
+#define DSI2_INT_MASK_PRI 0x0454
+#define DSI2_INT_ST_CRI 0x0460
+#define DSI2_INT_MASK_CRI 0x0464
+#define DSI2_INT_FORCE_CRI 0x0468
+#define DSI2_MAX_REGISTER DSI2_INT_FORCE_CRI
+
+#define MODE_STATUS_TIMEOUT_US 10000
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+enum vid_mode_type {
+ VID_MODE_TYPE_NON_BURST_SYNC_PULSES,
+ VID_MODE_TYPE_NON_BURST_SYNC_EVENTS,
+ VID_MODE_TYPE_BURST,
+};
+
+enum mode_ctrl {
+ IDLE_MODE,
+ AUTOCALC_MODE,
+ COMMAND_MODE,
+ VIDEO_MODE,
+ DATA_STREAM_MODE,
+ VIDEO_TEST_MODE,
+ DATA_STREAM_TEST_MODE,
+};
+
+enum ppi_width {
+ PPI_WIDTH_8_BITS,
+ PPI_WIDTH_16_BITS,
+ PPI_WIDTH_32_BITS,
+};
+
+struct cmd_header {
+ u8 cmd_type;
+ u8 delay;
+ u8 payload_length;
+};
+
+struct dw_mipi_dsi2 {
+ struct drm_bridge bridge;
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge *panel_bridge;
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *pclk;
+ struct clk *sys_clk;
+
+ unsigned int lane_mbps; /* per lane */
+ u32 channel;
+ u32 lanes;
+ u32 format;
+ unsigned long mode_flags;
+
+ struct drm_display_mode mode;
+ const struct dw_mipi_dsi2_plat_data *plat_data;
+};
+
+static inline struct dw_mipi_dsi2 *host_to_dsi2(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct dw_mipi_dsi2, dsi_host);
+}
+
+static inline struct dw_mipi_dsi2 *bridge_to_dsi2(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct dw_mipi_dsi2, bridge);
+}
+
+static int cri_fifos_wait_avail(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 sts, mask;
+ int ret;
+
+ mask = CRI_BUSY | CRT_FIFOS_NOT_EMPTY;
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_CORE_STATUS, sts,
+ !(sts & mask), 0, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(dsi2->dev, "command interface is busy\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_set_vid_mode(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 val = 0, mode;
+ int ret;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
+ val |= BLK_HFP_HS_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
+ val |= BLK_HBP_HS_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
+ val |= BLK_HSA_HS_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ val |= VID_MODE_TYPE_BURST;
+ else if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ val |= VID_MODE_TYPE_NON_BURST_SYNC_PULSES;
+ else
+ val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
+
+ regmap_write(dsi2->regmap, DSI2_DSI_VID_TX_CFG, val);
+
+ regmap_write(dsi2->regmap, DSI2_MODE_CTRL, VIDEO_MODE);
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_MODE_STATUS,
+ mode, mode & VIDEO_MODE,
+ 1000, MODE_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(dsi2->dev, "failed to enter video mode\n");
+}
+
+static void dw_mipi_dsi2_set_data_stream_mode(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 mode;
+ int ret;
+
+ regmap_write(dsi2->regmap, DSI2_MODE_CTRL, DATA_STREAM_MODE);
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_MODE_STATUS,
+ mode, mode & DATA_STREAM_MODE,
+ 1000, MODE_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(dsi2->dev, "failed to enter data stream mode\n");
+}
+
+static void dw_mipi_dsi2_set_cmd_mode(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 mode;
+ int ret;
+
+ regmap_write(dsi2->regmap, DSI2_MODE_CTRL, COMMAND_MODE);
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_MODE_STATUS,
+ mode, mode & COMMAND_MODE,
+ 1000, MODE_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(dsi2->dev, "failed to enter command mode\n");
+}
+
+static void dw_mipi_dsi2_host_softrst(struct dw_mipi_dsi2 *dsi2)
+{
+ regmap_write(dsi2->regmap, DSI2_SOFT_RESET, 0x0);
+ usleep_range(50, 100);
+ regmap_write(dsi2->regmap, DSI2_SOFT_RESET,
+ SYS_RSTN | PHY_RSTN | IPI_RSTN);
+}
+
+static void dw_mipi_dsi2_phy_clk_mode_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 sys_clk, esc_clk_div;
+ u32 val = 0;
+
+ /*
+ * clk_type must be NON_CONTINUOUS_CLK before the initial
+ * deskew calibration is sent.
+ */
+ val |= NON_CONTINUOUS_CLK;
+
+ /* The escape clock frequency must not exceed 20 MHz */
+ sys_clk = clk_get_rate(dsi2->sys_clk) / USEC_PER_SEC;
+ esc_clk_div = DIV_ROUND_UP(sys_clk, 20 * 2);
+ val |= PHY_LPTX_CLK_DIV(esc_clk_div);
+
+ regmap_write(dsi2->regmap, DSI2_PHY_CLK_CFG, val);
+}
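
The divider above keeps the LPTX escape clock at or below the 20 MHz limit: sys_clk is first reduced to MHz, then divided by 2 * 20, since the divider formula implies an escape clock of sys_clk / (2 * div) — that reading is inferred from the arithmetic, not stated by the driver. A standalone sketch with made-up sys_clk rates:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical sys_clk rates in MHz. */
	unsigned int rates[] = { 100, 200, 350 };

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int div = DIV_ROUND_UP(rates[i], 20 * 2);

		/* Resulting escape clock must stay at or below 20 MHz. */
		printf("sys_clk %3u MHz -> div %u -> esc clk %.1f MHz\n",
		       rates[i], div, rates[i] / (2.0 * div));
	}
	return 0;
}
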
+
+static void dw_mipi_dsi2_phy_ratio_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ struct drm_display_mode *mode = &dsi2->mode;
+ u64 sys_clk = clk_get_rate(dsi2->sys_clk);
+ u64 pixel_clk, ipi_clk, phy_hsclk;
+ u64 tmp;
+
+ /*
+ * In DPHY mode, the phy_hstx_clk is exactly 1/16 of the lane
+ * high-speed data rate; in CPHY mode, it is exactly 1/7 of the
+ * trio high-speed symbol rate.
+ */
+ phy_hsclk = DIV_ROUND_CLOSEST_ULL(dsi2->lane_mbps * USEC_PER_SEC, 16);
+
+ /* IPI_RATIO_MAN_CFG = PHY_HSTX_CLK / IPI_CLK */
+ pixel_clk = mode->crtc_clock * MSEC_PER_SEC;
+ ipi_clk = pixel_clk / 4;
+
+ tmp = DIV_ROUND_CLOSEST_ULL(phy_hsclk << 16, ipi_clk);
+ regmap_write(dsi2->regmap, DSI2_PHY_IPI_RATIO_MAN_CFG,
+ PHY_IPI_RATIO(tmp));
+
+ /*
+ * SYS_RATIO_MAN_CFG = MIPI_DCPHY_HSCLK_Freq / SYS_CLK_Freq
+ */
+ tmp = DIV_ROUND_CLOSEST_ULL(phy_hsclk << 16, sys_clk);
+ regmap_write(dsi2->regmap, DSI2_PHY_SYS_RATIO_MAN_CFG,
+ PHY_SYS_RATIO(tmp));
+}
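
Both ratio registers hold a 16.16 fixed-point value: the integer quotient lands in the upper 16 bits and the fraction in the lower 16, because the numerator is shifted left by 16 before the division. A self-contained rework of the IPI ratio with hypothetical numbers (1000 Mbps per lane and a 148.5 MHz pixel clock, so ipi_clk = 37.125 MHz):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST_ULL(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	uint64_t lane_mbps = 1000;                     /* hypothetical link */
	uint64_t pixel_clk = 148500ULL * 1000;         /* crtc_clock is in kHz */
	uint64_t phy_hsclk = DIV_ROUND_CLOSEST_ULL(lane_mbps * 1000000, 16);
	uint64_t ipi_clk = pixel_clk / 4;

	/* 16.16 fixed point: integer part in the high half, fraction low. */
	uint64_t ratio = DIV_ROUND_CLOSEST_ULL(phy_hsclk << 16, ipi_clk);

	printf("ratio = %llu (%llu + %llu/65536)\n",
	       (unsigned long long)ratio,
	       (unsigned long long)(ratio >> 16),
	       (unsigned long long)(ratio & 0xffff));
	return 0;
}
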
+
+static void dw_mipi_dsi2_lp2hs_or_hs2lp_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+ struct dw_mipi_dsi2_phy_timing timing;
+ int ret;
+
+ ret = phy_ops->get_timing(dsi2->plat_data->priv_data,
+ dsi2->lane_mbps, &timing);
+ if (ret)
+ dev_err(dsi2->dev, "Retrieving phy timings failed\n");
+
+ regmap_write(dsi2->regmap, DSI2_PHY_LP2HS_MAN_CFG, PHY_LP2HS_TIME(timing.data_lp2hs));
+ regmap_write(dsi2->regmap, DSI2_PHY_HS2LP_MAN_CFG, PHY_HS2LP_TIME(timing.data_hs2lp));
+}
+
+static void dw_mipi_dsi2_phy_init(struct dw_mipi_dsi2 *dsi2)
+{
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+ struct dw_mipi_dsi2_phy_iface iface;
+ u32 val = 0;
+
+ phy_ops->get_interface(dsi2->plat_data->priv_data, &iface);
+
+ switch (iface.ppi_width) {
+ case 8:
+ val |= PPI_WIDTH(PPI_WIDTH_8_BITS);
+ break;
+ case 16:
+ val |= PPI_WIDTH(PPI_WIDTH_16_BITS);
+ break;
+ case 32:
+ val |= PPI_WIDTH(PPI_WIDTH_32_BITS);
+ break;
+ default:
+ /* Caught in probe */
+ break;
+ }
+
+ val |= PHY_LANES(dsi2->lanes);
+ val |= PHY_TYPE(DW_MIPI_DSI2_DPHY);
+ regmap_write(dsi2->regmap, DSI2_PHY_MODE_CFG, val);
+
+ dw_mipi_dsi2_phy_clk_mode_cfg(dsi2);
+ dw_mipi_dsi2_phy_ratio_cfg(dsi2);
+ dw_mipi_dsi2_lp2hs_or_hs2lp_cfg(dsi2);
+
+ /* phy configuration 8 - 10 */
+}
+
+static void dw_mipi_dsi2_tx_option_set(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 val;
+
+ val = BTA_EN | EOTP_TX_EN;
+
+ if (dsi2->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ val &= ~EOTP_TX_EN;
+
+ regmap_write(dsi2->regmap, DSI2_DSI_GENERAL_CFG, val);
+ regmap_write(dsi2->regmap, DSI2_DSI_VCID_CFG, TX_VCID(dsi2->channel));
+}
+
+static void dw_mipi_dsi2_ipi_color_coding_cfg(struct dw_mipi_dsi2 *dsi2)
+{
+ u32 val, color_depth;
+
+ switch (dsi2->format) {
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ color_depth = IPI_DEPTH_6_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ color_depth = IPI_DEPTH_5_6_5_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ color_depth = IPI_DEPTH_8_BITS;
+ break;
+ }
+
+ val = IPI_DEPTH(color_depth) |
+ IPI_FORMAT(IPI_FORMAT_RGB);
+ regmap_write(dsi2->regmap, DSI2_IPI_COLOR_MAN_CFG, val);
+}
+
+static void dw_mipi_dsi2_vertical_timing_config(struct dw_mipi_dsi2 *dsi2,
+ const struct drm_display_mode *mode)
+{
+ u32 vactive, vsa, vfp, vbp;
+
+ vactive = mode->vdisplay;
+ vsa = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VSA_MAN_CFG, VID_VSA_LINES(vsa));
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VBP_MAN_CFG, VID_VBP_LINES(vbp));
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VACT_MAN_CFG, VID_VACT_LINES(vactive));
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_VFP_MAN_CFG, VID_VFP_LINES(vfp));
+}
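
All four vertical parameters derive from the drm_display_mode sync positions: the front porch runs from the end of active video to vsync_start, the sync pulse from vsync_start to vsync_end, and the back porch from vsync_end to vtotal. A tiny standalone check with CEA 1080p numbers (the cut-down struct and values are illustrative, not the real drm_display_mode):

#include <stdio.h>

/* Reduced stand-in for struct drm_display_mode, for this sketch only. */
struct mode {
	int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	/* CEA 1080p vertical timing. */
	struct mode m = { 1080, 1084, 1089, 1125 };

	printf("vactive %d, vfp %d, vsa %d, vbp %d\n",
	       m.vdisplay,
	       m.vsync_start - m.vdisplay,   /* front porch: 4 lines */
	       m.vsync_end - m.vsync_start,  /* sync width:  5 lines */
	       m.vtotal - m.vsync_end);      /* back porch: 36 lines */
	return 0;
}
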
+
+static void dw_mipi_dsi2_ipi_set(struct dw_mipi_dsi2 *dsi2)
+{
+ struct drm_display_mode *mode = &dsi2->mode;
+ u32 hline, hsa, hbp, hact;
+ u64 hline_time, hsa_time, hbp_time, hact_time, tmp;
+ u64 pixel_clk, phy_hs_clk;
+ u16 val;
+
+ val = mode->hdisplay;
+
+ regmap_write(dsi2->regmap, DSI2_IPI_PIX_PKT_CFG, MAX_PIX_PKT(val));
+
+ dw_mipi_dsi2_ipi_color_coding_cfg(dsi2);
+
+ /*
+ * If the controller is intended to operate in data stream mode,
+ * no more steps are required.
+ */
+ if (!(dsi2->mode_flags & MIPI_DSI_MODE_VIDEO))
+ return;
+
+ hact = mode->hdisplay;
+ hsa = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+ hline = mode->htotal;
+
+ pixel_clk = mode->crtc_clock * MSEC_PER_SEC;
+
+ phy_hs_clk = DIV_ROUND_CLOSEST_ULL(dsi2->lane_mbps * USEC_PER_SEC, 16);
+
+ tmp = hsa * phy_hs_clk;
+ hsa_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HSA_MAN_CFG, VID_HSA_TIME(hsa_time));
+
+ tmp = hbp * phy_hs_clk;
+ hbp_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HBP_MAN_CFG, VID_HBP_TIME(hbp_time));
+
+ tmp = hact * phy_hs_clk;
+ hact_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HACT_MAN_CFG, VID_HACT_TIME(hact_time));
+
+ tmp = hline * phy_hs_clk;
+ hline_time = DIV_ROUND_CLOSEST_ULL(tmp << 16, pixel_clk);
+ regmap_write(dsi2->regmap, DSI2_IPI_VID_HLINE_MAN_CFG, VID_HLINE_TIME(hline_time));
+
+ dw_mipi_dsi2_vertical_timing_config(dsi2, mode);
+}
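
Each horizontal interval is converted from pixels into phy_hs_clk cycles, again as a 16.16 fixed-point value: cycles = pixels * phy_hs_clk / pixel_clk, with the left shift applied before the division so the fraction survives. A worked standalone example using the same hypothetical 1080p / 1000 Mbps numbers as above:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST_ULL(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	uint64_t pixel_clk = 148500ULL * 1000;         /* Hz */
	uint64_t phy_hs_clk = 1000ULL * 1000000 / 16;  /* Hz, DPHY: rate / 16 */
	uint64_t hsa = 44, hline = 2200;               /* pixels (1080p) */

	uint64_t hsa_time = DIV_ROUND_CLOSEST_ULL((hsa * phy_hs_clk) << 16, pixel_clk);
	uint64_t hline_time = DIV_ROUND_CLOSEST_ULL((hline * phy_hs_clk) << 16, pixel_clk);

	/* Each value is hs-clock cycles in 16.16 fixed point. */
	printf("hsa   %llu.%04llu cycles\n",
	       (unsigned long long)(hsa_time >> 16),
	       (unsigned long long)((hsa_time & 0xffff) * 10000 / 65536));
	printf("hline %llu.%04llu cycles\n",
	       (unsigned long long)(hline_time >> 16),
	       (unsigned long long)((hline_time & 0xffff) * 10000 / 65536));
	return 0;
}
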
+
+static void
+dw_mipi_dsi2_work_mode(struct dw_mipi_dsi2 *dsi2, u32 mode)
+{
+ /*
+ * Select whether the controller operates in manual mode
+ * (MANUAL_MODE_EN) or automatic mode (0).
+ */
+ regmap_write(dsi2->regmap, MANUAL_MODE_CFG, mode);
+}
+
+static int dw_mipi_dsi2_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2 *dsi2 = host_to_dsi2(host);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ struct drm_bridge *bridge;
+ int ret;
+
+ if (device->lanes > dsi2->plat_data->max_data_lanes) {
+ dev_err(dsi2->dev, "unsupported number of data lanes (%u)\n",
+ device->lanes);
+ return -EINVAL;
+ }
+
+ dsi2->lanes = device->lanes;
+ dsi2->channel = device->channel;
+ dsi2->format = device->format;
+ dsi2->mode_flags = device->mode_flags;
+
+ bridge = devm_drm_of_get_bridge(dsi2->dev, dsi2->dev->of_node, 1, 0);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ bridge->pre_enable_prev_first = true;
+ dsi2->panel_bridge = bridge;
+
+ drm_bridge_add(&dsi2->bridge);
+
+ if (pdata->host_ops && pdata->host_ops->attach) {
+ ret = pdata->host_ops->attach(pdata->priv_data, device);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2 *dsi2 = host_to_dsi2(host);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ int ret;
+
+ if (pdata->host_ops && pdata->host_ops->detach) {
+ ret = pdata->host_ops->detach(pdata->priv_data, device);
+ if (ret < 0)
+ return ret;
+ }
+
+ drm_bridge_remove(&dsi2->bridge);
+
+ drm_of_panel_bridge_remove(host->dev->of_node, 1, 0);
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_gen_pkt_hdr_write(struct dw_mipi_dsi2 *dsi2,
+ u32 hdr_val, bool lpm)
+{
+ int ret;
+
+ regmap_write(dsi2->regmap, DSI2_CRI_TX_HDR, hdr_val | CMD_TX_MODE(lpm));
+
+ ret = cri_fifos_wait_avail(dsi2);
+ if (ret) {
+ dev_err(dsi2->dev, "failed to write command header\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_write(struct dw_mipi_dsi2 *dsi2,
+ const struct mipi_dsi_packet *packet, bool lpm)
+{
+ const u8 *tx_buf = packet->payload;
+ int len = packet->payload_length, pld_data_bytes = sizeof(u32);
+ __le32 word;
+
+ /* Send payload */
+ while (len) {
+ if (len < pld_data_bytes) {
+ word = 0;
+ memcpy(&word, tx_buf, len);
+ regmap_write(dsi2->regmap, DSI2_CRI_TX_PLD, le32_to_cpu(word));
+ len = 0;
+ } else {
+ memcpy(&word, tx_buf, pld_data_bytes);
+ regmap_write(dsi2->regmap, DSI2_CRI_TX_PLD, le32_to_cpu(word));
+ tx_buf += pld_data_bytes;
+ len -= pld_data_bytes;
+ }
+ }
+
+ word = 0;
+ memcpy(&word, packet->header, sizeof(packet->header));
+ return dw_mipi_dsi2_gen_pkt_hdr_write(dsi2, le32_to_cpu(word), lpm);
+}
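
The payload FIFO takes one 32-bit word at a time, with payload byte 0 in the least-significant byte and the final word zero-padded. The driver builds each word as __le32 and converts with le32_to_cpu so the layout is endian-safe; the sketch below shows only the packing and assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical 6-byte payload. */
	const uint8_t payload[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	size_t len = sizeof(payload);
	const uint8_t *p = payload;

	while (len) {
		uint32_t word = 0;
		size_t chunk = len < 4 ? len : 4;

		memcpy(&word, p, chunk);  /* little-endian host: byte 0 -> LSB */
		printf("FIFO word: 0x%08x\n", word);  /* 0x44332211, 0x00006655 */
		p += chunk;
		len -= chunk;
	}
	return 0;
}
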
+
+static int dw_mipi_dsi2_read(struct dw_mipi_dsi2 *dsi2,
+ const struct mipi_dsi_msg *msg)
+{
+ u8 *payload = msg->rx_buf;
+ int i, j, ret, len = msg->rx_len;
+ u8 data_type;
+ u16 wc;
+ u32 val;
+
+ ret = regmap_read_poll_timeout(dsi2->regmap, DSI2_CORE_STATUS,
+ val, val & CRI_RD_DATA_AVAIL,
+ 100, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(dsi2->dev, "CRI has no available read data\n");
+ return ret;
+ }
+
+ regmap_read(dsi2->regmap, DSI2_CRI_RX_HDR, &val);
+ data_type = val & 0x3f;
+
+ if (mipi_dsi_packet_format_is_short(data_type)) {
+ for (i = 0; i < len && i < 2; i++)
+ payload[i] = (val >> (8 * (i + 1))) & 0xff;
+
+ return 0;
+ }
+
+ wc = (val >> 8) & 0xffff;
+ /* Receive payload */
+ for (i = 0; i < len && i < wc; i += 4) {
+ regmap_read(dsi2->regmap, DSI2_CRI_RX_PLD, &val);
+ for (j = 0; j < 4 && j + i < len && j + i < wc; j++)
+ payload[i + j] = val >> (8 * j);
+ }
+
+ return 0;
+}
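
The receive path decodes the CRI RX header the way a DSI peripheral builds it: bits 5:0 carry the data type, and for long packets bits 23:8 carry the payload byte count, which is followed by that many payload bytes in the RX payload FIFO. A minimal standalone decode with a made-up header word:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical CRI RX header: long packet, data type 0x1c, count 4. */
	uint32_t hdr = 0x00041c;

	uint8_t data_type = hdr & 0x3f;        /* bits 5:0 */
	uint16_t wc = (hdr >> 8) & 0xffff;     /* bits 23:8 */

	printf("data type 0x%02x, %u payload bytes\n", data_type, wc);
	return 0;
}
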
+
+static ssize_t dw_mipi_dsi2_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct dw_mipi_dsi2 *dsi2 = host_to_dsi2(host);
+ bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM;
+ struct mipi_dsi_packet packet;
+ int ret, nb_bytes;
+
+ regmap_update_bits(dsi2->regmap, DSI2_DSI_VID_TX_CFG,
+ LPDT_DISPLAY_CMD_EN,
+ lpm ? LPDT_DISPLAY_CMD_EN : 0);
+
+ /* Create a DSI protocol packet */
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ dev_err(dsi2->dev, "failed to create packet: %d\n", ret);
+ return ret;
+ }
+
+ ret = cri_fifos_wait_avail(dsi2);
+ if (ret)
+ return ret;
+
+ ret = dw_mipi_dsi2_write(dsi2, &packet, lpm);
+ if (ret)
+ return ret;
+
+ if (msg->rx_buf && msg->rx_len) {
+ ret = dw_mipi_dsi2_read(dsi2, msg);
+ if (ret < 0)
+ return ret;
+ nb_bytes = msg->rx_len;
+ } else {
+ nb_bytes = packet.size;
+ }
+
+ return nb_bytes;
+}
+
+static const struct mipi_dsi_host_ops dw_mipi_dsi2_host_ops = {
+ .attach = dw_mipi_dsi2_host_attach,
+ .detach = dw_mipi_dsi2_host_detach,
+ .transfer = dw_mipi_dsi2_host_transfer,
+};
+
+static u32 *
+dw_mipi_dsi2_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ u32 *input_fmts;
+
+ if (pdata->get_input_bus_fmts)
+ return pdata->get_input_bus_fmts(pdata->priv_data,
+ bridge, bridge_state,
+ crtc_state, conn_state,
+ output_fmt, num_input_fmts);
+
+ /* Fall back to MEDIA_BUS_FMT_FIXED as the only input format. */
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+ input_fmts[0] = MEDIA_BUS_FMT_FIXED;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static int dw_mipi_dsi2_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ bool ret;
+
+ bridge_state->input_bus_cfg.flags =
+ DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
+
+ if (pdata->mode_fixup) {
+ ret = pdata->mode_fixup(pdata->priv_data, &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_DRIVER("failed to fixup mode " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&crtc_state->mode));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_bridge_post_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+
+ regmap_write(dsi2->regmap, DSI2_IPI_PIX_PKT_CFG, 0);
+
+ /*
+ * Switch to command mode before panel-bridge post_disable &
+ * panel unprepare.
+ * Note: panel-bridge disable & panel disable have already been
+ * called by the drm framework.
+ */
+ dw_mipi_dsi2_set_cmd_mode(dsi2);
+
+ regmap_write(dsi2->regmap, DSI2_PWR_UP, RESET);
+
+ if (phy_ops->power_off)
+ phy_ops->power_off(dsi2->plat_data->priv_data);
+
+ clk_disable_unprepare(dsi2->sys_clk);
+ clk_disable_unprepare(dsi2->pclk);
+ pm_runtime_put(dsi2->dev);
+}
+
+static unsigned int dw_mipi_dsi2_get_lanes(struct dw_mipi_dsi2 *dsi2)
+{
+ /* single-dsi, so no other instance to consider */
+ return dsi2->lanes;
+}
+
+static void dw_mipi_dsi2_mode_set(struct dw_mipi_dsi2 *dsi2,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct dw_mipi_dsi2_phy_ops *phy_ops = dsi2->plat_data->phy_ops;
+ void *priv_data = dsi2->plat_data->priv_data;
+ u32 lanes = dw_mipi_dsi2_get_lanes(dsi2);
+ int ret;
+
+ clk_prepare_enable(dsi2->pclk);
+ clk_prepare_enable(dsi2->sys_clk);
+
+ ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi2->mode_flags,
+ lanes, dsi2->format, &dsi2->lane_mbps);
+ if (ret)
+ DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
+
+ pm_runtime_get_sync(dsi2->dev);
+
+ dw_mipi_dsi2_host_softrst(dsi2);
+ regmap_write(dsi2->regmap, DSI2_PWR_UP, RESET);
+
+ dw_mipi_dsi2_work_mode(dsi2, MANUAL_MODE_EN);
+ dw_mipi_dsi2_phy_init(dsi2);
+
+ if (phy_ops->power_on)
+ phy_ops->power_on(dsi2->plat_data->priv_data);
+
+ dw_mipi_dsi2_tx_option_set(dsi2);
+
+ /*
+ * The initial deskew calibration is sent after phy_power_on;
+ * only then can clk_type be configured.
+ */
+
+ regmap_update_bits(dsi2->regmap, DSI2_PHY_CLK_CFG, CLK_TYPE_MASK,
+ dsi2->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS ? NON_CONTINUOUS_CLK :
+ CONTINUOUS_CLK);
+
+ regmap_write(dsi2->regmap, DSI2_PWR_UP, POWER_UP);
+ dw_mipi_dsi2_set_cmd_mode(dsi2);
+
+ dw_mipi_dsi2_ipi_set(dsi2);
+}
+
+static void dw_mipi_dsi2_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Power up the DSI controller into command mode */
+ dw_mipi_dsi2_mode_set(dsi2, &dsi2->mode);
+}
+
+static void dw_mipi_dsi2_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Store the display mode for later use in pre_enable callback */
+ drm_mode_copy(&dsi2->mode, adjusted_mode);
+}
+
+static void dw_mipi_dsi2_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Switch to video mode for panel-bridge enable & panel enable */
+ if (dsi2->mode_flags & MIPI_DSI_MODE_VIDEO)
+ dw_mipi_dsi2_set_vid_mode(dsi2);
+ else
+ dw_mipi_dsi2_set_data_stream_mode(dsi2);
+}
+
+static enum drm_mode_status
+dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+ const struct dw_mipi_dsi2_plat_data *pdata = dsi2->plat_data;
+ enum drm_mode_status mode_status = MODE_OK;
+
+ if (pdata->mode_valid)
+ mode_status = pdata->mode_valid(pdata->priv_data, mode,
+ dsi2->mode_flags,
+ dw_mipi_dsi2_get_lanes(dsi2),
+ dsi2->format);
+
+ return mode_status;
+}
+
+static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge);
+
+ /* Set the encoder type as caller does not know it */
+ bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge,
+ flags);
+}
+
+static const struct drm_bridge_funcs dw_mipi_dsi2_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = dw_mipi_dsi2_bridge_atomic_get_input_bus_fmts,
+ .atomic_check = dw_mipi_dsi2_bridge_atomic_check,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = dw_mipi_dsi2_bridge_atomic_pre_enable,
+ .atomic_enable = dw_mipi_dsi2_bridge_atomic_enable,
+ .atomic_post_disable = dw_mipi_dsi2_bridge_post_atomic_disable,
+ .mode_set = dw_mipi_dsi2_bridge_mode_set,
+ .mode_valid = dw_mipi_dsi2_bridge_mode_valid,
+ .attach = dw_mipi_dsi2_bridge_attach,
+};
+
+static const struct regmap_config dw_mipi_dsi2_regmap_config = {
+ .name = "dsi2-host",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static struct dw_mipi_dsi2 *
+__dw_mipi_dsi2_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi2_plat_data *plat_data)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *apb_rst;
+ struct dw_mipi_dsi2 *dsi2;
+ int ret;
+
+ dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
+ if (!dsi2)
+ return ERR_PTR(-ENOMEM);
+
+ dsi2->dev = dev;
+ dsi2->plat_data = plat_data;
+
+ if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps ||
+ !plat_data->phy_ops->get_timing)
+ return dev_err_ptr_probe(dev, -ENODEV, "Phy not properly configured\n");
+
+ if (!plat_data->regmap) {
+ void __iomem *base = devm_platform_ioremap_resource(pdev, 0);
+
+ if (IS_ERR(base))
+ return dev_err_cast_probe(dev, base, "failed to map registers\n");
+
+ dsi2->regmap = devm_regmap_init_mmio(dev, base,
+ &dw_mipi_dsi2_regmap_config);
+ if (IS_ERR(dsi2->regmap))
+ return dev_err_cast_probe(dev, dsi2->regmap, "failed to init regmap\n");
+ } else {
+ dsi2->regmap = plat_data->regmap;
+ }
+
+ dsi2->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(dsi2->pclk))
+ return dev_err_cast_probe(dev, dsi2->pclk, "Unable to get pclk\n");
+
+ dsi2->sys_clk = devm_clk_get(dev, "sys");
+ if (IS_ERR(dsi2->sys_clk))
+ return dev_err_cast_probe(dev, dsi2->sys_clk, "Unable to get sys_clk\n");
+
+ /*
+ * Note that the reset was not defined in the initial device tree, so
+ * we have to be prepared for it not being found.
+ */
+ apb_rst = devm_reset_control_get_optional_exclusive(dev, "apb");
+ if (IS_ERR(apb_rst))
+ return dev_err_cast_probe(dev, apb_rst, "Unable to get reset control\n");
+
+ if (apb_rst) {
+ ret = clk_prepare_enable(dsi2->pclk);
+ if (ret) {
+ dev_err(dev, "%s: Failed to enable pclk\n", __func__);
+ return ERR_PTR(ret);
+ }
+
+ reset_control_assert(apb_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(apb_rst);
+
+ clk_disable_unprepare(dsi2->pclk);
+ }
+
+ devm_pm_runtime_enable(dev);
+
+ dsi2->dsi_host.ops = &dw_mipi_dsi2_host_ops;
+ dsi2->dsi_host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi2->dsi_host);
+ if (ret) {
+ dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ dsi2->bridge.driver_private = dsi2;
+ dsi2->bridge.funcs = &dw_mipi_dsi2_bridge_funcs;
+ dsi2->bridge.of_node = pdev->dev.of_node;
+
+ return dsi2;
+}
+
+static void __dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2)
+{
+ mipi_dsi_host_unregister(&dsi2->dsi_host);
+}
+
+/*
+ * Probe/remove API, used to create the bridge instance.
+ */
+struct dw_mipi_dsi2 *
+dw_mipi_dsi2_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi2_plat_data *plat_data)
+{
+ return __dw_mipi_dsi2_probe(pdev, plat_data);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_probe);
+
+void dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2)
+{
+ __dw_mipi_dsi2_remove(dsi2);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_remove);
+
+/*
+ * Bind/unbind API, used from platforms based on the component framework
+ * to attach the bridge to an encoder.
+ */
+int dw_mipi_dsi2_bind(struct dw_mipi_dsi2 *dsi2, struct drm_encoder *encoder)
+{
+ return drm_bridge_attach(encoder, &dsi2->bridge, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_bind);
+
+void dw_mipi_dsi2_unbind(struct dw_mipi_dsi2 *dsi2)
+{
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi2_unbind);
+
+MODULE_AUTHOR("Guochun Huang <hero.huang@rock-chips.com>");
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@cherry.de>");
+MODULE_DESCRIPTION("DW MIPI DSI2 host controller driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dw-mipi-dsi2");
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 290e2532fab1..4637bf6ea7a3 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1707,13 +1707,20 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge,
{
struct tc_data *tc = bridge_to_tc(bridge);
- drm_mode_copy(&tc->mode, mode);
+ drm_mode_copy(&tc->mode, adj);
}
static const struct drm_edid *tc_edid_read(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct tc_data *tc = bridge_to_tc(bridge);
+ int ret;
+
+ ret = tc_get_display_props(tc);
+ if (ret < 0) {
+ dev_err(tc->dev, "failed to read display props: %d\n", ret);
+ return NULL;
+ }
return drm_edid_read_ddc(connector, &tc->aux.ddc);
}
@@ -2169,19 +2176,31 @@ static const struct regmap_access_table tc_precious_table = {
.n_yes_ranges = ARRAY_SIZE(tc_precious_ranges),
};
-static const struct regmap_range tc_non_writeable_ranges[] = {
- regmap_reg_range(PPI_BUSYPPI, PPI_BUSYPPI),
- regmap_reg_range(DSI_BUSYDSI, DSI_BUSYDSI),
- regmap_reg_range(DSI_LANESTATUS0, DSI_INTSTATUS),
- regmap_reg_range(TC_IDREG, SYSSTAT),
- regmap_reg_range(GPIOI, GPIOI),
- regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ),
-};
-
-static const struct regmap_access_table tc_writeable_table = {
- .no_ranges = tc_non_writeable_ranges,
- .n_no_ranges = ARRAY_SIZE(tc_non_writeable_ranges),
-};
+static bool tc_writeable_reg(struct device *dev, unsigned int reg)
+{
+ /* read-only registers */
+ switch (reg) {
+ case PPI_BUSYPPI:
+ case DSI_BUSYDSI:
+ case DSI_LANESTATUS0:
+ case DSI_LANESTATUS1:
+ case DSI_INTSTATUS:
+ case TC_IDREG:
+ case SYSBOOT:
+ case SYSSTAT:
+ case GPIOI:
+ case DP0_LTSTAT:
+ case DP0_SNKLTCHGREQ:
+ return false;
+ }
+ /* write-only registers */
+ switch (reg) {
+ case DSI_STARTDSI:
+ case DSI_INTCLR:
+ return true;
+ }
+ return tc_readable_reg(dev, reg);
+}
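
Switching from a static wr_table to a writeable_reg callback makes it possible to declare DSI_STARTDSI and DSI_INTCLR writable even though they are not readable — something a single allow/deny table cannot express alongside "writable iff readable" for everything else. A stripped-down model of that decision order (register names and numbers below are invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

enum { REG_BUSY = 0x100, REG_START = 0x104, REG_DATA = 0x108 };

static bool readable_reg(unsigned int reg)
{
	return reg == REG_BUSY || reg == REG_DATA;  /* START is write-only */
}

static bool writeable_reg(unsigned int reg)
{
	if (reg == REG_BUSY)    /* read-only status */
		return false;
	if (reg == REG_START)   /* write-only trigger */
		return true;
	return readable_reg(reg);  /* default: writable iff readable */
}

int main(void)
{
	printf("BUSY:  r=%d w=%d\n", readable_reg(REG_BUSY), writeable_reg(REG_BUSY));
	printf("START: r=%d w=%d\n", readable_reg(REG_START), writeable_reg(REG_START));
	printf("DATA:  r=%d w=%d\n", readable_reg(REG_DATA), writeable_reg(REG_DATA));
	return 0;
}
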
static const struct regmap_config tc_regmap_config = {
.name = "tc358767",
@@ -2191,9 +2210,9 @@ static const struct regmap_config tc_regmap_config = {
.max_register = PLL_DBG,
.cache_type = REGCACHE_MAPLE,
.readable_reg = tc_readable_reg,
+ .writeable_reg = tc_writeable_reg,
.volatile_table = &tc_volatile_table,
.precious_table = &tc_precious_table,
- .wr_table = &tc_writeable_table,
.reg_format_endian = REGMAP_ENDIAN_BIG,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
@@ -2229,11 +2248,11 @@ static irqreturn_t tc_irq_handler(int irq, void *arg)
bool h = val & INT_GPIO_H(tc->hpd_pin);
bool lc = val & INT_GPIO_LC(tc->hpd_pin);
- dev_dbg(tc->dev, "GPIO%d: %s %s\n", tc->hpd_pin,
- h ? "H" : "", lc ? "LC" : "");
-
- if (h || lc)
+ if (h || lc) {
+ dev_dbg(tc->dev, "GPIO%d: %s %s\n", tc->hpd_pin,
+ h ? "H" : "", lc ? "LC" : "");
drm_kms_helper_hotplug_event(tc->bridge.dev);
+ }
}
regmap_write(tc->regmap, INTSTS_G, val);
@@ -2298,7 +2317,8 @@ static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc)
/* port@1 is the DPI input/output port */
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, &bridge);
if (ret && ret != -ENODEV)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Could not find DPI panel or bridge\n");
if (panel) {
bridge = devm_drm_panel_bridge_add(dev, panel);
@@ -2326,7 +2346,8 @@ static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
/* port@2 is the output port */
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, NULL);
if (ret && ret != -ENODEV)
- return ret;
+ return dev_err_probe(dev, ret,
+ "Could not find DSI panel or bridge\n");
if (panel) {
struct drm_bridge *panel_bridge;
@@ -2391,6 +2412,7 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
if (tc->pre_emphasis[0] < 0 || tc->pre_emphasis[0] > 2 ||
tc->pre_emphasis[1] < 0 || tc->pre_emphasis[1] > 2) {
dev_err(dev, "Incorrect Pre-Emphasis setting, use either 0=0dB 1=3.5dB 2=6dB\n");
+ of_node_put(node);
return -EINVAL;
}
}
@@ -2550,7 +2572,7 @@ static int tc_probe(struct i2c_client *client)
ret = tc_mipi_dsi_host_attach(tc);
if (ret) {
drm_bridge_remove(&tc->bridge);
- return ret;
+ return dev_err_probe(dev, ret, "Failed to attach DSI host\n");
}
}
@@ -2565,7 +2587,7 @@ static void tc_remove(struct i2c_client *client)
}
static const struct i2c_device_id tc358767_i2c_ids[] = {
- { "tc358767", 0 },
+ { "tc358767" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 0e8813278a2f..ec79b0dd0e2c 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -125,6 +125,9 @@
#define TC358768_DSI_CONFW_MODE_CLR (6 << 29)
#define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24)
+/* TC358768_DSICMD_TX (0x0600) register */
+#define TC358768_DSI_CMDTX_DC_START BIT(0)
+
static const char * const tc358768_supplies[] = {
"vddc", "vddmipi", "vddio"
};
@@ -229,6 +232,21 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
tc358768_write(priv, reg, tmp);
}
+static void tc358768_dsicmd_tx(struct tc358768_priv *priv)
+{
+ u32 val;
+
+ /* start transfer */
+ tc358768_write(priv, TC358768_DSICMD_TX, TC358768_DSI_CMDTX_DC_START);
+ if (priv->error)
+ return;
+
+ /* wait for transfer completion */
+ priv->error = regmap_read_poll_timeout(priv->regmap, TC358768_DSICMD_TX, val,
+ (val & TC358768_DSI_CMDTX_DC_START) == 0,
+ 100, 100000);
+}
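
DC_START is a self-clearing trigger bit: the write kicks off the DSI command transfer and the hardware drops the bit once it completes, so polling it until it reads back zero replaces the previous fire-and-forget write. A toy model of polling a self-clearing bit (the three-reads-busy behaviour is invented for the sketch):

#include <stdio.h>

static int busy_reads = 3;  /* pretend hw stays busy for three reads */

static unsigned int read_cmdtx(void)
{
	return busy_reads-- > 0 ? 1 : 0;  /* bit 0 models DC_START */
}

int main(void)
{
	for (int tries = 0; tries < 1000; tries++) {
		if ((read_cmdtx() & 1) == 0) {
			printf("transfer done after %d polls\n", tries + 1);
			return 0;
		}
	}
	printf("timeout\n");
	return 1;
}
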
+
static int tc358768_sw_reset(struct tc358768_priv *priv)
{
/* Assert Reset */
@@ -443,7 +461,9 @@ static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
ret = -EINVAL;
ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
if (ep) {
- ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
+ ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines);
+ if (ret)
+ ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
of_node_put(ep);
}
@@ -516,8 +536,7 @@ static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
}
}
- /* start transfer */
- tc358768_write(priv, TC358768_DSICMD_TX, 1);
+ tc358768_dsicmd_tx(priv);
ret = tc358768_clear_error(priv);
if (ret)
@@ -1225,8 +1244,8 @@ static const struct regmap_config tc358768_regmap_config = {
};
static const struct i2c_device_id tc358768_i2c_ids[] = {
- { "tc358768", 0 },
- { "tc358778", 0 },
+ { "tc358768" },
+ { "tc358778" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc358768_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index 674efc489e3a..bba10cf9b4f9 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -230,7 +230,7 @@ MODULE_DEVICE_TABLE(of, thc63_match);
static struct platform_driver thc63_driver = {
.probe = thc63_probe,
- .remove_new = thc63_remove,
+ .remove = thc63_remove,
.driver = {
.name = "thc63lvd1024",
.of_match_table = thc63_match,
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index 6b559e071301..eaec70fa42b6 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -94,7 +94,7 @@ static const struct regmap_access_table dlpc_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(dlpc_volatile_ranges),
};
-static struct regmap_config dlpc_regmap_config = {
+static const struct regmap_config dlpc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = WR_DSI_PORT_EN,
@@ -389,7 +389,7 @@ static void dlpc3433_remove(struct i2c_client *client)
}
static const struct i2c_device_id dlpc3433_id[] = {
- { "ti,dlpc3433", 0 },
+ { "ti,dlpc3433" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, dlpc3433_id);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 57a7ed13f996..336380114eea 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -132,6 +132,16 @@
#define REG_IRQ_STAT_CHA_SOT_BIT_ERR BIT(2)
#define REG_IRQ_STAT_CHA_PLL_UNLOCK BIT(0)
+enum sn65dsi83_channel {
+ CHANNEL_A,
+ CHANNEL_B
+};
+
+enum sn65dsi83_lvds_term {
+ OHM_100,
+ OHM_200
+};
+
enum sn65dsi83_model {
MODEL_SN65DSI83,
MODEL_SN65DSI84,
@@ -147,6 +157,8 @@ struct sn65dsi83 {
struct regulator *vcc;
bool lvds_dual_link;
bool lvds_dual_link_even_odd_swap;
+ int lvds_vod_swing_conf[2];
+ int lvds_term_conf[2];
};
static const struct regmap_range sn65dsi83_readable_ranges[] = {
@@ -237,6 +249,36 @@ static const struct regmap_config sn65dsi83_regmap_config = {
.max_register = REG_IRQ_STAT,
};
+static const int lvds_vod_swing_data_table[2][4][2] = {
+ { /* 100 Ohm */
+ { 180000, 313000 },
+ { 215000, 372000 },
+ { 250000, 430000 },
+ { 290000, 488000 },
+ },
+ { /* 200 Ohm */
+ { 150000, 261000 },
+ { 200000, 346000 },
+ { 250000, 428000 },
+ { 300000, 511000 },
+ },
+};
+
+static const int lvds_vod_swing_clock_table[2][4][2] = {
+ { /* 100 Ohm */
+ { 140000, 244000 },
+ { 168000, 290000 },
+ { 195000, 335000 },
+ { 226000, 381000 },
+ },
+ { /* 200 Ohm */
+ { 117000, 204000 },
+ { 156000, 270000 },
+ { 195000, 334000 },
+ { 234000, 399000 },
+ },
+};
+
static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
{
return container_of(bridge, struct sn65dsi83, bridge);
@@ -435,12 +477,16 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
val |= REG_LVDS_FMT_LVDS_LINK_CFG;
regmap_write(ctx->regmap, REG_LVDS_FMT, val);
- regmap_write(ctx->regmap, REG_LVDS_VCOM, 0x05);
+ regmap_write(ctx->regmap, REG_LVDS_VCOM,
+ REG_LVDS_VCOM_CHA_LVDS_VOD_SWING(ctx->lvds_vod_swing_conf[CHANNEL_A]) |
+ REG_LVDS_VCOM_CHB_LVDS_VOD_SWING(ctx->lvds_vod_swing_conf[CHANNEL_B]));
regmap_write(ctx->regmap, REG_LVDS_LANE,
(ctx->lvds_dual_link_even_odd_swap ?
REG_LVDS_LANE_EVEN_ODD_SWAP : 0) |
- REG_LVDS_LANE_CHA_LVDS_TERM |
- REG_LVDS_LANE_CHB_LVDS_TERM);
+ (ctx->lvds_term_conf[CHANNEL_A] ?
+ REG_LVDS_LANE_CHA_LVDS_TERM : 0) |
+ (ctx->lvds_term_conf[CHANNEL_B] ?
+ REG_LVDS_LANE_CHB_LVDS_TERM : 0));
regmap_write(ctx->regmap, REG_LVDS_CM, 0x00);
le16val = cpu_to_le16(mode->hdisplay);
@@ -576,10 +622,103 @@ static const struct drm_bridge_funcs sn65dsi83_funcs = {
.atomic_get_input_bus_fmts = sn65dsi83_atomic_get_input_bus_fmts,
};
+static int sn65dsi83_select_lvds_vod_swing(struct device *dev,
+ u32 lvds_vod_swing_data[2], u32 lvds_vod_swing_clk[2], u8 lvds_term)
+{
+ int i;
+
+ for (i = 0; i <= 3; i++) {
+ if (lvds_vod_swing_data_table[lvds_term][i][0] >= lvds_vod_swing_data[0] &&
+ lvds_vod_swing_data_table[lvds_term][i][1] <= lvds_vod_swing_data[1] &&
+ lvds_vod_swing_clock_table[lvds_term][i][0] >= lvds_vod_swing_clk[0] &&
+ lvds_vod_swing_clock_table[lvds_term][i][1] <= lvds_vod_swing_clk[1])
+ return i;
+ }
+
+ dev_err(dev, "failed to find appropriate LVDS_VOD_SWING configuration\n");
+ return -EINVAL;
+}
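
The helper picks the lowest table row whose full output range fits inside the window requested via device tree, i.e. row_min >= requested_min and row_max <= requested_max, for both the data and clock tables. A standalone rerun of the data-swing half of that search over the 200 Ohm rows (the 180-400 mV window is hypothetical, and the clock-swing check is omitted for brevity):

#include <stdio.h>

/* 200 Ohm data-swing rows copied from the table above: {min, max} in uV. */
static const int rows[4][2] = {
	{ 150000, 261000 },
	{ 200000, 346000 },
	{ 250000, 428000 },
	{ 300000, 511000 },
};

int main(void)
{
	/* Hypothetical board limit: swing must stay within 180..400 mV. */
	int lo = 180000, hi = 400000;

	for (int i = 0; i < 4; i++) {
		if (rows[i][0] >= lo && rows[i][1] <= hi) {
			printf("row %d fits: %d..%d uV\n", i, rows[i][0], rows[i][1]);
			return 0;  /* prints row 1: 200000..346000 uV */
		}
	}
	printf("no configuration fits\n");
	return 1;
}
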
+
+static int sn65dsi83_parse_lvds_endpoint(struct sn65dsi83 *ctx, int channel)
+{
+ struct device *dev = ctx->dev;
+ struct device_node *endpoint;
+ int endpoint_reg;
+ /* Wide defaults so any table entry matches when the properties are not given */
+ u32 lvds_vod_swing_data[2] = { 0, 1000000 };
+ u32 lvds_vod_swing_clk[2] = { 0, 1000000 };
+ /* Default near-end termination is 200 Ohm */
+ u32 lvds_term = 200;
+ int lvds_vod_swing_conf;
+ int ret = 0;
+ int ret_data;
+ int ret_clock;
+
+ if (channel == CHANNEL_A)
+ endpoint_reg = 2;
+ else
+ endpoint_reg = 3;
+
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, endpoint_reg, -1);
+
+ of_property_read_u32(endpoint, "ti,lvds-termination-ohms", &lvds_term);
+ if (lvds_term == 100)
+ ctx->lvds_term_conf[channel] = OHM_100;
+ else if (lvds_term == 200)
+ ctx->lvds_term_conf[channel] = OHM_200;
+ else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret_data = of_property_read_u32_array(endpoint, "ti,lvds-vod-swing-data-microvolt",
+ lvds_vod_swing_data, ARRAY_SIZE(lvds_vod_swing_data));
+ if (ret_data != 0 && ret_data != -EINVAL) {
+ ret = ret_data;
+ goto exit;
+ }
+
+ ret_clock = of_property_read_u32_array(endpoint, "ti,lvds-vod-swing-clock-microvolt",
+ lvds_vod_swing_clk, ARRAY_SIZE(lvds_vod_swing_clk));
+ if (ret_clock != 0 && ret_clock != -EINVAL) {
+ ret = ret_clock;
+ goto exit;
+ }
+
+ /* Use the default value if neither property is defined. */
+ if (ret_data == -EINVAL && ret_clock == -EINVAL)
+ lvds_vod_swing_conf = 0x1;
+
+ /* Use the lookup table if either of the two properties is defined. */
+ if (!ret_data || !ret_clock) {
+ lvds_vod_swing_conf = sn65dsi83_select_lvds_vod_swing(dev, lvds_vod_swing_data,
+ lvds_vod_swing_clk, ctx->lvds_term_conf[channel]);
+ if (lvds_vod_swing_conf < 0) {
+ ret = lvds_vod_swing_conf;
+ goto exit;
+ }
+ }
+
+ ctx->lvds_vod_swing_conf[channel] = lvds_vod_swing_conf;
+ ret = 0;
+exit:
+ of_node_put(endpoint);
+ return ret;
+}
+
static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
{
struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
+ int ret;
+
+ ret = sn65dsi83_parse_lvds_endpoint(ctx, CHANNEL_A);
+ if (ret < 0)
+ return ret;
+
+ ret = sn65dsi83_parse_lvds_endpoint(ctx, CHANNEL_B);
+ if (ret < 0)
+ return ret;
ctx->lvds_dual_link = false;
ctx->lvds_dual_link_even_odd_swap = false;
@@ -606,7 +745,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0);
if (IS_ERR(panel_bridge))
- return PTR_ERR(panel_bridge);
+ return dev_err_probe(dev, PTR_ERR(panel_bridge), "Failed to get panel bridge\n");
ctx->panel_bridge = panel_bridge;
@@ -732,7 +871,7 @@ static void sn65dsi83_remove(struct i2c_client *client)
drm_bridge_remove(&ctx->bridge);
}
-static struct i2c_device_id sn65dsi83_id[] = {
+static const struct i2c_device_id sn65dsi83_id[] = {
{ "ti,sn65dsi83", MODEL_SN65DSI83 },
{ "ti,sn65dsi84", MODEL_SN65DSI84 },
{},
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 582cf4f73a74..e4d9006b59f1 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1635,8 +1635,8 @@ static void ti_sn_pwm_unregister(void)
}
#else
-static inline int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return 0; }
-static inline void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) {}
+static inline int __maybe_unused ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return 0; }
+static inline void __maybe_unused ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) {}
static inline int ti_sn_pwm_register(void) { return 0; }
static inline void ti_sn_pwm_unregister(void) {}
@@ -1970,9 +1970,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
return ti_sn65dsi86_add_aux_device(pdata, &pdata->aux_aux, "aux");
}
-static struct i2c_device_id ti_sn65dsi86_id[] = {
- { "ti,sn65dsi86", 0},
- {},
+static const struct i2c_device_id ti_sn65dsi86_id[] = {
+ { "ti,sn65dsi86" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ti_sn65dsi86_id);
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
new file mode 100644
index 000000000000..3472ed5924e8
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2024 Freebox SAS
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+
+struct tdp158 {
+ struct drm_bridge bridge;
+ struct drm_bridge *next;
+ struct gpio_desc *enable; // Operation Enable - pin 36
+ struct regulator *vcc; // 3.3V
+ struct regulator *vdd; // 1.1V
+ struct device *dev;
+};
+
+static void tdp158_enable(struct drm_bridge *bridge, struct drm_bridge_state *prev)
+{
+ int err;
+ struct tdp158 *tdp158 = bridge->driver_private;
+
+ err = regulator_enable(tdp158->vcc);
+ if (err)
+ dev_err(tdp158->dev, "failed to enable vcc: %d", err);
+
+ err = regulator_enable(tdp158->vdd);
+ if (err)
+ dev_err(tdp158->dev, "failed to enable vdd: %d", err);
+
+ gpiod_set_value_cansleep(tdp158->enable, 1);
+}
+
+static void tdp158_disable(struct drm_bridge *bridge, struct drm_bridge_state *prev)
+{
+ struct tdp158 *tdp158 = bridge->driver_private;
+
+ gpiod_set_value_cansleep(tdp158->enable, 0);
+ regulator_disable(tdp158->vdd);
+ regulator_disable(tdp158->vcc);
+}
+
+static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
+{
+ struct tdp158 *tdp158 = bridge->driver_private;
+
+ return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags);
+}
+
+static const struct drm_bridge_funcs tdp158_bridge_funcs = {
+ .attach = tdp158_attach,
+ .atomic_enable = tdp158_enable,
+ .atomic_disable = tdp158_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+static int tdp158_probe(struct i2c_client *client)
+{
+ struct tdp158 *tdp158;
+ struct device *dev = &client->dev;
+
+ tdp158 = devm_kzalloc(dev, sizeof(*tdp158), GFP_KERNEL);
+ if (!tdp158)
+ return -ENOMEM;
+
+ tdp158->next = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+ if (IS_ERR(tdp158->next))
+ return dev_err_probe(dev, PTR_ERR(tdp158->next), "missing bridge");
+
+ tdp158->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(tdp158->vcc))
+ return dev_err_probe(dev, PTR_ERR(tdp158->vcc), "vcc");
+
+ tdp158->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(tdp158->vdd))
+ return dev_err_probe(dev, PTR_ERR(tdp158->vdd), "vdd");
+
+ tdp158->enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(tdp158->enable))
+ return dev_err_probe(dev, PTR_ERR(tdp158->enable), "enable");
+
+ tdp158->bridge.of_node = dev->of_node;
+ tdp158->bridge.funcs = &tdp158_bridge_funcs;
+ tdp158->bridge.driver_private = tdp158;
+ tdp158->dev = dev;
+
+ return devm_drm_bridge_add(dev, &tdp158->bridge);
+}
+
+static const struct of_device_id tdp158_match_table[] = {
+ { .compatible = "ti,tdp158" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tdp158_match_table);
+
+static struct i2c_driver tdp158_driver = {
+ .probe = tdp158_probe,
+ .driver = {
+ .name = "tdp158",
+ .of_match_table = tdp158_match_table,
+ },
+};
+module_i2c_driver(tdp158_driver);
+
+MODULE_DESCRIPTION("TI TDP158 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index b1b1e4d5a24a..79ab5da827e1 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -406,7 +406,7 @@ MODULE_DEVICE_TABLE(of, tfp410_match);
static struct platform_driver tfp410_platform_driver = {
.probe = tfp410_probe,
- .remove_new = tfp410_remove,
+ .remove = tfp410_remove,
.driver = {
.name = "tfp410-bridge",
.of_match_table = tfp410_match,
@@ -435,7 +435,7 @@ static void tfp410_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id tfp410_i2c_ids[] = {
- { "tfp410", 0 },
+ { "tfp410" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tfp410_i2c_ids);
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index f9fb35683a27..47b74cb25b14 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -195,7 +195,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
static struct platform_driver tpd12s015_driver = {
.probe = tpd12s015_probe,
- .remove_new = tpd12s015_remove,
+ .remove = tpd12s015_remove,
.driver = {
.name = "tpd12s015",
.of_match_table = tpd12s015_of_match,
diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config
index 66e70ced796f..a8fca079921b 100644
--- a/drivers/gpu/drm/ci/arm64.config
+++ b/drivers/gpu/drm/ci/arm64.config
@@ -90,7 +90,12 @@ CONFIG_QCOM_GPI_DMA=y
CONFIG_USB_ONBOARD_DEV=y
CONFIG_NVMEM_QCOM_QFPROM=y
CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y
-
+CONFIG_REGULATOR_QCOM_REFGEN=y
+CONFIG_TYPEC_MUX_FSA4480=y
+CONFIG_QCOM_PMIC_GLINK=y
+CONFIG_UCSI_PMIC_GLINK=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
# db410c ethernet
CONFIG_USB_RTL8152=y
diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh
index 5a3bdcffae32..139b81db6312 100644
--- a/drivers/gpu/drm/ci/build.sh
+++ b/drivers/gpu/drm/ci/build.sh
@@ -30,6 +30,7 @@ if [[ "$KERNEL_ARCH" = "arm64" ]]; then
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dtb"
+ DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sm8350-hdk.dtb"
elif [[ "$KERNEL_ARCH" = "arm" ]]; then
GCC_ARCH="arm-linux-gnueabihf"
DEBIAN_ARCH="armhf"
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index eca47d4f816f..90bde9f00cc3 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -1,14 +1,14 @@
variables:
DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa
- DRM_CI_COMMIT_SHA: &drm-ci-commit-sha d9849ac46623797a9f56fb9d46dc52460ac477de
+ DRM_CI_COMMIT_SHA: &drm-ci-commit-sha c6a9a9c3bce90923f7700219354e0b6e5a3c9ba6
UPSTREAM_REPO: https://gitlab.freedesktop.org/drm/kernel.git
TARGET_BRANCH: drm-next
- IGT_VERSION: f13702b8e4e847c56da3ef6f0969065d686049c5
+ IGT_VERSION: a73311079a5d8ac99eb25336a8369a2c3c6b519b
DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/mesa/deqp-runner.git
- DEQP_RUNNER_GIT_TAG: v0.15.0
+ DEQP_RUNNER_GIT_TAG: v0.20.0
FDO_UPSTREAM_REPO: helen.fornazier/linux # The repo where the git-archive daily runs
MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
@@ -153,6 +153,14 @@ stages:
# Pre-merge pipeline for Marge Bot
- if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
when: on_success
+ # Push to a branch on a fork
+ - &is-fork-push '$CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push"'
+
+# Rules applied to every job in the pipeline
+.common-rules:
+ rules:
+ - if: *is-fork-push
+ when: manual
.never-post-merge-rules:
rules:
diff --git a/drivers/gpu/drm/ci/image-tags.yml b/drivers/gpu/drm/ci/image-tags.yml
index 2c340d063a96..8d8b9e71852e 100644
--- a/drivers/gpu/drm/ci/image-tags.yml
+++ b/drivers/gpu/drm/ci/image-tags.yml
@@ -1,5 +1,5 @@
variables:
- CONTAINER_TAG: "2024-08-07-mesa-uprev"
+ CONTAINER_TAG: "2024-09-09-uprevs"
DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
DEBIAN_BASE_TAG: "${CONTAINER_TAG}"
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 09d8447840e9..f0ef60c8f56d 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -162,6 +162,22 @@ msm:sdm845:
script:
- ./install/bare-metal/cros-servo.sh
+msm:sm8350-hdk:
+ extends:
+ - .lava-igt:arm64
+ stage: msm
+ parallel: 4
+ variables:
+ BOOT_METHOD: fastboot
+ DEVICE_TYPE: sm8350-hdk
+ DRIVER_NAME: msm
+ DTB: ${DEVICE_TYPE}
+ FARM: collabora
+ GPU_VERSION: ${DEVICE_TYPE}
+ KERNEL_IMAGE_NAME: "Image.gz"
+ KERNEL_IMAGE_TYPE: ""
+ RUNNER_TAG: mesa-ci-x86-64-lava-sm8350-hdk
+
.rockchip-device:
variables:
DTB: ${DEVICE_TYPE}
@@ -286,6 +302,15 @@ i915:tgl:
GPU_VERSION: tgl
RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer
+i915:jsl:
+ extends:
+ - .i915
+ parallel: 4
+ variables:
+ DEVICE_TYPE: acer-cb317-1h-c3z6-dedede
+ GPU_VERSION: jsl
+ RUNNER_TAG: mesa-ci-x86-64-lava-acer-cb317-1h-c3z6-dedede
+
.amdgpu:
extends:
- .lava-igt:x86_64
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
index 8e2fed6d76a3..f44dbce3151a 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-fails.txt
@@ -2,6 +2,7 @@ amdgpu/amd_abm@abm_enabled,Fail
amdgpu/amd_abm@abm_gradual,Fail
amdgpu/amd_abm@backlight_monotonic_abm,Fail
amdgpu/amd_abm@backlight_monotonic_basic,Fail
+amdgpu/amd_abm@dpms_cycle,Fail
amdgpu/amd_assr@assr-links,Fail
amdgpu/amd_assr@assr-links-dpms,Fail
amdgpu/amd_mall@static-screen,Crash
@@ -14,7 +15,6 @@ amdgpu/amd_plane@mpo-scale-p010,Fail
amdgpu/amd_plane@mpo-scale-rgb,Crash
amdgpu/amd_plane@mpo-swizzle-toggle,Fail
amdgpu/amd_uvd_dec@amdgpu_uvd_decode,Fail
-dumb_buffer@invalid-bpp,Fail
kms_addfb_basic@bad-pitch-65536,Fail
kms_addfb_basic@bo-too-small,Fail
kms_addfb_basic@too-high,Fail
diff --git a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
index e4faa96fa000..e70bd9d447ca 100644
--- a/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/amdgpu-stoney-flakes.txt
@@ -18,3 +18,10 @@ kms_async_flips@crc
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_plane@pixel-format-source-clamping
+
+# Board Name: hp-11A-G6-EE-grunt
+# Bug Report: https://lore.kernel.org/amd-gfx/09ee1862-3a0e-4085-ac1b-262601b1122b@collabora.com/T/#t
+# Failure Rate: 100
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_async_flips@async-flip-with-page-flip-events
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
index 9b84f68a5122..0907cb0f6d9e 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-fails.txt
@@ -1,3 +1,4 @@
+core_setmaster@master-drop-set-shared-fd,Fail
core_setmaster@master-drop-set-user,Fail
core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
@@ -6,7 +7,6 @@ i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
-kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_fb_coherency@memset-crc,Crash
kms_flip@busy-flip,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
index 581f0da4d0f2..0207c9807bee 100644
--- a/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-amly-flakes.txt
@@ -46,3 +46,10 @@ i915_hangman@engine-engine-hang
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_pm_rpm@modeset-lpsp-stress
+
+# Board Name: asus-C433TA-AJ0005-rammus
+# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t
+# Failure Rate: 50
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_pm_rpm@drm-resources-equal
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
index e612281149aa..64772fedaed5 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-fails.txt
@@ -8,7 +8,6 @@ kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
-kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
index 4663d4d13f35..e8bddda56737 100644
--- a/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-apl-flakes.txt
@@ -4,3 +4,10 @@
# IGT Version: 1.28-g0df7b9b97
# Linux Version: 6.9.0-rc7
kms_fb_coherency@memset-crc
+
+# Board Name: asus-C523NA-A20057-coral
+# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t
+# Failure Rate: 100
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_universal_plane@cursor-fb-leak
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
index 2723e2832797..f352b719cf7d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-fails.txt
@@ -1,5 +1,5 @@
+core_setmaster@master-drop-set-shared-fd,Fail
core_setmaster@master-drop-set-user,Fail
-core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -9,10 +9,10 @@ i915_pipe_stress@stress-xrgb8888-ytiled,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
+i915_pm_rps@engine-order,Fail
+kms_big_fb@linear-16bpp-rotate-180,Timeout
kms_fb_coherency@memset-crc,Crash
kms_flip@busy-flip,Timeout
-kms_flip@single-buffer-flip-vs-dpms-off-vs-modeset-interruptible,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
@@ -40,14 +40,11 @@ kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
-kms_prop_blob@invalid-set-prop,Fail
kms_psr2_sf@cursor-plane-update-sf,Fail
-kms_psr2_sf@fbc-plane-move-sf-dmg-area,Timeout
kms_psr2_sf@overlay-plane-update-continuous-sf,Fail
kms_psr2_sf@overlay-plane-update-sf-dmg-area,Fail
kms_psr2_sf@overlay-primary-update-sf-dmg-area,Fail
@@ -55,7 +52,6 @@ kms_psr2_sf@plane-move-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area,Fail
kms_psr2_sf@primary-plane-update-sf-dmg-area-big-fb,Fail
kms_psr2_su@page_flip-NV12,Fail
-kms_psr2_su@page_flip-P010,Fail
kms_rotation_crc@primary-rotation-180,Timeout
kms_setmode@basic,Fail
kms_vblank@query-forked-hang,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
index 58a6001abb28..d8401251e5f4 100644
--- a/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-cml-flakes.txt
@@ -11,3 +11,17 @@ kms_plane_alpha_blend@constant-alpha-min
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_atomic_transition@plane-all-modeset-transition-internal-panels
+
+# Board Name: asus-C436FA-Flip-hatch
+# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t
+# Failure Rate: 100
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_plane_alpha_blend@constant-alpha-min
+
+# Board Name: asus-C436FA-Flip-hatch
+# Bug Report: https://lore.kernel.org/intel-gfx/61f62c86-3e82-4eff-bd3c-8123fa0ca332@collabora.com/T/#t
+# Failure Rate: 50
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_async_flips@crc
diff --git a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
index 4821c9adefd1..6eb64c672f7d 100644
--- a/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-glk-fails.txt
@@ -63,3 +63,4 @@ xe_module_load@load,Fail
xe_module_load@many-reload,Fail
xe_module_load@reload,Fail
xe_module_load@reload-no-display,Fail
+core_setmaster@master-drop-set-shared-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
new file mode 100644
index 000000000000..ed9f7b576843
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-fails.txt
@@ -0,0 +1,51 @@
+core_setmaster@master-drop-set-user,Fail
+i915_module_load@load,Fail
+i915_module_load@reload,Fail
+i915_module_load@reload-no-display,Fail
+i915_module_load@resize-bar,Fail
+i915_pm_rpm@gem-execbuf-stress,Timeout
+i915_pm_rpm@module-reload,Fail
+kms_flip@plain-flip-fb-recreate,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
+kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
+kms_lease@lease-uevent,Fail
+kms_pm_rpm@legacy-planes,Timeout
+kms_pm_rpm@legacy-planes-dpms,Timeout
+kms_pm_rpm@modeset-stress-extra-wait,Timeout
+kms_pm_rpm@universal-planes,Timeout
+kms_pm_rpm@universal-planes-dpms,Timeout
+kms_rotation_crc@multiplane-rotation,Fail
+kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
+kms_rotation_crc@multiplane-rotation-cropping-top,Fail
+perf@i915-ref-count,Fail
+perf_pmu@busy-accuracy-50,Fail
+perf_pmu@module-unload,Fail
+perf_pmu@most-busy-idle-check-all,Fail
+perf_pmu@rc6,Crash
+sysfs_heartbeat_interval@long,Timeout
+sysfs_heartbeat_interval@off,Timeout
+sysfs_preempt_timeout@off,Timeout
+sysfs_timeslice_duration@off,Timeout
+xe_module_load@force-load,Fail
+xe_module_load@load,Fail
+xe_module_load@many-reload,Fail
+xe_module_load@reload,Fail
+xe_module_load@reload-no-display,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
new file mode 100644
index 000000000000..5c3ef4486b9d
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-flakes.txt
@@ -0,0 +1,13 @@
+# Board Name: acer-cb317-1h-c3z6-dedede
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12475
+# Failure Rate: 100
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.12.0-rc1
+kms_flip@flip-vs-panning-interruptible
+
+# Board Name: acer-cb317-1h-c3z6-dedede
+# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12476
+# Failure Rate: 100
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.12.0-rc1
+kms_universal_plane@cursor-fb-leak
diff --git a/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
new file mode 100644
index 000000000000..1a3d87c0ca6e
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/i915-jsl-skips.txt
@@ -0,0 +1,20 @@
+# Suspend to RAM seems to be broken on this machine
+.*suspend.*
+
+# Skip driver specific tests
+^amdgpu.*
+^msm.*
+nouveau_.*
+^panfrost.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# GEM tests take ~1000 hours, so skip them
+gem_.*
+
+# trap_err
+i915_pm_rc6_residency.*
+
+# Hangs the machine and a timeout occurs
+i915_pm_rpm@system-hibernate*
diff --git a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
index 1de04a3308c4..d4fba4f55ec1 100644
--- a/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-kbl-fails.txt
@@ -17,12 +17,10 @@ perf@i915-ref-count,Fail
perf_pmu@busy-accuracy-50,Fail
perf_pmu@module-unload,Fail
perf_pmu@rc6,Crash
-prime_busy@after,Fail
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
-testdisplay,Timeout
xe_module_load@force-load,Fail
xe_module_load@load,Fail
xe_module_load@many-reload,Fail
diff --git a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
index e728ccc62326..461ef69ef08a 100644
--- a/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-tgl-fails.txt
@@ -1,40 +1,64 @@
+api_intel_allocator@fork-simple-stress-signal,Timeout
+api_intel_allocator@open-vm,Timeout
api_intel_allocator@simple-allocator,Timeout
+api_intel_bb@lot-of-buffers,Timeout
api_intel_bb@object-reloc-keep-cache,Timeout
api_intel_bb@offset-control,Timeout
-core_auth@getclient-simple,Timeout
-core_hotunplug@hotunbind-rebind,Timeout
+api_intel_bb@render-ccs,Timeout
+api_intel_bb@reset-bb,Timeout
+core_auth@basic-auth,Timeout
+core_hotunplug@hotrebind,Timeout
+core_setmaster@master-drop-set-user,Fail
debugfs_test@read_all_entries_display_on,Timeout
-drm_read@invalid-buffer,Timeout
-drm_read@short-buffer-nonblock,Timeout
+drm_read@empty-block,Timeout
+dumb_buffer@create-clear,Timeout
+dumb_buffer@invalid-bpp,Timeout
gen3_render_tiledx_blits,Timeout
gen7_exec_parse@basic-allocation,Timeout
-gen7_exec_parse@batch-without-end,Timeout
gen9_exec_parse@batch-invalid-length,Timeout
gen9_exec_parse@bb-secure,Timeout
gen9_exec_parse@secure-batches,Timeout
gen9_exec_parse@shadow-peek,Timeout
gen9_exec_parse@unaligned-jump,Timeout
+i915_getparams_basic@basic-subslice-total,Timeout
+i915_hangman@gt-engine-hang,Timeout
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
+i915_pciid,Timeout
+i915_pipe_stress@stress-xrgb8888-ytiled,Timeout
+i915_pm_rpm@gem-execbuf-stress,Timeout
+i915_pm_rps@engine-order,Timeout
+i915_pm_rps@thresholds-idle-park,Timeout
i915_query@engine-info,Timeout
i915_query@query-topology-kernel-writes,Timeout
i915_query@test-query-geometry-subslices,Timeout
kms_lease@lease-uevent,Fail
kms_rotation_crc@multiplane-rotation,Fail
perf@i915-ref-count,Fail
+perf_pmu@busy,Timeout
perf_pmu@enable-race,Timeout
perf_pmu@event-wait,Timeout
+perf_pmu@faulting-read,Timeout
perf_pmu@gt-awake,Timeout
perf_pmu@interrupts,Timeout
perf_pmu@module-unload,Fail
+perf_pmu@most-busy-idle-check-all,Timeout
perf_pmu@rc6,Crash
+perf_pmu@render-node-busy-idle,Fail
+perf_pmu@semaphore-wait-idle,Timeout
+prime_busy@after,Timeout
+prime_mmap@test_aperture_limit,Timeout
prime_mmap@test_map_unmap,Timeout
prime_mmap@test_refcounting,Timeout
prime_self_import@basic-with_one_bo,Timeout
+sriov_basic@enable-vfs-autoprobe-off,Timeout
+syncobj_basic@bad-destroy,Timeout
syncobj_basic@bad-flags-fd-to-handle,Timeout
+syncobj_basic@create-signaled,Timeout
syncobj_eventfd@invalid-bad-pad,Timeout
+syncobj_eventfd@timeline-wait-before-signal,Timeout
syncobj_wait@invalid-multi-wait-unsubmitted-signaled,Timeout
syncobj_wait@invalid-signal-illegal-handle,Timeout
syncobj_wait@invalid-single-wait-all-unsubmitted,Timeout
diff --git a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
index 2adae2175501..0ce240e3aa07 100644
--- a/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/i915-whl-fails.txt
@@ -1,5 +1,5 @@
+core_setmaster@master-drop-set-shared-fd,Fail
core_setmaster@master-drop-set-user,Fail
-core_setmaster_vs_auth,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
@@ -7,7 +7,8 @@ i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
i915_pm_rpm@system-suspend-execbuf,Timeout
-kms_ccs@crc-primary-rotation-180-yf-tiled-ccs,Timeout
+i915_pm_rps@engine-order,Fail
+kms_big_fb@linear-16bpp-rotate-180,Timeout
kms_cursor_legacy@short-flip-before-cursor-atomic-transitions,Timeout
kms_dirtyfb@default-dirtyfb-ioctl,Fail
kms_dirtyfb@fbc-dirtyfb-ioctl,Fail
@@ -32,19 +33,17 @@ kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
-kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-cpu,Timeout
kms_frontbuffer_tracking@fbc-tiling-linear,Fail
+kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack-mmap-gtt,Timeout
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
-kms_plane_scaling@plane-scaler-with-clipping-clamping-rotation,Timeout
kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-5,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
-kms_prop_blob@invalid-set-prop,Fail
kms_rotation_crc@primary-rotation-180,Timeout
kms_vblank@query-forked-hang,Timeout
perf@i915-ref-count,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
index a14349a1967f..8e0efc80d510 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt
@@ -1,8 +1,3 @@
-device_reset@cold-reset-bound,Fail
-device_reset@reset-bound,Fail
-device_reset@unbind-cold-reset-rebind,Fail
-device_reset@unbind-reset-rebind,Fail
-dumb_buffer@invalid-bpp,Fail
fbdev@eof,Fail
fbdev@read,Fail
kms_3d,Fail
@@ -27,10 +22,6 @@ kms_cursor_legacy@cursor-vs-flip-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
-kms_flip@flip-vs-suspend,Fail
-kms_flip@flip-vs-suspend-interruptible,Fail
kms_lease@lease-uevent,Fail
-kms_properties@get_properties-sanity-atomic,Fail
-kms_properties@plane-properties-atomic,Fail
-kms_properties@plane-properties-legacy,Fail
kms_rmfb@close-fd,Fail
+kms_flip@flip-vs-suspend-interruptible,Fail
diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
index 8cb2cb67853d..845f852bb4a0 100644
--- a/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8183-fails.txt
@@ -1,10 +1,5 @@
core_setmaster@master-drop-set-shared-fd,Fail
-device_reset@cold-reset-bound,Fail
-device_reset@reset-bound,Fail
-device_reset@unbind-cold-reset-rebind,Fail
-device_reset@unbind-reset-rebind,Fail
dumb_buffer@create-clear,Crash
-dumb_buffer@invalid-bpp,Fail
fbdev@eof,Fail
fbdev@pan,Fail
fbdev@read,Fail
@@ -18,5 +13,4 @@ kms_color@invalid-gamma-lut-sizes,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-suspend,Fail
kms_lease@lease-uevent,Fail
-kms_properties@plane-properties-atomic,Fail
kms_rmfb@close-fd,Fail
diff --git a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
index 328967d3e23d..fc3745180683 100644
--- a/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/meson-g12b-fails.txt
@@ -1,4 +1,3 @@
-dumb_buffer@invalid-bpp,Fail
kms_3d,Fail
kms_cursor_legacy@forked-bo,Fail
kms_cursor_legacy@forked-move,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
index 4ac46168eff3..066d24ee3e08 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt
@@ -1,8 +1,3 @@
-device_reset@cold-reset-bound,Fail
-device_reset@reset-bound,Fail
-device_reset@unbind-cold-reset-rebind,Fail
-device_reset@unbind-reset-rebind,Fail
-dumb_buffer@invalid-bpp,Fail
kms_3d,Fail
kms_cursor_legacy@torture-bo,Fail
kms_force_connector_basic@force-edid,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
index bd0653caf7a0..2893f98a6b97 100644
--- a/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-apq8096-fails.txt
@@ -1,7 +1,2 @@
-device_reset@cold-reset-bound,Fail
-device_reset@reset-bound,Fail
-device_reset@unbind-cold-reset-rebind,Fail
-device_reset@unbind-reset-rebind,Fail
-dumb_buffer@invalid-bpp,Fail
kms_3d,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
index d42004cd6977..6dbc2080347d 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-kingoftown-fails.txt
@@ -1,8 +1,3 @@
-device_reset@cold-reset-bound,Fail
-device_reset@reset-bound,Fail
-device_reset@unbind-cold-reset-rebind,Fail
-device_reset@unbind-reset-rebind,Fail
-dumb_buffer@invalid-bpp,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
@@ -11,35 +6,13 @@ kms_color@ctm-green-to-red,Fail
kms_color@ctm-negative,Fail
kms_color@ctm-red-to-blue,Fail
kms_color@ctm-signed,Fail
-kms_content_protection@atomic,Crash
-kms_content_protection@atomic-dpms,Crash
-kms_content_protection@content-type-change,Crash
-kms_content_protection@lic-type-0,Crash
-kms_content_protection@lic-type-1,Crash
-kms_content_protection@srm,Crash
-kms_content_protection@type1,Crash
-kms_content_protection@uevent,Crash
-kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail
-kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail
-kms_cursor_legacy@2x-flip-vs-cursor-legacy,Fail
-kms_cursor_legacy@2x-long-cursor-vs-flip-atomic,Fail
-kms_cursor_legacy@2x-long-cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@2x-long-flip-vs-cursor-atomic,Fail
-kms_cursor_legacy@2x-long-flip-vs-cursor-legacy,Fail
kms_cursor_legacy@cursor-vs-flip-toggle,Fail
kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
-kms_display_modes@extended-mode-basic,Fail
-kms_flip@2x-flip-vs-modeset-vs-hang,Fail
-kms_flip@2x-flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_lease@lease-uevent,Fail
-kms_multipipe_modeset@basic-max-pipe-crc-check,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_lowres@tiling-none,Fail
kms_rmfb@close-fd,Fail
-kms_vblank@ts-continuation-dpms-rpm,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
index d42004cd6977..6dbc2080347d 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sc7180-trogdor-lazor-limozeen-fails.txt
@@ -1,8 +1,3 @@
-device_reset@cold-reset-bound,Fail
-device_reset@reset-bound,Fail
-device_reset@unbind-cold-reset-rebind,Fail
-device_reset@unbind-reset-rebind,Fail
-dumb_buffer@invalid-bpp,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
@@ -11,35 +6,13 @@ kms_color@ctm-green-to-red,Fail
kms_color@ctm-negative,Fail
kms_color@ctm-red-to-blue,Fail
kms_color@ctm-signed,Fail
-kms_content_protection@atomic,Crash
-kms_content_protection@atomic-dpms,Crash
-kms_content_protection@content-type-change,Crash
-kms_content_protection@lic-type-0,Crash
-kms_content_protection@lic-type-1,Crash
-kms_content_protection@srm,Crash
-kms_content_protection@type1,Crash
-kms_content_protection@uevent,Crash
-kms_cursor_legacy@2x-cursor-vs-flip-atomic,Fail
-kms_cursor_legacy@2x-cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@2x-flip-vs-cursor-atomic,Fail
-kms_cursor_legacy@2x-flip-vs-cursor-legacy,Fail
-kms_cursor_legacy@2x-long-cursor-vs-flip-atomic,Fail
-kms_cursor_legacy@2x-long-cursor-vs-flip-legacy,Fail
-kms_cursor_legacy@2x-long-flip-vs-cursor-atomic,Fail
-kms_cursor_legacy@2x-long-flip-vs-cursor-legacy,Fail
kms_cursor_legacy@cursor-vs-flip-toggle,Fail
kms_cursor_legacy@cursor-vs-flip-varying-size,Fail
-kms_display_modes@extended-mode-basic,Fail
-kms_flip@2x-flip-vs-modeset-vs-hang,Fail
-kms_flip@2x-flip-vs-panning-vs-hang,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
kms_flip@flip-vs-panning-vs-hang,Fail
kms_lease@lease-uevent,Fail
-kms_multipipe_modeset@basic-max-pipe-crc-check,Fail
kms_pipe_crc_basic@compare-crc-sanitycheck-nv12,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@coverage-7efc,Fail
kms_plane_alpha_blend@coverage-vs-premult-vs-constant,Fail
-kms_plane_lowres@tiling-none,Fail
kms_rmfb@close-fd,Fail
-kms_vblank@ts-continuation-dpms-rpm,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
index 770a1c685fde..fa8c7e663858 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-fails.txt
@@ -1,8 +1,4 @@
-device_reset@cold-reset-bound,Fail
-device_reset@reset-bound,Fail
-device_reset@unbind-cold-reset-rebind,Fail
-device_reset@unbind-reset-rebind,Fail
-dumb_buffer@invalid-bpp,Fail
+drm_read@invalid-buffer,Fail
kms_color@ctm-0-25,Fail
kms_color@ctm-0-50,Fail
kms_color@ctm-0-75,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
index 2aa96b1241c3..38ec0305c1f4 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-flakes.txt
@@ -116,3 +116,17 @@ kms_cursor_legacy@flip-vs-cursor-toggle
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
msm/msm_shrink@copy-mmap-oom-8
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t
+# Failure Rate: 50
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_lease@page-flip-implicit-plane
+
+# Board Name: sdm845-cheza-r3
+# Bug Report: https://lore.kernel.org/linux-arm-msm/64bc4bcf-de51-4e60-a9f7-1295a1e64c65@collabora.com/T/#t
+# Failure Rate: 50
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc5
+kms_flip@flip-vs-expired-vblank
diff --git a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
index 90651048ab61..94783cafc21a 100644
--- a/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/msm-sdm845-skips.txt
@@ -25,3 +25,8 @@ core_hotunplug.*
# Whole machine hangs
kms_cursor_crc.*
+
+# IGT test crash
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_content_protection@uevent
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
new file mode 100644
index 000000000000..4892c0c70a6d
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-fails.txt
@@ -0,0 +1,15 @@
+kms_3d,Fail
+kms_cursor_legacy@forked-bo,Fail
+kms_cursor_legacy@forked-move,Fail
+kms_cursor_legacy@single-bo,Fail
+kms_cursor_legacy@single-move,Fail
+kms_cursor_legacy@torture-bo,Fail
+kms_cursor_legacy@torture-move,Fail
+kms_hdmi_inject@inject-4k,Fail
+kms_lease@lease-uevent,Fail
+kms_plane_alpha_blend@alpha-7efc,Fail
+kms_plane_alpha_blend@alpha-basic,Fail
+kms_plane_alpha_blend@alpha-opaque-fb,Fail
+kms_plane_alpha_blend@alpha-transparent-fb,Fail
+kms_plane_alpha_blend@constant-alpha-max,Fail
+msm/msm_recovery@gpu-fault-parallel,Fail
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt
new file mode 100644
index 000000000000..c1859d9b165f
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-flakes.txt
@@ -0,0 +1,6 @@
+# Board Name: sm8350-hdk
+# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/65
+# Failure Rate: 100
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.12.0-rc1
+msm/msm_recovery@gpu-fault
diff --git a/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
new file mode 100644
index 000000000000..329770c520d9
--- /dev/null
+++ b/drivers/gpu/drm/ci/xfails/msm-sm8350-hdk-skips.txt
@@ -0,0 +1,211 @@
+# Skip driver specific tests
+^amdgpu.*
+nouveau_.*
+^panfrost.*
+^v3d.*
+^vc4.*
+^vmwgfx*
+
+# Skip intel specific tests
+gem_.*
+i915_.*
+tools_test.*
+
+# Currently fails and causes coverage loss for other tests
+# since core_getversion also fails.
+core_hotunplug.*
+
+# Kernel panic
+msm/msm_mapping@ring
+# DEBUG - Begin test msm/msm_mapping@ring
+# [ 200.874157] [IGT] msm_mapping: executing
+# [ 200.880236] [IGT] msm_mapping: starting subtest ring
+# [ 200.895243] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=PERMISSION source=CP (0,0,0,1)
+# [ 200.906885] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 200.917625] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 200.928353] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 200.939084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 200.949815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 200.950227] platform 3d6a000.gmu: [drm:a6xx_hfi_send_msg.constprop.0] *ERROR* Message HFI_H2F_MSG_GX_BW_PERF_VOTE id 25 timed out waiting for response
+# [ 200.960467] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 200.960500] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 200.995966] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 201.006702] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 204.213387] platform 3d6a000.gmu: GMU watchdog expired
+# [ 205.909103] adreno_fault_handler: 224274 callbacks suppressed
+# [ 205.909108] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 205.925794] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 205.936529] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 205.947263] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 205.957997] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 205.968731] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 205.979465] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 205.990199] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 206.000932] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 206.011666] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 210.925090] adreno_fault_handler: 224511 callbacks suppressed
+# [ 210.925096] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 210.941781] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 210.952517] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 210.963250] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 210.973985] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 210.984719] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 210.995452] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 211.006186] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 211.016921] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 211.027655] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 215.937100] adreno_fault_handler: 223760 callbacks suppressed
+# [ 215.937106] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 215.953824] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 215.964573] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 215.975321] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 215.986067] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 215.996815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 216.007563] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 216.018310] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 216.029057] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 216.039805] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 220.945182] adreno_fault_handler: 222822 callbacks suppressed
+# [ 220.945188] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 220.961897] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 220.972645] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 220.983392] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 220.994140] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 221.004889] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 221.015636] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 221.026383] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 221.037130] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 221.047879] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 225.953179] adreno_fault_handler: 223373 callbacks suppressed
+# [ 225.953184] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 225.969883] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 225.980617] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 225.991350] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 226.002084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 226.012818] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 226.023551] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 226.034285] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 226.045019] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 226.055753] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
+# [ 228.001087] rcu: INFO: rcu_preempt detected stalls on CPUs/tasks:
+# [ 228.007412] rcu: 0-....: (524 ticks this GP) idle=4ffc/1/0x4000000000000000 softirq=9367/9368 fqs=29
+# [ 228.017097] rcu: (detected by 1, t=6504 jiffies, g=29837, q=6 ncpus=8)
+# [ 228.023959] Sending NMI from CPU 1 to CPUs 0:
+# [ 228.161164] watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [gpu-worker:150]
+# [ 228.173169] Modules linked in:
+# [ 228.176361] irq event stamp: 2809595
+# [ 228.180083] hardirqs last enabled at (2809594): [<ffffd3bc52cb91ac>] exit_to_kernel_mode+0x38/0x130
+# [ 228.189547] hardirqs last disabled at (2809595): [<ffffd3bc52cb92c8>] el1_interrupt+0x24/0x64
+# [ 228.198377] softirqs last enabled at (1669060): [<ffffd3bc51936f98>] handle_softirqs+0x4a4/0x4bc
+# [ 228.207565] softirqs last disabled at (1669063): [<ffffd3bc518905a4>] __do_softirq+0x14/0x20
+# [ 228.216316] CPU: 0 UID: 0 PID: 150 Comm: gpu-worker Not tainted 6.12.0-rc1-g685d530dc83a #1
+# [ 228.224966] Hardware name: Qualcomm Technologies, Inc. SM8350 HDK (DT)
+# [ 228.231730] pstate: 00400005 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+# [ 228.238948] pc : tcp_fastretrans_alert+0x0/0x884
+# [ 228.243751] lr : tcp_ack+0x9d4/0x1238
+# [ 228.247562] sp : ffff8000800036d0
+# [ 228.251011] x29: ffff8000800036d0 x28: 000000000000000c x27: 0000000000000001
+# [ 228.258421] x26: ffff704683cd8000 x25: 0000000000000403 x24: ffff70468b7e7c00
+# [ 228.265829] x23: 0000000000000000 x22: 0000000000000004 x21: 000000000000140f
+# [ 228.273237] x20: 00000000f1de79f7 x19: 00000000f1de7a5f x18: 0000000000000001
+# [ 228.280644] x17: 00000000302d6762 x16: 632d6b64682d3035 x15: ffff704683c39000
+# [ 228.288051] x14: 00000000000e2000 x13: ffff704683df6000 x12: 0000000000000000
+# [ 228.295458] x11: 00000000000000a0 x10: 0000000000000000 x9 : ffffd3bc551a9a20
+# [ 228.302865] x8 : ffff800080003640 x7 : 0000000000040faa x6 : 00000000ffff9634
+# [ 228.310271] x5 : 00000000000005a8 x4 : ffff800080003788 x3 : ffff80008000377c
+# [ 228.317679] x2 : 0000000000000000 x1 : 00000000f1de79f7 x0 : ffff704683cd8000
+# [ 228.325087] Call trace:
+# [ 228.327640] tcp_fastretrans_alert+0x0/0x884
+# [ 228.332082] tcp_rcv_established+0x7c4/0x8bc
+# [ 228.336523] tcp_v4_do_rcv+0x244/0x31c
+# [ 228.340429] tcp_v4_rcv+0xcc4/0x1084
+# [ 228.344155] ip_protocol_deliver_rcu+0x64/0x218
+# [ 228.348862] ip_local_deliver_finish+0xb8/0x1ac
+# [ 228.353566] ip_local_deliver+0x84/0x254
+# [ 228.357651] ip_sublist_rcv_finish+0x84/0xb8
+# [ 228.362092] ip_sublist_rcv+0x11c/0x2f0
+# [ 228.366081] ip_list_rcv+0xfc/0x190
+# [ 228.369711] __netif_receive_skb_list_core+0x174/0x208
+# [ 228.375050] netif_receive_skb_list_internal+0x204/0x3ac
+# [ 228.380564] napi_complete_done+0x64/0x1d0
+# [ 228.384826] lan78xx_poll+0x71c/0x9cc
+# [ 228.388638] __napi_poll.constprop.0+0x3c/0x254
+# [ 228.393341] net_rx_action+0x164/0x2d4
+# [ 228.397244] handle_softirqs+0x128/0x4bc
+# [ 228.401329] __do_softirq+0x14/0x20
+# [ 228.404958] ____do_softirq+0x10/0x1c
+# [ 228.408769] call_on_irq_stack+0x24/0x4c
+# [ 228.412854] do_softirq_own_stack+0x1c/0x28
+# [ 228.417199] __irq_exit_rcu+0x124/0x164
+# [ 228.421188] irq_exit_rcu+0x10/0x38
+# [ 228.424819] el1_interrupt+0x38/0x64
+# [ 228.428546] el1h_64_irq_handler+0x18/0x24
+# [ 228.432807] el1h_64_irq+0x64/0x68
+# [ 228.436354] lock_acquire+0x214/0x32c
+# [ 228.440166] __mutex_lock+0x98/0x3d0
+# [ 228.443893] mutex_lock_nested+0x24/0x30
+# [ 228.447978] fault_worker+0x58/0x184
+# [ 228.451704] kthread_worker_fn+0xf4/0x320
+# [ 228.455873] kthread+0x114/0x118
+# [ 228.459243] ret_from_fork+0x10/0x20
+# [ 228.462970] Kernel panic - not syncing: softlockup: hung tasks
+# [ 228.469018] CPU: 0 UID: 0 PID: 150 Comm: gpu-worker Tainted: G L 6.12.0-rc1-g685d530dc83a #1
+# [ 228.479190] Tainted: [L]=SOFTLOCKUP
+# [ 228.482815] Hardware name: Qualcomm Technologies, Inc. SM8350 HDK (DT)
+# [ 228.489574] Call trace:
+# [ 228.492125] dump_backtrace+0x98/0xf0
+# [ 228.495931] show_stack+0x18/0x24
+# [ 228.499380] dump_stack_lvl+0x38/0xd0
+# [ 228.503189] dump_stack+0x18/0x24
+# [ 228.506639] panic+0x3bc/0x41c
+# [ 228.509826] watchdog_timer_fn+0x254/0x2e4
+# [ 228.514087] __hrtimer_run_queues+0x3b0/0x40c
+# [ 228.518612] hrtimer_interrupt+0xe8/0x248
+# [ 228.522777] arch_timer_handler_virt+0x2c/0x44
+# [ 228.527399] handle_percpu_devid_irq+0xa8/0x2c4
+# [ 228.532103] generic_handle_domain_irq+0x2c/0x44
+# [ 228.536902] gic_handle_irq+0x4c/0x11c
+# [ 228.540802] do_interrupt_handler+0x50/0x84
+# [ 228.545146] el1_interrupt+0x34/0x64
+# [ 228.548870] el1h_64_irq_handler+0x18/0x24
+# [ 228.553128] el1h_64_irq+0x64/0x68
+# [ 228.556672] tcp_fastretrans_alert+0x0/0x884
+# [ 228.561110] tcp_rcv_established+0x7c4/0x8bc
+# [ 228.565548] tcp_v4_do_rcv+0x244/0x31c
+# [ 228.569449] tcp_v4_rcv+0xcc4/0x1084
+# [ 228.573171] ip_protocol_deliver_rcu+0x64/0x218
+# [ 228.577873] ip_local_deliver_finish+0xb8/0x1ac
+# [ 228.582574] ip_local_deliver+0x84/0x254
+# [ 228.586655] ip_sublist_rcv_finish+0x84/0xb8
+# [ 228.591092] ip_sublist_rcv+0x11c/0x2f0
+# [ 228.595079] ip_list_rcv+0xfc/0x190
+# [ 228.598706] __netif_receive_skb_list_core+0x174/0x208
+# [ 228.604039] netif_receive_skb_list_internal+0x204/0x3ac
+# [ 228.609549] napi_complete_done+0x64/0x1d0
+# [ 228.613808] lan78xx_poll+0x71c/0x9cc
+# [ 228.617614] __napi_poll.constprop.0+0x3c/0x254
+# [ 228.622314] net_rx_action+0x164/0x2d4
+# [ 228.626214] handle_softirqs+0x128/0x4bc
+# [ 228.630297] __do_softirq+0x14/0x20
+# [ 228.633923] ____do_softirq+0x10/0x1c
+# [ 228.637729] call_on_irq_stack+0x24/0x4c
+# [ 228.641811] do_softirq_own_stack+0x1c/0x28
+# [ 228.646152] __irq_exit_rcu+0x124/0x164
+# [ 228.650139] irq_exit_rcu+0x10/0x38
+# [ 228.653768] el1_interrupt+0x38/0x64
+# [ 228.657491] el1h_64_irq_handler+0x18/0x24
+# [ 228.661750] el1h_64_irq+0x64/0x68
+# [ 228.665293] lock_acquire+0x214/0x32c
+# [ 228.669098] __mutex_lock+0x98/0x3d0
+# [ 228.672821] mutex_lock_nested+0x24/0x30
+# [ 228.676903] fault_worker+0x58/0x184
+# [ 228.680626] kthread_worker_fn+0xf4/0x320
+# [ 228.684790] kthread+0x114/0x118
+# [ 228.688156] ret_from_fork+0x10/0x20
+# [ 228.691882] SMP: stopping secondary CPUs
+# [ 229.736843] SMP: failed to stop secondary CPUs 1,4
+# [ 229.741827] Kernel Offset: 0x53bbd1880000 from 0xffff800080000000
+# [ 229.748159] PHYS_OFFSET: 0xfff08fba80000000
+# [ 229.752499] CPU features: 0x18,00000017,00200928,4200720b
+# [ 229.758095] Memory Limit: none
+# [ 229.761291] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]---
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt
index fe8ce2ce33e6..abd1ccb71561 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-g12b-fails.txt
@@ -1 +1,2 @@
panfrost/panfrost_prime@gem-prime-import,Fail
+panfrost/panfrost_submit@pan-submit-error-bad-requirements,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt
index fe8ce2ce33e6..abd1ccb71561 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-mt8183-fails.txt
@@ -1 +1,2 @@
panfrost/panfrost_prime@gem-prime-import,Fail
+panfrost/panfrost_submit@pan-submit-error-bad-requirements,Fail
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt
index 4a2f4b6b14c1..8330b934602a 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3288-fails.txt
@@ -1 +1,2 @@
panfrost/panfrost_prime@gem-prime-import,Crash
+panfrost/panfrost_submit@pan-submit-error-bad-requirements,Crash
diff --git a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt
index fe8ce2ce33e6..abd1ccb71561 100644
--- a/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/panfrost-rk3399-fails.txt
@@ -1 +1,2 @@
panfrost/panfrost_prime@gem-prime-import,Fail
+panfrost/panfrost_submit@pan-submit-error-bad-requirements,Fail
diff --git a/drivers/gpu/drm/ci/xfails/requirements.txt b/drivers/gpu/drm/ci/xfails/requirements.txt
deleted file mode 100644
index 5e6d48d98e4e..000000000000
--- a/drivers/gpu/drm/ci/xfails/requirements.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-git+https://gitlab.freedesktop.org/gfx-ci/ci-collate@09e7142715c16f54344ddf97013331ba063b162b
-termcolor==2.3.0
-
-# ci-collate dependencies
-certifi==2023.7.22
-charset-normalizer==3.2.0
-idna==3.4
-pip==23.3
-python-gitlab==3.15.0
-requests==2.31.0
-requests-toolbelt==1.0.0
-ruamel.yaml==0.17.32
-ruamel.yaml.clib==0.2.7
-setuptools==70.0.0
-tenacity==8.2.3
-urllib3==2.0.7
-wheel==0.41.1
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
index ea7b2ceb95b9..90282dfa19f4 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-fails.txt
@@ -1,18 +1,24 @@
-core_setmaster@master-drop-set-root,Crash
core_setmaster@master-drop-set-user,Crash
-core_setmaster_vs_auth,Crash
-device_reset@cold-reset-bound,Crash
-device_reset@reset-bound,Crash
-device_reset@unbind-cold-reset-rebind,Crash
-device_reset@unbind-reset-rebind,Crash
dumb_buffer@create-clear,Crash
-dumb_buffer@invalid-bpp,Crash
fbdev@pan,Crash
+kms_bw@linear-tiling-2-displays-1920x1080p,Fail
kms_cursor_crc@cursor-onscreen-32x10,Crash
kms_cursor_crc@cursor-onscreen-32x32,Crash
+kms_cursor_crc@cursor-onscreen-64x64,Crash
kms_cursor_crc@cursor-random-32x10,Crash
+kms_cursor_crc@cursor-sliding-32x10,Crash
kms_cursor_crc@cursor-sliding-32x32,Crash
+kms_cursor_crc@cursor-sliding-64x21,Crash
kms_cursor_legacy@basic-flip-before-cursor-atomic,Fail
kms_cursor_legacy@cursor-vs-flip-legacy,Fail
+kms_cursor_legacy@flip-vs-cursor-crc-atomic,Crash
+kms_flip@flip-vs-panning-vs-hang,Crash
+kms_invalid_mode@int-max-clock,Crash
+kms_lease@invalid-create-leases,Fail
+kms_pipe_crc_basic@read-crc-frame-sequence,Crash
+kms_plane@pixel-format,Crash
+kms_plane@pixel-format-source-clamping,Crash
kms_prop_blob@invalid-set-prop,Crash
-kms_prop_blob@invalid-set-prop-any,Crash
+kms_properties@get_properties-sanity-atomic,Crash
+kms_properties@get_properties-sanity-non-atomic,Crash
+kms_rmfb@close-fd,Crash
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
index 7ede273aab20..cd0b27d8b636 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3288-flakes.txt
@@ -4,3 +4,31 @@
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_cursor_legacy@flip-vs-cursor-atomic
+
+# Board Name: rk3288-veyron-jaq
+# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t
+# Failure Rate: 100
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_cursor_crc@cursor-offscreen-32x10
+
+# Board Name: rk3288-veyron-jaq
+# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t
+# Failure Rate: 100
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_cursor_edge_walk@64x64-left-edge
+
+# Board Name: rk3288-veyron-jaq
+# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t
+# Failure Rate: 100
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_flip@plain-flip-ts-check
+
+# Board Name: rk3288-veyron-jaq
+# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t
+# Failure Rate: 100
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_cursor_crc@cursor-alpha-opaque
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
index 9309ff15e23a..83a38853b4af 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-fails.txt
@@ -1,9 +1,4 @@
-device_reset@cold-reset-bound,Fail
-device_reset@reset-bound,Fail
-device_reset@unbind-cold-reset-rebind,Fail
-device_reset@unbind-reset-rebind,Fail
dumb_buffer@create-clear,Crash
-dumb_buffer@invalid-bpp,Fail
kms_atomic_transition@modeset-transition,Fail
kms_atomic_transition@modeset-transition-fencing,Fail
kms_atomic_transition@plane-toggle-modeset-transition,Fail
@@ -46,7 +41,6 @@ kms_cursor_legacy@flip-vs-cursor-legacy,Fail
kms_cursor_legacy@long-nonblocking-modeset-vs-cursor-atomic,Fail
kms_flip@basic-flip-vs-wf_vblank,Fail
kms_flip@blocking-wf_vblank,Fail
-kms_flip@dpms-vs-vblank-race,Fail
kms_flip@flip-vs-absolute-wf_vblank,Fail
kms_flip@flip-vs-blocking-wf-vblank,Fail
kms_flip@flip-vs-modeset-vs-hang,Fail
@@ -59,7 +53,6 @@ kms_flip@plain-flip-fb-recreate,Fail
kms_flip@plain-flip-fb-recreate-interruptible,Fail
kms_flip@plain-flip-ts-check,Fail
kms_flip@plain-flip-ts-check-interruptible,Fail
-kms_flip@wf_vblank-ts-check,Fail
kms_flip@wf_vblank-ts-check-interruptible,Fail
kms_invalid_mode@int-max-clock,Fail
kms_lease@lease-uevent,Fail
diff --git a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
index d98f6a17343c..56f7d4f1ed15 100644
--- a/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
+++ b/drivers/gpu/drm/ci/xfails/rockchip-rk3399-flakes.txt
@@ -46,3 +46,31 @@ kms_setmode@basic
# IGT Version: 1.28-gf13702b8e
# Linux Version: 6.10.0-rc5
kms_bw@connected-linear-tiling-1-displays-2560x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t
+# Failure Rate: 50
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc2
+kms_flip@wf_vblank-ts-check
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t
+# Failure Rate: 50
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc5
+kms_flip@dpms-vs-vblank-race
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t
+# Failure Rate: 50
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc5
+kms_bw@linear-tiling-2-displays-2160x1440p
+
+# Board Name: rk3399-gru-kevin
+# Bug Report: https://lore.kernel.org/linux-rockchip/7505ac00-29ef-4ad9-8904-94b4c024c02b@collabora.com/T/#t
+# Failure Rate: 50
+# IGT Version: 1.28-ga73311079
+# Linux Version: 6.11.0-rc5
+kms_flip@flip-vs-expired-vblank
diff --git a/drivers/gpu/drm/ci/xfails/update-xfails.py b/drivers/gpu/drm/ci/xfails/update-xfails.py
deleted file mode 100755
index a446e98d72a1..000000000000
--- a/drivers/gpu/drm/ci/xfails/update-xfails.py
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/usr/bin/env python3
-
-import argparse
-from collections import defaultdict
-import difflib
-import os
-import re
-from glcollate import Collate
-from termcolor import colored
-from urllib.parse import urlparse
-
-
-def get_canonical_name(job_name):
- return re.split(r" \d+/\d+", job_name)[0]
-
-
-def get_xfails_file_path(job_name, suffix):
- canonical_name = get_canonical_name(job_name)
- name = canonical_name.replace(":", "-")
- script_dir = os.path.dirname(os.path.abspath(__file__))
- return os.path.join(script_dir, f"{name}-{suffix}.txt")
-
-
-def get_unit_test_name_and_results(unit_test):
- if "Artifact results/failures.csv not found" in unit_test or '' == unit_test:
- return None, None
- unit_test_name, unit_test_result = unit_test.strip().split(",")
- return unit_test_name, unit_test_result
-
-
-def read_file(file_path):
- try:
- with open(file_path, "r") as file:
- f = file.readlines()
- if len(f):
- f[-1] = f[-1].strip() + "\n"
- return f
- except FileNotFoundError:
- return []
-
-
-def save_file(content, file_path):
- # delete file is content is empty
- if not content or not any(content):
- if os.path.exists(file_path):
- os.remove(file_path)
- return
-
- with open(file_path, "w") as file:
- file.writelines(content)
-
-
-def is_test_present_on_file(file_content, unit_test_name):
- return any(unit_test_name in line for line in file_content)
-
-
-def is_unit_test_present_in_other_jobs(unit_test, job_ids):
- return all(unit_test in job_ids[job_id] for job_id in job_ids)
-
-
-def remove_unit_test_if_present(lines, unit_test_name):
- if not is_test_present_on_file(lines, unit_test_name):
- return
- lines[:] = [line for line in lines if unit_test_name not in line]
-
-
-def add_unit_test_if_not_present(lines, unit_test_name, file_name):
- # core_getversion is mandatory
- if "core_getversion" in unit_test_name:
- print("WARNING: core_getversion should pass, not adding it to", os.path.basename(file_name))
- elif all(unit_test_name not in line for line in lines):
- lines.append(unit_test_name + "\n")
-
-
-def update_unit_test_result_in_fails_txt(fails_txt, unit_test):
- unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test)
- for i, line in enumerate(fails_txt):
- if unit_test_name in line:
- _, current_result = get_unit_test_name_and_results(line)
- fails_txt[i] = unit_test + "\n"
- return
-
-
-def add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test, fails_txt_path):
- unit_test_name, _ = get_unit_test_name_and_results(unit_test)
- if not is_test_present_on_file(fails_txt, unit_test_name):
- add_unit_test_if_not_present(fails_txt, unit_test, fails_txt_path)
- # if it is present but not with the same result
- elif not is_test_present_on_file(fails_txt, unit_test):
- update_unit_test_result_in_fails_txt(fails_txt, unit_test)
-
-
-def split_unit_test_from_collate(xfails):
- for job_name in xfails.keys():
- for job_id in xfails[job_name].copy().keys():
- if "not found" in xfails[job_name][job_id].content_as_str:
- del xfails[job_name][job_id]
- continue
- xfails[job_name][job_id] = xfails[job_name][job_id].content_as_str.splitlines()
-
-
-def get_xfails_from_pipeline_url(pipeline_url):
- parsed_url = urlparse(pipeline_url)
- path_components = parsed_url.path.strip("/").split("/")
-
- namespace = path_components[0]
- project = path_components[1]
- pipeline_id = path_components[-1]
-
- print("Collating from:", namespace, project, pipeline_id)
- xfails = (
- Collate(namespace=namespace, project=project)
- .from_pipeline(pipeline_id)
- .get_artifact("results/failures.csv")
- )
-
- split_unit_test_from_collate(xfails)
- return xfails
-
-
-def get_xfails_from_pipeline_urls(pipelines_urls):
- xfails = defaultdict(dict)
-
- for url in pipelines_urls:
- new_xfails = get_xfails_from_pipeline_url(url)
- for key in new_xfails:
- xfails[key].update(new_xfails[key])
-
- return xfails
-
-
-def print_diff(old_content, new_content, file_name):
- diff = difflib.unified_diff(old_content, new_content, lineterm="", fromfile=file_name, tofile=file_name)
- diff = [colored(line, "green") if line.startswith("+") else
- colored(line, "red") if line.startswith("-") else line for line in diff]
- print("\n".join(diff[:3]))
- print("".join(diff[3:]))
-
-
-def main(pipelines_urls, only_flakes):
- xfails = get_xfails_from_pipeline_urls(pipelines_urls)
-
- for job_name in xfails.keys():
- fails_txt_path = get_xfails_file_path(job_name, "fails")
- flakes_txt_path = get_xfails_file_path(job_name, "flakes")
-
- fails_txt = read_file(fails_txt_path)
- flakes_txt = read_file(flakes_txt_path)
-
- fails_txt_original = fails_txt.copy()
- flakes_txt_original = flakes_txt.copy()
-
- for job_id in xfails[job_name].keys():
- for unit_test in xfails[job_name][job_id]:
- unit_test_name, unit_test_result = get_unit_test_name_and_results(unit_test)
-
- if not unit_test_name:
- continue
-
- if only_flakes:
- remove_unit_test_if_present(fails_txt, unit_test_name)
- add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path)
- continue
-
- # drop it from flakes if it is present to analyze it again
- remove_unit_test_if_present(flakes_txt, unit_test_name)
-
- if unit_test_result == "UnexpectedPass":
- remove_unit_test_if_present(fails_txt, unit_test_name)
- # flake result
- if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]):
- add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path)
- continue
-
- # flake result
- if not is_unit_test_present_in_other_jobs(unit_test, xfails[job_name]):
- remove_unit_test_if_present(fails_txt, unit_test_name)
- add_unit_test_if_not_present(flakes_txt, unit_test_name, flakes_txt_path)
- continue
-
- # consistent result
- add_unit_test_or_update_result_to_fails_if_present(fails_txt, unit_test,
- fails_txt_path)
-
- fails_txt.sort()
- flakes_txt.sort()
-
- if fails_txt != fails_txt_original:
- save_file(fails_txt, fails_txt_path)
- print_diff(fails_txt_original, fails_txt, os.path.basename(fails_txt_path))
- if flakes_txt != flakes_txt_original:
- save_file(flakes_txt, flakes_txt_path)
- print_diff(flakes_txt_original, flakes_txt, os.path.basename(flakes_txt_path))
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="Update xfails from a given pipeline.")
- parser.add_argument("pipeline_urls", nargs="+", type=str, help="URLs to the pipelines to analyze the failures.")
- parser.add_argument("--only-flakes", action="store_true", help="Treat every detected failure as a flake, edit *-flakes.txt only.")
-
- args = parser.parse_args()
-
- main(args.pipeline_urls, args.only_flakes)
- print("Done.")
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
index 5408110f4c60..71c02104a683 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-fails.txt
@@ -1,24 +1,3 @@
-core_hotunplug@hotrebind,Fail
-core_hotunplug@hotrebind-lateclose,Fail
-core_hotunplug@hotreplug,Fail
-core_hotunplug@hotreplug-lateclose,Fail
-core_hotunplug@hotunbind-rebind,Fail
-core_hotunplug@hotunplug-rescan,Fail
-core_hotunplug@unbind-rebind,Fail
-core_hotunplug@unplug-rescan,Fail
-device_reset@cold-reset-bound,Fail
-device_reset@reset-bound,Fail
-device_reset@unbind-cold-reset-rebind,Fail
-device_reset@unbind-reset-rebind,Fail
-dumb_buffer@invalid-bpp,Fail
-kms_content_protection@atomic,Crash
-kms_content_protection@atomic-dpms,Crash
-kms_content_protection@content-type-change,Crash
-kms_content_protection@lic-type-0,Crash
-kms_content_protection@lic-type-1,Crash
-kms_content_protection@srm,Crash
-kms_content_protection@type1,Crash
-kms_content_protection@uevent,Crash
kms_cursor_crc@cursor-rapid-movement-128x128,Fail
kms_cursor_crc@cursor-rapid-movement-128x42,Fail
kms_cursor_crc@cursor-rapid-movement-256x256,Fail
diff --git a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
index 5ccc771fbb36..b3d16e82e9a2 100644
--- a/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
+++ b/drivers/gpu/drm/ci/xfails/vkms-none-skips.txt
@@ -205,6 +205,59 @@ kms_cursor_edge_walk@128x128-right-edge
# R10: ffffa2c181790000 R11: 0000000000000000 R12: ffffa2c1814fa810
# R13: 0000000000000031 R14: 0000000000000031 R15: 000000000000
+kms_cursor_edge_walk@128x128-left-edge
+# DEBUG - Begin test kms_cursor_edge_walk@128x128-left-edge
+# Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+# CPU: 0 UID: 0 PID: 27 Comm: kworker/u8:1 Not tainted 6.11.0-rc5-g5d3429a7e9aa #1
+# Hardware name: ChromiumOS crosvm, BIOS 0
+# Workqueue: vkms_composer vkms_composer_worker [vkms]
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffa437800ebd58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa0e841904000
+# RDX: 00000000000000ff RSI: ffffa0e841905ff8 RDI: ffffa0e841902000
+# RBP: 0000000000000000 R08: ffffa0e84158a600 R09: 00000000000003ff
+# R10: 0000000078b2bcd2 R11: 00000000278b2bcd R12: ffffa0e84870fc60
+# R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffffa0e86bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000101710000 CR4: 0000000000350ef0
+# Call Trace:
+# <TASK>
+# ? __die+0x1e/0x60
+# ? page_fault_oops+0x17b/0x4a0
+# ? exc_page_fault+0x6d/0x230
+# ? asm_exc_page_fault+0x26/0x30
+# ? compose_active_planes+0x344/0x4e0 [vkms]
+# ? compose_active_planes+0x32f/0x4e0 [vkms]
+# ? srso_return_thunk+0x5/0x5f
+# vkms_composer_worker+0x205/0x240 [vkms]
+# process_one_work+0x201/0x6c0
+# ? lock_is_held_type+0x9e/0x110
+# worker_thread+0x17e/0x310
+# ? __pfx_worker_thread+0x10/0x10
+# kthread+0xce/0x100
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork+0x2f/0x50
+# ? __pfx_kthread+0x10/0x10
+# ret_from_fork_asm+0x1a/0x30
+# </TASK>
+# Modules linked in: vkms
+# CR2: 0000000000000018
+# ---[ end trace 0000000000000000 ]---
+# RIP: 0010:compose_active_planes+0x344/0x4e0 [vkms]
+# Code: 6a 34 0f 8e 91 fe ff ff 44 89 ea 48 8d 7c 24 48 e8 71 f0 ff ff 4b 8b 04 fc 48 8b 4c 24 50 48 8b 7c 24 40 48 8b 80 48 01 00 00 <48> 63 70 18 8b 40 20 48 89 f2 48 c1 e6 03 29 d0 48 8b 54 24 48 48
+# RSP: 0018:ffffa437800ebd58 EFLAGS: 00010282
+# RAX: 0000000000000000 RBX: 0000000000000002 RCX: ffffa0e841904000
+# RDX: 00000000000000ff RSI: ffffa0e841905ff8 RDI: ffffa0e841902000
+# RBP: 0000000000000000 R08: ffffa0e84158a600 R09: 00000000000003ff
+# R10: 0000000078b2bcd2 R11: 00000000278b2bcd R12: ffffa0e84870fc60
+# R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+# FS: 0000000000000000(0000) GS:ffffa0e86bc00000(0000) knlGS:0000000000000000
+# CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+# CR2: 0000000000000018 CR3: 0000000101710000 CR4: 0000000000350ef0
+# vkms_vblank_simulate: vblank timer overrun
+
# Skip driver specific tests
^amdgpu.*
^msm.*
diff --git a/drivers/gpu/drm/clients/Kconfig b/drivers/gpu/drm/clients/Kconfig
new file mode 100644
index 000000000000..6096c623d9d5
--- /dev/null
+++ b/drivers/gpu/drm/clients/Kconfig
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_CLIENT_LIB
+ tristate
+ depends on DRM
+ select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
+ select FB_CORE if DRM_FBDEV_EMULATION
+ help
+ This option enables the DRM client library and selects all
+ modules and components according to the enabled clients.
+
+config DRM_CLIENT_SELECTION
+ tristate
+ depends on DRM
+ select DRM_CLIENT_LIB if DRM_CLIENT_LOG
+ select DRM_CLIENT_LIB if DRM_FBDEV_EMULATION
+ help
+ Drivers that support in-kernel DRM clients have to select this
+ option.
+
+config DRM_CLIENT_SETUP
+ bool
+ depends on DRM_CLIENT_SELECTION
+ help
+ Enables the DRM client selection. DRM drivers that support the
+ default clients should select DRM_CLIENT_SELECTION instead.
+
+menu "Supported DRM clients"
+ depends on DRM_CLIENT_SELECTION
+
+config DRM_FBDEV_EMULATION
+ bool "Enable legacy fbdev support for your modesetting driver"
+ depends on DRM_CLIENT_SELECTION
+ select DRM_CLIENT
+ select DRM_CLIENT_SETUP
+ select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+ default FB
+ help
+ Choose this option if you have a need for the legacy fbdev
+ support. Note that this support also provides the linux console
+ support on top of your modesetting driver.
+
+ If in doubt, say "Y".
+
+config DRM_FBDEV_OVERALLOC
+ int "Overallocation of the fbdev buffer"
+ depends on DRM_FBDEV_EMULATION
+ default 100
+ help
+ Defines the fbdev buffer overallocation in percent. Default
+ is 100. Typical values for double buffering will be 200,
+ triple buffering 300.
+
+config DRM_FBDEV_LEAK_PHYS_SMEM
+ bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
+ depends on DRM_FBDEV_EMULATION && EXPERT
+ default n
+ help
+ In order to keep user-space compatibility, we want in certain
+ use-cases to keep leaking the fbdev physical address to the
+ user-space program handling the fbdev buffer.
+	  This affects not only Amlogic, Allwinner and Rockchip devices
+	  with ARM Mali GPUs using a userspace blob.
+	  This option is not supported by upstream developers and should be
+	  removed as soon as possible; it is considered broken and legacy
+	  behaviour from a modern fbdev device driver.
+
+ Please send any bug reports when using this to your proprietary
+ software vendor that requires this.
+
+ If in doubt, say "N" or spread the word to your closed source
+ library vendor.
+
+config DRM_CLIENT_LOG
+ bool "Print the kernel boot message on the screen"
+ depends on DRM_CLIENT_SELECTION
+ select DRM_CLIENT
+ select DRM_CLIENT_SETUP
+ select DRM_DRAW
+ select FONT_SUPPORT
+ help
+	  This enables a drm logger that will print the kernel messages on the
+	  screen until userspace is ready to take over.
+
+	  If you only need logs but no terminal, or if you prefer a userspace
+	  terminal, say "Y".
+
+choice
+ prompt "Default DRM Client"
+ depends on DRM_CLIENT_SELECTION
+ depends on DRM_FBDEV_EMULATION || DRM_CLIENT_LOG
+ default DRM_CLIENT_DEFAULT_FBDEV
+ help
+ Selects the default drm client.
+
+ The selection made here can be overridden by using the kernel
+ command line 'drm_client_lib.active=fbdev' option.
+
+config DRM_CLIENT_DEFAULT_FBDEV
+ bool "fbdev"
+ depends on DRM_FBDEV_EMULATION
+ help
+	  Use fbdev emulation as the default drm client. This is needed to have
+ fbcon on top of a drm driver.
+
+config DRM_CLIENT_DEFAULT_LOG
+ bool "log"
+ depends on DRM_CLIENT_LOG
+ help
+	  Use drm log as the default drm client. This will display boot logs on the
+ screen, but doesn't implement a full terminal. For that you will need
+ a userspace terminal using drm/kms.
+
+endchoice
+
+config DRM_CLIENT_DEFAULT
+ string
+ depends on DRM_CLIENT
+ default "fbdev" if DRM_CLIENT_DEFAULT_FBDEV
+ default "log" if DRM_CLIENT_DEFAULT_LOG
+ default ""
+
+endmenu
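
A note on how this default is meant to be used: DRM_CLIENT_DEFAULT only bakes in a compile-time default, and the module parameter declared in drm_client_setup.c below lets it be overridden at boot, e.g. 'drm_client_lib.active=log' on the kernel command line, or an 'options drm_client_lib active=log' line in modprobe.d when the client library is built as a module. Judging by drm_client_setup() below, an explicitly empty value ('drm_client_lib.active=') starts no in-kernel client at all, without a warning.
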
diff --git a/drivers/gpu/drm/clients/Makefile b/drivers/gpu/drm/clients/Makefile
new file mode 100644
index 000000000000..c16addbc327f
--- /dev/null
+++ b/drivers/gpu/drm/clients/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+subdir-ccflags-y += -I$(src)/..
+
+drm_client_lib-y := drm_client_setup.o
+drm_client_lib-$(CONFIG_DRM_CLIENT_LOG) += drm_log.o
+drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o
+obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o
diff --git a/drivers/gpu/drm/clients/drm_client_internal.h b/drivers/gpu/drm/clients/drm_client_internal.h
new file mode 100644
index 000000000000..6dc078bf6503
--- /dev/null
+++ b/drivers/gpu/drm/clients/drm_client_internal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_CLIENT_INTERNAL_H
+#define DRM_CLIENT_INTERNAL_H
+
+struct drm_device;
+struct drm_format_info;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format);
+#else
+static inline int drm_fbdev_client_setup(struct drm_device *dev,
+ const struct drm_format_info *format)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DRM_CLIENT_LOG
+void drm_log_register(struct drm_device *dev);
+#else
+static inline void drm_log_register(struct drm_device *dev) {}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
new file mode 100644
index 000000000000..e17265039ca8
--- /dev/null
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+
+#include "drm_client_internal.h"
+
+static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT;
+module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444);
+MODULE_PARM_DESC(active,
+ "Choose which drm client to start, default is"
+ CONFIG_DRM_CLIENT_DEFAULT "]");
+
+/**
+ * drm_client_setup() - Setup in-kernel DRM clients
+ * @dev: DRM device
+ * @format: Preferred pixel format for the device. Use NULL, unless
+ * there is clearly a driver-preferred format.
+ *
+ * This function sets up the in-kernel DRM clients. Restore, hotplug
+ * events and teardown are all taken care of.
+ *
+ * Drivers should call drm_client_setup() after registering the new
+ * DRM device with drm_dev_register(). This function is safe to call
+ * even when there are no connectors present. Setup will be retried
+ * on the next hotplug event.
+ *
+ * The clients are destroyed by drm_dev_unregister().
+ */
+void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
+{
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (!strcmp(drm_client_default, "fbdev")) {
+ int ret;
+
+ ret = drm_fbdev_client_setup(dev, format);
+ if (ret)
+ drm_warn(dev, "Failed to set up DRM client; error %d\n", ret);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_DRM_CLIENT_LOG
+ if (!strcmp(drm_client_default, "log")) {
+ drm_log_register(dev);
+ return;
+ }
+#endif
+ if (strcmp(drm_client_default, ""))
+ drm_warn(dev, "Unknown DRM client %s\n", drm_client_default);
+}
+EXPORT_SYMBOL(drm_client_setup);
+
+/**
+ * drm_client_setup_with_fourcc() - Setup in-kernel DRM clients for color mode
+ * @dev: DRM device
+ * @fourcc: Preferred pixel format as 4CC code for the device
+ *
+ * This function sets up the in-kernel DRM clients. It is equivalent
+ * to drm_client_setup(), but expects a 4CC code as second argument.
+ */
+void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc)
+{
+ drm_client_setup(dev, drm_format_info(fourcc));
+}
+EXPORT_SYMBOL(drm_client_setup_with_fourcc);
+
+/**
+ * drm_client_setup_with_color_mode() - Setup in-kernel DRM clients for color mode
+ * @dev: DRM device
+ * @color_mode: Preferred color mode for the device
+ *
+ * This function sets up the in-kernel DRM clients. It is equivalent
+ * to drm_client_setup(), but expects a color mode as second argument.
+ *
+ * Do not use this function in new drivers. Prefer drm_client_setup() with a
+ * format of NULL.
+ */
+void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode)
+{
+ u32 fourcc = drm_driver_color_mode_format(dev, color_mode);
+
+ drm_client_setup_with_fourcc(dev, fourcc);
+}
+EXPORT_SYMBOL(drm_client_setup_with_color_mode);
+
+MODULE_DESCRIPTION("In-kernel DRM clients");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/clients/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c
new file mode 100644
index 000000000000..f894ba52bdb5
--- /dev/null
+++ b/drivers/gpu/drm/clients/drm_fbdev_client.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_client.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+
+#include "drm_client_internal.h"
+
+/*
+ * struct drm_client_funcs
+ */
+
+static void drm_fbdev_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (fb_helper->info) {
+ drm_fb_helper_unregister_info(fb_helper);
+ } else {
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+ }
+}
+
+static int drm_fbdev_client_restore(struct drm_client_dev *client)
+{
+ drm_fb_helper_lastclose(client->dev);
+
+ return 0;
+}
+
+static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+ struct drm_device *dev = client->dev;
+ int ret;
+
+ if (dev->fb_helper)
+ return drm_fb_helper_hotplug_event(dev->fb_helper);
+
+ ret = drm_fb_helper_init(dev, fb_helper);
+ if (ret)
+ goto err_drm_err;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(fb_helper);
+ if (ret)
+ goto err_drm_fb_helper_fini;
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
+ drm_err(dev, "fbdev: Failed to setup emulation (ret=%d)\n", ret);
+ return ret;
+}
+
+static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (holds_console_lock)
+ drm_fb_helper_set_suspend(fb_helper, true);
+ else
+ drm_fb_helper_set_suspend_unlocked(fb_helper, true);
+
+ return 0;
+}
+
+static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ if (holds_console_lock)
+ drm_fb_helper_set_suspend(fb_helper, false);
+ else
+ drm_fb_helper_set_suspend_unlocked(fb_helper, false);
+
+ return 0;
+}
+
+static const struct drm_client_funcs drm_fbdev_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = drm_fbdev_client_unregister,
+ .restore = drm_fbdev_client_restore,
+ .hotplug = drm_fbdev_client_hotplug,
+ .suspend = drm_fbdev_client_suspend,
+ .resume = drm_fbdev_client_resume,
+};
+
+/**
+ * drm_fbdev_client_setup() - Setup fbdev emulation
+ * @dev: DRM device
+ * @format: Preferred color format for the device. DRM_FORMAT_XRGB8888
+ * is used if this is zero.
+ *
+ * This function sets up fbdev emulation. Restore, hotplug events and
+ * teardown are all taken care of. Drivers that do suspend/resume need
+ * to call drm_client_dev_suspend() and drm_client_dev_resume() by
+ * themselves. Simple drivers might use drm_mode_config_helper_suspend().
+ *
+ * This function is safe to call even when there are no connectors present.
+ * Setup will be retried on the next hotplug event.
+ *
+ * The fbdev client is destroyed by drm_dev_unregister().
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format)
+{
+ struct drm_fb_helper *fb_helper;
+ unsigned int color_mode;
+ int ret;
+
+ /* TODO: Use format info throughout DRM */
+ if (format) {
+ unsigned int bpp = drm_format_info_bpp(format, 0);
+
+ switch (bpp) {
+ case 16:
+ color_mode = format->depth; // could also be 15
+ break;
+ default:
+ color_mode = bpp;
+ }
+ } else {
+ switch (dev->mode_config.preferred_depth) {
+ case 0:
+ case 24:
+ color_mode = 32;
+ break;
+ default:
+ color_mode = dev->mode_config.preferred_depth;
+ }
+ }
+
+ drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+ drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
+
+ fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+ if (!fb_helper)
+ return -ENOMEM;
+ drm_fb_helper_prepare(dev, fb_helper, color_mode, NULL);
+
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+ if (ret) {
+ drm_err(dev, "Failed to register client: %d\n", ret);
+ goto err_drm_client_init;
+ }
+
+ drm_client_register(&fb_helper->client);
+
+ return 0;
+
+err_drm_client_init:
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+ return ret;
+}
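
Spelling out the color_mode derivation at the top of drm_fbdev_client_setup(), traced through the two switch statements:

- format given, bpp 16: color_mode = format->depth, i.e. 16 for RGB565 or 15 for XRGB1555;
- format given, any other bpp: color_mode = bpp, e.g. 32 for XRGB8888;
- no format, preferred_depth 0 or 24: color_mode = 32;
- no format otherwise: color_mode = preferred_depth, e.g. 16.

The result is what drm_fb_helper_prepare() receives as the preferred bpp, which is why a depth of 24 is promoted to 32, matching the long-standing fbdev-helper convention.
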
diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c
new file mode 100644
index 000000000000..379850c83e51
--- /dev/null
+++ b/drivers/gpu/drm/clients/drm_log.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/*
+ * Copyright (c) 2024 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ */
+
+#include <linux/console.h>
+#include <linux/font.h>
+#include <linux/init.h>
+#include <linux/iosys-map.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <drm/drm_client.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
+
+#include "drm_client_internal.h"
+#include "drm_draw_internal.h"
+#include "drm_internal.h"
+
+MODULE_AUTHOR("Jocelyn Falempe");
+MODULE_DESCRIPTION("DRM boot logger");
+MODULE_LICENSE("GPL");
+
+static unsigned int scale = 1;
+module_param(scale, uint, 0444);
+MODULE_PARM_DESC(scale, "Integer scaling factor for drm_log, default is 1");
+
+/**
+ * DOC: overview
+ *
+ * This is a simple graphic logger that prints the kernel messages on screen
+ * until a userspace application is able to take over.
+ * It is only for debugging purposes.
+ */
+
+struct drm_log_scanout {
+ struct drm_client_buffer *buffer;
+ const struct font_desc *font;
+ u32 rows;
+ u32 columns;
+ u32 scaled_font_h;
+ u32 scaled_font_w;
+ u32 line;
+ u32 format;
+ u32 px_width;
+ u32 front_color;
+ u32 prefix_color;
+};
+
+struct drm_log {
+ struct mutex lock;
+ struct drm_client_dev client;
+ struct console con;
+ bool probed;
+ u32 n_scanout;
+ struct drm_log_scanout *scanout;
+};
+
+static struct drm_log *client_to_drm_log(struct drm_client_dev *client)
+{
+ return container_of(client, struct drm_log, client);
+}
+
+static struct drm_log *console_to_drm_log(struct console *con)
+{
+ return container_of(con, struct drm_log, con);
+}
+
+static void drm_log_blit(struct iosys_map *dst, unsigned int dst_pitch,
+ const u8 *src, unsigned int src_pitch,
+ u32 height, u32 width, u32 px_width, u32 color)
+{
+ switch (px_width) {
+ case 2:
+ drm_draw_blit16(dst, dst_pitch, src, src_pitch, height, width, scale, color);
+ break;
+ case 3:
+ drm_draw_blit24(dst, dst_pitch, src, src_pitch, height, width, scale, color);
+ break;
+ case 4:
+ drm_draw_blit32(dst, dst_pitch, src, src_pitch, height, width, scale, color);
+ break;
+ default:
+ WARN_ONCE(1, "Can't blit with pixel width %d\n", px_width);
+ }
+}
+
+static void drm_log_clear_line(struct drm_log_scanout *scanout, u32 line)
+{
+ struct drm_framebuffer *fb = scanout->buffer->fb;
+ unsigned long height = scanout->scaled_font_h;
+ struct iosys_map map;
+ struct drm_rect r = DRM_RECT_INIT(0, line * height, fb->width, height);
+
+ if (drm_client_buffer_vmap_local(scanout->buffer, &map))
+ return;
+ iosys_map_memset(&map, r.y1 * fb->pitches[0], 0, height * fb->pitches[0]);
+ drm_client_buffer_vunmap_local(scanout->buffer);
+ drm_client_framebuffer_flush(scanout->buffer, &r);
+}
+
+static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s,
+ unsigned int len, unsigned int prefix_len)
+{
+ struct drm_framebuffer *fb = scanout->buffer->fb;
+ struct iosys_map map;
+ const struct font_desc *font = scanout->font;
+ size_t font_pitch = DIV_ROUND_UP(font->width, 8);
+ const u8 *src;
+ u32 px_width = fb->format->cpp[0];
+ struct drm_rect r = DRM_RECT_INIT(0, scanout->line * scanout->scaled_font_h,
+ fb->width, (scanout->line + 1) * scanout->scaled_font_h);
+ u32 i;
+
+ if (drm_client_buffer_vmap_local(scanout->buffer, &map))
+ return;
+
+ iosys_map_incr(&map, r.y1 * fb->pitches[0]);
+ for (i = 0; i < len && i < scanout->columns; i++) {
+ u32 color = (i < prefix_len) ? scanout->prefix_color : scanout->front_color;
+ src = drm_draw_get_char_bitmap(font, s[i], font_pitch);
+ drm_log_blit(&map, fb->pitches[0], src, font_pitch,
+ scanout->scaled_font_h, scanout->scaled_font_w,
+ px_width, color);
+ iosys_map_incr(&map, scanout->scaled_font_w * px_width);
+ }
+
+ scanout->line++;
+ if (scanout->line >= scanout->rows)
+ scanout->line = 0;
+ drm_client_buffer_vunmap_local(scanout->buffer);
+ drm_client_framebuffer_flush(scanout->buffer, &r);
+}
+
+static void drm_log_draw_new_line(struct drm_log_scanout *scanout,
+ const char *s, unsigned int len, unsigned int prefix_len)
+{
+ if (scanout->line == 0) {
+ drm_log_clear_line(scanout, 0);
+ drm_log_clear_line(scanout, 1);
+ drm_log_clear_line(scanout, 2);
+ } else if (scanout->line + 2 < scanout->rows)
+ drm_log_clear_line(scanout, scanout->line + 2);
+
+ drm_log_draw_line(scanout, s, len, prefix_len);
+}
+
+/*
+ * Depends on print_time() in printk.c
+ * Timestamp is written with "[%5lu.%06lu]"
+ */
+#define TS_PREFIX_LEN 13
+
+static void drm_log_draw_kmsg_record(struct drm_log_scanout *scanout,
+ const char *s, unsigned int len)
+{
+ u32 prefix_len = 0;
+
+ if (len > TS_PREFIX_LEN && s[0] == '[' && s[6] == '.' && s[TS_PREFIX_LEN] == ']')
+ prefix_len = TS_PREFIX_LEN + 1;
+
+ /* do not print the ending \n character */
+ if (s[len - 1] == '\n')
+ len--;
+
+ while (len > scanout->columns) {
+ drm_log_draw_new_line(scanout, s, scanout->columns, prefix_len);
+ s += scanout->columns;
+ len -= scanout->columns;
+ prefix_len = 0;
+ }
+ if (len)
+ drm_log_draw_new_line(scanout, s, len, prefix_len);
+}
+
+static u32 drm_log_find_usable_format(struct drm_plane *plane)
+{
+ int i;
+
+ for (i = 0; i < plane->format_count; i++)
+ if (drm_draw_color_from_xrgb8888(0xffffff, plane->format_types[i]) != 0)
+ return plane->format_types[i];
+ return DRM_FORMAT_INVALID;
+}
+
+static int drm_log_setup_modeset(struct drm_client_dev *client,
+ struct drm_mode_set *mode_set,
+ struct drm_log_scanout *scanout)
+{
+ struct drm_crtc *crtc = mode_set->crtc;
+ u32 width = mode_set->mode->hdisplay;
+ u32 height = mode_set->mode->vdisplay;
+ u32 format;
+
+ scanout->font = get_default_font(width, height, NULL, NULL);
+ if (!scanout->font)
+ return -ENOENT;
+
+ format = drm_log_find_usable_format(crtc->primary);
+ if (format == DRM_FORMAT_INVALID)
+ return -EINVAL;
+
+ scanout->buffer = drm_client_framebuffer_create(client, width, height, format);
+ if (IS_ERR(scanout->buffer)) {
+ drm_warn(client->dev, "drm_log can't create framebuffer %d %d %p4cc\n",
+ width, height, &format);
+ return -ENOMEM;
+ }
+ mode_set->fb = scanout->buffer->fb;
+ scanout->scaled_font_h = scanout->font->height * scale;
+ scanout->scaled_font_w = scanout->font->width * scale;
+ scanout->rows = height / scanout->scaled_font_h;
+ scanout->columns = width / scanout->scaled_font_w;
+ scanout->front_color = drm_draw_color_from_xrgb8888(0xffffff, format);
+ scanout->prefix_color = drm_draw_color_from_xrgb8888(0x4e9a06, format);
+ return 0;
+}
+
+static int drm_log_count_modeset(struct drm_client_dev *client)
+{
+ struct drm_mode_set *mode_set;
+ int count = 0;
+
+ mutex_lock(&client->modeset_mutex);
+ drm_client_for_each_modeset(mode_set, client)
+ count++;
+ mutex_unlock(&client->modeset_mutex);
+ return count;
+}
+
+static void drm_log_init_client(struct drm_log *dlog)
+{
+ struct drm_client_dev *client = &dlog->client;
+ struct drm_mode_set *mode_set;
+ int i, max_modeset;
+ int n_modeset = 0;
+
+ dlog->probed = true;
+
+ if (drm_client_modeset_probe(client, 0, 0))
+ return;
+
+ max_modeset = drm_log_count_modeset(client);
+ if (!max_modeset)
+ return;
+
+ dlog->scanout = kcalloc(max_modeset, sizeof(*dlog->scanout), GFP_KERNEL);
+ if (!dlog->scanout)
+ return;
+
+ mutex_lock(&client->modeset_mutex);
+ drm_client_for_each_modeset(mode_set, client) {
+ if (!mode_set->mode)
+ continue;
+ if (drm_log_setup_modeset(client, mode_set, &dlog->scanout[n_modeset]))
+ continue;
+ n_modeset++;
+ }
+ mutex_unlock(&client->modeset_mutex);
+ if (n_modeset == 0)
+ goto err_nomodeset;
+
+ if (drm_client_modeset_commit(client))
+ goto err_failed_commit;
+
+ dlog->n_scanout = n_modeset;
+ return;
+
+err_failed_commit:
+ for (i = 0; i < n_modeset; i++)
+ drm_client_framebuffer_delete(dlog->scanout[i].buffer);
+
+err_nomodeset:
+ kfree(dlog->scanout);
+ dlog->scanout = NULL;
+}
+
+static void drm_log_free_scanout(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+ int i;
+
+ if (dlog->n_scanout) {
+ for (i = 0; i < dlog->n_scanout; i++)
+ drm_client_framebuffer_delete(dlog->scanout[i].buffer);
+ dlog->n_scanout = 0;
+ kfree(dlog->scanout);
+ dlog->scanout = NULL;
+ }
+}
+
+static void drm_log_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+ struct drm_device *dev = client->dev;
+
+ unregister_console(&dlog->con);
+
+ mutex_lock(&dlog->lock);
+ drm_log_free_scanout(client);
+ drm_client_release(client);
+ mutex_unlock(&dlog->lock);
+ kfree(dlog);
+ drm_dbg(dev, "Unregistered with drm log\n");
+}
+
+static int drm_log_client_hotplug(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
+ mutex_lock(&dlog->lock);
+ drm_log_free_scanout(client);
+ dlog->probed = false;
+ mutex_unlock(&dlog->lock);
+ return 0;
+}
+
+static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
+ console_stop(&dlog->con);
+
+ return 0;
+}
+
+static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lock)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
+ console_start(&dlog->con);
+
+ return 0;
+}
+
+static const struct drm_client_funcs drm_log_client_funcs = {
+ .owner = THIS_MODULE,
+ .unregister = drm_log_client_unregister,
+ .hotplug = drm_log_client_hotplug,
+ .suspend = drm_log_client_suspend,
+ .resume = drm_log_client_resume,
+};
+
+static void drm_log_write_thread(struct console *con, struct nbcon_write_context *wctxt)
+{
+ struct drm_log *dlog = console_to_drm_log(con);
+ int i;
+
+ if (!dlog->probed)
+ drm_log_init_client(dlog);
+
+ /* Check that we are still the master before drawing */
+ if (drm_master_internal_acquire(dlog->client.dev)) {
+ drm_master_internal_release(dlog->client.dev);
+
+ for (i = 0; i < dlog->n_scanout; i++)
+ drm_log_draw_kmsg_record(&dlog->scanout[i], wctxt->outbuf, wctxt->len);
+ }
+}
+
+static void drm_log_lock(struct console *con, unsigned long *flags)
+{
+ struct drm_log *dlog = console_to_drm_log(con);
+
+ mutex_lock(&dlog->lock);
+ migrate_disable();
+}
+
+static void drm_log_unlock(struct console *con, unsigned long flags)
+{
+ struct drm_log *dlog = console_to_drm_log(con);
+
+ migrate_enable();
+ mutex_unlock(&dlog->lock);
+}
+
+static void drm_log_register_console(struct console *con)
+{
+ strscpy(con->name, "drm_log");
+ con->write_thread = drm_log_write_thread;
+ con->device_lock = drm_log_lock;
+ con->device_unlock = drm_log_unlock;
+ con->flags = CON_PRINTBUFFER | CON_NBCON;
+ con->index = -1;
+
+ register_console(con);
+}
+
+/**
+ * drm_log_register() - Register a drm device to drm_log
+ * @dev: the drm device to register.
+ */
+void drm_log_register(struct drm_device *dev)
+{
+ struct drm_log *new;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ goto err_warn;
+
+ mutex_init(&new->lock);
+ if (drm_client_init(dev, &new->client, "drm_log", &drm_log_client_funcs))
+ goto err_free;
+
+ drm_client_register(&new->client);
+
+ drm_log_register_console(&new->con);
+
+ drm_dbg(dev, "Registered with drm log as %s\n", new->con.name);
+ return;
+
+err_free:
+ kfree(new);
+err_warn:
+ drm_warn(dev, "Failed to register with drm log\n");
+}
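
A concrete example of the prefix handling in drm_log_draw_kmsg_record(): with printk timestamps enabled, a record arrives as, say, "[   12.345678] vkms: ...". The "[%5lu.%06lu]" format is 14 characters wide, so s[0] == '[', s[6] == '.' and s[TS_PREFIX_LEN] == s[13] == ']' all hold, and prefix_len becomes 14. Those first 14 glyphs are blitted with prefix_color (the green 0x4e9a06 chosen in drm_log_setup_modeset()) and the rest of the line with front_color (white). A record without a timestamp fails the test and is drawn entirely in the foreground color.
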
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 3b824e01c9b5..8d22b7627d41 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -3,7 +3,7 @@
config DRM_DISPLAY_DP_AUX_BUS
tristate
depends on DRM
- depends on OF || COMPILE_TEST
+ depends on OF
config DRM_DISPLAY_HELPER
tristate
@@ -15,6 +15,7 @@ if DRM_DISPLAY_HELPER
config DRM_BRIDGE_CONNECTOR
bool
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
help
DRM connector implementation terminating DRM bridge chains.
@@ -64,11 +65,23 @@ config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
If in doubt, say "N".
+config DRM_DISPLAY_DSC_HELPER
+ bool
+ depends on DRM_DISPLAY_HELPER
+ help
+ DRM display helpers for VESA DSC (used by DSI and DisplayPort).
+
config DRM_DISPLAY_HDCP_HELPER
bool
help
DRM display helpers for HDCP.
+config DRM_DISPLAY_HDMI_AUDIO_HELPER
+ bool
+ help
+ DRM display helpers for HDMI Audio functionality (generic HDMI Codec
+ implementation).
+
config DRM_DISPLAY_HDMI_HELPER
bool
help
@@ -76,6 +89,7 @@ config DRM_DISPLAY_HDMI_HELPER
config DRM_DISPLAY_HDMI_STATE_HELPER
bool
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select DRM_DISPLAY_HDMI_HELPER
help
DRM KMS state helpers for HDMI.
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index fbb9d2b8acd4..b17879b957d5 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -8,11 +8,14 @@ drm_display_helper-$(CONFIG_DRM_BRIDGE_CONNECTOR) += \
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
drm_dp_dual_mode_helper.o \
drm_dp_helper.o \
- drm_dp_mst_topology.o \
- drm_dsc_helper.o
+ drm_dp_mst_topology.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
drm_dp_tunnel.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \
+ drm_dsc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_AUDIO_HELPER) += \
+ drm_hdmi_audio_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
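
One consequence of carving drm_dsc_helper.o out of the DP helper block: DSC support no longer comes for free with DRM_DISPLAY_DP_HELPER, so any driver calling the drm_dsc_* helpers now needs its own 'select DRM_DISPLAY_DSC_HELPER' alongside its existing DP helper select. The affected drivers are presumably updated elsewhere in this series.
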
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 3da5b8bf8259..56f977bbe62d 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -17,7 +17,10 @@
#include <drm/drm_edid.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
/**
@@ -179,11 +182,15 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
struct drm_bridge *detect = bridge_connector->bridge_detect;
+ struct drm_bridge *hdmi = bridge_connector->bridge_hdmi;
enum drm_connector_status status;
if (detect) {
status = detect->funcs->detect(detect);
+ if (hdmi)
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
+
drm_bridge_connector_hpd_notify(connector, status);
} else {
switch (connector->connector_type) {
@@ -202,6 +209,16 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force)
return status;
}
+static void drm_bridge_connector_force(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hdmi = bridge_connector->bridge_hdmi;
+
+ if (hdmi)
+ drm_atomic_helper_connector_hdmi_force(connector);
+}
+
static void drm_bridge_connector_debugfs_init(struct drm_connector *connector,
struct dentry *root)
{
@@ -230,6 +247,7 @@ static void drm_bridge_connector_reset(struct drm_connector *connector)
static const struct drm_connector_funcs drm_bridge_connector_funcs = {
.reset = drm_bridge_connector_reset,
.detect = drm_bridge_connector_detect,
+ .force = drm_bridge_connector_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -276,6 +294,14 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
struct drm_bridge *bridge;
/*
+ * If there is a HDMI bridge, EDID has been updated as a part of
+ * the .detect(). Just update the modes here.
+ */
+ bridge = bridge_connector->bridge_hdmi;
+ if (bridge)
+ return drm_edid_connector_add_modes(connector);
+
+ /*
* If display exposes EDID, then we parse that in the normal way to
* build table of supported modes.
*/
@@ -299,11 +325,37 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
return 0;
}
+static enum drm_mode_status
+drm_bridge_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hdmi)
+ return drm_hdmi_connector_mode_valid(connector, mode);
+
+ return MODE_OK;
+}
+
+static int drm_bridge_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hdmi)
+ return drm_atomic_helper_connector_hdmi_check(connector, state);
+
+ return 0;
+}
+
static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
.get_modes = drm_bridge_connector_get_modes,
- /* No need for .mode_valid(), the bridges are checked by the core. */
+ .mode_valid = drm_bridge_connector_mode_valid,
.enable_hpd = drm_bridge_connector_enable_hpd,
.disable_hpd = drm_bridge_connector_disable_hpd,
+ .atomic_check = drm_bridge_connector_atomic_check,
};
static enum drm_mode_status
@@ -354,10 +406,94 @@ static int drm_bridge_connector_write_infoframe(struct drm_connector *connector,
return bridge->funcs->hdmi_write_infoframe(bridge, type, buffer, len);
}
+static const struct drm_edid *
+drm_bridge_connector_read_edid(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_edid;
+ if (!bridge)
+ return NULL;
+
+ return drm_bridge_edid_read(bridge, connector);
+}
+
static const struct drm_connector_hdmi_funcs drm_bridge_connector_hdmi_funcs = {
.tmds_char_rate_valid = drm_bridge_connector_tmds_char_rate_valid,
.clear_infoframe = drm_bridge_connector_clear_infoframe,
.write_infoframe = drm_bridge_connector_write_infoframe,
+ .read_edid = drm_bridge_connector_read_edid,
+};
+
+static int drm_bridge_connector_audio_startup(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ if (!bridge->funcs->hdmi_audio_startup)
+ return 0;
+
+ return bridge->funcs->hdmi_audio_startup(connector, bridge);
+}
+
+static int drm_bridge_connector_audio_prepare(struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms);
+}
+
+static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return;
+
+ bridge->funcs->hdmi_audio_shutdown(connector, bridge);
+}
+
+static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connector,
+ bool enable, int direction)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi;
+ if (!bridge)
+ return -EINVAL;
+
+ if (bridge->funcs->hdmi_audio_mute_stream)
+ return bridge->funcs->hdmi_audio_mute_stream(connector, bridge,
+ enable, direction);
+ else
+ return -ENOTSUPP;
+}
+
+static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_audio_funcs = {
+ .startup = drm_bridge_connector_audio_startup,
+ .prepare = drm_bridge_connector_audio_prepare,
+ .shutdown = drm_bridge_connector_audio_shutdown,
+ .mute_stream = drm_bridge_connector_audio_mute_stream,
};
/* -----------------------------------------------------------------------------
@@ -397,11 +533,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
bridge_connector->encoder = encoder;
/*
- * TODO: Handle doublescan_allowed, stereo_allowed and
- * ycbcr_420_allowed.
+ * TODO: Handle doublescan_allowed and stereo_allowed.
*/
connector = &bridge_connector->base;
connector->interlace_allowed = true;
+ connector->ycbcr_420_allowed = true;
/*
* Initialise connector status handling. First locate the furthest
@@ -414,6 +550,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
drm_for_each_bridge_in_chain(encoder, bridge) {
if (!bridge->interlace_allowed)
connector->interlace_allowed = false;
+ if (!bridge->ycbcr_420_allowed)
+ connector->ycbcr_420_allowed = false;
if (bridge->ops & DRM_BRIDGE_OP_EDID)
bridge_connector->bridge_edid = bridge;
@@ -457,7 +595,12 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return ERR_PTR(-EINVAL);
- if (bridge_connector->bridge_hdmi)
+ if (bridge_connector->bridge_hdmi) {
+ if (!connector->ycbcr_420_allowed)
+ supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
+
+ bridge = bridge_connector->bridge_hdmi;
+
ret = drmm_connector_hdmi_init(drm, connector,
bridge_connector->bridge_hdmi->vendor,
bridge_connector->bridge_hdmi->product,
@@ -466,12 +609,31 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc,
supported_formats,
max_bpc);
- else
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (bridge->hdmi_audio_max_i2s_playback_channels ||
+ bridge->hdmi_audio_spdif_playback) {
+ if (!bridge->funcs->hdmi_audio_prepare ||
+ !bridge->funcs->hdmi_audio_shutdown)
+ return ERR_PTR(-EINVAL);
+
+ ret = drm_connector_hdmi_audio_init(connector,
+ bridge->hdmi_audio_dev,
+ &drm_bridge_connector_hdmi_audio_funcs,
+ bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_spdif_playback,
+ bridge->hdmi_audio_dai_port);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ } else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
connector_type, ddc);
- if (ret)
- return ERR_PTR(ret);
+ if (ret)
+ return ERR_PTR(ret);
+ }
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
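
For the audio path above to be taken, a bridge has to advertise its capabilities before drm_bridge_connector_init() runs. A hedged sketch of what that might look like in a bridge driver; the struct fields and callback signatures are the ones tested and called by the connector glue above, but the foo_* driver and its values are invented for illustration:

    #include <linux/platform_device.h>
    #include <sound/hdmi-codec.h>

    #include <drm/drm_bridge.h>

    /*
     * hdmi_audio_prepare and hdmi_audio_shutdown are mandatory once the
     * capability fields are set; leaving hdmi_audio_mute_stream unset
     * makes mute requests return -ENOTSUPP.
     */
    static int foo_hdmi_audio_prepare(struct drm_connector *connector,
                                      struct drm_bridge *bridge,
                                      struct hdmi_codec_daifmt *fmt,
                                      struct hdmi_codec_params *hparms)
    {
            /* program audio infoframes and clocks here */
            return 0;
    }

    static void foo_hdmi_audio_shutdown(struct drm_connector *connector,
                                        struct drm_bridge *bridge)
    {
            /* stop the audio stream */
    }

    static const struct drm_bridge_funcs foo_bridge_funcs = {
            /* ... modeset hooks elided ... */
            .hdmi_audio_prepare  = foo_hdmi_audio_prepare,
            .hdmi_audio_shutdown = foo_hdmi_audio_shutdown,
    };

    static void foo_bridge_setup_audio(struct drm_bridge *bridge,
                                       struct platform_device *pdev)
    {
            bridge->funcs = &foo_bridge_funcs;
            bridge->hdmi_audio_dev = &pdev->dev;
            bridge->hdmi_audio_max_i2s_playback_channels = 8;
            bridge->hdmi_audio_dai_port = 1;
    }
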
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index d810529ebfb6..ec7eac6b595f 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -292,7 +292,7 @@ int of_dp_aux_populate_bus(struct drm_dp_aux *aux,
aux_ep->dev.parent = aux->dev;
aux_ep->dev.bus = &dp_aux_bus_type;
aux_ep->dev.type = &dp_aux_device_type_type;
- aux_ep->dev.of_node = of_node_get(np);
+ device_set_node(&aux_ep->dev, of_fwnode_handle(of_node_get(np)));
dev_set_name(&aux_ep->dev, "aux-%s", dev_name(aux->dev));
ret = device_register(&aux_ep->dev);
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 007ceb281d00..56a4965e518c 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -311,16 +311,6 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
if (!aux->transfer)
return;
-#ifndef CONFIG_MEDIA_CEC_RC
- /*
- * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
- * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
- *
- * Do this here as well to ensure the tests against cec_caps are
- * correct.
- */
- cec_caps &= ~CEC_CAP_RC;
-#endif
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
@@ -337,7 +327,9 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
num_las = CEC_MAX_LOG_ADDRS;
if (aux->cec.adap) {
- if (aux->cec.adap->capabilities == cec_caps &&
+ /* Check if the adapter properties have changed */
+ if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) ==
+ (cec_caps & CEC_CAP_MONITOR_ALL) &&
aux->cec.adap->available_log_addrs == num_las) {
/* Unchanged, so just set the phys addr */
cec_s_phys_addr(aux->cec.adap, source_physical_address, false);
diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
index 14a2a8473682..c491e3203bf1 100644
--- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c
@@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write);
static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
{
- static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
+ static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] =
"DP-HDMI ADAPTOR\x04";
return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
- sizeof(dp_dual_mode_hdmi_id)) == 0;
+ DP_DUAL_MODE_HDMI_ID_LEN) == 0;
}
static bool is_type1_adaptor(uint8_t adaptor_id)
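
The one-byte size fix above deserves a spelled-out example. "DP-HDMI ADAPTOR\x04" is exactly DP_DUAL_MODE_HDMI_ID_LEN (16) characters, so the old 16-byte array left no room for the string literal's implicit NUL terminator; newer compilers can flag that (GCC's -Wunterminated-string-initialization, for instance). A self-contained userspace sketch of the corrected semantics (main() and the printf are purely for demonstration):

    #include <stdio.h>
    #include <string.h>

    #define DP_DUAL_MODE_HDMI_ID_LEN 16

    int main(void)
    {
            /*
             * 15 printable characters plus the trailing 0x04 make 16;
             * the literal also carries an implicit NUL, so the array
             * must hold 17 bytes to stay a proper C string.
             */
            static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] =
                    "DP-HDMI ADAPTOR\x04";

            /*
             * Only the first 16 bytes take part in the comparison,
             * exactly as is_hdmi_adaptor() now does by passing the
             * length macro instead of sizeof().
             */
            printf("match: %d\n",
                   memcmp(dp_dual_mode_hdmi_id, "DP-HDMI ADAPTOR\x04",
                          DP_DUAL_MODE_HDMI_ID_LEN) == 0);
            return 0;
    }
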
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 6ee51003de3c..da3c8521a7fa 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -22,15 +22,16 @@
#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/dynamic_debug.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
-#include <linux/dynamic_debug.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -779,6 +780,128 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_dpcd_read_phy_link_status);
+static int read_payload_update_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
+
+/**
+ * drm_dp_dpcd_write_payload() - Write Virtual Channel information to payload table
+ * @aux: DisplayPort AUX channel
+ * @vcpid: Virtual Channel Payload ID
+ * @start_time_slot: Starting time slot
+ * @time_slot_count: Time slot count
+ *
+ * Write the Virtual Channel payload allocation table, checking the payload
+ * update status and retrying as necessary.
+ *
+ * Returns:
+ * 0 on success, negative error otherwise
+ */
+int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
+ int vcpid, u8 start_time_slot, u8 time_slot_count)
+{
+ u8 payload_alloc[3], status;
+ int ret;
+ int retries = 0;
+
+ drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
+
+ payload_alloc[0] = vcpid;
+ payload_alloc[1] = start_time_slot;
+ payload_alloc[2] = time_slot_count;
+
+ ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret != 3) {
+ drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret);
+ goto fail;
+ }
+
+retry:
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret);
+ goto fail;
+ }
+
+ if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
+ retries++;
+ if (retries < 20) {
+ usleep_range(10000, 20000);
+ goto retry;
+ }
+ drm_dbg_kms(aux->drm_dev, "status not set after read payload table status %d\n",
+ status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_write_payload);
+
+/**
+ * drm_dp_dpcd_clear_payload() - Clear the entire VC Payload ID table
+ * @aux: DisplayPort AUX channel
+ *
+ * Clear the entire VC Payload ID table.
+ *
+ * Returns: 0 on success, negative error code on errors.
+ */
+int drm_dp_dpcd_clear_payload(struct drm_dp_aux *aux)
+{
+ return drm_dp_dpcd_write_payload(aux, 0, 0, 0x3f);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_clear_payload);
+
+/**
+ * drm_dp_dpcd_poll_act_handled() - Poll for ACT handled status
+ * @aux: DisplayPort AUX channel
+ * @timeout_ms: Timeout in ms
+ *
+ * Try waiting for the sink to finish updating its payload table by polling for
+ * the ACT handled bit of DP_PAYLOAD_TABLE_UPDATE_STATUS for up to @timeout_ms
+ * milliseconds, defaulting to 3000 ms if 0.
+ *
+ * Returns:
+ * 0 if the ACT was handled in time, negative error code on failure.
+ */
+int drm_dp_dpcd_poll_act_handled(struct drm_dp_aux *aux, int timeout_ms)
+{
+ int ret, status;
+
+ /* default to 3 seconds, this is arbitrary */
+ timeout_ms = timeout_ms ?: 3000;
+
+ ret = readx_poll_timeout(read_payload_update_status, aux, status,
+ status & DP_PAYLOAD_ACT_HANDLED || status < 0,
+ 200, timeout_ms * USEC_PER_MSEC);
+ if (ret < 0 && status >= 0) {
+ drm_err(aux->drm_dev, "Failed to get ACT after %d ms, last status: %02x\n",
+ timeout_ms, status);
+ return -EINVAL;
+ } else if (status < 0) {
+ /*
+ * Failure here isn't unexpected - the hub may have
+ * just been unplugged
+ */
+ drm_dbg_kms(aux->drm_dev, "Failed to read payload table status: %d\n", status);
+ return status;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_poll_act_handled);
+
static bool is_edid_digital_input_dp(const struct drm_edid *drm_edid)
{
/* FIXME: get rid of drm_edid_raw() */
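
The two newly exported payload-table helpers are meant to be used back to back: write the allocation, then wait for the ACT. A rough usage sketch from a hypothetical MST-capable driver's perspective (the VCPI and slot values are illustrative only):

    #include <drm/display/drm_dp_helper.h>

    /*
     * Allocate VCPI 1 starting at time slot 1 for 4 slots, then wait
     * for the sink to report ACT handled.
     */
    static int foo_write_and_commit_payload(struct drm_dp_aux *aux)
    {
            int ret;

            ret = drm_dp_dpcd_write_payload(aux, /* vcpid */ 1,
                                            /* start_time_slot */ 1,
                                            /* time_slot_count */ 4);
            if (ret < 0)
                    return ret;

            /* 0 means: use the default 3000 ms timeout */
            return drm_dp_dpcd_poll_act_handled(aux, 0);
    }
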
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index ac90118b9e7a..06c91c5b7f7c 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -29,7 +29,6 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/iopoll.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
@@ -68,9 +67,6 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
-static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, u8 start_slot, u8 num_slots);
-
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
@@ -320,6 +316,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr
hdr->broadcast = (buf[idx] >> 7) & 0x1;
hdr->path_msg = (buf[idx] >> 6) & 0x1;
hdr->msg_len = buf[idx] & 0x3f;
+ if (hdr->msg_len < 1) /* min space for body CRC */
+ return false;
+
idx++;
hdr->somt = (buf[idx] >> 7) & 0x1;
hdr->eomt = (buf[idx] >> 6) & 0x1;
@@ -2282,7 +2281,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
port->cached_edid = drm_edid_read_ddc(port->connector,
&port->aux.ddc);
- drm_connector_register(port->connector);
+ drm_connector_dynamic_register(port->connector);
return;
error:
@@ -3264,7 +3263,7 @@ EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_atomic_payload *payload)
{
- return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
+ return drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot,
payload->time_slots);
}
@@ -3295,7 +3294,7 @@ static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_
}
if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP)
- drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
+ drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, 0);
}
/**
@@ -3573,8 +3572,7 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
}
/**
- * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
- * @mgr: The &drm_dp_mst_topology_mgr to use
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MTP link
* @link_rate: link rate in 10kbits/s units
* @link_lane_count: lane count
*
@@ -3585,17 +3583,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
*
* Returns the BW / timeslot value in 20.12 fixed point format.
*/
-fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count)
+fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
int ch_coding_efficiency =
drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
fixed20_12 ret;
- if (link_rate == 0 || link_lane_count == 0)
- drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
- link_rate, link_lane_count);
-
/* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
ch_coding_efficiency),
@@ -3683,7 +3676,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
/* Write reset payload */
- drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
+ drm_dp_dpcd_clear_payload(mgr->aux);
drm_dp_mst_queue_probe_work(mgr);
@@ -3697,8 +3690,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
ret = 0;
mgr->payload_id_table_cleared = false;
- memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
- memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
+ mgr->reset_rx_state = true;
}
out_unlock:
@@ -3856,6 +3848,11 @@ out_fail:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
+static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+}
+
static bool
drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
struct drm_dp_mst_branch **mstb)
@@ -3934,6 +3931,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
return true;
}
+static int get_msg_request_type(u8 data)
+{
+ return data & 0x7f;
+}
+
+static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
+ const struct drm_dp_sideband_msg_tx *txmsg,
+ const struct drm_dp_sideband_msg_rx *rxmsg)
+{
+ const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
+ const struct drm_dp_mst_branch *mstb = txmsg->dst;
+ int tx_req_type = get_msg_request_type(txmsg->msg[0]);
+ int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
+ char rad_str[64];
+
+ if (tx_req_type == rx_req_type)
+ return true;
+
+ drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
+ drm_dbg_kms(mgr->dev,
+ "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
+ mstb, hdr->seqno, mstb->lct, rad_str,
+ drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
+ drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
+
+ return false;
+}
+
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
@@ -3949,9 +3974,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
/* find the message */
mutex_lock(&mgr->qlock);
+
txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
struct drm_dp_sideband_msg_tx, next);
- mutex_unlock(&mgr->qlock);
/* Were we actually expecting a response, and from this mstb? */
if (!txmsg || txmsg->dst != mstb) {
@@ -3960,6 +3985,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
hdr = &msg->initial_hdr;
drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
+
+ mutex_unlock(&mgr->qlock);
+
+ goto out_clear_reply;
+ }
+
+ if (!verify_rx_request_type(mgr, txmsg, msg)) {
+ mutex_unlock(&mgr->qlock);
+
goto out_clear_reply;
}
@@ -3975,20 +4009,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
txmsg->reply.u.nak.nak_data);
}
- memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
- drm_dp_mst_topology_put_mstb(mstb);
-
- mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
list_del(&txmsg->next);
+
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
- return 0;
-
out_clear_reply:
- memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ reset_msg_rx_state(msg);
out:
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
@@ -4070,16 +4099,20 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_pending_up_req *up_req;
+ struct drm_dp_mst_branch *mst_primary;
+ int ret = 0;
if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
- goto out;
+ goto out_clear_reply;
if (!mgr->up_req_recv.have_eomt)
return 0;
up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
- if (!up_req)
- return -ENOMEM;
+ if (!up_req) {
+ ret = -ENOMEM;
+ goto out_clear_reply;
+ }
INIT_LIST_HEAD(&up_req->next);
@@ -4090,10 +4123,19 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
up_req->msg.req_type);
kfree(up_req);
- goto out;
+ goto out_clear_reply;
+ }
+
+ mutex_lock(&mgr->lock);
+ mst_primary = mgr->mst_primary;
+ if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) {
+ mutex_unlock(&mgr->lock);
+ kfree(up_req);
+ goto out_clear_reply;
}
+ mutex_unlock(&mgr->lock);
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
+ drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
false);
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
@@ -4110,13 +4152,13 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
conn_stat->peer_device_type);
mutex_lock(&mgr->probe_lock);
- handle_csn = mgr->mst_primary->link_address_sent;
+ handle_csn = mst_primary->link_address_sent;
mutex_unlock(&mgr->probe_lock);
if (!handle_csn) {
drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
kfree(up_req);
- goto out;
+ goto out_put_primary;
}
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
@@ -4133,9 +4175,22 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
-out:
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
+out_put_primary:
+ drm_dp_mst_topology_put_mstb(mst_primary);
+out_clear_reply:
+ reset_msg_rx_state(&mgr->up_req_recv);
+ return ret;
+}
+
+static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->lock);
+ if (mgr->reset_rx_state) {
+ mgr->reset_rx_state = false;
+ reset_msg_rx_state(&mgr->down_rep_recv);
+ reset_msg_rx_state(&mgr->up_req_recv);
+ }
+ mutex_unlock(&mgr->lock);
}
/**
@@ -4172,6 +4227,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u
*handled = true;
}
+ update_msg_rx_state(mgr);
+
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
ret = drm_dp_mst_handle_down_rep(mgr);
*handled = true;
@@ -4680,61 +4737,6 @@ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_
}
EXPORT_SYMBOL(drm_dp_mst_update_slots);
-static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, u8 start_slot, u8 num_slots)
-{
- u8 payload_alloc[3], status;
- int ret;
- int retries = 0;
-
- drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
-
- payload_alloc[0] = id;
- payload_alloc[1] = start_slot;
- payload_alloc[2] = num_slots;
-
- ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
- drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
- goto fail;
- }
-
-retry:
- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
- if (ret < 0) {
- drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
- goto fail;
- }
-
- if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
- retries++;
- if (retries < 20) {
- usleep_range(10000, 20000);
- goto retry;
- }
- drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
- status);
- ret = -EINVAL;
- goto fail;
- }
- ret = 0;
-fail:
- return ret;
-}
-
-static int do_get_act_status(struct drm_dp_aux *aux)
-{
- int ret;
- u8 status;
-
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
- if (ret < 0)
- return ret;
-
- return status;
-}
-
/**
* drm_dp_check_act_status() - Polls for ACT handled status.
* @mgr: manager to use
@@ -4752,28 +4754,9 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
* There doesn't seem to be any recommended retry count or timeout in
* the MST specification. Since some hubs have been observed to take
* over 1 second to update their payload allocations under certain
- * conditions, we use a rather large timeout value.
+ * conditions, we use a rather large timeout value of 3 seconds.
*/
- const int timeout_ms = 3000;
- int ret, status;
-
- ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
- status & DP_PAYLOAD_ACT_HANDLED || status < 0,
- 200, timeout_ms * USEC_PER_MSEC);
- if (ret < 0 && status >= 0) {
- drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
- timeout_ms, status);
- return -EINVAL;
- } else if (status < 0) {
- /*
- * Failure here isn't unexpected - the hub may have
- * just been unplugged
- */
- drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
- return status;
- }
-
- return 0;
+ return drm_dp_dpcd_poll_act_handled(mgr->aux, 3000);
}
EXPORT_SYMBOL(drm_dp_check_act_status);
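Since drm_dp_get_vc_payload_bw() has lost its @mgr argument, it has also lost the debug message it used to print for a zero link rate or lane count; callers that depended on that check should validate the link parameters themselves. A minimal caller sketch (variable names hypothetical):

.. code-block:: c

   fixed20_12 bw;

   /* the helper no longer warns about an invalid link configuration */
   if (link_rate == 0 || link_lane_count == 0)
           return -EINVAL;

   bw = drm_dp_get_vc_payload_bw(link_rate, link_lane_count);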
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 48b2df120086..90fe07a89260 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -1896,8 +1896,8 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
*
* Creates a DP tunnel manager for @dev.
*
- * Returns a pointer to the tunnel manager if created successfully or NULL in
- * case of an error.
+ * Returns a pointer to the tunnel manager if created successfully or an
+ * error pointer in case of failure.
*/
struct drm_dp_tunnel_mgr *
drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
@@ -1907,7 +1907,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mgr->dev = dev;
init_waitqueue_head(&mgr->bw_req_queue);
@@ -1916,7 +1916,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
if (!mgr->groups) {
kfree(mgr);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
@@ -1927,7 +1927,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
if (!init_group(mgr, &mgr->groups[i])) {
destroy_mgr(mgr);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
mgr->group_count++;
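With drm_dp_tunnel_mgr_create() now returning ERR_PTR(-ENOMEM) instead of NULL, a plain NULL check in a caller would silently miss the failure. A hedged caller fragment:

.. code-block:: c

   struct drm_dp_tunnel_mgr *mgr;

   mgr = drm_dp_tunnel_mgr_create(dev, max_group_count);
   if (IS_ERR(mgr))
           return PTR_ERR(mgr); /* a NULL check no longer catches this */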
diff --git a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
new file mode 100644
index 000000000000..05afc9f0bdd6
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
+
+#include <sound/hdmi-codec.h>
+
+static int drm_connector_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ if (funcs->startup)
+ return funcs->startup(connector);
+
+ return 0;
+}
+
+static int drm_connector_hdmi_audio_prepare(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ return funcs->prepare(connector, fmt, hparms);
+}
+
+static void drm_connector_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ return funcs->shutdown(connector);
+}
+
+static int drm_connector_hdmi_audio_mute_stream(struct device *dev, void *data,
+ bool enable, int direction)
+{
+ struct drm_connector *connector = data;
+ const struct drm_connector_hdmi_audio_funcs *funcs =
+ connector->hdmi_audio.funcs;
+
+ if (funcs->mute_stream)
+ return funcs->mute_stream(connector, enable, direction);
+
+ return -ENOTSUPP;
+}
+
+static int drm_connector_hdmi_audio_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint,
+ void *data)
+{
+ struct drm_connector *connector = data;
+ struct of_endpoint of_ep;
+ int ret;
+
+ if (connector->hdmi_audio.dai_port < 0)
+ return -ENOTSUPP;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+ if (of_ep.port == connector->hdmi_audio.dai_port)
+ return 0;
+
+ return -EINVAL;
+}
+
+static int drm_connector_hdmi_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct drm_connector *connector = data;
+
+ mutex_lock(&connector->eld_mutex);
+ memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&connector->eld_mutex);
+
+ return 0;
+}
+
+static int drm_connector_hdmi_audio_hook_plugged_cb(struct device *dev,
+ void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct drm_connector *connector = data;
+
+ mutex_lock(&connector->hdmi_audio.lock);
+
+ connector->hdmi_audio.plugged_cb = fn;
+ connector->hdmi_audio.plugged_cb_dev = codec_dev;
+
+ fn(codec_dev, connector->hdmi_audio.last_state);
+
+ mutex_unlock(&connector->hdmi_audio.lock);
+
+ return 0;
+}
+
+void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector,
+ bool plugged)
+{
+ mutex_lock(&connector->hdmi_audio.lock);
+
+ connector->hdmi_audio.last_state = plugged;
+
+ if (connector->hdmi_audio.plugged_cb &&
+ connector->hdmi_audio.plugged_cb_dev)
+ connector->hdmi_audio.plugged_cb(connector->hdmi_audio.plugged_cb_dev,
+ connector->hdmi_audio.last_state);
+
+ mutex_unlock(&connector->hdmi_audio.lock);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_audio_plugged_notify);
+
+static const struct hdmi_codec_ops drm_connector_hdmi_audio_ops = {
+ .audio_startup = drm_connector_hdmi_audio_startup,
+ .prepare = drm_connector_hdmi_audio_prepare,
+ .audio_shutdown = drm_connector_hdmi_audio_shutdown,
+ .mute_stream = drm_connector_hdmi_audio_mute_stream,
+ .get_eld = drm_connector_hdmi_audio_get_eld,
+ .get_dai_id = drm_connector_hdmi_audio_get_dai_id,
+ .hook_plugged_cb = drm_connector_hdmi_audio_hook_plugged_cb,
+};
+
+/**
+ * drm_connector_hdmi_audio_init - Initialize HDMI Codec device for the DRM connector
+ * @connector: A pointer to the connector to allocate codec for
+ * @hdmi_codec_dev: device to be used as a parent for the HDMI Codec
+ * @funcs: callbacks for this HDMI Codec
+ * @max_i2s_playback_channels: maximum number of playback I2S channels
+ * @spdif_playback: set if HDMI codec has S/PDIF playback port
+ * @dai_port: sound DAI port, -1 if it is not enabled
+ *
+ * Create an HDMI codec device to be used with the specified connector.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_hdmi_audio_init(struct drm_connector *connector,
+ struct device *hdmi_codec_dev,
+ const struct drm_connector_hdmi_audio_funcs *funcs,
+ unsigned int max_i2s_playback_channels,
+ bool spdif_playback,
+ int dai_port)
+{
+ struct hdmi_codec_pdata codec_pdata = {
+ .ops = &drm_connector_hdmi_audio_ops,
+ .max_i2s_channels = max_i2s_playback_channels,
+ .i2s = !!max_i2s_playback_channels,
+ .spdif = spdif_playback,
+ .no_i2s_capture = true,
+ .no_spdif_capture = true,
+ .data = connector,
+ };
+ struct platform_device *pdev;
+
+ if (!funcs ||
+ !funcs->prepare ||
+ !funcs->shutdown)
+ return -EINVAL;
+
+ connector->hdmi_audio.funcs = funcs;
+ connector->hdmi_audio.dai_port = dai_port;
+
+ pdev = platform_device_register_data(hdmi_codec_dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_pdata, sizeof(codec_pdata));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ connector->hdmi_audio.codec_pdev = pdev;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_hdmi_audio_init);
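drm_connector_hdmi_audio_init() requires @funcs to provide at least .prepare and .shutdown; .startup and .mute_stream are optional, as the ops wrappers above show. A minimal sketch of a driver wiring up the helper (the my_* callbacks are hypothetical driver code):

.. code-block:: c

   static const struct drm_connector_hdmi_audio_funcs my_audio_funcs = {
           .prepare  = my_audio_prepare,  /* mandatory */
           .shutdown = my_audio_shutdown, /* mandatory */
   };

   ret = drm_connector_hdmi_audio_init(connector, hdmi_codec_dev,
                                       &my_audio_funcs,
                                       8,     /* max I2S playback channels */
                                       false, /* no S/PDIF playback port */
                                       -1);   /* no sound DAI port */
   if (ret)
           return ret;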
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index feb7a3a75981..9b2ee2385634 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -5,6 +5,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -347,6 +348,8 @@ static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
is_limited_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL;
int ret;
+ infoframe->set = false;
+
ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, mode);
if (ret)
return ret;
@@ -376,6 +379,8 @@ static int hdmi_generate_spd_infoframe(const struct drm_connector *connector,
&infoframe->data.spd;
int ret;
+ infoframe->set = false;
+
ret = hdmi_spd_infoframe_init(frame,
connector->hdmi.vendor,
connector->hdmi.product);
@@ -398,6 +403,8 @@ static int hdmi_generate_hdr_infoframe(const struct drm_connector *connector,
&infoframe->data.drm;
int ret;
+ infoframe->set = false;
+
if (connector->max_bpc < 10)
return 0;
@@ -425,6 +432,8 @@ static int hdmi_generate_hdmi_vendor_infoframe(const struct drm_connector *conne
&infoframe->data.vendor.hdmi;
int ret;
+ infoframe->set = false;
+
if (!info->has_hdmi_infoframe)
return 0;
@@ -494,6 +503,9 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
connector_state_get_mode(new_conn_state);
int ret;
+ if (!new_conn_state->crtc || !new_conn_state->best_encoder)
+ return 0;
+
new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
ret = hdmi_compute_config(connector, new_conn_state, mode);
@@ -521,6 +533,27 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);
+/**
+ * drm_hdmi_connector_mode_valid() - Check if mode is valid for HDMI connector
+ * @connector: DRM connector to validate the mode
+ * @mode: Display mode to validate
+ *
+ * Generic .mode_valid implementation for HDMI connectors.
+ */
+enum drm_mode_status
+drm_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ unsigned long long clock;
+
+ clock = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
+ if (!clock)
+ return MODE_ERROR;
+
+ return hdmi_clock_valid(connector, mode, clock);
+}
+EXPORT_SYMBOL(drm_hdmi_connector_mode_valid);
+
static int clear_device_infoframe(struct drm_connector *connector,
enum hdmi_infoframe_type type)
{
@@ -748,3 +781,61 @@ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(struct drm_connector *con
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_clear_audio_infoframe);
+
+static void
+drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ const struct drm_edid *drm_edid;
+
+ if (status == connector_status_disconnected) {
+ // TODO: also handle CEC and scrambler, HDMI sink disconnected.
+ drm_connector_hdmi_audio_plugged_notify(connector, false);
+ drm_edid_connector_update(connector, NULL);
+ return;
+ }
+
+ if (connector->hdmi.funcs->read_edid)
+ drm_edid = connector->hdmi.funcs->read_edid(connector);
+ else
+ drm_edid = drm_edid_read(connector);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+ drm_edid_free(drm_edid);
+
+ if (status == connector_status_connected) {
+ // TODO: also handle CEC and scrambler, HDMI sink is now connected.
+ drm_connector_hdmi_audio_plugged_notify(connector, true);
+ }
+}
+
+/**
+ * drm_atomic_helper_connector_hdmi_hotplug - Handle the hotplug event for the HDMI connector
+ * @connector: A pointer to the HDMI connector
+ * @status: Connection status
+ *
+ * This function should be called as part of the .detect() / .detect_ctx()
+ * callbacks to update the HDMI-specific connector data.
+ */
+void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ drm_atomic_helper_connector_hdmi_update(connector, status);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_hotplug);
+
+/**
+ * drm_atomic_helper_connector_hdmi_force - HDMI Connector implementation of the force callback
+ * @connector: A pointer to the HDMI connector
+ *
+ * This function implements the .force() callback for HDMI connectors. It can
+ * either be used directly as the callback or be called from within the
+ * driver's .force() implementation to keep the HDMI-specific connector data
+ * up to date.
+ */
+void drm_atomic_helper_connector_hdmi_force(struct drm_connector *connector)
+{
+ drm_atomic_helper_connector_hdmi_update(connector, connector->status);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_force);
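Taken together, the new helpers let an HDMI connector driver keep its EDID data and audio-plugged state current from the standard probe hooks. A sketch, assuming a hypothetical my_hw_detect() that reads the hardware's HPD state:

.. code-block:: c

   static int my_detect_ctx(struct drm_connector *connector,
                            struct drm_modeset_acquire_ctx *ctx, bool force)
   {
           enum drm_connector_status status = my_hw_detect(connector);

           drm_atomic_helper_connector_hdmi_hotplug(connector, status);

           return status;
   }

   static const struct drm_connector_helper_funcs my_helper_funcs = {
           .detect_ctx = my_detect_ctx,
           .mode_valid = drm_hdmi_connector_mode_valid,
   };

drm_atomic_helper_connector_hdmi_force() can likewise be plugged in directly as the &drm_connector_funcs.force hook.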
diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c
deleted file mode 100644
index 5729f3bb4398..000000000000
--- a/drivers/gpu/drm/drm_aperture.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// SPDX-License-Identifier: MIT
-
-#include <linux/aperture.h>
-#include <linux/platform_device.h>
-
-#include <drm/drm_aperture.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-/**
- * DOC: overview
- *
- * A graphics device might be supported by different drivers, but only one
- * driver can be active at any given time. Many systems load a generic
- * graphics drivers, such as EFI-GOP or VESA, early during the boot process.
- * During later boot stages, they replace the generic driver with a dedicated,
- * hardware-specific driver. To take over the device the dedicated driver
- * first has to remove the generic driver. DRM aperture functions manage
- * ownership of DRM framebuffer memory and hand-over between drivers.
- *
- * DRM drivers should call drm_aperture_remove_conflicting_framebuffers()
- * at the top of their probe function. The function removes any generic
- * driver that is currently associated with the given framebuffer memory.
- * If the framebuffer is located at PCI BAR 0, the rsp code looks as in the
- * example given below.
- *
- * .. code-block:: c
- *
- * static const struct drm_driver example_driver = {
- * ...
- * };
- *
- * static int remove_conflicting_framebuffers(struct pci_dev *pdev)
- * {
- * resource_size_t base, size;
- * int ret;
- *
- * base = pci_resource_start(pdev, 0);
- * size = pci_resource_len(pdev, 0);
- *
- * return drm_aperture_remove_conflicting_framebuffers(base, size,
- * &example_driver);
- * }
- *
- * static int probe(struct pci_dev *pdev)
- * {
- * int ret;
- *
- * // Remove any generic drivers...
- * ret = remove_conflicting_framebuffers(pdev);
- * if (ret)
- * return ret;
- *
- * // ... and initialize the hardware.
- * ...
- *
- * drm_dev_register();
- *
- * return 0;
- * }
- *
- * PCI device drivers should call
- * drm_aperture_remove_conflicting_pci_framebuffers() and let it detect the
- * framebuffer apertures automatically. Device drivers without knowledge of
- * the framebuffer's location shall call drm_aperture_remove_framebuffers(),
- * which removes all drivers for known framebuffer.
- *
- * Drivers that are susceptible to being removed by other drivers, such as
- * generic EFI or VESA drivers, have to register themselves as owners of their
- * given framebuffer memory. Ownership of the framebuffer memory is achieved
- * by calling devm_aperture_acquire_from_firmware(). On success, the driver
- * is the owner of the framebuffer range. The function fails if the
- * framebuffer is already owned by another driver. See below for an example.
- *
- * .. code-block:: c
- *
- * static int acquire_framebuffers(struct drm_device *dev, struct platform_device *pdev)
- * {
- * resource_size_t base, size;
- *
- * mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- * if (!mem)
- * return -EINVAL;
- * base = mem->start;
- * size = resource_size(mem);
- *
- * return devm_acquire_aperture_from_firmware(dev, base, size);
- * }
- *
- * static int probe(struct platform_device *pdev)
- * {
- * struct drm_device *dev;
- * int ret;
- *
- * // ... Initialize the device...
- * dev = devm_drm_dev_alloc();
- * ...
- *
- * // ... and acquire ownership of the framebuffer.
- * ret = acquire_framebuffers(dev, pdev);
- * if (ret)
- * return ret;
- *
- * drm_dev_register(dev, 0);
- *
- * return 0;
- * }
- *
- * The generic driver is now subject to forced removal by other drivers. This
- * only works for platform drivers that support hot unplug.
- * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al.
- * for the registered framebuffer range, the aperture helpers call
- * platform_device_unregister() and the generic driver unloads itself. It
- * may not access the device's registers, framebuffer memory, ROM, etc
- * afterwards.
- */
-
-/**
- * devm_aperture_acquire_from_firmware - Acquires ownership of a firmware framebuffer
- * on behalf of a DRM driver.
- * @dev: the DRM device to own the framebuffer memory
- * @base: the framebuffer's byte offset in physical memory
- * @size: the framebuffer size in bytes
- *
- * Installs the given device as the new owner of the framebuffer. The function
- * expects the framebuffer to be provided by a platform device that has been
- * set up by firmware. Firmware can be any generic interface, such as EFI,
- * VESA, VGA, etc. If the native hardware driver takes over ownership of the
- * framebuffer range, the firmware state gets lost. Aperture helpers will then
- * unregister the platform device automatically. Acquired apertures are
- * released automatically if the underlying device goes away.
- *
- * The function fails if the framebuffer range, or parts of it, is currently
- * owned by another driver. To evict current owners, callers should use
- * drm_aperture_remove_conflicting_framebuffers() et al. before calling this
- * function. The function also fails if the given device is not a platform
- * device.
- *
- * Returns:
- * 0 on success, or a negative errno value otherwise.
- */
-int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
- resource_size_t size)
-{
- struct platform_device *pdev;
-
- if (drm_WARN_ON(dev, !dev_is_platform(dev->dev)))
- return -EINVAL;
-
- pdev = to_platform_device(dev->dev);
-
- return devm_aperture_acquire_for_platform_device(pdev, base, size);
-}
-EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
-
-/**
- * drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range
- * @base: the aperture's base address in physical memory
- * @size: aperture size in bytes
- * @req_driver: requesting DRM driver
- *
- * This function removes graphics device drivers which use the memory range described by
- * @base and @size.
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise
- */
-int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
- const struct drm_driver *req_driver)
-{
- return aperture_remove_conflicting_devices(base, size, req_driver->name);
-}
-EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers);
-
-/**
- * drm_aperture_remove_conflicting_pci_framebuffers - remove existing framebuffers for PCI devices
- * @pdev: PCI device
- * @req_driver: requesting DRM driver
- *
- * This function removes graphics device drivers using the memory range configured
- * for any of @pdev's memory bars. The function assumes that a PCI device with
- * shadowed ROM drives a primary display and so kicks out vga16fb.
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise
- */
-int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- const struct drm_driver *req_driver)
-{
- return aperture_remove_conflicting_pci_devices(pdev, req_driver->name);
-}
-EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers);
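As the deleted function bodies show, the drm_aperture_*() entry points were thin wrappers around the generic helpers in <linux/aperture.h>, so callers can switch to those directly. A migration fragment (my_driver stands in for the driver's &drm_driver):

.. code-block:: c

   #include <linux/aperture.h>

   /* was: drm_aperture_remove_conflicting_pci_framebuffers(pdev, &my_driver) */
   ret = aperture_remove_conflicting_pci_devices(pdev, my_driver.name);
   if (ret)
           return ret;

   /* was: drm_aperture_remove_conflicting_framebuffers(base, size, &my_driver) */
   ret = aperture_remove_conflicting_devices(base, size, my_driver.name);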
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 0fc99da93afe..9ea2611770f4 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1132,6 +1132,8 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
+ drm_printf(p, "\tinterlace_allowed=%d\n", connector->interlace_allowed);
+ drm_printf(p, "\tycbcr_420_allowed=%d\n", connector->ycbcr_420_allowed);
drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
drm_printf(p, "\tcolorspace=%s\n", drm_get_colorspace_name(state->colorspace));
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 43cdf39019a4..5186d2114a50 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3015,7 +3015,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
bool stall)
{
int i, ret;
- unsigned long flags;
+ unsigned long flags = 0;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
struct drm_crtc *crtc;
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c6af46dd02bf..241a384ebce3 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -207,6 +207,10 @@ void drm_bridge_add(struct drm_bridge *bridge)
{
mutex_init(&bridge->hpd_mutex);
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
+ BIT(HDMI_COLORSPACE_YUV420));
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index bfedcbf516db..549b28a5918c 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -10,7 +10,6 @@
#include <linux/slab.h>
#include <drm/drm_client.h>
-#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -172,99 +171,6 @@ void drm_client_release(struct drm_client_dev *client)
}
EXPORT_SYMBOL(drm_client_release);
-/**
- * drm_client_dev_unregister - Unregister clients
- * @dev: DRM device
- *
- * This function releases all clients by calling each client's
- * &drm_client_funcs.unregister callback. The callback function
- * is responsibe for releaseing all resources including the client
- * itself.
- *
- * The helper drm_dev_unregister() calls this function. Drivers
- * that use it don't need to call this function themselves.
- */
-void drm_client_dev_unregister(struct drm_device *dev)
-{
- struct drm_client_dev *client, *tmp;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- mutex_lock(&dev->clientlist_mutex);
- list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
- list_del(&client->list);
- if (client->funcs && client->funcs->unregister) {
- client->funcs->unregister(client);
- } else {
- drm_client_release(client);
- kfree(client);
- }
- }
- mutex_unlock(&dev->clientlist_mutex);
-}
-EXPORT_SYMBOL(drm_client_dev_unregister);
-
-/**
- * drm_client_dev_hotplug - Send hotplug event to clients
- * @dev: DRM device
- *
- * This function calls the &drm_client_funcs.hotplug callback on the attached clients.
- *
- * drm_kms_helper_hotplug_event() calls this function, so drivers that use it
- * don't need to call this function themselves.
- */
-void drm_client_dev_hotplug(struct drm_device *dev)
-{
- struct drm_client_dev *client;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- if (!dev->mode_config.num_connector) {
- drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n");
- return;
- }
-
- mutex_lock(&dev->clientlist_mutex);
- list_for_each_entry(client, &dev->clientlist, list) {
- if (!client->funcs || !client->funcs->hotplug)
- continue;
-
- if (client->hotplug_failed)
- continue;
-
- ret = client->funcs->hotplug(client);
- drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
- if (ret)
- client->hotplug_failed = true;
- }
- mutex_unlock(&dev->clientlist_mutex);
-}
-EXPORT_SYMBOL(drm_client_dev_hotplug);
-
-void drm_client_dev_restore(struct drm_device *dev)
-{
- struct drm_client_dev *client;
- int ret;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- mutex_lock(&dev->clientlist_mutex);
- list_for_each_entry(client, &dev->clientlist, list) {
- if (!client->funcs || !client->funcs->restore)
- continue;
-
- ret = client->funcs->restore(client);
- drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
- if (!ret) /* The first one to return zero gets the privilege to restore */
- break;
- }
- mutex_unlock(&dev->clientlist_mutex);
-}
-
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
if (buffer->gem) {
@@ -584,30 +490,3 @@ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_re
0, 0, NULL, 0);
}
EXPORT_SYMBOL(drm_client_framebuffer_flush);
-
-#ifdef CONFIG_DEBUG_FS
-static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
-{
- struct drm_debugfs_entry *entry = m->private;
- struct drm_device *dev = entry->dev;
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_client_dev *client;
-
- mutex_lock(&dev->clientlist_mutex);
- list_for_each_entry(client, &dev->clientlist, list)
- drm_printf(&p, "%s\n", client->name);
- mutex_unlock(&dev->clientlist_mutex);
-
- return 0;
-}
-
-static const struct drm_debugfs_info drm_client_debugfs_list[] = {
- { "internal_clients", drm_client_debugfs_internal_clients, 0 },
-};
-
-void drm_client_debugfs_init(struct drm_device *dev)
-{
- drm_debugfs_add_files(dev, drm_client_debugfs_list,
- ARRAY_SIZE(drm_client_debugfs_list));
-}
-#endif
diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c
new file mode 100644
index 000000000000..e303de564485
--- /dev/null
+++ b/drivers/gpu/drm/drm_client_event.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/*
+ * Copyright 2018 Noralf Trønnes
+ */
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+
+#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+
+#include "drm_internal.h"
+
+/**
+ * drm_client_dev_unregister - Unregister clients
+ * @dev: DRM device
+ *
+ * This function releases all clients by calling each client's
+ * &drm_client_funcs.unregister callback. The callback function
+ * is responsible for releasing all resources including the client
+ * itself.
+ *
+ * The helper drm_dev_unregister() calls this function. Drivers
+ * that use it don't need to call this function themselves.
+ */
+void drm_client_dev_unregister(struct drm_device *dev)
+{
+ struct drm_client_dev *client, *tmp;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
+ list_del(&client->list);
+ if (client->funcs && client->funcs->unregister) {
+ client->funcs->unregister(client);
+ } else {
+ drm_client_release(client);
+ kfree(client);
+ }
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_dev_unregister);
+
+/**
+ * drm_client_dev_hotplug - Send hotplug event to clients
+ * @dev: DRM device
+ *
+ * This function calls the &drm_client_funcs.hotplug callback on the attached clients.
+ *
+ * drm_kms_helper_hotplug_event() calls this function, so drivers that use it
+ * don't need to call this function themselves.
+ */
+void drm_client_dev_hotplug(struct drm_device *dev)
+{
+ struct drm_client_dev *client;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ if (!dev->mode_config.num_connector) {
+ drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n");
+ return;
+ }
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list) {
+ if (!client->funcs || !client->funcs->hotplug)
+ continue;
+
+ if (client->hotplug_failed)
+ continue;
+
+ ret = client->funcs->hotplug(client);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
+ if (ret)
+ client->hotplug_failed = true;
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_dev_hotplug);
+
+void drm_client_dev_restore(struct drm_device *dev)
+{
+ struct drm_client_dev *client;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list) {
+ if (!client->funcs || !client->funcs->restore)
+ continue;
+
+ ret = client->funcs->restore(client);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
+ if (!ret) /* The first one to return zero gets the privilege to restore */
+ break;
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+
+static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
+{
+ struct drm_device *dev = client->dev;
+ int ret = 0;
+
+ if (drm_WARN_ON_ONCE(dev, client->suspended))
+ return 0;
+
+ if (client->funcs && client->funcs->suspend)
+ ret = client->funcs->suspend(client, holds_console_lock);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
+
+ client->suspended = true;
+
+ return ret;
+}
+
+void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock)
+{
+ struct drm_client_dev *client;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list) {
+ if (!client->suspended)
+ drm_client_suspend(client, holds_console_lock);
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_dev_suspend);
+
+static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock)
+{
+ struct drm_device *dev = client->dev;
+ int ret = 0;
+
+ if (drm_WARN_ON_ONCE(dev, !client->suspended))
+ return 0;
+
+ if (client->funcs && client->funcs->resume)
+ ret = client->funcs->resume(client, holds_console_lock);
+ drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
+
+ client->suspended = false;
+
+ return ret;
+}
+
+void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock)
+{
+ struct drm_client_dev *client;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list) {
+ if (client->suspended)
+ drm_client_resume(client, holds_console_lock);
+ }
+ mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_dev_resume);
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_client_dev *client;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_for_each_entry(client, &dev->clientlist, list)
+ drm_printf(&p, "%s\n", client->name);
+ mutex_unlock(&dev->clientlist_mutex);
+
+ return 0;
+}
+
+static const struct drm_debugfs_info drm_client_debugfs_list[] = {
+ { "internal_clients", drm_client_debugfs_internal_clients, 0 },
+};
+
+void drm_client_debugfs_init(struct drm_device *dev)
+{
+ drm_debugfs_add_files(dev, drm_client_debugfs_list,
+ ARRAY_SIZE(drm_client_debugfs_list));
+}
+#endif
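The new suspend/resume entry points walk the client list under clientlist_mutex and skip clients that are already in the requested state, so drivers can call them unconditionally from their PM paths. A hedged sketch (the drvdata layout is an assumption):

.. code-block:: c

   static int my_pm_suspend(struct device *dev)
   {
           struct drm_device *drm = dev_get_drvdata(dev); /* assumed layout */

           drm_client_dev_suspend(drm, false); /* console_lock not held */
           /* ... quiesce the hardware ... */
           return 0;
   }

   static int my_pm_resume(struct device *dev)
   {
           struct drm_device *drm = dev_get_drvdata(dev); /* assumed layout */

           /* ... bring the hardware back up ... */
           drm_client_dev_resume(drm, false);
           return 0;
   }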
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index cee5eafbfb81..aca442c25209 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -145,7 +145,7 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector)
}
static struct drm_display_mode *
-drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int height)
+drm_connector_preferred_mode(struct drm_connector *connector, int width, int height)
{
struct drm_display_mode *mode;
@@ -159,6 +159,12 @@ drm_connector_has_preferred_mode(struct drm_connector *connector, int width, int
return NULL;
}
+static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector)
+{
+ return list_first_entry_or_null(&connector->modes,
+ struct drm_display_mode, head);
+}
+
static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *cmdline_mode;
@@ -331,7 +337,7 @@ static bool drm_client_target_cloned(struct drm_device *dev,
if (!modes[i])
can_clone = false;
}
- kfree(dmt_mode);
+ drm_mode_destroy(dev, dmt_mode);
if (can_clone) {
drm_dbg_kms(dev, "can clone using 1024x768\n");
@@ -441,13 +447,11 @@ retry:
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n",
connector->base.id, connector->name,
connector->tile_group ? connector->tile_group->id : 0);
- modes[i] = drm_connector_has_preferred_mode(connector, width, height);
+ modes[i] = drm_connector_preferred_mode(connector, width, height);
}
/* No preferred modes, pick one off the list */
- if (!modes[i] && !list_empty(&connector->modes)) {
- list_for_each_entry(modes[i], &connector->modes, head)
- break;
- }
+ if (!modes[i])
+ modes[i] = drm_connector_first_mode(connector);
/*
* In case of tiled mode if all tiles not present fallback to
* first available non tiled mode.
@@ -531,7 +535,7 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client,
my_score++;
if (connector->cmdline_mode.specified)
my_score++;
- if (drm_connector_has_preferred_mode(connector, width, height))
+ if (drm_connector_preferred_mode(connector, width, height))
my_score++;
/*
@@ -686,16 +690,14 @@ retry:
"[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n",
connector->base.id, connector->name,
str_yes_no(connector->has_tile));
- modes[i] = drm_connector_has_preferred_mode(connector, width, height);
+ modes[i] = drm_connector_preferred_mode(connector, width, height);
}
/* No preferred mode marked by the EDID? Are there any modes? */
if (!modes[i] && !list_empty(&connector->modes)) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n",
connector->base.id, connector->name);
- modes[i] = list_first_entry(&connector->modes,
- struct drm_display_mode,
- head);
+ modes[i] = drm_connector_first_mode(connector);
}
/* last resort: use current mode */
@@ -741,6 +743,15 @@ retry:
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
+ for (i = 0; i < count; i++) {
+ struct drm_connector *connector = connectors[i];
+
+ if (connector->has_tile)
+ drm_client_get_tile_offsets(dev, connectors, connector_count,
+ modes, offsets, i,
+ connector->tile_h_loc, connector->tile_v_loc);
+ }
+
/*
* If the BIOS didn't enable everything it could, fall back to have the
* same user experiencing of lighting up as much as possible like the
@@ -878,7 +889,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
break;
}
- kfree(modeset->mode);
+ drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev, mode);
if (!modeset->mode) {
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index fc35f47e2849..5f24d6b41cc6 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,6 +33,7 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/uaccess.h>
@@ -218,11 +219,11 @@ void drm_connector_free_work_fn(struct work_struct *work)
}
}
-static int __drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type,
- struct i2c_adapter *ddc)
+static int drm_connector_init_only(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@@ -273,12 +274,15 @@ static int __drm_connector_init(struct drm_device *dev,
/* provide ddc symlink in sysfs */
connector->ddc = ddc;
+ INIT_LIST_HEAD(&connector->head);
INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
+ mutex_init(&connector->eld_mutex);
mutex_init(&connector->edid_override_mutex);
mutex_init(&connector->hdmi.infoframes.lock);
+ mutex_init(&connector->hdmi_audio.lock);
connector->edid_blob_ptr = NULL;
connector->epoch_counter = 0;
connector->tile_blob_ptr = NULL;
@@ -288,14 +292,6 @@ static int __drm_connector_init(struct drm_device *dev,
drm_connector_get_cmdline_mode(connector);
- /* We should add connectors at the end to avoid upsetting the connector
- * index too much.
- */
- spin_lock_irq(&config->connector_list_lock);
- list_add_tail(&connector->head, &config->connector_list);
- config->num_connector++;
- spin_unlock_irq(&config->connector_list_lock);
-
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
drm_connector_attach_edid_property(connector);
@@ -332,6 +328,54 @@ out_put:
return ret;
}
+static void drm_connector_add(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (drm_WARN_ON(dev, !list_empty(&connector->head)))
+ return;
+
+ spin_lock_irq(&config->connector_list_lock);
+ list_add_tail(&connector->head, &config->connector_list);
+ config->num_connector++;
+ spin_unlock_irq(&config->connector_list_lock);
+}
+
+static void drm_connector_remove(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ /*
+ * For dynamic connectors drm_connector_cleanup() can call this function
+ * before the connector is registered and added to the list.
+ */
+ if (list_empty(&connector->head))
+ return;
+
+ spin_lock_irq(&dev->mode_config.connector_list_lock);
+ list_del_init(&connector->head);
+ dev->mode_config.num_connector--;
+ spin_unlock_irq(&dev->mode_config.connector_list_lock);
+}
+
+static int drm_connector_init_and_add(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
+{
+ int ret;
+
+ ret = drm_connector_init_only(dev, connector, funcs, connector_type, ddc);
+ if (ret)
+ return ret;
+
+ drm_connector_add(connector);
+
+ return 0;
+}
+
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
@@ -361,11 +405,52 @@ int drm_connector_init(struct drm_device *dev,
if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
return -EINVAL;
- return __drm_connector_init(dev, connector, funcs, connector_type, NULL);
+ return drm_connector_init_and_add(dev, connector, funcs, connector_type, NULL);
}
EXPORT_SYMBOL(drm_connector_init);
/**
+ * drm_connector_dynamic_init - Init a preallocated dynamic connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ * @ddc: pointer to the associated ddc adapter
+ *
+ * Initialises a preallocated dynamic connector. Connectors should be
+ * subclassed as part of driver connector objects. The connector
+ * structure should not be allocated with devm_kzalloc().
+ *
+ * Drivers should call this for dynamic connectors which can be hotplugged
+ * after drm_dev_register() has been called already, e.g. DP MST connectors.
+ * For all other - static - connectors, drivers should call one of the
+ * drm_connector_init*()/drmm_connector_init*() functions.
+ *
+ * After calling this function the drivers must call
+ * drm_connector_dynamic_register().
+ *
+ * To remove the connector the driver must call drm_connector_unregister()
+ * followed by drm_connector_put(). Putting the last reference will call the
+ * driver's &drm_connector_funcs.destroy hook, which in turn must call
+ * drm_connector_cleanup() and free the connector structure.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_dynamic_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
+{
+ if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
+ return -EINVAL;
+
+ return drm_connector_init_only(dev, connector, funcs, connector_type, ddc);
+}
+EXPORT_SYMBOL(drm_connector_dynamic_init);
+
+/**
* drm_connector_init_with_ddc - Init a preallocated connector
* @dev: DRM device
* @connector: the connector to init
@@ -398,7 +483,7 @@ int drm_connector_init_with_ddc(struct drm_device *dev,
if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
return -EINVAL;
- return __drm_connector_init(dev, connector, funcs, connector_type, ddc);
+ return drm_connector_init_and_add(dev, connector, funcs, connector_type, ddc);
}
EXPORT_SYMBOL(drm_connector_init_with_ddc);
@@ -442,7 +527,7 @@ int drmm_connector_init(struct drm_device *dev,
if (drm_WARN_ON(dev, funcs && funcs->destroy))
return -EINVAL;
- ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc);
+ ret = drm_connector_init_and_add(dev, connector, funcs, connector_type, ddc);
if (ret)
return ret;
@@ -507,6 +592,9 @@ int drmm_connector_hdmi_init(struct drm_device *dev,
if (!supported_formats || !(supported_formats & BIT(HDMI_COLORSPACE_RGB)))
return -EINVAL;
+ if (connector->ycbcr_420_allowed != !!(supported_formats & BIT(HDMI_COLORSPACE_YUV420)))
+ return -EINVAL;
+
if (!(max_bpc == 8 || max_bpc == 10 || max_bpc == 12))
return -EINVAL;
@@ -631,6 +719,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
DRM_CONNECTOR_REGISTERED))
drm_connector_unregister(connector);
+ platform_device_unregister(connector->hdmi_audio.codec_pdev);
+
if (connector->privacy_screen) {
drm_privacy_screen_put(connector->privacy_screen);
connector->privacy_screen = NULL;
@@ -659,16 +749,15 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->name = NULL;
fwnode_handle_put(connector->fwnode);
connector->fwnode = NULL;
- spin_lock_irq(&dev->mode_config.connector_list_lock);
- list_del(&connector->head);
- dev->mode_config.num_connector--;
- spin_unlock_irq(&dev->mode_config.connector_list_lock);
+
+ drm_connector_remove(connector);
WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
if (connector->state && connector->funcs->atomic_destroy_state)
connector->funcs->atomic_destroy_state(connector,
connector->state);
+ mutex_destroy(&connector->hdmi_audio.lock);
mutex_destroy(&connector->hdmi.infoframes.lock);
mutex_destroy(&connector->mutex);
@@ -683,14 +772,17 @@ EXPORT_SYMBOL(drm_connector_cleanup);
* drm_connector_register - register a connector
* @connector: the connector to register
*
- * Register userspace interfaces for a connector. Only call this for connectors
- * which can be hotplugged after drm_dev_register() has been called already,
- * e.g. DP MST connectors. All other connectors will be registered automatically
- * when calling drm_dev_register().
+ * Register userspace interfaces for a connector. Drivers shouldn't call this
+ * function. Static connectors will be registered automatically by DRM core
+ * from drm_dev_register(), dynamic connectors (MST) should be registered by
+ * drivers calling drm_connector_dynamic_register().
*
* When the connector is no longer available, callers must call
* drm_connector_unregister().
*
+ * Note: Existing uses of this function in drivers should be a nop already and
+ * are scheduled to be removed.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -750,12 +842,43 @@ unlock:
EXPORT_SYMBOL(drm_connector_register);
/**
+ * drm_connector_dynamic_register - register a dynamic connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector. Only call this for connectors
+ * initialized by calling drm_connector_dynamic_init(). All other connectors
+ * will be registered automatically when calling drm_dev_register().
+ *
+ * When the connector is no longer available the driver must call
+ * drm_connector_unregister().
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_dynamic_register(struct drm_connector *connector)
+{
+ /* Was the connector inited already? */
+ if (WARN_ON(!(connector->funcs && connector->funcs->destroy)))
+ return -EINVAL;
+
+ drm_connector_add(connector);
+
+ return drm_connector_register(connector);
+}
+EXPORT_SYMBOL(drm_connector_dynamic_register);
+
+/**
* drm_connector_unregister - unregister a connector
* @connector: the connector to unregister
*
- * Unregister userspace interfaces for a connector. Only call this for
- * connectors which have been registered explicitly by calling
- * drm_connector_register().
+ * Unregister userspace interfaces for a connector. Drivers should call this
+ * for dynamic connectors (MST) only, which were registered explicitly by
+ * calling drm_connector_dynamic_register(). All other - static - connectors
+ * will be unregistered automatically by DRM core and drivers shouldn't call
+ * this function for those.
+ *
+ * Note: Existing uses of this function in drivers for static connectors
+ * should be a nop already and are scheduled to be removed.
*/
void drm_connector_unregister(struct drm_connector *connector)
{
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9d3e6dd68810..536409a35df4 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -32,7 +32,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -78,12 +77,14 @@ static int drm_clients_info(struct seq_file *m, void *data)
kuid_t uid;
seq_printf(m,
- "%20s %5s %3s master a %5s %10s\n",
+ "%20s %5s %3s master a %5s %10s %*s\n",
"command",
"tgid",
"dev",
"uid",
- "magic");
+ "magic",
+ DRM_CLIENT_NAME_MAX_LEN,
+ "name");
/* dev->filelist is sorted youngest first, but we want to present
* oldest first (i.e. kernel, servers, clients), so walk backwards.
@@ -94,19 +95,23 @@ static int drm_clients_info(struct seq_file *m, void *data)
struct task_struct *task;
struct pid *pid;
+ mutex_lock(&priv->client_name_lock);
rcu_read_lock(); /* Locks priv->pid and pid_task()->comm! */
pid = rcu_dereference(priv->pid);
task = pid_task(pid, PIDTYPE_TGID);
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
- seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
+ seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s\n",
task ? task->comm : "<unknown>",
pid_vnr(pid),
priv->minor->index,
is_current_master ? 'y' : 'n',
priv->authenticated ? 'y' : 'n',
from_kuid_munged(seq_user_ns(m), uid),
- priv->magic);
+ priv->magic,
+ DRM_CLIENT_NAME_MAX_LEN,
+ priv->client_name ? priv->client_name : "<unset>");
rcu_read_unlock();
+ mutex_unlock(&priv->client_name_lock);
}
mutex_unlock(&dev->filelist_mutex);
return 0;
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
new file mode 100644
index 000000000000..cb2ad12bce57
--- /dev/null
+++ b/drivers/gpu/drm/drm_draw.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/*
+ * Copyright (c) 2023 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/iosys-map.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "drm_draw_internal.h"
+
+/*
+ * Conversions from xrgb8888
+ */
+
+static u16 convert_xrgb8888_to_rgb565(u32 pix)
+{
+ return ((pix & 0x00F80000) >> 8) |
+ ((pix & 0x0000FC00) >> 5) |
+ ((pix & 0x000000F8) >> 3);
+}
+
+static u16 convert_xrgb8888_to_rgba5551(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2) |
+ BIT(0); /* set alpha bit */
+}
+
+static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
+{
+ return ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static u16 convert_xrgb8888_to_argb1555(u32 pix)
+{
+ return BIT(15) | /* set alpha bit */
+ ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+}
+
+static u32 convert_xrgb8888_to_argb8888(u32 pix)
+{
+ return pix | GENMASK(31, 24); /* fill alpha bits */
+}
+
+static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ ((pix & 0xff000000) >> 24) << 24;
+}
+
+static u32 convert_xrgb8888_to_abgr8888(u32 pix)
+{
+ return ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ GENMASK(31, 24); /* fill alpha bits */
+}
+
+static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ return pix | ((pix >> 8) & 0x00300C03);
+}
+
+static u32 convert_xrgb8888_to_argb2101010(u32 pix)
+{
+ pix = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+}
+
+static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
+{
+ pix = ((pix & 0x00FF0000) >> 14) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x000000FF) << 22);
+ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+}
+
+/**
+ * drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
+ * @color: input color, in xrgb8888 format
+ * @format: output format
+ *
+ * Returns:
+ * Color in the format specified, casted to u32.
+ * Or 0 if the format is not supported.
+ */
+u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ return convert_xrgb8888_to_rgb565(color);
+ case DRM_FORMAT_RGBA5551:
+ return convert_xrgb8888_to_rgba5551(color);
+ case DRM_FORMAT_XRGB1555:
+ return convert_xrgb8888_to_xrgb1555(color);
+ case DRM_FORMAT_ARGB1555:
+ return convert_xrgb8888_to_argb1555(color);
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ return color;
+ case DRM_FORMAT_ARGB8888:
+ return convert_xrgb8888_to_argb8888(color);
+ case DRM_FORMAT_XBGR8888:
+ return convert_xrgb8888_to_xbgr8888(color);
+ case DRM_FORMAT_ABGR8888:
+ return convert_xrgb8888_to_abgr8888(color);
+ case DRM_FORMAT_XRGB2101010:
+ return convert_xrgb8888_to_xrgb2101010(color);
+ case DRM_FORMAT_ARGB2101010:
+ return convert_xrgb8888_to_argb2101010(color);
+ case DRM_FORMAT_ABGR2101010:
+ return convert_xrgb8888_to_abgr2101010(color);
+ default:
+ WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(drm_draw_color_from_xrgb8888);
+
+/*
+ * Blit functions
+ */
+void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u16 fg16)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
+}
+EXPORT_SYMBOL(drm_draw_blit16);
+
+void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ u32 off = y * dpitch + x * 3;
+
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
+				/* write blue-green-red to the output in little-endian order */
+ iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
+ iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
+ iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(drm_draw_blit24);
+
+void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
+}
+EXPORT_SYMBOL(drm_draw_blit32);
+
+/*
+ * Fill functions
+ */
+void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u16 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
+}
+EXPORT_SYMBOL(drm_draw_fill16);
+
+void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+		     u32 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ unsigned int off = y * dpitch + x * 3;
+
+			/* write blue-green-red to the output in little-endian order */
+ iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
+ iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
+ iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_draw_fill24);
+
+void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color)
+{
+ unsigned int y, x;
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
+}
+EXPORT_SYMBOL(drm_draw_fill32);
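As a quick sanity check of the packing arithmetic above, here is a worked example (a minimal sketch with an arbitrary pixel value, not part of the patch) of what convert_xrgb8888_to_rgb565() computes:

	u32 pix = 0x00ff8040;	/* R = 0xff, G = 0x80, B = 0x40 */
	u16 rgb565;

	rgb565  = (pix & 0x00f80000) >> 8;	/* 5 MSBs of red   -> 0xf800 */
	rgb565 |= (pix & 0x0000fc00) >> 5;	/* 6 MSBs of green -> 0x0400 */
	rgb565 |= (pix & 0x000000f8) >> 3;	/* 5 MSBs of blue  -> 0x0008 */
	/* rgb565 == 0xfc08, matching convert_xrgb8888_to_rgb565(pix) */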
diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h
new file mode 100644
index 000000000000..f121ee7339dc
--- /dev/null
+++ b/drivers/gpu/drm/drm_draw_internal.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/*
+ * Copyright (c) 2023 Red Hat.
+ * Author: Jocelyn Falempe <jfalempe@redhat.com>
+ */
+
+#ifndef __DRM_DRAW_INTERNAL_H__
+#define __DRM_DRAW_INTERNAL_H__
+
+#include <linux/font.h>
+#include <linux/types.h>
+
+struct iosys_map;
+
+/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
+static inline bool drm_draw_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
+{
+ return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
+}
+
+static inline const u8 *drm_draw_get_char_bitmap(const struct font_desc *font,
+ char c, size_t font_pitch)
+{
+ return font->data + (c * font->height) * font_pitch;
+}
+
+u32 drm_draw_color_from_xrgb8888(u32 color, u32 format);
+
+void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u16 fg16);
+
+void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32);
+
+void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
+ const u8 *sbuf8, unsigned int spitch,
+ unsigned int height, unsigned int width,
+ unsigned int scale, u32 fg32);
+
+void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u16 color);
+
+void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+		     u32 color);
+
+void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
+ unsigned int height, unsigned int width,
+ u32 color);
+
+#endif /* __DRM_DRAW_INTERNAL_H__ */
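The internal header provides just enough to render text: look up a glyph bitmap, convert a color, and blit. A rough sketch, assuming font, dst_map and dst_pitch have been set up elsewhere by the caller (these names are hypothetical):

	size_t font_pitch = DIV_ROUND_UP(font->width, 8);
	const u8 *bitmap = drm_draw_get_char_bitmap(font, 'A', font_pitch);
	u32 fg = drm_draw_color_from_xrgb8888(0x00ffffff, DRM_FORMAT_XRGB8888);

	/* draw one white 'A' at the top-left of a 32-bpp buffer */
	drm_draw_blit32(dst_map, dst_pitch, bitmap, font_pitch,
			font->height, font->width, 1 /* 1:1 scale */, fg);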
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ac30b0ec9d93..3cf440eee8a2 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -26,6 +26,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
@@ -38,7 +39,7 @@
#include <drm/drm_accel.h>
#include <drm/drm_cache.h>
-#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -820,6 +821,37 @@ void drm_dev_put(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_put);
+static void drmm_cg_unregister_region(struct drm_device *dev, void *arg)
+{
+ dmem_cgroup_unregister_region(arg);
+}
+
+/**
+ * drmm_cgroup_register_region - Register a region of a DRM device to cgroups
+ * @dev: device for region
+ * @region_name: Region name for registering
+ * @size: Size of region in bytes
+ *
+ * This decreases the ref-count of @dev by one. The device is destroyed if the
+ * ref-count drops to zero.
+ */
+struct dmem_cgroup_region *drmm_cgroup_register_region(struct drm_device *dev, const char *region_name, u64 size)
+{
+ struct dmem_cgroup_region *region;
+ int ret;
+
+ region = dmem_cgroup_register_region(size, "drm/%s/%s", dev->unique, region_name);
+ if (IS_ERR_OR_NULL(region))
+ return region;
+
+ ret = drmm_add_action_or_reset(dev, drmm_cg_unregister_region, region);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return region;
+}
+EXPORT_SYMBOL_GPL(drmm_cgroup_register_region);
+
static int create_compat_control_link(struct drm_device *dev)
{
struct drm_minor *minor;
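A driver would typically call this once per memory region at probe time; cleanup is handled by the managed action installed above. A minimal sketch (the region name and size are illustrative, not from this patch):

	struct dmem_cgroup_region *region;

	region = drmm_cgroup_register_region(drm, "vram", vram_size);
	if (IS_ERR(region))
		return PTR_ERR(region);
	/* NULL is possible when dmem cgroups are disabled; treat as a no-op */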
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 855beafb76ff..13bc4c290b17 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5605,7 +5605,9 @@ EXPORT_SYMBOL(drm_edid_get_monitor_name);
static void clear_eld(struct drm_connector *connector)
{
+ mutex_lock(&connector->eld_mutex);
memset(connector->eld, 0, sizeof(connector->eld));
+ mutex_unlock(&connector->eld_mutex);
connector->latency_present[0] = false;
connector->latency_present[1] = false;
@@ -5657,6 +5659,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
if (!drm_edid)
return;
+ mutex_lock(&connector->eld_mutex);
+
mnl = get_monitor_name(drm_edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD monitor %s\n",
connector->base.id, connector->name,
@@ -5717,6 +5721,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD size %d, SAD count %d\n",
connector->base.id, connector->name,
drm_eld_size(eld), total_sad_count);
+
+ mutex_unlock(&connector->eld_mutex);
}
static int _drm_edid_to_sad(const struct drm_edid *drm_edid,
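The new eld_mutex closes a race between EDID parsing rewriting connector->eld and readers (typically audio code) copying it out. The consumer side would look roughly like this sketch (buf is hypothetical):

	mutex_lock(&connector->eld_mutex);
	memcpy(buf, connector->eld, drm_eld_size(connector->eld));
	mutex_unlock(&connector->eld_mutex);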
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 29c53f9f449c..fb3614a7ba44 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -492,8 +492,8 @@ EXPORT_SYMBOL(drm_fb_helper_init);
* @fb_helper: driver-allocated fbdev helper
*
* A helper to alloc fb_info and the member cmap. Called by the driver
- * within the fb_probe fb_helper callback function. Drivers do not
- * need to release the allocated fb_info structure themselves, this is
+ * within the &drm_driver.fbdev_probe callback function. Drivers do
+ * not need to release the allocated fb_info structure themselves, this is
* automatically done when calling drm_fb_helper_fini().
*
* RETURNS:
@@ -554,7 +554,7 @@ EXPORT_SYMBOL(drm_fb_helper_release_info);
/**
* drm_fb_helper_unregister_info - unregister fb_info framebuffer device
- * @fb_helper: driver-allocated fbdev helper, can be NULL
+ * @fb_helper: driver-allocated fbdev helper, must not be NULL
*
* A wrapper around unregister_framebuffer, to release the fb_info
* framebuffer device. This must be called before releasing all resources for
@@ -562,8 +562,12 @@ EXPORT_SYMBOL(drm_fb_helper_release_info);
*/
void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
{
- if (fb_helper && fb_helper->info)
- unregister_framebuffer(fb_helper->info);
+ struct fb_info *info = fb_helper->info;
+ struct device *dev = info->device;
+
+ if (dev_is_pci(dev))
+ vga_switcheroo_client_fb_set(to_pci_dev(dev), NULL);
+ unregister_framebuffer(fb_helper->info);
}
EXPORT_SYMBOL(drm_fb_helper_unregister_info);
@@ -693,6 +697,7 @@ void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u3
}
EXPORT_SYMBOL(drm_fb_helper_damage_area);
+#ifdef CONFIG_FB_DEFERRED_IO
/**
* drm_fb_helper_deferred_io() - fbdev deferred_io callback function
* @info: fb_info struct pointer
@@ -736,6 +741,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli
}
}
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
+#endif
/**
* drm_fb_helper_set_suspend - wrapper around fb_set_suspend
@@ -1348,14 +1354,14 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
-static void pan_set(struct drm_fb_helper *fb_helper, int x, int y)
+static void pan_set(struct drm_fb_helper *fb_helper, int dx, int dy)
{
struct drm_mode_set *mode_set;
mutex_lock(&fb_helper->client.modeset_mutex);
drm_client_for_each_modeset(mode_set, &fb_helper->client) {
- mode_set->x = x;
- mode_set->y = y;
+ mode_set->x += dx;
+ mode_set->y += dy;
}
mutex_unlock(&fb_helper->client.modeset_mutex);
}
@@ -1364,16 +1370,18 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- int ret;
+ int ret, dx, dy;
- pan_set(fb_helper, var->xoffset, var->yoffset);
+ dx = var->xoffset - info->var.xoffset;
+ dy = var->yoffset - info->var.yoffset;
+ pan_set(fb_helper, dx, dy);
ret = drm_client_modeset_commit_locked(&fb_helper->client);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
} else
- pan_set(fb_helper, info->var.xoffset, info->var.yoffset);
+ pan_set(fb_helper, -dx, -dy);
return ret;
}
@@ -1441,67 +1449,27 @@ unlock:
EXPORT_SYMBOL(drm_fb_helper_pan_display);
static uint32_t drm_fb_helper_find_format(struct drm_fb_helper *fb_helper, const uint32_t *formats,
- size_t format_count, uint32_t bpp, uint32_t depth)
+ size_t format_count, unsigned int color_mode)
{
struct drm_device *dev = fb_helper->dev;
uint32_t format;
size_t i;
- /*
- * Do not consider YUV or other complicated formats
- * for framebuffers. This means only legacy formats
- * are supported (fmt->depth is a legacy field), but
- * the framebuffer emulation can only deal with such
- * formats, specifically RGB/BGA formats.
- */
- format = drm_mode_legacy_fb_format(bpp, depth);
- if (!format)
- goto err;
+ format = drm_driver_color_mode_format(dev, color_mode);
+ if (!format) {
+ drm_info(dev, "unsupported color mode of %d\n", color_mode);
+ return DRM_FORMAT_INVALID;
+ }
for (i = 0; i < format_count; ++i) {
if (formats[i] == format)
return format;
}
-
-err:
- /* We found nothing. */
- drm_warn(dev, "bpp/depth value of %u/%u not supported\n", bpp, depth);
+ drm_warn(dev, "format %p4cc not supported\n", &format);
return DRM_FORMAT_INVALID;
}
-static uint32_t drm_fb_helper_find_color_mode_format(struct drm_fb_helper *fb_helper,
- const uint32_t *formats, size_t format_count,
- unsigned int color_mode)
-{
- struct drm_device *dev = fb_helper->dev;
- uint32_t bpp, depth;
-
- switch (color_mode) {
- case 1:
- case 2:
- case 4:
- case 8:
- case 16:
- case 24:
- bpp = depth = color_mode;
- break;
- case 15:
- bpp = 16;
- depth = 15;
- break;
- case 32:
- bpp = 32;
- depth = 24;
- break;
- default:
- drm_info(dev, "unsupported color mode of %d\n", color_mode);
- return DRM_FORMAT_INVALID;
- }
-
- return drm_fb_helper_find_format(fb_helper, formats, format_count, bpp, depth);
-}
-
static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -1531,10 +1499,10 @@ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
if (!cmdline_mode->bpp_specified)
continue;
- surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
- plane->format_types,
- plane->format_count,
- cmdline_mode->bpp);
+ surface_format = drm_fb_helper_find_format(fb_helper,
+ plane->format_types,
+ plane->format_count,
+ cmdline_mode->bpp);
if (surface_format != DRM_FORMAT_INVALID)
break; /* found supported format */
}
@@ -1544,10 +1512,10 @@ static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
break; /* found supported format */
/* try preferred color mode */
- surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
- plane->format_types,
- plane->format_count,
- fb_helper->preferred_bpp);
+ surface_format = drm_fb_helper_find_format(fb_helper,
+ plane->format_types,
+ plane->format_count,
+ fb_helper->preferred_bpp);
if (surface_format != DRM_FORMAT_INVALID)
break; /* found supported format */
}
@@ -1648,13 +1616,14 @@ static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
/*
* Allocates the backing storage and sets up the fbdev info structure through
- * the ->fb_probe callback.
+ * the ->fbdev_probe callback.
*/
static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_surface_size sizes;
+ struct fb_info *info;
int ret;
ret = drm_fb_helper_find_sizes(fb_helper, &sizes);
@@ -1666,15 +1635,20 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
}
/* push down into drivers */
- ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+ if (dev->driver->fbdev_probe)
+ ret = dev->driver->fbdev_probe(fb_helper, &sizes);
+ else if (fb_helper->funcs)
+ ret = fb_helper->funcs->fb_probe(fb_helper, &sizes);
if (ret < 0)
return ret;
strcpy(fb_helper->fb->comm, "[fbcon]");
+ info = fb_helper->info;
+
/* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */
- if (dev_is_pci(dev->dev))
- vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), fb_helper->info);
+ if (dev_is_pci(info->device))
+ vga_switcheroo_client_fb_set(to_pci_dev(info->device), info);
return 0;
}
@@ -1738,7 +1712,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info,
* instance and the drm framebuffer allocated in &drm_fb_helper.fb.
*
* Drivers should call this (or their equivalent setup code) from their
- * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
+ * &drm_driver.fbdev_probe callback after having allocated the fbdev
* backing storage framebuffer.
*/
void drm_fb_helper_fill_info(struct fb_info *info,
@@ -1894,7 +1868,7 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
* Note that this also registers the fbdev and so allows userspace to call into
* the driver through the fbdev interfaces.
*
- * This function will call down into the &drm_fb_helper_funcs.fb_probe callback
+ * This function will call down into the &drm_driver.fbdev_probe callback
* to let the driver allocate and initialize the fbdev info structure and the
* drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided
* as a helper to setup simple default values for the fbdev info structure.
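The switch from absolute to relative panning matters when several CRTCs scan out of one framebuffer at different offsets. With illustrative numbers, fbdev pans from yoffset 0 to 480 while CRTC A scans out at y = 0 and CRTC B at y = 480:

	dy = var->yoffset - info->var.yoffset = 480 - 0 = 480
	CRTC A: mode_set->y = 0   + 480 = 480
	CRTC B: mode_set->y = 480 + 480 = 960

On commit failure, pan_set(fb_helper, -dx, -dy) restores both CRTCs; the old code set every CRTC to the same absolute offset and could not restore B correctly.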
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index b0602c4f3628..b14b581c059d 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -2,15 +2,13 @@
#include <linux/fb.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
-#include <drm/drm_fbdev_dma.h>
-
/*
* struct fb_ops
*/
@@ -50,7 +48,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
if (!fb_helper->dev)
return;
- fb_deferred_io_cleanup(info);
+ if (info->fbdefio)
+ fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
drm_client_buffer_vunmap(fb_helper->buffer);
@@ -102,8 +101,35 @@ static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
* struct drm_fb_helper
*/
-static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
+static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_device *dev = helper->dev;
+ int ret;
+
+ /* Call damage handlers only if necessary */
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty) {
+ ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+ if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
+ .fb_dirty = drm_fbdev_dma_helper_fb_dirty,
+};
+
+/*
+ * struct drm_driver
+ */
+
+int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
@@ -147,6 +173,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
goto err_drm_client_buffer_delete;
}
+ fb_helper->funcs = &drm_fbdev_dma_helper_funcs;
fb_helper->buffer = buffer;
fb_helper->fb = fb;
@@ -210,136 +237,4 @@ err_drm_client_buffer_delete:
drm_client_framebuffer_delete(buffer);
return ret;
}
-
-static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
- struct drm_clip_rect *clip)
-{
- struct drm_device *dev = helper->dev;
- int ret;
-
- /* Call damage handlers only if necessary */
- if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
- return 0;
-
- if (helper->fb->funcs->dirty) {
- ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
- if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
- return ret;
- }
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
- .fb_probe = drm_fbdev_dma_helper_fb_probe,
- .fb_dirty = drm_fbdev_dma_helper_fb_dirty,
-};
-
-/*
- * struct drm_client_funcs
- */
-
-static void drm_fbdev_dma_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
- if (fb_helper->info) {
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- }
-}
-
-static int drm_fbdev_dma_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
-
-static int drm_fbdev_dma_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- int ret;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "fbdev-dma: Failed to setup generic emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs drm_fbdev_dma_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = drm_fbdev_dma_client_unregister,
- .restore = drm_fbdev_dma_client_restore,
- .hotplug = drm_fbdev_dma_client_hotplug,
-};
-
-/**
- * drm_fbdev_dma_setup() - Setup fbdev emulation for GEM DMA helpers
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device.
- * 32 is used if this is zero.
- *
- * This function sets up fbdev emulation for GEM DMA drivers that support
- * dumb buffers with a virtual address and that can be mmap'ed.
- * drm_fbdev_dma_setup() shall be called after the DRM driver registered
- * the new DRM device with drm_dev_register().
- *
- * Restore, hotplug events and teardown are all taken care of. Drivers that do
- * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
- * Simple drivers might use drm_mode_config_helper_suspend().
- *
- * This function is safe to call even when there are no connectors present.
- * Setup will be retried on the next hotplug event.
- *
- * The fbdev is destroyed by drm_dev_unregister().
- */
-void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
-{
- struct drm_fb_helper *fb_helper;
- int ret;
-
- drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
- drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
-
- fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper)
- return;
- drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_dma_helper_funcs);
-
- ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_dma_client_funcs);
- if (ret) {
- drm_err(dev, "Failed to register client: %d\n", ret);
- goto err_drm_client_init;
- }
-
- drm_client_register(&fb_helper->client);
-
- return;
-
-err_drm_client_init:
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
-}
-EXPORT_SYMBOL(drm_fbdev_dma_setup);
+EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
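With the client/setup boilerplate removed, a DMA-based driver now only has to point struct drm_driver at the exported probe function; the generic in-kernel client code takes care of hotplug and teardown. A hedged sketch (my_driver is a hypothetical driver, not part of this patch):

	static const struct drm_driver my_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		.fbdev_probe	 = drm_fbdev_dma_driver_fbdev_probe,
		/* ... */
	};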
diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c
index 0c785007f11b..f824369baacd 100644
--- a/drivers/gpu/drm/drm_fbdev_shmem.c
+++ b/drivers/gpu/drm/drm_fbdev_shmem.c
@@ -2,15 +2,13 @@
#include <linux/fb.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
-#include <drm/drm_fbdev_shmem.h>
-
/*
* struct fb_ops
*/
@@ -105,8 +103,35 @@ static struct page *drm_fbdev_shmem_get_page(struct fb_info *info, unsigned long
* struct drm_fb_helper
*/
-static int drm_fbdev_shmem_helper_fb_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
+static int drm_fbdev_shmem_helper_fb_dirty(struct drm_fb_helper *helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_device *dev = helper->dev;
+ int ret;
+
+ /* Call damage handlers only if necessary */
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty) {
+ ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+ if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct drm_fb_helper_funcs drm_fbdev_shmem_helper_funcs = {
+ .fb_dirty = drm_fbdev_shmem_helper_fb_dirty,
+};
+
+/*
+ * struct drm_driver
+ */
+
+int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
@@ -139,6 +164,7 @@ static int drm_fbdev_shmem_helper_fb_probe(struct drm_fb_helper *fb_helper,
goto err_drm_client_buffer_delete;
}
+ fb_helper->funcs = &drm_fbdev_shmem_helper_funcs;
fb_helper->buffer = buffer;
fb_helper->fb = fb;
@@ -182,136 +208,4 @@ err_drm_client_buffer_delete:
drm_client_framebuffer_delete(buffer);
return ret;
}
-
-static int drm_fbdev_shmem_helper_fb_dirty(struct drm_fb_helper *helper,
- struct drm_clip_rect *clip)
-{
- struct drm_device *dev = helper->dev;
- int ret;
-
- /* Call damage handlers only if necessary */
- if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
- return 0;
-
- if (helper->fb->funcs->dirty) {
- ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
- if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
- return ret;
- }
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs drm_fbdev_shmem_helper_funcs = {
- .fb_probe = drm_fbdev_shmem_helper_fb_probe,
- .fb_dirty = drm_fbdev_shmem_helper_fb_dirty,
-};
-
-/*
- * struct drm_client_funcs
- */
-
-static void drm_fbdev_shmem_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
- if (fb_helper->info) {
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- }
-}
-
-static int drm_fbdev_shmem_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
-
-static int drm_fbdev_shmem_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- int ret;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "fbdev-shmem: Failed to setup emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs drm_fbdev_shmem_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = drm_fbdev_shmem_client_unregister,
- .restore = drm_fbdev_shmem_client_restore,
- .hotplug = drm_fbdev_shmem_client_hotplug,
-};
-
-/**
- * drm_fbdev_shmem_setup() - Setup fbdev emulation for GEM SHMEM helpers
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device.
- * 32 is used if this is zero.
- *
- * This function sets up fbdev emulation for GEM DMA drivers that support
- * dumb buffers with a virtual address and that can be mmap'ed.
- * drm_fbdev_shmem_setup() shall be called after the DRM driver registered
- * the new DRM device with drm_dev_register().
- *
- * Restore, hotplug events and teardown are all taken care of. Drivers that do
- * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
- * Simple drivers might use drm_mode_config_helper_suspend().
- *
- * This function is safe to call even when there are no connectors present.
- * Setup will be retried on the next hotplug event.
- *
- * The fbdev is destroyed by drm_dev_unregister().
- */
-void drm_fbdev_shmem_setup(struct drm_device *dev, unsigned int preferred_bpp)
-{
- struct drm_fb_helper *fb_helper;
- int ret;
-
- drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
- drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
-
- fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper)
- return;
- drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_shmem_helper_funcs);
-
- ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_shmem_client_funcs);
- if (ret) {
- drm_err(dev, "Failed to register client: %d\n", ret);
- goto err_drm_client_init;
- }
-
- drm_client_register(&fb_helper->client);
-
- return;
-
-err_drm_client_init:
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
-}
-EXPORT_SYMBOL(drm_fbdev_shmem_setup);
+EXPORT_SYMBOL(drm_fbdev_shmem_driver_fbdev_probe);
diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c
index 119ffb28aaf9..73d35d59590c 100644
--- a/drivers/gpu/drm/drm_fbdev_ttm.c
+++ b/drivers/gpu/drm/drm_fbdev_ttm.c
@@ -65,79 +65,6 @@ static const struct fb_ops drm_fbdev_ttm_fb_ops = {
.fb_destroy = drm_fbdev_ttm_fb_destroy,
};
-/*
- * This function uses the client API to create a framebuffer backed by a dumb buffer.
- */
-static int drm_fbdev_ttm_helper_fb_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct drm_client_dev *client = &fb_helper->client;
- struct drm_device *dev = fb_helper->dev;
- struct drm_client_buffer *buffer;
- struct fb_info *info;
- size_t screen_size;
- void *screen_buffer;
- u32 format;
- int ret;
-
- drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
-
- format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
- sizes->surface_depth);
- buffer = drm_client_framebuffer_create(client, sizes->surface_width,
- sizes->surface_height, format);
- if (IS_ERR(buffer))
- return PTR_ERR(buffer);
-
- fb_helper->buffer = buffer;
- fb_helper->fb = buffer->fb;
-
- screen_size = buffer->gem->size;
- screen_buffer = vzalloc(screen_size);
- if (!screen_buffer) {
- ret = -ENOMEM;
- goto err_drm_client_framebuffer_delete;
- }
-
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_vfree;
- }
-
- drm_fb_helper_fill_info(info, fb_helper, sizes);
-
- info->fbops = &drm_fbdev_ttm_fb_ops;
-
- /* screen */
- info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
- info->screen_buffer = screen_buffer;
- info->fix.smem_len = screen_size;
-
- /* deferred I/O */
- fb_helper->fbdefio.delay = HZ / 20;
- fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
-
- info->fbdefio = &fb_helper->fbdefio;
- ret = fb_deferred_io_init(info);
- if (ret)
- goto err_drm_fb_helper_release_info;
-
- return 0;
-
-err_drm_fb_helper_release_info:
- drm_fb_helper_release_info(fb_helper);
-err_vfree:
- vfree(screen_buffer);
-err_drm_client_framebuffer_delete:
- fb_helper->fb = NULL;
- fb_helper->buffer = NULL;
- drm_client_framebuffer_delete(buffer);
- return ret;
-}
-
static void drm_fbdev_ttm_damage_blit_real(struct drm_fb_helper *fb_helper,
struct drm_clip_rect *clip,
struct iosys_map *dst)
@@ -236,115 +163,81 @@ static int drm_fbdev_ttm_helper_fb_dirty(struct drm_fb_helper *helper,
}
static const struct drm_fb_helper_funcs drm_fbdev_ttm_helper_funcs = {
- .fb_probe = drm_fbdev_ttm_helper_fb_probe,
.fb_dirty = drm_fbdev_ttm_helper_fb_dirty,
};
-static void drm_fbdev_ttm_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
- if (fb_helper->info) {
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- }
-}
-
-static int drm_fbdev_ttm_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
+/*
+ * struct drm_driver
+ */
-static int drm_fbdev_ttm_client_hotplug(struct drm_client_dev *client)
+int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_client_buffer *buffer;
+ struct fb_info *info;
+ size_t screen_size;
+ void *screen_buffer;
+ u32 format;
int ret;
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
+ drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
+ format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
+ sizes->surface_depth);
+ buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ sizes->surface_height, format);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
- return 0;
+ fb_helper->funcs = &drm_fbdev_ttm_helper_funcs;
+ fb_helper->buffer = buffer;
+ fb_helper->fb = buffer->fb;
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "fbdev: Failed to setup emulation (ret=%d)\n", ret);
- return ret;
-}
+ screen_size = buffer->gem->size;
+ screen_buffer = vzalloc(screen_size);
+ if (!screen_buffer) {
+ ret = -ENOMEM;
+ goto err_drm_client_framebuffer_delete;
+ }
-static const struct drm_client_funcs drm_fbdev_ttm_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = drm_fbdev_ttm_client_unregister,
- .restore = drm_fbdev_ttm_client_restore,
- .hotplug = drm_fbdev_ttm_client_hotplug,
-};
+ info = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_vfree;
+ }
-/**
- * drm_fbdev_ttm_setup() - Setup fbdev emulation for TTM-based drivers
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device.
- *
- * This function sets up fbdev emulation for TTM-based drivers that support
- * dumb buffers with a virtual address and that can be mmap'ed.
- * drm_fbdev_ttm_setup() shall be called after the DRM driver registered
- * the new DRM device with drm_dev_register().
- *
- * Restore, hotplug events and teardown are all taken care of. Drivers that do
- * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
- * Simple drivers might use drm_mode_config_helper_suspend().
- *
- * In order to provide fixed mmap-able memory ranges, fbdev emulation
- * uses a shadow buffer in system memory. The implementation blits the shadow
- * fbdev buffer onto the real buffer in regular intervals.
- *
- * This function is safe to call even when there are no connectors present.
- * Setup will be retried on the next hotplug event.
- *
- * The fbdev is destroyed by drm_dev_unregister().
- */
-void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp)
-{
- struct drm_fb_helper *fb_helper;
- int ret;
+ drm_fb_helper_fill_info(info, fb_helper, sizes);
- drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
- drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
+ info->fbops = &drm_fbdev_ttm_fb_ops;
- fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper)
- return;
- drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_ttm_helper_funcs);
+ /* screen */
+ info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
+ info->screen_buffer = screen_buffer;
+ info->fix.smem_len = screen_size;
- ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_ttm_client_funcs);
- if (ret) {
- drm_err(dev, "Failed to register client: %d\n", ret);
- goto err_drm_client_init;
- }
+ /* deferred I/O */
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
- drm_client_register(&fb_helper->client);
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_drm_fb_helper_release_info;
- return;
+ return 0;
-err_drm_client_init:
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- return;
+err_drm_fb_helper_release_info:
+ drm_fb_helper_release_info(fb_helper);
+err_vfree:
+ vfree(screen_buffer);
+err_drm_client_framebuffer_delete:
+ fb_helper->fb = NULL;
+ fb_helper->buffer = NULL;
+ drm_client_framebuffer_delete(buffer);
+ return ret;
}
-EXPORT_SYMBOL(drm_fbdev_ttm_setup);
+EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index ad1dc638c83b..2289e71e2fa2 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -40,7 +40,7 @@
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
@@ -129,7 +129,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
*/
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
- static atomic64_t ident = ATOMIC_INIT(0);
+ static atomic64_t ident = ATOMIC64_INIT(0);
struct drm_device *dev = minor->dev;
struct drm_file *file;
int ret;
@@ -157,6 +157,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
spin_lock_init(&file->master_lookup_lock);
mutex_init(&file->event_read_lock);
+ mutex_init(&file->client_name_lock);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_open(dev, file);
@@ -258,6 +259,10 @@ void drm_file_free(struct drm_file *file)
WARN_ON(!list_empty(&file->event_list));
put_pid(rcu_access_pointer(file->pid));
+
+ mutex_destroy(&file->client_name_lock);
+ kfree(file->client_name);
+
kfree(file);
}
@@ -840,6 +845,16 @@ static void print_size(struct drm_printer *p, const char *stat,
drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
}
+int drm_memory_stats_is_zero(const struct drm_memory_stats *stats)
+{
+ return (stats->shared == 0 &&
+ stats->private == 0 &&
+ stats->resident == 0 &&
+ stats->purgeable == 0 &&
+ stats->active == 0);
+}
+EXPORT_SYMBOL(drm_memory_stats_is_zero);
+
/**
* drm_print_memory_stats - A helper to print memory stats
* @p: The printer to print output to
@@ -855,7 +870,9 @@ void drm_print_memory_stats(struct drm_printer *p,
{
print_size(p, "total", region, stats->private + stats->shared);
print_size(p, "shared", region, stats->shared);
- print_size(p, "active", region, stats->active);
+
+ if (supported_status & DRM_GEM_OBJECT_ACTIVE)
+ print_size(p, "active", region, stats->active);
if (supported_status & DRM_GEM_OBJECT_RESIDENT)
print_size(p, "resident", region, stats->resident);
@@ -888,15 +905,13 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
if (obj->funcs && obj->funcs->status) {
s = obj->funcs->status(obj);
- supported_status = DRM_GEM_OBJECT_RESIDENT |
- DRM_GEM_OBJECT_PURGEABLE;
+ supported_status |= s;
}
- if (drm_gem_object_is_shared_for_memory_stats(obj)) {
+ if (drm_gem_object_is_shared_for_memory_stats(obj))
status.shared += obj->size;
- } else {
+ else
status.private += obj->size;
- }
if (s & DRM_GEM_OBJECT_RESIDENT) {
status.resident += add_size;
@@ -909,6 +924,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) {
status.active += add_size;
+ supported_status |= DRM_GEM_OBJECT_ACTIVE;
/* If still active, don't count as purgeable: */
s &= ~DRM_GEM_OBJECT_PURGEABLE;
@@ -950,6 +966,11 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
}
+ mutex_lock(&file->client_name_lock);
+ if (file->client_name)
+ drm_printf(&p, "drm-client-name:\t%s\n", file->client_name);
+ mutex_unlock(&file->client_name_lock);
+
if (dev->driver->show_fdinfo)
dev->driver->show_fdinfo(&p, file);
}
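One intended use of drm_memory_stats_is_zero() is letting a driver's show_fdinfo implementation skip memory regions with nothing to report. A rough sketch, assuming a driver helper that fills the stats (my_fill_region_stats is hypothetical):

	struct drm_memory_stats stats = {};
	enum drm_gem_object_status supported = my_fill_region_stats(file, &stats);

	if (!drm_memory_stats_is_zero(&stats))
		drm_print_memory_stats(p, &stats, supported, "vram");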
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 193cf8ed7912..3a94ca211f9c 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -36,7 +36,6 @@
* @depth: bit depth per pixel
*
* Computes a drm fourcc pixel format code for the given @bpp/@depth values.
- * Useful in fbdev emulation code, since that deals in those values.
*/
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
{
@@ -140,6 +139,35 @@ uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_driver_legacy_fb_format);
+/**
+ * drm_driver_color_mode_format - Compute DRM 4CC code from color mode
+ * @dev: DRM device
+ * @color_mode: command-line color mode
+ *
+ * Computes a DRM 4CC pixel format code for the given color mode using
+ * drm_driver_legacy_fb_format(). The color mode is in the format used on the
+ * kernel command line. It specifies the number of bits per pixel and the
+ * color depth in a single value.
+ *
+ * Useful in fbdev emulation code, since that deals in those values. The
+ * helper does not consider YUV or other complicated formats. This means
+ * only legacy formats are supported (fmt->depth is a legacy field), but
+ * the framebuffer emulation can only deal with such formats, specifically
+ * RGB/BGR formats.
+ */
+uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode)
+{
+ switch (color_mode) {
+ case 15:
+ return drm_driver_legacy_fb_format(dev, 16, 15);
+ case 32:
+ return drm_driver_legacy_fb_format(dev, 32, 24);
+ default:
+ return drm_driver_legacy_fb_format(dev, color_mode, color_mode);
+ }
+}
+EXPORT_SYMBOL(drm_driver_color_mode_format);
+
/*
* Internal function to query information for a given format. See
* drm_format_info() for the public API.
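For the common color modes this resolves as follows (assuming a little-endian system and no byte-order quirks in drm_driver_legacy_fb_format()):

	color_mode 15 -> bpp 16, depth 15 -> DRM_FORMAT_XRGB1555
	color_mode 16 -> bpp 16, depth 16 -> DRM_FORMAT_RGB565
	color_mode 24 -> bpp 24, depth 24 -> DRM_FORMAT_RGB888
	color_mode 32 -> bpp 32, depth 24 -> DRM_FORMAT_XRGB8888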
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 888aadb6a4ac..b781601946db 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -99,6 +99,7 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
return 0;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_check_src_coords);
/**
* drm_mode_addfb - add an FB to the graphics configuration
@@ -838,6 +839,7 @@ void drm_framebuffer_free(struct kref *kref)
fb->funcs->destroy(fb);
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_free);
/**
* drm_framebuffer_init - initialize a framebuffer
@@ -868,7 +870,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
INIT_LIST_HEAD(&fb->filp_head);
fb->funcs = funcs;
- strcpy(fb->comm, current->comm);
+ strscpy(fb->comm, current->comm);
ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
false, drm_framebuffer_free);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 149b8e25da5b..ee811764c3df 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -114,22 +114,32 @@ drm_gem_init(struct drm_device *dev)
}
/**
- * drm_gem_object_init - initialize an allocated shmem-backed GEM object
+ * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM
+ * object in a given shmfs mountpoint
+ *
* @dev: drm_device the object should be initialized for
* @obj: drm_gem_object to initialize
* @size: object size
+ * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use
+ * the usual tmpfs mountpoint (`shm_mnt`).
*
* Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
*/
-int drm_gem_object_init(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size)
+int drm_gem_object_init_with_mnt(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size,
+ struct vfsmount *gemfs)
{
struct file *filp;
drm_gem_private_object_init(dev, obj, size);
- filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (gemfs)
+ filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size,
+ VM_NORESERVE);
+ else
+ filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+
if (IS_ERR(filp))
return PTR_ERR(filp);
@@ -137,6 +147,22 @@ int drm_gem_object_init(struct drm_device *dev,
return 0;
}
+EXPORT_SYMBOL(drm_gem_object_init_with_mnt);
+
+/**
+ * drm_gem_object_init - initialize an allocated shmem-backed GEM object
+ * @dev: drm_device the object should be initialized for
+ * @obj: drm_gem_object to initialize
+ * @size: object size
+ *
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
+ size_t size)
+{
+ return drm_gem_object_init_with_mnt(dev, obj, size, NULL);
+}
EXPORT_SYMBOL(drm_gem_object_init);
/**
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 870b90b78bc4..16988d316a6d 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -600,5 +600,5 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_dma_prime_import_sg_table_vmap);
MODULE_DESCRIPTION("DRM DMA memory-management helpers");
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 3bdb6ba37ff4..185534f56bab 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -18,7 +18,7 @@
#include "drm_internal.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
#define AFBC_HEADER_SIZE 16
#define AFBC_TH_LAYOUT_ALIGNMENT 8
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 53c003983ad1..5ab351409312 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -22,7 +22,7 @@
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
/**
* DOC: overview
@@ -49,7 +49,8 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
};
static struct drm_gem_shmem_object *
-__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
+__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
+ struct vfsmount *gemfs)
{
struct drm_gem_shmem_object *shmem;
struct drm_gem_object *obj;
@@ -76,7 +77,7 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
drm_gem_private_object_init(dev, obj, size);
shmem->map_wc = false; /* dma-buf mappings use always writecombine */
} else {
- ret = drm_gem_object_init(dev, obj, size);
+ ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs);
}
if (ret) {
drm_gem_private_object_fini(obj);
@@ -123,11 +124,32 @@ err_free:
*/
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
- return __drm_gem_shmem_create(dev, size, false);
+ return __drm_gem_shmem_create(dev, size, false, NULL);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
/**
+ * drm_gem_shmem_create_with_mnt - Allocate an object with the given size in a
+ * given mountpoint
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ * @gemfs: tmpfs mount where the GEM object will be created
+ *
+ * This function creates a shmem GEM object in a given tmpfs mountpoint.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
+ size_t size,
+ struct vfsmount *gemfs)
+{
+ return __drm_gem_shmem_create(dev, size, false, gemfs);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);
+
+/**
* drm_gem_shmem_free - Free resources associated with a shmem GEM object
* @shmem: shmem GEM object to free
*
@@ -765,7 +787,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
size_t size = PAGE_ALIGN(attach->dmabuf->size);
struct drm_gem_shmem_object *shmem;
- shmem = __drm_gem_shmem_create(dev, size, true);
+ shmem = __drm_gem_shmem_create(dev, size, true, NULL);
if (IS_ERR(shmem))
return ERR_CAST(shmem);
@@ -778,5 +800,5 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 6027584406af..22b1fe9c03b8 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -16,7 +16,6 @@
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>
@@ -687,50 +686,6 @@ drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
/*
- * Helpers for struct drm_simple_display_pipe_funcs
- */
-
-/**
- * drm_gem_vram_simple_display_pipe_prepare_fb() - Implements &struct
- * drm_simple_display_pipe_funcs.prepare_fb
- * @pipe: a simple display pipe
- * @new_state: the plane's new state
- *
- * During plane updates, this function pins the GEM VRAM
- * objects of the plane's new framebuffer to VRAM. Call
- * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-int drm_gem_vram_simple_display_pipe_prepare_fb(
- struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_state)
-{
- return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
-}
-EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
-
-/**
- * drm_gem_vram_simple_display_pipe_cleanup_fb() - Implements &struct
- * drm_simple_display_pipe_funcs.cleanup_fb
- * @pipe: a simple display pipe
- * @old_state: the plane's old state
- *
- * During plane updates, this function unpins the GEM VRAM
- * objects of the plane's old framebuffer from VRAM. Complements
- * drm_gem_vram_simple_display_pipe_prepare_fb().
- */
-void drm_gem_vram_simple_display_pipe_cleanup_fb(
- struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
-{
- drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
-}
-EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
-
-/*
* PRIME helpers
*/
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1705bfc90b1e..b2b6a8e49dda 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -48,6 +48,14 @@ struct drm_prime_file_private;
struct drm_printer;
struct drm_vblank_crtc;
+/* drm_client_event.c */
+#if defined(CONFIG_DRM_CLIENT)
+void drm_client_debugfs_init(struct drm_device *dev);
+#else
+static inline void drm_client_debugfs_init(struct drm_device *dev)
+{ }
+#endif
+
/* drm_file.c */
extern struct mutex drm_global_mutex;
bool drm_dev_needs_global_mutex(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 51f39912866f..f593dc569d31 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -540,6 +540,55 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
+/*
+ * Check if the passed string contains control characters, spaces or
+ * anything else that would mess up formatted output.
+ */
+static int drm_validate_value_string(const char *value, size_t len)
+{
+	size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (!isascii(value[i]) || !isgraph(value[i]))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int drm_set_client_name(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_set_client_name *name = data;
+ size_t len = name->name_len;
+ void __user *user_ptr;
+ char *new_name;
+
+ if (len > DRM_CLIENT_NAME_MAX_LEN) {
+ return -EINVAL;
+ } else if (len) {
+ user_ptr = u64_to_user_ptr(name->name);
+
+ new_name = memdup_user_nul(user_ptr, len);
+ if (IS_ERR(new_name))
+ return PTR_ERR(new_name);
+
+ if (strlen(new_name) != len ||
+ drm_validate_value_string(new_name, len) < 0) {
+ kfree(new_name);
+ return -EINVAL;
+ }
+ } else {
+ new_name = NULL;
+ }
+
+ mutex_lock(&file_priv->client_name_lock);
+ kfree(file_priv->client_name);
+ file_priv->client_name = new_name;
+ mutex_unlock(&file_priv->client_name_lock);
+
+ return 0;
+}
+
static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
@@ -610,6 +659,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_NAME, drm_set_client_name, DRM_RENDER_ALLOW),
+
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER),
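From userspace the new ioctl is a plain pointer/length pair, matching what the handler above reads through u64_to_user_ptr(). A hedged sketch using libdrm's drmIoctl() wrapper (the client name is illustrative):

	struct drm_set_client_name arg = {
		.name = (__u64)(uintptr_t)"my-compositor",
		.name_len = strlen("my-compositor"),
	};

	if (drmIoctl(fd, DRM_IOCTL_SET_CLIENT_NAME, &arg))
		perror("DRM_IOCTL_SET_CLIENT_NAME");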
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 2bc3973d35a1..5e5c5f84daac 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -1521,6 +1521,22 @@ void mipi_dsi_compression_mode_ext_multi(struct mipi_dsi_multi_context *ctx,
EXPORT_SYMBOL(mipi_dsi_compression_mode_ext_multi);
/**
+ * mipi_dsi_compression_mode_multi() - enable/disable DSC on the peripheral
+ * @ctx: Context for multiple DSI transactions
+ * @enable: Whether to enable or disable the DSC
+ *
+ * Enable or disable Display Stream Compression on the peripheral using the
+ * default Picture Parameter Set and VESA DSC 1.1 algorithm.
+ */
+void mipi_dsi_compression_mode_multi(struct mipi_dsi_multi_context *ctx,
+ bool enable)
+{
+ return mipi_dsi_compression_mode_ext_multi(ctx, enable,
+ MIPI_DSI_COMPRESSION_DSC, 0);
+}
+EXPORT_SYMBOL(mipi_dsi_compression_mode_multi);
+
+/**
* mipi_dsi_dcs_nop_multi() - send DCS NOP packet
* @ctx: Context for multiple DSI transactions
*
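Like the other _multi helpers, this variant accumulates errors in the context, so panel init sequences can chain calls and check once at the end. A rough sketch of a prepare() fragment (the surrounding driver code is assumed):

	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
	mipi_dsi_compression_mode_multi(&ctx, true);
	mipi_dsi_dcs_set_display_on_multi(&ctx);

	return ctx.accum_err;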
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 5ace481c1901..ca254611b382 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -151,7 +151,7 @@ static void show_leaks(struct drm_mm *mm) { }
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
u64, __subtree_last,
- START, LAST, static inline, drm_mm_interval_tree)
+ START, LAST, static inline __maybe_unused, drm_mm_interval_tree)
struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
@@ -611,7 +611,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
-static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
+static inline __maybe_unused bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 37d2e0a4ef4b..8642a2fb25a9 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -150,6 +150,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
drm_connector_list_iter_begin(dev, &conn_iter);
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
+ /*
+ * FIXME: the connectors on the list may not be fully initialized yet,
+ * if the ioctl is called before the connectors are registered. (See
+ * drm_dev_register()->drm_modeset_register_all() for static and
+ * drm_connector_dynamic_register() for dynamic connectors.)
+ * The driver should only get registered after static connectors are
+ * fully initialized and dynamic connectors should be added to the
+ * connector list only after fully initializing them.
+ */
drm_for_each_connector_iter(connector, &conn_iter) {
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index df4cc0e8e263..e943205a2394 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -81,6 +81,7 @@ int drm_mode_object_add(struct drm_device *dev,
{
return __drm_mode_object_add(dev, obj, obj_type, true, NULL);
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_mode_object_add);
void drm_mode_object_register(struct drm_device *dev,
struct drm_mode_object *obj)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6ba167a33461..e72f855fc495 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1282,19 +1282,15 @@ EXPORT_SYMBOL(drm_mode_set_name);
* @mode: mode
*
* Returns:
- * @modes's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
- * value first if it is not yet set.
+ * @modes's vrefresh rate in Hz, rounded to the nearest integer.
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
- unsigned int num, den;
+ unsigned int num = 1, den = 1;
if (mode->htotal == 0 || mode->vtotal == 0)
return 0;
- num = mode->clock;
- den = mode->htotal * mode->vtotal;
-
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
num *= 2;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1302,6 +1298,12 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
if (mode->vscan > 1)
den *= mode->vscan;
+ if (check_mul_overflow(mode->clock, num, &num))
+ return 0;
+
+ if (check_mul_overflow(mode->htotal * mode->vtotal, den, &den))
+ return 0;
+
return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
}
EXPORT_SYMBOL(drm_mode_vrefresh);
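Worked example with illustrative 1080p60 numbers, no interlace/doublescan/vscan flags set:

	clock = 148500 (kHz), htotal = 2200, vtotal = 1125
	num = 148500, den = 2200 * 1125 = 2475000
	vrefresh = DIV_ROUND_CLOSEST_ULL(148500 * 1000, 2475000) = 60 Hz

A corrupt mode that would wrap either 32-bit multiplication now yields 0 instead of a bogus refresh rate.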
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 2c582020cb42..5565464c1734 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -21,7 +21,7 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper.h>
@@ -185,7 +185,7 @@ EXPORT_SYMBOL(drm_crtc_init);
* Zero on success, negative error code on error.
*
* See also:
- * drm_kms_helper_poll_disable() and drm_fb_helper_set_suspend_unlocked().
+ * drm_kms_helper_poll_disable() and drm_client_dev_suspend().
*/
int drm_mode_config_helper_suspend(struct drm_device *dev)
{
@@ -199,10 +199,11 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
if (dev->mode_config.poll_enabled)
drm_kms_helper_poll_disable(dev);
- drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
+ drm_client_dev_suspend(dev, false);
state = drm_atomic_helper_suspend(dev);
if (IS_ERR(state)) {
- drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+ drm_client_dev_resume(dev, false);
+
/*
* Don't enable polling if it was never initialized
*/
@@ -230,7 +231,7 @@ EXPORT_SYMBOL(drm_mode_config_helper_suspend);
* Zero on success, negative error code on error.
*
* See also:
- * drm_fb_helper_set_suspend_unlocked() and drm_kms_helper_poll_enable().
+ * drm_client_dev_resume() and drm_kms_helper_poll_enable().
*/
int drm_mode_config_helper_resume(struct drm_device *dev)
{
@@ -247,7 +248,8 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
DRM_ERROR("Failed to resume (%d)\n", ret);
dev->mode_config.suspend_state = NULL;
- drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+ drm_client_dev_resume(dev, false);
+
/*
* Don't enable polling if it is not initialized
*/
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 177b600895d3..5530919e0ba0 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -341,8 +341,23 @@ static int drm_of_lvds_get_remote_pixels_type(
return pixels_type;
}
+static int __drm_of_lvds_get_dual_link_pixel_order(int p1_pt, int p2_pt)
+{
+ /*
+ * A valid dual-LVDS bus is found when one port is marked with
+ * "dual-lvds-even-pixels" and the other port is marked with
+ * "dual-lvds-odd-pixels"; bail out if the markers are not right.
+ */
+ if (p1_pt + p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD)
+ return -EINVAL;
+
+ return p1_pt == DRM_OF_LVDS_EVEN ?
+ DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS :
+ DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+}
+
/**
- * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link pixel order
+ * drm_of_lvds_get_dual_link_pixel_order - Get LVDS dual-link source pixel order
* @port1: First DT port node of the Dual-link LVDS source
* @port2: Second DT port node of the Dual-link LVDS source
*
@@ -387,19 +402,58 @@ int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
if (remote_p2_pt < 0)
return remote_p2_pt;
- /*
- * A valid dual-lVDS bus is found when one remote port is marked with
- * "dual-lvds-even-pixels", and the other remote port is marked with
- * "dual-lvds-odd-pixels", bail out if the markers are not right.
- */
- if (remote_p1_pt + remote_p2_pt != DRM_OF_LVDS_EVEN + DRM_OF_LVDS_ODD)
+ return __drm_of_lvds_get_dual_link_pixel_order(remote_p1_pt, remote_p2_pt);
+}
+EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order);
+
+/**
+ * drm_of_lvds_get_dual_link_pixel_order_sink - Get LVDS dual-link sink pixel order
+ * @port1: First DT port node of the Dual-link LVDS sink
+ * @port2: Second DT port node of the Dual-link LVDS sink
+ *
+ * An LVDS dual-link connection is made of two links, with even pixels
+ * transmitted on one link and odd pixels on the other link. This function
+ * returns, for two ports of an LVDS dual-link sink, which port shall receive
+ * the even pixels and which the odd pixels, based on the requirements of the
+ * sink.
+ *
+ * The pixel order is determined from the dual-lvds-even-pixels and
+ * dual-lvds-odd-pixels properties in the sink's DT port nodes. If those
+ * properties are not present, or if their usage is not valid, this function
+ * returns -EINVAL.
+ *
+ * If either port is not connected, this function returns -EPIPE.
+ *
+ * @port1 and @port2 are typically DT sibling nodes, but may have different
+ * parents when, for instance, two separate LVDS decoders receive the even and
+ * odd pixels.
+ *
+ * Return:
+ * * DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS - @port1 receives even pixels and @port2
+ * receives odd pixels
+ * * DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS - @port1 receives odd pixels and @port2
+ * receives even pixels
+ * * -EINVAL - @port1 or @port2 are NULL
+ * * -EPIPE - when @port1 or @port2 are not connected
+ */
+int drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1,
+ struct device_node *port2)
+{
+ int sink_p1_pt, sink_p2_pt;
+
+ if (!port1 || !port2)
return -EINVAL;
- return remote_p1_pt == DRM_OF_LVDS_EVEN ?
- DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS :
- DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS;
+ sink_p1_pt = drm_of_lvds_get_port_pixels_type(port1);
+ if (!sink_p1_pt)
+ return -EPIPE;
+
+ sink_p2_pt = drm_of_lvds_get_port_pixels_type(port2);
+ if (!sink_p2_pt)
+ return -EPIPE;
+
+ return __drm_of_lvds_get_dual_link_pixel_order(sink_p1_pt, sink_p2_pt);
}
-EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order);
+EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order_sink);
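A hedged usage sketch for the new sink-side helper; the caller and its port
lookup are hypothetical, only the helper and its return values come from the
code above:

	#include <drm/drm_of.h>

	/* hypothetical bridge code that already resolved its two sink ports */
	static int example_sink_order(struct device_node *port1,
				      struct device_node *port2)
	{
		int order = drm_of_lvds_get_dual_link_pixel_order_sink(port1, port2);

		switch (order) {
		case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS:
			/* port1 receives even pixels, port2 receives odd pixels */
			break;
		case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS:
			/* port1 receives odd pixels, port2 receives even pixels */
			break;
		default:
			/* -EINVAL: markers missing/invalid, -EPIPE: port unconnected */
			break;
		}
		return order;
	}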
/**
* drm_of_lvds_get_data_mapping - Get LVDS data mapping
@@ -410,7 +464,9 @@ EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order);
* Return:
* * MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - data-mapping is "jeida-18"
* * MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA - data-mapping is "jeida-24"
+ * * MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA - data-mapping is "jeida-30"
* * MEDIA_BUS_FMT_RGB888_1X7X4_SPWG - data-mapping is "vesa-24"
+ * * MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG - data-mapping is "vesa-30"
* * -EINVAL - the "data-mapping" property is unsupported
* * -ENODEV - the "data-mapping" property is missing
*/
@@ -427,8 +483,12 @@ int drm_of_lvds_get_data_mapping(const struct device_node *port)
return MEDIA_BUS_FMT_RGB666_1X7X3_SPWG;
if (!strcmp(mapping, "jeida-24"))
return MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
+ if (!strcmp(mapping, "jeida-30"))
+ return MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA;
if (!strcmp(mapping, "vesa-24"))
return MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
+ if (!strcmp(mapping, "vesa-30"))
+ return MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG;
return -EINVAL;
}
@@ -504,6 +564,8 @@ EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count_ep);
* Gets parent DSI bus for a DSI device controlled through a bus other
* than MIPI-DCS (SPI, I2C, etc.) using the Device Tree.
*
+ * This function assumes that the device's port@0 is the DSI input.
+ *
* Returns pointer to mipi_dsi_host if successful, -EINVAL if the
* request is unsupported, -EPROBE_DEFER if the DSI host is found but
* not available, or -ENODEV otherwise.
@@ -516,7 +578,7 @@ struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev)
/*
* Get first endpoint child from device.
*/
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
if (!endpoint)
return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 19ab0a794add..9940e96d35e3 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,6 +24,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@@ -413,7 +414,7 @@ bool drm_is_panel_follower(struct device *dev)
* don't bother trying to parse it here. We just need to know if the
* property is there.
*/
- return of_property_read_bool(dev->of_node, "panel");
+ return of_property_present(dev->of_node, "panel");
}
EXPORT_SYMBOL(drm_is_panel_follower);
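The distinction matters because of_property_read_bool() is meant for flag
properties, while "panel" here carries a phandle value; of_property_present()
states the "does it exist" intent directly. A minimal contrast, with a
hypothetical node np and property names:

	/* DT fragment (hypothetical):
	 *	wakeup-source;
	 *	panel = <&some_panel>;
	 */

	/* existence check - right for value-carrying properties */
	bool follows = of_property_present(np, "panel");

	/* boolean read - only for true flag properties */
	bool wakeup = of_property_read_bool(np, "wakeup-source");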
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
new file mode 100644
index 000000000000..c477d98ade2b
--- /dev/null
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_utils.h>
+
+struct drm_panel_min_backlight_quirk {
+ struct {
+ enum dmi_field field;
+ const char * const value;
+ } dmi_match;
+ struct drm_edid_ident ident;
+ u8 min_brightness;
+};
+
+static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = {
+ /* 13 inch matte panel */
+ {
+ .dmi_match.field = DMI_BOARD_VENDOR,
+ .dmi_match.value = "Framework",
+ .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca),
+ .ident.name = "NE135FBM-N41",
+ .min_brightness = 0,
+ },
+ /* 13 inch glossy panel */
+ {
+ .dmi_match.field = DMI_BOARD_VENDOR,
+ .dmi_match.value = "Framework",
+ .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f),
+ .ident.name = "NE135FBM-N41",
+ .min_brightness = 0,
+ },
+ /* 13 inch 2.8k panel */
+ {
+ .dmi_match.field = DMI_BOARD_VENDOR,
+ .dmi_match.value = "Framework",
+ .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4),
+ .ident.name = "NE135A1M-NY1",
+ .min_brightness = 0,
+ },
+};
+
+static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk,
+ const struct drm_edid *edid)
+{
+ if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ return false;
+
+ if (!drm_edid_match(edid, &quirk->ident))
+ return false;
+
+ return true;
+}
+
+/**
+ * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel.
+ * @edid: EDID of the panel to check
+ *
+ * This function checks for platform-specific (e.g. DMI-based) quirks
+ * providing info on the minimum backlight brightness for systems where this
+ * cannot be probed correctly from the hardware or firmware.
+ *
+ * Returns:
+ * A negative error value, or an override value in the range [0, 255]
+ * representing 0-100%, to be scaled to the driver's target range.
+ */
+int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid)
+{
+ const struct drm_panel_min_backlight_quirk *quirk;
+ size_t i;
+
+ if (!IS_ENABLED(CONFIG_DMI))
+ return -ENODATA;
+
+ if (!edid)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) {
+ quirk = &drm_panel_min_backlight_quirks[i];
+
+ if (drm_panel_min_backlight_quirk_matches(quirk, edid))
+ return quirk->min_brightness;
+ }
+
+ return -ENODATA;
+}
+EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk);
+
+MODULE_DESCRIPTION("Quirks for panel backlight overrides");
+MODULE_LICENSE("GPL");
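A sketch of how a panel or eDP driver might consume the quirk when registering
its backlight; the caller, its names, and the scaling policy are hypothetical -
only drm_get_panel_min_brightness_quirk() and its 0-255 contract come from the
code above:

	#include <drm/drm_edid.h>
	#include <drm/drm_utils.h>

	/* hypothetical: scale the 0-255 override into the device's own range */
	static u32 example_min_backlight(const struct drm_edid *edid, u32 max_level)
	{
		int quirk = drm_get_panel_min_brightness_quirk(edid);

		if (quirk < 0)		/* -ENODATA / -EINVAL: keep driver default */
			return 1;

		/* quirk maps [0, 255] onto [0, max_level] */
		return DIV_ROUND_UP((u32)quirk * max_level, 255);
	}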
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 0830cae9a4d0..4a73821b81f6 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -184,6 +184,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO AYANEO 2 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* AYA NEO 2021 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
@@ -196,6 +202,18 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
},
.driver_data = (void *)&lcd1080x1920_leftside_up,
+ }, { /* AYA NEO Founder */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AYA NEO Founder"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO GEEK */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GEEK"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* AYA NEO NEXT */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
@@ -403,7 +421,6 @@ static const struct dmi_system_id orientation_data[] = {
}, { /* Lenovo Yoga Tab 3 X90F */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)&lcd1600x2560_rightside_up,
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 74412b7bf936..f128d345b16d 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -31,6 +31,7 @@
#include <drm/drm_rect.h>
#include "drm_crtc_internal.h"
+#include "drm_draw_internal.h"
MODULE_AUTHOR("Jocelyn Falempe");
MODULE_DESCRIPTION("DRM panic handler");
@@ -139,171 +140,8 @@ device_initcall(drm_panic_setup_logo);
#endif
/*
- * Color conversion
+ * Blit & Fill functions
*/
-
-static u16 convert_xrgb8888_to_rgb565(u32 pix)
-{
- return ((pix & 0x00F80000) >> 8) |
- ((pix & 0x0000FC00) >> 5) |
- ((pix & 0x000000F8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
-{
- return ((pix & 0x00f80000) >> 8) |
- ((pix & 0x0000f800) >> 5) |
- ((pix & 0x000000f8) >> 2) |
- BIT(0); /* set alpha bit */
-}
-
-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
-{
- return ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u16 convert_xrgb8888_to_argb1555(u32 pix)
-{
- return BIT(15) | /* set alpha bit */
- ((pix & 0x00f80000) >> 9) |
- ((pix & 0x0000f800) >> 6) |
- ((pix & 0x000000f8) >> 3);
-}
-
-static u32 convert_xrgb8888_to_argb8888(u32 pix)
-{
- return pix | GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- ((pix & 0xff000000) >> 24) << 24;
-}
-
-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
-{
- return ((pix & 0x00ff0000) >> 16) << 0 |
- ((pix & 0x0000ff00) >> 8) << 8 |
- ((pix & 0x000000ff) >> 0) << 16 |
- GENMASK(31, 24); /* fill alpha bits */
-}
-
-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return pix | ((pix >> 8) & 0x00300C03);
-}
-
-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
-{
- pix = ((pix & 0x000000FF) << 2) |
- ((pix & 0x0000FF00) << 4) |
- ((pix & 0x00FF0000) << 6);
- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
-}
-
-/*
- * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
- * @color: input color, in xrgb8888 format
- * @format: output format
- *
- * Returns:
- * Color in the format specified, casted to u32.
- * Or 0 if the format is not supported.
- */
-static u32 convert_from_xrgb8888(u32 color, u32 format)
-{
- switch (format) {
- case DRM_FORMAT_RGB565:
- return convert_xrgb8888_to_rgb565(color);
- case DRM_FORMAT_RGBA5551:
- return convert_xrgb8888_to_rgba5551(color);
- case DRM_FORMAT_XRGB1555:
- return convert_xrgb8888_to_xrgb1555(color);
- case DRM_FORMAT_ARGB1555:
- return convert_xrgb8888_to_argb1555(color);
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_XRGB8888:
- return color;
- case DRM_FORMAT_ARGB8888:
- return convert_xrgb8888_to_argb8888(color);
- case DRM_FORMAT_XBGR8888:
- return convert_xrgb8888_to_xbgr8888(color);
- case DRM_FORMAT_ABGR8888:
- return convert_xrgb8888_to_abgr8888(color);
- case DRM_FORMAT_XRGB2101010:
- return convert_xrgb8888_to_xrgb2101010(color);
- case DRM_FORMAT_ARGB2101010:
- return convert_xrgb8888_to_argb2101010(color);
- default:
- WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
- return 0;
- }
-}
-
-/*
- * Blit & Fill
- */
-/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
-static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
-{
- return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
-}
-
-static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch,
- const u8 *sbuf8, unsigned int spitch,
- unsigned int height, unsigned int width,
- unsigned int scale, u16 fg16)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
-}
-
-static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
- const u8 *sbuf8, unsigned int spitch,
- unsigned int height, unsigned int width,
- unsigned int scale, u32 fg32)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
- u32 off = y * dpitch + x * 3;
-
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
- /* write blue-green-red to output in little endianness */
- iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
- iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
- iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
- }
- }
- }
-}
-
-static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch,
- const u8 *sbuf8, unsigned int spitch,
- unsigned int height, unsigned int width,
- unsigned int scale, u32 fg32)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
-}
-
static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect *clip,
const u8 *sbuf8, unsigned int spitch, unsigned int scale,
u32 fg_color)
@@ -312,7 +150,7 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
for (y = 0; y < drm_rect_height(clip); y++)
for (x = 0; x < drm_rect_width(clip); x++)
- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
@@ -344,62 +182,22 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
switch (sb->format->cpp[0]) {
case 2:
- drm_panic_blit16(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ drm_draw_blit16(&map, sb->pitch[0], sbuf8, spitch,
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 3:
- drm_panic_blit24(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ drm_draw_blit24(&map, sb->pitch[0], sbuf8, spitch,
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 4:
- drm_panic_blit32(&map, sb->pitch[0], sbuf8, spitch,
- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ drm_draw_blit32(&map, sb->pitch[0], sbuf8, spitch,
+ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
default:
WARN_ONCE(1, "Can't blit with pixel width %d\n", sb->format->cpp[0]);
}
}
-static void drm_panic_fill16(struct iosys_map *dmap, unsigned int dpitch,
- unsigned int height, unsigned int width,
- u16 color)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
-}
-
-static void drm_panic_fill24(struct iosys_map *dmap, unsigned int dpitch,
- unsigned int height, unsigned int width,
- u32 color)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
- unsigned int off = y * dpitch + x * 3;
-
- /* write blue-green-red to output in little endianness */
- iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
- iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
- iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
- }
- }
-}
-
-static void drm_panic_fill32(struct iosys_map *dmap, unsigned int dpitch,
- unsigned int height, unsigned int width,
- u32 color)
-{
- unsigned int y, x;
-
- for (y = 0; y < height; y++)
- for (x = 0; x < width; x++)
- iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
-}
-
static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
struct drm_rect *clip,
u32 color)
@@ -432,27 +230,22 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
switch (sb->format->cpp[0]) {
case 2:
- drm_panic_fill16(&map, sb->pitch[0], drm_rect_height(clip),
- drm_rect_width(clip), color);
+ drm_draw_fill16(&map, sb->pitch[0], drm_rect_height(clip),
+ drm_rect_width(clip), color);
break;
case 3:
- drm_panic_fill24(&map, sb->pitch[0], drm_rect_height(clip),
- drm_rect_width(clip), color);
+ drm_draw_fill24(&map, sb->pitch[0], drm_rect_height(clip),
+ drm_rect_width(clip), color);
break;
case 4:
- drm_panic_fill32(&map, sb->pitch[0], drm_rect_height(clip),
- drm_rect_width(clip), color);
+ drm_draw_fill32(&map, sb->pitch[0], drm_rect_height(clip),
+ drm_rect_width(clip), color);
break;
default:
WARN_ONCE(1, "Can't fill with pixel width %d\n", sb->format->cpp[0]);
}
}
-static const u8 *get_char_bitmap(const struct font_desc *font, char c, size_t font_pitch)
-{
- return font->data + (c * font->height) * font_pitch;
-}
-
static unsigned int get_max_line_len(const struct drm_panic_line *lines, int len)
{
int i;
@@ -491,7 +284,7 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb,
rec.x1 += (drm_rect_width(clip) - (line_len * font->width)) / 2;
for (j = 0; j < line_len; j++) {
- src = get_char_bitmap(font, msg[i].txt[j], font_pitch);
+ src = drm_draw_get_char_bitmap(font, msg[i].txt[j], font_pitch);
rec.x2 = rec.x1 + font->width;
drm_panic_blit(sb, &rec, src, font_pitch, 1, color);
rec.x1 += font->width;
@@ -523,8 +316,10 @@ static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *
static void draw_panic_static_user(struct drm_scanout_buffer *sb)
{
- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
+ sb->format->format);
+ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
+ sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen, r_logo, r_msg;
unsigned int msg_width, msg_height;
@@ -590,8 +385,10 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
*/
static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
{
- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
+ sb->format->format);
+ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
+ sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height);
struct kmsg_dump_iter iter;
@@ -781,8 +578,10 @@ static int drm_panic_get_qr_code(u8 **qr_image)
*/
static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
{
- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
+ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
+ sb->format->format);
+ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
+ sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen, r_logo, r_msg, r_qr, r_qr_canvas;
unsigned int max_qr_size, scale;
@@ -868,7 +667,7 @@ static bool drm_panic_is_format_supported(const struct drm_format_info *format)
{
if (format->num_planes != 1)
return false;
- return convert_from_xrgb8888(0xffffff, format->format) != 0;
+ return drm_draw_color_from_xrgb8888(0xffffff, format->format) != 0;
}
static void draw_panic_dispatch(struct drm_scanout_buffer *sb)
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index 1ef56cb07dfb..bcf248f69252 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -209,12 +209,9 @@ const FORMAT_INFOS_QR_L: [u16; 8] = [
impl Version {
/// Returns the smallest QR version that can hold these segments.
fn from_segments(segments: &[&Segment<'_>]) -> Option<Version> {
- for v in (1..=40).map(|k| Version(k)) {
- if v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum() {
- return Some(v);
- }
- }
- None
+ (1..=40)
+ .map(Version)
+ .find(|&v| v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum())
}
fn width(&self) -> u8 {
@@ -242,7 +239,7 @@ impl Version {
}
fn alignment_pattern(&self) -> &'static [u8] {
- &ALIGNMENT_PATTERNS[self.0 - 1]
+ ALIGNMENT_PATTERNS[self.0 - 1]
}
fn poly(&self) -> &'static [u8] {
@@ -479,7 +476,7 @@ struct EncodedMsg<'a> {
/// Data to be put in the QR code, with correct segment encoding, padding, and
/// error correction codes.
impl EncodedMsg<'_> {
- fn new<'a, 'b>(segments: &[&Segment<'b>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> {
+ fn new<'a>(segments: &[&Segment<'_>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> {
let version = Version::from_segments(segments)?;
let ec_size = version.ec_size();
let g1_blocks = version.g1_blocks();
@@ -492,7 +489,7 @@ impl EncodedMsg<'_> {
data.fill(0);
let mut em = EncodedMsg {
- data: data,
+ data,
ec_size,
g1_blocks,
g2_blocks,
@@ -722,7 +719,10 @@ impl QrImage<'_> {
fn is_finder(&self, x: u8, y: u8) -> bool {
let end = self.width - 8;
- (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8)
+ #[expect(clippy::nonminimal_bool)]
+ {
+ (x < 8 && y < 8) || (x < 8 && y >= end) || (x >= end && y < 8)
+ }
}
// Alignment pattern: 5x5 squares in a grid.
@@ -929,10 +929,9 @@ impl QrImage<'_> {
/// * `tmp` must be valid for reading and writing for `tmp_size` bytes.
///
/// They must remain valid for the duration of the function call.
-
#[no_mangle]
pub unsafe extern "C" fn drm_panic_qr_generate(
- url: *const i8,
+ url: *const kernel::ffi::c_char,
data: *mut u8,
data_len: usize,
data_size: usize,
@@ -979,10 +978,11 @@ pub unsafe extern "C" fn drm_panic_qr_generate(
/// * `url_len`: Length of the URL.
///
/// * If `url_len` > 0, remove the 2 segments header/length and also count the
-/// conversion to numeric segments.
+/// conversion to numeric segments.
/// * If `url_len` = 0, only removes 3 bytes for 1 binary segment.
#[no_mangle]
pub extern "C" fn drm_panic_qr_max_data_size(version: u8, url_len: usize) -> usize {
+ #[expect(clippy::manual_range_contains)]
if version < 1 || version > 40 {
return 0;
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 0e3f8adf162f..32a8781cfd67 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -40,7 +40,7 @@
#include "drm_internal.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
/**
* DOC: overview and lifetime rules
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 0081190201a7..79517bd4418f 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -235,6 +235,20 @@ void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf)
}
EXPORT_SYMBOL(__drm_printfn_err);
+void __drm_printfn_line(struct drm_printer *p, struct va_format *vaf)
+{
+ unsigned int counter = ++p->line.counter;
+ const char *prefix = p->prefix ?: "";
+ const char *pad = p->prefix ? " " : "";
+
+ if (p->line.series)
+ drm_printf(p->arg, "%s%s%u.%u: %pV",
+ prefix, pad, p->line.series, counter, vaf);
+ else
+ drm_printf(p->arg, "%s%s%u: %pV", prefix, pad, counter, vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_line);
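The callback prefixes every message with a line counter, optionally qualified
by a series number. A sketch of the wiring, assuming the struct drm_printer
line fields added by this patch and the existing drm_dbg_printer() helper; a
convenience constructor (e.g. drm_line_printer()) is assumed to accompany this
callback rather than hand-initializing the struct as shown:

	struct drm_printer dbg = drm_dbg_printer(drm, DRM_UT_CORE, "dump");
	struct drm_printer line = {
		.printfn = __drm_printfn_line,
		.arg	 = &dbg,		/* underlying printer */
		.prefix	 = "GuC log",		/* hypothetical prefix */
		.line	 = { .series = 2 },	/* second dump of a series */
	};

	drm_printf(&line, "first\n");	/* -> "GuC log 2.1: first" */
	drm_printf(&line, "second\n");	/* -> "GuC log 2.2: second" */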
+
/**
* drm_puts - print a const string to a &drm_printer stream
* @p: the &drm printer
@@ -376,3 +390,26 @@ void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset)
}
}
EXPORT_SYMBOL(drm_print_regset32);
+
+/**
+ * drm_print_hex_dump - print a hex dump to a &drm_printer stream
+ * @p: The &drm_printer
+ * @prefix: Prefix for each line, may be NULL for no prefix
+ * @buf: Buffer to dump
+ * @len: Length of buffer
+ *
+ * Print a hex dump to the &drm_printer, with 16 space-separated hex bytes per
+ * line, optionally with a prefix on each line. No separator is added after the
+ * prefix.
+ */
+void drm_print_hex_dump(struct drm_printer *p, const char *prefix,
+ const u8 *buf, size_t len)
+{
+ int i;
+
+ for (i = 0; i < len; i += 16) {
+ int bytes_per_line = min(16, len - i);
+
+ drm_printf(p, "%s%*ph\n", prefix ?: "", bytes_per_line, buf + i);
+ }
+}
+EXPORT_SYMBOL(drm_print_hex_dump);
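Usage sketch (hypothetical buffer and device; drm_info_printer() is an
existing helper):

	u8 buf[20] = { 0xde, 0xad, 0xbe, 0xef };	/* rest zero-initialized */
	struct drm_printer p = drm_info_printer(dev);

	drm_print_hex_dump(&p, "\t", buf, sizeof(buf));
	/*
	 * Emits two lines: 16 bytes, then the remaining 4, e.g.:
	 *	de ad be ef 00 00 00 00 00 00 00 00 00 00 00 00
	 *	00 00 00 00
	 */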
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 92f21764246f..96b266b37ba4 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -33,7 +33,7 @@
#include <linux/moduleparam.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 8e3d2d7060f8..4f2ab8a7b50f 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -712,16 +712,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
int fd, u32 *handle)
{
struct drm_syncobj *syncobj;
- struct fd f = fdget(fd);
+ CLASS(fd, f)(fd);
int ret;
- if (!fd_file(f))
+ if (fd_empty(f))
return -EINVAL;
- if (fd_file(f)->f_op != &drm_syncobj_file_fops) {
- fdput(f);
+ if (fd_file(f)->f_op != &drm_syncobj_file_fops)
return -EINVAL;
- }
/* take a reference to put in the idr */
syncobj = fd_file(f)->private_data;
@@ -739,7 +737,6 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
} else
drm_syncobj_put(syncobj);
- fdput(f);
return ret;
}
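The CLASS(fd, ...) conversion leans on the kernel's scope-based cleanup: the
fdput() now runs automatically on every return path, which is what lets the
early-exit branches above drop their explicit cleanup. Roughly (a simplified
sketch, not the exact macro expansion; auto_fdput is a stand-in name for the
generated destructor):

	/* CLASS(fd, f)(fd); behaves approximately like: */
	struct fd f __attribute__((cleanup(auto_fdput))) = fdget(fd);

	/* ... so any 'return' after this point implicitly calls fdput(f) */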
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index 1752ffb44e1d..9cc71120246f 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -277,7 +277,7 @@ int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
INIT_LIST_HEAD(&vblank->pending_work);
init_waitqueue_head(&vblank->work_wait_queue);
- worker = kthread_create_worker(0, "card%d-crtc%d",
+ worker = kthread_run_worker(0, "card%d-crtc%d",
vblank->dev->primary->index,
vblank->pipe);
if (IS_ERR(worker))
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index a031c335bdb9..33a3c98a962d 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -100,15 +100,9 @@ drm_writeback_fence_get_timeline_name(struct dma_fence *fence)
return wb_connector->timeline_name;
}
-static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static const struct dma_fence_ops drm_writeback_fence_ops = {
.get_driver_name = drm_writeback_fence_get_driver_name,
.get_timeline_name = drm_writeback_fence_get_timeline_name,
- .enable_signaling = drm_writeback_fence_enable_signaling,
};
static int create_writeback_properties(struct drm_device *dev)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 384df1659be6..b13a17276d07 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -482,7 +482,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
} else {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
VIVS_GL_FLUSH_CACHE_DEPTH |
- VIVS_GL_FLUSH_CACHE_COLOR);
+ VIVS_GL_FLUSH_CACHE_COLOR |
+ VIVS_GL_FLUSH_CACHE_SHADER_L1);
if (has_blt) {
CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 721d633aece9..3a221923f15d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -5,13 +5,10 @@
#include <linux/dma-mapping.h>
-#include <drm/drm_mm.h>
-
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
-#include "etnaviv_perfmon.h"
#define SUBALLOC_SIZE SZ_512K
#define SUBALLOC_GRANULE SZ_4K
@@ -55,6 +52,7 @@ etnaviv_cmdbuf_suballoc_new(struct device *dev)
return suballoc;
free_suballoc:
+ mutex_destroy(&suballoc->lock);
kfree(suballoc);
return ERR_PTR(ret);
@@ -79,6 +77,7 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
{
dma_free_wc(suballoc->dev, SUBALLOC_SIZE, suballoc->vaddr,
suballoc->paddr);
+ mutex_destroy(&suballoc->lock);
kfree(suballoc);
}
@@ -100,7 +99,7 @@ retry:
mutex_unlock(&suballoc->lock);
ret = wait_event_interruptible_timeout(suballoc->free_event,
suballoc->free_space,
- msecs_to_jiffies(10 * 1000));
+ secs_to_jiffies(10));
if (!ret) {
dev_err(suballoc->dev,
"Timeout waiting for cmdbuf space\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 6500f3999c5f..3e91747ed339 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -488,7 +488,16 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_FOPS(fops);
+static void etnaviv_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ drm_show_memory_stats(p, file);
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ DRM_GEM_FOPS,
+ .show_fdinfo = drm_show_fdinfo,
+};
static const struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
@@ -498,12 +507,12 @@ static const struct drm_driver etnaviv_drm_driver = {
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
#endif
+ .show_fdinfo = etnaviv_show_fdinfo,
.ioctls = etnaviv_ioctls,
.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
.fops = &fops,
.name = "etnaviv",
.desc = "etnaviv DRM",
- .date = "20151214",
.major = 1,
.minor = 4,
};
@@ -538,6 +547,16 @@ static int etnaviv_bind(struct device *dev)
priv->num_gpus = 0;
priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ /*
+ * If the GPU is part of a system with DMA addressing limitations,
+ * request pages for our SHM backend buffers from the DMA32 zone to
+ * hopefully avoid performance-killing SWIOTLB bounce buffering.
+ */
+ if (dma_addressing_limited(dev)) {
+ priv->shm_gfp_mask |= GFP_DMA32;
+ priv->shm_gfp_mask &= ~__GFP_HIGHMEM;
+ }
+
priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
if (IS_ERR(priv->cmdbuf_suballoc)) {
dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
@@ -564,6 +583,7 @@ out_unbind:
out_destroy_suballoc:
etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
out_free_priv:
+ mutex_destroy(&priv->gem_lock);
kfree(priv);
out_put:
drm_dev_put(drm);
@@ -608,7 +628,7 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
if (!of_device_is_available(core_node))
continue;
- drm_of_component_match_add(&pdev->dev, &match,
+ drm_of_component_match_add(dev, &match,
component_compare_of, core_node);
}
} else {
@@ -631,9 +651,9 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
* bit to make sure we are allocating the command buffers and
* TLBs in the lower 4 GiB address space.
*/
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- dev_dbg(&pdev->dev, "No suitable DMA available\n");
+ if (dma_set_mask(dev, DMA_BIT_MASK(40)) ||
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+ dev_dbg(dev, "No suitable DMA available\n");
return -ENODEV;
}
@@ -644,7 +664,7 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
*/
first_node = etnaviv_of_first_available_node();
if (first_node) {
- of_dma_configure(&pdev->dev, first_node, true);
+ of_dma_configure(dev, first_node, true);
of_node_put(first_node);
}
@@ -658,7 +678,7 @@ static void etnaviv_pdev_remove(struct platform_device *pdev)
static struct platform_driver etnaviv_platform_driver = {
.probe = etnaviv_pdev_probe,
- .remove_new = etnaviv_pdev_remove,
+ .remove = etnaviv_pdev_remove,
.driver = {
.name = "etnaviv",
},
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5c0c9d4e3be1..2f844e82bc46 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -342,6 +342,7 @@ void *etnaviv_gem_vmap(struct drm_gem_object *obj)
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
struct page **pages;
+ pgprot_t prot;
lockdep_assert_held(&obj->lock);
@@ -349,8 +350,19 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
if (IS_ERR(pages))
return NULL;
- return vmap(pages, obj->base.size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ switch (obj->flags & ETNA_BO_CACHE_MASK) {
+ case ETNA_BO_CACHED:
+ prot = PAGE_KERNEL;
+ break;
+ case ETNA_BO_UNCACHED:
+ prot = pgprot_noncached(PAGE_KERNEL);
+ break;
+ case ETNA_BO_WC:
+ default:
+ prot = pgprot_writecombine(PAGE_KERNEL);
+ }
+
+ return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot);
}
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
@@ -514,6 +526,7 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
etnaviv_obj->ops->release(etnaviv_obj);
drm_gem_object_release(obj);
+ mutex_destroy(&etnaviv_obj->lock);
kfree(etnaviv_obj);
}
@@ -527,6 +540,17 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
mutex_unlock(&priv->gem_lock);
}
+static enum drm_gem_object_status etnaviv_gem_status(struct drm_gem_object *obj)
+{
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
+ enum drm_gem_object_status status = 0;
+
+ if (etnaviv_obj->pages)
+ status |= DRM_GEM_OBJECT_RESIDENT;
+
+ return status;
+}
+
static const struct vm_operations_struct vm_ops = {
.fault = etnaviv_gem_fault,
.open = drm_gem_vm_open,
@@ -540,10 +564,11 @@ static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
.get_sg_table = etnaviv_gem_prime_get_sg_table,
.vmap = etnaviv_gem_prime_vmap,
.mmap = etnaviv_gem_mmap,
+ .status = etnaviv_gem_status,
.vm_ops = &vm_ops,
};
-static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags,
+static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
struct etnaviv_gem_object *etnaviv_obj;
@@ -570,6 +595,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags,
if (!etnaviv_obj)
return -ENOMEM;
+ etnaviv_obj->size = ALIGN(size, SZ_4K);
etnaviv_obj->flags = flags;
etnaviv_obj->ops = ops;
@@ -590,15 +616,13 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
struct drm_gem_object *obj = NULL;
int ret;
- size = PAGE_ALIGN(size);
-
- ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj);
+ ret = etnaviv_gem_new_impl(dev, size, flags, &etnaviv_gem_shmem_ops, &obj);
if (ret)
goto fail;
lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
- ret = drm_gem_object_init(dev, obj, size);
+ ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size));
if (ret)
goto fail;
@@ -627,7 +651,7 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
struct drm_gem_object *obj;
int ret;
- ret = etnaviv_gem_new_impl(dev, flags, ops, &obj);
+ ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
if (ret)
return ret;
@@ -686,7 +710,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
kfree(etnaviv_obj->sgt);
}
if (etnaviv_obj->pages) {
- int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+ unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
unpin_user_pages(etnaviv_obj->pages, npages);
kvfree(etnaviv_obj->pages);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index a42d260cac2c..e5ee82a0674c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -36,12 +36,15 @@ struct etnaviv_gem_object {
const struct etnaviv_gem_ops *ops;
struct mutex lock;
+ /*
+ * The actual size that is visible to the GPU, not necessarily
+ * PAGE_SIZE aligned, but which should be aligned to the GPU page size.
+ */
+ u32 size;
u32 flags;
struct list_head gem_node;
- struct etnaviv_gpu *gpu; /* non-null if active */
atomic_t gpu_active;
- u32 access;
struct page **pages;
struct sg_table *sgt;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 3524b5811682..42e57d142554 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -10,14 +10,14 @@
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
static struct lock_class_key etnaviv_prime_lock_class;
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
+ unsigned int npages = obj->size >> PAGE_SHIFT;
if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3d0f8d182506..3c0a5c3e0e3d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -6,7 +6,6 @@
#include <drm/drm_file.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
-#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 7c7f97793ddd..cf0d9049bcf1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/thermal.h>
#include "etnaviv_cmdbuf.h"
@@ -172,6 +173,29 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
return 0;
}
+static int etnaviv_gpu_reset_deassert(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ /*
+ * 32 core clock cycles (of the slowest clock) are required before
+ * deassertion; waiting 1 microsecond should cover all implementations
+ * without computing the exact per-clock delay.
+ */
+ usleep_range(1, 2);
+
+ ret = reset_control_deassert(gpu->rst);
+ if (ret)
+ return ret;
+
+ /*
+ * 128 core clock cycles (of the slowest clock) are required before any
+ * activity on the AHB; waiting 1 microsecond should cover all
+ * implementations without computing the exact per-clock delay.
+ */
+ usleep_range(1, 2);
+
+ return 0;
+}
+
static inline bool etnaviv_is_model_rev(struct etnaviv_gpu *gpu, u32 model, u32 revision)
{
return gpu->identity.model == model &&
@@ -574,8 +598,8 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
continue;
}
- /* disable debug registers, as they are not normally needed */
- control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+ /* enable debug register access */
+ control &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
failed = false;
@@ -799,6 +823,12 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto pm_put;
}
+ ret = etnaviv_gpu_reset_deassert(gpu);
+ if (ret) {
+ dev_err(gpu->dev, "GPU reset deassert failed\n");
+ goto fail;
+ }
+
etnaviv_hw_identify(gpu);
if (gpu->identity.model == 0) {
@@ -839,17 +869,8 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
if (ret)
goto fail;
- /*
- * If the GPU is part of a system with DMA addressing limitations,
- * request pages for our SHM backend buffers from the DMA32 zone to
- * hopefully avoid performance killing SWIOTLB bounce buffering.
- */
- if (dma_addressing_limited(gpu->dev))
- priv->shm_gfp_mask |= GFP_DMA32;
-
/* Create buffer: */
- ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
- PAGE_SIZE);
+ ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, SZ_4K);
if (ret) {
dev_err(gpu->dev, "could not create command buffer\n");
goto fail;
@@ -1330,17 +1351,16 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
{
u32 val;
+ mutex_lock(&gpu->lock);
+
/* disable clock gating */
val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
- /* enable debug register */
- val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
- val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
- gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
-
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
+
+ mutex_unlock(&gpu->lock);
}
static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
@@ -1350,23 +1370,22 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
unsigned int i;
u32 val;
+ mutex_lock(&gpu->lock);
+
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
+ /* enable clock gating */
+ val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
+ val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+ gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
+
+ mutex_unlock(&gpu->lock);
+
for (i = 0; i < submit->nr_pmrs; i++) {
const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
*pmr->bo_vma = pmr->sequence;
}
-
- /* disable debug register */
- val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
- val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
- gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
-
- /* enable clock gating */
- val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
- val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
- gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
}
@@ -1862,7 +1881,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
if (!gpu)
return -ENOMEM;
- gpu->dev = &pdev->dev;
+ gpu->dev = dev;
mutex_init(&gpu->lock);
mutex_init(&gpu->sched_lock);
@@ -1871,13 +1890,24 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
if (IS_ERR(gpu->mmio))
return PTR_ERR(gpu->mmio);
+
+ /* Get Reset: */
+ gpu->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(gpu->rst))
+ return dev_err_probe(dev, PTR_ERR(gpu->rst),
+ "failed to get reset\n");
+
+ err = reset_control_assert(gpu->rst);
+ if (err)
+ return dev_err_probe(dev, err, "failed to assert reset\n");
+
/* Get Interrupt: */
gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0)
return gpu->irq;
- err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
- dev_name(gpu->dev), gpu);
+ err = devm_request_irq(dev, gpu->irq, irq_handler, 0,
+ dev_name(dev), gpu);
if (err) {
dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
return err;
@@ -1914,13 +1944,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
* autosuspend delay is rather arbitrary: no measurements have
* yet been performed to determine an appropriate value.
*/
- pm_runtime_use_autosuspend(gpu->dev);
- pm_runtime_set_autosuspend_delay(gpu->dev, 200);
- pm_runtime_enable(gpu->dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 200);
+ pm_runtime_enable(dev);
- err = component_add(&pdev->dev, &gpu_ops);
+ err = component_add(dev, &gpu_ops);
if (err < 0) {
- dev_err(&pdev->dev, "failed to register component: %d\n", err);
+ dev_err(dev, "failed to register component: %d\n", err);
return err;
}
@@ -1929,8 +1959,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
static void etnaviv_gpu_platform_remove(struct platform_device *pdev)
{
+ struct etnaviv_gpu *gpu = dev_get_drvdata(&pdev->dev);
+
component_del(&pdev->dev, &gpu_ops);
pm_runtime_disable(&pdev->dev);
+
+ mutex_destroy(&gpu->lock);
+ mutex_destroy(&gpu->sched_lock);
}
static int etnaviv_gpu_rpm_suspend(struct device *dev)
@@ -1991,6 +2026,6 @@ struct platform_driver etnaviv_gpu_driver = {
.of_match_table = etnaviv_gpu_match,
},
.probe = etnaviv_gpu_platform_probe,
- .remove_new = etnaviv_gpu_platform_remove,
+ .remove = etnaviv_gpu_platform_remove,
.id_table = gpu_ids,
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 31322195b9e4..5cb46c84e03a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -93,6 +93,7 @@ struct etnaviv_event {
struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;
+struct reset_control;
#define ETNA_NR_EVENTS 30
@@ -144,6 +145,7 @@ struct etnaviv_gpu {
/* hang detection */
u32 hangcheck_dma_addr;
+ u32 hangcheck_primid;
u32 hangcheck_fence;
void __iomem *mmio;
@@ -157,6 +159,7 @@ struct etnaviv_gpu {
struct clk *clk_reg;
struct clk *clk_core;
struct clk *clk_shader;
+ struct reset_control *rst;
unsigned int freq_scale;
unsigned int fe_waitcycles;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 1661d589bf3e..df5192083b20 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -19,12 +19,6 @@ static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
size_t unmapped_page, unmapped = 0;
size_t pgsize = SZ_4K;
- if (!IS_ALIGNED(iova | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
- iova, size, pgsize);
- return;
- }
-
while (unmapped < size) {
unmapped_page = context->global->ops->unmap(context, iova,
pgsize);
@@ -45,12 +39,6 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
size_t orig_size = size;
int ret = 0;
- if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
- iova, &paddr, size, pgsize);
- return -EINVAL;
- }
-
while (size) {
ret = context->global->ops->map(context, iova, paddr, pgsize,
prot);
@@ -69,9 +57,11 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
return ret;
}
-static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
+ u32 iova, unsigned int va_len,
struct sg_table *sgt, int prot)
-{ struct scatterlist *sg;
+{
+ struct scatterlist *sg;
unsigned int da = iova;
unsigned int i;
int ret;
@@ -80,15 +70,25 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
return -EINVAL;
for_each_sgtable_dma_sg(sgt, sg, i) {
- phys_addr_t pa = sg_dma_address(sg) - sg->offset;
- size_t bytes = sg_dma_len(sg) + sg->offset;
+ phys_addr_t pa = sg_dma_address(sg);
+ unsigned int da_len = sg_dma_len(sg);
+ unsigned int bytes = min_t(unsigned int, da_len, va_len);
+
+ VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);
- VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+ if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
+ dev_err(context->global->dev,
+ "unaligned: iova 0x%x pa %pa size 0x%x\n",
+ iova, &pa, bytes);
+ ret = -EINVAL;
+ goto fail;
+ }
ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
goto fail;
+ va_len -= bytes;
da += bytes;
}
@@ -104,21 +104,7 @@ fail:
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
struct sg_table *sgt, unsigned len)
{
- struct scatterlist *sg;
- unsigned int da = iova;
- int i;
-
- for_each_sgtable_dma_sg(sgt, sg, i) {
- size_t bytes = sg_dma_len(sg) + sg->offset;
-
- etnaviv_context_unmap(context, da, bytes);
-
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
-
- BUG_ON(!PAGE_ALIGNED(bytes));
-
- da += bytes;
- }
+ etnaviv_context_unmap(context, iova, len);
context->flush_seq++;
}
@@ -131,7 +117,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
lockdep_assert_held(&context->lock);
etnaviv_iommu_unmap(context, mapping->vram_node.start,
- etnaviv_obj->sgt, etnaviv_obj->base.size);
+ etnaviv_obj->sgt, etnaviv_obj->size);
drm_mm_remove_node(&mapping->vram_node);
}
@@ -305,16 +291,14 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
node = &mapping->vram_node;
if (va)
- ret = etnaviv_iommu_insert_exact(context, node,
- etnaviv_obj->base.size, va);
+ ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
else
- ret = etnaviv_iommu_find_iova(context, node,
- etnaviv_obj->base.size);
+ ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
if (ret < 0)
goto unlock;
mapping->iova = node->start;
- ret = etnaviv_iommu_map(context, node->start, sgt,
+ ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
if (ret < 0) {
@@ -358,7 +342,7 @@ static void etnaviv_iommu_context_free(struct kref *kref)
container_of(kref, struct etnaviv_iommu_context, refcount);
etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
-
+ mutex_destroy(&context->lock);
context->global->ops->free(context);
}
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index c01a147f0dfd..7f8ac0178547 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -61,7 +61,6 @@ struct etnaviv_iommu_global {
/* P(age) T(able) A(rray) */
u64 *pta_cpu;
dma_addr_t pta_dma;
- struct spinlock pta_lock;
DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES);
} v2;
};
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index dc9dea664a28..d53a5c293373 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -62,6 +62,8 @@ static u32 pipe_perf_reg_read(struct etnaviv_gpu *gpu,
u32 value = 0;
unsigned i;
+ lockdep_assert_held(&gpu->lock);
+
for (i = 0; i < gpu->identity.pixel_pipes; i++) {
pipe_select(gpu, clock, i);
value += perf_reg_read(gpu, domain, signal);
@@ -81,6 +83,8 @@ static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
u32 value = 0;
unsigned i;
+ lockdep_assert_held(&gpu->lock);
+
for (i = 0; i < gpu->identity.pixel_pipes; i++) {
pipe_select(gpu, clock, i);
value += gpu_read(gpu, signal->data);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index ab9ca4824b62..5b67eda122db 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -11,6 +11,7 @@
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
+#include "state_hi.xml.h"
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -35,7 +36,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct etnaviv_gpu *gpu = submit->gpu;
- u32 dma_addr;
+ u32 dma_addr, primid = 0;
int change;
/*
@@ -52,10 +53,22 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*/
dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
change = dma_addr - gpu->hangcheck_dma_addr;
+ if (submit->exec_state == ETNA_PIPE_3D) {
+ /* guard against concurrent usage from perfmon_sample */
+ mutex_lock(&gpu->lock);
+ gpu_write(gpu, VIVS_MC_PROFILE_CONFIG0,
+ VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM <<
+ VIVS_MC_PROFILE_CONFIG0_FE__SHIFT);
+ primid = gpu_read(gpu, VIVS_MC_PROFILE_FE_READ);
+ mutex_unlock(&gpu->lock);
+ }
if (gpu->state == ETNA_GPU_STATE_RUNNING &&
(gpu->completed_fence != gpu->hangcheck_fence ||
- change < 0 || change > 16)) {
+ change < 0 || change > 16 ||
+ (submit->exec_state == ETNA_PIPE_3D &&
+ gpu->hangcheck_primid != primid))) {
gpu->hangcheck_dma_addr = dma_addr;
+ gpu->hangcheck_primid = primid;
gpu->hangcheck_fence = gpu->completed_fence;
goto out_no_timeout;
}
@@ -72,7 +85,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
drm_sched_resubmit_jobs(&gpu->sched);
- drm_sched_start(&gpu->sched);
+ drm_sched_start(&gpu->sched, 0);
return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
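Condensed, the updated hang heuristic treats any of three signals as forward
progress: a newly signalled fence, FE DMA address movement, or (for 3D jobs) a
changed current-primitive ID. A restating sketch, not a real kernel helper:

	static bool made_progress(struct etnaviv_gpu *gpu, u32 dma_addr,
				  u32 primid, bool is_3d)
	{
		int change = dma_addr - gpu->hangcheck_dma_addr;

		return gpu->completed_fence != gpu->hangcheck_fence ||	/* fence moved */
		       change < 0 || change > 16 ||			/* FE advanced */
		       (is_3d && gpu->hangcheck_primid != primid);	/* new primitive */
	}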
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 829bc528e618..f7bc5f6e20ff 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state.xml ( 29355 bytes, from 2024-01-19 10:18:54)
-- common.xml ( 35664 bytes, from 2023-12-06 10:55:32)
-- common_3d.xml ( 15069 bytes, from 2023-11-22 10:05:24)
-- state_hi.xml ( 35854 bytes, from 2023-12-11 15:50:17)
-- copyright.xml ( 1597 bytes, from 2016-11-10 13:58:32)
-- state_2d.xml ( 52271 bytes, from 2023-06-02 12:35:03)
-- state_3d.xml ( 89522 bytes, from 2024-01-19 10:18:54)
-- state_blt.xml ( 14592 bytes, from 2023-11-22 10:05:09)
-- state_vg.xml ( 5975 bytes, from 2016-11-10 13:58:32)
-
-Copyright (C) 2012-2023 by the following authors:
+- state.xml ( 30729 bytes, from 2024-06-21 11:31:54)
+- common.xml ( 35664 bytes, from 2023-12-13 09:33:18)
+- common_3d.xml ( 15069 bytes, from 2023-12-13 09:33:18)
+- state_hi.xml ( 35909 bytes, from 2024-06-21 11:31:54)
+- copyright.xml ( 1597 bytes, from 2020-10-28 12:56:03)
+- state_2d.xml ( 52271 bytes, from 2023-05-30 20:50:02)
+- state_3d.xml ( 89626 bytes, from 2024-06-21 11:32:57)
+- state_blt.xml ( 14592 bytes, from 2023-12-13 09:33:18)
+- state_vg.xml ( 5975 bytes, from 2020-10-28 12:56:03)
+
+Copyright (C) 2012-2024 by the following authors:
- Wladimir J. van der Laan <laanwj@gmail.com>
- Christian Gmeiner <christian.gmeiner@gmail.com>
- Lucas Stach <l.stach@pengutronix.de>
@@ -467,6 +467,7 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_CONFIG0 0x00000470
#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x000000ff
#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG0_FE_CURRENT_PRIM 0x00000009
#define VIVS_MC_PROFILE_CONFIG0_FE_DRAW_COUNT 0x0000000a
#define VIVS_MC_PROFILE_CONFIG0_FE_OUT_VERTEX_COUNT 0x0000000b
#define VIVS_MC_PROFILE_CONFIG0_FE_CACHE_MISS_COUNT 0x0000000c
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 733b109a5095..0d13828e7d9e 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -4,6 +4,7 @@ config DRM_EXYNOS
depends on OF && DRM && COMMON_CLK
depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on MMU
+ select DRM_CLIENT_SELECTION
select DRM_DISPLAY_HELPER if DRM_EXYNOS_DP
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 0ef7bc8848b0..b9e206303b48 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -871,7 +871,7 @@ static void exynos5433_decon_remove(struct platform_device *pdev)
struct platform_driver exynos5433_decon_driver = {
.probe = exynos5433_decon_probe,
- .remove_new = exynos5433_decon_remove,
+ .remove = exynos5433_decon_remove,
.driver = {
.name = "exynos5433-decon",
.pm = pm_ptr(&exynos5433_decon_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 0d185c0564b9..5170f72b0830 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -37,6 +37,24 @@
#define WINDOWS_NR 2
+struct decon_data {
+ unsigned int vidw_buf_start_base;
+ unsigned int shadowcon_win_protect_shift;
+ unsigned int wincon_burstlen_shift;
+};
+
+static struct decon_data exynos7_decon_data = {
+ .vidw_buf_start_base = 0x80,
+ .shadowcon_win_protect_shift = 10,
+ .wincon_burstlen_shift = 11,
+};
+
+static struct decon_data exynos7870_decon_data = {
+ .vidw_buf_start_base = 0x880,
+ .shadowcon_win_protect_shift = 8,
+ .wincon_burstlen_shift = 10,
+};
+
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
@@ -55,11 +73,19 @@ struct decon_context {
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
+ const struct decon_data *data;
struct drm_encoder *encoder;
};
static const struct of_device_id decon_driver_dt_match[] = {
- {.compatible = "samsung,exynos7-decon"},
+ {
+ .compatible = "samsung,exynos7-decon",
+ .data = &exynos7_decon_data,
+ },
+ {
+ .compatible = "samsung,exynos7870-decon",
+ .data = &exynos7870_decon_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, decon_driver_dt_match);
@@ -81,10 +107,31 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
DRM_PLANE_TYPE_CURSOR,
};
-static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
+/**
+ * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
+ *
+ * @ctx: display and enhancement controller context
+ * @win: window to protect registers for
+ * @protect: 1 to protect (disable updates)
+ */
+static void decon_shadow_protect_win(struct decon_context *ctx,
+ unsigned int win, bool protect)
{
- struct decon_context *ctx = crtc->ctx;
+ u32 bits, val;
+ unsigned int shift = ctx->data->shadowcon_win_protect_shift;
+
+ bits = SHADOWCON_WINx_PROTECT(shift, win);
+
+ val = readl(ctx->regs + SHADOWCON);
+ if (protect)
+ val |= bits;
+ else
+ val &= ~bits;
+ writel(val, ctx->regs + SHADOWCON);
+}
+static void decon_wait_for_vblank(struct decon_context *ctx)
+{
if (ctx->suspended)
return;
@@ -100,25 +147,33 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
-static void decon_clear_channels(struct exynos_drm_crtc *crtc)
+static void decon_clear_channels(struct decon_context *ctx)
{
- struct decon_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
+ u32 val;
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
- u32 val = readl(ctx->regs + WINCON(win));
+ val = readl(ctx->regs + WINCON(win));
if (val & WINCONx_ENWIN) {
+ decon_shadow_protect_win(ctx, win, true);
+
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
ch_enabled = 1;
+
+ decon_shadow_protect_win(ctx, win, false);
}
}
+ val = readl(ctx->regs + DECON_UPDATE);
+ val |= DECON_UPDATE_STANDALONE_F;
+ writel(val, ctx->regs + DECON_UPDATE);
+
/* Wait for vsync, as disable channel takes effect at next vsync */
if (ch_enabled)
- decon_wait_for_vblank(ctx->crtc);
+ decon_wait_for_vblank(ctx);
}
static int decon_ctx_initialize(struct decon_context *ctx,
@@ -126,7 +181,7 @@ static int decon_ctx_initialize(struct decon_context *ctx,
{
ctx->drm_dev = drm_dev;
- decon_clear_channels(ctx->crtc);
+ decon_clear_channels(ctx);
return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}
@@ -140,7 +195,7 @@ static void decon_ctx_remove(struct decon_context *ctx)
static u32 decon_calc_clkdiv(struct decon_context *ctx,
const struct drm_display_mode *mode)
{
- unsigned long ideal_clk = mode->clock;
+ unsigned long ideal_clk = mode->clock * 1000;
u32 clkdiv;
/* Find the clock divider value that gets us closest to ideal_clk */
@@ -263,6 +318,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
{
unsigned long val;
int padding;
+ unsigned int shift = ctx->data->wincon_burstlen_shift;
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_BPPMODE_MASK;
@@ -270,44 +326,44 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
val |= WINCONx_BPPMODE_16BPP_565;
- val |= WINCONx_BURSTLEN_16WORD;
+ val |= WINCONx_BURSTLEN_16WORD(shift);
break;
case DRM_FORMAT_XRGB8888:
val |= WINCONx_BPPMODE_24BPP_xRGB;
- val |= WINCONx_BURSTLEN_16WORD;
+ val |= WINCONx_BURSTLEN_16WORD(shift);
break;
case DRM_FORMAT_XBGR8888:
val |= WINCONx_BPPMODE_24BPP_xBGR;
- val |= WINCONx_BURSTLEN_16WORD;
+ val |= WINCONx_BURSTLEN_16WORD(shift);
break;
case DRM_FORMAT_RGBX8888:
val |= WINCONx_BPPMODE_24BPP_RGBx;
- val |= WINCONx_BURSTLEN_16WORD;
+ val |= WINCONx_BURSTLEN_16WORD(shift);
break;
case DRM_FORMAT_BGRX8888:
val |= WINCONx_BPPMODE_24BPP_BGRx;
- val |= WINCONx_BURSTLEN_16WORD;
+ val |= WINCONx_BURSTLEN_16WORD(shift);
break;
case DRM_FORMAT_ARGB8888:
val |= WINCONx_BPPMODE_32BPP_ARGB | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
- val |= WINCONx_BURSTLEN_16WORD;
+ val |= WINCONx_BURSTLEN_16WORD(shift);
break;
case DRM_FORMAT_ABGR8888:
val |= WINCONx_BPPMODE_32BPP_ABGR | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
- val |= WINCONx_BURSTLEN_16WORD;
+ val |= WINCONx_BURSTLEN_16WORD(shift);
break;
case DRM_FORMAT_RGBA8888:
val |= WINCONx_BPPMODE_32BPP_RGBA | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
- val |= WINCONx_BURSTLEN_16WORD;
+ val |= WINCONx_BURSTLEN_16WORD(shift);
break;
case DRM_FORMAT_BGRA8888:
default:
val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
- val |= WINCONx_BURSTLEN_16WORD;
+ val |= WINCONx_BURSTLEN_16WORD(shift);
break;
}
@@ -323,8 +379,8 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
padding = (fb->pitches[0] / fb->format->cpp[0]) - fb->width;
if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
- val &= ~WINCONx_BURSTLEN_MASK;
- val |= WINCONx_BURSTLEN_8WORD;
+ val &= ~WINCONx_BURSTLEN_MASK(shift);
+ val |= WINCONx_BURSTLEN_8WORD(shift);
}
writel(val, ctx->regs + WINCON(win));
@@ -343,28 +399,6 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}
-/**
- * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
- *
- * @ctx: display and enhancement controller context
- * @win: window to protect registers for
- * @protect: 1 to protect (disable updates)
- */
-static void decon_shadow_protect_win(struct decon_context *ctx,
- unsigned int win, bool protect)
-{
- u32 bits, val;
-
- bits = SHADOWCON_WINx_PROTECT(win);
-
- val = readl(ctx->regs + SHADOWCON);
- if (protect)
- val |= bits;
- else
- val &= ~bits;
- writel(val, ctx->regs + SHADOWCON);
-}
-
static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -391,6 +425,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int win = plane->index;
unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
+ unsigned int vidw_addr0_base = ctx->data->vidw_buf_start_base;
if (ctx->suspended)
return;
@@ -407,7 +442,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
/* buffer start address */
val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
- writel(val, ctx->regs + VIDW_BUF_START(win));
+ writel(val, ctx->regs + VIDW_BUF_START(vidw_addr0_base, win));
padding = (pitch / cpp) - fb->width;
@@ -689,6 +724,7 @@ static int decon_probe(struct platform_device *pdev)
ctx->dev = dev;
ctx->suspended = true;
+ ctx->data = of_device_get_match_data(dev);
i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
if (i80_if_timings)
@@ -838,7 +874,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend,
struct platform_driver decon_driver = {
.probe = decon_probe,
- .remove_new = decon_remove,
+ .remove = decon_remove,
.driver = {
.name = "exynos-decon",
.pm = pm_ptr(&exynos7_decon_pm_ops),
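[Editor's note] The Exynos7870 support above follows the standard OF match-data pattern: each compatible string carries a pointer to a per-SoC decon_data table, resolved once at probe time. A minimal sketch under that assumption (example_probe() is hypothetical):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct decon_context *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* &exynos7_decon_data or &exynos7870_decon_data, depending on
	 * which entry of decon_driver_dt_match matched */
	ctx->data = of_device_get_match_data(&pdev->dev);
	if (!ctx->data)
		return -ENODEV;

	platform_set_drvdata(pdev, ctx);
	return 0;
}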
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 22142b293279..5bcf41e0bd04 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -279,7 +279,7 @@ MODULE_DEVICE_TABLE(of, exynos_dp_match);
struct platform_driver dp_driver = {
.probe = exynos_dp_probe,
- .remove_new = exynos_dp_remove,
+ .remove = exynos_dp_remove,
.driver = {
.name = "exynos-dp",
.pm = pm_ptr(&exynos_dp_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 0ed4f2b8595a..1815374c38df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -19,9 +19,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type,
const struct exynos_drm_crtc_ops *ops,
void *context);
-void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
-void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
- struct exynos_drm_plane *exynos_plane);
/* This function gets crtc device matched with out_type. */
struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 7c59e1164a48..f313ae7bc3a3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -13,6 +13,7 @@
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
@@ -34,7 +35,6 @@
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
-#define DRIVER_DATE "20180330"
/*
* Interface history:
@@ -111,12 +111,12 @@ static const struct drm_driver exynos_drm_driver = {
.dumb_create = exynos_drm_gem_dumb_create,
.gem_prime_import = exynos_drm_gem_prime_import,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
+ EXYNOS_DRM_FBDEV_DRIVER_OPS,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -288,7 +288,7 @@ static int exynos_drm_bind(struct device *dev)
if (ret < 0)
goto err_cleanup_poll;
- exynos_drm_fbdev_setup(drm);
+ drm_client_setup(drm, NULL);
return 0;
@@ -361,7 +361,7 @@ static void exynos_drm_platform_shutdown(struct platform_device *pdev)
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
- .remove_new = exynos_drm_platform_remove,
+ .remove = exynos_drm_platform_remove,
.shutdown = exynos_drm_platform_shutdown,
.driver = {
.name = "exynos-drm",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index bf16deaae68b..896a03639e2d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -181,7 +181,7 @@ MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
struct platform_driver dsi_driver = {
.probe = samsung_dsim_probe,
- .remove_new = samsung_dsim_remove,
+ .remove = samsung_dsim_remove,
.driver = {
.name = "exynos-dsi",
.pm = &samsung_dsim_pm_ops,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index a379c8ca435a..9526a25e90ac 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -23,7 +23,6 @@
#include "exynos_drm_fbdev.h"
#define MAX_CONNECTOR 4
-#define PREFERRED_BPP 32
static int exynos_drm_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
@@ -87,8 +86,11 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return 0;
}
-static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+static const struct drm_fb_helper_funcs exynos_drm_fbdev_helper_funcs = {
+};
+
+int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct exynos_drm_gem *exynos_gem;
struct drm_device *dev = helper->dev;
@@ -120,6 +122,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
ret = PTR_ERR(helper->fb);
goto err_destroy_gem;
}
+ helper->funcs = &exynos_drm_fbdev_helper_funcs;
ret = exynos_drm_fbdev_update(helper, sizes, exynos_gem);
if (ret < 0)
@@ -134,93 +137,3 @@ err_destroy_gem:
exynos_drm_gem_destroy(exynos_gem);
return ret;
}
-
-static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
- .fb_probe = exynos_drm_fbdev_create,
-};
-
-/*
- * struct drm_client
- */
-
-static void exynos_drm_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
- if (fb_helper->info) {
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- }
-}
-
-static int exynos_drm_fbdev_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
-
-static int exynos_drm_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- int ret;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs exynos_drm_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = exynos_drm_fbdev_client_unregister,
- .restore = exynos_drm_fbdev_client_restore,
- .hotplug = exynos_drm_fbdev_client_hotplug,
-};
-
-void exynos_drm_fbdev_setup(struct drm_device *dev)
-{
- struct drm_fb_helper *fb_helper;
- int ret;
-
- drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
- drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
-
- fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper)
- return;
- drm_fb_helper_prepare(dev, fb_helper, PREFERRED_BPP, &exynos_drm_fb_helper_funcs);
-
- ret = drm_client_init(dev, &fb_helper->client, "fbdev", &exynos_drm_fbdev_client_funcs);
- if (ret)
- goto err_drm_client_init;
-
- drm_client_register(&fb_helper->client);
-
- return;
-
-err_drm_client_init:
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index 1e1dea627cd9..02a9201abea3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -11,12 +11,17 @@
#ifndef _EXYNOS_DRM_FBDEV_H_
#define _EXYNOS_DRM_FBDEV_H_
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-void exynos_drm_fbdev_setup(struct drm_device *dev);
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
+#if defined(CONFIG_DRM_FBDEV_EMULATION)
+int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
+ struct drm_fb_helper_surface_size *sizes);
+#define EXYNOS_DRM_FBDEV_DRIVER_OPS \
+ .fbdev_probe = exynos_drm_fbdev_driver_fbdev_probe
#else
-static inline void exynos_drm_fbdev_setup(struct drm_device *dev)
-{
-}
+#define EXYNOS_DRM_FBDEV_DRIVER_OPS \
+ .fbdev_probe = NULL
#endif
#endif
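[Editor's note] The header change above is the pattern used throughout this series: a driver-ops macro that either wires up .fbdev_probe or stubs it to NULL, so struct drm_driver initialisers need no #ifdef of their own. Sketch of a call site (example_driver is hypothetical):

static const struct drm_driver example_driver = {
	/* expands to .fbdev_probe = exynos_drm_fbdev_driver_fbdev_probe,
	 * or to .fbdev_probe = NULL when fbdev emulation is disabled */
	EXYNOS_DRM_FBDEV_DRIVER_OPS,
};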
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 4d7ea65b7dd8..b150cfd92f6e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1408,7 +1408,7 @@ MODULE_DEVICE_TABLE(of, fimc_of_match);
struct platform_driver fimc_driver = {
.probe = fimc_probe,
- .remove_new = fimc_remove,
+ .remove = fimc_remove,
.driver = {
.of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index f57df8c48139..1ad87584b1c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1323,7 +1323,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos_fimd_pm_ops, exynos_fimd_suspend,
struct platform_driver fimd_driver = {
.probe = fimd_probe,
- .remove_new = fimd_remove,
+ .remove = fimd_remove,
.driver = {
.name = "exynos4-fb",
.pm = pm_ptr(&exynos_fimd_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3a3b2c00e400..d32f2474cbaa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1607,7 +1607,7 @@ MODULE_DEVICE_TABLE(of, exynos_g2d_match);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
- .remove_new = g2d_remove,
+ .remove = g2d_remove,
.driver = {
.name = "exynos-drm-g2d",
.pm = pm_ptr(&g2d_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 638ca96830e9..4787fee4696f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -18,7 +18,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 59fa22050717..e6d516e1976d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1286,7 +1286,7 @@ static int gsc_probe(struct platform_device *pdev)
return ret;
}
- /* context initailization */
+ /* context initialization */
ctx->id = pdev->id;
platform_set_drvdata(pdev, ctx);
@@ -1420,7 +1420,7 @@ MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match);
struct platform_driver gsc_driver = {
.probe = gsc_probe,
- .remove_new = gsc_remove,
+ .remove = gsc_remove,
.driver = {
.name = "exynos-drm-gsc",
.pm = &gsc_pm_ops,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index d61ec451807c..b34ec6728337 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -460,7 +460,7 @@ MODULE_DEVICE_TABLE(of, exynos_mic_of_match);
struct platform_driver mic_driver = {
.probe = exynos_mic_probe,
- .remove_new = exynos_mic_remove,
+ .remove = exynos_mic_remove,
.driver = {
.name = "exynos-mic",
.pm = pm_ptr(&exynos_mic_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 2eb0b701672f..7b0f4a98a70a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -451,7 +451,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(rotator_pm_ops, rotator_runtime_suspend,
struct platform_driver rotator_driver = {
.probe = rotator_probe,
- .remove_new = rotator_remove,
+ .remove = rotator_remove,
.driver = {
.name = "exynos-rotator",
.pm = pm_ptr(&rotator_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 2788105ac780..c8a1b6b0a29c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -719,7 +719,7 @@ MODULE_DEVICE_TABLE(of, exynos_scaler_match);
struct platform_driver scaler_driver = {
.probe = scaler_probe,
- .remove_new = scaler_remove,
+ .remove = scaler_remove,
.driver = {
.name = "exynos-scaler",
.pm = pm_ptr(&scaler_pm_ops),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 6de0cced6c9d..fd388b1dbe68 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -467,7 +467,7 @@ static void vidi_remove(struct platform_device *pdev)
struct platform_driver vidi_driver = {
.probe = vidi_probe,
- .remove_new = vidi_remove,
+ .remove = vidi_remove,
.driver = {
.name = "exynos-drm-vidi",
.dev_groups = vidi_groups,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 1e26cd4f8347..176fd8871759 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -883,27 +883,32 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
static int hdmi_get_modes(struct drm_connector *connector)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
- struct edid *edid;
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_edid *drm_edid;
int ret;
if (!hdata->ddc_adpt)
goto no_edid;
- edid = drm_get_edid(connector, hdata->ddc_adpt);
- if (!edid)
+ drm_edid = drm_edid_read_ddc(connector, hdata->ddc_adpt);
+
+ ret = drm_edid_connector_update(connector, drm_edid);
+ if (ret)
+ return 0;
+
+ cec_notifier_set_phys_addr(hdata->notifier, info->source_physical_address);
+
+ if (!drm_edid)
goto no_edid;
- hdata->dvi_mode = !connector->display_info.is_hdmi;
+ hdata->dvi_mode = !info->is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
- edid->width_cm, edid->height_cm);
-
- drm_connector_update_edid_property(connector, edid);
- cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
+ info->width_mm / 10, info->height_mm / 10);
- ret = drm_add_edid_modes(connector, edid);
+ ret = drm_edid_connector_add_modes(connector);
- kfree(edid);
+ drm_edid_free(drm_edid);
return ret;
@@ -1643,7 +1648,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
struct hdmi_context *hdata = dev_get_drvdata(dev);
struct drm_connector *connector = &hdata->connector;
+ mutex_lock(&connector->eld_mutex);
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&connector->eld_mutex);
return 0;
}
@@ -1653,7 +1660,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.audio_shutdown = hdmi_audio_shutdown,
.mute_stream = hdmi_audio_mute,
.get_eld = hdmi_audio_get_eld,
- .no_capture_mute = 1,
};
static int hdmi_register_audio_device(struct hdmi_context *hdata)
@@ -1662,6 +1668,7 @@ static int hdmi_register_audio_device(struct hdmi_context *hdata)
.ops = &audio_codec_ops,
.max_i2s_channels = 6,
.i2s = 1,
+ .no_capture_mute = 1,
};
hdata->audio.pdev = platform_device_register_data(
@@ -2121,7 +2128,7 @@ static const struct dev_pm_ops exynos_hdmi_pm_ops = {
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
- .remove_new = hdmi_remove,
+ .remove = hdmi_remove,
.driver = {
.name = "exynos-hdmi",
.pm = &exynos_hdmi_pm_ops,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 1db955f00044..a3670d2eaab2 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1335,5 +1335,5 @@ struct platform_driver mixer_driver = {
.of_match_table = mixer_match_types,
},
.probe = mixer_probe,
- .remove_new = mixer_remove,
+ .remove = mixer_remove,
};
diff --git a/drivers/gpu/drm/exynos/regs-decon7.h b/drivers/gpu/drm/exynos/regs-decon7.h
index 5bc5f1db5196..216c106dac8f 100644
--- a/drivers/gpu/drm/exynos/regs-decon7.h
+++ b/drivers/gpu/drm/exynos/regs-decon7.h
@@ -48,7 +48,7 @@
/* SHADOWCON */
#define SHADOWCON 0x30
-#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win)))
+#define SHADOWCON_WINx_PROTECT(_shf, _win) (1 << ((_shf) + (_win)))
/* WINCONx */
#define WINCON(_win) (0x50 + ((_win) * 4))
@@ -58,10 +58,9 @@
#define WINCONx_BUFSEL_SHIFT 28
#define WINCONx_TRIPLE_BUF_MODE (0x1 << 18)
#define WINCONx_DOUBLE_BUF_MODE (0x0 << 18)
-#define WINCONx_BURSTLEN_16WORD (0x0 << 11)
-#define WINCONx_BURSTLEN_8WORD (0x1 << 11)
-#define WINCONx_BURSTLEN_MASK (0x1 << 11)
-#define WINCONx_BURSTLEN_SHIFT 11
+#define WINCONx_BURSTLEN_16WORD(_shf) (0x0 << (_shf))
+#define WINCONx_BURSTLEN_8WORD(_shf) (0x1 << (_shf))
+#define WINCONx_BURSTLEN_MASK(_shf) (0x1 << (_shf))
#define WINCONx_BLD_PLANE (0 << 8)
#define WINCONx_BLD_PIX (1 << 8)
#define WINCONx_ALPHA_MUL (1 << 7)
@@ -89,9 +88,9 @@
#define VIDOSD_H(_x) (0x80 + ((_x) * 4))
/* Frame buffer start addresses: VIDWxxADD0n */
-#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10))
-#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10))
-#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10))
+#define VIDW_BUF_START(_base, _win) ((_base) + ((_win) * 0x10))
+#define VIDW_BUF_START1(_base, _win) ((_base) + ((_win) * 0x10))
+#define VIDW_BUF_START2(_base, _win) ((_base) + ((_win) * 0x10))
#define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8))
#define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8))
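[Editor's note] With the shift folded into a parameter, one macro now serves both register layouts; callers fetch the per-SoC shift from their decon_data. A sketch (example_burstlen() is hypothetical):

static u32 example_burstlen(const struct decon_data *data, bool narrow_fb)
{
	unsigned int shift = data->wincon_burstlen_shift;	/* 11 on Exynos7, 10 on 7870 */

	return narrow_fb ? WINCONx_BURSTLEN_8WORD(shift)
			 : WINCONx_BURSTLEN_16WORD(shift);
}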
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index 5ca71ef87325..0e0f910ceb9f 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -3,11 +3,13 @@ config DRM_FSL_DCU
tristate "DRM Support for Freescale DCU"
depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
+ select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select REGMAP_MMIO
select VIDEOMODE_HELPERS
+ select MFD_SYSCON if SOC_LS1021A
help
Choose this option if you have a Freescale DCU chipset.
If M is selected the module will be called fsl-dcu-drm.
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index ab6c0c6cd0e2..03b076db9381 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -18,6 +18,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -100,12 +101,25 @@ static void fsl_dcu_irq_uninstall(struct drm_device *dev)
static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
{
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ struct regmap *scfg;
int ret;
ret = fsl_dcu_drm_modeset_init(fsl_dev);
- if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
- return ret;
+ if (ret < 0)
+ return dev_err_probe(dev->dev, ret, "failed to initialize mode setting\n");
+
+ scfg = syscon_regmap_lookup_by_compatible("fsl,ls1021a-scfg");
+ if (PTR_ERR(scfg) != -ENODEV) {
+ /*
+ * For simplicity, enable the PIXCLK unconditionally,
+ * resulting in increased power consumption. Disabling
+ * the clock in PM or on unload could be implemented as
+ * a future improvement.
+ */
+ ret = regmap_update_bits(scfg, SCFG_PIXCLKCR, SCFG_PIXCLKCR_PXCEN,
+ SCFG_PIXCLKCR_PXCEN);
+ if (ret < 0)
+ return dev_err_probe(dev->dev, ret, "failed to enable pixclk\n");
}
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
@@ -156,10 +170,10 @@ static const struct drm_driver fsl_dcu_drm_driver = {
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &fsl_dcu_drm_fops,
.name = "fsl-dcu-drm",
.desc = "Freescale DCU DRM",
- .date = "20160425",
.major = 1,
.minor = 1,
};
@@ -272,10 +286,8 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
}
fsl_dev->irq = platform_get_irq(pdev, 0);
- if (fsl_dev->irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ if (fsl_dev->irq < 0)
return fsl_dev->irq;
- }
fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
&fsl_dcu_regmap_config);
@@ -333,7 +345,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
if (ret < 0)
goto put;
- drm_fbdev_dma_setup(drm, legacyfb_depth);
+ drm_client_setup_with_color_mode(drm, legacyfb_depth);
return 0;
@@ -365,7 +377,7 @@ static void fsl_dcu_drm_shutdown(struct platform_device *pdev)
static struct platform_driver fsl_dcu_drm_platform_driver = {
.probe = fsl_dcu_drm_probe,
- .remove_new = fsl_dcu_drm_remove,
+ .remove = fsl_dcu_drm_remove,
.shutdown = fsl_dcu_drm_shutdown,
.driver = {
.name = "fsl-dcu",
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index e2049a0e8a92..566396013c04 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -160,6 +160,9 @@
#define FSL_DCU_ARGB4444 12
#define FSL_DCU_YUV422 14
+#define SCFG_PIXCLKCR 0x28
+#define SCFG_PIXCLKCR_PXCEN BIT(31)
+
#define VF610_LAYER_REG_NUM 9
#define LS1021A_LAYER_REG_NUM 10
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 2c2b92324a2e..c418e8496bdf 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -6,6 +6,7 @@
*/
#include <linux/backlight.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
index 9eb5abaf7d66..49bbd00c77ae 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -29,7 +29,7 @@ void fsl_tcon_bypass_enable(struct fsl_tcon *tcon)
FSL_TCON_CTRL1_TCON_BYPASS);
}
-static struct regmap_config fsl_tcon_regmap_config = {
+static const struct regmap_config fsl_tcon_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index efb4a2dd2f80..aa2ea128aa2f 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
- depends on DRM && PCI && X86 && MMU
+ depends on DRM && PCI && X86 && MMU && HAS_IOPORT
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
select I2C
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 98b44974d42d..8edefea2ef59 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -143,12 +143,15 @@ static const struct fb_ops psb_fbdev_fb_ops = {
.fb_destroy = psb_fbdev_fb_destroy,
};
+static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = {
+};
+
/*
- * struct drm_fb_helper_funcs
+ * struct drm_driver
*/
-static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
+int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
@@ -206,6 +209,7 @@ static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
goto err_drm_gem_object_put;
}
+ fb_helper->funcs = &psb_fbdev_fb_helper_funcs;
fb_helper->fb = fb;
info = drm_fb_helper_alloc_info(fb_helper);
@@ -246,93 +250,3 @@ err_drm_gem_object_put:
drm_gem_object_put(obj);
return ret;
}
-
-static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = {
- .fb_probe = psb_fbdev_fb_probe,
-};
-
-/*
- * struct drm_client_funcs and setup code
- */
-
-static void psb_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
- if (fb_helper->info) {
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_fb_helper_unprepare(fb_helper);
- drm_client_release(&fb_helper->client);
- kfree(fb_helper);
- }
-}
-
-static int psb_fbdev_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
-
-static int psb_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- int ret;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "Failed to setup gma500 fbdev emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs psb_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = psb_fbdev_client_unregister,
- .restore = psb_fbdev_client_restore,
- .hotplug = psb_fbdev_client_hotplug,
-};
-
-void psb_fbdev_setup(struct drm_psb_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->dev;
- struct drm_fb_helper *fb_helper;
- int ret;
-
- fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper)
- return;
- drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fbdev_fb_helper_funcs);
-
- ret = drm_client_init(dev, &fb_helper->client, "fbdev-gma500", &psb_fbdev_client_funcs);
- if (ret) {
- drm_err(dev, "Failed to register client: %d\n", ret);
- goto err_drm_fb_helper_unprepare;
- }
-
- drm_client_register(&fb_helper->client);
-
- return;
-
-err_drm_fb_helper_unprepare:
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
-}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index d67c2b3ad901..85d3557c2eb9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -19,6 +19,7 @@
#include <acpi/video.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -475,7 +476,7 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- psb_fbdev_setup(dev_priv);
+ drm_client_setup(dev, NULL);
return 0;
}
@@ -507,11 +508,11 @@ static const struct drm_driver driver = {
.num_ioctls = ARRAY_SIZE(psb_ioctls),
.dumb_create = psb_gem_dumb_create,
+ PSB_FBDEV_DRIVER_OPS,
.ioctls = psb_ioctls,
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index bddf89b82fec..7f77cb2b2751 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -26,7 +26,6 @@
#define DRIVER_NAME "gma500"
#define DRIVER_DESC "DRM driver for the Intel GMA500, GMA600, GMA3600, GMA3650"
-#define DRIVER_DATE "20140314"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -184,6 +183,9 @@
#define KSEL_BYPASS_25 6
#define KSEL_BYPASS_83_100 7
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -597,10 +599,13 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev,
/* fbdev */
#if defined(CONFIG_DRM_FBDEV_EMULATION)
-void psb_fbdev_setup(struct drm_psb_private *dev_priv);
+int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+#define PSB_FBDEV_DRIVER_OPS \
+ .fbdev_probe = psb_fbdev_driver_fbdev_probe
#else
-static inline void psb_fbdev_setup(struct drm_psb_private *dev_priv)
-{ }
+#define PSB_FBDEV_DRIVER_OPS \
+ .fbdev_probe = NULL
#endif
/* backlight.c */
diff --git a/drivers/gpu/drm/gud/Kconfig b/drivers/gpu/drm/gud/Kconfig
index 9c1e61f9eec3..b4d2136942f0 100644
--- a/drivers/gpu/drm/gud/Kconfig
+++ b/drivers/gpu/drm/gud/Kconfig
@@ -4,6 +4,7 @@ config DRM_GUD
tristate "GUD USB Display"
depends on DRM && USB && MMU
select LZ4_COMPRESS
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index ac6bbf920c72..cb405771d6e2 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -13,6 +13,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
@@ -376,10 +377,10 @@ static const struct drm_driver gud_drm_driver = {
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_prime_import = gud_gem_prime_import,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = "gud",
.desc = "Generic USB Display",
- .date = "20200422",
.major = 1,
.minor = 0,
};
@@ -622,7 +623,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
drm_kms_helper_poll_init(drm);
- drm_fbdev_shmem_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 126504318a4f..93b8d32e3be1 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
- depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ depends on DRM && PCI
depends on MMU
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index d25c75e60d3d..95a4ed599d98 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o \
+ dp/dp_aux.o dp/dp_link.o dp/dp_hw.o hibmc_drm_dp.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
new file mode 100644
index 000000000000..0a903cce1fa9
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_aux.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/minmax.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_reg.h"
+
+#define HIBMC_AUX_CMD_REQ_LEN GENMASK(7, 4)
+#define HIBMC_AUX_CMD_ADDR GENMASK(27, 8)
+#define HIBMC_AUX_CMD_I2C_ADDR_ONLY BIT(28)
+#define HIBMC_BYTES_IN_U32 4
+#define HIBMC_AUX_I2C_WRITE_SUCCESS 0x1
+#define HIBMC_DP_MIN_PULSE_NUM 0x9
+#define BITS_IN_U8 8
+
+static inline void hibmc_dp_aux_reset(struct hibmc_dp_dev *dp)
+{
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_DPTX_RST_CTRL, HIBMC_DP_CFG_AUX_RST_N, 0x0);
+ usleep_range(10, 15);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_DPTX_RST_CTRL, HIBMC_DP_CFG_AUX_RST_N, 0x1);
+}
+
+static void hibmc_dp_aux_read_data(struct hibmc_dp_dev *dp, u8 *buf, u8 size)
+{
+ u32 reg_num;
+ u32 value;
+ u32 num;
+ u8 i, j;
+
+ reg_num = DIV_ROUND_UP(size, HIBMC_BYTES_IN_U32);
+ for (i = 0; i < reg_num; i++) {
+ /* number of bytes read from a single register */
+ num = min(size - i * HIBMC_BYTES_IN_U32, HIBMC_BYTES_IN_U32);
+ value = readl(dp->base + HIBMC_DP_AUX_RD_DATA0 + i * HIBMC_BYTES_IN_U32);
+ /* scatter the register's 32-bit value into the byte buffer */
+ for (j = 0; j < num; j++)
+ buf[i * HIBMC_BYTES_IN_U32 + j] = value >> (j * BITS_IN_U8);
+ }
+}
+
+static void hibmc_dp_aux_write_data(struct hibmc_dp_dev *dp, u8 *buf, u8 size)
+{
+ u32 reg_num;
+ u32 value;
+ u32 num;
+ u8 i, j;
+
+ reg_num = DIV_ROUND_UP(size, HIBMC_BYTES_IN_U32);
+ for (i = 0; i < reg_num; i++) {
+ /* number of bytes written to a single register */
+ num = min_t(u8, size - i * HIBMC_BYTES_IN_U32, HIBMC_BYTES_IN_U32);
+ value = 0;
+ /* assemble the 32-bit value to be written to a single register */
+ for (j = 0; j < num; j++)
+ value |= buf[i * HIBMC_BYTES_IN_U32 + j] << (j * BITS_IN_U8);
+ /* write the assembled value to the register */
+ writel(value, dp->base + HIBMC_DP_AUX_WR_DATA0 + i * HIBMC_BYTES_IN_U32);
+ }
+}
+
+static u32 hibmc_dp_aux_build_cmd(const struct drm_dp_aux_msg *msg)
+{
+ u32 aux_cmd = msg->request;
+
+ if (msg->size)
+ aux_cmd |= FIELD_PREP(HIBMC_AUX_CMD_REQ_LEN, (msg->size - 1));
+ else
+ aux_cmd |= FIELD_PREP(HIBMC_AUX_CMD_I2C_ADDR_ONLY, 1);
+
+ aux_cmd |= FIELD_PREP(HIBMC_AUX_CMD_ADDR, msg->address);
+
+ return aux_cmd;
+}
+
+/* ret >= 0, ret is size; ret < 0, ret is err code */
+static int hibmc_dp_aux_parse_xfer(struct hibmc_dp_dev *dp, struct drm_dp_aux_msg *msg)
+{
+ u32 buf_data_cnt;
+ u32 aux_status;
+
+ aux_status = readl(dp->base + HIBMC_DP_AUX_STATUS);
+ msg->reply = FIELD_GET(HIBMC_DP_CFG_AUX_STATUS, aux_status);
+
+ if (aux_status & HIBMC_DP_CFG_AUX_TIMEOUT)
+ return -ETIMEDOUT;
+
+ /* only address */
+ if (!msg->size)
+ return 0;
+
+ if (msg->reply != DP_AUX_NATIVE_REPLY_ACK)
+ return -EIO;
+
+ buf_data_cnt = FIELD_GET(HIBMC_DP_CFG_AUX_READY_DATA_BYTE, aux_status);
+
+ switch (msg->request) {
+ case DP_AUX_NATIVE_WRITE:
+ return msg->size;
+ case DP_AUX_I2C_WRITE | DP_AUX_I2C_MOT:
+ if (buf_data_cnt == HIBMC_AUX_I2C_WRITE_SUCCESS)
+ return msg->size;
+ else
+ return FIELD_GET(HIBMC_DP_CFG_AUX, aux_status);
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ | DP_AUX_I2C_MOT:
+ buf_data_cnt--;
+ if (buf_data_cnt != msg->size) {
+ /* only part of the data was read successfully */
+ return -EBUSY;
+ }
+
+ /* all data is successfully read */
+ hibmc_dp_aux_read_data(dp, msg->buffer, msg->size);
+ return msg->size;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* ret >= 0 ,ret is size; ret < 0, ret is err code */
+static ssize_t hibmc_dp_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+ struct hibmc_dp_dev *dp = container_of(aux, struct hibmc_dp_dev, aux);
+ u32 aux_cmd;
+ int ret;
+ u32 val; /* assigned by readl_poll_timeout() before first use */
+
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA0);
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA1);
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA2);
+ writel(0, dp->base + HIBMC_DP_AUX_WR_DATA3);
+
+ hibmc_dp_aux_write_data(dp, msg->buffer, msg->size);
+
+ aux_cmd = hibmc_dp_aux_build_cmd(msg);
+ writel(aux_cmd, dp->base + HIBMC_DP_AUX_CMD_ADDR);
+
+ /* enable aux transfer */
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_REQ, 0x1);
+ ret = readl_poll_timeout(dp->base + HIBMC_DP_AUX_REQ, val,
+ !(val & HIBMC_DP_CFG_AUX_REQ), 50, 5000);
+ if (ret) {
+ hibmc_dp_aux_reset(dp);
+ return ret;
+ }
+
+ return hibmc_dp_aux_parse_xfer(dp, msg);
+}
+
+void hibmc_dp_aux_init(struct hibmc_dp_dev *dp)
+{
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_SYNC_LEN_SEL, 0x0);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_TIMER_TIMEOUT, 0x1);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_AUX_REQ, HIBMC_DP_CFG_AUX_MIN_PULSE_NUM,
+ HIBMC_DP_MIN_PULSE_NUM);
+
+ dp->aux.transfer = hibmc_dp_aux_xfer;
+ dp->aux.is_remote = 0;
+ drm_dp_aux_init(&dp->aux);
+}
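[Editor's note] The two data helpers above move bytes between a flat buffer and up to four 32-bit AUX data registers, byte j of register i holding buffer byte i*4 + j. The per-register packing reduces to the little-endian fold below (example_pack() is hypothetical):

/* Pack up to 4 buffer bytes into one register value, LSB first. */
static u32 example_pack(const u8 *buf, unsigned int n)
{
	u32 value = 0;
	unsigned int j;

	for (j = 0; j < n && j < 4; j++)
		value |= (u32)buf[j] << (j * 8);

	return value;
}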
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
new file mode 100644
index 000000000000..2c52a4476c4d
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_comm.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_COMM_H
+#define DP_COMM_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <drm/display/drm_dp_helper.h>
+
+#define HIBMC_DP_LANE_NUM_MAX 2
+
+struct hibmc_link_status {
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+struct hibmc_link_cap {
+ u8 link_rate;
+ u8 lanes;
+};
+
+struct hibmc_dp_link {
+ struct hibmc_link_status status;
+ u8 train_set[HIBMC_DP_LANE_NUM_MAX];
+ struct hibmc_link_cap cap;
+};
+
+struct hibmc_dp_dev {
+ struct drm_dp_aux aux;
+ struct drm_device *dev;
+ void __iomem *base;
+ struct mutex lock; /* protects concurrent RW in hibmc_dp_reg_write_field() */
+ struct hibmc_dp_link link;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+};
+
+#define dp_field_modify(reg_value, mask, val) \
+ do { \
+ (reg_value) &= ~(mask); \
+ (reg_value) |= FIELD_PREP(mask, val); \
+ } while (0) \
+
+#define hibmc_dp_reg_write_field(dp, offset, mask, val) \
+ do { \
+ typeof(dp) _dp = dp; \
+ typeof(_dp->base) addr = (_dp->base + (offset)); \
+ mutex_lock(&_dp->lock); \
+ u32 reg_value = readl(addr); \
+ dp_field_modify(reg_value, mask, val); \
+ writel(reg_value, addr); \
+ mutex_unlock(&_dp->lock); \
+ } while (0)
+
+void hibmc_dp_aux_init(struct hibmc_dp_dev *dp);
+int hibmc_dp_link_training(struct hibmc_dp_dev *dp);
+
+#endif
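[Editor's note] hibmc_dp_reg_write_field() is a macro rather than a function for a concrete reason: FIELD_PREP() only accepts a compile-time-constant mask, which a function parameter would not be. A function-style equivalent has to open-code the shift, roughly as below (example_write_field() is hypothetical; __ffs() requires a non-zero mask):

static void example_write_field(struct hibmc_dp_dev *dp, u32 offset,
				u32 mask, u32 val)
{
	u32 reg_value;

	mutex_lock(&dp->lock);
	reg_value = readl(dp->base + offset);
	/* open-coded FIELD_PREP(): shift val into place, clamp to mask */
	reg_value = (reg_value & ~mask) | ((val << __ffs(mask)) & mask);
	writel(reg_value, dp->base + offset);
	mutex_unlock(&dp->lock);
}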
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
new file mode 100644
index 000000000000..74dd9956144e
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_config.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_CONFIG_H
+#define DP_CONFIG_H
+
+#define HIBMC_DP_BPP 24
+#define HIBMC_DP_SYMBOL_PER_FCLK 4
+#define HIBMC_DP_MSA1 0x20
+#define HIBMC_DP_MSA2 0x845c00
+#define HIBMC_DP_OFFSET 0x1e0000
+#define HIBMC_DP_HDCP 0x2
+#define HIBMC_DP_INT_RST 0xffff
+#define HIBMC_DP_DPTX_RST 0x3ff
+#define HIBMC_DP_CLK_EN 0x7
+#define HIBMC_DP_SYNC_EN_MASK 0x3
+#define HIBMC_DP_LINK_RATE_CAL 27
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
new file mode 100644
index 000000000000..a8d543881c09
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "dp_config.h"
+#include "dp_comm.h"
+#include "dp_reg.h"
+#include "dp_hw.h"
+
+static void hibmc_dp_set_tu(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
+{
+ u32 tu_symbol_frac_size;
+ u32 tu_symbol_size;
+ u32 rate_ks;
+ u8 lane_num;
+ u32 value;
+ u32 bpp;
+
+ lane_num = dp->link.cap.lanes;
+ if (lane_num == 0) {
+ drm_err(dp->dev, "set tu failed, lane num cannot be 0!\n");
+ return;
+ }
+
+ bpp = HIBMC_DP_BPP;
+ rate_ks = dp->link.cap.link_rate * HIBMC_DP_LINK_RATE_CAL;
+ value = (mode->clock * bpp * 5) / (61 * lane_num * rate_ks);
+
+ if (value % 10 == 9) { /* a fractional 9 carries into the integer part */
+ tu_symbol_size = value / 10 + 1;
+ tu_symbol_frac_size = 0;
+ } else {
+ tu_symbol_size = value / 10;
+ tu_symbol_frac_size = value % 10 + 1;
+ }
+
+ drm_dbg_dp(dp->dev, "tu value: %u.%u value: %u\n",
+ tu_symbol_size, tu_symbol_frac_size, value);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE, tu_symbol_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_PACKET,
+ HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE, tu_symbol_frac_size);
+}
+
+static void hibmc_dp_set_sst(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
+{
+ u32 hblank_size;
+ u32 htotal_size;
+ u32 htotal_int;
+ u32 hblank_int;
+ u32 fclk; /* flink_clock */
+
+ fclk = dp->link.cap.link_rate * HIBMC_DP_LINK_RATE_CAL;
+
+ /* Considering the effect of spread spectrum, the value may deviate.
+ * The coefficient (0.9947) is used to offset the deviation.
+ */
+ htotal_int = mode->htotal * 9947 / 10000;
+ htotal_size = htotal_int * fclk / (HIBMC_DP_SYMBOL_PER_FCLK * (mode->clock / 1000));
+
+ hblank_int = mode->htotal - mode->hdisplay - mode->hdisplay * 53 / 10000;
+ hblank_size = hblank_int * fclk * 9947 /
+ (mode->clock * 10 * HIBMC_DP_SYMBOL_PER_FCLK);
+
+ drm_dbg_dp(dp->dev, "h_active %u v_active %u htotal_size %u hblank_size %u",
+ mode->hdisplay, mode->vdisplay, htotal_size, hblank_size);
+ drm_dbg_dp(dp->dev, "flink_clock %u pixel_clock %d", fclk, mode->clock / 1000);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
+ HIBMC_DP_CFG_STREAM_HTOTAL_SIZE, htotal_size);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_HORIZONTAL_SIZE,
+ HIBMC_DP_CFG_STREAM_HBLANK_SIZE, hblank_size);
+}
+
+static void hibmc_dp_link_cfg(struct hibmc_dp_dev *dp, struct drm_display_mode *mode)
+{
+ u32 timing_delay;
+ u32 vblank;
+ u32 hstart;
+ u32 vstart;
+
+ vblank = mode->vtotal - mode->vdisplay;
+ timing_delay = mode->htotal - mode->hsync_start;
+ hstart = mode->htotal - mode->hsync_start;
+ vstart = mode->vtotal - mode->vsync_start;
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG0,
+ HIBMC_DP_CFG_TIMING_GEN0_HBLANK, mode->htotal - mode->hdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG0,
+ HIBMC_DP_CFG_TIMING_GEN0_HACTIVE, mode->hdisplay);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG2,
+ HIBMC_DP_CFG_TIMING_GEN0_VBLANK, vblank);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG2,
+ HIBMC_DP_CFG_TIMING_GEN0_VACTIVE, mode->vdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_GEN_CONFIG3,
+ HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH,
+ mode->vsync_start - mode->vdisplay);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG0,
+ HIBMC_DP_CFG_STREAM_HACTIVE, mode->hdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG0,
+ HIBMC_DP_CFG_STREAM_HBLANK, mode->htotal - mode->hdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG2,
+ HIBMC_DP_CFG_STREAM_HSYNC_WIDTH,
+ mode->hsync_end - mode->hsync_start);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG1,
+ HIBMC_DP_CFG_STREAM_VACTIVE, mode->vdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG1,
+ HIBMC_DP_CFG_STREAM_VBLANK, vblank);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG3,
+ HIBMC_DP_CFG_STREAM_VFRONT_PORCH,
+ mode->vsync_start - mode->vdisplay);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CONFIG3,
+ HIBMC_DP_CFG_STREAM_VSYNC_WIDTH,
+ mode->vsync_end - mode->vsync_start);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_MSA0,
+ HIBMC_DP_CFG_STREAM_VSTART, vstart);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_MSA0,
+ HIBMC_DP_CFG_STREAM_HSTART, hstart);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_VSYNC_POLARITY,
+ mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 : 0);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_HSYNC_POLARITY,
+ mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 : 0);
+
+ /* MSA misc0 and misc1 */
+ writel(HIBMC_DP_MSA1, dp->base + HIBMC_DP_VIDEO_MSA1);
+ writel(HIBMC_DP_MSA2, dp->base + HIBMC_DP_VIDEO_MSA2);
+
+ hibmc_dp_set_tu(dp, mode);
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_RGB_ENABLE, 0x1);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_VIDEO_MAPPING, 0);
+
+ /* divide 2: round up to an even value */
+ if (timing_delay % 2)
+ timing_delay++;
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_TIMING_MODEL_CTRL,
+ HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1, timing_delay);
+
+ hibmc_dp_set_sst(dp, mode);
+}
+
+int hibmc_dp_hw_init(struct hibmc_dp *dp)
+{
+ struct drm_device *drm_dev = dp->drm_dev;
+ struct hibmc_dp_dev *dp_dev;
+
+ dp_dev = devm_kzalloc(drm_dev->dev, sizeof(struct hibmc_dp_dev), GFP_KERNEL);
+ if (!dp_dev)
+ return -ENOMEM;
+
+ mutex_init(&dp_dev->lock);
+
+ dp->dp_dev = dp_dev;
+
+ dp_dev->dev = drm_dev;
+ dp_dev->base = dp->mmio + HIBMC_DP_OFFSET;
+
+ hibmc_dp_aux_init(dp_dev);
+
+ dp_dev->link.cap.lanes = 0x2;
+ dp_dev->link.cap.link_rate = DP_LINK_BW_2_7;
+
+ /* hdcp data */
+ writel(HIBMC_DP_HDCP, dp_dev->base + HIBMC_DP_HDCP_CFG);
+ /* interrupt init */
+ writel(0, dp_dev->base + HIBMC_DP_INTR_ENABLE);
+ writel(HIBMC_DP_INT_RST, dp_dev->base + HIBMC_DP_INTR_ORIGINAL_STATUS);
+ /* rst */
+ writel(HIBMC_DP_DPTX_RST, dp_dev->base + HIBMC_DP_DPTX_RST_CTRL);
+ /* clock enable */
+ writel(HIBMC_DP_CLK_EN, dp_dev->base + HIBMC_DP_DPTX_CLK_CTRL);
+
+ return 0;
+}
+
+void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+
+ if (enable) {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_VIDEO_CTRL, BIT(0), 0x1);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_DPTX_GCTL0, BIT(10), 0x1);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ } else {
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_DPTX_GCTL0, BIT(10), 0);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ hibmc_dp_reg_write_field(dp_dev, HIBMC_DP_VIDEO_CTRL, BIT(0), 0);
+ writel(HIBMC_DP_SYNC_EN_MASK, dp_dev->base + HIBMC_DP_TIMING_SYNC_CTRL);
+ }
+
+ msleep(50);
+}
+
+int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode)
+{
+ struct hibmc_dp_dev *dp_dev = dp->dp_dev;
+ int ret;
+
+ if (!dp_dev->link.status.channel_equalized) {
+ ret = hibmc_dp_link_training(dp_dev);
+ if (ret) {
+ drm_err(dp->drm_dev, "dp link training failed, ret: %d\n", ret);
+ return ret;
+ }
+ }
+
+ hibmc_dp_display_en(dp, false);
+ hibmc_dp_link_cfg(dp_dev, mode);
+
+ return 0;
+}
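[Editor's note] A worked instance of the fixed-point TU arithmetic in hibmc_dp_set_tu() above, under mode numbers chosen only for illustration: 1080p60 has mode->clock = 148500 (kHz), bpp = HIBMC_DP_BPP = 24, and two lanes at DP_LINK_BW_2_7 (0x0a) give rate_ks = 10 * 27 = 270, so

	value = 148500 * 24 * 5 / (61 * 2 * 270) = 17820000 / 32940 = 540

Since 540 % 10 != 9, the split yields tu_symbol_size = 54 and tu_symbol_frac_size = 1; the % 10 == 9 branch instead rounds a fractional 9 up to the next whole symbol.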
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
new file mode 100644
index 000000000000..4dc13b3d9875
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_hw.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_KAPI_H
+#define DP_KAPI_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_print.h>
+
+struct hibmc_dp_dev;
+
+struct hibmc_dp {
+ struct hibmc_dp_dev *dp_dev;
+ struct drm_device *drm_dev;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ void __iomem *mmio;
+};
+
+int hibmc_dp_hw_init(struct hibmc_dp *dp);
+int hibmc_dp_mode_set(struct hibmc_dp *dp, struct drm_display_mode *mode);
+void hibmc_dp_display_en(struct hibmc_dp *dp, bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
new file mode 100644
index 000000000000..f6355c16cc0a
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/delay.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include "dp_comm.h"
+#include "dp_reg.h"
+
+#define HIBMC_EQ_MAX_RETRY 5
+
+static int hibmc_dp_link_training_configure(struct hibmc_dp_dev *dp)
+{
+ u8 buf[2];
+ int ret;
+
+ /* DP 2 lane */
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_LANE_DATA_EN,
+ dp->link.cap.lanes == 0x2 ? 0x3 : 0x1);
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_DPTX_GCTL0, HIBMC_DP_CFG_PHY_LANE_NUM,
+ dp->link.cap.lanes == 0x2 ? 0x1 : 0);
+
+ /* enhanced frame */
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_VIDEO_CTRL, HIBMC_DP_CFG_STREAM_FRAME_MODE, 0x1);
+
+ /* set rate and lane count */
+ buf[0] = dp->link.cap.link_rate;
+ buf[1] = DP_LANE_COUNT_ENHANCED_FRAME_EN | dp->link.cap.lanes;
+ ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ drm_dbg_dp(dp->dev, "dp aux write link rate and lanes failed, ret: %d\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ /* set 8b/10b and downspread */
+ buf[0] = DP_SPREAD_AMP_0_5;
+ buf[1] = DP_SET_ANSI_8B10B;
+ ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ drm_dbg_dp(dp->dev, "dp aux write 8b/10b and downspread failed, ret: %d\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ ret = drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd);
+ if (ret)
+ drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
+
+ return ret;
+}
+
+static int hibmc_dp_link_set_pattern(struct hibmc_dp_dev *dp, int pattern)
+{
+ int ret;
+ u8 val;
+ u8 buf;
+
+ buf = (u8)pattern;
+ if (pattern != DP_TRAINING_PATTERN_DISABLE && pattern != DP_TRAINING_PATTERN_4) {
+ buf |= DP_LINK_SCRAMBLING_DISABLE;
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_SCRAMBLE_EN, 0x1);
+ } else {
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_SCRAMBLE_EN, 0);
+ }
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ val = 0;
+ break;
+ case DP_TRAINING_PATTERN_1:
+ val = 1;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ val = 2;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ val = 3;
+ break;
+ case DP_TRAINING_PATTERN_4:
+ val = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hibmc_dp_reg_write_field(dp, HIBMC_DP_PHYIF_CTRL0, HIBMC_DP_CFG_PAT_SEL, val);
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET, &buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ drm_dbg_dp(dp->dev, "dp aux write training pattern set failed\n");
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static int hibmc_dp_link_training_cr_pre(struct hibmc_dp_dev *dp)
+{
+ u8 *train_set = dp->link.train_set;
+ int ret;
+ u8 i;
+
+ ret = hibmc_dp_link_training_configure(dp);
+ if (ret)
+ return ret;
+
+ ret = hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dp->link.cap.lanes; i++)
+ train_set[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set, dp->link.cap.lanes);
+ if (ret != dp->link.cap.lanes) {
+ drm_dbg_dp(dp->dev, "dp aux write training lane set failed\n");
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static bool hibmc_dp_link_get_adjust_train(struct hibmc_dp_dev *dp,
+ u8 lane_status[DP_LINK_STATUS_SIZE])
+{
+ u8 train_set[HIBMC_DP_LANE_NUM_MAX] = {0};
+ u8 lane;
+
+ for (lane = 0; lane < dp->link.cap.lanes; lane++)
+ train_set[lane] = drm_dp_get_adjust_request_voltage(lane_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(lane_status, lane);
+
+ if (memcmp(dp->link.train_set, train_set, HIBMC_DP_LANE_NUM_MAX)) {
+ memcpy(dp->link.train_set, train_set, HIBMC_DP_LANE_NUM_MAX);
+ return true;
+ }
+
+ return false;
+}
+
+static inline int hibmc_dp_link_reduce_rate(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.link_rate) {
+ case DP_LINK_BW_2_7:
+ dp->link.cap.link_rate = DP_LINK_BW_1_62;
+ return 0;
+ case DP_LINK_BW_5_4:
+ dp->link.cap.link_rate = DP_LINK_BW_2_7;
+ return 0;
+ case DP_LINK_BW_8_1:
+ dp->link.cap.link_rate = DP_LINK_BW_5_4;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int hibmc_dp_link_reduce_lane(struct hibmc_dp_dev *dp)
+{
+ switch (dp->link.cap.lanes) {
+ case 0x2:
+ dp->link.cap.lanes--;
+ break;
+ case 0x1:
+ drm_err(dp->dev, "dp link training reduce lane failed, already reach minimum\n");
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp)
+{
+ u8 lane_status[DP_LINK_STATUS_SIZE] = {0};
+ bool level_changed;
+ u32 voltage_tries;
+ u32 cr_tries;
+ int ret;
+
+ /*
+	 * DP 1.4 spec defines 10 as the max-tries value; for pre-DP 1.4 versions use a limit of 80
+ * (4 voltage levels x 4 preemphasis levels x 5 identical voltage retries)
+ */
+
+ voltage_tries = 1;
+ for (cr_tries = 0; cr_tries < 80; cr_tries++) {
+ drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
+ if (ret != DP_LINK_STATUS_SIZE) {
+ drm_err(dp->dev, "Get lane status failed\n");
+ return ret;
+ }
+
+ if (drm_dp_clock_recovery_ok(lane_status, dp->link.cap.lanes)) {
+ drm_dbg_dp(dp->dev, "dp link training cr done\n");
+ dp->link.status.clock_recovered = true;
+ return 0;
+ }
+
+ if (voltage_tries == 5) {
+ drm_dbg_dp(dp->dev, "same voltage tries 5 times\n");
+ dp->link.status.clock_recovered = false;
+ return 0;
+ }
+
+ level_changed = hibmc_dp_link_get_adjust_train(dp, lane_status);
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link.train_set,
+ dp->link.cap.lanes);
+ if (ret != dp->link.cap.lanes) {
+ drm_dbg_dp(dp->dev, "Update link training failed\n");
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ voltage_tries = level_changed ? 1 : voltage_tries + 1;
+ }
+
+ drm_err(dp->dev, "dp link training clock recovery 80 times failed\n");
+ dp->link.status.clock_recovered = false;
+
+ return 0;
+}
+
+static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp)
+{
+ u8 lane_status[DP_LINK_STATUS_SIZE] = {0};
+ u8 eq_tries;
+ int ret;
+
+ ret = hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_2);
+ if (ret)
+ return ret;
+
+ for (eq_tries = 0; eq_tries < HIBMC_EQ_MAX_RETRY; eq_tries++) {
+ drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status);
+ if (ret != DP_LINK_STATUS_SIZE) {
+ drm_err(dp->dev, "get lane status failed\n");
+ break;
+ }
+
+ if (!drm_dp_clock_recovery_ok(lane_status, dp->link.cap.lanes)) {
+ drm_dbg_dp(dp->dev, "clock recovery check failed\n");
+ drm_dbg_dp(dp->dev, "cannot continue channel equalization\n");
+ dp->link.status.clock_recovered = false;
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(lane_status, dp->link.cap.lanes)) {
+ dp->link.status.channel_equalized = true;
+ drm_dbg_dp(dp->dev, "dp link training eq done\n");
+ break;
+ }
+
+ hibmc_dp_link_get_adjust_train(dp, lane_status);
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+ dp->link.train_set, dp->link.cap.lanes);
+ if (ret != dp->link.cap.lanes) {
+ drm_dbg_dp(dp->dev, "Update link training failed\n");
+ ret = (ret >= 0) ? -EIO : ret;
+ break;
+ }
+ }
+
+ if (eq_tries == HIBMC_EQ_MAX_RETRY)
+ drm_err(dp->dev, "channel equalization failed %u times\n", eq_tries);
+
+ hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int hibmc_dp_link_downgrade_training_cr(struct hibmc_dp_dev *dp)
+{
+ if (hibmc_dp_link_reduce_rate(dp))
+ return hibmc_dp_link_reduce_lane(dp);
+
+ return 0;
+}
+
+static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp)
+{
+	if (dp->link.status.clock_recovered && !dp->link.status.channel_equalized) {
+ if (!hibmc_dp_link_reduce_lane(dp))
+ return 0;
+ }
+
+ return hibmc_dp_link_reduce_rate(dp);
+}
+
+int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
+{
+ struct hibmc_dp_link *link = &dp->link;
+ int ret;
+
+ while (true) {
+ ret = hibmc_dp_link_training_cr_pre(dp);
+ if (ret)
+ goto err;
+
+ ret = hibmc_dp_link_training_cr(dp);
+ if (ret)
+ goto err;
+
+ if (!link->status.clock_recovered) {
+ ret = hibmc_dp_link_downgrade_training_cr(dp);
+ if (ret)
+ goto err;
+ continue;
+ }
+
+ ret = hibmc_dp_link_training_channel_eq(dp);
+ if (ret)
+ goto err;
+
+ if (!link->status.channel_equalized) {
+ ret = hibmc_dp_link_downgrade_training_eq(dp);
+ if (ret)
+ goto err;
+ continue;
+ }
+
+ return 0;
+ }
+
+err:
+ hibmc_dp_link_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+
+ return ret;
+}
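
The retry loop above encodes an asymmetric downgrade policy: a clock-recovery failure lowers the link rate first and only sheds a lane once the rate has hit its floor, while an equalization failure with CR still good drops lanes first, since the current rate evidently works. Spelled out for a sink that advertised 8.1 Gb/s on two lanes (illustrative values only):

/*
 * Fallback order implied by hibmc_dp_link_downgrade_training_cr()
 * and _eq(), starting from DP_LINK_BW_8_1 x2 (illustration only):
 *
 *   CR fails:          8.1 -> 5.4 -> 2.7 -> 1.62 Gb/s, then 2 -> 1 lanes
 *   EQ fails (CR ok):  2 -> 1 lanes first, then the rate steps above
 *
 * Once both rate and lane count are at their minimum, the reduce
 * helpers return an error and hibmc_dp_link_training() gives up.
 */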
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
new file mode 100644
index 000000000000..4a515c726d52
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_reg.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 Hisilicon Limited. */
+
+#ifndef DP_REG_H
+#define DP_REG_H
+
+#define HIBMC_DP_AUX_CMD_ADDR 0x50
+#define HIBMC_DP_AUX_WR_DATA0 0x54
+#define HIBMC_DP_AUX_WR_DATA1 0x58
+#define HIBMC_DP_AUX_WR_DATA2 0x5c
+#define HIBMC_DP_AUX_WR_DATA3 0x60
+#define HIBMC_DP_AUX_RD_DATA0 0x64
+#define HIBMC_DP_AUX_REQ 0x74
+#define HIBMC_DP_AUX_STATUS 0x78
+#define HIBMC_DP_PHYIF_CTRL0 0xa0
+#define HIBMC_DP_VIDEO_CTRL 0x100
+#define HIBMC_DP_VIDEO_CONFIG0 0x104
+#define HIBMC_DP_VIDEO_CONFIG1 0x108
+#define HIBMC_DP_VIDEO_CONFIG2 0x10c
+#define HIBMC_DP_VIDEO_CONFIG3 0x110
+#define HIBMC_DP_VIDEO_PACKET 0x114
+#define HIBMC_DP_VIDEO_MSA0 0x118
+#define HIBMC_DP_VIDEO_MSA1 0x11c
+#define HIBMC_DP_VIDEO_MSA2 0x120
+#define HIBMC_DP_VIDEO_HORIZONTAL_SIZE	0x124
+#define HIBMC_DP_TIMING_GEN_CONFIG0 0x26c
+#define HIBMC_DP_TIMING_GEN_CONFIG2 0x274
+#define HIBMC_DP_TIMING_GEN_CONFIG3 0x278
+#define HIBMC_DP_HDCP_CFG 0x600
+#define HIBMC_DP_DPTX_RST_CTRL 0x700
+#define HIBMC_DP_DPTX_CLK_CTRL 0x704
+#define HIBMC_DP_DPTX_GCTL0 0x708
+#define HIBMC_DP_INTR_ENABLE 0x720
+#define HIBMC_DP_INTR_ORIGINAL_STATUS 0x728
+#define HIBMC_DP_TIMING_MODEL_CTRL 0x884
+#define HIBMC_DP_TIMING_SYNC_CTRL 0xFF0
+
+#define HIBMC_DP_CFG_AUX_SYNC_LEN_SEL BIT(1)
+#define HIBMC_DP_CFG_AUX_TIMER_TIMEOUT BIT(2)
+#define HIBMC_DP_CFG_STREAM_FRAME_MODE BIT(6)
+#define HIBMC_DP_CFG_AUX_MIN_PULSE_NUM GENMASK(13, 9)
+#define HIBMC_DP_CFG_LANE_DATA_EN GENMASK(11, 8)
+#define HIBMC_DP_CFG_PHY_LANE_NUM GENMASK(2, 1)
+#define HIBMC_DP_CFG_AUX_REQ BIT(0)
+#define HIBMC_DP_CFG_AUX_RST_N BIT(4)
+#define HIBMC_DP_CFG_AUX_TIMEOUT BIT(0)
+#define HIBMC_DP_CFG_AUX_READY_DATA_BYTE GENMASK(16, 12)
+#define HIBMC_DP_CFG_AUX GENMASK(24, 17)
+#define HIBMC_DP_CFG_AUX_STATUS GENMASK(11, 4)
+#define HIBMC_DP_CFG_SCRAMBLE_EN BIT(0)
+#define HIBMC_DP_CFG_PAT_SEL GENMASK(7, 4)
+#define HIBMC_DP_CFG_TIMING_GEN0_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_HBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_TIMING_GEN0_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_TIMING_GEN0_VBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_TIMING_GEN0_VFRONT_PORCH GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_HSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VACTIVE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VBLANK GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VFRONT_PORCH GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_VSYNC_WIDTH GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VSTART GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HSTART GENMASK(15, 0)
+#define HIBMC_DP_CFG_STREAM_VSYNC_POLARITY BIT(8)
+#define HIBMC_DP_CFG_STREAM_HSYNC_POLARITY BIT(7)
+#define HIBMC_DP_CFG_STREAM_RGB_ENABLE BIT(1)
+#define HIBMC_DP_CFG_STREAM_VIDEO_MAPPING GENMASK(5, 2)
+#define HIBMC_DP_CFG_PIXEL_NUM_TIMING_MODE_SEL1 GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_SIZE GENMASK(5, 0)
+#define HIBMC_DP_CFG_STREAM_TU_SYMBOL_FRAC_SIZE GENMASK(9, 6)
+#define HIBMC_DP_CFG_STREAM_HTOTAL_SIZE GENMASK(31, 16)
+#define HIBMC_DP_CFG_STREAM_HBLANK_SIZE GENMASK(15, 0)
+
+#endif
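
All of the HIBMC_DP_CFG_* values above are BIT()/GENMASK() field masks consumed by hibmc_dp_reg_write_field(), whose definition lives in dp_comm.h and is not part of this diff. A plausible read-modify-write sketch for a runtime mask (the in-tree helper is presumably a macro so that constant masks can go through FIELD_PREP()):

/* Sketch only; the real helper is defined in dp_comm.h. */
static void example_reg_write_field(void __iomem *base, u32 offset,
				    u32 mask, u32 val)
{
	u32 reg = readl(base + offset);

	reg &= ~mask;				/* clear the field */
	reg |= (val << __ffs(mask)) & mask;	/* place the new value */
	writel(reg, base + offset);
}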
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
new file mode 100644
index 000000000000..603d6b198a54
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_dp.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Hisilicon Limited.
+
+#include <linux/io.h>
+
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+
+#include "hibmc_drm_drv.h"
+#include "dp/dp_hw.h"
+
+static int hibmc_dp_connector_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, connector->dev->mode_config.max_width,
+ connector->dev->mode_config.max_height);
+	drm_set_preferred_mode(connector, 1024, 768); /* temporary implementation */
+
+ return count;
+}
+
+static const struct drm_connector_helper_funcs hibmc_dp_conn_helper_funcs = {
+ .get_modes = hibmc_dp_connector_get_modes,
+};
+
+static const struct drm_connector_funcs hibmc_dp_conn_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static inline int hibmc_dp_prepare(struct hibmc_dp *dp, struct drm_display_mode *mode)
+{
+ int ret;
+
+ hibmc_dp_display_en(dp, false);
+
+ ret = hibmc_dp_mode_set(dp, mode);
+ if (ret)
+ drm_err(dp->drm_dev, "hibmc dp mode set failed: %d\n", ret);
+
+ return ret;
+}
+
+static void hibmc_dp_encoder_enable(struct drm_encoder *drm_encoder,
+ struct drm_atomic_state *state)
+{
+ struct hibmc_dp *dp = container_of(drm_encoder, struct hibmc_dp, encoder);
+ struct drm_display_mode *mode = &drm_encoder->crtc->state->mode;
+
+ if (hibmc_dp_prepare(dp, mode))
+ return;
+
+ hibmc_dp_display_en(dp, true);
+}
+
+static void hibmc_dp_encoder_disable(struct drm_encoder *drm_encoder,
+ struct drm_atomic_state *state)
+{
+ struct hibmc_dp *dp = container_of(drm_encoder, struct hibmc_dp, encoder);
+
+ hibmc_dp_display_en(dp, false);
+}
+
+static const struct drm_encoder_helper_funcs hibmc_dp_encoder_helper_funcs = {
+ .atomic_enable = hibmc_dp_encoder_enable,
+ .atomic_disable = hibmc_dp_encoder_disable,
+};
+
+int hibmc_dp_init(struct hibmc_drm_private *priv)
+{
+ struct drm_device *dev = &priv->dev;
+ struct drm_crtc *crtc = &priv->crtc;
+ struct hibmc_dp *dp = &priv->dp;
+ struct drm_connector *connector = &dp->connector;
+ struct drm_encoder *encoder = &dp->encoder;
+ int ret;
+
+ dp->mmio = priv->mmio;
+ dp->drm_dev = dev;
+
+ ret = hibmc_dp_hw_init(&priv->dp);
+ if (ret) {
+ drm_err(dev, "hibmc dp hw init failed: %d\n", ret);
+ return ret;
+ }
+
+ hibmc_dp_display_en(&priv->dp, false);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ drm_err(dev, "init dp encoder failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &hibmc_dp_encoder_helper_funcs);
+
+ ret = drm_connector_init(dev, connector, &hibmc_dp_conn_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ drm_err(dev, "init dp connector failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_helper_add(connector, &hibmc_dp_conn_helper_funcs);
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
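
hibmc_dp_encoder_enable() reads the mode through drm_encoder->crtc->state. Since the atomic state is already passed in, the same information can be fetched from the state itself, avoiding the legacy crtc pointer; a sketch of that alternative, using the standard helper from drm_atomic.h:

/* Sketch: fetch the new mode from the passed-in atomic state rather
 * than via encoder->crtc->state (both name the same state here).
 */
static void example_encoder_enable(struct drm_encoder *encoder,
				   struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, encoder->crtc);

	/* crtc_state->mode is the mode about to be programmed */
}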
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 9f9b19ea0587..e6de6d5edf6b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -11,10 +11,11 @@
* Jianhua Li <lijianhua@huawei.com>
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
@@ -27,6 +28,10 @@
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
+#define HIBMC_DP_HOST_SERDES_CTRL 0x1f001c
+#define HIBMC_DP_HOST_SERDES_CTRL_VAL 0x8a00
+#define HIBMC_DP_HOST_SERDES_CTRL_MASK 0x7ffff
+
DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_interrupt(int irq, void *arg)
@@ -56,13 +61,13 @@ static const struct drm_driver hibmc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hibmc_fops,
.name = "hibmc",
- .date = "20160828",
.desc = "hibmc drm driver",
.major = 1,
.minor = 0,
.debugfs_init = drm_vram_mm_debugfs_init,
.dumb_create = hibmc_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
+ DRM_FBDEV_TTM_DRIVER_OPS,
};
static int __maybe_unused hibmc_pm_suspend(struct device *dev)
@@ -116,6 +121,14 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
+	/* if DP exists, init it */
+ if ((readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL) &
+ HIBMC_DP_HOST_SERDES_CTRL_MASK) == HIBMC_DP_HOST_SERDES_CTRL_VAL) {
+ ret = hibmc_dp_init(priv);
+ if (ret)
+ drm_err(dev, "failed to init dp: %d\n", ret);
+ }
+
ret = hibmc_vdac_init(priv);
if (ret) {
drm_err(dev, "failed to init vdac: %d\n", ret);
@@ -306,7 +319,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
struct drm_device *dev;
int ret;
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &hibmc_driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, hibmc_driver.name);
if (ret)
return ret;
@@ -326,6 +339,8 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
goto err_return;
}
+ pci_set_master(pdev);
+
ret = hibmc_load(dev);
if (ret) {
drm_err(dev, "failed to load hibmc: %d\n", ret);
@@ -339,7 +354,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
goto err_unload;
}
- drm_fbdev_ttm_setup(dev, 32);
+ drm_client_setup(dev, NULL);
return 0;
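
The DP path is only initialized when a masked read of the host SerDes control register matches the expected signature; pulled out as a predicate, the check in hibmc_kms_init() reads like this (hypothetical helper name):

/* Sketch of the DP-presence check done inline in hibmc_kms_init(). */
static bool example_hibmc_has_dp(struct hibmc_drm_private *priv)
{
	u32 val = readl(priv->mmio + HIBMC_DP_HOST_SERDES_CTRL);

	return (val & HIBMC_DP_HOST_SERDES_CTRL_MASK) ==
	       HIBMC_DP_HOST_SERDES_CTRL_VAL;
}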
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 6b566f3aeecb..d982f1e4b958 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -20,9 +20,12 @@
#include <drm/drm_framebuffer.h>
-struct hibmc_connector {
- struct drm_connector base;
+#include "dp/dp_hw.h"
+struct hibmc_vdac {
+ struct drm_device *dev;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
struct i2c_adapter adapter;
struct i2c_algo_bit_data bit_data;
};
@@ -35,13 +38,13 @@ struct hibmc_drm_private {
struct drm_device dev;
struct drm_plane primary_plane;
struct drm_crtc crtc;
- struct drm_encoder encoder;
- struct hibmc_connector connector;
+ struct hibmc_vdac vdac;
+ struct hibmc_dp dp;
};
-static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
+static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
{
- return container_of(connector, struct hibmc_connector, base);
+ return container_of(connector, struct hibmc_vdac, connector);
}
static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
@@ -57,6 +60,8 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv,
int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
-int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac);
+
+int hibmc_dp_init(struct hibmc_drm_private *priv);
#endif
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index e6e48651c15c..99b3b77b5445 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -25,8 +25,8 @@
static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
{
- struct hibmc_connector *hibmc_connector = data;
- struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ struct hibmc_vdac *vdac = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(vdac->connector.dev);
u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
if (value) {
@@ -45,8 +45,8 @@ static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
static int hibmc_get_i2c_signal(void *data, u32 mask)
{
- struct hibmc_connector *hibmc_connector = data;
- struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ struct hibmc_vdac *vdac = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(vdac->connector.dev);
u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
if ((tmp_dir & mask) != mask) {
@@ -77,22 +77,21 @@ static int hibmc_ddc_getscl(void *data)
return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
}
-int hibmc_ddc_create(struct drm_device *drm_dev,
- struct hibmc_connector *connector)
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
{
- connector->adapter.owner = THIS_MODULE;
- snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
- connector->adapter.dev.parent = drm_dev->dev;
- i2c_set_adapdata(&connector->adapter, connector);
- connector->adapter.algo_data = &connector->bit_data;
-
- connector->bit_data.udelay = 20;
- connector->bit_data.timeout = usecs_to_jiffies(2000);
- connector->bit_data.data = connector;
- connector->bit_data.setsda = hibmc_ddc_setsda;
- connector->bit_data.setscl = hibmc_ddc_setscl;
- connector->bit_data.getsda = hibmc_ddc_getsda;
- connector->bit_data.getscl = hibmc_ddc_getscl;
-
- return i2c_bit_add_bus(&connector->adapter);
+ vdac->adapter.owner = THIS_MODULE;
+ snprintf(vdac->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
+ vdac->adapter.dev.parent = drm_dev->dev;
+ i2c_set_adapdata(&vdac->adapter, vdac);
+ vdac->adapter.algo_data = &vdac->bit_data;
+
+ vdac->bit_data.udelay = 20;
+ vdac->bit_data.timeout = usecs_to_jiffies(2000);
+ vdac->bit_data.data = vdac;
+ vdac->bit_data.setsda = hibmc_ddc_setsda;
+ vdac->bit_data.setscl = hibmc_ddc_setscl;
+ vdac->bit_data.getsda = hibmc_ddc_getsda;
+ vdac->bit_data.getscl = hibmc_ddc_getscl;
+
+ return i2c_bit_add_bus(&vdac->adapter);
}
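
The timing values carried over here deserve a gloss; the arithmetic below assumes the documented i2c-algo-bit meaning of .udelay as the SCL half-period:

/*
 * Bus-speed note for the bit_data above: with .udelay = 20,
 *
 *   f_SCL ~= 1 / (2 * 20 us) = 25 kHz
 *
 * comfortably below the 100 kHz standard-mode ceiling that DDC
 * devices expect.
 */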
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 409c551c92af..05e19ea4c9f9 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -24,11 +24,11 @@
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
- struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+ struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
const struct drm_edid *drm_edid;
int count;
- drm_edid = drm_edid_read_ddc(connector, &hibmc_connector->adapter);
+ drm_edid = drm_edid_read_ddc(connector, &vdac->adapter);
drm_edid_connector_update(connector, drm_edid);
@@ -51,9 +51,9 @@ out:
static void hibmc_connector_destroy(struct drm_connector *connector)
{
- struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+ struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
- i2c_del_adapter(&hibmc_connector->adapter);
+ i2c_del_adapter(&vdac->adapter);
drm_connector_cleanup(connector);
}
@@ -93,20 +93,20 @@ static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
- struct hibmc_connector *hibmc_connector = &priv->connector;
- struct drm_encoder *encoder = &priv->encoder;
+ struct hibmc_vdac *vdac = &priv->vdac;
+ struct drm_encoder *encoder = &vdac->encoder;
struct drm_crtc *crtc = &priv->crtc;
- struct drm_connector *connector = &hibmc_connector->base;
+ struct drm_connector *connector = &vdac->connector;
int ret;
- ret = hibmc_ddc_create(dev, hibmc_connector);
+ ret = hibmc_ddc_create(dev, vdac);
if (ret) {
drm_err(dev, "failed to create ddc: %d\n", ret);
return ret;
}
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
+ ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
if (ret) {
drm_err(dev, "failed to init encoder: %d\n", ret);
return ret;
@@ -117,7 +117,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
ret = drm_connector_init_with_ddc(dev, connector,
&hibmc_connector_funcs,
DRM_MODE_CONNECTOR_VGA,
- &hibmc_connector->adapter);
+ &vdac->adapter);
if (ret) {
drm_err(dev, "failed to init connector: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
index 0772f79567ef..43e8a4fd2d11 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Kconfig
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -2,6 +2,7 @@
config DRM_HISI_KIRIN
tristate "DRM Support for Hisilicon Kirin series SoCs Platform"
depends on DRM && OF && (ARM64 || COMPILE_TEST)
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index a39cc549c20b..2eea9fb0e76b 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -889,7 +889,7 @@ MODULE_DEVICE_TABLE(of, dsi_of_match);
static struct platform_driver dsi_driver = {
.probe = dsi_probe,
- .remove_new = dsi_remove,
+ .remove = dsi_remove,
.driver = {
.name = "dw-dsi",
.of_match_table = dsi_of_match,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 871f79a6b17e..2eb49177ac42 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -25,6 +25,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
@@ -925,9 +926,9 @@ static const struct drm_driver ade_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ade_fops,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.name = "kirin",
.desc = "Hisilicon Kirin620 SoC DRM Driver",
- .date = "20150718",
.major = 1,
.minor = 0,
};
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 12666985686b..1e1c87be1204 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -17,9 +17,9 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -237,7 +237,7 @@ static int kirin_drm_bind(struct device *dev)
if (ret)
goto err_kms_cleanup;
- drm_fbdev_dma_setup(drm_dev, 32);
+ drm_client_setup(drm_dev, NULL);
return 0;
@@ -302,7 +302,7 @@ MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids);
static struct platform_driver kirin_drm_platform_driver = {
.probe = kirin_drm_platform_probe,
- .remove_new = kirin_drm_platform_remove,
+ .remove = kirin_drm_platform_remove,
.shutdown = kirin_drm_platform_shutdown,
.driver = {
.name = "kirin-drm",
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index ff93e08d5036..f59abfa7622a 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -3,12 +3,13 @@
* Copyright 2021 Microsoft
*/
+#include <linux/aperture.h>
#include <linux/efi.h>
#include <linux/hyperv.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
@@ -19,7 +20,6 @@
#define DRIVER_NAME "hyperv_drm"
#define DRIVER_DESC "DRM driver for Hyper-V synthetic video device"
-#define DRIVER_DATE "2020"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -30,12 +30,12 @@ static struct drm_driver hyperv_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.fops = &hv_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
};
static int hyperv_pci_probe(struct pci_dev *pdev,
@@ -124,7 +124,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
goto err_hv_set_drv_data;
}
- drm_aperture_remove_framebuffers(&hyperv_driver);
+ aperture_remove_all_conflicting_devices(hyperv_driver.name);
ret = hyperv_setup_vram(hv, hdev);
if (ret)
@@ -149,7 +149,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
goto err_free_mmio;
}
- drm_fbdev_shmem_setup(dev, 0);
+ drm_client_setup(dev, NULL);
return 0;
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 131512a5f3bd..fcb0fcd6c897 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -486,7 +486,7 @@ static int ch7006_encoder_init(struct i2c_client *client,
}
static const struct i2c_device_id ch7006_ids[] = {
- { "ch7006", 0 },
+ { "ch7006" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ch7006_ids);
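
This { "ch7006", 0 } -> { "ch7006" } conversion, repeated for sil164, tda9950 and tda998x below, relies on C's partial aggregate initialization: members not named are zero-initialized, so .driver_data still ends up 0. The two forms are equivalent:

/* Equivalent i2c_device_id entries; omitted members are zeroed.
 * (Duplicate names in one table are for illustration only.)
 */
static const struct i2c_device_id example_ids[] = {
	{ "example-chip", 0 },	/* explicit driver_data = 0 */
	{ "example-chip" },	/* driver_data implicitly 0 */
	{ }			/* sentinel */
};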
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index ff23422727fc..c17afa025d9d 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -413,7 +413,7 @@ sil164_encoder_init(struct i2c_client *client,
}
static const struct i2c_device_id sil164_ids[] = {
- { "sil164", 0 },
+ { "sil164" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sil164_ids);
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 82d618c40dce..cbff851e0c85 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -486,8 +486,8 @@ static void tda9950_remove(struct i2c_client *client)
}
static struct i2c_device_id tda9950_ids[] = {
- { "tda9950", 0 },
- { },
+ { "tda9950" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, tda9950_ids);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 2160f05bbd16..82d4a4e206a5 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1165,7 +1165,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.audio_shutdown = tda998x_audio_shutdown,
.mute_stream = tda998x_audio_mute_stream,
.get_eld = tda998x_audio_get_eld,
- .no_capture_mute = 1,
};
static int tda998x_audio_codec_init(struct tda998x_priv *priv,
@@ -1176,6 +1175,7 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
.max_i2s_channels = 2,
.no_i2s_capture = 1,
.no_spdif_capture = 1,
+ .no_capture_mute = 1,
};
if (priv->audio_port_enable[AUDIO_ROUTE_I2S])
@@ -2094,7 +2094,7 @@ MODULE_DEVICE_TABLE(of, tda998x_dt_ids);
#endif
static const struct i2c_device_id tda998x_ids[] = {
- { "tda998x", 0 },
+ { "tda998x" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tda998x_ids);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index faa253b27664..5e939004b646 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -10,7 +10,9 @@ config DRM_I915
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
+ select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_DSC_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
@@ -123,9 +125,8 @@ config DRM_I915_USERPTR
config DRM_I915_GVT_KVMGT
tristate "Enable KVM host support Intel GVT-g graphics virtualization"
depends on DRM_I915
- depends on X86
+ depends on KVM_X86
depends on 64BIT
- depends on KVM
depends on VFIO
select DRM_I915_GVT
select KVM_EXTERNAL_WRITE_TRACKING
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index c63fa2133ccb..3dda9f0eda82 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -30,11 +30,11 @@ i915-y += \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
- i915_suspend.o \
i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
intel_clock_gating.o \
+ intel_cpu_info.o \
intel_device_info.o \
intel_memory_region.o \
intel_pcode.o \
@@ -43,6 +43,7 @@ i915-y += \
intel_sbi.o \
intel_step.o \
intel_uncore.o \
+ intel_uncore_trace.o \
intel_wakeref.o \
vlv_sideband.o \
vlv_suspend.o
@@ -51,7 +52,8 @@ i915-y += \
i915-y += \
soc/intel_dram.o \
soc/intel_gmch.o \
- soc/intel_pch.o
+ soc/intel_pch.o \
+ soc/intel_rom.o
# core library code
i915-y += \
@@ -219,12 +221,14 @@ i915-$(CONFIG_HWMON) += \
i915-y += \
display/hsw_ips.o \
display/i9xx_plane.o \
+ display/i9xx_display_sr.o \
display/i9xx_wm.o \
display/intel_alpm.o \
display/intel_atomic.o \
display/intel_atomic_plane.o \
display/intel_audio.o \
display/intel_bios.o \
+ display/intel_bo.o \
display/intel_bw.o \
display/intel_cdclk.o \
display/intel_color.o \
@@ -234,6 +238,7 @@ i915-y += \
display/intel_crtc_state_dump.o \
display/intel_cursor.o \
display/intel_display.o \
+ display/intel_display_conversion.o \
display/intel_display_driver.o \
display/intel_display_irq.o \
display/intel_display_params.o \
@@ -242,6 +247,7 @@ i915-y += \
display/intel_display_power_well.o \
display/intel_display_reset.o \
display/intel_display_rps.o \
+ display/intel_display_snapshot.o \
display/intel_display_wa.o \
display/intel_dmc.o \
display/intel_dmc_wl.o \
@@ -325,6 +331,7 @@ i915-y += \
display/intel_dp_hdcp.o \
display/intel_dp_link_training.o \
display/intel_dp_mst.o \
+ display/intel_dp_test.o \
display/intel_dsi.o \
display/intel_dsi_dcs_backlight.o \
display/intel_dsi_vbt.o \
@@ -335,6 +342,7 @@ i915-y += \
display/intel_lspcon.o \
display/intel_lvds.o \
display/intel_panel.o \
+ display/intel_pfit.o \
display/intel_pps.o \
display/intel_qp_tables.o \
display/intel_sdvo.o \
diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index 9d47f8a93e94..686393dfbbf5 100644
--- a/drivers/gpu/drm/i915/display/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
@@ -26,7 +26,6 @@
*
*/
-#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 526c8c4d7b53..56353377466c 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -8,6 +8,7 @@
#include <linux/string_helpers.h>
#include "g4x_dp.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_audio.h"
#include "intel_backlight.h"
@@ -19,6 +20,7 @@
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
+#include "intel_dp_test.h"
#include "intel_dpio_phy.h"
#include "intel_encoder.h"
#include "intel_fifo_underrun.h"
@@ -54,8 +56,8 @@ const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
return IS_CHERRYVIEW(i915) ? &chv_dpll[0] : &vlv_dpll[0];
}
-void g4x_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+static void g4x_dp_set_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct dpll *divisor = NULL;
@@ -169,13 +171,12 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
bool cur_state = intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN;
- I915_STATE_WARN(dev_priv, cur_state != state,
- "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
- dig_port->base.base.base.id, dig_port->base.base.name,
- str_on_off(state), str_on_off(cur_state));
+ INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
+ "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ str_on_off(state), str_on_off(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@@ -184,9 +185,9 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
struct intel_display *display = &dev_priv->display;
bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE;
- I915_STATE_WARN(dev_priv, cur_state != state,
- "eDP PLL state assertion failure (expected %s, current %s)\n",
- str_on_off(state), str_on_off(cur_state));
+ INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
+ "eDP PLL state assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
@@ -477,12 +478,8 @@ intel_dp_link_down(struct intel_encoder *encoder,
msleep(intel_dp->pps.panel_power_down_delay);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_wakeref_t wakeref;
-
- with_intel_pps_lock(intel_dp, wakeref)
- intel_dp->pps.active_pipe = INVALID_PIPE;
- }
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_pps_port_disable(encoder, old_crtc_state);
}
static void g4x_dp_audio_enable(struct intel_encoder *encoder,
@@ -694,7 +691,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
with_intel_pps_lock(intel_dp, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_pps_init(encoder, pipe_config);
+ vlv_pps_port_enable_unlocked(encoder, pipe_config);
intel_dp_enable_port(intel_dp, pipe_config);
@@ -709,8 +706,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
if (IS_CHERRYVIEW(dev_priv))
lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
- vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
- lane_mask);
+ vlv_wait_port_ready(display, dp_to_dig_port(intel_dp), lane_mask);
}
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
@@ -1172,12 +1168,8 @@ intel_dp_hotplug(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum intel_hotplug_state state;
- if (intel_dp->compliance.test_active &&
- intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
- intel_dp_phy_test(encoder);
- /* just do the PHY test and nothing else */
+ if (intel_dp_test_phy(intel_dp))
return INTEL_HOTPLUG_UNCHANGED;
- }
state = intel_encoder_hotplug(encoder, connector);
@@ -1232,6 +1224,25 @@ static bool ilk_digital_port_connected(struct intel_encoder *encoder)
return intel_de_read(display, DEISR) & bit;
}
+static int g4x_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ int ret;
+
+ if (HAS_PCH_SPLIT(i915) && encoder->port != PORT_A)
+ crtc_state->has_pch_encoder = true;
+
+ ret = intel_dp_compute_config(encoder, crtc_state, conn_state);
+ if (ret)
+ return ret;
+
+ g4x_dp_set_clock(encoder, crtc_state);
+
+ return 0;
+}
+
static void g4x_dp_suspend_complete(struct intel_encoder *encoder)
{
/*
@@ -1249,20 +1260,6 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
-enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- enum pipe pipe;
-
- if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
- encoder->port, &pipe))
- return pipe;
-
- return INVALID_PIPE;
-}
-
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder->dev);
@@ -1272,13 +1269,10 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_dp->DP = intel_de_read(display, intel_dp->output_reg);
intel_dp->reset_link_params = true;
+ intel_dp_invalidate_source_oui(intel_dp);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_wakeref_t wakeref;
-
- with_intel_pps_lock(intel_dp, wakeref)
- intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
- }
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_pps_pipe_reset(intel_dp);
intel_pps_encoder_reset(intel_dp);
}
@@ -1333,7 +1327,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder_link_check_init(intel_encoder, intel_dp_link_check);
intel_encoder->hotplug = intel_dp_hotplug;
- intel_encoder->compute_config = intel_dp_compute_config;
+ intel_encoder->compute_config = g4x_dp_compute_config;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->sync_state = intel_dp_sync_state;
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.h b/drivers/gpu/drm/i915/display/g4x_dp.h
index a10638ab749c..839a251dc069 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.h
+++ b/drivers/gpu/drm/i915/display/g4x_dp.h
@@ -19,9 +19,6 @@ struct intel_encoder;
#ifdef I915
const struct dpll *vlv_get_dpll(struct drm_i915_private *i915);
-enum pipe vlv_active_pipe(struct intel_dp *intel_dp);
-void g4x_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe);
@@ -32,14 +29,6 @@ static inline const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
{
return NULL;
}
-static inline int vlv_active_pipe(struct intel_dp *intel_dp)
-{
- return 0;
-}
-static inline void g4x_dp_set_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
-}
static inline bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, int port,
enum pipe *pipe)
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 46f23bdb4c17..98e6a931042f 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -6,6 +6,7 @@
*/
#include "g4x_hdmi.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -480,8 +481,8 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
vlv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -496,7 +497,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
g4x_hdmi_enable_port(encoder, pipe_config);
- vlv_wait_port_ready(dev_priv, dig_port, 0x0);
+ vlv_wait_port_ready(display, dig_port, 0x0);
}
static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
@@ -557,9 +558,8 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
chv_phy_pre_encoder_enable(encoder, pipe_config);
@@ -573,7 +573,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
g4x_hdmi_enable_port(encoder, pipe_config);
- vlv_wait_port_ready(dev_priv, dig_port, 0x0);
+ vlv_wait_port_ready(display, dig_port, 0x0);
/* Second common lane will stay alive on its own now */
chv_phy_release_cl2_override(encoder);
@@ -683,7 +683,7 @@ static bool assert_hdmi_port_valid(struct drm_i915_private *i915, enum port port
"Platform does not support HDMI %c\n", port_name(port));
}
-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
struct intel_display *display = &dev_priv->display;
@@ -693,10 +693,10 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
struct intel_connector *intel_connector;
if (!assert_port_valid(dev_priv, port))
- return;
+ return false;
if (!assert_hdmi_port_valid(dev_priv, port))
- return;
+ return false;
devdata = intel_bios_encoder_data_lookup(display, port);
@@ -707,15 +707,13 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
if (!dig_port)
- return;
+ return false;
dig_port->aux_ch = AUX_CH_NONE;
intel_connector = intel_connector_alloc();
- if (!intel_connector) {
- kfree(dig_port);
- return;
- }
+ if (!intel_connector)
+ goto err_connector_alloc;
intel_encoder = &dig_port->base;
@@ -723,9 +721,10 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
mutex_init(&dig_port->hdcp_mutex);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
- &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
- "HDMI %c", port_name(port));
+ if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "HDMI %c", port_name(port)))
+ goto err_encoder_init;
intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = g4x_hdmi_compute_config;
@@ -788,5 +787,17 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(dig_port);
- intel_hdmi_init_connector(dig_port, intel_connector);
+ if (!intel_hdmi_init_connector(dig_port, intel_connector))
+ goto err_init_connector;
+
+ return true;
+
+err_init_connector:
+ drm_encoder_cleanup(&intel_encoder->base);
+err_encoder_init:
+ kfree(intel_connector);
+err_connector_alloc:
+ kfree(dig_port);
+
+ return false;
}
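
The conversion of g4x_hdmi_init() to return bool brings a proper unwind ladder with it; as is idiomatic in the kernel, each label releases exactly what was acquired before the failing step, in reverse order:

/*
 * Unwind mapping for g4x_hdmi_init() above:
 *
 *   err_init_connector  -> drm_encoder_cleanup(&intel_encoder->base)
 *   err_encoder_init    -> kfree(intel_connector)
 *   err_connector_alloc -> kfree(dig_port)
 *
 * so no failure path leaks the connector or the digital port.
 */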
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h
index 817f55c7a3a1..a52e8986ec7a 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.h
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h
@@ -16,14 +16,15 @@ struct drm_connector;
struct drm_i915_private;
#ifdef I915
-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port);
int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state);
#else
-static inline void g4x_hdmi_init(struct drm_i915_private *dev_priv,
+static inline bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, int port)
{
+ return false;
}
static inline int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 611a7d6ef80c..d02c328bf902 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/debugfs.h>
+
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -13,6 +15,7 @@
static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 val;
@@ -25,16 +28,16 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
* This function is called from post_plane_update, which is run after
* a vblank wait.
*/
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
val = IPS_ENABLE;
- if (i915->display.ips.false_color)
+ if (display->ips.false_color)
val |= IPS_FALSE_COLOR;
if (IS_BROADWELL(i915)) {
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
val | IPS_PCODE_CONTROL));
/*
@@ -44,7 +47,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
* so we need to just enable it and continue on.
*/
} else {
- intel_de_write(i915, IPS_CTL, val);
+ intel_de_write(display, IPS_CTL, val);
/*
* The bit only becomes 1 in the next vblank, so this wait here
* is essentially intel_wait_for_vblank. If we don't have this
@@ -52,14 +55,15 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read.
*/
- if (intel_de_wait_for_set(i915, IPS_CTL, IPS_ENABLE, 50))
- drm_err(&i915->drm,
+ if (intel_de_wait_for_set(display, IPS_CTL, IPS_ENABLE, 50))
+ drm_err(display->drm,
"Timed out waiting for IPS enable\n");
}
}
bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
bool need_vblank_wait = false;
@@ -68,19 +72,19 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
return need_vblank_wait;
if (IS_BROADWELL(i915)) {
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
- if (intel_de_wait_for_clear(i915, IPS_CTL, IPS_ENABLE, 100))
- drm_err(&i915->drm,
+ if (intel_de_wait_for_clear(display, IPS_CTL, IPS_ENABLE, 100))
+ drm_err(display->drm,
"Timed out waiting for IPS disable\n");
} else {
- intel_de_write(i915, IPS_CTL, 0);
- intel_de_posting_read(i915, IPS_CTL);
+ intel_de_write(display, IPS_CTL, 0);
+ intel_de_posting_read(display, IPS_CTL);
}
/* We need to wait for a vblank before we can disable the plane. */
@@ -181,11 +185,14 @@ void hsw_ips_post_update(struct intel_atomic_state *state,
/* IPS only exists on ULT machines and is tied to pipe A. */
bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
- return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
+ struct intel_display *display = to_intel_display(crtc);
+
+ return HAS_IPS(display) && crtc->pipe == PIPE_A;
}
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
+static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
@@ -193,7 +200,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
if (!hsw_crtc_supports_ips(crtc))
return false;
- if (!i915->display.params.enable_ips)
+ if (!display->params.enable_ips)
return false;
if (crtc_state->pipe_bpp > 24)
@@ -207,12 +214,26 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
* Should measure whether using a lower cdclk w/o IPS
*/
if (IS_BROADWELL(i915) &&
- crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100)
+ crtc_state->pixel_rate > display->cdclk.max_cdclk_freq * 95 / 100)
return false;
return true;
}
+int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (!IS_BROADWELL(i915))
+ return 0;
+
+ if (!hsw_crtc_state_ips_capable(crtc_state))
+ return 0;
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+}
+
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -257,6 +278,7 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
@@ -264,7 +286,7 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
return;
if (IS_HASWELL(i915)) {
- crtc_state->ips_enabled = intel_de_read(i915, IPS_CTL) & IPS_ENABLE;
+ crtc_state->ips_enabled = intel_de_read(display, IPS_CTL) & IPS_ENABLE;
} else {
/*
* We cannot readout IPS state on broadwell, set to
@@ -278,9 +300,9 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
static int hsw_ips_debugfs_false_color_get(void *data, u64 *val)
{
struct intel_crtc *crtc = data;
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- *val = i915->display.ips.false_color;
+ *val = display->ips.false_color;
return 0;
}
@@ -288,7 +310,7 @@ static int hsw_ips_debugfs_false_color_get(void *data, u64 *val)
static int hsw_ips_debugfs_false_color_set(void *data, u64 val)
{
struct intel_crtc *crtc = data;
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state;
int ret;
@@ -296,7 +318,7 @@ static int hsw_ips_debugfs_false_color_set(void *data, u64 val)
if (ret)
return ret;
- i915->display.ips.false_color = val;
+ display->ips.false_color = val;
crtc_state = to_intel_crtc_state(crtc->base.state);
@@ -323,18 +345,19 @@ DEFINE_DEBUGFS_ATTRIBUTE(hsw_ips_debugfs_false_color_fops,
static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
{
struct intel_crtc *crtc = m->private;
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
- str_yes_no(i915->display.params.enable_ips));
+ str_yes_no(display->params.enable_ips));
- if (DISPLAY_VER(i915) >= 8) {
+ if (DISPLAY_VER(display) >= 8) {
seq_puts(m, "Currently: unknown\n");
} else {
- if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE)
+ if (intel_de_read(display, IPS_CTL) & IPS_ENABLE)
seq_puts(m, "Currently: enabled\n");
else
seq_puts(m, "Currently: disabled\n");
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h
index 35364228e1c1..7af12f88a8ce 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.h
+++ b/drivers/gpu/drm/i915/display/hsw_ips.h
@@ -19,7 +19,7 @@ bool hsw_ips_pre_update(struct intel_atomic_state *state,
void hsw_ips_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
bool hsw_crtc_supports_ips(struct intel_crtc *crtc);
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
+int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state);
int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void hsw_ips_get_config(struct intel_crtc_state *crtc_state);
@@ -42,9 +42,9 @@ static inline bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
return false;
}
-static inline bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
+static inline int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- return false;
+ return 0;
}
static inline int hsw_ips_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/i9xx_display_sr.c b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
new file mode 100644
index 000000000000..32abe9743014
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_display_sr.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/drm_device.h>
+
+#include "i915_reg.h"
+#include "i9xx_display_sr.h"
+#include "i9xx_wm_regs.h"
+#include "intel_de.h"
+#include "intel_gmbus.h"
+#include "intel_pci_config.h"
+
+static void i9xx_display_save_swf(struct intel_display *display)
+{
+ int i;
+
+ /* Scratch space */
+ if (DISPLAY_VER(display) == 2 && display->platform.mobile) {
+ for (i = 0; i < 7; i++) {
+ display->restore.saveSWF0[i] = intel_de_read(display, SWF0(display, i));
+ display->restore.saveSWF1[i] = intel_de_read(display, SWF1(display, i));
+ }
+ for (i = 0; i < 3; i++)
+ display->restore.saveSWF3[i] = intel_de_read(display, SWF3(display, i));
+ } else if (DISPLAY_VER(display) == 2) {
+ for (i = 0; i < 7; i++)
+ display->restore.saveSWF1[i] = intel_de_read(display, SWF1(display, i));
+ } else if (HAS_GMCH(display)) {
+ for (i = 0; i < 16; i++) {
+ display->restore.saveSWF0[i] = intel_de_read(display, SWF0(display, i));
+ display->restore.saveSWF1[i] = intel_de_read(display, SWF1(display, i));
+ }
+ for (i = 0; i < 3; i++)
+ display->restore.saveSWF3[i] = intel_de_read(display, SWF3(display, i));
+ }
+}
+
+static void i9xx_display_restore_swf(struct intel_display *display)
+{
+ int i;
+
+ /* Scratch space */
+ if (DISPLAY_VER(display) == 2 && display->platform.mobile) {
+ for (i = 0; i < 7; i++) {
+ intel_de_write(display, SWF0(display, i), display->restore.saveSWF0[i]);
+ intel_de_write(display, SWF1(display, i), display->restore.saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ intel_de_write(display, SWF3(display, i), display->restore.saveSWF3[i]);
+ } else if (DISPLAY_VER(display) == 2) {
+ for (i = 0; i < 7; i++)
+ intel_de_write(display, SWF1(display, i), display->restore.saveSWF1[i]);
+ } else if (HAS_GMCH(display)) {
+ for (i = 0; i < 16; i++) {
+ intel_de_write(display, SWF0(display, i), display->restore.saveSWF0[i]);
+ intel_de_write(display, SWF1(display, i), display->restore.saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ intel_de_write(display, SWF3(display, i), display->restore.saveSWF3[i]);
+ }
+}
+
+void i9xx_display_sr_save(struct intel_display *display)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ /* Display arbitration control */
+ if (DISPLAY_VER(display) <= 4)
+ display->restore.saveDSPARB = intel_de_read(display, DSPARB(display));
+
+ if (DISPLAY_VER(display) == 4)
+ pci_read_config_word(pdev, GCDGMBUS, &display->restore.saveGCDGMBUS);
+
+ i9xx_display_save_swf(display);
+}
+
+void i9xx_display_sr_restore(struct intel_display *display)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ i9xx_display_restore_swf(display);
+
+ if (DISPLAY_VER(display) == 4)
+ pci_write_config_word(pdev, GCDGMBUS, display->restore.saveGCDGMBUS);
+
+ /* Display arbitration */
+ if (DISPLAY_VER(display) <= 4)
+ intel_de_write(display, DSPARB(display), display->restore.saveDSPARB);
+}
diff --git a/drivers/gpu/drm/i915/display/i9xx_display_sr.h b/drivers/gpu/drm/i915/display/i9xx_display_sr.h
new file mode 100644
index 000000000000..39b8c18fe738
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_display_sr.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __I9XX_DISPLAY_SR_H__
+#define __I9XX_DISPLAY_SR_H__
+
+struct intel_display;
+
+void i9xx_display_sr_save(struct intel_display *display);
+void i9xx_display_sr_restore(struct intel_display *display);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 9447f7229b60..48e657a80a16 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -8,6 +8,7 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
@@ -416,7 +417,8 @@ static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
return DIV_ROUND_UP(pixel_rate * num, den);
}
-static void i9xx_plane_update_noarm(struct intel_plane *plane,
+static void i9xx_plane_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -444,7 +446,8 @@ static void i9xx_plane_update_noarm(struct intel_plane *plane,
}
}
-static void i9xx_plane_update_arm(struct intel_plane *plane,
+static void i9xx_plane_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -507,7 +510,8 @@ static void i9xx_plane_update_arm(struct intel_plane *plane,
intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
}
-static void i830_plane_update_arm(struct intel_plane *plane,
+static void i830_plane_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -517,11 +521,12 @@ static void i830_plane_update_arm(struct intel_plane *plane,
* Additional breakage on i830 causes register reads to return
* the last latched value instead of the last written value [ALM026].
*/
- i9xx_plane_update_noarm(plane, crtc_state, plane_state);
- i9xx_plane_update_arm(plane, crtc_state, plane_state);
+ i9xx_plane_update_noarm(dsb, plane, crtc_state, plane_state);
+ i9xx_plane_update_arm(dsb, plane, crtc_state, plane_state);
}
-static void i9xx_plane_disable_arm(struct intel_plane *plane,
+static void i9xx_plane_disable_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -549,7 +554,8 @@ static void i9xx_plane_disable_arm(struct intel_plane *plane,
}
static void
-g4x_primary_async_flip(struct intel_plane *plane,
+g4x_primary_async_flip(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
bool async_flip)
@@ -569,7 +575,8 @@ g4x_primary_async_flip(struct intel_plane *plane,
}
static void
-vlv_primary_async_flip(struct intel_plane *plane,
+vlv_primary_async_flip(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
bool async_flip)
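
Every plane hook here gains a struct intel_dsb * as its first parameter so callers can eventually route register writes through a display state buffer; on these pre-DSB platforms the argument is simply ignored and the writes stay MMIO. A sketch of the resulting dispatch shape, assuming (as elsewhere in i915) that a NULL dsb means "write immediately":

/* Hedged sketch: the vfunc dispatch after this change; NULL dsb = direct MMIO. */
static void example_plane_update(struct intel_dsb *dsb,
				 struct intel_plane *plane,
				 const struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state)
{
	plane->update_noarm(dsb, plane, crtc_state, plane_state);
	plane->update_arm(dsb, plane, crtc_state, plane_state);
}
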
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 15cda57fbc91..db78c1e6b0a3 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -6,14 +6,25 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
+#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
+#include "intel_bo.h"
#include "intel_display.h"
#include "intel_display_trace.h"
+#include "intel_fb.h"
#include "intel_mchbar_regs.h"
#include "intel_wm.h"
#include "skl_watermark.h"
#include "vlv_sideband.h"
+struct intel_watermark_params {
+ u16 fifo_size;
+ u16 max_wm;
+ u8 default_wm;
+ u8 guard_size;
+ u8 cacheline_size;
+};
+
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
@@ -136,6 +147,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
+ struct intel_display *display = &dev_priv->display;
bool was_enabled;
u32 val;
@@ -177,7 +189,7 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
return false;
}
- trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
+ trace_intel_memory_cxsr(display, was_enabled, enable);
drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
str_enabled_disabled(enable),
@@ -695,6 +707,76 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
}
}
+static bool i9xx_wm_need_update(const struct intel_plane_state *old_plane_state,
+ const struct intel_plane_state *new_plane_state)
+{
+ /* Update watermarks on tiling or size changes. */
+ if (old_plane_state->uapi.visible != new_plane_state->uapi.visible)
+ return true;
+
+ if (!old_plane_state->hw.fb || !new_plane_state->hw.fb)
+ return false;
+
+ if (old_plane_state->hw.fb->modifier != new_plane_state->hw.fb->modifier ||
+ old_plane_state->hw.rotation != new_plane_state->hw.rotation ||
+ drm_rect_width(&old_plane_state->uapi.src) != drm_rect_width(&new_plane_state->uapi.src) ||
+ drm_rect_height(&old_plane_state->uapi.src) != drm_rect_height(&new_plane_state->uapi.src) ||
+ drm_rect_width(&old_plane_state->uapi.dst) != drm_rect_width(&new_plane_state->uapi.dst) ||
+ drm_rect_height(&old_plane_state->uapi.dst) != drm_rect_height(&new_plane_state->uapi.dst))
+ return true;
+
+ return false;
+}
+
+static void i9xx_wm_compute(struct intel_crtc_state *new_crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ const struct intel_plane_state *new_plane_state)
+{
+ bool turn_off, turn_on, visible, was_visible, mode_changed;
+
+ mode_changed = intel_crtc_needs_modeset(new_crtc_state);
+ was_visible = old_plane_state->uapi.visible;
+ visible = new_plane_state->uapi.visible;
+
+ if (!was_visible && !visible)
+ return;
+
+ turn_off = was_visible && (!visible || mode_changed);
+ turn_on = visible && (!was_visible || mode_changed);
+
+ /* FIXME nuke when all wm code is atomic */
+ if (turn_on) {
+ new_crtc_state->update_wm_pre = true;
+ } else if (turn_off) {
+ new_crtc_state->update_wm_post = true;
+ } else if (i9xx_wm_need_update(old_plane_state, new_plane_state)) {
+ /* FIXME bollocks */
+ new_crtc_state->update_wm_pre = true;
+ new_crtc_state->update_wm_post = true;
+ }
+}
+
+static int i9xx_compute_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ if (plane->pipe != crtc->pipe)
+ continue;
+
+ i9xx_wm_compute(new_crtc_state, old_plane_state, new_plane_state);
+ }
+
+ return 0;
+}
+
/*
* Documentation says:
* "If the line size is small, the TLB fetches can get in the way of the
@@ -715,10 +797,11 @@ static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
const struct g4x_wm_values *wm)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
for_each_pipe(dev_priv, pipe)
- trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
+ trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm);
intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
FW_WM(wm->sr.plane, SR) |
@@ -747,10 +830,11 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
const struct vlv_wm_values *wm)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
+ trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm);
intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
(wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
@@ -1276,6 +1360,22 @@ out:
return 0;
}
+static int g4x_compute_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ int ret;
+
+ ret = g4x_compute_pipe_wm(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = g4x_compute_intermediate_wm(state, crtc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static void g4x_merge_wm(struct drm_i915_private *dev_priv,
struct g4x_wm_values *wm)
{
@@ -1902,6 +2002,22 @@ out:
return 0;
}
+static int vlv_compute_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ int ret;
+
+ ret = vlv_compute_pipe_wm(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = vlv_compute_intermediate_wm(state, crtc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
@@ -2088,12 +2204,13 @@ static void i965_update_wm(struct drm_i915_private *dev_priv)
static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
enum i9xx_plane_id i9xx_plane)
{
+ struct intel_display *display = &i915->display;
struct intel_plane *plane;
for_each_intel_plane(&i915->drm, plane) {
if (plane->id == PLANE_PRIMARY &&
plane->i9xx_plane == i9xx_plane)
- return intel_crtc_for_pipe(i915, plane->pipe);
+ return intel_crtc_for_pipe(display, plane->pipe);
}
return NULL;
@@ -2172,12 +2289,12 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
crtc = single_enabled_crtc(dev_priv);
if (IS_I915GM(dev_priv) && crtc) {
- struct drm_i915_gem_object *obj;
+ struct drm_gem_object *obj;
- obj = intel_fb_obj(crtc->base.primary->state->fb);
+ obj = intel_fb_bo(crtc->base.primary->state->fb);
/* self-refresh seems busted with untiled */
- if (!i915_gem_object_is_tiled(obj))
+ if (!intel_bo_is_tiled(obj))
crtc = NULL;
}
@@ -2878,8 +2995,9 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
- const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
+ struct intel_pipe_wm *intermediate = &new_crtc_state->wm.ilk.intermediate;
+ const struct intel_pipe_wm *optimal = &new_crtc_state->wm.ilk.optimal;
+ const struct intel_pipe_wm *active = &old_crtc_state->wm.ilk.optimal;
int level;
/*
@@ -2887,25 +3005,29 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
* currently active watermarks to get values that are safe both before
* and after the vblank.
*/
- *a = new_crtc_state->wm.ilk.optimal;
+ *intermediate = *optimal;
if (!new_crtc_state->hw.active ||
intel_crtc_needs_modeset(new_crtc_state) ||
state->skip_intermediate_wm)
return 0;
- a->pipe_enabled |= b->pipe_enabled;
- a->sprites_enabled |= b->sprites_enabled;
- a->sprites_scaled |= b->sprites_scaled;
+ intermediate->pipe_enabled |= active->pipe_enabled;
+ intermediate->sprites_enabled |= active->sprites_enabled;
+ intermediate->sprites_scaled |= active->sprites_scaled;
for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
- struct intel_wm_level *a_wm = &a->wm[level];
- const struct intel_wm_level *b_wm = &b->wm[level];
-
- a_wm->enable &= b_wm->enable;
- a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
- a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
- a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
- a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
+ struct intel_wm_level *intermediate_wm = &intermediate->wm[level];
+ const struct intel_wm_level *active_wm = &active->wm[level];
+
+ intermediate_wm->enable &= active_wm->enable;
+ intermediate_wm->pri_val = max(intermediate_wm->pri_val,
+ active_wm->pri_val);
+ intermediate_wm->spr_val = max(intermediate_wm->spr_val,
+ active_wm->spr_val);
+ intermediate_wm->cur_val = max(intermediate_wm->cur_val,
+ active_wm->cur_val);
+ intermediate_wm->fbc_val = max(intermediate_wm->fbc_val,
+ active_wm->fbc_val);
}
/*
@@ -2914,19 +3036,35 @@ static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
- if (!ilk_validate_pipe_wm(dev_priv, a))
+ if (!ilk_validate_pipe_wm(dev_priv, intermediate))
return -EINVAL;
/*
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
- if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
+ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
new_crtc_state->wm.need_postvbl_update = true;
return 0;
}
+static int ilk_compute_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ int ret;
+
+ ret = ilk_compute_pipe_wm(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = ilk_compute_intermediate_wm(state, crtc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
/*
* Merge the watermarks from all active pipes for a specific level.
*/
@@ -3265,7 +3403,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
dev_priv->display.wm.hw = *results;
}
-bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
+bool ilk_disable_cxsr(struct drm_i915_private *dev_priv)
{
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
@@ -3716,6 +3854,7 @@ static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
@@ -3723,7 +3862,7 @@ static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv, plane->pipe);
+ intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -3871,6 +4010,7 @@ static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
@@ -3878,7 +4018,7 @@ static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv, plane->pipe);
+ intel_crtc_for_pipe(display, plane->pipe);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -3971,16 +4111,14 @@ static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
}
static const struct intel_wm_funcs ilk_wm_funcs = {
- .compute_pipe_wm = ilk_compute_pipe_wm,
- .compute_intermediate_wm = ilk_compute_intermediate_wm,
+ .compute_watermarks = ilk_compute_watermarks,
.initial_watermarks = ilk_initial_watermarks,
.optimize_watermarks = ilk_optimize_watermarks,
.get_hw_state = ilk_wm_get_hw_state,
};
static const struct intel_wm_funcs vlv_wm_funcs = {
- .compute_pipe_wm = vlv_compute_pipe_wm,
- .compute_intermediate_wm = vlv_compute_intermediate_wm,
+ .compute_watermarks = vlv_compute_watermarks,
.initial_watermarks = vlv_initial_watermarks,
.optimize_watermarks = vlv_optimize_watermarks,
.atomic_update_watermarks = vlv_atomic_update_fifo,
@@ -3988,26 +4126,29 @@ static const struct intel_wm_funcs vlv_wm_funcs = {
};
static const struct intel_wm_funcs g4x_wm_funcs = {
- .compute_pipe_wm = g4x_compute_pipe_wm,
- .compute_intermediate_wm = g4x_compute_intermediate_wm,
+ .compute_watermarks = g4x_compute_watermarks,
.initial_watermarks = g4x_initial_watermarks,
.optimize_watermarks = g4x_optimize_watermarks,
.get_hw_state = g4x_wm_get_hw_state_and_sanitize,
};
static const struct intel_wm_funcs pnv_wm_funcs = {
+ .compute_watermarks = i9xx_compute_watermarks,
.update_wm = pnv_update_wm,
};
static const struct intel_wm_funcs i965_wm_funcs = {
+ .compute_watermarks = i9xx_compute_watermarks,
.update_wm = i965_update_wm,
};
static const struct intel_wm_funcs i9xx_wm_funcs = {
+ .compute_watermarks = i9xx_compute_watermarks,
.update_wm = i9xx_update_wm,
};
static const struct intel_wm_funcs i845_wm_funcs = {
+ .compute_watermarks = i9xx_compute_watermarks,
.update_wm = i845_update_wm,
};
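
Renaming a/b to intermediate/optimal/active makes the merge rule in ilk_compute_intermediate_wm() read off the page: start from the new optimal state, then AND the enable bits and take the maximum of each watermark value against the still-active old state, so the programmed values are safe on both sides of the vblank. The per-level rule in isolation, as a sketch reusing the struct from the diff:

/* Hedged sketch of the per-level merge in ilk_compute_intermediate_wm(). */
static void example_merge_wm_level(struct intel_wm_level *intermediate,
				   const struct intel_wm_level *active)
{
	/* A level stays enabled only if both old and new states allow it... */
	intermediate->enable &= active->enable;
	/* ...and each value must cover the worse of the two states. */
	intermediate->pri_val = max(intermediate->pri_val, active->pri_val);
	intermediate->spr_val = max(intermediate->spr_val, active->spr_val);
	intermediate->cur_val = max(intermediate->cur_val, active->cur_val);
	intermediate->fbc_val = max(intermediate->fbc_val, active->fbc_val);
}
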
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
index de0920730ab2..06ac37c6c94b 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.h
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -13,12 +13,12 @@ struct intel_crtc_state;
struct intel_plane_state;
#ifdef I915
-bool ilk_disable_lp_wm(struct drm_i915_private *i915);
+bool ilk_disable_cxsr(struct drm_i915_private *i915);
void ilk_wm_sanitize(struct drm_i915_private *i915);
bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
void i9xx_wm_init(struct drm_i915_private *i915);
#else
-static inline bool ilk_disable_lp_wm(struct drm_i915_private *i915)
+static inline bool ilk_disable_cxsr(struct drm_i915_private *i915)
{
return false;
}
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm_regs.h b/drivers/gpu/drm/i915/display/i9xx_wm_regs.h
new file mode 100644
index 000000000000..d68d22235cf2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/i9xx_wm_regs.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __I9XX_WM_REGS_H__
+#define __I9XX_WM_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define DSPARB(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
+#define DSPARB_CSTART_MASK (0x7f << 7)
+#define DSPARB_CSTART_SHIFT 7
+#define DSPARB_BSTART_MASK (0x7f)
+#define DSPARB_BSTART_SHIFT 0
+#define DSPARB_BEND_SHIFT 9 /* on 855 */
+#define DSPARB_AEND_SHIFT 0
+#define DSPARB_SPRITEA_SHIFT_VLV 0
+#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEB_SHIFT_VLV 8
+#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
+#define DSPARB_SPRITEC_SHIFT_VLV 16
+#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
+#define DSPARB_SPRITED_SHIFT_VLV 24
+#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
+#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
+#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
+#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
+#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
+#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
+#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
+#define DSPARB_SPRITED_HI_SHIFT_VLV 12
+#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
+#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
+#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
+#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
+#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
+#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define DSPARB_SPRITEE_SHIFT_VLV 0
+#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
+#define DSPARB_SPRITEF_SHIFT_VLV 8
+#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
+
+/* pnv/gen4/g4x/vlv/chv */
+#define DSPFW1(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
+#define DSPFW_SR_SHIFT 23
+#define DSPFW_SR_MASK (0x1ff << 23)
+#define DSPFW_CURSORB_SHIFT 16
+#define DSPFW_CURSORB_MASK (0x3f << 16)
+#define DSPFW_PLANEB_SHIFT 8
+#define DSPFW_PLANEB_MASK (0x7f << 8)
+#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
+#define DSPFW_PLANEA_SHIFT 0
+#define DSPFW_PLANEA_MASK (0x7f << 0)
+#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
+#define DSPFW2(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
+#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
+#define DSPFW_FBC_SR_SHIFT 28
+#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
+#define DSPFW_FBC_HPLL_SR_SHIFT 24
+#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
+#define DSPFW_SPRITEB_SHIFT 16
+#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
+#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
+#define DSPFW_CURSORA_SHIFT 8
+#define DSPFW_CURSORA_MASK (0x3f << 8)
+#define DSPFW_PLANEC_OLD_SHIFT 0
+#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
+#define DSPFW_SPRITEA_SHIFT 0
+#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
+#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
+#define DSPFW3(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
+#define DSPFW_HPLL_SR_EN (1 << 31)
+#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
+#define DSPFW_CURSOR_SR_SHIFT 24
+#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
+#define DSPFW_HPLL_CURSOR_SHIFT 16
+#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
+#define DSPFW_HPLL_SR_SHIFT 0
+#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
+
+/* vlv/chv */
+#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
+#define DSPFW_SPRITEB_WM1_SHIFT 16
+#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
+#define DSPFW_CURSORA_WM1_SHIFT 8
+#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
+#define DSPFW_SPRITEA_WM1_SHIFT 0
+#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
+#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
+#define DSPFW_PLANEB_WM1_SHIFT 24
+#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
+#define DSPFW_PLANEA_WM1_SHIFT 16
+#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
+#define DSPFW_CURSORB_WM1_SHIFT 8
+#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
+#define DSPFW_CURSOR_SR_WM1_SHIFT 0
+#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
+#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
+#define DSPFW_SR_WM1_SHIFT 0
+#define DSPFW_SR_WM1_MASK (0x1ff << 0)
+#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
+#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
+#define DSPFW_SPRITED_WM1_SHIFT 24
+#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
+#define DSPFW_SPRITED_SHIFT 16
+#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
+#define DSPFW_SPRITEC_WM1_SHIFT 8
+#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
+#define DSPFW_SPRITEC_SHIFT 0
+#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
+#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
+#define DSPFW_SPRITEF_WM1_SHIFT 24
+#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
+#define DSPFW_SPRITEF_SHIFT 16
+#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
+#define DSPFW_SPRITEE_WM1_SHIFT 8
+#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
+#define DSPFW_SPRITEE_SHIFT 0
+#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
+#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
+#define DSPFW_PLANEC_WM1_SHIFT 24
+#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
+#define DSPFW_PLANEC_SHIFT 16
+#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
+#define DSPFW_CURSORC_WM1_SHIFT 8
+#define DSPFW_CURSORC_WM1_MASK (0x3f << 8)
+#define DSPFW_CURSORC_SHIFT 0
+#define DSPFW_CURSORC_MASK (0x3f << 0)
+
+/* vlv/chv high order bits */
+#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
+#define DSPFW_SR_HI_SHIFT 24
+#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SPRITEF_HI_SHIFT 23
+#define DSPFW_SPRITEF_HI_MASK (1 << 23)
+#define DSPFW_SPRITEE_HI_SHIFT 22
+#define DSPFW_SPRITEE_HI_MASK (1 << 22)
+#define DSPFW_PLANEC_HI_SHIFT 21
+#define DSPFW_PLANEC_HI_MASK (1 << 21)
+#define DSPFW_SPRITED_HI_SHIFT 20
+#define DSPFW_SPRITED_HI_MASK (1 << 20)
+#define DSPFW_SPRITEC_HI_SHIFT 16
+#define DSPFW_SPRITEC_HI_MASK (1 << 16)
+#define DSPFW_PLANEB_HI_SHIFT 12
+#define DSPFW_PLANEB_HI_MASK (1 << 12)
+#define DSPFW_SPRITEB_HI_SHIFT 8
+#define DSPFW_SPRITEB_HI_MASK (1 << 8)
+#define DSPFW_SPRITEA_HI_SHIFT 4
+#define DSPFW_SPRITEA_HI_MASK (1 << 4)
+#define DSPFW_PLANEA_HI_SHIFT 0
+#define DSPFW_PLANEA_HI_MASK (1 << 0)
+#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
+#define DSPFW_SR_WM1_HI_SHIFT 24
+#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
+#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
+#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
+#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
+#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
+#define DSPFW_PLANEC_WM1_HI_SHIFT 21
+#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
+#define DSPFW_SPRITED_WM1_HI_SHIFT 20
+#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
+#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
+#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
+#define DSPFW_PLANEB_WM1_HI_SHIFT 12
+#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
+#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
+#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
+#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
+#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
+#define DSPFW_PLANEA_WM1_HI_SHIFT 0
+#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
+
+/* drain latency register values */
+#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
+#define DDL_CURSOR_SHIFT 24
+#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
+#define DDL_PLANE_SHIFT 0
+#define DDL_PRECISION_HIGH (1 << 7)
+#define DDL_PRECISION_LOW (0 << 7)
+#define DRAIN_LATENCY_MASK 0x7f
+
+/* FIFO watermark sizes etc */
+#define G4X_FIFO_LINE_SIZE 64
+#define I915_FIFO_LINE_SIZE 64
+#define I830_FIFO_LINE_SIZE 32
+
+#define VALLEYVIEW_FIFO_SIZE 255
+#define G4X_FIFO_SIZE 127
+#define I965_FIFO_SIZE 512
+#define I945_FIFO_SIZE 127
+#define I915_FIFO_SIZE 95
+#define I855GM_FIFO_SIZE 127 /* In cachelines */
+#define I830_FIFO_SIZE 95
+
+#define VALLEYVIEW_MAX_WM 0xff
+#define G4X_MAX_WM 0x3f
+#define I915_MAX_WM 0x3f
+
+#define PINEVIEW_DISPLAY_FIFO 512 /* in 64-byte units */
+#define PINEVIEW_FIFO_LINE_SIZE 64
+#define PINEVIEW_MAX_WM 0x1ff
+#define PINEVIEW_DFT_WM 0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM 0
+#define PINEVIEW_GUARD_WM 10
+#define PINEVIEW_CURSOR_FIFO 64
+#define PINEVIEW_CURSOR_MAX_WM 0x3f
+#define PINEVIEW_CURSOR_DFT_WM 0
+#define PINEVIEW_CURSOR_GUARD_WM 5
+
+#define VALLEYVIEW_CURSOR_MAX_WM 64
+#define I965_CURSOR_FIFO 64
+#define I965_CURSOR_MAX_WM 32
+#define I965_CURSOR_DFT_WM 8
+
+/* define the Watermark register on Ironlake */
+#define _WM0_PIPEA_ILK 0x45100
+#define _WM0_PIPEB_ILK 0x45104
+#define _WM0_PIPEC_IVB 0x45200
+#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \
+ _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
+#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
+#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
+#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM0_PIPE_PRIMARY(x) REG_FIELD_PREP(WM0_PIPE_PRIMARY_MASK, (x))
+#define WM0_PIPE_SPRITE(x) REG_FIELD_PREP(WM0_PIPE_SPRITE_MASK, (x))
+#define WM0_PIPE_CURSOR(x) REG_FIELD_PREP(WM0_PIPE_CURSOR_MASK, (x))
+#define WM1_LP_ILK _MMIO(0x45108)
+#define WM2_LP_ILK _MMIO(0x4510c)
+#define WM3_LP_ILK _MMIO(0x45110)
+#define WM_LP_ENABLE REG_BIT(31)
+#define WM_LP_LATENCY_MASK REG_GENMASK(30, 24)
+#define WM_LP_FBC_MASK_BDW REG_GENMASK(23, 19)
+#define WM_LP_FBC_MASK_ILK REG_GENMASK(23, 20)
+#define WM_LP_PRIMARY_MASK REG_GENMASK(18, 8)
+#define WM_LP_CURSOR_MASK REG_GENMASK(7, 0)
+#define WM_LP_LATENCY(x) REG_FIELD_PREP(WM_LP_LATENCY_MASK, (x))
+#define WM_LP_FBC_BDW(x) REG_FIELD_PREP(WM_LP_FBC_MASK_BDW, (x))
+#define WM_LP_FBC_ILK(x) REG_FIELD_PREP(WM_LP_FBC_MASK_ILK, (x))
+#define WM_LP_PRIMARY(x) REG_FIELD_PREP(WM_LP_PRIMARY_MASK, (x))
+#define WM_LP_CURSOR(x) REG_FIELD_PREP(WM_LP_CURSOR_MASK, (x))
+#define WM1S_LP_ILK _MMIO(0x45120)
+#define WM2S_LP_IVB _MMIO(0x45124)
+#define WM3S_LP_IVB _MMIO(0x45128)
+#define WM_LP_SPRITE_ENABLE REG_BIT(31) /* ilk/snb WM1S only */
+#define WM_LP_SPRITE_MASK REG_GENMASK(10, 0)
+#define WM_LP_SPRITE(x) REG_FIELD_PREP(WM_LP_SPRITE_MASK, (x))
+
+#define WM_MISC _MMIO(0x45260)
+#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
+
+#define WM_DBG _MMIO(0x45280)
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0)
+#define WM_DBG_DISALLOW_MAXFIFO (1 << 1)
+#define WM_DBG_DISALLOW_SPRITE (1 << 2)
+
+#endif /* __I9XX_WM_REGS_H__ */
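
The Ironlake WM0 definitions above follow the REG_GENMASK/REG_FIELD_PREP style, so a complete per-pipe watermark value is built by OR-ing the field helpers. A short usage sketch; the numeric values are illustrative only:

/* Hedged sketch: packing one WM0 pipe register value from its fields. */
static u32 example_wm0_pipe_value(void)
{
	return WM0_PIPE_PRIMARY(64) |	/* primary plane, bits 31:16 */
	       WM0_PIPE_SPRITE(32) |	/* sprite plane, bits 15:8 */
	       WM0_PIPE_CURSOR(8);	/* cursor, bits 7:0 */
}
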
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 293efc1f841d..c977b74f82f0 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -29,7 +29,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_probe_helper.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "icl_dsi.h"
#include "icl_dsi_regs.h"
@@ -45,43 +47,44 @@
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
+#include "intel_pfit.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
-static int header_credits_available(struct drm_i915_private *dev_priv,
+static int header_credits_available(struct intel_display *display,
enum transcoder dsi_trans)
{
- return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+ return (intel_de_read(display, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
>> FREE_HEADER_CREDIT_SHIFT;
}
-static int payload_credits_available(struct drm_i915_private *dev_priv,
+static int payload_credits_available(struct intel_display *display,
enum transcoder dsi_trans)
{
- return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+ return (intel_de_read(display, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
>> FREE_PLOAD_CREDIT_SHIFT;
}
-static bool wait_for_header_credits(struct drm_i915_private *dev_priv,
+static bool wait_for_header_credits(struct intel_display *display,
enum transcoder dsi_trans, int hdr_credit)
{
- if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
+ if (wait_for_us(header_credits_available(display, dsi_trans) >=
hdr_credit, 100)) {
- drm_err(&dev_priv->drm, "DSI header credits not released\n");
+ drm_err(display->drm, "DSI header credits not released\n");
return false;
}
return true;
}
-static bool wait_for_payload_credits(struct drm_i915_private *dev_priv,
+static bool wait_for_payload_credits(struct intel_display *display,
enum transcoder dsi_trans, int payld_credit)
{
- if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
+ if (wait_for_us(payload_credits_available(display, dsi_trans) >=
payld_credit, 100)) {
- drm_err(&dev_priv->drm, "DSI payload credits not released\n");
+ drm_err(display->drm, "DSI payload credits not released\n");
return false;
}
@@ -98,7 +101,7 @@ static enum transcoder dsi_port_to_transcoder(enum port port)
static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi;
enum port port;
@@ -108,8 +111,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for header/payload credits to be released */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT);
- wait_for_payload_credits(dev_priv, dsi_trans, MAX_PLOAD_CREDIT);
+ wait_for_header_credits(display, dsi_trans, MAX_HEADER_CREDIT);
+ wait_for_payload_credits(display, dsi_trans, MAX_PLOAD_CREDIT);
}
/* send nop DCS command */
@@ -119,22 +122,22 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
dsi->channel = 0;
ret = mipi_dsi_dcs_nop(dsi);
if (ret < 0)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"error sending DCS NOP command\n");
}
/* wait for header credits to be released */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT);
+ wait_for_header_credits(display, dsi_trans, MAX_HEADER_CREDIT);
}
/* wait for LP TX in progress bit to be cleared */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
+ if (wait_for_us(!(intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
LPTX_IN_PROGRESS), 20))
- drm_err(&dev_priv->drm, "LPTX bit not cleared\n");
+ drm_err(display->drm, "LPTX bit not cleared\n");
}
}
@@ -142,7 +145,7 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
const struct mipi_dsi_packet *packet)
{
struct intel_dsi *intel_dsi = host->intel_dsi;
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
const u8 *data = packet->payload;
u32 len = packet->payload_length;
@@ -150,20 +153,20 @@ static int dsi_send_pkt_payld(struct intel_dsi_host *host,
/* payload queue can accept *256 bytes*, check limit */
if (len > MAX_PLOAD_CREDIT * 4) {
- drm_err(&i915->drm, "payload size exceeds max queue limit\n");
+ drm_err(display->drm, "payload size exceeds max queue limit\n");
return -EINVAL;
}
for (i = 0; i < len; i += 4) {
u32 tmp = 0;
- if (!wait_for_payload_credits(i915, dsi_trans, 1))
+ if (!wait_for_payload_credits(display, dsi_trans, 1))
return -EBUSY;
for (j = 0; j < min_t(u32, len - i, 4); j++)
tmp |= *data++ << 8 * j;
- intel_de_write(i915, DSI_CMD_TXPYLD(dsi_trans), tmp);
+ intel_de_write(display, DSI_CMD_TXPYLD(dsi_trans), tmp);
}
return 0;
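
dsi_send_pkt_payld() feeds the command FIFO 32 bits at a time: it blocks on a free payload credit, then packs up to four payload bytes little-endian into one DSI_CMD_TXPYLD write. The packing step on its own, as a hedged standalone sketch:

/* Hedged sketch of the little-endian byte packing done per TXPYLD write. */
static u32 example_pack_payload_word(const u8 *data, u32 remaining)
{
	u32 tmp = 0;
	int j;

	/* Byte 0 lands in bits 7:0, byte 1 in 15:8, and so on. */
	for (j = 0; j < min_t(u32, remaining, 4); j++)
		tmp |= data[j] << 8 * j;

	return tmp;
}
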
@@ -174,14 +177,14 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
bool enable_lpdt)
{
struct intel_dsi *intel_dsi = host->intel_dsi;
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
u32 tmp;
- if (!wait_for_header_credits(dev_priv, dsi_trans, 1))
+ if (!wait_for_header_credits(display, dsi_trans, 1))
return -EBUSY;
- tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans));
+ tmp = intel_de_read(display, DSI_CMD_TXHDR(dsi_trans));
if (packet->payload)
tmp |= PAYLOAD_PRESENT;
@@ -200,15 +203,14 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
tmp |= ((packet->header[0] & DT_MASK) << DT_SHIFT);
tmp |= (packet->header[1] << PARAM_WC_LOWER_SHIFT);
tmp |= (packet->header[2] << PARAM_WC_UPPER_SHIFT);
- intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp);
+ intel_de_write(display, DSI_CMD_TXHDR(dsi_trans), tmp);
return 0;
}
void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 mode_flags;
enum port port;
@@ -226,12 +228,13 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
else
return;
- intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST);
+ intel_de_rmw(display, DSI_CMD_FRMCTL(port), 0,
+ DSI_FRAME_UPDATE_REQUEST);
}
static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
u32 tmp, mask, val;
@@ -245,31 +248,31 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK;
val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE |
RTERM_SELECT(0x6);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~mask;
tmp |= val;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), mask, val);
mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK;
val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) |
RCOMP_SCALAR(0x98);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
+ tmp = intel_de_read(display, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~mask;
tmp |= val;
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val);
+ intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_rmw(display, ICL_PORT_TX_DW2_AUX(phy), mask, val);
mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK;
val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) |
CURSOR_COEFF(0x3f);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val);
+ intel_de_rmw(display, ICL_PORT_TX_DW4_AUX(phy), mask, val);
/* Bspec: must not use GRP register for write */
for (lane = 0; lane <= 3; lane++)
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(lane, phy),
mask, val);
}
}
@@ -277,13 +280,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
static void configure_dual_link_mode(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1;
/* FIXME: Move all DSS handling to intel_vdsc.c */
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
@@ -293,7 +296,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dss_ctl2_reg = DSS_CTL2;
}
- dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
+ dss_ctl1 = intel_de_read(display, dss_ctl1_reg);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -308,19 +311,19 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"DL buffer depth exceed max value\n");
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK,
+ intel_de_rmw(display, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK,
RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth));
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
- intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
+ intel_de_write(display, dss_ctl1_reg, dss_ctl1);
}
/* aka DSI 8X clock */
@@ -341,6 +344,7 @@ static int afe_clk(struct intel_encoder *encoder,
static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -360,33 +364,34 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port),
+ intel_de_write(display, ICL_DSI_ESC_CLK_DIV(port),
esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
- intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port));
+ intel_de_posting_read(display, ICL_DSI_ESC_CLK_DIV(port));
}
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port),
+ intel_de_write(display, ICL_DPHY_ESC_CLK_DIV(port),
esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
- intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port));
+ intel_de_posting_read(display, ICL_DPHY_ESC_CLK_DIV(port));
}
if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_write(dev_priv, ADL_MIPIO_DW(port, 8),
+ intel_de_write(display, ADL_MIPIO_DW(port, 8),
esc_clk_div_m_phy & TX_ESC_CLK_DIV_PHY);
- intel_de_posting_read(dev_priv, ADL_MIPIO_DW(port, 8));
+ intel_de_posting_read(display, ADL_MIPIO_DW(port, 8));
}
}
}
-static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
- struct intel_dsi *intel_dsi)
+static void get_dsi_io_power_domains(struct intel_dsi *intel_dsi)
{
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]);
+ drm_WARN_ON(display->drm, intel_dsi->io_wakeref[port]);
intel_dsi->io_wakeref[port] =
intel_display_power_get(dev_priv,
port == PORT_A ?
@@ -397,15 +402,15 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
+ intel_de_rmw(display, ICL_DSI_IO_MODECTL(port),
0, COMBO_PHY_MODE_DSI);
- get_dsi_io_power_domains(dev_priv, intel_dsi);
+ get_dsi_io_power_domains(intel_dsi);
}
static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
@@ -421,6 +426,7 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum phy phy;
@@ -429,32 +435,33 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* Step 4b(i) set loadgen select for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0);
+ intel_de_rmw(display, ICL_PORT_TX_DW4_AUX(phy),
+ LOADGEN_SELECT, 0);
for (lane = 0; lane <= 3; lane++)
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW4_LN(lane, phy),
LOADGEN_SELECT, lane != 2 ? LOADGEN_SELECT : 0);
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy),
+ intel_de_rmw(display, ICL_PORT_TX_DW2_AUX(phy),
FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5));
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
+ tmp = intel_de_read(display, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_write(display, ICL_PORT_TX_DW2_GRP(phy), tmp);
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv) ||
- (DISPLAY_VER(dev_priv) >= 12)) {
- intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
+ (DISPLAY_VER(display) >= 12)) {
+ intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy),
LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0));
- tmp = intel_de_read(dev_priv,
+ tmp = intel_de_read(display,
ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0x1);
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy),
+ intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy),
tmp);
}
}
@@ -463,17 +470,17 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum phy phy;
/* clear common keeper enable bit */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
+ tmp = intel_de_read(display, ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~COMMON_KEEPER_EN;
- intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
- intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0);
+ intel_de_write(display, ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ intel_de_rmw(display, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0);
}
/*
@@ -482,14 +489,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* as part of lane phy sequence configuration
*/
for_each_dsi_phy(phy, intel_dsi->phys)
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG);
+ intel_de_rmw(display, ICL_PORT_CL_DW5(phy), 0,
+ SUS_CLOCK_CONFIG);
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0);
}
/* Program swing and de-emphasis */
@@ -497,26 +505,26 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* Set training enable to trigger update */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
+ tmp = intel_de_read(display, ICL_PORT_TX_DW5_LN(0, phy));
tmp |= TX_TRAINING_EN;
- intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN);
+ intel_de_write(display, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ intel_de_rmw(display, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN);
}
}
static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
+ intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
- if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+ if (wait_for_us(!(intel_de_read(display, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
500))
- drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n",
+ drm_err(display->drm, "DDI port:%c buffer idle\n",
port_name(port));
}
}
@@ -525,6 +533,7 @@ static void
gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -532,12 +541,12 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
/* Program DPHY clock lanes timings */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port),
+ intel_de_write(display, DPHY_CLK_TIMING_PARAM(port),
intel_dsi->dphy_reg);
/* Program DPHY data lanes timings */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port),
+ intel_de_write(display, DPHY_DATA_TIMING_PARAM(port),
intel_dsi->dphy_data_lane_reg);
/*
@@ -546,10 +555,10 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
* a value '0' inside TA_PARAM_REGISTERS otherwise
* leave all fields at HW default values.
*/
- if (DISPLAY_VER(dev_priv) == 11) {
+ if (DISPLAY_VER(display) == 11) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port),
+ intel_de_rmw(display, DPHY_TA_TIMING_PARAM(port),
TA_SURE_MASK,
TA_SURE_OVERRIDE | TA_SURE(0));
}
@@ -557,7 +566,7 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
for_each_dsi_phy(phy, intel_dsi->phys)
- intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy),
+ intel_de_rmw(display, ICL_DPHY_CHKN(phy),
0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP);
}
}
@@ -566,30 +575,30 @@ static void
gen11_dsi_setup_timings(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
/* Program T-INIT master registers */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port),
+ intel_de_rmw(display, ICL_DSI_T_INIT_MASTER(port),
DSI_T_INIT_MASTER_MASK, intel_dsi->init_count);
/* shadow register inside display core */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
+ intel_de_write(display, DSI_CLK_TIMING_PARAM(port),
intel_dsi->dphy_reg);
/* shadow register inside display core */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
+ intel_de_write(display, DSI_DATA_TIMING_PARAM(port),
intel_dsi->dphy_data_lane_reg);
/* shadow register inside display core */
- if (DISPLAY_VER(dev_priv) == 11) {
+ if (DISPLAY_VER(display) == 11) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port),
+ intel_de_rmw(display, DSI_TA_TIMING_PARAM(port),
TA_SURE_MASK,
TA_SURE_OVERRIDE | TA_SURE(0));
}
@@ -599,45 +608,45 @@ gen11_dsi_setup_timings(struct intel_encoder *encoder,
static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->display.dpll.lock);
- tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ mutex_lock(&display->dpll.lock);
+ tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->display.dpll.lock);
+ intel_de_write(display, ICL_DPCLKA_CFGCR0, tmp);
+ mutex_unlock(&display->dpll.lock);
}
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->display.dpll.lock);
- tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ mutex_lock(&display->dpll.lock);
+ tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->display.dpll.lock);
+ intel_de_write(display, ICL_DPCLKA_CFGCR0, tmp);
+ mutex_unlock(&display->dpll.lock);
}
static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
bool clock_enabled = false;
enum phy phy;
u32 tmp;
- tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ tmp = intel_de_read(display, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
if (!(tmp & ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)))
@@ -650,36 +659,36 @@ static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
static void gen11_dsi_map_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
enum phy phy;
u32 val;
- mutex_lock(&dev_priv->display.dpll.lock);
+ mutex_lock(&display->dpll.lock);
- val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ val = intel_de_read(display, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
}
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(display, ICL_DPCLKA_CFGCR0, val);
for_each_dsi_phy(phy, intel_dsi->phys) {
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
- intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(display, ICL_DPCLKA_CFGCR0, val);
- intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ intel_de_posting_read(display, ICL_DPCLKA_CFGCR0);
- mutex_unlock(&dev_priv->display.dpll.lock);
+ mutex_unlock(&display->dpll.lock);
}
static void
gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum pipe pipe = crtc->pipe;
@@ -689,7 +698,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
+ tmp = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
if (intel_dsi->eotp_pkt)
tmp &= ~EOTP_DISABLED;
@@ -745,7 +754,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
}
}
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (is_vid_mode(intel_dsi))
tmp |= BLANKING_PACKET_ENABLE;
}
@@ -778,15 +787,15 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
tmp |= TE_SOURCE_GPIO;
}
- intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+ intel_de_write(display, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
}
/* enable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, dsi_trans),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL2(display, dsi_trans),
0, PORT_SYNC_MODE_ENABLE);
}
@@ -798,8 +807,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* select data lane width */
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, dsi_trans));
tmp &= ~DDI_PORT_WIDTH_MASK;
tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
@@ -825,16 +834,16 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* enable DDI buffer */
tmp |= TRANS_DDI_FUNC_ENABLE;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans), tmp);
+ intel_de_write(display,
+ TRANS_DDI_FUNC_CTL(display, dsi_trans), tmp);
}
/* wait for link ready */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) &
+ if (wait_for_us((intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)) &
LINK_READY), 2500))
- drm_err(&dev_priv->drm, "DSI link not ready\n");
+ drm_err(display->drm, "DSI link not ready\n");
}
}
@@ -842,7 +851,7 @@ static void
gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -909,17 +918,17 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
/* minimum hactive as per bspec: 256 pixels */
if (adjusted_mode->crtc_hdisplay < 256)
- drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n");
+ drm_err(display->drm, "hactive is less then 256 pixels\n");
/* if RGB666 format, then hactive must be multiple of 4 pixels */
if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"hactive pixels are not multiple of 4\n");
/* program TRANS_HTOTAL register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, dsi_trans),
+ intel_de_write(display, TRANS_HTOTAL(display, dsi_trans),
HACTIVE(hactive - 1) | HTOTAL(htotal - 1));
}
@@ -928,12 +937,12 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (intel_dsi->video_mode == NON_BURST_SYNC_PULSE) {
/* BSPEC: hsync size should be at least 16 pixels */
if (hsync_size < 16)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"hsync size < 16 pixels\n");
}
if (hback_porch < 16)
- drm_err(&dev_priv->drm, "hback porch < 16 pixels\n");
+ drm_err(display->drm, "hback porch < 16 pixels\n");
if (intel_dsi->dual_link) {
hsync_start /= 2;
@@ -942,8 +951,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv,
- TRANS_HSYNC(dev_priv, dsi_trans),
+ intel_de_write(display,
+ TRANS_HSYNC(display, dsi_trans),
HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1));
}
}
@@ -957,22 +966,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
*/
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, dsi_trans),
+ intel_de_write(display, TRANS_VTOTAL(display, dsi_trans),
VACTIVE(vactive - 1) | VTOTAL(vtotal - 1));
}
if (vsync_end < vsync_start || vsync_end > vtotal)
- drm_err(&dev_priv->drm, "Invalid vsync_end value\n");
+ drm_err(display->drm, "Invalid vsync_end value\n");
if (vsync_start < vactive)
- drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
+ drm_err(display->drm, "vsync_start less than vactive\n");
/* program TRANS_VSYNC register for video mode only */
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv,
- TRANS_VSYNC(dev_priv, dsi_trans),
+ intel_de_write(display,
+ TRANS_VSYNC(display, dsi_trans),
VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1));
}
}
@@ -986,8 +995,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv,
- TRANS_VSYNCSHIFT(dev_priv, dsi_trans),
+ intel_de_write(display,
+ TRANS_VSYNCSHIFT(display, dsi_trans),
vsync_shift);
}
}
@@ -998,11 +1007,11 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* FIXME get rid of these local hacks and do it right,
* this will not handle e.g. delayed vblank correctly.
*/
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_write(dev_priv,
- TRANS_VBLANK(dev_priv, dsi_trans),
+ intel_de_write(display,
+ TRANS_VBLANK(display, dsi_trans),
VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1));
}
}
@@ -1010,20 +1019,20 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_rmw(dev_priv, TRANSCONF(dev_priv, dsi_trans), 0,
+ intel_de_rmw(display, TRANSCONF(display, dsi_trans), 0,
TRANSCONF_ENABLE);
/* wait for transcoder to be enabled */
- if (intel_de_wait_for_set(dev_priv, TRANSCONF(dev_priv, dsi_trans),
+ if (intel_de_wait_for_set(display, TRANSCONF(display, dsi_trans),
TRANSCONF_STATE_ENABLE, 10))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"DSI transcoder not enabled\n");
}
}
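A note on why both argument styles compile here: DISPLAY_VER() and the
intel_de_*() helpers accept either a struct drm_i915_private * or a
struct intel_display *. A minimal sketch of the dispatch this relies on,
assuming it mirrors i915's _Generic-based to_intel_display() (the real
macro lives outside this diff):

	/* Sketch only: normalize either pointer type to struct intel_display *.
	 * The casts keep the unselected _Generic branches compilable.
	 */
	#define __example_to_display(p)						\
		_Generic(p,							\
			 struct drm_i915_private *: &((struct drm_i915_private *)(p))->display, \
			 struct intel_display *: ((struct intel_display *)(p)))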
@@ -1031,7 +1040,7 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
@@ -1055,21 +1064,21 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* program hst_tx_timeout */
- intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans),
+ intel_de_rmw(display, DSI_HSTX_TO(dsi_trans),
HSTX_TIMEOUT_VALUE_MASK,
HSTX_TIMEOUT_VALUE(hs_tx_timeout));
/* FIXME: DSI_CALIB_TO */
/* program lp_rx_host timeout */
- intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans),
+ intel_de_rmw(display, DSI_LPRX_HOST_TO(dsi_trans),
LPRX_TIMEOUT_VALUE_MASK,
LPRX_TIMEOUT_VALUE(lp_rx_timeout));
/* FIXME: DSI_PWAIT_TO */
/* program turn around timeout */
- intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans),
+ intel_de_rmw(display, DSI_TA_TO(dsi_trans),
TA_TIMEOUT_VALUE_MASK,
TA_TIMEOUT_VALUE(ta_timeout));
}
@@ -1078,7 +1087,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 tmp;
@@ -1090,7 +1099,7 @@ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B)))
return;
- tmp = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ tmp = intel_de_read(display, UTIL_PIN_CTL);
if (enable) {
tmp |= UTIL_PIN_DIRECTION_INPUT;
@@ -1098,7 +1107,7 @@ static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
} else {
tmp &= ~UTIL_PIN_ENABLE;
}
- intel_de_write(dev_priv, UTIL_PIN_CTL, tmp);
+ intel_de_write(display, UTIL_PIN_CTL, tmp);
}
static void
@@ -1136,7 +1145,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi;
enum port port;
@@ -1152,14 +1161,14 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
* FIXME: This uses the number of DWs currently in the payload
* receive queue. This is probably not what we want here.
*/
- tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans));
+ tmp = intel_de_read(display, DSI_CMD_RXCTL(dsi_trans));
tmp &= NUMBER_RX_PLOAD_DW_MASK;
/* multiply "Number Rx Payload DW" by 4 to get max value */
tmp = tmp * 4;
dsi = intel_dsi->dsi_hosts[port]->device;
ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
if (ret < 0)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"error setting max return pkt size%d\n", tmp);
}
@@ -1219,10 +1228,10 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B)
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
+ if (DISPLAY_VER(display) == 11 && pipe == PIPE_B)
+ intel_de_rmw(display, CHICKEN_PAR1_1,
IGNORE_KVMR_PIPE_A,
enable ? IGNORE_KVMR_PIPE_A : 0);
}
@@ -1235,13 +1244,13 @@ static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder,
*/
static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- if (DISPLAY_VER(i915) == 13) {
+ if (DISPLAY_VER(display) == 13) {
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(i915, TGL_DSI_CHKN_REG(port),
+ intel_de_rmw(display, TGL_DSI_CHKN_REG(port),
TGL_DSI_CHKN_LSHS_GB_MASK,
TGL_DSI_CHKN_LSHS_GB(4));
}
@@ -1275,7 +1284,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
@@ -1284,13 +1293,13 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
dsi_trans = dsi_port_to_transcoder(port);
/* disable transcoder */
- intel_de_rmw(dev_priv, TRANSCONF(dev_priv, dsi_trans),
+ intel_de_rmw(display, TRANSCONF(display, dsi_trans),
TRANSCONF_ENABLE, 0);
/* wait for transcoder to be disabled */
- if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dev_priv, dsi_trans),
+ if (intel_de_wait_for_clear(display, TRANSCONF(display, dsi_trans),
TRANSCONF_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"DSI trancoder not disabled\n");
}
}
@@ -1307,7 +1316,7 @@ static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
enum transcoder dsi_trans;
@@ -1316,29 +1325,29 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* disable periodic update mode */
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port),
+ intel_de_rmw(display, DSI_CMD_FRMCTL(port),
DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0);
}
/* put dsi link in ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans));
+ tmp = intel_de_read(display, DSI_LP_MSG(dsi_trans));
tmp |= LINK_ENTER_ULPS;
tmp &= ~LINK_ULPS_TYPE_LP11;
- intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp);
+ intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp);
- if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
+ if (wait_for_us((intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
LINK_IN_ULPS),
10))
- drm_err(&dev_priv->drm, "DSI link not in ULPS\n");
+ drm_err(display->drm, "DSI link not in ULPS\n");
}
/* disable ddi function */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL(display, dsi_trans),
TRANS_DDI_FUNC_ENABLE, 0);
}
@@ -1346,8 +1355,8 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL2(dev_priv, dsi_trans),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL2(display, dsi_trans),
PORT_SYNC_MODE_ENABLE, 0);
}
}
@@ -1355,18 +1364,18 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
static void gen11_dsi_disable_port(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
- intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
+ intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
- if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+ if (wait_for_us((intel_de_read(display, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
8))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"DDI port:%c buffer not idle\n",
port_name(port));
}
@@ -1375,6 +1384,7 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
@@ -1392,7 +1402,7 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
/* set mode to DDI */
for_each_dsi_port(port, intel_dsi->ports)
- intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port),
+ intel_de_rmw(display, ICL_DSI_IO_MODECTL(port),
COMBO_PHY_MODE_DSI, 0);
}
@@ -1504,8 +1514,7 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
enum transcoder dsi_trans;
u32 val;
@@ -1514,7 +1523,7 @@ static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
else
dsi_trans = TRANSCODER_DSI_0;
- val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
+ val = intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans));
return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE);
}
@@ -1557,7 +1566,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
static void gen11_dsi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *intel_crtc;
enum pipe pipe;
@@ -1568,9 +1577,9 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder,
pipe = intel_crtc->pipe;
/* wa verify 1409054076:icl,jsl,ehl */
- if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B &&
- !(intel_de_read(dev_priv, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A))
- drm_dbg_kms(&dev_priv->drm,
+ if (DISPLAY_VER(display) == 11 && pipe == PIPE_B &&
+ !(intel_de_read(display, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A))
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] BIOS left IGNORE_KVMR_PIPE_A cleared with pipe B enabled\n",
encoder->base.base.id,
encoder->base.name);
@@ -1579,9 +1588,9 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder,
static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
- int dsc_max_bpc = DISPLAY_VER(dev_priv) >= 12 ? 12 : 10;
+ int dsc_max_bpc = DISPLAY_VER(display) >= 12 ? 12 : 10;
bool use_dsc;
int ret;
@@ -1594,7 +1603,9 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
/* FIXME: split only when necessary */
if (crtc_state->dsc.slice_count > 1)
- crtc_state->dsc.dsc_split = true;
+ crtc_state->dsc.num_streams = 2;
+ else
+ crtc_state->dsc.num_streams = 1;
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
@@ -1606,12 +1617,12 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
return ret;
/* DSI specific sanity checks on the common code */
- drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable);
- drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm, vdsc_cfg->vbr_enable);
+ drm_WARN_ON(display->drm, vdsc_cfg->simple_422);
+ drm_WARN_ON(display->drm,
vdsc_cfg->pic_width % vdsc_cfg->slice_width);
- drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm, vdsc_cfg->slice_height < 8);
+ drm_WARN_ON(display->drm,
vdsc_cfg->pic_height % vdsc_cfg->slice_height);
ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
@@ -1627,7 +1638,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *adjusted_mode =
@@ -1661,7 +1672,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->clock_set = true;
if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
- drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n");
+ drm_dbg_kms(display->drm, "Attempting to use DSC failed\n");
pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
@@ -1679,15 +1690,13 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
- get_dsi_io_power_domains(i915,
- enc_to_intel_dsi(encoder));
+ get_dsi_io_power_domains(enc_to_intel_dsi(encoder));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum transcoder dsi_trans;
@@ -1703,8 +1712,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans));
+ tmp = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, dsi_trans));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
*pipe = PIPE_A;
@@ -1719,11 +1728,11 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
*pipe = PIPE_D;
break;
default:
- drm_err(&dev_priv->drm, "Invalid PIPE input\n");
+ drm_err(display->drm, "Invalid PIPE input\n");
goto out;
}
- tmp = intel_de_read(dev_priv, TRANSCONF(dev_priv, dsi_trans));
+ tmp = intel_de_read(display, TRANSCONF(display, dsi_trans));
ret = tmp & TRANSCONF_ENABLE;
}
out:
@@ -1833,8 +1842,7 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&intel_dsi->base);
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
u32 tlpx_ns;
@@ -1858,7 +1866,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
*/
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n",
+ drm_dbg_kms(display->drm, "prepare_cnt out of range (%d)\n",
prepare_cnt);
prepare_cnt = ICL_PREPARE_CNT_MAX;
}
@@ -1867,7 +1875,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
ths_prepare_ns, tlpx_ns);
if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
}
@@ -1875,7 +1883,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
/* trail cnt in escape clocks */
trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n",
+ drm_dbg_kms(display->drm, "trail_cnt out of range (%d)\n",
trail_cnt);
trail_cnt = ICL_TRAIL_CNT_MAX;
}
@@ -1883,7 +1891,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
/* tclk pre count in escape clocks */
tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
}
@@ -1892,7 +1900,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
ths_prepare_ns, tlpx_ns);
if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n",
+ drm_dbg_kms(display->drm, "hs_zero_cnt out of range (%d)\n",
hs_zero_cnt);
hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
}
@@ -1900,7 +1908,7 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
/* hs exit zero cnt in escape clocks */
exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"exit_zero_cnt out of range (%d)\n",
exit_zero_cnt);
exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
@@ -1942,10 +1950,9 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
fixed_mode->vdisplay);
}
-void icl_dsi_init(struct drm_i915_private *dev_priv,
+void icl_dsi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
- struct intel_display *display = &dev_priv->display;
struct intel_dsi *intel_dsi;
struct intel_encoder *encoder;
struct intel_connector *intel_connector;
@@ -1973,7 +1980,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
encoder->devdata = devdata;
/* register DSI encoder with DRM subsystem */
- drm_encoder_init(&dev_priv->drm, &encoder->base, &gen11_dsi_encoder_funcs,
+ drm_encoder_init(display->drm, &encoder->base,
+ &gen11_dsi_encoder_funcs,
DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
@@ -1998,7 +2006,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
encoder->shutdown = intel_dsi_shutdown;
/* register DSI connector with DRM subsystem */
- drm_connector_init(&dev_priv->drm, connector, &gen11_dsi_connector_funcs,
+ drm_connector_init(display->drm, connector,
+ &gen11_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -2011,12 +2020,12 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata, NULL);
- mutex_lock(&dev_priv->drm.mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
if (!intel_panel_preferred_fixed_mode(intel_connector)) {
- drm_err(&dev_priv->drm, "DSI fixed mode info missing\n");
+ drm_err(display->drm, "DSI fixed mode info missing\n");
goto err;
}
@@ -2029,10 +2038,10 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
else
intel_dsi->ports = BIT(port);
- if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
- if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ if (drm_WARN_ON(display->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
for_each_dsi_port(port, intel_dsi->ports) {
@@ -2046,7 +2055,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv,
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- drm_dbg_kms(&dev_priv->drm, "no device found\n");
+ drm_dbg_kms(display->drm, "no device found\n");
goto err;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h
index 43fa7d72eeb1..099fc50e35b4 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.h
+++ b/drivers/gpu/drm/i915/display/icl_dsi.h
@@ -6,11 +6,11 @@
#ifndef __ICL_DSI_H__
#define __ICL_DSI_H__
-struct drm_i915_private;
struct intel_bios_encoder_data;
struct intel_crtc_state;
+struct intel_display;
-void icl_dsi_init(struct drm_i915_private *dev_priv,
+void icl_dsi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata);
void icl_dsi_frame_update(struct intel_crtc_state *crtc_state);
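Taken together, the icl_dsi.c and icl_dsi.h hunks show the shape of the
whole conversion: fetch a struct intel_display * once at the top of each
function and use it for register access, logging and version checks. A
condensed before/after sketch (illustrative, not an excerpt from the
patch):

	/* before */
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
	drm_err(&dev_priv->drm, "DSI link not ready\n");

	/* after */
	struct intel_display *display = to_intel_display(encoder);
	intel_de_write(display, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
	drm_err(display->drm, "DSI link not ready\n");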
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index c3b29a331d72..bbf8c5a8fdbd 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -9,8 +9,9 @@
#include <linux/acpi.h>
#include <acpi/video.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_acpi.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 186cf4833f71..55f3ae1e68c9 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -3,6 +3,8 @@
* Copyright 2024, Intel Corporation.
*/
+#include <linux/debugfs.h>
+
#include "intel_alpm.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -330,7 +332,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines);
intel_de_write(display,
- PORT_ALPM_CTL(display, port),
+ PORT_ALPM_CTL(port),
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE |
PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
@@ -338,7 +340,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
intel_dp->alpm_parameters.silence_period_sym_clocks));
intel_de_write(display,
- PORT_ALPM_LFPS_CTL(display, port),
+ PORT_ALPM_LFPS_CTL(port),
PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 12d6ed940751..03dc54c802d3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -266,7 +266,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_pipe = false;
crtc_state->update_m_n = false;
crtc_state->update_lrr = false;
- crtc_state->disable_lp_wm = false;
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
@@ -277,7 +276,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
crtc_state->dsb_color_vblank = NULL;
- crtc_state->dsb_color_commit = NULL;
+ crtc_state->dsb_commit = NULL;
+ crtc_state->use_dsb = false;
return &crtc_state->uapi;
}
@@ -312,7 +312,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
drm_WARN_ON(crtc->dev, crtc_state->dsb_color_vblank);
- drm_WARN_ON(crtc->dev, crtc_state->dsb_color_commit);
+ drm_WARN_ON(crtc->dev, crtc_state->dsb_commit);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
intel_crtc_free_hw_state(crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index e979786aa5cf..612e9b0ec14a 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -35,10 +35,12 @@
#include <linux/dma-resv.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include "i915_drv.h"
#include "i915_config.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic_plane.h"
@@ -206,17 +208,6 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
fb->format->cpp[color_plane];
}
-static bool
-use_min_ddb(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
-
- return DISPLAY_VER(i915) >= 13 &&
- crtc_state->uapi.async_flip &&
- plane->async_flip;
-}
-
static unsigned int
intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
@@ -224,8 +215,8 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- int width, height;
unsigned int rel_data_rate;
+ int width, height;
if (plane->id == PLANE_CURSOR)
return 0;
@@ -234,14 +225,6 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
return 0;
/*
- * We calculate extra ddb based on ratio plane rate/total data rate
- * in case, in some cases we should not allocate extra ddb for the plane,
- * so do not count its data rate, if this is the case.
- */
- if (use_min_ddb(crtc_state, plane))
- return 0;
-
- /*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
@@ -255,7 +238,11 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
height /= 2;
}
- rel_data_rate = width * height * fb->format->cpp[color_plane];
+ rel_data_rate =
+ skl_plane_relative_data_rate(crtc_state, plane, width, height,
+ fb->format->cpp[color_plane]);
+ if (!rel_data_rate)
+ return 0;
return intel_adjusted_rate(&plane_state->uapi.src,
&plane_state->uapi.dst,
@@ -391,28 +378,6 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
plane_state->uapi.visible = false;
}
-/* FIXME nuke when all wm code is atomic */
-static bool intel_wm_need_update(const struct intel_plane_state *cur,
- struct intel_plane_state *new)
-{
- /* Update watermarks on tiling or size changes. */
- if (new->uapi.visible != cur->uapi.visible)
- return true;
-
- if (!cur->hw.fb || !new->hw.fb)
- return false;
-
- if (cur->hw.fb->modifier != new->hw.fb->modifier ||
- cur->hw.rotation != new->hw.rotation ||
- drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
- drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
- drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
- drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
- return true;
-
- return false;
-}
-
static bool intel_plane_is_scaled(const struct intel_plane_state *plane_state)
{
int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
@@ -492,6 +457,61 @@ static bool i9xx_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state
return old_ctl != new_ctl;
}
+static bool ilk_must_disable_cxsr(const struct intel_crtc_state *new_crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ const struct intel_plane_state *new_plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ bool old_visible = old_plane_state->uapi.visible;
+ bool new_visible = new_plane_state->uapi.visible;
+ bool modeset, turn_on;
+
+ if (plane->id == PLANE_CURSOR)
+ return false;
+
+ modeset = intel_crtc_needs_modeset(new_crtc_state);
+ turn_on = new_visible && (!old_visible || modeset);
+
+ /*
+ * ILK/SNB DVSACNTR/Sprite Enable
+ * IVB SPR_CTL/Sprite Enable
+ * "When in Self Refresh Big FIFO mode, a write to enable the
+ * plane will be internally buffered and delayed while Big FIFO
+ * mode is exiting."
+ *
+ * Which means that enabling the sprite can take an extra frame
+ * when we start in big FIFO mode (LP1+). Thus we need to drop
+ * down to LP0 and wait for vblank in order to make sure the
+ * sprite gets enabled on the next vblank after the register write.
+ * Doing otherwise would risk enabling the sprite one frame after
+ * we've already signalled flip completion. We can resume LP1+
+ * once the sprite has been enabled.
+ *
+ * Experimental results suggest this is needed for the primary
+ * plane as well, not only the sprite plane.
+ */
+ if (turn_on)
+ return true;
+
+ /*
+ * WaCxSRDisabledForSpriteScaling:ivb
+ * IVB SPR_SCALE/Scaling Enable
+ * "Low Power watermarks must be disabled for at least one
+ * frame before enabling sprite scaling, and kept disabled
+ * until sprite scaling is disabled."
+ *
+ * ILK/SNB DVSASCALE/Scaling Enable
+ * "When in Self Refresh Big FIFO mode, scaling enable will be
+ * masked off while Big FIFO mode is exiting."
+ *
+ * Despite the w/a only being listed for IVB we assume that
+ * the ILK/SNB note has similar ramifications, hence we apply
+ * the w/a on all three platforms.
+ */
+ return !intel_plane_is_scaled(old_plane_state) &&
+ intel_plane_is_scaled(new_plane_state);
+}
+
static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -546,20 +566,6 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
was_visible, visible,
turn_off, turn_on, mode_changed);
- if (turn_on) {
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- new_crtc_state->update_wm_pre = true;
- } else if (turn_off) {
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- new_crtc_state->update_wm_post = true;
- } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
- /* FIXME bollocks */
- new_crtc_state->update_wm_pre = true;
- new_crtc_state->update_wm_post = true;
- }
- }
-
if (visible || was_visible)
new_crtc_state->fb_bits |= plane->frontbuffer_bit;
@@ -567,45 +573,9 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
i9xx_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state))
new_crtc_state->disable_cxsr = true;
- /*
- * ILK/SNB DVSACNTR/Sprite Enable
- * IVB SPR_CTL/Sprite Enable
- * "When in Self Refresh Big FIFO mode, a write to enable the
- * plane will be internally buffered and delayed while Big FIFO
- * mode is exiting."
- *
- * Which means that enabling the sprite can take an extra frame
- * when we start in big FIFO mode (LP1+). Thus we need to drop
- * down to LP0 and wait for vblank in order to make sure the
- * sprite gets enabled on the next vblank after the register write.
- * Doing otherwise would risk enabling the sprite one frame after
- * we've already signalled flip completion. We can resume LP1+
- * once the sprite has been enabled.
- *
- *
- * WaCxSRDisabledForSpriteScaling:ivb
- * IVB SPR_SCALE/Scaling Enable
- * "Low Power watermarks must be disabled for at least one
- * frame before enabling sprite scaling, and kept disabled
- * until sprite scaling is disabled."
- *
- * ILK/SNB DVSASCALE/Scaling Enable
- * "When in Self Refresh Big FIFO mode, scaling enable will be
- * masked off while Big FIFO mode is exiting."
- *
- * Despite the w/a only being listed for IVB we assume that
- * the ILK/SNB note has similar ramifications, hence we apply
- * the w/a on all three platforms.
- *
- * With experimental results seems this is needed also for primary
- * plane, not only sprite plane.
- */
- if (plane->id != PLANE_CURSOR &&
- (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
- IS_IVYBRIDGE(dev_priv)) &&
- (turn_on || (!intel_plane_is_scaled(old_plane_state) &&
- intel_plane_is_scaled(new_plane_state))))
- new_crtc_state->disable_lp_wm = true;
+ if ((IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) &&
+ ilk_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state))
+ new_crtc_state->disable_cxsr = true;
if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) {
new_crtc_state->do_async_flip = true;
@@ -710,13 +680,13 @@ intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id)
int intel_plane_atomic_check(struct intel_atomic_state *state,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, plane);
const struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
const struct intel_plane_state *new_primary_crtc_plane_state;
- struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, plane->pipe);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -790,7 +760,8 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
return NULL;
}
-void intel_plane_update_noarm(struct intel_plane *plane,
+void intel_plane_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -799,10 +770,11 @@ void intel_plane_update_noarm(struct intel_plane *plane,
trace_intel_plane_update_noarm(plane, crtc);
if (plane->update_noarm)
- plane->update_noarm(plane, crtc_state, plane_state);
+ plane->update_noarm(dsb, plane, crtc_state, plane_state);
}
-void intel_plane_async_flip(struct intel_plane *plane,
+void intel_plane_async_flip(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
bool async_flip)
@@ -810,34 +782,37 @@ void intel_plane_async_flip(struct intel_plane *plane,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_plane_async_flip(plane, crtc, async_flip);
- plane->async_flip(plane, crtc_state, plane_state, async_flip);
+ plane->async_flip(dsb, plane, crtc_state, plane_state, async_flip);
}
-void intel_plane_update_arm(struct intel_plane *plane,
+void intel_plane_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (crtc_state->do_async_flip && plane->async_flip) {
- intel_plane_async_flip(plane, crtc_state, plane_state, true);
+ intel_plane_async_flip(dsb, plane, crtc_state, plane_state, true);
return;
}
trace_intel_plane_update_arm(plane, crtc);
- plane->update_arm(plane, crtc_state, plane_state);
+ plane->update_arm(dsb, plane, crtc_state, plane_state);
}
-void intel_plane_disable_arm(struct intel_plane *plane,
+void intel_plane_disable_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_plane_disable_arm(plane, crtc);
- plane->disable_arm(plane, crtc_state);
+ plane->disable_arm(dsb, plane, crtc_state);
}
-void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
+void intel_crtc_planes_update_noarm(struct intel_dsb *dsb,
+ struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *new_crtc_state =
@@ -862,11 +837,13 @@ void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
/* TODO: for mailbox updates this should be skipped */
if (new_plane_state->uapi.visible ||
new_plane_state->planar_slave)
- intel_plane_update_noarm(plane, new_crtc_state, new_plane_state);
+ intel_plane_update_noarm(dsb, plane,
+ new_crtc_state, new_plane_state);
}
}
-static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
+static void skl_crtc_planes_update_arm(struct intel_dsb *dsb,
+ struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *old_crtc_state =
@@ -893,13 +870,14 @@ static void skl_crtc_planes_update_arm(struct intel_atomic_state *state,
*/
if (new_plane_state->uapi.visible ||
new_plane_state->planar_slave)
- intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
+ intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state);
else
- intel_plane_disable_arm(plane, new_crtc_state);
+ intel_plane_disable_arm(dsb, plane, new_crtc_state);
}
}
-static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state,
+static void i9xx_crtc_planes_update_arm(struct intel_dsb *dsb,
+ struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *new_crtc_state =
@@ -919,21 +897,22 @@ static void i9xx_crtc_planes_update_arm(struct intel_atomic_state *state,
* would have to be called here as well.
*/
if (new_plane_state->uapi.visible)
- intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
+ intel_plane_update_arm(dsb, plane, new_crtc_state, new_plane_state);
else
- intel_plane_disable_arm(plane, new_crtc_state);
+ intel_plane_disable_arm(dsb, plane, new_crtc_state);
}
}
-void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
+void intel_crtc_planes_update_arm(struct intel_dsb *dsb,
+ struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
if (DISPLAY_VER(i915) >= 9)
- skl_crtc_planes_update_arm(state, crtc);
+ skl_crtc_planes_update_arm(dsb, state, crtc);
else
- i9xx_crtc_planes_update_arm(state, crtc);
+ i9xx_crtc_planes_update_arm(dsb, state, crtc);
}
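The new struct intel_dsb * parameter threads a Display State Buffer
handle down to the plane hooks; a NULL dsb means "program the hardware
directly via MMIO". A sketch of how a write helper can branch on it,
assuming a helper along the lines of i915's intel_de_write_dsb() (not
part of this hunk):

	/* Sketch: emit into the DSB when batching, else write immediately. */
	static inline void example_plane_write(struct intel_display *display,
					       struct intel_dsb *dsb,
					       i915_reg_t reg, u32 val)
	{
		if (dsb)
			intel_dsb_reg_write(dsb, reg, val);
		else
			intel_de_write_fw(display, reg, val);
	}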
int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
@@ -1031,6 +1010,12 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
*/
hsub = 1;
vsub = 1;
+
+ /* Wa_16023981245 */
+ if ((DISPLAY_VERx100(i915) == 2000 ||
+ DISPLAY_VERx100(i915) == 3000) &&
+ src_x % 2 != 0)
+ hsub = 2;
} else {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
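The Wa_16023981245 hunk tightens horizontal alignment: on display
versions 20.00 and 30.00 an odd src_x bumps hsub to 2, so the alignment
check that follows rejects the configuration. A sketch of that check,
assuming it follows the usual pattern in this function (the check itself
is outside the hunk):

	/* Sketch: reject source rectangles not aligned to the subsampling. */
	if (src_x % hsub || src_w % hsub || src_y % vsub || src_h % vsub)
		return -EINVAL;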
@@ -1114,8 +1099,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
- struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
+ struct drm_gem_object *obj = intel_fb_bo(new_plane_state->hw.fb);
+ struct drm_gem_object *old_obj = intel_fb_bo(old_plane_state->hw.fb);
int ret;
if (old_obj) {
@@ -1135,7 +1120,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* can safely continue.
*/
if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
- ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv,
+ ret = add_dma_resv_fences(old_obj->resv,
&new_plane_state->uapi);
if (ret < 0)
return ret;
@@ -1195,7 +1180,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
struct intel_atomic_state *state =
to_intel_atomic_state(old_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
+ struct drm_gem_object *obj = intel_fb_bo(old_plane_state->hw.fb);
if (!obj)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 6c4fe3596465..0f982f452ff3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -14,6 +14,7 @@ struct drm_rect;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dsb;
struct intel_plane;
struct intel_plane_state;
enum plane_id;
@@ -32,26 +33,32 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
struct intel_crtc *crtc);
void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state);
-void intel_plane_async_flip(struct intel_plane *plane,
+void intel_plane_async_flip(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
bool async_flip);
-void intel_plane_update_noarm(struct intel_plane *plane,
+void intel_plane_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void intel_plane_update_arm(struct intel_plane *plane,
+void intel_plane_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void intel_plane_disable_arm(struct intel_plane *plane,
+void intel_plane_disable_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
-void intel_crtc_planes_update_noarm(struct intel_atomic_state *state,
+void intel_crtc_planes_update_noarm(struct intel_dsb *dsb,
+ struct intel_atomic_state *state,
struct intel_crtc *crtc);
-void intel_crtc_planes_update_arm(struct intel_atomic_state *state,
+void intel_crtc_planes_update_arm(struct intel_dsb *dsb,
+ struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index f5e7eefab2f1..ce8a4319a63c 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -681,12 +681,11 @@ static void ibx_audio_codec_enable(struct intel_encoder *encoder,
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder trans = crtc_state->cpu_transcoder;
- if (HAS_DP20(i915))
- intel_de_rmw(i915, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
+ if (HAS_DP20(display))
+ intel_de_rmw(display, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
}
@@ -699,10 +698,12 @@ bool intel_audio_compute_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ mutex_lock(&connector->eld_mutex);
if (!connector->eld[0]) {
drm_dbg_kms(&i915->drm,
"Bogus ELD on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ mutex_unlock(&connector->eld_mutex);
return false;
}
@@ -710,6 +711,7 @@ bool intel_audio_compute_config(struct intel_encoder *encoder,
memcpy(crtc_state->eld, connector->eld, sizeof(crtc_state->eld));
crtc_state->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+ mutex_unlock(&connector->eld_mutex);
return true;
}
@@ -978,16 +980,63 @@ retry:
drm_modeset_acquire_fini(&ctx);
}
+int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ int min_cdclk = 0;
+
+ if (!crtc_state->has_audio)
+ return 0;
+
+ /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
+ * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
+ * there may be audio corruption or screen corruption." This cdclk
+ * restriction for GLK is 316.8 MHz.
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ crtc_state->port_clock >= 540000 &&
+ crtc_state->lane_count == 4) {
+ if (DISPLAY_VER(display) == 10) {
+ /* Display WA #1145: glk */
+ min_cdclk = max(min_cdclk, 316800);
+ } else if (DISPLAY_VER(display) == 9 || IS_BROADWELL(dev_priv)) {
+ /* Display WA #1144: skl,bxt */
+ min_cdclk = max(min_cdclk, 432000);
+ }
+ }
+
+ /*
+ * According to BSpec, "The CD clock frequency must be at least twice
+ * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+ */
+ if (DISPLAY_VER(display) >= 9)
+ min_cdclk = max(min_cdclk, 2 * 96000);
+
+ /*
+ * "For DP audio configuration, cdclk frequency shall be set to
+ * meet the following requirements:
+ * DP Link Frequency(MHz) | Cdclk frequency(MHz)
+ * 270 | 320 or higher
+ * 162 | 200 or higher"
+ */
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_dp_encoder(crtc_state))
+ min_cdclk = max(min_cdclk, crtc_state->port_clock);
+
+ return min_cdclk;
+}
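intel_audio_min_cdclk() gathers the audio-related cdclk floors into one
helper, folding each constraint in with max(). The expected call site,
sketched as an assumption since the caller is not part of this diff:

	/* Sketch: fold the audio floor into the overall cdclk minimum. */
	min_cdclk = max(min_cdclk, intel_audio_min_cdclk(crtc_state));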
+
static unsigned long i915_audio_component_get_power(struct device *kdev)
{
struct intel_display *display = to_intel_display(kdev);
struct drm_i915_private *i915 = to_i915(display->drm);
- intel_wakeref_t ret;
+ intel_wakeref_t wakeref;
/* Catch potential impedance mismatches before they occur! */
BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
- ret = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK);
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_AUDIO_PLAYBACK);
if (i915->display.audio.power_refcount++ == 0) {
if (DISPLAY_VER(i915) >= 9) {
@@ -1007,7 +1056,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
0, AUD_PIN_BUF_ENABLE);
}
- return ret;
+ return (unsigned long)wakeref;
}
static void i915_audio_component_put_power(struct device *kdev,
@@ -1015,13 +1064,14 @@ static void i915_audio_component_put_power(struct device *kdev,
{
struct intel_display *display = to_intel_display(kdev);
struct drm_i915_private *i915 = to_i915(display->drm);
+ intel_wakeref_t wakeref = (intel_wakeref_t)cookie;
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--i915->display.audio.power_refcount == 0)
if (IS_GEMINILAKE(i915))
glk_force_audio_cdclk(i915, false);
- intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, cookie);
+ intel_display_power_put(i915, POWER_DOMAIN_AUDIO_PLAYBACK, wakeref);
}
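The rename from ret to wakeref plus the explicit casts make the cookie
round-trip visible: the audio component ABI only carries an unsigned
long, and the BUILD_BUG_ON in get_power() guards the conversion. In
sketch form (illustrative variable names):

	/* Sketch: the opaque cookie is just the wakeref, cast both ways. */
	unsigned long cookie = (unsigned long)wakeref;       /* get_power() */
	intel_wakeref_t wakeref = (intel_wakeref_t)cookie;   /* put_power() */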
static void i915_audio_component_codec_wake_override(struct device *kdev,
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 576c061d72a4..1bafc155434a 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -27,6 +27,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
+int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_register(struct drm_i915_private *i915);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 9e05745d797d..7e6ce905bdaf 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -10,6 +10,7 @@
#include <acpi/video.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
@@ -40,8 +41,9 @@ static u32 scale(u32 source_val,
{
u64 target_val;
- WARN_ON(source_min > source_max);
- WARN_ON(target_min > target_max);
+ if (WARN_ON(source_min >= source_max) ||
+ WARN_ON(target_min > target_max))
+ return target_min;
/* defensive */
source_val = clamp(source_val, source_min, source_max);
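The hardened scale() now refuses a degenerate source range
(source_min >= source_max) up front instead of risking a divide by zero
further down, with target_min as the safe fallback. For reference, the
linear remap it performs on sane input, sketched with kernel helpers and
assumed to match the function body outside this hunk:

	/* Sketch: map source_val from [source_min, source_max]
	 * onto [target_min, target_max] without 32-bit overflow.
	 */
	target_val = target_min +
		DIV_ROUND_CLOSEST_ULL(mul_u32_u32(source_val - source_min,
						  target_max - target_min),
				      source_max - source_min);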
@@ -949,7 +951,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
else
props.power = BACKLIGHT_POWER_OFF;
- name = kstrdup_const("intel_backlight", GFP_KERNEL);
+ name = kstrdup("intel_backlight", GFP_KERNEL);
if (!name)
return -ENOMEM;
@@ -963,7 +965,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
* compatibility. Use unique names for subsequent backlight devices as a
* fallback when the default name already exists.
*/
- kfree_const(name);
+ kfree(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
i915->drm.primary->index, connector->base.name);
if (!name)
@@ -987,7 +989,7 @@ int intel_backlight_device_register(struct intel_connector *connector)
connector->base.base.id, connector->base.name, name);
out:
- kfree_const(name);
+ kfree(name);
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index bed485374ab0..e0e4e9b62d8d 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,6 +25,7 @@
*
*/
+#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <drm/display/drm_dp_helper.h>
@@ -32,12 +33,12 @@
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include "soc/intel_rom.h"
+
#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
-#include "intel_uncore.h"
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
@@ -1168,7 +1169,6 @@ static int intel_bios_ssc_frequency(struct intel_display *display,
static void
parse_general_features(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct bdb_general_features *general;
general = bdb_find_section(display, BDB_GENERAL_FEATURES);
@@ -1178,7 +1178,7 @@ parse_general_features(struct intel_display *display)
display->vbt.int_tv_support = general->int_tv_support;
/* int_crt_support can't be trusted on earlier platforms */
if (display->vbt.version >= 155 &&
- (HAS_DDI(display) || IS_VALLEYVIEW(i915)))
+ (HAS_DDI(display) || display->platform.valleyview))
display->vbt.int_crt_support = general->int_crt_support;
display->vbt.lvds_use_ssc = general->enable_ssc;
display->vbt.lvds_ssc_freq =
@@ -1402,12 +1402,21 @@ parse_power_conservation_features(struct intel_display *display,
panel_type);
}
+static void vbt_edp_to_pps_delays(struct intel_pps_delays *pps,
+ const struct edp_power_seq *edp_pps)
+{
+ pps->power_up = edp_pps->t1_t3;
+ pps->backlight_on = edp_pps->t8;
+ pps->backlight_off = edp_pps->t9;
+ pps->power_down = edp_pps->t10;
+ pps->power_cycle = edp_pps->t11_t12;
+}
+
static void
parse_edp(struct intel_display *display,
struct intel_panel *panel)
{
const struct bdb_edp *edp;
- const struct edp_power_seq *edp_pps;
const struct edp_fast_link_params *edp_link_params;
int panel_type = panel->vbt.panel_type;
@@ -1428,10 +1437,10 @@ parse_edp(struct intel_display *display,
}
/* Get the eDP sequencing and link info */
- edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->fast_link_params[panel_type];
- panel->vbt.edp.pps = *edp_pps;
+ vbt_edp_to_pps_delays(&panel->vbt.edp.pps,
+ &edp->power_seqs[panel_type]);
if (display->vbt.version >= 224) {
panel->vbt.edp.rate =
@@ -1541,7 +1550,6 @@ static void
parse_psr(struct intel_display *display,
struct intel_panel *panel)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct bdb_psr *psr;
const struct psr_table *psr_table;
int panel_type = panel->vbt.panel_type;
@@ -1566,7 +1574,7 @@ parse_psr(struct intel_display *display,
* Old decimal value is wake up time in multiples of 100 us.
*/
if (display->vbt.version >= 205 &&
- (DISPLAY_VER(display) >= 9 && !IS_BROXTON(i915))) {
+ (DISPLAY_VER(display) >= 9 && !display->platform.broxton)) {
switch (psr_table->tp1_wakeup_time) {
case 0:
panel->vbt.psr.tp1_wakeup_time_us = 500;
@@ -1705,8 +1713,8 @@ parse_mipi_config(struct intel_display *display,
return;
}
- drm_dbg(display->drm, "Found MIPI Config block, panel index = %d\n",
- panel_type);
+ drm_dbg_kms(display->drm, "Found MIPI Config block, panel index = %d\n",
+ panel_type);
/*
* get hold of the correct configuration block and pps data as per
@@ -2028,11 +2036,9 @@ static void icl_fixup_mipi_sequences(struct intel_display *display,
static void fixup_mipi_sequences(struct intel_display *display,
struct intel_panel *panel)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (DISPLAY_VER(display) >= 11)
icl_fixup_mipi_sequences(display, panel);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
vlv_fixup_mipi_sequences(display, panel);
}
@@ -2066,8 +2072,8 @@ parse_mipi_sequence(struct intel_display *display,
return;
}
- drm_dbg(display->drm, "Found MIPI sequence block v%u\n",
- sequence->version);
+ drm_dbg_kms(display->drm, "Found MIPI sequence block v%u\n",
+ sequence->version);
seq_data = find_panel_sequence_block(display, sequence, panel_type, &seq_size);
if (!seq_data)
@@ -2113,7 +2119,7 @@ parse_mipi_sequence(struct intel_display *display,
fixup_mipi_sequences(display, panel);
- drm_dbg(display->drm, "MIPI related VBT parsing complete\n");
+ drm_dbg_kms(display->drm, "MIPI related VBT parsing complete\n");
return;
err:
@@ -2242,15 +2248,15 @@ static u8 map_ddc_pin(struct intel_display *display, u8 vbt_pin)
const u8 *ddc_pin_map;
int i, n_entries;
- if (INTEL_PCH_TYPE(i915) >= PCH_MTL || IS_ALDERLAKE_P(i915)) {
+ if (INTEL_PCH_TYPE(i915) >= PCH_MTL || display->platform.alderlake_p) {
ddc_pin_map = adlp_ddc_pin_map;
n_entries = ARRAY_SIZE(adlp_ddc_pin_map);
- } else if (IS_ALDERLAKE_S(i915)) {
+ } else if (display->platform.alderlake_s) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
return vbt_pin;
- } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
+ } else if (display->platform.rocketlake && INTEL_PCH_TYPE(i915) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
} else if (HAS_PCH_TGP(i915) && DISPLAY_VER(display) == 9) {
@@ -2333,7 +2339,6 @@ static enum port __dvo_port_to_port(int n_ports, int n_dvo,
static enum port dvo_port_to_port(struct intel_display *display,
u8 dvo_port)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
/*
* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port.
@@ -2390,12 +2395,12 @@ static enum port dvo_port_to_port(struct intel_display *display,
ARRAY_SIZE(xelpd_port_mapping[0]),
xelpd_port_mapping,
dvo_port);
- else if (IS_ALDERLAKE_S(i915))
+ else if (display->platform.alderlake_s)
return __dvo_port_to_port(ARRAY_SIZE(adls_port_mapping),
ARRAY_SIZE(adls_port_mapping[0]),
adls_port_mapping,
dvo_port);
- else if (IS_DG1(i915) || IS_ROCKETLAKE(i915))
+ else if (display->platform.dg1 || display->platform.rocketlake)
return __dvo_port_to_port(ARRAY_SIZE(rkl_port_mapping),
ARRAY_SIZE(rkl_port_mapping[0]),
rkl_port_mapping,
@@ -2518,7 +2523,6 @@ static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata,
enum port port)
{
struct intel_display *display = devdata->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
if (!intel_bios_encoder_supports_dvi(devdata))
return;
@@ -2528,7 +2532,7 @@ static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata,
* with a HSW VBT where the level shifter value goes
* up to 11, whereas the BDW max is 9.
*/
- if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) {
+ if (display->platform.broadwell && devdata->child.hdmi_level_shifter_value > 9) {
drm_dbg_kms(display->drm,
"Bogus port %c VBT HDMI level shift %d, adjusting to %d\n",
port_name(port), devdata->child.hdmi_level_shifter_value, 9);
@@ -2617,14 +2621,13 @@ int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata
static bool is_port_valid(struct intel_display *display, enum port port)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
/*
* On some ICL SKUs port F is not present, but broken VBTs mark
* the port as present. Only try to initialize port F for the
* SKUs that may actually have it.
*/
- if (port == PORT_F && IS_ICELAKE(i915))
- return IS_ICL_WITH_PORT_F(i915);
+ if (port == PORT_F && display->platform.icelake)
+ return display->platform.icelake_port_f;
return true;
}
@@ -2722,9 +2725,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
static bool has_ddi_port_info(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- return DISPLAY_VER(display) >= 5 || IS_G4X(i915);
+ return DISPLAY_VER(display) >= 5 || display->platform.g4x;
}
static void parse_ddi_ports(struct intel_display *display)
@@ -2770,9 +2771,9 @@ static bool child_device_size_valid(struct intel_display *display, int size)
expected_size = child_device_expected_size(display->vbt.version);
if (expected_size < 0) {
expected_size = sizeof(struct child_device_config);
- drm_dbg(display->drm,
- "Expected child device config size for VBT version %u not known; assuming %d\n",
- display->vbt.version, expected_size);
+ drm_dbg_kms(display->drm,
+ "Expected child device config size for VBT version %u not known; assuming %d\n",
+ display->vbt.version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
@@ -2795,7 +2796,6 @@ static bool child_device_size_valid(struct intel_display *display, int size)
static void
parse_general_definitions(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct bdb_general_definitions *defs;
struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
@@ -2820,7 +2820,7 @@ parse_general_definitions(struct intel_display *display)
bus_pin = defs->crt_ddc_gmbus_pin;
drm_dbg_kms(display->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
- if (intel_gmbus_is_valid_pin(i915, bus_pin))
+ if (intel_gmbus_is_valid_pin(display, bus_pin))
display->vbt.crt_ddc_pin = bus_pin;
if (!child_device_size_valid(display, defs->child_dev_size))
@@ -2906,7 +2906,7 @@ init_vbt_missing_defaults(struct intel_display *display)
unsigned int ports = DISPLAY_RUNTIME_INFO(display)->port_mask;
enum port port;
- if (!HAS_DDI(display) && !IS_CHERRYVIEW(i915))
+ if (!HAS_DDI(display) && !display->platform.cherryview)
return;
for_each_port_masked(port, ports) {
@@ -2963,6 +2963,9 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
return _vbt + vbt->bdb_offset;
}
+static const char vbt_signature[] = "$VBT";
+static const int vbt_signature_len = 4;
+
/**
* intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
* @display: display device
@@ -2985,7 +2988,7 @@ bool intel_bios_is_valid_vbt(struct intel_display *display,
return false;
}
- if (memcmp(vbt->signature, "$VBT", 4)) {
+ if (memcmp(vbt->signature, vbt_signature, vbt_signature_len)) {
drm_dbg_kms(display->drm, "VBT invalid signature\n");
return false;
}
@@ -3052,131 +3055,59 @@ static struct vbt_header *firmware_get_vbt(struct intel_display *display,
return vbt;
}
-static u32 intel_spi_read(struct intel_uncore *uncore, u32 offset)
-{
- intel_uncore_write(uncore, PRIMARY_SPI_ADDRESS, offset);
-
- return intel_uncore_read(uncore, PRIMARY_SPI_TRIGGER);
-}
-
-static struct vbt_header *spi_oprom_get_vbt(struct intel_display *display,
- size_t *size)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
- u32 count, data, found, store = 0;
- u32 static_region, oprom_offset;
- u32 oprom_size = 0x200000;
- u16 vbt_size;
- u32 *vbt;
-
- static_region = intel_uncore_read(&i915->uncore, SPI_STATIC_REGIONS);
- static_region &= OPTIONROM_SPI_REGIONID_MASK;
- intel_uncore_write(&i915->uncore, PRIMARY_SPI_REGIONID, static_region);
-
- oprom_offset = intel_uncore_read(&i915->uncore, OROM_OFFSET);
- oprom_offset &= OROM_OFFSET_MASK;
-
- for (count = 0; count < oprom_size; count += 4) {
- data = intel_spi_read(&i915->uncore, oprom_offset + count);
- if (data == *((const u32 *)"$VBT")) {
- found = oprom_offset + count;
- break;
- }
- }
-
- if (count >= oprom_size)
- goto err_not_found;
-
- /* Get VBT size and allocate space for the VBT */
- vbt_size = intel_spi_read(&i915->uncore,
- found + offsetof(struct vbt_header, vbt_size));
- vbt_size &= 0xffff;
-
- vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL);
- if (!vbt)
- goto err_not_found;
-
- for (count = 0; count < vbt_size; count += 4)
- *(vbt + store++) = intel_spi_read(&i915->uncore, found + count);
-
- if (!intel_bios_is_valid_vbt(display, vbt, vbt_size))
- goto err_free_vbt;
-
- drm_dbg_kms(display->drm, "Found valid VBT in SPI flash\n");
-
- if (size)
- *size = vbt_size;
-
- return (struct vbt_header *)vbt;
-
-err_free_vbt:
- kfree(vbt);
-err_not_found:
- return NULL;
-}
-
static struct vbt_header *oprom_get_vbt(struct intel_display *display,
- size_t *sizep)
+ struct intel_rom *rom,
+ size_t *size, const char *type)
{
- struct pci_dev *pdev = to_pci_dev(display->drm->dev);
- void __iomem *p = NULL, *oprom;
struct vbt_header *vbt;
- u16 vbt_size;
- size_t i, size;
+ size_t vbt_size;
+ loff_t offset;
- oprom = pci_map_rom(pdev, &size);
- if (!oprom)
+ if (!rom)
return NULL;
- /* Scour memory looking for the VBT signature. */
- for (i = 0; i + 4 < size; i += 4) {
- if (ioread32(oprom + i) != *((const u32 *)"$VBT"))
- continue;
+ BUILD_BUG_ON(vbt_signature_len != sizeof(vbt_signature) - 1);
+ BUILD_BUG_ON(vbt_signature_len != sizeof(u32));
- p = oprom + i;
- size -= i;
- break;
- }
+ offset = intel_rom_find(rom, *(const u32 *)vbt_signature);
+ if (offset < 0)
+ goto err_free_rom;
- if (!p)
- goto err_unmap_oprom;
-
- if (sizeof(struct vbt_header) > size) {
- drm_dbg(display->drm, "VBT header incomplete\n");
- goto err_unmap_oprom;
+ if (sizeof(struct vbt_header) > intel_rom_size(rom) - offset) {
+ drm_dbg_kms(display->drm, "VBT header incomplete\n");
+ goto err_free_rom;
}
- vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
- if (vbt_size > size) {
- drm_dbg(display->drm,
- "VBT incomplete (vbt_size overflows)\n");
- goto err_unmap_oprom;
+ BUILD_BUG_ON(sizeof(vbt->vbt_size) != sizeof(u16));
+
+ vbt_size = intel_rom_read16(rom, offset + offsetof(struct vbt_header, vbt_size));
+ if (vbt_size > intel_rom_size(rom) - offset) {
+ drm_dbg_kms(display->drm, "VBT incomplete (vbt_size overflows)\n");
+ goto err_free_rom;
}
- /* The rest will be validated by intel_bios_is_valid_vbt() */
- vbt = kmalloc(vbt_size, GFP_KERNEL);
+ vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL);
if (!vbt)
- goto err_unmap_oprom;
+ goto err_free_rom;
- memcpy_fromio(vbt, p, vbt_size);
+ intel_rom_read_block(rom, vbt, offset, vbt_size);
if (!intel_bios_is_valid_vbt(display, vbt, vbt_size))
goto err_free_vbt;
- pci_unmap_rom(pdev, oprom);
+ drm_dbg_kms(display->drm, "Found valid VBT in %s\n", type);
- if (sizep)
- *sizep = vbt_size;
+ if (size)
+ *size = vbt_size;
- drm_dbg_kms(display->drm, "Found valid VBT in PCI ROM\n");
+ intel_rom_free(rom);
return vbt;
err_free_vbt:
kfree(vbt);
-err_unmap_oprom:
- pci_unmap_rom(pdev, oprom);
-
+err_free_rom:
+ intel_rom_free(rom);
return NULL;
}
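
The two near-identical VBT scanners (SPI flash and PCI option ROM) collapse
into a single oprom_get_vbt() driven by an intel_rom handle. The
BUILD_BUG_ONs above pin the "$VBT" signature to exactly one 32-bit word so
the ROM can be scanned word at a time. A runnable sketch of that signature
match in plain C, not the kernel helpers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const char vbt_signature[] = "$VBT";

static long find_signature(const uint8_t *rom, size_t size)
{
	uint32_t sig;
	size_t i;

	/* The kernel pins this with BUILD_BUG_ON(); assert() stands in here. */
	assert(sizeof(vbt_signature) - 1 == sizeof(uint32_t));
	memcpy(&sig, vbt_signature, sizeof(sig));

	for (i = 0; i + sizeof(sig) <= size; i += 4) {
		uint32_t word;

		memcpy(&word, rom + i, sizeof(word));
		if (word == sig)
			return (long)i;	/* offset of the VBT header */
	}
	return -1;
}

int main(void)
{
	uint8_t rom[32] = { 0 };

	memcpy(rom + 8, "$VBT", 4);
	printf("VBT at offset %ld\n", find_signature(rom, sizeof(rom)));
	return 0;
}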
@@ -3198,11 +3129,11 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
*/
if (!vbt && IS_DGFX(i915))
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vbt = spi_oprom_get_vbt(display, sizep);
+ vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash");
if (!vbt)
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vbt = oprom_get_vbt(display, sizep);
+ vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM");
return vbt;
}
@@ -3406,7 +3337,6 @@ bool intel_bios_is_tv_present(struct intel_display *display)
*/
bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_bios_encoder_data *devdata;
if (list_empty(&display->vbt.display_devices))
@@ -3423,7 +3353,7 @@ bool intel_bios_is_lvds_present(struct intel_display *display, u8 *i2c_pin)
child->device_type != DEVICE_TYPE_LFP)
continue;
- if (intel_gmbus_is_valid_pin(i915, child->i2c_pin))
+ if (intel_gmbus_is_valid_pin(display, child->i2c_pin))
*i2c_pin = child->i2c_pin;
/* However, we cannot trust the BIOS writers to populate
@@ -3671,17 +3601,16 @@ static const u8 direct_aux_ch_map[] = {
static enum aux_ch map_aux_ch(struct intel_display *display, u8 aux_channel)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const u8 *aux_ch_map;
int i, n_entries;
if (DISPLAY_VER(display) >= 13) {
aux_ch_map = adlp_aux_ch_map;
n_entries = ARRAY_SIZE(adlp_aux_ch_map);
- } else if (IS_ALDERLAKE_S(i915)) {
+ } else if (display->platform.alderlake_s) {
aux_ch_map = adls_aux_ch_map;
n_entries = ARRAY_SIZE(adls_aux_ch_map);
- } else if (IS_DG1(i915) || IS_ROCKETLAKE(i915)) {
+ } else if (display->platform.dg1 || display->platform.rocketlake) {
aux_ch_map = rkl_aux_ch_map;
n_entries = ARRAY_SIZE(rkl_aux_ch_map);
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 8b703f6cfe17..f9841f0498c6 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -50,14 +50,6 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
-struct edp_power_seq {
- u16 t1_t3;
- u16 t8;
- u16 t9;
- u16 t10;
- u16 t11_t12;
-} __packed;
-
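
With struct edp_power_seq gone from the header, the packed VBT
power-sequence fields are converted into the driver's own PPS delay type via
vbt_edp_to_pps_delays(), as seen in the intel_bios.c hunk earlier. A
compile-only sketch of the likely shape of that conversion; the destination
field names are assumptions, not necessarily the i915 ones:

#include <stdint.h>

struct edp_power_seq {		/* packed VBT layout removed above */
	uint16_t t1_t3, t8, t9, t10, t11_t12;
};

struct intel_pps_delays {	/* assumed driver-side representation */
	uint16_t power_up, backlight_on, backlight_off, power_down, power_cycle;
};

static void vbt_edp_to_pps_delays(struct intel_pps_delays *pps,
				  const struct edp_power_seq *seq)
{
	pps->power_up = seq->t1_t3;
	pps->backlight_on = seq->t8;
	pps->backlight_off = seq->t9;
	pps->power_down = seq->t10;
	pps->power_cycle = seq->t11_t12;
}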
/*
* MIPI Sequence Block definitions
*
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
new file mode 100644
index 000000000000..fbd16d7b58d9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_object.h"
+#include "gem/i915_gem_object_frontbuffer.h"
+#include "i915_debugfs.h"
+#include "intel_bo.h"
+
+bool intel_bo_is_tiled(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_tiled(to_intel_bo(obj));
+}
+
+bool intel_bo_is_userptr(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_userptr(to_intel_bo(obj));
+}
+
+bool intel_bo_is_shmem(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_shmem(to_intel_bo(obj));
+}
+
+bool intel_bo_is_protected(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_protected(to_intel_bo(obj));
+}
+
+void intel_bo_flush_if_display(struct drm_gem_object *obj)
+{
+ i915_gem_object_flush_if_display(to_intel_bo(obj));
+}
+
+int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ return i915_gem_fb_mmap(to_intel_bo(obj), vma);
+}
+
+int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
+{
+ return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size);
+}
+
+struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
+{
+ return i915_gem_object_get_frontbuffer(to_intel_bo(obj));
+}
+
+struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
+ struct intel_frontbuffer *front)
+{
+ return i915_gem_object_set_frontbuffer(to_intel_bo(obj), front);
+}
+
+void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+{
+ i915_debugfs_describe_obj(m, to_intel_bo(obj));
+}
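
intel_bo.c is a set of one-line shims: display code hands over a plain
struct drm_gem_object and the driver-specific to_intel_bo() cast happens in
exactly one file. A standalone sketch of that wrapper pattern, using stub
types; the real to_intel_bo() is effectively a container_of():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct drm_gem_object { int dummy; };

struct i915_gem_object {	/* driver-private type */
	struct drm_gem_object base;
	bool shmem;
};

#define to_intel_bo(obj) \
	((struct i915_gem_object *)((char *)(obj) - \
	 offsetof(struct i915_gem_object, base)))

static bool intel_bo_is_shmem(struct drm_gem_object *obj)
{
	return to_intel_bo(obj)->shmem;	/* the only place that casts */
}

int main(void)
{
	struct i915_gem_object bo = { .shmem = true };

	printf("shmem: %d\n", intel_bo_is_shmem(&bo.base));
	return 0;
}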
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
new file mode 100644
index 000000000000..ea7a2253aaa5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __INTEL_BO__
+#define __INTEL_BO__
+
+#include <linux/types.h>
+
+struct drm_gem_object;
+struct seq_file;
+struct vm_area_struct;
+
+bool intel_bo_is_tiled(struct drm_gem_object *obj);
+bool intel_bo_is_userptr(struct drm_gem_object *obj);
+bool intel_bo_is_shmem(struct drm_gem_object *obj);
+bool intel_bo_is_protected(struct drm_gem_object *obj);
+void intel_bo_flush_if_display(struct drm_gem_object *obj);
+int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size);
+
+struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj);
+struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
+ struct intel_frontbuffer *front);
+
+void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
+
+#endif /* __INTEL_BO__ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 47036d4abb33..23edc81741de 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -743,7 +743,7 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (DISPLAY_VER_FULL(dev_priv) >= IP_VER(14, 1) && IS_DGFX(dev_priv))
+ if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv))
xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info);
else if (DISPLAY_VER(dev_priv) >= 14)
tgl_get_bw_info(dev_priv, &mtl_sa_info);
@@ -1256,7 +1256,7 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915,
min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state);
for_each_pipe(i915, pipe)
- min_cdclk = max(bw_state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]);
return min_cdclk;
}
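
Two small things in the intel_bw.c hunks above: the max() reorder appears to
just follow the accumulator convention (running value first), and the
version check switches from DISPLAY_VER_FULL() >= IP_VER(14, 1) to
DISPLAY_VERx100() >= 1401, i.e. major.minor packed as major * 100 + minor so
that display IP 14.1 compares as the integer 1401. A runnable sketch of the
encoding; the macro here is a stand-in taking the numbers directly, whereas
the kernel's takes the device:

#include <stdio.h>

#define DISPLAY_VERx100(major, minor) ((major) * 100 + (minor))

int main(void)
{
	int ver = DISPLAY_VERx100(14, 1);	/* the xe2_hpd branch above */

	printf("14.1 -> %d (>= 1401: %s)\n", ver,
	       ver >= 1401 ? "yes" : "no");
	return 0;
}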
@@ -1447,13 +1447,14 @@ static const struct intel_global_state_funcs intel_bw_funcs = {
int intel_bw_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_bw_state *state;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.bw.obj,
+ intel_atomic_global_obj_init(display, &display->bw.obj,
&state->base, &intel_bw_funcs);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index aa3ba66c5307..c7a603589412 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/time.h>
#include <drm/drm_fixed.h>
@@ -28,6 +29,7 @@
#include "soc/intel_dram.h"
#include "hsw_ips.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -36,7 +38,6 @@
#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_dp.h"
#include "intel_display_types.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
@@ -45,6 +46,7 @@
#include "intel_vdsc.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
+#include "vlv_dsi.h"
#include "vlv_sideband.h"
/**
@@ -112,81 +114,81 @@
*/
struct intel_cdclk_funcs {
- void (*get_cdclk)(struct drm_i915_private *i915,
+ void (*get_cdclk)(struct intel_display *display,
struct intel_cdclk_config *cdclk_config);
- void (*set_cdclk)(struct drm_i915_private *i915,
+ void (*set_cdclk)(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe);
int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
u8 (*calc_voltage_level)(int cdclk);
};
-void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
+void intel_cdclk_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- dev_priv->display.funcs.cdclk->get_cdclk(dev_priv, cdclk_config);
+ display->funcs.cdclk->get_cdclk(display, cdclk_config);
}
-static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv,
+static void intel_cdclk_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- dev_priv->display.funcs.cdclk->set_cdclk(dev_priv, cdclk_config, pipe);
+ display->funcs.cdclk->set_cdclk(display, cdclk_config, pipe);
}
static int intel_cdclk_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
- return dev_priv->display.funcs.cdclk->modeset_calc_cdclk(state);
+ return display->funcs.cdclk->modeset_calc_cdclk(state);
}
-static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv,
+static u8 intel_cdclk_calc_voltage_level(struct intel_display *display,
int cdclk)
{
- return dev_priv->display.funcs.cdclk->calc_voltage_level(cdclk);
+ return display->funcs.cdclk->calc_voltage_level(cdclk);
}
-static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_133mhz_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
cdclk_config->cdclk = 133333;
}
-static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_200mhz_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
cdclk_config->cdclk = 200000;
}
-static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_266mhz_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
cdclk_config->cdclk = 266667;
}
-static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_333mhz_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
cdclk_config->cdclk = 333333;
}
-static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_400mhz_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
cdclk_config->cdclk = 400000;
}
-static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
+static void fixed_450mhz_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
cdclk_config->cdclk = 450000;
}
-static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
+static void i85x_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u16 hpllcc = 0;
/*
@@ -225,10 +227,10 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
}
}
-static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
+static void i915gm_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -249,10 +251,10 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
}
}
-static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
+static void i945gm_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -273,7 +275,7 @@ static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
}
}
-static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
+static unsigned int intel_hpll_vco(struct intel_display *display)
{
static const unsigned int blb_vco[8] = {
[0] = 3200000,
@@ -312,6 +314,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
[4] = 2666667,
[5] = 4266667,
};
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
const unsigned int *vco_table;
unsigned int vco;
u8 tmp = 0;
@@ -330,23 +333,23 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
else
return 0;
- tmp = intel_de_read(dev_priv,
+ tmp = intel_de_read(display,
IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
- drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n",
+ drm_err(display->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n",
tmp);
else
- drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco);
+ drm_dbg_kms(display->drm, "HPLL VCO %u kHz\n", vco);
return vco;
}
-static void g33_get_cdclk(struct drm_i915_private *dev_priv,
+static void g33_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 };
static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
@@ -355,7 +358,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_config->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(display);
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -386,16 +389,16 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
return;
fail:
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
cdclk_config->vco, tmp);
cdclk_config->cdclk = 190476;
}
-static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
+static void pnv_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u16 gcfgc = 0;
pci_read_config_word(pdev, GCFGC, &gcfgc);
@@ -414,7 +417,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_config->cdclk = 200000;
break;
default:
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unknown pnv display core clock 0x%04x\n", gcfgc);
fallthrough;
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
@@ -426,10 +429,10 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
}
}
-static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
+static void i965gm_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
static const u8 div_3200[] = { 16, 10, 8 };
static const u8 div_4000[] = { 20, 12, 10 };
static const u8 div_5333[] = { 24, 16, 14 };
@@ -437,7 +440,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_config->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(display);
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -465,20 +468,20 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
return;
fail:
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
cdclk_config->vco, tmp);
cdclk_config->cdclk = 200000;
}
-static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
+static void gm45_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_config->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(display);
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -494,7 +497,7 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_config->cdclk = cdclk_sel ? 320000 : 228571;
break;
default:
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
cdclk_config->vco, tmp);
cdclk_config->cdclk = 222222;
@@ -502,15 +505,16 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
}
}
-static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
+static void hsw_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ u32 lcpll = intel_de_read(display, LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
cdclk_config->cdclk = 800000;
- else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ else if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT)
cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
cdclk_config->cdclk = 450000;
@@ -520,8 +524,9 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_config->cdclk = 540000;
}
-static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
+static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
333333 : 320000;
@@ -540,8 +545,10 @@ static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
return 200000;
}
-static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
+static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (IS_VALLEYVIEW(dev_priv)) {
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
return 2;
@@ -559,9 +566,10 @@ static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
}
}
-static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
+static void vlv_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
vlv_iosf_sb_get(dev_priv,
@@ -585,8 +593,9 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
DSPFREQGUAR_SHIFT_CHV;
}
-static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
+static void vlv_program_pfi_credits(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned int credits, default_credits;
if (IS_CHERRYVIEW(dev_priv))
@@ -594,7 +603,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
else
default_credits = PFI_CREDIT(8);
- if (dev_priv->display.cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+ if (display->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
/* CHV suggested value is 31 or 63 */
if (IS_CHERRYVIEW(dev_priv))
credits = PFI_CREDIT_63;
@@ -608,24 +617,25 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
* WA - write default credits before re-programming
* FIXME: should we also set the resend bit here?
*/
- intel_de_write(dev_priv, GCI_CONTROL,
+ intel_de_write(display, GCI_CONTROL,
VGA_FAST_MODE_DISABLE | default_credits);
- intel_de_write(dev_priv, GCI_CONTROL,
+ intel_de_write(display, GCI_CONTROL,
VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND);
/*
* FIXME is this guaranteed to clear
* immediately or should we poll for it?
*/
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, GCI_CONTROL) & PFI_CREDIT_RESEND);
}
-static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
+static void vlv_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
@@ -662,7 +672,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timed out waiting for CDclk change\n");
}
@@ -681,7 +691,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
50))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timed out waiting for CDclk change\n");
}
@@ -704,17 +714,18 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
BIT(VLV_IOSF_SB_BUNIT) |
BIT(VLV_IOSF_SB_PUNIT));
- intel_update_cdclk(dev_priv);
+ intel_update_cdclk(display);
- vlv_program_pfi_credits(dev_priv);
+ vlv_program_pfi_credits(display);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
-static void chv_set_cdclk(struct drm_i915_private *dev_priv,
+static void chv_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
@@ -746,15 +757,15 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timed out waiting for CDclk change\n");
}
vlv_punit_put(dev_priv);
- intel_update_cdclk(dev_priv);
+ intel_update_cdclk(display);
- vlv_program_pfi_credits(dev_priv);
+ vlv_program_pfi_credits(display);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
@@ -786,15 +797,15 @@ static u8 bdw_calc_voltage_level(int cdclk)
}
}
-static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
+static void bdw_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
+ u32 lcpll = intel_de_read(display, LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
cdclk_config->cdclk = 800000;
- else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ else if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT)
cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
cdclk_config->cdclk = 450000;
@@ -830,15 +841,16 @@ static u32 bdw_cdclk_freq_sel(int cdclk)
}
}
-static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
+static void bdw_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int ret;
- if (drm_WARN(&dev_priv->drm,
- (intel_de_read(dev_priv, LCPLL_CTL) &
+ if (drm_WARN(display->drm,
+ (intel_de_read(display, LCPLL_CTL) &
(LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
@@ -848,39 +860,39 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"failed to inform pcode about cdclk change\n");
return;
}
- intel_de_rmw(dev_priv, LCPLL_CTL,
+ intel_de_rmw(display, LCPLL_CTL,
0, LCPLL_CD_SOURCE_FCLK);
/*
* According to the spec, it should be enough to poll for this 1 us.
* However, extensive testing shows that this can take longer.
*/
- if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
+ if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 100))
- drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
+ drm_err(display->drm, "Switching to FCLK failed\n");
- intel_de_rmw(dev_priv, LCPLL_CTL,
+ intel_de_rmw(display, LCPLL_CTL,
LCPLL_CLK_FREQ_MASK, bdw_cdclk_freq_sel(cdclk));
- intel_de_rmw(dev_priv, LCPLL_CTL,
+ intel_de_rmw(display, LCPLL_CTL,
LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
+ if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");
+ drm_err(display->drm, "Switching back to LCPLL failed\n");
snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_config->voltage_level);
- intel_de_write(dev_priv, CDCLK_FREQ,
+ intel_de_write(display, CDCLK_FREQ,
DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
- intel_update_cdclk(dev_priv);
+ intel_update_cdclk(display);
}
static int skl_calc_cdclk(int min_cdclk, int vco)
@@ -918,7 +930,7 @@ static u8 skl_calc_voltage_level(int cdclk)
return 0;
}
-static void skl_dpll0_update(struct drm_i915_private *dev_priv,
+static void skl_dpll0_update(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
u32 val;
@@ -926,16 +938,16 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
cdclk_config->ref = 24000;
cdclk_config->vco = 0;
- val = intel_de_read(dev_priv, LCPLL1_CTL);
+ val = intel_de_read(display, LCPLL1_CTL);
if ((val & LCPLL_PLL_ENABLE) == 0)
return;
- if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0))
+ if (drm_WARN_ON(display->drm, (val & LCPLL_PLL_LOCK) == 0))
return;
- val = intel_de_read(dev_priv, DPLL_CTRL1);
+ val = intel_de_read(display, DPLL_CTRL1);
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
(val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
DPLL_CTRL1_SSC(SKL_DPLL0) |
DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
@@ -959,19 +971,19 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
}
}
-static void skl_get_cdclk(struct drm_i915_private *dev_priv,
+static void skl_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
u32 cdctl;
- skl_dpll0_update(dev_priv, cdclk_config);
+ skl_dpll0_update(display, cdclk_config);
cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref;
if (cdclk_config->vco == 0)
goto out;
- cdctl = intel_de_read(dev_priv, CDCLK_CTL);
+ cdctl = intel_de_read(display, CDCLK_CTL);
if (cdclk_config->vco == 8640000) {
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
@@ -1026,19 +1038,19 @@ static int skl_cdclk_decimal(int cdclk)
return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
-static void skl_set_preferred_cdclk_vco(struct drm_i915_private *i915, int vco)
+static void skl_set_preferred_cdclk_vco(struct intel_display *display, int vco)
{
- bool changed = i915->display.cdclk.skl_preferred_vco_freq != vco;
+ bool changed = display->cdclk.skl_preferred_vco_freq != vco;
- i915->display.cdclk.skl_preferred_vco_freq = vco;
+ display->cdclk.skl_preferred_vco_freq = vco;
if (changed)
- intel_update_max_cdclk(i915);
+ intel_update_max_cdclk(display);
}
-static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco)
+static u32 skl_dpll0_link_rate(struct intel_display *display, int vco)
{
- drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
+ drm_WARN_ON(display->drm, vco != 8100000 && vco != 8640000);
/*
* We always enable DPLL0 with the lowest link rate possible, but still
@@ -1055,47 +1067,47 @@ static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco)
return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0);
}
-static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
+static void skl_dpll0_enable(struct intel_display *display, int vco)
{
- intel_de_rmw(dev_priv, DPLL_CTRL1,
+ intel_de_rmw(display, DPLL_CTRL1,
DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
DPLL_CTRL1_SSC(SKL_DPLL0) |
DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0),
DPLL_CTRL1_OVERRIDE(SKL_DPLL0) |
- skl_dpll0_link_rate(dev_priv, vco));
- intel_de_posting_read(dev_priv, DPLL_CTRL1);
+ skl_dpll0_link_rate(display, vco));
+ intel_de_posting_read(display, DPLL_CTRL1);
- intel_de_rmw(dev_priv, LCPLL1_CTL,
+ intel_de_rmw(display, LCPLL1_CTL,
0, LCPLL_PLL_ENABLE);
- if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
- drm_err(&dev_priv->drm, "DPLL0 not locked\n");
+ if (intel_de_wait_for_set(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
+ drm_err(display->drm, "DPLL0 not locked\n");
- dev_priv->display.cdclk.hw.vco = vco;
+ display->cdclk.hw.vco = vco;
/* We'll want to keep using the current vco from now on. */
- skl_set_preferred_cdclk_vco(dev_priv, vco);
+ skl_set_preferred_cdclk_vco(display, vco);
}
-static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
+static void skl_dpll0_disable(struct intel_display *display)
{
- intel_de_rmw(dev_priv, LCPLL1_CTL,
+ intel_de_rmw(display, LCPLL1_CTL,
LCPLL_PLL_ENABLE, 0);
- if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
- drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");
+ if (intel_de_wait_for_clear(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
+ drm_err(display->drm, "Couldn't disable DPLL0\n");
- dev_priv->display.cdclk.hw.vco = 0;
+ display->cdclk.hw.vco = 0;
}
-static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
+static u32 skl_cdclk_freq_sel(struct intel_display *display,
int cdclk, int vco)
{
switch (cdclk) {
default:
- drm_WARN_ON(&dev_priv->drm,
- cdclk != dev_priv->display.cdclk.hw.bypass);
- drm_WARN_ON(&dev_priv->drm, vco != 0);
+ drm_WARN_ON(display->drm,
+ cdclk != display->cdclk.hw.bypass);
+ drm_WARN_ON(display->drm, vco != 0);
fallthrough;
case 308571:
case 337500:
@@ -1111,10 +1123,11 @@ static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
}
}
-static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+static void skl_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u32 freq_select, cdclk_ctl;
@@ -1128,7 +1141,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
* use the corresponding VCO freq as that always leads to using the
* minimum 308MHz CDCLK.
*/
- drm_WARN_ON_ONCE(&dev_priv->drm,
+ drm_WARN_ON_ONCE(display->drm,
IS_SKYLAKE(dev_priv) && vco == 8640000);
ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
@@ -1136,54 +1149,54 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to inform PCU about cdclk change (%d)\n", ret);
return;
}
- freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco);
+ freq_select = skl_cdclk_freq_sel(display, cdclk, vco);
- if (dev_priv->display.cdclk.hw.vco != 0 &&
- dev_priv->display.cdclk.hw.vco != vco)
- skl_dpll0_disable(dev_priv);
+ if (display->cdclk.hw.vco != 0 &&
+ display->cdclk.hw.vco != vco)
+ skl_dpll0_disable(display);
- cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);
+ cdclk_ctl = intel_de_read(display, CDCLK_CTL);
- if (dev_priv->display.cdclk.hw.vco != vco) {
+ if (display->cdclk.hw.vco != vco) {
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
- intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_write(display, CDCLK_CTL, cdclk_ctl);
}
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
- intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
- intel_de_posting_read(dev_priv, CDCLK_CTL);
+ intel_de_write(display, CDCLK_CTL, cdclk_ctl);
+ intel_de_posting_read(display, CDCLK_CTL);
- if (dev_priv->display.cdclk.hw.vco != vco)
- skl_dpll0_enable(dev_priv, vco);
+ if (display->cdclk.hw.vco != vco)
+ skl_dpll0_enable(display, vco);
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
- intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_write(display, CDCLK_CTL, cdclk_ctl);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
- intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_write(display, CDCLK_CTL, cdclk_ctl);
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
- intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
- intel_de_posting_read(dev_priv, CDCLK_CTL);
+ intel_de_write(display, CDCLK_CTL, cdclk_ctl);
+ intel_de_posting_read(display, CDCLK_CTL);
/* inform PCU of the change */
snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
- intel_update_cdclk(dev_priv);
+ intel_update_cdclk(display);
}
-static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void skl_sanitize_cdclk(struct intel_display *display)
{
u32 cdctl, expected;
@@ -1192,15 +1205,15 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* There is SWF18 scratchpad register defined which is set by the
* pre-os which can be used by the OS drivers to check the status
*/
- if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
+ if ((intel_de_read(display, SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
- intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
+ intel_update_cdclk(display);
+ intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
/* Is PLL enabled and locked ? */
- if (dev_priv->display.cdclk.hw.vco == 0 ||
- dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
+ if (display->cdclk.hw.vco == 0 ||
+ display->cdclk.hw.cdclk == display->cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1209,60 +1222,60 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* decimal part is programmed wrong from BIOS where pre-os does not
* enable display. Verify the same as well.
*/
- cdctl = intel_de_read(dev_priv, CDCLK_CTL);
+ cdctl = intel_de_read(display, CDCLK_CTL);
expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->display.cdclk.hw.cdclk);
+ skl_cdclk_decimal(display->cdclk.hw.cdclk);
if (cdctl == expected)
/* All well; nothing to sanitize */
return;
sanitize:
- drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
+ drm_dbg_kms(display->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
- dev_priv->display.cdclk.hw.cdclk = 0;
+ display->cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->display.cdclk.hw.vco = ~0;
+ display->cdclk.hw.vco = ~0;
}
-static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
+static void skl_cdclk_init_hw(struct intel_display *display)
{
struct intel_cdclk_config cdclk_config;
- skl_sanitize_cdclk(dev_priv);
+ skl_sanitize_cdclk(display);
- if (dev_priv->display.cdclk.hw.cdclk != 0 &&
- dev_priv->display.cdclk.hw.vco != 0) {
+ if (display->cdclk.hw.cdclk != 0 &&
+ display->cdclk.hw.vco != 0) {
/*
* Use the current vco as our initial
* guess as to what the preferred vco is.
*/
- if (dev_priv->display.cdclk.skl_preferred_vco_freq == 0)
- skl_set_preferred_cdclk_vco(dev_priv,
- dev_priv->display.cdclk.hw.vco);
+ if (display->cdclk.skl_preferred_vco_freq == 0)
+ skl_set_preferred_cdclk_vco(display,
+ display->cdclk.hw.vco);
return;
}
- cdclk_config = dev_priv->display.cdclk.hw;
+ cdclk_config = display->cdclk.hw;
- cdclk_config.vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
+ cdclk_config.vco = display->cdclk.skl_preferred_vco_freq;
if (cdclk_config.vco == 0)
cdclk_config.vco = 8100000;
cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
+ skl_set_cdclk(display, &cdclk_config, INVALID_PIPE);
}
-static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
+static void skl_cdclk_uninit_hw(struct intel_display *display)
{
- struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw;
+ struct intel_cdclk_config cdclk_config = display->cdclk.hw;
cdclk_config.cdclk = cdclk_config.bypass;
cdclk_config.vco = 0;
cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
+ skl_set_cdclk(display, &cdclk_config, INVALID_PIPE);
}
struct intel_cdclk_vals {
@@ -1456,6 +1469,39 @@ static const struct intel_cdclk_vals xe2hpd_cdclk_table[] = {
{}
};
+static const struct intel_cdclk_vals xe3lpd_cdclk_table[] = {
+ { .refclk = 38400, .cdclk = 153600, .ratio = 16, .waveform = 0xaaaa },
+ { .refclk = 38400, .cdclk = 172800, .ratio = 16, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 192000, .ratio = 16, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 211200, .ratio = 16, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 230400, .ratio = 16, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 249600, .ratio = 16, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 268800, .ratio = 16, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 288000, .ratio = 16, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 307200, .ratio = 16, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 326400, .ratio = 17, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 345600, .ratio = 18, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 364800, .ratio = 19, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 384000, .ratio = 20, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 403200, .ratio = 21, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 422400, .ratio = 22, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 441600, .ratio = 23, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 460800, .ratio = 24, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 499200, .ratio = 26, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 518400, .ratio = 27, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 537600, .ratio = 28, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 576000, .ratio = 30, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 595200, .ratio = 31, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 614400, .ratio = 32, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 633600, .ratio = 33, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 672000, .ratio = 35, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 691200, .ratio = 36, .waveform = 0xffff },
+ {}
+};
+
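
Every row of the new xe3lpd table keeps the 38400 kHz refclk, and below
307200 kHz the ratio stays at 16 (VCO fixed at 614400 kHz) while the squash
waveform thins the output clock: with the CD2X divider at 2, the effective
cdclk works out to vco * popcount(waveform) / (squash_len * 2). A runnable
check of the first few rows, in plain C rather than kernel code:

#include <stdio.h>

static int popcount16(unsigned int w)
{
	int n = 0;

	for (; w; w &= w - 1)
		n++;
	return n;
}

int main(void)
{
	const int refclk = 38400, ratio = 16, squash_len = 16, cd2x_div = 2;
	const unsigned int waveforms[] = { 0xaaaa, 0xad5a, 0xb6b6, 0xffff };
	const int vco = refclk * ratio;	/* 614400 kHz, fixed below 307200 */

	for (unsigned int i = 0; i < sizeof(waveforms) / sizeof(waveforms[0]); i++)
		printf("waveform 0x%04x -> cdclk %d kHz\n", waveforms[i],
		       vco * popcount16(waveforms[i]) / (squash_len * cd2x_div));
	return 0;	/* prints 153600, 172800, 192000, 307200 */
}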
static const int cdclk_squash_len = 16;
static int cdclk_squash_divider(u16 waveform)
@@ -1470,37 +1516,37 @@ static int cdclk_divider(int cdclk, int vco, u16 waveform)
cdclk * cdclk_squash_len);
}
-static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
+static int bxt_calc_cdclk(struct intel_display *display, int min_cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
+ const struct intel_cdclk_vals *table = display->cdclk.table;
int i;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
+ if (table[i].refclk == display->cdclk.hw.ref &&
table[i].cdclk >= min_cdclk)
return table[i].cdclk;
- drm_WARN(&dev_priv->drm, 1,
+ drm_WARN(display->drm, 1,
"Cannot satisfy minimum cdclk %d with refclk %u\n",
- min_cdclk, dev_priv->display.cdclk.hw.ref);
+ min_cdclk, display->cdclk.hw.ref);
return 0;
}
-static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static int bxt_calc_cdclk_pll_vco(struct intel_display *display, int cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
+ const struct intel_cdclk_vals *table = display->cdclk.table;
int i;
- if (cdclk == dev_priv->display.cdclk.hw.bypass)
+ if (cdclk == display->cdclk.hw.bypass)
return 0;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
+ if (table[i].refclk == display->cdclk.hw.ref &&
table[i].cdclk == cdclk)
- return dev_priv->display.cdclk.hw.ref * table[i].ratio;
+ return display->cdclk.hw.ref * table[i].ratio;
- drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->display.cdclk.hw.ref);
+ drm_WARN(display->drm, 1, "cdclk %d not valid for refclk %u\n",
+ cdclk, display->cdclk.hw.ref);
return 0;
}
@@ -1582,10 +1628,20 @@ static u8 rplu_calc_voltage_level(int cdclk)
rplu_voltage_level_max_cdclk);
}
-static void icl_readout_refclk(struct drm_i915_private *dev_priv,
+static u8 xe3lpd_calc_voltage_level(int cdclk)
+{
+ /*
+ * Starting with xe3lpd power controller does not need the voltage
+ * index when doing the modeset update. This function is best left
+ * defined but returning 0 to the mask.
+ */
+ return 0;
+}
+
+static void icl_readout_refclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
- u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
+ u32 dssm = intel_de_read(display, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
switch (dssm) {
default:
@@ -1603,19 +1659,20 @@ static void icl_readout_refclk(struct drm_i915_private *dev_priv,
}
}
-static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
+static void bxt_de_pll_readout(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val, ratio;
if (IS_DG2(dev_priv))
cdclk_config->ref = 38400;
- else if (DISPLAY_VER(dev_priv) >= 11)
- icl_readout_refclk(dev_priv, cdclk_config);
+ else if (DISPLAY_VER(display) >= 11)
+ icl_readout_refclk(display, cdclk_config);
else
cdclk_config->ref = 19200;
- val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
+ val = intel_de_read(display, BXT_DE_PLL_ENABLE);
if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
(val & BXT_DE_PLL_LOCK) == 0) {
/*
@@ -1630,26 +1687,26 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
* DISPLAY_VER >= 11 have the ratio directly in the PLL enable register,
* gen9lp had it in a separate PLL control register.
*/
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
ratio = val & ICL_CDCLK_PLL_RATIO_MASK;
else
- ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+ ratio = intel_de_read(display, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
cdclk_config->vco = ratio * cdclk_config->ref;
}
-static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
+static void bxt_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config)
{
u32 squash_ctl = 0;
u32 divider;
int div;
- bxt_de_pll_readout(dev_priv, cdclk_config);
+ bxt_de_pll_readout(display, cdclk_config);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
cdclk_config->bypass = cdclk_config->ref / 2;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
cdclk_config->bypass = 50000;
else
cdclk_config->bypass = cdclk_config->ref;
@@ -1659,7 +1716,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
goto out;
}
- divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
+ divider = intel_de_read(display, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
switch (divider) {
case BXT_CDCLK_CD2X_DIV_SEL_1:
@@ -1679,8 +1736,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
return;
}
- if (HAS_CDCLK_SQUASH(dev_priv))
- squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL);
+ if (HAS_CDCLK_SQUASH(display))
+ squash_ctl = intel_de_read(display, CDCLK_SQUASH_CTL);
if (squash_ctl & CDCLK_SQUASH_ENABLE) {
u16 waveform;
@@ -1696,107 +1753,107 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
}
out:
- if (DISPLAY_VER(dev_priv) >= 20)
- cdclk_config->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
+ if (DISPLAY_VER(display) >= 20)
+ cdclk_config->joined_mbus = intel_de_read(display, MBUS_CTL) & MBUS_JOIN;
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
cdclk_config->voltage_level =
- intel_cdclk_calc_voltage_level(dev_priv, cdclk_config->cdclk);
+ intel_cdclk_calc_voltage_level(display, cdclk_config->cdclk);
}
-static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+static void bxt_de_pll_disable(struct intel_display *display)
{
- intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0);
+ intel_de_write(display, BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_de_wait_for_clear(dev_priv,
+ if (intel_de_wait_for_clear(display,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
- drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");
+ drm_err(display->drm, "timeout waiting for DE PLL unlock\n");
- dev_priv->display.cdclk.hw.vco = 0;
+ display->cdclk.hw.vco = 0;
}
-static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+static void bxt_de_pll_enable(struct intel_display *display, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, display->cdclk.hw.ref);
- intel_de_rmw(dev_priv, BXT_DE_PLL_CTL,
+ intel_de_rmw(display, BXT_DE_PLL_CTL,
BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio));
- intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+ intel_de_write(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
- if (intel_de_wait_for_set(dev_priv,
+ if (intel_de_wait_for_set(display,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
- drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");
+ drm_err(display->drm, "timeout waiting for DE PLL lock\n");
- dev_priv->display.cdclk.hw.vco = vco;
+ display->cdclk.hw.vco = vco;
}
-static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
+static void icl_cdclk_pll_disable(struct intel_display *display)
{
- intel_de_rmw(dev_priv, BXT_DE_PLL_ENABLE,
+ intel_de_rmw(display, BXT_DE_PLL_ENABLE,
BXT_DE_PLL_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
- drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n");
+ if (intel_de_wait_for_clear(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ drm_err(display->drm, "timeout waiting for CDCLK PLL unlock\n");
- dev_priv->display.cdclk.hw.vco = 0;
+ display->cdclk.hw.vco = 0;
}
-static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
+static void icl_cdclk_pll_enable(struct intel_display *display, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, display->cdclk.hw.ref);
u32 val;
val = ICL_CDCLK_PLL_RATIO(ratio);
- intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+ intel_de_write(display, BXT_DE_PLL_ENABLE, val);
val |= BXT_DE_PLL_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+ intel_de_write(display, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
- drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n");
+ if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ drm_err(display->drm, "timeout waiting for CDCLK PLL lock\n");
- dev_priv->display.cdclk.hw.vco = vco;
+ display->cdclk.hw.vco = vco;
}
-static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
+static void adlp_cdclk_pll_crawl(struct intel_display *display, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, display->cdclk.hw.ref);
u32 val;
/* Write PLL ratio without disabling */
val = ICL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE;
- intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+ intel_de_write(display, BXT_DE_PLL_ENABLE, val);
/* Submit freq change request */
val |= BXT_DE_PLL_FREQ_REQ;
- intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+ intel_de_write(display, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE,
+ if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE,
BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
- drm_err(&dev_priv->drm, "timeout waiting for FREQ change request ack\n");
+ drm_err(display->drm, "timeout waiting for FREQ change request ack\n");
val &= ~BXT_DE_PLL_FREQ_REQ;
- intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
+ intel_de_write(display, BXT_DE_PLL_ENABLE, val);
- dev_priv->display.cdclk.hw.vco = vco;
+ display->cdclk.hw.vco = vco;
}
-static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+static u32 bxt_cdclk_cd2x_pipe(struct intel_display *display, enum pipe pipe)
{
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
if (pipe == INVALID_PIPE)
return TGL_CDCLK_CD2X_PIPE_NONE;
else
return TGL_CDCLK_CD2X_PIPE(pipe);
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
if (pipe == INVALID_PIPE)
return ICL_CDCLK_CD2X_PIPE_NONE;
else
@@ -1809,15 +1866,15 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe
}
}
-static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
+static u32 bxt_cdclk_cd2x_div_sel(struct intel_display *display,
int cdclk, int vco, u16 waveform)
{
/* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (cdclk_divider(cdclk, vco, waveform)) {
default:
- drm_WARN_ON(&dev_priv->drm,
- cdclk != dev_priv->display.cdclk.hw.bypass);
- drm_WARN_ON(&dev_priv->drm, vco != 0);
+ drm_WARN_ON(display->drm,
+ cdclk != display->cdclk.hw.bypass);
+ drm_WARN_ON(display->drm, vco != 0);
fallthrough;
case 2:
return BXT_CDCLK_CD2X_DIV_SEL_1;
@@ -1830,47 +1887,47 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
}
}
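
bxt_cdclk_cd2x_div_sel() encodes the relationship from its comment, cdclk = vco / 2 / div with div in {1, 1.5, 2, 4}, and the visible case label (2 selecting the 1x divider) suggests the divider value is carried doubled so that 1.5 stays an integer. A small sketch under that assumption, with a placeholder VCO value:

#include <stdio.h>

/* div is the cd2x divider times two, so 1.5 becomes the integer 3 */
static int cdclk_from_vco(int vco, int div_times_2)
{
	/* cdclk = vco / 2 / (div_times_2 / 2)  ==  vco / div_times_2 */
	return vco / div_times_2;
}

int main(void)
{
	int vco = 594000;             /* kHz, placeholder value */
	int divs[] = { 2, 3, 4, 8 };  /* 1, 1.5, 2 and 4, doubled */

	for (int i = 0; i < 4; i++)
		printf("div %.1f -> cdclk %d kHz\n",
		       divs[i] / 2.0, cdclk_from_vco(vco, divs[i]));
	return 0;
}

With the divider doubled, vco / 2 / (d/2) collapses to vco / d, which is what the sketch computes.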
-static u16 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
+static u16 cdclk_squash_waveform(struct intel_display *display,
int cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
+ const struct intel_cdclk_vals *table = display->cdclk.table;
int i;
- if (cdclk == dev_priv->display.cdclk.hw.bypass)
+ if (cdclk == display->cdclk.hw.bypass)
return 0;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
+ if (table[i].refclk == display->cdclk.hw.ref &&
table[i].cdclk == cdclk)
return table[i].waveform;
- drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->display.cdclk.hw.ref);
+ drm_WARN(display->drm, 1, "cdclk %d not valid for refclk %u\n",
+ cdclk, display->cdclk.hw.ref);
return 0xffff;
}
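
The waveform returned here is a 16-bit squash pattern, and a lookup miss warns and falls back to 0xffff, i.e. an always-open window. Assuming the effective frequency scales with the number of set bits in the window (a proportional-to-popcount model, which is an assumption of this sketch rather than something the hunk states), the arithmetic looks like:

#include <stdint.h>
#include <stdio.h>

/* Count set bits in the 16-bit squash window. */
static int hweight16(uint16_t w)
{
	int n = 0;

	for (; w; w &= w - 1)
		n++;
	return n;
}

/* Effective cdclk when a divided clock is gated by 'waveform'. */
static int squashed_cdclk(int cd2x_clk_khz, uint16_t waveform)
{
	return cd2x_clk_khz * hweight16(waveform) / 16;
}

int main(void)
{
	/* 0xffff passes every cycle; 0xaaaa passes every other one. */
	printf("%d\n", squashed_cdclk(652800, 0xffff)); /* 652800 */
	printf("%d\n", squashed_cdclk(652800, 0xaaaa)); /* 326400 */
	return 0;
}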
-static void icl_cdclk_pll_update(struct drm_i915_private *i915, int vco)
+static void icl_cdclk_pll_update(struct intel_display *display, int vco)
{
- if (i915->display.cdclk.hw.vco != 0 &&
- i915->display.cdclk.hw.vco != vco)
- icl_cdclk_pll_disable(i915);
+ if (display->cdclk.hw.vco != 0 &&
+ display->cdclk.hw.vco != vco)
+ icl_cdclk_pll_disable(display);
- if (i915->display.cdclk.hw.vco != vco)
- icl_cdclk_pll_enable(i915, vco);
+ if (display->cdclk.hw.vco != vco)
+ icl_cdclk_pll_enable(display, vco);
}
-static void bxt_cdclk_pll_update(struct drm_i915_private *i915, int vco)
+static void bxt_cdclk_pll_update(struct intel_display *display, int vco)
{
- if (i915->display.cdclk.hw.vco != 0 &&
- i915->display.cdclk.hw.vco != vco)
- bxt_de_pll_disable(i915);
+ if (display->cdclk.hw.vco != 0 &&
+ display->cdclk.hw.vco != vco)
+ bxt_de_pll_disable(display);
- if (i915->display.cdclk.hw.vco != vco)
- bxt_de_pll_enable(i915, vco);
+ if (display->cdclk.hw.vco != vco)
+ bxt_de_pll_enable(display, vco);
}
-static void dg2_cdclk_squash_program(struct drm_i915_private *i915,
+static void dg2_cdclk_squash_program(struct intel_display *display,
u16 waveform)
{
u32 squash_ctl = 0;
@@ -1879,7 +1936,7 @@ static void dg2_cdclk_squash_program(struct drm_i915_private *i915,
squash_ctl = CDCLK_SQUASH_ENABLE |
CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform;
- intel_de_write(i915, CDCLK_SQUASH_CTL, squash_ctl);
+ intel_de_write(display, CDCLK_SQUASH_CTL, squash_ctl);
}
static bool cdclk_pll_is_unknown(unsigned int vco)
@@ -1892,38 +1949,40 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
return vco == ~0;
}
-static bool mdclk_source_is_cdclk_pll(struct drm_i915_private *i915)
+static bool mdclk_source_is_cdclk_pll(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 20;
+ return DISPLAY_VER(display) >= 20;
}
-static u32 xe2lpd_mdclk_source_sel(struct drm_i915_private *i915)
+static u32 xe2lpd_mdclk_source_sel(struct intel_display *display)
{
- if (mdclk_source_is_cdclk_pll(i915))
+ if (mdclk_source_is_cdclk_pll(display))
return MDCLK_SOURCE_SEL_CDCLK_PLL;
return MDCLK_SOURCE_SEL_CD2XCLK;
}
-int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915,
+int intel_mdclk_cdclk_ratio(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config)
{
- if (mdclk_source_is_cdclk_pll(i915))
+ if (mdclk_source_is_cdclk_pll(display))
return DIV_ROUND_UP(cdclk_config->vco, cdclk_config->cdclk);
/* Otherwise, source for MDCLK is CD2XCLK. */
return 2;
}
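
For display 20+ the MDCLK is fed from the CDCLK PLL and the ratio becomes DIV_ROUND_UP(vco, cdclk); otherwise MDCLK comes from CD2XCLK and the ratio is a fixed 2. The rounding direction matters, as a quick worked example with illustrative clock values shows:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int vco = 3686400, cdclk = 652800;  /* kHz, illustrative values */

	/* 3686400 / 652800 = 5.647..., rounded up to 6 */
	printf("mdclk:cdclk ratio = %d\n", DIV_ROUND_UP(vco, cdclk));
	return 0;
}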
-static void xe2lpd_mdclk_cdclk_ratio_program(struct drm_i915_private *i915,
+static void xe2lpd_mdclk_cdclk_ratio_program(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
intel_dbuf_mdclk_cdclk_ratio_update(i915,
- intel_mdclk_cdclk_ratio(i915, cdclk_config),
+ intel_mdclk_cdclk_ratio(display, cdclk_config),
cdclk_config->joined_mbus);
}
-static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915,
+static bool cdclk_compute_crawl_and_squash_midpoint(struct intel_display *display,
const struct intel_cdclk_config *old_cdclk_config,
const struct intel_cdclk_config *new_cdclk_config,
struct intel_cdclk_config *mid_cdclk_config)
@@ -1936,11 +1995,11 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
return false;
/* Return if both Squash and Crawl are not present */
- if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915))
+ if (!HAS_CDCLK_CRAWL(display) || !HAS_CDCLK_SQUASH(display))
return false;
- old_waveform = cdclk_squash_waveform(i915, old_cdclk_config->cdclk);
- new_waveform = cdclk_squash_waveform(i915, new_cdclk_config->cdclk);
+ old_waveform = cdclk_squash_waveform(display, old_cdclk_config->cdclk);
+ new_waveform = cdclk_squash_waveform(display, new_cdclk_config->cdclk);
/* Return if Squash only or Crawl only is the desired action */
if (old_cdclk_config->vco == 0 || new_cdclk_config->vco == 0 ||
@@ -1957,7 +2016,7 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
* Should not happen currently. We might need more midpoint
* transitions if we need to also change the cd2x divider.
*/
- if (drm_WARN_ON(&i915->drm, old_div != new_div))
+ if (drm_WARN_ON(display->drm, old_div != new_div))
return false;
*mid_cdclk_config = *new_cdclk_config;
@@ -1986,37 +2045,40 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
/* make sure the mid clock came out sane */
- drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk <
+ drm_WARN_ON(display->drm, mid_cdclk_config->cdclk <
min(old_cdclk_config->cdclk, new_cdclk_config->cdclk));
- drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk >
- i915->display.cdclk.max_cdclk_freq);
- drm_WARN_ON(&i915->drm, cdclk_squash_waveform(i915, mid_cdclk_config->cdclk) !=
+ drm_WARN_ON(display->drm, mid_cdclk_config->cdclk >
+ display->cdclk.max_cdclk_freq);
+ drm_WARN_ON(display->drm, cdclk_squash_waveform(display, mid_cdclk_config->cdclk) !=
mid_waveform);
return true;
}
-static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv)
+static bool pll_enable_wa_needed(struct intel_display *display)
{
- return (DISPLAY_VER_FULL(dev_priv) == IP_VER(20, 0) ||
- DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 0) ||
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return (DISPLAY_VERx100(display) == 2000 ||
+ DISPLAY_VERx100(display) == 1400 ||
IS_DG2(dev_priv)) &&
- dev_priv->display.cdclk.hw.vco > 0;
+ display->cdclk.hw.vco > 0;
}
-static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
+static u32 bxt_cdclk_ctl(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u16 waveform;
u32 val;
- waveform = cdclk_squash_waveform(i915, cdclk);
+ waveform = cdclk_squash_waveform(display, cdclk);
- val = bxt_cdclk_cd2x_div_sel(i915, cdclk, vco, waveform) |
- bxt_cdclk_cd2x_pipe(i915, pipe);
+ val = bxt_cdclk_cd2x_div_sel(display, cdclk, vco, waveform) |
+ bxt_cdclk_cd2x_pipe(display, pipe);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
@@ -2026,50 +2088,52 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
cdclk >= 500000)
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- if (DISPLAY_VER(i915) >= 20)
- val |= xe2lpd_mdclk_source_sel(i915);
+ if (DISPLAY_VER(display) >= 20)
+ val |= xe2lpd_mdclk_source_sel(display);
else
val |= skl_cdclk_decimal(cdclk);
return val;
}
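
bxt_cdclk_ctl() is plain register composition: OR together the cd2x divider select, the pipe select, an SSA precharge bit for cdclk at or above 500 MHz, and either the MDCLK source select (display 20+) or the decimal frequency field. A generic sketch of the same shape; the field layout below is invented for illustration and is not the real CDCLK_CTL encoding:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout, not the real CDCLK_CTL bits. */
#define FIELD_DIV(x)   (((uint32_t)(x) & 0x3) << 22)
#define FIELD_PIPE(x)  (((uint32_t)(x) & 0x7) << 19)
#define FLAG_SSA_PRE   (1u << 16)
#define FIELD_FREQ(x)  ((uint32_t)(x) & 0x7ff)

static uint32_t build_ctl(int div_sel, int pipe, int cdclk_khz)
{
	uint32_t val = FIELD_DIV(div_sel) | FIELD_PIPE(pipe);

	if (cdclk_khz >= 500000)  /* mirror the >= 500 MHz precharge rule */
		val |= FLAG_SSA_PRE;

	return val | FIELD_FREQ(cdclk_khz / 1000);
}

int main(void)
{
	printf("ctl = 0x%08x\n", (unsigned int)build_ctl(1, 2, 652800));
	return 0;
}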
-static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
+static void _bxt_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
- if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
- !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
- if (dev_priv->display.cdclk.hw.vco != vco)
- adlp_cdclk_pll_crawl(dev_priv, vco);
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ if (HAS_CDCLK_CRAWL(display) && display->cdclk.hw.vco > 0 && vco > 0 &&
+ !cdclk_pll_is_unknown(display->cdclk.hw.vco)) {
+ if (display->cdclk.hw.vco != vco)
+ adlp_cdclk_pll_crawl(display, vco);
+ } else if (DISPLAY_VER(display) >= 11) {
/* wa_15010685871: dg2, mtl */
- if (pll_enable_wa_needed(dev_priv))
- dg2_cdclk_squash_program(dev_priv, 0);
+ if (pll_enable_wa_needed(display))
+ dg2_cdclk_squash_program(display, 0);
- icl_cdclk_pll_update(dev_priv, vco);
- } else
- bxt_cdclk_pll_update(dev_priv, vco);
+ icl_cdclk_pll_update(display, vco);
+ } else {
+ bxt_cdclk_pll_update(display, vco);
+ }
- if (HAS_CDCLK_SQUASH(dev_priv)) {
- u16 waveform = cdclk_squash_waveform(dev_priv, cdclk);
+ if (HAS_CDCLK_SQUASH(display)) {
+ u16 waveform = cdclk_squash_waveform(display, cdclk);
- dg2_cdclk_squash_program(dev_priv, waveform);
+ dg2_cdclk_squash_program(display, waveform);
}
- intel_de_write(dev_priv, CDCLK_CTL, bxt_cdclk_ctl(dev_priv, cdclk_config, pipe));
+ intel_de_write(display, CDCLK_CTL, bxt_cdclk_ctl(display, cdclk_config, pipe));
if (pipe != INVALID_PIPE)
- intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
+ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe));
}
-static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
+static void bxt_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_config mid_cdclk_config;
int cdclk = cdclk_config->cdclk;
int ret = 0;
@@ -2080,9 +2144,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* mailbox communication, skip
* this step.
*/
- if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
+ if (DISPLAY_VER(display) >= 14 || IS_DG2(dev_priv))
/* NOOP */;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
@@ -2097,35 +2161,35 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
0x80000000, 150, 2);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to inform PCU about cdclk change (err %d, freq %d)\n",
ret, cdclk);
return;
}
- if (DISPLAY_VER(dev_priv) >= 20 && cdclk < dev_priv->display.cdclk.hw.cdclk)
- xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config);
+ if (DISPLAY_VER(display) >= 20 && cdclk < display->cdclk.hw.cdclk)
+ xe2lpd_mdclk_cdclk_ratio_program(display, cdclk_config);
- if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw,
+ if (cdclk_compute_crawl_and_squash_midpoint(display, &display->cdclk.hw,
cdclk_config, &mid_cdclk_config)) {
- _bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe);
- _bxt_set_cdclk(dev_priv, cdclk_config, pipe);
+ _bxt_set_cdclk(display, &mid_cdclk_config, pipe);
+ _bxt_set_cdclk(display, cdclk_config, pipe);
} else {
- _bxt_set_cdclk(dev_priv, cdclk_config, pipe);
+ _bxt_set_cdclk(display, cdclk_config, pipe);
}
- if (DISPLAY_VER(dev_priv) >= 20 && cdclk > dev_priv->display.cdclk.hw.cdclk)
- xe2lpd_mdclk_cdclk_ratio_program(dev_priv, cdclk_config);
+ if (DISPLAY_VER(display) >= 20 && cdclk > display->cdclk.hw.cdclk)
+ xe2lpd_mdclk_cdclk_ratio_program(display, cdclk_config);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
/*
* NOOP - No Pcode communication needed for
* Display versions 14 and beyond
*/;
- else if (DISPLAY_VER(dev_priv) >= 11 && !IS_DG2(dev_priv))
+ else if (DISPLAY_VER(display) >= 11 && !IS_DG2(dev_priv))
ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
- if (DISPLAY_VER(dev_priv) < 11) {
+ if (DISPLAY_VER(display) < 11) {
/*
* The timeout isn't specified, the 2ms used here is based on
* experiment.
@@ -2138,42 +2202,42 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
150, 2);
}
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"PCode CDCLK freq set failed, (err %d, freq %d)\n",
ret, cdclk);
return;
}
- intel_update_cdclk(dev_priv);
+ intel_update_cdclk(display);
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
/*
* Can't read out the voltage level :(
* Let's just assume everything is as expected.
*/
- dev_priv->display.cdclk.hw.voltage_level = cdclk_config->voltage_level;
+ display->cdclk.hw.voltage_level = cdclk_config->voltage_level;
}
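
Note the bracketing in bxt_set_cdclk(): the MDCLK:CDCLK ratio is pushed to the dbuf code before the switch when cdclk is going down, and after it when going up, so the ratio update always lands on the lower-frequency side of the transition (that rationale is my reading; the patch itself only shows the ordering). Reduced to its control flow:

#include <stdio.h>

static void program_ratio(const char *when) { printf("ratio (%s)\n", when); }
static void switch_cdclk(void)              { printf("switch cdclk\n"); }

/* Order the ratio update around the frequency switch. */
static void set_cdclk(int cur_khz, int new_khz)
{
	if (new_khz < cur_khz)
		program_ratio("before decrease");
	switch_cdclk();
	if (new_khz > cur_khz)
		program_ratio("after increase");
}

int main(void)
{
	set_cdclk(652800, 307200); /* decreasing: ratio first */
	set_cdclk(307200, 652800); /* increasing: ratio last */
	return 0;
}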
-static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct intel_display *display)
{
u32 cdctl, expected;
int cdclk, vco;
- intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
+ intel_update_cdclk(display);
+ intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
- if (dev_priv->display.cdclk.hw.vco == 0 ||
- dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
+ if (display->cdclk.hw.vco == 0 ||
+ display->cdclk.hw.cdclk == display->cdclk.hw.bypass)
goto sanitize;
/* Make sure this is a legal cdclk value for the platform */
- cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk);
- if (cdclk != dev_priv->display.cdclk.hw.cdclk)
+ cdclk = bxt_calc_cdclk(display, display->cdclk.hw.cdclk);
+ if (cdclk != display->cdclk.hw.cdclk)
goto sanitize;
/* Make sure the VCO is correct for the cdclk */
- vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
- if (vco != dev_priv->display.cdclk.hw.vco)
+ vco = bxt_calc_cdclk_pll_vco(display, cdclk);
+ if (vco != display->cdclk.hw.vco)
goto sanitize;
/*
@@ -2181,129 +2245,133 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
* set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
* so sanitize this register.
*/
- cdctl = intel_de_read(dev_priv, CDCLK_CTL);
- expected = bxt_cdclk_ctl(dev_priv, &dev_priv->display.cdclk.hw, INVALID_PIPE);
+ cdctl = intel_de_read(display, CDCLK_CTL);
+ expected = bxt_cdclk_ctl(display, &display->cdclk.hw, INVALID_PIPE);
/*
* Let's ignore the pipe field, since BIOS could have configured the
* dividers both synching to an active pipe, or asynchronously
* (PIPE_NONE).
*/
- cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
- expected &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
+ cdctl &= ~bxt_cdclk_cd2x_pipe(display, INVALID_PIPE);
+ expected &= ~bxt_cdclk_cd2x_pipe(display, INVALID_PIPE);
if (cdctl == expected)
/* All well; nothing to sanitize */
return;
sanitize:
- drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
+ drm_dbg_kms(display->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
- dev_priv->display.cdclk.hw.cdclk = 0;
+ display->cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->display.cdclk.hw.vco = ~0;
+ display->cdclk.hw.vco = ~0;
}
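
The sanitize fallthrough deliberately writes impossible state: hw.cdclk = 0 cannot match any computed frequency, and hw.vco = ~0 is exactly the sentinel cdclk_pll_is_unknown() tests, so the next _bxt_set_cdclk() is forced through the full PLL disable/enable path instead of crawling. The sentinel pattern in isolation:

#include <stdio.h>

struct cdclk_hw {
	int cdclk;          /* 0 forces reprogramming */
	unsigned int vco;   /* ~0u means "PLL state unknown" */
};

static int pll_is_unknown(unsigned int vco)
{
	return vco == ~0u;
}

int main(void)
{
	struct cdclk_hw hw = { .cdclk = 0, .vco = ~0u };

	if (hw.cdclk == 0 || pll_is_unknown(hw.vco))
		printf("stale pre-OS state: full PLL disable + enable needed\n");
	return 0;
}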
-static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
+static void bxt_cdclk_init_hw(struct intel_display *display)
{
struct intel_cdclk_config cdclk_config;
- bxt_sanitize_cdclk(dev_priv);
+ bxt_sanitize_cdclk(display);
- if (dev_priv->display.cdclk.hw.cdclk != 0 &&
- dev_priv->display.cdclk.hw.vco != 0)
+ if (display->cdclk.hw.cdclk != 0 &&
+ display->cdclk.hw.vco != 0)
return;
- cdclk_config = dev_priv->display.cdclk.hw;
+ cdclk_config = display->cdclk.hw;
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
* Need to make this change after VBT has changes for BXT.
*/
- cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0);
- cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk);
+ cdclk_config.cdclk = bxt_calc_cdclk(display, 0);
+ cdclk_config.vco = bxt_calc_cdclk_pll_vco(display, cdclk_config.cdclk);
cdclk_config.voltage_level =
- intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);
+ intel_cdclk_calc_voltage_level(display, cdclk_config.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
+ bxt_set_cdclk(display, &cdclk_config, INVALID_PIPE);
}
-static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
+static void bxt_cdclk_uninit_hw(struct intel_display *display)
{
- struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw;
+ struct intel_cdclk_config cdclk_config = display->cdclk.hw;
cdclk_config.cdclk = cdclk_config.bypass;
cdclk_config.vco = 0;
cdclk_config.voltage_level =
- intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);
+ intel_cdclk_calc_voltage_level(display, cdclk_config.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
+ bxt_set_cdclk(display, &cdclk_config, INVALID_PIPE);
}
/**
* intel_cdclk_init_hw - Initialize CDCLK hardware
- * @i915: i915 device
+ * @display: display instance
*
- * Initialize CDCLK. This consists mainly of initializing dev_priv->display.cdclk.hw and
+ * Initialize CDCLK. This consists mainly of initializing display->cdclk.hw and
* sanitizing the state of the hardware if needed. This is generally done only
* during the display core initialization sequence, after which the DMC will
* take care of turning CDCLK off/on as needed.
*/
-void intel_cdclk_init_hw(struct drm_i915_private *i915)
+void intel_cdclk_init_hw(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915))
- bxt_cdclk_init_hw(i915);
- else if (DISPLAY_VER(i915) == 9)
- skl_cdclk_init_hw(i915);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 10 || IS_BROXTON(i915))
+ bxt_cdclk_init_hw(display);
+ else if (DISPLAY_VER(display) == 9)
+ skl_cdclk_init_hw(display);
}
/**
* intel_cdclk_uninit_hw - Uninitialize CDCLK hardware
- * @i915: i915 device
+ * @display: display instance
*
* Uninitialize CDCLK. This is done only during the display core
* uninitialization sequence.
*/
-void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
+void intel_cdclk_uninit_hw(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915))
- bxt_cdclk_uninit_hw(i915);
- else if (DISPLAY_VER(i915) == 9)
- skl_cdclk_uninit_hw(i915);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 10 || IS_BROXTON(i915))
+ bxt_cdclk_uninit_hw(display);
+ else if (DISPLAY_VER(display) == 9)
+ skl_cdclk_uninit_hw(display);
}
-static bool intel_cdclk_can_crawl_and_squash(struct drm_i915_private *i915,
+static bool intel_cdclk_can_crawl_and_squash(struct intel_display *display,
const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
u16 old_waveform;
u16 new_waveform;
- drm_WARN_ON(&i915->drm, cdclk_pll_is_unknown(a->vco));
+ drm_WARN_ON(display->drm, cdclk_pll_is_unknown(a->vco));
if (a->vco == 0 || b->vco == 0)
return false;
- if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915))
+ if (!HAS_CDCLK_CRAWL(display) || !HAS_CDCLK_SQUASH(display))
return false;
- old_waveform = cdclk_squash_waveform(i915, a->cdclk);
- new_waveform = cdclk_squash_waveform(i915, b->cdclk);
+ old_waveform = cdclk_squash_waveform(display, a->cdclk);
+ new_waveform = cdclk_squash_waveform(display, b->cdclk);
return a->vco != b->vco &&
old_waveform != new_waveform;
}
-static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
+static bool intel_cdclk_can_crawl(struct intel_display *display,
const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
int a_div, b_div;
- if (!HAS_CDCLK_CRAWL(dev_priv))
+ if (!HAS_CDCLK_CRAWL(display))
return false;
/*
@@ -2319,7 +2387,7 @@ static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
a->ref == b->ref;
}
-static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv,
+static bool intel_cdclk_can_squash(struct intel_display *display,
const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
@@ -2329,7 +2397,7 @@ static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv,
* the moment all platforms with squasher use a fixed cd2x
* divider.
*/
- if (!HAS_CDCLK_SQUASH(dev_priv))
+ if (!HAS_CDCLK_SQUASH(display))
return false;
return a->cdclk != b->cdclk &&
@@ -2358,7 +2426,7 @@ bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
/**
* intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK
* configurations requires only a cd2x divider update
- * @dev_priv: i915 device
+ * @display: display instance
* @a: first CDCLK configuration
* @b: second CDCLK configuration
*
@@ -2366,12 +2434,14 @@ bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
* True if changing between the two CDCLK configurations
* can be done with just a cd2x divider update, false if not.
*/
-static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
+static bool intel_cdclk_can_cd2x_update(struct intel_display *display,
const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
/* Older hw doesn't have the capability */
- if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv))
+ if (DISPLAY_VER(display) < 10 && !IS_BROXTON(dev_priv))
return false;
/*
@@ -2380,7 +2450,7 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
* the moment all platforms with squasher use a fixed cd2x
* divider.
*/
- if (HAS_CDCLK_SQUASH(dev_priv))
+ if (HAS_CDCLK_SQUASH(display))
return false;
return a->cdclk != b->cdclk &&
@@ -2404,23 +2474,24 @@ static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
a->voltage_level != b->voltage_level;
}
-void intel_cdclk_dump_config(struct drm_i915_private *i915,
+void intel_cdclk_dump_config(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
const char *context)
{
- drm_dbg_kms(&i915->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
+ drm_dbg_kms(display->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
context, cdclk_config->cdclk, cdclk_config->vco,
cdclk_config->ref, cdclk_config->bypass,
cdclk_config->voltage_level);
}
-static void intel_pcode_notify(struct drm_i915_private *i915,
+static void intel_pcode_notify(struct intel_display *display,
u8 voltage_level,
u8 active_pipe_count,
u16 cdclk,
bool cdclk_update_valid,
bool pipe_count_update_valid)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
u32 update_mask = 0;
@@ -2441,26 +2512,27 @@ static void intel_pcode_notify(struct drm_i915_private *i915,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to inform PCU about display config (err %d)\n",
ret);
}
-static void intel_set_cdclk(struct drm_i915_private *dev_priv,
+static void intel_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe, const char *context)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
- if (!intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config))
+ if (!intel_cdclk_changed(&display->cdclk.hw, cdclk_config))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk))
+ if (drm_WARN_ON_ONCE(display->drm, !display->funcs.cdclk->set_cdclk))
return;
- intel_cdclk_dump_config(dev_priv, cdclk_config, context);
+ intel_cdclk_dump_config(display, cdclk_config, context);
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_psr_pause(intel_dp);
@@ -2473,24 +2545,24 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
* functions use cdclk. Not all platforms/ports do,
* but we'll lock them all for simplicity.
*/
- mutex_lock(&dev_priv->display.gmbus.mutex);
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ mutex_lock(&display->gmbus.mutex);
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
- &dev_priv->display.gmbus.mutex);
+ &display->gmbus.mutex);
}
- intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe);
+ intel_cdclk_set_cdclk(display, cdclk_config, pipe);
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
mutex_unlock(&intel_dp->aux.hw_mutex);
}
- mutex_unlock(&dev_priv->display.gmbus.mutex);
+ mutex_unlock(&display->gmbus.mutex);
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_psr_resume(intel_dp);
@@ -2498,17 +2570,17 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_audio_cdclk_change_post(dev_priv);
- if (drm_WARN(&dev_priv->drm,
- intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config),
+ if (drm_WARN(display->drm,
+ intel_cdclk_changed(&display->cdclk.hw, cdclk_config),
"cdclk state doesn't match!\n")) {
- intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "[hw state]");
- intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]");
+ intel_cdclk_dump_config(display, &display->cdclk.hw, "[hw state]");
+ intel_cdclk_dump_config(display, cdclk_config, "[sw state]");
}
}
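
intel_set_cdclk() wraps the hardware poke in a strict lock order: the gmbus mutex first, then every DP AUX hw_mutex (mutex_lock_nest_lock() tells lockdep the AUX locks nest under gmbus), then release in the reverse grouping once the clock is reprogrammed. A reduced userspace sketch of the same acquire/release shape, with pthread mutexes standing in for the kernel primitives and an arbitrary AUX count:

#include <pthread.h>
#include <stdio.h>

#define NUM_AUX 3

static pthread_mutex_t gmbus = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t aux[NUM_AUX] = {
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
};

static void change_cdclk(void) { printf("reprogramming cdclk\n"); }

int main(void)
{
	int i;

	pthread_mutex_lock(&gmbus);    /* outer lock first */
	for (i = 0; i < NUM_AUX; i++)  /* then every AUX lock */
		pthread_mutex_lock(&aux[i]);

	change_cdclk();                /* nothing may use cdclk here */

	for (i = 0; i < NUM_AUX; i++)
		pthread_mutex_unlock(&aux[i]);
	pthread_mutex_unlock(&gmbus);
	return 0;
}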
static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *old_cdclk_state =
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
@@ -2547,13 +2619,13 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
if (update_pipe_count)
num_active_pipes = hweight8(new_cdclk_state->active_pipes);
- intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk,
+ intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk,
change_cdclk, update_pipe_count);
}
static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
const struct intel_cdclk_state *old_cdclk_state =
@@ -2584,7 +2656,7 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
if (update_pipe_count)
num_active_pipes = hweight8(new_cdclk_state->active_pipes);
- intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk,
+ intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk,
update_cdclk, update_pipe_count);
}
@@ -2609,7 +2681,8 @@ bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state)
void
intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_cdclk_state *old_cdclk_state =
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
@@ -2646,9 +2719,9 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
*/
cdclk_config.joined_mbus = old_cdclk_state->actual.joined_mbus;
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ drm_WARN_ON(display->drm, !new_cdclk_state->base.changed);
- intel_set_cdclk(i915, &cdclk_config, pipe,
+ intel_set_cdclk(display, &cdclk_config, pipe,
"Pre changing CDCLK to");
}
@@ -2662,7 +2735,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
void
intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_cdclk_state *old_cdclk_state =
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
@@ -2682,166 +2756,76 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
else
pipe = INVALID_PIPE;
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ drm_WARN_ON(display->drm, !new_cdclk_state->base.changed);
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe,
+ intel_set_cdclk(display, &new_cdclk_state->actual, pipe,
"Post changing CDCLK to");
}
-static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+/* pixels per CDCLK */
+static int intel_cdclk_ppc(struct intel_display *display, bool double_wide)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int pixel_rate = crtc_state->pixel_rate;
+ return DISPLAY_VER(display) >= 10 || double_wide ? 2 : 1;
+}
- if (DISPLAY_VER(dev_priv) >= 10)
- return DIV_ROUND_UP(pixel_rate, 2);
- else if (DISPLAY_VER(dev_priv) == 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return pixel_rate;
+/* max pixel rate as % of CDCLK (not accounting for PPC) */
+static int intel_cdclk_guardband(struct intel_display *display)
+{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return 100;
else if (IS_CHERRYVIEW(dev_priv))
- return DIV_ROUND_UP(pixel_rate * 100, 95);
- else if (crtc_state->double_wide)
- return DIV_ROUND_UP(pixel_rate * 100, 90 * 2);
+ return 95;
else
- return DIV_ROUND_UP(pixel_rate * 100, 90);
+ return 90;
}
-static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_plane *plane;
- int min_cdclk = 0;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
- min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk);
+ struct intel_display *display = to_intel_display(crtc_state);
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+ int guardband = intel_cdclk_guardband(display);
+ int pixel_rate = crtc_state->pixel_rate;
- return min_cdclk;
+ return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc);
}
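
This refactor is the heart of the cleanup at the bottom of the patch: the old five-way branch collapses into two helpers, pixels per clock (2 on display 10+ or double-wide, else 1) and a guardband percentage (100 for display 9+/HSW/BDW, 95 for CHV, 90 otherwise), leaving a single formula. Checking it against the old special cases:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int min_cdclk(int pixel_rate, int guardband, int ppc)
{
	return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc);
}

int main(void)
{
	/* display 10+: guardband 100, ppc 2, old DIV_ROUND_UP(rate, 2) */
	printf("%d\n", min_cdclk(297000, 100, 2));  /* 148500 */
	/* CHV: guardband 95, ppc 1, old rate * 100 / 95 */
	printf("%d\n", min_cdclk(297000,  95, 1));  /* 312632 */
	/* old double-wide case: guardband 90, ppc 2 */
	printf("%d\n", min_cdclk(297000,  90, 2));  /* 165000 */
	return 0;
}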
-static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
+static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_plane *plane;
int min_cdclk = 0;
- /*
- * When we decide to use only one VDSC engine, since
- * each VDSC operates with 1 ppc throughput, pixel clock
- * cannot be higher than the VDSC clock (cdclk).
- * If there are 2 VDSC engines, then pixel clock can't be higher than
- * VDSC clock (cdclk) * 2 and so on.
- */
- min_cdclk = max_t(int, min_cdclk,
- DIV_ROUND_UP(crtc_state->pixel_rate, num_vdsc_instances));
-
- if (crtc_state->joiner_pipes) {
- int pixel_clock = intel_dp_mode_to_fec_clock(crtc_state->hw.adjusted_mode.clock);
-
- /*
- * According to Bigjoiner bw check:
- * compressed_bpp <= PPC * CDCLK * Big joiner Interface bits / Pixel clock
- *
- * We have already computed compressed_bpp, so now compute the min CDCLK that
- * is required to support this compressed_bpp.
- *
- * => CDCLK >= compressed_bpp * Pixel clock / (PPC * Bigjoiner Interface bits)
- *
- * Since PPC = 2 with bigjoiner
- * => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits
- */
- int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
- int min_cdclk_bj =
- (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
- pixel_clock) / (2 * bigjoiner_interface_bits);
-
- min_cdclk = max(min_cdclk, min_cdclk_bj);
- }
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane)
+ min_cdclk = max(min_cdclk, crtc_state->min_cdclk[plane->id]);
return min_cdclk;
}
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(crtc_state->uapi.crtc->dev);
int min_cdclk;
if (!crtc_state->hw.enable)
return 0;
min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
- min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
-
- /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
- * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
- * there may be audio corruption or screen corruption." This cdclk
- * restriction for GLK is 316.8 MHz.
- */
- if (intel_crtc_has_dp_encoder(crtc_state) &&
- crtc_state->has_audio &&
- crtc_state->port_clock >= 540000 &&
- crtc_state->lane_count == 4) {
- if (DISPLAY_VER(dev_priv) == 10) {
- /* Display WA #1145: glk */
- min_cdclk = max(316800, min_cdclk);
- } else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv)) {
- /* Display WA #1144: skl,bxt */
- min_cdclk = max(432000, min_cdclk);
- }
- }
-
- /*
- * According to BSpec, "The CD clock frequency must be at least twice
- * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
- */
- if (crtc_state->has_audio && DISPLAY_VER(dev_priv) >= 9)
- min_cdclk = max(2 * 96000, min_cdclk);
-
- /*
- * "For DP audio configuration, cdclk frequency shall be set to
- * meet the following requirements:
- * DP Link Frequency(MHz) | Cdclk frequency(MHz)
- * 270 | 320 or higher
- * 162 | 200 or higher"
- */
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
- min_cdclk = max(crtc_state->port_clock, min_cdclk);
-
- /*
- * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
- * than 320000KHz.
- */
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
- IS_VALLEYVIEW(dev_priv))
- min_cdclk = max(320000, min_cdclk);
-
- /*
- * On Geminilake once the CDCLK gets as low as 79200
- * picture gets unstable, despite that values are
- * correct for DSI PLL and DE PLL.
- */
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
- IS_GEMINILAKE(dev_priv))
- min_cdclk = max(158400, min_cdclk);
-
- /* Account for additional needs from the planes */
- min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
-
- if (crtc_state->dsc.compression_enable)
- min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, hsw_ips_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_audio_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, vlv_dsi_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_planes_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_vdsc_min_cdclk(crtc_state));
return min_cdclk;
}
static int intel_compute_min_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state =
intel_atomic_get_new_cdclk_state(state);
const struct intel_bw_state *bw_state;
@@ -2884,8 +2868,8 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
min_cdclk = max(cdclk_state->force_min_cdclk,
cdclk_state->bw_min_cdclk);
- for_each_pipe(dev_priv, pipe)
- min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
+ for_each_pipe(display, pipe)
+ min_cdclk = max(min_cdclk, cdclk_state->min_cdclk[pipe]);
/*
* Avoid glk_force_audio_cdclk() causing excessive screen
@@ -2897,12 +2881,12 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
*/
if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
!is_power_of_2(cdclk_state->active_pipes))
- min_cdclk = max(2 * 96000, min_cdclk);
+ min_cdclk = max(min_cdclk, 2 * 96000);
- if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
- drm_dbg_kms(&dev_priv->drm,
+ if (min_cdclk > display->cdclk.max_cdclk_freq) {
+ drm_dbg_kms(display->drm,
"required cdclk (%d kHz) exceeds max (%d kHz)\n",
- min_cdclk, dev_priv->display.cdclk.max_cdclk_freq);
+ min_cdclk, display->cdclk.max_cdclk_freq);
return -EINVAL;
}
@@ -2924,7 +2908,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
*/
static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_cdclk_state *cdclk_state =
intel_atomic_get_new_cdclk_state(state);
struct intel_crtc *crtc;
@@ -2952,16 +2936,16 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
}
min_voltage_level = 0;
- for_each_pipe(dev_priv, pipe)
- min_voltage_level = max(cdclk_state->min_voltage_level[pipe],
- min_voltage_level);
+ for_each_pipe(display, pipe)
+ min_voltage_level = max(min_voltage_level,
+ cdclk_state->min_voltage_level[pipe]);
return min_voltage_level;
}
static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_cdclk_state *cdclk_state =
intel_atomic_get_new_cdclk_state(state);
int min_cdclk, cdclk;
@@ -2970,18 +2954,18 @@ static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
if (min_cdclk < 0)
return min_cdclk;
- cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
+ cdclk = vlv_calc_cdclk(display, min_cdclk);
cdclk_state->logical.cdclk = cdclk;
cdclk_state->logical.voltage_level =
- vlv_calc_voltage_level(dev_priv, cdclk);
+ vlv_calc_voltage_level(display, cdclk);
if (!cdclk_state->active_pipes) {
- cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
+ cdclk = vlv_calc_cdclk(display, cdclk_state->force_min_cdclk);
cdclk_state->actual.cdclk = cdclk;
cdclk_state->actual.voltage_level =
- vlv_calc_voltage_level(dev_priv, cdclk);
+ vlv_calc_voltage_level(display, cdclk);
} else {
cdclk_state->actual = cdclk_state->logical;
}
@@ -3020,7 +3004,7 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
static int skl_dpll0_vco(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_cdclk_state *cdclk_state =
intel_atomic_get_new_cdclk_state(state);
struct intel_crtc *crtc;
@@ -3029,7 +3013,7 @@ static int skl_dpll0_vco(struct intel_atomic_state *state)
vco = cdclk_state->logical.vco;
if (!vco)
- vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
+ vco = display->cdclk.skl_preferred_vco_freq;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->hw.enable)
@@ -3091,7 +3075,7 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_cdclk_state *cdclk_state =
intel_atomic_get_new_cdclk_state(state);
int min_cdclk, min_voltage_level, cdclk, vco;
@@ -3104,23 +3088,23 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
if (min_voltage_level < 0)
return min_voltage_level;
- cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
- vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
+ cdclk = bxt_calc_cdclk(display, min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(display, cdclk);
cdclk_state->logical.vco = vco;
cdclk_state->logical.cdclk = cdclk;
cdclk_state->logical.voltage_level =
max_t(int, min_voltage_level,
- intel_cdclk_calc_voltage_level(dev_priv, cdclk));
+ intel_cdclk_calc_voltage_level(display, cdclk));
if (!cdclk_state->active_pipes) {
- cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
- vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
+ cdclk = bxt_calc_cdclk(display, cdclk_state->force_min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(display, cdclk);
cdclk_state->actual.vco = vco;
cdclk_state->actual.cdclk = cdclk;
cdclk_state->actual.voltage_level =
- intel_cdclk_calc_voltage_level(dev_priv, cdclk);
+ intel_cdclk_calc_voltage_level(display, cdclk);
} else {
cdclk_state->actual = cdclk_state->logical;
}
@@ -3172,10 +3156,10 @@ static const struct intel_global_state_funcs intel_cdclk_funcs = {
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *cdclk_state;
- cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.cdclk.obj);
+ cdclk_state = intel_atomic_get_global_obj_state(state, &display->cdclk.obj);
if (IS_ERR(cdclk_state))
return ERR_CAST(cdclk_state);
@@ -3231,7 +3215,7 @@ int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joi
return intel_atomic_lock_global_state(&cdclk_state->base);
}
-int intel_cdclk_init(struct drm_i915_private *dev_priv)
+int intel_cdclk_init(struct intel_display *display)
{
struct intel_cdclk_state *cdclk_state;
@@ -3239,16 +3223,17 @@ int intel_cdclk_init(struct drm_i915_private *dev_priv)
if (!cdclk_state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->display.cdclk.obj,
+ intel_atomic_global_obj_init(display, &display->cdclk.obj,
&cdclk_state->base, &intel_cdclk_funcs);
return 0;
}
-static bool intel_cdclk_need_serialize(struct drm_i915_private *i915,
+static bool intel_cdclk_need_serialize(struct intel_display *display,
const struct intel_cdclk_state *old_cdclk_state,
const struct intel_cdclk_state *new_cdclk_state)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) !=
hweight8(new_cdclk_state->active_pipes);
bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual,
@@ -3262,7 +3247,7 @@ static bool intel_cdclk_need_serialize(struct drm_i915_private *i915,
int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *old_cdclk_state;
struct intel_cdclk_state *new_cdclk_state;
enum pipe pipe = INVALID_PIPE;
@@ -3281,7 +3266,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
- if (intel_cdclk_need_serialize(dev_priv, old_cdclk_state, new_cdclk_state)) {
+ if (intel_cdclk_need_serialize(display, old_cdclk_state, new_cdclk_state)) {
/*
* Also serialize commits across all crtcs
* if the actual hw needs to be poked.
@@ -3301,14 +3286,14 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
}
if (is_power_of_2(new_cdclk_state->active_pipes) &&
- intel_cdclk_can_cd2x_update(dev_priv,
+ intel_cdclk_can_cd2x_update(display,
&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
pipe = ilog2(new_cdclk_state->active_pipes);
- crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(display, pipe);
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
@@ -3318,25 +3303,25 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
pipe = INVALID_PIPE;
}
- if (intel_cdclk_can_crawl_and_squash(dev_priv,
+ if (intel_cdclk_can_crawl_and_squash(display,
&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Can change cdclk via crawling and squashing\n");
- } else if (intel_cdclk_can_squash(dev_priv,
+ } else if (intel_cdclk_can_squash(display,
&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Can change cdclk via squashing\n");
- } else if (intel_cdclk_can_crawl(dev_priv,
+ } else if (intel_cdclk_can_crawl(display,
&old_cdclk_state->actual,
&new_cdclk_state->actual)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Can change cdclk via crawling\n");
} else if (pipe != INVALID_PIPE) {
new_cdclk_state->pipe = pipe;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Can change cdclk cd2x divider with pipe %c active\n",
pipe_name(pipe));
} else if (intel_cdclk_clock_changed(&old_cdclk_state->actual,
@@ -3348,24 +3333,24 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
new_cdclk_state->disable_pipes = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Modeset required for cdclk change\n");
}
- if (intel_mdclk_cdclk_ratio(dev_priv, &old_cdclk_state->actual) !=
- intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual)) {
- int ratio = intel_mdclk_cdclk_ratio(dev_priv, &new_cdclk_state->actual);
+ if (intel_mdclk_cdclk_ratio(display, &old_cdclk_state->actual) !=
+ intel_mdclk_cdclk_ratio(display, &new_cdclk_state->actual)) {
+ int ratio = intel_mdclk_cdclk_ratio(display, &new_cdclk_state->actual);
ret = intel_dbuf_state_set_mdclk_cdclk_ratio(state, ratio);
if (ret)
return ret;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"New cdclk calculated to be logical %u kHz, actual %u kHz\n",
new_cdclk_state->logical.cdclk,
new_cdclk_state->actual.cdclk);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"New voltage level calculated to be logical %u, actual %u\n",
new_cdclk_state->logical.voltage_level,
new_cdclk_state->actual.voltage_level);
@@ -3373,53 +3358,49 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
-static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
+static int intel_compute_max_dotclk(struct intel_display *display)
{
- int max_cdclk_freq = dev_priv->display.cdclk.max_cdclk_freq;
+ int ppc = intel_cdclk_ppc(display, HAS_DOUBLE_WIDE(display));
+ int guardband = intel_cdclk_guardband(display);
+ int max_cdclk_freq = display->cdclk.max_cdclk_freq;
- if (DISPLAY_VER(dev_priv) >= 10)
- return 2 * max_cdclk_freq;
- else if (DISPLAY_VER(dev_priv) == 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return max_cdclk_freq;
- else if (IS_CHERRYVIEW(dev_priv))
- return max_cdclk_freq*95/100;
- else if (DISPLAY_VER(dev_priv) < 4)
- return 2*max_cdclk_freq*90/100;
- else
- return max_cdclk_freq*90/100;
+ return ppc * max_cdclk_freq * guardband / 100;
}
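
intel_compute_max_dotclk() is the same formula run in the other direction, ppc * max_cdclk * guardband / 100, and with HAS_DOUBLE_WIDE() feeding the ppc helper the old pre-gen4 2x case falls out naturally. Worked numbers, using max cdclk values that appear in intel_update_max_cdclk() below:

#include <stdio.h>

static int max_dotclk(int max_cdclk, int guardband, int ppc)
{
	return ppc * max_cdclk * guardband / 100;
}

int main(void)
{
	printf("%d\n", max_dotclk(652800, 100, 2)); /* ICL+: 1305600 kHz */
	printf("%d\n", max_dotclk(320000,  95, 1)); /* CHV:   304000 kHz */
	printf("%d\n", max_dotclk(400000,  90, 1)); /* VLV:   360000 kHz */
	return 0;
}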
/**
* intel_update_max_cdclk - Determine the maximum support CDCLK frequency
- * @dev_priv: i915 device
+ * @display: display instance
*
* Determine the maximum CDCLK frequency the platform supports, and also
* derive the maximum dot clock frequency the maximum CDCLK frequency
* allows.
*/
-void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
+void intel_update_max_cdclk(struct intel_display *display)
{
- if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- if (dev_priv->display.cdclk.hw.ref == 24000)
- dev_priv->display.cdclk.max_cdclk_freq = 552000;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 30) {
+ display->cdclk.max_cdclk_freq = 691200;
+ } else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
+ if (display->cdclk.hw.ref == 24000)
+ display->cdclk.max_cdclk_freq = 552000;
else
- dev_priv->display.cdclk.max_cdclk_freq = 556800;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
- if (dev_priv->display.cdclk.hw.ref == 24000)
- dev_priv->display.cdclk.max_cdclk_freq = 648000;
+ display->cdclk.max_cdclk_freq = 556800;
+ } else if (DISPLAY_VER(display) >= 11) {
+ if (display->cdclk.hw.ref == 24000)
+ display->cdclk.max_cdclk_freq = 648000;
else
- dev_priv->display.cdclk.max_cdclk_freq = 652800;
+ display->cdclk.max_cdclk_freq = 652800;
} else if (IS_GEMINILAKE(dev_priv)) {
- dev_priv->display.cdclk.max_cdclk_freq = 316800;
+ display->cdclk.max_cdclk_freq = 316800;
} else if (IS_BROXTON(dev_priv)) {
- dev_priv->display.cdclk.max_cdclk_freq = 624000;
- } else if (DISPLAY_VER(dev_priv) == 9) {
- u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+ display->cdclk.max_cdclk_freq = 624000;
+ } else if (DISPLAY_VER(display) == 9) {
+ u32 limit = intel_de_read(display, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
- vco = dev_priv->display.cdclk.skl_preferred_vco_freq;
- drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
+ vco = display->cdclk.skl_preferred_vco_freq;
+ drm_WARN_ON(display->drm, vco != 8100000 && vco != 8640000);
/*
* Use the lower (vco 8640) cdclk values as a
@@ -3435,7 +3416,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
else
max_cdclk = 308571;
- dev_priv->display.cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
+ display->cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
} else if (IS_BROADWELL(dev_priv)) {
/*
* FIXME with extra cooling we can allow
@@ -3443,41 +3424,43 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
* How can we know if extra cooling is
* available? PCI ID, VTB, something else?
*/
- if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
- dev_priv->display.cdclk.max_cdclk_freq = 450000;
+ if (intel_de_read(display, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ display->cdclk.max_cdclk_freq = 450000;
else if (IS_BROADWELL_ULX(dev_priv))
- dev_priv->display.cdclk.max_cdclk_freq = 450000;
+ display->cdclk.max_cdclk_freq = 450000;
else if (IS_BROADWELL_ULT(dev_priv))
- dev_priv->display.cdclk.max_cdclk_freq = 540000;
+ display->cdclk.max_cdclk_freq = 540000;
else
- dev_priv->display.cdclk.max_cdclk_freq = 675000;
+ display->cdclk.max_cdclk_freq = 675000;
} else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.cdclk.max_cdclk_freq = 320000;
+ display->cdclk.max_cdclk_freq = 320000;
} else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.cdclk.max_cdclk_freq = 400000;
+ display->cdclk.max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
- dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk;
+ display->cdclk.max_cdclk_freq = display->cdclk.hw.cdclk;
}
- dev_priv->display.cdclk.max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+ display->cdclk.max_dotclk_freq = intel_compute_max_dotclk(display);
- drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
- dev_priv->display.cdclk.max_cdclk_freq);
+ drm_dbg(display->drm, "Max CD clock rate: %d kHz\n",
+ display->cdclk.max_cdclk_freq);
- drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
- dev_priv->display.cdclk.max_dotclk_freq);
+ drm_dbg(display->drm, "Max dotclock rate: %d kHz\n",
+ display->cdclk.max_dotclk_freq);
}
/**
* intel_update_cdclk - Determine the current CDCLK frequency
- * @dev_priv: i915 device
+ * @display: display instance
*
* Determine the current CDCLK frequency.
*/
-void intel_update_cdclk(struct drm_i915_private *dev_priv)
+void intel_update_cdclk(struct intel_display *display)
{
- intel_cdclk_get_cdclk(dev_priv, &dev_priv->display.cdclk.hw);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ intel_cdclk_get_cdclk(display, &display->cdclk.hw);
/*
* 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
@@ -3486,28 +3469,29 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
* generate GMBus clock. This will vary with the cdclk freq.
*/
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_de_write(dev_priv, GMBUSFREQ_VLV,
- DIV_ROUND_UP(dev_priv->display.cdclk.hw.cdclk, 1000));
+ intel_de_write(display, GMBUSFREQ_VLV,
+ DIV_ROUND_UP(display->cdclk.hw.cdclk, 1000));
}
-static int dg1_rawclk(struct drm_i915_private *dev_priv)
+static int dg1_rawclk(struct intel_display *display)
{
/*
* DG1 always uses a 38.4 MHz rawclk. The bspec tells us
* "Program Numerator=2, Denominator=4, Divider=37 decimal."
*/
- intel_de_write(dev_priv, PCH_RAWCLK_FREQ,
+ intel_de_write(display, PCH_RAWCLK_FREQ,
CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));
return 38400;
}
-static int cnp_rawclk(struct drm_i915_private *dev_priv)
+static int cnp_rawclk(struct intel_display *display)
{
- u32 rawclk;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int divider, fraction;
+ u32 rawclk;
- if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
+ if (intel_de_read(display, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
/* 24 MHz */
divider = 24000;
fraction = 0;
@@ -3527,37 +3511,42 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
- intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk);
+ intel_de_write(display, PCH_RAWCLK_FREQ, rawclk);
return divider + fraction;
}
-static int pch_rawclk(struct drm_i915_private *dev_priv)
+static int pch_rawclk(struct intel_display *display)
{
- return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
+ return (intel_de_read(display, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}
-static int vlv_hrawclk(struct drm_i915_private *dev_priv)
+static int vlv_hrawclk(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
/* RAWCLK_FREQ_VLV register updated from power well code */
return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
CCK_DISPLAY_REF_CLOCK_CONTROL);
}
-static int i9xx_hrawclk(struct drm_i915_private *i915)
+static int i9xx_hrawclk(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
/* hrawclock is 1/4 the FSB frequency */
return DIV_ROUND_CLOSEST(i9xx_fsb_freq(i915), 4);
}
/**
* intel_read_rawclk - Determine the current RAWCLK frequency
- * @dev_priv: i915 device
+ * @display: display instance
*
* Determine the current RAWCLK frequency. RAWCLK is a fixed
* frequency clock so this needs to done only once.
*/
-u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
+u32 intel_read_rawclk(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 freq;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTL)
@@ -3568,15 +3557,15 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
*/
freq = 38400;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
- freq = dg1_rawclk(dev_priv);
+ freq = dg1_rawclk(display);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- freq = cnp_rawclk(dev_priv);
+ freq = cnp_rawclk(display);
else if (HAS_PCH_SPLIT(dev_priv))
- freq = pch_rawclk(dev_priv);
+ freq = pch_rawclk(display);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- freq = vlv_hrawclk(dev_priv);
- else if (DISPLAY_VER(dev_priv) >= 3)
- freq = i9xx_hrawclk(dev_priv);
+ freq = vlv_hrawclk(display);
+ else if (DISPLAY_VER(display) >= 3)
+ freq = i9xx_hrawclk(display);
else
/* no rawclk on other platforms, or no need to know it */
return 0;
@@ -3586,25 +3575,32 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
static int i915_cdclk_info_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
+ struct intel_display *display = m->private;
- seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
- seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
- seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->display.cdclk.max_dotclk_freq);
+ seq_printf(m, "Current CD clock frequency: %d kHz\n", display->cdclk.hw.cdclk);
+ seq_printf(m, "Max CD clock frequency: %d kHz\n", display->cdclk.max_cdclk_freq);
+ seq_printf(m, "Max pixel clock frequency: %d kHz\n", display->cdclk.max_dotclk_freq);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info);
-void intel_cdclk_debugfs_register(struct drm_i915_private *i915)
+void intel_cdclk_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root,
- i915, &i915_cdclk_info_fops);
+ display, &i915_cdclk_info_fops);
}
+static const struct intel_cdclk_funcs xe3lpd_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = xe3lpd_calc_voltage_level,
+};
+
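+
The new xe3lpd table reuses the bxt get/set/modeset hooks and swaps in only xe3lpd_calc_voltage_level, which is the point of these ops tables: intel_init_cdclk_hooks() picks one per platform at init and everything else dispatches through the function pointers. A generic sketch of that dispatch shape, with invented names:

#include <stdio.h>

struct cdclk_funcs {
	int  (*get_cdclk)(void);
	void (*set_cdclk)(int khz);
};

static int  common_get(void)    { return 652800; }
static void common_set(int khz) { printf("set %d kHz\n", khz); }

/* One table per platform family; entries can share implementations. */
static const struct cdclk_funcs platform_a = {
	.get_cdclk = common_get,
	.set_cdclk = common_set,
};

int main(void)
{
	const struct cdclk_funcs *funcs = &platform_a; /* chosen at init */

	funcs->set_cdclk(funcs->get_cdclk());
	return 0;
}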
static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
@@ -3743,97 +3739,102 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
/**
* intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks
- * @dev_priv: i915 device
+ * @display: display instance
*/
-void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 20) {
- dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
- dev_priv->display.cdclk.table = xe2lpd_cdclk_table;
- } else if (DISPLAY_VER_FULL(dev_priv) >= IP_VER(14, 1)) {
- dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
- dev_priv->display.cdclk.table = xe2hpd_cdclk_table;
- } else if (DISPLAY_VER(dev_priv) >= 14) {
- dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
- dev_priv->display.cdclk.table = mtl_cdclk_table;
+void intel_init_cdclk_hooks(struct intel_display *display)
+{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 30) {
+ display->funcs.cdclk = &xe3lpd_cdclk_funcs;
+ display->cdclk.table = xe3lpd_cdclk_table;
+ } else if (DISPLAY_VER(display) >= 20) {
+ display->funcs.cdclk = &rplu_cdclk_funcs;
+ display->cdclk.table = xe2lpd_cdclk_table;
+ } else if (DISPLAY_VERx100(display) >= 1401) {
+ display->funcs.cdclk = &rplu_cdclk_funcs;
+ display->cdclk.table = xe2hpd_cdclk_table;
+ } else if (DISPLAY_VER(display) >= 14) {
+ display->funcs.cdclk = &rplu_cdclk_funcs;
+ display->cdclk.table = mtl_cdclk_table;
} else if (IS_DG2(dev_priv)) {
- dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
- dev_priv->display.cdclk.table = dg2_cdclk_table;
+ display->funcs.cdclk = &tgl_cdclk_funcs;
+ display->cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
/* Wa_22011320316:adl-p[a0] */
if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
- dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
- dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ display->cdclk.table = adlp_a_step_cdclk_table;
+ display->funcs.cdclk = &tgl_cdclk_funcs;
} else if (IS_RAPTORLAKE_U(dev_priv)) {
- dev_priv->display.cdclk.table = rplu_cdclk_table;
- dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
+ display->cdclk.table = rplu_cdclk_table;
+ display->funcs.cdclk = &rplu_cdclk_funcs;
} else {
- dev_priv->display.cdclk.table = adlp_cdclk_table;
- dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ display->cdclk.table = adlp_cdclk_table;
+ display->funcs.cdclk = &tgl_cdclk_funcs;
}
} else if (IS_ROCKETLAKE(dev_priv)) {
- dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
- dev_priv->display.cdclk.table = rkl_cdclk_table;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
- dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
- dev_priv->display.cdclk.table = icl_cdclk_table;
+ display->funcs.cdclk = &tgl_cdclk_funcs;
+ display->cdclk.table = rkl_cdclk_table;
+ } else if (DISPLAY_VER(display) >= 12) {
+ display->funcs.cdclk = &tgl_cdclk_funcs;
+ display->cdclk.table = icl_cdclk_table;
} else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs;
- dev_priv->display.cdclk.table = icl_cdclk_table;
- } else if (DISPLAY_VER(dev_priv) >= 11) {
- dev_priv->display.funcs.cdclk = &icl_cdclk_funcs;
- dev_priv->display.cdclk.table = icl_cdclk_table;
+ display->funcs.cdclk = &ehl_cdclk_funcs;
+ display->cdclk.table = icl_cdclk_table;
+ } else if (DISPLAY_VER(display) >= 11) {
+ display->funcs.cdclk = &icl_cdclk_funcs;
+ display->cdclk.table = icl_cdclk_table;
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- dev_priv->display.funcs.cdclk = &bxt_cdclk_funcs;
+ display->funcs.cdclk = &bxt_cdclk_funcs;
if (IS_GEMINILAKE(dev_priv))
- dev_priv->display.cdclk.table = glk_cdclk_table;
+ display->cdclk.table = glk_cdclk_table;
else
- dev_priv->display.cdclk.table = bxt_cdclk_table;
- } else if (DISPLAY_VER(dev_priv) == 9) {
- dev_priv->display.funcs.cdclk = &skl_cdclk_funcs;
+ display->cdclk.table = bxt_cdclk_table;
+ } else if (DISPLAY_VER(display) == 9) {
+ display->funcs.cdclk = &skl_cdclk_funcs;
} else if (IS_BROADWELL(dev_priv)) {
- dev_priv->display.funcs.cdclk = &bdw_cdclk_funcs;
+ display->funcs.cdclk = &bdw_cdclk_funcs;
} else if (IS_HASWELL(dev_priv)) {
- dev_priv->display.funcs.cdclk = &hsw_cdclk_funcs;
+ display->funcs.cdclk = &hsw_cdclk_funcs;
} else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.funcs.cdclk = &chv_cdclk_funcs;
+ display->funcs.cdclk = &chv_cdclk_funcs;
} else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.funcs.cdclk = &vlv_cdclk_funcs;
+ display->funcs.cdclk = &vlv_cdclk_funcs;
} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
- dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
+ display->funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_IRONLAKE(dev_priv)) {
- dev_priv->display.funcs.cdclk = &ilk_cdclk_funcs;
+ display->funcs.cdclk = &ilk_cdclk_funcs;
} else if (IS_GM45(dev_priv)) {
- dev_priv->display.funcs.cdclk = &gm45_cdclk_funcs;
+ display->funcs.cdclk = &gm45_cdclk_funcs;
} else if (IS_G45(dev_priv)) {
- dev_priv->display.funcs.cdclk = &g33_cdclk_funcs;
+ display->funcs.cdclk = &g33_cdclk_funcs;
} else if (IS_I965GM(dev_priv)) {
- dev_priv->display.funcs.cdclk = &i965gm_cdclk_funcs;
+ display->funcs.cdclk = &i965gm_cdclk_funcs;
} else if (IS_I965G(dev_priv)) {
- dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
+ display->funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
- dev_priv->display.funcs.cdclk = &pnv_cdclk_funcs;
+ display->funcs.cdclk = &pnv_cdclk_funcs;
} else if (IS_G33(dev_priv)) {
- dev_priv->display.funcs.cdclk = &g33_cdclk_funcs;
+ display->funcs.cdclk = &g33_cdclk_funcs;
} else if (IS_I945GM(dev_priv)) {
- dev_priv->display.funcs.cdclk = &i945gm_cdclk_funcs;
+ display->funcs.cdclk = &i945gm_cdclk_funcs;
} else if (IS_I945G(dev_priv)) {
- dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
+ display->funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_I915GM(dev_priv)) {
- dev_priv->display.funcs.cdclk = &i915gm_cdclk_funcs;
+ display->funcs.cdclk = &i915gm_cdclk_funcs;
} else if (IS_I915G(dev_priv)) {
- dev_priv->display.funcs.cdclk = &i915g_cdclk_funcs;
+ display->funcs.cdclk = &i915g_cdclk_funcs;
} else if (IS_I865G(dev_priv)) {
- dev_priv->display.funcs.cdclk = &i865g_cdclk_funcs;
+ display->funcs.cdclk = &i865g_cdclk_funcs;
} else if (IS_I85X(dev_priv)) {
- dev_priv->display.funcs.cdclk = &i85x_cdclk_funcs;
+ display->funcs.cdclk = &i85x_cdclk_funcs;
} else if (IS_I845G(dev_priv)) {
- dev_priv->display.funcs.cdclk = &i845g_cdclk_funcs;
+ display->funcs.cdclk = &i845g_cdclk_funcs;
} else if (IS_I830(dev_priv)) {
- dev_priv->display.funcs.cdclk = &i830_cdclk_funcs;
+ display->funcs.cdclk = &i830_cdclk_funcs;
}
- if (drm_WARN(&dev_priv->drm, !dev_priv->display.funcs.cdclk,
+ if (drm_WARN(display->drm, !display->funcs.cdclk,
"Unknown platform. Assuming i830\n"))
- dev_priv->display.funcs.cdclk = &i830_cdclk_funcs;
+ display->funcs.cdclk = &i830_cdclk_funcs;
}
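One detail worth calling out in the ladder above: the old DISPLAY_VER_FULL(dev_priv) >= IP_VER(14, 1) test becomes DISPLAY_VERx100(display) >= 1401, i.e. the packed (ver, rel) comparison is flattened to ver * 100 + rel. A standalone check of that equivalence (display_verx100() here is a stand-in for the real macro, which is assumed to be defined this way elsewhere in the series):

#include <assert.h>

/* stand-in for DISPLAY_VERx100(): flatten display IP (ver, rel) to ver*100+rel */
static int display_verx100(int ver, int rel)
{
	return ver * 100 + rel;
}

int main(void)
{
	/* Xe2-HPD is display IP 14.1, hence the >= 1401 check */
	assert(display_verx100(14, 1) == 1401);
	assert(display_verx100(20, 0) > 1401);
	return 0;
}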
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index cfdcdec07a4d..6b0e7a41eba3 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -11,9 +11,9 @@
#include "intel_display_limits.h"
#include "intel_global_state.h"
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc_state;
+struct intel_display;
struct intel_cdclk_config {
unsigned int cdclk, vco, ref, bypass;
@@ -59,24 +59,24 @@ struct intel_cdclk_state {
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
-void intel_cdclk_init_hw(struct drm_i915_private *i915);
-void intel_cdclk_uninit_hw(struct drm_i915_private *i915);
-void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
-void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_cdclk(struct drm_i915_private *dev_priv);
-u32 intel_read_rawclk(struct drm_i915_private *dev_priv);
+void intel_cdclk_init_hw(struct intel_display *display);
+void intel_cdclk_uninit_hw(struct intel_display *display);
+void intel_init_cdclk_hooks(struct intel_display *display);
+void intel_update_max_cdclk(struct intel_display *display);
+void intel_update_cdclk(struct intel_display *display);
+u32 intel_read_rawclk(struct intel_display *display);
bool intel_cdclk_clock_changed(const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b);
-int intel_mdclk_cdclk_ratio(struct drm_i915_private *i915,
+int intel_mdclk_cdclk_ratio(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config);
bool intel_cdclk_is_decreasing_later(struct intel_atomic_state *state);
void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state);
void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
-void intel_cdclk_dump_config(struct drm_i915_private *i915,
+void intel_cdclk_dump_config(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
const char *context);
int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
-void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
+void intel_cdclk_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config);
int intel_cdclk_atomic_check(struct intel_atomic_state *state,
bool *need_cdclk_calc);
@@ -88,11 +88,11 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
container_of_const((global_state), struct intel_cdclk_state, base)
#define intel_atomic_get_old_cdclk_state(state) \
- to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
+ to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->cdclk.obj))
#define intel_atomic_get_new_cdclk_state(state) \
- to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
+ to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->cdclk.obj))
-int intel_cdclk_init(struct drm_i915_private *dev_priv);
-void intel_cdclk_debugfs_register(struct drm_i915_private *i915);
+int intel_cdclk_init(struct intel_display *display);
+void intel_cdclk_debugfs_register(struct intel_display *display);
#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 5d701f48351b..2f51eccdb27a 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -22,6 +22,7 @@
*
*/
+#include "i915_drv.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
@@ -39,7 +40,8 @@ struct intel_color_funcs {
* the next vblank start, alongside any other double buffered
* registers involved with the same commit. This hook is optional.
*/
- void (*color_commit_noarm)(const struct intel_crtc_state *crtc_state);
+ void (*color_commit_noarm)(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
/*
* Program arming double buffered color management registers
* during vblank evasion. The registers (and whatever other registers
@@ -47,7 +49,8 @@ struct intel_color_funcs {
* during the next vblank start, alongside any other double buffered
* registers involved with the same commit.
*/
- void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
+ void (*color_commit_arm)(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
/*
* Perform any extra tasks needed after all the
* double buffered registers have been latched.
@@ -205,74 +208,81 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
return result;
}
-static void ilk_update_pipe_csc(struct intel_crtc *crtc,
+static void ilk_update_pipe_csc(struct intel_dsb *dsb,
+ struct intel_crtc *crtc,
const struct intel_csc_matrix *csc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(i915, PIPE_CSC_PREOFF_HI(pipe), csc->preoff[0]);
- intel_de_write_fw(i915, PIPE_CSC_PREOFF_ME(pipe), csc->preoff[1]);
- intel_de_write_fw(i915, PIPE_CSC_PREOFF_LO(pipe), csc->preoff[2]);
-
- intel_de_write_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe),
- csc->coeff[0] << 16 | csc->coeff[1]);
- intel_de_write_fw(i915, PIPE_CSC_COEFF_BY(pipe),
- csc->coeff[2] << 16);
-
- intel_de_write_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe),
- csc->coeff[3] << 16 | csc->coeff[4]);
- intel_de_write_fw(i915, PIPE_CSC_COEFF_BU(pipe),
- csc->coeff[5] << 16);
-
- intel_de_write_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe),
- csc->coeff[6] << 16 | csc->coeff[7]);
- intel_de_write_fw(i915, PIPE_CSC_COEFF_BV(pipe),
- csc->coeff[8] << 16);
-
- if (DISPLAY_VER(i915) < 7)
+ intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_HI(pipe),
+ csc->preoff[0]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_ME(pipe),
+ csc->preoff[1]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_PREOFF_LO(pipe),
+ csc->preoff[2]);
+
+ intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RY_GY(pipe),
+ csc->coeff[0] << 16 | csc->coeff[1]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BY(pipe),
+ csc->coeff[2] << 16);
+
+ intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RU_GU(pipe),
+ csc->coeff[3] << 16 | csc->coeff[4]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BU(pipe),
+ csc->coeff[5] << 16);
+
+ intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_RV_GV(pipe),
+ csc->coeff[6] << 16 | csc->coeff[7]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_COEFF_BV(pipe),
+ csc->coeff[8] << 16);
+
+ if (DISPLAY_VER(display) < 7)
return;
- intel_de_write_fw(i915, PIPE_CSC_POSTOFF_HI(pipe), csc->postoff[0]);
- intel_de_write_fw(i915, PIPE_CSC_POSTOFF_ME(pipe), csc->postoff[1]);
- intel_de_write_fw(i915, PIPE_CSC_POSTOFF_LO(pipe), csc->postoff[2]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_HI(pipe),
+ csc->postoff[0]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_ME(pipe),
+ csc->postoff[1]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_POSTOFF_LO(pipe),
+ csc->postoff[2]);
}
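The new dsb argument lets the same CSC programming land either in a DSB command buffer or go straight to the hardware. Judging from how ilk_lut_write() later in this patch branches on the presence of a DSB, intel_de_write_dsb() is assumed to behave roughly like this sketch:

/* assumed shape of the helper: queue into the DSB when one is provided,
 * otherwise fall back to an immediate unlocked MMIO write */
static inline void
intel_de_write_dsb_sketch(struct intel_display *display, struct intel_dsb *dsb,
			  i915_reg_t reg, u32 val)
{
	if (dsb)
		intel_dsb_reg_write(dsb, reg, val);
	else
		intel_de_write_fw(display, reg, val);
}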
static void ilk_read_pipe_csc(struct intel_crtc *crtc,
struct intel_csc_matrix *csc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
u32 tmp;
- csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(pipe));
- csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_ME(pipe));
- csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_LO(pipe));
+ csc->preoff[0] = intel_de_read_fw(display, PIPE_CSC_PREOFF_HI(pipe));
+ csc->preoff[1] = intel_de_read_fw(display, PIPE_CSC_PREOFF_ME(pipe));
+ csc->preoff[2] = intel_de_read_fw(display, PIPE_CSC_PREOFF_LO(pipe));
- tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RY_GY(pipe));
csc->coeff[0] = tmp >> 16;
csc->coeff[1] = tmp & 0xffff;
- tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BY(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BY(pipe));
csc->coeff[2] = tmp >> 16;
- tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RU_GU(pipe));
csc->coeff[3] = tmp >> 16;
csc->coeff[4] = tmp & 0xffff;
- tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BU(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BU(pipe));
csc->coeff[5] = tmp >> 16;
- tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_RV_GV(pipe));
csc->coeff[6] = tmp >> 16;
csc->coeff[7] = tmp & 0xffff;
- tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BV(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_COEFF_BV(pipe));
csc->coeff[8] = tmp >> 16;
- if (DISPLAY_VER(i915) < 7)
+ if (DISPLAY_VER(display) < 7)
return;
- csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_HI(pipe));
- csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_ME(pipe));
- csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_LO(pipe));
+ csc->postoff[0] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_HI(pipe));
+ csc->postoff[1] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_ME(pipe));
+ csc->postoff[2] = intel_de_read_fw(display, PIPE_CSC_POSTOFF_LO(pipe));
}
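The write/read pairs above rely on two 16-bit coefficients sharing one 32-bit register, high halfword first. A self-contained round-trip check of that packing:

#include <assert.h>
#include <stdint.h>

/* pack two 16-bit CSC coefficients the way PIPE_CSC_COEFF_* registers do */
static uint32_t csc_pack(uint16_t hi, uint16_t lo)
{
	return (uint32_t)hi << 16 | lo;
}

int main(void)
{
	uint32_t tmp = csc_pack(0x3000, 0x0123);

	assert((tmp >> 16) == 0x3000);    /* coeff[0] on readback */
	assert((tmp & 0xffff) == 0x0123); /* coeff[1] on readback */
	return 0;
}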
static void ilk_read_csc(struct intel_crtc_state *crtc_state)
@@ -304,68 +314,75 @@ static void skl_read_csc(struct intel_crtc_state *crtc_state)
ilk_read_pipe_csc(crtc, &crtc_state->csc);
}
-static void icl_update_output_csc(struct intel_crtc *crtc,
+static void icl_update_output_csc(struct intel_dsb *dsb,
+ struct intel_crtc *crtc,
const struct intel_csc_matrix *csc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), csc->preoff[0]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), csc->preoff[1]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), csc->preoff[2]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_HI(pipe),
+ csc->preoff[0]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_ME(pipe),
+ csc->preoff[1]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_PREOFF_LO(pipe),
+ csc->preoff[2]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
- csc->coeff[0] << 16 | csc->coeff[1]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
- csc->coeff[2] << 16);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
+ csc->coeff[0] << 16 | csc->coeff[1]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
+ csc->coeff[2] << 16);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
- csc->coeff[3] << 16 | csc->coeff[4]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
- csc->coeff[5] << 16);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
+ csc->coeff[3] << 16 | csc->coeff[4]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
+ csc->coeff[5] << 16);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
- csc->coeff[6] << 16 | csc->coeff[7]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
- csc->coeff[8] << 16);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
+ csc->coeff[6] << 16 | csc->coeff[7]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
+ csc->coeff[8] << 16);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), csc->postoff[0]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), csc->postoff[1]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), csc->postoff[2]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe),
+ csc->postoff[0]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe),
+ csc->postoff[1]);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe),
+ csc->postoff[2]);
}
static void icl_read_output_csc(struct intel_crtc *crtc,
struct intel_csc_matrix *csc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
u32 tmp;
- csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe));
- csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe));
- csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe));
+ csc->preoff[0] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_HI(pipe));
+ csc->preoff[1] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_ME(pipe));
+ csc->preoff[2] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_PREOFF_LO(pipe));
- tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe));
csc->coeff[0] = tmp >> 16;
csc->coeff[1] = tmp & 0xffff;
- tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BY(pipe));
csc->coeff[2] = tmp >> 16;
- tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe));
csc->coeff[3] = tmp >> 16;
csc->coeff[4] = tmp & 0xffff;
- tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BU(pipe));
csc->coeff[5] = tmp >> 16;
- tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe));
csc->coeff[6] = tmp >> 16;
csc->coeff[7] = tmp & 0xffff;
- tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe));
+ tmp = intel_de_read_fw(display, PIPE_CSC_OUTPUT_COEFF_BV(pipe));
csc->coeff[8] = tmp >> 16;
- csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe));
- csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe));
- csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe));
+ csc->postoff[0] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe));
+ csc->postoff[1] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe));
+ csc->postoff[2] = intel_de_read_fw(display, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe));
}
static void icl_read_csc(struct intel_crtc_state *crtc_state)
@@ -386,14 +403,15 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state)
static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_i915_private *i915 = to_i915(display->drm);
/* icl+ have dedicated output CSC */
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return false;
/* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */
- if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915))
+ if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915))
return false;
return crtc_state->limited_color_range;
@@ -401,7 +419,7 @@ static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
static bool ilk_lut_limited_range(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (!ilk_limited_range(crtc_state))
return false;
@@ -409,7 +427,7 @@ static bool ilk_lut_limited_range(const struct intel_crtc_state *crtc_state)
if (crtc_state->c8_planes)
return false;
- if (DISPLAY_VER(i915) == 10)
+ if (DISPLAY_VER(display) == 10)
return crtc_state->hw.gamma_lut;
else
return crtc_state->hw.gamma_lut &&
@@ -424,13 +442,13 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
return !ilk_lut_limited_range(crtc_state);
}
-static void ilk_csc_copy(struct drm_i915_private *i915,
+static void ilk_csc_copy(struct intel_display *display,
struct intel_csc_matrix *dst,
const struct intel_csc_matrix *src)
{
*dst = *src;
- if (DISPLAY_VER(i915) < 7)
+ if (DISPLAY_VER(display) < 7)
memset(dst->postoff, 0, sizeof(dst->postoff));
}
@@ -438,7 +456,7 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
struct intel_csc_matrix *csc,
bool limited_color_range)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
const u64 *input;
u64 temp[9];
@@ -446,9 +464,9 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
/* for preoff/postoff */
if (limited_color_range)
- ilk_csc_copy(i915, csc, &ilk_csc_matrix_limited_range);
+ ilk_csc_copy(display, csc, &ilk_csc_matrix_limited_range);
else
- ilk_csc_copy(i915, csc, &ilk_csc_matrix_identity);
+ ilk_csc_copy(display, csc, &ilk_csc_matrix_identity);
if (limited_color_range)
input = ctm_mult_by_limited(temp, ctm->matrix);
@@ -496,21 +514,22 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
if (crtc_state->hw.ctm) {
- drm_WARN_ON(&i915->drm, !crtc_state->csc_enable);
+ drm_WARN_ON(display->drm, !crtc_state->csc_enable);
ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, limited_color_range);
} else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
- drm_WARN_ON(&i915->drm, !crtc_state->csc_enable);
+ drm_WARN_ON(display->drm, !crtc_state->csc_enable);
- ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_rgb_to_ycbcr);
+ ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_rgb_to_ycbcr);
} else if (limited_color_range) {
- drm_WARN_ON(&i915->drm, !crtc_state->csc_enable);
+ drm_WARN_ON(display->drm, !crtc_state->csc_enable);
- ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_limited_range);
+ ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_limited_range);
} else if (crtc_state->csc_enable) {
/*
* On GLK both pipe CSC and degamma LUT are controlled
@@ -518,60 +537,62 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
* LUT is needed but CSC is not, we need to load an
* identity matrix.
*/
- drm_WARN_ON(&i915->drm, !IS_GEMINILAKE(i915));
+ drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915));
- ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_identity);
+ ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity);
} else {
intel_csc_clear(&crtc_state->csc);
}
}
-static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (crtc_state->csc_enable)
- ilk_update_pipe_csc(crtc, &crtc_state->csc);
+ ilk_update_pipe_csc(dsb, crtc, &crtc_state->csc);
}
static void icl_assign_csc(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (crtc_state->hw.ctm) {
- drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) == 0);
+ drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) == 0);
ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, false);
} else {
- drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) != 0);
+ drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) != 0);
intel_csc_clear(&crtc_state->csc);
}
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
- drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0);
+ drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0);
- ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_rgb_to_ycbcr);
+ ilk_csc_copy(display, &crtc_state->output_csc, &ilk_csc_matrix_rgb_to_ycbcr);
} else if (crtc_state->limited_color_range) {
- drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0);
+ drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0);
- ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_limited_range);
+ ilk_csc_copy(display, &crtc_state->output_csc, &ilk_csc_matrix_limited_range);
} else {
- drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) != 0);
+ drm_WARN_ON(display->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) != 0);
intel_csc_clear(&crtc_state->output_csc);
}
}
-static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void icl_load_csc_matrix(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (crtc_state->csc_mode & ICL_CSC_ENABLE)
- ilk_update_pipe_csc(crtc, &crtc_state->csc);
+ ilk_update_pipe_csc(dsb, crtc, &crtc_state->csc);
if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE)
- icl_update_output_csc(crtc, &crtc_state->output_csc);
+ icl_update_output_csc(dsb, crtc, &crtc_state->output_csc);
}
static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits)
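For context on the data these conversion routines consume: DRM CTM coefficients are S31.32 fixed point in sign-magnitude form, with the sign in bit 63, while the pipe hardware wants two's complement. A simplified standalone version of the conversion (truncating; the kernel helper named above is assumed to also round and clamp):

#include <assert.h>
#include <stdint.h>

/* sign-magnitude S31.32 -> two's-complement int_bits.frac_bits, truncated */
static uint16_t ctm_to_twos(uint64_t coeff, int int_bits, int frac_bits)
{
	int64_t c = (int64_t)(coeff & ~(1ULL << 63)) >> (32 - frac_bits);

	if (coeff & (1ULL << 63)) /* bit 63 carries the sign */
		c = -c;
	return (uint16_t)(c & ((1u << (int_bits + frac_bits)) - 1));
}

int main(void)
{
	/* -1.0 in S31.32 sign-magnitude: sign bit plus magnitude 1 << 32 */
	uint64_t minus_one = (1ULL << 63) | (1ULL << 32);

	/* in a 1.7 two's-complement format, -1.0 encodes as 0x80 */
	assert(ctm_to_twos(minus_one, 1, 7) == 0x80);
	return 0;
}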
@@ -614,51 +635,51 @@ static void vlv_wgc_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void vlv_load_wgc_csc(struct intel_crtc *crtc,
const struct intel_csc_matrix *csc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe),
+ intel_de_write_fw(display, PIPE_WGC_C01_C00(display, pipe),
csc->coeff[1] << 16 | csc->coeff[0]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe),
+ intel_de_write_fw(display, PIPE_WGC_C02(display, pipe),
csc->coeff[2]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe),
+ intel_de_write_fw(display, PIPE_WGC_C11_C10(display, pipe),
csc->coeff[4] << 16 | csc->coeff[3]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe),
+ intel_de_write_fw(display, PIPE_WGC_C12(display, pipe),
csc->coeff[5]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe),
+ intel_de_write_fw(display, PIPE_WGC_C21_C20(display, pipe),
csc->coeff[7] << 16 | csc->coeff[6]);
- intel_de_write_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe),
+ intel_de_write_fw(display, PIPE_WGC_C22(display, pipe),
csc->coeff[8]);
}
static void vlv_read_wgc_csc(struct intel_crtc *crtc,
struct intel_csc_matrix *csc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C01_C00(dev_priv, pipe));
+ tmp = intel_de_read_fw(display, PIPE_WGC_C01_C00(display, pipe));
csc->coeff[0] = tmp & 0xffff;
csc->coeff[1] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C02(dev_priv, pipe));
+ tmp = intel_de_read_fw(display, PIPE_WGC_C02(display, pipe));
csc->coeff[2] = tmp & 0xffff;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C11_C10(dev_priv, pipe));
+ tmp = intel_de_read_fw(display, PIPE_WGC_C11_C10(display, pipe));
csc->coeff[3] = tmp & 0xffff;
csc->coeff[4] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C12(dev_priv, pipe));
+ tmp = intel_de_read_fw(display, PIPE_WGC_C12(display, pipe));
csc->coeff[5] = tmp & 0xffff;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C21_C20(dev_priv, pipe));
+ tmp = intel_de_read_fw(display, PIPE_WGC_C21_C20(display, pipe));
csc->coeff[6] = tmp & 0xffff;
csc->coeff[7] = tmp >> 16;
- tmp = intel_de_read_fw(dev_priv, PIPE_WGC_C22(dev_priv, pipe));
+ tmp = intel_de_read_fw(display, PIPE_WGC_C22(display, pipe));
csc->coeff[8] = tmp & 0xffff;
}
@@ -672,14 +693,14 @@ static void vlv_read_csc(struct intel_crtc_state *crtc_state)
static void vlv_assign_csc(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (crtc_state->hw.ctm) {
- drm_WARN_ON(&i915->drm, !crtc_state->wgc_enable);
+ drm_WARN_ON(display->drm, !crtc_state->wgc_enable);
vlv_wgc_csc_convert_ctm(crtc_state, &crtc_state->csc);
} else {
- drm_WARN_ON(&i915->drm, crtc_state->wgc_enable);
+ drm_WARN_ON(display->drm, crtc_state->wgc_enable);
intel_csc_clear(&crtc_state->csc);
}
@@ -716,45 +737,45 @@ static const struct intel_csc_matrix chv_cgm_csc_matrix_identity = {
static void chv_load_cgm_csc(struct intel_crtc *crtc,
const struct intel_csc_matrix *csc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF01(pipe),
+ intel_de_write_fw(display, CGM_PIPE_CSC_COEFF01(pipe),
csc->coeff[1] << 16 | csc->coeff[0]);
- intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF23(pipe),
+ intel_de_write_fw(display, CGM_PIPE_CSC_COEFF23(pipe),
csc->coeff[3] << 16 | csc->coeff[2]);
- intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF45(pipe),
+ intel_de_write_fw(display, CGM_PIPE_CSC_COEFF45(pipe),
csc->coeff[5] << 16 | csc->coeff[4]);
- intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF67(pipe),
+ intel_de_write_fw(display, CGM_PIPE_CSC_COEFF67(pipe),
csc->coeff[7] << 16 | csc->coeff[6]);
- intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF8(pipe),
+ intel_de_write_fw(display, CGM_PIPE_CSC_COEFF8(pipe),
csc->coeff[8]);
}
static void chv_read_cgm_csc(struct intel_crtc *crtc,
struct intel_csc_matrix *csc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF01(pipe));
+ tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF01(pipe));
csc->coeff[0] = tmp & 0xffff;
csc->coeff[1] = tmp >> 16;
- tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF23(pipe));
+ tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF23(pipe));
csc->coeff[2] = tmp & 0xffff;
csc->coeff[3] = tmp >> 16;
- tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF45(pipe));
+ tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF45(pipe));
csc->coeff[4] = tmp & 0xffff;
csc->coeff[5] = tmp >> 16;
- tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF67(pipe));
+ tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF67(pipe));
csc->coeff[6] = tmp & 0xffff;
csc->coeff[7] = tmp >> 16;
- tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF8(pipe));
+ tmp = intel_de_read_fw(display, CGM_PIPE_CSC_COEFF8(pipe));
csc->coeff[8] = tmp & 0xffff;
}
@@ -768,16 +789,16 @@ static void chv_read_csc(struct intel_crtc_state *crtc_state)
static void chv_assign_csc(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- drm_WARN_ON(&i915->drm, crtc_state->wgc_enable);
+ drm_WARN_ON(display->drm, crtc_state->wgc_enable);
if (crtc_state->hw.ctm) {
- drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0);
+ drm_WARN_ON(display->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0);
chv_cgm_csc_convert_ctm(crtc_state, &crtc_state->csc);
} else {
- drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0);
+ drm_WARN_ON(display->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0);
crtc_state->csc = chv_cgm_csc_matrix_identity;
}
@@ -953,7 +974,8 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
REG_FIELD_GET(PREC_PALETTE_12P4_BLUE_LDW_MASK, ldw);
}
-static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+static void icl_color_commit_noarm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
/*
* Despite Wa_1406463849, ICL no longer suffers from the SKL
@@ -963,10 +985,11 @@ static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
*
* On TGL+ all CSC arming issues have been properly fixed.
*/
- icl_load_csc_matrix(crtc_state);
+ icl_load_csc_matrix(dsb, crtc_state);
}
-static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+static void skl_color_commit_noarm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
/*
* Possibly related to display WA #1184, SKL CSC loses the latched
@@ -979,72 +1002,76 @@ static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
* which is called after PSR exit.
*/
if (!crtc_state->has_psr)
- ilk_load_csc_matrix(crtc_state);
+ ilk_load_csc_matrix(dsb, crtc_state);
}
-static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+static void ilk_color_commit_noarm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
- ilk_load_csc_matrix(crtc_state);
+ ilk_load_csc_matrix(dsb, crtc_state);
}
-static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state)
+static void i9xx_color_commit_arm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
/* update TRANSCONF GAMMA_MODE */
i9xx_set_pipeconf(crtc_state);
}
-static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state)
+static void ilk_color_commit_arm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
/* update TRANSCONF GAMMA_MODE */
ilk_set_pipeconf(crtc_state);
- intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
+ intel_de_write_fw(display, PIPE_CSC_MODE(crtc->pipe),
crtc_state->csc_mode);
}
-static void hsw_color_commit_arm(const struct intel_crtc_state *crtc_state)
+static void hsw_color_commit_arm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- intel_de_write(i915, GAMMA_MODE(crtc->pipe),
+ intel_de_write(display, GAMMA_MODE(crtc->pipe),
crtc_state->gamma_mode);
- intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
+ intel_de_write_fw(display, PIPE_CSC_MODE(crtc->pipe),
crtc_state->csc_mode);
}
static u32 hsw_read_gamma_mode(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- return intel_de_read(i915, GAMMA_MODE(crtc->pipe));
+ return intel_de_read(display, GAMMA_MODE(crtc->pipe));
}
static u32 ilk_read_csc_mode(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
- return intel_de_read(i915, PIPE_CSC_MODE(crtc->pipe));
+ return intel_de_read(display, PIPE_CSC_MODE(crtc->pipe));
}
static void i9xx_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 tmp;
- tmp = intel_de_read(dev_priv, DSPCNTR(dev_priv, i9xx_plane));
+ tmp = intel_de_read(display, DSPCNTR(display, i9xx_plane));
if (tmp & DISP_PIPE_GAMMA_ENABLE)
crtc_state->gamma_enable = true;
- if (!HAS_GMCH(dev_priv) && tmp & DISP_PIPE_CSC_ENABLE)
+ if (!HAS_GMCH(display) && tmp & DISP_PIPE_CSC_ENABLE)
crtc_state->csc_enable = true;
}
@@ -1060,14 +1087,14 @@ static void hsw_get_config(struct intel_crtc_state *crtc_state)
static void skl_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 tmp;
crtc_state->gamma_mode = hsw_read_gamma_mode(crtc);
crtc_state->csc_mode = ilk_read_csc_mode(crtc);
- tmp = intel_de_read(i915, SKL_BOTTOM_COLOR(crtc->pipe));
+ tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe));
if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
crtc_state->gamma_enable = true;
@@ -1076,15 +1103,16 @@ static void skl_get_config(struct intel_crtc_state *crtc_state)
crtc_state->csc_enable = true;
}
-static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+static void skl_color_commit_arm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val = 0;
if (crtc_state->has_psr)
- ilk_load_csc_matrix(crtc_state);
+ ilk_load_csc_matrix(dsb, crtc_state);
/*
* We don't (yet) allow userspace to control the pipe background color,
@@ -1095,38 +1123,35 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
if (crtc_state->csc_enable)
val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
- intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), val);
+ intel_de_write_dsb(display, dsb, SKL_BOTTOM_COLOR(pipe), val);
- intel_de_write(i915, GAMMA_MODE(crtc->pipe),
- crtc_state->gamma_mode);
+ intel_de_write_dsb(display, dsb, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
- intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
- crtc_state->csc_mode);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
}
-static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+static void icl_color_commit_arm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black.
*/
- intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
+ intel_de_write_dsb(display, dsb, SKL_BOTTOM_COLOR(pipe), 0);
- intel_de_write(i915, GAMMA_MODE(crtc->pipe),
- crtc_state->gamma_mode);
+ intel_de_write_dsb(display, dsb, GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
- intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
- crtc_state->csc_mode);
+ intel_de_write_dsb(display, dsb, PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
}
static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/*
* Despite Wa_1406463849, ICL CSC is no longer disarmed by
@@ -1142,17 +1167,17 @@ static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
*
* TGL+ no longer need this workaround.
*/
- intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe));
+ intel_de_read_fw(display, PIPE_CSC_PREOFF_HI(crtc->pipe));
}
static struct drm_property_blob *
-create_linear_lut(struct drm_i915_private *i915, int lut_size)
+create_linear_lut(struct intel_display *display, int lut_size)
{
struct drm_property_blob *blob;
struct drm_color_lut *lut;
int i;
- blob = drm_property_create_blob(&i915->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * lut_size,
NULL);
if (IS_ERR(blob))
@@ -1180,7 +1205,7 @@ static u16 lut_limited_range(unsigned int value)
}
static struct drm_property_blob *
-create_resized_lut(struct drm_i915_private *i915,
+create_resized_lut(struct intel_display *display,
const struct drm_property_blob *blob_in, int lut_out_size,
bool limited_color_range)
{
@@ -1189,7 +1214,7 @@ create_resized_lut(struct drm_i915_private *i915,
const struct drm_color_lut *lut_in;
struct drm_color_lut *lut_out;
- blob_out = drm_property_create_blob(&i915->drm,
+ blob_out = drm_property_create_blob(display->drm,
sizeof(lut_out[0]) * lut_out_size,
NULL);
if (IS_ERR(blob_out))
@@ -1217,7 +1242,7 @@ create_resized_lut(struct drm_i915_private *i915,
static void i9xx_load_lut_8(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct drm_color_lut *lut;
enum pipe pipe = crtc->pipe;
int i;
@@ -1228,24 +1253,24 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc,
lut = blob->data;
for (i = 0; i < 256; i++)
- intel_de_write_fw(dev_priv, PALETTE(dev_priv, pipe, i),
+ intel_de_write_fw(display, PALETTE(display, pipe, i),
i9xx_lut_8(&lut[i]));
}
static void i9xx_load_lut_10(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- intel_de_write_fw(dev_priv,
- PALETTE(dev_priv, pipe, 2 * i + 0),
+ intel_de_write_fw(display,
+ PALETTE(display, pipe, 2 * i + 0),
i9xx_lut_10_ldw(&lut[i]));
- intel_de_write_fw(dev_priv,
- PALETTE(dev_priv, pipe, 2 * i + 1),
+ intel_de_write_fw(display,
+ PALETTE(display, pipe, 2 * i + 1),
i9xx_lut_10_udw(&lut[i]));
}
}
@@ -1271,23 +1296,23 @@ static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
static void i965_load_lut_10p6(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- intel_de_write_fw(dev_priv,
- PALETTE(dev_priv, pipe, 2 * i + 0),
+ intel_de_write_fw(display,
+ PALETTE(display, pipe, 2 * i + 0),
i965_lut_10p6_ldw(&lut[i]));
- intel_de_write_fw(dev_priv,
- PALETTE(dev_priv, pipe, 2 * i + 1),
+ intel_de_write_fw(display,
+ PALETTE(display, pipe, 2 * i + 1),
i965_lut_10p6_udw(&lut[i]));
}
- intel_de_write_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 0), lut[i].red);
- intel_de_write_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 1), lut[i].green);
- intel_de_write_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 2), lut[i].blue);
+ intel_de_write_fw(display, PIPEGCMAX(display, pipe, 0), lut[i].red);
+ intel_de_write_fw(display, PIPEGCMAX(display, pipe, 1), lut[i].green);
+ intel_de_write_fw(display, PIPEGCMAX(display, pipe, 2), lut[i].blue);
}
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
@@ -1311,12 +1336,23 @@ static void i965_load_luts(const struct intel_crtc_state *crtc_state)
static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
i915_reg_t reg, u32 val)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (crtc_state->dsb_color_vblank)
intel_dsb_reg_write(crtc_state->dsb_color_vblank, reg, val);
else
- intel_de_write_fw(i915, reg, val);
+ intel_de_write_fw(display, reg, val);
+}
+
+static void ilk_lut_write_indexed(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (crtc_state->dsb_color_vblank)
+ intel_dsb_reg_write_indexed(crtc_state->dsb_color_vblank, reg, val);
+ else
+ intel_de_write_fw(display, reg, val);
}
static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
@@ -1333,19 +1369,29 @@ static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
lut = blob->data;
/*
- * DSB fails to correctly load the legacy LUT
- * unless we either write each entry twice,
- * or use non-posted writes
+ * DSB fails to correctly load the legacy LUT unless
+ * we either write each entry twice when using posted
+ * writes, or we use non-posted writes.
+ *
+ * If palette anti-collision is active during LUT
+ * register writes:
+ * - posted writes simply get dropped and thus the LUT
+ * contents may not be correctly updated
+ * - non-posted writes are blocked and thus the LUT
+ * contents are always correct, but simultaneous CPU
+ * MMIO access will start to fail
+ *
+ * Choose the lesser of two evils and use posted writes.
+ * Using posted writes is also faster, even when having
+ * to write each register twice.
*/
- if (crtc_state->dsb_color_vblank)
- intel_dsb_nonpost_start(crtc_state->dsb_color_vblank);
-
- for (i = 0; i < 256; i++)
+ for (i = 0; i < 256; i++) {
ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
i9xx_lut_8(&lut[i]));
-
- if (crtc_state->dsb_color_vblank)
- intel_dsb_nonpost_end(crtc_state->dsb_color_vblank);
+ if (crtc_state->dsb_color_vblank)
+ ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+ }
}
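For contrast, the approach this hunk removes wrapped a single-write loop in a non-posted section, trading dropped writes for blocked ones. Condensed from the deleted lines:

/* removed alternative: non-posted writes, so palette anti-collision blocks
 * the LUT writes instead of silently dropping them */
if (crtc_state->dsb_color_vblank)
	intel_dsb_nonpost_start(crtc_state->dsb_color_vblank);
for (i = 0; i < 256; i++)
	ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i), i9xx_lut_8(&lut[i]));
if (crtc_state->dsb_color_vblank)
	intel_dsb_nonpost_end(crtc_state->dsb_color_vblank);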
static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state,
@@ -1434,8 +1480,8 @@ static void bdw_load_lut_10(const struct intel_crtc_state *crtc_state,
prec_index);
for (i = 0; i < lut_size; i++)
- ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_10(&lut[i]));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_10(&lut[i]));
/*
* Reset the index, otherwise it prevents the legacy palette from being
@@ -1523,9 +1569,9 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
}
}
-static int glk_degamma_lut_size(struct drm_i915_private *i915)
+static int glk_degamma_lut_size(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 131;
else
return 35;
@@ -1557,8 +1603,8 @@ static void mtl_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
@@ -1588,16 +1634,16 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
* ToDo: Extend to max 7.0. Enable 32-bit input values,
* as compared to just 16-bit, to achieve this.
*/
- ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
- DISPLAY_VER(i915) >= 14 ?
- mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
+ ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe),
+ DISPLAY_VER(display) >= 14 ?
+ mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
}
/* Clamp values > 1.0. */
- while (i++ < glk_degamma_lut_size(i915))
- ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
- DISPLAY_VER(i915) >= 14 ?
- 1 << 24 : 1 << 16);
+ while (i++ < glk_degamma_lut_size(display))
+ ilk_lut_write_indexed(crtc_state, PRE_CSC_GAMC_DATA(pipe),
+ DISPLAY_VER(display) >= 14 ?
+ 1 << 24 : 1 << 16);
ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
}
@@ -1663,10 +1709,10 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
@@ -1702,10 +1748,10 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/*
@@ -1723,10 +1769,10 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write_indexed(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
@@ -1797,15 +1843,15 @@ static void chv_cgm_degamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- intel_de_write_fw(i915, CGM_PIPE_DEGAMMA(pipe, i, 0),
+ intel_de_write_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 0),
chv_cgm_degamma_ldw(&lut[i]));
- intel_de_write_fw(i915, CGM_PIPE_DEGAMMA(pipe, i, 1),
+ intel_de_write_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 1),
chv_cgm_degamma_udw(&lut[i]));
}
}
@@ -1831,23 +1877,23 @@ static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
static void chv_load_cgm_gamma(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- intel_de_write_fw(i915, CGM_PIPE_GAMMA(pipe, i, 0),
+ intel_de_write_fw(display, CGM_PIPE_GAMMA(pipe, i, 0),
chv_cgm_gamma_ldw(&lut[i]));
- intel_de_write_fw(i915, CGM_PIPE_GAMMA(pipe, i, 1),
+ intel_de_write_fw(display, CGM_PIPE_GAMMA(pipe, i, 1),
chv_cgm_gamma_udw(&lut[i]));
}
}
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
@@ -1862,50 +1908,66 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state)
else
i965_load_luts(crtc_state);
- intel_de_write_fw(i915, CGM_PIPE_MODE(crtc->pipe),
+ intel_de_write_fw(display, CGM_PIPE_MODE(crtc->pipe),
crtc_state->cgm_mode);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (crtc_state->dsb_color_vblank)
return;
- i915->display.funcs.color->load_luts(crtc_state);
+ display->funcs.color->load_luts(crtc_state);
}
-void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+void intel_color_commit_noarm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (i915->display.funcs.color->color_commit_noarm)
- i915->display.funcs.color->color_commit_noarm(crtc_state);
+ if (display->funcs.color->color_commit_noarm)
+ display->funcs.color->color_commit_noarm(dsb, crtc_state);
}
-void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
+void intel_color_commit_arm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- i915->display.funcs.color->color_commit_arm(crtc_state);
-
- if (crtc_state->dsb_color_commit)
- intel_dsb_commit(crtc_state->dsb_color_commit, false);
+ display->funcs.color->color_commit_arm(dsb, crtc_state);
}
void intel_color_post_update(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (i915->display.funcs.color->color_post_update)
- i915->display.funcs.color->color_post_update(crtc_state);
+ if (display->funcs.color->color_post_update)
+ display->funcs.color->color_post_update(crtc_state);
+}
+
+void intel_color_modeset(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ intel_color_load_luts(crtc_state);
+ intel_color_commit_noarm(NULL, crtc_state);
+ intel_color_commit_arm(NULL, crtc_state);
+
+ if (DISPLAY_VER(display) < 9) {
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+
+ /* update DSPCNTR to configure gamma/csc for pipe bottom color */
+ plane->disable_arm(NULL, plane, crtc_state);
+ }
}
void intel_color_prepare_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -1923,30 +1985,16 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
if (!crtc_state->dsb_color_vblank)
return;
- i915->display.funcs.color->load_luts(crtc_state);
+ display->funcs.color->load_luts(crtc_state);
- intel_dsb_finish(crtc_state->dsb_color_vblank);
+ intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color_vblank);
+ intel_dsb_interrupt(crtc_state->dsb_color_vblank);
- crtc_state->dsb_color_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, 16);
- if (!crtc_state->dsb_color_commit) {
- intel_dsb_cleanup(crtc_state->dsb_color_vblank);
- crtc_state->dsb_color_vblank = NULL;
- return;
- }
-
- intel_dsb_chain(state, crtc_state->dsb_color_commit,
- crtc_state->dsb_color_vblank, true);
-
- intel_dsb_finish(crtc_state->dsb_color_commit);
+ intel_dsb_finish(crtc_state->dsb_color_vblank);
}
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb_color_commit) {
- intel_dsb_cleanup(crtc_state->dsb_color_commit);
- crtc_state->dsb_color_commit = NULL;
- }
-
if (crtc_state->dsb_color_vblank) {
intel_dsb_cleanup(crtc_state->dsb_color_vblank);
crtc_state->dsb_color_vblank = NULL;
@@ -1955,8 +2003,6 @@ void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state)
{
- if (crtc_state->dsb_color_commit)
- intel_dsb_wait(crtc_state->dsb_color_commit);
if (crtc_state->dsb_color_vblank)
intel_dsb_wait(crtc_state->dsb_color_vblank);
}
@@ -2008,7 +2054,7 @@ static bool chv_can_preload_luts(struct intel_atomic_state *state,
int intel_color_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -2024,20 +2070,19 @@ int intel_color_check(struct intel_atomic_state *state,
if (!intel_crtc_needs_color_update(new_crtc_state))
return 0;
- return i915->display.funcs.color->color_check(state, crtc);
+ return display->funcs.color->color_check(state, crtc);
}
void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- if (i915->display.funcs.color->get_config)
- i915->display.funcs.color->get_config(crtc_state);
+ display->funcs.color->get_config(crtc_state);
- i915->display.funcs.color->read_luts(crtc_state);
+ display->funcs.color->read_luts(crtc_state);
- if (i915->display.funcs.color->read_csc)
- i915->display.funcs.color->read_csc(crtc_state);
+ if (display->funcs.color->read_csc)
+ display->funcs.color->read_csc(crtc_state);
}
bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
@@ -2045,7 +2090,7 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob2,
bool is_pre_csc_lut)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/*
* FIXME c8_planes readout missing thus
@@ -2054,14 +2099,14 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
if (!is_pre_csc_lut && crtc_state->c8_planes)
return true;
- return i915->display.funcs.color->lut_equal(crtc_state, blob1, blob2,
- is_pre_csc_lut);
+ return display->funcs.color->lut_equal(crtc_state, blob1, blob2,
+ is_pre_csc_lut);
}
static bool need_plane_update(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
/*
* On pre-SKL the pipe gamma enable and pipe csc enable for
@@ -2069,15 +2114,14 @@ static bool need_plane_update(struct intel_plane *plane,
* We have to reconfigure that even if the plane is inactive.
*/
return crtc_state->active_planes & BIT(plane->id) ||
- (DISPLAY_VER(i915) < 9 &&
- plane->id == PLANE_PRIMARY);
+ (DISPLAY_VER(display) < 9 && plane->id == PLANE_PRIMARY);
}
static int
intel_color_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
@@ -2092,7 +2136,7 @@ intel_color_add_affected_planes(struct intel_atomic_state *state,
new_crtc_state->csc_enable == old_crtc_state->csc_enable)
return 0;
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ for_each_intel_plane_on_crtc(display->drm, crtc, plane) {
struct intel_plane_state *plane_state;
if (!need_plane_update(plane, new_crtc_state))
@@ -2107,7 +2151,7 @@ intel_color_add_affected_planes(struct intel_atomic_state *state,
new_crtc_state->do_async_flip = false;
/* plane control register changes blocked by CxSR */
- if (HAS_GMCH(i915))
+ if (HAS_GMCH(display))
new_crtc_state->disable_cxsr = true;
}
@@ -2116,43 +2160,44 @@ intel_color_add_affected_planes(struct intel_atomic_state *state,
static u32 intel_gamma_lut_tests(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (lut_is_legacy(gamma_lut))
return 0;
- return DISPLAY_INFO(i915)->color.gamma_lut_tests;
+ return DISPLAY_INFO(display)->color.gamma_lut_tests;
}
static u32 intel_degamma_lut_tests(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- return DISPLAY_INFO(i915)->color.degamma_lut_tests;
+ return DISPLAY_INFO(display)->color.degamma_lut_tests;
}
static int intel_gamma_lut_size(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (lut_is_legacy(gamma_lut))
return LEGACY_LUT_LENGTH;
- return DISPLAY_INFO(i915)->color.gamma_lut_size;
+ return DISPLAY_INFO(display)->color.gamma_lut_size;
}
static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- return DISPLAY_INFO(i915)->color.degamma_lut_size;
+ return DISPLAY_INFO(display)->color.degamma_lut_size;
}
-static int check_lut_size(struct drm_i915_private *i915,
+static int check_lut_size(struct intel_crtc *crtc, const char *lut_name,
const struct drm_property_blob *lut, int expected)
{
+ struct intel_display *display = to_intel_display(crtc);
int len;
if (!lut)
@@ -2160,8 +2205,9 @@ static int check_lut_size(struct drm_i915_private *i915,
len = drm_color_lut_size(lut);
if (len != expected) {
- drm_dbg_kms(&i915->drm, "Invalid LUT size; got %d, expected %d\n",
- len, expected);
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] Invalid %s LUT size; got %d, expected %d\n",
+ crtc->base.base.id, crtc->base.name, lut_name, len, expected);
return -EINVAL;
}
@@ -2171,23 +2217,25 @@ static int check_lut_size(struct drm_i915_private *i915,
static int _check_luts(const struct intel_crtc_state *crtc_state,
u32 degamma_tests, u32 gamma_tests)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
int gamma_length, degamma_length;
/* C8 relies on its palette being stored in the legacy LUT */
if (crtc_state->c8_planes && !lut_is_legacy(crtc_state->hw.gamma_lut)) {
- drm_dbg_kms(&i915->drm,
- "C8 pixelformat requires the legacy LUT\n");
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] C8 pixelformat requires the legacy LUT\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
degamma_length = intel_degamma_lut_size(crtc_state);
gamma_length = intel_gamma_lut_size(crtc_state);
- if (check_lut_size(i915, degamma_lut, degamma_length) ||
- check_lut_size(i915, gamma_lut, gamma_length))
+ if (check_lut_size(crtc, "degamma", degamma_lut, degamma_length) ||
+ check_lut_size(crtc, "gamma", gamma_lut, gamma_length))
return -EINVAL;
if (drm_color_lut_check(degamma_lut, degamma_tests) ||
@@ -2219,9 +2267,10 @@ static int i9xx_lut_10_diff(u16 a, u16 b)
drm_color_lut_extract(b, 10);
}
-static int i9xx_check_lut_10(struct drm_i915_private *dev_priv,
+static int i9xx_check_lut_10(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
+ struct intel_display *display = to_intel_display(crtc);
const struct drm_color_lut *lut = blob->data;
int lut_size = drm_color_lut_size(blob);
const struct drm_color_lut *a = &lut[lut_size - 2];
@@ -2230,7 +2279,9 @@ static int i9xx_check_lut_10(struct drm_i915_private *dev_priv,
if (i9xx_lut_10_diff(b->red, a->red) > 0x7f ||
i9xx_lut_10_diff(b->green, a->green) > 0x7f ||
i9xx_lut_10_diff(b->blue, a->blue) > 0x7f) {
- drm_dbg_kms(&dev_priv->drm, "Last gamma LUT entry exceeds max slope\n");
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] Last gamma LUT entry exceeds max slope\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
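
A note on the check just above: it enforces a hardware limit on the step between the final two 10-bit gamma entries, as the "max slope" message says. Below is a worked standalone sketch of that arithmetic; the extraction math is simplified (the real helper, drm_color_lut_extract(), also rounds), and the sample values are invented:

/*
 * Worked sketch of the "last entry max slope" check: the 16-bit uapi
 * LUT values are reduced to the 10-bit hardware precision, and the
 * step between the two final entries must fit in 7 bits (<= 0x7f).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t lut_extract(uint16_t user, int bits) /* cf. drm_color_lut_extract() */
{
        return user >> (16 - bits); /* simplified: the real helper rounds */
}

int main(void)
{
        uint16_t a = 0xf000, b = 0xffff; /* last two LUT entries, one channel */
        int diff = (int)lut_extract(b, 10) - (int)lut_extract(a, 10);

        printf("10-bit step = %d -> %s\n", diff,
               diff > 0x7f ? "-EINVAL (slope too steep)" : "ok");
        return 0;
}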
@@ -2239,28 +2290,28 @@ static int i9xx_check_lut_10(struct drm_i915_private *dev_priv,
void intel_color_assert_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
/* make sure {pre,post}_csc_lut were correctly assigned */
- if (DISPLAY_VER(i915) >= 11 || HAS_GMCH(i915)) {
- drm_WARN_ON(&i915->drm,
+ if (DISPLAY_VER(display) >= 11 || HAS_GMCH(display)) {
+ drm_WARN_ON(display->drm,
crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
- } else if (DISPLAY_VER(i915) == 10) {
- drm_WARN_ON(&i915->drm,
+ } else if (DISPLAY_VER(display) == 10) {
+ drm_WARN_ON(display->drm,
crtc_state->post_csc_lut == crtc_state->hw.gamma_lut &&
crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut &&
- crtc_state->pre_csc_lut != i915->display.color.glk_linear_degamma_lut);
- drm_WARN_ON(&i915->drm,
+ crtc_state->pre_csc_lut != display->color.glk_linear_degamma_lut);
+ drm_WARN_ON(display->drm,
!ilk_lut_limited_range(crtc_state) &&
crtc_state->post_csc_lut != NULL &&
crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
} else if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) {
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut &&
crtc_state->pre_csc_lut != crtc_state->hw.gamma_lut);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!ilk_lut_limited_range(crtc_state) &&
crtc_state->post_csc_lut != crtc_state->hw.degamma_lut &&
crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
@@ -2278,7 +2329,7 @@ static void intel_assign_luts(struct intel_crtc_state *crtc_state)
static int i9xx_color_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
@@ -2293,9 +2344,9 @@ static int i9xx_color_check(struct intel_atomic_state *state,
crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
- if (DISPLAY_VER(i915) < 4 &&
+ if (DISPLAY_VER(display) < 4 &&
crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT) {
- ret = i9xx_check_lut_10(i915, crtc_state->hw.gamma_lut);
+ ret = i9xx_check_lut_10(crtc, crtc_state->hw.gamma_lut);
if (ret)
return ret;
}
@@ -2462,12 +2513,12 @@ static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
static int ilk_assign_luts(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (ilk_lut_limited_range(crtc_state)) {
struct drm_property_blob *gamma_lut;
- gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut,
+ gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut,
drm_color_lut_size(crtc_state->hw.gamma_lut),
true);
if (IS_ERR(gamma_lut))
@@ -2501,7 +2552,7 @@ static int ilk_assign_luts(struct intel_crtc_state *crtc_state)
static int ilk_color_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
@@ -2511,15 +2562,17 @@ static int ilk_color_check(struct intel_atomic_state *state,
return ret;
if (crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) {
- drm_dbg_kms(&i915->drm,
- "Degamma and gamma together are not possible\n");
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] Degamma and gamma together are not possible\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
crtc_state->hw.ctm) {
- drm_dbg_kms(&i915->drm,
- "YCbCr and CTM together are not possible\n");
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] YCbCr and CTM together are not possible\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
@@ -2572,21 +2625,21 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
static int ivb_assign_luts(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_property_blob *degamma_lut, *gamma_lut;
if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT)
return ilk_assign_luts(crtc_state);
- drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.degamma_lut) != 1024);
- drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.gamma_lut) != 1024);
+ drm_WARN_ON(display->drm, drm_color_lut_size(crtc_state->hw.degamma_lut) != 1024);
+ drm_WARN_ON(display->drm, drm_color_lut_size(crtc_state->hw.gamma_lut) != 1024);
- degamma_lut = create_resized_lut(i915, crtc_state->hw.degamma_lut, 512,
+ degamma_lut = create_resized_lut(display, crtc_state->hw.degamma_lut, 512,
false);
if (IS_ERR(degamma_lut))
return PTR_ERR(degamma_lut);
- gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, 512,
+ gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut, 512,
ilk_lut_limited_range(crtc_state));
if (IS_ERR(gamma_lut)) {
drm_property_blob_put(degamma_lut);
@@ -2605,7 +2658,7 @@ static int ivb_assign_luts(struct intel_crtc_state *crtc_state)
static int ivb_color_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
@@ -2615,22 +2668,25 @@ static int ivb_color_check(struct intel_atomic_state *state,
return ret;
if (crtc_state->c8_planes && crtc_state->hw.degamma_lut) {
- drm_dbg_kms(&i915->drm,
- "C8 pixelformat and degamma together are not possible\n");
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] C8 pixelformat and degamma together are not possible\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
crtc_state->hw.ctm) {
- drm_dbg_kms(&i915->drm,
- "YCbCr and CTM together are not possible\n");
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] YCbCr and CTM together are not possible\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) {
- drm_dbg_kms(&i915->drm,
- "YCbCr and degamma+gamma together are not possible\n");
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] YCbCr and degamma+gamma together are not possible\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
@@ -2675,13 +2731,13 @@ static bool glk_use_pre_csc_lut_for_gamma(const struct intel_crtc_state *crtc_st
static int glk_assign_luts(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (glk_use_pre_csc_lut_for_gamma(crtc_state)) {
struct drm_property_blob *gamma_lut;
- gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut,
- DISPLAY_INFO(i915)->color.degamma_lut_size,
+ gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut,
+ DISPLAY_INFO(display)->color.degamma_lut_size,
false);
if (IS_ERR(gamma_lut))
return PTR_ERR(gamma_lut);
@@ -2697,7 +2753,7 @@ static int glk_assign_luts(struct intel_crtc_state *crtc_state)
if (ilk_lut_limited_range(crtc_state)) {
struct drm_property_blob *gamma_lut;
- gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut,
+ gamma_lut = create_resized_lut(display, crtc_state->hw.gamma_lut,
drm_color_lut_size(crtc_state->hw.gamma_lut),
true);
if (IS_ERR(gamma_lut))
@@ -2720,7 +2776,7 @@ static int glk_assign_luts(struct intel_crtc_state *crtc_state)
*/
if (crtc_state->csc_enable && !crtc_state->pre_csc_lut)
drm_property_replace_blob(&crtc_state->pre_csc_lut,
- i915->display.color.glk_linear_degamma_lut);
+ display->color.glk_linear_degamma_lut);
return 0;
}
@@ -2739,7 +2795,7 @@ static int glk_check_luts(const struct intel_crtc_state *crtc_state)
static int glk_color_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
@@ -2750,15 +2806,17 @@ static int glk_color_check(struct intel_atomic_state *state,
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
crtc_state->hw.ctm) {
- drm_dbg_kms(&i915->drm,
- "YCbCr and CTM together are not possible\n");
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] YCbCr and CTM together are not possible\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
crtc_state->hw.degamma_lut && crtc_state->hw.gamma_lut) {
- drm_dbg_kms(&i915->drm,
- "YCbCr and degamma+gamma together are not possible\n");
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] YCbCr and degamma+gamma together are not possible\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
@@ -2795,8 +2853,7 @@ static int glk_color_check(struct intel_atomic_state *state,
static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
u32 gamma_mode = 0;
if (crtc_state->hw.degamma_lut)
@@ -2814,7 +2871,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
* ToDo: Extend to Logarithmic Gamma once the new UAPI
* is accepted and implemented by a userspace consumer
*/
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
gamma_mode |= GAMMA_MODE_MODE_10BIT;
else
gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEG;
@@ -3195,13 +3252,13 @@ static bool icl_lut_equal(const struct intel_crtc_state *crtc_state,
static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
int i;
- blob = drm_property_create_blob(&dev_priv->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * LEGACY_LUT_LENGTH,
NULL);
if (IS_ERR(blob))
@@ -3210,8 +3267,8 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- u32 val = intel_de_read_fw(dev_priv,
- PALETTE(dev_priv, pipe, i));
+ u32 val = intel_de_read_fw(display,
+ PALETTE(display, pipe, i));
i9xx_lut_8_pack(&lut[i], val);
}
@@ -3221,15 +3278,15 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = DISPLAY_INFO(dev_priv)->color.gamma_lut_size;
+ struct intel_display *display = to_intel_display(crtc);
+ u32 lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
u32 ldw, udw;
int i;
- blob = drm_property_create_blob(&dev_priv->drm,
+ blob = drm_property_create_blob(display->drm,
lut_size * sizeof(lut[0]), NULL);
if (IS_ERR(blob))
return NULL;
@@ -3237,10 +3294,10 @@ static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- ldw = intel_de_read_fw(dev_priv,
- PALETTE(dev_priv, pipe, 2 * i + 0));
- udw = intel_de_read_fw(dev_priv,
- PALETTE(dev_priv, pipe, 2 * i + 1));
+ ldw = intel_de_read_fw(display,
+ PALETTE(display, pipe, 2 * i + 0));
+ udw = intel_de_read_fw(display,
+ PALETTE(display, pipe, 2 * i + 1));
i9xx_lut_10_pack(&lut[i], ldw, udw);
}
@@ -3272,13 +3329,13 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = DISPLAY_INFO(dev_priv)->color.gamma_lut_size;
+ struct intel_display *display = to_intel_display(crtc);
+ int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
- blob = drm_property_create_blob(&dev_priv->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * lut_size,
NULL);
if (IS_ERR(blob))
@@ -3287,17 +3344,17 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- u32 ldw = intel_de_read_fw(dev_priv,
- PALETTE(dev_priv, pipe, 2 * i + 0));
- u32 udw = intel_de_read_fw(dev_priv,
- PALETTE(dev_priv, pipe, 2 * i + 1));
+ u32 ldw = intel_de_read_fw(display,
+ PALETTE(display, pipe, 2 * i + 0));
+ u32 udw = intel_de_read_fw(display,
+ PALETTE(display, pipe, 2 * i + 1));
i965_lut_10p6_pack(&lut[i], ldw, udw);
}
- lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 0)));
- lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 1)));
- lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(dev_priv, pipe, 2)));
+ lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(display, PIPEGCMAX(display, pipe, 0)));
+ lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(display, PIPEGCMAX(display, pipe, 1)));
+ lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(display, PIPEGCMAX(display, pipe, 2)));
return blob;
}
@@ -3324,13 +3381,13 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = DISPLAY_INFO(dev_priv)->color.degamma_lut_size;
+ struct intel_display *display = to_intel_display(crtc);
+ int i, lut_size = DISPLAY_INFO(display)->color.degamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
- blob = drm_property_create_blob(&dev_priv->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * lut_size,
NULL);
if (IS_ERR(blob))
@@ -3339,8 +3396,8 @@ static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size; i++) {
- u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0));
- u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1));
+ u32 ldw = intel_de_read_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 0));
+ u32 udw = intel_de_read_fw(display, CGM_PIPE_DEGAMMA(pipe, i, 1));
chv_cgm_degamma_pack(&lut[i], ldw, udw);
}
@@ -3350,13 +3407,13 @@ static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc)
static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size;
+ struct intel_display *display = to_intel_display(crtc);
+ int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
- blob = drm_property_create_blob(&i915->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * lut_size,
NULL);
if (IS_ERR(blob))
@@ -3365,8 +3422,8 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size; i++) {
- u32 ldw = intel_de_read_fw(i915, CGM_PIPE_GAMMA(pipe, i, 0));
- u32 udw = intel_de_read_fw(i915, CGM_PIPE_GAMMA(pipe, i, 1));
+ u32 ldw = intel_de_read_fw(display, CGM_PIPE_GAMMA(pipe, i, 0));
+ u32 udw = intel_de_read_fw(display, CGM_PIPE_GAMMA(pipe, i, 1));
chv_cgm_gamma_pack(&lut[i], ldw, udw);
}
@@ -3376,10 +3433,10 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
static void chv_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- crtc_state->cgm_mode = intel_de_read(i915, CGM_PIPE_MODE(crtc->pipe));
+ crtc_state->cgm_mode = intel_de_read(display, CGM_PIPE_MODE(crtc->pipe));
i9xx_get_config(crtc_state);
}
@@ -3399,13 +3456,13 @@ static void chv_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
int i;
- blob = drm_property_create_blob(&i915->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * LEGACY_LUT_LENGTH,
NULL);
if (IS_ERR(blob))
@@ -3414,7 +3471,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- u32 val = intel_de_read_fw(i915, LGC_PALETTE(pipe, i));
+ u32 val = intel_de_read_fw(display, LGC_PALETTE(pipe, i));
i9xx_lut_8_pack(&lut[i], val);
}
@@ -3424,13 +3481,13 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size;
+ struct intel_display *display = to_intel_display(crtc);
+ int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
- blob = drm_property_create_blob(&i915->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * lut_size,
NULL);
if (IS_ERR(blob))
@@ -3439,7 +3496,7 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
lut = blob->data;
for (i = 0; i < lut_size; i++) {
- u32 val = intel_de_read_fw(i915, PREC_PALETTE(pipe, i));
+ u32 val = intel_de_read_fw(display, PREC_PALETTE(pipe, i));
ilk_lut_10_pack(&lut[i], val);
}
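
The *_pack() readout helpers used above all re-expand the hardware's narrower per-channel precision (10 bits here) back to the 16-bit uapi range before storing it in the blob. A standalone sketch of that scaling, simplified relative to the driver's intel_color_lut_pack():

#include <stdio.h>
#include <stdint.h>

static uint16_t expand_10_to_16(uint32_t hw)
{
        /* scale so that 0 maps to 0 and 0x3ff maps to 0xffff */
        return (uint16_t)(hw * 0xffffu / 0x3ffu);
}

int main(void)
{
        for (uint32_t hw = 0; hw <= 0x3ff; hw += 0x155)
                printf("hw 0x%03x -> uapi 0x%04x\n", hw, expand_10_to_16(hw));
        return 0;
}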
@@ -3487,13 +3544,13 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *ivb_read_lut_10(struct intel_crtc *crtc,
u32 prec_index)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
int i, lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
- blob = drm_property_create_blob(&dev_priv->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * lut_size,
NULL);
if (IS_ERR(blob))
@@ -3504,14 +3561,14 @@ static struct drm_property_blob *ivb_read_lut_10(struct intel_crtc *crtc,
for (i = 0; i < lut_size; i++) {
u32 val;
- intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+ intel_de_write_fw(display, PREC_PAL_INDEX(pipe),
prec_index + i);
- val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe));
+ val = intel_de_read_fw(display, PREC_PAL_DATA(pipe));
ilk_lut_10_pack(&lut[i], val);
}
- intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+ intel_de_write_fw(display, PREC_PAL_INDEX(pipe),
PAL_PREC_INDEX_VALUE(0));
return blob;
@@ -3552,13 +3609,13 @@ static void ivb_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
u32 prec_index)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
int i, lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
- blob = drm_property_create_blob(&i915->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * lut_size,
NULL);
if (IS_ERR(blob))
@@ -3566,19 +3623,19 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
lut = blob->data;
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
+ intel_de_write_fw(display, PREC_PAL_INDEX(pipe),
prec_index);
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
+ intel_de_write_fw(display, PREC_PAL_INDEX(pipe),
PAL_PREC_AUTO_INCREMENT |
prec_index);
for (i = 0; i < lut_size; i++) {
- u32 val = intel_de_read_fw(i915, PREC_PAL_DATA(pipe));
+ u32 val = intel_de_read_fw(display, PREC_PAL_DATA(pipe));
ilk_lut_10_pack(&lut[i], val);
}
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
+ intel_de_write_fw(display, PREC_PAL_INDEX(pipe),
PAL_PREC_INDEX_VALUE(0));
return blob;
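
Both ivb_read_lut_10() and bdw_read_lut_10() above follow the same indexed-register protocol: program PREC_PAL_INDEX (bdw additionally sets the auto-increment bit so it is written only once), stream reads from PREC_PAL_DATA, then park the index back at 0. A toy model of that protocol, runnable standalone, with an invented register layout:

#include <stdio.h>

#define AUTO_INC 0x8000u
static unsigned pal[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
static unsigned index_reg;

static void write_index(unsigned v) { index_reg = v; }

static unsigned read_data(void)
{
        unsigned v = pal[index_reg & 0x7fffu];

        if (index_reg & AUTO_INC)
                index_reg++; /* hardware bumps the index after each access */
        return v;
}

int main(void)
{
        int i;

        write_index(AUTO_INC | 0);
        for (i = 0; i < 8; i++)
                printf("%u ", read_data()); /* one index write, eight reads */
        printf("\n");
        write_index(0); /* leave the index at 0, as the driver does */
        return 0;
}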
@@ -3617,13 +3674,13 @@ static void bdw_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i, lut_size = DISPLAY_INFO(dev_priv)->color.degamma_lut_size;
+ struct intel_display *display = to_intel_display(crtc);
+ int i, lut_size = DISPLAY_INFO(display)->color.degamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
- blob = drm_property_create_blob(&dev_priv->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * lut_size,
NULL);
if (IS_ERR(blob))
@@ -3636,22 +3693,22 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ intel_de_write_fw(display, PRE_CSC_GAMC_INDEX(pipe),
PRE_CSC_GAMC_INDEX_VALUE(0));
- intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ intel_de_write_fw(display, PRE_CSC_GAMC_INDEX(pipe),
PRE_CSC_GAMC_AUTO_INCREMENT |
PRE_CSC_GAMC_INDEX_VALUE(0));
for (i = 0; i < lut_size; i++) {
- u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe));
+ u32 val = intel_de_read_fw(display, PRE_CSC_GAMC_DATA(pipe));
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
mtl_degamma_lut_pack(&lut[i], val);
else
glk_degamma_lut_pack(&lut[i], val);
}
- intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ intel_de_write_fw(display, PRE_CSC_GAMC_INDEX(pipe),
PRE_CSC_GAMC_INDEX_VALUE(0));
return blob;
@@ -3683,13 +3740,13 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *
icl_read_lut_multi_segment(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- int i, lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size;
+ struct intel_display *display = to_intel_display(crtc);
+ int i, lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
- blob = drm_property_create_blob(&i915->drm,
+ blob = drm_property_create_blob(display->drm,
sizeof(lut[0]) * lut_size,
NULL);
if (IS_ERR(blob))
@@ -3697,20 +3754,20 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc)
lut = blob->data;
- intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ intel_de_write_fw(display, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
- intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ intel_de_write_fw(display, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_MULTI_SEG_AUTO_INCREMENT |
PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
for (i = 0; i < 9; i++) {
- u32 ldw = intel_de_read_fw(i915, PREC_PAL_MULTI_SEG_DATA(pipe));
- u32 udw = intel_de_read_fw(i915, PREC_PAL_MULTI_SEG_DATA(pipe));
+ u32 ldw = intel_de_read_fw(display, PREC_PAL_MULTI_SEG_DATA(pipe));
+ u32 udw = intel_de_read_fw(display, PREC_PAL_MULTI_SEG_DATA(pipe));
ilk_lut_12p4_pack(&lut[i], ldw, udw);
}
- intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ intel_de_write_fw(display, PREC_PAL_MULTI_SEG_INDEX(pipe),
PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
/*
@@ -3877,15 +3934,15 @@ static const struct intel_color_funcs ilk_color_funcs = {
void intel_color_crtc_init(struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
int degamma_lut_size, gamma_lut_size;
bool has_ctm;
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
- gamma_lut_size = DISPLAY_INFO(i915)->color.gamma_lut_size;
- degamma_lut_size = DISPLAY_INFO(i915)->color.degamma_lut_size;
- has_ctm = DISPLAY_VER(i915) >= 5;
+ gamma_lut_size = DISPLAY_INFO(display)->color.gamma_lut_size;
+ degamma_lut_size = DISPLAY_INFO(display)->color.degamma_lut_size;
+ has_ctm = DISPLAY_VER(display) >= 5;
/*
* "DPALETTE_A: NOTE: The 8-bit (non-10-bit) mode is the
@@ -3895,57 +3952,59 @@ void intel_color_crtc_init(struct intel_crtc *crtc)
* Confirmed on alv,cst,pnv. Mobile gen2 parts (alm,mgm)
* are confirmed not to suffer from this restriction.
*/
- if (DISPLAY_VER(i915) == 3 && crtc->pipe == PIPE_A)
+ if (DISPLAY_VER(display) == 3 && crtc->pipe == PIPE_A)
gamma_lut_size = 256;
drm_crtc_enable_color_mgmt(&crtc->base, degamma_lut_size,
has_ctm, gamma_lut_size);
}
-int intel_color_init(struct drm_i915_private *i915)
+int intel_color_init(struct intel_display *display)
{
struct drm_property_blob *blob;
- if (DISPLAY_VER(i915) != 10)
+ if (DISPLAY_VER(display) != 10)
return 0;
- blob = create_linear_lut(i915,
- DISPLAY_INFO(i915)->color.degamma_lut_size);
+ blob = create_linear_lut(display,
+ DISPLAY_INFO(display)->color.degamma_lut_size);
if (IS_ERR(blob))
return PTR_ERR(blob);
- i915->display.color.glk_linear_degamma_lut = blob;
+ display->color.glk_linear_degamma_lut = blob;
return 0;
}
-void intel_color_init_hooks(struct drm_i915_private *i915)
+void intel_color_init_hooks(struct intel_display *display)
{
- if (HAS_GMCH(i915)) {
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (HAS_GMCH(display)) {
if (IS_CHERRYVIEW(i915))
- i915->display.funcs.color = &chv_color_funcs;
+ display->funcs.color = &chv_color_funcs;
else if (IS_VALLEYVIEW(i915))
- i915->display.funcs.color = &vlv_color_funcs;
- else if (DISPLAY_VER(i915) >= 4)
- i915->display.funcs.color = &i965_color_funcs;
+ display->funcs.color = &vlv_color_funcs;
+ else if (DISPLAY_VER(display) >= 4)
+ display->funcs.color = &i965_color_funcs;
else
- i915->display.funcs.color = &i9xx_color_funcs;
+ display->funcs.color = &i9xx_color_funcs;
} else {
- if (DISPLAY_VER(i915) >= 12)
- i915->display.funcs.color = &tgl_color_funcs;
- else if (DISPLAY_VER(i915) == 11)
- i915->display.funcs.color = &icl_color_funcs;
- else if (DISPLAY_VER(i915) == 10)
- i915->display.funcs.color = &glk_color_funcs;
- else if (DISPLAY_VER(i915) == 9)
- i915->display.funcs.color = &skl_color_funcs;
- else if (DISPLAY_VER(i915) == 8)
- i915->display.funcs.color = &bdw_color_funcs;
+ if (DISPLAY_VER(display) >= 12)
+ display->funcs.color = &tgl_color_funcs;
+ else if (DISPLAY_VER(display) == 11)
+ display->funcs.color = &icl_color_funcs;
+ else if (DISPLAY_VER(display) == 10)
+ display->funcs.color = &glk_color_funcs;
+ else if (DISPLAY_VER(display) == 9)
+ display->funcs.color = &skl_color_funcs;
+ else if (DISPLAY_VER(display) == 8)
+ display->funcs.color = &bdw_color_funcs;
else if (IS_HASWELL(i915))
- i915->display.funcs.color = &hsw_color_funcs;
- else if (DISPLAY_VER(i915) == 7)
- i915->display.funcs.color = &ivb_color_funcs;
+ display->funcs.color = &hsw_color_funcs;
+ else if (DISPLAY_VER(display) == 7)
+ display->funcs.color = &ivb_color_funcs;
else
- i915->display.funcs.color = &ilk_color_funcs;
+ display->funcs.color = &ilk_color_funcs;
}
}
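
The conversion above is mechanical: every helper that only touches display state now takes struct intel_display instead of struct drm_i915_private, and to_intel_display() resolves that pointer from whatever object is at hand. A minimal standalone sketch of the dispatch pattern behind it, using C11 _Generic with toy stand-in types (the real macro in the i915 headers covers many more pointer types):

#include <stdio.h>

struct intel_display { int ver; };
struct intel_crtc { struct intel_display *display; };
struct intel_atomic_state { struct intel_display *display; };

static struct intel_display *crtc_to_display(struct intel_crtc *c) { return c->display; }
static struct intel_display *state_to_display(struct intel_atomic_state *s) { return s->display; }

#define to_display(p) _Generic((p),                     \
        struct intel_crtc *: crtc_to_display,           \
        struct intel_atomic_state *: state_to_display)(p)

int main(void)
{
        struct intel_display d = { .ver = 12 };
        struct intel_crtc crtc = { .display = &d };
        struct intel_atomic_state state = { .display = &d };

        /* one macro, dispatched by argument type at compile time */
        printf("ver via crtc:  %d\n", to_display(&crtc)->ver);
        printf("ver via state: %d\n", to_display(&state)->ver);
        return 0;
}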
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 79f230a1709a..9d66457c1e89 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -11,11 +11,12 @@
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_crtc;
-struct drm_i915_private;
+struct intel_display;
+struct intel_dsb;
struct drm_property_blob;
-void intel_color_init_hooks(struct drm_i915_private *i915);
-int intel_color_init(struct drm_i915_private *i915);
+void intel_color_init_hooks(struct intel_display *display);
+int intel_color_init(struct intel_display *display);
void intel_color_crtc_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_atomic_state *state,
struct intel_crtc *crtc);
@@ -24,10 +25,13 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
bool intel_color_uses_dsb(const struct intel_crtc_state *crtc_state);
void intel_color_wait_commit(const struct intel_crtc_state *crtc_state);
-void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
-void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
+void intel_color_commit_noarm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
+void intel_color_commit_arm(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
void intel_color_post_update(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
+void intel_color_modeset(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob1,
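
The header swap works with forward declarations alone: since every parameter is a pointer, struct intel_display and struct intel_dsb can stay opaque here and no extra include is pulled in. A tiny standalone illustration of that rule, with invented names:

#include <stdio.h>

struct widget;                            /* forward declaration only */
static int widget_id(struct widget *w);   /* pointer use: size not needed */

struct widget { int id; };                /* full definition, as in a .c file */

static int widget_id(struct widget *w) { return w->id; }

int main(void)
{
        struct widget w = { .id = 7 };
        printf("id = %d\n", widget_id(&w));
        return 0;
}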
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 3252dab56430..4fbe2e3542ca 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -3,6 +3,7 @@
* Copyright © 2018 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 835c8b844494..4634d3fd9f20 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -38,6 +38,7 @@
#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_crt.h"
+#include "intel_crt_regs.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
@@ -55,18 +56,23 @@
#include "intel_pch_refclk.h"
/* Here's the desired hotplug mode */
-#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_ENABLE | \
+ ADPA_CRT_HOTPLUG_PERIOD_128 | \
ADPA_CRT_HOTPLUG_WARMUP_10MS | \
ADPA_CRT_HOTPLUG_SAMPLE_4S | \
ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
- ADPA_CRT_HOTPLUG_VOLREF_325MV | \
- ADPA_CRT_HOTPLUG_ENABLE)
+ ADPA_CRT_HOTPLUG_VOLREF_325MV)
+#define ADPA_HOTPLUG_MASK (ADPA_CRT_HOTPLUG_MONITOR_MASK | \
+ ADPA_CRT_HOTPLUG_ENABLE | \
+ ADPA_CRT_HOTPLUG_PERIOD_MASK | \
+ ADPA_CRT_HOTPLUG_WARMUP_MASK | \
+ ADPA_CRT_HOTPLUG_SAMPLE_MASK | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_MASK | \
+ ADPA_CRT_HOTPLUG_VOLREF_MASK | \
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
struct intel_crt {
struct intel_encoder base;
- /* DPMS state is stored in the connector, which we need in the
- * encoder's enable/disable callbacks */
- struct intel_connector *connector;
bool force_hotplug_required;
i915_reg_t adpa_reg;
};
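
Splitting the register description into ADPA_HOTPLUG_BITS (the wanted configuration) and ADPA_HOTPLUG_MASK (every bit that configuration owns) enables a read-modify-write that cannot leak stale hotplug state. A standalone sketch of the idiom; the bit positions and values below are invented for the demo, not the real ADPA layout:

#include <stdio.h>
#include <stdint.h>

#define HOTPLUG_MASK  0x00ff0000u /* hypothetical: all hotplug fields */
#define HOTPLUG_BITS  0x00a50000u /* hypothetical: desired configuration */

int main(void)
{
        uint32_t adpa = 0x80ff1234; /* stale hotplug config plus other bits */

        /* clear the whole field, then OR in the wanted configuration */
        adpa = (adpa & ~HOTPLUG_MASK) | HOTPLUG_BITS;
        printf("adpa = 0x%08x\n", adpa); /* unrelated bits preserved */
        return 0;
}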
@@ -81,18 +87,19 @@ static struct intel_crt *intel_attached_crt(struct intel_connector *connector)
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
-bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_crt_port_enabled(struct intel_display *display,
i915_reg_t adpa_reg, enum pipe *pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val;
- val = intel_de_read(dev_priv, adpa_reg);
+ val = intel_de_read(display, adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
- *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
+ *pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK_CPT, val);
else
- *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(ADPA_PIPE_SEL_MASK, val);
return val & ADPA_DAC_ENABLE;
}
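
REG_FIELD_GET() replaces the open-coded "(val & MASK) >> SHIFT" pairs above, deriving the shift from the mask so the two can never drift apart. A toy reimplementation for illustration only (the kernel's version is built on FIELD_GET() from <linux/bitfield.h>); the mask value here is hypothetical, not the real ADPA encoding:

#include <stdio.h>
#include <stdint.h>

#define PIPE_SEL_MASK (3u << 30) /* hypothetical field layout for the demo */

static uint32_t field_get(uint32_t mask, uint32_t val)
{
        return (val & mask) >> __builtin_ctz(mask); /* shift derived from mask */
}

int main(void)
{
        uint32_t adpa = (2u << 30) | 0x1234;

        printf("pipe = %u\n", field_get(PIPE_SEL_MASK, adpa)); /* prints 2 */
        return 0;
}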
@@ -100,6 +107,7 @@ bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
intel_wakeref_t wakeref;
@@ -110,7 +118,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
if (!wakeref)
return false;
- ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
+ ret = intel_crt_port_enabled(display, crt->adpa_reg, pipe);
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -119,11 +127,11 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
- tmp = intel_de_read(dev_priv, crt->adpa_reg);
+ tmp = intel_de_read(display, crt->adpa_reg);
if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -139,27 +147,27 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
}
static void intel_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
- pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ crtc_state->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *crtc_state)
{
- lpt_pch_get_config(pipe_config);
+ lpt_pch_get_config(crtc_state);
- hsw_ddi_get_config(encoder, pipe_config);
+ hsw_ddi_get_config(encoder, crtc_state);
- pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
- DRM_MODE_FLAG_NHSYNC |
- DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_NVSYNC);
- pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ crtc_state->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NVSYNC);
+ crtc_state->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -168,13 +176,14 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int mode)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 adpa;
- if (DISPLAY_VER(dev_priv) >= 5)
+ if (DISPLAY_VER(display) >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
@@ -193,7 +202,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_PIPE_SEL(crtc->pipe);
if (!HAS_PCH_SPLIT(dev_priv))
- intel_de_write(dev_priv, BCLRPAT(dev_priv, crtc->pipe), 0);
+ intel_de_write(display, BCLRPAT(display, crtc->pipe), 0);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -210,7 +219,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
break;
}
- intel_de_write(dev_priv, crt->adpa_reg, adpa);
+ intel_de_write(display, crt->adpa_reg, adpa);
}
static void intel_disable_crt(struct intel_atomic_state *state,
@@ -241,9 +250,10 @@ static void hsw_disable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@@ -253,6 +263,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -272,7 +283,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
hsw_fdi_disable(encoder);
- drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -282,9 +293,10 @@ static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@@ -294,11 +306,12 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
- drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -312,11 +325,12 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
- drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+ drm_WARN_ON(display->drm, !crtc_state->has_pch_encoder);
intel_ddi_enable_transcoder_func(encoder, crtc_state);
@@ -346,9 +360,9 @@ static enum drm_mode_status
intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
+ struct intel_display *display = to_intel_display(connector->dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ int max_dotclk = display->cdclk.max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
@@ -367,7 +381,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
- else if (IS_DISPLAY_VER(dev_priv, 3, 4))
+ else if (IS_DISPLAY_VER(display, 3, 4))
max_clock = 400000;
else
max_clock = 350000;
@@ -390,47 +404,48 @@ intel_crt_mode_valid(struct drm_connector *connector,
}
static int intel_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
return 0;
}
static int pch_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- pipe_config->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(pipe_config))
+ crtc_state->has_pch_encoder = true;
+ if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
return 0;
}
static int hsw_crt_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
+ struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->hw.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -440,39 +455,39 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_hblank_start > 4096)
return -EINVAL;
- pipe_config->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(pipe_config))
+ crtc_state->has_pch_encoder = true;
+ if (!intel_fdi_compute_pipe_bpp(crtc_state))
return -EINVAL;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
/* TODO: Check crtc_state->max_link_bpp_x16 instead of bw_constrained */
- if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
- drm_dbg_kms(&dev_priv->drm,
+ if (crtc_state->bw_constrained && crtc_state->pipe_bpp < 24) {
+ drm_dbg_kms(display->drm,
"LPT only supports 24bpp\n");
return -EINVAL;
}
- pipe_config->pipe_bpp = 24;
+ crtc_state->pipe_bpp = 24;
}
/* FDI must always be 2.7 GHz */
- pipe_config->port_clock = 135000 * 2;
+ crtc_state->port_clock = 135000 * 2;
- pipe_config->enhanced_framing = true;
+ crtc_state->enhanced_framing = true;
- adjusted_mode->crtc_clock = lpt_iclkip(pipe_config);
+ adjusted_mode->crtc_clock = lpt_iclkip(crtc_state);
return 0;
}
static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 adpa;
bool ret;
@@ -483,36 +498,36 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
crt->force_hotplug_required = false;
- save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
- drm_dbg_kms(&dev_priv->drm,
+ save_adpa = adpa = intel_de_read(display, crt->adpa_reg);
+ drm_dbg_kms(display->drm,
"trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
- intel_de_write(dev_priv, crt->adpa_reg, adpa);
+ intel_de_write(display, crt->adpa_reg, adpa);
- if (intel_de_wait_for_clear(dev_priv,
+ if (intel_de_wait_for_clear(display,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
1000))
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
- intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
- intel_de_posting_read(dev_priv, crt->adpa_reg);
+ intel_de_write(display, crt->adpa_reg, save_adpa);
+ intel_de_posting_read(display, crt->adpa_reg);
}
}
/* Check the status to see if both blue and green are on now */
- adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ adpa = intel_de_read(display, crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
- drm_dbg_kms(&dev_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n",
+ drm_dbg_kms(display->drm, "ironlake hotplug adpa=0x%x, result %d\n",
adpa, ret);
return ret;
@@ -520,9 +535,9 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
bool reenable_hpd;
u32 adpa;
bool ret;
@@ -542,29 +557,29 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
*/
reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
- save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
- drm_dbg_kms(&dev_priv->drm,
+ save_adpa = adpa = intel_de_read(display, crt->adpa_reg);
+ drm_dbg_kms(display->drm,
"trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
- intel_de_write(dev_priv, crt->adpa_reg, adpa);
+ intel_de_write(display, crt->adpa_reg, adpa);
- if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
+ if (intel_de_wait_for_clear(display, crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"timed out waiting for FORCE_TRIGGER");
- intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
+ intel_de_write(display, crt->adpa_reg, save_adpa);
}
/* Check the status to see if both blue and green are on now */
- adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ adpa = intel_de_read(display, crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
if (reenable_hpd)
@@ -575,8 +590,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
u32 stat;
bool ret = false;
int i, tries = 0;
@@ -603,18 +618,18 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN(dev_priv),
+ if (intel_de_wait_for_clear(display, PORT_HOTPLUG_EN(display),
CRT_HOTPLUG_FORCE_DETECT, 1000))
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"timed out waiting for FORCE_DETECT to go off");
}
- stat = intel_de_read(dev_priv, PORT_HOTPLUG_STAT(dev_priv));
+ stat = intel_de_read(display, PORT_HOTPLUG_STAT(display));
if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
ret = true;
/* clear the interrupt we just generated, if any */
- intel_de_write(dev_priv, PORT_HOTPLUG_STAT(dev_priv),
+ intel_de_write(display, PORT_HOTPLUG_STAT(display),
CRT_HOTPLUG_INT_STATUS);
i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
@@ -660,8 +675,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
- struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
+ struct intel_display *display = to_intel_display(connector->dev);
const struct drm_edid *drm_edid;
bool ret = false;
@@ -674,15 +688,15 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
* have to check the EDID input spec of the attached device.
*/
if (drm_edid_is_digital(drm_edid)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"CRT detected via DDC:0x50 [EDID]\n");
ret = true;
}
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
@@ -694,8 +708,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
static enum drm_connector_status
intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
{
- struct drm_device *dev = crt->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(&crt->base);
enum transcoder cpu_transcoder = (enum transcoder)pipe;
u32 save_bclrpat;
u32 save_vtotal;
@@ -706,14 +719,14 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
u8 st00;
enum drm_connector_status status;
- drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
+ drm_dbg_kms(display->drm, "starting load-detect on CRT\n");
- save_bclrpat = intel_de_read(dev_priv,
- BCLRPAT(dev_priv, cpu_transcoder));
- save_vtotal = intel_de_read(dev_priv,
- TRANS_VTOTAL(dev_priv, cpu_transcoder));
- vblank = intel_de_read(dev_priv,
- TRANS_VBLANK(dev_priv, cpu_transcoder));
+ save_bclrpat = intel_de_read(display,
+ BCLRPAT(display, cpu_transcoder));
+ save_vtotal = intel_de_read(display,
+ TRANS_VTOTAL(display, cpu_transcoder));
+ vblank = intel_de_read(display,
+ TRANS_VBLANK(display, cpu_transcoder));
vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1;
vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1;
@@ -722,25 +735,25 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1;
/* Set the border color to purple. */
- intel_de_write(dev_priv, BCLRPAT(dev_priv, cpu_transcoder), 0x500050);
+ intel_de_write(display, BCLRPAT(display, cpu_transcoder), 0x500050);
- if (DISPLAY_VER(dev_priv) != 2) {
- u32 transconf = intel_de_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder));
+ if (DISPLAY_VER(display) != 2) {
+ u32 transconf = intel_de_read(display,
+ TRANSCONF(display, cpu_transcoder));
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder),
transconf | TRANSCONF_FORCE_BORDER);
- intel_de_posting_read(dev_priv,
- TRANSCONF(dev_priv, cpu_transcoder));
+ intel_de_posting_read(display,
+ TRANSCONF(display, cpu_transcoder));
/* Wait for next Vblank to substitute
* border color for Color info */
- intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
- st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE);
+ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe));
+ st00 = intel_de_read8(display, _VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
connector_status_disconnected;
- intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANSCONF(display, cpu_transcoder),
transconf);
} else {
bool restore_vblank = false;
@@ -751,13 +764,13 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- u32 vsync = intel_de_read(dev_priv,
- TRANS_VSYNC(dev_priv, cpu_transcoder));
+ u32 vsync = intel_de_read(display,
+ TRANS_VSYNC(display, cpu_transcoder));
u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1;
vblank_start = vsync_start;
- intel_de_write(dev_priv,
- TRANS_VBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ TRANS_VBLANK(display, cpu_transcoder),
VBLANK_START(vblank_start - 1) |
VBLANK_END(vblank_end - 1));
restore_vblank = true;
@@ -771,9 +784,9 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
/*
* Wait for the border to be displayed
*/
- while (intel_de_read(dev_priv, PIPEDSL(dev_priv, pipe)) >= vactive)
+ while (intel_de_read(display, PIPEDSL(display, pipe)) >= vactive)
;
- while ((dsl = intel_de_read(dev_priv, PIPEDSL(dev_priv, pipe))) <= vsample)
+ while ((dsl = intel_de_read(display, PIPEDSL(display, pipe))) <= vsample)
;
/*
* Watch ST00 for an entire scanline
@@ -783,15 +796,15 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
do {
count++;
/* Read the ST00 VGA status register */
- st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE);
+ st00 = intel_de_read8(display, _VGA_MSR_WRITE);
if (st00 & (1 << 4))
detect++;
- } while ((intel_de_read(dev_priv, PIPEDSL(dev_priv, pipe)) == dsl));
+ } while ((intel_de_read(display, PIPEDSL(display, pipe)) == dsl));
/* restore vblank if necessary */
if (restore_vblank)
- intel_de_write(dev_priv,
- TRANS_VBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ TRANS_VBLANK(display, cpu_transcoder),
vblank);
/*
* If more than 3/4 of the scanline detected a monitor,
@@ -805,7 +818,7 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
}
/* Restore previous settings */
- intel_de_write(dev_priv, BCLRPAT(dev_priv, cpu_transcoder),
+ intel_de_write(display, BCLRPAT(display, cpu_transcoder),
save_bclrpat);
return status;
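The gen2 branch above samples the ST00 status register once per loop iteration for one full scanline and declares the monitor connected when more than 3/4 of the samples saw the load. A self-contained rendition of just that integer threshold, with fabricated sample data:

#include <stdio.h>

int main(void)
{
	int samples[] = {1, 1, 1, 0, 1, 1, 1, 1};	/* 7 of 8 hits */
	int count = 0, detect = 0;

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		count++;
		if (samples[i])
			detect++;
	}
	/* connected iff detect/count > 3/4, kept in integer math */
	printf("%s\n", detect * 4 > count * 3 ? "connected" : "disconnected");
	return 0;
}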
@@ -842,26 +855,26 @@ intel_crt_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct intel_encoder *intel_encoder = &crt->base;
+ struct intel_encoder *encoder = &crt->base;
struct drm_atomic_state *state;
intel_wakeref_t wakeref;
int status;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name,
force);
- if (!intel_display_device_enabled(dev_priv))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return connector->status;
- if (dev_priv->display.params.load_detect_test) {
- wakeref = intel_display_power_get(dev_priv,
- intel_encoder->power_domain);
+ if (display->params.load_detect_test) {
+ wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
goto load_detect;
}
@@ -869,21 +882,20 @@ intel_crt_detect(struct drm_connector *connector,
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
- wakeref = intel_display_power_get(dev_priv,
- intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
- if (I915_HAS_HOTPLUG(dev_priv)) {
+ if (I915_HAS_HOTPLUG(display)) {
/* We cannot rely on the HPD pin always being correctly wired
* up; for example, many KVMs do not pass it through, and so
* only trust an assertion that the monitor is connected.
*/
if (intel_crt_detect_hotplug(connector)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"CRT detected via hotplug\n");
status = connector_status_connected;
goto out;
} else
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"CRT not detected via hotplug\n");
}
@@ -896,7 +908,7 @@ intel_crt_detect(struct drm_connector *connector,
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev_priv)) {
+ if (I915_HAS_HOTPLUG(display)) {
status = connector_status_disconnected;
goto out;
}
@@ -916,10 +928,10 @@ load_detect:
} else {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
- else if (DISPLAY_VER(dev_priv) < 4)
+ else if (DISPLAY_VER(display) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
- else if (dev_priv->display.params.load_detect_test)
+ else if (display->params.load_detect_test)
status = connector_status_disconnected;
else
status = connector_status_unknown;
@@ -927,56 +939,55 @@ load_detect:
}
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return status;
}
static int intel_crt_get_modes(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(connector->dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
- struct intel_encoder *intel_encoder = &crt->base;
+ struct intel_encoder *encoder = &crt->base;
intel_wakeref_t wakeref;
struct i2c_adapter *ddc;
int ret;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
- wakeref = intel_display_power_get(dev_priv,
- intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv, encoder->power_domain);
ret = intel_crt_ddc_get_modes(connector, connector->ddc);
if (ret || !IS_G4X(dev_priv))
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- ddc = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB);
+ ddc = intel_gmbus_get_adapter(display, GMBUS_PIN_DPB);
ret = intel_crt_ddc_get_modes(connector, ddc);
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
void intel_crt_reset(struct drm_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct intel_display *display = to_intel_display(encoder->dev);
struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
- if (DISPLAY_VER(dev_priv) >= 5) {
+ if (DISPLAY_VER(display) >= 5) {
u32 adpa;
- adpa = intel_de_read(dev_priv, crt->adpa_reg);
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa = intel_de_read(display, crt->adpa_reg);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
- intel_de_write(dev_priv, crt->adpa_reg, adpa);
- intel_de_posting_read(dev_priv, crt->adpa_reg);
+ intel_de_write(display, crt->adpa_reg, adpa);
+ intel_de_posting_read(display, crt->adpa_reg);
- drm_dbg_kms(&dev_priv->drm, "crt adpa set to 0x%x\n", adpa);
+ drm_dbg_kms(display->drm, "crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = true;
}
@@ -1006,11 +1017,11 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-void intel_crt_init(struct drm_i915_private *dev_priv)
+void intel_crt_init(struct intel_display *display)
{
- struct drm_connector *connector;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_connector *connector;
struct intel_crt *crt;
- struct intel_connector *intel_connector;
i915_reg_t adpa_reg;
u8 ddc_pin;
u32 adpa;
@@ -1022,7 +1033,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
else
adpa_reg = ADPA;
- adpa = intel_de_read(dev_priv, adpa_reg);
+ adpa = intel_de_read(display, adpa_reg);
if ((adpa & ADPA_DAC_ENABLE) == 0) {
/*
* On some machines (some IVB at least) CRT can be
@@ -1032,36 +1043,36 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
* take. So the only way to tell is to attempt to enable
* it and see what happens.
*/
- intel_de_write(dev_priv, adpa_reg,
- adpa | ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
- if ((intel_de_read(dev_priv, adpa_reg) & ADPA_DAC_ENABLE) == 0)
+ intel_de_write(display, adpa_reg,
+ adpa | ADPA_DAC_ENABLE |
+ ADPA_HSYNC_CNTL_DISABLE |
+ ADPA_VSYNC_CNTL_DISABLE);
+ if ((intel_de_read(display, adpa_reg) & ADPA_DAC_ENABLE) == 0)
return;
- intel_de_write(dev_priv, adpa_reg, adpa);
+ intel_de_write(display, adpa_reg, adpa);
}
crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
if (!crt)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(crt);
return;
}
- ddc_pin = dev_priv->display.vbt.crt_ddc_pin;
+ ddc_pin = display->vbt.crt_ddc_pin;
- connector = &intel_connector->base;
- crt->connector = intel_connector;
- drm_connector_init_with_ddc(&dev_priv->drm, connector,
+ drm_connector_init_with_ddc(display->drm, &connector->base,
&intel_crt_connector_funcs,
DRM_MODE_CONNECTOR_VGA,
- intel_gmbus_get_adapter(dev_priv, ddc_pin));
+ intel_gmbus_get_adapter(display, ddc_pin));
- drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs,
+ drm_encoder_init(display->drm, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC, "CRT");
- intel_connector_attach_encoder(intel_connector, &crt->base);
+ intel_connector_attach_encoder(connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = BIT(INTEL_OUTPUT_DVO) | BIT(INTEL_OUTPUT_HDMI);
@@ -1070,24 +1081,24 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
else
crt->base.pipe_mask = ~0;
- if (DISPLAY_VER(dev_priv) != 2)
- connector->interlace_allowed = true;
+ if (DISPLAY_VER(display) != 2)
+ connector->base.interlace_allowed = true;
crt->adpa_reg = adpa_reg;
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
- if (I915_HAS_HOTPLUG(dev_priv) &&
+ if (I915_HAS_HOTPLUG(display) &&
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
- intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
} else {
- intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
- intel_connector->base.polled = intel_connector->polled;
+ connector->base.polled = connector->polled;
- if (HAS_DDI(dev_priv)) {
+ if (HAS_DDI(display)) {
assert_port_valid(dev_priv, PORT_E);
crt->base.port = PORT_E;
@@ -1118,9 +1129,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.get_hw_state = intel_crt_get_hw_state;
crt->base.enable = intel_enable_crt;
}
- intel_connector->get_hw_state = intel_connector_get_hw_state;
+ connector->get_hw_state = intel_connector_get_hw_state;
- drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &intel_crt_connector_helper_funcs);
/*
* TODO: find a proper way to discover whether we need to set the
@@ -1131,8 +1142,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
- dev_priv->display.fdi.rx_config = intel_de_read(dev_priv,
- FDI_RX_CTL(PIPE_A)) & fdi_config;
+ display->fdi.rx_config = intel_de_read(display,
+ FDI_RX_CTL(PIPE_A)) & fdi_config;
}
intel_crt_reset(&crt->base.base);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.h b/drivers/gpu/drm/i915/display/intel_crt.h
index fe7690c2b948..e0abfe96a3d2 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.h
+++ b/drivers/gpu/drm/i915/display/intel_crt.h
@@ -10,20 +10,20 @@
enum pipe;
struct drm_encoder;
-struct drm_i915_private;
+struct intel_display;
#ifdef I915
-bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+bool intel_crt_port_enabled(struct intel_display *display,
i915_reg_t adpa_reg, enum pipe *pipe);
-void intel_crt_init(struct drm_i915_private *dev_priv);
+void intel_crt_init(struct intel_display *display);
void intel_crt_reset(struct drm_encoder *encoder);
#else
-static inline bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+static inline bool intel_crt_port_enabled(struct intel_display *display,
i915_reg_t adpa_reg, enum pipe *pipe)
{
return false;
}
-static inline void intel_crt_init(struct drm_i915_private *dev_priv)
+static inline void intel_crt_init(struct intel_display *display)
{
}
static inline void intel_crt_reset(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/display/intel_crt_regs.h b/drivers/gpu/drm/i915/display/intel_crt_regs.h
new file mode 100644
index 000000000000..571a67ae9afa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_crt_regs.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_CRT_REGS_H__
+#define __INTEL_CRT_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define ADPA _MMIO(0x61100)
+#define PCH_ADPA _MMIO(0xe1100)
+#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
+#define ADPA_DAC_ENABLE REG_BIT(31)
+#define ADPA_PIPE_SEL_MASK REG_BIT(30)
+#define ADPA_PIPE_SEL(pipe) REG_FIELD_PREP(ADPA_PIPE_SEL_MASK, (pipe))
+#define ADPA_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29)
+#define ADPA_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(ADPA_PIPE_SEL_MASK_CPT, (pipe))
+#define ADPA_CRT_HOTPLUG_MONITOR_MASK REG_GENMASK(25, 24)
+#define ADPA_CRT_HOTPLUG_MONITOR_NONE REG_FIELD_PREP(ADPA_CRT_HOTPLUG_MONITOR_MASK, 0)
+#define ADPA_CRT_HOTPLUG_MONITOR_COLOR REG_FIELD_PREP(ADPA_CRT_HOTPLUG_MONITOR_MASK, 3)
+#define ADPA_CRT_HOTPLUG_MONITOR_MONO REG_FIELD_PREP(ADPA_CRT_HOTPLUG_MONITOR_MASK, 2)
+#define ADPA_CRT_HOTPLUG_ENABLE REG_BIT(23)
+#define ADPA_CRT_HOTPLUG_PERIOD_MASK REG_BIT(22)
+#define ADPA_CRT_HOTPLUG_PERIOD_64 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_PERIOD_MASK, 0)
+#define ADPA_CRT_HOTPLUG_PERIOD_128 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_PERIOD_MASK, 1)
+#define ADPA_CRT_HOTPLUG_WARMUP_MASK REG_BIT(21)
+#define ADPA_CRT_HOTPLUG_WARMUP_5MS REG_FIELD_PREP(ADPA_CRT_HOTPLUG_WARMUP_MASK, 0)
+#define ADPA_CRT_HOTPLUG_WARMUP_10MS REG_FIELD_PREP(ADPA_CRT_HOTPLUG_WARMUP_MASK, 1)
+#define ADPA_CRT_HOTPLUG_SAMPLE_MASK REG_BIT(20)
+#define ADPA_CRT_HOTPLUG_SAMPLE_2S REG_FIELD_PREP(ADPA_CRT_HOTPLUG_SAMPLE_MASK, 0)
+#define ADPA_CRT_HOTPLUG_SAMPLE_4S REG_FIELD_PREP(ADPA_CRT_HOTPLUG_SAMPLE_MASK, 1)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_MASK REG_GENMASK(19, 18)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_40 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 0)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_50 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 1)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_60 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 2)
+#define ADPA_CRT_HOTPLUG_VOLTAGE_70 REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLTAGE_MASK, 3)
+#define ADPA_CRT_HOTPLUG_VOLREF_MASK REG_BIT(17)
+#define ADPA_CRT_HOTPLUG_VOLREF_325MV REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLREF_MASK, 0)
+#define ADPA_CRT_HOTPLUG_VOLREF_475MV REG_FIELD_PREP(ADPA_CRT_HOTPLUG_VOLREF_MASK, 1)
+#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER REG_BIT(16)
+#define ADPA_USE_VGA_HVPOLARITY REG_BIT(15)
+#define ADPA_HSYNC_CNTL_DISABLE REG_BIT(11)
+#define ADPA_VSYNC_CNTL_DISABLE REG_BIT(10)
+#define ADPA_VSYNC_ACTIVE_HIGH REG_BIT(4)
+#define ADPA_HSYNC_ACTIVE_HIGH REG_BIT(3)
+
+#define _VGA_MSR_WRITE _MMIO(0x3c2)
+
+#endif /* __INTEL_CRT_REGS_H__ */
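The new header leans entirely on the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() family. A userspace approximation of how such a register value is packed and unpacked (the kernel macros additionally carry compile-time mask sanity checks, which this sketch omits):

#include <stdint.h>
#include <stdio.h>

#define REG_BIT(b)		(1u << (b))
#define REG_GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_SHIFT(m)		(__builtin_ctz(m))
#define REG_FIELD_PREP(m, v)	(((uint32_t)(v) << FIELD_SHIFT(m)) & (m))
#define REG_FIELD_GET(m, r)	(((r) & (m)) >> FIELD_SHIFT(m))

#define ADPA_DAC_ENABLE			REG_BIT(31)
#define ADPA_CRT_HOTPLUG_MONITOR_MASK	REG_GENMASK(25, 24)

int main(void)
{
	uint32_t adpa = ADPA_DAC_ENABLE |
			REG_FIELD_PREP(ADPA_CRT_HOTPLUG_MONITOR_MASK, 3);

	printf("adpa=0x%08x monitor=%u\n", adpa,
	       REG_FIELD_GET(ADPA_CRT_HOTPLUG_MONITOR_MASK, adpa));
	return 0;
}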
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 1b578cad2813..c910168602d2 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -9,8 +9,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
+#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
+#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "icl_dsi.h"
@@ -35,11 +37,11 @@
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
+ struct intel_display *display = to_intel_display(crtc->dev);
- if (I915_STATE_WARN(i915, drm_crtc_vblank_get(crtc) == 0,
- "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n",
- crtc->base.id, crtc->name))
+ if (INTEL_DISPLAY_STATE_WARN(display, drm_crtc_vblank_get(crtc) == 0,
+ "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n",
+ crtc->base.id, crtc->name))
drm_crtc_vblank_put(crtc);
}
@@ -48,12 +50,12 @@ struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915)
return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0));
}
-struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
enum pipe pipe)
{
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (crtc->pipe == pipe)
return crtc;
}
@@ -69,7 +71,8 @@ void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
enum pipe pipe)
{
- struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+ struct intel_display *display = &i915->display;
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
if (crtc->active)
intel_crtc_wait_for_next_vblank(crtc);
@@ -122,6 +125,8 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ crtc->block_dc_for_vblank = intel_psr_needs_block_dc_vblank(crtc_state);
+
assert_vblank_disabled(&crtc->base);
drm_crtc_set_max_vblank_count(&crtc->base,
intel_crtc_max_vblank_count(crtc_state));
@@ -138,6 +143,7 @@ void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc);
/*
* Should really happen exactly when we disable the pipe
@@ -148,6 +154,10 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
drm_crtc_vblank_off(&crtc->base);
assert_vblank_disabled(&crtc->base);
+
+ crtc->block_dc_for_vblank = false;
+
+ flush_work(&display->irq.vblank_dc_work);
}
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
@@ -387,13 +397,31 @@ fail:
return ret;
}
+int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
+ struct drm_crtc *drm_crtc;
+ struct intel_crtc *crtc;
+
+ drm_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
+ if (!drm_crtc)
+ return -ENOENT;
+
+ crtc = to_intel_crtc(drm_crtc);
+ pipe_from_crtc_id->pipe = crtc->pipe;
+
+ return 0;
+}
+
static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state)
{
return crtc_state->hw.active &&
- !intel_crtc_needs_modeset(crtc_state) &&
!crtc_state->preload_luts &&
+ !intel_crtc_needs_modeset(crtc_state) &&
intel_crtc_needs_color_update(crtc_state) &&
- !intel_color_uses_dsb(crtc_state);
+ !intel_color_uses_dsb(crtc_state) &&
+ !crtc_state->use_dsb;
}
static void intel_crtc_vblank_work(struct kthread_work *base)
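From userspace, the intel_crtc_get_pipe_from_crtc_id_ioctl() moved in above is reachable through the long-standing I915_GET_PIPE_FROM_CRTC_ID ioctl, which resolves a KMS CRTC object id to a hardware pipe index. A bare-bones caller, assuming a libdrm build environment (-I/usr/include/libdrm -ldrm); the device path and CRTC id are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	struct drm_i915_get_pipe_from_crtc_id arg = { .crtc_id = 51 };	/* example id */

	if (fd < 0)
		return 1;
	if (drmIoctl(fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, &arg) == 0)
		printf("crtc %u -> pipe %u\n", arg.crtc_id, arg.pipe);
	return 0;
}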
@@ -457,6 +485,17 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
+int intel_scanlines_to_usecs(const struct drm_display_mode *adjusted_mode,
+ int scanlines)
+{
+ /* paranoia */
+ if (!adjusted_mode->crtc_clock)
+ return 1;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(scanlines, adjusted_mode->crtc_htotal * 1000),
+ adjusted_mode->crtc_clock);
+}
+
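The arithmetic in the new helper can be sanity-checked outside the kernel: one scanline lasts htotal pixel clocks, so usecs = scanlines * htotal * 1000 / crtc_clock (in kHz), rounded up. The mode numbers below are an ordinary 1080p60 timing, not values taken from this patch.

#include <stdint.h>
#include <stdio.h>

static int scanlines_to_usecs(int scanlines, int htotal, int clock_khz)
{
	uint64_t num = (uint64_t)scanlines * htotal * 1000;

	if (!clock_khz)
		return 1;	/* same paranoia as the kernel helper */
	return (int)((num + clock_khz - 1) / clock_khz);	/* DIV_ROUND_UP */
}

int main(void)
{
	/* 1920x1080@60: htotal 2200, pixel clock 148500 kHz */
	printf("45 scanlines ~ %d us\n", scanlines_to_usecs(45, 2200, 148500));
	return 0;
}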
/**
* intel_pipe_update_start() - start update of a set of display registers
* @state: the atomic state
@@ -484,12 +523,8 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
intel_psr_lock(new_crtc_state);
if (new_crtc_state->do_async_flip) {
- spin_lock_irq(&crtc->base.dev->event_lock);
- /* arm the event for the flip done irq handler */
- crtc->flip_done_event = new_crtc_state->uapi.event;
- spin_unlock_irq(&crtc->base.dev->event_lock);
-
- new_crtc_state->uapi.event = NULL;
+ intel_crtc_prepare_vblank_event(new_crtc_state,
+ &crtc->flip_done_event);
return;
}
@@ -589,6 +624,19 @@ void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state)
crtc_state->uapi.event = NULL;
}
+void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
+ struct drm_pending_vblank_event **event)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&crtc->base.dev->event_lock, irqflags);
+ *event = crtc_state->uapi.event;
+ spin_unlock_irqrestore(&crtc->base.dev->event_lock, irqflags);
+
+ crtc_state->uapi.event = NULL;
+}
+
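The helper added above moves the pending event pointer out of the crtc state under event_lock, so the flip-done interrupt handler and the commit path can never both claim the same event. A pthread rendition of that ownership-transfer pattern; the types are stand-ins, not the drm ones:

#include <pthread.h>
#include <stdio.h>

struct event { int id; };

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static struct event *pending;	/* analog of crtc_state->uapi.event */

static struct event *take_pending_event(void)
{
	struct event *ev;

	pthread_mutex_lock(&event_lock);
	ev = pending;		/* claim it... */
	pending = NULL;		/* ...and leave nothing behind to race on */
	pthread_mutex_unlock(&event_lock);
	return ev;
}

int main(void)
{
	struct event e = { .id = 1 };

	pending = &e;
	printf("took event %d\n", take_pending_event()->id);
	printf("second take: %p\n", (void *)take_pending_event());
	return 0;
}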
/**
* intel_pipe_update_end() - end update of a set of display registers
* @state: the atomic state
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index b615b7ab5ccd..de54ae1deedf 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -10,11 +10,15 @@
enum i9xx_plane_id;
enum pipe;
+struct drm_device;
struct drm_display_mode;
+struct drm_file;
struct drm_i915_private;
+struct drm_pending_vblank_event;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
/*
* FIXME: We should instead only take spinlocks once for the entire update
@@ -28,9 +32,15 @@ struct intel_crtc_state;
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
+int intel_scanlines_to_usecs(const struct drm_display_mode *adjusted_mode,
+ int scanlines);
void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state);
+void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
+ struct drm_pending_vblank_event **event);
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe);
+int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
struct intel_crtc *crtc);
@@ -43,7 +53,7 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
-struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,
+struct intel_crtc *intel_crtc_for_pipe(struct intel_display *display,
enum pipe pipe);
void intel_wait_for_vblank_if_active(struct drm_i915_private *i915,
enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 705ec5ad385c..1faef60be472 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -50,16 +50,6 @@ intel_dump_infoframe(struct drm_i915_private *i915,
hdmi_infoframe_log(KERN_DEBUG, i915->drm.dev, frame);
}
-static void
-intel_dump_buffer(const char *prefix, const u8 *buf, size_t len)
-{
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE,
- 16, 0, buf, len, false);
-}
-
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -293,8 +283,8 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_dp_as_sdp_log(&p, &pipe_config->infoframes.as_sdp);
if (pipe_config->has_audio)
- intel_dump_buffer("ELD: ", pipe_config->eld,
- drm_eld_size(pipe_config->eld));
+ drm_print_hex_dump(&p, "ELD: ", pipe_config->eld,
+ drm_eld_size(pipe_config->eld));
drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
str_yes_no(pipe_config->vrr.enable),
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 9ad53e1cbbd0..57cf8f46a458 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -9,7 +9,9 @@
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_vblank.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -26,8 +28,6 @@
#include "intel_vblank.h"
#include "skl_watermark.h"
-#include "gem/i915_gem_object.h"
-
/* Cursor formats */
static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
@@ -275,7 +275,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
}
/* TODO: split into noarm+arm pair */
-static void i845_cursor_update_arm(struct intel_plane *plane,
+static void i845_cursor_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -315,10 +316,11 @@ static void i845_cursor_update_arm(struct intel_plane *plane,
}
}
-static void i845_cursor_disable_arm(struct intel_plane *plane,
+static void i845_cursor_disable_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- i845_cursor_update_arm(plane, crtc_state, NULL);
+ i845_cursor_update_arm(dsb, plane, crtc_state, NULL);
}
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -527,22 +529,25 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return 0;
}
-static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane,
+static void i9xx_cursor_disable_sel_fetch_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
return;
- intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), 0);
+ intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), 0);
}
-static void wa_16021440873(struct intel_plane *plane,
+static void wa_16021440873(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
u32 ctl = plane_state->ctl;
int et_y_position = drm_rect_height(&crtc_state->pipe_src) + 1;
@@ -551,16 +556,18 @@ static void wa_16021440873(struct intel_plane *plane,
ctl &= ~MCURSOR_MODE_MASK;
ctl |= MCURSOR_MODE_64_2B;
- intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe), ctl);
+ intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), ctl);
- intel_de_write(dev_priv, CURPOS_ERLY_TPT(dev_priv, pipe),
- CURSOR_POS_Y(et_y_position));
+ intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe),
+ CURSOR_POS_Y(et_y_position));
}
-static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
+static void i9xx_cursor_update_sel_fetch_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -571,19 +578,17 @@ static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
if (crtc_state->enable_psr2_su_region_et) {
u32 val = intel_cursor_position(crtc_state, plane_state,
true);
- intel_de_write_fw(dev_priv,
- CURPOS_ERLY_TPT(dev_priv, pipe),
- val);
+
+ intel_de_write_dsb(display, dsb, CURPOS_ERLY_TPT(dev_priv, pipe), val);
}
- intel_de_write_fw(dev_priv, SEL_FETCH_CUR_CTL(pipe),
- plane_state->ctl);
+ intel_de_write_dsb(display, dsb, SEL_FETCH_CUR_CTL(pipe), plane_state->ctl);
} else {
/* Wa_16021440873 */
if (crtc_state->enable_psr2_su_region_et)
- wa_16021440873(plane, crtc_state, plane_state);
+ wa_16021440873(dsb, plane, crtc_state, plane_state);
else
- i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+ i9xx_cursor_disable_sel_fetch_arm(dsb, plane, crtc_state);
}
}
@@ -610,10 +615,11 @@ static u32 skl_cursor_wm_reg_val(const struct skl_wm_level *level)
return val;
}
-static void skl_write_cursor_wm(struct intel_plane *plane,
+static void skl_write_cursor_wm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -621,31 +627,33 @@ static void skl_write_cursor_wm(struct intel_plane *plane,
&crtc_state->wm.skl.plane_ddb[plane_id];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++)
- intel_de_write_fw(i915, CUR_WM(pipe, level),
- skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
+ for (level = 0; level < display->wm.num_levels; level++)
+ intel_de_write_dsb(display, dsb, CUR_WM(pipe, level),
+ skl_cursor_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
- intel_de_write_fw(i915, CUR_WM_TRANS(pipe),
- skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
+ intel_de_write_dsb(display, dsb, CUR_WM_TRANS(pipe),
+ skl_cursor_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
- intel_de_write_fw(i915, CUR_WM_SAGV(pipe),
- skl_cursor_wm_reg_val(&wm->sagv.wm0));
- intel_de_write_fw(i915, CUR_WM_SAGV_TRANS(pipe),
- skl_cursor_wm_reg_val(&wm->sagv.trans_wm));
+ intel_de_write_dsb(display, dsb, CUR_WM_SAGV(pipe),
+ skl_cursor_wm_reg_val(&wm->sagv.wm0));
+ intel_de_write_dsb(display, dsb, CUR_WM_SAGV_TRANS(pipe),
+ skl_cursor_wm_reg_val(&wm->sagv.trans_wm));
}
- intel_de_write_fw(i915, CUR_BUF_CFG(pipe),
- skl_cursor_ddb_reg_val(ddb));
+ intel_de_write_dsb(display, dsb, CUR_BUF_CFG(pipe),
+ skl_cursor_ddb_reg_val(ddb));
}
/* TODO: split into noarm+arm pair */
-static void i9xx_cursor_update_arm(struct intel_plane *plane,
+static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
@@ -685,38 +693,36 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
*/
if (DISPLAY_VER(dev_priv) >= 9)
- skl_write_cursor_wm(plane, crtc_state);
+ skl_write_cursor_wm(dsb, plane, crtc_state);
if (plane_state)
- i9xx_cursor_update_sel_fetch_arm(plane, crtc_state,
- plane_state);
+ i9xx_cursor_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
else
- i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+ i9xx_cursor_disable_sel_fetch_arm(dsb, plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
if (HAS_CUR_FBC(dev_priv))
- intel_de_write_fw(dev_priv,
- CUR_FBC_CTL(dev_priv, pipe),
- fbc_ctl);
- intel_de_write_fw(dev_priv, CURCNTR(dev_priv, pipe), cntl);
- intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos);
- intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base);
+ intel_de_write_dsb(display, dsb, CUR_FBC_CTL(dev_priv, pipe), fbc_ctl);
+ intel_de_write_dsb(display, dsb, CURCNTR(dev_priv, pipe), cntl);
+ intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos);
+ intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base);
plane->cursor.base = base;
plane->cursor.size = fbc_ctl;
plane->cursor.cntl = cntl;
} else {
- intel_de_write_fw(dev_priv, CURPOS(dev_priv, pipe), pos);
- intel_de_write_fw(dev_priv, CURBASE(dev_priv, pipe), base);
+ intel_de_write_dsb(display, dsb, CURPOS(dev_priv, pipe), pos);
+ intel_de_write_dsb(display, dsb, CURBASE(dev_priv, pipe), base);
}
}
-static void i9xx_cursor_disable_arm(struct intel_plane *plane,
+static void i9xx_cursor_disable_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- i9xx_cursor_update_arm(plane, crtc_state, NULL);
+ i9xx_cursor_update_arm(dsb, plane, crtc_state, NULL);
}
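The cursor conversion threads an optional DSB (display state buffer) through every register write: with a DSB the write is recorded for the hardware to replay later, without one it hits the register immediately. A toy analog of that dispatch shape; the real intel_de_write_dsb() of course goes through MMIO and the DSB engine rather than a printf and an array:

#include <stdint.h>
#include <stdio.h>

struct batch {
	uint32_t reg[8], val[8];
	int n;
};

static void write_reg(struct batch *dsb, uint32_t reg, uint32_t val)
{
	if (dsb) {			/* queue for later replay */
		dsb->reg[dsb->n] = reg;
		dsb->val[dsb->n++] = val;
	} else {			/* immediate write */
		printf("mmio: 0x%05x <- 0x%08x\n", reg, val);
	}
}

int main(void)
{
	struct batch b = { .n = 0 };

	write_reg(NULL, 0x700c0, 0x1);	/* direct path */
	write_reg(&b, 0x700c0, 0x2);	/* batched path */
	printf("batched %d write(s)\n", b.n);
	return 0;
}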
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -905,10 +911,10 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
}
if (new_plane_state->uapi.visible) {
- intel_plane_update_noarm(plane, crtc_state, new_plane_state);
- intel_plane_update_arm(plane, crtc_state, new_plane_state);
+ intel_plane_update_noarm(NULL, plane, crtc_state, new_plane_state);
+ intel_plane_update_arm(NULL, plane, crtc_state, new_plane_state);
} else {
- intel_plane_disable_arm(plane, crtc_state);
+ intel_plane_disable_arm(NULL, plane, crtc_state);
}
local_irq_enable();
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 4a6c3040ca15..e768dc6a15b3 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -5,6 +5,8 @@
#include <linux/log2.h>
#include <linux/math64.h>
+
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
@@ -34,6 +36,9 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);
+ if (IS_PANTHERLAKE(i915) && phy == PHY_A)
+ return true;
+
if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
return true;
@@ -65,22 +70,23 @@ static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder)
}
static void
-assert_dc_off(struct drm_i915_private *i915)
+assert_dc_off(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool enabled;
enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF);
- drm_WARN_ON(&i915->drm, !enabled);
+ drm_WARN_ON(display->drm, !enabled);
}
static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
int lane;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane)
- intel_de_rmw(i915,
- XELPDP_PORT_MSGBUS_TIMER(i915, encoder->port, lane),
+ intel_de_rmw(display,
+ XELPDP_PORT_MSGBUS_TIMER(display, encoder->port, lane),
XELPDP_PORT_MSGBUS_TIMER_VAL_MASK,
XELPDP_PORT_MSGBUS_TIMER_VAL);
}
@@ -119,25 +125,28 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
int lane)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, encoder->port, lane),
+ intel_de_rmw(display,
+ XELPDP_PORT_P2M_MSGBUS_STATUS(display, encoder->port, lane),
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
- intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
+ intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
+ if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
- drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy));
+ drm_err_once(display->drm,
+ "Failed to bring PHY %c to idle.\n",
+ phy_name(phy));
return;
}
@@ -147,22 +156,23 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
int command, int lane, u32 *val)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
- if (intel_de_wait_custom(i915,
- XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane),
+ if (intel_de_wait_custom(display,
+ XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane),
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_PORT_P2M_RESPONSE_READY,
XELPDP_MSGBUS_TIMEOUT_FAST_US,
XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
- drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
+ drm_dbg_kms(display->drm,
+ "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
- if (!(intel_de_read(i915, XELPDP_PORT_MSGBUS_TIMER(i915, port, lane)) &
+ if (!(intel_de_read(display, XELPDP_PORT_MSGBUS_TIMER(display, port, lane)) &
XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT))
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PHY %c Hardware did not detect a timeout\n",
phy_name(phy));
@@ -171,14 +181,18 @@ static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
}
if (*val & XELPDP_PORT_P2M_ERROR_SET) {
- drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy),
+ drm_dbg_kms(display->drm,
+ "PHY %c Error occurred during %s command. Status: 0x%x\n",
+ phy_name(phy),
command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
}
if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) {
- drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy),
+ drm_dbg_kms(display->drm,
+ "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n",
+ phy_name(phy),
command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
@@ -190,22 +204,22 @@ static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
static int __intel_cx0_read_once(struct intel_encoder *encoder,
int lane, u16 addr)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
int ack;
u32 val;
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
+ if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
- intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
+ intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
XELPDP_PORT_M2P_COMMAND_READ |
XELPDP_PORT_M2P_ADDRESS(addr));
@@ -221,7 +235,8 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder,
* down and let the message bus end up
* in a known state
*/
- intel_cx0_bus_reset(encoder, lane);
+ if (DISPLAY_VER(display) < 30)
+ intel_cx0_bus_reset(encoder, lane);
return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
}
@@ -229,11 +244,11 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder,
static u8 __intel_cx0_read(struct intel_encoder *encoder,
int lane, u16 addr)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
int i, status;
- assert_dc_off(i915);
+ assert_dc_off(display);
/* 3 tries is assumed to be enough to read successfully */
for (i = 0; i < 3; i++) {
@@ -243,7 +258,8 @@ static u8 __intel_cx0_read(struct intel_encoder *encoder,
return status;
}
- drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries.\n",
+ drm_err_once(display->drm,
+ "PHY %c Read %04x failed after %d retries.\n",
phy_name(phy), addr, i);
return 0;
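Both the message-bus read and write paths wrap a single transaction in the same "three tries, reset the bus between failures" policy. A skeleton of that loop with a stubbed transaction that fails twice before succeeding; in the driver the reset lives inside the single-attempt helper, hoisted into the loop here for brevity:

#include <stdio.h>

static int attempts;

static int xfer_once(void)
{
	return ++attempts < 3 ? -1 : 42;	/* fail, fail, succeed */
}

static void bus_reset(void)
{
	printf("bus reset\n");
}

static int xfer(void)
{
	/* 3 tries is assumed to be enough, as in the driver */
	for (int i = 0; i < 3; i++) {
		int status = xfer_once();

		if (status >= 0)
			return status;
		bus_reset();
	}
	return 0;	/* the driver logs an error and returns 0 here */
}

int main(void)
{
	printf("result: %d\n", xfer());
	return 0;
}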
@@ -260,32 +276,32 @@ static u8 intel_cx0_read(struct intel_encoder *encoder,
static int __intel_cx0_write_once(struct intel_encoder *encoder,
int lane, u16 addr, u8 data, bool committed)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
int ack;
u32 val;
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
+ if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
}
- intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
+ intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
(committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
XELPDP_PORT_M2P_DATA(data) |
XELPDP_PORT_M2P_ADDRESS(addr));
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
+ if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
return -ETIMEDOUT;
@@ -295,9 +311,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
if (ack < 0)
return ack;
- } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(i915, port, lane)) &
+ } else if ((intel_de_read(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane)) &
XELPDP_PORT_P2M_ERROR_SET)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PHY %c Error occurred during write command.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
return -EINVAL;
@@ -310,7 +326,8 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
* down and let the message bus end up
* in a known state
*/
- intel_cx0_bus_reset(encoder, lane);
+ if (DISPLAY_VER(display) < 30)
+ intel_cx0_bus_reset(encoder, lane);
return 0;
}
@@ -318,11 +335,11 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
static void __intel_cx0_write(struct intel_encoder *encoder,
int lane, u16 addr, u8 data, bool committed)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
int i, status;
- assert_dc_off(i915);
+ assert_dc_off(display);
/* 3 tries is assumed to be enough to write successfully */
for (i = 0; i < 3; i++) {
@@ -332,7 +349,7 @@ static void __intel_cx0_write(struct intel_encoder *encoder,
return;
}
- drm_err_once(&i915->drm,
+ drm_err_once(display->drm,
"PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}
@@ -348,9 +365,9 @@ static void intel_cx0_write(struct intel_encoder *encoder,
static void intel_c20_sram_write(struct intel_encoder *encoder,
int lane, u16 addr, u16 data)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- assert_dc_off(i915);
+ assert_dc_off(display);
intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);
@@ -362,10 +379,10 @@ static void intel_c20_sram_write(struct intel_encoder *encoder,
static u16 intel_c20_sram_read(struct intel_encoder *encoder,
int lane, u16 addr)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u16 val;
- assert_dc_off(i915);
+ assert_dc_off(display);
intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);
@@ -429,7 +446,7 @@ static u8 intel_c10_get_tx_term_ctl(const struct intel_crtc_state *crtc_state)
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
u8 owned_lane_mask;
intel_wakeref_t wakeref;
@@ -444,7 +461,7 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
wakeref = intel_cx0_phy_transaction_begin(encoder);
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&i915->drm, !trans)) {
+ if (drm_WARN_ON_ONCE(display->drm, !trans)) {
intel_cx0_phy_transaction_end(encoder, wakeref);
return;
}
@@ -923,10 +940,10 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
},
.mplla = { 0x3104, /* mplla cfg0 */
0xd105, /* mplla cfg1 */
- 0xc025, /* mplla cfg2 */
- 0xc025, /* mplla cfg3 */
- 0xa6ab, /* mplla cfg4 */
- 0x8c00, /* mplla cfg5 */
+ 0x9217, /* mplla cfg2 */
+ 0x9217, /* mplla cfg3 */
+ 0x8c00, /* mplla cfg4 */
+ 0x759a, /* mplla cfg5 */
0x4000, /* mplla cfg6 */
0x0003, /* mplla cfg7 */
0x3555, /* mplla cfg8 */
@@ -1122,6 +1139,22 @@ static const struct intel_c20pll_state * const xe2hpd_c20_dp_tables[] = {
NULL,
};
+static const struct intel_c20pll_state * const xe3lpd_c20_dp_edp_tables[] = {
+ &mtl_c20_dp_rbr,
+ &xe2hpd_c20_edp_r216,
+ &xe2hpd_c20_edp_r243,
+ &mtl_c20_dp_hbr1,
+ &xe2hpd_c20_edp_r324,
+ &xe2hpd_c20_edp_r432,
+ &mtl_c20_dp_hbr2,
+ &xe2hpd_c20_edp_r675,
+ &mtl_c20_dp_hbr3,
+ &mtl_c20_dp_uhbr10,
+ &xe2hpd_c20_dp_uhbr13_5,
+ &mtl_c20_dp_uhbr20,
+ NULL,
+};
+
/*
* HDMI link rates with 38.4 MHz reference clock.
*/
@@ -2003,12 +2036,12 @@ intel_c10pll_tables_get(struct intel_crtc_state *crtc_state,
static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_cx0pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll;
int i;
if (intel_crtc_has_dp_encoder(crtc_state)) {
- if (intel_panel_use_ssc(i915)) {
+ if (intel_panel_use_ssc(display)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
pll_state->ssc_enabled =
@@ -2019,7 +2052,7 @@ static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
if (pll_state->ssc_enabled)
return;
- drm_WARN_ON(&i915->drm, ARRAY_SIZE(pll_state->c10.pll) < 9);
+ drm_WARN_ON(display->drm, ARRAY_SIZE(pll_state->c10.pll) < 9);
for (i = 4; i < 9; i++)
pll_state->c10.pll[i] = 0;
}
@@ -2073,7 +2106,7 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
intel_cx0_phy_transaction_end(encoder, wakeref);
}
-static void intel_c10_pll_program(struct drm_i915_private *i915,
+static void intel_c10_pll_program(struct intel_display *display,
const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
@@ -2084,14 +2117,6 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
- /* Custom width needs to be programmed to 0 for both the phy lanes */
- intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
- C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
- MB_WRITE_COMMITTED);
- intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
- 0, C10_VDR_CTRL_UPDATE_CFG,
- MB_WRITE_COMMITTED);
-
/* Program the pll values only for the master lane */
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
@@ -2101,12 +2126,16 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+ /* Custom width needs to be programmed to 0 for both the phy lanes */
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+ C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
+ MB_WRITE_COMMITTED);
intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
}
-static void intel_c10pll_dump_hw_state(struct drm_i915_private *i915,
+static void intel_c10pll_dump_hw_state(struct intel_display *display,
const struct intel_c10pll_state *hw_state)
{
bool fracen;
@@ -2115,35 +2144,39 @@ static void intel_c10pll_dump_hw_state(struct drm_i915_private *i915,
unsigned int multiplier, tx_clk_div;
fracen = hw_state->pll[0] & C10_PLL0_FRACEN;
- drm_dbg_kms(&i915->drm, "c10pll_hw_state: fracen: %s, ",
+ drm_dbg_kms(display->drm, "c10pll_hw_state: fracen: %s, ",
str_yes_no(fracen));
if (fracen) {
frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11];
frac_rem = hw_state->pll[14] << 8 | hw_state->pll[13];
frac_den = hw_state->pll[10] << 8 | hw_state->pll[9];
- drm_dbg_kms(&i915->drm, "quot: %u, rem: %u, den: %u,\n",
+ drm_dbg_kms(display->drm, "quot: %u, rem: %u, den: %u,\n",
frac_quot, frac_rem, frac_den);
}
multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 |
hw_state->pll[2]) / 2 + 16;
tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div);
- drm_dbg_kms(&i915->drm, "c10pll_rawhw_state:");
- drm_dbg_kms(&i915->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, hw_state->cmn);
+ drm_dbg_kms(display->drm, "c10pll_rawhw_state:");
+ drm_dbg_kms(display->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx,
+ hw_state->cmn);
BUILD_BUG_ON(ARRAY_SIZE(hw_state->pll) % 4);
for (i = 0; i < ARRAY_SIZE(hw_state->pll); i = i + 4)
- drm_dbg_kms(&i915->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n",
+ drm_dbg_kms(display->drm,
+ "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n",
i, hw_state->pll[i], i + 1, hw_state->pll[i + 1],
i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]);
}
-static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_state *pll_state)
+static int intel_c20_compute_hdmi_tmds_pll(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_c20pll_state *pll_state = &crtc_state->dpll_hw_state.cx0pll.c20;
u64 datarate;
u64 mpll_tx_clk_div;
u64 vco_freq_shift;
@@ -2152,13 +2185,14 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_
u64 mpll_multiplier;
u64 mpll_fracn_quot;
u64 mpll_fracn_rem;
+ u16 tx_misc;
u8 mpllb_ana_freq_vco;
u8 mpll_div_multiplier;
- if (pixel_clock < 25175 || pixel_clock > 600000)
+ if (crtc_state->port_clock < 25175 || crtc_state->port_clock > 600000)
return -EINVAL;
- datarate = ((u64)pixel_clock * 1000) * 10;
+ datarate = ((u64)crtc_state->port_clock * 1000) * 10;
mpll_tx_clk_div = ilog2(div64_u64((u64)CLOCK_9999MHZ, (u64)datarate));
vco_freq_shift = ilog2(div64_u64((u64)CLOCK_4999MHZ * (u64)256, (u64)datarate));
vco_freq = (datarate << vco_freq_shift) >> 8;
@@ -2171,6 +2205,11 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_
mpll_div_multiplier = min_t(u8, div64_u64((vco_freq * 16 + (datarate >> 1)),
datarate), 255);
+ if (DISPLAY_VER(display) >= 20)
+ tx_misc = 0x5;
+ else
+ tx_misc = 0x0;
+
if (vco_freq <= DATARATE_3000000000)
mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_3;
else if (vco_freq <= DATARATE_3500000000)
@@ -2180,9 +2219,9 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_
else
mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0;
- pll_state->clock = pixel_clock;
+ pll_state->clock = crtc_state->port_clock;
pll_state->tx[0] = 0xbe88;
- pll_state->tx[1] = 0x9800;
+ pll_state->tx[1] = 0x9800 | C20_PHY_TX_MISC(tx_misc);
pll_state->tx[2] = 0x0000;
pll_state->cmn[0] = 0x0500;
pll_state->cmn[1] = 0x0005;
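The TMDS computation above is pure integer math, so its first steps can be reproduced standalone. The sketch below works a 148.5 MHz (1080p60) pixel clock through the divider selection, assuming the two clock constants are 9999 MHz and 4999 MHz as their names suggest; the driver's remaining fractional-N steps are omitted.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CLOCK_9999MHZ 9999000000ull	/* assumed value */
#define CLOCK_4999MHZ 4999000000ull	/* assumed value */

static unsigned int ilog2_u64(uint64_t v)
{
	return 63 - __builtin_clzll(v);
}

int main(void)
{
	uint64_t port_clock = 148500;			/* kHz */
	uint64_t datarate = port_clock * 1000 * 10;	/* 10 bits/symbol */
	uint64_t tx_clk_div = ilog2_u64(CLOCK_9999MHZ / datarate);
	uint64_t vco_shift = ilog2_u64(CLOCK_4999MHZ * 256 / datarate);
	uint64_t vco_freq = (datarate << vco_shift) >> 8;

	printf("datarate   %" PRIu64 " bps\n", datarate);	/* 1485000000 */
	printf("tx_clk_div %" PRIu64 "\n", tx_clk_div);		/* 2 */
	printf("vco_freq   %" PRIu64 " Hz\n", vco_freq);	/* 2970000000 */
	return 0;
}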
@@ -2239,13 +2278,19 @@ static const struct intel_c20pll_state * const *
intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_has_dp_encoder(crtc_state)) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- return xe2hpd_c20_edp_tables;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
+ if (DISPLAY_RUNTIME_INFO(display)->edp_typec_support)
+ return xe3lpd_c20_dp_edp_tables;
+ if (DISPLAY_VERx100(display) == 1401)
+ return xe2hpd_c20_edp_tables;
+ }
- if (DISPLAY_VER_FULL(i915) == IP_VER(14, 1))
+ if (DISPLAY_VER(display) >= 30)
+ return xe3lpd_c20_dp_edp_tables;
+ else if (DISPLAY_VERx100(display) == 1401)
return xe2hpd_c20_dp_tables;
else
return mtl_c20_dp_tables;
@@ -2266,8 +2311,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
/* try computed C20 HDMI tables before using consolidated tables */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- if (intel_c20_compute_hdmi_tmds_pll(crtc_state->port_clock,
- &crtc_state->dpll_hw_state.cx0pll.c20) == 0)
+ if (intel_c20_compute_hdmi_tmds_pll(crtc_state) == 0)
return 0;
}
@@ -2347,10 +2391,10 @@ static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c20pll_state *pll_state)
{
+ struct intel_display *display = to_intel_display(encoder);
bool cntx;
intel_wakeref_t wakeref;
int i;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -2362,11 +2406,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
if (cntx)
pll_state->tx[i] = intel_c20_sram_read(encoder,
INTEL_CX0_LANE0,
- PHY_C20_B_TX_CNTX_CFG(i915, i));
+ PHY_C20_B_TX_CNTX_CFG(display, i));
else
pll_state->tx[i] = intel_c20_sram_read(encoder,
INTEL_CX0_LANE0,
- PHY_C20_A_TX_CNTX_CFG(i915, i));
+ PHY_C20_A_TX_CNTX_CFG(display, i));
}
/* Read common configuration */
@@ -2374,11 +2418,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
if (cntx)
pll_state->cmn[i] = intel_c20_sram_read(encoder,
INTEL_CX0_LANE0,
- PHY_C20_B_CMN_CNTX_CFG(i915, i));
+ PHY_C20_B_CMN_CNTX_CFG(display, i));
else
pll_state->cmn[i] = intel_c20_sram_read(encoder,
INTEL_CX0_LANE0,
- PHY_C20_A_CMN_CNTX_CFG(i915, i));
+ PHY_C20_A_CMN_CNTX_CFG(display, i));
}
if (intel_c20phy_use_mpllb(pll_state)) {
@@ -2387,11 +2431,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
if (cntx)
pll_state->mpllb[i] = intel_c20_sram_read(encoder,
INTEL_CX0_LANE0,
- PHY_C20_B_MPLLB_CNTX_CFG(i915, i));
+ PHY_C20_B_MPLLB_CNTX_CFG(display, i));
else
pll_state->mpllb[i] = intel_c20_sram_read(encoder,
INTEL_CX0_LANE0,
- PHY_C20_A_MPLLB_CNTX_CFG(i915, i));
+ PHY_C20_A_MPLLB_CNTX_CFG(display, i));
}
} else {
/* MPLLA configuration */
@@ -2399,11 +2443,11 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
if (cntx)
pll_state->mplla[i] = intel_c20_sram_read(encoder,
INTEL_CX0_LANE0,
- PHY_C20_B_MPLLA_CNTX_CFG(i915, i));
+ PHY_C20_B_MPLLA_CNTX_CFG(display, i));
else
pll_state->mplla[i] = intel_c20_sram_read(encoder,
INTEL_CX0_LANE0,
- PHY_C20_A_MPLLA_CNTX_CFG(i915, i));
+ PHY_C20_A_MPLLA_CNTX_CFG(display, i));
}
}
@@ -2412,33 +2456,37 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
intel_cx0_phy_transaction_end(encoder, wakeref);
}
-static void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
+static void intel_c20pll_dump_hw_state(struct intel_display *display,
const struct intel_c20pll_state *hw_state)
{
int i;
- drm_dbg_kms(&i915->drm, "c20pll_hw_state:\n");
- drm_dbg_kms(&i915->drm, "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n",
+ drm_dbg_kms(display->drm, "c20pll_hw_state:\n");
+ drm_dbg_kms(display->drm,
+ "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n",
hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]);
- drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n",
+ drm_dbg_kms(display->drm,
+ "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n",
hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]);
if (intel_c20phy_use_mpllb(hw_state)) {
for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++)
- drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]);
+ drm_dbg_kms(display->drm, "mpllb[%d] = 0x%.4x\n", i,
+ hw_state->mpllb[i]);
} else {
for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
- drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
+ drm_dbg_kms(display->drm, "mplla[%d] = 0x%.4x\n", i,
+ hw_state->mplla[i]);
}
}
-void intel_cx0pll_dump_hw_state(struct drm_i915_private *i915,
+void intel_cx0pll_dump_hw_state(struct intel_display *display,
const struct intel_cx0pll_state *hw_state)
{
if (hw_state->use_c10)
- intel_c10pll_dump_hw_state(i915, &hw_state->c10);
+ intel_c10pll_dump_hw_state(display, &hw_state->c10);
else
- intel_c20pll_dump_hw_state(i915, &hw_state->c20);
+ intel_c20pll_dump_hw_state(display, &hw_state->c20);
}
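
Almost every hunk in this patch trades struct drm_i915_private for struct intel_display obtained through to_intel_display(), which accepts an encoder, a crtc state, an atomic state, and more. The driver builds that polymorphism on C11 _Generic selection; here is a reduced sketch of the idea with made-up struct layouts (the real macro covers many more types):

    #include <stdio.h>

    struct intel_display { int ver; };
    struct intel_encoder { struct intel_display *display; };
    struct intel_crtc_state { struct intel_display *display; };

    static struct intel_display *enc_display(struct intel_encoder *e)
    {
    	return e->display;
    }

    static struct intel_display *state_display(struct intel_crtc_state *s)
    {
    	return s->display;
    }

    /* One name, several argument types, resolved at compile time. */
    #define to_intel_display(p)					\
    	_Generic((p),						\
    		 struct intel_encoder *: enc_display,		\
    		 struct intel_crtc_state *: state_display)(p)

    int main(void)
    {
    	struct intel_display d = { .ver = 30 };
    	struct intel_encoder e = { .display = &d };
    	struct intel_crtc_state s = { .display = &d };

    	printf("%d %d\n", to_intel_display(&e)->ver, to_intel_display(&s)->ver);
    	return 0;
    }
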
static u8 intel_c20_get_dp_rate(u32 clock)
@@ -2538,7 +2586,7 @@ static int intel_get_c20_custom_width(u32 clock, bool dp)
return 0;
}
-static void intel_c20_pll_program(struct drm_i915_private *i915,
+static void intel_c20_pll_program(struct intel_display *display,
const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
@@ -2571,11 +2619,11 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
if (cntx)
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
- PHY_C20_A_TX_CNTX_CFG(i915, i),
+ PHY_C20_A_TX_CNTX_CFG(display, i),
pll_state->tx[i]);
else
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
- PHY_C20_B_TX_CNTX_CFG(i915, i),
+ PHY_C20_B_TX_CNTX_CFG(display, i),
pll_state->tx[i]);
}
@@ -2583,11 +2631,11 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
if (cntx)
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
- PHY_C20_A_CMN_CNTX_CFG(i915, i),
+ PHY_C20_A_CMN_CNTX_CFG(display, i),
pll_state->cmn[i]);
else
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
- PHY_C20_B_CMN_CNTX_CFG(i915, i),
+ PHY_C20_B_CMN_CNTX_CFG(display, i),
pll_state->cmn[i]);
}
@@ -2596,22 +2644,22 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
if (cntx)
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
- PHY_C20_A_MPLLB_CNTX_CFG(i915, i),
+ PHY_C20_A_MPLLB_CNTX_CFG(display, i),
pll_state->mpllb[i]);
else
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
- PHY_C20_B_MPLLB_CNTX_CFG(i915, i),
+ PHY_C20_B_MPLLB_CNTX_CFG(display, i),
pll_state->mpllb[i]);
}
} else {
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
- PHY_C20_A_MPLLA_CNTX_CFG(i915, i),
+ PHY_C20_A_MPLLA_CNTX_CFG(display, i),
pll_state->mplla[i]);
else
intel_c20_sram_write(encoder, INTEL_CX0_LANE0,
- PHY_C20_B_MPLLA_CNTX_CFG(i915, i),
+ PHY_C20_B_MPLLA_CNTX_CFG(display, i),
pll_state->mplla[i]);
}
}
@@ -2678,10 +2726,10 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool lane_reversal)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 val = 0;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
XELPDP_PORT_REVERSAL,
lane_reversal ? XELPDP_PORT_REVERSAL : 0);
@@ -2703,7 +2751,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
else
val |= crtc_state->dpll_hw_state.cx0pll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLA |
XELPDP_SSC_ENABLE_PLLB, val);
@@ -2734,48 +2782,49 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
u8 lane_mask, u8 state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
- i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(i915, port);
+ i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(display, port);
int lane;
- intel_de_rmw(i915, buf_ctl2_reg,
+ intel_de_rmw(display, buf_ctl2_reg,
intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK),
intel_cx0_get_powerdown_state(lane_mask, state));
/* Wait for pending transactions. */
for_each_cx0_lane_in_mask(lane_mask, lane)
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(i915, port, lane),
+ if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING,
XELPDP_MSGBUS_TIMEOUT_SLOW)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n",
phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
}
- intel_de_rmw(i915, buf_ctl2_reg,
+ intel_de_rmw(display, buf_ctl2_reg,
intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES),
intel_cx0_get_powerdown_update(lane_mask));
/* Update Timeout Value */
- if (intel_de_wait_custom(i915, buf_ctl2_reg,
+ if (intel_de_wait_custom(display, buf_ctl2_reg,
intel_cx0_get_powerdown_update(lane_mask), 0,
XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
- drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
+ drm_warn(display->drm,
+ "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
}
static void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
XELPDP_POWER_STATE_READY_MASK,
XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(i915, port),
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL3(display, port),
XELPDP_POWER_STATE_ACTIVE_MASK |
XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
@@ -2807,7 +2856,7 @@ static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask)
static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
bool lane_reversal)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
@@ -2820,48 +2869,51 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);
- if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL1(i915, port),
+ if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL1(display, port),
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_PHY_READY,
XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
- drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
+ drm_warn(display->drm,
+ "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset,
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset,
lane_pipe_reset);
- if (intel_de_wait_custom(i915, XELPDP_PORT_BUF_CTL2(i915, port),
+ if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
lane_phy_current_status, lane_phy_current_status,
XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
- drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
+ drm_warn(display->drm,
+ "PHY %c failed to bring out of Lane reset after %dus.\n",
phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
- if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, port),
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
intel_cx0_get_pclk_refclk_ack(lane_mask),
XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
- drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
+ drm_warn(display->drm,
+ "PHY %c failed to request refclk after %dus.\n",
phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
CX0_P2_STATE_RESET);
intel_cx0_setup_powerdown(encoder);
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(i915, port), lane_pipe_reset, 0);
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0);
- if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(i915, port),
+ if (intel_de_wait_for_clear(display, XELPDP_PORT_BUF_CTL2(display, port),
lane_phy_current_status,
XELPDP_PORT_RESET_END_TIMEOUT))
- drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n",
+ drm_warn(display->drm,
+ "PHY %c failed to bring out of Lane reset after %dms.\n",
phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
}
-static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
- struct intel_encoder *encoder, int lane_count,
+static void intel_cx0_program_phy_lane(struct intel_encoder *encoder, int lane_count,
bool lane_reversal)
{
int i;
@@ -2930,10 +2982,10 @@ static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask)
static void intel_cx0pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ bool lane_reversal = dig_port->lane_reversal;
u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
INTEL_CX0_LANE0;
intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -2962,15 +3014,15 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
/* 5. Program PHY internal PLL internal registers. */
if (intel_encoder_is_c10phy(encoder))
- intel_c10_pll_program(i915, crtc_state, encoder);
+ intel_c10_pll_program(display, crtc_state, encoder);
else
- intel_c20_pll_program(i915, crtc_state, encoder);
+ intel_c20_pll_program(display, crtc_state, encoder);
/*
* 6. Program the enabled and disabled owned PHY lane
* transmitters over message bus
*/
- intel_cx0_program_phy_lane(i915, encoder, crtc_state->lane_count, lane_reversal);
+ intel_cx0_program_phy_lane(encoder, crtc_state->lane_count, lane_reversal);
/*
* 7. Follow the Display Voltage Frequency Switching - Sequence
@@ -2981,23 +3033,23 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
* 8. Program DDI_CLK_VALFREQ to match intended DDI
* clock frequency.
*/
- intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port),
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port),
crtc_state->port_clock);
/*
* 9. Set PORT_CLOCK_CTL register PCLK PLL Request
* LN<Lane for maxPCLK> to "1" to enable PLL.
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES),
intel_cx0_get_pclk_pll_request(maxpclk_lane));
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
- if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
intel_cx0_get_pclk_pll_ack(maxpclk_lane),
XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
- drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
+ drm_warn(display->drm, "Port %c PLL not locked after %dus.\n",
phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
/*
@@ -3011,15 +3063,19 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- u32 clock;
- u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
+ struct intel_display *display = to_intel_display(encoder);
+ u32 clock, val;
- clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
+
+ if (DISPLAY_VER(display) >= 30)
+ clock = REG_FIELD_GET(XE3_DDI_CLOCK_SELECT_MASK, val);
+ else
+ clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
- drm_WARN_ON(&i915->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
- drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
- drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_ACK));
+ drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
+ drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
+ drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_ACK));
switch (clock) {
case XELPDP_DDI_CLOCK_SELECT_TBT_162:
@@ -3030,13 +3086,18 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
return 540000;
case XELPDP_DDI_CLOCK_SELECT_TBT_810:
return 810000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_312_5:
+ return 1000000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_625:
+ return 2000000;
default:
MISSING_CASE(clock);
return 162000;
}
}
-static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock)
+static int intel_mtl_tbt_clock_select(struct intel_display *display,
+ int clock)
{
switch (clock) {
case 162000:
@@ -3047,6 +3108,18 @@ static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock)
return XELPDP_DDI_CLOCK_SELECT_TBT_540;
case 810000:
return XELPDP_DDI_CLOCK_SELECT_TBT_810;
+ case 1000000:
+ if (DISPLAY_VER(display) < 30) {
+ drm_WARN(display->drm, 1, "UHBR10 not supported on this platform\n");
+ return XELPDP_DDI_CLOCK_SELECT_TBT_162;
+ }
+ return XELPDP_DDI_CLOCK_SELECT_TBT_312_5;
+ case 2000000:
+ if (DISPLAY_VER(display) < 30) {
+ drm_WARN(display->drm, 1, "UHBR20 not supported on this platform\n");
+ return XELPDP_DDI_CLOCK_SELECT_TBT_162;
+ }
+ return XELPDP_DDI_CLOCK_SELECT_TBT_625;
default:
MISSING_CASE(clock);
return XELPDP_DDI_CLOCK_SELECT_TBT_162;
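
Putting intel_mtl_tbt_calc_port_clock() and intel_mtl_tbt_clock_select() side by side: the four legacy TBT rates keep their 4-bit codes, while UHBR10/UHBR20 (port_clock 1000000/2000000) get the new 5-bit codes 0x18/0x19 and are rejected below display version 30. A table-driven sketch of that mapping (not the driver's switch, just the same relation):

    #include <stdio.h>

    /* Encodings from the XELPDP_DDI_CLOCK_SELECT_TBT_* defines. */
    static const struct { int port_clock; unsigned int sel; } tbt_map[] = {
    	{  162000, 0xc }, {  270000, 0xd }, {  540000, 0xe }, {  810000, 0xf },
    	{ 1000000, 0x18 }, { 2000000, 0x19 },
    };

    static int tbt_clock_select(int port_clock, int display_ver)
    {
    	size_t i;

    	for (i = 0; i < sizeof(tbt_map) / sizeof(tbt_map[0]); i++) {
    		if (tbt_map[i].port_clock != port_clock)
    			continue;
    		if (tbt_map[i].sel > 0xf && display_ver < 30)
    			return -1;	/* UHBR rate on a pre-Xe3 platform */
    		return tbt_map[i].sel;
    	}
    	return -1;
    }

    int main(void)
    {
    	printf("UHBR10 on Xe3: 0x%x\n", tbt_clock_select(1000000, 30));
    	printf("UHBR10 on MTL: %d\n", tbt_clock_select(1000000, 14));
    	return 0;
    }
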
@@ -3056,21 +3129,32 @@ static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock)
static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
u32 val = 0;
+ u32 mask;
/*
* 1. Program PORT_CLOCK_CTL REGISTER to configure
* clock muxes, gating and SSC
*/
- val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock));
+
+ if (DISPLAY_VER(display) >= 30) {
+ mask = XE3_DDI_CLOCK_SELECT_MASK;
+ val |= XE3_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
+ } else {
+ mask = XELPDP_DDI_CLOCK_SELECT_MASK;
+ val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock));
+ }
+
+ mask |= XELPDP_FORWARD_CLOCK_UNGATE;
val |= XELPDP_FORWARD_CLOCK_UNGATE;
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
- XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val);
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ mask, val);
/* 2. Read back PORT_CLOCK_CTL REGISTER */
- val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
+ val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
/*
* 3. Follow the Display Voltage Frequency Switching - Sequence
@@ -3081,14 +3165,15 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
* 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL.
*/
val |= XELPDP_TBT_CLOCK_REQUEST;
- intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port), val);
+ intel_de_write(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), val);
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
- if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_TBT_CLOCK_ACK,
XELPDP_TBT_CLOCK_ACK,
100, 0, NULL))
- drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
+ drm_warn(display->drm,
+ "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
/*
@@ -3100,7 +3185,7 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
* 7. Program DDI_CLK_VALFREQ to match intended DDI
* clock frequency.
*/
- intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port),
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port),
crtc_state->port_clock);
}
@@ -3117,12 +3202,14 @@ void intel_mtl_pll_enable(struct intel_encoder *encoder,
static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_encoder_is_c10phy(encoder))
return CX0_P2PG_STATE_DISABLE;
- if (IS_BATTLEMAGE(i915) && encoder->port == PORT_A)
+ if ((IS_BATTLEMAGE(i915) && encoder->port == PORT_A) ||
+ (DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP))
return CX0_P2PG_STATE_DISABLE;
return CX0_P4PG_STATE_DISABLE;
@@ -3130,7 +3217,7 @@ static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
static void intel_cx0pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -3147,21 +3234,22 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
* 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK>
* to "0" to disable PLL.
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) |
intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0);
/* 4. Program DDI_CLK_VALFREQ to 0. */
- intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0);
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0);
/*
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
*/
- if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
- drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n",
+ drm_warn(display->drm,
+ "Port %c PLL not unlocked after %dus.\n",
phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
/*
@@ -3170,9 +3258,9 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
*/
/* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK, 0);
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_FORWARD_CLOCK_UNGATE, 0);
intel_cx0_phy_transaction_end(encoder, wakeref);
@@ -3180,7 +3268,7 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
/*
@@ -3191,13 +3279,14 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
/*
* 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL.
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_TBT_CLOCK_REQUEST, 0);
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
- if (intel_de_wait_custom(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
- drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
+ drm_warn(display->drm,
+ "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
/*
@@ -3208,12 +3297,12 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
/*
* 5. Program PORT CLOCK CTRL register to disable and gate clocks
*/
- intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port),
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
XELPDP_DDI_CLOCK_SELECT_MASK |
XELPDP_FORWARD_CLOCK_UNGATE, 0);
/* 6. Program DDI_CLK_VALFREQ to 0. */
- intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0);
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0);
}
void intel_mtl_pll_disable(struct intel_encoder *encoder)
@@ -3230,13 +3319,15 @@ enum icl_port_dpll_id
intel_mtl_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
+ u32 val, clock;
+
/*
* TODO: Determine the PLL type from the SW state, once MTL PLL
* handling is done via the standard shared DPLL framework.
*/
- u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(i915, encoder->port));
- u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+ val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port));
+ clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK)
@@ -3250,28 +3341,28 @@ static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
struct intel_encoder *encoder,
struct intel_c10pll_state *mpllb_hw_state)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_c10pll_state *mpllb_sw_state = &state->dpll_hw_state.cx0pll.c10;
int i;
for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
u8 expected = mpllb_sw_state->pll[i];
- I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name, i,
- expected, mpllb_hw_state->pll[i]);
+ INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->pll[i] != expected,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name, i,
+ expected, mpllb_hw_state->pll[i]);
}
- I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name,
- mpllb_sw_state->tx, mpllb_hw_state->tx);
+ INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->tx != mpllb_sw_state->tx,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ mpllb_sw_state->tx, mpllb_hw_state->tx);
- I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name,
- mpllb_sw_state->cmn, mpllb_hw_state->cmn);
+ INTEL_DISPLAY_STATE_WARN(display, mpllb_hw_state->cmn != mpllb_sw_state->cmn,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ mpllb_sw_state->cmn, mpllb_hw_state->cmn);
}
void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
@@ -3357,64 +3448,64 @@ static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
struct intel_encoder *encoder,
struct intel_c20pll_state *mpll_hw_state)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_c20pll_state *mpll_sw_state = &state->dpll_hw_state.cx0pll.c20;
bool sw_use_mpllb = intel_c20phy_use_mpllb(mpll_sw_state);
bool hw_use_mpllb = intel_c20phy_use_mpllb(mpll_hw_state);
int clock = intel_c20pll_calc_port_clock(encoder, mpll_sw_state);
int i;
- I915_STATE_WARN(i915, mpll_hw_state->clock != clock,
- "[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)",
- crtc->base.base.id, crtc->base.name,
- mpll_sw_state->clock, mpll_hw_state->clock);
+ INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->clock != clock,
+ "[CRTC:%d:%s] mismatch in C20: Register CLOCK (expected %d, found %d)",
+ crtc->base.base.id, crtc->base.name,
+ mpll_sw_state->clock, mpll_hw_state->clock);
- I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
- "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
- crtc->base.base.id, crtc->base.name,
- sw_use_mpllb, hw_use_mpllb);
+ INTEL_DISPLAY_STATE_WARN(display, sw_use_mpllb != hw_use_mpllb,
+ "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
+ crtc->base.base.id, crtc->base.name,
+ sw_use_mpllb, hw_use_mpllb);
if (hw_use_mpllb) {
for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
- I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
- "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
- crtc->base.base.id, crtc->base.name, i,
- mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
+ INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
+ "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
}
} else {
for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
- I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
- "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
- crtc->base.base.id, crtc->base.name, i,
- mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
+ INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
+ "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
}
}
for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
- I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i],
- "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)",
- crtc->base.base.id, crtc->base.name, i,
- mpll_sw_state->tx[i], mpll_hw_state->tx[i]);
+ INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->tx[i] != mpll_sw_state->tx[i],
+ "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->tx[i], mpll_hw_state->tx[i]);
}
for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) {
- I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i],
- "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)",
- crtc->base.base.id, crtc->base.name, i,
- mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]);
+ INTEL_DISPLAY_STATE_WARN(display, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i],
+ "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]);
}
}
void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
struct intel_cx0pll_state mpll_hw_state = {};
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
if (!new_crtc_state->hw.active)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index 9004b99bb51f..711168882684 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -7,17 +7,15 @@
#define __INTEL_CX0_PHY_H__
#include <linux/types.h>
-#include <linux/bitfield.h>
-#include <linux/bits.h>
enum icl_port_dpll_id;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_c10pll_state;
struct intel_c20pll_state;
-struct intel_cx0pll_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_cx0pll_state;
+struct intel_display;
struct intel_encoder;
struct intel_hdmi;
@@ -35,7 +33,7 @@ void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state);
-void intel_cx0pll_dump_hw_state(struct drm_i915_private *dev_priv,
+void intel_cx0pll_dump_hw_state(struct intel_display *display,
const struct intel_cx0pll_state *hw_state);
void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index ab3ae110b68f..da154ff26b96 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -9,6 +9,11 @@
#include "i915_reg_defs.h"
#include "intel_display_limits.h"
+/* DDI Buffer Control */
+#define _DDI_CLK_VALFREQ_A 0x64030
+#define _DDI_CLK_VALFREQ_B 0x64130
+#define DDI_CLK_VALFREQ(port) _MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
+
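
_MMIO_PORT(port, a, b) interpolates per-port addresses from the A/B pair, so with 0x64030/0x64130 consecutive ports sit 0x100 apart. A standalone reproduction of just the arithmetic (i915's real macro goes through _PICK_EVEN(); PORT_A..PORT_C as 0..2 is assumed here):

    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C };

    /* Base address for A, plus port times the A-to-B stride. */
    #define _PORT_ADDR(port, a, b) ((a) + (port) * ((b) - (a)))

    int main(void)
    {
    	printf("DDI_CLK_VALFREQ(PORT_A) = 0x%x\n",
    	       _PORT_ADDR(PORT_A, 0x64030, 0x64130));
    	printf("DDI_CLK_VALFREQ(PORT_C) = 0x%x\n",
    	       _PORT_ADDR(PORT_C, 0x64030, 0x64130));	/* 0x64230 */
    	return 0;
    }
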
/*
* Wrapper macro to convert from port number to the index used in some of the
* registers. For Display version 20 and above it converts the port number to a
@@ -187,7 +192,9 @@
#define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19)
#define XELPDP_TBT_CLOCK_ACK REG_BIT(18)
#define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
+#define XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12)
#define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
+#define XE3_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XE3_DDI_CLOCK_SELECT_MASK, val)
#define XELPDP_DDI_CLOCK_SELECT_NONE 0x0
#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8
#define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9
@@ -195,11 +202,20 @@
#define XELPDP_DDI_CLOCK_SELECT_TBT_270 0xd
#define XELPDP_DDI_CLOCK_SELECT_TBT_540 0xe
#define XELPDP_DDI_CLOCK_SELECT_TBT_810 0xf
+#define XELPDP_DDI_CLOCK_SELECT_TBT_312_5 0x18
+#define XELPDP_DDI_CLOCK_SELECT_TBT_625 0x19
#define XELPDP_FORWARD_CLOCK_UNGATE REG_BIT(10)
#define XELPDP_LANE1_PHY_CLOCK_SELECT REG_BIT(8)
#define XELPDP_SSC_ENABLE_PLLA REG_BIT(1)
#define XELPDP_SSC_ENABLE_PLLB REG_BIT(0)
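
The new TBT select codes 0x18 and 0x19 need five bits, which is exactly why XE3_DDI_CLOCK_SELECT_MASK grows to bits 16:12 while the pre-Xe3 mask stays at 15:12. A quick standalone check of the field math, with GENMASK/FIELD_PREP written out in plain C:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK32(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))

    static uint32_t field_prep(uint32_t mask, uint32_t val)
    {
    	return (val << __builtin_ctz(mask)) & mask;
    }

    int main(void)
    {
    	uint32_t xelpdp_mask = GENMASK32(15, 12);	/* 4 bits: max 0xf */
    	uint32_t xe3_mask = GENMASK32(16, 12);		/* 5 bits: max 0x1f */

    	/* 0x19 (TBT 625 MHz) truncates to 0x9 in the narrow field... */
    	printf("narrow: 0x%x\n", field_prep(xelpdp_mask, 0x19) >> 12);
    	/* ...but survives intact in the widened Xe3 field. */
    	printf("wide:   0x%x\n", field_prep(xe3_mask, 0x19) >> 12);
    	return 0;
    }
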
+#define TCSS_DISP_MAILBOX_IN_CMD _MMIO(0x161300)
+#define TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY REG_BIT(31)
+#define TCSS_DISP_MAILBOX_IN_CMD_CMD_MASK REG_GENMASK(7, 0)
+#define TCSS_DISP_MAILBOX_IN_CMD_DATA(val) REG_FIELD_PREP(TCSS_DISP_MAILBOX_IN_CMD_CMD_MASK, val)
+
+#define TCSS_DISP_MAILBOX_IN_DATA _MMIO(0x161304)
+
/* C10 Vendor Registers */
#define PHY_C10_VDR_PLL(idx) (0xC00 + (idx))
#define C10_PLL0_FRACEN REG_BIT8(4)
@@ -273,13 +289,15 @@
#define _XE2HPD_C20_A_MPLLB_CFG 0xCCC2
#define _XE2HPD_C20_B_MPLLB_CFG 0xCCB6
-#define _IS_XE2HPD_C20(i915) (DISPLAY_VER_FULL(i915) == IP_VER(14, 1))
+#define _IS_XE2HPD_C20(i915) (DISPLAY_VERx100(i915) == 1401)
#define PHY_C20_A_TX_CNTX_CFG(i915, idx) \
((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_A_TX_CNTX_CFG : _MTL_C20_A_TX_CNTX_CFG) - (idx))
#define PHY_C20_B_TX_CNTX_CFG(i915, idx) \
((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_B_TX_CNTX_CFG : _MTL_C20_B_TX_CNTX_CFG) - (idx))
#define C20_PHY_TX_RATE REG_GENMASK(2, 0)
+#define C20_PHY_TX_MISC_MASK REG_GENMASK16(7, 0)
+#define C20_PHY_TX_MISC(val) REG_FIELD_PREP16(C20_PHY_TX_MISC_MASK, (val))
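
With these helpers, the tx[1] assignment in intel_c20_compute_hdmi_tmds_pll() becomes 0x9800 | C20_PHY_TX_MISC(tx_misc): 0x9805 on display version 20+ and plain 0x9800 before that. Worked out with plain-C stand-ins for REG_GENMASK16/REG_FIELD_PREP16:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK16(h, l) \
    	((uint16_t)((0xffffu >> (15 - (h))) & (0xffffu << (l))))
    #define C20_PHY_TX_MISC_MASK GENMASK16(7, 0)
    #define C20_PHY_TX_MISC(v) ((uint16_t)((v) & C20_PHY_TX_MISC_MASK))

    int main(void)
    {
    	uint16_t tx_misc_xe2 = 0x5;	/* DISPLAY_VER >= 20 */
    	uint16_t tx_misc_mtl = 0x0;	/* older platforms */

    	printf("ver>=20: tx[1] = 0x%04x\n", 0x9800 | C20_PHY_TX_MISC(tx_misc_xe2));
    	printf("ver<20:  tx[1] = 0x%04x\n", 0x9800 | C20_PHY_TX_MISC(tx_misc_mtl));
    	return 0;	/* 0x9805 vs 0x9800 */
    }
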
#define PHY_C20_A_CMN_CNTX_CFG(i915, idx) \
((_IS_XE2HPD_C20(i915) ? _XE2HPD_C20_A_CMN_CNTX_CFG : _MTL_C20_A_CMN_CNTX_CFG) - (idx))
@@ -363,4 +381,7 @@
#define HDMI_DIV_MASK REG_GENMASK16(2, 0)
#define HDMI_DIV(val) REG_FIELD_PREP16(HDMI_DIV_MASK, val)
+#define PICA_PHY_CONFIG_CONTROL _MMIO(0x16FE68)
+#define EDP_ON_TYPEC REG_BIT(31)
+
#endif /* __INTEL_CX0_REG_DEFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index b1c294236cc8..acb986bc1f33 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -28,6 +28,7 @@
#include <linux/iopoll.h>
#include <linux/string_helpers.h>
+#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_privacy_screen_consumer.h>
@@ -54,6 +55,7 @@
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
+#include "intel_dp_test.h"
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dsi.h"
@@ -334,10 +336,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */
- intel_dp->DP = dig_port->saved_port_bits |
- DDI_PORT_WIDTH(crtc_state->lane_count) |
+ intel_dp->DP = DDI_PORT_WIDTH(crtc_state->lane_count) |
DDI_BUF_TRANS_SELECT(0);
+ if (dig_port->lane_reversal)
+ intel_dp->DP |= DDI_BUF_PORT_REVERSAL;
+ if (dig_port->ddi_a_4_lanes)
+ intel_dp->DP |= DDI_A_4_LANES;
+
if (DISPLAY_VER(i915) >= 14) {
if (intel_dp_is_uhbr(crtc_state))
intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT;
@@ -454,17 +460,20 @@ static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
}
static void
-intel_ddi_config_transcoder_dp2(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+intel_ddi_config_transcoder_dp2(const struct intel_crtc_state *crtc_state,
+ bool enable)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
- if (intel_dp_is_uhbr(crtc_state))
+ if (!HAS_DP20(display))
+ return;
+
+ if (enable && intel_dp_is_uhbr(crtc_state))
val = TRANS_DP2_128B132B_CHANNEL_CODING;
- intel_de_write(i915, TRANS_DP2_CTL(cpu_transcoder), val);
+ intel_de_write(display, TRANS_DP2_CTL(cpu_transcoder), val);
}
/*
@@ -553,7 +562,8 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
temp |= (crtc_state->fdi_lanes - 1) << 1;
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
if (intel_dp_is_uhbr(crtc_state))
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
else
@@ -616,9 +626,10 @@ void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
/*
* Same as intel_ddi_enable_transcoder_func(), but it does not set the enable
- * bit.
+ * bit for the DDI function and enables the DP2 configuration. Called for all
+ * transcoder types.
*/
-static void
+void
intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -627,18 +638,27 @@ intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 ctl;
+ intel_ddi_config_transcoder_dp2(crtc_state, true);
+
ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
ctl &= ~TRANS_DDI_FUNC_ENABLE;
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
ctl);
}
+/*
+ * Disable the DDI function and port syncing.
+ * For SST, pre-TGL MST, TGL+ MST-slave transcoders: deselect the DDI port,
+ * SST/MST mode and disable the DP2 configuration. For TGL+ MST-master
+ * transcoders these are done later in intel_ddi_post_disable_dp().
+ */
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
u32 ctl;
if (DISPLAY_VER(dev_priv) >= 11)
@@ -658,7 +678,8 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
if (DISPLAY_VER(dev_priv) >= 12) {
- if (!intel_dp_mst_is_master_trans(crtc_state)) {
+ if (!intel_dp_mst_is_master_trans(crtc_state) ||
+ (!is_mst && intel_dp_is_uhbr(crtc_state))) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
}
@@ -669,6 +690,9 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder),
ctl);
+ if (intel_dp_mst_is_slave_trans(crtc_state))
+ intel_ddi_config_transcoder_dp2(crtc_state, false);
+
if (intel_has_quirk(display, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
drm_dbg_kms(display->drm, "Quirk Increase DDI disabled time\n");
@@ -699,15 +723,15 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
- struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(intel_connector);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
enum transcoder cpu_transcoder;
intel_wakeref_t wakeref;
enum pipe pipe = 0;
- u32 tmp;
+ u32 ddi_mode;
bool ret;
wakeref = intel_display_power_get_if_enabled(dev_priv,
@@ -715,6 +739,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
if (!wakeref)
return false;
+ /* Note: This returns false for DP MST primary encoders. */
if (!encoder->get_hw_state(encoder, &pipe)) {
ret = false;
goto out;
@@ -725,38 +750,28 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
else
cpu_transcoder = (enum transcoder) pipe;
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ ddi_mode = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ TRANS_DDI_MODE_SELECT_MASK;
- switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
- case TRANS_DDI_MODE_SELECT_HDMI:
- case TRANS_DDI_MODE_SELECT_DVI:
+ if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI ||
+ ddi_mode == TRANS_DDI_MODE_SELECT_DVI) {
ret = type == DRM_MODE_CONNECTOR_HDMIA;
- break;
-
- case TRANS_DDI_MODE_SELECT_DP_SST:
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && !HAS_DP20(display)) {
+ ret = type == DRM_MODE_CONNECTOR_VGA;
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_SST) {
ret = type == DRM_MODE_CONNECTOR_eDP ||
- type == DRM_MODE_CONNECTOR_DisplayPort;
- break;
-
- case TRANS_DDI_MODE_SELECT_DP_MST:
- /* if the transcoder is in MST state then
- * connector isn't connected */
+ type == DRM_MODE_CONNECTOR_DisplayPort;
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
+ /*
+ * encoder->get_hw_state() should have bailed out on MST. This
+ * must be SST and non-eDP.
+ */
+ ret = type == DRM_MODE_CONNECTOR_DisplayPort;
+ } else if (drm_WARN_ON(display->drm, ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST)) {
+ /* encoder->get_hw_state() should have bailed out on MST. */
ret = false;
- break;
-
- case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
- if (HAS_DP20(dev_priv))
- /* 128b/132b */
- ret = false;
- else
- /* FDI */
- ret = type == DRM_MODE_CONNECTOR_VGA;
- break;
-
- default:
+ } else {
ret = false;
- break;
}
out:
@@ -768,13 +783,13 @@ out:
static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
u8 *pipe_mask, bool *is_dp_mst)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = encoder->port;
intel_wakeref_t wakeref;
enum pipe p;
u32 tmp;
- u8 mst_pipe_mask;
+ u8 mst_pipe_mask = 0, dp128b132b_pipe_mask = 0;
*pipe_mask = 0;
*is_dp_mst = false;
@@ -811,10 +826,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
goto out;
}
- mst_pipe_mask = 0;
for_each_pipe(dev_priv, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
- unsigned int port_mask, ddi_select;
+ u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
@@ -838,10 +852,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if ((tmp & port_mask) != ddi_select)
continue;
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST ||
- (HAS_DP20(dev_priv) &&
- (tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B))
+ ddi_mode = tmp & TRANS_DDI_MODE_SELECT_MASK;
+
+ if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST)
mst_pipe_mask |= BIT(p);
+ else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display))
+ dp128b132b_pipe_mask |= BIT(p);
*pipe_mask |= BIT(p);
}
@@ -851,6 +867,23 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
"No pipe for [ENCODER:%d:%s] found\n",
encoder->base.base.id, encoder->base.name);
+ if (!mst_pipe_mask && dp128b132b_pipe_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ /*
+ * If we don't have 8b/10b MST, but have more than one
+ * transcoder in 128b/132b mode, we know it must be 128b/132b
+ * MST.
+ *
+ * Otherwise, we fall back to checking the current MST
+ * state. It's not accurate for hardware takeover at probe, but
+ * we don't expect MST to have been enabled at that point, and
+ * can assume it's SST.
+ */
+ if (hweight8(dp128b132b_pipe_mask) > 1 || intel_dp->is_mst)
+ mst_pipe_mask = dp128b132b_pipe_mask;
+ }
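
The inference above is a pigeonhole argument: more than one transcoder in 128b/132b mode on a single port can only mean MST streams, while a single transcoder is ambiguous and the software is_mst flag breaks the tie. The same predicate as a standalone function (hweight8 replaced by the popcount builtin):

    #include <stdbool.h>
    #include <stdio.h>

    static bool infer_128b132b_mst(unsigned char dp128b132b_pipe_mask,
    			       bool sw_is_mst)
    {
    	/* Two or more pipes on one port can only be MST streams;
    	 * a single pipe is ambiguous, so trust the software state. */
    	return __builtin_popcount(dp128b132b_pipe_mask) > 1 || sw_is_mst;
    }

    int main(void)
    {
    	printf("%d\n", infer_128b132b_mst(0x3, false));	/* pipes A+B: MST */
    	printf("%d\n", infer_128b132b_mst(0x1, false));	/* one pipe: SST */
    	return 0;
    }
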
+
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
drm_dbg_kms(&dev_priv->drm,
"Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
@@ -861,9 +894,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
drm_dbg_kms(&dev_priv->drm,
- "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ "Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe masks: all %02x, MST %02x, 128b/132b %02x)\n",
encoder->base.base.id, encoder->base.name,
- *pipe_mask, mst_pipe_mask);
+ *pipe_mask, mst_pipe_mask, dp128b132b_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
@@ -2195,8 +2228,8 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
return DP_TP_CTL(encoder->port);
}
-i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -2207,6 +2240,25 @@ i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
return DP_TP_STATUS(encoder->port);
}
+void intel_ddi_clear_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ intel_de_write(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT);
+}
+
+void intel_ddi_wait_for_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT, 1))
+ drm_err(display->drm, "Timed out waiting for ACT sent\n");
+}
+
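
intel_ddi_clear_act_sent() clears the sticky ACT-sent bit by writing it back, i.e. the status register behaves as write-one-to-clear, and intel_ddi_wait_for_act_sent() then polls for hardware to set it again. A toy simulation of that W1C handshake (the register behavior modeled here is an assumption inferred from how the helpers use it):

    #include <stdint.h>
    #include <stdio.h>

    #define DP_TP_STATUS_ACT_SENT (1u << 24)

    static uint32_t dp_tp_status = DP_TP_STATUS_ACT_SENT;	/* stale bit */

    /* W1C: writing a 1 clears the bit. */
    static void reg_write_w1c(uint32_t *reg, uint32_t val)
    {
    	*reg &= ~val;
    }

    int main(void)
    {
    	reg_write_w1c(&dp_tp_status, DP_TP_STATUS_ACT_SENT);
    	printf("after clear, ACT seen: %u\n",
    	       !!(dp_tp_status & DP_TP_STATUS_ACT_SENT));

    	dp_tp_status |= DP_TP_STATUS_ACT_SENT;	/* hardware sends ACT */
    	printf("after ACT,   ACT seen: %u\n",
    	       !!(dp_tp_status & DP_TP_STATUS_ACT_SENT));
    	return 0;
    }
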
static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable)
@@ -2235,7 +2287,7 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION,
enable ? DP_FEC_READY : 0) <= 0)
drm_dbg_kms(display->drm, "Failed to set FEC_READY to %s in the sink\n",
- enable ? "enabled" : "disabled");
+ str_enabled_disabled(enable));
if (enable &&
drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS,
@@ -2255,9 +2307,9 @@ static int read_fec_detected_status(struct drm_dp_aux *aux)
return status;
}
-static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
+static int wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
{
- struct drm_i915_private *i915 = to_i915(aux->drm_dev);
+ struct intel_display *display = to_intel_display(aux->drm_dev);
int mask = enabled ? DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED;
int status;
int err;
@@ -2266,57 +2318,92 @@ static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
status & mask || status < 0,
10000, 200000);
- if (!err && status >= 0)
- return;
+ if (err || status < 0) {
+ drm_dbg_kms(display->drm,
+ "Failed waiting for FEC %s to get detected: %d (status %d)\n",
+ str_enabled_disabled(enabled), err, status);
+ return err ? err : status;
+ }
- if (err == -ETIMEDOUT)
- drm_dbg_kms(&i915->drm, "Timeout waiting for FEC %s to get detected\n",
- str_enabled_disabled(enabled));
- else
- drm_dbg_kms(&i915->drm, "FEC detected status read error: %d\n", status);
+ return 0;
}
-void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- bool enabled)
+int intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool enabled)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int ret;
if (!crtc_state->fec_enable)
- return;
+ return 0;
if (enabled)
- ret = intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
+ ret = intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
else
- ret = intel_de_wait_for_clear(i915, dp_tp_status_reg(encoder, crtc_state),
+ ret = intel_de_wait_for_clear(display, dp_tp_status_reg(encoder, crtc_state),
DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
- if (ret)
- drm_err(&i915->drm,
+ if (ret) {
+ drm_err(display->drm,
"Timeout waiting for FEC live state to get %s\n",
str_enabled_disabled(enabled));
-
+ return ret;
+ }
/*
* At least the Synaptics MST hub doesn't set the detected flag for
* FEC decoding disabling so skip waiting for that.
*/
- if (enabled)
- wait_for_fec_detected(&intel_dp->aux, enabled);
+ if (enabled) {
+ ret = wait_for_fec_detected(&intel_dp->aux, enabled);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void intel_ddi_enable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
+ int i;
+ int ret;
if (!crtc_state->fec_enable)
return;
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
0, DP_TP_CTL_FEC_ENABLE);
+
+ if (DISPLAY_VER(display) < 30)
+ return;
+
+ ret = intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
+ if (!ret)
+ return;
+
+ for (i = 0; i < 3; i++) {
+ drm_dbg_kms(display->drm, "Retry FEC enabling\n");
+
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_FEC_ENABLE, 0);
+
+ ret = intel_ddi_wait_for_fec_status(encoder, crtc_state, false);
+ if (ret)
+ continue;
+
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
+ 0, DP_TP_CTL_FEC_ENABLE);
+
+ ret = intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
+ if (!ret)
+ return;
+ }
+
+ drm_err(display->drm, "Failed to enable FEC after retries\n");
}
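
The retry loop added for display version 30+ follows a fixed shape: if FEC never reports live, drive it off, confirm the off state actually latched, re-enable, and re-check, giving up after three rounds. The control flow distilled into a runnable sketch (the fec_* helpers are stand-ins for the DP_TP_CTL writes and status waits):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int attempts_until_live = 3;	/* pretend the link needs a few tries */

    static void fec_set(bool enable) { (void)enable; }

    static int fec_wait(bool enabled)
    {
    	if (!enabled)
    		return 0;
    	return --attempts_until_live > 0 ? -ETIMEDOUT : 0;
    }

    static int enable_fec_with_retries(void)
    {
    	int i;

    	fec_set(true);
    	if (!fec_wait(true))
    		return 0;

    	for (i = 0; i < 3; i++) {
    		fec_set(false);
    		if (fec_wait(false))
    			continue;	/* could not even disable: retry */
    		fec_set(true);
    		if (!fec_wait(true))
    			return 0;
    	}
    	return -1;	/* mirrors the drm_err() fallout above */
    }

    int main(void)
    {
    	printf("fec enable: %d\n", enable_fec_with_retries());
    	return 0;
    }
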
static void intel_ddi_disable_fec(struct intel_encoder *encoder,
@@ -2340,12 +2427,10 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
if (intel_encoder_is_combo(encoder)) {
enum phy phy = intel_encoder_to_phy(encoder);
- bool lane_reversal =
- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
intel_combo_phy_power_up_lanes(i915, phy, false,
crtc_state->lane_count,
- lane_reversal);
+ dig_port->lane_reversal);
}
}
@@ -2470,25 +2555,24 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum port port = encoder->port;
- u32 val;
+ u32 val = 0;
- val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(i915, port));
- val &= ~XELPDP_PORT_WIDTH_MASK;
val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count));
- val &= ~XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK;
if (intel_dp_is_uhbr(crtc_state))
val |= XELPDP_PORT_BUF_PORT_DATA_40BIT;
else
val |= XELPDP_PORT_BUF_PORT_DATA_10BIT;
- if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
+ if (dig_port->lane_reversal)
val |= XELPDP_PORT_REVERSAL;
- intel_de_write(i915, XELPDP_PORT_BUF_CTL1(i915, port), val);
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
+ XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK,
+ val);
}
static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
@@ -2510,6 +2594,7 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int ret;
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2547,10 +2632,6 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/*
* 6.b If DP v2.0/128b mode - Configure TRANS_DP2_CTL register settings.
- */
- intel_ddi_config_transcoder_dp2(encoder, crtc_state);
-
- /*
* 6.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
* Transport Select
*/
@@ -2608,6 +2689,14 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 6.o Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
+ /* 7.a 128b/132b SST. */
+ if (!is_mst && intel_dp_is_uhbr(crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, tu slots */
+ ret = drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, crtc_state->dp_m_n.tu);
+ if (ret < 0)
+ intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+ }
+
if (!is_mst)
intel_dsc_dp_pps_write(encoder, crtc_state);
}
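
Even in 128b/132b SST the sink's payload table must be programmed, just with a single stream: VC payload ID 1, start slot 0, and the TU count from the computed m/n values; teardown repeats the call with 0 slots. A sketch of the three DPCD registers such a helper touches (addresses are the standard DP_PAYLOAD_ALLOCATE_* set; the driver delegates this to drm_dp_dpcd_write_payload(), and the 32 TUs below are an arbitrary example):

    #include <stdint.h>
    #include <stdio.h>

    #define DP_PAYLOAD_ALLOCATE_SET			0x1c0
    #define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT	0x1c1
    #define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT	0x1c2

    /* Stand-in for an AUX write of one DPCD byte. */
    static void dpcd_writeb(unsigned int addr, uint8_t val)
    {
    	printf("DPCD[0x%03x] <- 0x%02x\n", addr, val);
    }

    static void write_payload(uint8_t vcpid, uint8_t start_slot,
    			  uint8_t num_slots)
    {
    	dpcd_writeb(DP_PAYLOAD_ALLOCATE_SET, vcpid);
    	dpcd_writeb(DP_PAYLOAD_ALLOCATE_START_TIME_SLOT, start_slot);
    	dpcd_writeb(DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT, num_slots);
    }

    int main(void)
    {
    	write_payload(1, 0, 32);	/* enable: VCPID 1, slot 0, tu slots */
    	write_payload(1, 0, 0);		/* teardown: clear the allocation */
    	return 0;
    }
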
@@ -2621,6 +2710,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int ret;
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
@@ -2685,9 +2775,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_ddi_enable_transcoder_clock(encoder, crtc_state);
- if (HAS_DP20(dev_priv))
- intel_ddi_config_transcoder_dp2(encoder, crtc_state);
-
/*
* 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
* Transport Select
@@ -2750,6 +2837,13 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
+ if (!is_mst && intel_dp_is_uhbr(crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, tu slots */
+ ret = drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, crtc_state->dp_m_n.tu);
+ if (ret < 0)
+ intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+ }
+
if (!is_mst)
intel_dsc_dp_pps_write(encoder, crtc_state);
}
@@ -2826,9 +2920,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
- if (HAS_DP20(dev_priv))
+ if (HAS_DP20(display))
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
@@ -2836,9 +2930,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (crtc_state->has_panel_replay)
intel_psr_enable_sink(enc_to_intel_dp(encoder), crtc_state);
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -2875,6 +2969,24 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
crtc_state, conn_state);
}
+/*
+ * Note: Also called from the ->pre_enable of the first active MST stream
+ * encoder on its primary encoder.
+ *
+ * When called from DP MST code:
+ *
+ * - conn_state will be NULL
+ *
+ * - encoder will be the primary encoder (i.e. mst->primary)
+ *
+ * - the main connector associated with this port won't be active or linked to a
+ * crtc
+ *
+ * - crtc_state will be the state of the first stream to be activated on this
+ * port, and it may not be the same stream that will be deactivated last, but
+ * each stream should have a state that is identical when it comes to the DP
+ * link parameters
+ */
static void intel_ddi_pre_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -2884,19 +2996,6 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- /*
- * When called from DP MST code:
- * - conn_state will be NULL
- * - encoder will be the main encoder (ie. mst->primary)
- * - the main connector associated with this port
- * won't be active or linked to a crtc
- * - crtc_state will be the state of the first stream to
- * be activated on this port, and it may not be the same
- * stream that will be deactivated last, but each stream
- * should have a state that is identical when it comes to
- * the DP link parameteres
- */
-
drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -3052,6 +3151,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false);
+ intel_ddi_config_transcoder_dp2(old_crtc_state, false);
+
/*
* From TGL spec: "If single stream or multi-stream master transcoder:
* Configure Transcoder Clock select to direct no clock to the
@@ -3115,11 +3216,14 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *pipe_crtc;
+ bool is_hdmi = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI);
+ int i;
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
const struct intel_crtc_state *old_pipe_crtc_state =
intel_atomic_get_old_crtc_state(state, pipe_crtc);
@@ -3128,10 +3232,23 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(old_crtc_state)) {
+ /* VCPID 1, start slot 0 for 128b/132b, clear */
+ drm_dp_dpcd_write_payload(&intel_dp->aux, 1, 0, 0);
+
+ intel_ddi_clear_act_sent(encoder, old_crtc_state);
+
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
+
+ intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
+ drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
+ }
+
intel_ddi_disable_transcoder_func(old_crtc_state);
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
const struct intel_crtc_state *old_pipe_crtc_state =
intel_atomic_get_old_crtc_state(state, pipe_crtc);
@@ -3144,6 +3261,11 @@ static void intel_ddi_post_disable_hdmi_or_sst(struct intel_atomic_state *state,
}
}
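
The ACT (Allocation Change Trigger) handshake in this teardown mirrors the one added to intel_ddi_enable() further down, differing only in whether TRANS_DDI_DP_VC_PAYLOAD_ALLOC is set or cleared. A sketch of the common sequence, built only from calls visible in this patch (the helper name is hypothetical):

static void intel_ddi_128b132b_act_handshake(struct intel_encoder *encoder,
					     const struct intel_crtc_state *crtc_state,
					     bool enable)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	intel_ddi_clear_act_sent(encoder, crtc_state);

	/* Set the payload-allocate bit on enable, clear it on disable. */
	intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder),
		     enable ? 0 : TRANS_DDI_DP_VC_PAYLOAD_ALLOC,
		     enable ? TRANS_DDI_DP_VC_PAYLOAD_ALLOC : 0);

	intel_ddi_wait_for_act_sent(encoder, crtc_state);
	drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
}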
+/*
+ * Note: Also called from the ->post_disable of the last active MST stream
+ * encoder on its primary encoder. See also the comment for
+ * intel_ddi_pre_enable().
+ */
static void intel_ddi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -3174,6 +3296,11 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
old_conn_state);
}
+/*
+ * Note: Also called from the ->post_pll_disable of the last active MST stream
+ * encoder on its primary encoder. See also the comment for
+ * intel_ddi_pre_enable().
+ */
static void intel_ddi_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -3224,7 +3351,7 @@ static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
crtc_state);
}
-static void intel_enable_ddi_dp(struct intel_atomic_state *state,
+static void intel_ddi_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -3246,18 +3373,8 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
trans_port_sync_stop_link_train(state, encoder, crtc_state);
}
-/* FIXME bad home for this function */
-i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder)
-{
- return DISPLAY_VER(i915) >= 14 ?
- MTL_CHICKEN_TRANS(cpu_transcoder) :
- CHICKEN_TRANS(cpu_transcoder);
-}
-
static i915_reg_t
-gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
- enum port port)
+gen9_chicken_trans_reg_by_port(struct intel_display *display, enum port port)
{
static const enum transcoder trans[] = {
[PORT_A] = TRANSCODER_EDP,
@@ -3267,19 +3384,20 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
[PORT_E] = TRANSCODER_A,
};
- drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) < 9);
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) < 9);
- if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E))
+ if (drm_WARN_ON(display->drm, port < PORT_A || port > PORT_E))
port = PORT_A;
- return CHICKEN_TRANS(trans[port]);
+ return CHICKEN_TRANS(display, trans[port]);
}
-static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
@@ -3310,7 +3428,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
* the bits affect a specific DDI port rather than
* a specific transcoder.
*/
- i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
+ i915_reg_t reg = gen9_chicken_trans_reg_by_port(display, port);
u32 val;
val = intel_de_read(dev_priv, reg);
@@ -3350,14 +3468,20 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
* is filled with lane count, already set in the crtc_state.
* The same is required to be filled in PORT_BUF_CTL for C10/20 Phy.
*/
- buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE;
+ buf_ctl = DDI_BUF_CTL_ENABLE;
+
+ if (dig_port->lane_reversal)
+ buf_ctl |= DDI_BUF_PORT_REVERSAL;
+ if (dig_port->ddi_a_4_lanes)
+ buf_ctl |= DDI_A_4_LANES;
+
if (DISPLAY_VER(dev_priv) >= 14) {
u8 lane_count = mtl_get_port_width(crtc_state->lane_count);
u32 port_buf = 0;
port_buf |= XELPDP_PORT_WIDTH(lane_count);
- if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
+ if (dig_port->lane_reversal)
port_buf |= XELPDP_PORT_REVERSAL;
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
@@ -3377,41 +3501,67 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_wait_ddi_buf_active(encoder);
}
-static void intel_enable_ddi(struct intel_atomic_state *state,
+static void intel_ddi_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *pipe_crtc;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ bool is_hdmi = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+ int i;
+
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(crtc_state)) {
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
+
+ intel_de_write(display, TRANS_DP2_VFREQHIGH(cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
+ intel_de_write(display, TRANS_DP2_VFREQLOW(cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
+ }
intel_ddi_enable_transcoder_func(encoder, crtc_state);
/* Enable/Disable DP2.0 SDP split config before transcoder */
intel_audio_sdp_split_update(crtc_state);
+ /* 128b/132b SST */
+ if (!is_hdmi && intel_dp_is_uhbr(crtc_state)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_ddi_clear_act_sent(encoder, crtc_state);
+
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder), 0,
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+
+ intel_ddi_wait_for_act_sent(encoder, crtc_state);
+ drm_dp_dpcd_poll_act_handled(&intel_dp->aux, 0);
+ }
+
intel_enable_transcoder(crtc_state);
intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
- for_each_intel_crtc_in_pipe_mask_reverse(&i915->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(crtc_state)) {
+ for_each_pipe_crtc_modeset_enable(display, pipe_crtc, crtc_state, i) {
const struct intel_crtc_state *pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
intel_crtc_vblank_on(pipe_crtc_state);
}
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
+ if (is_hdmi)
+ intel_ddi_enable_hdmi(state, encoder, crtc_state, conn_state);
else
- intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
+ intel_ddi_enable_dp(state, encoder, crtc_state, conn_state);
intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
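
The TRANS_DP2_VFREQ programming at the top of intel_ddi_enable() splits the pixel clock, in Hz, into an upper and a lower 24-bit half before wrapping each in TRANS_DP2_VFREQ_PIXEL_CLOCK(). A worked example with an illustrative 533.25 MHz pixel clock:

	u64 crtc_clock_hz = 533250ull * 1000;	/* KHz(533250) = 533250000 Hz */
	u32 high = crtc_clock_hz >> 24;		/* 0x1f     -> TRANS_DP2_VFREQHIGH */
	u32 low = crtc_clock_hz & 0xffffff;	/* 0xc8bfd0 -> TRANS_DP2_VFREQLOW */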
-static void intel_disable_ddi_dp(struct intel_atomic_state *state,
+static void intel_ddi_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -3432,7 +3582,7 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
false);
}
-static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
+static void intel_ddi_disable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -3447,7 +3597,7 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
connector->base.id, connector->name);
}
-static void intel_disable_ddi(struct intel_atomic_state *state,
+static void intel_ddi_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -3457,10 +3607,10 @@ static void intel_disable_ddi(struct intel_atomic_state *state,
intel_hdcp_disable(to_intel_connector(old_conn_state->connector));
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
- intel_disable_ddi_hdmi(state, encoder, old_crtc_state,
+ intel_ddi_disable_hdmi(state, encoder, old_crtc_state,
old_conn_state);
else
- intel_disable_ddi_dp(state, encoder, old_crtc_state,
+ intel_ddi_disable_dp(state, encoder, old_crtc_state,
old_conn_state);
}
@@ -3477,6 +3627,13 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
drm_connector_update_privacy_screen(conn_state);
}
+static void intel_ddi_update_pipe_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ intel_hdmi_fastset_infoframes(encoder, crtc_state, conn_state);
+}
+
void intel_ddi_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -3488,6 +3645,10 @@ void intel_ddi_update_pipe(struct intel_atomic_state *state,
intel_ddi_update_pipe_dp(state, encoder, crtc_state,
conn_state);
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ intel_ddi_update_pipe_hdmi(encoder, crtc_state,
+ conn_state);
+
intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state);
}
@@ -3509,6 +3670,11 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
intel_update_active_dpll(state, pipe_crtc, encoder);
}
+/*
+ * Note: Also called from the ->pre_pll_enable of the first active MST stream
+ * encoder on its primary encoder. See also the comment for
+ * intel_ddi_pre_enable().
+ */
static void
intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -3552,9 +3718,9 @@ static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
u32 dp_tp_ctl;
@@ -3562,21 +3728,22 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
* TODO: Disabling and re-enabling the port should not be necessary
* to train with only a different voltage swing entry
*/
- dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ dp_tp_ctl = intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state));
if (dp_tp_ctl & DP_TP_CTL_ENABLE)
mtl_disable_ddi_buf(encoder, crtc_state);
/* 6.d Configure and enable DP_TP_CTL with link training pattern 1 selected */
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
dp_tp_ctl |= DP_TP_CTL_MODE_MST;
} else {
dp_tp_ctl |= DP_TP_CTL_MODE_SST;
if (crtc_state->enhanced_framing)
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
- intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ intel_de_write(display, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(display, dp_tp_ctl_reg(encoder, crtc_state));
/* 6.f Enable D2D Link */
mtl_ddi_enable_d2d(encoder);
@@ -3589,11 +3756,11 @@ static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
/* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
intel_dp->DP |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
- intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
- intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+ intel_de_write(display, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(display, DDI_BUF_CTL(port));
/* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */
intel_wait_ddi_buf_active(encoder);
@@ -3628,7 +3795,8 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
}
dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
+ intel_dp_is_uhbr(crtc_state)) {
dp_tp_ctl |= DP_TP_CTL_MODE_MST;
} else {
dp_tp_ctl |= DP_TP_CTL_MODE_SST;
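
Both link-retrain paths now apply the same rule: DP_TP_CTL selects the MST transport mode whenever the link uses 128b/132b channel coding (UHBR), even for a single stream. Were one to factor that out, the predicate would look like this sketch (function name hypothetical):

static bool intel_dp_tp_mode_is_mst(const struct intel_crtc_state *crtc_state)
{
	/* 128b/132b always uses the MST transport mode in DP_TP_CTL. */
	return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) ||
		intel_dp_is_uhbr(crtc_state);
}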
@@ -3821,29 +3989,141 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
crtc_state->sync_mode_slaves_mask);
}
+static void intel_ddi_read_func_ctl_dvi(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_HDMI);
+ if (DISPLAY_VER(display) >= 14)
+ crtc_state->lane_count =
+ ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+ else
+ crtc_state->lane_count = 4;
+}
+
+static void intel_ddi_read_func_ctl_hdmi(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ crtc_state->has_hdmi_sink = true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, crtc_state);
+
+ if (crtc_state->infoframes.enable)
+ crtc_state->has_infoframe = true;
+
+ if (ddi_func_ctl & TRANS_DDI_HDMI_SCRAMBLING)
+ crtc_state->hdmi_scrambling = true;
+ if (ddi_func_ctl & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
+ crtc_state->hdmi_high_tmds_clock_ratio = true;
+
+ intel_ddi_read_func_ctl_dvi(encoder, crtc_state, ddi_func_ctl);
+}
+
+static void intel_ddi_read_func_ctl_fdi(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+ crtc_state->enhanced_framing =
+ intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state)) &
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+}
+
+static void intel_ddi_read_func_ctl_dp_sst(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_DP);
+ crtc_state->lane_count =
+ ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ if (DISPLAY_VER(display) >= 12 &&
+ (ddi_func_ctl & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B)
+ crtc_state->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, ddi_func_ctl);
+
+ intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n);
+ intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder, &crtc_state->dp_m2_n2);
+
+ crtc_state->enhanced_framing =
+ intel_de_read(display, dp_tp_ctl_reg(encoder, crtc_state)) &
+ DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+
+ if (DISPLAY_VER(display) >= 11)
+ crtc_state->fec_enable =
+ intel_de_read(display,
+ dp_tp_ctl_reg(encoder, crtc_state)) & DP_TP_CTL_FEC_ENABLE;
+
+ if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
+ crtc_state->infoframes.enable |=
+ intel_lspcon_infoframes_enabled(encoder, crtc_state);
+ else
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, crtc_state);
+}
+
+static void intel_ddi_read_func_ctl_dp_mst(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ u32 ddi_func_ctl)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ crtc_state->output_types |= BIT(INTEL_OUTPUT_DP_MST);
+ crtc_state->lane_count =
+ ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
+ if (DISPLAY_VER(display) >= 12)
+ crtc_state->mst_master_transcoder =
+ REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, ddi_func_ctl);
+
+ intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n);
+
+ if (DISPLAY_VER(display) >= 11)
+ crtc_state->fec_enable =
+ intel_de_read(display,
+ dp_tp_ctl_reg(encoder, crtc_state)) & DP_TP_CTL_FEC_ENABLE;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, crtc_state);
+}
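
Each of the new read_func_ctl helpers decodes the lane count with the same expression; the hardware field stores lane count minus one. A sketch of a common decode using only the mask and shift shown above (helper name hypothetical):

static int intel_ddi_func_ctl_lane_count(u32 ddi_func_ctl)
{
	/* Raw field 0 -> x1, 1 -> x2, 3 -> x4. */
	return ((ddi_func_ctl & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
}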
+
static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- u32 temp, flags = 0;
+ u32 ddi_func_ctl, ddi_mode, flags = 0;
- temp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
- if (temp & TRANS_DDI_PHSYNC)
+ ddi_func_ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder));
+ if (ddi_func_ctl & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
- if (temp & TRANS_DDI_PVSYNC)
+ if (ddi_func_ctl & TRANS_DDI_PVSYNC)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->hw.adjusted_mode.flags |= flags;
- switch (temp & TRANS_DDI_BPC_MASK) {
+ switch (ddi_func_ctl & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
pipe_config->pipe_bpp = 18;
break;
@@ -3860,93 +4140,37 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
break;
}
- switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
- case TRANS_DDI_MODE_SELECT_HDMI:
- pipe_config->has_hdmi_sink = true;
-
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
-
- if (pipe_config->infoframes.enable)
- pipe_config->has_infoframe = true;
-
- if (temp & TRANS_DDI_HDMI_SCRAMBLING)
- pipe_config->hdmi_scrambling = true;
- if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
- pipe_config->hdmi_high_tmds_clock_ratio = true;
- fallthrough;
- case TRANS_DDI_MODE_SELECT_DVI:
- pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
- if (DISPLAY_VER(dev_priv) >= 14)
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
- else
- pipe_config->lane_count = 4;
- break;
- case TRANS_DDI_MODE_SELECT_DP_SST:
- if (encoder->type == INTEL_OUTPUT_EDP)
- pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
- else
- pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
-
- intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder,
- &pipe_config->dp_m_n);
- intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder,
- &pipe_config->dp_m2_n2);
-
- pipe_config->enhanced_framing =
- intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) &
- DP_TP_CTL_ENHANCED_FRAME_ENABLE;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- pipe_config->fec_enable =
- intel_de_read(dev_priv,
- dp_tp_ctl_reg(encoder, pipe_config)) & DP_TP_CTL_FEC_ENABLE;
+ ddi_mode = ddi_func_ctl & TRANS_DDI_MODE_SELECT_MASK;
+
+ if (ddi_mode == TRANS_DDI_MODE_SELECT_HDMI) {
+ intel_ddi_read_func_ctl_hdmi(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DVI) {
+ intel_ddi_read_func_ctl_dvi(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && !HAS_DP20(display)) {
+ intel_ddi_read_func_ctl_fdi(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_SST) {
+ intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
+ intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
+ } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (dig_port->lspcon.active && intel_dp_has_hdmi_sink(&dig_port->dp))
- pipe_config->infoframes.enable |=
- intel_lspcon_infoframes_enabled(encoder, pipe_config);
+ /*
+ * If this is true, we know we're being called from an MST stream
+ * encoder's ->get_config().
+ */
+ if (intel_dp->is_mst)
+ intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
- break;
- case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
- if (!HAS_DP20(dev_priv)) {
- /* FDI */
- pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- pipe_config->enhanced_framing =
- intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, pipe_config)) &
- DP_TP_CTL_ENHANCED_FRAME_ENABLE;
- break;
- }
- fallthrough; /* 128b/132b */
- case TRANS_DDI_MODE_SELECT_DP_MST:
- pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
- pipe_config->lane_count =
- ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
-
- if (DISPLAY_VER(dev_priv) >= 12)
- pipe_config->mst_master_transcoder =
- REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
-
- intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder,
- &pipe_config->dp_m_n);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- pipe_config->fec_enable =
- intel_de_read(dev_priv,
- dp_tp_ctl_reg(encoder, pipe_config)) & DP_TP_CTL_FEC_ENABLE;
-
- pipe_config->infoframes.enable |=
- intel_hdmi_infoframes_enabled(encoder, pipe_config);
- break;
- default:
- break;
+ intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
}
}
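
TRANS_DDI_MODE_SELECT_FDI_OR_128B132B is one register value with two meanings, which is why the dispatch above tests it twice. In summary:

/*
 *   HAS_DP20  intel_dp->is_mst  interpretation
 *   --------  ----------------  ------------------------
 *   no        n/a               FDI (pre-DP2.0 hardware)
 *   yes       true              128b/132b MST
 *   yes       false             128b/132b SST
 */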
+/*
+ * Note: Also called from the ->get_config of the MST stream encoders on their
+ * primary encoder, via the platform specific hooks here. See also the comment
+ * for intel_ddi_pre_enable().
+ */
static void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -4391,6 +4615,7 @@ static void intel_ddi_encoder_reset(struct drm_encoder *encoder)
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
intel_dp->reset_link_params = true;
+ intel_dp_invalidate_source_oui(intel_dp);
intel_pps_encoder_reset(intel_dp);
@@ -4413,8 +4638,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
.late_register = intel_ddi_encoder_late_register,
};
-static struct intel_connector *
-intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
+static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_connector *connector;
@@ -4422,7 +4646,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
connector = intel_connector_alloc();
if (!connector)
- return NULL;
+ return -ENOMEM;
dig_port->dp.output_reg = DDI_BUF_CTL(port);
if (DISPLAY_VER(i915) >= 14)
@@ -4437,7 +4661,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
if (!intel_dp_init_connector(dig_port, connector)) {
kfree(connector);
- return NULL;
+ return -EINVAL;
}
if (dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -4453,7 +4677,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
}
}
- return connector;
+ return 0;
}
static int intel_hdmi_reset_link(struct intel_encoder *encoder,
@@ -4550,12 +4774,8 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
enum intel_hotplug_state state;
int ret;
- if (intel_dp->compliance.test_active &&
- intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
- intel_dp_phy_test(encoder);
- /* just do the PHY test and nothing else */
+ if (intel_dp_test_phy(intel_dp))
return INTEL_HOTPLUG_UNCHANGED;
- }
state = intel_encoder_hotplug(encoder, connector);
@@ -4623,20 +4843,28 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
}
-static struct intel_connector *
-intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
+static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
{
struct intel_connector *connector;
enum port port = dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
- return NULL;
+ return -ENOMEM;
dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
- intel_hdmi_init_connector(dig_port, connector);
- return connector;
+ if (!intel_hdmi_init_connector(dig_port, connector)) {
+ /*
+ * HDMI connector init failures may just mean conflicting DDC
+ * pins or not having enough lanes. Handle them gracefully, but
+ * don't fail the entire DDI init.
+ */
+ dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG;
+ kfree(connector);
+ }
+
+ return 0;
}
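
With both connector-init helpers now returning errno-style values, the call sites later in this patch reduce to a plain non-zero check. Note the asymmetry: DDC-pin conflicts and similar HDMI failures are absorbed inside the helper above, so only -ENOMEM propagates from it. An illustrative caller, assuming the signatures shown here:

	if (init_dp && intel_ddi_init_dp_connector(dig_port))
		goto err;	/* any DP connector failure is fatal */

	if (init_hdmi && intel_ddi_init_hdmi_connector(dig_port))
		goto err;	/* only -ENOMEM escapes the HDMI helper */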
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
@@ -4646,7 +4874,7 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
if (dig_port->base.port != PORT_A)
return false;
- if (dig_port->saved_port_bits & DDI_A_4_LANES)
+ if (dig_port->ddi_a_4_lanes)
return false;
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
@@ -4684,7 +4912,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
if (intel_ddi_a_force_4_lanes(dig_port)) {
drm_dbg_kms(&dev_priv->drm,
"Forcing DDI_A_4_LANES for port A\n");
- dig_port->saved_port_bits |= DDI_A_4_LANES;
+ dig_port->ddi_a_4_lanes = true;
max_lanes = 4;
}
@@ -4791,8 +5019,10 @@ static void intel_ddi_tc_encoder_suspend_complete(struct intel_encoder *encoder)
static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
{
- intel_dp_encoder_shutdown(encoder);
- intel_hdmi_encoder_shutdown(encoder);
+ if (intel_encoder_is_dp(encoder))
+ intel_dp_encoder_shutdown(encoder);
+ if (intel_encoder_is_hdmi(encoder))
+ intel_hdmi_encoder_shutdown(encoder);
}
static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder)
@@ -4863,6 +5093,7 @@ void intel_ddi_init(struct intel_display *display,
bool init_hdmi, init_dp;
enum port port;
enum phy phy;
+ u32 ddi_buf_ctl;
port = intel_bios_encoder_port(devdata);
if (port == PORT_NONE)
@@ -4888,7 +5119,7 @@ void intel_ddi_init(struct intel_display *display,
if (!assert_has_icl_dsi(dev_priv))
return;
- icl_dsi_init(dev_priv, devdata);
+ icl_dsi_init(display, devdata);
return;
}
@@ -4986,10 +5217,10 @@ void intel_ddi_init(struct intel_display *display,
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
encoder->compute_config_late = intel_ddi_compute_config_late;
- encoder->enable = intel_enable_ddi;
+ encoder->enable = intel_ddi_enable;
encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
encoder->pre_enable = intel_ddi_pre_enable;
- encoder->disable = intel_disable_ddi;
+ encoder->disable = intel_ddi_disable;
encoder->post_pll_disable = intel_ddi_post_pll_disable;
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;
@@ -5112,17 +5343,12 @@ void intel_ddi_init(struct intel_display *display,
else
encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
- if (DISPLAY_VER(dev_priv) >= 11)
- dig_port->saved_port_bits =
- intel_de_read(dev_priv, DDI_BUF_CTL(port))
- & DDI_BUF_PORT_REVERSAL;
- else
- dig_port->saved_port_bits =
- intel_de_read(dev_priv, DDI_BUF_CTL(port))
- & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+ ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+
+ dig_port->lane_reversal = intel_bios_encoder_lane_reversal(devdata) ||
+ ddi_buf_ctl & DDI_BUF_PORT_REVERSAL;
- if (intel_bios_encoder_lane_reversal(devdata))
- dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL;
+ dig_port->ddi_a_4_lanes = DISPLAY_VER(dev_priv) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
@@ -5185,7 +5411,7 @@ void intel_ddi_init(struct intel_display *display,
intel_infoframe_init(dig_port);
if (init_dp) {
- if (!intel_ddi_init_dp_connector(dig_port))
+ if (intel_ddi_init_dp_connector(dig_port))
goto err;
dig_port->hpd_pulse = intel_dp_hpd_pulse;
@@ -5199,7 +5425,7 @@ void intel_ddi_init(struct intel_display *display,
* but leave it just in case we have some really bad VBTs...
*/
if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
- if (!intel_ddi_init_hdmi_connector(dig_port))
+ if (intel_ddi_init_hdmi_connector(dig_port))
goto err;
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 6d85422bdefe..2faadd1441e2 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -26,10 +26,12 @@ enum transcoder;
i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
-i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder);
+
+void intel_ddi_clear_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_wait_for_act_sent(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -57,15 +59,17 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+void intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- bool enabled);
+int intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool enabled);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 4d21ce734343..9389b295036e 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -1687,18 +1687,24 @@ dg2_get_snps_buf_trans(struct intel_encoder *encoder,
}
static const struct intel_ddi_buf_trans *
-mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
+mtl_get_c10_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000)
+ return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+mtl_get_c20_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state) && intel_dp_is_uhbr(crtc_state))
return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_encoder_is_c10phy(encoder)))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries);
- else if (!intel_encoder_is_c10phy(encoder))
- return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
else
- return intel_get_buf_trans(&mtl_c10_trans_dp14, n_entries);
+ return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
}
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
@@ -1706,7 +1712,10 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (DISPLAY_VER(i915) >= 14) {
- encoder->get_buf_trans = mtl_get_cx0_buf_trans;
+ if (intel_encoder_is_c10phy(encoder))
+ encoder->get_buf_trans = mtl_get_c10_buf_trans;
+ else
+ encoder->get_buf_trans = mtl_get_c20_buf_trans;
} else if (IS_DG2(i915)) {
encoder->get_buf_trans = dg2_get_snps_buf_trans;
} else if (IS_ALDERLAKE_P(i915)) {
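
Splitting mtl_get_cx0_buf_trans() into C10 and C20 variants answers the PHY-type question once, when the hook is installed, instead of on every table lookup. A usage sketch, assuming the get_buf_trans hook signature shown above:

static const struct intel_ddi_buf_trans *
lookup_buf_trans_sketch(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	int n_entries;

	/* Resolves to the C10 or C20 variant chosen at encoder init. */
	return encoder->get_buf_trans(encoder, crtc_state, &n_entries);
}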
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index e881bfeafb47..b7399e9d11cc 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -6,13 +6,16 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
-#include "i915_drv.h"
-#include "i915_trace.h"
+#include "intel_display_conversion.h"
+#include "intel_display_core.h"
+#include "intel_dmc_wl.h"
+#include "intel_dsb.h"
#include "intel_uncore.h"
+#include "intel_uncore_trace.h"
static inline struct intel_uncore *__to_uncore(struct intel_display *display)
{
- return &to_i915(display->drm)->uncore;
+ return to_intel_uncore(display->drm);
}
static inline u32
@@ -31,7 +34,7 @@ __intel_de_read(struct intel_display *display, i915_reg_t reg)
#define intel_de_read(p,...) __intel_de_read(__to_intel_display(p), __VA_ARGS__)
static inline u8
-__intel_de_read8(struct intel_display *display, i915_reg_t reg)
+intel_de_read8(struct intel_display *display, i915_reg_t reg)
{
u8 val;
@@ -43,11 +46,10 @@ __intel_de_read8(struct intel_display *display, i915_reg_t reg)
return val;
}
-#define intel_de_read8(p,...) __intel_de_read8(__to_intel_display(p), __VA_ARGS__)
static inline u64
-__intel_de_read64_2x32(struct intel_display *display,
- i915_reg_t lower_reg, i915_reg_t upper_reg)
+intel_de_read64_2x32(struct intel_display *display,
+ i915_reg_t lower_reg, i915_reg_t upper_reg)
{
u64 val;
@@ -62,7 +64,6 @@ __intel_de_read64_2x32(struct intel_display *display,
return val;
}
-#define intel_de_read64_2x32(p,...) __intel_de_read64_2x32(__to_intel_display(p), __VA_ARGS__)
static inline void
__intel_de_posting_read(struct intel_display *display, i915_reg_t reg)
@@ -87,12 +88,11 @@ __intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
#define intel_de_write(p,...) __intel_de_write(__to_intel_display(p), __VA_ARGS__)
static inline u32
-____intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
- u32 clear, u32 set)
+__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
+ u32 clear, u32 set)
{
return intel_uncore_rmw(__to_uncore(display), reg, clear, set);
}
-#define __intel_de_rmw_nowl(p,...) ____intel_de_rmw_nowl(__to_intel_display(p), __VA_ARGS__)
static inline u32
__intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
@@ -111,18 +111,27 @@ __intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear,
#define intel_de_rmw(p,...) __intel_de_rmw(__to_intel_display(p), __VA_ARGS__)
static inline int
-____intel_de_wait_for_register_nowl(struct intel_display *display,
- i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+__intel_de_wait_for_register_nowl(struct intel_display *display,
+ i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
return intel_wait_for_register(__to_uncore(display), reg, mask,
value, timeout);
}
-#define __intel_de_wait_for_register_nowl(p,...) ____intel_de_wait_for_register_nowl(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+__intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
+ i915_reg_t reg,
+ u32 mask, u32 value,
+ unsigned int fast_timeout_us)
+{
+ return __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, fast_timeout_us, 0, NULL);
+}
+
+static inline int
+intel_de_wait(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
int ret;
@@ -135,11 +144,10 @@ __intel_de_wait(struct intel_display *display, i915_reg_t reg,
return ret;
}
-#define intel_de_wait(p,...) __intel_de_wait(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout)
+intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
{
int ret;
@@ -152,13 +160,12 @@ __intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
return ret;
}
-#define intel_de_wait_fw(p,...) __intel_de_wait_fw(__to_intel_display(p), __VA_ARGS__)
static inline int
-__intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value,
- unsigned int fast_timeout_us,
- unsigned int slow_timeout_ms, u32 *out_value)
+intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms, u32 *out_value)
{
int ret;
@@ -172,7 +179,6 @@ __intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
return ret;
}
-#define intel_de_wait_custom(p,...) __intel_de_wait_custom(__to_intel_display(p), __VA_ARGS__)
static inline int
__intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
@@ -219,18 +225,25 @@ __intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
#define intel_de_write_fw(p,...) __intel_de_write_fw(__to_intel_display(p), __VA_ARGS__)
static inline u32
-__intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
+intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
{
return intel_uncore_read_notrace(__to_uncore(display), reg);
}
-#define intel_de_read_notrace(p,...) __intel_de_read_notrace(__to_intel_display(p), __VA_ARGS__)
static inline void
-__intel_de_write_notrace(struct intel_display *display, i915_reg_t reg,
- u32 val)
+intel_de_write_notrace(struct intel_display *display, i915_reg_t reg, u32 val)
{
intel_uncore_write_notrace(__to_uncore(display), reg, val);
}
-#define intel_de_write_notrace(p,...) __intel_de_write_notrace(__to_intel_display(p), __VA_ARGS__)
+
+static __always_inline void
+intel_de_write_dsb(struct intel_display *display, struct intel_dsb *dsb,
+ i915_reg_t reg, u32 val)
+{
+ if (dsb)
+ intel_dsb_reg_write(dsb, reg, val);
+ else
+ intel_de_write_fw(display, reg, val);
+}
#endif /* __INTEL_DE_H__ */
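
intel_de_write_dsb() is the pattern the display code threads through its register writers (bdw_set_pipe_misc() gains a dsb argument later in this diff): a non-NULL DSB queues the write for hardware execution, NULL takes the immediate MMIO path. A minimal sketch of a writer built on it:

static void sketch_pipe_reg_write(struct intel_display *display,
				  struct intel_dsb *dsb,
				  i915_reg_t reg, u32 val)
{
	/* Queued when a DSB is active, immediate otherwise. */
	intel_de_write_dsb(display, dsb, reg, val);
}

The bdw_set_pipe_misc(NULL, pipe_crtc_state) call in hsw_crtc_enable() below takes the immediate path.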
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index b4ef4d59da1a..4271da219b41 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -43,9 +43,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-
-#include "gem/i915_gem_lmem.h"
-#include "gem/i915_gem_object.h"
+#include <drm/drm_vblank.h>
#include "g4x_dp.h"
#include "g4x_hdmi.h"
@@ -60,6 +58,7 @@
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
+#include "intel_bo.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
@@ -135,7 +134,8 @@
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
-static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);
+static void bdw_set_pipe_misc(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state);
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
@@ -253,6 +253,108 @@ static enum pipe joiner_primary_pipe(const struct intel_crtc_state *crtc_state)
return ffs(crtc_state->joiner_pipes) - 1;
}
+/*
+ * The following helper functions, despite being named for bigjoiner,
+ * are applicable to both bigjoiner and uncompressed joiner configurations.
+ */
+static bool is_bigjoiner(const struct intel_crtc_state *crtc_state)
+{
+ return hweight8(crtc_state->joiner_pipes) >= 2;
+}
+
+static u8 bigjoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
+{
+ if (!is_bigjoiner(crtc_state))
+ return 0;
+
+ return crtc_state->joiner_pipes & (0b01010101 << joiner_primary_pipe(crtc_state));
+}
+
+static unsigned int bigjoiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
+{
+ if (!is_bigjoiner(crtc_state))
+ return 0;
+
+ return crtc_state->joiner_pipes & (0b10101010 << joiner_primary_pipe(crtc_state));
+}
+
+bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!is_bigjoiner(crtc_state))
+ return false;
+
+ return BIT(crtc->pipe) & bigjoiner_primary_pipes(crtc_state);
+}
+
+bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!is_bigjoiner(crtc_state))
+ return false;
+
+ return BIT(crtc->pipe) & bigjoiner_secondary_pipes(crtc_state);
+}
+
+u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (!is_bigjoiner(crtc_state))
+ return BIT(crtc->pipe);
+
+ return bigjoiner_primary_pipes(crtc_state);
+}
+
+u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state)
+{
+ return bigjoiner_secondary_pipes(crtc_state);
+}
+
+bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state)
+{
+ return intel_crtc_num_joined_pipes(crtc_state) >= 4;
+}
+
+static u8 ultrajoiner_primary_pipes(const struct intel_crtc_state *crtc_state)
+{
+ if (!intel_crtc_is_ultrajoiner(crtc_state))
+ return 0;
+
+ return crtc_state->joiner_pipes & (0b00010001 << joiner_primary_pipe(crtc_state));
+}
+
+bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return intel_crtc_is_ultrajoiner(crtc_state) &&
+ BIT(crtc->pipe) & ultrajoiner_primary_pipes(crtc_state);
+}
+
+/*
+ * The ultrajoiner enable bit doesn't seem to follow primary/secondary logic or
+ * any other logic, so let's just add a helper function to
+ * at least hide this hassle.
+ */
+static u8 ultrajoiner_enable_pipes(const struct intel_crtc_state *crtc_state)
+{
+ if (!intel_crtc_is_ultrajoiner(crtc_state))
+ return 0;
+
+ return crtc_state->joiner_pipes & (0b01110111 << joiner_primary_pipe(crtc_state));
+}
+
+bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ return intel_crtc_is_ultrajoiner(crtc_state) &&
+ BIT(crtc->pipe) & ultrajoiner_enable_pipes(crtc_state);
+}
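
A worked example of the mask arithmetic above, for the illustrative case of four joined pipes A-D with pipe A as primary (joiner_pipes = 0b1111, joiner_primary_pipe() = 0):

/*
 *   bigjoiner_primary_pipes():   0b1111 & (0b01010101 << 0) = 0b0101  (A, C)
 *   bigjoiner_secondary_pipes(): 0b1111 & (0b10101010 << 0) = 0b1010  (B, D)
 *   ultrajoiner_primary_pipes(): 0b1111 & (0b00010001 << 0) = 0b0001  (A)
 *   ultrajoiner_enable_pipes():  0b1111 & (0b01110111 << 0) = 0b0111  (A, B, C)
 */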
+
u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state)
{
if (crtc_state->joiner_pipes)
@@ -277,9 +379,9 @@ bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state)
crtc->pipe == joiner_primary_pipe(crtc_state);
}
-static int intel_joiner_num_pipes(const struct intel_crtc_state *crtc_state)
+int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state)
{
- return hweight8(crtc_state->joiner_pipes);
+ return hweight8(intel_crtc_joined_pipe_mask(crtc_state));
}
u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
@@ -291,10 +393,10 @@ u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state)
struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (intel_crtc_is_joiner_secondary(crtc_state))
- return intel_crtc_for_pipe(i915, joiner_primary_pipe(crtc_state));
+ return intel_crtc_for_pipe(display, joiner_primary_pipe(crtc_state));
else
return to_intel_crtc(crtc_state->uapi.crtc);
}
@@ -320,6 +422,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
void assert_transcoder(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, bool state)
{
+ struct intel_display *display = &dev_priv->display;
bool cur_state;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -340,24 +443,24 @@ void assert_transcoder(struct drm_i915_private *dev_priv,
cur_state = false;
}
- I915_STATE_WARN(dev_priv, cur_state != state,
- "transcoder %s assertion failure (expected %s, current %s)\n",
- transcoder_name(cpu_transcoder), str_on_off(state),
- str_on_off(cur_state));
+ INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
+ "transcoder %s assertion failure (expected %s, current %s)\n",
+ transcoder_name(cpu_transcoder), str_on_off(state),
+ str_on_off(cur_state));
}
static void assert_plane(struct intel_plane *plane, bool state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe;
bool cur_state;
cur_state = plane->get_hw_state(plane, &pipe);
- I915_STATE_WARN(i915, cur_state != state,
- "%s assertion failure (expected %s, current %s)\n",
- plane->base.name, str_on_off(state),
- str_on_off(cur_state));
+ INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
+ "%s assertion failure (expected %s, current %s)\n",
+ plane->base.name, str_on_off(state),
+ str_on_off(cur_state));
}
#define assert_plane_enabled(p) assert_plane(p, true)
@@ -372,7 +475,7 @@ static void assert_planes_disabled(struct intel_crtc *crtc)
assert_plane_disabled(plane);
}
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+void vlv_wait_port_ready(struct intel_display *display,
struct intel_digital_port *dig_port,
unsigned int expected_mask)
{
@@ -385,11 +488,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
fallthrough;
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
- dpll_reg = DPLL(dev_priv, 0);
+ dpll_reg = DPLL(display, 0);
break;
case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
- dpll_reg = DPLL(dev_priv, 0);
+ dpll_reg = DPLL(display, 0);
expected_mask <<= 4;
break;
case PORT_D:
@@ -398,16 +501,17 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
break;
}
- if (intel_de_wait(dev_priv, dpll_reg, port_mask, expected_mask, 1000))
- drm_WARN(&dev_priv->drm, 1,
+ if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000))
+ drm_WARN(display->drm, 1,
"timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- intel_de_read(dev_priv, dpll_reg) & port_mask,
+ intel_de_read(display, dpll_reg) & port_mask,
expected_mask);
}
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
@@ -451,8 +555,7 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
if (DISPLAY_VER(dev_priv) == 14)
set |= DP_FEC_BS_JITTER_WA;
- intel_de_rmw(dev_priv,
- hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
clear, set);
}
@@ -488,6 +591,7 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(old_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
@@ -525,7 +629,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
intel_de_write(dev_priv, TRANSCONF(dev_priv, cpu_transcoder), val);
if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
if ((val & TRANSCONF_ENABLE) == 0)
@@ -715,7 +819,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc,
if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- intel_plane_disable_arm(plane, crtc_state);
+ intel_plane_disable_arm(NULL, plane, crtc_state);
intel_crtc_wait_for_next_vblank(crtc);
}
@@ -759,7 +863,7 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
*/
if (IS_DG2(dev_priv))
tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
- else if (DISPLAY_VER(dev_priv) >= 13)
+ else if ((DISPLAY_VER(dev_priv) >= 13) && (DISPLAY_VER(dev_priv) < 30))
tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
/* Wa_14010547955:dg2 */
@@ -1116,6 +1220,22 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
intel_encoders_audio_enable(state, crtc);
}
+static void intel_post_plane_update_after_readout(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
+ hsw_ips_post_update(state, crtc);
+
+ /*
+ * Activate DRRS after state readout to avoid
+ * dp_m_n vs. dp_m2_n2 confusion on BDW+.
+ */
+ intel_drrs_activate(new_crtc_state);
+}
+
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1172,8 +1292,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
* Apart from the async flip bit we want to
* preserve the old state for the plane.
*/
- intel_plane_async_flip(plane, old_crtc_state,
- old_plane_state, false);
+ intel_plane_async_flip(NULL, plane,
+ old_crtc_state, old_plane_state, false);
need_vbl_wait = true;
}
}
@@ -1249,8 +1369,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (old_crtc_state->hw.active &&
- new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
+ if (!HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
+ new_crtc_state->disable_cxsr && ilk_disable_cxsr(dev_priv))
intel_crtc_wait_for_next_vblank(crtc);
/*
@@ -1315,7 +1435,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- intel_plane_disable_arm(plane, new_crtc_state);
+ intel_plane_disable_arm(NULL, plane, new_crtc_state);
if (old_plane_state->uapi.visible)
fb_bits |= plane->frontbuffer_bit;
@@ -1502,14 +1622,6 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state,
}
}
-static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
-
- plane->disable_arm(plane, crtc_state);
-}
-
static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1575,11 +1687,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(new_crtc_state);
- intel_color_commit_noarm(new_crtc_state);
- intel_color_commit_arm(new_crtc_state);
- /* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(new_crtc_state);
+ intel_color_modeset(new_crtc_state);
intel_initial_watermarks(state, crtc);
intel_enable_transcoder(new_crtc_state);
@@ -1637,10 +1745,9 @@ static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
- intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, crtc_state->cpu_transcoder),
HSW_FRAME_START_DELAY_MASK,
HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
@@ -1677,23 +1784,22 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
static void hsw_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
struct intel_crtc *pipe_crtc;
+ int i;
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
-
- for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(new_crtc_state))
- intel_dmc_enable_pipe(dev_priv, pipe_crtc->pipe);
+ for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i)
+ intel_dmc_enable_pipe(display, pipe_crtc->pipe);
intel_encoders_pre_pll_enable(state, crtc);
- for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
const struct intel_crtc_state *pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
@@ -1703,27 +1809,25 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_enable(state, crtc);
- for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
const struct intel_crtc_state *pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
intel_dsc_enable(pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (HAS_UNCOMPRESSED_JOINER(dev_priv))
intel_uncompressed_joiner_enable(pipe_crtc_state);
intel_set_pipe_src_size(pipe_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipe_misc(pipe_crtc_state);
+ bdw_set_pipe_misc(NULL, pipe_crtc_state);
}
if (!transcoder_is_dsi(cpu_transcoder))
hsw_configure_cpu_transcoder(new_crtc_state);
- for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
const struct intel_crtc_state *pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
@@ -1741,12 +1845,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(pipe_crtc_state);
- intel_color_commit_noarm(pipe_crtc_state);
- intel_color_commit_arm(pipe_crtc_state);
- /* update DSPCNTR to configure gamma/csc for pipe bottom color */
- if (DISPLAY_VER(dev_priv) < 9)
- intel_disable_primary_plane(pipe_crtc_state);
+ intel_color_modeset(pipe_crtc_state);
hsw_set_linetime_wm(pipe_crtc_state);
@@ -1758,8 +1857,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
- for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(new_crtc_state)) {
+ for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
const struct intel_crtc_state *pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
enum pipe hsw_workaround_pipe;
@@ -1776,7 +1874,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
hsw_workaround_pipe = pipe_crtc_state->hsw_workaround_pipe;
if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
struct intel_crtc *wa_crtc =
- intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
+ intel_crtc_for_pipe(display, hsw_workaround_pipe);
intel_crtc_wait_for_next_vblank(wa_crtc);
intel_crtc_wait_for_next_vblank(wa_crtc);
@@ -1841,10 +1939,11 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
static void hsw_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc *pipe_crtc;
+ int i;
/*
* FIXME collapse everything to one hook.
@@ -1853,8 +1952,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
const struct intel_crtc_state *old_pipe_crtc_state =
intel_atomic_get_old_crtc_state(state, pipe_crtc);
@@ -1863,9 +1961,8 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_post_pll_disable(state, crtc);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(old_crtc_state))
- intel_dmc_disable_pipe(i915, pipe_crtc->pipe);
+ for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i)
+ intel_dmc_disable_pipe(display, pipe_crtc->pipe);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -2147,11 +2244,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
i9xx_pfit_enable(new_crtc_state);
- intel_color_load_luts(new_crtc_state);
- intel_color_commit_noarm(new_crtc_state);
- intel_color_commit_arm(new_crtc_state);
- /* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(new_crtc_state);
+ intel_color_modeset(new_crtc_state);
intel_initial_watermarks(state, crtc);
intel_enable_transcoder(new_crtc_state);
@@ -2187,11 +2280,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
i9xx_pfit_enable(new_crtc_state);
- intel_color_load_luts(new_crtc_state);
- intel_color_commit_noarm(new_crtc_state);
- intel_color_commit_arm(new_crtc_state);
- /* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(new_crtc_state);
+ intel_color_modeset(new_crtc_state);
if (!intel_initial_watermarks(state, crtc))
intel_update_watermarks(dev_priv);
@@ -2224,9 +2313,10 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
static void i9xx_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/*
@@ -2265,7 +2355,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
/* clock the pipe down to 640x480@60 to potentially save power */
if (IS_I830(dev_priv))
- i830_enable_pipe(dev_priv, pipe);
+ i830_enable_pipe(display, pipe);
}
void intel_encoder_destroy(struct drm_encoder *encoder)
@@ -2281,7 +2371,7 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* GDG double wide on either pipe, otherwise pipe A only */
- return DISPLAY_VER(dev_priv) < 4 &&
+ return HAS_DOUBLE_WIDE(dev_priv) &&
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
@@ -2343,9 +2433,9 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
static void intel_joiner_adjust_timings(const struct intel_crtc_state *crtc_state,
struct drm_display_mode *mode)
{
- int num_pipes = intel_joiner_num_pipes(crtc_state);
+ int num_pipes = intel_crtc_num_joined_pipes(crtc_state);
- if (num_pipes < 2)
+ if (num_pipes == 1)
return;
mode->crtc_clock /= num_pipes;
@@ -2407,7 +2497,7 @@ static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state
drm_mode_copy(mode, pipe_mode);
intel_mode_from_crtc_timings(mode, mode);
mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
- (intel_joiner_num_pipes(crtc_state) ?: 1);
+ intel_crtc_num_joined_pipes(crtc_state);
mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);
/* Derive per-pipe timings in case joiner is used */
@@ -2427,10 +2517,10 @@ void intel_encoder_get_config(struct intel_encoder *encoder,
static void intel_joiner_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
- int num_pipes = intel_joiner_num_pipes(crtc_state);
+ int num_pipes = intel_crtc_num_joined_pipes(crtc_state);
int width, height;
- if (num_pipes < 2)
+ if (num_pipes == 1)
return;
width = drm_rect_width(&crtc_state->pipe_src);
@@ -2520,13 +2610,29 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
+static bool intel_crtc_needs_wa_14015401596(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ return intel_vrr_possible(crtc_state) && crtc_state->has_psr &&
+ adjusted_mode->crtc_vblank_start == adjusted_mode->crtc_vdisplay &&
+ IS_DISPLAY_VER(display, 13, 14);
+}
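The helper above gates Wa_14015401596 on VRR-capable PSR configs whose vblank starts exactly at vdisplay, on display versions 13-14. A minimal sketch of its effect in intel_crtc_compute_config() below, with hypothetical timing values:

	/* Hypothetical 4k timings on DISPLAY_VER 13/14 with PSR + VRR possible: */
	struct drm_display_mode m = {
		.crtc_vdisplay = 2160,
		.crtc_vblank_start = 2160,	/* == vdisplay -> workaround applies */
	};

	m.crtc_vblank_start += 1;		/* opens a one-line vblank: 2160 -> 2161 */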
+
static int intel_crtc_compute_config(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
int ret;
+ /* Wa_14015401596 */
+ if (intel_crtc_needs_wa_14015401596(crtc_state))
+ adjusted_mode->crtc_vblank_start += 1;
+
ret = intel_dpll_crtc_compute_clock(state, crtc);
if (ret)
return ret;
@@ -2887,11 +2993,11 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
static void intel_joiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- int num_pipes = intel_joiner_num_pipes(crtc_state);
+ int num_pipes = intel_crtc_num_joined_pipes(crtc_state);
enum pipe primary_pipe, pipe = crtc->pipe;
int width;
- if (num_pipes < 2)
+ if (num_pipes == 1)
return;
primary_pipe = joiner_primary_pipe(crtc_state);
@@ -3031,9 +3137,14 @@ bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe));
if (tmp & PIPE_MISC_YUV420_ENABLE) {
- /* We support 4:2:0 in full blend mode only */
- drm_WARN_ON(&dev_priv->drm,
- (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
+ /*
+ * We support 4:2:0 in full blend mode only.
+ * On xe3_lpd+ full blend is implied by the YUV420 Enable bit;
+ * ensure the same on prior platforms via the YUV420 Mode bit.
+ */
+ if (DISPLAY_VER(dev_priv) < 30)
+ drm_WARN_ON(&dev_priv->drm,
+ (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
} else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) {
@@ -3101,7 +3212,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
- if (DISPLAY_VER(dev_priv) < 4)
+ if (HAS_DOUBLE_WIDE(dev_priv))
pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE;
intel_get_transcoder_timings(crtc, pipe_config);
@@ -3246,9 +3357,11 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, cpu_transcoder));
}
-static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
+static void bdw_set_pipe_misc(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc->base.dev);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = 0;
@@ -3280,8 +3393,8 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- val |= PIPE_MISC_YUV420_ENABLE |
- PIPE_MISC_YUV420_MODE_FULL_BLEND;
+ val |= DISPLAY_VER(display) >= 30 ? PIPE_MISC_YUV420_ENABLE :
+ PIPE_MISC_YUV420_ENABLE | PIPE_MISC_YUV420_MODE_FULL_BLEND;
if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
val |= PIPE_MISC_HDR_MODE_PRECISION;
@@ -3293,7 +3406,7 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
if (IS_BROADWELL(dev_priv))
val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE;
- intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val);
+ intel_de_write_dsb(display, dsb, PIPE_MISC(crtc->pipe), val);
}
int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
@@ -3534,23 +3647,57 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
return tmp & TRANS_DDI_FUNC_ENABLE;
}
-static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
- u8 *primary_pipes, u8 *secondary_pipes)
+static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
+ u8 *primary_pipes, u8 *secondary_pipes)
+{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_crtc *crtc;
+
+ *primary_pipes = 0;
+ *secondary_pipes = 0;
+
+ if (!HAS_UNCOMPRESSED_JOINER(display))
+ return;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
+ joiner_pipes(i915)) {
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ with_intel_display_power_if_enabled(i915, power_domain, wakeref) {
+ u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
+
+ if (tmp & UNCOMPRESSED_JOINER_PRIMARY)
+ *primary_pipes |= BIT(pipe);
+ if (tmp & UNCOMPRESSED_JOINER_SECONDARY)
+ *secondary_pipes |= BIT(pipe);
+ }
+ }
+}
+
+static void enabled_bigjoiner_pipes(struct intel_display *display,
+ u8 *primary_pipes, u8 *secondary_pipes)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_crtc *crtc;
*primary_pipes = 0;
*secondary_pipes = 0;
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc,
- joiner_pipes(dev_priv)) {
+ if (!HAS_BIGJOINER(display))
+ return;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
+ joiner_pipes(i915)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
- power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
- with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
- u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+ power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
+ with_intel_display_power_if_enabled(i915, power_domain, wakeref) {
+ u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (!(tmp & BIG_JOINER_ENABLE))
continue;
@@ -3560,56 +3707,198 @@ static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
else
*secondary_pipes |= BIT(pipe);
}
+ }
+}
- if (DISPLAY_VER(dev_priv) < 13)
- continue;
+static u8 expected_secondary_pipes(u8 primary_pipes, int num_pipes)
+{
+ u8 secondary_pipes = 0;
- power_domain = POWER_DOMAIN_PIPE(pipe);
- with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
- u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+ for (int i = 1; i < num_pipes; i++)
+ secondary_pipes |= primary_pipes << i;
- if (tmp & UNCOMPRESSED_JOINER_PRIMARY)
- *primary_pipes |= BIT(pipe);
- if (tmp & UNCOMPRESSED_JOINER_SECONDARY)
- *secondary_pipes |= BIT(pipe);
- }
- }
+ return secondary_pipes;
+}
- /* Joiner pipes should always be consecutive primary and secondary */
- drm_WARN(&dev_priv->drm, *secondary_pipes != *primary_pipes << 1,
- "Joiner misconfigured (primary pipes 0x%x, secondary pipes 0x%x)\n",
- *primary_pipes, *secondary_pipes);
+static u8 expected_uncompressed_joiner_secondary_pipes(u8 uncompjoiner_primary_pipes)
+{
+ return expected_secondary_pipes(uncompjoiner_primary_pipes, 2);
+}
+
+static u8 expected_bigjoiner_secondary_pipes(u8 bigjoiner_primary_pipes)
+{
+ return expected_secondary_pipes(bigjoiner_primary_pipes, 2);
}
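expected_secondary_pipes() builds the secondary mask by shifting every primary bit up (num_pipes - 1) times; a worked example:

	/* primary_pipes = BIT(PIPE_A) | BIT(PIPE_C) = 0x5, num_pipes = 2:
	 *   i = 1: secondary_pipes |= 0x5 << 1 = 0xa
	 * -> pipes B and D are the expected secondaries.
	 */
	u8 secondary = expected_secondary_pipes(0x5, 2);	/* == 0xa */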
-static enum pipe get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes)
+static u8 get_joiner_primary_pipe(enum pipe pipe, u8 primary_pipes)
{
- if ((secondary_pipes & BIT(pipe)) == 0)
- return pipe;
+ primary_pipes &= GENMASK(pipe, 0);
- /* ignore everything above our pipe */
- primary_pipes &= ~GENMASK(7, pipe);
+ return primary_pipes ? BIT(fls(primary_pipes) - 1) : 0;
+}
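The rewritten get_joiner_primary_pipe() returns a mask (or 0) instead of a pipe number: it keeps only the primary bits at or below the queried pipe and picks the highest remaining one. For example:

	/* pipe = PIPE_D (bit 3), primary_pipes = 0x5 (pipes A and C):
	 *   primary_pipes &= GENMASK(3, 0) -> 0x5
	 *   fls(0x5) == 3                  -> BIT(2)
	 * -> pipe C is the primary of pipe D's joiner group.
	 */
	u8 primary = get_joiner_primary_pipe(PIPE_D, 0x5);	/* == BIT(PIPE_C) */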
- /* highest remaining bit should be our primary pipe */
- return fls(primary_pipes) - 1;
+static u8 expected_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes)
+{
+ return expected_secondary_pipes(ultrajoiner_primary_pipes, 4);
}
-static u8 get_joiner_secondary_pipes(enum pipe pipe, u8 primary_pipes, u8 secondary_pipes)
+static u8 fixup_ultrajoiner_secondary_pipes(u8 ultrajoiner_primary_pipes,
+ u8 ultrajoiner_secondary_pipes)
{
- enum pipe primary_pipe, next_primary_pipe;
+ return ultrajoiner_secondary_pipes | ultrajoiner_primary_pipes << 3;
+}
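Since the hardware leaves the ultrajoiner enable bit unset on the last pipe of a set of four (see the comment in enabled_joiner_pipes() below), the fixup synthesizes the missing secondary bit by shifting the primary mask up by three:

	/* primary = BIT(PIPE_A) = 0x1, hw-reported secondaries = B and C = 0x6:
	 *   0x6 | (0x1 << 3) = 0xe
	 * -> pipe D is filled in as the missing fourth (secondary) pipe.
	 */
	u8 fixed = fixup_ultrajoiner_secondary_pipes(0x1, 0x6);	/* == 0xe */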
- primary_pipe = get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes);
+static void enabled_ultrajoiner_pipes(struct drm_i915_private *i915,
+ u8 *primary_pipes, u8 *secondary_pipes)
+{
+ struct intel_display *display = &i915->display;
+ struct intel_crtc *crtc;
- if ((primary_pipes & BIT(primary_pipe)) == 0)
- return 0;
+ *primary_pipes = 0;
+ *secondary_pipes = 0;
- /* ignore our primary pipe and everything below it */
- primary_pipes &= ~GENMASK(primary_pipe, 0);
- /* make sure a high bit is set for the ffs() */
- primary_pipes |= BIT(7);
- /* lowest remaining bit should be the next primary pipe */
- next_primary_pipe = ffs(primary_pipes) - 1;
+ if (!HAS_ULTRAJOINER(display))
+ return;
- return secondary_pipes & GENMASK(next_primary_pipe - 1, primary_pipe);
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
+ joiner_pipes(i915)) {
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+
+ power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
+ with_intel_display_power_if_enabled(i915, power_domain, wakeref) {
+ u32 tmp = intel_de_read(i915, ICL_PIPE_DSS_CTL1(pipe));
+
+ if (!(tmp & ULTRA_JOINER_ENABLE))
+ continue;
+
+ if (tmp & PRIMARY_ULTRA_JOINER_ENABLE)
+ *primary_pipes |= BIT(pipe);
+ else
+ *secondary_pipes |= BIT(pipe);
+ }
+ }
+}
+
+static void enabled_joiner_pipes(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ u8 *primary_pipe, u8 *secondary_pipes)
+{
+ struct intel_display *display = to_intel_display(&dev_priv->drm);
+ u8 primary_ultrajoiner_pipes;
+ u8 primary_uncompressed_joiner_pipes, primary_bigjoiner_pipes;
+ u8 secondary_ultrajoiner_pipes;
+ u8 secondary_uncompressed_joiner_pipes, secondary_bigjoiner_pipes;
+ u8 ultrajoiner_pipes;
+ u8 uncompressed_joiner_pipes, bigjoiner_pipes;
+
+ enabled_ultrajoiner_pipes(dev_priv, &primary_ultrajoiner_pipes,
+ &secondary_ultrajoiner_pipes);
+ /*
+ * For some strange reason the last pipe in the set of four
+ * shouldn't have the ultrajoiner enable bit set in hardware.
+ * Set the bit anyway to make life easier.
+ */
+ drm_WARN_ON(&dev_priv->drm,
+ expected_secondary_pipes(primary_ultrajoiner_pipes, 3) !=
+ secondary_ultrajoiner_pipes);
+ secondary_ultrajoiner_pipes =
+ fixup_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes,
+ secondary_ultrajoiner_pipes);
+
+ drm_WARN_ON(&dev_priv->drm, (primary_ultrajoiner_pipes & secondary_ultrajoiner_pipes) != 0);
+
+ enabled_uncompressed_joiner_pipes(display, &primary_uncompressed_joiner_pipes,
+ &secondary_uncompressed_joiner_pipes);
+
+ drm_WARN_ON(display->drm,
+ (primary_uncompressed_joiner_pipes & secondary_uncompressed_joiner_pipes) != 0);
+
+ enabled_bigjoiner_pipes(display, &primary_bigjoiner_pipes,
+ &secondary_bigjoiner_pipes);
+
+ drm_WARN_ON(display->drm,
+ (primary_bigjoiner_pipes & secondary_bigjoiner_pipes) != 0);
+
+ ultrajoiner_pipes = primary_ultrajoiner_pipes | secondary_ultrajoiner_pipes;
+ uncompressed_joiner_pipes = primary_uncompressed_joiner_pipes |
+ secondary_uncompressed_joiner_pipes;
+ bigjoiner_pipes = primary_bigjoiner_pipes | secondary_bigjoiner_pipes;
+
+ drm_WARN(display->drm, (ultrajoiner_pipes & bigjoiner_pipes) != ultrajoiner_pipes,
+ "Ultrajoiner pipes(%#x) should be bigjoiner pipes(%#x)\n",
+ ultrajoiner_pipes, bigjoiner_pipes);
+
+ drm_WARN(display->drm, secondary_ultrajoiner_pipes !=
+ expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes),
+ "Wrong secondary ultrajoiner pipes(expected %#x, current %#x)\n",
+ expected_ultrajoiner_secondary_pipes(primary_ultrajoiner_pipes),
+ secondary_ultrajoiner_pipes);
+
+ drm_WARN(display->drm, (uncompressed_joiner_pipes & bigjoiner_pipes) != 0,
+ "Uncompressed joiner pipes(%#x) and bigjoiner pipes(%#x) can't intersect\n",
+ uncompressed_joiner_pipes, bigjoiner_pipes);
+
+ drm_WARN(display->drm, secondary_bigjoiner_pipes !=
+ expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes),
+ "Wrong secondary bigjoiner pipes(expected %#x, current %#x)\n",
+ expected_bigjoiner_secondary_pipes(primary_bigjoiner_pipes),
+ secondary_bigjoiner_pipes);
+
+ drm_WARN(display->drm, secondary_uncompressed_joiner_pipes !=
+ expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes),
+ "Wrong secondary uncompressed joiner pipes(expected %#x, current %#x)\n",
+ expected_uncompressed_joiner_secondary_pipes(primary_uncompressed_joiner_pipes),
+ secondary_uncompressed_joiner_pipes);
+
+ *primary_pipe = 0;
+ *secondary_pipes = 0;
+
+ if (ultrajoiner_pipes & BIT(pipe)) {
+ *primary_pipe = get_joiner_primary_pipe(pipe, primary_ultrajoiner_pipes);
+ *secondary_pipes = secondary_ultrajoiner_pipes &
+ expected_ultrajoiner_secondary_pipes(*primary_pipe);
+
+ drm_WARN(display->drm,
+ expected_ultrajoiner_secondary_pipes(*primary_pipe) !=
+ *secondary_pipes,
+ "Wrong ultrajoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n",
+ *primary_pipe,
+ expected_ultrajoiner_secondary_pipes(*primary_pipe),
+ *secondary_pipes);
+ return;
+ }
+
+ if (uncompressed_joiner_pipes & BIT(pipe)) {
+ *primary_pipe = get_joiner_primary_pipe(pipe, primary_uncompressed_joiner_pipes);
+ *secondary_pipes = secondary_uncompressed_joiner_pipes &
+ expected_uncompressed_joiner_secondary_pipes(*primary_pipe);
+
+ drm_WARN(display->drm,
+ expected_uncompressed_joiner_secondary_pipes(*primary_pipe) !=
+ *secondary_pipes,
+ "Wrong uncompressed joiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n",
+ *primary_pipe,
+ expected_uncompressed_joiner_secondary_pipes(*primary_pipe),
+ *secondary_pipes);
+ return;
+ }
+
+ if (bigjoiner_pipes & BIT(pipe)) {
+ *primary_pipe = get_joiner_primary_pipe(pipe, primary_bigjoiner_pipes);
+ *secondary_pipes = secondary_bigjoiner_pipes &
+ expected_bigjoiner_secondary_pipes(*primary_pipe);
+
+ drm_WARN(display->drm,
+ expected_bigjoiner_secondary_pipes(*primary_pipe) !=
+ *secondary_pipes,
+ "Wrong bigjoiner secondary pipes for primary_pipe %#x (expected %#x, current %#x)\n",
+ *primary_pipe,
+ expected_bigjoiner_secondary_pipes(*primary_pipe),
+ *secondary_pipes);
+ return;
+ }
}
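The reworked enabled_joiner_pipes() resolves one pipe's joiner group in strict precedence order -- ultrajoiner, then uncompressed joiner, then bigjoiner -- and hands back a single primary plus its secondaries. An illustrative call (values assumed, not from the patch):

	u8 primary_pipe, secondary_pipes;

	/* e.g. an ultrajoiner config spanning pipes A-D, queried for pipe C: */
	enabled_joiner_pipes(dev_priv, PIPE_C, &primary_pipe, &secondary_pipes);
	/* -> primary_pipe = BIT(PIPE_A),
	 *    secondary_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D) */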
static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
@@ -3628,7 +3917,7 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
enum transcoder cpu_transcoder;
- u8 primary_pipes, secondary_pipes;
+ u8 primary_pipe, secondary_pipes;
u8 enabled_transcoders = 0;
/*
@@ -3681,10 +3970,9 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
enabled_transcoders |= BIT(cpu_transcoder);
/* joiner secondary -> consider the primary pipe's transcoder as well */
- enabled_joiner_pipes(dev_priv, &primary_pipes, &secondary_pipes);
+ enabled_joiner_pipes(dev_priv, crtc->pipe, &primary_pipe, &secondary_pipes);
if (secondary_pipes & BIT(crtc->pipe)) {
- cpu_transcoder = (enum transcoder)
- get_joiner_primary_pipe(crtc->pipe, primary_pipes, secondary_pipes);
+ cpu_transcoder = (enum transcoder)ffs(primary_pipe) - 1;
if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
}
@@ -3815,22 +4103,21 @@ static void intel_joiner_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- u8 primary_pipes, secondary_pipes;
+ u8 primary_pipe, secondary_pipes;
enum pipe pipe = crtc->pipe;
- enabled_joiner_pipes(i915, &primary_pipes, &secondary_pipes);
+ enabled_joiner_pipes(i915, pipe, &primary_pipe, &secondary_pipes);
- if (((primary_pipes | secondary_pipes) & BIT(pipe)) == 0)
+ if (((primary_pipe | secondary_pipes) & BIT(pipe)) == 0)
return;
- crtc_state->joiner_pipes =
- BIT(get_joiner_primary_pipe(pipe, primary_pipes, secondary_pipes)) |
- get_joiner_secondary_pipes(pipe, primary_pipes, secondary_pipes);
+ crtc_state->joiner_pipes = primary_pipe | secondary_pipes;
}
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool active;
u32 tmp;
@@ -3907,7 +4194,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));
+ tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
} else {
@@ -3986,7 +4273,7 @@ int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc_state *crtc_state;
struct drm_display_mode *mode;
struct intel_crtc *crtc;
@@ -3995,7 +4282,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
if (!encoder->get_hw_state(encoder, &pipe))
return NULL;
- crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(display, pipe);
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -4265,6 +4552,7 @@ static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -4285,22 +4573,11 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
if (ret)
return ret;
- ret = intel_compute_pipe_wm(state, crtc);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Target pipe watermarks are invalid\n");
- return ret;
- }
-
- /*
- * Calculate 'intermediate' watermarks that satisfy both the
- * old state and the new state. We can program these
- * immediately.
- */
- ret = intel_compute_intermediate_wm(state, crtc);
+ ret = intel_wm_compute(state, crtc);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
- "No valid intermediate pipe watermarks are possible\n");
+ "[CRTC:%d:%s] watermarks are invalid\n",
+ crtc->base.base.id, crtc->base.name);
return ret;
}
@@ -4312,12 +4589,12 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
+ ret = intel_atomic_setup_scalers(state, crtc);
if (ret)
return ret;
}
- if (HAS_IPS(dev_priv)) {
+ if (HAS_IPS(display)) {
ret = hsw_ips_compute_config(state, crtc);
if (ret)
return ret;
@@ -4798,6 +5075,8 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state,
struct drm_connector *connector;
int i;
+ intel_vrr_compute_config_late(crtc_state);
+
for_each_new_connector_in_state(&state->base, connector,
conn_state, i) {
struct intel_encoder *encoder =
@@ -4937,7 +5216,7 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
const struct drm_dp_vsc_sdp *a,
const struct drm_dp_vsc_sdp *b)
{
- pipe_config_mismatch(p, fastset, crtc, name, "dp sdp");
+ pipe_config_mismatch(p, fastset, crtc, name, "dp vsc sdp");
drm_printf(p, "expected:\n");
drm_dp_vsc_sdp_log(p, a);
@@ -4946,27 +5225,18 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_printer *p, bool fastset,
}
static void
-pipe_config_dp_as_sdp_mismatch(struct drm_i915_private *i915,
- bool fastset, const char *name,
+pipe_config_dp_as_sdp_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
const struct drm_dp_as_sdp *a,
const struct drm_dp_as_sdp *b)
{
- struct drm_printer p;
-
- if (fastset) {
- p = drm_dbg_printer(&i915->drm, DRM_UT_KMS, NULL);
+ pipe_config_mismatch(p, fastset, crtc, name, "dp as sdp");
- drm_printf(&p, "fastset requirement not met in %s dp sdp\n", name);
- } else {
- p = drm_err_printer(&i915->drm, NULL);
-
- drm_printf(&p, "mismatch in %s dp sdp\n", name);
- }
-
- drm_printf(&p, "expected:\n");
- drm_dp_as_sdp_log(&p, a);
- drm_printf(&p, "found:\n");
- drm_dp_as_sdp_log(&p, b);
+ drm_printf(p, "expected:\n");
+ drm_dp_as_sdp_log(p, a);
+ drm_printf(p, "found:\n");
+ drm_dp_as_sdp_log(p, b);
}
/* Returns the length up to and including the last differing byte */
@@ -4989,26 +5259,13 @@ pipe_config_buffer_mismatch(struct drm_printer *p, bool fastset,
const char *name,
const u8 *a, const u8 *b, size_t len)
{
- const char *loglevel;
-
- if (fastset) {
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- loglevel = KERN_DEBUG;
- } else {
- loglevel = KERN_ERR;
- }
-
pipe_config_mismatch(p, fastset, crtc, name, "buffer");
/* only dump up to the last difference */
len = memcmp_diff_len(a, b, len);
- print_hex_dump(loglevel, "expected: ", DUMP_PREFIX_NONE,
- 16, 0, a, len, false);
- print_hex_dump(loglevel, "found: ", DUMP_PREFIX_NONE,
- 16, 0, b, len, false);
+ drm_print_hex_dump(p, "expected: ", a, len);
+ drm_print_hex_dump(p, "found: ", b, len);
}
static void
@@ -5035,15 +5292,15 @@ pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset,
const struct intel_cx0pll_state *a,
const struct intel_cx0pll_state *b)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
char *chipname = a->use_c10 ? "C10" : "C20";
pipe_config_mismatch(p, fastset, crtc, name, chipname);
drm_printf(p, "expected:\n");
- intel_cx0pll_dump_hw_state(i915, a);
+ intel_cx0pll_dump_hw_state(display, a);
drm_printf(p, "found:\n");
- intel_cx0pll_dump_hw_state(i915, b);
+ intel_cx0pll_dump_hw_state(display, b);
}
bool
@@ -5051,6 +5308,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset)
{
+ struct intel_display *display = to_intel_display(current_config);
struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_printer p;
@@ -5227,7 +5485,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_DP_AS_SDP(name) do { \
if (!intel_compare_dp_as_sdp(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_dp_as_sdp_mismatch(dev_priv, fastset, __stringify(name), \
+ pipe_config_dp_as_sdp_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->infoframes.name, \
&pipe_config->infoframes.name); \
ret = false; \
@@ -5291,7 +5549,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
+ if (HAS_DOUBLE_BUFFERED_M_N(display)) {
if (!fastset || !pipe_config->update_m_n)
PIPE_CONF_CHECK_M_N(dp_m_n);
} else {
@@ -5431,7 +5689,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(avi);
PIPE_CONF_CHECK_INFOFRAME(spd);
PIPE_CONF_CHECK_INFOFRAME(hdmi);
- PIPE_CONF_CHECK_INFOFRAME(drm);
+ if (!fastset)
+ PIPE_CONF_CHECK_INFOFRAME(drm);
PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
PIPE_CONF_CHECK_DP_AS_SDP(as_sdp);
@@ -5471,7 +5730,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
- PIPE_CONF_CHECK_BOOL(dsc.dsc_split);
+ PIPE_CONF_CHECK_I(dsc.num_streams);
PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
PIPE_CONF_CHECK_BOOL(splitter.enable);
@@ -6525,6 +6784,7 @@ static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
int intel_atomic_check(struct drm_device *dev,
struct drm_atomic_state *_state)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -6532,7 +6792,7 @@ int intel_atomic_check(struct drm_device *dev,
int ret, i;
bool any_ms = false;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return -ENODEV;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -6732,17 +6992,12 @@ int intel_atomic_check(struct drm_device *dev,
static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
- struct intel_crtc_state __maybe_unused *crtc_state;
- struct intel_crtc *crtc;
- int i, ret;
+ int ret;
ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
if (ret < 0)
return ret;
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
- intel_color_prepare_commit(state, crtc);
-
return 0;
}
@@ -6823,12 +7078,12 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
* During modesets pipe configuration was programmed as the
* CRTC was enabled.
*/
- if (!modeset) {
+ if (!modeset && !new_crtc_state->use_dsb) {
if (intel_crtc_needs_color_update(new_crtc_state))
- intel_color_commit_arm(new_crtc_state);
+ intel_color_commit_arm(NULL, new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipe_misc(new_crtc_state);
+ bdw_set_pipe_misc(NULL, new_crtc_state);
if (intel_crtc_needs_fastset(new_crtc_state))
intel_pipe_fastset(old_crtc_state, new_crtc_state);
@@ -6925,10 +7180,12 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
if (!modeset &&
- intel_crtc_needs_color_update(new_crtc_state))
- intel_color_commit_noarm(new_crtc_state);
+ intel_crtc_needs_color_update(new_crtc_state) &&
+ !new_crtc_state->use_dsb)
+ intel_color_commit_noarm(NULL, new_crtc_state);
- intel_crtc_planes_update_noarm(state, crtc);
+ if (!new_crtc_state->use_dsb)
+ intel_crtc_planes_update_noarm(NULL, state, crtc);
}
static void intel_update_crtc(struct intel_atomic_state *state,
@@ -6939,16 +7196,25 @@ static void intel_update_crtc(struct intel_atomic_state *state,
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- /* Perform vblank evasion around commit operation */
- intel_pipe_update_start(state, crtc);
+ if (new_crtc_state->use_dsb) {
+ intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event);
- commit_pipe_pre_planes(state, crtc);
+ intel_dsb_commit(new_crtc_state->dsb_commit, false);
+ } else {
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(state, crtc);
+
+ if (new_crtc_state->dsb_commit)
+ intel_dsb_commit(new_crtc_state->dsb_commit, false);
- intel_crtc_planes_update_arm(state, crtc);
+ commit_pipe_pre_planes(state, crtc);
- commit_pipe_post_planes(state, crtc);
+ intel_crtc_planes_update_arm(NULL, state, crtc);
- intel_pipe_update_end(state, crtc);
+ commit_pipe_post_planes(state, crtc);
+
+ intel_pipe_update_end(state, crtc);
+ }
/*
* VRR/Seamless M/N update may need to update frame timings.
@@ -7273,17 +7539,35 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
}
}
+static void intel_atomic_dsb_wait_commit(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->dsb_commit)
+ intel_dsb_wait(crtc_state->dsb_commit);
+
+ intel_color_wait_commit(crtc_state);
+}
+
+static void intel_atomic_dsb_cleanup(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->dsb_commit) {
+ intel_dsb_cleanup(crtc_state->dsb_commit);
+ crtc_state->dsb_commit = NULL;
+ }
+
+ intel_color_cleanup_commit(crtc_state);
+}
+
static void intel_atomic_cleanup_work(struct work_struct *work)
{
struct intel_atomic_state *state =
- container_of(work, struct intel_atomic_state, base.commit_work);
+ container_of(work, struct intel_atomic_state, cleanup_work);
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state;
struct intel_crtc *crtc;
int i;
for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
- intel_color_cleanup_commit(old_crtc_state);
+ intel_atomic_dsb_cleanup(old_crtc_state);
drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
drm_atomic_helper_commit_cleanup_done(&state->base);
@@ -7324,15 +7608,93 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s
* caller made sure that the object is synced wrt. the related color clear value
* GPU write on it.
*/
- ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
- fb->offsets[cc_plane] + 16,
- &plane_state->ccval,
- sizeof(plane_state->ccval));
+ ret = intel_bo_read_from_page(intel_fb_bo(fb),
+ fb->offsets[cc_plane] + 16,
+ &plane_state->ccval,
+ sizeof(plane_state->ccval));
/* The above could only fail if the FB obj has an unexpected backing store type. */
drm_WARN_ON(&i915->drm, ret);
}
}
+static void intel_atomic_dsb_prepare(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ intel_color_prepare_commit(state, crtc);
+}
+
+static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ if (state->base.legacy_cursor_update)
+ return;
+
+ /* FIXME deal with everything */
+ new_crtc_state->use_dsb =
+ new_crtc_state->update_planes &&
+ !new_crtc_state->vrr.enable &&
+ !new_crtc_state->do_async_flip &&
+ !new_crtc_state->has_psr &&
+ !new_crtc_state->scaler_state.scaler_users &&
+ !old_crtc_state->scaler_state.scaler_users &&
+ !intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state);
+
+ if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color_vblank)
+ return;
+
+ /*
+ * Rough estimate:
+ * ~64 registers per each plane * 8 planes = 512
+ * Double that for pipe stuff and other overhead.
+ */
+ new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
+ new_crtc_state->use_dsb ? 1024 : 16);
+ if (!new_crtc_state->dsb_commit) {
+ new_crtc_state->use_dsb = false;
+ intel_color_cleanup_commit(new_crtc_state);
+ return;
+ }
+
+ if (new_crtc_state->use_dsb) {
+ if (intel_crtc_needs_color_update(new_crtc_state))
+ intel_color_commit_noarm(new_crtc_state->dsb_commit,
+ new_crtc_state);
+ intel_crtc_planes_update_noarm(new_crtc_state->dsb_commit,
+ state, crtc);
+
+ intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
+
+ if (intel_crtc_needs_color_update(new_crtc_state))
+ intel_color_commit_arm(new_crtc_state->dsb_commit,
+ new_crtc_state);
+ bdw_set_pipe_misc(new_crtc_state->dsb_commit,
+ new_crtc_state);
+ intel_crtc_planes_update_arm(new_crtc_state->dsb_commit,
+ state, crtc);
+
+ if (!new_crtc_state->dsb_color_vblank) {
+ intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
+ intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
+ intel_dsb_interrupt(new_crtc_state->dsb_commit);
+ }
+ }
+
+ if (new_crtc_state->dsb_color_vblank)
+ intel_dsb_chain(state, new_crtc_state->dsb_commit,
+ new_crtc_state->dsb_color_vblank, true);
+
+ intel_dsb_finish(new_crtc_state->dsb_commit);
+}
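The buffer size handed to intel_dsb_prepare() follows the rough estimate in the comment above; spelled out (the named constants are purely illustrative, not from the patch):

	/* ~64 register writes per plane, up to 8 planes: */
	#define DSB_PLANE_WRITES	(64 * 8)		/* = 512 */
	/* doubled for pipe registers and other overhead: */
	#define DSB_COMMIT_SIZE		(DSB_PLANE_WRITES * 2)	/* = 1024 */
	/* a non-DSB commit only needs to chain dsb_color_vblank, hence 16 */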
+
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct drm_device *dev = state->base.dev;
@@ -7340,13 +7702,21 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
int i;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ intel_atomic_dsb_prepare(state, crtc);
+
intel_atomic_commit_fence_wait(state);
intel_td_flush(dev_priv);
+ intel_atomic_prepare_plane_clear_colors(state);
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ intel_atomic_dsb_finish(state, crtc);
+
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
intel_atomic_global_state_wait_for_dependencies(state);
@@ -7380,8 +7750,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
- intel_atomic_prepare_plane_clear_colors(state);
-
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state) ||
@@ -7442,6 +7810,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.funcs.display->commit_modeset_enables(state);
+ intel_program_dpkgc_latency(state);
+
if (state->modeset)
intel_set_cdclk_post_plane_update(state);
@@ -7462,7 +7832,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (new_crtc_state->do_async_flip)
intel_crtc_disable_flip_done(state, crtc);
- intel_color_wait_commit(new_crtc_state);
+ intel_atomic_dsb_wait_commit(new_crtc_state);
}
/*
@@ -7497,14 +7867,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_modeset_verify_crtc(state, crtc);
- /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
- hsw_ips_post_update(state, crtc);
-
- /*
- * Activate DRRS after state readout to avoid
- * dp_m_n vs. dp_m2_n2 confusion on BDW+.
- */
- intel_drrs_activate(new_crtc_state);
+ intel_post_plane_update_after_readout(state, crtc);
/*
* DSB cleanup is done in cleanup_work aligning with framebuffer
@@ -7514,7 +7877,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* FIXME get rid of this funny new->old swapping
*/
old_crtc_state->dsb_color_vblank = fetch_and_zero(&new_crtc_state->dsb_color_vblank);
- old_crtc_state->dsb_color_commit = fetch_and_zero(&new_crtc_state->dsb_color_commit);
+ old_crtc_state->dsb_commit = fetch_and_zero(&new_crtc_state->dsb_commit);
}
/* Underruns don't always raise interrupts, so check manually */
@@ -7554,8 +7917,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* schedule point (cond_resched()) here anyway to keep latencies
* down.
*/
- INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
- queue_work(system_highpri_wq, &state->base.commit_work);
+ INIT_WORK(&state->cleanup_work, intel_atomic_cleanup_work);
+ queue_work(dev_priv->display.wq.cleanup, &state->cleanup_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -7661,13 +8024,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
ret = intel_atomic_swap_state(state);
if (ret) {
- struct intel_crtc_state *new_crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
- intel_color_cleanup_commit(new_crtc_state);
-
drm_atomic_helper_unprepare_planes(dev, &state->base);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
@@ -7702,23 +8058,6 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
- struct drm_crtc *drmmode_crtc;
- struct intel_crtc *crtc;
-
- drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
- if (!drmmode_crtc)
- return -ENOENT;
-
- crtc = to_intel_crtc(drmmode_crtc);
- pipe_from_crtc_id->pipe = crtc->pipe;
-
- return 0;
-}
-
static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@@ -7800,7 +8139,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (HAS_DDI(dev_priv)) {
if (intel_ddi_crt_present(dev_priv))
- intel_crt_init(dev_priv);
+ intel_crt_init(display);
intel_bios_for_each_encoder(display, intel_ddi_init);
@@ -7815,9 +8154,9 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
* incorrect sharing of the PPS.
*/
intel_lvds_init(dev_priv);
- intel_crt_init(dev_priv);
+ intel_crt_init(display);
- dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
+ dpd_is_edp = intel_dp_is_port_edp(display, PORT_D);
if (ilk_has_edp_a(dev_priv))
g4x_dp_init(dev_priv, DP_A, PORT_A);
@@ -7846,7 +8185,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
bool has_edp, has_port;
if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
- intel_crt_init(dev_priv);
+ intel_crt_init(display);
/*
* The DP_DETECTED bit is the latched state of the DDC
@@ -7863,14 +8202,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
- has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
+ has_edp = intel_dp_is_port_edp(display, PORT_B);
has_port = intel_bios_is_port_present(display, PORT_B);
if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
+ has_edp = intel_dp_is_port_edp(display, PORT_C);
has_port = intel_bios_is_port_present(display, PORT_C);
if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
@@ -7892,14 +8231,14 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
vlv_dsi_init(dev_priv);
} else if (IS_PINEVIEW(dev_priv)) {
intel_lvds_init(dev_priv);
- intel_crt_init(dev_priv);
+ intel_crt_init(display);
} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
bool found = false;
if (IS_MOBILE(dev_priv))
intel_lvds_init(dev_priv);
- intel_crt_init(dev_priv);
+ intel_crt_init(display);
if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
@@ -7941,7 +8280,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (IS_I85X(dev_priv))
intel_lvds_init(dev_priv);
- intel_crt_init(dev_priv);
+ intel_crt_init(display);
intel_dvo_init(dev_priv);
}
@@ -7959,10 +8298,12 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
static int max_dotclock(struct drm_i915_private *i915)
{
- int max_dotclock = i915->display.cdclk.max_dotclk_freq;
+ struct intel_display *display = &i915->display;
+ int max_dotclock = display->cdclk.max_dotclk_freq;
- /* icl+ might use joiner */
- if (DISPLAY_VER(i915) >= 11)
+ if (HAS_ULTRAJOINER(display))
+ max_dotclock *= 4;
+ else if (HAS_UNCOMPRESSED_JOINER(display) || HAS_BIGJOINER(display))
max_dotclock *= 2;
return max_dotclock;
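max_dotclock() now derives the multiplier from the platform's actual joiner capability rather than a bare version check; for example (the base frequency is hypothetical):

	/* display->cdclk.max_dotclk_freq = 648000 kHz (hypothetical):
	 *   HAS_ULTRAJOINER:                    648000 * 4 = 2592000 kHz
	 *   HAS_UNCOMPRESSED_JOINER/BIGJOINER:  648000 * 2 = 1296000 kHz
	 */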
@@ -8086,7 +8427,7 @@ enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *de
enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
const struct drm_display_mode *mode,
- bool joiner)
+ int num_joined_pipes)
{
int plane_width_max, plane_height_max;
@@ -8102,8 +8443,11 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
* plane so let's not advertise modes that are
* too big for that.
*/
- if (DISPLAY_VER(dev_priv) >= 11) {
- plane_width_max = 5120 << joiner;
+ if (DISPLAY_VER(dev_priv) >= 30) {
+ plane_width_max = 6144 * num_joined_pipes;
+ plane_height_max = 4800;
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ plane_width_max = 5120 * num_joined_pipes;
plane_height_max = 4320;
} else {
plane_width_max = 5120;
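With the bool-to-count change the width limit scales linearly with the number of joined pipes; plugging in the numbers from the code above:

	/* DISPLAY_VER >= 30, 2 joined pipes: 6144 * 2 = 12288 px wide, 4800 high
	 * DISPLAY_VER >= 30, 4 joined pipes: 6144 * 4 = 24576 px wide
	 * DISPLAY_VER >= 11, 2 joined pipes: 5120 * 2 = 10240 px wide, 4320 high
	 */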
@@ -8255,9 +8599,9 @@ out:
return ret;
}
-void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+void i830_enable_pipe(struct intel_display *display, enum pipe pipe)
{
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
enum transcoder cpu_transcoder = (enum transcoder)pipe;
/* 640x480@60Hz, ~25175 kHz */
struct dpll clock = {
@@ -8270,10 +8614,10 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 dpll, fp;
int i;
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
i9xx_calc_dpll_params(48000, &clock) != 25154);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
pipe_name(pipe), clock.vco, clock.dot);
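The "~25175 kHz" in the comment is simply the VESA 640x480@60 pixel clock; as a quick sanity check of the arithmetic:

	/* htotal * vtotal * refresh = 800 * 525 * 59.94 Hz ~= 25175 kHz;
	 * i9xx_calc_dpll_params(48000, &clock) lands on 25154 kHz with the
	 * chosen dividers, close enough for this force-quirk pipe.
	 */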
@@ -8285,35 +8629,35 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
- intel_de_write(dev_priv, TRANS_HTOTAL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HTOTAL(display, cpu_transcoder),
HACTIVE(640 - 1) | HTOTAL(800 - 1));
- intel_de_write(dev_priv, TRANS_HBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HBLANK(display, cpu_transcoder),
HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
- intel_de_write(dev_priv, TRANS_HSYNC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_HSYNC(display, cpu_transcoder),
HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
- intel_de_write(dev_priv, TRANS_VTOTAL(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VTOTAL(display, cpu_transcoder),
VACTIVE(480 - 1) | VTOTAL(525 - 1));
- intel_de_write(dev_priv, TRANS_VBLANK(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VBLANK(display, cpu_transcoder),
VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
- intel_de_write(dev_priv, TRANS_VSYNC(dev_priv, cpu_transcoder),
+ intel_de_write(display, TRANS_VSYNC(display, cpu_transcoder),
VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
- intel_de_write(dev_priv, PIPESRC(dev_priv, pipe),
+ intel_de_write(display, PIPESRC(display, pipe),
PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));
- intel_de_write(dev_priv, FP0(pipe), fp);
- intel_de_write(dev_priv, FP1(pipe), fp);
+ intel_de_write(display, FP0(pipe), fp);
+ intel_de_write(display, FP1(pipe), fp);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe),
+ intel_de_write(display, DPLL(display, pipe),
dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll);
+ intel_de_write(display, DPLL(display, pipe), dpll);
/* Wait for the clocks to stabilize. */
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -8321,46 +8665,46 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
*
* So write it again.
*/
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll);
+ intel_de_write(display, DPLL(display, pipe), dpll);
/* We do this three times for luck */
for (i = 0; i < 3 ; i++) {
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), dpll);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), dpll);
+ intel_de_posting_read(display, DPLL(display, pipe));
udelay(150); /* wait for warmup */
}
- intel_de_write(dev_priv, TRANSCONF(dev_priv, pipe), TRANSCONF_ENABLE);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, pipe));
+ intel_de_write(display, TRANSCONF(display, pipe), TRANSCONF_ENABLE);
+ intel_de_posting_read(display, TRANSCONF(display, pipe));
intel_wait_for_pipe_scanline_moving(crtc);
}
-void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+void i830_disable_pipe(struct intel_display *display, enum pipe pipe)
{
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
- drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
+ drm_dbg_kms(display->drm, "disabling pipe %c due to force quirk\n",
pipe_name(pipe));
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_A)) & DISP_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_B)) & DISP_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, DSPCNTR(dev_priv, PLANE_C)) & DISP_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_A)) & MCURSOR_MODE_MASK);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, CURCNTR(dev_priv, PIPE_B)) & MCURSOR_MODE_MASK);
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, DSPCNTR(display, PLANE_A)) & DISP_ENABLE);
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, DSPCNTR(display, PLANE_B)) & DISP_ENABLE);
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, DSPCNTR(display, PLANE_C)) & DISP_ENABLE);
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, CURCNTR(display, PIPE_A)) & MCURSOR_MODE_MASK);
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, CURCNTR(display, PIPE_B)) & MCURSOR_MODE_MASK);
- intel_de_write(dev_priv, TRANSCONF(dev_priv, pipe), 0);
- intel_de_posting_read(dev_priv, TRANSCONF(dev_priv, pipe));
+ intel_de_write(display, TRANSCONF(display, pipe), 0);
+ intel_de_posting_read(display, TRANSCONF(display, pipe));
intel_wait_for_pipe_scanline_stopped(crtc);
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), DPLL_VGA_MODE_DIS);
- intel_de_posting_read(dev_priv, DPLL(dev_priv, pipe));
+ intel_de_write(display, DPLL(display, pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(display, DPLL(display, pipe));
}
void intel_hpd_poll_fini(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index b21d9578d5db..49a246feb1ae 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -40,7 +40,6 @@ struct drm_encoder;
struct drm_file;
struct drm_format_info;
struct drm_framebuffer;
-struct drm_i915_gem_object;
struct drm_i915_private;
struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
@@ -52,6 +51,7 @@ struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
struct intel_initial_plane_config;
@@ -94,16 +94,6 @@ static inline bool transcoder_is_dsi(enum transcoder transcoder)
return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}
-/*
- * Global legacy plane identifier. Valid only for primary/sprite
- * planes on pre-g4x, and only for primary planes on g4x-bdw.
- */
-enum i9xx_plane_id {
- PLANE_A,
- PLANE_B,
- PLANE_C,
-};
-
#define plane_name(p) ((p) + 'A')
#define for_each_plane_id_on_crtc(__crtc, __p) \
@@ -248,9 +238,6 @@ enum phy_fia {
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
for_each_if((__phys_mask) & BIT(__phy))
-#define for_each_crtc(dev, crtc) \
- list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
-
#define for_each_intel_plane(dev, intel_plane) \
list_for_each_entry(intel_plane, \
&(dev)->mode_config.plane_list, \
@@ -401,6 +388,30 @@ enum phy_fia {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+#define for_each_crtc_in_masks(display, crtc, first_pipes, second_pipes, i) \
+ for ((i) = 0; \
+ (i) < (I915_MAX_PIPES * 2) && ((crtc) = intel_crtc_for_pipe(display, (i) % I915_MAX_PIPES), 1); \
+ (i)++) \
+ for_each_if((crtc) && ((first_pipes) | ((second_pipes) << I915_MAX_PIPES)) & BIT(i))
+
+#define for_each_crtc_in_masks_reverse(display, crtc, first_pipes, second_pipes, i) \
+ for ((i) = (I915_MAX_PIPES * 2 - 1); \
+ (i) >= 0 && ((crtc) = intel_crtc_for_pipe(display, (i) % I915_MAX_PIPES), 1); \
+ (i)--) \
+ for_each_if((crtc) && ((first_pipes) | ((second_pipes) << I915_MAX_PIPES)) & BIT(i))
+
+#define for_each_pipe_crtc_modeset_disable(display, crtc, crtc_state, i) \
+ for_each_crtc_in_masks(display, crtc, \
+ _intel_modeset_primary_pipes(crtc_state), \
+ _intel_modeset_secondary_pipes(crtc_state), \
+ i)
+
+#define for_each_pipe_crtc_modeset_enable(display, crtc, crtc_state, i) \
+ for_each_crtc_in_masks_reverse(display, crtc, \
+ _intel_modeset_primary_pipes(crtc_state), \
+ _intel_modeset_secondary_pipes(crtc_state), \
+ i)
+
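The two pipe masks are packed into one bit space twice I915_MAX_PIPES wide, so a single index i first walks the primary mask (bits 0..I915_MAX_PIPES-1) and then the secondary mask; the reverse variant therefore visits the secondary pipes before the primaries. A hedged usage sketch:

	struct intel_crtc *pipe_crtc;
	int i;

	/* enable path: secondaries are brought up before their primary */
	for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) {
		/* i % I915_MAX_PIPES is pipe_crtc's pipe number */
	}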
int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc);
@@ -415,7 +426,7 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
const struct drm_display_mode *mode,
- bool joiner);
+ int num_joined_pipes);
enum drm_mode_status
intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
const struct drm_display_mode *mode);
@@ -425,7 +436,14 @@ bool is_trans_port_sync_master(const struct intel_crtc_state *state);
u8 intel_crtc_joined_pipe_mask(const struct intel_crtc_state *crtc_state);
bool intel_crtc_is_joiner_secondary(const struct intel_crtc_state *crtc_state);
bool intel_crtc_is_joiner_primary(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_is_bigjoiner_primary(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_is_bigjoiner_secondary(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_is_ultrajoiner(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_is_ultrajoiner_primary(const struct intel_crtc_state *crtc_state);
+bool intel_crtc_ultrajoiner_enable_needed(const struct intel_crtc_state *crtc_state);
u8 intel_crtc_joiner_secondary_pipes(const struct intel_crtc_state *crtc_state);
+u8 _intel_modeset_primary_pipes(const struct intel_crtc_state *crtc_state);
+u8 _intel_modeset_secondary_pipes(const struct intel_crtc_state *crtc_state);
struct intel_crtc *intel_primary_crtc(const struct intel_crtc_state *crtc_state);
bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state);
bool intel_pipe_config_compare(const struct intel_crtc_state *current_config,
@@ -437,8 +455,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
-void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i830_enable_pipe(struct intel_display *display, enum pipe pipe);
+void i830_disable_pipe(struct intel_display *display, enum pipe pipe);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
@@ -470,16 +488,10 @@ bool intel_encoder_is_snps(struct intel_encoder *encoder);
bool intel_encoder_is_tc(struct intel_encoder *encoder);
enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder);
-int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
+void vlv_wait_port_ready(struct intel_display *display,
struct intel_digital_port *dig_port,
unsigned int expected_mask);
-struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd);
bool intel_fuzzy_clock_check(int clock1, int clock2);
@@ -570,21 +582,21 @@ void assert_transcoder(struct drm_i915_private *dev_priv,
bool assert_port_valid(struct drm_i915_private *i915, enum port port);
/*
- * Use I915_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw state sanity
- * checks to check for unexpected conditions which may not necessarily be a user
- * visible problem. This will either WARN() or DRM_ERROR() depending on the
- * verbose_state_checks module param, to enable distros and users to tailor
- * their preferred amount of i915 abrt spam.
+ * Use INTEL_DISPLAY_STATE_WARN(x) (rather than WARN() and WARN_ON()) for hw
+ * state sanity checks to check for unexpected conditions which may not
+ * necessarily be a user visible problem. This will either drm_WARN() or
+ * drm_err() depending on the verbose_state_checks module param, to enable
+ * distros and users to tailor their preferred amount of i915 abrt spam.
*/
-#define I915_STATE_WARN(__i915, condition, format...) ({ \
- struct drm_device *drm = &(__i915)->drm; \
+#define INTEL_DISPLAY_STATE_WARN(__display, condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- if (!drm_WARN(drm, __i915->display.params.verbose_state_checks, format)) \
- drm_err(drm, format); \
+ if (!drm_WARN((__display)->drm, (__display)->params.verbose_state_checks, format)) \
+ drm_err((__display)->drm, format); \
unlikely(__ret_warn_on); \
})
bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915);
+int intel_crtc_num_joined_pipes(const struct intel_crtc_state *crtc_state);
#endif
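A rough userspace model of the renamed macro's semantics may help here. This is only a sketch under the assumption that verbose_state_checks merely selects between a loud warning and a plain error print; the real macro uses drm_WARN()/drm_err() and, like the original, relies on a GNU C statement expression:

#include <stdbool.h>
#include <stdio.h>

static bool verbose_state_checks = true;	/* module param stand-in */

#define STATE_WARN(condition, ...) ({				\
	bool __ret_warn_on = !!(condition);			\
	if (__ret_warn_on) {					\
		if (verbose_state_checks)			\
			fprintf(stderr, "WARNING: " __VA_ARGS__); \
		else						\
			fprintf(stderr, "error: " __VA_ARGS__);	\
	}							\
	__ret_warn_on;						\
})

int main(void)
{
	int hw_pipes = 2, sw_pipes = 3;	/* hypothetical hw/sw state */

	if (STATE_WARN(hw_pipes != sw_pipes,
		       "pipe count mismatch (hw %d, sw %d)\n",
		       hw_pipes, sw_pipes))
		return 1;
	return 0;
}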
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
new file mode 100644
index 000000000000..0578b68404da
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include "i915_drv.h"
+
+struct intel_display *__i915_to_display(struct drm_i915_private *i915)
+{
+ return &i915->display;
+}
+
+struct intel_display *__drm_to_display(struct drm_device *drm)
+{
+ return __i915_to_display(to_i915(drm));
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h
index ad8545c8055d..46c7208d42ba 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.h
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h
@@ -8,14 +8,20 @@
#ifndef __INTEL_DISPLAY_CONVERSION__
#define __INTEL_DISPLAY_CONVERSION__
+struct drm_device;
+struct drm_i915_private;
+struct intel_display;
+
+struct intel_display *__i915_to_display(struct drm_i915_private *i915);
+struct intel_display *__drm_to_display(struct drm_device *drm);
/*
* Transitional macro to optionally convert struct drm_i915_private * to struct
* intel_display *, also accepting the latter.
*/
#define __to_intel_display(p) \
_Generic(p, \
- const struct drm_i915_private *: (&((const struct drm_i915_private *)(p))->display), \
- struct drm_i915_private *: (&((struct drm_i915_private *)(p))->display), \
+ const struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
+ struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
const struct intel_display *: (p), \
struct intel_display *: (p))
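The _Generic dispatch above is easier to see in a standalone model; the struct names below are stand-ins, not the real i915 types. Moving the i915 branch into an out-of-line helper, as the patch does, lets the header compile without the full definition of struct drm_i915_private:

#include <stdio.h>

struct display { int id; };
struct driver { struct display display; };

/* out-of-line helper, mirroring __i915_to_display() */
static struct display *driver_to_display(struct driver *drv)
{
	return &drv->display;
}

/* accepts either pointer type, always yields struct display * */
#define to_display(p) \
	_Generic((p), \
		 struct driver *: driver_to_display((struct driver *)(p)), \
		 struct display *: (p))

int main(void)
{
	struct driver drv = { .display = { .id = 7 } };
	struct display *a = to_display(&drv);	/* converted via helper */
	struct display *b = to_display(a);	/* passed through as-is */

	printf("%d %d\n", a->id, b->id);
	return 0;
}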
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 0a711114ff2b..554870d2494b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -81,10 +81,8 @@ struct intel_display_funcs {
struct intel_wm_funcs {
/* update_wm is for legacy wm management */
void (*update_wm)(struct drm_i915_private *dev_priv);
- int (*compute_pipe_wm)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- int (*compute_intermediate_wm)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+ int (*compute_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void (*initial_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*atomic_update_watermarks)(struct intel_atomic_state *state,
@@ -286,6 +284,9 @@ struct intel_display {
/* drm device backpointer */
struct drm_device *drm;
+ /* Platform (and subplatform, if any) identification */
+ struct intel_display_platforms platform;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -452,11 +453,22 @@ struct intel_display {
} ips;
struct {
- bool display_irqs_enabled;
+ /*
+ * Most platforms treat the display irq block as an always-on
+ * power domain. vlv/chv can disable it at runtime and need
+ * special care to avoid writing any of the display block
+ * registers outside of the power domain. We defer setting up
+ * the display irqs in this case to the runtime pm.
+ */
+ bool vlv_display_irqs_enabled;
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
+ int vblank_wa_num_pipes;
+
+ struct work_struct vblank_dc_work;
+
u32 de_irq_mask[I915_MAX_PIPES];
u32 pipestat_irq_mask[I915_MAX_PIPES];
} irq;
@@ -500,6 +512,11 @@ struct intel_display {
/* restore state for suspend/resume and display reset */
struct drm_atomic_state *modeset_state;
struct drm_modeset_acquire_ctx reset_ctx;
+ u32 saveDSPARB;
+ u32 saveSWF0[16];
+ u32 saveSWF1[16];
+ u32 saveSWF3[3];
+ u16 saveGCDGMBUS;
} restore;
struct {
@@ -537,6 +554,9 @@ struct intel_display {
/* unbound hipri wq for page flips/plane updates */
struct workqueue_struct *flip;
+
+ /* hipri wq for commit cleanups */
+ struct workqueue_struct *cleanup;
} wq;
/* Grouping using named structs. Keep sorted. */
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index f5f618199d39..f1d76484025a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -3,6 +3,7 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/debugfs.h>
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
@@ -10,13 +11,15 @@
#include <drm/drm_fourcc.h>
#include "hsw_ips.h"
-#include "i915_debugfs.h"
+#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i9xx_wm_regs.h"
#include "intel_alpm.h"
+#include "intel_bo.h"
#include "intel_crtc.h"
-#include "intel_de.h"
#include "intel_crtc_state_dump.h"
+#include "intel_de.h"
#include "intel_display_debugfs.h"
#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
@@ -26,7 +29,9 @@
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
+#include "intel_dp_test.h"
#include "intel_drrs.h"
+#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_hdcp.h"
@@ -39,11 +44,28 @@
#include "intel_vdsc.h"
#include "intel_wm.h"
+static struct intel_display *node_to_intel_display(struct drm_info_node *node)
+{
+ return to_intel_display(node->minor->dev);
+}
+
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
return to_i915(node->minor->dev);
}
+static int intel_display_caps(struct seq_file *m, void *data)
+{
+ struct intel_display *display = node_to_intel_display(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_display_device_info_print(DISPLAY_INFO(display),
+ DISPLAY_RUNTIME_INFO(display), &p);
+ intel_display_params_dump(&display->params, display->drm->driver->name, &p);
+
+ return 0;
+}
+
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -106,7 +128,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.format->cpp[0] * 8,
fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
- i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
+ intel_bo_describe(m, intel_fb_bo(&fbdev_fb->base));
seq_putc(m, '\n');
}
#endif
@@ -124,7 +146,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.format->cpp[0] * 8,
fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
- i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
+ intel_bo_describe(m, intel_fb_bo(&fb->base));
seq_putc(m, '\n');
}
mutex_unlock(&dev_priv->drm.mode_config.fb_lock);
@@ -424,7 +446,7 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
int num_scalers = crtc->num_scalers;
int i;
- /* Not all platformas have a scaler */
+ /* Not all platforms have a scaler */
if (num_scalers) {
seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d scaling_filter=%d",
num_scalers,
@@ -710,11 +732,12 @@ static bool
intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
enum i915_power_well_id power_well_id)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t wakeref;
bool is_enabled;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- is_enabled = intel_display_power_well_is_enabled(i915,
+ is_enabled = intel_display_power_well_is_enabled(display,
power_well_id);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -773,198 +796,6 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
return 0;
}
-static ssize_t i915_displayport_test_active_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- char *input_buffer;
- int status = 0;
- struct drm_device *dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
- int val = 0;
-
- dev = ((struct seq_file *)file->private_data)->private;
-
- if (len == 0)
- return 0;
-
- input_buffer = memdup_user_nul(ubuf, len);
- if (IS_ERR(input_buffer))
- return PTR_ERR(input_buffer);
-
- drm_dbg(dev, "Copied %d bytes from user\n", (unsigned int)len);
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- status = kstrtoint(input_buffer, 10, &val);
- if (status < 0)
- break;
- drm_dbg(dev, "Got %d for test active\n", val);
- /* To prevent erroneous activation of the compliance
- * testing code, only accept an actual value of 1 here
- */
- if (val == 1)
- intel_dp->compliance.test_active = true;
- else
- intel_dp->compliance.test_active = false;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- kfree(input_buffer);
- if (status < 0)
- return status;
-
- *offp += len;
- return len;
-}
-
-static int i915_displayport_test_active_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->compliance.test_active)
- seq_puts(m, "1");
- else
- seq_puts(m, "0");
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-
-static int i915_displayport_test_active_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, i915_displayport_test_active_show,
- inode->i_private);
-}
-
-static const struct file_operations i915_displayport_test_active_fops = {
- .owner = THIS_MODULE,
- .open = i915_displayport_test_active_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_displayport_test_active_write
-};
-
-static int i915_displayport_test_data_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->compliance.test_type ==
- DP_TEST_LINK_EDID_READ)
- seq_printf(m, "%lx",
- intel_dp->compliance.test_data.edid);
- else if (intel_dp->compliance.test_type ==
- DP_TEST_LINK_VIDEO_PATTERN) {
- seq_printf(m, "hdisplay: %d\n",
- intel_dp->compliance.test_data.hdisplay);
- seq_printf(m, "vdisplay: %d\n",
- intel_dp->compliance.test_data.vdisplay);
- seq_printf(m, "bpc: %u\n",
- intel_dp->compliance.test_data.bpc);
- } else if (intel_dp->compliance.test_type ==
- DP_TEST_LINK_PHY_TEST_PATTERN) {
- seq_printf(m, "pattern: %d\n",
- intel_dp->compliance.test_data.phytest.phy_pattern);
- seq_printf(m, "Number of lanes: %d\n",
- intel_dp->compliance.test_data.phytest.num_lanes);
- seq_printf(m, "Link Rate: %d\n",
- intel_dp->compliance.test_data.phytest.link_rate);
- seq_printf(m, "level: %02x\n",
- intel_dp->train_set[0]);
- }
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
-
-static int i915_displayport_test_type_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
const char __user *ubuf,
@@ -1025,6 +856,7 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
};
static const struct drm_info_list intel_display_debugfs_list[] = {
+ {"intel_display_caps", intel_display_caps, 0},
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
{"i915_sr_status", i915_sr_status, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
@@ -1037,37 +869,22 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_lpsp_status", i915_lpsp_status, 0},
};
-static const struct {
- const char *name;
- const struct file_operations *fops;
-} intel_display_debugfs_files[] = {
- {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
- {"i915_dp_test_data", &i915_displayport_test_data_fops},
- {"i915_dp_test_type", &i915_displayport_test_type_fops},
- {"i915_dp_test_active", &i915_displayport_test_active_fops},
-};
-
void intel_display_debugfs_register(struct drm_i915_private *i915)
{
struct intel_display *display = &i915->display;
struct drm_minor *minor = i915->drm.primary;
- int i;
- for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
- debugfs_create_file(intel_display_debugfs_files[i].name,
- 0644,
- minor->debugfs_root,
- to_i915(minor->dev),
- intel_display_debugfs_files[i].fops);
- }
+ debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
+ to_i915(minor->dev), &i915_fifo_underrun_reset_ops);
drm_debugfs_create_files(intel_display_debugfs_list,
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
intel_bios_debugfs_register(display);
- intel_cdclk_debugfs_register(i915);
- intel_dmc_debugfs_register(i915);
+ intel_cdclk_debugfs_register(display);
+ intel_dmc_debugfs_register(display);
+ intel_dp_test_debugfs_register(display);
intel_fbc_debugfs_register(display);
intel_hpd_debugfs_register(i915);
intel_opregion_debugfs_register(display);
@@ -1198,6 +1015,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
DP_DSC_YCbCr444)));
seq_printf(m, "DSC_Sink_BPP_Precision: %d\n",
drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd));
+ seq_printf(m, "DSC_Sink_Max_Slice_Count: %d\n",
+ drm_dp_dsc_sink_max_slice_count((connector->dp.dsc_dpcd), intel_dp_is_edp(intel_dp)));
seq_printf(m, "Force_DSC_Enable: %s\n",
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
@@ -1502,6 +1321,68 @@ static int intel_crtc_pipe_show(struct seq_file *m, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe);
+static int i915_joiner_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+
+ seq_printf(m, "%d\n", connector->force_joined_pipes);
+
+ return 0;
+}
+
+static ssize_t i915_joiner_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_display *display = to_intel_display(connector);
+ int force_joined_pipes = 0;
+ int ret;
+
+ if (len == 0)
+ return 0;
+
+ ret = kstrtoint_from_user(ubuf, len, 0, &force_joined_pipes);
+ if (ret < 0)
+ return ret;
+
+ switch (force_joined_pipes) {
+ case 0:
+ case 1:
+ case 2:
+ connector->force_joined_pipes = force_joined_pipes;
+ break;
+ case 4:
+ if (HAS_ULTRAJOINER(display)) {
+ connector->force_joined_pipes = force_joined_pipes;
+ break;
+ }
+
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+
+ *offp += len;
+
+ return len;
+}
+
+static int i915_joiner_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_joiner_show, inode->i_private);
+}
+
+static const struct file_operations i915_joiner_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_joiner_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_joiner_write
+};
+
/**
* intel_connector_debugfs_add - add i915 specific connector debugfs files
* @connector: pointer to a registered intel_connector
@@ -1548,11 +1429,11 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
connector, &i915_dsc_fractional_bpp_fops);
}
- if (DISPLAY_VER(i915) >= 11 &&
- (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector_type == DRM_MODE_CONNECTOR_eDP)) {
- debugfs_create_bool("i915_bigjoiner_force_enable", 0644, root,
- &connector->force_bigjoiner_enable);
+ if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) &&
+ intel_dp_has_joiner(intel_attached_dp(connector))) {
+ debugfs_create_file("i915_joiner_force_enable", 0644, root,
+ connector, &i915_joiner_fops);
}
if (connector_type == DRM_MODE_CONNECTOR_DSI ||
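Judging from the switch in i915_joiner_write() above, the value written is the number of pipes to force-join: 0 leaves the decision to the driver, 1 forces a single pipe, 2 forces the big joiner, and 4 forces the ultrajoiner where HAS_ULTRAJOINER() holds. A hedged userspace sketch; the debugfs path is hypothetical and depends on the card and connector naming:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical connector directory; adjust card and connector */
	const char *path =
		"/sys/kernel/debug/dri/0/DP-1/i915_joiner_force_enable";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "2" = force big joiner; "0" restores driver-chosen behavior */
	if (write(fd, "2", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}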
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index ec3ed29a83c9..88914a1f3f62 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -7,9 +7,10 @@
#include <linux/kernel.h>
#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include "intel_display_core.h"
#include "intel_display_debugfs_params.h"
-#include "i915_drv.h"
#include "intel_display_params.h"
/* int param */
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 1b46ba985580..68cb7f9b9ef3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -3,18 +3,20 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/intel/i915_pciids.h>
+#include <drm/intel/pciids.h>
#include <drm/drm_color_mgmt.h>
#include <linux/pci.h>
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_cx0_phy_regs.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_device.h"
#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
+#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_step.h"
@@ -31,14 +33,25 @@ struct stepping_desc {
.step_info.size = ARRAY_SIZE(_map)
struct subplatform_desc {
- enum intel_display_subplatform subplatform;
+ struct intel_display_platforms platforms;
const char *name;
const u16 *pciidlist;
struct stepping_desc step_info;
};
+#define SUBPLATFORM(_platform, _subplatform) \
+ .platforms._platform##_##_subplatform = 1, \
+ .name = #_subplatform
+
+/*
+ * Group subplatform alias that matches multiple subplatforms, e.g. making ult
+ * cover both ult and ulx on HSW/BDW.
+ */
+#define SUBPLATFORM_GROUP(_platform, _subplatform) \
+ .platforms._platform##_##_subplatform = 1
+
struct platform_desc {
- enum intel_display_platform platform;
+ struct intel_display_platforms platforms;
const char *name;
const struct subplatform_desc *subplatforms;
const struct intel_display_device_info *info; /* NULL for GMD ID */
@@ -46,9 +59,16 @@ struct platform_desc {
};
#define PLATFORM(_platform) \
- .platform = (INTEL_DISPLAY_##_platform), \
+ .platforms._platform = 1, \
.name = #_platform
+/*
+ * Group platform alias that matches multiple platforms, e.g. g4x, which
+ * covers both g45 and gm45.
+ */
+#define PLATFORM_GROUP(_platform) \
+ .platforms._platform = 1
+
#define ID(id) (id)
static const struct intel_display_device_info no_display = {};
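The PLATFORM()/PLATFORM_GROUP() pair, like the SUBPLATFORM variants earlier in the patch, relies on C designated initializers accumulating across distinct members of the same nested struct. A compilable miniature of that trick, using a stand-in struct rather than the real intel_display_platforms:

#include <stdio.h>

struct platforms { unsigned gm45:1, g4x:1, mobile:1; };
struct desc { struct platforms platforms; const char *name; };

#define PLATFORM(p)		.platforms.p = 1, .name = #p
#define PLATFORM_GROUP(p)	.platforms.p = 1

/* sets the gm45 bit plus the g4x and mobile group bits */
static const struct desc gm45_desc = {
	PLATFORM(gm45),
	PLATFORM_GROUP(g4x),
	PLATFORM_GROUP(mobile),
};

int main(void)
{
	printf("%s: g4x=%u mobile=%u\n", gm45_desc.name,
	       gm45_desc.platforms.g4x, gm45_desc.platforms.mobile);
	return 0;
}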
@@ -232,7 +252,8 @@ static const struct intel_display_device_info no_display = {};
.__runtime_defaults.cpu_transcoder_mask = BIT(TRANSCODER_A)
static const struct platform_desc i830_desc = {
- PLATFORM(I830),
+ PLATFORM(i830),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
I830_DISPLAY,
@@ -241,7 +262,7 @@ static const struct platform_desc i830_desc = {
};
static const struct platform_desc i845_desc = {
- PLATFORM(I845G),
+ PLATFORM(i845g),
.info = &(const struct intel_display_device_info) {
I845_DISPLAY,
@@ -250,7 +271,8 @@ static const struct platform_desc i845_desc = {
};
static const struct platform_desc i85x_desc = {
- PLATFORM(I85X),
+ PLATFORM(i85x),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
I830_DISPLAY,
@@ -260,7 +282,7 @@ static const struct platform_desc i85x_desc = {
};
static const struct platform_desc i865g_desc = {
- PLATFORM(I865G),
+ PLATFORM(i865g),
.info = &(const struct intel_display_device_info) {
I845_DISPLAY,
@@ -282,7 +304,7 @@ static const struct platform_desc i865g_desc = {
.__runtime_defaults.port_mask = BIT(PORT_B) | BIT(PORT_C) /* SDVO B/C */
static const struct platform_desc i915g_desc = {
- PLATFORM(I915G),
+ PLATFORM(i915g),
.info = &(const struct intel_display_device_info) {
GEN3_DISPLAY,
I845_COLORS,
@@ -292,7 +314,8 @@ static const struct platform_desc i915g_desc = {
};
static const struct platform_desc i915gm_desc = {
- PLATFORM(I915GM),
+ PLATFORM(i915gm),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN3_DISPLAY,
I9XX_COLORS,
@@ -305,7 +328,7 @@ static const struct platform_desc i915gm_desc = {
};
static const struct platform_desc i945g_desc = {
- PLATFORM(I945G),
+ PLATFORM(i945g),
.info = &(const struct intel_display_device_info) {
GEN3_DISPLAY,
I845_COLORS,
@@ -316,7 +339,8 @@ static const struct platform_desc i945g_desc = {
};
static const struct platform_desc i945gm_desc = {
- PLATFORM(I915GM),
+ PLATFORM(i945gm),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN3_DISPLAY,
I9XX_COLORS,
@@ -330,7 +354,7 @@ static const struct platform_desc i945gm_desc = {
};
static const struct platform_desc g33_desc = {
- PLATFORM(G33),
+ PLATFORM(g33),
.info = &(const struct intel_display_device_info) {
GEN3_DISPLAY,
I845_COLORS,
@@ -338,13 +362,21 @@ static const struct platform_desc g33_desc = {
},
};
-static const struct platform_desc pnv_desc = {
- PLATFORM(PINEVIEW),
- .info = &(const struct intel_display_device_info) {
- GEN3_DISPLAY,
- I9XX_COLORS,
- .has_hotplug = 1,
- },
+static const struct intel_display_device_info pnv_display = {
+ GEN3_DISPLAY,
+ I9XX_COLORS,
+ .has_hotplug = 1,
+};
+
+static const struct platform_desc pnv_g_desc = {
+ PLATFORM(pineview),
+ .info = &pnv_display,
+};
+
+static const struct platform_desc pnv_m_desc = {
+ PLATFORM(pineview),
+ PLATFORM_GROUP(mobile),
+ .info = &pnv_display,
};
#define GEN4_DISPLAY \
@@ -360,7 +392,7 @@ static const struct platform_desc pnv_desc = {
BIT(TRANSCODER_A) | BIT(TRANSCODER_B)
static const struct platform_desc i965g_desc = {
- PLATFORM(I965G),
+ PLATFORM(i965g),
.info = &(const struct intel_display_device_info) {
GEN4_DISPLAY,
.has_overlay = 1,
@@ -370,7 +402,8 @@ static const struct platform_desc i965g_desc = {
};
static const struct platform_desc i965gm_desc = {
- PLATFORM(I965GM),
+ PLATFORM(i965gm),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN4_DISPLAY,
.has_overlay = 1,
@@ -382,7 +415,8 @@ static const struct platform_desc i965gm_desc = {
};
static const struct platform_desc g45_desc = {
- PLATFORM(G45),
+ PLATFORM(g45),
+ PLATFORM_GROUP(g4x),
.info = &(const struct intel_display_device_info) {
GEN4_DISPLAY,
@@ -391,7 +425,9 @@ static const struct platform_desc g45_desc = {
};
static const struct platform_desc gm45_desc = {
- PLATFORM(GM45),
+ PLATFORM(gm45),
+ PLATFORM_GROUP(g4x),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
GEN4_DISPLAY,
.supports_tv = 1,
@@ -414,14 +450,15 @@ static const struct platform_desc gm45_desc = {
.__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D) /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
static const struct platform_desc ilk_d_desc = {
- PLATFORM(IRONLAKE),
+ PLATFORM(ironlake),
.info = &(const struct intel_display_device_info) {
ILK_DISPLAY,
},
};
static const struct platform_desc ilk_m_desc = {
- PLATFORM(IRONLAKE),
+ PLATFORM(ironlake),
+ PLATFORM_GROUP(mobile),
.info = &(const struct intel_display_device_info) {
ILK_DISPLAY,
@@ -429,42 +466,58 @@ static const struct platform_desc ilk_m_desc = {
},
};
-static const struct platform_desc snb_desc = {
- PLATFORM(SANDYBRIDGE),
- .info = &(const struct intel_display_device_info) {
- .has_hotplug = 1,
- I9XX_PIPE_OFFSETS,
- I9XX_CURSOR_OFFSETS,
- ILK_COLORS,
+static const struct intel_display_device_info snb_display = {
+ .has_hotplug = 1,
+ I9XX_PIPE_OFFSETS,
+ I9XX_CURSOR_OFFSETS,
+ ILK_COLORS,
- .__runtime_defaults.ip.ver = 6,
- .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
- .__runtime_defaults.cpu_transcoder_mask =
- BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
- .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
- .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
- },
+ .__runtime_defaults.ip.ver = 6,
+ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .__runtime_defaults.cpu_transcoder_mask =
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
+ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
};
-static const struct platform_desc ivb_desc = {
- PLATFORM(IVYBRIDGE),
- .info = &(const struct intel_display_device_info) {
- .has_hotplug = 1,
- IVB_PIPE_OFFSETS,
- IVB_CURSOR_OFFSETS,
- IVB_COLORS,
+static const struct platform_desc snb_d_desc = {
+ PLATFORM(sandybridge),
+ .info = &snb_display,
+};
- .__runtime_defaults.ip.ver = 7,
- .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .__runtime_defaults.cpu_transcoder_mask =
- BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
- .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
- .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
- },
+static const struct platform_desc snb_m_desc = {
+ PLATFORM(sandybridge),
+ PLATFORM_GROUP(mobile),
+ .info = &snb_display,
+};
+
+static const struct intel_display_device_info ivb_display = {
+ .has_hotplug = 1,
+ IVB_PIPE_OFFSETS,
+ IVB_CURSOR_OFFSETS,
+ IVB_COLORS,
+
+ .__runtime_defaults.ip.ver = 7,
+ .__runtime_defaults.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime_defaults.cpu_transcoder_mask =
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime_defaults.port_mask = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) | BIT(PORT_D), /* DP A, SDVO/HDMI/DP B, HDMI/DP C/D */
+ .__runtime_defaults.fbc_mask = BIT(INTEL_FBC_A),
+};
+
+static const struct platform_desc ivb_d_desc = {
+ PLATFORM(ivybridge),
+ .info = &ivb_display,
+};
+
+static const struct platform_desc ivb_m_desc = {
+ PLATFORM(ivybridge),
+ PLATFORM_GROUP(mobile),
+ .info = &ivb_display,
};
static const struct platform_desc vlv_desc = {
- PLATFORM(VALLEYVIEW),
+ PLATFORM(valleyview),
.info = &(const struct intel_display_device_info) {
.has_gmch = 1,
.has_hotplug = 1,
@@ -495,10 +548,19 @@ static const u16 hsw_ulx_ids[] = {
};
static const struct platform_desc hsw_desc = {
- PLATFORM(HASWELL),
+ PLATFORM(haswell),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_HASWELL_ULT, "ULT", hsw_ult_ids },
- { INTEL_DISPLAY_HASWELL_ULX, "ULX", hsw_ulx_ids },
+ /* Special case: Use ult both as group and subplatform. */
+ {
+ SUBPLATFORM(haswell, ult),
+ SUBPLATFORM_GROUP(haswell, ult),
+ .pciidlist = hsw_ult_ids,
+ },
+ {
+ SUBPLATFORM(haswell, ulx),
+ SUBPLATFORM_GROUP(haswell, ult),
+ .pciidlist = hsw_ulx_ids,
+ },
{},
},
.info = &(const struct intel_display_device_info) {
@@ -539,10 +601,19 @@ static const u16 bdw_ulx_ids[] = {
};
static const struct platform_desc bdw_desc = {
- PLATFORM(BROADWELL),
+ PLATFORM(broadwell),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_BROADWELL_ULT, "ULT", bdw_ult_ids },
- { INTEL_DISPLAY_BROADWELL_ULX, "ULX", bdw_ulx_ids },
+ /* Special case: Use ult both as group and subplatform. */
+ {
+ SUBPLATFORM(broadwell, ult),
+ SUBPLATFORM_GROUP(broadwell, ult),
+ .pciidlist = bdw_ult_ids,
+ },
+ {
+ SUBPLATFORM(broadwell, ulx),
+ SUBPLATFORM_GROUP(broadwell, ult),
+ .pciidlist = bdw_ulx_ids,
+ },
{},
},
.info = &(const struct intel_display_device_info) {
@@ -567,7 +638,7 @@ static const struct platform_desc bdw_desc = {
};
static const struct platform_desc chv_desc = {
- PLATFORM(CHERRYVIEW),
+ PLATFORM(cherryview),
.info = &(const struct intel_display_device_info) {
.has_hotplug = 1,
.has_gmch = 1,
@@ -630,10 +701,16 @@ static const enum intel_step skl_steppings[] = {
};
static const struct platform_desc skl_desc = {
- PLATFORM(SKYLAKE),
+ PLATFORM(skylake),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_SKYLAKE_ULT, "ULT", skl_ult_ids },
- { INTEL_DISPLAY_SKYLAKE_ULX, "ULX", skl_ulx_ids },
+ {
+ SUBPLATFORM(skylake, ult),
+ .pciidlist = skl_ult_ids,
+ },
+ {
+ SUBPLATFORM(skylake, ulx),
+ .pciidlist = skl_ulx_ids,
+ },
{},
},
.info = &skl_display,
@@ -665,10 +742,16 @@ static const enum intel_step kbl_steppings[] = {
};
static const struct platform_desc kbl_desc = {
- PLATFORM(KABYLAKE),
+ PLATFORM(kabylake),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_KABYLAKE_ULT, "ULT", kbl_ult_ids },
- { INTEL_DISPLAY_KABYLAKE_ULX, "ULX", kbl_ulx_ids },
+ {
+ SUBPLATFORM(kabylake, ult),
+ .pciidlist = kbl_ult_ids,
+ },
+ {
+ SUBPLATFORM(kabylake, ulx),
+ .pciidlist = kbl_ulx_ids,
+ },
{},
},
.info = &skl_display,
@@ -690,10 +773,16 @@ static const u16 cfl_ulx_ids[] = {
};
static const struct platform_desc cfl_desc = {
- PLATFORM(COFFEELAKE),
+ PLATFORM(coffeelake),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_COFFEELAKE_ULT, "ULT", cfl_ult_ids },
- { INTEL_DISPLAY_COFFEELAKE_ULX, "ULX", cfl_ulx_ids },
+ {
+ SUBPLATFORM(coffeelake, ult),
+ .pciidlist = cfl_ult_ids,
+ },
+ {
+ SUBPLATFORM(coffeelake, ulx),
+ .pciidlist = cfl_ulx_ids,
+ },
{},
},
.info = &skl_display,
@@ -706,9 +795,12 @@ static const u16 cml_ult_ids[] = {
};
static const struct platform_desc cml_desc = {
- PLATFORM(COMETLAKE),
+ PLATFORM(cometlake),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_COMETLAKE_ULT, "ULT", cml_ult_ids },
+ {
+ SUBPLATFORM(cometlake, ult),
+ .pciidlist = cml_ult_ids,
+ },
{},
},
.info = &skl_display,
@@ -745,7 +837,7 @@ static const enum intel_step bxt_steppings[] = {
};
static const struct platform_desc bxt_desc = {
- PLATFORM(BROXTON),
+ PLATFORM(broxton),
.info = &(const struct intel_display_device_info) {
GEN9_LP_DISPLAY,
.dbuf.size = 512 - 4, /* 4 blocks for bypass path allocation */
@@ -760,7 +852,7 @@ static const enum intel_step glk_steppings[] = {
};
static const struct platform_desc glk_desc = {
- PLATFORM(GEMINILAKE),
+ PLATFORM(geminilake),
.info = &(const struct intel_display_device_info) {
GEN9_LP_DISPLAY,
.dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
@@ -822,9 +914,12 @@ static const enum intel_step icl_steppings[] = {
};
static const struct platform_desc icl_desc = {
- PLATFORM(ICELAKE),
+ PLATFORM(icelake),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_ICELAKE_PORT_F, "Port F", icl_port_f_ids },
+ {
+ SUBPLATFORM(icelake, port_f),
+ .pciidlist = icl_port_f_ids,
+ },
{},
},
.info = &(const struct intel_display_device_info) {
@@ -847,13 +942,13 @@ static const enum intel_step jsl_ehl_steppings[] = {
};
static const struct platform_desc jsl_desc = {
- PLATFORM(JASPERLAKE),
+ PLATFORM(jasperlake),
.info = &jsl_ehl_display,
STEP_INFO(jsl_ehl_steppings),
};
static const struct platform_desc ehl_desc = {
- PLATFORM(ELKHARTLAKE),
+ PLATFORM(elkhartlake),
.info = &jsl_ehl_display,
STEP_INFO(jsl_ehl_steppings),
};
@@ -919,10 +1014,13 @@ static const enum intel_step tgl_uy_steppings[] = {
};
static const struct platform_desc tgl_desc = {
- PLATFORM(TIGERLAKE),
+ PLATFORM(tigerlake),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_TIGERLAKE_UY, "UY", tgl_uy_ids,
- STEP_INFO(tgl_uy_steppings) },
+ {
+ SUBPLATFORM(tigerlake, uy),
+ .pciidlist = tgl_uy_ids,
+ STEP_INFO(tgl_uy_steppings),
+ },
{},
},
.info = &(const struct intel_display_device_info) {
@@ -944,7 +1042,8 @@ static const enum intel_step dg1_steppings[] = {
};
static const struct platform_desc dg1_desc = {
- PLATFORM(DG1),
+ PLATFORM(dg1),
+ PLATFORM_GROUP(dgfx),
.info = &(const struct intel_display_device_info) {
XE_D_DISPLAY,
@@ -961,7 +1060,7 @@ static const enum intel_step rkl_steppings[] = {
};
static const struct platform_desc rkl_desc = {
- PLATFORM(ROCKETLAKE),
+ PLATFORM(rocketlake),
.info = &(const struct intel_display_device_info) {
XE_D_DISPLAY,
.abox_mask = BIT(0),
@@ -996,10 +1095,13 @@ static const enum intel_step adl_s_rpl_s_steppings[] = {
};
static const struct platform_desc adl_s_desc = {
- PLATFORM(ALDERLAKE_S),
+ PLATFORM(alderlake_s),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S, "RPL-S", adls_rpls_ids,
- STEP_INFO(adl_s_rpl_s_steppings) },
+ {
+ SUBPLATFORM(alderlake_s, raptorlake_s),
+ .pciidlist = adls_rpls_ids,
+ STEP_INFO(adl_s_rpl_s_steppings),
+ },
{},
},
.info = &(const struct intel_display_device_info) {
@@ -1100,14 +1202,23 @@ static const enum intel_step adl_p_rpl_pu_steppings[] = {
};
static const struct platform_desc adl_p_desc = {
- PLATFORM(ALDERLAKE_P),
+ PLATFORM(alderlake_p),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N, "ADL-N", adlp_adln_ids,
- STEP_INFO(adl_p_adl_n_steppings) },
- { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P, "RPL-P", adlp_rplp_ids,
- STEP_INFO(adl_p_rpl_pu_steppings) },
- { INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U, "RPL-U", adlp_rplu_ids,
- STEP_INFO(adl_p_rpl_pu_steppings) },
+ {
+ SUBPLATFORM(alderlake_p, alderlake_n),
+ .pciidlist = adlp_adln_ids,
+ STEP_INFO(adl_p_adl_n_steppings),
+ },
+ {
+ SUBPLATFORM(alderlake_p, raptorlake_p),
+ .pciidlist = adlp_rplp_ids,
+ STEP_INFO(adl_p_rpl_pu_steppings),
+ },
+ {
+ SUBPLATFORM(alderlake_p, raptorlake_u),
+ .pciidlist = adlp_rplu_ids,
+ STEP_INFO(adl_p_rpl_pu_steppings),
+ },
{},
},
.info = &xe_lpd_display,
@@ -1159,14 +1270,24 @@ static const enum intel_step dg2_g12_steppings[] = {
};
static const struct platform_desc dg2_desc = {
- PLATFORM(DG2),
+ PLATFORM(dg2),
+ PLATFORM_GROUP(dgfx),
.subplatforms = (const struct subplatform_desc[]) {
- { INTEL_DISPLAY_DG2_G10, "G10", dg2_g10_ids,
- STEP_INFO(dg2_g10_steppings) },
- { INTEL_DISPLAY_DG2_G11, "G11", dg2_g11_ids,
- STEP_INFO(dg2_g11_steppings) },
- { INTEL_DISPLAY_DG2_G12, "G12", dg2_g12_ids,
- STEP_INFO(dg2_g12_steppings) },
+ {
+ SUBPLATFORM(dg2, g10),
+ .pciidlist = dg2_g10_ids,
+ STEP_INFO(dg2_g10_steppings),
+ },
+ {
+ SUBPLATFORM(dg2, g11),
+ .pciidlist = dg2_g11_ids,
+ STEP_INFO(dg2_g11_steppings),
+ },
+ {
+ SUBPLATFORM(dg2, g12),
+ .pciidlist = dg2_g12_ids,
+ STEP_INFO(dg2_g12_steppings),
+ },
{},
},
.info = &xe_hpd_display,
@@ -1227,6 +1348,7 @@ static const struct intel_display_device_info xe2_lpd_display = {
.__runtime_defaults.fbc_mask =
BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) |
BIT(INTEL_FBC_C) | BIT(INTEL_FBC_D),
+ .__runtime_defaults.has_dbuf_overlap_detection = true,
};
static const struct intel_display_device_info xe2_hpd_display = {
@@ -1241,15 +1363,20 @@ static const struct intel_display_device_info xe2_hpd_display = {
* reported by the hardware.
*/
static const struct platform_desc mtl_desc = {
- PLATFORM(METEORLAKE),
+ PLATFORM(meteorlake),
};
static const struct platform_desc lnl_desc = {
- PLATFORM(LUNARLAKE),
+ PLATFORM(lunarlake),
};
static const struct platform_desc bmg_desc = {
- PLATFORM(BATTLEMAGE),
+ PLATFORM(battlemage),
+ PLATFORM_GROUP(dgfx),
+};
+
+static const struct platform_desc ptl_desc = {
+ PLATFORM(pantherlake),
};
__diag_pop();
@@ -1289,11 +1416,14 @@ static const struct {
INTEL_I965GM_IDS(INTEL_DISPLAY_DEVICE, &i965gm_desc),
INTEL_GM45_IDS(INTEL_DISPLAY_DEVICE, &gm45_desc),
INTEL_G45_IDS(INTEL_DISPLAY_DEVICE, &g45_desc),
- INTEL_PNV_IDS(INTEL_DISPLAY_DEVICE, &pnv_desc),
+ INTEL_PNV_G_IDS(INTEL_DISPLAY_DEVICE, &pnv_g_desc),
+ INTEL_PNV_M_IDS(INTEL_DISPLAY_DEVICE, &pnv_m_desc),
INTEL_ILK_D_IDS(INTEL_DISPLAY_DEVICE, &ilk_d_desc),
INTEL_ILK_M_IDS(INTEL_DISPLAY_DEVICE, &ilk_m_desc),
- INTEL_SNB_IDS(INTEL_DISPLAY_DEVICE, &snb_desc),
- INTEL_IVB_IDS(INTEL_DISPLAY_DEVICE, &ivb_desc),
+ INTEL_SNB_D_IDS(INTEL_DISPLAY_DEVICE, &snb_d_desc),
+ INTEL_SNB_M_IDS(INTEL_DISPLAY_DEVICE, &snb_m_desc),
+ INTEL_IVB_D_IDS(INTEL_DISPLAY_DEVICE, &ivb_d_desc),
+ INTEL_IVB_M_IDS(INTEL_DISPLAY_DEVICE, &ivb_m_desc),
INTEL_HSW_IDS(INTEL_DISPLAY_DEVICE, &hsw_desc),
INTEL_VLV_IDS(INTEL_DISPLAY_DEVICE, &vlv_desc),
INTEL_BDW_IDS(INTEL_DISPLAY_DEVICE, &bdw_desc),
@@ -1318,9 +1448,11 @@ static const struct {
INTEL_RPLU_IDS(INTEL_DISPLAY_DEVICE, &adl_p_desc),
INTEL_RPLP_IDS(INTEL_DISPLAY_DEVICE, &adl_p_desc),
INTEL_DG2_IDS(INTEL_DISPLAY_DEVICE, &dg2_desc),
+ INTEL_ARL_IDS(INTEL_DISPLAY_DEVICE, &mtl_desc),
INTEL_MTL_IDS(INTEL_DISPLAY_DEVICE, &mtl_desc),
INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc),
+ INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
};
static const struct {
@@ -1331,12 +1463,13 @@ static const struct {
{ 14, 0, &xe_lpdp_display },
{ 14, 1, &xe2_hpd_display },
{ 20, 0, &xe2_lpd_display },
+ { 30, 0, &xe2_lpd_display },
};
static const struct intel_display_device_info *
-probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *ip_ver)
+probe_gmdid_display(struct intel_display *display, struct intel_display_ip_ver *ip_ver)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
struct intel_display_ip_ver gmd_id;
void __iomem *addr;
u32 val;
@@ -1344,7 +1477,8 @@ probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *
addr = pci_iomap_range(pdev, 0, i915_mmio_reg_offset(GMD_ID_DISPLAY), sizeof(u32));
if (!addr) {
- drm_err(&i915->drm, "Cannot map MMIO BAR to read display GMD_ID\n");
+ drm_err(display->drm,
+ "Cannot map MMIO BAR to read display GMD_ID\n");
return NULL;
}
@@ -1352,7 +1486,7 @@ probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *
pci_iounmap(pdev, addr);
if (val == 0) {
- drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+ drm_dbg_kms(display->drm, "Device doesn't have display\n");
return NULL;
}
@@ -1368,7 +1502,8 @@ probe_gmdid_display(struct drm_i915_private *i915, struct intel_display_ip_ver *
}
}
- drm_err(&i915->drm, "Unrecognized display IP version %d.%02d; disabling display.\n",
+ drm_err(display->drm,
+ "Unrecognized display IP version %d.%02d; disabling display.\n",
gmd_id.ver, gmd_id.rel);
return NULL;
}
@@ -1391,7 +1526,7 @@ find_subplatform_desc(struct pci_dev *pdev, const struct platform_desc *desc)
const struct subplatform_desc *sp;
const u16 *id;
- for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
+ for (sp = desc->subplatforms; sp && sp->pciidlist; sp++)
for (id = sp->pciidlist; *id; id++)
if (*id == pdev->device)
return sp;
@@ -1450,10 +1585,28 @@ static enum intel_step get_pre_gmdid_step(struct intel_display *display,
return step;
}
-void intel_display_device_probe(struct drm_i915_private *i915)
+/* Size of the entire bitmap, not the number of platforms */
+static unsigned int display_platforms_num_bits(void)
{
- struct intel_display *display = &i915->display;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ return sizeof(((struct intel_display_platforms *)0)->bitmap) * BITS_PER_BYTE;
+}
+
+/* Number of platform bits set */
+static unsigned int display_platforms_weight(const struct intel_display_platforms *p)
+{
+ return bitmap_weight(p->bitmap, display_platforms_num_bits());
+}
+
+/* Merge the subplatform information from src to dst */
+static void display_platforms_or(struct intel_display_platforms *dst,
+ const struct intel_display_platforms *src)
+{
+ bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits());
+}
+
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
+{
+ struct intel_display *display = to_intel_display(pdev);
const struct intel_display_device_info *info;
struct intel_display_ip_ver ip_ver = {};
const struct platform_desc *desc;
@@ -1461,45 +1614,56 @@ void intel_display_device_probe(struct drm_i915_private *i915)
enum intel_step step;
/* Add drm device backpointer as early as possible. */
- i915->display.drm = &i915->drm;
+ display->drm = pci_get_drvdata(pdev);
- intel_display_params_copy(&i915->display.params);
+ intel_display_params_copy(&display->params);
if (has_no_display(pdev)) {
- drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+ drm_dbg_kms(display->drm, "Device doesn't have display\n");
goto no_display;
}
desc = find_platform_desc(pdev);
if (!desc) {
- drm_dbg_kms(&i915->drm, "Unknown device ID %04x; disabling display.\n",
+ drm_dbg_kms(display->drm,
+ "Unknown device ID %04x; disabling display.\n",
pdev->device);
goto no_display;
}
info = desc->info;
if (!info)
- info = probe_gmdid_display(i915, &ip_ver);
+ info = probe_gmdid_display(display, &ip_ver);
if (!info)
goto no_display;
- DISPLAY_INFO(i915) = info;
+ DISPLAY_INFO(display) = info;
- memcpy(DISPLAY_RUNTIME_INFO(i915),
- &DISPLAY_INFO(i915)->__runtime_defaults,
- sizeof(*DISPLAY_RUNTIME_INFO(i915)));
+ memcpy(DISPLAY_RUNTIME_INFO(display),
+ &DISPLAY_INFO(display)->__runtime_defaults,
+ sizeof(*DISPLAY_RUNTIME_INFO(display)));
- drm_WARN_ON(&i915->drm, !desc->platform || !desc->name);
- DISPLAY_RUNTIME_INFO(i915)->platform = desc->platform;
+ drm_WARN_ON(display->drm, !desc->name ||
+ !display_platforms_weight(&desc->platforms));
+
+ display->platform = desc->platforms;
subdesc = find_subplatform_desc(pdev, desc);
if (subdesc) {
- drm_WARN_ON(&i915->drm, !subdesc->subplatform || !subdesc->name);
- DISPLAY_RUNTIME_INFO(i915)->subplatform = subdesc->subplatform;
+ drm_WARN_ON(display->drm, !subdesc->name ||
+ !display_platforms_weight(&subdesc->platforms));
+
+ display_platforms_or(&display->platform, &subdesc->platforms);
+
+ /* Ensure platform and subplatform are distinct */
+ drm_WARN_ON(display->drm,
+ display_platforms_weight(&display->platform) !=
+ display_platforms_weight(&desc->platforms) +
+ display_platforms_weight(&subdesc->platforms));
}
if (ip_ver.ver || ip_ver.rel || ip_ver.step) {
- DISPLAY_RUNTIME_INFO(i915)->ip = ip_ver;
+ DISPLAY_RUNTIME_INFO(display)->ip = ip_ver;
step = STEP_A0 + ip_ver.step;
if (step > STEP_FUTURE) {
drm_dbg_kms(display->drm, "Using future display stepping\n");
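The display_platforms_* helpers above treat struct intel_display_platforms as one flat bitmap: display_platforms_num_bits() measures the whole bitmap via sizeof on a null pointer, display_platforms_or() merges the subplatform bits into the platform's, and the drm_WARN_ON() weight check catches a platform and subplatform accidentally sharing a bit. A small model of the merge and the weight invariant, with popcount standing in for bitmap_weight() and hypothetical bit assignments:

#include <stdio.h>

int main(void)
{
	/* hypothetical bit assignments: one bit per (sub)platform */
	unsigned long platform = 1UL << 10;	/* e.g. haswell */
	unsigned long subplat = 1UL << 11;	/* e.g. haswell_ult */
	unsigned long merged = platform | subplat;

	/* invariant: weight(merged) == weight(platform) + weight(subplat),
	 * i.e. the two sets of bits must be disjoint */
	if (__builtin_popcountl(merged) !=
	    __builtin_popcountl(platform) + __builtin_popcountl(subplat))
		fprintf(stderr, "platform/subplatform bits overlap\n");
	else
		printf("merged weight: %d\n", __builtin_popcountl(merged));
	return 0;
}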
@@ -1510,28 +1674,32 @@ void intel_display_device_probe(struct drm_i915_private *i915)
subdesc ? &subdesc->step_info : NULL);
}
- DISPLAY_RUNTIME_INFO(i915)->step = step;
+ DISPLAY_RUNTIME_INFO(display)->step = step;
- drm_info(&i915->drm, "Found %s%s%s (device ID %04x) display version %u.%02u stepping %s\n",
+ drm_info(display->drm, "Found %s%s%s (device ID %04x) %s display version %u.%02u stepping %s\n",
desc->name, subdesc ? "/" : "", subdesc ? subdesc->name : "",
- pdev->device, DISPLAY_RUNTIME_INFO(i915)->ip.ver,
- DISPLAY_RUNTIME_INFO(i915)->ip.rel,
+ pdev->device, display->platform.dgfx ? "discrete" : "integrated",
+ DISPLAY_RUNTIME_INFO(display)->ip.ver,
+ DISPLAY_RUNTIME_INFO(display)->ip.rel,
step != STEP_NONE ? intel_step_name(step) : "N/A");
- return;
+ return display;
no_display:
- DISPLAY_INFO(i915) = &no_display;
+ DISPLAY_INFO(display) = &no_display;
+
+ return display;
}
-void intel_display_device_remove(struct drm_i915_private *i915)
+void intel_display_device_remove(struct intel_display *display)
{
- intel_display_params_free(&i915->display.params);
+ intel_display_params_free(&display->params);
}
-static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915)
+static void __intel_display_device_info_runtime_init(struct intel_display *display)
{
- struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(i915);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_display_runtime_info *display_runtime = DISPLAY_RUNTIME_INFO(display);
enum pipe pipe;
BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->pipe_mask) < I915_MAX_PIPES);
@@ -1539,35 +1707,35 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
BUILD_BUG_ON(BITS_PER_TYPE(display_runtime->port_mask) < I915_MAX_PORTS);
/* This covers both ULT and ULX */
- if (IS_HASWELL_ULT(i915) || IS_BROADWELL_ULT(i915))
+ if (display->platform.haswell_ult || display->platform.broadwell_ult)
display_runtime->port_mask &= ~BIT(PORT_D);
- if (IS_ICL_WITH_PORT_F(i915))
+ if (display->platform.icelake_port_f)
display_runtime->port_mask |= BIT(PORT_F);
/* Wa_14011765242: adl-s A0,A1 */
- if (IS_ALDERLAKE_S(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_A2))
- for_each_pipe(i915, pipe)
+ if (display->platform.alderlake_s && IS_DISPLAY_STEP(display, STEP_A0, STEP_A2))
+ for_each_pipe(display, pipe)
display_runtime->num_scalers[pipe] = 0;
- else if (DISPLAY_VER(i915) >= 11) {
- for_each_pipe(i915, pipe)
+ else if (DISPLAY_VER(display) >= 11) {
+ for_each_pipe(display, pipe)
display_runtime->num_scalers[pipe] = 2;
- } else if (DISPLAY_VER(i915) >= 9) {
+ } else if (DISPLAY_VER(display) >= 9) {
display_runtime->num_scalers[PIPE_A] = 2;
display_runtime->num_scalers[PIPE_B] = 2;
display_runtime->num_scalers[PIPE_C] = 1;
}
- if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915))
- for_each_pipe(i915, pipe)
+ if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 4;
- else if (DISPLAY_VER(i915) >= 11)
- for_each_pipe(i915, pipe)
+ else if (DISPLAY_VER(display) >= 11)
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 6;
- else if (DISPLAY_VER(i915) == 10)
- for_each_pipe(i915, pipe)
+ else if (DISPLAY_VER(display) == 10)
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 3;
- else if (IS_BROXTON(i915)) {
+ else if (display->platform.broxton) {
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
@@ -1580,23 +1748,23 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
display_runtime->num_sprites[PIPE_A] = 2;
display_runtime->num_sprites[PIPE_B] = 2;
display_runtime->num_sprites[PIPE_C] = 1;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- for_each_pipe(i915, pipe)
+ } else if (display->platform.valleyview || display->platform.cherryview) {
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 2;
- } else if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915)) {
- for_each_pipe(i915, pipe)
+ } else if (DISPLAY_VER(display) >= 5 || display->platform.g4x) {
+ for_each_pipe(display, pipe)
display_runtime->num_sprites[pipe] = 1;
}
- if ((IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) &&
- !(intel_de_read(i915, GU_CNTL_PROTECTED) & DEPRESENT)) {
- drm_info(&i915->drm, "Display not present, disabling\n");
+ if ((display->platform.dgfx || DISPLAY_VER(display) >= 14) &&
+ !(intel_de_read(display, GU_CNTL_PROTECTED) & DEPRESENT)) {
+ drm_info(display->drm, "Display not present, disabling\n");
goto display_fused_off;
}
- if (IS_DISPLAY_VER(i915, 7, 8) && HAS_PCH_SPLIT(i915)) {
- u32 fuse_strap = intel_de_read(i915, FUSE_STRAP);
- u32 sfuse_strap = intel_de_read(i915, SFUSE_STRAP);
+ if (IS_DISPLAY_VER(display, 7, 8) && HAS_PCH_SPLIT(i915)) {
+ u32 fuse_strap = intel_de_read(display, FUSE_STRAP);
+ u32 sfuse_strap = intel_de_read(display, SFUSE_STRAP);
/*
* SFUSE_STRAP is supposed to have a bit signalling the display
@@ -1611,16 +1779,16 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
(HAS_PCH_CPT(i915) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
- drm_info(&i915->drm,
+ drm_info(display->drm,
"Display fused off, disabling\n");
goto display_fused_off;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
- drm_info(&i915->drm, "PipeC fused off\n");
+ drm_info(display->drm, "PipeC fused off\n");
display_runtime->pipe_mask &= ~BIT(PIPE_C);
display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
- } else if (DISPLAY_VER(i915) >= 9) {
- u32 dfsm = intel_de_read(i915, SKL_DFSM);
+ } else if (DISPLAY_VER(display) >= 9) {
+ u32 dfsm = intel_de_read(display, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
display_runtime->pipe_mask &= ~BIT(PIPE_A);
@@ -1638,7 +1806,7 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
display_runtime->fbc_mask &= ~BIT(INTEL_FBC_C);
}
- if (DISPLAY_VER(i915) >= 12 &&
+ if (DISPLAY_VER(display) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
display_runtime->pipe_mask &= ~BIT(PIPE_D);
display_runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
@@ -1651,19 +1819,25 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
display_runtime->has_hdcp = 0;
- if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
- display_runtime->fbc_mask = 0;
+ if (display->platform.dg2 || DISPLAY_VER(display) < 13) {
+ if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
+ display_runtime->fbc_mask = 0;
+ }
- if (DISPLAY_VER(i915) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ if (DISPLAY_VER(display) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
display_runtime->has_dmc = 0;
- if (IS_DISPLAY_VER(i915, 10, 12) &&
+ if (IS_DISPLAY_VER(display, 10, 12) &&
(dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
display_runtime->has_dsc = 0;
+
+ if (DISPLAY_VER(display) >= 20 &&
+ (dfsm & XE2LPD_DFSM_DBUF_OVERLAP_DISABLE))
+ display_runtime->has_dbuf_overlap_detection = false;
}
- if (DISPLAY_VER(i915) >= 20) {
- u32 cap = intel_de_read(i915, XE2LPD_DE_CAP);
+ if (DISPLAY_VER(display) >= 20) {
+ u32 cap = intel_de_read(display, XE2LPD_DE_CAP);
if (REG_FIELD_GET(XE2LPD_DE_CAP_DSC_MASK, cap) ==
XE2LPD_DE_CAP_DSC_REMOVED)
@@ -1671,14 +1845,19 @@ static void __intel_display_device_info_runtime_init(struct drm_i915_private *i9
if (REG_FIELD_GET(XE2LPD_DE_CAP_SCALER_MASK, cap) ==
XE2LPD_DE_CAP_SCALER_SINGLE) {
- for_each_pipe(i915, pipe)
+ for_each_pipe(display, pipe)
if (display_runtime->num_scalers[pipe])
display_runtime->num_scalers[pipe] = 1;
}
}
- display_runtime->rawclk_freq = intel_read_rawclk(i915);
- drm_dbg_kms(&i915->drm, "rawclk rate: %d kHz\n", display_runtime->rawclk_freq);
+ if (DISPLAY_VER(display) >= 30)
+ display_runtime->edp_typec_support =
+ intel_de_read(display, PICA_PHY_CONFIG_CONTROL) & EDP_ON_TYPEC;
+
+ display_runtime->rawclk_freq = intel_read_rawclk(display);
+ drm_dbg_kms(display->drm, "rawclk rate: %d kHz\n",
+ display_runtime->rawclk_freq);
return;
@@ -1686,21 +1865,21 @@ display_fused_off:
memset(display_runtime, 0, sizeof(*display_runtime));
}
-void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
+void intel_display_device_info_runtime_init(struct intel_display *display)
{
- if (HAS_DISPLAY(i915))
- __intel_display_device_info_runtime_init(i915);
+ if (HAS_DISPLAY(display))
+ __intel_display_device_info_runtime_init(display);
/* Display may have been disabled by runtime init */
- if (!HAS_DISPLAY(i915)) {
- i915->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
- i915->display.info.__device_info = &no_display;
+ if (!HAS_DISPLAY(display)) {
+ display->drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ display->info.__device_info = &no_display;
}
/* Disable nuclear pageflip by default on pre-g4x */
- if (!i915->display.params.nuclear_pageflip &&
- DISPLAY_VER(i915) < 5 && !IS_G4X(i915))
- i915->drm.driver_features &= ~DRIVER_ATOMIC;
+ if (!display->params.nuclear_pageflip &&
+ DISPLAY_VER(display) < 5 && !display->platform.g4x)
+ display->drm->driver_features &= ~DRIVER_ATOMIC;
}
void intel_display_device_info_print(const struct intel_display_device_info *info,
@@ -1737,10 +1916,8 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
* Disabling display means taking over the display hardware, putting it to
* sleep, and preventing connectors from being connected via any means.
*/
-bool intel_display_device_enabled(struct drm_i915_private *i915)
+bool intel_display_device_enabled(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
/* Only valid when HAS_DISPLAY() is true */
drm_WARN_ON(display->drm, !HAS_DISPLAY(display));
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index dfb0c8bf5ca2..9a333d9e6601 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -6,97 +6,121 @@
#ifndef __INTEL_DISPLAY_DEVICE_H__
#define __INTEL_DISPLAY_DEVICE_H__
+#include <linux/bitops.h>
#include <linux/types.h>
#include "intel_display_conversion.h"
#include "intel_display_limits.h"
-struct drm_i915_private;
struct drm_printer;
+struct intel_display;
+struct pci_dev;
-/* Keep in gen based order, and chronological order within a gen */
-enum intel_display_platform {
- INTEL_DISPLAY_PLATFORM_UNINITIALIZED = 0,
- /* Display ver 2 */
- INTEL_DISPLAY_I830,
- INTEL_DISPLAY_I845G,
- INTEL_DISPLAY_I85X,
- INTEL_DISPLAY_I865G,
- /* Display ver 3 */
- INTEL_DISPLAY_I915G,
- INTEL_DISPLAY_I915GM,
- INTEL_DISPLAY_I945G,
- INTEL_DISPLAY_I945GM,
- INTEL_DISPLAY_G33,
- INTEL_DISPLAY_PINEVIEW,
- /* Display ver 4 */
- INTEL_DISPLAY_I965G,
- INTEL_DISPLAY_I965GM,
- INTEL_DISPLAY_G45,
- INTEL_DISPLAY_GM45,
- /* Display ver 5 */
- INTEL_DISPLAY_IRONLAKE,
- /* Display ver 6 */
- INTEL_DISPLAY_SANDYBRIDGE,
- /* Display ver 7 */
- INTEL_DISPLAY_IVYBRIDGE,
- INTEL_DISPLAY_VALLEYVIEW,
- INTEL_DISPLAY_HASWELL,
- /* Display ver 8 */
- INTEL_DISPLAY_BROADWELL,
- INTEL_DISPLAY_CHERRYVIEW,
- /* Display ver 9 */
- INTEL_DISPLAY_SKYLAKE,
- INTEL_DISPLAY_BROXTON,
- INTEL_DISPLAY_KABYLAKE,
- INTEL_DISPLAY_GEMINILAKE,
- INTEL_DISPLAY_COFFEELAKE,
- INTEL_DISPLAY_COMETLAKE,
- /* Display ver 11 */
- INTEL_DISPLAY_ICELAKE,
- INTEL_DISPLAY_JASPERLAKE,
- INTEL_DISPLAY_ELKHARTLAKE,
- /* Display ver 12 */
- INTEL_DISPLAY_TIGERLAKE,
- INTEL_DISPLAY_ROCKETLAKE,
- INTEL_DISPLAY_DG1,
- INTEL_DISPLAY_ALDERLAKE_S,
- /* Display ver 13 */
- INTEL_DISPLAY_ALDERLAKE_P,
- INTEL_DISPLAY_DG2,
- /* Display ver 14 (based on GMD ID) */
- INTEL_DISPLAY_METEORLAKE,
- /* Display ver 20 (based on GMD ID) */
- INTEL_DISPLAY_LUNARLAKE,
- /* Display ver 14.1 (based on GMD ID) */
- INTEL_DISPLAY_BATTLEMAGE,
-};
+/*
+ * Display platforms and subplatforms. Keep platforms in display version based
+ * order, chronological order within a version, and subplatforms next to the
+ * platform.
+ */
+#define INTEL_DISPLAY_PLATFORMS(func) \
+ /* Platform group aliases */ \
+ func(g4x) /* g45 and gm45 */ \
+ func(mobile) /* mobile platforms */ \
+ func(dgfx) /* discrete graphics */ \
+ /* Display ver 2 */ \
+ func(i830) \
+ func(i845g) \
+ func(i85x) \
+ func(i865g) \
+ /* Display ver 3 */ \
+ func(i915g) \
+ func(i915gm) \
+ func(i945g) \
+ func(i945gm) \
+ func(g33) \
+ func(pineview) \
+ /* Display ver 4 */ \
+ func(i965g) \
+ func(i965gm) \
+ func(g45) \
+ func(gm45) \
+ /* Display ver 5 */ \
+ func(ironlake) \
+ /* Display ver 6 */ \
+ func(sandybridge) \
+ /* Display ver 7 */ \
+ func(ivybridge) \
+ func(valleyview) \
+ func(haswell) \
+ func(haswell_ult) \
+ func(haswell_ulx) \
+ /* Display ver 8 */ \
+ func(broadwell) \
+ func(broadwell_ult) \
+ func(broadwell_ulx) \
+ func(cherryview) \
+ /* Display ver 9 */ \
+ func(skylake) \
+ func(skylake_ult) \
+ func(skylake_ulx) \
+ func(broxton) \
+ func(kabylake) \
+ func(kabylake_ult) \
+ func(kabylake_ulx) \
+ func(geminilake) \
+ func(coffeelake) \
+ func(coffeelake_ult) \
+ func(coffeelake_ulx) \
+ func(cometlake) \
+ func(cometlake_ult) \
+ func(cometlake_ulx) \
+ /* Display ver 11 */ \
+ func(icelake) \
+ func(icelake_port_f) \
+ func(jasperlake) \
+ func(elkhartlake) \
+ /* Display ver 12 */ \
+ func(tigerlake) \
+ func(tigerlake_uy) \
+ func(rocketlake) \
+ func(dg1) \
+ func(alderlake_s) \
+ func(alderlake_s_raptorlake_s) \
+ /* Display ver 13 */ \
+ func(alderlake_p) \
+ func(alderlake_p_alderlake_n) \
+ func(alderlake_p_raptorlake_p) \
+ func(alderlake_p_raptorlake_u) \
+ func(dg2) \
+ func(dg2_g10) \
+ func(dg2_g11) \
+ func(dg2_g12) \
+ /* Display ver 14 (based on GMD ID) */ \
+ func(meteorlake) \
+ /* Display ver 20 (based on GMD ID) */ \
+ func(lunarlake) \
+ /* Display ver 14.1 (based on GMD ID) */ \
+ func(battlemage) \
+ /* Display ver 30 (based on GMD ID) */ \
+ func(pantherlake)
+
+#define __MEMBER(name) unsigned long name:1;
+#define __COUNT(x) 1 +
-enum intel_display_subplatform {
- INTEL_DISPLAY_SUBPLATFORM_UNINITIALIZED = 0,
- INTEL_DISPLAY_HASWELL_ULT,
- INTEL_DISPLAY_HASWELL_ULX,
- INTEL_DISPLAY_BROADWELL_ULT,
- INTEL_DISPLAY_BROADWELL_ULX,
- INTEL_DISPLAY_SKYLAKE_ULT,
- INTEL_DISPLAY_SKYLAKE_ULX,
- INTEL_DISPLAY_KABYLAKE_ULT,
- INTEL_DISPLAY_KABYLAKE_ULX,
- INTEL_DISPLAY_COFFEELAKE_ULT,
- INTEL_DISPLAY_COFFEELAKE_ULX,
- INTEL_DISPLAY_COMETLAKE_ULT,
- INTEL_DISPLAY_COMETLAKE_ULX,
- INTEL_DISPLAY_ICELAKE_PORT_F,
- INTEL_DISPLAY_TIGERLAKE_UY,
- INTEL_DISPLAY_ALDERLAKE_S_RAPTORLAKE_S,
- INTEL_DISPLAY_ALDERLAKE_P_ALDERLAKE_N,
- INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_P,
- INTEL_DISPLAY_ALDERLAKE_P_RAPTORLAKE_U,
- INTEL_DISPLAY_DG2_G10,
- INTEL_DISPLAY_DG2_G11,
- INTEL_DISPLAY_DG2_G12,
+#define __NUM_PLATFORMS (INTEL_DISPLAY_PLATFORMS(__COUNT) 0)
+
+struct intel_display_platforms {
+ union {
+ struct {
+ INTEL_DISPLAY_PLATFORMS(__MEMBER);
+ };
+ DECLARE_BITMAP(bitmap, __NUM_PLATFORMS);
+ };
};
+#undef __MEMBER
+#undef __COUNT
+#undef __NUM_PLATFORMS
+
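The INTEL_DISPLAY_PLATFORMS() list above is an X-macro: the same list is expanded once with __MEMBER to emit one single-bit struct member per platform, and once with __COUNT to compute the bitmap size at compile time. Below is a minimal standalone sketch of that pattern, with hypothetical names rather than the kernel's:

#include <stdio.h>

#define PLATFORMS(func) \
	func(alpha) \
	func(beta) \
	func(gamma)

#define MEMBER(name) unsigned long name:1;
#define COUNT(x) 1 +
#define NUM_PLATFORMS (PLATFORMS(COUNT) 0)	/* expands to 1 + 1 + 1 + 0 == 3 */

struct platforms {
	PLATFORMS(MEMBER)	/* alpha, beta, gamma as one-bit flags */
};

int main(void)
{
	struct platforms p = { .beta = 1 };

	printf("count=%d beta=%d\n", NUM_PLATFORMS, (int)p.beta);
	return 0;
}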
#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
/* Keep in alphabetical order */ \
func(cursor_needs_physical); \
@@ -116,55 +140,64 @@ enum intel_display_subplatform {
func(overlay_needs_physical); \
func(supports_tv);
-#define HAS_4TILE(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14)
-#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
-#define HAS_CDCLK_CRAWL(i915) (DISPLAY_INFO(i915)->has_cdclk_crawl)
-#define HAS_CDCLK_SQUASH(i915) (DISPLAY_INFO(i915)->has_cdclk_squash)
-#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && IS_DISPLAY_VER(i915, 7, 13))
-#define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || IS_ALDERLAKE_S(i915))
-#define HAS_DDI(i915) (DISPLAY_INFO(i915)->has_ddi)
-#define HAS_DISPLAY(i915) (DISPLAY_RUNTIME_INFO(i915)->pipe_mask != 0)
-#define HAS_DMC(i915) (DISPLAY_RUNTIME_INFO(i915)->has_dmc)
-#define HAS_DOUBLE_BUFFERED_M_N(i915) (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915))
-#define HAS_DP_MST(i915) (DISPLAY_INFO(i915)->has_dp_mst)
-#define HAS_DP20(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14)
-#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
-#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
-#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
-#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
-#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
-#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
-#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
-#define HAS_GMBUS_IRQ(i915) (DISPLAY_VER(i915) >= 4)
-#define HAS_GMBUS_BURST_READ(i915) (DISPLAY_VER(i915) >= 10 || IS_KABYLAKE(i915))
-#define HAS_GMCH(i915) (DISPLAY_INFO(i915)->has_gmch)
-#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
-#define HAS_IPC(i915) (DISPLAY_INFO(i915)->has_ipc)
-#define HAS_IPS(i915) (IS_HASWELL_ULT(i915) || IS_BROADWELL(i915))
-#define HAS_LRR(i915) (DISPLAY_VER(i915) >= 12)
-#define HAS_LSPCON(i915) (IS_DISPLAY_VER(i915, 9, 10))
-#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
-#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)
-#define HAS_OVERLAY(i915) (DISPLAY_INFO(i915)->has_overlay)
-#define HAS_PSR(i915) (DISPLAY_INFO(i915)->has_psr)
-#define HAS_PSR_HW_TRACKING(i915) (DISPLAY_INFO(i915)->has_psr_hw_tracking)
-#define HAS_PSR2_SEL_FETCH(i915) (DISPLAY_VER(i915) >= 12)
-#define HAS_SAGV(i915) (DISPLAY_VER(i915) >= 9 && !IS_LP(i915))
-#define HAS_TRANSCODER(i915, trans) ((DISPLAY_RUNTIME_INFO(i915)->cpu_transcoder_mask & \
- BIT(trans)) != 0)
-#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
-#define HAS_AS_SDP(i915) (DISPLAY_VER(i915) >= 13)
-#define HAS_CMRR(i915) (DISPLAY_VER(i915) >= 20)
-#define INTEL_NUM_PIPES(i915) (hweight8(DISPLAY_RUNTIME_INFO(i915)->pipe_mask))
-#define I915_HAS_HOTPLUG(i915) (DISPLAY_INFO(i915)->has_hotplug)
-#define OVERLAY_NEEDS_PHYSICAL(i915) (DISPLAY_INFO(i915)->overlay_needs_physical)
-#define SUPPORTS_TV(i915) (DISPLAY_INFO(i915)->supports_tv)
+#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
+#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
+#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
+#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
+#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
+#define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13))
+#define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s)
+#define HAS_DBUF_OVERLAP_DETECTION(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dbuf_overlap_detection)
+#define HAS_DDI(__display) (DISPLAY_INFO(__display)->has_ddi)
+#define HAS_DISPLAY(__display) (DISPLAY_RUNTIME_INFO(__display)->pipe_mask != 0)
+#define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc)
+#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
+#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
+#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
+#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst)
+#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
+#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb)
+#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
+#define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
+#define HAS_FBC(__display) (DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0)
+#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
+#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
+#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4)
+#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
+#define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch)
+#define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx)
+#define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc)
+#define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell)
+#define HAS_LRR(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_LSPCON(__display) (IS_DISPLAY_VER(__display, 9, 10))
+#define HAS_MBUS_JOINING(__display) ((__display)->platform.alderlake_p || DISPLAY_VER(__display) >= 14)
+#define HAS_MSO(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_OVERLAY(__display) (DISPLAY_INFO(__display)->has_overlay)
+#define HAS_PSR(__display) (DISPLAY_INFO(__display)->has_psr)
+#define HAS_PSR_HW_TRACKING(__display) (DISPLAY_INFO(__display)->has_psr_hw_tracking)
+#define HAS_PSR2_SEL_FETCH(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_SAGV(__display) (DISPLAY_VER(__display) >= 9 && \
+ !(__display)->platform.broxton && !(__display)->platform.geminilake)
+#define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \
+ BIT(trans)) != 0)
+#define HAS_UNCOMPRESSED_JOINER(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_ULTRAJOINER(__display) ((DISPLAY_VER(__display) >= 20 || \
+ ((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \
+ HAS_DSC(__display))
+#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)
+#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
+#define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask))
+#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug)
+#define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical)
+#define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv)
/* Check that device has a display IP version within the specific range. */
-#define IS_DISPLAY_VER_FULL(__i915, from, until) ( \
- BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
- (DISPLAY_VER_FULL(__i915) >= (from) && \
- DISPLAY_VER_FULL(__i915) <= (until)))
+#define IS_DISPLAY_VERx100(__display, from, until) ( \
+ BUILD_BUG_ON_ZERO((from) < 200) + \
+ (DISPLAY_VERx100(__display) >= (from) && \
+ DISPLAY_VERx100(__display) <= (until)))
/*
* Check if a device has a specific IP version as well as a stepping within the
@@ -175,35 +208,32 @@ enum intel_display_subplatform {
* hardware fix is present and the software workaround is no longer necessary.
* E.g.,
*
- * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_B2)
- * IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_C0, STEP_FOREVER)
+ * IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B2)
+ * IS_DISPLAY_VERx100_STEP(display, 1400, STEP_C0, STEP_FOREVER)
*
* "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
* stepping bound for the specified IP version.
*/
-#define IS_DISPLAY_VER_STEP(__i915, ipver, from, until) \
- (IS_DISPLAY_VER_FULL((__i915), (ipver), (ipver)) && \
- IS_DISPLAY_STEP((__i915), (from), (until)))
+#define IS_DISPLAY_VERx100_STEP(__display, ipver, from, until) \
+ (IS_DISPLAY_VERx100((__display), (ipver), (ipver)) && \
+ IS_DISPLAY_STEP((__display), (from), (until)))
-#define DISPLAY_INFO(i915) (__to_intel_display(i915)->info.__device_info)
-#define DISPLAY_RUNTIME_INFO(i915) (&__to_intel_display(i915)->info.__runtime_info)
+#define DISPLAY_INFO(__display) (__to_intel_display(__display)->info.__device_info)
+#define DISPLAY_RUNTIME_INFO(__display) (&__to_intel_display(__display)->info.__runtime_info)
-#define DISPLAY_VER(i915) (DISPLAY_RUNTIME_INFO(i915)->ip.ver)
-#define DISPLAY_VER_FULL(i915) IP_VER(DISPLAY_RUNTIME_INFO(i915)->ip.ver, \
- DISPLAY_RUNTIME_INFO(i915)->ip.rel)
-#define IS_DISPLAY_VER(i915, from, until) \
- (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
+#define DISPLAY_VER(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver)
+#define DISPLAY_VERx100(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver * 100 + \
+ DISPLAY_RUNTIME_INFO(__display)->ip.rel)
+#define IS_DISPLAY_VER(__display, from, until) \
+ (DISPLAY_VER(__display) >= (from) && DISPLAY_VER(__display) <= (until))
-#define INTEL_DISPLAY_STEP(__i915) (DISPLAY_RUNTIME_INFO(__i915)->step)
+#define INTEL_DISPLAY_STEP(__display) (DISPLAY_RUNTIME_INFO(__display)->step)
-#define IS_DISPLAY_STEP(__i915, since, until) \
- (drm_WARN_ON(__to_intel_display(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
- INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
+#define IS_DISPLAY_STEP(__display, since, until) \
+ (drm_WARN_ON(__to_intel_display(__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
+ INTEL_DISPLAY_STEP(__display) >= (since) && INTEL_DISPLAY_STEP(__display) < (until))
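The VERx100 scheme above encodes a display IP version such as 14.1 as the integer 14 * 100 + 1 = 1401, so version-range checks reduce to plain integer comparisons. A hedged standalone sketch of that encoding (hypothetical names, not the kernel macros):

#include <assert.h>

struct ip_ver { unsigned short ver, rel; };

static int verx100(struct ip_ver ip)
{
	return ip.ver * 100 + ip.rel;
}

int main(void)
{
	struct ip_ver ver_14_1 = { 14, 1 };	/* display ver 14.1 */
	struct ip_ver ver_20_0 = { 20, 0 };	/* display ver 20 */

	assert(verx100(ver_14_1) == 1401);
	/* equivalent of a range check like IS_DISPLAY_VERx100(display, 1400, 1401) */
	assert(verx100(ver_14_1) >= 1400 && verx100(ver_14_1) <= 1401);
	assert(!(verx100(ver_20_0) >= 1400 && verx100(ver_20_0) <= 1401));
	return 0;
}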
struct intel_display_runtime_info {
- enum intel_display_platform platform;
- enum intel_display_subplatform subplatform;
-
struct intel_display_ip_ver {
u16 ver;
u16 rel;
@@ -225,6 +255,8 @@ struct intel_display_runtime_info {
bool has_hdcp;
bool has_dmc;
bool has_dsc;
+ bool edp_typec_support;
+ bool has_dbuf_overlap_detection;
};
struct intel_display_device_info {
@@ -258,10 +290,10 @@ struct intel_display_device_info {
} color;
};
-bool intel_display_device_enabled(struct drm_i915_private *i915);
-void intel_display_device_probe(struct drm_i915_private *i915);
-void intel_display_device_remove(struct drm_i915_private *i915);
-void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
+bool intel_display_device_enabled(struct intel_display *display);
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev);
+void intel_display_device_remove(struct intel_display *display);
+void intel_display_device_info_runtime_init(struct intel_display *display);
void intel_display_device_info_print(const struct intel_display_device_info *info,
const struct intel_display_runtime_info *runtime,
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 069426d9260b..50ec0c3c7588 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -11,7 +11,7 @@
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
@@ -80,18 +80,19 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev)
return false;
}
-void intel_display_driver_init_hw(struct drm_i915_private *i915)
+void intel_display_driver_init_hw(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_cdclk_state *cdclk_state;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);
+ cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state);
- intel_update_cdclk(i915);
- intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
- cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
+ intel_update_cdclk(display);
+ intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
+ cdclk_state->logical = cdclk_state->actual = display->cdclk.hw;
intel_display_wa_apply(i915);
}
@@ -111,12 +112,12 @@ static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
-static void intel_mode_config_init(struct drm_i915_private *i915)
+static void intel_mode_config_init(struct intel_display *display)
{
- struct drm_mode_config *mode_config = &i915->drm.mode_config;
+ struct drm_mode_config *mode_config = &display->drm->mode_config;
- drm_mode_config_init(&i915->drm);
- INIT_LIST_HEAD(&i915->display.global.obj_list);
+ drm_mode_config_init(display->drm);
+ INIT_LIST_HEAD(&display->global.obj_list);
mode_config->min_width = 0;
mode_config->min_height = 0;
@@ -127,19 +128,19 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->funcs = &intel_mode_funcs;
mode_config->helper_private = &intel_mode_config_funcs;
- mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
+ mode_config->async_page_flip = HAS_ASYNC_FLIPS(display);
/*
* Maximum framebuffer dimensions, chosen to match
* the maximum render engine surface size on gen4+.
*/
- if (DISPLAY_VER(i915) >= 7) {
+ if (DISPLAY_VER(display) >= 7) {
mode_config->max_width = 16384;
mode_config->max_height = 16384;
- } else if (DISPLAY_VER(i915) >= 4) {
+ } else if (DISPLAY_VER(display) >= 4) {
mode_config->max_width = 8192;
mode_config->max_height = 8192;
- } else if (DISPLAY_VER(i915) == 3) {
+ } else if (DISPLAY_VER(display) == 3) {
mode_config->max_width = 4096;
mode_config->max_height = 4096;
} else {
@@ -147,11 +148,11 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->max_height = 2048;
}
- if (IS_I845G(i915) || IS_I865G(i915)) {
- mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
+ if (display->platform.i845g || display->platform.i865g) {
+ mode_config->cursor_width = display->platform.i845g ? 64 : 512;
mode_config->cursor_height = 1023;
- } else if (IS_I830(i915) || IS_I85X(i915) ||
- IS_I915G(i915) || IS_I915GM(i915)) {
+ } else if (display->platform.i830 || display->platform.i85x ||
+ display->platform.i915g || display->platform.i915gm) {
mode_config->cursor_width = 64;
mode_config->cursor_height = 64;
} else {
@@ -160,94 +161,97 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
}
}
-static void intel_mode_config_cleanup(struct drm_i915_private *i915)
+static void intel_mode_config_cleanup(struct intel_display *display)
{
- intel_atomic_global_obj_cleanup(i915);
- drm_mode_config_cleanup(&i915->drm);
+ intel_atomic_global_obj_cleanup(display);
+ drm_mode_config_cleanup(display->drm);
}
-static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+static void intel_plane_possible_crtcs_init(struct intel_display *display)
{
struct intel_plane *plane;
- for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
+ for_each_intel_plane(display->drm, plane) {
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display,
plane->pipe);
plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
}
}
-void intel_display_driver_early_probe(struct drm_i915_private *i915)
+void intel_display_driver_early_probe(struct intel_display *display)
{
- if (!HAS_DISPLAY(i915))
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (!HAS_DISPLAY(display))
return;
- spin_lock_init(&i915->display.fb_tracking.lock);
- mutex_init(&i915->display.backlight.lock);
- mutex_init(&i915->display.audio.mutex);
- mutex_init(&i915->display.wm.wm_mutex);
- mutex_init(&i915->display.pps.mutex);
- mutex_init(&i915->display.hdcp.hdcp_mutex);
+ spin_lock_init(&display->fb_tracking.lock);
+ mutex_init(&display->backlight.lock);
+ mutex_init(&display->audio.mutex);
+ mutex_init(&display->wm.wm_mutex);
+ mutex_init(&display->pps.mutex);
+ mutex_init(&display->hdcp.hdcp_mutex);
intel_display_irq_init(i915);
intel_dkl_phy_init(i915);
- intel_color_init_hooks(i915);
- intel_init_cdclk_hooks(i915);
+ intel_color_init_hooks(display);
+ intel_init_cdclk_hooks(display);
intel_audio_hooks_init(i915);
intel_dpll_init_clock_hook(i915);
intel_init_display_hooks(i915);
intel_fdi_init_hook(i915);
- intel_dmc_wl_init(&i915->display);
+ intel_dmc_wl_init(display);
}
/* part #1: call before irq install */
-int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
+int intel_display_driver_probe_noirq(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (i915_inject_probe_failure(i915))
return -ENODEV;
- if (HAS_DISPLAY(i915)) {
- ret = drm_vblank_init(&i915->drm,
- INTEL_NUM_PIPES(i915));
+ if (HAS_DISPLAY(display)) {
+ ret = drm_vblank_init(display->drm,
+ INTEL_NUM_PIPES(display));
if (ret)
return ret;
}
intel_bios_init(display);
- ret = intel_vga_register(i915);
+ ret = intel_vga_register(display);
if (ret)
goto cleanup_bios;
/* FIXME: completely on the wrong abstraction layer */
- ret = intel_power_domains_init(i915);
+ ret = intel_power_domains_init(display);
if (ret < 0)
goto cleanup_vga;
- intel_pmdemand_init_early(i915);
+ intel_pmdemand_init_early(display);
- intel_power_domains_init_hw(i915, false);
+ intel_power_domains_init_hw(display, false);
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
- intel_dmc_init(i915);
+ intel_dmc_init(display);
- i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
- i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ display->wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
- intel_mode_config_init(i915);
+ intel_mode_config_init(display);
- ret = intel_cdclk_init(i915);
+ ret = intel_cdclk_init(display);
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- ret = intel_color_init(i915);
+ ret = intel_color_init(display);
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
@@ -259,7 +263,7 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- ret = intel_pmdemand_init(i915);
+ ret = intel_pmdemand_init(display);
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
@@ -270,17 +274,17 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
return 0;
cleanup_vga_client_pw_domain_dmc:
- intel_dmc_fini(i915);
- intel_power_domains_driver_remove(i915);
+ intel_dmc_fini(display);
+ intel_power_domains_driver_remove(display);
cleanup_vga:
- intel_vga_unregister(i915);
+ intel_vga_unregister(display);
cleanup_bios:
intel_bios_driver_remove(display);
return ret;
}
-static void set_display_access(struct drm_i915_private *i915,
+static void set_display_access(struct intel_display *display,
bool any_task_allowed,
struct task_struct *allowed_task)
{
@@ -288,20 +292,20 @@ static void set_display_access(struct drm_i915_private *i915,
int err;
intel_modeset_lock_ctx_retry(&ctx, NULL, 0, err) {
- err = drm_modeset_lock_all_ctx(&i915->drm, &ctx);
+ err = drm_modeset_lock_all_ctx(display->drm, &ctx);
if (err)
continue;
- i915->display.access.any_task_allowed = any_task_allowed;
- i915->display.access.allowed_task = allowed_task;
+ display->access.any_task_allowed = any_task_allowed;
+ display->access.allowed_task = allowed_task;
}
- drm_WARN_ON(&i915->drm, err);
+ drm_WARN_ON(display->drm, err);
}
/**
* intel_display_driver_enable_user_access - Enable display HW access for all threads
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Enable the display HW access for all threads. Examples of such accesses
* are modeset commits and connector probing.
@@ -309,16 +313,18 @@ static void set_display_access(struct drm_i915_private *i915,
* This function should be called during driver loading and system resume once
* all the HW initialization steps are done.
*/
-void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
+void intel_display_driver_enable_user_access(struct intel_display *display)
{
- set_display_access(i915, true, NULL);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ set_display_access(display, true, NULL);
intel_hpd_enable_detection_work(i915);
}
/**
* intel_display_driver_disable_user_access - Disable display HW access for user threads
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Disable the display HW access for user threads. Examples of such accesses
* are modeset commits and connector probing. For the current thread the
@@ -333,16 +339,18 @@ void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
* This function should be called during driver loading/unloading and system
* suspend/shutdown before starting the HW init/deinit programming.
*/
-void intel_display_driver_disable_user_access(struct drm_i915_private *i915)
+void intel_display_driver_disable_user_access(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
intel_hpd_disable_detection_work(i915);
- set_display_access(i915, false, current);
+ set_display_access(display, false, current);
}
/**
* intel_display_driver_suspend_access - Suspend display HW access for all threads
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Disable the display HW access for all threads. Examples of such accesses
* are modeset commits and connector probing. This call should be either
@@ -352,14 +360,14 @@ void intel_display_driver_disable_user_access(struct drm_i915_private *i915)
* This function should be called during driver unloading and system
* suspend/shutdown after completing the HW deinit programming.
*/
-void intel_display_driver_suspend_access(struct drm_i915_private *i915)
+void intel_display_driver_suspend_access(struct intel_display *display)
{
- set_display_access(i915, false, NULL);
+ set_display_access(display, false, NULL);
}
/**
* intel_display_driver_resume_access - Resume display HW access for the resume thread
- * @i915: i915 device instance
+ * @display: display device instance
*
* Enable the display HW access for the current resume thread, keeping the
* access disabled for all other (user) threads. Examples for such accesses
@@ -371,14 +379,14 @@ void intel_display_driver_suspend_access(struct drm_i915_private *i915)
* This function should be called during system resume before starting the HW
* init steps.
*/
-void intel_display_driver_resume_access(struct drm_i915_private *i915)
+void intel_display_driver_resume_access(struct intel_display *display)
{
- set_display_access(i915, false, current);
+ set_display_access(display, false, current);
}
/**
 * intel_display_driver_check_access - Check if the current thread has display HW access
- * @i915: i915 device instance
+ * @display: display device instance
*
* Check whether the current thread has display HW access, print a debug
* message if it doesn't. Such accesses are modeset commits and connector
@@ -387,26 +395,24 @@ void intel_display_driver_resume_access(struct drm_i915_private *i915)
* Returns %true if the current thread has display HW access, %false
* otherwise.
*/
-bool intel_display_driver_check_access(struct drm_i915_private *i915)
+bool intel_display_driver_check_access(struct intel_display *display)
{
- char comm[TASK_COMM_LEN];
char current_task[TASK_COMM_LEN + 16];
char allowed_task[TASK_COMM_LEN + 16] = "none";
- if (i915->display.access.any_task_allowed ||
- i915->display.access.allowed_task == current)
+ if (display->access.any_task_allowed ||
+ display->access.allowed_task == current)
return true;
snprintf(current_task, sizeof(current_task), "%s[%d]",
- get_task_comm(comm, current),
- task_pid_vnr(current));
+ current->comm, task_pid_vnr(current));
- if (i915->display.access.allowed_task)
+ if (display->access.allowed_task)
snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
- get_task_comm(comm, i915->display.access.allowed_task),
- task_pid_vnr(i915->display.access.allowed_task));
+ display->access.allowed_task->comm,
+ task_pid_vnr(display->access.allowed_task));
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Reject display access from task %s (allowed to %s)\n",
current_task, allowed_task);
@@ -414,14 +420,13 @@ bool intel_display_driver_check_access(struct drm_i915_private *i915)
}
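The access gating above reduces to a two-field predicate: access is granted when any task is allowed, or when the caller is the single remembered task. A minimal sketch of that predicate (hypothetical names; the task_struct pointer is stood in by an opaque pointer):

#include <stdbool.h>
#include <stdio.h>

struct access {
	bool any_task_allowed;
	const void *allowed_task;
};

static bool check_access(const struct access *a, const void *task)
{
	return a->any_task_allowed || a->allowed_task == task;
}

int main(void)
{
	int task_a, task_b;	/* addresses serve as task identities */
	struct access a = { false, &task_a };	/* like disable_user_access() from task_a */

	printf("a: %d, b: %d\n", check_access(&a, &task_a),
	       check_access(&a, &task_b));	/* a: 1, b: 0 */

	a.any_task_allowed = true;		/* like enable_user_access() */
	printf("b: %d\n", check_access(&a, &task_b));	/* b: 1 */
	return 0;
}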
/* part #2: call after irq install, but before gem init */
-int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
+int intel_display_driver_probe_nogem(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct drm_device *dev = display->drm;
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
intel_wm_init(i915);
@@ -430,72 +435,73 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_pps_setup(display);
- intel_gmbus_setup(i915);
+ intel_gmbus_setup(display);
- drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
- INTEL_NUM_PIPES(i915),
- INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+ drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(display),
+ INTEL_NUM_PIPES(display) > 1 ? "s" : "");
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
ret = intel_crtc_init(i915, pipe);
if (ret)
goto err_mode_config;
}
- intel_plane_possible_crtcs_init(i915);
+ intel_plane_possible_crtcs_init(display);
intel_shared_dpll_init(i915);
intel_fdi_pll_freq_update(i915);
intel_update_czclk(i915);
- intel_display_driver_init_hw(i915);
+ intel_display_driver_init_hw(display);
intel_dpll_update_ref_clks(i915);
- if (i915->display.cdclk.max_cdclk_freq == 0)
- intel_update_max_cdclk(i915);
+ if (display->cdclk.max_cdclk_freq == 0)
+ intel_update_max_cdclk(display);
intel_hti_init(display);
/* Just disable it once at startup */
- intel_vga_disable(i915);
+ intel_vga_disable(display);
intel_setup_outputs(i915);
ret = intel_dp_tunnel_mgr_init(display);
if (ret)
goto err_hdcp;
- intel_display_driver_disable_user_access(i915);
+ intel_display_driver_disable_user_access(display);
- drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
+ drm_modeset_lock_all(display->drm);
+ intel_modeset_setup_hw_state(i915, display->drm->mode_config.acquire_ctx);
intel_acpi_assign_connector_fwnodes(display);
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(display->drm);
- intel_initial_plane_config(i915);
+ intel_initial_plane_config(display);
/*
* Make sure hardware watermarks really match the state we read out.
* Note that we need to do this after reconstructing the BIOS fb's
* since the watermark calculation done here will use pstate->fb.
*/
- if (!HAS_GMCH(i915))
+ if (!HAS_GMCH(display))
ilk_wm_sanitize(i915);
return 0;
err_hdcp:
- intel_hdcp_component_fini(i915);
+ intel_hdcp_component_fini(display);
err_mode_config:
- intel_mode_config_cleanup(i915);
+ intel_mode_config_cleanup(display);
return ret;
}
/* part #3: call after gem init */
-int intel_display_driver_probe(struct drm_i915_private *i915)
+int intel_display_driver_probe(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
/*
@@ -503,7 +509,7 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
* the BIOS fb takeover and whatever else magic ggtt reservations
* happen during gem/ggtt init.
*/
- intel_hdcp_component_init(i915);
+ intel_hdcp_component_init(display);
/*
* Force all active planes to recompute their states. So that on
@@ -511,11 +517,11 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
* are already calculated and there is no assert_plane warnings
* during bootup.
*/
- ret = intel_initial_commit(&i915->drm);
+ ret = intel_initial_commit(display->drm);
if (ret)
- drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
+ drm_dbg_kms(display->drm, "Initial modeset failed, %d\n", ret);
- intel_overlay_setup(i915);
+ intel_overlay_setup(display);
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(i915);
@@ -525,13 +531,13 @@ int intel_display_driver_probe(struct drm_i915_private *i915)
return 0;
}
-void intel_display_driver_register(struct drm_i915_private *i915)
+void intel_display_driver_register(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
- struct drm_printer p = drm_dbg_printer(&i915->drm, DRM_UT_KMS,
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS,
"i915 display info:");
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
/* Must be done after probing outputs */
@@ -540,7 +546,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_audio_init(i915);
- intel_display_driver_enable_user_access(i915);
+ intel_display_driver_enable_user_access(display);
intel_audio_register(i915);
@@ -551,41 +557,42 @@ void intel_display_driver_register(struct drm_i915_private *i915)
* fbdev configuration, for which we use the
* fbdev->async_cookie.
*/
- drm_kms_helper_poll_init(&i915->drm);
+ drm_kms_helper_poll_init(display->drm);
intel_hpd_poll_disable(i915);
intel_fbdev_setup(i915);
- intel_display_device_info_print(DISPLAY_INFO(i915),
- DISPLAY_RUNTIME_INFO(i915), &p);
+ intel_display_device_info_print(DISPLAY_INFO(display),
+ DISPLAY_RUNTIME_INFO(display), &p);
}
/* part #1: call before irq uninstall */
-void intel_display_driver_remove(struct drm_i915_private *i915)
+void intel_display_driver_remove(struct intel_display *display)
{
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- flush_workqueue(i915->display.wq.flip);
- flush_workqueue(i915->display.wq.modeset);
+ flush_workqueue(display->wq.flip);
+ flush_workqueue(display->wq.modeset);
+ flush_workqueue(display->wq.cleanup);
/*
* MST topology needs to be suspended so we don't have any calls to
* fbdev after it's finalized. MST will be destroyed later as part of
* drm_mode_config_cleanup()
*/
- intel_dp_mst_suspend(i915);
+ intel_dp_mst_suspend(display);
}
/* part #2: call after irq uninstall */
-void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
+void intel_display_driver_remove_noirq(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- intel_display_driver_suspend_access(i915);
+ intel_display_driver_suspend_access(display);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -598,57 +605,56 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
/* flush any delayed tasks or pending work */
flush_workqueue(i915->unordered_wq);
- intel_hdcp_component_fini(i915);
+ intel_hdcp_component_fini(display);
- intel_mode_config_cleanup(i915);
+ intel_mode_config_cleanup(display);
intel_dp_tunnel_mgr_cleanup(display);
- intel_overlay_cleanup(i915);
+ intel_overlay_cleanup(display);
- intel_gmbus_teardown(i915);
+ intel_gmbus_teardown(display);
- destroy_workqueue(i915->display.wq.flip);
- destroy_workqueue(i915->display.wq.modeset);
+ destroy_workqueue(display->wq.flip);
+ destroy_workqueue(display->wq.modeset);
+ destroy_workqueue(display->wq.cleanup);
- intel_fbc_cleanup(&i915->display);
+ intel_fbc_cleanup(display);
}
/* part #3: call after gem init */
-void intel_display_driver_remove_nogem(struct drm_i915_private *i915)
+void intel_display_driver_remove_nogem(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
- intel_dmc_fini(i915);
+ intel_dmc_fini(display);
- intel_power_domains_driver_remove(i915);
+ intel_power_domains_driver_remove(display);
- intel_vga_unregister(i915);
+ intel_vga_unregister(display);
intel_bios_driver_remove(display);
}
-void intel_display_driver_unregister(struct drm_i915_private *i915)
+void intel_display_driver_unregister(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- drm_client_dev_unregister(&i915->drm);
+ drm_client_dev_unregister(display->drm);
/*
* After flushing the fbdev (incl. a late async config which
* will have delayed queuing of a hotplug event), then flush
* the hotplug events.
*/
- drm_kms_helper_poll_fini(&i915->drm);
+ drm_kms_helper_poll_fini(display->drm);
- intel_display_driver_disable_user_access(i915);
+ intel_display_driver_disable_user_access(display);
intel_audio_deinit(i915);
- drm_atomic_helper_shutdown(&i915->drm);
+ drm_atomic_helper_shutdown(display->drm);
acpi_video_unregister();
intel_opregion_unregister(display);
@@ -658,35 +664,42 @@ void intel_display_driver_unregister(struct drm_i915_private *i915)
* turn all crtc's off, but do not adjust state
* This has to be paired with a call to intel_modeset_setup_hw_state.
*/
-int intel_display_driver_suspend(struct drm_i915_private *i915)
+int intel_display_driver_suspend(struct intel_display *display)
{
struct drm_atomic_state *state;
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return 0;
- state = drm_atomic_helper_suspend(&i915->drm);
+ state = drm_atomic_helper_suspend(display->drm);
ret = PTR_ERR_OR_ZERO(state);
if (ret)
- drm_err(&i915->drm, "Suspending crtc's failed with %i\n",
+ drm_err(display->drm, "Suspending crtc's failed with %i\n",
ret);
else
- i915->display.restore.modeset_state = state;
+ display->restore.modeset_state = state;
+
+ /* ensure all DPT VMAs have been unpinned for intel_dpt_suspend() */
+ flush_workqueue(display->wq.cleanup);
+
+ intel_dp_mst_suspend(display);
+
return ret;
}
int
-__intel_display_driver_resume(struct drm_i915_private *i915,
+__intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int ret, i;
intel_modeset_setup_hw_state(i915, ctx);
- intel_vga_redisable(i915);
+ intel_vga_redisable(display);
if (!state)
return 0;
@@ -706,33 +719,37 @@ __intel_display_driver_resume(struct drm_i915_private *i915,
}
/* ignore any reset values/BIOS leftovers in the WM registers */
- if (!HAS_GMCH(i915))
+ if (!HAS_GMCH(display))
to_intel_atomic_state(state)->skip_intermediate_wm = true;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
- drm_WARN_ON(&i915->drm, ret == -EDEADLK);
+ drm_WARN_ON(display->drm, ret == -EDEADLK);
return ret;
}
-void intel_display_driver_resume(struct drm_i915_private *i915)
+void intel_display_driver_resume(struct intel_display *display)
{
- struct drm_atomic_state *state = i915->display.restore.modeset_state;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_atomic_state *state = display->restore.modeset_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- i915->display.restore.modeset_state = NULL;
+ /* MST sideband requires HPD interrupts enabled */
+ intel_dp_mst_resume(display);
+
+ display->restore.modeset_state = NULL;
if (state)
state->acquire_ctx = &ctx;
drm_modeset_acquire_init(&ctx, 0);
while (1) {
- ret = drm_modeset_lock_all_ctx(&i915->drm, &ctx);
+ ret = drm_modeset_lock_all_ctx(display->drm, &ctx);
if (ret != -EDEADLK)
break;
@@ -740,14 +757,14 @@ void intel_display_driver_resume(struct drm_i915_private *i915)
}
if (!ret)
- ret = __intel_display_driver_resume(i915, state, &ctx);
+ ret = __intel_display_driver_resume(display, state, &ctx);
skl_watermark_ipc_update(i915);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
if (ret)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Restoring old state failed with %i\n", ret);
if (state)
drm_atomic_state_put(state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.h b/drivers/gpu/drm/i915/display/intel_display_driver.h
index 42cc4af6d3fd..2966ff91b219 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.h
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.h
@@ -9,34 +9,34 @@
#include <linux/types.h>
struct drm_atomic_state;
-struct drm_i915_private;
struct drm_modeset_acquire_ctx;
+struct intel_display;
struct pci_dev;
bool intel_display_driver_probe_defer(struct pci_dev *pdev);
-void intel_display_driver_init_hw(struct drm_i915_private *i915);
-void intel_display_driver_early_probe(struct drm_i915_private *i915);
-int intel_display_driver_probe_noirq(struct drm_i915_private *i915);
-int intel_display_driver_probe_nogem(struct drm_i915_private *i915);
-int intel_display_driver_probe(struct drm_i915_private *i915);
-void intel_display_driver_register(struct drm_i915_private *i915);
-void intel_display_driver_remove(struct drm_i915_private *i915);
-void intel_display_driver_remove_noirq(struct drm_i915_private *i915);
-void intel_display_driver_remove_nogem(struct drm_i915_private *i915);
-void intel_display_driver_unregister(struct drm_i915_private *i915);
-int intel_display_driver_suspend(struct drm_i915_private *i915);
-void intel_display_driver_resume(struct drm_i915_private *i915);
+void intel_display_driver_init_hw(struct intel_display *display);
+void intel_display_driver_early_probe(struct intel_display *display);
+int intel_display_driver_probe_noirq(struct intel_display *display);
+int intel_display_driver_probe_nogem(struct intel_display *display);
+int intel_display_driver_probe(struct intel_display *display);
+void intel_display_driver_register(struct intel_display *display);
+void intel_display_driver_remove(struct intel_display *display);
+void intel_display_driver_remove_noirq(struct intel_display *display);
+void intel_display_driver_remove_nogem(struct intel_display *display);
+void intel_display_driver_unregister(struct intel_display *display);
+int intel_display_driver_suspend(struct intel_display *display);
+void intel_display_driver_resume(struct intel_display *display);
/* interface for intel_display_reset.c */
-int __intel_display_driver_resume(struct drm_i915_private *i915,
+int __intel_display_driver_resume(struct intel_display *display,
struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx);
-void intel_display_driver_enable_user_access(struct drm_i915_private *i915);
-void intel_display_driver_disable_user_access(struct drm_i915_private *i915);
-void intel_display_driver_suspend_access(struct drm_i915_private *i915);
-void intel_display_driver_resume_access(struct drm_i915_private *i915);
-bool intel_display_driver_check_access(struct drm_i915_private *i915);
+void intel_display_driver_enable_user_access(struct intel_display *display);
+void intel_display_driver_disable_user_access(struct intel_display *display);
+void intel_display_driver_suspend_access(struct intel_display *display);
+void intel_display_driver_resume_access(struct intel_display *display);
+bool intel_display_driver_check_access(struct intel_display *display);
#endif /* __INTEL_DISPLAY_DRIVER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 73369847ed66..069043f9d894 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_vblank.h>
+
#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -27,7 +29,8 @@
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ struct intel_display *display = &dev_priv->display;
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
drm_crtc_handle_vblank(&crtc->base);
}
@@ -269,14 +272,17 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
intel_uncore_posting_read(&dev_priv->uncore, reg);
}
-static bool i915_has_asle(struct drm_i915_private *i915)
+static bool i915_has_legacy_blc_interrupt(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- if (!IS_PINEVIEW(i915) && !IS_MOBILE(i915))
- return false;
+ if (IS_I85X(i915))
+ return true;
+
+ if (IS_PINEVIEW(i915))
+ return true;
- return intel_opregion_asle_present(display);
+ return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915);
}
/**
@@ -285,7 +291,12 @@ static bool i915_has_asle(struct drm_i915_private *i915)
*/
void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
- if (!i915_has_asle(dev_priv))
+ struct intel_display *display = &dev_priv->display;
+
+ if (!intel_opregion_asle_present(display))
+ return;
+
+ if (!i915_has_legacy_blc_interrupt(display))
return;
spin_lock_irq(&dev_priv->irq_lock);
@@ -298,14 +309,15 @@ void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-#if defined(CONFIG_DEBUG_FS)
+#if IS_ENABLED(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe,
u32 crc0, u32 crc1,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ struct intel_display *display = &dev_priv->display;
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
@@ -344,7 +356,8 @@ display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
static void flip_done_handler(struct drm_i915_private *i915,
enum pipe pipe)
{
- struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+ struct intel_display *display = &i915->display;
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
spin_lock(&i915->drm.event_lock);
@@ -400,7 +413,7 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
res1, res2);
}
-void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
+static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -421,7 +434,8 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
spin_lock(&dev_priv->irq_lock);
- if (!dev_priv->display.irq.display_irqs_enabled) {
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ !dev_priv->display.irq.vlv_display_irqs_enabled) {
spin_unlock(&dev_priv->irq_lock);
return;
}
@@ -480,28 +494,10 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->irq_lock);
}
-void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
- u16 iir, u32 pipe_stats[I915_MAX_PIPES])
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- intel_handle_vblank(dev_priv, pipe);
-
- if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
-
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
- }
-}
-
void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
struct intel_display *display = &dev_priv->display;
-
bool blc_event = false;
enum pipe pipe;
@@ -548,12 +544,13 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(display);
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- intel_gmbus_irq_handler(dev_priv);
+ intel_gmbus_irq_handler(display);
}
void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 pipe_stats[I915_MAX_PIPES])
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
@@ -571,7 +568,7 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
}
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- intel_gmbus_irq_handler(dev_priv);
+ intel_gmbus_irq_handler(display);
}
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
@@ -593,7 +590,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_dp_aux_irq_handler(display);
if (pch_iir & SDE_GMBUS)
- intel_gmbus_irq_handler(dev_priv);
+ intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
@@ -682,7 +679,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_dp_aux_irq_handler(display);
if (pch_iir & SDE_GMBUS_CPT)
- intel_gmbus_irq_handler(dev_priv);
+ intel_gmbus_irq_handler(display);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
@@ -847,7 +844,9 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (DISPLAY_VER(dev_priv) >= 14)
+ struct intel_display *display = &dev_priv->display;
+
+ if (DISPLAY_VER(display) >= 14)
return MTL_PIPEDMC_ATS_FAULT |
MTL_PLANE_ATS_FAULT |
GEN12_PIPEDMC_FAULT |
@@ -857,7 +856,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
+ if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
return GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE5_FAULT |
@@ -865,7 +864,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- else if (DISPLAY_VER(dev_priv) == 12)
+ else if (DISPLAY_VER(display) == 12)
return GEN12_PIPEDMC_FAULT |
GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE7_FAULT |
@@ -875,7 +874,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- else if (DISPLAY_VER(dev_priv) == 11)
+ else if (DISPLAY_VER(display) == 11)
return GEN9_PIPE_CURSOR_FAULT |
GEN11_PIPE_PLANE7_FAULT |
GEN11_PIPE_PLANE6_FAULT |
@@ -884,7 +883,7 @@ static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
GEN9_PIPE_PLANE3_FAULT |
GEN9_PIPE_PLANE2_FAULT |
GEN9_PIPE_PLANE1_FAULT;
- else if (DISPLAY_VER(dev_priv) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
return GEN9_PIPE_CURSOR_FAULT |
GEN9_PIPE_PLANE4_FAULT |
GEN9_PIPE_PLANE3_FAULT |
@@ -907,6 +906,13 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
struct intel_display *display = &dev_priv->display;
bool found = false;
+ if (HAS_DBUF_OVERLAP_DETECTION(display)) {
+ if (iir & XE2LPD_DBUF_OVERLAP_DETECTED) {
+ drm_warn(display->drm, "DBuf overlap detected\n");
+ found = true;
+ }
+ }
+
if (DISPLAY_VER(dev_priv) >= 14) {
if (iir & (XELPDP_PMDEMAND_RSP |
XELPDP_PMDEMAND_RSPTOUT_ERR)) {
@@ -1026,17 +1032,6 @@ static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
return GEN8_PIPE_PRIMARY_FLIP_DONE;
}
-u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
-{
- u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
-
- if (DISPLAY_VER(dev_priv) >= 13)
- mask |= XELPD_PIPE_SOFT_UNDERRUN |
- XELPD_PIPE_HARD_UNDERRUN;
-
- return mask;
-}
-
static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
{
u32 pica_ier = 0;
@@ -1125,7 +1120,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
(iir & BXT_DE_PORT_GMBUS)) {
- intel_gmbus_irq_handler(dev_priv);
+ intel_gmbus_irq_handler(display);
found = true;
}
@@ -1182,7 +1177,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
- if (iir & gen8_de_pipe_underrun_mask(dev_priv))
+ if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
@@ -1226,15 +1221,14 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
- void __iomem * const regs = intel_uncore_regs(&i915->uncore);
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
return 0;
- iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+ iir = intel_de_read(i915, GEN11_GU_MISC_IIR);
if (likely(iir))
- raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
+ intel_de_write(i915, GEN11_GU_MISC_IIR, iir);
return iir;
}
@@ -1249,25 +1243,56 @@ void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
void gen11_display_irq_handler(struct drm_i915_private *i915)
{
- void __iomem * const regs = intel_uncore_regs(&i915->uncore);
- const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
+ u32 disp_ctl;
disable_rpm_wakeref_asserts(&i915->runtime_pm);
/*
	 * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
	 * for the display-related bits.
*/
- raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
+ disp_ctl = intel_de_read(i915, GEN11_DISPLAY_INT_CTL);
+
+ intel_de_write(i915, GEN11_DISPLAY_INT_CTL, 0);
gen8_de_irq_handler(i915, disp_ctl);
- raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
- GEN11_DISPLAY_IRQ_ENABLE);
+ intel_de_write(i915, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
-/* Called from drm generic code, passed 'crtc' which
- * we use as a pipe index
- */
+static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915)
+{
+ lockdep_assert_held(&i915->drm.vblank_time_lock);
+
+ /*
+ * Vblank/CRC interrupts fail to wake the device up from C2+.
+ * Disabling render clock gating during C-states avoids
+ * the problem. There is a small power cost so we do this
+ * only when vblank/CRC interrupts are actually enabled.
+ */
+ if (i915->display.irq.vblank_enabled++ == 0)
+ intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+}
+
+static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915)
+{
+ lockdep_assert_held(&i915->drm.vblank_time_lock);
+
+ if (--i915->display.irq.vblank_enabled == 0)
+ intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+}
+
+void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable)
+{
+ spin_lock_irq(&i915->drm.vblank_time_lock);
+
+ if (enable)
+ i915gm_irq_cstate_wa_enable(i915);
+ else
+ i915gm_irq_cstate_wa_disable(i915);
+
+ spin_unlock_irq(&i915->drm.vblank_time_lock);
+}
+
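The helpers above use the usual refcounted-workaround shape: the costly register write happens only on the 0 -> 1 and 1 -> 0 transitions, while nested enables are free. A standalone sketch of just that shape (locking omitted, names hypothetical):

#include <stdio.h>

static int vblank_enabled;	/* guarded by a lock in the real code */

static void wa_enable(void)
{
	if (vblank_enabled++ == 0)
		printf("apply workaround (0 -> 1)\n");
}

static void wa_disable(void)
{
	if (--vblank_enabled == 0)
		printf("remove workaround (1 -> 0)\n");
}

int main(void)
{
	wa_enable();	/* applies the workaround */
	wa_enable();	/* nested: no register write */
	wa_disable();	/* still held: no register write */
	wa_disable();	/* removes the workaround */
	return 0;
}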
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
@@ -1281,22 +1306,35 @@ int i8xx_enable_vblank(struct drm_crtc *crtc)
return 0;
}
+void i8xx_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(crtc->dev);
- /*
- * Vblank interrupts fail to wake the device up from C2+.
- * Disabling render clock gating during C-states avoids
- * the problem. There is a small power cost so we do this
- * only when vblank interrupts are actually enabled.
- */
- if (i915->display.irq.vblank_enabled++ == 0)
- intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ i915gm_irq_cstate_wa_enable(i915);
return i8xx_enable_vblank(crtc);
}
+void i915gm_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->dev);
+
+ i8xx_disable_vblank(crtc);
+
+ i915gm_irq_cstate_wa_disable(i915);
+}
+
int i965_enable_vblank(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
@@ -1311,6 +1349,18 @@ int i965_enable_vblank(struct drm_crtc *crtc)
return 0;
}
+void i965_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_STATUS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
int ilk_enable_vblank(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
@@ -1332,6 +1382,19 @@ int ilk_enable_vblank(struct drm_crtc *crtc)
return 0;
}
+void ilk_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ unsigned long irqflags;
+ u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
+ DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ilk_disable_display_irq(dev_priv, bit);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
bool enable)
{
@@ -1356,9 +1419,26 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
return true;
}
+static void intel_display_vblank_dc_work(struct work_struct *work)
+{
+ struct intel_display *display =
+ container_of(work, typeof(*display), irq.vblank_dc_work);
+ int vblank_wa_num_pipes = READ_ONCE(display->irq.vblank_wa_num_pipes);
+
+ /*
+ * NOTE: intel_display_power_set_target_dc_state is used only by PSR
+ * code for DC3CO handling. DC3CO target state is currently disabled in
+ * PSR code. If DC3CO is taken into use we need take that into account
+ * here as well.
+ */
+ intel_display_power_set_target_dc_state(display, vblank_wa_num_pipes ? DC_STATE_DISABLE :
+ DC_STATE_EN_UPTO_DC6);
+}
+
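intel_display_vblank_dc_work() bases its decision on a single READ_ONCE() snapshot of vblank_wa_num_pipes, so a concurrent update cannot change the choice mid-function. A minimal user-space model of that decision (the DC state names mirror the ones above; the locking and power-domain plumbing are omitted):

#include <stdio.h>

static int vblank_wa_num_pipes; /* written by the enable/disable paths */

static void vblank_dc_work(void)
{
	/* READ_ONCE() in the kernel version: snapshot once, decide once */
	int n = vblank_wa_num_pipes;

	printf("target DC state: %s\n",
	       n ? "DC_STATE_DISABLE" : "DC_STATE_EN_UPTO_DC6");
}

int main(void)
{
	vblank_dc_work();         /* no pipes need the workaround: allow DC6 */
	vblank_wa_num_pipes = 1;
	vblank_dc_work();         /* workaround active: keep DC states off */
	return 0;
}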
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
@@ -1366,6 +1446,9 @@ int bdw_enable_vblank(struct drm_crtc *_crtc)
if (gen11_dsi_configure_te(crtc, true))
return 0;
+ if (crtc->block_dc_for_vblank && display->irq.vblank_wa_num_pipes++ == 0)
+ schedule_work(&display->irq.vblank_dc_work);
+
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1379,58 +1462,10 @@ int bdw_enable_vblank(struct drm_crtc *_crtc)
return 0;
}
-/* Called from drm generic code, passed 'crtc' which
- * we use as a pipe index
- */
-void i8xx_disable_vblank(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
-
-void i915gm_disable_vblank(struct drm_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->dev);
-
- i8xx_disable_vblank(crtc);
-
- if (--i915->display.irq.vblank_enabled == 0)
- intel_uncore_write(&i915->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
-}
-
-void i965_disable_vblank(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_START_VBLANK_INTERRUPT_STATUS);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
-
-void ilk_disable_vblank(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- unsigned long irqflags;
- u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
- DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ilk_disable_display_irq(dev_priv, bit);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
-
void bdw_disable_vblank(struct drm_crtc *_crtc)
{
struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
@@ -1441,9 +1476,12 @@ void bdw_disable_vblank(struct drm_crtc *_crtc)
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0)
+ schedule_work(&display->irq.vblank_dc_work);
}
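bdw_enable_vblank() and bdw_disable_vblank() only queue the DC work on the 0->1 and 1->0 transitions of vblank_wa_num_pipes, since intermediate counts cannot change the target state. A sketch of that edge-triggered counting (hypothetical names, plain C in place of the real work queue):

#include <stdio.h>

static int wa_num_pipes;

static void schedule_dc_work(void) { puts("schedule_work(vblank_dc_work)"); }

/* Only the 0 -> 1 and 1 -> 0 transitions change the DC target,
 * so the work is queued on those edges alone. */
static void vblank_enable(int needs_wa)
{
	if (needs_wa && wa_num_pipes++ == 0)
		schedule_dc_work();
}

static void vblank_disable(int needs_wa)
{
	if (needs_wa && --wa_num_pipes == 0)
		schedule_dc_work();
}

int main(void)
{
	vblank_enable(1);  /* edge: work queued */
	vblank_enable(1);  /* no edge: no queue */
	vblank_disable(1); /* no edge */
	vblank_disable(1); /* edge: work queued */
	return 0;
}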
-void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -1457,10 +1495,27 @@ void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET(uncore, VLV_);
+ gen2_irq_reset(uncore, VLV_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
+void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->display.irq.vlv_display_irqs_enabled)
+ _vlv_display_irq_reset(dev_priv);
+}
+
+void i9xx_display_irq_reset(struct drm_i915_private *i915)
+{
+ if (I915_HAS_HOTPLUG(i915)) {
+ i915_hotplug_interrupt_update(i915, 0xffffffff, 0);
+ intel_uncore_rmw(&i915->uncore,
+ PORT_HOTPLUG_STAT(i915), 0, 0);
+ }
+
+ i9xx_pipestat_irq_reset(i915);
+}
+
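The split of vlv_display_irq_reset() above follows a common pattern: the underscore-prefixed helper does the register work unconditionally, while the public entry point is gated on vlv_display_irqs_enabled so callers cannot touch the display block while its power domain may be down. A compact, runnable sketch of the shape (names are illustrative):

#include <stdio.h>

static int vlv_display_irqs_enabled; /* tracked under the irq lock */

static void _display_irq_reset(void)
{
	puts("resetting display IRQ registers");
}

static void display_irq_reset(void)
{
	if (vlv_display_irqs_enabled) /* skip while the power domain is off */
		_display_irq_reset();
}

int main(void)
{
	display_irq_reset();          /* no-op: irqs not enabled */
	vlv_display_irqs_enabled = 1; /* as valleyview_enable_display_irqs() does */
	display_irq_reset();          /* now performs the reset */
	return 0;
}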
void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -1469,6 +1524,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
u32 enable_mask;
enum pipe pipe;
+ if (!dev_priv->display.irq.vlv_display_irqs_enabled)
+ return;
+
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
@@ -1489,7 +1547,7 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~enable_mask;
- GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
+ gen2_irq_init(uncore, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
}
void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -1506,10 +1564,10 @@ void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
+ gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
- GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
- GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+ gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
+ gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
}
void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -1549,26 +1607,25 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
+ gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
- GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
- GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+ gen2_irq_reset(uncore, GEN8_DE_PORT_IRQ_REGS);
+ gen2_irq_reset(uncore, GEN8_DE_MISC_IRQ_REGS);
if (DISPLAY_VER(dev_priv) >= 14)
- GEN3_IRQ_RESET(uncore, PICAINTERRUPT_);
+ gen2_irq_reset(uncore, PICAINTERRUPT_IRQ_REGS);
else
- GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
+ gen2_irq_reset(uncore, GEN11_DE_HPD_IRQ_REGS);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- GEN3_IRQ_RESET(uncore, SDE);
+ gen2_irq_reset(uncore, SDE_IRQ_REGS);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- u32 extra_ier = GEN8_PIPE_VBLANK |
- gen8_de_pipe_underrun_mask(dev_priv) |
+ u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
gen8_de_pipe_flip_done_mask(dev_priv);
enum pipe pipe;
@@ -1580,9 +1637,9 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
- dev_priv->display.irq.de_irq_mask[pipe],
- ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
+ gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
+ dev_priv->display.irq.de_irq_mask[pipe],
+ ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -1601,7 +1658,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
+ gen2_irq_reset(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe));
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1635,20 +1692,20 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
else
mask = SDE_GMBUS_CPT;
- GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
+ gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
+ if (dev_priv->display.irq.vlv_display_irqs_enabled)
return;
- dev_priv->display.irq.display_irqs_enabled = true;
+ dev_priv->display.irq.vlv_display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
- vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(dev_priv);
vlv_display_irq_postinstall(dev_priv);
}
}
@@ -1657,13 +1714,13 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
lockdep_assert_held(&dev_priv->irq_lock);
- if (!dev_priv->display.irq.display_irqs_enabled)
+ if (!dev_priv->display.irq.vlv_display_irqs_enabled)
return;
- dev_priv->display.irq.display_irqs_enabled = false;
+ dev_priv->display.irq.vlv_display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- vlv_display_irq_reset(dev_priv);
+ _vlv_display_irq_reset(dev_priv);
}
void ilk_de_irq_postinstall(struct drm_i915_private *i915)
@@ -1692,7 +1749,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
}
if (IS_HASWELL(i915)) {
- gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@@ -1703,7 +1760,7 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915)
ibx_irq_postinstall(i915);
- GEN3_IRQ_INIT(uncore, DE, i915->irq_mask,
+ gen2_irq_init(uncore, DE_IRQ_REGS, i915->irq_mask,
display_mask | extra_mask);
}
@@ -1751,14 +1808,16 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_port_masked |= DSI0_TE | DSI1_TE;
}
+ if (HAS_DBUF_OVERLAP_DETECTION(display))
+ de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED;
+
if (HAS_DSB(dev_priv))
de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) |
GEN12_DSB_INT(INTEL_DSB_1) |
GEN12_DSB_INT(INTEL_DSB_2);
de_pipe_enables = de_pipe_masked |
- GEN8_PIPE_VBLANK |
- gen8_de_pipe_underrun_mask(dev_priv) |
+ GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
gen8_de_pipe_flip_done_mask(dev_priv);
de_port_enables = de_port_masked;
@@ -1777,11 +1836,11 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (!intel_display_power_is_enabled(dev_priv, domain))
continue;
- gen3_assert_iir_is_zero(uncore,
+ gen2_assert_iir_is_zero(uncore,
TRANS_PSR_IIR(dev_priv, trans));
}
} else {
- gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ gen2_assert_iir_is_zero(uncore, EDP_PSR_IIR);
}
for_each_pipe(dev_priv, pipe) {
@@ -1789,20 +1848,20 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
- dev_priv->display.irq.de_irq_mask[pipe],
- de_pipe_enables);
+ gen2_irq_init(uncore, GEN8_DE_PIPE_IRQ_REGS(pipe),
+ dev_priv->display.irq.de_irq_mask[pipe],
+ de_pipe_enables);
}
- GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
- GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+ gen2_irq_init(uncore, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked, de_port_enables);
+ gen2_irq_init(uncore, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked);
if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
- GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
+ gen2_irq_init(uncore, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
de_hpd_enables);
}
}
@@ -1815,10 +1874,10 @@ static void mtp_irq_postinstall(struct drm_i915_private *i915)
u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
XELPDP_TBT_HOTPLUG_MASK;
- GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask,
+ gen2_irq_init(uncore, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
de_hpd_enables);
- GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
+ gen2_irq_init(uncore, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -1826,7 +1885,7 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
u32 mask = SDE_GMBUS_ICP;
- GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
+ gen2_irq_init(uncore, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -1854,16 +1913,8 @@ void intel_display_irq_init(struct drm_i915_private *i915)
{
i915->drm.vblank_disable_immediate = true;
- /*
- * Most platforms treat the display irq block as an always-on power
- * domain. vlv/chv can disable it at runtime and need special care to
- * avoid writing any of the display block registers outside of the power
- * domain. We defer setting up the display irqs in this case to the
- * runtime pm.
- */
- i915->display.irq.display_irqs_enabled = true;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display.irq.display_irqs_enabled = false;
-
intel_hotplug_irq_init(i915);
+
+ INIT_WORK(&i915->display.irq.vblank_dc_work,
+ intel_display_vblank_dc_work);
}
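intel_display_irq_init() now wires the DC workaround handler with INIT_WORK() so it can be queued later from the vblank paths. For reference, a minimal self-contained kernel module using the same INIT_WORK()/schedule_work()/flush_work() trio (a hypothetical demo module, not i915 code):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: deferred work ran\n");
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn); /* bind handler once at init */
	schedule_work(&demo_work);           /* queue on the system workqueue */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work); /* ensure it finished before unload */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");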
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index 2a090dd6abd7..b077712b7be1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -33,7 +33,6 @@ void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits);
void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask);
-u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *i915);
int i8xx_enable_vblank(struct drm_crtc *crtc);
int i915gm_enable_vblank(struct drm_crtc *crtc);
@@ -54,6 +53,7 @@ void gen11_display_irq_handler(struct drm_i915_private *i915);
u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl);
void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir);
+void i9xx_display_irq_reset(struct drm_i915_private *i915);
void vlv_display_irq_reset(struct drm_i915_private *i915);
void gen8_display_irq_reset(struct drm_i915_private *i915);
void gen11_display_irq_reset(struct drm_i915_private *i915);
@@ -68,15 +68,15 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *i915, enum pipe pipe);
void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask);
void i915_enable_asle_pipestat(struct drm_i915_private *i915);
-void i9xx_pipestat_irq_reset(struct drm_i915_private *i915);
void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]);
void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]);
-void i8xx_pipestat_irq_handler(struct drm_i915_private *i915, u16 iir, u32 pipe_stats[I915_MAX_PIPES]);
void intel_display_irq_init(struct drm_i915_private *i915);
+void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable);
+
#endif /* __INTEL_DISPLAY_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h
index c4775c99dc83..f0fa27e365ab 100644
--- a/drivers/gpu/drm/i915/display/intel_display_limits.h
+++ b/drivers/gpu/drm/i915/display/intel_display_limits.h
@@ -50,6 +50,16 @@ enum transcoder {
};
/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x-bdw.
+ */
+enum i9xx_plane_id {
+ PLANE_A,
+ PLANE_B,
+ PLANE_C,
+};
+
+/*
* Per-pipe plane identifier.
* I915_MAX_PLANES in the enum below is the maximum (across all platforms)
* number of planes per CRTC. Not all platforms really have this many planes,
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index 1a45d300b6f0..f92e4640a613 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -3,8 +3,13 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string_choices.h>
+
+#include <drm/drm_print.h>
+
#include "intel_display_params.h"
-#include "i915_drv.h"
#define intel_display_param_named(name, T, perm, desc) \
module_param_named(name, intel_display_modparams.name, T, perm); \
@@ -123,10 +128,10 @@ intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
"(0=disabled, 1=enabled) "
"Default: 1");
-intel_display_param_named_unsafe(enable_dmc_wl, bool, 0400,
+intel_display_param_named_unsafe(enable_dmc_wl, int, 0400,
"Enable DMC wakelock "
- "(0=disabled, 1=enabled) "
- "Default: 0");
+ "(-1=use per-chip default, 0=disabled, 1=enabled) "
+ "Default: -1");
__maybe_unused
static void _param_print_bool(struct drm_printer *p, const char *driver_name,
@@ -173,14 +178,16 @@ static void _param_print_charp(struct drm_printer *p, const char *driver_name,
/**
* intel_display_params_dump - dump intel display modparams
- * @display: display device
+ * @params: display params
+ * @driver_name: driver name to use for printing
* @p: the &drm_printer
*
* Pretty printer for i915 modparams.
*/
-void intel_display_params_dump(struct intel_display *display, struct drm_printer *p)
+void intel_display_params_dump(const struct intel_display_params *params,
+ const char *driver_name, struct drm_printer *p)
{
-#define PRINT(T, x, ...) _param_print(p, display->drm->driver->name, #x, display->params.x);
+#define PRINT(T, x, ...) _param_print(p, driver_name, #x, params->x);
INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
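The PRINT macro above leans on the INTEL_DISPLAY_PARAMS_FOR_EACH X-macro, which expands a single parameter list into both the struct definition and the dump loop. A standalone model of the idiom with a two-entry list (all names here are illustrative):

#include <stdio.h>

/* One list of params, expanded once for the struct, once for the dump. */
#define PARAMS_FOR_EACH(param) \
	param(int, enable_dc,     -1) \
	param(int, enable_dmc_wl, -1)

#define MEMBER(T, name, def) T name;
struct params { PARAMS_FOR_EACH(MEMBER) };
#undef MEMBER

static void params_dump(const struct params *p, const char *driver)
{
#define PRINT(T, name, def) printf("%s.%s=%d\n", driver, #name, p->name);
	PARAMS_FOR_EACH(PRINT)
#undef PRINT
}

int main(void)
{
	struct params p = { .enable_dc = 2, .enable_dmc_wl = 1 };

	params_dump(&p, "i915");
	return 0;
}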
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index da8dc943234b..5317138e6044 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -9,7 +9,6 @@
#include <linux/types.h>
struct drm_printer;
-struct intel_display;
/*
* Invoke param, a function-like macro, for each intel display param, with
@@ -48,7 +47,7 @@ struct intel_display;
param(int, enable_psr, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, true, 0400) \
- param(bool, enable_dmc_wl, false, 0400) \
+ param(int, enable_dmc_wl, -1, 0400) \
#define MEMBER(T, member, ...) T member;
struct intel_display_params {
@@ -56,8 +55,8 @@ struct intel_display_params {
};
#undef MEMBER
-void intel_display_params_dump(struct intel_display *display,
- struct drm_printer *p);
+void intel_display_params_dump(const struct intel_display_params *params,
+ const char *driver_name, struct drm_printer *p);
void intel_display_params_copy(struct intel_display_params *dest);
void intel_display_params_free(struct intel_display_params *params);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index ef2fdbf97346..d3b8453a1705 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -28,12 +28,12 @@
#include "skl_watermark_regs.h"
#include "vlv_sideband.h"
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
- for_each_power_well(__dev_priv, __power_well) \
+#define for_each_power_domain_well(__display, __power_well, __domain) \
+ for_each_power_well((__display), __power_well) \
for_each_if(test_bit((__domain), (__power_well)->domains.bits))
-#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
- for_each_power_well_reverse(__dev_priv, __power_well) \
+#define for_each_power_domain_well_reverse(__display, __power_well, __domain) \
+ for_each_power_well_reverse((__display), __power_well) \
for_each_if(test_bit((__domain), (__power_well)->domains.bits))
static const char *
@@ -198,18 +198,18 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
}
}
-static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+static bool __intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
struct i915_power_well *power_well;
bool is_enabled;
- if (pm_runtime_suspended(dev_priv->drm.dev))
+ if (pm_runtime_suspended(display->drm->dev))
return false;
is_enabled = true;
- for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
+ for_each_power_domain_well_reverse(display, power_well, domain) {
if (intel_power_well_is_always_on(power_well))
continue;
@@ -242,23 +242,22 @@ static bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
bool ret;
- power_domains = &dev_priv->display.power.domains;
-
mutex_lock(&power_domains->lock);
- ret = __intel_display_power_is_enabled(dev_priv, domain);
+ ret = __intel_display_power_is_enabled(display, domain);
mutex_unlock(&power_domains->lock);
return ret;
}
static u32
-sanitize_target_dc_state(struct drm_i915_private *i915,
+sanitize_target_dc_state(struct intel_display *display,
u32 target_dc_state)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
static const u32 states[] = {
DC_STATE_EN_UPTO_DC6,
DC_STATE_EN_UPTO_DC5,
@@ -282,43 +281,43 @@ sanitize_target_dc_state(struct drm_i915_private *i915,
/**
* intel_display_power_set_target_dc_state - Set target dc state.
- * @dev_priv: i915 device
+ * @display: display device
* @state: state which needs to be set as target_dc_state.
*
* This function sets the "DC off" power well's target_dc_state;
* based upon this target_dc_state, the "DC off" power well will
* enable the desired DC state.
*/
-void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state)
{
struct i915_power_well *power_well;
bool dc_off_enabled;
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
mutex_lock(&power_domains->lock);
- power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
+ power_well = lookup_power_well(display, SKL_DISP_DC_OFF);
- if (drm_WARN_ON(&dev_priv->drm, !power_well))
+ if (drm_WARN_ON(display->drm, !power_well))
goto unlock;
- state = sanitize_target_dc_state(dev_priv, state);
+ state = sanitize_target_dc_state(display, state);
if (state == power_domains->target_dc_state)
goto unlock;
- dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
+ dc_off_enabled = intel_power_well_is_enabled(display, power_well);
/*
* If DC off power well is disabled, need to enable and disable the
* DC off power well to effect target DC state.
*/
if (!dc_off_enabled)
- intel_power_well_enable(dev_priv, power_well);
+ intel_power_well_enable(display, power_well);
power_domains->target_dc_state = state;
if (!dc_off_enabled)
- intel_power_well_disable(dev_priv, power_well);
+ intel_power_well_disable(display, power_well);
unlock:
mutex_unlock(&power_domains->lock);
@@ -338,11 +337,11 @@ static void __async_put_domains_mask(struct i915_power_domains *power_domains,
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
- return !drm_WARN_ON(&i915->drm,
+ return !drm_WARN_ON(display->drm,
bitmap_intersects(power_domains->async_put_domains[0].bits,
power_domains->async_put_domains[1].bits,
POWER_DOMAIN_NUM));
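Several hunks here re-derive the owning structure from an embedded i915_power_domains via container_of(), now landing on struct intel_display instead of drm_i915_private. A self-contained model of the macro itself (simplified from the kernel's type-checked version):

#include <stddef.h>
#include <stdio.h>

/* Recover the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct power_domains { int lock; };
struct display { const char *name; struct power_domains power; };

int main(void)
{
	struct display d = { .name = "display0" };
	struct power_domains *pd = &d.power;
	struct display *back = container_of(pd, struct display, power);

	printf("%s\n", back->name); /* prints "display0" */
	return 0;
}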
@@ -351,21 +350,21 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
struct intel_power_domain_mask async_put_mask;
enum intel_display_power_domain domain;
bool err = false;
err |= !assert_async_put_domain_masks_disjoint(power_domains);
__async_put_domains_mask(power_domains, &async_put_mask);
- err |= drm_WARN_ON(&i915->drm,
+ err |= drm_WARN_ON(display->drm,
!!power_domains->async_put_wakeref !=
!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, &async_put_mask)
- err |= drm_WARN_ON(&i915->drm,
+ err |= drm_WARN_ON(display->drm,
power_domains->domain_use_count[domain] != 1);
return !err;
@@ -374,27 +373,27 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, struct intel_power_domain_mask *mask)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
enum intel_display_power_domain domain;
- drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
+ drm_dbg_kms(display->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask)
- drm_dbg(&i915->drm, "%s use_count %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg_kms(display->drm, "%s use_count %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
- drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
- str_yes_no(power_domains->async_put_wakeref));
+ drm_dbg_kms(display->drm, "async_put_wakeref: %s\n",
+ str_yes_no(power_domains->async_put_wakeref));
print_power_domains(power_domains, "async_put_domains[0]",
&power_domains->async_put_domains[0]);
@@ -454,10 +453,11 @@ cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
}
static bool
-intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
+intel_display_power_grab_async_put_ref(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -483,17 +483,17 @@ out_verify:
}
static void
-__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+__intel_display_power_get_domain(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
- if (intel_display_power_grab_async_put_ref(dev_priv, domain))
+ if (intel_display_power_grab_async_put_ref(display, domain))
return;
- for_each_power_domain_well(dev_priv, power_well, domain)
- intel_power_well_get(dev_priv, power_well);
+ for_each_power_domain_well(display, power_well, domain)
+ intel_power_well_get(display, power_well);
power_domains->domain_use_count[domain]++;
}
@@ -513,11 +513,12 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&power_domains->lock);
- __intel_display_power_get_domain(dev_priv, domain);
+ __intel_display_power_get_domain(display, domain);
mutex_unlock(&power_domains->lock);
return wakeref;
@@ -539,18 +540,19 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref;
bool is_enabled;
wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
- return false;
+ return NULL;
mutex_lock(&power_domains->lock);
- if (__intel_display_power_is_enabled(dev_priv, domain)) {
- __intel_display_power_get_domain(dev_priv, domain);
+ if (__intel_display_power_is_enabled(display, domain)) {
+ __intel_display_power_get_domain(display, domain);
is_enabled = true;
} else {
is_enabled = false;
@@ -560,45 +562,43 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
if (!is_enabled) {
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- wakeref = 0;
+ wakeref = NULL;
}
return wakeref;
}
static void
-__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
+__intel_display_power_put_domain(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
const char *name = intel_display_power_domain_str(domain);
struct intel_power_domain_mask async_put_mask;
- power_domains = &dev_priv->display.power.domains;
-
- drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
+ drm_WARN(display->drm, !power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
name);
async_put_domains_mask(power_domains, &async_put_mask);
- drm_WARN(&dev_priv->drm,
+ drm_WARN(display->drm,
test_bit(domain, async_put_mask.bits),
"Async disabling of domain %s is pending\n",
name);
power_domains->domain_use_count[domain]--;
- for_each_power_domain_well_reverse(dev_priv, power_well, domain)
- intel_power_well_put(dev_priv, power_well);
+ for_each_power_domain_well_reverse(display, power_well, domain)
+ intel_power_well_put(display, power_well);
}
-static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+static void __intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
mutex_lock(&power_domains->lock);
- __intel_display_power_put_domain(dev_priv, domain);
+ __intel_display_power_put_domain(display, domain);
mutex_unlock(&power_domains->lock);
}
@@ -607,23 +607,24 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
- drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
+ drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
- drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
- &power_domains->async_put_work,
- msecs_to_jiffies(delay_ms)));
+ drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq,
+ &power_domains->async_put_work,
+ msecs_to_jiffies(delay_ms)));
}
static void
release_async_put_domains(struct i915_power_domains *power_domains,
struct intel_power_domain_mask *mask)
{
- struct drm_i915_private *dev_priv =
- container_of(power_domains, struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
@@ -633,7 +634,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
async_put_domains_clear_domain(power_domains, domain);
- __intel_display_power_put_domain(dev_priv, domain);
+ __intel_display_power_put_domain(display, domain);
}
intel_runtime_pm_put(rpm, wakeref);
@@ -642,13 +643,13 @@ release_async_put_domains(struct i915_power_domains *power_domains,
static void
intel_display_power_put_async_work(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- display.power.domains.async_put_work.work);
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_display *display = container_of(work, struct intel_display,
+ power.domains.async_put_work.work);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
- intel_wakeref_t old_work_wakeref = 0;
+ intel_wakeref_t old_work_wakeref = NULL;
mutex_lock(&power_domains->lock);
@@ -711,7 +712,8 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
intel_wakeref_t wakeref,
int delay_ms)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_display *display = &i915->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
@@ -720,12 +722,12 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
mutex_lock(&power_domains->lock);
if (power_domains->domain_use_count[domain] > 1) {
- __intel_display_power_put_domain(i915, domain);
+ __intel_display_power_put_domain(display, domain);
goto out_verify;
}
- drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
+ drm_WARN_ON(display->drm, power_domains->domain_use_count[domain] != 1);
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
@@ -764,7 +766,8 @@ out_verify:
*/
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_display *display = &i915->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -789,22 +792,23 @@ out_verify:
/**
* intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
- * @i915: i915 device instance
+ * @display: display device instance
*
* Like intel_display_power_flush_work(), but also ensures that the work
* handler function is no longer running when this function returns.
*/
static void
-intel_display_power_flush_work_sync(struct drm_i915_private *i915)
+intel_display_power_flush_work_sync(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
intel_display_power_flush_work(i915);
cancel_async_put_work(power_domains, true);
verify_async_put_domains_state(power_domains);
- drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
+ drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -822,7 +826,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- __intel_display_power_put(dev_priv, domain);
+ struct intel_display *display = &dev_priv->display;
+
+ __intel_display_power_put(display, domain);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#else
@@ -842,7 +848,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- __intel_display_power_put(dev_priv, domain);
+ struct intel_display *display = &dev_priv->display;
+
+ __intel_display_power_put(display, domain);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
#endif
@@ -852,9 +860,10 @@ intel_display_power_get_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t __maybe_unused wf;
- drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
+ drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -868,9 +877,10 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t wf;
- drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
+ drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get_if_enabled(i915, domain);
if (!wf)
@@ -889,13 +899,14 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
struct intel_power_domain_mask *mask)
{
+ struct intel_display *display = &i915->display;
enum intel_display_power_domain domain;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask) {
- intel_wakeref_t __maybe_unused wf = -1;
+ intel_wakeref_t __maybe_unused wf = INTEL_WAKEREF_DEF;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
@@ -906,8 +917,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
}
static int
-sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
- int disable_power_well)
+sanitize_disable_power_well_option(int disable_power_well)
{
if (disable_power_well >= 0)
return !!disable_power_well;
@@ -915,27 +925,26 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
return 1;
}
-static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
- int enable_dc)
+static u32 get_allowed_dc_mask(struct intel_display *display, int enable_dc)
{
u32 mask;
int requested_dc;
int max_dc;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return 0;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
max_dc = 2;
- else if (IS_DG2(dev_priv))
+ else if (display->platform.dg2)
max_dc = 1;
- else if (IS_DG1(dev_priv))
+ else if (display->platform.dg1)
max_dc = 3;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
max_dc = 4;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
max_dc = 1;
- else if (DISPLAY_VER(dev_priv) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
max_dc = 2;
else
max_dc = 0;
@@ -945,11 +954,10 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
- mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
- DISPLAY_VER(dev_priv) >= 11 ?
- DC_STATE_EN_DC9 : 0;
+ mask = display->platform.geminilake || display->platform.broxton ||
+ DISPLAY_VER(display) >= 11 ? DC_STATE_EN_DC9 : 0;
- if (!dev_priv->display.params.disable_power_well)
+ if (!display->params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -957,12 +965,12 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
} else if (enable_dc == -1) {
requested_dc = max_dc;
} else if (enable_dc > max_dc && enable_dc <= 4) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Adjusting requested max DC state (%d->%d)\n",
enable_dc, max_dc);
requested_dc = max_dc;
} else {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unexpected value for enable_dc (%d)\n", enable_dc);
requested_dc = max_dc;
}
@@ -982,30 +990,29 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
break;
}
- drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
+ drm_dbg_kms(display->drm, "Allowed DC state mask %02x\n", mask);
return mask;
}
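get_allowed_dc_mask() first derives a platform maximum, then clamps the user's enable_dc request to it: in-range values are honored, while -1 (auto) and out-of-range values both resolve to the maximum. The core of that sanitizing, as a sketch:

#include <stdio.h>

/* enable_dc: -1 = auto, otherwise the requested max DC state */
static int sanitize_enable_dc(int enable_dc, int max_dc)
{
	if (enable_dc >= 0 && enable_dc <= max_dc)
		return enable_dc;
	return max_dc; /* auto and out-of-range requests fall back here */
}

int main(void)
{
	printf("%d\n", sanitize_enable_dc(1, 2));  /* honored: 1 */
	printf("%d\n", sanitize_enable_dc(-1, 2)); /* auto:    2 */
	printf("%d\n", sanitize_enable_dc(4, 2));  /* clamped: 2 */
	return 0;
}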
/**
* intel_power_domains_init - initializes the power domain structures
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* Initializes the power domain structures for @display depending upon the
* supported platform.
*/
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
+int intel_power_domains_init(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
- dev_priv->display.params.disable_power_well =
- sanitize_disable_power_well_option(dev_priv,
- dev_priv->display.params.disable_power_well);
+ display->params.disable_power_well =
+ sanitize_disable_power_well_option(display->params.disable_power_well);
power_domains->allowed_dc_mask =
- get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc);
+ get_allowed_dc_mask(display, display->params.enable_dc);
power_domains->target_dc_state =
- sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ sanitize_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
@@ -1017,39 +1024,39 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
/**
* intel_power_domains_cleanup - clean up power domains resources
- * @dev_priv: i915 device instance
+ * @display: display device instance
*
* Release any resources acquired by intel_power_domains_init()
*/
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
+void intel_power_domains_cleanup(struct intel_display *display)
{
- intel_display_power_map_cleanup(&dev_priv->display.power.domains);
+ intel_display_power_map_cleanup(&display->power.domains);
}
-static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
+static void intel_power_domains_sync_hw(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
- for_each_power_well(dev_priv, power_well)
- intel_power_well_sync_hw(dev_priv, power_well);
+ for_each_power_well(display, power_well)
+ intel_power_well_sync_hw(display, power_well);
mutex_unlock(&power_domains->lock);
}
-static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
+static void gen9_dbuf_slice_set(struct intel_display *display,
enum dbuf_slice slice, bool enable)
{
i915_reg_t reg = DBUF_CTL_S(slice);
bool state;
- intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
+ intel_de_rmw(display, reg, DBUF_POWER_REQUEST,
enable ? DBUF_POWER_REQUEST : 0);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(display, reg);
udelay(10);
- state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
- drm_WARN(&dev_priv->drm, enable != state,
+ state = intel_de_read(display, reg) & DBUF_POWER_STATE;
+ drm_WARN(display->drm, enable != state,
"DBuf slice %d power %s timeout!\n",
slice, str_enable_disable(enable));
}
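gen9_dbuf_slice_set() follows a classic request/acknowledge handshake: set the request bit, issue a posting read, give the hardware a fixed delay, then verify the state bit. A user-space model of the sequence against a fake register (POWER_REQUEST/POWER_STATE mirror the real bit names; the delay is elided):

#include <stdbool.h>
#include <stdio.h>

#define POWER_REQUEST (1u << 31)
#define POWER_STATE   (1u << 30)

static unsigned int dbuf_ctl; /* stands in for the DBUF_CTL_S(slice) register */

static unsigned int reg_read(void) { return dbuf_ctl; }

static void reg_write(unsigned int v)
{
	dbuf_ctl = v;
	if (dbuf_ctl & POWER_REQUEST) /* fake hardware: honor the request */
		dbuf_ctl |= POWER_STATE;
	else
		dbuf_ctl &= ~POWER_STATE;
}

static bool dbuf_slice_set(bool enable)
{
	reg_write(enable ? (reg_read() | POWER_REQUEST)
			 : (reg_read() & ~POWER_REQUEST));
	/* real code: posting read + udelay(10) before checking the state bit */
	return !!(reg_read() & POWER_STATE) == enable;
}

int main(void)
{
	printf("enable ok: %d\n", dbuf_slice_set(true));
	printf("disable ok: %d\n", dbuf_slice_set(false));
	return 0;
}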
@@ -1057,15 +1064,16 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
- u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask;
+ struct intel_display *display = &dev_priv->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
+ u8 slice_mask = DISPLAY_INFO(display)->dbuf.slice_mask;
enum dbuf_slice slice;
- drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
+ drm_WARN(display->drm, req_slices & ~slice_mask,
"Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
req_slices, slice_mask);
- drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
+ drm_dbg_kms(display->drm, "Updating dbuf slices to 0x%x\n",
req_slices);
/*
@@ -1077,25 +1085,25 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
*/
mutex_lock(&power_domains->lock);
- for_each_dbuf_slice(dev_priv, slice)
- gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
+ for_each_dbuf_slice(display, slice)
+ gen9_dbuf_slice_set(display, slice, req_slices & BIT(slice));
- dev_priv->display.dbuf.enabled_slices = req_slices;
+ display->dbuf.enabled_slices = req_slices;
mutex_unlock(&power_domains->lock);
}
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_enable(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u8 slices_mask;
- dev_priv->display.dbuf.enabled_slices =
- intel_enabled_dbuf_slices_mask(dev_priv);
+ display->dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices;
+ slices_mask = BIT(DBUF_S1) | display->dbuf.enabled_slices;
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_pmdemand_program_dbuf(dev_priv, slices_mask);
+ if (DISPLAY_VER(display) >= 14)
+ intel_pmdemand_program_dbuf(display, slices_mask);
/*
* Just power up at least 1 slice, we will
@@ -1104,33 +1112,35 @@ static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
gen9_dbuf_slices_update(dev_priv, slices_mask);
}
-static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+static void gen9_dbuf_disable(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
gen9_dbuf_slices_update(dev_priv, 0);
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_pmdemand_program_dbuf(dev_priv, 0);
+ if (DISPLAY_VER(display) >= 14)
+ intel_pmdemand_program_dbuf(display, 0);
}
-static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
+static void gen12_dbuf_slices_config(struct intel_display *display)
{
enum dbuf_slice slice;
- if (IS_ALDERLAKE_P(dev_priv))
+ if (display->platform.alderlake_p)
return;
- for_each_dbuf_slice(dev_priv, slice)
- intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
+ for_each_dbuf_slice(display, slice)
+ intel_de_rmw(display, DBUF_CTL_S(slice),
DBUF_TRACKER_STATE_SERVICE_MASK,
DBUF_TRACKER_STATE_SERVICE(8));
}
-static void icl_mbus_init(struct drm_i915_private *dev_priv)
+static void icl_mbus_init(struct intel_display *display)
{
- unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask;
+ unsigned long abox_regs = DISPLAY_INFO(display)->abox_mask;
u32 mask, val, i;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
return;
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
@@ -1147,16 +1157,16 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
* expect us to program the abox_ctl0 register as well, even though
* we don't have to program other instance-0 registers like BW_BUDDY.
*/
- if (DISPLAY_VER(dev_priv) == 12)
+ if (DISPLAY_VER(display) == 12)
abox_regs |= BIT(0);
for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
- intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
+ intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val);
}
-static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
+static void hsw_assert_cdclk(struct intel_display *display)
{
- u32 val = intel_de_read(dev_priv, LCPLL_CTL);
+ u32 val = intel_de_read(display, LCPLL_CTL);
/*
* The LCPLL register should be turned on by the BIOS. For now
@@ -1165,54 +1175,55 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
*/
if (val & LCPLL_CD_SOURCE_FCLK)
- drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
+ drm_err(display->drm, "CDCLK source is not LCPLL\n");
if (val & LCPLL_PLL_DISABLE)
- drm_err(&dev_priv->drm, "LCPLL is disabled\n");
+ drm_err(display->drm, "LCPLL is disabled\n");
if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
- drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
+ drm_err(display->drm, "LCPLL not using non-SSC reference\n");
}
-static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+static void assert_can_disable_lcpll(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc)
- I915_STATE_WARN(dev_priv, crtc->active,
- "CRTC for pipe %c enabled\n",
- pipe_name(crtc->pipe));
-
- I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
- "Display power well on\n");
- I915_STATE_WARN(dev_priv,
- intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
- "SPLL enabled\n");
- I915_STATE_WARN(dev_priv,
- intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
- "WRPLL1 enabled\n");
- I915_STATE_WARN(dev_priv,
- intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
- "WRPLL2 enabled\n");
- I915_STATE_WARN(dev_priv,
- intel_de_read(dev_priv, PP_STATUS(dev_priv, 0)) & PP_ON,
- "Panel power on\n");
- I915_STATE_WARN(dev_priv,
- intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
- "CPU PWM1 enabled\n");
- if (IS_HASWELL(dev_priv))
- I915_STATE_WARN(dev_priv,
- intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
- "CPU PWM2 enabled\n");
- I915_STATE_WARN(dev_priv,
- intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
- "PCH PWM1 enabled\n");
- I915_STATE_WARN(dev_priv,
- (intel_de_read(dev_priv, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
- "Utility pin enabled in PWM mode\n");
- I915_STATE_WARN(dev_priv,
- intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
- "PCH GTC enabled\n");
+ for_each_intel_crtc(display->drm, crtc)
+ INTEL_DISPLAY_STATE_WARN(display, crtc->active,
+ "CRTC for pipe %c enabled\n",
+ pipe_name(crtc->pipe));
+
+ INTEL_DISPLAY_STATE_WARN(display, intel_de_read(display, HSW_PWR_WELL_CTL2),
+ "Display power well on\n");
+ INTEL_DISPLAY_STATE_WARN(display,
+ intel_de_read(display, SPLL_CTL) & SPLL_PLL_ENABLE,
+ "SPLL enabled\n");
+ INTEL_DISPLAY_STATE_WARN(display,
+ intel_de_read(display, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
+ "WRPLL1 enabled\n");
+ INTEL_DISPLAY_STATE_WARN(display,
+ intel_de_read(display, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
+ "WRPLL2 enabled\n");
+ INTEL_DISPLAY_STATE_WARN(display,
+ intel_de_read(display, PP_STATUS(display, 0)) & PP_ON,
+ "Panel power on\n");
+ INTEL_DISPLAY_STATE_WARN(display,
+ intel_de_read(display, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ "CPU PWM1 enabled\n");
+ if (display->platform.haswell)
+ INTEL_DISPLAY_STATE_WARN(display,
+ intel_de_read(display, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
+ INTEL_DISPLAY_STATE_WARN(display,
+ intel_de_read(display, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ "PCH PWM1 enabled\n");
+ INTEL_DISPLAY_STATE_WARN(display,
+ (intel_de_read(display, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
+ "Utility pin enabled in PWM mode\n");
+ INTEL_DISPLAY_STATE_WARN(display,
+ intel_de_read(display, PCH_GTC_CTL) & PCH_GTC_ENABLE,
+ "PCH GTC enabled\n");
/*
* In theory we can still leave IRQs enabled, as long as only the HPD
@@ -1220,27 +1231,28 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
- I915_STATE_WARN(dev_priv, intel_irqs_enabled(dev_priv),
- "IRQs enabled\n");
+ INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv),
+ "IRQs enabled\n");
}
-static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
+static u32 hsw_read_dcomp(struct intel_display *display)
{
- if (IS_HASWELL(dev_priv))
- return intel_de_read(dev_priv, D_COMP_HSW);
+ if (display->platform.haswell)
+ return intel_de_read(display, D_COMP_HSW);
else
- return intel_de_read(dev_priv, D_COMP_BDW);
+ return intel_de_read(display, D_COMP_BDW);
}
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
+static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
- if (IS_HASWELL(dev_priv)) {
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (display->platform.haswell) {
if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
- drm_dbg_kms(&dev_priv->drm,
- "Failed to write to D_COMP\n");
+ drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
} else {
- intel_de_write(dev_priv, D_COMP_BDW, val);
- intel_de_posting_read(dev_priv, D_COMP_BDW);
+ intel_de_write(display, D_COMP_BDW, val);
+ intel_de_posting_read(display, D_COMP_BDW);
}
}
@@ -1252,45 +1264,45 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
* register. Callers should take care of disabling all the display engine
* functions, doing the mode unset, fixing interrupts, etc.
*/
-static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+static void hsw_disable_lcpll(struct intel_display *display,
bool switch_to_fclk, bool allow_power_down)
{
u32 val;
- assert_can_disable_lcpll(dev_priv);
+ assert_can_disable_lcpll(display);
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
if (switch_to_fclk) {
val |= LCPLL_CD_SOURCE_FCLK;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_write(display, LCPLL_CTL, val);
- if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
+ if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
- drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
+ drm_err(display->drm, "Switching to FCLK failed\n");
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
}
val |= LCPLL_PLL_DISABLE;
- intel_de_write(dev_priv, LCPLL_CTL, val);
- intel_de_posting_read(dev_priv, LCPLL_CTL);
+ intel_de_write(display, LCPLL_CTL, val);
+ intel_de_posting_read(display, LCPLL_CTL);
- if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
- drm_err(&dev_priv->drm, "LCPLL still locked\n");
+ if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
+ drm_err(display->drm, "LCPLL still locked\n");
- val = hsw_read_dcomp(dev_priv);
+ val = hsw_read_dcomp(display);
val |= D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
+ hsw_write_dcomp(display, val);
ndelay(100);
- if (wait_for((hsw_read_dcomp(dev_priv) &
+ if (wait_for((hsw_read_dcomp(display) &
D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
- drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
+ drm_err(display->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
- intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
- intel_de_posting_read(dev_priv, LCPLL_CTL);
+ intel_de_rmw(display, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
+ intel_de_posting_read(display, LCPLL_CTL);
}
}
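
hsw_disable_lcpll() leans on the driver's write-then-poll helpers (wait_for_us(), intel_de_wait_for_clear()): set a control bit, then spin with a bounded budget until the status bit reacts, warning on timeout. A self-contained model of that idiom, with an in-memory stand-in for the register and an invented iteration budget:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LCPLL_PLL_DISABLE  (1u << 31)
#define LCPLL_PLL_LOCK     (1u << 30)

static uint32_t fake_lcpll = LCPLL_PLL_LOCK;

static uint32_t reg_read(void) { return fake_lcpll; }

static void reg_write(uint32_t v)
{
        fake_lcpll = v;
        if (v & LCPLL_PLL_DISABLE)      /* model the HW dropping lock */
                fake_lcpll &= ~LCPLL_PLL_LOCK;
}

/* Poll until (reg & mask) == 0 or the iteration budget runs out. */
static bool wait_for_clear(uint32_t mask, int tries)
{
        while (tries--)
                if (!(reg_read() & mask))
                        return true;
        return false;
}

int main(void)
{
        reg_write(reg_read() | LCPLL_PLL_DISABLE);
        if (!wait_for_clear(LCPLL_PLL_LOCK, 1000))
                fprintf(stderr, "LCPLL still locked\n");
        else
                puts("LCPLL unlocked");
        return 0;
}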
@@ -1298,11 +1310,12 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
* Fully restores LCPLL, disallowing power down and switching back to LCPLL
* source.
*/
-static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+static void hsw_restore_lcpll(struct intel_display *display)
{
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
u32 val;
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
@@ -1316,35 +1329,35 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
- intel_de_write(dev_priv, LCPLL_CTL, val);
- intel_de_posting_read(dev_priv, LCPLL_CTL);
+ intel_de_write(display, LCPLL_CTL, val);
+ intel_de_posting_read(display, LCPLL_CTL);
}
- val = hsw_read_dcomp(dev_priv);
+ val = hsw_read_dcomp(display);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
+ hsw_write_dcomp(display, val);
- val = intel_de_read(dev_priv, LCPLL_CTL);
+ val = intel_de_read(display, LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
- intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_write(display, LCPLL_CTL, val);
- if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
- drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
+ if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
+ drm_err(display->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
- intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
+ intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
+ if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Switching back to LCPLL failed\n");
}
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
+ intel_update_cdclk(display);
+ intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
}
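
Many of these hunks funnel through intel_de_rmw(reg, clear, set), a read-modify-write helper. The sketch below reproduces its semantics against a plain variable; the register contents and bit values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0xf0;

/* Read, clear some bits, set others, write back; return the old
 * value, matching the rmw helper's convention. */
static uint32_t rmw(uint32_t clear, uint32_t set)
{
        uint32_t old = fake_reg;

        fake_reg = (old & ~clear) | set;
        return old;
}

int main(void)
{
        rmw(0x30, 0x01);                     /* clear bits 4-5, set bit 0 */
        printf("reg = 0x%02x\n", fake_reg);  /* prints 0xc1 */
        return 0;
}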
/*
@@ -1370,36 +1383,42 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* For more, read "Display Sequences for Package C8" on the hardware
* documentation.
*/
-static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_enable_pc8(struct intel_display *display)
{
- drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ drm_dbg_kms(display->drm, "Enabling package C8+\n");
if (HAS_PCH_LPT_LP(dev_priv))
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_LP_PARTITION_LEVEL_DISABLE, 0);
lpt_disable_clkout_dp(dev_priv);
- hsw_disable_lcpll(dev_priv, true, true);
+ hsw_disable_lcpll(display, true, true);
}
-static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_disable_pc8(struct intel_display *display)
{
- drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ drm_dbg_kms(display->drm, "Disabling package C8+\n");
- hsw_restore_lcpll(dev_priv);
+ hsw_restore_lcpll(display);
intel_init_pch_refclk(dev_priv);
/* Many display registers don't survive PC8+ */
+#ifdef I915 /* FIXME */
intel_clock_gating_init(dev_priv);
+#endif
}
-static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+static void intel_pch_reset_handshake(struct intel_display *display,
bool enable)
{
i915_reg_t reg;
u32 reset_bits;
- if (IS_IVYBRIDGE(dev_priv)) {
+ if (display->platform.ivybridge) {
reg = GEN7_MSG_CTL;
reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
} else {
@@ -1407,59 +1426,60 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
}
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
- intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
+ intel_de_rmw(display, reg, reset_bits, enable ? reset_bits : 0);
}
-static void skl_display_core_init(struct drm_i915_private *dev_priv,
+static void skl_display_core_init(struct intel_display *display,
bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_enable(display, well);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_MISC_IO);
+ intel_power_well_enable(display, well);
mutex_unlock(&power_domains->lock);
- intel_cdclk_init_hw(dev_priv);
+ intel_cdclk_init_hw(display);
- gen9_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(display);
if (resume)
- intel_dmc_load_program(dev_priv);
+ intel_dmc_load_program(display);
}
-static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void skl_display_core_uninit(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- gen9_disable_dc_states(dev_priv);
+ gen9_disable_dc_states(display);
/* TODO: disable DMC program */
- gen9_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(display);
- intel_cdclk_uninit_hw(dev_priv);
+ intel_cdclk_uninit_hw(display);
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
@@ -1472,20 +1492,20 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
* Note that even though the driver's request is removed power well 1
* may stay enabled after this due to DMC's own request on it.
*/
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
+static void bxt_display_core_init(struct intel_display *display, bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
/*
* NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
@@ -1493,41 +1513,41 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
	 * Move the handshake programming to the initialization sequence.
	 * Previously this was left up to the BIOS.
*/
- intel_pch_reset_handshake(dev_priv, false);
+ intel_pch_reset_handshake(display, false);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/* Enable PG1 */
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_enable(display, well);
mutex_unlock(&power_domains->lock);
- intel_cdclk_init_hw(dev_priv);
+ intel_cdclk_init_hw(display);
- gen9_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(display);
if (resume)
- intel_dmc_load_program(dev_priv);
+ intel_dmc_load_program(display);
}
-static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+static void bxt_display_core_uninit(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- gen9_disable_dc_states(dev_priv);
+ gen9_disable_dc_states(display);
/* TODO: disable DMC program */
- gen9_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(display);
- intel_cdclk_uninit_hw(dev_priv);
+ intel_cdclk_uninit_hw(display);
/* The spec doesn't call for removing the reset handshake flag */
@@ -1538,8 +1558,8 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
@@ -1576,20 +1596,21 @@ static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
{}
};
-static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+static void tgl_bw_buddy_init(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum intel_dram_type type = dev_priv->dram_info.type;
u8 num_channels = dev_priv->dram_info.num_channels;
const struct buddy_page_mask *table;
- unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask;
+ unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
int config, i;
	/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
- if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
+ if (display->platform.dgfx && !display->platform.dg1)
return;
- if (IS_ALDERLAKE_S(dev_priv) ||
- (IS_ROCKETLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)))
+ if (display->platform.alderlake_s ||
+ (display->platform.rocketlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)))
/* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
@@ -1601,43 +1622,44 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
break;
if (table[config].page_mask == 0) {
- drm_dbg(&dev_priv->drm,
- "Unknown memory configuration; disabling address buddy logic.\n");
+ drm_dbg_kms(display->drm,
+ "Unknown memory configuration; disabling address buddy logic.\n");
for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
- intel_de_write(dev_priv, BW_BUDDY_CTL(i),
+ intel_de_write(display, BW_BUDDY_CTL(i),
BW_BUDDY_DISABLE);
} else {
for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
- intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
+ intel_de_write(display, BW_BUDDY_PAGE_MASK(i),
table[config].page_mask);
/* Wa_22010178259:tgl,dg1,rkl,adl-s */
- if (DISPLAY_VER(dev_priv) == 12)
- intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
+ if (DISPLAY_VER(display) == 12)
+ intel_de_rmw(display, BW_BUDDY_CTL(i),
BW_BUDDY_TLB_REQ_TIMER_MASK,
BW_BUDDY_TLB_REQ_TIMER(0x8));
}
}
}
-static void icl_display_core_init(struct drm_i915_private *dev_priv,
+static void icl_display_core_init(struct intel_display *display,
bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(dev_priv));
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/* 2. Initialize all combo phys */
@@ -1648,81 +1670,82 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
* The AUX IO power wells will be enabled on demand.
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_enable(display, well);
mutex_unlock(&power_domains->lock);
- if (DISPLAY_VER(dev_priv) == 14)
- intel_de_rmw(dev_priv, DC_STATE_EN,
+ if (DISPLAY_VER(display) == 14)
+ intel_de_rmw(display, DC_STATE_EN,
HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
/* 4. Enable CDCLK. */
- intel_cdclk_init_hw(dev_priv);
+ intel_cdclk_init_hw(display);
- if (DISPLAY_VER(dev_priv) >= 12)
- gen12_dbuf_slices_config(dev_priv);
+ if (DISPLAY_VER(display) >= 12)
+ gen12_dbuf_slices_config(display);
/* 5. Enable DBUF. */
- gen9_dbuf_enable(dev_priv);
+ gen9_dbuf_enable(display);
/* 6. Setup MBUS. */
- icl_mbus_init(dev_priv);
+ icl_mbus_init(display);
/* 7. Program arbiter BW_BUDDY registers */
- if (DISPLAY_VER(dev_priv) >= 12)
- tgl_bw_buddy_init(dev_priv);
+ if (DISPLAY_VER(display) >= 12)
+ tgl_bw_buddy_init(display);
/* 8. Ensure PHYs have completed calibration and adaptation */
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
intel_snps_phy_wait_for_calibration(dev_priv);
/* 9. XE2_HPD: Program CHICKEN_MISC_2 before any cursor or planes are enabled */
- if (DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 1))
- intel_de_rmw(dev_priv, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);
+ if (DISPLAY_VERx100(display) == 1401)
+ intel_de_rmw(display, CHICKEN_MISC_2, BMG_DARB_HALF_BLK_END_BURST, 1);
if (resume)
- intel_dmc_load_program(dev_priv);
+ intel_dmc_load_program(display);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
- if (IS_DISPLAY_VER_FULL(dev_priv, IP_VER(12, 0), IP_VER(13, 0)))
- intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
+ if (IS_DISPLAY_VERx100(display, 1200, 1300))
+ intel_de_rmw(display, GEN11_CHICKEN_DCPR_2, 0,
DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
- if (DISPLAY_VER(dev_priv) == 13)
- intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
+ if (DISPLAY_VER(display) == 13)
+ intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
/* Wa_15013987218 */
- if (DISPLAY_VER(dev_priv) == 20) {
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ if (DISPLAY_VER(display) == 20) {
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 0);
}
}
-static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void icl_display_core_uninit(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *well;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- gen9_disable_dc_states(dev_priv);
- intel_dmc_disable_program(dev_priv);
+ gen9_disable_dc_states(display);
+ intel_dmc_disable_program(display);
	/* 1. Disable all display engine functions -> already done */
/* 2. Disable DBUF */
- gen9_dbuf_disable(dev_priv);
+ gen9_dbuf_disable(display);
/* 3. Disable CD clock */
- intel_cdclk_uninit_hw(dev_priv);
+ intel_cdclk_uninit_hw(display);
- if (DISPLAY_VER(dev_priv) == 14)
- intel_de_rmw(dev_priv, DC_STATE_EN, 0,
+ if (DISPLAY_VER(display) == 14)
+ intel_de_rmw(display, DC_STATE_EN, 0,
HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);
/*
@@ -1731,20 +1754,20 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
* disabled at this point.
*/
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
+ well = lookup_power_well(display, SKL_DISP_PW_1);
+ intel_power_well_disable(display, well);
mutex_unlock(&power_domains->lock);
/* 5. */
intel_combo_phy_uninit(dev_priv);
}
-static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+static void chv_phy_control_init(struct intel_display *display)
{
struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+ lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
/*
* DISPLAY_PHY_CONTROL can get corrupted if read. As a
@@ -1753,7 +1776,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* power well state and lane status to reconstruct the
* expected initial value.
*/
- dev_priv->display.power.chv_phy_control =
+ display->power.chv_phy_control =
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
@@ -1767,39 +1790,39 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
	 * override and set the lane powerdown bits according to the
* current lane status.
*/
- if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
- u32 status = intel_de_read(dev_priv, DPLL(dev_priv, PIPE_A));
+ if (intel_power_well_is_enabled(display, cmn_bc)) {
+ u32 status = intel_de_read(display, DPLL(display, PIPE_A));
unsigned int mask;
mask = status & DPLL_PORTB_READY_MASK;
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
mask = (status & DPLL_PORTC_READY_MASK) >> 4;
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
- dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+ display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
- dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
+ display->power.chv_phy_assert[DPIO_PHY0] = false;
} else {
- dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
+ display->power.chv_phy_assert[DPIO_PHY0] = true;
}
- if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
- u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
+ if (intel_power_well_is_enabled(display, cmn_d)) {
+ u32 status = intel_de_read(display, DPIO_PHY_STATUS);
unsigned int mask;
mask = status & DPLL_PORTD_READY_MASK;
@@ -1807,42 +1830,42 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
- dev_priv->display.power.chv_phy_control |=
+ display->power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
- dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+ display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
- dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
+ display->power.chv_phy_assert[DPIO_PHY1] = false;
} else {
- dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
+ display->power.chv_phy_assert[DPIO_PHY1] = true;
}
- drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
- dev_priv->display.power.chv_phy_control);
+ drm_dbg_kms(display->drm, "Initial PHY_CONTROL=0x%08x\n",
+ display->power.chv_phy_control);
/* Defer application of initial phy_control to enabling the powerwell */
}
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+static void vlv_cmnlane_wa(struct intel_display *display)
{
struct i915_power_well *cmn =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *disp2d =
- lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
+ lookup_power_well(display, VLV_DISP_PW_DISP2D);
	/* If the display might already be active, skip this */
- if (intel_power_well_is_enabled(dev_priv, cmn) &&
- intel_power_well_is_enabled(dev_priv, disp2d) &&
- intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
+ if (intel_power_well_is_enabled(display, cmn) &&
+ intel_power_well_is_enabled(display, disp2d) &&
+ intel_de_read(display, DPIO_CTL) & DPIO_CMNRST)
return;
- drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
+ drm_dbg_kms(display->drm, "toggling display PHY side reset\n");
/* cmnlane needs DPLL registers */
- intel_power_well_enable(dev_priv, disp2d);
+ intel_power_well_enable(display, disp2d);
/*
* From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
@@ -1851,11 +1874,12 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
* Simply ungating isn't enough to reset the PHY enough to get
* ports and lanes running.
*/
- intel_power_well_disable(dev_priv, cmn);
+ intel_power_well_disable(display, cmn);
}
-static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+static bool vlv_punit_is_power_gated(struct intel_display *display, u32 reg0)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
bool ret;
vlv_punit_get(dev_priv);
@@ -1865,14 +1889,14 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0
return ret;
}
-static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+static void assert_ved_power_gated(struct intel_display *display)
{
- drm_WARN(&dev_priv->drm,
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ drm_WARN(display->drm,
+ !vlv_punit_is_power_gated(display, PUNIT_REG_VEDSSPM0),
"VED not power gated\n");
}
-static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+static void assert_isp_power_gated(struct intel_display *display)
{
static const struct pci_device_id isp_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
@@ -1880,16 +1904,16 @@ static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{}
};
- drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ drm_WARN(display->drm, !pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(display, PUNIT_REG_ISPSSPM0),
"ISP not power gated\n");
}
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+static void intel_power_domains_verify_state(struct intel_display *display);
/**
* intel_power_domains_init_hw - initialize hardware power domain state
- * @i915: i915 device instance
+ * @display: display device instance
* @resume: Called from resume code paths or not
*
* This function initializes the hardware power domain state and enables all
@@ -1903,34 +1927,35 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_driver_remove().
*/
-void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
+void intel_power_domains_init_hw(struct intel_display *display, bool resume)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
power_domains->initializing = true;
- if (DISPLAY_VER(i915) >= 11) {
- icl_display_core_init(i915, resume);
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
- bxt_display_core_init(i915, resume);
- } else if (DISPLAY_VER(i915) == 9) {
- skl_display_core_init(i915, resume);
- } else if (IS_CHERRYVIEW(i915)) {
+ if (DISPLAY_VER(display) >= 11) {
+ icl_display_core_init(display, resume);
+ } else if (display->platform.geminilake || display->platform.broxton) {
+ bxt_display_core_init(display, resume);
+ } else if (DISPLAY_VER(display) == 9) {
+ skl_display_core_init(display, resume);
+ } else if (display->platform.cherryview) {
mutex_lock(&power_domains->lock);
- chv_phy_control_init(i915);
+ chv_phy_control_init(display);
mutex_unlock(&power_domains->lock);
- assert_isp_power_gated(i915);
- } else if (IS_VALLEYVIEW(i915)) {
+ assert_isp_power_gated(display);
+ } else if (display->platform.valleyview) {
mutex_lock(&power_domains->lock);
- vlv_cmnlane_wa(i915);
+ vlv_cmnlane_wa(display);
mutex_unlock(&power_domains->lock);
- assert_ved_power_gated(i915);
- assert_isp_power_gated(i915);
- } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
- hsw_assert_cdclk(i915);
- intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
- } else if (IS_IVYBRIDGE(i915)) {
- intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ assert_ved_power_gated(display);
+ assert_isp_power_gated(display);
+ } else if (display->platform.broadwell || display->platform.haswell) {
+ hsw_assert_cdclk(display);
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
+ } else if (display->platform.ivybridge) {
+ intel_pch_reset_handshake(display, !HAS_PCH_NOP(i915));
}
/*
@@ -1939,24 +1964,24 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
- drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915->display.params.disable_power_well) {
- drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
- i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_INIT);
+ if (!display->params.disable_power_well) {
+ drm_WARN_ON(display->drm, power_domains->disable_wakeref);
+ display->power.domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
}
- intel_power_domains_sync_hw(i915);
+ intel_power_domains_sync_hw(display);
power_domains->initializing = false;
}
/**
* intel_power_domains_driver_remove - deinitialize hw power domain state
- * @i915: i915 device instance
+ * @display: display device instance
*
* De-initializes the display power domain HW state. It also ensures that the
* device stays powered up so that the driver can be reloaded.
@@ -1965,19 +1990,20 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* intel_power_domains_disable()) and must be paired with
* intel_power_domains_init_hw().
*/
-void intel_power_domains_driver_remove(struct drm_i915_private *i915)
+void intel_power_domains_driver_remove(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->display.power.domains.init_wakeref);
+ fetch_and_zero(&display->power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
- if (!i915->display.params.disable_power_well)
+ if (!display->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->display.power.domains.disable_wakeref));
+ fetch_and_zero(&display->power.domains.disable_wakeref));
- intel_display_power_flush_work_sync(i915);
+ intel_display_power_flush_work_sync(display);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
/* Keep the power well enabled, but cancel its rpm wakeref. */
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -1985,7 +2011,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
/**
* intel_power_domains_sanitize_state - sanitize power domains state
- * @i915: i915 device instance
+ * @display: display device instance
*
* Sanitize the power domains state during driver loading and system resume.
* The function will disable all display power wells that BIOS has enabled
@@ -1993,22 +2019,22 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
* on it by the time this function is called, after the state of all the
* pipe, encoder, etc. HW resources have been sanitized).
*/
-void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
+void intel_power_domains_sanitize_state(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
- for_each_power_well_reverse(i915, power_well) {
+ for_each_power_well_reverse(display, power_well) {
if (power_well->desc->always_on || power_well->count ||
- !intel_power_well_is_enabled(i915, power_well))
+ !intel_power_well_is_enabled(display, power_well))
continue;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"BIOS left unused %s power well enabled, disabling it\n",
intel_power_well_name(power_well));
- intel_power_well_disable(i915, power_well);
+ intel_power_well_disable(display, power_well);
}
mutex_unlock(&power_domains->lock);
@@ -2016,7 +2042,7 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
/**
* intel_power_domains_enable - enable toggling of display power wells
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Enable the on-demand enabling/disabling of the display power wells. Note that
* power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
@@ -2026,36 +2052,38 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
* of display HW readout (which will acquire the power references reflecting
* the current HW state).
*/
-void intel_power_domains_enable(struct drm_i915_private *i915)
+void intel_power_domains_enable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->display.power.domains.init_wakeref);
+ fetch_and_zero(&display->power.domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
}
/**
* intel_power_domains_disable - disable toggling of display power wells
- * @i915: i915 device instance
+ * @display: display device instance
*
 * Disable the on-demand enabling/disabling of the display power wells. See
* intel_power_domains_enable() for which power wells this call controls.
*/
-void intel_power_domains_disable(struct drm_i915_private *i915)
+void intel_power_domains_disable(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
- drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
}
/**
* intel_power_domains_suspend - suspend power domain state
- * @i915: i915 device instance
+ * @display: display device instance
* @s2idle: specifies whether we go to idle, or deeper sleep
*
* This function prepares the hardware power domain state before entering
@@ -2064,9 +2092,10 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
* It must be called with power domains already disabled (after a call to
* intel_power_domains_disable()) and paired with intel_power_domains_resume().
*/
-void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
+void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
@@ -2080,9 +2109,9 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
* that would be blocked if the firmware was inactive.
*/
if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
- intel_dmc_has_payload(i915)) {
+ intel_dmc_has_payload(display)) {
intel_display_power_flush_work(i915);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
return;
}
@@ -2090,26 +2119,26 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
* Even if power well support was disabled we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
- if (!i915->display.params.disable_power_well)
+ if (!display->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->display.power.domains.disable_wakeref));
+ fetch_and_zero(&display->power.domains.disable_wakeref));
intel_display_power_flush_work(i915);
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
- if (DISPLAY_VER(i915) >= 11)
- icl_display_core_uninit(i915);
- else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- bxt_display_core_uninit(i915);
- else if (DISPLAY_VER(i915) == 9)
- skl_display_core_uninit(i915);
+ if (DISPLAY_VER(display) >= 11)
+ icl_display_core_uninit(display);
+ else if (display->platform.geminilake || display->platform.broxton)
+ bxt_display_core_uninit(display);
+ else if (DISPLAY_VER(display) == 9)
+ skl_display_core_uninit(display);
power_domains->display_core_suspended = true;
}
/**
* intel_power_domains_resume - resume power domain state
- * @i915: i915 device instance
+ * @display: display device instance
*
 * This function resumes the hardware power domain state during system resume.
*
@@ -2117,45 +2146,46 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_suspend().
*/
-void intel_power_domains_resume(struct drm_i915_private *i915)
+void intel_power_domains_resume(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
if (power_domains->display_core_suspended) {
- intel_power_domains_init_hw(i915, true);
+ intel_power_domains_init_hw(display, true);
power_domains->display_core_suspended = false;
} else {
- drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
+ drm_WARN_ON(display->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
- intel_power_domains_verify_state(i915);
+ intel_power_domains_verify_state(display);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-static void intel_power_domains_dump_info(struct drm_i915_private *i915)
+static void intel_power_domains_dump_info(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
- for_each_power_well(i915, power_well) {
+ for_each_power_well(display, power_well) {
enum intel_display_power_domain domain;
- drm_dbg(&i915->drm, "%-25s %d\n",
- intel_power_well_name(power_well), intel_power_well_refcount(power_well));
+ drm_dbg_kms(display->drm, "%-25s %d\n",
+ intel_power_well_name(power_well), intel_power_well_refcount(power_well));
for_each_power_domain(domain, intel_power_well_domains(power_well))
- drm_dbg(&i915->drm, " %-23s %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg_kms(display->drm, " %-23s %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
}
/**
* intel_power_domains_verify_state - verify the HW/SW state for all power wells
- * @i915: i915 device instance
+ * @display: display device instance
*
* Verify if the reference count of each power well matches its HW enabled
* state and the total refcount of the domains it belongs to. This must be
@@ -2163,9 +2193,9 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
* acquiring reference counts for any power wells in use and disabling the
* ones left on by BIOS but not required by any active output.
*/
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+static void intel_power_domains_verify_state(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
struct i915_power_well *power_well;
bool dump_domain_info;
@@ -2174,16 +2204,16 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
verify_async_put_domains_state(power_domains);
dump_domain_info = false;
- for_each_power_well(i915, power_well) {
+ for_each_power_well(display, power_well) {
enum intel_display_power_domain domain;
int domains_count;
bool enabled;
- enabled = intel_power_well_is_enabled(i915, power_well);
+ enabled = intel_power_well_is_enabled(display, power_well);
if ((intel_power_well_refcount(power_well) ||
intel_power_well_is_always_on(power_well)) !=
enabled)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"power well %s state mismatch (refcount %d/enabled %d)",
intel_power_well_name(power_well),
intel_power_well_refcount(power_well), enabled);
@@ -2193,7 +2223,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
domains_count += power_domains->domain_use_count[domain];
if (intel_power_well_refcount(power_well) != domains_count) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"power well %s refcount/domain refcount mismatch "
"(refcount %d/domains refcount %d)\n",
intel_power_well_name(power_well),
@@ -2207,7 +2237,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
static bool dumped;
if (!dumped) {
- intel_power_domains_dump_info(i915);
+ intel_power_domains_dump_info(display);
dumped = true;
}
}
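
The verification above enforces two invariants per well: it must be enabled iff it is always-on or its refcount is non-zero, and its refcount must equal the sum of its domains' use counts. A standalone sketch of that check, with made-up wells and a fixed two domains per well for brevity:

#include <stdbool.h>
#include <stdio.h>

struct well {
        const char *name;
        bool always_on, enabled;
        int refcount;
        int domain_use[2];      /* per-domain use counts */
};

static void verify(const struct well *w)
{
        int domains = w->domain_use[0] + w->domain_use[1];

        if ((w->refcount || w->always_on) != w->enabled)
                printf("%s: state mismatch (refcount %d/enabled %d)\n",
                       w->name, w->refcount, w->enabled);
        if (w->refcount != domains)
                printf("%s: refcount %d != domain refcount %d\n",
                       w->name, w->refcount, domains);
}

int main(void)
{
        struct well pw2 = { "PW_2", false, true, 1, { 1, 0 } };  /* ok */
        struct well pw3 = { "PW_3", false, true, 0, { 0, 0 } };  /* left on */

        verify(&pw2);
        verify(&pw3);
        return 0;
}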
@@ -2217,19 +2247,23 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
#else
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+static void intel_power_domains_verify_state(struct intel_display *display)
{
}
#endif
-void intel_display_power_suspend_late(struct drm_i915_private *i915)
+void intel_display_power_suspend_late(struct intel_display *display, bool s2idle)
{
- if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
- IS_BROXTON(i915)) {
- bxt_enable_dc9(i915);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_enable_pc8(i915);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ intel_power_domains_suspend(display, s2idle);
+
+ if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
+ display->platform.broxton) {
+ bxt_enable_dc9(display);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_enable_pc8(display);
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
@@ -2237,61 +2271,66 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
-void intel_display_power_resume_early(struct drm_i915_private *i915)
+void intel_display_power_resume_early(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
- IS_BROXTON(i915)) {
- gen9_sanitize_dc_state(i915);
- bxt_disable_dc9(i915);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_disable_pc8(i915);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (DISPLAY_VER(display) >= 11 || display->platform.geminilake ||
+ display->platform.broxton) {
+ gen9_sanitize_dc_state(display);
+ bxt_disable_dc9(display);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_disable_pc8(display);
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
+
+ intel_power_domains_resume(display);
}
-void intel_display_power_suspend(struct drm_i915_private *i915)
+void intel_display_power_suspend(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 11) {
- icl_display_core_uninit(i915);
- bxt_enable_dc9(i915);
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
- bxt_display_core_uninit(i915);
- bxt_enable_dc9(i915);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_enable_pc8(i915);
+ if (DISPLAY_VER(display) >= 11) {
+ icl_display_core_uninit(display);
+ bxt_enable_dc9(display);
+ } else if (display->platform.geminilake || display->platform.broxton) {
+ bxt_display_core_uninit(display);
+ bxt_enable_dc9(display);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_enable_pc8(display);
}
}
-void intel_display_power_resume(struct drm_i915_private *i915)
+void intel_display_power_resume(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
- if (DISPLAY_VER(i915) >= 11) {
- bxt_disable_dc9(i915);
- icl_display_core_init(i915, true);
- if (intel_dmc_has_payload(i915)) {
+ if (DISPLAY_VER(display) >= 11) {
+ bxt_disable_dc9(display);
+ icl_display_core_init(display, true);
+ if (intel_dmc_has_payload(display)) {
if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
- skl_enable_dc6(i915);
+ skl_enable_dc6(display);
else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
- gen9_enable_dc5(i915);
+ gen9_enable_dc5(display);
}
- } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
- bxt_disable_dc9(i915);
- bxt_display_core_init(i915, true);
- if (intel_dmc_has_payload(i915) &&
+ } else if (display->platform.geminilake || display->platform.broxton) {
+ bxt_disable_dc9(display);
+ bxt_display_core_init(display, true);
+ if (intel_dmc_has_payload(display) &&
(power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
- gen9_enable_dc5(i915);
- } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- hsw_disable_pc8(i915);
+ gen9_enable_dc5(display);
+ } else if (display->platform.haswell || display->platform.broadwell) {
+ hsw_disable_pc8(display);
}
}
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct intel_display *display = &i915->display;
+ struct i915_power_domains *power_domains = &display->power.domains;
int i;
mutex_lock(&power_domains->lock);
@@ -2436,17 +2475,17 @@ d13_port_domains[] = {
};
static void
-intel_port_domains_for_platform(struct drm_i915_private *i915,
+intel_port_domains_for_platform(struct intel_display *display,
const struct intel_ddi_port_domains **domains,
int *domains_size)
{
- if (DISPLAY_VER(i915) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
*domains = d13_port_domains;
*domains_size = ARRAY_SIZE(d13_port_domains);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
*domains = d12_port_domains;
*domains_size = ARRAY_SIZE(d12_port_domains);
- } else if (DISPLAY_VER(i915) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
*domains = d11_port_domains;
*domains_size = ARRAY_SIZE(d11_port_domains);
} else {
@@ -2456,13 +2495,13 @@ intel_port_domains_for_platform(struct drm_i915_private *i915,
}
static const struct intel_ddi_port_domains *
-intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
+intel_port_domains_for_port(struct intel_display *display, enum port port)
{
const struct intel_ddi_port_domains *domains;
int domains_size;
int i;
- intel_port_domains_for_platform(i915, &domains, &domains_size);
+ intel_port_domains_for_platform(display, &domains, &domains_size);
for (i = 0; i < domains_size; i++)
if (port >= domains[i].port_start && port <= domains[i].port_end)
return &domains[i];
@@ -2473,9 +2512,10 @@ intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
- if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_PORT_DDI_IO_A;
return domains->ddi_io + (int)(port - domains->port_start);
@@ -2484,22 +2524,23 @@ intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(display, port);
- if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_PORT_DDI_LANES_A;
return domains->ddi_lanes + (int)(port - domains->port_start);
}
static const struct intel_ddi_port_domains *
-intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
+intel_port_domains_for_aux_ch(struct intel_display *display, enum aux_ch aux_ch)
{
const struct intel_ddi_port_domains *domains;
int domains_size;
int i;
- intel_port_domains_for_platform(i915, &domains, &domains_size);
+ intel_port_domains_for_platform(display, &domains, &domains_size);
for (i = 0; i < domains_size; i++)
if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
return &domains[i];
@@ -2510,9 +2551,10 @@ intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
enum intel_display_power_domain
intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_IO_A;
return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
@@ -2521,9 +2563,10 @@ intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux
enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_A;
return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
@@ -2532,9 +2575,10 @@ intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch
enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
- const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+ struct intel_display *display = &i915->display;
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(display, aux_ch);
- if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
+ if (drm_WARN_ON(display->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
return POWER_DOMAIN_AUX_TBT1;
return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
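
The intel_port_domains_for_port()/for_aux_ch() helpers converted above all share one shape: scan a per-platform table of inclusive ranges, then offset the matching entry's base domain by the port's position in the range. An illustrative standalone version, with invented enum values:

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_TC1, PORT_TC2 };
enum domain { DOM_DDI_IO_A = 100, DOM_DDI_IO_TC1 = 200, DOM_INVALID = -1 };

struct port_domains {
        enum port port_start, port_end;  /* inclusive range */
        enum domain ddi_io;              /* base domain for the range */
};

static const struct port_domains table[] = {
        { PORT_A,   PORT_C,   DOM_DDI_IO_A   },
        { PORT_TC1, PORT_TC2, DOM_DDI_IO_TC1 },
};

static enum domain ddi_io_domain(enum port port)
{
        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (port >= table[i].port_start && port <= table[i].port_end)
                        return table[i].ddi_io + (port - table[i].port_start);
        return DOM_INVALID;
}

int main(void)
{
        /* PORT_B offsets the first range, PORT_TC2 the second. */
        printf("%d %d\n", ddi_io_domain(PORT_B), ddi_io_domain(PORT_TC2));
        return 0;
}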
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 425452c5a469..7b294eec4431 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -15,6 +15,7 @@ enum aux_ch;
enum port;
struct drm_i915_private;
struct i915_power_well;
+struct intel_display;
struct intel_encoder;
struct seq_file;
@@ -166,21 +167,21 @@ struct intel_display_power_domain_set {
for ((__domain) = 0; (__domain) < POWER_DOMAIN_NUM; (__domain)++) \
for_each_if(test_bit((__domain), (__mask)->bits))
-int intel_power_domains_init(struct drm_i915_private *dev_priv);
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
-void intel_power_domains_enable(struct drm_i915_private *dev_priv);
-void intel_power_domains_disable(struct drm_i915_private *dev_priv);
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv, bool s2idle);
-void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void intel_power_domains_sanitize_state(struct drm_i915_private *dev_priv);
-
-void intel_display_power_suspend_late(struct drm_i915_private *i915);
-void intel_display_power_resume_early(struct drm_i915_private *i915);
-void intel_display_power_suspend(struct drm_i915_private *i915);
-void intel_display_power_resume(struct drm_i915_private *i915);
-void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+int intel_power_domains_init(struct intel_display *display);
+void intel_power_domains_cleanup(struct intel_display *display);
+void intel_power_domains_init_hw(struct intel_display *display, bool resume);
+void intel_power_domains_driver_remove(struct intel_display *display);
+void intel_power_domains_enable(struct intel_display *display);
+void intel_power_domains_disable(struct intel_display *display);
+void intel_power_domains_suspend(struct intel_display *display, bool s2idle);
+void intel_power_domains_resume(struct intel_display *display);
+void intel_power_domains_sanitize_state(struct intel_display *display);
+
+void intel_display_power_suspend_late(struct intel_display *display, bool s2idle);
+void intel_display_power_resume_early(struct intel_display *display);
+void intel_display_power_suspend(struct intel_display *display);
+void intel_display_power_resume(struct intel_display *display);
+void intel_display_power_set_target_dc_state(struct intel_display *display,
u32 state);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
@@ -232,7 +233,7 @@ intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- __intel_display_power_put_async(i915, domain, -1, -1);
+ __intel_display_power_put_async(i915, domain, INTEL_WAKEREF_DEF, -1);
}
static inline void
@@ -241,7 +242,7 @@ intel_display_power_put_async_delay(struct drm_i915_private *i915,
intel_wakeref_t wakeref,
int delay_ms)
{
- __intel_display_power_put_async(i915, domain, -1, delay_ms);
+ __intel_display_power_put_async(i915, domain, INTEL_WAKEREF_DEF, delay_ms);
}
#endif
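
with_intel_display_power() is a for-loop that acquires a wakeref before the body and releases it after exactly one pass; the hunk below switches its release sentinel from 0 to NULL, consistent with wakerefs becoming pointer-typed. A standalone model of the scoped-acquire macro:

#include <stdio.h>

typedef void *wakeref_t;

static wakeref_t power_get(void)   { puts("get"); return (void *)1; }
static void power_put(wakeref_t w) { (void)w; puts("put"); }

/* Run the body once with the wakeref held, then release it. */
#define with_power(wf) \
        for ((wf) = power_get(); (wf); power_put(wf), (wf) = NULL)

int main(void)
{
        wakeref_t wf;

        with_power(wf)
                puts("body runs once with power held");
        return 0;
}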
@@ -297,10 +298,10 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
#define with_intel_display_power(i915, domain, wf) \
for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
- intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL)
#define with_intel_display_power_if_enabled(i915, domain, wf) \
for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \
- intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = NULL)
#endif /* __INTEL_DISPLAY_POWER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 10948b3964ee..0c8ac1af6db7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -3,14 +3,12 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_drv.h"
#include "i915_reg.h"
-
-#include "vlv_sideband_reg.h"
-
+#include "intel_display_core.h"
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
+#include "vlv_sideband_reg.h"
#define __LIST_INLINE_ELEMS(__elem_type, ...) \
((__elem_type[]) { __VA_ARGS__ })
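
__LIST_INLINE_ELEMS(), kept as context here, builds an anonymous array from its arguments with a C99 compound literal; the power-well descriptor tables below use it to declare instance lists inline and to count them with sizeof. A stripped-down demonstration of the trick:

#include <stdio.h>

#define LIST_INLINE_ELEMS(type, ...)  ((type[]) { __VA_ARGS__ })
#define LIST_COUNT(type, ...) \
        (sizeof(LIST_INLINE_ELEMS(type, __VA_ARGS__)) / sizeof(type))

int main(void)
{
        /* The compound literal lives for the enclosing block. */
        int *elems = LIST_INLINE_ELEMS(int, 3, 5, 8);
        unsigned n  = LIST_COUNT(int, 3, 5, 8);

        for (unsigned i = 0; i < n; i++)
                printf("%d ", elems[i]);
        printf("(%u elems)\n", n);
        return 0;
}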
@@ -1586,6 +1584,136 @@ static const struct i915_power_well_desc_list xe2lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
};
+/*
+ * Xe3 changes the power well hierarchy slightly from Xe_LPD+; PGB now
+ * depends on PG1 instead of PG2:
+ *
+ * PG0
+ * |
+ * --PG1--
+ * / | \
+ * PGA PGB PG2
+ * / \
+ * PGC PGD
+ */
+
+#define XE3LPD_PW_C_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C
+
+#define XE3LPD_PW_D_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_D, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_D
+
+#define XE3LPD_PW_2_POWER_DOMAINS \
+ XE3LPD_PW_C_POWER_DOMAINS, \
+ XE3LPD_PW_D_POWER_DOMAINS, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_TRANSCODER_D, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4
+
+I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_2,
+ XE3LPD_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_b,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_c,
+ XE3LPD_PW_C_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_pw_d,
+ XE3LPD_PW_D_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc xe3lpd_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xe3lpd_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xe3lpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xe3lpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_D", &xe3lpd_pwdoms_pw_d,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_D),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
+ I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
+ I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
+ I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3),
+ I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4),
+ ),
+ .ops = &xelpdp_aux_power_well_ops,
+ },
+};
+
+I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off,
+ POWER_DOMAIN_DC_OFF,
+ XE3LPD_PW_2_POWER_DOMAINS,
+ XE3LPD_PW_C_POWER_DOMAINS,
+ XE3LPD_PW_D_POWER_DOMAINS,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &xe3lpd_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list xe3lpd_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
+ I915_PW_DESCRIPTORS(xe3lpd_power_wells_main),
+ I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+};
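+
+/*
+ * Illustrative usage, not part of the patch: this list is consumed by
+ * intel_display_power_map_init() below, where set_power_wells() is
+ * assumed to be a wrapper macro supplying the array size:
+ *
+ *   set_power_wells(power_domains, xe3lpd_power_wells);
+ *     ~> __set_power_wells(power_domains, xe3lpd_power_wells,
+ *                          ARRAY_SIZE(xe3lpd_power_wells));
+ */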
+
static void init_power_well_domains(const struct i915_power_well_instance *inst,
struct i915_power_well *power_well)
{
@@ -1622,9 +1750,9 @@ __set_power_wells(struct i915_power_domains *power_domains,
const struct i915_power_well_desc_list *power_well_descs,
int power_well_descs_sz)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
u64 power_well_ids = 0;
const struct i915_power_well_desc_list *desc_list;
const struct i915_power_well_desc *desc;
@@ -1648,7 +1776,7 @@ __set_power_wells(struct i915_power_domains *power_domains,
enum i915_power_well_id id = inst->id;
pw->desc = desc;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
overflows_type(inst - desc->instances->list, pw->instance_idx));
pw->instance_idx = inst - desc->instances->list;
@@ -1659,8 +1787,8 @@ __set_power_wells(struct i915_power_domains *power_domains,
if (id == DISP_PW_ID_NONE)
continue;
- drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
- drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
+ drm_WARN_ON(display->drm, id >= sizeof(power_well_ids) * 8);
+ drm_WARN_ON(display->drm, power_well_ids & BIT_ULL(id));
power_well_ids |= BIT_ULL(id);
}
@@ -1681,51 +1809,53 @@ __set_power_wells(struct i915_power_domains *power_domains,
*/
int intel_display_power_map_init(struct i915_power_domains *power_domains)
{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- display.power.domains);
+ struct intel_display *display = container_of(power_domains,
+ struct intel_display,
+ power.domains);
/*
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
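/*
 * E.g. for xe3lpd_power_wells above: PW_1 comes up before DC_off, which
 * comes up before the PW_2/PW_A..PW_D group, with the PICA wells last;
 * disabling walks the same list in reverse.
 */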
- if (!HAS_DISPLAY(i915)) {
+ if (!HAS_DISPLAY(display)) {
power_domains->power_well_count = 0;
return 0;
}
- if (DISPLAY_VER(i915) >= 20)
+ if (DISPLAY_VER(display) >= 30)
+ return set_power_wells(power_domains, xe3lpd_power_wells);
+ else if (DISPLAY_VER(display) >= 20)
return set_power_wells(power_domains, xe2lpd_power_wells);
- else if (DISPLAY_VER(i915) >= 14)
+ else if (DISPLAY_VER(display) >= 14)
return set_power_wells(power_domains, xelpdp_power_wells);
- else if (IS_DG2(i915))
+ else if (display->platform.dg2)
return set_power_wells(power_domains, xehpd_power_wells);
- else if (DISPLAY_VER(i915) >= 13)
+ else if (DISPLAY_VER(display) >= 13)
return set_power_wells(power_domains, xelpd_power_wells);
- else if (IS_DG1(i915))
+ else if (display->platform.dg1)
return set_power_wells(power_domains, dg1_power_wells);
- else if (IS_ALDERLAKE_S(i915))
+ else if (display->platform.alderlake_s)
return set_power_wells(power_domains, adls_power_wells);
- else if (IS_ROCKETLAKE(i915))
+ else if (display->platform.rocketlake)
return set_power_wells(power_domains, rkl_power_wells);
- else if (DISPLAY_VER(i915) == 12)
+ else if (DISPLAY_VER(display) == 12)
return set_power_wells(power_domains, tgl_power_wells);
- else if (DISPLAY_VER(i915) == 11)
+ else if (DISPLAY_VER(display) == 11)
return set_power_wells(power_domains, icl_power_wells);
- else if (IS_GEMINILAKE(i915))
+ else if (display->platform.geminilake)
return set_power_wells(power_domains, glk_power_wells);
- else if (IS_BROXTON(i915))
+ else if (display->platform.broxton)
return set_power_wells(power_domains, bxt_power_wells);
- else if (DISPLAY_VER(i915) == 9)
+ else if (DISPLAY_VER(display) == 9)
return set_power_wells(power_domains, skl_power_wells);
- else if (IS_CHERRYVIEW(i915))
+ else if (display->platform.cherryview)
return set_power_wells(power_domains, chv_power_wells);
- else if (IS_BROADWELL(i915))
+ else if (display->platform.broadwell)
return set_power_wells(power_domains, bdw_power_wells);
- else if (IS_HASWELL(i915))
+ else if (display->platform.haswell)
return set_power_wells(power_domains, hsw_power_wells);
- else if (IS_VALLEYVIEW(i915))
+ else if (display->platform.valleyview)
return set_power_wells(power_domains, vlv_power_wells);
- else if (IS_I830(i915))
+ else if (display->platform.i830)
return set_power_wells(power_domains, i830_power_wells);
else
return set_power_wells(power_domains, i9xx_power_wells);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 46e9eff12c23..f45a4f9ba23c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -46,23 +46,23 @@ struct i915_power_well_ops {
* during driver init and resume time, possibly after first calling
* the enable/disable handlers.
*/
- void (*sync_hw)(struct drm_i915_private *i915,
+ void (*sync_hw)(struct intel_display *display,
struct i915_power_well *power_well);
/*
* Enable the well and resources that depend on it (for example
* interrupts located on the well). Called after the 0->1 refcount
* transition.
*/
- void (*enable)(struct drm_i915_private *i915,
+ void (*enable)(struct intel_display *display,
struct i915_power_well *power_well);
/*
* Disable the well and resources that depend on it. Called after
* the 1->0 refcount transition.
*/
- void (*disable)(struct drm_i915_private *i915,
+ void (*disable)(struct intel_display *display,
struct i915_power_well *power_well);
/* Returns the hw enabled state. */
- bool (*is_enabled)(struct drm_i915_private *i915,
+ bool (*is_enabled)(struct intel_display *display,
struct i915_power_well *power_well);
};
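/*
 * Illustrative call pattern, not from this patch: the refcounting in
 * intel_power_well_get()/intel_power_well_put() below invokes these hooks
 * only on the 0->1 and 1->0 transitions:
 *
 *   intel_power_well_get(display, pw);    count 0->1: ops->enable()
 *   intel_power_well_get(display, pw);    count 1->2: no hw access
 *   intel_power_well_put(display, pw);    count 2->1: no hw access
 *   intel_power_well_put(display, pw);    count 1->0: ops->disable()
 */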
@@ -73,12 +73,12 @@ i915_power_well_instance(const struct i915_power_well *power_well)
}
struct i915_power_well *
-lookup_power_well(struct drm_i915_private *i915,
+lookup_power_well(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
struct i915_power_well *power_well;
- for_each_power_well(i915, power_well)
+ for_each_power_well(display, power_well)
if (i915_power_well_instance(power_well)->id == power_well_id)
return power_well;
@@ -89,58 +89,57 @@ lookup_power_well(struct drm_i915_private *i915,
* the first power well and hope the WARN gets reported so we can fix
* our driver.
*/
- drm_WARN(&i915->drm, 1,
+ drm_WARN(display->drm, 1,
"Power well %d not defined for this platform\n",
power_well_id);
- return &i915->display.power.domains.power_wells[0];
+ return &display->power.domains.power_wells[0];
}
-void intel_power_well_enable(struct drm_i915_private *i915,
+void intel_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well));
- power_well->desc->ops->enable(i915, power_well);
+ drm_dbg_kms(display->drm, "enabling %s\n", intel_power_well_name(power_well));
+ power_well->desc->ops->enable(display, power_well);
power_well->hw_enabled = true;
}
-void intel_power_well_disable(struct drm_i915_private *i915,
+void intel_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well));
+ drm_dbg_kms(display->drm, "disabling %s\n", intel_power_well_name(power_well));
power_well->hw_enabled = false;
- power_well->desc->ops->disable(i915, power_well);
+ power_well->desc->ops->disable(display, power_well);
}
-void intel_power_well_sync_hw(struct drm_i915_private *i915,
+void intel_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
- power_well->desc->ops->sync_hw(i915, power_well);
- power_well->hw_enabled =
- power_well->desc->ops->is_enabled(i915, power_well);
+ power_well->desc->ops->sync_hw(display, power_well);
+ power_well->hw_enabled = power_well->desc->ops->is_enabled(display, power_well);
}
-void intel_power_well_get(struct drm_i915_private *i915,
+void intel_power_well_get(struct intel_display *display,
struct i915_power_well *power_well)
{
if (!power_well->count++)
- intel_power_well_enable(i915, power_well);
+ intel_power_well_enable(display, power_well);
}
-void intel_power_well_put(struct drm_i915_private *i915,
+void intel_power_well_put(struct intel_display *display,
struct i915_power_well *power_well)
{
- drm_WARN(&i915->drm, !power_well->count,
+ drm_WARN(display->drm, !power_well->count,
"Use count on power well %s is already zero",
i915_power_well_instance(power_well)->name);
if (!--power_well->count)
- intel_power_well_disable(i915, power_well);
+ intel_power_well_disable(display, power_well);
}
-bool intel_power_well_is_enabled(struct drm_i915_private *i915,
+bool intel_power_well_is_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- return power_well->desc->ops->is_enabled(i915, power_well);
+ return power_well->desc->ops->is_enabled(display, power_well);
}
bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
@@ -148,14 +147,14 @@ bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
return power_well->hw_enabled;
}
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_well_is_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id)
{
struct i915_power_well *power_well;
- power_well = lookup_power_well(dev_priv, power_well_id);
+ power_well = lookup_power_well(display, power_well_id);
- return intel_power_well_is_enabled(dev_priv, power_well);
+ return intel_power_well_is_enabled(display, power_well);
}
bool intel_power_well_is_always_on(struct i915_power_well *power_well)
@@ -184,19 +183,23 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_post_enable(struct intel_display *display,
u8 irq_pipe_mask, bool has_vga)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (has_vga)
- intel_vga_reset_io_mem(dev_priv);
+ intel_vga_reset_io_mem(display);
if (irq_pipe_mask)
gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
-static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (irq_pipe_mask)
gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
@@ -219,12 +222,12 @@ static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
}
static struct intel_digital_port *
-aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
+aux_ch_to_digital_port(struct intel_display *display,
enum aux_ch aux_ch)
{
struct intel_encoder *encoder;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_digital_port *dig_port;
/* We'll check the MST primary port */
@@ -240,11 +243,11 @@ aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
return NULL;
}
-static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
+static enum phy icl_aux_pw_to_phy(struct intel_display *display,
const struct i915_power_well *power_well)
{
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
/*
* FIXME should we care about the (VBT defined) dig_port->aux_ch
@@ -256,7 +259,7 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
}
-static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+static void hsw_wait_for_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well,
bool timeout_expected)
{
@@ -269,39 +272,39 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
* an ack, but rather just wait a fixed amount of time and then
* proceed. This is only used on DG2.
*/
- if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) {
+ if (display->platform.dg2 && power_well->desc->fixed_enable_delay) {
usleep_range(600, 1200);
return;
}
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- if (intel_de_wait_for_set(dev_priv, regs->driver,
+ if (intel_de_wait_for_set(display, regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
- drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
+ drm_dbg_kms(display->drm, "%s power well enable timeout\n",
intel_power_well_name(power_well));
- drm_WARN_ON(&dev_priv->drm, !timeout_expected);
+ drm_WARN_ON(display->drm, !timeout_expected);
}
}
-static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+static u32 hsw_power_well_requesters(struct intel_display *display,
const struct i915_power_well_regs *regs,
int pw_idx)
{
u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
u32 ret;
- ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
- ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
+ ret = intel_de_read(display, regs->bios) & req_mask ? 1 : 0;
+ ret |= intel_de_read(display, regs->driver) & req_mask ? 2 : 0;
if (regs->kvmr.reg)
- ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
- ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
+ ret |= intel_de_read(display, regs->kvmr) & req_mask ? 4 : 0;
+ ret |= intel_de_read(display, regs->debug) & req_mask ? 8 : 0;
return ret;
}
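/*
 * Worked example, illustrative only: a return value of 0xa means the
 * driver (bit 1) and debug (bit 3) requesters still hold the well on; see
 * the "bios:%d driver:%d kvmr:%d debug:%d" decode in
 * hsw_wait_for_power_well_disable() below.
 */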
-static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+static void hsw_wait_for_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
@@ -318,28 +321,28 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
+ wait_for((disabled = !(intel_de_read(display, regs->driver) &
HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
+ (reqs = hsw_power_well_requesters(display, regs, pw_idx)), 1);
if (disabled)
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
intel_power_well_name(power_well),
!!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
-static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+static void gen9_wait_for_power_well_fuses(struct intel_display *display,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- drm_WARN_ON(&dev_priv->drm,
- intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ drm_WARN_ON(display->drm,
+ intel_de_wait_for_set(display, SKL_FUSE_STATUS,
SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
@@ -348,12 +351,12 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
/* Wa_16013190616:adlp */
- if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
+ if (display->platform.alderlake_p && pg == SKL_PG1)
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
/*
* For PW1 we have to wait both for the PW0/PG0 fuse state
@@ -363,112 +366,112 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
* after the enabling.
*/
if (pg == SKL_PG1)
- gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
+ gen9_wait_for_power_well_fuses(display, SKL_PG0);
}
- intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+ hsw_wait_for_power_well_enable(display, power_well, false);
if (power_well->desc->has_fuses) {
enum skl_power_gate pg;
- pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
SKL_PW_CTL_IDX_TO_PG(pw_idx);
- gen9_wait_for_power_well_fuses(dev_priv, pg);
+ gen9_wait_for_power_well_fuses(display, pg);
}
- hsw_power_well_post_enable(dev_priv,
+ hsw_power_well_post_enable(display,
power_well->desc->irq_pipe_mask,
power_well->desc->has_vga);
}
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+static void hsw_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- hsw_power_well_pre_disable(dev_priv,
+ hsw_power_well_pre_disable(display,
power_well->desc->irq_pipe_mask);
- intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
- hsw_wait_for_power_well_disable(dev_priv, power_well);
+ intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
+ hsw_wait_for_power_well_disable(display, power_well);
}
-static bool intel_aux_ch_is_edp(struct drm_i915_private *i915, enum aux_ch aux_ch)
+static bool intel_aux_ch_is_edp(struct intel_display *display, enum aux_ch aux_ch)
{
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
return dig_port && dig_port->base.type == INTEL_OUTPUT_EDP;
}
static void
-icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_combo_phy_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+ drm_WARN_ON(display->drm, !display->platform.icelake);
- intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
+ intel_de_rmw(display, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
/*
* FIXME not sure if we should derive the PHY from the pw_idx, or
* from the VBT defined AUX_CH->DDI->PHY mapping.
*/
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
0, ICL_LANE_ENABLE_AUX);
- hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+ hsw_wait_for_power_well_enable(display, power_well, false);
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_aux_ch_is_edp(dev_priv, ICL_AUX_PW_TO_CH(pw_idx)))
- intel_de_rmw(dev_priv, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
+ !intel_aux_ch_is_edp(display, ICL_AUX_PW_TO_CH(pw_idx)))
+ intel_de_rmw(display, ICL_PORT_TX_DW6_AUX(ICL_AUX_PW_TO_PHY(pw_idx)),
0, O_FUNC_OVRD_EN | O_LDO_BYPASS_CRI);
}
static void
-icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+icl_combo_phy_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+ drm_WARN_ON(display->drm, !display->platform.icelake);
/*
* FIXME not sure if we should derive the PHY from the pw_idx, or
* from the VBT defined AUX_CH->DDI->PHY mapping.
*/
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
+ intel_de_rmw(display, ICL_PORT_CL_DW12(ICL_AUX_PW_TO_PHY(pw_idx)),
ICL_LANE_ENABLE_AUX, 0);
- intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
+ intel_de_rmw(display, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
- hsw_wait_for_power_well_disable(dev_priv, power_well);
+ hsw_wait_for_power_well_disable(display, power_well);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+static void icl_tc_port_assert_ref_held(struct intel_display *display,
struct i915_power_well *power_well,
struct intel_digital_port *dig_port)
{
- if (drm_WARN_ON(&dev_priv->drm, !dig_port))
+ if (drm_WARN_ON(display->drm, !dig_port))
return;
- if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
return;
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));
}
#else
-static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+static void icl_tc_port_assert_ref_held(struct intel_display *display,
struct i915_power_well *power_well,
struct intel_digital_port *dig_port)
{
@@ -478,8 +481,9 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
-static void icl_tc_cold_exit(struct drm_i915_private *i915)
+static void icl_tc_cold_exit(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret, tries = 0;
while (1) {
@@ -500,21 +504,22 @@ static void icl_tc_cold_exit(struct drm_i915_private *i915)
}
static void
-icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_tc_phy_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
- icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
+ icl_tc_port_assert_ref_held(display, power_well, dig_port);
- intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch),
+ intel_de_rmw(display, DP_AUX_CH_CTL(aux_ch),
DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);
- intel_de_rmw(dev_priv, regs->driver,
+ intel_de_rmw(display, regs->driver,
0,
HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
@@ -524,51 +529,53 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
* exit sequence.
*/
timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
- if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
- icl_tc_cold_exit(dev_priv);
+ if (DISPLAY_VER(display) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ icl_tc_cold_exit(display);
- hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
+ hsw_wait_for_power_well_enable(display, power_well, timeout_expected);
- if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
+ if (DISPLAY_VER(display) >= 12 && !is_tbt) {
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) &
DKL_CMN_UC_DW27_UC_HEALTH, 1))
- drm_warn(&dev_priv->drm,
+ drm_warn(display->drm,
"Timeout waiting TC uC health\n");
}
}
static void
-icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
+icl_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(dev_priv, phy))
- return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
- else if (IS_ICELAKE(dev_priv))
- return icl_combo_phy_aux_power_well_enable(dev_priv,
+ return icl_tc_phy_aux_power_well_enable(display, power_well);
+ else if (display->platform.icelake)
+ return icl_combo_phy_aux_power_well_enable(display,
power_well);
else
- return hsw_power_well_enable(dev_priv, power_well);
+ return hsw_power_well_enable(display, power_well);
}
static void
-icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
+icl_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(dev_priv, phy))
- return hsw_power_well_disable(dev_priv, power_well);
- else if (IS_ICELAKE(dev_priv))
- return icl_combo_phy_aux_power_well_disable(dev_priv,
+ return hsw_power_well_disable(display, power_well);
+ else if (display->platform.icelake)
+ return icl_combo_phy_aux_power_well_disable(display,
power_well);
else
- return hsw_power_well_disable(dev_priv, power_well);
+ return hsw_power_well_disable(display, power_well);
}
/*
@@ -576,7 +583,7 @@ icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool hsw_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
@@ -586,7 +593,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
HSW_PWR_WELL_CTL_STATE(pw_idx);
u32 val;
- val = intel_de_read(dev_priv, regs->driver);
+ val = intel_de_read(display, regs->driver);
/*
* On GEN9 big core due to a DMC bug the driver's request bits for PW1
@@ -594,27 +601,29 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
* BIOS's own request bits, which are forced-on for these power wells
* when exiting DC5/6.
*/
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
- val |= intel_de_read(dev_priv, regs->bios);
+ val |= intel_de_read(display, regs->bios);
return (val & mask) == mask;
}
-static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+static void assert_can_enable_dc9(struct intel_display *display)
{
- drm_WARN_ONCE(&dev_priv->drm,
- (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ drm_WARN_ONCE(display->drm,
+ (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC9),
"DC9 already programmed to be enabled.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, DC_STATE_EN) &
+ drm_WARN_ONCE(display->drm,
+ intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled to enable DC9.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
+ drm_WARN_ONCE(display->drm,
+ intel_de_read(display, HSW_PWR_WELL_CTL2) &
HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
"Power well 2 on.\n");
- drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv),
"Interrupts not disabled yet.\n");
/*
@@ -626,12 +635,14 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
*/
}
-static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+static void assert_can_disable_dc9(struct intel_display *display)
{
- drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv),
"Interrupts not disabled yet.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, DC_STATE_EN) &
+ drm_WARN_ONCE(display->drm,
+ intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled.\n");
@@ -644,14 +655,14 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
*/
}
-static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+static void gen9_write_dc_state(struct intel_display *display,
u32 state)
{
int rewrites = 0;
int rereads = 0;
u32 v;
- intel_de_write(dev_priv, DC_STATE_EN, state);
+ intel_de_write(display, DC_STATE_EN, state);
/* It has been observed that disabling the dc6 state sometimes
* doesn't stick and dmc keeps returning old value. Make sure
@@ -659,10 +670,10 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
* we are confident that state is exactly what we want.
*/
do {
- v = intel_de_read(dev_priv, DC_STATE_EN);
+ v = intel_de_read(display, DC_STATE_EN);
if (v != state) {
- intel_de_write(dev_priv, DC_STATE_EN, state);
+ intel_de_write(display, DC_STATE_EN, state);
rewrites++;
rereads = 0;
} else if (rereads++ > 5) {
@@ -672,29 +683,29 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
} while (rewrites < 100);
if (v != state)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Writing dc state to 0x%x failed, now 0x%x\n",
state, v);
/* Most of the time we need one retry, avoid spam */
if (rewrites > 1)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Rewrote dc state to 0x%x %d times\n",
state, rewrites);
}
-static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
+static u32 gen9_dc_mask(struct intel_display *display)
{
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
| DC_STATE_EN_DC9;
- else if (DISPLAY_VER(dev_priv) == 11)
+ else if (DISPLAY_VER(display) == 11)
mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
mask |= DC_STATE_EN_DC9;
else
mask |= DC_STATE_EN_UPTO_DC6;
@@ -702,17 +713,17 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
return mask;
}
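/*
 * Worked example, illustrative only: DISPLAY_VER 11 yields
 * DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9, while
 * gen9 big core (skl/kbl/cfl) ends up with UPTO_DC5 | UPTO_DC6 only.
 */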
-void gen9_sanitize_dc_state(struct drm_i915_private *i915)
+void gen9_sanitize_dc_state(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
u32 val;
- if (!HAS_DISPLAY(i915))
+ if (!HAS_DISPLAY(display))
return;
- val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915);
+ val = intel_de_read(display, DC_STATE_EN) & gen9_dc_mask(display);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Resetting DC state tracking from %02x to %02x\n",
power_domains->dc_state, val);
power_domains->dc_state = val;
@@ -720,7 +731,7 @@ void gen9_sanitize_dc_state(struct drm_i915_private *i915)
/**
* gen9_set_dc_state - set target display C power state
- * @dev_priv: i915 device instance
+ * @display: display instance
* @state: target DC power state
* - DC_STATE_DISABLE
* - DC_STATE_EN_UPTO_DC5
@@ -741,259 +752,264 @@ void gen9_sanitize_dc_state(struct drm_i915_private *i915)
* back on and register state is restored. This is guaranteed by the MMIO write
* to DC_STATE_EN blocking until the state is restored.
*/
-void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
+void gen9_set_dc_state(struct intel_display *display, u32 state)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
u32 val;
u32 mask;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ if (drm_WARN_ON_ONCE(display->drm,
state & ~power_domains->allowed_dc_mask))
state &= power_domains->allowed_dc_mask;
- val = intel_de_read(dev_priv, DC_STATE_EN);
- mask = gen9_dc_mask(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
+ val = intel_de_read(display, DC_STATE_EN);
+ mask = gen9_dc_mask(display);
+ drm_dbg_kms(display->drm, "Setting DC state from %02x to %02x\n",
val & mask, state);
/* Check if DMC is ignoring our DC state requests */
if ((val & mask) != power_domains->dc_state)
- drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
+ drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n",
power_domains->dc_state, val & mask);
val &= ~mask;
val |= state;
- gen9_write_dc_state(dev_priv, val);
+ gen9_write_dc_state(display, val);
power_domains->dc_state = val & mask;
}
-static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_enable_dc3co(struct intel_display *display)
{
- drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
- gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+ drm_dbg_kms(display->drm, "Enabling DC3CO\n");
+ gen9_set_dc_state(display, DC_STATE_EN_DC3CO);
}
-static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_disable_dc3co(struct intel_display *display)
{
- drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
- intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ drm_dbg_kms(display->drm, "Disabling DC3CO\n");
+ intel_de_rmw(display, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
/*
* Delay of 200us for DC3CO exit time, per Bspec 49196
*/
usleep_range(200, 210);
}
-static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+static void assert_can_enable_dc5(struct intel_display *display)
{
+ struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
enum i915_power_well_id high_pg;
/* Power wells at this level and above must be disabled for DC5 entry */
- if (DISPLAY_VER(dev_priv) == 12)
+ if (DISPLAY_VER(display) == 12)
high_pg = ICL_DISP_PW_3;
else
high_pg = SKL_DISP_PW_2;
- drm_WARN_ONCE(&dev_priv->drm,
- intel_display_power_well_is_enabled(dev_priv, high_pg),
+ drm_WARN_ONCE(display->drm,
+ intel_display_power_well_is_enabled(display, high_pg),
"Power wells above platform's DC5 limit still enabled.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- (intel_de_read(dev_priv, DC_STATE_EN) &
+ drm_WARN_ONCE(display->drm,
+ (intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n");
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
- assert_dmc_loaded(dev_priv);
+ assert_dmc_loaded(display);
}
-void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+void gen9_enable_dc5(struct intel_display *display)
{
- assert_can_enable_dc5(dev_priv);
+ assert_can_enable_dc5(display);
- drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
+ drm_dbg_kms(display->drm, "Enabling DC5\n");
/* Wa Display #1183: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
- intel_dmc_wl_enable(&dev_priv->display);
+ intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC5);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+ gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC5);
}
-static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+static void assert_can_enable_dc6(struct intel_display *display)
{
- drm_WARN_ONCE(&dev_priv->drm,
- (intel_de_read(dev_priv, UTIL_PIN_CTL) &
+ drm_WARN_ONCE(display->drm,
+ (intel_de_read(display, UTIL_PIN_CTL) &
(UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) ==
(UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
"Utility pin enabled in PWM mode\n");
- drm_WARN_ONCE(&dev_priv->drm,
- (intel_de_read(dev_priv, DC_STATE_EN) &
+ drm_WARN_ONCE(display->drm,
+ (intel_de_read(display, DC_STATE_EN) &
DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be enabled.\n");
- assert_dmc_loaded(dev_priv);
+ assert_dmc_loaded(display);
}
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
+void skl_enable_dc6(struct intel_display *display)
{
- assert_can_enable_dc6(dev_priv);
+ assert_can_enable_dc6(display);
- drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
+ drm_dbg_kms(display->drm, "Enabling DC6\n");
/* Wa Display #1183: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ if (DISPLAY_VER(display) == 9 && !display->platform.broxton)
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
0, SKL_SELECT_ALTERNATE_DC_EXIT);
- intel_dmc_wl_enable(&dev_priv->display);
+ intel_dmc_wl_enable(display, DC_STATE_EN_UPTO_DC6);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ gen9_set_dc_state(display, DC_STATE_EN_UPTO_DC6);
}
-void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+void bxt_enable_dc9(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
-
- assert_can_enable_dc9(dev_priv);
+ assert_can_enable_dc9(display);
- drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
+ drm_dbg_kms(display->drm, "Enabling DC9\n");
/*
- * Power sequencer reset is not needed on
- * platforms with South Display Engine on PCH,
- * because PPS registers are always on.
+ * Power sequencer reset is needed on BXT/GLK, because the PPS registers
+ * aren't always on, unlike with South Display Engine on PCH.
*/
- if (!HAS_PCH_SPLIT(dev_priv))
- intel_pps_reset_all(display);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
+ if (display->platform.broxton || display->platform.geminilake)
+ bxt_pps_reset_all(display);
+ gen9_set_dc_state(display, DC_STATE_EN_DC9);
}
-void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+void bxt_disable_dc9(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ assert_can_disable_dc9(display);
- assert_can_disable_dc9(dev_priv);
+ drm_dbg_kms(display->drm, "Disabling DC9\n");
- drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
intel_pps_unlock_regs_wa(display);
}
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void hsw_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 bios_req = intel_de_read(dev_priv, regs->bios);
+ u32 bios_req = intel_de_read(display, regs->bios);
/* Take over the request bit if set by BIOS. */
if (bios_req & mask) {
- u32 drv_req = intel_de_read(dev_priv, regs->driver);
+ u32 drv_req = intel_de_read(display, regs->driver);
if (!(drv_req & mask))
- intel_de_write(dev_priv, regs->driver, drv_req | mask);
- intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
+ intel_de_write(display, regs->driver, drv_req | mask);
+ intel_de_write(display, regs->bios, bios_req & ~mask);
}
}
-static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void bxt_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- bxt_dpio_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_init(display, i915_power_well_instance(power_well)->bxt.phy);
}
-static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void bxt_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- bxt_dpio_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_uninit(display, i915_power_well_instance(power_well)->bxt.phy);
}
-static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool bxt_dpio_cmn_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- return bxt_dpio_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ return bxt_dpio_phy_is_enabled(display, i915_power_well_instance(power_well)->bxt.phy);
}
-static void bxt_verify_dpio_phy_power_wells(struct drm_i915_private *dev_priv)
+static void bxt_verify_dpio_phy_power_wells(struct intel_display *display)
{
struct i915_power_well *power_well;
- power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
+ power_well = lookup_power_well(display, BXT_DISP_PW_DPIO_CMN_A);
if (intel_power_well_refcount(power_well) > 0)
- bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);
- power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ power_well = lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
if (intel_power_well_refcount(power_well) > 0)
- bxt_dpio_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+ bxt_dpio_phy_verify_state(display, i915_power_well_instance(power_well)->bxt.phy);
- if (IS_GEMINILAKE(dev_priv)) {
- power_well = lookup_power_well(dev_priv,
+ if (display->platform.geminilake) {
+ power_well = lookup_power_well(display,
GLK_DISP_PW_DPIO_CMN_C);
if (intel_power_well_refcount(power_well) > 0)
- bxt_dpio_phy_verify_state(dev_priv,
+ bxt_dpio_phy_verify_state(display,
i915_power_well_instance(power_well)->bxt.phy);
}
}
-static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool gen9_dc_off_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
- (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
+ return ((intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}
-static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+static void gen9_assert_dbuf_enabled(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices;
+ u8 enabled_dbuf_slices = display->dbuf.enabled_slices;
- drm_WARN(&dev_priv->drm,
+ drm_WARN(display->drm,
hw_enabled_dbuf_slices != enabled_dbuf_slices,
"Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
hw_enabled_dbuf_slices,
enabled_dbuf_slices);
}
-void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
+void gen9_disable_dc_states(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
struct intel_cdclk_config cdclk_config = {};
+ u32 old_state = power_domains->dc_state;
if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
- tgl_disable_dc3co(dev_priv);
+ tgl_disable_dc3co(display);
return;
}
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- if (!HAS_DISPLAY(dev_priv))
+ if (HAS_DISPLAY(display)) {
+ intel_dmc_wl_get_noreg(display);
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
+ intel_dmc_wl_put_noreg(display);
+ } else {
+ gen9_set_dc_state(display, DC_STATE_DISABLE);
return;
+ }
- intel_dmc_wl_disable(&dev_priv->display);
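+ /*
+ * The DMC wakelock is only taken for DC5/DC6 (see gen9_enable_dc5() and
+ * skl_enable_dc6() above), so it only needs dropping when leaving those
+ * states.
+ */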
+ if (old_state == DC_STATE_EN_UPTO_DC5 ||
+ old_state == DC_STATE_EN_UPTO_DC6)
+ intel_dmc_wl_disable(display);
- intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
+ intel_cdclk_get_cdclk(display, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
- drm_WARN_ON(&dev_priv->drm,
- intel_cdclk_clock_changed(&dev_priv->display.cdclk.hw,
+ drm_WARN_ON(display->drm,
+ intel_cdclk_clock_changed(&display->cdclk.hw,
&cdclk_config));
- gen9_assert_dbuf_enabled(dev_priv);
+ gen9_assert_dbuf_enabled(display);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_verify_dpio_phy_power_wells(dev_priv);
+ if (display->platform.geminilake || display->platform.broxton)
+ bxt_verify_dpio_phy_power_wells(display);
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
/*
* DMC retains HW context only for port A, the other combo
* PHY's HW context for port B is lost after DC transitions,
@@ -1002,84 +1018,85 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
intel_combo_phy_init(dev_priv);
}
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+static void gen9_dc_off_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- gen9_disable_dc_states(dev_priv);
+ gen9_disable_dc_states(display);
}
-static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+static void gen9_dc_off_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
- if (!intel_dmc_has_payload(dev_priv))
+ if (!intel_dmc_has_payload(display))
return;
switch (power_domains->target_dc_state) {
case DC_STATE_EN_DC3CO:
- tgl_enable_dc3co(dev_priv);
+ tgl_enable_dc3co(display);
break;
case DC_STATE_EN_UPTO_DC6:
- skl_enable_dc6(dev_priv);
+ skl_enable_dc6(display);
break;
case DC_STATE_EN_UPTO_DC5:
- gen9_enable_dc5(dev_priv);
+ gen9_enable_dc5(display);
break;
}
}
-static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+static void i9xx_power_well_sync_hw_noop(struct intel_display *display,
struct i915_power_well *power_well)
{
}
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+static void i9xx_always_on_power_well_noop(struct intel_display *display,
struct i915_power_well *power_well)
{
}
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static bool i9xx_always_on_power_well_enabled(struct intel_display *display,
+ struct i915_power_well *power_well)
{
return true;
}
-static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- if ((intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE) == 0)
- i830_enable_pipe(dev_priv, PIPE_A);
- if ((intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE) == 0)
- i830_enable_pipe(dev_priv, PIPE_B);
+ if ((intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE) == 0)
+ i830_enable_pipe(display, PIPE_A);
+ if ((intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE) == 0)
+ i830_enable_pipe(display, PIPE_B);
}
-static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- i830_disable_pipe(dev_priv, PIPE_B);
- i830_disable_pipe(dev_priv, PIPE_A);
+ i830_disable_pipe(display, PIPE_B);
+ i830_disable_pipe(display, PIPE_A);
}
-static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool i830_pipes_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- return intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_A)) & TRANSCONF_ENABLE &&
- intel_de_read(dev_priv, TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE;
+ return intel_de_read(display, TRANSCONF(display, PIPE_A)) & TRANSCONF_ENABLE &&
+ intel_de_read(display, TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE;
}
-static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void i830_pipes_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
if (intel_power_well_refcount(power_well) > 0)
- i830_pipes_power_well_enable(dev_priv, power_well);
+ i830_pipes_power_well_enable(display, power_well);
else
- i830_pipes_power_well_disable(dev_priv, power_well);
+ i830_pipes_power_well_disable(display, power_well);
}
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+static void vlv_set_power_well(struct intel_display *display,
struct i915_power_well *power_well, bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
u32 mask;
u32 state;
@@ -1103,7 +1120,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
@@ -1114,21 +1131,22 @@ out:
vlv_punit_put(dev_priv);
}
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
}
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
}
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool vlv_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
bool enabled = false;
u32 mask;
@@ -1145,7 +1163,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ drm_WARN_ON(display->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
state != PUNIT_PWRGT_PWR_GATE(pw_idx));
if (state == ctrl)
enabled = true;
@@ -1155,14 +1173,14 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- drm_WARN_ON(&dev_priv->drm, ctrl != state);
+ drm_WARN_ON(display->drm, ctrl != state);
vlv_punit_put(dev_priv);
return enabled;
}
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+static void vlv_init_display_clock_gating(struct intel_display *display)
{
/*
* On driver load, a pipe may be active and driving a DSI display.
@@ -1170,25 +1188,25 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/*
* Disable trickle feed and enable pnd deadline calculation
*/
- intel_de_write(dev_priv, MI_ARB_VLV,
+ intel_de_write(display, MI_ARB_VLV,
MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- intel_de_write(dev_priv, CBR1_VLV, 0);
+ intel_de_write(display, CBR1_VLV, 0);
- drm_WARN_ON(&dev_priv->drm, DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
- intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq,
+ drm_WARN_ON(display->drm, DISPLAY_RUNTIME_INFO(display)->rawclk_freq == 0);
+ intel_de_write(display, RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq,
1000));
}
-static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+static void vlv_display_power_well_init(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1200,17 +1218,17 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
- for_each_pipe(dev_priv, pipe) {
- u32 val = intel_de_read(dev_priv, DPLL(dev_priv, pipe));
+ for_each_pipe(display, pipe) {
+ u32 val = intel_de_read(display, DPLL(display, pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- intel_de_write(dev_priv, DPLL(dev_priv, pipe), val);
+ intel_de_write(display, DPLL(display, pipe), val);
}
- vlv_init_display_clock_gating(dev_priv);
+ vlv_init_display_clock_gating(display);
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
@@ -1220,26 +1238,26 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
- if (dev_priv->display.power.domains.initializing)
+ if (display->power.domains.initializing)
return;
intel_hpd_init(dev_priv);
intel_hpd_poll_disable(dev_priv);
/* Re-enable the ADPA, if we have one */
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_ANALOG)
intel_crt_reset(&encoder->base);
}
- intel_vga_redisable_power_on(dev_priv);
+ intel_vga_redisable_power_on(display);
intel_pps_unlock_regs_wa(display);
}
-static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+static void vlv_display_power_well_deinit(struct intel_display *display)
{
- struct intel_display *display = &dev_priv->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
@@ -1248,36 +1266,36 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
- intel_pps_reset_all(display);
+ vlv_pps_reset_all(display);
/* Prevent us from re-enabling polling on accident in late suspend */
- if (!dev_priv->drm.dev->power.is_suspended)
+ if (!display->drm->dev->power.is_suspended)
intel_hpd_poll_enable(dev_priv);
}
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_display_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
- vlv_display_power_well_init(dev_priv);
+ vlv_display_power_well_init(display);
}
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_display_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_display_power_well_deinit(dev_priv);
+ vlv_display_power_well_deinit(display);
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
}
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void vlv_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
/*
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
@@ -1290,32 +1308,33 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
- intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST);
+ intel_de_rmw(display, DPIO_CTL, 0, DPIO_CMNRST);
}
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void vlv_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(display, pipe)
assert_pll_disabled(dev_priv, pipe);
/* Assert common reset */
- intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0);
+ intel_de_rmw(display, DPIO_CTL, DPIO_CMNRST, 0);
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
}
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
-static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+static void assert_chv_phy_status(struct intel_display *display)
{
struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ lookup_power_well(display, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
- u32 phy_control = dev_priv->display.power.chv_phy_control;
+ lookup_power_well(display, CHV_DISP_PW_DPIO_CMN_D);
+ u32 phy_control = display->power.chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
@@ -1326,7 +1345,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* reset (ie. the power well has been disabled at
* least once).
*/
- if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0])
+ if (!display->power.chv_phy_assert[DPIO_PHY0])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
@@ -1334,12 +1353,12 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
- if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1])
+ if (!display->power.chv_phy_assert[DPIO_PHY1])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
- if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
+ if (intel_power_well_is_enabled(display, cmn_bc)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY0);
/* this assumes override is only used to enable lanes */
@@ -1362,7 +1381,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
*/
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
- (intel_de_read(dev_priv, DPLL(dev_priv, PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+ (intel_de_read(display, DPLL(display, PIPE_B)) & DPLL_VCO_ENABLE) == 0)
phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
if (BITS_SET(phy_control,
@@ -1380,7 +1399,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
}
- if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
+ if (intel_power_well_is_enabled(display, cmn_d)) {
phy_status |= PHY_POWERGOOD(DPIO_PHY1);
/* this assumes override is only used to enable lanes */
@@ -1405,24 +1424,25 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_de_wait(dev_priv, DISPLAY_PHY_STATUS,
+ if (intel_de_wait(display, DISPLAY_PHY_STATUS,
phy_status_mask, phy_status, 10))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->display.power.chv_phy_control);
+ intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, display->power.chv_phy_control);
}
#undef BITS_SET
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
u32 tmp;
- drm_WARN_ON_ONCE(&dev_priv->drm,
+ drm_WARN_ON_ONCE(display->drm,
id != VLV_DISP_PW_DPIO_CMN_BC &&
id != CHV_DISP_PW_DPIO_CMN_D);
@@ -1433,12 +1453,12 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
+ vlv_set_power_well(display, power_well, true);
/* Poll for phypwrgood signal */
- if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
+ if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS,
PHY_POWERGOOD(phy), 1))
- drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
+ drm_err(display->drm, "Display PHY %d did not power up\n",
phy);
vlv_dpio_get(dev_priv);
@@ -1466,24 +1486,25 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_put(dev_priv);
- dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->display.power.chv_phy_control);
+ display->power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ intel_de_write(display, DISPLAY_PHY_CONTROL,
+ display->power.chv_phy_control);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->display.power.chv_phy_control);
+ phy, display->power.chv_phy_control);
- assert_chv_phy_status(dev_priv);
+ assert_chv_phy_status(display);
}
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+static void chv_dpio_cmn_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
- drm_WARN_ON_ONCE(&dev_priv->drm,
+ drm_WARN_ON_ONCE(display->drm,
id != VLV_DISP_PW_DPIO_CMN_BC &&
id != CHV_DISP_PW_DPIO_CMN_D);
@@ -1496,25 +1517,26 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, PIPE_C);
}
- dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->display.power.chv_phy_control);
+ display->power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ intel_de_write(display, DISPLAY_PHY_CONTROL,
+ display->power.chv_phy_control);
- vlv_set_power_well(dev_priv, power_well, false);
+ vlv_set_power_well(display, power_well, false);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->display.power.chv_phy_control);
+ phy, display->power.chv_phy_control);
/* PHY is fully reset now, so we can enable the PHY state asserts */
- dev_priv->display.power.chv_phy_assert[phy] = true;
+ display->power.chv_phy_assert[phy] = true;
- assert_chv_phy_status(dev_priv);
+ assert_chv_phy_status(display);
}
-static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+static void assert_chv_phy_powergate(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 reg, val, expected, actual;
/*
@@ -1524,7 +1546,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
* reset (ie. the power well has been disabled at
* least once).
*/
- if (!dev_priv->display.power.chv_phy_assert[phy])
+ if (!display->power.chv_phy_assert[phy])
return;
if (ch == DPIO_CH0)
@@ -1567,7 +1589,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
actual = REG_FIELD_GET(DPIO_ANYDL_POWERDOWN_CH1 |
DPIO_ALLDL_POWERDOWN_CH1, val);
- drm_WARN(&dev_priv->drm, actual != expected,
+ drm_WARN(display->drm, actual != expected,
"Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
!!(actual & DPIO_ALLDL_POWERDOWN),
!!(actual & DPIO_ANYDL_POWERDOWN),
@@ -1576,32 +1598,32 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
reg, val);
}
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override)
{
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
bool was_override;
mutex_lock(&power_domains->lock);
- was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ was_override = display->power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
if (override == was_override)
goto out;
if (override)
- dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
else
- dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->display.power.chv_phy_control);
+ intel_de_write(display, DISPLAY_PHY_CONTROL,
+ display->power.chv_phy_control);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->display.power.chv_phy_control);
+ phy, ch, display->power.chv_phy_control);
- assert_chv_phy_status(dev_priv);
+ assert_chv_phy_status(display);
out:
mutex_unlock(&power_domains->lock);
@@ -1612,38 +1634,39 @@ out:
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct intel_display *display = to_intel_display(encoder);
+ struct i915_power_domains *power_domains = &display->power.domains;
enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
mutex_lock(&power_domains->lock);
- dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
- dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+ display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+ display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
if (override)
- dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ display->power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
else
- dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ display->power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->display.power.chv_phy_control);
+ intel_de_write(display, DISPLAY_PHY_CONTROL,
+ display->power.chv_phy_control);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->display.power.chv_phy_control);
+ phy, ch, mask, display->power.chv_phy_control);
- assert_chv_phy_status(dev_priv);
+ assert_chv_phy_status(display);
- assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+ assert_chv_phy_powergate(display, phy, ch, override, mask);
mutex_unlock(&power_domains->lock);
}
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool chv_pipe_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
bool enabled;
u32 state, ctrl;
@@ -1655,7 +1678,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+ drm_WARN_ON(display->drm, state != DP_SSS_PWR_ON(pipe) &&
state != DP_SSS_PWR_GATE(pipe));
enabled = state == DP_SSS_PWR_ON(pipe);
@@ -1664,17 +1687,18 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
+ drm_WARN_ON(display->drm, ctrl << 16 != state);
vlv_punit_put(dev_priv);
return enabled;
}
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+static void chv_set_pipe_power_well(struct intel_display *display,
struct i915_power_well *power_well,
bool enable)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
@@ -1695,7 +1719,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
@@ -1706,32 +1730,33 @@ out:
vlv_punit_put(dev_priv);
}
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->display.power.chv_phy_control);
+ intel_de_write(display, DISPLAY_PHY_CONTROL,
+ display->power.chv_phy_control);
}
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- chv_set_pipe_power_well(dev_priv, power_well, true);
+ chv_set_pipe_power_well(display, power_well, true);
- vlv_display_power_well_init(dev_priv);
+ vlv_display_power_well_init(display);
}
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+static void chv_pipe_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- vlv_display_power_well_deinit(dev_priv);
+ vlv_display_power_well_deinit(display);
- chv_set_pipe_power_well(dev_priv, power_well, false);
+ chv_set_pipe_power_well(display, power_well, false);
}
static void
-tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
+tgl_tc_cold_request(struct intel_display *display, bool block)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
u8 tries = 0;
int ret;
@@ -1772,31 +1797,31 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
}
static void
-tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
+tgl_tc_cold_off_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- tgl_tc_cold_request(i915, true);
+ tgl_tc_cold_request(display, true);
}
static void
-tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
+tgl_tc_cold_off_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- tgl_tc_cold_request(i915, false);
+ tgl_tc_cold_request(display, false);
}
static void
-tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
+tgl_tc_cold_off_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well)
{
if (intel_power_well_refcount(power_well) > 0)
- tgl_tc_cold_off_power_well_enable(i915, power_well);
+ tgl_tc_cold_off_power_well_enable(display, power_well);
else
- tgl_tc_cold_off_power_well_disable(i915, power_well);
+ tgl_tc_cold_off_power_well_disable(display, power_well);
}
static bool
-tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
+tgl_tc_cold_off_power_well_is_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
/*
@@ -1806,17 +1831,18 @@ tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
return intel_power_well_refcount(power_well);
}
-static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
+static void xelpdp_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(dev_priv, phy))
- icl_tc_port_assert_ref_held(dev_priv, power_well,
- aux_ch_to_digital_port(dev_priv, aux_ch));
+ icl_tc_port_assert_ref_held(display, power_well,
+ aux_ch_to_digital_port(display, aux_ch));
- intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch),
+ intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);
@@ -1829,57 +1855,57 @@ static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
usleep_range(600, 1200);
}
-static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv,
+static void xelpdp_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
- intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch),
+ intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
0);
usleep_range(10, 30);
}
-static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
- return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch)) &
+ return intel_de_read(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch)) &
XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
}
-static void xe2lpd_pica_power_well_enable(struct drm_i915_private *dev_priv,
+static void xe2lpd_pica_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
- intel_de_write(dev_priv, XE2LPD_PICA_PW_CTL,
+ intel_de_write(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_REQUEST);
- if (intel_de_wait_for_set(dev_priv, XE2LPD_PICA_PW_CTL,
+ if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
- drm_dbg_kms(&dev_priv->drm, "pica power well enable timeout\n");
+ drm_dbg_kms(display->drm, "pica power well enable timeout\n");
- drm_WARN(&dev_priv->drm, 1, "Power well PICA timeout when enabled");
+ drm_WARN(display->drm, 1, "PICA power well enable timed out");
}
}
-static void xe2lpd_pica_power_well_disable(struct drm_i915_private *dev_priv,
+static void xe2lpd_pica_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
- intel_de_write(dev_priv, XE2LPD_PICA_PW_CTL, 0);
+ intel_de_write(display, XE2LPD_PICA_PW_CTL, 0);
- if (intel_de_wait_for_clear(dev_priv, XE2LPD_PICA_PW_CTL,
+ if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
- drm_dbg_kms(&dev_priv->drm, "pica power well disable timeout\n");
+ drm_dbg_kms(display->drm, "pica power well disable timeout\n");
- drm_WARN(&dev_priv->drm, 1, "Power well PICA timeout when disabled");
+ drm_WARN(display->drm, 1, "PICA power well disable timed out");
}
}
-static bool xe2lpd_pica_power_well_enabled(struct drm_i915_private *dev_priv,
+static bool xe2lpd_pica_power_well_enabled(struct intel_display *display,
struct i915_power_well *power_well)
{
- return intel_de_read(dev_priv, XE2LPD_PICA_PW_CTL) &
+ return intel_de_read(display, XE2LPD_PICA_PW_CTL) &
XE2LPD_PICA_CTL_POWER_STATUS;
}
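Taken together, the hunks above all follow one conversion pattern: the power well hooks now take struct intel_display, display register access (intel_de_*) and drm logging go through display / display->drm, and only the remaining i915-specific helpers (Punit/DPIO sideband, PLL asserts) bridge back via to_i915(display->drm). A minimal sketch of that shape, not part of the patch and using only helpers that appear in the hunks above:

static void example_power_well_enable(struct intel_display *display,
				      struct i915_power_well *power_well)
{
	/* Bridge back to i915 only where legacy helpers still require it. */
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	vlv_dpio_get(dev_priv);		/* sideband access is still i915-based */
	intel_de_write(display, DISPLAY_PHY_CONTROL,
		       display->power.chv_phy_control);
	vlv_dpio_put(dev_priv);
}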
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index 9357a9a73c06..338379dae44c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -10,20 +10,20 @@
#include "intel_display_power.h"
#include "intel_dpio_phy.h"
-struct drm_i915_private;
struct i915_power_well_ops;
+struct intel_display;
struct intel_encoder;
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \
- (__power_well) - (__dev_priv)->display.power.domains.power_wells < \
- (__dev_priv)->display.power.domains.power_well_count; \
+#define for_each_power_well(___display, __power_well) \
+ for ((__power_well) = (___display)->power.domains.power_wells; \
+ (__power_well) - (___display)->power.domains.power_wells < \
+ (___display)->power.domains.power_well_count; \
(__power_well)++)
-#define for_each_power_well_reverse(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->display.power.domains.power_wells + \
- (__dev_priv)->display.power.domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->display.power.domains.power_wells >= 0; \
+#define for_each_power_well_reverse(___display, __power_well) \
+ for ((__power_well) = (___display)->power.domains.power_wells + \
+ (___display)->power.domains.power_well_count - 1; \
+ (__power_well) - (___display)->power.domains.power_wells >= 0; \
(__power_well)--)
/*
@@ -126,23 +126,23 @@ struct i915_power_well {
u8 instance_idx;
};
-struct i915_power_well *lookup_power_well(struct drm_i915_private *i915,
+struct i915_power_well *lookup_power_well(struct intel_display *display,
enum i915_power_well_id id);
-void intel_power_well_enable(struct drm_i915_private *i915,
+void intel_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_disable(struct drm_i915_private *i915,
+void intel_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_sync_hw(struct drm_i915_private *i915,
+void intel_power_well_sync_hw(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_get(struct drm_i915_private *i915,
+void intel_power_well_get(struct intel_display *display,
struct i915_power_well *power_well);
-void intel_power_well_put(struct drm_i915_private *i915,
+void intel_power_well_put(struct intel_display *display,
struct i915_power_well *power_well);
-bool intel_power_well_is_enabled(struct drm_i915_private *i915,
+bool intel_power_well_is_enabled(struct intel_display *display,
struct i915_power_well *power_well);
bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well);
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+bool intel_display_power_well_is_enabled(struct intel_display *display,
enum i915_power_well_id power_well_id);
bool intel_power_well_is_always_on(struct i915_power_well *power_well);
const char *intel_power_well_name(struct i915_power_well *power_well);
@@ -151,16 +151,16 @@ int intel_power_well_refcount(struct i915_power_well *power_well);
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+bool chv_phy_powergate_ch(struct intel_display *display, enum dpio_phy phy,
enum dpio_channel ch, bool override);
-void gen9_enable_dc5(struct drm_i915_private *dev_priv);
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
-void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state);
-void gen9_disable_dc_states(struct drm_i915_private *dev_priv);
-void bxt_enable_dc9(struct drm_i915_private *dev_priv);
-void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void gen9_enable_dc5(struct intel_display *display);
+void skl_enable_dc6(struct intel_display *display);
+void gen9_sanitize_dc_state(struct intel_display *display);
+void gen9_set_dc_state(struct intel_display *display, u32 state);
+void gen9_disable_dc_states(struct intel_display *display);
+void bxt_enable_dc9(struct intel_display *display);
+void bxt_disable_dc9(struct intel_display *display);
extern const struct i915_power_well_ops i9xx_always_on_power_well_ops;
extern const struct i915_power_well_ops chv_pipe_power_well_ops;
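With the macros and accessors reworked above, a caller that walks the wells now passes struct intel_display throughout; a hypothetical debug dump, for illustration only and built solely from the declarations in this header:

static void example_dump_power_wells(struct intel_display *display)
{
	struct i915_power_well *power_well;

	for_each_power_well(display, power_well)
		drm_dbg_kms(display->drm, "%s: enabled=%d, refcount=%d\n",
			    intel_power_well_name(power_well),
			    intel_power_well_is_enabled_cached(power_well),
			    intel_power_well_refcount(power_well));
}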
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 49e2e650ebcd..093b386c95e8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -114,11 +114,11 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
* so need a full re-initialization.
*/
intel_pps_unlock_regs_wa(display);
- intel_display_driver_init_hw(i915);
+ intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
intel_hpd_init(i915);
- ret = __intel_display_driver_resume(i915, state, ctx);
+ ret = __intel_display_driver_resume(display, state, ctx);
if (ret)
drm_err(&i915->drm,
"Restoring old state failed with %i\n", ret);
diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
new file mode 100644
index 000000000000..25ba043cbb65
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include <linux/slab.h>
+
+#include <drm/drm_drv.h>
+
+#include "intel_display_core.h"
+#include "intel_display_device.h"
+#include "intel_display_params.h"
+#include "intel_display_snapshot.h"
+#include "intel_dmc.h"
+#include "intel_overlay.h"
+
+struct intel_display_snapshot {
+ struct intel_display *display;
+
+ struct intel_display_device_info info;
+ struct intel_display_runtime_info runtime_info;
+ struct intel_display_params params;
+ struct intel_overlay_snapshot *overlay;
+ struct intel_dmc_snapshot *dmc;
+};
+
+struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_display *display)
+{
+ struct intel_display_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
+ if (!snapshot)
+ return NULL;
+
+ snapshot->display = display;
+
+ memcpy(&snapshot->info, DISPLAY_INFO(display), sizeof(snapshot->info));
+ memcpy(&snapshot->runtime_info, DISPLAY_RUNTIME_INFO(display),
+ sizeof(snapshot->runtime_info));
+
+ intel_display_params_copy(&snapshot->params);
+
+ snapshot->overlay = intel_overlay_snapshot_capture(display);
+ snapshot->dmc = intel_dmc_snapshot_capture(display);
+
+ return snapshot;
+}
+
+void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot,
+ struct drm_printer *p)
+{
+ struct intel_display *display;
+
+ if (!snapshot)
+ return;
+
+ display = snapshot->display;
+
+ intel_display_device_info_print(&snapshot->info, &snapshot->runtime_info, p);
+ intel_display_params_dump(&snapshot->params, display->drm->driver->name, p);
+
+ intel_overlay_snapshot_print(snapshot->overlay, p);
+ intel_dmc_snapshot_print(snapshot->dmc, p);
+}
+
+void intel_display_snapshot_free(struct intel_display_snapshot *snapshot)
+{
+ if (!snapshot)
+ return;
+
+ intel_display_params_free(&snapshot->params);
+
+ kfree(snapshot->overlay);
+ kfree(snapshot->dmc);
+ kfree(snapshot);
+}
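The three entry points above form a capture/print/free lifecycle; a hypothetical error-capture path (illustration only) would be:

static void example_capture_and_print(struct intel_display *display,
				      struct drm_printer *p)
{
	struct intel_display_snapshot *snapshot;

	/* kzalloc(..., GFP_ATOMIC) above: callable from error capture context */
	snapshot = intel_display_snapshot_capture(display);

	/* both helpers tolerate a NULL snapshot, so no error check is needed */
	intel_display_snapshot_print(snapshot, p);
	intel_display_snapshot_free(snapshot);
}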
diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.h b/drivers/gpu/drm/i915/display/intel_display_snapshot.h
new file mode 100644
index 000000000000..7ed27cdea644
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_SNAPSHOT_H__
+#define __INTEL_DISPLAY_SNAPSHOT_H__
+
+struct drm_printer;
+struct intel_display;
+struct intel_display_snapshot;
+
+struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_display *display);
+void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot,
+ struct drm_printer *p);
+void intel_display_snapshot_free(struct intel_display_snapshot *snapshot);
+
+#endif /* __INTEL_DISPLAY_SNAPSHOT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index c734ef1fba3c..338b9f7b20b8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -9,44 +9,85 @@
#if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
#define __INTEL_DISPLAY_TRACE_H__
+#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/tracepoint.h>
-#include "i915_drv.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
+#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
-#define __dev_name_i915(i915) dev_name((i915)->drm.dev)
+#define __dev_name_display(display) dev_name((display)->drm->dev)
#define __dev_name_kms(obj) dev_name((obj)->base.dev->dev)
+/*
+ * Using identifiers from enum pipe in TP_printk() will confuse tools that
+ * parse /sys/kernel/debug/tracing/{xe,i915}/<event>/format. So we use CPP
+ * macros instead.
+ */
+#define _TRACE_PIPE_A 0
+#define _TRACE_PIPE_B 1
+#define _TRACE_PIPE_C 2
+#define _TRACE_PIPE_D 3
+
+/*
+ * FIXME: Several TP_printk() calls below display frame and scanline numbers for
+ * all possible pipes (regardless of whether they are available) and that is
+ * done with a constant format string. A better approach would be to generate
+ * that info dynamically based on available pipes, but, while we do not have
+ * that implemented yet, let's assert that the constant format string indeed
+ * covers all possible pipes.
+ */
+static_assert(I915_MAX_PIPES - 1 == _TRACE_PIPE_D);
+
+#define _PIPES_FRAME_AND_SCANLINE_FMT \
+ "pipe A: frame=%u, scanline=%u" \
+ ", pipe B: frame=%u, scanline=%u" \
+ ", pipe C: frame=%u, scanline=%u" \
+ ", pipe D: frame=%u, scanline=%u"
+
+#define _PIPES_FRAME_AND_SCANLINE_VALUES \
+ __entry->frame[_TRACE_PIPE_A], __entry->scanline[_TRACE_PIPE_A] \
+ , __entry->frame[_TRACE_PIPE_B], __entry->scanline[_TRACE_PIPE_B] \
+ , __entry->frame[_TRACE_PIPE_C], __entry->scanline[_TRACE_PIPE_C] \
+ , __entry->frame[_TRACE_PIPE_D], __entry->scanline[_TRACE_PIPE_D]
+
+/*
+ * Paranoid sanity check that at least the enumeration starts at the
+ * same value as _TRACE_PIPE_A.
+ */
+static_assert(PIPE_A == _TRACE_PIPE_A);
+
TRACE_EVENT(intel_pipe_enable,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __array(u32, frame, 3)
- __array(u32, scanline, 3)
- __field(enum pipe, pipe)
+ __array(u32, frame, I915_MAX_PIPES)
+ __array(u32, scanline, I915_MAX_PIPES)
+ __field(char, pipe_name)
),
TP_fast_assign(
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc *it__;
__assign_str(dev);
- for_each_intel_crtc(&dev_priv->drm, it__) {
+ memset(__entry->frame, 0,
+ sizeof(__entry->frame[0]) * I915_MAX_PIPES);
+ memset(__entry->scanline, 0,
+ sizeof(__entry->scanline[0]) * I915_MAX_PIPES);
+ for_each_intel_crtc(display->drm, it__) {
__entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
__entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
}
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
),
- TP_printk("dev %s, pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe),
- __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
- __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
- __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+ TP_printk("dev %s, pipe %c enable, " _PIPES_FRAME_AND_SCANLINE_FMT,
+ __get_str(dev), __entry->pipe_name, _PIPES_FRAME_AND_SCANLINE_VALUES)
);
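With the constant format string above, a rendered trace line would look roughly like this (device name and counter values are illustrative):

  intel_pipe_enable: dev 0000:00:02.0, pipe A enable, pipe A: frame=1024, scanline=0, pipe B: frame=0, scanline=0, pipe C: frame=0, scanline=0, pipe D: frame=0, scanline=0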
TRACE_EVENT(intel_pipe_disable,
@@ -55,27 +96,28 @@ TRACE_EVENT(intel_pipe_disable,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __array(u32, frame, 3)
- __array(u32, scanline, 3)
- __field(enum pipe, pipe)
+ __array(u32, frame, I915_MAX_PIPES)
+ __array(u32, scanline, I915_MAX_PIPES)
+ __field(char, pipe_name)
),
TP_fast_assign(
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc *it__;
__assign_str(dev);
- for_each_intel_crtc(&dev_priv->drm, it__) {
+ memset(__entry->frame, 0,
+ sizeof(__entry->frame[0]) * I915_MAX_PIPES);
+ memset(__entry->scanline, 0,
+ sizeof(__entry->scanline[0]) * I915_MAX_PIPES);
+ for_each_intel_crtc(display->drm, it__) {
__entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
__entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
}
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
),
- TP_printk("dev %s, pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe),
- __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
- __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
- __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+ TP_printk("dev %s, pipe %c disable, " _PIPES_FRAME_AND_SCANLINE_FMT,
+ __get_str(dev), __entry->pipe_name, _PIPES_FRAME_AND_SCANLINE_VALUES)
);
TRACE_EVENT(intel_crtc_flip_done,
@@ -84,20 +126,20 @@ TRACE_EVENT(intel_crtc_flip_done,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__assign_str(dev);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline)
);
@@ -107,7 +149,7 @@ TRACE_EVENT(intel_pipe_crc,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
__array(u32, crcs, 5)
@@ -115,14 +157,14 @@ TRACE_EVENT(intel_pipe_crc,
TP_fast_assign(
__assign_str(dev);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline,
__entry->crcs[0], __entry->crcs[1],
__entry->crcs[2], __entry->crcs[3],
@@ -130,62 +172,62 @@ TRACE_EVENT(intel_pipe_crc,
);
TRACE_EVENT(intel_cpu_fifo_underrun,
- TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
- TP_ARGS(dev_priv, pipe),
+ TP_PROTO(struct intel_display *display, enum pipe pipe),
+ TP_ARGS(display, pipe),
TP_STRUCT__entry(
- __string(dev, __dev_name_i915(dev_priv))
- __field(enum pipe, pipe)
+ __string(dev, __dev_name_display(display))
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
__assign_str(dev);
- __entry->pipe = pipe;
+ __entry->pipe_name = pipe_name(pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline)
);
TRACE_EVENT(intel_pch_fifo_underrun,
- TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder),
- TP_ARGS(dev_priv, pch_transcoder),
+ TP_PROTO(struct intel_display *display, enum pipe pch_transcoder),
+ TP_ARGS(display, pch_transcoder),
TP_STRUCT__entry(
- __string(dev, __dev_name_i915(dev_priv))
- __field(enum pipe, pipe)
+ __string(dev, __dev_name_display(display))
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
enum pipe pipe = pch_transcoder;
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
__assign_str(dev);
- __entry->pipe = pipe;
+ __entry->pipe_name = pipe_name(pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("dev %s, pch transcoder %c, frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline)
);
TRACE_EVENT(intel_memory_cxsr,
- TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new),
- TP_ARGS(dev_priv, old, new),
+ TP_PROTO(struct intel_display *display, bool old, bool new),
+ TP_ARGS(display, old, new),
TP_STRUCT__entry(
- __string(dev, __dev_name_i915(dev_priv))
- __array(u32, frame, 3)
- __array(u32, scanline, 3)
+ __string(dev, __dev_name_display(display))
+ __array(u32, frame, I915_MAX_PIPES)
+ __array(u32, scanline, I915_MAX_PIPES)
__field(bool, old)
__field(bool, new)
),
@@ -193,7 +235,11 @@ TRACE_EVENT(intel_memory_cxsr,
TP_fast_assign(
struct intel_crtc *crtc;
__assign_str(dev);
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ memset(__entry->frame, 0,
+ sizeof(__entry->frame[0]) * I915_MAX_PIPES);
+ memset(__entry->scanline, 0,
+ sizeof(__entry->scanline[0]) * I915_MAX_PIPES);
+ for_each_intel_crtc(display->drm, crtc) {
__entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
__entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
}
@@ -201,11 +247,9 @@ TRACE_EVENT(intel_memory_cxsr,
__entry->new = new;
),
- TP_printk("dev %s, cxsr %s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ TP_printk("dev %s, cxsr %s->%s, " _PIPES_FRAME_AND_SCANLINE_FMT,
__get_str(dev), str_on_off(__entry->old), str_on_off(__entry->new),
- __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
- __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
- __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+ _PIPES_FRAME_AND_SCANLINE_VALUES)
);
TRACE_EVENT(g4x_wm,
@@ -214,7 +258,7 @@ TRACE_EVENT(g4x_wm,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
__field(u16, primary)
@@ -233,7 +277,7 @@ TRACE_EVENT(g4x_wm,
TP_fast_assign(
__assign_str(dev);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
@@ -251,7 +295,7 @@ TRACE_EVENT(g4x_wm,
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline,
__entry->primary, __entry->sprite, __entry->cursor,
str_yes_no(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc,
@@ -265,7 +309,7 @@ TRACE_EVENT(vlv_wm,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
__field(u32, level)
@@ -280,7 +324,7 @@ TRACE_EVENT(vlv_wm,
TP_fast_assign(
__assign_str(dev);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->level = wm->level;
@@ -294,7 +338,7 @@ TRACE_EVENT(vlv_wm,
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline,
__entry->level, __entry->cxsr,
__entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor,
@@ -307,7 +351,7 @@ TRACE_EVENT(vlv_fifo_size,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
__field(u32, sprite0_start)
@@ -317,7 +361,7 @@ TRACE_EVENT(vlv_fifo_size,
TP_fast_assign(
__assign_str(dev);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->sprite0_start = sprite0_start;
@@ -326,7 +370,7 @@ TRACE_EVENT(vlv_fifo_size,
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, %d/%d/%d",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline,
__entry->sprite0_start, __entry->sprite1_start, __entry->fifo_size)
);
@@ -337,7 +381,7 @@ TRACE_EVENT(intel_plane_async_flip,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(plane))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
__field(bool, async_flip)
@@ -347,14 +391,14 @@ TRACE_EVENT(intel_plane_async_flip,
TP_fast_assign(
__assign_str(dev);
__assign_str(name);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->async_flip = async_flip;
),
TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, async_flip=%s",
- __get_str(dev), pipe_name(__entry->pipe), __get_str(name),
+ __get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline, str_yes_no(__entry->async_flip))
);
@@ -364,7 +408,7 @@ TRACE_EVENT(intel_plane_update_noarm,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(plane))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
__array(int, src, 4)
@@ -375,7 +419,7 @@ TRACE_EVENT(intel_plane_update_noarm,
TP_fast_assign(
__assign_str(dev);
__assign_str(name);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->src, &plane->base.state->src, sizeof(__entry->src));
@@ -383,7 +427,7 @@ TRACE_EVENT(intel_plane_update_noarm,
),
TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
- __get_str(dev), pipe_name(__entry->pipe), __get_str(name),
+ __get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
@@ -395,7 +439,7 @@ TRACE_EVENT(intel_plane_update_arm,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(plane))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
__array(int, src, 4)
@@ -406,7 +450,7 @@ TRACE_EVENT(intel_plane_update_arm,
TP_fast_assign(
__assign_str(dev);
__assign_str(name);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->src, &plane->base.state->src, sizeof(__entry->src));
@@ -414,7 +458,7 @@ TRACE_EVENT(intel_plane_update_arm,
),
TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
- __get_str(dev), pipe_name(__entry->pipe), __get_str(name),
+ __get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
@@ -426,7 +470,7 @@ TRACE_EVENT(intel_plane_disable_arm,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(plane))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
__string(name, plane->base.name)
@@ -435,13 +479,13 @@ TRACE_EVENT(intel_plane_disable_arm,
TP_fast_assign(
__assign_str(dev);
__assign_str(name);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe), __get_str(name),
+ __get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
@@ -452,23 +496,24 @@ TRACE_EVENT(intel_fbc_activate,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(plane))
__string(name, plane->base.name)
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
- struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display,
plane->pipe);
__assign_str(dev);
__assign_str(name);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe), __get_str(name),
+ __get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
@@ -479,23 +524,24 @@ TRACE_EVENT(intel_fbc_deactivate,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(plane))
__string(name, plane->base.name)
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
- struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display,
plane->pipe);
__assign_str(dev);
__assign_str(name);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe), __get_str(name),
+ __get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
@@ -506,23 +552,24 @@ TRACE_EVENT(intel_fbc_nuke,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(plane))
__string(name, plane->base.name)
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
- struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
+ struct intel_display *display = to_intel_display(plane->base.dev);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display,
plane->pipe);
__assign_str(dev);
__assign_str(name);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("dev %s, pipe %c, plane %s, frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe), __get_str(name),
+ __get_str(dev), __entry->pipe_name, __get_str(name),
__entry->frame, __entry->scanline)
);
@@ -532,20 +579,20 @@ TRACE_EVENT(intel_crtc_vblank_work_start,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__assign_str(dev);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline)
);
@@ -555,20 +602,20 @@ TRACE_EVENT(intel_crtc_vblank_work_end,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__assign_str(dev);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline)
);
@@ -578,7 +625,7 @@ TRACE_EVENT(intel_pipe_update_start,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
__field(u32, min)
@@ -587,7 +634,7 @@ TRACE_EVENT(intel_pipe_update_start,
TP_fast_assign(
__assign_str(dev);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->min = crtc->debug.min_vbl;
@@ -595,7 +642,7 @@ TRACE_EVENT(intel_pipe_update_start,
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline,
__entry->min, __entry->max)
);
@@ -606,7 +653,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
__field(u32, min)
@@ -615,7 +662,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded,
TP_fast_assign(
__assign_str(dev);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = crtc->debug.start_vbl_count;
__entry->scanline = crtc->debug.scanline_start;
__entry->min = crtc->debug.min_vbl;
@@ -623,7 +670,7 @@ TRACE_EVENT(intel_pipe_update_vblank_evaded,
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline,
__entry->min, __entry->max)
);
@@ -634,30 +681,30 @@ TRACE_EVENT(intel_pipe_update_end,
TP_STRUCT__entry(
__string(dev, __dev_name_kms(crtc))
- __field(enum pipe, pipe)
+ __field(char, pipe_name)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__assign_str(dev);
- __entry->pipe = crtc->pipe;
+ __entry->pipe_name = pipe_name(crtc->pipe);
__entry->frame = frame;
__entry->scanline = scanline_end;
),
TP_printk("dev %s, pipe %c, frame=%u, scanline=%u",
- __get_str(dev), pipe_name(__entry->pipe),
+ __get_str(dev), __entry->pipe_name,
__entry->frame, __entry->scanline)
);
TRACE_EVENT(intel_frontbuffer_invalidate,
- TP_PROTO(struct drm_i915_private *i915,
+ TP_PROTO(struct intel_display *display,
unsigned int frontbuffer_bits, unsigned int origin),
- TP_ARGS(i915, frontbuffer_bits, origin),
+ TP_ARGS(display, frontbuffer_bits, origin),
TP_STRUCT__entry(
- __string(dev, __dev_name_i915(i915))
+ __string(dev, __dev_name_display(display))
__field(unsigned int, frontbuffer_bits)
__field(unsigned int, origin)
),
@@ -673,12 +720,12 @@ TRACE_EVENT(intel_frontbuffer_invalidate,
);
TRACE_EVENT(intel_frontbuffer_flush,
- TP_PROTO(struct drm_i915_private *i915,
+ TP_PROTO(struct intel_display *display,
unsigned int frontbuffer_bits, unsigned int origin),
- TP_ARGS(i915, frontbuffer_bits, origin),
+ TP_ARGS(display, frontbuffer_bits, origin),
TP_STRUCT__entry(
- __string(dev, __dev_name_i915(i915))
+ __string(dev, __dev_name_display(display))
__field(unsigned int, frontbuffer_bits)
__field(unsigned int, origin)
),
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index f29e5dc3db91..8271e50e3644 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -26,10 +26,8 @@
#ifndef __INTEL_DISPLAY_TYPES_H__
#define __INTEL_DISPLAY_TYPES_H__
-#include <linux/i2c.h>
#include <linux/pm_qos.h>
#include <linux/pwm.h>
-#include <linux/sched/clock.h>
#include <drm/display/drm_dp_dual_mode_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -38,30 +36,28 @@
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/intel/i915_hdcp_interface.h>
-#include <media/cec-notifier.h>
-#include "gem/i915_gem_object_types.h" /* for to_intel_bo() */
#include "i915_vma.h"
#include "i915_vma_types.h"
#include "intel_bios.h"
#include "intel_display.h"
+#include "intel_display_conversion.h"
#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
#include "intel_wm_types.h"
+struct cec_notifier;
struct drm_printer;
struct __intel_global_objs_state;
+struct intel_connector;
struct intel_ddi_buf_trans;
struct intel_fbc;
-struct intel_connector;
+struct intel_hdcp_shim;
struct intel_tc_port;
/*
@@ -306,6 +302,15 @@ struct intel_panel_bl_funcs {
u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
};
+/* in 100us units */
+struct intel_pps_delays {
+ u16 power_up; /* eDP: T1+T3, LVDS: T1+T2 */
+ u16 backlight_on; /* eDP: T8, LVDS: T5 */
+ u16 backlight_off; /* eDP: T9, LVDS: T6/TX */
+ u16 power_down; /* eDP: T10, LVDS: T3 */
+ u16 power_cycle; /* eDP: T11+T12, LVDS: T7+T4 */
+};
+
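Since the new struct stores all delays in 100us units, a conversion helper would look like this (illustration only, not part of the patch; DIV_ROUND_UP is the standard kernel macro):

static inline unsigned int example_pps_power_up_ms(const struct intel_pps_delays *delays)
{
	return DIV_ROUND_UP(delays->power_up, 10);	/* 10 x 100us = 1ms */
}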
enum drrs_type {
DRRS_TYPE_NONE,
DRRS_TYPE_STATIC,
@@ -333,7 +338,7 @@ struct intel_vbt_panel_data {
int preemphasis;
int vswing;
int bpp;
- struct edp_power_seq pps;
+ struct intel_pps_delays pps;
u8 drrs_msa_timing_delay;
bool low_vswing;
bool hobl;
@@ -430,128 +435,6 @@ struct intel_panel {
struct intel_digital_port;
-enum check_link_response {
- HDCP_LINK_PROTECTED = 0,
- HDCP_TOPOLOGY_CHANGE,
- HDCP_LINK_INTEGRITY_FAILURE,
- HDCP_REAUTH_REQUEST
-};
-
-/*
- * This structure serves as a translation layer between the generic HDCP code
- * and the bus-specific code. What that means is that HDCP over HDMI differs
- * from HDCP over DP, so to account for these differences, we need to
- * communicate with the receiver through this shim.
- *
- * For completeness, the 2 buses differ in the following ways:
- * - DP AUX vs. DDC
- * HDCP registers on the receiver are set via DP AUX for DP, and
- * they are set via DDC for HDMI.
- * - Receiver register offsets
- * The offsets of the registers are different for DP vs. HDMI
- * - Receiver register masks/offsets
- * For instance, the ready bit for the KSV fifo is in a different
- * place on DP vs HDMI
- * - Receiver register names
- * Seriously. In the DP spec, the 16-bit register containing
- * downstream information is called BINFO, on HDMI it's called
- * BSTATUS. To confuse matters further, DP has a BSTATUS register
- * with a completely different definition.
- * - KSV FIFO
- * On HDMI, the ksv fifo is read all at once, whereas on DP it must
- * be read 3 keys at a time
- * - Aksv output
- * Since Aksv is hidden in hardware, there's different procedures
- * to send it over DP AUX vs DDC
- */
-struct intel_hdcp_shim {
- /* Outputs the transmitter's An and Aksv values to the receiver. */
- int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an);
-
- /* Reads the receiver's key selection vector */
- int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv);
-
- /*
- * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
- * definitions are the same in the respective specs, but the names are
- * different. Call it BSTATUS since that's the name the HDMI spec
- * uses and it was there first.
- */
- int (*read_bstatus)(struct intel_digital_port *dig_port,
- u8 *bstatus);
-
- /* Determines whether a repeater is present downstream */
- int (*repeater_present)(struct intel_digital_port *dig_port,
- bool *repeater_present);
-
- /* Reads the receiver's Ri' value */
- int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri);
-
- /* Determines if the receiver's KSV FIFO is ready for consumption */
- int (*read_ksv_ready)(struct intel_digital_port *dig_port,
- bool *ksv_ready);
-
- /* Reads the ksv fifo for num_downstream devices */
- int (*read_ksv_fifo)(struct intel_digital_port *dig_port,
- int num_downstream, u8 *ksv_fifo);
-
- /* Reads a 32-bit part of V' from the receiver */
- int (*read_v_prime_part)(struct intel_digital_port *dig_port,
- int i, u32 *part);
-
- /* Enables HDCP signalling on the port */
- int (*toggle_signalling)(struct intel_digital_port *dig_port,
- enum transcoder cpu_transcoder,
- bool enable);
-
- /* Enable/Disable stream encryption on DP MST Transport Link */
- int (*stream_encryption)(struct intel_connector *connector,
- bool enable);
-
- /* Ensures the link is still protected */
- bool (*check_link)(struct intel_digital_port *dig_port,
- struct intel_connector *connector);
-
- /* Detects panel's hdcp capability. This is optional for HDMI. */
- int (*hdcp_get_capability)(struct intel_digital_port *dig_port,
- bool *hdcp_capable);
-
- /* HDCP adaptation(DP/HDMI) required on the port */
- enum hdcp_wired_protocol protocol;
-
- /* Detects whether sink is HDCP2.2 capable */
- int (*hdcp_2_2_get_capability)(struct intel_connector *connector,
- bool *capable);
-
- /* Write HDCP2.2 messages */
- int (*write_2_2_msg)(struct intel_connector *connector,
- void *buf, size_t size);
-
- /* Read HDCP2.2 messages */
- int (*read_2_2_msg)(struct intel_connector *connector,
- u8 msg_id, void *buf, size_t size);
-
- /*
- * Implementation of DP HDCP2.2 Errata for the communication of stream
- * type to Receivers. In DP HDCP2.2 Stream type is one of the input to
- * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
- */
- int (*config_stream_type)(struct intel_connector *connector,
- bool is_repeater, u8 type);
-
- /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */
- int (*stream_2_2_encryption)(struct intel_connector *connector,
- bool enable);
-
- /* HDCP2.2 Link Integrity Check */
- int (*check_2_2_link)(struct intel_digital_port *dig_port,
- struct intel_connector *connector);
-
- /* HDCP remote sink cap */
- int (*get_remote_hdcp_capability)(struct intel_connector *connector,
- bool *hdcp_capable, bool *hdcp2_capable);
-};
-
struct intel_hdcp {
const struct intel_hdcp_shim *shim;
/* Mutex for hdcp state of the connector */
@@ -651,7 +534,7 @@ struct intel_connector {
struct intel_dp *mst_port;
- bool force_bigjoiner_enable;
+ int force_joined_pipes;
struct {
struct drm_dp_aux *dsc_decompression_aux;
@@ -714,6 +597,8 @@ struct intel_atomic_state {
bool skip_intermediate_wm;
bool rps_interactive;
+
+ struct work_struct cleanup_work;
};
struct intel_plane_state {
@@ -824,8 +709,8 @@ struct intel_initial_plane_config {
};
struct intel_scaler {
- int in_use;
u32 mode;
+ bool in_use;
};
struct intel_crtc_scaler_state {
@@ -896,6 +781,7 @@ struct skl_wm_level {
u8 lines;
bool enable;
bool ignore_lines;
+ bool auto_min_alloc_wm_enable;
bool can_sagv;
};
@@ -990,6 +876,13 @@ struct intel_crtc_wm_state {
struct skl_ddb_entry plane_ddb[I915_MAX_PLANES];
/* pre-icl: for planar Y */
struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+
+ /*
+ * xe3: Minimum number of display blocks and minimum
+ * SAGV allocation required for async flip
+ */
+ u16 plane_min_ddb[I915_MAX_PLANES];
+ u16 plane_interim_ddb[I915_MAX_PLANES];
} skl;
struct {
@@ -1036,6 +929,10 @@ struct intel_csc_matrix {
u16 postoff[3];
};
+void intel_io_mmio_fw_write(void *ctx, i915_reg_t reg, u32 val);
+
+typedef void (*intel_io_reg_write)(void *ctx, i915_reg_t reg, u32 val);
+
struct intel_crtc_state {
/*
* uapi (drm) state. This is the software state shown to userspace.
@@ -1263,16 +1160,11 @@ struct intel_crtc_state {
bool double_wide;
- int pbn;
-
struct intel_crtc_scaler_state scaler_state;
/* w/a for waiting 2 vblanks during crtc enable */
enum pipe hsw_workaround_pipe;
- /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
- bool disable_lp_wm;
-
struct intel_crtc_wm_state wm;
int min_cdclk[I915_MAX_PLANES];
@@ -1361,7 +1253,7 @@ struct intel_crtc_state {
/* Display Stream compression state */
struct {
bool compression_enable;
- bool dsc_split;
+ int num_streams;
/* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */
u16 compressed_bpp_x16;
u8 slice_count;
@@ -1396,8 +1288,9 @@ struct intel_crtc_state {
/* Only valid on TGL+ */
enum transcoder mst_master_transcoder;
- /* For DSB based color LUT updates */
- struct intel_dsb *dsb_color_vblank, *dsb_color_commit;
+ /* For DSB based pipe updates */
+ struct intel_dsb *dsb_color_vblank, *dsb_commit;
+ bool use_dsb;
u32 psr2_man_track_ctl;
@@ -1488,6 +1381,8 @@ struct intel_crtc {
/* armed event for async flip */
struct drm_pending_vblank_event *flip_done_event;
+ /* armed event for DSB based updates */
+ struct drm_pending_vblank_event *dsb_event;
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
@@ -1540,6 +1435,8 @@ struct intel_crtc {
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc;
#endif
+
+ bool block_dc_for_vblank;
};
struct intel_plane {
@@ -1578,22 +1475,26 @@ struct intel_plane {
u32 pixel_format, u64 modifier,
unsigned int rotation);
/* Write all non-self arming plane registers */
- void (*update_noarm)(struct intel_plane *plane,
+ void (*update_noarm)(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
/* Write all self-arming plane registers */
- void (*update_arm)(struct intel_plane *plane,
+ void (*update_arm)(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
/* Disable the plane, must arm */
- void (*disable_arm)(struct intel_plane *plane,
+ void (*disable_arm)(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
- void (*async_flip)(struct intel_plane *plane,
+ void (*async_flip)(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
bool async_flip);
@@ -1601,14 +1502,6 @@ struct intel_plane {
void (*disable_flip_done)(struct intel_plane *plane);
};
-struct intel_watermark_params {
- u16 fifo_size;
- u16 max_wm;
- u8 default_wm;
- u8 guard_size;
- u8 cacheline_size;
-};
-
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
@@ -1622,8 +1515,6 @@ struct intel_watermark_params {
#define to_intel_framebuffer(fb) \
container_of_const((fb), struct intel_framebuffer, base)
-#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
-
struct intel_hdmi {
i915_reg_t hdmi_reg;
struct {
@@ -1676,7 +1567,7 @@ struct intel_pps {
* Pipe whose power sequencer is currently locked into
* this port. Only relevant on VLV/CHV.
*/
- enum pipe pps_pipe;
+ enum pipe vlv_pps_pipe;
/*
* Power sequencer index. Only relevant on BXT+.
@@ -1689,14 +1580,14 @@ struct intel_pps {
* the use of the PPS for any pipe currently driving
* external DP as that will mess things up on VLV.
*/
- enum pipe active_pipe;
+ enum pipe vlv_active_pipe;
/*
* Set if the sequencer may be reset due to a power transition,
* requiring a reinitialization. Only relevant on BXT+.
*/
- bool pps_reset;
- struct edp_power_seq pps_delays;
- struct edp_power_seq bios_pps_delays;
+ bool bxt_pps_reset;
+ struct intel_pps_delays pps_delays;
+ struct intel_pps_delays bios_pps_delays;
};
struct intel_psr {
@@ -1745,6 +1636,8 @@ struct intel_psr {
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
u8 entry_setup_frames;
+
+ bool link_ok;
};
struct intel_dp {
@@ -1892,6 +1785,7 @@ struct intel_dp {
/* When we last wrote the OUI for eDP */
unsigned long last_oui_write;
+ bool oui_valid;
bool colorimetry_support;
@@ -1927,11 +1821,13 @@ struct intel_lspcon {
struct intel_digital_port {
struct intel_encoder base;
- u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
+
+ bool lane_reversal;
+ bool ddi_a_4_lanes;
bool release_cl2_override;
u8 max_lanes;
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
@@ -2050,7 +1946,10 @@ static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector)
{
- return enc_to_intel_dp(intel_attached_encoder(connector));
+ if (connector->mst_port)
+ return connector->mst_port;
+ else
+ return enc_to_intel_dp(intel_attached_encoder(connector));
}
static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
@@ -2067,6 +1966,19 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
}
}
+static inline bool intel_encoder_is_hdmi(struct intel_encoder *encoder)
+{
+ switch (encoder->type) {
+ case INTEL_OUTPUT_HDMI:
+ return true;
+ case INTEL_OUTPUT_DDI:
+ /* See if the HDMI encoder is valid. */
+ return i915_mmio_reg_valid(enc_to_intel_hdmi(encoder)->hdmi_reg);
+ default:
+ return false;
+ }
+}
+
static inline struct intel_lspcon *
enc_to_intel_lspcon(struct intel_encoder *encoder)
{
@@ -2207,7 +2119,7 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
* intel_display pointer.
*/
#define __drm_device_to_intel_display(p) \
- ((p) ? &to_i915(p)->display : NULL)
+ ((p) ? __drm_to_display(p) : NULL)
#define __device_to_intel_display(p) \
__drm_device_to_intel_display(dev_get_drvdata(p))
#define __pci_dev_to_intel_display(p) \
@@ -2228,6 +2140,10 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
__drm_device_to_intel_display((p)->base.dev)
#define __intel_hdmi_to_intel_display(p) \
__drm_device_to_intel_display(hdmi_to_dig_port(p)->base.base.dev)
+#define __intel_plane_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->base.dev)
+#define __intel_plane_state_to_intel_display(p) \
+ __drm_device_to_intel_display((p)->uapi.plane->dev)
/* Helper for generic association. Map types to conversion functions/macros. */
#define __assoc(type, p) \
@@ -2246,6 +2162,8 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
__assoc(intel_digital_port, p), \
__assoc(intel_dp, p), \
__assoc(intel_encoder, p), \
- __assoc(intel_hdmi, p))
+ __assoc(intel_hdmi, p), \
+ __assoc(intel_plane, p), \
+ __assoc(intel_plane_state, p))
#endif /* __INTEL_DISPLAY_TYPES_H__ */
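Editor's note: the new __assoc() entries above map pointer types to their display-conversion helpers; the macro body is truncated in this hunk, but the "Map types to conversion functions/macros" comment suggests a C11 _Generic association list. A minimal standalone sketch of that dispatch technique, with simplified stand-in types rather than the real i915 structs:

    /* Sketch only: crtc/encoder/display are stand-ins, not i915 types. */
    #include <stdio.h>

    struct display { int id; };
    struct crtc    { struct display *disp; };
    struct encoder { struct display *disp; };

    static struct display *crtc_to_display(struct crtc *c)       { return c->disp; }
    static struct display *encoder_to_display(struct encoder *e) { return e->disp; }

    /* One converter is selected at compile time based on the pointer type. */
    #define to_display(p) _Generic((p),            \
            struct crtc *:    crtc_to_display,     \
            struct encoder *: encoder_to_display)(p)

    int main(void)
    {
            struct display d = { .id = 42 };
            struct crtc c = { .disp = &d };
            struct encoder e = { .disp = &d };

            printf("%d %d\n", to_display(&c)->id, to_display(&e)->id);
            return 0;
    }

Adding a new type then means adding one __assoc-style entry, exactly as the intel_plane and intel_plane_state lines do above.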
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 7c756d5ba2a2..221d3abda791 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -52,7 +52,7 @@ enum intel_dmc_id {
};
struct intel_dmc {
- struct drm_i915_private *i915;
+ struct intel_display *display;
struct work_struct work;
const char *fw_path;
u32 max_fw_size; /* bytes */
@@ -70,21 +70,21 @@ struct intel_dmc {
};
/* Note: This may be NULL. */
-static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
+static struct intel_dmc *display_to_dmc(struct intel_display *display)
{
- return i915->display.dmc.dmc;
+ return display->dmc.dmc;
}
-static const char *dmc_firmware_param(struct drm_i915_private *i915)
+static const char *dmc_firmware_param(struct intel_display *display)
{
- const char *p = i915->display.params.dmc_firmware_path;
+ const char *p = display->params.dmc_firmware_path;
return p && *p ? p : NULL;
}
-static bool dmc_firmware_param_disabled(struct drm_i915_private *i915)
+static bool dmc_firmware_param_disabled(struct intel_display *display)
{
- const char *p = dmc_firmware_param(i915);
+ const char *p = dmc_firmware_param(display);
/* Magic path to indicate disabled */
return p && !strcmp(p, "/dev/null");
@@ -113,6 +113,9 @@ static bool dmc_firmware_param_disabled(struct drm_i915_private *i915)
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define XE3LPD_DMC_PATH DMC_PATH(xe3lpd)
+MODULE_FIRMWARE(XE3LPD_DMC_PATH);
+
#define XE2LPD_DMC_PATH DMC_PATH(xe2lpd)
MODULE_FIRMWARE(XE2LPD_DMC_PATH);
@@ -162,18 +165,22 @@ MODULE_FIRMWARE(SKL_DMC_PATH);
#define BXT_DMC_MAX_FW_SIZE 0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);
-static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size)
+static const char *dmc_firmware_default(struct intel_display *display, u32 *size)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const char *fw_path = NULL;
u32 max_fw_size = 0;
- if (DISPLAY_VER_FULL(i915) == IP_VER(20, 0)) {
+ if (DISPLAY_VERx100(display) == 3000) {
+ fw_path = XE3LPD_DMC_PATH;
+ max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VERx100(display) == 2000) {
fw_path = XE2LPD_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 1)) {
+ } else if (DISPLAY_VERx100(display) == 1401) {
fw_path = BMG_DMC_PATH;
max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
+ } else if (DISPLAY_VERx100(display) == 1400) {
fw_path = MTL_DMC_PATH;
max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
} else if (IS_DG2(i915)) {
@@ -194,7 +201,7 @@ static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size
} else if (IS_TIGERLAKE(i915)) {
fw_path = TGL_DMC_PATH;
max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
- } else if (DISPLAY_VER(i915) == 11) {
+ } else if (DISPLAY_VER(display) == 11) {
fw_path = ICL_DMC_PATH;
max_fw_size = ICL_DMC_MAX_FW_SIZE;
} else if (IS_GEMINILAKE(i915)) {
@@ -375,70 +382,70 @@ static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
}
-static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id)
+static bool has_dmc_id_fw(struct intel_display *display, enum intel_dmc_id dmc_id)
{
- struct intel_dmc *dmc = i915_to_dmc(i915);
+ struct intel_dmc *dmc = display_to_dmc(display);
return dmc && dmc->dmc_info[dmc_id].payload;
}
-bool intel_dmc_has_payload(struct drm_i915_private *i915)
+bool intel_dmc_has_payload(struct intel_display *display)
{
- return has_dmc_id_fw(i915, DMC_FW_MAIN);
+ return has_dmc_id_fw(display, DMC_FW_MAIN);
}
static const struct stepping_info *
-intel_get_stepping_info(struct drm_i915_private *i915,
+intel_get_stepping_info(struct intel_display *display,
struct stepping_info *si)
{
- const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(i915));
+ const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(display));
si->stepping = step_name[0];
si->substepping = step_name[1];
return si;
}
-static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915)
+static void gen9_set_dc_state_debugmask(struct intel_display *display)
{
/* The below bit doesn't need to be cleared ever afterwards */
- intel_de_rmw(i915, DC_STATE_DEBUG, 0,
+ intel_de_rmw(display, DC_STATE_DEBUG, 0,
DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
- intel_de_posting_read(i915, DC_STATE_DEBUG);
+ intel_de_posting_read(display, DC_STATE_DEBUG);
}
-static void disable_event_handler(struct drm_i915_private *i915,
+static void disable_event_handler(struct intel_display *display,
i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
- intel_de_write(i915, ctl_reg,
+ intel_de_write(display, ctl_reg,
REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
DMC_EVT_CTL_TYPE_EDGE_0_1) |
REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
DMC_EVT_CTL_EVENT_ID_FALSE));
- intel_de_write(i915, htp_reg, 0);
+ intel_de_write(display, htp_reg, 0);
}
-static void disable_all_event_handlers(struct drm_i915_private *i915)
+static void disable_all_event_handlers(struct intel_display *display)
{
enum intel_dmc_id dmc_id;
/* TODO: disable the event handlers on pre-GEN12 platforms as well */
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
return;
for_each_dmc_id(dmc_id) {
int handler;
- if (!has_dmc_id_fw(i915, dmc_id))
+ if (!has_dmc_id_fw(display, dmc_id))
continue;
for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
- disable_event_handler(i915,
- DMC_EVT_CTL(i915, dmc_id, handler),
- DMC_EVT_HTP(i915, dmc_id, handler));
+ disable_event_handler(display,
+ DMC_EVT_CTL(display, dmc_id, handler),
+ DMC_EVT_HTP(display, dmc_id, handler));
}
}
-static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+static void adlp_pipedmc_clock_gating_wa(struct intel_display *display, bool enable)
{
enum pipe pipe;
@@ -451,84 +458,86 @@ static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool ena
*/
if (enable)
for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
- intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL_EXT(pipe),
0, PIPEDMC_GATING_DIS);
else
for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
- intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
+ intel_de_rmw(display, CLKGATE_DIS_PSL_EXT(pipe),
PIPEDMC_GATING_DIS, 0);
}
-static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
+static void mtl_pipedmc_clock_gating_wa(struct intel_display *display)
{
/*
* Wa_16015201720
* The WA requires clock gating to be disabled all the time
* for pipes A and B.
*/
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0,
MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
}
-static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+static void pipedmc_clock_gating_wa(struct intel_display *display, bool enable)
{
- if (DISPLAY_VER(i915) >= 14 && enable)
- mtl_pipedmc_clock_gating_wa(i915);
- else if (DISPLAY_VER(i915) == 13)
- adlp_pipedmc_clock_gating_wa(i915, enable);
+ if (DISPLAY_VER(display) >= 14 && enable)
+ mtl_pipedmc_clock_gating_wa(display);
+ else if (DISPLAY_VER(display) == 13)
+ adlp_pipedmc_clock_gating_wa(display, enable);
}
-void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
+void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe)
{
enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
- if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
return;
- if (DISPLAY_VER(i915) >= 14)
- intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
else
- intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
+ intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
}
-void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
+void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe)
{
enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
- if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
+ if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
return;
- if (DISPLAY_VER(i915) >= 14)
- intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
+ if (DISPLAY_VER(display) >= 14)
+ intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
else
- intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
+ intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}
-static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
+static bool is_dmc_evt_ctl_reg(struct intel_display *display,
enum intel_dmc_id dmc_id, i915_reg_t reg)
{
u32 offset = i915_mmio_reg_offset(reg);
- u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0));
- u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
+ u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0));
+ u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
return offset >= start && offset < end;
}
-static bool is_dmc_evt_htp_reg(struct drm_i915_private *i915,
+static bool is_dmc_evt_htp_reg(struct intel_display *display,
enum intel_dmc_id dmc_id, i915_reg_t reg)
{
u32 offset = i915_mmio_reg_offset(reg);
- u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, 0));
- u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
+ u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0));
+ u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
return offset >= start && offset < end;
}
-static bool disable_dmc_evt(struct drm_i915_private *i915,
+static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
{
- if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg))
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (!is_dmc_evt_ctl_reg(display, dmc_id, reg))
return false;
/* keep all pipe DMC events disabled by default */
@@ -548,11 +557,11 @@ static bool disable_dmc_evt(struct drm_i915_private *i915,
return false;
}
-static u32 dmc_mmiodata(struct drm_i915_private *i915,
+static u32 dmc_mmiodata(struct intel_display *display,
struct intel_dmc *dmc,
enum intel_dmc_id dmc_id, int i)
{
- if (disable_dmc_evt(i915, dmc_id,
+ if (disable_dmc_evt(display, dmc_id,
dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]))
return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
@@ -565,25 +574,26 @@ static u32 dmc_mmiodata(struct drm_i915_private *i915,
/**
* intel_dmc_load_program() - write the firmware from memory to register.
- * @i915: i915 drm device.
+ * @display: display instance
*
* DMC firmware is read from a .bin file and kept in internal memory one time.
* Every time the display comes back from a low power state this function is called to
* copy the firmware from internal memory to registers.
*/
-void intel_dmc_load_program(struct drm_i915_private *i915)
+void intel_dmc_load_program(struct intel_display *display)
{
- struct i915_power_domains *power_domains = &i915->display.power.domains;
- struct intel_dmc *dmc = i915_to_dmc(i915);
+ struct drm_i915_private *i915 __maybe_unused = to_i915(display->drm);
+ struct i915_power_domains *power_domains = &display->power.domains;
+ struct intel_dmc *dmc = display_to_dmc(display);
enum intel_dmc_id dmc_id;
u32 i;
- if (!intel_dmc_has_payload(i915))
+ if (!intel_dmc_has_payload(display))
return;
- pipedmc_clock_gating_wa(i915, true);
+ pipedmc_clock_gating_wa(display, true);
- disable_all_event_handlers(i915);
+ disable_all_event_handlers(display);
assert_rpm_wakelock_held(&i915->runtime_pm);
@@ -591,7 +601,7 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
for_each_dmc_id(dmc_id) {
for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
- intel_de_write_fw(i915,
+ intel_de_write_fw(display,
DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
dmc->dmc_info[dmc_id].payload[i]);
}
@@ -601,48 +611,46 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
for_each_dmc_id(dmc_id) {
for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
- intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
- dmc_mmiodata(i915, dmc, dmc_id, i));
+ intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
+ dmc_mmiodata(display, dmc, dmc_id, i));
}
}
power_domains->dc_state = 0;
- gen9_set_dc_state_debugmask(i915);
+ gen9_set_dc_state_debugmask(display);
- pipedmc_clock_gating_wa(i915, false);
+ pipedmc_clock_gating_wa(display, false);
}
/**
* intel_dmc_disable_program() - disable the firmware
- * @i915: i915 drm device
+ * @display: display instance
*
* Disable all event handlers in the firmware, making sure the firmware is
* inactive after the display is uninitialized.
*/
-void intel_dmc_disable_program(struct drm_i915_private *i915)
+void intel_dmc_disable_program(struct intel_display *display)
{
- if (!intel_dmc_has_payload(i915))
+ if (!intel_dmc_has_payload(display))
return;
- pipedmc_clock_gating_wa(i915, true);
- disable_all_event_handlers(i915);
- pipedmc_clock_gating_wa(i915, false);
-
- intel_dmc_wl_disable(&i915->display);
+ pipedmc_clock_gating_wa(display, true);
+ disable_all_event_handlers(display);
+ pipedmc_clock_gating_wa(display, false);
}
-void assert_dmc_loaded(struct drm_i915_private *i915)
+void assert_dmc_loaded(struct intel_display *display)
{
- struct intel_dmc *dmc = i915_to_dmc(i915);
+ struct intel_dmc *dmc = display_to_dmc(display);
- drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n");
- drm_WARN_ONCE(&i915->drm, dmc &&
- !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ drm_WARN_ONCE(display->drm, !dmc, "DMC not initialized\n");
+ drm_WARN_ONCE(display->drm, dmc &&
+ !intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
"DMC program storage start is NULL\n");
- drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
+ drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_SSP_BASE),
"DMC SSP Base Not fine\n");
- drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL),
+ drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_HTP_SKL),
"DMC HTP Not fine\n");
}
@@ -673,7 +681,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
const struct stepping_info *si,
u8 package_ver)
{
- struct drm_i915_private *i915 = dmc->i915;
+ struct intel_display *display = dmc->display;
enum intel_dmc_id dmc_id;
unsigned int i;
@@ -681,7 +689,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
if (!is_valid_dmc_id(dmc_id)) {
- drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id);
+ drm_dbg(display->drm, "Unsupported firmware id: %u\n", dmc_id);
continue;
}
@@ -703,7 +711,7 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
const u32 *mmioaddr, u32 mmio_count,
int header_ver, enum intel_dmc_id dmc_id)
{
- struct drm_i915_private *i915 = dmc->i915;
+ struct intel_display *display = dmc->display;
u32 start_range, end_range;
int i;
@@ -713,14 +721,14 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
} else if (dmc_id == DMC_FW_MAIN) {
start_range = TGL_MAIN_MMIO_START;
end_range = TGL_MAIN_MMIO_END;
- } else if (DISPLAY_VER(i915) >= 13) {
+ } else if (DISPLAY_VER(display) >= 13) {
start_range = ADLP_PIPE_MMIO_START;
end_range = ADLP_PIPE_MMIO_END;
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
start_range = TGL_PIPE_MMIO_START(dmc_id);
end_range = TGL_PIPE_MMIO_END(dmc_id);
} else {
- drm_warn(&i915->drm, "Unknown mmio range for sanity check");
+ drm_warn(display->drm, "Unknown mmio range for sanity check");
return false;
}
@@ -736,7 +744,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size, enum intel_dmc_id dmc_id)
{
- struct drm_i915_private *i915 = dmc->i915;
+ struct intel_display *display = dmc->display;
struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
@@ -784,39 +792,39 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
start_mmioaddr = DMC_V1_MMIO_START_RANGE;
dmc_header_size = sizeof(*v1);
} else {
- drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
+ drm_err(display->drm, "Unknown DMC fw header version: %u\n",
dmc_header->header_ver);
return 0;
}
if (header_len_bytes != dmc_header_size) {
- drm_err(&i915->drm, "DMC firmware has wrong dmc header length "
+ drm_err(display->drm, "DMC firmware has wrong dmc header length "
"(%u bytes)\n", header_len_bytes);
return 0;
}
/* Cache the dmc header info. */
if (mmio_count > mmio_count_max) {
- drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
+ drm_err(display->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
return 0;
}
if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
dmc_header->header_ver, dmc_id)) {
- drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
+ drm_err(display->drm, "DMC firmware has Wrong MMIO Addresses\n");
return 0;
}
- drm_dbg_kms(&i915->drm, "DMC %d:\n", dmc_id);
+ drm_dbg_kms(display->drm, "DMC %d:\n", dmc_id);
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
- drm_dbg_kms(&i915->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
+ drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
i, mmioaddr[i], mmiodata[i],
- is_dmc_evt_ctl_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
- is_dmc_evt_htp_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
- disable_dmc_evt(i915, dmc_id, dmc_info->mmioaddr[i],
+ is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
+ is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
+ disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
dmc_info->mmiodata[i]) ? " (disabling)" : "");
}
dmc_info->mmio_count = mmio_count;
@@ -830,7 +838,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
goto error_truncated;
if (payload_size > dmc->max_fw_size) {
- drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
+ drm_err(display->drm, "DMC FW too big (%u bytes)\n", payload_size);
return 0;
}
dmc_info->dmc_fw_size = dmc_header->fw_size;
@@ -845,7 +853,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return header_len_bytes + payload_size;
error_truncated:
- drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
+ drm_err(display->drm, "Truncated DMC firmware, refusing.\n");
return 0;
}
@@ -855,7 +863,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
const struct stepping_info *si,
size_t rem_size)
{
- struct drm_i915_private *i915 = dmc->i915;
+ struct intel_display *display = dmc->display;
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries;
const struct intel_fw_info *fw_info;
@@ -868,7 +876,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
} else if (package_header->header_ver == 2) {
max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
} else {
- drm_err(&i915->drm, "DMC firmware has unknown header version %u\n",
+ drm_err(display->drm, "DMC firmware has unknown header version %u\n",
package_header->header_ver);
return 0;
}
@@ -882,7 +890,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
goto error_truncated;
if (package_header->header_len * 4 != package_size) {
- drm_err(&i915->drm, "DMC firmware has wrong package header length "
+ drm_err(display->drm, "DMC firmware has wrong package header length "
"(%u bytes)\n", package_size);
return 0;
}
@@ -900,7 +908,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
return package_size;
error_truncated:
- drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
+ drm_err(display->drm, "Truncated DMC firmware, refusing.\n");
return 0;
}
@@ -909,16 +917,16 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
struct intel_css_header *css_header,
size_t rem_size)
{
- struct drm_i915_private *i915 = dmc->i915;
+ struct intel_display *display = dmc->display;
if (rem_size < sizeof(struct intel_css_header)) {
- drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
+ drm_err(display->drm, "Truncated DMC firmware, refusing.\n");
return 0;
}
if (sizeof(struct intel_css_header) !=
(css_header->header_len * 4)) {
- drm_err(&i915->drm, "DMC firmware has wrong CSS header length "
+ drm_err(display->drm, "DMC firmware has wrong CSS header length "
"(%u bytes)\n",
(css_header->header_len * 4));
return 0;
@@ -931,12 +939,12 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
- struct drm_i915_private *i915 = dmc->i915;
+ struct intel_display *display = dmc->display;
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
struct stepping_info display_info = { '*', '*'};
- const struct stepping_info *si = intel_get_stepping_info(i915, &display_info);
+ const struct stepping_info *si = intel_get_stepping_info(display, &display_info);
enum intel_dmc_id dmc_id;
u32 readcount = 0;
u32 r, offset;
@@ -966,7 +974,7 @@ static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;
if (offset > fw->size) {
- drm_err(&i915->drm, "Reading beyond the fw_size\n");
+ drm_err(display->drm, "Reading beyond the fw_size\n");
continue;
}
@@ -974,30 +982,35 @@ static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
}
- if (!intel_dmc_has_payload(i915)) {
- drm_err(&i915->drm, "DMC firmware main program not found\n");
+ if (!intel_dmc_has_payload(display)) {
+ drm_err(display->drm, "DMC firmware main program not found\n");
return -ENOENT;
}
return 0;
}
-static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
+static void intel_dmc_runtime_pm_get(struct intel_display *display)
{
- drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
- i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ drm_WARN_ON(display->drm, display->dmc.wakeref);
+ display->dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
-static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915)
+static void intel_dmc_runtime_pm_put(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->display.dmc.wakeref);
+ fetch_and_zero(&display->dmc.wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
}
-static const char *dmc_fallback_path(struct drm_i915_private *i915)
+static const char *dmc_fallback_path(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (IS_ALDERLAKE_P(i915))
return ADLP_DMC_FALLBACK_PATH;
@@ -1007,45 +1020,45 @@ static const char *dmc_fallback_path(struct drm_i915_private *i915)
static void dmc_load_work_fn(struct work_struct *work)
{
struct intel_dmc *dmc = container_of(work, typeof(*dmc), work);
- struct drm_i915_private *i915 = dmc->i915;
+ struct intel_display *display = dmc->display;
const struct firmware *fw = NULL;
const char *fallback_path;
int err;
- err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);
+ err = request_firmware(&fw, dmc->fw_path, display->drm->dev);
- if (err == -ENOENT && !dmc_firmware_param(i915)) {
- fallback_path = dmc_fallback_path(i915);
+ if (err == -ENOENT && !dmc_firmware_param(display)) {
+ fallback_path = dmc_fallback_path(display);
if (fallback_path) {
- drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
+ drm_dbg_kms(display->drm, "%s not found, falling back to %s\n",
dmc->fw_path, fallback_path);
- err = request_firmware(&fw, fallback_path, i915->drm.dev);
+ err = request_firmware(&fw, fallback_path, display->drm->dev);
if (err == 0)
dmc->fw_path = fallback_path;
}
}
if (err) {
- drm_notice(&i915->drm,
+ drm_notice(display->drm,
"Failed to load DMC firmware %s (%pe). Disabling runtime power management.\n",
dmc->fw_path, ERR_PTR(err));
- drm_notice(&i915->drm, "DMC firmware homepage: %s",
+ drm_notice(display->drm, "DMC firmware homepage: %s",
INTEL_DMC_FIRMWARE_URL);
return;
}
err = parse_dmc_fw(dmc, fw);
if (err) {
- drm_notice(&i915->drm,
+ drm_notice(display->drm,
"Failed to parse DMC firmware %s (%pe). Disabling runtime power management.\n",
dmc->fw_path, ERR_PTR(err));
goto out;
}
- intel_dmc_load_program(i915);
- intel_dmc_runtime_pm_put(i915);
+ intel_dmc_load_program(display);
+ intel_dmc_runtime_pm_put(display);
- drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
+ drm_info(display->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
DMC_VERSION_MINOR(dmc->version));
@@ -1055,16 +1068,17 @@ out:
/**
* intel_dmc_init() - initialize the firmware loading.
- * @i915: i915 drm device.
+ * @display: display instance
*
* This function is called at the time of loading the display driver to read
* the firmware from a .bin file and copy it into internal memory.
*/
-void intel_dmc_init(struct drm_i915_private *i915)
+void intel_dmc_init(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_dmc *dmc;
- if (!HAS_DMC(i915))
+ if (!HAS_DMC(display))
return;
/*
@@ -1075,35 +1089,35 @@ void intel_dmc_init(struct drm_i915_private *i915)
* suspend as runtime suspend *requires* a working DMC for whatever
* reason.
*/
- intel_dmc_runtime_pm_get(i915);
+ intel_dmc_runtime_pm_get(display);
dmc = kzalloc(sizeof(*dmc), GFP_KERNEL);
if (!dmc)
return;
- dmc->i915 = i915;
+ dmc->display = display;
INIT_WORK(&dmc->work, dmc_load_work_fn);
- dmc->fw_path = dmc_firmware_default(i915, &dmc->max_fw_size);
+ dmc->fw_path = dmc_firmware_default(display, &dmc->max_fw_size);
- if (dmc_firmware_param_disabled(i915)) {
- drm_info(&i915->drm, "Disabling DMC firmware and runtime PM\n");
+ if (dmc_firmware_param_disabled(display)) {
+ drm_info(display->drm, "Disabling DMC firmware and runtime PM\n");
goto out;
}
- if (dmc_firmware_param(i915))
- dmc->fw_path = dmc_firmware_param(i915);
+ if (dmc_firmware_param(display))
+ dmc->fw_path = dmc_firmware_param(display);
if (!dmc->fw_path) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"No known DMC firmware for platform, disabling runtime PM\n");
goto out;
}
- i915->display.dmc.dmc = dmc;
+ display->dmc.dmc = dmc;
- drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
+ drm_dbg_kms(display->drm, "Loading %s\n", dmc->fw_path);
queue_work(i915->unordered_wq, &dmc->work);
return;
@@ -1114,129 +1128,150 @@ out:
/**
* intel_dmc_suspend() - prepare DMC firmware before system suspend
- * @i915: i915 drm device
+ * @display: display instance
*
* Prepare the DMC firmware before entering system suspend. This includes
* flushing pending work items and releasing any resources acquired during
* init.
*/
-void intel_dmc_suspend(struct drm_i915_private *i915)
+void intel_dmc_suspend(struct intel_display *display)
{
- struct intel_dmc *dmc = i915_to_dmc(i915);
+ struct intel_dmc *dmc = display_to_dmc(display);
- if (!HAS_DMC(i915))
+ if (!HAS_DMC(display))
return;
if (dmc)
flush_work(&dmc->work);
- intel_dmc_wl_disable(&i915->display);
-
/* Drop the reference held in case DMC isn't loaded. */
- if (!intel_dmc_has_payload(i915))
- intel_dmc_runtime_pm_put(i915);
+ if (!intel_dmc_has_payload(display))
+ intel_dmc_runtime_pm_put(display);
}
/**
* intel_dmc_resume() - init DMC firmware during system resume
- * @i915: i915 drm device
+ * @display: display instance
*
* Reinitialize the DMC firmware during system resume, reacquiring any
* resources released in intel_dmc_suspend().
*/
-void intel_dmc_resume(struct drm_i915_private *i915)
+void intel_dmc_resume(struct intel_display *display)
{
- if (!HAS_DMC(i915))
+ if (!HAS_DMC(display))
return;
/*
* Reacquire the reference to keep RPM disabled in case DMC isn't
* loaded.
*/
- if (!intel_dmc_has_payload(i915))
- intel_dmc_runtime_pm_get(i915);
+ if (!intel_dmc_has_payload(display))
+ intel_dmc_runtime_pm_get(display);
}
/**
* intel_dmc_fini() - unload the DMC firmware.
- * @i915: i915 drm device.
+ * @display: display instance
*
* Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
-void intel_dmc_fini(struct drm_i915_private *i915)
+void intel_dmc_fini(struct intel_display *display)
{
- struct intel_dmc *dmc = i915_to_dmc(i915);
+ struct intel_dmc *dmc = display_to_dmc(display);
enum intel_dmc_id dmc_id;
- if (!HAS_DMC(i915))
+ if (!HAS_DMC(display))
return;
- intel_dmc_suspend(i915);
- drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
+ intel_dmc_suspend(display);
+ drm_WARN_ON(display->drm, display->dmc.wakeref);
if (dmc) {
for_each_dmc_id(dmc_id)
kfree(dmc->dmc_info[dmc_id].payload);
kfree(dmc);
- i915->display.dmc.dmc = NULL;
+ display->dmc.dmc = NULL;
}
}
-void intel_dmc_print_error_state(struct drm_printer *p,
- struct drm_i915_private *i915)
+struct intel_dmc_snapshot {
+ bool initialized;
+ bool loaded;
+ u32 version;
+};
+
+struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display)
{
- struct intel_dmc *dmc = i915_to_dmc(i915);
+ struct intel_dmc *dmc = display_to_dmc(display);
+ struct intel_dmc_snapshot *snapshot;
- if (!HAS_DMC(i915))
- return;
+ if (!HAS_DMC(display))
+ return NULL;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
+ if (!snapshot)
+ return NULL;
- drm_printf(p, "DMC initialized: %s\n", str_yes_no(dmc));
- drm_printf(p, "DMC loaded: %s\n",
- str_yes_no(intel_dmc_has_payload(i915)));
+ snapshot->initialized = dmc;
+ snapshot->loaded = intel_dmc_has_payload(display);
if (dmc)
+ snapshot->version = dmc->version;
+
+ return snapshot;
+}
+
+void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p)
+{
+ if (!snapshot)
+ return;
+
+ drm_printf(p, "DMC initialized: %s\n", str_yes_no(snapshot->initialized));
+ drm_printf(p, "DMC loaded: %s\n", str_yes_no(snapshot->loaded));
+ if (snapshot->initialized)
drm_printf(p, "DMC fw version: %d.%d\n",
- DMC_VERSION_MAJOR(dmc->version),
- DMC_VERSION_MINOR(dmc->version));
+ DMC_VERSION_MAJOR(snapshot->version),
+ DMC_VERSION_MINOR(snapshot->version));
}
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
- struct drm_i915_private *i915 = m->private;
- struct intel_dmc *dmc = i915_to_dmc(i915);
+ struct intel_display *display = m->private;
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_dmc *dmc = display_to_dmc(display);
intel_wakeref_t wakeref;
i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
- if (!HAS_DMC(i915))
+ if (!HAS_DMC(display))
return -ENODEV;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
seq_printf(m, "fw loaded: %s\n",
- str_yes_no(intel_dmc_has_payload(i915)));
+ str_yes_no(intel_dmc_has_payload(display)));
seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
seq_printf(m, "Pipe A fw needed: %s\n",
- str_yes_no(DISPLAY_VER(i915) >= 12));
+ str_yes_no(DISPLAY_VER(display) >= 12));
seq_printf(m, "Pipe A fw loaded: %s\n",
- str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
+ str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEA)));
seq_printf(m, "Pipe B fw needed: %s\n",
str_yes_no(IS_ALDERLAKE_P(i915) ||
- DISPLAY_VER(i915) >= 14));
+ DISPLAY_VER(display) >= 14));
seq_printf(m, "Pipe B fw loaded: %s\n",
- str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB)));
+ str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEB)));
- if (!intel_dmc_has_payload(i915))
+ if (!intel_dmc_has_payload(display))
goto out;
seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
DMC_VERSION_MINOR(dmc->version));
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
i915_reg_t dc3co_reg;
- if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) {
+ if (IS_DGFX(i915) || DISPLAY_VER(display) >= 14) {
dc3co_reg = DG1_DMC_DEBUG3;
dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
} else {
@@ -1246,7 +1281,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
}
seq_printf(m, "DC3CO count: %d\n",
- intel_de_read(i915, dc3co_reg));
+ intel_de_read(display, dc3co_reg));
} else {
dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
SKL_DMC_DC3_DC5_COUNT;
@@ -1254,18 +1289,18 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
dc6_reg = SKL_DMC_DC5_DC6_COUNT;
}
- seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg));
+ seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg));
if (i915_mmio_reg_valid(dc6_reg))
seq_printf(m, "DC5 -> DC6 count: %d\n",
- intel_de_read(i915, dc6_reg));
+ intel_de_read(display, dc6_reg));
seq_printf(m, "program base: 0x%08x\n",
- intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
+ intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
out:
seq_printf(m, "ssp base: 0x%08x\n",
- intel_de_read(i915, DMC_SSP_BASE));
- seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));
+ intel_de_read(display, DMC_SSP_BASE));
+ seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL));
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -1274,10 +1309,10 @@ out:
DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);
-void intel_dmc_debugfs_register(struct drm_i915_private *i915)
+void intel_dmc_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
- i915, &intel_dmc_debugfs_status_fops);
+ display, &intel_dmc_debugfs_status_fops);
}
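Editor's note: the intel_dmc_print_error_state() -> snapshot_capture()/snapshot_print() split above follows a capture-then-format pattern: state is recorded with a non-sleeping GFP_ATOMIC allocation at error time and rendered later from the snapshot alone. A userspace sketch of the same pattern; the names and the major/minor version packing here are illustrative assumptions, not the i915 definitions:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fw_state    { bool loaded; unsigned int version; };
    struct fw_snapshot { bool initialized; bool loaded; unsigned int version; };

    static struct fw_snapshot *fw_snapshot_capture(const struct fw_state *fw)
    {
            /* calloc() stands in for kzalloc(..., GFP_ATOMIC) */
            struct fw_snapshot *snap = calloc(1, sizeof(*snap));

            if (!snap)
                    return NULL;

            snap->initialized = fw != NULL;
            if (fw) {
                    snap->loaded = fw->loaded;
                    snap->version = fw->version;
            }
            return snap;
    }

    static void fw_snapshot_print(const struct fw_snapshot *snap, FILE *p)
    {
            if (!snap)
                    return;
            fprintf(p, "initialized: %s\n", snap->initialized ? "yes" : "no");
            fprintf(p, "loaded: %s\n", snap->loaded ? "yes" : "no");
            if (snap->initialized)   /* assumed major<<16 | minor packing */
                    fprintf(p, "fw version: %u.%u\n", snap->version >> 16,
                            snap->version & 0xffff);
    }

    int main(void)
    {
            struct fw_state fw = { .loaded = true, .version = (2u << 16) | 21 };
            struct fw_snapshot *snap = fw_snapshot_capture(&fw);

            fw_snapshot_print(snap, stdout);
            free(snap);
            return 0;
    }

The design point is that printing needs no locks or device access, so the snapshot can be captured from an error path and dumped much later.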
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 54cff6002e31..44cecef98e73 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -9,22 +9,24 @@
#include <linux/types.h>
enum pipe;
-struct drm_i915_private;
struct drm_printer;
+struct intel_display;
+struct intel_dmc_snapshot;
-void intel_dmc_init(struct drm_i915_private *i915);
-void intel_dmc_load_program(struct drm_i915_private *i915);
-void intel_dmc_disable_program(struct drm_i915_private *i915);
-void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe);
-void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe);
-void intel_dmc_fini(struct drm_i915_private *i915);
-void intel_dmc_suspend(struct drm_i915_private *i915);
-void intel_dmc_resume(struct drm_i915_private *i915);
-bool intel_dmc_has_payload(struct drm_i915_private *i915);
-void intel_dmc_debugfs_register(struct drm_i915_private *i915);
-void intel_dmc_print_error_state(struct drm_printer *p,
- struct drm_i915_private *i915);
+void intel_dmc_init(struct intel_display *display);
+void intel_dmc_load_program(struct intel_display *display);
+void intel_dmc_disable_program(struct intel_display *display);
+void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe);
+void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe);
+void intel_dmc_fini(struct intel_display *display);
+void intel_dmc_suspend(struct intel_display *display);
+void intel_dmc_resume(struct intel_display *display);
+bool intel_dmc_has_payload(struct intel_display *display);
+void intel_dmc_debugfs_register(struct intel_display *display);
-void assert_dmc_loaded(struct drm_i915_private *i915);
+struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display);
+void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p);
+
+void assert_dmc_loaded(struct intel_display *display);
#endif /* __INTEL_DMC_H__ */
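Editor's note: intel_dmc.h forward-declares struct intel_dmc_snapshot while its definition stays private to intel_dmc.c, the classic opaque-handle pattern. A minimal header sketch of the same idea, with hypothetical names:

    /* fw_snap.h -- sketch: callers may hold and pass the pointer, but the
     * struct layout is visible only inside fw_snap.c. */
    #ifndef FW_SNAP_H
    #define FW_SNAP_H

    struct fw_device;     /* opaque to callers */
    struct fw_snapshot;   /* layout private to fw_snap.c */

    struct fw_snapshot *fw_snapshot_capture(struct fw_device *dev);
    void fw_snapshot_print(const struct fw_snapshot *snap);

    #endif /* FW_SNAP_H */

This keeps the snapshot ABI free to change without touching any caller.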
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index d9864b9cc429..02de3ae15074 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -5,6 +5,10 @@
#include <linux/kernel.h>
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
@@ -39,7 +43,11 @@
* potential future use.
*/
-#define DMC_WAKELOCK_CTL_TIMEOUT 5
+/*
+ * DMC_WAKELOCK_CTL_TIMEOUT_US is defined in microseconds because we use the
+ * atomic variant of the MMIO waiting routine.
+ */
+#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
#define DMC_WAKELOCK_HOLD_TIME 50
struct intel_dmc_wl_range {
@@ -47,8 +55,90 @@ struct intel_dmc_wl_range {
u32 end;
};
-static struct intel_dmc_wl_range lnl_wl_range[] = {
+static const struct intel_dmc_wl_range powered_off_ranges[] = {
{ .start = 0x60000, .end = 0x7ffff },
+ {},
+};
+
+static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
+ { .start = 0x45500 }, /* DC_STATE_SEL */
+ { .start = 0x457a0, .end = 0x457b0 }, /* DC*_RESIDENCY_COUNTER */
+ { .start = 0x45504 }, /* DC_STATE_EN */
+ { .start = 0x45400, .end = 0x4540c }, /* PWR_WELL_CTL_* */
+ { .start = 0x454f0 }, /* RETENTION_CTRL */
+
+ /* DBUF_CTL_* */
+ { .start = 0x44300 },
+ { .start = 0x44304 },
+ { .start = 0x44f00 },
+ { .start = 0x44f04 },
+ { .start = 0x44fe8 },
+ { .start = 0x45008 },
+
+ { .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
+ { .start = 0x46000 }, /* CDCLK_CTL */
+ { .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
+
+ /* TRANS_CMTG_CTL_* */
+ { .start = 0x6fa88 },
+ { .start = 0x6fb88 },
+
+ { .start = 0x46430 }, /* CHICKEN_DCPR_1 */
+ { .start = 0x46434 }, /* CHICKEN_DCPR_2 */
+ { .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
+ { .start = 0x42084 }, /* CHICKEN_MISC_2 */
+ { .start = 0x42088 }, /* CHICKEN_MISC_3 */
+ { .start = 0x46160 }, /* CMTG_CLK_SEL */
+ { .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
+
+ {},
+};
+
+static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
+ { .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
+
+ { .start = 0x45504 }, /* DC_STATE_EN */
+
+ /* DBUF_CTL_* */
+ { .start = 0x44300 },
+ { .start = 0x44304 },
+ { .start = 0x44f00 },
+ { .start = 0x44f04 },
+ { .start = 0x44fe8 },
+ { .start = 0x45008 },
+
+ { .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
+ { .start = 0x46000 }, /* CDCLK_CTL */
+ { .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
+ { .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
+
+ /* Scanline registers */
+ { .start = 0x70000 },
+ { .start = 0x70004 },
+ { .start = 0x70014 },
+ { .start = 0x70018 },
+ { .start = 0x71000 },
+ { .start = 0x71004 },
+ { .start = 0x71014 },
+ { .start = 0x71018 },
+ { .start = 0x72000 },
+ { .start = 0x72004 },
+ { .start = 0x72014 },
+ { .start = 0x72018 },
+ { .start = 0x73000 },
+ { .start = 0x73004 },
+ { .start = 0x73014 },
+ { .start = 0x73018 },
+ { .start = 0x7b000 },
+ { .start = 0x7b004 },
+ { .start = 0x7b014 },
+ { .start = 0x7b018 },
+ { .start = 0x7c000 },
+ { .start = 0x7c004 },
+ { .start = 0x7c014 },
+ { .start = 0x7c018 },
+
+ {},
};
static void __intel_dmc_wl_release(struct intel_display *display)
@@ -72,15 +162,18 @@ static void intel_dmc_wl_work(struct work_struct *work)
spin_lock_irqsave(&wl->lock, flags);
- /* Bail out if refcount reached zero while waiting for the spinlock */
- if (!refcount_read(&wl->refcount))
+ /*
+ * Bail out if refcount became non-zero while waiting for the spinlock,
+ * meaning that the lock is now taken again.
+ */
+ if (refcount_read(&wl->refcount))
goto out_unlock;
__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
- if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK, 0,
- DMC_WAKELOCK_CTL_TIMEOUT)) {
+ if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK, 0,
+ DMC_WAKELOCK_CTL_TIMEOUT_US)) {
WARN_RATELIMIT(1, "DMC wakelock release timed out");
goto out_unlock;
}
@@ -91,40 +184,110 @@ out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
-static bool intel_dmc_wl_check_range(u32 address)
+static void __intel_dmc_wl_take(struct intel_display *display)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+
+ /*
+ * Only try to take the wakelock if it's not marked as taken
+ * yet. It may be already taken at this point if we have
+ * already released the last reference, but the work has not
+ * run yet.
+ */
+ if (wl->taken)
+ return;
+
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
+ DMC_WAKELOCK_CTL_REQ);
+
+ /*
+ * We need to use the atomic variant of the waiting routine
+ * because the DMC wakelock is also taken in atomic context.
+ */
+ if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_TIMEOUT_US)) {
+ WARN_RATELIMIT(1, "DMC wakelock ack timed out");
+ return;
+ }
+
+ wl->taken = true;
+}
+
+static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
+ const struct intel_dmc_wl_range ranges[])
{
- int i;
- bool wl_needed = false;
-
- for (i = 0; i < ARRAY_SIZE(lnl_wl_range); i++) {
- if (address >= lnl_wl_range[i].start &&
- address <= lnl_wl_range[i].end) {
- wl_needed = true;
- break;
- }
+ u32 offset = i915_mmio_reg_offset(reg);
+
+ for (int i = 0; ranges[i].start; i++) {
+ u32 end = ranges[i].end ?: ranges[i].start;
+
+ if (ranges[i].start <= offset && offset <= end)
+ return true;
}
- return wl_needed;
+ return false;
}
-static bool __intel_dmc_wl_supported(struct intel_display *display)
+static bool intel_dmc_wl_check_range(i915_reg_t reg, u32 dc_state)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct intel_dmc_wl_range *ranges;
- if (DISPLAY_VER(display) < 20 ||
- !intel_dmc_has_payload(i915) ||
- !display->params.enable_dmc_wl)
- return false;
+ /*
+ * Check that the offset is in one of the ranges for which
+ * registers are powered off during DC states.
+ */
+ if (intel_dmc_wl_reg_in_range(reg, powered_off_ranges))
+ return true;
- return true;
+ /*
+ * Check that the offset is for a register that is touched by
+ * the DMC and requires a DC exit for proper access.
+ */
+ switch (dc_state) {
+ case DC_STATE_EN_DC3CO:
+ ranges = xe3lpd_dc3co_dmc_ranges;
+ break;
+ case DC_STATE_EN_UPTO_DC5:
+ case DC_STATE_EN_UPTO_DC6:
+ ranges = xe3lpd_dc5_dc6_dmc_ranges;
+ break;
+ default:
+ ranges = NULL;
+ }
+
+ if (ranges && intel_dmc_wl_reg_in_range(reg, ranges))
+ return true;
+
+ return false;
+}
+
+static bool __intel_dmc_wl_supported(struct intel_display *display)
+{
+ return display->params.enable_dmc_wl && intel_dmc_has_payload(display);
+}
+
+static void intel_dmc_wl_sanitize_param(struct intel_display *display)
+{
+ if (!HAS_DMC_WAKELOCK(display))
+ display->params.enable_dmc_wl = 0;
+ else if (display->params.enable_dmc_wl >= 0)
+ display->params.enable_dmc_wl = !!display->params.enable_dmc_wl;
+ else
+ display->params.enable_dmc_wl = DISPLAY_VER(display) >= 30;
+
+ drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d\n",
+ display->params.enable_dmc_wl);
}
void intel_dmc_wl_init(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
- /* don't call __intel_dmc_wl_supported(), DMC is not loaded yet */
- if (DISPLAY_VER(display) < 20 || !display->params.enable_dmc_wl)
+ intel_dmc_wl_sanitize_param(display);
+
+ if (!display->params.enable_dmc_wl)
return;
INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
@@ -132,7 +295,8 @@ void intel_dmc_wl_init(struct intel_display *display)
refcount_set(&wl->refcount, 0);
}
-void intel_dmc_wl_enable(struct intel_display *display)
+/* Must only be called as part of enabling dynamic DC states. */
+void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
struct intel_dmc_wl *wl = &display->wl;
unsigned long flags;
@@ -142,7 +306,9 @@ void intel_dmc_wl_enable(struct intel_display *display)
spin_lock_irqsave(&wl->lock, flags);
- if (wl->enabled)
+ wl->dc_state = dc_state;
+
+ if (drm_WARN_ON(display->drm, wl->enabled))
goto out_unlock;
/*
@@ -153,12 +319,29 @@ void intel_dmc_wl_enable(struct intel_display *display)
__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
wl->enabled = true;
- wl->taken = false;
+
+ /*
+ * This would be racy in the following scenario:
+ *
+ * 1. Function A calls intel_dmc_wl_get();
+ * 2. Some function calls intel_dmc_wl_disable();
+ * 3. Some function calls intel_dmc_wl_enable();
+ * 4. Concurrently with (3), function A performs the MMIO in between
+ * setting DMC_WAKELOCK_CFG_ENABLE and asserting the lock with
+ * __intel_dmc_wl_take().
+ *
+ * TODO: Check with the hardware team whether it is safe to assert the
+ * hardware lock before enabling to avoid such a scenario. Otherwise, we
+ * would need to deal with it via software synchronization.
+ */
+ if (refcount_read(&wl->refcount))
+ __intel_dmc_wl_take(display);
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
+/* Must only be called as part of disabling dynamic DC states. */
void intel_dmc_wl_disable(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
@@ -167,39 +350,62 @@ void intel_dmc_wl_disable(struct intel_display *display)
if (!__intel_dmc_wl_supported(display))
return;
- flush_delayed_work(&wl->work);
+ intel_dmc_wl_flush_release_work(display);
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (drm_WARN_ON(display->drm, !wl->enabled))
goto out_unlock;
/* Disable wakelock in DMC */
__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
- refcount_set(&wl->refcount, 0);
wl->enabled = false;
+
+ /*
+ * The spec is not explicit about the expectation of existing
+ * lock users at the moment of disabling, but it does say that we must
+ * clear DMC_WAKELOCK_CTL_REQ, which gives us a clue that it is okay to
+ * disable with existing lock users.
+ *
+ * TODO: Get the correct expectation from the hardware team.
+ */
+ __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+
wl->taken = false;
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
-void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
- unsigned long flags;
if (!__intel_dmc_wl_supported(display))
return;
- if (!intel_dmc_wl_check_range(reg.reg))
+ flush_delayed_work(&wl->work);
+}
+
+void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
+{
+ struct intel_dmc_wl *wl = &display->wl;
+ unsigned long flags;
+
+ if (!__intel_dmc_wl_supported(display))
return;
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
+ goto out_unlock;
+
+ if (!wl->enabled) {
+ if (!refcount_inc_not_zero(&wl->refcount))
+ refcount_set(&wl->refcount, 1);
goto out_unlock;
+ }
cancel_delayed_work(&wl->work);
@@ -208,26 +414,7 @@ void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
refcount_set(&wl->refcount, 1);
- /*
- * Only try to take the wakelock if it's not marked as taken
- * yet. It may be already taken at this point if we have
- * already released the last reference, but the work has not
- * run yet.
- */
- if (!wl->taken) {
- __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
- DMC_WAKELOCK_CTL_REQ);
-
- if (__intel_de_wait_for_register_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_TIMEOUT)) {
- WARN_RATELIMIT(1, "DMC wakelock ack timed out");
- goto out_unlock;
- }
-
- wl->taken = true;
- }
+ __intel_dmc_wl_take(display);
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
@@ -241,12 +428,9 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
if (!__intel_dmc_wl_supported(display))
return;
- if (!intel_dmc_wl_check_range(reg.reg))
- return;
-
spin_lock_irqsave(&wl->lock, flags);
- if (!wl->enabled)
+ if (i915_mmio_reg_valid(reg) && !intel_dmc_wl_check_range(reg, wl->dc_state))
goto out_unlock;
if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
@@ -254,6 +438,9 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
goto out_unlock;
if (refcount_dec_and_test(&wl->refcount)) {
+ if (!wl->enabled)
+ goto out_unlock;
+
__intel_dmc_wl_release(display);
goto out_unlock;
@@ -262,3 +449,13 @@ void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
out_unlock:
spin_unlock_irqrestore(&wl->lock, flags);
}
+
+void intel_dmc_wl_get_noreg(struct intel_display *display)
+{
+ intel_dmc_wl_get(display, INVALID_MMIO_REG);
+}
+
+void intel_dmc_wl_put_noreg(struct intel_display *display)
+{
+ intel_dmc_wl_put(display, INVALID_MMIO_REG);
+}
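
A minimal usage sketch of the new _noreg variants (not part of the patch; the caller shown is hypothetical):

	/*
	 * Hypothetical caller: code that touches DC-state-protected hardware
	 * without going through a tracked MMIO accessor can hold the DMC
	 * wakelock explicitly around the whole critical section.
	 */
	static void example_poke_protected_state(struct intel_display *display)
	{
		/* No register offset to check against the tracked ranges. */
		intel_dmc_wl_get_noreg(display);

		/* ... program state that DC5/DC6 would otherwise lose ... */

		intel_dmc_wl_put_noreg(display);
	}

Passing INVALID_MMIO_REG makes intel_dmc_wl_get()/intel_dmc_wl_put() skip the intel_dmc_wl_check_range() filtering and take/release the wakelock unconditionally.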
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.h b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
index adab51208d0a..5488fbdf29b8 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.h
@@ -15,17 +15,27 @@
struct intel_display;
struct intel_dmc_wl {
- spinlock_t lock; /* protects enabled, taken and refcount */
+ spinlock_t lock; /* protects enabled, taken, dc_state and refcount */
bool enabled;
bool taken;
refcount_t refcount;
+ /*
+ * We are keeping a copy of the enabled DC state because
+ * intel_display.power.domains is protected by a mutex and we do
+ * not want to call mutex_lock() in atomic context, where some of
+ * the tracked MMIO operations happen.
+ */
+ u32 dc_state;
struct delayed_work work;
};
void intel_dmc_wl_init(struct intel_display *display);
-void intel_dmc_wl_enable(struct intel_display *display);
+void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state);
void intel_dmc_wl_disable(struct intel_display *display);
+void intel_dmc_wl_flush_release_work(struct intel_display *display);
void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg);
void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg);
+void intel_dmc_wl_get_noreg(struct intel_display *display);
+void intel_dmc_wl_put_noreg(struct intel_display *display);
#endif /* __INTEL_WAKELOCK_H__ */
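
A sketch of the intended enable flow under the new signature; this is an assumption drawn from the header change, with a hypothetical caller and the DC_STATE_DISABLE value borrowed from the existing power-domains code:

	/*
	 * Hypothetical caller: the dynamic DC state update path hands the
	 * target DC state to the wakelock code, which snapshots it into
	 * wl->dc_state under its own spinlock so later range checks can run
	 * in atomic context without taking the power-domains mutex.
	 */
	static void example_dc_state_update(struct intel_display *display, u32 dc_state)
	{
		if (dc_state == DC_STATE_DISABLE)
			intel_dmc_wl_disable(display);
		else
			intel_dmc_wl_enable(display, dc_state);
	}

intel_dmc_wl_flush_release_work() is there for callers that must guarantee the delayed release work has finished before proceeding, as intel_dmc_wl_disable() itself now does.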
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 90fa73575feb..aa77ddcee42c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -28,6 +28,7 @@
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
+#include <linux/seq_buf.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/string_helpers.h>
@@ -67,6 +68,7 @@
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
+#include "intel_dp_test.h"
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
@@ -82,16 +84,16 @@
#include "intel_modeset_lock.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
+#include "intel_pfit.h"
#include "intel_pps.h"
#include "intel_psr.h"
+#include "intel_runtime_pm.h"
#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
#include "intel_crtc_state_dump.h"
-#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
-
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
@@ -103,20 +105,22 @@
/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530
-/* Compliance test status bits */
-#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
-#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
-#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
-#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
-
-
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
-/* With Single pipe configuration, HW is capable of supporting maximum
- * of 4 slices per line.
+/*
+ * With a single pipe configuration, HW is capable of supporting a maximum of:
+ * 2 slices per line for ICL, BMG
+ * 4 slices per line for other platforms.
+ * For now consider a max of 2 slices per line, which works for all platforms.
+ * With this we can have a max of 4 DSC slices per pipe.
+ *
+ * For higher resolutions, where 12 slice support is required with the
+ * ultrajoiner, each pipe can support 3 slices.
+ *
+ * #TODO Split this better to use 4 slices/dsc engine where supported.
*/
-static const u8 valid_dsc_slicecount[] = {1, 2, 4};
+static const u8 valid_dsc_slicecount[] = {1, 2, 3, 4};
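
Worked through with the new table: the slice-count loop in intel_dp_dsc_get_slice_count() (later in this patch) multiplies each per-pipe entry by num_joined_pipes, so with 4 joined pipes (ultrajoiner) the candidate totals are 2*4 = 8, 3*4 = 12 and 4*4 = 16 slices (the 1-slice entry is skipped because any joiner configuration needs at least 2 slices per pipe); with 2 joined pipes only 2*2 = 4 and 4*2 = 8 remain, since the 3-slice entry is restricted to the 4-pipe case.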
/**
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
@@ -261,6 +265,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
@@ -270,7 +275,7 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
if (intel_dp->num_sink_rates)
return;
- drm_err(&dp_to_i915(intel_dp)->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
@@ -285,6 +290,7 @@ static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
@@ -298,7 +304,7 @@ static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
return;
}
- drm_err(&dp_to_i915(intel_dp)->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
@@ -331,7 +337,9 @@ static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
{
- if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ if (drm_WARN_ON(display->drm,
index < 0 || index >= intel_dp->num_common_rates))
return 162000;
@@ -458,16 +466,16 @@ int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
bool intel_dp_has_joiner(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
/* eDP MSO is not compatible with joiner */
if (intel_dp->mso_link_count)
return false;
- return DISPLAY_VER(dev_priv) >= 12 ||
- (DISPLAY_VER(dev_priv) == 11 &&
+ return DISPLAY_VER(display) >= 12 ||
+ (DISPLAY_VER(display) == 11 &&
encoder->port != PORT_A);
}
@@ -496,12 +504,13 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp)
static int mtl_max_source_rate(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (intel_encoder_is_c10phy(encoder))
return 810000;
- if (DISPLAY_VER_FULL(to_i915(encoder->base.dev)) == IP_VER(14, 1))
+ if (DISPLAY_VERx100(display) == 1401)
return 1350000;
return 2000000;
@@ -555,17 +564,16 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
static const int g4x_rates[] = {
162000, 270000
};
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
const int *source_rates;
int size, max_rate = 0, vbt_max_rate;
/* This should only be done once */
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->source_rates || intel_dp->num_source_rates);
- if (DISPLAY_VER(dev_priv) >= 14) {
- if (IS_BATTLEMAGE(dev_priv)) {
+ if (DISPLAY_VER(display) >= 14) {
+ if (display->platform.battlemage) {
source_rates = bmg_rates;
size = ARRAY_SIZE(bmg_rates);
} else {
@@ -573,26 +581,26 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
size = ARRAY_SIZE(mtl_rates);
}
max_rate = mtl_max_source_rate(intel_dp);
- } else if (DISPLAY_VER(dev_priv) >= 11) {
+ } else if (DISPLAY_VER(display) >= 11) {
source_rates = icl_rates;
size = ARRAY_SIZE(icl_rates);
- if (IS_DG2(dev_priv))
+ if (display->platform.dg2)
max_rate = dg2_max_source_rate(intel_dp);
- else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
- IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ else if (display->platform.alderlake_p || display->platform.alderlake_s ||
+ display->platform.dg1 || display->platform.rocketlake)
max_rate = 810000;
- else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ else if (display->platform.jasperlake || display->platform.elkhartlake)
max_rate = ehl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
- } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ } else if (display->platform.geminilake || display->platform.broxton) {
source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
- } else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) ||
- IS_BROADWELL(dev_priv)) {
+ } else if ((display->platform.haswell && !display->platform.haswell_ulx) ||
+ display->platform.broadwell) {
source_rates = hsw_rates;
size = ARRAY_SIZE(hsw_rates);
} else {
@@ -683,18 +691,18 @@ static int link_config_cmp_by_bw(const void *a, const void *b, const void *p)
static void intel_dp_link_config_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_dp_link_config *lc;
int num_common_lane_configs;
int i;
int j;
- if (drm_WARN_ON(&i915->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp))))
+ if (drm_WARN_ON(display->drm, !is_power_of_2(intel_dp_max_common_lane_count(intel_dp))))
return;
num_common_lane_configs = ilog2(intel_dp_max_common_lane_count(intel_dp)) + 1;
- if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates * num_common_lane_configs >
+ if (drm_WARN_ON(display->drm, intel_dp->num_common_rates * num_common_lane_configs >
ARRAY_SIZE(intel_dp->link.configs)))
return;
@@ -718,10 +726,10 @@ static void intel_dp_link_config_init(struct intel_dp *intel_dp)
void intel_dp_link_config_get(struct intel_dp *intel_dp, int idx, int *link_rate, int *lane_count)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct intel_dp_link_config *lc;
- if (drm_WARN_ON(&i915->drm, idx < 0 || idx >= intel_dp->link.num_configs))
+ if (drm_WARN_ON(display->drm, idx < 0 || idx >= intel_dp->link.num_configs))
idx = 0;
lc = &intel_dp->link.configs[idx];
@@ -750,9 +758,9 @@ int intel_dp_link_config_index(struct intel_dp *intel_dp, int link_rate, int lan
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
@@ -762,7 +770,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
intel_dp->common_rates);
/* Paranoia, there should always be something in common. */
- if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
+ if (drm_WARN_ON(display->drm, intel_dp->num_common_rates == 0)) {
intel_dp->common_rates[0] = 162000;
intel_dp->num_common_rates = 1;
}
@@ -770,8 +778,8 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
intel_dp_link_config_init(intel_dp);
}
-static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
- u8 lane_count)
+bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
+ u8 lane_count)
{
/*
* FIXME: we need to synchronize the current link parameters with
@@ -810,30 +818,30 @@ int intel_dp_bw_fec_overhead(bool fec_enabled)
}
static int
-small_joiner_ram_size_bits(struct drm_i915_private *i915)
+small_joiner_ram_size_bits(struct intel_display *display)
{
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return 17280 * 8;
- else if (DISPLAY_VER(i915) >= 11)
+ else if (DISPLAY_VER(display) >= 11)
return 7680 * 8;
else
return 6144 * 8;
}
-u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
+u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp)
{
u32 bits_per_pixel = bpp;
int i;
/* Error out if the max bpp is less than smallest allowed valid bpp */
if (bits_per_pixel < valid_dsc_bpp[0]) {
- drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
+ drm_dbg_kms(display->drm, "Unsupported BPP %u, min %u\n",
bits_per_pixel, valid_dsc_bpp[0]);
return 0;
}
/* From XE_LPD onwards we support from bpc up to uncompressed bpp-1 BPPs */
- if (DISPLAY_VER(i915) >= 13) {
+ if (DISPLAY_VER(display) >= 13) {
bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
/*
@@ -845,7 +853,8 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
* DSC enabled.
*/
if (bits_per_pixel < 8) {
- drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
+ drm_dbg_kms(display->drm,
+ "Unsupported BPP %u, min 8\n",
bits_per_pixel);
return 0;
}
@@ -856,7 +865,7 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
if (bits_per_pixel < valid_dsc_bpp[i + 1])
break;
}
- drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
+ drm_dbg_kms(display->drm, "Set dsc bpp from %d to VESA %d\n",
bits_per_pixel, valid_dsc_bpp[i]);
bits_per_pixel = valid_dsc_bpp[i];
@@ -865,36 +874,72 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p
return bits_per_pixel;
}
-static
-u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915,
- u32 mode_clock, u32 mode_hdisplay,
- bool bigjoiner)
+static int bigjoiner_interface_bits(struct intel_display *display)
+{
+ return DISPLAY_VER(display) >= 14 ? 36 : 24;
+}
+
+static u32 bigjoiner_bw_max_bpp(struct intel_display *display, u32 mode_clock,
+ int num_joined_pipes)
+{
+ u32 max_bpp;
+ /* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */
+ int ppc = 2;
+ int num_big_joiners = num_joined_pipes / 2;
+
+ max_bpp = display->cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits(display) /
+ intel_dp_mode_to_fec_clock(mode_clock);
+
+ max_bpp *= num_big_joiners;
+
+ return max_bpp;
+}
+
+static u32 small_joiner_ram_max_bpp(struct intel_display *display,
+ u32 mode_hdisplay,
+ int num_joined_pipes)
{
- u32 max_bpp_small_joiner_ram;
+ u32 max_bpp;
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
- max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay;
+ max_bpp = small_joiner_ram_size_bits(display) / mode_hdisplay;
- if (bigjoiner) {
- int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
- /* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */
- int ppc = 2;
- u32 max_bpp_bigjoiner =
- i915->display.cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits /
- intel_dp_mode_to_fec_clock(mode_clock);
+ max_bpp *= num_joined_pipes;
- max_bpp_small_joiner_ram *= 2;
+ return max_bpp;
+}
- return min(max_bpp_small_joiner_ram, max_bpp_bigjoiner);
- }
+static int ultrajoiner_ram_bits(void)
+{
+ return 4 * 72 * 512;
+}
- return max_bpp_small_joiner_ram;
+static u32 ultrajoiner_ram_max_bpp(u32 mode_hdisplay)
+{
+ return ultrajoiner_ram_bits() / mode_hdisplay;
}
-u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
+static
+u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
+ u32 mode_clock, u32 mode_hdisplay,
+ int num_joined_pipes)
+{
+ u32 max_bpp = small_joiner_ram_max_bpp(display, mode_hdisplay, num_joined_pipes);
+
+ if (num_joined_pipes > 1)
+ max_bpp = min(max_bpp, bigjoiner_bw_max_bpp(display, mode_clock,
+ num_joined_pipes));
+ if (num_joined_pipes == 4)
+ max_bpp = min(max_bpp, ultrajoiner_ram_max_bpp(mode_hdisplay));
+
+ return max_bpp;
+}
+
+u16 intel_dp_dsc_get_max_compressed_bpp(struct intel_display *display,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay,
- bool bigjoiner,
+ int num_joined_pipes,
enum intel_output_format output_format,
u32 pipe_bpp,
u32 timeslots)
@@ -933,26 +978,26 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
bits_per_pixel = min_t(u32, bits_per_pixel, 31);
- drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
+ drm_dbg_kms(display->drm, "Max link bpp is %u for %u timeslots "
"total bw %u pixel clock %u\n",
bits_per_pixel, timeslots,
(link_clock * lane_count * 8),
intel_dp_mode_to_fec_clock(mode_clock));
- joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock,
- mode_hdisplay, bigjoiner);
+ joiner_max_bpp = get_max_compressed_bpp_with_joiner(display, mode_clock,
+ mode_hdisplay, num_joined_pipes);
bits_per_pixel = min(bits_per_pixel, joiner_max_bpp);
- bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp);
+ bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(display, bits_per_pixel, pipe_bpp);
return bits_per_pixel;
}
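
A worked example of how the three ceilings combine in get_max_compressed_bpp_with_joiner(), using hypothetical clocks: for hdisplay = 7680 on a DISPLAY_VER >= 14 platform with max_cdclk_freq = 600000 kHz and an FEC-adjusted mode clock of 1200000 kHz, the small joiner RAM limit is (17280 * 8) / 7680 = 18 bpp per pipe, the bigjoiner bandwidth limit is 600000 * 2 * 36 / 1200000 = 36 bpp per big joiner, and the ultrajoiner RAM limit is (4 * 72 * 512) / 7680 = 19 bpp. With num_joined_pipes = 4 that yields min(18 * 4, 36 * 2, 19) = 19 bpp.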
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
- bool bigjoiner)
+ int num_joined_pipes)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u8 min_slice_count, i;
int max_slice_width;
@@ -967,12 +1012,12 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
* Due to some DSC engine BW limitations, we need to enable a second
* slice and VDSC engine whenever we approach close enough to the max CDCLK
*/
- if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
+ if (mode_clock >= ((display->cdclk.max_cdclk_freq * 85) / 100))
min_slice_count = max_t(u8, min_slice_count, 2);
max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unsupported slice width %d by DP DSC Sink device\n",
max_slice_width);
return 0;
@@ -984,21 +1029,35 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
/* Find the closest match to the valid slice count values */
for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
- u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;
+ u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes;
+
+ /*
+ * 3 DSC slices per pipe need 3 DSC engines, which are
+ * supported only with the ultrajoiner.
+ */
+ if (valid_dsc_slicecount[i] == 3 && num_joined_pipes != 4)
+ continue;
if (test_slice_count >
drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false))
break;
- /* big joiner needs small joiner to be enabled */
- if (bigjoiner && test_slice_count < 4)
+ /*
+ * Bigjoiner needs small joiner to be enabled.
+ * So there should be at least 2 DSC slices per pipe
+ * whenever bigjoiner is enabled.
+ */
+ if (num_joined_pipes > 1 && valid_dsc_slicecount[i] < 2)
+ continue;
+
+ if (mode_hdisplay % test_slice_count)
continue;
if (min_slice_count <= test_slice_count)
return test_slice_count;
}
- drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
+ drm_dbg_kms(display->drm, "Unsupported Slice Count %d\n",
min_slice_count);
return 0;
}
@@ -1006,7 +1065,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
static bool source_can_output(struct intel_dp *intel_dp,
enum intel_output_format format)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
switch (format) {
case INTEL_OUTPUT_FORMAT_RGB:
@@ -1018,11 +1077,11 @@ static bool source_can_output(struct intel_dp *intel_dp,
* Also, ILK doesn't seem capable of DP YCbCr output.
* The displayed image is severely corrupted. SNB+ is fine.
*/
- return !HAS_GMCH(i915) && !IS_IRONLAKE(i915);
+ return !HAS_GMCH(display) && !display->platform.ironlake;
case INTEL_OUTPUT_FORMAT_YCBCR420:
/* Platform < Gen 11 cannot output YCbCr420 format */
- return DISPLAY_VER(i915) >= 11;
+ return DISPLAY_VER(display) >= 11;
default:
MISSING_CASE(format);
@@ -1082,8 +1141,8 @@ static enum intel_output_format
intel_dp_output_format(struct intel_connector *connector,
enum intel_output_format sink_format)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
enum intel_output_format force_dsc_output_format =
intel_dp->force_dsc_output_format;
enum intel_output_format output_format;
@@ -1094,7 +1153,7 @@ intel_dp_output_format(struct intel_connector *connector,
dfp_can_convert(intel_dp, force_dsc_output_format, sink_format)))
return force_dsc_output_format;
- drm_dbg_kms(&i915->drm, "Cannot force DSC output format\n");
+ drm_dbg_kms(display->drm, "Cannot force DSC output format\n");
}
if (sink_format == INTEL_OUTPUT_FORMAT_RGB ||
@@ -1108,7 +1167,7 @@ intel_dp_output_format(struct intel_connector *connector,
else
output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
- drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format));
+ drm_WARN_ON(display->drm, !source_can_output(intel_dp, output_format));
return output_format;
}
@@ -1159,7 +1218,7 @@ intel_dp_mode_min_output_bpp(struct intel_connector *connector,
return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}
-static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
+static bool intel_dp_hdisplay_bad(struct intel_display *display,
int hdisplay)
{
/*
@@ -1175,7 +1234,7 @@ static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
*
* TODO: confirm the behaviour on HSW+
*/
- return hdisplay == 4096 && !HAS_DDI(dev_priv);
+ return hdisplay == 4096 && !HAS_DDI(display);
}
static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
@@ -1270,27 +1329,54 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
return MODE_OK;
}
-bool intel_dp_need_joiner(struct intel_dp *intel_dp,
- struct intel_connector *connector,
- int hdisplay, int clock)
+static
+bool intel_dp_needs_joiner(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
+ int hdisplay, int clock,
+ int num_joined_pipes)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ int hdisplay_limit;
if (!intel_dp_has_joiner(intel_dp))
return false;
- return clock > i915->display.cdclk.max_dotclk_freq || hdisplay > 5120 ||
- connector->force_bigjoiner_enable;
+ num_joined_pipes /= 2;
+
+ hdisplay_limit = DISPLAY_VER(display) >= 30 ? 6144 : 5120;
+
+ return clock > num_joined_pipes * display->cdclk.max_dotclk_freq ||
+ hdisplay > num_joined_pipes * hdisplay_limit;
+}
+
+int intel_dp_num_joined_pipes(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
+ int hdisplay, int clock)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ if (connector->force_joined_pipes)
+ return connector->force_joined_pipes;
+
+ if (HAS_ULTRAJOINER(display) &&
+ intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 4))
+ return 4;
+
+ if ((HAS_BIGJOINER(display) || HAS_UNCOMPRESSED_JOINER(display)) &&
+ intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 2))
+ return 2;
+
+ return 1;
}
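
Note the num_joined_pipes /= 2 above: the clock and hdisplay thresholds scale per big joiner, not per pipe. With the DISPLAY_VER >= 30 limit of 6144, 2 joined pipes are requested once clock > max_dotclk_freq or hdisplay > 6144, and 4 joined pipes once clock > 2 * max_dotclk_freq or hdisplay > 12288; intel_dp_num_joined_pipes() then tries the ultrajoiner (4 pipes) first, falls back to 2 pipes, and finally to a single pipe, unless force_joined_pipes overrides the whole ladder.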
bool intel_dp_has_dsc(const struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- if (!HAS_DSC(i915))
+ if (!HAS_DSC(display))
return false;
- if (connector->mst_port && !HAS_DSC_MST(i915))
+ if (connector->mst_port && !HAS_DSC_MST(display))
return false;
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
@@ -1307,17 +1393,19 @@ static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
- int max_dotclk = dev_priv->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
u16 dsc_max_compressed_bpp = 0;
u8 dsc_slice_count = 0;
enum drm_mode_status status;
- bool dsc = false, joiner = false;
+ bool dsc = false;
+ int num_joined_pipes;
status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
if (status != MODE_OK)
@@ -1338,15 +1426,14 @@ intel_dp_mode_valid(struct drm_connector *_connector,
target_clock = fixed_mode->clock;
}
- if (intel_dp_need_joiner(intel_dp, connector,
- mode->hdisplay, target_clock)) {
- joiner = true;
- max_dotclk *= 2;
- }
+ num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
+ mode->hdisplay, target_clock);
+ max_dotclk *= num_joined_pipes;
+
if (target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
- if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
+ if (intel_dp_hdisplay_bad(display, mode->hdisplay))
return MODE_H_ILLEGAL;
max_link_clock = intel_dp_max_link_rate(intel_dp);
@@ -1381,25 +1468,25 @@ intel_dp_mode_valid(struct drm_connector *_connector,
true);
} else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
dsc_max_compressed_bpp =
- intel_dp_dsc_get_max_compressed_bpp(dev_priv,
+ intel_dp_dsc_get_max_compressed_bpp(display,
max_link_clock,
max_lanes,
target_clock,
mode->hdisplay,
- joiner,
+ num_joined_pipes,
output_format,
pipe_bpp, 64);
dsc_slice_count =
intel_dp_dsc_get_slice_count(connector,
target_clock,
mode->hdisplay,
- joiner);
+ num_joined_pipes);
}
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- if (intel_dp_joiner_needs_dsc(dev_priv, joiner) && !dsc)
+ if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc)
return MODE_CLOCK_HIGH;
if (mode_rate > max_rate && !dsc)
@@ -1409,54 +1496,46 @@ intel_dp_mode_valid(struct drm_connector *_connector,
if (status != MODE_OK)
return status;
- return intel_mode_valid_max_plane_size(dev_priv, mode, joiner);
+ return intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes);
}
-bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
+bool intel_dp_source_supports_tps3(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
+ return DISPLAY_VER(display) >= 9 ||
+ display->platform.broadwell || display->platform.haswell;
}
-bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
+bool intel_dp_source_supports_tps4(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 10;
+ return DISPLAY_VER(display) >= 10;
}
-static void snprintf_int_array(char *str, size_t len,
- const int *array, int nelem)
+static void seq_buf_print_array(struct seq_buf *s, const int *array, int nelem)
{
int i;
- str[0] = '\0';
-
- for (i = 0; i < nelem; i++) {
- int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
- if (r >= len)
- return;
- str += r;
- len -= r;
- }
+ for (i = 0; i < nelem; i++)
+ seq_buf_printf(s, "%s%d", i ? ", " : "", array[i]);
}
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- char str[128]; /* FIXME: too big for stack? */
+ struct intel_display *display = to_intel_display(intel_dp);
+ DECLARE_SEQ_BUF(s, 128); /* FIXME: too big for stack? */
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- snprintf_int_array(str, sizeof(str),
- intel_dp->source_rates, intel_dp->num_source_rates);
- drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
+ seq_buf_print_array(&s, intel_dp->source_rates, intel_dp->num_source_rates);
+ drm_dbg_kms(display->drm, "source rates: %s\n", seq_buf_str(&s));
- snprintf_int_array(str, sizeof(str),
- intel_dp->sink_rates, intel_dp->num_sink_rates);
- drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
+ seq_buf_clear(&s);
+ seq_buf_print_array(&s, intel_dp->sink_rates, intel_dp->num_sink_rates);
+ drm_dbg_kms(display->drm, "sink rates: %s\n", seq_buf_str(&s));
- snprintf_int_array(str, sizeof(str),
- intel_dp->common_rates, intel_dp->num_common_rates);
- drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
+ seq_buf_clear(&s);
+ seq_buf_print_array(&s, intel_dp->common_rates, intel_dp->num_common_rates);
+ drm_dbg_kms(display->drm, "common rates: %s\n", seq_buf_str(&s));
}
static int forced_link_rate(struct intel_dp *intel_dp)
@@ -1493,11 +1572,11 @@ intel_dp_min_link_rate(struct intel_dp *intel_dp)
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int i = intel_dp_rate_index(intel_dp->sink_rates,
intel_dp->num_sink_rates, rate);
- if (drm_WARN_ON(&i915->drm, i < 0))
+ if (drm_WARN_ON(display->drm, i < 0))
i = 0;
return i;
@@ -1527,13 +1606,13 @@ bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return true;
- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+ if (DISPLAY_VER(display) == 11 && encoder->port != PORT_A &&
!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
@@ -1548,13 +1627,15 @@ bool intel_dp_supports_fec(struct intel_dp *intel_dp,
drm_dp_sink_supports_fec(connector->dp.fec_capability);
}
-bool intel_dp_supports_dsc(const struct intel_connector *connector,
+bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state)
{
if (!intel_dp_has_dsc(connector))
return false;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) &&
+ !intel_dp_supports_fec(intel_dp, connector, crtc_state))
return false;
return intel_dsc_source_support(crtc_state);
@@ -1596,8 +1677,8 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
int bpp, bpc;
bpc = crtc_state->pipe_bpp / 3;
@@ -1619,67 +1700,28 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
bpp = bpc * 3;
if (intel_dp_is_edp(intel_dp)) {
/* Get bpp from VBT only for panels that don't have bpp in EDID */
- if (intel_connector->base.display_info.bpc == 0 &&
- intel_connector->panel.vbt.edp.bpp &&
- intel_connector->panel.vbt.edp.bpp < bpp) {
- drm_dbg_kms(&dev_priv->drm,
+ if (connector->base.display_info.bpc == 0 &&
+ connector->panel.vbt.edp.bpp &&
+ connector->panel.vbt.edp.bpp < bpp) {
+ drm_dbg_kms(display->drm,
"clamping bpp for eDP panel to BIOS-provided %i\n",
- intel_connector->panel.vbt.edp.bpp);
- bpp = intel_connector->panel.vbt.edp.bpp;
+ connector->panel.vbt.edp.bpp);
+ bpp = connector->panel.vbt.edp.bpp;
}
}
return bpp;
}
-/* Adjust link config limits based on compliance test requests. */
-void
-intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- struct link_config_limits *limits)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- /* For DP Compliance we override the computed bpp for the pipe */
- if (intel_dp->compliance.test_data.bpc != 0) {
- int bpp = 3 * intel_dp->compliance.test_data.bpc;
-
- limits->pipe.min_bpp = limits->pipe.max_bpp = bpp;
- pipe_config->dither_force_disable = bpp == 6 * 3;
-
- drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
- }
-
- /* Use values requested by Compliance Test Request */
- if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
- int index;
-
- /* Validate the compliance test data since max values
- * might have changed due to link train fallback.
- */
- if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
- intel_dp->compliance.test_lane_count)) {
- index = intel_dp_rate_index(intel_dp->common_rates,
- intel_dp->num_common_rates,
- intel_dp->compliance.test_link_rate);
- if (index >= 0)
- limits->min_rate = limits->max_rate =
- intel_dp->compliance.test_link_rate;
- limits->min_lane_count = limits->max_lane_count =
- intel_dp->compliance.test_lane_count;
- }
- }
-}
-
static bool has_seamless_m_n(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/*
* Seamless M/N reprogramming only implemented
* for BDW+ double buffered M/N registers so far.
*/
- return HAS_DOUBLE_BUFFERED_M_N(i915) &&
+ return HAS_DOUBLE_BUFFERED_M_N(display) &&
intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
}
@@ -1741,32 +1783,31 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-static
-u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915)
+int intel_dp_dsc_max_src_input_bpc(struct intel_display *display)
{
/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
return 12;
- if (DISPLAY_VER(i915) == 11)
+ if (DISPLAY_VER(display) == 11)
return 10;
- return 0;
+ return intel_dp_dsc_min_src_input_bpc();
}
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 max_req_bpc)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int i, num_bpc;
u8 dsc_bpc[3] = {};
- u8 dsc_max_bpc;
+ int dsc_max_bpc;
- dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
+ dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(display);
if (!dsc_max_bpc)
return dsc_max_bpc;
- dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
+ dsc_max_bpc = min(dsc_max_bpc, max_req_bpc);
num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
dsc_bpc);
@@ -1778,9 +1819,9 @@ int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
return 0;
}
-static int intel_dp_source_dsc_version_minor(struct drm_i915_private *i915)
+static int intel_dp_source_dsc_version_minor(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 14 ? 2 : 1;
+ return DISPLAY_VER(display) >= 14 ? 2 : 1;
}
static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
@@ -1814,7 +1855,7 @@ static int intel_dp_get_slice_height(int vactive)
static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
int ret;
@@ -1837,7 +1878,7 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
(connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
vdsc_cfg->dsc_version_minor =
- min(intel_dp_source_dsc_version_minor(i915),
+ min(intel_dp_source_dsc_version_minor(display),
intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd));
if (vdsc_cfg->convert_rgb)
vdsc_cfg->convert_rgb =
@@ -1847,7 +1888,7 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
vdsc_cfg->line_buf_depth = min(INTEL_DP_DSC_MAX_LINE_BUF_DEPTH,
drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd));
if (!vdsc_cfg->line_buf_depth) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"DSC Sink Line Buffer Depth invalid\n");
return -EINVAL;
}
@@ -1862,7 +1903,7 @@ static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
static bool intel_dp_dsc_supports_format(const struct intel_connector *connector,
enum intel_output_format output_format)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
u8 sink_dsc_format;
switch (output_format) {
@@ -1873,7 +1914,7 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector
sink_dsc_format = DP_DSC_YCbCr444;
break;
case INTEL_OUTPUT_FORMAT_YCBCR420:
- if (min(intel_dp_source_dsc_version_minor(i915),
+ if (min(intel_dp_source_dsc_version_minor(display),
intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2)
return false;
sink_dsc_format = DP_DSC_YCbCr420_Native;
@@ -1934,7 +1975,7 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
static
u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *pipe_config,
int bpc)
{
u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd);
@@ -1959,7 +2000,7 @@ u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connec
return 0;
}
-int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
+int intel_dp_dsc_sink_min_compressed_bpp(const struct intel_crtc_state *pipe_config)
{
/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
switch (pipe_config->output_format) {
@@ -1977,7 +2018,7 @@ int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
}
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *pipe_config,
int bpc)
{
return intel_dp_dsc_max_sink_compressed_bppx16(connector,
@@ -1992,13 +2033,22 @@ static int dsc_src_min_compressed_bpp(void)
static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /*
+ * Forcing DSC and using the platform's max compressed bpp is seen to cause
+ * underruns. Since DSC isn't needed in these cases, limit the
+ * max compressed bpp to 18, which is a safe value across platforms with different
+ * pipe bpps.
+ */
+ if (intel_dp->force_dsc_en)
+ return 18;
/*
* Max Compressed bpp for Gen 13+ is 27bpp.
* For earlier platforms it is 23bpp. (Bspec: 49259).
*/
- if (DISPLAY_VER(i915) < 13)
+ if (DISPLAY_VER(display) < 13)
return 23;
else
return 27;
@@ -2022,11 +2072,10 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp)
+ for (i = ARRAY_SIZE(valid_dsc_bpp) - 1; i >= 0; i--) {
+ if (valid_dsc_bpp[i] < dsc_min_bpp ||
+ valid_dsc_bpp[i] > dsc_max_bpp)
continue;
- if (valid_dsc_bpp[i] > dsc_max_bpp)
- break;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
@@ -2059,13 +2108,13 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
int pipe_bpp,
int timeslots)
{
+ struct intel_display *display = to_intel_display(intel_dp);
u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u16 compressed_bppx16;
u8 bppx16_step;
int ret;
- if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
+ if (DISPLAY_VER(display) < 14 || bppx16_incr <= 1)
bppx16_step = 16;
else
bppx16_step = 16 / bppx16_incr;
@@ -2089,7 +2138,8 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
if (intel_dp->force_dsc_fractional_bpp_en &&
fxp_q4_to_frac(compressed_bppx16))
- drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
+ drm_dbg_kms(display->drm,
+ "Forcing DSC fractional bpp\n");
return 0;
}
@@ -2104,67 +2154,46 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
int pipe_bpp,
int timeslots)
{
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
- int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+ int dsc_min_bpp;
+ int dsc_max_bpp;
int dsc_joiner_max_bpp;
+ int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
- dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
- dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
- dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
+ dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
- dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
- pipe_config,
- pipe_bpp / 3);
- dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
-
- dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
+ dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(display, adjusted_mode->clock,
adjusted_mode->hdisplay,
- pipe_config->joiner_pipes);
- dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp);
- dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
+ num_joined_pipes);
+ dsc_max_bpp = min(dsc_joiner_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(display) >= 13)
return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
}
-static
-u8 intel_dp_dsc_min_src_input_bpc(struct drm_i915_private *i915)
+int intel_dp_dsc_min_src_input_bpc(void)
{
/* Min DSC Input BPC for ICL+ is 8 */
- return HAS_DSC(i915) ? 8 : 0;
+ return 8;
}
static
-bool is_dsc_pipe_bpp_sufficient(struct drm_i915_private *i915,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits,
+bool is_dsc_pipe_bpp_sufficient(struct link_config_limits *limits,
int pipe_bpp)
{
- u8 dsc_max_bpc, dsc_min_bpc, dsc_max_pipe_bpp, dsc_min_pipe_bpp;
-
- dsc_max_bpc = min(intel_dp_dsc_max_src_input_bpc(i915), conn_state->max_requested_bpc);
- dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
-
- dsc_max_pipe_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);
- dsc_min_pipe_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);
-
- return pipe_bpp >= dsc_min_pipe_bpp &&
- pipe_bpp <= dsc_max_pipe_bpp;
+ return pipe_bpp >= limits->pipe.min_bpp &&
+ pipe_bpp <= limits->pipe.max_bpp;
}
static
int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
- struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int forced_bpp;
if (!intel_dp->force_dsc_bpc)
@@ -2172,12 +2201,14 @@ int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
forced_bpp = intel_dp->force_dsc_bpc * 3;
- if (is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, forced_bpp)) {
- drm_dbg_kms(&i915->drm, "Input DSC BPC forced to %d\n", intel_dp->force_dsc_bpc);
+ if (is_dsc_pipe_bpp_sufficient(limits, forced_bpp)) {
+ drm_dbg_kms(display->drm, "Input DSC BPC forced to %d\n",
+ intel_dp->force_dsc_bpc);
return forced_bpp;
}
- drm_dbg_kms(&i915->drm, "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
+ drm_dbg_kms(display->drm,
+ "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
intel_dp->force_dsc_bpc);
return 0;
@@ -2189,17 +2220,15 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
struct link_config_limits *limits,
int timeslots)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
const struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- u8 max_req_bpc = conn_state->max_requested_bpc;
- u8 dsc_max_bpc, dsc_max_bpp;
- u8 dsc_min_bpc, dsc_min_bpp;
+ int dsc_max_bpp;
+ int dsc_min_bpp;
u8 dsc_bpc[3] = {};
int forced_bpp, pipe_bpp;
int num_bpc, i, ret;
- forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);
+ forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, limits);
if (forced_bpp) {
ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
@@ -2210,15 +2239,8 @@ static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
}
}
- dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
- if (!dsc_max_bpc)
- return -EINVAL;
-
- dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
- dsc_max_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);
-
- dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
- dsc_min_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);
+ dsc_max_bpp = limits->pipe.max_bpp;
+ dsc_min_bpp = limits->pipe.min_bpp;
/*
* Get the maximum DSC bpc that will be supported by any valid
@@ -2247,24 +2269,24 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int pipe_bpp, forced_bpp;
- int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
- int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+ int dsc_min_bpp;
+ int dsc_max_bpp;
- forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);
+ forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, limits);
if (forced_bpp) {
pipe_bpp = forced_bpp;
} else {
- int max_bpc = min(limits->pipe.max_bpp / 3, (int)conn_state->max_requested_bpc);
+ int max_bpc = limits->pipe.max_bpp / 3;
/* For eDP use max bpp that can be supported with DSC. */
pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, max_bpc);
- if (!is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp)) {
- drm_dbg_kms(&i915->drm,
+ if (!is_dsc_pipe_bpp_sufficient(limits, pipe_bpp)) {
+ drm_dbg_kms(display->drm,
"Computed BPC is not in DSC BPC limits\n");
return -EINVAL;
}
@@ -2272,17 +2294,9 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
pipe_config->port_clock = limits->max_rate;
pipe_config->lane_count = limits->max_lane_count;
- dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
- dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
- dsc_min_bpp = max(dsc_min_bpp, fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
+ dsc_min_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
- dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
- pipe_config,
- pipe_bpp / 3);
- dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
- dsc_max_bpp = min(dsc_max_bpp, fxp_q4_to_int(limits->link.max_bpp_x16));
+ dsc_max_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
@@ -2295,6 +2309,26 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
return 0;
}
+static void intel_dp_fec_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->fec_enable)
+ return;
+
+ /*
+ * Though eDP v1.5 supports FEC with DSC, unlike DP, it is optional.
+ * Since FEC is a bandwidth overhead, continue to not enable it for
+ * eDP until there is a good reason to do so.
+ */
+ if (intel_dp_is_edp(intel_dp))
+ return;
+
+ if (intel_dp_is_uhbr(crtc_state))
+ return;
+
+ crtc_state->fec_enable = true;
+}
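
Spelled out, the policy this helper encodes: keep FEC if something earlier already enabled it; leave it off for eDP, where eDP v1.5 makes FEC optional even with DSC and it would only cost bandwidth; leave it off for UHBR rates, whose 128b/132b channel coding already accounts for FEC; enable it for everything else, i.e. 8b/10b DP links that may need DSC.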
+
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -2302,20 +2336,15 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
int timeslots,
bool compute_pipe_bpp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
+ int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
int ret;
- pipe_config->fec_enable = pipe_config->fec_enable ||
- (!intel_dp_is_edp(intel_dp) &&
- intel_dp_supports_fec(intel_dp, connector, pipe_config));
-
- if (!intel_dp_supports_dsc(connector, pipe_config))
- return -EINVAL;
+ intel_dp_fec_compute_config(intel_dp, pipe_config);
if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
return -EINVAL;
@@ -2334,7 +2363,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
conn_state, limits, timeslots);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"No Valid pipe bpp for given mode ret = %d\n", ret);
return ret;
}
@@ -2346,7 +2375,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
true);
if (!pipe_config->dsc.slice_count) {
- drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
+ drm_dbg_kms(display->drm,
+ "Unsupported Slice Count %d\n",
pipe_config->dsc.slice_count);
return -EINVAL;
}
@@ -2357,9 +2387,9 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
intel_dp_dsc_get_slice_count(connector,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay,
- pipe_config->joiner_pipes);
+ num_joined_pipes);
if (!dsc_dp_slice_count) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Compressed Slice Count not supported\n");
return -EINVAL;
}
@@ -2370,13 +2400,20 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
+ * In case of Ultrajoiner along with 12 slices we need to use 3
+ * VDSC instances.
*/
- if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1)
- pipe_config->dsc.dsc_split = true;
+ if (pipe_config->joiner_pipes && num_joined_pipes == 4 &&
+ pipe_config->dsc.slice_count == 12)
+ pipe_config->dsc.num_streams = 3;
+ else if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1)
+ pipe_config->dsc.num_streams = 2;
+ else
+ pipe_config->dsc.num_streams = 1;
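
The resulting num_streams mapping, spelled out: ultrajoiner (4 joined pipes) with 12 total slices uses 3 VDSC engines per pipe (12 / 4); any other joiner configuration, or a single pipe with more than one slice, uses 2; a single pipe with a single slice uses 1.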
ret = intel_dp_dsc_compute_params(connector, pipe_config);
if (ret < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Cannot compute valid DSC parameters for Input Bpp = %d"
"Compressed BPP = " FXP_Q4_FMT "\n",
pipe_config->pipe_bpp,
@@ -2385,7 +2422,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
}
pipe_config->dsc.compression_enable = true;
- drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
+ drm_dbg_kms(display->drm, "DP DSC computed with Input Bpp = %d "
"Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
@@ -2394,25 +2431,18 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return 0;
}
-/**
- * intel_dp_compute_config_link_bpp_limits - compute output link bpp limits
- * @intel_dp: intel DP
- * @crtc_state: crtc state
- * @dsc: DSC compression mode
- * @limits: link configuration limits
- *
- * Calculates the output link min, max bpp values in @limits based on the
- * pipe bpp range, @crtc_state and @dsc mode.
- *
- * Returns %true in case of success.
+/*
+ * Calculate the output link min, max bpp values in limits based on the pipe bpp
+ * range, crtc_state and dsc mode. Return true on success.
*/
-bool
+static bool
intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -2430,22 +2460,32 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
limits->link.min_bpp_x16 = fxp_q4_from_int(limits->pipe.min_bpp);
} else {
- /*
- * TODO: set the DSC link limits already here, atm these are
- * initialized only later in intel_edp_dsc_compute_pipe_bpp() /
- * intel_dp_dsc_compute_pipe_bpp()
- */
- limits->link.min_bpp_x16 = 0;
+ int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
+ int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+
+ dsc_src_min_bpp = dsc_src_min_compressed_bpp();
+ dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
+ dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
+ limits->link.min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
+
+ dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
+ dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ crtc_state,
+ limits->pipe.max_bpp / 3);
+ dsc_max_bpp = dsc_sink_max_bpp ?
+ min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
+
+ max_link_bpp_x16 = min(max_link_bpp_x16, fxp_q4_from_int(dsc_max_bpp));
}
limits->link.max_bpp_x16 = max_link_bpp_x16;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " FXP_Q4_FMT "\n",
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
adjusted_mode->crtc_clock,
- dsc ? "on" : "off",
+ str_on_off(dsc),
limits->max_lane_count,
limits->max_rate,
limits->pipe.max_bpp,
@@ -2454,29 +2494,62 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
return true;
}
-static bool
+static void
+intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
+ struct link_config_limits *limits)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ int dsc_min_bpc = intel_dp_dsc_min_src_input_bpc();
+ int dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(display);
+
+ limits->pipe.max_bpp = clamp(limits->pipe.max_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
+ limits->pipe.min_bpp = clamp(limits->pipe.min_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
+}
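
A worked example of the clamp: on TGL+ (dsc_min_bpc = 8, dsc_max_bpc = 12) the pipe bpp window becomes [24, 36], so a limits->pipe.max_bpp of 30 is left alone while 18 would be raised to 24; on ICL (dsc_max_bpc = 10) the window is [24, 30].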
+
+bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
struct link_config_limits *limits)
{
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+
limits->min_rate = intel_dp_min_link_rate(intel_dp);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
- /* FIXME 128b/132b SST support missing */
- limits->max_rate = min(limits->max_rate, 810000);
+ /* FIXME 128b/132b SST+DSC support missing */
+ if (!is_mst && dsc)
+ limits->max_rate = min(limits->max_rate, 810000);
limits->min_rate = min(limits->min_rate, limits->max_rate);
limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
- limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
- respect_downstream_limits);
+ if (is_mst) {
+ /*
+ * FIXME: If all the streams can't fit into the link with their
+ * current pipe_bpp we should reduce pipe_bpp across the board
+ * until things start to fit. Until then we limit to <= 8bpc
+ * since that's what was hardcoded for all MST streams
+ * previously. This hack should be removed once we have the
+ * proper retry logic in place.
+ */
+ limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);
+ } else {
+ limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
+ respect_downstream_limits);
+ }
+
+ if (dsc)
+ intel_dp_dsc_compute_pipe_bpp_limits(intel_dp, limits);
- if (intel_dp->use_max_params) {
+ if (is_mst || intel_dp->use_max_params) {
/*
+ * For MST we always configure max link bw - the spec doesn't
+ * seem to suggest we should do otherwise.
+ *
* Use the maximum clock and number of lanes the eDP panel
* advertises being capable of in case the initial fast
* optimal params failed us. The panels are generally
@@ -2488,9 +2561,10 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits->min_rate = limits->max_rate;
}
- intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);
+ intel_dp_test_compute_config(intel_dp, crtc_state, limits);
return intel_dp_compute_config_link_bpp_limits(intel_dp,
+ intel_dp->attached_connector,
crtc_state,
dsc,
limits);
@@ -2507,14 +2581,17 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
}
-bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner)
+bool intel_dp_joiner_needs_dsc(struct intel_display *display,
+ int num_joined_pipes)
{
/*
* Pipe joiner needs compression up to display 12 due to bandwidth
* limitation. DG2 onwards pipe joiner can be enabled without
* compression.
+ * Ultrajoiner always needs compression.
*/
- return DISPLAY_VER(i915) < 13 && use_joiner;
+ return (!HAS_UNCOMPRESSED_JOINER(display) && num_joined_pipes == 2) ||
+ num_joined_pipes == 4;
}
static int
@@ -2523,7 +2600,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
@@ -2532,18 +2609,20 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
bool dsc_needed, joiner_needs_dsc;
+ int num_joined_pipes;
int ret = 0;
if (pipe_config->fec_enable &&
!intel_dp_supports_fec(intel_dp, connector, pipe_config))
return -EINVAL;
- if (intel_dp_need_joiner(intel_dp, connector,
- adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_clock))
- pipe_config->joiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
+ num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
+ adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_clock);
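+ /* Mark num_joined_pipes consecutive pipes, starting from this CRTC's own pipe. */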
+ if (num_joined_pipes > 1)
+ pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);
- joiner_needs_dsc = intel_dp_joiner_needs_dsc(i915, pipe_config->joiner_pipes);
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
!intel_dp_compute_config_limits(intel_dp, pipe_config,
@@ -2558,12 +2637,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
*/
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
conn_state, &limits);
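+ /*
+ * 128b/132b links carry the stream in MTP time slots even for SST,
+ * so also compute the TU allocation here; min and max bpp are both
+ * pipe_bpp since this is the uncompressed path.
+ */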
+ if (!ret && intel_dp_is_uhbr(pipe_config))
+ ret = intel_dp_mtp_tu_compute_config(intel_dp,
+ pipe_config,
+ pipe_config->pipe_bpp,
+ pipe_config->pipe_bpp,
+ conn_state,
+ 0, false);
if (ret)
dsc_needed = true;
}
+ if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
+ drm_dbg_kms(display->drm, "DSC required but not available\n");
+ return -EINVAL;
+ }
+
if (dsc_needed) {
- drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ drm_dbg_kms(display->drm,
+ "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
@@ -2579,7 +2671,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return ret;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"DP lane count %d clock %d bpp input %d compressed " FXP_Q4_FMT " link rate required %d available %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
@@ -2625,12 +2717,11 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
}
}
-static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
- enum port port)
+static bool intel_dp_port_has_audio(struct intel_display *display, enum port port)
{
- if (IS_G4X(dev_priv))
+ if (display->platform.g4x)
return false;
- if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
+ if (DISPLAY_VER(display) < 12 && port == PORT_A)
return false;
return true;
@@ -2640,8 +2731,7 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
const struct drm_connector_state *conn_state,
struct drm_dp_vsc_sdp *vsc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc_state);
if (crtc_state->has_panel_replay) {
/*
@@ -2718,7 +2808,7 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
vsc->bpc = crtc_state->pipe_bpp / 3;
/* only RGB pixelformat supports 6 bpc */
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
/* all YCbCr are always limited range */
@@ -2738,11 +2828,9 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
- /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
as_sdp->length = 0x9;
as_sdp->duration_incr_ms = 0;
- as_sdp->duration_incr_ms = 0;
if (crtc_state->cmrr.enable) {
as_sdp->mode = DP_AS_SDP_FAVT_TRR_REACHED;
@@ -2750,7 +2838,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
as_sdp->target_rr_divider = true;
} else {
- as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
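+ /* Plain VRR (non-CMRR) lets vtotal vary, so advertise a dynamic vtotal. */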
+ as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
as_sdp->vtotal = adjusted_mode->vtotal;
as_sdp->target_rr = 0;
}
@@ -2809,8 +2897,8 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
int ret;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
if (!conn_state->hdr_output_metadata)
@@ -2819,7 +2907,8 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
if (ret) {
- drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
+ drm_dbg_kms(display->drm,
+ "couldn't set HDR metadata in infoframe\n");
return;
}
@@ -2861,6 +2950,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
int link_bpp_x16)
{
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
@@ -2879,7 +2969,8 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
return;
}
- if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
+ if (display->platform.ironlake || display->platform.sandybridge ||
+ display->platform.ivybridge)
pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;
pipe_config->has_drrs = true;
@@ -2901,13 +2992,13 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
static bool intel_dp_has_audio(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- if (!intel_dp_port_has_audio(i915, encoder->port))
+ if (!intel_dp_port_has_audio(display, encoder->port))
return false;
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
@@ -2922,7 +3013,7 @@ intel_dp_compute_output_format(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
const struct drm_display_info *info = &connector->base.display_info;
@@ -2933,7 +3024,7 @@ intel_dp_compute_output_format(struct intel_encoder *encoder,
ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
} else {
@@ -3017,7 +3108,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3025,9 +3116,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *connector = intel_dp->attached_connector;
int ret = 0, link_bpp_x16;
- if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
- pipe_config->has_pch_encoder = true;
-
fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
ret = intel_panel_compute_config(connector, adjusted_mode);
@@ -3045,7 +3133,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;
- if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
+ if (intel_dp_hdisplay_bad(display, adjusted_mode->crtc_hdisplay))
return -EINVAL;
/*
@@ -3068,8 +3156,13 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- pipe_config->enhanced_framing =
- drm_dp_enhanced_frame_cap(intel_dp->dpcd);
+ if (intel_dp_is_uhbr(pipe_config)) {
+ /* 128b/132b SST also needs this */
+ pipe_config->mst_master_transcoder = pipe_config->cpu_transcoder;
+ } else {
+ pipe_config->enhanced_framing =
+ drm_dp_enhanced_frame_cap(intel_dp->dpcd);
+ }
if (pipe_config->dsc.compression_enable)
link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
@@ -3085,7 +3178,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->splitter.link_count = n;
pipe_config->splitter.pixel_overlap = overlap;
- drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
+ drm_dbg_kms(display->drm,
+ "MSO link count %d, pixel overlap %d\n",
n, overlap);
adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
@@ -3099,20 +3193,19 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
- intel_link_compute_m_n(link_bpp_x16,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- intel_dp_bw_fec_overhead(pipe_config->fec_enable),
- &pipe_config->dp_m_n);
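+ /*
+ * For UHBR (128b/132b) links the M/N values are derived as part of
+ * the MTP/TU computation during link config, so only compute them
+ * here for 8b/10b links.
+ */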
+ if (!intel_dp_is_uhbr(pipe_config)) {
+ intel_link_compute_m_n(link_bpp_x16,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ intel_dp_bw_fec_overhead(pipe_config->fec_enable),
+ &pipe_config->dp_m_n);
+ }
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;
- if (!HAS_DDI(dev_priv))
- g4x_dp_set_clock(encoder, pipe_config);
-
intel_vrr_compute_config(pipe_config, conn_state);
intel_dp_compute_as_sdp(intel_dp, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
@@ -3149,13 +3242,13 @@ void intel_dp_reset_link_params(struct intel_dp *intel_dp)
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_backlight_enable(crtc_state, conn_state);
intel_pps_backlight_on(intel_dp);
@@ -3165,12 +3258,12 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&i915->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
intel_pps_backlight_off(intel_dp);
intel_backlight_disable(old_conn_state);
@@ -3213,11 +3306,11 @@ static void
intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
DP_DECOMPRESSION_EN, enable) < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s sink decompression state\n",
str_enable_disable(enable));
}
@@ -3226,7 +3319,7 @@ static void
intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
bool enable)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_dp_aux *aux = connector->port ?
connector->port->passthrough_aux : NULL;
@@ -3235,7 +3328,7 @@ intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
if (write_dsc_decompression_flag(aux,
DP_DSC_PASSTHROUGH_EN, enable) < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s sink compression passthrough state\n",
str_enable_disable(enable));
}
@@ -3244,7 +3337,7 @@ static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
const struct intel_connector *connector,
bool for_get_ref)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_connector *_connector_iter;
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
@@ -3269,7 +3362,7 @@ static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
if (!connector_iter->dp.dsc_decompression_enabled)
continue;
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
(for_get_ref && !new_conn_state->crtc) ||
(!for_get_ref && !old_conn_state->crtc));
@@ -3316,12 +3409,12 @@ void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
struct intel_connector *connector,
const struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
if (!new_crtc_state->dsc.compression_enable)
return;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!connector->dp.dsc_decompression_aux ||
connector->dp.dsc_decompression_enabled))
return;
@@ -3347,12 +3440,12 @@ void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
struct intel_connector *connector,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
if (!old_crtc_state->dsc.compression_enable)
return;
- if (drm_WARN_ON(&i915->drm,
+ if (drm_WARN_ON(display->drm,
!connector->dp.dsc_decompression_aux ||
!connector->dp.dsc_decompression_enabled))
return;
@@ -3365,36 +3458,50 @@ void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
}
static void
-intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
+intel_dp_init_source_oui(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 oui[] = { 0x00, 0xaa, 0x01 };
u8 buf[3] = {};
+ if (READ_ONCE(intel_dp->oui_valid))
+ return;
+
+ WRITE_ONCE(intel_dp->oui_valid, true);
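+ /* Set optimistically; a failed write below clears the flag so a later call retries. */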
+
/*
* During driver init, we want to be careful and avoid changing the source OUI if it's
* already set to what we want, so as to avoid clearing any state by accident
*/
- if (careful) {
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
- drm_err(&i915->drm, "Failed to read source OUI\n");
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
+ drm_dbg_kms(display->drm, "Failed to read source OUI\n");
- if (memcmp(oui, buf, sizeof(oui)) == 0)
- return;
+ if (memcmp(oui, buf, sizeof(oui)) == 0) {
+ /* Assume the OUI was written now. */
+ intel_dp->last_oui_write = jiffies;
+ return;
}
- if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
- drm_err(&i915->drm, "Failed to write source OUI\n");
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) {
+ drm_dbg_kms(display->drm, "Failed to write source OUI\n");
+ WRITE_ONCE(intel_dp->oui_valid, false);
+ }
intel_dp->last_oui_write = jiffies;
}
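+/* Force the source OUI to be rewritten by the next intel_dp_init_source_oui() call. */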
+void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp)
+{
+ WRITE_ONCE(intel_dp->oui_valid, false);
+}
+
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
connector->base.base.id, connector->base.name,
connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
@@ -3405,8 +3512,8 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int ret, i;
/* Should have a valid DPCD by this point */
@@ -3424,8 +3531,7 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
lspcon_resume(dp_to_dig_port(intel_dp));
/* Write the source OUI as early as possible */
- if (intel_dp_is_edp(intel_dp))
- intel_edp_init_source_oui(intel_dp, false);
+ intel_dp_init_source_oui(intel_dp);
/*
* When turning on, we need to retry for 1ms to give the sink
@@ -3443,7 +3549,8 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
}
if (ret != 1)
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Set power to %s failed\n",
encoder->base.base.id, encoder->base.name,
mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
@@ -3486,7 +3593,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool fastset = true;
@@ -3496,7 +3603,8 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
*/
if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
crtc_state->port_clock) < 0) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.connectors_changed = true;
fastset = false;
@@ -3510,14 +3618,15 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
* Remove once we have readout for DSC.
*/
if (crtc_state->dsc.compression_enable) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
fastset = false;
}
if (CAN_PANEL_REPLAY(intel_dp)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Forcing full modeset to compute panel replay state\n",
encoder->base.base.id, encoder->base.name);
crtc_state->uapi.mode_changed = true;
@@ -3529,7 +3638,7 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/* Clear the cached register set to avoid using stale values */
@@ -3538,10 +3647,10 @@ static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
intel_dp->pcon_dsc_dpcd,
sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
- drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
+ drm_err(display->drm, "Failed to read DPCD register 0x%x\n",
DP_PCON_DSC_ENCODER);
- drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
+ drm_dbg_kms(display->drm, "PCON ENCODER DSC DPCD: %*ph\n",
(int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
}
@@ -3579,19 +3688,19 @@ static int intel_dp_pcon_set_frl_mask(int max_frl)
static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_connector *connector = &intel_connector->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
int max_frl_rate;
int max_lanes, rate_per_lane;
int max_dsc_lanes, dsc_rate_per_lane;
- max_lanes = connector->display_info.hdmi.max_lanes;
- rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
+ max_lanes = info->hdmi.max_lanes;
+ rate_per_lane = info->hdmi.max_frl_rate_per_lane;
max_frl_rate = max_lanes * rate_per_lane;
- if (connector->display_info.hdmi.dsc_cap.v_1p2) {
- max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
- dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
+ if (info->hdmi.dsc_cap.v_1p2) {
+ max_dsc_lanes = info->hdmi.dsc_cap.max_lanes;
+ dsc_rate_per_lane = info->hdmi.dsc_cap.max_frl_rate_per_lane;
if (max_dsc_lanes && dsc_rate_per_lane)
max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
}
@@ -3613,19 +3722,19 @@ intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
-
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
u8 max_frl_bw_mask = 0, frl_trained_mask;
bool is_active;
max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
- drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
+ drm_dbg(display->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
- drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
+ drm_dbg(display->drm, "Sink max rate from EDID = %d Gbps\n",
+ max_edid_frl_bw);
max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
@@ -3633,7 +3742,7 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
return -EINVAL;
max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
- drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);
+ drm_dbg(display->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);
if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
goto frl_trained;
@@ -3670,10 +3779,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
return -ETIMEDOUT;
frl_trained:
- drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
+ drm_dbg(display->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
intel_dp->frl.is_trained = true;
- drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);
+ drm_dbg(display->drm, "FRL trained with : %d Gbps\n",
+ intel_dp->frl.trained_rate_gbps);
return 0;
}
@@ -3712,7 +3822,7 @@ int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
void intel_dp_check_frl_training(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/*
* Always go for FRL training if:
@@ -3727,14 +3837,16 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
int ret, mode;
- drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
+ drm_dbg(display->drm,
+ "Couldn't set FRL mode, continuing with TMDS mode\n");
ret = intel_dp_pcon_set_tmds_mode(intel_dp);
mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
- drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
+ drm_dbg(display->drm,
+ "Issue with PCON, cannot set TMDS mode\n");
} else {
- drm_dbg(&dev_priv->drm, "FRL training Completed\n");
+ drm_dbg(display->drm, "FRL training Completed\n");
}
}
@@ -3750,10 +3862,10 @@ static int
intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_connector *connector = &intel_connector->base;
- int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
- int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
+ int hdmi_throughput = info->hdmi.dsc_cap.clk_per_slice;
+ int hdmi_max_slices = info->hdmi.dsc_cap.max_slices;
int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
@@ -3767,13 +3879,13 @@ intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
int num_slices, int slice_width)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_connector *connector = &intel_connector->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info = &connector->base.display_info;
int output_format = crtc_state->output_format;
- bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
+ bool hdmi_all_bpp = info->hdmi.dsc_cap.all_bpp;
int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
int hdmi_max_chunk_bytes =
- connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
+ info->hdmi.dsc_cap.total_chunk_kbytes * 1024;
return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
num_slices, output_format, hdmi_all_bpp,
@@ -3784,24 +3896,26 @@ void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_display_info *info;
u8 pps_param[6];
int slice_height;
int slice_width;
int num_slices;
int bits_per_pixel;
int ret;
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct drm_connector *connector;
bool hdmi_is_dsc_1_2;
if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
return;
- if (!intel_connector)
+ if (!connector)
return;
- connector = &intel_connector->base;
- hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
+
+ info = &connector->base.display_info;
+
+ hdmi_is_dsc_1_2 = info->hdmi.dsc_cap.v_1p2;
if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
!hdmi_is_dsc_1_2)
@@ -3832,13 +3946,13 @@ intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
+ drm_dbg_kms(display->drm, "Failed to set pcon DSC\n");
}
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
bool ycbcr444_to_420 = false;
bool rgb_to_ycbcr = false;
u8 tmp;
@@ -3853,7 +3967,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
- drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
+ drm_dbg_kms(display->drm,
+ "Failed to %s protocol converter HDMI mode\n",
str_enable_disable(intel_dp_has_hdmi_sink(intel_dp)));
if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
@@ -3888,19 +4003,19 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));
tmp = rgb_to_ycbcr ? DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s protocol converter RGB->YCbCr conversion mode\n",
str_enable_disable(tmp));
}
-bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
u8 dprx = 0;
@@ -3928,7 +4043,7 @@ static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
/*
* Clear the cached register set to avoid using stale values
@@ -3947,11 +4062,11 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY,
&connector->dp.fec_capability) < 0) {
- drm_err(&i915->drm, "Failed to read FEC DPCD register\n");
+ drm_err(display->drm, "Failed to read FEC DPCD register\n");
return;
}
- drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
+ drm_dbg_kms(display->drm, "FEC CAPABILITY: %x\n",
connector->dp.fec_capability);
}
@@ -3963,11 +4078,28 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *
intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
}
+static void
+intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
+ if (!HAS_DSC(display))
+ return;
+
+ if (intel_dp_is_edp(intel_dp))
+ intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
+ connector);
+ else
+ intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV],
+ connector);
+}
+
static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
int n = intel_dp->mso_link_count;
int overlap = intel_dp->mso_pixel_overlap;
@@ -3982,7 +4114,7 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
drm_mode_set_name(mode);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
connector->base.base.id, connector->base.name,
DRM_MODE_ARG(mode));
@@ -3990,7 +4122,7 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
@@ -4008,7 +4140,7 @@ void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
* up by the BIOS, and thus we can't get the mode at module
* load.
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
pipe_bpp, connector->panel.vbt.edp.bpp);
connector->panel.vbt.edp.bpp = pipe_bpp;
@@ -4017,7 +4149,7 @@ void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_display_info *info = &connector->base.display_info;
u8 mso;
@@ -4026,23 +4158,25 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
return;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
- drm_err(&i915->drm, "Failed to read MSO cap\n");
+ drm_err(display->drm, "Failed to read MSO cap\n");
return;
}
/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
- drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
+ drm_err(display->drm, "Invalid MSO link count cap %u\n", mso);
mso = 0;
}
if (mso) {
- drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
+ drm_dbg_kms(display->drm,
+ "Sink MSO %ux%u configuration, pixel overlap %u\n",
mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
info->mso_pixel_overlap);
- if (!HAS_MSO(i915)) {
- drm_err(&i915->drm, "No source MSO support, disabling\n");
+ if (!HAS_MSO(display)) {
+ drm_err(display->drm,
+ "No source MSO support, disabling\n");
mso = 0;
}
}
@@ -4051,14 +4185,52 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}
+static void
+intel_edp_set_sink_rates(struct intel_dp *intel_dp)
+{
+ intel_dp->num_sink_rates = 0;
+
+ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+ int i;
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+ int val = le16_to_cpu(sink_rates[i]);
+
+ if (val == 0)
+ break;
+
+ /* Value read multiplied by 200kHz gives the per-lane
+ * link rate in kHz. The source rates are, however,
+ * stored in terms of LS_Clk kHz. The full conversion
+ * back to symbols is
+ * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
+ */
+ intel_dp->sink_rates[i] = (val * 200) / 10;
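+ /* e.g. a raw value of 8100 (8100 * 200 kHz = 1.62 GHz) yields 162000 */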
+ }
+ intel_dp->num_sink_rates = i;
+ }
+
+ /*
+ * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
+ * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
+ */
+ if (intel_dp->num_sink_rates)
+ intel_dp->use_rate_select = true;
+ else
+ intel_dp_set_sink_rates(intel_dp);
+}
+
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv =
- to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
/* this function is meant to be called only once */
- drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
+ drm_WARN_ON(display->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
return false;
@@ -4082,7 +4254,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd)) {
- drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
+ drm_dbg_kms(display->drm, "eDP DPCD: %*ph\n",
(int)sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
@@ -4090,59 +4262,22 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
}
/*
+ * If needed, program our source OUI so we can make various Intel-specific AUX services
+ * available (such as HDR backlight controls)
+ */
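+ /*
+ * Note: programmed before intel_psr_init_dpcd() below, presumably so
+ * sinks that gate features on the source OUI expose them during
+ * PSR/Panel Replay DPCD setup.
+ */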
+ intel_dp_init_source_oui(intel_dp);
+
+ /*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
* for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
*/
intel_psr_init_dpcd(intel_dp);
- /* Clear the default sink rates */
- intel_dp->num_sink_rates = 0;
-
- /* Read the eDP 1.4+ supported link rates. */
- if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
- __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
- int i;
-
- drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
- sink_rates, sizeof(sink_rates));
-
- for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
- int val = le16_to_cpu(sink_rates[i]);
-
- if (val == 0)
- break;
-
- /* Value read multiplied by 200kHz gives the per-lane
- * link rate in kHz. The source rates are, however,
- * stored in terms of LS_Clk kHz. The full conversion
- * back to symbols is
- * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
- */
- intel_dp->sink_rates[i] = (val * 200) / 10;
- }
- intel_dp->num_sink_rates = i;
- }
-
- /*
- * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
- * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
- */
- if (intel_dp->num_sink_rates)
- intel_dp->use_rate_select = true;
- else
- intel_dp_set_sink_rates(intel_dp);
+ intel_edp_set_sink_rates(intel_dp);
intel_dp_set_max_sink_lane_count(intel_dp);
/* Read the eDP DSC DPCD registers */
- if (HAS_DSC(dev_priv))
- intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
- connector);
-
- /*
- * If needed, program our source OUI so we can make various Intel-specific AUX services
- * available (such as HDR backlight controls)
- */
- intel_edp_init_source_oui(intel_dp, true);
+ intel_dp_detect_dsc_caps(intel_dp, connector);
return true;
}
@@ -4230,9 +4365,9 @@ static enum drm_dp_mst_mode
intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
enum drm_dp_mst_mode sink_mst_mode)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (!i915->display.params.enable_dp_mst)
+ if (!display->params.enable_dp_mst)
return DRM_DP_SST;
if (!intel_dp_mst_source_support(intel_dp))
@@ -4248,7 +4383,7 @@ intel_dp_mst_mode_choose(struct intel_dp *intel_dp,
static enum drm_dp_mst_mode
intel_dp_mst_detect(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum drm_dp_mst_mode sink_mst_mode;
enum drm_dp_mst_mode mst_detect;
@@ -4257,12 +4392,12 @@ intel_dp_mst_detect(struct intel_dp *intel_dp)
mst_detect = intel_dp_mst_mode_choose(intel_dp, sink_mst_mode);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s -> enable: %s\n",
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp_mst_source_support(intel_dp)),
intel_dp_mst_mode_str(sink_mst_mode),
- str_yes_no(i915->display.params.enable_dp_mst),
+ str_yes_no(display->params.enable_dp_mst),
intel_dp_mst_mode_str(mst_detect));
return mst_detect;
@@ -4288,12 +4423,13 @@ intel_dp_mst_configure(struct intel_dp *intel_dp)
static void
intel_dp_mst_disconnect(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!intel_dp->is_mst)
return;
- drm_dbg_kms(&i915->drm, "MST device may have disappeared %d vs %d\n",
+ drm_dbg_kms(display->drm,
+ "MST device may have disappeared %d vs %d\n",
intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
@@ -4374,7 +4510,7 @@ static ssize_t intel_dp_as_sdp_pack(const struct drm_dp_as_sdp *as_sdp,
}
static ssize_t
-intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
+intel_dp_hdr_metadata_infoframe_sdp_pack(struct intel_display *display,
const struct hdmi_drm_infoframe *drm_infoframe,
struct dp_sdp *sdp,
size_t size)
@@ -4391,12 +4527,13 @@ intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
if (len < 0) {
- drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n");
+ drm_dbg_kms(display->drm,
+ "buffer size is smaller than hdr metadata infoframe\n");
return -ENOSPC;
}
if (len != infoframe_size) {
- drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
+ drm_dbg_kms(display->drm, "wrong static hdr metadata size\n");
return -ENOSPC;
}
@@ -4454,8 +4591,8 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct dp_sdp sdp = {};
ssize_t len;
@@ -4468,7 +4605,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
len = drm_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp);
break;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
+ len = intel_dp_hdr_metadata_infoframe_sdp_pack(display,
&crtc_state->infoframes.drm.drm,
&sdp, sizeof(sdp));
break;
@@ -4481,7 +4618,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
return;
}
- if (drm_WARN_ON(&dev_priv->drm, len < 0))
+ if (drm_WARN_ON(display->drm, len < 0))
return;
dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
@@ -4492,20 +4629,19 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- i915_reg_t reg = HSW_TVIDEO_DIP_CTL(dev_priv,
- crtc_state->cpu_transcoder);
+ struct intel_display *display = to_intel_display(encoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display, crtc_state->cpu_transcoder);
u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
- if (HAS_AS_SDP(dev_priv))
+ if (HAS_AS_SDP(display))
dip_enable |= VIDEO_DIP_ENABLE_AS_ADL;
- u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
+ u32 val = intel_de_read(display, reg) & ~dip_enable;
/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
- if (!enable && HAS_DSC(dev_priv))
+ if (!enable && HAS_DSC(display))
val &= ~VDIP_ENABLE_PPS;
/*
@@ -4515,8 +4651,8 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
if (!enable || !crtc_state->has_psr)
val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
if (!enable)
return;
@@ -4637,8 +4773,8 @@ intel_read_dp_as_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_dp_as_sdp *as_sdp)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = DP_SDP_ADAPTIVE_SYNC;
struct dp_sdp sdp = {};
int ret;
@@ -4652,7 +4788,7 @@ intel_read_dp_as_sdp(struct intel_encoder *encoder,
ret = intel_dp_as_sdp_unpack(as_sdp, &sdp, sizeof(sdp));
if (ret)
- drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP AS SDP\n");
+ drm_dbg_kms(display->drm, "Failed to unpack DP AS SDP\n");
}
static int
@@ -4705,8 +4841,8 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_dp_vsc_sdp *vsc)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = DP_SDP_VSC;
struct dp_sdp sdp = {};
int ret;
@@ -4720,15 +4856,15 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
if (ret)
- drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
+ drm_dbg_kms(display->drm, "Failed to unpack DP VSC SDP\n");
}
static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct hdmi_drm_infoframe *drm_infoframe)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
struct dp_sdp sdp = {};
int ret;
@@ -4744,7 +4880,7 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod
sizeof(sdp));
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Failed to unpack DP HDR Metadata Infoframe SDP\n");
}
@@ -4771,333 +4907,11 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
}
}
-static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int status = 0;
- int test_link_rate;
- u8 test_lane_count, test_link_bw;
- /* (DP CTS 1.2)
- * 4.3.1.11
- */
- /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */
- status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
- &test_lane_count);
-
- if (status <= 0) {
- drm_dbg_kms(&i915->drm, "Lane count read failed\n");
- return DP_TEST_NAK;
- }
- test_lane_count &= DP_MAX_LANE_COUNT_MASK;
-
- status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
- &test_link_bw);
- if (status <= 0) {
- drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
- return DP_TEST_NAK;
- }
- test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
-
- /* Validate the requested link rate and lane count */
- if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
- test_lane_count))
- return DP_TEST_NAK;
-
- intel_dp->compliance.test_lane_count = test_lane_count;
- intel_dp->compliance.test_link_rate = test_link_rate;
-
- return DP_TEST_ACK;
-}
-
-static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 test_pattern;
- u8 test_misc;
- __be16 h_width, v_height;
- int status = 0;
-
- /* Read the TEST_PATTERN (DP CTS 3.1.5) */
- status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
- &test_pattern);
- if (status <= 0) {
- drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
- return DP_TEST_NAK;
- }
- if (test_pattern != DP_COLOR_RAMP)
- return DP_TEST_NAK;
-
- status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
- &h_width, 2);
- if (status <= 0) {
- drm_dbg_kms(&i915->drm, "H Width read failed\n");
- return DP_TEST_NAK;
- }
-
- status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
- &v_height, 2);
- if (status <= 0) {
- drm_dbg_kms(&i915->drm, "V Height read failed\n");
- return DP_TEST_NAK;
- }
-
- status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
- &test_misc);
- if (status <= 0) {
- drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
- return DP_TEST_NAK;
- }
- if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
- return DP_TEST_NAK;
- if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
- return DP_TEST_NAK;
- switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
- case DP_TEST_BIT_DEPTH_6:
- intel_dp->compliance.test_data.bpc = 6;
- break;
- case DP_TEST_BIT_DEPTH_8:
- intel_dp->compliance.test_data.bpc = 8;
- break;
- default:
- return DP_TEST_NAK;
- }
-
- intel_dp->compliance.test_data.video_pattern = test_pattern;
- intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
- intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
- /* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance.test_active = true;
-
- return DP_TEST_ACK;
-}
-
-static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 test_result = DP_TEST_ACK;
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_connector *connector = &intel_connector->base;
-
- if (intel_connector->detect_edid == NULL ||
- connector->edid_corrupt ||
- intel_dp->aux.i2c_defer_count > 6) {
- /* Check EDID read for NACKs, DEFERs and corruption
- * (DP CTS 1.2 Core r1.1)
- * 4.2.2.4 : Failed EDID read, I2C_NAK
- * 4.2.2.5 : Failed EDID read, I2C_DEFER
- * 4.2.2.6 : EDID corruption detected
- * Use failsafe mode for all cases
- */
- if (intel_dp->aux.i2c_nack_count > 0 ||
- intel_dp->aux.i2c_defer_count > 0)
- drm_dbg_kms(&i915->drm,
- "EDID read had %d NACKs, %d DEFERs\n",
- intel_dp->aux.i2c_nack_count,
- intel_dp->aux.i2c_defer_count);
- intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
- } else {
- /* FIXME: Get rid of drm_edid_raw() */
- const struct edid *block = drm_edid_raw(intel_connector->detect_edid);
-
- /* We have to write the checksum of the last block read */
- block += block->extensions;
-
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
- block->checksum) <= 0)
- drm_dbg_kms(&i915->drm,
- "Failed to write EDID checksum\n");
-
- test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
- intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
- }
-
- /* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance.test_active = true;
-
- return test_result;
-}
-
-static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv =
- to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
- struct drm_dp_phy_test_params *data =
- &intel_dp->compliance.test_data.phytest;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- enum pipe pipe = crtc->pipe;
- u32 pattern_val;
-
- switch (data->phy_pattern) {
- case DP_LINK_QUAL_PATTERN_DISABLE:
- drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
- intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
- if (DISPLAY_VER(dev_priv) >= 10)
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
- DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
- DP_TP_CTL_LINK_TRAIN_NORMAL);
- break;
- case DP_LINK_QUAL_PATTERN_D10_2:
- drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
- intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
- DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
- break;
- case DP_LINK_QUAL_PATTERN_ERROR_RATE:
- drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
- intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
- DDI_DP_COMP_CTL_ENABLE |
- DDI_DP_COMP_CTL_SCRAMBLED_0);
- break;
- case DP_LINK_QUAL_PATTERN_PRBS7:
- drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
- intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
- DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
- break;
- case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
- /*
- * FIXME: Ideally pattern should come from DPCD 0x250. As
- * current firmware of DPR-100 could not set it, so hardcoding
- * now for complaince test.
- */
- drm_dbg_kms(&dev_priv->drm,
- "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
- pattern_val = 0x3e0f83e0;
- intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
- pattern_val = 0x0f83e0f8;
- intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
- pattern_val = 0x0000f83e;
- intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
- intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
- DDI_DP_COMP_CTL_ENABLE |
- DDI_DP_COMP_CTL_CUSTOM80);
- break;
- case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
- /*
- * FIXME: Ideally pattern should come from DPCD 0x24A. As
- * current firmware of DPR-100 could not set it, so hardcoding
- * now for complaince test.
- */
- drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
- pattern_val = 0xFB;
- intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
- DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
- pattern_val);
- break;
- case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
- if (DISPLAY_VER(dev_priv) < 10) {
- drm_warn(&dev_priv->drm, "Platform does not support TPS4\n");
- break;
- }
- drm_dbg_kms(&dev_priv->drm, "Set TPS4 compliance Phy Test Pattern\n");
- intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
- intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
- DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
- DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
- break;
- default:
- drm_warn(&dev_priv->drm, "Invalid Phy Test Pattern\n");
- }
-}
-
-static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct drm_dp_phy_test_params *data =
- &intel_dp->compliance.test_data.phytest;
- u8 link_status[DP_LINK_STATUS_SIZE];
-
- if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
- link_status) < 0) {
- drm_dbg_kms(&i915->drm, "failed to get link status\n");
- return;
- }
-
- /* retrieve vswing & pre-emphasis setting */
- intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
- link_status);
-
- intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
-
- intel_dp_phy_pattern_update(intel_dp, crtc_state);
-
- drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
- intel_dp->train_set, crtc_state->lane_count);
-
- drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
- intel_dp->dpcd[DP_DPCD_REV]);
-}
-
-static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct drm_dp_phy_test_params *data =
- &intel_dp->compliance.test_data.phytest;
-
- if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
- drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n");
- return DP_TEST_NAK;
- }
-
- /* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance.test_active = true;
-
- return DP_TEST_ACK;
-}
-
-static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 response = DP_TEST_NAK;
- u8 request = 0;
- int status;
-
- status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
- if (status <= 0) {
- drm_dbg_kms(&i915->drm,
- "Could not read test request from sink\n");
- goto update_status;
- }
-
- switch (request) {
- case DP_TEST_LINK_TRAINING:
- drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
- response = intel_dp_autotest_link_training(intel_dp);
- break;
- case DP_TEST_LINK_VIDEO_PATTERN:
- drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
- response = intel_dp_autotest_video_pattern(intel_dp);
- break;
- case DP_TEST_LINK_EDID_READ:
- drm_dbg_kms(&i915->drm, "EDID test requested\n");
- response = intel_dp_autotest_edid(intel_dp);
- break;
- case DP_TEST_LINK_PHY_TEST_PATTERN:
- drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
- response = intel_dp_autotest_phy_pattern(intel_dp);
- break;
- default:
- drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
- request);
- break;
- }
-
- if (response & DP_TEST_ACK)
- intel_dp->compliance.test_type = request;
-
-update_status:
- status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
- if (status <= 0)
- drm_dbg_kms(&i915->drm,
- "Could not write test response to sink\n");
-}
-
static bool intel_dp_link_ok(struct intel_dp *intel_dp,
u8 link_status[DP_LINK_STATUS_SIZE])
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool uhbr = intel_dp->link_rate >= 1000000;
bool ok;
@@ -5111,7 +4925,7 @@ static bool intel_dp_link_ok(struct intel_dp *intel_dp,
return true;
intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s link not ok, retraining\n",
encoder->base.base.id, encoder->base.name,
uhbr ? "128b/132b" : "8b/10b");
@@ -5134,14 +4948,14 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 link_status[DP_LINK_STATUS_SIZE] = {};
const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
esi_link_status_size) != esi_link_status_size) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[ENCODER:%d:%s] Failed to read link status\n",
encoder->base.base.id, encoder->base.name);
return false;
@@ -5167,27 +4981,27 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
bool link_ok = true;
bool reprobe_needed = false;
- drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
+ drm_WARN_ON_ONCE(display->drm, intel_dp->active_mst_links < 0);
for (;;) {
u8 esi[4] = {};
u8 ack[4] = {};
if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"failed to get ESI - device may have failed\n");
link_ok = false;
break;
}
- drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);
+ drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
if (intel_dp->active_mst_links > 0 && link_ok &&
esi[3] & LINK_STATUS_CHANGED) {
@@ -5199,7 +5013,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
intel_dp_mst_hpd_irq(intel_dp, esi, ack);
if (esi[3] & DP_TUNNELING_IRQ) {
- if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+ if (drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
&intel_dp->aux))
reprobe_needed = true;
ack[3] |= DP_TUNNELING_IRQ;
@@ -5209,7 +5023,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
break;
if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
- drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
+ drm_dbg_kms(display->drm, "Failed to ack ESI\n");
if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
@@ -5290,13 +5104,14 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
return true;
/* Retrain if link not ok */
- return !intel_dp_link_ok(intel_dp, link_status);
+ return !intel_dp_link_ok(intel_dp, link_status) &&
+ !intel_psr_link_ok(intel_dp);
}
-static bool intel_dp_has_connector(struct intel_dp *intel_dp,
- const struct drm_connector_state *conn_state)
+bool intel_dp_has_connector(struct intel_dp *intel_dp,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder;
enum pipe pipe;
@@ -5309,7 +5124,7 @@ static bool intel_dp_has_connector(struct intel_dp *intel_dp,
return true;
/* MST */
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
encoder = &intel_dp->mst_encoders[pipe]->base;
if (conn_state->best_encoder == &encoder->base)
return true;
@@ -5318,18 +5133,33 @@ static bool intel_dp_has_connector(struct intel_dp *intel_dp,
return false;
}
+static void wait_for_connector_hw_done(const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_display *display = to_intel_display(connector);
+
+ drm_modeset_lock_assert_held(&display->drm->mode_config.connection_mutex);
+
+ if (!conn_state->commit)
+ return;
+
+ drm_WARN_ON(display->drm,
+ !wait_for_completion_timeout(&conn_state->commit->hw_done,
+ msecs_to_jiffies(5000)));
+}
+
int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx,
u8 *pipe_mask)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
int ret = 0;
*pipe_mask = 0;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state =
connector->base.state;
@@ -5349,15 +5179,13 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
crtc_state = to_intel_crtc_state(crtc->base.state);
- drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
+ drm_WARN_ON(display->drm,
+ !intel_crtc_has_dp_encoder(crtc_state));
if (!crtc_state->hw.active)
continue;
- if (conn_state->commit)
- drm_WARN_ON(&i915->drm,
- !wait_for_completion_timeout(&conn_state->commit->hw_done,
- msecs_to_jiffies(5000)));
+ wait_for_connector_hw_done(conn_state);
*pipe_mask |= BIT(crtc->pipe);
}
@@ -5366,6 +5194,11 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
return ret;
}
+void intel_dp_flush_connector_commits(struct intel_connector *connector)
+{
+ wait_for_connector_hw_done(connector->base.state);
+}
+
static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
struct intel_connector *connector = intel_dp->attached_connector;
@@ -5377,6 +5210,7 @@ static bool intel_dp_is_connected(struct intel_dp *intel_dp)
static int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 pipe_mask;
@@ -5385,7 +5219,7 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
if (!intel_dp_is_connected(intel_dp))
return 0;
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
ctx);
if (ret)
return ret;
@@ -5403,7 +5237,8 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
if (!intel_dp_needs_link_retrain(intel_dp))
return 0;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link (forced %s)\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] retraining link (forced %s)\n",
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp->link.force_retrain));
@@ -5414,7 +5249,7 @@ static int intel_dp_retrain_link(struct intel_encoder *encoder,
intel_dp->link.force_retrain = false;
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] link retraining failed: %pe\n",
encoder->base.base.id, encoder->base.name,
ERR_PTR(ret));
@@ -5445,121 +5280,9 @@ void intel_dp_check_link_state(struct intel_dp *intel_dp)
intel_encoder_link_check_queue_work(encoder, 0);
}
-static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
- struct drm_modeset_acquire_ctx *ctx,
- u8 *pipe_mask)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- struct drm_connector_list_iter conn_iter;
- struct intel_connector *connector;
- int ret = 0;
-
- *pipe_mask = 0;
-
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
- for_each_intel_connector_iter(connector, &conn_iter) {
- struct drm_connector_state *conn_state =
- connector->base.state;
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
-
- if (!intel_dp_has_connector(intel_dp, conn_state))
- continue;
-
- crtc = to_intel_crtc(conn_state->crtc);
- if (!crtc)
- continue;
-
- ret = drm_modeset_lock(&crtc->base.mutex, ctx);
- if (ret)
- break;
-
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
-
- if (!crtc_state->hw.active)
- continue;
-
- if (conn_state->commit &&
- !try_wait_for_completion(&conn_state->commit->hw_done))
- continue;
-
- *pipe_mask |= BIT(crtc->pipe);
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return ret;
-}
-
-static int intel_dp_do_phy_test(struct intel_encoder *encoder,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_crtc *crtc;
- u8 pipe_mask;
- int ret;
-
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
- ctx);
- if (ret)
- return ret;
-
- ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
- if (ret)
- return ret;
-
- if (pipe_mask == 0)
- return 0;
-
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
- encoder->base.base.id, encoder->base.name);
-
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- /* test on the MST master transcoder */
- if (DISPLAY_VER(dev_priv) >= 12 &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
- !intel_dp_mst_is_master_trans(crtc_state))
- continue;
-
- intel_dp_process_phy_request(intel_dp, crtc_state);
- break;
- }
-
- return 0;
-}
-
-void intel_dp_phy_test(struct intel_encoder *encoder)
-{
- struct drm_modeset_acquire_ctx ctx;
- int ret;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- for (;;) {
- ret = intel_dp_do_phy_test(encoder, &ctx);
-
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- continue;
- }
-
- break;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- drm_WARN(encoder->base.dev, ret,
- "Acquiring modeset locks failed with %i\n", ret);
-}
-
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 val;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
@@ -5572,18 +5295,18 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
if (val & DP_AUTOMATED_TEST_REQUEST)
- intel_dp_handle_test_request(intel_dp);
+ intel_dp_test_request(intel_dp);
if (val & DP_CP_IRQ)
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
if (val & DP_SINK_SPECIFIC_IRQ)
- drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
+ drm_dbg_kms(display->drm, "Sink specific irq unhandled\n");
}
static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
bool reprobe_needed = false;
u8 val;
@@ -5595,7 +5318,7 @@ static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
return false;
if ((val & DP_TUNNELING_IRQ) &&
- drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+ drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
&intel_dp->aux))
reprobe_needed = true;
@@ -5625,16 +5348,11 @@ static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 old_sink_count = intel_dp->sink_count;
bool reprobe_needed = false;
bool ret;
- /*
- * Clearing compliance test variables to allow capturing
- * of values for next automated test request.
- */
- memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+ intel_dp_test_reset(intel_dp);
/*
* Now read the DPCD to see if it's actually running
@@ -5659,24 +5377,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
intel_psr_short_pulse(intel_dp);
- switch (intel_dp->compliance.test_type) {
- case DP_TEST_LINK_TRAINING:
- drm_dbg_kms(&dev_priv->drm,
- "Link Training Compliance Test requested\n");
- /* Send a Hotplug Uevent to userspace to start modeset */
- drm_kms_helper_hotplug_event(&dev_priv->drm);
- break;
- case DP_TEST_LINK_PHY_TEST_PATTERN:
- drm_dbg_kms(&dev_priv->drm,
- "PHY test pattern Compliance Test requested\n");
- /*
- * Schedule long hpd to do the test
- *
- * FIXME get rid of the ad-hoc phy test modeset code
- * and properly incorporate it into the normal modeset.
- */
+ if (intel_dp_test_short_pulse(intel_dp))
reprobe_needed = true;
- }
return !reprobe_needed;
}
@@ -5685,12 +5387,12 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u8 *dpcd = intel_dp->dpcd;
u8 type;
- if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
+ if (drm_WARN_ON(display->drm, intel_dp_is_edp(intel_dp)))
return connector_status_connected;
lspcon_resume(dig_port);
@@ -5733,7 +5435,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
}
/* Anything else is out of spec, warn and ignore */
- drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
+ drm_dbg_kms(display->drm, "Broken DP branch device, ignoring\n");
return connector_status_disconnected;
}
@@ -5828,7 +5530,7 @@ static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
const struct drm_edid *drm_edid)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
intel_dp->dfp.max_bpc =
@@ -5852,7 +5554,7 @@ intel_dp_update_dfp(struct intel_dp *intel_dp,
drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
intel_dp->downstream_ports);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
connector->base.base.id, connector->base.name,
intel_dp->dfp.max_bpc,
@@ -5885,7 +5587,7 @@ intel_dp_can_ycbcr420(struct intel_dp *intel_dp)
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
intel_dp->dfp.ycbcr420_passthrough =
@@ -5903,7 +5605,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
connector->base.base.id, connector->base.name,
str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
@@ -5914,7 +5616,7 @@ intel_dp_update_420(struct intel_dp *intel_dp)
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
const struct drm_edid *drm_edid;
bool vrr_capable;
@@ -5927,7 +5629,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
drm_edid_connector_update(&connector->base, drm_edid);
vrr_capable = intel_vrr_is_capable(connector);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
@@ -5962,55 +5664,41 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
}
static void
-intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector)
-{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
-
- /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
- if (!HAS_DSC(i915))
- return;
-
- if (intel_dp_is_edp(intel_dp))
- intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
- connector);
- else
- intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV],
- connector);
-}
-
-static void
intel_dp_detect_sdp_caps(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- intel_dp->as_sdp_supported = HAS_AS_SDP(i915) &&
+ intel_dp->as_sdp_supported = HAS_AS_SDP(display) &&
drm_dp_as_sdp_supported(&intel_dp->aux, intel_dp->dpcd);
}
static int
-intel_dp_detect(struct drm_connector *connector,
+intel_dp_detect(struct drm_connector *_connector,
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_connector *intel_connector =
- to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
+ struct intel_display *display = to_intel_display(_connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
int ret;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- drm_WARN_ON(&dev_priv->drm,
- !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
- if (!intel_display_device_enabled(dev_priv))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(dev_priv))
- return connector->status;
+ if (!intel_display_driver_check_access(display))
+ return connector->base.status;
+
+ intel_dp_flush_connector_commits(connector);
+
+ intel_pps_vdd_on(intel_dp);
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
@@ -6033,8 +5721,8 @@ intel_dp_detect(struct drm_connector *connector,
status = connector_status_disconnected;
if (status == connector_status_disconnected) {
- memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
- memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
+ intel_dp_test_reset(intel_dp);
+ memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));
intel_dp->psr.sink_panel_replay_support = false;
intel_dp->psr.sink_panel_replay_su_support = false;
@@ -6042,20 +5730,25 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp_tunnel_disconnect(intel_dp);
- goto out;
+ goto out_unset_edid;
}
+ intel_dp_init_source_oui(intel_dp);
+
ret = intel_dp_tunnel_detect(intel_dp, ctx);
- if (ret == -EDEADLK)
- return ret;
+ if (ret == -EDEADLK) {
+ status = ret;
+
+ goto out_vdd_off;
+ }
if (ret == 1)
- intel_connector->base.epoch_counter++;
+ connector->base.epoch_counter++;
if (!intel_dp_is_edp(intel_dp))
intel_psr_init_dpcd(intel_dp);
- intel_dp_detect_dsc_caps(intel_dp, intel_connector);
+ intel_dp_detect_dsc_caps(intel_dp, connector);
intel_dp_detect_sdp_caps(intel_dp);
@@ -6075,7 +5768,7 @@ intel_dp_detect(struct drm_connector *connector,
* with EDID on it
*/
status = connector_status_disconnected;
- goto out;
+ goto out_unset_edid;
}
/*
@@ -6098,36 +5791,36 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (intel_dp_is_edp(intel_dp) ||
- to_intel_connector(connector)->detect_edid)
+ if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
status = connector_status_connected;
intel_dp_check_device_service_irq(intel_dp);
-out:
+out_unset_edid:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
if (!intel_dp_is_edp(intel_dp))
- drm_dp_set_subconnector_property(connector,
+ drm_dp_set_subconnector_property(&connector->base,
status,
intel_dp->dpcd,
intel_dp->downstream_ports);
+out_vdd_off:
+ intel_pps_vdd_off(intel_dp);
+
return status;
}
static void
intel_dp_force(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &dig_port->base;
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return;
intel_dp_unset_edid(intel_dp);
@@ -6138,30 +5831,31 @@ intel_dp_force(struct drm_connector *connector)
intel_dp_set_edid(intel_dp);
}
-static int intel_dp_get_modes(struct drm_connector *connector)
+static int intel_dp_get_modes(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_display *display = to_intel_display(_connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int num_modes;
/* drm_edid_connector_update() done in ->detect() or ->force() */
- num_modes = drm_edid_connector_add_modes(connector);
+ num_modes = drm_edid_connector_add_modes(&connector->base);
/* Also add fixed mode, which may or may not be present in EDID */
- if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
- num_modes += intel_panel_get_modes(intel_connector);
+ if (intel_dp_is_edp(intel_dp))
+ num_modes += intel_panel_get_modes(connector);
if (num_modes)
return num_modes;
- if (!intel_connector->detect_edid) {
- struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
+ if (!connector->detect_edid) {
struct drm_display_mode *mode;
- mode = drm_dp_downstream_mode(connector->dev,
+ mode = drm_dp_downstream_mode(display->drm,
intel_dp->dpcd,
intel_dp->downstream_ports);
if (mode) {
- drm_mode_probed_add(connector, mode);
+ drm_mode_probed_add(&connector->base, mode);
num_modes++;
}
}
@@ -6172,7 +5866,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
static int
intel_dp_connector_register(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_lspcon *lspcon = &dig_port->lspcon;
@@ -6182,7 +5876,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
+ drm_dbg_kms(display->drm, "registering %s bus for %s\n",
intel_dp->aux.name, connector->kdev->kobj.name);
intel_dp->aux.dev = connector->kdev;
@@ -6219,10 +5913,11 @@ intel_dp_connector_unregister(struct drm_connector *connector)
void intel_dp_connector_sync_state(struct intel_connector *connector,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
if (crtc_state && crtc_state->dsc.compression_enable) {
- drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
+ drm_WARN_ON(display->drm,
+ !connector->dp.dsc_decompression_aux);
connector->dp.dsc_decompression_enabled = true;
} else {
connector->dp.dsc_decompression_enabled = false;
@@ -6252,18 +5947,18 @@ void intel_dp_encoder_flush_work(struct drm_encoder *_encoder)
intel_dp_aux_fini(intel_dp);
}
-void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_pps_vdd_off_sync(intel_dp);
intel_dp_tunnel_suspend(intel_dp);
}
-void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_shutdown(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_pps_wait_power_cycle(intel_dp);
}
@@ -6271,12 +5966,12 @@ void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
static int intel_modeset_tile_group(struct intel_atomic_state *state,
int tile_group_id)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
int ret = 0;
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
@@ -6312,13 +6007,13 @@ static int intel_modeset_tile_group(struct intel_atomic_state *state,
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
if (transcoders == 0)
return 0;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state;
int ret;
@@ -6345,7 +6040,7 @@ static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
transcoders &= ~BIT(crtc_state->cpu_transcoder);
}
- drm_WARN_ON(&dev_priv->drm, transcoders != 0);
+ drm_WARN_ON(display->drm, transcoders != 0);
return 0;
}
@@ -6379,7 +6074,7 @@ static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *_state)
{
- struct drm_i915_private *dev_priv = to_i915(conn->dev);
+ struct intel_display *display = to_intel_display(conn->dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
struct intel_connector *intel_conn = to_intel_connector(conn);
@@ -6409,7 +6104,7 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
return 0;
if (conn->has_tile) {
@@ -6424,6 +6119,7 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
enum drm_connector_status hpd_state)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
struct drm_i915_private *i915 = to_i915(connector->dev);
bool hpd_high = hpd_state == connector_status_connected;
@@ -6431,10 +6127,12 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
bool need_work = false;
spin_lock_irq(&i915->irq_lock);
- if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) {
- i915->display.hotplug.event_bits |= BIT(hpd_pin);
+ if (hpd_high != test_bit(hpd_pin, &display->hotplug.oob_hotplug_last_state)) {
+ display->hotplug.event_bits |= BIT(hpd_pin);
- __assign_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state, hpd_high);
+ __assign_bit(hpd_pin,
+ &display->hotplug.oob_hotplug_last_state,
+ hpd_high);
need_work = true;
}
spin_unlock_irq(&i915->irq_lock);
@@ -6466,19 +6164,22 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
- (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
+ (long_hpd ||
+ intel_runtime_pm_suspended(&i915->runtime_pm) ||
+ !intel_pps_have_panel_power_or_vdd(intel_dp))) {
/*
* vdd off can generate a long/short pulse on eDP which
* would require vdd on to handle it, and thus we
* would end up in an endless cycle of
* "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
*/
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
long_hpd ? "long" : "short",
dig_port->base.base.base.id,
@@ -6486,7 +6187,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
return IRQ_HANDLED;
}
- drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
+ drm_dbg_kms(display->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
dig_port->base.base.base.id,
dig_port->base.base.name,
long_hpd ? "long" : "short");
@@ -6504,6 +6205,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
if (long_hpd) {
intel_dp->reset_link_params = true;
+ intel_dp_invalidate_source_oui(intel_dp);
+
return IRQ_NONE;
}
@@ -6517,7 +6220,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
return IRQ_HANDLED;
}
-static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
+static bool _intel_dp_is_port_edp(struct intel_display *display,
const struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -6525,41 +6228,40 @@ static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
* eDP not supported on g4x. so bail out early just
* for a bit extra safety in case the VBT is bonkers.
*/
- if (DISPLAY_VER(dev_priv) < 5)
+ if (DISPLAY_VER(display) < 5)
return false;
- if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
+ if (DISPLAY_VER(display) < 9 && port == PORT_A)
return true;
return devdata && intel_bios_encoder_supports_edp(devdata);
}
-bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
+bool intel_dp_is_port_edp(struct intel_display *display, enum port port)
{
- struct intel_display *display = &i915->display;
const struct intel_bios_encoder_data *devdata =
intel_bios_encoder_data_lookup(display, port);
- return _intel_dp_is_port_edp(i915, devdata, port);
+ return _intel_dp_is_port_edp(display, devdata, port);
}
bool
intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
if (intel_bios_encoder_is_lspcon(encoder->devdata))
return false;
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return true;
if (port == PORT_A)
return false;
- if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
- DISPLAY_VER(i915) >= 9)
+ if (display->platform.haswell || display->platform.broadwell ||
+ DISPLAY_VER(display) >= 9)
return true;
return false;
@@ -6568,19 +6270,19 @@ intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder)
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->base.port;
if (!intel_dp_is_edp(intel_dp))
drm_connector_attach_dp_subconnector_property(connector);
- if (!IS_G4X(dev_priv) && port != PORT_A)
+ if (!display->platform.g4x && port != PORT_A)
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
drm_connector_attach_max_bpc_property(connector, 6, 10);
- else if (DISPLAY_VER(dev_priv) >= 5)
+ else if (DISPLAY_VER(display) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
/* Register HDMI colorspace for case of lspcon */
@@ -6594,22 +6296,22 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
if (intel_dp_has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
drm_connector_attach_hdr_output_metadata_property(connector);
- if (HAS_VRR(dev_priv))
+ if (HAS_VRR(display))
drm_connector_attach_vrr_capable_property(connector);
}
static void
intel_edp_add_properties(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode =
intel_panel_preferred_fixed_mode(connector);
intel_attach_scaling_mode_property(&connector->base);
drm_connector_set_panel_orientation_with_quirk(&connector->base,
- i915->display.vbt.orientation,
+ display->vbt.orientation,
fixed_mode->hdisplay,
fixed_mode->vdisplay);
}
@@ -6617,33 +6319,20 @@ intel_edp_add_properties(struct intel_dp *intel_dp)
static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum pipe pipe = INVALID_PIPE;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- /*
- * Figure out the current pipe for the initial backlight setup.
- * If the current pipe isn't valid, try the PPS pipe, and if that
- * fails just assume pipe A.
- */
- pipe = vlv_active_pipe(intel_dp);
-
- if (pipe != PIPE_A && pipe != PIPE_B)
- pipe = intel_dp->pps.pps_pipe;
-
- if (pipe != PIPE_A && pipe != PIPE_B)
- pipe = PIPE_A;
- }
+ if (display->platform.valleyview || display->platform.cherryview)
+ pipe = vlv_pps_backlight_initial_pipe(intel_dp);
intel_backlight_setup(connector, pipe);
}
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
- struct intel_connector *intel_connector)
+ struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct drm_connector *connector = &intel_connector->base;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
@@ -6659,19 +6348,19 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* with an already powered-on LVDS power sequencer.
*/
if (intel_get_lvds_encoder(dev_priv)) {
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"LVDS was detected, not registering eDP\n");
return false;
}
- intel_bios_init_panel_early(display, &intel_connector->panel,
+ intel_bios_init_panel_early(display, &connector->panel,
encoder->devdata);
if (!intel_pps_init(intel_dp)) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] unusable PPS, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
/*
@@ -6694,11 +6383,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_alpm_init_dpcd(intel_dp);
/* Cache DPCD and EDID for edp. */
- has_dpcd = intel_edp_init_dpcd(intel_dp, intel_connector);
+ has_dpcd = intel_edp_init_dpcd(intel_dp, connector);
if (!has_dpcd) {
/* if this fails, presume the device is a ghost */
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
@@ -6721,7 +6410,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* DPCD read? Would need sort out the VDD handling...
*/
if (!intel_digital_port_connected(encoder)) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] HPD is down, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
@@ -6733,30 +6422,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* back to checking for a VGA branch device. Only do this
* on known affected platforms to minimize false positives.
*/
- if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
+ if (DISPLAY_VER(display) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
DP_DWN_STRM_PORT_TYPE_ANALOG) {
- drm_info(&dev_priv->drm,
+ drm_info(display->drm,
"[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
}
}
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- drm_edid = drm_edid_read_ddc(connector, connector->ddc);
+ mutex_lock(&display->drm->mode_config.mutex);
+ drm_edid = drm_edid_read_ddc(&connector->base, connector->base.ddc);
if (!drm_edid) {
/* Fallback to EDID from ACPI OpRegion, if any */
- drm_edid = intel_opregion_get_edid(intel_connector);
+ drm_edid = intel_opregion_get_edid(connector);
if (drm_edid)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Using OpRegion EDID\n",
- connector->base.id, connector->name);
+ connector->base.base.id, connector->base.name);
}
if (drm_edid) {
- if (drm_edid_connector_update(connector, drm_edid) ||
- !drm_edid_connector_add_modes(connector)) {
- drm_edid_connector_update(connector, NULL);
+ if (drm_edid_connector_update(&connector->base, drm_edid) ||
+ !drm_edid_connector_add_modes(&connector->base)) {
+ drm_edid_connector_update(&connector->base, NULL);
drm_edid_free(drm_edid);
drm_edid = ERR_PTR(-EINVAL);
}
@@ -6764,34 +6453,34 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
drm_edid = ERR_PTR(-ENOENT);
}
- intel_bios_init_panel_late(display, &intel_connector->panel, encoder->devdata,
+ intel_bios_init_panel_late(display, &connector->panel, encoder->devdata,
IS_ERR(drm_edid) ? NULL : drm_edid);
- intel_panel_add_edid_fixed_modes(intel_connector, true);
+ intel_panel_add_edid_fixed_modes(connector, true);
/* MSO requires information from the EDID */
intel_edp_mso_init(intel_dp);
/* multiply the mode clock and horizontal timings for MSO */
- list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
- intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head)
+ intel_edp_mso_mode_fixup(connector, fixed_mode);
/* fallback to VBT if available for eDP */
- if (!intel_panel_preferred_fixed_mode(intel_connector))
- intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+ if (!intel_panel_preferred_fixed_mode(connector))
+ intel_panel_add_vbt_lfp_fixed_mode(connector);
- mutex_unlock(&dev_priv->drm.mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
- if (!intel_panel_preferred_fixed_mode(intel_connector)) {
- drm_info(&dev_priv->drm,
+ if (!intel_panel_preferred_fixed_mode(connector)) {
+ drm_info(display->drm,
"[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
}
- intel_panel_init(intel_connector, drm_edid);
+ intel_panel_init(connector, drm_edid);
- intel_edp_backlight_setup(intel_dp, intel_connector);
+ intel_edp_backlight_setup(intel_dp, connector);
intel_edp_add_properties(intel_dp);
@@ -6801,33 +6490,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
out_vdd_off:
intel_pps_vdd_off_sync(intel_dp);
+ intel_bios_fini_panel(&connector->panel);
return false;
}
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
+ struct intel_connector *connector = container_of(work, typeof(*connector),
+ modeset_retry_work);
+ struct intel_display *display = to_intel_display(connector);
- intel_connector = container_of(work, typeof(*intel_connector),
- modeset_retry_work);
- connector = &intel_connector->base;
- drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
- connector->name);
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n", connector->base.base.id,
+ connector->base.name);
/* Grab the locks before changing connector property*/
- mutex_lock(&connector->dev->mode_config.mutex);
+ mutex_lock(&display->drm->mode_config.mutex);
/* Set connector link status to BAD and send a Uevent to notify
* userspace to do a modeset.
*/
- drm_connector_set_link_status_property(connector,
+ drm_connector_set_link_status_property(&connector->base,
DRM_MODE_LINK_STATUS_BAD);
- mutex_unlock(&connector->dev->mode_config.mutex);
+ mutex_unlock(&display->drm->mode_config.mutex);
/* Send Hotplug uevent so userspace can reprobe */
- drm_kms_helper_connector_hotplug_event(connector);
+ drm_kms_helper_connector_hotplug_event(&connector->base);
- drm_connector_put(connector);
+ drm_connector_put(&connector->base);
}
void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
@@ -6838,45 +6526,44 @@ void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
- struct intel_connector *intel_connector)
+ struct intel_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_dp *intel_dp = &dig_port->dp;
- struct intel_encoder *intel_encoder = &dig_port->base;
- struct drm_device *dev = intel_encoder->base.dev;
+ struct intel_encoder *encoder = &dig_port->base;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum port port = intel_encoder->port;
+ enum port port = encoder->port;
int type;
/* Initialize the work for modeset in case of link train failure */
- intel_dp_init_modeset_retry_work(intel_connector);
+ intel_dp_init_modeset_retry_work(connector);
if (drm_WARN(dev, dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
- dig_port->max_lanes, intel_encoder->base.base.id,
- intel_encoder->base.name))
+ dig_port->max_lanes, encoder->base.base.id,
+ encoder->base.name))
return false;
intel_dp->reset_link_params = true;
- intel_dp->pps.pps_pipe = INVALID_PIPE;
- intel_dp->pps.active_pipe = INVALID_PIPE;
/* Preserve the current hw state. */
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
- intel_dp->attached_connector = intel_connector;
+ intel_dp->DP = intel_de_read(display, intel_dp->output_reg);
+ intel_dp->attached_connector = connector;
- if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
+ if (_intel_dp_is_port_edp(display, encoder->devdata, port)) {
/*
- * Currently we don't support eDP on TypeC ports, although in
- * theory it could work on TypeC legacy ports.
+ * Currently we don't support eDP on TypeC ports for DISPLAY_VER < 30,
+ * although in theory it could work on TypeC legacy ports.
*/
- drm_WARN_ON(dev, intel_encoder_is_tc(intel_encoder));
+ drm_WARN_ON(dev, intel_encoder_is_tc(encoder) &&
+ DISPLAY_VER(display) < 30);
type = DRM_MODE_CONNECTOR_eDP;
- intel_encoder->type = INTEL_OUTPUT_EDP;
+ encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) &&
+ if (drm_WARN_ON(dev, (display->platform.valleyview ||
+ display->platform.cherryview) &&
port != PORT_B && port != PORT_C))
return false;
} else {
@@ -6886,36 +6573,37 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_set_default_sink_rates(intel_dp);
intel_dp_set_default_max_sink_lane_count(intel_dp);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
+ if (display->platform.valleyview || display->platform.cherryview)
+ vlv_pps_pipe_init(intel_dp);
intel_dp_aux_init(intel_dp);
- intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
+ connector->dp.dsc_decompression_aux = &intel_dp->aux;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Adding %s connector on [ENCODER:%d:%s]\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
- intel_encoder->base.base.id, intel_encoder->base.name);
+ encoder->base.base.id, encoder->base.name);
- drm_connector_init_with_ddc(dev, connector, &intel_dp_connector_funcs,
+ drm_connector_init_with_ddc(dev, &connector->base, &intel_dp_connector_funcs,
type, &intel_dp->aux.ddc);
- drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+ drm_connector_helper_add(&connector->base, &intel_dp_connector_helper_funcs);
- if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12)
- connector->interlace_allowed = true;
+ if (!HAS_GMCH(display) && DISPLAY_VER(display) < 12)
+ connector->base.interlace_allowed = true;
- intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
- intel_connector->base.polled = intel_connector->polled;
+ if (type != DRM_MODE_CONNECTOR_eDP)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ connector->base.polled = connector->polled;
- intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_connector_attach_encoder(connector, encoder);
- if (HAS_DDI(dev_priv))
- intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+ if (HAS_DDI(display))
+ connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
- intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->sync_state = intel_dp_connector_sync_state;
+ connector->get_hw_state = intel_connector_get_hw_state;
+ connector->sync_state = intel_dp_connector_sync_state;
- if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+ if (!intel_edp_init_connector(intel_dp, connector)) {
intel_dp_aux_fini(intel_dp);
goto fail;
}
@@ -6925,15 +6613,14 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_dp_reset_link_params(intel_dp);
/* init MST on ports that can support it */
- intel_dp_mst_encoder_init(dig_port,
- intel_connector->base.base.id);
+ intel_dp_mst_encoder_init(dig_port, connector->base.base.id);
- intel_dp_add_properties(intel_dp, connector);
+ intel_dp_add_properties(intel_dp, &connector->base);
- if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
- int ret = intel_dp_hdcp_init(dig_port, intel_connector);
+ if (is_hdcp_supported(display, port) && !intel_dp_is_edp(intel_dp)) {
+ int ret = intel_dp_hdcp_init(dig_port, connector);
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HDCP init failed, skipping.\n");
}
@@ -6946,19 +6633,19 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
fail:
intel_display_power_flush_work(dev_priv);
- drm_connector_cleanup(connector);
+ drm_connector_cleanup(&connector->base);
return false;
}
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
+void intel_dp_mst_suspend(struct intel_display *display)
{
struct intel_encoder *encoder;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_dp *intel_dp;
if (encoder->type != INTEL_OUTPUT_DDI)
@@ -6974,14 +6661,14 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
}
}
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
+void intel_dp_mst_resume(struct intel_display *display)
{
struct intel_encoder *encoder;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
- for_each_intel_encoder(&dev_priv->drm, encoder) {
+ for_each_intel_encoder(display->drm, encoder) {
struct intel_dp *intel_dp;
int ret;
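
The bulk of the intel_dp.c hunks above are one mechanical refactor: derive a struct intel_display * from whatever object is at hand via to_intel_display(), then pass display (or display->drm) to the logging calls and feature macros instead of routing everything through struct drm_i915_private. Below is a minimal stand-alone C sketch of that shape, using stub types throughout — the real kernel to_intel_display() is a generic macro that accepts many pointer types, and drm_dbg_kms() is far richer than this printf stand-in:

/* Stand-alone model of the to_intel_display() refactor.
 * Stub types only -- not the real i915 definitions. */
#include <stdio.h>

struct drm_device { const char *name; };
struct intel_display { struct drm_device *drm; };
struct intel_dp { struct intel_display *display; };

/* Simplified stand-in for the kernel's generic to_intel_display() */
static struct intel_display *to_intel_display(struct intel_dp *intel_dp)
{
	return intel_dp->display;
}

/* printf stand-in for drm_dbg_kms(display->drm, ...) */
#define drm_dbg_kms(drm, fmt, ...) \
	printf("[%s] " fmt, (drm)->name, ##__VA_ARGS__)

int main(void)
{
	struct drm_device dev = { .name = "i915" };
	struct intel_display display = { .drm = &dev };
	struct intel_dp dp = { .display = &display };
	struct intel_display *d = to_intel_display(&dp);

	/* old: i915 = dp_to_i915(intel_dp); drm_dbg_kms(&i915->drm, ...)
	 * new: drm_dbg_kms(display->drm, ...) */
	drm_dbg_kms(d->drm, "DPRX ESI: %s\n", "00 00 00 00");
	return 0;
}

The payoff, visible in hunk after hunk above, is that display code stops depending on the i915-wide private structure and only reaches the members it actually needs.
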
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 1b9aaddd8c35..ca49f0a05da5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -12,14 +12,14 @@ enum intel_output_format;
enum pipe;
enum port;
struct drm_connector_state;
+struct drm_dp_vsc_sdp;
struct drm_encoder;
-struct drm_i915_private;
struct drm_modeset_acquire_ctx;
-struct drm_dp_vsc_sdp;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_digital_port;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
@@ -37,9 +37,6 @@ struct link_config_limits {
};
void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
-void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- struct link_config_limits *limits);
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
int intel_dp_min_bpp(enum intel_output_format output_format);
@@ -57,6 +54,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx,
u8 *pipe_mask);
+void intel_dp_flush_connector_commits(struct intel_connector *connector);
void intel_dp_link_check(struct intel_encoder *encoder);
void intel_dp_check_link_state(struct intel_dp *intel_dp);
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
@@ -89,15 +87,15 @@ bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
bool intel_dp_has_dsc(const struct intel_connector *connector);
int intel_dp_link_symbol_size(int rate);
int intel_dp_link_symbol_clock(int rate);
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_dp_is_port_edp(struct intel_display *display, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
bool long_hpd);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp);
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
+void intel_dp_mst_suspend(struct intel_display *display);
+void intel_dp_mst_resume(struct intel_display *display);
int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
@@ -114,16 +112,16 @@ void intel_dp_reset_link_params(struct intel_dp *intel_dp);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
-bool intel_dp_source_supports_tps3(struct drm_i915_private *i915);
-bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
+bool intel_dp_source_supports_tps3(struct intel_display *display);
+bool intel_dp_source_supports_tps4(struct intel_display *display);
-bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
int bw_overhead);
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
int max_dprx_rate, int max_dprx_lanes);
-bool intel_dp_joiner_needs_dsc(struct drm_i915_private *i915, bool use_joiner);
+bool intel_dp_joiner_needs_dsc(struct intel_display *display,
+ int num_joined_pipes);
bool intel_dp_has_joiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -139,23 +137,23 @@ bool intel_digital_port_connected(struct intel_encoder *encoder);
bool intel_digital_port_connected_locked(struct intel_encoder *encoder);
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 dsc_max_bpc);
-u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
+u16 intel_dp_dsc_get_max_compressed_bpp(struct intel_display *display,
u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay,
- bool bigjoiner,
+ int num_joined_pipes,
enum intel_output_format output_format,
u32 pipe_bpp,
u32 timeslots);
-int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config);
+int intel_dp_dsc_sink_min_compressed_bpp(const struct intel_crtc_state *pipe_config);
int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *pipe_config,
int bpc);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
- bool bigjoiner);
-bool intel_dp_need_joiner(struct intel_dp *intel_dp,
- struct intel_connector *connector,
- int hdisplay, int clock);
+ int num_joined_pipes);
+int intel_dp_num_joined_pipes(struct intel_dp *intel_dp,
+ struct intel_connector *connector,
+ int hdisplay, int clock);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
@@ -172,10 +170,11 @@ bool intel_dp_supports_fec(struct intel_dp *intel_dp,
const struct intel_connector *connector,
const struct intel_crtc_state *pipe_config);
-bool intel_dp_supports_dsc(const struct intel_connector *connector,
+bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state);
-u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp);
+u32 intel_dp_dsc_nearest_valid_bpp(struct intel_display *display, u32 bpp, u32 pipe_bpp);
void intel_ddi_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
@@ -190,18 +189,25 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
void intel_dp_check_frl_training(struct intel_dp *intel_dp);
void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-void intel_dp_phy_test(struct intel_encoder *encoder);
+void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp);
void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
-bool
-intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool dsc,
- struct link_config_limits *limits);
+bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ bool respect_downstream_limits,
+ bool dsc,
+ struct link_config_limits *limits);
void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector);
bool intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder);
+bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
+ u8 lane_count);
+bool intel_dp_has_connector(struct intel_dp *intel_dp,
+ const struct drm_connector_state *conn_state);
+int intel_dp_dsc_max_src_input_bpc(struct intel_display *display);
+int intel_dp_dsc_min_src_input_bpc(void);
+
#endif /* __INTEL_DP_H__ */
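
In intel_dp.h, several DSC/joiner helpers change from a bool bigjoiner parameter (and the bool-returning intel_dp_need_joiner()) to an int num_joined_pipes (and intel_dp_num_joined_pipes()), generalizing beyond the two-pipe big joiner. A hedged sketch of the migration a caller would make, assuming 1 means "no joiner" and 2 the former bigjoiner case; the thresholds and helper names below are invented for illustration and are not taken from this patch:

/* Illustrative only: models the bool -> pipe-count migration.
 * Thresholds are made up; the real policy lives in intel_dp.c. */
#include <stdbool.h>
#include <stdio.h>

/* Old-style question: "does this mode need the big joiner?" */
static bool need_joiner(int hdisplay, int clock)
{
	return hdisplay > 5120 || clock > 600000;	/* invented limits */
}

/* New-style question: "how many pipes drive this mode?" */
static int num_joined_pipes(int hdisplay, int clock)
{
	return need_joiner(hdisplay, clock) ? 2 : 1;
}

int main(void)
{
	/* Before, the bool flowed into the DSC helpers; now the count
	 * does, so configurations wider than two pipes need no new bool. */
	printf("joined pipes: %d\n", num_joined_pipes(7680, 1188000));
	return 0;
}
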
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 04a7acd7f73c..40c697476b72 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -5,8 +5,6 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_trace.h"
-#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -15,6 +13,7 @@
#include "intel_pps.h"
#include "intel_quirks.h"
#include "intel_tc.h"
+#include "intel_uncore_trace.h"
#define AUX_CH_NAME_BUFSIZE 6
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 33f72db99b58..c846ef4acf5b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -34,8 +34,9 @@
* for some reason.
*/
-#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_backlight.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux_backlight.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 3425b3643143..00c493cc8a4b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -19,6 +19,7 @@
#include "intel_dp_hdcp.h"
#include "intel_hdcp.h"
#include "intel_hdcp_regs.h"
+#include "intel_hdcp_shim.h"
static u32 transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
{
@@ -57,7 +58,7 @@ static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
u8 *an)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
u8 aksv[DRM_HDCP_KSV_LEN] = {};
ssize_t dpcd_ret;
@@ -65,7 +66,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
an, DRM_HDCP_AN_LEN);
if (dpcd_ret != DRM_HDCP_AN_LEN) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to write An over DP/AUX (%zd)\n",
dpcd_ret);
return dpcd_ret >= 0 ? -EIO : dpcd_ret;
@@ -81,7 +82,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AKSV,
aksv, DRM_HDCP_KSV_LEN);
if (dpcd_ret != DRM_HDCP_KSV_LEN) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to write Aksv over DP/AUX (%zd)\n",
dpcd_ret);
return dpcd_ret >= 0 ? -EIO : dpcd_ret;
@@ -92,13 +93,13 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
ssize_t ret;
ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret != DRM_HDCP_KSV_LEN) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read Bksv from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -108,7 +109,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
u8 *bstatus)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
ssize_t ret;
/*
@@ -119,7 +120,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret != DRM_HDCP_BSTATUS_LEN) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -128,7 +129,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
static
int intel_dp_hdcp_read_bcaps(struct drm_dp_aux *aux,
- struct drm_i915_private *i915,
+ struct intel_display *display,
u8 *bcaps)
{
ssize_t ret;
@@ -136,7 +137,7 @@ int intel_dp_hdcp_read_bcaps(struct drm_dp_aux *aux,
ret = drm_dp_dpcd_read(aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read bcaps from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -148,11 +149,11 @@ static
int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
bool *repeater_present)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
ssize_t ret;
u8 bcaps;
- ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, display, &bcaps);
if (ret)
return ret;
@@ -164,13 +165,14 @@ static
int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
u8 *ri_prime)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
ssize_t ret;
ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret != DRM_HDCP_RI_LEN) {
- drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
+ drm_dbg_kms(display->drm,
+ "Read Ri' from DP/AUX failed (%zd)\n",
ret);
return ret >= 0 ? -EIO : ret;
}
@@ -181,14 +183,14 @@ static
int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
bool *ksv_ready)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
ssize_t ret;
u8 bstatus;
ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -200,7 +202,7 @@ static
int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
int num_downstream, u8 *ksv_fifo)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
ssize_t ret;
int i;
@@ -212,7 +214,7 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
ksv_fifo + i * DRM_HDCP_KSV_LEN,
len);
if (ret != len) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read ksv[%d] from DP/AUX failed (%zd)\n",
i, ret);
return ret >= 0 ? -EIO : ret;
@@ -225,7 +227,7 @@ static
int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
int i, u32 *part)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
ssize_t ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -235,7 +237,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
DP_AUX_HDCP_V_PRIME(i), part,
DRM_HDCP_V_PRIME_PART_LEN);
if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
return ret >= 0 ? -EIO : ret;
}
@@ -255,14 +257,14 @@ static
bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
ssize_t ret;
u8 bstatus;
ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read bstatus from DP/AUX failed (%zd)\n", ret);
return false;
}
@@ -274,11 +276,11 @@ static
int intel_dp_hdcp_get_capability(struct intel_digital_port *dig_port,
bool *hdcp_capable)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
ssize_t ret;
u8 bcaps;
- ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, display, &bcaps);
if (ret)
return ret;
@@ -341,7 +343,7 @@ static int
intel_dp_hdcp2_read_rx_status(struct intel_connector *connector,
u8 *rx_status)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
@@ -350,7 +352,7 @@ intel_dp_hdcp2_read_rx_status(struct intel_connector *connector,
DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
HDCP_2_2_DP_RXSTATUS_LEN);
if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -396,7 +398,7 @@ static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_dp *dp = &dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
@@ -429,7 +431,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
}
if (ret)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"msg_id %d, ret %d, timeout(mSec): %d\n",
hdcp2_msg_data->msg_id, ret, timeout);
@@ -513,8 +515,8 @@ static
int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
u8 msg_id, void *buf, size_t size)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct drm_dp_aux *aux = &dig_port->dp.aux;
struct intel_dp *dp = &dig_port->dp;
struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
@@ -567,7 +569,7 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
ret = drm_dp_dpcd_read(aux, offset,
(void *)byte, len);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
+ drm_dbg_kms(display->drm, "msg_id %d, ret %zd\n",
msg_id, ret);
return ret;
}
@@ -580,7 +582,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
if (hdcp2_msg_data->msg_read_timeout > 0) {
msg_expired = ktime_after(ktime_get_raw(), msg_end);
if (msg_expired) {
- drm_dbg_kms(&i915->drm, "msg_id %d, entire msg read timeout(mSec): %d\n",
+ drm_dbg_kms(display->drm,
+ "msg_id %d, entire msg read timeout(mSec): %d\n",
msg_id, hdcp2_msg_data->msg_read_timeout);
return -ETIMEDOUT;
}
@@ -695,7 +698,7 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
bool *hdcp_capable,
bool *hdcp2_capable)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_dp_aux *aux;
u8 bcaps;
int ret;
@@ -708,10 +711,10 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
aux = &connector->port->aux;
ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
if (ret)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"HDCP2 DPCD capability read failed err: %d\n", ret);
- ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
+ ret = intel_dp_hdcp_read_bcaps(aux, display, &bcaps);
if (ret)
return ret;
@@ -744,8 +747,8 @@ static int
intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector,
bool enable)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
@@ -753,7 +756,7 @@ intel_dp_mst_toggle_hdcp_stream_select(struct intel_connector *connector,
hdcp->stream_transcoder, enable,
TRANS_DDI_HDCP_SELECT);
if (ret)
- drm_err(&i915->drm, "%s HDCP stream select failed (%d)\n",
+ drm_err(display->drm, "%s HDCP stream select failed (%d)\n",
enable ? "Enable" : "Disable", ret);
return ret;
}
@@ -762,8 +765,8 @@ static int
intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
bool enable)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->stream_transcoder;
@@ -779,11 +782,11 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
return -EINVAL;
/* Wait for encryption confirmation */
- if (intel_de_wait(i915, HDCP_STATUS(i915, cpu_transcoder, port),
+ if (intel_de_wait(display, HDCP_STATUS(display, cpu_transcoder, port),
stream_enc_status, enable ? stream_enc_status : 0,
HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
- transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), str_enabled_disabled(enable));
return -ETIMEDOUT;
}
@@ -794,8 +797,8 @@ static int
intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
bool enable)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
enum transcoder cpu_transcoder = hdcp->stream_transcoder;
@@ -803,8 +806,8 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
enum port port = dig_port->base.port;
int ret;
- drm_WARN_ON(&i915->drm, enable &&
- !!(intel_de_read(i915, HDCP2_AUTH_STREAM(i915, cpu_transcoder, port))
+ drm_WARN_ON(display->drm, enable &&
+ !!(intel_de_read(display, HDCP2_AUTH_STREAM(display, cpu_transcoder, port))
& AUTH_STREAM_TYPE) != data->streams[0].stream_type);
ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable);
@@ -812,12 +815,12 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return ret;
/* Wait for encryption confirmation */
- if (intel_de_wait(i915, HDCP2_STREAM_STATUS(i915, cpu_transcoder, pipe),
+ if (intel_de_wait(display, HDCP2_STREAM_STATUS(display, cpu_transcoder, pipe),
STREAM_ENCRYPTION_STATUS,
enable ? STREAM_ENCRYPTION_STATUS : 0,
HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- drm_err(&i915->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
- transcoder_name(cpu_transcoder), enable ? "enabled" : "disabled");
+ drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
+ transcoder_name(cpu_transcoder), str_enabled_disabled(enable));
return -ETIMEDOUT;
}
@@ -872,13 +875,12 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
int intel_dp_hdcp_init(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
- struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_encoder *intel_encoder = &dig_port->base;
enum port port = intel_encoder->port;
struct intel_dp *intel_dp = &dig_port->dp;
- if (!is_hdcp_supported(dev_priv, port))
+ if (!is_hdcp_supported(display, port))
return 0;
if (intel_connector->mst_port)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 40bedc31d6bf..8b1977cfec50 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -21,9 +21,12 @@
* IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
+
#include <drm/display/drm_dp_helper.h>
-#include "i915_drv.h"
+#include "i915_utils.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -208,8 +211,10 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd);
- for (i = 0; i < lttpr_count; i++)
+ for (i = 0; i < lttpr_count; i++) {
intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));
+ drm_dp_dump_lttpr_desc(&intel_dp->aux, DP_PHY_LTTPR(i));
+ }
return lttpr_count;
}
@@ -217,7 +222,6 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp_is_edp(intel_dp))
return 0;
@@ -226,7 +230,7 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
- if (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))
+ if (DISPLAY_VER(display) >= 10 && !display->platform.geminilake)
if (drm_dp_dpcd_probe(&intel_dp->aux,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
return -EIO;
@@ -258,7 +262,6 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
int lttpr_count = 0;
/*
@@ -266,7 +269,7 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
if (!intel_dp_is_edp(intel_dp) &&
- (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))) {
+ (DISPLAY_VER(display) >= 10 && !display->platform.geminilake)) {
u8 dpcd[DP_RECEIVER_CAP_SIZE];
int err = intel_dp_read_dprx_caps(intel_dp, dpcd);
@@ -387,10 +390,9 @@ static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
- DISPLAY_VER(display) >= 10 || IS_BROXTON(i915);
+ DISPLAY_VER(display) >= 10 || display->platform.broxton;
}
/* 128b/132b */
@@ -894,7 +896,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
- usleep_range(delay_us, 2 * delay_us);
+ fsleep(delay_us);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
@@ -955,7 +957,6 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
bool source_tps3, sink_tps3, source_tps4, sink_tps4;
/* UHBR+ use separate 128b/132b TPS2 */
@@ -968,7 +969,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
* TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
* LTTPRs must support TPS4.
*/
- source_tps4 = intel_dp_source_supports_tps4(i915);
+ source_tps4 = intel_dp_source_supports_tps4(display);
sink_tps4 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps4_supported(intel_dp->dpcd);
if (source_tps4 && sink_tps4) {
@@ -986,7 +987,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
* TPS3 support is mandatory for downstream devices that
* support HBR2. However, not all sinks follow the spec.
*/
- source_tps3 = intel_dp_source_supports_tps3(i915);
+ source_tps3 = intel_dp_source_supports_tps3(display);
sink_tps3 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps3_supported(intel_dp->dpcd);
if (source_tps3 && sink_tps3) {
@@ -1036,7 +1037,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
}
for (tries = 0; tries < 5; tries++) {
- usleep_range(delay_us, 2 * delay_us);
+ fsleep(delay_us);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
@@ -1410,16 +1411,10 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
}
/* Time budget for the LANEx_EQ_DONE Sequence */
- deadline = jiffies + msecs_to_jiffies_timeout(400);
+ deadline = jiffies + msecs_to_jiffies_timeout(450);
for (try = 0; try < max_tries; try++) {
- usleep_range(delay_us, 2 * delay_us);
-
- /*
- * The delay may get updated. The transmitter shall read the
- * delay before link status during link training.
- */
- delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
+ fsleep(delay_us);
if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
@@ -1447,8 +1442,15 @@ intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
if (time_after(jiffies, deadline))
timeout = true; /* try one last time after deadline */
- /* Update signal levels and training set as requested. */
+ /*
+ * During LT, Tx shall read AUX_RD_INTERVAL just before writing the new FFE
+ * presets.
+ */
+ delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
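+ /*
+ * Net loop order (a sketch of the flow after this change): fsleep() for
+ * the most recently read delay, read link status, check LANEx_EQ_DONE
+ * and the deadline, re-read AUX_RD_INTERVAL here, then write the new
+ * FFE presets below.
+ */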
+
intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
+
+ /* Update signal levels and training set as requested. */
if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n");
return false;
@@ -1677,19 +1679,11 @@ void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
}
-static struct intel_dp *intel_connector_to_intel_dp(struct intel_connector *connector)
-{
- if (connector->mst_port)
- return connector->mst_port;
- else
- return enc_to_intel_dp(intel_attached_encoder(connector));
-}
-
static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int current_rate = -1;
int force_rate;
int err;
@@ -1760,7 +1754,7 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file,
struct seq_file *m = file->private_data;
struct intel_connector *connector = to_intel_connector(m->private);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int rate;
int err;
@@ -1787,7 +1781,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int current_lane_count = -1;
int force_lane_count;
int err;
@@ -1862,7 +1856,7 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file,
struct seq_file *m = file->private_data;
struct intel_connector *connector = to_intel_connector(m->private);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int lane_count;
int err;
@@ -1889,7 +1883,7 @@ static int i915_dp_max_link_rate_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int err;
err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
@@ -1908,7 +1902,7 @@ static int i915_dp_max_lane_count_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int err;
err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
@@ -1927,7 +1921,7 @@ static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int err;
err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
@@ -1945,7 +1939,7 @@ static int i915_dp_force_link_training_failure_write(void *data, u64 val)
{
struct intel_connector *connector = to_intel_connector(data);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int err;
if (val > 2)
@@ -1969,7 +1963,7 @@ static int i915_dp_force_link_retrain_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int err;
err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
@@ -1987,7 +1981,7 @@ static int i915_dp_force_link_retrain_write(void *data, u64 val)
{
struct intel_connector *connector = to_intel_connector(data);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int err;
err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
@@ -2010,7 +2004,7 @@ static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
struct intel_display *display = to_intel_display(connector);
- struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
int err;
err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 15541932b809..a65cf97ad12d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -41,9 +41,10 @@
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
+#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
+#include "intel_dp_test.h"
#include "intel_dp_tunnel.h"
-#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -52,14 +53,64 @@
#include "intel_vdsc.h"
#include "skl_scaler.h"
+/*
+ * DP MST (DisplayPort Multi-Stream Transport)
+ *
+ * MST support on the source depends on the platform and port. DP initialization
+ * sets up MST for each MST capable encoder. This will become the primary
+ * encoder for the port.
+ *
+ * MST initialization of each primary encoder creates MST stream encoders, one
+ * per pipe, and initializes the MST topology manager. The MST stream encoders
+ * are sometimes called "fake encoders", because they're virtual, not
+ * physical. Thus there are (number of MST capable ports) x (number of pipes)
+ * MST stream encoders in total.
+ *
+ * The decision to use MST for a sink is made at detect time on the connector
+ * attached to the primary encoder, and it will not change while the sink is
+ * connected. We always use MST when possible, including for SST sinks with
+ * sideband messaging support.
+ *
+ * The connectors for the MST streams are added and removed dynamically by the
+ * topology manager. Their connection status is also determined by the topology
+ * manager.
+ *
+ * On hardware, each transcoder may be associated with a single DDI
+ * port. Multiple transcoders may be associated with the same DDI port only if
+ * the port is in MST mode.
+ *
+ * On TGL+, all the transcoders streaming on the same DDI port will indicate a
+ * primary transcoder; the TGL_DP_TP_CTL and TGL_DP_TP_STATUS registers are
+ * relevant only on the primary transcoder. Prior to that, they are port
+ * registers.
+ */
+
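+/*
+ * A worked example of the counting above (port and pipe counts are
+ * illustrative): a platform with 4 MST capable ports and 4 pipes ends up
+ * with 4 x 4 = 16 MST stream encoders in total, while each port still has
+ * exactly one physical primary encoder shared by all of its streams.
+ */
+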
+/* From fake MST stream encoder to primary encoder */
+static struct intel_encoder *to_primary_encoder(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+
+ return &dig_port->base;
+}
+
+/* From fake MST stream encoder to primary DP */
+static struct intel_dp *to_primary_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_digital_port *dig_port = intel_mst->primary;
+
+ return &dig_port->dp;
+}
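+
+/*
+ * A minimal usage sketch (example_hook() is hypothetical, shown only to
+ * illustrate the helpers above): any hook handed a fake MST stream
+ * encoder can recover the shared per-port state:
+ *
+ *	static void example_hook(struct intel_encoder *encoder)
+ *	{
+ *		struct intel_dp *intel_dp = to_primary_dp(encoder);
+ *		struct intel_encoder *primary = to_primary_encoder(encoder);
+ *
+ *		drm_dbg_kms(to_intel_display(encoder)->drm,
+ *			    "[ENCODER:%s] active links %d\n",
+ *			    primary->base.name, intel_dp->active_mst_links);
+ *	}
+ *
+ * The real callers are the mst_stream_*() hooks further below.
+ */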
+
static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
bool dsc)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(i915) >= 20 || !dsc)
+ if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(display) >= 20 || !dsc)
return INT_MAX;
/*
@@ -88,26 +139,19 @@ static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,
}
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
- const struct intel_connector *connector,
- bool ssc, bool dsc, int bpp_x16)
+ bool ssc, int dsc_slice_count, int bpp_x16)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
- int dsc_slice_count = 0;
int overhead;
flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
- if (dsc) {
+ if (dsc_slice_count)
flags |= DRM_DP_BW_OVERHEAD_DSC;
- dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
- adjusted_mode->clock,
- adjusted_mode->hdisplay,
- crtc_state->joiner_pipes);
- }
overhead = drm_dp_bw_overhead(crtc_state->lane_count,
adjusted_mode->hdisplay,
@@ -123,7 +167,6 @@ static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
}
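+/*
+ * A sketch of how the overhead flags compose (illustrative case): for a
+ * UHBR link with DSC enabled, a caller passing ssc=false (the local link
+ * overhead case) ends up with flags = DRM_DP_BW_OVERHEAD_MST |
+ * DRM_DP_BW_OVERHEAD_UHBR | DRM_DP_BW_OVERHEAD_DSC (FEC stays off, as
+ * fec_enable is false on UHBR links), and drm_dp_bw_overhead() folds
+ * them into a single overhead factor used when sizing the stream.
+ */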
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
- const struct intel_connector *connector,
int overhead,
int bpp_x16,
struct intel_link_m_n *m_n)
@@ -153,34 +196,35 @@ static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
}
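+/*
+ * A worked example of the PBN arithmetic above, assuming
+ * effective_data_rate is in kB/s and ignoring bw_overhead: a 1920x1080@60
+ * stream (148500 kHz pixel clock) at 24 bpp carries 148500 * 24 / 8 =
+ * 445500 kB/s, and DIV_ROUND_UP(445500 * 64, 54 * 1000) = 528 PBN, i.e.
+ * 528 units of the 54/64 MB/s PBN granularity.
+ */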
-static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- int max_bpp,
- int min_bpp,
- struct link_config_limits *limits,
- struct drm_connector_state *conn_state,
- int step,
- bool dsc)
+static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);
+
+ return intel_dp_dsc_get_slice_count(connector,
+ adjusted_mode->clock,
+ adjusted_mode->hdisplay,
+ num_joined_pipes);
+}
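+
+/*
+ * Note the slice count's dual role downstream: a non-zero value both
+ * feeds drm_dp_bw_overhead() and acts as the DSC indicator in
+ * intel_dp_mst_bw_overhead(), while a zero return is treated as "no
+ * valid DSC config" (-ENOSPC) in intel_dp_mtp_tu_compute_config().
+ */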
+
+int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp, int min_bpp,
+ struct drm_connector_state *conn_state,
+ int step, bool dsc)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_atomic_state *state = crtc_state->uapi.state;
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
- struct drm_dp_mst_topology_state *mst_state;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ fixed20_12 pbn_div;
int bpp, slots = -EINVAL;
+ int dsc_slice_count = 0;
int max_dpt_bpp;
- int ret = 0;
-
- mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
- if (IS_ERR(mst_state))
- return PTR_ERR(mst_state);
-
- crtc_state->lane_count = limits->max_lane_count;
- crtc_state->port_clock = limits->max_rate;
if (dsc) {
if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
@@ -189,165 +233,197 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
}
- mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
- crtc_state->port_clock,
- crtc_state->lane_count);
+ pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count);
max_dpt_bpp = intel_dp_mst_max_dpt_bpp(crtc_state, dsc);
if (max_bpp > max_dpt_bpp) {
- drm_dbg_kms(&i915->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
+ drm_dbg_kms(display->drm, "Limiting bpp to max DPT bpp (%d -> %d)\n",
max_bpp, max_dpt_bpp);
max_bpp = max_dpt_bpp;
}
- drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
+ drm_dbg_kms(display->drm, "Looking for slots in range min bpp %d max bpp %d\n",
min_bpp, max_bpp);
+ if (dsc) {
+ dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
+ if (!dsc_slice_count) {
+ drm_dbg_kms(display->drm, "Can't get valid DSC slice count\n");
+
+ return -ENOSPC;
+ }
+ }
+
for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
int local_bw_overhead;
- int remote_bw_overhead;
int link_bpp_x16;
- int remote_tu;
- fixed20_12 pbn;
- drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
+ drm_dbg_kms(display->drm, "Trying bpp %d\n", bpp);
link_bpp_x16 = fxp_q4_from_int(dsc ? bpp :
intel_dp_output_bpp(crtc_state->output_format, bpp));
- local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
- false, dsc, link_bpp_x16);
- remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
- true, dsc, link_bpp_x16);
-
- intel_dp_mst_compute_m_n(crtc_state, connector,
+ local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
+ false, dsc_slice_count, link_bpp_x16);
+ intel_dp_mst_compute_m_n(crtc_state,
local_bw_overhead,
link_bpp_x16,
&crtc_state->dp_m_n);
- /*
- * The TU size programmed to the HW determines which slots in
- * an MTP frame are used for this stream, which needs to match
- * the payload size programmed to the first downstream branch
- * device's payload table.
- *
- * Note that atm the payload's PBN value DRM core sends via
- * the ALLOCATE_PAYLOAD side-band message matches the payload
- * size (which it calculates from the PBN value) it programs
- * to the first branch device's payload table. The allocation
- * in the payload table could be reduced though (to
- * crtc_state->dp_m_n.tu), provided that the driver doesn't
- * enable SSC on the corresponding link.
- */
- pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
- link_bpp_x16,
- remote_bw_overhead));
- remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);
-
- /*
- * Aligning the TUs ensures that symbols consisting of multiple
- * (4) symbol cycles don't get split between two consecutive
- * MTPs, as required by Bspec.
- * TODO: remove the alignment restriction for 128b/132b links
- * on some platforms, where Bspec allows this.
- */
- remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);
-
- /*
- * Also align PBNs accordingly, since MST core will derive its
- * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
- * The above comment about the difference between the PBN
- * allocated for the whole path and the TUs allocated for the
- * first branch device's link also applies here.
- */
- pbn.full = remote_tu * mst_state->pbn_div.full;
- crtc_state->pbn = dfixed_trunc(pbn);
-
- drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
- crtc_state->dp_m_n.tu = remote_tu;
+ if (intel_dp->is_mst) {
+ int remote_bw_overhead;
+ int remote_tu;
+ fixed20_12 pbn;
+
+ remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
+ true, dsc_slice_count, link_bpp_x16);
+
+ /*
+ * The TU size programmed to the HW determines which slots in
+ * an MTP frame are used for this stream, which needs to match
+ * the payload size programmed to the first downstream branch
+ * device's payload table.
+ *
+ * Note that atm the payload's PBN value DRM core sends via
+ * the ALLOCATE_PAYLOAD side-band message matches the payload
+ * size (which it calculates from the PBN value) it programs
+ * to the first branch device's payload table. The allocation
+ * in the payload table could be reduced though (to
+ * crtc_state->dp_m_n.tu), provided that the driver doesn't
+ * enable SSC on the corresponding link.
+ */
+ pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
+ link_bpp_x16,
+ remote_bw_overhead));
+ remote_tu = DIV_ROUND_UP(pbn.full, pbn_div.full);
+
+ /*
+ * Aligning the TUs ensures that symbols consisting of multiple
+ * (4) symbol cycles don't get split between two consecutive
+ * MTPs, as required by Bspec.
+ * TODO: remove the alignment restriction for 128b/132b links
+ * on some platforms, where Bspec allows this.
+ */
+ remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);
+
+ /*
+ * Also align PBNs accordingly, since MST core will derive its
+ * own copy of TU from the PBN in drm_dp_atomic_find_time_slots().
+ * The above comment about the difference between the PBN
+ * allocated for the whole path and the TUs allocated for the
+ * first branch device's link also applies here.
+ */
+ pbn.full = remote_tu * pbn_div.full;
+
+ drm_WARN_ON(display->drm, remote_tu < crtc_state->dp_m_n.tu);
+ crtc_state->dp_m_n.tu = remote_tu;
+
+ slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
+ connector->port,
+ dfixed_trunc(pbn));
+ } else {
+ /* Same as above for remote_tu */
+ crtc_state->dp_m_n.tu = ALIGN(crtc_state->dp_m_n.tu,
+ 4 / crtc_state->lane_count);
+
+ if (crtc_state->dp_m_n.tu <= 64)
+ slots = crtc_state->dp_m_n.tu;
+ else
+ slots = -EINVAL;
+ }
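+
+ /*
+ * Worked alignment examples (TU values illustrative): with 2 lanes,
+ * ALIGN(tu, 4 / 2) rounds a TU of 27 up to 28; with 4 lanes
+ * ALIGN(tu, 1) is a no-op; with 1 lane the TU is rounded up to a
+ * multiple of 4.
+ */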
- slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
- connector->port,
- crtc_state->pbn);
if (slots == -EDEADLK)
return slots;
if (slots >= 0) {
- drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);
+ drm_WARN_ON(display->drm, slots != crtc_state->dp_m_n.tu);
break;
}
- }
- /* We failed to find a proper bpp/timeslots, return error */
- if (ret)
- slots = ret;
+ /* Allow using zero step to indicate one try */
+ if (!step)
+ break;
+ }
if (slots < 0) {
- drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
+ drm_dbg_kms(display->drm, "failed finding vcpi slots:%d\n",
slots);
- } else {
- if (!dsc)
- crtc_state->pipe_bpp = bpp;
- else
- crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
- drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
+ return slots;
}
- return slots;
+ if (!dsc)
+ crtc_state->pipe_bpp = bpp;
+ else
+ crtc_state->dsc.compressed_bpp_x16 = fxp_q4_from_int(bpp);
+
+ drm_dbg_kms(display->drm, "Got %d slots for pipe bpp %d dsc %d\n",
+ slots, bpp, dsc);
+
+ return 0;
}
-static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+static int mst_stream_find_vcpi_slots_for_bpp(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp, int min_bpp,
+ struct link_config_limits *limits,
+ struct drm_connector_state *conn_state,
+ int step, bool dsc)
{
- int slots = -EINVAL;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_dp_mst_topology_state *mst_state;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_rate;
+
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ crtc_state->lane_count);
+
+ return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state,
+ max_bpp, min_bpp,
+ conn_state, step, dsc);
+}
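+
+/*
+ * Flow note: mst_stream_compute_link_config() and
+ * mst_stream_dsc_compute_link_config() below both resolve to
+ * mst_stream_find_vcpi_slots_for_bpp(), which pins the link to the
+ * maximum rate and lane count from the limits, seeds the topology
+ * state's pbn_div, and delegates the bpp/TU search to
+ * intel_dp_mtp_tu_compute_config() above.
+ */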
+static int mst_stream_compute_link_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
/*
* FIXME: allocate the BW according to link_bpp, which in the case of
* YUV420 is only half of the pipe bpp value.
*/
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
- fxp_q4_to_int(limits->link.max_bpp_x16),
- fxp_q4_to_int(limits->link.min_bpp_x16),
- limits,
- conn_state, 2 * 3, false);
-
- if (slots < 0)
- return slots;
-
- return 0;
+ return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state,
+ fxp_q4_to_int(limits->link.max_bpp_x16),
+ fxp_q4_to_int(limits->link.min_bpp_x16),
+ limits,
+ conn_state, 2 * 3, false);
}
-static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+static int mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
{
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- int slots = -EINVAL;
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
int i, num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
- u8 dsc_max_bpc;
int min_compressed_bpp, max_compressed_bpp;
- /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
- if (DISPLAY_VER(i915) >= 12)
- dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
- else
- dsc_max_bpc = min_t(u8, 10, conn_state->max_requested_bpc);
-
- max_bpp = min_t(u8, dsc_max_bpc * 3, limits->pipe.max_bpp);
+ max_bpp = limits->pipe.max_bpp;
min_bpp = limits->pipe.min_bpp;
num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
dsc_bpc);
- drm_dbg_kms(&i915->drm, "DSC Source supported min bpp %d max bpp %d\n",
+ drm_dbg_kms(display->drm, "DSC Source supported min bpp %d max bpp %d\n",
min_bpp, max_bpp);
sink_max_bpp = dsc_bpc[0] * 3;
@@ -360,7 +436,7 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
sink_max_bpp = dsc_bpc[i] * 3;
}
- drm_dbg_kms(&i915->drm, "DSC Sink supported min bpp %d max bpp %d\n",
+ drm_dbg_kms(display->drm, "DSC Sink supported min bpp %d max bpp %d\n",
sink_min_bpp, sink_max_bpp);
if (min_bpp < sink_min_bpp)
@@ -371,41 +447,28 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
crtc_state->pipe_bpp = max_bpp;
- max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
- crtc_state,
- max_bpp / 3);
- max_compressed_bpp = min(max_compressed_bpp,
- fxp_q4_to_int(limits->link.max_bpp_x16));
-
- min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
- min_compressed_bpp = max(min_compressed_bpp,
- fxp_q4_to_int_roundup(limits->link.min_bpp_x16));
+ max_compressed_bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
+ min_compressed_bpp = fxp_q4_to_int_roundup(limits->link.min_bpp_x16);
- drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
+ drm_dbg_kms(display->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
min_compressed_bpp, max_compressed_bpp);
/* Align compressed bpps according to our own constraints */
- max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp,
+ max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, max_compressed_bpp,
crtc_state->pipe_bpp);
- min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp,
+ min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(display, min_compressed_bpp,
crtc_state->pipe_bpp);
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp,
- min_compressed_bpp, limits,
- conn_state, 1, true);
-
- if (slots < 0)
- return slots;
-
- return 0;
+ return mst_stream_find_vcpi_slots_for_bpp(intel_dp, crtc_state, max_compressed_bpp,
+ min_compressed_bpp, limits,
+ conn_state, 1, true);
}
-static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+
+static int mst_stream_update_slots(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_topology_state *topology_state;
u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
@@ -413,7 +476,7 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
if (IS_ERR(topology_state)) {
- drm_dbg_kms(&i915->drm, "slot update failed\n");
+ drm_dbg_kms(display->drm, "slot update failed\n");
return PTR_ERR(topology_state);
}
@@ -449,16 +512,20 @@ hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
return false;
+ if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state))
+ return false;
+
return true;
}
static bool
-adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector,
+adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
const struct intel_crtc_state *crtc_state,
struct link_config_limits *limits,
bool dsc)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int min_bpp_x16 = limits->link.min_bpp_x16;
@@ -466,15 +533,15 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
return true;
if (!dsc) {
- if (intel_dp_supports_dsc(connector, crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ if (intel_dp_supports_dsc(intel_dp, connector, crtc_state)) {
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
return false;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
@@ -487,7 +554,7 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
return true;
}
- drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);
+ drm_WARN_ON(display->drm, limits->min_rate != limits->max_rate);
if (limits->max_rate < 540000)
min_bpp_x16 = fxp_q4_from_int(13);
@@ -497,7 +564,7 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
if (limits->link.min_bpp_x16 >= min_bpp_x16)
return true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name,
@@ -512,62 +579,38 @@ adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *conne
}
static bool
-intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
- struct intel_crtc_state *crtc_state,
- bool dsc,
- struct link_config_limits *limits)
-{
- /*
- * for MST we always configure max link bw - the spec doesn't
- * seem to suggest we should do otherwise.
- */
- limits->min_rate = limits->max_rate =
- intel_dp_max_link_rate(intel_dp);
-
- limits->min_lane_count = limits->max_lane_count =
- intel_dp_max_lane_count(intel_dp);
-
- limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
- /*
- * FIXME: If all the streams can't fit into the link with
- * their current pipe_bpp we should reduce pipe_bpp across
- * the board until things start to fit. Until then we
- * limit to <= 8bpc since that's what was hardcoded for all
- * MST streams previously. This hack should be removed once
- * we have the proper retry logic in place.
- */
- limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);
-
- intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);
-
- if (!intel_dp_compute_config_link_bpp_limits(intel_dp,
- crtc_state,
- dsc,
- limits))
+mst_stream_compute_config_limits(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ struct intel_crtc_state *crtc_state,
+ bool dsc,
+ struct link_config_limits *limits)
+{
+ if (!intel_dp_compute_config_limits(intel_dp, crtc_state, false, dsc,
+ limits))
return false;
- return adjust_limits_for_dsc_hblank_expansion_quirk(connector,
+ return adjust_limits_for_dsc_hblank_expansion_quirk(intel_dp,
+ connector,
crtc_state,
limits,
dsc);
}
-static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int mst_stream_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
bool dsc_needed, joiner_needs_dsc;
+ int num_joined_pipes;
int ret = 0;
if (pipe_config->fec_enable &&
@@ -577,27 +620,25 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (intel_dp_need_joiner(intel_dp, connector,
- adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_clock))
- pipe_config->joiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
+ num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
+ adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_clock);
+ if (num_joined_pipes > 1)
+ pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);
pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- joiner_needs_dsc = intel_dp_joiner_needs_dsc(dev_priv, pipe_config->joiner_pipes);
+ joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !intel_dp_mst_compute_config_limits(intel_dp,
- connector,
- pipe_config,
- false,
- &limits);
+ !mst_stream_compute_config_limits(intel_dp, connector,
+ pipe_config, false, &limits);
if (!dsc_needed) {
- ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
- conn_state, &limits);
+ ret = mst_stream_compute_link_config(intel_dp, pipe_config,
+ conn_state, &limits);
if (ret == -EDEADLK)
return ret;
@@ -606,35 +647,37 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
dsc_needed = true;
}
+ if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
+ drm_dbg_kms(display->drm, "DSC required but not available\n");
+ return -EINVAL;
+ }
+
/* enable compression if the mode doesn't fit available BW */
if (dsc_needed) {
- drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
+ drm_dbg_kms(display->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
- if (!intel_dp_supports_dsc(connector, pipe_config))
- return -EINVAL;
- if (!intel_dp_mst_compute_config_limits(intel_dp,
- connector,
- pipe_config,
- true,
- &limits))
+ if (!mst_stream_compute_config_limits(intel_dp, connector,
+ pipe_config, true,
+ &limits))
return -EINVAL;
/*
* FIXME: As bpc is hardcoded to 8, as mentioned above,
* WARN and ignore the debug flag force_dsc_bpc for now.
*/
- drm_WARN(&dev_priv->drm, intel_dp->force_dsc_bpc, "Cannot Force BPC for MST\n");
+ drm_WARN(display->drm, intel_dp->force_dsc_bpc,
+ "Cannot Force BPC for MST\n");
/*
* Try to get at least some timeslots and then see, if
* we can fit there with DSC.
*/
- drm_dbg_kms(&dev_priv->drm, "Trying to find VCPI slots in DSC mode\n");
+ drm_dbg_kms(display->drm, "Trying to find VCPI slots in DSC mode\n");
- ret = intel_dp_dsc_mst_compute_link_config(encoder, pipe_config,
- conn_state, &limits);
+ ret = mst_stream_dsc_compute_link_config(intel_dp, pipe_config,
+ conn_state, &limits);
if (ret < 0)
return ret;
@@ -646,14 +689,14 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state);
+ ret = mst_stream_update_slots(intel_dp, pipe_config, conn_state);
if (ret)
return ret;
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ if (display->platform.geminilake || display->platform.broxton)
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
@@ -675,13 +718,13 @@ static unsigned int
intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
struct intel_dp *mst_port)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_digital_connector_state *conn_state;
struct intel_connector *connector;
u8 transcoders = 0;
int i;
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
return 0;
for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
@@ -735,7 +778,7 @@ static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct intel_link_bw_limits *limits)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
u8 mst_pipe_mask;
u8 fec_pipe_mask = 0;
@@ -743,12 +786,12 @@ static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, mst_pipe_mask) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
/* Atomic connector check should've added all the MST CRTCs. */
- if (drm_WARN_ON(&i915->drm, !crtc_state))
+ if (drm_WARN_ON(display->drm, !crtc_state))
return -EINVAL;
if (crtc_state->fec_enable)
@@ -827,13 +870,12 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
return 0;
}
-static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static int mst_stream_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
/* lowest numbered transcoder will be designated master */
crtc_state->mst_master_transcoder =
@@ -856,10 +898,10 @@ static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
* recomputation of the corresponding CRTC states.
*/
static int
-intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
- struct intel_atomic_state *state)
+mst_connector_atomic_topology_check(struct intel_connector *connector,
+ struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector_iter;
int ret = 0;
@@ -867,7 +909,7 @@ intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
- drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
+ drm_connector_list_iter_begin(display->drm, &connector_list_iter);
for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
struct intel_digital_connector_state *conn_iter_state;
struct intel_crtc_state *crtc_state;
@@ -905,8 +947,8 @@ intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
}
static int
-intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *_state)
+mst_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_connector *intel_connector =
@@ -917,7 +959,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
- ret = intel_dp_mst_atomic_topology_check(intel_connector, state);
+ ret = mst_connector_atomic_topology_check(intel_connector, state);
if (ret)
return ret;
@@ -934,42 +976,18 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
intel_connector->port);
}
-static void clear_act_sent(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
- intel_de_write(i915, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_ACT_SENT);
-}
-
-static void wait_for_act_sent(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_dp *intel_dp = &intel_mst->primary->dp;
-
- if (intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_ACT_SENT, 1))
- drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
-
- drm_dp_check_act_status(&intel_dp->mst_mgr);
-}
-
-static void intel_mst_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void mst_stream_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- drm_dbg_kms(&i915->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
if (intel_dp->active_mst_links == 1)
@@ -980,14 +998,15 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
}
-static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void mst_stream_post_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
struct drm_dp_mst_topology_state *old_mst_state =
@@ -998,18 +1017,16 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
struct drm_dp_mst_atomic_payload *new_payload =
drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_crtc *pipe_crtc;
bool last_mst_stream;
+ int i;
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
- drm_WARN_ON(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && last_mst_stream &&
!intel_dp_mst_is_master_trans(old_crtc_state));
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
const struct intel_crtc_state *old_pipe_crtc_state =
intel_atomic_get_old_crtc_state(state, pipe_crtc);
@@ -1020,27 +1037,27 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
- clear_act_sent(encoder, old_crtc_state);
+ intel_ddi_clear_act_sent(encoder, old_crtc_state);
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, old_crtc_state->cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_DDI_FUNC_CTL(display, old_crtc_state->cpu_transcoder),
TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
- wait_for_act_sent(encoder, old_crtc_state);
+ intel_ddi_wait_for_act_sent(encoder, old_crtc_state);
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
drm_dp_remove_payload_part2(&intel_dp->mst_mgr, new_mst_state,
old_payload, new_payload);
intel_ddi_disable_transcoder_func(old_crtc_state);
- for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(old_crtc_state)) {
+ for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) {
const struct intel_crtc_state *old_pipe_crtc_state =
intel_atomic_get_old_crtc_state(state, pipe_crtc);
intel_dsc_disable(old_pipe_crtc_state);
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
skl_scaler_disable(old_pipe_crtc_state);
else
ilk_pfit_disable(old_pipe_crtc_state);
@@ -1057,8 +1074,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* BSpec 4287: disable DIP after the transcoder is disabled and before
* the transcoder clock select is set to none.
*/
- intel_dp_set_infoframes(&dig_port->base, false,
- old_crtc_state, NULL);
+ intel_dp_set_infoframes(primary_encoder, false, old_crtc_state, NULL);
/*
* From TGL spec: "If multi-stream slave transcoder: Configure
* Transcoder Clock Select to direct no clock to the transcoder"
@@ -1066,51 +1082,49 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* From older GENs spec: "Configure Transcoder Clock Select to direct
* no clock to the transcoder"
*/
- if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
+ if (DISPLAY_VER(display) < 12 || !last_mst_stream)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_mst->connector = NULL;
if (last_mst_stream)
- dig_port->base.post_disable(state, &dig_port->base,
- old_crtc_state, NULL);
+ primary_encoder->post_disable(state, primary_encoder,
+ old_crtc_state, NULL);
- drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
}
-static void intel_mst_post_pll_disable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
+static void mst_stream_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
if (intel_dp->active_mst_links == 0 &&
- dig_port->base.post_pll_disable)
- dig_port->base.post_pll_disable(state, encoder, old_crtc_state, old_conn_state);
+ primary_encoder->post_pll_disable)
+ primary_encoder->post_pll_disable(state, primary_encoder, old_crtc_state, old_conn_state);
}
-static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void mst_stream_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
if (intel_dp->active_mst_links == 0)
- dig_port->base.pre_pll_enable(state, &dig_port->base,
- pipe_config, NULL);
+ primary_encoder->pre_pll_enable(state, primary_encoder,
+ pipe_config, NULL);
else
/*
* The port PLL state needs to get updated for secondary
* streams as for the primary stream.
*/
- intel_ddi_update_active_dpll(state, &dig_port->base,
+ intel_ddi_update_active_dpll(state, primary_encoder,
to_intel_crtc(pipe_config->uapi.crtc));
}
@@ -1141,15 +1155,15 @@ static void intel_mst_reprobe_topology(struct intel_dp *intel_dp,
crtc_state->port_clock, crtc_state->lane_count);
}
-static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void mst_stream_pre_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_dp_mst_topology_state *mst_state =
@@ -1163,11 +1177,10 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
connector->encoder = encoder;
intel_mst->connector = connector;
first_mst_stream = intel_dp->active_mst_links == 0;
- drm_WARN_ON(&dev_priv->drm,
- DISPLAY_VER(dev_priv) >= 12 && first_mst_stream &&
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 12 && first_mst_stream &&
!intel_dp_mst_is_master_trans(pipe_config));
- drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
if (first_mst_stream)
@@ -1178,8 +1191,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_sink_enable_decompression(state, connector, pipe_config);
if (first_mst_stream) {
- dig_port->base.pre_enable(state, &dig_port->base,
- pipe_config, NULL);
+ primary_encoder->pre_enable(state, primary_encoder,
+ pipe_config, NULL);
intel_mst_reprobe_topology(intel_dp, pipe_config);
}
@@ -1189,24 +1202,28 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
if (ret < 0)
- intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
+ intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
/*
* Before Gen 12 this is not done as part of
- * dig_port->base.pre_enable() and should be done here. For
+ * primary_encoder->pre_enable() and should be done here. For
* Gen 12+ the step in which this should be done is different for the
* first MST stream, so it's done on the DDI for the first stream and
* here for the following ones.
*/
- if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
+ if (DISPLAY_VER(display) < 12 || !first_mst_stream)
intel_ddi_enable_transcoder_clock(encoder, pipe_config);
- intel_dsc_dp_pps_write(&dig_port->base, pipe_config);
+ if (DISPLAY_VER(display) >= 13 && !first_mst_stream)
+ intel_ddi_config_transcoder_func(encoder, pipe_config);
+
+ intel_dsc_dp_pps_write(primary_encoder, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 clear = 0;
u32 set = 0;
@@ -1214,7 +1231,7 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
if (!IS_ALDERLAKE_P(i915))
return;
- if (!IS_DISPLAY_STEP(i915, STEP_D0, STEP_FOREVER))
+ if (!IS_DISPLAY_STEP(display, STEP_D0, STEP_FOREVER))
return;
/* Wa_14013163432:adlp */
@@ -1222,7 +1239,7 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
/* Wa_14014143976:adlp */
- if (IS_DISPLAY_STEP(i915, STEP_E0, STEP_FOREVER)) {
+ if (IS_DISPLAY_STEP(display, STEP_E0, STEP_FOREVER)) {
if (intel_dp_is_uhbr(crtc_state))
set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
else if (crtc_state->fec_enable)
@@ -1235,36 +1252,35 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
if (!clear && !set)
return;
- intel_de_rmw(i915, CHICKEN_MISC_3, clear, set);
+ intel_de_rmw(display, CHICKEN_MISC_3, clear, set);
}
-static void intel_mst_enable_dp(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void mst_stream_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
bool first_mst_stream = intel_dp->active_mst_links == 1;
struct intel_crtc *pipe_crtc;
- int ret;
+ int ret, i;
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+ drm_WARN_ON(display->drm, pipe_config->has_pch_encoder);
if (intel_dp_is_uhbr(pipe_config)) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
- intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
+ intel_de_write(display, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
- intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
+ intel_de_write(display, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
}
@@ -1272,15 +1288,16 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, pipe_config);
- clear_act_sent(encoder, pipe_config);
+ intel_ddi_clear_act_sent(encoder, pipe_config);
- intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, trans), 0,
+ intel_de_rmw(display, TRANS_DDI_FUNC_CTL(display, trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+ drm_dbg_kms(display->drm, "active links %d\n",
intel_dp->active_mst_links);
- wait_for_act_sent(encoder, pipe_config);
+ intel_ddi_wait_for_act_sent(encoder, pipe_config);
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
@@ -1289,10 +1306,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
drm_atomic_get_mst_payload_state(mst_state,
connector->port));
if (ret < 0)
- intel_dp_queue_modeset_retry_for_link(state, &dig_port->base, pipe_config);
+ intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
- if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
+ if (DISPLAY_VER(display) >= 12)
+ intel_de_rmw(display, CHICKEN_TRANS(display, trans),
FECSTALL_DIS_DPTSTREAM_DPTTG,
pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);
@@ -1300,8 +1317,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_enable_transcoder(pipe_config);
- for_each_intel_crtc_in_pipe_mask_reverse(&dev_priv->drm, pipe_crtc,
- intel_crtc_joined_pipe_mask(pipe_config)) {
+ for_each_pipe_crtc_modeset_enable(display, pipe_crtc, pipe_config, i) {
const struct intel_crtc_state *pipe_crtc_state =
intel_atomic_get_new_crtc_state(state, pipe_crtc);
@@ -1311,8 +1327,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
-static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
- enum pipe *pipe)
+static bool mst_stream_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
*pipe = intel_mst->pipe;
@@ -1321,28 +1337,26 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
return false;
}
-static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+static void mst_stream_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
- dig_port->base.get_config(&dig_port->base, pipe_config);
+ primary_encoder->get_config(primary_encoder, pipe_config);
}
-static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+static bool mst_stream_initial_fastset_check(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
- struct intel_digital_port *dig_port = intel_mst->primary;
+ struct intel_encoder *primary_encoder = to_primary_encoder(encoder);
- return intel_dp_initial_fastset_check(&dig_port->base, crtc_state);
+ return intel_dp_initial_fastset_check(primary_encoder, crtc_state);
}
-static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
+static int mst_connector_get_ddc_modes(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
struct intel_dp *intel_dp = intel_connector->mst_port;
const struct drm_edid *drm_edid;
int ret;
@@ -1350,7 +1364,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
if (drm_connector_is_unregistered(connector))
return intel_connector_update_modes(connector, NULL);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
@@ -1363,7 +1377,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
}
static int
-intel_dp_mst_connector_late_register(struct drm_connector *connector)
+mst_connector_late_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
int ret;
@@ -1382,7 +1396,7 @@ intel_dp_mst_connector_late_register(struct drm_connector *connector)
}
static void
-intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
+mst_connector_early_unregister(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1391,41 +1405,43 @@ intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
intel_connector->port);
}
-static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
+static const struct drm_connector_funcs mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_dp_mst_connector_late_register,
- .early_unregister = intel_dp_mst_connector_early_unregister,
+ .late_register = mst_connector_late_register,
+ .early_unregister = mst_connector_early_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int intel_dp_mst_get_modes(struct drm_connector *connector)
+static int mst_connector_get_modes(struct drm_connector *connector)
{
- return intel_dp_mst_get_ddc_modes(connector);
+ return mst_connector_get_ddc_modes(connector);
}
static int
-intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
- struct drm_display_mode *mode,
- struct drm_modeset_acquire_ctx *ctx,
- enum drm_mode_status *status)
+mst_connector_mode_valid_ctx(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
struct drm_dp_mst_port *port = intel_connector->port;
const int min_bpp = 18;
- int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
+ int max_dotclk = display->cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
int ret;
- bool dsc = false, joiner = false;
+ bool dsc = false;
u16 dsc_max_compressed_bpp = 0;
u8 dsc_slice_count = 0;
int target_clock = mode->clock;
+ int num_joined_pipes;
if (drm_connector_is_unregistered(connector)) {
*status = MODE_ERROR;
@@ -1465,11 +1481,9 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
* corresponding link capabilities of the sink) in case the
* stream is uncompressed for it by the last branch device.
*/
- if (intel_dp_need_joiner(intel_dp, intel_connector,
- mode->hdisplay, target_clock)) {
- joiner = true;
- max_dotclk *= 2;
- }
+ num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, intel_connector,
+ mode->hdisplay, target_clock);
+ max_dotclk *= num_joined_pipes;
ret = drm_modeset_lock(&mgr->base.lock, ctx);
if (ret)
@@ -1490,25 +1504,25 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (drm_dp_sink_supports_fec(intel_connector->dp.fec_capability)) {
dsc_max_compressed_bpp =
- intel_dp_dsc_get_max_compressed_bpp(dev_priv,
+ intel_dp_dsc_get_max_compressed_bpp(display,
max_link_clock,
max_lanes,
target_clock,
mode->hdisplay,
- joiner,
+ num_joined_pipes,
INTEL_OUTPUT_FORMAT_RGB,
pipe_bpp, 64);
dsc_slice_count =
intel_dp_dsc_get_slice_count(intel_connector,
target_clock,
mode->hdisplay,
- joiner);
+ num_joined_pipes);
}
dsc = dsc_max_compressed_bpp && dsc_slice_count;
}
- if (intel_dp_joiner_needs_dsc(dev_priv, joiner) && !dsc) {
+ if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
*status = MODE_CLOCK_HIGH;
return 0;
}
@@ -1518,12 +1532,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- *status = intel_mode_valid_max_plane_size(dev_priv, mode, joiner);
+ *status = intel_mode_valid_max_plane_size(dev_priv, mode, num_joined_pipes);
return 0;
}
-static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
- struct drm_atomic_state *state)
+static struct drm_encoder *
+mst_connector_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_atomic_state *state)
{
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
connector);
@@ -1535,35 +1550,37 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
}
static int
-intel_dp_mst_detect(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx, bool force)
+mst_connector_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
+ intel_dp_flush_connector_commits(intel_connector);
+
return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
intel_connector->port);
}
-static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
- .get_modes = intel_dp_mst_get_modes,
- .mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
- .atomic_best_encoder = intel_mst_atomic_best_encoder,
- .atomic_check = intel_dp_mst_atomic_check,
- .detect_ctx = intel_dp_mst_detect,
+static const struct drm_connector_helper_funcs mst_connector_helper_funcs = {
+ .get_modes = mst_connector_get_modes,
+ .mode_valid_ctx = mst_connector_mode_valid_ctx,
+ .atomic_best_encoder = mst_connector_atomic_best_encoder,
+ .atomic_check = mst_connector_atomic_check,
+ .detect_ctx = mst_connector_detect_ctx,
};
-static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+static void mst_stream_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));
@@ -1571,31 +1588,32 @@ static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_mst);
}
-static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
- .destroy = intel_dp_mst_encoder_destroy,
+static const struct drm_encoder_funcs mst_stream_encoder_funcs = {
+ .destroy = mst_stream_encoder_destroy,
};
-static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
+static bool mst_connector_get_hw_state(struct intel_connector *connector)
{
- if (intel_attached_encoder(connector) && connector->base.state->crtc) {
- enum pipe pipe;
- if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), &pipe))
- return false;
- return true;
- }
- return false;
+ /* This is the MST stream encoder set in ->pre_enable, if any */
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ enum pipe pipe;
+
+ if (!encoder || !connector->base.state->crtc)
+ return false;
+
+ return encoder->get_hw_state(encoder, &pipe);
}
-static int intel_dp_mst_add_properties(struct intel_dp *intel_dp,
- struct drm_connector *connector,
- const char *pathprop)
+static int mst_topology_add_connector_properties(struct intel_dp *intel_dp,
+ struct drm_connector *connector,
+ const char *pathprop)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(intel_dp);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.path_property, 0);
+ display->drm->mode_config.path_property, 0);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tile_property, 0);
+ display->drm->mode_config.tile_property, 0);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
@@ -1629,7 +1647,7 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_dp_aux *aux = connector->dp.dsc_decompression_aux;
struct drm_dp_desc desc;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -1667,21 +1685,21 @@ static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *conn
!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
return false;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
connector->base.base.id, connector->base.name);
return true;
}
-static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- const char *pathprop)
+static struct drm_connector *
+mst_topology_add_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *intel_connector;
struct drm_connector *connector;
enum pipe pipe;
@@ -1691,7 +1709,9 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (!intel_connector)
return NULL;
- intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ connector = &intel_connector->base;
+
+ intel_connector->get_hw_state = mst_connector_get_hw_state;
intel_connector->sync_state = intel_dp_connector_sync_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
@@ -1699,23 +1719,22 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_dp_init_modeset_retry_work(intel_connector);
- intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
- intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
- intel_connector->dp.dsc_hblank_expansion_quirk =
- detect_dsc_hblank_expansion_quirk(intel_connector);
-
- connector = &intel_connector->base;
- ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_dynamic_init(display->drm, connector, &mst_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort, NULL);
if (ret) {
drm_dp_mst_put_port_malloc(port);
intel_connector_free(intel_connector);
return NULL;
}
- drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
+ intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
+ intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
+ intel_connector->dp.dsc_hblank_expansion_quirk =
+ detect_dsc_hblank_expansion_quirk(intel_connector);
+
+ drm_connector_helper_add(connector, &mst_connector_helper_funcs);
- for_each_pipe(dev_priv, pipe) {
+ for_each_pipe(display, pipe) {
struct drm_encoder *enc =
&intel_dp->mst_encoders[pipe]->base.base;
@@ -1724,13 +1743,13 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
goto err;
}
- ret = intel_dp_mst_add_properties(intel_dp, connector, pathprop);
+ ret = mst_topology_add_connector_properties(intel_dp, connector, pathprop);
if (ret)
goto err;
ret = intel_dp_hdcp_init(dig_port, intel_connector);
if (ret)
- drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
+ drm_dbg_kms(display->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
connector->name, connector->base.id);
return connector;
@@ -1741,24 +1760,26 @@ err:
}
static void
-intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
+mst_topology_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
}
-static const struct drm_dp_mst_topology_cbs mst_cbs = {
- .add_connector = intel_dp_add_mst_connector,
- .poll_hpd_irq = intel_dp_mst_poll_hpd_irq,
+static const struct drm_dp_mst_topology_cbs mst_topology_cbs = {
+ .add_connector = mst_topology_add_connector,
+ .poll_hpd_irq = mst_topology_poll_hpd_irq,
};
+/* Create a fake encoder for an individual MST stream */
static struct intel_dp_mst_encoder *
-intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
+mst_stream_encoder_create(struct intel_digital_port *dig_port, enum pipe pipe)
{
+ struct intel_display *display = to_intel_display(dig_port);
+ struct intel_encoder *primary_encoder = &dig_port->base;
struct intel_dp_mst_encoder *intel_mst;
- struct intel_encoder *intel_encoder;
- struct drm_device *dev = dig_port->base.base.dev;
+ struct intel_encoder *encoder;
intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
@@ -1766,16 +1787,16 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
return NULL;
intel_mst->pipe = pipe;
- intel_encoder = &intel_mst->base;
+ encoder = &intel_mst->base;
intel_mst->primary = dig_port;
- drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
+ drm_encoder_init(display->drm, &encoder->base, &mst_stream_encoder_funcs,
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
- intel_encoder->type = INTEL_OUTPUT_DP_MST;
- intel_encoder->power_domain = dig_port->base.power_domain;
- intel_encoder->port = dig_port->base.port;
- intel_encoder->cloneable = 0;
+ encoder->type = INTEL_OUTPUT_DP_MST;
+ encoder->power_domain = primary_encoder->power_domain;
+ encoder->port = primary_encoder->port;
+ encoder->cloneable = 0;
/*
* This is wrong, but broken userspace uses the intersection
* of possible_crtcs of all the encoders of a given connector
@@ -1784,36 +1805,37 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
* To keep such userspace functioning we must misconfigure
* this to make sure the intersection is not empty :(
*/
- intel_encoder->pipe_mask = ~0;
-
- intel_encoder->compute_config = intel_dp_mst_compute_config;
- intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
- intel_encoder->disable = intel_mst_disable_dp;
- intel_encoder->post_disable = intel_mst_post_disable_dp;
- intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
- intel_encoder->update_pipe = intel_ddi_update_pipe;
- intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
- intel_encoder->pre_enable = intel_mst_pre_enable_dp;
- intel_encoder->enable = intel_mst_enable_dp;
- intel_encoder->audio_enable = intel_audio_codec_enable;
- intel_encoder->audio_disable = intel_audio_codec_disable;
- intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
- intel_encoder->get_config = intel_dp_mst_enc_get_config;
- intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
+ encoder->pipe_mask = ~0;
+
+ encoder->compute_config = mst_stream_compute_config;
+ encoder->compute_config_late = mst_stream_compute_config_late;
+ encoder->disable = mst_stream_disable;
+ encoder->post_disable = mst_stream_post_disable;
+ encoder->post_pll_disable = mst_stream_post_pll_disable;
+ encoder->update_pipe = intel_ddi_update_pipe;
+ encoder->pre_pll_enable = mst_stream_pre_pll_enable;
+ encoder->pre_enable = mst_stream_pre_enable;
+ encoder->enable = mst_stream_enable;
+ encoder->audio_enable = intel_audio_codec_enable;
+ encoder->audio_disable = intel_audio_codec_disable;
+ encoder->get_hw_state = mst_stream_get_hw_state;
+ encoder->get_config = mst_stream_get_config;
+ encoder->initial_fastset_check = mst_stream_initial_fastset_check;
return intel_mst;
}
+/* Create the fake encoders for MST streams */
static bool
-intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
+mst_stream_encoders_create(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_dp *intel_dp = &dig_port->dp;
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe;
- for_each_pipe(dev_priv, pipe)
- intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
+ for_each_pipe(display, pipe)
+ intel_dp->mst_encoders[pipe] = mst_stream_encoder_create(dig_port, pipe);
return true;
}
@@ -1826,25 +1848,25 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_dp *intel_dp = &dig_port->dp;
enum port port = dig_port->base.port;
int ret;
- if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
+ if (!HAS_DP_MST(display) || intel_dp_is_edp(intel_dp))
return 0;
- if (DISPLAY_VER(i915) < 12 && port == PORT_A)
+ if (DISPLAY_VER(display) < 12 && port == PORT_A)
return 0;
- if (DISPLAY_VER(i915) < 11 && port == PORT_E)
+ if (DISPLAY_VER(display) < 11 && port == PORT_E)
return 0;
- intel_dp->mst_mgr.cbs = &mst_cbs;
+ intel_dp->mst_mgr.cbs = &mst_topology_cbs;
/* create encoders */
- intel_dp_create_fake_mst_encoders(dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
+ mst_stream_encoders_create(dig_port);
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm,
&intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->mst_mgr.cbs = NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 8343804ce3f8..c6bdc1d190a4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+struct drm_connector_state;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -30,4 +31,10 @@ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
void intel_dp_mst_prepare_probe(struct intel_dp *intel_dp);
bool intel_dp_mst_verify_dpcd_state(struct intel_dp *intel_dp);
+int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp, int min_bpp,
+ struct drm_connector_state *conn_state,
+ int step, bool dsc);
+
#endif /* __INTEL_DP_MST_H__ */
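The rename pass above is mechanical: helpers that used to derive struct drm_i915_private via to_i915() now derive struct intel_display via to_intel_display(), which accepts several pointer types. A toy model of that type-dispatched conversion, assuming a _Generic-style macro and using stand-in types rather than the i915 definitions:

#include <stdio.h>

struct intel_display { const char *name; };
struct intel_encoder { struct intel_display *display; };
struct intel_connector { struct intel_display *display; };

static struct intel_display *enc_to_display(struct intel_encoder *e)
{
	return e->display;
}

static struct intel_display *conn_to_display(struct intel_connector *c)
{
	return c->display;
}

/*
 * Dispatch on the pointer type, in the spirit of the driver's
 * to_intel_display(); an illustrative sketch only.
 */
#define to_display(p)						\
	_Generic((p),						\
		 struct intel_encoder *: enc_to_display,	\
		 struct intel_connector *: conn_to_display)(p)

int main(void)
{
	struct intel_display d = { "display0" };
	struct intel_encoder e = { &d };
	struct intel_connector c = { &d };

	printf("%s %s\n", to_display(&e)->name, to_display(&c)->name);
	return 0;
}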
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
new file mode 100644
index 000000000000..380b359b0420
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include <linux/debugfs.h>
+
+#include <drm/display/drm_dp.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
+
+#include "i915_reg.h"
+#include "intel_ddi.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
+#include "intel_dp_test.h"
+
+void intel_dp_test_reset(struct intel_dp *intel_dp)
+{
+ /*
+ * Clear the compliance test variables so the values for the
+ * next automated test request can be captured.
+ */
+ memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+}
+
+/* Adjust link config limits based on compliance test requests. */
+void intel_dp_test_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct link_config_limits *limits)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /* For DP Compliance we override the computed bpp for the pipe */
+ if (intel_dp->compliance.test_data.bpc != 0) {
+ int bpp = 3 * intel_dp->compliance.test_data.bpc;
+
+ limits->pipe.min_bpp = bpp;
+ limits->pipe.max_bpp = bpp;
+ pipe_config->dither_force_disable = bpp == 6 * 3;
+
+ drm_dbg_kms(display->drm, "Setting pipe_bpp to %d\n", bpp);
+ }
+
+ /* Use values requested by Compliance Test Request */
+ if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
+ int index;
+
+ /* Validate the compliance test data since the max values
+ * might have changed due to link training fallback.
+ */
+ if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
+ intel_dp->compliance.test_lane_count)) {
+ index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ intel_dp->compliance.test_link_rate);
+ if (index >= 0) {
+ limits->min_rate = intel_dp->compliance.test_link_rate;
+ limits->max_rate = intel_dp->compliance.test_link_rate;
+ }
+ limits->min_lane_count = intel_dp->compliance.test_lane_count;
+ limits->max_lane_count = intel_dp->compliance.test_lane_count;
+ }
+ }
+}
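+
+intel_dp_test_compute_config() pins both ends of each limit range to the
+tester's requested value, so the normal link computation, which otherwise
+searches the range, is left exactly one candidate. A standalone sketch of
+that clamping idea, with simplified stand-ins for link_config_limits:
+
+	#include <stdbool.h>
+	#include <stdio.h>
+
+	/* Simplified stand-in for the driver's link_config_limits. */
+	struct limits {
+		int min_rate, max_rate;		/* link rate, kHz */
+		int min_lanes, max_lanes;
+	};
+
+	/* Pin both ends of the limits to the compliance-test request. */
+	static void clamp_to_test_request(struct limits *l, int rate, int lanes)
+	{
+		l->min_rate = l->max_rate = rate;
+		l->min_lanes = l->max_lanes = lanes;
+	}
+
+	static bool pick_config(const struct limits *l, int *rate, int *lanes)
+	{
+		/*
+		 * A real driver searches the range; once min == max there
+		 * is exactly one candidate left to pick.
+		 */
+		if (l->min_rate > l->max_rate || l->min_lanes > l->max_lanes)
+			return false;
+		*rate = l->max_rate;
+		*lanes = l->max_lanes;
+		return true;
+	}
+
+	int main(void)
+	{
+		struct limits l = { 162000, 810000, 1, 4 };
+		int rate, lanes;
+
+		clamp_to_test_request(&l, 270000, 2);
+		if (pick_config(&l, &rate, &lanes))
+			printf("forced link: %d kHz x%d\n", rate, lanes);
+		return 0;
+	}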
+
+/* Compliance test status bits */
+#define INTEL_DP_RESOLUTION_PREFERRED 1
+#define INTEL_DP_RESOLUTION_STANDARD 2
+#define INTEL_DP_RESOLUTION_FAILSAFE 3
+
+static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ int status = 0;
+ int test_link_rate;
+ u8 test_lane_count, test_link_bw;
+ /* (DP CTS 1.2)
+ * 4.3.1.11
+ */
+ /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
+ &test_lane_count);
+
+ if (status <= 0) {
+ drm_dbg_kms(display->drm, "Lane count read failed\n");
+ return DP_TEST_NAK;
+ }
+ test_lane_count &= DP_MAX_LANE_COUNT_MASK;
+
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
+ &test_link_bw);
+ if (status <= 0) {
+ drm_dbg_kms(display->drm, "Link Rate read failed\n");
+ return DP_TEST_NAK;
+ }
+ test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
+
+ /* Validate the requested link rate and lane count */
+ if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
+ test_lane_count))
+ return DP_TEST_NAK;
+
+ intel_dp->compliance.test_lane_count = test_lane_count;
+ intel_dp->compliance.test_link_rate = test_link_rate;
+
+ return DP_TEST_ACK;
+}
+
+static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ u8 test_pattern;
+ u8 test_misc;
+ __be16 h_width, v_height;
+ int status = 0;
+
+ /* Read the TEST_PATTERN (DP CTS 3.1.5) */
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
+ &test_pattern);
+ if (status <= 0) {
+ drm_dbg_kms(display->drm, "Test pattern read failed\n");
+ return DP_TEST_NAK;
+ }
+ if (test_pattern != DP_COLOR_RAMP)
+ return DP_TEST_NAK;
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
+ &h_width, 2);
+ if (status <= 0) {
+ drm_dbg_kms(display->drm, "H Width read failed\n");
+ return DP_TEST_NAK;
+ }
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
+ &v_height, 2);
+ if (status <= 0) {
+ drm_dbg_kms(display->drm, "V Height read failed\n");
+ return DP_TEST_NAK;
+ }
+
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
+ &test_misc);
+ if (status <= 0) {
+ drm_dbg_kms(display->drm, "TEST MISC read failed\n");
+ return DP_TEST_NAK;
+ }
+ if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
+ return DP_TEST_NAK;
+ if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
+ return DP_TEST_NAK;
+ switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
+ case DP_TEST_BIT_DEPTH_6:
+ intel_dp->compliance.test_data.bpc = 6;
+ break;
+ case DP_TEST_BIT_DEPTH_8:
+ intel_dp->compliance.test_data.bpc = 8;
+ break;
+ default:
+ return DP_TEST_NAK;
+ }
+
+ intel_dp->compliance.test_data.video_pattern = test_pattern;
+ intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
+ intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
+ /* Set test active flag here so userspace doesn't interrupt things */
+ intel_dp->compliance.test_active = true;
+
+ return DP_TEST_ACK;
+}
+
+static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ u8 test_result = DP_TEST_ACK;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+
+ if (!intel_connector->detect_edid || connector->edid_corrupt ||
+ intel_dp->aux.i2c_defer_count > 6) {
+ /* Check EDID read for NACKs, DEFERs and corruption
+ * (DP CTS 1.2 Core r1.1)
+ * 4.2.2.4 : Failed EDID read, I2C_NAK
+ * 4.2.2.5 : Failed EDID read, I2C_DEFER
+ * 4.2.2.6 : EDID corruption detected
+ * Use failsafe mode for all cases
+ */
+ if (intel_dp->aux.i2c_nack_count > 0 ||
+ intel_dp->aux.i2c_defer_count > 0)
+ drm_dbg_kms(display->drm,
+ "EDID read had %d NACKs, %d DEFERs\n",
+ intel_dp->aux.i2c_nack_count,
+ intel_dp->aux.i2c_defer_count);
+ intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
+ } else {
+ /* FIXME: Get rid of drm_edid_raw() */
+ const struct edid *block = drm_edid_raw(intel_connector->detect_edid);
+
+ /* We have to write the checksum of the last block read */
+ block += block->extensions;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
+ block->checksum) <= 0)
+ drm_dbg_kms(display->drm,
+ "Failed to write EDID checksum\n");
+
+ test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
+ intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
+ }
+
+ /* Set test active flag here so userspace doesn't interrupt things */
+ intel_dp->compliance.test_active = true;
+
+ return test_result;
+}
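+
+The checksum handling above relies on the EDID framing rule: each 128-byte
+block sums to 0 modulo 256, so the last byte is the two's complement of the
+first 127, and the sink compares what the source writes to
+DP_TEST_EDID_CHECKSUM against the last block it supplied. A minimal,
+kernel-free sketch of that computation:
+
+	#include <stdint.h>
+	#include <stdio.h>
+	#include <string.h>
+
+	#define EDID_BLOCK_SIZE 128
+
+	static uint8_t edid_block_checksum(const uint8_t block[EDID_BLOCK_SIZE])
+	{
+		uint8_t sum = 0;
+		int i;
+
+		/* Sum the first 127 bytes; the checksum byte is whatever
+		 * makes the whole block sum to 0 (mod 256). */
+		for (i = 0; i < EDID_BLOCK_SIZE - 1; i++)
+			sum += block[i];
+		return (uint8_t)(0x100 - sum);
+	}
+
+	int main(void)
+	{
+		uint8_t block[EDID_BLOCK_SIZE];
+		uint8_t csum;
+
+		memset(block, 0xab, sizeof(block));	/* dummy payload */
+		csum = edid_block_checksum(block);
+		block[EDID_BLOCK_SIZE - 1] = csum;
+
+		printf("checksum byte: 0x%02x\n", csum);
+		return 0;
+	}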
+
+static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum pipe pipe = crtc->pipe;
+ u32 pattern_val;
+
+ switch (data->phy_pattern) {
+ case DP_LINK_QUAL_PATTERN_DISABLE:
+ drm_dbg_kms(display->drm, "Disable Phy Test Pattern\n");
+ intel_de_write(display, DDI_DP_COMP_CTL(pipe), 0x0);
+ if (DISPLAY_VER(display) >= 10)
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
+ DP_TP_CTL_LINK_TRAIN_NORMAL);
+ break;
+ case DP_LINK_QUAL_PATTERN_D10_2:
+ drm_dbg_kms(display->drm, "Set D10.2 Phy Test Pattern\n");
+ intel_de_write(display, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
+ break;
+ case DP_LINK_QUAL_PATTERN_ERROR_RATE:
+ drm_dbg_kms(display->drm,
+ "Set Error Count Phy Test Pattern\n");
+ intel_de_write(display, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE |
+ DDI_DP_COMP_CTL_SCRAMBLED_0);
+ break;
+ case DP_LINK_QUAL_PATTERN_PRBS7:
+ drm_dbg_kms(display->drm, "Set PRBS7 Phy Test Pattern\n");
+ intel_de_write(display, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
+ break;
+ case DP_LINK_QUAL_PATTERN_80BIT_CUSTOM:
+ /*
+ * FIXME: Ideally the pattern should come from DPCD 0x250. As the
+ * current DPR-100 firmware cannot set it, hardcode the pattern
+ * here for now for the compliance test.
+ */
+ drm_dbg_kms(display->drm,
+ "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
+ pattern_val = 0x3e0f83e0;
+ intel_de_write(display, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
+ pattern_val = 0x0f83e0f8;
+ intel_de_write(display, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
+ pattern_val = 0x0000f83e;
+ intel_de_write(display, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
+ intel_de_write(display, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE |
+ DDI_DP_COMP_CTL_CUSTOM80);
+ break;
+ case DP_LINK_QUAL_PATTERN_CP2520_PAT_1:
+ /*
+ * FIXME: Ideally the pattern should come from DPCD 0x24A. As the
+ * current DPR-100 firmware cannot set it, hardcode the pattern
+ * here for now for the compliance test.
+ */
+ drm_dbg_kms(display->drm,
+ "Set HBR2 compliance Phy Test Pattern\n");
+ pattern_val = 0xFB;
+ intel_de_write(display, DDI_DP_COMP_CTL(pipe),
+ DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
+ pattern_val);
+ break;
+ case DP_LINK_QUAL_PATTERN_CP2520_PAT_3:
+ if (DISPLAY_VER(display) < 10) {
+ drm_warn(display->drm,
+ "Platform does not support TPS4\n");
+ break;
+ }
+ drm_dbg_kms(display->drm,
+ "Set TPS4 compliance Phy Test Pattern\n");
+ intel_de_write(display, DDI_DP_COMP_CTL(pipe), 0x0);
+ intel_de_rmw(display, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_TRAIN_PAT4_SEL_MASK | DP_TP_CTL_LINK_TRAIN_MASK,
+ DP_TP_CTL_TRAIN_PAT4_SEL_TP4A | DP_TP_CTL_LINK_TRAIN_PAT4);
+ break;
+ default:
+ drm_warn(display->drm, "Invalid Phy Test Pattern\n");
+ }
+}
+
+static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
+ link_status) < 0) {
+ drm_dbg_kms(display->drm, "failed to get link status\n");
+ return;
+ }
+
+ /* retrieve vswing & pre-emphasis setting */
+ intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
+ link_status);
+
+ intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
+
+ intel_dp_phy_pattern_update(intel_dp, crtc_state);
+
+ drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
+ intel_dp->train_set, crtc_state->lane_count);
+
+ drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
+ intel_dp->dpcd[DP_DPCD_REV]);
+}
+
+static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_dp_phy_test_params *data =
+ &intel_dp->compliance.test_data.phytest;
+
+ if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
+ drm_dbg_kms(display->drm,
+ "DP Phy Test pattern AUX read failure\n");
+ return DP_TEST_NAK;
+ }
+
+ /* Set test active flag here so userspace doesn't interrupt things */
+ intel_dp->compliance.test_active = true;
+
+ return DP_TEST_ACK;
+}
+
+void intel_dp_test_request(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ u8 response = DP_TEST_NAK;
+ u8 request = 0;
+ int status;
+
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
+ if (status <= 0) {
+ drm_dbg_kms(display->drm,
+ "Could not read test request from sink\n");
+ goto update_status;
+ }
+
+ switch (request) {
+ case DP_TEST_LINK_TRAINING:
+ drm_dbg_kms(display->drm, "LINK_TRAINING test requested\n");
+ response = intel_dp_autotest_link_training(intel_dp);
+ break;
+ case DP_TEST_LINK_VIDEO_PATTERN:
+ drm_dbg_kms(display->drm, "TEST_PATTERN test requested\n");
+ response = intel_dp_autotest_video_pattern(intel_dp);
+ break;
+ case DP_TEST_LINK_EDID_READ:
+ drm_dbg_kms(display->drm, "EDID test requested\n");
+ response = intel_dp_autotest_edid(intel_dp);
+ break;
+ case DP_TEST_LINK_PHY_TEST_PATTERN:
+ drm_dbg_kms(display->drm, "PHY_PATTERN test requested\n");
+ response = intel_dp_autotest_phy_pattern(intel_dp);
+ break;
+ default:
+ drm_dbg_kms(display->drm, "Invalid test request '%02x'\n",
+ request);
+ break;
+ }
+
+ if (response & DP_TEST_ACK)
+ intel_dp->compliance.test_type = request;
+
+update_status:
+ status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
+ if (status <= 0)
+ drm_dbg_kms(display->drm,
+ "Could not write test response to sink\n");
+}
+
+/* phy test */
+
+static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
+ struct drm_modeset_acquire_ctx *ctx,
+ u8 *pipe_mask)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ int ret = 0;
+
+ *pipe_mask = 0;
+
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state =
+ connector->base.state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (!intel_dp_has_connector(intel_dp, conn_state))
+ continue;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ continue;
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ break;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ drm_WARN_ON(display->drm,
+ !intel_crtc_has_dp_encoder(crtc_state));
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ continue;
+
+ *pipe_mask |= BIT(crtc->pipe);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return ret;
+}
+
+static int intel_dp_do_phy_test(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_crtc *crtc;
+ u8 pipe_mask;
+ int ret;
+
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex,
+ ctx);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
+ if (ret)
+ return ret;
+
+ if (pipe_mask == 0)
+ return 0;
+
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] PHY test\n",
+ encoder->base.base.id, encoder->base.name);
+
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ /* test on the MST master transcoder */
+ if (DISPLAY_VER(display) >= 12 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
+ !intel_dp_mst_is_master_trans(crtc_state))
+ continue;
+
+ intel_dp_process_phy_request(intel_dp, crtc_state);
+ break;
+ }
+
+ return 0;
+}
+
+bool intel_dp_test_phy(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ if (!intel_dp->compliance.test_active ||
+ intel_dp->compliance.test_type != DP_TEST_LINK_PHY_TEST_PATTERN)
+ return false;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ for (;;) {
+ ret = intel_dp_do_phy_test(encoder, &ctx);
+
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ continue;
+ }
+
+ break;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
+
+ return true;
+}
+
+bool intel_dp_test_short_pulse(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ bool reprobe_needed = false;
+
+ switch (intel_dp->compliance.test_type) {
+ case DP_TEST_LINK_TRAINING:
+ drm_dbg_kms(display->drm,
+ "Link Training Compliance Test requested\n");
+ /* Send a Hotplug Uevent to userspace to start modeset */
+ drm_kms_helper_hotplug_event(display->drm);
+ break;
+ case DP_TEST_LINK_PHY_TEST_PATTERN:
+ drm_dbg_kms(display->drm,
+ "PHY test pattern Compliance Test requested\n");
+ /*
+ * Schedule long hpd to do the test
+ *
+ * FIXME get rid of the ad-hoc phy test modeset code
+ * and properly incorporate it into the normal modeset.
+ */
+ reprobe_needed = true;
+ }
+
+ return reprobe_needed;
+}
+
+static ssize_t i915_displayport_test_active_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_display *display = m->private;
+ char *input_buffer;
+ int status = 0;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+ int val = 0;
+
+ if (len == 0)
+ return 0;
+
+ input_buffer = memdup_user_nul(ubuf, len);
+ if (IS_ERR(input_buffer))
+ return PTR_ERR(input_buffer);
+
+ drm_dbg_kms(display->drm, "Copied %d bytes from user\n", (unsigned int)len);
+
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0)
+ break;
+ drm_dbg_kms(display->drm, "Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ intel_dp->compliance.test_active = true;
+ else
+ intel_dp->compliance.test_active = false;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ kfree(input_buffer);
+ if (status < 0)
+ return status;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_displayport_test_active_show(struct seq_file *m, void *data)
+{
+ struct intel_display *display = m->private;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->compliance.test_active)
+ seq_puts(m, "1");
+ else
+ seq_puts(m, "0");
+ } else {
+ seq_puts(m, "0");
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
+static int i915_displayport_test_active_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_displayport_test_active_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_displayport_test_active_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_active_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_displayport_test_active_write
+};
+
+static int i915_displayport_test_data_show(struct seq_file *m, void *data)
+{
+ struct intel_display *display = m->private;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_EDID_READ)
+ seq_printf(m, "%lx",
+ intel_dp->compliance.test_data.edid);
+ else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_VIDEO_PATTERN) {
+ seq_printf(m, "hdisplay: %d\n",
+ intel_dp->compliance.test_data.hdisplay);
+ seq_printf(m, "vdisplay: %d\n",
+ intel_dp->compliance.test_data.vdisplay);
+ seq_printf(m, "bpc: %u\n",
+ intel_dp->compliance.test_data.bpc);
+ } else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_PHY_TEST_PATTERN) {
+ seq_printf(m, "pattern: %d\n",
+ intel_dp->compliance.test_data.phytest.phy_pattern);
+ seq_printf(m, "Number of lanes: %d\n",
+ intel_dp->compliance.test_data.phytest.num_lanes);
+ seq_printf(m, "Link Rate: %d\n",
+ intel_dp->compliance.test_data.phytest.link_rate);
+ seq_printf(m, "level: %02x\n",
+ intel_dp->train_set[0]);
+ }
+ } else {
+ seq_puts(m, "0");
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
+
+static int i915_displayport_test_type_show(struct seq_file *m, void *data)
+{
+ struct intel_display *display = m->private;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
+ } else {
+ seq_puts(m, "0");
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
+
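The two read-only files below use DEFINE_SHOW_ATTRIBUTE(), which generates roughly the same boilerplate as the hand-rolled i915_displayport_test_active_fops above, minus the .write hook; a simplified sketch of the expansion for i915_displayport_test_data:

static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, i915_displayport_test_data_show,
			   inode->i_private);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
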
+static const struct {
+ const char *name;
+ const struct file_operations *fops;
+} intel_display_debugfs_files[] = {
+ {"i915_dp_test_data", &i915_displayport_test_data_fops},
+ {"i915_dp_test_type", &i915_displayport_test_type_fops},
+ {"i915_dp_test_active", &i915_displayport_test_active_fops},
+};
+
+void intel_dp_test_debugfs_register(struct intel_display *display)
+{
+ struct drm_minor *minor = display->drm->primary;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
+ debugfs_create_file(intel_display_debugfs_files[i].name,
+ 0644,
+ minor->debugfs_root,
+ display,
+ intel_display_debugfs_files[i].fops);
+ }
+}
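The call site for intel_dp_test_debugfs_register() is not part of this hunk; a hypothetical wrapper showing how the display debugfs setup might invoke it:

/* hypothetical caller; the real call site is not shown in this patch */
static void example_display_debugfs_init(struct intel_display *display)
{
	intel_dp_test_debugfs_register(display);
}
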
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.h b/drivers/gpu/drm/i915/display/intel_dp_test.h
new file mode 100644
index 000000000000..dcc167e4c7f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __INTEL_DP_TEST_H__
+#define __INTEL_DP_TEST_H__
+
+#include <linux/types.h>
+
+struct intel_crtc_state;
+struct intel_display;
+struct intel_dp;
+struct link_config_limits;
+
+void intel_dp_test_reset(struct intel_dp *intel_dp);
+void intel_dp_test_request(struct intel_dp *intel_dp);
+void intel_dp_test_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct link_config_limits *limits);
+bool intel_dp_test_phy(struct intel_dp *intel_dp);
+bool intel_dp_test_short_pulse(struct intel_dp *intel_dp);
+void intel_dp_test_debugfs_register(struct intel_display *display);
+
+#endif /* __INTEL_DP_TEST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 94198bc04939..589872babdd7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -3,11 +3,10 @@
* Copyright © 2023 Intel Corporation
*/
-#include "i915_drv.h"
-
#include <drm/display/drm_dp_tunnel.h>
#include "intel_atomic.h"
+#include "intel_display_core.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_dp.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
index a0c00b7d3303..e9314cf25a19 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
@@ -20,7 +20,7 @@ struct intel_dp;
struct intel_encoder;
struct intel_link_bw_limits;
-#if defined(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915)
+#if IS_ENABLED(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915)
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx);
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index d20e4e9cf7f7..52a36a2281e6 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -22,6 +22,7 @@
*/
#include "bxt_dpio_phy_regs.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
@@ -219,8 +220,10 @@ static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
};
static const struct bxt_dpio_phy_info *
-bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
+bxt_get_phy_list(struct intel_display *display, int *count)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
if (IS_GEMINILAKE(dev_priv)) {
*count = ARRAY_SIZE(glk_dpio_phy_info);
return glk_dpio_phy_info;
@@ -231,22 +234,22 @@ bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
}
static const struct bxt_dpio_phy_info *
-bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+bxt_get_phy_info(struct intel_display *display, enum dpio_phy phy)
{
int count;
const struct bxt_dpio_phy_info *phy_list =
- bxt_get_phy_list(dev_priv, &count);
+ bxt_get_phy_list(display, &count);
return &phy_list[phy];
}
-void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+void bxt_port_to_phy_channel(struct intel_display *display, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch)
{
const struct bxt_dpio_phy_info *phy_info, *phys;
int i, count;
- phys = bxt_get_phy_list(dev_priv, &count);
+ phys = bxt_get_phy_list(display, &count);
for (i = 0; i < count; i++) {
phy_info = &phys[i];
@@ -265,7 +268,7 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
}
}
- drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c",
+ drm_WARN(display->drm, 1, "PHY not found for PORT %c",
port_name(port));
*phy = DPIO_PHY0;
*ch = DPIO_CH0;
@@ -275,16 +278,16 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
* Like intel_de_rmw() but reads from a single per-lane register and
* writes to the group register to write the same value to all the lanes.
*/
-static u32 bxt_dpio_phy_rmw_grp(struct drm_i915_private *i915,
+static u32 bxt_dpio_phy_rmw_grp(struct intel_display *display,
i915_reg_t reg_single,
i915_reg_t reg_group,
u32 clear, u32 set)
{
u32 old, val;
- old = intel_de_read(i915, reg_single);
+ old = intel_de_read(display, reg_single);
val = (old & ~clear) | set;
- intel_de_write(i915, reg_group, val);
+ intel_de_write(display, reg_group, val);
return old;
}
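Usage mirrors intel_de_rmw(); for example, the signal-level code later in this patch clears the swing-calculation init bits across all lanes of a channel with a single group write:

/* read lanes 0/1, write the whole group; clears both swing-calc bits */
bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch),
		     BXT_PORT_PCS_DW10_GRP(phy, ch),
		     TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0);
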
@@ -292,30 +295,30 @@ static u32 bxt_dpio_phy_rmw_grp(struct drm_i915_private *i915,
void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
enum dpio_channel ch;
enum dpio_phy phy;
int lane, n_entries;
trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ if (drm_WARN_ON_ONCE(display->drm, !trans))
return;
- bxt_port_to_phy_channel(dev_priv, encoder->port, &phy, &ch);
+ bxt_port_to_phy_channel(display, encoder->port, &phy, &ch);
/*
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch),
+ bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch),
BXT_PORT_PCS_DW10_GRP(phy, ch),
TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0);
for (lane = 0; lane < crtc_state->lane_count; lane++) {
int level = intel_ddi_level(encoder, crtc_state, lane);
- intel_de_rmw(dev_priv, BXT_PORT_TX_DW2_LN(phy, ch, lane),
+ intel_de_rmw(display, BXT_PORT_TX_DW2_LN(phy, ch, lane),
MARGIN_000_MASK | UNIQ_TRANS_SCALE_MASK,
MARGIN_000(trans->entries[level].bxt.margin) |
UNIQ_TRANS_SCALE(trans->entries[level].bxt.scale));
@@ -325,50 +328,50 @@ void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
int level = intel_ddi_level(encoder, crtc_state, lane);
u32 val;
- intel_de_rmw(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane),
+ intel_de_rmw(display, BXT_PORT_TX_DW3_LN(phy, ch, lane),
SCALE_DCOMP_METHOD,
trans->entries[level].bxt.enable ?
SCALE_DCOMP_METHOD : 0);
- val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN(phy, ch, lane));
+ val = intel_de_read(display, BXT_PORT_TX_DW3_LN(phy, ch, lane));
if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Disabled scaling while ouniqetrangenmethod was set");
}
for (lane = 0; lane < crtc_state->lane_count; lane++) {
int level = intel_ddi_level(encoder, crtc_state, lane);
- intel_de_rmw(dev_priv, BXT_PORT_TX_DW4_LN(phy, ch, lane),
+ intel_de_rmw(display, BXT_PORT_TX_DW4_LN(phy, ch, lane),
DE_EMPHASIS_MASK,
DE_EMPHASIS(trans->entries[level].bxt.deemphasis));
}
- bxt_dpio_phy_rmw_grp(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch),
+ bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch),
BXT_PORT_PCS_DW10_GRP(phy, ch),
0, TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
}
-bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+bool bxt_dpio_phy_is_enabled(struct intel_display *display,
enum dpio_phy phy)
{
const struct bxt_dpio_phy_info *phy_info;
- phy_info = bxt_get_phy_info(dev_priv, phy);
+ phy_info = bxt_get_phy_info(display, phy);
- if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
+ if (!(intel_de_read(display, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
return false;
- if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) &
+ if ((intel_de_read(display, BXT_PORT_CL1CM_DW0(phy)) &
(PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"DDI PHY %d powered, but power hasn't settled\n", phy);
return false;
}
- if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
- drm_dbg(&dev_priv->drm,
+ if (!(intel_de_read(display, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ drm_dbg(display->drm,
"DDI PHY %d powered, but still in reset\n", phy);
return false;
@@ -377,47 +380,44 @@ bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
return true;
}
-static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static u32 bxt_get_grc(struct intel_display *display, enum dpio_phy phy)
{
- u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy));
+ u32 val = intel_de_read(display, BXT_PORT_REF_DW6(phy));
return REG_FIELD_GET(GRC_CODE_MASK, val);
}
-static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+static void bxt_phy_wait_grc_done(struct intel_display *display,
enum dpio_phy phy)
{
- if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
- GRC_DONE, 10))
- drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n",
- phy);
+ if (intel_de_wait_for_set(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10))
+ drm_err(display->drm, "timeout waiting for PHY%d GRC\n", phy);
}
-static void _bxt_dpio_phy_init(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
{
const struct bxt_dpio_phy_info *phy_info;
u32 val;
- phy_info = bxt_get_phy_info(dev_priv, phy);
+ phy_info = bxt_get_phy_info(display, phy);
- if (bxt_dpio_phy_is_enabled(dev_priv, phy)) {
+ if (bxt_dpio_phy_is_enabled(display, phy)) {
/* Still read out the GRC value for state verification */
if (phy_info->rcomp_phy != -1)
- dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+ display->state.bxt_phy_grc = bxt_get_grc(display, phy);
- if (bxt_dpio_phy_verify_state(dev_priv, phy)) {
- drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
+ if (bxt_dpio_phy_verify_state(display, phy)) {
+ drm_dbg(display->drm, "DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
return;
}
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"DDI PHY %d enabled with invalid state, "
"force reprogramming it\n", phy);
}
- intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask);
+ intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask);
/*
* The PHY registers start out inaccessible and respond to reads with
@@ -427,92 +427,91 @@ static void _bxt_dpio_phy_init(struct drm_i915_private *dev_priv,
* The flag should get set in 100us according to the HW team, but
* use 1ms due to occasional timeouts observed with that.
*/
- if (intel_de_wait_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
+ if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy),
PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
- drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
+ drm_err(display->drm, "timeout during PHY%d power on\n",
phy);
/* Program PLL Rcomp code offset */
- intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy),
+ intel_de_rmw(display, BXT_PORT_CL1CM_DW9(phy),
IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xE4));
- intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy),
+ intel_de_rmw(display, BXT_PORT_CL1CM_DW10(phy),
IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xE4));
/* Program power gating */
- intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0,
+ intel_de_rmw(display, BXT_PORT_CL1CM_DW28(phy), 0,
OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG);
if (phy_info->dual_channel)
- intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0,
+ intel_de_rmw(display, BXT_PORT_CL2CM_DW6(phy), 0,
DW6_OLDO_DYN_PWR_DOWN_EN);
if (phy_info->rcomp_phy != -1) {
u32 grc_code;
- bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
+ bxt_phy_wait_grc_done(display, phy_info->rcomp_phy);
/*
* PHY0 isn't connected to an RCOMP resistor so copy over
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
- val = bxt_get_grc(dev_priv, phy_info->rcomp_phy);
- dev_priv->display.state.bxt_phy_grc = val;
+ val = bxt_get_grc(display, phy_info->rcomp_phy);
+ display->state.bxt_phy_grc = val;
grc_code = GRC_CODE_FAST(val) |
GRC_CODE_SLOW(val) |
GRC_CODE_NOM(val);
- intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
- intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy),
+ intel_de_write(display, BXT_PORT_REF_DW6(phy), grc_code);
+ intel_de_rmw(display, BXT_PORT_REF_DW8(phy),
0, GRC_DIS | GRC_RDY_OVRD);
}
if (phy_info->reset_delay)
udelay(phy_info->reset_delay);
- intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
+ intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
}
-void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy)
{
const struct bxt_dpio_phy_info *phy_info;
- phy_info = bxt_get_phy_info(dev_priv, phy);
+ phy_info = bxt_get_phy_info(display, phy);
- intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0);
+ intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0);
- intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
+ intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
}
-void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
{
- const struct bxt_dpio_phy_info *phy_info =
- bxt_get_phy_info(dev_priv, phy);
+ const struct bxt_dpio_phy_info *phy_info = bxt_get_phy_info(display, phy);
enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
bool was_enabled;
- lockdep_assert_held(&dev_priv->display.power.domains.lock);
+ lockdep_assert_held(&display->power.domains.lock);
was_enabled = true;
if (rcomp_phy != -1)
- was_enabled = bxt_dpio_phy_is_enabled(dev_priv, rcomp_phy);
+ was_enabled = bxt_dpio_phy_is_enabled(display, rcomp_phy);
/*
* We need to copy the GRC calibration value from rcomp_phy,
* so make sure it's powered up.
*/
if (!was_enabled)
- _bxt_dpio_phy_init(dev_priv, rcomp_phy);
+ _bxt_dpio_phy_init(display, rcomp_phy);
- _bxt_dpio_phy_init(dev_priv, phy);
+ _bxt_dpio_phy_init(display, phy);
if (!was_enabled)
- bxt_dpio_phy_uninit(dev_priv, rcomp_phy);
+ bxt_dpio_phy_uninit(display, rcomp_phy);
}
static bool __printf(6, 7)
-__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+__phy_reg_verify_state(struct intel_display *display, enum dpio_phy phy,
i915_reg_t reg, u32 mask, u32 expected,
const char *reg_fmt, ...)
{
@@ -520,7 +519,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
va_list args;
u32 val;
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(display, reg);
if ((val & mask) == expected)
return true;
@@ -528,7 +527,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
vaf.fmt = reg_fmt;
vaf.va = &args;
- drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
+ drm_dbg(display->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
"current %08x, expected %08x (mask %08x)\n",
phy, &vaf, reg.reg, val, (val & ~mask) | expected,
mask);
@@ -538,20 +537,20 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
return false;
}
-bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+bool bxt_dpio_phy_verify_state(struct intel_display *display,
enum dpio_phy phy)
{
const struct bxt_dpio_phy_info *phy_info;
u32 mask;
bool ok;
- phy_info = bxt_get_phy_info(dev_priv, phy);
+ phy_info = bxt_get_phy_info(display, phy);
#define _CHK(reg, mask, exp, fmt, ...) \
- __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
+ __phy_reg_verify_state(display, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
- if (!bxt_dpio_phy_is_enabled(dev_priv, phy))
+ if (!bxt_dpio_phy_is_enabled(display, phy))
return false;
ok = true;
@@ -575,7 +574,7 @@ bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
"BXT_PORT_CL2CM_DW6(%d)", phy);
if (phy_info->rcomp_phy != -1) {
- u32 grc_code = dev_priv->display.state.bxt_phy_grc;
+ u32 grc_code = display->state.bxt_phy_grc;
grc_code = GRC_CODE_FAST(grc_code) |
GRC_CODE_SLOW(grc_code) |
@@ -614,20 +613,20 @@ bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count)
void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
u8 lane_lat_optim_mask)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
- bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+ bxt_port_to_phy_channel(display, port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
/*
* Note that on CHV this flag is called UPAR, but has
* the same function.
*/
- intel_de_rmw(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
+ intel_de_rmw(display, BXT_PORT_TX_DW14_LN(phy, ch, lane),
LATENCY_OPTIM,
lane_lat_optim_mask & BIT(lane) ? LATENCY_OPTIM : 0);
}
@@ -636,18 +635,18 @@ void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
u8
bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
u8 mask;
- bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+ bxt_port_to_phy_channel(display, port, &phy, &ch);
mask = 0;
for (lane = 0; lane < 4; lane++) {
- u32 val = intel_de_read(dev_priv,
+ u32 val = intel_de_read(display,
BXT_PORT_TX_DW14_LN(phy, ch, lane));
if (val & LATENCY_OPTIM)
@@ -857,6 +856,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -873,7 +873,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
*/
if (ch == DPIO_CH0 && pipe == PIPE_B)
dig_port->release_cl2_override =
- !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+ !chv_phy_powergate_ch(display, DPIO_PHY0, DPIO_CH1, true);
chv_phy_powergate_lanes(encoder, true, lane_mask);
@@ -1015,11 +1015,11 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (dig_port->release_cl2_override) {
- chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+ chv_phy_powergate_ch(display, DPIO_PHY0, DPIO_CH1, false);
dig_port->release_cl2_override = false;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index 226994dcb89b..a82939165546 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -10,9 +10,9 @@
enum pipe;
enum port;
-struct drm_i915_private;
struct intel_crtc_state;
struct intel_digital_port;
+struct intel_display;
struct intel_encoder;
enum dpio_channel {
@@ -27,15 +27,15 @@ enum dpio_phy {
};
#ifdef I915
-void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+void bxt_port_to_phy_channel(struct intel_display *display, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
-bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy);
+void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy);
+bool bxt_dpio_phy_is_enabled(struct intel_display *display,
enum dpio_phy phy);
-bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+bool bxt_dpio_phy_verify_state(struct intel_display *display,
enum dpio_phy phy);
u8 bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count);
void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
@@ -73,7 +73,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
#else
-static inline void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
+static inline void bxt_port_to_phy_channel(struct intel_display *display, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch)
{
}
@@ -81,18 +81,18 @@ static inline void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
}
-static inline void bxt_dpio_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static inline void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
{
}
-static inline void bxt_dpio_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static inline void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy)
{
}
-static inline bool bxt_dpio_phy_is_enabled(struct drm_i915_private *dev_priv,
+static inline bool bxt_dpio_phy_is_enabled(struct intel_display *display,
enum dpio_phy phy)
{
return false;
}
-static inline bool bxt_dpio_phy_verify_state(struct drm_i915_private *dev_priv,
+static inline bool bxt_dpio_phy_verify_state(struct intel_display *display,
enum dpio_phy phy)
{
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 340dfce480b8..3256b1293f7f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/string_helpers.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
@@ -589,11 +590,14 @@ static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
return false;
- if (!IS_PINEVIEW(dev_priv) && !IS_LP(dev_priv))
+ if (!IS_PINEVIEW(dev_priv) &&
+ !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv))
if (clock->m1 <= clock->m2)
return false;
- if (!IS_LP(dev_priv)) {
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
+ !IS_BROXTON(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
return false;
if (clock->m < limit->m.min || limit->m.max < clock->m)
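The removed IS_LP() checks are open-coded above as the four low-power display platforms; an equivalent local helper, with a hypothetical name, would be:

/* hypothetical helper equivalent to the removed IS_LP() usage here */
static bool dpll_is_lp_platform(struct drm_i915_private *i915)
{
	return IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915) ||
	       IS_BROXTON(i915) || IS_GEMINILAKE(i915);
}
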
@@ -780,7 +784,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
max_n = limit->n.max;
/* based on hardware requirement, prefer smaller n to precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
- /* based on hardware requirement, prefere larger m1,m2 */
+ /* based on hardware requirement, prefer larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
@@ -1000,6 +1004,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
@@ -1058,7 +1063,7 @@ static u32 i9xx_dpll(const struct intel_crtc_state *crtc_state,
if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
+ intel_panel_use_ssc(display))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -1092,6 +1097,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
@@ -1128,7 +1134,7 @@ static u32 i8xx_dpll(const struct intel_crtc_state *crtc_state,
dpll |= DPLL_DVO_2X_MODE;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
+ intel_panel_use_ssc(display))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -1234,11 +1240,12 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- ((intel_panel_use_ssc(i915) && i915->display.vbt.lvds_ssc_freq == 100000) ||
+ ((intel_panel_use_ssc(display) && i915->display.vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(i915) && intel_is_dual_link_lvds(i915))))
return 25;
@@ -1268,6 +1275,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
const struct dpll *clock,
const struct dpll *reduced_clock)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
@@ -1329,7 +1337,7 @@ static u32 ilk_dpll(const struct intel_crtc_state *crtc_state,
WARN_ON(reduced_clock->p2 != clock->p2);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv))
+ intel_panel_use_ssc(display))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -1353,6 +1361,7 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -1365,7 +1374,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
+ if (intel_panel_use_ssc(display)) {
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
dev_priv->display.vbt.lvds_ssc_freq);
@@ -1529,6 +1538,7 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -1536,7 +1546,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
int refclk = 96000;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
+ if (intel_panel_use_ssc(display)) {
refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
@@ -1578,6 +1588,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -1585,7 +1596,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
int refclk = 96000;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
+ if (intel_panel_use_ssc(display)) {
refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
@@ -1616,6 +1627,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -1623,7 +1635,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
int refclk = 96000;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
+ if (intel_panel_use_ssc(display)) {
refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
@@ -1656,6 +1668,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -1663,7 +1676,7 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
int refclk = 48000;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_panel_use_ssc(dev_priv)) {
+ if (intel_panel_use_ssc(display)) {
refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
@@ -2212,7 +2225,8 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll)
{
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ struct intel_display *display = &dev_priv->display;
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_crtc_state *crtc_state;
crtc_state = intel_crtc_state_alloc(crtc);
@@ -2318,12 +2332,13 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
static void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
+ struct intel_display *display = &dev_priv->display;
bool cur_state;
- cur_state = intel_de_read(dev_priv, DPLL(dev_priv, pipe)) & DPLL_VCO_ENABLE;
- I915_STATE_WARN(dev_priv, cur_state != state,
- "PLL state assertion failure (expected %s, current %s)\n",
- str_on_off(state), str_on_off(cur_state));
+ cur_state = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
+ INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
+ "PLL state assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
}
void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index f490b2157828..d86cc9ffd4ac 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -25,6 +25,7 @@
#include <linux/string_helpers.h>
#include "bxt_dpio_phy_regs.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -173,18 +174,19 @@ void assert_shared_dpll(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
bool state)
{
+ struct intel_display *display = &i915->display;
bool cur_state;
struct intel_dpll_hw_state hw_state;
- if (drm_WARN(&i915->drm, !pll,
+ if (drm_WARN(display->drm, !pll,
"asserting DPLL %s with no DPLL\n", str_on_off(state)))
return;
cur_state = intel_dpll_get_hw_state(i915, pll, &hw_state);
- I915_STATE_WARN(i915, cur_state != state,
- "%s assertion failure (expected %s, current %s)\n",
- pll->info->name, str_on_off(state),
- str_on_off(cur_state));
+ INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
+ "%s assertion failure (expected %s, current %s)\n",
+ pll->info->name, str_on_off(state),
+ str_on_off(cur_state));
}
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
@@ -545,14 +547,15 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *i915,
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
u32 val;
bool enabled;
- val = intel_de_read(i915, PCH_DREF_CONTROL);
+ val = intel_de_read(display, PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
- I915_STATE_WARN(i915, !enabled,
- "PCH refclk assertion failure, should be active but is disabled\n");
+ INTEL_DISPLAY_STATE_WARN(display, !enabled,
+ "PCH refclk assertion failure, should be active but is disabled\n");
}
static void ibx_pch_dpll_enable(struct drm_i915_private *i915,
@@ -2035,13 +2038,14 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct intel_display *display = &i915->display;
const struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
enum dpio_phy phy;
enum dpio_channel ch;
u32 temp;
- bxt_port_to_phy_channel(i915, port, &phy, &ch);
+ bxt_port_to_phy_channel(display, port, &phy, &ch);
/* Non-SSC reference */
intel_de_rmw(i915, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);
@@ -2157,6 +2161,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
+ struct intel_display *display = &i915->display;
struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
intel_wakeref_t wakeref;
@@ -2165,7 +2170,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *i915,
u32 val;
bool ret;
- bxt_port_to_phy_channel(i915, port, &phy, &ch);
+ bxt_port_to_phy_channel(display, port, &phy, &ch);
wakeref = intel_display_power_get_if_enabled(i915,
POWER_DOMAIN_DISPLAY_CORE);
@@ -4619,6 +4624,7 @@ verify_single_dpll_state(struct drm_i915_private *i915,
struct intel_crtc *crtc,
const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_display *display = &i915->display;
struct intel_dpll_hw_state dpll_hw_state = {};
u8 pipe_mask;
bool active;
@@ -4626,22 +4632,22 @@ verify_single_dpll_state(struct drm_i915_private *i915,
active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
if (!pll->info->always_on) {
- I915_STATE_WARN(i915, !pll->on && pll->active_mask,
- "%s: pll in active use but not on in sw tracking\n",
- pll->info->name);
- I915_STATE_WARN(i915, pll->on && !pll->active_mask,
- "%s: pll is on but not used by any active pipe\n",
- pll->info->name);
- I915_STATE_WARN(i915, pll->on != active,
- "%s: pll on state mismatch (expected %i, found %i)\n",
- pll->info->name, pll->on, active);
+ INTEL_DISPLAY_STATE_WARN(display, !pll->on && pll->active_mask,
+ "%s: pll in active use but not on in sw tracking\n",
+ pll->info->name);
+ INTEL_DISPLAY_STATE_WARN(display, pll->on && !pll->active_mask,
+ "%s: pll is on but not used by any active pipe\n",
+ pll->info->name);
+ INTEL_DISPLAY_STATE_WARN(display, pll->on != active,
+ "%s: pll on state mismatch (expected %i, found %i)\n",
+ pll->info->name, pll->on, active);
}
if (!crtc) {
- I915_STATE_WARN(i915,
- pll->active_mask & ~pll->state.pipe_mask,
- "%s: more active pll users than references: 0x%x vs 0x%x\n",
- pll->info->name, pll->active_mask, pll->state.pipe_mask);
+ INTEL_DISPLAY_STATE_WARN(display,
+ pll->active_mask & ~pll->state.pipe_mask,
+ "%s: more active pll users than references: 0x%x vs 0x%x\n",
+ pll->info->name, pll->active_mask, pll->state.pipe_mask);
return;
}
@@ -4649,23 +4655,23 @@ verify_single_dpll_state(struct drm_i915_private *i915,
pipe_mask = BIT(crtc->pipe);
if (new_crtc_state->hw.active)
- I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
- "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
- pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
+ INTEL_DISPLAY_STATE_WARN(display, !(pll->active_mask & pipe_mask),
+ "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
else
- I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
- "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
- pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
+ INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
+ "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
- I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
- "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
- pll->info->name, pipe_mask, pll->state.pipe_mask);
+ INTEL_DISPLAY_STATE_WARN(display, !(pll->state.pipe_mask & pipe_mask),
+ "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
+ pll->info->name, pipe_mask, pll->state.pipe_mask);
- I915_STATE_WARN(i915,
- pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
- sizeof(dpll_hw_state)),
- "%s: pll hw state mismatch\n",
- pll->info->name);
+ INTEL_DISPLAY_STATE_WARN(display,
+ pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
+ sizeof(dpll_hw_state)),
+ "%s: pll hw state mismatch\n",
+ pll->info->name);
}
static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
@@ -4678,6 +4684,7 @@ static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -4693,16 +4700,16 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
u8 pipe_mask = BIT(crtc->pipe);
struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
- I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
- "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
- pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
+ INTEL_DISPLAY_STATE_WARN(display, pll->active_mask & pipe_mask,
+ "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
/* TC ports have both MG/TC and TBT PLL referenced simultaneously */
- I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
- new_crtc_state->shared_dpll) &&
- pll->state.pipe_mask & pipe_mask,
- "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
- pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
+ INTEL_DISPLAY_STATE_WARN(display, !has_alt_port_dpll(old_crtc_state->shared_dpll,
+ new_crtc_state->shared_dpll) &&
+ pll->state.pipe_mask & pipe_mask,
+ "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 3a6d99044828..8b1f0e92a11c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -205,7 +205,7 @@ void intel_dpt_resume(struct drm_i915_private *i915)
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
- i915_ggtt_resume_vm(fb->dpt_vm);
+ i915_ggtt_resume_vm(fb->dpt_vm, true);
}
mutex_unlock(&i915->drm.mode_config.fb_lock);
}
@@ -233,7 +233,7 @@ void intel_dpt_suspend(struct drm_i915_private *i915)
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
if (fb->dpt_vm)
- i915_ggtt_suspend_vm(fb->dpt_vm);
+ i915_ggtt_suspend_vm(fb->dpt_vm, true);
}
mutex_unlock(&i915->drm.mode_config.fb_lock);
@@ -242,7 +242,7 @@ void intel_dpt_suspend(struct drm_i915_private *i915)
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
- struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
struct drm_i915_private *i915 = to_i915(obj->dev);
struct drm_i915_gem_object *dpt_obj;
struct i915_address_space *vm;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
index 573f72068899..d2dede0a5229 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 3ca29afa5422..0fec01b79b23 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <linux/debugfs.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
@@ -66,7 +68,9 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
enum transcoder cpu_transcoder)
{
- if (HAS_DOUBLE_BUFFERED_M_N(i915))
+ struct intel_display *display = &i915->display;
+
+ if (HAS_DOUBLE_BUFFERED_M_N(display))
return true;
return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index da24e041d269..e6f8fc743fb4 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,6 +4,8 @@
*
*/
+#include <drm/drm_vblank.h>
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -37,9 +39,16 @@ struct intel_dsb {
unsigned int free_pos;
/*
- * ins_start_offset will help to store start dword of the dsb
- * instuction and help in identifying the batch of auto-increment
- * register.
+ * Previously emitted DSB instruction. Used to
+ * identify/adjust the instruction for indexed
+ * register writes.
+ */
+ u32 ins[2];
+
+ /*
+ * Start of the previously emitted DSB instruction.
+ * Used to adjust the instruction for indexed
+ * register writes.
*/
unsigned int ins_start_offset;
@@ -119,6 +128,12 @@ pre_commit_crtc_state(struct intel_atomic_state *state,
return old_crtc_state;
}
+static int dsb_vblank_delay(const struct intel_crtc_state *crtc_state)
+{
+ return intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) -
+ intel_mode_vdisplay(&crtc_state->hw.adjusted_mode);
+}
+
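dsb_vblank_delay() is the number of scanlines between the end of the active area and vblank start; e.g. with hypothetical timings of vdisplay = 2160 and vblank start = 2164 it returns 4:

/* hypothetical timings: vblank start = 2164, vdisplay = 2160 */
/* dsb_vblank_delay() => 2164 - 2160 = 4 scanlines */
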
static int dsb_vtotal(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -215,9 +230,11 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
dsb->free_pos = ALIGN(dsb->free_pos, 2);
dsb->ins_start_offset = dsb->free_pos;
+ dsb->ins[0] = ldw;
+ dsb->ins[1] = udw;
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw);
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, udw);
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
}
static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
@@ -233,23 +250,12 @@ static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
if (dsb->free_pos == 0)
return false;
- prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf,
- dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK;
- prev_reg = intel_dsb_buffer_read(&dsb->dsb_buf,
- dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK;
+ prev_opcode = dsb->ins[1] & ~DSB_REG_VALUE_MASK;
+ prev_reg = dsb->ins[1] & DSB_REG_VALUE_MASK;
return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
}
-static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg)
-{
- /* only full byte-enables can be converted to indexed writes */
- return intel_dsb_prev_ins_is_write(dsb,
- DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT |
- DSB_BYTE_EN << DSB_BYTE_EN_SHIFT,
- reg);
-}
-
static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
{
return intel_dsb_prev_ins_is_write(dsb,
@@ -258,19 +264,21 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_
}
/**
- * intel_dsb_reg_write() - Emit register wriite to the DSB context
+ * intel_dsb_reg_write_indexed() - Emit indexed register write to the DSB context
* @dsb: DSB context
* @reg: register address.
* @val: value.
*
* This function is used for writing register-value pair in command
* buffer of DSB.
+ *
+ * Note that indexed writes are slower than normal MMIO writes
+ * for a small number (less than 5 or so) of writes to the same
+ * register.
*/
-void intel_dsb_reg_write(struct intel_dsb *dsb,
- i915_reg_t reg, u32 val)
+void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 val)
{
- u32 old_val;
-
/*
* For example the buffer will look like below for 3 dwords for auto
* increment register:
@@ -287,40 +295,32 @@ void intel_dsb_reg_write(struct intel_dsb *dsb,
* we are writing an odd number of dwords, zeros will be added at the end
* for padding.
*/
- if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) &&
- !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) {
- intel_dsb_emit(dsb, val,
- (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
- (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ if (!intel_dsb_prev_ins_is_indexed_write(dsb, reg))
+ intel_dsb_emit(dsb, 0, /* count */
+ (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
i915_mmio_reg_offset(reg));
- } else {
- if (!assert_dsb_has_room(dsb))
- return;
-
- /* convert to indexed write? */
- if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
- u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf,
- dsb->ins_start_offset + 0);
-
- intel_dsb_buffer_write(&dsb->dsb_buf,
- dsb->ins_start_offset + 0, 1); /* count */
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
- (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
- i915_mmio_reg_offset(reg));
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val);
-
- dsb->free_pos++;
- }
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
- /* Update the count */
- old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset);
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1);
+ if (!assert_dsb_has_room(dsb))
+ return;
+
+ /* Update the count */
+ dsb->ins[0]++;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
+ dsb->ins[0]);
- /* if number of data words is odd, then the last dword should be 0.*/
- if (dsb->free_pos & 0x1)
- intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
- }
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
+ /* if the number of data words is odd, the last dword should be 0. */
+ if (dsb->free_pos & 0x1)
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
+}
+
+void intel_dsb_reg_write(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 val)
+{
+ intel_dsb_emit(dsb, val,
+ (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ i915_mmio_reg_offset(reg));
}
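With this split, repeated intel_dsb_reg_write_indexed() calls to one register emit a single indexed-write instruction whose count dword is patched in place on each call; after three writes the buffer holds something like (values hypothetical):

/* hypothetical buffer contents after three indexed writes to one register:
 *   [ins_start_offset + 0]  3		count, bumped on each call
 *   [ins_start_offset + 1]  DSB_OPCODE_INDEXED_WRITE | register offset
 *   [ins_start_offset + 2]  val1
 *   [ins_start_offset + 3]  val2
 *   [ins_start_offset + 4]  val3
 *   [ins_start_offset + 5]  0		padding to an even dword count
 */
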
static u32 intel_dsb_mask_to_byte_en(u32 mask)
@@ -370,6 +370,24 @@ void intel_dsb_nonpost_end(struct intel_dsb *dsb)
intel_dsb_noop(dsb, 4);
}
+void intel_dsb_interrupt(struct intel_dsb *dsb)
+{
+ intel_dsb_emit(dsb, 0,
+ DSB_OPCODE_INTERRUPT << DSB_OPCODE_SHIFT);
+}
+
+void intel_dsb_wait_usec(struct intel_dsb *dsb, int count)
+{
+ intel_dsb_emit(dsb, count,
+ DSB_OPCODE_WAIT_USEC << DSB_OPCODE_SHIFT);
+}
+
+void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count)
+{
+ intel_dsb_emit(dsb, count,
+ DSB_OPCODE_WAIT_VBLANKS << DSB_OPCODE_SHIFT);
+}
+
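These emitters queue delays into the DSB instruction stream instead of stalling the CPU; a hedged usage sketch chaining them with the new interrupt instruction:

/* wait ~100 us, then one vblank, then raise the completion interrupt */
intel_dsb_wait_usec(dsb, 100);
intel_dsb_wait_vblanks(dsb, 1);
intel_dsb_interrupt(dsb);
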
static void intel_dsb_emit_wait_dsl(struct intel_dsb *dsb,
u32 opcode, int lower, int upper)
{
@@ -510,6 +528,31 @@ static u32 dsb_error_int_en(struct intel_display *display)
return errors;
}
+void intel_dsb_vblank_evade(struct intel_atomic_state *state,
+ struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ /* FIXME calibrate sensibly */
+ int latency = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, 20);
+ int vblank_delay = dsb_vblank_delay(crtc_state);
+ int start, end;
+
+ if (pre_commit_is_vrr_active(state, crtc)) {
+ end = intel_vrr_vmin_vblank_start(crtc_state);
+ start = end - vblank_delay - latency;
+ intel_dsb_wait_scanline_out(state, dsb, start, end);
+
+ end = intel_vrr_vmax_vblank_start(crtc_state);
+ start = end - vblank_delay - latency;
+ intel_dsb_wait_scanline_out(state, dsb, start, end);
+ } else {
+ end = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode);
+ start = end - vblank_delay - latency;
+ intel_dsb_wait_scanline_out(state, dsb, start, end);
+ }
+}
+
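The evasion wait is meant to be emitted before any double-buffered register writes so they cannot straddle vblank start; a sketch with a stand-in register:

/* hypothetical commit fragment; REG and val are stand-ins */
intel_dsb_vblank_evade(state, dsb);
intel_dsb_reg_write(dsb, REG, val);	/* lands outside the critical window */
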
static void _intel_dsb_chain(struct intel_atomic_state *state,
struct intel_dsb *dsb,
struct intel_dsb *chained_dsb,
@@ -535,7 +578,7 @@ static void _intel_dsb_chain(struct intel_atomic_state *state,
intel_dsb_reg_write(dsb, DSB_INTERRUPT(pipe, chained_dsb->id),
dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
- dsb_error_int_en(display));
+ dsb_error_int_en(display) | DSB_PROG_INT_EN);
if (ctrl & DSB_WAIT_FOR_VBLANK) {
int dewake_scanline = dsb_dewake_scanline_start(state, crtc);
@@ -577,6 +620,17 @@ void intel_dsb_chain(struct intel_atomic_state *state,
wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0);
}
+void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
+ struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ const struct intel_crtc_state *crtc_state = pre_commit_crtc_state(state, crtc);
+ int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode,
+ dsb_vblank_delay(crtc_state)) + 1;
+
+ intel_dsb_wait_usec(dsb, usecs);
+}
+
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
int hw_dewake_scanline)
{
@@ -603,7 +657,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
dsb_error_int_status(display) | DSB_PROG_INT_STATUS |
- dsb_error_int_en(display));
+ dsb_error_int_en(display) | DSB_PROG_INT_EN);
intel_de_write_fw(display, DSB_HEAD(pipe, dsb->id),
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
@@ -671,6 +725,9 @@ void intel_dsb_wait(struct intel_dsb *dsb)
/* Attempt to reset it */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
+ dsb->ins[0] = 0;
+ dsb->ins[1] = 0;
+
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), 0);
intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb->id),
@@ -706,10 +763,6 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
if (!i915->display.params.enable_dsb)
return NULL;
- /* TODO: DSB is broken in Xe KMD, so disabling it until fixed */
- if (!IS_ENABLED(I915))
- return NULL;
-
dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
if (!dsb)
goto out;
@@ -727,8 +780,6 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
dsb->id = dsb_id;
dsb->crtc = crtc;
dsb->size = size / 4; /* in dwords */
- dsb->free_pos = 0;
- dsb->ins_start_offset = 0;
dsb->chicken = dsb_chicken(state, crtc);
dsb->hw_dewake_scanline =
@@ -763,12 +814,29 @@ void intel_dsb_cleanup(struct intel_dsb *dsb)
void intel_dsb_irq_handler(struct intel_display *display,
enum pipe pipe, enum intel_dsb_id dsb_id)
{
- struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(display->drm), pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
u32 tmp, errors;
tmp = intel_de_read_fw(display, DSB_INTERRUPT(pipe, dsb_id));
intel_de_write_fw(display, DSB_INTERRUPT(pipe, dsb_id), tmp);
+ if (tmp & DSB_PROG_INT_STATUS) {
+ spin_lock(&display->drm->event_lock);
+
+ if (crtc->dsb_event) {
+ /*
+ * Update vblank counter/timestamp in case it
+ * hasn't been done yet for this frame.
+ */
+ drm_crtc_accurate_vblank_count(&crtc->base);
+
+ drm_crtc_send_vblank_event(&crtc->base, crtc->dsb_event);
+ crtc->dsb_event = NULL;
+ }
+
+ spin_unlock(&display->drm->event_lock);
+ }
+
errors = tmp & dsb_error_int_status(display);
if (errors)
drm_err(display->drm, "[CRTC:%d:%s] DSB %d error interrupt: 0x%x\n",
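crtc->dsb_event is presumably armed by the atomic commit path before the DSB reaches its interrupt instruction; a hedged sketch of the producer side (the surrounding code is an assumption):

/* hypothetical arming of the DSB completion event */
spin_lock_irq(&display->drm->event_lock);
crtc->dsb_event = event;	/* consumed in intel_dsb_irq_handler() */
spin_unlock_irq(&display->drm->event_lock);

intel_dsb_interrupt(dsb);	/* sets DSB_PROG_INT_STATUS when executed */
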
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index c352c12aa59f..da6df07a3c83 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -34,17 +34,26 @@ void intel_dsb_finish(struct intel_dsb *dsb);
void intel_dsb_cleanup(struct intel_dsb *dsb);
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val);
+void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 val);
void intel_dsb_reg_write_masked(struct intel_dsb *dsb,
i915_reg_t reg, u32 mask, u32 val);
void intel_dsb_noop(struct intel_dsb *dsb, int count);
void intel_dsb_nonpost_start(struct intel_dsb *dsb);
void intel_dsb_nonpost_end(struct intel_dsb *dsb);
+void intel_dsb_interrupt(struct intel_dsb *dsb);
+void intel_dsb_wait_usec(struct intel_dsb *dsb, int count);
+void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count);
+void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
+ struct intel_dsb *dsb);
void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
struct intel_dsb *dsb,
int lower, int upper);
void intel_dsb_wait_scanline_out(struct intel_atomic_state *state,
struct intel_dsb *dsb,
int lower, int upper);
+void intel_dsb_vblank_evade(struct intel_atomic_state *state,
+ struct intel_dsb *dsb);
void intel_dsb_chain(struct intel_atomic_state *state,
struct intel_dsb *dsb,
struct intel_dsb *chained_dsb,
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index bd5888ce4852..0be46c6c9611 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -76,7 +76,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
if (fixed_mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
- return intel_mode_valid_max_plane_size(dev_priv, mode, false);
+ return intel_mode_valid_max_plane_size(dev_priv, mode, 1);
}
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index f0e3be0fe420..b2b78f39cfd3 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -323,6 +323,7 @@ enum {
static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
int gpio, bool value)
{
+ struct intel_display *display = &dev_priv->display;
int index;
if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
@@ -367,7 +368,7 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
case MIPI_AVEE_EN_2:
index = gpio == MIPI_AVEE_EN_1 ? 1 : 2;
- intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+ intel_de_rmw(display, GPIO(display, index),
GPIO_CLOCK_VAL_OUT,
GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT |
GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0));
@@ -376,7 +377,7 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
case MIPI_VIO_EN_2:
index = gpio == MIPI_VIO_EN_1 ? 1 : 2;
- intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+ intel_de_rmw(display, GPIO(display, index),
GPIO_DATA_VAL_OUT,
GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT |
GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0));
@@ -744,6 +745,23 @@ void intel_dsi_log_params(struct intel_dsi *intel_dsi)
str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
+static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format)
+{
+ switch (format) {
+ case PIXEL_FORMAT_RGB888:
+ return MIPI_DSI_FMT_RGB888;
+ case PIXEL_FORMAT_RGB666_LOOSELY_PACKED:
+ return MIPI_DSI_FMT_RGB666;
+ case PIXEL_FORMAT_RGB666:
+ return MIPI_DSI_FMT_RGB666_PACKED;
+ case PIXEL_FORMAT_RGB565:
+ return MIPI_DSI_FMT_RGB565;
+ default:
+ MISSING_CASE(format);
+ return MIPI_DSI_FMT_RGB666;
+ }
+}
+
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -761,8 +779,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format =
- pixel_format_from_register_bits(
- mipi_config->videomode_color_format << 7);
+ vbt_to_dsi_pixel_format(mipi_config->videomode_color_format);
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 12e7628cbecf..abf19dfd6d9d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -31,6 +31,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -317,6 +318,7 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *_connector, bool force)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
@@ -324,10 +326,10 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->base.status;
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
@@ -335,11 +337,11 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
static int intel_dvo_get_modes(struct drm_connector *_connector)
{
+ struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
int num_modes;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(&connector->base);
/*
@@ -416,6 +418,7 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
struct intel_dvo *intel_dvo,
const struct intel_dvo_device *dvo)
{
+ struct intel_display *display = &dev_priv->display;
struct i2c_adapter *i2c;
u32 dpll[I915_MAX_PIPES];
enum pipe pipe;
@@ -427,7 +430,7 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
* special cases, but otherwise default to what's defined
* in the spec.
*/
- if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
+ if (intel_gmbus_is_valid_pin(display, dvo->gpio))
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
gpio = GMBUS_PIN_SSC;
@@ -439,7 +442,7 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+ i2c = intel_gmbus_get_adapter(display, gpio);
intel_dvo->dev = *dvo;
@@ -488,6 +491,7 @@ static bool intel_dvo_probe(struct drm_i915_private *i915,
void intel_dvo_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_connector *connector;
struct intel_encoder *encoder;
struct intel_dvo *intel_dvo;
@@ -548,7 +552,7 @@ void intel_dvo_init(struct drm_i915_private *i915)
drm_connector_init_with_ddc(&i915->drm, &connector->base,
&intel_dvo_connector_funcs,
intel_dvo_connector_type(&intel_dvo->dev),
- intel_gmbus_get_adapter(i915, GMBUS_PIN_DPC));
+ intel_gmbus_get_adapter(display, GMBUS_PIN_DPC));
drm_connector_helper_add(&connector->base,
&intel_dvo_connector_helper_funcs);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 5be7bb43e2e0..223c4218c019 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -3,15 +3,16 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/drm_blend.h>
-#include <drm/drm_modeset_helper.h>
-
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
-#include "gem/i915_gem_object.h"
+#include <drm/drm_blend.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_modeset_helper.h>
+
#include "i915_drv.h"
#include "intel_atomic_plane.h"
+#include "intel_bo.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
@@ -44,6 +45,14 @@ static const struct drm_format_info skl_ccs_formats[] = {
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
/*
@@ -66,6 +75,30 @@ static const struct drm_format_info gen12_ccs_formats[] = {
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
.char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 2,
+ .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 2,
+ .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 2,
+ .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 2,
+ .char_per_block = { 8, 1 }, .block_w = { 1, 1 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
.char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
.hsub = 2, .vsub = 1, .is_yuv = true },
@@ -101,31 +134,79 @@ static const struct drm_format_info gen12_ccs_formats[] = {
*/
static const struct drm_format_info gen12_ccs_cc_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 },
.hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
+ .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 3,
+ .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 3,
+ .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 3,
+ .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 3,
+ .char_per_block = { 8, 1, 0 }, .block_w = { 1, 1, 0 }, .block_h = { 1, 1, 0 },
.hsub = 1, .vsub = 1, .has_alpha = true },
};
static const struct drm_format_info gen12_flat_ccs_cc_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
- .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
- .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
.hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
- .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
.hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
- .char_per_block = { 4, 0 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
+ .char_per_block = { 4, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 2,
+ .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 2,
+ .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 2,
+ .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 2,
+ .char_per_block = { 8, 0 }, .block_w = { 1, 0 }, .block_h = { 1, 0 },
.hsub = 1, .vsub = 1, .has_alpha = true },
};
@@ -268,7 +349,7 @@ static const struct intel_modifier_desc intel_modifiers[] = {
.plane_caps = INTEL_PLANE_CAP_TILING_Y,
}, {
.modifier = I915_FORMAT_MOD_X_TILED,
- .display_ver = DISPLAY_VER_ALL,
+ .display_ver = { 0, 29 },
.plane_caps = INTEL_PLANE_CAP_TILING_X,
}, {
.modifier = DRM_FORMAT_MOD_LINEAR,
@@ -438,6 +519,19 @@ bool intel_fb_needs_64k_phys(u64 modifier)
INTEL_PLANE_CAP_NEED64K_PHYS);
}
+/**
+ * intel_fb_is_tile4_modifier: Check if a modifier is a tile4 modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a tile4 modifier.
+ */
+bool intel_fb_is_tile4_modifier(u64 modifier)
+{
+ return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+ INTEL_PLANE_CAP_TILING_4);
+}
+
static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md,
u8 display_ver_from, u8 display_ver_until)
{
@@ -1224,7 +1318,7 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int color_plane,
int plane_width, int *x, int *y)
{
- struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base);
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
int ret;
ret = intel_fb_offset_to_xy(x, y, &fb->base, color_plane);
@@ -1248,7 +1342,7 @@ static int convert_plane_offset_to_xy(const struct intel_framebuffer *fb, int co
* fb layout agrees with the fence layout. We already check that the
* fb stride matches the fence stride elsewhere.
*/
- if (color_plane == 0 && i915_gem_object_is_tiled(obj) &&
+ if (color_plane == 0 && intel_bo_is_tiled(obj) &&
(*x + plane_width) * fb->base.format->cpp[color_plane] > fb->base.pitches[color_plane]) {
drm_dbg_kms(fb->base.dev,
"bad fb plane %d offset: 0x%x\n",
@@ -1568,7 +1662,7 @@ static unsigned int intel_fb_min_alignment(const struct drm_framebuffer *fb)
int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *fb)
{
- struct drm_i915_gem_object *obj = intel_fb_obj(&fb->base);
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
u32 gtt_offset_rotated = 0;
u32 gtt_offset_remapped = 0;
unsigned int max_size = 0;
@@ -1600,7 +1694,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
* arithmetic related to alignment and offset calculation.
*/
if (is_gen12_ccs_cc_plane(&fb->base, i)) {
- if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE))
+ if (IS_ALIGNED(fb->base.offsets[i], 64))
continue;
else
return -EINVAL;
@@ -1641,10 +1735,10 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
max_size = max(max_size, offset + size);
}
- if (mul_u32_u32(max_size, tile_size) > intel_bo_to_drm_bo(obj)->size) {
+ if (mul_u32_u32(max_size, tile_size) > obj->size) {
drm_dbg_kms(&i915->drm,
"fb too big for bo (need %llu bytes, have %zu bytes)\n",
- mul_u32_u32(max_size, tile_size), intel_bo_to_drm_bo(obj)->size);
+ mul_u32_u32(max_size, tile_size), obj->size);
return -EINVAL;
}
@@ -1868,7 +1962,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
- intel_fb_bo_framebuffer_fini(intel_fb_obj(fb));
+ intel_fb_bo_framebuffer_fini(intel_fb_bo(fb));
kfree(intel_fb);
}
@@ -1877,16 +1971,16 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int *handle)
{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_private *i915 = to_i915(intel_bo_to_drm_bo(obj)->dev);
+ struct drm_gem_object *obj = intel_fb_bo(fb);
+ struct intel_display *display = to_intel_display(obj->dev);
- if (i915_gem_object_is_userptr(obj)) {
- drm_dbg(&i915->drm,
+ if (intel_bo_is_userptr(obj)) {
+ drm_dbg(display->drm,
"attempting to use a userptr for a framebuffer, denied\n");
return -EINVAL;
}
- return drm_gem_handle_create(file, intel_bo_to_drm_bo(obj), handle);
+ return drm_gem_handle_create(file, obj, handle);
}
struct frontbuffer_fence_cb {
@@ -1910,7 +2004,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_clip_rect *clips,
unsigned int num_clips)
{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_gem_object *obj = intel_fb_bo(fb);
struct intel_frontbuffer *front = to_intel_frontbuffer(fb);
struct dma_fence *fence;
struct frontbuffer_fence_cb *cb;
@@ -1919,10 +2013,10 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
if (!atomic_read(&front->bits))
return 0;
- if (dma_resv_test_signaled(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false)))
+ if (dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(false)))
goto flush;
- ret = dma_resv_get_singleton(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false),
+ ret = dma_resv_get_singleton(obj->resv, dma_resv_usage_rw(false),
&fence);
if (ret || !fence)
goto flush;
@@ -1949,7 +2043,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
return ret;
flush:
- i915_gem_object_flush_if_display(obj);
+ intel_bo_flush_if_display(obj);
intel_frontbuffer_flush(front, ORIGIN_DIRTYFB);
return ret;
}
@@ -1961,10 +2055,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
};
int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
- struct drm_i915_gem_object *obj,
+ struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_i915_private *dev_priv = to_i915(intel_bo_to_drm_bo(obj)->dev);
+ struct drm_i915_private *dev_priv = to_i915(obj->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
int ret = -EINVAL;
@@ -2040,7 +2134,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
}
- fb->obj[i] = intel_bo_to_drm_bo(obj);
+ fb->obj[i] = obj;
}
ret = intel_fill_fb_info(dev_priv, intel_fb);
@@ -2084,7 +2178,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
+ struct drm_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
struct drm_i915_private *i915 = to_i915(dev);
@@ -2093,13 +2187,13 @@ intel_user_framebuffer_create(struct drm_device *dev,
return ERR_CAST(obj);
fb = intel_framebuffer_create(obj, &mode_cmd);
- drm_gem_object_put(intel_bo_to_drm_bo(obj));
+ drm_gem_object_put(obj);
return fb;
}
struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
+intel_framebuffer_create(struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct intel_framebuffer *intel_fb;
@@ -2119,3 +2213,8 @@ err:
kfree(intel_fb);
return ERR_PTR(ret);
}
+
+struct drm_gem_object *intel_fb_bo(const struct drm_framebuffer *fb)
+{
+ return fb ? fb->obj[0] : NULL;
+}
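intel_fb_bo() above is the new generic accessor: display code now passes plain struct drm_gem_object around and downcasts only where i915 internals are genuinely needed, as the earlier hunks in this file do. A condensed illustration of both sides of that boundary, reusing calls that appear in the diff:

struct drm_gem_object *obj = intel_fb_bo(&fb->base);

/* Generic DRM paths use the GEM object directly (obj->size, obj->resv, ...). */
if (mul_u32_u32(max_size, tile_size) > obj->size)
	return -EINVAL;

/* i915-internal paths downcast with the existing helper. */
struct drm_i915_gem_object *i915_obj = to_intel_bo(obj);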
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 10de437e8ef8..d78993e5eb62 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -12,6 +12,7 @@
struct drm_device;
struct drm_file;
struct drm_framebuffer;
+struct drm_gem_object;
struct drm_i915_gem_object;
struct drm_i915_private;
struct drm_mode_fb_cmd2;
@@ -35,6 +36,7 @@ bool intel_fb_is_ccs_modifier(u64 modifier);
bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier);
bool intel_fb_is_mc_ccs_modifier(u64 modifier);
bool intel_fb_needs_64k_phys(u64 modifier);
+bool intel_fb_is_tile4_modifier(u64 modifier);
bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane);
int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb);
@@ -84,9 +86,12 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio
int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
int intel_framebuffer_init(struct intel_framebuffer *ifb,
- struct drm_i915_gem_object *obj,
+ struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
+intel_framebuffer_create(struct drm_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *user_mode_cmd);
@@ -96,4 +101,6 @@ bool intel_fb_uses_dpt(const struct drm_framebuffer *fb);
unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier);
+struct drm_gem_object *intel_fb_bo(const struct drm_framebuffer *fb);
+
#endif /* __INTEL_FB_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
index 4be09541e509..810ca6ff8640 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -11,15 +11,16 @@
#include "intel_fb.h"
#include "intel_fb_bo.h"
-void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj)
+void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
{
/* Nothing to do for i915 */
}
int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
- struct drm_i915_gem_object *obj,
+ struct drm_gem_object *_obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned int tiling, stride;
@@ -74,7 +75,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
return 0;
}
-struct drm_i915_gem_object *
+struct drm_gem_object *
intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -93,5 +94,5 @@ intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
return ERR_PTR(-EREMOTE);
}
- return obj;
+ return intel_bo_to_drm_bo(obj);
}
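The lookup now converts at the boundary: intel_fb_bo_lookup_valid_bo() still works on the i915 object internally, and only the return is widened to the generic type. A sketch of that shape, assuming the usual GEM handle lookup (the full function body is not shown in this hunk, so the lookup call is illustrative):

struct drm_i915_gem_object *obj;

obj = i915_gem_object_lookup(filp, mode_cmd->handles[0]);
if (!obj)
	return ERR_PTR(-ENOENT);

/* ... validity checks elided ... */

return intel_bo_to_drm_bo(obj);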
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
index 232bf898b013..e71acd1bcb24 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h
@@ -7,18 +7,18 @@
#define __INTEL_FB_BO_H__
struct drm_file;
-struct drm_mode_fb_cmd2;
-struct drm_i915_gem_object;
+struct drm_gem_object;
struct drm_i915_private;
+struct drm_mode_fb_cmd2;
struct intel_framebuffer;
-void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj);
+void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj);
int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
- struct drm_i915_gem_object *obj,
+ struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
-struct drm_i915_gem_object *
+struct drm_gem_object *
intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
struct drm_file *filp,
const struct drm_mode_fb_cmd2 *user_mode_cmd);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 575b271e012b..d3a86f9c6bc8 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -26,7 +26,8 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_gem_object *_obj = intel_fb_bo(fb);
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
int ret;
@@ -111,7 +112,8 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_gem_object *_obj = intel_fb_bo(fb);
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
intel_wakeref_t wakeref;
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
@@ -274,9 +276,11 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
* will trigger might_sleep() even if it won't actually sleep,
* which is the case when the fb has already been pinned.
*/
- if (intel_plane_needs_physical(plane))
- plane_state->phys_dma_addr =
- i915_gem_object_get_dma_address(intel_fb_obj(&fb->base), 0);
+ if (intel_plane_needs_physical(plane)) {
+ struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+
+ plane_state->phys_dma_addr = i915_gem_object_get_dma_address(obj, 0);
+ }
} else {
unsigned int alignment = intel_plane_fb_min_alignment(plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 52b79bacef4d..df05904bac8a 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -38,6 +38,7 @@
* forcibly disable it to allow proper screen updates.
*/
+#include <linux/debugfs.h>
#include <linux/string_helpers.h>
#include <drm/drm_blend.h>
@@ -1346,7 +1347,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
/* Wa_14016291713 */
if ((IS_DISPLAY_VER(display, 12, 13) ||
- IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0)) &&
+ IS_DISPLAY_VERx100_STEP(i915, 1400, STEP_A0, STEP_C0)) &&
crtc_state->has_psr && !crtc_state->has_panel_replay) {
plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
return 0;
@@ -1792,7 +1793,6 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
mutex_lock(&fbc->lock);
@@ -1805,7 +1805,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
intel_fbc_deactivate(fbc, "FIFO underrun");
if (!fbc->flip_pending)
- intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe));
+ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, fbc->state.plane->pipe));
__intel_fbc_disable(fbc);
out:
mutex_unlock(&fbc->lock);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 49a1ac4f5491..00852ff5b247 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -41,12 +41,11 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include "gem/i915_gem_mman.h"
-#include "gem/i915_gem_object.h"
-
#include "i915_drv.h"
+#include "intel_bo.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
@@ -129,10 +128,9 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct intel_fbdev *fbdev = to_intel_fbdev(info->par);
- struct drm_gem_object *bo = drm_gem_fb_get_obj(&fbdev->fb->base, 0);
- struct drm_i915_gem_object *obj = to_intel_bo(bo);
+ struct drm_gem_object *obj = drm_gem_fb_get_obj(&fbdev->fb->base, 0);
- return i915_gem_fb_mmap(obj, vma);
+ return intel_bo_fb_mmap(obj, vma);
}
static void intel_fbdev_fb_destroy(struct fb_info *info)
@@ -187,7 +185,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct i915_vma *vma;
unsigned long flags = 0;
bool prealloc = false;
- struct drm_i915_gem_object *obj;
+ struct drm_gem_object *obj;
int ret;
mutex_lock(&ifbdev->hpd_lock);
@@ -209,7 +207,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
drm_framebuffer_put(&fb->base);
fb = NULL;
}
- if (!fb || drm_WARN_ON(dev, !intel_fb_obj(&fb->base))) {
+ if (!fb || drm_WARN_ON(dev, !intel_fb_bo(&fb->base))) {
drm_dbg_kms(&dev_priv->drm,
"no BIOS fb, allocating a new one\n");
fb = intel_fbdev_fb_alloc(helper, sizes);
@@ -247,7 +245,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fbops = &intelfb_ops;
- obj = intel_fb_obj(&fb->base);
+ obj = intel_fb_bo(&fb->base);
ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma);
if (ret)
@@ -259,7 +257,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (!i915_gem_object_is_shmem(obj) && !prealloc)
+ if (!intel_bo_is_shmem(obj) && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -323,8 +321,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- struct drm_i915_gem_object *obj =
- intel_fb_obj(plane_state->uapi.fb);
+ struct drm_gem_object *obj = intel_fb_bo(plane_state->uapi.fb);
if (!crtc_state->uapi.active) {
drm_dbg_kms(&i915->drm,
@@ -340,12 +337,12 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
continue;
}
- if (intel_bo_to_drm_bo(obj)->size > max_size) {
+ if (obj->size > max_size) {
drm_dbg_kms(&i915->drm,
"found possible fb from [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
fb = to_intel_framebuffer(plane_state->uapi.fb);
- max_size = intel_bo_to_drm_bo(obj)->size;
+ max_size = obj->size;
}
}
@@ -533,7 +530,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* full of whatever garbage was left in there.
*/
if (state == FBINFO_STATE_RUNNING &&
- !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
+ !intel_bo_is_shmem(intel_fb_bo(&ifbdev->fb->base)))
memset_io(info->screen_base, 0, info->screen_size);
drm_fb_helper_set_suspend(&ifbdev->helper, state);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 497525ef9668..4991c35a2632 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -9,6 +9,7 @@
#include "i915_drv.h"
#include "intel_display_types.h"
+#include "intel_fb.h"
#include "intel_fbdev_fb.h"
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
@@ -60,15 +61,16 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
return ERR_PTR(-ENOMEM);
}
- fb = intel_framebuffer_create(obj, &mode_cmd);
+ fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), &mode_cmd);
i915_gem_object_put(obj);
return to_intel_framebuffer(fb);
}
int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
- struct drm_i915_gem_object *obj, struct i915_vma *vma)
+ struct drm_gem_object *_obj, struct i915_vma *vma)
{
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
struct i915_gem_ww_ctx ww;
void __iomem *vaddr;
int ret;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
index 4832fe688fbf..e502ae375fc0 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -8,7 +8,7 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
-struct drm_i915_gem_object;
+struct drm_gem_object;
struct drm_i915_private;
struct fb_info;
struct i915_vma;
@@ -16,6 +16,6 @@ struct i915_vma;
struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
- struct drm_i915_gem_object *obj, struct i915_vma *vma);
+ struct drm_gem_object *obj, struct i915_vma *vma);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 222cd0e1a2bc..37cdfa9c692a 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -7,6 +7,7 @@
#include <drm/drm_fixed.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
@@ -26,9 +27,10 @@ struct intel_fdi_funcs {
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
+ struct intel_display *display = &dev_priv->display;
bool cur_state;
- if (HAS_DDI(dev_priv)) {
+ if (HAS_DDI(display)) {
/*
* DDI does not have a specific FDI_TX register.
*
@@ -36,14 +38,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
* so pipe->transcoder cast is fine here.
*/
enum transcoder cpu_transcoder = (enum transcoder)pipe;
- cur_state = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
+ cur_state = intel_de_read(display,
+ TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
} else {
- cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
+ cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
}
- I915_STATE_WARN(dev_priv, cur_state != state,
- "FDI TX state assertion failure (expected %s, current %s)\n",
- str_on_off(state), str_on_off(cur_state));
+ INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
+ "FDI TX state assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
}
void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
@@ -59,12 +61,13 @@ void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
+ struct intel_display *display = &dev_priv->display;
bool cur_state;
- cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
- I915_STATE_WARN(dev_priv, cur_state != state,
- "FDI RX state assertion failure (expected %s, current %s)\n",
- str_on_off(state), str_on_off(cur_state));
+ cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
+ INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
+ "FDI RX state assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
}
void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
@@ -80,6 +83,7 @@ void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
enum pipe pipe)
{
+ struct intel_display *display = &i915->display;
bool cur_state;
/* ILK FDI PLL is always enabled */
@@ -87,23 +91,24 @@ void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
- if (HAS_DDI(i915))
+ if (HAS_DDI(display))
return;
- cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
- I915_STATE_WARN(i915, !cur_state,
- "FDI TX PLL assertion failure, should be active but is disabled\n");
+ cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
+ INTEL_DISPLAY_STATE_WARN(display, !cur_state,
+ "FDI TX PLL assertion failure, should be active but is disabled\n");
}
static void assert_fdi_rx_pll(struct drm_i915_private *i915,
enum pipe pipe, bool state)
{
+ struct intel_display *display = &i915->display;
bool cur_state;
- cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
- I915_STATE_WARN(i915, cur_state != state,
- "FDI RX PLL assertion failure (expected %s, current %s)\n",
- str_on_off(state), str_on_off(cur_state));
+ cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
+ INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
+ "FDI RX PLL assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
}
void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
@@ -137,6 +142,7 @@ void intel_fdi_link_train(struct intel_crtc *crtc,
*/
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state;
const struct intel_crtc_state *new_crtc_state;
@@ -145,7 +151,7 @@ int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
return 0;
- crtc = intel_crtc_for_pipe(i915, PIPE_C);
+ crtc = intel_crtc_for_pipe(display, PIPE_C);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
if (!new_crtc_state)
return 0;
@@ -157,7 +163,7 @@ int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
if (!old_crtc_state->fdi_lanes)
return 0;
- crtc = intel_crtc_for_pipe(i915, PIPE_B);
+ crtc = intel_crtc_for_pipe(display, PIPE_B);
new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(new_crtc_state))
return PTR_ERR(new_crtc_state);
@@ -184,6 +190,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config,
enum pipe *pipe_to_reduce)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state = pipe_config->uapi.state;
struct intel_crtc *other_crtc;
@@ -223,7 +230,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
if (pipe_config->fdi_lanes <= 2)
return 0;
- other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
+ other_crtc = intel_crtc_for_pipe(display, PIPE_C);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
@@ -244,7 +251,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return -EINVAL;
}
- other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
+ other_crtc = intel_crtc_for_pipe(display, PIPE_B);
other_crtc_state =
intel_atomic_get_crtc_state(state, other_crtc);
if (IS_ERR(other_crtc_state))
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 8949fbb1cc60..cda1daf4cdea 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -57,6 +57,7 @@
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
enum pipe pipe;
@@ -64,7 +65,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
lockdep_assert_held(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(display, pipe);
if (crtc->cpu_fifo_underrun_disabled)
return false;
@@ -75,6 +76,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
struct intel_crtc *crtc;
@@ -82,7 +84,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
lockdep_assert_held(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
- crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(display, pipe);
if (crtc->pch_fifo_underrun_disabled)
return false;
@@ -93,6 +95,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = PIPESTAT(dev_priv, crtc->pipe);
u32 enable_mask;
@@ -106,7 +109,7 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
intel_de_posting_read(dev_priv, reg);
- trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
+ trace_intel_cpu_fifo_underrun(display, crtc->pipe);
drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe));
}
@@ -147,6 +150,7 @@ static void ilk_set_fifo_underrun_reporting(struct drm_device *dev,
static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT);
@@ -159,7 +163,7 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
intel_de_posting_read(dev_priv, GEN7_ERR_INT);
- trace_intel_cpu_fifo_underrun(dev_priv, pipe);
+ trace_intel_cpu_fifo_underrun(display, pipe);
drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe));
}
@@ -188,35 +192,15 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
}
}
-static u32
-icl_pipe_status_underrun_mask(struct drm_i915_private *dev_priv)
-{
- u32 mask = PIPE_STATUS_UNDERRUN;
-
- if (DISPLAY_VER(dev_priv) >= 13)
- mask |= PIPE_STATUS_SOFT_UNDERRUN_XELPD |
- PIPE_STATUS_HARD_UNDERRUN_XELPD |
- PIPE_STATUS_PORT_UNDERRUN_XELPD;
-
- return mask;
-}
-
static void bdw_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 mask = gen8_de_pipe_underrun_mask(dev_priv);
- if (enable) {
- if (DISPLAY_VER(dev_priv) >= 11)
- intel_de_write(dev_priv,
- ICL_PIPESTATUS(dev_priv, pipe),
- icl_pipe_status_underrun_mask(dev_priv));
-
- bdw_enable_pipe_irq(dev_priv, pipe, mask);
- } else {
- bdw_disable_pipe_irq(dev_priv, pipe, mask);
- }
+ if (enable)
+ bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
+ else
+ bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
}
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -235,6 +219,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
u32 serr_int = intel_de_read(dev_priv, SERR_INT);
@@ -248,7 +233,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
intel_de_posting_read(dev_priv, SERR_INT);
- trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
+ trace_intel_pch_fifo_underrun(display, pch_transcoder);
drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n",
pipe_name(pch_transcoder));
}
@@ -282,8 +267,9 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
bool old;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -351,8 +337,9 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pch_transcoder,
bool enable)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv, pch_transcoder);
+ intel_crtc_for_pipe(display, pch_transcoder);
unsigned long flags;
bool old;
@@ -395,8 +382,8 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
- u32 underruns = 0;
+ struct intel_display *display = &dev_priv->display;
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
/* We may be called too early in init, thanks BIOS! */
if (crtc == NULL)
@@ -407,37 +394,10 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
crtc->cpu_fifo_underrun_disabled)
return;
- /*
- * Starting with display version 11, the PIPE_STAT register records
- * whether an underrun has happened, and on XELPD+, it will also record
- * whether the underrun was soft/hard and whether it was triggered by
- * the downstream port logic. We should clear these bits (which use
- * write-1-to-clear logic) too.
- *
- * Note that although the IIR gives us the same underrun and soft/hard
- * information, PIPE_STAT is the only place we can find out whether
- * the underrun was caused by the downstream port.
- */
- if (DISPLAY_VER(dev_priv) >= 11) {
- underruns = intel_de_read(dev_priv,
- ICL_PIPESTATUS(dev_priv, pipe)) &
- icl_pipe_status_underrun_mask(dev_priv);
- intel_de_write(dev_priv, ICL_PIPESTATUS(dev_priv, pipe),
- underruns);
- }
-
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
- trace_intel_cpu_fifo_underrun(dev_priv, pipe);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun: %s%s%s%s\n",
- pipe_name(pipe),
- underruns & PIPE_STATUS_SOFT_UNDERRUN_XELPD ? "soft," : "",
- underruns & PIPE_STATUS_HARD_UNDERRUN_XELPD ? "hard," : "",
- underruns & PIPE_STATUS_PORT_UNDERRUN_XELPD ? "port," : "",
- underruns & PIPE_STATUS_UNDERRUN ? "transcoder," : "");
- else
- drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
+ trace_intel_cpu_fifo_underrun(display, pipe);
+
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
}
intel_fbc_handle_fifo_underrun_irq(&dev_priv->display);
@@ -455,9 +415,11 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pch_transcoder)
{
+ struct intel_display *display = &dev_priv->display;
+
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false)) {
- trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
+ trace_intel_pch_fifo_underrun(display, pch_transcoder);
drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n",
pipe_name(pch_transcoder));
}
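Most hunks in this file repeat one mechanical conversion: derive a struct intel_display up front and hand that, rather than dev_priv, to intel_crtc_for_pipe() and the underrun tracepoints. The shape of the conversion, with names as in the diff and the body trimmed to the relevant calls:

static void example_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_display *display = &dev_priv->display;
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);

	if (crtc && !crtc->cpu_fifo_underrun_disabled)
		trace_intel_cpu_fifo_underrun(display, pipe);
}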
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index af4576dee92a..6ed5f726ee60 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -55,9 +55,11 @@
* cancelled as soon as busyness is detected.
*/
-#include "gem/i915_gem_object_frontbuffer.h"
+#include <drm/drm_gem.h>
+
#include "i915_active.h"
#include "i915_drv.h"
+#include "intel_bo.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -93,7 +95,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
if (!frontbuffer_bits)
return;
- trace_intel_frontbuffer_flush(i915, frontbuffer_bits, origin);
+ trace_intel_frontbuffer_flush(display, frontbuffer_bits, origin);
might_sleep();
intel_td_flush(i915);
@@ -173,17 +175,17 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
- struct drm_i915_private *i915 = intel_bo_to_i915(front->obj);
- struct intel_display *display = &i915->display;
+ struct intel_display *display = to_intel_display(front->obj->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (origin == ORIGIN_CS) {
- spin_lock(&i915->display.fb_tracking.lock);
- i915->display.fb_tracking.busy_bits |= frontbuffer_bits;
- i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
+ display->fb_tracking.busy_bits |= frontbuffer_bits;
+ display->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
}
- trace_intel_frontbuffer_invalidate(i915, frontbuffer_bits, origin);
+ trace_intel_frontbuffer_invalidate(display, frontbuffer_bits, origin);
might_sleep();
intel_psr_invalidate(display, frontbuffer_bits, origin);
@@ -195,14 +197,15 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
- struct drm_i915_private *i915 = intel_bo_to_i915(front->obj);
+ struct intel_display *display = to_intel_display(front->obj->dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (origin == ORIGIN_CS) {
- spin_lock(&i915->display.fb_tracking.lock);
+ spin_lock(&display->fb_tracking.lock);
/* Filter out new bits since rendering started. */
- frontbuffer_bits &= i915->display.fb_tracking.busy_bits;
- i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->display.fb_tracking.lock);
+ frontbuffer_bits &= display->fb_tracking.busy_bits;
+ display->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&display->fb_tracking.lock);
}
if (frontbuffer_bits)
@@ -214,7 +217,7 @@ static void intel_frontbuffer_flush_work(struct work_struct *work)
struct intel_frontbuffer *front =
container_of(work, struct intel_frontbuffer, flush_work);
- i915_gem_object_flush_if_display(front->obj);
+ intel_bo_flush_if_display(front->obj);
intel_frontbuffer_flush(front, ORIGIN_DIRTYFB);
intel_frontbuffer_put(front);
}
@@ -255,31 +258,32 @@ static void frontbuffer_retire(struct i915_active *ref)
}
static void frontbuffer_release(struct kref *ref)
- __releases(&intel_bo_to_i915(front->obj)->display.fb_tracking.lock)
+ __releases(&to_intel_display(front->obj->dev)->fb_tracking.lock)
{
struct intel_frontbuffer *ret, *front =
container_of(ref, typeof(*front), ref);
- struct drm_i915_gem_object *obj = front->obj;
+ struct drm_gem_object *obj = front->obj;
+ struct intel_display *display = to_intel_display(obj->dev);
- drm_WARN_ON(&intel_bo_to_i915(obj)->drm, atomic_read(&front->bits));
+ drm_WARN_ON(display->drm, atomic_read(&front->bits));
- i915_ggtt_clear_scanout(obj);
+ i915_ggtt_clear_scanout(to_intel_bo(obj));
- ret = i915_gem_object_set_frontbuffer(obj, NULL);
- drm_WARN_ON(&intel_bo_to_i915(obj)->drm, ret);
- spin_unlock(&intel_bo_to_i915(obj)->display.fb_tracking.lock);
+ ret = intel_bo_set_frontbuffer(obj, NULL);
+ drm_WARN_ON(display->drm, ret);
+ spin_unlock(&display->fb_tracking.lock);
i915_active_fini(&front->write);
kfree_rcu(front, rcu);
}
struct intel_frontbuffer *
-intel_frontbuffer_get(struct drm_i915_gem_object *obj)
+intel_frontbuffer_get(struct drm_gem_object *obj)
{
- struct drm_i915_private *i915 = intel_bo_to_i915(obj);
+ struct drm_i915_private *i915 = to_i915(obj->dev);
struct intel_frontbuffer *front, *cur;
- front = i915_gem_object_get_frontbuffer(obj);
+ front = intel_bo_get_frontbuffer(obj);
if (front)
return front;
@@ -297,7 +301,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work);
spin_lock(&i915->display.fb_tracking.lock);
- cur = i915_gem_object_set_frontbuffer(obj, front);
+ cur = intel_bo_set_frontbuffer(obj, front);
spin_unlock(&i915->display.fb_tracking.lock);
if (cur != front)
kfree(front);
@@ -308,7 +312,7 @@ void intel_frontbuffer_put(struct intel_frontbuffer *front)
{
kref_put_lock(&front->ref,
frontbuffer_release,
- &intel_bo_to_i915(front->obj)->display.fb_tracking.lock);
+ &to_intel_display(front->obj->dev)->fb_tracking.lock);
}
/**
@@ -337,13 +341,17 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE);
if (old) {
- drm_WARN_ON(&intel_bo_to_i915(old->obj)->drm,
+ struct intel_display *display = to_intel_display(old->obj->dev);
+
+ drm_WARN_ON(display->drm,
!(atomic_read(&old->bits) & frontbuffer_bits));
atomic_andnot(frontbuffer_bits, &old->bits);
}
if (new) {
- drm_WARN_ON(&intel_bo_to_i915(new->obj)->drm,
+ struct intel_display *display = to_intel_display(new->obj->dev);
+
+ drm_WARN_ON(display->drm,
atomic_read(&new->bits) & frontbuffer_bits);
atomic_or(frontbuffer_bits, &new->bits);
}
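intel_frontbuffer_track() above moves plane ownership bits between buffers lock-free. The core of that hand-off, isolated into plain C with the same atomic helpers (structs trimmed to the one field involved):

#include <linux/atomic.h>

struct example_frontbuffer {
	atomic_t bits;	/* one bit per plane/pipe slot */
};

static void example_track(struct example_frontbuffer *old,
			  struct example_frontbuffer *new,
			  unsigned int frontbuffer_bits)
{
	if (old)
		atomic_andnot(frontbuffer_bits, &old->bits);	/* release on the old buffer */
	if (new)
		atomic_or(frontbuffer_bits, &new->bits);	/* claim on the new buffer */
}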
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index abb51e8bb920..6237780a9f68 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -30,6 +30,7 @@
#include "i915_active_types.h"
+struct drm_gem_object;
struct drm_i915_private;
enum fb_op_origin {
@@ -44,7 +45,7 @@ struct intel_frontbuffer {
struct kref ref;
atomic_t bits;
struct i915_active write;
- struct drm_i915_gem_object *obj;
+ struct drm_gem_object *obj;
struct rcu_head rcu;
struct work_struct flush_work;
@@ -77,7 +78,7 @@ void intel_frontbuffer_flip(struct drm_i915_private *i915,
void intel_frontbuffer_put(struct intel_frontbuffer *front);
struct intel_frontbuffer *
-intel_frontbuffer_get(struct drm_i915_gem_object *obj);
+intel_frontbuffer_get(struct drm_gem_object *obj);
void __intel_fb_invalidate(struct intel_frontbuffer *front,
enum fb_op_origin origin,
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index cbcd1e91b7be..8a49e2bb37fa 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -75,7 +75,7 @@ intel_atomic_global_state_get(struct intel_global_state *obj_state)
return obj_state;
}
-void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+void intel_atomic_global_obj_init(struct intel_display *display,
struct intel_global_obj *obj,
struct intel_global_state *state,
const struct intel_global_state_funcs *funcs)
@@ -88,26 +88,26 @@ void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
obj->state = state;
obj->funcs = funcs;
- list_add_tail(&obj->head, &dev_priv->display.global.obj_list);
+ list_add_tail(&obj->head, &display->global.obj_list);
}
-void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
+void intel_atomic_global_obj_cleanup(struct intel_display *display)
{
struct intel_global_obj *obj, *next;
- list_for_each_entry_safe(obj, next, &dev_priv->display.global.obj_list, head) {
+ list_for_each_entry_safe(obj, next, &display->global.obj_list, head) {
list_del(&obj->head);
- drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1);
+ drm_WARN_ON(display->drm, kref_read(&obj->state->ref) != 1);
intel_atomic_global_state_put(obj->state);
}
}
-static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
+static void assert_global_state_write_locked(struct intel_display *display)
{
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
drm_modeset_lock_assert_held(&crtc->base.mutex);
}
@@ -126,23 +126,23 @@ static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
static void assert_global_state_read_locked(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
if (modeset_lock_is_held(ctx, &crtc->base.mutex))
return;
}
- drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
+ drm_WARN(display->drm, 1, "Global state not read locked\n");
}
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
struct intel_global_obj *obj)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
int index, num_objs, i;
size_t size;
struct __intel_global_objs_state *arr;
@@ -184,7 +184,7 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
state->num_global_objs = num_objs;
- drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
+ drm_dbg_atomic(display->drm, "Added new global object %p state %p to %p\n",
obj, obj_state, state);
return obj_state;
@@ -218,14 +218,14 @@ intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
void intel_atomic_swap_global_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *old_obj_state, *new_obj_state;
struct intel_global_obj *obj;
int i;
for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
new_obj_state, i) {
- drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);
+ drm_WARN_ON(display->drm, obj->state != old_obj_state);
/*
* If the new state wasn't modified (and properly
@@ -234,7 +234,7 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
if (!new_obj_state->changed)
continue;
- assert_global_state_write_locked(dev_priv);
+ assert_global_state_write_locked(display);
old_obj_state->state = state;
new_obj_state->state = NULL;
@@ -265,10 +265,10 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state)
int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
{
struct intel_atomic_state *state = obj_state->state;
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
int ret;
ret = drm_modeset_lock(&crtc->base.mutex,
@@ -298,10 +298,10 @@ int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
bool
intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc)
+ for_each_intel_crtc(display->drm, crtc)
if (!intel_atomic_get_new_crtc_state(state, crtc))
return false;
return true;
@@ -344,7 +344,7 @@ intel_atomic_global_state_setup_commit(struct intel_atomic_state *state)
int
intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_global_state *old_obj_state;
struct intel_global_obj *obj;
int i;
@@ -358,7 +358,7 @@ intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state)
ret = wait_for_completion_timeout(&commit->done, 10 * HZ);
if (ret == 0) {
- drm_err(&i915->drm, "global state timed out\n");
+ drm_err(display->drm, "global state timed out\n");
return -ETIMEDOUT;
}
}
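[Note: the whole intel_global_state.c conversion follows one mechanical pattern: derive a struct intel_display * once at the top of each function (via to_intel_display() or the new parameter) and reach the drm device as display->drm instead of &dev_priv->drm. A minimal user-space sketch of that shape, using made-up stand-in types rather than the real i915 structures:]

#include <stdio.h>

/* Stand-in types; the real driver uses struct intel_display,
 * struct intel_atomic_state and to_intel_display(). */
struct fake_drm_device { const char *name; };

struct fake_display {
	struct fake_drm_device *drm;	/* what display->drm resolves to */
};

struct fake_atomic_state {
	struct fake_display *display;
};

static struct fake_display *to_fake_display(struct fake_atomic_state *state)
{
	return state->display;
}

static void swap_global_state(struct fake_atomic_state *state)
{
	struct fake_display *display = to_fake_display(state);

	/* mirrors drm_dbg_atomic(display->drm, ...) in the hunks above */
	printf("[%s] swapping global obj state\n", display->drm->name);
}

int main(void)
{
	struct fake_drm_device drm = { .name = "card0" };
	struct fake_display display = { .drm = &drm };
	struct fake_atomic_state state = { .display = &display };

	swap_global_state(&state);
	return 0;
}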
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index 6506a8e32972..d42fb2547ee9 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -9,8 +9,8 @@
#include <linux/kref.h>
#include <linux/list.h>
-struct drm_i915_private;
struct intel_atomic_state;
+struct intel_display;
struct intel_global_obj;
struct intel_global_state;
@@ -69,11 +69,11 @@ struct __intel_global_objs_state {
struct intel_global_state *state, *old_state, *new_state;
};
-void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+void intel_atomic_global_obj_init(struct intel_display *display,
struct intel_global_obj *obj,
struct intel_global_state *state,
const struct intel_global_state_funcs *funcs);
-void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv);
+void intel_atomic_global_obj_cleanup(struct intel_display *display);
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
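[Note: the header side of the change is just swapping one forward declaration for another — only pointers cross this interface, so the full struct definition never needs to be visible here. A self-contained sketch of the same header/implementation split (names are illustrative):]

#include <stdio.h>

/* "Header" half: a forward declaration is all the interface needs,
 * exactly like replacing struct drm_i915_private; with
 * struct intel_display; above. */
struct display;
void global_obj_init(struct display *d);

/* "Implementation" half: the full type lives outside the header. */
struct display { int num_objs; };

void global_obj_init(struct display *d)
{
	d->num_objs++;
	printf("objs: %d\n", d->num_objs);
}

int main(void)
{
	struct display d = { 0 };

	global_obj_init(&d);
	return 0;
}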
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 6470f75106bd..807cf606e7a8 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -48,7 +48,7 @@ struct intel_gmbus {
u32 reg0;
i915_reg_t gpio_reg;
struct i2c_algo_bit_data bit_algo;
- struct drm_i915_private *i915;
+ struct intel_display *display;
};
enum gmbus_gpio {
@@ -149,9 +149,10 @@ static const struct gmbus_pin gmbus_pins_mtp[] = {
[GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
};
-static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
+static const struct gmbus_pin *get_gmbus_pin(struct intel_display *display,
unsigned int pin)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct gmbus_pin *pins;
size_t size;
@@ -173,7 +174,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
pins = gmbus_pins_bxt;
size = ARRAY_SIZE(gmbus_pins_bxt);
- } else if (DISPLAY_VER(i915) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
pins = gmbus_pins_skl;
size = ARRAY_SIZE(gmbus_pins_skl);
} else if (IS_BROADWELL(i915)) {
@@ -190,9 +191,9 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
return &pins[pin];
}
-bool intel_gmbus_is_valid_pin(struct drm_i915_private *i915, unsigned int pin)
+bool intel_gmbus_is_valid_pin(struct intel_display *display, unsigned int pin)
{
- return get_gmbus_pin(i915, pin);
+ return get_gmbus_pin(display, pin);
}
/* Intel GPIO access functions */
@@ -206,42 +207,45 @@ to_intel_gmbus(struct i2c_adapter *i2c)
}
void
-intel_gmbus_reset(struct drm_i915_private *i915)
+intel_gmbus_reset(struct intel_display *display)
{
- intel_de_write(i915, GMBUS0(i915), 0);
- intel_de_write(i915, GMBUS4(i915), 0);
+ intel_de_write(display, GMBUS0(display), 0);
+ intel_de_write(display, GMBUS4(display), 0);
}
-static void pnv_gmbus_clock_gating(struct drm_i915_private *i915,
+static void pnv_gmbus_clock_gating(struct intel_display *display,
bool enable)
{
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE,
+ intel_de_rmw(display, DSPCLK_GATE_D(display),
+ PNV_GMBUSUNIT_CLOCK_GATE_DISABLE,
!enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
-static void pch_gmbus_clock_gating(struct drm_i915_private *i915,
+static void pch_gmbus_clock_gating(struct intel_display *display,
bool enable)
{
- intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE,
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
+ PCH_GMBUSUNIT_CLOCK_GATE_DISABLE,
!enable ? PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
-static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
+static void bxt_gmbus_clock_gating(struct intel_display *display,
bool enable)
{
- intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS,
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS,
!enable ? BXT_GMBUS_GATING_DIS : 0);
}
static u32 get_reserved(struct intel_gmbus *bus)
{
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(i915) && !IS_I845G(i915))
- reserved = intel_de_read_notrace(i915, bus->gpio_reg) &
+ reserved = intel_de_read_notrace(display, bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
@@ -250,31 +254,31 @@ static u32 get_reserved(struct intel_gmbus *bus)
static int get_clock(void *data)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
u32 reserved = get_reserved(bus);
- intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
- intel_de_write_notrace(i915, bus->gpio_reg, reserved);
+ intel_de_write_notrace(display, bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
+ intel_de_write_notrace(display, bus->gpio_reg, reserved);
- return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
+ return (intel_de_read_notrace(display, bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
u32 reserved = get_reserved(bus);
- intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
- intel_de_write_notrace(i915, bus->gpio_reg, reserved);
+ intel_de_write_notrace(display, bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
+ intel_de_write_notrace(display, bus->gpio_reg, reserved);
- return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
+ return (intel_de_read_notrace(display, bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
u32 reserved = get_reserved(bus);
u32 clock_bits;
@@ -284,14 +288,14 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- intel_de_write_notrace(i915, bus->gpio_reg, reserved | clock_bits);
- intel_de_posting_read(i915, bus->gpio_reg);
+ intel_de_write_notrace(display, bus->gpio_reg, reserved | clock_bits);
+ intel_de_posting_read(display, bus->gpio_reg);
}
static void set_data(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
u32 reserved = get_reserved(bus);
u32 data_bits;
@@ -301,20 +305,21 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- intel_de_write_notrace(i915, bus->gpio_reg, reserved | data_bits);
- intel_de_posting_read(i915, bus->gpio_reg);
+ intel_de_write_notrace(display, bus->gpio_reg, reserved | data_bits);
+ intel_de_posting_read(display, bus->gpio_reg);
}
static int
intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
- intel_gmbus_reset(i915);
+ intel_gmbus_reset(display);
if (IS_PINEVIEW(i915))
- pnv_gmbus_clock_gating(i915, false);
+ pnv_gmbus_clock_gating(display, false);
set_data(bus, 1);
set_clock(bus, 1);
@@ -326,13 +331,14 @@ static void
intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
set_data(bus, 1);
set_clock(bus, 1);
if (IS_PINEVIEW(i915))
- pnv_gmbus_clock_gating(i915, true);
+ pnv_gmbus_clock_gating(display, true);
}
static void
@@ -355,16 +361,17 @@ intel_gpio_setup(struct intel_gmbus *bus, i915_reg_t gpio_reg)
algo->data = bus;
}
-static bool has_gmbus_irq(struct drm_i915_private *i915)
+static bool has_gmbus_irq(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
/*
* encoder->shutdown() may want to use GMBUS
* after irqs have already been disabled.
*/
- return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915);
+ return HAS_GMBUS_IRQ(display) && intel_irqs_enabled(i915);
}
-static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en)
+static int gmbus_wait(struct intel_display *display, u32 status, u32 irq_en)
{
DEFINE_WAIT(wait);
u32 gmbus2;
@@ -374,21 +381,21 @@ static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en)
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves.
*/
- if (!has_gmbus_irq(i915))
+ if (!has_gmbus_irq(display))
irq_en = 0;
- add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
- intel_de_write_fw(i915, GMBUS4(i915), irq_en);
+ add_wait_queue(&display->gmbus.wait_queue, &wait);
+ intel_de_write_fw(display, GMBUS4(display), irq_en);
status |= GMBUS_SATOER;
- ret = wait_for_us((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status,
+ ret = wait_for_us((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status,
2);
if (ret)
- ret = wait_for((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status,
+ ret = wait_for((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status,
50);
- intel_de_write_fw(i915, GMBUS4(i915), 0);
- remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+ intel_de_write_fw(display, GMBUS4(display), 0);
+ remove_wait_queue(&display->gmbus.wait_queue, &wait);
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
@@ -397,7 +404,7 @@ static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en)
}
static int
-gmbus_wait_idle(struct drm_i915_private *i915)
+gmbus_wait_idle(struct intel_display *display)
{
DEFINE_WAIT(wait);
u32 irq_enable;
@@ -405,33 +412,33 @@ gmbus_wait_idle(struct drm_i915_private *i915)
/* Important: The hw handles only the first bit, so set only one! */
irq_enable = 0;
- if (has_gmbus_irq(i915))
+ if (has_gmbus_irq(display))
irq_enable = GMBUS_IDLE_EN;
- add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
- intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
+ add_wait_queue(&display->gmbus.wait_queue, &wait);
+ intel_de_write_fw(display, GMBUS4(display), irq_enable);
- ret = intel_de_wait_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
+ ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10);
- intel_de_write_fw(i915, GMBUS4(i915), 0);
- remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+ intel_de_write_fw(display, GMBUS4(display), 0);
+ remove_wait_queue(&display->gmbus.wait_queue, &wait);
return ret;
}
-static unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915)
+static unsigned int gmbus_max_xfer_size(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+ return DISPLAY_VER(display) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
GMBUS_BYTE_COUNT_MAX;
}
static int
-gmbus_xfer_read_chunk(struct drm_i915_private *i915,
+gmbus_xfer_read_chunk(struct intel_display *display,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus0_reg, u32 gmbus1_index)
{
unsigned int size = len;
- bool burst_read = len > gmbus_max_xfer_size(i915);
+ bool burst_read = len > gmbus_max_xfer_size(display);
bool extra_byte_added = false;
if (burst_read) {
@@ -444,21 +451,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915,
len++;
}
size = len % 256 + 256;
- intel_de_write_fw(i915, GMBUS0(i915),
+ intel_de_write_fw(display, GMBUS0(display),
gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
}
- intel_de_write_fw(i915, GMBUS1(i915),
+ intel_de_write_fw(display, GMBUS1(display),
gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
u32 val, loop = 0;
- ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+ ret = gmbus_wait(display, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
return ret;
- val = intel_de_read_fw(i915, GMBUS3(i915));
+ val = intel_de_read_fw(display, GMBUS3(display));
do {
if (extra_byte_added && len == 1)
break;
@@ -469,7 +476,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915,
if (burst_read && len == size - 4)
/* Reset the override bit */
- intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg);
+ intel_de_write_fw(display, GMBUS0(display), gmbus0_reg);
}
return 0;
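[Note: in the burst-read path above, the byte count programmed with GMBUS_BYTE_CNT_OVERRIDE is the slightly non-obvious (len % 256) + 256. A two-line check to make that arithmetic concrete (767 is INTEL_GMBUS_BURST_READ_MAX_LEN, defined later in this file):]

#include <stdio.h>

int main(void)
{
	unsigned int len = 767;	/* INTEL_GMBUS_BURST_READ_MAX_LEN */

	/* size as programmed when GMBUS_BYTE_CNT_OVERRIDE is set */
	printf("len=%u -> size=%u\n", len, len % 256 + 256);	/* 511 */
	return 0;
}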
@@ -486,7 +493,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *i915,
#define INTEL_GMBUS_BURST_READ_MAX_LEN 767U
static int
-gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg,
+gmbus_xfer_read(struct intel_display *display, struct i2c_msg *msg,
u32 gmbus0_reg, u32 gmbus1_index)
{
u8 *buf = msg->buf;
@@ -495,12 +502,12 @@ gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg,
int ret;
do {
- if (HAS_GMBUS_BURST_READ(i915))
+ if (HAS_GMBUS_BURST_READ(display))
len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
else
- len = min(rx_size, gmbus_max_xfer_size(i915));
+ len = min(rx_size, gmbus_max_xfer_size(display));
- ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len,
+ ret = gmbus_xfer_read_chunk(display, msg->addr, buf, len,
gmbus0_reg, gmbus1_index);
if (ret)
return ret;
@@ -513,7 +520,7 @@ gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg,
}
static int
-gmbus_xfer_write_chunk(struct drm_i915_private *i915,
+gmbus_xfer_write_chunk(struct intel_display *display,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus1_index)
{
@@ -526,8 +533,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *i915,
len -= 1;
}
- intel_de_write_fw(i915, GMBUS3(i915), val);
- intel_de_write_fw(i915, GMBUS1(i915),
+ intel_de_write_fw(display, GMBUS3(display), val);
+ intel_de_write_fw(display, GMBUS1(display),
gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -537,9 +544,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *i915,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
- intel_de_write_fw(i915, GMBUS3(i915), val);
+ intel_de_write_fw(display, GMBUS3(display), val);
- ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+ ret = gmbus_wait(display, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
return ret;
}
@@ -548,7 +555,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *i915,
}
static int
-gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg,
+gmbus_xfer_write(struct intel_display *display, struct i2c_msg *msg,
u32 gmbus1_index)
{
u8 *buf = msg->buf;
@@ -557,9 +564,9 @@ gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg,
int ret;
do {
- len = min(tx_size, gmbus_max_xfer_size(i915));
+ len = min(tx_size, gmbus_max_xfer_size(display));
- ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len,
+ ret = gmbus_xfer_write_chunk(display, msg->addr, buf, len,
gmbus1_index);
if (ret)
return ret;
@@ -586,7 +593,7 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
}
static int
-gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs,
+gmbus_index_xfer(struct intel_display *display, struct i2c_msg *msgs,
u32 gmbus0_reg)
{
u32 gmbus1_index = 0;
@@ -602,17 +609,17 @@ gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs,
/* GMBUS5 holds 16-bit index */
if (gmbus5)
- intel_de_write_fw(i915, GMBUS5(i915), gmbus5);
+ intel_de_write_fw(display, GMBUS5(display), gmbus5);
if (msgs[1].flags & I2C_M_RD)
- ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg,
+ ret = gmbus_xfer_read(display, &msgs[1], gmbus0_reg,
gmbus1_index);
else
- ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index);
+ ret = gmbus_xfer_write(display, &msgs[1], gmbus1_index);
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
- intel_de_write_fw(i915, GMBUS5(i915), 0);
+ intel_de_write_fw(display, GMBUS5(display), 0);
return ret;
}
@@ -622,34 +629,35 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
u32 gmbus0_source)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
int i = 0, inc, try = 0;
int ret = 0;
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- bxt_gmbus_clock_gating(i915, false);
+ bxt_gmbus_clock_gating(display, false);
else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
- pch_gmbus_clock_gating(i915, false);
+ pch_gmbus_clock_gating(display, false);
retry:
- intel_de_write_fw(i915, GMBUS0(i915), gmbus0_source | bus->reg0);
+ intel_de_write_fw(display, GMBUS0(display), gmbus0_source | bus->reg0);
for (; i < num; i += inc) {
inc = 1;
if (gmbus_is_index_xfer(msgs, i, num)) {
- ret = gmbus_index_xfer(i915, &msgs[i],
+ ret = gmbus_index_xfer(display, &msgs[i],
gmbus0_source | bus->reg0);
inc = 2; /* an index transmission is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
- ret = gmbus_xfer_read(i915, &msgs[i],
+ ret = gmbus_xfer_read(display, &msgs[i],
gmbus0_source | bus->reg0, 0);
} else {
- ret = gmbus_xfer_write(i915, &msgs[i], 0);
+ ret = gmbus_xfer_write(display, &msgs[i], 0);
}
if (!ret)
- ret = gmbus_wait(i915,
+ ret = gmbus_wait(display,
GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
if (ret == -ETIMEDOUT)
goto timeout;
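[Note: gmbus_is_index_xfer()/inc = 2 above treat a short write followed by a read to the same address as one indexed transaction. A user-space sketch of that two-message shape — plain stand-ins, not the kernel's struct i2c_msg; 0x50 is the usual DDC/EDID address:]

#include <stdint.h>
#include <stdio.h>

struct msg {
	uint16_t addr;
	int is_read;		/* stands in for I2C_M_RD */
	uint16_t len;
	uint8_t *buf;
};

int main(void)
{
	uint8_t index = 0x00;	/* register offset to read from */
	uint8_t data[4];
	struct msg msgs[] = {
		{ .addr = 0x50, .is_read = 0, .len = 1, .buf = &index },
		{ .addr = 0x50, .is_read = 1, .len = sizeof(data), .buf = data },
	};

	/* the loop above consumes both messages at once: inc = 2 */
	printf("msgs per index xfer: %zu\n", sizeof(msgs) / sizeof(msgs[0]));
	return 0;
}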
@@ -661,19 +669,19 @@ retry:
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
- intel_de_write_fw(i915, GMBUS1(i915), GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+ intel_de_write_fw(display, GMBUS1(display), GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
- if (gmbus_wait_idle(i915)) {
- drm_dbg_kms(&i915->drm,
+ if (gmbus_wait_idle(display)) {
+ drm_dbg_kms(display->drm,
"GMBUS [%s] timed out waiting for idle\n",
adapter->name);
ret = -ETIMEDOUT;
}
- intel_de_write_fw(i915, GMBUS0(i915), 0);
+ intel_de_write_fw(display, GMBUS0(display), 0);
ret = ret ?: i;
goto out;
@@ -692,8 +700,8 @@ clear_err:
* it's slow responding and only answers on the 2nd retry.
*/
ret = -ENXIO;
- if (gmbus_wait_idle(i915)) {
- drm_dbg_kms(&i915->drm,
+ if (gmbus_wait_idle(display)) {
+ drm_dbg_kms(display->drm,
"GMBUS [%s] timed out after NAK\n",
adapter->name);
ret = -ETIMEDOUT;
@@ -703,11 +711,11 @@ clear_err:
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the target's NAK.
*/
- intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT);
- intel_de_write_fw(i915, GMBUS1(i915), 0);
- intel_de_write_fw(i915, GMBUS0(i915), 0);
+ intel_de_write_fw(display, GMBUS1(display), GMBUS_SW_CLR_INT);
+ intel_de_write_fw(display, GMBUS1(display), 0);
+ intel_de_write_fw(display, GMBUS0(display), 0);
- drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ drm_dbg_kms(display->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
@@ -718,7 +726,7 @@ clear_err:
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"GMBUS [%s] NAK on first message, retry\n",
adapter->name);
goto retry;
@@ -727,10 +735,10 @@ clear_err:
goto out;
timeout:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
- intel_de_write_fw(i915, GMBUS0(i915), 0);
+ intel_de_write_fw(display, GMBUS0(display), 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -741,9 +749,9 @@ timeout:
out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- bxt_gmbus_clock_gating(i915, true);
+ bxt_gmbus_clock_gating(display, true);
else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
- pch_gmbus_clock_gating(i915, true);
+ pch_gmbus_clock_gating(display, true);
return ret;
}
@@ -752,7 +760,8 @@ static int
gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
int ret;
@@ -776,7 +785,8 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
u8 cmd = DRM_HDCP_DDC_AKSV;
u8 buf[DRM_HDCP_KSV_LEN] = {};
struct i2c_msg msgs[] = {
@@ -797,7 +807,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
int ret;
wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
- mutex_lock(&i915->display.gmbus.mutex);
+ mutex_lock(&display->gmbus.mutex);
/*
* In order to output Aksv to the receiver, use an indexed write to
@@ -806,7 +816,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
*/
ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
- mutex_unlock(&i915->display.gmbus.mutex);
+ mutex_unlock(&display->gmbus.mutex);
intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
return ret;
@@ -830,27 +840,27 @@ static void gmbus_lock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
- mutex_lock(&i915->display.gmbus.mutex);
+ mutex_lock(&display->gmbus.mutex);
}
static int gmbus_trylock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
- return mutex_trylock(&i915->display.gmbus.mutex);
+ return mutex_trylock(&display->gmbus.mutex);
}
static void gmbus_unlock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
- mutex_unlock(&i915->display.gmbus.mutex);
+ mutex_unlock(&display->gmbus.mutex);
}
static const struct i2c_lock_operations gmbus_lock_ops = {
@@ -861,31 +871,32 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
/**
* intel_gmbus_setup - instantiate all Intel i2c GMBuses
- * @i915: i915 device private
+ * @display: display device
*/
-int intel_gmbus_setup(struct drm_i915_private *i915)
+int intel_gmbus_setup(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
unsigned int pin;
int ret;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display.gmbus.mmio_base = VLV_DISPLAY_BASE;
- else if (!HAS_GMCH(i915))
+ display->gmbus.mmio_base = VLV_DISPLAY_BASE;
+ else if (!HAS_GMCH(display))
/*
* Broxton uses the same PCH offsets for South Display Engine,
* even though it doesn't have a PCH.
*/
- i915->display.gmbus.mmio_base = PCH_DISPLAY_BASE;
+ display->gmbus.mmio_base = PCH_DISPLAY_BASE;
- mutex_init(&i915->display.gmbus.mutex);
- init_waitqueue_head(&i915->display.gmbus.wait_queue);
+ mutex_init(&display->gmbus.mutex);
+ init_waitqueue_head(&display->gmbus.wait_queue);
- for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) {
+ for (pin = 0; pin < ARRAY_SIZE(display->gmbus.bus); pin++) {
const struct gmbus_pin *gmbus_pin;
struct intel_gmbus *bus;
- gmbus_pin = get_gmbus_pin(i915, pin);
+ gmbus_pin = get_gmbus_pin(display, pin);
if (!gmbus_pin)
continue;
@@ -901,7 +912,7 @@ int intel_gmbus_setup(struct drm_i915_private *i915)
"i915 gmbus %s", gmbus_pin->name);
bus->adapter.dev.parent = &pdev->dev;
- bus->i915 = i915;
+ bus->display = display;
bus->adapter.algo = &gmbus_algorithm;
bus->adapter.lock_ops = &gmbus_lock_ops;
@@ -919,7 +930,7 @@ int intel_gmbus_setup(struct drm_i915_private *i915)
if (IS_I830(i915))
bus->force_bit = 1;
- intel_gpio_setup(bus, GPIO(i915, gmbus_pin->gpio));
+ intel_gpio_setup(bus, GPIO(display, gmbus_pin->gpio));
ret = i2c_add_adapter(&bus->adapter);
if (ret) {
@@ -927,43 +938,43 @@ int intel_gmbus_setup(struct drm_i915_private *i915)
goto err;
}
- i915->display.gmbus.bus[pin] = bus;
+ display->gmbus.bus[pin] = bus;
}
- intel_gmbus_reset(i915);
+ intel_gmbus_reset(display);
return 0;
err:
- intel_gmbus_teardown(i915);
+ intel_gmbus_teardown(display);
return ret;
}
-struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915,
+struct i2c_adapter *intel_gmbus_get_adapter(struct intel_display *display,
unsigned int pin)
{
- if (drm_WARN_ON(&i915->drm, pin >= ARRAY_SIZE(i915->display.gmbus.bus) ||
- !i915->display.gmbus.bus[pin]))
+ if (drm_WARN_ON(display->drm, pin >= ARRAY_SIZE(display->gmbus.bus) ||
+ !display->gmbus.bus[pin]))
return NULL;
- return &i915->display.gmbus.bus[pin]->adapter;
+ return &display->gmbus.bus[pin]->adapter;
}
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *i915 = bus->i915;
+ struct intel_display *display = bus->display;
- mutex_lock(&i915->display.gmbus.mutex);
+ mutex_lock(&display->gmbus.mutex);
bus->force_bit += force_bit ? 1 : -1;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"%sabling bit-banging on %s. force bit now %d\n",
force_bit ? "en" : "dis", adapter->name,
bus->force_bit);
- mutex_unlock(&i915->display.gmbus.mutex);
+ mutex_unlock(&display->gmbus.mutex);
}
bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -973,25 +984,25 @@ bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
return bus->force_bit;
}
-void intel_gmbus_teardown(struct drm_i915_private *i915)
+void intel_gmbus_teardown(struct intel_display *display)
{
unsigned int pin;
- for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) {
+ for (pin = 0; pin < ARRAY_SIZE(display->gmbus.bus); pin++) {
struct intel_gmbus *bus;
- bus = i915->display.gmbus.bus[pin];
+ bus = display->gmbus.bus[pin];
if (!bus)
continue;
i2c_del_adapter(&bus->adapter);
kfree(bus);
- i915->display.gmbus.bus[pin] = NULL;
+ display->gmbus.bus[pin] = NULL;
}
}
-void intel_gmbus_irq_handler(struct drm_i915_private *i915)
+void intel_gmbus_irq_handler(struct intel_display *display)
{
- wake_up_all(&i915->display.gmbus.wait_queue);
+ wake_up_all(&display->gmbus.wait_queue);
}
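[Note: a recurring shape in this file: struct intel_gmbus now stores display, and functions that still need platform checks (IS_PINEVIEW(), HAS_PCH_SPT(), ...) re-derive the i915 pointer locally via to_i915(display->drm). A toy model of that layering — to_i915() is faked as a cast here, which only works because the stand-in embeds the drm device as its first member; the kernel uses container_of():]

#include <stdbool.h>
#include <stdio.h>

struct drm_dev { int platform; };
struct display { struct drm_dev *drm; };
struct i915_dev { struct drm_dev drm; };	/* drm embedded first */

enum { PLATFORM_PINEVIEW = 1 };

static struct i915_dev *fake_to_i915(struct drm_dev *drm)
{
	return (struct i915_dev *)drm;
}

static void pre_xfer(struct display *display)
{
	struct i915_dev *i915 = fake_to_i915(display->drm);

	if (i915->drm.platform == PLATFORM_PINEVIEW)
		printf("pnv: disable gmbus clock gating\n");
}

int main(void)
{
	struct i915_dev dev = { .drm = { .platform = PLATFORM_PINEVIEW } };
	struct display disp = { .drm = &dev.drm };

	pre_xfer(&disp);
	return 0;
}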
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h
index 8111eb23e2af..35a200a9efc0 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.h
@@ -8,8 +8,8 @@
#include <linux/types.h>
-struct drm_i915_private;
struct i2c_adapter;
+struct intel_display;
#define GMBUS_PIN_DISABLED 0
#define GMBUS_PIN_SSC 1
@@ -34,18 +34,17 @@ struct i2c_adapter;
#define GMBUS_NUM_PINS 15 /* including 0 */
-int intel_gmbus_setup(struct drm_i915_private *dev_priv);
-void intel_gmbus_teardown(struct drm_i915_private *dev_priv);
-bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
- unsigned int pin);
+int intel_gmbus_setup(struct intel_display *display);
+void intel_gmbus_teardown(struct intel_display *display);
+bool intel_gmbus_is_valid_pin(struct intel_display *display, unsigned int pin);
int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
struct i2c_adapter *
-intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
+intel_gmbus_get_adapter(struct intel_display *display, unsigned int pin);
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter);
-void intel_gmbus_reset(struct drm_i915_private *dev_priv);
+void intel_gmbus_reset(struct intel_display *display);
-void intel_gmbus_irq_handler(struct drm_i915_private *i915);
+void intel_gmbus_irq_handler(struct intel_display *display);
#endif /* __INTEL_GMBUS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
index 53aacbda983c..59bad1dda6d6 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
@@ -8,9 +8,9 @@
#include "i915_reg_defs.h"
-#define GMBUS_MMIO_BASE(__i915) ((__i915)->display.gmbus.mmio_base)
+#define __GMBUS_MMIO_BASE(__display) ((__display)->gmbus.mmio_base)
-#define GPIO(__i915, gpio) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5010 + 4 * (gpio))
+#define GPIO(__display, gpio) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5010 + 4 * (gpio))
#define GPIO_CLOCK_DIR_MASK (1 << 0)
#define GPIO_CLOCK_DIR_IN (0 << 1)
#define GPIO_CLOCK_DIR_OUT (1 << 1)
@@ -27,7 +27,7 @@
#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
/* clock/port select */
-#define GMBUS0(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5100)
+#define GMBUS0(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5100)
#define GMBUS_AKSV_SELECT (1 << 11)
#define GMBUS_RATE_100KHZ (0 << 8)
#define GMBUS_RATE_50KHZ (1 << 8)
@@ -37,7 +37,7 @@
#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
/* command/status */
-#define GMBUS1(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5104)
+#define GMBUS1(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5104)
#define GMBUS_SW_CLR_INT (1 << 31)
#define GMBUS_SW_RDY (1 << 30)
#define GMBUS_ENT (1 << 29) /* enable timeout */
@@ -54,7 +54,7 @@
#define GMBUS_SLAVE_WRITE (0 << 0)
/* status */
-#define GMBUS2(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5108)
+#define GMBUS2(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5108)
#define GMBUS_INUSE (1 << 15)
#define GMBUS_HW_WAIT_PHASE (1 << 14)
#define GMBUS_STALL_TIMEOUT (1 << 13)
@@ -64,10 +64,10 @@
#define GMBUS_ACTIVE (1 << 9)
/* data buffer bytes 3-0 */
-#define GMBUS3(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x510c)
+#define GMBUS3(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x510c)
/* interrupt mask (Pineview+) */
-#define GMBUS4(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5110)
+#define GMBUS4(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5110)
#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
#define GMBUS_NAK_EN (1 << 3)
#define GMBUS_IDLE_EN (1 << 2)
@@ -75,7 +75,7 @@
#define GMBUS_HW_RDY_EN (1 << 0)
/* byte index */
-#define GMBUS5(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5120)
+#define GMBUS5(__display) _MMIO(__GMBUS_MMIO_BASE(__display) + 0x5120)
#define GMBUS_2BYTE_INDEX_EN (1 << 31)
#endif /* __INTEL_GMBUS_REGS_H__ */
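[Note: the renamed __GMBUS_MMIO_BASE()/GMBUSn() macros above only re-key the lookup from __i915 to __display; the offset math is unchanged. A user-space model of what the expansions compute — plain integers instead of _MMIO(); 0x180000 is VLV_DISPLAY_BASE:]

#include <stdint.h>
#include <stdio.h>

struct display { uint32_t mmio_base; };

#define GMBUS_MMIO_BASE(d)	((d)->mmio_base)
#define GPIO_REG(d, gpio)	(GMBUS_MMIO_BASE(d) + 0x5010 + 4 * (gpio))
#define GMBUS0_REG(d)		(GMBUS_MMIO_BASE(d) + 0x5100)

int main(void)
{
	struct display vlv = { .mmio_base = 0x180000 };	/* VLV_DISPLAY_BASE */

	printf("GPIO(2): 0x%x\n", GPIO_REG(&vlv, 2));	/* 0x185018 */
	printf("GMBUS0:  0x%x\n", GMBUS0_REG(&vlv));	/* 0x185100 */
	return 0;
}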
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 6980b98792c2..1bab7c34a794 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -25,32 +25,39 @@
#include "intel_hdcp.h"
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_regs.h"
+#include "intel_hdcp_shim.h"
#include "intel_pcode.h"
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
-/* WA: 16022217614 */
static void
-intel_hdcp_disable_hdcp_line_rekeying(struct intel_encoder *encoder,
- struct intel_hdcp *hdcp)
+intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
+ struct intel_hdcp *hdcp,
+ bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
+ i915_reg_t rekey_reg;
+ u32 rekey_bit = 0;
/* Here we assume HDMI is in TMDS mode of operation */
- if (encoder->type != INTEL_OUTPUT_HDMI)
+ if (!intel_encoder_is_hdmi(encoder))
return;
- if (DISPLAY_VER(dev_priv) >= 14) {
- if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_D0, STEP_FOREVER))
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(hdcp->cpu_transcoder),
- 0, HDCP_LINE_REKEY_DISABLE);
- else if (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 1), STEP_B0, STEP_FOREVER) ||
- IS_DISPLAY_VER_STEP(dev_priv, IP_VER(20, 0), STEP_B0, STEP_FOREVER))
- intel_de_rmw(dev_priv,
- TRANS_DDI_FUNC_CTL(dev_priv, hdcp->cpu_transcoder),
- 0, TRANS_DDI_HDCP_LINE_REKEY_DISABLE);
+ if (DISPLAY_VER(display) >= 30) {
+ rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
+ rekey_bit = XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
+ } else if (IS_DISPLAY_VERx100_STEP(display, 1401, STEP_B0, STEP_FOREVER) ||
+ IS_DISPLAY_VERx100_STEP(display, 2000, STEP_B0, STEP_FOREVER)) {
+ rekey_reg = TRANS_DDI_FUNC_CTL(display, hdcp->cpu_transcoder);
+ rekey_bit = TRANS_DDI_HDCP_LINE_REKEY_DISABLE;
+ } else if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_D0, STEP_FOREVER)) {
+ rekey_reg = CHICKEN_TRANS(display, hdcp->cpu_transcoder);
+ rekey_bit = HDCP_LINE_REKEY_DISABLE;
}
+
+ if (rekey_bit)
+ intel_de_rmw(display, rekey_reg, rekey_bit, enable ? 0 : rekey_bit);
}
static int intel_conn_to_vcpi(struct intel_atomic_state *state,
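[Note: the rework above turns a disable-only workaround into an enable/disable helper: select rekey_reg/rekey_bit once per display generation, then issue a single rmw. A compact sketch of that select-then-apply pattern — version thresholds follow the hunk, but register offsets and bit positions below are made up purely for illustration:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct display { int ver; };

static void rmw(uint32_t reg, uint32_t clear, uint32_t set)
{
	printf("rmw reg=0x%05x clear=0x%x set=0x%x\n", reg, clear, set);
}

static void adjust_rekeying(struct display *d, bool enable)
{
	uint32_t reg = 0, bit = 0;

	if (d->ver >= 30) {		/* Xe3+: TRANS_DDI_FUNC_CTL bit */
		reg = 0x60400;		/* illustrative offset */
		bit = 1u << 12;		/* illustrative bit */
	} else if (d->ver >= 14) {	/* MTL-era steppings, simplified */
		reg = 0x60440;		/* illustrative offset */
		bit = 1u << 15;		/* illustrative bit */
	}

	if (bit)			/* no-op where the WA doesn't apply */
		rmw(reg, bit, enable ? 0 : bit);
}

int main(void)
{
	struct display xe3 = { .ver = 30 };

	adjust_rekeying(&xe3, false);	/* disable line rekeying */
	return 0;
}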
@@ -95,10 +102,10 @@ static int
intel_hdcp_required_content_stream(struct intel_atomic_state *state,
struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_connector_list_iter conn_iter;
struct intel_digital_port *conn_dig_port;
struct intel_connector *connector;
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
bool enforce_type0 = false;
int k;
@@ -111,7 +118,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
if (!dig_port->hdcp_mst_type1_capable)
enforce_type0 = true;
- drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.status == connector_status_disconnected)
continue;
@@ -133,7 +140,7 @@ intel_hdcp_required_content_stream(struct intel_atomic_state *state,
}
drm_connector_list_iter_end(&conn_iter);
- if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
+ if (drm_WARN_ON(display->drm, data->k > INTEL_NUM_PIPES(display) || data->k == 0))
return -EINVAL;
/*
@@ -181,7 +188,7 @@ static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim, u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret, i, tries = 2;
/* HDCP spec states that we must retry the bksv if it is invalid */
@@ -193,7 +200,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
break;
}
if (i == tries) {
- drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
+ drm_dbg_kms(display->drm, "Bksv is invalid\n");
return -ENODEV;
}
@@ -232,7 +239,7 @@ bool intel_hdcp_get_capability(struct intel_connector *connector)
*/
static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
/* I915 support for HDCP2.2 */
@@ -240,18 +247,18 @@ static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
return false;
/* If MTL+ make sure gsc is loaded and proxy is setup */
- if (intel_hdcp_gsc_cs_required(i915)) {
- if (!intel_hdcp_gsc_check_status(i915))
+ if (intel_hdcp_gsc_cs_required(display)) {
+ if (!intel_hdcp_gsc_check_status(display))
return false;
}
/* MEI/GSC interface is solid depending on which is used */
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- if (!i915->display.hdcp.comp_added || !i915->display.hdcp.arbiter) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ if (!display->hdcp.comp_added || !display->hdcp.arbiter) {
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return false;
}
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return true;
}
@@ -287,19 +294,19 @@ void intel_hdcp_get_remote_capability(struct intel_connector *connector,
*hdcp2_capable = false;
}
-static bool intel_hdcp_in_use(struct drm_i915_private *i915,
+static bool intel_hdcp_in_use(struct intel_display *display,
enum transcoder cpu_transcoder, enum port port)
{
- return intel_de_read(i915,
- HDCP_STATUS(i915, cpu_transcoder, port)) &
+ return intel_de_read(display,
+ HDCP_STATUS(display, cpu_transcoder, port)) &
HDCP_STATUS_ENC;
}
-static bool intel_hdcp2_in_use(struct drm_i915_private *i915,
+static bool intel_hdcp2_in_use(struct intel_display *display,
enum transcoder cpu_transcoder, enum port port)
{
- return intel_de_read(i915,
- HDCP2_STATUS(i915, cpu_transcoder, port)) &
+ return intel_de_read(display,
+ HDCP2_STATUS(display, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS;
}
@@ -324,8 +331,9 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
return 0;
}
-static bool hdcp_key_loadable(struct drm_i915_private *i915)
+static bool hdcp_key_loadable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum i915_power_well_id id;
intel_wakeref_t wakeref;
bool enabled = false;
@@ -341,7 +349,7 @@ static bool hdcp_key_loadable(struct drm_i915_private *i915)
/* PG1 (power well #1) needs to be enabled */
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- enabled = intel_display_power_well_is_enabled(i915, id);
+ enabled = intel_display_power_well_is_enabled(display, id);
/*
* Another req for hdcp key loadability is enabled state of pll for
@@ -352,19 +360,20 @@ static bool hdcp_key_loadable(struct drm_i915_private *i915)
return enabled;
}
-static void intel_hdcp_clear_keys(struct drm_i915_private *i915)
+static void intel_hdcp_clear_keys(struct intel_display *display)
{
- intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
- intel_de_write(i915, HDCP_KEY_STATUS,
+ intel_de_write(display, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
+ intel_de_write(display, HDCP_KEY_STATUS,
HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
-static int intel_hdcp_load_keys(struct drm_i915_private *i915)
+static int intel_hdcp_load_keys(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
u32 val;
- val = intel_de_read(i915, HDCP_KEY_STATUS);
+ val = intel_de_read(display, HDCP_KEY_STATUS);
if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
return 0;
@@ -373,7 +382,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915)
* out of reset. So if Key is not already loaded, it's an error state.
*/
if (IS_HASWELL(i915) || IS_BROADWELL(i915))
- if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
+ if (!(intel_de_read(display, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
return -ENXIO;
/*
@@ -384,20 +393,20 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915)
* process from other platforms. These platforms use the GT Driver
* Mailbox interface.
*/
- if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) {
+ if (DISPLAY_VER(display) == 9 && !IS_BROXTON(i915)) {
ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to initiate HDCP key load (%d)\n",
ret);
return ret;
}
} else {
- intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
+ intel_de_write(display, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
}
/* Wait for the keys to load (500us) */
- ret = intel_de_wait_custom(i915, HDCP_KEY_STATUS,
+ ret = intel_de_wait_custom(display, HDCP_KEY_STATUS,
HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
10, 1, &val);
if (ret)
@@ -406,27 +415,27 @@ static int intel_hdcp_load_keys(struct drm_i915_private *i915)
return -ENXIO;
/* Send Aksv over to PCH display for use in authentication */
- intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
+ intel_de_write(display, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
return 0;
}
/* Returns updated SHA-1 index */
-static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text)
+static int intel_write_sha_text(struct intel_display *display, u32 sha_text)
{
- intel_de_write(i915, HDCP_SHA_TEXT, sha_text);
- if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
- drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n");
+ intel_de_write(display, HDCP_SHA_TEXT, sha_text);
+ if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
+ drm_err(display->drm, "Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
}
return 0;
}
static
-u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
+u32 intel_hdcp_get_repeater_ctl(struct intel_display *display,
enum transcoder cpu_transcoder, enum port port)
{
- if (DISPLAY_VER(i915) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
switch (cpu_transcoder) {
case TRANSCODER_A:
return HDCP_TRANSA_REP_PRESENT |
@@ -441,7 +450,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
return HDCP_TRANSD_REP_PRESENT |
HDCP_TRANSD_SHA1_M0;
default:
- drm_err(&i915->drm, "Unknown transcoder %d\n",
+ drm_err(display->drm, "Unknown transcoder %d\n",
cpu_transcoder);
return 0;
}
@@ -459,7 +468,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
case PORT_E:
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
- drm_err(&i915->drm, "Unknown port %d\n", port);
+ drm_err(display->drm, "Unknown port %d\n", port);
return 0;
}
}
@@ -469,8 +478,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
enum port port = dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
@@ -481,7 +490,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
ret = shim->read_v_prime_part(dig_port, i, &vprime);
if (ret)
return ret;
- intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime);
+ intel_de_write(display, HDCP_SHA_V_PRIME(i), vprime);
}
/*
@@ -497,8 +506,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx = 0;
sha_text = 0;
sha_leftovers = 0;
- rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port);
- intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ rep_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port);
+ intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
for (i = 0; i < num_downstream; i++) {
unsigned int sha_empty;
u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
@@ -510,14 +519,14 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_text |= ksv[j] << off;
}
- ret = intel_write_sha_text(i915, sha_text);
+ ret = intel_write_sha_text(display, sha_text);
if (ret < 0)
return ret;
/* Programming guide writes this every 64 bytes */
sha_idx += sizeof(sha_text);
if (!(sha_idx % 64))
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_32);
/* Store the leftover bytes from the ksv in sha_text */
@@ -534,7 +543,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
if (sizeof(sha_text) > sha_leftovers)
continue;
- ret = intel_write_sha_text(i915, sha_text);
+ ret = intel_write_sha_text(display, sha_text);
if (ret < 0)
return ret;
sha_leftovers = 0;
@@ -550,73 +559,73 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
*/
if (sha_leftovers == 0) {
/* Write 16 bits of text, 16 bits of M0 */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_16);
- ret = intel_write_sha_text(i915,
+ ret = intel_write_sha_text(display,
bstatus[0] << 8 | bstatus[1]);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_0);
- ret = intel_write_sha_text(i915, 0);
+ ret = intel_write_sha_text(display, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 16 bits of M0 */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_16);
- ret = intel_write_sha_text(i915, 0);
+ ret = intel_write_sha_text(display, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
} else if (sha_leftovers == 1) {
/* Write 24 bits of text, 8 bits of M0 */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_24);
sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
/* Only 24-bits of data, must be in the LSB */
sha_text = (sha_text & 0xffffff00) >> 8;
- ret = intel_write_sha_text(i915, sha_text);
+ ret = intel_write_sha_text(display, sha_text);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_0);
- ret = intel_write_sha_text(i915, 0);
+ ret = intel_write_sha_text(display, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 24 bits of M0 */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_8);
- ret = intel_write_sha_text(i915, 0);
+ ret = intel_write_sha_text(display, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
} else if (sha_leftovers == 2) {
/* Write 32 bits of text */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_32);
sha_text |= bstatus[0] << 8 | bstatus[1];
- ret = intel_write_sha_text(i915, sha_text);
+ ret = intel_write_sha_text(display, sha_text);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 64 bits of M0 */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_0);
for (i = 0; i < 2; i++) {
- ret = intel_write_sha_text(i915, 0);
+ ret = intel_write_sha_text(display, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
@@ -626,56 +635,56 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
* Terminate the SHA-1 stream by hand. For the other leftover
* cases this is appended by the hardware.
*/
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_32);
sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
- ret = intel_write_sha_text(i915, sha_text);
+ ret = intel_write_sha_text(display, sha_text);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
} else if (sha_leftovers == 3) {
/* Write 32 bits of text (filled from LSB) */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_32);
sha_text |= bstatus[0];
- ret = intel_write_sha_text(i915, sha_text);
+ ret = intel_write_sha_text(display, sha_text);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_8);
- ret = intel_write_sha_text(i915, bstatus[1]);
+ ret = intel_write_sha_text(display, bstatus[1]);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_0);
- ret = intel_write_sha_text(i915, 0);
+ ret = intel_write_sha_text(display, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 8 bits of M0 */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_24);
- ret = intel_write_sha_text(i915, 0);
+ ret = intel_write_sha_text(display, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
} else {
- drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n",
+ drm_dbg_kms(display->drm, "Invalid number of leftovers %d\n",
sha_leftovers);
return -EINVAL;
}
- intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(display, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
- ret = intel_write_sha_text(i915, 0);
+ ret = intel_write_sha_text(display, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
@@ -687,20 +696,20 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
* - 10 bytes for BINFO/BSTATUS(2), M0(8)
*/
sha_text = (num_downstream * 5 + 10) * 8;
- ret = intel_write_sha_text(i915, sha_text);
+ ret = intel_write_sha_text(display, sha_text);
if (ret < 0)
return ret;
/* Tell the HW we're done with the hash and wait for it to ACK */
- intel_de_write(i915, HDCP_REP_CTL,
+ intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_COMPLETE_HASH);
- if (intel_de_wait_for_set(i915, HDCP_REP_CTL,
+ if (intel_de_wait_for_set(display, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE, 1)) {
- drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n");
+ drm_err(display->drm, "Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
- if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
- drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n");
+ if (!(intel_de_read(display, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
+ drm_dbg_kms(display->drm, "SHA-1 mismatch, HDCP failed\n");
return -ENXIO;
}
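[Note: the length word computed above, sha_text = (num_downstream * 5 + 10) * 8, counts five KSV bytes per downstream device plus the 2-byte BINFO/BSTATUS and 8-byte M0, expressed in bits. A worked check:]

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int num_downstream = 3;
	unsigned int bits = (num_downstream * 5 + 10) * 8;

	assert(bits == 200);	/* (15 + 10) bytes * 8 bits */
	printf("SHA-1 message length: %u bits\n", bits);
	return 0;
}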
@@ -711,15 +720,15 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
u8 bstatus[2], num_downstream, *ksv_fifo;
int ret, i, tries = 3;
ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"KSV list failed to become ready (%d)\n", ret);
return ret;
}
@@ -730,7 +739,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
- drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n");
+ drm_dbg_kms(display->drm, "Max Topology Limit Exceeded\n");
return -EPERM;
}
@@ -743,14 +752,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
*/
num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
if (num_downstream == 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Repeater with zero downstream devices\n");
return -EINVAL;
}
ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
if (!ksv_fifo) {
- drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n");
+ drm_dbg_kms(display->drm, "Out of mem: ksv_fifo\n");
return -ENOMEM;
}
@@ -758,9 +767,9 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (ret)
goto err;
- if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo,
+ if (drm_hdcp_check_ksvs_revoked(display->drm, ksv_fifo,
num_downstream) > 0) {
- drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n");
+ drm_err(display->drm, "Revoked Ksv(s) in ksv_fifo\n");
ret = -EPERM;
goto err;
}
@@ -778,12 +787,12 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
}
if (i == tries) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"V Prime validation failed.(%d)\n", ret);
goto err;
}
- drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n",
+ drm_dbg_kms(display->drm, "HDCP is enabled (%d downstream devices)\n",
num_downstream);
ret = 0;
err:
@@ -794,8 +803,8 @@ err:
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
@@ -827,7 +836,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (ret)
return ret;
if (!hdcp_capable) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel is not HDCP capable\n");
return -EINVAL;
}
@@ -835,24 +844,24 @@ static int intel_hdcp_auth(struct intel_connector *connector)
/* Initialize An with 2 random values and acquire it */
for (i = 0; i < 2; i++)
- intel_de_write(i915,
- HDCP_ANINIT(i915, cpu_transcoder, port),
+ intel_de_write(display,
+ HDCP_ANINIT(display, cpu_transcoder, port),
get_random_u32());
- intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
+ intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_de_wait_for_set(i915,
- HDCP_STATUS(i915, cpu_transcoder, port),
+ if (intel_de_wait_for_set(display,
+ HDCP_STATUS(display, cpu_transcoder, port),
HDCP_STATUS_AN_READY, 1)) {
- drm_err(&i915->drm, "Timed out waiting for An\n");
+ drm_err(display->drm, "Timed out waiting for An\n");
return -ETIMEDOUT;
}
- an.reg[0] = intel_de_read(i915,
- HDCP_ANLO(i915, cpu_transcoder, port));
- an.reg[1] = intel_de_read(i915,
- HDCP_ANHI(i915, cpu_transcoder, port));
+ an.reg[0] = intel_de_read(display,
+ HDCP_ANLO(display, cpu_transcoder, port));
+ an.reg[1] = intel_de_read(display,
+ HDCP_ANHI(display, cpu_transcoder, port));
ret = shim->write_an_aksv(dig_port, an.shim);
if (ret)
return ret;
@@ -865,34 +874,34 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (ret < 0)
return ret;
- if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) {
- drm_err(&i915->drm, "BKSV is revoked\n");
+ if (drm_hdcp_check_ksvs_revoked(display->drm, bksv.shim, 1) > 0) {
+ drm_err(display->drm, "BKSV is revoked\n");
return -EPERM;
}
- intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port),
+ intel_de_write(display, HDCP_BKSVLO(display, cpu_transcoder, port),
bksv.reg[0]);
- intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port),
+ intel_de_write(display, HDCP_BKSVHI(display, cpu_transcoder, port),
bksv.reg[1]);
ret = shim->repeater_present(dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
- intel_de_write(i915, HDCP_REP_CTL,
- intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port));
+ intel_de_write(display, HDCP_REP_CTL,
+ intel_hdcp_get_repeater_ctl(display, cpu_transcoder, port));
ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
if (ret)
return ret;
- intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
+ intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port),
HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
+ if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
- drm_err(&i915->drm, "Timed out waiting for R0 ready\n");
+ drm_err(display->drm, "Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
}
@@ -918,30 +927,30 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ret = shim->read_ri_prime(dig_port, ri.shim);
if (ret)
return ret;
- intel_de_write(i915,
- HDCP_RPRIME(i915, cpu_transcoder, port),
+ intel_de_write(display,
+ HDCP_RPRIME(display, cpu_transcoder, port),
ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
+ if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
break;
}
if (i == tries) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Timed out waiting for Ri prime match (%x)\n",
- intel_de_read(i915,
- HDCP_STATUS(i915, cpu_transcoder, port)));
+ intel_de_read(display,
+ HDCP_STATUS(display, cpu_transcoder, port)));
return -ETIMEDOUT;
}
/* Wait for encryption confirmation */
- if (intel_de_wait_for_set(i915,
- HDCP_STATUS(i915, cpu_transcoder, port),
+ if (intel_de_wait_for_set(display,
+ HDCP_STATUS(display, cpu_transcoder, port),
HDCP_STATUS_ENC,
HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- drm_err(&i915->drm, "Timed out waiting for encryption\n");
+ drm_err(display->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -949,42 +958,42 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (shim->stream_encryption) {
ret = shim->stream_encryption(connector, true);
if (ret) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 1.4 stream enc\n",
connector->base.base.id, connector->base.name);
return ret;
}
- drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
+ drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
transcoder_name(hdcp->stream_transcoder));
}
if (repeater_present)
return intel_hdcp_auth_downstream(connector);
- drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n");
+ drm_dbg_kms(display->drm, "HDCP is enabled (no repeater present)\n");
return 0;
}
static int _intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
u32 repeater_ctl;
int ret;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being disabled...\n",
connector->base.base.id, connector->base.name);
if (hdcp->shim->stream_encryption) {
ret = hdcp->shim->stream_encryption(connector, false);
if (ret) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 1.4 stream enc\n",
connector->base.base.id, connector->base.name);
return ret;
}
- drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
+ drm_dbg_kms(display->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
transcoder_name(hdcp->stream_transcoder));
/*
* If there are other connectors on this port using HDCP,
@@ -996,55 +1005,57 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
}
hdcp->hdcp_encrypted = false;
- intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0);
- if (intel_de_wait_for_clear(i915,
- HDCP_STATUS(i915, cpu_transcoder, port),
+ intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
+ if (intel_de_wait_for_clear(display,
+ HDCP_STATUS(display, cpu_transcoder, port),
~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
- repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder,
+ repeater_ctl = intel_hdcp_get_repeater_ctl(display, cpu_transcoder,
port);
- intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0);
+ intel_de_rmw(display, HDCP_REP_CTL, repeater_ctl, 0);
ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
if (ret) {
- drm_err(&i915->drm, "Failed to disable HDCP signalling\n");
+ drm_err(display->drm, "Failed to disable HDCP signalling\n");
return ret;
}
- drm_dbg_kms(&i915->drm, "HDCP is disabled\n");
+ drm_dbg_kms(display->drm, "HDCP is disabled\n");
return 0;
}
static int intel_hdcp1_enable(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
int i, ret, tries = 3;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP is being enabled...\n",
connector->base.base.id, connector->base.name);
- if (!hdcp_key_loadable(i915)) {
- drm_err(&i915->drm, "HDCP key Load is not possible\n");
+ if (!hdcp_key_loadable(display)) {
+ drm_err(display->drm, "HDCP key Load is not possible\n");
return -ENXIO;
}
for (i = 0; i < KEY_LOAD_TRIES; i++) {
- ret = intel_hdcp_load_keys(i915);
+ ret = intel_hdcp_load_keys(display);
if (!ret)
break;
- intel_hdcp_clear_keys(i915);
+ intel_hdcp_clear_keys(display);
}
if (ret) {
- drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n",
+ drm_err(display->drm, "Could not load HDCP keys, (%d)\n",
ret);
return ret;
}
+ intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, true);
+
/* Incase of authentication failures, HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
ret = intel_hdcp_auth(connector);
@@ -1053,13 +1064,13 @@ static int intel_hdcp1_enable(struct intel_connector *connector)
return 0;
}
- drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret);
+ drm_dbg_kms(display->drm, "HDCP Auth failure (%d)\n", ret);
/* Ensuring HDCP encryption and signalling are stopped. */
_intel_hdcp_disable(connector);
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"HDCP authentication failed (%d tries/%d)\n", tries, ret);
return ret;
}
@@ -1072,20 +1083,20 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
static void intel_hdcp_update_value(struct intel_connector *connector,
u64 value, bool update_property)
{
- struct drm_device *dev = connector->base.dev;
+ struct intel_display *display = to_intel_display(connector);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
+ drm_WARN_ON(display->drm, !mutex_is_locked(&hdcp->mutex));
if (hdcp->value == value)
return;
- drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
+ drm_WARN_ON(display->drm, !mutex_is_locked(&dig_port->hdcp_mutex));
if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
- if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
+ if (!drm_WARN_ON(display->drm, dig_port->num_hdcp_streams == 0))
dig_port->num_hdcp_streams--;
} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
dig_port->num_hdcp_streams++;
@@ -1094,15 +1105,16 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
hdcp->value = value;
if (update_property) {
drm_connector_get(&connector->base);
- queue_work(i915->unordered_wq, &hdcp->prop_work);
+ if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ drm_connector_put(&connector->base);
}
}
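/*
 * A note on the queue_work() change above, using the patch's own names:
 * drm_connector_get() takes a reference on behalf of prop_work, and
 * queue_work() returns false when the work item is already pending. In
 * that case the worker runs (and drops the reference) only once, so the
 * extra reference must be put back immediately or the connector leaks:
 */
drm_connector_get(&connector->base);
if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
        drm_connector_put(&connector->base);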
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder;
@@ -1120,12 +1132,12 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (drm_WARN_ON(&i915->drm,
- !intel_hdcp_in_use(i915, cpu_transcoder, port))) {
- drm_err(&i915->drm,
+ if (drm_WARN_ON(display->drm,
+ !intel_hdcp_in_use(display, cpu_transcoder, port))) {
+ drm_err(display->drm,
"[CONNECTOR:%d:%s] HDCP link stopped encryption,%x\n",
connector->base.base.id, connector->base.name,
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
+ intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)));
ret = -ENXIO;
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED,
@@ -1141,22 +1153,28 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] HDCP link failed, retrying authentication\n",
connector->base.base.id, connector->base.name);
ret = _intel_hdcp_disable(connector);
if (ret) {
- drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret);
+ drm_err(display->drm, "Failed to disable hdcp (%d)\n", ret);
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED,
+ true);
+ goto out;
+ }
+
+ ret = intel_hdcp1_enable(connector);
+ if (ret) {
+ drm_err(display->drm, "Failed to enable hdcp (%d)\n", ret);
intel_hdcp_update_value(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED,
true);
goto out;
}
- intel_hdcp_update_value(connector,
- DRM_MODE_CONTENT_PROTECTION_DESIRED,
- true);
out:
mutex_unlock(&dig_port->hdcp_mutex);
mutex_unlock(&hdcp->mutex);
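/*
 * Condensed control flow of the link-check fix above (a sketch with the
 * error logging elided): instead of unconditionally downgrading the
 * property to DESIRED after a failed Part 3 check, the code now re-runs
 * the full HDCP 1.4 enable and downgrades only on failure; on success
 * the property is left untouched and remains ENABLED.
 */
ret = _intel_hdcp_disable(connector);
if (!ret)
        ret = intel_hdcp1_enable(connector);
if (ret)
        intel_hdcp_update_value(connector,
                                DRM_MODE_CONTENT_PROTECTION_DESIRED,
                                true);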
@@ -1168,9 +1186,9 @@ static void intel_hdcp_prop_work(struct work_struct *work)
struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
prop_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
- drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
mutex_lock(&hdcp->mutex);
/*
@@ -1183,40 +1201,40 @@ static void intel_hdcp_prop_work(struct work_struct *work)
hdcp->value);
mutex_unlock(&hdcp->mutex);
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
drm_connector_put(&connector->base);
}
-bool is_hdcp_supported(struct drm_i915_private *i915, enum port port)
+bool is_hdcp_supported(struct intel_display *display, enum port port)
{
- return DISPLAY_RUNTIME_INFO(i915)->has_hdcp &&
- (DISPLAY_VER(i915) >= 12 || port < PORT_E);
+ return DISPLAY_RUNTIME_INFO(display)->has_hdcp &&
+ (DISPLAY_VER(display) >= 12 || port < PORT_E);
}
static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
struct hdcp2_ake_init *ake_data)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
if (ret)
- drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n",
+ drm_dbg_kms(display->drm, "Prepare_ake_init failed. %d\n",
ret);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
@@ -1228,17 +1246,17 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct hdcp2_ake_no_stored_km *ek_pub_km,
size_t *msg_sz)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
@@ -1246,9 +1264,9 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
rx_cert, paired,
ek_pub_km, msg_sz);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Verify rx_cert failed. %d\n",
+ drm_dbg_kms(display->drm, "Verify rx_cert failed. %d\n",
ret);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
@@ -1256,24 +1274,24 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
static int hdcp2_verify_hprime(struct intel_connector *connector,
struct hdcp2_ake_send_hprime *rx_hprime)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ drm_dbg_kms(display->drm, "Verify hprime failed. %d\n", ret);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
@@ -1282,25 +1300,25 @@ static int
hdcp2_store_pairing_info(struct intel_connector *connector,
struct hdcp2_ake_send_pairing_info *pairing_info)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n",
+ drm_dbg_kms(display->drm, "Store pairing info failed. %d\n",
ret);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
@@ -1309,25 +1327,25 @@ static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
struct hdcp2_lc_init *lc_init)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n",
+ drm_dbg_kms(display->drm, "Prepare lc_init failed. %d\n",
ret);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
@@ -1336,25 +1354,25 @@ static int
hdcp2_verify_lprime(struct intel_connector *connector,
struct hdcp2_lc_send_lprime *rx_lprime)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n",
+ drm_dbg_kms(display->drm, "Verify L_Prime failed. %d\n",
ret);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
@@ -1362,25 +1380,25 @@ hdcp2_verify_lprime(struct intel_connector *connector,
static int hdcp2_prepare_skey(struct intel_connector *connector,
struct hdcp2_ske_send_eks *ske_data)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Get session key failed. %d\n",
+ drm_dbg_kms(display->drm, "Get session key failed. %d\n",
ret);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
@@ -1391,17 +1409,17 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
*rep_topology,
struct hdcp2_rep_send_ack *rep_send_ack)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
@@ -1410,9 +1428,9 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
rep_topology,
rep_send_ack);
if (ret < 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Verify rep topology failed. %d\n", ret);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
@@ -1421,71 +1439,71 @@ static int
hdcp2_verify_mprime(struct intel_connector *connector,
struct hdcp2_rep_stream_ready *stream_ready)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ drm_dbg_kms(display->drm, "Verify mprime failed. %d\n", ret);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
static int hdcp2_authenticate_port(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
if (ret < 0)
- drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. %d\n",
+ drm_dbg_kms(display->drm, "Enable hdcp auth failed. %d\n",
ret);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
static int hdcp2_close_session(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct i915_hdcp_arbiter *arbiter;
int ret;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- arbiter = i915->display.hdcp.arbiter;
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ arbiter = display->hdcp.arbiter;
if (!arbiter || !arbiter->ops) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return -EINVAL;
}
ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
&dig_port->hdcp_port_data);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
@@ -1498,7 +1516,9 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector)
/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_digital_port *dig_port =
+ intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_ake_init ake_init;
@@ -1509,37 +1529,66 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
} msgs;
const struct intel_hdcp_shim *shim = hdcp->shim;
size_t size;
- int ret;
+ int ret, i, max_retries;
/* Init for seq_num */
hdcp->seq_num_v = 0;
hdcp->seq_num_m = 0;
+ if (intel_encoder_is_dp(&dig_port->base) ||
+ intel_encoder_is_mst(&dig_port->base))
+ max_retries = 10;
+ else
+ max_retries = 1;
+
ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
if (ret < 0)
return ret;
- ret = shim->write_2_2_msg(connector, &msgs.ake_init,
- sizeof(msgs.ake_init));
- if (ret < 0)
- return ret;
+ /*
+ * Retry the first read and write to downstream at least 10 times
+ * with a 50ms delay if not hdcp2 capable for DP/DPMST encoders
+ * (dock decides to stop advertising hdcp2 capability for some reason).
+ * The reason is that during suspend/resume the dock usually keeps the
+ * HDCP2 registers inaccessible, causing AUX errors. This wouldn't be a
+ * big problem if userspace just kept retrying with some delay while it
+ * continued to play low-value content, but most userspace applications
+ * end up throwing an error when they receive one from KMD. This makes
+ * sure we give the dock and the sink devices time to complete their
+ * power cycles before retrying HDCP authentication. The value of 10
+ * retries and the 50ms delay were decided based on multiple rounds of
+ * trial and error.
+ */
+ for (i = 0; i < max_retries; i++) {
+ if (!intel_hdcp2_get_capability(connector)) {
+ msleep(50);
+ continue;
+ }
+
+ ret = shim->write_2_2_msg(connector, &msgs.ake_init,
+ sizeof(msgs.ake_init));
+ if (ret < 0)
+ continue;
+
+ ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
+ &msgs.send_cert, sizeof(msgs.send_cert));
+ if (ret > 0)
+ break;
+ }
- ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
- &msgs.send_cert, sizeof(msgs.send_cert));
if (ret < 0)
return ret;
if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
- drm_dbg_kms(&i915->drm, "cert.rx_caps dont claim HDCP2.2\n");
+ drm_dbg_kms(display->drm, "cert.rx_caps dont claim HDCP2.2\n");
return -EINVAL;
}
hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
- if (drm_hdcp_check_ksvs_revoked(&i915->drm,
+ if (drm_hdcp_check_ksvs_revoked(display->drm,
msgs.send_cert.cert_rx.receiver_id,
1) > 0) {
- drm_err(&i915->drm, "Receiver ID is revoked\n");
+ drm_err(display->drm, "Receiver ID is revoked\n");
return -EPERM;
}
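/*
 * The retry loop added above, reduced to its skeleton (names taken from
 * the patch; error handling trimmed). Note the asymmetric exit tests: a
 * failed write_2_2_msg() simply retries, while the loop terminates only
 * once read_2_2_msg() returns a positive byte count, i.e. once the AKE
 * certificate actually arrived.
 */
for (i = 0; i < max_retries; i++) {
        if (!intel_hdcp2_get_capability(connector)) {
                msleep(50);     /* give the dock time to power-cycle */
                continue;
        }
        ret = shim->write_2_2_msg(connector, &msgs.ake_init,
                                  sizeof(msgs.ake_init));
        if (ret < 0)
                continue;
        ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
                                 &msgs.send_cert, sizeof(msgs.send_cert));
        if (ret > 0)
                break;
}
if (ret < 0)
        return ret;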
@@ -1690,8 +1739,8 @@ out:
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_send_receiverid_list recvid_list;
@@ -1711,7 +1760,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
- drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n");
+ drm_dbg_kms(display->drm, "Topology Max Size Exceeded\n");
return -EINVAL;
}
@@ -1724,7 +1773,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
if (!dig_port->hdcp_mst_type1_capable && hdcp->content_type) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"HDCP1.x or 2.0 Legacy Device Downstream\n");
return -EINVAL;
}
@@ -1734,23 +1783,23 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
if (!hdcp->hdcp2_encrypted && seq_num_v) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Non zero Seq_num_v at first RecvId_List msg\n");
return -EINVAL;
}
if (seq_num_v < hdcp->seq_num_v) {
/* Roll over of the seq_num_v from repeater. Reauthenticate. */
- drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n");
+ drm_dbg_kms(display->drm, "Seq_num_v roll over.\n");
return -EINVAL;
}
device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
- if (drm_hdcp_check_ksvs_revoked(&i915->drm,
+ if (drm_hdcp_check_ksvs_revoked(display->drm,
msgs.recvid_list.receiver_ids,
device_cnt) > 0) {
- drm_err(&i915->drm, "Revoked receiver ID(s) is in list\n");
+ drm_err(display->drm, "Revoked receiver ID(s) is in list\n");
return -EPERM;
}
@@ -1771,27 +1820,27 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
int ret;
ret = hdcp2_authentication_key_exchange(connector);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(display->drm, "AKE Failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_locality_check(connector);
if (ret < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Locality Check failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_session_key_exchange(connector);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(display->drm, "SKE Failed. Err : %d\n", ret);
return ret;
}
@@ -1806,7 +1855,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
if (hdcp->is_repeater) {
ret = hdcp2_authenticate_repeater_topology(connector);
if (ret < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Repeater Auth Failed. Err: %d\n", ret);
return ret;
}
@@ -1817,17 +1866,17 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
enum port port = dig_port->base.port;
int ret = 0;
- if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
+ if (!(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS)) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] HDCP 2.2 Link is not encrypted\n",
connector->base.base.id, connector->base.name);
ret = -EPERM;
goto link_recover;
@@ -1836,11 +1885,11 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
if (hdcp->shim->stream_2_2_encryption) {
ret = hdcp->shim->stream_2_2_encryption(connector, true);
if (ret) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to enable HDCP 2.2 stream enc\n",
connector->base.base.id, connector->base.name);
return ret;
}
- drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
+ drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
transcoder_name(hdcp->stream_transcoder));
}
@@ -1848,7 +1897,7 @@ static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
link_recover:
if (hdcp2_deauthenticate_port(connector) < 0)
- drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
+ drm_dbg_kms(display->drm, "Port deauth failed.\n");
dig_port->hdcp_auth_status = false;
data->k = 0;
@@ -1858,35 +1907,35 @@ link_recover:
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- drm_WARN_ON(&i915->drm,
- intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
true);
if (ret) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to enable HDCP signalling. %d\n",
ret);
return ret;
}
}
- if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
+ if (intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
LINK_AUTH_STATUS)
/* Link is Authenticated. Now set for Encryption */
- intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
+ intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
0, CTL_LINK_ENCRYPTION_REQ);
- ret = intel_de_wait_for_set(i915,
- HDCP2_STATUS(i915, cpu_transcoder,
+ ret = intel_de_wait_for_set(display,
+ HDCP2_STATUS(display, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
@@ -1897,32 +1946,33 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
- LINK_ENCRYPTION_STATUS));
+ drm_WARN_ON(display->drm,
+ !(intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS));
- intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
+ intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
CTL_LINK_ENCRYPTION_REQ, 0);
- ret = intel_de_wait_for_clear(i915,
- HDCP2_STATUS(i915, cpu_transcoder,
+ ret = intel_de_wait_for_clear(display,
+ HDCP2_STATUS(display, cpu_transcoder,
port),
LINK_ENCRYPTION_STATUS,
HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
- drm_dbg_kms(&i915->drm, "Disable Encryption Timedout");
+ drm_dbg_kms(display->drm, "Disable Encryption Timedout");
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
false);
if (ret) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Failed to disable HDCP signalling. %d\n",
ret);
return ret;
@@ -1935,7 +1985,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
int i, tries = 3, ret;
if (!connector->hdcp.is_repeater)
@@ -1948,12 +1998,12 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector)
/* Lets restart the auth incase of seq_num_m roll over */
if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"seq_num_m roll over.(%d)\n", ret);
break;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"HDCP2 stream management %d of %d Failed.(%d)\n",
i + 1, tries, ret);
}
@@ -1964,8 +2014,8 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret = 0, i, tries = 3;
for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
@@ -1973,7 +2023,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
if (!ret) {
ret = intel_hdcp_prepare_streams(state, connector);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Prepare stream failed.(%d)\n",
ret);
break;
@@ -1981,7 +2031,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
ret = hdcp2_propagate_stream_management_info(connector);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Stream management failed.(%d)\n",
ret);
break;
@@ -1990,15 +2040,15 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
ret = hdcp2_authenticate_port(connector);
if (!ret)
break;
- drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
+ drm_dbg_kms(display->drm, "HDCP2 port auth failed.(%d)\n",
ret);
}
/* Clearing the mei hdcp session */
- drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
+ drm_dbg_kms(display->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
i + 1, tries, ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
+ drm_dbg_kms(display->drm, "Port deauth failed.\n");
}
if (!ret && !dig_port->hdcp_auth_status) {
@@ -2009,10 +2059,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
ret = hdcp2_enable_encryption(connector);
if (ret < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Encryption Enable Failed.(%d)\n", ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
+ drm_dbg_kms(display->drm, "Port deauth failed.\n");
}
}
@@ -2025,24 +2075,24 @@ static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
static int _intel_hdcp2_enable(struct intel_atomic_state *state,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being enabled. Type: %d\n",
connector->base.base.id, connector->base.name,
hdcp->content_type);
- intel_hdcp_disable_hdcp_line_rekeying(connector->encoder, hdcp);
+ intel_hdcp_adjust_hdcp_line_rekeying(connector->encoder, hdcp, false);
ret = hdcp2_authenticate_and_encrypt(state, connector);
if (ret) {
- drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
+ drm_dbg_kms(display->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
hdcp->content_type, ret);
return ret;
}
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is enabled. Type %d\n",
connector->base.base.id, connector->base.name,
hdcp->content_type);
@@ -2053,23 +2103,23 @@ static int _intel_hdcp2_enable(struct intel_atomic_state *state,
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] HDCP2.2 is being Disabled\n",
connector->base.base.id, connector->base.name);
if (hdcp->shim->stream_2_2_encryption) {
ret = hdcp->shim->stream_2_2_encryption(connector, false);
if (ret) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] Failed to disable HDCP 2.2 stream enc\n",
connector->base.base.id, connector->base.name);
return ret;
}
- drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
+ drm_dbg_kms(display->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
transcoder_name(hdcp->stream_transcoder));
if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
@@ -2079,7 +2129,7 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
ret = hdcp2_disable_encryption(connector);
if (hdcp2_deauthenticate_port(connector) < 0)
- drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
+ drm_dbg_kms(display->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
dig_port->hdcp_auth_status = false;
@@ -2091,8 +2141,8 @@ _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery
/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder;
@@ -2109,11 +2159,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- if (drm_WARN_ON(&i915->drm,
- !intel_hdcp2_in_use(i915, cpu_transcoder, port))) {
- drm_err(&i915->drm,
+ if (drm_WARN_ON(display->drm,
+ !intel_hdcp2_in_use(display, cpu_transcoder, port))) {
+ drm_err(display->drm,
"HDCP2.2 link stopped the encryption, %x\n",
- intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)));
+ intel_de_read(display, HDCP2_STATUS(display, cpu_transcoder, port)));
ret = -ENXIO;
_intel_hdcp2_disable(connector, true);
intel_hdcp_update_value(connector,
@@ -2136,17 +2186,30 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"HDCP2.2 Downstream topology change\n");
+
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (!ret) {
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
+ goto out;
+ }
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
+ connector->base.base.id, connector->base.name,
+ ret);
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
connector->base.base.id, connector->base.name);
}
ret = _intel_hdcp2_disable(connector, true);
if (ret) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"[CONNECTOR:%d:%s] Failed to disable hdcp2.2 (%d)\n",
connector->base.base.id, connector->base.name, ret);
intel_hdcp_update_value(connector,
@@ -2168,7 +2231,8 @@ static void intel_hdcp_check_work(struct work_struct *work)
struct intel_hdcp,
check_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (drm_connector_is_unregistered(&connector->base))
return;
@@ -2185,13 +2249,12 @@ static int i915_hdcp_component_bind(struct device *drv_kdev,
struct device *mei_kdev, void *data)
{
struct intel_display *display = to_intel_display(drv_kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
- drm_dbg(&i915->drm, "I915 HDCP comp bind\n");
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
- i915->display.hdcp.arbiter->hdcp_dev = mei_kdev;
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ drm_dbg(display->drm, "I915 HDCP comp bind\n");
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ display->hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
+ display->hdcp.arbiter->hdcp_dev = mei_kdev;
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return 0;
}
@@ -2200,12 +2263,11 @@ static void i915_hdcp_component_unbind(struct device *drv_kdev,
struct device *mei_kdev, void *data)
{
struct intel_display *display = to_intel_display(drv_kdev);
- struct drm_i915_private *i915 = to_i915(display->drm);
- drm_dbg(&i915->drm, "I915 HDCP comp unbind\n");
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- i915->display.hdcp.arbiter = NULL;
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ drm_dbg(display->drm, "I915 HDCP comp unbind\n");
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ display->hdcp.arbiter = NULL;
+ mutex_unlock(&display->hdcp.hdcp_mutex);
}
static const struct component_ops i915_hdcp_ops = {
@@ -2239,11 +2301,11 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct hdcp_port_data *data = &dig_port->hdcp_port_data;
enum port port = dig_port->base.port;
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
else
/*
@@ -2263,55 +2325,57 @@ static int initialize_hdcp_port_data(struct intel_connector *connector,
data->protocol = (u8)shim->protocol;
if (!data->streams)
- data->streams = kcalloc(INTEL_NUM_PIPES(i915),
+ data->streams = kcalloc(INTEL_NUM_PIPES(display),
sizeof(struct hdcp2_streamid_type),
GFP_KERNEL);
if (!data->streams) {
- drm_err(&i915->drm, "Out of Memory\n");
+ drm_err(display->drm, "Out of Memory\n");
return -ENOMEM;
}
return 0;
}
-static bool is_hdcp2_supported(struct drm_i915_private *i915)
+static bool is_hdcp2_supported(struct intel_display *display)
{
- if (intel_hdcp_gsc_cs_required(i915))
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
+ if (intel_hdcp_gsc_cs_required(display))
return true;
if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
return false;
- return (DISPLAY_VER(i915) >= 10 ||
+ return (DISPLAY_VER(display) >= 10 ||
IS_KABYLAKE(i915) ||
IS_COFFEELAKE(i915) ||
IS_COMETLAKE(i915));
}
-void intel_hdcp_component_init(struct drm_i915_private *i915)
+void intel_hdcp_component_init(struct intel_display *display)
{
int ret;
- if (!is_hdcp2_supported(i915))
+ if (!is_hdcp2_supported(display))
return;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added);
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ drm_WARN_ON(display->drm, display->hdcp.comp_added);
- i915->display.hdcp.comp_added = true;
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
- if (intel_hdcp_gsc_cs_required(i915))
- ret = intel_hdcp_gsc_init(i915);
+ display->hdcp.comp_added = true;
+ mutex_unlock(&display->hdcp.hdcp_mutex);
+ if (intel_hdcp_gsc_cs_required(display))
+ ret = intel_hdcp_gsc_init(display);
else
- ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops,
+ ret = component_add_typed(display->drm->dev, &i915_hdcp_ops,
I915_COMPONENT_HDCP);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n",
+ drm_dbg_kms(display->drm, "Failed at fw component add(%d)\n",
ret);
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- i915->display.hdcp.comp_added = false;
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ display->hdcp.comp_added = false;
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return;
}
}
@@ -2320,13 +2384,13 @@ static void intel_hdcp2_init(struct intel_connector *connector,
struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = initialize_hdcp_port_data(connector, dig_port, shim);
if (ret) {
- drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
+ drm_dbg_kms(display->drm, "Mei hdcp data init failed\n");
return;
}
@@ -2337,19 +2401,18 @@ int intel_hdcp_init(struct intel_connector *connector,
struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *shim)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
if (!shim)
return -EINVAL;
- if (is_hdcp2_supported(i915))
+ if (is_hdcp2_supported(display))
intel_hdcp2_init(connector, dig_port, shim);
- ret =
- drm_connector_attach_content_protection_property(&connector->base,
- hdcp->hdcp2_supported);
+ ret = drm_connector_attach_content_protection_property(&connector->base,
+ hdcp->hdcp2_supported);
if (ret) {
hdcp->hdcp2_supported = false;
kfree(dig_port->hdcp_port_data.streams);
@@ -2370,7 +2433,8 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2382,14 +2446,14 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
return -ENOENT;
if (!connector->encoder) {
- drm_err(&i915->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
+ drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
connector->base.base.id, connector->base.name);
return -ENODEV;
}
mutex_lock(&hdcp->mutex);
mutex_lock(&dig_port->hdcp_mutex);
- drm_WARN_ON(&i915->drm,
+ drm_WARN_ON(display->drm,
hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = (u8)conn_state->hdcp_content_type;
@@ -2401,7 +2465,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
hdcp->stream_transcoder = INVALID_TRANSCODER;
}
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
dig_port->hdcp_port_data.hdcp_transcoder =
intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
@@ -2524,7 +2588,8 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
drm_connector_get(&connector->base);
- queue_work(i915->unordered_wq, &hdcp->prop_work);
+ if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ drm_connector_put(&connector->base);
mutex_unlock(&hdcp->mutex);
}
@@ -2541,7 +2606,9 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
*/
if (!desired_and_not_enabled && !content_protection_type_changed) {
drm_connector_get(&connector->base);
- queue_work(i915->unordered_wq, &hdcp->prop_work);
+ if (!queue_work(i915->unordered_wq, &hdcp->prop_work))
+ drm_connector_put(&connector->base);
+
}
}
@@ -2549,21 +2616,21 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
_intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
-void intel_hdcp_component_fini(struct drm_i915_private *i915)
+void intel_hdcp_component_fini(struct intel_display *display)
{
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- if (!i915->display.hdcp.comp_added) {
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ if (!display->hdcp.comp_added) {
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return;
}
- i915->display.hdcp.comp_added = false;
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ display->hdcp.comp_added = false;
+ mutex_unlock(&display->hdcp.hdcp_mutex);
- if (intel_hdcp_gsc_cs_required(i915))
- intel_hdcp_gsc_fini(i915);
+ if (intel_hdcp_gsc_cs_required(display))
+ intel_hdcp_gsc_fini(display);
else
- component_del(i915->drm.dev, &i915_hdcp_ops);
+ component_del(display->drm->dev, &i915_hdcp_ops);
}
void intel_hdcp_cleanup(struct intel_connector *connector)
@@ -2653,7 +2720,8 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (!hdcp->shim)
return;
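/*
 * The conversion pattern applied throughout intel_hdcp.c, shown on a
 * hypothetical helper (foo() below is illustrative, not part of the
 * patch): derive struct intel_display from whatever display object is
 * at hand via to_intel_display(), and fall back to drm_i915_private
 * only where core-driver state (e.g. the unordered workqueue) is still
 * needed.
 */
static void foo(struct intel_connector *connector)
{
        struct intel_display *display = to_intel_display(connector);
        struct drm_i915_private *i915 = to_i915(display->drm);

        drm_dbg_kms(display->drm, "display-level logging\n"); /* was &i915->drm */

        drm_connector_get(&connector->base);
        if (!queue_work(i915->unordered_wq, &connector->hdcp.prop_work))
                drm_connector_put(&connector->base);
}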
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 477f2d2bb120..d99830cfb798 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -12,13 +12,13 @@
struct drm_connector;
struct drm_connector_state;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_display;
struct intel_encoder;
struct intel_hdcp_shim;
-struct intel_digital_port;
enum port;
enum transcoder;
@@ -37,14 +37,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
-bool is_hdcp_supported(struct drm_i915_private *i915, enum port port);
+bool is_hdcp_supported(struct intel_display *display, enum port port);
bool intel_hdcp_get_capability(struct intel_connector *connector);
bool intel_hdcp2_get_capability(struct intel_connector *connector);
void intel_hdcp_get_remote_capability(struct intel_connector *connector,
bool *hdcp_capable,
bool *hdcp2_capable);
-void intel_hdcp_component_init(struct drm_i915_private *i915);
-void intel_hdcp_component_fini(struct drm_i915_private *i915);
+void intel_hdcp_component_init(struct intel_display *display);
+void intel_hdcp_component_fini(struct intel_display *display);
void intel_hdcp_cleanup(struct intel_connector *connector);
void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
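/*
 * Call-site view of the revised header API (a sketch; bar() is
 * hypothetical and not part of the patch): every entry point that used
 * to take a drm_i915_private now takes an intel_display.
 */
static void bar(struct intel_encoder *encoder, enum port port)
{
        struct intel_display *display = to_intel_display(encoder);

        if (!is_hdcp_supported(display, port))
                return;

        intel_hdcp_component_init(display);
        /* ... later, on teardown ... */
        intel_hdcp_component_fini(display);
}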
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 16afeb8a3a8d..55965844d829 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -19,18 +19,19 @@ struct intel_hdcp_gsc_message {
void *hdcp_cmd_out;
};
-bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
+bool intel_hdcp_gsc_cs_required(struct intel_display *display)
{
- return DISPLAY_VER(i915) >= 14;
+ return DISPLAY_VER(display) >= 14;
}
-bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915)
+bool intel_hdcp_gsc_check_status(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"GSC components required for HDCP2.2 are not ready\n");
return false;
}
@@ -106,8 +107,9 @@ static const struct i915_hdcp_ops gsc_hdcp_ops = {
.close_hdcp_session = intel_hdcp_gsc_close_session,
};
-static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915)
+static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_hdcp_gsc_message *hdcp_message;
int ret;
@@ -120,19 +122,19 @@ static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915)
* NOTE: No need to lock the comp mutex here as it is already
* going to be taken before this function called
*/
- i915->display.hdcp.hdcp_message = hdcp_message;
+ display->hdcp.hdcp_message = hdcp_message;
ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message);
if (ret)
- drm_err(&i915->drm, "Could not initialize hdcp_message\n");
+ drm_err(display->drm, "Could not initialize hdcp_message\n");
return ret;
}
-static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915)
+static void intel_hdcp_gsc_free_message(struct intel_display *display)
{
struct intel_hdcp_gsc_message *hdcp_message =
- i915->display.hdcp.hdcp_message;
+ display->hdcp.hdcp_message;
hdcp_message->hdcp_cmd_in = NULL;
hdcp_message->hdcp_cmd_out = NULL;
@@ -140,7 +142,7 @@ static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915)
kfree(hdcp_message);
}
-int intel_hdcp_gsc_init(struct drm_i915_private *i915)
+int intel_hdcp_gsc_init(struct intel_display *display)
{
struct i915_hdcp_arbiter *data;
int ret;
@@ -149,20 +151,20 @@ int intel_hdcp_gsc_init(struct drm_i915_private *i915)
if (!data)
return -ENOMEM;
- mutex_lock(&i915->display.hdcp.hdcp_mutex);
- i915->display.hdcp.arbiter = data;
- i915->display.hdcp.arbiter->hdcp_dev = i915->drm.dev;
- i915->display.hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(i915);
- mutex_unlock(&i915->display.hdcp.hdcp_mutex);
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ display->hdcp.arbiter = data;
+ display->hdcp.arbiter->hdcp_dev = display->drm->dev;
+ display->hdcp.arbiter->ops = &gsc_hdcp_ops;
+ ret = intel_hdcp_gsc_hdcp2_init(display);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
-void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
+void intel_hdcp_gsc_fini(struct intel_display *display)
{
- intel_hdcp_gsc_free_message(i915);
- kfree(i915->display.hdcp.arbiter);
+ intel_hdcp_gsc_free_message(display);
+ kfree(display->hdcp.arbiter);
}
static int intel_gsc_send_sync(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
index 5f610df61cc9..5695a5e4f609 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
@@ -10,14 +10,15 @@
#include <linux/types.h>
struct drm_i915_private;
+struct intel_display;
struct intel_hdcp_gsc_message;
-bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915);
+bool intel_hdcp_gsc_cs_required(struct intel_display *display);
ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
size_t msg_in_len, u8 *msg_out,
size_t msg_out_len);
-int intel_hdcp_gsc_init(struct drm_i915_private *i915);
-void intel_hdcp_gsc_fini(struct drm_i915_private *i915);
-bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915);
+int intel_hdcp_gsc_init(struct intel_display *display);
+void intel_hdcp_gsc_fini(struct intel_display *display);
+bool intel_hdcp_gsc_check_status(struct intel_display *display);
#endif /* __INTEL_HDCP_GCS_H__ */
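The hunks above show the conversion idiom used across this series: display entry points now take struct intel_display, and a drm_i915_private pointer is derived locally only where non-display state (here the media GT) is still needed. A minimal sketch of the idiom, assuming only the to_i915()/display->drm relationship visible in the diff; the function body is illustrative:

/*
 * Illustrative sketch of the refactoring pattern, not code from the patch.
 */
static bool example_gsc_available(struct intel_display *display)
{
	/* Derive the device private only for state not yet under intel_display. */
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (!i915->media_gt)		/* non-display state still lives in i915 */
		return false;

	/* Display-scoped logging no longer needs the i915 pointer. */
	drm_dbg_kms(display->drm, "GSC backend available\n");
	return true;
}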
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
index 35bdb532bbb3..129104fa9b16 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
@@ -46,12 +46,12 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
(u8 *)&session_init_out,
sizeof(session_init_out));
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_INITIATE_HDCP2_SESSION,
session_init_out.header.status);
return -EIO;
@@ -108,12 +108,12 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
(u8 *)&verify_rxcert_out,
sizeof(verify_rxcert_out));
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
return byte;
}
if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_VERIFY_RECEIVER_CERT,
verify_rxcert_out.header.status);
return -EIO;
@@ -171,12 +171,12 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
(u8 *)&send_hprime_out,
sizeof(send_hprime_out));
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
+ drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n",
WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status);
return -EIO;
}
@@ -222,12 +222,12 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
(u8 *)&pairing_info_out,
sizeof(pairing_info_out));
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. Status: 0x%X\n",
+ drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. Status: 0x%X\n",
WIRED_AKE_SEND_PAIRING_INFO,
pairing_info_out.header.status);
return -EIO;
@@ -269,12 +269,12 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in),
(u8 *)&lc_init_out, sizeof(lc_init_out));
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n",
+ drm_dbg_kms(display->drm, "FW cmd 0x%08X Failed. status: 0x%X\n",
WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status);
return -EIO;
}
@@ -323,12 +323,12 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
(u8 *)&verify_lprime_out,
sizeof(verify_lprime_out));
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
WIRED_VALIDATE_LOCALITY,
verify_lprime_out.header.status);
return -EIO;
@@ -369,12 +369,12 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in),
(u8 *)&get_skey_out, sizeof(get_skey_out));
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
WIRED_GET_SESSION_KEY, get_skey_out.header.status);
return -EIO;
}
@@ -435,12 +435,12 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
(u8 *)&verify_repeater_out,
sizeof(verify_repeater_out));
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
WIRED_VERIFY_REPEATER,
verify_repeater_out.header.status);
return -EIO;
@@ -504,12 +504,12 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
sizeof(verify_mprime_out));
kfree(verify_mprime_in);
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
WIRED_REPEATER_AUTH_STREAM_REQ,
verify_mprime_out.header.status);
return -EIO;
@@ -552,12 +552,12 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
(u8 *)&enable_auth_out,
sizeof(enable_auth_out));
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
+ drm_dbg_kms(display->drm, "FW cmd 0x%08X failed. status: 0x%X\n",
WIRED_ENABLE_AUTH, enable_auth_out.header.status);
return -EIO;
}
@@ -599,12 +599,12 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
(u8 *)&session_close_out,
sizeof(session_close_out));
if (byte < 0) {
- drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
+ drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
}
if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) {
- drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n",
+ drm_dbg_kms(display->drm, "Session Close Failed. status: 0x%X\n",
session_close_out.header.status);
return -EIO;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
index ce199d6f6232..2d597f27e931 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.h
@@ -22,11 +22,12 @@ struct hdcp2_ske_send_eks;
struct hdcp2_rep_send_receiverid_list;
struct hdcp2_rep_send_ack;
struct hdcp2_rep_stream_ready;
+struct intel_display;
ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
size_t msg_in_len, u8 *msg_out,
size_t msg_out_len);
-bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915);
+bool intel_hdcp_gsc_check_status(struct intel_display *display);
int
intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
struct hdcp2_ake_init *ake_data);
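Every helper in intel_hdcp_gsc_message.c repeats the two-step check visible in the hunks above: a negative return from intel_hdcp_gsc_msg_send() means a transport failure, while a non-SUCCESS header status means the firmware rejected the command. A condensed sketch of that shared shape, where 'in'/'out' stand in for the per-message structures and are hypothetical:

byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&in, sizeof(in),
			       (u8 *)&out, sizeof(out));
if (byte < 0) {		/* transport-level failure */
	drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
	return byte;
}
if (out.header.status != FW_HDCP_STATUS_SUCCESS) {	/* firmware rejection */
	drm_dbg_kms(display->drm, "FW cmd failed. status: 0x%X\n",
		    out.header.status);
	return -EIO;
}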
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_shim.h b/drivers/gpu/drm/i915/display/intel_hdcp_shim.h
new file mode 100644
index 000000000000..abf9ae2f4ada
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_shim.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __INTEL_HDCP_SHIM_H__
+#define __INTEL_HDCP_SHIM_H__
+
+#include <linux/types.h>
+
+#include <drm/intel/i915_hdcp_interface.h>
+
+enum transcoder;
+struct intel_connector;
+struct intel_digital_port;
+
+enum check_link_response {
+ HDCP_LINK_PROTECTED = 0,
+ HDCP_TOPOLOGY_CHANGE,
+ HDCP_LINK_INTEGRITY_FAILURE,
+ HDCP_REAUTH_REQUEST
+};
+
+/*
+ * This structure serves as a translation layer between the generic HDCP code
+ * and the bus-specific code. What that means is that HDCP over HDMI differs
+ * from HDCP over DP, so to account for these differences, we need to
+ * communicate with the receiver through this shim.
+ *
+ * For completeness, the 2 buses differ in the following ways:
+ * - DP AUX vs. DDC
+ * HDCP registers on the receiver are set via DP AUX for DP, and
+ * they are set via DDC for HDMI.
+ * - Receiver register offsets
+ * The offsets of the registers are different for DP vs. HDMI
+ * - Receiver register masks/offsets
+ * For instance, the ready bit for the KSV fifo is in a different
+ * place on DP vs HDMI
+ * - Receiver register names
+ * Seriously. In the DP spec, the 16-bit register containing
+ * downstream information is called BINFO, on HDMI it's called
+ * BSTATUS. To confuse matters further, DP has a BSTATUS register
+ * with a completely different definition.
+ * - KSV FIFO
+ * On HDMI, the ksv fifo is read all at once, whereas on DP it must
+ * be read 3 keys at a time
+ * - Aksv output
+ * Since Aksv is hidden in hardware, there are different procedures
+ * to send it over DP AUX vs DDC
+ */
+struct intel_hdcp_shim {
+ /* Outputs the transmitter's An and Aksv values to the receiver. */
+ int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an);
+
+ /* Reads the receiver's key selection vector */
+ int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv);
+
+ /*
+ * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
+ * definitions are the same in the respective specs, but the names are
+ * different. Call it BSTATUS since that's the name the HDMI spec
+ * uses and it was there first.
+ */
+ int (*read_bstatus)(struct intel_digital_port *dig_port,
+ u8 *bstatus);
+
+ /* Determines whether a repeater is present downstream */
+ int (*repeater_present)(struct intel_digital_port *dig_port,
+ bool *repeater_present);
+
+ /* Reads the receiver's Ri' value */
+ int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri);
+
+ /* Determines if the receiver's KSV FIFO is ready for consumption */
+ int (*read_ksv_ready)(struct intel_digital_port *dig_port,
+ bool *ksv_ready);
+
+ /* Reads the ksv fifo for num_downstream devices */
+ int (*read_ksv_fifo)(struct intel_digital_port *dig_port,
+ int num_downstream, u8 *ksv_fifo);
+
+ /* Reads a 32-bit part of V' from the receiver */
+ int (*read_v_prime_part)(struct intel_digital_port *dig_port,
+ int i, u32 *part);
+
+ /* Enables HDCP signalling on the port */
+ int (*toggle_signalling)(struct intel_digital_port *dig_port,
+ enum transcoder cpu_transcoder,
+ bool enable);
+
+ /* Enable/Disable stream encryption on DP MST Transport Link */
+ int (*stream_encryption)(struct intel_connector *connector,
+ bool enable);
+
+ /* Ensures the link is still protected */
+ bool (*check_link)(struct intel_digital_port *dig_port,
+ struct intel_connector *connector);
+
+ /* Detects panel's hdcp capability. This is optional for HDMI. */
+ int (*hdcp_get_capability)(struct intel_digital_port *dig_port,
+ bool *hdcp_capable);
+
+ /* HDCP adaptation (DP/HDMI) required on the port */
+ enum hdcp_wired_protocol protocol;
+
+ /* Detects whether sink is HDCP2.2 capable */
+ int (*hdcp_2_2_get_capability)(struct intel_connector *connector,
+ bool *capable);
+
+ /* Write HDCP2.2 messages */
+ int (*write_2_2_msg)(struct intel_connector *connector,
+ void *buf, size_t size);
+
+ /* Read HDCP2.2 messages */
+ int (*read_2_2_msg)(struct intel_connector *connector,
+ u8 msg_id, void *buf, size_t size);
+
+ /*
+ * Implementation of DP HDCP2.2 Errata for the communication of stream
+ * type to Receivers. In DP HDCP2.2, Stream type is one of the inputs to
+ * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
+ */
+ int (*config_stream_type)(struct intel_connector *connector,
+ bool is_repeater, u8 type);
+
+ /* Enable/Disable HDCP 2.2 stream encryption on DP MST Transport Link */
+ int (*stream_2_2_encryption)(struct intel_connector *connector,
+ bool enable);
+
+ /* HDCP2.2 Link Integrity Check */
+ int (*check_2_2_link)(struct intel_digital_port *dig_port,
+ struct intel_connector *connector);
+
+ /* HDCP remote sink cap */
+ int (*get_remote_hdcp_capability)(struct intel_connector *connector,
+ bool *hdcp_capable, bool *hdcp2_capable);
+};
+
+#endif /* __INTEL_HDCP_SHIM_H__ */
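The header's comment block explains why the shim exists; in practice the generic HDCP code holds a const struct intel_hdcp_shim * and dispatches through it, so one authentication flow serves both DP AUX and DDC sinks. A hypothetical caller, assuming only the ops table defined above (the wrapper itself is not from the patch):

/* Illustrative only: bus-agnostic BKSV read through the shim. */
static int hdcp_read_bksv(struct intel_digital_port *dig_port,
			  const struct intel_hdcp_shim *shim, u8 *bksv)
{
	/* DP goes over AUX, HDMI over DDC; the shim hides the difference. */
	return shim->read_bksv(dig_port, bksv);
}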
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index cd9ee171e0df..ed29dd0ccef0 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -38,8 +38,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
#include <drm/intel/intel_lpe_audio.h>
+#include <media/cec-notifier.h>
+
#include "g4x_hdmi.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -55,9 +58,11 @@
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdcp_regs.h"
+#include "intel_hdcp_shim.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
+#include "intel_pfit.h"
#include "intel_snps_phy.h"
static void
@@ -1207,6 +1212,30 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
&crtc_state->infoframes.hdmi);
}
+void intel_hdmi_fastset_infoframes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display,
+ crtc_state->cpu_transcoder);
+ u32 val = intel_de_read(display, reg);
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM)) == 0 &&
+ (val & VIDEO_DIP_ENABLE_DRM_GLK) == 0)
+ return;
+
+ val &= ~(VIDEO_DIP_ENABLE_DRM_GLK);
+
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
+
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_DRM,
+ &crtc_state->infoframes.drm);
+}
+
static void hsw_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@@ -1310,8 +1339,8 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *dig_port,
memcpy(&write_buf[1], buffer, size);
msg.addr = DRM_HDCP_DDC_ADDR;
- msg.flags = 0,
- msg.len = size + 1,
+ msg.flags = 0;
+ msg.len = size + 1;
msg.buf = write_buf;
ret = i2c_transfer(ddc, &msg, 1);
@@ -1571,14 +1600,12 @@ static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
- struct intel_display *display = to_intel_display(dig_port);
int retry;
for (retry = 0; retry < 3; retry++)
if (intel_hdmi_hdcp_check_link_once(dig_port, connector))
return true;
- drm_err(display->drm, "Link check failed\n");
return false;
}
@@ -2053,7 +2080,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return status;
}
- return intel_mode_valid_max_plane_size(dev_priv, mode, false);
+ return intel_mode_valid_max_plane_size(dev_priv, mode, 1);
}
bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
@@ -2527,10 +2554,10 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_device_enabled(dev_priv))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(dev_priv))
+ if (!intel_display_driver_check_access(display))
return connector->status;
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
@@ -2557,12 +2584,11 @@ static void
intel_hdmi_force(struct drm_connector *connector)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *i915 = to_i915(connector->dev);
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return;
intel_hdmi_unset_edid(connector);
@@ -2913,7 +2939,6 @@ static struct intel_encoder *
get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_encoder *other;
for_each_intel_encoder(display->drm, other) {
@@ -2927,7 +2952,7 @@ get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
connector = enc_to_dig_port(other)->hdmi.attached_connector;
- if (connector && connector->base.ddc == intel_gmbus_get_adapter(i915, ddc_pin))
+ if (connector && connector->base.ddc == intel_gmbus_get_adapter(display, ddc_pin))
return other;
}
@@ -2937,7 +2962,6 @@ get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_encoder *other;
const char *source;
u8 ddc_pin;
@@ -2950,7 +2974,7 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
source = "platform default";
}
- if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) {
+ if (!intel_gmbus_is_valid_pin(display, ddc_pin)) {
drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Invalid DDC pin %d\n",
encoder->base.base.id, encoder->base.name, ddc_pin);
@@ -3015,7 +3039,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
}
}
-void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+bool intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
struct intel_display *display = to_intel_display(dig_port);
@@ -3023,7 +3047,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
struct cec_connector_info conn_info;
u8 ddc_pin;
@@ -3033,22 +3056,22 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
intel_encoder->base.base.id, intel_encoder->base.name);
if (DISPLAY_VER(display) < 12 && drm_WARN_ON(dev, port == PORT_A))
- return;
+ return false;
if (drm_WARN(dev, dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
- return;
+ return false;
ddc_pin = intel_hdmi_ddc_pin(intel_encoder);
if (!ddc_pin)
- return;
+ return false;
drm_connector_init_with_ddc(dev, connector,
&intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
- intel_gmbus_get_adapter(dev_priv, ddc_pin));
+ intel_gmbus_get_adapter(display, ddc_pin));
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
@@ -3073,7 +3096,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_hdmi->attached_connector = intel_connector;
- if (is_hdcp_supported(dev_priv, port)) {
+ if (is_hdcp_supported(display, port)) {
int ret = intel_hdcp_init(intel_connector, dig_port,
&intel_hdmi_hdcp_shim);
if (ret)
@@ -3088,6 +3111,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
&conn_info);
if (!intel_hdmi->cec_notifier)
drm_dbg_kms(display->drm, "CEC notifier get failed\n");
+
+ return true;
}
/*
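intel_hdmi_init_connector() now reports whether connector setup actually happened instead of silently returning on its early-exit paths. A caller can use this to skip HDMI-specific follow-up work; a sketch of such a caller, hypothetical rather than taken from this patch:

/* Hypothetical caller shape enabled by the new bool return. */
if (!intel_hdmi_init_connector(dig_port, intel_connector)) {
	/*
	 * Port A on pre-display-12, too few lanes, or no valid DDC pin:
	 * no connector was registered, so skip any HDMI follow-up.
	 */
	return;
}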
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 9b97623665c5..38deaeb302a2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -22,7 +22,7 @@ struct intel_encoder;
struct intel_hdmi;
union hdmi_infoframe;
-void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+bool intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
bool intel_hdmi_compute_has_hdmi_sink(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -42,6 +42,9 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
u32 intel_hdmi_infoframe_enable(unsigned int type);
void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
+void intel_hdmi_fastset_infoframes(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
void intel_read_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index d9ec349f3c8c..3adc791d3776 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -21,8 +21,11 @@
* IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/kernel.h>
+#include <drm/drm_probe_helper.h>
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_display_power.h"
@@ -810,8 +813,10 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
*/
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
if (!HAS_DISPLAY(dev_priv) ||
- !intel_display_device_enabled(dev_priv))
+ !intel_display_device_enabled(display))
return;
WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 2c4e946d5575..476ac88087e0 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -556,6 +556,7 @@ void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
+ struct intel_display *display = &dev_priv->display;
u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
u32 pin_mask = 0, long_mask = 0;
@@ -589,11 +590,12 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_ICP)
- intel_gmbus_irq_handler(dev_priv);
+ intel_gmbus_irq_handler(display);
}
void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
+ struct intel_display *display = &dev_priv->display;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -625,7 +627,7 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_CPT)
- intel_gmbus_irq_handler(dev_priv);
+ intel_gmbus_irq_handler(display);
}
void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
@@ -849,10 +851,11 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
- if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
- else
- intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
+ /*
+ * We reduce the value to 250us to be able to detect SHPD when an external display
+ * is connected. This is also expected of us as stated in DP1.4a Table 3-4.
+ */
+ intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -1060,6 +1063,10 @@ static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+ /*
+ * Use 250us here to align with the DP1.4a (Table 3-4) spec as to what the
+ * SHPD_FILTER_CNT value should be.
+ */
intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250);
mtp_hpd_invert(i915);
@@ -1450,7 +1457,11 @@ void intel_hpd_enable_detection(struct intel_encoder *encoder)
void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display.irq.display_irqs_enabled && i915->display.funcs.hotplug)
+ if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
+ !i915->display.irq.vlv_display_irqs_enabled)
+ return;
+
+ if (i915->display.funcs.hotplug)
i915->display.funcs.hotplug->hpd_irq_setup(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index 19d1f196d9fb..fb6b84f6a81d 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -3,7 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_drv.h"
+#include <drm/drm_device.h>
+
#include "intel_de.h"
#include "intel_display.h"
#include "intel_hti.h"
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index e7a9b860fac6..29705c159119 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -5,10 +5,9 @@
#include <drm/drm_fixed.h>
-#include "i915_drv.h"
-
#include "intel_atomic.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
@@ -26,7 +25,6 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe;
limits->force_fec_pipes = 0;
@@ -34,7 +32,7 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
for_each_pipe(display, pipe) {
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state,
- intel_crtc_for_pipe(i915, pipe));
+ intel_crtc_for_pipe(display, pipe));
if (state->base.duplicated && crtc_state) {
limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
index b457c69dc0be..86cc03a4413c 100644
--- a/drivers/gpu/drm/i915/display/intel_load_detect.c
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -7,9 +7,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
-#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_load_detect.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index f9db867fae89..d75dd17fad32 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -29,11 +29,12 @@
#include <drm/drm_edid.h>
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
-#include "intel_lspcon.h"
#include "intel_hdmi.h"
+#include "intel_lspcon.h"
/* LSPCON OUI Vendor ID(signatures) */
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index fb4ed9f7855b..6ffd55c17445 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,6 +37,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -51,16 +52,12 @@
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_panel.h"
+#include "intel_pfit.h"
#include "intel_pps_regs.h"
/* Private structure for the integrated LVDS support */
struct intel_lvds_pps {
- /* 100us units */
- int t1_t2;
- int t3;
- int t4;
- int t5;
- int tx;
+ struct intel_pps_delays delays;
int divider;
@@ -166,12 +163,12 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
- pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
- pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
+ pps->delays.power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
+ pps->delays.backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
val = intel_de_read(dev_priv, PP_OFF_DELAYS(dev_priv, 0));
- pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
- pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
+ pps->delays.power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
+ pps->delays.backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
val = intel_de_read(dev_priv, PP_DIVISOR(dev_priv, 0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
@@ -184,25 +181,30 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
if (val)
val--;
/* Convert from 100ms to 100us units */
- pps->t4 = val * 1000;
+ pps->delays.power_cycle = val * 1000;
if (DISPLAY_VER(dev_priv) < 5 &&
- pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
+ pps->delays.power_up == 0 &&
+ pps->delays.backlight_on == 0 &&
+ pps->delays.power_down == 0 &&
+ pps->delays.backlight_off == 0) {
drm_dbg_kms(&dev_priv->drm,
"Panel power timings uninitialized, "
"setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
- pps->t1_t2 = 40 * 10;
- pps->t5 = 200 * 10;
+ pps->delays.power_up = 40 * 10;
+ pps->delays.backlight_on = 200 * 10;
/* Set T3 to 35ms and Tx to 200ms in 100 usec units */
- pps->t3 = 35 * 10;
- pps->tx = 200 * 10;
+ pps->delays.power_down = 35 * 10;
+ pps->delays.backlight_off = 200 * 10;
}
- drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+ drm_dbg(&dev_priv->drm, "LVDS PPS:power_up %d power_down %d power_cycle %d backlight_on %d backlight_off %d "
"divider %d port %d powerdown_on_reset %d\n",
- pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
- pps->divider, pps->port, pps->powerdown_on_reset);
+ pps->delays.power_up, pps->delays.power_down,
+ pps->delays.power_cycle, pps->delays.backlight_on,
+ pps->delays.backlight_off, pps->divider,
+ pps->port, pps->powerdown_on_reset);
}
static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
@@ -219,16 +221,17 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, PP_ON_DELAYS(dev_priv, 0),
REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
- REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->delays.power_up) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->delays.backlight_on));
intel_de_write(dev_priv, PP_OFF_DELAYS(dev_priv, 0),
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
- REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->delays.power_down) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->delays.backlight_off));
intel_de_write(dev_priv, PP_DIVISOR(dev_priv, 0),
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
- REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(pps->delays.power_cycle, 1000) + 1));
}
static void intel_pre_enable_lvds(struct intel_atomic_state *state,
@@ -263,7 +266,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state,
temp |= LVDS_PIPE_SEL(pipe);
}
- /* set the corresponsding LVDS_BORDER bit */
+ /* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
temp |= crtc_state->gmch_pfit.lvds_border_bits;
@@ -899,7 +902,7 @@ void intel_lvds_init(struct drm_i915_private *i915)
drm_connector_init_with_ddc(&i915->drm, &connector->base,
&intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS,
- intel_gmbus_get_adapter(i915, ddc_pin));
+ intel_gmbus_get_adapter(display, ddc_pin));
drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS, "LVDS");
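The named intel_pps_delays fields keep the old 100 us units of t1_t2/t3/t5/tx, while power_cycle is held in hardware in 100 ms units with a +1 bias. A worked round-trip of the conversion in the hunks above, using an illustrative register value:

/* Illustrative: PP_DIVISOR power-cycle field reads back as 5. */
u32 hw_field = 5;
u32 val = hw_field ? hw_field - 1 : 0;		/* 4, in 100 ms units */
int power_cycle = val * 1000;			/* 4000, in 100 us units (400 ms) */
u32 back = DIV_ROUND_UP(power_cycle, 1000) + 1;	/* 5 again on writeback */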
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 72694dde3c22..9a2bea19f17b 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_vblank.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -115,6 +116,7 @@ static void set_encoder_for_connector(struct intel_connector *connector,
static void reset_encoder_connector_state(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_pmdemand_state *pmdemand_state =
to_intel_pmdemand_state(i915->display.pmdemand.obj.state);
@@ -127,7 +129,7 @@ static void reset_encoder_connector_state(struct intel_encoder *encoder)
continue;
/* Clear the corresponding bit in pmdemand active phys mask */
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state, false);
set_encoder_for_connector(connector, NULL);
@@ -151,6 +153,7 @@ static void reset_crtc_encoder_state(struct intel_crtc *crtc)
static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
to_intel_bw_state(i915->display.bw.obj.state);
@@ -184,7 +187,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
- intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe, 0);
+ intel_pmdemand_update_port_clock(display, pmdemand_state, pipe, 0);
}
/*
@@ -221,6 +224,7 @@ static u8 get_transcoder_pipes(struct drm_i915_private *i915,
static void get_portsync_pipes(struct intel_crtc *crtc,
u8 *master_pipe_mask, u8 *slave_pipes_mask)
{
+ struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
@@ -243,7 +247,7 @@ static void get_portsync_pipes(struct intel_crtc *crtc,
*master_pipe_mask = get_transcoder_pipes(i915, BIT(master_transcoder));
drm_WARN_ON(&i915->drm, !is_power_of_2(*master_pipe_mask));
- master_crtc = intel_crtc_for_pipe(i915, ffs(*master_pipe_mask) - 1);
+ master_crtc = intel_crtc_for_pipe(display, ffs(*master_pipe_mask) - 1);
master_crtc_state = to_intel_crtc_state(master_crtc->base.state);
*slave_pipes_mask = get_transcoder_pipes(i915, master_crtc_state->sync_mode_slaves_mask);
}
@@ -375,6 +379,7 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
static void
intel_sanitize_plane_mapping(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_crtc *crtc;
if (DISPLAY_VER(i915) >= 4)
@@ -396,7 +401,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *i915)
"[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
plane->base.base.id, plane->base.name);
- plane_crtc = intel_crtc_for_pipe(i915, pipe);
+ plane_crtc = intel_crtc_for_pipe(display, pipe);
intel_plane_disable_noatomic(plane_crtc, plane);
}
}
@@ -490,8 +495,8 @@ static bool intel_sanitize_crtc(struct intel_crtc *crtc,
}
/* Disable any background color/etc. set by the BIOS */
- intel_color_commit_noarm(crtc_state);
- intel_color_commit_arm(crtc_state);
+ intel_color_commit_noarm(NULL, crtc_state);
+ intel_color_commit_arm(NULL, crtc_state);
}
if (!crtc_state->hw.active ||
@@ -579,6 +584,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -610,7 +616,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
encoder->base.name);
/* Clear the corresponding bit in pmdemand active phys mask */
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state, false);
/*
@@ -662,6 +668,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_plane *plane;
struct intel_crtc *crtc;
@@ -674,7 +681,7 @@ static void readout_plane_state(struct drm_i915_private *i915)
visible = plane->get_hw_state(plane, &pipe);
- crtc = intel_crtc_for_pipe(i915, pipe);
+ crtc = intel_crtc_for_pipe(display, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
intel_set_plane_visible(crtc_state, plane_state, visible);
@@ -695,6 +702,7 @@ static void readout_plane_state(struct drm_i915_private *i915)
static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(i915->display.cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
@@ -743,7 +751,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
- crtc = intel_crtc_for_pipe(i915, pipe);
+ crtc = intel_crtc_for_pipe(display, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
@@ -765,11 +773,11 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
}
}
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state,
true);
} else {
- intel_pmdemand_update_phys_mask(i915, encoder,
+ intel_pmdemand_update_phys_mask(display, encoder,
pmdemand_state,
false);
@@ -894,13 +902,13 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
cdclk_state->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
- intel_pmdemand_update_port_clock(i915, pmdemand_state, pipe,
+ intel_pmdemand_update_port_clock(display, pmdemand_state, pipe,
crtc_state->port_clock);
intel_bw_crtc_update(bw_state, crtc_state);
}
- intel_pmdemand_init_pmdemand_params(i915, pmdemand_state);
+ intel_pmdemand_init_pmdemand_params(display, pmdemand_state);
}
static void
@@ -955,6 +963,7 @@ static void intel_early_display_was(struct drm_i915_private *i915)
void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
struct drm_modeset_acquire_ctx *ctx)
{
+ struct intel_display *display = &i915->display;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
@@ -982,7 +991,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
drm_crtc_vblank_reset(&crtc->base);
if (crtc_state->hw.active) {
- intel_dmc_enable_pipe(i915, crtc->pipe);
+ intel_dmc_enable_pipe(display, crtc->pipe);
intel_crtc_vblank_on(crtc_state);
}
}
@@ -1018,5 +1027,5 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
- intel_power_domains_sanitize_state(i915);
+ intel_power_domains_sanitize_state(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index 3491db5cad31..bc70e72ccc2e 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -27,6 +27,7 @@ static void intel_connector_verify_state(const struct intel_crtc_state *crtc_sta
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
@@ -35,29 +36,29 @@ static void intel_connector_verify_state(const struct intel_crtc_state *crtc_sta
if (connector->get_hw_state(connector)) {
struct intel_encoder *encoder = intel_attached_encoder(connector);
- I915_STATE_WARN(i915, !crtc_state,
- "connector enabled without attached crtc\n");
+ INTEL_DISPLAY_STATE_WARN(display, !crtc_state,
+ "connector enabled without attached crtc\n");
if (!crtc_state)
return;
- I915_STATE_WARN(i915, !crtc_state->hw.active,
- "connector is active, but attached crtc isn't\n");
+ INTEL_DISPLAY_STATE_WARN(display, !crtc_state->hw.active,
+ "connector is active, but attached crtc isn't\n");
if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
return;
- I915_STATE_WARN(i915,
- conn_state->best_encoder != &encoder->base,
- "atomic encoder doesn't match attached encoder\n");
+ INTEL_DISPLAY_STATE_WARN(display,
+ conn_state->best_encoder != &encoder->base,
+ "atomic encoder doesn't match attached encoder\n");
- I915_STATE_WARN(i915, conn_state->crtc != encoder->base.crtc,
- "attached encoder crtc differs from connector crtc\n");
+ INTEL_DISPLAY_STATE_WARN(display, conn_state->crtc != encoder->base.crtc,
+ "attached encoder crtc differs from connector crtc\n");
} else {
- I915_STATE_WARN(i915, crtc_state && crtc_state->hw.active,
- "attached crtc is active, but connector isn't\n");
- I915_STATE_WARN(i915, !crtc_state && conn_state->best_encoder,
- "best encoder set without crtc!\n");
+ INTEL_DISPLAY_STATE_WARN(display, crtc_state && crtc_state->hw.active,
+ "attached crtc is active, but connector isn't\n");
+ INTEL_DISPLAY_STATE_WARN(display, !crtc_state && conn_state->best_encoder,
+ "best encoder set without crtc!\n");
}
}
@@ -65,6 +66,7 @@ static void
verify_connector_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_connector *connector;
const struct drm_connector_state *new_conn_state;
int i;
@@ -81,8 +83,8 @@ verify_connector_state(struct intel_atomic_state *state,
intel_connector_verify_state(crtc_state, new_conn_state);
- I915_STATE_WARN(to_i915(connector->dev), new_conn_state->best_encoder != encoder,
- "connector's atomic encoder doesn't match legacy encoder\n");
+ INTEL_DISPLAY_STATE_WARN(display, new_conn_state->best_encoder != encoder,
+ "connector's atomic encoder doesn't match legacy encoder\n");
}
}
@@ -109,6 +111,7 @@ static void intel_pipe_config_sanity_check(const struct intel_crtc_state *crtc_s
static void
verify_encoder_state(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_encoder *encoder;
struct drm_connector *connector;
@@ -134,25 +137,25 @@ verify_encoder_state(struct intel_atomic_state *state)
found = true;
enabled = true;
- I915_STATE_WARN(i915,
- new_conn_state->crtc != encoder->base.crtc,
- "connector's crtc doesn't match encoder crtc\n");
+ INTEL_DISPLAY_STATE_WARN(display,
+ new_conn_state->crtc != encoder->base.crtc,
+ "connector's crtc doesn't match encoder crtc\n");
}
if (!found)
continue;
- I915_STATE_WARN(i915, !!encoder->base.crtc != enabled,
- "encoder's enabled state mismatch (expected %i, found %i)\n",
- !!encoder->base.crtc, enabled);
+ INTEL_DISPLAY_STATE_WARN(display, !!encoder->base.crtc != enabled,
+ "encoder's enabled state mismatch (expected %i, found %i)\n",
+ !!encoder->base.crtc, enabled);
if (!encoder->base.crtc) {
bool active;
active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(i915, active,
- "encoder detached but still enabled on pipe %c.\n",
- pipe_name(pipe));
+ INTEL_DISPLAY_STATE_WARN(display, active,
+ "encoder detached but still enabled on pipe %c.\n",
+ pipe_name(pipe));
}
}
}
@@ -161,8 +164,8 @@ static void
verify_crtc_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_crtc_state *sw_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_state *hw_crtc_state;
@@ -173,7 +176,7 @@ verify_crtc_state(struct intel_atomic_state *state,
if (!hw_crtc_state)
return;
- drm_dbg_kms(&i915->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
crtc->base.name);
hw_crtc_state->hw.enable = sw_crtc_state->hw.enable;
@@ -184,30 +187,30 @@ verify_crtc_state(struct intel_atomic_state *state,
if (IS_I830(i915) && hw_crtc_state->hw.active)
hw_crtc_state->hw.active = sw_crtc_state->hw.active;
- I915_STATE_WARN(i915,
- sw_crtc_state->hw.active != hw_crtc_state->hw.active,
- "crtc active state doesn't match with hw state (expected %i, found %i)\n",
- sw_crtc_state->hw.active, hw_crtc_state->hw.active);
+ INTEL_DISPLAY_STATE_WARN(display,
+ sw_crtc_state->hw.active != hw_crtc_state->hw.active,
+ "crtc active state doesn't match with hw state (expected %i, found %i)\n",
+ sw_crtc_state->hw.active, hw_crtc_state->hw.active);
- I915_STATE_WARN(i915, crtc->active != sw_crtc_state->hw.active,
- "transitional active state does not match atomic hw state (expected %i, found %i)\n",
- sw_crtc_state->hw.active, crtc->active);
+ INTEL_DISPLAY_STATE_WARN(display, crtc->active != sw_crtc_state->hw.active,
+ "transitional active state does not match atomic hw state (expected %i, found %i)\n",
+ sw_crtc_state->hw.active, crtc->active);
primary_crtc = intel_primary_crtc(sw_crtc_state);
- for_each_encoder_on_crtc(dev, &primary_crtc->base, encoder) {
+ for_each_encoder_on_crtc(display->drm, &primary_crtc->base, encoder) {
enum pipe pipe;
bool active;
active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(i915, active != sw_crtc_state->hw.active,
- "[ENCODER:%i] active %i with crtc active %i\n",
- encoder->base.base.id, active,
- sw_crtc_state->hw.active);
+ INTEL_DISPLAY_STATE_WARN(display, active != sw_crtc_state->hw.active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active,
+ sw_crtc_state->hw.active);
- I915_STATE_WARN(i915, active && primary_crtc->pipe != pipe,
- "Encoder connected to wrong pipe %c\n",
- pipe_name(pipe));
+ INTEL_DISPLAY_STATE_WARN(display, active && primary_crtc->pipe != pipe,
+ "Encoder connected to wrong pipe %c\n",
+ pipe_name(pipe));
if (active)
intel_encoder_get_config(encoder, hw_crtc_state);
@@ -220,7 +223,7 @@ verify_crtc_state(struct intel_atomic_state *state,
if (!intel_pipe_config_compare(sw_crtc_state,
hw_crtc_state, false)) {
- I915_STATE_WARN(i915, 1, "pipe state doesn't match!\n");
+ INTEL_DISPLAY_STATE_WARN(display, 1, "pipe state doesn't match!\n");
intel_crtc_state_dump(hw_crtc_state, NULL, "hw state");
intel_crtc_state_dump(sw_crtc_state, NULL, "sw state");
}
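The verifier now warns against struct intel_display instead of drm_i915_private. As a speculative sketch of what such a display-scoped wrapper could reduce to (an assumption about its shape, not the actual i915 definition):

/* ASSUMPTION: illustrative definition only; the real macro may differ. */
#define INTEL_DISPLAY_STATE_WARN(display, condition, format...) \
	drm_WARN((display)->drm, (condition), format)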
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index ff11836459de..0eaa6cd6fe80 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -26,6 +26,7 @@
*/
#include <linux/acpi.h>
+#include <linux/debugfs.h>
#include <linux/dmi.h>
#include <acpi/video.h>
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 06b1122ec13e..ca30fff61876 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -183,7 +183,7 @@ struct overlay_registers {
};
struct intel_overlay {
- struct drm_i915_private *i915;
+ struct intel_display *display;
struct intel_context *context;
struct intel_crtc *crtc;
struct i915_vma *vma;
@@ -205,17 +205,17 @@ struct intel_overlay {
void (*flip_complete)(struct intel_overlay *ovl);
};
-static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
+static void i830_overlay_clock_gating(struct intel_display *display,
bool enable)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
u8 val;
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), 0);
+ intel_de_write(display, DSPCLK_GATE_D(display), 0);
else
- intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv),
+ intel_de_write(display, DSPCLK_GATE_D(display),
OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
@@ -253,11 +253,11 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 *cs;
- drm_WARN_ON(&dev_priv->drm, overlay->active);
+ drm_WARN_ON(display->drm, overlay->active);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -271,8 +271,8 @@ static int intel_overlay_on(struct intel_overlay *overlay)
overlay->active = true;
- if (IS_I830(dev_priv))
- i830_overlay_clock_gating(dev_priv, false);
+ if (display->platform.i830)
+ i830_overlay_clock_gating(display, false);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
*cs++ = overlay->flip_addr | OFC_UPDATE;
@@ -288,13 +288,15 @@ static int intel_overlay_on(struct intel_overlay *overlay)
static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *frontbuffer = NULL;
- drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
+ drm_WARN_ON(display->drm, overlay->old_vma);
if (vma)
- frontbuffer = intel_frontbuffer_get(vma->obj);
+ frontbuffer = intel_frontbuffer_get(intel_bo_to_drm_bo(vma->obj));
intel_frontbuffer_track(overlay->frontbuffer, frontbuffer,
INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -303,8 +305,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
intel_frontbuffer_put(overlay->frontbuffer);
overlay->frontbuffer = frontbuffer;
- intel_frontbuffer_flip_prepare(overlay->i915,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_flip_prepare(i915, INTEL_FRONTBUFFER_OVERLAY(pipe));
overlay->old_vma = overlay->vma;
if (vma)
@@ -318,20 +319,20 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
struct i915_vma *vma,
bool load_polyphase_filter)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 flip_addr = overlay->flip_addr;
u32 tmp, *cs;
- drm_WARN_ON(&dev_priv->drm, !overlay->active);
+ drm_WARN_ON(display->drm, !overlay->active);
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
/* check for underruns */
- tmp = intel_de_read(dev_priv, DOVSTA);
+ tmp = intel_de_read(display, DOVSTA);
if (tmp & (1 << 17))
- drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp);
+ drm_dbg(display->drm, "overlay underrun, DOVSTA: %x\n", tmp);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -355,14 +356,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
{
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
- if (drm_WARN_ON(&overlay->i915->drm, !vma))
+ if (drm_WARN_ON(display->drm, !vma))
return;
- intel_frontbuffer_flip_complete(overlay->i915,
- INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip_complete(i915, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_vma_unpin(vma);
i915_vma_put(vma);
@@ -376,7 +378,7 @@ intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
intel_overlay_release_old_vma(overlay);
@@ -384,8 +386,8 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
overlay->crtc = NULL;
overlay->active = false;
- if (IS_I830(dev_priv))
- i830_overlay_clock_gating(dev_priv, true);
+ if (display->platform.i830)
+ i830_overlay_clock_gating(display, true);
}
static void intel_overlay_last_flip_retire(struct i915_active *active)
@@ -400,10 +402,11 @@ static void intel_overlay_last_flip_retire(struct i915_active *active)
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 *cs, flip_addr = overlay->flip_addr;
- drm_WARN_ON(&overlay->i915->drm, !overlay->active);
+ drm_WARN_ON(display->drm, !overlay->active);
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
@@ -452,7 +455,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
*/
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
struct i915_request *rq;
u32 *cs;
@@ -463,7 +466,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (!overlay->old_vma)
return 0;
- if (!(intel_de_read(dev_priv, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ if (!(intel_de_read(display, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
intel_overlay_release_old_vid_tail(overlay);
return 0;
}
@@ -487,9 +490,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return i915_active_wait(&overlay->last_flip);
}
-void intel_overlay_reset(struct drm_i915_private *dev_priv)
+void intel_overlay_reset(struct intel_display *display)
{
- struct intel_overlay *overlay = dev_priv->display.overlay;
+ struct intel_overlay *overlay = display->overlay;
if (!overlay)
return;
@@ -550,11 +553,11 @@ static int uv_vsubsampling(u32 format)
}
}
-static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
+static u32 calc_swidthsw(struct intel_display *display, u32 offset, u32 width)
{
u32 sw;
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
sw = ALIGN((offset & 31) + width, 32);
else
sw = ALIGN((offset & 63) + width, 64);
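calc_swidthsw() pads a surface row's span so the overlay fetch covers whole 32- or 64-byte units, starting from the offset's misalignment within that unit. A quick worked example under the display-version-2 branch, with illustrative numbers:

/* Illustrative: offset = 0x1005, width = 100, DISPLAY_VER == 2. */
u32 offset = 0x1005, width = 100;
u32 sw = ALIGN((offset & 31) + width, 32);	/* (5 + 100) = 105 -> 128 */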
@@ -789,16 +792,17 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct drm_intel_overlay_put_image *params)
{
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct overlay_registers __iomem *regs = overlay->regs;
- struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
bool scale_changed = false;
struct i915_vma *vma;
int ret, tmp_width;
- drm_WARN_ON(&dev_priv->drm,
- !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -824,7 +828,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig |= OCONF_CC_OUT_8BIT;
if (crtc_state->gamma_enable)
oconfig |= OCONF_GAMMA2_ENABLE;
- if (DISPLAY_VER(dev_priv) == 4)
+ if (DISPLAY_VER(display) == 4)
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
@@ -845,7 +849,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
tmp_width = params->src_width;
swidth = params->src_width;
- swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
+ swidthsw = calc_swidthsw(display, params->offset_Y, tmp_width);
sheight = params->src_height;
iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
@@ -858,9 +862,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth |= (params->src_width / uv_hscale) << 16;
sheight |= (params->src_height / uv_vscale) << 16;
- tmp_U = calc_swidthsw(dev_priv, params->offset_U,
+ tmp_U = calc_swidthsw(display, params->offset_U,
params->src_width / uv_hscale);
- tmp_V = calc_swidthsw(dev_priv, params->offset_V,
+ tmp_V = calc_swidthsw(display, params->offset_V,
params->src_width / uv_hscale);
swidthsw |= max(tmp_U, tmp_V) << 16;
@@ -899,11 +903,11 @@ out_pin_section:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
int ret;
- drm_WARN_ON(&dev_priv->drm,
- !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(display->drm,
+ !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -936,26 +940,24 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
+ struct intel_display *display = overlay->display;
u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
* line with the intel documentation for the i965
*/
- if (DISPLAY_VER(dev_priv) >= 4) {
- u32 tmp = intel_de_read(dev_priv, PFIT_PGM_RATIOS(dev_priv));
+ if (DISPLAY_VER(display) >= 4) {
+ u32 tmp = intel_de_read(display, PFIT_PGM_RATIOS(display));
/* on i965 use the PGM reg to read out the autoscaler values */
ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK_965, tmp);
} else {
u32 tmp;
- if (intel_de_read(dev_priv, PFIT_CONTROL(dev_priv)) & PFIT_VERT_AUTO_SCALE)
- tmp = intel_de_read(dev_priv,
- PFIT_AUTO_RATIOS(dev_priv));
+ if (intel_de_read(display, PFIT_CONTROL(display)) & PFIT_VERT_AUTO_SCALE)
+ tmp = intel_de_read(display, PFIT_AUTO_RATIOS(display));
else
- tmp = intel_de_read(dev_priv,
- PFIT_PGM_RATIOS(dev_priv));
+ tmp = intel_de_read(display, PFIT_PGM_RATIOS(display));
ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK, tmp);
}
@@ -1000,7 +1002,7 @@ static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
return 0;
}
-static int check_overlay_src(struct drm_i915_private *dev_priv,
+static int check_overlay_src(struct intel_display *display,
struct drm_intel_overlay_put_image *rec,
struct drm_i915_gem_object *new_bo)
{
@@ -1011,7 +1013,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
u32 tmp;
/* check src dimensions */
- if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
+ if (display->platform.i845g || display->platform.i830) {
if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
@@ -1063,14 +1065,14 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
return -EINVAL;
/* stride checking */
- if (IS_I830(dev_priv) || IS_I845G(dev_priv))
+ if (display->platform.i830 || display->platform.i845g)
stride_mask = 255;
else
stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (DISPLAY_VER(dev_priv) == 4 && rec->stride_Y < 512)
+ if (DISPLAY_VER(display) == 4 && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1114,17 +1116,17 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_intel_overlay_put_image *params = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
struct drm_i915_gem_object *new_bo;
int ret;
- overlay = dev_priv->display.overlay;
+ overlay = display->overlay;
if (!overlay) {
- drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
+ drm_dbg(display->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1148,7 +1150,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
if (i915_gem_object_is_tiled(new_bo)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
@@ -1197,7 +1199,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = check_overlay_src(dev_priv, params, new_bo);
+ ret = check_overlay_src(display, params, new_bo);
if (ret != 0)
goto out_unlock;
@@ -1277,14 +1279,14 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_intel_overlay_attrs *attrs = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
int ret;
- overlay = dev_priv->display.overlay;
+ overlay = display->overlay;
if (!overlay) {
- drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
+ drm_dbg(display->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1297,13 +1299,13 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (DISPLAY_VER(dev_priv) != 2) {
- attrs->gamma0 = intel_de_read(dev_priv, OGAMC0);
- attrs->gamma1 = intel_de_read(dev_priv, OGAMC1);
- attrs->gamma2 = intel_de_read(dev_priv, OGAMC2);
- attrs->gamma3 = intel_de_read(dev_priv, OGAMC3);
- attrs->gamma4 = intel_de_read(dev_priv, OGAMC4);
- attrs->gamma5 = intel_de_read(dev_priv, OGAMC5);
+ if (DISPLAY_VER(display) != 2) {
+ attrs->gamma0 = intel_de_read(display, OGAMC0);
+ attrs->gamma1 = intel_de_read(display, OGAMC1);
+ attrs->gamma2 = intel_de_read(display, OGAMC2);
+ attrs->gamma3 = intel_de_read(display, OGAMC3);
+ attrs->gamma4 = intel_de_read(display, OGAMC4);
+ attrs->gamma5 = intel_de_read(display, OGAMC5);
}
} else {
if (attrs->brightness < -128 || attrs->brightness > 127)
@@ -1321,7 +1323,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (DISPLAY_VER(dev_priv) == 2)
+ if (DISPLAY_VER(display) == 2)
goto out_unlock;
if (overlay->active) {
@@ -1333,12 +1335,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out_unlock;
- intel_de_write(dev_priv, OGAMC0, attrs->gamma0);
- intel_de_write(dev_priv, OGAMC1, attrs->gamma1);
- intel_de_write(dev_priv, OGAMC2, attrs->gamma2);
- intel_de_write(dev_priv, OGAMC3, attrs->gamma3);
- intel_de_write(dev_priv, OGAMC4, attrs->gamma4);
- intel_de_write(dev_priv, OGAMC5, attrs->gamma5);
+ intel_de_write(display, OGAMC0, attrs->gamma0);
+ intel_de_write(display, OGAMC1, attrs->gamma1);
+ intel_de_write(display, OGAMC2, attrs->gamma2);
+ intel_de_write(display, OGAMC3, attrs->gamma3);
+ intel_de_write(display, OGAMC4, attrs->gamma4);
+ intel_de_write(display, OGAMC5, attrs->gamma5);
}
}
overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
@@ -1352,12 +1354,13 @@ out_unlock:
static int get_registers(struct intel_overlay *overlay, bool use_phys)
{
- struct drm_i915_private *i915 = overlay->i915;
+ struct intel_display *display = overlay->display;
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_i915_gem_object *obj = ERR_PTR(-ENODEV);
struct i915_vma *vma;
int err;
- if (!IS_METEORLAKE(i915)) /* Wa_22018444074 */
+ if (!display->platform.meteorlake) /* Wa_22018444074 */
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -1390,13 +1393,14 @@ err_put_bo:
return err;
}
-void intel_overlay_setup(struct drm_i915_private *dev_priv)
+void intel_overlay_setup(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_overlay *overlay;
struct intel_engine_cs *engine;
int ret;
- if (!HAS_OVERLAY(dev_priv))
+ if (!HAS_OVERLAY(display))
return;
engine = to_gt(dev_priv)->engine[RCS0];
@@ -1407,7 +1411,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!overlay)
return;
- overlay->i915 = dev_priv;
+ overlay->display = display;
overlay->context = engine->kernel_context;
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
@@ -1418,7 +1422,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
i915_active_init(&overlay->last_flip,
NULL, intel_overlay_last_flip_retire, 0);
- ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+ ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(display));
if (ret)
goto out_free;
@@ -1426,19 +1430,24 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
- dev_priv->display.overlay = overlay;
- drm_info(&dev_priv->drm, "Initialized overlay support.\n");
+ display->overlay = overlay;
+ drm_info(display->drm, "Initialized overlay support.\n");
return;
out_free:
kfree(overlay);
}
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
+bool intel_overlay_available(struct intel_display *display)
+{
+ return display->overlay;
+}
+
+void intel_overlay_cleanup(struct intel_display *display)
{
struct intel_overlay *overlay;
- overlay = fetch_and_zero(&dev_priv->display.overlay);
+ overlay = fetch_and_zero(&display->overlay);
if (!overlay)
return;
@@ -1447,7 +1456,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
* Furthermore modesetting teardown happens beforehand so the
* hardware should be off already.
*/
- drm_WARN_ON(&dev_priv->drm, overlay->active);
+ drm_WARN_ON(display->drm, overlay->active);
i915_gem_object_put(overlay->reg_bo);
i915_active_fini(&overlay->last_flip);
@@ -1457,18 +1466,18 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-struct intel_overlay_error_state {
+struct intel_overlay_snapshot {
struct overlay_registers regs;
unsigned long base;
u32 dovsta;
u32 isr;
};
-struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
+struct intel_overlay_snapshot *
+intel_overlay_snapshot_capture(struct intel_display *display)
{
- struct intel_overlay *overlay = dev_priv->display.overlay;
- struct intel_overlay_error_state *error;
+ struct intel_overlay *overlay = display->overlay;
+ struct intel_overlay_snapshot *error;
if (!overlay || !overlay->active)
return NULL;
@@ -1477,8 +1486,8 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
if (error == NULL)
return NULL;
- error->dovsta = intel_de_read(dev_priv, DOVSTA);
- error->isr = intel_de_read(dev_priv, GEN2_ISR);
+ error->dovsta = intel_de_read(display, DOVSTA);
+ error->isr = intel_de_read(display, GEN2_ISR);
error->base = overlay->flip_addr;
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
@@ -1487,9 +1496,12 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
}
void
-intel_overlay_print_error_state(struct drm_printer *p,
- struct intel_overlay_error_state *error)
+intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error,
+ struct drm_printer *p)
{
+ if (!error)
+ return;
+
drm_printf(p, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
error->dovsta, error->isr);
drm_printf(p, " Register file at 0x%08lx:\n", error->base);
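The conversion pattern applied throughout intel_overlay.c above is mechanical: struct intel_overlay stores a struct intel_display pointer, display-level helpers (intel_de_read(), DISPLAY_VER(), drm_WARN_ON() on display->drm) take it directly, and the full i915 private is re-derived only where GEM services are still needed. A minimal sketch of the pattern; overlay_pattern_sketch() itself is hypothetical, while every helper it calls appears in the diff above:

	static void overlay_pattern_sketch(struct intel_overlay *overlay)
	{
		struct intel_display *display = overlay->display;
		/* to_i915() on the drm_device recovers the driver private,
		 * exactly as get_registers() does above */
		struct drm_i915_private *i915 = to_i915(display->drm);
		struct drm_i915_gem_object *obj;

		/* display-level services take struct intel_display */
		drm_WARN_ON(display->drm, !overlay->active);

		/* GEM allocation still wants drm_i915_private */
		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	}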
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
index f28a09c062d0..45a42fce754e 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.h
+++ b/drivers/gpu/drm/i915/display/intel_overlay.h
@@ -6,31 +6,35 @@
#ifndef __INTEL_OVERLAY_H__
#define __INTEL_OVERLAY_H__
+#include <linux/types.h>
+
struct drm_device;
struct drm_file;
struct drm_i915_private;
struct drm_printer;
+struct intel_display;
struct intel_overlay;
-struct intel_overlay_error_state;
+struct intel_overlay_snapshot;
#ifdef I915
-void intel_overlay_setup(struct drm_i915_private *dev_priv);
-void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
+void intel_overlay_setup(struct intel_display *display);
+bool intel_overlay_available(struct intel_display *display);
+void intel_overlay_cleanup(struct intel_display *display);
int intel_overlay_switch_off(struct intel_overlay *overlay);
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void intel_overlay_reset(struct drm_i915_private *dev_priv);
-struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
-void intel_overlay_print_error_state(struct drm_printer *p,
- struct intel_overlay_error_state *error);
+void intel_overlay_reset(struct intel_display *display);
#else
-static inline void intel_overlay_setup(struct drm_i915_private *dev_priv)
+static inline void intel_overlay_setup(struct intel_display *display)
+{
+}
+static inline bool intel_overlay_available(struct intel_display *display)
{
+ return false;
}
-static inline void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
+static inline void intel_overlay_cleanup(struct intel_display *display)
{
}
static inline int intel_overlay_switch_off(struct intel_overlay *overlay)
@@ -38,7 +42,7 @@ static inline int intel_overlay_switch_off(struct intel_overlay *overlay)
return 0;
}
static inline int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
return 0;
}
@@ -47,16 +51,24 @@ static inline int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
{
return 0;
}
-static inline void intel_overlay_reset(struct drm_i915_private *dev_priv)
+static inline void intel_overlay_reset(struct intel_display *display)
{
}
-static inline struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) && defined(I915)
+struct intel_overlay_snapshot *
+intel_overlay_snapshot_capture(struct intel_display *display);
+void intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error,
+ struct drm_printer *p);
+#else
+static inline struct intel_overlay_snapshot *
+intel_overlay_snapshot_capture(struct intel_display *display)
{
return NULL;
}
-static inline void intel_overlay_print_error_state(struct drm_printer *p,
- struct intel_overlay_error_state *error)
+static inline void intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error,
+ struct drm_printer *p)
{
}
#endif
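The renamed snapshot API above pairs naturally as capture-then-print. A hedged usage sketch, assuming the snapshot is heap-allocated as the NULL check in intel_overlay_snapshot_capture() suggests; drm_info_printer() is the stock DRM printer over dev_info():

	struct drm_printer p = drm_info_printer(display->drm->dev);
	struct intel_overlay_snapshot *snapshot;

	snapshot = intel_overlay_snapshot_capture(display);
	/* may be NULL (no overlay, or overlay inactive); the print
	 * helper checks for that itself, so no guard is needed here */
	intel_overlay_snapshot_print(snapshot, &p);
	kfree(snapshot);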
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 71454ddef20f..4e6c5592c7ae 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -33,22 +33,18 @@
#include <drm/drm_edid.h>
-#include "i915_reg.h"
#include "intel_backlight.h"
#include "intel_connector.h"
-#include "intel_de.h"
+#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
-#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_quirks.h"
#include "intel_vrr.h"
-bool intel_panel_use_ssc(struct drm_i915_private *i915)
+bool intel_panel_use_ssc(struct intel_display *display)
{
- struct intel_display *display = &i915->display;
-
if (display->params.panel_use_ssc >= 0)
return display->params.panel_use_ssc != 0;
return display->vbt.lvds_use_ssc &&
@@ -252,7 +248,7 @@ int intel_panel_compute_config(struct intel_connector *connector,
static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
const struct drm_display_mode *preferred_mode =
intel_panel_preferred_fixed_mode(connector);
struct drm_display_mode *mode, *next;
@@ -261,7 +257,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect
if (!is_alt_fixed_mode(mode, preferred_mode))
continue;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] using alternate EDID fixed mode: " DRM_MODE_FMT "\n",
connector->base.base.id, connector->base.name,
DRM_MODE_ARG(mode));
@@ -272,7 +268,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect
static void intel_panel_add_edid_preferred_mode(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_display_mode *scan, *fixed_mode = NULL;
if (list_empty(&connector->base.probed_modes))
@@ -290,7 +286,7 @@ static void intel_panel_add_edid_preferred_mode(struct intel_connector *connecto
fixed_mode = list_first_entry(&connector->base.probed_modes,
typeof(*fixed_mode), head);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] using %s EDID fixed mode: " DRM_MODE_FMT "\n",
connector->base.base.id, connector->base.name,
fixed_mode->type & DRM_MODE_TYPE_PREFERRED ? "preferred" : "first",
@@ -303,16 +299,16 @@ static void intel_panel_add_edid_preferred_mode(struct intel_connector *connecto
static void intel_panel_destroy_probed_modes(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_display_mode *mode, *next;
list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] not using EDID mode: " DRM_MODE_FMT "\n",
connector->base.base.id, connector->base.name,
DRM_MODE_ARG(mode));
list_del(&mode->head);
- drm_mode_destroy(&i915->drm, mode);
+ drm_mode_destroy(display->drm, mode);
}
}
@@ -329,7 +325,7 @@ static void intel_panel_add_fixed_mode(struct intel_connector *connector,
struct drm_display_mode *fixed_mode,
const char *type)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct drm_display_info *info = &connector->base.display_info;
if (!fixed_mode)
@@ -340,7 +336,7 @@ static void intel_panel_add_fixed_mode(struct intel_connector *connector,
info->width_mm = fixed_mode->width_mm;
info->height_mm = fixed_mode->height_mm;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] using %s fixed mode: " DRM_MODE_FMT "\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] using %s fixed mode: " DRM_MODE_FMT "\n",
connector->base.base.id, connector->base.name, type,
DRM_MODE_ARG(fixed_mode));
@@ -349,7 +345,7 @@ static void intel_panel_add_fixed_mode(struct intel_connector *connector,
void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
const struct drm_display_mode *mode;
mode = connector->panel.vbt.lfp_vbt_mode;
@@ -357,13 +353,13 @@ void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector)
return;
intel_panel_add_fixed_mode(connector,
- drm_mode_duplicate(&i915->drm, mode),
+ drm_mode_duplicate(display->drm, mode),
"VBT LFP");
}
void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
const struct drm_display_mode *mode;
mode = connector->panel.vbt.sdvo_lvds_vbt_mode;
@@ -371,7 +367,7 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector)
return;
intel_panel_add_fixed_mode(connector,
- drm_mode_duplicate(&i915->drm, mode),
+ drm_mode_duplicate(display->drm, mode),
"VBT SDVO");
}
@@ -383,310 +379,15 @@ void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
"current (BIOS)");
}
-/* adjusted_mode has been preset to be the panel's fixed mode */
-static int pch_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
- int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
- int x, y, width, height;
-
- /* Native modes don't need fitting */
- if (adjusted_mode->crtc_hdisplay == pipe_src_w &&
- adjusted_mode->crtc_vdisplay == pipe_src_h &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
- return 0;
-
- switch (conn_state->scaling_mode) {
- case DRM_MODE_SCALE_CENTER:
- width = pipe_src_w;
- height = pipe_src_h;
- x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
- y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
- break;
-
- case DRM_MODE_SCALE_ASPECT:
- /* Scale but preserve the aspect ratio */
- {
- u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
- u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
- if (scaled_width > scaled_height) { /* pillar */
- width = scaled_height / pipe_src_h;
- if (width & 1)
- width++;
- x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
- y = 0;
- height = adjusted_mode->crtc_vdisplay;
- } else if (scaled_width < scaled_height) { /* letter */
- height = scaled_width / pipe_src_w;
- if (height & 1)
- height++;
- y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
- x = 0;
- width = adjusted_mode->crtc_hdisplay;
- } else {
- x = y = 0;
- width = adjusted_mode->crtc_hdisplay;
- height = adjusted_mode->crtc_vdisplay;
- }
- }
- break;
-
- case DRM_MODE_SCALE_NONE:
- WARN_ON(adjusted_mode->crtc_hdisplay != pipe_src_w);
- WARN_ON(adjusted_mode->crtc_vdisplay != pipe_src_h);
- fallthrough;
- case DRM_MODE_SCALE_FULLSCREEN:
- x = y = 0;
- width = adjusted_mode->crtc_hdisplay;
- height = adjusted_mode->crtc_vdisplay;
- break;
-
- default:
- MISSING_CASE(conn_state->scaling_mode);
- return -EINVAL;
- }
-
- drm_rect_init(&crtc_state->pch_pfit.dst,
- x, y, width, height);
- crtc_state->pch_pfit.enabled = true;
-
- return 0;
-}
-
-static void
-centre_horizontally(struct drm_display_mode *adjusted_mode,
- int width)
-{
- u32 border, sync_pos, blank_width, sync_width;
-
- /* keep the hsync and hblank widths constant */
- sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
- blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
- sync_pos = (blank_width - sync_width + 1) / 2;
-
- border = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
- border += border & 1; /* make the border even */
-
- adjusted_mode->crtc_hdisplay = width;
- adjusted_mode->crtc_hblank_start = width + border;
- adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width;
-
- adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos;
- adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width;
-}
-
-static void
-centre_vertically(struct drm_display_mode *adjusted_mode,
- int height)
-{
- u32 border, sync_pos, blank_width, sync_width;
-
- /* keep the vsync and vblank widths constant */
- sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
- blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start;
- sync_pos = (blank_width - sync_width + 1) / 2;
-
- border = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
-
- adjusted_mode->crtc_vdisplay = height;
- adjusted_mode->crtc_vblank_start = height + border;
- adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width;
-
- adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos;
- adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
-}
-
-static u32 panel_fitter_scaling(u32 source, u32 target)
-{
- /*
- * Floating point operation is not supported. So the FACTOR
- * is defined, which can avoid the floating point computation
- * when calculating the panel ratio.
- */
-#define ACCURACY 12
-#define FACTOR (1 << ACCURACY)
- u32 ratio = source * FACTOR / target;
- return (FACTOR * ratio + FACTOR/2) / FACTOR;
-}
-
-static void i965_scale_aspect(struct intel_crtc_state *crtc_state,
- u32 *pfit_control)
-{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
- int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
- u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
- u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
-
- /* 965+ is easy, it does everything in hw */
- if (scaled_width > scaled_height)
- *pfit_control |= PFIT_ENABLE |
- PFIT_SCALING_PILLAR;
- else if (scaled_width < scaled_height)
- *pfit_control |= PFIT_ENABLE |
- PFIT_SCALING_LETTER;
- else if (adjusted_mode->crtc_hdisplay != pipe_src_w)
- *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
-}
-
-static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
- u32 *pfit_control, u32 *pfit_pgm_ratios,
- u32 *border)
-{
- struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
- int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
- u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
- u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
- u32 bits;
-
- /*
- * For earlier chips we have to calculate the scaling
- * ratio by hand and program it into the
- * PFIT_PGM_RATIO register
- */
- if (scaled_width > scaled_height) { /* pillar */
- centre_horizontally(adjusted_mode,
- scaled_height / pipe_src_h);
-
- *border = LVDS_BORDER_ENABLE;
- if (pipe_src_h != adjusted_mode->crtc_vdisplay) {
- bits = panel_fitter_scaling(pipe_src_h,
- adjusted_mode->crtc_vdisplay);
-
- *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) |
- PFIT_VERT_SCALE(bits));
- *pfit_control |= (PFIT_ENABLE |
- PFIT_VERT_INTERP_BILINEAR |
- PFIT_HORIZ_INTERP_BILINEAR);
- }
- } else if (scaled_width < scaled_height) { /* letter */
- centre_vertically(adjusted_mode,
- scaled_width / pipe_src_w);
-
- *border = LVDS_BORDER_ENABLE;
- if (pipe_src_w != adjusted_mode->crtc_hdisplay) {
- bits = panel_fitter_scaling(pipe_src_w,
- adjusted_mode->crtc_hdisplay);
-
- *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) |
- PFIT_VERT_SCALE(bits));
- *pfit_control |= (PFIT_ENABLE |
- PFIT_VERT_INTERP_BILINEAR |
- PFIT_HORIZ_INTERP_BILINEAR);
- }
- } else {
- /* Aspects match, Let hw scale both directions */
- *pfit_control |= (PFIT_ENABLE |
- PFIT_VERT_AUTO_SCALE |
- PFIT_HORIZ_AUTO_SCALE |
- PFIT_VERT_INTERP_BILINEAR |
- PFIT_HORIZ_INTERP_BILINEAR);
- }
-}
-
-static int gmch_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
- struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
- int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
-
- /* Native modes don't need fitting */
- if (adjusted_mode->crtc_hdisplay == pipe_src_w &&
- adjusted_mode->crtc_vdisplay == pipe_src_h)
- goto out;
-
- switch (conn_state->scaling_mode) {
- case DRM_MODE_SCALE_CENTER:
- /*
- * For centered modes, we have to calculate border widths &
- * heights and modify the values programmed into the CRTC.
- */
- centre_horizontally(adjusted_mode, pipe_src_w);
- centre_vertically(adjusted_mode, pipe_src_h);
- border = LVDS_BORDER_ENABLE;
- break;
- case DRM_MODE_SCALE_ASPECT:
- /* Scale but preserve the aspect ratio */
- if (DISPLAY_VER(dev_priv) >= 4)
- i965_scale_aspect(crtc_state, &pfit_control);
- else
- i9xx_scale_aspect(crtc_state, &pfit_control,
- &pfit_pgm_ratios, &border);
- break;
- case DRM_MODE_SCALE_FULLSCREEN:
- /*
- * Full scaling, even if it changes the aspect ratio.
- * Fortunately this is all done for us in hw.
- */
- if (pipe_src_h != adjusted_mode->crtc_vdisplay ||
- pipe_src_w != adjusted_mode->crtc_hdisplay) {
- pfit_control |= PFIT_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 4)
- pfit_control |= PFIT_SCALING_AUTO;
- else
- pfit_control |= (PFIT_VERT_AUTO_SCALE |
- PFIT_VERT_INTERP_BILINEAR |
- PFIT_HORIZ_AUTO_SCALE |
- PFIT_HORIZ_INTERP_BILINEAR);
- }
- break;
- default:
- MISSING_CASE(conn_state->scaling_mode);
- return -EINVAL;
- }
-
- /* 965+ wants fuzzy fitting */
- /* FIXME: handle multiple panels by failing gracefully */
- if (DISPLAY_VER(dev_priv) >= 4)
- pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY;
-
-out:
- if ((pfit_control & PFIT_ENABLE) == 0) {
- pfit_control = 0;
- pfit_pgm_ratios = 0;
- }
-
- /* Make sure pre-965 set dither correctly for 18bpp panels. */
- if (DISPLAY_VER(dev_priv) < 4 && crtc_state->pipe_bpp == 18)
- pfit_control |= PFIT_PANEL_8TO6_DITHER_ENABLE;
-
- crtc_state->gmch_pfit.control = pfit_control;
- crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
- crtc_state->gmch_pfit.lvds_border_bits = border;
-
- return 0;
-}
-
-int intel_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (HAS_GMCH(i915))
- return gmch_panel_fitting(crtc_state, conn_state);
- else
- return pch_panel_fitting(crtc_state, conn_state);
-}
-
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
return connector_status_connected;
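Every lookup in intel_panel.c now goes through to_intel_display(), which is presumably type-generic: it accepts either the intel_connector itself or the underlying drm_device, so the two forms used above should resolve to the same per-device display state:

	struct intel_display *a = to_intel_display(connector);            /* struct intel_connector * */
	struct intel_display *b = to_intel_display(connector->base.dev);  /* struct drm_device * */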
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 15a8c897b33f..b60d12322e5d 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -14,9 +14,9 @@ struct drm_connector;
struct drm_connector_state;
struct drm_display_mode;
struct drm_edid;
-struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
void intel_panel_init_alloc(struct intel_connector *connector);
@@ -25,7 +25,7 @@ int intel_panel_init(struct intel_connector *connector,
void intel_panel_fini(struct intel_connector *connector);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
-bool intel_panel_use_ssc(struct drm_i915_private *i915);
+bool intel_panel_use_ssc(struct intel_display *display);
const struct drm_display_mode *
intel_panel_preferred_fixed_mode(struct intel_connector *connector);
const struct drm_display_mode *
@@ -42,8 +42,6 @@ enum drrs_type intel_panel_drrs_type(struct intel_connector *connector);
enum drm_mode_status
intel_panel_mode_valid(struct intel_connector *connector,
const struct drm_display_mode *mode);
-int intel_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
int intel_panel_compute_config(struct intel_connector *connector,
struct drm_display_mode *adjusted_mode);
void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index f13ab680c2cf..8fa5a6334d10 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -4,8 +4,10 @@
*/
#include "g4x_dp.h"
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_crt.h"
+#include "intel_crt_regs.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
@@ -39,58 +41,61 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, enum port port,
i915_reg_t dp_reg)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
bool state;
state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
- I915_STATE_WARN(dev_priv, state && port_pipe == pipe,
- "PCH DP %c enabled on transcoder %c, should be disabled\n",
- port_name(port), pipe_name(pipe));
+ INTEL_DISPLAY_STATE_WARN(display, state && port_pipe == pipe,
+ "PCH DP %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
- I915_STATE_WARN(dev_priv,
- HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
- "IBX PCH DP %c still using transcoder B\n",
- port_name(port));
+ INTEL_DISPLAY_STATE_WARN(display,
+ HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH DP %c still using transcoder B\n",
+ port_name(port));
}
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, enum port port,
i915_reg_t hdmi_reg)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
bool state;
state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
- I915_STATE_WARN(dev_priv, state && port_pipe == pipe,
- "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
- port_name(port), pipe_name(pipe));
+ INTEL_DISPLAY_STATE_WARN(display, state && port_pipe == pipe,
+ "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
+ port_name(port), pipe_name(pipe));
- I915_STATE_WARN(dev_priv,
- HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
- "IBX PCH HDMI %c still using transcoder B\n",
- port_name(port));
+ INTEL_DISPLAY_STATE_WARN(display,
+ HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+ "IBX PCH HDMI %c still using transcoder B\n",
+ port_name(port));
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe port_pipe;
assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
- I915_STATE_WARN(dev_priv,
- intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && port_pipe == pipe,
- "PCH VGA enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
+ INTEL_DISPLAY_STATE_WARN(display,
+ intel_crt_port_enabled(display, PCH_ADPA, &port_pipe) && port_pipe == pipe,
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
- I915_STATE_WARN(dev_priv,
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe,
- "PCH LVDS enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
+ INTEL_DISPLAY_STATE_WARN(display,
+ intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && port_pipe == pipe,
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
/* PCH SDVOB multiplex with HDMIB */
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
@@ -101,14 +106,15 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
+ struct intel_display *display = &dev_priv->display;
u32 val;
bool enabled;
- val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
+ val = intel_de_read(display, PCH_TRANSCONF(pipe));
enabled = !!(val & TRANS_ENABLE);
- I915_STATE_WARN(dev_priv, enabled,
- "transcoder assertion failed, should be off on pipe %c but is still active\n",
- pipe_name(pipe));
+ INTEL_DISPLAY_STATE_WARN(display, enabled,
+ "transcoder assertion failed, should be off on pipe %c but is still active\n",
+ pipe_name(pipe));
}
static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
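Each assert in intel_pch_display.c follows the same recipe: derive display from dev_priv at the top of the function, then swap I915_STATE_WARN(dev_priv, ...) for INTEL_DISPLAY_STATE_WARN(display, ...) with the condition and message unchanged. A condensed sketch built only from calls visible in this hunk:

	static void assert_pch_transcoder_off(struct drm_i915_private *dev_priv,
					      enum pipe pipe)
	{
		struct intel_display *display = &dev_priv->display;
		u32 val = intel_de_read(display, PCH_TRANSCONF(pipe));

		INTEL_DISPLAY_STATE_WARN(display, val & TRANS_ENABLE,
					 "transcoder should be off on pipe %c\n",
					 pipe_name(pipe));
	}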
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 713cfba71475..71471c1d7dc9 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -3,6 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -108,13 +109,13 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp |= SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
struct iclkip_params {
@@ -195,7 +196,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
"iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
@@ -218,7 +219,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
temp &= ~SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
/* Wait for initialization time */
udelay(24);
@@ -236,11 +237,11 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
iclkip_params_init(&p);
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
if (temp & SBI_SSCCTL_DISABLE) {
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
return 0;
}
@@ -254,7 +255,7 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;
@@ -279,7 +280,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
@@ -302,7 +303,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
/* Sequence to disable CLKOUT_DP */
@@ -310,7 +311,7 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
u32 reg, tmp;
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
@@ -328,7 +329,7 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
}
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
#define BEND_IDX(steps) ((50 + (steps)) / 5)
@@ -374,7 +375,7 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
- mutex_lock(&dev_priv->sb_lock);
+ intel_sbi_lock(dev_priv);
if (steps % 10 != 0)
tmp = 0xAAAAAAAB;
@@ -387,7 +388,7 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
tmp |= sscdivintphase[idx];
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
- mutex_unlock(&dev_priv->sb_lock);
+ intel_sbi_unlock(dev_priv);
}
#undef BEND_IDX
@@ -491,6 +492,7 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
int i;
@@ -572,11 +574,11 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
if (has_panel) {
final |= DREF_SSC_SOURCE_ENABLE;
- if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ if (intel_panel_use_ssc(display) && can_ssc)
final |= DREF_SSC1_ENABLE;
if (has_cpu_edp) {
- if (intel_panel_use_ssc(dev_priv) && can_ssc)
+ if (intel_panel_use_ssc(display) && can_ssc)
final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
else
final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
@@ -604,7 +606,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
val |= DREF_SSC_SOURCE_ENABLE;
/* SSC must be turned on before enabling the CPU output */
- if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ if (intel_panel_use_ssc(display) && can_ssc) {
drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
val |= DREF_SSC1_ENABLE;
} else {
@@ -620,7 +622,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
- if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ if (intel_panel_use_ssc(display) && can_ssc) {
drm_dbg_kms(&dev_priv->drm,
"Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
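intel_pch_refclk.c replaces every direct mutex_lock()/mutex_unlock() of dev_priv->sb_lock with intel_sbi_lock()/intel_sbi_unlock(). The wrappers themselves are not shown in this excerpt; a hypothetical shape, assuming they only encapsulate the existing sideband mutex:

	void intel_sbi_lock(struct drm_i915_private *i915)
	{
		mutex_lock(&i915->sb_lock);
	}

	void intel_sbi_unlock(struct drm_i915_private *i915)
	{
		mutex_unlock(&i915->sb_lock);
	}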
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
new file mode 100644
index 000000000000..4ee03d9d14ad
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "i915_reg.h"
+#include "i915_utils.h"
+#include "intel_display_core.h"
+#include "intel_display_driver.h"
+#include "intel_display_types.h"
+#include "intel_lvds_regs.h"
+#include "intel_pfit.h"
+
+static int intel_pch_pfit_check_dst_window(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+ int width = drm_rect_width(dst);
+ int height = drm_rect_height(dst);
+ int x = dst->x1;
+ int y = dst->y1;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE &&
+ (y & 1 || height & 1)) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] pfit window (" DRM_RECT_FMT ") misaligned for interlaced output\n",
+ crtc->base.base.id, crtc->base.name, DRM_RECT_ARG(dst));
+ return -EINVAL;
+ }
+
+ /*
+ * "Restriction : When pipe scaling is enabled, the scaled
+ * output must equal the pipe active area, so Pipe active
+ * size = (2 * PF window position) + PF window size."
+ *
+ * The vertical direction seems more forgiving than the
+ * horizontal direction, but still has some issues so
+ * let's follow the same hard rule for both.
+ */
+ if (adjusted_mode->crtc_hdisplay != 2 * x + width ||
+ adjusted_mode->crtc_vdisplay != 2 * y + height) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] pfit window (" DRM_RECT_FMT ") not centered\n",
+ crtc->base.base.id, crtc->base.name, DRM_RECT_ARG(dst));
+ return -EINVAL;
+ }
+
+ /*
+ * "Restriction : The X position must not be programmed
+ * to be 1 (28:16=0 0000 0000 0001b)."
+ */
+ if (x == 1) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] pfit window (" DRM_RECT_FMT ") badly positioned\n",
+ crtc->base.base.id, crtc->base.name, DRM_RECT_ARG(dst));
+ return -EINVAL;
+ }
+
+ return 0;
+}
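+/*
+ * Worked example of the centering rule: on a 1920x1080 pipe, a
+ * 1440x1080 pfit window must sit at x = (1920 - 1440) / 2 = 240,
+ * so that 2 * 240 + 1440 == 1920 satisfies "Pipe active size =
+ * (2 * PF window position) + PF window size".
+ */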
+
+static int intel_pch_pfit_check_src_size(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ int max_src_w, max_src_h;
+
+ if (DISPLAY_VER(display) >= 8) {
+ max_src_w = 4096;
+ max_src_h = 4096;
+ } else if (DISPLAY_VER(display) >= 7) {
+ /*
+ * PF0 7x5 capable
+ * PF1 3x3 capable (could be switched to 7x5
+ * mode on HSW when PF2 unused)
+ * PF2 3x3 capable
+ *
+ * This assumes we use a 1:1 mapping between pipe and PF.
+ */
+ max_src_w = crtc->pipe == PIPE_A ? 4096 : 2048;
+ max_src_h = 4096;
+ } else {
+ max_src_w = 4096;
+ max_src_h = 4096;
+ }
+
+ if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] source size (%dx%d) exceeds pfit max (%dx%d)\n",
+ crtc->base.base.id, crtc->base.name,
+ pipe_src_w, pipe_src_h, max_src_w, max_src_h);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int intel_pch_pfit_check_scaling(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ int hscale, vscale, max_scale = 0x12000; /* 1.125 */
+ struct drm_rect src;
+
+ drm_rect_init(&src, 0, 0, pipe_src_w << 16, pipe_src_h << 16);
+
+ hscale = drm_rect_calc_hscale(&src, dst, 0, max_scale);
+ if (hscale < 0) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] pfit horizontal downscaling (%d->%d) exceeds max (0x%x)\n",
+ crtc->base.base.id, crtc->base.name,
+ pipe_src_w, drm_rect_width(dst),
+ max_scale);
+ return hscale;
+ }
+
+ vscale = drm_rect_calc_vscale(&src, dst, 0, max_scale);
+ if (vscale < 0) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] pfit vertical downscaling (%d->%d) exceeds max (0x%x)\n",
+ crtc->base.base.id, crtc->base.name,
+ pipe_src_h, drm_rect_height(dst),
+ max_scale);
+ return vscale;
+ }
+
+ return 0;
+}
+
+static int intel_pch_pfit_check_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (adjusted_mode->crtc_vdisplay < 7) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] vertical active (%d) below minimum (%d) for pfit\n",
+ crtc->base.base.id, crtc->base.name,
+ adjusted_mode->crtc_vdisplay, 7);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int intel_pch_pfit_check_cloning(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ /*
+ * The panel fitter is in the pipe and thus would affect every
+ * cloned output. The relevant properties (scaling mode, TV
+ * margins) are per-connector so we'd have to make sure each
+ * output sets them up identically. Seems like a very niche use
+ * case so let's just reject cloning entirely when pfit is used.
+ */
+ if (crtc_state->uapi.encoder_mask &&
+ !is_power_of_2(crtc_state->uapi.encoder_mask)) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] no pfit when cloning\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* adjusted_mode has been preset to be the panel's fixed mode */
+static int pch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ int ret, x, y, width, height;
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->crtc_hdisplay == pipe_src_w &&
+ adjusted_mode->crtc_vdisplay == pipe_src_h &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+ return 0;
+
+ switch (conn_state->scaling_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ width = pipe_src_w;
+ height = pipe_src_h;
+ x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
+ y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
+ break;
+
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ {
+ u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
+ u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
+
+ if (scaled_width > scaled_height) { /* pillar */
+ width = scaled_height / pipe_src_h;
+ if (width & 1)
+ width++;
+ x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
+ y = 0;
+ height = adjusted_mode->crtc_vdisplay;
+ } else if (scaled_width < scaled_height) { /* letter */
+ height = scaled_width / pipe_src_w;
+ if (height & 1)
+ height++;
+ y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
+ x = 0;
+ width = adjusted_mode->crtc_hdisplay;
+ } else {
+ x = y = 0;
+ width = adjusted_mode->crtc_hdisplay;
+ height = adjusted_mode->crtc_vdisplay;
+ }
+ }
+ break;
+
+ case DRM_MODE_SCALE_NONE:
+ WARN_ON(adjusted_mode->crtc_hdisplay != pipe_src_w);
+ WARN_ON(adjusted_mode->crtc_vdisplay != pipe_src_h);
+ fallthrough;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ x = y = 0;
+ width = adjusted_mode->crtc_hdisplay;
+ height = adjusted_mode->crtc_vdisplay;
+ break;
+
+ default:
+ MISSING_CASE(conn_state->scaling_mode);
+ return -EINVAL;
+ }
+
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ x, y, width, height);
+ crtc_state->pch_pfit.enabled = true;
+
+ /*
+ * SKL+ have unified scalers for pipes/planes so the
+ * checks are done in a single place for all scalers.
+ */
+ if (DISPLAY_VER(display) >= 9)
+ return 0;
+
+ ret = intel_pch_pfit_check_dst_window(crtc_state);
+ if (ret)
+ return ret;
+
+ ret = intel_pch_pfit_check_src_size(crtc_state);
+ if (ret)
+ return ret;
+
+ ret = intel_pch_pfit_check_scaling(crtc_state);
+ if (ret)
+ return ret;
+
+ ret = intel_pch_pfit_check_timings(crtc_state);
+ if (ret)
+ return ret;
+
+ ret = intel_pch_pfit_check_cloning(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+centre_horizontally(struct drm_display_mode *adjusted_mode,
+ int width)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the hsync and hblank widths constant */
+ sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
+ blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
+ border += border & 1; /* make the border even */
+
+ adjusted_mode->crtc_hdisplay = width;
+ adjusted_mode->crtc_hblank_start = width + border;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width;
+
+ adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos;
+ adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width;
+}
+
+static void
+centre_vertically(struct drm_display_mode *adjusted_mode,
+ int height)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the vsync and vblank widths constant */
+ sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
+ blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
+
+ adjusted_mode->crtc_vdisplay = height;
+ adjusted_mode->crtc_vblank_start = height + border;
+ adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width;
+
+ adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos;
+ adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
+}
+
+static u32 panel_fitter_scaling(u32 source, u32 target)
+{
+ /*
+ * Floating point operation is not supported. So the FACTOR
+ * is defined, which can avoid the floating point computation
+ * when calculating the panel ratio.
+ */
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
+ u32 ratio = source * FACTOR / target;
+ return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
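+/*
+ * Worked example: with ACCURACY = 12 the ratio is a 4.12 fixed-point
+ * value. Scaling an 800 pixel source to a 1024 pixel panel gives
+ * ratio = 800 * 4096 / 1024 = 3200 (0xC80, i.e. 0.78125 * 4096);
+ * i9xx_scale_aspect() below packs such a value into PFIT_PGM_RATIOS
+ * via PFIT_HORIZ_SCALE()/PFIT_VERT_SCALE().
+ */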
+
+static void i965_scale_aspect(struct intel_crtc_state *crtc_state,
+ u32 *pfit_control)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
+ u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
+
+ /* 965+ is easy, it does everything in hw */
+ if (scaled_width > scaled_height)
+ *pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_PILLAR;
+ else if (scaled_width < scaled_height)
+ *pfit_control |= PFIT_ENABLE |
+ PFIT_SCALING_LETTER;
+ else if (adjusted_mode->crtc_hdisplay != pipe_src_w)
+ *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+}
+
+static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
+ u32 *pfit_control, u32 *pfit_pgm_ratios,
+ u32 *border)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+ u32 scaled_width = adjusted_mode->crtc_hdisplay * pipe_src_h;
+ u32 scaled_height = pipe_src_w * adjusted_mode->crtc_vdisplay;
+ u32 bits;
+
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ if (scaled_width > scaled_height) { /* pillar */
+ centre_horizontally(adjusted_mode,
+ scaled_height / pipe_src_h);
+
+ *border = LVDS_BORDER_ENABLE;
+ if (pipe_src_h != adjusted_mode->crtc_vdisplay) {
+ bits = panel_fitter_scaling(pipe_src_h,
+ adjusted_mode->crtc_vdisplay);
+
+ *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) |
+ PFIT_VERT_SCALE(bits));
+ *pfit_control |= (PFIT_ENABLE |
+ PFIT_VERT_INTERP_BILINEAR |
+ PFIT_HORIZ_INTERP_BILINEAR);
+ }
+ } else if (scaled_width < scaled_height) { /* letter */
+ centre_vertically(adjusted_mode,
+ scaled_width / pipe_src_w);
+
+ *border = LVDS_BORDER_ENABLE;
+ if (pipe_src_w != adjusted_mode->crtc_hdisplay) {
+ bits = panel_fitter_scaling(pipe_src_w,
+ adjusted_mode->crtc_hdisplay);
+
+ *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) |
+ PFIT_VERT_SCALE(bits));
+ *pfit_control |= (PFIT_ENABLE |
+ PFIT_VERT_INTERP_BILINEAR |
+ PFIT_HORIZ_INTERP_BILINEAR);
+ }
+ } else {
+ /* Aspects match, Let hw scale both directions */
+ *pfit_control |= (PFIT_ENABLE |
+ PFIT_VERT_AUTO_SCALE |
+ PFIT_HORIZ_AUTO_SCALE |
+ PFIT_VERT_INTERP_BILINEAR |
+ PFIT_HORIZ_INTERP_BILINEAR);
+ }
+}
+
+static int intel_gmch_pfit_check_timings(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int min;
+
+ if (DISPLAY_VER(display) >= 4)
+ min = 3;
+ else
+ min = 2;
+
+ if (adjusted_mode->crtc_hdisplay < min) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] horizontal active (%d) below minimum (%d) for pfit\n",
+ crtc->base.base.id, crtc->base.name,
+ adjusted_mode->crtc_hdisplay, min);
+ return -EINVAL;
+ }
+
+ if (adjusted_mode->crtc_vdisplay < min) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] vertical active (%d) below minimum (%d) for pfit\n",
+ crtc->base.base.id, crtc->base.name,
+ adjusted_mode->crtc_vdisplay, min);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int gmch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+ int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->crtc_hdisplay == pipe_src_w &&
+ adjusted_mode->crtc_vdisplay == pipe_src_h)
+ goto out;
+
+ /*
+ * TODO: implement downscaling for i965+. Need to account
+ * for downscaling in intel_crtc_compute_pixel_rate().
+ */
+ if (adjusted_mode->crtc_hdisplay < pipe_src_w) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] pfit horizontal downscaling (%d->%d) not supported\n",
+ crtc->base.base.id, crtc->base.name,
+ pipe_src_w, adjusted_mode->crtc_hdisplay);
+ return -EINVAL;
+ }
+ if (adjusted_mode->crtc_vdisplay < pipe_src_h) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] pfit vertical downscaling (%d->%d) not supported\n",
+ crtc->base.base.id, crtc->base.name,
+ pipe_src_h, adjusted_mode->crtc_vdisplay);
+ return -EINVAL;
+ }
+
+ switch (conn_state->scaling_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ /*
+ * For centered modes, we have to calculate border widths &
+ * heights and modify the values programmed into the CRTC.
+ */
+ centre_horizontally(adjusted_mode, pipe_src_w);
+ centre_vertically(adjusted_mode, pipe_src_h);
+ border = LVDS_BORDER_ENABLE;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ if (DISPLAY_VER(display) >= 4)
+ i965_scale_aspect(crtc_state, &pfit_control);
+ else
+ i9xx_scale_aspect(crtc_state, &pfit_control,
+ &pfit_pgm_ratios, &border);
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ /*
+ * Full scaling, even if it changes the aspect ratio.
+ * Fortunately this is all done for us in hw.
+ */
+ if (pipe_src_h != adjusted_mode->crtc_vdisplay ||
+ pipe_src_w != adjusted_mode->crtc_hdisplay) {
+ pfit_control |= PFIT_ENABLE;
+ if (DISPLAY_VER(display) >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (PFIT_VERT_AUTO_SCALE |
+ PFIT_VERT_INTERP_BILINEAR |
+ PFIT_HORIZ_AUTO_SCALE |
+ PFIT_HORIZ_INTERP_BILINEAR);
+ }
+ break;
+ default:
+ MISSING_CASE(conn_state->scaling_mode);
+ return -EINVAL;
+ }
+
+ /* 965+ wants fuzzy fitting */
+ /* FIXME: handle multiple panels by failing gracefully */
+ if (DISPLAY_VER(display) >= 4)
+ pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY;
+
+out:
+ if ((pfit_control & PFIT_ENABLE) == 0) {
+ pfit_control = 0;
+ pfit_pgm_ratios = 0;
+ }
+
+ /* Make sure pre-965 set dither correctly for 18bpp panels. */
+ if (DISPLAY_VER(display) < 4 && crtc_state->pipe_bpp == 18)
+ pfit_control |= PFIT_PANEL_8TO6_DITHER_ENABLE;
+
+ crtc_state->gmch_pfit.control = pfit_control;
+ crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
+ crtc_state->gmch_pfit.lvds_border_bits = border;
+
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return 0;
+
+ return intel_gmch_pfit_check_timings(crtc_state);
+}
+
+int intel_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (HAS_GMCH(display))
+ return gmch_panel_fitting(crtc_state, conn_state);
+ else
+ return pch_panel_fitting(crtc_state, conn_state);
+}
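
The SCALE_ASPECT paths above pick pillarboxing vs. letterboxing by cross-multiplying the two aspect ratios, which keeps the comparison in integer math. A minimal standalone sketch of that decision (names mirror the driver, but this is illustrative C, not the in-tree helpers):

    #include <stdio.h>

    enum fit { FIT_PILLARBOX, FIT_LETTERBOX, FIT_AUTO };

    /* Cross-multiplying dst_w/dst_h against src_w/src_h avoids
     * floating-point ratios. */
    static enum fit pick_fit(int src_w, int src_h, int dst_w, int dst_h)
    {
            long scaled_width = (long)dst_w * src_h;
            long scaled_height = (long)src_w * dst_h;

            if (scaled_width > scaled_height)
                    return FIT_PILLARBOX;   /* centre horizontally */
            if (scaled_width < scaled_height)
                    return FIT_LETTERBOX;   /* centre vertically */
            return FIT_AUTO;                /* let hw scale both */
    }

    int main(void)
    {
            /* 4:3 source on a 16:9 panel -> pillarbox */
            printf("%d\n", pick_fit(1024, 768, 1920, 1080) == FIT_PILLARBOX);
            return 0;
    }
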
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.h b/drivers/gpu/drm/i915/display/intel_pfit.h
new file mode 100644
index 000000000000..add8d78de2c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_pfit.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_PFIT_H__
+#define __INTEL_PFIT_H__
+
+struct drm_connector_state;
+struct intel_crtc_state;
+
+int intel_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+
+#endif /* __INTEL_PFIT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 82ceede0b2b1..90efc6f64e52 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -28,10 +28,12 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_de.h"
+#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_pipe_crc.h"
#include "intel_pipe_crc_regs.h"
@@ -285,6 +287,9 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
struct drm_modeset_acquire_ctx ctx;
int ret;
+ if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ i915gm_irq_cstate_wa(dev_priv, enable);
+
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(&dev_priv->drm);
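
The new early call applies the i915g/i945gm C-state workaround before any modeset locking is taken. A freestanding sketch of the bracketing, assuming stand-in names (i915gm_irq_cstate_wa() itself is not reproduced here):

    #include <stdbool.h>
    #include <stdio.h>

    /* wa_toggle() stands in for i915gm_irq_cstate_wa(). */
    static void wa_toggle(bool enable)
    {
            printf("cstate WA %s\n", enable ? "on" : "off");
    }

    static int crc_setup(bool is_i915gm_or_i945gm, bool enable)
    {
            /* Toggle the workaround before any modeset locking, matching
             * the placement before drm_modeset_acquire_init() above. */
            if (is_i915gm_or_i945gm)
                    wa_toggle(enable);

            /* ... acquire locks, build the atomic state, commit ... */
            return 0;
    }

    int main(void)
    {
            return crc_setup(true, true);
    }
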
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index ada1792df5b3..6789b7f14095 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -20,10 +20,10 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
struct drm_framebuffer **fb,
struct i915_vma **vma)
{
- struct drm_i915_private *i915 = to_i915(this->base.dev);
+ struct intel_display *display = to_intel_display(this);
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
const struct intel_plane_state *plane_state =
@@ -48,9 +48,10 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
}
static bool
-initial_plane_phys_lmem(struct drm_i915_private *i915,
+initial_plane_phys_lmem(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
struct intel_memory_region *mem;
dma_addr_t dma_addr;
@@ -63,7 +64,7 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
pte = ioread64(gte);
if (!(pte & GEN12_GGTT_PTE_LM)) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Initial plane programming missing PTE_LM bit\n");
return false;
}
@@ -75,7 +76,7 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
else
mem = i915->mm.stolen_region;
if (!mem) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Initial plane memory region not initialized\n");
return false;
}
@@ -85,13 +86,13 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
* ever be placed in the stolen portion.
*/
if (dma_addr < mem->region.start || dma_addr > mem->region.end) {
- drm_err(&i915->drm,
+ drm_err(display->drm,
"Initial plane programming using invalid range, dma_addr=%pa (%s [%pa-%pa])\n",
&dma_addr, mem->region.name, &mem->region.start, &mem->region.end);
return false;
}
- drm_dbg(&i915->drm,
+ drm_dbg(display->drm,
"Using dma_addr=%pa, based on initial plane programming\n",
&dma_addr);
@@ -102,9 +103,10 @@ initial_plane_phys_lmem(struct drm_i915_private *i915,
}
static bool
-initial_plane_phys_smem(struct drm_i915_private *i915,
+initial_plane_phys_smem(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_memory_region *mem;
u32 base;
@@ -112,7 +114,7 @@ initial_plane_phys_smem(struct drm_i915_private *i915,
mem = i915->mm.stolen_region;
if (!mem) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Initial plane memory region not initialized\n");
return false;
}
@@ -125,19 +127,22 @@ initial_plane_phys_smem(struct drm_i915_private *i915,
}
static bool
-initial_plane_phys(struct drm_i915_private *i915,
+initial_plane_phys(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (IS_DGFX(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915))
- return initial_plane_phys_lmem(i915, plane_config);
+ return initial_plane_phys_lmem(display, plane_config);
else
- return initial_plane_phys_smem(i915, plane_config);
+ return initial_plane_phys_smem(display, plane_config);
}
static struct i915_vma *
-initial_plane_vma(struct drm_i915_private *i915,
+initial_plane_vma(struct intel_display *display,
struct intel_initial_plane_config *plane_config)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
struct drm_mm_node orig_mm = {};
@@ -149,7 +154,7 @@ initial_plane_vma(struct drm_i915_private *i915,
if (plane_config->size == 0)
return NULL;
- if (!initial_plane_phys(i915, plane_config))
+ if (!initial_plane_phys(display, plane_config))
return NULL;
phys_base = plane_config->phys_base;
@@ -168,7 +173,7 @@ initial_plane_vma(struct drm_i915_private *i915,
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
size * 2 > i915->dsm.usable_size) {
- drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");
+ drm_dbg_kms(display->drm, "Initial FB size exceeds half of stolen, discarding\n");
return NULL;
}
@@ -176,7 +181,7 @@ initial_plane_vma(struct drm_i915_private *i915,
I915_BO_ALLOC_USER |
I915_BO_PREALLOC);
if (IS_ERR(obj)) {
- drm_dbg_kms(&i915->drm, "Failed to preallocate initial FB in %s\n",
+ drm_dbg_kms(display->drm, "Failed to preallocate initial FB in %s\n",
mem->region.name);
return NULL;
}
@@ -254,7 +259,7 @@ retry:
if (drm_mm_node_allocated(&orig_mm))
drm_mm_remove_node(&orig_mm);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Initial plane fb bound to 0x%x in the ggtt (original 0x%x)\n",
i915_ggtt_offset(vma), plane_config->base);
@@ -271,8 +276,7 @@ static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_framebuffer *fb = &plane_config->fb->base;
struct i915_vma *vma;
@@ -284,13 +288,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
case I915_FORMAT_MOD_4_TILED:
break;
default:
- drm_dbg(&dev_priv->drm,
+ drm_dbg(display->drm,
"Unsupported modifier for initial FB: 0x%llx\n",
fb->modifier);
return false;
}
- vma = initial_plane_vma(dev_priv, plane_config);
+ vma = initial_plane_vma(display, plane_config);
if (!vma)
return false;
@@ -302,8 +306,8 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
- vma->obj, &mode_cmd)) {
- drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ intel_bo_to_drm_bo(vma->obj), &mode_cmd)) {
+ drm_dbg_kms(display->drm, "intel fb init failed\n");
goto err_vma;
}
@@ -410,12 +414,12 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
i915_vma_put(plane_config->vma);
}
-void intel_initial_plane_config(struct drm_i915_private *i915)
+void intel_initial_plane_config(struct intel_display *display)
{
struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
@@ -429,7 +433,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
+ display->funcs.display->get_initial_plane_config(crtc, plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -437,7 +441,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
*/
intel_find_initial_plane_obj(crtc, plane_configs);
- if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
intel_crtc_wait_for_next_vblank(crtc);
plane_config_fini(plane_config);
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h
index 64ab95239cd4..6c6aa717ed21 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.h
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h
@@ -6,8 +6,8 @@
#ifndef __INTEL_PLANE_INITIAL_H__
#define __INTEL_PLANE_INITIAL_H__
-struct drm_i915_private;
+struct intel_display;
-void intel_initial_plane_config(struct drm_i915_private *i915);
+void intel_initial_plane_config(struct intel_display *display);
#endif
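
Both plane-initial files follow the ongoing drm_i915_private to intel_display conversion: functions now take the display pointer and derive the i915 pointer locally only where GEM state is still needed. A toy model of the two-way navigation, assuming the simplified layout below (the real structs are much larger; the real helpers are to_intel_display() and to_i915()):

    #include <stddef.h>
    #include <stdio.h>

    struct drm_device { int dummy; };

    struct intel_display { struct drm_device *drm; };

    struct i915_private {
            struct drm_device drm;          /* embedded base */
            struct intel_display display;
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* display -> i915: go back through the embedded drm_device */
    static struct i915_private *to_i915(struct drm_device *drm)
    {
            return container_of(drm, struct i915_private, drm);
    }

    int main(void)
    {
            struct i915_private i915;

            i915.display.drm = &i915.drm;

            /* A function handed only the display can still reach GEM state: */
            struct i915_private *back = to_i915(i915.display.drm);
            printf("%d\n", back == &i915);
            return 0;
    }
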
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index ceaf9e3147da..975520322136 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -5,16 +5,50 @@
#include <linux/bitops.h>
-#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
+#include "intel_step.h"
#include "skl_watermark.h"
+struct pmdemand_params {
+ u16 qclk_gv_bw;
+ u8 voltage_index;
+ u8 qclk_gv_index;
+ u8 active_pipes;
+ u8 active_dbufs; /* pre-Xe3 only */
+ /* Total number of non type C active phys from active_phys_mask */
+ u8 active_phys;
+ u8 plls;
+ u16 cdclk_freq_mhz;
+ /* max from ddi_clocks[] */
+ u16 ddiclk_max;
+ u8 scalers; /* pre-Xe3 only */
+};
+
+struct intel_pmdemand_state {
+ struct intel_global_state base;
+
+ /* Maintain a persistent list of port clocks across all crtcs */
+ int ddi_clocks[I915_MAX_PIPES];
+
+ /* Maintain a persistent list of non type C phys mask */
+ u16 active_combo_phys_mask;
+
+ /* Parameters to be configured in the pmdemand registers */
+ struct pmdemand_params params;
+};
+
+struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_pmdemand_state, base);
+}
+
static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
@@ -41,10 +75,10 @@ static const struct intel_global_state_funcs intel_pmdemand_funcs = {
static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *pmdemand_state =
intel_atomic_get_global_obj_state(state,
- &i915->display.pmdemand.obj);
+ &display->pmdemand.obj);
if (IS_ERR(pmdemand_state))
return ERR_CAST(pmdemand_state);
@@ -55,10 +89,10 @@ intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *pmdemand_state =
intel_atomic_get_old_global_obj_state(state,
- &i915->display.pmdemand.obj);
+ &display->pmdemand.obj);
if (!pmdemand_state)
return NULL;
@@ -69,10 +103,10 @@ intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_global_state *pmdemand_state =
intel_atomic_get_new_global_obj_state(state,
- &i915->display.pmdemand.obj);
+ &display->pmdemand.obj);
if (!pmdemand_state)
return NULL;
@@ -80,7 +114,7 @@ intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
return to_intel_pmdemand_state(pmdemand_state);
}
-int intel_pmdemand_init(struct drm_i915_private *i915)
+int intel_pmdemand_init(struct intel_display *display)
{
struct intel_pmdemand_state *pmdemand_state;
@@ -88,32 +122,32 @@ int intel_pmdemand_init(struct drm_i915_private *i915)
if (!pmdemand_state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj,
+ intel_atomic_global_obj_init(display, &display->pmdemand.obj,
&pmdemand_state->base,
&intel_pmdemand_funcs);
- if (IS_DISPLAY_VER_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0))
+ if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
/* Wa_14016740474 */
- intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
+ intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
return 0;
}
-void intel_pmdemand_init_early(struct drm_i915_private *i915)
+void intel_pmdemand_init_early(struct intel_display *display)
{
- mutex_init(&i915->display.pmdemand.lock);
- init_waitqueue_head(&i915->display.pmdemand.waitqueue);
+ mutex_init(&display->pmdemand.lock);
+ init_waitqueue_head(&display->pmdemand.waitqueue);
}
void
-intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
+intel_pmdemand_update_phys_mask(struct intel_display *display,
struct intel_encoder *encoder,
struct intel_pmdemand_state *pmdemand_state,
bool set_bit)
{
enum phy phy;
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
if (!encoder)
@@ -131,18 +165,18 @@ intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
}
void
-intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
+intel_pmdemand_update_port_clock(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state,
enum pipe pipe, int port_clock)
{
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
pmdemand_state->ddi_clocks[pipe] = port_clock;
}
static void
-intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
+intel_pmdemand_update_max_ddiclk(struct intel_display *display,
struct intel_atomic_state *state,
struct intel_pmdemand_state *pmdemand_state)
{
@@ -152,7 +186,7 @@ intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
int i;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
- intel_pmdemand_update_port_clock(i915, pmdemand_state,
+ intel_pmdemand_update_port_clock(display, pmdemand_state,
crtc->pipe,
new_crtc_state->port_clock);
@@ -163,7 +197,7 @@ intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
}
static void
-intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
+intel_pmdemand_update_connector_phys(struct intel_display *display,
struct intel_atomic_state *state,
struct drm_connector_state *conn_state,
bool set_bit,
@@ -184,12 +218,12 @@ intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
if (!crtc_state->hw.active)
return;
- intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state,
+ intel_pmdemand_update_phys_mask(display, encoder, pmdemand_state,
set_bit);
}
static void
-intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
+intel_pmdemand_update_active_non_tc_phys(struct intel_display *display,
struct intel_atomic_state *state,
struct intel_pmdemand_state *pmdemand_state)
{
@@ -204,12 +238,12 @@ intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
continue;
/* First clear the active phys in the old connector state */
- intel_pmdemand_update_connector_phys(i915, state,
+ intel_pmdemand_update_connector_phys(display, state,
old_conn_state, false,
pmdemand_state);
/* Then set the active phys in new connector state */
- intel_pmdemand_update_connector_phys(i915, state,
+ intel_pmdemand_update_connector_phys(display, state,
new_conn_state, true,
pmdemand_state);
}
@@ -220,7 +254,7 @@ intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
}
static bool
-intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
+intel_pmdemand_encoder_has_tc_phy(struct intel_display *display,
struct intel_encoder *encoder)
{
return encoder && intel_encoder_is_tc(encoder);
@@ -229,7 +263,7 @@ intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_connector_state *old_conn_state;
struct drm_connector_state *new_conn_state;
struct drm_connector *connector;
@@ -246,8 +280,8 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
continue;
if (old_encoder == new_encoder ||
- (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) &&
- intel_pmdemand_encoder_has_tc_phy(i915, new_encoder)))
+ (intel_pmdemand_encoder_has_tc_phy(display, old_encoder) &&
+ intel_pmdemand_encoder_has_tc_phy(display, new_encoder)))
continue;
return true;
@@ -258,6 +292,7 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state, *old_bw_state;
const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
@@ -274,12 +309,16 @@ static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
if (new_dbuf_state &&
- (new_dbuf_state->active_pipes !=
- old_dbuf_state->active_pipes ||
- new_dbuf_state->enabled_slices !=
- old_dbuf_state->enabled_slices))
+ new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
return true;
+ if (DISPLAY_VER(display) < 30) {
+ if (new_dbuf_state &&
+ new_dbuf_state->enabled_slices !=
+ old_dbuf_state->enabled_slices)
+ return true;
+ }
+
new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
if (new_cdclk_state &&
@@ -299,13 +338,13 @@ static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *new_bw_state;
const struct intel_cdclk_state *new_cdclk_state;
const struct intel_dbuf_state *new_dbuf_state;
struct intel_pmdemand_state *new_pmdemand_state;
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return 0;
if (!intel_pmdemand_needs_update(state))
@@ -327,10 +366,15 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
if (IS_ERR(new_dbuf_state))
return PTR_ERR(new_dbuf_state);
- new_pmdemand_state->params.active_pipes =
- min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
- new_pmdemand_state->params.active_dbufs =
- min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
+ if (DISPLAY_VER(display) < 30) {
+ new_pmdemand_state->params.active_dbufs =
+ min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
+ new_pmdemand_state->params.active_pipes =
+ min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
+ } else {
+ new_pmdemand_state->params.active_pipes =
+ min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display));
+ }
new_cdclk_state = intel_atomic_get_cdclk_state(state);
if (IS_ERR(new_cdclk_state))
@@ -341,9 +385,9 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
new_pmdemand_state->params.cdclk_freq_mhz =
DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);
- intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state);
+ intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state);
- intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state);
+ intel_pmdemand_update_active_non_tc_phys(display, state, new_pmdemand_state);
/*
* Active_PLLs starts with 1 because of CDCLK PLL.
@@ -364,103 +408,113 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}
-static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915)
+static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
- return !(intel_de_wait_for_clear(i915,
+ return !(intel_de_wait_for_clear(display,
XELPDP_INITIATE_PMDEMAND_REQUEST(1),
XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
- intel_de_wait_for_clear(i915,
+ intel_de_wait_for_clear(display,
GEN12_DCPR_STATUS_1,
XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}
void
-intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
+intel_pmdemand_init_pmdemand_params(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state)
{
u32 reg1, reg2;
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
- mutex_lock(&i915->display.pmdemand.lock);
- if (drm_WARN_ON(&i915->drm,
- !intel_pmdemand_check_prev_transaction(i915))) {
+ mutex_lock(&display->pmdemand.lock);
+ if (drm_WARN_ON(display->drm,
+ !intel_pmdemand_check_prev_transaction(display))) {
memset(&pmdemand_state->params, 0,
sizeof(pmdemand_state->params));
goto unlock;
}
- reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
+ reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
- reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
+ reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
- /* Set 1*/
pmdemand_state->params.qclk_gv_bw =
REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
pmdemand_state->params.voltage_index =
REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
pmdemand_state->params.qclk_gv_index =
REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
- pmdemand_state->params.active_pipes =
- REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
- pmdemand_state->params.active_dbufs =
- REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
pmdemand_state->params.active_phys =
REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);
- /* Set 2*/
pmdemand_state->params.cdclk_freq_mhz =
REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
pmdemand_state->params.ddiclk_max =
REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
- pmdemand_state->params.scalers =
- REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
+
+ if (DISPLAY_VER(display) >= 30) {
+ pmdemand_state->params.active_pipes =
+ REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
+ } else {
+ pmdemand_state->params.active_pipes =
+ REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
+ pmdemand_state->params.active_dbufs =
+ REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
+
+ pmdemand_state->params.scalers =
+ REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
+ }
unlock:
- mutex_unlock(&i915->display.pmdemand.lock);
+ mutex_unlock(&display->pmdemand.lock);
}
-static bool intel_pmdemand_req_complete(struct drm_i915_private *i915)
+static bool intel_pmdemand_req_complete(struct intel_display *display)
{
- return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
+ return !(intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
XELPDP_PMDEMAND_REQ_ENABLE);
}
-static void intel_pmdemand_wait(struct drm_i915_private *i915)
+static void intel_pmdemand_wait(struct intel_display *display)
{
- if (!wait_event_timeout(i915->display.pmdemand.waitqueue,
- intel_pmdemand_req_complete(i915),
+ if (!wait_event_timeout(display->pmdemand.waitqueue,
+ intel_pmdemand_req_complete(display),
msecs_to_jiffies_timeout(10)))
- drm_err(&i915->drm,
+ drm_err(display->drm,
"timed out waiting for Punit PM Demand Response\n");
}
/* Required to be programmed during Display Init Sequences. */
-void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
+void intel_pmdemand_program_dbuf(struct intel_display *display,
u8 dbuf_slices)
{
u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);
- mutex_lock(&i915->display.pmdemand.lock);
- if (drm_WARN_ON(&i915->drm,
- !intel_pmdemand_check_prev_transaction(i915)))
+ /* PM Demand only tracks active dbufs on pre-Xe3 platforms */
+ if (DISPLAY_VER(display) >= 30)
+ return;
+
+ mutex_lock(&display->pmdemand.lock);
+ if (drm_WARN_ON(display->drm,
+ !intel_pmdemand_check_prev_transaction(display)))
goto unlock;
- intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
+ intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
XELPDP_PMDEMAND_DBUFS_MASK,
REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
- intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
+ intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
XELPDP_PMDEMAND_REQ_ENABLE);
- intel_pmdemand_wait(i915);
+ intel_pmdemand_wait(display);
unlock:
- mutex_unlock(&i915->display.pmdemand.lock);
+ mutex_unlock(&display->pmdemand.lock);
}
static void
-intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
+intel_pmdemand_update_params(struct intel_display *display,
+ const struct intel_pmdemand_state *new,
const struct intel_pmdemand_state *old,
u32 *reg1, u32 *reg2, bool serialized)
{
@@ -495,21 +549,27 @@ intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
- update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
- update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);
/* Set 2*/
update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
- update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);
+ if (DISPLAY_VER(display) >= 30) {
+ update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
+ } else {
+ update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
+ update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
+
+ update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
+ }
+
#undef update_reg
}
static void
-intel_pmdemand_program_params(struct drm_i915_private *i915,
+intel_pmdemand_program_params(struct intel_display *display,
const struct intel_pmdemand_state *new,
const struct intel_pmdemand_state *old,
bool serialized)
@@ -518,28 +578,28 @@ intel_pmdemand_program_params(struct drm_i915_private *i915,
u32 reg1, mod_reg1;
u32 reg2, mod_reg2;
- mutex_lock(&i915->display.pmdemand.lock);
- if (drm_WARN_ON(&i915->drm,
- !intel_pmdemand_check_prev_transaction(i915)))
+ mutex_lock(&display->pmdemand.lock);
+ if (drm_WARN_ON(display->drm,
+ !intel_pmdemand_check_prev_transaction(display)))
goto unlock;
- reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
+ reg1 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
mod_reg1 = reg1;
- reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
+ reg2 = intel_de_read(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
mod_reg2 = reg2;
- intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2,
+ intel_pmdemand_update_params(display, new, old, &mod_reg1, &mod_reg2,
serialized);
if (reg1 != mod_reg1) {
- intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
+ intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
mod_reg1);
changed = true;
}
if (reg2 != mod_reg2) {
- intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ intel_de_write(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
mod_reg2);
changed = true;
}
@@ -548,17 +608,17 @@ intel_pmdemand_program_params(struct drm_i915_private *i915,
if (!changed)
goto unlock;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"initate pmdemand request values: (0x%x 0x%x)\n",
mod_reg1, mod_reg2);
- intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
+ intel_de_rmw(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
XELPDP_PMDEMAND_REQ_ENABLE);
- intel_pmdemand_wait(i915);
+ intel_pmdemand_wait(display);
unlock:
- mutex_unlock(&i915->display.pmdemand.lock);
+ mutex_unlock(&display->pmdemand.lock);
}
static bool
@@ -570,13 +630,13 @@ intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_pmdemand_state *new_pmdemand_state =
intel_atomic_get_new_pmdemand_state(state);
const struct intel_pmdemand_state *old_pmdemand_state =
intel_atomic_get_old_pmdemand_state(state);
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
if (!new_pmdemand_state ||
@@ -586,20 +646,20 @@ void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_pmdemand_state->base.changed);
- intel_pmdemand_program_params(i915, new_pmdemand_state,
+ intel_pmdemand_program_params(display, new_pmdemand_state,
old_pmdemand_state,
intel_atomic_global_state_is_serialized(state));
}
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_pmdemand_state *new_pmdemand_state =
intel_atomic_get_new_pmdemand_state(state);
const struct intel_pmdemand_state *old_pmdemand_state =
intel_atomic_get_old_pmdemand_state(state);
- if (DISPLAY_VER(i915) < 14)
+ if (DISPLAY_VER(display) < 14)
return;
if (!new_pmdemand_state ||
@@ -609,6 +669,6 @@ void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
WARN_ON(!new_pmdemand_state->base.changed);
- intel_pmdemand_program_params(i915, new_pmdemand_state, NULL,
+ intel_pmdemand_program_params(display, new_pmdemand_state, NULL,
intel_atomic_global_state_is_serialized(state));
}
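
For context on the update_reg() calls reshuffled above: each is a masked read-modify-write that splices one field into the staged register image. A freestanding sketch with example masks (the real XELPDP_PMDEMAND_*/XE3_* layouts differ):

    #include <stdint.h>
    #include <stdio.h>

    /* Example masks only; not the real register layout. */
    #define PIPES_MASK   0x0000000fu
    #define DBUFS_MASK   0x000000f0u

    static uint32_t field_prep(uint32_t mask, uint32_t val)
    {
            /* __builtin_ctz is a gcc/clang builtin (mask must be nonzero) */
            return (val << __builtin_ctz(mask)) & mask;
    }

    static void update_field(uint32_t *reg, uint32_t mask, uint32_t val)
    {
            *reg = (*reg & ~mask) | field_prep(mask, val);
    }

    int main(void)
    {
            uint32_t reg = 0xffffffffu;

            update_field(&reg, PIPES_MASK, 2);  /* 2 active pipes */
            update_field(&reg, DBUFS_MASK, 1);  /* 1 dbuf slice  */
            printf("0x%08x\n", reg);            /* 0xffffff12 */
            return 0;
    }
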
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h
index 128fd61f8f14..821ef2c4134a 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.h
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h
@@ -6,58 +6,31 @@
#ifndef __INTEL_PMDEMAND_H__
#define __INTEL_PMDEMAND_H__
-#include "intel_display_limits.h"
-#include "intel_global_state.h"
+#include <linux/types.h>
-struct drm_i915_private;
+enum pipe;
struct intel_atomic_state;
struct intel_crtc_state;
+struct intel_display;
struct intel_encoder;
+struct intel_global_state;
struct intel_plane_state;
+struct intel_pmdemand_state;
-struct pmdemand_params {
- u16 qclk_gv_bw;
- u8 voltage_index;
- u8 qclk_gv_index;
- u8 active_pipes;
- u8 active_dbufs;
- /* Total number of non type C active phys from active_phys_mask */
- u8 active_phys;
- u8 plls;
- u16 cdclk_freq_mhz;
- /* max from ddi_clocks[] */
- u16 ddiclk_max;
- u8 scalers;
-};
+struct intel_pmdemand_state *to_intel_pmdemand_state(struct intel_global_state *obj_state);
-struct intel_pmdemand_state {
- struct intel_global_state base;
-
- /* Maintain a persistent list of port clocks across all crtcs */
- int ddi_clocks[I915_MAX_PIPES];
-
- /* Maintain a persistent list of non type C phys mask */
- u16 active_combo_phys_mask;
-
- /* Parameters to be configured in the pmdemand registers */
- struct pmdemand_params params;
-};
-
-#define to_intel_pmdemand_state(global_state) \
- container_of_const((global_state), struct intel_pmdemand_state, base)
-
-void intel_pmdemand_init_early(struct drm_i915_private *i915);
-int intel_pmdemand_init(struct drm_i915_private *i915);
-void intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
+void intel_pmdemand_init_early(struct intel_display *display);
+int intel_pmdemand_init(struct intel_display *display);
+void intel_pmdemand_init_pmdemand_params(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state);
-void intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
+void intel_pmdemand_update_port_clock(struct intel_display *display,
struct intel_pmdemand_state *pmdemand_state,
enum pipe pipe, int port_clock);
-void intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
+void intel_pmdemand_update_phys_mask(struct intel_display *display,
struct intel_encoder *encoder,
struct intel_pmdemand_state *pmdemand_state,
bool clear_bit);
-void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
+void intel_pmdemand_program_dbuf(struct intel_display *display,
u8 dbuf_slices);
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state);
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state);
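
The header shrink is the standard opaque-type move: struct pmdemand_params and struct intel_pmdemand_state migrate into the .c file, the header keeps only forward declarations, and the container_of() macro becomes the out-of-line to_intel_pmdemand_state(). A compressed single-file sketch of that pattern, with abbreviated names and fields:

    #include <stddef.h>
    #include <stdio.h>

    /* What remains in the header: forward declaration + prototype. */
    struct base_state { int changed; };
    struct pm_state;
    static struct pm_state *to_pm_state(struct base_state *s);

    /* What moved into the .c file: the full definition and the accessor. */
    struct pm_state {
            struct base_state base;
            int params;
    };

    static struct pm_state *to_pm_state(struct base_state *s)
    {
            return (struct pm_state *)((char *)s - offsetof(struct pm_state, base));
    }

    int main(void)
    {
            struct pm_state pm = { .base = { .changed = 1 }, .params = 7 };

            printf("%d\n", to_pm_state(&pm.base)->params);  /* 7 */
            return 0;
    }
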
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index feddc30e3375..eb35f0249f2b 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -3,6 +3,8 @@
* Copyright © 2020 Intel Corporation
*/
+#include <linux/debugfs.h>
+
#include "g4x_dp.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -27,11 +29,10 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
static const char *pps_name(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_pps *pps = &intel_dp->pps;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- switch (pps->pps_pipe) {
+ if (display->platform.valleyview || display->platform.cherryview) {
+ switch (pps->vlv_pps_pipe) {
case INVALID_PIPE:
/*
* FIXME would be nice if we can guarantee
@@ -43,7 +44,7 @@ static const char *pps_name(struct intel_dp *intel_dp)
case PIPE_B:
return "PPS B";
default:
- MISSING_CASE(pps->pps_pipe);
+ MISSING_CASE(pps->vlv_pps_pipe);
break;
}
} else {
@@ -68,7 +69,7 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
intel_wakeref_t wakeref;
/*
- * See intel_pps_reset_all() why we need a power domain reference here.
+ * See vlv_pps_reset_all() why we need a power domain reference here.
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
mutex_lock(&display->pps.mutex);
@@ -85,7 +86,7 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
mutex_unlock(&display->pps.mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
- return 0;
+ return NULL;
}
static void
@@ -94,7 +95,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum pipe pipe = intel_dp->pps.pps_pipe;
+ enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = vlv_pipe_to_phy(pipe);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
@@ -120,7 +121,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
- if (IS_CHERRYVIEW(dev_priv))
+ if (display->platform.cherryview)
DP |= DP_PIPE_SEL_CHV(pipe);
else
DP |= DP_PIPE_SEL(pipe);
@@ -132,8 +133,8 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* So enable temporarily it if it's not already enabled.
*/
if (!pll_enabled) {
- release_cl_override = IS_CHERRYVIEW(dev_priv) &&
- !chv_phy_powergate_ch(dev_priv, phy, ch, true);
+ release_cl_override = display->platform.cherryview &&
+ !chv_phy_powergate_ch(display, phy, ch, true);
if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
drm_err(display->drm,
@@ -162,7 +163,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
vlv_force_pll_off(dev_priv, pipe);
if (release_cl_override)
- chv_phy_powergate_ch(dev_priv, phy, ch, false);
+ chv_phy_powergate_ch(display, phy, ch, false);
}
}
@@ -180,18 +181,18 @@ static enum pipe vlv_find_free_pps(struct intel_display *display)
if (encoder->type == INTEL_OUTPUT_EDP) {
drm_WARN_ON(display->drm,
- intel_dp->pps.active_pipe != INVALID_PIPE &&
- intel_dp->pps.active_pipe !=
- intel_dp->pps.pps_pipe);
+ intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
+ intel_dp->pps.vlv_active_pipe !=
+ intel_dp->pps.vlv_pps_pipe);
- if (intel_dp->pps.pps_pipe != INVALID_PIPE)
- pipes &= ~(1 << intel_dp->pps.pps_pipe);
+ if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.vlv_pps_pipe);
} else {
drm_WARN_ON(display->drm,
- intel_dp->pps.pps_pipe != INVALID_PIPE);
+ intel_dp->pps.vlv_pps_pipe != INVALID_PIPE);
- if (intel_dp->pps.active_pipe != INVALID_PIPE)
- pipes &= ~(1 << intel_dp->pps.active_pipe);
+ if (intel_dp->pps.vlv_active_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps.vlv_active_pipe);
}
}
@@ -213,11 +214,11 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
/* We should never land here with regular DP ports */
drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
- drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
- intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
+ drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE &&
+ intel_dp->pps.vlv_active_pipe != intel_dp->pps.vlv_pps_pipe);
- if (intel_dp->pps.pps_pipe != INVALID_PIPE)
- return intel_dp->pps.pps_pipe;
+ if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE)
+ return intel_dp->pps.vlv_pps_pipe;
pipe = vlv_find_free_pps(display);
@@ -229,7 +230,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
pipe = PIPE_A;
vlv_steal_power_sequencer(display, pipe);
- intel_dp->pps.pps_pipe = pipe;
+ intel_dp->pps.vlv_pps_pipe = pipe;
drm_dbg_kms(display->drm,
"picked %s for [ENCODER:%d:%s]\n",
@@ -246,7 +247,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
*/
vlv_power_sequencer_kick(intel_dp);
- return intel_dp->pps.pps_pipe;
+ return intel_dp->pps.vlv_pps_pipe;
}
static int
@@ -260,10 +261,10 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
/* We should never land here with regular DP ports */
drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
- if (!intel_dp->pps.pps_reset)
+ if (!intel_dp->pps.bxt_pps_reset)
return pps_idx;
- intel_dp->pps.pps_reset = false;
+ intel_dp->pps.bxt_pps_reset = false;
/*
* Only the HW needs to be reprogrammed, the SW state is fixed and
@@ -325,19 +326,19 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* try to find a pipe with this port selected */
/* first pick one where the panel is on */
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
- pps_has_pp_on);
+ intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
+ pps_has_pp_on);
/* didn't find one? pick one where vdd is on */
- if (intel_dp->pps.pps_pipe == INVALID_PIPE)
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
- pps_has_vdd_on);
+ if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
+ intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
+ pps_has_vdd_on);
/* didn't find one? pick one with just the correct port */
- if (intel_dp->pps.pps_pipe == INVALID_PIPE)
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
- pps_any);
+ if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
+ intel_dp->pps.vlv_pps_pipe = vlv_initial_pps_pipe(display, port,
+ pps_any);
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
- if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
+ if (intel_dp->pps.vlv_pps_pipe == INVALID_PIPE) {
drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] no initial power sequencer\n",
dig_port->base.base.base.id, dig_port->base.base.name);
@@ -354,10 +355,10 @@ static int intel_num_pps(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ if (display->platform.valleyview || display->platform.cherryview)
return 2;
- if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ if (display->platform.geminilake || display->platform.broxton)
return 2;
if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
@@ -404,11 +405,10 @@ pps_initial_setup(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
lockdep_assert_held(&display->pps.mutex);
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
vlv_initial_power_sequencer_setup(intel_dp);
return true;
}
@@ -446,21 +446,17 @@ pps_initial_setup(struct intel_dp *intel_dp)
return intel_pps_is_valid(intel_dp);
}
-void intel_pps_reset_all(struct intel_display *display)
+void vlv_pps_reset_all(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
- if (drm_WARN_ON(display->drm, !IS_LP(dev_priv)))
- return;
-
if (!HAS_DISPLAY(display))
return;
/*
* We can't grab pps_mutex here due to deadlock with power_domain
* mutex when power_domain functions are called while holding pps_mutex.
- * That also means that in order to use pps_pipe the code needs to
+ * That also means that in order to use vlv_pps_pipe the code needs to
* hold both a power domain reference and pps_mutex, and the power domain
* reference get/put must be done while _not_ holding pps_mutex.
* pps_{lock,unlock}() do these steps in the correct order, so one
@@ -470,16 +466,27 @@ void intel_pps_reset_all(struct intel_display *display)
for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_WARN_ON(display->drm,
- intel_dp->pps.active_pipe != INVALID_PIPE);
+ drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
+ }
+}
+
+void bxt_pps_reset_all(struct intel_display *display)
+{
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ /* See vlv_pps_reset_all() for why we can't grab pps_mutex here. */
+
+ for_each_intel_dp(display->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (DISPLAY_VER(display) >= 9)
- intel_dp->pps.pps_reset = true;
- else
- intel_dp->pps.pps_pipe = INVALID_PIPE;
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ intel_dp->pps.bxt_pps_reset = true;
}
}
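
With the reset path split per platform, each helper touches only its own field instead of branching on DISPLAY_VER inside one loop. The shape, reduced to the two renamed members (vlv_pps_pipe, bxt_pps_reset; encoder iteration elided):

    #include <stdbool.h>

    enum pipe { INVALID_PIPE = -1, PIPE_A, PIPE_B };

    struct pps {
            enum pipe vlv_pps_pipe;   /* vlv/chv: which sequencer we own */
            bool bxt_pps_reset;       /* bxt/glk+: reprogram hw on next use */
    };

    static void vlv_reset(struct pps *p) { p->vlv_pps_pipe = INVALID_PIPE; }
    static void bxt_reset(struct pps *p) { p->bxt_pps_reset = true; }

    int main(void)
    {
            struct pps p = { PIPE_B, false };

            vlv_reset(&p);
            bxt_reset(&p);
            return p.vlv_pps_pipe == INVALID_PIPE && p.bxt_pps_reset ? 0 : 1;
    }
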
@@ -500,9 +507,9 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
memset(regs, 0, sizeof(*regs));
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (display->platform.valleyview || display->platform.cherryview)
pps_idx = vlv_power_sequencer_pipe(intel_dp);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ else if (display->platform.geminilake || display->platform.broxton)
pps_idx = bxt_power_sequencer_idx(intel_dp);
else
pps_idx = intel_dp->pps.pps_idx;
@@ -513,7 +520,7 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
regs->pp_off = PP_OFF_DELAYS(display, pps_idx);
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
+ if (display->platform.geminilake || display->platform.broxton ||
INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
regs->pp_div = INVALID_MMIO_REG;
else
@@ -543,12 +550,11 @@ _pp_stat_reg(struct intel_dp *intel_dp)
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
lockdep_assert_held(&display->pps.mutex);
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp->pps.pps_pipe == INVALID_PIPE)
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
return false;
return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
@@ -557,12 +563,11 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
lockdep_assert_held(&display->pps.mutex);
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp->pps.pps_pipe == INVALID_PIPE)
+ if ((display->platform.valleyview || display->platform.cherryview) &&
+ intel_dp->pps.vlv_pps_pipe == INVALID_PIPE)
return false;
return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
@@ -663,23 +668,24 @@ static void wait_panel_power_cycle(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ktime_t panel_power_on_time;
- s64 panel_power_off_duration;
-
- drm_dbg_kms(display->drm,
- "[ENCODER:%d:%s] %s wait for panel power cycle\n",
- dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(intel_dp));
+ s64 panel_power_off_duration, remaining;
/* take the difference of current time and panel power off time
- * and then make panel wait for t11_t12 if needed. */
+ * and then make panel wait for power_cycle if needed. */
panel_power_on_time = ktime_get_boottime();
panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
+ remaining = max(0, intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power cycle (%lld ms remaining)\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(intel_dp), remaining);
+
/* When we disable the VDD override bit last we have to do the manual
* wait. */
- if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
- wait_remaining_ms_from_jiffies(jiffies,
- intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
+ if (remaining)
+ wait_remaining_ms_from_jiffies(jiffies, remaining);
wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
@@ -792,7 +798,8 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
}
/*
- * Must be paired with intel_pps_off().
+ * Must be paired with intel_pps_vdd_off() or - to disable
+ * both VDD and panel power - intel_pps_off().
* Nested calls to these functions are not allowed since
* we drop the lock. Caller must use some higher level
* locking to prevent nested calls from other threads.
@@ -800,7 +807,6 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
bool vdd;
@@ -810,10 +816,10 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp)
vdd = false;
with_intel_pps_lock(intel_dp, wakeref)
vdd = intel_pps_vdd_on_unlocked(intel_dp);
- I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(intel_dp));
+ INTEL_DISPLAY_STATE_WARN(display, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name,
+ pps_name(intel_dp));
}
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
@@ -852,8 +858,10 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
intel_de_read(display, pp_stat_reg),
intel_de_read(display, pp_ctrl_reg));
- if ((pp & PANEL_POWER_ON) == 0)
+ if ((pp & PANEL_POWER_ON) == 0) {
intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+ intel_dp_invalidate_source_oui(intel_dp);
+ }
intel_display_power_put(dev_priv,
intel_aux_power_domain(dig_port),
@@ -920,18 +928,17 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
- I915_STATE_WARN(dev_priv, !intel_dp->pps.want_panel_vdd,
- "[ENCODER:%d:%s] %s VDD not forced on",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(intel_dp));
+ INTEL_DISPLAY_STATE_WARN(display, !intel_dp->pps.want_panel_vdd,
+ "[ENCODER:%d:%s] %s VDD not forced on",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name,
+ pps_name(intel_dp));
intel_dp->pps.want_panel_vdd = false;
@@ -941,10 +948,20 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
edp_panel_vdd_schedule_off(intel_dp);
}
+void intel_pps_vdd_off(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_pps_vdd_off_unlocked(intel_dp, false);
+}
+
void intel_pps_on_unlocked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -969,7 +986,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ilk_get_pp_control(intel_dp);
- if (IS_IRONLAKE(dev_priv)) {
+ if (display->platform.ironlake) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
intel_de_write(display, pp_ctrl_reg, pp);
@@ -985,7 +1002,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
pp |= PANEL_POWER_ON;
- if (!IS_IRONLAKE(dev_priv))
+ if (!display->platform.ironlake)
pp |= PANEL_POWER_RESET;
intel_de_write(display, pp_ctrl_reg, pp);
@@ -998,7 +1015,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);
- if (IS_IRONLAKE(dev_priv)) {
+ if (display->platform.ironlake) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
intel_de_write(display, pp_ctrl_reg, pp);
intel_de_posting_read(display, pp_ctrl_reg);
@@ -1054,6 +1071,8 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
wait_panel_off(intel_dp);
intel_dp->pps.panel_power_off_time = ktime_get_boottime();
+ intel_dp_invalidate_source_oui(intel_dp);
+
/* We got a reference when we enabled the VDD. */
intel_display_power_put(dev_priv,
intel_aux_power_domain(dig_port),
@@ -1139,7 +1158,7 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
return;
drm_dbg_kms(display->drm, "panel power control backlight %s\n",
- enable ? "enable" : "disable");
+ str_enable_disable(enable));
if (enable)
intel_pps_backlight_on(intel_dp);
@@ -1151,10 +1170,10 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- enum pipe pipe = intel_dp->pps.pps_pipe;
+ enum pipe pipe = intel_dp->pps.vlv_pps_pipe;
i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);
- drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+ drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
return;
@@ -1177,7 +1196,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
intel_de_write(display, pp_on_reg, 0);
intel_de_posting_read(display, pp_on_reg);
- intel_dp->pps.pps_pipe = INVALID_PIPE;
+ intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
}
static void vlv_steal_power_sequencer(struct intel_display *display,
@@ -1190,12 +1209,12 @@ static void vlv_steal_power_sequencer(struct intel_display *display,
for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_WARN(display->drm, intel_dp->pps.active_pipe == pipe,
+ drm_WARN(display->drm, intel_dp->pps.vlv_active_pipe == pipe,
"stealing PPS %c from active [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
- if (intel_dp->pps.pps_pipe != pipe)
+ if (intel_dp->pps.vlv_pps_pipe != pipe)
continue;
drm_dbg_kms(display->drm,
@@ -1208,8 +1227,59 @@ static void vlv_steal_power_sequencer(struct intel_display *display,
}
}
-void vlv_pps_init(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ enum pipe pipe;
+
+ if (g4x_dp_port_enabled(dev_priv, intel_dp->output_reg,
+ encoder->port, &pipe))
+ return pipe;
+
+ return INVALID_PIPE;
+}
+
+/* Call on all DP, not just eDP */
+void vlv_pps_pipe_init(struct intel_dp *intel_dp)
+{
+ intel_dp->pps.vlv_pps_pipe = INVALID_PIPE;
+ intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
+}
+
+/* Call on all DP, not just eDP */
+void vlv_pps_pipe_reset(struct intel_dp *intel_dp)
+{
+ intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
+}
+
+enum pipe vlv_pps_backlight_initial_pipe(struct intel_dp *intel_dp)
+{
+ enum pipe pipe;
+
+ /*
+ * Figure out the current pipe for the initial backlight setup. If the
+ * current pipe isn't valid, try the PPS pipe, and if that fails just
+ * assume pipe A.
+ */
+ pipe = vlv_active_pipe(intel_dp);
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = intel_dp->pps.vlv_pps_pipe;
+
+ if (pipe != PIPE_A && pipe != PIPE_B)
+ pipe = PIPE_A;
+
+ return pipe;
+}
+
+/* Call on all DP, not just eDP */
+void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1217,10 +1287,10 @@ void vlv_pps_init(struct intel_encoder *encoder,
lockdep_assert_held(&display->pps.mutex);
- drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+ drm_WARN_ON(display->drm, intel_dp->pps.vlv_active_pipe != INVALID_PIPE);
- if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
- intel_dp->pps.pps_pipe != crtc->pipe) {
+ if (intel_dp->pps.vlv_pps_pipe != INVALID_PIPE &&
+ intel_dp->pps.vlv_pps_pipe != crtc->pipe) {
/*
* If another power sequencer was being used on this
* port previously make sure to turn off vdd there while
@@ -1235,13 +1305,13 @@ void vlv_pps_init(struct intel_encoder *encoder,
*/
vlv_steal_power_sequencer(display, crtc->pipe);
- intel_dp->pps.active_pipe = crtc->pipe;
+ intel_dp->pps.vlv_active_pipe = crtc->pipe;
if (!intel_dp_is_edp(intel_dp))
return;
/* now it's all ours */
- intel_dp->pps.pps_pipe = crtc->pipe;
+ intel_dp->pps.vlv_pps_pipe = crtc->pipe;
drm_dbg_kms(display->drm,
"initializing %s for [ENCODER:%d:%s]\n",
@@ -1253,6 +1323,18 @@ void vlv_pps_init(struct intel_encoder *encoder,
pps_init_registers(intel_dp, true);
}
+/* Call on all DP, not just eDP */
+void vlv_pps_port_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	intel_wakeref_t wakeref;
+
+ with_intel_pps_lock(intel_dp, wakeref)
+ intel_dp->pps.vlv_active_pipe = INVALID_PIPE;
+}
+
static void pps_vdd_init(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1306,10 +1388,10 @@ static void pps_init_timestamps(struct intel_dp *intel_dp)
}
static void
-intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
+intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct intel_pps_delays *seq)
{
struct intel_display *display = to_intel_display(intel_dp);
- u32 pp_on, pp_off, pp_ctl;
+ u32 pp_on, pp_off, pp_ctl, power_cycle_delay;
struct pps_registers regs;
intel_pps_get_registers(intel_dp, &regs);
@@ -1324,59 +1406,77 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
pp_off = intel_de_read(display, regs.pp_off);
/* Pull timing values out of registers */
- seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
- seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
- seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
- seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
+ seq->power_up = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->backlight_on = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->backlight_off = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->power_down = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
if (i915_mmio_reg_valid(regs.pp_div)) {
u32 pp_div;
pp_div = intel_de_read(display, regs.pp_div);
- seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
+ power_cycle_delay = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div);
} else {
- seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
+ power_cycle_delay = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl);
}
+
+ /* hardware wants <delay>+1 in 100ms units */
+ seq->power_cycle = power_cycle_delay ? (power_cycle_delay - 1) * 1000 : 0;
}
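
To make the decode above concrete: the hardware stores the power cycle delay as <delay>+1 in 100 ms units, while the driver keeps all delays in 100 us PPS units. A raw field value of 6 therefore decodes to (6 - 1) * 1000 = 5000 PPS units, i.e. 500 ms, and a raw 0 is clamped to 0 instead of underflowing. A minimal standalone sketch of the same arithmetic (the helper name is illustrative, not part of the patch):

	/* hw stores <delay>+1 in 100ms units; sw wants 100us PPS units */
	static int decode_power_cycle(u32 raw)
	{
		return raw ? (raw - 1) * 1000 : 0;
	}
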
static void
intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
- const struct edp_power_seq *seq)
+ const struct intel_pps_delays *seq)
{
struct intel_display *display = to_intel_display(intel_dp);
drm_dbg_kms(display->drm,
- "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- state_name,
- seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+ "%s power_up %d backlight_on %d backlight_off %d power_down %d power_cycle %d\n",
+ state_name, seq->power_up, seq->backlight_on,
+ seq->backlight_off, seq->power_down, seq->power_cycle);
}
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct edp_power_seq hw;
- struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
+ struct intel_pps_delays hw;
+ struct intel_pps_delays *sw = &intel_dp->pps.pps_delays;
intel_pps_readout_hw_state(intel_dp, &hw);
- if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
- hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+ if (hw.power_up != sw->power_up ||
+ hw.backlight_on != sw->backlight_on ||
+ hw.backlight_off != sw->backlight_off ||
+ hw.power_down != sw->power_down ||
+ hw.power_cycle != sw->power_cycle) {
drm_err(display->drm, "PPS state mismatch\n");
intel_pps_dump_state(intel_dp, "sw", sw);
intel_pps_dump_state(intel_dp, "hw", &hw);
}
}
-static bool pps_delays_valid(struct edp_power_seq *delays)
+static bool pps_delays_valid(struct intel_pps_delays *delays)
+{
+ return delays->power_up || delays->backlight_on || delays->backlight_off ||
+ delays->power_down || delays->power_cycle;
+}
+
+static int msecs_to_pps_units(int msecs)
{
- return delays->t1_t3 || delays->t8 || delays->t9 ||
- delays->t10 || delays->t11_t12;
+ /* PPS uses 100us units */
+ return msecs * 10;
+}
+
+static int pps_units_to_msecs(int val)
+{
+ /* PPS uses 100us units */
+ return DIV_ROUND_UP(val, 10);
}
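
The two helpers above pin the unit convention down in one place: PPS registers tick in 100 us units, so msecs_to_pps_units(1300) yields 13000 and pps_units_to_msecs(13000) yields 1300 again, while DIV_ROUND_UP makes a partial unit round up instead of silently truncating. A quick round-trip sketch:

	int units = msecs_to_pps_units(1300);      /* 13000 PPS units */
	int msecs = pps_units_to_msecs(units);     /* 1300 ms */
	int up    = pps_units_to_msecs(units + 1); /* 1301 ms: rounds up */

The eDP 1.3 spec limits further down then read as plain milliseconds (210, 50, 50, 500, 510) instead of the old hand-multiplied 210 * 10 style.
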
static void pps_init_delays_bios(struct intel_dp *intel_dp,
- struct edp_power_seq *bios)
+ struct intel_pps_delays *bios)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1391,7 +1491,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp,
}
static void pps_init_delays_vbt(struct intel_dp *intel_dp,
- struct edp_power_seq *vbt)
+ struct intel_pps_delays *vbt)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
@@ -1407,39 +1507,28 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
* seems sufficient to avoid this problem.
*/
if (intel_has_quirk(display, QUIRK_INCREASE_T12_DELAY)) {
- vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
+ vbt->power_cycle = max_t(u16, vbt->power_cycle, msecs_to_pps_units(1300));
drm_dbg_kms(display->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
- vbt->t11_t12);
+ vbt->power_cycle);
}
- /* T11_T12 delay is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- vbt->t11_t12 += 100 * 10;
-
intel_pps_dump_state(intel_dp, "vbt", vbt);
}
static void pps_init_delays_spec(struct intel_dp *intel_dp,
- struct edp_power_seq *spec)
+ struct intel_pps_delays *spec)
{
struct intel_display *display = to_intel_display(intel_dp);
lockdep_assert_held(&display->pps.mutex);
- /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
- * our hw here, which are all in 100usec. */
- spec->t1_t3 = 210 * 10;
- spec->t8 = 50 * 10; /* no limit for t8, use t7 instead */
- spec->t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
- spec->t10 = 500 * 10;
- /* This one is special and actually in units of 100ms, but zero
- * based in the hw (so we need to add 100 ms). But the sw vbt
- * table multiplies it with 1000 to make it in units of 100usec,
- * too. */
- spec->t11_t12 = (510 + 100) * 10;
+ /* Upper limits from eDP 1.3 spec */
+ spec->power_up = msecs_to_pps_units(10 + 200); /* T1+T3 */
+ spec->backlight_on = msecs_to_pps_units(50); /* no limit for T8, use T7 instead */
+ spec->backlight_off = msecs_to_pps_units(50); /* no limit for T9, make it symmetric with T8 */
+ spec->power_down = msecs_to_pps_units(500); /* T10 */
+ spec->power_cycle = msecs_to_pps_units(10 + 500); /* T11+T12 */
intel_pps_dump_state(intel_dp, "spec", spec);
}
@@ -1447,7 +1536,7 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp,
static void pps_init_delays(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct edp_power_seq cur, vbt, spec,
+ struct intel_pps_delays cur, vbt, spec,
*final = &intel_dp->pps.pps_delays;
lockdep_assert_held(&display->pps.mutex);
@@ -1465,20 +1554,18 @@ static void pps_init_delays(struct intel_dp *intel_dp)
#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
spec.field : \
max(cur.field, vbt.field))
- assign_final(t1_t3);
- assign_final(t8);
- assign_final(t9);
- assign_final(t10);
- assign_final(t11_t12);
+ assign_final(power_up);
+ assign_final(backlight_on);
+ assign_final(backlight_off);
+ assign_final(power_down);
+ assign_final(power_cycle);
#undef assign_final
-#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
- intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
- intel_dp->pps.backlight_on_delay = get_delay(t8);
- intel_dp->pps.backlight_off_delay = get_delay(t9);
- intel_dp->pps.panel_power_down_delay = get_delay(t10);
- intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
-#undef get_delay
+ intel_dp->pps.panel_power_up_delay = pps_units_to_msecs(final->power_up);
+ intel_dp->pps.backlight_on_delay = pps_units_to_msecs(final->backlight_on);
+ intel_dp->pps.backlight_off_delay = pps_units_to_msecs(final->backlight_off);
+ intel_dp->pps.panel_power_down_delay = pps_units_to_msecs(final->power_down);
+ intel_dp->pps.panel_power_cycle_delay = pps_units_to_msecs(final->power_cycle);
drm_dbg_kms(display->drm,
"panel power up delay %d, power down delay %d, power cycle delay %d\n",
@@ -1492,19 +1579,20 @@ static void pps_init_delays(struct intel_dp *intel_dp)
/*
* We override the HW backlight delays to 1 because we do manual waits
- * on them. For T8, even BSpec recommends doing it. For T9, if we
- * don't do this, we'll end up waiting for the backlight off delay
- * twice: once when we do the manual sleep, and once when we disable
- * the panel and wait for the PP_STATUS bit to become zero.
+ * on them. For backlight_on, even BSpec recommends doing it. For
+ * backlight_off, if we don't do this, we'll end up waiting for the
+ * backlight off delay twice: once when we do the manual sleep, and
+ * once when we disable the panel and wait for the PP_STATUS bit to
+ * become zero.
*/
- final->t8 = 1;
- final->t9 = 1;
+ final->backlight_on = 1;
+ final->backlight_off = 1;
/*
- * HW has only a 100msec granularity for t11_t12 so round it up
+ * HW has only a 100msec granularity for power_cycle so round it up
* accordingly.
*/
- final->t11_t12 = roundup(final->t11_t12, 100 * 10);
+ final->power_cycle = roundup(final->power_cycle, msecs_to_pps_units(100));
}
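
Since the hardware has only 100 ms granularity for the power cycle delay, the roundup above lifts, for example, a 510 ms requirement (5100 PPS units) to 600 ms (6000 units). A one-line sketch of that rounding, under the same 100 us unit assumption:

	/* power_cycle must be a whole number of 100ms hardware ticks */
	int cycle = roundup(5100, msecs_to_pps_units(100)); /* 6000 units == 600 ms */
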
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
@@ -1515,7 +1603,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
- const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
+ const struct intel_pps_delays *seq = &intel_dp->pps.pps_delays;
lockdep_assert_held(&display->pps.mutex);
@@ -1548,14 +1636,14 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
intel_de_write(display, regs.pp_ctrl, pp);
}
- pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
- pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->power_up) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->backlight_on);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->backlight_off) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->power_down);
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (display->platform.valleyview || display->platform.cherryview) {
port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
switch (port) {
@@ -1584,11 +1672,14 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
*/
if (i915_mmio_reg_valid(regs.pp_div))
intel_de_write(display, regs.pp_div,
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK,
+ (100 * div) / 2 - 1) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(seq->power_cycle, 1000) + 1));
else
intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
- DIV_ROUND_UP(seq->t11_t12, 1000)));
+ DIV_ROUND_UP(seq->power_cycle, 1000) + 1));
drm_dbg_kms(display->drm,
"panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
@@ -1602,7 +1693,6 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
void intel_pps_encoder_reset(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -1613,7 +1703,7 @@ void intel_pps_encoder_reset(struct intel_dp *intel_dp)
* Reinit the power sequencer also on the resume path, in case
* BIOS did something nasty with it.
*/
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ if (display->platform.valleyview || display->platform.cherryview)
vlv_initial_power_sequencer_setup(intel_dp);
pps_init_delays(intel_dp);
@@ -1649,11 +1739,10 @@ bool intel_pps_init(struct intel_dp *intel_dp)
static void pps_init_late(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector = intel_dp->attached_connector;
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ if (display->platform.valleyview || display->platform.cherryview)
return;
if (intel_num_pps(display) < 2)
@@ -1711,9 +1800,9 @@ void intel_pps_setup(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
- if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ if (HAS_PCH_SPLIT(i915) || display->platform.geminilake || display->platform.broxton)
display->pps.mmio_base = PCH_PPS_BASE;
- else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ else if (display->platform.valleyview || display->platform.cherryview)
display->pps.mmio_base = VLV_PPS_BASE;
else
display->pps.mmio_base = PPS_BASE;
@@ -1731,6 +1820,8 @@ static int intel_pps_show(struct seq_file *m, void *data)
intel_dp->pps.panel_power_up_delay);
seq_printf(m, "Panel power down delay: %d\n",
intel_dp->pps.panel_power_down_delay);
+ seq_printf(m, "Panel power cycle delay: %d\n",
+ intel_dp->pps.panel_power_cycle_delay);
seq_printf(m, "Backlight on delay: %d\n",
intel_dp->pps.backlight_on_delay);
seq_printf(m, "Backlight off delay: %d\n",
@@ -1785,7 +1876,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
MISSING_CASE(port_sel);
break;
}
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ } else if (display->platform.valleyview || display->platform.cherryview) {
/* presumably write lock depends on pipe, not port select */
pp_reg = PP_CONTROL(display, pipe);
panel_pipe = pipe;
@@ -1806,7 +1897,7 @@ void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
- I915_STATE_WARN(dev_priv, panel_pipe == pipe && locked,
- "panel assertion failure, pipe %c regs locked\n",
- pipe_name(pipe));
+ INTEL_DISPLAY_STATE_WARN(display, panel_pipe == pipe && locked,
+ "panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
}
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index 0c5da83a559e..c83007152f07 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -34,6 +34,7 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp);
void intel_pps_check_power_unlocked(struct intel_dp *intel_dp);
void intel_pps_vdd_on(struct intel_dp *intel_dp);
+void intel_pps_vdd_off(struct intel_dp *intel_dp);
void intel_pps_on(struct intel_dp *intel_dp);
void intel_pps_off(struct intel_dp *intel_dp);
void intel_pps_vdd_off_sync(struct intel_dp *intel_dp);
@@ -43,10 +44,16 @@ void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
bool intel_pps_init(struct intel_dp *intel_dp);
void intel_pps_init_late(struct intel_dp *intel_dp);
void intel_pps_encoder_reset(struct intel_dp *intel_dp);
-void intel_pps_reset_all(struct intel_display *display);
-void vlv_pps_init(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
+void vlv_pps_pipe_init(struct intel_dp *intel_dp);
+void vlv_pps_pipe_reset(struct intel_dp *intel_dp);
+enum pipe vlv_pps_backlight_initial_pipe(struct intel_dp *intel_dp);
+void vlv_pps_port_enable_unlocked(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_pps_port_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void vlv_pps_reset_all(struct intel_display *display);
+void bxt_pps_reset_all(struct intel_display *display);
void intel_pps_unlock_regs_wa(struct intel_display *display);
void intel_pps_setup(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 136a0d6ca970..0b021acb330f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -21,6 +21,8 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
@@ -33,6 +35,7 @@
#include "intel_cursor_regs.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -230,7 +233,9 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
if (display->params.enable_psr == -1)
- return connector->panel.vbt.psr.enable;
+ return intel_dp_is_edp(intel_dp) ?
+ connector->panel.vbt.psr.enable :
+ true;
return display->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
@@ -762,7 +767,7 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- u8 val = DP_PSR_ENABLE;
+ u8 val = 0;
if (crtc_state->has_sel_update) {
val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
@@ -782,7 +787,9 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
if (intel_dp->psr.entry_setup_frames > 0)
val |= DP_PSR_FRAME_CAPTURE;
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
+ val |= DP_PSR_ENABLE;
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
}
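
Note the two-step sink write above: DP_PSR_EN_CFG is first written with only the configuration bits, and a second write then adds DP_PSR_ENABLE, presumably so the sink latches the complete PSR configuration before PSR itself is enabled instead of receiving config and enable in a single write.
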
@@ -864,7 +871,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
val |= EDP_PSR_TP2_TP3_TIME_100us;
check_tp3_sel:
- if (intel_dp_source_supports_tps3(dev_priv) &&
+ if (intel_dp_source_supports_tps3(display) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP_TP1_TP3;
else
@@ -1123,18 +1130,16 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp,
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
psr2_program_idle_frames(intel_dp, 0);
- intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+ intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
}
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}
@@ -1446,11 +1451,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (DISPLAY_VER(display) >= 12) {
+ if (DISPLAY_VER(display) >= 20) {
+ psr_max_h = crtc_hdisplay;
+ psr_max_v = crtc_vdisplay;
+ max_bpp = crtc_state->pipe_bpp;
+ } else if (IS_DISPLAY_VER(display, 12, 14)) {
psr_max_h = 5120;
psr_max_v = 3200;
max_bpp = 30;
- } else if (DISPLAY_VER(display) >= 10) {
+ } else if (IS_DISPLAY_VER(display, 10, 11)) {
psr_max_h = 4096;
psr_max_v = 2304;
max_bpp = 24;
@@ -1553,13 +1562,6 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int entry_setup_frames;
- /*
- * Current PSR panels don't work reliably with VRR enabled
- * So if VRR is enabled, do not enable PSR.
- */
- if (crtc_state->vrr.enable)
- return false;
-
if (!CAN_PSR(intel_dp))
return false;
@@ -1599,6 +1601,10 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
/* Remaining checks are for eDP only */
+ if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
+ to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
+ return false;
+
/* 128b/132b Panel Replay is not supported on eDP */
if (intel_dp_is_uhbr(crtc_state)) {
drm_dbg_kms(display->drm,
@@ -1629,6 +1635,15 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return true;
}
+static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 &&
+ !crtc_state->has_sel_update);
+}
+
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -1664,6 +1679,12 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /*
+ * Currently PSR/PR doesn't work reliably with VRR enabled.
+ */
+ if (crtc_state->vrr.enable)
+ return;
+
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
@@ -1675,6 +1696,13 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
+
+ /* Wa_18037818876 */
+ if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
+ crtc_state->has_psr = false;
+ drm_dbg_kms(display->drm,
+ "PSR disabled to workaround PSR FSM hang issue\n");
+ }
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1758,23 +1786,6 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
intel_dp->psr.active = true;
}
-static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
-{
- switch (intel_dp->psr.pipe) {
- case PIPE_A:
- return LATENCY_REPORTING_REMOVED_PIPE_A;
- case PIPE_B:
- return LATENCY_REPORTING_REMOVED_PIPE_B;
- case PIPE_C:
- return LATENCY_REPORTING_REMOVED_PIPE_C;
- case PIPE_D:
- return LATENCY_REPORTING_REMOVED_PIPE_D;
- default:
- MISSING_CASE(intel_dp->psr.pipe);
- return 0;
- }
-}
-
/*
* Wa_16013835468
* Wa_14015648006
@@ -1783,23 +1794,25 @@ static void wm_optimization_wa(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- bool set_wa_bit = false;
+ enum pipe pipe = intel_dp->psr.pipe;
+ bool activate = false;
/* Wa_14015648006 */
- if (IS_DISPLAY_VER(display, 11, 14))
- set_wa_bit |= crtc_state->wm_level_disabled;
+ if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
+ activate = true;
/* Wa_16013835468 */
- if (DISPLAY_VER(display) == 12)
- set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
- crtc_state->hw.adjusted_mode.crtc_vdisplay;
+ if (DISPLAY_VER(display) == 12 &&
+ crtc_state->hw.adjusted_mode.crtc_vblank_start !=
+ crtc_state->hw.adjusted_mode.crtc_vdisplay)
+ activate = true;
- if (set_wa_bit)
+ if (activate)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
- 0, wa_16013835468_bit_get(intel_dp));
+ 0, LATENCY_REPORTING_REMOVED(pipe));
else
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
- wa_16013835468_bit_get(intel_dp), 0);
+ LATENCY_REPORTING_REMOVED(pipe), 0);
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
@@ -1893,7 +1906,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (intel_dp->psr.sel_update_enabled) {
if (DISPLAY_VER(display) == 9)
- intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0,
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
PSR2_VSC_ENABLE_PROG_HEADER |
PSR2_ADD_VERTICAL_LINE_COUNT);
@@ -1903,14 +1916,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* cause issues if non-supported panels are used.
*/
if (!intel_dp->psr.panel_replay_enabled &&
- (IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv)))
- intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (!intel_dp->psr.panel_replay_enabled &&
- IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0))
+ IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
intel_de_rmw(display,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
0,
@@ -1998,6 +2011,15 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.enabled = true;
intel_dp->psr.paused = false;
+ /*
+ * link_ok is sticky and set here on PSR enable. We can assume link
+ * training is complete, as we never proceed to PSR enable with an
+ * untrained link. link_ok stays set until the first short pulse
+ * interrupt. This works around panels that state a bad link after
+ * PSR is enabled.
+ */
+ intel_dp->psr.link_ok = true;
+
intel_psr_activate(intel_dp);
}
@@ -2090,12 +2112,12 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
*/
if (DISPLAY_VER(display) >= 11)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
- wa_16013835468_bit_get(intel_dp), 0);
+ LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);
if (intel_dp->psr.sel_update_enabled) {
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (!intel_dp->psr.panel_replay_enabled &&
- IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0))
+ IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
intel_de_rmw(display,
MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
@@ -2114,7 +2136,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
intel_de_rmw(display,
- PORT_ALPM_CTL(display, cpu_transcoder),
+ PORT_ALPM_CTL(cpu_transcoder),
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
}
@@ -2157,6 +2179,8 @@ void intel_psr_disable(struct intel_dp *intel_dp,
intel_psr_disable_locked(intel_dp);
+ intel_dp->psr.link_ok = false;
+
mutex_unlock(&intel_dp->psr.lock);
cancel_work_sync(&intel_dp->psr.work);
cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
@@ -2221,6 +2245,36 @@ unlock:
mutex_unlock(&psr->lock);
}
+/**
+ * intel_psr_needs_block_dc_vblank - Check if block dc entry is needed
+ * @crtc_state: CRTC status
+ *
+ * We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
+ * prevent it in case of Panel Replay. Panel Replay switches main link off on
+ * DC entry. This means vblank interrupts are not fired and is a problem if
+ * user-space is polling for vblank events.
+ */
+bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_encoder *encoder;
+
+ for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
+ struct intel_dp *intel_dp;
+
+ if (!intel_encoder_is_dp(encoder))
+ continue;
+
+ intel_dp = enc_to_intel_dp(encoder);
+
+ if (intel_dp_is_edp(intel_dp) &&
+ CAN_PANEL_REPLAY(intel_dp))
+ return true;
+ }
+
+ return false;
+}
+
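
A hedged sketch of how a caller on the vblank-enable path might consume this helper; the surrounding condition is hypothetical, while intel_psr_needs_block_dc_vblank() and intel_display_power_set_target_dc_state() come from the driver:

	/* sketch: keep DC states off while vblank events must keep firing */
	if (intel_psr_needs_block_dc_vblank(crtc_state))
		intel_display_power_set_target_dc_state(display, DC_STATE_DISABLE);
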
static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
@@ -2480,11 +2534,60 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c
return true;
}
+/* Wa 14019834836 */
+static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_encoder *encoder;
+ int hactive_limit;
+
+ if (crtc_state->psr2_su_area.y1 != 0 ||
+ crtc_state->psr2_su_area.y2 != 0)
+ return;
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
+ else
+ hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;
+
+ if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!intel_dp_is_edp(intel_dp) &&
+ intel_dp->psr.panel_replay_enabled &&
+ intel_dp->psr.sel_update_enabled) {
+ crtc_state->psr2_su_area.y2++;
+ return;
+ }
+ }
+}
+
+static void
+intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /* Wa_14014971492 */
+ if (!crtc_state->has_panel_replay &&
+ ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
+ IS_ALDERLAKE_P(i915) || IS_TIGERLAKE(i915))) &&
+ crtc_state->splitter.enable)
+ crtc_state->psr2_su_area.y1 = 0;
+
+ /* Wa 14019834836 */
+ if (DISPLAY_VER(display) == 30)
+ intel_psr_apply_pr_link_on_su_wa(crtc_state);
+}
+
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
@@ -2589,12 +2692,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (full_update)
goto skip_sel_fetch_set_loop;
- /* Wa_14014971492 */
- if (!crtc_state->has_panel_replay &&
- ((IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) ||
- IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) &&
- crtc_state->splitter.enable)
- crtc_state->psr2_su_area.y1 = 0;
+ intel_psr_apply_su_area_workarounds(crtc_state);
ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
if (ret)
@@ -3235,11 +3333,10 @@ unlock:
void intel_psr_init(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (!(HAS_PSR(display) || HAS_DP20(dev_priv)))
+ if (!(HAS_PSR(display) || HAS_DP20(display)))
return;
/*
@@ -3257,7 +3354,7 @@ void intel_psr_init(struct intel_dp *intel_dp)
return;
}
- if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
+ if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
DISPLAY_VER(display) >= 20)
intel_dp->psr.source_panel_replay_support = true;
@@ -3373,6 +3470,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
mutex_lock(&psr->lock);
+ psr->link_ok = false;
+
if (!psr->enabled)
goto exit;
@@ -3433,6 +3532,33 @@ bool intel_psr_enabled(struct intel_dp *intel_dp)
}
/**
+ * intel_psr_link_ok - return psr->link_ok
+ * @intel_dp: struct intel_dp
+ *
+ * We are seeing unexpected link re-trainings with some panels. This is caused
+ * by the panel stating a bad link status after PSR is enabled. Code checking
+ * link status can call this to ensure it can ignore a bad link status stated
+ * by the panel, i.e. if the panel states a bad link while intel_psr_link_ok
+ * states the link is ok, the caller should rely on the latter.
+ *
+ * Return value of link_ok
+ */
+bool intel_psr_link_ok(struct intel_dp *intel_dp)
+{
+ bool ret;
+
+ if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
+ !intel_dp_is_edp(intel_dp))
+ return false;
+
+ mutex_lock(&intel_dp->psr.lock);
+ ret = intel_dp->psr.link_ok;
+ mutex_unlock(&intel_dp->psr.lock);
+
+ return ret;
+}
+
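
As a usage illustration, link-status code could gate retraining on this helper. The wrapper below is hypothetical; drm_dp_channel_eq_ok() is the standard DRM DP helper:

	/* sketch: trust PSR's sticky link_ok over sink-reported status */
	static bool link_retrain_needed(struct intel_dp *intel_dp,
					const u8 link_status[DP_LINK_STATUS_SIZE])
	{
		if (intel_psr_link_ok(intel_dp))
			return false;

		return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
	}
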
+/**
* intel_psr_lock - grab PSR lock
* @crtc_state: the crtc state
*
@@ -3845,19 +3971,16 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
- /* TODO: Add support for MST connectors as well. */
- if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
- connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
- connector->mst_port)
+ if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
return;
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
- if (HAS_PSR(display) || HAS_DP20(i915))
+ if (HAS_PSR(display) || HAS_DP20(display))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 6eb5f15f674f..956be263c09e 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -58,6 +58,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
+bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state);
+bool intel_psr_link_ok(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 642bb15fb547..9ad7611506e8 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -9,6 +9,7 @@
#include "intel_display_reg_defs.h"
#include "intel_dp_aux_regs.h"
+#define _TRANS_EXITLINE_A 0x60018
#define TRANS_EXITLINE(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_EXITLINE_A)
#define EXITLINE_ENABLE REG_BIT(31)
#define EXITLINE_MASK REG_GENMASK(12, 0)
@@ -295,9 +296,9 @@
#define _PORT_ALPM_CTL_A 0x16fa2c
#define _PORT_ALPM_CTL_B 0x16fc2c
-#define PORT_ALPM_CTL(dev_priv, port) _MMIO_PORT(port, _PORT_ALPM_CTL_A, _PORT_ALPM_CTL_B)
+#define PORT_ALPM_CTL(port) _MMIO_PORT(port, _PORT_ALPM_CTL_A, _PORT_ALPM_CTL_B)
#define PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE REG_BIT(31)
-#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(23, 20)
+#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK REG_GENMASK(25, 20)
#define PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_SETUP_MASK, val)
#define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK REG_GENMASK(19, 16)
#define PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(val) REG_FIELD_PREP(PORT_ALPM_CTL_MAX_PHY_SWING_HOLD_MASK, val)
@@ -306,7 +307,7 @@
#define _PORT_ALPM_LFPS_CTL_A 0x16fa30
#define _PORT_ALPM_LFPS_CTL_B 0x16fc30
-#define PORT_ALPM_LFPS_CTL(dev_priv, port) _MMIO_PORT(port, _PORT_ALPM_LFPS_CTL_A, _PORT_ALPM_LFPS_CTL_B)
+#define PORT_ALPM_LFPS_CTL(port) _MMIO_PORT(port, _PORT_ALPM_LFPS_CTL_A, _PORT_ALPM_LFPS_CTL_B)
#define PORT_ALPM_LFPS_CTL_LFPS_START_POLARITY REG_BIT(31)
#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MASK REG_GENMASK(27, 24)
#define PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT_MIN 7
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 29b56d53a340..8b30e9fd936e 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -5,7 +5,7 @@
#include <linux/dmi.h>
-#include "i915_drv.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_quirks.h"
@@ -231,7 +231,7 @@ static struct intel_quirk intel_quirks[] = {
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};
-static struct intel_dpcd_quirk intel_dpcd_quirks[] = {
+static const struct intel_dpcd_quirk intel_dpcd_quirks[] = {
/* Dell Precision 5490 */
{
.device = 0x7d55,
@@ -272,7 +272,7 @@ void intel_init_dpcd_quirks(struct intel_dp *intel_dp,
int i;
for (i = 0; i < ARRAY_SIZE(intel_dpcd_quirks); i++) {
- struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i];
+ const struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i];
if (d->device == q->device &&
(d->subsystem_vendor == q->subsystem_vendor ||
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 7cc519b402e9..498b35ec4e0f 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -36,6 +36,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
+#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -2081,10 +2082,10 @@ intel_sdvo_get_edid(struct drm_connector *connector)
static const struct drm_edid *
intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct i2c_adapter *ddc;
- ddc = intel_gmbus_get_adapter(i915, i915->display.vbt.crt_ddc_pin);
+ ddc = intel_gmbus_get_adapter(display, display->vbt.crt_ddc_pin);
if (!ddc)
return NULL;
@@ -2135,6 +2136,7 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
@@ -2144,10 +2146,10 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
if (!intel_sdvo_set_target_output(intel_sdvo,
@@ -2195,14 +2197,14 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
int num_modes = 0;
const struct drm_edid *drm_edid;
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return drm_edid_connector_add_modes(connector);
/* set the bus switch and get the modes */
@@ -2296,6 +2298,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
struct intel_sdvo_connector *intel_sdvo_connector =
@@ -2309,7 +2312,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return 0;
/*
@@ -2637,6 +2640,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo *sdvo,
static void
intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo)
{
+ struct intel_display *display = to_intel_display(&sdvo->base);
struct drm_i915_private *dev_priv = to_i915(sdvo->base.base.dev);
const struct sdvo_device_mapping *mapping;
u8 pin;
@@ -2647,7 +2651,7 @@ intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo)
mapping = &dev_priv->display.vbt.sdvo_mappings[1];
if (mapping->initialized &&
- intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
+ intel_gmbus_is_valid_pin(display, mapping->i2c_pin))
pin = mapping->i2c_pin;
else
pin = GMBUS_PIN_DPB;
@@ -2656,7 +2660,7 @@ intel_sdvo_select_i2c_bus(struct intel_sdvo *sdvo)
sdvo->base.base.base.id, sdvo->base.base.name,
pin, sdvo->target_addr);
- sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+ sdvo->i2c = intel_gmbus_get_adapter(display, pin);
/*
* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index e6df1f92def5..41fe26dc200b 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,6 +5,7 @@
#include <linux/math.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
@@ -1997,6 +1998,7 @@ int intel_snps_phy_check_hdmi_link_rate(int clock)
void intel_mpllb_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -2019,11 +2021,11 @@ void intel_mpllb_state_verify(struct intel_atomic_state *state,
intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
#define MPLLB_CHECK(__name) \
- I915_STATE_WARN(i915, mpllb_sw_state->__name != mpllb_hw_state.__name, \
- "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \
- crtc->base.base.id, crtc->base.name, \
- __stringify(__name), \
- mpllb_sw_state->__name, mpllb_hw_state.__name)
+ INTEL_DISPLAY_STATE_WARN(display, mpllb_sw_state->__name != mpllb_hw_state.__name, \
+ "[CRTC:%d:%s] mismatch in MPLLB: %s (expected 0x%08x, found 0x%08x)", \
+ crtc->base.base.id, crtc->base.name, \
+ __stringify(__name), \
+ mpllb_sw_state->__name, mpllb_hw_state.__name)
MPLLB_CHECK(mpllb_cp);
MPLLB_CHECK(mpllb_div);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index e657b09ede99..e6fadcef58e0 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -378,7 +378,8 @@ static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state)
}
static void
-vlv_sprite_update_noarm(struct intel_plane *plane,
+vlv_sprite_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -399,7 +400,8 @@ vlv_sprite_update_noarm(struct intel_plane *plane,
}
static void
-vlv_sprite_update_arm(struct intel_plane *plane,
+vlv_sprite_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -449,7 +451,8 @@ vlv_sprite_update_arm(struct intel_plane *plane,
}
static void
-vlv_sprite_disable_arm(struct intel_plane *plane,
+vlv_sprite_disable_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(plane->base.dev);
@@ -795,7 +798,8 @@ static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state)
}
static void
-ivb_sprite_update_noarm(struct intel_plane *plane,
+ivb_sprite_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -826,7 +830,8 @@ ivb_sprite_update_noarm(struct intel_plane *plane,
}
static void
-ivb_sprite_update_arm(struct intel_plane *plane,
+ivb_sprite_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -874,7 +879,8 @@ ivb_sprite_update_arm(struct intel_plane *plane,
}
static void
-ivb_sprite_disable_arm(struct intel_plane *plane,
+ivb_sprite_disable_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(plane->base.dev);
@@ -1133,7 +1139,8 @@ static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state)
}
static void
-g4x_sprite_update_noarm(struct intel_plane *plane,
+g4x_sprite_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -1162,7 +1169,8 @@ g4x_sprite_update_noarm(struct intel_plane *plane,
}
static void
-g4x_sprite_update_arm(struct intel_plane *plane,
+g4x_sprite_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -1206,7 +1214,8 @@ g4x_sprite_update_arm(struct intel_plane *plane,
}
static void
-g4x_sprite_disable_arm(struct intel_plane *plane,
+g4x_sprite_disable_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(plane->base.dev);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 044a032e41b9..531079979c05 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -8,9 +8,6 @@
#include <linux/types.h>
-struct drm_device;
-struct drm_display_mode;
-struct drm_file;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
@@ -19,8 +16,6 @@ enum pipe;
#ifdef I915
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
-int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
index 4853c4806004..1d0b84b464c1 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
@@ -42,6 +42,7 @@ static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_intel_sprite_colorkey *set = data;
struct drm_plane *plane;
@@ -100,7 +101,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
*/
if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
struct intel_crtc *crtc =
- intel_crtc_for_pipe(dev_priv,
+ intel_crtc_for_pipe(display,
to_intel_plane(plane)->pipe);
plane_state = drm_atomic_get_plane_state(state,
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 6f2ee7dbc43b..13811244c82b 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -390,7 +390,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_tc_port *tc = to_tc_port(dig_port);
- bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ bool lane_reversal = dig_port->lane_reversal;
u32 val;
if (DISPLAY_VER(i915) >= 14)
@@ -1005,7 +1005,7 @@ xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
drm_dbg_kms(&i915->drm,
"Port %s: timeout waiting for TCSS power to get %s\n",
- enabled ? "enabled" : "disabled",
- tc->port_name);
+ tc->port_name,
+ str_enabled_disabled(enabled));
return false;
}
@@ -1013,21 +1013,52 @@ xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
return true;
}
+/*
+ * Gfx driver WA 14020908590 for PTL tcss_rxdetect_clkswb_req/ack
+ * handshake violation when pwrreq goes 0->1 during TC7/10 entry
+ */
+static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable)
+{
+ /* check if mailbox is running busy */
+ if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
+ drm_dbg_kms(display->drm,
+ "Timeout waiting for TCSS mailbox run/busy bit to clear\n");
+ return;
+ }
+
+ intel_de_write(display, TCSS_DISP_MAILBOX_IN_DATA, enable ? 1 : 0);
+ intel_de_write(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY |
+ TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));
+
+ /* wait for the firmware to clear the run/busy bit before continuing */
+ if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
+ drm_dbg_kms(display->drm,
+ "Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n");
+ return;
+ }
+}
+
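
The workaround follows a plain mailbox handshake: wait for the run/busy bit to clear, write the payload into the data register, kick the command with run/busy set, then wait for the firmware to clear run/busy again. Both waits use a 10 ms timeout and only log on failure, so a stuck mailbox degrades to a skipped workaround rather than aborting the power request itself.
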
static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
- struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_display *display = to_intel_display(tc->dig_port);
enum port port = tc->dig_port->base.port;
- i915_reg_t reg = XELPDP_PORT_BUF_CTL1(i915, port);
+ i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
u32 val;
assert_tc_cold_blocked(tc);
- val = intel_de_read(i915, reg);
+ if (DISPLAY_VER(display) == 30)
+ xelpdp_tc_power_request_wa(display, enable);
+
+ val = intel_de_read(display, reg);
if (enable)
val |= XELPDP_TCSS_POWER_REQUEST;
else
val &= ~XELPDP_TCSS_POWER_REQUEST;
- intel_de_write(i915, reg, val);
+ intel_de_write(display, reg, val);
}
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 581844d1db9a..6e311dcc1a61 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,6 +33,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -928,7 +929,7 @@ intel_enable_tv(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc));
@@ -942,7 +943,7 @@ intel_disable_tv(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct intel_display *display = to_intel_display(state);
+ struct intel_display *display = to_intel_display(encoder);
intel_de_rmw(display, TV_CTL, TV_ENC_ENABLE, 0);
}
@@ -1092,7 +1093,6 @@ intel_tv_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_display *display = to_intel_display(encoder);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct drm_display_mode mode = {};
@@ -1166,7 +1166,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock /= 2;
/* pixel counter doesn't work on i965gm TV output */
- if (IS_I965GM(dev_priv))
+ if (display->platform.i965gm)
pipe_config->mode_flags |=
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
}
@@ -1196,7 +1196,6 @@ intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_atomic_state *state =
to_intel_atomic_state(pipe_config->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_tv_connector_state *tv_conn_state =
to_intel_tv_connector_state(conn_state);
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
@@ -1348,7 +1347,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
adjusted_mode->name[0] = '\0';
/* pixel counter doesn't work on i965gm TV output */
- if (IS_I965GM(dev_priv))
+ if (display->platform.i965gm)
pipe_config->mode_flags |=
I915_MODE_FLAG_USE_SCANLINE_COUNTER;
@@ -1524,7 +1523,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
/* Enable two fixes for the chips that need them. */
- if (IS_I915GM(dev_priv))
+ if (display->platform.i915gm)
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
set_tv_mode_timings(display, tv_mode, burst_ena);
@@ -1626,7 +1625,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
* The TV sense state should be cleared to zero on cantiga platform. Otherwise
* the TV is misdetected. This is hardware requirement.
*/
- if (IS_GM45(dev_priv))
+ if (display->platform.gm45)
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
@@ -1715,7 +1714,6 @@ intel_tv_detect(struct drm_connector *connector,
bool force)
{
struct intel_display *display = to_intel_display(connector->dev);
- struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
enum drm_connector_status status;
int type;
@@ -1723,10 +1721,10 @@ intel_tv_detect(struct drm_connector *connector,
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name, force);
- if (!intel_display_device_enabled(i915))
+ if (!intel_display_device_enabled(display))
return connector_status_disconnected;
- if (!intel_display_driver_check_access(i915))
+ if (!intel_display_driver_check_access(display))
return connector->status;
if (force) {
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 0b7f2134e441..a95fb3349eba 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -3,6 +3,8 @@
* Copyright © 2022-2023 Intel Corporation
*/
+#include <drm/drm_vblank.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_color.h"
@@ -193,7 +195,6 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
/*
* The scanline counter increments at the leading edge of hsync.
@@ -223,7 +224,7 @@ int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
*/
if (DISPLAY_VER(display) == 2)
return -1;
- else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ else if (HAS_DDI(display) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
return 2;
else
return 1;
@@ -325,14 +326,13 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
const struct drm_display_mode *mode)
{
struct intel_display *display = to_intel_display(_crtc->dev);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc = to_intel_crtc(_crtc);
enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
unsigned long irqflags;
bool use_scanline_counter = DISPLAY_VER(display) >= 5 ||
- IS_G4X(dev_priv) || DISPLAY_VER(display) == 2 ||
+ display->platform.g4x || DISPLAY_VER(display) == 2 ||
crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
if (drm_WARN_ON(display->drm, !mode->crtc_clock)) {
@@ -601,14 +601,15 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state,
struct intel_vblank_evade_ctx *evade)
{
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state;
const struct drm_display_mode *adjusted_mode;
evade->crtc = crtc;
- evade->need_vlv_dsi_wa = (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) &&
+ evade->need_vlv_dsi_wa = (display->platform.valleyview ||
+ display->platform.cherryview) &&
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 42022756bbd5..e9b809568cd4 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -1014,6 +1014,14 @@ struct bdb_tv_options {
* Block 27 - eDP VBT Block
*/
+struct edp_power_seq {
+ u16 t1_t3;
+ u16 t8;
+ u16 t9;
+ u16 t10;
+ u16 t11_t12;
+} __packed;
+
#define EDP_18BPP 0
#define EDP_24BPP 1
#define EDP_30BPP 2
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 2e849b015e74..b355c479eda3 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -14,6 +14,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dsi.h"
#include "intel_qp_tables.h"
#include "intel_vdsc.h"
@@ -306,6 +307,12 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
+ if (vdsc_cfg->bits_per_component < 8) {
+ drm_dbg_kms(&dev_priv->drm, "DSC bpc requirement not met, bpc: %d\n",
+ vdsc_cfg->bits_per_component);
+ return -EINVAL;
+ }
+
drm_dsc_set_rc_buf_thresh(vdsc_cfg);
/*
@@ -373,15 +380,15 @@ intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
static int intel_dsc_get_vdsc_per_pipe(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->dsc.dsc_split ? 2 : 1;
+ return crtc_state->dsc.num_streams;
}
int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state)
{
int num_vdsc_instances = intel_dsc_get_vdsc_per_pipe(crtc_state);
+ int num_joined_pipes = intel_crtc_num_joined_pipes(crtc_state);
- if (crtc_state->joiner_pipes)
- num_vdsc_instances *= 2;
+ num_vdsc_instances *= num_joined_pipes;
return num_vdsc_instances;
}
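
With the change above, the VDSC instance count becomes streams-per-pipe times joined pipes rather than the old hardcoded doubling for joiner configurations. A quick standalone check, assuming intel_crtc_num_joined_pipes() would report 4 for an ultrajoiner configuration:

/* Sketch of the instance count after the change above: streams per
 * pipe times joined pipes, instead of the old hardcoded doubling. */
#include <stdio.h>

static int num_vdsc_instances(int num_streams, int num_joined_pipes)
{
	return num_streams * num_joined_pipes;
}

int main(void)
{
	/* e.g. 2 DSC engines per pipe, 4 pipes joined (ultrajoiner) */
	printf("%d\n", num_vdsc_instances(2, 4));	/* 8 */
	return 0;
}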
@@ -396,8 +403,10 @@ static void intel_dsc_get_pps_reg(const struct intel_crtc_state *crtc_state, int
pipe_dsc = is_pipe_dsc(crtc, cpu_transcoder);
- if (dsc_reg_num >= 3)
+ if (dsc_reg_num >= 4)
MISSING_CASE(dsc_reg_num);
+ if (dsc_reg_num >= 3)
+ dsc_reg[2] = BMG_DSC2_PPS(pipe, pps);
if (dsc_reg_num >= 2)
dsc_reg[1] = pipe_dsc ? ICL_DSC1_PPS(pipe, pps) : DSCC_PPS(pps);
if (dsc_reg_num >= 1)
@@ -409,7 +418,7 @@ static void intel_dsc_pps_write(const struct intel_crtc_state *crtc_state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- i915_reg_t dsc_reg[2];
+ i915_reg_t dsc_reg[3];
int i, vdsc_per_pipe, dsc_reg_num;
vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
@@ -742,7 +751,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
u32 dss_ctl1_val = 0;
if (crtc_state->joiner_pipes && !crtc_state->dsc.compression_enable) {
- if (intel_crtc_is_joiner_secondary(crtc_state))
+ if (intel_crtc_is_bigjoiner_secondary(crtc_state))
dss_ctl1_val |= UNCOMPRESSED_JOINER_SECONDARY;
else
dss_ctl1_val |= UNCOMPRESSED_JOINER_PRIMARY;
@@ -764,14 +773,27 @@ void intel_dsc_enable(const struct intel_crtc_state *crtc_state)
intel_dsc_pps_configure(crtc_state);
- dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+ dss_ctl2_val |= VDSC0_ENABLE;
if (vdsc_instances_per_pipe > 1) {
- dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+ dss_ctl2_val |= VDSC1_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
+
+ if (vdsc_instances_per_pipe > 2) {
+ dss_ctl2_val |= VDSC2_ENABLE;
+ dss_ctl2_val |= SMALL_JOINER_CONFIG_3_ENGINES;
+ }
+
if (crtc_state->joiner_pipes) {
+ if (intel_crtc_ultrajoiner_enable_needed(crtc_state))
+ dss_ctl1_val |= ULTRA_JOINER_ENABLE;
+
+ if (intel_crtc_is_ultrajoiner_primary(crtc_state))
+ dss_ctl1_val |= PRIMARY_ULTRA_JOINER_ENABLE;
+
dss_ctl1_val |= BIG_JOINER_ENABLE;
- if (!intel_crtc_is_joiner_secondary(crtc_state))
+
+ if (intel_crtc_is_bigjoiner_primary(crtc_state))
dss_ctl1_val |= PRIMARY_BIG_JOINER_ENABLE;
}
intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
@@ -796,7 +818,7 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- i915_reg_t dsc_reg[2];
+ i915_reg_t dsc_reg[3];
int i, vdsc_per_pipe, dsc_reg_num;
u32 val;
@@ -959,12 +981,16 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, cpu_transcoder));
dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc, cpu_transcoder));
- crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
+ crtc_state->dsc.compression_enable = dss_ctl2 & VDSC0_ENABLE;
if (!crtc_state->dsc.compression_enable)
goto out;
- crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) &&
- (dss_ctl1 & JOINER_ENABLE);
+ if (dss_ctl1 & JOINER_ENABLE && dss_ctl2 & (VDSC2_ENABLE | SMALL_JOINER_CONFIG_3_ENGINES))
+ crtc_state->dsc.num_streams = 3;
+ else if (dss_ctl1 & JOINER_ENABLE && dss_ctl2 & VDSC1_ENABLE)
+ crtc_state->dsc.num_streams = 2;
+ else
+ crtc_state->dsc.num_streams = 1;
intel_dsc_get_pps_config(crtc_state);
out:
@@ -975,10 +1001,10 @@ static void intel_vdsc_dump_state(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state)
{
drm_printf_indent(p, indent,
- "dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, split: %s\n",
+ "dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, num_streams: %d\n",
FXP_Q4_ARGS(crtc_state->dsc.compressed_bpp_x16),
crtc_state->dsc.slice_count,
- str_yes_no(crtc_state->dsc.dsc_split));
+ crtc_state->dsc.num_streams);
}
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
@@ -990,3 +1016,48 @@ void intel_vdsc_state_dump(struct drm_printer *p, int indent,
intel_vdsc_dump_state(p, indent, crtc_state);
drm_dsc_dump_config(p, indent, &crtc_state->dsc.config);
}
+
+int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc);
+ int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
+ int min_cdclk;
+
+ if (!crtc_state->dsc.compression_enable)
+ return 0;
+
+ /*
+	 * When only one VDSC engine is used, each VDSC operates with
+	 * 1 ppc (pixel per clock) throughput, so the pixel clock
+	 * cannot be higher than the VDSC clock (cdclk). If there are
+	 * 2 VDSC engines, the pixel clock can't be higher than
+	 * 2 * cdclk, and so on.
+ */
+ min_cdclk = DIV_ROUND_UP(crtc_state->pixel_rate, num_vdsc_instances);
+
+ if (crtc_state->joiner_pipes) {
+ int pixel_clock = intel_dp_mode_to_fec_clock(crtc_state->hw.adjusted_mode.clock);
+
+ /*
+ * According to Bigjoiner bw check:
+	 * compressed_bpp <= PPC * CDCLK * Bigjoiner Interface bits / Pixel clock
+ *
+ * We have already computed compressed_bpp, so now compute the min CDCLK that
+ * is required to support this compressed_bpp.
+ *
+ * => CDCLK >= compressed_bpp * Pixel clock / (PPC * Bigjoiner Interface bits)
+ *
+	 * Since PPC = 2 with bigjoiner:
+	 * => CDCLK >= compressed_bpp * Pixel clock / (2 * Bigjoiner Interface bits)
+ */
+ int bigjoiner_interface_bits = DISPLAY_VER(display) >= 14 ? 36 : 24;
+ int min_cdclk_bj =
+ (fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
+ pixel_clock) / (2 * bigjoiner_interface_bits);
+
+ min_cdclk = max(min_cdclk, min_cdclk_bj);
+ }
+
+ return min_cdclk;
+}
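
The two CDCLK lower bounds computed in intel_vdsc_min_cdclk() above can be sanity-checked with concrete numbers. The standalone sketch below re-derives them with invented mode values; DIV_ROUND_UP and the .4 fixed-point helper are inlined, and the FEC overhead that intel_dp_mode_to_fec_clock() adds to the joiner bound is omitted for brevity.

/* Worked example of the min CDCLK bounds from intel_vdsc_min_cdclk().
 * Standalone re-derivation; kernel helpers inlined, values invented. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* .4 binary fixed point, as in compressed_bpp_x16 */
static int fxp_q4_to_int_roundup(int x16)
{
	return DIV_ROUND_UP(x16, 16);
}

int main(void)
{
	int pixel_rate = 1036800;		/* kHz, a 4k high-refresh class mode */
	int num_vdsc = 2;			/* two VDSC engines on the pipe */
	int compressed_bpp_x16 = 12 * 16;	/* 12.0 bpp compressed */
	int bj_bits = 36;			/* DISPLAY_VER >= 14 interface width */

	/* bound 1: 1 pixel/clock per VDSC engine */
	int min_cdclk = DIV_ROUND_UP(pixel_rate, num_vdsc);

	/* bound 2: bigjoiner bandwidth, PPC = 2 */
	int min_cdclk_bj = fxp_q4_to_int_roundup(compressed_bpp_x16) *
			   pixel_rate / (2 * bj_bits);

	printf("engine bound: %d kHz, joiner bound: %d kHz\n",
	       min_cdclk, min_cdclk_bj);
	return 0;
}

With these numbers the per-engine bound (518400 kHz) dominates, so max() in the driver picks it, exactly as the function above does.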
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 290b2e9b3482..9e2812f99dd7 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -31,5 +31,6 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state);
+int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
index f921ad67b587..2d478a84b07c 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -21,8 +21,10 @@
#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
#define DSS_CTL2 _MMIO(0x67404)
-#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
-#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
+#define VDSC0_ENABLE REG_BIT(31)
+#define VDSC2_ENABLE REG_BIT(30)
+#define SMALL_JOINER_CONFIG_3_ENGINES REG_BIT(23)
+#define VDSC1_ENABLE REG_BIT(15)
#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
@@ -37,6 +39,8 @@
#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25)
#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0)
#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1)
+#define ULTRA_JOINER_ENABLE REG_BIT(23)
+#define PRIMARY_ULTRA_JOINER_ENABLE REG_BIT(22)
#define UNCOMPRESSED_JOINER_PRIMARY (1 << 21)
#define UNCOMPRESSED_JOINER_SECONDARY (1 << 20)
@@ -55,8 +59,10 @@
#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
+#define _BMG_DSC2_PICTURE_PARAMETER_SET_0_PB 0x78970
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
+#define _BMG_DSC2_PICTURE_PARAMETER_SET_0_PC 0x78A70
#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
_ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
_ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
@@ -69,8 +75,12 @@
#define _ICL_DSC1_PPS_0(pipe) _PICK_EVEN((pipe) - PIPE_B, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define _BMG_DSC2_PPS_0(pipe) _PICK_EVEN((pipe) - PIPE_B, \
+ _BMG_DSC2_PICTURE_PARAMETER_SET_0_PB, \
+ _BMG_DSC2_PICTURE_PARAMETER_SET_0_PC)
#define ICL_DSC0_PPS(pipe, pps) _MMIO(_ICL_DSC0_PPS_0(pipe) + ((pps) * 4))
#define ICL_DSC1_PPS(pipe, pps) _MMIO(_ICL_DSC1_PPS_0(pipe) + ((pps) * 4))
+#define BMG_DSC2_PPS(pipe, pps) _MMIO(_BMG_DSC2_PPS_0(pipe) + ((pps) * 4))
/* PPS 0 */
#define DSC_PPS0_NATIVE_422_ENABLE REG_BIT(23)
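
The register header above migrates open-coded (1 << n) masks to REG_BIT()/REG_GENMASK()/REG_FIELD_PREP(). The userspace approximation below captures the behavior; the real kernel macros additionally embed compile-time validity checks, which are left out here, and __builtin_ctz assumes GCC/Clang.

/* Userspace approximation of the REG_BIT/REG_GENMASK/REG_FIELD_PREP
 * style used above; in-kernel versions add compile-time checks. */
#include <stdint.h>
#include <stdio.h>

#define REG_BIT(n)		((uint32_t)1 << (n))
#define REG_GENMASK(h, l)	(((~(uint32_t)0) >> (31 - (h))) & \
				 ((~(uint32_t)0) << (l)))
#define REG_FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define VDSC0_ENABLE			REG_BIT(31)
#define SPLITTER_CONFIGURATION_MASK	REG_GENMASK(26, 25)

int main(void)
{
	uint32_t dss_ctl1 = REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1);

	printf("ctl1=0x%08x vdsc0=0x%08x\n", dss_ctl1, VDSC0_ENABLE);
	return 0;
}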
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 0b5916c15307..fd18dd07ae49 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -14,24 +14,24 @@
#include "intel_de.h"
#include "intel_vga.h"
-static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
+static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
{
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ if (display->platform.valleyview || display->platform.cherryview)
return VLV_VGACNTRL;
- else if (DISPLAY_VER(i915) >= 5)
+ else if (DISPLAY_VER(display) >= 5)
return CPU_VGACNTRL;
else
return VGACNTRL;
}
/* Disable the VGA plane that we never use */
-void intel_vga_disable(struct drm_i915_private *dev_priv)
+void intel_vga_disable(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
u8 sr1;
- if (intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)
+ if (intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)
return;
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
@@ -42,23 +42,24 @@ void intel_vga_disable(struct drm_i915_private *dev_priv)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
- intel_de_write(dev_priv, vga_reg, VGA_DISP_DISABLE);
- intel_de_posting_read(dev_priv, vga_reg);
+ intel_de_write(display, vga_reg, VGA_DISP_DISABLE);
+ intel_de_posting_read(display, vga_reg);
}
-void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
+void intel_vga_redisable_power_on(struct intel_display *display)
{
- i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
- if (!(intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!(intel_de_read(display, vga_reg) & VGA_DISP_DISABLE)) {
+ drm_dbg_kms(display->drm,
"Something enabled VGA plane, disabling it\n");
- intel_vga_disable(dev_priv);
+ intel_vga_disable(display);
}
}
-void intel_vga_redisable(struct drm_i915_private *i915)
+void intel_vga_redisable(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
/*
@@ -74,14 +75,14 @@ void intel_vga_redisable(struct drm_i915_private *i915)
if (!wakeref)
return;
- intel_vga_redisable_power_on(i915);
+ intel_vga_redisable_power_on(display);
intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref);
}
-void intel_vga_reset_io_mem(struct drm_i915_private *i915)
+void intel_vga_reset_io_mem(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -98,10 +99,10 @@ void intel_vga_reset_io_mem(struct drm_i915_private *i915)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
}
-int intel_vga_register(struct drm_i915_private *i915)
+int intel_vga_register(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
int ret;
/*
@@ -119,9 +120,9 @@ int intel_vga_register(struct drm_i915_private *i915)
return 0;
}
-void intel_vga_unregister(struct drm_i915_private *i915)
+void intel_vga_unregister(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
vga_client_unregister(pdev);
}
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
index ba5b55b917f0..824dfc32a199 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.h
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -6,13 +6,13 @@
#ifndef __INTEL_VGA_H__
#define __INTEL_VGA_H__
-struct drm_i915_private;
+struct intel_display;
-void intel_vga_reset_io_mem(struct drm_i915_private *i915);
-void intel_vga_disable(struct drm_i915_private *i915);
-void intel_vga_redisable(struct drm_i915_private *i915);
-void intel_vga_redisable_power_on(struct drm_i915_private *i915);
-int intel_vga_register(struct drm_i915_private *i915);
-void intel_vga_unregister(struct drm_i915_private *i915);
+void intel_vga_reset_io_mem(struct intel_display *display);
+void intel_vga_disable(struct intel_display *display);
+void intel_vga_redisable(struct intel_display *display);
+void intel_vga_redisable_power_on(struct intel_display *display);
+int intel_vga_register(struct intel_display *display);
+void intel_vga_unregister(struct intel_display *display);
#endif /* __INTEL_VGA_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 9a51f5bac307..70088e355055 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -4,7 +4,6 @@
*
*/
-#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -56,6 +55,11 @@ bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh)
vrefresh <= info->monitor_range.max_vfreq;
}
+bool intel_vrr_possible(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->vrr.flipline;
+}
+
void
intel_vrr_check_modeset(struct intel_atomic_state *state)
{
@@ -239,11 +243,16 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
(crtc_state->hw.adjusted_mode.crtc_vtotal -
crtc_state->hw.adjusted_mode.vsync_end);
}
+}
+
+void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ if (!intel_vrr_possible(crtc_state))
+ return;
- /*
- * For XE_LPD+, we use guardband and pipeline override
- * is deprecated.
- */
if (DISPLAY_VER(display) >= 13) {
crtc_state->vrr.guardband =
crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start;
@@ -278,10 +287,10 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
* ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
if (IS_DISPLAY_VER(display, 12, 13))
- intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder),
+ intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
- if (!crtc_state->vrr.flipline) {
+ if (!intel_vrr_possible(crtc_state)) {
intel_de_write(display,
TRANS_VRR_CTL(display, cpu_transcoder), 0);
return;
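
For display version 13+, intel_vrr_compute_config_late() above sets the guardband to vrr.vmin + 1 - crtc_vblank_start. A quick arithmetic check with invented timings, assuming vrr.vmin is stored in the off-by-one register convention that the +1 implies:

/* Hedged arithmetic check of the display 13+ guardband formula above;
 * timing values are invented for illustration. */
#include <stdio.h>

int main(void)
{
	int vmin = 1080 + 45 - 1;	/* lowest vtotal, stored minus one */
	int crtc_vblank_start = 1080;	/* active lines */

	int guardband = vmin + 1 - crtc_vblank_start;

	printf("guardband = %d lines\n", guardband);	/* 45 */
	return 0;
}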
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 89937858200d..b3b45c675020 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -15,9 +15,11 @@ struct intel_crtc_state;
bool intel_vrr_is_capable(struct intel_connector *connector);
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh);
+bool intel_vrr_possible(const struct intel_crtc_state *crtc_state);
void intel_vrr_check_modeset(struct intel_atomic_state *state);
void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
+void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state);
void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
void intel_vrr_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index 82c4933ad507..d7dc49aecd27 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/debugfs.h>
+
#include "i915_drv.h"
#include "i9xx_wm.h"
#include "intel_display_types.h"
@@ -48,29 +50,15 @@ void intel_update_watermarks(struct drm_i915_private *i915)
i915->display.funcs.wm->update_wm(i915);
}
-int intel_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+int intel_wm_compute(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
-
- if (i915->display.funcs.wm->compute_pipe_wm)
- return i915->display.funcs.wm->compute_pipe_wm(state, crtc);
-
- return 0;
-}
-
-int intel_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
-
- if (!i915->display.funcs.wm->compute_intermediate_wm)
- return 0;
+ struct intel_display *display = to_intel_display(state);
- if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm))
+ if (!display->funcs.wm->compute_watermarks)
return 0;
- return i915->display.funcs.wm->compute_intermediate_wm(state, crtc);
+ return display->funcs.wm->compute_watermarks(state, crtc);
}
bool intel_initial_watermarks(struct intel_atomic_state *state,
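
intel_wm_compute() above collapses the two optional compute_pipe_wm/compute_intermediate_wm hooks into a single compute_watermarks vfunc behind one NULL check. A minimal sketch of that shape, with illustrative names:

/* Minimal sketch of collapsing two optional callbacks into one,
 * mirroring the intel_wm_compute() change above; names illustrative. */
#include <stdio.h>

struct state;
struct crtc;

struct wm_funcs {
	int (*compute_watermarks)(struct state *state, struct crtc *crtc);
};

static int wm_compute(const struct wm_funcs *funcs,
		      struct state *state, struct crtc *crtc)
{
	if (!funcs->compute_watermarks)
		return 0;	/* platform has no WM programming */

	return funcs->compute_watermarks(state, crtc);
}

static int dummy_compute(struct state *state, struct crtc *crtc)
{
	(void)state; (void)crtc;
	return 0;
}

int main(void)
{
	struct wm_funcs funcs = { .compute_watermarks = dummy_compute };

	printf("%d\n", wm_compute(&funcs, NULL, NULL));
	return 0;
}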
diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h
index 48429ac140d2..e97cdca89a5c 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.h
+++ b/drivers/gpu/drm/i915/display/intel_wm.h
@@ -15,10 +15,8 @@ struct intel_crtc_state;
struct intel_plane_state;
void intel_update_watermarks(struct drm_i915_private *i915);
-int intel_compute_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-int intel_compute_intermediate_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+int intel_wm_compute(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
bool intel_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_atomic_update_watermarks(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index baa601d27815..ae21fce534dc 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -3,6 +3,7 @@
* Copyright © 2020 Intel Corporation
*/
+#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -105,10 +106,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
const struct drm_format_info *format,
u64 modifier, bool need_scaler)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
@@ -130,9 +131,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* Once NV12 is enabled, handle it here while allocating scaler
* for NV12.
*/
- if (DISPLAY_VER(dev_priv) >= 9 && crtc_state->hw.enable &&
+ if (DISPLAY_VER(display) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
}
@@ -150,9 +151,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (force_detach || !need_scaler) {
if (*scaler_id >= 0) {
scaler_state->scaler_users &= ~(1 << scaler_user);
- scaler_state->scalers[*scaler_id].in_use = 0;
+ scaler_state->scalers[*scaler_id].in_use = false;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"scaler_user index %u.%u: "
"Staged freeing scaler id %d scaler_users = 0x%x\n",
crtc->pipe, scaler_user, *scaler_id,
@@ -164,7 +165,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Planar YUV: src dimensions not met\n");
return -EINVAL;
}
@@ -174,17 +175,17 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
min_dst_w = SKL_MIN_DST_W;
min_dst_h = SKL_MIN_DST_H;
- if (DISPLAY_VER(dev_priv) < 11) {
+ if (DISPLAY_VER(display) < 11) {
max_src_w = SKL_MAX_SRC_W;
max_src_h = SKL_MAX_SRC_H;
max_dst_w = SKL_MAX_DST_W;
max_dst_h = SKL_MAX_DST_H;
- } else if (DISPLAY_VER(dev_priv) < 12) {
+ } else if (DISPLAY_VER(display) < 12) {
max_src_w = ICL_MAX_SRC_W;
max_src_h = ICL_MAX_SRC_H;
max_dst_w = ICL_MAX_DST_W;
max_dst_h = ICL_MAX_DST_H;
- } else if (DISPLAY_VER(dev_priv) < 14) {
+ } else if (DISPLAY_VER(display) < 14) {
max_src_w = TGL_MAX_SRC_W;
max_src_h = TGL_MAX_SRC_H;
max_dst_w = TGL_MAX_DST_W;
@@ -201,7 +202,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
dst_w < min_dst_w || dst_h < min_dst_h ||
src_w > max_src_w || src_h > max_src_h ||
dst_w > max_dst_w || dst_h > max_dst_h) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
crtc->pipe, scaler_user, src_w, src_h,
@@ -218,7 +219,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* now.
*/
if (pipe_src_w > max_dst_w || pipe_src_h > max_dst_h) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"scaler_user index %u.%u: pipe src size %ux%u "
"is out of scaler range\n",
crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
@@ -227,7 +228,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
- drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+ drm_dbg_kms(display->drm, "scaler_user index %u.%u: "
"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
scaler_state->scaler_users);
@@ -268,110 +269,60 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *intel_plane =
- to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_framebuffer *fb = plane_state->hw.fb;
- int ret;
bool force_detach = !fb || !plane_state->uapi.visible;
bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
- if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+ if (!icl_is_hdr_plane(dev_priv, plane->id) &&
fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
need_scaler = true;
- ret = skl_update_scaler(crtc_state, force_detach,
- drm_plane_index(&intel_plane->base),
- &plane_state->scaler_id,
- drm_rect_width(&plane_state->uapi.src) >> 16,
- drm_rect_height(&plane_state->uapi.src) >> 16,
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst),
- fb ? fb->format : NULL,
- fb ? fb->modifier : 0,
- need_scaler);
-
- if (ret || plane_state->scaler_id < 0)
- return ret;
-
- /* check colorkey */
- if (plane_state->ckey.flags) {
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] scaling with color key not allowed",
- intel_plane->base.base.id,
- intel_plane->base.name);
- return -EINVAL;
- }
+ return skl_update_scaler(crtc_state, force_detach,
+ drm_plane_index(&plane->base),
+ &plane_state->scaler_id,
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ drm_rect_height(&plane_state->uapi.src) >> 16,
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst),
+ fb ? fb->format : NULL,
+ fb ? fb->modifier : 0,
+ need_scaler);
+}
- /* Check src format */
- switch (fb->format->format) {
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_XYUV8888:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- case DRM_FORMAT_Y210:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- break;
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
- if (DISPLAY_VER(dev_priv) >= 11)
- break;
- fallthrough;
- default:
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, intel_plane->base.name,
- fb->base.id, fb->format->format);
- return -EINVAL;
+static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
+ struct intel_crtc *crtc)
+{
+ int i;
+
+ for (i = 0; i < crtc->num_scalers; i++) {
+ if (scaler_state->scalers[i].in_use)
+ continue;
+
+ scaler_state->scalers[i].in_use = true;
+
+ return i;
}
- return 0;
+ return -1;
}
static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
- int num_scalers_need, struct intel_crtc *intel_crtc,
+ int num_scalers_need, struct intel_crtc *crtc,
const char *name, int idx,
struct intel_plane_state *plane_state,
int *scaler_id)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- int j;
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 mode;
- if (*scaler_id < 0) {
- /* find a free scaler */
- for (j = 0; j < intel_crtc->num_scalers; j++) {
- if (scaler_state->scalers[j].in_use)
- continue;
+ if (*scaler_id < 0)
+ *scaler_id = intel_allocate_scaler(scaler_state, crtc);
- *scaler_id = j;
- scaler_state->scalers[*scaler_id].in_use = 1;
- break;
- }
- }
-
- if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+ if (drm_WARN(display->drm, *scaler_id < 0,
"Cannot find scaler for %s:%d\n", name, idx))
return -EINVAL;
@@ -381,7 +332,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
plane_state->hw.fb->format->num_planes > 1) {
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- if (DISPLAY_VER(dev_priv) == 9) {
+ if (DISPLAY_VER(display) == 9) {
mode = SKL_PS_SCALER_MODE_NV12;
} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
/*
@@ -399,17 +350,17 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
if (linked)
mode |= PS_BINDING_Y_PLANE(linked->id);
}
- } else if (DISPLAY_VER(dev_priv) >= 10) {
+ } else if (DISPLAY_VER(display) >= 10) {
mode = PS_SCALER_MODE_NORMAL;
- } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+ } else if (num_scalers_need == 1 && crtc->num_scalers > 1) {
/*
* when only 1 scaler is in use on a pipe with 2 scalers
* scaler 0 operates in high quality (HQ) mode.
* In this case use scaler 0 to take advantage of HQ mode
*/
- scaler_state->scalers[*scaler_id].in_use = 0;
+ scaler_state->scalers[*scaler_id].in_use = false;
*scaler_id = 0;
- scaler_state->scalers[0].in_use = 1;
+ scaler_state->scalers[0].in_use = true;
mode = SKL_PS_SCALER_MODE_HQ;
} else {
mode = SKL_PS_SCALER_MODE_DYN;
@@ -433,7 +384,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
* unnecessarily.
*/
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
/*
* On versions 14 and up, only the first
* scaler supports a vertical scaling factor
@@ -446,7 +397,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
else
max_vscale = 0x10000;
- } else if (DISPLAY_VER(dev_priv) >= 10 ||
+ } else if (DISPLAY_VER(display) >= 10 ||
!intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
max_hscale = 0x30000 - 1;
max_vscale = 0x30000 - 1;
@@ -465,7 +416,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
if (hscale < 0 || vscale < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Scaler %d doesn't support required plane scaling\n",
*scaler_id);
drm_rect_debug_print("src: ", src, true);
@@ -475,18 +426,66 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
}
}
- drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
+ drm_dbg_kms(display->drm, "Attached scaler id %u.%u to %s:%d\n",
+ crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
return 0;
}
+static int setup_crtc_scaler(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+
+ return intel_atomic_setup_scaler(scaler_state,
+ hweight32(scaler_state->scaler_users),
+ crtc, "CRTC", crtc->base.base.id,
+ NULL, &scaler_state->scaler_id);
+}
+
+static int setup_plane_scaler(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct intel_plane_state *plane_state;
+
+	/* a plane on a different crtc cannot be a scaler user of this crtc */
+ if (drm_WARN_ON(display->drm, plane->pipe != crtc->pipe))
+ return 0;
+
+ plane_state = intel_atomic_get_new_plane_state(state, plane);
+
+ /*
+	 * GLK+ scalers don't have an HQ mode, so it
+ * isn't necessary to change between HQ and dyn mode
+ * on those platforms.
+ */
+ if (!plane_state && DISPLAY_VER(display) >= 10)
+ return 0;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ return intel_atomic_setup_scaler(scaler_state,
+ hweight32(scaler_state->scaler_users),
+ crtc, "PLANE", plane->base.base.id,
+ plane_state, &plane_state->scaler_id);
+}
+
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
- * @dev_priv: i915 device
- * @intel_crtc: intel crtc
- * @crtc_state: incoming crtc_state to validate and setup scalers
+ * @state: atomic state
+ * @crtc: crtc
*
* This function sets up scalers based on staged scaling requests for
* a @crtc and its planes. It is called from crtc level check path. If request
@@ -499,16 +498,14 @@ static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_stat
* 0 - scalers were setup successfully
* error code - otherwise
*/
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
+int intel_atomic_setup_scalers(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_plane *plane = NULL;
- struct intel_plane *intel_plane;
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
- struct drm_atomic_state *drm_state = crtc_state->uapi.state;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
int i;
@@ -527,80 +524,33 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
*/
/* fail if required scalers > available scalers */
- if (num_scalers_need > intel_crtc->num_scalers) {
- drm_dbg_kms(&dev_priv->drm,
+ if (num_scalers_need > crtc->num_scalers) {
+ drm_dbg_kms(display->drm,
"Too many scaling requests %d > %d\n",
- num_scalers_need, intel_crtc->num_scalers);
+ num_scalers_need, crtc->num_scalers);
return -EINVAL;
}
/* walkthrough scaler_users bits and start assigning scalers */
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
- struct intel_plane_state *plane_state = NULL;
- int *scaler_id;
- const char *name;
- int idx, ret;
+ int ret;
/* skip if scaler not required */
if (!(scaler_state->scaler_users & (1 << i)))
continue;
if (i == SKL_CRTC_INDEX) {
- name = "CRTC";
- idx = intel_crtc->base.base.id;
-
- /* panel fitter case: assign as a crtc scaler */
- scaler_id = &scaler_state->scaler_id;
+ ret = setup_crtc_scaler(state, crtc);
+ if (ret)
+ return ret;
} else {
- name = "PLANE";
+ struct intel_plane *plane =
+ to_intel_plane(drm_plane_from_index(display->drm, i));
- /* plane scaler case: assign as a plane scaler */
- /* find the plane that set the bit as scaler_user */
- plane = drm_state->planes[i].ptr;
-
- /*
- * to enable/disable hq mode, add planes that are using scaler
- * into this transaction
- */
- if (!plane) {
- struct drm_plane_state *state;
-
- /*
- * GLK+ scalers don't have a HQ mode so it
- * isn't necessary to change between HQ and dyn mode
- * on those platforms.
- */
- if (DISPLAY_VER(dev_priv) >= 10)
- continue;
-
- plane = drm_plane_from_index(&dev_priv->drm, i);
- state = drm_atomic_get_plane_state(drm_state, plane);
- if (IS_ERR(state)) {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to add [PLANE:%d] to drm_state\n",
- plane->base.id);
- return PTR_ERR(state);
- }
- }
-
- intel_plane = to_intel_plane(plane);
- idx = plane->base.id;
-
- /* plane on different crtc cannot be a scaler user of this crtc */
- if (drm_WARN_ON(&dev_priv->drm,
- intel_plane->pipe != intel_crtc->pipe))
- continue;
-
- plane_state = intel_atomic_get_new_plane_state(intel_state,
- intel_plane);
- scaler_id = &plane_state->scaler_id;
+ ret = setup_plane_scaler(state, crtc, plane);
+ if (ret)
+ return ret;
}
-
- ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
- intel_crtc, name, idx,
- plane_state, scaler_id);
- if (ret < 0)
- return ret;
}
return 0;
@@ -653,12 +603,12 @@ static u16 glk_nearest_filter_coef(int t)
*
*/
-static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
+static void glk_program_nearest_filter_coefs(struct intel_display *display,
enum pipe pipe, int id, int set)
{
int i;
- intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set),
+ intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set),
PS_COEF_INDEX_AUTO_INC);
for (i = 0; i < 17 * 7; i += 2) {
@@ -671,11 +621,11 @@ static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
t = glk_coef_tap(i + 1);
tmp |= glk_nearest_filter_coef(t) << 16;
- intel_de_write_fw(dev_priv, GLK_PS_COEF_DATA_SET(pipe, id, set),
+ intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(pipe, id, set),
tmp);
}
- intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
+ intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
}
static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
@@ -691,14 +641,14 @@ static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
return PS_FILTER_MEDIUM;
}
-static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe pipe,
+static void skl_scaler_setup_filter(struct intel_display *display, enum pipe pipe,
int id, int set, enum drm_scaling_filter filter)
{
switch (filter) {
case DRM_SCALING_FILTER_DEFAULT:
break;
case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
- glk_program_nearest_filter_coefs(dev_priv, pipe, id, set);
+ glk_program_nearest_filter_coefs(display, pipe, id, set);
break;
default:
MISSING_CASE(filter);
@@ -707,8 +657,8 @@ static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe
void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
@@ -726,7 +676,7 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
if (!crtc_state->pch_pfit.enabled)
return;
- if (drm_WARN_ON(&dev_priv->drm,
+ if (drm_WARN_ON(display->drm,
crtc_state->scaler_state.scaler_id < 0))
return;
@@ -745,18 +695,18 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
- skl_scaler_setup_filter(dev_priv, pipe, id, 0,
+ skl_scaler_setup_filter(display, pipe, id, 0,
crtc_state->hw.scaling_filter);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), ps_ctrl);
+ intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl);
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ intel_de_write_fw(display, SKL_PS_VPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ intel_de_write_fw(display, SKL_PS_HPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, id),
PS_WIN_XPOS(x) | PS_WIN_YPOS(y));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, id),
PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height));
}
@@ -765,6 +715,7 @@ skl_program_plane_scaler(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
@@ -808,28 +759,27 @@ skl_program_plane_scaler(struct intel_plane *plane,
ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
- skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
+ skl_scaler_setup_filter(display, pipe, scaler_id, 0,
plane_state->hw.scaling_filter);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
- intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ intel_de_write_fw(display, SKL_PS_VPHASE(pipe, scaler_id),
PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_HPHASE(pipe, scaler_id),
PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, scaler_id),
PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, scaler_id),
PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));
}
static void skl_detach_scaler(struct intel_crtc *crtc, int id)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = to_intel_display(crtc);
- intel_de_write_fw(dev_priv, SKL_PS_CTRL(crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(crtc->pipe, id), 0);
- intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
+ intel_de_write_fw(display, SKL_PS_CTRL(crtc->pipe, id), 0);
+ intel_de_write_fw(display, SKL_PS_WIN_POS(crtc->pipe, id), 0);
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
}
/*
@@ -860,8 +810,8 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
int id = -1;
int i;
@@ -870,15 +820,15 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
for (i = 0; i < crtc->num_scalers; i++) {
u32 ctl, pos, size;
- ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
+ ctl = intel_de_read(display, SKL_PS_CTRL(crtc->pipe, i));
if ((ctl & (PS_SCALER_EN | PS_BINDING_MASK)) != (PS_SCALER_EN | PS_BINDING_PIPE))
continue;
id = i;
crtc_state->pch_pfit.enabled = true;
- pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
- size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
+ pos = intel_de_read(display, SKL_PS_WIN_POS(crtc->pipe, i));
+ size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, i));
drm_rect_init(&crtc_state->pch_pfit.dst,
REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
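
The refactor above pulls the first-fit scaler search out of intel_atomic_setup_scaler() into intel_allocate_scaler(). The standalone sketch below reproduces that allocation strategy outside the driver; types and sizes are illustrative.

/* Standalone sketch of the first-fit allocation that
 * intel_allocate_scaler() above factors out; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SCALERS 2

struct scaler_state {
	bool in_use[MAX_SCALERS];
};

static int allocate_scaler(struct scaler_state *state, int num_scalers)
{
	for (int i = 0; i < num_scalers; i++) {
		if (state->in_use[i])
			continue;

		state->in_use[i] = true;
		return i;
	}

	return -1;	/* all scalers taken */
}

int main(void)
{
	struct scaler_state state = { .in_use = { true, false } };

	/* scaler 0 busy, so the first free one is scaler 1 */
	printf("got scaler %d\n", allocate_scaler(&state, MAX_SCALERS));
	return 0;
}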
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 63f93ca03c89..4d2e2dbb1666 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -5,11 +5,7 @@
#ifndef INTEL_SCALER_H
#define INTEL_SCALER_H
-#include <linux/types.h>
-
-enum drm_scaling_filter;
-enum pipe;
-struct drm_i915_private;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
@@ -20,9 +16,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
+int intel_atomic_setup_scalers(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 17d4c880ecc4..80e558042d97 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic_plane.h"
+#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
@@ -105,8 +106,6 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_sdr_uv_plane_formats[] = {
@@ -133,8 +132,6 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_hdr_plane_formats[] = {
@@ -238,7 +235,9 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
static u8 icl_nv12_y_plane_mask(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 13 || HAS_D12_PLANE_MINIMIZATION(i915))
+ struct intel_display *display = &i915->display;
+
+ if (DISPLAY_VER(display) >= 13 || HAS_D12_PLANE_MINIMIZATION(display))
return BIT(PLANE_4) | BIT(PLANE_5);
else
return BIT(PLANE_6) | BIT(PLANE_7);
@@ -349,7 +348,6 @@ static int skl_plane_max_width(const struct drm_framebuffer *fb,
return 5120;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
/* FIXME AUX plane? */
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
@@ -431,6 +429,16 @@ static int icl_plane_min_width(const struct drm_framebuffer *fb,
}
}
+static int xe3_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ return 4096;
+ else
+ return 6144;
+}
+
static int icl_hdr_plane_max_width(const struct drm_framebuffer *fb,
int color_plane,
unsigned int rotation)
@@ -593,11 +601,11 @@ static u32 skl_plane_min_alignment(struct intel_plane *plane,
* in full-range YCbCr.
*/
static void
-icl_program_input_csc(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
+icl_program_input_csc(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
@@ -641,31 +649,31 @@ icl_program_input_csc(struct intel_plane *plane,
};
const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
- ROFF(csc[0]) | GOFF(csc[1]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
- BOFF(csc[2]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
- ROFF(csc[3]) | GOFF(csc[4]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
- BOFF(csc[5]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
- ROFF(csc[6]) | GOFF(csc[7]));
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
- BOFF(csc[8]));
-
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
- PREOFF_YUV_TO_RGB_HI);
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
- intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
- PREOFF_YUV_TO_RGB_LO);
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
- intel_de_write_fw(dev_priv,
- PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+ intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
+ ROFF(csc[0]) | GOFF(csc[1]));
+ intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
+ BOFF(csc[2]));
+ intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
+ ROFF(csc[3]) | GOFF(csc[4]));
+ intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
+ BOFF(csc[5]));
+ intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
+ ROFF(csc[6]) | GOFF(csc[7]));
+ intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
+ BOFF(csc[8]));
+
+ intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
+ intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_dsb(display, dsb, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ intel_de_write_dsb(display, dsb,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+ intel_de_write_dsb(display, dsb,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+ intel_de_write_dsb(display, dsb,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
}
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
@@ -705,6 +713,22 @@ static u32 skl_plane_ddb_reg_val(const struct skl_ddb_entry *entry)
PLANE_BUF_START(entry->start);
}
+static u32 xe3_plane_min_ddb_reg_val(const u16 *min_ddb,
+ const u16 *interim_ddb)
+{
+ u32 val = 0;
+
+ if (*min_ddb)
+ val |= PLANE_MIN_DBUF_BLOCKS(*min_ddb);
+
+ if (*interim_ddb)
+ val |= PLANE_INTERIM_DBUF_BLOCKS(*interim_ddb);
+
+ val |= val ? PLANE_AUTO_MIN_DBUF_EN : 0;
+
+ return val;
+}
+
static u32 skl_plane_wm_reg_val(const struct skl_wm_level *level)
{
u32 val = 0;
@@ -713,16 +737,20 @@ static u32 skl_plane_wm_reg_val(const struct skl_wm_level *level)
val |= PLANE_WM_EN;
if (level->ignore_lines)
val |= PLANE_WM_IGNORE_LINES;
+ if (level->auto_min_alloc_wm_enable)
+ val |= PLANE_WM_AUTO_MIN_ALLOC_EN;
+
val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
return val;
}
-static void skl_write_plane_wm(struct intel_plane *plane,
+static void skl_write_plane_wm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
@@ -730,74 +758,85 @@ static void skl_write_plane_wm(struct intel_plane *plane,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const u16 *min_ddb = &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ const u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++)
- intel_de_write_fw(i915, PLANE_WM(pipe, plane_id, level),
- skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
+ for (level = 0; level < display->wm.num_levels; level++)
+ intel_de_write_dsb(display, dsb, PLANE_WM(pipe, plane_id, level),
+ skl_plane_wm_reg_val(skl_plane_wm_level(pipe_wm, plane_id, level)));
- intel_de_write_fw(i915, PLANE_WM_TRANS(pipe, plane_id),
- skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
+ intel_de_write_dsb(display, dsb, PLANE_WM_TRANS(pipe, plane_id),
+ skl_plane_wm_reg_val(skl_plane_trans_wm(pipe_wm, plane_id)));
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
- intel_de_write_fw(i915, PLANE_WM_SAGV(pipe, plane_id),
- skl_plane_wm_reg_val(&wm->sagv.wm0));
- intel_de_write_fw(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
- skl_plane_wm_reg_val(&wm->sagv.trans_wm));
+ intel_de_write_dsb(display, dsb, PLANE_WM_SAGV(pipe, plane_id),
+ skl_plane_wm_reg_val(&wm->sagv.wm0));
+ intel_de_write_dsb(display, dsb, PLANE_WM_SAGV_TRANS(pipe, plane_id),
+ skl_plane_wm_reg_val(&wm->sagv.trans_wm));
}
- intel_de_write_fw(i915, PLANE_BUF_CFG(pipe, plane_id),
- skl_plane_ddb_reg_val(ddb));
+ intel_de_write_dsb(display, dsb, PLANE_BUF_CFG(pipe, plane_id),
+ skl_plane_ddb_reg_val(ddb));
- if (DISPLAY_VER(i915) < 11)
- intel_de_write_fw(i915, PLANE_NV12_BUF_CFG(pipe, plane_id),
- skl_plane_ddb_reg_val(ddb_y));
+ if (DISPLAY_VER(display) < 11)
+ intel_de_write_dsb(display, dsb, PLANE_NV12_BUF_CFG(pipe, plane_id),
+ skl_plane_ddb_reg_val(ddb_y));
+
+ if (DISPLAY_VER(display) >= 30)
+ intel_de_write_dsb(display, dsb, PLANE_MIN_BUF_CFG(pipe, plane_id),
+ xe3_plane_min_ddb_reg_val(min_ddb, interim_ddb));
}
static void
-skl_plane_disable_arm(struct intel_plane *plane,
+skl_plane_disable_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- skl_write_plane_wm(plane, crtc_state);
+ skl_write_plane_wm(dsb, plane, crtc_state);
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0);
+ intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0);
}
-static void icl_plane_disable_sel_fetch_arm(struct intel_plane *plane,
+static void icl_plane_disable_sel_fetch_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
return;
- intel_de_write_fw(i915, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0);
+ intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0);
}
static void
-icl_plane_disable_arm(struct intel_plane *plane,
+icl_plane_disable_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
if (icl_is_hdr_plane(dev_priv, plane_id))
- intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id), 0);
- skl_write_plane_wm(plane, crtc_state);
+ skl_write_plane_wm(dsb, plane, crtc_state);
- icl_plane_disable_sel_fetch_arm(plane, crtc_state);
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
+ icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state);
+ intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0);
+ intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0);
}
static bool
@@ -1234,28 +1273,30 @@ static u32 skl_plane_keymsk(const struct intel_plane_state *plane_state)
return keymsk;
}
-static void icl_plane_csc_load_black(struct intel_plane *plane)
+static void icl_plane_csc_load_black(struct intel_dsb *dsb,
+ struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0);
- intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 0), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 1), 0);
- intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0);
- intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 2), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 3), 0);
- intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0);
- intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 4), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane_id, 5), 0);
- intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0);
- intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0);
- intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0);
- intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0);
- intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0);
- intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
}
static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
@@ -1268,11 +1309,12 @@ static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
}
static void
-skl_plane_update_noarm(struct intel_plane *plane,
+skl_plane_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
u32 stride = skl_plane_stride(plane_state, 0);
@@ -1287,21 +1329,23 @@ skl_plane_update_noarm(struct intel_plane *plane,
crtc_y = 0;
}
- intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id),
- PLANE_STRIDE_(stride));
- intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
- PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
- PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
+ intel_de_write_dsb(display, dsb, PLANE_STRIDE(pipe, plane_id),
+ PLANE_STRIDE_(stride));
+ intel_de_write_dsb(display, dsb, PLANE_POS(pipe, plane_id),
+ PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
+ intel_de_write_dsb(display, dsb, PLANE_SIZE(pipe, plane_id),
+ PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
- skl_write_plane_wm(plane, crtc_state);
+ skl_write_plane_wm(dsb, plane, crtc_state);
}
static void
-skl_plane_update_arm(struct intel_plane *plane,
+skl_plane_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
@@ -1321,22 +1365,26 @@ skl_plane_update_arm(struct intel_plane *plane,
plane_color_ctl = plane_state->color_ctl |
glk_plane_color_ctl_crtc(crtc_state);
- intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state));
- intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state));
- intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state));
+ intel_de_write_dsb(display, dsb, PLANE_KEYVAL(pipe, plane_id),
+ skl_plane_keyval(plane_state));
+ intel_de_write_dsb(display, dsb, PLANE_KEYMSK(pipe, plane_id),
+ skl_plane_keymsk(plane_state));
+ intel_de_write_dsb(display, dsb, PLANE_KEYMAX(pipe, plane_id),
+ skl_plane_keymax(plane_state));
- intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
- PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
+ intel_de_write_dsb(display, dsb, PLANE_OFFSET(pipe, plane_id),
+ PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
- intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id),
- skl_plane_aux_dist(plane_state, 0));
+ intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id),
+ skl_plane_aux_dist(plane_state, 0));
- intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
- PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) |
- PLANE_OFFSET_X(plane_state->view.color_plane[1].x));
+ intel_de_write_dsb(display, dsb, PLANE_AUX_OFFSET(pipe, plane_id),
+ PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) |
+ PLANE_OFFSET_X(plane_state->view.color_plane[1].x));
if (DISPLAY_VER(dev_priv) >= 10)
- intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+ intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id),
+ plane_color_ctl);
/*
* Enable the scaler before the plane so that we don't
@@ -1353,17 +1401,19 @@ skl_plane_update_arm(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- skl_plane_surf(plane_state, 0));
+ intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
+ plane_ctl);
+ intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
+ skl_plane_surf(plane_state, 0));
}
-static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane,
+static void icl_plane_update_sel_fetch_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int color_plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
const struct drm_rect *clip;
u32 val;
@@ -1380,7 +1430,7 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane,
y = (clip->y1 + plane_state->uapi.dst.y1);
val = y << 16;
val |= plane_state->uapi.dst.x1;
- intel_de_write_fw(i915, SEL_FETCH_PLANE_POS(pipe, plane->id), val);
+ intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_POS(pipe, plane->id), val);
x = plane_state->view.color_plane[color_plane].x;
@@ -1395,20 +1445,21 @@ static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane,
val = y << 16 | x;
- intel_de_write_fw(i915, SEL_FETCH_PLANE_OFFSET(pipe, plane->id),
- val);
+ intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_OFFSET(pipe, plane->id), val);
/* Sizes are 0 based */
val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
- intel_de_write_fw(i915, SEL_FETCH_PLANE_SIZE(pipe, plane->id), val);
+ intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_SIZE(pipe, plane->id), val);
}
static void
-icl_plane_update_noarm(struct intel_plane *plane,
+icl_plane_update_noarm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_display *display = to_intel_display(plane->base.dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
@@ -1432,76 +1483,82 @@ icl_plane_update_noarm(struct intel_plane *plane,
crtc_y = 0;
}
- intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id),
- PLANE_STRIDE_(stride));
- intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
- PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
- intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
- PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
+ intel_de_write_dsb(display, dsb, PLANE_STRIDE(pipe, plane_id),
+ PLANE_STRIDE_(stride));
+ intel_de_write_dsb(display, dsb, PLANE_POS(pipe, plane_id),
+ PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x));
+ intel_de_write_dsb(display, dsb, PLANE_SIZE(pipe, plane_id),
+ PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1));
- intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state));
- intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state));
- intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state));
+ intel_de_write_dsb(display, dsb, PLANE_KEYVAL(pipe, plane_id),
+ skl_plane_keyval(plane_state));
+ intel_de_write_dsb(display, dsb, PLANE_KEYMSK(pipe, plane_id),
+ skl_plane_keymsk(plane_state));
+ intel_de_write_dsb(display, dsb, PLANE_KEYMAX(pipe, plane_id),
+ skl_plane_keymax(plane_state));
- intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
- PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
+ intel_de_write_dsb(display, dsb, PLANE_OFFSET(pipe, plane_id),
+ PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x));
if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) {
- intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0),
- lower_32_bits(plane_state->ccval));
- intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1),
- upper_32_bits(plane_state->ccval));
+ intel_de_write_dsb(display, dsb, PLANE_CC_VAL(pipe, plane_id, 0),
+ lower_32_bits(plane_state->ccval));
+ intel_de_write_dsb(display, dsb, PLANE_CC_VAL(pipe, plane_id, 1),
+ upper_32_bits(plane_state->ccval));
}
/* FLAT CCS doesn't need to program AUX_DIST */
if (!HAS_FLAT_CCS(dev_priv) && DISPLAY_VER(dev_priv) < 20)
- intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id),
- skl_plane_aux_dist(plane_state, color_plane));
+ intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id),
+ skl_plane_aux_dist(plane_state, color_plane));
if (icl_is_hdr_plane(dev_priv, plane_id))
- intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
- plane_state->cus_ctl);
+ intel_de_write_dsb(display, dsb, PLANE_CUS_CTL(pipe, plane_id),
+ plane_state->cus_ctl);
- intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+ intel_de_write_dsb(display, dsb, PLANE_COLOR_CTL(pipe, plane_id),
+ plane_color_ctl);
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
- icl_program_input_csc(plane, crtc_state, plane_state);
+ icl_program_input_csc(dsb, plane, plane_state);
- skl_write_plane_wm(plane, crtc_state);
+ skl_write_plane_wm(dsb, plane, crtc_state);
/*
* FIXME: pxp session invalidation can hit at any time, even during or
* after the commit; display content will then be garbage.
*/
if (plane_state->force_black)
- icl_plane_csc_load_black(plane);
+ icl_plane_csc_load_black(dsb, plane, crtc_state);
- icl_plane_update_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
+ icl_plane_update_sel_fetch_noarm(dsb, plane, crtc_state, plane_state, color_plane);
}
-static void icl_plane_update_sel_fetch_arm(struct intel_plane *plane,
+static void icl_plane_update_sel_fetch_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum pipe pipe = plane->pipe;
if (!crtc_state->enable_psr2_sel_fetch)
return;
if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
- intel_de_write_fw(i915, SEL_FETCH_PLANE_CTL(pipe, plane->id),
- SEL_FETCH_PLANE_CTL_ENABLE);
+ intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id),
+ SEL_FETCH_PLANE_CTL_ENABLE);
else
- icl_plane_disable_sel_fetch_arm(plane, crtc_state);
+ icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state);
}
static void
-icl_plane_update_arm(struct intel_plane *plane,
+icl_plane_update_arm(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
int color_plane = icl_plane_color_plane(plane_state);
@@ -1520,37 +1577,45 @@ icl_plane_update_arm(struct intel_plane *plane,
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
- icl_plane_update_sel_fetch_arm(plane, crtc_state, plane_state);
+ icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- skl_plane_surf(plane_state, color_plane));
+ intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
+ plane_ctl);
+ intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
+ skl_plane_surf(plane_state, color_plane));
}
static void
-skl_plane_async_flip(struct intel_plane *plane,
+skl_plane_async_flip(struct intel_dsb *dsb,
+ struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
bool async_flip)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- u32 plane_ctl = plane_state->ctl;
+ u32 plane_ctl = plane_state->ctl, plane_surf;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+ plane_surf = skl_plane_surf(plane_state, 0);
- if (async_flip)
- plane_ctl |= PLANE_CTL_ASYNC_FLIP;
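+ /*
+ * Xe3+ requests an async flip via PLANE_SURF bit 0
+ * (PLANE_SURF_ASYNC_UPDATE) rather than via PLANE_CTL.
+ */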
+ if (async_flip) {
+ if (DISPLAY_VER(display) >= 30)
+ plane_surf |= PLANE_SURF_ASYNC_UPDATE;
+ else
+ plane_ctl |= PLANE_CTL_ASYNC_FLIP;
+ }
- intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- skl_plane_surf(plane_state, 0));
+ intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
+ plane_ctl);
+ intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
+ plane_surf);
}
static bool intel_format_is_p01x(u32 format)
@@ -1591,6 +1656,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
return -EINVAL;
}
+ /*
+ * From display version 20 onwards, horizontal flip is not supported with tile4
+ */
+ if (rotation & DRM_MODE_REFLECT_X &&
+ intel_fb_is_tile4_modifier(fb->modifier) &&
+ DISPLAY_VER(dev_priv) >= 20) {
+ drm_dbg_kms(&dev_priv->drm,
+ "horizontal flip is not supported with tile4 surface formats\n");
+ return -EINVAL;
+ }
+
if (drm_rotation_90_or_270(rotation)) {
if (!intel_fb_supports_90_270_rotation(to_intel_framebuffer(fb))) {
drm_dbg_kms(&dev_priv->drm,
@@ -2084,13 +2160,13 @@ static void check_protection(struct intel_plane_state *plane_state)
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_gem_object *obj = intel_fb_bo(fb);
if (DISPLAY_VER(i915) < 11)
return;
plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0;
- plane_state->force_black = i915_gem_object_is_protected(obj) &&
+ plane_state->force_black = intel_bo_is_protected(obj) &&
!plane_state->decrypt;
}
@@ -2302,8 +2378,8 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
+static bool icl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
{
struct intel_plane *plane = to_intel_plane(_plane);
@@ -2315,9 +2391,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
if (intel_fb_is_ccs_modifier(modifier))
return true;
fallthrough;
+ case DRM_FORMAT_RGB565:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2327,20 +2408,69 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
- if (intel_fb_is_mc_ccs_modifier(modifier))
+ case DRM_FORMAT_XVYU2101010:
+ if (modifier == I915_FORMAT_MOD_Yf_TILED)
return true;
fallthrough;
- case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ fallthrough;
+ default:
+ return false;
+ }
+}
+
+static bool tgl_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+
+ if (!intel_fb_plane_supports_modifier(plane, modifier))
+ return false;
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_C8:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_ARGB16161616F:
+ if (intel_fb_is_ccs_modifier(modifier))
+ return true;
+ fallthrough;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ if (intel_fb_is_mc_ccs_modifier(modifier))
+ return true;
+ fallthrough;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_C8:
case DRM_FORMAT_Y210:
case DRM_FORMAT_Y212:
case DRM_FORMAT_Y216:
@@ -2363,13 +2493,22 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.format_mod_supported = skl_plane_format_mod_supported,
};
-static const struct drm_plane_funcs gen12_plane_funcs = {
+static const struct drm_plane_funcs icl_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = icl_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs tgl_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = gen12_plane_format_mod_supported,
+ .format_mod_supported = tgl_plane_format_mod_supported,
};
static void
@@ -2411,8 +2550,8 @@ static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
(plane_id == PLANE_1 || plane_id == PLANE_2);
}
-static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
- enum plane_id plane_id)
+static bool tgl_plane_has_mc_ccs(struct drm_i915_private *i915,
+ enum plane_id plane_id)
{
if (DISPLAY_VER(i915) < 12)
return false;
@@ -2432,13 +2571,14 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
static u8 skl_get_plane_caps(struct drm_i915_private *i915,
enum pipe pipe, enum plane_id plane_id)
{
+ struct intel_display *display = &i915->display;
u8 caps = INTEL_PLANE_CAP_TILING_X;
- if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915))
+ if (DISPLAY_VER(display) < 13 || display->platform.alderlake_p)
caps |= INTEL_PLANE_CAP_TILING_Y;
- if (DISPLAY_VER(i915) < 12)
+ if (DISPLAY_VER(display) < 12)
caps |= INTEL_PLANE_CAP_TILING_Yf;
- if (HAS_4TILE(i915))
+ if (HAS_4TILE(display))
caps |= INTEL_PLANE_CAP_TILING_4;
if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
@@ -2446,14 +2586,14 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
- if (DISPLAY_VER(i915) >= 12)
+ if (DISPLAY_VER(display) >= 12)
caps |= INTEL_PLANE_CAP_CCS_RC_CC;
}
- if (gen12_plane_has_mc_ccs(i915, plane_id))
+ if (tgl_plane_has_mc_ccs(i915, plane_id))
caps |= INTEL_PLANE_CAP_CCS_MC;
- if (DISPLAY_VER(i915) >= 14 && IS_DGFX(i915))
+ if (DISPLAY_VER(display) >= 14 && display->platform.dgfx)
caps |= INTEL_PLANE_CAP_NEED64K_PHYS;
return caps;
@@ -2483,7 +2623,11 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane);
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 30) {
+ plane->max_width = xe3_plane_max_width;
+ plane->max_height = icl_plane_max_height;
+ plane->min_cdclk = icl_plane_min_cdclk;
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
plane->min_width = icl_plane_min_width;
if (icl_is_hdr_plane(dev_priv, plane_id))
plane->max_width = icl_hdr_plane_max_width;
@@ -2541,7 +2685,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane_id, &num_formats);
if (DISPLAY_VER(dev_priv) >= 12)
- plane_funcs = &gen12_plane_funcs;
+ plane_funcs = &tgl_plane_funcs;
+ else if (DISPLAY_VER(dev_priv) == 11)
+ plane_funcs = &icl_plane_funcs;
else
plane_funcs = &skl_plane_funcs;
@@ -2621,6 +2767,7 @@ void
skl_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
+ struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2702,7 +2849,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF: /* aka PLANE_CTL_TILED_4 on XE_LPD+ */
- if (HAS_4TILE(dev_priv)) {
+ if (HAS_4TILE(display)) {
u32 rc_mask = PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
PLANE_CTL_CLEAR_COLOR_DISABLE;
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
index 4ddcd7d46bbd..ca9fdfbbe57c 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
@@ -159,6 +159,7 @@
_PLANE_SURF_2_A, _PLANE_SURF_2_B)
#define PLANE_SURF_ADDR_MASK REG_GENMASK(31, 12)
#define PLANE_SURF_DECRYPT REG_BIT(2)
+#define PLANE_SURF_ASYNC_UPDATE REG_BIT(0)
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
@@ -321,6 +322,7 @@
_PLANE_WM_2_A_0, _PLANE_WM_2_B_0)
#define PLANE_WM_EN REG_BIT(31)
#define PLANE_WM_IGNORE_LINES REG_BIT(30)
+#define PLANE_WM_AUTO_MIN_ALLOC_EN REG_BIT(29)
#define PLANE_WM_LINES_MASK REG_GENMASK(26, 14)
#define PLANE_WM_BLOCKS_MASK REG_GENMASK(11, 0)
@@ -372,12 +374,26 @@
#define PLANE_BUF_CFG(pipe, plane) _MMIO_SKL_PLANE((pipe), (plane), \
_PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B, \
_PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
+
/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */
#define PLANE_BUF_END_MASK REG_GENMASK(27, 16)
#define PLANE_BUF_END(end) REG_FIELD_PREP(PLANE_BUF_END_MASK, (end))
#define PLANE_BUF_START_MASK REG_GENMASK(11, 0)
#define PLANE_BUF_START(start) REG_FIELD_PREP(PLANE_BUF_START_MASK, (start))
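+/* xe3+ */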
+#define _PLANE_MIN_BUF_CFG_1_A 0x70274
+#define _PLANE_MIN_BUF_CFG_2_A 0x70374
+#define _PLANE_MIN_BUF_CFG_1_B 0x71274
+#define _PLANE_MIN_BUF_CFG_2_B 0x71374
+#define PLANE_MIN_BUF_CFG(pipe, plane) _MMIO_SKL_PLANE((pipe), (plane), \
+ _PLANE_MIN_BUF_CFG_1_A, _PLANE_MIN_BUF_CFG_1_B, \
+ _PLANE_MIN_BUF_CFG_2_A, _PLANE_MIN_BUF_CFG_2_B)
+#define PLANE_AUTO_MIN_DBUF_EN REG_BIT(31)
+#define PLANE_MIN_DBUF_BLOCKS_MASK REG_GENMASK(27, 16)
+#define PLANE_MIN_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_MIN_DBUF_BLOCKS_MASK, (val))
+#define PLANE_INTERIM_DBUF_BLOCKS_MASK REG_GENMASK(11, 0)
+#define PLANE_INTERIM_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_INTERIM_DBUF_BLOCKS_MASK, (val))
+
/* tgl+ */
#define _SEL_FETCH_PLANE_CTL_1_A 0x70890
#define _SEL_FETCH_PLANE_CTL_2_A 0x708b0
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 045c7cac166b..f4458d1185b3 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/debugfs.h>
+
#include <drm/drm_blend.h>
#include "i915_drv.h"
@@ -75,20 +77,23 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
bool
intel_has_sagv(struct drm_i915_private *i915)
{
- return HAS_SAGV(i915) &&
- i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
+ struct intel_display *display = &i915->display;
+
+ return HAS_SAGV(display) && display->sagv.status != I915_SAGV_NOT_CONTROLLED;
}
static u32
intel_sagv_block_time(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 14) {
+ struct intel_display *display = &i915->display;
+
+ if (DISPLAY_VER(display) >= 14) {
u32 val;
- val = intel_de_read(i915, MTL_LATENCY_SAGV);
+ val = intel_de_read(display, MTL_LATENCY_SAGV);
return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
u32 val = 0;
int ret;
@@ -96,14 +101,14 @@ intel_sagv_block_time(struct drm_i915_private *i915)
GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
&val, NULL);
if (ret) {
- drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
+ drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n");
return 0;
}
return val;
- } else if (DISPLAY_VER(i915) == 11) {
+ } else if (DISPLAY_VER(display) == 11) {
return 10;
- } else if (HAS_SAGV(i915)) {
+ } else if (HAS_SAGV(display)) {
return 30;
} else {
return 0;
@@ -112,31 +117,33 @@ intel_sagv_block_time(struct drm_i915_private *i915)
static void intel_sagv_init(struct drm_i915_private *i915)
{
- if (!HAS_SAGV(i915))
- i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ struct intel_display *display = &i915->display;
+
+ if (!HAS_SAGV(display))
+ display->sagv.status = I915_SAGV_NOT_CONTROLLED;
/*
* Probe to see if we have working SAGV control.
* For icl+ this was already determined by intel_bw_init_hw().
*/
- if (DISPLAY_VER(i915) < 11)
+ if (DISPLAY_VER(display) < 11)
skl_sagv_disable(i915);
- drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
+ drm_WARN_ON(display->drm, display->sagv.status == I915_SAGV_UNKNOWN);
- i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
+ display->sagv.block_time_us = intel_sagv_block_time(i915);
- drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
+ drm_dbg_kms(display->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
+ str_yes_no(intel_has_sagv(i915)), display->sagv.block_time_us);
/* avoid overflow when adding with wm0 latency/etc. */
- if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
+ if (drm_WARN(display->drm, display->sagv.block_time_us > U16_MAX,
"Excessive SAGV block time %u, ignoring\n",
- i915->display.sagv.block_time_us))
- i915->display.sagv.block_time_us = 0;
+ display->sagv.block_time_us))
+ display->sagv.block_time_us = 0;
if (!intel_has_sagv(i915))
- i915->display.sagv.block_time_us = 0;
+ display->sagv.block_time_us = 0;
}
/*
@@ -442,6 +449,7 @@ bool intel_can_enable_sagv(struct drm_i915_private *i915,
static int intel_compute_sagv_mask(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
int ret;
struct intel_crtc *crtc;
@@ -477,7 +485,7 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
* other crtcs can't be allowed to use the more optimal
* normal (ie. non-SAGV) watermarks.
*/
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(display) &&
DISPLAY_VER(i915) >= 12 &&
intel_crtc_can_enable_sagv(new_crtc_state);
@@ -716,7 +724,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int width, const struct drm_format_info *format,
u64 modifier, unsigned int rotation,
u32 plane_pixel_rate, struct skl_wm_params *wp,
- int color_plane);
+ int color_plane, unsigned int pan_x);
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane,
@@ -763,7 +771,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
drm_format_info(DRM_FORMAT_ARGB8888),
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
- crtc_state->pixel_rate, &wp, 0);
+ crtc_state->pixel_rate, &wp, 0, 0);
drm_WARN_ON(&i915->drm, ret);
for (level = 0; level < i915->display.wm.num_levels; level++) {
@@ -793,30 +801,40 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
const enum pipe pipe,
const enum plane_id plane_id,
struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
+ struct skl_ddb_entry *ddb_y,
+ u16 *min_ddb, u16 *interim_ddb)
{
+ struct intel_display *display = &i915->display;
u32 val;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
- val = intel_de_read(i915, CUR_BUF_CFG(pipe));
+ val = intel_de_read(display, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(ddb, val);
return;
}
- val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(display, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb, val);
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(display) >= 30) {
+ val = intel_de_read(display, PLANE_MIN_BUF_CFG(pipe, plane_id));
+
+ *min_ddb = REG_FIELD_GET(PLANE_MIN_DBUF_BLOCKS_MASK, val);
+ *interim_ddb = REG_FIELD_GET(PLANE_INTERIM_DBUF_BLOCKS_MASK, val);
+ }
+
+ if (DISPLAY_VER(display) >= 11)
return;
- val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(display, PLANE_NV12_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb_y, val);
}
static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
+ struct skl_ddb_entry *ddb_y,
+ u16 *min_ddb, u16 *interim_ddb)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
@@ -833,7 +851,9 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
skl_ddb_get_hw_plane_state(i915, pipe,
plane_id,
&ddb[plane_id],
- &ddb_y[plane_id]);
+ &ddb_y[plane_id],
+ &min_ddb[plane_id],
+ &interim_ddb[plane_id]);
intel_display_power_put(i915, power_domain, wakeref);
}
@@ -1368,13 +1388,30 @@ static bool
use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
- return DISPLAY_VER(i915) >= 13 &&
+ /* Xe3+ is auto minimum DDB capable, so don't force minimal wm0 */
+ return IS_DISPLAY_VER(display, 13, 20) &&
crtc_state->uapi.async_flip &&
plane->async_flip;
}
+unsigned int
+skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane, int width, int height,
+ int cpp)
+{
+ /*
+ * Extra ddb is allocated based on the ratio of the plane's data rate
+ * to the total data rate. Some planes must not receive any extra ddb,
+ * so report a data rate of zero for them to exclude them from the ratio.
+ */
+ if (use_minimal_wm0_only(crtc_state, plane))
+ return 0;
+
+ return width * height * cpp;
+}
+
static u64
skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
{
@@ -1511,6 +1548,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
const struct intel_dbuf_state *dbuf_state =
intel_atomic_get_new_dbuf_state(state);
const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ struct intel_display *display = to_intel_display(state);
int num_active = hweight8(dbuf_state->active_pipes);
struct skl_plane_ddb_iter iter;
enum plane_id plane_id;
@@ -1521,6 +1559,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
/* Clear the partitioning for disabled planes. */
memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+ memset(crtc_state->wm.skl.plane_min_ddb, 0,
+ sizeof(crtc_state->wm.skl.plane_min_ddb));
+ memset(crtc_state->wm.skl.plane_interim_ddb, 0,
+ sizeof(crtc_state->wm.skl.plane_interim_ddb));
if (!crtc_state->hw.active)
return 0;
@@ -1593,6 +1635,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *min_ddb = &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -1609,6 +1654,11 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
crtc_state->rel_data_rate[plane_id]);
}
+
+ if (DISPLAY_VER(display) >= 30) {
+ *min_ddb = wm->wm[0].min_ddb_alloc;
+ *interim_ddb = wm->sagv.wm0.min_ddb_alloc;
+ }
}
drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
@@ -1652,6 +1702,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
&crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -1665,6 +1717,9 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
}
skl_check_wm_level(&wm->sagv.wm0, ddb);
+ if (DISPLAY_VER(display) >= 30)
+ *interim_ddb = wm->sagv.wm0.min_ddb_alloc;
+
skl_check_wm_level(&wm->sagv.trans_wm, ddb);
}
@@ -1740,9 +1795,10 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
int width, const struct drm_format_info *format,
u64 modifier, unsigned int rotation,
u32 plane_pixel_rate, struct skl_wm_params *wp,
- int color_plane)
+ int color_plane, unsigned int pan_x)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 interm_pbpl;
@@ -1801,7 +1857,9 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_min_scanlines,
wp->dbuf_block_size);
- if (DISPLAY_VER(i915) >= 10)
+ if (DISPLAY_VER(display) >= 30)
+ interm_pbpl += (pan_x != 0);
+ else if (DISPLAY_VER(i915) >= 10)
interm_pbpl++;
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
@@ -1843,7 +1901,8 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
fb->format, fb->modifier,
plane_state->hw.rotation,
intel_plane_pixel_rate(crtc_state, plane_state),
- wp, color_plane);
+ wp, color_plane,
+ plane_state->uapi.src.x1);
}
static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
@@ -1863,6 +1922,13 @@ static int skl_wm_max_lines(struct drm_i915_private *i915)
return 31;
}
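+/*
+ * Xe3+ hardware can derive the minimum DDB allocation on its own, but
+ * only for watermark level 0 and not for the cursor plane.
+ */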
+static bool xe3_auto_min_alloc_capable(struct intel_plane *plane, int level)
+{
+ struct intel_display *display = to_intel_display(plane);
+
+ return DISPLAY_VER(display) >= 30 && level == 0 && plane->id != PLANE_CURSOR;
+}
+
static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
struct intel_plane *plane,
int level,
@@ -1907,7 +1973,10 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
}
- blocks = fixed16_to_u32_round_up(selected_result) + 1;
+ blocks = fixed16_to_u32_round_up(selected_result);
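+ /* pre-Xe3 hardware needs one extra block on top of the rounded-up result */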
+ if (DISPLAY_VER(i915) < 30)
+ blocks++;
+
/*
* Lets have blocks at minimum equivalent to plane_blocks_per_line
* as there will be at minimum one line for lines configuration. This
@@ -1992,6 +2061,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
/* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
result->enable = true;
+ result->auto_min_alloc_wm_enable = xe3_auto_min_alloc_capable(plane, level);
if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
result->can_sagv = latency >= i915->display.sagv.block_time_us;
@@ -2371,16 +2441,18 @@ static bool skl_wm_level_equals(const struct skl_wm_level *l1,
return l1->enable == l2->enable &&
l1->ignore_lines == l2->ignore_lines &&
l1->lines == l2->lines &&
- l1->blocks == l2->blocks;
+ l1->blocks == l2->blocks &&
+ l1->auto_min_alloc_wm_enable == l2->auto_min_alloc_wm_enable;
}
static bool skl_plane_wm_equals(struct drm_i915_private *i915,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
+ struct intel_display *display = &i915->display;
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2488,6 +2560,7 @@ static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_dbuf_state *old_dbuf_state;
struct intel_dbuf_state *new_dbuf_state = NULL;
@@ -2516,7 +2589,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
- if (HAS_MBUS_JOINING(i915)) {
+ if (HAS_MBUS_JOINING(display)) {
new_dbuf_state->joined_mbus =
adlp_check_mbus_joined(new_dbuf_state->active_pipes);
@@ -2734,10 +2807,10 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
const struct skl_pipe_wm *old_pipe_wm,
const struct skl_pipe_wm *new_pipe_wm)
{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ struct intel_display *display = to_intel_display(plane);
int level;
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
/*
* We don't check uv_wm as the hardware doesn't actually
* use it. It only gets used for calculating the required
@@ -2748,7 +2821,7 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
return false;
}
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
@@ -2839,32 +2912,58 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
* Program DEEP PKG_C_LATENCY Pkg C with all 1's.
* Program PKG_C_LATENCY Added Wake Time = 0
*/
-static void
-skl_program_dpkgc_latency(struct drm_i915_private *i915, bool enable_dpkgc)
+void
+intel_program_dpkgc_latency(struct intel_atomic_state *state)
{
- u32 max_latency = 0;
- u32 clear = 0, val = 0;
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ u32 latency = LNL_PKG_C_LATENCY_MASK;
u32 added_wake_time = 0;
+ u32 max_linetime = 0;
+ u32 clear, val;
+ bool fixed_refresh_rate = false;
+ int i;
- if (DISPLAY_VER(i915) < 20)
+ if (DISPLAY_VER(display) < 20)
return;
- if (enable_dpkgc) {
- max_latency = skl_watermark_max_latency(i915, 1);
- if (max_latency == 0)
- max_latency = LNL_PKG_C_LATENCY_MASK;
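+ /*
+ * A pipe runs at a fixed refresh rate when VRR is disabled or when the
+ * VRR range is degenerate (vmin == vmax == flipline).
+ */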
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->vrr.enable ||
+ (new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
+ new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline))
+ fixed_refresh_rate = true;
+
+ max_linetime = max(new_crtc_state->linetime, max_linetime);
+ }
+
+ if (fixed_refresh_rate) {
added_wake_time = DSB_EXE_TIME +
- i915->display.sagv.block_time_us;
- } else {
- max_latency = LNL_PKG_C_LATENCY_MASK;
- added_wake_time = 0;
+ display->sagv.block_time_us;
+
+ latency = skl_watermark_max_latency(i915, 1);
+
+ /* Wa_22020432604 */
+ if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) {
+ latency += added_wake_time;
+ added_wake_time = 0;
+ }
+
+ /* Wa_22020299601 */
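+ /* round the latency up to a whole number of line times */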
+ if ((latency && max_linetime) &&
+ (DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30)) {
+ latency = max_linetime * DIV_ROUND_UP(latency, max_linetime);
+ } else if (!latency) {
+ latency = LNL_PKG_C_LATENCY_MASK;
+ }
}
- clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
- val |= REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, max_latency);
- val |= REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
+ clear = LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
+ val = REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency) |
+ REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
- intel_uncore_rmw(&i915->uncore, LNL_PKG_C_LATENCY, clear, val);
+ intel_de_rmw(display, LNL_PKG_C_LATENCY, clear, val);
}
static int
@@ -2873,7 +2972,6 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc *crtc;
struct intel_crtc_state __maybe_unused *new_crtc_state;
int ret, i;
- bool enable_dpkgc = false;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = skl_build_pipe_wm(state, crtc);
@@ -2898,32 +2996,28 @@ skl_compute_wm(struct intel_atomic_state *state)
ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
-
- if ((new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax &&
- new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline) ||
- !new_crtc_state->vrr.enable)
- enable_dpkgc = true;
}
- skl_program_dpkgc_latency(to_i915(state->base.dev), enable_dpkgc);
-
skl_print_wm_changes(state);
return 0;
}
-static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
+static void skl_wm_level_from_reg_val(struct intel_display *display,
+ u32 val, struct skl_wm_level *level)
{
level->enable = val & PLANE_WM_EN;
level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
+ level->auto_min_alloc_wm_enable = DISPLAY_VER(display) >= 30 ?
+ val & PLANE_WM_AUTO_MIN_ALLOC_EN : 0;
}
static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
int level;
@@ -2932,37 +3026,37 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
- for (level = 0; level < i915->display.wm.num_levels; level++) {
+ for (level = 0; level < display->wm.num_levels; level++) {
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
+ val = intel_de_read(display, PLANE_WM(pipe, plane_id, level));
else
- val = intel_de_read(i915, CUR_WM(pipe, level));
+ val = intel_de_read(display, CUR_WM(pipe, level));
- skl_wm_level_from_reg_val(val, &wm->wm[level]);
+ skl_wm_level_from_reg_val(display, val, &wm->wm[level]);
}
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_TRANS(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_TRANS(pipe));
+ val = intel_de_read(display, CUR_WM_TRANS(pipe));
- skl_wm_level_from_reg_val(val, &wm->trans_wm);
+ skl_wm_level_from_reg_val(display, val, &wm->trans_wm);
- if (HAS_HW_SAGV_WM(i915)) {
+ if (HAS_HW_SAGV_WM(display)) {
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_SAGV(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_SAGV(pipe));
+ val = intel_de_read(display, CUR_WM_SAGV(pipe));
- skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
+ skl_wm_level_from_reg_val(display, val, &wm->sagv.wm0);
if (plane_id != PLANE_CURSOR)
- val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ val = intel_de_read(display, PLANE_WM_SAGV_TRANS(pipe, plane_id));
else
- val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));
+ val = intel_de_read(display, CUR_WM_SAGV_TRANS(pipe));
- skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
- } else if (DISPLAY_VER(i915) >= 12) {
+ skl_wm_level_from_reg_val(display, val, &wm->sagv.trans_wm);
+ } else if (DISPLAY_VER(display) >= 12) {
wm->sagv.wm0 = wm->wm[0];
wm->sagv.trans_wm = wm->trans_wm;
}
@@ -2971,16 +3065,17 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
static void skl_wm_get_hw_state(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state =
to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_crtc *crtc;
- if (HAS_MBUS_JOINING(i915))
- dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+ if (HAS_MBUS_JOINING(display))
+ dbuf_state->joined_mbus = intel_de_read(display, MBUS_CTL) & MBUS_JOIN;
- dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(i915, &i915->display.cdclk.hw);
+ dbuf_state->mdclk_cdclk_ratio = intel_mdclk_cdclk_ratio(display, &display->cdclk.hw);
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
@@ -3001,12 +3096,17 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
&crtc_state->wm.skl.plane_ddb[plane_id];
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ u16 *min_ddb =
+ &crtc_state->wm.skl.plane_min_ddb[plane_id];
+ u16 *interim_ddb =
+ &crtc_state->wm.skl.plane_interim_ddb[plane_id];
if (!crtc_state->hw.active)
continue;
skl_ddb_get_hw_plane_state(i915, crtc->pipe,
- plane_id, ddb, ddb_y);
+ plane_id, ddb, ddb_y,
+ min_ddb, interim_ddb);
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
@@ -3028,7 +3128,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
dbuf_state->slices[pipe] =
skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
crtc->base.base.id, crtc->base.name,
dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
@@ -3036,203 +3136,7 @@ static void skl_wm_get_hw_state(struct drm_i915_private *i915)
str_yes_no(dbuf_state->joined_mbus));
}
- dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
-}
-
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
-{
- const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->display.dbuf.obj.state);
- struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- entries[crtc->pipe] = crtc_state->wm.skl.ddb;
- }
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- u8 slices;
-
- slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
- dbuf_state->joined_mbus);
- if (dbuf_state->slices[crtc->pipe] & ~slices)
- return true;
-
- if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
- I915_MAX_PIPES, crtc->pipe))
- return true;
- }
-
- return false;
-}
-
-static void skl_wm_sanitize(struct drm_i915_private *i915)
-{
- struct intel_crtc *crtc;
-
- /*
- * On TGL/RKL (at least) the BIOS likes to assign the planes
- * to the wrong DBUF slices. This will cause an infinite loop
- * in skl_commit_modeset_enables() as it can't find a way to
- * transition between the old bogus DBUF layout to the new
- * proper DBUF layout without DBUF allocation overlaps between
- * the planes (which cannot be allowed or else the hardware
- * may hang). If we detect a bogus DBUF layout just turn off
- * all the planes so that skl_commit_modeset_enables() can
- * simply ignore them.
- */
- if (!skl_dbuf_is_misconfigured(i915))
- return;
-
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (plane_state->uapi.visible)
- intel_plane_disable_noatomic(crtc, plane);
-
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
-
- memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
- }
-}
-
-static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
-{
- skl_wm_get_hw_state(i915);
- skl_wm_sanitize(i915);
-}
-
-void intel_wm_state_verify(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_hw_state {
- struct skl_ddb_entry ddb[I915_MAX_PLANES];
- struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
- struct skl_pipe_wm wm;
- } *hw;
- const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- struct intel_plane *plane;
- u8 hw_enabled_slices;
- int level;
-
- if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
- return;
-
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return;
-
- skl_pipe_wm_get_hw_state(crtc, &hw->wm);
-
- skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
-
- hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
-
- if (DISPLAY_VER(i915) >= 11 &&
- hw_enabled_slices != i915->display.dbuf.enabled_slices)
- drm_err(&i915->drm,
- "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- i915->display.dbuf.enabled_slices,
- hw_enabled_slices);
-
- for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
- const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- const struct skl_wm_level *hw_wm_level, *sw_wm_level;
-
- /* Watermarks */
- for (level = 0; level < i915->display.wm.num_levels; level++) {
- hw_wm_level = &hw->wm.planes[plane->id].wm[level];
- sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
-
- if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
- continue;
-
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name, level,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
- sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
-
- if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
-
- if (HAS_HW_SAGV_WM(i915) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
-
- if (HAS_HW_SAGV_WM(i915) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&i915->drm,
- "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
- plane->base.base.id, plane->base.name,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- kfree(hw);
+ dbuf_state->enabled_slices = display->dbuf.enabled_slices;
}
bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
@@ -3377,31 +3281,19 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
static void skl_setup_wm_latency(struct drm_i915_private *i915)
{
- if (HAS_HW_SAGV_WM(i915))
- i915->display.wm.num_levels = 6;
- else
- i915->display.wm.num_levels = 8;
+ struct intel_display *display = &i915->display;
- if (DISPLAY_VER(i915) >= 14)
- mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
+ if (HAS_HW_SAGV_WM(display))
+ display->wm.num_levels = 6;
else
- skl_read_wm_latency(i915, i915->display.wm.skl_latency);
+ display->wm.num_levels = 8;
- intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
-}
-
-static const struct intel_wm_funcs skl_wm_funcs = {
- .compute_global_watermarks = skl_compute_wm,
- .get_hw_state = skl_wm_get_hw_state_and_sanitize,
-};
-
-void skl_wm_init(struct drm_i915_private *i915)
-{
- intel_sagv_init(i915);
-
- skl_setup_wm_latency(i915);
+ if (DISPLAY_VER(display) >= 14)
+ mtl_read_wm_latency(i915, display->wm.skl_latency);
+ else
+ skl_read_wm_latency(i915, display->wm.skl_latency);
- i915->display.funcs.wm = &skl_wm_funcs;
+ intel_print_wm_latency(i915, "Gen9 Plane", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
@@ -3441,13 +3333,14 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
int intel_dbuf_init(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
struct intel_dbuf_state *dbuf_state;
dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
if (!dbuf_state)
return -ENOMEM;
- intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
+ intel_atomic_global_obj_init(display, &display->dbuf.obj,
&dbuf_state->base, &intel_dbuf_funcs);
return 0;
@@ -3457,38 +3350,27 @@ static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
{
switch (pipe) {
case PIPE_A:
- return !(active_pipes & BIT(PIPE_D));
case PIPE_D:
- return !(active_pipes & BIT(PIPE_A));
+ active_pipes &= BIT(PIPE_A) | BIT(PIPE_D);
+ break;
case PIPE_B:
- return !(active_pipes & BIT(PIPE_C));
case PIPE_C:
- return !(active_pipes & BIT(PIPE_B));
+ active_pipes &= BIT(PIPE_B) | BIT(PIPE_C);
+ break;
default: /* to suppress compiler warning */
MISSING_CASE(pipe);
- break;
+ return false;
}
- return false;
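+ /* the pipe is alone in its dbuf bank iff no other pipe of the pair is active */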
+ return is_power_of_2(active_pipes);
}
-static void intel_mbus_dbox_update(struct intel_atomic_state *state)
+static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
+ const struct intel_dbuf_state *dbuf_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- const struct intel_crtc *crtc;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
u32 val = 0;
- if (DISPLAY_VER(i915) < 11)
- return;
-
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- if (!new_dbuf_state ||
- (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
- new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
- return;
-
if (DISPLAY_VER(i915) >= 14)
val |= MBUS_DBOX_I_CREDIT(2);
@@ -3499,12 +3381,12 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
}
if (DISPLAY_VER(i915) >= 14)
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
- MBUS_DBOX_A_CREDIT(8);
+ val |= dbuf_state->joined_mbus ?
+ MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
else if (IS_ALDERLAKE_P(i915))
/* Wa_22010947358:adl-p */
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
- MBUS_DBOX_A_CREDIT(4);
+ val |= dbuf_state->joined_mbus ?
+ MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
else
val |= MBUS_DBOX_A_CREDIT(2);
@@ -3521,19 +3403,42 @@ static void intel_mbus_dbox_update(struct intel_atomic_state *state)
val |= MBUS_DBOX_B_CREDIT(8);
}
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, new_dbuf_state->active_pipes) {
- u32 pipe_val = val;
+ if (DISPLAY_VERx100(i915) == 1400) {
+ if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, dbuf_state->active_pipes))
+ val |= MBUS_DBOX_BW_8CREDITS_MTL;
+ else
+ val |= MBUS_DBOX_BW_4CREDITS_MTL;
+ }
- if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) {
- if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
- new_dbuf_state->active_pipes))
- pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
- else
- pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
- }
+ return val;
+}
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
- }
+static void pipe_mbus_dbox_ctl_update(struct drm_i915_private *i915,
+ const struct intel_dbuf_state *dbuf_state)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, dbuf_state->active_pipes)
+ intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe),
+ pipe_mbus_dbox_ctl(crtc, dbuf_state));
+}
+
+static void intel_mbus_dbox_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (!new_dbuf_state ||
+ (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
+ new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ return;
+
+ pipe_mbus_dbox_ctl_update(i915, new_dbuf_state);
}
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -3553,23 +3458,24 @@ int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
int ratio, bool joined_mbus)
{
+ struct intel_display *display = &i915->display;
enum dbuf_slice slice;
- if (!HAS_MBUS_JOINING(i915))
+ if (!HAS_MBUS_JOINING(display))
return;
- if (DISPLAY_VER(i915) >= 20)
- intel_de_rmw(i915, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_rmw(display, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
if (joined_mbus)
ratio *= 2;
- drm_dbg_kms(&i915->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
+ drm_dbg_kms(display->drm, "Updating dbuf ratio to %d (mbus joined: %s)\n",
ratio, str_yes_no(joined_mbus));
- for_each_dbuf_slice(i915, slice)
- intel_de_rmw(i915, DBUF_CTL_S(slice),
+ for_each_dbuf_slice(display, slice)
+ intel_de_rmw(display, DBUF_CTL_S(slice),
DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
}
@@ -3598,6 +3504,7 @@ static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state
static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
const struct intel_dbuf_state *dbuf_state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe pipe = ffs(dbuf_state->active_pipes) - 1;
const struct intel_crtc_state *new_crtc_state;
@@ -3606,7 +3513,7 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
drm_WARN_ON(&i915->drm, !dbuf_state->joined_mbus);
drm_WARN_ON(&i915->drm, !is_power_of_2(dbuf_state->active_pipes));
- crtc = intel_crtc_for_pipe(i915, pipe);
+ crtc = intel_crtc_for_pipe(display, pipe);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
if (new_crtc_state && !intel_crtc_needs_modeset(new_crtc_state))
@@ -3615,22 +3522,13 @@ static enum pipe intel_mbus_joined_pipe(struct intel_atomic_state *state,
return INVALID_PIPE;
}
-static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
- enum pipe pipe)
+static void mbus_ctl_join_update(struct drm_i915_private *i915,
+ const struct intel_dbuf_state *dbuf_state,
+ enum pipe pipe)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
u32 mbus_ctl;
- drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
- str_yes_no(old_dbuf_state->joined_mbus),
- str_yes_no(new_dbuf_state->joined_mbus),
- pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
-
- if (new_dbuf_state->joined_mbus)
+ if (dbuf_state->joined_mbus)
mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN;
else
mbus_ctl = MBUS_HASHING_MODE_2x2;
@@ -3645,6 +3543,23 @@ static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
}
+static void intel_dbuf_mbus_join_update(struct intel_atomic_state *state,
+ enum pipe pipe)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+
+ drm_dbg_kms(&i915->drm, "Changing mbus joined: %s -> %s (pipe: %c)\n",
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus),
+ pipe != INVALID_PIPE ? pipe_name(pipe) : '*');
+
+ mbus_ctl_join_update(i915, new_dbuf_state, pipe);
+}
+
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
{
const struct intel_dbuf_state *new_dbuf_state =
@@ -3668,7 +3583,7 @@ void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state)
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_dbuf_state *new_dbuf_state =
intel_atomic_get_new_dbuf_state(state);
const struct intel_dbuf_state *old_dbuf_state =
@@ -3687,7 +3602,7 @@ void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state)
intel_dbuf_mbus_join_update(state, pipe);
if (pipe != INVALID_PIPE) {
- struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
intel_crtc_wait_for_next_vblank(crtc);
}
@@ -3747,6 +3662,245 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
gen9_dbuf_slices_update(i915, new_slices);
}
+static void skl_mbus_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_display *display = &i915->display;
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(display->dbuf.obj.state);
+
+ if (!HAS_MBUS_JOINING(display))
+ return;
+
+ if (!dbuf_state->joined_mbus ||
+ adlp_check_mbus_joined(dbuf_state->active_pipes))
+ return;
+
+ drm_dbg_kms(display->drm, "Disabling redundant MBUS joining (active pipes 0x%x)\n",
+ dbuf_state->active_pipes);
+
+ dbuf_state->joined_mbus = false;
+ intel_dbuf_mdclk_cdclk_ratio_update(i915,
+ dbuf_state->mdclk_cdclk_ratio,
+ dbuf_state->joined_mbus);
+ pipe_mbus_dbox_ctl_update(i915, dbuf_state);
+ mbus_ctl_join_update(i915, dbuf_state, INVALID_PIPE);
+}
+
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+static void skl_dbuf_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+ * transition between the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
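
skl_dbuf_is_misconfigured() above boils down to interval checks over the per-pipe DDB allocations. A hedged sketch of the half-open range overlap test it depends on (assuming skl_ddb_entry-style [start, end) semantics; the names here are illustrative, not the driver's):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct ddb_entry { uint16_t start, end; };	/* [start, end) */

/* Two half-open ranges overlap iff each starts before the other ends. */
static bool ddb_overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

/* Does entries[pipe] collide with any other pipe's allocation? */
static bool ddb_collides(const struct ddb_entry *entries, int n, int pipe)
{
	for (int i = 0; i < n; i++)
		if (i != pipe && ddb_overlaps(&entries[i], &entries[pipe]))
			return true;
	return false;
}

int main(void)
{
	struct ddb_entry e[3] = { {0, 512}, {512, 1024}, {0, 0} };

	assert(!ddb_collides(e, 3, 0));	/* abutting ranges don't overlap */
	e[1].start = 500;
	assert(ddb_collides(e, 3, 0));	/* now they genuinely intersect */
	return 0;
}

Note that empty entries (start == end) never overlap anything under this test, which is why unused pipes fall out naturally.
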
+static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
+{
+ skl_wm_get_hw_state(i915);
+
+ skl_mbus_sanitize(i915);
+ skl_dbuf_sanitize(i915);
+}
+
+void intel_wm_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ u16 min_ddb[I915_MAX_PLANES];
+ u16 interim_ddb[I915_MAX_PLANES];
+ struct skl_pipe_wm wm;
+ } *hw;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ struct intel_plane *plane;
+ u8 hw_enabled_slices;
+ int level;
+
+ if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ return;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y, hw->min_ddb, hw->interim_ddb);
+
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ hw_enabled_slices != i915->display.dbuf.enabled_slices)
+ drm_err(&i915->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ i915->display.dbuf.enabled_slices,
+ hw_enabled_slices);
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
+
+ /* Watermarks */
+ for (level = 0; level < i915->display.wm.num_levels; level++) {
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
+
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
+ continue;
+
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(display) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(display) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
+ }
+
+ kfree(hw);
+}
+
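
intel_wm_state_verify() keeps its hardware snapshot off the stack with kzalloc() because the struct is large and kernel stacks are small. A compact userspace sketch of the same snapshot-and-compare approach, with stand-in field names and a memset in place of the real register readback:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct wm_level { int enable, blocks, lines; };
struct snapshot { struct wm_level wm[8]; };	/* large in the real driver */

static void read_hw_state(struct snapshot *s)
{
	memset(s, 0, sizeof(*s));	/* stand-in for MMIO readback */
}

/* Compare a heap-allocated hardware snapshot against software state. */
static int verify(const struct snapshot *sw)
{
	struct snapshot *hw = calloc(1, sizeof(*hw));	/* keep off the stack */
	int mismatches = 0;

	if (!hw)
		return -1;

	read_hw_state(hw);
	for (int i = 0; i < 8; i++) {
		if (memcmp(&hw->wm[i], &sw->wm[i], sizeof(hw->wm[i]))) {
			fprintf(stderr, "WM%d mismatch\n", i);
			mismatches++;
		}
	}
	free(hw);
	return mismatches;
}

int main(void)
{
	struct snapshot sw = {0};

	return verify(&sw);	/* expect 0 mismatches */
}
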
+static const struct intel_wm_funcs skl_wm_funcs = {
+ .compute_global_watermarks = skl_compute_wm,
+ .get_hw_state = skl_wm_get_hw_state_and_sanitize,
+};
+
+void skl_wm_init(struct drm_i915_private *i915)
+{
+ intel_sagv_init(i915);
+
+ skl_setup_wm_latency(i915);
+
+ i915->display.funcs.wm = &skl_wm_funcs;
+}
+
static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = m->private;
@@ -3820,13 +3974,14 @@ DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
void skl_watermark_debugfs_register(struct drm_i915_private *i915)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct intel_display *display = &i915->display;
+ struct drm_minor *minor = display->drm->primary;
- if (HAS_IPC(i915))
+ if (HAS_IPC(display))
debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
&skl_watermark_ipc_status_fops);
- if (HAS_SAGV(i915))
+ if (HAS_SAGV(display))
debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915,
&intel_sagv_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 78b121941237..8659f89427f2 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -18,6 +18,7 @@ struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
+struct intel_plane_state;
struct skl_pipe_wm;
struct skl_wm_level;
@@ -53,6 +54,9 @@ const struct skl_wm_level *skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
int level);
const struct skl_wm_level *skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
enum plane_id plane_id);
+unsigned int skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane, int width,
+ int height, int cpp);
struct intel_dbuf_state {
struct intel_global_state base;
@@ -73,9 +77,9 @@ intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
container_of_const((global_state), struct intel_dbuf_state, base)
#define intel_atomic_get_old_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
+ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
#define intel_atomic_get_new_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
+ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj))
int intel_dbuf_init(struct drm_i915_private *i915);
int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state,
@@ -87,6 +91,7 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct drm_i915_private *i915,
int ratio, bool joined_mbus);
void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state);
void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state);
+void intel_program_dpkgc_latency(struct intel_atomic_state *state);
#endif /* __SKL_WATERMARK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index d21f3fb39706..d49e9b3c7627 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -43,6 +44,7 @@
#include "intel_dsi_vbt.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
+#include "intel_pfit.h"
#include "skl_scaler.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
@@ -65,9 +67,8 @@ static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
(bpp * burst_mode_ratio));
}
-enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+static enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
{
- /* It just so happens the VBT matches register contents. */
switch (fmt) {
case VID_MODE_FORMAT_RGB888:
return MIPI_DSI_FMT_RGB888;
@@ -1071,7 +1072,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
hsync = intel_de_read(display, MIPI_HSYNC_PADDING_COUNT(display, port));
hbp = intel_de_read(display, MIPI_HBP_COUNT(display, port));
- /* harizontal values are in terms of high speed byte clock */
+ /* horizontal values are in terms of high speed byte clock */
hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
intel_dsi->burst_mode_ratio);
hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count,
@@ -1758,6 +1759,31 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
intel_dsi_log_params(intel_dsi);
}
+int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ /*
+	 * On Valleyview some DSI panels lose (v|h)sync when the CDCLK
+	 * is lower than 320000 kHz.
+ */
+ if (IS_VALLEYVIEW(dev_priv))
+ return 320000;
+
+ /*
+	 * On Geminilake, once the CDCLK gets as low as 79200 kHz
+	 * the picture becomes unstable, even though the values are
+	 * correct for the DSI PLL and DE PLL.
+ */
+ if (IS_GEMINILAKE(dev_priv))
+ return 158400;
+
+ return 0;
+}
+
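
Per-output minimums like vlv_dsi_min_cdclk() are typically folded into a global constraint by taking the maximum across all outputs, with 0 meaning "no constraint". A sketch of that aggregation (illustrative, not the driver's actual CDCLK code):

#include <stdio.h>

/* Fold per-output minimum CDCLK values into one: 0 means "no constraint". */
static int min_cdclk_over_outputs(const int *per_output_khz, int n)
{
	int min_cdclk = 0;

	for (int i = 0; i < n; i++)
		if (per_output_khz[i] > min_cdclk)
			min_cdclk = per_output_khz[i];

	return min_cdclk;
}

int main(void)
{
	int mins[] = { 0, 320000, 158400 };

	printf("%d kHz\n", min_cdclk_over_outputs(mins, 3));	/* 320000 */
	return 0;
}
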
typedef void (*vlv_dsi_dmi_quirk_func)(struct intel_dsi *intel_dsi);
/*
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h
index cf9d7b82f288..277bacfbc551 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.h
@@ -6,21 +6,20 @@
#ifndef __VLV_DSI_H__
#define __VLV_DSI_H__
-#include <linux/types.h>
-
enum port;
struct drm_i915_private;
+struct intel_crtc_state;
struct intel_dsi;
#ifdef I915
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
-enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state);
void vlv_dsi_init(struct drm_i915_private *dev_priv);
#else
static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
}
-static inline enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+static inline int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state)
{
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 70c5a13a3c75..59a50647f2c3 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -592,15 +592,16 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
static void assert_dsi_pll(struct drm_i915_private *i915, bool state)
{
+ struct intel_display *display = &i915->display;
bool cur_state;
vlv_cck_get(i915);
cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN;
vlv_cck_put(i915);
- I915_STATE_WARN(i915, cur_state != state,
- "DSI PLL state assertion failure (expected %s, current %s)\n",
- str_on_off(state), str_on_off(cur_state));
+ INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
+ "DSI PLL state assertion failure (expected %s, current %s)\n",
+ str_on_off(state), str_on_off(cur_state));
}
void assert_dsi_pll_enabled(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 1df74f7aa3dc..9473050ac842 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -16,7 +16,7 @@
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
I915_SELFTEST_DECLARE(static bool force_different_devices;)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index a3b83cfe1726..f151640c1d13 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -915,7 +915,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
*/
if (i915_gem_context_uses_protected_content(eb->gem_context) &&
i915_gem_object_is_protected(obj)) {
- err = intel_pxp_key_check(eb->i915->pxp, obj, true);
+ err = intel_pxp_key_check(eb->i915->pxp, intel_bo_to_drm_bo(obj), true);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 3198b64ad7db..388f90784d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -53,29 +53,6 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
}
/**
- * __i915_gem_object_is_lmem - Whether the object is resident in
- * lmem while in the fence signaling critical path.
- * @obj: The object to check.
- *
- * This function is intended to be called from within the fence signaling
- * path where the fence, or a pin, keeps the object from being migrated. For
- * example during gpu reset or similar.
- *
- * Return: Whether the object is resident in lmem.
- */
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
-{
- struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
-
-#ifdef CONFIG_LOCKDEP
- GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
- i915_gem_object_evictable(obj));
-#endif
- return mr && (mr->type == INTEL_MEMORY_LOCAL ||
- mr->type == INTEL_MEMORY_STOLEN_LOCAL);
-}
-
-/**
* __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
* minimum page size for the backing pages.
* @i915: The i915 instance.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 5a7a14e85c3f..ecd8f1a633a1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -19,8 +19,6 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
-bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
-
struct drm_i915_gem_object *
i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
const void *data, size_t size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3dc61cbd2e11..bb713e096db2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -283,9 +283,7 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- /* TODO: make DPT shrinkable when it has no bound vmas */
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
- !obj->is_dpt;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 3b27218aabe2..900c08337942 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -13,7 +13,7 @@
#include "i915_driver.h"
#include "i915_drv.h"
-#if defined(CONFIG_X86)
+#if IS_ENABLED(CONFIG_X86)
#include <asm/smp.h>
#else
#define wbinvd_on_all_cpus() \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index fe69f2c8527d..ae3343c81a64 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -209,8 +209,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
struct address_space *mapping = obj->base.filp->f_mapping;
unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
struct sg_table *st;
- struct sgt_iter sgt_iter;
- struct page *page;
int ret;
/*
@@ -239,9 +237,7 @@ rebuild_st:
* for PAGE_SIZE chunks instead may be helpful.
*/
if (max_segment > PAGE_SIZE) {
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
- sg_free_table(st);
+ shmem_sg_free_table(st, mapping, false, false);
kfree(st);
max_segment = PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index d166052eb2ce..9117e9422844 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -117,7 +117,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
},
{ NULL, 0 },
}, *phase;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
unsigned long count = 0;
unsigned long scanned = 0;
int err = 0, i = 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index d29005980806..9d958a6f377e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -457,7 +457,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
icl_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
} else if (GRAPHICS_VER(i915) >= 8) {
- if (IS_LP(i915))
+ if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915) || IS_GEMINILAKE(i915))
chv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index b22e2019768f..10d8673641f7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -808,7 +808,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
}
if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
@@ -1038,7 +1038,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
vm_fault_t ret;
int idx;
@@ -1195,7 +1195,7 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
assert_object_held_shared(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 03b00a03a634..041dab543b78 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -624,7 +624,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
/* Populate ttm with pages if needed. Typically system memory. */
if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
- ret = ttm_tt_populate(bo->bdev, ttm, ctx);
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index ad649523d5e0..61596cecce4d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -90,7 +90,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
goto out_no_lock;
backup_bo = i915_gem_to_ttm(backup);
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ err = ttm_bo_populate(backup_bo, &ctx);
if (err)
goto out_no_populate;
@@ -189,7 +189,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
if (!backup_bo->resource)
err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
if (!err)
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ err = ttm_bo_populate(backup_bo, &ctx);
if (!err) {
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 89d4dc8b60c6..eb0158e43417 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -369,7 +369,7 @@ static int live_parallel_switch(void *arg)
if (!data[n].ce[0])
continue;
- worker = kthread_create_worker(0, "igt/parallel:%s",
+ worker = kthread_run_worker(0, "igt/parallel:%s",
data[n].ce[0]->engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 8fe0499308ff..4904d0f4162c 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -169,7 +169,7 @@ static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
return cs;
}
-u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
return __gen2_emit_breadcrumb(rq, cs, 16, 8);
}
@@ -248,7 +248,7 @@ int i830_emit_bb_start(struct i915_request *rq,
return 0;
}
-int gen3_emit_bb_start(struct i915_request *rq,
+int gen2_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
@@ -292,29 +292,12 @@ int gen4_emit_bb_start(struct i915_request *rq,
void gen2_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
-
- i915->irq_mask &= ~engine->irq_enable_mask;
- intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
- ENGINE_POSTING_READ16(engine, RING_IMR);
-}
-
-void gen2_irq_disable(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *i915 = engine->i915;
-
- i915->irq_mask |= engine->irq_enable_mask;
- intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
-}
-
-void gen3_irq_enable(struct intel_engine_cs *engine)
-{
engine->i915->irq_mask &= ~engine->irq_enable_mask;
intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}
-void gen3_irq_disable(struct intel_engine_cs *engine)
+void gen2_irq_disable(struct intel_engine_cs *engine)
{
engine->i915->irq_mask |= engine->irq_enable_mask;
intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
index a5cd64a65c9e..7b37560fc356 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
@@ -15,13 +15,13 @@ int gen2_emit_flush(struct i915_request *rq, u32 mode);
int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode);
int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode);
-u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs);
+u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs);
u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs);
int i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags);
-int gen3_emit_bb_start(struct i915_request *rq,
+int gen2_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags);
int gen4_emit_bb_start(struct i915_request *rq,
@@ -30,8 +30,6 @@ int gen4_emit_bb_start(struct i915_request *rq,
void gen2_irq_enable(struct intel_engine_cs *engine);
void gen2_irq_disable(struct intel_engine_cs *engine);
-void gen3_irq_enable(struct intel_engine_cs *engine);
-void gen3_irq_disable(struct intel_engine_cs *engine);
void gen5_irq_enable(struct intel_engine_cs *engine);
void gen5_irq_disable(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index d38b914d1206..6e89112f68ae 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -399,7 +399,8 @@ static void emit_batch(struct i915_vma * const vma,
batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
batch_add(&cmds, 0xffff0000 |
- ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
+ (((IS_IVYBRIDGE(i915) && INTEL_INFO(i915)->gt == 1) ||
+ IS_VALLEYVIEW(i915)) ?
HIZ_RAW_STALL_OPT_DISABLE :
0));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 20b9b04ec1e0..cc866773ba6f 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -70,7 +70,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
if (!--b->irq_enabled)
b->irq_disable(b);
- WRITE_ONCE(b->irq_armed, 0);
+ WRITE_ONCE(b->irq_armed, NULL);
intel_gt_pm_put_async(b->irq_engine->gt, wakeref);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 40269e4c1e31..325da0414d94 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -126,9 +126,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
return active;
}
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index a8eac59e3779..1c4784cb296c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -15,6 +15,7 @@
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
+#define HEAD_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */
#define RING_START(base) _MMIO((base) + 0x38)
#define RING_CTL(base) _MMIO((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
@@ -26,7 +27,6 @@
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
-#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
#define RING_SYNC_0(base) _MMIO((base) + 0x40)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index ba55c059063d..fe1f85e5dda3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -343,6 +343,11 @@ struct intel_engine_guc_stats {
* @start_gt_clk: GT clock time of last idle to active transition.
*/
u64 start_gt_clk;
+
+ /**
+	 * @total: The last total busyness value returned
+ */
+ u64 total;
};
union intel_engine_tlb_inv_reg {
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 72090f52fb85..4a80ffa1b962 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -405,15 +405,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
return active;
}
-struct i915_request *
-execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
-{
- struct intel_engine_cs *engine =
- container_of(execlists, typeof(*engine), execlists);
-
- return __unwind_incomplete_requests(engine);
-}
-
static void
execlists_context_status_change(struct i915_request *rq, unsigned long status)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index d60a6ca0cae5..f6c59f20832f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -107,11 +107,12 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
+ * @evict_all: Evict all VMAs
*
* Suspend the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
*/
-void i915_ggtt_suspend_vm(struct i915_address_space *vm)
+void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all)
{
struct i915_vma *vma, *vn;
int save_skip_rewrite;
@@ -157,7 +158,7 @@ retry:
goto retry;
}
- if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+ if (evict_all || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
i915_vma_wait_for_bind(vma);
__i915_vma_evict(vma, false);
@@ -172,13 +173,15 @@ retry:
vm->skip_pte_rewrite = save_skip_rewrite;
mutex_unlock(&vm->mutex);
+
+ drm_WARN_ON(&vm->i915->drm, evict_all && !list_empty(&vm->bound_list));
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
struct intel_gt *gt;
- i915_ggtt_suspend_vm(&ggtt->vm);
+ i915_ggtt_suspend_vm(&ggtt->vm, false);
ggtt->invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
@@ -1545,6 +1548,7 @@ int i915_ggtt_enable_hw(struct drm_i915_private *i915)
/**
* i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
* @vm: The VM to restore the mappings for
+ * @all_evicted: Were all VMAs expected to be evicted on suspend?
*
* Restore the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
@@ -1552,13 +1556,18 @@ int i915_ggtt_enable_hw(struct drm_i915_private *i915)
* Returns %true if restoring the mapping for any object that was in a write
* domain before suspend.
*/
-bool i915_ggtt_resume_vm(struct i915_address_space *vm)
+bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted)
{
struct i915_vma *vma;
bool write_domain_objs = false;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+ if (all_evicted) {
+ drm_WARN_ON(&vm->i915->drm, !list_empty(&vm->bound_list));
+ return false;
+ }
+
/* First fill our portion of the GTT with scratch pages */
vm->clear_range(vm, 0, vm->total);
@@ -1598,7 +1607,7 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
intel_gt_check_and_clear_faults(gt);
- flush = i915_ggtt_resume_vm(&ggtt->vm);
+ flush = i915_ggtt_resume_vm(&ggtt->vm, false);
if (drm_mm_node_allocated(&ggtt->error_capture))
ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
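
The new evict_all/all_evicted flags form a contract across suspend and resume: if suspend evicted every mapping, resume may skip the PTE rewrite and only assert that the bound list is empty. A toy model of that contract, with a counter standing in for vm->bound_list:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct vm {
	size_t bound;	/* stand-in for the length of vm->bound_list */
};

static void suspend_vm(struct vm *vm, bool evict_all)
{
	if (evict_all)
		vm->bound = 0;		/* evict every mapping */
	/* mirror of the drm_WARN_ON() added above */
	assert(!evict_all || vm->bound == 0);
}

/* Returns whether any restored mapping needs a write-domain flush. */
static bool resume_vm(struct vm *vm, bool all_evicted)
{
	if (all_evicted) {
		assert(vm->bound == 0);	/* nothing left to rewrite */
		return false;
	}
	/* ... clear the range and rewrite PTEs for bound mappings ... */
	return vm->bound != 0;
}

int main(void)
{
	struct vm vm = { .bound = 4 };

	suspend_vm(&vm, true);
	return resume_vm(&vm, true) ? 1 : 0;
}
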
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index a6c69a706fd7..c4a351ebf395 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -185,7 +185,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
if (IS_HASWELL(i915))
intel_uncore_write(uncore,
HSW_MI_PREDICATE_RESULT_2,
- IS_HASWELL_GT3(i915) ?
+ INTEL_INFO(i915)->gt == 3 ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
@@ -302,7 +302,7 @@ static void gen6_check_faults(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 fault;
+ unsigned long fault;
for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
@@ -310,8 +310,8 @@ static void gen6_check_faults(struct intel_gt *gt)
gt_dbg(gt, "Unexpected fault\n"
"\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
+ "\tSource ID: %ld\n"
+ "\tType: %ld\n",
fault & PAGE_MASK,
fault & RING_FAULT_GTTSEL_MASK ?
"GGTT" : "PPGTT",
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index ad4c51f18d3a..1240d44eeb85 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -452,10 +452,10 @@ void gen8_gt_irq_reset(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
- GEN8_IRQ_RESET_NDX(uncore, GT, 0);
- GEN8_IRQ_RESET_NDX(uncore, GT, 1);
- GEN8_IRQ_RESET_NDX(uncore, GT, 2);
- GEN8_IRQ_RESET_NDX(uncore, GT, 3);
+ gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(0));
+ gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(1));
+ gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(2));
+ gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(3));
}
void gen8_gt_irq_postinstall(struct intel_gt *gt)
@@ -476,14 +476,14 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
gt->pm_ier = 0x0;
gt->pm_imr = ~gt->pm_ier;
- GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
- GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+ gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(0), ~gt_interrupts[0], gt_interrupts[0]);
+ gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(1), ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled. The same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
- GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+ gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(2), gt->pm_imr, gt->pm_ier);
+ gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(3), ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen5_gt_update_irq(struct intel_gt *gt,
@@ -514,9 +514,9 @@ void gen5_gt_irq_reset(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
- GEN3_IRQ_RESET(uncore, GT);
+ gen2_irq_reset(uncore, GT_IRQ_REGS);
if (GRAPHICS_VER(gt->i915) >= 6)
- GEN3_IRQ_RESET(uncore, GEN6_PM);
+ gen2_irq_reset(uncore, GEN6_PM_IRQ_REGS);
}
void gen5_gt_irq_postinstall(struct intel_gt *gt)
@@ -538,7 +538,7 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
else
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
- GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
+ gen2_irq_init(uncore, GT_IRQ_REGS, gt->gt_imr, gt_irqs);
if (GRAPHICS_VER(gt->i915) >= 6) {
/*
@@ -551,6 +551,6 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
}
gt->pm_imr = 0xffffffff;
- GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
+ gen2_irq_init(uncore, GEN6_PM_IRQ_REGS, gt->pm_imr, pm_irqs);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 911fd0160221..6f25c747bc29 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -35,7 +35,7 @@ static inline void __intel_gt_pm_get(struct intel_gt *gt)
static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
if (!intel_wakeref_get_if_active(&gt->wakeref))
- return 0;
+ return NULL;
return intel_wakeref_track(&gt->wakeref);
}
@@ -73,7 +73,7 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t ha
}
#define with_intel_gt_pm(gt, wf) \
- for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0)
+ for ((wf) = intel_gt_pm_get(gt); (wf); intel_gt_pm_put((gt), (wf)), (wf) = NULL)
/**
* with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
@@ -84,7 +84,7 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t ha
* @wf: pointer to a temporary wakeref.
*/
#define with_intel_gt_pm_if_awake(gt, wf) \
- for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0)
+ for ((wf) = intel_gt_pm_get_if_awake(gt); (wf); intel_gt_pm_put_async((gt), (wf)), (wf) = NULL)
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
@@ -105,9 +105,13 @@ int intel_gt_runtime_resume(struct intel_gt *gt);
ktime_t intel_gt_get_awake_time(const struct intel_gt *gt);
+#define INTEL_WAKEREF_MOCK_GT ERR_PTR(-ENODEV)
+
static inline bool is_mock_gt(const struct intel_gt *gt)
{
- return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
+ BUILD_BUG_ON(INTEL_WAKEREF_DEF == INTEL_WAKEREF_MOCK_GT);
+
+ return I915_SELFTEST_ONLY(gt->awake == INTEL_WAKEREF_MOCK_GT);
}
#endif /* INTEL_GT_PM_H */
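
with_intel_gt_pm() above relies on the classic C scoped-resource idiom: a for loop whose initializer acquires, whose condition tests the handle, and whose increment releases and clears it, so the body runs at most once between acquire and release. A self-contained illustration of the idiom:

#include <stdio.h>

static int acquire(void)   { puts("acquire"); return 1; }
static void release(int h) { (void)h; puts("release"); }

/*
 * Runs the body at most once: the init acquires, the condition tests
 * the handle, the increment releases and zeroes it so the loop ends.
 */
#define with_resource(h) \
	for ((h) = acquire(); (h); release(h), (h) = 0)

int main(void)
{
	int handle;

	with_resource(handle)
		puts("body runs while the resource is held");
	return 0;
}

The NULL conversion in the hunk above is why the macro now clears the wakeref with NULL instead of 0; the control flow is unchanged.
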
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 8d08b38874ef..b635aa2820d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -431,7 +431,7 @@ static int llc_show(struct seq_file *m, void *data)
max_gpu_freq /= GEN9_FREQ_SCALER;
}
- seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+ seq_puts(m, "GPU freq (MHz)\tEffective GPU freq (MHz)\tEffective Ring freq (MHz)\n");
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 57a3c83d3655..6dba65e54cdb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -432,6 +432,7 @@
#define XEHPG_INSTDONE_GEOM_SVG MCR_REG(0x666c)
#define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */
+#define DISABLE_REPACKING_FOR_COMPRESSION REG_BIT(15) /* jsl+ */
#define RC_OP_FLUSH_ENABLE (1 << 0)
#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
#define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */
@@ -1472,6 +1473,10 @@
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define GEN6_PM_IRQ_REGS I915_IRQ_REGS(GEN6_PMIMR, \
+ GEN6_PMIER, \
+ GEN6_PMIIR)
+
#define GEN7_GT_SCRATCH(i) _MMIO(0x4f100 + (i) * 4)
#define GEN7_GT_SCRATCH_REG_NUM 8
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 6b85222ee3ea..0a36ea751b63 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -608,8 +608,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
-void i915_ggtt_suspend_vm(struct i915_address_space *vm);
-bool i915_ggtt_resume_vm(struct i915_address_space *vm);
+void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all);
+bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7bd5d2c29056..51847a846002 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -820,8 +820,10 @@ static bool ctx_needs_runalone(const struct intel_context *ce)
bool ctx_is_protected = false;
/*
- * On MTL and newer platforms, protected contexts require setting
- * the LRC run-alone bit or else the encryption will not happen.
+ * Wa_14019159160 - Case 2.
+ * On some platforms, protected contexts require setting
+ * the LRC run-alone bit or else the encryption/decryption will not happen.
+	 * NOTE: Case 2 only applies to the PXP use-case of said workaround.
*/
if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 70) &&
(ce->engine->class == COMPUTE_CLASS || ce->engine->class == RENDER_CLASS)) {
@@ -850,6 +852,7 @@ static void init_common_regs(u32 * const regs,
if (GRAPHICS_VER(engine->i915) < 11)
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
+ /* Wa_14019159160 - Case 2.*/
if (ctx_needs_runalone(ce))
ctl |= _MASKED_BIT_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE);
regs[CTX_CONTEXT_CONTROL] = ctl;
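
The run-alone bit is set through a masked register write: on such registers the upper 16 bits select which of the lower 16 bits the write modifies, which is why the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers mirror the bit into the high half. A sketch of that encoding with a software model of the write:

#include <assert.h>
#include <stdint.h>

/* Masked registers: bits 31:16 select which of bits 15:0 get written. */
#define MASKED_BIT_ENABLE(a)	((a) | ((a) << 16))
#define MASKED_BIT_DISABLE(a)	((a) << 16)

static uint32_t masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = masked_write(reg, MASKED_BIT_ENABLE(1u << 7));
	assert(reg & (1u << 7));
	reg = masked_write(reg, MASKED_BIT_DISABLE(1u << 7));
	assert(!(reg & (1u << 7)));
	return 0;
}

This is why untouched bits keep their value without a read-modify-write from the CPU side: the hardware applies the mask itself.
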
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index c864d101faf9..9378d5901c49 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -133,7 +133,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
- if (GRAPHICS_VER(gt->i915) >= 12) {
+ if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
for (i = 0; i < I915_MAX_VCS; i++)
if (HAS_ENGINE(gt, _VCS(i)))
pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 8f1ea95471ef..aae5a081cb53 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1113,6 +1113,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
* Warn CI about the unrecoverable wedged condition.
* Time for a reboot.
*/
+ gt_err(gt, "Unrecoverable wedged condition\n");
add_taint_for_CI(gt->i915, TAINT_WARN);
return false;
}
@@ -1198,6 +1199,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t stalled_mask,
const char *reason)
{
+ struct intel_display *display = &gt->i915->display;
intel_engine_mask_t awake;
int ret;
@@ -1233,7 +1235,7 @@ void intel_gt_reset(struct intel_gt *gt,
}
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- intel_runtime_pm_disable_interrupts(gt->i915);
+ intel_irq_suspend(gt->i915);
if (do_reset(gt, stalled_mask)) {
gt_err(gt, "Failed to reset chip\n");
@@ -1241,9 +1243,9 @@ void intel_gt_reset(struct intel_gt *gt,
}
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
- intel_runtime_pm_enable_interrupts(gt->i915);
+ intel_irq_resume(gt->i915);
- intel_overlay_reset(gt->i915);
+ intel_overlay_reset(display);
/* sanitize uC after engine reset */
if (!intel_uc_uses_guc_submission(&gt->uc))
@@ -1263,8 +1265,10 @@ void intel_gt_reset(struct intel_gt *gt,
}
ret = resume(gt);
- if (ret)
+ if (ret) {
+ gt_err(gt, "Failed to resume (%d)\n", ret);
goto taint;
+ }
finish:
reset_finish(gt, awake);
@@ -1607,6 +1611,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
/* Wedged on init is non-recoverable */
+ gt_err(gt, "Non-recoverable wedged on init\n");
add_taint_for_CI(gt->i915, TAINT_WARN);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 59da4b7bd262..b74d9205c0f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -308,30 +308,6 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
return cs;
}
-/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct i915_request *rq)
-{
- int num_dwords;
- void *cs;
-
- num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
- if (num_dwords == 0)
- return 0;
-
- num_dwords = CACHELINE_DWORDS - num_dwords;
- GEM_BUG_ON(num_dwords & 1);
-
- cs = intel_ring_begin(rq, num_dwords);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
- intel_ring_advance(rq, cs + num_dwords);
-
- GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
- return 0;
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif
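
The removed intel_ring_cacheline_align() padded the ring tail up to a cacheline boundary with MI_NOOP dwords. The padding arithmetic is worth spelling out in isolation (assuming CACHELINE_BYTES is 64, as in the driver):

#include <assert.h>
#include <stdint.h>

#define CACHELINE_BYTES		64
#define CACHELINE_DWORDS	(CACHELINE_BYTES / sizeof(uint32_t))

/* How many dword NOPs pad offset "emit" up to the next cacheline? */
static unsigned int pad_dwords(uint32_t emit)
{
	unsigned int used = (emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);

	return used ? CACHELINE_DWORDS - used : 0;
}

int main(void)
{
	assert(pad_dwords(0) == 0);	/* already aligned */
	assert(pad_dwords(8) == 14);	/* 2 dwords used, 14 to go */
	assert(pad_dwords(60) == 1);
	return 0;
}
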
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index 1b32dadfb8c3..64b322e25f36 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -16,7 +16,6 @@ struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
-int intel_ring_cacheline_align(struct i915_request *rq);
unsigned int intel_ring_update_space(struct intel_ring *ring);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 72277bc8322e..458e29d89978 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -26,6 +26,7 @@
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
+#include "intel_gt_print.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
@@ -192,6 +193,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
static int xcs_resume(struct intel_engine_cs *engine)
{
struct intel_ring *ring = engine->legacy.ring;
+ ktime_t kt;
ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
ring->head, ring->tail);
@@ -229,10 +231,33 @@ static int xcs_resume(struct intel_engine_cs *engine)
set_pp_dir(engine);
- /* First wake the ring up to an empty/idle ring */
- ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
+ /*
+ * First wake the ring up to an empty/idle ring.
+	 * Allow up to 50ms for the engine to accept the write on
+	 * all platforms; experiments with different values showed
+	 * 50ms to be the most reliable.
+ */
+ for ((kt) = ktime_get() + (50 * NSEC_PER_MSEC);
+ ktime_before(ktime_get(), (kt)); cpu_relax()) {
+ /*
+		 * If a reset fails because the engine resumes from an
+		 * incorrect RING_HEAD, the GPU may be fed invalid
+		 * instructions, which may lead to an unrecoverable hang.
+		 * So if the first write doesn't succeed, try again.
+ */
+ ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
+ if (ENGINE_READ_FW(engine, RING_HEAD) == ring->head)
+ break;
+ }
+
ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
- ENGINE_POSTING_READ(engine, RING_TAIL);
+ if (ENGINE_READ_FW(engine, RING_HEAD) != ENGINE_READ_FW(engine, RING_TAIL)) {
+ ENGINE_TRACE(engine, "failed to reset empty ring: [%x, %x]: %x\n",
+ ENGINE_READ_FW(engine, RING_HEAD),
+ ENGINE_READ_FW(engine, RING_TAIL),
+ ring->head);
+ goto err;
+ }
ENGINE_WRITE_FW(engine, RING_CTL,
RING_CTL_SIZE(ring->size) | RING_VALID);
@@ -241,12 +266,16 @@ static int xcs_resume(struct intel_engine_cs *engine)
if (__intel_wait_for_register_fw(engine->uncore,
RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
- 5000, 0, NULL))
+ 5000, 0, NULL)) {
+ ENGINE_TRACE(engine, "failed to restart\n");
goto err;
+ }
- if (GRAPHICS_VER(engine->i915) > 2)
+ if (GRAPHICS_VER(engine->i915) > 2) {
ENGINE_WRITE_FW(engine,
RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_POSTING_READ(engine, RING_MI_MODE);
+ }
/* Now awake, let it get started */
if (ring->tail != ring->head) {
@@ -259,16 +288,16 @@ static int xcs_resume(struct intel_engine_cs *engine)
return 0;
err:
- drm_err(&engine->i915->drm,
- "%s initialization failed; "
- "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
- engine->name,
- ENGINE_READ(engine, RING_CTL),
- ENGINE_READ(engine, RING_CTL) & RING_VALID,
- ENGINE_READ(engine, RING_HEAD), ring->head,
- ENGINE_READ(engine, RING_TAIL), ring->tail,
- ENGINE_READ(engine, RING_START),
- i915_ggtt_offset(ring->vma));
+ gt_err(engine->gt, "%s initialization failed\n", engine->name);
+ ENGINE_TRACE(engine,
+ "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
+ i915_ggtt_offset(ring->vma));
+ GEM_TRACE_DUMP();
return -EIO;
}
@@ -1090,9 +1119,6 @@ static void setup_irq(struct intel_engine_cs *engine)
} else if (GRAPHICS_VER(i915) >= 5) {
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
- } else if (GRAPHICS_VER(i915) >= 3) {
- engine->irq_enable = gen3_irq_enable;
- engine->irq_disable = gen3_irq_disable;
} else {
engine->irq_enable = gen2_irq_enable;
engine->irq_disable = gen2_irq_disable;
@@ -1146,7 +1172,7 @@ static void setup_common(struct intel_engine_cs *engine)
* equivalent to our next initial breadcrumb so we can elide
* engine->emit_init_breadcrumb().
*/
- engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
+ engine->emit_fini_breadcrumb = gen2_emit_breadcrumb;
if (GRAPHICS_VER(i915) == 5)
engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
@@ -1159,7 +1185,7 @@ static void setup_common(struct intel_engine_cs *engine)
else if (IS_I830(i915) || IS_I845G(i915))
engine->emit_bb_start = i830_emit_bb_start;
else
- engine->emit_bb_start = gen3_emit_bb_start;
+ engine->emit_bb_start = gen2_emit_bb_start;
}
static void setup_rcs(struct intel_engine_cs *engine)
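
The xcs_resume() change earlier in this file replaces a single RING_HEAD write with a time-bounded retry. A userspace analogue of that bounded poll, assuming POSIX clock_gettime() and using a stand-in predicate for the register readback check:

#include <stdbool.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Retry op() until it reports success or budget_ns elapses. */
static bool retry_for(bool (*op)(void), long long budget_ns)
{
	long long deadline = now_ns() + budget_ns;

	do {
		if (op())
			return true;
	} while (now_ns() < deadline);
	return false;
}

static bool always_ok(void) { return true; }

int main(void)
{
	return retry_for(always_ok, 50 * 1000000LL) ? 0 : 1;	/* 50ms budget */
}

The do/while shape matters: the operation is attempted at least once even if the budget has already expired, just as the kernel loop issues the first write unconditionally.
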
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
index 756e9ebbc725..2487768bc230 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.c
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
@@ -122,7 +122,7 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
{
intel_wakeref_t wakeref;
- if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ if (is_mock_gt(gt))
return;
if (intel_gt_is_wedged(gt))
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index e539a656cfc3..570c91878189 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -418,7 +418,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ (INTEL_INFO(i915)->gt == 3 ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -2299,6 +2299,15 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
}
+ if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
+ /*
+ * "Disable Repacking for Compression (masked R/W access)
+ * before rendering compressed surfaces for display."
+ */
+ wa_masked_en(wal, CACHE_MODE_0_GEN7,
+ DISABLE_REPACKING_FOR_COMPRESSION);
+ }
+
if (GRAPHICS_VER(i915) == 11) {
/* This is not an Wa. Enable for better image quality */
wa_masked_en(wal,
@@ -2537,7 +2546,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_FF_DS_SCHED_HW);
/* WaDisablePSDDualDispatchEnable:ivb */
- if (IS_IVB_GT1(i915))
+ if (INTEL_INFO(i915)->gt == 1)
wa_masked_en(wal,
GEN7_HALF_SLICE_CHICKEN1,
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 222ca7c44951..81c31396eceb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -3574,7 +3574,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
arg[id].batch = NULL;
arg[id].count = 0;
- worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
+ worker[id] = kthread_run_worker(0, "igt/smoke:%d", id);
if (IS_ERR(worker[id])) {
err = PTR_ERR(worker[id]);
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 9ce8ff1c04fe..9d3aeb237295 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1025,7 +1025,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
threads[tmp].engine = other;
threads[tmp].flags = flags;
- worker = kthread_create_worker(0, "igt/%s",
+ worker = kthread_run_worker(0, "igt/%s",
other->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index ca460cee4f8b..1bf7b88d9a9d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -262,7 +262,7 @@ static int clear(struct intel_migrate *migrate,
{
struct drm_i915_private *i915 = migrate->context->engine->i915;
struct drm_i915_gem_object *obj;
- struct i915_request *rq;
+ struct i915_request *rq = NULL;
struct i915_gem_ww_ctx ww;
u32 *vaddr, val = 0;
bool ccs_cap = false;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 1aa1446c8fb0..27b6d51ef145 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -8,6 +8,7 @@
#include "intel_gpu_commands.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
+#include "intel_rps.h"
#include "selftest_rc6.h"
#include "selftests/i915_random.h"
@@ -38,6 +39,9 @@ int live_rc6_manual(void *arg)
ktime_t dt;
u64 res[2];
int err = 0;
+ u32 rc0_freq = 0;
+ u32 rc6_freq = 0;
+ struct intel_rps *rps = &gt->rps;
/*
* Our claim is that we can "encourage" the GPU to enter rc6 at will.
@@ -66,6 +70,7 @@ int live_rc6_manual(void *arg)
rc0_power = librapl_energy_uJ() - rc0_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
+ rc0_freq = intel_rps_read_actual_frequency_fw(rps);
if ((res[1] - res[0]) >> 10) {
pr_err("RC6 residency increased by %lldus while disabled for 1000ms!\n",
(res[1] - res[0]) >> 10);
@@ -77,7 +82,11 @@ int live_rc6_manual(void *arg)
rc0_power = div64_u64(NSEC_PER_SEC * rc0_power,
ktime_to_ns(dt));
if (!rc0_power) {
- pr_err("No power measured while in RC0\n");
+ if (rc0_freq)
+ pr_debug("No power measured while in RC0! GPU Freq: %u in RC0\n",
+ rc0_freq);
+ else
+ pr_err("No power and freq measured while in RC0\n");
err = -EINVAL;
goto out_unlock;
}
@@ -90,7 +99,8 @@ int live_rc6_manual(void *arg)
intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
dt = ktime_get();
rc6_power = librapl_energy_uJ();
- msleep(100);
+ msleep(1000);
+ rc6_freq = intel_rps_read_actual_frequency_fw(rps);
rc6_power = librapl_energy_uJ() - rc6_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
@@ -108,7 +118,8 @@ int live_rc6_manual(void *arg)
pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
rc0_power, rc6_power);
if (2 * rc6_power > rc0_power) {
- pr_err("GPU leaked energy while in RC6!\n");
+ pr_err("GPU leaked energy while in RC6! GPU Freq: %u in RC6 and %u in RC0\n",
+ rc6_freq, rc0_freq);
err = -EINVAL;
goto out_unlock;
}
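For reference, the power figures the test prints are an energy delta divided by elapsed time, computed in fixed point since the kernel avoids floating point. A sketch of the arithmetic used above (variable names illustrative):

	u64 energy_uJ = energy_end - energy_start;	/* librapl_energy_uJ() samples */
	u64 dt_ns = ktime_to_ns(ktime_sub(t_end, t_start));
	u64 power_uW = div64_u64(NSEC_PER_SEC * energy_uJ, dt_ns);

Lengthening the RC6 sample window to 1000ms and recording the actual frequency in both states makes a spurious "leaked energy" failure distinguishable from a GPU that simply never reached RC6.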
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index dcef8d498919..c207a4fb03bf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -1125,6 +1125,7 @@ static u64 measure_power(struct intel_rps *rps, int *freq)
static u64 measure_power_at(struct intel_rps *rps, int *freq)
{
*freq = rps_set_check(rps, *freq);
+ msleep(100);
return measure_power(rps, freq);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 4ecc4ae74a54..e218b229681f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -489,7 +489,7 @@ static int live_slpc_tile_interaction(void *arg)
return -ENOMEM;
for_each_gt(gt, i915, i) {
- threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
+ threads[i].worker = kthread_run_worker(0, "igt/slpc_parallel:%d", gt->info.id);
if (IS_ERR(threads[i].worker)) {
ret = PTR_ERR(threads[i].worker);
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 1fb6ff77fd89..bb696b29ee2c 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -40,7 +40,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_shmem(obj)) {
file = obj->base.filp;
- atomic_long_inc(&file->f_count);
+ get_file(file);
return file;
}
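Replacing the open-coded atomic_long_inc(&file->f_count) with get_file() uses the VFS's own reference helper, which stays correct even if the refcount representation changes and benefits from any debug checking in the helper. Sketch of the pairing:

	struct file *f = get_file(obj->base.filp);	/* take a reference */
	/* ... use f ... */
	fput(f);					/* drop it when done */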
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index 551b0d7974ff..5dc0ccd07636 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -80,6 +80,7 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
const struct intel_gsc_cpd_entry *cpd_entry = NULL;
const struct intel_gsc_manifest_header *manifest;
+ struct intel_uc_fw_ver min_ver = { 0 };
size_t min_size = sizeof(*layout);
int i;
@@ -212,33 +213,46 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
}
}
- if (IS_ARROWLAKE(gt->i915)) {
+ /*
+ * ARL SKUs require newer firmwares, but the blob is actually common
+ * across all MTL and ARL SKUs, so we need to do an explicit version check
+ * here rather than using a separate table entry. If a too old version
+ * is found, then just don't use GSC rather than aborting the driver load.
+ * Note that the major number in the GSC FW version is used to indicate
+ * the platform, so we expect it to always be 102 for MTL/ARL binaries.
+ */
+ if (IS_ARROWLAKE_S(gt->i915))
+ min_ver = (struct intel_uc_fw_ver){ 102, 0, 10, 1878 };
+ else if (IS_ARROWLAKE_H(gt->i915) || IS_ARROWLAKE_U(gt->i915))
+ min_ver = (struct intel_uc_fw_ver){ 102, 1, 15, 1926 };
+
+ if (IS_METEORLAKE(gt->i915) && gsc->release.major != 102) {
+ gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
+ gsc->release.major, gsc->release.minor,
+ gsc->release.patch, gsc->release.build);
+ return -EINVAL;
+ }
+
+ if (min_ver.major) {
bool too_old = false;
- /*
- * ARL requires a newer firmware than MTL did (102.0.10.1878) but the
- * firmware is actually common. So, need to do an explicit version check
- * here rather than using a separate table entry. And if the older
- * MTL-only version is found, then just don't use GSC rather than aborting
- * the driver load.
- */
- if (gsc->release.major < 102) {
+ if (gsc->release.minor < min_ver.minor) {
too_old = true;
- } else if (gsc->release.major == 102) {
- if (gsc->release.minor == 0) {
- if (gsc->release.patch < 10) {
+ } else if (gsc->release.minor == min_ver.minor) {
+ if (gsc->release.patch < min_ver.patch) {
+ too_old = true;
+ } else if (gsc->release.patch == min_ver.patch) {
+ if (gsc->release.build < min_ver.build)
too_old = true;
- } else if (gsc->release.patch == 10) {
- if (gsc->release.build < 1878)
- too_old = true;
- }
}
}
if (too_old) {
- gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least 102.0.10.1878",
+ gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least %d.%d.%d.%d",
gsc->release.major, gsc->release.minor,
- gsc->release.patch, gsc->release.build);
+ gsc->release.patch, gsc->release.build,
+ min_ver.major, min_ver.minor,
+ min_ver.patch, min_ver.build);
return -EINVAL;
}
}
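The rewritten check is a lexicographic comparison over (minor, patch, build); the major number is validated separately since it only encodes the platform. A standalone sketch of the same "too old" test (struct name illustrative):

	struct fw_ver { u32 major, minor, patch, build; };

	static bool fw_ver_too_old(const struct fw_ver *v, const struct fw_ver *min)
	{
		if (v->minor != min->minor)
			return v->minor < min->minor;
		if (v->patch != min->patch)
			return v->patch < min->patch;
		return v->build < min->build;
	}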
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 097fc6bd1285..5949ff0b0161 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -239,8 +239,16 @@ static u32 guc_ctl_debug_flags(struct intel_guc *guc)
static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
u32 flags = 0;
+ /*
+ * Enable PXP GuC autoteardown flow.
+ * NB: MTL does things differently.
+ */
+ if (HAS_PXP(gt->i915) && !IS_METEORLAKE(gt->i915))
+ flags |= GUC_CTL_ENABLE_GUC_PXP_CTL;
+
if (!intel_guc_submission_is_used(guc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
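Each GuC init parameter is a 32-bit control word built up from GUC_CTL_* bits, so enabling the PXP autoteardown flow is just another OR into the feature word, gated on platform support. A condensed sketch of the composition (conditions abbreviated):

	u32 flags = 0;

	if (pxp_supported && !is_mtl)		/* MTL handles PXP differently */
		flags |= GUC_CTL_ENABLE_GUC_PXP_CTL;
	if (!submission_used)
		flags |= GUC_CTL_DISABLE_SCHEDULER;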
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 23f54c84cbab..fe53e8eccf4b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -145,7 +145,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool
* an end user should hit the timeout is in case of extreme thermal throttling.
* And a system that is that hot during boot is probably dead anyway!
*/
-#if defined(CONFIG_DRM_I915_DEBUG_GEM)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOAD_RETRY_LIMIT 20
#else
#define GUC_LOAD_RETRY_LIMIT 3
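IS_ENABLED(CONFIG_FOO) evaluates to 1 for both =y and =m and can also be used in ordinary C conditionals, whereas bare #if defined(CONFIG_FOO) misses the =m case for tristate options (which define CONFIG_FOO_MODULE instead). For boolean options like CONFIG_DRM_I915_DEBUG_GEM the two are equivalent, so this is a consistency cleanup. Sketch of the expression form:

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		pr_debug("debug build: retry limit %d\n", GUC_LOAD_RETRY_LIMIT);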
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 263c9c3f6a03..4ce6e2332a63 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -105,6 +105,7 @@
#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22)
#define GUC_CTL_FEATURE 2
+#define GUC_CTL_ENABLE_GUC_PXP_CTL BIT(1)
#define GUC_CTL_ENABLE_SLPC BIT(2)
#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index bf16351c9349..e8a04e476c57 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -14,11 +14,11 @@
#include "intel_guc_log.h"
#include "intel_guc_print.h"
-#if defined(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
-#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#elif IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index ed979847187f..cc05bd9e43b4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1243,6 +1243,21 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine,
} while (++i < 6);
}
+static void __set_engine_usage_record(struct intel_engine_cs *engine,
+ u32 last_in, u32 id, u32 total)
+{
+ struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
+
+#define record_write(map_, field_, val_) \
+ iosys_map_wr_field(map_, 0, struct guc_engine_usage_record, field_, val_)
+
+ record_write(&rec_map, last_switch_in_stamp, last_in);
+ record_write(&rec_map, current_context_index, id);
+ record_write(&rec_map, total_runtime, total);
+
+#undef record_write
+}
+
static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
@@ -1339,7 +1354,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
* start_gt_clk is derived from GuC state. To get a consistent
* view of activity, we query the GuC state only if gt is awake.
*/
- wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
+ wakeref = in_reset ? NULL : intel_gt_pm_get_if_awake(gt);
if (wakeref) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
@@ -1363,9 +1378,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
total += intel_gt_clock_interval_to_ns(gt, clk);
}
+ if (total > stats->total)
+ stats->total = total;
+
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
- return ns_to_ktime(total);
+ return ns_to_ktime(stats->total);
}
static void guc_enable_busyness_worker(struct intel_guc *guc)
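Clamping the published total to its running maximum makes the busyness value monotonic: around a reset the GuC-derived clocks can transiently go backwards, and without the latch a later PMU read could report less busy time than an earlier one. Sketch of the latch (types illustrative):

	static u64 monotonic_busy(struct engine_stats *st, u64 sample_ns)
	{
		if (sample_ns > st->total)	/* held under the stats lock */
			st->total = sample_ns;
		return st->total;
	}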
@@ -1431,13 +1449,39 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id) {
+ struct intel_engine_guc_stats *stats = &engine->stats.guc;
+
guc_update_engine_gt_clks(engine);
- engine->stats.guc.prev_total = 0;
+
+ /*
+ * If resetting a running context, accumulate the active
+ * time as well since there will be no context switch.
+ */
+ if (stats->running) {
+ u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
+
+ stats->total_gt_clks += clk;
+ }
+ stats->prev_total = 0;
+ stats->running = 0;
}
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
+static void __update_guc_busyness_running_state(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ for_each_engine(engine, gt, id)
+ engine->stats.guc.running = false;
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
static void __update_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1543,6 +1587,9 @@ err_trylock:
static int guc_action_enable_usage_stats(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 offset = intel_guc_engine_usage_offset(guc);
u32 action[] = {
INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
@@ -1550,6 +1597,9 @@ static int guc_action_enable_usage_stats(struct intel_guc *guc)
0,
};
+ for_each_engine(engine, gt, id)
+ __set_engine_usage_record(engine, 0, 0xffffffff, 0);
+
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
@@ -1582,6 +1632,9 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;
+ /* Assume no engines are running and set running state to false */
+ __update_guc_busyness_running_state(guc);
+
/*
* There is a race with suspend flow where the worker runs after suspend
* and causes an unclaimed register access warning. Cancel the worker
@@ -1688,6 +1741,10 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
spin_lock_irq(guc_to_gt(guc)->irq_lock);
spin_unlock_irq(guc_to_gt(guc)->irq_lock);
+ /* Flush tasklet */
+ tasklet_disable(&guc->ct.receive_tasklet);
+ tasklet_enable(&guc->ct.receive_tasklet);
+
guc_flush_submissions(guc);
guc_flush_destroyed_contexts(guc);
flush_work(&guc->ct.requests.worker);
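The disable/enable pair works as a flush barrier because tasklet_disable() does not return until a concurrently running handler has finished; re-enabling immediately afterwards leaves the tasklet usable. Sketch of the idiom:

	tasklet_disable(&ct->receive_tasklet);	/* waits out a running handler */
	tasklet_enable(&ct->receive_tasklet);	/* re-arm for future schedules */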
@@ -2005,6 +2062,8 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
+ int outstanding;
+
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
!intel_guc_is_fw_running(guc) ||
@@ -2018,8 +2077,10 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
* see in CI if this happens frequently / a precursor to taking down the
* machine.
*/
- if (atomic_read(&guc->outstanding_submission_g2h))
- guc_err(guc, "Unexpected outstanding GuC to Host in reset finish\n");
+ outstanding = atomic_read(&guc->outstanding_submission_g2h);
+ if (outstanding)
+ guc_err(guc, "Unexpected outstanding GuC to Host response(s) in reset finish: %d\n",
+ outstanding);
atomic_set(&guc->outstanding_submission_g2h, 0);
intel_guc_global_policies_update(guc);
@@ -5474,12 +5535,20 @@ static inline void guc_log_context(struct drm_printer *p,
{
drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
- drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
- ce->ring->head,
- ce->lrc_reg_state[CTX_RING_HEAD]);
- drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
- ce->ring->tail,
- ce->lrc_reg_state[CTX_RING_TAIL]);
+ if (intel_context_pin_if_active(ce)) {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+ ce->ring->head,
+ ce->lrc_reg_state[CTX_RING_HEAD]);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+ ce->ring->tail,
+ ce->lrc_reg_state[CTX_RING_TAIL]);
+ intel_context_unpin(ce);
+ } else {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory not pinned\n",
+ ce->ring->head);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory not pinned\n",
+ ce->ring->tail);
+ }
drm_printf(p, "\t\tContext Pin Count: %u\n",
atomic_read(&ce->pin_count));
drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
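Dumping lrc_reg_state is only safe while the context is pinned, since the register state lives in backing pages that may be unmapped otherwise; intel_context_pin_if_active() takes a pin only when one already exists, so the debug path never forces a pin itself. Sketch of the guard (dump helpers illustrative):

	if (intel_context_pin_if_active(ce)) {
		dump_lrc_regs(ce);		/* state mapped while pinned */
		intel_context_unpin(ce);
	} else {
		dump_sw_state_only(ce);		/* backing memory may be gone */
	}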
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 2d9152eb7282..b3cbf85c00cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -427,19 +427,6 @@ void intel_huc_fini(struct intel_huc *huc)
intel_uc_fw_fini(&huc->fw);
}
-void intel_huc_suspend(struct intel_huc *huc)
-{
- if (!intel_uc_fw_is_loadable(&huc->fw))
- return;
-
- /*
- * in the unlikely case that we're suspending before the GSC has
- * completed its loading sequence, just stop waiting. We'll restart
- * on resume.
- */
- delayed_huc_load_complete(huc);
-}
-
static const char *auth_mode_string(struct intel_huc *huc,
enum intel_huc_authentication_type type)
{
@@ -455,7 +442,7 @@ static const char *auth_mode_string(struct intel_huc *huc,
* an end user should hit the timeout is in case of extreme thermal throttling.
* And a system that is that hot during boot is probably dead anyway!
*/
-#if defined(CONFIG_DRM_I915_DEBUG_GEM)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define HUC_LOAD_RETRY_LIMIT 20
#else
#define HUC_LOAD_RETRY_LIMIT 3
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index ba5cb08e9e7b..d5e441b9e08d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -57,7 +57,6 @@ int intel_huc_sanitize(struct intel_huc *huc);
void intel_huc_init_early(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
-void intel_huc_suspend(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type);
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
enum intel_huc_authentication_type type);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 2f4c9c66b40b..6439c8e91a8d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -50,7 +50,6 @@
#include "trace.h"
#include "display/i9xx_plane_regs.h"
-#include "display/intel_display.h"
#include "display/intel_sprite_regs.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
@@ -1287,6 +1286,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
+ struct intel_display *display = &dev_priv->display;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1315,9 +1315,9 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
if (info->plane == PLANE_A) {
- info->ctrl_reg = DSPCNTR(dev_priv, info->pipe);
- info->stride_reg = DSPSTRIDE(dev_priv, info->pipe);
- info->surf_reg = DSPSURF(dev_priv, info->pipe);
+ info->ctrl_reg = DSPCNTR(display, info->pipe);
+ info->stride_reg = DSPSTRIDE(display, info->pipe);
+ info->surf_reg = DSPSURF(display, info->pipe);
} else if (info->plane == PLANE_B) {
info->ctrl_reg = SPRCTL(info->pipe);
info->stride_reg = SPRSTRIDE(info->pipe);
@@ -1333,6 +1333,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
+ struct intel_display *display = &dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1381,9 +1382,9 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
- info->ctrl_reg = DSPCNTR(dev_priv, info->pipe);
- info->stride_reg = DSPSTRIDE(dev_priv, info->pipe);
- info->surf_reg = DSPSURF(dev_priv, info->pipe);
+ info->ctrl_reg = DSPCNTR(display, info->pipe);
+ info->stride_reg = DSPSTRIDE(display, info->pipe);
+ info->surf_reg = DSPSURF(display, info->pipe);
return 0;
}
@@ -1420,6 +1421,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct mi_display_flip_command_info *info)
{
struct drm_i915_private *dev_priv = s->engine->i915;
+ struct intel_display *display = &dev_priv->display;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
@@ -1437,7 +1439,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
}
if (info->plane == PLANE_PRIMARY)
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, info->pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, info->pipe))++;
if (info->async_flip)
intel_vgpu_trigger_virtual_event(vgpu, info->event);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index c66d6d3177c8..95570cabdf27 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -32,12 +32,15 @@
*
*/
+#include <drm/display/drm_dp.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
+#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display.h"
#include "display/intel_dpio_phy.h"
@@ -66,8 +69,9 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
- if (!(vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
+ if (!(vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_EDP)) & TRANSCONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -78,12 +82,13 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
if (drm_WARN_ON(&dev_priv->drm,
pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
- if (vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, pipe)) & TRANSCONF_ENABLE)
+ if (vgpu_vreg_t(vgpu, TRANSCONF(display, pipe)) & TRANSCONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
@@ -178,6 +183,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -190,21 +196,21 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) |
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C));
- for_each_pipe(dev_priv, pipe) {
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, pipe)) &=
+ for_each_pipe(display, pipe) {
+ vgpu_vreg_t(vgpu, TRANSCONF(display, pipe)) &=
~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE);
- vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) &= ~DISP_ENABLE;
+ vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) &= ~DISP_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) &= ~MCURSOR_MODE_MASK;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) |= MCURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) &= ~MCURSOR_MODE_MASK;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) |= MCURSOR_MODE_DISABLE;
}
for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) {
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, trans)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, trans)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE);
}
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
@@ -252,8 +258,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* TRANSCODER_A can be enabled. PORT_x depends on the input of
* setup_virtual_dp_monitor.
*/
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_ENABLE;
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE;
/*
* Golden M/N are calculated based on:
@@ -261,11 +267,11 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* DP link clk 1620 MHz and non-constant_n.
* TODO: calculate DP link symbol clk and stream clk m/n.
*/
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) = TU_SIZE(64);
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) |= 0x5b425e;
- vgpu_vreg_t(vgpu, PIPE_DATA_N1(dev_priv, TRANSCODER_A)) = 0x800000;
- vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A)) = 0x3cd6e;
- vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A)) = 0x80000;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) = TU_SIZE(64);
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(display, TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)) = 0x80000;
/* Enable per-DDI/PORT vreg */
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
@@ -288,7 +294,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_EDP)) |=
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_EDP)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
TRANS_DDI_FUNC_ENABLE);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
@@ -318,7 +324,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -349,7 +355,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu,
- TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -398,11 +404,11 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
* DP link clk 1620 MHz and non-constant_n.
* TODO: calculate DP link symbol clk and stream clk m/n.
*/
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) = TU_SIZE(64);
- vgpu_vreg_t(vgpu, PIPE_DATA_M1(dev_priv, TRANSCODER_A)) |= 0x5b425e;
- vgpu_vreg_t(vgpu, PIPE_DATA_N1(dev_priv, TRANSCODER_A)) = 0x800000;
- vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A)) = 0x3cd6e;
- vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A)) = 0x80000;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) = TU_SIZE(64);
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(display, TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(display, TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A)) = 0x80000;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
@@ -413,10 +419,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -439,10 +445,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -465,10 +471,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
@@ -506,14 +512,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
/* Disable Primary/Sprite/Cursor plane */
- for_each_pipe(dev_priv, pipe) {
- vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) &= ~DISP_ENABLE;
+ for_each_pipe(display, pipe) {
+ vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) &= ~DISP_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) &= ~MCURSOR_MODE_MASK;
- vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe)) |= MCURSOR_MODE_DISABLE;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) &= ~MCURSOR_MODE_MASK;
+ vgpu_vreg_t(vgpu, CURCNTR(display, pipe)) |= MCURSOR_MODE_DISABLE;
}
- vgpu_vreg_t(vgpu, TRANSCONF(dev_priv, TRANSCODER_A)) |= TRANSCONF_ENABLE;
+ vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -568,7 +574,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
port->dpcd->data_valid = true;
- port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
+ port->dpcd->data[DP_SINK_COUNT] = 0x1;
port->type = type;
port->id = resolution;
port->vrefresh_k = GVT_DEFAULT_REFRESH_RATE * MSEC_PER_SEC;
@@ -629,6 +635,7 @@ void vgpu_update_vblank_emulation(struct intel_vgpu *vgpu, bool turnon)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -650,17 +657,19 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
}
if (pipe_is_enabled(vgpu, pipe)) {
- vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(dev_priv, pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(display, pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
}
}
void intel_vgpu_emulate_vblank(struct intel_vgpu *vgpu)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+ struct intel_display *display = &i915->display;
int pipe;
mutex_lock(&vgpu->vgpu_lock);
- for_each_pipe(vgpu->gvt->gt->i915, pipe)
+ for_each_pipe(display, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
mutex_unlock(&vgpu->vgpu_lock);
}
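The gvt conversions above all follow the same mechanical pattern: derive a struct intel_display pointer once per function and pass it to the display register macros in place of dev_priv. Sketch:

	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_display *display = &dev_priv->display;

	vgpu_vreg_t(vgpu, TRANSCONF(display, TRANSCODER_A)) |= TRANSCONF_ENABLE;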
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index f5616f99ef2f..8090bc53c7e1 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -59,52 +59,10 @@ struct intel_vgpu;
#define INTEL_GVT_MAX_UEVENT_VARS 3
-/* DPCD start */
-#define DPCD_SIZE 0x700
-
-/* DPCD */
-#define DP_SET_POWER 0x600
-#define DP_SET_POWER_D0 0x1
-#define AUX_NATIVE_WRITE 0x8
-#define AUX_NATIVE_READ 0x9
-
-#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
-#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
#define AUX_NATIVE_REPLY_NAK (0x1 << 4)
-#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
#define AUX_BURST_SIZE 20
-/* DPCD addresses */
-#define DPCD_REV 0x000
-#define DPCD_MAX_LINK_RATE 0x001
-#define DPCD_MAX_LANE_COUNT 0x002
-
-#define DPCD_TRAINING_PATTERN_SET 0x102
-#define DPCD_SINK_COUNT 0x200
-#define DPCD_LANE0_1_STATUS 0x202
-#define DPCD_LANE2_3_STATUS 0x203
-#define DPCD_LANE_ALIGN_STATUS_UPDATED 0x204
-#define DPCD_SINK_STATUS 0x205
-
-/* link training */
-#define DPCD_TRAINING_PATTERN_SET_MASK 0x03
-#define DPCD_LINK_TRAINING_DISABLED 0x00
-#define DPCD_TRAINING_PATTERN_1 0x01
-#define DPCD_TRAINING_PATTERN_2 0x02
-
-#define DPCD_CP_READY_MASK (1 << 6)
-
-/* lane status */
-#define DPCD_LANES_CR_DONE 0x11
-#define DPCD_LANES_EQ_DONE 0x22
-#define DPCD_SYMBOL_LOCKED 0x44
-
-#define DPCD_INTERLANE_ALIGN_DONE 0x01
-
-#define DPCD_SINK_IN_SYNC 0x03
-/* DPCD end */
-
#define SBI_RESPONSE_MASK 0x3
#define SBI_RESPONSE_SHIFT 0x1
#define SBI_STAT_MASK 0x1
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index c022dc736045..0a357ca42db1 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -32,6 +32,8 @@
*
*/
+#include <drm/display/drm_dp.h>
+
#include "display/intel_dp_aux_regs.h"
#include "display/intel_gmbus_regs.h"
#include "gvt.h"
@@ -504,13 +506,13 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
}
/* Always set the wanted value for vms. */
- ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
+ ret_msg_size = (((op & 0x1) == DP_AUX_I2C_READ) ? 2 : 1);
vgpu_vreg(vgpu, offset) =
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_MESSAGE_SIZE(ret_msg_size);
if (msg_length == 3) {
- if (!(op & GVT_AUX_I2C_MOT)) {
+ if (!(op & DP_AUX_I2C_MOT)) {
/* stop */
intel_vgpu_init_i2c_edid(vgpu);
} else {
@@ -530,7 +532,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
i2c_edid->edid_available = true;
}
}
- } else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
+ } else if ((op & 0x1) == DP_AUX_I2C_WRITE) {
/* TODO
* We only support EDID reading from I2C_over_AUX. And
* we do not expect the index mode to be used. Right now
@@ -538,7 +540,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
* support the gfx driver to do EDID access.
*/
} else {
- if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ))
+ if (drm_WARN_ON(&i915->drm, (op & 0x1) != DP_AUX_I2C_READ))
return;
if (drm_WARN_ON(&i915->drm, msg_length != 4))
return;
@@ -553,7 +555,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
* ACK of I2C_WRITE
* returned byte if it is READ
*/
- aux_data_for_write |= GVT_AUX_I2C_REPLY_ACK << 24;
+ aux_data_for_write |= DP_AUX_I2C_REPLY_ACK << 24;
vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
}
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index c3b5a55aecb3..13fd06590929 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -42,14 +42,6 @@ struct intel_vgpu;
#define EDID_SIZE 128
#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
-#define GVT_AUX_NATIVE_WRITE 0x8
-#define GVT_AUX_NATIVE_READ 0x9
-#define GVT_AUX_I2C_WRITE 0x0
-#define GVT_AUX_I2C_READ 0x1
-#define GVT_AUX_I2C_STATUS 0x2
-#define GVT_AUX_I2C_MOT 0x4
-#define GVT_AUX_I2C_REPLY_ACK 0x0
-
struct intel_vgpu_edid_data {
bool data_valid;
unsigned char edid_block[EDID_SIZE];
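Dropping the private GVT_AUX_* copies in favor of <drm/display/drm_dp.h> works because the values are fixed by the DisplayPort spec. For reference, a hedged sketch of the 4-bit AUX command decode the emulation performs (bit 3: native vs i2c, bit 0: read vs write, bit 2: i2c middle-of-transaction):

	u8 op = ctrl >> 4;				/* 4-bit AUX command */
	bool native = op & DP_AUX_NATIVE_WRITE;		/* 0x8 set on native ops */
	bool read = (op & 0x1) == DP_AUX_I2C_READ;
	bool mot = op & DP_AUX_I2C_MOT;			/* 0x4 */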
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index c454e25b2b0f..15cce973e1ae 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -154,8 +154,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
- u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(dev_priv, pipe)) & stride_mask;
+ u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(display, pipe)) & stride_mask;
u32 stride = stride_reg;
if (GRAPHICS_VER(dev_priv) >= 9) {
@@ -210,6 +211,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
u32 val, fmt;
int pipe;
@@ -217,7 +219,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
if (pipe >= I915_MAX_PIPES)
return -ENODEV;
- val = vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, DSPCNTR(display, pipe));
plane->enabled = !!(val & DISP_ENABLE);
if (!plane->enabled)
return -ENODEV;
@@ -251,7 +253,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
- plane->base = vgpu_vreg_t(vgpu, DSPSURF(dev_priv, pipe)) & I915_GTT_PAGE_MASK;
+ plane->base = vgpu_vreg_t(vgpu, DSPSURF(display, pipe)) & I915_GTT_PAGE_MASK;
if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
@@ -267,14 +269,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
(_PRI_PLANE_STRIDE_MASK >> 6) :
_PRI_PLANE_STRIDE_MASK, plane->bpp);
- plane->width = (vgpu_vreg_t(vgpu, PIPESRC(dev_priv, pipe)) & _PIPE_H_SRCSZ_MASK) >>
+ plane->width = (vgpu_vreg_t(vgpu, PIPESRC(display, pipe)) & _PIPE_H_SRCSZ_MASK) >>
_PIPE_H_SRCSZ_SHIFT;
plane->width += 1;
- plane->height = (vgpu_vreg_t(vgpu, PIPESRC(dev_priv, pipe)) &
+ plane->height = (vgpu_vreg_t(vgpu, PIPESRC(display, pipe)) &
_PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
plane->height += 1; /* raw height is one minus the real value */
- val = vgpu_vreg_t(vgpu, DSPTILEOFF(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, DSPTILEOFF(display, pipe));
plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
_PRI_PLANE_X_OFF_SHIFT;
plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
@@ -340,6 +342,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
u32 val, mode, index;
u32 alpha_plane, alpha_force;
int pipe;
@@ -348,7 +351,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
if (pipe >= I915_MAX_PIPES)
return -ENODEV;
- val = vgpu_vreg_t(vgpu, CURCNTR(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, CURCNTR(display, pipe));
mode = val & MCURSOR_MODE_MASK;
plane->enabled = (mode != MCURSOR_MODE_DISABLE);
if (!plane->enabled)
@@ -374,7 +377,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
alpha_plane, alpha_force);
- plane->base = vgpu_vreg_t(vgpu, CURBASE(dev_priv, pipe)) & I915_GTT_PAGE_MASK;
+ plane->base = vgpu_vreg_t(vgpu, CURBASE(display, pipe)) & I915_GTT_PAGE_MASK;
if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
@@ -385,7 +388,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
return -EINVAL;
}
- val = vgpu_vreg_t(vgpu, CURPOS(dev_priv, pipe));
+ val = vgpu_vreg_t(vgpu, CURPOS(display, pipe));
plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 58cca4906f41..1bce1493b86f 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1190,7 +1190,7 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
ppgtt_set_shadow_entry(spt, se, index);
return 0;
err:
- /* Cancel the existing addess mappings of DMA addr. */
+ /* Cancel the existing address mappings of DMA addr. */
for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
gvt_vdbg_mm("invalidate 4K entry\n");
ppgtt_invalidate_pte(sub_spt, &sub_se);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0f09344d3c20..241cff0fc683 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -36,6 +36,8 @@
*/
+#include <drm/display/drm_dp.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
@@ -43,6 +45,7 @@
#include "intel_mchbar_regs.h"
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
+#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
@@ -653,11 +656,12 @@ static u32 skl_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
enum port port;
u32 dp_br, link_m, link_n, htotal, vtotal;
/* Find DDI/PORT assigned to TRANSCODER_A, expect B or D */
- port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(dev_priv, TRANSCODER_A)) &
+ port = (vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(display, TRANSCODER_A)) &
TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
if (port != PORT_B && port != PORT_D) {
gvt_dbg_dpy("vgpu-%d unsupported PORT_%c\n", vgpu->id, port_name(port));
@@ -673,12 +677,12 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
dp_br = skl_vgpu_get_dp_bitrate(vgpu, port);
/* Get DP link symbol clock M/N */
- link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(dev_priv, TRANSCODER_A));
- link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(dev_priv, TRANSCODER_A));
+ link_m = vgpu_vreg_t(vgpu, PIPE_LINK_M1(display, TRANSCODER_A));
+ link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(display, TRANSCODER_A));
/* Get H/V total from transcoder timing */
- htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(dev_priv, TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
- vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(dev_priv, TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
+ htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(display, TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
+ vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(display, TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
if (dp_br && link_n && htotal && vtotal) {
u64 pixel_clk = 0;
@@ -1009,22 +1013,23 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
return 0;
}
-#define DSPSURF_TO_PIPE(dev_priv, offset) \
- calc_index(offset, DSPSURF(dev_priv, PIPE_A), DSPSURF(dev_priv, PIPE_B), DSPSURF(dev_priv, PIPE_C))
+#define DSPSURF_TO_PIPE(display, offset) \
+ calc_index(offset, DSPSURF(display, PIPE_A), DSPSURF(display, PIPE_B), DSPSURF(display, PIPE_C))
static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- u32 pipe = DSPSURF_TO_PIPE(dev_priv, offset);
+ struct intel_display *display = &dev_priv->display;
+ u32 pipe = DSPSURF_TO_PIPE(display, offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, DSPSURFLIVE(dev_priv, pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset);
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++;
- if (vgpu_vreg_t(vgpu, DSPCNTR(dev_priv, pipe)) & PLANE_CTL_ASYNC_FLIP)
+ if (vgpu_vreg_t(vgpu, DSPCNTR(display, pipe)) & PLANE_CTL_ASYNC_FLIP)
intel_vgpu_trigger_virtual_event(vgpu, event);
else
set_bit(event, vgpu->irq.flip_done_event[pipe]);
@@ -1057,14 +1062,15 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
write_vreg(vgpu, offset, p_data, bytes);
if (plane == PLANE_PRIMARY) {
- vgpu_vreg_t(vgpu, DSPSURFLIVE(dev_priv, pipe)) = vgpu_vreg(vgpu, offset);
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(dev_priv, pipe))++;
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(display, pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(display, pipe))++;
} else {
vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
}
@@ -1129,29 +1135,36 @@ static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
u8 t)
{
- if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
+ if ((t & DP_TRAINING_PATTERN_MASK) == DP_TRAINING_PATTERN_1) {
/* training pattern 1 for CR */
/* set LANE0_CR_DONE, LANE1_CR_DONE */
- dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_CR_DONE;
+ dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CR_DONE |
+ DP_LANE_CR_DONE << 4;
/* set LANE2_CR_DONE, LANE3_CR_DONE */
- dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_CR_DONE;
- } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
- DPCD_TRAINING_PATTERN_2) {
+ dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CR_DONE |
+ DP_LANE_CR_DONE << 4;
+ } else if ((t & DP_TRAINING_PATTERN_MASK) ==
+ DP_TRAINING_PATTERN_2) {
/* training pattern 2 for EQ */
/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane0_1 */
- dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_LANES_EQ_DONE;
- dpcd->data[DPCD_LANE0_1_STATUS] |= DPCD_SYMBOL_LOCKED;
+ dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_CHANNEL_EQ_DONE |
+ DP_LANE_CHANNEL_EQ_DONE << 4;
+ dpcd->data[DP_LANE0_1_STATUS] |= DP_LANE_SYMBOL_LOCKED |
+ DP_LANE_SYMBOL_LOCKED << 4;
/* Set CHANNEL_EQ_DONE and SYMBOL_LOCKED for Lane2_3 */
- dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_LANES_EQ_DONE;
- dpcd->data[DPCD_LANE2_3_STATUS] |= DPCD_SYMBOL_LOCKED;
+ dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_CHANNEL_EQ_DONE |
+ DP_LANE_CHANNEL_EQ_DONE << 4;
+ dpcd->data[DP_LANE2_3_STATUS] |= DP_LANE_SYMBOL_LOCKED |
+ DP_LANE_SYMBOL_LOCKED << 4;
/* set INTERLANE_ALIGN_DONE */
- dpcd->data[DPCD_LANE_ALIGN_STATUS_UPDATED] |=
- DPCD_INTERLANE_ALIGN_DONE;
- } else if ((t & DPCD_TRAINING_PATTERN_SET_MASK) ==
- DPCD_LINK_TRAINING_DISABLED) {
+ dpcd->data[DP_LANE_ALIGN_STATUS_UPDATED] |=
+ DP_INTERLANE_ALIGN_DONE;
+ } else if ((t & DP_TRAINING_PATTERN_MASK) ==
+ DP_TRAINING_PATTERN_DISABLE) {
/* finish link training */
/* set sink status as synchronized */
- dpcd->data[DPCD_SINK_STATUS] = DPCD_SINK_IN_SYNC;
+ dpcd->data[DP_SINK_STATUS] = DP_RECEIVE_PORT_0_STATUS |
+ DP_RECEIVE_PORT_1_STATUS;
}
}
@@ -1206,7 +1219,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
len = msg & 0xff;
op = ctrl >> 4;
- if (op == GVT_AUX_NATIVE_WRITE) {
+ if (op == DP_AUX_NATIVE_WRITE) {
int t;
u8 buf[16];
@@ -1252,7 +1265,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
dpcd->data[p] = buf[t];
/* check for link training */
- if (p == DPCD_TRAINING_PATTERN_SET)
+ if (p == DP_TRAINING_PATTERN_SET)
dp_aux_ch_ctl_link_training(dpcd,
buf[t]);
}
@@ -1265,7 +1278,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
return 0;
}
- if (op == GVT_AUX_NATIVE_READ) {
+ if (op == DP_AUX_NATIVE_READ) {
int idx, i, ret = 0;
if ((addr + len + 1) >= DPCD_SIZE) {
@@ -2183,6 +2196,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
struct drm_i915_private *dev_priv = gvt->gt->i915;
+ struct intel_display *display = &dev_priv->display;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL,
@@ -2271,21 +2285,21 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_A), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_A), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_B), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_B), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_C), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_C), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(TRANSCONF(dev_priv, TRANSCODER_EDP), D_ALL, NULL,
+ MMIO_DH(TRANSCONF(display, TRANSCODER_EDP), D_ALL, NULL,
pipeconf_mmio_write);
- MMIO_DH(DSPSURF(dev_priv, PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_DH(DSPSURF(display, PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
- MMIO_DH(DSPSURF(dev_priv, PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_DH(DSPSURF(display, PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
- MMIO_DH(DSPSURF(dev_priv, PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
+ MMIO_DH(DSPSURF(display, PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
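The repeated "| DP_LANE_* << 4" pattern reflects the DPCD layout: DP_LANE0_1_STATUS packs lane 0 in the low nibble and lane 1 in the high nibble (likewise lanes 2/3 in the next byte), so marking both lanes done ORs the same bits twice. Sketch:

	u8 both = DP_LANE_CR_DONE | (DP_LANE_CR_DONE << 4);

	dpcd[DP_LANE0_1_STATUS] |= both;	/* lanes 0 and 1 */
	dpcd[DP_LANE2_3_STATUS] |= both;	/* lanes 2 and 3 */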
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index ca0fb126b02d..b27ff77bfb50 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -53,8 +53,8 @@
#include "intel_gvt.h"
#include "gvt.h"
-MODULE_IMPORT_NS(DMA_BUF);
-MODULE_IMPORT_NS(I915_GVT);
+MODULE_IMPORT_NS("DMA_BUF");
+MODULE_IMPORT_NS("I915_GVT");
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40
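The quoted namespace arguments follow the kernel-wide switch (around v6.13) of MODULE_IMPORT_NS()/EXPORT_SYMBOL_NS*() from bare identifiers to string literals. Sketch of the matching pair (my_helper is illustrative):

	EXPORT_SYMBOL_NS_GPL(my_helper, "I915_GVT");	/* exporting module */
	MODULE_IMPORT_NS("I915_GVT");			/* importing module */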
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 908f910420c2..509f9ccae3a9 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -439,7 +439,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
gvt_vgpu_err("requesting SMI service\n");
return 0;
}
- /* ignore non 0->1 trasitions */
+ /* ignore non 0->1 transitions */
if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
& SWSCI_SCI_TRIGGER) ||
!(swsci & SWSCI_SCI_TRIGGER)) {
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index 60a65435556d..20c3cd807488 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -167,7 +167,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
return -ENXIO;
if (unlikely(vgpu->failsafe)) {
- /* Remove write protection to prevent furture traps. */
+ /* Remove write protection to prevent future traps. */
intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT);
} else {
ret = page_track->handler(page_track, gpa, data, bytes);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index a5c8005ec484..23f2cc397ec9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1052,7 +1052,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp;
- /* free the unsubmited workloads in the queues. */
+ /* free the unsubmitted workloads in the queues. */
for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 5ec293011d99..0dbc4e289300 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -212,7 +212,7 @@ active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
struct i915_active_fence *active =
container_of(cb, typeof(*active), cb);
- return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
+ return try_cmpxchg(__active_fence_slot(active), &fence, NULL);
}
static void
@@ -527,24 +527,6 @@ int i915_active_acquire(struct i915_active *ref)
return err;
}
-int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
-{
- struct i915_active_fence *active;
- int err;
-
- err = i915_active_acquire(ref);
- if (err)
- return err;
-
- active = active_instance(ref, idx);
- if (!active) {
- i915_active_release(ref);
- return -ENOMEM;
- }
-
- return 0; /* return with active ref */
-}
-
void i915_active_release(struct i915_active *ref)
{
debug_active_assert(ref);
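try_cmpxchg() returns a bool and, on failure, writes the observed value back into the expected-value argument, which typically reads better and can compile to tighter code than comparing cmpxchg()'s return value. A sketch of the equivalence (the two forms below are alternatives; here the updated expected value is simply discarded, as in the callback above):

	struct dma_fence *expected = fence;

	bool ok_old = cmpxchg(slot, fence, NULL) == fence;
	bool ok_new = try_cmpxchg(slot, &expected, NULL);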
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 77c676ecc263..821f7c21ea9b 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -186,7 +186,6 @@ int i915_request_await_active(struct i915_request *rq,
#define I915_ACTIVE_AWAIT_BARRIER BIT(2)
int i915_active_acquire(struct i915_active *ref);
-int i915_active_acquire_for_context(struct i915_active *ref, u64 idx);
bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f969f585d07b..1c2a97f593c7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -33,8 +33,6 @@
#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
-#include "display/intel_display_params.h"
-
#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
@@ -66,7 +64,6 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_display *display = &i915->display;
struct drm_printer p = drm_seq_file_printer(m);
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
@@ -76,10 +73,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
- kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915->params, &p);
- intel_display_params_dump(display, &p);
- kernel_param_unlock(THIS_MODULE);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index a40f05b993da..c2ae37d6b94d 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -27,6 +27,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -39,17 +40,17 @@
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include "display/i9xx_display_sr.h"
#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
+#include "display/intel_crtc.h"
#include "display/intel_display_driver.h"
-#include "display/intel_display.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
@@ -59,7 +60,8 @@
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
#include "display/intel_pps.h"
-#include "display/intel_sprite.h"
+#include "display/intel_sprite_uapi.h"
+#include "display/intel_vga.h"
#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
@@ -93,17 +95,20 @@
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
-#include "i915_suspend.h"
+#include "i915_reg.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_clock_gating.h"
+#include "intel_cpu_info.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_region_ttm.h"
+#include "intel_sbi.h"
+#include "vlv_sideband.h"
#include "vlv_suspend.h"
static const struct drm_driver i915_drm_driver;
@@ -217,6 +222,7 @@ static void sanitize_gpu(struct drm_i915_private *i915)
*/
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
int ret = 0;
if (i915_inject_probe_failure(dev_priv))
@@ -231,8 +237,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
+ intel_sbi_init(dev_priv);
+ vlv_iosf_sb_init(dev_priv);
mutex_init(&dev_priv->sb_lock);
- cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -259,7 +266,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_detect_pch(dev_priv);
intel_irq_init(dev_priv);
- intel_display_driver_early_probe(dev_priv);
+ intel_display_driver_early_probe(display);
intel_clock_gating_hooks_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -282,16 +289,19 @@ err_workqueues:
*/
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
intel_irq_fini(dev_priv);
- intel_power_domains_cleanup(dev_priv);
+ intel_power_domains_cleanup(display);
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release_all(dev_priv);
intel_region_ttm_device_fini(dev_priv);
vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
- cpu_latency_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
+ vlv_iosf_sb_fini(dev_priv);
+ intel_sbi_fini(dev_priv);
i915_params_free(&dev_priv->params);
}
@@ -307,6 +317,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
*/
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
int ret, i;
@@ -332,7 +343,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
/* Try to make sure MCHBAR is enabled before poking at it */
intel_gmch_bar_setup(dev_priv);
intel_device_info_runtime_init(dev_priv);
- intel_display_device_info_runtime_init(dev_priv);
+ intel_display_device_info_runtime_init(display);
for_each_gt(gt, dev_priv, i) {
ret = intel_gt_init_mmio(gt);
@@ -415,6 +426,18 @@ mask_err:
return ret;
}
+/* Wa_14022698537:dg2 */
+static void i915_enable_g8(struct drm_i915_private *i915)
+{
+ if (IS_DG2(i915)) {
+ if (IS_DG2_D(i915) && !intel_match_g8_cpu())
+ return;
+
+ snb_pcode_write_p(&i915->uncore, PCODE_POWER_SETUP,
+ POWER_SETUP_SUBCOMMAND_G8_ENABLE, 0, 0);
+ }
+}
+
static int i915_pcode_init(struct drm_i915_private *i915)
{
struct intel_gt *gt;
@@ -428,6 +451,7 @@ static int i915_pcode_init(struct drm_i915_private *i915)
}
}
+ i915_enable_g8(i915);
return 0;
}
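
i915_enable_g8() gates the workaround twice: it applies only on DG2, and on DG2-D boards only when the host CPU matches. A hedged sketch of that early-return gating follows, with stand-in predicates for IS_DG2(), IS_DG2_D() and intel_match_g8_cpu(); the stub return values are arbitrary and the pcode write is reduced to a print.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical platform/CPU checks; the real ones live in the driver. */
	static bool is_dg2(void)         { return true; }
	static bool is_dg2_d(void)       { return true; }
	static bool cpu_matches_g8(void) { return false; }

	static void enable_g8(void)
	{
		if (!is_dg2())
			return;                 /* workaround applies to DG2 only */

		/* DG2-D boards additionally require a matching host CPU. */
		if (is_dg2_d() && !cpu_matches_g8()) {
			puts("G8 left disabled: DG2-D without matching CPU");
			return;
		}

		puts("G8 enabled");             /* would be snb_pcode_write_p() */
	}

	int main(void) { enable_g8(); return 0; }
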
@@ -485,7 +509,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_perf;
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, dev_priv->drm.driver->name);
if (ret)
goto err_ggtt;
@@ -599,6 +623,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
*/
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
unsigned int i;
@@ -627,9 +652,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
i915_hwmon_register(dev_priv);
- intel_display_driver_register(dev_priv);
+ intel_display_driver_register(display);
- intel_power_domains_enable(dev_priv);
+ intel_power_domains_enable(display);
intel_runtime_pm_enable(&dev_priv->runtime_pm);
intel_register_dsm_handler();
@@ -644,6 +669,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_gt *gt;
unsigned int i;
@@ -652,9 +678,9 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_unregister_dsm_handler();
intel_runtime_pm_disable(&dev_priv->runtime_pm);
- intel_power_domains_disable(dev_priv);
+ intel_power_domains_disable(display);
- intel_display_driver_unregister(dev_priv);
+ intel_display_driver_unregister(display);
intel_pxp_fini(dev_priv);
@@ -731,7 +757,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, match_info);
- intel_display_device_probe(i915);
+ intel_display_device_probe(pdev);
return i915;
}
@@ -750,6 +776,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct drm_i915_private *i915;
+ struct intel_display *display;
int ret;
ret = pci_enable_device(pdev);
@@ -764,6 +791,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return PTR_ERR(i915);
}
+ display = &i915->display;
+
ret = i915_driver_early_probe(i915);
if (ret < 0)
goto out_pci_disable;
@@ -784,7 +813,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- ret = intel_display_driver_probe_noirq(i915);
+ ret = intel_display_driver_probe_noirq(display);
if (ret < 0)
goto out_cleanup_hw;
@@ -792,7 +821,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset;
- ret = intel_display_driver_probe_nogem(i915);
+ ret = intel_display_driver_probe_nogem(display);
if (ret)
goto out_cleanup_irq;
@@ -804,7 +833,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret && ret != -ENODEV)
drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
- ret = intel_display_driver_probe(i915);
+ ret = intel_display_driver_probe(display);
if (ret)
goto out_cleanup_gem;
@@ -824,14 +853,14 @@ out_cleanup_gem:
i915_gem_driver_release(i915);
out_cleanup_modeset2:
/* FIXME clean up the error path */
- intel_display_driver_remove(i915);
+ intel_display_driver_remove(display);
intel_irq_uninstall(i915);
- intel_display_driver_remove_noirq(i915);
+ intel_display_driver_remove_noirq(display);
goto out_cleanup_modeset;
out_cleanup_irq:
intel_irq_uninstall(i915);
out_cleanup_modeset:
- intel_display_driver_remove_nogem(i915);
+ intel_display_driver_remove_nogem(display);
out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
@@ -851,6 +880,7 @@ out_pci_disable:
void i915_driver_remove(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
@@ -864,16 +894,16 @@ void i915_driver_remove(struct drm_i915_private *i915)
intel_gvt_driver_remove(i915);
- intel_display_driver_remove(i915);
+ intel_display_driver_remove(display);
intel_irq_uninstall(i915);
- intel_display_driver_remove_noirq(i915);
+ intel_display_driver_remove_noirq(display);
i915_reset_error_state(i915);
i915_gem_driver_remove(i915);
- intel_display_driver_remove_nogem(i915);
+ intel_display_driver_remove_nogem(display);
i915_driver_hw_remove(i915);
@@ -883,6 +913,7 @@ void i915_driver_remove(struct drm_i915_private *i915)
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t wakeref;
@@ -906,7 +937,7 @@ static void i915_driver_release(struct drm_device *dev)
i915_driver_late_release(dev_priv);
- intel_display_device_remove(dev_priv);
+ intel_display_device_remove(display);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -936,30 +967,32 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
disable_rpm_wakeref_asserts(&i915->runtime_pm);
intel_runtime_pm_disable(&i915->runtime_pm);
- intel_power_domains_disable(i915);
+ intel_power_domains_disable(display);
intel_fbdev_set_suspend(&i915->drm, FBINFO_STATE_SUSPENDED, true);
if (HAS_DISPLAY(i915)) {
drm_kms_helper_poll_disable(&i915->drm);
- intel_display_driver_disable_user_access(i915);
+ intel_display_driver_disable_user_access(display);
drm_atomic_helper_shutdown(&i915->drm);
}
- intel_dp_mst_suspend(i915);
+ intel_dp_mst_suspend(display);
- intel_runtime_pm_disable_interrupts(i915);
+ intel_irq_suspend(i915);
intel_hpd_cancel_work(i915);
if (HAS_DISPLAY(i915))
- intel_display_driver_suspend_access(i915);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&i915->display);
intel_encoder_shutdown_all(&i915->display);
- intel_dmc_suspend(i915);
+ intel_dmc_suspend(&i915->display);
i915_gem_suspend(i915);
@@ -974,7 +1007,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
* - unify the driver remove and system/runtime suspend sequences with
* the above unified shutdown/poweroff sequence.
*/
- intel_power_domains_driver_remove(i915);
+ intel_power_domains_driver_remove(display);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
intel_runtime_pm_driver_last_release(&i915->runtime_pm);
@@ -1022,24 +1055,22 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers; make sure they
 * work properly. */
- intel_power_domains_disable(dev_priv);
+ intel_power_domains_disable(display);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
if (HAS_DISPLAY(dev_priv)) {
drm_kms_helper_poll_disable(dev);
- intel_display_driver_disable_user_access(dev_priv);
+ intel_display_driver_disable_user_access(display);
}
pci_save_state(pdev);
- intel_display_driver_suspend(dev_priv);
+ intel_display_driver_suspend(display);
- intel_dp_mst_suspend(dev_priv);
-
- intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_irq_suspend(dev_priv);
intel_hpd_cancel_work(dev_priv);
if (HAS_DISPLAY(dev_priv))
- intel_display_driver_suspend_access(dev_priv);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&dev_priv->display);
@@ -1047,14 +1078,14 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_dpt_suspend(dev_priv);
i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
- i915_save_display(dev_priv);
+ i9xx_display_sr_save(display);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_suspend(display, opregion_target_state);
dev_priv->suspend_count++;
- intel_dmc_suspend(dev_priv);
+ intel_dmc_suspend(display);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1066,6 +1097,7 @@ static int i915_drm_suspend(struct drm_device *dev)
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct intel_gt *gt;
@@ -1081,14 +1113,12 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
for_each_gt(gt, dev_priv, i)
intel_uncore_suspend(gt->uncore);
- intel_power_domains_suspend(dev_priv, s2idle);
-
- intel_display_power_suspend_late(dev_priv);
+ intel_display_power_suspend_late(display, s2idle);
ret = vlv_suspend_complete(dev_priv);
if (ret) {
drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
- intel_power_domains_resume(dev_priv);
+ intel_display_power_resume_early(display);
goto out;
}
@@ -1164,9 +1194,14 @@ static int i915_drm_resume(struct drm_device *dev)
/* Must be called after GGTT is resumed. */
intel_dpt_resume(dev_priv);
- intel_dmc_resume(dev_priv);
+ intel_dmc_resume(display);
+
+ i9xx_display_sr_restore(display);
+
+ intel_vga_redisable(display);
+
+ intel_gmbus_reset(display);
- i915_restore_display(dev_priv);
intel_pps_unlock_regs_wa(display);
intel_init_pch_refclk(dev_priv);
@@ -1181,28 +1216,26 @@ static int i915_drm_resume(struct drm_device *dev)
* Modeset enabling in intel_display_driver_init_hw() also needs working
* interrupts.
*/
- intel_runtime_pm_enable_interrupts(dev_priv);
+ intel_irq_resume(dev_priv);
if (HAS_DISPLAY(dev_priv))
drm_mode_config_reset(dev);
i915_gem_resume(dev_priv);
- intel_display_driver_init_hw(dev_priv);
+ intel_display_driver_init_hw(display);
intel_clock_gating_init(dev_priv);
if (HAS_DISPLAY(dev_priv))
- intel_display_driver_resume_access(dev_priv);
+ intel_display_driver_resume_access(display);
intel_hpd_init(dev_priv);
- /* MST sideband requires HPD interrupts enabled */
- intel_dp_mst_resume(dev_priv);
- intel_display_driver_resume(dev_priv);
+ intel_display_driver_resume(display);
if (HAS_DISPLAY(dev_priv)) {
- intel_display_driver_enable_user_access(dev_priv);
+ intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
intel_hpd_poll_disable(dev_priv);
@@ -1211,7 +1244,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
- intel_power_domains_enable(dev_priv);
+ intel_power_domains_enable(display);
intel_gvt_resume(dev_priv);
@@ -1223,6 +1256,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_display *display = &dev_priv->display;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gt *gt;
int ret, i;
@@ -1282,9 +1316,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
for_each_gt(gt, dev_priv, i)
intel_gt_resume_early(gt);
- intel_display_power_resume_early(dev_priv);
-
- intel_power_domains_resume(dev_priv);
+ intel_display_power_resume_early(display);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1481,12 +1513,12 @@ static int intel_runtime_suspend(struct device *kdev)
for_each_gt(gt, dev_priv, i)
intel_gt_runtime_suspend(gt);
- intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_irq_suspend(dev_priv);
for_each_gt(gt, dev_priv, i)
intel_uncore_suspend(gt->uncore);
- intel_display_power_suspend(dev_priv);
+ intel_display_power_suspend(display);
ret = vlv_suspend_complete(dev_priv);
if (ret) {
@@ -1494,7 +1526,7 @@ static int intel_runtime_suspend(struct device *kdev)
"Runtime suspend failed, disabling it (%d)\n", ret);
intel_uncore_runtime_resume(&dev_priv->uncore);
- intel_runtime_pm_enable_interrupts(dev_priv);
+ intel_irq_resume(dev_priv);
for_each_gt(gt, dev_priv, i)
intel_gt_runtime_resume(gt);
@@ -1580,14 +1612,14 @@ static int intel_runtime_resume(struct device *kdev)
drm_dbg(&dev_priv->drm,
"Unclaimed access during suspend, bios?\n");
- intel_display_power_resume(dev_priv);
+ intel_display_power_resume(display);
ret = vlv_resume_prepare(dev_priv, true);
for_each_gt(gt, dev_priv, i)
intel_uncore_runtime_resume(gt->uncore);
- intel_runtime_pm_enable_interrupts(dev_priv);
+ intel_irq_resume(dev_priv);
/*
* No point of rolling back things in case of an error, as the best
@@ -1725,7 +1757,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_crtc_get_pipe_from_crtc_id_ioctl, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
@@ -1785,7 +1817,6 @@ static const struct drm_driver i915_drm_driver = {
.fops = &i915_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 94a70d8ec5d5..4b67ad9a61cd 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -15,7 +15,6 @@ struct drm_printer;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20230929"
#define DRIVER_TIMESTAMP 1695980603
extern const struct dev_pm_ops i915_pm_ops;
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index f58682505491..168d7375304b 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -102,6 +102,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
for_each_memory_region(mr, i915, id)
drm_print_memory_stats(p,
&stats[id],
+ DRM_GEM_OBJECT_ACTIVE |
DRM_GEM_OBJECT_RESIDENT |
DRM_GEM_OBJECT_PURGEABLE,
mr->uabi_name);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 39f6614a0a99..b96b8de12756 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -101,14 +101,6 @@ struct i915_dsm {
resource_size_t usable_size;
};
-struct i915_suspend_saved_registers {
- u32 saveDSPARB;
- u32 saveSWF0[16];
- u32 saveSWF1[16];
- u32 saveSWF3[3];
- u16 saveGCDGMBUS;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -234,10 +226,19 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+ bool irqs_enabled;
+
+ /* LPT/WPT IOSF sideband protection */
+ struct mutex sbi_lock;
+
+ /* VLV/CHV IOSF sideband */
+ struct {
+ struct mutex lock; /* protect sideband access */
+ struct pm_qos_request qos;
+ } vlv_iosf_sb;
/* Sideband mailbox protection */
struct mutex sb_lock;
- struct pm_qos_request sb_qos;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask;
@@ -290,7 +291,6 @@ struct drm_i915_private {
struct i915_gpu_error gpu_error;
u32 suspend_count;
- struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state *vlv_s0ix_state;
struct dram_info {
@@ -343,8 +343,6 @@ struct drm_i915_private {
struct intel_pxp *pxp;
- bool irq_enabled;
-
struct i915_pmu pmu;
/* The TTM device structure. */
@@ -508,8 +506,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_PLATFORM(i915, INTEL_IRONLAKE) && IS_MOBILE(i915))
#define IS_SANDYBRIDGE(i915) IS_PLATFORM(i915, INTEL_SANDYBRIDGE)
#define IS_IVYBRIDGE(i915) IS_PLATFORM(i915, INTEL_IVYBRIDGE)
-#define IS_IVB_GT1(i915) (IS_IVYBRIDGE(i915) && \
- INTEL_INFO(i915)->gt == 1)
#define IS_VALLEYVIEW(i915) IS_PLATFORM(i915, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(i915) IS_PLATFORM(i915, INTEL_CHERRYVIEW)
#define IS_HASWELL(i915) IS_PLATFORM(i915, INTEL_HASWELL)
@@ -539,15 +535,22 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
*/
#define IS_LUNARLAKE(i915) (0 && i915)
#define IS_BATTLEMAGE(i915) (0 && i915)
-
-#define IS_ARROWLAKE(i915) \
- IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL)
+#define IS_PANTHERLAKE(i915) (0 && i915)
+
+#define IS_ARROWLAKE_H(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
+#define IS_ARROWLAKE_U(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_U)
+#define IS_ARROWLAKE_S(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_S)
#define IS_DG2_G10(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_DG2_G12(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G12)
+#define IS_DG2_D(i915) \
+ IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_D)
#define IS_RAPTORLAKE_S(i915) \
IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
#define IS_ALDERLAKE_P_N(i915) \
@@ -562,14 +565,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BROADWELL_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_BROADWELL_GT3(i915) (IS_BROADWELL(i915) && \
- INTEL_INFO(i915)->gt == 3)
#define IS_HASWELL_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_HASWELL_GT3(i915) (IS_HASWELL(i915) && \
- INTEL_INFO(i915)->gt == 3)
-#define IS_HASWELL_GT1(i915) (IS_HASWELL(i915) && \
- INTEL_INFO(i915)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HASWELL_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
@@ -581,31 +578,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KABYLAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_SKYLAKE_GT2(i915) (IS_SKYLAKE(i915) && \
- INTEL_INFO(i915)->gt == 2)
-#define IS_SKYLAKE_GT3(i915) (IS_SKYLAKE(i915) && \
- INTEL_INFO(i915)->gt == 3)
-#define IS_SKYLAKE_GT4(i915) (IS_SKYLAKE(i915) && \
- INTEL_INFO(i915)->gt == 4)
-#define IS_KABYLAKE_GT2(i915) (IS_KABYLAKE(i915) && \
- INTEL_INFO(i915)->gt == 2)
-#define IS_KABYLAKE_GT3(i915) (IS_KABYLAKE(i915) && \
- INTEL_INFO(i915)->gt == 3)
#define IS_COFFEELAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_COFFEELAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_COFFEELAKE_GT2(i915) (IS_COFFEELAKE(i915) && \
- INTEL_INFO(i915)->gt == 2)
-#define IS_COFFEELAKE_GT3(i915) (IS_COFFEELAKE(i915) && \
- INTEL_INFO(i915)->gt == 3)
-
#define IS_COMETLAKE_ULT(i915) \
IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_COMETLAKE_ULX(i915) \
IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_COMETLAKE_GT2(i915) (IS_COMETLAKE(i915) && \
- INTEL_INFO(i915)->gt == 2)
#define IS_ICL_WITH_PORT_F(i915) \
IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
@@ -613,9 +593,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_TIGERLAKE_UY(i915) \
IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
-#define IS_LP(i915) (INTEL_INFO(i915)->is_lp)
-#define IS_GEN9_LP(i915) (GRAPHICS_VER(i915) == 9 && IS_LP(i915))
-#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_LP(i915))
+#define IS_GEN9_LP(i915) (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
+#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_GEN9_LP(i915))
#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
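
IS_GEN9_LP() is redefined above in terms of platform identity (Broxton or Geminilake) rather than the removed is_lp device-info flag, and IS_GEN9_BC() is then derived from it. A self-contained sketch of composing such predicate macros, with invented platform enums standing in for the driver's:

	#include <stdio.h>

	enum platform { BROXTON, GEMINILAKE, SKYLAKE };

	struct dev { enum platform p; int graphics_ver; };

	/* Predicates composed from platform identity instead of an is_lp flag. */
	#define IS_BROXTON(d)    ((d)->p == BROXTON)
	#define IS_GEMINILAKE(d) ((d)->p == GEMINILAKE)
	#define IS_GEN9_LP(d)    (IS_BROXTON(d) || IS_GEMINILAKE(d))
	#define IS_GEN9_BC(d)    ((d)->graphics_ver == 9 && !IS_GEN9_LP(d))

	int main(void)
	{
		struct dev skl = { SKYLAKE, 9 };
		struct dev bxt = { BROXTON, 9 };

		printf("skl: lp=%d bc=%d\n", IS_GEN9_LP(&skl), IS_GEN9_BC(&skl));
		printf("bxt: lp=%d bc=%d\n", IS_GEN9_LP(&bxt), IS_GEN9_BC(&bxt));
		return 0;
	}
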
@@ -679,7 +658,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(i915) \
- (IS_SKYLAKE_GT3(i915) || IS_SKYLAKE_GT4(i915))
+ (IS_SKYLAKE(i915) && (INTEL_INFO(i915)->gt == 3 || INTEL_INFO(i915)->gt == 4))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -693,6 +672,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RPS(i915) (INTEL_INFO(i915)->has_rps)
+#define HAS_PXP(i915) \
+ (IS_ENABLED(CONFIG_DRM_I915_PXP) && INTEL_INFO(i915)->has_pxp)
+
#define HAS_HECI_PXP(i915) \
(INTEL_INFO(i915)->has_heci_pxp)
@@ -740,7 +722,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf)
-#define NUM_L3_SLICES(i915) (IS_HASWELL_GT3(i915) ? \
+#define NUM_L3_SLICES(i915) (IS_HASWELL(i915) && INTEL_INFO(i915)->gt == 3 ? \
2 : HAS_L3_DPF(i915))
#define HAS_GUC_DEPRIVILEGE(i915) \
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a9662cc6ed1e..25295eb626dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -71,7 +71,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
* i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
* @vm: the &struct i915_address_space
* @ww: An optional struct i915_gem_ww_ctx.
- * @node: the &struct drm_mm_node (typically i915_vma.mode)
+ * @node: the &struct drm_mm_node (typically i915_vma.node)
* @size: how much space to allocate inside the GTT,
* must be #I915_GTT_PAGE_SIZE aligned
* @offset: where to insert inside the GTT,
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index a62405787e77..be8149e46281 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -2,9 +2,9 @@
* SPDX-License-Identifier: MIT
*/
+#include "display/intel_overlay.h"
#include "gem/i915_gem_mman.h"
#include "gt/intel_engine_user.h"
-
#include "pxp/intel_pxp.h"
#include "i915_cmd_parser.h"
@@ -16,6 +16,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_display *display = &i915->display;
struct pci_dev *pdev = to_pci_dev(dev->dev);
const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
drm_i915_getparam_t *param = data;
@@ -38,7 +39,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = to_gt(i915)->ggtt->num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
- value = !!i915->display.overlay;
+ value = intel_overlay_available(display);
break;
case I915_PARAM_HAS_BSD:
value = !!intel_engine_lookup_user(i915,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6469b9bcf2ec..819ab933bb10 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -40,8 +40,7 @@
#include <drm/drm_cache.h>
#include <drm/drm_print.h>
-#include "display/intel_dmc.h"
-#include "display/intel_overlay.h"
+#include "display/intel_display_snapshot.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
@@ -651,8 +650,6 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
struct drm_printer p = i915_error_printer(m);
intel_device_info_print(&error->device_info, &error->runtime_info, &p);
- intel_display_device_info_print(&error->display_device_info,
- &error->display_runtime_info, &p);
intel_driver_caps_print(&error->driver_caps, &p);
}
@@ -660,10 +657,8 @@ static void err_print_params(struct drm_i915_error_state_buf *m,
const struct i915_params *params)
{
struct drm_printer p = i915_error_printer(m);
- struct intel_display *display = &m->i915->display;
i915_params_dump(params, &p);
- intel_display_params_dump(display, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
@@ -846,7 +841,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "Kernel: %s %s\n",
init_utsname()->release,
init_utsname()->machine);
- err_printf(m, "Driver: %s\n", DRIVER_DATE);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -875,8 +869,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- intel_dmc_print_error_state(&p, m->i915);
-
err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));
@@ -905,11 +897,10 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_print_gt_info(m, error->gt);
}
- if (error->overlay)
- intel_overlay_print_error_state(&p, error->overlay);
-
err_print_capabilities(m, error);
err_print_params(m, &error->params);
+
+ intel_display_snapshot_print(error->display_snapshot, &p);
}
static int err_print_to_sgl(struct i915_gpu_coredump *error)
@@ -1032,7 +1023,6 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
static void cleanup_params(struct i915_gpu_coredump *error)
{
i915_params_free(&error->params);
- intel_display_params_free(&error->display_params);
}
static void cleanup_uc(struct intel_uc_coredump *uc)
@@ -1077,7 +1067,7 @@ void __i915_gpu_coredump_free(struct kref *error_ref)
cleanup_gt(gt);
}
- kfree(error->overlay);
+ intel_display_snapshot_free(error->display_snapshot);
cleanup_params(error);
@@ -1113,7 +1103,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
}
INIT_LIST_HEAD(&dst->page_list);
- strcpy(dst->name, name);
+ strscpy(dst->name, name);
dst->next = NULL;
dst->gtt_offset = vma_res->start;
@@ -1413,7 +1403,7 @@ static bool record_context(struct i915_gem_context_coredump *e,
rcu_read_lock();
task = pid_task(ctx->pid, PIDTYPE_PID);
if (task) {
- strcpy(e->comm, task->comm);
+ strscpy(e->comm, task->comm);
e->pid = task->pid;
}
rcu_read_unlock();
@@ -1459,7 +1449,7 @@ capture_vma_snapshot(struct intel_engine_capture_vma *next,
return next;
}
- strcpy(c->name, name);
+ strscpy(c->name, name);
c->vma_res = i915_vma_resource_get(vma_res);
c->next = next;
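
The strcpy() to strscpy() conversions in these hunks bound each copy by the destination array's size (the kernel's two-argument strscpy() infers it via sizeof) and guarantee NUL termination. A userspace approximation of that behaviour, assuming a true array destination so sizeof is meaningful:

	#include <stdio.h>
	#include <string.h>

	/* Userspace stand-in for the kernel's two-argument strscpy(): copy at
	 * most sizeof(dst) - 1 bytes and always NUL-terminate. */
	#define strscpy_demo(dst, src) bounded_copy(dst, src, sizeof(dst))

	static long bounded_copy(char *dst, const char *src, size_t size)
	{
		size_t len = strnlen(src, size);

		if (len == size)
			len = size - 1;   /* truncate, as strscpy does on -E2BIG */
		memcpy(dst, src, len);
		dst[len] = '\0';
		return (long)len;
	}

	int main(void)
	{
		char name[8];

		strscpy_demo(name, "a-very-long-engine-name");
		printf("copied: '%s'\n", name);   /* truncated to "a-very-" */
		return 0;
	}
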
@@ -1652,9 +1642,21 @@ capture_engine(struct intel_engine_cs *engine,
return NULL;
intel_engine_get_hung_entity(engine, &ce, &rq);
- if (rq && !i915_request_started(rq))
- drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
- engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
+ if (rq && !i915_request_started(rq)) {
+ /*
+ * We also want to report the guc_id of the context, but if we
+ * don't have the context reference, then skip printing it.
+ */
+ if (ce)
+ drm_info(&engine->gt->i915->drm,
+ "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
+ engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
+ else
+ drm_info(&engine->gt->i915->drm,
+ "Got hung context on %s with active request %lld:%lld not yet started\n",
+ engine->name, rq->fence.context, rq->fence.seqno);
+ }
if (rq) {
capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
@@ -1993,17 +1995,12 @@ static void capture_gen(struct i915_gpu_coredump *error)
error->suspend_count = i915->suspend_count;
i915_params_copy(&error->params, &i915->params);
- intel_display_params_copy(&error->display_params);
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
memcpy(&error->runtime_info,
RUNTIME_INFO(i915),
sizeof(error->runtime_info));
- memcpy(&error->display_device_info, DISPLAY_INFO(i915),
- sizeof(error->display_device_info));
- memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915),
- sizeof(error->display_runtime_info));
error->driver_caps = i915->caps;
}
@@ -2097,6 +2094,7 @@ static struct i915_gpu_coredump *
__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
struct drm_i915_private *i915 = gt->i915;
+ struct intel_display *display = &i915->display;
struct i915_gpu_coredump *error;
/* Check if GPU capture has been disabled */
@@ -2138,7 +2136,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du
error->simulated |= error->gt->simulated;
}
- error->overlay = intel_overlay_capture_error_state(i915);
+ error->display_snapshot = intel_display_snapshot_capture(display);
return error;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 7c255bb1c319..78a8928562a9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -14,8 +14,6 @@
#include <drm/drm_mm.h>
-#include "display/intel_display_device.h"
-#include "display/intel_display_params.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_gt_types.h"
@@ -31,7 +29,7 @@
struct drm_i915_private;
struct i915_vma_compress;
struct intel_engine_capture_vma;
-struct intel_overlay_error_state;
+struct intel_display_snapshot;
struct i915_vma_coredump {
struct i915_vma_coredump *next;
@@ -212,15 +210,12 @@ struct i915_gpu_coredump {
struct intel_device_info device_info;
struct intel_runtime_info runtime_info;
- struct intel_display_device_info display_device_info;
- struct intel_display_runtime_info display_runtime_info;
struct intel_driver_caps driver_caps;
struct i915_params params;
- struct intel_display_params display_params;
-
- struct intel_overlay_error_state *overlay;
struct scatterlist *sgl, *fit;
+
+ struct intel_display_snapshot *display_snapshot;
};
struct i915_gpu_error {
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 17d30f6b84b0..7dfe1784153f 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -7,6 +7,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/types.h>
+#include <linux/units.h>
#include "i915_drv.h"
#include "i915_hwmon.h"
@@ -32,6 +33,7 @@
struct hwm_reg {
i915_reg_t gt_perf_status;
+ i915_reg_t pkg_temp;
i915_reg_t pkg_power_sku_unit;
i915_reg_t pkg_power_sku;
i915_reg_t pkg_rapl_limit;
@@ -280,6 +282,7 @@ static const struct attribute_group *hwm_groups[] = {
};
static const struct hwmon_channel_info * const hwm_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT),
HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
@@ -311,6 +314,37 @@ static int hwm_pcode_write_i1(struct drm_i915_private *i915, u32 uval)
}
static umode_t
+hwm_temp_is_visible(const struct hwm_drvdata *ddat, u32 attr)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+
+ if (attr == hwmon_temp_input && i915_mmio_reg_valid(hwmon->rg.pkg_temp))
+ return 0444;
+
+ return 0;
+}
+
+static int
+hwm_temp_read(struct hwm_drvdata *ddat, u32 attr, long *val)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+ intel_wakeref_t wakeref;
+ u32 reg_val;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
+ reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_temp);
+
+ /* HW register value is in degrees Celsius, convert to millidegrees. */
+ *val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
hwm_in_is_visible(const struct hwm_drvdata *ddat, u32 attr)
{
struct drm_i915_private *i915 = ddat->uncore->i915;
@@ -692,6 +726,8 @@ hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
struct hwm_drvdata *ddat = (struct hwm_drvdata *)drvdata;
switch (type) {
+ case hwmon_temp:
+ return hwm_temp_is_visible(ddat, attr);
case hwmon_in:
return hwm_in_is_visible(ddat, attr);
case hwmon_power:
@@ -714,6 +750,8 @@ hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct hwm_drvdata *ddat = dev_get_drvdata(dev);
switch (type) {
+ case hwmon_temp:
+ return hwm_temp_read(ddat, attr, val);
case hwmon_in:
return hwm_in_read(ddat, attr, val);
case hwmon_power:
@@ -810,6 +848,7 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
hwmon->rg.gt_perf_status = GEN12_RPSTAT1;
if (IS_DG1(i915) || IS_DG2(i915)) {
+ hwmon->rg.pkg_temp = PCU_PACKAGE_TEMPERATURE;
hwmon->rg.pkg_power_sku_unit = PCU_PACKAGE_POWER_SKU_UNIT;
hwmon->rg.pkg_power_sku = PCU_PACKAGE_POWER_SKU;
hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
@@ -817,6 +856,7 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
hwmon->rg.fan_speed = PCU_PWM_FAN_SPEED;
} else {
+ hwmon->rg.pkg_temp = INVALID_MMIO_REG;
hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2321de48d169..7920ad9585ae 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -77,39 +77,24 @@ static inline void pmu_irq_stats(struct drm_i915_private *i915,
WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
-void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
- i915_reg_t iir, i915_reg_t ier)
+void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
- intel_uncore_write(uncore, imr, 0xffffffff);
- intel_uncore_posting_read(uncore, imr);
+ intel_uncore_write(uncore, regs.imr, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.imr);
- intel_uncore_write(uncore, ier, 0);
+ intel_uncore_write(uncore, regs.ier, 0);
/* IIR can theoretically queue up two events. Be paranoid. */
- intel_uncore_write(uncore, iir, 0xffffffff);
- intel_uncore_posting_read(uncore, iir);
- intel_uncore_write(uncore, iir, 0xffffffff);
- intel_uncore_posting_read(uncore, iir);
-}
-
-static void gen2_irq_reset(struct intel_uncore *uncore)
-{
- intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
- intel_uncore_posting_read16(uncore, GEN2_IMR);
-
- intel_uncore_write16(uncore, GEN2_IER, 0);
-
- /* IIR can theoretically queue up two events. Be paranoid. */
- intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
- intel_uncore_posting_read16(uncore, GEN2_IIR);
- intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
- intel_uncore_posting_read16(uncore, GEN2_IIR);
+ intel_uncore_write(uncore, regs.iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.iir);
+ intel_uncore_write(uncore, regs.iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.iir);
}
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
-void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
+void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
u32 val = intel_uncore_read(uncore, reg);
@@ -125,42 +110,14 @@ void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
intel_uncore_posting_read(uncore, reg);
}
-static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
-{
- u16 val = intel_uncore_read16(uncore, GEN2_IIR);
-
- if (val == 0)
- return;
-
- drm_WARN(&uncore->i915->drm, 1,
- "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(GEN2_IIR), val);
- intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
- intel_uncore_posting_read16(uncore, GEN2_IIR);
- intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
- intel_uncore_posting_read16(uncore, GEN2_IIR);
-}
-
-void gen3_irq_init(struct intel_uncore *uncore,
- i915_reg_t imr, u32 imr_val,
- i915_reg_t ier, u32 ier_val,
- i915_reg_t iir)
-{
- gen3_assert_iir_is_zero(uncore, iir);
-
- intel_uncore_write(uncore, ier, ier_val);
- intel_uncore_write(uncore, imr, imr_val);
- intel_uncore_posting_read(uncore, imr);
-}
-
-static void gen2_irq_init(struct intel_uncore *uncore,
- u32 imr_val, u32 ier_val)
+void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
+ u32 imr_val, u32 ier_val)
{
- gen2_assert_iir_is_zero(uncore);
+ gen2_assert_iir_is_zero(uncore, regs.iir);
- intel_uncore_write16(uncore, GEN2_IER, ier_val);
- intel_uncore_write16(uncore, GEN2_IMR, imr_val);
- intel_uncore_posting_read16(uncore, GEN2_IMR);
+ intel_uncore_write(uncore, regs.ier, ier_val);
+ intel_uncore_write(uncore, regs.imr, imr_val);
+ intel_uncore_posting_read(uncore, regs.imr);
}
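
The gen3_*/gen2_* merge above replaces three loose register parameters with a single struct i915_irq_regs, so each block's IMR/IER/IIR triplet travels as one value and one reset routine serves every block. A minimal sketch of the same consolidation over a fake register file; all names and register indices here are invented.

	#include <stdint.h>
	#include <stdio.h>

	struct irq_regs { int imr, ier, iir; };  /* indices into a fake mmio array */

	static uint32_t mmio[16];

	static void wr(int reg, uint32_t v) { mmio[reg] = v; }
	static uint32_t rd(int reg)         { return mmio[reg]; }

	/* One reset routine serves every block once the triplet is a struct. */
	static void irq_reset(struct irq_regs regs)
	{
		wr(regs.imr, 0xffffffff);   /* mask everything */
		wr(regs.ier, 0);            /* disable delivery */
		wr(regs.iir, 0xffffffff);   /* ack twice: IIR can queue two events */
		wr(regs.iir, 0xffffffff);
	}

	#define DE_IRQ_REGS  ((struct irq_regs){ .imr = 0, .ier = 1, .iir = 2 })
	#define SDE_IRQ_REGS ((struct irq_regs){ .imr = 3, .ier = 4, .iir = 5 })

	int main(void)
	{
		irq_reset(DE_IRQ_REGS);
		irq_reset(SDE_IRQ_REGS);
		printf("DE IMR after reset: 0x%08x\n", rd(0));
		return 0;
	}
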
/**
@@ -298,7 +255,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
/* Call regardless, as some status bits might not be
- * signalled in iir */
+ * signalled in IIR */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
@@ -380,7 +337,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
/* Call regardless, as some status bits might not be
- * signalled in iir */
+ * signalled in IIR */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
@@ -665,7 +622,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
if (HAS_PCH_NOP(dev_priv))
return;
- GEN3_IRQ_RESET(uncore, SDE);
+ gen2_irq_reset(uncore, SDE_IRQ_REGS);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
@@ -677,7 +634,7 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- GEN3_IRQ_RESET(uncore, DE);
+ gen2_irq_reset(uncore, DE_IRQ_REGS);
dev_priv->irq_mask = ~0u;
if (GRAPHICS_VER(dev_priv) == 7)
@@ -701,8 +658,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
gen5_gt_irq_reset(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_reset(dev_priv);
+ vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -714,7 +670,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
gen8_gt_irq_reset(to_gt(dev_priv));
gen8_display_irq_reset(dev_priv);
- GEN3_IRQ_RESET(uncore, GEN8_PCU_);
+ gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
@@ -731,8 +687,8 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
gen11_gt_irq_reset(gt);
gen11_display_irq_reset(dev_priv);
- GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
- GEN3_IRQ_RESET(uncore, GEN8_PCU_);
+ gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
+ gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
}
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
@@ -748,8 +704,8 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
gen11_display_irq_reset(dev_priv);
- GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
- GEN3_IRQ_RESET(uncore, GEN8_PCU_);
+ gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS);
+ gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
intel_uncore_write(uncore, GEN11_GFX_MSTR_IRQ, ~0);
}
@@ -763,11 +719,10 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
gen8_gt_irq_reset(to_gt(dev_priv));
- GEN3_IRQ_RESET(uncore, GEN8_PCU_);
+ gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS);
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_reset(dev_priv);
+ vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -783,8 +738,7 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
gen5_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_postinstall(dev_priv);
+ vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
@@ -808,7 +762,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
gen11_gt_irq_postinstall(gt);
gen11_de_irq_postinstall(dev_priv);
- GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+ gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
gen11_master_intr_enable(intel_uncore_regs(uncore));
intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
@@ -824,7 +778,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
for_each_gt(gt, dev_priv, i)
gen11_gt_irq_postinstall(gt);
- GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+ gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked);
dg1_de_irq_postinstall(dev_priv);
@@ -837,24 +791,13 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.irq.display_irqs_enabled)
- vlv_display_irq_postinstall(dev_priv);
+ vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
-static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- i9xx_pipestat_irq_reset(dev_priv);
-
- gen2_irq_reset(uncore);
- dev_priv->irq_mask = ~0u;
-}
-
static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
/*
@@ -876,76 +819,6 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915)
I915_ERROR_MEMORY_REFRESH);
}
-static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u16 enable_mask;
-
- intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));
-
- /* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask =
- ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT);
-
- enable_mask =
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT |
- I915_USER_INTERRUPT;
-
- gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);
-
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void i8xx_error_irq_ack(struct drm_i915_private *i915,
- u16 *eir, u16 *eir_stuck)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u16 emr;
-
- *eir = intel_uncore_read16(uncore, EIR);
- intel_uncore_write16(uncore, EIR, *eir);
-
- *eir_stuck = intel_uncore_read16(uncore, EIR);
- if (*eir_stuck == 0)
- return;
-
- /*
- * Toggle all EMR bits to make sure we get an edge
- * in the ISR master error bit if we don't clear
- * all the EIR bits. Otherwise the edge triggered
- * IIR on i965/g4x wouldn't notice that an interrupt
- * is still pending. Also some EIR bits can't be
- * cleared except by handling the underlying error
- * (or by a GPU reset) so we mask any bit that
- * remains set.
- */
- emr = intel_uncore_read16(uncore, EMR);
- intel_uncore_write16(uncore, EMR, 0xffff);
- intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
-}
-
-static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
- u16 eir, u16 eir_stuck)
-{
- drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);
-
- if (eir_stuck)
- drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
- eir_stuck);
-
- drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
- intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
-}
-
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
u32 *eir, u32 *eir_stuck)
{
@@ -986,66 +859,13 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}
-static irqreturn_t i8xx_irq_handler(int irq, void *arg)
-{
- struct drm_i915_private *dev_priv = arg;
- irqreturn_t ret = IRQ_NONE;
-
- if (!intel_irqs_enabled(dev_priv))
- return IRQ_NONE;
-
- /* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
-
- do {
- u32 pipe_stats[I915_MAX_PIPES] = {};
- u16 eir = 0, eir_stuck = 0;
- u16 iir;
-
- iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
- if (iir == 0)
- break;
-
- ret = IRQ_HANDLED;
-
- /* Call regardless, as some status bits might not be
- * signalled in iir */
- i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
-
- if (iir & I915_MASTER_ERROR_INTERRUPT)
- i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
-
- intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
-
- if (iir & I915_USER_INTERRUPT)
- intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
-
- if (iir & I915_MASTER_ERROR_INTERRUPT)
- i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
-
- i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
- } while (0);
-
- pmu_irq_stats(dev_priv, ret);
-
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
-
- return ret;
-}
-
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- if (I915_HAS_HOTPLUG(dev_priv)) {
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- intel_uncore_rmw(&dev_priv->uncore,
- PORT_HOTPLUG_STAT(dev_priv), 0, 0);
- }
-
- i9xx_pipestat_irq_reset(dev_priv);
+ i9xx_display_irq_reset(dev_priv);
- GEN3_IRQ_RESET(uncore, GEN2_);
+ gen2_irq_reset(uncore, GEN2_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
@@ -1056,28 +876,28 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));
- /* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
- ~(I915_ASLE_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_MASTER_ERROR_INTERRUPT);
enable_mask =
- I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
+ if (DISPLAY_VER(dev_priv) >= 3) {
+ dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
+ enable_mask |= I915_ASLE_INTERRUPT;
+ }
+
if (I915_HAS_HOTPLUG(dev_priv)) {
- /* Enable in IER... */
- enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
- /* and unmask in IMR */
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
}
- GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
+ gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded; this is
 * just to make the assert_spin_locked check happy. */
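
The i915_irq_postinstall() hunk above builds irq_mask and enable_mask from a fixed base and then folds in per-feature bits: ASLE only on display version 3 and later, the display-port bit only when hotplug exists. The mask arithmetic looks like this, with illustrative bit assignments rather than the real register layout:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PIPE_A_EVENT (1u << 0)   /* bit positions here are illustrative */
	#define PIPE_B_EVENT (1u << 1)
	#define MASTER_ERROR (1u << 2)
	#define USER_INT     (1u << 3)
	#define ASLE_INT     (1u << 4)
	#define DISPLAY_PORT (1u << 5)

	int main(void)
	{
		bool has_asle = true, has_hotplug = false;
		uint32_t irq_mask, enable_mask;

		/* Base: everything masked except the always-wanted events. */
		irq_mask = ~(PIPE_A_EVENT | PIPE_B_EVENT | MASTER_ERROR);
		enable_mask = PIPE_A_EVENT | PIPE_B_EVENT |
			      MASTER_ERROR | USER_INT;

		if (has_asle) {          /* DISPLAY_VER >= 3 in the driver */
			irq_mask &= ~ASLE_INT;
			enable_mask |= ASLE_INT;
		}
		if (has_hotplug) {       /* I915_HAS_HOTPLUG() in the driver */
			irq_mask &= ~DISPLAY_PORT;
			enable_mask |= DISPLAY_PORT;
		}

		printf("IMR=0x%08x IER=0x%08x\n", irq_mask, enable_mask);
		return 0;
	}
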
@@ -1117,7 +937,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
/* Call regardless, as some status bits might not be
- * signalled in iir */
+ * signalled in IIR */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
@@ -1148,12 +968,9 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT(dev_priv), 0, 0);
+ i9xx_display_irq_reset(dev_priv);
- i9xx_pipestat_irq_reset(dev_priv);
-
- GEN3_IRQ_RESET(uncore, GEN2_);
+ gen2_irq_reset(uncore, GEN2_IRQ_REGS);
dev_priv->irq_mask = ~0u;
}
@@ -1183,7 +1000,6 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));
- /* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_ASLE_INTERRUPT |
I915_DISPLAY_PORT_INTERRUPT |
@@ -1202,7 +1018,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
- GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
+ gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded; this is
 * just to make the assert_spin_locked check happy. */
@@ -1242,7 +1058,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
/* Call regardless, as some status bits might not be
- * signalled in iir */
+ * signalled in IIR */
i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
if (iir & I915_MASTER_ERROR_INTERRUPT)
@@ -1317,10 +1133,8 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
return valleyview_irq_handler;
else if (GRAPHICS_VER(dev_priv) == 4)
return i965_irq_handler;
- else if (GRAPHICS_VER(dev_priv) == 3)
- return i915_irq_handler;
else
- return i8xx_irq_handler;
+ return i915_irq_handler;
} else {
if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
return dg1_irq_handler;
@@ -1342,10 +1156,8 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv)
valleyview_irq_reset(dev_priv);
else if (GRAPHICS_VER(dev_priv) == 4)
i965_irq_reset(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 3)
- i915_irq_reset(dev_priv);
else
- i8xx_irq_reset(dev_priv);
+ i915_irq_reset(dev_priv);
} else {
if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
dg1_irq_reset(dev_priv);
@@ -1367,10 +1179,8 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
valleyview_irq_postinstall(dev_priv);
else if (GRAPHICS_VER(dev_priv) == 4)
i965_irq_postinstall(dev_priv);
- else if (GRAPHICS_VER(dev_priv) == 3)
- i915_irq_postinstall(dev_priv);
else
- i8xx_irq_postinstall(dev_priv);
+ i915_irq_postinstall(dev_priv);
} else {
if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
dg1_irq_postinstall(dev_priv);
@@ -1404,16 +1214,14 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
* interrupts as enabled _before_ actually enabling them to avoid
* special cases in our ordering checks.
*/
- dev_priv->runtime_pm.irqs_enabled = true;
-
- dev_priv->irq_enabled = true;
+ dev_priv->irqs_enabled = true;
intel_irq_reset(dev_priv);
ret = request_irq(irq, intel_irq_handler(dev_priv),
IRQF_SHARED, DRIVER_NAME, dev_priv);
if (ret < 0) {
- dev_priv->irq_enabled = false;
+ dev_priv->irqs_enabled = false;
return ret;
}
@@ -1433,56 +1241,46 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
int irq = to_pci_dev(dev_priv->drm.dev)->irq;
- /*
- * FIXME we can get called twice during driver probe
- * error handling as well as during driver remove due to
- * intel_display_driver_remove() calling us out of sequence.
- * Would be nice if it didn't do that...
- */
- if (!dev_priv->irq_enabled)
+ if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled))
return;
- dev_priv->irq_enabled = false;
-
intel_irq_reset(dev_priv);
free_irq(irq, dev_priv);
intel_hpd_cancel_work(dev_priv);
- dev_priv->runtime_pm.irqs_enabled = false;
+ dev_priv->irqs_enabled = false;
}
/**
- * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
- * @dev_priv: i915 device instance
+ * intel_irq_suspend - Suspend interrupts
+ * @i915: i915 device instance
*
- * This function is used to disable interrupts at runtime, both in the runtime
- * pm and the system suspend/resume code.
+ * This function is used to disable interrupts at runtime.
*/
-void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
+void intel_irq_suspend(struct drm_i915_private *i915)
{
- intel_irq_reset(dev_priv);
- dev_priv->runtime_pm.irqs_enabled = false;
- intel_synchronize_irq(dev_priv);
+ intel_irq_reset(i915);
+ i915->irqs_enabled = false;
+ intel_synchronize_irq(i915);
}
/**
- * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
- * @dev_priv: i915 device instance
+ * intel_irq_resume - Resume interrupts
+ * @i915: i915 device instance
*
- * This function is used to enable interrupts at runtime, both in the runtime
- * pm and the system suspend/resume code.
+ * This function is used to enable interrupts at runtime.
*/
-void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
+void intel_irq_resume(struct drm_i915_private *i915)
{
- dev_priv->runtime_pm.irqs_enabled = true;
- intel_irq_reset(dev_priv);
- intel_irq_postinstall(dev_priv);
+ i915->irqs_enabled = true;
+ intel_irq_reset(i915);
+ intel_irq_postinstall(i915);
}
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
- return dev_priv->runtime_pm.irqs_enabled;
+ return dev_priv->irqs_enabled;
}
void intel_synchronize_irq(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index e665a1b007dc..0457f6402e05 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -34,45 +34,17 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask);
-void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
+void intel_irq_suspend(struct drm_i915_private *i915);
+void intel_irq_resume(struct drm_i915_private *i915);
bool intel_irqs_enabled(struct drm_i915_private *dev_priv);
void intel_synchronize_irq(struct drm_i915_private *i915);
void intel_synchronize_hardirq(struct drm_i915_private *i915);
-void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg);
+void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg);
-void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
- i915_reg_t iir, i915_reg_t ier);
+void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs);
-void gen3_irq_init(struct intel_uncore *uncore,
- i915_reg_t imr, u32 imr_val,
- i915_reg_t ier, u32 ier_val,
- i915_reg_t iir);
-
-#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
-({ \
- unsigned int which_ = which; \
- gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
- GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
-})
-
-#define GEN3_IRQ_RESET(uncore, type) \
- gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
-
-#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
-({ \
- unsigned int which_ = which; \
- gen3_irq_init((uncore), \
- GEN8_##type##_IMR(which_), imr_val, \
- GEN8_##type##_IER(which_), ier_val, \
- GEN8_##type##_IIR(which_)); \
-})
-
-#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
- gen3_irq_init((uncore), \
- type##IMR, imr_val, \
- type##IER, ier_val, \
- type##IIR)
+void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
+ u32 imr_val, u32 ier_val);
#endif /* __I915_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index f5c97a620962..76e2801619f0 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -143,8 +143,8 @@ int remap_io_sg(struct vm_area_struct *vma,
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
- while (offset >= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT) {
- offset -= sg_dma_len(r.sgt.sgp) >> PAGE_SHIFT;
+ while (offset >= r.sgt.max >> PAGE_SHIFT) {
+ offset -= r.sgt.max >> PAGE_SHIFT;
r.sgt = __sgt_iter(__sg_next(r.sgt.sgp), use_dma(iobase));
if (!r.sgt.sgp)
return -EINVAL;
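
The remap_io_sg() fix reads each segment's length from r.sgt.max, which is already sized for DMA or CPU mappings, while keeping the same walk: skip whole segments until the requested page offset falls inside one. The same walk over a plain array of per-segment page counts:

	#include <stdio.h>

	/* Find which segment holds `offset` (in pages) and the offset within
	 * it. Returns the segment index, or -1 when the offset runs past the
	 * list -- the analogue of remap_io_sg() returning -EINVAL. */
	static int find_segment(const unsigned long *seg_pages, int nsegs,
				unsigned long *offset)
	{
		for (int i = 0; i < nsegs; i++) {
			if (*offset < seg_pages[i])
				return i;
			*offset -= seg_pages[i];   /* skip the whole segment */
		}
		return -1;
	}

	int main(void)
	{
		unsigned long segs[] = { 4, 8, 2 };   /* pages per segment */
		unsigned long off = 9;
		int idx = find_segment(segs, 3, &off);

		/* page 9 lands in segment 1, at page 5 within it */
		printf("segment %d, page %lu within it\n", idx, off);
		return 0;
	}
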
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index d37bb3a704d0..21006c7f615c 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -24,7 +24,7 @@
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
-#include <drm/intel/i915_pciids.h>
+#include <drm/intel/pciids.h>
#include "display/intel_display_driver.h"
#include "gt/intel_gt_regs.h"
@@ -367,7 +367,6 @@ static const struct intel_device_info ivb_q_info = {
static const struct intel_device_info vlv_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
- .is_lp = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_reset_engine = true,
@@ -451,7 +450,6 @@ static const struct intel_device_info bdw_gt3_info = {
static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
- .is_lp = 1,
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
@@ -512,7 +510,6 @@ static const struct intel_device_info skl_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
- .is_lp = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
@@ -870,6 +867,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_info),
INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_info),
INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_info),
+ INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_info),
INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_info),
{}
};
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2406cda75b7b..5384d1bb4923 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -4802,7 +4802,7 @@ err_unlock:
return ret;
}
-static struct ctl_table oa_table[] = {
+static const struct ctl_table oa_table[] = {
{
.procname = "perf_stream_paranoid",
.data = &i915_perf_stream_paranoid,
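
oa_table can become const because the sysctl registration path takes const tables (and modern tables no longer need an empty sentinel entry). The same pattern in miniature; the path and knob names below are invented for illustration:

#include <linux/init.h>
#include <linux/sysctl.h>

static int example_paranoid = 1;

static const struct ctl_table example_table[] = {
	{
		.procname	= "example_paranoid",
		.data		= &example_paranoid,
		.maxlen		= sizeof(example_paranoid),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static struct ctl_table_header *example_header;

static int __init example_sysctl_init(void)
{
	/* register_sysctl() sizes the table with ARRAY_SIZE() internally. */
	example_header = register_sysctl("dev/example", example_table);
	return example_header ? 0 : -ENOMEM;
}
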
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 21eb0c5b320d..e55db036be1b 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -302,7 +302,7 @@ void i915_pmu_gt_parked(struct intel_gt *gt)
{
struct i915_pmu *pmu = &gt->i915->pmu;
- if (!pmu->base.event_init)
+ if (!pmu->registered)
return;
spin_lock_irq(&pmu->lock);
@@ -324,7 +324,7 @@ void i915_pmu_gt_unparked(struct intel_gt *gt)
{
struct i915_pmu *pmu = &gt->i915->pmu;
- if (!pmu->base.event_init)
+ if (!pmu->registered)
return;
spin_lock_irq(&pmu->lock);
@@ -356,7 +356,7 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915)
return GRAPHICS_VER(i915) == 7;
}
-static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+static void gen3_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
{
struct intel_engine_pmu *pmu = &engine->pmu;
bool busy;
@@ -391,6 +391,31 @@ static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns
add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}
+static void gen2_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+ struct intel_engine_pmu *pmu = &engine->pmu;
+ u32 tail, head, acthd;
+
+ tail = ENGINE_READ_FW(engine, RING_TAIL);
+ head = ENGINE_READ_FW(engine, RING_HEAD);
+ acthd = ENGINE_READ_FW(engine, ACTHD);
+
+ if (head & HEAD_WAIT_I8XX)
+ add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
+
+ if (head & HEAD_WAIT_I8XX || head != acthd ||
+ (head & HEAD_ADDR) != (tail & TAIL_ADDR))
+ add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+}
+
+static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+ if (GRAPHICS_VER(engine->i915) >= 3)
+ gen3_engine_sample(engine, period_ns);
+ else
+ gen2_engine_sample(engine, period_ns);
+}
+
static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
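
The new gen2 path infers engine state purely from the ring registers, presumably because gen2 hardware lacks the RING_CTL wait bits and the MI_MODE idle flag that the gen3 sampler reads. The heuristic restated as a predicate (a sketch, not part of the diff):

static bool gen2_engine_busy_sketch(u32 head, u32 tail, u32 acthd)
{
	if (head & HEAD_WAIT_I8XX)	/* stalled on MI_WAIT_FOR_EVENT */
		return true;
	if (head != acthd)		/* command streamer still fetching */
		return true;
	/* The ring holds unconsumed commands if head and tail differ. */
	return (head & HEAD_ADDR) != (tail & TAIL_ADDR);
}
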
@@ -601,7 +626,7 @@ static int i915_pmu_event_init(struct perf_event *event)
struct drm_i915_private *i915 = pmu_to_i915(pmu);
int ret;
- if (pmu->closed)
+ if (!pmu->registered)
return -ENODEV;
if (event->attr.type != event->pmu->type)
@@ -699,7 +724,7 @@ static void i915_pmu_event_read(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
u64 prev, new;
- if (pmu->closed) {
+ if (!pmu->registered) {
event->hw.state = PERF_HES_STOPPED;
return;
}
@@ -825,7 +850,7 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
{
struct i915_pmu *pmu = event_to_pmu(event);
- if (pmu->closed)
+ if (!pmu->registered)
return;
i915_pmu_enable(event);
@@ -834,15 +859,14 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
- if (pmu->closed)
+ if (!pmu->registered)
goto out;
if (flags & PERF_EF_UPDATE)
i915_pmu_event_read(event);
+
i915_pmu_disable(event);
out:
@@ -853,7 +877,7 @@ static int i915_pmu_event_add(struct perf_event *event, int flags)
{
struct i915_pmu *pmu = event_to_pmu(event);
- if (pmu->closed)
+ if (!pmu->registered)
return -ENODEV;
if (flags & PERF_EF_START)
@@ -1153,8 +1177,6 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
- GEM_BUG_ON(!pmu->base.event_init);
-
/* Select the first online CPU as a designated reader. */
if (cpumask_empty(&i915_pmu_cpumask))
cpumask_set_cpu(cpu, &i915_pmu_cpumask);
@@ -1167,13 +1189,11 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
unsigned int target = i915_pmu_target_cpu;
- GEM_BUG_ON(!pmu->base.event_init);
-
/*
* Unregistering an instance generates a CPU offline event which we must
* ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
*/
- if (pmu->closed)
+ if (!pmu->registered)
return 0;
if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
@@ -1194,7 +1214,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
}
-static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
+static enum cpuhp_state cpuhp_state = CPUHP_INVALID;
int i915_pmu_init(void)
{
@@ -1208,39 +1228,28 @@ int i915_pmu_init(void)
pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
ret);
else
- cpuhp_slot = ret;
+ cpuhp_state = ret;
return 0;
}
void i915_pmu_exit(void)
{
- if (cpuhp_slot != CPUHP_INVALID)
- cpuhp_remove_multi_state(cpuhp_slot);
+ if (cpuhp_state != CPUHP_INVALID)
+ cpuhp_remove_multi_state(cpuhp_state);
}
static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
- if (cpuhp_slot == CPUHP_INVALID)
+ if (cpuhp_state == CPUHP_INVALID)
return -EINVAL;
- return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
+ return cpuhp_state_add_instance(cpuhp_state, &pmu->cpuhp.node);
}
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
- cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
-}
-
-static bool is_igp(struct drm_i915_private *i915)
-{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
-
- /* IGP is 0000:00:02.0 */
- return pci_domain_nr(pdev->bus) == 0 &&
- pdev->bus->number == 0 &&
- PCI_SLOT(pdev->devfn) == 2 &&
- PCI_FUNC(pdev->devfn) == 0;
+ cpuhp_state_remove_instance(cpuhp_state, &pmu->cpuhp.node);
}
void i915_pmu_register(struct drm_i915_private *i915)
@@ -1252,21 +1261,15 @@ void i915_pmu_register(struct drm_i915_private *i915)
&i915_pmu_cpumask_attr_group,
NULL
};
-
int ret = -ENOMEM;
- if (GRAPHICS_VER(i915) <= 2) {
- drm_info(&i915->drm, "PMU not supported for this GPU.");
- return;
- }
-
spin_lock_init(&pmu->lock);
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
pmu->cpuhp.cpu = -1;
init_rc6(pmu);
- if (!is_igp(i915)) {
+ if (IS_DGFX(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
"i915_%s",
dev_name(i915->drm.dev));
@@ -1308,6 +1311,8 @@ void i915_pmu_register(struct drm_i915_private *i915)
if (ret)
goto err_unreg;
+ pmu->registered = true;
+
return;
err_unreg:
@@ -1315,10 +1320,9 @@ err_unreg:
err_groups:
kfree(pmu->base.attr_groups);
err_attr:
- pmu->base.event_init = NULL;
free_event_attributes(pmu);
err_name:
- if (!is_igp(i915))
+ if (IS_DGFX(i915))
kfree(pmu->name);
err:
drm_notice(&i915->drm, "Failed to register PMU!\n");
@@ -1328,25 +1332,19 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
- if (!pmu->base.event_init)
+ if (!pmu->registered)
return;
- /*
- * "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu
- * ensures all currently executing ones will have exited before we
- * proceed with unregistration.
- */
- pmu->closed = true;
- synchronize_rcu();
+ /* Disconnect the PMU callbacks */
+ pmu->registered = false;
hrtimer_cancel(&pmu->timer);
i915_pmu_unregister_cpuhp_state(pmu);
perf_pmu_unregister(&pmu->base);
- pmu->base.event_init = NULL;
kfree(pmu->base.attr_groups);
- if (!is_igp(i915))
+ if (IS_DGFX(i915))
kfree(pmu->name);
free_event_attributes(pmu);
}
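
With the lifecycle tracked by a single pmu->registered flag, the i915 PMU behaves like any other dynamic perf source and is consumed from userspace through sysfs plus perf_event_open(2). A hedged standalone sketch; the config value is a placeholder that must be taken from the events/ directory in sysfs:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type, fd;
	FILE *f;

	/* perf core publishes the dynamic PMU type id in sysfs. */
	f = fopen("/sys/bus/event_source/devices/i915/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	attr.config = 0x1;	/* placeholder: read the real value from events/ */

	/* i915 events are system-wide: pid == -1 and a concrete cpu. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);
	return 0;
}
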
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 41af038c3738..8e66d63d0c9f 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -68,9 +68,9 @@ struct i915_pmu {
*/
struct pmu base;
/**
- * @closed: i915 is unregistering.
+ * @registered: PMU is registered and not in the process of being unregistered.
*/
- bool closed;
+ bool registered;
/**
* @name: Name as registered with perf core.
*/
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 41f4350a7c6c..765e6c0528fb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -144,8 +144,6 @@
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20)
-#define _VGA_MSR_WRITE _MMIO(0x3c2)
-
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
@@ -422,6 +420,11 @@
#define GEN2_IIR _MMIO(0x20a4)
#define GEN2_IMR _MMIO(0x20a8)
#define GEN2_ISR _MMIO(0x20ac)
+
+#define GEN2_IRQ_REGS I915_IRQ_REGS(GEN2_IMR, \
+ GEN2_IER, \
+ GEN2_IIR)
+
#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
#define GINT_DIS (1 << 22)
#define GCFG_DIS (1 << 8)
@@ -434,6 +437,10 @@
#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
#define VLV_PCBR_ADDR_SHIFT 12
+#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \
+ VLV_IER, \
+ VLV_IIR)
+
#define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
@@ -1060,141 +1067,78 @@
#define CLKGATE_DIS_PSL_EXT(pipe) \
_MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
-/* DDI Buffer Control */
-#define _DDI_CLK_VALFREQ_A 0x64030
-#define _DDI_CLK_VALFREQ_B 0x64130
-#define DDI_CLK_VALFREQ(port) _MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
-
/*
* Display engine regs
*/
/* Pipe/transcoder A timing regs */
#define _TRANS_HTOTAL_A 0x60000
+#define _TRANS_HTOTAL_B 0x61000
+#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
#define HTOTAL_MASK REG_GENMASK(31, 16)
#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal))
#define HACTIVE_MASK REG_GENMASK(15, 0)
#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay))
+
#define _TRANS_HBLANK_A 0x60004
+#define _TRANS_HBLANK_B 0x61004
+#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
#define HBLANK_END_MASK REG_GENMASK(31, 16)
#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end))
#define HBLANK_START_MASK REG_GENMASK(15, 0)
#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start))
+
#define _TRANS_HSYNC_A 0x60008
+#define _TRANS_HSYNC_B 0x61008
+#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
#define HSYNC_END_MASK REG_GENMASK(31, 16)
#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end))
#define HSYNC_START_MASK REG_GENMASK(15, 0)
#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start))
+
#define _TRANS_VTOTAL_A 0x6000c
+#define _TRANS_VTOTAL_B 0x6100c
+#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
#define VTOTAL_MASK REG_GENMASK(31, 16)
#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal))
#define VACTIVE_MASK REG_GENMASK(15, 0)
#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay))
+
#define _TRANS_VBLANK_A 0x60010
+#define _TRANS_VBLANK_B 0x61010
+#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
#define VBLANK_END_MASK REG_GENMASK(31, 16)
#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end))
#define VBLANK_START_MASK REG_GENMASK(15, 0)
#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start))
+
#define _TRANS_VSYNC_A 0x60014
+#define _TRANS_VSYNC_B 0x61014
+#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
#define VSYNC_END_MASK REG_GENMASK(31, 16)
#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end))
#define VSYNC_START_MASK REG_GENMASK(15, 0)
#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start))
-#define _TRANS_EXITLINE_A 0x60018
+
#define _PIPEASRC 0x6001c
+#define _PIPEBSRC 0x6101c
+#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16)
#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w))
#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0)
#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h))
-#define _BCLRPAT_A 0x60020
-#define _TRANS_VSYNCSHIFT_A 0x60028
-#define _TRANS_MULT_A 0x6002c
-/* Pipe/transcoder B timing regs */
-#define _TRANS_HTOTAL_B 0x61000
-#define _TRANS_HBLANK_B 0x61004
-#define _TRANS_HSYNC_B 0x61008
-#define _TRANS_VTOTAL_B 0x6100c
-#define _TRANS_VBLANK_B 0x61010
-#define _TRANS_VSYNC_B 0x61014
-#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_A 0x60020
#define _BCLRPAT_B 0x61020
-#define _TRANS_VSYNCSHIFT_B 0x61028
-#define _TRANS_MULT_B 0x6102c
-
-/* DSI 0 timing regs */
-#define _TRANS_HTOTAL_DSI0 0x6b000
-#define _TRANS_HSYNC_DSI0 0x6b008
-#define _TRANS_VTOTAL_DSI0 0x6b00c
-#define _TRANS_VSYNC_DSI0 0x6b014
-#define _TRANS_VSYNCSHIFT_DSI0 0x6b028
-
-/* DSI 1 timing regs */
-#define _TRANS_HTOTAL_DSI1 0x6b800
-#define _TRANS_HSYNC_DSI1 0x6b808
-#define _TRANS_VTOTAL_DSI1 0x6b80c
-#define _TRANS_VSYNC_DSI1 0x6b814
-#define _TRANS_VSYNCSHIFT_DSI1 0x6b828
-
-#define TRANS_HTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HTOTAL_A)
-#define TRANS_HBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HBLANK_A)
-#define TRANS_HSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_HSYNC_A)
-#define TRANS_VTOTAL(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VTOTAL_A)
-#define TRANS_VBLANK(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VBLANK_A)
-#define TRANS_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNC_A)
#define BCLRPAT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _BCLRPAT_A)
-#define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
-#define PIPESRC(dev_priv, pipe) _MMIO_TRANS2(dev_priv, (pipe), _PIPEASRC)
-#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
-/* VGA port control */
-#define ADPA _MMIO(0x61100)
-#define PCH_ADPA _MMIO(0xe1100)
-#define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100)
-
-#define ADPA_DAC_ENABLE (1 << 31)
-#define ADPA_DAC_DISABLE 0
-#define ADPA_PIPE_SEL_SHIFT 30
-#define ADPA_PIPE_SEL_MASK (1 << 30)
-#define ADPA_PIPE_SEL(pipe) ((pipe) << 30)
-#define ADPA_PIPE_SEL_SHIFT_CPT 29
-#define ADPA_PIPE_SEL_MASK_CPT (3 << 29)
-#define ADPA_PIPE_SEL_CPT(pipe) ((pipe) << 29)
-#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
-#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0 << 24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3 << 24)
-#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3 << 24)
-#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2 << 24)
-#define ADPA_CRT_HOTPLUG_ENABLE (1 << 23)
-#define ADPA_CRT_HOTPLUG_PERIOD_64 (0 << 22)
-#define ADPA_CRT_HOTPLUG_PERIOD_128 (1 << 22)
-#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0 << 21)
-#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1 << 21)
-#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0 << 20)
-#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1 << 20)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0 << 18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1 << 18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2 << 18)
-#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3 << 18)
-#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0 << 17)
-#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1 << 17)
-#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1 << 16)
-#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
-#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1 << 10)
-#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1 << 11)
-#define ADPA_HSYNC_CNTL_ENABLE 0
-#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
-#define ADPA_VSYNC_ACTIVE_LOW 0
-#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
-#define ADPA_HSYNC_ACTIVE_LOW 0
-#define ADPA_DPMS_MASK (~(3 << 10))
-#define ADPA_DPMS_ON (0 << 10)
-#define ADPA_DPMS_SUSPEND (1 << 10)
-#define ADPA_DPMS_STANDBY (2 << 10)
-#define ADPA_DPMS_OFF (3 << 10)
+#define _TRANS_VSYNCSHIFT_A 0x60028
+#define _TRANS_VSYNCSHIFT_B 0x61028
+#define TRANS_VSYNCSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_VSYNCSHIFT_A)
+#define _TRANS_MULT_A 0x6002c
+#define _TRANS_MULT_B 0x6102c
+#define TRANS_MULT(dev_priv, trans) _MMIO_TRANS2(dev_priv, (trans), _TRANS_MULT_A)
/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
@@ -1446,11 +1390,9 @@
#define DP_B _MMIO(0x64100)
#define DP_C _MMIO(0x64200)
#define DP_D _MMIO(0x64300)
-
#define VLV_DP_B _MMIO(VLV_DISPLAY_BASE + 0x64100)
#define VLV_DP_C _MMIO(VLV_DISPLAY_BASE + 0x64200)
#define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300)
-
#define DP_PORT_EN (1 << 31)
#define DP_PIPE_SEL_SHIFT 30
#define DP_PIPE_SEL_MASK (1 << 30)
@@ -1549,16 +1491,16 @@
*/
#define _PIPEA_DATA_M_G4X 0x70050
#define _PIPEB_DATA_M_G4X 0x71050
-
+#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
#define TU_SIZE_MASK REG_GENMASK(30, 25)
#define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */
-
#define DATA_LINK_M_N_MASK REG_GENMASK(23, 0)
#define DATA_LINK_N_MAX (0x800000)
#define _PIPEA_DATA_N_G4X 0x70054
#define _PIPEB_DATA_N_G4X 0x71054
+#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
/*
* Computing Link M and N values for the Display Port link
@@ -1570,22 +1512,22 @@
* The Link value is transmitted in the Main Stream
* Attributes and VB-ID.
*/
-
#define _PIPEA_LINK_M_G4X 0x70060
#define _PIPEB_LINK_M_G4X 0x71060
+#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
+
#define _PIPEA_LINK_N_G4X 0x70064
#define _PIPEB_LINK_N_G4X 0x71064
-
-#define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X)
-#define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X)
-#define PIPE_LINK_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_M_G4X, _PIPEB_LINK_M_G4X)
#define PIPE_LINK_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_LINK_N_G4X, _PIPEB_LINK_N_G4X)
/* Pipe A */
#define _PIPEADSL 0x70000
+#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */
#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0)
+
#define _TRANSACONF 0x70008
+#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
#define TRANSCONF_ENABLE REG_BIT(31)
#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */
#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */
@@ -1645,6 +1587,7 @@
#define TRANSCONF_PIXEL_COUNT_SCALING_X4 1
#define _PIPEASTAT 0x70024
+#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31)
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30)
#define PIPE_CRC_ERROR_ENABLE (1UL << 29)
@@ -1691,15 +1634,8 @@
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
#define PIPE_HBLANK_INT_STATUS (1UL << 0)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0)
-
-#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
-#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
-
-#define TRANSCONF(dev_priv, trans) _MMIO_PIPE2(dev_priv, (trans), _TRANSACONF)
-#define PIPEDSL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEADSL)
-#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
-#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
-#define PIPESTAT(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEASTAT)
+#define PIPESTAT_INT_ENABLE_MASK 0x7fff0000
+#define PIPESTAT_INT_STATUS_MASK 0x0000ffff
#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
#define PIPE_ARB_CTL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPE_ARB_CTL_A)
@@ -1707,6 +1643,7 @@
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
+#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
@@ -1734,23 +1671,15 @@
#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1)
#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2)
#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3)
-#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B)
#define _PIPE_MISC2_A 0x7002C
#define _PIPE_MISC2_B 0x7102C
+#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24)
#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80)
#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20)
#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
-#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B)
-
-#define _ICL_PIPE_A_STATUS 0x70058
-#define ICL_PIPESTATUS(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _ICL_PIPE_A_STATUS)
-#define PIPE_STATUS_UNDERRUN REG_BIT(31)
-#define PIPE_STATUS_SOFT_UNDERRUN_XELPD REG_BIT(28)
-#define PIPE_STATUS_HARD_UNDERRUN_XELPD REG_BIT(27)
-#define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26)
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29)
@@ -1803,180 +1732,6 @@
#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
-#define DSPARB(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
-#define DSPARB_CSTART_MASK (0x7f << 7)
-#define DSPARB_CSTART_SHIFT 7
-#define DSPARB_BSTART_MASK (0x7f)
-#define DSPARB_BSTART_SHIFT 0
-#define DSPARB_BEND_SHIFT 9 /* on 855 */
-#define DSPARB_AEND_SHIFT 0
-#define DSPARB_SPRITEA_SHIFT_VLV 0
-#define DSPARB_SPRITEA_MASK_VLV (0xff << 0)
-#define DSPARB_SPRITEB_SHIFT_VLV 8
-#define DSPARB_SPRITEB_MASK_VLV (0xff << 8)
-#define DSPARB_SPRITEC_SHIFT_VLV 16
-#define DSPARB_SPRITEC_MASK_VLV (0xff << 16)
-#define DSPARB_SPRITED_SHIFT_VLV 24
-#define DSPARB_SPRITED_MASK_VLV (0xff << 24)
-#define DSPARB2 _MMIO(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
-#define DSPARB_SPRITEA_HI_SHIFT_VLV 0
-#define DSPARB_SPRITEA_HI_MASK_VLV (0x1 << 0)
-#define DSPARB_SPRITEB_HI_SHIFT_VLV 4
-#define DSPARB_SPRITEB_HI_MASK_VLV (0x1 << 4)
-#define DSPARB_SPRITEC_HI_SHIFT_VLV 8
-#define DSPARB_SPRITEC_HI_MASK_VLV (0x1 << 8)
-#define DSPARB_SPRITED_HI_SHIFT_VLV 12
-#define DSPARB_SPRITED_HI_MASK_VLV (0x1 << 12)
-#define DSPARB_SPRITEE_HI_SHIFT_VLV 16
-#define DSPARB_SPRITEE_HI_MASK_VLV (0x1 << 16)
-#define DSPARB_SPRITEF_HI_SHIFT_VLV 20
-#define DSPARB_SPRITEF_HI_MASK_VLV (0x1 << 20)
-#define DSPARB3 _MMIO(VLV_DISPLAY_BASE + 0x7006c) /* chv */
-#define DSPARB_SPRITEE_SHIFT_VLV 0
-#define DSPARB_SPRITEE_MASK_VLV (0xff << 0)
-#define DSPARB_SPRITEF_SHIFT_VLV 8
-#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
-
-/* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
-#define DSPFW_SR_SHIFT 23
-#define DSPFW_SR_MASK (0x1ff << 23)
-#define DSPFW_CURSORB_SHIFT 16
-#define DSPFW_CURSORB_MASK (0x3f << 16)
-#define DSPFW_PLANEB_SHIFT 8
-#define DSPFW_PLANEB_MASK (0x7f << 8)
-#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */
-#define DSPFW_PLANEA_SHIFT 0
-#define DSPFW_PLANEA_MASK (0x7f << 0)
-#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
-#define DSPFW2(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
-#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
-#define DSPFW_FBC_SR_SHIFT 28
-#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
-#define DSPFW_FBC_HPLL_SR_SHIFT 24
-#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */
-#define DSPFW_SPRITEB_SHIFT (16)
-#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */
-#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */
-#define DSPFW_CURSORA_SHIFT 8
-#define DSPFW_CURSORA_MASK (0x3f << 8)
-#define DSPFW_PLANEC_OLD_SHIFT 0
-#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */
-#define DSPFW_SPRITEA_SHIFT 0
-#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
-#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
-#define DSPFW3(dev_priv) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
-#define DSPFW_HPLL_SR_EN (1 << 31)
-#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
-#define DSPFW_CURSOR_SR_SHIFT 24
-#define DSPFW_CURSOR_SR_MASK (0x3f << 24)
-#define DSPFW_HPLL_CURSOR_SHIFT 16
-#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16)
-#define DSPFW_HPLL_SR_SHIFT 0
-#define DSPFW_HPLL_SR_MASK (0x1ff << 0)
-
-/* vlv/chv */
-#define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070)
-#define DSPFW_SPRITEB_WM1_SHIFT 16
-#define DSPFW_SPRITEB_WM1_MASK (0xff << 16)
-#define DSPFW_CURSORA_WM1_SHIFT 8
-#define DSPFW_CURSORA_WM1_MASK (0x3f << 8)
-#define DSPFW_SPRITEA_WM1_SHIFT 0
-#define DSPFW_SPRITEA_WM1_MASK (0xff << 0)
-#define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074)
-#define DSPFW_PLANEB_WM1_SHIFT 24
-#define DSPFW_PLANEB_WM1_MASK (0xff << 24)
-#define DSPFW_PLANEA_WM1_SHIFT 16
-#define DSPFW_PLANEA_WM1_MASK (0xff << 16)
-#define DSPFW_CURSORB_WM1_SHIFT 8
-#define DSPFW_CURSORB_WM1_MASK (0x3f << 8)
-#define DSPFW_CURSOR_SR_WM1_SHIFT 0
-#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0)
-#define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078)
-#define DSPFW_SR_WM1_SHIFT 0
-#define DSPFW_SR_WM1_MASK (0x1ff << 0)
-#define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c)
-#define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
-#define DSPFW_SPRITED_WM1_SHIFT 24
-#define DSPFW_SPRITED_WM1_MASK (0xff << 24)
-#define DSPFW_SPRITED_SHIFT 16
-#define DSPFW_SPRITED_MASK_VLV (0xff << 16)
-#define DSPFW_SPRITEC_WM1_SHIFT 8
-#define DSPFW_SPRITEC_WM1_MASK (0xff << 8)
-#define DSPFW_SPRITEC_SHIFT 0
-#define DSPFW_SPRITEC_MASK_VLV (0xff << 0)
-#define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8)
-#define DSPFW_SPRITEF_WM1_SHIFT 24
-#define DSPFW_SPRITEF_WM1_MASK (0xff << 24)
-#define DSPFW_SPRITEF_SHIFT 16
-#define DSPFW_SPRITEF_MASK_VLV (0xff << 16)
-#define DSPFW_SPRITEE_WM1_SHIFT 8
-#define DSPFW_SPRITEE_WM1_MASK (0xff << 8)
-#define DSPFW_SPRITEE_SHIFT 0
-#define DSPFW_SPRITEE_MASK_VLV (0xff << 0)
-#define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
-#define DSPFW_PLANEC_WM1_SHIFT 24
-#define DSPFW_PLANEC_WM1_MASK (0xff << 24)
-#define DSPFW_PLANEC_SHIFT 16
-#define DSPFW_PLANEC_MASK_VLV (0xff << 16)
-#define DSPFW_CURSORC_WM1_SHIFT 8
-#define DSPFW_CURSORC_WM1_MASK (0x3f << 16)
-#define DSPFW_CURSORC_SHIFT 0
-#define DSPFW_CURSORC_MASK (0x3f << 0)
-
-/* vlv/chv high order bits */
-#define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064)
-#define DSPFW_SR_HI_SHIFT 24
-#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
-#define DSPFW_SPRITEF_HI_SHIFT 23
-#define DSPFW_SPRITEF_HI_MASK (1 << 23)
-#define DSPFW_SPRITEE_HI_SHIFT 22
-#define DSPFW_SPRITEE_HI_MASK (1 << 22)
-#define DSPFW_PLANEC_HI_SHIFT 21
-#define DSPFW_PLANEC_HI_MASK (1 << 21)
-#define DSPFW_SPRITED_HI_SHIFT 20
-#define DSPFW_SPRITED_HI_MASK (1 << 20)
-#define DSPFW_SPRITEC_HI_SHIFT 16
-#define DSPFW_SPRITEC_HI_MASK (1 << 16)
-#define DSPFW_PLANEB_HI_SHIFT 12
-#define DSPFW_PLANEB_HI_MASK (1 << 12)
-#define DSPFW_SPRITEB_HI_SHIFT 8
-#define DSPFW_SPRITEB_HI_MASK (1 << 8)
-#define DSPFW_SPRITEA_HI_SHIFT 4
-#define DSPFW_SPRITEA_HI_MASK (1 << 4)
-#define DSPFW_PLANEA_HI_SHIFT 0
-#define DSPFW_PLANEA_HI_MASK (1 << 0)
-#define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068)
-#define DSPFW_SR_WM1_HI_SHIFT 24
-#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */
-#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
-#define DSPFW_SPRITEF_WM1_HI_MASK (1 << 23)
-#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
-#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22)
-#define DSPFW_PLANEC_WM1_HI_SHIFT 21
-#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21)
-#define DSPFW_SPRITED_WM1_HI_SHIFT 20
-#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20)
-#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
-#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16)
-#define DSPFW_PLANEB_WM1_HI_SHIFT 12
-#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12)
-#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
-#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8)
-#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
-#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4)
-#define DSPFW_PLANEA_WM1_HI_SHIFT 0
-#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0)
-
-/* drain latency register values*/
-#define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
-#define DDL_CURSOR_SHIFT 24
-#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite))
-#define DDL_PLANE_SHIFT 0
-#define DDL_PRECISION_HIGH (1 << 7)
-#define DDL_PRECISION_LOW (0 << 7)
-#define DRAIN_LATENCY_MASK 0x7f
-
#define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400)
#define CBR_PND_DEADLINE_DISABLE (1 << 31)
#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30)
@@ -1984,72 +1739,6 @@
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */
-/* FIFO watermark sizes etc */
-#define G4X_FIFO_LINE_SIZE 64
-#define I915_FIFO_LINE_SIZE 64
-#define I830_FIFO_LINE_SIZE 32
-
-#define VALLEYVIEW_FIFO_SIZE 255
-#define G4X_FIFO_SIZE 127
-#define I965_FIFO_SIZE 512
-#define I945_FIFO_SIZE 127
-#define I915_FIFO_SIZE 95
-#define I855GM_FIFO_SIZE 127 /* In cachelines */
-#define I830_FIFO_SIZE 95
-
-#define VALLEYVIEW_MAX_WM 0xff
-#define G4X_MAX_WM 0x3f
-#define I915_MAX_WM 0x3f
-
-#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
-#define PINEVIEW_FIFO_LINE_SIZE 64
-#define PINEVIEW_MAX_WM 0x1ff
-#define PINEVIEW_DFT_WM 0x3f
-#define PINEVIEW_DFT_HPLLOFF_WM 0
-#define PINEVIEW_GUARD_WM 10
-#define PINEVIEW_CURSOR_FIFO 64
-#define PINEVIEW_CURSOR_MAX_WM 0x3f
-#define PINEVIEW_CURSOR_DFT_WM 0
-#define PINEVIEW_CURSOR_GUARD_WM 5
-
-#define VALLEYVIEW_CURSOR_MAX_WM 64
-#define I965_CURSOR_FIFO 64
-#define I965_CURSOR_MAX_WM 32
-#define I965_CURSOR_DFT_WM 8
-
-/* define the Watermark register on Ironlake */
-#define _WM0_PIPEA_ILK 0x45100
-#define _WM0_PIPEB_ILK 0x45104
-#define _WM0_PIPEC_IVB 0x45200
-#define WM0_PIPE_ILK(pipe) _MMIO_BASE_PIPE3(0, (pipe), _WM0_PIPEA_ILK, \
- _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
-#define WM0_PIPE_PRIMARY_MASK REG_GENMASK(31, 16)
-#define WM0_PIPE_SPRITE_MASK REG_GENMASK(15, 8)
-#define WM0_PIPE_CURSOR_MASK REG_GENMASK(7, 0)
-#define WM0_PIPE_PRIMARY(x) REG_FIELD_PREP(WM0_PIPE_PRIMARY_MASK, (x))
-#define WM0_PIPE_SPRITE(x) REG_FIELD_PREP(WM0_PIPE_SPRITE_MASK, (x))
-#define WM0_PIPE_CURSOR(x) REG_FIELD_PREP(WM0_PIPE_CURSOR_MASK, (x))
-#define WM1_LP_ILK _MMIO(0x45108)
-#define WM2_LP_ILK _MMIO(0x4510c)
-#define WM3_LP_ILK _MMIO(0x45110)
-#define WM_LP_ENABLE REG_BIT(31)
-#define WM_LP_LATENCY_MASK REG_GENMASK(30, 24)
-#define WM_LP_FBC_MASK_BDW REG_GENMASK(23, 19)
-#define WM_LP_FBC_MASK_ILK REG_GENMASK(23, 20)
-#define WM_LP_PRIMARY_MASK REG_GENMASK(18, 8)
-#define WM_LP_CURSOR_MASK REG_GENMASK(7, 0)
-#define WM_LP_LATENCY(x) REG_FIELD_PREP(WM_LP_LATENCY_MASK, (x))
-#define WM_LP_FBC_BDW(x) REG_FIELD_PREP(WM_LP_FBC_MASK_BDW, (x))
-#define WM_LP_FBC_ILK(x) REG_FIELD_PREP(WM_LP_FBC_MASK_ILK, (x))
-#define WM_LP_PRIMARY(x) REG_FIELD_PREP(WM_LP_PRIMARY_MASK, (x))
-#define WM_LP_CURSOR(x) REG_FIELD_PREP(WM_LP_CURSOR_MASK, (x))
-#define WM1S_LP_ILK _MMIO(0x45120)
-#define WM2S_LP_IVB _MMIO(0x45124)
-#define WM3S_LP_IVB _MMIO(0x45128)
-#define WM_LP_SPRITE_ENABLE REG_BIT(31) /* ilk/snb WM1S only */
-#define WM_LP_SPRITE_MASK REG_GENMASK(10, 0)
-#define WM_LP_SPRITE(x) REG_FIELD_PREP(WM_LP_SPRITE_MASK, (x))
-
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2066,33 +1755,38 @@
* frame = (high1 << 8) | low1;
*/
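/*
 * A sketch of the loop the comment above elides (not part of this diff):
 * read high, then low, then high again, retrying until both high reads
 * agree:
 *
 *	do {
 *		high1 = (read(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK) >> PIPE_FRAME_HIGH_SHIFT;
 *		low1 = (read(PIPEFRAMEPIXEL(pipe)) & PIPE_FRAME_LOW_MASK) >> PIPE_FRAME_LOW_SHIFT;
 *		high2 = (read(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK) >> PIPE_FRAME_HIGH_SHIFT;
 *	} while (high1 != high2);
 *	frame = (high1 << 8) | low1;
 */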
#define _PIPEAFRAMEHIGH 0x70040
+#define PIPEFRAME(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEHIGH)
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
+
#define _PIPEAFRAMEPIXEL 0x70044
+#define PIPEFRAMEPIXEL(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEAFRAMEPIXEL)
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
+
/* GM45+ just has to be different */
#define _PIPEA_FRMCOUNT_G4X 0x70040
-#define _PIPEA_FLIPCOUNT_G4X 0x70044
#define PIPE_FRMCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FRMCOUNT_G4X)
+
+#define _PIPEA_FLIPCOUNT_G4X 0x70044
#define PIPE_FLIPCOUNT_G4X(dev_priv, pipe) _MMIO_PIPE2(dev_priv, pipe, _PIPEA_FLIPCOUNT_G4X)
/* CHV pipe B blender */
#define _CHV_BLEND_A 0x60a00
+#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
#define CHV_BLEND_MASK REG_GENMASK(31, 30)
#define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0)
#define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1)
#define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2)
+
#define _CHV_CANVAS_A 0x60a04
+#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
#define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20)
#define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10)
#define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0)
-#define CHV_BLEND(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_BLEND_A)
-#define CHV_CANVAS(dev_priv, pipe) _MMIO_TRANS2(dev_priv, pipe, _CHV_CANVAS_A)
-
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK)
@@ -2114,11 +1808,6 @@
#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
-/* ICL DSI 0 and 1 */
-#define _PIPEDSI0CONF 0x7b008
-#define _PIPEDSI1CONF 0x7b808
-
-
/* VBIOS regs */
#define VGACNTRL _MMIO(0x71400)
# define VGA_DISP_DISABLE (1 << 31)
@@ -2156,38 +1845,42 @@
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define _PIPEA_DATA_M1 0x60030
-#define _PIPEA_DATA_N1 0x60034
-#define _PIPEA_DATA_M2 0x60038
-#define _PIPEA_DATA_N2 0x6003c
-#define _PIPEA_LINK_M1 0x60040
-#define _PIPEA_LINK_N1 0x60044
-#define _PIPEA_LINK_M2 0x60048
-#define _PIPEA_LINK_N2 0x6004c
-
-/* PIPEB timing regs are same start from 0x61000 */
-
#define _PIPEB_DATA_M1 0x61030
-#define _PIPEB_DATA_N1 0x61034
-#define _PIPEB_DATA_M2 0x61038
-#define _PIPEB_DATA_N2 0x6103c
-#define _PIPEB_LINK_M1 0x61040
-#define _PIPEB_LINK_N1 0x61044
-#define _PIPEB_LINK_M2 0x61048
-#define _PIPEB_LINK_N2 0x6104c
-
#define PIPE_DATA_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M1)
+
+#define _PIPEA_DATA_N1 0x60034
+#define _PIPEB_DATA_N1 0x61034
#define PIPE_DATA_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N1)
+
+#define _PIPEA_DATA_M2 0x60038
+#define _PIPEB_DATA_M2 0x61038
#define PIPE_DATA_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_M2)
+
+#define _PIPEA_DATA_N2 0x6003c
+#define _PIPEB_DATA_N2 0x6103c
#define PIPE_DATA_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_DATA_N2)
+
+#define _PIPEA_LINK_M1 0x60040
+#define _PIPEB_LINK_M1 0x61040
#define PIPE_LINK_M1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M1)
+
+#define _PIPEA_LINK_N1 0x60044
+#define _PIPEB_LINK_N1 0x61044
#define PIPE_LINK_N1(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N1)
+
+#define _PIPEA_LINK_M2 0x60048
+#define _PIPEB_LINK_M2 0x61048
#define PIPE_LINK_M2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_M2)
+
+#define _PIPEA_LINK_N2 0x6004c
+#define _PIPEB_LINK_N2 0x6104c
#define PIPE_LINK_N2(dev_priv, tran) _MMIO_TRANS2(dev_priv, tran, _PIPEA_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
+#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
#define PF_ENABLE REG_BIT(31)
#define PF_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29) /* ivb/hsw */
#define PF_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(PF_PIPE_SEL_MASK_IVB, (pipe))
@@ -2196,37 +1889,43 @@
#define PF_FILTER_MED_3x3 REG_FIELD_PREP(PF_FILTER_MASK, 1)
#define PF_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 2)
#define PF_FILTER_EDGE_SOFTEN REG_FIELD_PREP(PF_FILTER_EDGE_MASK, 3)
+
#define _PFA_WIN_SZ 0x68074
#define _PFB_WIN_SZ 0x68874
+#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
#define PF_WIN_XSIZE_MASK REG_GENMASK(31, 16)
#define PF_WIN_XSIZE(w) REG_FIELD_PREP(PF_WIN_XSIZE_MASK, (w))
#define PF_WIN_YSIZE_MASK REG_GENMASK(15, 0)
#define PF_WIN_YSIZE(h) REG_FIELD_PREP(PF_WIN_YSIZE_MASK, (h))
+
#define _PFA_WIN_POS 0x68070
#define _PFB_WIN_POS 0x68870
+#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
#define PF_WIN_XPOS_MASK REG_GENMASK(31, 16)
#define PF_WIN_XPOS(x) REG_FIELD_PREP(PF_WIN_XPOS_MASK, (x))
#define PF_WIN_YPOS_MASK REG_GENMASK(15, 0)
#define PF_WIN_YPOS(y) REG_FIELD_PREP(PF_WIN_YPOS_MASK, (y))
+
#define _PFA_VSCALE 0x68084
#define _PFB_VSCALE 0x68884
+#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+
#define _PFA_HSCALE 0x68090
#define _PFB_HSCALE 0x68890
-
-#define PF_CTL(pipe) _MMIO_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
-#define PF_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
-#define PF_WIN_POS(pipe) _MMIO_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
-#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
#define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
/*
* Skylake scalers
*/
+#define _ID(id, a, b) _PICK_EVEN(id, a, b)
#define _PS_1A_CTRL 0x68180
#define _PS_2A_CTRL 0x68280
#define _PS_1B_CTRL 0x68980
#define _PS_2B_CTRL 0x68A80
#define _PS_1C_CTRL 0x69180
+#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
+ _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
#define PS_SCALER_EN REG_BIT(31)
#define PS_SCALER_TYPE_MASK REG_BIT(30) /* icl+ */
#define PS_SCALER_TYPE_NON_LINEAR REG_FIELD_PREP(PS_SCALER_TYPE_MASK, 0)
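/*
 * Worked check of the scaler indexing above, assuming the usual
 * definition _PICK_EVEN(__index, __a, __b) == (__a) + (__index) * ((__b) - (__a)):
 *
 *	SKL_PS_CTRL(PIPE_A, 0) -> 0x68180 (_PS_1A_CTRL)
 *	SKL_PS_CTRL(PIPE_A, 1) -> 0x68180 + 0x100 = 0x68280 (_PS_2A_CTRL)
 *	SKL_PS_CTRL(PIPE_B, 1) -> 0x68980 + 0x100 = 0x68A80 (_PS_2B_CTRL)
 */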
@@ -2279,6 +1978,9 @@
#define _PS_PWR_GATE_1B 0x68960
#define _PS_PWR_GATE_2B 0x68A60
#define _PS_PWR_GATE_1C 0x69160
+#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
+ _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
#define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31)
#define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3)
#define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0)
@@ -2296,6 +1998,9 @@
#define _PS_WIN_POS_1B 0x68970
#define _PS_WIN_POS_2B 0x68A70
#define _PS_WIN_POS_1C 0x69170
+#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
+ _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
#define PS_WIN_XPOS_MASK REG_GENMASK(31, 16)
#define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x))
#define PS_WIN_YPOS_MASK REG_GENMASK(15, 0)
@@ -2306,6 +2011,9 @@
#define _PS_WIN_SZ_1B 0x68974
#define _PS_WIN_SZ_2B 0x68A74
#define _PS_WIN_SZ_1C 0x69174
+#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
+ _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
#define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16)
#define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w))
#define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0)
@@ -2316,18 +2024,27 @@
#define _PS_VSCALE_1B 0x68984
#define _PS_VSCALE_2B 0x68A84
#define _PS_VSCALE_1C 0x69184
+#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
+ _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
#define _PS_HSCALE_1A 0x68190
#define _PS_HSCALE_2A 0x68290
#define _PS_HSCALE_1B 0x68990
#define _PS_HSCALE_2B 0x68A90
#define _PS_HSCALE_1C 0x69190
+#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
+ _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
#define _PS_VPHASE_1A 0x68188
#define _PS_VPHASE_2A 0x68288
#define _PS_VPHASE_1B 0x68988
#define _PS_VPHASE_2B 0x68A88
#define _PS_VPHASE_1C 0x69188
+#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
+ _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
#define PS_Y_PHASE_MASK REG_GENMASK(31, 16)
#define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x))
#define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0)
@@ -2340,56 +2057,32 @@
#define _PS_HPHASE_1B 0x68994
#define _PS_HPHASE_2B 0x68A94
#define _PS_HPHASE_1C 0x69194
+#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
+ _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
#define _PS_ECC_STAT_1A 0x681D0
#define _PS_ECC_STAT_2A 0x682D0
#define _PS_ECC_STAT_1B 0x689D0
#define _PS_ECC_STAT_2B 0x68AD0
#define _PS_ECC_STAT_1C 0x691D0
+#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
+ _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
#define _PS_COEF_SET0_INDEX_1A 0x68198
#define _PS_COEF_SET0_INDEX_2A 0x68298
#define _PS_COEF_SET0_INDEX_1B 0x68998
#define _PS_COEF_SET0_INDEX_2B 0x68A98
+#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \
+ _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
+ _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
#define PS_COEF_INDEX_AUTO_INC REG_BIT(10)
#define _PS_COEF_SET0_DATA_1A 0x6819C
#define _PS_COEF_SET0_DATA_2A 0x6829C
#define _PS_COEF_SET0_DATA_1B 0x6899C
#define _PS_COEF_SET0_DATA_2B 0x68A9C
-
-#define _ID(id, a, b) _PICK_EVEN(id, a, b)
-#define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
- _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
-#define SKL_PS_PWR_GATE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
- _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
-#define SKL_PS_WIN_POS(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
- _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
-#define SKL_PS_WIN_SZ(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
- _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
-#define SKL_PS_VSCALE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
- _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
-#define SKL_PS_HSCALE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
- _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
-#define SKL_PS_VPHASE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
- _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
-#define SKL_PS_HPHASE(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
- _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
-#define SKL_PS_ECC_STAT(pipe, id) _MMIO_PIPE(pipe, \
- _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
- _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
-#define GLK_PS_COEF_INDEX_SET(pipe, id, set) _MMIO_PIPE(pipe, \
- _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
- _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
-
#define GLK_PS_COEF_DATA_SET(pipe, id, set) _MMIO_PIPE(pipe, \
_ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
_ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
@@ -2459,11 +2152,19 @@
#define DEIIR _MMIO(0x44008)
#define DEIER _MMIO(0x4400c)
+#define DE_IRQ_REGS I915_IRQ_REGS(DEIMR, \
+ DEIER, \
+ DEIIR)
+
#define GTISR _MMIO(0x44010)
#define GTIMR _MMIO(0x44014)
#define GTIIR _MMIO(0x44018)
#define GTIER _MMIO(0x4401c)
+#define GT_IRQ_REGS I915_IRQ_REGS(GTIMR, \
+ GTIER, \
+ GTIIR)
+
#define GEN8_MASTER_IRQ _MMIO(0x44200)
#define GEN8_MASTER_IRQ_CONTROL (1 << 31)
#define GEN8_PCU_IRQ (1 << 30)
@@ -2489,6 +2190,10 @@
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
+#define GEN8_GT_IRQ_REGS(which) I915_IRQ_REGS(GEN8_GT_IMR(which), \
+ GEN8_GT_IER(which), \
+ GEN8_GT_IIR(which))
+
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! */
@@ -2506,9 +2211,7 @@
#define GEN12_PIPEDMC_INTERRUPT REG_BIT(26) /* tgl+ */
#define GEN12_PIPEDMC_FAULT REG_BIT(25) /* tgl+ */
#define MTL_PIPEDMC_ATS_FAULT REG_BIT(24) /* mtl+ */
-#define XELPD_PIPE_SOFT_UNDERRUN REG_BIT(22) /* adl/dg2+ */
#define GEN11_PIPE_PLANE7_FAULT REG_BIT(22) /* icl/tgl */
-#define XELPD_PIPE_HARD_UNDERRUN REG_BIT(21) /* adl/dg2+ */
#define GEN11_PIPE_PLANE6_FAULT REG_BIT(21) /* icl/tgl */
#define GEN11_PIPE_PLANE5_FAULT REG_BIT(20) /* icl+ */
#define GEN12_PIPE_VBLANK_UNMOD REG_BIT(19) /* tgl+ */
@@ -2540,6 +2243,10 @@
#define GEN8_PIPE_VSYNC REG_BIT(1)
#define GEN8_PIPE_VBLANK REG_BIT(0)
+#define GEN8_DE_PIPE_IRQ_REGS(pipe) I915_IRQ_REGS(GEN8_DE_PIPE_IMR(pipe), \
+ GEN8_DE_PIPE_IER(pipe), \
+ GEN8_DE_PIPE_IIR(pipe))
+
#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
#define _HPD_PIN_TC(hpd_pin) ((hpd_pin) - HPD_PORT_TC1)
@@ -2575,6 +2282,10 @@
#define TGL_DE_PORT_AUX_DDIB REG_BIT(1)
#define TGL_DE_PORT_AUX_DDIA REG_BIT(0)
+#define GEN8_DE_PORT_IRQ_REGS I915_IRQ_REGS(GEN8_DE_PORT_IMR, \
+ GEN8_DE_PORT_IER, \
+ GEN8_DE_PORT_IIR)
+
#define GEN8_DE_MISC_ISR _MMIO(0x44460)
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
#define GEN8_DE_MISC_IIR _MMIO(0x44468)
@@ -2584,18 +2295,31 @@
#define GEN8_DE_MISC_GSE REG_BIT(27)
#define GEN8_DE_EDP_PSR REG_BIT(19)
#define XELPDP_PMDEMAND_RSP REG_BIT(3)
+#define XE2LPD_DBUF_OVERLAP_DETECTED REG_BIT(1)
+
+#define GEN8_DE_MISC_IRQ_REGS I915_IRQ_REGS(GEN8_DE_MISC_IMR, \
+ GEN8_DE_MISC_IER, \
+ GEN8_DE_MISC_IIR)
#define GEN8_PCU_ISR _MMIO(0x444e0)
#define GEN8_PCU_IMR _MMIO(0x444e4)
#define GEN8_PCU_IIR _MMIO(0x444e8)
#define GEN8_PCU_IER _MMIO(0x444ec)
+#define GEN8_PCU_IRQ_REGS I915_IRQ_REGS(GEN8_PCU_IMR, \
+ GEN8_PCU_IER, \
+ GEN8_PCU_IIR)
+
#define GEN11_GU_MISC_ISR _MMIO(0x444f0)
#define GEN11_GU_MISC_IMR _MMIO(0x444f4)
#define GEN11_GU_MISC_IIR _MMIO(0x444f8)
#define GEN11_GU_MISC_IER _MMIO(0x444fc)
#define GEN11_GU_MISC_GSE (1 << 27)
+#define GEN11_GU_MISC_IRQ_REGS I915_IRQ_REGS(GEN11_GU_MISC_IMR, \
+ GEN11_GU_MISC_IER, \
+ GEN11_GU_MISC_IIR)
+
#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
#define GEN11_MASTER_IRQ (1 << 31)
#define GEN11_PCU_IRQ (1 << 30)
@@ -2639,6 +2363,10 @@
GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \
GEN11_TBT_HOTPLUG(HPD_PORT_TC1))
+#define GEN11_DE_HPD_IRQ_REGS I915_IRQ_REGS(GEN11_DE_HPD_IMR, \
+ GEN11_DE_HPD_IER, \
+ GEN11_DE_HPD_IIR)
+
#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030)
#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038)
#define GEN11_HOTPLUG_CTL_ENABLE(hpd_pin) (8 << (_HPD_PIN_TC(hpd_pin) * 4))
@@ -2659,6 +2387,10 @@
#define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
#define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0)
+#define PICAINTERRUPT_IRQ_REGS I915_IRQ_REGS(PICAINTERRUPT_IMR, \
+ PICAINTERRUPT_IER, \
+ PICAINTERRUPT_IIR)
+
#define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200))
#define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6)
#define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5)
@@ -2671,6 +2403,7 @@
#define XELPDP_PMDEMAND_QCLK_GV_BW_MASK REG_GENMASK(31, 16)
#define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK REG_GENMASK(14, 12)
#define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK REG_GENMASK(11, 8)
+#define XE3_PMDEMAND_PIPES_MASK REG_GENMASK(7, 4)
#define XELPDP_PMDEMAND_PIPES_MASK REG_GENMASK(7, 6)
#define XELPDP_PMDEMAND_DBUFS_MASK REG_GENMASK(5, 4)
#define XELPDP_PMDEMAND_PHYS_MASK REG_GENMASK(2, 0)
@@ -2775,7 +2508,7 @@
#define _CHICKEN_TRANS_C 0x420c8
#define _CHICKEN_TRANS_EDP 0x420cc
#define _CHICKEN_TRANS_D 0x420d8
-#define CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+#define _CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
[TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
[TRANSCODER_A] = _CHICKEN_TRANS_A, \
[TRANSCODER_B] = _CHICKEN_TRANS_B, \
@@ -2783,9 +2516,10 @@
[TRANSCODER_D] = _CHICKEN_TRANS_D))
#define _MTL_CHICKEN_TRANS_A 0x604e0
#define _MTL_CHICKEN_TRANS_B 0x614e0
-#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
+#define _MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
_MTL_CHICKEN_TRANS_A, \
_MTL_CHICKEN_TRANS_B)
+#define CHICKEN_TRANS(display, trans) (DISPLAY_VER(display) >= 14 ? _MTL_CHICKEN_TRANS(trans) : _CHICKEN_TRANS(trans))
#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
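
CHICKEN_TRANS() now takes the display pointer so the MTL register bank is selected at runtime rather than by each caller. A hedged usage sketch, assuming an intel_de_rmw() helper that accepts the display argument:

static void chicken_trans_set_sketch(struct intel_display *display,
				     enum transcoder cpu_transcoder,
				     u32 clear, u32 set)
{
	intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
		     clear, set);
}
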
@@ -2836,11 +2570,16 @@
#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
+#define _LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
-#define LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
-#define LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
-#define LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
+#define _LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
+#define _LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
+#define _LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
+#define LATENCY_REPORTING_REMOVED(pipe) _PICK((pipe), \
+ _LATENCY_REPORTING_REMOVED_PIPE_A, \
+ _LATENCY_REPORTING_REMOVED_PIPE_B, \
+ _LATENCY_REPORTING_REMOVED_PIPE_C, \
+ _LATENCY_REPORTING_REMOVED_PIPE_D)
#define ICL_DELAY_PMRSP REG_BIT(22)
#define DISABLE_FLR_SRC REG_BIT(15)
#define MASK_WAKEMEM REG_BIT(13)
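
Folding the four per-pipe bits into LATENCY_REPORTING_REMOVED(pipe) lets callers index by pipe instead of open-coding a switch; _PICK() simply selects the (pipe)-th value from the argument list. An illustrative usage sketch, not taken from the commit:

static void disable_latency_reporting_sketch(struct drm_i915_private *i915,
					     enum pipe pipe)
{
	intel_uncore_rmw(&i915->uncore, GEN8_CHICKEN_DCPR_1,
			 0, LATENCY_REPORTING_REMOVED(pipe));
}
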
@@ -2869,6 +2608,7 @@
#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
#define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
+#define XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3)
#define XE2LPD_DE_CAP _MMIO(0x41100)
#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30)
@@ -3015,6 +2755,10 @@
#define SDEIIR _MMIO(0xc4008)
#define SDEIER _MMIO(0xc400c)
+#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \
+ SDEIER, \
+ SDEIIR)
+
#define SERR_INT _MMIO(0xc4040)
#define SERR_INT_POISON (1 << 31)
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
@@ -3098,11 +2842,12 @@
#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
+#define _PCH_FPB0 0xc6048
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define FP_CB_TUNE (0x3 << 22)
+
#define _PCH_FPA1 0xc6044
-#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
-#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST _MMIO(0xc606c)
@@ -3155,50 +2900,93 @@
/* transcoder */
#define _PCH_TRANS_HTOTAL_A 0xe0000
+#define _PCH_TRANS_HTOTAL_B 0xe1000
+#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
#define TRANS_HTOTAL_SHIFT 16
#define TRANS_HACTIVE_SHIFT 0
+
#define _PCH_TRANS_HBLANK_A 0xe0004
+#define _PCH_TRANS_HBLANK_B 0xe1004
+#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
#define TRANS_HBLANK_END_SHIFT 16
#define TRANS_HBLANK_START_SHIFT 0
+
#define _PCH_TRANS_HSYNC_A 0xe0008
+#define _PCH_TRANS_HSYNC_B 0xe1008
+#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
#define TRANS_HSYNC_END_SHIFT 16
#define TRANS_HSYNC_START_SHIFT 0
+
#define _PCH_TRANS_VTOTAL_A 0xe000c
+#define _PCH_TRANS_VTOTAL_B 0xe100c
+#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
#define TRANS_VTOTAL_SHIFT 16
#define TRANS_VACTIVE_SHIFT 0
+
#define _PCH_TRANS_VBLANK_A 0xe0010
+#define _PCH_TRANS_VBLANK_B 0xe1010
+#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
+
#define _PCH_TRANS_VSYNC_A 0xe0014
+#define _PCH_TRANS_VSYNC_B 0xe1014
+#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
+
#define _PCH_TRANS_VSYNCSHIFT_A 0xe0028
+#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
+#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
#define _PCH_TRANSA_DATA_M1 0xe0030
+#define _PCH_TRANSB_DATA_M1 0xe1030
+#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
+
#define _PCH_TRANSA_DATA_N1 0xe0034
+#define _PCH_TRANSB_DATA_N1 0xe1034
+#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
+
#define _PCH_TRANSA_DATA_M2 0xe0038
+#define _PCH_TRANSB_DATA_M2 0xe1038
+#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
+
#define _PCH_TRANSA_DATA_N2 0xe003c
+#define _PCH_TRANSB_DATA_N2 0xe103c
+#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
+
#define _PCH_TRANSA_LINK_M1 0xe0040
+#define _PCH_TRANSB_LINK_M1 0xe1040
+#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
+
#define _PCH_TRANSA_LINK_N1 0xe0044
+#define _PCH_TRANSB_LINK_N1 0xe1044
+#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
+
#define _PCH_TRANSA_LINK_M2 0xe0048
+#define _PCH_TRANSB_LINK_M2 0xe1048
+#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
+
#define _PCH_TRANSA_LINK_N2 0xe004c
+#define _PCH_TRANSB_LINK_N2 0xe104c
+#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
/* Per-transcoder DIP controls (PCH) */
#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+
#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+
#define _VIDEO_DIP_GCP_A 0xe0210
+#define _VIDEO_DIP_GCP_B 0xe1210
+#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
#define GCP_COLOR_INDICATION (1 << 2)
#define GCP_DEFAULT_PHASE_ENABLE (1 << 1)
#define GCP_AV_MUTE (1 << 0)
-#define _VIDEO_DIP_CTL_B 0xe1200
-#define _VIDEO_DIP_DATA_B 0xe1208
-#define _VIDEO_DIP_GCP_B 0xe1210
-
-#define TVIDEO_DIP_CTL(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
-#define TVIDEO_DIP_DATA(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
-#define TVIDEO_DIP_GCP(pipe) _MMIO_PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
-
/* Per-transcoder DIP controls (VLV) */
#define _VLV_VIDEO_DIP_CTL_A 0x60200
#define _VLV_VIDEO_DIP_CTL_B 0x61170
@@ -3225,36 +3013,54 @@
_CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
/* Haswell DIP controls */
-
#define _HSW_VIDEO_DIP_CTL_A 0x60200
-#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
-#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
-#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
-#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
-#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
-#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
-#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
-#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
-#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
-#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
-#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
-#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
-#define _HSW_VIDEO_DIP_GCP_A 0x60210
-
#define _HSW_VIDEO_DIP_CTL_B 0x61200
+#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
+
+#define _HSW_VIDEO_DIP_AVI_DATA_A 0x60220
#define _HSW_VIDEO_DIP_AVI_DATA_B 0x61220
+#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_VS_DATA_A 0x60260
#define _HSW_VIDEO_DIP_VS_DATA_B 0x61260
+#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
+#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
+#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+
+/*ADLP and later: */
+#define _ADL_VIDEO_DIP_AS_DATA_A 0x60484
#define _ADL_VIDEO_DIP_AS_DATA_B 0x61484
+#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\
+ _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
+
+#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
+#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
+
+#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
+#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
+#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
+#define _HSW_VIDEO_DIP_GMP_ECC_A 0x60300
#define _HSW_VIDEO_DIP_GMP_ECC_B 0x61300
+#define _HSW_VIDEO_DIP_VSC_ECC_A 0x60344
#define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344
+
+#define _HSW_VIDEO_DIP_GCP_A 0x60210
#define _HSW_VIDEO_DIP_GCP_B 0x61210
+#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
/* Icelake PPS_DATA and _ECC DIP Registers.
* These are available for transcoders B,C and eDP.
@@ -3264,62 +3070,16 @@
#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350
#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350
+#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+
#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4
#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
-
-#define HSW_TVIDEO_DIP_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_CTL_A)
-#define HSW_TVIDEO_DIP_GCP(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GCP_A)
-#define HSW_TVIDEO_DIP_AVI_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_VS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_SPD_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_GMP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_VSC_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
-#define GLK_TVIDEO_DIP_DRM_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
-#define ICL_VIDEO_DIP_PPS_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_ECC(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
-/*ADLP and later: */
-#define ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, trans, i) _MMIO_TRANS2(dev_priv, trans,\
- _ADL_VIDEO_DIP_AS_DATA_A + (i) * 4)
#define _HSW_STEREO_3D_CTL_A 0x70020
-#define S3D_ENABLE (1 << 31)
#define _HSW_STEREO_3D_CTL_B 0x71020
-
#define HSW_STEREO_3D_CTL(dev_priv, trans) _MMIO_PIPE2(dev_priv, trans, _HSW_STEREO_3D_CTL_A)
-
-#define _PCH_TRANS_HTOTAL_B 0xe1000
-#define _PCH_TRANS_HBLANK_B 0xe1004
-#define _PCH_TRANS_HSYNC_B 0xe1008
-#define _PCH_TRANS_VTOTAL_B 0xe100c
-#define _PCH_TRANS_VBLANK_B 0xe1010
-#define _PCH_TRANS_VSYNC_B 0xe1014
-#define _PCH_TRANS_VSYNCSHIFT_B 0xe1028
-
-#define PCH_TRANS_HTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HTOTAL_A, _PCH_TRANS_HTOTAL_B)
-#define PCH_TRANS_HBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HBLANK_A, _PCH_TRANS_HBLANK_B)
-#define PCH_TRANS_HSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_HSYNC_A, _PCH_TRANS_HSYNC_B)
-#define PCH_TRANS_VTOTAL(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VTOTAL_A, _PCH_TRANS_VTOTAL_B)
-#define PCH_TRANS_VBLANK(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VBLANK_A, _PCH_TRANS_VBLANK_B)
-#define PCH_TRANS_VSYNC(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNC_A, _PCH_TRANS_VSYNC_B)
-#define PCH_TRANS_VSYNCSHIFT(pipe) _MMIO_PIPE(pipe, _PCH_TRANS_VSYNCSHIFT_A, _PCH_TRANS_VSYNCSHIFT_B)
-
-#define _PCH_TRANSB_DATA_M1 0xe1030
-#define _PCH_TRANSB_DATA_N1 0xe1034
-#define _PCH_TRANSB_DATA_M2 0xe1038
-#define _PCH_TRANSB_DATA_N2 0xe103c
-#define _PCH_TRANSB_LINK_M1 0xe1040
-#define _PCH_TRANSB_LINK_N1 0xe1044
-#define _PCH_TRANSB_LINK_M2 0xe1048
-#define _PCH_TRANSB_LINK_N2 0xe104c
-
-#define PCH_TRANS_DATA_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M1, _PCH_TRANSB_DATA_M1)
-#define PCH_TRANS_DATA_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N1, _PCH_TRANSB_DATA_N1)
-#define PCH_TRANS_DATA_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_M2, _PCH_TRANSB_DATA_M2)
-#define PCH_TRANS_DATA_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_DATA_N2, _PCH_TRANSB_DATA_N2)
-#define PCH_TRANS_LINK_M1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M1, _PCH_TRANSB_LINK_M1)
-#define PCH_TRANS_LINK_N1(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N1, _PCH_TRANSB_LINK_N1)
-#define PCH_TRANS_LINK_M2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_M2, _PCH_TRANSB_LINK_M2)
-#define PCH_TRANS_LINK_N2(pipe) _MMIO_PIPE(pipe, _PCH_TRANSA_LINK_N2, _PCH_TRANSB_LINK_N2)
+#define S3D_ENABLE (1 << 31)
#define _PCH_TRANSACONF 0xf0008
#define _PCH_TRANSBCONF 0xf1008
@@ -3571,6 +3331,7 @@
#define POWER_SETUP_I1_WATTS REG_BIT(31)
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define POWER_SETUP_SUBCOMMAND_G8_ENABLE 0x6
#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
@@ -3771,6 +3532,7 @@ enum skl_power_gate {
#define TRANS_DDI_PVSYNC (1 << 17)
#define TRANS_DDI_PHSYNC (1 << 16)
#define TRANS_DDI_PORT_SYNC_ENABLE REG_BIT(15)
+#define XE3_TRANS_DDI_HDCP_LINE_REKEY_DISABLE REG_BIT(15)
#define TRANS_DDI_EDP_INPUT_MASK (7 << 12)
#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12)
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
@@ -3815,25 +3577,26 @@ enum skl_power_gate {
#define _TGL_DP_TP_CTL_A 0x60540
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
#define TGL_DP_TP_CTL(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_CTL_A)
-#define DP_TP_CTL_ENABLE (1 << 31)
-#define DP_TP_CTL_FEC_ENABLE (1 << 30)
-#define DP_TP_CTL_MODE_SST (0 << 27)
-#define DP_TP_CTL_MODE_MST (1 << 27)
-#define DP_TP_CTL_FORCE_ACT (1 << 25)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK (3 << 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A (0 << 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B (1 << 19)
-#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C (2 << 19)
-#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18)
-#define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15)
-#define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT1 (0 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT2 (1 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT3 (4 << 8)
-#define DP_TP_CTL_LINK_TRAIN_PAT4 (5 << 8)
-#define DP_TP_CTL_LINK_TRAIN_IDLE (2 << 8)
-#define DP_TP_CTL_LINK_TRAIN_NORMAL (3 << 8)
-#define DP_TP_CTL_SCRAMBLE_DISABLE (1 << 7)
+#define DP_TP_CTL_ENABLE REG_BIT(31)
+#define DP_TP_CTL_FEC_ENABLE REG_BIT(30)
+#define DP_TP_CTL_MODE_MASK REG_BIT(27)
+#define DP_TP_CTL_MODE_SST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 0)
+#define DP_TP_CTL_MODE_MST REG_FIELD_PREP(DP_TP_CTL_MODE_MASK, 1)
+#define DP_TP_CTL_FORCE_ACT REG_BIT(25)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_MASK REG_GENMASK(20, 19)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4A REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 0)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4B REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 1)
+#define DP_TP_CTL_TRAIN_PAT4_SEL_TP4C REG_FIELD_PREP(DP_TP_CTL_TRAIN_PAT4_SEL_MASK, 2)
+#define DP_TP_CTL_ENHANCED_FRAME_ENABLE REG_BIT(18)
+#define DP_TP_CTL_FDI_AUTOTRAIN REG_BIT(15)
+#define DP_TP_CTL_LINK_TRAIN_MASK REG_GENMASK(10, 8)
+#define DP_TP_CTL_LINK_TRAIN_PAT1 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 0)
+#define DP_TP_CTL_LINK_TRAIN_PAT2 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 1)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 4)
+#define DP_TP_CTL_LINK_TRAIN_PAT4 REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 5)
+#define DP_TP_CTL_LINK_TRAIN_IDLE REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 2)
+#define DP_TP_CTL_LINK_TRAIN_NORMAL REG_FIELD_PREP(DP_TP_CTL_LINK_TRAIN_MASK, 3)
+#define DP_TP_CTL_SCRAMBLE_DISABLE REG_BIT(7)
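
This hunk (and the DP_TP_STATUS one below) converts the register definitions from open-coded shifts to the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers, which encode the field width in the mask and let the compiler reject values that do not fit. A sketch of the resulting read-modify-write idiom (function and variable names illustrative; REG_FIELD_GET() from i915_reg_defs.h is the matching decode helper):

static void example_set_link_train_pat1(struct drm_i915_private *dev_priv,
					enum port port)
{
	u32 val = intel_de_read(dev_priv, DP_TP_CTL(port));

	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;	/* clear the whole field */
	val |= DP_TP_CTL_LINK_TRAIN_PAT1;	/* REG_FIELD_PREP'd value */
	intel_de_write(dev_priv, DP_TP_CTL(port), val);
}
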
/* DisplayPort Transport Status */
#define _DP_TP_STATUS_A 0x64044
@@ -3841,14 +3604,15 @@ enum skl_power_gate {
#define _TGL_DP_TP_STATUS_A 0x60544
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
#define TGL_DP_TP_STATUS(dev_priv, tran) _MMIO_TRANS2(dev_priv, (tran), _TGL_DP_TP_STATUS_A)
-#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
-#define DP_TP_STATUS_IDLE_DONE (1 << 25)
-#define DP_TP_STATUS_ACT_SENT (1 << 24)
-#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1 << 12)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
-#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE REG_BIT(28)
+#define DP_TP_STATUS_IDLE_DONE REG_BIT(25)
+#define DP_TP_STATUS_ACT_SENT REG_BIT(24)
+#define DP_TP_STATUS_MODE_STATUS_MST REG_BIT(23)
+#define DP_TP_STATUS_STREAMS_ENABLED_MASK REG_GENMASK(18, 16) /* 17:16 on hsw but bit 18 mbz */
+#define DP_TP_STATUS_AUTOTRAIN_DONE REG_BIT(12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2_MASK REG_GENMASK(9, 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1_MASK REG_GENMASK(5, 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0_MASK REG_GENMASK(1, 0)
/* DDI Buffer Control */
#define _DDI_BUF_CTL_A 0x64000
@@ -4125,6 +3889,7 @@ enum skl_power_gate {
#define _DPLL1_CFGCR1 0x6C040
#define _DPLL2_CFGCR1 0x6C048
#define _DPLL3_CFGCR1 0x6C050
+#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31)
#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9)
@@ -4133,6 +3898,7 @@ enum skl_power_gate {
#define _DPLL1_CFGCR2 0x6C044
#define _DPLL2_CFGCR2 0x6C04C
#define _DPLL3_CFGCR2 0x6C054
+#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8)
#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8)
#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7)
@@ -4151,9 +3917,6 @@ enum skl_power_gate {
#define DPLL_CFGCR2_PDIV_7_INVALID (5 << 2)
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
-#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
-#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
-
/* ICL Clocks */
#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24, 4, 5))
@@ -4246,7 +4009,6 @@ enum skl_power_gate {
/* ADL-P Type C PLL */
#define PORTTC1_PLL_ENABLE 0x46038
#define PORTTC2_PLL_ENABLE 0x46040
-
#define ADLP_PORTTC_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), \
PORTTC1_PLL_ENABLE, \
PORTTC2_PLL_ENABLE)
@@ -4398,14 +4160,6 @@ enum skl_power_gate {
#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
-#define WM_MISC _MMIO(0x45260)
-#define WM_MISC_DATA_PARTITION_5_6 (1 << 0)
-
-#define WM_DBG _MMIO(0x45280)
-#define WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0)
-#define WM_DBG_DISALLOW_MAXFIFO (1 << 1)
-#define WM_DBG_DISALLOW_SPRITE (1 << 2)
-
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index a685db1e815d..e251bcc0c89f 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -284,4 +284,14 @@ typedef struct {
#define i915_mmio_reg_equal(a, b) (i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b))
#define i915_mmio_reg_valid(r) (!i915_mmio_reg_equal(r, INVALID_MMIO_REG))
+/* A triplet for IMR/IER/IIR registers. */
+struct i915_irq_regs {
+ i915_reg_t imr;
+ i915_reg_t ier;
+ i915_reg_t iir;
+};
+
+#define I915_IRQ_REGS(_imr, _ier, _iir) \
+ ((const struct i915_irq_regs){ .imr = (_imr), .ier = (_ier), .iir = (_iir) })
+
#endif /* __I915_REG_DEFS__ */
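
The new triplet bundles the three interrupt registers that are always programmed together, so later cleanups can pass one value instead of three. A sketch of the intended consumption (the offsets below are illustrative GEN8 GT values; any IMR/IER/IIR set can be wrapped the same way):

#include "i915_reg_defs.h"

static const struct i915_irq_regs example_regs =
	I915_IRQ_REGS(_MMIO(0x44304), _MMIO(0x4430c), _MMIO(0x44308));

static void example_irq_reset(struct intel_uncore *uncore,
			      struct i915_irq_regs regs)
{
	intel_uncore_write(uncore, regs.imr, 0xffffffff);	/* mask everything */
	intel_uncore_write(uncore, regs.ier, 0);		/* disable everything */
	/* acknowledge anything still pending */
	intel_uncore_write(uncore, regs.iir,
			   intel_uncore_read(uncore, regs.iir));
}

Passing the struct by value is cheap (three register handles) and keeps call sites free of pointer plumbing.
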
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 519e096c607c..8f62cfa23fb7 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -273,11 +273,6 @@ i915_request_active_engine(struct i915_request *rq,
return ret;
}
-static void __rq_init_watchdog(struct i915_request *rq)
-{
- rq->watchdog.timer.function = NULL;
-}
-
static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
{
struct i915_request *rq =
@@ -294,6 +289,14 @@ static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
return HRTIMER_NORESTART;
}
+static void __rq_init_watchdog(struct i915_request *rq)
+{
+ struct i915_request_watchdog *wdg = &rq->watchdog;
+
+ hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ wdg->timer.function = __rq_watchdog_expired;
+}
+
static void __rq_arm_watchdog(struct i915_request *rq)
{
struct i915_request_watchdog *wdg = &rq->watchdog;
@@ -304,8 +307,6 @@ static void __rq_arm_watchdog(struct i915_request *rq)
i915_request_get(rq);
- hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- wdg->timer.function = __rq_watchdog_expired;
hrtimer_start_range_ns(&wdg->timer,
ns_to_ktime(ce->watchdog.timeout_us *
NSEC_PER_USEC),
@@ -317,7 +318,7 @@ static void __rq_cancel_watchdog(struct i915_request *rq)
{
struct i915_request_watchdog *wdg = &rq->watchdog;
- if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0)
+ if (hrtimer_try_to_cancel(&wdg->timer) > 0)
i915_request_put(rq);
}
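
The move makes the hrtimer valid from request initialization onward instead of from first arm, which is why the wdg->timer.function NULL test in __rq_cancel_watchdog() can go away. The general shape of the pattern, with hypothetical names:

struct example_obj {
	struct hrtimer timer;
};

static enum hrtimer_restart example_expired(struct hrtimer *t);
static void example_put(struct example_obj *obj);

static void example_init(struct example_obj *obj)
{
	/* always initialized, even if the watchdog is never armed */
	hrtimer_init(&obj->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	obj->timer.function = example_expired;
}

static void example_cancel(struct example_obj *obj)
{
	/* > 0 means the timer was queued and we deactivated it, so the
	 * reference taken at arm time must be dropped here */
	if (hrtimer_try_to_cancel(&obj->timer) > 0)
		example_put(obj);
}
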
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 762127dd56c5..70a854557e6e 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -506,6 +506,6 @@ int __init i915_scheduler_module_init(void)
return 0;
err_priorities:
- kmem_cache_destroy(slab_priorities);
+ kmem_cache_destroy(slab_dependencies);
return -ENOMEM;
}
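
A classic unwind fix: err_priorities is reached when creating slab_priorities fails, so the label must tear down only what was created earlier, not the cache whose creation just failed. The surrounding function has roughly this shape (sketched from the visible hunk, so treat the details as illustrative):

	slab_dependencies = KMEM_CACHE(i915_dependency, SLAB_HWCACHE_ALIGN);
	if (!slab_dependencies)
		return -ENOMEM;

	slab_priorities = KMEM_CACHE(i915_priolist, 0);
	if (!slab_priorities)
		goto err_priorities;

	return 0;

err_priorities:
	kmem_cache_destroy(slab_dependencies);	/* undo the earlier step only */
	return -ENOMEM;
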
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
deleted file mode 100644
index f8373a461f17..000000000000
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- *
- * Copyright 2008 (c) Intel Corporation
- * Jesse Barnes <jbarnes@virtuousgeek.org>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "display/intel_de.h"
-#include "display/intel_gmbus.h"
-#include "display/intel_vga.h"
-
-#include "i915_drv.h"
-#include "i915_reg.h"
-#include "i915_suspend.h"
-#include "intel_pci_config.h"
-
-static void intel_save_swf(struct drm_i915_private *dev_priv)
-{
- int i;
-
- /* Scratch space */
- if (GRAPHICS_VER(dev_priv) == 2 && IS_MOBILE(dev_priv)) {
- for (i = 0; i < 7; i++) {
- dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv,
- SWF0(dev_priv, i));
- dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv,
- SWF1(dev_priv, i));
- }
- for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv,
- SWF3(dev_priv, i));
- } else if (GRAPHICS_VER(dev_priv) == 2) {
- for (i = 0; i < 7; i++)
- dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv,
- SWF1(dev_priv, i));
- } else if (HAS_GMCH(dev_priv)) {
- for (i = 0; i < 16; i++) {
- dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv,
- SWF0(dev_priv, i));
- dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv,
- SWF1(dev_priv, i));
- }
- for (i = 0; i < 3; i++)
- dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv,
- SWF3(dev_priv, i));
- }
-}
-
-static void intel_restore_swf(struct drm_i915_private *dev_priv)
-{
- int i;
-
- /* Scratch space */
- if (GRAPHICS_VER(dev_priv) == 2 && IS_MOBILE(dev_priv)) {
- for (i = 0; i < 7; i++) {
- intel_de_write(dev_priv, SWF0(dev_priv, i),
- dev_priv->regfile.saveSWF0[i]);
- intel_de_write(dev_priv, SWF1(dev_priv, i),
- dev_priv->regfile.saveSWF1[i]);
- }
- for (i = 0; i < 3; i++)
- intel_de_write(dev_priv, SWF3(dev_priv, i),
- dev_priv->regfile.saveSWF3[i]);
- } else if (GRAPHICS_VER(dev_priv) == 2) {
- for (i = 0; i < 7; i++)
- intel_de_write(dev_priv, SWF1(dev_priv, i),
- dev_priv->regfile.saveSWF1[i]);
- } else if (HAS_GMCH(dev_priv)) {
- for (i = 0; i < 16; i++) {
- intel_de_write(dev_priv, SWF0(dev_priv, i),
- dev_priv->regfile.saveSWF0[i]);
- intel_de_write(dev_priv, SWF1(dev_priv, i),
- dev_priv->regfile.saveSWF1[i]);
- }
- for (i = 0; i < 3; i++)
- intel_de_write(dev_priv, SWF3(dev_priv, i),
- dev_priv->regfile.saveSWF3[i]);
- }
-}
-
-void i915_save_display(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- /* Display arbitration control */
- if (GRAPHICS_VER(dev_priv) <= 4)
- dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv,
- DSPARB(dev_priv));
-
- if (GRAPHICS_VER(dev_priv) == 4)
- pci_read_config_word(pdev, GCDGMBUS,
- &dev_priv->regfile.saveGCDGMBUS);
-
- intel_save_swf(dev_priv);
-}
-
-void i915_restore_display(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- intel_restore_swf(dev_priv);
-
- if (GRAPHICS_VER(dev_priv) == 4)
- pci_write_config_word(pdev, GCDGMBUS,
- dev_priv->regfile.saveGCDGMBUS);
-
- /* Display arbitration */
- if (GRAPHICS_VER(dev_priv) <= 4)
- intel_de_write(dev_priv, DSPARB(dev_priv),
- dev_priv->regfile.saveDSPARB);
-
- intel_vga_redisable(dev_priv);
-
- intel_gmbus_reset(dev_priv);
-}
diff --git a/drivers/gpu/drm/i915/i915_suspend.h b/drivers/gpu/drm/i915/i915_suspend.h
deleted file mode 100644
index e5a611ee3d15..000000000000
--- a/drivers/gpu/drm/i915/i915_suspend.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __I915_SUSPEND_H__
-#define __I915_SUSPEND_H__
-
-struct drm_i915_private;
-
-void i915_save_display(struct drm_i915_private *i915);
-void i915_restore_display(struct drm_i915_private *i915);
-
-#endif /* __I915_SUSPEND_H__ */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index ce1cbee1b39d..7ed41ce9b708 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -322,7 +322,7 @@ DEFINE_EVENT(i915_request, i915_request_add,
TP_ARGS(rq)
);
-#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
+#if IS_ENABLED(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
DEFINE_EVENT(i915_request, i915_request_guc_submit,
TP_PROTO(struct i915_request *rq),
TP_ARGS(rq)
@@ -642,34 +642,6 @@ DEFINE_EVENT(i915_request, i915_request_wait_end,
TP_ARGS(rq)
);
-TRACE_EVENT_CONDITION(i915_reg_rw,
- TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
-
- TP_ARGS(write, reg, val, len, trace),
-
- TP_CONDITION(trace),
-
- TP_STRUCT__entry(
- __field(u64, val)
- __field(u32, reg)
- __field(u16, write)
- __field(u16, len)
- ),
-
- TP_fast_assign(
- __entry->val = (u64)val;
- __entry->reg = i915_mmio_reg_offset(reg);
- __entry->write = write;
- __entry->len = len;
- ),
-
- TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
- __entry->write ? "write" : "read",
- __entry->reg, __entry->len,
- (u32)(__entry->val & 0xffffffff),
- (u32)(__entry->val >> 32))
-);
-
/**
* DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
*
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 71bdc89bd621..609214231ffc 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -270,7 +270,7 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
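
defined(CONFIG_FOO) is only true for built-in (=y) options, because a tristate built as a module defines CONFIG_FOO_MODULE instead; IS_ENABLED() covers both states and is the idiomatic spelling even for bool options where the two happen to agree. With a hypothetical option:

#include <linux/kconfig.h>

#if defined(CONFIG_EXAMPLE)	/* true only for CONFIG_EXAMPLE=y */
#endif

#if IS_ENABLED(CONFIG_EXAMPLE)	/* true for CONFIG_EXAMPLE=y or =m */
#endif
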
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index d2f064d2525c..776f8cc51b2f 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -2157,7 +2157,7 @@ static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
int i915_vma_unbind(struct i915_vma *vma)
{
struct i915_address_space *vm = vma->vm;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
int err;
assert_object_held_shared(vma->obj);
@@ -2196,7 +2196,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{
struct drm_i915_gem_object *obj = vma->obj;
struct i915_address_space *vm = vma->vm;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
struct dma_fence *fence;
int err;
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index 26c4dbda076e..f76642886569 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -502,7 +502,7 @@ static void ivb_init_clock_gating(struct drm_i915_private *i915)
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
- if (IS_IVB_GT1(i915))
+ if (INTEL_INFO(i915)->gt == 1)
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
diff --git a/drivers/gpu/drm/i915/intel_cpu_info.c b/drivers/gpu/drm/i915/intel_cpu_info.c
new file mode 100644
index 000000000000..e52d0ac713a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_cpu_info.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ *
+ * Avoid INTEL_<PLATFORM> name collisions between asm/intel-family.h and
+ * intel_device_info.h by keeping this code in a separate file.
+ */
+
+#include "intel_cpu_info.h"
+
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+static const struct x86_cpu_id g8_cpu_ids[] = {
+ X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_COMETLAKE, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE, NULL),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, NULL),
+ {}
+};
+
+/**
+ * intel_match_g8_cpu - match the current CPU against g8_cpu_ids
+ *
+ * This matches the current CPU against g8_cpu_ids, the list of CPU
+ * models affected by the G8 workaround.
+ *
+ * Returns: %true if matches, %false otherwise.
+ */
+bool intel_match_g8_cpu(void)
+{
+ return x86_match_cpu(g8_cpu_ids);
+}
+#else /* CONFIG_X86 */
+
+bool intel_match_g8_cpu(void) { return false; }
+
+#endif /* CONFIG_X86 */
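
A sketch of the intended call site: gate a workaround on the affected CPU models. Only intel_match_g8_cpu() comes from this patch; the helper wrapping it is hypothetical (POWER_SETUP_SUBCOMMAND_G8_ENABLE, added earlier in the series, is the pcode side of the same workaround):

#include "intel_cpu_info.h"

static bool example_needs_g8_wa(void)
{
	/* resolves to the stub returning false on !CONFIG_X86 builds */
	return intel_match_g8_cpu();
}
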
diff --git a/drivers/gpu/drm/i915/intel_cpu_info.h b/drivers/gpu/drm/i915/intel_cpu_info.h
new file mode 100644
index 000000000000..d898fb463d31
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_cpu_info.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _INTEL_CPU_INFO_H_
+#define _INTEL_CPU_INFO_H_
+
+#include <linux/types.h>
+
+bool intel_match_g8_cpu(void);
+
+#endif /* _INTEL_CPU_INFO_H_ */
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 3c47c625993e..bbe3a24fe3d9 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -25,7 +25,7 @@
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
-#include <drm/intel/i915_pciids.h>
+#include <drm/intel/pciids.h>
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
@@ -200,8 +200,20 @@ static const u16 subplatform_g12_ids[] = {
INTEL_DG2_G12_IDS(ID),
};
-static const u16 subplatform_arl_ids[] = {
- INTEL_ARL_IDS(ID),
+static const u16 subplatform_dg2_d_ids[] = {
+ INTEL_DG2_D_IDS(ID),
+};
+
+static const u16 subplatform_arl_h_ids[] = {
+ INTEL_ARL_H_IDS(ID),
+};
+
+static const u16 subplatform_arl_u_ids[] = {
+ INTEL_ARL_U_IDS(ID),
+};
+
+static const u16 subplatform_arl_s_ids[] = {
+ INTEL_ARL_S_IDS(ID),
};
static bool find_devid(u16 id, const u16 *p, unsigned int num)
@@ -261,11 +273,22 @@ static void intel_device_info_subplatform_init(struct drm_i915_private *i915)
} else if (find_devid(devid, subplatform_g12_ids,
ARRAY_SIZE(subplatform_g12_ids))) {
mask = BIT(INTEL_SUBPLATFORM_G12);
- } else if (find_devid(devid, subplatform_arl_ids,
- ARRAY_SIZE(subplatform_arl_ids))) {
- mask = BIT(INTEL_SUBPLATFORM_ARL);
+ } else if (find_devid(devid, subplatform_arl_h_ids,
+ ARRAY_SIZE(subplatform_arl_h_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ARL_H);
+ } else if (find_devid(devid, subplatform_arl_u_ids,
+ ARRAY_SIZE(subplatform_arl_u_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ARL_U);
+ } else if (find_devid(devid, subplatform_arl_s_ids,
+ ARRAY_SIZE(subplatform_arl_s_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ARL_S);
}
+ /* DG2_D ids span multiple DG2 subplatforms */
+ if (find_devid(devid, subplatform_dg2_d_ids,
+ ARRAY_SIZE(subplatform_dg2_d_ids)))
+ mask |= BIT(INTEL_SUBPLATFORM_D);
+
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 643ff1bf74ee..9387385cb418 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -95,9 +95,11 @@ enum intel_platform {
/*
* Subplatform bits share the same namespace per parent platform. In other words
* it is fine for the same bit to be used on multiple parent platforms.
+ * Devices can belong to multiple subplatforms if needed, so it is
+ * possible to set multiple bits for the same device.
*/
-#define INTEL_SUBPLATFORM_BITS (3)
+#define INTEL_SUBPLATFORM_BITS (4)
#define INTEL_SUBPLATFORM_MASK (BIT(INTEL_SUBPLATFORM_BITS) - 1)
/* HSW/BDW/SKL/KBL/CFL */
@@ -114,6 +116,7 @@ enum intel_platform {
#define INTEL_SUBPLATFORM_G10 0
#define INTEL_SUBPLATFORM_G11 1
#define INTEL_SUBPLATFORM_G12 2
+#define INTEL_SUBPLATFORM_D 3
/* ADL */
#define INTEL_SUBPLATFORM_RPL 0
@@ -128,7 +131,9 @@ enum intel_platform {
#define INTEL_SUBPLATFORM_RPLU 2
/* MTL */
-#define INTEL_SUBPLATFORM_ARL 0
+#define INTEL_SUBPLATFORM_ARL_H 0
+#define INTEL_SUBPLATFORM_ARL_U 1
+#define INTEL_SUBPLATFORM_ARL_S 2
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
@@ -138,7 +143,6 @@ enum intel_ppgtt_type {
#define DEV_INFO_FOR_EACH_FLAG(func) \
func(is_mobile); \
- func(is_lp); \
func(require_force_probe); \
func(is_dgfx); \
/* Keep has_* in alphabetical order */ \
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 5a01d60e5186..a5383a2bc64b 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -204,7 +204,7 @@ int intel_gvt_set_ops(const struct intel_vgpu_ops *ops)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, "I915_GVT");
void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops)
{
@@ -222,7 +222,7 @@ void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops)
intel_gvt_ops = NULL;
mutex_unlock(&intel_gvt_mutex);
}
-EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, "I915_GVT");
/**
* intel_gvt_init - initialize GVT components
@@ -284,40 +284,40 @@ void intel_gvt_resume(struct drm_i915_private *dev_priv)
* Exported here so that the exports only get created when GVT support is
* actually enabled.
*/
-EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_request_add, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_request_create, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_request_wait, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_vm_release, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(intel_context_create, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(intel_ring_begin, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, "I915_GVT");
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, "I915_GVT");
#endif
-EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT);
-EXPORT_SYMBOL_NS_GPL(i915_fence_ops, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(shmem_pin_map, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(__px_dma, "I915_GVT");
+EXPORT_SYMBOL_NS_GPL(i915_fence_ops, "I915_GVT");
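
These hunks follow the kernel-wide change of symbol namespaces from bare identifiers to string literals, which frees the namespace syntax from C identifier rules. The consumer side uses the same string; a sketch for a module importing these exports:

#include <linux/module.h>

/* previously: MODULE_IMPORT_NS(I915_GVT); */
MODULE_IMPORT_NS("I915_GVT");
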
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 955c9a33212a..ee1cd2126f97 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -5,9 +5,11 @@
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
+#include "display/i9xx_wm_regs.h"
#include "display/intel_audio_regs.h"
#include "display/intel_backlight_regs.h"
#include "display/intel_color_regs.h"
+#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
@@ -1308,4 +1310,4 @@ int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter)
err:
return ret;
}
-EXPORT_SYMBOL_NS_GPL(intel_gvt_iterate_mmio_table, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_gvt_iterate_mmio_table, "I915_GVT");
diff --git a/drivers/gpu/drm/i915/intel_mchbar_regs.h b/drivers/gpu/drm/i915/intel_mchbar_regs.h
index 73900c098d59..dc2477179c3e 100644
--- a/drivers/gpu/drm/i915/intel_mchbar_regs.h
+++ b/drivers/gpu/drm/i915/intel_mchbar_regs.h
@@ -207,6 +207,10 @@
#define PCU_PACKAGE_ENERGY_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x593c)
#define GEN6_GT_PERF_STATUS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5948)
+
+#define PCU_PACKAGE_TEMPERATURE _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5978)
+#define TEMP_MASK REG_GENMASK(7, 0)
+
#define GEN6_RP_STATE_LIMITS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define RP0_CAP_MASK REG_GENMASK(7, 0)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 2d0647aca964..1a47ecfd3fd8 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -66,7 +66,7 @@ static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
if (!rpm->available || rpm->no_wakeref_tracking)
- return -1;
+ return INTEL_WAKEREF_DEF;
return intel_ref_tracker_alloc(&rpm->debug);
}
@@ -114,7 +114,7 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- return -1;
+ return INTEL_WAKEREF_DEF;
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
@@ -250,7 +250,7 @@ static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm
pm_runtime_get_if_active(rpm->kdev) <= 0) ||
(!ignore_usecount &&
pm_runtime_get_if_in_use(rpm->kdev) <= 0))
- return 0;
+ return NULL;
}
intel_runtime_pm_acquire(rpm, true);
@@ -336,7 +336,7 @@ intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
*/
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
- __intel_runtime_pm_put(rpm, -1, true);
+ __intel_runtime_pm_put(rpm, INTEL_WAKEREF_DEF, true);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index de3579d399e1..e22669d61e95 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -42,7 +42,6 @@ struct intel_runtime_pm {
atomic_t wakeref_count;
struct device *kdev; /* points to i915->drm.dev */
bool available;
- bool irqs_enabled;
bool no_wakeref_tracking;
/*
@@ -97,10 +96,16 @@ intel_rpm_wakelock_count(int wakeref_count)
return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
}
+static inline bool
+intel_runtime_pm_suspended(struct intel_runtime_pm *rpm)
+{
+ return pm_runtime_suspended(rpm->kdev);
+}
+
static inline void
assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
- WARN_ONCE(pm_runtime_suspended(rpm->kdev),
+ WARN_ONCE(intel_runtime_pm_suspended(rpm),
"Device suspended during HW access\n");
}
@@ -189,15 +194,15 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
#define with_intel_runtime_pm(rpm, wf) \
for ((wf) = intel_runtime_pm_get(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+ intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
#define with_intel_runtime_pm_if_in_use(rpm, wf) \
for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+ intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
#define with_intel_runtime_pm_if_active(rpm, wf) \
for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+ intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
diff --git a/drivers/gpu/drm/i915/intel_sbi.c b/drivers/gpu/drm/i915/intel_sbi.c
index 5c6e517c73f4..41e85ac773dc 100644
--- a/drivers/gpu/drm/i915/intel_sbi.c
+++ b/drivers/gpu/drm/i915/intel_sbi.c
@@ -17,7 +17,7 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
struct intel_uncore *uncore = &i915->uncore;
u32 cmd;
- lockdep_assert_held(&i915->sb_lock);
+ lockdep_assert_held(&i915->sbi_lock);
if (intel_wait_for_register_fw(uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
@@ -57,6 +57,16 @@ static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
return 0;
}
+void intel_sbi_lock(struct drm_i915_private *i915)
+{
+ mutex_lock(&i915->sbi_lock);
+}
+
+void intel_sbi_unlock(struct drm_i915_private *i915)
+{
+ mutex_unlock(&i915->sbi_lock);
+}
+
u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
enum intel_sbi_destination destination)
{
@@ -72,3 +82,13 @@ void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
{
intel_sbi_rw(i915, reg, destination, &value, false);
}
+
+void intel_sbi_init(struct drm_i915_private *i915)
+{
+ mutex_init(&i915->sbi_lock);
+}
+
+void intel_sbi_fini(struct drm_i915_private *i915)
+{
+ mutex_destroy(&i915->sbi_lock);
+}
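
With the mutex wrapped in accessors, callers bracket whole sideband sequences explicitly rather than reaching into the i915 struct. A sketch of a locked read-modify-write (SBI_SSCCTL, SBI_SSCCTL_DISABLE and the SBI_ICLK destination are existing driver names; the helper itself is illustrative):

static void example_disable_ssc(struct drm_i915_private *i915)
{
	u32 tmp;

	intel_sbi_lock(i915);
	tmp = intel_sbi_read(i915, SBI_SSCCTL, SBI_ICLK);
	tmp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(i915, SBI_SSCCTL, tmp, SBI_ICLK);
	intel_sbi_unlock(i915);
}
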
diff --git a/drivers/gpu/drm/i915/intel_sbi.h b/drivers/gpu/drm/i915/intel_sbi.h
index f5a862210454..85161a4f13b8 100644
--- a/drivers/gpu/drm/i915/intel_sbi.h
+++ b/drivers/gpu/drm/i915/intel_sbi.h
@@ -15,6 +15,10 @@ enum intel_sbi_destination {
SBI_MPHY,
};
+void intel_sbi_init(struct drm_i915_private *i915);
+void intel_sbi_fini(struct drm_i915_private *i915);
+void intel_sbi_lock(struct drm_i915_private *i915);
+void intel_sbi_unlock(struct drm_i915_private *i915);
u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 6aa179a3e92a..eed4937c3ff3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -31,12 +31,17 @@
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
-#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_uncore_trace.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10
+struct intel_uncore *to_intel_uncore(struct drm_device *drm)
+{
+ return &to_i915(drm)->uncore;
+}
+
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
static void
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index f419c311a0de..e39582950627 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -501,6 +501,8 @@ static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
return uncore->regs;
}
+struct intel_uncore *to_intel_uncore(struct drm_device *drm);
+
/*
* The raw_reg_{read,write} macros are intended as a micro-optimization for
* interrupt handlers so that the pointer indirection on uncore->regs can
diff --git a/drivers/gpu/drm/i915/intel_uncore_trace.c b/drivers/gpu/drm/i915/intel_uncore_trace.c
new file mode 100644
index 000000000000..86f0c3942b1d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore_trace.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "intel_uncore_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore_trace.h b/drivers/gpu/drm/i915/intel_uncore_trace.h
new file mode 100644
index 000000000000..f13ff71edf2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore_trace.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright © 2024 Intel Corporation */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+
+#if !defined(__INTEL_UNCORE_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __INTEL_UNCORE_TRACE_H__
+
+#include "i915_reg_defs.h"
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(i915_reg_rw,
+ TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
+
+ TP_ARGS(write, reg, val, len, trace),
+
+ TP_CONDITION(trace),
+
+ TP_STRUCT__entry(
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+ __field(u16, len)
+ ),
+
+ TP_fast_assign(
+ __entry->val = (u64)val;
+ __entry->reg = i915_mmio_reg_offset(reg);
+ __entry->write = write;
+ __entry->len = len;
+ ),
+
+ TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+ __entry->write ? "write" : "read",
+ __entry->reg, __entry->len,
+ (u32)(__entry->val & 0xffffffff),
+ (u32)(__entry->val >> 32))
+);
+#endif /* __INTEL_UNCORE_TRACE_H__ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_FILE intel_uncore_trace
+#include <trace/define_trace.h>
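
Standard tracepoint layout: every user just includes the header, while exactly one translation unit (intel_uncore_trace.c above) defines CREATE_TRACE_POINTS before the include so the event bodies are emitted once. A consumer then calls the generated stub; the arguments below are illustrative:

#include "intel_uncore_trace.h"

static void example_log_read(i915_reg_t reg, u32 val, bool enabled)
{
	/* stub generated from TRACE_EVENT_CONDITION(i915_reg_rw, ...);
	 * fires only when the last argument is true */
	trace_i915_reg_rw(false, reg, val, sizeof(val), enabled);
}
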
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index dea2f63184f8..87f246047312 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -27,11 +27,11 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf)
if (!atomic_read(&wf->count)) {
INTEL_WAKEREF_BUG_ON(wf->wakeref);
wf->wakeref = wakeref;
- wakeref = 0;
+ wakeref = NULL;
ret = wf->ops->get(wf);
if (ret) {
- wakeref = xchg(&wf->wakeref, 0);
+ wakeref = xchg(&wf->wakeref, NULL);
wake_up_var(&wf->wakeref);
goto unlock;
}
@@ -52,7 +52,7 @@ unlock:
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref = NULL;
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
if (unlikely(!atomic_dec_and_test(&wf->count)))
@@ -61,7 +61,7 @@ static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
/* ops->put() must reschedule its own release on error/deferral */
if (likely(!wf->ops->put(wf))) {
INTEL_WAKEREF_BUG_ON(!wf->wakeref);
- wakeref = xchg(&wf->wakeref, 0);
+ wakeref = xchg(&wf->wakeref, NULL);
wake_up_var(&wf->wakeref);
}
@@ -107,7 +107,7 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
atomic_set(&wf->count, 0);
- wf->wakeref = 0;
+ wf->wakeref = NULL;
INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
lockdep_init_map(&wf->work.work.lockdep_map,
@@ -142,7 +142,7 @@ static void wakeref_auto_timeout(struct timer_list *t)
if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
return;
- wakeref = fetch_and_zero(&wf->wakeref);
+ wakeref = xchg(&wf->wakeref, NULL);
spin_unlock_irqrestore(&wf->lock, flags);
intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
@@ -154,7 +154,7 @@ void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
spin_lock_init(&wf->lock);
timer_setup(&wf->timer, wakeref_auto_timeout, 0);
refcount_set(&wf->count, 0);
- wf->wakeref = 0;
+ wf->wakeref = NULL;
wf->i915 = i915;
}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 68aa3be48251..48836ef52d40 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -21,7 +21,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
-typedef unsigned long intel_wakeref_t;
+typedef struct ref_tracker *intel_wakeref_t;
#define INTEL_REFTRACK_DEAD_COUNT 16
#define INTEL_REFTRACK_PRINT_LIMIT 16
@@ -273,7 +273,7 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf)
*/
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
-#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1))
+#define INTEL_WAKEREF_DEF ERR_PTR(-ENOENT)
static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir)
{
@@ -281,17 +281,19 @@ static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *di
ref_tracker_alloc(dir, &user, GFP_NOWAIT);
- return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF;
+ return user ?: INTEL_WAKEREF_DEF;
}
static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir,
- intel_wakeref_t handle)
+ intel_wakeref_t wakeref)
{
- struct ref_tracker *user;
+ if (wakeref == INTEL_WAKEREF_DEF)
+ wakeref = NULL;
- user = (handle == INTEL_WAKEREF_DEF) ? NULL : (void *)handle;
+ if (WARN_ON(IS_ERR(wakeref)))
+ return;
- ref_tracker_free(dir, &user);
+ ref_tracker_free(dir, &wakeref);
}
void intel_ref_tracker_show(struct ref_tracker_dir *dir,
@@ -314,7 +316,7 @@ static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
{
- return -1;
+ return INTEL_WAKEREF_DEF;
}
static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
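
The payoff of retyping intel_wakeref_t as struct ref_tracker *: NULL unambiguously means "no reference held", the compiler rejects stray integer assignments, and the with_* helpers above work off plain pointer truthiness. A sketch of a caller (assuming a struct drm_i915_private *i915):

static void example_hw_access(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/* wakeref is non-NULL inside the block and released on exit */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* safe to touch hardware here */
	}
}
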
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index 75278e78ca90..9cf169665d7c 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -170,7 +170,7 @@ static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i9
static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915)
{
- if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp)
+ if (!HAS_PXP(i915))
return NULL;
/*
@@ -461,9 +461,11 @@ void intel_pxp_fini_hw(struct intel_pxp *pxp)
}
int intel_pxp_key_check(struct intel_pxp *pxp,
- struct drm_i915_gem_object *obj,
+ struct drm_gem_object *_obj,
bool assign)
{
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+
if (!intel_pxp_is_active(pxp))
return -ENODEV;
@@ -529,7 +531,7 @@ void intel_pxp_invalidate(struct intel_pxp *pxp)
if (ctx->pxp_wakeref) {
intel_runtime_pm_put(&i915->runtime_pm,
ctx->pxp_wakeref);
- ctx->pxp_wakeref = 0;
+ ctx->pxp_wakeref = NULL;
}
spin_lock_irq(&i915->gem.contexts.lock);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
index d9372f6f7797..4ed97db5e7c6 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -9,7 +9,7 @@
#include <linux/errno.h>
#include <linux/types.h>
-struct drm_i915_gem_object;
+struct drm_gem_object;
struct drm_i915_private;
struct intel_pxp;
@@ -32,7 +32,7 @@ int intel_pxp_start(struct intel_pxp *pxp);
void intel_pxp_end(struct intel_pxp *pxp);
int intel_pxp_key_check(struct intel_pxp *pxp,
- struct drm_i915_gem_object *obj,
+ struct drm_gem_object *obj,
bool assign);
void intel_pxp_invalidate(struct intel_pxp *pxp);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 61da4ed9d521..0727492576be 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -4,7 +4,7 @@
* Copyright © 2018 Intel Corporation
*/
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 05364eca20f7..70330a2e80f2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -26,7 +26,7 @@
#define __I915_SELFTESTS_RANDOM_H__
#include <linux/math64.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "../i915_selftest.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index acae30a04a94..88870844b5bd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -492,7 +492,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
for (n = 0; n < ncpus; n++) {
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/%d", n);
+ worker = kthread_run_worker(0, "igt/%d", n);
if (IS_ERR(worker)) {
ret = PTR_ERR(worker);
ncpus = n;
@@ -1645,7 +1645,7 @@ static int live_parallel_engines(void *arg)
for_each_uabi_engine(engine, i915) {
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/parallel:%s",
+ worker = kthread_run_worker(0, "igt/parallel:%s",
engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
@@ -1806,7 +1806,7 @@ static int live_breadcrumbs_smoketest(void *arg)
unsigned int i = idx * ncpus + n;
struct kthread_worker *worker;
- worker = kthread_create_worker(0, "igt/%d.%d", idx, n);
+ worker = kthread_run_worker(0, "igt/%d.%d", idx, n);
if (IS_ERR(worker)) {
ret = PTR_ERR(worker);
goto out_flush;
@@ -3219,7 +3219,7 @@ static int perf_parallel_engines(void *arg)
memset(&engines[idx].p, 0, sizeof(engines[idx].p));
- worker = kthread_create_worker(0, "igt:%s",
+ worker = kthread_run_worker(0, "igt:%s",
engine->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 29110abb4fe0..c383d31d46b0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -19,12 +19,22 @@ int igt_flush_test(struct drm_i915_private *i915)
int ret = 0;
for_each_gt(gt, i915, i) {
+ struct intel_engine_cs *engine;
+ unsigned long timeout_ms = 0;
+ unsigned int id;
+
if (intel_gt_is_wedged(gt))
ret = -EIO;
+ for_each_engine(engine, gt, id) {
+ if (engine->props.preempt_timeout_ms > timeout_ms)
+ timeout_ms = engine->props.preempt_timeout_ms;
+ }
+
cond_resched();
- if (intel_gt_wait_for_idle(gt, HZ * 3) == -ETIME) {
+ /* 2x longest preempt timeout, experimentally determined */
+ if (intel_gt_wait_for_idle(gt, HZ * timeout_ms / 500) == -ETIME) {
pr_err("%pS timed out, cancelling all further testing.\n",
__builtin_return_address(0));
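
Unit check on the new wait: timeout_ms is in milliseconds, so HZ * timeout_ms / 1000 converts it to jiffies and HZ * timeout_ms / 500 is twice the longest preemption timeout, matching the comment. For example, assuming HZ == 250 and a 640 ms preempt timeout, 250 * 640 / 500 == 320 jiffies == 1.28 s == 2 * 640 ms.
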
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 91794ca17a58..a77e5b26542c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -137,7 +137,7 @@ static const struct intel_device_info mock_info = {
struct drm_i915_private *mock_gem_device(void)
{
-#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+#if IS_ENABLED(CONFIG_IOMMU_API) && IS_ENABLED(CONFIG_INTEL_IOMMU)
static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
struct drm_i915_private *i915;
@@ -153,7 +153,7 @@ struct drm_i915_private *mock_gem_device(void)
dev_set_name(&pdev->dev, "mock");
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+#if IS_ENABLED(CONFIG_IOMMU_API) && IS_ENABLED(CONFIG_INTEL_IOMMU)
/* HACK to disable iommu for the fake device; force identity mapping */
pdev->dev.iommu = &fake_iommu;
#endif
@@ -180,7 +180,7 @@ struct drm_i915_private *mock_gem_device(void)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, &mock_info);
- intel_display_device_probe(i915);
+ intel_display_device_probe(pdev);
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
@@ -203,7 +203,7 @@ struct drm_i915_private *mock_gem_device(void)
intel_root_gt_init_early(i915);
mock_uncore_init(&i915->uncore, i915);
atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
- to_gt(i915)->awake = -ENODEV;
+ to_gt(i915)->awake = INTEL_WAKEREF_MOCK_GT;
mock_gt_probe(i915);
ret = intel_region_ttm_device_init(i915);
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index 805c4bfb85fe..7e59591bbed6 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -22,7 +22,7 @@
*/
#include <linux/prime_numbers.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include "i915_selftest.h"
#include "i915_utils.h"
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index 4aba47bccc63..9e310f4099f4 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -714,7 +714,7 @@ void intel_dram_detect(struct drm_i915_private *i915)
* Assume level 0 watermark latency adjustment is needed until proven
* otherwise, this w/a is not needed by bxt/glk.
*/
- dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
+ dram_info->wm_lv_0_adjust_needed = !IS_BROXTON(i915) && !IS_GEMINILAKE(i915);
if (DISPLAY_VER(i915) >= 14)
ret = xelpdp_get_dram_info(i915);
@@ -722,7 +722,7 @@ void intel_dram_detect(struct drm_i915_private *i915)
ret = gen12_get_dram_info(i915);
else if (GRAPHICS_VER(i915) >= 11)
ret = gen11_get_dram_info(i915);
- else if (IS_GEN9_LP(i915))
+ else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
ret = bxt_get_dram_info(i915);
else
ret = skl_get_dram_info(i915);
diff --git a/drivers/gpu/drm/i915/soc/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
index 542eea50093c..842db43e46c0 100644
--- a/drivers/gpu/drm/i915/soc/intel_pch.c
+++ b/drivers/gpu/drm/i915/soc/intel_pch.c
@@ -124,7 +124,10 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv) &&
!IS_ROCKETLAKE(dev_priv) &&
- !IS_GEN9_BC(dev_priv));
+ !IS_SKYLAKE(dev_priv) &&
+ !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv) &&
+ !IS_COMETLAKE(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
diff --git a/drivers/gpu/drm/i915/soc/intel_rom.c b/drivers/gpu/drm/i915/soc/intel_rom.c
new file mode 100644
index 000000000000..243d98cab8c3
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_rom.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+#include "intel_rom.h"
+#include "intel_uncore.h"
+
+struct intel_rom {
+ /* for PCI ROM */
+ struct pci_dev *pdev;
+ void __iomem *oprom;
+
+ /* for SPI */
+ struct intel_uncore *uncore;
+ loff_t offset;
+
+ size_t size;
+
+ u32 (*read32)(struct intel_rom *rom, loff_t offset);
+ u16 (*read16)(struct intel_rom *rom, loff_t offset);
+ void (*read_block)(struct intel_rom *rom, void *data, loff_t offset, size_t size);
+ void (*free)(struct intel_rom *rom);
+};
+
+static u32 spi_read32(struct intel_rom *rom, loff_t offset)
+{
+ intel_uncore_write(rom->uncore, PRIMARY_SPI_ADDRESS,
+ rom->offset + offset);
+
+ return intel_uncore_read(rom->uncore, PRIMARY_SPI_TRIGGER);
+}
+
+static u16 spi_read16(struct intel_rom *rom, loff_t offset)
+{
+ return spi_read32(rom, offset) & 0xffff;
+}
+
+struct intel_rom *intel_rom_spi(struct drm_i915_private *i915)
+{
+ struct intel_rom *rom;
+ u32 static_region;
+
+ rom = kzalloc(sizeof(*rom), GFP_KERNEL);
+ if (!rom)
+ return NULL;
+
+ rom->uncore = &i915->uncore;
+
+ static_region = intel_uncore_read(rom->uncore, SPI_STATIC_REGIONS);
+ static_region &= OPTIONROM_SPI_REGIONID_MASK;
+ intel_uncore_write(rom->uncore, PRIMARY_SPI_REGIONID, static_region);
+
+ rom->offset = intel_uncore_read(rom->uncore, OROM_OFFSET) & OROM_OFFSET_MASK;
+
+ rom->size = 0x200000;
+
+ rom->read32 = spi_read32;
+ rom->read16 = spi_read16;
+
+ return rom;
+}
+
+static u32 pci_read32(struct intel_rom *rom, loff_t offset)
+{
+ return ioread32(rom->oprom + offset);
+}
+
+static u16 pci_read16(struct intel_rom *rom, loff_t offset)
+{
+ return ioread16(rom->oprom + offset);
+}
+
+static void pci_read_block(struct intel_rom *rom, void *data,
+ loff_t offset, size_t size)
+{
+ memcpy_fromio(data, rom->oprom + offset, size);
+}
+
+static void pci_free(struct intel_rom *rom)
+{
+ pci_unmap_rom(rom->pdev, rom->oprom);
+}
+
+struct intel_rom *intel_rom_pci(struct drm_i915_private *i915)
+{
+ struct intel_rom *rom;
+
+ rom = kzalloc(sizeof(*rom), GFP_KERNEL);
+ if (!rom)
+ return NULL;
+
+ rom->pdev = to_pci_dev(i915->drm.dev);
+
+ rom->oprom = pci_map_rom(rom->pdev, &rom->size);
+ if (!rom->oprom) {
+ kfree(rom);
+ return NULL;
+ }
+
+ rom->read32 = pci_read32;
+ rom->read16 = pci_read16;
+ rom->read_block = pci_read_block;
+ rom->free = pci_free;
+
+ return rom;
+}
+
+u32 intel_rom_read32(struct intel_rom *rom, loff_t offset)
+{
+ return rom->read32(rom, offset);
+}
+
+u16 intel_rom_read16(struct intel_rom *rom, loff_t offset)
+{
+ return rom->read16(rom, offset);
+}
+
+void intel_rom_read_block(struct intel_rom *rom, void *data,
+ loff_t offset, size_t size)
+{
+ u32 *ptr = data;
+ loff_t index;
+
+ if (rom->read_block) {
+ rom->read_block(rom, data, offset, size);
+ return;
+ }
+
+ for (index = 0; index < size; index += 4)
+ *ptr++ = rom->read32(rom, offset + index);
+}
+
+loff_t intel_rom_find(struct intel_rom *rom, u32 needle)
+{
+ loff_t offset;
+
+ for (offset = 0; offset < rom->size; offset += 4) {
+ if (rom->read32(rom, offset) == needle)
+ return offset;
+ }
+
+ return -ENOENT;
+}
+
+size_t intel_rom_size(struct intel_rom *rom)
+{
+ return rom->size;
+}
+
+void intel_rom_free(struct intel_rom *rom)
+{
+ if (rom && rom->free)
+ rom->free(rom);
+
+ kfree(rom);
+}
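
The struct hides where the option ROM lives behind read32/read16/read_block/free hooks, so callers can probe the PCI ROM BAR first and fall back to the SPI mirror without branching at every access. A sketch of the intended pattern (the magic value and the drm_dbg() reporting are illustrative, not from this patch):

static int example_scan_rom(struct drm_i915_private *i915, u32 magic)
{
	struct intel_rom *rom;
	loff_t offset;

	rom = intel_rom_pci(i915);
	if (!rom)
		rom = intel_rom_spi(i915);
	if (!rom)
		return -ENOMEM;

	offset = intel_rom_find(rom, magic);
	if (offset >= 0)
		drm_dbg(&i915->drm, "magic found at ROM offset %llx\n",
			(unsigned long long)offset);

	intel_rom_free(rom);
	return offset < 0 ? (int)offset : 0;
}
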
diff --git a/drivers/gpu/drm/i915/soc/intel_rom.h b/drivers/gpu/drm/i915/soc/intel_rom.h
new file mode 100644
index 000000000000..fb2979c8ef7f
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_rom.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_ROM_H__
+#define __INTEL_ROM_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_rom;
+
+struct intel_rom *intel_rom_spi(struct drm_i915_private *i915);
+struct intel_rom *intel_rom_pci(struct drm_i915_private *i915);
+
+u32 intel_rom_read32(struct intel_rom *rom, loff_t offset);
+u16 intel_rom_read16(struct intel_rom *rom, loff_t offset);
+void intel_rom_read_block(struct intel_rom *rom, void *data,
+ loff_t offset, size_t size);
+loff_t intel_rom_find(struct intel_rom *rom, u32 needle);
+size_t intel_rom_size(struct intel_rom *rom);
+void intel_rom_free(struct intel_rom *rom);
+
+#endif /* __INTEL_ROM_H__ */
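The new intel_rom layer above hides whether the option ROM is reached through PCI (pci_map_rom()) or the SPI mailbox registers behind the read32/read16/read_block/free hooks; note that intel_rom_read_block() falls back to a read32 loop for backends like the SPI one that supply no block reader, and that intel_rom_free() tolerates a missing free hook. A hedged usage sketch, with the needle value and buffer handling invented for illustration:

static int example_dump_rom(struct drm_i915_private *i915,
			    void *buf, size_t len)
{
	struct intel_rom *rom;
	loff_t offset;
	int ret = 0;

	rom = intel_rom_pci(i915);	/* or intel_rom_spi(i915) */
	if (!rom)
		return -ENOMEM;

	/* hypothetical signature, not one the driver looks for */
	offset = intel_rom_find(rom, 0x55aa55aa);
	if (offset < 0) {
		ret = offset;
		goto out;
	}

	if (len > intel_rom_size(rom) - offset) {
		ret = -EINVAL;
		goto out;
	}

	intel_rom_read_block(rom, buf, offset, len);
out:
	intel_rom_free(rom);
	return ret;
}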
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index 68291412f4cb..114ae8eb9cd5 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -43,7 +43,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
* to the Valleyview P-unit and not all sideband communications.
*/
if (IS_VALLEYVIEW(i915)) {
- cpu_latency_qos_update_request(&i915->sb_qos, 0);
+ cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos, 0);
on_each_cpu(ping, NULL, 1);
}
}
@@ -51,7 +51,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
static void __vlv_punit_put(struct drm_i915_private *i915)
{
if (IS_VALLEYVIEW(i915))
- cpu_latency_qos_update_request(&i915->sb_qos,
+ cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos,
PM_QOS_DEFAULT_VALUE);
iosf_mbi_punit_release();
@@ -62,12 +62,12 @@ void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
if (ports & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_get(i915);
- mutex_lock(&i915->sb_lock);
+ mutex_lock(&i915->vlv_iosf_sb.lock);
}
void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
{
- mutex_unlock(&i915->sb_lock);
+ mutex_unlock(&i915->vlv_iosf_sb.lock);
if (ports & BIT(VLV_IOSF_SB_PUNIT))
__vlv_punit_put(i915);
@@ -81,7 +81,7 @@ static int vlv_sideband_rw(struct drm_i915_private *i915,
const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
int err;
- lockdep_assert_held(&i915->sb_lock);
+ lockdep_assert_held(&i915->vlv_iosf_sb.lock);
if (port == IOSF_PORT_PUNIT)
iosf_mbi_assert_punit_acquired();
@@ -249,3 +249,21 @@ void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
reg, &val);
}
+
+void vlv_iosf_sb_init(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ mutex_init(&i915->vlv_iosf_sb.lock);
+
+ if (IS_VALLEYVIEW(i915))
+ cpu_latency_qos_add_request(&i915->vlv_iosf_sb.qos, PM_QOS_DEFAULT_VALUE);
+}
+
+void vlv_iosf_sb_fini(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915))
+ cpu_latency_qos_remove_request(&i915->vlv_iosf_sb.qos);
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ mutex_destroy(&i915->vlv_iosf_sb.lock);
+}
diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
index c20cf41b2d39..31813e07c56f 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.h
+++ b/drivers/gpu/drm/i915/vlv_sideband.h
@@ -25,6 +25,9 @@ enum {
VLV_IOSF_SB_PUNIT,
};
+void vlv_iosf_sb_init(struct drm_i915_private *i915);
+void vlv_iosf_sb_fini(struct drm_i915_private *i915);
+
void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
index 94595dde2b96..fc9f311ea1db 100644
--- a/drivers/gpu/drm/i915/vlv_suspend.c
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -13,6 +13,7 @@
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_clock_gating.h"
+#include "intel_uncore_trace.h"
#include "vlv_suspend.h"
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c
index 4deeac7ed40a..2bbdc05a3b97 100644
--- a/drivers/gpu/drm/imagination/pvr_ccb.c
+++ b/drivers/gpu/drm/imagination/pvr_ccb.c
@@ -321,7 +321,7 @@ static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev)
bool reserved = false;
u32 retries = 0;
- while ((jiffies - start_timestamp) < (u32)RESERVE_SLOT_TIMEOUT ||
+ while (time_before(jiffies, start_timestamp + RESERVE_SLOT_TIMEOUT) ||
retries < RESERVE_SLOT_MIN_RETRIES) {
reserved = pvr_kccb_try_reserve_slot(pvr_dev);
if (reserved)
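The pvr_ccb change above swaps an open-coded jiffies comparison for time_before() from <linux/jiffies.h>, which performs the wraparound-safe signed comparison instead of the truncating u32 arithmetic of the old condition. The general idiom, as a sketch (try_the_thing() is a stand-in for whatever is being polled):

unsigned long deadline = jiffies + msecs_to_jiffies(50);

while (time_before(jiffies, deadline)) {
	if (try_the_thing())	/* hypothetical poll */
		break;
	cpu_relax();
}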
diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c
index eded5e955cc0..5edc3c01af72 100644
--- a/drivers/gpu/drm/imagination/pvr_context.c
+++ b/drivers/gpu/drm/imagination/pvr_context.c
@@ -17,10 +17,14 @@
#include <drm/drm_auth.h>
#include <drm/drm_managed.h>
+
+#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/xarray.h>
@@ -69,26 +73,14 @@ process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream
void *stream;
int err;
- stream = kzalloc(stream_size, GFP_KERNEL);
- if (!stream)
- return -ENOMEM;
-
- if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) {
- err = -EFAULT;
- goto err_free;
- }
+ stream = memdup_user(u64_to_user_ptr(stream_user_ptr), stream_size);
+ if (IS_ERR(stream))
+ return PTR_ERR(stream);
err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest);
- if (err)
- goto err_free;
kfree(stream);
- return 0;
-
-err_free:
- kfree(stream);
-
return err;
}
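Here, and again in pvr_job.c further down, a kzalloc() + copy_from_user() pair collapses into memdup_user(), which allocates and copies in one call and encodes failure as ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT). A minimal sketch of the pattern outside the driver (process_blob() is hypothetical):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>	/* memdup_user() */

static int handle_blob(u64 user_ptr, size_t size)
{
	void *buf;
	int err;

	buf = memdup_user(u64_to_user_ptr(user_ptr), size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = process_blob(buf, size);	/* hypothetical consumer */

	kfree(buf);
	return err;
}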
@@ -354,6 +346,10 @@ int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_co
return err;
}
+ spin_lock(&pvr_dev->ctx_list_lock);
+ list_add_tail(&ctx->file_link, &pvr_file->contexts);
+ spin_unlock(&pvr_dev->ctx_list_lock);
+
return 0;
err_destroy_fw_obj:
@@ -380,6 +376,11 @@ pvr_context_release(struct kref *ref_count)
container_of(ref_count, struct pvr_context, ref_count);
struct pvr_device *pvr_dev = ctx->pvr_dev;
+ WARN_ON(in_interrupt());
+ spin_lock(&pvr_dev->ctx_list_lock);
+ list_del(&ctx->file_link);
+ spin_unlock(&pvr_dev->ctx_list_lock);
+
xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id);
pvr_context_destroy_queues(ctx);
pvr_fw_object_destroy(ctx->fw_obj);
@@ -437,11 +438,30 @@ pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
*/
void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
{
+ struct pvr_device *pvr_dev = pvr_file->pvr_dev;
struct pvr_context *ctx;
unsigned long handle;
xa_for_each(&pvr_file->ctx_handles, handle, ctx)
pvr_context_destroy(pvr_file, handle);
+
+ spin_lock(&pvr_dev->ctx_list_lock);
+ ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
+
+ while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) {
+ list_del_init(&ctx->file_link);
+
+ if (pvr_context_get_if_referenced(ctx)) {
+ spin_unlock(&pvr_dev->ctx_list_lock);
+
+ pvr_vm_unmap_all(ctx->vm_ctx);
+
+ pvr_context_put(ctx);
+ spin_lock(&pvr_dev->ctx_list_lock);
+ }
+ ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link);
+ }
+ spin_unlock(&pvr_dev->ctx_list_lock);
}
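The teardown loop above deliberately re-reads the list head on every iteration instead of using list_for_each_entry_safe(): ctx_list_lock has to be dropped around pvr_vm_unmap_all(), and any cached next pointer could be freed while the lock is released. list_del_init() detaches the entry first, so restarting from the head after relocking is always safe. Stripped of driver specifics, the shape is (all names illustrative):

spin_lock(&dev->list_lock);
item = list_first_entry(&file->items, struct item, link);
while (!list_entry_is_head(item, &file->items, link)) {
	list_del_init(&item->link);
	if (item_get_if_referenced(item)) {	/* kref_get_unless_zero() wrapper */
		spin_unlock(&dev->list_lock);
		item_heavy_cleanup(item);	/* may sleep */
		item_put(item);
		spin_lock(&dev->list_lock);
	}
	item = list_first_entry(&file->items, struct item, link);
}
spin_unlock(&dev->list_lock);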
/**
@@ -451,6 +471,7 @@ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
void pvr_context_device_init(struct pvr_device *pvr_dev)
{
xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
+ spin_lock_init(&pvr_dev->ctx_list_lock);
}
/**
diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h
index 0c7b97dfa6ba..07afa179cdf4 100644
--- a/drivers/gpu/drm/imagination/pvr_context.h
+++ b/drivers/gpu/drm/imagination/pvr_context.h
@@ -85,6 +85,9 @@ struct pvr_context {
/** @transfer: Transfer queue. */
struct pvr_queue *transfer;
} queues;
+
+ /** @file_link: pvr_file PVR context list link. */
+ struct list_head file_link;
};
static __always_inline struct pvr_queue *
@@ -124,6 +127,24 @@ pvr_context_get(struct pvr_context *ctx)
}
/**
+ * pvr_context_get_if_referenced() - Take an additional reference on a still
+ * referenced context.
+ * @ctx: Context pointer.
+ *
+ * Call pvr_context_put() to release.
+ *
+ * Returns:
+ * * true on success, or
+ * * false if no context pointer was passed, or the context was no
+ *   longer referenced.
+ */
+static __always_inline bool
+pvr_context_get_if_referenced(struct pvr_context *ctx)
+{
+ return ctx != NULL && kref_get_unless_zero(&ctx->ref_count) != 0;
+}
+
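This helper exists for the teardown race above: by the time pvr_destroy_contexts_for_file() walks the list, a context may already be executing its final pvr_context_put(), and a plain kref_get() on a zero count would resurrect a dying object. kref_get_unless_zero() only succeeds while the count is still nonzero, so callers follow the shape:

/* hedged usage sketch; do_something() is a placeholder */
if (pvr_context_get_if_referenced(ctx)) {
	do_something(ctx);
	pvr_context_put(ctx);
}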
+/**
* pvr_context_lookup() - Lookup context pointer from handle and file.
* @pvr_file: Pointer to pvr_file structure.
* @handle: Context handle.
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index b574e23d484b..6d0dfacb677b 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -293,6 +294,12 @@ struct pvr_device {
/** @sched_wq: Workqueue for schedulers. */
struct workqueue_struct *sched_wq;
+
+ /**
+ * @ctx_list_lock: Lock to be held when accessing the context list in
+ * struct pvr_file.
+ */
+ spinlock_t ctx_list_lock;
};
/**
@@ -344,6 +351,9 @@ struct pvr_file {
* This array is used to allocate handles returned to userspace.
*/
struct xarray vm_ctx_handles;
+
+ /** @contexts: PVR context list. */
+ struct list_head contexts;
};
/**
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index 1a0cb7aa9cea..0639502137b4 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -28,6 +28,7 @@
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -220,7 +221,7 @@ err_drm_dev_exit:
return ret;
}
-static __always_inline u64
+static __always_inline __maybe_unused u64
pvr_fw_version_packed(u32 major, u32 minor)
{
return ((u64)major << 32) | minor;
@@ -1326,6 +1327,8 @@ pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
*/
pvr_file->pvr_dev = pvr_dev;
+ INIT_LIST_HEAD(&pvr_file->contexts);
+
xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
@@ -1384,7 +1387,6 @@ static struct drm_driver pvr_drm_driver = {
.name = PVR_DRIVER_NAME,
.desc = PVR_DRIVER_DESC,
- .date = PVR_DRIVER_DATE,
.major = PVR_DRIVER_MAJOR,
.minor = PVR_DRIVER_MINOR,
.patchlevel = PVR_DRIVER_PATCHLEVEL,
@@ -1482,7 +1484,7 @@ static const struct dev_pm_ops pvr_pm_ops = {
static struct platform_driver pvr_driver = {
.probe = pvr_probe,
- .remove_new = pvr_remove,
+ .remove = pvr_remove,
.driver = {
.name = PVR_DRIVER_NAME,
.pm = &pvr_pm_ops,
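The .remove_new -> .remove renames repeated throughout this series track the platform bus conversion: .remove now carries the void-returning signature that was temporarily named .remove_new, reflecting that an error code returned from remove() was never usefully acted on. Sketched generically (example_probe() assumed to exist elsewhere):

static void example_remove(struct platform_device *pdev)
{
	/* undo probe(); there is nothing useful to return on failure */
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,	/* formerly .remove_new */
	.driver	= {
		.name = "example",
	},
};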
@@ -1494,5 +1496,5 @@ module_platform_driver(pvr_driver);
MODULE_AUTHOR("Imagination Technologies Ltd.");
MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
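The MODULE_IMPORT_NS change is part of the treewide switch of symbol-namespace macros from bare preprocessor tokens to string literals; both the import and export sides now take quoted names, roughly:

MODULE_IMPORT_NS("DMA_BUF");
EXPORT_SYMBOL_NS_GPL(my_symbol, "MY_NS");	/* hypothetical export */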
diff --git a/drivers/gpu/drm/imagination/pvr_drv.h b/drivers/gpu/drm/imagination/pvr_drv.h
index 378fe477b759..7fa147312dd1 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.h
+++ b/drivers/gpu/drm/imagination/pvr_drv.h
@@ -9,7 +9,6 @@
#define PVR_DRIVER_NAME "powervr"
#define PVR_DRIVER_DESC "Imagination PowerVR (Series 6 and later) & IMG Graphics"
-#define PVR_DRIVER_DATE "20230904"
/*
* Driver interface version:
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 78c2f3c6dce0..618503a212a7 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -90,20 +90,13 @@ static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job,
void *stream;
int err;
- stream = kzalloc(stream_len, GFP_KERNEL);
- if (!stream)
- return -ENOMEM;
-
- if (copy_from_user(stream, u64_to_user_ptr(stream_userptr), stream_len)) {
- err = -EFAULT;
- goto err_free_stream;
- }
+ stream = memdup_user(u64_to_user_ptr(stream_userptr), stream_len);
+ if (IS_ERR(stream))
+ return PTR_ERR(stream);
err = pvr_job_process_stream(pvr_dev, stream_def, stream, stream_len, job);
-err_free_stream:
kfree(stream);
-
return err;
}
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 20cb46012082..c4f08432882b 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -782,7 +782,7 @@ static void pvr_queue_start(struct pvr_queue *queue)
}
}
- drm_sched_start(&queue->scheduler);
+ drm_sched_start(&queue->scheduler, 0);
}
/**
@@ -842,7 +842,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job)
}
mutex_unlock(&pvr_dev->queues.lock);
- drm_sched_start(sched);
+ drm_sched_start(sched, 0);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
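drm_sched_start() gained a second parameter in this cycle; as I read the scheduler change, it is an errno applied to unfinished jobs when restarting after a reset, with 0 (used by every caller in this series) meaning resume without signalling an error:

drm_sched_start(sched, 0);		/* resume normally */
drm_sched_start(sched, -ECANCELED);	/* hypothetical: fail pending jobs */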
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 97c0f772ed65..363f885a7098 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -14,6 +14,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>
+#include <linux/bug.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -597,12 +598,26 @@ err_free:
}
/**
- * pvr_vm_context_release() - Teardown a VM context.
- * @ref_count: Pointer to reference counter of the VM context.
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
*
* This function ensures that no mappings are left dangling by unmapping them
* all in order of ascending device-virtual address.
*/
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+ WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
+ vm_ctx->gpuvm_mgr.mm_range));
+}
+
+/**
+ * pvr_vm_context_release() - Teardown a VM context.
+ * @ref_count: Pointer to reference counter of the VM context.
+ *
+ * This function also ensures that no mappings are left dangling by calling
+ * pvr_vm_unmap_all().
+ */
static void
pvr_vm_context_release(struct kref *ref_count)
{
@@ -612,8 +627,7 @@ pvr_vm_context_release(struct kref *ref_count)
if (vm_ctx->fw_mem_ctx_obj)
pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
- WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
- vm_ctx->gpuvm_mgr.mm_range));
+ pvr_vm_unmap_all(vm_ctx);
pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
drm_gem_private_object_fini(&vm_ctx->dummy_gem);
@@ -640,9 +654,7 @@ pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
xa_lock(&pvr_file->vm_ctx_handles);
vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
- if (vm_ctx)
- kref_get(&vm_ctx->ref_count);
-
+ pvr_vm_context_get(vm_ctx);
xa_unlock(&pvr_file->vm_ctx_handles);
return vm_ctx;
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
index f2a6463f2b05..79406243617c 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.h
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -39,6 +39,7 @@ int pvr_vm_map(struct pvr_vm_context *vm_ctx,
struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
u64 device_addr, u64 size);
int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
+void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx);
struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx);
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
index 59e3b6a1dff0..e014ed3ae66c 100644
--- a/drivers/gpu/drm/imx/dcss/Kconfig
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -1,12 +1,13 @@
config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
- depends on DRM && ARCH_MXC && ARM64
+ depends on DRM && ((ARCH_MXC && ARM64) || COMPILE_TEST)
help
Choose this if you have an NXP i.MX8MQ based system and want to use the
Display Controller Subsystem. This option enables DCSS support.
diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
index 31267c00782f..af91e45b5d13 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-crtc.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
@@ -206,15 +206,13 @@ int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm)
if (crtc->irq < 0)
return crtc->irq;
- ret = request_irq(crtc->irq, dcss_crtc_irq_handler,
- 0, "dcss_drm", crtc);
+ ret = request_irq(crtc->irq, dcss_crtc_irq_handler, IRQF_NO_AUTOEN,
+ "dcss_drm", crtc);
if (ret) {
dev_err(dcss->dev, "irq request failed with %d.\n", ret);
return ret;
}
- disable_irq(crtc->irq);
-
return 0;
}
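Several hunks in this series (dcss-crtc here, dcss-dtg and ipuv3-crtc below) fold a request_irq() + disable_irq() pair into the IRQF_NO_AUTOEN flag, which registers the handler with the line left disabled and so removes the window in which the IRQ could fire before the explicit disable_irq(). The idiom in isolation:

ret = request_irq(irq, example_handler, IRQF_NO_AUTOEN, "example", ctx);
if (ret)
	return ret;

/* later, once the device may legitimately interrupt: */
enable_irq(irq);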
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index d881f5a34760..19b027cc1dc4 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -112,7 +112,7 @@ MODULE_DEVICE_TABLE(of, dcss_of_match);
static struct platform_driver dcss_platform_driver = {
.probe = dcss_drv_platform_probe,
- .remove_new = dcss_drv_platform_remove,
+ .remove = dcss_drv_platform_remove,
.shutdown = dcss_drv_platform_shutdown,
.driver = {
.name = "imx-dcss",
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dtg.c b/drivers/gpu/drm/imx/dcss/dcss-dtg.c
index 2968f5d5bd41..6bbfd9aa27ac 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dtg.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dtg.c
@@ -134,14 +134,12 @@ static int dcss_dtg_irq_config(struct dcss_dtg *dtg,
dtg->base_reg + DCSS_DTG_INT_MASK);
ret = request_irq(dtg->ctxld_kick_irq, dcss_dtg_irq_handler,
- 0, "dcss_ctxld_kick", dtg);
+ IRQF_NO_AUTOEN, "dcss_ctxld_kick", dtg);
if (ret) {
dev_err(dtg->dev, "dtg: irq request failed.\n");
return ret;
}
- disable_irq(dtg->ctxld_kick_irq);
-
dtg->ctxld_kick_irq_en = false;
return 0;
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index d0ea4e97cded..3633e8f3aff6 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -3,8 +3,10 @@
* Copyright 2019 NXP.
*/
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -28,10 +30,10 @@ static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
static const struct drm_driver dcss_kms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
- .date = "20190917",
.major = 1,
.minor = 0,
.patchlevel = 0,
@@ -145,7 +147,7 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
if (ret)
goto cleanup_crtc;
- drm_fbdev_dma_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return kms;
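This is the recurring fbdev conversion in the series: instead of calling drm_fbdev_dma_setup(drm, bpp) directly, a driver declares DRM_FBDEV_DMA_DRIVER_OPS (or the TTM variant) in its struct drm_driver and calls drm_client_setup(drm, NULL), with NULL meaning the preferred format. Condensed from the hunks here:

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
	DRM_FBDEV_DMA_DRIVER_OPS,	/* fbdev client hooks */
	/* ... */
};

/* at the end of bind/probe: */
drm_client_setup(drm, NULL);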
diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
index 825728c356ff..32c3f46b21da 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-scaler.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
@@ -136,7 +136,7 @@ static int div_q(int A, int B)
else
temp -= B / 2;
- result = (int)(temp / B);
+ result = div_s64(temp, B);
return result;
}
@@ -239,7 +239,7 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
ll_temp = coef[phase][i];
ll_temp <<= PSC_COEFF_PRECISION;
ll_temp += sum >> 1;
- ll_temp /= sum;
+ ll_temp = div_s64(ll_temp, sum);
coef[phase][i] = (int)ll_temp;
}
}
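Both dcss-scaler changes replace plain C division on s64 temporaries with div_s64() from <linux/math64.h>: on 32-bit kernels a 64-by-32 signed division would otherwise emit a call to the libgcc helper __divdi3, which the kernel does not link. A generic rounded-division sketch:

#include <linux/math64.h>

static int div_round_q(s64 num, s32 den)
{
	if (num >= 0)
		num += den / 2;
	else
		num -= den / 2;

	return (int)div_s64(num, den);	/* not "num / den" */
}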
diff --git a/drivers/gpu/drm/imx/ipuv3/Kconfig b/drivers/gpu/drm/imx/ipuv3/Kconfig
index bacf0655ebaf..acaf25089001 100644
--- a/drivers/gpu/drm/imx/ipuv3/Kconfig
+++ b/drivers/gpu/drm/imx/ipuv3/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_IMX
tristate "DRM Support for Freescale i.MX"
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select DRM_GEM_DMA_HELPER
@@ -11,8 +12,11 @@ config DRM_IMX
config DRM_IMX_PARALLEL_DISPLAY
tristate "Support for parallel displays"
- select DRM_PANEL
depends on DRM_IMX
+ select DRM_BRIDGE
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_IMX_LEGACY_BRIDGE
+ select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
config DRM_IMX_TVE
@@ -26,9 +30,13 @@ config DRM_IMX_TVE
config DRM_IMX_LDB
tristate "Support for LVDS displays"
- depends on DRM_IMX && MFD_SYSCON
+ depends on DRM_IMX
depends on COMMON_CLK
- select DRM_PANEL
+ select MFD_SYSCON
+ select DRM_BRIDGE
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_PANEL_BRIDGE
+ select DRM_IMX_LEGACY_BRIDGE
help
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
diff --git a/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
index 0006ea52b83c..8333c4bf7369 100644
--- a/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
@@ -265,7 +265,7 @@ static void dw_hdmi_imx_remove(struct platform_device *pdev)
static struct platform_driver dw_hdmi_imx_platform_driver = {
.probe = dw_hdmi_imx_probe,
- .remove_new = dw_hdmi_imx_remove,
+ .remove = dw_hdmi_imx_remove,
.driver = {
.name = "dwhdmi-imx",
.of_match_table = dw_hdmi_imx_dt_ids,
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
index 4cfabcf7375a..ec5fd9a01f1e 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
@@ -13,6 +13,7 @@
#include <video/imx-ipu-v3.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
@@ -34,13 +35,6 @@ module_param(legacyfb_depth, int, 0444);
DEFINE_DRM_GEM_DMA_FOPS(imx_drm_driver_fops);
-void imx_drm_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
-
static int imx_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -163,12 +157,12 @@ static int imx_drm_dumb_create(struct drm_file *file_priv,
static const struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
.ioctls = imx_drm_ioctls,
.num_ioctls = ARRAY_SIZE(imx_drm_ioctls),
.fops = &imx_drm_driver_fops,
.name = "imx-drm",
.desc = "i.MX DRM graphics",
- .date = "20120507",
.major = 1,
.minor = 0,
.patchlevel = 0,
@@ -249,7 +243,7 @@ static int imx_drm_bind(struct device *dev)
if (ret)
goto err_poll_fini;
- drm_fbdev_dma_setup(drm, legacyfb_depth);
+ drm_client_setup_with_color_mode(drm, legacyfb_depth);
return 0;
@@ -330,7 +324,7 @@ MODULE_DEVICE_TABLE(of, imx_drm_dt_ids);
static struct platform_driver imx_drm_pdrv = {
.probe = imx_drm_platform_probe,
- .remove_new = imx_drm_platform_remove,
+ .remove = imx_drm_platform_remove,
.shutdown = imx_drm_platform_shutdown,
.driver = {
.name = "imx-drm",
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm.h b/drivers/gpu/drm/imx/ipuv3/imx-drm.h
index e721bebda2bd..0c85bf83ffbf 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-drm.h
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm.h
@@ -3,14 +3,9 @@
#define _IMX_DRM_H_
struct device_node;
-struct drm_crtc;
struct drm_connector;
struct drm_device;
-struct drm_display_mode;
struct drm_encoder;
-struct drm_framebuffer;
-struct drm_plane;
-struct platform_device;
struct imx_crtc_state {
struct drm_crtc_state base;
@@ -24,21 +19,12 @@ static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s)
{
return container_of(s, struct imx_crtc_state, base);
}
-int imx_drm_init_drm(struct platform_device *pdev,
- int preferred_bpp);
-int imx_drm_exit_drm(void);
extern struct platform_driver ipu_drm_driver;
-void imx_drm_mode_config_init(struct drm_device *drm);
-
-struct drm_gem_dma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
-
int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
-void imx_drm_connector_destroy(struct drm_connector *connector);
-
int ipu_planes_assign_pre(struct drm_device *dev,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
index 793dfb1a3ed0..6be7a57ad03d 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
@@ -19,19 +19,16 @@
#include <linux/regmap.h>
#include <linux/videodev2.h>
-#include <video/of_display_timing.h>
-#include <video/of_videomode.h>
-
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_edid.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/bridge/imx.h>
#include "imx-drm.h"
@@ -55,7 +52,6 @@
struct imx_ldb_channel;
struct imx_ldb_encoder {
- struct drm_connector connector;
struct drm_encoder encoder;
struct imx_ldb_channel *channel;
};
@@ -65,25 +61,13 @@ struct imx_ldb;
struct imx_ldb_channel {
struct imx_ldb *ldb;
- /* Defines what is connected to the ldb, only one at a time */
- struct drm_panel *panel;
struct drm_bridge *bridge;
struct device_node *child;
- struct i2c_adapter *ddc;
int chno;
- const struct drm_edid *drm_edid;
- struct drm_display_mode mode;
- int mode_valid;
u32 bus_format;
- u32 bus_flags;
};
-static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
-{
- return container_of(c, struct imx_ldb_encoder, connector)->channel;
-}
-
static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
{
return container_of(e, struct imx_ldb_encoder, encoder)->channel;
@@ -133,38 +117,6 @@ static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
}
}
-static int imx_ldb_connector_get_modes(struct drm_connector *connector)
-{
- struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
- int num_modes;
-
- num_modes = drm_panel_get_modes(imx_ldb_ch->panel, connector);
- if (num_modes > 0)
- return num_modes;
-
- if (!imx_ldb_ch->drm_edid && imx_ldb_ch->ddc) {
- imx_ldb_ch->drm_edid = drm_edid_read_ddc(connector,
- imx_ldb_ch->ddc);
- drm_edid_connector_update(connector, imx_ldb_ch->drm_edid);
- }
-
- if (imx_ldb_ch->drm_edid)
- num_modes = drm_edid_connector_add_modes(connector);
-
- if (imx_ldb_ch->mode_valid) {
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &imx_ldb_ch->mode);
- if (!mode)
- return -EINVAL;
- mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
- num_modes++;
- }
-
- return num_modes;
-}
-
static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
unsigned long serial_clk, unsigned long di_clk)
{
@@ -205,8 +157,6 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
return;
}
- drm_panel_prepare(imx_ldb_ch->panel);
-
if (dual) {
clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]);
clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]);
@@ -245,8 +195,6 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
}
regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
-
- drm_panel_enable(imx_ldb_ch->panel);
}
static void
@@ -323,8 +271,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
int mux, ret;
- drm_panel_disable(imx_ldb_ch->panel);
-
if (imx_ldb_ch == &ldb->channel[0] || dual)
ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
if (imx_ldb_ch == &ldb->channel[1] || dual)
@@ -358,8 +304,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
dev_err(ldb->dev,
"unable to set di%d parent clock to original parent\n",
mux);
-
- drm_panel_unprepare(imx_ldb_ch->panel);
}
static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
@@ -374,11 +318,12 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
/* Bus format description in DT overrides connector display info. */
if (!bus_format && di->num_bus_formats) {
bus_format = di->bus_formats[0];
- imx_crtc_state->bus_flags = di->bus_flags;
} else {
bus_format = imx_ldb_ch->bus_format;
- imx_crtc_state->bus_flags = imx_ldb_ch->bus_flags;
}
+
+ imx_crtc_state->bus_flags = di->bus_flags;
+
switch (bus_format) {
case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
@@ -398,18 +343,6 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
}
-static const struct drm_connector_funcs imx_ldb_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = imx_drm_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
- .get_modes = imx_ldb_connector_get_modes,
-};
-
static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
.atomic_mode_set = imx_ldb_encoder_atomic_mode_set,
.enable = imx_ldb_encoder_enable,
@@ -447,7 +380,6 @@ static int imx_ldb_register(struct drm_device *drm,
return PTR_ERR(ldb_encoder);
ldb_encoder->channel = imx_ldb_ch;
- connector = &ldb_encoder->connector;
encoder = &ldb_encoder->encoder;
ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
@@ -466,25 +398,16 @@ static int imx_ldb_register(struct drm_device *drm,
drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
- if (imx_ldb_ch->bridge) {
- ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0);
- if (ret)
- return ret;
- } else {
- /*
- * We want to add the connector whenever there is no bridge
- * that brings its own, not only when there is a panel. For
- * historical reasons, the ldb driver can also work without
- * a panel.
- */
- drm_connector_helper_add(connector,
- &imx_ldb_connector_helper_funcs);
- drm_connector_init_with_ddc(drm, connector,
- &imx_ldb_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS,
- imx_ldb_ch->ddc);
- drm_connector_attach_encoder(connector, encoder);
- }
+ ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector))
+ return PTR_ERR(connector);
+
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
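With its bespoke connector deleted, imx-ldb now follows the standard bridge flow seen again in parallel-display.c below: attach the bridge chain with DRM_BRIDGE_ATTACH_NO_CONNECTOR, then let drm_bridge_connector_init() synthesize a connector from the chain. The three-call sequence, in isolation:

ret = drm_bridge_attach(encoder, bridge, NULL,
			DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
	return ret;

connector = drm_bridge_connector_init(drm, encoder);
if (IS_ERR(connector))
	return PTR_ERR(connector);

drm_connector_attach_encoder(connector, encoder);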
@@ -549,47 +472,6 @@ static const struct of_device_id imx_ldb_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids);
-static int imx_ldb_panel_ddc(struct device *dev,
- struct imx_ldb_channel *channel, struct device_node *child)
-{
- struct device_node *ddc_node;
- int ret;
-
- ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
- if (ddc_node) {
- channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
- of_node_put(ddc_node);
- if (!channel->ddc) {
- dev_warn(dev, "failed to get ddc i2c adapter\n");
- return -EPROBE_DEFER;
- }
- }
-
- if (!channel->ddc) {
- const void *edidp;
- int edid_len;
-
- /* if no DDC available, fallback to hardcoded EDID */
- dev_dbg(dev, "no ddc available\n");
-
- edidp = of_get_property(child, "edid", &edid_len);
- if (edidp) {
- channel->drm_edid = drm_edid_alloc(edidp, edid_len);
- if (!channel->drm_edid)
- return -ENOMEM;
- } else if (!channel->panel) {
- /* fallback to display-timings node */
- ret = of_get_drm_display_mode(child,
- &channel->mode,
- &channel->bus_flags,
- OF_USE_NATIVE_MODE);
- if (!ret)
- channel->mode_valid = 1;
- }
- }
- return 0;
-}
-
static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = data;
@@ -694,29 +576,22 @@ static int imx_ldb_probe(struct platform_device *pdev)
* The output port is port@4 with an external 4-port mux or
* port@2 with the internal 2-port mux.
*/
- ret = drm_of_find_panel_or_bridge(child,
- imx_ldb->lvds_mux ? 4 : 2, 0,
- &channel->panel, &channel->bridge);
- if (ret && ret != -ENODEV)
- goto free_child;
-
- /* panel ddc only if there is no bridge */
- if (!channel->bridge) {
- ret = imx_ldb_panel_ddc(dev, channel, child);
- if (ret)
+ channel->bridge = devm_drm_of_get_bridge(dev, child,
+ imx_ldb->lvds_mux ? 4 : 2, 0);
+ if (IS_ERR(channel->bridge)) {
+ ret = PTR_ERR(channel->bridge);
+ if (ret != -ENODEV)
goto free_child;
+ channel->bridge = NULL;
}
bus_format = of_get_bus_format(dev, child);
- if (bus_format == -EINVAL) {
- /*
- * If no bus format was specified in the device tree,
- * we can still get it from the connected panel later.
- */
- if (channel->panel && channel->panel->funcs &&
- channel->panel->funcs->get_modes)
- bus_format = 0;
- }
+ /*
+ * If no bus format was specified in the device tree,
+ * we can still get it from the connected panel later.
+ */
+ if (bus_format == -EINVAL && channel->bridge)
+ bus_format = 0;
if (bus_format < 0) {
dev_err(dev, "could not determine data mapping: %d\n",
bus_format);
@@ -724,6 +599,20 @@ static int imx_ldb_probe(struct platform_device *pdev)
goto free_child;
}
channel->bus_format = bus_format;
+
+ /*
+ * legacy bridge doesn't handle bus_format, so create it after
+ * checking the bus_format property.
+ */
+ if (!channel->bridge) {
+ channel->bridge = devm_imx_drm_legacy_bridge(dev, child,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(channel->bridge)) {
+ ret = PTR_ERR(channel->bridge);
+ goto free_child;
+ }
+ }
+
channel->child = child;
}
@@ -738,22 +627,12 @@ free_child:
static void imx_ldb_remove(struct platform_device *pdev)
{
- struct imx_ldb *imx_ldb = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < 2; i++) {
- struct imx_ldb_channel *channel = &imx_ldb->channel[i];
-
- drm_edid_free(channel->drm_edid);
- i2c_put_adapter(channel->ddc);
- }
-
component_del(&pdev->dev, &imx_ldb_ops);
}
static struct platform_driver imx_ldb_driver = {
.probe = imx_ldb_probe,
- .remove_new = imx_ldb_remove,
+ .remove = imx_ldb_remove,
.driver = {
.of_match_table = imx_ldb_dt_ids,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
index 29f494bfff67..3a3c8a195119 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
@@ -305,9 +305,15 @@ static int imx_tve_atomic_check(struct drm_encoder *encoder,
return 0;
}
+static void imx_tve_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
static const struct drm_connector_funcs imx_tve_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = imx_drm_connector_destroy,
+ .destroy = imx_tve_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -656,7 +662,7 @@ MODULE_DEVICE_TABLE(of, imx_tve_dt_ids);
static struct platform_driver imx_tve_driver = {
.probe = imx_tve_probe,
- .remove_new = imx_tve_remove,
+ .remove = imx_tve_remove,
.driver = {
.of_match_table = imx_tve_dt_ids,
.name = "imx-tve",
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
index ef29c9a61a46..cf7b02b2d52c 100644
--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
@@ -410,14 +410,12 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
}
ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
- ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0,
- "imx_drm", ipu_crtc);
+ ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler,
+ IRQF_NO_AUTOEN, "imx_drm", ipu_crtc);
if (ret < 0) {
dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
return ret;
}
- /* Only enable IRQ when we actually need it to trigger work. */
- disable_irq(ipu_crtc->irq);
return 0;
}
@@ -451,5 +449,5 @@ struct platform_driver ipu_drm_driver = {
.name = "imx-ipuv3-crtc",
},
.probe = ipu_drm_probe,
- .remove_new = ipu_drm_remove,
+ .remove = ipu_drm_remove,
};
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 91d7808a2d8d..9e66eb77b1eb 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -12,21 +12,18 @@
#include <linux/platform_device.h>
#include <linux/videodev2.h>
-#include <video/of_display_timing.h>
-
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_edid.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/bridge/imx.h>
#include "imx-drm.h"
struct imx_parallel_display_encoder {
- struct drm_connector connector;
struct drm_encoder encoder;
struct drm_bridge bridge;
struct imx_parallel_display *pd;
@@ -34,79 +31,15 @@ struct imx_parallel_display_encoder {
struct imx_parallel_display {
struct device *dev;
- const struct drm_edid *drm_edid;
u32 bus_format;
- u32 bus_flags;
- struct drm_display_mode mode;
- struct drm_panel *panel;
struct drm_bridge *next_bridge;
};
-static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
-{
- return container_of(c, struct imx_parallel_display_encoder, connector)->pd;
-}
-
static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
}
-static int imx_pd_connector_get_modes(struct drm_connector *connector)
-{
- struct imx_parallel_display *imxpd = con_to_imxpd(connector);
- struct device_node *np = imxpd->dev->of_node;
- int num_modes;
-
- num_modes = drm_panel_get_modes(imxpd->panel, connector);
- if (num_modes > 0)
- return num_modes;
-
- if (imxpd->drm_edid) {
- drm_edid_connector_update(connector, imxpd->drm_edid);
- num_modes = drm_edid_connector_add_modes(connector);
- }
-
- if (np) {
- struct drm_display_mode *mode = drm_mode_create(connector->dev);
- int ret;
-
- if (!mode)
- return 0;
-
- ret = of_get_drm_display_mode(np, &imxpd->mode,
- &imxpd->bus_flags,
- OF_USE_NATIVE_MODE);
- if (ret) {
- drm_mode_destroy(connector->dev, mode);
- return 0;
- }
-
- drm_mode_copy(mode, &imxpd->mode);
- mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
- num_modes++;
- }
-
- return num_modes;
-}
-
-static void imx_pd_bridge_enable(struct drm_bridge *bridge)
-{
- struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
-
- drm_panel_prepare(imxpd->panel);
- drm_panel_enable(imxpd->panel);
-}
-
-static void imx_pd_bridge_disable(struct drm_bridge *bridge)
-{
- struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
-
- drm_panel_disable(imxpd->panel);
- drm_panel_unprepare(imxpd->panel);
-}
-
static const u32 imx_pd_bus_fmts[] = {
MEDIA_BUS_FMT_RGB888_1X24,
MEDIA_BUS_FMT_BGR888_1X24,
@@ -200,7 +133,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
{
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct drm_display_info *di = &conn_state->connector->display_info;
- struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
struct drm_bridge_state *next_bridge_state = NULL;
struct drm_bridge *next_bridge;
u32 bus_flags, bus_fmt;
@@ -212,10 +144,8 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
if (next_bridge_state)
bus_flags = next_bridge_state->input_bus_cfg.flags;
- else if (di->num_bus_formats)
- bus_flags = di->bus_flags;
else
- bus_flags = imxpd->bus_flags;
+ bus_flags = di->bus_flags;
bus_fmt = bridge_state->input_bus_cfg.format;
if (!imx_pd_format_supported(bus_fmt))
@@ -231,21 +161,16 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
return 0;
}
-static const struct drm_connector_funcs imx_pd_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = imx_drm_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
+static int imx_pd_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
-static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
- .get_modes = imx_pd_connector_get_modes,
-};
+ return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags);
+}
static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
- .enable = imx_pd_bridge_enable,
- .disable = imx_pd_bridge_disable,
+ .attach = imx_pd_bridge_attach,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -270,7 +195,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(imxpd_encoder);
imxpd_encoder->pd = imxpd;
- connector = &imxpd_encoder->connector;
encoder = &imxpd_encoder->encoder;
bridge = &imxpd_encoder->bridge;
@@ -278,28 +202,14 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- /* set the connector's dpms to OFF so that
- * drm_helper_connector_dpms() won't return
- * immediately since the current state is ON
- * at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
-
bridge->funcs = &imx_pd_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL, 0);
-
- if (imxpd->next_bridge) {
- ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0);
- if (ret < 0)
- return ret;
- } else {
- drm_connector_helper_add(connector,
- &imx_pd_connector_helper_funcs);
- drm_connector_init(drm, connector, &imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
- drm_connector_attach_encoder(connector, encoder);
- }
+ drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector))
+ return PTR_ERR(connector);
+
+ drm_connector_attach_encoder(connector, encoder);
return 0;
}
@@ -312,9 +222,7 @@ static int imx_pd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- const u8 *edidp;
struct imx_parallel_display *imxpd;
- int edid_len;
int ret;
u32 bus_format = 0;
const char *fmt;
@@ -324,14 +232,13 @@ static int imx_pd_probe(struct platform_device *pdev)
return -ENOMEM;
/* port@1 is the output port */
- ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
- &imxpd->next_bridge);
- if (ret && ret != -ENODEV)
+ imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
+ if (imxpd->next_bridge == ERR_PTR(-ENODEV))
+ imxpd->next_bridge = devm_imx_drm_legacy_bridge(dev, np, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(imxpd->next_bridge)) {
+ ret = PTR_ERR(imxpd->next_bridge);
return ret;
-
- edidp = of_get_property(np, "edid", &edid_len);
- if (edidp)
- imxpd->drm_edid = drm_edid_alloc(edidp, edid_len);
+ }
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
@@ -355,11 +262,7 @@ static int imx_pd_probe(struct platform_device *pdev)
static void imx_pd_remove(struct platform_device *pdev)
{
- struct imx_parallel_display *imxpd = platform_get_drvdata(pdev);
-
component_del(&pdev->dev, &imx_pd_ops);
-
- drm_edid_free(imxpd->drm_edid);
}
static const struct of_device_id imx_pd_dt_ids[] = {
@@ -370,7 +273,7 @@ MODULE_DEVICE_TABLE(of, imx_pd_dt_ids);
static struct platform_driver imx_pd_driver = {
.probe = imx_pd_probe,
- .remove_new = imx_pd_remove,
+ .remove = imx_pd_remove,
.driver = {
.of_match_table = imx_pd_dt_ids,
.name = "imx-parallel-display",
diff --git a/drivers/gpu/drm/imx/lcdc/Kconfig b/drivers/gpu/drm/imx/lcdc/Kconfig
index 9c28bb0f4662..75869489b0e6 100644
--- a/drivers/gpu/drm/imx/lcdc/Kconfig
+++ b/drivers/gpu/drm/imx/lcdc/Kconfig
@@ -1,6 +1,7 @@
config DRM_IMX_LCDC
tristate "Freescale i.MX LCDC displays"
depends on DRM && (ARCH_MXC || COMPILE_TEST)
+ select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index 36668455aee8..8d6a0bb31c48 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: 2020 Marian Cichy <M.Cichy@pengutronix.de>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_damage_helper.h>
@@ -348,9 +349,9 @@ static struct drm_driver imx_lcdc_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &imx_lcdc_drm_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.name = "imx-lcdc",
.desc = "i.MX LCDC driver",
- .date = "20200716",
};
static const struct of_device_id imx_lcdc_of_dev_id[] = {
@@ -501,7 +502,7 @@ static int imx_lcdc_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "Cannot register device\n");
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
@@ -525,7 +526,7 @@ static struct platform_driver imx_lcdc_driver = {
.of_match_table = imx_lcdc_of_dev_id,
},
.probe = imx_lcdc_probe,
- .remove_new = imx_lcdc_remove,
+ .remove = imx_lcdc_remove,
.shutdown = imx_lcdc_shutdown,
};
module_platform_driver(imx_lcdc_driver);
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 8cd7b750dffe..04ecfb0c5dd6 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -6,6 +6,7 @@ config DRM_INGENIC
depends on OF
depends on COMMON_CLK
select DRM_BRIDGE
+ select DRM_CLIENT_SELECTION
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 39fa291f43dd..c23ee2d214de 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -20,6 +20,7 @@
#include <linux/pm.h>
#include <linux/regmap.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -952,7 +953,6 @@ static const struct drm_driver ingenic_drm_driver_data = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "ingenic-drm",
.desc = "DRM module for Ingenic SoCs",
- .date = "20200716",
.major = 1,
.minor = 1,
.patchlevel = 0,
@@ -960,6 +960,7 @@ static const struct drm_driver ingenic_drm_driver_data = {
.fops = &ingenic_drm_fops,
.gem_create_object = ingenic_drm_gem_create_object,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
};
static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = {
@@ -1399,7 +1400,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
goto err_clk_notifier_unregister;
}
- drm_fbdev_dma_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return 0;
@@ -1630,7 +1631,7 @@ static struct platform_driver ingenic_drm_driver = {
.of_match_table = of_match_ptr(ingenic_drm_of_match),
},
.probe = ingenic_drm_probe,
- .remove_new = ingenic_drm_remove,
+ .remove = ingenic_drm_remove,
.shutdown = ingenic_drm_shutdown,
};
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index 5bd9072352b5..26ebf424d63e 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -991,7 +991,7 @@ static struct platform_driver ingenic_ipu_driver = {
.of_match_table = ingenic_ipu_of_match,
},
.probe = ingenic_ipu_probe,
- .remove_new = ingenic_ipu_remove,
+ .remove = ingenic_ipu_remove,
};
struct platform_driver *ingenic_ipu_driver_ptr = &ingenic_ipu_driver;
diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig
index e5ae3ec52392..7a2aa892a957 100644
--- a/drivers/gpu/drm/kmb/Kconfig
+++ b/drivers/gpu/drm/kmb/Kconfig
@@ -2,6 +2,7 @@ config DRM_KMB_DISPLAY
tristate "Intel Keembay Display"
depends on DRM
depends on ARCH_KEEMBAY || COMPILE_TEST
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
select DRM_BRIDGE_CONNECTOR
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 169b83987ce2..32cda134ae3e 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -13,6 +13,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -441,9 +442,9 @@ static const struct drm_driver kmb_driver = {
/* GEM Operations */
.fops = &fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.name = "kmb-drm",
.desc = "KEEMBAY DISPLAY DRIVER",
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -561,7 +562,7 @@ static int kmb_probe(struct platform_device *pdev)
if (ret)
goto err_register;
- drm_fbdev_dma_setup(&kmb->drm, 0);
+ drm_client_setup(&kmb->drm, NULL);
return 0;
@@ -620,7 +621,7 @@ static SIMPLE_DEV_PM_OPS(kmb_pm_ops, kmb_pm_suspend, kmb_pm_resume);
static struct platform_driver kmb_platform_driver = {
.probe = kmb_probe,
- .remove_new = kmb_remove,
+ .remove = kmb_remove,
.driver = {
.name = "kmb-drm",
.pm = &kmb_pm_ops,
diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
index bf085e95b28f..1f0c10d317fe 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.h
+++ b/drivers/gpu/drm/kmb/kmb_drv.h
@@ -16,7 +16,6 @@
#define KMB_MIN_WIDTH 1920 /* Min width in pixels */
#define KMB_MIN_HEIGHT 1080 /* Min height in pixels */
-#define DRIVER_DATE "20210223"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c
index cf7cf0b07541..faf38ca9e44c 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.c
+++ b/drivers/gpu/drm/kmb/kmb_dsi.c
@@ -818,7 +818,7 @@ static void test_mode_send(struct kmb_dsi *kmb_dsi, u32 dphy_no,
}
}
-static inline void
+static inline __maybe_unused void
set_test_mode_src_osc_freq_target_low_bits(struct kmb_dsi *kmb_dsi,
u32 dphy_no,
u32 freq)
@@ -830,7 +830,7 @@ static inline void
(freq & 0x7f));
}
-static inline void
+static inline __maybe_unused void
set_test_mode_src_osc_freq_target_hi_bits(struct kmb_dsi *kmb_dsi,
u32 dphy_no,
u32 freq)
diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/drm/lib/drm_random.h
index 5543bf0474bc..9f827260a89d 100644
--- a/drivers/gpu/drm/lib/drm_random.h
+++ b/drivers/gpu/drm/lib/drm_random.h
@@ -6,7 +6,7 @@
* be transposed to lib/ at the earliest convenience.
*/
-#include <linux/random.h>
+#include <linux/prandom.h>
#define DRM_RND_STATE_INITIALIZER(seed__) ({ \
struct rnd_state state__; \
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 10bce18b7c31..2067c5b65c57 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -271,7 +271,6 @@ static const struct drm_driver lima_drm_driver = {
.fops = &lima_drm_driver_fops,
.name = "lima",
.desc = "lima DRM",
- .date = "20191231",
.major = 1,
.minor = 1,
.patchlevel = 0,
@@ -488,7 +487,7 @@ static const struct dev_pm_ops lima_pm_ops = {
static struct platform_driver lima_platform_driver = {
.probe = lima_pdev_probe,
- .remove_new = lima_pdev_remove,
+ .remove = lima_pdev_remove,
.driver = {
.name = "lima",
.pm = &lima_pm_ops,
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 1a944edb6ddc..b40c90e97d7e 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -463,7 +463,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
lima_pm_idle(ldev);
drm_sched_resubmit_jobs(&pipe->base);
- drm_sched_start(&pipe->base);
+ drm_sched_start(&pipe->base, 0);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
diff --git a/drivers/gpu/drm/logicvc/Kconfig b/drivers/gpu/drm/logicvc/Kconfig
index 1df22a852a23..579a358ed5cf 100644
--- a/drivers/gpu/drm/logicvc/Kconfig
+++ b/drivers/gpu/drm/logicvc/Kconfig
@@ -2,6 +2,7 @@ config DRM_LOGICVC
tristate "LogiCVC DRM"
depends on DRM
depends on OF || COMPILE_TEST
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_KMS_DMA_HELPER
select DRM_GEM_DMA_HELPER
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index 01a37e28c080..204b0fee55d0 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -15,9 +15,11 @@
#include <linux/regmap.h>
#include <linux/types.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
@@ -50,11 +52,11 @@ static struct drm_driver logicvc_drm_driver = {
.fops = &logicvc_drm_fops,
.name = "logicvc-drm",
.desc = "Xylon LogiCVC DRM driver",
- .date = "20200403",
.major = 1,
.minor = 0,
DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_dma_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
};
static struct regmap_config logicvc_drm_regmap_config = {
@@ -301,7 +303,6 @@ static int logicvc_drm_probe(struct platform_device *pdev)
struct regmap *regmap = NULL;
struct resource res;
void __iomem *base;
- unsigned int preferred_bpp;
int irq;
int ret;
@@ -439,17 +440,7 @@ static int logicvc_drm_probe(struct platform_device *pdev)
goto error_mode;
}
- switch (drm_dev->mode_config.preferred_depth) {
- case 16:
- preferred_bpp = 16;
- break;
- case 24:
- case 32:
- default:
- preferred_bpp = 32;
- break;
- }
- drm_fbdev_dma_setup(drm_dev, preferred_bpp);
+ drm_client_setup(drm_dev, NULL);
return 0;
@@ -499,7 +490,7 @@ MODULE_DEVICE_TABLE(of, logicvc_drm_of_table);
static struct platform_driver logicvc_drm_platform_driver = {
.probe = logicvc_drm_probe,
- .remove_new = logicvc_drm_remove,
+ .remove = logicvc_drm_remove,
.shutdown = logicvc_drm_shutdown,
.driver = {
.name = "logicvc-drm",
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
index 9ed463a76ae2..552edfec7afb 100644
--- a/drivers/gpu/drm/loongson/Kconfig
+++ b/drivers/gpu/drm/loongson/Kconfig
@@ -4,6 +4,7 @@ config DRM_LOONGSON
tristate "DRM support for Loongson Graphics"
depends on DRM && PCI && MMU
depends on LOONGARCH || MIPS || COMPILE_TEST
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c
index adc7344d2f80..12193d2a301a 100644
--- a/drivers/gpu/drm/loongson/lsdc_drv.c
+++ b/drivers/gpu/drm/loongson/lsdc_drv.c
@@ -3,10 +3,11 @@
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
+#include <linux/aperture.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
@@ -25,7 +26,6 @@
#define DRIVER_AUTHOR "Sui Jingfeng <suijingfeng@loongson.cn>"
#define DRIVER_NAME "loongson"
#define DRIVER_DESC "drm driver for loongson graphics"
-#define DRIVER_DATE "20220701"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -38,7 +38,6 @@ static const struct drm_driver lsdc_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -47,6 +46,7 @@ static const struct drm_driver lsdc_drm_driver = {
.dumb_create = lsdc_dumb_create,
.dumb_map_offset = lsdc_dumb_map_offset,
.gem_prime_import_sg_table = lsdc_prime_import_sg_table,
+ DRM_FBDEV_TTM_DRIVER_OPS,
};
static const struct drm_mode_config_funcs lsdc_mode_config_funcs = {
@@ -213,9 +213,9 @@ lsdc_create_device(struct pci_dev *pdev,
return ERR_PTR(ret);
}
- ret = drm_aperture_remove_conflicting_framebuffers(ldev->vram_base,
- ldev->vram_size,
- driver);
+ ret = aperture_remove_conflicting_devices(ldev->vram_base,
+ ldev->vram_size,
+ driver->name);
if (ret) {
drm_err(ddev, "Remove firmware framebuffers failed: %d\n", ret);
return ERR_PTR(ret);
@@ -230,9 +230,9 @@ lsdc_create_device(struct pci_dev *pdev,
lsdc_gem_init(ddev);
/* Bar 0 of the DC device contains the MMIO register's base address */
- ldev->reg_base = pcim_iomap(pdev, 0, 0);
- if (!ldev->reg_base)
- return ERR_PTR(-ENODEV);
+ ldev->reg_base = pcim_iomap_region(pdev, 0, "lsdc");
+ if (IS_ERR(ldev->reg_base))
+ return ldev->reg_base;
spin_lock_init(&ldev->reglock);
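lsdc's BAR 0 mapping moves from pcim_iomap(), which returns NULL on failure, to pcim_iomap_region(), which also requests the region under the given name and reports failure as an ERR_PTR, hence the IS_ERR() handling above. In isolation:

void __iomem *regs;

regs = pcim_iomap_region(pdev, 0, "example");	/* request + ioremap BAR 0 */
if (IS_ERR(regs))
	return PTR_ERR(regs);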
@@ -314,7 +314,7 @@ static int lsdc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- drm_fbdev_ttm_setup(ddev, 32);
+ drm_client_setup(ddev, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig
index 907460b69d4f..3516c8d2a5d9 100644
--- a/drivers/gpu/drm/mcde/Kconfig
+++ b/drivers/gpu/drm/mcde/Kconfig
@@ -6,6 +6,7 @@ config DRM_MCDE
depends on OF
depends on COMMON_CLK
select MFD_SYSCON
+ select DRM_CLIENT_SELECTION
select DRM_MIPI_DSI
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 10c06440c7e7..5f2c462bad7e 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -65,6 +65,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
@@ -207,11 +208,11 @@ static const struct drm_driver mcde_drm_driver = {
.fops = &drm_fops,
.name = "mcde",
.desc = DRIVER_DESC,
- .date = "20180529",
.major = 1,
.minor = 0,
.patchlevel = 0,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
};
static int mcde_drm_bind(struct device *dev)
@@ -237,7 +238,7 @@ static int mcde_drm_bind(struct device *dev)
if (ret < 0)
goto unbind;
- drm_fbdev_dma_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return 0;
@@ -473,6 +474,7 @@ static const struct of_device_id mcde_of_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, mcde_of_match);
static struct platform_driver mcde_driver = {
.driver = {
@@ -480,7 +482,7 @@ static struct platform_driver mcde_driver = {
.of_match_table = mcde_of_match,
},
.probe = mcde_probe,
- .remove_new = mcde_remove,
+ .remove = mcde_remove,
.shutdown = mcde_shutdown,
};
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index e2fad1a048b5..395449a72f0a 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1229,5 +1229,5 @@ struct platform_driver mcde_dsi_driver = {
.of_match_table = mcde_dsi_of_match,
},
.probe = mcde_dsi_probe,
- .remove_new = mcde_dsi_remove,
+ .remove = mcde_dsi_remove,
};
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 417ac8c9af41..e47debd60619 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -2,20 +2,18 @@
config DRM_MEDIATEK
tristate "DRM Support for Mediatek SoCs"
depends on DRM
- depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
+ depends on ARCH_MEDIATEK || COMPILE_TEST
depends on COMMON_CLK
- depends on HAVE_ARM_SMCCC
+ depends on HAVE_ARM_SMCCC || COMPILE_TEST
depends on OF
depends on MTK_MMSYS
+ select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
select DRM_BRIDGE_CONNECTOR
select DRM_MIPI_DSI
select DRM_PANEL
- select MEMORY
- select MTK_SMI
- select PHY_MTK_MIPI_DSI
select VIDEOMODE_HELPERS
help
Choose this option if you have a Mediatek SoC.
@@ -26,7 +24,6 @@ config DRM_MEDIATEK
config DRM_MEDIATEK_DP
tristate "DRM DPTX Support for MediaTek SoCs"
depends on DRM_MEDIATEK
- select PHY_MTK_DP
select DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_DP_AUX_BUS
@@ -37,6 +34,5 @@ config DRM_MEDIATEK_HDMI
tristate "DRM HDMI Support for Mediatek SoCs"
depends on DRM_MEDIATEK
select SND_SOC_HDMI_CODEC if SND_SOC
- select PHY_MTK_HDMI
help
DRM/KMS HDMI driver for Mediatek SoCs
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index 2de248443147..b42c0d87eba3 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -241,7 +241,7 @@ MODULE_DEVICE_TABLE(of, mtk_cec_of_ids);
struct platform_driver mtk_cec_driver = {
.probe = mtk_cec_probe,
- .remove_new = mtk_cec_remove,
+ .remove = mtk_cec_remove,
.driver = {
.name = "mediatek-cec",
.of_match_table = mtk_cec_of_ids,
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 175b00e5a253..5674f5707cca 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -112,6 +112,11 @@ static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
drm_crtc_handle_vblank(&mtk_crtc->base);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ if (mtk_crtc->cmdq_client.chan)
+ return;
+#endif
+
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
mtk_crtc_finish_page_flip(mtk_crtc);
@@ -127,9 +132,8 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc)
mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
-
if (mtk_crtc->cmdq_client.chan) {
+ cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
mbox_free_channel(mtk_crtc->cmdq_client.chan);
mtk_crtc->cmdq_client.chan = NULL;
}
@@ -285,10 +289,8 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
state = to_mtk_crtc_state(mtk_crtc->base.state);
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
- if (mtk_crtc->config_updating) {
- spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ if (mtk_crtc->config_updating)
goto ddp_cmdq_cb_out;
- }
state->pending_config = false;
@@ -316,10 +318,15 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
mtk_crtc->pending_async_planes = false;
}
- spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
-
ddp_cmdq_cb_out:
+ if (mtk_crtc->pending_needs_vblank) {
+ mtk_crtc_finish_page_flip(mtk_crtc);
+ mtk_crtc->pending_needs_vblank = false;
+ }
+
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mtk_crtc->cmdq_vblank_cnt = 0;
wake_up(&mtk_crtc->cb_blocking_queue);
}
@@ -607,13 +614,18 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
*/
mtk_crtc->cmdq_vblank_cnt = 3;
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ mtk_crtc->config_updating = false;
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
}
-#endif
+#else
spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = false;
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+#endif
mutex_unlock(&mtk_crtc->hw_lock);
}
@@ -913,6 +925,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
BIT(pipe),
mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
mtk_ddp_comp_supported_rotations(comp),
+ mtk_ddp_comp_get_blend_modes(comp),
mtk_ddp_comp_get_formats(comp),
mtk_ddp_comp_get_num_formats(comp), i);
if (ret)
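The mtk_crtc.c hunks above mostly reshuffle locking: page-flip completion moves under config_lock, so the CMDQ callback can no longer race with a commit that is still flipping config_updating. A stripped-down sketch of the resulting callback shape, using stand-in types rather than the driver's own:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_crtc {
	spinlock_t config_lock;
	bool config_updating;
	bool pending_needs_vblank;
};

static void example_cmdq_done(struct example_crtc *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->config_lock, flags);
	if (c->config_updating)
		goto out;	/* a newer commit is in flight; let it finish */

	/* ... apply the pending state while holding the lock ... */

out:
	if (c->pending_needs_vblank) {
		/* complete the flip before dropping the lock */
		c->pending_needs_vblank = false;
	}
	spin_unlock_irqrestore(&c->config_lock, flags);
}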
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
index be66d94be361..edc6417639e6 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
@@ -363,6 +363,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
.layer_config = mtk_ovl_layer_config,
.bgclr_in_on = mtk_ovl_bgclr_in_on,
.bgclr_in_off = mtk_ovl_bgclr_in_off,
+ .get_blend_modes = mtk_ovl_get_blend_modes,
.get_formats = mtk_ovl_get_formats,
.get_num_formats = mtk_ovl_get_num_formats,
};
@@ -416,6 +417,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = {
.disconnect = mtk_ovl_adaptor_disconnect,
.add = mtk_ovl_adaptor_add_comp,
.remove = mtk_ovl_adaptor_remove_comp,
+ .get_blend_modes = mtk_ovl_adaptor_get_blend_modes,
.get_formats = mtk_ovl_adaptor_get_formats,
.get_num_formats = mtk_ovl_adaptor_get_num_formats,
.mode_valid = mtk_ovl_adaptor_mode_valid,
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
index ecf6dc283cd7..39720b27f4e9 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
@@ -80,6 +80,7 @@ struct mtk_ddp_comp_funcs {
void (*ctm_set)(struct device *dev,
struct drm_crtc_state *state);
struct device * (*dma_dev_get)(struct device *dev);
+ u32 (*get_blend_modes)(struct device *dev);
const u32 *(*get_formats)(struct device *dev);
size_t (*get_num_formats)(struct device *dev);
void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
@@ -267,6 +268,15 @@ static inline struct device *mtk_ddp_comp_dma_dev_get(struct mtk_ddp_comp *comp)
}
static inline
+u32 mtk_ddp_comp_get_blend_modes(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->get_blend_modes)
+ return comp->funcs->get_blend_modes(comp->dev);
+
+ return 0;
+}
+
+static inline
const u32 *mtk_ddp_comp_get_formats(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->get_formats)
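The new mtk_ddp_comp_get_blend_modes() wrapper above follows the header's existing optional-callback convention: consult the vtable hook only when a component provides it, and fall back to a harmless default otherwise. The same pattern in isolation (names are illustrative only):

#include <linux/types.h>

struct example_comp_funcs {
	u32 (*get_blend_modes)(void *ctx);	/* optional per-component hook */
};

struct example_comp {
	const struct example_comp_funcs *funcs;
	void *ctx;
};

static inline u32 example_comp_get_blend_modes(struct example_comp *comp)
{
	if (comp->funcs && comp->funcs->get_blend_modes)
		return comp->funcs->get_blend_modes(comp->ctx);

	return 0;	/* no blend modes advertised */
}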
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
index 59fb9a08d54b..abc9e5525d03 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
@@ -218,7 +218,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_aal_driver_dt_match);
struct platform_driver mtk_disp_aal_driver = {
.probe = mtk_disp_aal_probe,
- .remove_new = mtk_disp_aal_remove,
+ .remove = mtk_disp_aal_remove,
.driver = {
.name = "mediatek-disp-aal",
.of_match_table = mtk_disp_aal_driver_dt_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 9b75727e0861..10d60d2c2a56 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -209,7 +209,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match);
struct platform_driver mtk_disp_ccorr_driver = {
.probe = mtk_disp_ccorr_probe,
- .remove_new = mtk_disp_ccorr_remove,
+ .remove = mtk_disp_ccorr_remove,
.driver = {
.name = "mediatek-disp-ccorr",
.of_match_table = mtk_disp_ccorr_driver_dt_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 2fd5e7dc9e24..dd8433a38282 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -159,7 +159,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match);
struct platform_driver mtk_disp_color_driver = {
.probe = mtk_disp_color_probe,
- .remove_new = mtk_disp_color_remove,
+ .remove = mtk_disp_color_remove,
.driver = {
.name = "mediatek-disp-color",
.of_match_table = mtk_disp_color_driver_dt_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 082ac18fe04a..04217a36939c 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -103,11 +103,13 @@ void mtk_ovl_register_vblank_cb(struct device *dev,
void mtk_ovl_unregister_vblank_cb(struct device *dev);
void mtk_ovl_enable_vblank(struct device *dev);
void mtk_ovl_disable_vblank(struct device *dev);
+u32 mtk_ovl_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_get_formats(struct device *dev);
size_t mtk_ovl_get_num_formats(struct device *dev);
void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex);
void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex);
+bool mtk_ovl_adaptor_is_comp_present(struct device_node *node);
void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev,
unsigned int next);
void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev,
@@ -131,6 +133,7 @@ void mtk_ovl_adaptor_start(struct device *dev);
void mtk_ovl_adaptor_stop(struct device *dev);
unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev);
struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev);
+u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev);
size_t mtk_ovl_adaptor_get_num_formats(struct device *dev);
enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index f0b38817ba6c..b17b11d93846 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -329,7 +329,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_gamma_driver_dt_match);
struct platform_driver mtk_disp_gamma_driver = {
.probe = mtk_disp_gamma_probe,
- .remove_new = mtk_disp_gamma_remove,
+ .remove = mtk_disp_gamma_remove,
.driver = {
.name = "mediatek-disp-gamma",
.of_match_table = mtk_disp_gamma_driver_dt_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
index 435e5d9c8520..563b1b248fbb 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c
@@ -370,7 +370,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_merge_driver_dt_match);
struct platform_driver mtk_disp_merge_driver = {
.probe = mtk_disp_merge_probe,
- .remove_new = mtk_disp_merge_remove,
+ .remove = mtk_disp_merge_remove,
.driver = {
.name = "mediatek-disp-merge",
.of_match_table = mtk_disp_merge_driver_dt_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 89b439dcf3a6..df82cea4bb79 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -65,8 +65,8 @@
#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (2 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (3 << 12)
-#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
-#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
+#define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
+#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_UYVY (4 << 12)
#define OVL_CON_CLRFMT_YUYV (5 << 12)
#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
@@ -146,6 +146,7 @@ struct mtk_disp_ovl_data {
bool fmt_rgb565_is_0;
bool smi_id_en;
bool supports_afbc;
+ const u32 blend_modes;
const u32 *formats;
size_t num_formats;
bool supports_clrfmt_ext;
@@ -214,6 +215,13 @@ void mtk_ovl_disable_vblank(struct device *dev)
writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
}
+u32 mtk_ovl_get_blend_modes(struct device *dev)
+{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ return ovl->data->blend_modes;
+}
+
const u32 *mtk_ovl_get_formats(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
@@ -386,14 +394,27 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
DISP_REG_OVL_RDMA_CTRL(idx));
}
-static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt,
- unsigned int blend_mode)
+static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
+ struct mtk_plane_state *state)
{
- /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
- * is defined in mediatek HW data sheet.
- * The alphabet order in XXX is no relation to data
- * arrangement in memory.
+ unsigned int fmt = state->pending.format;
+ unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE;
+
+ /*
+ * Some platforms define OVL_CON_CLRFMT_MAN in the hardware data sheet and
+ * support premultiplied color formats, such as OVL_CON_CLRFMT_PARGB8888.
+ *
+ * Check blend_modes in the driver data to see whether premultiplied mode is
+ * supported. If it is not, use coverage mode instead and stick to the
+ * supported color formats.
+ *
+ * The current DRM assumption is that alpha defaults to premultiplied, so the
+ * blend_modes bitmask must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise,
+ * mtk_plane_init() gets an error back from drm_plane_create_blend_mode_property()
+ * and state->base.pixel_blend_mode should not be used.
*/
+ if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI))
+ blend_mode = state->base.pixel_blend_mode;
+
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
@@ -439,6 +460,29 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt,
}
}
+static void mtk_ovl_afbc_layer_config(struct mtk_disp_ovl *ovl,
+ unsigned int idx,
+ struct mtk_plane_pending_state *pending,
+ struct cmdq_pkt *cmdq_pkt)
+{
+ unsigned int pitch_msb = pending->pitch >> 16;
+ unsigned int hdr_pitch = pending->hdr_pitch;
+ unsigned int hdr_addr = pending->hdr_addr;
+
+ if (pending->modifier != DRM_FORMAT_MOD_LINEAR) {
+ mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_HDR_ADDR(ovl, idx));
+ mtk_ddp_write_relaxed(cmdq_pkt,
+ OVL_PITCH_MSB_2ND_SUBBUF | pitch_msb,
+ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
+ mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
+ DISP_REG_OVL_HDR_PITCH(ovl, idx));
+ } else {
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch_msb,
+ &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
+ }
+}
+
void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
@@ -446,62 +490,65 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
- unsigned int hdr_addr = pending->hdr_addr;
- unsigned int pitch = pending->pitch;
- unsigned int hdr_pitch = pending->hdr_pitch;
+ unsigned int pitch_lsb = pending->pitch & GENMASK(15, 0);
unsigned int fmt = pending->format;
+ unsigned int rotation = pending->rotation;
unsigned int offset = (pending->y << 16) | pending->x;
unsigned int src_size = (pending->height << 16) | pending->width;
unsigned int blend_mode = state->base.pixel_blend_mode;
unsigned int ignore_pixel_alpha = 0;
unsigned int con;
- bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
- union overlay_pitch {
- struct split_pitch {
- u16 lsb;
- u16 msb;
- } split_pitch;
- u32 pitch;
- } overlay_pitch;
-
- overlay_pitch.pitch = pitch;
if (!pending->enable) {
mtk_ovl_layer_off(dev, idx, cmdq_pkt);
return;
}
- con = ovl_fmt_convert(ovl, fmt, blend_mode);
+ con = mtk_ovl_fmt_convert(ovl, state);
if (state->base.fb) {
- con |= OVL_CON_AEN;
con |= state->base.alpha & OVL_CON_ALPHA;
+
+ /*
+ * On SoCs that support blend_modes, always enable alpha blending.
+ * On SoCs that do not, enable alpha blending only when has_alpha is set.
+ */
+ if (blend_mode || state->base.fb->format->has_alpha)
+ con |= OVL_CON_AEN;
+
+ /*
+ * Although the alpha channel can be ignored, CONST_BLD must be enabled
+ * for XRGB formats, otherwise OVL will still read the value from memory.
+ * For RGB888 related formats, whether CONST_BLD is enabled or not won't
+ * affect the result. Therefore we use !has_alpha as the condition.
+ */
+ if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha)
+ ignore_pixel_alpha = OVL_CONST_BLEND;
}
- /* CONST_BLD must be enabled for XRGB formats although the alpha channel
- * can be ignored, or OVL will still read the value from memory.
- * For RGB888 related formats, whether CONST_BLD is enabled or not won't
- * affect the result. Therefore we use !has_alpha as the condition.
+ /*
+ * Treat a 180-degree rotation as flip x + flip y, and XOR the original
+ * rotation value with flip x + flip y so both can be supported at the same time.
*/
- if ((state->base.fb && !state->base.fb->format->has_alpha) ||
- blend_mode == DRM_MODE_BLEND_PIXEL_NONE)
- ignore_pixel_alpha = OVL_CONST_BLEND;
+ if (rotation & DRM_MODE_ROTATE_180)
+ rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
- if (pending->rotation & DRM_MODE_REFLECT_Y) {
+ if (rotation & DRM_MODE_REFLECT_Y) {
con |= OVL_CON_VIRT_FLIP;
addr += (pending->height - 1) * pending->pitch;
}
- if (pending->rotation & DRM_MODE_REFLECT_X) {
+ if (rotation & DRM_MODE_REFLECT_X) {
con |= OVL_CON_HORZ_FLIP;
addr += pending->pitch - 1;
}
if (ovl->data->supports_afbc)
- mtk_ovl_set_afbc(ovl, cmdq_pkt, idx, is_afbc);
+ mtk_ovl_set_afbc(ovl, cmdq_pkt, idx,
+ pending->modifier != DRM_FORMAT_MOD_LINEAR);
mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_CON(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, overlay_pitch.split_pitch.lsb | ignore_pixel_alpha,
+ mtk_ddp_write_relaxed(cmdq_pkt, pitch_lsb | ignore_pixel_alpha,
&ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx));
mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_SRC_SIZE(idx));
@@ -510,19 +557,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
DISP_REG_OVL_ADDR(ovl, idx));
- if (is_afbc) {
- mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
- DISP_REG_OVL_HDR_ADDR(ovl, idx));
- mtk_ddp_write_relaxed(cmdq_pkt,
- OVL_PITCH_MSB_2ND_SUBBUF | overlay_pitch.split_pitch.msb,
- &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
- mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
- DISP_REG_OVL_HDR_PITCH(ovl, idx));
- } else {
- mtk_ddp_write_relaxed(cmdq_pkt,
- overlay_pitch.split_pitch.msb,
- &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
- }
+ if (ovl->data->supports_afbc)
+ mtk_ovl_afbc_layer_config(ovl, idx, pending, cmdq_pkt);
mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt);
mtk_ovl_layer_on(dev, idx, cmdq_pkt);
@@ -663,6 +699,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = {
.layer_nr = 4,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
+ .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
@@ -673,6 +712,9 @@ static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = {
.layer_nr = 2,
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
+ .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8173_formats,
.num_formats = ARRAY_SIZE(mt8173_formats),
};
@@ -684,6 +726,9 @@ static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = {
.fmt_rgb565_is_0 = true,
.smi_id_en = true,
.supports_afbc = true,
+ .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE),
.formats = mt8195_formats,
.num_formats = ARRAY_SIZE(mt8195_formats),
.supports_clrfmt_ext = true,
@@ -710,7 +755,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
struct platform_driver mtk_disp_ovl_driver = {
.probe = mtk_disp_ovl_probe,
- .remove_new = mtk_disp_ovl_remove,
+ .remove = mtk_disp_ovl_remove,
.driver = {
.name = "mediatek-disp-ovl",
.of_match_table = mtk_disp_ovl_driver_dt_match,
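The OVL changes above thread a per-SoC blend_modes bitmask through the driver: it both gates creation of the plane's blend_mode property and decides whether the requested pixel_blend_mode can be honored. A small sketch of that decision, under the same assumption the comment in mtk_ovl_fmt_convert() states (soc_blend_modes is a placeholder name):

#include <linux/bits.h>
#include <linux/types.h>
#include <drm/drm_blend.h>

static unsigned int pick_blend_mode(u32 soc_blend_modes,
				    unsigned int requested_mode)
{
	/*
	 * Without premultiplied-alpha support the blend_mode property was
	 * never created, so the requested mode is meaningless: fall back
	 * to coverage unconditionally.
	 */
	if (soc_blend_modes & BIT(DRM_MODE_BLEND_PREMULTI))
		return requested_mode;

	return DRM_MODE_BLEND_COVERAGE;
}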
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index c6768210b08b..fa0e95dd29a0 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -400,6 +400,13 @@ void mtk_ovl_adaptor_disable_vblank(struct device *dev)
mtk_ethdr_disable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
}
+u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev)
+{
+ struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+
+ return mtk_ethdr_get_blend_modes(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]);
+}
+
const u32 *mtk_ovl_adaptor_get_formats(struct device *dev)
{
struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
@@ -490,6 +497,41 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
+static int ovl_adaptor_of_get_ddp_comp_type(struct device_node *node,
+ enum mtk_ovl_adaptor_comp_type *ctype)
+{
+ const struct of_device_id *of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node);
+
+ if (!of_id)
+ return -EINVAL;
+
+ *ctype = (enum mtk_ovl_adaptor_comp_type)((uintptr_t)of_id->data);
+
+ return 0;
+}
+
+bool mtk_ovl_adaptor_is_comp_present(struct device_node *node)
+{
+ enum mtk_ovl_adaptor_comp_type type;
+ int ret;
+
+ ret = ovl_adaptor_of_get_ddp_comp_type(node, &type);
+ if (ret)
+ return false;
+
+ if (type >= OVL_ADAPTOR_TYPE_NUM)
+ return false;
+
+ /*
+ * In the context of mediatek-drm, ETHDR, MDP_RDMA and Padding are
+ * used exclusively by OVL Adaptor: if this component is not one of
+ * those, it's likely not an OVL Adaptor path.
+ */
+ return type == OVL_ADAPTOR_TYPE_ETHDR ||
+ type == OVL_ADAPTOR_TYPE_MDP_RDMA ||
+ type == OVL_ADAPTOR_TYPE_PADDING;
+}
+
static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match)
{
struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
@@ -499,12 +541,11 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
parent = dev->parent->parent->of_node->parent;
for_each_child_of_node_scoped(parent, node) {
- const struct of_device_id *of_id;
enum mtk_ovl_adaptor_comp_type type;
- int id;
+ int id, ret;
- of_id = of_match_node(mtk_ovl_adaptor_comp_dt_ids, node);
- if (!of_id)
+ ret = ovl_adaptor_of_get_ddp_comp_type(node, &type);
+ if (ret)
continue;
if (!of_device_is_available(node)) {
@@ -513,7 +554,6 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
continue;
}
- type = (enum mtk_ovl_adaptor_comp_type)(uintptr_t)of_id->data;
id = ovl_adaptor_comp_get_id(dev, node, type);
if (id < 0) {
dev_warn(dev, "Skipping unknown component %pOF\n",
@@ -625,7 +665,7 @@ static void mtk_disp_ovl_adaptor_remove(struct platform_device *pdev)
struct platform_driver mtk_disp_ovl_adaptor_driver = {
.probe = mtk_disp_ovl_adaptor_probe,
- .remove_new = mtk_disp_ovl_adaptor_remove,
+ .remove = mtk_disp_ovl_adaptor_remove,
.driver = {
.name = "mediatek-disp-ovl-adaptor",
},
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 07243f372260..bf47790e4d6b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -417,7 +417,7 @@ MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
struct platform_driver mtk_disp_rdma_driver = {
.probe = mtk_disp_rdma_probe,
- .remove_new = mtk_disp_rdma_remove,
+ .remove = mtk_disp_rdma_remove,
.driver = {
.name = "mediatek-disp-rdma",
.of_match_table = mtk_disp_rdma_driver_dt_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index d8796a904eca..cd385ba4c66a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -145,6 +145,89 @@ struct mtk_dp_data {
u16 audio_m_div2_bit;
};
+static const struct mtk_dp_efuse_fmt mt8188_dp_efuse_fmt[MTK_DP_CAL_MAX] = {
+ [MTK_DP_CAL_GLB_BIAS_TRIM] = {
+ .idx = 0,
+ .shift = 10,
+ .mask = 0x1f,
+ .min_val = 1,
+ .max_val = 0x1e,
+ .default_val = 0xf,
+ },
+ [MTK_DP_CAL_CLKTX_IMPSE] = {
+ .idx = 0,
+ .shift = 15,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = {
+ .idx = 1,
+ .shift = 0,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = {
+ .idx = 1,
+ .shift = 8,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = {
+ .idx = 1,
+ .shift = 16,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = {
+ .idx = 1,
+ .shift = 24,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = {
+ .idx = 1,
+ .shift = 4,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = {
+ .idx = 1,
+ .shift = 12,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = {
+ .idx = 1,
+ .shift = 20,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = {
+ .idx = 1,
+ .shift = 28,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+};
+
static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = {
[MTK_DP_CAL_GLB_BIAS_TRIM] = {
.idx = 3,
@@ -311,7 +394,7 @@ static const struct mtk_dp_efuse_fmt mt8195_dp_efuse_fmt[MTK_DP_CAL_MAX] = {
},
};
-static struct regmap_config mtk_dp_regmap_config = {
+static const struct regmap_config mtk_dp_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -460,18 +543,16 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
enum dp_pixelformat color_format)
{
u32 val;
-
- /* update MISC0 */
- mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
- color_format << DP_TEST_COLOR_FORMAT_SHIFT,
- DP_TEST_COLOR_FORMAT_MASK);
+ u32 misc0_color;
switch (color_format) {
case DP_PIXELFORMAT_YUV422:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422;
+ misc0_color = DP_COLOR_FORMAT_YCbCr422;
break;
case DP_PIXELFORMAT_RGB:
val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB;
+ misc0_color = DP_COLOR_FORMAT_RGB;
break;
default:
drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n",
@@ -479,6 +560,11 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
return -EINVAL;
}
+ /* update MISC0 */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
+ misc0_color,
+ DP_TEST_COLOR_FORMAT_MASK);
+
mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK);
return 0;
@@ -1052,6 +1138,18 @@ static void mtk_dp_digital_sw_reset(struct mtk_dp *mtk_dp)
0, DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0);
}
+static void mtk_dp_sdp_path_reset(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ SDP_RESET_SW_DP_ENC0_P0,
+ SDP_RESET_SW_DP_ENC0_P0);
+
+ /* Wait for the SDP path reset to complete */
+ usleep_range(1000, 5000);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ 0, SDP_RESET_SW_DP_ENC0_P0);
+}
+
static void mtk_dp_set_lanes(struct mtk_dp *mtk_dp, int lanes)
{
mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35F0,
@@ -1082,17 +1180,25 @@ static void mtk_dp_get_calibration_data(struct mtk_dp *mtk_dp)
buf = (u32 *)nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
- if (IS_ERR(buf) || ((len / sizeof(u32)) != 4)) {
+ if (IS_ERR(buf)) {
dev_warn(dev, "Failed to read nvmem_cell_read\n");
-
- if (!IS_ERR(buf))
- kfree(buf);
-
goto use_default_val;
}
+ /* The cell length is in bytes; convert it to index the u32 buffer. */
+ len /= sizeof(u32);
+
for (i = 0; i < MTK_DP_CAL_MAX; i++) {
fmt = &mtk_dp->data->efuse_fmt[i];
+
+ if (fmt->idx >= len) {
+ dev_warn(mtk_dp->dev,
+ "Out-of-bound efuse data access, fmt idx = %d, buf len = %zu\n",
+ fmt->idx, len);
+ kfree(buf);
+ goto use_default_val;
+ }
+
cal_data[i] = (buf[fmt->idx] >> fmt->shift) & fmt->mask;
if (cal_data[i] < fmt->min_val || cal_data[i] > fmt->max_val) {
@@ -2017,7 +2123,6 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
enum drm_connector_status ret = connector_status_disconnected;
bool enabled = mtk_dp->enabled;
- u8 sink_count = 0;
if (!mtk_dp->train_info.cable_plugged_in)
return ret;
@@ -2032,8 +2137,8 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
* function, we just need to check the HPD connection to check
* whether we connect to a sink device.
*/
- drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count);
- if (DP_GET_SINK_COUNT(sink_count))
+
+ if (drm_dp_read_sink_count(&mtk_dp->aux) > 0)
ret = connector_status_connected;
if (!enabled)
@@ -2314,6 +2419,9 @@ static void mtk_dp_bridge_atomic_disable(struct drm_bridge *bridge,
DP_PWR_STATE_BANDGAP_TPLL,
DP_PWR_STATE_MASK);
+ /* SDP path software reset */
+ mtk_dp_sdp_path_reset(mtk_dp);
+
/* Ensure the sink is muted */
msleep(20);
}
@@ -2325,12 +2433,19 @@ mtk_dp_bridge_mode_valid(struct drm_bridge *bridge,
{
struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24;
- u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
- drm_dp_max_lane_count(mtk_dp->rx_cap),
- drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
- mtk_dp->max_lanes);
+ u32 lane_count_min = mtk_dp->train_info.lane_count;
+ u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
+ lane_count_min;
- if (rate < mode->clock * bpp / 8)
+ /*
+ * FEC overhead is approximately 2.4% (DP 1.4a spec, section 2.2.1.4.2).
+ * The down-spread amplitude shall either be disabled (0.0%) or be up to
+ * 0.5% (DP 1.4a, section 3.5.2.6), adding up to approximately 3% total overhead.
+ *
+ * Because rate is already divided by 10,
+ * mode->clock does not need to be multiplied by 10.
+ */
+ if ((rate * 97 / 100) < (mode->clock * bpp / 8))
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -2371,10 +2486,9 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_display_info *display_info =
&conn_state->connector->display_info;
- u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
- drm_dp_max_lane_count(mtk_dp->rx_cap),
- drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
- mtk_dp->max_lanes);
+ u32 lane_count_min = mtk_dp->train_info.lane_count;
+ u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) *
+ lane_count_min;
*num_input_fmts = 0;
@@ -2383,8 +2497,8 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
* datarate of YUV422 and sink device supports YUV422, we output YUV422
* format. Use this condition, we can support more resolution.
*/
- if ((rate < (mode->clock * 24 / 8)) &&
- (rate > (mode->clock * 16 / 8)) &&
+ if (((rate * 97 / 100) < (mode->clock * 24 / 8)) &&
+ ((rate * 97 / 100) > (mode->clock * 16 / 8)) &&
(display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
@@ -2532,7 +2646,6 @@ static const struct hdmi_codec_ops mtk_dp_audio_codec_ops = {
.audio_shutdown = mtk_dp_audio_shutdown,
.get_eld = mtk_dp_audio_get_eld,
.hook_plugged_cb = mtk_dp_audio_hook_plugged_cb,
- .no_capture_mute = 1,
};
static int mtk_dp_register_audio_driver(struct device *dev)
@@ -2543,6 +2656,7 @@ static int mtk_dp_register_audio_driver(struct device *dev)
.max_i2s_channels = 8,
.i2s = 1,
.data = mtk_dp,
+ .no_capture_mute = 1,
};
mtk_dp->audio_pdev = platform_device_register_data(dev,
@@ -2771,7 +2885,7 @@ static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume);
static const struct mtk_dp_data mt8188_dp_data = {
.bridge_type = DRM_MODE_CONNECTOR_DisplayPort,
.smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE,
- .efuse_fmt = mt8195_dp_efuse_fmt,
+ .efuse_fmt = mt8188_dp_efuse_fmt,
.audio_supported = true,
.audio_pkt_in_hblank_area = true,
.audio_m_div2_bit = MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2,
@@ -2816,7 +2930,7 @@ MODULE_DEVICE_TABLE(of, mtk_dp_of_match);
static struct platform_driver mtk_dp_driver = {
.probe = mtk_dp_probe,
- .remove_new = mtk_dp_remove,
+ .remove = mtk_dp_remove,
.driver = {
.name = "mediatek-drm-dp",
.of_match_table = mtk_dp_of_match,
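The mode_valid/get_input_bus_fmts changes above derate the link by roughly 3% (about 2.4% FEC plus up to 0.5% down-spread) before comparing against the mode's data rate. The arithmetic as a standalone check, keeping the driver's convention that both sides carry the same scaling so no extra factor of 10 appears:

#include <linux/types.h>

/* True if a mode fits on the link after the ~3% overhead derating. */
static bool dp_mode_fits_link(u32 link_rate, u32 lane_count,
			      u32 pixel_clock, u32 bpp)
{
	u64 avail = (u64)link_rate * lane_count * 97 / 100;
	u64 needed = (u64)pixel_clock * bpp / 8;

	return avail >= needed;
}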
diff --git a/drivers/gpu/drm/mediatek/mtk_dp_reg.h b/drivers/gpu/drm/mediatek/mtk_dp_reg.h
index 709b79480693..8ad7a9cc259e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp_reg.h
+++ b/drivers/gpu/drm/mediatek/mtk_dp_reg.h
@@ -86,6 +86,7 @@
#define MTK_DP_ENC0_P0_3004 0x3004
#define VIDEO_M_CODE_SEL_DP_ENC0_P0_MASK BIT(8)
#define DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0 BIT(9)
+#define SDP_RESET_SW_DP_ENC0_P0 BIT(13)
#define MTK_DP_ENC0_P0_3010 0x3010
#define HTOTAL_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
#define MTK_DP_ENC0_P0_3014 0x3014
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index a08d20654954..1864eb02dbf5 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -704,6 +704,20 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+ int ret;
+
+ dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1);
+ if (IS_ERR(dpi->next_bridge)) {
+ ret = PTR_ERR(dpi->next_bridge);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /* Old devicetree has only one endpoint */
+ dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0);
+ if (IS_ERR(dpi->next_bridge))
+ return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge),
+ "Failed to get bridge\n");
+ }
return drm_bridge_attach(bridge->encoder, dpi->next_bridge,
&dpi->bridge, flags);
@@ -1058,13 +1072,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
if (dpi->irq < 0)
return dpi->irq;
- dpi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
- if (IS_ERR(dpi->next_bridge))
- return dev_err_probe(dev, PTR_ERR(dpi->next_bridge),
- "Failed to get bridge\n");
-
- dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node);
-
platform_set_drvdata(pdev, dpi);
dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
@@ -1101,7 +1108,7 @@ MODULE_DEVICE_TABLE(of, mtk_dpi_of_ids);
struct platform_driver mtk_dpi_driver = {
.probe = mtk_dpi_probe,
- .remove_new = mtk_dpi_remove,
+ .remove = mtk_dpi_remove,
.driver = {
.name = "mediatek-dpi",
.of_match_table = mtk_dpi_of_ids,
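Both the DPI change above and the DSI change later in this series move the bridge lookup to attach time and add a fallback: probe the new two-endpoint binding first, keep deferral errors intact, and only then retry the legacy single-endpoint layout. The shape of that lookup, sketched with illustrative names:

#include <linux/device.h>
#include <linux/err.h>
#include <drm/drm_bridge.h>

static int example_get_next_bridge(struct device *dev,
				   struct drm_bridge **bridge)
{
	*bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, -1);
	if (!IS_ERR(*bridge))
		return 0;

	if (PTR_ERR(*bridge) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* never mask a deferral */

	/* Old devicetrees describe only one endpoint. */
	*bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(*bridge))
		return dev_err_probe(dev, PTR_ERR(*bridge),
				     "Failed to get bridge\n");

	return 0;
}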
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 3e807195a0d0..f22ad2882697 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -12,6 +12,7 @@
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
@@ -26,12 +27,12 @@
#include "mtk_crtc.h"
#include "mtk_ddp_comp.h"
+#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
#include "mtk_gem.h"
#define DRIVER_NAME "mediatek"
#define DRIVER_DESC "Mediatek SoC DRM"
-#define DRIVER_DATE "20150513"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -357,7 +358,7 @@ static const struct of_device_id mtk_drm_of_ids[] = {
};
MODULE_DEVICE_TABLE(of, mtk_drm_of_ids);
-static int mtk_drm_match(struct device *dev, void *data)
+static int mtk_drm_match(struct device *dev, const void *data)
{
if (!strncmp(dev_name(dev), "mediatek-drm", sizeof("mediatek-drm") - 1))
return true;
@@ -405,8 +406,10 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
if (temp_drm_priv->mtk_drm_bound)
cnt++;
- if (cnt == MAX_CRTC)
+ if (cnt == MAX_CRTC) {
+ of_node_put(node);
break;
+ }
}
if (drm_priv->data->mmsys_dev_num == cnt) {
@@ -606,6 +609,7 @@ static const struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.dumb_create = mtk_gem_dumb_create,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.gem_prime_import = mtk_gem_prime_import,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
@@ -613,7 +617,6 @@ static const struct drm_driver mtk_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -662,7 +665,7 @@ static int mtk_drm_bind(struct device *dev)
if (ret < 0)
goto err_deinit;
- drm_fbdev_dma_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return 0;
@@ -671,6 +674,8 @@ err_deinit:
err_free:
private->drm = NULL;
drm_dev_put(drm);
+ for (i = 0; i < private->data->mmsys_dev_num; i++)
+ private->all_drm_private[i]->drm = NULL;
return ret;
}
@@ -818,12 +823,235 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
{ }
};
+static int mtk_drm_of_get_ddp_comp_type(struct device_node *node, enum mtk_ddp_comp_type *ctype)
+{
+ const struct of_device_id *of_id = of_match_node(mtk_ddp_comp_dt_ids, node);
+
+ if (!of_id)
+ return -EINVAL;
+
+ *ctype = (enum mtk_ddp_comp_type)((uintptr_t)of_id->data);
+
+ return 0;
+}
+
+static int mtk_drm_of_get_ddp_ep_cid(struct device_node *node,
+ int output_port, enum mtk_crtc_path crtc_path,
+ struct device_node **next, unsigned int *cid)
+{
+ struct device_node *ep_dev_node, *ep_out;
+ enum mtk_ddp_comp_type comp_type;
+ int ret;
+
+ ep_out = of_graph_get_endpoint_by_regs(node, output_port, crtc_path);
+ if (!ep_out)
+ return -ENOENT;
+
+ ep_dev_node = of_graph_get_remote_port_parent(ep_out);
+ of_node_put(ep_out);
+ if (!ep_dev_node)
+ return -EINVAL;
+
+ /*
+ * Pass the next node pointer regardless of failures in the later code
+ * so that if this function is called in a loop it will walk through all
+ * of the subsequent endpoints anyway.
+ */
+ *next = ep_dev_node;
+
+ if (!of_device_is_available(ep_dev_node))
+ return -ENODEV;
+
+ ret = mtk_drm_of_get_ddp_comp_type(ep_dev_node, &comp_type);
+ if (ret) {
+ if (mtk_ovl_adaptor_is_comp_present(ep_dev_node)) {
+ *cid = (unsigned int)DDP_COMPONENT_DRM_OVL_ADAPTOR;
+ return 0;
+ }
+ return ret;
+ }
+
+ ret = mtk_ddp_comp_get_id(ep_dev_node, comp_type);
+ if (ret < 0)
+ return ret;
+
+ /* All ok! Pass the Component ID to the caller. */
+ *cid = (unsigned int)ret;
+
+ return 0;
+}
+
+/**
+ * mtk_drm_of_ddp_path_build_one - Build a Display HW Pipeline for a CRTC Path
+ * @dev: The mediatek-drm device
+ * @cpath: CRTC Path relative to a VDO or MMSYS
+ * @out_path: Pointer to an array that will contain the new pipeline
+ * @out_path_len: Number of entries in the pipeline array
+ *
+ * MediaTek SoCs can use different DDP hardware pipelines (or paths) depending
+ * on the board-specific desired display configuration; this function walks
+ * through all of the output endpoints starting from a VDO or MMSYS hardware
+ * instance and builds the right pipeline as specified in device trees.
+ *
+ * Return:
+ * * %0 - Display HW Pipeline successfully built and validated
+ * * %-ENOENT - Display pipeline was not specified in device tree
+ * * %-EINVAL - Display pipeline built but validation failed
+ * * %-ENOMEM - Failure to allocate pipeline array to pass to the caller
+ */
+static int mtk_drm_of_ddp_path_build_one(struct device *dev, enum mtk_crtc_path cpath,
+ const unsigned int **out_path,
+ unsigned int *out_path_len)
+{
+ struct device_node *next = NULL, *prev, *vdo = dev->parent->of_node;
+ unsigned int temp_path[DDP_COMPONENT_DRM_ID_MAX] = { 0 };
+ unsigned int *final_ddp_path;
+ unsigned short int idx = 0;
+ bool ovl_adaptor_comp_added = false;
+ int ret;
+
+ /* Get the first entry for the temp_path array */
+ ret = mtk_drm_of_get_ddp_ep_cid(vdo, 0, cpath, &next, &temp_path[idx]);
+ if (ret) {
+ if (next && temp_path[idx] == DDP_COMPONENT_DRM_OVL_ADAPTOR) {
+ dev_dbg(dev, "Adding OVL Adaptor for %pOF\n", next);
+ ovl_adaptor_comp_added = true;
+ } else {
+ if (next)
+ dev_err(dev, "Invalid component %pOF\n", next);
+ else
+ dev_err(dev, "Cannot find first endpoint for path %d\n", cpath);
+
+ return ret;
+ }
+ }
+ idx++;
+
+ /*
+ * Walk through port outputs until we reach the last valid mediatek-drm component.
+ * To be valid, this must end with an "invalid" component that is a display node.
+ */
+ do {
+ prev = next;
+ ret = mtk_drm_of_get_ddp_ep_cid(next, 1, cpath, &next, &temp_path[idx]);
+ of_node_put(prev);
+ if (ret) {
+ of_node_put(next);
+ break;
+ }
+
+ /*
+ * If this is an OVL-adaptor-exclusive component and one of those was
+ * already added, don't add another instance of the generic
+ * DDP_COMPONENT_OVL_ADAPTOR: it is only used to decide whether to probe
+ * the component master driver, of which only one instance is needed
+ * (and possible).
+ */
+ if (temp_path[idx] == DDP_COMPONENT_DRM_OVL_ADAPTOR) {
+ if (!ovl_adaptor_comp_added)
+ ovl_adaptor_comp_added = true;
+ else
+ idx--;
+ }
+ } while (++idx < DDP_COMPONENT_DRM_ID_MAX);
+
+ /*
+ * The device component might not be enabled: in that case, don't
+ * check the last entry and just report that the device is missing.
+ */
+ if (ret == -ENODEV)
+ return ret;
+
+ /* If the last entry is not a final display output, the configuration is wrong */
+ switch (temp_path[idx - 1]) {
+ case DDP_COMPONENT_DP_INTF0:
+ case DDP_COMPONENT_DP_INTF1:
+ case DDP_COMPONENT_DPI0:
+ case DDP_COMPONENT_DPI1:
+ case DDP_COMPONENT_DSI0:
+ case DDP_COMPONENT_DSI1:
+ case DDP_COMPONENT_DSI2:
+ case DDP_COMPONENT_DSI3:
+ break;
+ default:
+ dev_err(dev, "Invalid display hw pipeline. Last component: %d (ret=%d)\n",
+ temp_path[idx - 1], ret);
+ return -EINVAL;
+ }
+
+ final_ddp_path = devm_kmemdup(dev, temp_path, idx * sizeof(temp_path[0]), GFP_KERNEL);
+ if (!final_ddp_path)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Display HW Pipeline built with %d components.\n", idx);
+
+ /* Pipeline built! */
+ *out_path = final_ddp_path;
+ *out_path_len = idx;
+
+ return 0;
+}
+
+static int mtk_drm_of_ddp_path_build(struct device *dev, struct device_node *node,
+ struct mtk_mmsys_driver_data *data)
+{
+ struct device_node *ep_node;
+ struct of_endpoint of_ep;
+ bool output_present[MAX_CRTC] = { false };
+ int ret;
+
+ for_each_endpoint_of_node(node, ep_node) {
+ ret = of_graph_parse_endpoint(ep_node, &of_ep);
+ if (ret) {
+ dev_err_probe(dev, ret, "Cannot parse endpoint\n");
+ break;
+ }
+
+ if (of_ep.id >= MAX_CRTC) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "Invalid endpoint%u number\n", of_ep.port);
+ break;
+ }
+
+ output_present[of_ep.id] = true;
+ }
+
+ if (ret) {
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ if (output_present[CRTC_MAIN]) {
+ ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_MAIN,
+ &data->main_path, &data->main_len);
+ if (ret && ret != -ENODEV)
+ return ret;
+ }
+
+ if (output_present[CRTC_EXT]) {
+ ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_EXT,
+ &data->ext_path, &data->ext_len);
+ if (ret && ret != -ENODEV)
+ return ret;
+ }
+
+ if (output_present[CRTC_THIRD]) {
+ ret = mtk_drm_of_ddp_path_build_one(dev, CRTC_THIRD,
+ &data->third_path, &data->third_len);
+ if (ret && ret != -ENODEV)
+ return ret;
+ }
+
+ return 0;
+}
+
static int mtk_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *phandle = dev->parent->of_node;
const struct of_device_id *of_id;
struct mtk_drm_private *private;
+ struct mtk_mmsys_driver_data *mtk_drm_data;
struct device_node *node;
struct component_match *match = NULL;
struct platform_device *ovl_adaptor;
@@ -844,7 +1072,27 @@ static int mtk_drm_probe(struct platform_device *pdev)
if (!of_id)
return -ENODEV;
- private->data = of_id->data;
+ mtk_drm_data = (struct mtk_mmsys_driver_data *)of_id->data;
+ if (!mtk_drm_data)
+ return -EINVAL;
+
+ /* Try to build the display pipeline from devicetree graphs */
+ if (of_graph_is_present(phandle)) {
+ dev_dbg(dev, "Building display pipeline for MMSYS %u\n",
+ mtk_drm_data->mmsys_id);
+ private->data = devm_kmemdup(dev, mtk_drm_data,
+ sizeof(*mtk_drm_data), GFP_KERNEL);
+ if (!private->data)
+ return -ENOMEM;
+
+ ret = mtk_drm_of_ddp_path_build(dev, phandle, private->data);
+ if (ret)
+ return ret;
+ } else {
+ /* No devicetree graphs support: go with hardcoded paths if present */
+ dev_dbg(dev, "Using hardcoded paths for MMSYS %u\n", mtk_drm_data->mmsys_id);
+ private->data = mtk_drm_data;
+ }
private->all_drm_private = devm_kmalloc_array(dev, private->data->mmsys_dev_num,
sizeof(*private->all_drm_private),
@@ -866,12 +1114,11 @@ static int mtk_drm_probe(struct platform_device *pdev)
/* Iterate over sibling DISP function blocks */
for_each_child_of_node(phandle->parent, node) {
- const struct of_device_id *of_id;
enum mtk_ddp_comp_type comp_type;
int comp_id;
- of_id = of_match_node(mtk_ddp_comp_dt_ids, node);
- if (!of_id)
+ ret = mtk_drm_of_get_ddp_comp_type(node, &comp_type);
+ if (ret)
continue;
if (!of_device_is_available(node)) {
@@ -880,8 +1127,6 @@ static int mtk_drm_probe(struct platform_device *pdev)
continue;
}
- comp_type = (enum mtk_ddp_comp_type)(uintptr_t)of_id->data;
-
if (comp_type == MTK_DISP_MUTEX) {
int id;
@@ -1009,7 +1254,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = {
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
- .remove_new = mtk_drm_remove,
+ .remove = mtk_drm_remove,
.shutdown = mtk_drm_shutdown,
.driver = {
.name = "mediatek-drm",
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index ce897984de51..675cdc90a440 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -63,7 +63,7 @@ struct mtk_drm_private {
struct device *mmsys_dev;
struct device_node *comp_node[DDP_COMPONENT_DRM_ID_MAX];
struct mtk_ddp_comp ddp_comp[DDP_COMPONENT_DRM_ID_MAX];
- const struct mtk_mmsys_driver_data *data;
+ struct mtk_mmsys_driver_data *data;
struct drm_atomic_state *suspend_state;
unsigned int mbox_index;
struct mtk_drm_private **all_drm_private;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index eeec641cab60..40752f232054 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -139,11 +139,11 @@
#define CLK_HS_POST GENMASK(15, 8)
#define CLK_HS_EXIT GENMASK(23, 16)
-#define DSI_VM_CMD_CON 0x130
+/* DSI_VM_CMD_CON */
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
-#define DSI_SHADOW_DEBUG 0x190U
+/* DSI_SHADOW_DEBUG */
#define FORCE_COMMIT BIT(0)
#define BYPASS_SHADOW BIT(1)
@@ -187,6 +187,8 @@ struct phy;
struct mtk_dsi_driver_data {
const u32 reg_cmdq_off;
+ const u32 reg_vm_cmd_off;
+ const u32 reg_shadow_dbg_off;
bool has_shadow_ctl;
bool has_size_ctl;
bool cmdq_long_packet_ctl;
@@ -246,23 +248,22 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, HZ_PER_MHZ);
struct mtk_phy_timing *timing = &dsi->phy_timing;
- timing->lpx = (80 * data_rate_mhz / (8 * 1000)) + 1;
- timing->da_hs_prepare = (59 * data_rate_mhz + 4 * 1000) / 8000 + 1;
- timing->da_hs_zero = (163 * data_rate_mhz + 11 * 1000) / 8000 + 1 -
+ timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+ timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
timing->da_hs_prepare;
- timing->da_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
+ timing->da_hs_trail = timing->da_hs_prepare + 1;
- timing->ta_go = 4 * timing->lpx;
- timing->ta_sure = 3 * timing->lpx / 2;
- timing->ta_get = 5 * timing->lpx;
- timing->da_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->ta_go = 4 * timing->lpx - 2;
+ timing->ta_sure = timing->lpx + 2;
+ timing->ta_get = 4 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx + 1;
- timing->clk_hs_prepare = (57 * data_rate_mhz / (8 * 1000)) + 1;
- timing->clk_hs_post = (65 * data_rate_mhz + 53 * 1000) / 8000 + 1;
- timing->clk_hs_trail = (78 * data_rate_mhz + 7 * 1000) / 8000 + 1;
- timing->clk_hs_zero = (330 * data_rate_mhz / (8 * 1000)) + 1 -
- timing->clk_hs_prepare;
- timing->clk_hs_exit = (118 * data_rate_mhz / (8 * 1000)) + 1;
+ timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+ timing->clk_hs_post = timing->clk_hs_prepare + 8;
+ timing->clk_hs_trail = timing->clk_hs_prepare;
+ timing->clk_hs_zero = timing->clk_hs_trail * 4;
+ timing->clk_hs_exit = 2 * timing->clk_hs_trail;
timcon0 = FIELD_PREP(LPX, timing->lpx) |
FIELD_PREP(HS_PREP, timing->da_hs_prepare) |
@@ -367,8 +368,8 @@ static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
- mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
- mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
+ mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, VM_CMD_EN, VM_CMD_EN);
+ mtk_dsi_mask(dsi, dsi->driver_data->reg_vm_cmd_off, TS_VFP_EN, TS_VFP_EN);
}
static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
@@ -714,7 +715,7 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
if (dsi->driver_data->has_shadow_ctl)
writel(FORCE_COMMIT | BYPASS_SHADOW,
- dsi->regs + DSI_SHADOW_DEBUG);
+ dsi->regs + dsi->driver_data->reg_shadow_dbg_off);
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
@@ -988,9 +989,17 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
- dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
- if (IS_ERR(dsi->next_bridge))
- return PTR_ERR(dsi->next_bridge);
+ dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+ if (IS_ERR(dsi->next_bridge)) {
+ ret = PTR_ERR(dsi->next_bridge);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /* Old devicetree has only one endpoint */
+ dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(dsi->next_bridge))
+ return PTR_ERR(dsi->next_bridge);
+ }
drm_bridge_add(&dsi->bridge);
@@ -1255,26 +1264,36 @@ static void mtk_dsi_remove(struct platform_device *pdev)
static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
.reg_cmdq_off = 0x200,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190
};
static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
.reg_cmdq_off = 0x180,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190
};
static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
.reg_cmdq_off = 0x200,
+ .reg_vm_cmd_off = 0x130,
+ .reg_shadow_dbg_off = 0x190,
.has_shadow_ctl = true,
.has_size_ctl = true,
};
static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
+ .reg_vm_cmd_off = 0x200,
+ .reg_shadow_dbg_off = 0xc00,
.has_shadow_ctl = true,
.has_size_ctl = true,
};
static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
.reg_cmdq_off = 0xd00,
+ .reg_vm_cmd_off = 0x200,
+ .reg_shadow_dbg_off = 0xc00,
.has_shadow_ctl = true,
.has_size_ctl = true,
.cmdq_long_packet_ctl = true,
@@ -1293,7 +1312,7 @@ MODULE_DEVICE_TABLE(of, mtk_dsi_of_match);
struct platform_driver mtk_dsi_driver = {
.probe = mtk_dsi_probe,
- .remove_new = mtk_dsi_remove,
+ .remove = mtk_dsi_remove,
.driver = {
.name = "mtk-dsi",
.of_match_table = mtk_dsi_of_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index d1d9cf8b10e1..96832d0cca37 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -145,6 +145,13 @@ static irqreturn_t mtk_ethdr_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+u32 mtk_ethdr_get_blend_modes(struct device *dev)
+{
+ return BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE);
+}
+
void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt)
@@ -381,7 +388,7 @@ MODULE_DEVICE_TABLE(of, mtk_ethdr_driver_dt_match);
struct platform_driver mtk_ethdr_driver = {
.probe = mtk_ethdr_probe,
- .remove_new = mtk_ethdr_remove,
+ .remove = mtk_ethdr_remove,
.driver = {
.name = "mediatek-disp-ethdr",
.of_match_table = mtk_ethdr_driver_dt_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.h b/drivers/gpu/drm/mediatek/mtk_ethdr.h
index 81af9edea3f7..a72aeee46829 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.h
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.h
@@ -13,6 +13,7 @@ void mtk_ethdr_clk_disable(struct device *dev);
void mtk_ethdr_config(struct device *dev, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt);
+u32 mtk_ethdr_get_blend_modes(struct device *dev);
void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
struct mtk_plane_state *state,
struct cmdq_pkt *cmdq_pkt);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 7687f673964e..ca82bc829cb9 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1660,7 +1660,6 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.mute_stream = mtk_hdmi_audio_mute,
.get_eld = mtk_hdmi_audio_get_eld,
.hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb,
- .no_capture_mute = 1,
};
static int mtk_hdmi_register_audio_driver(struct device *dev)
@@ -1671,6 +1670,7 @@ static int mtk_hdmi_register_audio_driver(struct device *dev)
.max_i2s_channels = 2,
.i2s = 1,
.data = hdmi,
+ .no_capture_mute = 1,
};
struct platform_device *pdev;
@@ -1795,7 +1795,7 @@ MODULE_DEVICE_TABLE(of, mtk_hdmi_of_ids);
static struct platform_driver mtk_hdmi_driver = {
.probe = mtk_hdmi_probe,
- .remove_new = mtk_hdmi_remove,
+ .remove = mtk_hdmi_remove,
.driver = {
.name = "mediatek-drm-hdmi",
.of_match_table = mtk_hdmi_of_ids,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index 8e60631d4cd2..07db68067844 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -331,7 +331,7 @@ MODULE_DEVICE_TABLE(of, mtk_hdmi_ddc_match);
struct platform_driver mtk_hdmi_ddc_driver = {
.probe = mtk_hdmi_ddc_probe,
- .remove_new = mtk_hdmi_ddc_remove,
+ .remove = mtk_hdmi_ddc_remove,
.driver = {
.name = "mediatek-hdmi-ddc",
.of_match_table = mtk_hdmi_ddc_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
index 7c1a8c796833..fc69ee38ce7d 100644
--- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c
@@ -341,7 +341,7 @@ MODULE_DEVICE_TABLE(of, mtk_mdp_rdma_driver_dt_match);
struct platform_driver mtk_mdp_rdma_driver = {
.probe = mtk_mdp_rdma_probe,
- .remove_new = mtk_mdp_rdma_remove,
+ .remove = mtk_mdp_rdma_remove,
.driver = {
.name = "mediatek-mdp-rdma",
.of_match_table = mtk_mdp_rdma_driver_dt_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_padding.c b/drivers/gpu/drm/mediatek/mtk_padding.c
index 4bebd13a07bd..b4e3e5a3428b 100644
--- a/drivers/gpu/drm/mediatek/mtk_padding.c
+++ b/drivers/gpu/drm/mediatek/mtk_padding.c
@@ -146,7 +146,7 @@ MODULE_DEVICE_TABLE(of, mtk_padding_driver_dt_match);
struct platform_driver mtk_padding_driver = {
.probe = mtk_padding_probe,
- .remove_new = mtk_padding_remove,
+ .remove = mtk_padding_remove,
.driver = {
.name = "mediatek-disp-padding",
.of_match_table = mtk_padding_driver_dt_match,
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 7d2cb4e0fafa..8a48b3b0a956 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -320,8 +320,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
- unsigned int supported_rotations, const u32 *formats,
- size_t num_formats, unsigned int plane_idx)
+ unsigned int supported_rotations, const u32 blend_modes,
+ const u32 *formats, size_t num_formats, unsigned int plane_idx)
{
int err;
@@ -366,12 +366,11 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (err)
DRM_ERROR("failed to create property: alpha\n");
- err = drm_plane_create_blend_mode_property(plane,
- BIT(DRM_MODE_BLEND_PREMULTI) |
- BIT(DRM_MODE_BLEND_COVERAGE) |
- BIT(DRM_MODE_BLEND_PIXEL_NONE));
- if (err)
- DRM_ERROR("failed to create property: blend_mode\n");
+ if (blend_modes) {
+ err = drm_plane_create_blend_mode_property(plane, blend_modes);
+ if (err)
+ DRM_ERROR("failed to create property: blend_mode\n");
+ }
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 5b177eac67b7..3b13b89989c7 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -48,6 +48,6 @@ to_mtk_plane_state(struct drm_plane_state *state)
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
- unsigned int supported_rotations, const u32 *formats,
- size_t num_formats, unsigned int plane_idx);
+ unsigned int supported_rotations, const u32 blend_modes,
+ const u32 *formats, size_t num_formats, unsigned int plane_idx);
#endif
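
With the new `blend_modes` argument, each MediaTek component reports only the pixel-blend modes its blender actually implements, and `mtk_plane_init()` now skips the property when the mask is zero. A hedged sketch of a caller (`mtk_ethdr_get_blend_modes()` is from the hunk above; the surrounding values are illustrative):

#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>

static const u32 example_formats[] = { DRM_FORMAT_ARGB8888 };

static int example_init_overlay(struct drm_device *drm, struct drm_plane *plane,
				struct device *ethdr_dev)
{
	/* 0 from a path without alpha blending means: no blend_mode property */
	u32 blend_modes = mtk_ethdr_get_blend_modes(ethdr_dev);

	return mtk_plane_init(drm, plane, BIT(0) /* possible_crtcs */,
			      DRM_PLANE_TYPE_OVERLAY,
			      DRM_MODE_ROTATE_0 /* supported_rotations */,
			      blend_modes, example_formats,
			      ARRAY_SIZE(example_formats), 0 /* plane_idx */);
}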
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 2544756538cc..417f79829cf8 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_MESON
tristate "DRM Support for Amlogic Meson Display Controller"
- depends on DRM && OF && (ARM || ARM64)
+ depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on ARCH_MESON || COMPILE_TEST
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
select DRM_BRIDGE_CONNECTOR
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 4bd0baa2a4f5..81d2ee37e773 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -8,6 +8,7 @@
* Jasper St. Pierre <jstpierre@mecheye.net>
*/
+#include <linux/aperture.h>
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
@@ -15,7 +16,7 @@
#include <linux/platform_device.h>
#include <linux/soc/amlogic/meson-canvas.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -98,12 +99,12 @@ static const struct drm_driver meson_driver = {
/* DMA Ops */
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
/* Misc */
.fops = &fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = "20161109",
.major = 1,
.minor = 0,
};
@@ -126,7 +127,7 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
return false;
}
-static struct regmap_config meson_regmap_config = {
+static const struct regmap_config meson_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -277,7 +278,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
* Remove early framebuffers (ie. simplefb). The framebuffer can be
* located anywhere in RAM
*/
- ret = drm_aperture_remove_framebuffers(&meson_driver);
+ ret = aperture_remove_all_conflicting_devices(meson_driver.name);
if (ret)
goto free_canvas_vd1_2;
@@ -353,7 +354,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto uninstall_irq;
- drm_fbdev_dma_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return 0;
@@ -557,7 +558,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = {
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
- .remove_new = meson_drv_remove,
+ .remove = meson_drv_remove,
.shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 5565f7777529..0d7c68b29dff 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -272,20 +272,6 @@ static inline void dw_hdmi_g12a_dwc_write(struct meson_dw_hdmi *dw_hdmi,
writeb(data, dw_hdmi->hdmitx + addr);
}
-/* Helper to change specific bits in controller registers */
-static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi,
- unsigned int addr,
- unsigned int mask,
- unsigned int val)
-{
- unsigned int data = dw_hdmi->data->dwc_read(dw_hdmi, addr);
-
- data &= ~mask;
- data |= val;
-
- dw_hdmi->data->dwc_write(dw_hdmi, addr, data);
-}
-
/* Bridge */
/* Setup PHY bandwidth modes */
@@ -870,7 +856,7 @@ MODULE_DEVICE_TABLE(of, meson_dw_hdmi_of_table);
static struct platform_driver meson_dw_hdmi_platform_driver = {
.probe = meson_dw_hdmi_probe,
- .remove_new = meson_dw_hdmi_remove,
+ .remove = meson_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_hdmi_of_table,
diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
index a10cff3ca1fe..66c73c512b0e 100644
--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
@@ -345,7 +345,7 @@ MODULE_DEVICE_TABLE(of, meson_dw_mipi_dsi_of_table);
static struct platform_driver meson_dw_mipi_dsi_platform_driver = {
.probe = meson_dw_mipi_dsi_probe,
- .remove_new = meson_dw_mipi_dsi_remove,
+ .remove = meson_dw_mipi_dsi_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_mipi_dsi_of_table,
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 3096944a8f0a..412dcbea0e2d 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -2,6 +2,7 @@
config DRM_MGAG200
tristate "Matrox G200"
depends on DRM && PCI && MMU
+ select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
select I2C
@@ -20,4 +21,4 @@ config DRM_MGAG200_DISABLE_WRITECOMBINE
performances. This can interfere with real-time tasks; even if they
are running on other CPU cores than the graphics output.
Enable this option only if you run realtime tasks on a server with a
- Matrox G200. \ No newline at end of file
+ Matrox G200.
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 6623ee4e3277..069fdd2dc8f6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -6,19 +6,20 @@
* Dave Airlie
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_pciids.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -85,34 +86,6 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size)
return offset - 65536;
}
-static irqreturn_t mgag200_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = arg;
- struct mga_device *mdev = to_mga_device(dev);
- struct drm_crtc *crtc;
- u32 status, ien;
-
- status = RREG32(MGAREG_STATUS);
-
- if (status & MGAREG_STATUS_VLINEPEN) {
- ien = RREG32(MGAREG_IEN);
- if (!(ien & MGAREG_IEN_VLINEIEN))
- goto out;
-
- crtc = drm_crtc_from_index(dev, 0);
- if (WARN_ON_ONCE(!crtc))
- goto out;
- drm_crtc_handle_vblank(crtc);
-
- WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
-
- return IRQ_HANDLED;
- }
-
-out:
- return IRQ_NONE;
-}
-
/*
* DRM driver
*/
@@ -124,11 +97,11 @@ static const struct drm_driver mgag200_driver = {
.fops = &mgag200_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
};
/*
@@ -196,7 +169,6 @@ int mgag200_device_init(struct mga_device *mdev,
const struct mgag200_device_funcs *funcs)
{
struct drm_device *dev = &mdev->base;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 crtcext3, misc;
int ret;
@@ -223,14 +195,6 @@ int mgag200_device_init(struct mga_device *mdev,
mutex_unlock(&mdev->rmmio_lock);
WREG32(MGAREG_IEN, 0);
- WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
-
- ret = devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler, IRQF_SHARED,
- dev->driver->name, dev);
- if (ret) {
- drm_err(dev, "Failed to acquire interrupt, error %d\n", ret);
- return ret;
- }
return 0;
}
@@ -263,7 +227,7 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct drm_device *dev;
int ret;
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &mgag200_driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, mgag200_driver.name);
if (ret)
return ret;
@@ -314,7 +278,7 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* FIXME: A 24-bit color depth does not work with 24 bpp on
* G200ER. Force 32 bpp.
*/
- drm_fbdev_shmem_setup(dev, 32);
+ drm_client_setup_with_fourcc(dev, DRM_FORMAT_XRGB8888);
return 0;
}
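
The mgag200 probe path shows both API migrations in this series at once: the `drm_aperture_*` helpers replaced by the generic `<linux/aperture.h>` ones, and the direct `drm_fbdev_*_setup()` call replaced by the client-selection layer (with `DRM_FBDEV_SHMEM_DRIVER_OPS` supplying the fbdev callbacks in `struct drm_driver`). A condensed sketch of the resulting PCI pattern, error paths trimmed and names illustrative:

#include <linux/aperture.h>
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>

static int example_pci_init(struct pci_dev *pdev, struct drm_device *dev)
{
	int ret;

	/* Kick out firmware framebuffers (simplefb/efifb) on this device */
	ret = aperture_remove_conflicting_pci_devices(pdev, "example-drm");
	if (ret)
		return ret;

	ret = drm_dev_register(dev, 0);
	if (ret)
		return ret;

	/* The client layer picks the emulation to run; pass a fourcc only
	 * when the driver must force a format, as mgag200 does here. */
	drm_client_setup_with_fourcc(dev, DRM_FORMAT_XRGB8888);
	return 0;
}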
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 4760ba92871b..0608fc63e588 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -25,7 +25,6 @@
#define DRIVER_NAME "mgag200"
#define DRIVER_DESC "MGA G200 SE"
-#define DRIVER_DATE "20110418"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -391,24 +390,17 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st
void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
-bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
- int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
#define MGAG200_CRTC_HELPER_FUNCS \
.mode_valid = mgag200_crtc_helper_mode_valid, \
.atomic_check = mgag200_crtc_helper_atomic_check, \
.atomic_flush = mgag200_crtc_helper_atomic_flush, \
.atomic_enable = mgag200_crtc_helper_atomic_enable, \
- .atomic_disable = mgag200_crtc_helper_atomic_disable, \
- .get_scanout_position = mgag200_crtc_helper_get_scanout_position
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
void mgag200_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
-int mgag200_crtc_enable_vblank(struct drm_crtc *crtc);
-void mgag200_crtc_disable_vblank(struct drm_crtc *crtc);
#define MGAG200_CRTC_FUNCS \
.reset = mgag200_crtc_reset, \
@@ -416,10 +408,7 @@ void mgag200_crtc_disable_vblank(struct drm_crtc *crtc);
.set_config = drm_atomic_helper_set_config, \
.page_flip = drm_atomic_helper_page_flip, \
.atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \
- .atomic_destroy_state = mgag200_crtc_atomic_destroy_state, \
- .enable_vblank = mgag200_crtc_enable_vblank, \
- .disable_vblank = mgag200_crtc_disable_vblank, \
- .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp
+ .atomic_destroy_state = mgag200_crtc_atomic_destroy_state
void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode,
bool set_vidrst);
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c
index 77ce8d36cef0..f874e2949840 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -404,9 +403,5 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
index 09ced65c1d2f..e2305f8e00f8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -276,9 +275,5 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
index 5daa469137bd..11ae76eb081d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
@@ -7,7 +7,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -181,9 +180,5 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index 09cfffafe130..c20ed0ab50ec 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -206,8 +205,6 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
-
- drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
@@ -215,8 +212,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200er_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable,
- .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
};
static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = {
@@ -312,9 +308,5 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index 3d48baa91d8b..78be964eb97c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -207,8 +206,6 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
-
- drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
@@ -216,8 +213,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable,
- .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
};
static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = {
@@ -317,9 +313,5 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
index dabc778e64e8..31624c9ab7b7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
@@ -7,7 +7,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -199,9 +198,5 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index 9dcbe8304271..7a32d3b1d226 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -338,8 +337,6 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
-
- drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
@@ -347,8 +344,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
.atomic_check = mgag200_crtc_helper_atomic_check,
.atomic_flush = mgag200_crtc_helper_atomic_flush,
.atomic_enable = mgag200_g200se_crtc_helper_atomic_enable,
- .atomic_disable = mgag200_crtc_helper_atomic_disable,
- .get_scanout_position = mgag200_crtc_helper_get_scanout_position,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
};
static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = {
@@ -517,9 +513,5 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
index 83a24aedbf2f..a0e7b9ad46cd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
@@ -8,7 +8,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "mgag200_drv.h"
@@ -323,9 +322,5 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
- ret = drm_vblank_init(dev, 1);
- if (ret)
- return ERR_PTR(ret);
-
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 7159909aca1e..fb71658c3117 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -22,7 +22,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
-#include <drm/drm_vblank.h>
#include "mgag200_ddc.h"
#include "mgag200_drv.h"
@@ -227,14 +226,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod
vblkstr = mode->crtc_vblank_start;
vblkend = vtotal + 1;
- /*
- * There's no VBLANK interrupt on Matrox chipsets, so we use
- * the VLINE interrupt instead. It triggers when the current
- * <linecomp> has been reached. For VBLANK, this is the first
- * non-visible line at the bottom of the screen. Therefore,
- * keep <linecomp> in sync with <vblkstr>.
- */
- linecomp = vblkstr;
+ linecomp = vdispend;
misc = RREG8(MGA_MISC_IN);
@@ -645,8 +637,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = to_mga_device(dev);
- struct drm_pending_vblank_event *event;
- unsigned long flags;
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
const struct drm_format_info *format = mgag200_crtc_state->format;
@@ -656,18 +646,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
else
mgag200_crtc_set_gamma_linear(mdev, format);
}
-
- event = crtc->state->event;
- if (event) {
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (drm_crtc_vblank_get(crtc) != 0)
- drm_crtc_send_vblank_event(crtc, event);
- else
- drm_crtc_arm_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
}
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
@@ -692,44 +670,15 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
-
- drm_crtc_vblank_on(crtc);
}
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
struct mga_device *mdev = to_mga_device(crtc->dev);
- drm_crtc_vblank_off(crtc);
-
mgag200_disable_display(mdev);
}
-bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
- int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- struct mga_device *mdev = to_mga_device(crtc->dev);
- u32 vcount;
-
- if (stime)
- *stime = ktime_get();
-
- if (vpos) {
- vcount = RREG32(MGAREG_VCOUNT);
- *vpos = vcount & GENMASK(11, 0);
- }
-
- if (hpos)
- *hpos = mode->htotal >> 1; // near middle of scanline on average
-
- if (etime)
- *etime = ktime_get();
-
- return true;
-}
-
void mgag200_crtc_reset(struct drm_crtc *crtc)
{
struct mgag200_crtc_state *mgag200_crtc_state;
@@ -774,30 +723,6 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
kfree(mgag200_crtc_state);
}
-int mgag200_crtc_enable_vblank(struct drm_crtc *crtc)
-{
- struct mga_device *mdev = to_mga_device(crtc->dev);
- u32 ien;
-
- WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
-
- ien = RREG32(MGAREG_IEN);
- ien |= MGAREG_IEN_VLINEIEN;
- WREG32(MGAREG_IEN, ien);
-
- return 0;
-}
-
-void mgag200_crtc_disable_vblank(struct drm_crtc *crtc)
-{
- struct mga_device *mdev = to_mga_device(crtc->dev);
- u32 ien;
-
- ien = RREG32(MGAREG_IEN);
- ien &= ~(MGAREG_IEN_VLINEIEN);
- WREG32(MGAREG_IEN, ien);
-}
-
/*
* Mode config
*/
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 90c68106b63b..7ec833b6d829 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -6,6 +6,7 @@ config DRM_MSM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on COMMON_CLK
depends on IOMMU_SUPPORT
+ depends on OF
depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
@@ -14,6 +15,7 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
+ select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_AUX_BUS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
@@ -92,6 +94,7 @@ config DRM_MSM_DPU
bool "Enable DPU support in MSM DRM driver"
depends on DRM_MSM
select DRM_MSM_MDSS
+ select DRM_DISPLAY_DSC_HELPER
default y
help
Compile in support for the Display Processing Unit in
@@ -113,6 +116,7 @@ config DRM_MSM_DSI
depends on DRM_MSM
select DRM_PANEL
select DRM_MIPI_DSI
+ select DRM_DISPLAY_DSC_HELPER
default y
help
Choose this option if you have a need for MIPI DSI connector
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 13110fcc46a8..5df20cbeafb8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -23,6 +23,7 @@ adreno-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
+ adreno/a6xx_preempt.o \
adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
@@ -77,6 +78,7 @@ msm-display-$(CONFIG_DRM_MSM_DPU) += \
disp/dpu1/dpu_hw_catalog.o \
disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
+ disp/dpu1/dpu_hw_cwb.o \
disp/dpu1/dpu_hw_dsc.o \
disp/dpu1/dpu_hw_dsc_1_2.o \
disp/dpu1/dpu_hw_interrupts.o \
@@ -210,6 +212,7 @@ DISPLAY_HEADERS = \
generated/mdp4.xml.h \
generated/mdp5.xml.h \
generated/mdp_common.xml.h \
+ generated/mdss.xml.h \
generated/sfpb.xml.h
$(addprefix $(obj)/,$(adreno-y)): $(addprefix $(obj)/,$(ADRENO_HEADERS))
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 0dc255ddf5ce..379a3d346c30 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -22,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
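
The repeated `gpu->cur_ctx_seqno` → `ring->cur_ctx_seqno` conversion across a2xx–a7xx moves the "last context" bookkeeping from the GPU into each ringbuffer: with preemption, different rings can hold submissions from different contexts, so the skip check must be per ring. A sketch of the predicate, using field names from the diff (surrounding structure illustrative):

static bool ctx_switch_needed(struct msm_ringbuffer *ring,
			      struct msm_gem_submit *submit)
{
	/* Replay CTX_RESTORE_BUF only when the submitting context differs
	 * from the one that last ran on *this* ring, not on the GPU. */
	return ring->cur_ctx_seqno != submit->queue->ctx->seqno;
}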
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b46ff49f47cf..b6df115bb567 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -40,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 8b4cdf95f445..f1b18a6663f7 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -34,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -251,8 +251,8 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
/* Disable L2 bypass to avoid UCHE out of bounds errors */
- gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
- gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
@@ -693,6 +693,8 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
if (ret)
goto fail;
+ adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull;
+
if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
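
Rather than hard-coding the register halves per GPU, the series stores one 64-bit `uche_trap_base` and splits it at write time; a worked example with the constants from these hunks:

#include <linux/kernel.h>	/* lower_32_bits() / upper_32_bits() */

/* a4xx: base = 0xffff0000ffff0000 -> LO 0xffff0000, HI 0xffff0000
 * a5xx: base = 0x0001ffffffff0000 -> LO 0xffff0000, HI 0x0001ffff
 * i.e. exactly the literals the old code wrote by hand. */
u64 base = 0x0001ffffffff0000ull;
u32 lo = lower_32_bits(base);	/* 0xffff0000 */
u32 hi = upper_32_bits(base);	/* 0x0001ffff */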
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index e09044930547..71dca78cd7a5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -77,7 +77,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -132,7 +132,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
unsigned int i, ibs = 0;
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
- gpu->cur_ctx_seqno = 0;
+ ring->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
return;
}
@@ -171,7 +171,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -750,10 +750,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
/* Disable L2 bypass in the UCHE */
- gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
- gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
- gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
- gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
/* Set the GMEM VA range (0 to gpu->gmem) */
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
@@ -1760,11 +1760,6 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
unsigned int nr_rings;
int ret;
- if (!pdev) {
- DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
- return ERR_PTR(-ENXIO);
- }
-
a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
if (!a5xx_gpu)
return ERR_PTR(-ENOMEM);
@@ -1805,5 +1800,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
adreno_gpu->ubwc_config.macrotile_mode = 0;
adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+ adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
+
return gpu;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 7705f8010484..6b91e0bd1514 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -307,7 +307,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
else if (adreno_is_a540(adreno_gpu))
a540_lm_setup(gpu);
- /* Set up SP/TP power collpase */
+ /* Set up SP/TP power collapse */
a5xx_pc_init(gpu);
/* Start the GPMU */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 0312b6ee0356..edffb7737a97 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -973,6 +973,25 @@ static const struct adreno_info a6xx_gpus[] = {
},
.address_space_size = SZ_16G,
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06060300),
+ .family = ADRENO_6XX_GEN4,
+ .fw = {
+ [ADRENO_FW_SQE] = "a660_sqe.fw",
+ [ADRENO_FW_GMU] = "a663_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a690_hwcg,
+ .protect = &a660_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00300200,
+ },
+ .address_space_size = SZ_16G,
+ }, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
.fw = {
@@ -1281,6 +1300,28 @@ static const u32 a730_protect_regs[] = {
};
DECLARE_ADRENO_PROTECT(a730_protect, 48);
+static const uint32_t a7xx_pwrup_reglist_regs[] = {
+ REG_A6XX_UCHE_TRAP_BASE,
+ REG_A6XX_UCHE_TRAP_BASE + 1,
+ REG_A6XX_UCHE_WRITE_THRU_BASE,
+ REG_A6XX_UCHE_WRITE_THRU_BASE + 1,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN + 1,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX + 1,
+ REG_A6XX_UCHE_CACHE_WAYS,
+ REG_A6XX_UCHE_MODE_CNTL,
+ REG_A6XX_RB_NC_MODE_CNTL,
+ REG_A6XX_RB_CMP_DBG_ECO_CNTL,
+ REG_A7XX_GRAS_NC_MODE_CNTL,
+ REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
+ REG_A6XX_UCHE_GBIF_GX_CONFIG,
+ REG_A6XX_UCHE_CLIENT_PF,
+ REG_A6XX_TPL1_DBG_ECO_CNTL1,
+};
+
+DECLARE_ADRENO_REGLIST_LIST(a7xx_pwrup_reglist);
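
`DECLARE_ADRENO_REGLIST_LIST()` itself is introduced elsewhere in the series; judging from the consumers later in this diff (`reglist->regs`, `reglist->count`), it presumably pairs the `_regs` array with its length, roughly:

/* Assumed expansion — not shown in this hunk, reconstructed from usage */
struct adreno_reglist_list {
	const u32 *regs;	/* register offsets to save/restore */
	u32 count;		/* number of entries in @regs */
};

#define DECLARE_ADRENO_REGLIST_LIST(name)			\
	static const struct adreno_reglist_list name = {	\
		.regs = name ## _regs,				\
		.count = ARRAY_SIZE(name ## _regs),		\
	}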
+
static const struct adreno_info a7xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x07000200),
@@ -1315,15 +1356,18 @@ static const struct adreno_info a7xx_gpus[] = {
.gmem = SZ_2M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
- ADRENO_QUIRK_HAS_HW_APRIV,
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
.init = a6xx_gpu_init,
.zapfw = "a730_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a730_hwcg,
.protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_cgc_mode = 0x00020000,
},
.address_space_size = SZ_16G,
+ .preempt_record_size = 2860 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */
.family = ADRENO_7XX_GEN2,
@@ -1334,16 +1378,30 @@ static const struct adreno_info a7xx_gpus[] = {
.gmem = 3 * SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
- ADRENO_QUIRK_HAS_HW_APRIV,
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
.init = a6xx_gpu_init,
.zapfw = "a740_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7020100,
.gmu_cgc_mode = 0x00020202,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
},
.address_space_size = SZ_16G,
+ .preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050c01), /* "C512v2" */
.family = ADRENO_7XX_GEN2,
@@ -1354,15 +1412,18 @@ static const struct adreno_info a7xx_gpus[] = {
.gmem = 3 * SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
- ADRENO_QUIRK_HAS_HW_APRIV,
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_256G,
+ .preempt_record_size = 4192 * SZ_1K,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
.family = ADRENO_7XX_GEN3,
@@ -1373,15 +1434,29 @@ static const struct adreno_info a7xx_gpus[] = {
.gmem = 3 * SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
- ADRENO_QUIRK_HAS_HW_APRIV,
+ ADRENO_QUIRK_HAS_HW_APRIV |
+ ADRENO_QUIRK_PREEMPTION,
.init = a6xx_gpu_init,
.zapfw = "gen70900_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
+ .pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7090100,
.gmu_cgc_mode = 0x00020202,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(2),
+ .perfmode_bw = 10687500,
+ },
+ { /* sentinel */ },
+ },
},
.address_space_size = SZ_16G,
+ .preempt_record_size = 3572 * SZ_1K,
}
};
DECLARE_ADRENO_GPULIST(a7xx);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 37927bdd6fbe..65d38b25c070 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -9,6 +9,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
#include <drm/drm_gem.h>
#include "a6xx_gpu.h"
@@ -109,9 +110,11 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 perf_index;
+ u32 bw_index = 0;
unsigned long gpu_freq;
int ret = 0;
@@ -124,6 +127,37 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
if (gpu_freq == gmu->gpu_freqs[perf_index])
break;
+ /* If enabled, find the corresponding DDR bandwidth index */
+ if (info->bcms && gmu->nr_gpu_bws > 1) {
+ unsigned int bw = dev_pm_opp_get_bw(opp, true, 0);
+
+ for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) {
+ if (bw == gmu->gpu_bw_table[bw_index])
+ break;
+ }
+
+ /* Vote AB as a fraction of the max bandwidth, starting from A750 */
+ if (bw && adreno_is_a750_family(adreno_gpu)) {
+ u64 tmp;
+
+ /* For now, vote for 25% of the bandwidth */
+ tmp = bw * 25;
+ do_div(tmp, 100);
+
+ /*
+ * The AB vote consists of a 16-bit-wide quantized level
+ * against the maximum supported bandwidth.
+ * Quantization can be calculated as below:
+ * vote = (bandwidth * 2^16) / max bandwidth
+ */
+ tmp *= MAX_AB_VOTE;
+ do_div(tmp, gmu->gpu_bw_table[gmu->nr_gpu_bws - 1]);
+
+ bw_index |= AB_VOTE(clamp(tmp, 1, MAX_AB_VOTE));
+ bw_index |= AB_VOTE_ENABLE;
+ }
+ }
+
gmu->current_perf_index = perf_index;
gmu->freq = gmu->gpu_freqs[perf_index];
@@ -139,8 +173,10 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
return;
if (!gmu->legacy) {
- a6xx_hfi_set_freq(gmu, perf_index);
- dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+ a6xx_hfi_set_freq(gmu, perf_index, bw_index);
+ /* With bandwidth voting, we now vote for all resources, so skip the OPP set */
+ if (!bw_index)
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
return;
}
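
The AB-vote arithmetic above quantizes a 25% share of the requested bandwidth into a 16-bit level; a worked example with illustrative numbers, treating MAX_AB_VOTE as the 16-bit full scale (65535):

/* Hypothetical request: bw = 8,000,000 KBps, and the top entry of
 * gpu_bw_table[] is 16,500,000 KBps. */
u64 tmp = 8000000ULL * 25;
do_div(tmp, 100);		/* tmp = 2,000,000: 25% of the request */

/* vote = (bandwidth * 2^16) / max bandwidth */
tmp *= 65535;			/* stand-in for MAX_AB_VOTE */
do_div(tmp, 16500000);		/* tmp ~= 7943, i.e. ~12% of full scale */

/* clamp() to at least 1 so a non-zero request never quantizes to "off" */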
@@ -729,6 +765,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
const struct block_header *blk;
u32 reg_offset;
+ u32 ver;
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
@@ -775,6 +812,12 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
}
}
+ ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION);
+ DRM_INFO("Loaded GMU firmware v%u.%u.%u\n",
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
+
return 0;
}
@@ -1265,7 +1308,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
bo->virt = msm_gem_get_vaddr(bo->obj);
bo->size = size;
- msm_gem_object_set_name(bo->obj, name);
+ msm_gem_object_set_name(bo->obj, "%s", name);
return 0;
}
@@ -1287,6 +1330,104 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
return 0;
}
+/**
+ * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
+ * @unit: divisor used to convert bytes/sec bw value to an RPMh msg
+ * @width: multiplier used to convert bytes/sec bw value to an RPMh msg
+ * @vcd: virtual clock domain that this bcm belongs to
+ * @reserved: reserved field
+ */
+struct bcm_db {
+ __le32 unit;
+ __le16 width;
+ u8 vcd;
+ u8 reserved;
+};
+
+static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu,
+ const struct a6xx_info *info,
+ struct a6xx_gmu *gmu)
+{
+ const struct bcm_db *bcm_data[GMU_MAX_BCMS] = { 0 };
+ unsigned int bcm_index, bw_index, bcm_count = 0;
+
+ /* Retrieve BCM data from cmd-db */
+ for (bcm_index = 0; bcm_index < GMU_MAX_BCMS; bcm_index++) {
+ const struct a6xx_bcm *bcm = &info->bcms[bcm_index];
+ size_t count;
+
+ /* Stop at the NULL-terminated BCM entry */
+ if (!bcm->name)
+ break;
+
+ bcm_data[bcm_index] = cmd_db_read_aux_data(bcm->name, &count);
+ if (IS_ERR(bcm_data[bcm_index]))
+ return PTR_ERR(bcm_data[bcm_index]);
+
+ if (!count) {
+ dev_err(gmu->dev, "invalid BCM '%s' aux data size\n",
+ bcm->name);
+ return -EINVAL;
+ }
+
+ bcm_count++;
+ }
+
+ /* Generate BCM votes values for each bandwidth & BCM */
+ for (bw_index = 0; bw_index < gmu->nr_gpu_bws; bw_index++) {
+ u32 *data = gmu->gpu_ib_votes[bw_index];
+ u32 bw = gmu->gpu_bw_table[bw_index];
+
+ /* Calculations loosely copied from bcm_aggregate() & tcs_cmd_gen() */
+ for (bcm_index = 0; bcm_index < bcm_count; bcm_index++) {
+ const struct a6xx_bcm *bcm = &info->bcms[bcm_index];
+ bool commit = false;
+ u64 peak;
+ u32 vote;
+
+ if (bcm_index == bcm_count - 1 ||
+ (bcm_data[bcm_index + 1] &&
+ bcm_data[bcm_index]->vcd != bcm_data[bcm_index + 1]->vcd))
+ commit = true;
+
+ if (!bw) {
+ data[bcm_index] = BCM_TCS_CMD(commit, false, 0, 0);
+ continue;
+ }
+
+ if (bcm->fixed) {
+ u32 perfmode = 0;
+
+ /* GMU on A6xx votes perfmode on any valid bandwidth */
+ if (!adreno_is_a7xx(adreno_gpu) ||
+ (bcm->perfmode_bw && bw >= bcm->perfmode_bw))
+ perfmode = bcm->perfmode;
+
+ data[bcm_index] = BCM_TCS_CMD(commit, true, 0, perfmode);
+ continue;
+ }
+
+ /* Multiply the bandwidth by the width of the connection */
+ peak = (u64)bw * le16_to_cpu(bcm_data[bcm_index]->width);
+ do_div(peak, bcm->buswidth);
+
+ /* Input bandwidth value is in KBps, scale the value to BCM unit */
+ peak *= 1000;
+ do_div(peak, le32_to_cpu(bcm_data[bcm_index]->unit));
+
+ vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK);
+
+ /* GMUs on A7xx vote on both x & y */
+ if (adreno_is_a7xx(adreno_gpu))
+ data[bcm_index] = BCM_TCS_CMD(commit, true, vote, vote);
+ else
+ data[bcm_index] = BCM_TCS_CMD(commit, true, 0, vote);
+ }
+ }
+
+ return 0;
+}
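
For a non-fixed BCM, the loop converts a KBps bandwidth into the vote using the cmd-db `width`/`unit` parameters; a worked sketch with made-up cmd-db values (real ones come from firmware at runtime):

/* Illustrative: bw = 7,500,000 KBps, width = 4, buswidth = 4, unit = 1000 */
u64 peak = (u64)7500000 * 4;	/* scale by the connection width */
do_div(peak, 4);		/* divide by the BCM buswidth: 7,500,000 */
peak *= 1000;			/* KBps input -> BCM unit scale */
do_div(peak, 1000);		/* divide by the cmd-db unit: 7,500,000 */

u32 vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK);
/* A7xx encodes the value in both x and y:
 *   data = BCM_TCS_CMD(commit, true, vote, vote);
 * pre-A7xx GMUs vote only on y:
 *   data = BCM_TCS_CMD(commit, true, 0, vote); */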
+
/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
unsigned long freq)
@@ -1390,12 +1531,15 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
* The GMU votes with the RPMh for itself and on behalf of the GPU but we need
* to construct the list of votes on the CPU and send it over. Query the RPMh
* voltage levels and build the votes
+ * The GMU can also vote for DDR interconnects, use the OPP bandwidth entries
+ * and BCM parameters to build the votes.
*/
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct msm_gpu *gpu = &adreno_gpu->base;
int ret;
@@ -1407,6 +1551,10 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
+ /* Build the interconnect votes */
+ if (info->bcms && gmu->nr_gpu_bws > 1)
+ ret |= a6xx_gmu_rpmh_bw_votes_init(adreno_gpu, info, gmu);
+
return ret;
}
@@ -1442,10 +1590,43 @@ static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
return index;
}
+static int a6xx_gmu_build_bw_table(struct device *dev, unsigned long *bandwidths,
+ u32 size)
+{
+ int count = dev_pm_opp_get_opp_count(dev);
+ struct dev_pm_opp *opp;
+ int i, index = 0;
+ unsigned int bandwidth = 1;
+
+ /*
+ * The OPP table doesn't contain the "off" bandwidth level so we need to
+ * add 1 to the table size to account for it
+ */
+
+ if (WARN(count + 1 > size,
+ "The GMU bandwidth table is being truncated\n"))
+ count = size - 1;
+
+ /* Set the "off" bandwidth */
+ bandwidths[index++] = 0;
+
+ for (i = 0; i < count; i++) {
+ opp = dev_pm_opp_find_bw_ceil(dev, &bandwidth, 0);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
+ bandwidths[index++] = bandwidth++;
+ }
+
+ return index;
+}
+
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct msm_gpu *gpu = &adreno_gpu->base;
int ret = 0;
@@ -1472,6 +1653,14 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
+ /*
+ * The GMU also handles GPU Interconnect Votes so build a list
+ * of DDR bandwidths from the GPU OPP table
+ */
+ if (info->bcms)
+ gmu->nr_gpu_bws = a6xx_gmu_build_bw_table(&gpu->pdev->dev,
+ gmu->gpu_bw_table, ARRAY_SIZE(gmu->gpu_bw_table));
+
/* Build the list of RPMh votes that we'll send to the GMU */
return a6xx_gmu_rpmh_votes_init(gmu);
}
@@ -1522,15 +1711,13 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
irq = platform_get_irq_byname(pdev, name);
- ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
+ ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
name, ret);
return ret;
}
- disable_irq(irq);
-
return irq;
}
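
`IRQF_NO_AUTOEN` folds the previous request-then-`disable_irq()` dance into the request itself, closing the window in which the handler could run before the driver is ready. The resulting pattern, sketched:

#include <linux/interrupt.h>

/* Request the IRQ left disabled; enable it only once the GMU is
 * actually started (names as in the hunk above). */
ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
		  name, gmu);
if (ret)
	return ret;
/* ... later, when ready to service interrupts: */
enable_irq(irq);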
@@ -1605,7 +1792,9 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, true);
+ ret = of_dma_configure(gmu->dev, node, true);
+ if (ret)
+ return ret;
pm_runtime_enable(gmu->dev);
@@ -1670,7 +1859,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, true);
+ ret = of_dma_configure(gmu->dev, node, true);
+ if (ret)
+ return ret;
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 94b6c5cab6f4..0c888b326cfb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -19,6 +19,18 @@ struct a6xx_gmu_bo {
u64 iova;
};
+#define GMU_MAX_GX_FREQS 16
+#define GMU_MAX_CX_FREQS 4
+#define GMU_MAX_BCMS 3
+
+struct a6xx_bcm {
+ char *name;
+ unsigned int buswidth;
+ bool fixed;
+ unsigned int perfmode;
+ unsigned int perfmode_bw;
+};
+
/*
* These define the different GMU wake up options - these define how both the
* CPU and the GMU bring up the hardware
@@ -79,12 +91,16 @@ struct a6xx_gmu {
int current_perf_index;
int nr_gpu_freqs;
- unsigned long gpu_freqs[16];
- u32 gx_arc_votes[16];
+ unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
+ u32 gx_arc_votes[GMU_MAX_GX_FREQS];
+
+ int nr_gpu_bws;
+ unsigned long gpu_bw_table[GMU_MAX_GX_FREQS];
+ u32 gpu_ib_votes[GMU_MAX_GX_FREQS][GMU_MAX_BCMS];
int nr_gmu_freqs;
- unsigned long gmu_freqs[4];
- u32 cx_arc_votes[4];
+ unsigned long gmu_freqs[GMU_MAX_CX_FREQS];
+ u32 cx_arc_votes[GMU_MAX_CX_FREQS];
unsigned long freq;
@@ -99,6 +115,7 @@ struct a6xx_gmu {
struct completion pd_gate;
struct qmp *qmp;
+ struct a6xx_hfi_msg_bw_table *bw_table;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
@@ -192,7 +209,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
-int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 perf_index, u32 bw_index);
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 06cab2c6fd66..0ae29a7c8a4d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -68,6 +68,8 @@ static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
uint32_t wptr;
unsigned long flags;
@@ -81,12 +83,17 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Make sure to wrap wptr if we need to */
wptr = get_wptr(ring);
- spin_unlock_irqrestore(&ring->preempt_lock, flags);
-
- /* Make sure everything is posted before making a decision */
- mb();
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (!a6xx_in_preempt(a6xx_gpu)) {
+ if (a6xx_gpu->cur_ring == ring)
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ else
+ ring->restore_wptr = true;
+ } else {
+ ring->restore_wptr = true;
+ }
- gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
}
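
With preemption, `a6xx_flush()` can only poke `CP_RB_WPTR` when its ring is the one the CP is currently executing; otherwise the update is latched in `ring->restore_wptr`. A hedged sketch of how the deferred write would be replayed (helper name hypothetical; the actual consumer lives in the new a6xx_preempt.c added by this series):

static void example_restore_wptr(struct msm_gpu *gpu,
				 struct msm_ringbuffer *ring)
{
	unsigned long flags;

	/* Same preempt_lock that a6xx_flush() takes above */
	spin_lock_irqsave(&ring->preempt_lock, flags);
	if (ring->restore_wptr) {
		gpu_write(gpu, REG_A6XX_CP_RB_WPTR, get_wptr(ring));
		ring->restore_wptr = false;
	}
	spin_unlock_irqrestore(&ring->preempt_lock, flags);
}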
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
@@ -101,20 +108,30 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
}
static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
- struct msm_ringbuffer *ring, struct msm_file_private *ctx)
+ struct msm_ringbuffer *ring, struct msm_gem_submit *submit)
{
bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+ struct msm_file_private *ctx = submit->queue->ctx;
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
phys_addr_t ttbr;
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
- if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
+ if (ctx->seqno == ring->cur_ctx_seqno)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
return;
+ if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {
+ /* Wait for previous submit to complete before continuing: */
+ OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno - 1);
+ }
+
if (!sysprof) {
if (!adreno_is_a7xx(adreno_gpu)) {
/* Turn off protected mode to write to special registers */
@@ -138,12 +155,14 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
/*
* Write the new TTBR0 to the memstore. This is good for debugging.
+ * It is also needed for preemption.
*/
- OUT_PKT7(ring, CP_MEM_WRITE, 4);
+ OUT_PKT7(ring, CP_MEM_WRITE, 5);
OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
OUT_RING(ring, lower_32_bits(ttbr));
- OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
+ OUT_RING(ring, upper_32_bits(ttbr));
+ OUT_RING(ring, ctx->seqno);
/*
* Sync both threads after switching pagetables and enable BR only
@@ -193,7 +212,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
- a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+ a6xx_set_pagetable(a6xx_gpu, ring, submit);
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_start));
@@ -219,7 +238,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -268,6 +287,46 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_flush(gpu, ring);
}
+static void a6xx_emit_set_pseudo_reg(struct msm_ringbuffer *ring,
+ struct a6xx_gpu *a6xx_gpu, struct msm_gpu_submitqueue *queue)
+{
+ u64 preempt_postamble;
+
+ OUT_PKT7(ring, CP_SET_PSEUDO_REG, 12);
+
+ OUT_RING(ring, SMMU_INFO);
+ /* don't save SMMU, we write the record from the kernel instead */
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* privileged and non secure buffer save */
+ OUT_RING(ring, NON_SECURE_SAVE_ADDR);
+ OUT_RING(ring, lower_32_bits(
+ a6xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(
+ a6xx_gpu->preempt_iova[ring->id]));
+
+ /* user context buffer save, seems to be unused by fw */
+ OUT_RING(ring, NON_PRIV_SAVE_ADDR);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ OUT_RING(ring, COUNTER);
+ /* seems OK to set to 0 to disable it */
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* Emit postamble to clear perfcounters */
+ preempt_postamble = a6xx_gpu->preempt_postamble_iova;
+
+ OUT_PKT7(ring, CP_SET_AMBLE, 3);
+ OUT_RING(ring, lower_32_bits(preempt_postamble));
+ OUT_RING(ring, upper_32_bits(preempt_postamble));
+ OUT_RING(ring, CP_SET_AMBLE_2_DWORDS(
+ a6xx_gpu->preempt_postamble_len) |
+ CP_SET_AMBLE_2_TYPE(KMD_AMBLE_TYPE));
+}
+
static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
@@ -283,7 +342,14 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
- a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+ a6xx_set_pagetable(a6xx_gpu, ring, submit);
+
+ /*
+ * If preemption is enabled, then set the pseudo register for the save
+ * sequence
+ */
+ if (gpu->nr_rings > 1)
+ a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, submit->queue);
get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_start));
@@ -296,8 +362,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, 0x101); /* IFPC disable */
- OUT_PKT7(ring, CP_SET_MARKER, 1);
- OUT_RING(ring, 0x00d); /* IB1LIST start */
+ if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) {
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x00d); /* IB1LIST start */
+ }
/* Submit the commands */
for (i = 0; i < submit->nr_cmds; i++) {
@@ -305,7 +373,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -328,8 +396,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
update_shadow_rptr(gpu, ring);
}
- OUT_PKT7(ring, CP_SET_MARKER, 1);
- OUT_RING(ring, 0x00e); /* IB1LIST end */
+ if (submit->queue->flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT) {
+ OUT_PKT7(ring, CP_SET_MARKER, 1);
+ OUT_RING(ring, 0x00e); /* IB1LIST end */
+ }
get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_end));
@@ -376,6 +446,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, upper_32_bits(rbmemptr(ring, bv_fence)));
OUT_RING(ring, submit->seqno);
+ a6xx_gpu->last_seqno[ring->id] = submit->seqno;
+
/* write the ringbuffer timestamp */
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CACHE_CLEAN | CP_EVENT_WRITE_0_IRQ | BIT(27));
@@ -389,10 +461,32 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, 0x100); /* IFPC enable */
+ /* If preemption is enabled */
+ if (gpu->nr_rings > 1) {
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+
+ /*
+ * If dwords[2:1] are non-zero, they specify an address for
+ * the CP to write the value of dword[3] to on preemption
+ * completion. Write 0 to skip the write.
+ */
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Data value - not used if the address above is 0 */
+ OUT_RING(ring, 0x01);
+ /* generate interrupt on preemption completion */
+ OUT_RING(ring, 0x00);
+ }
+
trace_msm_gpu_submit_flush(submit,
gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
a6xx_flush(gpu, ring);
+
+ /* Check to see if we need to start preemption */
+ a6xx_preempt_trigger(gpu);
}
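
The four YIELD dwords form an optional writeback: a non-zero address pair tells the CP where to store dword[3] when the preemption actually happens. The submit above deliberately passes zeros; a sketch of the non-zero variant (`done_iova` is an illustrative driver-owned GPU address):

OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
OUT_RING(ring, lower_32_bits(done_iova));	/* dword[1]: address lo */
OUT_RING(ring, upper_32_bits(done_iova));	/* dword[2]: address hi */
OUT_RING(ring, 0x01);				/* dword[3]: value to store */
OUT_RING(ring, 0x00);				/* interrupt flag, left clear */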
static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
@@ -541,6 +635,15 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.macrotile_mode = 1;
}
+ if (adreno_is_a663(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 13;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.rgb565_predicator = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
+ gpu->ubwc_config.ubwc_swizzle = 0x4;
+ }
+
if (adreno_is_7c3(gpu)) {
gpu->ubwc_config.highest_bank_bit = 14;
gpu->ubwc_config.amsbc = 1;
@@ -599,6 +702,77 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
adreno_gpu->ubwc_config.macrotile_mode);
}
+static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ const struct adreno_reglist_list *reglist;
+ void *ptr = a6xx_gpu->pwrup_reglist_ptr;
+ struct cpu_gpu_lock *lock = ptr;
+ u32 *dest = (u32 *)&lock->regs[0];
+ int i;
+
+ reglist = adreno_gpu->info->a6xx->pwrup_reglist;
+
+ lock->gpu_req = lock->cpu_req = lock->turn = 0;
+ lock->ifpc_list_len = 0;
+ lock->preemption_list_len = reglist->count;
+
+ /*
+ * For each entry in each of the lists, write the offset and the current
+ * register value into the GPU buffer
+ */
+ for (i = 0; i < reglist->count; i++) {
+ *dest++ = reglist->regs[i];
+ *dest++ = gpu_read(gpu, reglist->regs[i]);
+ }
+
+ /*
+ * The overall register list is composed of
+ * 1. Static IFPC-only registers
+ * 2. Static IFPC + preemption registers
+ * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
+ *
+ * The first two lists are static. Their sizes are stored as the number
+ * of pairs in ifpc_list_len and preemption_list_len respectively. With
+ * concurrent binning, some of the perfcounter registers are
+ * virtualized, so the CP needs to know the pipe id to program the
+ * aperture in order to restore them. Thus, the third list is a dynamic
+ * list of triplets (<aperture, shifted 12 bits> <address> <data>), and
+ * its length is stored as the number of triplets in dynamic_list_len.
+ */
+ lock->dynamic_list_len = 0;
+}
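A minimal userspace model of the serialization loop above; read_reg stands in for gpu_read() and is an assumption for illustration.

#include <stddef.h>
#include <stdint.h>

/* Write each register as an (offset, current value) pair of dwords,
 * the layout the CP expects in the power-up reglist buffer. */
static size_t serialize_reglist(const uint32_t *regs, size_t count,
				uint32_t (*read_reg)(uint32_t offset),
				uint32_t *dest)
{
	for (size_t i = 0; i < count; i++) {
		*dest++ = regs[i];		/* register offset */
		*dest++ = read_reg(regs[i]);	/* current value */
	}
	return count * 2;			/* dwords written */
}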
+
+static int a7xx_preempt_start(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (gpu->nr_rings <= 1)
+ return 0;
+
+ /* Turn CP protection off */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, NULL);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Generate interrupt on preemption completion */
+ OUT_RING(ring, 0x00);
+
+ a6xx_flush(gpu, ring);
+
+ return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
static int a6xx_cp_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
@@ -630,6 +804,8 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
static int a7xx_cp_init(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->rb[0];
u32 mask;
@@ -667,11 +843,11 @@ static int a7xx_cp_init(struct msm_gpu *gpu)
/* *Don't* send a power up reg list for concurrent binning (TODO) */
/* Lo address */
- OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, lower_32_bits(a6xx_gpu->pwrup_reglist_iova));
/* Hi address */
- OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, upper_32_bits(a6xx_gpu->pwrup_reglist_iova));
/* BIT(31) set => read the regs from the list */
- OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, BIT(31));
a6xx_flush(gpu, ring);
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
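The lo/hi split used when handing the 64-bit reglist IOVA to the CP is just a 32-bit truncation and shift; a plain-C sketch:

#include <stdint.h>

/* Userspace equivalents of lower_32_bits()/upper_32_bits() */
static uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }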
@@ -795,6 +971,16 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
}
+ a6xx_gpu->pwrup_reglist_ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE,
+ MSM_BO_WC | MSM_BO_MAP_PRIV,
+ gpu->aspace, &a6xx_gpu->pwrup_reglist_bo,
+ &a6xx_gpu->pwrup_reglist_iova);
+
+ if (IS_ERR(a6xx_gpu->pwrup_reglist_ptr))
+ return PTR_ERR(a6xx_gpu->pwrup_reglist_ptr);
+
+ msm_gem_object_set_name(a6xx_gpu->pwrup_reglist_bo, "pwrup_reglist");
+
return 0;
}
@@ -854,6 +1040,7 @@ static int hw_init(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u64 gmem_range_min;
+ unsigned int i;
int ret;
if (!adreno_has_gmu_wrapper(adreno_gpu)) {
@@ -936,12 +1123,12 @@ static int hw_init(struct msm_gpu *gpu)
/* Disable L2 bypass in the UCHE */
if (adreno_is_a7xx(adreno_gpu)) {
- gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
} else {
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
- gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
- gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, adreno_gpu->uche_trap_base + 0xfc0);
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
}
if (!(adreno_is_a650_family(adreno_gpu) ||
@@ -1062,7 +1249,7 @@ static int hw_init(struct msm_gpu *gpu)
if (adreno_is_a690(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90);
/* Set dualQ + disable afull for A660 GPU */
- else if (adreno_is_a660(adreno_gpu))
+ else if (adreno_is_a660(adreno_gpu) || adreno_is_a663(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
else if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
@@ -1124,22 +1311,32 @@ static int hw_init(struct msm_gpu *gpu)
if (a6xx_gpu->shadow_bo) {
gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR,
shadowptr(a6xx_gpu, gpu->rb[0]));
+ for (unsigned int i = 0; i < gpu->nr_rings; i++)
+ a6xx_gpu->shadow[i] = 0;
}
/* ..which means "always" on A7xx, also for BV shadow */
if (adreno_is_a7xx(adreno_gpu)) {
gpu_write64(gpu, REG_A7XX_CP_BV_RB_RPTR_ADDR,
- rbmemptr(gpu->rb[0], bv_fence));
+ rbmemptr(gpu->rb[0], bv_rptr));
}
+ a6xx_preempt_hw_init(gpu);
+
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
- gpu->cur_ctx_seqno = 0;
+ for (i = 0; i < gpu->nr_rings; i++)
+ gpu->rb[i]->cur_ctx_seqno = 0;
/* Enable the SQE to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
+ if (adreno_is_a7xx(adreno_gpu) && !a6xx_gpu->pwrup_reglist_emitted) {
+ a7xx_patch_pwrup_reglist(gpu);
+ a6xx_gpu->pwrup_reglist_emitted = true;
+ }
+
ret = adreno_is_a7xx(adreno_gpu) ? a7xx_cp_init(gpu) : a6xx_cp_init(gpu);
if (ret)
goto out;
@@ -1177,6 +1374,10 @@ static int hw_init(struct msm_gpu *gpu)
out:
if (adreno_has_gmu_wrapper(adreno_gpu))
return ret;
+
+ /* Last step - yield the ringbuffer */
+ a7xx_preempt_start(gpu);
+
/*
* Tell the GMU that we are done touching the GPU and it can start power
* management
@@ -1554,8 +1755,13 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
if (status & A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION)
a7xx_sw_fuse_violation_irq(gpu);
- if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
msm_gpu_retire(gpu);
+ a6xx_preempt_trigger(gpu);
+ }
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
+ a6xx_preempt_irq(gpu);
return IRQ_HANDLED;
}
@@ -2249,6 +2455,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
struct a6xx_gpu *a6xx_gpu;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
+ extern int enable_preemption;
bool is_a7xx;
int ret;
@@ -2287,7 +2494,10 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
return ERR_PTR(ret);
}
- if (is_a7xx)
+ if ((enable_preemption == 1) || (enable_preemption == -1 &&
+ (config->info->quirks & ADRENO_QUIRK_PREEMPTION)))
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 4);
+ else if (is_a7xx)
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
else if (adreno_has_gmu_wrapper(adreno_gpu))
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
@@ -2323,11 +2533,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
}
}
+ adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
+
if (gpu->aspace)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
a6xx_fault_handler);
a6xx_calc_ubwc_config(adreno_gpu);
+ /* Set up the preemption specific bits and pieces for each ringbuffer */
+ a6xx_preempt_init(gpu);
return gpu;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 0fb7febf70e7..9201a53dd341 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -12,18 +12,39 @@
extern bool hang_debug;
+struct cpu_gpu_lock {
+ uint32_t gpu_req;
+ uint32_t cpu_req;
+ uint32_t turn;
+ union {
+ struct {
+ uint16_t list_length;
+ uint16_t list_offset;
+ };
+ struct {
+ uint8_t ifpc_list_len;
+ uint8_t preemption_list_len;
+ uint16_t dynamic_list_len;
+ };
+ };
+ uint64_t regs[62];
+};
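The union above gives two views of the same dword. A standalone C11 sketch of the aliasing, with a compile-time size check (the layout mirrors the struct above; packing behavior is my assumption):

#include <assert.h>
#include <stdint.h>

union list_lengths {
	struct {			/* legacy 16/16 view */
		uint16_t list_length;
		uint16_t list_offset;
	};
	struct {			/* 8/8/16 view used for preemption */
		uint8_t  ifpc_list_len;
		uint8_t  preemption_list_len;
		uint16_t dynamic_list_len;
	};
};

static_assert(sizeof(union list_lengths) == 4,
	      "both views must pack into a single dword");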
+
/**
* struct a6xx_info - a6xx specific information from device table
*
* @hwcg: hw clock gating register sequence
* @protect: CP_PROTECT settings
+ * @pwrup_reglist: power-up register list used for preemption
*/
struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
+ const struct adreno_reglist_list *pwrup_reglist;
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
+ const struct a6xx_bcm *bcms;
};
struct a6xx_gpu {
@@ -33,6 +54,29 @@ struct a6xx_gpu {
uint64_t sqe_iova;
struct msm_ringbuffer *cur_ring;
+ struct msm_ringbuffer *next_ring;
+
+ struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ void *preempt[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+ struct drm_gem_object *preempt_smmu_bo[MSM_GPU_MAX_RINGS];
+ void *preempt_smmu[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_smmu_iova[MSM_GPU_MAX_RINGS];
+ uint32_t last_seqno[MSM_GPU_MAX_RINGS];
+
+ atomic_t preempt_state;
+ spinlock_t eval_lock;
+ struct timer_list preempt_timer;
+
+ unsigned int preempt_level;
+ bool uses_gmem;
+ bool skip_save_restore;
+
+ struct drm_gem_object *preempt_postamble_bo;
+ void *preempt_postamble_ptr;
+ uint64_t preempt_postamble_iova;
+ uint64_t preempt_postamble_len;
+ bool postamble_enabled;
struct a6xx_gmu gmu;
@@ -40,6 +84,11 @@ struct a6xx_gpu {
uint64_t shadow_iova;
uint32_t *shadow;
+ struct drm_gem_object *pwrup_reglist_bo;
+ void *pwrup_reglist_ptr;
+ uint64_t pwrup_reglist_iova;
+ bool pwrup_reglist_emitted;
+
bool has_whereami;
void __iomem *llc_mmio;
@@ -52,6 +101,100 @@ struct a6xx_gpu {
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
/*
+ * In order to do lockless preemption we use a simple state machine to
+ * progress through the process.
+ *
+ * PREEMPT_NONE - no preemption in progress. Next state: START.
+ * PREEMPT_START - the trigger is evaluating whether preemption is
+ * possible. Next states: TRIGGERED, FINISH (if the trigger is aborted).
+ * PREEMPT_FINISH - an intermediate state before moving back to NONE.
+ * Next state: NONE.
+ * PREEMPT_TRIGGERED - a preemption has been executed on the hardware.
+ * Next states: FAULTED, PENDING.
+ * PREEMPT_FAULTED - a preemption timed out (never completed). This will
+ * trigger recovery. Next state: N/A.
+ * PREEMPT_PENDING - the preemption-complete interrupt fired; the
+ * callback is checking the success of the operation. Next states:
+ * FAULTED, FINISH.
+ */
+
+enum a6xx_preempt_state {
+ PREEMPT_NONE = 0,
+ PREEMPT_START,
+ PREEMPT_FINISH,
+ PREEMPT_TRIGGERED,
+ PREEMPT_FAULTED,
+ PREEMPT_PENDING,
+};
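The legal transitions documented in the comment above, written out as a standalone check. Values mirror the enum; this is a model derived from the trigger/irq paths in this patch, not driver code.

#include <stdbool.h>

enum pstate { P_NONE, P_START, P_FINISH, P_TRIGGERED, P_FAULTED, P_PENDING };

static bool transition_allowed(enum pstate old, enum pstate new)
{
	switch (old) {
	case P_NONE:      return new == P_START;
	case P_START:     return new == P_TRIGGERED || new == P_FINISH;
	case P_FINISH:    return new == P_NONE;
	case P_TRIGGERED: return new == P_FAULTED || new == P_PENDING;
	case P_PENDING:   return new == P_FAULTED || new == P_FINISH;
	case P_FAULTED:   return false;	/* recovery takes over */
	}
	return false;
}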
+
+/*
+ * struct a6xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (2112k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption. When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU-aware registers in the record:
+ * @magic: Must always be 0xAE399D6EUL
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @errno: preemption error record
+ * @data: Data field in YIELD and SET_MARKER packets, Written and used by CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @_pad: Reserved/padding
+ * @rptr_addr: Value of RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the preemption counters
+ * @bv_rptr_addr: Value of BV_RB_RPTR_ADDR_LO|HI written by CPU, save/restored by CP
+ */
+struct a6xx_preempt_record {
+ u32 magic;
+ u32 info;
+ u32 errno;
+ u32 data;
+ u32 cntl;
+ u32 rptr;
+ u32 wptr;
+ u32 _pad;
+ u64 rptr_addr;
+ u64 rbase;
+ u64 counter;
+ u64 bv_rptr_addr;
+};
+
+#define A6XX_PREEMPT_RECORD_MAGIC 0xAE399D6EUL
+
+#define PREEMPT_SMMU_INFO_SIZE 4096
+
+#define PREEMPT_RECORD_SIZE(adreno_gpu) \
+ ((adreno_gpu->info->preempt_record_size) == 0 ? \
+ 4192 * SZ_1K : (adreno_gpu->info->preempt_record_size))
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it on to the end of the allocation for the preemption record.
+ */
+#define A6XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+struct a7xx_cp_smmu_info {
+ u32 magic;
+ u32 _pad4;
+ u64 ttbr0;
+ u32 asid;
+ u32 context_idr;
+ u32 context_bank;
+};
+
+#define GEN7_CP_SMMU_INFO_MAGIC 0x241350d5UL
+
+/*
* Given a register and a count, return a value to program into
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for
* _len + 1 registers starting at _reg.
@@ -108,6 +251,34 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+void a6xx_preempt_init(struct msm_gpu *gpu);
+void a6xx_preempt_hw_init(struct msm_gpu *gpu);
+void a6xx_preempt_trigger(struct msm_gpu *gpu);
+void a6xx_preempt_irq(struct msm_gpu *gpu);
+void a6xx_preempt_fini(struct msm_gpu *gpu);
+int a6xx_preempt_submitqueue_setup(struct msm_gpu *gpu,
+ struct msm_gpu_submitqueue *queue);
+void a6xx_preempt_submitqueue_close(struct msm_gpu *gpu,
+ struct msm_gpu_submitqueue *queue);
+
+/* Return true if we are in a preempt state */
+static inline bool a6xx_in_preempt(struct a6xx_gpu *a6xx_gpu)
+{
+ /*
+ * Make sure the read of preempt_state is ordered with respect to reads
+ * of other variables before ...
+ */
+ smp_rmb();
+
+ int preempt_state = atomic_read(&a6xx_gpu->preempt_state);
+
+ /* ... and after. */
+ smp_rmb();
+
+ return !(preempt_state == PREEMPT_NONE ||
+ preempt_state == PREEMPT_FINISH);
+}
+
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index cdb3f6e74d3e..0989aee3dd2c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -6,6 +6,7 @@
#include <linux/list.h>
#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/tcs.h>
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
@@ -259,6 +260,48 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
NULL, 0);
}
+static void a6xx_generate_bw_table(const struct a6xx_info *info, struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_bw_table *msg)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < GMU_MAX_BCMS; i++) {
+ if (!info->bcms[i].name)
+ break;
+ msg->ddr_cmds_addrs[i] = cmd_db_read_addr(info->bcms[i].name);
+ }
+ msg->ddr_cmds_num = i;
+
+ for (i = 0; i < gmu->nr_gpu_bws; ++i)
+ for (j = 0; j < msg->ddr_cmds_num; j++)
+ msg->ddr_cmds_data[i][j] = gmu->gpu_ib_votes[i][j];
+ msg->bw_level_num = gmu->nr_gpu_bws;
+
+ /* Compute the wait bitmask with each BCM having the commit bit */
+ msg->ddr_wait_bitmask = 0;
+ for (j = 0; j < msg->ddr_cmds_num; j++)
+ if (msg->ddr_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
+ msg->ddr_wait_bitmask |= BIT(j);
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU.
+ * The 'CN0' BCM is used on all targets, and the votes are basically
+ * 'off' and 'on' states, with the first bit enabling the path.
+ */
+
+ msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
+ msg->cnoc_cmds_num = 1;
+
+ msg->cnoc_cmds_data[0][0] = BCM_TCS_CMD(true, false, 0, 0);
+ msg->cnoc_cmds_data[1][0] = BCM_TCS_CMD(true, true, 0, BIT(0));
+
+ /* Compute the wait bitmask with each BCM having the commit bit */
+ msg->cnoc_wait_bitmask = 0;
+ for (j = 0; j < msg->cnoc_cmds_num; j++)
+ if (msg->cnoc_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
+ msg->cnoc_wait_bitmask |= BIT(j);
+}
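The wait-bitmask computation above, reduced to a standalone function. The commit-bit position (bit 30) is an assumption based on the TCS command encoding pulled in via soc/qcom/tcs.h.

#include <stdint.h>

/* Set bit j when command j of the lowest bandwidth level carries the
 * commit bit, mirroring the loops above for the DDR and CNOC votes. */
static uint32_t wait_bitmask(const uint32_t *level0_cmds, unsigned int num)
{
	uint32_t mask = 0;

	for (unsigned int j = 0; j < num; j++)
		if (level0_cmds[j] & (1u << 30))	/* assumed commit bit */
			mask |= 1u << j;
	return mask;
}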
+
static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
@@ -478,6 +521,37 @@ static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
msg->cnoc_cmds_data[1][0] = 0x60000001;
}
+static void a663_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
+
+ msg->ddr_cmds_addrs[0] = 0x50004;
+ msg->ddr_cmds_addrs[1] = 0x50000;
+ msg->ddr_cmds_addrs[2] = 0x500b4;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x50058;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/*
@@ -630,32 +704,47 @@ static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
- struct a6xx_hfi_msg_bw_table msg = { 0 };
+ struct a6xx_hfi_msg_bw_table *msg;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
+
+ if (gmu->bw_table)
+ goto send;
- if (adreno_is_a618(adreno_gpu))
- a618_build_bw_table(&msg);
+ msg = devm_kzalloc(gmu->dev, sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ if (info->bcms && gmu->nr_gpu_bws > 1)
+ a6xx_generate_bw_table(info, gmu, msg);
+ else if (adreno_is_a618(adreno_gpu))
+ a618_build_bw_table(msg);
else if (adreno_is_a619(adreno_gpu))
- a619_build_bw_table(&msg);
+ a619_build_bw_table(msg);
else if (adreno_is_a640_family(adreno_gpu))
- a640_build_bw_table(&msg);
+ a640_build_bw_table(msg);
else if (adreno_is_a650(adreno_gpu))
- a650_build_bw_table(&msg);
+ a650_build_bw_table(msg);
else if (adreno_is_7c3(adreno_gpu))
- adreno_7c3_build_bw_table(&msg);
+ adreno_7c3_build_bw_table(msg);
else if (adreno_is_a660(adreno_gpu))
- a660_build_bw_table(&msg);
+ a660_build_bw_table(msg);
+ else if (adreno_is_a663(adreno_gpu))
+ a663_build_bw_table(msg);
else if (adreno_is_a690(adreno_gpu))
- a690_build_bw_table(&msg);
+ a690_build_bw_table(msg);
else if (adreno_is_a730(adreno_gpu))
- a730_build_bw_table(&msg);
+ a730_build_bw_table(msg);
else if (adreno_is_a740_family(adreno_gpu))
- a740_build_bw_table(&msg);
+ a740_build_bw_table(msg);
else
- a6xx_build_bw_table(&msg);
+ a6xx_build_bw_table(msg);
+
+ gmu->bw_table = msg;
- return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
+send:
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, gmu->bw_table, sizeof(*(gmu->bw_table)),
NULL, 0);
}
@@ -683,13 +772,13 @@ static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
sizeof(msg), NULL, 0);
}
-int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 freq_index, u32 bw_index)
{
struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
msg.ack_type = 1; /* blocking */
- msg.freq = index;
- msg.bw = 0; /* TODO: bus scaling */
+ msg.freq = freq_index;
+ msg.bw = bw_index;
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
sizeof(msg), NULL, 0);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 528110169398..52ba4a07d7b9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -173,6 +173,11 @@ struct a6xx_hfi_gx_bw_perf_vote_cmd {
u32 bw;
};
+#define AB_VOTE_MASK GENMASK(31, 16)
+#define MAX_AB_VOTE (FIELD_MAX(AB_VOTE_MASK) - 1)
+#define AB_VOTE(vote) FIELD_PREP(AB_VOTE_MASK, (vote))
+#define AB_VOTE_ENABLE BIT(8)
+
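A userspace rendering of the vote encoding defined above: the AB value sits in bits [31:16] and bit 8 enables it. Combining the two in one helper is my assumption about how callers compose the bw field.

#include <stdint.h>

static uint32_t ab_vote(uint32_t vote)
{
	return ((vote & 0xffffu) << 16) |	/* AB_VOTE(vote) */
	       (1u << 8);			/* AB_VOTE_ENABLE */
}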
#define HFI_H2F_MSG_PREPARE_SLUMBER 33
struct a6xx_hfi_prep_slumber_cmd {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
new file mode 100644
index 000000000000..2fd4e39f618f
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2023 Collabora, Ltd. */
+/* Copyright (c) 2024 Valve Corporation */
+
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ */
+static inline bool try_preempt_state(struct a6xx_gpu *a6xx_gpu,
+ enum a6xx_preempt_state old, enum a6xx_preempt_state new)
+{
+ enum a6xx_preempt_state cur = atomic_cmpxchg(&a6xx_gpu->preempt_state,
+ old, new);
+
+ return (cur == old);
+}
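The same compare-and-swap pattern, modeled with C11 atomics for readers unfamiliar with the kernel's atomic_cmpxchg():

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int preempt_state;

/* Succeed only if the state still equals 'old'; on failure the helper
 * updates 'old' with the observed value, which we simply discard. */
static bool try_state(int old, int new)
{
	return atomic_compare_exchange_strong(&preempt_state, &old, new);
}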
+
+/*
+ * Force the preemption state to the specified state. This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a6xx_gpu *gpu,
+ enum a6xx_preempt_state new)
+{
+ /*
+ * preempt_state may be read by other cores trying to trigger a
+ * preemption or in the interrupt handler so barriers are needed
+ * before...
+ */
+ smp_mb__before_atomic();
+ atomic_set(&gpu->preempt_state, new);
+ /* ... and after */
+ smp_mb__after_atomic();
+}
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ unsigned long flags;
+ uint32_t wptr;
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ if (ring->restore_wptr) {
+ wptr = get_wptr(ring);
+
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+
+ ring->restore_wptr = false;
+ }
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+}
+
+/* Return the highest priority ringbuffer with something in it */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ bool empty;
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
+ if (!empty && ring == a6xx_gpu->cur_ring)
+ empty = ring->memptrs->fence == a6xx_gpu->last_seqno[i];
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ if (!empty)
+ return ring;
+ }
+
+ return NULL;
+}
+
+static void a6xx_preempt_timer(struct timer_list *t)
+{
+ struct a6xx_gpu *a6xx_gpu = from_timer(a6xx_gpu, t, preempt_timer);
+ struct msm_gpu *gpu = &a6xx_gpu->base.base;
+ struct drm_device *dev = gpu->dev;
+
+ if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+ return;
+
+ dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static void preempt_prepare_postamble(struct a6xx_gpu *a6xx_gpu)
+{
+ u32 *postamble = a6xx_gpu->preempt_postamble_ptr;
+ u32 count = 0;
+
+ postamble[count++] = PKT7(CP_REG_RMW, 3);
+ postamble[count++] = REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD;
+ postamble[count++] = 0;
+ postamble[count++] = 1;
+
+ postamble[count++] = PKT7(CP_WAIT_REG_MEM, 6);
+ postamble[count++] = CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ);
+ postamble[count++] = CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
+ REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS);
+ postamble[count++] = CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0);
+ postamble[count++] = CP_WAIT_REG_MEM_3_REF(0x1);
+ postamble[count++] = CP_WAIT_REG_MEM_4_MASK(0x1);
+ postamble[count++] = CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0);
+
+ a6xx_gpu->preempt_postamble_len = count;
+
+ a6xx_gpu->postamble_enabled = true;
+}
+
+static void preempt_disable_postamble(struct a6xx_gpu *a6xx_gpu)
+{
+ u32 *postamble = a6xx_gpu->preempt_postamble_ptr;
+
+ /*
+ * Disable the postamble by replacing the first packet header with a NOP
+ * that covers the whole buffer.
+ */
+ *postamble = PKT7(CP_NOP, (a6xx_gpu->preempt_postamble_len - 1));
+
+ a6xx_gpu->postamble_enabled = false;
+}
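What the NOP trick above does on the wire, as a standalone sketch. The Type-7 header layout matches the PKT7 macro introduced later in this patch; the parity fold and the CP_NOP opcode value (0x10) are assumptions for illustration.

#include <stdint.h>

static uint32_t pm4_parity(uint32_t v)
{
	return (0x9669 >> (0xf & (v ^ (v >> 4) ^ (v >> 8) ^ (v >> 12)))) & 1;
}

/* Overwrite the first dword with a NOP header whose count spans the
 * rest of the buffer, so the CP consumes the remaining dwords as
 * ignored payload instead of executing them. */
static void nop_cover(uint32_t *buf, uint32_t len_dwords)
{
	uint32_t cnt = len_dwords - 1, op = 0x10;	/* assumed CP_NOP */

	buf[0] = (0x7u << 28) | cnt | (pm4_parity(cnt) << 15) |
		 ((op & 0x7f) << 16) | (pm4_parity(op) << 23);
}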
+
+void a6xx_preempt_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+
+ if (!try_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+ return;
+
+ /* Delete the preemption watchdog timer */
+ del_timer(&a6xx_gpu->preempt_timer);
+
+ /*
+ * The hardware should be setting the stop bit of CP_CONTEXT_SWITCH_CNTL
+ * to zero before firing the interrupt, but there is a non-zero chance
+ * of a hardware condition or a software race that could set it again
+ * before we have a chance to finish. If that happens, log it and go
+ * for recovery.
+ */
+ status = gpu_read(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL);
+ if (unlikely(status & A6XX_CP_CONTEXT_SWITCH_CNTL_STOP)) {
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "!!!!!!!!!!!!!!!! preemption faulted !!!!!!!!!!!!!! irq\n");
+ set_preempt_state(a6xx_gpu, PREEMPT_FAULTED);
+ dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ gpu->name);
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+ return;
+ }
+
+ a6xx_gpu->cur_ring = a6xx_gpu->next_ring;
+ a6xx_gpu->next_ring = NULL;
+
+ set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
+
+ update_wptr(gpu, a6xx_gpu->cur_ring);
+
+ set_preempt_state(a6xx_gpu, PREEMPT_NONE);
+
+ trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id);
+
+ /*
+ * Retrigger preemption to avoid a deadlock that could occur if a
+ * preemption request was skipped because one was already in flight.
+ */
+ a6xx_preempt_trigger(gpu);
+}
+
+void a6xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings == 1)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[i];
+
+ record_ptr->wptr = 0;
+ record_ptr->rptr = 0;
+ record_ptr->rptr_addr = shadowptr(a6xx_gpu, gpu->rb[i]);
+ record_ptr->info = 0;
+ record_ptr->data = 0;
+ record_ptr->rbase = gpu->rb[i]->iova;
+ }
+
+ /* Write a 0 to signal that we aren't switching pagetables */
+ gpu_write64(gpu, REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, 0);
+
+ /* Enable the GMEM save/restore feature for preemption */
+ gpu_write(gpu, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x1);
+
+ /* Reset the preemption state */
+ set_preempt_state(a6xx_gpu, PREEMPT_NONE);
+
+ spin_lock_init(&a6xx_gpu->eval_lock);
+
+ /* Always come up on rb 0 */
+ a6xx_gpu->cur_ring = gpu->rb[0];
+}
+
+void a6xx_preempt_trigger(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ unsigned long flags;
+ struct msm_ringbuffer *ring;
+ unsigned int cntl;
+ bool sysprof;
+
+ if (gpu->nr_rings == 1)
+ return;
+
+ /*
+ * Lock to make sure another thread attempting preemption doesn't skip it
+ * while we are still evaluating the next ring. This makes sure the other
+ * thread does start preemption if we abort it, and avoids a soft lockup.
+ */
+ spin_lock_irqsave(&a6xx_gpu->eval_lock, flags);
+
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a6xx_gpu, PREEMPT_NONE, PREEMPT_START)) {
+ spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags);
+ return;
+ }
+
+ cntl = A6XX_CP_CONTEXT_SWITCH_CNTL_LEVEL(a6xx_gpu->preempt_level);
+
+ if (a6xx_gpu->skip_save_restore)
+ cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_SKIP_SAVE_RESTORE;
+
+ if (a6xx_gpu->uses_gmem)
+ cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_USES_GMEM;
+
+ cntl |= A6XX_CP_CONTEXT_SWITCH_CNTL_STOP;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+
+ /*
+ * If no ring is populated, or the highest-priority ring is the current
+ * one, do nothing except update the wptr to the latest and greatest.
+ */
+ if (!ring || (a6xx_gpu->cur_ring == ring)) {
+ set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
+ update_wptr(gpu, a6xx_gpu->cur_ring);
+ set_preempt_state(a6xx_gpu, PREEMPT_NONE);
+ spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags);
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ struct a7xx_cp_smmu_info *smmu_info_ptr =
+ a6xx_gpu->preempt_smmu[ring->id];
+ struct a6xx_preempt_record *record_ptr = a6xx_gpu->preempt[ring->id];
+ u64 ttbr0 = ring->memptrs->ttbr0;
+ u32 context_idr = ring->memptrs->context_idr;
+
+ smmu_info_ptr->ttbr0 = ttbr0;
+ smmu_info_ptr->context_idr = context_idr;
+ record_ptr->wptr = get_wptr(ring);
+
+ /*
+ * The GPU will write the wptr we set above when we preempt. Reset
+ * restore_wptr to make sure that we don't write WPTR to the same
+ * thing twice. Subsequent submissions may still update the wptr, in
+ * which case they set the flag back to true. Setting the flag and
+ * updating the wptr must happen under the lock so the two stay
+ * atomic with respect to each other.
+ */
+ ring->restore_wptr = false;
+
+ trace_msm_gpu_preemption_trigger(a6xx_gpu->cur_ring->id, ring->id);
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ gpu_write64(gpu,
+ REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO,
+ a6xx_gpu->preempt_smmu_iova[ring->id]);
+
+ gpu_write64(gpu,
+ REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR,
+ a6xx_gpu->preempt_iova[ring->id]);
+
+ a6xx_gpu->next_ring = ring;
+
+ /* Start a timer to catch a stuck preemption */
+ mod_timer(&a6xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+ /* Enable or disable postamble as needed */
+ sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+
+ if (!sysprof && !a6xx_gpu->postamble_enabled)
+ preempt_prepare_postamble(a6xx_gpu);
+
+ if (sysprof && a6xx_gpu->postamble_enabled)
+ preempt_disable_postamble(a6xx_gpu);
+
+ /* Set the preemption state to triggered */
+ set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED);
+
+ /* Trigger the preemption */
+ gpu_write(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl);
+}
+
+static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct drm_gem_object *bo = NULL;
+ phys_addr_t ttbr;
+ u64 iova = 0;
+ void *ptr;
+ int asid;
+
+ ptr = msm_gem_kernel_new(gpu->dev,
+ PREEMPT_RECORD_SIZE(adreno_gpu),
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ memset(ptr, 0, PREEMPT_RECORD_SIZE(adreno_gpu));
+
+ msm_gem_object_set_name(bo, "preempt_record ring%d", ring->id);
+
+ a6xx_gpu->preempt_bo[ring->id] = bo;
+ a6xx_gpu->preempt_iova[ring->id] = iova;
+ a6xx_gpu->preempt[ring->id] = ptr;
+
+ struct a6xx_preempt_record *record_ptr = ptr;
+
+ ptr = msm_gem_kernel_new(gpu->dev,
+ PREEMPT_SMMU_INFO_SIZE,
+ MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
+ gpu->aspace, &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ memset(ptr, 0, PREEMPT_SMMU_INFO_SIZE);
+
+ msm_gem_object_set_name(bo, "preempt_smmu_info ring%d", ring->id);
+
+ a6xx_gpu->preempt_smmu_bo[ring->id] = bo;
+ a6xx_gpu->preempt_smmu_iova[ring->id] = iova;
+ a6xx_gpu->preempt_smmu[ring->id] = ptr;
+
+ struct a7xx_cp_smmu_info *smmu_info_ptr = ptr;
+
+ msm_iommu_pagetable_params(gpu->aspace->mmu, &ttbr, &asid);
+
+ smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC;
+ smmu_info_ptr->ttbr0 = ttbr;
+ smmu_info_ptr->asid = 0xdecafbad;
+ smmu_info_ptr->context_idr = 0;
+
+ /* Set up the defaults on the preemption record */
+ record_ptr->magic = A6XX_PREEMPT_RECORD_MAGIC;
+ record_ptr->info = 0;
+ record_ptr->data = 0;
+ record_ptr->rptr = 0;
+ record_ptr->wptr = 0;
+ record_ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
+ record_ptr->rbase = ring->iova;
+ record_ptr->counter = 0;
+ record_ptr->bv_rptr_addr = rbmemptr(ring, bv_rptr);
+
+ return 0;
+}
+
+void a6xx_preempt_fini(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++)
+ msm_gem_kernel_put(a6xx_gpu->preempt_bo[i], gpu->aspace);
+}
+
+void a6xx_preempt_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings <= 1)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ if (preempt_init_ring(a6xx_gpu, gpu->rb[i]))
+ goto fail;
+ }
+
+ /* TODO: make this configurable? */
+ a6xx_gpu->preempt_level = 1;
+ a6xx_gpu->uses_gmem = 1;
+ a6xx_gpu->skip_save_restore = 1;
+
+ a6xx_gpu->preempt_postamble_ptr = msm_gem_kernel_new(gpu->dev,
+ PAGE_SIZE,
+ MSM_BO_WC | MSM_BO_MAP_PRIV | MSM_BO_GPU_READONLY,
+ gpu->aspace, &a6xx_gpu->preempt_postamble_bo,
+ &a6xx_gpu->preempt_postamble_iova);
+
+ preempt_prepare_postamble(a6xx_gpu);
+
+ if (IS_ERR(a6xx_gpu->preempt_postamble_ptr))
+ goto fail;
+
+ timer_setup(&a6xx_gpu->preempt_timer, a6xx_preempt_timer, 0);
+
+ return;
+fail:
+ /*
+ * On any failure our adventure is over. Clean up and
+ * set nr_rings to 1 to force preemption off
+ */
+ a6xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "preemption init failed, disabling preemption\n");
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index cfc74a9e2646..236b25c094cd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -20,6 +20,10 @@ bool allow_vram_carveout = false;
MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+int enable_preemption = -1;
+MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on, 0=off, -1=auto (default))");
+module_param(enable_preemption, int, 0600);
+
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
@@ -389,7 +393,7 @@ static const struct dev_pm_ops adreno_pm_ops = {
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
- .remove_new = adreno_remove,
+ .remove = adreno_remove,
.shutdown = adreno_shutdown,
.driver = {
.name = "adreno",
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 465a4cd14a43..1238f3265978 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -310,10 +310,11 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct drm_device *drm = gpu->dev;
/* No pointer params yet */
if (*len != 0)
- return -EINVAL;
+ return UERR(EINVAL, drm, "invalid len");
switch (param) {
case MSM_PARAM_GPU_ID:
@@ -365,12 +366,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
return 0;
case MSM_PARAM_VA_START:
if (ctx->aspace == gpu->aspace)
- return -EINVAL;
+ return UERR(EINVAL, drm, "requires per-process pgtables");
*value = ctx->aspace->va_start;
return 0;
case MSM_PARAM_VA_SIZE:
if (ctx->aspace == gpu->aspace)
- return -EINVAL;
+ return UERR(EINVAL, drm, "requires per-process pgtables");
*value = ctx->aspace->va_size;
return 0;
case MSM_PARAM_HIGHEST_BANK_BIT:
@@ -385,15 +386,19 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_MACROTILE_MODE:
*value = adreno_gpu->ubwc_config.macrotile_mode;
return 0;
+ case MSM_PARAM_UCHE_TRAP_BASE:
+ *value = adreno_gpu->uche_trap_base;
+ return 0;
default:
- DBG("%s: invalid param: %u", gpu->name, param);
- return -EINVAL;
+ return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t value, uint32_t len)
{
+ struct drm_device *drm = gpu->dev;
+
switch (param) {
case MSM_PARAM_COMM:
case MSM_PARAM_CMDLINE:
@@ -401,11 +406,11 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
* that should be a reasonable upper bound
*/
if (len > PAGE_SIZE)
- return -EINVAL;
+ return UERR(EINVAL, drm, "invalid len");
break;
default:
if (len != 0)
- return -EINVAL;
+ return UERR(EINVAL, drm, "invalid len");
}
switch (param) {
@@ -434,11 +439,10 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
}
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ return UERR(EPERM, drm, "invalid permissions");
return msm_file_private_set_sysprof(ctx, gpu, value);
default:
- DBG("%s: invalid param: %u", gpu->name, param);
- return -EINVAL;
+ return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}
@@ -533,7 +537,7 @@ int adreno_load_fw(struct adreno_gpu *adreno_gpu)
if (!adreno_gpu->info->fw[i])
continue;
- /* Skip loading GMU firwmare with GMU Wrapper */
+ /* Skip loading GMU firmware with GMU Wrapper */
if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU)
continue;
@@ -572,8 +576,19 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
int adreno_hw_init(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
VERB("%s", gpu->name);
+ if (adreno_gpu->info->family >= ADRENO_6XX_GEN1 &&
+ qcom_scm_set_gpu_smmu_aperture_is_available()) {
+ /* We currently always use context bank 0, so hard code this */
+ ret = qcom_scm_set_gpu_smmu_aperture(0);
+ if (ret)
+ DRM_DEV_ERROR(gpu->dev->dev, "unable to set SMMU aperture: %d\n", ret);
+ }
+
for (int i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 58d7e7915c57..dcf454629ce0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -56,6 +56,7 @@ enum adreno_family {
#define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2)
#define ADRENO_QUIRK_HAS_HW_APRIV BIT(3)
#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
+#define ADRENO_QUIRK_PREEMPTION BIT(5)
/* Helper for formating the chip_id in the way that userspace tools like
* crashdec expect.
@@ -111,6 +112,7 @@ struct adreno_info {
* {SHRT_MAX, 0} sentinal.
*/
struct adreno_speedbin *speedbins;
+ u64 preempt_record_size;
};
#define ADRENO_CHIP_IDS(tbl...) (uint32_t[]) { tbl, 0 }
@@ -156,6 +158,19 @@ static const struct adreno_protect name = { \
.count_max = __count_max, \
};
+struct adreno_reglist_list {
+ /** @reg: List of register **/
+ const u32 *regs;
+ /** @count: Number of registers in the list **/
+ u32 count;
+};
+
+#define DECLARE_ADRENO_REGLIST_LIST(name) \
+static const struct adreno_reglist_list name = { \
+ .regs = name ## _regs, \
+ .count = ARRAY_SIZE(name ## _regs), \
+};
+
struct adreno_gpu {
struct msm_gpu base;
const struct adreno_info *info;
@@ -238,6 +253,8 @@ struct adreno_gpu {
bool gmu_is_wrapper;
bool has_ray_tracing;
+
+ u64 uche_trap_base;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -455,6 +472,11 @@ static inline int adreno_is_a680(const struct adreno_gpu *gpu)
return adreno_is_revn(gpu, 680);
}
+static inline int adreno_is_a663(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06060300;
+}
+
static inline int adreno_is_a690(const struct adreno_gpu *gpu)
{
return gpu->info->chip_ids[0] == 0x06090000;
@@ -539,6 +561,11 @@ static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
gpu->info->family == ADRENO_7XX_GEN3;
}
+static inline int adreno_is_a750_family(struct adreno_gpu *gpu)
+{
+ return gpu->info->family == ADRENO_7XX_GEN3;
+}
+
static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
{
/* Update with non-fake (i.e. non-A702) Gen 7 GPUs */
@@ -656,12 +683,15 @@ OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
OUT_RING(ring, PKT4(regindx, cnt));
}
+#define PKT7(opcode, cnt) \
+ (CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | \
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23))
+
static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
adreno_wait_ring(ring, cnt + 1);
- OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
- ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+ OUT_RING(ring, PKT7(opcode, cnt));
}
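A compilable demonstration of the header encoding the hoisted PKT7 macro produces. The parity fold is assumed to match the driver's PM4_PARITY, and the CP_SET_MARKER opcode (0x65) is likewise an assumption.

#include <stdint.h>
#include <stdio.h>

static uint32_t pm4_parity(uint32_t v)
{
	return (0x9669 >> (0xf & (v ^ (v >> 4) ^ (v >> 8) ^ (v >> 12)))) & 1;
}

static uint32_t pkt7(uint32_t opcode, uint32_t cnt)
{
	return 0x70000000u | cnt | (pm4_parity(cnt) << 15) |
	       ((opcode & 0x7f) << 16) | (pm4_parity(opcode) << 23);
}

int main(void)
{
	/* one-dword CP_SET_MARKER, as emitted in a7xx_submit() above */
	printf("CP_SET_MARKER hdr: 0x%08x\n", pkt7(0x65, 1));
	return 0;
}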
struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
index eb5dfff2ec4f..bcb39807fe61 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h
@@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x400,
@@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x400,
@@ -252,25 +254,25 @@ static const struct dpu_pingpong_cfg sm8650_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_8", .id = PINGPONG_8,
+ .name = "pingpong_cwb_2", .id = PINGPONG_CWB_2,
.base = 0x7e000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_4,
}, {
- .name = "pingpong_9", .id = PINGPONG_9,
+ .name = "pingpong_cwb_3", .id = PINGPONG_CWB_3,
.base = 0x7e400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
@@ -350,6 +352,25 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
},
};
+static const struct dpu_cwb_cfg sm8650_cwb[] = {
+ {
+ .name = "cwb_0", .id = CWB_0,
+ .base = 0x66200, .len = 0x8,
+ },
+ {
+ .name = "cwb_1", .id = CWB_1,
+ .base = 0x66600, .len = 0x8,
+ },
+ {
+ .name = "cwb_2", .id = CWB_2,
+ .base = 0x7E200, .len = 0x8,
+ },
+ {
+ .name = "cwb_3", .id = CWB_3,
+ .base = 0x7E600, .len = 0x8,
+ },
+};
+
static const struct dpu_intf_cfg sm8650_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -447,6 +468,8 @@ const struct dpu_mdss_cfg dpu_sm8650_cfg = {
.merge_3d = sm8650_merge_3d,
.wb_count = ARRAY_SIZE(sm8650_wb),
.wb = sm8650_wb,
+ .cwb_count = ARRAY_SIZE(sm8650_cwb),
+ .cwb = sm8650_cwb,
.intf_count = ARRAY_SIZE(sm8650_intf),
.intf = sm8650_intf,
.vbif_count = ARRAY_SIZE(sm8650_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
new file mode 100644
index 000000000000..ab3dfb0b374e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DPU_1_14_MSM8937_H
+#define _DPU_1_14_MSM8937_H
+
+static const struct dpu_caps msm8937_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .max_linewidth = DEFAULT_DPU_LINE_WIDTH,
+ .pixel_ram_size = 40 * 1024,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg msm8937_mdp[] = {
+ {
+ .name = "top_0",
+ .base = 0x0, .len = 0x454,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ },
+ },
+};
+
+static const struct dpu_ctl_cfg msm8937_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x64,
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x64,
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x64,
+ },
+};
+
+static const struct dpu_sspp_cfg msm8937_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x150,
+ .features = VIG_MSM8953_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_4", .id = SSPP_RGB0,
+ .base = 0x14000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB0,
+ }, {
+ .name = "sspp_5", .id = SSPP_RGB1,
+ .base = 0x16000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB1,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x150,
+ .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR),
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 2,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
+};
+
+static const struct dpu_lm_cfg msm8937_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ },
+};
+
+static const struct dpu_pingpong_cfg msm8937_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ },
+};
+
+static const struct dpu_dspp_cfg msm8937_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg msm8937_intf[] = {
+ {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg msm8937_perf_data = {
+ .max_bw_low = 3100000,
+ .max_bw_high = 3100000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 14,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version msm8937_mdss_ver = {
+ .core_major_ver = 1,
+ .core_minor_ver = 14,
+};
+
+const struct dpu_mdss_cfg dpu_msm8937_cfg = {
+ .mdss_ver = &msm8937_mdss_ver,
+ .caps = &msm8937_dpu_caps,
+ .mdp = msm8937_mdp,
+ .ctl_count = ARRAY_SIZE(msm8937_ctl),
+ .ctl = msm8937_ctl,
+ .sspp_count = ARRAY_SIZE(msm8937_sspp),
+ .sspp = msm8937_sspp,
+ .mixer_count = ARRAY_SIZE(msm8937_lm),
+ .mixer = msm8937_lm,
+ .dspp_count = ARRAY_SIZE(msm8937_dspp),
+ .dspp = msm8937_dspp,
+ .pingpong_count = ARRAY_SIZE(msm8937_pp),
+ .pingpong = msm8937_pp,
+ .intf_count = ARRAY_SIZE(msm8937_intf),
+ .intf = msm8937_intf,
+ .vbif_count = ARRAY_SIZE(msm8996_vbif),
+ .vbif = msm8996_vbif,
+ .perf = &msm8937_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
new file mode 100644
index 000000000000..6bdaecca6761
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DPU_1_15_MSM8917_H
+#define _DPU_1_15_MSM8917_H
+
+static const struct dpu_caps msm8917_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .max_linewidth = DEFAULT_DPU_LINE_WIDTH,
+ .pixel_ram_size = 16 * 1024,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg msm8917_mdp[] = {
+ {
+ .name = "top_0",
+ .base = 0x0, .len = 0x454,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ },
+ },
+};
+
+static const struct dpu_ctl_cfg msm8917_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x64,
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x64,
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x64,
+ },
+};
+
+static const struct dpu_sspp_cfg msm8917_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x150,
+ .features = VIG_MSM8953_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_4", .id = SSPP_RGB0,
+ .base = 0x14000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB0,
+ }, {
+ .name = "sspp_5", .id = SSPP_RGB1,
+ .base = 0x16000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB1,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x150,
+ .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR),
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 2,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
+};
+
+static const struct dpu_lm_cfg msm8917_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .sblk = &msm8998_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ },
+};
+
+static const struct dpu_pingpong_cfg msm8917_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ },
+};
+
+static const struct dpu_dspp_cfg msm8917_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg msm8917_intf[] = {
+ {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg msm8917_perf_data = {
+ .max_bw_low = 1800000,
+ .max_bw_high = 1800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 21,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version msm8917_mdss_ver = {
+ .core_major_ver = 1,
+ .core_minor_ver = 15,
+};
+
+const struct dpu_mdss_cfg dpu_msm8917_cfg = {
+ .mdss_ver = &msm8917_mdss_ver,
+ .caps = &msm8917_dpu_caps,
+ .mdp = msm8917_mdp,
+ .ctl_count = ARRAY_SIZE(msm8917_ctl),
+ .ctl = msm8917_ctl,
+ .sspp_count = ARRAY_SIZE(msm8917_sspp),
+ .sspp = msm8917_sspp,
+ .mixer_count = ARRAY_SIZE(msm8917_lm),
+ .mixer = msm8917_lm,
+ .dspp_count = ARRAY_SIZE(msm8917_dspp),
+ .dspp = msm8917_dspp,
+ .pingpong_count = ARRAY_SIZE(msm8917_pp),
+ .pingpong = msm8917_pp,
+ .intf_count = ARRAY_SIZE(msm8917_intf),
+ .intf = msm8917_intf,
+ .vbif_count = ARRAY_SIZE(msm8996_vbif),
+ .vbif = msm8996_vbif,
+ .perf = &msm8917_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
new file mode 100644
index 000000000000..14f36ea6ad0e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DPU_1_16_MSM8953_H
+#define _DPU_1_16_MSM8953_H
+
+static const struct dpu_caps msm8953_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .max_linewidth = DEFAULT_DPU_LINE_WIDTH,
+ .pixel_ram_size = 40 * 1024,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg msm8953_mdp[] = {
+ {
+ .name = "top_0",
+ .base = 0x0, .len = 0x454,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ },
+ },
+};
+
+static const struct dpu_ctl_cfg msm8953_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x64,
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x64,
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x64,
+ },
+};
+
+static const struct dpu_sspp_cfg msm8953_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x150,
+ .features = VIG_MSM8953_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_4", .id = SSPP_RGB0,
+ .base = 0x14000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB0,
+ }, {
+ .name = "sspp_5", .id = SSPP_RGB1,
+ .base = 0x16000, .len = 0x150,
+ .features = RGB_MSM8953_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB1,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x150,
+ .features = DMA_MSM8953_MASK | BIT(DPU_SSPP_CURSOR),
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 2,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ },
+};
+
+static const struct dpu_lm_cfg msm8953_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ },
+};
+
+static const struct dpu_pingpong_cfg msm8953_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ },
+};
+
+static const struct dpu_dspp_cfg msm8953_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg msm8953_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x268,
+ .type = INTF_NONE,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 14,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg msm8953_perf_data = {
+ .max_bw_low = 3400000,
+ .max_bw_high = 3400000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 14,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version msm8953_mdss_ver = {
+ .core_major_ver = 1,
+ .core_minor_ver = 16,
+};
+
+const struct dpu_mdss_cfg dpu_msm8953_cfg = {
+ .mdss_ver = &msm8953_mdss_ver,
+ .caps = &msm8953_dpu_caps,
+ .mdp = msm8953_mdp,
+ .ctl_count = ARRAY_SIZE(msm8953_ctl),
+ .ctl = msm8953_ctl,
+ .sspp_count = ARRAY_SIZE(msm8953_sspp),
+ .sspp = msm8953_sspp,
+ .mixer_count = ARRAY_SIZE(msm8953_lm),
+ .mixer = msm8953_lm,
+ .dspp_count = ARRAY_SIZE(msm8953_dspp),
+ .dspp = msm8953_dspp,
+ .pingpong_count = ARRAY_SIZE(msm8953_pp),
+ .pingpong = msm8953_pp,
+ .intf_count = ARRAY_SIZE(msm8953_intf),
+ .intf = msm8953_intf,
+ .vbif_count = ARRAY_SIZE(msm8996_vbif),
+ .vbif = msm8996_vbif,
+ .perf = &msm8953_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
new file mode 100644
index 000000000000..491f6f5827d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -0,0 +1,338 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_1_7_MSM8996_H
+#define _DPU_1_7_MSM8996_H
+
+static const struct dpu_caps msm8996_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .has_src_split = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg msm8996_mdp[] = {
+ {
+ .name = "top_0",
+ .base = 0x0, .len = 0x454,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_RGB0] = { .reg_off = 0x2ac, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB1] = { .reg_off = 0x2b4, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB2] = { .reg_off = 0x2bc, .bit_off = 4 },
+ [DPU_CLK_CTRL_RGB3] = { .reg_off = 0x2c4, .bit_off = 4 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ [DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x3b0, .bit_off = 16 },
+ },
+ },
+};
+
+static const struct dpu_ctl_cfg msm8996_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x64,
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x64,
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x64,
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x64,
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x64,
+ },
+};
+
+static const struct dpu_sspp_cfg msm8996_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x150,
+ .features = VIG_MSM8996_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x150,
+ .features = VIG_MSM8996_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x150,
+ .features = VIG_MSM8996_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x150,
+ .features = VIG_MSM8996_MASK,
+ .sblk = &dpu_vig_sblk_qseed2,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_4", .id = SSPP_RGB0,
+ .base = 0x14000, .len = 0x150,
+ .features = RGB_MSM8996_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB0,
+ }, {
+ .name = "sspp_5", .id = SSPP_RGB1,
+ .base = 0x16000, .len = 0x150,
+ .features = RGB_MSM8996_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB1,
+ }, {
+ .name = "sspp_6", .id = SSPP_RGB2,
+ .base = 0x18000, .len = 0x150,
+ .features = RGB_MSM8996_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB2,
+ }, {
+ .name = "sspp_7", .id = SSPP_RGB3,
+ .base = 0x1a000, .len = 0x150,
+ .features = RGB_MSM8996_MASK,
+ .sblk = &dpu_rgb_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_RGB,
+ .clk_ctrl = DPU_CLK_CTRL_RGB3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x150,
+ .features = DMA_MSM8996_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 2,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x150,
+ .features = DMA_MSM8996_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 10,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ },
+};
+
+static const struct dpu_lm_cfg msm8996_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
+};
+
+static const struct dpu_pingpong_cfg msm8996_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_MSM8996_TE2_MASK,
+ .sblk = &msm8996_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_MSM8996_TE2_MASK,
+ .sblk = &msm8996_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_MSM8996_MASK,
+ .sblk = &msm8996_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
+ },
+};
+
+static const struct dpu_dsc_cfg msm8996_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ },
+};
+
+static const struct dpu_dspp_cfg msm8996_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg msm8996_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x268,
+ .type = INTF_NONE,
+ .prog_fetch_lines_worst_case = 25,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 25,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x268,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 25,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x268,
+ .type = INTF_HDMI,
+ .prog_fetch_lines_worst_case = 25,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg msm8996_perf_data = {
+ .max_bw_low = 9600000,
+ .max_bw_high = 9600000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0, /* No LLCC on this SoC */
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 21,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version msm8996_mdss_ver = {
+ .core_major_ver = 1,
+ .core_minor_ver = 7,
+};
+
+const struct dpu_mdss_cfg dpu_msm8996_cfg = {
+ .mdss_ver = &msm8996_mdss_ver,
+ .caps = &msm8996_dpu_caps,
+ .mdp = msm8996_mdp,
+ .ctl_count = ARRAY_SIZE(msm8996_ctl),
+ .ctl = msm8996_ctl,
+ .sspp_count = ARRAY_SIZE(msm8996_sspp),
+ .sspp = msm8996_sspp,
+ .mixer_count = ARRAY_SIZE(msm8996_lm),
+ .mixer = msm8996_lm,
+ .dspp_count = ARRAY_SIZE(msm8996_dspp),
+ .dspp = msm8996_dspp,
+ .pingpong_count = ARRAY_SIZE(msm8996_pp),
+ .pingpong = msm8996_pp,
+ .dsc_count = ARRAY_SIZE(msm8996_dsc),
+ .dsc = msm8996_dsc,
+ .intf_count = ARRAY_SIZE(msm8996_intf),
+ .intf = msm8996_intf,
+ .vbif_count = ARRAY_SIZE(msm8996_vbif),
+ .vbif = msm8996_vbif,
+ .perf = &msm8996_perf_data,
+};
+
+#endif
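
The clk_ctrls tables above map each DPU client (VIG/RGB/DMA pipes, cursors, writeback) to a register offset and bit in the MDP top block. As a hedged, kernel-style sketch of how such an entry is typically consumed — the helper name and exact register semantics below are assumptions for illustration, not part of this patch:

#include <linux/io.h>
#include <linux/bits.h>

/* Sketch (assumption): force one client clock on or off by toggling the
 * single bit that a clk_ctrls entry (reg_off/bit_off) points at. */
static void mdp_setup_clk_force_ctrl(void __iomem *mdp_base,
				     u32 reg_off, u32 bit_off, bool enable)
{
	u32 val = readl(mdp_base + reg_off);

	if (enable)
		val |= BIT(bit_off);	/* force the client clock on */
	else
		val &= ~BIT(bit_off);	/* let the hardware gate it again */

	writel(val, mdp_base + reg_off);
}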
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
index 1d3e9666c741..64c94e919a69 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
@@ -157,18 +157,6 @@ static const struct dpu_lm_cfg msm8998_lm[] = {
.lm_pair = LM_5,
.pingpong = PINGPONG_2,
}, {
- .name = "lm_3", .id = LM_3,
- .base = 0x47000, .len = 0x320,
- .features = MIXER_MSM8998_MASK,
- .sblk = &msm8998_lm_sblk,
- .pingpong = PINGPONG_NONE,
- }, {
- .name = "lm_4", .id = LM_4,
- .base = 0x48000, .len = 0x320,
- .features = MIXER_MSM8998_MASK,
- .sblk = &msm8998_lm_sblk,
- .pingpong = PINGPONG_NONE,
- }, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
.features = MIXER_MSM8998_MASK,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
index 7a23389a5732..72bd4f7e9e50 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h
@@ -156,25 +156,13 @@ static const struct dpu_lm_cfg sdm845_lm[] = {
.pingpong = PINGPONG_2,
.dspp = DSPP_2,
}, {
- .name = "lm_3", .id = LM_3,
- .base = 0x0, .len = 0x320,
- .features = MIXER_SDM845_MASK,
- .sblk = &sdm845_lm_sblk,
- .pingpong = PINGPONG_NONE,
- .dspp = DSPP_3,
- }, {
- .name = "lm_4", .id = LM_4,
- .base = 0x0, .len = 0x320,
- .features = MIXER_SDM845_MASK,
- .sblk = &sdm845_lm_sblk,
- .pingpong = PINGPONG_NONE,
- }, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
.features = MIXER_SDM845_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
index cbbdaebe357e..daef07924886 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h
@@ -65,6 +65,54 @@ static const struct dpu_sspp_cfg sdm670_sspp[] = {
},
};
+static const struct dpu_lm_cfg sdm670_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
+};
+
+static const struct dpu_dspp_cfg sdm670_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
static const struct dpu_dsc_cfg sdm670_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
@@ -88,8 +136,10 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = {
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm670_sspp),
.sspp = sdm670_sspp,
- .mixer_count = ARRAY_SIZE(sdm845_lm),
- .mixer = sdm845_lm,
+ .mixer_count = ARRAY_SIZE(sdm670_lm),
+ .mixer = sdm670_lm,
+ .dspp_count = ARRAY_SIZE(sdm670_dspp),
+ .dspp = sdm670_dspp,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
.dsc_count = ARRAY_SIZE(sdm670_dsc),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 6ccfde82fecd..421afacb7248 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -164,6 +164,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -171,6 +172,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index bab19ddd1d4f..641023b102bf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -163,6 +163,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -170,6 +171,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
new file mode 100644
index 000000000000..621a2140f675
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_5_3_SM6150_H
+#define _DPU_5_3_SM6150_H
+
+static const struct dpu_caps sm6150_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x9,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sm6150_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = 0,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_ctl_cfg sm6150_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm6150_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1f0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_2_4,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1f0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x1f0,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sm6150_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ .lm_pair = LM_1,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_1,
+ .lm_pair = LM_0,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_QCM2290_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .pingpong = PINGPONG_2,
+ },
+};
+
+static const struct dpu_dspp_cfg sm6150_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm6150_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SM8150_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ },
+};
+
+static const struct dpu_intf_cfg sm6150_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x2c0,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x6b800, .len = 0x280,
+ .features = INTF_SC7180_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sm6150_perf_data = {
+ .max_bw_low = 4800000,
+ .max_bw_high = 4800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm8150_qos_linear),
+ .entries = sm8150_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm6150_mdss_ver = {
+ .core_major_ver = 5,
+ .core_minor_ver = 3,
+};
+
+const struct dpu_mdss_cfg dpu_sm6150_cfg = {
+ .mdss_ver = &sm6150_mdss_ver,
+ .caps = &sm6150_dpu_caps,
+ .mdp = &sm6150_mdp,
+ .ctl_count = ARRAY_SIZE(sm6150_ctl),
+ .ctl = sm6150_ctl,
+ .sspp_count = ARRAY_SIZE(sm6150_sspp),
+ .sspp = sm6150_sspp,
+ .mixer_count = ARRAY_SIZE(sm6150_lm),
+ .mixer = sm6150_lm,
+ .dspp_count = ARRAY_SIZE(sm6150_dspp),
+ .dspp = sm6150_dspp,
+ .pingpong_count = ARRAY_SIZE(sm6150_pp),
+ .pingpong = sm6150_pp,
+ .intf_count = ARRAY_SIZE(sm6150_intf),
+ .intf = sm6150_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sm6150_perf_data,
+};
+
+#endif
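
A new catalog such as dpu_sm6150_cfg only takes effect once the driver's device-match table references it. A minimal sketch of the usual dpu_kms.c wiring, assuming the conventional compatible string (the table name and entry here are illustrative, not shown by this hunk):

#include <linux/of_device.h>

extern const struct dpu_mdss_cfg dpu_sm6150_cfg;

static const struct of_device_id dpu_dt_match[] = {
	/* ... existing SoC entries ... */
	{ .compatible = "qcom,sm6150-dpu", .data = &dpu_sm6150_cfg },
	{ /* sentinel */ }
};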
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index a57d50b1f028..e8916ae826a6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index aced16e350da..f7c08e89c882 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index a1779c5597ae..08742472f9cc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -257,13 +257,13 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
new file mode 100644
index 000000000000..76ec72a32378
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_8_4_SA8775P_H
+#define _DPU_8_4_SA8775P_H
+
+static const struct dpu_caps sa8775p_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sa8775p_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sa8775p_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sa8775p_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x32c,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x32c,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x32c,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG2,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x32c,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG3,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x32c,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x32c,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x32c,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x32c,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA3,
+ },
+};
+
+static const struct dpu_lm_cfg sa8775p_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sa8775p_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sa8775p_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_CWB_0,
+ .base = 0x65800, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_CWB_1,
+ .base = 0x65c00, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sa8775p_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x65f00, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard
+ * slice DSC encoders, so both share the same base address, but each
+ * has its own sub-block address.
+ */
+static const struct dpu_dsc_cfg sa8775p_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sa8775p_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+/* TODO: INTF 3, 6, 7 and 8 are used for MST, marked as INTF_NONE for now */
+static const struct dpu_intf_cfg sa8775p_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ }, {
+ .name = "intf_4", .id = INTF_4,
+ .base = 0x38000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
+ }, {
+ .name = "intf_6", .id = INTF_6,
+ .base = 0x3a000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ }, {
+ .name = "intf_7", .id = INTF_7,
+ .base = 0x3b000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
+ }, {
+ .name = "intf_8", .id = INTF_8,
+ .base = 0x3c000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ },
+};
+
+static const struct dpu_perf_cfg sa8775p_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xfff0, 0x1},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
+ .entries = sm6350_qos_linear_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sm6350_qos_linear_macrotile),
+ .entries = sm6350_qos_linear_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sa8775p_mdss_ver = {
+ .core_major_ver = 8,
+ .core_minor_ver = 4,
+};
+
+const struct dpu_mdss_cfg dpu_sa8775p_cfg = {
+ .mdss_ver = &sa8775p_mdss_ver,
+ .caps = &sa8775p_dpu_caps,
+ .mdp = &sa8775p_mdp,
+ .cdm = &sc7280_cdm,
+ .ctl_count = ARRAY_SIZE(sa8775p_ctl),
+ .ctl = sa8775p_ctl,
+ .sspp_count = ARRAY_SIZE(sa8775p_sspp),
+ .sspp = sa8775p_sspp,
+ .mixer_count = ARRAY_SIZE(sa8775p_lm),
+ .mixer = sa8775p_lm,
+ .dspp_count = ARRAY_SIZE(sa8775p_dspp),
+ .dspp = sa8775p_dspp,
+ .pingpong_count = ARRAY_SIZE(sa8775p_pp),
+ .pingpong = sa8775p_pp,
+ .dsc_count = ARRAY_SIZE(sa8775p_dsc),
+ .dsc = sa8775p_dsc,
+ .merge_3d_count = ARRAY_SIZE(sa8775p_merge_3d),
+ .merge_3d = sa8775p_merge_3d,
+ .wb_count = ARRAY_SIZE(sa8775p_wb),
+ .wb = sa8775p_wb,
+ .intf_count = ARRAY_SIZE(sa8775p_intf),
+ .intf = sa8775p_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sa8775p_perf_data,
+};
+
+#endif
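
The intr_* fields in these tables are flat IRQ indexes composed by DPU_IRQ_IDX from a status-register index and a bit offset; the real macro lives in dpu_hw_interrupts.h. A self-contained sketch of the packing, with the encoding and register indexes below assumed purely for illustration:

#include <stdio.h>

#define BITS_PER_REG 32
/* Assumed encoding: flat index = register index * 32 + bit offset. */
#define DPU_IRQ_IDX(reg_idx, offset) ((reg_idx) * BITS_PER_REG + (offset))

/* Illustrative register indexes, not the driver's real values. */
enum { MDP_SSPP_TOP0_INTR = 0, MDP_SSPP_TOP0_INTR2 = 1 };

int main(void)
{
	int idx = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30);

	/* Recover the (register, bit) pair from the flat index. */
	printf("reg %d, bit %d\n", idx / BITS_PER_REG, idx % BITS_PER_REG);
	return 0;
}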
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index ad48defa154f..4d3787fceb72 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
@@ -251,13 +253,13 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index a3e60ac70689..6b112e3d17da 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -159,6 +159,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@@ -166,6 +167,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
@@ -251,13 +253,13 @@ static const struct dpu_pingpong_cfg x1e80100_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
- .name = "pingpong_6", .id = PINGPONG_6,
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
- .name = "pingpong_7", .id = PINGPONG_7,
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
@@ -389,8 +391,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
- .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
- .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index 7c286bafb948..e7183cf05776 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -8,72 +8,26 @@
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
-/**
- * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
- * @kms: MSM KMS handle
- * @return: none
- */
void dpu_core_irq_preinstall(struct msm_kms *kms);
-/**
- * dpu_core_irq_uninstall - uninstall core IRQ handler
- * @kms: MSM KMS handle
- * @return: none
- */
void dpu_core_irq_uninstall(struct msm_kms *kms);
-/**
- * dpu_core_irq - core IRQ handler
- * @kms: MSM KMS handle
- * @return: interrupt handling status
- */
irqreturn_t dpu_core_irq(struct msm_kms *kms);
-/**
- * dpu_core_irq_read - IRQ helper function for reading IRQ status
- * @dpu_kms: DPU handle
- * @irq_idx: irq index
- * @return: non-zero if irq detected; otherwise no irq detected
- */
u32 dpu_core_irq_read(
struct dpu_kms *dpu_kms,
unsigned int irq_idx);
-/**
- * dpu_core_irq_register_callback - For registering callback function on IRQ
- * interrupt
- * @dpu_kms: DPU handle
- * @irq_idx: irq index
- * @irq_cb: IRQ callback funcion.
- * @irq_arg: IRQ callback argument.
- * @return: 0 for success registering callback, otherwise failure
- *
- * This function supports registration of multiple callbacks for each interrupt.
- */
int dpu_core_irq_register_callback(
struct dpu_kms *dpu_kms,
unsigned int irq_idx,
void (*irq_cb)(void *arg),
void *irq_arg);
-/**
- * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
- * interrupt
- * @dpu_kms: DPU handle
- * @irq_idx: irq index
- * @return: 0 for success registering callback, otherwise failure
- *
- * This function supports registration of multiple callbacks for each interrupt.
- */
int dpu_core_irq_unregister_callback(
struct dpu_kms *dpu_kms,
unsigned int irq_idx);
-/**
- * dpu_debugfs_core_irq_init - register core irq debugfs
- * @dpu_kms: pointer to kms
- * @parent: debugfs directory root
- */
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 68fae048a9a8..6f0a37f954fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -80,7 +80,7 @@ static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg,
mode = &state->adjusted_mode;
- crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+ crtc_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_dpu_plane_state(plane->state);
@@ -140,6 +140,12 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf,
perf->max_per_pipe_ib, perf->bw_ctl);
}
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -301,6 +307,12 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
return clk_rate;
}
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * return: zero if success, or error code otherwise
+ */
int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
int params_changed)
{
@@ -446,6 +458,11 @@ static const struct file_operations dpu_core_perf_mode_fops = {
.write = _dpu_core_perf_mode_write,
};
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
+ * @parent: Pointer to parent debugfs
+ */
int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
{
struct dpu_core_perf *perf = &dpu_kms->perf;
@@ -482,6 +499,12 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
}
#endif
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @perf_cfg: Pointer to platform performance configuration
+ * @max_core_clk_rate: Maximum core clock rate
+ */
int dpu_core_perf_init(struct dpu_core_perf *perf,
const struct dpu_perf_cfg *perf_cfg,
unsigned long max_core_clk_rate)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index 4186977390bd..451bf8021114 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -54,47 +54,20 @@ struct dpu_core_perf {
u64 fix_core_ab_vote;
};
-/**
- * dpu_core_perf_crtc_check - validate performance of the given crtc state
- * @crtc: Pointer to crtc
- * @state: Pointer to new crtc state
- * return: zero if success, or error code otherwise
- */
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state);
-/**
- * dpu_core_perf_crtc_update - update performance of the given crtc
- * @crtc: Pointer to crtc
- * @params_changed: true if crtc parameters are modified
- * return: zero if success, or error code otherwise
- */
int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
int params_changed);
-/**
- * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
- * @crtc: Pointer to crtc
- */
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
-/**
- * dpu_core_perf_init - initialize the given core performance context
- * @perf: Pointer to core performance context
- * @perf_cfg: Pointer to platform performance configuration
- * @max_core_clk_rate: Maximum core clock rate
- */
int dpu_core_perf_init(struct dpu_core_perf *perf,
const struct dpu_perf_cfg *perf_cfg,
unsigned long max_core_clk_rate);
struct dpu_kms;
-/**
- * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
- * @dpu_kms: Pointer to the dpu_kms struct
- * @debugfs_parent: Pointer to parent debugfs
- */
int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 4c1be2f0555f..7191b1a6d41b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -572,6 +572,10 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
@@ -594,6 +598,10 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
return INTF_MODE_NONE;
}
+/**
+ * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
+ * @crtc: Pointer to drm crtc object
+ */
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
@@ -704,6 +712,10 @@ void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event)
kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ */
void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
trace_dpu_crtc_complete_commit(DRMID(crtc));
@@ -711,14 +723,22 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc)
_dpu_crtc_complete_flip(crtc);
}
-static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
struct drm_display_mode *adj_mode = &state->adjusted_mode;
u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
int i;
+ /* If we cannot merge 2 LMs (no 3D mux), it is better to fail early,
+ * before even checking the width after the split.
+ */
+ if (!dpu_kms->catalog->caps->has_3d_merge &&
+ adj_mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
+ return -E2BIG;
+
for (i = 0; i < cstate->num_mixers; i++) {
struct drm_rect *r = &cstate->lm_bounds[i];
r->x1 = crtc_split_width * i;
@@ -727,7 +747,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
r->y2 = adj_mode->vdisplay;
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+
+ if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width)
+ return -E2BIG;
}
+
+ return 0;
}
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
@@ -803,7 +828,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
- _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+ _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state);
/* encoder will trigger pending mask now */
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
@@ -928,6 +953,10 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
@@ -1091,9 +1120,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
dpu_core_perf_crtc_update(crtc, 0);
- memset(cstate->mixers, 0, sizeof(cstate->mixers));
- cstate->num_mixers = 0;
-
/* disable clk & bw control until clk & bw properties are set */
cstate->bw_control = false;
cstate->bw_split_vote = false;
@@ -1163,6 +1189,47 @@ static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
return false;
}
+static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
+{
+ int total_planes = crtc->dev->mode_config.num_total_plane;
+ struct drm_atomic_state *state = crtc_state->state;
+ struct dpu_global_state *global_state;
+ struct drm_plane_state **states;
+ struct drm_plane *plane;
+ int ret;
+
+ global_state = dpu_kms_get_global_state(crtc_state->state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ dpu_rm_release_all_sspp(global_state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ states = kcalloc(total_planes, sizeof(*states), GFP_KERNEL);
+ if (!states)
+ return -ENOMEM;
+
+ drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto done;
+ }
+
+ states[plane_state->normalized_zpos] = plane_state;
+ }
+
+ ret = dpu_assign_plane_resources(global_state, state, crtc, states, total_planes);
+
+done:
+ kfree(states);
+ return ret;
+}
+
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1178,6 +1247,13 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+ if (dpu_use_virtual_planes &&
+ (crtc_state->planes_changed || crtc_state->zpos_changed)) {
+ rc = dpu_crtc_reassign_planes(crtc, crtc_state);
+ if (rc < 0)
+ return rc;
+ }
+
if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, crtc_state->enable,
@@ -1192,8 +1268,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
if (crtc_state->active_changed)
crtc_state->mode_changed = true;
- if (cstate->num_mixers)
- _dpu_crtc_setup_lm_bounds(crtc, crtc_state);
+ if (cstate->num_mixers) {
+ rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
+ if (rc)
+ return rc;
+ }
/* FIXME: move this to dpu_plane_atomic_check? */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
@@ -1224,6 +1303,30 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
+static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+
+	/* if there is no 3d_mux block we cannot merge two LMs, so a layer
+	 * wider than one LM cannot be split; filter out such modes
+	 */
+ if (!dpu_kms->catalog->caps->has_3d_merge &&
+ mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
+ return MODE_BAD_HVALUE;
+ /*
+ * max crtc width is equal to the max mixer width * 2 and max height is 4K
+ */
+ return drm_mode_validate_size(mode,
+ 2 * dpu_kms->catalog->caps->max_mixer_width,
+ 4096);
+}
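/*
 * Illustrative example (not part of this patch), with a hypothetical
 * catalog where max_mixer_width = 2560 and has_3d_merge = false: a
 * 3840-pixel-wide mode is rejected with MODE_BAD_HVALUE, while a
 * 2560x1440 mode passes both checks above.
 */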
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
@@ -1439,10 +1542,19 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
.atomic_check = dpu_crtc_atomic_check,
.atomic_begin = dpu_crtc_atomic_begin,
.atomic_flush = dpu_crtc_atomic_flush,
+ .mode_valid = dpu_crtc_mode_valid,
.get_scanout_position = dpu_crtc_get_scanout_position,
};
-/* initialize crtc */
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * @cursor: cursor plane
+ * Return: new crtc object or error
+ *
+ * Initialize the CRTC.
+ */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct drm_plane *cursor)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index febc3e764a63..0b148f3ce0d7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -239,55 +239,17 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
-/**
- * dpu_crtc_vblank - enable or disable vblanks for this crtc
- * @crtc: Pointer to drm crtc object
- * @en: true to enable vblanks, false to disable
- */
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
-/**
- * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
- * @crtc: Pointer to drm crtc object
- */
void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
-/**
- * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
- * @crtc: Pointer to drm crtc object
- */
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
-/**
- * dpu_crtc_complete_commit - callback signalling completion of current commit
- * @crtc: Pointer to drm crtc object
- */
void dpu_crtc_complete_commit(struct drm_crtc *crtc);
-/**
- * dpu_crtc_init - create a new crtc object
- * @dev: dpu device
- * @plane: base plane
- * @cursor: cursor plane
- * @Return: new crtc object or error
- */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
struct drm_plane *cursor);
-/**
- * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
- * @kms: Pointer to dpu_kms
- * @crtc_drm: Pointer to crtc object
- * @event: Event that client is interested
- * @en: Flag to enable/disable the event
- */
-int dpu_crtc_register_custom_event(struct dpu_kms *kms,
- struct drm_crtc *crtc_drm, u32 event, bool en);
-
-/**
- * dpu_crtc_get_intf_mode - get interface mode of the given crtc
- * @crtc: Pointert to crtc
- */
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 3b171bf227d1..5172ab4dea99 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -217,6 +217,10 @@ static u32 dither_matrix[DITHER_MATRIX_SZ] = {
15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};
+/**
+ * dpu_encoder_get_drm_fmt - return DRM fourcc format
+ * @phys_enc: Pointer to physical encoder structure
+ */
u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
{
struct drm_encoder *drm_enc;
@@ -235,6 +239,11 @@ u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc)
return DRM_FORMAT_RGB888;
}
+/**
+ * dpu_encoder_needs_periph_flush - return true if physical encoder requires
+ * peripheral flush
+ * @phys_enc: Pointer to physical encoder structure
+ */
bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
{
struct drm_encoder *drm_enc;
@@ -253,6 +262,10 @@ bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc)
msm_dp_needs_periph_flush(priv->dp[disp_info->h_tile_instance[0]], mode);
}
+/**
+ * dpu_encoder_is_widebus_enabled - return true if widebus is enabled
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
const struct dpu_encoder_virt *dpu_enc;
@@ -272,6 +285,11 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
return false;
}
+/**
+ * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
+ * for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
{
const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -279,6 +297,12 @@ bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
return dpu_enc->dsc ? true : false;
}
+/**
+ * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
+ * in virtual encoder that can collect CRC values
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * Returns: Number of physical encoders for given drm encoder
+ */
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -297,6 +321,10 @@ int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
return num_intf;
}
+/**
+ * dpu_encoder_setup_misr - enable misr calculations
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -315,6 +343,13 @@ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
}
}
+/**
+ * dpu_encoder_get_crc - get the crc value from interface blocks
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @crcs: array to fill with CRC data
+ * @pos: offset into the @crcs array
+ * Returns: 0 on success, error otherwise
+ */
int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
struct dpu_encoder_virt *dpu_enc;
@@ -385,6 +420,12 @@ static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
}
}
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ * timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
{
@@ -402,6 +443,15 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
u32 irq_idx, struct dpu_encoder_wait_info *info);
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ * note: @func is invoked if the wait times out
+ * @phys_enc: Pointer to physical encoder structure
+ * @irq_idx: IRQ index
+ * @func: IRQ callback to be called in case of timeout
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
unsigned int irq_idx,
void (*func)(void *arg),
@@ -473,6 +523,10 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
return ret;
}
+/**
+ * dpu_encoder_get_vsync_count - get vsync count for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -480,6 +534,10 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
return phys ? atomic_read(&phys->vsync_cnt) : 0;
}
+/**
+ * dpu_encoder_get_linecount - get interface line count for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -495,6 +553,13 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
return linecount;
}
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
void dpu_encoder_helper_split_config(
struct dpu_encoder_phys *phys_enc,
enum dpu_intf interface)
@@ -544,6 +609,10 @@ void dpu_encoder_helper_split_config(
}
}
+/**
+ * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -560,6 +629,12 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
return (num_dsc > 0) && (num_dsc > intf_count);
}
+/**
+ * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder
+ * This helper function is used by physical encoder to get DSC config
+ * used for this encoder.
+ * @drm_enc: Pointer to encoder structure
+ */
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{
struct msm_drm_private *priv = drm_enc->dev->dev_private;
@@ -624,6 +699,40 @@ static struct msm_display_topology dpu_encoder_get_topology(
return topology;
}
+static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms,
+ struct drm_encoder *drm_enc,
+ struct dpu_global_state *global_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dpu_crtc_state *cstate;
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC];
+ int num_lm, num_ctl, num_dspp, i;
+
+ cstate = to_dpu_crtc_state(crtc_state);
+
+ memset(cstate->mixers, 0, sizeof(cstate->mixers));
+
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ cstate->mixers[i].hw_dspp = i < num_dspp ? to_dpu_hw_dspp(hw_dspp[i]) : NULL;
+ }
+
+ cstate->num_mixers = num_lm;
+}
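/*
 * Illustrative sketch (not part of this patch): the ctl_idx expression in
 * the loop above lets the last reserved CTL be shared when fewer CTLs than
 * LMs were allocated.
 */
static int demo_ctl_for_lm(int lm_idx, int num_ctl)
{
	/* LMs beyond the CTL count reuse the last CTL (split-display) */
	return (lm_idx < num_ctl) ? lm_idx : num_ctl - 1;
}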
+
static int dpu_encoder_virt_atomic_check(
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
@@ -691,7 +800,10 @@ static int dpu_encoder_virt_atomic_check(
if (!crtc_state->active_changed || crtc_state->enable)
ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
- drm_enc, crtc_state, topology);
+ drm_enc, crtc_state, &topology);
+ if (!ret)
+ dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
+ global_state, crtc_state);
}
trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
@@ -1052,6 +1164,11 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0;
}
+/**
+ * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job)
{
@@ -1069,6 +1186,11 @@ void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
}
}
+/**
+ * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job)
{
@@ -1093,14 +1215,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
- struct dpu_crtc_state *cstate;
struct dpu_global_state *global_state;
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
- int num_lm, num_ctl, num_pp, num_dsc;
+ int num_ctl, num_pp, num_dsc;
unsigned int dsc_mask = 0;
int i;
@@ -1129,11 +1248,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
ARRAY_SIZE(hw_pp));
num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
- num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
- dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
- drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
- ARRAY_SIZE(hw_dspp));
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
@@ -1159,36 +1273,23 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
}
- cstate = to_dpu_crtc_state(crtc_state);
-
- for (i = 0; i < num_lm; i++) {
- int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
-
- cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
- cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
- cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
- }
-
- cstate->num_mixers = num_lm;
-
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (!dpu_enc->hw_pp[i]) {
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ if (!phys->hw_pp) {
DPU_ERROR_ENC(dpu_enc,
"no pp block assigned at idx: %d\n", i);
return;
}
- if (!hw_ctl[i]) {
+ phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
+ if (!phys->hw_ctl) {
DPU_ERROR_ENC(dpu_enc,
"no ctl block assigned at idx: %d\n", i);
return;
}
- phys->hw_pp = dpu_enc->hw_pp[i];
- phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
-
phys->cached_mode = crtc_state->adjusted_mode;
if (phys->ops.atomic_mode_set)
phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
@@ -1232,6 +1333,10 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
}
}
+/**
+ * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
+ * @drm_enc: encoder pointer
+ */
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -1373,6 +1478,12 @@ static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catal
return NULL;
}
+/**
+ * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception
+ * @drm_enc: Pointer to drm encoder structure
+ * @phy_enc: Pointer to physical encoder
+ * Note: This is called from IRQ handler context.
+ */
void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
{
@@ -1395,6 +1506,12 @@ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_END("encoder_vblank_callback");
}
+/**
+ * dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception
+ * @drm_enc: Pointer to drm encoder structure
+ * @phy_enc: Pointer to physical encoder
+ * Note: This is called from IRQ handler context.
+ */
void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
{
@@ -1413,6 +1530,11 @@ void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_END("encoder_underrun_callback");
}
+/**
+ * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
+ * @drm_enc: encoder pointer
+ * @crtc: crtc pointer
+ */
void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -1425,6 +1547,13 @@ void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
+/**
+ * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
+ * the encoder is assigned to the given crtc
+ * @drm_enc: encoder pointer
+ * @crtc: crtc pointer
+ * @enable: true if vblank should be enabled
+ */
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
struct drm_crtc *crtc, bool enable)
{
@@ -1449,6 +1578,13 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
}
}
+/**
+ * dpu_encoder_frame_done_callback - Notify virtual encoder that this phys
+ * encoder has completed the last requested frame
+ * @drm_enc: Pointer to drm encoder structure
+ * @ready_phys: Pointer to physical encoder
+ * @event: Event to process
+ */
void dpu_encoder_frame_done_callback(
struct drm_encoder *drm_enc,
struct dpu_encoder_phys *ready_phys, u32 event)
@@ -1571,6 +1707,12 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
phys->ops.trigger_start(phys);
}
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
@@ -1692,6 +1834,11 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ * kickoff and trigger the ctl prepare progress for command mode display.
+ * @drm_enc: encoder pointer
+ */
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1768,6 +1915,11 @@ static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
return line_time;
}
+/**
+ * dpu_encoder_vsync_time - get the time of the next vsync
+ * @drm_enc: encoder pointer
+ * @wakeup_time: pointer to ktime_t to write the vsync time to
+ */
int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
struct drm_display_mode *mode;
@@ -1914,6 +2066,13 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
dsc, dsc_common_mode, initial_lines);
}
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @drm_enc: encoder pointer
+ */
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1950,6 +2109,10 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
}
+/**
+ * dpu_encoder_is_valid_for_commit - check if encoder has valid parameters for commit.
+ * @drm_enc: Pointer to drm encoder structure
+ */
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1971,6 +2134,11 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
return true;
}
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @drm_enc: encoder pointer
+ */
void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -2069,6 +2237,10 @@ static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
}
}
+/**
+ * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline
+ * @phys_enc: Pointer to physical encoder structure
+ */
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
@@ -2152,6 +2324,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
ctl->ops.clear_pending_flush(ctl);
}
+/**
+ * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
+ * @phys_enc: Pointer to physical encoder
+ * @dpu_fmt: Pointer to the format description
+ * @output_type: HDMI/WB
+ */
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
const struct msm_format *dpu_fmt,
u32 output_type)
@@ -2456,6 +2634,13 @@ static const struct drm_encoder_funcs dpu_encoder_funcs = {
.debugfs_init = dpu_encoder_debugfs_init,
};
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant
+ * @disp_info: Pointer to display information structure
+ * Returns: Pointer to newly created drm encoder
+ */
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
int drm_enc_mode,
struct msm_display_info *disp_info)
@@ -2577,6 +2762,10 @@ int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_enc)
return ret;
}
+/**
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
struct dpu_encoder_virt *dpu_enc = NULL;
@@ -2596,6 +2785,12 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
return INTF_MODE_NONE;
}
+/**
+ * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
+ * This helper function is used by physical encoder to get DSC blocks mask
+ * used for this encoder.
+ * @phys_enc: Pointer to physical encoder structure
+ */
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
{
struct drm_encoder *encoder = phys_enc->parent;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index f7465a1774aa..92b5ee390788 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -19,6 +19,8 @@
#define IDLE_TIMEOUT (66 - 16/2)
+#define MAX_H_TILES_PER_DISPLAY 2
+
/**
* struct msm_display_info - defines display properties
* @intf_type: INTF_ type
@@ -36,159 +38,54 @@ struct msm_display_info {
enum dpu_vsync_source vsync_source;
};
-/**
- * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
- * @encoder: encoder pointer
- * @crtc: crtc pointer
- */
void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
struct drm_crtc *crtc);
-/**
- * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
- * the encoder is assigned to the given crtc
- * @encoder: encoder pointer
- * @crtc: crtc pointer
- * @enable: true if vblank should be enabled
- */
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
struct drm_crtc *crtc, bool enable);
-/**
- * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
- * path (i.e. ctl flush and start) at next appropriate time.
- * Immediately: if no previous commit is outstanding.
- * Delayed: Block until next trigger can be issued.
- * @encoder: encoder pointer
- */
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
-/**
- * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
- * kickoff and trigger the ctl prepare progress for command mode display.
- * @encoder: encoder pointer
- */
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
-/**
- * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
- * (i.e. ctl flush and start) immediately.
- * @encoder: encoder pointer
- */
void dpu_encoder_kickoff(struct drm_encoder *encoder);
-/**
- * dpu_encoder_wakeup_time - get the time of the next vsync
- */
int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time);
int dpu_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
int dpu_encoder_wait_for_tx_complete(struct drm_encoder *drm_encoder);
-/*
- * dpu_encoder_get_intf_mode - get interface mode of the given encoder
- * @encoder: Pointer to drm encoder object
- */
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
-/**
- * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
- * @encoder: encoder pointer
- */
void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
-/**
- * dpu_encoder_init - initialize virtual encoder object
- * @dev: Pointer to drm device structure
- * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant
- * @disp_info: Pointer to display information structure
- * Returns: Pointer to newly created drm encoder
- */
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
int drm_enc_mode,
struct msm_display_info *disp_info);
-/**
- * dpu_encoder_set_idle_timeout - set the idle timeout for video
- * and command mode encoders.
- * @drm_enc: Pointer to previously created drm encoder structure
- * @idle_timeout: idle timeout duration in milliseconds
- */
-void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
- u32 idle_timeout);
-/**
- * dpu_encoder_get_linecount - get interface line count for the encoder.
- * @drm_enc: Pointer to previously created drm encoder structure
- */
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_get_vsync_count - get vsync count for the encoder.
- * @drm_enc: Pointer to previously created drm encoder structure
- */
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_is_widebus_enabled - return bool value if widebus is enabled
- * @drm_enc: Pointer to previously created drm encoder structure
- */
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
- * for the encoder.
- * @drm_enc: Pointer to previously created drm encoder structure
- */
bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
- * in virtual encoder that can collect CRC values
- * @drm_enc: Pointer to previously created drm encoder structure
- * Returns: Number of physical encoders for given drm encoder
- */
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_setup_misr - enable misr calculations
- * @drm_enc: Pointer to previously created drm encoder structure
- */
void dpu_encoder_setup_misr(const struct drm_encoder *drm_encoder);
-/**
- * dpu_encoder_get_crc - get the crc value from interface blocks
- * @drm_enc: Pointer to previously created drm encoder structure
- * Returns: 0 on success, error otherwise
- */
int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos);
-/**
- * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
- * @drm_enc: Pointer to previously created drm encoder structure
- */
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
- * @drm_enc: Pointer to previously created drm encoder structure
- * @job: Pointer to the current drm writeback job
- */
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job);
-/**
- * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder.
- * @drm_enc: Pointer to previously created drm encoder structure
- * @job: Pointer to the current drm writeback job
- */
void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
struct drm_writeback_job *job);
-/**
- * dpu_encoder_is_valid_for_commit - check if encode has valid parameters for commit.
- * @drm_enc: Pointer to drm encoder structure
- */
bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index e77ebe3a68da..63f09857025c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -279,37 +279,15 @@ struct dpu_encoder_wait_info {
s64 timeout_ms;
};
-/**
- * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
- * @p: Pointer to init params structure
- * Return: Error code or newly allocated encoder
- */
struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
-/**
- * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
- * @dev: Corresponding device for devres management
- * @p: Pointer to init params structure
- * Return: Error code or newly allocated encoder
- */
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
-/**
- * dpu_encoder_phys_wb_init - initialize writeback encoder
- * @dev: Corresponding device for devres management
- * @init: Pointer to init info structure with initialization params
- */
struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
-/**
- * dpu_encoder_helper_trigger_start - control start helper function
- * This helper function may be optionally specified by physical
- * encoders if they require ctl_start triggering.
- * @phys_enc: Pointer to physical encoder structure
- */
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
@@ -331,106 +309,38 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
return BLEND_3D_NONE;
}
-/**
- * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
- * This helper function is used by physical encoder to get DSC blocks mask
- * used for this encoder.
- * @phys_enc: Pointer to physical encoder structure
- */
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder
- * This helper function is used by physical encoder to get DSC config
- * used for this encoder.
- * @drm_enc: Pointer to encoder structure
- */
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc);
-/**
- * dpu_encoder_get_drm_fmt - return DRM fourcc format
- * @phys_enc: Pointer to physical encoder structure
- */
u32 dpu_encoder_get_drm_fmt(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_needs_periph_flush - return true if physical encoder requires
- * peripheral flush
- * @phys_enc: Pointer to physical encoder structure
- */
bool dpu_encoder_needs_periph_flush(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_helper_split_config - split display configuration helper function
- * This helper function may be used by physical encoders to configure
- * the split display related registers.
- * @phys_enc: Pointer to physical encoder structure
- * @interface: enum dpu_intf setting
- */
void dpu_encoder_helper_split_config(
struct dpu_encoder_phys *phys_enc,
enum dpu_intf interface);
-/**
- * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
- * timed out, including reporting frame error event to crtc and debug dump
- * @phys_enc: Pointer to physical encoder structure
- * @intr_idx: Failing interrupt index
- */
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx);
-/**
- * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
- * note: will call dpu_encoder_helper_wait_for_irq on timeout
- * @phys_enc: Pointer to physical encoder structure
- * @irq: IRQ index
- * @func: IRQ callback to be called in case of timeout
- * @wait_info: wait info struct
- * @Return: 0 or -ERROR
- */
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
unsigned int irq,
void (*func)(void *arg),
struct dpu_encoder_wait_info *wait_info);
-/**
- * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline
- * @phys_enc: Pointer to physical encoder structure
- */
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
- * @phys_enc: Pointer to physical encoder
- * @output_type: HDMI/WB
- */
void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc,
const struct msm_format *dpu_fmt,
u32 output_type);
-/**
- * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception
- * @drm_enc: Pointer to drm encoder structure
- * @phys_enc: Pointer to physical encoder
- * Note: This is called from IRQ handler context.
- */
void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc);
-/** dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception
- * @drm_enc: Pointer to drm encoder structure
- * @phys_enc: Pointer to physical encoder
- * Note: This is called from IRQ handler context.
- */
void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc);
-/** dpu_encoder_frame_done_callback -- Notify virtual encoder that this phys encoder completes last request frame
- * @drm_enc: Pointer to drm encoder structure
- * @phys_enc: Pointer to physical encoder
- * @event: Event to process
- */
void dpu_encoder_frame_done_callback(
struct drm_encoder *drm_enc,
struct dpu_encoder_phys *ready_phys, u32 event);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 6fc31d47cd1d..e9bbccc44dad 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -720,6 +720,12 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @dev: Corresponding device for devres management
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index ba8878d21cf0..abd6600046cb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -302,7 +302,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
- if (phys_enc->hw_pp->merge_3d)
+ if (intf_cfg.mode_3d && phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
@@ -440,10 +440,12 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
struct dpu_hw_ctl *ctl;
const struct msm_format *fmt;
u32 fmt_fourcc;
+ u32 mode_3d;
ctl = phys_enc->hw_ctl;
fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc);
fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0);
+ mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
DPU_DEBUG_VIDENC(phys_enc, "\n");
@@ -466,7 +468,8 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
goto skip_flush;
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
- if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
+ if (mode_3d && ctl->ops.update_pending_flush_merge_3d &&
+ phys_enc->hw_pp->merge_3d)
ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);
if (ctl->ops.update_pending_flush_cdm && phys_enc->hw_cdm)
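/*
 * Illustrative note (not part of this patch): BLEND_3D_NONE is 0, so the
 * added mode_3d guards skip the merge_3d flush entirely when no 3D
 * blending is in use:
 *
 *	mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
 *	if (mode_3d && pp->merge_3d)
 *		... flush the merge_3d block ...
 */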
@@ -743,6 +746,12 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
}
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @dev: Corresponding device for devres management
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 882c717859ce..4c006ec74575 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -166,10 +166,10 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
/**
* dpu_encoder_phys_wb_setup_fb - setup output framebuffer
* @phys_enc: Pointer to physical encoder
- * @fb: Pointer to output framebuffer
+ * @format: Format of the framebuffer
*/
static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
- struct drm_framebuffer *fb)
+ const struct msm_format *format)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
struct dpu_hw_wb *hw_wb;
@@ -193,12 +193,12 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
hw_wb->ops.setup_roi(hw_wb, wb_cfg);
if (hw_wb->ops.setup_outformat)
- hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+ hw_wb->ops.setup_outformat(hw_wb, wb_cfg, format);
if (hw_wb->ops.setup_cdp) {
const struct dpu_perf_cfg *perf = phys_enc->dpu_kms->catalog->perf;
- hw_wb->ops.setup_cdp(hw_wb, wb_cfg->dest.format,
+ hw_wb->ops.setup_cdp(hw_wb, format,
perf->cdp_cfg[DPU_PERF_CDP_USAGE_NRT].wr_enable);
}
@@ -275,6 +275,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
struct dpu_hw_pingpong *hw_pp;
struct dpu_hw_cdm *hw_cdm;
u32 pending_flush = 0;
+ u32 mode_3d;
if (!phys_enc)
return;
@@ -283,6 +284,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
hw_pp = phys_enc->hw_pp;
hw_ctl = phys_enc->hw_ctl;
hw_cdm = phys_enc->hw_cdm;
+ mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
@@ -294,7 +296,8 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
if (hw_ctl->ops.update_pending_flush_wb)
hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx);
- if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d)
+ if (mode_3d && hw_ctl->ops.update_pending_flush_merge_3d &&
+ hw_pp && hw_pp->merge_3d)
hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
hw_pp->merge_3d->idx);
@@ -318,15 +321,10 @@ static void dpu_encoder_phys_wb_setup(
{
struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
struct drm_display_mode mode = phys_enc->cached_mode;
- struct drm_framebuffer *fb = NULL;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
- struct drm_writeback_job *wb_job;
const struct msm_format *format;
- const struct msm_format *dpu_fmt;
- wb_job = wb_enc->wb_job;
format = msm_framebuffer_format(wb_enc->wb_job->fb);
- dpu_fmt = mdp_get_format(&phys_enc->dpu_kms->base, format->pixel_format, wb_job->fb->modifier);
DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n",
hw_wb->idx - WB_0, mode.name,
@@ -338,9 +336,9 @@ static void dpu_encoder_phys_wb_setup(
dpu_encoder_phys_wb_set_qos(phys_enc);
- dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
+ dpu_encoder_phys_wb_setup_fb(phys_enc, format);
- dpu_encoder_helper_phys_setup_cdm(phys_enc, dpu_fmt, CDM_CDWN_OUTPUT_WB);
+ dpu_encoder_helper_phys_setup_cdm(phys_enc, format, CDM_CDWN_OUTPUT_WB);
dpu_encoder_phys_wb_setup_ctl(phys_enc);
}
@@ -584,26 +582,20 @@ static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc
format = msm_framebuffer_format(job->fb);
- wb_cfg->dest.format = mdp_get_format(&phys_enc->dpu_kms->base,
- format->pixel_format, job->fb->modifier);
- if (!wb_cfg->dest.format) {
- /* this error should be detected during atomic_check */
- DPU_ERROR("failed to get format %p4cc\n", &format->pixel_format);
- return;
- }
-
- ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest);
+ ret = dpu_format_populate_plane_sizes(job->fb, &wb_cfg->dest);
if (ret) {
- DPU_DEBUG("failed to populate layout %d\n", ret);
+ DPU_DEBUG("failed to populate plane sizes%d\n", ret);
return;
}
+ dpu_format_populate_addrs(aspace, job->fb, &wb_cfg->dest);
+
wb_cfg->dest.width = job->fb->width;
wb_cfg->dest.height = job->fb->height;
- wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
+ wb_cfg->dest.num_planes = format->num_planes;
- if ((wb_cfg->dest.format->fetch_type == MDP_PLANE_PLANAR) &&
- (wb_cfg->dest.format->element[0] == C1_B_Cb))
+ if ((format->fetch_type == MDP_PLANE_PLANAR) &&
+ (format->element[0] == C1_B_Cb))
swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 6b1e9a617da3..59c9427da7dd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -13,9 +13,6 @@
#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
-#define DPU_MAX_IMG_WIDTH 0x3FFF
-#define DPU_MAX_IMG_HEIGHT 0x3FFF
-
/*
* struct dpu_media_color_map - maps drm format to media format
* @format: DRM base pixel format
@@ -93,10 +90,9 @@ static int _dpu_format_get_media_color_ubwc(const struct msm_format *fmt)
return color_fmt;
}
-static int _dpu_format_get_plane_sizes_ubwc(
+static int _dpu_format_populate_plane_sizes_ubwc(
const struct msm_format *fmt,
- const uint32_t width,
- const uint32_t height,
+ struct drm_framebuffer *fb,
struct dpu_hw_fmt_layout *layout)
{
int i;
@@ -104,9 +100,8 @@ static int _dpu_format_get_plane_sizes_ubwc(
bool meta = MSM_FORMAT_IS_UBWC(fmt);
memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
- layout->format = fmt;
- layout->width = width;
- layout->height = height;
+ layout->width = fb->width;
+ layout->height = fb->height;
layout->num_planes = fmt->num_planes;
color = _dpu_format_get_media_color_ubwc(fmt);
@@ -116,19 +111,19 @@ static int _dpu_format_get_plane_sizes_ubwc(
return -EINVAL;
}
- if (MSM_FORMAT_IS_YUV(layout->format)) {
+ if (MSM_FORMAT_IS_YUV(fmt)) {
uint32_t y_sclines, uv_sclines;
uint32_t y_meta_scanlines = 0;
uint32_t uv_meta_scanlines = 0;
layout->num_planes = 2;
- layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
- y_sclines = VENUS_Y_SCANLINES(color, height);
+ layout->plane_pitch[0] = VENUS_Y_STRIDE(color, fb->width);
+ y_sclines = VENUS_Y_SCANLINES(color, fb->height);
layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
- layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
- uv_sclines = VENUS_UV_SCANLINES(color, height);
+ layout->plane_pitch[1] = VENUS_UV_STRIDE(color, fb->width);
+ uv_sclines = VENUS_UV_SCANLINES(color, fb->height);
layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
@@ -136,13 +131,13 @@ static int _dpu_format_get_plane_sizes_ubwc(
goto done;
layout->num_planes += 2;
- layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
- y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+ layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, fb->width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color, fb->height);
layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
- layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
- uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+ layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, fb->width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, fb->height);
layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
@@ -151,16 +146,16 @@ static int _dpu_format_get_plane_sizes_ubwc(
layout->num_planes = 1;
- layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
- rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+ layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, fb->width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color, fb->height);
layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
if (!meta)
goto done;
layout->num_planes += 2;
- layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
- rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+ layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, fb->width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, fb->height);
layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
}
@@ -172,26 +167,23 @@ done:
return 0;
}
-static int _dpu_format_get_plane_sizes_linear(
+static int _dpu_format_populate_plane_sizes_linear(
const struct msm_format *fmt,
- const uint32_t width,
- const uint32_t height,
- struct dpu_hw_fmt_layout *layout,
- const uint32_t *pitches)
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
{
int i;
memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
- layout->format = fmt;
- layout->width = width;
- layout->height = height;
+ layout->width = fb->width;
+ layout->height = fb->height;
layout->num_planes = fmt->num_planes;
/* Due to memset above, only need to set planes of interest */
if (fmt->fetch_type == MDP_PLANE_INTERLEAVED) {
layout->num_planes = 1;
- layout->plane_size[0] = width * height * layout->format->bpp;
- layout->plane_pitch[0] = width * layout->format->bpp;
+ layout->plane_size[0] = fb->width * fb->height * fmt->bpp;
+ layout->plane_pitch[0] = fb->width * fmt->bpp;
} else {
uint32_t v_subsample, h_subsample;
uint32_t chroma_samp;
@@ -201,7 +193,7 @@ static int _dpu_format_get_plane_sizes_linear(
_dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
&h_subsample);
- if (width % h_subsample || height % v_subsample) {
+ if (fb->width % h_subsample || fb->height % v_subsample) {
DRM_ERROR("mismatch in subsample vs dimensions\n");
return -EINVAL;
}
@@ -209,11 +201,11 @@ static int _dpu_format_get_plane_sizes_linear(
if ((fmt->pixel_format == DRM_FORMAT_NV12) &&
(MSM_FORMAT_IS_DX(fmt)))
bpp = 2;
- layout->plane_pitch[0] = width * bpp;
+ layout->plane_pitch[0] = fb->width * bpp;
layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
- layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[0] = layout->plane_pitch[0] * fb->height;
layout->plane_size[1] = layout->plane_pitch[1] *
- (height / v_subsample);
+ (fb->height / v_subsample);
if (fmt->fetch_type == MDP_PLANE_PSEUDO_PLANAR) {
layout->num_planes = 2;
@@ -234,8 +226,13 @@ static int _dpu_format_get_plane_sizes_linear(
* all the components based on ubwc specifications.
*/
for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
- if (pitches && layout->plane_pitch[i] < pitches[i])
- layout->plane_pitch[i] = pitches[i];
+ if (layout->plane_pitch[i] <= fb->pitches[i]) {
+ layout->plane_pitch[i] = fb->pitches[i];
+ } else {
+ DRM_DEBUG("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ return -EINVAL;
+ }
}
for (i = 0; i < DPU_MAX_PLANES; i++)
@@ -244,53 +241,54 @@ static int _dpu_format_get_plane_sizes_linear(
return 0;
}
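/*
 * Illustrative sketch (not part of this patch): the pitch handling above
 * accepts framebuffers padded beyond the computed minimum pitch but
 * rejects undersized ones.
 */
static int demo_check_pitch(u32 min_pitch, u32 fb_pitch, u32 *out_pitch)
{
	/* padded pitches are fine, undersized ones are not */
	if (fb_pitch < min_pitch)
		return -EINVAL;

	*out_pitch = fb_pitch;
	return 0;
}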
-static int dpu_format_get_plane_sizes(
- const struct msm_format *fmt,
- const uint32_t w,
- const uint32_t h,
- struct dpu_hw_fmt_layout *layout,
- const uint32_t *pitches)
+/**
+ * dpu_format_populate_plane_sizes - populate the non-address parts of the
+ * layout based on the fb and the format found in the fb
+ * @fb: framebuffer pointer
+ * @layout: format layout structure to populate
+ *
+ * Return: error code on failure or 0 if the plane sizes were populated
+ */
+int dpu_format_populate_plane_sizes(
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
{
- if (!layout || !fmt) {
+ const struct msm_format *fmt;
+
+ if (!layout || !fb) {
DRM_ERROR("invalid pointer\n");
return -EINVAL;
}
- if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+ if (fb->width > DPU_MAX_IMG_WIDTH ||
+ fb->height > DPU_MAX_IMG_HEIGHT) {
DRM_ERROR("image dimensions outside max range\n");
return -ERANGE;
}
+ fmt = msm_framebuffer_format(fb);
+
if (MSM_FORMAT_IS_UBWC(fmt) || MSM_FORMAT_IS_TILE(fmt))
- return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+ return _dpu_format_populate_plane_sizes_ubwc(fmt, fb, layout);
- return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+ return _dpu_format_populate_plane_sizes_linear(fmt, fb, layout);
}
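/*
 * Illustrative usage sketch (not part of this patch): with layout
 * population split in two, callers validate the sizes first and resolve
 * addresses only afterwards, as dpu_encoder_phys_wb_prepare_wb_job()
 * does above:
 *
 *	ret = dpu_format_populate_plane_sizes(fb, &layout);
 *	if (ret)
 *		return ret;
 *	dpu_format_populate_addrs(aspace, fb, &layout);
 */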
-static int _dpu_format_populate_addrs_ubwc(
- struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
- struct dpu_hw_fmt_layout *layout)
+static void _dpu_format_populate_addrs_ubwc(struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
{
+ const struct msm_format *fmt;
uint32_t base_addr = 0;
bool meta;
- if (!fb || !layout) {
- DRM_ERROR("invalid pointers\n");
- return -EINVAL;
- }
-
- if (aspace)
- base_addr = msm_framebuffer_iova(fb, aspace, 0);
- if (!base_addr) {
- DRM_ERROR("failed to retrieve base addr\n");
- return -EFAULT;
- }
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
- meta = MSM_FORMAT_IS_UBWC(layout->format);
+ fmt = msm_framebuffer_format(fb);
+ meta = MSM_FORMAT_IS_UBWC(fmt);
/* Per-format logic for verifying active planes */
- if (MSM_FORMAT_IS_YUV(layout->format)) {
+ if (MSM_FORMAT_IS_YUV(fmt)) {
/************************************************/
/* UBWC ** */
/* buffer ** DPU PLANE */
@@ -319,7 +317,7 @@ static int _dpu_format_populate_addrs_ubwc(
+ layout->plane_size[2] + layout->plane_size[3];
if (!meta)
- return 0;
+ return;
/* configure Y metadata plane */
layout->plane_addr[2] = base_addr;
@@ -350,119 +348,43 @@ static int _dpu_format_populate_addrs_ubwc(
layout->plane_addr[1] = 0;
if (!meta)
- return 0;
+ return;
layout->plane_addr[2] = base_addr;
layout->plane_addr[3] = 0;
}
- return 0;
}
-static int _dpu_format_populate_addrs_linear(
- struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
- struct dpu_hw_fmt_layout *layout)
+static void _dpu_format_populate_addrs_linear(struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
{
unsigned int i;
- /* Can now check the pitches given vs pitches expected */
- for (i = 0; i < layout->num_planes; ++i) {
- if (layout->plane_pitch[i] > fb->pitches[i]) {
- DRM_ERROR("plane %u expected pitch %u, fb %u\n",
- i, layout->plane_pitch[i], fb->pitches[i]);
- return -EINVAL;
- }
- }
-
/* Populate addresses for simple formats here */
- for (i = 0; i < layout->num_planes; ++i) {
- if (aspace)
- layout->plane_addr[i] =
- msm_framebuffer_iova(fb, aspace, i);
- if (!layout->plane_addr[i]) {
- DRM_ERROR("failed to retrieve base addr\n");
- return -EFAULT;
- }
- }
-
- return 0;
+ for (i = 0; i < layout->num_planes; ++i)
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
}
-int dpu_format_populate_layout(
- struct msm_gem_address_space *aspace,
- struct drm_framebuffer *fb,
- struct dpu_hw_fmt_layout *layout)
+/**
+ * dpu_format_populate_addrs - populate buffer addresses based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @layout: format layout structure to populate
+ */
+void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
{
- int ret;
-
- if (!fb || !layout) {
- DRM_ERROR("invalid arguments\n");
- return -EINVAL;
- }
+ const struct msm_format *fmt;
- if ((fb->width > DPU_MAX_IMG_WIDTH) ||
- (fb->height > DPU_MAX_IMG_HEIGHT)) {
- DRM_ERROR("image dimensions outside max range\n");
- return -ERANGE;
- }
-
- layout->format = msm_framebuffer_format(fb);
-
- /* Populate the plane sizes etc via get_format */
- ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
- layout, fb->pitches);
- if (ret)
- return ret;
+ fmt = msm_framebuffer_format(fb);
/* Populate the addresses given the fb */
- if (MSM_FORMAT_IS_UBWC(layout->format) ||
- MSM_FORMAT_IS_TILE(layout->format))
- ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ if (MSM_FORMAT_IS_UBWC(fmt) ||
+ MSM_FORMAT_IS_TILE(fmt))
+ _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
else
- ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
-
- return ret;
-}
-
-int dpu_format_check_modified_format(
- const struct msm_kms *kms,
- const struct msm_format *fmt,
- const struct drm_mode_fb_cmd2 *cmd,
- struct drm_gem_object **bos)
-{
- const struct drm_format_info *info;
- struct dpu_hw_fmt_layout layout;
- uint32_t bos_total_size = 0;
- int ret, i;
-
- if (!fmt || !cmd || !bos) {
- DRM_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
- info = drm_format_info(fmt->pixel_format);
- if (!info)
- return -EINVAL;
-
- ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
- &layout, cmd->pitches);
- if (ret)
- return ret;
-
- for (i = 0; i < info->num_planes; i++) {
- if (!bos[i]) {
- DRM_ERROR("invalid handle for plane %d\n", i);
- return -EINVAL;
- }
- if ((i == 0) || (bos[i] != bos[0]))
- bos_total_size += bos[i]->size;
- }
-
- if (bos_total_size < layout.total_size) {
- DRM_ERROR("buffers total size too small %u expected %u\n",
- bos_total_size, layout.total_size);
- return -EINVAL;
- }
-
- return 0;
+ _dpu_format_populate_addrs_linear(aspace, fb, layout);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index 210d0ed5f0af..c6145d43aa3f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -31,35 +31,12 @@ static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
return false;
}
-/**
- * dpu_format_check_modified_format - validate format and buffers for
- * dpu non-standard, i.e. modified format
- * @kms: kms driver
- * @msm_fmt: pointer to the msm_fmt base pointer of an msm_format
- * @cmd: fb_cmd2 structure user request
- * @bos: gem buffer object list
- *
- * Return: error code on failure, 0 on success
- */
-int dpu_format_check_modified_format(
- const struct msm_kms *kms,
- const struct msm_format *msm_fmt,
- const struct drm_mode_fb_cmd2 *cmd,
- struct drm_gem_object **bos);
+void dpu_format_populate_addrs(struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout);
-/**
- * dpu_format_populate_layout - populate the given format layout based on
- * mmu, fb, and format found in the fb
- * @aspace: address space pointer
- * @fb: framebuffer pointer
- * @fmtl: format layout structure to populate
- *
- * Return: error code on failure, -EAGAIN if success but the addresses
- * are the same as before or 0 if new addresses were populated
- */
-int dpu_format_populate_layout(
- struct msm_gem_address_space *aspace,
+int dpu_format_populate_plane_sizes(
struct drm_framebuffer *fb,
- struct dpu_hw_fmt_layout *fmtl);
+ struct dpu_hw_fmt_layout *layout);
#endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index dcb4fd85e73b..0b342c043875 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -21,6 +21,16 @@
(VIG_BASE_MASK | \
BIT(DPU_SSPP_CSC_10BIT))
+#define VIG_MSM8953_MASK \
+ (BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_SCALER_QSEED2) |\
+ BIT(DPU_SSPP_CSC))
+
+#define VIG_MSM8996_MASK \
+ (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_CDP) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_SCALER_QSEED2) |\
+ BIT(DPU_SSPP_CSC))
+
#define VIG_MSM8998_MASK \
(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
@@ -32,6 +42,12 @@
#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
+#define DMA_MSM8953_MASK \
+ (BIT(DPU_SSPP_QOS))
+
+#define DMA_MSM8996_MASK \
+ (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_CDP))
+
#define DMA_MSM8998_MASK \
(BIT(DPU_SSPP_QOS) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
@@ -57,9 +73,19 @@
#define DMA_CURSOR_SDM845_MASK_SDMA \
(DMA_CURSOR_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
+#define DMA_CURSOR_MSM8996_MASK \
+ (DMA_MSM8996_MASK | BIT(DPU_SSPP_CURSOR))
+
#define DMA_CURSOR_MSM8998_MASK \
(DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
+#define RGB_MSM8953_MASK \
+ (BIT(DPU_SSPP_QOS))
+
+#define RGB_MSM8996_MASK \
+ (BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_CDP) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_SCALER_RGB))
+
#define MIXER_MSM8998_MASK \
(BIT(DPU_MIXER_SOURCESPLIT))
@@ -69,6 +95,12 @@
#define MIXER_QCM2290_MASK \
(BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+#define PINGPONG_MSM8996_MASK \
+ (BIT(DPU_PINGPONG_DSC))
+
+#define PINGPONG_MSM8996_TE2_MASK \
+ (PINGPONG_MSM8996_MASK | BIT(DPU_PINGPONG_TE2))
+
#define PINGPONG_SDM845_MASK \
(BIT(DPU_PINGPONG_DITHER) | BIT(DPU_PINGPONG_DSC))
@@ -115,10 +147,6 @@
#define MAX_HORZ_DECIMATION 4
#define MAX_VERT_DECIMATION 4
-#define MAX_UPSCALE_RATIO 20
-#define MAX_DOWNSCALE_RATIO 4
-#define SSPP_UNITY_SCALE 1
-
#define STRCAT(X, Y) (X Y)
static const uint32_t plane_formats[] = {
@@ -276,8 +304,6 @@ static const u32 wb2_formats_rgb_yuv[] = {
/* SSPP common configuration */
#define _VIG_SBLK(scaler_ver) \
{ \
- .maxdwnscale = MAX_DOWNSCALE_RATIO, \
- .maxupscale = MAX_UPSCALE_RATIO, \
.scaler_blk = {.name = "scaler", \
.version = scaler_ver, \
.base = 0xa00, .len = 0xa0,}, \
@@ -285,15 +311,11 @@ static const u32 wb2_formats_rgb_yuv[] = {
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = NULL, \
}
#define _VIG_SBLK_ROT(scaler_ver, rot_cfg) \
{ \
- .maxdwnscale = MAX_DOWNSCALE_RATIO, \
- .maxupscale = MAX_UPSCALE_RATIO, \
.scaler_blk = {.name = "scaler", \
.version = scaler_ver, \
.base = 0xa00, .len = 0xa0,}, \
@@ -301,29 +323,40 @@ static const u32 wb2_formats_rgb_yuv[] = {
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = rot_cfg, \
}
#define _VIG_SBLK_NOSCALE() \
{ \
- .maxdwnscale = SSPP_UNITY_SCALE, \
- .maxupscale = SSPP_UNITY_SCALE, \
.format_list = plane_formats, \
.num_formats = ARRAY_SIZE(plane_formats), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ }
+
+/* qseed2 is not supported by the driver, so scaling is disabled */
+#define _VIG_SBLK_QSEED2() \
+ { \
+ .scaler_blk = {.name = "scaler", \
+ /* no version for qseed2 */ \
+ .base = 0x200, .len = 0xa0,}, \
+ .csc_blk = {.name = "csc", \
+ .base = 0x320, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .rotation_cfg = NULL, \
+ }
+
+#define _RGB_SBLK() \
+ { \
+ .scaler_blk = {.name = "scaler", \
+ .base = 0x200, .len = 0x28,}, \
+ .format_list = plane_formats, \
+ .num_formats = ARRAY_SIZE(plane_formats), \
}
#define _DMA_SBLK() \
{ \
- .maxdwnscale = SSPP_UNITY_SCALE, \
- .maxupscale = SSPP_UNITY_SCALE, \
.format_list = plane_formats, \
.num_formats = ARRAY_SIZE(plane_formats), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
}
static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
@@ -332,6 +365,9 @@ static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
.rot_format_list = rotation_v2_formats,
};
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed2 =
+ _VIG_SBLK_QSEED2();
+
static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale =
_VIG_SBLK_NOSCALE();
@@ -363,6 +399,8 @@ static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 =
static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 =
_VIG_SBLK(SSPP_SCALER_VER(3, 3));
+static const struct dpu_sspp_sub_blks dpu_rgb_sblk = _RGB_SBLK();
+
static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
/*************************************************************
@@ -427,6 +465,15 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
+static const struct dpu_pingpong_sub_blks msm8996_pp_sblk_te = {
+ .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
+ .version = 0x1},
+};
+
+static const struct dpu_pingpong_sub_blks msm8996_pp_sblk = {
+ /* No dither block */
+};
+
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
.te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
.version = 0x1},
@@ -492,6 +539,34 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
},
};
+static const struct dpu_vbif_cfg msm8996_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1040,
+ .default_ot_rd_limit = 32,
+ .default_ot_wr_limit = 16,
+ .features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x20,
+ .dynamic_ot_rd_tbl = {
+ .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
+ .cfg = msm8998_ot_rdwr_cfg,
+ },
+ .dynamic_ot_wr_tbl = {
+ .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
+ .cfg = msm8998_ot_rdwr_cfg,
+ },
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(msm8998_rt_pri_lvl),
+ .priority_lvl = msm8998_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(msm8998_nrt_pri_lvl),
+ .priority_lvl = msm8998_nrt_pri_lvl,
+ },
+ },
+};
+
static const struct dpu_vbif_cfg msm8998_vbif[] = {
{
.name = "vbif_rt", .id = VBIF_RT,
@@ -675,6 +750,11 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
* Hardware catalog
*************************************************************/
+#include "catalog/dpu_1_7_msm8996.h"
+#include "catalog/dpu_1_14_msm8937.h"
+#include "catalog/dpu_1_15_msm8917.h"
+#include "catalog/dpu_1_16_msm8953.h"
+
#include "catalog/dpu_3_0_msm8998.h"
#include "catalog/dpu_3_2_sdm660.h"
#include "catalog/dpu_3_3_sdm630.h"
@@ -685,6 +765,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_5_0_sm8150.h"
#include "catalog/dpu_5_1_sc8180x.h"
#include "catalog/dpu_5_2_sm7150.h"
+#include "catalog/dpu_5_3_sm6150.h"
#include "catalog/dpu_5_4_sm6125.h"
#include "catalog/dpu_6_0_sm8250.h"
@@ -699,6 +780,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_8_0_sc8280xp.h"
#include "catalog/dpu_8_1_sm8450.h"
+#include "catalog/dpu_8_4_sa8775p.h"
#include "catalog/dpu_9_0_sm8550.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 37e18e820a20..4cea19e1a203 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -21,8 +21,8 @@
#define DPU_HW_BLK_NAME_LEN 16
-#define MAX_IMG_WIDTH 0x3fff
-#define MAX_IMG_HEIGHT 0x3fff
+#define DPU_MAX_IMG_WIDTH 0x3fff
+#define DPU_MAX_IMG_HEIGHT 0x3fff
#define CRTC_DUAL_MIXERS 2
@@ -364,21 +364,15 @@ struct dpu_caps {
/**
* struct dpu_sspp_sub_blks : SSPP sub-blocks
* common: Pointer to common configurations shared by sub blocks
- * @maxdwnscale: max downscale ratio supported(without DECIMATION)
- * @maxupscale: maxupscale ratio supported
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
* @qseed_ver: qseed version
* @scaler_blk:
* @csc_blk:
* @format_list: Pointer to list of supported formats
* @num_formats: Number of supported formats
- * @virt_format_list: Pointer to list of supported formats for virtual planes
- * @virt_num_formats: Number of supported formats for virtual planes
* @dpu_rotation_cfg: inline rotation configuration
*/
struct dpu_sspp_sub_blks {
- u32 maxdwnscale;
- u32 maxupscale;
u32 max_per_pipe_bw;
u32 qseed_ver;
struct dpu_scaler_blk scaler_blk;
@@ -386,8 +380,6 @@ struct dpu_sspp_sub_blks {
const u32 *format_list;
u32 num_formats;
- const u32 *virt_format_list;
- u32 virt_num_formats;
const struct dpu_rotation_cfg *rotation_cfg;
};
@@ -621,6 +613,16 @@ struct dpu_wb_cfg {
enum dpu_clk_ctrl_type clk_ctrl;
};
+/**
+ * struct dpu_cwb_cfg : MDP CWB mux instance info
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @features: bit mask identifying sub-blocks/features
+ */
+struct dpu_cwb_cfg {
+ DPU_HW_BLK_INFO;
+};
+
/**
* struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
* @pps pixel per seconds
@@ -823,6 +825,9 @@ struct dpu_mdss_cfg {
u32 dspp_count;
const struct dpu_dspp_cfg *dspp;
+ u32 cwb_count;
+ const struct dpu_cwb_cfg *cwb;
+
/* Add additional block data structures here */
const struct dpu_perf_cfg *perf;
@@ -831,6 +836,10 @@ struct dpu_mdss_cfg {
const struct dpu_format_extended *vig_formats;
};
+extern const struct dpu_mdss_cfg dpu_msm8917_cfg;
+extern const struct dpu_mdss_cfg dpu_msm8937_cfg;
+extern const struct dpu_mdss_cfg dpu_msm8953_cfg;
+extern const struct dpu_mdss_cfg dpu_msm8996_cfg;
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
extern const struct dpu_mdss_cfg dpu_sdm630_cfg;
extern const struct dpu_mdss_cfg dpu_sdm660_cfg;
@@ -843,6 +852,7 @@ extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
extern const struct dpu_mdss_cfg dpu_sc7180_cfg;
extern const struct dpu_mdss_cfg dpu_sm6115_cfg;
extern const struct dpu_mdss_cfg dpu_sm6125_cfg;
+extern const struct dpu_mdss_cfg dpu_sm6150_cfg;
extern const struct dpu_mdss_cfg dpu_sm6350_cfg;
extern const struct dpu_mdss_cfg dpu_qcm2290_cfg;
extern const struct dpu_mdss_cfg dpu_sm6375_cfg;
@@ -850,6 +860,7 @@ extern const struct dpu_mdss_cfg dpu_sm8350_cfg;
extern const struct dpu_mdss_cfg dpu_sc7280_cfg;
extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg;
extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
+extern const struct dpu_mdss_cfg dpu_sa8775p_cfg;
extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
extern const struct dpu_mdss_cfg dpu_sm8650_cfg;
extern const struct dpu_mdss_cfg dpu_x1e80100_cfg;
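
For orientation, a hypothetical catalog entry using the new struct dpu_cwb_cfg; the id/base/len values below are placeholders and are not taken from any real SoC header:

static const struct dpu_cwb_cfg example_cwb[] = {
	{
		.name = "cwb_0", .id = CWB_0,	/* CWB_0: assumed enum value */
		.base = 0x66200, .len = 0x20,
	},
};
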
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
index 55d2768a6d4d..ae1534c49ae0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -222,6 +222,14 @@ static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_
DPU_REG_WRITE(c, CDM_MUX, mux_cfg);
}
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * should be called once before accessing every cdm.
+ * @dev: DRM device handle
+ * @cfg: CDM catalog entry for which driver object is required
+ * @addr : mapped register io address of MDSS
+ * @mdss_rev: mdss hw core revision
+ */
struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev,
const struct dpu_cdm_cfg *cfg, void __iomem *addr,
const struct dpu_mdss_version *mdss_rev)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
index ec71c9886d75..6bb3476a05f8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -122,14 +122,6 @@ struct dpu_hw_cdm {
struct dpu_hw_cdm_ops ops;
};
-/**
- * dpu_hw_cdm_init - initializes the cdm hw driver object.
- * should be called once before accessing every cdm.
- * @dev: DRM device handle
- * @cdm: CDM catalog entry for which driver object is required
- * @addr : mapped register io address of MDSS
- * @mdss_rev: mdss hw core revision
- */
struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev,
const struct dpu_cdm_cfg *cdm, void __iomem *addr,
const struct dpu_mdss_version *mdss_rev);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 2e50049f2f85..4893f10d6a58 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -736,6 +736,15 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
+/**
+ * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl_path register.
+ * @dev: Corresponding device for devres management
+ * @cfg: ctl_path catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @mixer_count: Number of mixers in @mixer
+ * @mixer: Pointer to an array of Layer Mixers defined in the catalog
+ */
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 4401fdc0f3e4..85c6c835cc87 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -294,15 +294,6 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
return container_of(hw, struct dpu_hw_ctl, base);
}
-/**
- * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
- * Should be called before accessing any ctl_path register.
- * @dev: Corresponding device for devres management
- * @cfg: ctl_path catalog entry for which driver object is required
- * @addr: mapped register io address of MDP
- * @mixer_count: Number of mixers in @mixer
- * @mixer: Pointer to an array of Layer Mixers defined in the catalog
- */
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
const struct dpu_ctl_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c
new file mode 100644
index 000000000000..ae785f4ff0d4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#include <drm/drm_managed.h>
+#include "dpu_hw_cwb.h"
+
+#include <linux/bitfield.h>
+
+#define CWB_MUX 0x000
+#define CWB_MODE 0x004
+
+/* CWB mux block bit definitions */
+#define CWB_MUX_MASK GENMASK(3, 0)
+#define CWB_MODE_MASK GENMASK(2, 0)
+
+static void dpu_hw_cwb_config(struct dpu_hw_cwb *ctx,
+ struct dpu_hw_cwb_setup_cfg *cwb_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int cwb_mux_cfg = 0xF;
+ enum dpu_pingpong pp;
+ enum cwb_mode_input input;
+
+ if (!cwb_cfg)
+ return;
+
+ input = cwb_cfg->input;
+ pp = cwb_cfg->pp_idx;
+
+ if (input >= INPUT_MODE_MAX)
+ return;
+
+ /*
+ * The CWB_MUX register takes the pingpong index for the real-time
+ * display
+ */
+ if ((pp != PINGPONG_NONE) && (pp < PINGPONG_MAX))
+ cwb_mux_cfg = FIELD_PREP(CWB_MUX_MASK, pp - PINGPONG_0);
+
+ input = FIELD_PREP(CWB_MODE_MASK, input);
+
+ DPU_REG_WRITE(c, CWB_MUX, cwb_mux_cfg);
+ DPU_REG_WRITE(c, CWB_MODE, input);
+}
+
+/**
+ * dpu_hw_cwb_init() - Initializes the CWB mux hw driver object.
+ * @dev: Corresponding device for devres management
+ * @cfg: CWB catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_cwb context
+ */
+struct dpu_hw_cwb *dpu_hw_cwb_init(struct drm_device *dev,
+ const struct dpu_cwb_cfg *cfg,
+ void __iomem *addr)
+{
+ struct dpu_hw_cwb *c;
+
+ if (!addr)
+ return ERR_PTR(-EINVAL);
+
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ c->hw.blk_addr = addr + cfg->base;
+ c->hw.log_mask = DPU_DBG_MASK_CWB;
+
+ c->idx = cfg->id;
+ c->ops.config_cwb = dpu_hw_cwb_config;
+
+ return c;
+}
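
A worked example of the FIELD_PREP()/GENMASK() packing used by dpu_hw_cwb_config() above (editorial sketch; the values follow directly from the masks defined in this file and the enums in dpu_hw_cwb.h/dpu_hw_mdss.h):

static void example_cwb_encoding(void)
{
	/* PINGPONG_2 - PINGPONG_0 = 2, packed into bits [3:0] of CWB_MUX */
	u32 mux = FIELD_PREP(CWB_MUX_MASK, PINGPONG_2 - PINGPONG_0);	/* 0x2 */
	/* INPUT_MODE_DSPP_OUT = 1, packed into bits [2:0] of CWB_MODE */
	u32 mode = FIELD_PREP(CWB_MODE_MASK, INPUT_MODE_DSPP_OUT);	/* 0x1 */

	(void)mux;
	(void)mode;
}
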
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
new file mode 100644
index 000000000000..96b6edf6b2bb
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#ifndef _DPU_HW_CWB_H
+#define _DPU_HW_CWB_H
+
+#include "dpu_hw_util.h"
+
+struct dpu_hw_cwb;
+
+enum cwb_mode_input {
+ INPUT_MODE_LM_OUT,
+ INPUT_MODE_DSPP_OUT,
+ INPUT_MODE_MAX
+};
+
+/**
+ * struct dpu_hw_cwb_setup_cfg : Describes configuration for CWB mux
+ * @pp_idx: Index of the real-time pingpong that feeds the CWB mux
+ * @input: Input tap point
+ */
+struct dpu_hw_cwb_setup_cfg {
+ enum dpu_pingpong pp_idx;
+ enum cwb_mode_input input;
+};
+
+/**
+ * struct dpu_hw_cwb_ops : Interface to the cwb hw driver functions
+ * @config_cwb: configure CWB mux
+ */
+struct dpu_hw_cwb_ops {
+ void (*config_cwb)(struct dpu_hw_cwb *ctx,
+ struct dpu_hw_cwb_setup_cfg *cwb_cfg);
+};
+
+/**
+ * struct dpu_hw_cwb : CWB mux driver object
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: CWB index
+ * @ops: handle to operations possible for this CWB
+ */
+struct dpu_hw_cwb {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ enum dpu_cwb idx;
+
+ struct dpu_hw_cwb_ops ops;
+};
+
+/**
+ * to_dpu_hw_cwb - convert a base hardware block to its dpu_hw_cwb container
+ * @hw: Pointer to base hardware block
+ * Return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cwb *to_dpu_hw_cwb(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_cwb, base);
+}
+
+struct dpu_hw_cwb *dpu_hw_cwb_init(struct drm_device *dev,
+ const struct dpu_cwb_cfg *cfg,
+ void __iomem *addr);
+
+#endif /*_DPU_HW_CWB_H */
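
A minimal usage sketch for the new CWB mux object, assuming 'dev', 'cfg' and 'addr' come from the surrounding KMS init code:

static int example_setup_cwb(struct drm_device *dev,
			     const struct dpu_cwb_cfg *cfg,
			     void __iomem *addr)
{
	struct dpu_hw_cwb_setup_cfg cwb_cfg = {
		.pp_idx = PINGPONG_0,		/* real-time pingpong feeding CWB */
		.input = INPUT_MODE_LM_OUT,	/* tap before the DSPP */
	};
	struct dpu_hw_cwb *cwb;

	cwb = dpu_hw_cwb_init(dev, cfg, addr);
	if (IS_ERR(cwb))
		return PTR_ERR(cwb);

	cwb->ops.config_cwb(cwb, &cwb_cfg);

	return 0;
}
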
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 5e9aad1b2aa2..657200401f57 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -190,6 +190,13 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
};
+/**
+ * dpu_hw_dsc_init() - Initializes the DSC hw driver object.
+ * @dev: Corresponding device for devres management
+ * @cfg: DSC catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_dsc context
+ */
struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index 989c88d2449b..fc171bdeca48 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -62,24 +62,10 @@ struct dpu_hw_dsc {
struct dpu_hw_dsc_ops ops;
};
-/**
- * dpu_hw_dsc_init() - Initializes the DSC hw driver object.
- * @dev: Corresponding device for devres management
- * @cfg: DSC catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * Return: Error code or allocated dpu_hw_dsc context
- */
struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
void __iomem *addr);
-/**
- * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object
- * @dev: Corresponding device for devres management
- * @cfg: DSC catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * Returns: Error code or allocated dpu_hw_dsc context
- */
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
index ba193b0376fe..b9c433567262 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
@@ -369,6 +369,13 @@ static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops,
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2;
}
+/**
+ * dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object
+ * @dev: Corresponding device for devres management
+ * @cfg: DSC catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Returns: Error code or allocated dpu_hw_dsc context
+ */
struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index b1da88e2935f..829ca272873e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -70,6 +70,14 @@ static void _setup_dspp_ops(struct dpu_hw_dspp *c,
c->ops.setup_pcc = dpu_setup_dspp_pcc;
}
+/**
+ * dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
+ * should be called once before accessing every DSPP.
+ * @dev: Corresponding device for devres management
+ * @cfg: DSPP catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: pointer to structure or ERR_PTR
+ */
struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
const struct dpu_dspp_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
index 3b435690b6cc..45c26cd49fa3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -78,14 +78,6 @@ static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
return container_of(hw, struct dpu_hw_dspp, base);
}
-/**
- * dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
- * should be called once before accessing every DSPP.
- * @dev: Corresponding device for devres management
- * @cfg: DSPP catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * Return: pointer to structure or ERR_PTR
- */
struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
const struct dpu_dspp_cfg *cfg,
void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index b85881aab047..49bd77a755aa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -237,6 +237,11 @@ static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int
irq_entry->cb(irq_entry->arg);
}
+/**
+ * dpu_core_irq - core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: interrupt handling status
+ */
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -442,6 +447,12 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
wmb();
}
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @return: non-zero if the interrupt was raised, zero otherwise
+ */
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
unsigned int irq_idx)
{
@@ -476,6 +487,12 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
return intr_status;
}
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @dev: Corresponding device for devres management
+ * @addr: mapped register io address of MDP
+ * @m: pointer to MDSS catalog data
+ */
struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
void __iomem *addr,
const struct dpu_mdss_cfg *m)
@@ -517,6 +534,17 @@ struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
return intr;
}
+/**
+ * dpu_core_irq_register_callback - register a callback function for an IRQ
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback function.
+ * @irq_arg: IRQ callback argument.
+ * @return: 0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
unsigned int irq_idx,
void (*irq_cb)(void *arg),
@@ -567,6 +595,15 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
return 0;
}
+/**
+ * dpu_core_irq_unregister_callback - unregister the callback for an IRQ
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @return: 0 on successfully unregistering the callback, otherwise failure
+ */
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms,
unsigned int irq_idx)
{
@@ -628,6 +665,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ */
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
@@ -636,6 +678,11 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
}
#endif
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: none
+ */
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -653,6 +700,11 @@ void dpu_core_irq_preinstall(struct msm_kms *kms)
}
}
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: none
+ */
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
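
Usage sketch for the callback API documented above (editorial; the callback and irq index are hypothetical):

static void example_vblank_cb(void *arg)
{
	/* invoked from dpu_core_irq_callback_handler() when the irq fires */
}

static int example_irq_usage(struct dpu_kms *dpu_kms, unsigned int irq_idx)
{
	int ret;

	ret = dpu_core_irq_register_callback(dpu_kms, irq_idx,
					     example_vblank_cb, NULL);
	if (ret)
		return ret;

	/* ... interrupt in use ... */

	return dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
}
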
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 564b750a28fe..142358a105c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -68,12 +68,6 @@ struct dpu_hw_intr {
struct dpu_hw_intr_entry irq_tbl[DPU_NUM_IRQS];
};
-/**
- * dpu_hw_intr_init(): Initializes the interrupts hw object
- * @dev: Corresponding device for devres management
- * @addr: mapped register io address of MDP
- * @m: pointer to MDSS catalog data
- */
struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
void __iomem *addr,
const struct dpu_mdss_cfg *m);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 29cb854f831a..fb1d25baa518 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -547,6 +547,14 @@ static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *intf,
DPU_REG_WRITE(&intf->hw, INTF_CONFIG2, intf_cfg2);
}
+/**
+ * dpu_hw_intf_init() - Initializes the INTF driver for the passed
+ * interface catalog entry.
+ * @dev: Corresponding device for devres management
+ * @cfg: interface catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
+ */
struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
const struct dpu_intf_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index fc23650dfbf0..114be272ac0a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -130,14 +130,6 @@ struct dpu_hw_intf {
struct dpu_hw_intf_ops ops;
};
-/**
- * dpu_hw_intf_init() - Initializes the INTF driver for the passed
- * interface catalog entry.
- * @dev: Corresponding device for devres management
- * @cfg: interface catalog entry for which driver object is required
- * @addr: mapped register io address of MDP
- * @mdss_rev: dpu core's major and minor versions
- */
struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
const struct dpu_intf_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 1d3ccf3228c6..81b56f066519 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -158,6 +158,13 @@ static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
ops->collect_misr = dpu_hw_lm_collect_misr;
}
+/**
+ * dpu_hw_lm_init() - Initializes the mixer hw driver object.
+ * should be called once before accessing every mixer.
+ * @dev: Corresponding device for devres management
+ * @cfg: mixer catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ */
struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
const struct dpu_lm_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 0a3381755249..6f60fa9b3cd7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -93,13 +93,6 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
return container_of(hw, struct dpu_hw_mixer, base);
}
-/**
- * dpu_hw_lm_init() - Initializes the mixer hw driver object.
- * should be called once before accessing every mixer.
- * @dev: Corresponding device for devres management
- * @cfg: mixer catalog entry for which driver object is required
- * @addr: mapped register io address of MDP
- */
struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
const struct dpu_lm_cfg *cfg,
void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index a2eff36a2224..ba7bb05efe9b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_MDSS_H
@@ -181,10 +183,10 @@ enum dpu_pingpong {
PINGPONG_3,
PINGPONG_4,
PINGPONG_5,
- PINGPONG_6,
- PINGPONG_7,
- PINGPONG_8,
- PINGPONG_9,
+ PINGPONG_CWB_0,
+ PINGPONG_CWB_1,
+ PINGPONG_CWB_2,
+ PINGPONG_CWB_3,
PINGPONG_S0,
PINGPONG_MAX
};
@@ -293,7 +295,6 @@ enum dpu_3d_blend_mode {
/**
* struct dpu_hw_fmt_layout - format information of the source pixel data
- * @format: pixel format parameters
* @num_planes: number of planes (including meta data planes)
* @width: image width
* @height: image height
@@ -303,7 +304,6 @@ enum dpu_3d_blend_mode {
* @plane_pitch: pitch of each plane
*/
struct dpu_hw_fmt_layout {
- const struct msm_format *format;
uint32_t num_planes;
uint32_t width;
uint32_t height;
@@ -352,6 +352,7 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_DSPP (1 << 10)
#define DPU_DBG_MASK_DSC (1 << 11)
#define DPU_DBG_MASK_CDM (1 << 12)
+#define DPU_DBG_MASK_CWB (1 << 13)
/**
* struct dpu_hw_tear_check - Struct contains parameters to configure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
index ddfa40a959cb..0b3325f9c870 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -39,6 +39,14 @@ static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
};
+/**
+ * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed
+ * merge3d catalog entry.
+ * @dev: Corresponding device for devres management
+ * @cfg: Pingpong catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * Return: Error code or allocated dpu_hw_merge_3d context
+ */
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
const struct dpu_merge_3d_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
index c192f02ec1ab..6833c0207523 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
@@ -45,14 +45,6 @@ static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw)
return container_of(hw, struct dpu_hw_merge_3d, base);
}
-/**
- * dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed
- * merge3d catalog entry.
- * @dev: Corresponding device for devres management
- * @cfg: Pingpong catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * Return: Error code or allocated dpu_hw_merge_3d context
- */
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
const struct dpu_merge_3d_cfg *cfg,
void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 2db4c6fba37a..36c0ec775b92 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -283,6 +283,15 @@ static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
return 0;
}
+/**
+ * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed
+ * pingpong catalog entry.
+ * @dev: Corresponding device for devres management
+ * @cfg: Pingpong catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
+ * Return: Error code or allocated dpu_hw_pingpong context
+ */
struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
const struct dpu_pingpong_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index a48b69fd79a3..dd99e1c21a1e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -118,15 +118,6 @@ static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
return container_of(hw, struct dpu_hw_pingpong, base);
}
-/**
- * dpu_hw_pingpong_init() - initializes the pingpong driver for the passed
- * pingpong catalog entry.
- * @dev: Corresponding device for devres management
- * @cfg: Pingpong catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * @mdss_rev: dpu core's major and minor versions
- * Return: Error code or allocated dpu_hw_pingpong context
- */
struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
const struct dpu_pingpong_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 2c720f1fc1b2..32c7c8084553 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -672,6 +672,15 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
}
#endif
+/**
+ * dpu_hw_sspp_init() - Initializes the sspp hw driver object.
+ * Should be called once before accessing every pipe.
+ * @dev: Corresponding device for devres management
+ * @cfg: Pipe catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @mdss_data: UBWC / MDSS configuration data
+ * @mdss_rev: dpu core's major and minor versions
+ */
struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
const struct dpu_sspp_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 4a910b808687..56a0edf2a57c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -12,6 +12,8 @@
struct dpu_hw_sspp;
+#define DPU_SSPP_MAX_PITCH_SIZE 0xffff
+
/**
* Flags
*/
@@ -142,10 +144,12 @@ struct dpu_hw_pixel_ext {
* @src_rect: src ROI, caller takes into account the different operations
* such as decimation, flip etc to program this field
* @dest_rect: destination ROI.
+ * @rotation: simplified drm rotation hint
*/
struct dpu_sw_pipe_cfg {
struct drm_rect src_rect;
struct drm_rect dst_rect;
+ unsigned int rotation;
};
/**
@@ -315,15 +319,7 @@ struct dpu_hw_sspp {
};
struct dpu_kms;
-/**
- * dpu_hw_sspp_init() - Initializes the sspp hw driver object.
- * Should be called once before accessing every pipe.
- * @dev: Corresponding device for devres management
- * @cfg: Pipe catalog entry for which driver object is required
- * @addr: Mapped register io address of MDP
- * @mdss_data: UBWC / MDSS configuration data
- * @mdss_rev: dpu core's major and minor versions
- */
+
struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
const struct dpu_sspp_cfg *cfg,
void __iomem *addr,
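
Editorial sketch of the reworked dpu_sw_pipe_cfg: the new 'rotation' member carries the rotation hint derived during atomic_check, so later stages such as _dpu_plane_setup_scaler() no longer need a separate rotation argument. The field values below are placeholders:

static void example_pipe_cfg(void)
{
	struct dpu_sw_pipe_cfg cfg = {
		.src_rect = DRM_RECT_INIT(0, 0, 1920, 1080),
		.dst_rect = DRM_RECT_INIT(0, 0, 1920, 1080),
		.rotation = DRM_MODE_ROTATE_0,	/* simplified hint from atomic_check */
	};

	(void)cfg;
}
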
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 0f40eea7f5e2..ad19330de61a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -284,6 +284,13 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed config
+ * @dev: Corresponding device for devres management
+ * @cfg: MDP TOP configuration from catalog
+ * @addr: Mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
+ */
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index f1ab9fd106e5..04efdcd21ceb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -157,18 +157,9 @@ struct dpu_hw_mdp {
struct dpu_hw_mdp_ops ops;
};
-/**
- * dpu_hw_mdptop_init - initializes the top driver for the passed config
- * @dev: Corresponding device for devres management
- * @cfg: MDP TOP configuration from catalog
- * @addr: Mapped register io address of MDP
- * @mdss_rev: dpu core's major and minor versions
- */
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
const struct dpu_mdss_version *mdss_rev);
-void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
-
#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index 98e34afde2d2..af76ad8a8103 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -213,6 +213,13 @@ static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
ops->set_write_gather_en = dpu_hw_set_write_gather_en;
}
+/**
+ * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed
+ * VBIF catalog entry.
+ * @dev: Corresponding device for devres management
+ * @cfg: VBIF catalog entry for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ */
struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev,
const struct dpu_vbif_cfg *cfg,
void __iomem *addr)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
index e2b4307500e4..285121ec804c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -105,13 +105,6 @@ struct dpu_hw_vbif {
struct dpu_hw_vbif_ops ops;
};
-/**
- * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed
- * VBIF catalog entry.
- * @dev: Corresponding device for devres management
- * @cfg: VBIF catalog entry for which driver object is required
- * @addr: Mapped register io address of MDSS
- */
struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev,
const struct dpu_vbif_cfg *cfg,
void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index 93ff01c889b5..4853e516c487 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -64,10 +64,10 @@ static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
}
static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
- struct dpu_hw_wb_cfg *data)
+ struct dpu_hw_wb_cfg *data,
+ const struct msm_format *fmt)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
- const struct msm_format *fmt = data->dest.format;
u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
u32 write_config = 0;
u32 opmode = 0;
@@ -173,7 +173,9 @@ static void dpu_hw_wb_bind_pingpong_blk(
mux_cfg = DPU_REG_READ(c, WB_MUX);
mux_cfg &= ~0xf;
- if (pp)
+ if (pp >= PINGPONG_CWB_0)
+ mux_cfg |= (pp < PINGPONG_CWB_2) ? 0xd : 0xb;
+ else if (pp)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;
@@ -213,6 +215,14 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl;
}
+/**
+ * dpu_hw_wb_init() - Initializes the writeback hw driver object.
+ * @dev: Corresponding device for devres management
+ * @cfg: wb_path catalog entry for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @mdss_rev: dpu core's major and minor versions
+ * Return: Error code or allocated dpu_hw_wb context
+ */
struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev,
const struct dpu_wb_cfg *cfg,
void __iomem *addr,
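
Editorial restatement of the WB_MUX selection logic added above, as a pure function; the 0xd/0xb/0xf encodings are taken verbatim from the hunk, their hardware meaning is inferred:

static u32 example_wb_mux_value(enum dpu_pingpong pp)
{
	if (pp >= PINGPONG_CWB_0)		/* CWB pingpong pair select */
		return (pp < PINGPONG_CWB_2) ? 0xd : 0xb;
	else if (pp)				/* real-time pingpong index */
		return (pp - PINGPONG_0) & 0x7;
	else					/* disconnect writeback */
		return 0xf;
}
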
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
index 37497473e16c..ee5e5ab786e1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -37,7 +37,8 @@ struct dpu_hw_wb_ops {
struct dpu_hw_wb_cfg *wb);
void (*setup_outformat)(struct dpu_hw_wb *ctx,
- struct dpu_hw_wb_cfg *wb);
+ struct dpu_hw_wb_cfg *wb,
+ const struct msm_format *fmt);
void (*setup_roi)(struct dpu_hw_wb *ctx,
struct dpu_hw_wb_cfg *wb);
@@ -74,14 +75,6 @@ struct dpu_hw_wb {
struct dpu_hw_wb_ops ops;
};
-/**
- * dpu_hw_wb_init() - Initializes the writeback hw driver object.
- * @dev: Corresponding device for devres management
- * @cfg: wb_path catalog entry for which driver object is required
- * @addr: mapped register io address of MDP
- * @mdss_rev: dpu core's major and minor versions
- * Return: Error code or allocated dpu_hw_wb context
- */
struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev,
const struct dpu_wb_cfg *cfg,
void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 9bcae53c4f45..97e9cb8c2b09 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -51,6 +51,9 @@
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+bool dpu_use_virtual_planes;
+module_param(dpu_use_virtual_planes, bool, 0);
+
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
@@ -230,6 +233,21 @@ static int dpu_regset32_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(dpu_regset32);
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
void *parent,
uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
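
Usage sketch for the helper documented above (editorial; the file name, offset and length are placeholders):

static void example_regset(struct dentry *debugfs_root, struct dpu_kms *dpu_kms)
{
	/* expose 0x100 bytes of registers starting at a sub-block offset */
	dpu_debugfs_create_regset32("example_regs", 0400, debugfs_root,
				    0x6a000, 0x100, dpu_kms);
}
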
@@ -814,8 +832,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
type, catalog->sspp[i].features,
catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
- plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
- (1UL << max_crtc_count) - 1);
+ if (dpu_use_virtual_planes)
+ plane = dpu_plane_init_virtual(dev, type, (1UL << max_crtc_count) - 1);
+ else
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
+ (1UL << max_crtc_count) - 1);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -917,12 +938,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump CTL sub-blocks HW regs info */
for (i = 0; i < cat->ctl_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
- dpu_kms->mmio + cat->ctl[i].base, cat->ctl[i].name);
+ dpu_kms->mmio + cat->ctl[i].base, "%s",
+ cat->ctl[i].name);
/* dump DSPP sub-blocks HW regs info */
for (i = 0; i < cat->dspp_count; i++) {
base = dpu_kms->mmio + cat->dspp[i].base;
- msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base, cat->dspp[i].name);
+ msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base,
+ "%s", cat->dspp[i].name);
if (cat->dspp[i].sblk && cat->dspp[i].sblk->pcc.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->dspp[i].sblk->pcc.len,
@@ -934,13 +957,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump INTF sub-blocks HW regs info */
for (i = 0; i < cat->intf_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
- dpu_kms->mmio + cat->intf[i].base, cat->intf[i].name);
+ dpu_kms->mmio + cat->intf[i].base, "%s",
+ cat->intf[i].name);
/* dump PP sub-blocks HW regs info */
for (i = 0; i < cat->pingpong_count; i++) {
base = dpu_kms->mmio + cat->pingpong[i].base;
msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, base,
- cat->pingpong[i].name);
+ "%s", cat->pingpong[i].name);
/* TE2 sub-block has length of 0, so will not print it */
@@ -954,7 +978,8 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump SSPP sub-blocks HW regs info */
for (i = 0; i < cat->sspp_count; i++) {
base = dpu_kms->mmio + cat->sspp[i].base;
- msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base, cat->sspp[i].name);
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base,
+ "%s", cat->sspp[i].name);
if (cat->sspp[i].sblk && cat->sspp[i].sblk->scaler_blk.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->scaler_blk.len,
@@ -972,12 +997,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump LM sub-blocks HW regs info */
for (i = 0; i < cat->mixer_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
- dpu_kms->mmio + cat->mixer[i].base, cat->mixer[i].name);
+ dpu_kms->mmio + cat->mixer[i].base,
+ "%s", cat->mixer[i].name);
/* dump WB sub-blocks HW regs info */
for (i = 0; i < cat->wb_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
- dpu_kms->mmio + cat->wb[i].base, cat->wb[i].name);
+ dpu_kms->mmio + cat->wb[i].base, "%s",
+ cat->wb[i].name);
if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
@@ -989,10 +1016,16 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
dpu_kms->mmio + cat->mdp[0].base, "top");
}
+ /* dump CWB sub-blocks HW regs info */
+ for (i = 0; i < cat->cwb_count; i++)
+		msm_disp_snapshot_add_block(disp_state, cat->cwb[i].len,
+					    dpu_kms->mmio + cat->cwb[i].base,
+					    "%s", cat->cwb[i].name);
+
/* dump DSC sub-blocks HW regs info */
for (i = 0; i < cat->dsc_count; i++) {
base = dpu_kms->mmio + cat->dsc[i].base;
- msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base, cat->dsc[i].name);
+ msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base,
+ "%s", cat->dsc[i].name);
if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
@@ -1007,7 +1040,16 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
if (cat->cdm)
msm_disp_snapshot_add_block(disp_state, cat->cdm->len,
- dpu_kms->mmio + cat->cdm->base, cat->cdm->name);
+ dpu_kms->mmio + cat->cdm->base,
+ "%s", cat->cdm->name);
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ msm_disp_snapshot_add_block(disp_state, vbif->len,
+ dpu_kms->vbif[vbif->id] + vbif->base,
+ "%s", vbif->name);
+ }
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
@@ -1025,7 +1067,6 @@ static const struct msm_kms_funcs kms_funcs = {
.complete_commit = dpu_kms_complete_commit,
.enable_vblank = dpu_kms_enable_vblank,
.disable_vblank = dpu_kms_disable_vblank,
- .check_modified_format = dpu_format_check_modified_format,
.destroy = dpu_kms_destroy,
.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
@@ -1061,6 +1102,13 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
return 0;
}
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms: pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
struct clk *clk;
@@ -1202,13 +1250,8 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- /*
- * max crtc width is equal to the max mixer width * 2 and max height is
- * is 4K
- */
- dev->mode_config.max_width =
- dpu_kms->catalog->caps->max_mixer_width * 2;
- dev->mode_config.max_height = 4096;
+ dev->mode_config.max_width = DPU_MAX_IMG_WIDTH;
+ dev->mode_config.max_height = DPU_MAX_IMG_HEIGHT;
dev->max_vblank_count = 0xffffffff;
/* Disable vblank irqs aggressively for power-saving */
@@ -1445,8 +1488,13 @@ static const struct dev_pm_ops dpu_pm_ops = {
};
static const struct of_device_id dpu_dt_match[] = {
+ { .compatible = "qcom,msm8917-mdp5", .data = &dpu_msm8917_cfg, },
+ { .compatible = "qcom,msm8937-mdp5", .data = &dpu_msm8937_cfg, },
+ { .compatible = "qcom,msm8953-mdp5", .data = &dpu_msm8953_cfg, },
+ { .compatible = "qcom,msm8996-mdp5", .data = &dpu_msm8996_cfg, },
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
+ { .compatible = "qcom,sa8775p-dpu", .data = &dpu_sa8775p_cfg, },
{ .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, },
{ .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, },
{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
@@ -1457,6 +1505,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
{ .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
+ { .compatible = "qcom,sm6150-dpu", .data = &dpu_sm6150_cfg, },
{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
{ .compatible = "qcom,sm7150-dpu", .data = &dpu_sm7150_cfg, },
@@ -1473,7 +1522,7 @@ MODULE_DEVICE_TABLE(of, dpu_dt_match);
static struct platform_driver dpu_driver = {
.probe = dpu_dev_probe,
- .remove_new = dpu_dev_remove,
+ .remove = dpu_dev_remove,
.shutdown = msm_kms_shutdown,
.driver = {
.name = "msm_dpu",
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 935ff6fd172c..547cdb2c0c78 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -54,6 +54,8 @@
#define ktime_compare_safe(A, B) \
ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+extern bool dpu_use_virtual_planes;
+
struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
@@ -128,6 +130,8 @@ struct dpu_global_state {
uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
uint32_t cdm_to_enc_id;
+
+ uint32_t sspp_to_crtc_id[SSPP_MAX - SSPP_NONE];
};
struct dpu_global_state
@@ -145,38 +149,11 @@ struct dpu_global_state
* @dpu_debugfs_create_regset32: Create 32-bit register dump file
*/
-/**
- * dpu_debugfs_create_regset32 - Create register read back file for debugfs
- *
- * This function is almost identical to the standard debugfs_create_regset32()
- * function, with the main difference being that a list of register
- * names/offsets do not need to be provided. The 'read' function simply outputs
- * sequential register values over a specified range.
- *
- * @name: File name within debugfs
- * @mode: File mode within debugfs
- * @parent: Parent directory entry within debugfs, can be NULL
- * @offset: sub-block offset
- * @length: sub-block length, in bytes
- * @dpu_kms: pointer to dpu kms structure
- */
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
void *parent,
uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
/**
- * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
- *
- * The return value should be passed as the 'parent' argument to subsequent
- * debugfs create calls.
- *
- * @dpu_kms: Pointer to DPU's KMS structure
- *
- * Return: dentry pointer for DPU's debugfs location
- */
-void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
-
-/**
* DPU info management functions
* These functions/definitions allow for building up a 'dpu_info' structure
* containing one or more "key=value\n" entries.
@@ -189,13 +166,6 @@ void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
-/**
- * dpu_kms_get_clk_rate() - get the clock rate
- * @dpu_kms: pointer to dpu_kms structure
- * @clock_name: clock name to get the rate
- *
- * Return: current clock rate
- */
unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
#endif /* __dpu_kms_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 29298e066163..098abc2c0003 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -20,7 +20,6 @@
#include "msm_drv.h"
#include "msm_mdss.h"
#include "dpu_kms.h"
-#include "dpu_formats.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_util.h"
#include "dpu_trace.h"
@@ -528,8 +527,7 @@ static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe,
static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe,
const struct msm_format *fmt, bool color_fill,
- struct dpu_sw_pipe_cfg *pipe_cfg,
- unsigned int rotation)
+ struct dpu_sw_pipe_cfg *pipe_cfg)
{
struct dpu_hw_sspp *pipe_hw = pipe->sspp;
const struct drm_format_info *info = drm_format_info(fmt->pixel_format);
@@ -552,7 +550,7 @@ static void _dpu_plane_setup_scaler(struct dpu_sw_pipe *pipe,
dst_height,
&scaler3_cfg, fmt,
info->hsub, info->vsub,
- rotation);
+ pipe_cfg->rotation);
/* configure pixel extension based on scalar config */
_dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext,
@@ -604,7 +602,7 @@ static void _dpu_plane_color_fill_pipe(struct dpu_plane_state *pstate,
if (pipe->sspp->ops.setup_rects)
pipe->sspp->ops.setup_rects(pipe, &pipe_cfg);
- _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg, pstate->rotation);
+ _dpu_plane_setup_scaler(pipe, fmt, true, &pipe_cfg);
}
/**
@@ -648,7 +646,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_state->fb;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
- struct dpu_hw_fmt_layout layout;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
@@ -676,17 +673,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
}
}
- /* validate framebuffer layout before commit */
- ret = dpu_format_populate_layout(pstate->aspace,
- new_state->fb, &layout);
- if (ret) {
- DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
- if (pstate->aspace)
- msm_framebuffer_cleanup(new_state->fb, pstate->aspace,
- pstate->needs_dirtyfb);
- return ret;
- }
-
return 0;
}
@@ -708,12 +694,17 @@ static void dpu_plane_cleanup_fb(struct drm_plane *plane,
}
static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
- const struct dpu_sspp_sub_blks *sblk,
- struct drm_rect src, const struct msm_format *fmt)
+ struct dpu_sw_pipe *pipe,
+ struct drm_rect src,
+ const struct msm_format *fmt)
{
+ const struct dpu_sspp_sub_blks *sblk = pipe->sspp->cap->sblk;
size_t num_formats;
const u32 *supported_formats;
+ if (!test_bit(DPU_SSPP_INLINE_ROTATION, &pipe->sspp->cap->features))
+ return -EINVAL;
+
if (!sblk->rotation_cfg) {
DPU_ERROR("invalid rotation cfg\n");
return -EINVAL;
@@ -743,6 +734,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
{
uint32_t min_src_size;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ int ret;
min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -780,6 +772,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
return -EINVAL;
}
+ if (pipe_cfg->rotation & DRM_MODE_ROTATE_90) {
+ ret = dpu_plane_check_inline_rotation(pdpu, pipe, pipe_cfg->src_rect, fmt);
+ if (ret)
+ return ret;
+ }
+
/* max clk check */
if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) {
DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n");
@@ -789,37 +787,29 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
return 0;
}
-static int dpu_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+#define MAX_UPSCALE_RATIO 20
+#define MAX_DOWNSCALE_RATIO 4
+
+static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state,
+ const struct drm_crtc_state *crtc_state)
{
- struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- int ret = 0, min_scale;
+ int i, ret = 0, min_scale, max_scale;
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate;
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
- struct dpu_sw_pipe *pipe = &pstate->pipe;
- struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
- const struct drm_crtc_state *crtc_state = NULL;
- const struct msm_format *fmt;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
struct drm_rect fb_rect = { 0 };
uint32_t max_linewidth;
- unsigned int rotation;
- uint32_t supported_rotations;
- const struct dpu_sspp_cfg *pipe_hw_caps = pstate->pipe.sspp->cap;
- const struct dpu_sspp_sub_blks *sblk = pstate->pipe.sspp->cap->sblk;
- if (new_plane_state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(state,
- new_plane_state->crtc);
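+ /*
+ * Use generic 16.16 fixed-point ratios here instead of the per-SSPP
+ * sblk limits; the chosen pipe's actual scaling ability is validated
+ * later in dpu_plane_atomic_check_sspp().
+ */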
+ min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO);
+ max_scale = MAX_DOWNSCALE_RATIO << 16;
- min_scale = FRAC_16_16(1, sblk->maxupscale);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale,
- sblk->maxdwnscale << 16,
+ max_scale,
true, true);
if (ret) {
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
@@ -828,12 +818,6 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->visible)
return 0;
- pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
- r_pipe->sspp = NULL;
-
pstate->stage = DPU_STAGE_0 + pstate->base.normalized_zpos;
if (pstate->stage >= pdpu->catalog->caps->max_mixer_blendstages) {
DPU_ERROR("> %d plane stages assigned\n",
@@ -841,13 +825,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- pipe_cfg->src_rect = new_plane_state->src;
-
/* state->src is 16.16, src_rect is not */
- pipe_cfg->src_rect.x1 >>= 16;
- pipe_cfg->src_rect.x2 >>= 16;
- pipe_cfg->src_rect.y1 >>= 16;
- pipe_cfg->src_rect.y2 >>= 16;
+ drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src);
pipe_cfg->dst_rect = new_plane_state->dst;
@@ -855,14 +834,22 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
fb_rect.y2 = new_plane_state->fb->height;
/* Ensure fb size is supported */
- if (drm_rect_width(&fb_rect) > MAX_IMG_WIDTH ||
- drm_rect_height(&fb_rect) > MAX_IMG_HEIGHT) {
+ if (drm_rect_width(&fb_rect) > DPU_MAX_IMG_WIDTH ||
+ drm_rect_height(&fb_rect) > DPU_MAX_IMG_HEIGHT) {
DPU_DEBUG_PLANE(pdpu, "invalid framebuffer " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&fb_rect));
return -E2BIG;
}
- fmt = msm_framebuffer_format(new_plane_state->fb);
+ ret = dpu_format_populate_plane_sizes(new_plane_state->fb, &pstate->layout);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "failed to get format plane sizes, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < pstate->layout.num_planes; i++)
+ if (pstate->layout.plane_pitch[i] > DPU_SSPP_MAX_PITCH_SIZE)
+ return -E2BIG;
max_linewidth = pdpu->catalog->caps->max_linewidth;
@@ -872,93 +859,329 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
_dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
- /*
- * In parallel multirect case only the half of the usual width
- * is supported for tiled formats. If we are here, we know that
- * full width is more than max_linewidth, thus each rect is
- * wider than allowed.
- */
- if (MSM_FORMAT_IS_UBWC(fmt) &&
- drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
- DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
- return -E2BIG;
- }
-
if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
return -E2BIG;
}
- if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
- drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) ||
- (!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) &&
- !test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) ||
- MSM_FORMAT_IS_YUV(fmt)) {
- DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
- return -E2BIG;
- }
-
- /*
- * Use multirect for wide plane. We do not support dynamic
- * assignment of SSPPs, so we know the configuration.
- */
- pipe->multirect_index = DPU_SSPP_RECT_0;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
-
- r_pipe->sspp = pipe->sspp;
- r_pipe->multirect_index = DPU_SSPP_RECT_1;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
-
*r_pipe_cfg = *pipe_cfg;
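+ /* Wide plane: give the left half to pipe_cfg and the right half to r_pipe_cfg. */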
pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
+ } else {
+ memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg));
}
drm_rect_rotate_inv(&pipe_cfg->src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
- if (r_pipe->sspp)
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
- ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
+ pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
+
+ return 0;
+}
+
+static bool dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
+ drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect))
+ return false;
+
+ if (pipe_cfg->rotation & DRM_MODE_ROTATE_90)
+ return false;
+
+ if (MSM_FORMAT_IS_YUV(fmt))
+ return false;
+
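+ /*
+ * In parallel multirect, tiled/UBWC formats support only half the
+ * usual linewidth per rectangle.
+ */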
+ if (MSM_FORMAT_IS_UBWC(fmt) &&
+ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
+ return false;
+
+ if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
+ !test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
+ return false;
+
+ return true;
+}
+
+static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
+ struct drm_atomic_state *state,
+ const struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane_state *new_plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ const struct msm_format *fmt;
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
+ uint32_t supported_rotations;
+ const struct dpu_sspp_cfg *pipe_hw_caps;
+ const struct dpu_sspp_sub_blks *sblk;
+ int ret = 0;
+
+ pipe_hw_caps = pipe->sspp->cap;
+ sblk = pipe->sspp->cap->sblk;
+
+ /*
+ * We already have verified scaling against platform limitations.
+ * Now check if the SSPP supports scaling at all.
+ */
+ if (!sblk->scaler_blk.len &&
+ ((drm_rect_width(&new_plane_state->src) >> 16 !=
+ drm_rect_width(&new_plane_state->dst)) ||
+ (drm_rect_height(&new_plane_state->src) >> 16 !=
+ drm_rect_height(&new_plane_state->dst))))
+ return -ERANGE;
+
+ fmt = msm_framebuffer_format(new_plane_state->fb);
+
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+
+ if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_90;
+
+ pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation,
+ supported_rotations);
+ r_pipe_cfg->rotation = pipe_cfg->rotation;
+
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt,
+ &crtc_state->adjusted_mode);
if (ret)
return ret;
- if (r_pipe->sspp) {
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
&crtc_state->adjusted_mode);
if (ret)
return ret;
}
- supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+ return 0;
+}
- if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
- supported_rotations |= DRM_MODE_ROTATE_90;
+static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dpu_sw_pipe_cfg *pipe_cfg,
+ struct dpu_sw_pipe *r_pipe, struct dpu_sw_pipe_cfg *r_pipe_cfg,
+ struct dpu_hw_sspp *sspp, const struct msm_format *fmt,
+ uint32_t max_linewidth)
+{
+ r_pipe->sspp = NULL;
- rotation = drm_rotation_simplify(new_plane_state->rotation,
- supported_rotations);
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
- if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) &&
- (rotation & DRM_MODE_ROTATE_90)) {
- ret = dpu_plane_check_inline_rotation(pdpu, sblk, pipe_cfg->src_rect, fmt);
- if (ret)
- return ret;
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
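+ /*
+ * A non-empty r_pipe_cfg means the plane was split into two rects,
+ * so both halves must be drivable by this SSPP in parallel multirect
+ * mode.
+ */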
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
+ if (!dpu_plane_is_multirect_parallel_capable(pipe->sspp, pipe_cfg, fmt, max_linewidth) ||
+ !dpu_plane_is_multirect_parallel_capable(pipe->sspp, r_pipe_cfg, fmt, max_linewidth))
+ return false;
+
+ r_pipe->sspp = pipe->sspp;
+
+ pipe->multirect_index = DPU_SSPP_RECT_0;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_1;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
}
- pstate->rotation = rotation;
- pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
+ return true;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ int ret = 0;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ struct dpu_sw_pipe *pipe = &pstate->pipe;
+ struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
+ const struct drm_crtc_state *crtc_state = NULL;
+ uint32_t max_linewidth = dpu_kms->catalog->caps->max_linewidth;
+
+ if (new_plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
+
+ pipe->sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe);
+
+ if (!pipe->sspp)
+ return -EINVAL;
+
+ ret = dpu_plane_atomic_check_nosspp(plane, new_plane_state, crtc_state);
+ if (ret)
+ return ret;
+
+ if (!new_plane_state->visible)
+ return 0;
+
+ if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(new_plane_state->fb),
+ max_linewidth)) {
+ DPU_DEBUG_PLANE(pdpu, "invalid " DRM_RECT_FMT " /" DRM_RECT_FMT
+ " max_line:%u, can't use split source\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect),
+ DRM_RECT_ARG(&r_pipe_cfg->src_rect),
+ max_linewidth);
+ return -E2BIG;
+ }
+
+ return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
+}
+
+static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state);
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret;
+
+ if (plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ plane_state->crtc);
+
+ ret = dpu_plane_atomic_check_nosspp(plane, plane_state, crtc_state);
+ if (ret)
+ return ret;
+
+ if (!plane_state->visible) {
+ /*
+ * Resources are freed by dpu_crtc_assign_plane_resources(),
+ * but clear the SSPP pointers here as well.
+ */
+ pstate->pipe.sspp = NULL;
+ pstate->r_pipe.sspp = NULL;
+
+ return 0;
+ }
+
+ /*
+ * Force resource reallocation if the FB format or the src/dst
+ * dimensions have changed. We might need a different SSPP (or two
+ * SSPPs) for this plane than the one used previously.
+ */
+ if (!old_plane_state || !old_plane_state->fb ||
+ old_plane_state->src_w != plane_state->src_w ||
+ old_plane_state->src_h != plane_state->src_h ||
+ old_plane_state->crtc_w != plane_state->crtc_w ||
+ old_plane_state->crtc_h != plane_state->crtc_h ||
+ msm_framebuffer_format(old_plane_state->fb) !=
+ msm_framebuffer_format(plane_state->fb))
+ crtc_state->planes_changed = true;
return 0;
}
+static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
+ struct dpu_global_state *global_state,
+ struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state)
+{
+ const struct drm_crtc_state *crtc_state = NULL;
+ struct drm_plane *plane = plane_state->plane;
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ struct dpu_rm_sspp_requirements reqs;
+ struct dpu_plane_state *pstate;
+ struct dpu_sw_pipe *pipe;
+ struct dpu_sw_pipe *r_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg;
+ const struct msm_format *fmt;
+
+ if (plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ plane_state->crtc);
+
+ pstate = to_dpu_plane_state(plane_state);
+ pipe = &pstate->pipe;
+ r_pipe = &pstate->r_pipe;
+ pipe_cfg = &pstate->pipe_cfg;
+ r_pipe_cfg = &pstate->r_pipe_cfg;
+
+ pipe->sspp = NULL;
+ r_pipe->sspp = NULL;
+
+ if (!plane_state->fb)
+ return -EINVAL;
+
+ fmt = msm_framebuffer_format(plane_state->fb);
+ reqs.yuv = MSM_FORMAT_IS_YUV(fmt);
+ reqs.scale = (plane_state->src_w >> 16 != plane_state->crtc_w) ||
+ (plane_state->src_h >> 16 != plane_state->crtc_h);
+
+ reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation);
+
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!pipe->sspp)
+ return -ENODEV;
+
+ if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(plane_state->fb),
+ dpu_kms->catalog->caps->max_linewidth)) {
+ /* multirect is not possible, use two SSPP blocks */
+ r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
+ if (!r_pipe->sspp)
+ return -ENODEV;
+
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ }
+
+ return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
+}
+
+int dpu_assign_plane_resources(struct dpu_global_state *global_state,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_plane_state **states,
+ unsigned int num_planes)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < num_planes; i++) {
+ struct drm_plane_state *plane_state = states[i];
+
+ if (!plane_state ||
+ !plane_state->visible)
+ continue;
+
+ ret = dpu_plane_virtual_assign_resources(crtc, global_state,
+ state, plane_state);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
{
const struct msm_format *format =
@@ -981,6 +1204,10 @@ static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe
}
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
void dpu_plane_flush(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
@@ -1060,14 +1287,14 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane,
pipe_cfg);
}
- _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg, pstate->rotation);
+ _dpu_plane_setup_scaler(pipe, fmt, false, pipe_cfg);
if (pipe->sspp->ops.setup_multirect)
pipe->sspp->ops.setup_multirect(
pipe);
if (pipe->sspp->ops.setup_format) {
- unsigned int rotation = pstate->rotation;
+ unsigned int rotation = pipe_cfg->rotation;
src_flags = 0x0;
@@ -1101,7 +1328,8 @@ static void dpu_plane_sspp_update_pipe(struct drm_plane *plane,
_dpu_plane_set_qos_remap(plane, pipe);
}
-static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
+static void dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
@@ -1115,17 +1343,6 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
msm_framebuffer_format(fb);
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
- struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
- struct msm_gem_address_space *aspace = kms->base.aspace;
- struct dpu_hw_fmt_layout layout;
- bool layout_valid = false;
- int ret;
-
- ret = dpu_format_populate_layout(aspace, fb, &layout);
- if (ret)
- DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
- else
- layout_valid = true;
pstate->pending = true;
@@ -1133,6 +1350,8 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
pdpu->is_rt_pipe = is_rt_pipe;
+ dpu_format_populate_addrs(pstate->aspace, new_state->fb, &pstate->layout);
+
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
", %p4cc ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
crtc->base.id, DRM_RECT_ARG(&state->dst),
@@ -1140,12 +1359,12 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt,
drm_mode_vrefresh(&crtc->mode),
- layout_valid ? &layout : NULL);
+ &pstate->layout);
if (r_pipe->sspp) {
dpu_plane_sspp_update_pipe(plane, r_pipe, r_pipe_cfg, fmt,
drm_mode_vrefresh(&crtc->mode),
- layout_valid ? &layout : NULL);
+ &pstate->layout);
}
if (pstate->needs_qos_remap)
@@ -1197,7 +1416,7 @@ static void dpu_plane_atomic_update(struct drm_plane *plane,
if (!new_state->visible) {
_dpu_plane_atomic_disable(plane);
} else {
- dpu_plane_sspp_atomic_update(plane);
+ dpu_plane_sspp_atomic_update(plane, new_state);
}
}
@@ -1279,12 +1498,15 @@ static void dpu_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tstage=%d\n", pstate->stage);
- drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
- drm_printf(p, "\tmultirect_mode[0]=%s\n", dpu_get_multirect_mode(pipe->multirect_mode));
- drm_printf(p, "\tmultirect_index[0]=%s\n",
- dpu_get_multirect_index(pipe->multirect_index));
- drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
- drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
+ if (pipe->sspp) {
+ drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
+ drm_printf(p, "\tmultirect_mode[0]=%s\n",
+ dpu_get_multirect_mode(pipe->multirect_mode));
+ drm_printf(p, "\tmultirect_index[0]=%s\n",
+ dpu_get_multirect_index(pipe->multirect_index));
+ drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
+ drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
+ }
if (r_pipe->sspp) {
drm_printf(p, "\tsspp[1]=%s\n", r_pipe->sspp->cap->name);
@@ -1301,7 +1523,6 @@ static void dpu_plane_reset(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
struct dpu_plane_state *pstate;
- struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
if (!plane) {
DPU_ERROR("invalid plane\n");
@@ -1323,16 +1544,6 @@ static void dpu_plane_reset(struct drm_plane *plane)
return;
}
- /*
- * Set the SSPP here until we have proper virtualized DPU planes.
- * This is the place where the state is allocated, so fill it fully.
- */
- pstate->pipe.sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe);
- pstate->pipe.multirect_index = DPU_SSPP_RECT_SOLO;
- pstate->pipe.multirect_mode = DPU_SSPP_MULTIRECT_NONE;
-
- pstate->r_pipe.sspp = NULL;
-
__drm_atomic_helper_plane_reset(plane, &pstate->base);
}
@@ -1388,31 +1599,29 @@ static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
.atomic_update = dpu_plane_atomic_update,
};
+static const struct drm_plane_helper_funcs dpu_plane_virtual_helper_funcs = {
+ .prepare_fb = dpu_plane_prepare_fb,
+ .cleanup_fb = dpu_plane_cleanup_fb,
+ .atomic_check = dpu_plane_virtual_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
/* initialize plane */
-struct drm_plane *dpu_plane_init(struct drm_device *dev,
- uint32_t pipe, enum drm_plane_type type,
- unsigned long possible_crtcs)
+static struct drm_plane *dpu_plane_init_common(struct drm_device *dev,
+ enum drm_plane_type type,
+ unsigned long possible_crtcs,
+ bool inline_rotation,
+ const uint32_t *format_list,
+ uint32_t num_formats,
+ enum dpu_sspp pipe)
{
struct drm_plane *plane = NULL;
- const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
- struct dpu_hw_sspp *pipe_hw;
- uint32_t num_formats;
uint32_t supported_rotations;
int ret;
- /* initialize underlying h/w driver */
- pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
- if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
- DPU_ERROR("[%u]SSPP is invalid\n", pipe);
- return ERR_PTR(-EINVAL);
- }
-
- format_list = pipe_hw->cap->sblk->format_list;
- num_formats = pipe_hw->cap->sblk->num_formats;
-
pdpu = drmm_universal_plane_alloc(dev, struct dpu_plane, base,
0xff, &dpu_plane_funcs,
format_list, num_formats,
@@ -1438,7 +1647,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
- if (pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ if (inline_rotation)
supported_rotations |= DRM_MODE_ROTATE_MASK;
drm_plane_create_rotation_property(plane,
@@ -1446,10 +1655,98 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
drm_plane_enable_fb_damage_clips(plane);
- /* success! finalize initialization */
+ DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
+ pipe, plane->base.id);
+ return plane;
+}
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ *
+ * Initialize the plane.
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, enum drm_plane_type type,
+ unsigned long possible_crtcs)
+{
+ struct drm_plane *plane = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
+ struct dpu_hw_sspp *pipe_hw;
+
+ /* initialize underlying h/w driver */
+ pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
+ if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
+ DPU_ERROR("[%u]SSPP is invalid\n", pipe);
+ return ERR_PTR(-EINVAL);
+ }
+
+ plane = dpu_plane_init_common(dev, type, possible_crtcs,
+ pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION),
+ pipe_hw->cap->sblk->format_list,
+ pipe_hw->cap->sblk->num_formats,
+ pipe);
+ if (IS_ERR(plane))
+ return plane;
+
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
pipe, plane->base.id);
+
+ return plane;
+}
+
+/**
+ * dpu_plane_init_virtual - create new virtualized DPU plane
+ * @dev: Pointer to DRM device
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ *
+ * Initialize the virtual plane with no backing SSPP / pipe.
+ */
+struct drm_plane *dpu_plane_init_virtual(struct drm_device *dev,
+ enum drm_plane_type type,
+ unsigned long possible_crtcs)
+{
+ struct drm_plane *plane = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
+ bool has_inline_rotation = false;
+ const u32 *format_list = NULL;
+ u32 num_formats = 0;
+ int i;
+
+ /* Determine the largest configuration that we can implement */
+ for (i = 0; i < kms->catalog->sspp_count; i++) {
+ const struct dpu_sspp_cfg *cfg = &kms->catalog->sspp[i];
+
+ if (test_bit(DPU_SSPP_INLINE_ROTATION, &cfg->features))
+ has_inline_rotation = true;
+
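+ /*
+ * Prefer the format list of a CSC-capable pipe, which is assumed to
+ * be a superset also covering the YUV formats.
+ */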
+ if (!format_list ||
+ cfg->sblk->csc_blk.len) {
+ format_list = cfg->sblk->format_list;
+ num_formats = cfg->sblk->num_formats;
+ }
+ }
+
+ plane = dpu_plane_init_common(dev, type, possible_crtcs,
+ has_inline_rotation,
+ format_list,
+ num_formats,
+ SSPP_NONE);
+ if (IS_ERR(plane))
+ return plane;
+
+ drm_plane_helper_add(plane, &dpu_plane_virtual_helper_funcs);
+
+ DPU_DEBUG("%s created virtual id:%u\n", plane->name, plane->base.id);
+
return plane;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index abd6b21a049b..acd5725175cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -30,7 +30,7 @@
* @plane_fetch_bw: calculated BW per plane
* @plane_clk: calculated clk per plane
* @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed
- * @rotation: simplified drm rotation hint
+ * @layout: framebuffer memory layout
*/
struct dpu_plane_state {
struct drm_plane_state base;
@@ -47,43 +47,25 @@ struct dpu_plane_state {
u64 plane_clk;
bool needs_dirtyfb;
- unsigned int rotation;
+
+ struct dpu_hw_fmt_layout layout;
};
#define to_dpu_plane_state(x) \
container_of(x, struct dpu_plane_state, base)
-/**
- * dpu_plane_flush - final plane operations before commit flush
- * @plane: Pointer to drm plane structure
- */
void dpu_plane_flush(struct drm_plane *plane);
-/**
- * dpu_plane_set_error: enable/disable error condition
- * @plane: pointer to drm_plane structure
- */
void dpu_plane_set_error(struct drm_plane *plane, bool error);
-/**
- * dpu_plane_init - create new dpu plane for the given pipe
- * @dev: Pointer to DRM device
- * @pipe: dpu hardware pipe identifier
- * @type: Plane type - PRIMARY/OVERLAY/CURSOR
- * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
- *
- */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs);
-/**
- * dpu_plane_color_fill - enables color fill on plane
- * @plane: Pointer to DRM plane object
- * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
- * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
- * Returns: 0 on success
- */
+struct drm_plane *dpu_plane_init_virtual(struct drm_device *dev,
+ enum drm_plane_type type,
+ unsigned long possible_crtcs);
+
int dpu_plane_color_fill(struct drm_plane *plane,
uint32_t color, uint32_t alpha);
@@ -93,4 +75,10 @@ void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable);
static inline void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) {}
#endif
+int dpu_assign_plane_resources(struct dpu_global_state *global_state,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_plane_state **states,
+ unsigned int num_planes);
+
#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 44938ba7a2b7..5baf9df702b8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
@@ -9,6 +9,7 @@
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
+#include "dpu_hw_cwb.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
@@ -27,13 +28,15 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
}
/**
- * struct dpu_rm_requirements - Reservation requirements parameter bundle
- * @topology: selected topology for the display
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @dev: Corresponding device for devres management
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mdss_data: Pointer to MDSS / UBWC configuration
+ * @mmio: mapped register io address of MDP
+ * Return: 0 on success, negative error code otherwise
*/
-struct dpu_rm_requirements {
- struct msm_display_topology topology;
-};
-
int dpu_rm_init(struct drm_device *dev,
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
@@ -120,6 +123,19 @@ int dpu_rm_init(struct drm_device *dev,
rm->hw_wb[wb->id - WB_0] = hw;
}
+ for (i = 0; i < cat->cwb_count; i++) {
+ struct dpu_hw_cwb *hw;
+ const struct dpu_cwb_cfg *cwb = &cat->cwb[i];
+
+ hw = dpu_hw_cwb_init(dev, cwb, mmio);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed cwb object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->cwb_blks[cwb->id - CWB_0] = &hw->base;
+ }
+
for (i = 0; i < cat->ctl_count; i++) {
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
@@ -231,14 +247,13 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
* mixer in rm->pingpong_blks[].
* @dspp_idx: output parameter, index of dspp block attached to the layer
* mixer in rm->dspp_blks[].
- * @reqs: input parameter, rm requirements for HW blocks needed in the
- * datapath.
+ * @topology: selected topology for the display
* Return: true if lm matches all requirements, false otherwise
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
- struct dpu_rm_requirements *reqs)
+ struct msm_display_topology *topology)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
@@ -263,7 +278,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
}
*pp_idx = idx;
- if (!reqs->topology.num_dspp)
+ if (!topology->num_dspp)
return true;
idx = lm_cfg->dspp - DSPP_0;
@@ -285,7 +300,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id,
- struct dpu_rm_requirements *reqs)
+ struct msm_display_topology *topology)
{
int lm_idx[MAX_BLOCKS];
@@ -293,14 +308,14 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
int dspp_idx[MAX_BLOCKS] = {0};
int i, lm_count = 0;
- if (!reqs->topology.num_lm) {
- DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
+ if (!topology->num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", topology->num_lm);
return -EINVAL;
}
/* Find a primary mixer */
for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
- lm_count < reqs->topology.num_lm; i++) {
+ lm_count < topology->num_lm; i++) {
if (!rm->mixer_blks[i])
continue;
@@ -309,14 +324,14 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
enc_id, i, &pp_idx[lm_count],
- &dspp_idx[lm_count], reqs)) {
+ &dspp_idx[lm_count], topology)) {
continue;
}
++lm_count;
/* Valid primary mixer found, find matching peers */
- if (lm_count < reqs->topology.num_lm) {
+ if (lm_count < topology->num_lm) {
int j = _dpu_rm_get_lm_peer(rm, i);
/* ignore the peer if there is an error or if the peer was already processed */
@@ -329,7 +344,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
global_state, enc_id, j,
&pp_idx[lm_count], &dspp_idx[lm_count],
- reqs)) {
+ topology)) {
continue;
}
@@ -338,7 +353,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
}
}
- if (lm_count != reqs->topology.num_lm) {
+ if (lm_count != topology->num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
@@ -347,7 +362,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
global_state->dspp_to_enc_id[dspp_idx[i]] =
- reqs->topology.num_dspp ? enc_id : 0;
+ topology->num_dspp ? enc_id : 0;
trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
pp_idx[i] + PINGPONG_0);
@@ -584,28 +599,28 @@ static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *enc,
- struct dpu_rm_requirements *reqs)
+ struct msm_display_topology *topology)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
+ ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, topology);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
- &reqs->topology);
+ topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
+ ret = _dpu_rm_reserve_dsc(rm, global_state, enc, topology);
if (ret)
return ret;
- if (reqs->topology.needs_cdm) {
+ if (topology->needs_cdm) {
ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
if (ret) {
DPU_ERROR("unable to find CDM blk\n");
@@ -616,20 +631,6 @@ static int _dpu_rm_make_reservation(
return ret;
}
-static int _dpu_rm_populate_requirements(
- struct drm_encoder *enc,
- struct dpu_rm_requirements *reqs,
- struct msm_display_topology req_topology)
-{
- reqs->topology = req_topology;
-
- DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n",
- reqs->topology.num_lm, reqs->topology.num_dsc,
- reqs->topology.num_intf, reqs->topology.needs_cdm);
-
- return 0;
-}
-
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
uint32_t enc_id)
{
@@ -641,6 +642,13 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
}
}
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ * HW blocks previously reserved for that use case.
+ * @global_state: resources shared across multiple kms objects
+ * @enc: DRM Encoder handle
+ */
void dpu_rm_release(struct dpu_global_state *global_state,
struct drm_encoder *enc)
{
@@ -657,14 +665,27 @@ void dpu_rm_release(struct dpu_global_state *global_state,
_dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
}
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ * the connections in use and the user requirements, specified through
+ * related topology control properties, and reserve hardware blocks for
+ * that display chain.
+ * HW blocks can then be accessed through dpu_rm_get_* functions.
+ * HW reservations should be released via dpu_rm_release().
+ * @rm: DPU Resource Manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @topology: Pointer to topology info for the display
+ * Return: 0 on success, negative error code otherwise
+ */
int dpu_rm_reserve(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology)
+ struct msm_display_topology *topology)
{
- struct dpu_rm_requirements reqs;
int ret;
/* Check if this is just a page-flip */
@@ -679,13 +700,11 @@ int dpu_rm_reserve(
DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
enc->base.id, crtc_state->crtc->base.id);
- ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
- if (ret) {
- DPU_ERROR("failed to populate hw requirements\n");
- return ret;
- }
+ DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
+ topology->num_lm, topology->num_dsc,
+ topology->num_intf);
- ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+ ret = _dpu_rm_make_reservation(rm, global_state, enc, topology);
if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
@@ -694,6 +713,98 @@ int dpu_rm_reserve(
return ret;
}
+static struct dpu_hw_sspp *dpu_rm_try_sspp(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_crtc *crtc,
+ struct dpu_rm_sspp_requirements *reqs,
+ unsigned int type)
+{
+ uint32_t crtc_id = crtc->base.id;
+ struct dpu_hw_sspp *hw_sspp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++) {
+ if (!rm->hw_sspp[i])
+ continue;
+
+ if (global_state->sspp_to_crtc_id[i])
+ continue;
+
+ hw_sspp = rm->hw_sspp[i];
+
+ if (hw_sspp->cap->type != type)
+ continue;
+
+ if (reqs->scale && !hw_sspp->cap->sblk->scaler_blk.len)
+ continue;
+
+ // TODO: QSEED2 and RGB scalers are not yet supported
+ if (reqs->scale && !hw_sspp->ops.setup_scaler)
+ continue;
+
+ if (reqs->yuv && !hw_sspp->cap->sblk->csc_blk.len)
+ continue;
+
+ if (reqs->rot90 && !(hw_sspp->cap->features & BIT(DPU_SSPP_INLINE_ROTATION)))
+ continue;
+
+ global_state->sspp_to_crtc_id[i] = crtc_id;
+
+ return rm->hw_sspp[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * dpu_rm_reserve_sspp - Reserve the required SSPP for the provided CRTC
+ * @rm: DPU Resource Manager handle
+ * @global_state: private global state
+ * @crtc: DRM CRTC handle
+ * @reqs: SSPP required features
+ */
+struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_crtc *crtc,
+ struct dpu_rm_sspp_requirements *reqs)
+{
+ struct dpu_hw_sspp *hw_sspp = NULL;
+
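+ /*
+ * Try the simplest pipe type that satisfies the requirements first:
+ * DMA for plain unscaled RGB, RGB pipes when scaling is needed, and
+ * fully-featured VIG pipes as the fallback.
+ */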
+ if (!reqs->scale && !reqs->yuv)
+ hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
+ if (!hw_sspp && reqs->scale)
+ hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
+ if (!hw_sspp)
+ hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);
+
+ return hw_sspp;
+}
+
+/**
+ * dpu_rm_release_all_sspp - Given the CRTC, release all SSPP
+ * blocks previously reserved for that use case.
+ * @global_state: resources shared across multiple kms objects
+ * @crtc: DRM CRTC handle
+ */
+void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
+ struct drm_crtc *crtc)
+{
+ uint32_t crtc_id = crtc->base.id;
+
+ _dpu_rm_clear_mapping(global_state->sspp_to_crtc_id,
+ ARRAY_SIZE(global_state->sspp_to_crtc_id), crtc_id);
+}
+
+/**
+ * dpu_rm_get_assigned_resources - Get hw resources of the given type that are
+ * assigned to this encoder
+ * @rm: DPU Resource Manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @enc_id: encoder id requesting for allocation
+ * @type: resource type to return data for
+ * @blks: pointer to the array to be filled by HW resources
+ * @blks_size: size of the @blks array
+ */
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
struct dpu_global_state *global_state, uint32_t enc_id,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
@@ -772,6 +883,11 @@ static void dpu_rm_print_state_helper(struct drm_printer *p,
}
+/**
+ * dpu_rm_print_state - output the RM private state
+ * @p: DRM printer
+ * @global_state: global state
+ */
void dpu_rm_print_state(struct drm_printer *p,
const struct dpu_global_state *global_state)
{
@@ -813,4 +929,11 @@ void dpu_rm_print_state(struct drm_printer *p,
dpu_rm_print_state_helper(p, rm->cdm_blk,
global_state->cdm_to_enc_id);
drm_puts(p, "\n");
+
+ drm_puts(p, "\tsspp=");
+ /* skip SSPP_NONE and start from the next index */
+ for (i = SSPP_NONE + 1; i < ARRAY_SIZE(global_state->sspp_to_crtc_id); i++)
+ dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
+ global_state->sspp_to_crtc_id[i]);
+ drm_puts(p, "\n");
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index e63db8ace6b9..99bd594ee0d1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -20,6 +20,7 @@ struct dpu_global_state;
* @ctl_blks: array of ctl hardware resources
* @hw_intf: array of intf hardware resources
* @hw_wb: array of wb hardware resources
+ * @cwb_blks: array of cwb hardware resources
* @dspp_blks: array of dspp hardware resources
* @hw_sspp: array of sspp hardware resources
* @cdm_blk: cdm hardware resource
@@ -30,6 +31,7 @@ struct dpu_rm {
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
struct dpu_hw_wb *hw_wb[WB_MAX - WB_0];
+ struct dpu_hw_blk *cwb_blks[CWB_MAX - CWB_0];
struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
@@ -37,63 +39,55 @@ struct dpu_rm {
struct dpu_hw_blk *cdm_blk;
};
+struct dpu_rm_sspp_requirements {
+ bool yuv;
+ bool scale;
+ bool rot90;
+};
+
/**
- * dpu_rm_init - Read hardware catalog and create reservation tracking objects
- * for all HW blocks.
- * @dev: Corresponding device for devres management
- * @rm: DPU Resource Manager handle
- * @cat: Pointer to hardware catalog
- * @mdss_data: Pointer to MDSS / UBWC configuration
- * @mmio: mapped register io address of MDP
- * @Return: 0 on Success otherwise -ERROR
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm: number of layer mixers used
+ * @num_intf: number of interfaces the panel is mounted on
+ * @num_dspp: number of dspp blocks used
+ * @num_dsc: number of Display Stream Compression (DSC) blocks used
+ * @needs_cdm: indicates whether cdm block is needed for this display topology
*/
+struct msm_display_topology {
+ u32 num_lm;
+ u32 num_intf;
+ u32 num_dspp;
+ u32 num_dsc;
+ bool needs_cdm;
+};
+
int dpu_rm_init(struct drm_device *dev,
struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
const struct msm_mdss_data *mdss_data,
void __iomem *mmio);
-/**
- * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
- * the use connections and user requirements, specified through related
- * topology control properties, and reserve hardware blocks to that
- * display chain.
- * HW blocks can then be accessed through dpu_rm_get_* functions.
- * HW Reservations should be released via dpu_rm_release_hw.
- * @rm: DPU Resource Manager handle
- * @drm_enc: DRM Encoder handle
- * @crtc_state: Proposed Atomic DRM CRTC State handle
- * @topology: Pointer to topology info for the display
- * @Return: 0 on Success otherwise -ERROR
- */
int dpu_rm_reserve(struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
- struct msm_display_topology topology);
+ struct msm_display_topology *topology);
-/**
- * dpu_rm_reserve - Given the encoder for the display chain, release any
- * HW blocks previously reserved for that use case.
- * @rm: DPU Resource Manager handle
- * @enc: DRM Encoder handle
- * @Return: 0 on Success otherwise -ERROR
- */
void dpu_rm_release(struct dpu_global_state *global_state,
struct drm_encoder *enc);
-/**
- * Get hw resources of the given type that are assigned to this encoder.
- */
+struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_crtc *crtc,
+ struct dpu_rm_sspp_requirements *reqs);
+
+void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
+ struct drm_crtc *crtc);
+
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
struct dpu_global_state *global_state, uint32_t enc_id,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
-/**
- * dpu_rm_print_state - output the RM private state
- * @p: DRM printer
- * @global_state: global state
- */
void dpu_rm_print_state(struct drm_printer *p,
const struct dpu_global_state *global_state);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 47c02b98eac3..2a551e455aa3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -204,6 +204,11 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
}
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms: DPU handler
+ * @params: Pointer to QoS configuration parameters
+ */
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
@@ -245,6 +250,10 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
}
}
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms: DPU handler
+ */
void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
{
struct dpu_hw_vbif *vbif;
@@ -262,6 +271,10 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
}
}
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms: DPU handler
+ */
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
{
struct dpu_hw_vbif *vbif;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
index e1b1f7f4e4be..62e47ae1e3ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -38,32 +38,14 @@ struct dpu_vbif_set_qos_params {
bool is_rt;
};
-/**
- * dpu_vbif_set_ot_limit - set OT limit for vbif client
- * @dpu_kms: DPU handler
- * @params: Pointer to OT configuration parameters
- */
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params);
-/**
- * dpu_vbif_set_qos_remap - set QoS priority level remap
- * @dpu_kms: DPU handler
- * @params: Pointer to QoS configuration parameters
- */
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params);
-/**
- * dpu_vbif_clear_errors - clear any vbif errors
- * @dpu_kms: DPU handler
- */
void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
-/**
- * dpu_vbif_init_memtypes - initialize xin memory types for vbif
- * @dpu_kms: DPU handler
- */
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 16f144cbc0c9..8ff496082902 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -42,9 +42,6 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
if (!conn_state || !conn_state->connector) {
DPU_ERROR("invalid connector state\n");
return -EINVAL;
- } else if (conn_state->connector->status != connector_status_connected) {
- DPU_ERROR("connector not connected %d\n", conn_state->connector->status);
- return -EINVAL;
}
crtc = conn_state->crtc;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 6e4e74f9d63d..c469e66cfc11 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -568,7 +568,7 @@ MODULE_DEVICE_TABLE(of, mdp4_dt_match);
static struct platform_driver mdp4_platform_driver = {
.probe = mdp4_probe,
- .remove_new = mdp4_remove,
+ .remove = mdp4_remove,
.shutdown = msm_kms_shutdown,
.driver = {
.name = "mdp4",
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 576995ddce37..8bbc7fb881d5 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -389,7 +389,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
/* TODO: different regulators in other cases? */
mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v";
- mdp4_lcdc_encoder->regs[1].supply = "lvds-vccs-3p3v";
+ mdp4_lcdc_encoder->regs[1].supply = "lvds-pll-vdda";
mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda";
ret = devm_regulator_bulk_get(dev->dev,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 374704cce656..3fcca7a3d82e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -908,7 +908,7 @@ MODULE_DEVICE_TABLE(of, mdp5_dt_match);
static struct platform_driver mdp5_driver = {
.probe = mdp5_dev_probe,
- .remove_new = mdp5_dev_remove,
+ .remove = mdp5_dev_remove,
.shutdown = msm_kms_shutdown,
.driver = {
.name = "msm_mdp",
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
index e75b97127c0d..2be00b11e557 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
@@ -109,7 +109,7 @@ int msm_disp_snapshot_init(struct drm_device *drm_dev)
mutex_init(&kms->dump_mutex);
- kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
+ kms->dump_worker = kthread_run_worker(0, "%s", "disp_snapshot");
if (IS_ERR(kms->dump_worker))
DRM_ERROR("failed to create disp state task\n");
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index add72bbc28b1..07a2c1e87219 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -25,43 +25,41 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
addr = base_addr;
end_addr = base_addr + aligned_len;
- if (!(*reg))
- *reg = kzalloc(len_padded, GFP_KERNEL);
-
- if (*reg)
- dump_addr = *reg;
+ *reg = kvzalloc(len_padded, GFP_KERNEL);
+ if (!*reg)
+ return;
+ dump_addr = *reg;
for (i = 0; i < num_rows; i++) {
x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
- if (dump_addr) {
- dump_addr[i * 4] = x0;
- dump_addr[i * 4 + 1] = x4;
- dump_addr[i * 4 + 2] = x8;
- dump_addr[i * 4 + 3] = xc;
- }
+ dump_addr[i * 4] = x0;
+ dump_addr[i * 4 + 1] = x4;
+ dump_addr[i * 4 + 2] = x8;
+ dump_addr[i * 4 + 3] = xc;
addr += REG_DUMP_ALIGN;
}
}
-static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
- struct drm_printer *p)
+static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len,
+ void __iomem *base_addr, struct drm_printer *p)
{
int i;
- u32 *dump_addr = NULL;
void __iomem *addr;
u32 num_rows;
+ if (!dump_addr) {
+ drm_printf(p, "Registers not stored\n");
+ return;
+ }
+
addr = base_addr;
num_rows = len / REG_DUMP_ALIGN;
- if (*reg)
- dump_addr = *reg;
-
for (i = 0; i < num_rows; i++) {
drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
(unsigned long)(addr - base_addr),
@@ -89,7 +87,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
list_for_each_entry_safe(block, tmp, &state->blocks, node) {
drm_printf(p, "====================%s================\n", block->name);
- msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
+ msm_disp_state_print_regs(block->state, block->size, block->base_addr, p);
}
drm_printf(p, "===================dpu drm state================\n");
@@ -161,7 +159,7 @@ void msm_disp_state_free(void *data)
list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
list_del(&block->node);
- kfree(block->state);
+ kvfree(block->state);
kfree(block);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index a599fc5d63c5..70fdc9fe228a 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -14,284 +14,98 @@
#include "dp_catalog.h"
#include "dp_audio.h"
#include "dp_panel.h"
+#include "dp_reg.h"
#include "dp_display.h"
#include "dp_utils.h"
-struct dp_audio_private {
+struct msm_dp_audio_private {
struct platform_device *audio_pdev;
struct platform_device *pdev;
struct drm_device *drm_dev;
- struct dp_catalog *catalog;
+ struct msm_dp_catalog *catalog;
u32 channels;
- struct dp_audio dp_audio;
+ struct msm_dp_audio msm_dp_audio;
};
-static u32 dp_audio_get_header(struct dp_catalog *catalog,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header)
+static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
{
- return dp_catalog_audio_get_header(catalog, sdp, header);
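+ /* Audio Stream SDP header: HB1 = 0x02 (SDP type), HB3 = channel count - 1. */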
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x02,
+ .HB2 = 0x00,
+ .HB3 = audio->channels - 1,
+ };
+
+ msm_dp_catalog_write_audio_stream(audio->catalog, &sdp_hdr);
}
-static void dp_audio_set_header(struct dp_catalog *catalog,
- u32 data,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header)
+static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
{
- dp_catalog_audio_set_header(catalog, sdp, header, data);
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x01,
+ .HB2 = 0x17,
+ .HB3 = 0x0 | (0x11 << 2),
+ };
+
+ msm_dp_catalog_write_audio_timestamp(audio->catalog, &sdp_hdr);
}
-static void dp_audio_stream_sdp(struct dp_audio_private *audio)
+static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x02;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
- new_value = value;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
-
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
-
- new_value = audio->channels - 1;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
-
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
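+ /* Audio InfoFrame SDP: HB1 = 0x84 is the CTA-861 Audio InfoFrame packet type. */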
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x84,
+ .HB2 = 0x1b,
+ .HB3 = 0x0 | (0x11 << 2),
+ };
+
+ msm_dp_catalog_write_audio_infoframe(audio->catalog, &sdp_hdr);
}
-static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
+static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x1;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x17;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
-
- new_value = (0x0 | (0x11 << 2));
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x05,
+ .HB2 = 0x0f,
+ .HB3 = 0x00,
+ };
+
+ msm_dp_catalog_write_audio_copy_mgmt(audio->catalog, &sdp_hdr);
}
-static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
+static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x84;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x1b;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
-
- new_value = (0x0 | (0x11 << 2));
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- new_value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
+ struct dp_sdp_header sdp_hdr = {
+ .HB0 = 0x00,
+ .HB1 = 0x06,
+ .HB2 = 0x0f,
+ .HB3 = 0x00,
+ };
+
+ msm_dp_catalog_write_audio_isrc(audio->catalog, &sdp_hdr);
}
-static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
+static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x05;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x0F;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
-
- /* Config header and parity byte 3 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
-
- new_value = 0x0;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_3_BIT)
- | (parity_byte << PARITY_BYTE_3_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
-}
+ msm_dp_catalog_audio_config_sdp(audio->catalog);
-static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
-{
- struct dp_catalog *catalog = audio->catalog;
- u32 value, new_value;
- u8 parity_byte;
-
- /* Config header and parity byte 1 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
-
- new_value = 0x06;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_1_BIT)
- | (parity_byte << PARITY_BYTE_1_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
-
- /* Config header and parity byte 2 */
- value = dp_audio_get_header(catalog,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
-
- new_value = 0x0F;
- parity_byte = dp_utils_calculate_parity(new_value);
- value |= ((new_value << HEADER_BYTE_2_BIT)
- | (parity_byte << PARITY_BYTE_2_BIT));
- drm_dbg_dp(audio->drm_dev,
- "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
- dp_audio_set_header(catalog, value,
- DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
+ msm_dp_audio_stream_sdp(audio);
+ msm_dp_audio_timestamp_sdp(audio);
+ msm_dp_audio_infoframe_sdp(audio);
+ msm_dp_audio_copy_management_sdp(audio);
+ msm_dp_audio_isrc_sdp(audio);
}
-static void dp_audio_setup_sdp(struct dp_audio_private *audio)
-{
- dp_catalog_audio_config_sdp(audio->catalog);
-
- dp_audio_stream_sdp(audio);
- dp_audio_timestamp_sdp(audio);
- dp_audio_infoframe_sdp(audio);
- dp_audio_copy_management_sdp(audio);
- dp_audio_isrc_sdp(audio);
-}
-
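
For reference, the removed open-coded path combined each header byte with an ECC parity value computed by dp_utils_calculate_parity() and packed both into one register word. A sketch of that packing, with assumed shift values (the real ones live in the driver's register definitions):

/* Illustrative only; the HEADER/PARITY shift values are assumptions. */
#include <linux/types.h>

#define HEADER_BYTE_1_BIT	0
#define PARITY_BYTE_1_BIT	8

static u32 pack_sdp_byte(u32 reg, u8 hb, u8 parity)
{
	/* merge the header byte and its parity into the register image */
	return reg | (hb << HEADER_BYTE_1_BIT) | (parity << PARITY_BYTE_1_BIT);
}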
-static void dp_audio_setup_acr(struct dp_audio_private *audio)
+static void msm_dp_audio_setup_acr(struct msm_dp_audio_private *audio)
{
u32 select = 0;
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
- switch (audio->dp_audio.bw_code) {
+ switch (audio->msm_dp_audio.bw_code) {
case DP_LINK_BW_1_62:
select = 0;
break;
@@ -310,15 +124,15 @@ static void dp_audio_setup_acr(struct dp_audio_private *audio)
break;
}
- dp_catalog_audio_config_acr(catalog, select);
+ msm_dp_catalog_audio_config_acr(catalog, select);
}
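
The ACR "select" value indexes the link rate; only the 1.62 Gbps case is visible in this hunk, so the remaining mappings below are assumptions kept consistent with the bw_code ordering:

/* Hypothetical helper mirroring the switch above; DP_LINK_BW_* come
 * from <drm/display/drm_dp.h>. Cases other than 1.62 are assumed. */
#include <drm/display/drm_dp.h>
#include <linux/types.h>

static u32 acr_select(u32 bw_code)
{
	switch (bw_code) {
	case DP_LINK_BW_1_62: return 0;
	case DP_LINK_BW_2_7:  return 1;	/* assumption */
	case DP_LINK_BW_5_4:  return 2;	/* assumption */
	case DP_LINK_BW_8_1:  return 3;	/* assumption */
	default:              return 0;
	}
}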
-static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
+static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
{
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
u32 safe_to_exit_level = 0;
- switch (audio->dp_audio.lane_count) {
+ switch (audio->msm_dp_audio.lane_count) {
case 1:
safe_to_exit_level = 14;
break;
@@ -329,56 +143,56 @@ static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
safe_to_exit_level = 5;
break;
default:
+ safe_to_exit_level = 14;
drm_dbg_dp(audio->drm_dev,
"setting the default safe_to_exit_level = %u\n",
safe_to_exit_level);
- safe_to_exit_level = 14;
break;
}
- dp_catalog_audio_sfe_level(catalog, safe_to_exit_level);
+ msm_dp_catalog_audio_sfe_level(catalog, safe_to_exit_level);
}
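
Note the small behavioural fix folded into the rename: the default branch now assigns safe_to_exit_level before logging it, so the debug message reports the value actually programmed (14) rather than the stale initializer (0). Condensed, with the 2-lane value assumed since the hunk elides it:

/* Sketch of the lane-count mapping; the 2-lane case is an assumption. */
#include <linux/types.h>

static u32 safe_to_exit_level(u32 lane_count)
{
	switch (lane_count) {
	case 1:  return 14;
	case 2:  return 8;	/* assumption: not shown in this hunk */
	case 4:  return 5;
	default: return 14;	/* now set before it is logged */
	}
}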
-static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
+static void msm_dp_audio_enable(struct msm_dp_audio_private *audio, bool enable)
{
- struct dp_catalog *catalog = audio->catalog;
+ struct msm_dp_catalog *catalog = audio->catalog;
- dp_catalog_audio_enable(catalog, enable);
+ msm_dp_catalog_audio_enable(catalog, enable);
}
-static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
+static struct msm_dp_audio_private *msm_dp_audio_get_data(struct platform_device *pdev)
{
- struct dp_audio *dp_audio;
- struct msm_dp *dp_display;
+ struct msm_dp_audio *msm_dp_audio;
+ struct msm_dp *msm_dp_display;
if (!pdev) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-ENODEV);
}
- dp_display = platform_get_drvdata(pdev);
- if (!dp_display) {
+ msm_dp_display = platform_get_drvdata(pdev);
+ if (!msm_dp_display) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-ENODEV);
}
- dp_audio = dp_display->dp_audio;
+ msm_dp_audio = msm_dp_display->msm_dp_audio;
- if (!dp_audio) {
- DRM_ERROR("invalid dp_audio data\n");
+ if (!msm_dp_audio) {
+ DRM_ERROR("invalid msm_dp_audio data\n");
return ERR_PTR(-EINVAL);
}
- return container_of(dp_audio, struct dp_audio_private, dp_audio);
+ return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
}
-static int dp_audio_hook_plugged_cb(struct device *dev, void *data,
+static int msm_dp_audio_hook_plugged_cb(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev)
{
struct platform_device *pdev;
- struct msm_dp *dp_display;
+ struct msm_dp *msm_dp_display;
pdev = to_platform_device(dev);
if (!pdev) {
@@ -386,20 +200,20 @@ static int dp_audio_hook_plugged_cb(struct device *dev, void *data,
return -ENODEV;
}
- dp_display = platform_get_drvdata(pdev);
- if (!dp_display) {
+ msm_dp_display = platform_get_drvdata(pdev);
+ if (!msm_dp_display) {
pr_err("invalid input\n");
return -ENODEV;
}
- return dp_display_set_plugged_cb(dp_display, fn, codec_dev);
+ return msm_dp_display_set_plugged_cb(msm_dp_display, fn, codec_dev);
}
-static int dp_audio_get_eld(struct device *dev,
+static int msm_dp_audio_get_eld(struct device *dev,
void *data, uint8_t *buf, size_t len)
{
struct platform_device *pdev;
- struct msm_dp *dp_display;
+ struct msm_dp *msm_dp_display;
pdev = to_platform_device(dev);
@@ -408,30 +222,32 @@ static int dp_audio_get_eld(struct device *dev,
return -ENODEV;
}
- dp_display = platform_get_drvdata(pdev);
- if (!dp_display) {
+ msm_dp_display = platform_get_drvdata(pdev);
+ if (!msm_dp_display) {
DRM_ERROR("invalid input\n");
return -ENODEV;
}
- memcpy(buf, dp_display->connector->eld,
- min(sizeof(dp_display->connector->eld), len));
+ mutex_lock(&msm_dp_display->connector->eld_mutex);
+ memcpy(buf, msm_dp_display->connector->eld,
+ min(sizeof(msm_dp_display->connector->eld), len));
+ mutex_unlock(&msm_dp_display->connector->eld_mutex);
return 0;
}
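
Besides the rename, this hunk takes the connector's eld_mutex around the ELD copy so a concurrent ELD update cannot be observed half-written. The guarded pattern, as a standalone sketch:

/* Minimal sketch of the locked ELD snapshot, assuming the eld[] and
 * eld_mutex members of struct drm_connector. */
#include <drm/drm_connector.h>
#include <linux/minmax.h>
#include <linux/string.h>

static void copy_eld_locked(struct drm_connector *connector,
			    u8 *buf, size_t len)
{
	mutex_lock(&connector->eld_mutex);
	memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
	mutex_unlock(&connector->eld_mutex);
}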
-int dp_audio_hw_params(struct device *dev,
+int msm_dp_audio_hw_params(struct device *dev,
void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
int rc = 0;
- struct dp_audio_private *audio;
+ struct msm_dp_audio_private *audio;
struct platform_device *pdev;
- struct msm_dp *dp_display;
+ struct msm_dp *msm_dp_display;
pdev = to_platform_device(dev);
- dp_display = platform_get_drvdata(pdev);
+ msm_dp_display = platform_get_drvdata(pdev);
/*
* there could be cases where sound card can be opened even
@@ -441,12 +257,12 @@ int dp_audio_hw_params(struct device *dev,
* such cases check for connection status and bail out if not
* connected.
*/
- if (!dp_display->power_on) {
+ if (!msm_dp_display->power_on) {
rc = -EINVAL;
goto end;
}
- audio = dp_audio_get_data(pdev);
+ audio = msm_dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
rc = PTR_ERR(audio);
goto end;
@@ -454,26 +270,26 @@ int dp_audio_hw_params(struct device *dev,
audio->channels = params->channels;
- dp_audio_setup_sdp(audio);
- dp_audio_setup_acr(audio);
- dp_audio_safe_to_exit_level(audio);
- dp_audio_enable(audio, true);
- dp_display_signal_audio_start(dp_display);
- dp_display->audio_enabled = true;
+ msm_dp_audio_setup_sdp(audio);
+ msm_dp_audio_setup_acr(audio);
+ msm_dp_audio_safe_to_exit_level(audio);
+ msm_dp_audio_enable(audio, true);
+ msm_dp_display_signal_audio_start(msm_dp_display);
+ msm_dp_display->audio_enabled = true;
end:
return rc;
}
-static void dp_audio_shutdown(struct device *dev, void *data)
+static void msm_dp_audio_shutdown(struct device *dev, void *data)
{
- struct dp_audio_private *audio;
+ struct msm_dp_audio_private *audio;
struct platform_device *pdev;
- struct msm_dp *dp_display;
+ struct msm_dp *msm_dp_display;
pdev = to_platform_device(dev);
- dp_display = platform_get_drvdata(pdev);
- audio = dp_audio_get_data(pdev);
+ msm_dp_display = platform_get_drvdata(pdev);
+ audio = msm_dp_audio_get_data(pdev);
if (IS_ERR(audio)) {
DRM_ERROR("failed to get audio data\n");
return;
@@ -487,32 +303,32 @@ static void dp_audio_shutdown(struct device *dev, void *data)
	 * connected. is_connected cannot be used here as it's set
* to false earlier than this call
*/
- if (!dp_display->audio_enabled)
+ if (!msm_dp_display->audio_enabled)
return;
- dp_audio_enable(audio, false);
+ msm_dp_audio_enable(audio, false);
/* signal the dp display to safely shutdown clocks */
- dp_display_signal_audio_complete(dp_display);
+ msm_dp_display_signal_audio_complete(msm_dp_display);
}
-static const struct hdmi_codec_ops dp_audio_codec_ops = {
- .hw_params = dp_audio_hw_params,
- .audio_shutdown = dp_audio_shutdown,
- .get_eld = dp_audio_get_eld,
- .hook_plugged_cb = dp_audio_hook_plugged_cb,
+static const struct hdmi_codec_ops msm_dp_audio_codec_ops = {
+ .hw_params = msm_dp_audio_hw_params,
+ .audio_shutdown = msm_dp_audio_shutdown,
+ .get_eld = msm_dp_audio_get_eld,
+ .hook_plugged_cb = msm_dp_audio_hook_plugged_cb,
};
static struct hdmi_codec_pdata codec_data = {
- .ops = &dp_audio_codec_ops,
+ .ops = &msm_dp_audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
};
-void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio)
+void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio)
{
- struct dp_audio_private *audio_priv;
+ struct msm_dp_audio_private *audio_priv;
- audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio);
+ audio_priv = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
if (audio_priv->audio_pdev) {
platform_device_unregister(audio_priv->audio_pdev);
@@ -520,13 +336,13 @@ void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio)
}
}
-int dp_register_audio_driver(struct device *dev,
- struct dp_audio *dp_audio)
+int msm_dp_register_audio_driver(struct device *dev,
+ struct msm_dp_audio *msm_dp_audio)
{
- struct dp_audio_private *audio_priv;
+ struct msm_dp_audio_private *audio_priv;
- audio_priv = container_of(dp_audio,
- struct dp_audio_private, dp_audio);
+ audio_priv = container_of(msm_dp_audio,
+ struct msm_dp_audio_private, msm_dp_audio);
audio_priv->audio_pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
@@ -536,15 +352,14 @@ int dp_register_audio_driver(struct device *dev,
return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
}
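
Audio is exposed to ALSA by registering an hdmi-codec child device carrying codec_data. A sketch of the call as it appears above, with an illustrative wrapper name:

/* Mirrors the platform_device_register_data() call in
 * msm_dp_register_audio_driver(); the wrapper name is illustrative. */
#include <linux/platform_device.h>
#include <sound/hdmi-codec.h>

static struct platform_device *
register_codec(struct device *parent, struct hdmi_codec_pdata *pdata)
{
	return platform_device_register_data(parent, HDMI_CODEC_DRV_NAME,
					     PLATFORM_DEVID_AUTO,
					     pdata, sizeof(*pdata));
}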
-struct dp_audio *dp_audio_get(struct platform_device *pdev,
- struct dp_panel *panel,
- struct dp_catalog *catalog)
+struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
+ struct msm_dp_catalog *catalog)
{
int rc = 0;
- struct dp_audio_private *audio;
- struct dp_audio *dp_audio;
+ struct msm_dp_audio_private *audio;
+ struct msm_dp_audio *msm_dp_audio;
- if (!pdev || !panel || !catalog) {
+ if (!pdev || !catalog) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
@@ -559,23 +374,21 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev,
audio->pdev = pdev;
audio->catalog = catalog;
- dp_audio = &audio->dp_audio;
-
- dp_catalog_audio_init(catalog);
+ msm_dp_audio = &audio->msm_dp_audio;
- return dp_audio;
+ return msm_dp_audio;
error:
return ERR_PTR(rc);
}
-void dp_audio_put(struct dp_audio *dp_audio)
+void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio)
{
- struct dp_audio_private *audio;
+ struct msm_dp_audio_private *audio;
- if (!dp_audio)
+ if (!msm_dp_audio)
return;
- audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
+ audio = container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio);
devm_kfree(&audio->pdev->dev, audio);
}
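
All of the *_get()/*_put() pairs in this series rely on the same embed-and-recover idiom: the public struct is a member of a private one, and container_of() walks back to the wrapper. Reduced to its essentials (names illustrative):

/* Illustrative embed-and-recover pattern used by msm_dp_audio_put()
 * and friends. */
#include <linux/container_of.h>

struct pub { int lane_count; };

struct priv {
	int internal_state;
	struct pub pub;		/* embedded public handle */
};

static struct priv *to_priv(struct pub *p)
{
	return container_of(p, struct priv, pub);
}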
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
index 4ab78880af82..beea34cbab77 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.h
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -8,63 +8,60 @@
#include <linux/platform_device.h>
-#include "dp_panel.h"
#include "dp_catalog.h"
#include <sound/hdmi-codec.h>
/**
- * struct dp_audio
+ * struct msm_dp_audio
* @lane_count: number of lanes configured in current session
* @bw_code: link rate's bandwidth code for current session
*/
-struct dp_audio {
+struct msm_dp_audio {
u32 lane_count;
u32 bw_code;
};
/**
- * dp_audio_get()
+ * msm_dp_audio_get()
*
 * Creates an instance of dp audio.
*
* @pdev: caller's platform device instance.
- * @panel: an instance of dp_panel module.
- * @catalog: an instance of dp_catalog module.
+ * @catalog: an instance of msm_dp_catalog module.
*
 * Returns the error code in case of failure, otherwise
- * an instance of newly created dp_module.
+ * an instance of newly created msm_dp_module.
*/
-struct dp_audio *dp_audio_get(struct platform_device *pdev,
- struct dp_panel *panel,
- struct dp_catalog *catalog);
+struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
+ struct msm_dp_catalog *catalog);
/**
- * dp_register_audio_driver()
+ * msm_dp_register_audio_driver()
*
* Registers DP device with hdmi_codec interface.
*
* @dev: DP device instance.
- * @dp_audio: an instance of dp_audio module.
+ * @msm_dp_audio: an instance of msm_dp_audio module.
 *
* Returns the error code in case of failure, otherwise
* zero on success.
*/
-int dp_register_audio_driver(struct device *dev,
- struct dp_audio *dp_audio);
+int msm_dp_register_audio_driver(struct device *dev,
+ struct msm_dp_audio *msm_dp_audio);
-void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio);
+void msm_dp_unregister_audio_driver(struct device *dev, struct msm_dp_audio *msm_dp_audio);
/**
- * dp_audio_put()
+ * msm_dp_audio_put()
*
- * Cleans the dp_audio instance.
+ * Cleans the msm_dp_audio instance.
*
- * @dp_audio: an instance of dp_audio.
+ * @msm_dp_audio: an instance of msm_dp_audio.
*/
-void dp_audio_put(struct dp_audio *dp_audio);
+void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio);
-int dp_audio_hw_params(struct device *dev,
+int msm_dp_audio_hw_params(struct device *dev,
void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params);
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 00dfafbebe0e..bc8d46abfc61 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -20,9 +20,9 @@ enum msm_dp_aux_err {
DP_AUX_ERR_PHY,
};
-struct dp_aux_private {
+struct msm_dp_aux_private {
struct device *dev;
- struct dp_catalog *catalog;
+ struct msm_dp_catalog *catalog;
struct phy *phy;
@@ -42,12 +42,12 @@ struct dp_aux_private {
u32 offset;
u32 segment;
- struct drm_dp_aux dp_aux;
+ struct drm_dp_aux msm_dp_aux;
};
#define MAX_AUX_RETRIES 5
-static ssize_t dp_aux_write(struct dp_aux_private *aux,
+static ssize_t msm_dp_aux_write(struct msm_dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u8 data[4];
@@ -88,11 +88,11 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux,
/* index = 0, write */
if (i == 0)
reg |= DP_AUX_DATA_INDEX_WRITE;
- dp_catalog_aux_write_data(aux->catalog, reg);
+ msm_dp_catalog_aux_write_data(aux->catalog, reg);
}
- dp_catalog_aux_clear_trans(aux->catalog, false);
- dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ msm_dp_catalog_aux_clear_trans(aux->catalog, false);
+ msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog);
reg = 0; /* Transaction number == 1 */
if (!aux->native) { /* i2c */
@@ -106,12 +106,12 @@ static ssize_t dp_aux_write(struct dp_aux_private *aux,
}
reg |= DP_AUX_TRANS_CTRL_GO;
- dp_catalog_aux_write_trans(aux->catalog, reg);
+ msm_dp_catalog_aux_write_trans(aux->catalog, reg);
return len;
}
-static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+static ssize_t msm_dp_aux_cmd_fifo_tx(struct msm_dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
ssize_t ret;
@@ -119,7 +119,7 @@ static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
reinit_completion(&aux->comp);
- ret = dp_aux_write(aux, msg);
+ ret = msm_dp_aux_write(aux, msg);
if (ret < 0)
return ret;
@@ -131,7 +131,7 @@ static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
return ret;
}
-static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+static ssize_t msm_dp_aux_cmd_fifo_rx(struct msm_dp_aux_private *aux,
struct drm_dp_aux_msg *msg)
{
u32 data;
@@ -139,20 +139,20 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
u32 i, actual_i;
u32 len = msg->size;
- dp_catalog_aux_clear_trans(aux->catalog, true);
+ msm_dp_catalog_aux_clear_trans(aux->catalog, true);
data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
data |= DP_AUX_DATA_READ; /* read */
- dp_catalog_aux_write_data(aux->catalog, data);
+ msm_dp_catalog_aux_write_data(aux->catalog, data);
dp = msg->buffer;
/* discard first byte */
- data = dp_catalog_aux_read_data(aux->catalog);
+ data = msm_dp_catalog_aux_read_data(aux->catalog);
for (i = 0; i < len; i++) {
- data = dp_catalog_aux_read_data(aux->catalog);
+ data = msm_dp_catalog_aux_read_data(aux->catalog);
*dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
@@ -163,7 +163,7 @@ static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
return i;
}
-static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
+static void msm_dp_aux_update_offset_and_segment(struct msm_dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg)
{
u32 edid_address = 0x50;
@@ -185,7 +185,7 @@ static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
}
/**
- * dp_aux_transfer_helper() - helper function for EDID read transactions
+ * msm_dp_aux_transfer_helper() - helper function for EDID read transactions
*
* @aux: DP AUX private structure
* @input_msg: input message from DRM upstream APIs
@@ -196,7 +196,7 @@ static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
* This helper function is used to fix EDID reads for non-compliant
* sinks that do not handle the i2c middle-of-transaction flag correctly.
*/
-static void dp_aux_transfer_helper(struct dp_aux_private *aux,
+static void msm_dp_aux_transfer_helper(struct msm_dp_aux_private *aux,
struct drm_dp_aux_msg *input_msg,
bool send_seg)
{
@@ -238,7 +238,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux,
helper_msg.address = segment_address;
helper_msg.buffer = &aux->segment;
helper_msg.size = 1;
- dp_aux_cmd_fifo_tx(aux, &helper_msg);
+ msm_dp_aux_cmd_fifo_tx(aux, &helper_msg);
}
/*
@@ -252,7 +252,7 @@ static void dp_aux_transfer_helper(struct dp_aux_private *aux,
helper_msg.address = input_msg->address;
helper_msg.buffer = &aux->offset;
helper_msg.size = 1;
- dp_aux_cmd_fifo_tx(aux, &helper_msg);
+ msm_dp_aux_cmd_fifo_tx(aux, &helper_msg);
end:
aux->offset += message_size;
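
The helper's segment write is the standard E-DDC dance for EDIDs larger than 256 bytes: a one-byte I2C write of the segment pointer (conventionally address 0x30) before the EDID read at 0x50. As a self-contained sketch submitted through the generic transfer hook rather than the driver's FIFO path:

/* Hedged sketch; the driver actually routes this through its own
 * msm_dp_aux_cmd_fifo_tx() instead of aux->transfer(). */
#include <drm/display/drm_dp_helper.h>

static ssize_t write_edid_segment(struct drm_dp_aux *aux, u8 segment)
{
	struct drm_dp_aux_msg msg = {
		.request = DP_AUX_I2C_WRITE,
		.address = 0x30,	/* E-DDC segment pointer */
		.buffer  = &segment,
		.size    = 1,
	};

	return aux->transfer(aux, &msg);
}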
@@ -265,15 +265,15 @@ end:
* It will call aux_reset() function to reset the AUX channel,
* if the waiting is timeout.
*/
-static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+static ssize_t msm_dp_aux_transfer(struct drm_dp_aux *msm_dp_aux,
struct drm_dp_aux_msg *msg)
{
ssize_t ret;
int const aux_cmd_native_max = 16;
int const aux_cmd_i2c_max = 128;
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
@@ -292,7 +292,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
return -EINVAL;
}
- ret = pm_runtime_resume_and_get(dp_aux->dev);
+ ret = pm_runtime_resume_and_get(msm_dp_aux->dev);
if (ret)
return ret;
@@ -313,8 +313,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
goto exit;
}
- dp_aux_update_offset_and_segment(aux, msg);
- dp_aux_transfer_helper(aux, msg, true);
+ msm_dp_aux_update_offset_and_segment(aux, msg);
+ msm_dp_aux_transfer_helper(aux, msg, true);
aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
aux->cmd_busy = true;
@@ -327,7 +327,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
aux->no_send_stop = true;
}
- ret = dp_aux_cmd_fifo_tx(aux, msg);
+ ret = msm_dp_aux_cmd_fifo_tx(aux, msg);
if (ret < 0) {
if (aux->native) {
aux->retry_cnt++;
@@ -335,14 +335,14 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
phy_calibrate(aux->phy);
}
/* reset aux if link is in connected state */
- if (dp_catalog_link_is_connected(aux->catalog))
- dp_catalog_aux_reset(aux->catalog);
+ if (msm_dp_catalog_link_is_connected(aux->catalog))
+ msm_dp_catalog_aux_reset(aux->catalog);
} else {
aux->retry_cnt = 0;
switch (aux->aux_error_num) {
case DP_AUX_ERR_NONE:
if (aux->read)
- ret = dp_aux_cmd_fifo_rx(aux, msg);
+ ret = msm_dp_aux_cmd_fifo_rx(aux, msg);
msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
break;
case DP_AUX_ERR_DEFER:
@@ -364,24 +364,24 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
exit:
mutex_unlock(&aux->mutex);
- pm_runtime_put_sync(dp_aux->dev);
+ pm_runtime_put_sync(msm_dp_aux->dev);
return ret;
}
-irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
+irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux)
{
u32 isr;
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- if (!dp_aux) {
+ if (!msm_dp_aux) {
DRM_ERROR("invalid input\n");
return IRQ_NONE;
}
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
- isr = dp_catalog_aux_get_irq(aux->catalog);
+ isr = msm_dp_catalog_aux_get_irq(aux->catalog);
/* no interrupts pending, return immediately */
if (!isr)
@@ -403,7 +403,7 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
if (isr & DP_INTR_AUX_ERROR) {
aux->aux_error_num = DP_AUX_ERR_PHY;
- dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ msm_dp_catalog_aux_clear_hw_interrupts(aux->catalog);
} else if (isr & DP_INTR_NACK_DEFER) {
aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
} else if (isr & DP_INTR_WRONG_ADDR) {
@@ -429,68 +429,68 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
return IRQ_HANDLED;
}
-void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled)
+void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
aux->enable_xfers = enabled;
}
-void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
+void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
phy_calibrate(aux->phy);
- dp_catalog_aux_reset(aux->catalog);
+ msm_dp_catalog_aux_reset(aux->catalog);
}
-void dp_aux_init(struct drm_dp_aux *dp_aux)
+void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- if (!dp_aux) {
+ if (!msm_dp_aux) {
DRM_ERROR("invalid input\n");
return;
}
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
mutex_lock(&aux->mutex);
- dp_catalog_aux_enable(aux->catalog, true);
+ msm_dp_catalog_aux_enable(aux->catalog, true);
aux->retry_cnt = 0;
aux->initted = true;
mutex_unlock(&aux->mutex);
}
-void dp_aux_deinit(struct drm_dp_aux *dp_aux)
+void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
mutex_lock(&aux->mutex);
aux->initted = false;
- dp_catalog_aux_enable(aux->catalog, false);
+ msm_dp_catalog_aux_enable(aux->catalog, false);
mutex_unlock(&aux->mutex);
}
-int dp_aux_register(struct drm_dp_aux *dp_aux)
+int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux)
{
int ret;
- if (!dp_aux) {
+ if (!msm_dp_aux) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- ret = drm_dp_aux_register(dp_aux);
+ ret = drm_dp_aux_register(msm_dp_aux);
if (ret) {
DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
ret);
@@ -500,34 +500,34 @@ int dp_aux_register(struct drm_dp_aux *dp_aux)
return 0;
}
-void dp_aux_unregister(struct drm_dp_aux *dp_aux)
+void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux)
{
- drm_dp_aux_unregister(dp_aux);
+ drm_dp_aux_unregister(msm_dp_aux);
}
-static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
+static int msm_dp_wait_hpd_asserted(struct drm_dp_aux *msm_dp_aux,
unsigned long wait_us)
{
int ret;
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
ret = pm_runtime_resume_and_get(aux->dev);
if (ret)
return ret;
- ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
+ ret = msm_dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
pm_runtime_put_sync(aux->dev);
return ret;
}
-struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
+struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog,
struct phy *phy,
bool is_edp)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
if (!catalog) {
DRM_ERROR("invalid input\n");
@@ -553,23 +553,23 @@ struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
* before registering AUX with the DRM device so that
	 * msm eDP panel can be detected by generic_edp_panel_probe().
*/
- aux->dp_aux.name = "dpu_dp_aux";
- aux->dp_aux.dev = dev;
- aux->dp_aux.transfer = dp_aux_transfer;
- aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted;
- drm_dp_aux_init(&aux->dp_aux);
+ aux->msm_dp_aux.name = "dpu_dp_aux";
+ aux->msm_dp_aux.dev = dev;
+ aux->msm_dp_aux.transfer = msm_dp_aux_transfer;
+ aux->msm_dp_aux.wait_hpd_asserted = msm_dp_wait_hpd_asserted;
+ drm_dp_aux_init(&aux->msm_dp_aux);
- return &aux->dp_aux;
+ return &aux->msm_dp_aux;
}
-void dp_aux_put(struct drm_dp_aux *dp_aux)
+void msm_dp_aux_put(struct drm_dp_aux *msm_dp_aux)
{
- struct dp_aux_private *aux;
+ struct msm_dp_aux_private *aux;
- if (!dp_aux)
+ if (!msm_dp_aux)
return;
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux = container_of(msm_dp_aux, struct msm_dp_aux_private, msm_dp_aux);
mutex_destroy(&aux->mutex);
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 4f65e892a807..39c5b4c8596a 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -9,18 +9,18 @@
#include "dp_catalog.h"
#include <drm/display/drm_dp_helper.h>
-int dp_aux_register(struct drm_dp_aux *dp_aux);
-void dp_aux_unregister(struct drm_dp_aux *dp_aux);
-irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux);
-void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled);
-void dp_aux_init(struct drm_dp_aux *dp_aux);
-void dp_aux_deinit(struct drm_dp_aux *dp_aux);
-void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
+int msm_dp_aux_register(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_unregister(struct drm_dp_aux *msm_dp_aux);
+irqreturn_t msm_dp_aux_isr(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_enable_xfers(struct drm_dp_aux *msm_dp_aux, bool enabled);
+void msm_dp_aux_init(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_deinit(struct drm_dp_aux *msm_dp_aux);
+void msm_dp_aux_reconfig(struct drm_dp_aux *msm_dp_aux);
struct phy;
-struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
+struct drm_dp_aux *msm_dp_aux_get(struct device *dev, struct msm_dp_catalog *catalog,
struct phy *phy,
bool is_edp);
-void dp_aux_put(struct drm_dp_aux *aux);
+void msm_dp_aux_put(struct drm_dp_aux *aux);
#endif /*__DP_AUX_H_*/
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 6e55cbf69674..7b7eadb2f83b 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -75,18 +75,17 @@ struct dss_io_data {
struct dss_io_region p0;
};
-struct dp_catalog_private {
+struct msm_dp_catalog_private {
struct device *dev;
struct drm_device *drm_dev;
struct dss_io_data io;
- u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
- struct dp_catalog dp_catalog;
+ struct msm_dp_catalog msm_dp_catalog;
};
-void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state)
+void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
struct dss_io_data *dss = &catalog->io;
msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb");
@@ -95,12 +94,12 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d
msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0");
}
-static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
+static inline u32 msm_dp_read_aux(struct msm_dp_catalog_private *catalog, u32 offset)
{
return readl_relaxed(catalog->io.aux.base + offset);
}
-static inline void dp_write_aux(struct dp_catalog_private *catalog,
+static inline void msm_dp_write_aux(struct msm_dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
@@ -110,12 +109,12 @@ static inline void dp_write_aux(struct dp_catalog_private *catalog,
writel(data, catalog->io.aux.base + offset);
}
-static inline u32 dp_read_ahb(const struct dp_catalog_private *catalog, u32 offset)
+static inline u32 msm_dp_read_ahb(const struct msm_dp_catalog_private *catalog, u32 offset)
{
return readl_relaxed(catalog->io.ahb.base + offset);
}
-static inline void dp_write_ahb(struct dp_catalog_private *catalog,
+static inline void msm_dp_write_ahb(struct msm_dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
@@ -125,7 +124,7 @@ static inline void dp_write_ahb(struct dp_catalog_private *catalog,
writel(data, catalog->io.ahb.base + offset);
}
-static inline void dp_write_p0(struct dp_catalog_private *catalog,
+static inline void msm_dp_write_p0(struct msm_dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
@@ -135,7 +134,7 @@ static inline void dp_write_p0(struct dp_catalog_private *catalog,
writel(data, catalog->io.p0.base + offset);
}
-static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
+static inline u32 msm_dp_read_p0(struct msm_dp_catalog_private *catalog,
u32 offset)
{
/*
@@ -145,12 +144,12 @@ static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
return readl_relaxed(catalog->io.p0.base + offset);
}
-static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset)
+static inline u32 msm_dp_read_link(struct msm_dp_catalog_private *catalog, u32 offset)
{
return readl_relaxed(catalog->io.link.base + offset);
}
-static inline void dp_write_link(struct dp_catalog_private *catalog,
+static inline void msm_dp_write_link(struct msm_dp_catalog_private *catalog,
u32 offset, u32 data)
{
/*
@@ -161,64 +160,64 @@ static inline void dp_write_link(struct dp_catalog_private *catalog,
}
/* aux related catalog functions */
-u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- return dp_read_aux(catalog, REG_DP_AUX_DATA);
+ return msm_dp_read_aux(catalog, REG_DP_AUX_DATA);
}
-int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data)
+int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_aux(catalog, REG_DP_AUX_DATA, data);
+ msm_dp_write_aux(catalog, REG_DP_AUX_DATA, data);
return 0;
}
-int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data)
+int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
+ msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
return 0;
}
-int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read)
+int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read)
{
u32 data;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
if (read) {
- data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
+ data = msm_dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
data &= ~DP_AUX_TRANS_CTRL_GO;
- dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
+ msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
} else {
- dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
+ msm_dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
}
return 0;
}
-int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
+int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
- dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
- dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
- dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+ msm_dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
+ msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+ msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+ msm_dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
return 0;
}
/**
- * dp_catalog_aux_reset() - reset AUX controller
+ * msm_dp_catalog_aux_reset() - reset AUX controller
*
- * @dp_catalog: DP catalog structure
+ * @msm_dp_catalog: DP catalog structure
*
* return: void
*
@@ -227,47 +226,47 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
* NOTE: reset AUX controller will also clear any pending HPD related interrupts
*
*/
-void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog)
{
u32 aux_ctrl;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+ aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL);
aux_ctrl |= DP_AUX_CTRL_RESET;
- dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+ msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
usleep_range(1000, 1100); /* h/w recommended delay */
aux_ctrl &= ~DP_AUX_CTRL_RESET;
- dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+ msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
}
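
The AUX reset is a plain set/settle/clear pulse on DP_AUX_CTRL_RESET with the hardware-recommended ~1 ms hold. Generalized (register accessor and bit name are stand-ins):

/* Generic reset pulse, as performed by msm_dp_catalog_aux_reset(). */
#include <linux/delay.h>
#include <linux/io.h>

static void pulse_reset(void __iomem *ctrl, u32 reset_bit)
{
	u32 v = readl(ctrl);

	writel(v | reset_bit, ctrl);
	usleep_range(1000, 1100);	/* h/w recommended delay */
	writel(v & ~reset_bit, ctrl);
}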
-void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
+void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable)
{
u32 aux_ctrl;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+ aux_ctrl = msm_dp_read_aux(catalog, REG_DP_AUX_CTRL);
if (enable) {
- dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
- dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
+ msm_dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
+ msm_dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
aux_ctrl |= DP_AUX_CTRL_ENABLE;
} else {
aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
}
- dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+ msm_dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
}
-int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
+int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog,
unsigned long wait_us)
{
u32 state;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
/* poll for hpd connected status every 2ms and timeout after wait_us */
return readl_poll_timeout(catalog->io.aux.base +
@@ -276,54 +275,17 @@ int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
min(wait_us, 2000), wait_us);
}
-static void dump_regs(void __iomem *base, int len)
+u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog)
{
- int i;
- u32 x0, x4, x8, xc;
- u32 addr_off = 0;
-
- len = DIV_ROUND_UP(len, 16);
- for (i = 0; i < len; i++) {
- x0 = readl_relaxed(base + addr_off);
- x4 = readl_relaxed(base + addr_off + 0x04);
- x8 = readl_relaxed(base + addr_off + 0x08);
- xc = readl_relaxed(base + addr_off + 0x0c);
-
- pr_info("%08x: %08x %08x %08x %08x", addr_off, x0, x4, x8, xc);
- addr_off += 16;
- }
-}
-
-void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
-{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
- struct dss_io_data *io = &catalog->io;
-
- pr_info("AHB regs\n");
- dump_regs(io->ahb.base, io->ahb.len);
-
- pr_info("AUXCLK regs\n");
- dump_regs(io->aux.base, io->aux.len);
-
- pr_info("LCLK regs\n");
- dump_regs(io->link.base, io->link.len);
-
- pr_info("P0CLK regs\n");
- dump_regs(io->p0.base, io->p0.len);
-}
-
-u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
-{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 intr, intr_ack;
- intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS);
+ intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS);
intr &= ~DP_INTERRUPT_STATUS1_MASK;
intr_ack = (intr & DP_INTERRUPT_STATUS1)
<< DP_INTERRUPT_STATUS_ACK_SHIFT;
- dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
DP_INTERRUPT_STATUS1_MASK);
return intr;
@@ -331,40 +293,40 @@ u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
}
/* controller related catalog functions */
-void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
- u32 dp_tu, u32 valid_boundary,
+void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog,
+ u32 msm_dp_tu, u32 valid_boundary,
u32 valid_boundary2)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
- dp_write_link(catalog, REG_DP_TU, dp_tu);
- dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
+ msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
+ msm_dp_write_link(catalog, REG_DP_TU, msm_dp_tu);
+ msm_dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
}
-void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state)
+void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_link(catalog, REG_DP_STATE_CTRL, state);
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL, state);
}
-void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg)
+void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 cfg)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg);
- dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
+ msm_dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
}
-void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
u32 ln_mapping;
@@ -373,71 +335,71 @@ void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog)
ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
- dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
+ msm_dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
ln_mapping);
}
-void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog,
bool enable)
{
u32 val;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- val = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ val = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
if (enable)
val |= DP_MAINLINK_CTRL_ENABLE;
else
val &= ~DP_MAINLINK_CTRL_ENABLE;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, val);
}
-void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog,
bool enable)
{
u32 mainlink_ctrl;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable);
if (enable) {
/*
* To make sure link reg writes happens before other operation,
- * dp_write_link() function uses writel()
+ * msm_dp_write_link() function uses writel()
*/
- mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
DP_MAINLINK_CTRL_ENABLE);
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
DP_MAINLINK_FB_BOUNDARY_SEL);
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
} else {
- mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
}
}
-void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog,
u32 colorimetry_cfg,
u32 test_bits_depth)
{
u32 misc_val;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+ misc_val = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
/* clear bpp bits */
misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
@@ -447,27 +409,27 @@ void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val);
- dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
+ msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
}
-void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog)
{
u32 mainlink_ctrl, hw_revision;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
- hw_revision = dp_catalog_hw_revision(dp_catalog);
+ hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog);
if (hw_revision >= DP_HW_VERSION_1_2)
mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_SDE_PERIPH_UPDATE;
else
mainlink_ctrl |= DP_MAINLINK_FLUSH_MODE_UPDATE_SDP;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
}
-void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog,
u32 rate, u32 stream_rate_khz,
bool is_ycbcr_420)
{
@@ -478,8 +440,8 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
u32 const link_rate_hbr3 = 810000;
unsigned long den, num;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
if (rate == link_rate_hbr3)
pixel_div = 6;
@@ -522,22 +484,22 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
nvid *= 3;
drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
- dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
- dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
- dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
+ msm_dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
+ msm_dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
+ msm_dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
}
-int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
+int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog,
u32 state_bit)
{
int bit, ret;
u32 data;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
bit = BIT(state_bit - 1);
drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
- dp_catalog_ctrl_state_ctrl(dp_catalog, bit);
+ msm_dp_catalog_ctrl_state_ctrl(msm_dp_catalog, bit);
bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
@@ -554,25 +516,25 @@ int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
}
/**
- * dp_catalog_hw_revision() - retrieve DP hw revision
+ * msm_dp_catalog_hw_revision() - retrieve DP hw revision
*
- * @dp_catalog: DP catalog structure
+ * @msm_dp_catalog: DP catalog structure
*
* Return: DP controller hw revision
*
*/
-u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog)
{
- const struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ const struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- return dp_read_ahb(catalog, REG_DP_HW_VERSION);
+ return msm_dp_read_ahb(catalog, REG_DP_HW_VERSION);
}
/**
- * dp_catalog_ctrl_reset() - reset DP controller
+ * msm_dp_catalog_ctrl_reset() - reset DP controller
*
- * @dp_catalog: DP catalog structure
+ * @msm_dp_catalog: DP catalog structure
*
* return: void
*
@@ -581,28 +543,28 @@ u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog)
* NOTE: reset DP controller will also clear any pending HPD related interrupts
*
*/
-void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog)
{
u32 sw_reset;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET);
+ sw_reset = msm_dp_read_ahb(catalog, REG_DP_SW_RESET);
sw_reset |= DP_SW_RESET;
- dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+ msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
usleep_range(1000, 1100); /* h/w recommended delay */
sw_reset &= ~DP_SW_RESET;
- dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+ msm_dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
}
-bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
+bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog)
{
u32 data;
int ret;
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
/* Poll for mainlink ready status */
ret = readl_poll_timeout(catalog->io.link.base +
@@ -617,96 +579,96 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
return true;
}
-void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog,
bool enable)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
if (enable) {
- dp_write_ahb(catalog, REG_DP_INTR_STATUS,
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS,
DP_INTERRUPT_STATUS1_MASK);
- dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
DP_INTERRUPT_STATUS2_MASK);
} else {
- dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
- dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
}
}
-void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog,
u32 intr_mask, bool en)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
+ u32 config = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
config = (en ? config | intr_mask : config & ~intr_mask);
drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n",
intr_mask, config);
- dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
config & DP_DP_HPD_INT_MASK);
}
-void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+ u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
/* Configure REFTIMER and enable it */
reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
- dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
/* Enable HPD */
- dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
}
-void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+ u32 reftimer = msm_dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE;
- dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
- dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
}
-static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog)
+static void msm_dp_catalog_enable_sdp(struct msm_dp_catalog_private *catalog)
{
/* trigger sdp */
- dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP);
- dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, UPDATE_SDP);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x0);
}
-void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 config;
/* enable PSR1 function */
- config = dp_read_link(catalog, REG_PSR_CONFIG);
+ config = msm_dp_read_link(catalog, REG_PSR_CONFIG);
config |= PSR1_SUPPORTED;
- dp_write_link(catalog, REG_PSR_CONFIG, config);
+ msm_dp_write_link(catalog, REG_PSR_CONFIG, config);
- dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
- dp_catalog_enable_sdp(catalog);
+ msm_dp_write_ahb(catalog, REG_DP_INTR_MASK4, DP_INTERRUPT_MASK4);
+ msm_dp_catalog_enable_sdp(catalog);
}
-void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter)
+void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 cmd;
- cmd = dp_read_link(catalog, REG_PSR_CMD);
+ cmd = msm_dp_read_link(catalog, REG_PSR_CMD);
cmd &= ~(PSR_ENTER | PSR_EXIT);
@@ -715,17 +677,17 @@ void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter)
else
cmd |= PSR_EXIT;
- dp_catalog_enable_sdp(catalog);
- dp_write_link(catalog, REG_PSR_CMD, cmd);
+ msm_dp_catalog_enable_sdp(catalog);
+ msm_dp_write_link(catalog, REG_PSR_CMD, cmd);
}
-u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 status;
- status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+ status = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status);
status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
@@ -733,16 +695,16 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
return status;
}
-u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
int isr, mask;
- isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
- dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
+ isr = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+ msm_dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
(isr & DP_DP_HPD_INT_MASK));
- mask = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
+ mask = msm_dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
/*
* We only want to return interrupts that are unmasked to the caller.
@@ -754,115 +716,115 @@ u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
return isr & (mask | ~DP_DP_HPD_INT_MASK);
}
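The return expression above keeps a pending bit only if it sits inside DP_DP_HPD_INT_MASK and is currently unmasked, while passing through everything outside that field (such as the HPD state status bits). A small worked example with hypothetical values:

/*
 * Worked example of `isr & (mask | ~INT_MASK)`; all values invented.
 * Bits inside INT_MASK survive only if also set in `mask`; bits
 * outside INT_MASK (status information) always survive.
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	const uint32_t INT_MASK = 0x0000000F;  /* hypothetical interrupt field */
	uint32_t isr  = 0x000001F3;            /* pending bits + status bits   */
	uint32_t mask = 0x00000001;            /* only bit 0 unmasked          */

	uint32_t ret = isr & (mask | ~INT_MASK);
	assert(ret == 0x1F1);                  /* bit 1 (masked) was dropped */
	return 0;
}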
-u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 intr, intr_ack;
- intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS4);
+ intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS4);
intr_ack = (intr & DP_INTERRUPT_STATUS4)
<< DP_INTERRUPT_STATUS_ACK_SHIFT;
- dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack);
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS4, intr_ack);
return intr;
}
-int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog)
+int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 intr, intr_ack;
- intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
+ intr = msm_dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
intr &= ~DP_INTERRUPT_STATUS2_MASK;
intr_ack = (intr & DP_INTERRUPT_STATUS2)
<< DP_INTERRUPT_STATUS_ACK_SHIFT;
- dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+ msm_dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
intr_ack | DP_INTERRUPT_STATUS2_MASK);
return intr;
}
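The write above acknowledges pending bits by mirroring them DP_INTERRUPT_STATUS_ACK_SHIFT positions higher in the same register, while re-arming the mask bits in the same write. A minimal sketch of that ack-by-shift convention, assuming a shift of 1 purely for illustration:

/*
 * Sketch of the ack-by-shift convention used above: a status bit at
 * position n is acknowledged by writing 1 at position n + ACK_SHIFT
 * of the same register. The shift value here is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define ACK_SHIFT 1	/* hypothetical; the driver has its own constant */

int main(void)
{
	uint32_t pending = 0x5;			/* bits 0 and 2 fired  */
	uint32_t ack = pending << ACK_SHIFT;	/* write-1-to-ack bits */

	printf("write %#x to acknowledge %#x\n",
	       (unsigned)ack, (unsigned)pending);
	return 0;
}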
-void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_ahb(catalog, REG_DP_PHY_CTRL,
+ msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL,
DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
usleep_range(1000, 1100); /* h/w recommended delay */
- dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
+ msm_dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
}
-void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog,
u32 pattern)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 value = 0x0;
/* Make sure to clear the current pattern before starting a new one */
- dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern);
switch (pattern) {
case DP_PHY_TEST_PATTERN_D10_2:
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
break;
case DP_PHY_TEST_PATTERN_ERROR_COUNT:
value &= ~(1 << 16);
- dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
value |= SCRAMBLER_RESET_COUNT_VALUE;
- dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
- dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
break;
case DP_PHY_TEST_PATTERN_PRBS7:
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_PRBS7);
break;
case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
/* 00111110000011111000001111100000 */
- dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
+ msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
0x3E0F83E0);
/* 00001111100000111110000011111000 */
- dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
+ msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
0x0F83E0F8);
/* 1111100000111110 */
- dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
+ msm_dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
0x0000F83E);
break;
case DP_PHY_TEST_PATTERN_CP2520:
- value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
value = DP_HBR2_ERM_PATTERN;
- dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
value |= SCRAMBLER_RESET_COUNT_VALUE;
- dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ msm_dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
value);
- dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
- value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ value = msm_dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
value |= DP_MAINLINK_CTRL_ENABLE;
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
break;
case DP_PHY_TEST_PATTERN_SEL_MASK:
- dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
DP_MAINLINK_CTRL_ENABLE);
- dp_write_link(catalog, REG_DP_STATE_CTRL,
+ msm_dp_write_link(catalog, REG_DP_STATE_CTRL,
DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
break;
default:
@@ -872,94 +834,94 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
}
}
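Judging by the three register values and the binary comments, the 80-bit custom pattern is written low word first: bits 0..31 to REG0, bits 32..63 to REG1, and the top 16 bits to REG2. A standalone sketch of that packing (the word ordering is inferred from the values, not stated in the patch):

/*
 * Packing an 80-bit PHY test pattern into 32-bit register words, low
 * word first, reproducing the three writes above; the last register
 * carries only the top 16 bits.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t low64 = 0x0F83E0F83E0F83E0ULL;	/* pattern bits 0..63  */
	uint16_t hi16  = 0xF83E;		/* pattern bits 64..79 */

	uint32_t reg0 = (uint32_t)(low64 & 0xFFFFFFFF);	/* 0x3E0F83E0 */
	uint32_t reg1 = (uint32_t)(low64 >> 32);	/* 0x0F83E0F8 */
	uint32_t reg2 = hi16;				/* 0x0000F83E */

	printf("%#010x %#010x %#010x\n",
	       (unsigned)reg0, (unsigned)reg1, (unsigned)reg2);
	return 0;
}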
-u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog)
+u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- return dp_read_link(catalog, REG_DP_MAINLINK_READY);
+ return msm_dp_read_link(catalog, REG_DP_MAINLINK_READY);
}
/* panel related catalog functions */
-int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total,
- u32 sync_start, u32 width_blanking, u32 dp_active)
+int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total,
+ u32 sync_start, u32 width_blanking, u32 msm_dp_active)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 reg;
- dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total);
- dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
- dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
- dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_active);
+ msm_dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, total);
+ msm_dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, sync_start);
+ msm_dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, width_blanking);
+ msm_dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, msm_dp_active);
- reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
+ reg = msm_dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
- if (dp_catalog->wide_bus_en)
+ if (msm_dp_catalog->wide_bus_en)
reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
else
reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
- DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", dp_catalog->wide_bus_en, reg);
+ DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", msm_dp_catalog->wide_bus_en, reg);
- dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg);
return 0;
}
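The HOR_VER register names suggest each timing word carries the horizontal value in the low half-word and the vertical value in the high half-word; that layout is an assumption here, since the callers that build total, sync_start and width_blanking are outside this hunk. A hedged sketch of such a packer:

/*
 * Hedged sketch of how a caller might pack the timing words consumed
 * above; the (vertical << 16) | horizontal layout is an assumption
 * drawn from the HOR_VER register names, not from this patch.
 */
#include <stdint.h>

static uint32_t pack_hv(uint16_t h, uint16_t v)
{
	return ((uint32_t)v << 16) | h;
}

/*
 * e.g. total     = pack_hv(htotal, vtotal);
 *      dp_active = pack_hv(hdisplay, vdisplay);
 */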
-static void dp_catalog_panel_send_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp)
+static void msm_dp_catalog_panel_send_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 header[2];
u32 val;
int i;
- catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
- dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
+ msm_dp_utils_pack_sdp_header(&vsc_sdp->sdp_header, header);
- dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]);
- dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]);
+ msm_dp_write_link(catalog, MMSS_DP_GENERIC0_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_GENERIC0_1, header[1]);
for (i = 0; i < sizeof(vsc_sdp->db); i += 4) {
val = ((vsc_sdp->db[i]) | (vsc_sdp->db[i + 1] << 8) | (vsc_sdp->db[i + 2] << 16) |
(vsc_sdp->db[i + 3] << 24));
- dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val);
+ msm_dp_write_link(catalog, MMSS_DP_GENERIC0_2 + i, val);
}
}
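The loop above packs the VSC SDP payload little-endian, four bytes per 32-bit register write. A standalone equivalent with invented payload bytes:

/*
 * Standalone version of the little-endian payload packing above;
 * the payload bytes are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t db[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

	for (unsigned i = 0; i < sizeof(db); i += 4) {
		uint32_t val = db[i] | (db[i + 1] << 8) |
			       (db[i + 2] << 16) |
			       ((uint32_t)db[i + 3] << 24);
		printf("word %u = %#010x\n", i / 4, (unsigned)val);
		/* prints 0x44332211 then 0x88776655 */
	}
	return 0;
}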
-static void dp_catalog_panel_update_sdp(struct dp_catalog *dp_catalog)
+static void msm_dp_catalog_panel_update_sdp(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 hw_revision;
- catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
- hw_revision = dp_catalog_hw_revision(dp_catalog);
+ hw_revision = msm_dp_catalog_hw_revision(msm_dp_catalog);
if (hw_revision < DP_HW_VERSION_1_2 && hw_revision >= DP_HW_VERSION_1_0) {
- dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01);
- dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x01);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG3, 0x00);
}
}
-void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp)
+void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 cfg, cfg2, misc;
- catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
- cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
- cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- misc = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+ cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
+ cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+ misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
cfg |= GEN0_SDP_EN;
- dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
cfg2 |= GENERIC0_SDPSIZE_VALID;
- dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
- dp_catalog_panel_send_vsc_sdp(dp_catalog, vsc_sdp);
+ msm_dp_catalog_panel_send_vsc_sdp(msm_dp_catalog, vsc_sdp);
/* indicates presence of VSC (BIT(6) of MISC1) */
misc |= DP_MISC1_VSC_SDP;
@@ -967,27 +929,27 @@ void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sd
drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=1\n");
pr_debug("misc settings = 0x%x\n", misc);
- dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
+ msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
- dp_catalog_panel_update_sdp(dp_catalog);
+ msm_dp_catalog_panel_update_sdp(msm_dp_catalog);
}
-void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 cfg, cfg2, misc;
- catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog, struct msm_dp_catalog_private, msm_dp_catalog);
- cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
- cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
- misc = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+ cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
+ cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+ misc = msm_dp_read_link(catalog, REG_DP_MISC1_MISC0);
cfg &= ~GEN0_SDP_EN;
- dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, cfg);
cfg2 &= ~GENERIC0_SDPSIZE_VALID;
- dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, cfg2);
/* switch back to MSA */
misc &= ~DP_MISC1_VSC_SDP;
@@ -995,16 +957,16 @@ void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog)
drm_dbg_dp(catalog->drm_dev, "vsc sdp enable=0\n");
pr_debug("misc settings = 0x%x\n", misc);
- dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
+ msm_dp_write_link(catalog, REG_DP_MISC1_MISC0, misc);
- dp_catalog_panel_update_sdp(dp_catalog);
+ msm_dp_catalog_panel_update_sdp(msm_dp_catalog);
}
-void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
struct drm_display_mode *drm_mode)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
u32 hsync_period, vsync_period;
u32 display_v_start, display_v_end;
u32 hsync_start_x, hsync_end_x;
@@ -1036,49 +998,48 @@ void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
display_hctl = (hsync_end_x << 16) | hsync_start_x;
- dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
- dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
- dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
hsync_period);
- dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
hsync_period);
- dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
- dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
- dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
- dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
- dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
- dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
-
- dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
+ msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
+ msm_dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+ msm_dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
+
+ msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
DP_TPG_CHECKERED_RECT_PATTERN);
- dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
+ msm_dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
DP_TPG_VIDEO_CONFIG_BPP_8BIT |
DP_TPG_VIDEO_CONFIG_RGB);
- dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
+ msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
DP_BIST_ENABLE_DPBIST_EN);
- dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
+ msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
DP_TIMING_ENGINE_EN_EN);
drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__);
}
-void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
- dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
- dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
+ msm_dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+ msm_dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
+ msm_dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
}
-static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
+static void __iomem *msm_dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
{
struct resource *res;
void __iomem *base;
@@ -1090,21 +1051,21 @@ static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *l
return base;
}
-static int dp_catalog_get_io(struct dp_catalog_private *catalog)
+static int msm_dp_catalog_get_io(struct msm_dp_catalog_private *catalog)
{
struct platform_device *pdev = to_platform_device(catalog->dev);
struct dss_io_data *dss = &catalog->io;
- dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
+ dss->ahb.base = msm_dp_ioremap(pdev, 0, &dss->ahb.len);
if (IS_ERR(dss->ahb.base))
return PTR_ERR(dss->ahb.base);
- dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
+ dss->aux.base = msm_dp_ioremap(pdev, 1, &dss->aux.len);
if (IS_ERR(dss->aux.base)) {
/*
* The initial binding had a single reg, but in order to
* support variation in the sub-region sizes this was split.
- * dp_ioremap() will fail with -EINVAL here if only a single
+ * msm_dp_ioremap() will fail with -EINVAL here if only a single
* reg is specified, so fill in the sub-region offsets and
* lengths based on this single region.
*/
@@ -1126,13 +1087,13 @@ static int dp_catalog_get_io(struct dp_catalog_private *catalog)
return PTR_ERR(dss->aux.base);
}
} else {
- dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
+ dss->link.base = msm_dp_ioremap(pdev, 2, &dss->link.len);
if (IS_ERR(dss->link.base)) {
DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
return PTR_ERR(dss->link.base);
}
- dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
+ dss->p0.base = msm_dp_ioremap(pdev, 3, &dss->p0.len);
if (IS_ERR(dss->p0.base)) {
DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
return PTR_ERR(dss->p0.base);
@@ -1142,9 +1103,9 @@ static int dp_catalog_get_io(struct dp_catalog_private *catalog)
return 0;
}
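When the binding exposes only a single reg, the fallback path above carves the AUX, link and P0 windows out of the one AHB mapping; the actual offsets are elided by the hunk. A hedged userspace-style sketch of that carving, with invented offsets and lengths:

/*
 * Sketch of carving sub-regions out of one mapping, as the fallback
 * path above does. All offsets and lengths are invented; the real
 * values are elided by the hunk.
 */
#include <stddef.h>

struct region { char *base; size_t len; };

static void split_single_region(struct region *ahb, struct region *aux,
				struct region *link, struct region *p0)
{
	aux->base  = ahb->base + 0x200;   /* hypothetical offset */
	aux->len   = 0x200;
	link->base = ahb->base + 0x400;   /* hypothetical offset */
	link->len  = 0x400;
	p0->base   = ahb->base + 0x1000;  /* hypothetical offset */
	p0->len    = 0x400;
	ahb->len   = 0x200;               /* shrink ahb to its own window */
}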
-struct dp_catalog *dp_catalog_get(struct device *dev)
+struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
int ret;
catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
@@ -1153,78 +1114,115 @@ struct dp_catalog *dp_catalog_get(struct device *dev)
catalog->dev = dev;
- ret = dp_catalog_get_io(catalog);
+ ret = msm_dp_catalog_get_io(catalog);
if (ret)
return ERR_PTR(ret);
- return &catalog->dp_catalog;
+ return &catalog->msm_dp_catalog;
}
-u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header)
+void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
{
- struct dp_catalog_private *catalog;
- u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
+
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_1, header[1]);
+}
+
+void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
+{
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- sdp_map = catalog->audio_map;
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
- return dp_read_link(catalog, sdp_map[sdp][header]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_1, header[1]);
}
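msm_dp_utils_pack_sdp_header() condenses the four SDP header bytes into two register words; as implemented in dp_utils.c, each header byte is followed by its computed parity byte, so HB0/HB1 land in word 0 and HB2/HB3 in word 1. A sketch of that layout, with the parity computation omitted:

/*
 * Sketch of the two-word header layout produced by
 * msm_dp_utils_pack_sdp_header(): byte lanes alternate header byte
 * and its parity byte. Parity calculation is omitted here.
 */
#include <stdint.h>

static uint32_t pack_pair(uint8_t hb_lo, uint8_t par_lo,
			  uint8_t hb_hi, uint8_t par_hi)
{
	return hb_lo | (uint32_t)par_lo << 8 |
	       (uint32_t)hb_hi << 16 | (uint32_t)par_hi << 24;
}

/*
 * header[0] = pack_pair(HB0, parity(HB0), HB1, parity(HB1));
 * header[1] = pack_pair(HB2, parity(HB2), HB3, parity(HB3));
 */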
-void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header,
- u32 data)
+void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
{
- struct dp_catalog_private *catalog;
- u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- if (!dp_catalog)
- return;
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_1, header[1]);
+}
- sdp_map = catalog->audio_map;
+void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
+{
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ u32 header[2];
- dp_write_link(catalog, sdp_map[sdp][header], data);
+ msm_dp_utils_pack_sdp_header(sdp_hdr, header);
+
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_1, header[1]);
}
-void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog, u32 select)
+void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
+ struct dp_sdp_header tmp = *sdp_hdr;
+ u32 header[2];
+ u32 reg;
+
+ /* XXX: is it necessary to preserve this field? */
+ reg = msm_dp_read_link(catalog, MMSS_DP_AUDIO_ISRC_1);
+ tmp.HB3 = FIELD_GET(HEADER_3_MASK, reg);
+
+ msm_dp_utils_pack_sdp_header(&tmp, header);
+
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_0, header[0]);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_1, header[1]);
+}
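FIELD_GET() from <linux/bitfield.h> extracts a contiguous field named by its mask, so the read above preserves whatever was already sitting in the HB3 byte lane of the ISRC register. A minimal usage sketch; the mask placement shown is hypothetical:

/*
 * Kernel-side sketch of FIELD_GET() usage as in the read above. The
 * mask placement is hypothetical; HEADER_3_MASK in the driver names
 * the real HB3 byte lane.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_HB3_MASK GENMASK(23, 16)	/* hypothetical placement */

static inline u8 example_extract_hb3(u32 reg)
{
	return FIELD_GET(EXAMPLE_HB3_MASK, reg);
}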
+
+void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select)
+{
+ struct msm_dp_catalog_private *catalog;
u32 acr_ctrl;
- if (!dp_catalog)
+ if (!msm_dp_catalog)
return;
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n",
select, acr_ctrl);
- dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
-void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable)
+void msm_dp_catalog_audio_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 audio_ctrl;
- if (!dp_catalog)
+ if (!msm_dp_catalog)
return;
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
+ audio_ctrl = msm_dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
if (enable)
audio_ctrl |= BIT(0);
@@ -1233,24 +1231,24 @@ void dp_catalog_audio_enable(struct dp_catalog *dp_catalog, bool enable)
drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
- dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
+ msm_dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
/* make sure audio engine is disabled */
wmb();
}
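The wmb() above orders the MMIO write against whatever the caller does next, so the audio engine is really off (or on) before, for example, clocks are changed. A minimal sketch of the pattern, with the register offset invented:

/*
 * Kernel-side sketch of the write-then-barrier pattern above; the
 * register offset is invented for illustration.
 */
#include <linux/io.h>

static void example_audio_off(void __iomem *base)
{
	writel(0, base + 0x0);	/* hypothetical audio config register */
	/* make sure the disable reaches the device before later writes */
	wmb();
}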
-void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
+void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 sdp_cfg = 0;
u32 sdp_cfg2 = 0;
- if (!dp_catalog)
+ if (!msm_dp_catalog)
return;
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
+ sdp_cfg = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG);
/* AUDIO_TIMESTAMP_SDP_EN */
sdp_cfg |= BIT(1);
/* AUDIO_STREAM_SDP_EN */
@@ -1264,9 +1262,9 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
- dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
- sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+ sdp_cfg2 = msm_dp_read_link(catalog, MMSS_DP_SDP_CFG2);
/* IFRM_REGSRC -> Do not use reg values */
sdp_cfg2 &= ~BIT(0);
/* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
@@ -1274,62 +1272,21 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
- dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
-}
-
-void dp_catalog_audio_init(struct dp_catalog *dp_catalog)
-{
- struct dp_catalog_private *catalog;
-
- static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
- {
- MMSS_DP_AUDIO_STREAM_0,
- MMSS_DP_AUDIO_STREAM_1,
- MMSS_DP_AUDIO_STREAM_1,
- },
- {
- MMSS_DP_AUDIO_TIMESTAMP_0,
- MMSS_DP_AUDIO_TIMESTAMP_1,
- MMSS_DP_AUDIO_TIMESTAMP_1,
- },
- {
- MMSS_DP_AUDIO_INFOFRAME_0,
- MMSS_DP_AUDIO_INFOFRAME_1,
- MMSS_DP_AUDIO_INFOFRAME_1,
- },
- {
- MMSS_DP_AUDIO_COPYMANAGEMENT_0,
- MMSS_DP_AUDIO_COPYMANAGEMENT_1,
- MMSS_DP_AUDIO_COPYMANAGEMENT_1,
- },
- {
- MMSS_DP_AUDIO_ISRC_0,
- MMSS_DP_AUDIO_ISRC_1,
- MMSS_DP_AUDIO_ISRC_1,
- },
- };
-
- if (!dp_catalog)
- return;
-
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
-
- catalog->audio_map = sdp_map;
+ msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
-void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_level)
+void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level)
{
- struct dp_catalog_private *catalog;
+ struct msm_dp_catalog_private *catalog;
u32 mainlink_levels;
- if (!dp_catalog)
+ if (!msm_dp_catalog)
return;
- catalog = container_of(dp_catalog,
- struct dp_catalog_private, dp_catalog);
+ catalog = container_of(msm_dp_catalog,
+ struct msm_dp_catalog_private, msm_dp_catalog);
- mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
+ mainlink_levels = msm_dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
mainlink_levels &= 0xFE0;
mainlink_levels |= safe_to_exit_level;
@@ -1337,5 +1294,5 @@ void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog, u32 safe_to_exit_
"mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
mainlink_levels, safe_to_exit_level);
- dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
+ msm_dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
}
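The constant 0xFE0 clears bits 4:0, which this code treats as the safe-to-exit level field, while preserving bits 11:5. A worked example with an invented starting value:

/*
 * Worked example of the field update above: 0xFE0 clears bits 4:0
 * before the new safe-to-exit level is ORed in.
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t levels = 0xABC;	/* invented current value        */

	levels &= 0xFE0;		/* 0xAA0: low 5 bits cleared     */
	levels |= 0x5;			/* insert new safe-to-exit level */
	assert(levels == 0xAA5);
	return 0;
}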
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 4679d50b8c73..6678b0ac9a67 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -31,98 +31,83 @@
#define DP_HW_VERSION_1_0 0x10000000
#define DP_HW_VERSION_1_2 0x10020000
-enum dp_catalog_audio_sdp_type {
- DP_AUDIO_SDP_STREAM,
- DP_AUDIO_SDP_TIMESTAMP,
- DP_AUDIO_SDP_INFOFRAME,
- DP_AUDIO_SDP_COPYMANAGEMENT,
- DP_AUDIO_SDP_ISRC,
- DP_AUDIO_SDP_MAX,
-};
-
-enum dp_catalog_audio_header_type {
- DP_AUDIO_SDP_HEADER_1,
- DP_AUDIO_SDP_HEADER_2,
- DP_AUDIO_SDP_HEADER_3,
- DP_AUDIO_SDP_HEADER_MAX,
-};
-
-struct dp_catalog {
+struct msm_dp_catalog {
bool wide_bus_en;
};
/* Debug module */
-void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state);
+void msm_dp_catalog_snapshot(struct msm_dp_catalog *msm_dp_catalog, struct msm_disp_state *disp_state);
/* AUX APIs */
-u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog);
-int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog, u32 data);
-int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog, u32 data);
-int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
-int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
-void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
-void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
-int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
+u32 msm_dp_catalog_aux_read_data(struct msm_dp_catalog *msm_dp_catalog);
+int msm_dp_catalog_aux_write_data(struct msm_dp_catalog *msm_dp_catalog, u32 data);
+int msm_dp_catalog_aux_write_trans(struct msm_dp_catalog *msm_dp_catalog, u32 data);
+int msm_dp_catalog_aux_clear_trans(struct msm_dp_catalog *msm_dp_catalog, bool read);
+int msm_dp_catalog_aux_clear_hw_interrupts(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_aux_reset(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_aux_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable);
+int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_catalog,
unsigned long wait_us);
-u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
+u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog);
/* DP Controller APIs */
-void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state);
-void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config);
-void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable);
-void dp_catalog_ctrl_psr_mainlink_enable(struct dp_catalog *dp_catalog, bool enable);
-void dp_catalog_setup_peripheral_flush(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
-void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
+void msm_dp_catalog_ctrl_state_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 state);
+void msm_dp_catalog_ctrl_config_ctrl(struct msm_dp_catalog *msm_dp_catalog, u32 config);
+void msm_dp_catalog_ctrl_lane_mapping(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_mainlink_ctrl(struct msm_dp_catalog *msm_dp_catalog, bool enable);
+void msm_dp_catalog_ctrl_psr_mainlink_enable(struct msm_dp_catalog *msm_dp_catalog, bool enable);
+void msm_dp_catalog_setup_peripheral_flush(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_config_misc(struct msm_dp_catalog *msm_dp_catalog, u32 cc, u32 tb);
+void msm_dp_catalog_ctrl_config_msa(struct msm_dp_catalog *msm_dp_catalog, u32 rate,
u32 stream_rate_khz, bool is_ycbcr_420);
-int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern);
-u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
-bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
-void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+int msm_dp_catalog_ctrl_set_pattern_state_bit(struct msm_dp_catalog *msm_dp_catalog, u32 pattern);
+u32 msm_dp_catalog_hw_revision(const struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_reset(struct msm_dp_catalog *msm_dp_catalog);
+bool msm_dp_catalog_ctrl_mainlink_ready(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_enable_irq(struct msm_dp_catalog *msm_dp_catalog, bool enable);
+void msm_dp_catalog_hpd_config_intr(struct msm_dp_catalog *msm_dp_catalog,
u32 intr_mask, bool en);
-void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter);
-u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog);
-u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
-int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog);
-u32 dp_catalog_ctrl_read_psr_interrupt_status(struct dp_catalog *dp_catalog);
-void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
- u32 dp_tu, u32 valid_boundary,
+void msm_dp_catalog_ctrl_hpd_enable(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_hpd_disable(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_config_psr(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_set_psr(struct msm_dp_catalog *msm_dp_catalog, bool enter);
+u32 msm_dp_catalog_link_is_connected(struct msm_dp_catalog *msm_dp_catalog);
+u32 msm_dp_catalog_hpd_get_intr_status(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_phy_reset(struct msm_dp_catalog *msm_dp_catalog);
+int msm_dp_catalog_ctrl_get_interrupt(struct msm_dp_catalog *msm_dp_catalog);
+u32 msm_dp_catalog_ctrl_read_psr_interrupt_status(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_ctrl_update_transfer_unit(struct msm_dp_catalog *msm_dp_catalog,
+ u32 msm_dp_tu, u32 valid_boundary,
u32 valid_boundary2);
-void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+void msm_dp_catalog_ctrl_send_phy_pattern(struct msm_dp_catalog *msm_dp_catalog,
u32 pattern);
-u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog);
+u32 msm_dp_catalog_ctrl_read_phy_pattern(struct msm_dp_catalog *msm_dp_catalog);
/* DP Panel APIs */
-int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog, u32 total,
- u32 sync_start, u32 width_blanking, u32 dp_active);
-void dp_catalog_panel_enable_vsc_sdp(struct dp_catalog *dp_catalog, struct dp_sdp *vsc_sdp);
-void dp_catalog_panel_disable_vsc_sdp(struct dp_catalog *dp_catalog);
-void dp_catalog_dump_regs(struct dp_catalog *dp_catalog);
-void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 total,
+ u32 sync_start, u32 width_blanking, u32 msm_dp_active);
+void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp);
+void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog);
+void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
struct drm_display_mode *drm_mode);
-void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog);
+void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
-struct dp_catalog *dp_catalog_get(struct device *dev);
+struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev);
/* DP Audio APIs */
-u32 dp_catalog_audio_get_header(struct dp_catalog *dp_catalog,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header);
-void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog,
- enum dp_catalog_audio_sdp_type sdp,
- enum dp_catalog_audio_header_type header,
- u32 data);
-void dp_catalog_audio_config_acr(struct dp_catalog *catalog, u32 select);
-void dp_catalog_audio_enable(struct dp_catalog *catalog, bool enable);
-void dp_catalog_audio_config_sdp(struct dp_catalog *catalog);
-void dp_catalog_audio_init(struct dp_catalog *catalog);
-void dp_catalog_audio_sfe_level(struct dp_catalog *catalog, u32 safe_to_exit_level);
+void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
+ struct dp_sdp_header *sdp_hdr);
+void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select);
+void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable);
+void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog);
+void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *catalog, u32 safe_to_exit_level);
#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index f342fc5ae41e..9c463ae2f8fa 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -40,7 +40,7 @@ enum {
DP_TRAINING_2,
};
-struct dp_tu_calc_input {
+struct msm_dp_tu_calc_input {
u64 lclk; /* 162, 270, 540 and 810 */
u64 pclk_khz; /* in KHz */
u64 hactive; /* active h-width */
@@ -55,7 +55,7 @@ struct dp_tu_calc_input {
int num_of_dsc_slices; /* number of slices per line */
};
-struct dp_vc_tu_mapping_table {
+struct msm_dp_vc_tu_mapping_table {
u32 vic;
u8 lanes;
u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
@@ -69,14 +69,14 @@ struct dp_vc_tu_mapping_table {
u8 tu_size_minus1;
};
-struct dp_ctrl_private {
- struct dp_ctrl dp_ctrl;
+struct msm_dp_ctrl_private {
+ struct msm_dp_ctrl msm_dp_ctrl;
struct drm_device *drm_dev;
struct device *dev;
struct drm_dp_aux *aux;
- struct dp_panel *panel;
- struct dp_link *link;
- struct dp_catalog *catalog;
+ struct msm_dp_panel *panel;
+ struct msm_dp_link *link;
+ struct msm_dp_catalog *catalog;
struct phy *phy;
@@ -99,8 +99,8 @@ struct dp_ctrl_private {
bool stream_clks_on;
};
-static int dp_aux_link_configure(struct drm_dp_aux *aux,
- struct dp_link_info *link)
+static int msm_dp_aux_link_configure(struct drm_dp_aux *aux,
+ struct msm_dp_link_info *link)
{
u8 values[2];
int err;
@@ -118,14 +118,14 @@ static int dp_aux_link_configure(struct drm_dp_aux *aux,
return 0;
}
-void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
reinit_completion(&ctrl->idle_comp);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
if (!wait_for_completion_timeout(&ctrl->idle_comp,
IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
@@ -134,7 +134,7 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "mainlink off\n");
}
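msm_dp_ctrl_push_idle() uses the standard completion pattern: re-arm, kick the hardware, then sleep with a timeout until the interrupt handler signals idle. A minimal sketch of that pattern with the kernel completion API (the IRQ side is assumed; it is not part of this hunk):

/*
 * Sketch of the reinit/wait pattern used above. The interrupt
 * handler side is assumed, not shown in this hunk.
 */
#include <linux/completion.h>
#include <linux/errno.h>

struct example_ctx {
	struct completion idle_comp;
};

static int example_wait_idle(struct example_ctx *ctx, unsigned long tmo)
{
	reinit_completion(&ctx->idle_comp);
	/* ... kick the hardware into the idle state here ... */
	if (!wait_for_completion_timeout(&ctx->idle_comp, tmo))
		return -ETIMEDOUT;
	return 0;
}

/* the interrupt handler signals with: complete(&ctx->idle_comp); */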
-static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_config_ctrl(struct msm_dp_ctrl_private *ctrl)
{
u32 config = 0, tbd;
const u8 *dpcd = ctrl->panel->dpcd;
@@ -142,15 +142,15 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
/* Default-> LSCLK DIV: 1/4 LCLK */
config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
- if (ctrl->panel->dp_mode.out_fmt_is_yuv_420)
+ if (ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420)
config |= DP_CONFIGURATION_CTRL_RGB_YUV; /* YUV420 */
/* Scrambler reset enable */
if (drm_dp_alternate_scrambler_reset_cap(dpcd))
config |= DP_CONFIGURATION_CTRL_ASSR;
- tbd = dp_link_get_test_bits_depth(ctrl->link,
- ctrl->panel->dp_mode.bpp);
+ tbd = msm_dp_link_get_test_bits_depth(ctrl->link,
+ ctrl->panel->msm_dp_mode.bpp);
config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
@@ -170,24 +170,23 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
if (ctrl->panel->psr_cap.version)
config |= DP_CONFIGURATION_CTRL_SEND_VSC;
- dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
+ msm_dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
}
-static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl)
{
u32 cc, tb;
- dp_catalog_ctrl_lane_mapping(ctrl->catalog);
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
- dp_catalog_setup_peripheral_flush(ctrl->catalog);
+ msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog);
+ msm_dp_catalog_setup_peripheral_flush(ctrl->catalog);
- dp_ctrl_config_ctrl(ctrl);
+ msm_dp_ctrl_config_ctrl(ctrl);
- tb = dp_link_get_test_bits_depth(ctrl->link,
- ctrl->panel->dp_mode.bpp);
- cc = dp_link_get_colorimetry_config(ctrl->link);
- dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
- dp_panel_timing_cfg(ctrl->panel);
+ tb = msm_dp_link_get_test_bits_depth(ctrl->link,
+ ctrl->panel->msm_dp_mode.bpp);
+ cc = msm_dp_link_get_colorimetry_config(ctrl->link);
+ msm_dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
+ msm_dp_panel_timing_cfg(ctrl->panel);
}
/*
@@ -310,7 +309,7 @@ static int _tu_param_compare(s64 a, s64 b)
}
}
-static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in,
+static void msm_dp_panel_update_tu_timings(struct msm_dp_tu_calc_input *in,
struct tu_algo_data *tu)
{
int nlanes = in->nlanes;
@@ -622,9 +621,9 @@ static void _tu_valid_boundary_calc(struct tu_algo_data *tu)
}
}
-static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl,
- struct dp_tu_calc_input *in,
- struct dp_vc_tu_mapping_table *tu_table)
+static void _dp_ctrl_calc_tu(struct msm_dp_ctrl_private *ctrl,
+ struct msm_dp_tu_calc_input *in,
+ struct msm_dp_vc_tu_mapping_table *tu_table)
{
struct tu_algo_data *tu;
int compare_result_1, compare_result_2;
@@ -645,7 +644,7 @@ static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl,
if (!tu)
return;
- dp_panel_update_tu_timings(in, tu);
+ msm_dp_panel_update_tu_timings(in, tu);
tu->err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */
@@ -956,21 +955,21 @@ tu_size_calc:
kfree(tu);
}
-static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
- struct dp_vc_tu_mapping_table *tu_table)
+static void msm_dp_ctrl_calc_tu_parameters(struct msm_dp_ctrl_private *ctrl,
+ struct msm_dp_vc_tu_mapping_table *tu_table)
{
- struct dp_tu_calc_input in;
+ struct msm_dp_tu_calc_input in;
struct drm_display_mode *drm_mode;
- drm_mode = &ctrl->panel->dp_mode.drm_mode;
+ drm_mode = &ctrl->panel->msm_dp_mode.drm_mode;
in.lclk = ctrl->link->link_params.rate / 1000;
in.pclk_khz = drm_mode->clock;
in.hactive = drm_mode->hdisplay;
in.hporch = drm_mode->htotal - drm_mode->hdisplay;
in.nlanes = ctrl->link->link_params.num_lanes;
- in.bpp = ctrl->panel->dp_mode.bpp;
- in.pixel_enc = ctrl->panel->dp_mode.out_fmt_is_yuv_420 ? 420 : 444;
+ in.bpp = ctrl->panel->msm_dp_mode.bpp;
+ in.pixel_enc = ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420 ? 420 : 444;
in.dsc_en = 0;
in.async_en = 0;
in.fec_en = 0;
@@ -980,16 +979,16 @@ static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
_dp_ctrl_calc_tu(ctrl, &in, tu_table);
}
-static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_setup_tr_unit(struct msm_dp_ctrl_private *ctrl)
{
- u32 dp_tu = 0x0;
+ u32 msm_dp_tu = 0x0;
u32 valid_boundary = 0x0;
u32 valid_boundary2 = 0x0;
- struct dp_vc_tu_mapping_table tu_calc_table;
+ struct msm_dp_vc_tu_mapping_table tu_calc_table;
- dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
+ msm_dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
- dp_tu |= tu_calc_table.tu_size_minus1;
+ msm_dp_tu |= tu_calc_table.tu_size_minus1;
valid_boundary |= tu_calc_table.valid_boundary_link;
valid_boundary |= (tu_calc_table.delay_start_link << 16);
@@ -1001,13 +1000,13 @@ static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
valid_boundary2 |= BIT(0);
pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
- dp_tu, valid_boundary, valid_boundary2);
+ msm_dp_tu, valid_boundary, valid_boundary2);
- dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
- dp_tu, valid_boundary, valid_boundary2);
+ msm_dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
+ msm_dp_tu, valid_boundary, valid_boundary2);
}
-static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_wait4video_ready(struct msm_dp_ctrl_private *ctrl)
{
int ret = 0;
@@ -1019,7 +1018,7 @@ static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
return ret;
}
-static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_set_vx_px(struct msm_dp_ctrl_private *ctrl,
u8 v_level, u8 p_level)
{
union phy_configure_opts *phy_opts = &ctrl->phy_opts;
@@ -1034,9 +1033,9 @@ static int dp_ctrl_set_vx_px(struct dp_ctrl_private *ctrl,
return 0;
}
-static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_update_vx_px(struct msm_dp_ctrl_private *ctrl)
{
- struct dp_link *link = ctrl->link;
+ struct msm_dp_link *link = ctrl->link;
int ret = 0, lane, lane_cnt;
u8 buf[4];
u32 max_level_reached = 0;
@@ -1046,7 +1045,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
drm_dbg_dp(ctrl->drm_dev,
"voltage level: %d emphasis level: %d\n",
voltage_swing_level, pre_emphasis_level);
- ret = dp_ctrl_set_vx_px(ctrl,
+ ret = msm_dp_ctrl_set_vx_px(ctrl,
voltage_swing_level, pre_emphasis_level);
if (ret)
@@ -1083,7 +1082,7 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
return ret;
}
-static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl,
u8 pattern)
{
u8 buf;
@@ -1100,7 +1099,7 @@ static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
return ret == 1;
}
-static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl,
u8 *link_status)
{
int ret = 0, len;
@@ -1114,24 +1113,24 @@ static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
return ret;
}
-static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
int tries, old_v_level, ret = 0;
u8 link_status[DP_LINK_STATUS_SIZE];
int const maximum_retries = 4;
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_1;
- ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
+ ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
if (ret)
return ret;
- dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+ msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE);
- ret = dp_ctrl_update_vx_px(ctrl);
+ ret = msm_dp_ctrl_update_vx_px(ctrl);
if (ret)
return ret;
@@ -1140,7 +1139,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
for (tries = 0; tries < maximum_retries; tries++) {
drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
- ret = dp_ctrl_read_link_status(ctrl, link_status);
+ ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
return ret;
@@ -1160,8 +1159,8 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
old_v_level = ctrl->link->phy_params.v_level;
}
- dp_link_adjust_levels(ctrl->link, link_status);
- ret = dp_ctrl_update_vx_px(ctrl);
+ msm_dp_link_adjust_levels(ctrl->link, link_status);
+ ret = msm_dp_ctrl_update_vx_px(ctrl);
if (ret)
return ret;
}
@@ -1170,7 +1169,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
-static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_link_rate_down_shift(struct msm_dp_ctrl_private *ctrl)
{
int ret = 0;
@@ -1198,7 +1197,7 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
return ret;
}
-static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_link_lane_down_shift(struct msm_dp_ctrl_private *ctrl)
{
if (ctrl->link->link_params.num_lanes == 1)
@@ -1213,13 +1212,13 @@ static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
return 0;
}
-static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
+static void msm_dp_ctrl_clear_training_pattern(struct msm_dp_ctrl_private *ctrl)
{
- dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
+ msm_dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
}
-static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
int tries = 0, ret = 0;
@@ -1228,7 +1227,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
int const maximum_retries = 5;
u8 link_status[DP_LINK_STATUS_SIZE];
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
*training_step = DP_TRAINING_2;
@@ -1243,16 +1242,16 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
state_ctrl_bit = 2;
}
- ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
+ ret = msm_dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
if (ret)
return ret;
- dp_ctrl_train_pattern_set(ctrl, pattern);
+ msm_dp_ctrl_train_pattern_set(ctrl, pattern);
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
- ret = dp_ctrl_read_link_status(ctrl, link_status);
+ ret = msm_dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
return ret;
@@ -1261,8 +1260,8 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
return 0;
}
- dp_link_adjust_levels(ctrl->link, link_status);
- ret = dp_ctrl_update_vx_px(ctrl);
+ msm_dp_link_adjust_levels(ctrl->link, link_status);
+ ret = msm_dp_ctrl_update_vx_px(ctrl);
if (ret)
return ret;
@@ -1271,24 +1270,24 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
-static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_link_train(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
int ret = 0;
const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding[] = { 0, DP_SET_ANSI_8B10B };
u8 assr;
- struct dp_link_info link_info = {0};
+ struct msm_dp_link_info link_info = {0};
- dp_ctrl_config_ctrl(ctrl);
+ msm_dp_ctrl_config_ctrl(ctrl);
link_info.num_lanes = ctrl->link->link_params.num_lanes;
link_info.rate = ctrl->link->link_params.rate;
link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
- dp_link_reset_phy_params_vx_px(ctrl->link);
+ msm_dp_link_reset_phy_params_vx_px(ctrl->link);
- dp_aux_link_configure(ctrl->aux, &link_info);
+ msm_dp_aux_link_configure(ctrl->aux, &link_info);
if (drm_dp_max_downspread(dpcd))
encoding[0] |= DP_SPREAD_AMP_0_5;
@@ -1302,7 +1301,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
&assr, 1);
}
- ret = dp_ctrl_link_train_1(ctrl, training_step);
+ ret = msm_dp_ctrl_link_train_1(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #1 failed. ret=%d\n", ret);
goto end;
@@ -1311,7 +1310,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
/* print success info as this is a result of user initiated action */
drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n");
- ret = dp_ctrl_link_train_2(ctrl, training_step);
+ ret = msm_dp_ctrl_link_train_2(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #2 failed. ret=%d\n", ret);
goto end;
@@ -1321,17 +1320,17 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n");
end:
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
return ret;
}
-static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
+static int msm_dp_ctrl_setup_main_link(struct msm_dp_ctrl_private *ctrl,
int *training_step)
{
int ret = 0;
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
return ret;
@@ -1342,17 +1341,17 @@ static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
* a link training pattern, we have to first do soft reset.
*/
- ret = dp_ctrl_link_train(ctrl, training_step);
+ ret = msm_dp_ctrl_link_train(ctrl, training_step);
return ret;
}
-int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl)
+int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
int ret = 0;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
if (ctrl->core_clks_on) {
drm_dbg_dp(ctrl->drm_dev, "core clks already enabled\n");
@@ -1374,11 +1373,11 @@ int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl)
return 0;
}
-void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
clk_bulk_disable_unprepare(ctrl->num_core_clks, ctrl->core_clks);
@@ -1391,12 +1390,12 @@ void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl)
ctrl->core_clks_on ? "on" : "off");
}
-static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl)
+static int msm_dp_ctrl_link_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
int ret = 0;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
if (ctrl->link_clks_on) {
drm_dbg_dp(ctrl->drm_dev, "links clks already enabled\n");
@@ -1406,7 +1405,7 @@ static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl)
if (!ctrl->core_clks_on) {
drm_dbg_dp(ctrl->drm_dev, "Enable core clks before link clks\n");
- dp_ctrl_core_clk_enable(dp_ctrl);
+ msm_dp_ctrl_core_clk_enable(msm_dp_ctrl);
}
ret = clk_bulk_prepare_enable(ctrl->num_link_clks, ctrl->link_clks);
@@ -1424,11 +1423,11 @@ static int dp_ctrl_link_clk_enable(struct dp_ctrl *dp_ctrl)
return 0;
}
-static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl)
+static void msm_dp_ctrl_link_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
clk_bulk_disable_unprepare(ctrl->num_link_clks, ctrl->link_clks);
@@ -1441,7 +1440,7 @@ static void dp_ctrl_link_clk_disable(struct dp_ctrl *dp_ctrl)
ctrl->core_clks_on ? "on" : "off");
}
-static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_enable_mainlink_clocks(struct msm_dp_ctrl_private *ctrl)
{
int ret = 0;
struct phy *phy = ctrl->phy;
@@ -1455,7 +1454,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
phy_power_on(phy);
dev_pm_opp_set_rate(ctrl->dev, ctrl->link->link_params.rate * 1000);
- ret = dp_ctrl_link_clk_enable(&ctrl->dp_ctrl);
+ ret = msm_dp_ctrl_link_clk_enable(&ctrl->msm_dp_ctrl);
if (ret)
DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
@@ -1464,13 +1463,13 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
return ret;
}
-void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
+void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
- dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_catalog_ctrl_reset(ctrl->catalog);
/*
* all dp controller programmable registers will not
@@ -1478,28 +1477,28 @@ void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
* therefore interrupt mask bits have to be updated
* to enable/disable interrupts
*/
- dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+ msm_dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
}
-void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl)
{
u8 cfg;
- struct dp_ctrl_private *ctrl = container_of(dp_ctrl,
- struct dp_ctrl_private, dp_ctrl);
+ struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl,
+ struct msm_dp_ctrl_private, msm_dp_ctrl);
if (!ctrl->panel->psr_cap.version)
return;
- dp_catalog_ctrl_config_psr(ctrl->catalog);
+ msm_dp_catalog_ctrl_config_psr(ctrl->catalog);
cfg = DP_PSR_ENABLE;
drm_dp_dpcd_write(ctrl->aux, DP_PSR_EN_CFG, &cfg, 1);
}
-void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter)
+void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enter)
{
- struct dp_ctrl_private *ctrl = container_of(dp_ctrl,
- struct dp_ctrl_private, dp_ctrl);
+ struct msm_dp_ctrl_private *ctrl = container_of(msm_dp_ctrl,
+ struct msm_dp_ctrl_private, msm_dp_ctrl);
if (!ctrl->panel->psr_cap.version)
return;
@@ -1516,64 +1515,64 @@ void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enter)
*/
if (enter) {
reinit_completion(&ctrl->psr_op_comp);
- dp_catalog_ctrl_set_psr(ctrl->catalog, true);
+ msm_dp_catalog_ctrl_set_psr(ctrl->catalog, true);
if (!wait_for_completion_timeout(&ctrl->psr_op_comp,
PSR_OPERATION_COMPLETION_TIMEOUT_JIFFIES)) {
DRM_ERROR("PSR_ENTRY timedout\n");
- dp_catalog_ctrl_set_psr(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false);
return;
}
- dp_ctrl_push_idle(dp_ctrl);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_ctrl_push_idle(msm_dp_ctrl);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
- dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, false);
} else {
- dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true);
+ msm_dp_catalog_ctrl_psr_mainlink_enable(ctrl->catalog, true);
- dp_catalog_ctrl_set_psr(ctrl->catalog, false);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
- dp_ctrl_wait4video_ready(ctrl);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+ msm_dp_catalog_ctrl_set_psr(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_ctrl_wait4video_ready(ctrl);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
}
}
-void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct phy *phy;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ msm_dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}
-void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct phy *phy;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ msm_dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_exit(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}
-static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_reinitialize_mainlink(struct msm_dp_ctrl_private *ctrl)
{
struct phy *phy = ctrl->phy;
int ret = 0;
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
ctrl->phy_opts.dp.lanes = ctrl->link->link_params.num_lanes;
phy_configure(phy, &ctrl->phy_opts);
/*
@@ -1583,13 +1582,13 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
*/
dev_pm_opp_set_rate(ctrl->dev, 0);
- dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
phy_power_off(phy);
/* hw recommended delay before re-enabling clocks */
msleep(20);
- ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+ ret = msm_dp_ctrl_enable_mainlink_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret);
return ret;
@@ -1598,18 +1597,18 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
return ret;
}
-static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_deinitialize_mainlink(struct msm_dp_ctrl_private *ctrl)
{
struct phy *phy;
phy = ctrl->phy;
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
- dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_catalog_ctrl_reset(ctrl->catalog);
dev_pm_opp_set_rate(ctrl->dev, 0);
- dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
phy_power_off(phy);
@@ -1622,30 +1621,30 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
return 0;
}
-static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_link_maintenance(struct msm_dp_ctrl_private *ctrl)
{
int ret = 0;
int training_step = DP_TRAINING_NONE;
- dp_ctrl_push_idle(&ctrl->dp_ctrl);
+ msm_dp_ctrl_push_idle(&ctrl->msm_dp_ctrl);
ctrl->link->phy_params.p_level = 0;
ctrl->link->phy_params.v_level = 0;
- ret = dp_ctrl_setup_main_link(ctrl, &training_step);
+ ret = msm_dp_ctrl_setup_main_link(ctrl, &training_step);
if (ret)
goto end;
- dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
- ret = dp_ctrl_wait4video_ready(ctrl);
+ ret = msm_dp_ctrl_wait4video_ready(ctrl);
end:
return ret;
}
-static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
+static bool msm_dp_ctrl_send_phy_test_pattern(struct msm_dp_ctrl_private *ctrl)
{
bool success = false;
u32 pattern_sent = 0x0;
@@ -1653,17 +1652,17 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel;
drm_dbg_dp(ctrl->drm_dev, "request: 0x%x\n", pattern_requested);
- if (dp_ctrl_set_vx_px(ctrl,
+ if (msm_dp_ctrl_set_vx_px(ctrl,
ctrl->link->phy_params.v_level,
ctrl->link->phy_params.p_level)) {
DRM_ERROR("Failed to set v/p levels\n");
return false;
}
- dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
- dp_ctrl_update_vx_px(ctrl);
- dp_link_send_test_response(ctrl->link);
+ msm_dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
+ msm_dp_ctrl_update_vx_px(ctrl);
+ msm_dp_link_send_test_response(ctrl->link);
- pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
+ pattern_sent = msm_dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
switch (pattern_sent) {
case MR_LINK_TRAINING1:
@@ -1697,7 +1696,7 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
return success;
}
-static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_process_phy_test_request(struct msm_dp_ctrl_private *ctrl)
{
int ret;
unsigned long pixel_rate;
@@ -1713,15 +1712,15 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
* running. Add the global reset just before disabling the
* link clocks and core clocks.
*/
- dp_ctrl_off(&ctrl->dp_ctrl);
+ msm_dp_ctrl_off(&ctrl->msm_dp_ctrl);
- ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
+ ret = msm_dp_ctrl_on_link(&ctrl->msm_dp_ctrl);
if (ret) {
DRM_ERROR("failed to enable DP link controller\n");
return ret;
}
- pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ pixel_rate = ctrl->panel->msm_dp_mode.drm_mode.clock;
ret = clk_set_rate(ctrl->pixel_clk, pixel_rate * 1000);
if (ret) {
DRM_ERROR("Failed to set pixel clock rate. ret=%d\n", ret);
@@ -1739,49 +1738,49 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
ctrl->stream_clks_on = true;
}
- dp_ctrl_send_phy_test_pattern(ctrl);
+ msm_dp_ctrl_send_phy_test_pattern(ctrl);
return 0;
}
-void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
u32 sink_request = 0x0;
- if (!dp_ctrl) {
+ if (!msm_dp_ctrl) {
DRM_ERROR("invalid input\n");
return;
}
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
sink_request = ctrl->link->sink_request;
if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev, "PHY_TEST_PATTERN request\n");
- if (dp_ctrl_process_phy_test_request(ctrl)) {
+ if (msm_dp_ctrl_process_phy_test_request(ctrl)) {
DRM_ERROR("process phy_test_req failed\n");
return;
}
}
if (sink_request & DP_LINK_STATUS_UPDATED) {
- if (dp_ctrl_link_maintenance(ctrl)) {
+ if (msm_dp_ctrl_link_maintenance(ctrl)) {
DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
return;
}
}
if (sink_request & DP_TEST_LINK_TRAINING) {
- dp_link_send_test_response(ctrl->link);
- if (dp_ctrl_link_maintenance(ctrl)) {
+ msm_dp_link_send_test_response(ctrl->link);
+ if (msm_dp_ctrl_link_maintenance(ctrl)) {
DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
return;
}
}
}
-static bool dp_ctrl_clock_recovery_any_ok(
+static bool msm_dp_ctrl_clock_recovery_any_ok(
const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
@@ -1800,20 +1799,20 @@ static bool dp_ctrl_clock_recovery_any_ok(
return drm_dp_clock_recovery_ok(link_status, reduced_cnt);
}
-static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl)
+static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl)
{
u8 link_status[DP_LINK_STATUS_SIZE];
int num_lanes = ctrl->link->link_params.num_lanes;
- dp_ctrl_read_link_status(ctrl, link_status);
+ msm_dp_ctrl_read_link_status(ctrl, link_status);
return drm_dp_channel_eq_ok(link_status, num_lanes);
}
-int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl)
{
int rc = 0;
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
u32 rate;
int link_train_max_retries = 5;
u32 const phy_cts_pixel_clk_khz = 148500;
@@ -1821,15 +1820,15 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
unsigned int training_step;
unsigned long pixel_rate;
- if (!dp_ctrl)
+ if (!msm_dp_ctrl)
return -EINVAL;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
rate = ctrl->panel->link_info.rate;
- pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ pixel_rate = ctrl->panel->msm_dp_mode.drm_mode.clock;
- dp_ctrl_core_clk_enable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_core_clk_enable(&ctrl->msm_dp_ctrl);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev,
@@ -1840,7 +1839,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl->link->link_params.rate = rate;
ctrl->link->link_params.num_lanes =
ctrl->panel->link_info.num_lanes;
- if (ctrl->panel->dp_mode.out_fmt_is_yuv_420)
+ if (ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420)
pixel_rate >>= 1;
}
@@ -1848,32 +1847,32 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes,
pixel_rate);
- rc = dp_ctrl_enable_mainlink_clocks(ctrl);
+ rc = msm_dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
return rc;
while (--link_train_max_retries) {
training_step = DP_TRAINING_NONE;
- rc = dp_ctrl_setup_main_link(ctrl, &training_step);
+ rc = msm_dp_ctrl_setup_main_link(ctrl, &training_step);
if (rc == 0) {
/* training completed successfully */
break;
} else if (training_step == DP_TRAINING_1) {
/* link train_1 failed */
- if (!dp_catalog_link_is_connected(ctrl->catalog))
+ if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- dp_ctrl_read_link_status(ctrl, link_status);
+ msm_dp_ctrl_read_link_status(ctrl, link_status);
- rc = dp_ctrl_link_rate_down_shift(ctrl);
+ rc = msm_dp_ctrl_link_rate_down_shift(ctrl);
if (rc < 0) { /* already in RBR = 1.6G */
- if (dp_ctrl_clock_recovery_any_ok(link_status,
+ if (msm_dp_ctrl_clock_recovery_any_ok(link_status,
ctrl->link->link_params.num_lanes)) {
/*
* some lanes are ready,
* reduce lane number
*/
- rc = dp_ctrl_link_lane_down_shift(ctrl);
+ rc = msm_dp_ctrl_link_lane_down_shift(ctrl);
if (rc < 0) { /* lane == 1 already */
/* end with failure */
break;
@@ -1885,16 +1884,16 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
}
} else if (training_step == DP_TRAINING_2) {
/* link train_2 failed */
- if (!dp_catalog_link_is_connected(ctrl->catalog))
+ if (!msm_dp_catalog_link_is_connected(ctrl->catalog))
break;
- dp_ctrl_read_link_status(ctrl, link_status);
+ msm_dp_ctrl_read_link_status(ctrl, link_status);
if (!drm_dp_clock_recovery_ok(link_status,
ctrl->link->link_params.num_lanes))
- rc = dp_ctrl_link_rate_down_shift(ctrl);
+ rc = msm_dp_ctrl_link_rate_down_shift(ctrl);
else
- rc = dp_ctrl_link_lane_down_shift(ctrl);
+ rc = msm_dp_ctrl_link_lane_down_shift(ctrl);
if (rc < 0) {
/* end with failure */
@@ -1902,10 +1901,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
}
/* stop link training before start re training */
- dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl);
}
- rc = dp_ctrl_reinitialize_mainlink(ctrl);
+ rc = msm_dp_ctrl_reinitialize_mainlink(ctrl);
if (rc) {
DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
break;
@@ -1926,38 +1925,38 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
* link training failed
* end txing train pattern here
*/
- dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl);
- dp_ctrl_deinitialize_mainlink(ctrl);
+ msm_dp_ctrl_deinitialize_mainlink(ctrl);
rc = -ECONNRESET;
}
return rc;
}
-static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
+static int msm_dp_ctrl_link_retrain(struct msm_dp_ctrl_private *ctrl)
{
int training_step = DP_TRAINING_NONE;
- return dp_ctrl_setup_main_link(ctrl, &training_step);
+ return msm_dp_ctrl_setup_main_link(ctrl, &training_step);
}
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
+int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train)
{
int ret = 0;
bool mainlink_ready = false;
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
unsigned long pixel_rate;
unsigned long pixel_rate_orig;
- if (!dp_ctrl)
+ if (!msm_dp_ctrl)
return -EINVAL;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
- pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock;
+ pixel_rate = pixel_rate_orig = ctrl->panel->msm_dp_mode.drm_mode.clock;
- if (dp_ctrl->wide_bus_en || ctrl->panel->dp_mode.out_fmt_is_yuv_420)
+ if (msm_dp_ctrl->wide_bus_en || ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420)
pixel_rate >>= 1;
drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
@@ -1969,7 +1968,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
ctrl->core_clks_on, ctrl->link_clks_on, ctrl->stream_clks_on);
if (!ctrl->link_clks_on) { /* link clk is off */
- ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+ ret = msm_dp_ctrl_enable_mainlink_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to start link clocks. ret=%d\n", ret);
goto end;
@@ -1993,11 +1992,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
ctrl->stream_clks_on = true;
}
- if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
- dp_ctrl_link_retrain(ctrl);
+ if (force_link_train || !msm_dp_ctrl_channel_eq_ok(ctrl))
+ msm_dp_ctrl_link_retrain(ctrl);
/* stop txing train pattern to end link training */
- dp_ctrl_clear_training_pattern(ctrl);
+ msm_dp_ctrl_clear_training_pattern(ctrl);
/*
* Set up transfer unit values and set controller state to send
@@ -2005,22 +2004,22 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
*/
reinit_completion(&ctrl->video_comp);
- dp_ctrl_configure_source_params(ctrl);
+ msm_dp_ctrl_configure_source_params(ctrl);
- dp_catalog_ctrl_config_msa(ctrl->catalog,
+ msm_dp_catalog_ctrl_config_msa(ctrl->catalog,
ctrl->link->link_params.rate,
pixel_rate_orig,
- ctrl->panel->dp_mode.out_fmt_is_yuv_420);
+ ctrl->panel->msm_dp_mode.out_fmt_is_yuv_420);
- dp_ctrl_setup_tr_unit(ctrl);
+ msm_dp_ctrl_setup_tr_unit(ctrl);
- dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+ msm_dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
- ret = dp_ctrl_wait4video_ready(ctrl);
+ ret = msm_dp_ctrl_wait4video_ready(ctrl);
if (ret)
return ret;
- mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
+ mainlink_ready = msm_dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
drm_dbg_dp(ctrl->drm_dev,
"mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
@@ -2028,20 +2027,20 @@ end:
return ret;
}
-void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct phy *phy;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
+ msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
/* set dongle to D3 (power off) mode */
- dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
+ msm_dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
if (ctrl->stream_clks_on) {
clk_disable_unprepare(ctrl->pixel_clk);
@@ -2049,7 +2048,7 @@ void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
}
dev_pm_opp_set_rate(ctrl->dev, 0);
- dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
phy_power_off(phy);
@@ -2061,17 +2060,18 @@ void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
phy, phy->init_count, phy->power_count);
}
-void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct phy *phy;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
- dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+ dev_pm_opp_set_rate(ctrl->dev, 0);
+ msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
@@ -2082,19 +2082,19 @@ void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl)
phy, phy->init_count, phy->power_count);
}
-void dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct phy *phy;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
phy = ctrl->phy;
- dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
+ msm_dp_catalog_panel_disable_vsc_sdp(ctrl->catalog);
- dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
- dp_catalog_ctrl_reset(ctrl->catalog);
+ msm_dp_catalog_ctrl_reset(ctrl->catalog);
if (ctrl->stream_clks_on) {
clk_disable_unprepare(ctrl->pixel_clk);
@@ -2102,26 +2102,26 @@ void dp_ctrl_off(struct dp_ctrl *dp_ctrl)
}
dev_pm_opp_set_rate(ctrl->dev, 0);
- dp_ctrl_link_clk_disable(&ctrl->dp_ctrl);
+ msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
phy_power_off(phy);
drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}
-irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
+irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
u32 isr;
irqreturn_t ret = IRQ_NONE;
- if (!dp_ctrl)
+ if (!msm_dp_ctrl)
return IRQ_NONE;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
if (ctrl->panel->psr_cap.version) {
- isr = dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog);
+ isr = msm_dp_catalog_ctrl_read_psr_interrupt_status(ctrl->catalog);
if (isr)
complete(&ctrl->psr_op_comp);
@@ -2136,7 +2136,7 @@ irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
drm_dbg_dp(ctrl->drm_dev, "PSR frame capture done\n");
}
- isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog);
+ isr = msm_dp_catalog_ctrl_get_interrupt(ctrl->catalog);
if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
@@ -2164,13 +2164,13 @@ static const char *ctrl_clks[] = {
"ctrl_link_iface",
};
-static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl)
+static int msm_dp_ctrl_clk_init(struct msm_dp_ctrl *msm_dp_ctrl)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
struct device *dev;
int i, rc;
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ ctrl = container_of(msm_dp_ctrl, struct msm_dp_ctrl_private, msm_dp_ctrl);
dev = ctrl->dev;
ctrl->num_core_clks = ARRAY_SIZE(core_clks);
@@ -2204,12 +2204,12 @@ static int dp_ctrl_clk_init(struct dp_ctrl *dp_ctrl)
return 0;
}
-struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
- struct dp_panel *panel, struct drm_dp_aux *aux,
- struct dp_catalog *catalog,
+struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link,
+ struct msm_dp_panel *panel, struct drm_dp_aux *aux,
+ struct msm_dp_catalog *catalog,
struct phy *phy)
{
- struct dp_ctrl_private *ctrl;
+ struct msm_dp_ctrl_private *ctrl;
int ret;
if (!dev || !panel || !aux ||
@@ -2228,7 +2228,7 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
if (ret) {
dev_err(dev, "invalid DP OPP table in device tree\n");
/* caller do PTR_ERR(opp_table) */
- return (struct dp_ctrl *)ERR_PTR(ret);
+ return (struct msm_dp_ctrl *)ERR_PTR(ret);
}
/* OPP table is optional */
@@ -2248,11 +2248,11 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
ctrl->dev = dev;
ctrl->phy = phy;
- ret = dp_ctrl_clk_init(&ctrl->dp_ctrl);
+ ret = msm_dp_ctrl_clk_init(&ctrl->msm_dp_ctrl);
if (ret) {
dev_err(dev, "failed to init clocks\n");
return ERR_PTR(ret);
}
- return &ctrl->dp_ctrl;
+ return &ctrl->msm_dp_ctrl;
}
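
The rename keeps the driver's embedded-handle pattern intact: the public struct msm_dp_ctrl is the handle callers hold, it is embedded in the private struct msm_dp_ctrl_private, and every exported entry point recovers the private state with container_of(). A reduced, self-contained model of that pattern follows; the field names are illustrative, and only the two struct names and the container_of() recovery mirror the real code.

/* Reduced model of the embedded-handle pattern used throughout dp_ctrl.c. */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct msm_dp_ctrl {
	int wide_bus_en;		/* public: callers may read this */
};

struct msm_dp_ctrl_private {
	struct msm_dp_ctrl msm_dp_ctrl;	/* embedded public handle */
	int core_clks_on;		/* private bookkeeping */
};

static struct msm_dp_ctrl_private *to_ctrl_private(struct msm_dp_ctrl *handle)
{
	/* valid because the handle is a member of the private struct */
	return container_of(handle, struct msm_dp_ctrl_private, msm_dp_ctrl);
}

The same pointer arithmetic is why msm_dp_ctrl_get() can hand back &ctrl->msm_dp_ctrl: callers only ever see the embedded handle.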
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index ffcbd9a25748..b7abfedbf574 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -11,34 +11,34 @@
#include "dp_link.h"
#include "dp_catalog.h"
-struct dp_ctrl {
+struct msm_dp_ctrl {
bool wide_bus_en;
};
struct phy;
-int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
-int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
-void dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_off(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
-irqreturn_t dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl);
-struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
- struct dp_panel *panel, struct drm_dp_aux *aux,
- struct dp_catalog *catalog,
+int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl);
+int msm_dp_ctrl_on_stream(struct msm_dp_ctrl *msm_dp_ctrl, bool force_link_train);
+void msm_dp_ctrl_off_link_stream(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_off(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_push_idle(struct msm_dp_ctrl *msm_dp_ctrl);
+irqreturn_t msm_dp_ctrl_isr(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_handle_sink_request(struct msm_dp_ctrl *msm_dp_ctrl);
+struct msm_dp_ctrl *msm_dp_ctrl_get(struct device *dev, struct msm_dp_link *link,
+ struct msm_dp_panel *panel, struct drm_dp_aux *aux,
+ struct msm_dp_catalog *catalog,
struct phy *phy);
-void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable);
-void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl);
+void msm_dp_ctrl_reset_irq_ctrl(struct msm_dp_ctrl *msm_dp_ctrl, bool enable);
+void msm_dp_ctrl_phy_init(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_irq_phy_exit(struct msm_dp_ctrl *msm_dp_ctrl);
-void dp_ctrl_set_psr(struct dp_ctrl *dp_ctrl, bool enable);
-void dp_ctrl_config_psr(struct dp_ctrl *dp_ctrl);
+void msm_dp_ctrl_set_psr(struct msm_dp_ctrl *msm_dp_ctrl, bool enable);
+void msm_dp_ctrl_config_psr(struct msm_dp_ctrl *msm_dp_ctrl);
-int dp_ctrl_core_clk_enable(struct dp_ctrl *dp_ctrl);
-void dp_ctrl_core_clk_disable(struct dp_ctrl *dp_ctrl);
+int msm_dp_ctrl_core_clk_enable(struct msm_dp_ctrl *msm_dp_ctrl);
+void msm_dp_ctrl_core_clk_disable(struct msm_dp_ctrl *msm_dp_ctrl);
#endif /* _DP_CTRL_H_ */
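
The renamed entry points keep their original calling contract. As a hypothetical sketch, under the assumption that HPD handling and error unwinding stay in dp_display.c as before, a caller would sequence them like this; example_dp_enable() is an illustrative name, not a driver function.

/* Hypothetical bring-up order for the renamed API. */
static int example_dp_enable(struct msm_dp_ctrl *ctrl)
{
	int ret;

	ret = msm_dp_ctrl_core_clk_enable(ctrl);	/* core clocks first */
	if (ret)
		return ret;

	ret = msm_dp_ctrl_on_link(ctrl);		/* train the main link */
	if (ret)
		return ret;	/* on_link tears the link back down on failure */

	return msm_dp_ctrl_on_stream(ctrl, false);	/* start video, no forced retrain */
}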
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index b8611f6d2296..22fd946ee201 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -17,15 +17,15 @@
#define DEBUG_NAME "msm_dp"
-struct dp_debug_private {
- struct dp_link *link;
- struct dp_panel *panel;
+struct msm_dp_debug_private {
+ struct msm_dp_link *link;
+ struct msm_dp_panel *panel;
struct drm_connector *connector;
};
-static int dp_debug_show(struct seq_file *seq, void *p)
+static int msm_dp_debug_show(struct seq_file *seq, void *p)
{
- struct dp_debug_private *debug = seq->private;
+ struct msm_dp_debug_private *debug = seq->private;
u64 lclk = 0;
u32 link_params_rate;
const struct drm_display_mode *drm_mode;
@@ -33,7 +33,7 @@ static int dp_debug_show(struct seq_file *seq, void *p)
if (!debug)
return -ENODEV;
- drm_mode = &debug->panel->dp_mode.drm_mode;
+ drm_mode = &debug->panel->msm_dp_mode.drm_mode;
seq_printf(seq, "\tname = %s\n", DEBUG_NAME);
seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n",
@@ -55,8 +55,8 @@ static int dp_debug_show(struct seq_file *seq, void *p)
drm_mode->hsync_end - drm_mode->hsync_start,
drm_mode->vsync_end - drm_mode->vsync_start);
seq_printf(seq, "\t\tactive_low = %dx%d\n",
- debug->panel->dp_mode.h_active_low,
- debug->panel->dp_mode.v_active_low);
+ debug->panel->msm_dp_mode.h_active_low,
+ debug->panel->msm_dp_mode.v_active_low);
seq_printf(seq, "\t\th_skew = %d\n",
drm_mode->hskew);
seq_printf(seq, "\t\trefresh rate = %d\n",
@@ -64,7 +64,7 @@ static int dp_debug_show(struct seq_file *seq, void *p)
seq_printf(seq, "\t\tpixel clock khz = %d\n",
drm_mode->clock);
seq_printf(seq, "\t\tbpp = %d\n",
- debug->panel->dp_mode.bpp);
+ debug->panel->msm_dp_mode.bpp);
/* Link Information */
seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n",
@@ -83,11 +83,11 @@ static int dp_debug_show(struct seq_file *seq, void *p)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(dp_debug);
+DEFINE_SHOW_ATTRIBUTE(msm_dp_debug);
-static int dp_test_data_show(struct seq_file *m, void *data)
+static int msm_dp_test_data_show(struct seq_file *m, void *data)
{
- const struct dp_debug_private *debug = m->private;
+ const struct msm_dp_debug_private *debug = m->private;
const struct drm_connector *connector = debug->connector;
u32 bpc;
@@ -98,18 +98,18 @@ static int dp_test_data_show(struct seq_file *m, void *data)
seq_printf(m, "vdisplay: %d\n",
debug->link->test_video.test_v_height);
seq_printf(m, "bpc: %u\n",
- dp_link_bit_depth_to_bpp(bpc) / 3);
+ msm_dp_link_bit_depth_to_bpp(bpc) / 3);
} else {
seq_puts(m, "0");
}
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(dp_test_data);
+DEFINE_SHOW_ATTRIBUTE(msm_dp_test_data);
-static int dp_test_type_show(struct seq_file *m, void *data)
+static int msm_dp_test_type_show(struct seq_file *m, void *data)
{
- const struct dp_debug_private *debug = m->private;
+ const struct msm_dp_debug_private *debug = m->private;
const struct drm_connector *connector = debug->connector;
if (connector->status == connector_status_connected)
@@ -119,15 +119,15 @@ static int dp_test_type_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(dp_test_type);
+DEFINE_SHOW_ATTRIBUTE(msm_dp_test_type);
-static ssize_t dp_test_active_write(struct file *file,
+static ssize_t msm_dp_test_active_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
char *input_buffer;
int status = 0;
- const struct dp_debug_private *debug;
+ const struct msm_dp_debug_private *debug;
const struct drm_connector *connector;
int val = 0;
@@ -164,9 +164,9 @@ static ssize_t dp_test_active_write(struct file *file,
return len;
}
-static int dp_test_active_show(struct seq_file *m, void *data)
+static int msm_dp_test_active_show(struct seq_file *m, void *data)
{
- struct dp_debug_private *debug = m->private;
+ struct msm_dp_debug_private *debug = m->private;
struct drm_connector *connector = debug->connector;
if (connector->status == connector_status_connected) {
@@ -181,28 +181,28 @@ static int dp_test_active_show(struct seq_file *m, void *data)
return 0;
}
-static int dp_test_active_open(struct inode *inode,
+static int msm_dp_test_active_open(struct inode *inode,
struct file *file)
{
- return single_open(file, dp_test_active_show,
+ return single_open(file, msm_dp_test_active_show,
inode->i_private);
}
static const struct file_operations test_active_fops = {
.owner = THIS_MODULE,
- .open = dp_test_active_open,
+ .open = msm_dp_test_active_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
- .write = dp_test_active_write
+ .write = msm_dp_test_active_write
};
-int dp_debug_init(struct device *dev, struct dp_panel *panel,
- struct dp_link *link,
+int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel,
+ struct msm_dp_link *link,
struct drm_connector *connector,
struct dentry *root, bool is_edp)
{
- struct dp_debug_private *debug;
+ struct msm_dp_debug_private *debug;
if (!dev || !panel || !link) {
DRM_ERROR("invalid input\n");
@@ -217,20 +217,20 @@ int dp_debug_init(struct device *dev, struct dp_panel *panel,
debug->panel = panel;
debugfs_create_file("dp_debug", 0444, root,
- debug, &dp_debug_fops);
+ debug, &msm_dp_debug_fops);
if (!is_edp) {
- debugfs_create_file("msm_dp_test_active", 0444,
+ debugfs_create_file("dp_test_active", 0444,
root,
debug, &test_active_fops);
- debugfs_create_file("msm_dp_test_data", 0444,
+ debugfs_create_file("dp_test_data", 0444,
root,
- debug, &dp_test_data_fops);
+ debug, &msm_dp_test_data_fops);
- debugfs_create_file("msm_dp_test_type", 0444,
+ debugfs_create_file("dp_test_type", 0444,
root,
- debug, &dp_test_type_fops);
+ debug, &msm_dp_test_type_fops);
}
return 0;
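
The _fops renames in this file fall out of DEFINE_SHOW_ATTRIBUTE(): the macro derives both the open callback and the <name>_fops symbol from <name>_show, so renaming dp_debug_show() to msm_dp_debug_show() is what forces &dp_debug_fops to become &msm_dp_debug_fops. Roughly what DEFINE_SHOW_ATTRIBUTE(msm_dp_debug) expands to, simplified from include/linux/seq_file.h:

static int msm_dp_debug_open(struct inode *inode, struct file *file)
{
	/* i_private carries the private pointer given to debugfs_create_file() */
	return single_open(file, msm_dp_debug_show, inode->i_private);
}

static const struct file_operations msm_dp_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = msm_dp_debug_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};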
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 7e1aa892fc09..6dc0ff4f0f65 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -12,7 +12,7 @@
#if defined(CONFIG_DEBUG_FS)
/**
- * dp_debug_get() - configure and get the DisplayPlot debug module data
+ * msm_dp_debug_init() - configure and get the DisplayPort debug module data
*
* @dev: device instance of the caller
* @panel: instance of panel module
@@ -25,8 +25,8 @@
* This function sets up the debug module and provides a way
* for debugfs input to be communicated with existing modules
*/
-int dp_debug_init(struct device *dev, struct dp_panel *panel,
- struct dp_link *link,
+int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel,
+ struct msm_dp_link *link,
struct drm_connector *connector,
struct dentry *root,
bool is_edp);
@@ -34,8 +34,8 @@ int dp_debug_init(struct device *dev, struct dp_panel *panel,
#else
static inline
-int dp_debug_init(struct device *dev, struct dp_panel *panel,
- struct dp_link *link,
+int msm_dp_debug_init(struct device *dev, struct msm_dp_panel *panel,
+ struct msm_dp_link *link,
struct drm_connector *connector,
struct dentry *root,
bool is_edp)
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index e1228fb093ee..24dd37f1682b 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -67,13 +67,13 @@ enum {
#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2)
-struct dp_event {
+struct msm_dp_event {
u32 event_id;
u32 data;
u32 delay;
};
-struct dp_display_private {
+struct msm_dp_display_private {
int irq;
unsigned int id;
@@ -85,14 +85,14 @@ struct dp_display_private {
struct drm_device *drm_dev;
- struct dp_catalog *catalog;
+ struct msm_dp_catalog *catalog;
struct drm_dp_aux *aux;
- struct dp_link *link;
- struct dp_panel *panel;
- struct dp_ctrl *ctrl;
+ struct msm_dp_link *link;
+ struct msm_dp_panel *panel;
+ struct msm_dp_ctrl *ctrl;
- struct dp_display_mode dp_mode;
- struct msm_dp dp_display;
+ struct msm_dp_display_mode msm_dp_mode;
+ struct msm_dp msm_dp_display;
/* wait for audio signaling */
struct completion audio_comp;
@@ -104,12 +104,12 @@ struct dp_display_private {
u32 event_pndx;
u32 event_gndx;
struct task_struct *ev_tsk;
- struct dp_event event_list[DP_EVENT_Q_MAX];
+ struct msm_dp_event event_list[DP_EVENT_Q_MAX];
spinlock_t event_lock;
bool wide_bus_supported;
- struct dp_audio *audio;
+ struct msm_dp_audio *audio;
};
struct msm_dp_desc {
@@ -118,25 +118,33 @@ struct msm_dp_desc {
bool wide_bus_supported;
};
-static const struct msm_dp_desc sc7180_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_sa8775p[] = {
+ { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
+ { .io_start = 0x0af5c000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
+ { .io_start = 0x22154000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
+ { .io_start = 0x2215c000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true },
+ {}
+};
+
+static const struct msm_dp_desc msm_dp_desc_sc7180[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
-static const struct msm_dp_desc sc7280_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_sc7280[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{ .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
{}
};
-static const struct msm_dp_desc sc8180x_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_sc8180x[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{ .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
{}
};
-static const struct msm_dp_desc sc8280xp_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_sc8280xp[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{ .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
@@ -148,12 +156,12 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = {
{}
};
-static const struct msm_dp_desc sm8650_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_sm8650[] = {
{ .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
-static const struct msm_dp_desc x1e80100_dp_descs[] = {
+static const struct msm_dp_desc msm_dp_desc_x1e80100[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{ .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
{ .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
@@ -161,70 +169,71 @@ static const struct msm_dp_desc x1e80100_dp_descs[] = {
{}
};
-static const struct of_device_id dp_dt_match[] = {
- { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs },
- { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs },
- { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_descs },
- { .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_descs },
- { .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_descs },
- { .compatible = "qcom,sc8280xp-dp", .data = &sc8280xp_dp_descs },
- { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_dp_descs },
- { .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs },
- { .compatible = "qcom,sm8350-dp", .data = &sc7180_dp_descs },
- { .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs },
- { .compatible = "qcom,x1e80100-dp", .data = &x1e80100_dp_descs },
+static const struct of_device_id msm_dp_dt_match[] = {
+ { .compatible = "qcom,sa8775p-dp", .data = &msm_dp_desc_sa8775p },
+ { .compatible = "qcom,sc7180-dp", .data = &msm_dp_desc_sc7180 },
+ { .compatible = "qcom,sc7280-dp", .data = &msm_dp_desc_sc7280 },
+ { .compatible = "qcom,sc7280-edp", .data = &msm_dp_desc_sc7280 },
+ { .compatible = "qcom,sc8180x-dp", .data = &msm_dp_desc_sc8180x },
+ { .compatible = "qcom,sc8180x-edp", .data = &msm_dp_desc_sc8180x },
+ { .compatible = "qcom,sc8280xp-dp", .data = &msm_dp_desc_sc8280xp },
+ { .compatible = "qcom,sc8280xp-edp", .data = &msm_dp_desc_sc8280xp },
+ { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sc7180 },
+ { .compatible = "qcom,sm8350-dp", .data = &msm_dp_desc_sc7180 },
+ { .compatible = "qcom,sm8650-dp", .data = &msm_dp_desc_sm8650 },
+ { .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 },
{}
};
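
Each compatible entry carries its controller table in .data, so a probe routine can fetch the table with of_device_get_match_data() and pick the entry whose io_start matches its register block. A sketch of that lookup; example_find_desc() is an illustrative name for what the driver does equivalently at probe time.

static const struct msm_dp_desc *example_find_desc(struct platform_device *pdev)
{
	const struct msm_dp_desc *desc = of_device_get_match_data(&pdev->dev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!desc || !res)
		return NULL;

	for (; desc->io_start; desc++)	/* tables end with an empty entry */
		if (desc->io_start == res->start)
			return desc;

	return NULL;
}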
-static struct dp_display_private *dev_get_dp_display_private(struct device *dev)
+static struct msm_dp_display_private *dev_get_dp_display_private(struct device *dev)
{
struct msm_dp *dp = dev_get_drvdata(dev);
- return container_of(dp, struct dp_display_private, dp_display);
+ return container_of(dp, struct msm_dp_display_private, msm_dp_display);
}
-static int dp_add_event(struct dp_display_private *dp_priv, u32 event,
+static int msm_dp_add_event(struct msm_dp_display_private *msm_dp_priv, u32 event,
u32 data, u32 delay)
{
unsigned long flag;
- struct dp_event *todo;
+ struct msm_dp_event *todo;
int pndx;
- spin_lock_irqsave(&dp_priv->event_lock, flag);
- pndx = dp_priv->event_pndx + 1;
+ spin_lock_irqsave(&msm_dp_priv->event_lock, flag);
+ pndx = msm_dp_priv->event_pndx + 1;
pndx %= DP_EVENT_Q_MAX;
- if (pndx == dp_priv->event_gndx) {
+ if (pndx == msm_dp_priv->event_gndx) {
pr_err("event_q is full: pndx=%d gndx=%d\n",
- dp_priv->event_pndx, dp_priv->event_gndx);
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ msm_dp_priv->event_pndx, msm_dp_priv->event_gndx);
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
return -EPERM;
}
- todo = &dp_priv->event_list[dp_priv->event_pndx++];
- dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+ todo = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++];
+ msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX;
todo->event_id = event;
todo->data = data;
todo->delay = delay;
- wake_up(&dp_priv->event_q);
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ wake_up(&msm_dp_priv->event_q);
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
return 0;
}
-static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
+static int msm_dp_del_event(struct msm_dp_display_private *msm_dp_priv, u32 event)
{
unsigned long flag;
- struct dp_event *todo;
+ struct msm_dp_event *todo;
u32 gndx;
- spin_lock_irqsave(&dp_priv->event_lock, flag);
- if (dp_priv->event_pndx == dp_priv->event_gndx) {
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ spin_lock_irqsave(&msm_dp_priv->event_lock, flag);
+ if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) {
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
return -ENOENT;
}
- gndx = dp_priv->event_gndx;
- while (dp_priv->event_pndx != gndx) {
- todo = &dp_priv->event_list[gndx];
+ gndx = msm_dp_priv->event_gndx;
+ while (msm_dp_priv->event_pndx != gndx) {
+ todo = &msm_dp_priv->event_list[gndx];
if (todo->event_id == event) {
todo->event_id = EV_NO_EVENT; /* deleted */
todo->delay = 0;
@@ -232,60 +241,60 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
gndx++;
gndx %= DP_EVENT_Q_MAX;
}
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
return 0;
}
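
msm_dp_add_event() and msm_dp_del_event() treat event_list[] as a fixed-size ring: event_pndx is the producer index, event_gndx the consumer index, both wrap at DP_EVENT_Q_MAX, and one slot stays empty so that full and empty remain distinguishable. The two predicates, reduced to a standalone model where Q_MAX stands in for DP_EVENT_Q_MAX:

#define Q_MAX 8	/* stands in for DP_EVENT_Q_MAX */

static int ring_empty(unsigned int pndx, unsigned int gndx)
{
	return pndx == gndx;			/* nothing queued */
}

static int ring_full(unsigned int pndx, unsigned int gndx)
{
	return (pndx + 1) % Q_MAX == gndx;	/* next push would catch the reader */
}

This is why msm_dp_add_event() refuses a push when the incremented producer index equals event_gndx.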
-void dp_display_signal_audio_start(struct msm_dp *dp_display)
+void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
reinit_completion(&dp->audio_comp);
}
-void dp_display_signal_audio_complete(struct msm_dp *dp_display)
+void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
complete_all(&dp->audio_comp);
}
-static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv);
+static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv);
-static int dp_display_bind(struct device *dev, struct device *master,
+static int msm_dp_display_bind(struct device *dev, struct device *master,
void *data)
{
int rc = 0;
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
struct drm_device *drm = priv->dev;
- dp->dp_display.drm_dev = drm;
- priv->dp[dp->id] = &dp->dp_display;
+ dp->msm_dp_display.drm_dev = drm;
+ priv->dp[dp->id] = &dp->msm_dp_display;
dp->drm_dev = drm;
dp->aux->drm_dev = drm;
- rc = dp_aux_register(dp->aux);
+ rc = msm_dp_aux_register(dp->aux);
if (rc) {
DRM_ERROR("DRM DP AUX register failed\n");
goto end;
}
- rc = dp_register_audio_driver(dev, dp->audio);
+ rc = msm_dp_register_audio_driver(dev, dp->audio);
if (rc) {
DRM_ERROR("Audio registration Dp failed\n");
goto end;
}
- rc = dp_hpd_event_thread_start(dp);
+ rc = msm_dp_hpd_event_thread_start(dp);
if (rc) {
DRM_ERROR("Event thread create failed\n");
goto end;
@@ -296,44 +305,44 @@ end:
return rc;
}
-static void dp_display_unbind(struct device *dev, struct device *master,
+static void msm_dp_display_unbind(struct device *dev, struct device *master,
void *data)
{
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
kthread_stop(dp->ev_tsk);
of_dp_aux_depopulate_bus(dp->aux);
- dp_unregister_audio_driver(dev, dp->audio);
- dp_aux_unregister(dp->aux);
+ msm_dp_unregister_audio_driver(dev, dp->audio);
+ msm_dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
dp->aux->drm_dev = NULL;
priv->dp[dp->id] = NULL;
}
-static const struct component_ops dp_display_comp_ops = {
- .bind = dp_display_bind,
- .unbind = dp_display_unbind,
+static const struct component_ops msm_dp_display_comp_ops = {
+ .bind = msm_dp_display_bind,
+ .unbind = msm_dp_display_unbind,
};
-static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+static void msm_dp_display_send_hpd_event(struct msm_dp *msm_dp_display)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
struct drm_connector *connector;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
- connector = dp->dp_display.connector;
+ connector = dp->msm_dp_display.connector;
drm_helper_hpd_irq_event(connector->dev);
}
-static int dp_display_send_hpd_notification(struct dp_display_private *dp,
+static int msm_dp_display_send_hpd_notification(struct msm_dp_display_private *dp,
bool hpd)
{
- if ((hpd && dp->dp_display.link_ready) ||
- (!hpd && !dp->dp_display.link_ready)) {
+ if ((hpd && dp->msm_dp_display.link_ready) ||
+ (!hpd && !dp->msm_dp_display.link_ready)) {
drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
(hpd ? "on" : "off"));
return 0;
@@ -342,139 +351,139 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
/* reset video pattern flag on disconnect */
if (!hpd) {
dp->panel->video_test = false;
- if (!dp->dp_display.is_edp)
- drm_dp_set_subconnector_property(dp->dp_display.connector,
+ if (!dp->msm_dp_display.is_edp)
+ drm_dp_set_subconnector_property(dp->msm_dp_display.connector,
connector_status_disconnected,
dp->panel->dpcd,
dp->panel->downstream_ports);
}
- dp->dp_display.link_ready = hpd;
+ dp->msm_dp_display.link_ready = hpd;
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
- dp->dp_display.connector_type, hpd);
- dp_display_send_hpd_event(&dp->dp_display);
+ dp->msm_dp_display.connector_type, hpd);
+ msm_dp_display_send_hpd_event(&dp->msm_dp_display);
return 0;
}
-static int dp_display_process_hpd_high(struct dp_display_private *dp)
+static int msm_dp_display_process_hpd_high(struct msm_dp_display_private *dp)
{
- struct drm_connector *connector = dp->dp_display.connector;
+ struct drm_connector *connector = dp->msm_dp_display.connector;
const struct drm_display_info *info = &connector->display_info;
int rc = 0;
- rc = dp_panel_read_sink_caps(dp->panel, connector);
+ rc = msm_dp_panel_read_sink_caps(dp->panel, connector);
if (rc)
goto end;
- dp_link_process_request(dp->link);
+ msm_dp_link_process_request(dp->link);
- if (!dp->dp_display.is_edp)
+ if (!dp->msm_dp_display.is_edp)
drm_dp_set_subconnector_property(connector,
connector_status_connected,
dp->panel->dpcd,
dp->panel->downstream_ports);
- dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
+ dp->msm_dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
dp->audio_supported = info->has_audio;
- dp_panel_handle_sink_request(dp->panel);
+ msm_dp_panel_handle_sink_request(dp->panel);
/*
* set sink to normal operation mode -- D0
* before dpcd read
*/
- dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+ msm_dp_link_psm_config(dp->link, &dp->panel->link_info, false);
- dp_link_reset_phy_params_vx_px(dp->link);
- rc = dp_ctrl_on_link(dp->ctrl);
+ msm_dp_link_reset_phy_params_vx_px(dp->link);
+ rc = msm_dp_ctrl_on_link(dp->ctrl);
if (rc) {
DRM_ERROR("failed to complete DP link training\n");
goto end;
}
- dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
+ msm_dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
end:
return rc;
}
-static void dp_display_host_phy_init(struct dp_display_private *dp)
+static void msm_dp_display_host_phy_init(struct msm_dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
+ dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
if (!dp->phy_initialized) {
- dp_ctrl_phy_init(dp->ctrl);
+ msm_dp_ctrl_phy_init(dp->ctrl);
dp->phy_initialized = true;
}
}
-static void dp_display_host_phy_exit(struct dp_display_private *dp)
+static void msm_dp_display_host_phy_exit(struct msm_dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
+ dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
if (dp->phy_initialized) {
- dp_ctrl_phy_exit(dp->ctrl);
+ msm_dp_ctrl_phy_exit(dp->ctrl);
dp->phy_initialized = false;
}
}
-static void dp_display_host_init(struct dp_display_private *dp)
+static void msm_dp_display_host_init(struct msm_dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
+ dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
- dp_ctrl_core_clk_enable(dp->ctrl);
- dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
- dp_aux_init(dp->aux);
+ msm_dp_ctrl_core_clk_enable(dp->ctrl);
+ msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
+ msm_dp_aux_init(dp->aux);
dp->core_initialized = true;
}
-static void dp_display_host_deinit(struct dp_display_private *dp)
+static void msm_dp_display_host_deinit(struct msm_dp_display_private *dp)
{
drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
+ dp->msm_dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
- dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
- dp_aux_deinit(dp->aux);
- dp_ctrl_core_clk_disable(dp->ctrl);
+ msm_dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
+ msm_dp_aux_deinit(dp->aux);
+ msm_dp_ctrl_core_clk_disable(dp->ctrl);
dp->core_initialized = false;
}
-static int dp_display_usbpd_configure_cb(struct device *dev)
+static int msm_dp_display_usbpd_configure_cb(struct device *dev)
{
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
- dp_display_host_phy_init(dp);
+ msm_dp_display_host_phy_init(dp);
- return dp_display_process_hpd_high(dp);
+ return msm_dp_display_process_hpd_high(dp);
}
-static int dp_display_notify_disconnect(struct device *dev)
+static int msm_dp_display_notify_disconnect(struct device *dev)
{
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
- dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+ msm_dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
return 0;
}
-static void dp_display_handle_video_request(struct dp_display_private *dp)
+static void msm_dp_display_handle_video_request(struct msm_dp_display_private *dp)
{
if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
dp->panel->video_test = true;
- dp_link_send_test_response(dp->link);
+ msm_dp_link_send_test_response(dp->link);
}
}
-static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
+static int msm_dp_display_handle_port_status_changed(struct msm_dp_display_private *dp)
{
int rc = 0;
@@ -482,12 +491,12 @@ static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n");
if (dp->hpd_state != ST_DISCONNECTED) {
dp->hpd_state = ST_DISCONNECT_PENDING;
- dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+ msm_dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
}
} else {
if (dp->hpd_state == ST_DISCONNECTED) {
dp->hpd_state = ST_MAINLINK_READY;
- rc = dp_display_process_hpd_high(dp);
+ rc = msm_dp_display_process_hpd_high(dp);
if (rc)
dp->hpd_state = ST_DISCONNECTED;
}
@@ -496,7 +505,7 @@ static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
return rc;
}
-static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
+static int msm_dp_display_handle_irq_hpd(struct msm_dp_display_private *dp)
{
u32 sink_request = dp->link->sink_request;
@@ -510,48 +519,48 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
}
}
- dp_ctrl_handle_sink_request(dp->ctrl);
+ msm_dp_ctrl_handle_sink_request(dp->ctrl);
if (sink_request & DP_TEST_LINK_VIDEO_PATTERN)
- dp_display_handle_video_request(dp);
+ msm_dp_display_handle_video_request(dp);
return 0;
}
-static int dp_display_usbpd_attention_cb(struct device *dev)
+static int msm_dp_display_usbpd_attention_cb(struct device *dev)
{
int rc = 0;
u32 sink_request;
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
/* check for any test request issued by sink */
- rc = dp_link_process_request(dp->link);
+ rc = msm_dp_link_process_request(dp->link);
if (!rc) {
sink_request = dp->link->sink_request;
drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED)
- rc = dp_display_handle_port_status_changed(dp);
+ rc = msm_dp_display_handle_port_status_changed(dp);
else
- rc = dp_display_handle_irq_hpd(dp);
+ rc = msm_dp_display_handle_irq_hpd(dp);
}
return rc;
}
-static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+static int msm_dp_hpd_plug_handle(struct msm_dp_display_private *dp, u32 data)
{
u32 state;
int ret;
- struct platform_device *pdev = dp->dp_display.pdev;
+ struct platform_device *pdev = dp->msm_dp_display.pdev;
- dp_aux_enable_xfers(dp->aux, true);
+ msm_dp_aux_enable_xfers(dp->aux, true);
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
@@ -565,7 +574,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
if (state == ST_DISCONNECT_PENDING) {
/* wait until ST_DISCONNECTED */
- dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
+ msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -577,7 +586,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
return ret;
}
- ret = dp_display_usbpd_configure_cb(&pdev->dev);
+ ret = msm_dp_display_usbpd_configure_cb(&pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
pm_runtime_put_sync(&pdev->dev);
@@ -586,60 +595,60 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
}
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
/* uevent will complete connection part */
return 0;
};
-static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
+static void msm_dp_display_handle_plugged_change(struct msm_dp *msm_dp_display,
bool plugged)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- dp = container_of(dp_display,
- struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display,
+ struct msm_dp_display_private, msm_dp_display);
/* notify audio subsystem only if sink supports audio */
- if (dp_display->plugged_cb && dp_display->codec_dev &&
+ if (msm_dp_display->plugged_cb && msm_dp_display->codec_dev &&
dp->audio_supported)
- dp_display->plugged_cb(dp_display->codec_dev, plugged);
+ msm_dp_display->plugged_cb(msm_dp_display->codec_dev, plugged);
}
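Almost every entry point in this file recovers the driver's private state from the embedded public struct, as msm_dp_display_handle_plugged_change() does above. A minimal standalone sketch of that container_of pattern, with a simplified macro and illustrative fields:

#include <stddef.h>

struct msm_dp { int connector_type; };

struct msm_dp_display_private {
	int hpd_state;
	struct msm_dp msm_dp_display;	/* public part, embedded by value */
};

/* simplified stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct msm_dp_display_private *to_priv(struct msm_dp *pub)
{
	return container_of(pub, struct msm_dp_display_private, msm_dp_display);
}

Because the offset is computed at compile time, the rename in this patch stays purely mechanical: only the member name passed to container_of() changes.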
-static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+static int msm_dp_hpd_unplug_handle(struct msm_dp_display_private *dp, u32 data)
{
u32 state;
- struct platform_device *pdev = dp->dp_display.pdev;
+ struct platform_device *pdev = dp->msm_dp_display.pdev;
- dp_aux_enable_xfers(dp->aux, false);
+ msm_dp_aux_enable_xfers(dp->aux, false);
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
/* unplugged, no more irq_hpd handling */
- dp_del_event(dp, EV_IRQ_HPD_INT);
+ msm_dp_del_event(dp, EV_IRQ_HPD_INT);
if (state == ST_DISCONNECTED) {
/* triggered by irq_hpd with sink_count = 0 */
if (dp->link->sink_count == 0) {
- dp_display_host_phy_exit(dp);
+ msm_dp_display_host_phy_exit(dp);
}
- dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+ msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
} else if (state == ST_DISCONNECT_PENDING) {
mutex_unlock(&dp->event_mutex);
return 0;
} else if (state == ST_MAINLINK_READY) {
- dp_ctrl_off_link(dp->ctrl);
- dp_display_host_phy_exit(dp);
+ msm_dp_ctrl_off_link(dp->ctrl);
+ msm_dp_display_host_phy_exit(dp);
dp->hpd_state = ST_DISCONNECTED;
- dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+ msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev);
pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
@@ -649,7 +658,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
* We don't need separate work for disconnect as
* connect/attention interrupts are disabled
*/
- dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+ msm_dp_display_notify_disconnect(&dp->msm_dp_display.pdev->dev);
if (state == ST_DISPLAY_OFF) {
dp->hpd_state = ST_DISCONNECTED;
@@ -658,10 +667,10 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
}
/* signal the disconnect event early to ensure proper teardown */
- dp_display_handle_plugged_change(&dp->dp_display, false);
+ msm_dp_display_handle_plugged_change(&dp->msm_dp_display, false);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
/* uevent will complete disconnection part */
pm_runtime_put_sync(&pdev->dev);
@@ -669,7 +678,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
return 0;
}
-static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
+static int msm_dp_irq_hpd_handle(struct msm_dp_display_private *dp, u32 data)
{
u32 state;
@@ -678,7 +687,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
/* irq_hpd can happen at either connected or disconnected state */
state = dp->hpd_state;
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
@@ -687,35 +696,32 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING) {
/* wait until ST_CONNECTED */
- dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+ msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
mutex_unlock(&dp->event_mutex);
return 0;
}
- dp_display_usbpd_attention_cb(&dp->dp_display.pdev->dev);
+ msm_dp_display_usbpd_attention_cb(&dp->msm_dp_display.pdev->dev);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
- dp->dp_display.connector_type, state);
+ dp->msm_dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
return 0;
}
-static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
+static void msm_dp_display_deinit_sub_modules(struct msm_dp_display_private *dp)
{
- dp_audio_put(dp->audio);
- dp_panel_put(dp->panel);
- dp_aux_put(dp->aux);
+ msm_dp_audio_put(dp->audio);
+ msm_dp_panel_put(dp->panel);
+ msm_dp_aux_put(dp->aux);
}
-static int dp_init_sub_modules(struct dp_display_private *dp)
+static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
{
int rc = 0;
- struct device *dev = &dp->dp_display.pdev->dev;
- struct dp_panel_in panel_in = {
- .dev = dev,
- };
+ struct device *dev = &dp->msm_dp_display.pdev->dev;
struct phy *phy;
phy = devm_phy_get(dev, "dp");
@@ -723,14 +729,14 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
return PTR_ERR(phy);
rc = phy_set_mode_ext(phy, PHY_MODE_DP,
- dp->dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP);
+ dp->msm_dp_display.is_edp ? PHY_SUBMODE_EDP : PHY_SUBMODE_DP);
if (rc) {
DRM_ERROR("failed to set phy submode, rc = %d\n", rc);
dp->catalog = NULL;
goto error;
}
- dp->catalog = dp_catalog_get(dev);
+ dp->catalog = msm_dp_catalog_get(dev);
if (IS_ERR(dp->catalog)) {
rc = PTR_ERR(dp->catalog);
DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
@@ -738,9 +744,9 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error;
}
- dp->aux = dp_aux_get(dev, dp->catalog,
+ dp->aux = msm_dp_aux_get(dev, dp->catalog,
phy,
- dp->dp_display.is_edp);
+ dp->msm_dp_display.is_edp);
if (IS_ERR(dp->aux)) {
rc = PTR_ERR(dp->aux);
DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
@@ -748,7 +754,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error;
}
- dp->link = dp_link_get(dev, dp->aux);
+ dp->link = msm_dp_link_get(dev, dp->aux);
if (IS_ERR(dp->link)) {
rc = PTR_ERR(dp->link);
DRM_ERROR("failed to initialize link, rc = %d\n", rc);
@@ -756,11 +762,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error_link;
}
- panel_in.aux = dp->aux;
- panel_in.catalog = dp->catalog;
- panel_in.link = dp->link;
-
- dp->panel = dp_panel_get(&panel_in);
+ dp->panel = msm_dp_panel_get(dev, dp->aux, dp->link, dp->catalog);
if (IS_ERR(dp->panel)) {
rc = PTR_ERR(dp->panel);
DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
@@ -768,7 +770,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error_link;
}
- dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
+ dp->ctrl = msm_dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
dp->catalog,
phy);
if (IS_ERR(dp->ctrl)) {
@@ -778,7 +780,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error_ctrl;
}
- dp->audio = dp_audio_get(dp->dp_display.pdev, dp->panel, dp->catalog);
+ dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->catalog);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);
@@ -789,51 +791,51 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
return rc;
error_ctrl:
- dp_panel_put(dp->panel);
+ msm_dp_panel_put(dp->panel);
error_link:
- dp_aux_put(dp->aux);
+ msm_dp_aux_put(dp->aux);
error:
return rc;
}
-static int dp_display_set_mode(struct msm_dp *dp_display,
- struct dp_display_mode *mode)
+static int msm_dp_display_set_mode(struct msm_dp *msm_dp_display,
+ struct msm_dp_display_mode *mode)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
- drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode);
- dp->panel->dp_mode.bpp = mode->bpp;
- dp->panel->dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420;
- dp_panel_init_panel_info(dp->panel);
+ drm_mode_copy(&dp->panel->msm_dp_mode.drm_mode, &mode->drm_mode);
+ dp->panel->msm_dp_mode.bpp = mode->bpp;
+ dp->panel->msm_dp_mode.out_fmt_is_yuv_420 = mode->out_fmt_is_yuv_420;
+ msm_dp_panel_init_panel_info(dp->panel);
return 0;
}
-static int dp_display_enable(struct dp_display_private *dp, bool force_link_train)
+static int msm_dp_display_enable(struct msm_dp_display_private *dp, bool force_link_train)
{
int rc = 0;
- struct msm_dp *dp_display = &dp->dp_display;
+ struct msm_dp *msm_dp_display = &dp->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "sink_count=%d\n", dp->link->sink_count);
- if (dp_display->power_on) {
+ if (msm_dp_display->power_on) {
drm_dbg_dp(dp->drm_dev, "Link already setup, return\n");
return 0;
}
- rc = dp_ctrl_on_stream(dp->ctrl, force_link_train);
+ rc = msm_dp_ctrl_on_stream(dp->ctrl, force_link_train);
if (!rc)
- dp_display->power_on = true;
+ msm_dp_display->power_on = true;
return rc;
}
-static int dp_display_post_enable(struct msm_dp *dp_display)
+static int msm_dp_display_post_enable(struct msm_dp *msm_dp_display)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
u32 rate;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
rate = dp->link->link_params.rate;
@@ -843,85 +845,85 @@ static int dp_display_post_enable(struct msm_dp *dp_display)
}
/* signal the connect event late to synchronize video and display */
- dp_display_handle_plugged_change(dp_display, true);
+ msm_dp_display_handle_plugged_change(msm_dp_display, true);
- if (dp_display->psr_supported)
- dp_ctrl_config_psr(dp->ctrl);
+ if (msm_dp_display->psr_supported)
+ msm_dp_ctrl_config_psr(dp->ctrl);
return 0;
}
-static int dp_display_disable(struct dp_display_private *dp)
+static int msm_dp_display_disable(struct msm_dp_display_private *dp)
{
- struct msm_dp *dp_display = &dp->dp_display;
+ struct msm_dp *msm_dp_display = &dp->msm_dp_display;
- if (!dp_display->power_on)
+ if (!msm_dp_display->power_on)
return 0;
/* wait only if audio was enabled */
- if (dp_display->audio_enabled) {
+ if (msm_dp_display->audio_enabled) {
/* signal the disconnect event */
- dp_display_handle_plugged_change(dp_display, false);
+ msm_dp_display_handle_plugged_change(msm_dp_display, false);
if (!wait_for_completion_timeout(&dp->audio_comp,
HZ * 5))
DRM_ERROR("audio comp timeout\n");
}
- dp_display->audio_enabled = false;
+ msm_dp_display->audio_enabled = false;
if (dp->link->sink_count == 0) {
/*
* irq_hpd with sink_count = 0
* HDMI cable unplugged from the dongle
*/
- dp_ctrl_off_link_stream(dp->ctrl);
+ msm_dp_ctrl_off_link_stream(dp->ctrl);
} else {
/*
* unplugged interrupt
* dongle unplugged from the DUT
*/
- dp_ctrl_off(dp->ctrl);
- dp_display_host_phy_exit(dp);
+ msm_dp_ctrl_off(dp->ctrl);
+ msm_dp_display_host_phy_exit(dp);
}
- dp_display->power_on = false;
+ msm_dp_display->power_on = false;
drm_dbg_dp(dp->drm_dev, "sink count: %d\n", dp->link->sink_count);
return 0;
}
-int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev)
{
bool plugged;
- dp_display->plugged_cb = fn;
- dp_display->codec_dev = codec_dev;
- plugged = dp_display->link_ready;
- dp_display_handle_plugged_change(dp_display, plugged);
+ msm_dp_display->plugged_cb = fn;
+ msm_dp_display->codec_dev = codec_dev;
+ plugged = msm_dp_display->link_ready;
+ msm_dp_display_handle_plugged_change(msm_dp_display, plugged);
return 0;
}
/**
- * dp_bridge_mode_valid - callback to determine if specified mode is valid
+ * msm_dp_bridge_mode_valid - callback to determine if specified mode is valid
* @bridge: Pointer to drm bridge structure
* @info: display info
* @mode: Pointer to drm mode structure
* Returns: Validity status for specified mode
*/
-enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
+enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
const u32 num_components = 3, default_bpp = 24;
- struct dp_display_private *dp_display;
- struct dp_link_info *link_info;
+ struct msm_dp_display_private *msm_dp_display;
+ struct msm_dp_link_info *link_info;
u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
struct msm_dp *dp;
int mode_pclk_khz = mode->clock;
- dp = to_dp_bridge(bridge)->dp_display;
+ dp = to_dp_bridge(bridge)->msm_dp_display;
if (!dp || !mode_pclk_khz || !dp->connector) {
DRM_ERROR("invalid params\n");
@@ -931,18 +933,18 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
- link_info = &dp_display->panel->link_info;
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
+ link_info = &msm_dp_display->panel->link_info;
if (drm_mode_is_420_only(&dp->connector->display_info, mode) &&
- dp_display->panel->vsc_sdp_supported)
+ msm_dp_display->panel->vsc_sdp_supported)
mode_pclk_khz /= 2;
mode_bpp = dp->connector->display_info.bpc * num_components;
if (!mode_bpp)
mode_bpp = default_bpp;
- mode_bpp = dp_panel_get_mode_bpp(dp_display->panel,
+ mode_bpp = msm_dp_panel_get_mode_bpp(msm_dp_display->panel,
mode_bpp, mode_pclk_khz);
mode_rate_khz = mode_pclk_khz * mode_bpp;
@@ -954,50 +956,50 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
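The mode filter above rejects a mode once pixel clock times bits per pixel exceeds the link budget; the supported_rate_khz computation sits in the elided middle of this hunk. A rough standalone illustration of the comparison, assuming a hypothetical HBR2 x4 link with 8b/10b coding:

#include <stdio.h>

int main(void)
{
	unsigned long long mode_pclk_khz = 594000;	/* e.g. 4k60 */
	unsigned long long mode_bpp = 24;		/* 8 bpc * 3 components */
	unsigned long long mode_rate_khz = mode_pclk_khz * mode_bpp;

	/* assumed budget: 4 lanes * 5.4 Gbps (HBR2), 80% usable after 8b/10b */
	unsigned long long supported_rate_khz = 4ULL * 5400000 * 8 / 10;

	puts(mode_rate_khz > supported_rate_khz ? "MODE_BAD" : "MODE_OK");
	return 0;
}

Here 14,256,000 is below 17,280,000, so the mode passes.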
-int dp_display_get_modes(struct msm_dp *dp)
+int msm_dp_display_get_modes(struct msm_dp *dp)
{
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
if (!dp) {
DRM_ERROR("invalid params\n");
return 0;
}
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
- return dp_panel_get_modes(dp_display->panel,
+ return msm_dp_panel_get_modes(msm_dp_display->panel,
dp->connector);
}
-bool dp_display_check_video_test(struct msm_dp *dp)
+bool msm_dp_display_check_video_test(struct msm_dp *dp)
{
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
- return dp_display->panel->video_test;
+ return msm_dp_display->panel->video_test;
}
-int dp_display_get_test_bpp(struct msm_dp *dp)
+int msm_dp_display_get_test_bpp(struct msm_dp *dp)
{
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
if (!dp) {
DRM_ERROR("invalid params\n");
return 0;
}
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
- return dp_link_bit_depth_to_bpp(
- dp_display->link->test_video.test_bit_depth);
+ return msm_dp_link_bit_depth_to_bpp(
+ msm_dp_display->link->test_video.test_bit_depth);
}
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
{
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
/*
* if we are reading registers we need the link clocks to be on
@@ -1006,65 +1008,65 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
* power_on status before dumping DP registers to avoid crash due
* to unclocked access
*/
- mutex_lock(&dp_display->event_mutex);
+ mutex_lock(&msm_dp_display->event_mutex);
if (!dp->power_on) {
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
return;
}
- dp_catalog_snapshot(dp_display->catalog, disp_state);
+ msm_dp_catalog_snapshot(msm_dp_display->catalog, disp_state);
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
}
-void dp_display_set_psr(struct msm_dp *dp_display, bool enter)
+void msm_dp_display_set_psr(struct msm_dp *msm_dp_display, bool enter)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- if (!dp_display) {
+ if (!msm_dp_display) {
DRM_ERROR("invalid params\n");
return;
}
- dp = container_of(dp_display, struct dp_display_private, dp_display);
- dp_ctrl_set_psr(dp->ctrl, enter);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
+ msm_dp_ctrl_set_psr(dp->ctrl, enter);
}
static int hpd_event_thread(void *data)
{
- struct dp_display_private *dp_priv;
+ struct msm_dp_display_private *msm_dp_priv;
unsigned long flag;
- struct dp_event *todo;
+ struct msm_dp_event *todo;
int timeout_mode = 0;
- dp_priv = (struct dp_display_private *)data;
+ msm_dp_priv = (struct msm_dp_display_private *)data;
while (1) {
if (timeout_mode) {
- wait_event_timeout(dp_priv->event_q,
- (dp_priv->event_pndx == dp_priv->event_gndx) ||
+ wait_event_timeout(msm_dp_priv->event_q,
+ (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) ||
kthread_should_stop(), EVENT_TIMEOUT);
} else {
- wait_event_interruptible(dp_priv->event_q,
- (dp_priv->event_pndx != dp_priv->event_gndx) ||
+ wait_event_interruptible(msm_dp_priv->event_q,
+ (msm_dp_priv->event_pndx != msm_dp_priv->event_gndx) ||
kthread_should_stop());
}
if (kthread_should_stop())
break;
- spin_lock_irqsave(&dp_priv->event_lock, flag);
- todo = &dp_priv->event_list[dp_priv->event_gndx];
+ spin_lock_irqsave(&msm_dp_priv->event_lock, flag);
+ todo = &msm_dp_priv->event_list[msm_dp_priv->event_gndx];
if (todo->delay) {
- struct dp_event *todo_next;
+ struct msm_dp_event *todo_next;
- dp_priv->event_gndx++;
- dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+ msm_dp_priv->event_gndx++;
+ msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX;
/* re-enter delayed event into the queue */
- todo_next = &dp_priv->event_list[dp_priv->event_pndx++];
- dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+ todo_next = &msm_dp_priv->event_list[msm_dp_priv->event_pndx++];
+ msm_dp_priv->event_pndx %= DP_EVENT_Q_MAX;
todo_next->event_id = todo->event_id;
todo_next->data = todo->data;
todo_next->delay = todo->delay - 1;
@@ -1075,33 +1077,33 @@ static int hpd_event_thread(void *data)
/* switch to timeout mode */
timeout_mode = 1;
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
continue;
}
/* timeout with no events in q */
- if (dp_priv->event_pndx == dp_priv->event_gndx) {
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ if (msm_dp_priv->event_pndx == msm_dp_priv->event_gndx) {
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
continue;
}
- dp_priv->event_gndx++;
- dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+ msm_dp_priv->event_gndx++;
+ msm_dp_priv->event_gndx %= DP_EVENT_Q_MAX;
timeout_mode = 0;
- spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ spin_unlock_irqrestore(&msm_dp_priv->event_lock, flag);
switch (todo->event_id) {
case EV_HPD_PLUG_INT:
- dp_hpd_plug_handle(dp_priv, todo->data);
+ msm_dp_hpd_plug_handle(msm_dp_priv, todo->data);
break;
case EV_HPD_UNPLUG_INT:
- dp_hpd_unplug_handle(dp_priv, todo->data);
+ msm_dp_hpd_unplug_handle(msm_dp_priv, todo->data);
break;
case EV_IRQ_HPD_INT:
- dp_irq_hpd_handle(dp_priv, todo->data);
+ msm_dp_irq_hpd_handle(msm_dp_priv, todo->data);
break;
case EV_USER_NOTIFICATION:
- dp_display_send_hpd_notification(dp_priv,
+ msm_dp_display_send_hpd_notification(msm_dp_priv,
todo->data);
break;
default:
@@ -1112,22 +1114,22 @@ static int hpd_event_thread(void *data)
return 0;
}
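hpd_event_thread() drains a fixed-size ring indexed by event_gndx/event_pndx; an event carrying a nonzero delay is pushed back with delay - 1, so it matures by one EVENT_TIMEOUT tick per pass. A compact sketch of just that pop-or-requeue step, names illustrative:

#define Q_MAX 8

struct ev { int id; unsigned int data; int delay; };

static struct ev q[Q_MAX];
static unsigned int gndx, pndx;	/* get / put indices */

/* returns 1 with *out filled when an event is ready, 0 if it was re-queued */
static int pop_or_requeue(struct ev *out)
{
	struct ev e = q[gndx];

	gndx = (gndx + 1) % Q_MAX;
	if (e.delay) {
		e.delay--;		/* age by one timeout tick */
		q[pndx] = e;
		pndx = (pndx + 1) % Q_MAX;
		return 0;
	}
	*out = e;
	return 1;
}

The real thread does the same under event_lock and switches between interruptible and timeout waits depending on whether delayed events are pending.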
-static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv)
+static int msm_dp_hpd_event_thread_start(struct msm_dp_display_private *msm_dp_priv)
{
/* set event q to empty */
- dp_priv->event_gndx = 0;
- dp_priv->event_pndx = 0;
+ msm_dp_priv->event_gndx = 0;
+ msm_dp_priv->event_pndx = 0;
- dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
- if (IS_ERR(dp_priv->ev_tsk))
- return PTR_ERR(dp_priv->ev_tsk);
+ msm_dp_priv->ev_tsk = kthread_run(hpd_event_thread, msm_dp_priv, "dp_hpd_handler");
+ if (IS_ERR(msm_dp_priv->ev_tsk))
+ return PTR_ERR(msm_dp_priv->ev_tsk);
return 0;
}
-static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
+static irqreturn_t msm_dp_display_irq_handler(int irq, void *dev_id)
{
- struct dp_display_private *dp = dev_id;
+ struct msm_dp_display_private *dp = dev_id;
irqreturn_t ret = IRQ_NONE;
u32 hpd_isr_status;
@@ -1136,43 +1138,43 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
+ hpd_isr_status = msm_dp_catalog_hpd_get_intr_status(dp->catalog);
if (hpd_isr_status & 0x0F) {
drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n",
- dp->dp_display.connector_type, hpd_isr_status);
+ dp->msm_dp_display.connector_type, hpd_isr_status);
/* hpd related interrupts */
if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
- dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
+ msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
- dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
+ msm_dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
}
if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
- dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
- dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
+ msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
}
if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
- dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
ret = IRQ_HANDLED;
}
/* DP controller isr */
- ret |= dp_ctrl_isr(dp->ctrl);
+ ret |= msm_dp_ctrl_isr(dp->ctrl);
/* DP aux isr */
- ret |= dp_aux_isr(dp->aux);
+ ret |= msm_dp_aux_isr(dp->aux);
return ret;
}
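Note how the handler ORs the sub-block results together: IRQ_NONE is zero, so an IRQ_HANDLED from any of the HPD, controller, or AUX paths is sticky. A toy illustration (irqreturn_t redefined locally for the sketch):

typedef enum { IRQ_NONE = 0, IRQ_HANDLED = 1 } irqreturn_t;

static irqreturn_t combined_isr(irqreturn_t hpd, irqreturn_t ctrl, irqreturn_t aux)
{
	irqreturn_t ret = IRQ_NONE;

	ret |= hpd;	/* any IRQ_HANDLED survives the ORs */
	ret |= ctrl;
	ret |= aux;
	return ret;
}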
-static int dp_display_request_irq(struct dp_display_private *dp)
+static int msm_dp_display_request_irq(struct msm_dp_display_private *dp)
{
int rc = 0;
- struct platform_device *pdev = dp->dp_display.pdev;
+ struct platform_device *pdev = dp->msm_dp_display.pdev;
dp->irq = platform_get_irq(pdev, 0);
if (dp->irq < 0) {
@@ -1180,7 +1182,7 @@ static int dp_display_request_irq(struct dp_display_private *dp)
return dp->irq;
}
- rc = devm_request_irq(&pdev->dev, dp->irq, dp_display_irq_handler,
+ rc = devm_request_irq(&pdev->dev, dp->irq, msm_dp_display_irq_handler,
IRQF_TRIGGER_HIGH|IRQF_NO_AUTOEN,
"dp_display_isr", dp);
@@ -1193,7 +1195,7 @@ static int dp_display_request_irq(struct dp_display_private *dp)
return 0;
}
-static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev)
+static const struct msm_dp_desc *msm_dp_display_get_desc(struct platform_device *pdev)
{
const struct msm_dp_desc *descs = of_device_get_match_data(&pdev->dev);
struct resource *res;
@@ -1212,7 +1214,7 @@ static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev)
return NULL;
}
-static int dp_display_probe_tail(struct device *dev)
+static int msm_dp_display_probe_tail(struct device *dev)
{
struct msm_dp *dp = dev_get_drvdata(dev);
int ret;
@@ -1232,19 +1234,19 @@ static int dp_display_probe_tail(struct device *dev)
return ret;
}
- ret = component_add(dev, &dp_display_comp_ops);
+ ret = component_add(dev, &msm_dp_display_comp_ops);
if (ret)
DRM_ERROR("component add failed, rc=%d\n", ret);
return ret;
}
-static int dp_auxbus_done_probe(struct drm_dp_aux *aux)
+static int msm_dp_auxbus_done_probe(struct drm_dp_aux *aux)
{
- return dp_display_probe_tail(aux->dev);
+ return msm_dp_display_probe_tail(aux->dev);
}
-static int dp_display_get_connector_type(struct platform_device *pdev,
+static int msm_dp_display_get_connector_type(struct platform_device *pdev,
const struct msm_dp_desc *desc)
{
struct device_node *node = pdev->dev.of_node;
@@ -1263,10 +1265,10 @@ static int dp_display_get_connector_type(struct platform_device *pdev,
return connector_type;
}
-static int dp_display_probe(struct platform_device *pdev)
+static int msm_dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
const struct msm_dp_desc *desc;
if (!pdev || !pdev->dev.of_node) {
@@ -1278,18 +1280,18 @@ static int dp_display_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
- desc = dp_display_get_desc(pdev);
+ desc = msm_dp_display_get_desc(pdev);
if (!desc)
return -EINVAL;
- dp->dp_display.pdev = pdev;
+ dp->msm_dp_display.pdev = pdev;
dp->id = desc->id;
- dp->dp_display.connector_type = dp_display_get_connector_type(pdev, desc);
+ dp->msm_dp_display.connector_type = msm_dp_display_get_connector_type(pdev, desc);
dp->wide_bus_supported = desc->wide_bus_supported;
- dp->dp_display.is_edp =
- (dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
+ dp->msm_dp_display.is_edp =
+ (dp->msm_dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
- rc = dp_init_sub_modules(dp);
+ rc = msm_dp_init_sub_modules(dp);
if (rc) {
DRM_ERROR("init sub module failed\n");
return -EPROBE_DEFER;
@@ -1301,28 +1303,28 @@ static int dp_display_probe(struct platform_device *pdev)
spin_lock_init(&dp->event_lock);
/* Store DP audio handle inside DP display */
- dp->dp_display.dp_audio = dp->audio;
+ dp->msm_dp_display.msm_dp_audio = dp->audio;
init_completion(&dp->audio_comp);
- platform_set_drvdata(pdev, &dp->dp_display);
+ platform_set_drvdata(pdev, &dp->msm_dp_display);
rc = devm_pm_runtime_enable(&pdev->dev);
if (rc)
goto err;
- rc = dp_display_request_irq(dp);
+ rc = msm_dp_display_request_irq(dp);
if (rc)
goto err;
- if (dp->dp_display.is_edp) {
- rc = devm_of_dp_aux_populate_bus(dp->aux, dp_auxbus_done_probe);
+ if (dp->msm_dp_display.is_edp) {
+ rc = devm_of_dp_aux_populate_bus(dp->aux, msm_dp_auxbus_done_probe);
if (rc) {
DRM_ERROR("eDP auxbus population failed, rc=%d\n", rc);
goto err;
}
} else {
- rc = dp_display_probe_tail(&pdev->dev);
+ rc = msm_dp_display_probe_tail(&pdev->dev);
if (rc)
goto err;
}
@@ -1330,70 +1332,70 @@ static int dp_display_probe(struct platform_device *pdev)
return rc;
err:
- dp_display_deinit_sub_modules(dp);
+ msm_dp_display_deinit_sub_modules(dp);
return rc;
}
-static void dp_display_remove(struct platform_device *pdev)
+static void msm_dp_display_remove(struct platform_device *pdev)
{
- struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
- component_del(&pdev->dev, &dp_display_comp_ops);
- dp_display_deinit_sub_modules(dp);
+ component_del(&pdev->dev, &msm_dp_display_comp_ops);
+ msm_dp_display_deinit_sub_modules(dp);
platform_set_drvdata(pdev, NULL);
}
-static int dp_pm_runtime_suspend(struct device *dev)
+static int msm_dp_pm_runtime_suspend(struct device *dev)
{
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
disable_irq(dp->irq);
- if (dp->dp_display.is_edp) {
- dp_display_host_phy_exit(dp);
- dp_catalog_ctrl_hpd_disable(dp->catalog);
+ if (dp->msm_dp_display.is_edp) {
+ msm_dp_display_host_phy_exit(dp);
+ msm_dp_catalog_ctrl_hpd_disable(dp->catalog);
}
- dp_display_host_deinit(dp);
+ msm_dp_display_host_deinit(dp);
return 0;
}
-static int dp_pm_runtime_resume(struct device *dev)
+static int msm_dp_pm_runtime_resume(struct device *dev)
{
- struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_dp_display_private *dp = dev_get_dp_display_private(dev);
/*
* for eDP, host controller, HPD block and PHY are enabled here
* but with HPD irq disabled
*
* for DP, only host controller is enabled here.
- * HPD block is enabled at dp_bridge_hpd_enable()
+ * HPD block is enabled at msm_dp_bridge_hpd_enable()
* PHY will be enabled at plugin handler later
*/
- dp_display_host_init(dp);
- if (dp->dp_display.is_edp) {
- dp_catalog_ctrl_hpd_enable(dp->catalog);
- dp_display_host_phy_init(dp);
+ msm_dp_display_host_init(dp);
+ if (dp->msm_dp_display.is_edp) {
+ msm_dp_catalog_ctrl_hpd_enable(dp->catalog);
+ msm_dp_display_host_phy_init(dp);
}
enable_irq(dp->irq);
return 0;
}
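These two callbacks are only ever reached through the runtime-PM refcount; the plug and bridge paths in this patch bump it with pm_runtime_resume_and_get() and drop it with pm_runtime_put_sync(). A kernel-style sketch of that balanced pattern (the helper name is illustrative, not part of this patch):

#include <linux/pm_runtime.h>

static int example_touch_hw(struct device *dev)
{
	int ret;

	/* resumes the device (runtime_resume may run) and takes a reference */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... access controller registers while powered ... */

	pm_runtime_put_sync(dev);	/* final put may invoke runtime_suspend */
	return 0;
}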
-static const struct dev_pm_ops dp_pm_ops = {
- SET_RUNTIME_PM_OPS(dp_pm_runtime_suspend, dp_pm_runtime_resume, NULL)
+static const struct dev_pm_ops msm_dp_pm_ops = {
+ SET_RUNTIME_PM_OPS(msm_dp_pm_runtime_suspend, msm_dp_pm_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
-static struct platform_driver dp_display_driver = {
- .probe = dp_display_probe,
- .remove_new = dp_display_remove,
+static struct platform_driver msm_dp_display_driver = {
+ .probe = msm_dp_display_probe,
+ .remove = msm_dp_display_remove,
.driver = {
.name = "msm-dp-display",
- .of_match_table = dp_dt_match,
+ .of_match_table = msm_dp_dt_match,
.suppress_bind_attrs = true,
- .pm = &dp_pm_ops,
+ .pm = &msm_dp_pm_ops,
},
};
@@ -1401,7 +1403,7 @@ int __init msm_dp_register(void)
{
int ret;
- ret = platform_driver_register(&dp_display_driver);
+ ret = platform_driver_register(&msm_dp_display_driver);
if (ret)
DRM_ERROR("Dp display driver register failed");
@@ -1410,294 +1412,294 @@ int __init msm_dp_register(void)
void __exit msm_dp_unregister(void)
{
- platform_driver_unregister(&dp_display_driver);
+ platform_driver_unregister(&msm_dp_display_driver);
}
-bool msm_dp_is_yuv_420_enabled(const struct msm_dp *dp_display,
+bool msm_dp_is_yuv_420_enabled(const struct msm_dp *msm_dp_display,
const struct drm_display_mode *mode)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
const struct drm_display_info *info;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
- info = &dp_display->connector->display_info;
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
+ info = &msm_dp_display->connector->display_info;
return dp->panel->vsc_sdp_supported && drm_mode_is_420_only(info, mode);
}
-bool msm_dp_needs_periph_flush(const struct msm_dp *dp_display,
+bool msm_dp_needs_periph_flush(const struct msm_dp *msm_dp_display,
const struct drm_display_mode *mode)
{
- return msm_dp_is_yuv_420_enabled(dp_display, mode);
+ return msm_dp_is_yuv_420_enabled(msm_dp_display, mode);
}
-bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
+bool msm_dp_wide_bus_available(const struct msm_dp *msm_dp_display)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
- if (dp->dp_mode.out_fmt_is_yuv_420)
+ if (dp->msm_dp_mode.out_fmt_is_yuv_420)
return false;
return dp->wide_bus_supported;
}
-void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp)
+void msm_dp_display_debugfs_init(struct msm_dp *msm_dp_display, struct dentry *root, bool is_edp)
{
- struct dp_display_private *dp;
+ struct msm_dp_display_private *dp;
struct device *dev;
int rc;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
- dev = &dp->dp_display.pdev->dev;
+ dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
+ dev = &dp->msm_dp_display.pdev->dev;
- rc = dp_debug_init(dev, dp->panel, dp->link, dp->dp_display.connector, root, is_edp);
+ rc = msm_dp_debug_init(dev, dp->panel, dp->link, dp->msm_dp_display.connector, root, is_edp);
if (rc)
DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
}
-int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
+int msm_dp_modeset_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
struct drm_encoder *encoder, bool yuv_supported)
{
- struct dp_display_private *dp_priv;
+ struct msm_dp_display_private *msm_dp_priv;
int ret;
- dp_display->drm_dev = dev;
+ msm_dp_display->drm_dev = dev;
- dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
+ msm_dp_priv = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
- ret = dp_bridge_init(dp_display, dev, encoder);
+ ret = msm_dp_bridge_init(msm_dp_display, dev, encoder, yuv_supported);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dp bridge: %d\n", ret);
return ret;
}
- dp_display->connector = dp_drm_connector_init(dp_display, encoder, yuv_supported);
- if (IS_ERR(dp_display->connector)) {
- ret = PTR_ERR(dp_display->connector);
+ msm_dp_display->connector = msm_dp_drm_connector_init(msm_dp_display, encoder);
+ if (IS_ERR(msm_dp_display->connector)) {
+ ret = PTR_ERR(msm_dp_display->connector);
DRM_DEV_ERROR(dev->dev,
"failed to create dp connector: %d\n", ret);
- dp_display->connector = NULL;
+ msm_dp_display->connector = NULL;
return ret;
}
- dp_priv->panel->connector = dp_display->connector;
+ msm_dp_priv->panel->connector = msm_dp_display->connector;
return 0;
}
-void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
int rc = 0;
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
u32 state;
bool force_link_train = false;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
- if (!dp_display->dp_mode.drm_mode.clock) {
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
+ if (!msm_dp_display->msm_dp_mode.drm_mode.clock) {
DRM_ERROR("invalid params\n");
return;
}
if (dp->is_edp)
- dp_hpd_plug_handle(dp_display, 0);
+ msm_dp_hpd_plug_handle(msm_dp_display, 0);
- mutex_lock(&dp_display->event_mutex);
+ mutex_lock(&msm_dp_display->event_mutex);
if (pm_runtime_resume_and_get(&dp->pdev->dev)) {
DRM_ERROR("failed to pm_runtime_resume\n");
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
return;
}
- state = dp_display->hpd_state;
+ state = msm_dp_display->hpd_state;
if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
return;
}
- rc = dp_display_set_mode(dp, &dp_display->dp_mode);
+ rc = msm_dp_display_set_mode(dp, &msm_dp_display->msm_dp_mode);
if (rc) {
DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc);
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
return;
}
- state = dp_display->hpd_state;
+ state = msm_dp_display->hpd_state;
if (state == ST_DISPLAY_OFF) {
- dp_display_host_phy_init(dp_display);
+ msm_dp_display_host_phy_init(msm_dp_display);
force_link_train = true;
}
- dp_display_enable(dp_display, force_link_train);
+ msm_dp_display_enable(msm_dp_display, force_link_train);
- rc = dp_display_post_enable(dp);
+ rc = msm_dp_display_post_enable(dp);
if (rc) {
DRM_ERROR("DP display post enable failed, rc=%d\n", rc);
- dp_display_disable(dp_display);
+ msm_dp_display_disable(msm_dp_display);
}
/* completed connection */
- dp_display->hpd_state = ST_CONNECTED;
+ msm_dp_display->hpd_state = ST_CONNECTED;
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
}
-void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
- struct dp_display_private *dp_display;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
+ struct msm_dp_display_private *msm_dp_display;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
- dp_ctrl_push_idle(dp_display->ctrl);
+ msm_dp_ctrl_push_idle(msm_dp_display->ctrl);
}
-void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
u32 state;
- struct dp_display_private *dp_display;
+ struct msm_dp_display_private *msm_dp_display;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
if (dp->is_edp)
- dp_hpd_unplug_handle(dp_display, 0);
+ msm_dp_hpd_unplug_handle(msm_dp_display, 0);
- mutex_lock(&dp_display->event_mutex);
+ mutex_lock(&msm_dp_display->event_mutex);
- state = dp_display->hpd_state;
+ state = msm_dp_display->hpd_state;
if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED)
drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n",
dp->connector_type, state);
- dp_display_disable(dp_display);
+ msm_dp_display_disable(msm_dp_display);
- state = dp_display->hpd_state;
+ state = msm_dp_display->hpd_state;
if (state == ST_DISCONNECT_PENDING) {
/* completed disconnection */
- dp_display->hpd_state = ST_DISCONNECTED;
+ msm_dp_display->hpd_state = ST_DISCONNECTED;
} else {
- dp_display->hpd_state = ST_DISPLAY_OFF;
+ msm_dp_display->hpd_state = ST_DISPLAY_OFF;
}
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
pm_runtime_put_sync(&dp->pdev->dev);
- mutex_unlock(&dp_display->event_mutex);
+ mutex_unlock(&msm_dp_display->event_mutex);
}
-void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
- struct dp_display_private *dp_display;
- struct dp_panel *dp_panel;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
+ struct msm_dp_display_private *msm_dp_display;
+ struct msm_dp_panel *msm_dp_panel;
- dp_display = container_of(dp, struct dp_display_private, dp_display);
- dp_panel = dp_display->panel;
+ msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
+ msm_dp_panel = msm_dp_display->panel;
- memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode));
+ memset(&msm_dp_display->msm_dp_mode, 0x0, sizeof(struct msm_dp_display_mode));
- if (dp_display_check_video_test(dp))
- dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp);
+ if (msm_dp_display_check_video_test(dp))
+ msm_dp_display->msm_dp_mode.bpp = msm_dp_display_get_test_bpp(dp);
else /* Default num_components per pixel = 3 */
- dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3;
+ msm_dp_display->msm_dp_mode.bpp = dp->connector->display_info.bpc * 3;
- if (!dp_display->dp_mode.bpp)
- dp_display->dp_mode.bpp = 24; /* Default bpp */
+ if (!msm_dp_display->msm_dp_mode.bpp)
+ msm_dp_display->msm_dp_mode.bpp = 24; /* Default bpp */
- drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode);
+ drm_mode_copy(&msm_dp_display->msm_dp_mode.drm_mode, adjusted_mode);
- dp_display->dp_mode.v_active_low =
- !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC);
+ msm_dp_display->msm_dp_mode.v_active_low =
+ !!(msm_dp_display->msm_dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC);
- dp_display->dp_mode.h_active_low =
- !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
+ msm_dp_display->msm_dp_mode.h_active_low =
+ !!(msm_dp_display->msm_dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
- dp_display->dp_mode.out_fmt_is_yuv_420 =
+ msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 =
drm_mode_is_420_only(&dp->connector->display_info, adjusted_mode) &&
- dp_panel->vsc_sdp_supported;
+ msm_dp_panel->vsc_sdp_supported;
/* populate wide_bus_support to different layers */
- dp_display->ctrl->wide_bus_en =
- dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported;
- dp_display->catalog->wide_bus_en =
- dp_display->dp_mode.out_fmt_is_yuv_420 ? false : dp_display->wide_bus_supported;
+ msm_dp_display->ctrl->wide_bus_en =
+ msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported;
+ msm_dp_display->catalog->wide_bus_en =
+ msm_dp_display->msm_dp_mode.out_fmt_is_yuv_420 ? false : msm_dp_display->wide_bus_supported;
}
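mode_set above derives bpp as bpc times three components with a fallback of 24, and reduces the DRM sync flags to active-low booleans. A tiny standalone illustration of both computations, with stand-in flag values:

#include <stdio.h>

#define FLAG_NHSYNC (1 << 0)	/* stand-ins for DRM_MODE_FLAG_N{H,V}SYNC */
#define FLAG_NVSYNC (1 << 1)

int main(void)
{
	unsigned int bpc = 10, flags = FLAG_NVSYNC;
	unsigned int bpp = bpc ? bpc * 3 : 24;	/* 30 here; 24 if bpc is unknown */

	printf("bpp=%u v_active_low=%d h_active_low=%d\n",
	       bpp, !!(flags & FLAG_NVSYNC), !!(flags & FLAG_NHSYNC));
	return 0;
}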
-void dp_bridge_hpd_enable(struct drm_bridge *bridge)
+void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
- struct msm_dp *dp_display = dp_bridge->dp_display;
- struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge);
+ struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display;
+ struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
/*
* this is for external DP with hpd irq enabled case,
- * step-1: dp_pm_runtime_resume() enable dp host only
+ * step-1: msm_dp_pm_runtime_resume() enable dp host only
* step-2: enable hpd block and have hpd irq enabled here
* step-3: waiting for plugin irq while phy is not initialized
* step-4: DP PHY is initialized at plugin handler before link training
*
*/
mutex_lock(&dp->event_mutex);
- if (pm_runtime_resume_and_get(&dp_display->pdev->dev)) {
+ if (pm_runtime_resume_and_get(&msm_dp_display->pdev->dev)) {
DRM_ERROR("failed to resume power\n");
mutex_unlock(&dp->event_mutex);
return;
}
- dp_catalog_ctrl_hpd_enable(dp->catalog);
+ msm_dp_catalog_ctrl_hpd_enable(dp->catalog);
/* enable HPD interrupts */
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
+ msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
- dp_display->internal_hpd = true;
+ msm_dp_display->internal_hpd = true;
mutex_unlock(&dp->event_mutex);
}
-void dp_bridge_hpd_disable(struct drm_bridge *bridge)
+void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
- struct msm_dp *dp_display = dp_bridge->dp_display;
- struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge);
+ struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display;
+ struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
mutex_lock(&dp->event_mutex);
/* disable HPD interrupts */
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
- dp_catalog_ctrl_hpd_disable(dp->catalog);
+ msm_dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+ msm_dp_catalog_ctrl_hpd_disable(dp->catalog);
- dp_display->internal_hpd = false;
+ msm_dp_display->internal_hpd = false;
- pm_runtime_put_sync(&dp_display->pdev->dev);
+ pm_runtime_put_sync(&msm_dp_display->pdev->dev);
mutex_unlock(&dp->event_mutex);
}
-void dp_bridge_hpd_notify(struct drm_bridge *bridge,
+void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status)
{
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
- struct msm_dp *dp_display = dp_bridge->dp_display;
- struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(bridge);
+ struct msm_dp *msm_dp_display = msm_dp_bridge->msm_dp_display;
+ struct msm_dp_display_private *dp = container_of(msm_dp_display, struct msm_dp_display_private, msm_dp_display);
/* Without next_bridge interrupts are handled by the DP core directly */
- if (dp_display->internal_hpd)
+ if (msm_dp_display->internal_hpd)
return;
- if (!dp_display->link_ready && status == connector_status_connected)
- dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
- else if (dp_display->link_ready && status == connector_status_disconnected)
- dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ if (!msm_dp_display->link_ready && status == connector_status_connected)
+ msm_dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
+ else if (msm_dp_display->link_ready && status == connector_status_disconnected)
+ msm_dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
}
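hpd_notify only acts when an external block owns the HPD pin (internal_hpd is false); it then folds connector status and current link state into the same plug/unplug events the IRQ path queues. A condensed sketch of that decision, names illustrative:

enum ev { EV_NONE, EV_PLUG, EV_UNPLUG };

static enum ev hpd_notify_event(int internal_hpd, int link_ready, int connected)
{
	if (internal_hpd)
		return EV_NONE;		/* the DP core saw the interrupt itself */
	if (!link_ready && connected)
		return EV_PLUG;
	if (link_ready && !connected)
		return EV_UNPLUG;
	return EV_NONE;			/* status already agrees with link state */
}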
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index ec7fa67e0569..ecbc2d92f546 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -27,18 +27,18 @@ struct msm_dp {
hdmi_codec_plugged_cb plugged_cb;
- struct dp_audio *dp_audio;
+ struct msm_dp_audio *msm_dp_audio;
bool psr_supported;
};
-int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+int msm_dp_display_set_plugged_cb(struct msm_dp *msm_dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev);
-int dp_display_get_modes(struct msm_dp *dp_display);
-bool dp_display_check_video_test(struct msm_dp *dp_display);
-int dp_display_get_test_bpp(struct msm_dp *dp_display);
-void dp_display_signal_audio_start(struct msm_dp *dp_display);
-void dp_display_signal_audio_complete(struct msm_dp *dp_display);
-void dp_display_set_psr(struct msm_dp *dp, bool enter);
-void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *dentry, bool is_edp);
+int msm_dp_display_get_modes(struct msm_dp *msm_dp_display);
+bool msm_dp_display_check_video_test(struct msm_dp *msm_dp_display);
+int msm_dp_display_get_test_bpp(struct msm_dp *msm_dp_display);
+void msm_dp_display_signal_audio_start(struct msm_dp *msm_dp_display);
+void msm_dp_display_signal_audio_complete(struct msm_dp *msm_dp_display);
+void msm_dp_display_set_psr(struct msm_dp *dp, bool enter);
+void msm_dp_display_debugfs_init(struct msm_dp *msm_dp_display, struct dentry *dentry, bool is_edp);
#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 1b9be5bd97f1..d3e241ea6941 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -14,15 +14,15 @@
#include "dp_drm.h"
/**
- * dp_bridge_detect - callback to determine if connector is connected
+ * msm_dp_bridge_detect - callback to determine if connector is connected
* @bridge: Pointer to drm bridge structure
* Returns: Bridge's 'is connected' status
*/
-static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge)
{
struct msm_dp *dp;
- dp = to_dp_bridge(bridge)->dp_display;
+ dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
(dp->link_ready) ? "true" : "false");
@@ -31,14 +31,14 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
connector_status_disconnected;
}
-static int dp_bridge_atomic_check(struct drm_bridge *bridge,
+static int msm_dp_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct msm_dp *dp;
- dp = to_dp_bridge(bridge)->dp_display;
+ dp = to_dp_bridge(bridge)->msm_dp_display;
drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
(dp->link_ready) ? "true" : "false");
@@ -62,12 +62,12 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge,
/**
- * dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * msm_dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add()
* @bridge: Pointer to drm bridge
* @connector: Pointer to drm connector structure
* Returns: Number of modes added
*/
-static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
+static int msm_dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
{
int rc = 0;
struct msm_dp *dp;
@@ -75,11 +75,11 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
if (!connector)
return 0;
- dp = to_dp_bridge(bridge)->dp_display;
+ dp = to_dp_bridge(bridge)->msm_dp_display;
/* pluggable case assumes EDID is read when HPD */
if (dp->link_ready) {
- rc = dp_display_get_modes(dp);
+ rc = msm_dp_display_get_modes(dp);
if (rc <= 0) {
DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
return rc;
@@ -90,37 +90,37 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *
return rc;
}
-static void dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+static void msm_dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
{
- struct msm_dp *dp = to_dp_bridge(bridge)->dp_display;
+ struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display;
- dp_display_debugfs_init(dp, root, false);
+ msm_dp_display_debugfs_init(dp, root, false);
}
-static const struct drm_bridge_funcs dp_bridge_ops = {
+static const struct drm_bridge_funcs msm_dp_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
- .atomic_enable = dp_bridge_atomic_enable,
- .atomic_disable = dp_bridge_atomic_disable,
- .atomic_post_disable = dp_bridge_atomic_post_disable,
- .mode_set = dp_bridge_mode_set,
- .mode_valid = dp_bridge_mode_valid,
- .get_modes = dp_bridge_get_modes,
- .detect = dp_bridge_detect,
- .atomic_check = dp_bridge_atomic_check,
- .hpd_enable = dp_bridge_hpd_enable,
- .hpd_disable = dp_bridge_hpd_disable,
- .hpd_notify = dp_bridge_hpd_notify,
- .debugfs_init = dp_bridge_debugfs_init,
+ .atomic_enable = msm_dp_bridge_atomic_enable,
+ .atomic_disable = msm_dp_bridge_atomic_disable,
+ .atomic_post_disable = msm_dp_bridge_atomic_post_disable,
+ .mode_set = msm_dp_bridge_mode_set,
+ .mode_valid = msm_dp_bridge_mode_valid,
+ .get_modes = msm_dp_bridge_get_modes,
+ .detect = msm_dp_bridge_detect,
+ .atomic_check = msm_dp_bridge_atomic_check,
+ .hpd_enable = msm_dp_bridge_hpd_enable,
+ .hpd_disable = msm_dp_bridge_hpd_disable,
+ .hpd_notify = msm_dp_bridge_hpd_notify,
+ .debugfs_init = msm_dp_bridge_debugfs_init,
};
-static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
+static int msm_edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct msm_dp *dp = to_dp_bridge(drm_bridge)->dp_display;
+ struct msm_dp *dp = to_dp_bridge(drm_bridge)->msm_dp_display;
if (WARN_ON(!conn_state))
return -ENODEV;
@@ -136,18 +136,18 @@ static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
return 0;
}
-static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
+static void msm_edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
/*
* Check the old state of the crtc to determine if the panel
- * was put into psr state previously by the edp_bridge_atomic_disable.
+ * was put into psr state previously by the msm_edp_bridge_atomic_disable.
* If the panel is in psr, just exit psr state and skip the full
* bridge enable sequence.
*/
@@ -159,21 +159,21 @@ static void edp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc);
if (old_crtc_state && old_crtc_state->self_refresh_active) {
- dp_display_set_psr(dp, false);
+ msm_dp_display_set_psr(dp, false);
return;
}
- dp_bridge_atomic_enable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_enable(drm_bridge, old_bridge_state);
}
-static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
+static void msm_edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL, *old_crtc_state = NULL;
- struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
- struct msm_dp *dp = dp_bridge->dp_display;
+ struct msm_dp_bridge *msm_dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = msm_dp_bridge->msm_dp_display;
crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state,
drm_bridge->encoder);
@@ -194,24 +194,24 @@ static void edp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
* If old crtc state is active, then this is a display disable
* call while the sink is in psr state. So, exit psr here.
* The eDP controller will be disabled in the
- * edp_bridge_atomic_post_disable function.
+ * msm_edp_bridge_atomic_post_disable function.
*
* We observed sink is stuck in self refresh if psr exit is skipped
* when display disable occurs while the sink is in psr state.
*/
if (new_crtc_state->self_refresh_active) {
- dp_display_set_psr(dp, true);
+ msm_dp_display_set_psr(dp, true);
return;
} else if (old_crtc_state->self_refresh_active) {
- dp_display_set_psr(dp, false);
+ msm_dp_display_set_psr(dp, false);
return;
}
out:
- dp_bridge_atomic_disable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_disable(drm_bridge, old_bridge_state);
}
-static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
+static void msm_edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state)
{
struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
@@ -228,29 +228,29 @@ static void edp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
return;
/*
- * Self refresh mode is already set in edp_bridge_atomic_disable.
+ * Self refresh mode is already set in msm_edp_bridge_atomic_disable.
*/
if (new_crtc_state->self_refresh_active)
return;
- dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state);
+ msm_dp_bridge_atomic_post_disable(drm_bridge, old_bridge_state);
}
/**
- * edp_bridge_mode_valid - callback to determine if specified mode is valid
+ * msm_edp_bridge_mode_valid - callback to determine if specified mode is valid
* @bridge: Pointer to drm bridge structure
* @info: display info
* @mode: Pointer to drm mode structure
* Returns: Validity status for specified mode
*/
-static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge,
+static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct msm_dp *dp;
int mode_pclk_khz = mode->clock;
- dp = to_dp_bridge(bridge)->dp_display;
+ dp = to_dp_bridge(bridge)->msm_dp_display;
if (!dp || !mode_pclk_khz || !dp->connector) {
DRM_ERROR("invalid params\n");
@@ -268,42 +268,43 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+static void msm_edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
{
- struct msm_dp *dp = to_dp_bridge(bridge)->dp_display;
+ struct msm_dp *dp = to_dp_bridge(bridge)->msm_dp_display;
- dp_display_debugfs_init(dp, root, true);
+ msm_dp_display_debugfs_init(dp, root, true);
}
-static const struct drm_bridge_funcs edp_bridge_ops = {
- .atomic_enable = edp_bridge_atomic_enable,
- .atomic_disable = edp_bridge_atomic_disable,
- .atomic_post_disable = edp_bridge_atomic_post_disable,
- .mode_set = dp_bridge_mode_set,
- .mode_valid = edp_bridge_mode_valid,
+static const struct drm_bridge_funcs msm_edp_bridge_ops = {
+ .atomic_enable = msm_edp_bridge_atomic_enable,
+ .atomic_disable = msm_edp_bridge_atomic_disable,
+ .atomic_post_disable = msm_edp_bridge_atomic_post_disable,
+ .mode_set = msm_dp_bridge_mode_set,
+ .mode_valid = msm_edp_bridge_mode_valid,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
- .atomic_check = edp_bridge_atomic_check,
- .debugfs_init = edp_bridge_debugfs_init,
+ .atomic_check = msm_edp_bridge_atomic_check,
+ .debugfs_init = msm_edp_bridge_debugfs_init,
};
-int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
- struct drm_encoder *encoder)
+int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder, bool yuv_supported)
{
int rc;
- struct msm_dp_bridge *dp_bridge;
+ struct msm_dp_bridge *msm_dp_bridge;
struct drm_bridge *bridge;
- dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL);
- if (!dp_bridge)
+ msm_dp_bridge = devm_kzalloc(dev->dev, sizeof(*msm_dp_bridge), GFP_KERNEL);
+ if (!msm_dp_bridge)
return -ENOMEM;
- dp_bridge->dp_display = dp_display;
+ msm_dp_bridge->msm_dp_display = msm_dp_display;
- bridge = &dp_bridge->bridge;
- bridge->funcs = dp_display->is_edp ? &edp_bridge_ops : &dp_bridge_ops;
- bridge->type = dp_display->connector_type;
+ bridge = &msm_dp_bridge->bridge;
+ bridge->funcs = msm_dp_display->is_edp ? &msm_edp_bridge_ops : &msm_dp_bridge_ops;
+ bridge->type = msm_dp_display->connector_type;
+ bridge->ycbcr_420_allowed = yuv_supported;
/*
* Many ops only make sense for DP. Why?
@@ -316,7 +317,7 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
* allows the panel driver to properly power itself on to read the
* modes.
*/
- if (!dp_display->is_edp) {
+ if (!msm_dp_display->is_edp) {
bridge->ops =
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_HPD |
@@ -337,9 +338,9 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
return rc;
}
- if (dp_display->next_bridge) {
+ if (msm_dp_display->next_bridge) {
rc = drm_bridge_attach(encoder,
- dp_display->next_bridge, bridge,
+ msm_dp_display->next_bridge, bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc < 0) {
DRM_ERROR("failed to attach panel bridge: %d\n", rc);
@@ -351,21 +352,18 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
}
/* connector initialization */
-struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder,
- bool yuv_supported)
+struct drm_connector *msm_dp_drm_connector_init(struct msm_dp *msm_dp_display,
+ struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
- connector = drm_bridge_connector_init(dp_display->drm_dev, encoder);
+ connector = drm_bridge_connector_init(msm_dp_display->drm_dev, encoder);
if (IS_ERR(connector))
return connector;
- if (!dp_display->is_edp)
+ if (!msm_dp_display->is_edp)
drm_connector_attach_dp_subconnector_property(connector);
- if (yuv_supported)
- connector->ycbcr_420_allowed = true;
-
drm_connector_attach_encoder(connector, encoder);
return connector;
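With the two new signatures above, the YUV 4:2:0 capability now travels through msm_dp_bridge_init(), which sets bridge->ycbcr_420_allowed, instead of being patched onto the connector afterwards. A minimal sketch of the resulting call order; the wrapper name example_modeset_init is assumed for illustration:

/*
 * Editorial sketch of the new call order: the bridge learns about YUV
 * 4:2:0 support at init time, so the connector path no longer touches
 * the flag.
 */
static int example_modeset_init(struct msm_dp *msm_dp_display,
				struct drm_device *dev,
				struct drm_encoder *encoder,
				bool yuv_supported)
{
	struct drm_connector *connector;
	int rc;

	rc = msm_dp_bridge_init(msm_dp_display, dev, encoder, yuv_supported);
	if (rc)
		return rc;

	connector = msm_dp_drm_connector_init(msm_dp_display, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return 0;
}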
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index 45e57ac25a4d..8eae2f74839f 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -14,31 +14,32 @@
struct msm_dp_bridge {
struct drm_bridge bridge;
- struct msm_dp *dp_display;
+ struct msm_dp *msm_dp_display;
};
#define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge)
-struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder,
- bool yuv_supported);
-int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
- struct drm_encoder *encoder);
+struct drm_connector *msm_dp_drm_connector_init(struct msm_dp *msm_dp_display,
+ struct drm_encoder *encoder);
+int msm_dp_bridge_init(struct msm_dp *msm_dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder,
+ bool yuv_supported);
-void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state);
-void dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state);
-void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state);
-enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
+enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
-void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+void msm_dp_bridge_mode_set(struct drm_bridge *drm_bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode);
-void dp_bridge_hpd_enable(struct drm_bridge *bridge);
-void dp_bridge_hpd_disable(struct drm_bridge *bridge);
-void dp_bridge_hpd_notify(struct drm_bridge *bridge,
+void msm_dp_bridge_hpd_enable(struct drm_bridge *bridge);
+void msm_dp_bridge_hpd_disable(struct drm_bridge *bridge);
+void msm_dp_bridge_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status);
#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index d8967615d84d..1a1fbb2d7d4f 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -28,25 +28,25 @@ enum audio_pattern_type {
AUDIO_TEST_PATTERN_SAWTOOTH = 0x01,
};
-struct dp_link_request {
+struct msm_dp_link_request {
u32 test_requested;
u32 test_link_rate;
u32 test_lane_count;
};
-struct dp_link_private {
+struct msm_dp_link_private {
u32 prev_sink_count;
struct drm_device *drm_dev;
struct drm_dp_aux *aux;
- struct dp_link dp_link;
+ struct msm_dp_link msm_dp_link;
- struct dp_link_request request;
+ struct msm_dp_link_request request;
struct mutex psm_mutex;
u8 link_status[DP_LINK_STATUS_SIZE];
};
-static int dp_aux_link_power_up(struct drm_dp_aux *aux,
- struct dp_link_info *link)
+static int msm_dp_aux_link_power_up(struct drm_dp_aux *aux,
+ struct msm_dp_link_info *link)
{
u8 value;
ssize_t len;
@@ -73,8 +73,8 @@ static int dp_aux_link_power_up(struct drm_dp_aux *aux,
return 0;
}
-static int dp_aux_link_power_down(struct drm_dp_aux *aux,
- struct dp_link_info *link)
+static int msm_dp_aux_link_power_down(struct drm_dp_aux *aux,
+ struct msm_dp_link_info *link)
{
u8 value;
int err;
@@ -96,7 +96,7 @@ static int dp_aux_link_power_down(struct drm_dp_aux *aux,
return 0;
}
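The bodies of the two power helpers are elided by the hunks above. As a hedged sketch of the DPCD sequence they presumably implement (details such as retries may differ), sink power is toggled through the DP_SET_POWER register (0x600) between D0 and D3:

/*
 * Sketch under stated assumptions, not the verbatim bodies: read,
 * modify and write back the DPCD power state field.
 */
static int example_set_sink_power(struct drm_dp_aux *aux, bool on)
{
	u8 value;
	int err;

	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
	if (err < 0)
		return err;

	value &= ~DP_SET_POWER_MASK;
	value |= on ? DP_SET_POWER_D0 : DP_SET_POWER_D3;

	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
	if (err < 0)
		return err;

	if (on)
		usleep_range(1000, 2000);	/* sinks may need 1-2 ms after D0 */

	return 0;
}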
-static int dp_link_get_period(struct dp_link_private *link, int const addr)
+static int msm_dp_link_get_period(struct msm_dp_link_private *link, int const addr)
{
int ret = 0;
u8 data;
@@ -122,19 +122,19 @@ exit:
return ret;
}
-static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
+static int msm_dp_link_parse_audio_channel_period(struct msm_dp_link_private *link)
{
int ret = 0;
- struct dp_link_test_audio *req = &link->dp_link.test_audio;
+ struct msm_dp_link_test_audio *req = &link->msm_dp_link.test_audio;
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_1 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_1 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
if (ret == -EINVAL)
goto exit;
@@ -142,42 +142,42 @@ static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_2 = 0x%x\n", ret);
/* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_3 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_3 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_4 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_4 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_5 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_5 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_6 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_6 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_7 = ret;
drm_dbg_dp(link->drm_dev, "test_audio_period_ch_7 = 0x%x\n", ret);
- ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
+ ret = msm_dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
if (ret == -EINVAL)
goto exit;
@@ -187,7 +187,7 @@ exit:
return ret;
}
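The eight per-channel reads above are identical in shape; since the DPCD audio-period registers for channels 1-8 occupy consecutive addresses, the same parsing could be sketched as a table walk (editorial illustration only, assumed equivalent, reusing the static helper from this file):

static int example_read_audio_periods(struct msm_dp_link_private *link,
				      u32 periods[8])
{
	int i, ret;

	/* DP_TEST_AUDIO_PERIOD_CH1..CH8 are consecutive DPCD addresses */
	for (i = 0; i < 8; i++) {
		ret = msm_dp_link_get_period(link,
					     DP_TEST_AUDIO_PERIOD_CH1 + i);
		if (ret == -EINVAL)
			return ret;
		periods[i] = ret;
	}

	return 0;
}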
-static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
+static int msm_dp_link_parse_audio_pattern_type(struct msm_dp_link_private *link)
{
int ret = 0;
u8 data;
@@ -208,13 +208,13 @@ static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
goto exit;
}
- link->dp_link.test_audio.test_audio_pattern_type = data;
+ link->msm_dp_link.test_audio.test_audio_pattern_type = data;
drm_dbg_dp(link->drm_dev, "audio pattern type = 0x%x\n", data);
exit:
return ret;
}
-static int dp_link_parse_audio_mode(struct dp_link_private *link)
+static int msm_dp_link_parse_audio_mode(struct msm_dp_link_private *link)
{
int ret = 0;
u8 data;
@@ -248,8 +248,8 @@ static int dp_link_parse_audio_mode(struct dp_link_private *link)
goto exit;
}
- link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
- link->dp_link.test_audio.test_audio_channel_count = channel_count;
+ link->msm_dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
+ link->msm_dp_link.test_audio.test_audio_channel_count = channel_count;
drm_dbg_dp(link->drm_dev,
"sampling_rate = 0x%x, channel_count = 0x%x\n",
sampling_rate, channel_count);
@@ -257,25 +257,25 @@ exit:
return ret;
}
-static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
+static int msm_dp_link_parse_audio_pattern_params(struct msm_dp_link_private *link)
{
int ret = 0;
- ret = dp_link_parse_audio_mode(link);
+ ret = msm_dp_link_parse_audio_mode(link);
if (ret)
goto exit;
- ret = dp_link_parse_audio_pattern_type(link);
+ ret = msm_dp_link_parse_audio_pattern_type(link);
if (ret)
goto exit;
- ret = dp_link_parse_audio_channel_period(link);
+ ret = msm_dp_link_parse_audio_channel_period(link);
exit:
return ret;
}
-static bool dp_link_is_video_pattern_valid(u32 pattern)
+static bool msm_dp_link_is_video_pattern_valid(u32 pattern)
{
switch (pattern) {
case DP_NO_TEST_PATTERN:
@@ -289,12 +289,12 @@ static bool dp_link_is_video_pattern_valid(u32 pattern)
}
/**
- * dp_link_is_bit_depth_valid() - validates the bit depth requested
+ * msm_dp_link_is_bit_depth_valid() - validates the bit depth requested
* @tbd: bit depth requested by the sink
*
* Returns true if the requested bit depth is supported.
*/
-static bool dp_link_is_bit_depth_valid(u32 tbd)
+static bool msm_dp_link_is_bit_depth_valid(u32 tbd)
{
/* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */
switch (tbd) {
@@ -307,7 +307,7 @@ static bool dp_link_is_bit_depth_valid(u32 tbd)
}
}
-static int dp_link_parse_timing_params1(struct dp_link_private *link,
+static int msm_dp_link_parse_timing_params1(struct msm_dp_link_private *link,
int addr, int len, u32 *val)
{
u8 bp[2];
@@ -328,7 +328,7 @@ static int dp_link_parse_timing_params1(struct dp_link_private *link,
return 0;
}
-static int dp_link_parse_timing_params2(struct dp_link_private *link,
+static int msm_dp_link_parse_timing_params2(struct msm_dp_link_private *link,
int addr, int len,
u32 *val1, u32 *val2)
{
@@ -351,7 +351,7 @@ static int dp_link_parse_timing_params2(struct dp_link_private *link,
return 0;
}
-static int dp_link_parse_timing_params3(struct dp_link_private *link,
+static int msm_dp_link_parse_timing_params3(struct msm_dp_link_private *link,
int addr, u32 *val)
{
u8 bp;
@@ -369,13 +369,13 @@ static int dp_link_parse_timing_params3(struct dp_link_private *link,
}
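The three timing helpers differ only in how they slice the bytes they read, and their parse bodies are elided here. As an assumption-labeled sketch, a 16-bit DPCD test-timing field such as TEST_H_TOTAL is reassembled high byte first:

/*
 * Sketch of the byte order assumed by the elided parse bodies.
 */
static inline u32 example_timing_from_bytes(const u8 bp[2])
{
	return ((u32)bp[0] << 8) | bp[1];	/* bp[0] = HI, bp[1] = LO */
}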
/**
- * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD
+ * msm_dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD
* @link: Display Port Driver data
*
* Returns 0 if it successfully parses the video link pattern and the link
* bit depth requested by the sink, and if the values parsed are valid.
*/
-static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
+static int msm_dp_link_parse_video_pattern_params(struct msm_dp_link_private *link)
{
int ret = 0;
ssize_t rlen;
@@ -388,13 +388,13 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
return rlen;
}
- if (!dp_link_is_video_pattern_valid(bp)) {
+ if (!msm_dp_link_is_video_pattern_valid(bp)) {
DRM_ERROR("invalid link video pattern = 0x%x\n", bp);
ret = -EINVAL;
return ret;
}
- link->dp_link.test_video.test_video_pattern = bp;
+ link->msm_dp_link.test_video.test_video_pattern = bp;
/* Read the requested color bit depth and dynamic range (Byte 0x232) */
rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp);
@@ -404,88 +404,88 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
}
/* Dynamic Range */
- link->dp_link.test_video.test_dyn_range =
+ link->msm_dp_link.test_video.test_dyn_range =
(bp & DP_TEST_DYNAMIC_RANGE_CEA);
/* Color bit depth */
bp &= DP_TEST_BIT_DEPTH_MASK;
- if (!dp_link_is_bit_depth_valid(bp)) {
+ if (!msm_dp_link_is_bit_depth_valid(bp)) {
DRM_ERROR("invalid link bit depth = 0x%x\n", bp);
ret = -EINVAL;
return ret;
}
- link->dp_link.test_video.test_bit_depth = bp;
+ link->msm_dp_link.test_video.test_bit_depth = bp;
/* resolution timing params */
- ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2,
- &link->dp_link.test_video.test_h_total);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2,
+ &link->msm_dp_link.test_video.test_h_total);
if (ret) {
DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2,
- &link->dp_link.test_video.test_v_total);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2,
+ &link->msm_dp_link.test_video.test_v_total);
if (ret) {
DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2,
- &link->dp_link.test_video.test_h_start);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2,
+ &link->msm_dp_link.test_video.test_h_start);
if (ret) {
DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2,
- &link->dp_link.test_video.test_v_start);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2,
+ &link->msm_dp_link.test_video.test_v_start);
if (ret) {
DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2,
- &link->dp_link.test_video.test_hsync_pol,
- &link->dp_link.test_video.test_hsync_width);
+ ret = msm_dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2,
+ &link->msm_dp_link.test_video.test_hsync_pol,
+ &link->msm_dp_link.test_video.test_hsync_width);
if (ret) {
DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2,
- &link->dp_link.test_video.test_vsync_pol,
- &link->dp_link.test_video.test_vsync_width);
+ ret = msm_dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2,
+ &link->msm_dp_link.test_video.test_vsync_pol,
+ &link->msm_dp_link.test_video.test_vsync_width);
if (ret) {
DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2,
- &link->dp_link.test_video.test_h_width);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2,
+ &link->msm_dp_link.test_video.test_h_width);
if (ret) {
DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n");
return ret;
}
- ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2,
- &link->dp_link.test_video.test_v_height);
+ ret = msm_dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2,
+ &link->msm_dp_link.test_video.test_v_height);
if (ret) {
DRM_ERROR("failed to parse test_v_height\n");
return ret;
}
- ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1,
- &link->dp_link.test_video.test_rr_d);
- link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR;
+ ret = msm_dp_link_parse_timing_params3(link, DP_TEST_MISC1,
+ &link->msm_dp_link.test_video.test_rr_d);
+ link->msm_dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR;
if (ret) {
DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n");
return ret;
}
- ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR,
- &link->dp_link.test_video.test_rr_n);
+ ret = msm_dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR,
+ &link->msm_dp_link.test_video.test_rr_n);
if (ret) {
DRM_ERROR("failed to parse test_rr_n\n");
return ret;
@@ -505,34 +505,34 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
"TEST_V_HEIGHT = %d\n"
"TEST_REFRESH_DENOMINATOR = %d\n"
"TEST_REFRESH_NUMERATOR = %d\n",
- link->dp_link.test_video.test_video_pattern,
- link->dp_link.test_video.test_dyn_range,
- link->dp_link.test_video.test_bit_depth,
- link->dp_link.test_video.test_h_total,
- link->dp_link.test_video.test_v_total,
- link->dp_link.test_video.test_h_start,
- link->dp_link.test_video.test_v_start,
- link->dp_link.test_video.test_hsync_pol,
- link->dp_link.test_video.test_hsync_width,
- link->dp_link.test_video.test_vsync_pol,
- link->dp_link.test_video.test_vsync_width,
- link->dp_link.test_video.test_h_width,
- link->dp_link.test_video.test_v_height,
- link->dp_link.test_video.test_rr_d,
- link->dp_link.test_video.test_rr_n);
+ link->msm_dp_link.test_video.test_video_pattern,
+ link->msm_dp_link.test_video.test_dyn_range,
+ link->msm_dp_link.test_video.test_bit_depth,
+ link->msm_dp_link.test_video.test_h_total,
+ link->msm_dp_link.test_video.test_v_total,
+ link->msm_dp_link.test_video.test_h_start,
+ link->msm_dp_link.test_video.test_v_start,
+ link->msm_dp_link.test_video.test_hsync_pol,
+ link->msm_dp_link.test_video.test_hsync_width,
+ link->msm_dp_link.test_video.test_vsync_pol,
+ link->msm_dp_link.test_video.test_vsync_width,
+ link->msm_dp_link.test_video.test_h_width,
+ link->msm_dp_link.test_video.test_v_height,
+ link->msm_dp_link.test_video.test_rr_d,
+ link->msm_dp_link.test_video.test_rr_n);
return ret;
}
/**
- * dp_link_parse_link_training_params() - parses link training parameters from
+ * msm_dp_link_parse_link_training_params() - parses link training parameters from
* DPCD
* @link: Display Port Driver data
*
* Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
* count (Byte 0x220), and if these parsed values are valid.
*/
-static int dp_link_parse_link_training_params(struct dp_link_private *link)
+static int msm_dp_link_parse_link_training_params(struct msm_dp_link_private *link)
{
u8 bp;
ssize_t rlen;
@@ -571,13 +571,13 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link)
}
/**
- * dp_link_parse_phy_test_params() - parses the phy link parameters
+ * msm_dp_link_parse_phy_test_params() - parses the phy link parameters
* @link: Display Port Driver data
*
* Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being
* requested.
*/
-static int dp_link_parse_phy_test_params(struct dp_link_private *link)
+static int msm_dp_link_parse_phy_test_params(struct msm_dp_link_private *link)
{
u8 data;
ssize_t rlen;
@@ -589,7 +589,7 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link)
return rlen;
}
- link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
+ link->msm_dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
drm_dbg_dp(link->drm_dev, "phy_test_pattern_sel = 0x%x\n", data);
@@ -608,12 +608,12 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link)
}
/**
- * dp_link_is_video_audio_test_requested() - checks for audio/video link request
+ * msm_dp_link_is_video_audio_test_requested() - checks for audio/video test request
* @link: test request bitmask read from the sink
*
* Returns true if the requested test is a permitted audio/video test.
*/
-static bool dp_link_is_video_audio_test_requested(u32 link)
+static bool msm_dp_link_is_video_audio_test_requested(u32 link)
{
u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN |
DP_TEST_LINK_AUDIO_PATTERN |
@@ -624,13 +624,13 @@ static bool dp_link_is_video_audio_test_requested(u32 link)
}
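The return expression is cut off by the hunk above; a sketch of the mask test the body presumably performs (an assumption — the real code may differ in detail): the request passes only if at least one audio/video bit is set and no bit outside the allowed set is:

static bool example_only_av_bits_set(u32 request, u32 allowed_mask)
{
	/* allowed_mask covers the video, audio and audio-disabled-video bits */
	return (request & allowed_mask) && !(request & ~allowed_mask);
}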
/**
- * dp_link_parse_request() - parses link request parameters from sink
+ * msm_dp_link_parse_request() - parses link request parameters from sink
* @link: Display Port Driver data
*
* Parses the DPCD to check if an automated test is requested (Byte 0x201),
* and what type of test automation is being requested (Byte 0x218).
*/
-static int dp_link_parse_request(struct dp_link_private *link)
+static int msm_dp_link_parse_request(struct msm_dp_link_private *link)
{
int ret = 0;
u8 data;
@@ -672,27 +672,27 @@ static int dp_link_parse_request(struct dp_link_private *link)
drm_dbg_dp(link->drm_dev, "Test:(0x%x) requested\n", data);
link->request.test_requested = data;
if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) {
- ret = dp_link_parse_phy_test_params(link);
+ ret = msm_dp_link_parse_phy_test_params(link);
if (ret)
goto end;
- ret = dp_link_parse_link_training_params(link);
+ ret = msm_dp_link_parse_link_training_params(link);
if (ret)
goto end;
}
if (link->request.test_requested == DP_TEST_LINK_TRAINING) {
- ret = dp_link_parse_link_training_params(link);
+ ret = msm_dp_link_parse_link_training_params(link);
if (ret)
goto end;
}
- if (dp_link_is_video_audio_test_requested(
+ if (msm_dp_link_is_video_audio_test_requested(
link->request.test_requested)) {
- ret = dp_link_parse_video_pattern_params(link);
+ ret = msm_dp_link_parse_video_pattern_params(link);
if (ret)
goto end;
- ret = dp_link_parse_audio_pattern_params(link);
+ ret = msm_dp_link_parse_audio_pattern_params(link);
}
end:
/*
@@ -700,29 +700,29 @@ end:
* a DP_TEST_NAK.
*/
if (ret) {
- link->dp_link.test_response = DP_TEST_NAK;
+ link->msm_dp_link.test_response = DP_TEST_NAK;
} else {
if (link->request.test_requested != DP_TEST_LINK_EDID_READ)
- link->dp_link.test_response = DP_TEST_ACK;
+ link->msm_dp_link.test_response = DP_TEST_ACK;
else
- link->dp_link.test_response =
+ link->msm_dp_link.test_response =
DP_TEST_EDID_CHECKSUM_WRITE;
}
return ret;
}
-static int dp_link_parse_sink_status_field(struct dp_link_private *link)
+static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link)
{
int len;
- link->prev_sink_count = link->dp_link.sink_count;
+ link->prev_sink_count = link->msm_dp_link.sink_count;
len = drm_dp_read_sink_count(link->aux);
if (len < 0) {
DRM_ERROR("DP parse sink count failed\n");
return len;
}
- link->dp_link.sink_count = len;
+ link->msm_dp_link.sink_count = len;
len = drm_dp_dpcd_read_link_status(link->aux,
link->link_status);
@@ -731,11 +731,11 @@ static int dp_link_parse_sink_status_field(struct dp_link_private *link)
return len;
}
- return dp_link_parse_request(link);
+ return msm_dp_link_parse_request(link);
}
/**
- * dp_link_process_link_training_request() - processes new training requests
+ * msm_dp_link_process_link_training_request() - processes new training requests
* @link: Display Port link data
*
* This function will handle new link training requests that are initiated by
@@ -745,7 +745,7 @@ static int dp_link_parse_sink_status_field(struct dp_link_private *link)
* The function will return 0 if a link training request has been processed,
* otherwise it will return -EINVAL.
*/
-static int dp_link_process_link_training_request(struct dp_link_private *link)
+static int msm_dp_link_process_link_training_request(struct msm_dp_link_private *link)
{
if (link->request.test_requested != DP_TEST_LINK_TRAINING)
return -EINVAL;
@@ -756,49 +756,49 @@ static int dp_link_process_link_training_request(struct dp_link_private *link)
link->request.test_link_rate,
link->request.test_lane_count);
- link->dp_link.link_params.num_lanes = link->request.test_lane_count;
- link->dp_link.link_params.rate =
+ link->msm_dp_link.link_params.num_lanes = link->request.test_lane_count;
+ link->msm_dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
return 0;
}
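drm_dp_bw_code_to_link_rate(), used above, converts the DPCD bandwidth code into a link rate in kHz; for the classic 8b/10b rates the scale factor is 27 MHz per code unit. A worked example (editorial):

/*
 * DP_LINK_BW_1_62 (0x06) -> 162000 kHz (RBR)
 * DP_LINK_BW_2_7  (0x0a) -> 270000 kHz (HBR)
 * DP_LINK_BW_5_4  (0x14) -> 540000 kHz (HBR2)
 */
static inline int example_bw_code_to_link_rate(u8 link_bw)
{
	return link_bw * 27000;	/* kHz */
}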
-bool dp_link_send_test_response(struct dp_link *dp_link)
+bool msm_dp_link_send_test_response(struct msm_dp_link *msm_dp_link)
{
- struct dp_link_private *link = NULL;
+ struct msm_dp_link_private *link = NULL;
int ret = 0;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid input\n");
return false;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE,
- dp_link->test_response);
+ msm_dp_link->test_response);
return ret == 1;
}
-int dp_link_psm_config(struct dp_link *dp_link,
- struct dp_link_info *link_info, bool enable)
+int msm_dp_link_psm_config(struct msm_dp_link *msm_dp_link,
+ struct msm_dp_link_info *link_info, bool enable)
{
- struct dp_link_private *link = NULL;
+ struct msm_dp_link_private *link = NULL;
int ret = 0;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid params\n");
return -EINVAL;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
mutex_lock(&link->psm_mutex);
if (enable)
- ret = dp_aux_link_power_down(link->aux, link_info);
+ ret = msm_dp_aux_link_power_down(link->aux, link_info);
else
- ret = dp_aux_link_power_up(link->aux, link_info);
+ ret = msm_dp_aux_link_power_up(link->aux, link_info);
if (ret)
DRM_ERROR("Failed to %s low power mode\n", enable ?
@@ -808,24 +808,24 @@ int dp_link_psm_config(struct dp_link *dp_link,
return ret;
}
-bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
+bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum)
{
- struct dp_link_private *link = NULL;
+ struct msm_dp_link_private *link = NULL;
int ret = 0;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid input\n");
return false;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM,
checksum);
return ret == 1;
}
-static void dp_link_parse_vx_px(struct dp_link_private *link)
+static void msm_dp_link_parse_vx_px(struct msm_dp_link_private *link)
{
drm_dbg_dp(link->drm_dev, "vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
drm_dp_get_adjust_request_voltage(link->link_status, 0),
@@ -845,31 +845,31 @@ static void dp_link_parse_vx_px(struct dp_link_private *link)
*/
drm_dbg_dp(link->drm_dev,
"Current: v_level = 0x%x, p_level = 0x%x\n",
- link->dp_link.phy_params.v_level,
- link->dp_link.phy_params.p_level);
- link->dp_link.phy_params.v_level =
+ link->msm_dp_link.phy_params.v_level,
+ link->msm_dp_link.phy_params.p_level);
+ link->msm_dp_link.phy_params.v_level =
drm_dp_get_adjust_request_voltage(link->link_status, 0);
- link->dp_link.phy_params.p_level =
+ link->msm_dp_link.phy_params.p_level =
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0);
- link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ link->msm_dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
drm_dbg_dp(link->drm_dev,
"Requested: v_level = 0x%x, p_level = 0x%x\n",
- link->dp_link.phy_params.v_level,
- link->dp_link.phy_params.p_level);
+ link->msm_dp_link.phy_params.v_level,
+ link->msm_dp_link.phy_params.p_level);
}
/**
- * dp_link_process_phy_test_pattern_request() - process new phy link requests
+ * msm_dp_link_process_phy_test_pattern_request() - process new phy link requests
* @link: Display Port Driver data
*
* This function will handle new phy link pattern requests that are initiated
* by the sink. The function will return 0 if a phy link pattern has been
* processed, otherwise it will return -EINVAL.
*/
-static int dp_link_process_phy_test_pattern_request(
- struct dp_link_private *link)
+static int msm_dp_link_process_phy_test_pattern_request(
+ struct msm_dp_link_private *link)
{
if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
drm_dbg_dp(link->drm_dev, "no phy test\n");
@@ -886,24 +886,24 @@ static int dp_link_process_phy_test_pattern_request(
drm_dbg_dp(link->drm_dev,
"Current: rate = 0x%x, lane count = 0x%x\n",
- link->dp_link.link_params.rate,
- link->dp_link.link_params.num_lanes);
+ link->msm_dp_link.link_params.rate,
+ link->msm_dp_link.link_params.num_lanes);
drm_dbg_dp(link->drm_dev,
"Requested: rate = 0x%x, lane count = 0x%x\n",
link->request.test_link_rate,
link->request.test_lane_count);
- link->dp_link.link_params.num_lanes = link->request.test_lane_count;
- link->dp_link.link_params.rate =
+ link->msm_dp_link.link_params.num_lanes = link->request.test_lane_count;
+ link->msm_dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
- dp_link_parse_vx_px(link);
+ msm_dp_link_parse_vx_px(link);
return 0;
}
-static bool dp_link_read_psr_error_status(struct dp_link_private *link)
+static bool msm_dp_link_read_psr_error_status(struct msm_dp_link_private *link)
{
u8 status;
@@ -921,7 +921,7 @@ static bool dp_link_read_psr_error_status(struct dp_link_private *link)
return true;
}
-static bool dp_link_psr_capability_changed(struct dp_link_private *link)
+static bool msm_dp_link_psr_capability_changed(struct msm_dp_link_private *link)
{
u8 status;
@@ -941,7 +941,7 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
}
/**
- * dp_link_process_link_status_update() - processes link status updates
+ * msm_dp_link_process_link_status_update() - processes link status updates
* @link: Display Port link module data
*
* This function will check for changes in the link status, e.g. clock
@@ -951,13 +951,13 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
* The function will return 0 if a link status update has been processed,
* otherwise it will return -EINVAL.
*/
-static int dp_link_process_link_status_update(struct dp_link_private *link)
+static int msm_dp_link_process_link_status_update(struct msm_dp_link_private *link)
{
bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status,
- link->dp_link.link_params.num_lanes);
+ link->msm_dp_link.link_params.num_lanes);
bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status,
- link->dp_link.link_params.num_lanes);
+ link->msm_dp_link.link_params.num_lanes);
drm_dbg_dp(link->drm_dev,
"channel_eq_done = %d, clock_recovery_done = %d\n",
@@ -970,7 +970,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link)
}
/**
- * dp_link_process_ds_port_status_change() - process port status changes
+ * msm_dp_link_process_ds_port_status_change() - process port status changes
* @link: Display Port Driver data
*
* This function will handle downstream port updates that are initiated by
@@ -980,122 +980,122 @@ static int dp_link_process_link_status_update(struct dp_link_private *link)
* The function will return 0 if a downstream port update has been
* processed, otherwise it will return -EINVAL.
*/
-static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+static int msm_dp_link_process_ds_port_status_change(struct msm_dp_link_private *link)
{
if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) &
DP_DOWNSTREAM_PORT_STATUS_CHANGED)
goto reset;
- if (link->prev_sink_count == link->dp_link.sink_count)
+ if (link->prev_sink_count == link->msm_dp_link.sink_count)
return -EINVAL;
reset:
/* reset prev_sink_count */
- link->prev_sink_count = link->dp_link.sink_count;
+ link->prev_sink_count = link->msm_dp_link.sink_count;
return 0;
}
-static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+static bool msm_dp_link_is_video_pattern_requested(struct msm_dp_link_private *link)
{
return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN)
&& !(link->request.test_requested &
DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
}
-static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+static bool msm_dp_link_is_audio_pattern_requested(struct msm_dp_link_private *link)
{
return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN);
}
-static void dp_link_reset_data(struct dp_link_private *link)
+static void msm_dp_link_reset_data(struct msm_dp_link_private *link)
{
- link->request = (const struct dp_link_request){ 0 };
- link->dp_link.test_video = (const struct dp_link_test_video){ 0 };
- link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
- link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 };
- link->dp_link.phy_params.phy_test_pattern_sel = 0;
- link->dp_link.sink_request = 0;
- link->dp_link.test_response = 0;
+ link->request = (const struct msm_dp_link_request){ 0 };
+ link->msm_dp_link.test_video = (const struct msm_dp_link_test_video){ 0 };
+ link->msm_dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
+ link->msm_dp_link.test_audio = (const struct msm_dp_link_test_audio){ 0 };
+ link->msm_dp_link.phy_params.phy_test_pattern_sel = 0;
+ link->msm_dp_link.sink_request = 0;
+ link->msm_dp_link.test_response = 0;
}
/**
- * dp_link_process_request() - handle HPD IRQ transition to HIGH
- * @dp_link: pointer to link module data
+ * msm_dp_link_process_request() - handle HPD IRQ transition to HIGH
+ * @msm_dp_link: pointer to link module data
*
* This function will handle the HPD IRQ state transitions from LOW to HIGH
* (including cases when there are back-to-back HPD IRQ HIGH pulses) indicating
* the start of a new link training request or sink status update.
*/
-int dp_link_process_request(struct dp_link *dp_link)
+int msm_dp_link_process_request(struct msm_dp_link *msm_dp_link)
{
int ret = 0;
- struct dp_link_private *link;
+ struct msm_dp_link_private *link;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
- dp_link_reset_data(link);
+ msm_dp_link_reset_data(link);
- ret = dp_link_parse_sink_status_field(link);
+ ret = msm_dp_link_parse_sink_status_field(link);
if (ret)
return ret;
if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
- dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
- } else if (!dp_link_process_ds_port_status_change(link)) {
- dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
- } else if (!dp_link_process_link_training_request(link)) {
- dp_link->sink_request |= DP_TEST_LINK_TRAINING;
- } else if (!dp_link_process_phy_test_pattern_request(link)) {
- dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
- } else if (dp_link_read_psr_error_status(link)) {
+ msm_dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
+ } else if (!msm_dp_link_process_ds_port_status_change(link)) {
+ msm_dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
+ } else if (!msm_dp_link_process_link_training_request(link)) {
+ msm_dp_link->sink_request |= DP_TEST_LINK_TRAINING;
+ } else if (!msm_dp_link_process_phy_test_pattern_request(link)) {
+ msm_dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
+ } else if (msm_dp_link_read_psr_error_status(link)) {
DRM_ERROR("PSR IRQ_HPD received\n");
- } else if (dp_link_psr_capability_changed(link)) {
+ } else if (msm_dp_link_psr_capability_changed(link)) {
drm_dbg_dp(link->drm_dev, "PSR Capability changed\n");
} else {
- ret = dp_link_process_link_status_update(link);
+ ret = msm_dp_link_process_link_status_update(link);
if (!ret) {
- dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
+ msm_dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
} else {
- if (dp_link_is_video_pattern_requested(link)) {
+ if (msm_dp_link_is_video_pattern_requested(link)) {
ret = 0;
- dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
+ msm_dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
}
- if (dp_link_is_audio_pattern_requested(link)) {
- dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
+ if (msm_dp_link_is_audio_pattern_requested(link)) {
+ msm_dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
ret = -EINVAL;
}
}
}
drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
- dp_link->sink_request);
+ msm_dp_link->sink_request);
return ret;
}
-int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+int msm_dp_link_get_colorimetry_config(struct msm_dp_link *msm_dp_link)
{
u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
- struct dp_link_private *link;
+ struct msm_dp_link_private *link;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
/*
* Unless a video pattern CTS test is ongoing, use RGB_VESA.
* Only RGB_VESA and RGB_CEA are supported for now.
*/
- if (dp_link_is_video_pattern_requested(link)) {
- if (link->dp_link.test_video.test_dyn_range &
+ if (msm_dp_link_is_video_pattern_requested(link)) {
+ if (link->msm_dp_link.test_video.test_dyn_range &
DP_TEST_DYNAMIC_RANGE_CEA)
cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
}
@@ -1103,22 +1103,22 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
return cc;
}
-int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+int msm_dp_link_adjust_levels(struct msm_dp_link *msm_dp_link, u8 *link_status)
{
int i;
u8 max_p_level;
int v_max = 0, p_max = 0;
- struct dp_link_private *link;
+ struct msm_dp_link_private *link;
- if (!dp_link) {
+ if (!msm_dp_link) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
/* use the max level across lanes */
- for (i = 0; i < dp_link->link_params.num_lanes; i++) {
+ for (i = 0; i < msm_dp_link->link_params.num_lanes; i++) {
u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i);
u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status,
i);
@@ -1131,56 +1131,56 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
p_max = data_p;
}
- dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
- dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ msm_dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ msm_dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
/*
* Adjust the voltage swing and pre-emphasis level combination to within
* the allowable range.
*/
- if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) {
+ if (msm_dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(link->drm_dev,
"Requested vSwingLevel=%d, change to %d\n",
- dp_link->phy_params.v_level,
+ msm_dp_link->phy_params.v_level,
DP_TRAIN_LEVEL_MAX);
- dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX;
+ msm_dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX;
}
- if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) {
+ if (msm_dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(link->drm_dev,
"Requested preEmphasisLevel=%d, change to %d\n",
- dp_link->phy_params.p_level,
+ msm_dp_link->phy_params.p_level,
DP_TRAIN_LEVEL_MAX);
- dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX;
+ msm_dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX;
}
- max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level;
- if (dp_link->phy_params.p_level > max_p_level) {
+ max_p_level = DP_TRAIN_LEVEL_MAX - msm_dp_link->phy_params.v_level;
+ if (msm_dp_link->phy_params.p_level > max_p_level) {
drm_dbg_dp(link->drm_dev,
"Requested preEmphasisLevel=%d, change to %d\n",
- dp_link->phy_params.p_level,
+ msm_dp_link->phy_params.p_level,
max_p_level);
- dp_link->phy_params.p_level = max_p_level;
+ msm_dp_link->phy_params.p_level = max_p_level;
}
drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n",
- dp_link->phy_params.v_level, dp_link->phy_params.p_level);
+ msm_dp_link->phy_params.v_level, msm_dp_link->phy_params.p_level);
return 0;
}
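The clamp rules above cap each level at DP_TRAIN_LEVEL_MAX and additionally bound pre-emphasis by what the voltage swing leaves available. A standalone restatement of the rule, with a worked case in the comment:

/*
 * Example: a sink requesting v=2, p=3 is granted v=2, p=1, since the
 * combined budget may not exceed DP_TRAIN_LEVEL_MAX.
 */
static void example_clamp_levels(u8 *v_level, u8 *p_level)
{
	const u8 max = 3;	/* DP_TRAIN_LEVEL_MAX */

	if (*v_level > max)
		*v_level = max;
	if (*p_level > max)
		*p_level = max;
	if (*p_level > max - *v_level)
		*p_level = max - *v_level;
}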
-void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
+void msm_dp_link_reset_phy_params_vx_px(struct msm_dp_link *msm_dp_link)
{
- dp_link->phy_params.v_level = 0;
- dp_link->phy_params.p_level = 0;
+ msm_dp_link->phy_params.v_level = 0;
+ msm_dp_link->phy_params.p_level = 0;
}
-u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp)
{
u32 tbd;
- struct dp_link_private *link;
+ struct msm_dp_link_private *link;
- link = container_of(dp_link, struct dp_link_private, dp_link);
+ link = container_of(msm_dp_link, struct msm_dp_link_private, msm_dp_link);
/*
* Few simplistic rules and assumptions made here:
@@ -1209,10 +1209,10 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
return tbd;
}
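The mapping body is elided by the hunk above; under the "simplistic rules" its comment announces, the sketch below assumes bits-per-component = bpp / 3, encoded into the DPCD TEST_BIT_DEPTH field (an assumption, not the verbatim implementation):

static u32 example_bpp_to_test_bit_depth(u32 bpp)
{
	switch (bpp / 3) {	/* bits per component */
	case 6:
		return DP_TEST_BIT_DEPTH_6;
	case 8:
		return DP_TEST_BIT_DEPTH_8;
	case 10:
		return DP_TEST_BIT_DEPTH_10;
	default:
		return DP_TEST_BIT_DEPTH_UNKNOWN;
	}
}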
-struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux)
+struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux)
{
- struct dp_link_private *link;
- struct dp_link *dp_link;
+ struct msm_dp_link_private *link;
+ struct msm_dp_link *msm_dp_link;
if (!dev || !aux) {
DRM_ERROR("invalid input\n");
@@ -1226,7 +1226,7 @@ struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux)
link->aux = aux;
mutex_init(&link->psm_mutex);
- dp_link = &link->dp_link;
+ msm_dp_link = &link->msm_dp_link;
- return dp_link;
+ return msm_dp_link;
}
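The constructor returns the embedded public struct; everywhere else in this file the private container is recovered with container_of(). A compact illustration of that round trip:

/*
 * The public struct msm_dp_link handed out by msm_dp_link_get() lives
 * inside struct msm_dp_link_private, so callers within this file can
 * always get back to the container.
 */
static struct msm_dp_link_private *
example_to_private(struct msm_dp_link *msm_dp_link)
{
	return container_of(msm_dp_link, struct msm_dp_link_private,
			    msm_dp_link);
}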
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 5846337bb56f..8db5d5698a97 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -12,7 +12,7 @@
#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
-struct dp_link_info {
+struct msm_dp_link_info {
unsigned char revision;
unsigned int rate;
unsigned int num_lanes;
@@ -21,7 +21,7 @@ struct dp_link_info {
#define DP_TRAIN_LEVEL_MAX 3
-struct dp_link_test_video {
+struct msm_dp_link_test_video {
u32 test_video_pattern;
u32 test_bit_depth;
u32 test_dyn_range;
@@ -39,7 +39,7 @@ struct dp_link_test_video {
u32 test_rr_n;
};
-struct dp_link_test_audio {
+struct msm_dp_link_test_audio {
u32 test_audio_sampling_rate;
u32 test_audio_channel_count;
u32 test_audio_pattern_type;
@@ -53,21 +53,21 @@ struct dp_link_test_audio {
u32 test_audio_period_ch_8;
};
-struct dp_link_phy_params {
+struct msm_dp_link_phy_params {
u32 phy_test_pattern_sel;
u8 v_level;
u8 p_level;
};
-struct dp_link {
+struct msm_dp_link {
u32 sink_request;
u32 test_response;
u8 sink_count;
- struct dp_link_test_video test_video;
- struct dp_link_test_audio test_audio;
- struct dp_link_phy_params phy_params;
- struct dp_link_info link_params;
+ struct msm_dp_link_test_video test_video;
+ struct msm_dp_link_test_audio test_audio;
+ struct msm_dp_link_phy_params phy_params;
+ struct msm_dp_link_info link_params;
};
/**
@@ -78,7 +78,7 @@ struct dp_link {
* given bit depth value. This function assumes that bit depth has
* already been validated.
*/
-static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
+static inline u32 msm_dp_link_bit_depth_to_bpp(u32 tbd)
{
/*
* Few simplistic rules and assumptions made here:
@@ -99,22 +99,22 @@ static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
}
}
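A usage sketch for the inline helper (editorial; mirrors how dp_panel consumes it): the sink's requested test bit depth is converted back into bpp, where DP_TEST_BIT_DEPTH_8 means 8 bits per component, i.e. 24 bpp:

static inline u32 example_requested_bpp(const struct msm_dp_link *msm_dp_link)
{
	return msm_dp_link_bit_depth_to_bpp(
			msm_dp_link->test_video.test_bit_depth);
}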
-void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link);
-u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
-int dp_link_process_request(struct dp_link *dp_link);
-int dp_link_get_colorimetry_config(struct dp_link *dp_link);
-int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status);
-bool dp_link_send_test_response(struct dp_link *dp_link);
-int dp_link_psm_config(struct dp_link *dp_link,
- struct dp_link_info *link_info, bool enable);
-bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum);
+void msm_dp_link_reset_phy_params_vx_px(struct msm_dp_link *msm_dp_link);
+u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp);
+int msm_dp_link_process_request(struct msm_dp_link *msm_dp_link);
+int msm_dp_link_get_colorimetry_config(struct msm_dp_link *msm_dp_link);
+int msm_dp_link_adjust_levels(struct msm_dp_link *msm_dp_link, u8 *link_status);
+bool msm_dp_link_send_test_response(struct msm_dp_link *msm_dp_link);
+int msm_dp_link_psm_config(struct msm_dp_link *msm_dp_link,
+ struct msm_dp_link_info *link_info, bool enable);
+bool msm_dp_link_send_edid_checksum(struct msm_dp_link *msm_dp_link, u8 checksum);
/**
- * dp_link_get() - get the functionalities of dp test module
+ * msm_dp_link_get() - allocate and initialize the DP link module
*
- * return: a pointer to dp_link struct
+ * return: a pointer to msm_dp_link struct
*/
-struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux);
+struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux);
#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 6ff6c9ef351f..92415bf8aa16 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -14,52 +14,52 @@
#define DP_MAX_NUM_DP_LANES 4
#define DP_LINK_RATE_HBR2 540000 /* kHz */
-struct dp_panel_private {
+struct msm_dp_panel_private {
struct device *dev;
struct drm_device *drm_dev;
- struct dp_panel dp_panel;
+ struct msm_dp_panel msm_dp_panel;
struct drm_dp_aux *aux;
- struct dp_link *link;
- struct dp_catalog *catalog;
+ struct msm_dp_link *link;
+ struct msm_dp_catalog *catalog;
bool panel_on;
};
-static void dp_panel_read_psr_cap(struct dp_panel_private *panel)
+static void msm_dp_panel_read_psr_cap(struct msm_dp_panel_private *panel)
{
ssize_t rlen;
- struct dp_panel *dp_panel;
+ struct msm_dp_panel *msm_dp_panel;
- dp_panel = &panel->dp_panel;
+ msm_dp_panel = &panel->msm_dp_panel;
/* edp sink */
- if (dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) {
+ if (msm_dp_panel->dpcd[DP_EDP_CONFIGURATION_CAP]) {
rlen = drm_dp_dpcd_read(panel->aux, DP_PSR_SUPPORT,
- &dp_panel->psr_cap, sizeof(dp_panel->psr_cap));
- if (rlen == sizeof(dp_panel->psr_cap)) {
+ &msm_dp_panel->psr_cap, sizeof(msm_dp_panel->psr_cap));
+ if (rlen == sizeof(msm_dp_panel->psr_cap)) {
drm_dbg_dp(panel->drm_dev,
"psr version: 0x%x, psr_cap: 0x%x\n",
- dp_panel->psr_cap.version,
- dp_panel->psr_cap.capabilities);
+ msm_dp_panel->psr_cap.version,
+ msm_dp_panel->psr_cap.capabilities);
} else
DRM_ERROR("failed to read psr info, rlen=%zd\n", rlen);
}
}
-static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
{
int rc;
- struct dp_panel_private *panel;
- struct dp_link_info *link_info;
+ struct msm_dp_panel_private *panel;
+ struct msm_dp_link_info *link_info;
u8 *dpcd, major, minor;
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- dpcd = dp_panel->dpcd;
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
+ dpcd = msm_dp_panel->dpcd;
rc = drm_dp_read_dpcd_caps(panel->aux, dpcd);
if (rc)
return rc;
- dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd);
- link_info = &dp_panel->link_info;
+ msm_dp_panel->vsc_sdp_supported = drm_dp_vsc_sdp_supported(panel->aux, dpcd);
+ link_info = &msm_dp_panel->link_info;
link_info->revision = dpcd[DP_DPCD_REV];
major = (link_info->revision >> 4) & 0x0f;
minor = link_info->revision & 0x0f;
@@ -68,12 +68,12 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
link_info->num_lanes = drm_dp_max_lane_count(dpcd);
/* Limit the lane count to the data-lanes property of the dtsi endpoint */
- if (link_info->num_lanes > dp_panel->max_dp_lanes)
- link_info->num_lanes = dp_panel->max_dp_lanes;
+ if (link_info->num_lanes > msm_dp_panel->max_dp_lanes)
+ link_info->num_lanes = msm_dp_panel->max_dp_lanes;
/* Limit the link rate to the link-frequencies property of the dtsi endpoint */
- if (link_info->rate > dp_panel->max_dp_link_rate)
- link_info->rate = dp_panel->max_dp_link_rate;
+ if (link_info->rate > msm_dp_panel->max_dp_link_rate)
+ link_info->rate = msm_dp_panel->max_dp_link_rate;
drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
@@ -82,21 +82,21 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
if (drm_dp_enhanced_frame_cap(dpcd))
link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
- dp_panel_read_psr_cap(panel);
+ msm_dp_panel_read_psr_cap(panel);
return rc;
}
-static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+static u32 msm_dp_panel_get_supported_bpp(struct msm_dp_panel *msm_dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
- const struct dp_link_info *link_info;
+ const struct msm_dp_link_info *link_info;
const u32 max_supported_bpp = 30, min_supported_bpp = 18;
u32 bpp, data_rate_khz;
bpp = min(mode_edid_bpp, max_supported_bpp);
- link_info = &dp_panel->link_info;
+ link_info = &msm_dp_panel->link_info;
data_rate_khz = link_info->num_lanes * link_info->rate * 8;
do {
@@ -108,39 +108,39 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
return min_supported_bpp;
}
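The loop body is elided above; as a hedged reconstruction, the candidate bpp is stepped down in units of 6 (2 bits per component across 3 components) until the mode's bandwidth demand fits the link:

/*
 * Both sides of the comparison are in kbit/s: pclk_khz * bpp on the
 * demand side, lanes * link_rate_khz * 8 on the supply side.
 */
static u32 example_fit_bpp(u32 bpp, u32 min_bpp, u32 mode_pclk_khz,
			   u32 data_rate_khz)
{
	while (bpp > min_bpp) {
		if (mode_pclk_khz * bpp <= data_rate_khz)
			return bpp;
		bpp -= 6;
	}

	return min_bpp;
}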
-int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector)
{
int rc, bw_code;
int count;
- struct dp_panel_private *panel;
+ struct msm_dp_panel_private *panel;
- if (!dp_panel || !connector) {
+ if (!msm_dp_panel || !connector) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
drm_dbg_dp(panel->drm_dev, "max_lanes=%d max_link_rate=%d\n",
- dp_panel->max_dp_lanes, dp_panel->max_dp_link_rate);
+ msm_dp_panel->max_dp_lanes, msm_dp_panel->max_dp_link_rate);
- rc = dp_panel_read_dpcd(dp_panel);
+ rc = msm_dp_panel_read_dpcd(msm_dp_panel);
if (rc) {
DRM_ERROR("read dpcd failed %d\n", rc);
return rc;
}
- bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+ bw_code = drm_dp_link_rate_to_bw_code(msm_dp_panel->link_info.rate);
if (!is_link_rate_valid(bw_code) ||
- !is_lane_count_valid(dp_panel->link_info.num_lanes) ||
- (bw_code > dp_panel->max_bw_code)) {
- DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
- dp_panel->link_info.num_lanes);
+ !is_lane_count_valid(msm_dp_panel->link_info.num_lanes) ||
+ (bw_code > msm_dp_panel->max_bw_code)) {
+ DRM_ERROR("Illegal link rate=%d lane=%d\n", msm_dp_panel->link_info.rate,
+ msm_dp_panel->link_info.num_lanes);
return -EINVAL;
}
- if (drm_dp_is_branch(dp_panel->dpcd)) {
+ if (drm_dp_is_branch(msm_dp_panel->dpcd)) {
count = drm_dp_read_sink_count(panel->aux);
if (!count) {
panel->link->sink_count = 0;
@@ -148,21 +148,21 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
}
}
- rc = drm_dp_read_downstream_info(panel->aux, dp_panel->dpcd,
- dp_panel->downstream_ports);
+ rc = drm_dp_read_downstream_info(panel->aux, msm_dp_panel->dpcd,
+ msm_dp_panel->downstream_ports);
if (rc)
return rc;
- drm_edid_free(dp_panel->drm_edid);
+ drm_edid_free(msm_dp_panel->drm_edid);
- dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc);
+ msm_dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc);
- drm_edid_connector_update(connector, dp_panel->drm_edid);
+ drm_edid_connector_update(connector, msm_dp_panel->drm_edid);
- if (!dp_panel->drm_edid) {
+ if (!msm_dp_panel->drm_edid) {
DRM_ERROR("panel edid read failed\n");
/* check whether the EDID read failure is due to unplug */
- if (!dp_catalog_link_is_connected(panel->catalog)) {
+ if (!msm_dp_catalog_link_is_connected(panel->catalog)) {
rc = -ETIMEDOUT;
goto end;
}
@@ -172,87 +172,87 @@ end:
return rc;
}
-u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
+u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
- struct dp_panel_private *panel;
+ struct msm_dp_panel_private *panel;
u32 bpp;
- if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
+ if (!msm_dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
DRM_ERROR("invalid input\n");
return 0;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- if (dp_panel->video_test)
- bpp = dp_link_bit_depth_to_bpp(
+ if (msm_dp_panel->video_test)
+ bpp = msm_dp_link_bit_depth_to_bpp(
panel->link->test_video.test_bit_depth);
else
- bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
+ bpp = msm_dp_panel_get_supported_bpp(msm_dp_panel, mode_edid_bpp,
mode_pclk_khz);
return bpp;
}
-int dp_panel_get_modes(struct dp_panel *dp_panel,
+int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector)
{
- if (!dp_panel) {
+ if (!msm_dp_panel) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- if (dp_panel->drm_edid)
+ if (msm_dp_panel->drm_edid)
return drm_edid_connector_add_modes(connector);
return 0;
}
-static u8 dp_panel_get_edid_checksum(const struct edid *edid)
+static u8 msm_dp_panel_get_edid_checksum(const struct edid *edid)
{
edid += edid->extensions;
return edid->checksum;
}
-void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel)
{
- struct dp_panel_private *panel;
+ struct msm_dp_panel_private *panel;
- if (!dp_panel) {
+ if (!msm_dp_panel) {
DRM_ERROR("invalid input\n");
return;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
/* FIXME: get rid of drm_edid_raw() */
- const struct edid *edid = drm_edid_raw(dp_panel->drm_edid);
+ const struct edid *edid = drm_edid_raw(msm_dp_panel->drm_edid);
u8 checksum;
if (edid)
- checksum = dp_panel_get_edid_checksum(edid);
+ checksum = msm_dp_panel_get_edid_checksum(edid);
else
- checksum = dp_panel->connector->real_edid_checksum;
+ checksum = msm_dp_panel->connector->real_edid_checksum;
- dp_link_send_edid_checksum(panel->link, checksum);
- dp_link_send_test_response(panel->link);
+ msm_dp_link_send_edid_checksum(panel->link, checksum);
+ msm_dp_link_send_test_response(panel->link);
}
}
-void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
+void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable)
{
- struct dp_catalog *catalog;
- struct dp_panel_private *panel;
+ struct msm_dp_catalog *catalog;
+ struct msm_dp_panel_private *panel;
- if (!dp_panel) {
+ if (!msm_dp_panel) {
DRM_ERROR("invalid input\n");
return;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
catalog = panel->catalog;
if (!panel->panel_on) {
@@ -262,31 +262,31 @@ void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
}
if (!enable) {
- dp_catalog_panel_tpg_disable(catalog);
+ msm_dp_catalog_panel_tpg_disable(catalog);
return;
}
drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n");
- dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
+ msm_dp_catalog_panel_tpg_enable(catalog, &panel->msm_dp_panel.msm_dp_mode.drm_mode);
}
-static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel)
+static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
{
- struct dp_catalog *catalog;
- struct dp_panel_private *panel;
- struct dp_display_mode *dp_mode;
+ struct msm_dp_catalog *catalog;
+ struct msm_dp_panel_private *panel;
+ struct msm_dp_display_mode *msm_dp_mode;
struct drm_dp_vsc_sdp vsc_sdp_data;
struct dp_sdp vsc_sdp;
ssize_t len;
- if (!dp_panel) {
+ if (!msm_dp_panel) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
catalog = panel->catalog;
- dp_mode = &dp_panel->dp_mode;
+ msm_dp_mode = &msm_dp_panel->msm_dp_mode;
memset(&vsc_sdp_data, 0, sizeof(vsc_sdp_data));
@@ -300,7 +300,7 @@ static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel)
vsc_sdp_data.colorimetry = DP_COLORIMETRY_DEFAULT;
/* VSC SDP Payload for DB17 */
- vsc_sdp_data.bpc = dp_mode->bpp / 3;
+ vsc_sdp_data.bpc = msm_dp_mode->bpp / 3;
vsc_sdp_data.dynamic_range = DP_DYNAMIC_RANGE_CTA;
/* VSC SDP Payload for DB18 */
@@ -312,36 +312,25 @@ static int dp_panel_setup_vsc_sdp_yuv_420(struct dp_panel *dp_panel)
return len;
}
- dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp);
+ msm_dp_catalog_panel_enable_vsc_sdp(catalog, &vsc_sdp);
return 0;
}
-void dp_panel_dump_regs(struct dp_panel *dp_panel)
-{
- struct dp_catalog *catalog;
- struct dp_panel_private *panel;
-
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
- catalog = panel->catalog;
-
- dp_catalog_dump_regs(catalog);
-}
-
-int dp_panel_timing_cfg(struct dp_panel *dp_panel)
+int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
{
u32 data, total_ver, total_hor;
- struct dp_catalog *catalog;
- struct dp_panel_private *panel;
+ struct msm_dp_catalog *catalog;
+ struct msm_dp_panel_private *panel;
struct drm_display_mode *drm_mode;
u32 width_blanking;
u32 sync_start;
- u32 dp_active;
+ u32 msm_dp_active;
u32 total;
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
catalog = panel->catalog;
- drm_mode = &panel->dp_panel.dp_mode.drm_mode;
+ drm_mode = &panel->msm_dp_panel.msm_dp_mode.drm_mode;
drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n",
drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end,
@@ -371,9 +360,9 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
data = drm_mode->vsync_end - drm_mode->vsync_start;
data <<= 16;
- data |= (panel->dp_panel.dp_mode.v_active_low << 31);
+ data |= (panel->msm_dp_panel.msm_dp_mode.v_active_low << 31);
data |= drm_mode->hsync_end - drm_mode->hsync_start;
- data |= (panel->dp_panel.dp_mode.h_active_low << 15);
+ data |= (panel->msm_dp_panel.msm_dp_mode.h_active_low << 15);
width_blanking = data;
@@ -381,26 +370,26 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
data <<= 16;
data |= drm_mode->hdisplay;
- dp_active = data;
+ msm_dp_active = data;
- dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, dp_active);
+ msm_dp_catalog_panel_timing_cfg(catalog, total, sync_start, width_blanking, msm_dp_active);
- if (dp_panel->dp_mode.out_fmt_is_yuv_420)
- dp_panel_setup_vsc_sdp_yuv_420(dp_panel);
+ if (msm_dp_panel->msm_dp_mode.out_fmt_is_yuv_420)
+ msm_dp_panel_setup_vsc_sdp_yuv_420(msm_dp_panel);
panel->panel_on = true;
return 0;
}
-int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel)
{
struct drm_display_mode *drm_mode;
- struct dp_panel_private *panel;
+ struct msm_dp_panel_private *panel;
- drm_mode = &dp_panel->dp_mode.drm_mode;
+ drm_mode = &msm_dp_panel->msm_dp_mode.drm_mode;
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
/*
* print resolution info as this is a result
@@ -421,18 +410,18 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
drm_mode->vsync_end - drm_mode->vsync_start);
drm_dbg_dp(panel->drm_dev, "pixel clock (KHz)=(%d)\n",
drm_mode->clock);
- drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp);
+ drm_dbg_dp(panel->drm_dev, "bpp = %d\n", msm_dp_panel->msm_dp_mode.bpp);
- dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp,
- dp_panel->dp_mode.drm_mode.clock);
+ msm_dp_panel->msm_dp_mode.bpp = msm_dp_panel_get_mode_bpp(msm_dp_panel, msm_dp_panel->msm_dp_mode.bpp,
+ msm_dp_panel->msm_dp_mode.drm_mode.clock);
drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n",
- dp_panel->dp_mode.bpp);
+ msm_dp_panel->msm_dp_mode.bpp);
return 0;
}
-static u32 dp_panel_link_frequencies(struct device_node *of_node)
+static u32 msm_dp_panel_link_frequencies(struct device_node *of_node)
{
struct device_node *endpoint;
u64 frequency = 0;
@@ -456,17 +445,17 @@ static u32 dp_panel_link_frequencies(struct device_node *of_node)
return frequency;
}
-static int dp_panel_parse_dt(struct dp_panel *dp_panel)
+static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel)
{
- struct dp_panel_private *panel;
+ struct msm_dp_panel_private *panel;
struct device_node *of_node;
int cnt;
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
of_node = panel->dev->of_node;
/*
* data-lanes is the property of dp_out endpoint
*/
cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES);
if (cnt < 0) {
@@ -475,51 +464,52 @@ static int dp_panel_parse_dt(struct dp_panel *dp_panel)
}
if (cnt > 0)
- dp_panel->max_dp_lanes = cnt;
+ msm_dp_panel->max_dp_lanes = cnt;
else
- dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */
+ msm_dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */
- dp_panel->max_dp_link_rate = dp_panel_link_frequencies(of_node);
- if (!dp_panel->max_dp_link_rate)
- dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2;
+ msm_dp_panel->max_dp_link_rate = msm_dp_panel_link_frequencies(of_node);
+ if (!msm_dp_panel->max_dp_link_rate)
+ msm_dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2;
return 0;
}
-struct dp_panel *dp_panel_get(struct dp_panel_in *in)
+struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
+ struct msm_dp_link *link, struct msm_dp_catalog *catalog)
{
- struct dp_panel_private *panel;
- struct dp_panel *dp_panel;
+ struct msm_dp_panel_private *panel;
+ struct msm_dp_panel *msm_dp_panel;
int ret;
- if (!in->dev || !in->catalog || !in->aux || !in->link) {
+ if (!dev || !catalog || !aux || !link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
- panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
if (!panel)
return ERR_PTR(-ENOMEM);
- panel->dev = in->dev;
- panel->aux = in->aux;
- panel->catalog = in->catalog;
- panel->link = in->link;
+ panel->dev = dev;
+ panel->aux = aux;
+ panel->catalog = catalog;
+ panel->link = link;
- dp_panel = &panel->dp_panel;
- dp_panel->max_bw_code = DP_LINK_BW_8_1;
+ msm_dp_panel = &panel->msm_dp_panel;
+ msm_dp_panel->max_bw_code = DP_LINK_BW_8_1;
- ret = dp_panel_parse_dt(dp_panel);
+ ret = msm_dp_panel_parse_dt(msm_dp_panel);
if (ret)
return ERR_PTR(ret);
- return dp_panel;
+ return msm_dp_panel;
}
-void dp_panel_put(struct dp_panel *dp_panel)
+void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel)
{
- if (!dp_panel)
+ if (!msm_dp_panel)
return;
- drm_edid_free(dp_panel->drm_edid);
+ drm_edid_free(msm_dp_panel->drm_edid);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 6722e3923fa5..4906f4f09f24 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -13,7 +13,7 @@
struct edid;
-struct dp_display_mode {
+struct msm_dp_display_mode {
struct drm_display_mode drm_mode;
u32 bpp;
u32 h_active_low;
@@ -21,28 +21,21 @@ struct dp_display_mode {
bool out_fmt_is_yuv_420;
};
-struct dp_panel_in {
- struct device *dev;
- struct drm_dp_aux *aux;
- struct dp_link *link;
- struct dp_catalog *catalog;
-};
-
-struct dp_panel_psr {
+struct msm_dp_panel_psr {
u8 version;
u8 capabilities;
};
-struct dp_panel {
+struct msm_dp_panel {
/* dpcd raw data */
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
- struct dp_link_info link_info;
+ struct msm_dp_link_info link_info;
const struct drm_edid *drm_edid;
struct drm_connector *connector;
- struct dp_display_mode dp_mode;
- struct dp_panel_psr psr_cap;
+ struct msm_dp_display_mode msm_dp_mode;
+ struct msm_dp_panel_psr psr_cap;
bool video_test;
bool vsc_sdp_supported;
@@ -52,18 +45,17 @@ struct dp_panel {
u32 max_bw_code;
};
-int dp_panel_init_panel_info(struct dp_panel *dp_panel);
-int dp_panel_deinit(struct dp_panel *dp_panel);
-int dp_panel_timing_cfg(struct dp_panel *dp_panel);
-void dp_panel_dump_regs(struct dp_panel *dp_panel);
-int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel);
+int msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel);
+int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel);
+int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
-u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
+u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp,
u32 mode_pclk_khz);
-int dp_panel_get_modes(struct dp_panel *dp_panel,
+int msm_dp_panel_get_modes(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
-void dp_panel_handle_sink_request(struct dp_panel *dp_panel);
-void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable);
+void msm_dp_panel_handle_sink_request(struct msm_dp_panel *msm_dp_panel);
+void msm_dp_panel_tpg_config(struct msm_dp_panel *msm_dp_panel, bool enable);
/**
* is_link_rate_valid() - validates the link rate
@@ -80,7 +72,7 @@ static inline bool is_link_rate_valid(u32 bw_code)
}
/**
- * dp_link_is_lane_count_valid() - validates the lane count
+ * msm_dp_link_is_lane_count_valid() - validates the lane count
* @lane_count: lane count requested by the sink
*
* Returns true if the requested lane count is supported.
@@ -92,6 +84,7 @@ static inline bool is_lane_count_valid(u32 lane_count)
lane_count == 4);
}
-struct dp_panel *dp_panel_get(struct dp_panel_in *in);
-void dp_panel_put(struct dp_panel *dp_panel);
+struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
+ struct msm_dp_link *link, struct msm_dp_catalog *catalog);
+void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel);
#endif /* _DP_PANEL_H_ */
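With the dp_panel_in bundle removed, callers now hand the four dependencies to msm_dp_panel_get() directly. A minimal sketch of the updated call site (the surrounding names are illustrative, not taken from this diff):

    struct msm_dp_panel *panel;

    panel = msm_dp_panel_get(dev, aux, link, catalog);
    if (IS_ERR(panel))
            return PTR_ERR(panel);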
diff --git a/drivers/gpu/drm/msm/dp/dp_utils.c b/drivers/gpu/drm/msm/dp/dp_utils.c
index da9207caf72d..4a5ebb0c33b8 100644
--- a/drivers/gpu/drm/msm/dp/dp_utils.c
+++ b/drivers/gpu/drm/msm/dp/dp_utils.c
@@ -9,7 +9,7 @@
#define DP_SDP_HEADER_SIZE 8
-u8 dp_utils_get_g0_value(u8 data)
+u8 msm_dp_utils_get_g0_value(u8 data)
{
u8 c[4];
u8 g[4];
@@ -30,7 +30,7 @@ u8 dp_utils_get_g0_value(u8 data)
return ret_data;
}
-u8 dp_utils_get_g1_value(u8 data)
+u8 msm_dp_utils_get_g1_value(u8 data)
{
u8 c[4];
u8 g[4];
@@ -51,7 +51,7 @@ u8 dp_utils_get_g1_value(u8 data)
return ret_data;
}
-u8 dp_utils_calculate_parity(u32 data)
+u8 msm_dp_utils_calculate_parity(u32 data)
{
u8 x0 = 0;
u8 x1 = 0;
@@ -65,8 +65,8 @@ u8 dp_utils_calculate_parity(u32 data)
iData = (data >> i * 4) & 0xF;
ci = iData ^ x1;
- x1 = x0 ^ dp_utils_get_g1_value(ci);
- x0 = dp_utils_get_g0_value(ci);
+ x1 = x0 ^ msm_dp_utils_get_g1_value(ci);
+ x0 = msm_dp_utils_get_g0_value(ci);
}
parity_byte = x1 | (x0 << 4);
@@ -74,23 +74,15 @@ u8 dp_utils_calculate_parity(u32 data)
return parity_byte;
}
-ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff)
+void msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 header_buff[2])
{
- size_t length;
-
- length = sizeof(header_buff);
- if (length < DP_SDP_HEADER_SIZE)
- return -ENOSPC;
-
header_buff[0] = FIELD_PREP(HEADER_0_MASK, sdp_header->HB0) |
- FIELD_PREP(PARITY_0_MASK, dp_utils_calculate_parity(sdp_header->HB0)) |
+ FIELD_PREP(PARITY_0_MASK, msm_dp_utils_calculate_parity(sdp_header->HB0)) |
FIELD_PREP(HEADER_1_MASK, sdp_header->HB1) |
- FIELD_PREP(PARITY_1_MASK, dp_utils_calculate_parity(sdp_header->HB1));
+ FIELD_PREP(PARITY_1_MASK, msm_dp_utils_calculate_parity(sdp_header->HB1));
header_buff[1] = FIELD_PREP(HEADER_2_MASK, sdp_header->HB2) |
- FIELD_PREP(PARITY_2_MASK, dp_utils_calculate_parity(sdp_header->HB2)) |
+ FIELD_PREP(PARITY_2_MASK, msm_dp_utils_calculate_parity(sdp_header->HB2)) |
FIELD_PREP(HEADER_3_MASK, sdp_header->HB3) |
- FIELD_PREP(PARITY_3_MASK, dp_utils_calculate_parity(sdp_header->HB3));
-
- return length;
+ FIELD_PREP(PARITY_3_MASK, msm_dp_utils_calculate_parity(sdp_header->HB3));
}
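The dropped length check was dead code: an array parameter decays to a pointer, so sizeof(header_buff) measured sizeof(u32 *), not the caller's buffer. Worse, on a 32-bit build a 4-byte pointer is smaller than DP_SDP_HEADER_SIZE (8), so the -ENOSPC branch would always fire there. A standalone illustration of the pitfall (plain C, names hypothetical):

    static void pack(uint32_t buf[2])
    {
            /* sizeof(buf) == sizeof(uint32_t *): 4 on 32-bit, 8 on 64-bit,
             * never 2 * sizeof(uint32_t) as the prototype suggests */
            size_t n = sizeof(buf);
    }

The new u32 header_buff[2] prototype with a void return documents the required size instead of pretending to verify it.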
diff --git a/drivers/gpu/drm/msm/dp/dp_utils.h b/drivers/gpu/drm/msm/dp/dp_utils.h
index 7c056d9798dc..2e4f98a863c4 100644
--- a/drivers/gpu/drm/msm/dp/dp_utils.h
+++ b/drivers/gpu/drm/msm/dp/dp_utils.h
@@ -28,9 +28,9 @@
#define HEADER_3_MASK GENMASK(23, 16)
#define PARITY_3_MASK GENMASK(31, 24)
-u8 dp_utils_get_g0_value(u8 data);
-u8 dp_utils_get_g1_value(u8 data);
-u8 dp_utils_calculate_parity(u32 data);
-ssize_t dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff);
+u8 msm_dp_utils_get_g0_value(u8 data);
+u8 msm_dp_utils_get_g1_value(u8 data);
+u8 msm_dp_utils_calculate_parity(u32 data);
+void msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 header_buff[2]);
#endif /* _DP_UTILS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index efd7c23b662f..296215877613 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -207,7 +207,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
static struct platform_driver dsi_driver = {
.probe = dsi_dev_probe,
- .remove_new = dsi_dev_remove,
+ .remove = dsi_dev_remove,
.driver = {
.name = "msm_dsi",
.of_match_table = dt_match,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 10ba7d153d1c..7754dcec33d0 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -286,6 +286,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_1,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 4c9b4b37681b..120cb65164c1 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -23,6 +23,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000
+#define MSM_DSI_6G_VER_MINOR_V2_3_1 0x20030001
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 185d7de0bf37..007311c21fda 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -542,7 +542,7 @@ static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mo
int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
- return new_htotal * mode->vtotal * drm_mode_vrefresh(mode);
+ return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal);
}
static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
@@ -550,7 +550,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
{
unsigned long pclk_rate;
- pclk_rate = mode->clock * 1000;
+ pclk_rate = mode->clock * 1000u;
if (dsc)
pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);
@@ -1831,7 +1831,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL);
ret = 0;
- if (of_property_read_bool(np, "syscon-sfpb")) {
+ if (of_property_present(np, "syscon-sfpb")) {
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
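Two fixes above are worth unpacking. The compression-adjusted pixel clock is now derived by scaling mode->clock by the ratio new_htotal/htotal rather than recomputing htotal * vtotal * vrefresh, which re-rounds an already-rounded refresh rate; mult_frac(x, n, d) evaluates x * n / d without letting the intermediate product overflow. A worked sketch with invented numbers:

    /* hypothetical 1080p timing, values for illustration only */
    unsigned long pclk_hz = 148500 * 1000u;      /* mode->clock is in kHz */
    int htotal = 2200, new_htotal = 1320;        /* hdisplay shrunk by DSC */

    /* mult_frac(x, n, d) == x * n / d, overflow-safe: 89100000 Hz here */
    unsigned long adjusted = mult_frac(pclk_hz, new_htotal, htotal);

The syscon-sfpb change is semantic rather than cosmetic: of_property_read_bool() is meant for boolean properties, while of_property_present() merely asks whether the property exists, which is what a phandle lookup wants.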
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index dd58bc0a49eb..c0bcc6828963 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -567,6 +567,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_14nm_8953_cfgs },
{ .compatible = "qcom,sm6125-dsi-phy-14nm",
.data = &dsi_phy_14nm_2290_cfgs },
+ { .compatible = "qcom,sm6150-dsi-phy-14nm",
+ .data = &dsi_phy_14nm_6150_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 4953459edd63..8985818bb2e0 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -46,6 +46,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_6150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 1723f0e4faa4..2c3cbe0f2870 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -1032,6 +1032,10 @@ static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
{ .supply = "vcca", .init_load_uA = 73400 },
};
+static const struct regulator_bulk_data dsi_phy_14nm_36mA_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 36000 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_17mA_regulators,
@@ -1097,3 +1101,20 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs = {
.io_start = { 0x5e94400 },
.num_dsi_phy = 1,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_6150_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_14nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_36mA_regulators),
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0xae94400 },
+ .num_dsi_phy = 1,
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 0bfee41c2e71..37b3809c6bdd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -561,7 +561,7 @@ static const struct of_device_id msm_hdmi_dt_match[] = {
static struct platform_driver msm_hdmi_driver = {
.probe = msm_hdmi_dev_probe,
- .remove_new = msm_hdmi_dev_remove,
+ .remove = msm_hdmi_dev_remove,
.driver = {
.name = "hdmi_msm",
.of_match_table = msm_hdmi_dt_match,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 95b3f7535d84..03120c54ced6 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -203,7 +203,7 @@ static const struct of_device_id msm_hdmi_phy_dt_match[] = {
static struct platform_driver msm_hdmi_phy_platform_driver = {
.probe = msm_hdmi_phy_probe,
- .remove_new = msm_hdmi_phy_remove,
+ .remove = msm_hdmi_phy_remove,
.driver = {
.name = "msm_hdmi_phy",
.of_match_table = msm_hdmi_phy_dt_match,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
index 0e3a2b16a2ce..33bb48ae58a2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -137,7 +137,7 @@ static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
base <<= (digclk_divsel == 2 ? 1 : 0);
- return (base <= 2046 ? base : 2046);
+ return base;
}
static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
@@ -153,22 +153,12 @@ static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
return dividend - 1;
}
-static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
-{
- u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
-
- do_div(fdata, HDMI_PLL_CMP_CNT);
-
- return fdata;
-}
-
#define HDMI_REF_CLOCK_HZ ((u64)19200000)
#define HDMI_MHZ_TO_HZ ((u64)1000000)
static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk)
{
- u32 const ratio_list[] = {1, 2, 3, 4, 5, 6,
- 9, 10, 12, 15, 25};
- u32 const band_list[] = {0, 1, 2, 3};
+ static const u32 ratio_list[] = {1, 2, 3, 4, 5, 6, 9, 10, 12, 15, 25};
+ static const u32 band_list[] = {0, 1, 2, 3};
u32 const sz_ratio = ARRAY_SIZE(ratio_list);
u32 const sz_band = ARRAY_SIZE(band_list);
u32 const cmp_cnt = 1024;
@@ -279,7 +269,7 @@ find_optimal_index:
case 25:
found_hsclk_divsel = 14;
break;
- };
+ }
pd->vco_freq = found_vco_freq;
pd->tx_band_sel = found_tx_band_sel;
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 9c45d641b521..a7a2384044ff 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -115,7 +115,7 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
timer->kms = kms;
timer->crtc_idx = crtc_idx;
- timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
+ timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx);
if (IS_ERR(timer->worker)) {
int ret = PTR_ERR(timer->worker);
timer->worker = NULL;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 8c13b08708d2..ff7a7a9f7b0d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -11,6 +11,7 @@
#include <linux/of_address.h>
#include <linux/uaccess.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -291,7 +292,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (priv->kms_init) {
drm_kms_helper_poll_init(ddev);
- msm_fbdev_setup(ddev);
+ drm_client_setup(ddev, NULL);
}
return 0;
@@ -537,7 +538,7 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
/* Only supported if per-process address space is supported: */
if (priv->gpu->aspace == ctx->aspace)
- return -EOPNOTSUPP;
+ return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
@@ -902,13 +903,13 @@ static const struct drm_driver msm_driver = {
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
#endif
+ MSM_FBDEV_DRIVER_OPS,
.show_fdinfo = msm_show_fdinfo,
.ioctls = msm_ioctls,
.num_ioctls = ARRAY_SIZE(msm_ioctls),
.fops = &fops,
.name = "msm",
.desc = "MSM Snapdragon DRM",
- .date = "20130625",
.major = MSM_VERSION_MAJOR,
.minor = MSM_VERSION_MINOR,
.patchlevel = MSM_VERSION_PATCHLEVEL,
@@ -983,6 +984,10 @@ module_param(prefer_mdp5, bool, 0444);
/* list all platforms supported by both mdp5 and dpu drivers */
static const char *const msm_mdp5_dpu_migration[] = {
+ "qcom,msm8917-mdp5",
+ "qcom,msm8937-mdp5",
+ "qcom,msm8953-mdp5",
+ "qcom,msm8996-mdp5",
"qcom,sdm630-mdp5",
"qcom,sdm660-mdp5",
NULL,
@@ -1105,7 +1110,7 @@ static void msm_pdev_remove(struct platform_device *pdev)
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
- .remove_new = msm_pdev_remove,
+ .remove = msm_pdev_remove,
.driver = {
.name = "msm",
},
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 2e28a1344636..fee31680a6d5 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -28,6 +28,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_dsc.h>
#include <drm/msm_drm.h>
@@ -36,6 +37,9 @@
extern struct fault_attr fail_gem_alloc;
extern struct fault_attr fail_gem_iova;
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
@@ -49,7 +53,6 @@ struct msm_gem_vma;
struct msm_disp_state;
#define MAX_CRTCS 8
-#define MAX_BRIDGES 8
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
@@ -68,23 +71,6 @@ enum msm_dsi_controller {
};
#define MSM_GPU_MAX_RINGS 4
-#define MAX_H_TILES_PER_DISPLAY 2
-
-/**
- * struct msm_display_topology - defines a display topology pipeline
- * @num_lm: number of layer mixers used
- * @num_intf: number of interfaces the panel is mounted on
- * @num_dspp: number of dspp blocks used
- * @num_dsc: number of Display Stream Compression (DSC) blocks used
- * @needs_cdm: indicates whether cdm block is needed for this display topology
- */
-struct msm_display_topology {
- u32 num_lm;
- u32 num_intf;
- u32 num_dspp;
- u32 num_dsc;
- bool needs_cdm;
-};
/* Commit/Event thread specific structure */
struct msm_drm_thread {
@@ -290,11 +276,13 @@ struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
int w, int h, int p, uint32_t format);
#ifdef CONFIG_DRM_FBDEV_EMULATION
-void msm_fbdev_setup(struct drm_device *dev);
+int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+#define MSM_FBDEV_DRIVER_OPS \
+ .fbdev_probe = msm_fbdev_driver_fbdev_probe
#else
-static inline void msm_fbdev_setup(struct drm_device *dev)
-{
-}
+#define MSM_FBDEV_DRIVER_OPS \
+ .fbdev_probe = NULL
#endif
struct hdmi;
@@ -519,6 +507,12 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
clockid_t clock_id,
enum hrtimer_mode mode);
+/* Helper for returning a UABI error with optional logging which can make
+ * it easier for userspace to understand what it is doing wrong.
+ */
+#define UERR(err, drm, fmt, ...) \
+ ({ DRM_DEV_DEBUG_DRIVER((drm)->dev, fmt, ##__VA_ARGS__); -(err); })
+
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
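UERR() bundles the errno with a DRM_UT_DRIVER debug message, so a misbehaving userspace can turn on drm.debug=0x02 to learn why an ioctl failed while dmesg stays quiet by default. Note the macro negates err itself, so call sites pass the positive errno and return the result directly, as in:

    if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
            return UERR(EINVAL, dev, "invalid pipe");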
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 030bedac632d..c62249b1ab3d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -65,8 +65,31 @@ static const struct fb_ops msm_fb_ops = {
.fb_destroy = msm_fbdev_fb_destroy,
};
-static int msm_fbdev_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_device *dev = helper->dev;
+ int ret;
+
+ /* Call damage handlers only if necessary */
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty) {
+ ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+ if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct drm_fb_helper_funcs msm_fbdev_helper_funcs = {
+ .fb_dirty = msm_fbdev_fb_dirty,
+};
+
+int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = helper->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -114,6 +137,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
DBG("fbi=%p, dev=%p", fbi, dev);
+ helper->funcs = &msm_fbdev_helper_funcs;
helper->fb = fb;
fbi->fbops = &msm_fb_ops;
@@ -138,119 +162,3 @@ fail:
drm_framebuffer_remove(fb);
return ret;
}
-
-static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper,
- struct drm_clip_rect *clip)
-{
- struct drm_device *dev = helper->dev;
- int ret;
-
- /* Call damage handlers only if necessary */
- if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
- return 0;
-
- if (helper->fb->funcs->dirty) {
- ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
- if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
- return ret;
- }
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
- .fb_probe = msm_fbdev_create,
- .fb_dirty = msm_fbdev_fb_dirty,
-};
-
-/*
- * struct drm_client
- */
-
-static void msm_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
- if (fb_helper->info) {
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- }
-}
-
-static int msm_fbdev_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
-
-static int msm_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- int ret;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs msm_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = msm_fbdev_client_unregister,
- .restore = msm_fbdev_client_restore,
- .hotplug = msm_fbdev_client_hotplug,
-};
-
-/* initialize fbdev helper */
-void msm_fbdev_setup(struct drm_device *dev)
-{
- struct drm_fb_helper *helper;
- int ret;
-
- if (!fbdev)
- return;
-
- drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
- drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
-
- helper = kzalloc(sizeof(*helper), GFP_KERNEL);
- if (!helper)
- return;
- drm_fb_helper_prepare(dev, helper, 32, &msm_fb_helper_funcs);
-
- ret = drm_client_init(dev, &helper->client, "fbdev", &msm_fbdev_client_funcs);
- if (ret) {
- drm_err(dev, "Failed to register client: %d\n", ret);
- goto err_drm_fb_helper_unprepare;
- }
-
- drm_client_register(&helper->client);
-
- return;
-
-err_drm_fb_helper_unprepare:
- drm_fb_helper_unprepare(helper);
- kfree(helper);
-}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index fba78193127d..dee470403036 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -20,8 +20,8 @@
/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
* error msgs for debugging, but we don't spam dmesg by default
*/
-#define SUBMIT_ERROR(submit, fmt, ...) \
- DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__)
+#define SUBMIT_ERROR(err, submit, fmt, ...) \
+ UERR(err, (submit)->dev, fmt, ##__VA_ARGS__)
/*
* Cmdstream submission:
@@ -142,8 +142,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
!(submit_bo.flags & MANDATORY_FLAGS)) {
- SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid flags: %x\n", submit_bo.flags);
i = 0;
goto out;
}
@@ -162,8 +161,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
goto out_unlock;
}
@@ -206,14 +204,12 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
default:
- SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type);
- return -EINVAL;
+ return SUBMIT_ERROR(EINVAL, submit, "invalid type: %08x\n", submit_cmd.type);
}
if (submit_cmd.size % 4) {
- SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n",
- submit_cmd.size);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer size: %u\n",
+ submit_cmd.size);
goto out;
}
@@ -371,9 +367,8 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
struct drm_gem_object **obj, uint64_t *iova)
{
if (idx >= submit->nr_bos) {
- SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n",
- idx, submit->nr_bos);
- return -EINVAL;
+ return SUBMIT_ERROR(EINVAL, submit, "invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
}
if (obj)
@@ -392,10 +387,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
uint32_t *ptr;
int ret = 0;
- if (offset % 4) {
- SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset);
- return -EINVAL;
- }
+ if (offset % 4)
+ return SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer: %u\n", offset);
/* For now, just map the entire thing. Eventually we probably
* want to do it page-by-page, w/ kmap() if not vmap()d..
@@ -414,9 +407,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
uint64_t iova;
if (submit_reloc.submit_offset % 4) {
- SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n",
- submit_reloc.submit_offset);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned reloc offset: %u\n",
+ submit_reloc.submit_offset);
goto out;
}
@@ -425,8 +417,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
if ((off >= (obj->size / 4)) ||
(off < last_offset)) {
- SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i);
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid offset %u at reloc %u\n", off, i);
goto out;
}
@@ -513,12 +504,12 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
if (syncobj_desc.point &&
!drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
- ret = -EOPNOTSUPP;
+ ret = SUBMIT_ERROR(EOPNOTSUPP, submit, "syncobj timeline unsupported");
break;
}
if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
break;
}
@@ -531,7 +522,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
syncobjs[i] =
drm_syncobj_find(file, syncobj_desc.handle);
if (!syncobjs[i]) {
- ret = -EINVAL;
+ ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj handle: %u", i);
break;
}
}
@@ -588,14 +579,14 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
post_deps[i].point = syncobj_desc.point;
if (syncobj_desc.flags) {
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid syncobj flags");
break;
}
if (syncobj_desc.point) {
if (!drm_core_check_feature(dev,
DRIVER_SYNCOBJ_TIMELINE)) {
- ret = -EOPNOTSUPP;
+ ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
break;
}
@@ -609,7 +600,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
post_deps[i].syncobj =
drm_syncobj_find(file, syncobj_desc.handle);
if (!post_deps[i].syncobj) {
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid syncobj handle");
break;
}
}
@@ -677,10 +668,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* be more clever to dispatch to appropriate gpu module:
*/
if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
- return -EINVAL;
+ return UERR(EINVAL, dev, "invalid pipe");
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
- return -EINVAL;
+ return UERR(EINVAL, dev, "invalid flags");
if (args->flags & MSM_SUBMIT_SUDO) {
if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
@@ -724,7 +715,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence) {
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid in-fence");
goto out_unlock;
}
@@ -787,10 +778,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
if (!submit->cmd[i].size ||
- ((submit->cmd[i].size + submit->cmd[i].offset) >
- obj->size / 4)) {
- SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
- ret = -EINVAL;
+ (size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) {
+ ret = UERR(EINVAL, dev, "invalid cmdstream size: %u\n",
+ submit->cmd[i].size * 4);
goto out;
}
@@ -800,8 +790,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
continue;
if (!gpu->allow_relocs) {
- SUBMIT_ERROR(submit, "relocs not allowed\n");
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "relocs not allowed\n");
goto out;
}
@@ -827,7 +816,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
(!args->fence || idr_find(&queue->fence_idr, args->fence))) {
spin_unlock(&queue->idr_lock);
idr_preload_end();
- ret = -EINVAL;
+ ret = UERR(EINVAL, dev, "invalid in-fence-sn");
goto out;
}
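One subtlety above: the cmdstream bounds check now goes through size_add() from <linux/overflow.h>, which saturates to SIZE_MAX when the addition wraps, so a crafted size/offset pair can no longer overflow past the obj->size / 4 limit. The shape of the check, sketched standalone (names hypothetical):

    #include <linux/overflow.h>

    /* size_add() returns SIZE_MAX on wrap-around, so a wrapped sum
     * can never sneak under the limit */
    if (size_add(cmd_size, cmd_offset) > obj_size / 4)
            return -EINVAL;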
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index a274b8466423..8557998e0c92 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -783,7 +783,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
- gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
+ submit->ring->cur_ctx_seqno = submit->queue->ctx->seqno;
pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
@@ -859,7 +859,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->funcs = funcs;
gpu->name = name;
- gpu->worker = kthread_create_worker(0, "gpu-worker");
+ gpu->worker = kthread_run_worker(0, "gpu-worker");
if (IS_ERR(gpu->worker)) {
ret = PTR_ERR(gpu->worker);
gpu->worker = NULL;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 1f02bb9956be..7cabc8480d7c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -194,17 +194,6 @@ struct msm_gpu {
refcount_t sysprof_active;
/**
- * cur_ctx_seqno:
- *
- * The ctx->seqno value of the last context to submit rendering,
- * and the one with current pgtables installed (for generations
- * that support per-context pgtables). Tracked by seqno rather
- * than pointer value to avoid dangling pointers, and cases where
- * a ctx can be freed and a new one created with the same address.
- */
- int cur_ctx_seqno;
-
- /**
* lock:
*
* General lock for serializing all the gpu things.
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index ea70c1c32d94..6970b0f7f457 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -140,6 +140,7 @@ void msm_devfreq_init(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
struct msm_drm_private *priv = gpu->dev->dev_private;
+ int ret;
/* We need target support to do devfreq */
if (!gpu->funcs->gpu_busy)
@@ -156,8 +157,12 @@ void msm_devfreq_init(struct msm_gpu *gpu)
mutex_init(&df->lock);
- dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
- DEV_PM_QOS_MIN_FREQUENCY, 0);
+ ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize QoS\n");
+ return;
+ }
msm_devfreq_profile.initial_freq = gpu->fast_rate;
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index ac40d857bc45..7f863282db0d 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -177,6 +177,34 @@ TRACE_EVENT(msm_gpu_resume,
TP_printk("%u", __entry->dummy)
);
+TRACE_EVENT(msm_gpu_preemption_trigger,
+ TP_PROTO(int ring_id_from, int ring_id_to),
+ TP_ARGS(ring_id_from, ring_id_to),
+ TP_STRUCT__entry(
+ __field(int, ring_id_from)
+ __field(int, ring_id_to)
+ ),
+ TP_fast_assign(
+ __entry->ring_id_from = ring_id_from;
+ __entry->ring_id_to = ring_id_to;
+ ),
+ TP_printk("preempting %u -> %u",
+ __entry->ring_id_from,
+ __entry->ring_id_to)
+);
+
+TRACE_EVENT(msm_gpu_preemption_irq,
+ TP_PROTO(u32 ring_id),
+ TP_ARGS(ring_id),
+ TP_STRUCT__entry(
+ __field(u32, ring_id)
+ ),
+ TP_fast_assign(
+ __entry->ring_id = ring_id;
+ ),
+ TP_printk("preempted to %u", __entry->ring_id)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index af6a6fcb1173..38965e12a6bf 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -5,11 +5,11 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/aperture.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <uapi/linux/sched/types.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>
@@ -237,14 +237,13 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
int ret;
/* the fw fb could be anywhere in memory */
- ret = drm_aperture_remove_framebuffers(drv);
+ ret = aperture_remove_all_conflicting_devices(drv->name);
if (ret)
return ret;
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
- priv->kms = NULL;
return ret;
}
@@ -269,7 +268,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
/* initialize event thread */
ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
ev_thread->dev = ddev;
- ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
+ ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
if (IS_ERR(ev_thread->worker)) {
ret = PTR_ERR(ev_thread->worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 1e0c54de3716..e60162744c66 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -92,12 +92,6 @@ struct msm_kms_funcs {
* Format handling:
*/
- /* do format checking on format modified through fb_cmd2 modifiers */
- int (*check_modified_format)(const struct msm_kms *kms,
- const struct msm_format *msm_fmt,
- const struct drm_mode_fb_cmd2 *cmd,
- struct drm_gem_object **bos);
-
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index faa88fd6eb4d..dcb49fd30402 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -19,13 +19,7 @@
#include "msm_mdss.h"
#include "msm_kms.h"
-#define HW_REV 0x0
-#define HW_INTR_STATUS 0x0010
-
-#define UBWC_DEC_HW_VERSION 0x58
-#define UBWC_STATIC 0x144
-#define UBWC_CTRL_2 0x150
-#define UBWC_PREDICTION_MODE 0x154
+#include <generated/mdss.xml.h>
#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
@@ -83,7 +77,7 @@ static void msm_mdss_irq(struct irq_desc *desc)
chained_irq_enter(chip, desc);
- interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);
+ interrupts = readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_INTR_STATUS);
while (interrupts) {
irq_hw_number_t hwirq = fls(interrupts) - 1;
@@ -172,49 +166,64 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+
+ if (data->ubwc_bank_spread)
+ value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
- writel_relaxed(data->ubwc_static, msm_mdss->mmio + UBWC_STATIC);
+ if (data->ubwc_enc_version == UBWC_1_0)
+ value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);
+
+ writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
- u32 value = (data->ubwc_swizzle & 0x1) |
- (data->highest_bank_bit & 0x3) << 4 |
- (data->macrotile_mode & 0x1) << 12;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle & 0x1) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
+
+ if (data->macrotile_mode)
+ value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
if (data->ubwc_enc_version == UBWC_3_0)
- value |= BIT(10);
+ value |= MDSS_UBWC_STATIC_UBWC_AMSBC;
if (data->ubwc_enc_version == UBWC_1_0)
- value |= BIT(8);
+ value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);
- writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);
+ writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
- u32 value = (data->ubwc_swizzle & 0x7) |
- (data->ubwc_static & 0x1) << 3 |
- (data->highest_bank_bit & 0x7) << 4 |
- (data->macrotile_mode & 0x1) << 12;
+ u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
+ MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
- writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);
+ if (data->ubwc_bank_spread)
+ value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
+
+ if (data->macrotile_mode)
+ value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
+
+ writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
if (data->ubwc_enc_version == UBWC_3_0) {
- writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2);
- writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE);
+ writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
+ writel_relaxed(0, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
} else {
if (data->ubwc_dec_version == UBWC_4_3)
- writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2);
+ writel_relaxed(3, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
else
- writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
- writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
+ writel_relaxed(2, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
+ writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
}
}
-#define MDSS_HW_MAJ_MIN GENMASK(31, 16)
+#define MDSS_HW_MAJ_MIN \
+ (MDSS_HW_VERSION_MAJOR__MASK | MDSS_HW_VERSION_MINOR__MASK)
#define MDSS_HW_MSM8996 0x1007
#define MDSS_HW_MSM8937 0x100e
@@ -235,7 +244,7 @@ static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_m
if (!data)
return NULL;
- hw_rev = readl_relaxed(mdss->mmio + HW_REV);
+ hw_rev = readl_relaxed(mdss->mmio + REG_MDSS_HW_VERSION);
hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev);
if (hw_rev == MDSS_HW_MSM8996 ||
@@ -334,9 +343,9 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
msm_mdss->mdss_data->ubwc_dec_version);
dev_err(msm_mdss->dev, "HW_REV: 0x%x\n",
- readl_relaxed(msm_mdss->mmio + HW_REV));
+ readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_VERSION));
dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n",
- readl_relaxed(msm_mdss->mmio + UBWC_DEC_HW_VERSION));
+ readl_relaxed(msm_mdss->mmio + REG_MDSS_UBWC_DEC_HW_VERSION));
break;
}
@@ -573,10 +582,21 @@ static const struct msm_mdss_data qcm2290_data = {
.reg_bus_bw = 76800,
};
+static const struct msm_mdss_data sa8775p_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_0,
+ .ubwc_swizzle = 4,
+ .ubwc_bank_spread = true,
+ .highest_bank_bit = 0,
+ .macrotile_mode = true,
+ .reg_bus_bw = 74000,
+};
+
static const struct msm_mdss_data sc7180_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
- .ubwc_static = 0x1e,
+ .ubwc_swizzle = 6,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 0x1,
.reg_bus_bw = 76800,
};
@@ -585,9 +605,9 @@ static const struct msm_mdss_data sc7280_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 1,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 74000,
};
@@ -595,7 +615,7 @@ static const struct msm_mdss_data sc8180x_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 76800,
};
@@ -603,9 +623,9 @@ static const struct msm_mdss_data sc8280xp_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 76800,
};
@@ -627,7 +647,7 @@ static const struct msm_mdss_data sm6350_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 6,
- .ubwc_static = 0x1e,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 1,
.reg_bus_bw = 76800,
};
@@ -650,7 +670,7 @@ static const struct msm_mdss_data sm6115_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 7,
- .ubwc_static = 0x11f,
+ .ubwc_bank_spread = true,
.highest_bank_bit = 0x1,
.reg_bus_bw = 76800,
};
@@ -662,14 +682,21 @@ static const struct msm_mdss_data sm6125_data = {
.highest_bank_bit = 1,
};
+static const struct msm_mdss_data sm6150_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .highest_bank_bit = 1,
+ .reg_bus_bw = 76800,
+};
+
static const struct msm_mdss_data sm8250_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 76800,
};
@@ -677,10 +704,10 @@ static const struct msm_mdss_data sm8350_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 74000,
};
@@ -688,10 +715,10 @@ static const struct msm_mdss_data sm8550_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_3,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
.reg_bus_bw = 57000,
};
@@ -699,10 +726,10 @@ static const struct msm_mdss_data x1e80100_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_3,
.ubwc_swizzle = 6,
- .ubwc_static = 1,
+ .ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
- .macrotile_mode = 1,
+ .macrotile_mode = true,
/* TODO: Add reg_bus_bw with real value */
};
@@ -710,6 +737,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
+ { .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data },
{ .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
@@ -718,6 +746,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
{ .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
{ .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
+ { .compatible = "qcom,sm6150-mdss", .data = &sm6150_data },
{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm7150-mdss", .data = &sm7150_data },
@@ -734,7 +763,7 @@ MODULE_DEVICE_TABLE(of, mdss_dt_match);
static struct platform_driver mdss_platform_driver = {
.probe = mdss_probe,
- .remove_new = mdss_remove,
+ .remove = mdss_remove,
.driver = {
.name = "msm-mdss",
.of_match_table = mdss_dt_match,
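The open-coded shifts for UBWC_STATIC are replaced by field helpers generated from the new mdss.xml at the bottom of this series. Going by the bitfield layout declared there (swizzle in bits 2:0, bank spread at bit 3, highest bank bit in bits 6:4, min access length in bits 9:8), the generated helpers presumably expand to something like this sketch (assumed forms, not the literal generated header):

    #define MDSS_UBWC_STATIC_UBWC_SWIZZLE(x)     (((x) & 0x7) << 0)
    #define MDSS_UBWC_STATIC_UBWC_BANK_SPREAD    (1 << 3)
    #define MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(x) (((x) & 0x7) << 4)
    #define MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(x) (((x) & 0x3) << 8)

This also explains the per-SoC data changes: the old catch-all ubwc_static word is decomposed into discrete fields, e.g. sc7180's 0x1e = swizzle 6 | bank-spread bit | highest bank bit 1.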
diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h
index 3afef4b1786d..14dc53704314 100644
--- a/drivers/gpu/drm/msm/msm_mdss.h
+++ b/drivers/gpu/drm/msm/msm_mdss.h
@@ -11,9 +11,9 @@ struct msm_mdss_data {
/* can be read from register 0x58 */
u32 ubwc_dec_version;
u32 ubwc_swizzle;
- u32 ubwc_static;
u32 highest_bank_bit;
- u32 macrotile_mode;
+ bool ubwc_bank_spread;
+ bool macrotile_mode;
u32 reg_bus_bw;
};
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 9d6655f96f0c..c803556a8f64 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -64,7 +64,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
char name[32];
int ret;
- /* We assume everwhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+ /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 0d6beb8cd39a..d1e49f701c81 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -31,10 +31,12 @@ struct msm_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
/* Introduced on A7xx */
+ volatile uint32_t bv_rptr;
volatile uint32_t bv_fence;
volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
volatile u64 ttbr0;
+ volatile u32 context_idr;
};
struct msm_cp_state {
@@ -99,6 +101,22 @@ struct msm_ringbuffer {
* preemption. Can be acquired from irq context.
*/
spinlock_t preempt_lock;
+
+ /*
+ * Whether we skipped writing wptr and it needs to be updated in the
+ * future when the ring becomes current.
+ */
+ bool restore_wptr;
+
+ /**
+ * cur_ctx_seqno:
+ *
+ * The ctx->seqno value of the last context to submit to this ring
+ * Tracked by seqno rather than pointer value to avoid dangling
+ * pointers, and cases where a ctx can be freed and a new one created
+ * with the same address.
+ */
+ int cur_ctx_seqno;
};
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 0e803125a325..7fed1de63b5d 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -18,7 +18,7 @@ int msm_file_private_set_sysprof(struct msm_file_private *ctx,
switch (sysprof) {
default:
- return -EINVAL;
+ return UERR(EINVAL, gpu->dev, "Invalid sysprof: %d", sysprof);
case 2:
pm_runtime_get_sync(&gpu->pdev->dev);
fallthrough;
@@ -161,6 +161,8 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
enum drm_sched_priority sched_prio;
+ extern int enable_preemption;
+ bool preemption_supported;
unsigned ring_nr;
int ret;
@@ -170,6 +172,11 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
if (!priv->gpu)
return -ENODEV;
+ preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0;
+
+ if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
+ return -EINVAL;
+
ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index 97608603ea62..2db425abf0f3 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -2358,7 +2358,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x0" name="REG" type="a6x_cp_protect"/>
</array>
- <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL"/>
+ <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL">
+ <bitfield name="STOP" pos="0" type="boolean"/>
+ <bitfield name="LEVEL" low="6" high="7"/>
+ <bitfield name="USES_GMEM" pos="8" type="boolean"/>
+ <bitfield name="SKIP_SAVE_RESTORE" pos="9" type="boolean"/>
+ </reg32>
<reg64 offset="0x08A1" name="CP_CONTEXT_SWITCH_SMMU_INFO"/>
<reg64 offset="0x08A3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR"/>
<reg64 offset="0x08A5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
index 6531749d30f4..3d2cc339b8f1 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
@@ -52,6 +52,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x23fd" name="GMU_DCVS_PERF_SETTING"/>
<reg32 offset="0x23fe" name="GMU_DCVS_BW_SETTING"/>
<reg32 offset="0x23ff" name="GMU_DCVS_RETURN"/>
+ <reg32 offset="0x2bf8" name="GMU_CORE_FW_VERSION">
+ <bitfield name="MAJOR" low="28" high="31"/>
+ <bitfield name="MINOR" low="16" high="27"/>
+ <bitfield name="STEP" low="0" high="15"/>
+ </reg32>
<reg32 offset="0x4c00" name="GMU_ICACHE_CONFIG"/>
<reg32 offset="0x4c01" name="GMU_DCACHE_CONFIG"/>
<reg32 offset="0x4c0f" name="GMU_SYS_BUS_CONFIG"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index cab01af55d22..55a35182858c 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -581,8 +581,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
and forcibly switch to the indicated context.
</doc>
<value name="CP_CONTEXT_SWITCH" value="0x54" variants="A6XX"/>
- <!-- Note, kgsl calls this CP_SET_AMBLE: -->
- <value name="CP_SET_CTXSWITCH_IB" value="0x55" variants="A6XX-"/>
+ <value name="CP_SET_AMBLE" value="0x55" variants="A6XX-"/>
<!--
Seems to always have the payload:
@@ -2013,42 +2012,38 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
-<domain name="CP_SET_CTXSWITCH_IB" width="32">
+<domain name="CP_SET_AMBLE" width="32">
<doc>
- Used by the userspace driver to set various IB's which are
- executed during context save/restore for handling
- state that isn't restored by the
- context switch routine itself.
- </doc>
- <enum name="ctxswitch_ib">
- <value name="RESTORE_IB" value="0">
+ Used by the userspace and kernel drivers to set various IB's
+ which are executed during context save/restore for handling
+ state that isn't restored by the context switch routine itself.
+ </doc>
+ <enum name="amble_type">
+ <value name="PREAMBLE_AMBLE_TYPE" value="0">
<doc>Executed unconditionally when switching back to the context.</doc>
</value>
- <value name="YIELD_RESTORE_IB" value="1">
+ <value name="BIN_PREAMBLE_AMBLE_TYPE" value="1">
<doc>
Executed when switching back after switching
away during execution of
- a CP_SET_MARKER packet with RM6_YIELD as the
- payload *and* the normal save routine was
- bypassed for a shorter one. I think this is
- connected to the "skipsaverestore" bit set by
- the kernel when preempting.
+ a CP_SET_MARKER packet with RM6_BIN_RENDER_END as the
+ payload *and* skipsaverestore is set. This is
+ expected to restore static register values not
+ saved when skipsaverestore is set.
</doc>
</value>
- <value name="SAVE_IB" value="2">
+ <value name="POSTAMBLE_AMBLE_TYPE" value="2">
<doc>
Executed when switching away from the context,
except for context switches initiated via
CP_YIELD.
</doc>
</value>
- <value name="RB_SAVE_IB" value="3">
+ <value name="KMD_AMBLE_TYPE" value="3">
<doc>
This can only be set by the RB (i.e. the kernel)
and executes with protected mode off, but
- is otherwise similar to SAVE_IB.
-
- Note, kgsl calls this CP_KMD_AMBLE_TYPE
+ is otherwise similar to POSTAMBLE_AMBLE_TYPE.
</doc>
</value>
</enum>
@@ -2060,7 +2055,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
<reg32 offset="2" name="2">
<bitfield name="DWORDS" low="0" high="19" type="uint"/>
- <bitfield name="TYPE" low="20" high="21" type="ctxswitch_ib"/>
+ <bitfield name="TYPE" low="20" high="21" type="amble_type"/>
</reg32>
</domain>
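
Dword 2 of a CP_SET_AMBLE packet carries the IB length in DWORDS (bits 19:0) and the amble type in bits 21:20. A sketch of packing that dword; the enum mirrors the XML values above, and the helper name is illustrative:

    #include <stdint.h>

    enum amble_type {
            PREAMBLE_AMBLE_TYPE     = 0,
            BIN_PREAMBLE_AMBLE_TYPE = 1,
            POSTAMBLE_AMBLE_TYPE    = 2,
            KMD_AMBLE_TYPE          = 3,
    };

    static uint32_t cp_set_amble_dword2(uint32_t dwords, enum amble_type type)
    {
            return (dwords & 0xfffffu) | ((uint32_t)type << 20);
    }
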
diff --git a/drivers/gpu/drm/msm/registers/display/mdp5.xml b/drivers/gpu/drm/msm/registers/display/mdp5.xml
index 92f3263af170..8c9c4af350aa 100644
--- a/drivers/gpu/drm/msm/registers/display/mdp5.xml
+++ b/drivers/gpu/drm/msm/registers/display/mdp5.xml
@@ -9,22 +9,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<domain name="VBIF" width="32">
</domain>
-<domain name="MDSS" width="32">
- <reg32 offset="0x00000" name="HW_VERSION">
- <bitfield name="STEP" low="0" high="15" type="uint"/>
- <bitfield name="MINOR" low="16" high="27" type="uint"/>
- <bitfield name="MAJOR" low="28" high="31" type="uint"/>
- </reg32>
-
- <reg32 offset="0x00010" name="HW_INTR_STATUS">
- <bitfield name="INTR_MDP" pos="0" type="boolean"/>
- <bitfield name="INTR_DSI0" pos="4" type="boolean"/>
- <bitfield name="INTR_DSI1" pos="5" type="boolean"/>
- <bitfield name="INTR_HDMI" pos="8" type="boolean"/>
- <bitfield name="INTR_EDP" pos="12" type="boolean"/>
- </reg32>
-</domain>
-
<domain name="MDP5" width="32">
<enum name="mdp5_intf_type">
diff --git a/drivers/gpu/drm/msm/registers/display/mdss.xml b/drivers/gpu/drm/msm/registers/display/mdss.xml
new file mode 100644
index 000000000000..6e9f81cd4690
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/display/mdss.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+
+<domain name="MDSS" width="32">
+ <reg32 offset="0x00000" name="HW_VERSION">
+ <bitfield name="STEP" low="0" high="15" type="uint"/>
+ <bitfield name="MINOR" low="16" high="27" type="uint"/>
+ <bitfield name="MAJOR" low="28" high="31" type="uint"/>
+ </reg32>
+
+ <reg32 offset="0x00010" name="HW_INTR_STATUS">
+ <bitfield name="INTR_MDP" pos="0" type="boolean"/>
+ <bitfield name="INTR_DSI0" pos="4" type="boolean"/>
+ <bitfield name="INTR_DSI1" pos="5" type="boolean"/>
+ <bitfield name="INTR_HDMI" pos="8" type="boolean"/>
+ <bitfield name="INTR_EDP" pos="12" type="boolean"/>
+ </reg32>
+
+ <reg32 offset="0x00058" name="UBWC_DEC_HW_VERSION"/>
+
+ <reg32 offset="0x00144" name="UBWC_STATIC">
+ <bitfield name="UBWC_SWIZZLE" low="0" high="2"/>
+ <bitfield name="UBWC_BANK_SPREAD" pos="3"/>
+ <!-- high=5 for UBWC < 4.0 -->
+ <bitfield name="HIGHEST_BANK_BIT" low="4" high="6"/>
+ <bitfield name="UBWC_MIN_ACC_LEN" low="8" high="9"/>
+ <bitfield name="UBWC_AMSBC" pos="10"/>
+ <bitfield name="MACROTILE_MODE" pos="12"/>
+ </reg32>
+
+ <reg32 offset="0x00150" name="UBWC_CTRL_2"/>
+ <reg32 offset="0x00154" name="UBWC_PREDICTION_MODE"/>
+</domain>
+
+</database>
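
UBWC_STATIC packs several UBWC tuning fields into one register. A hedged sketch of composing it from the bitfield positions above (the helper name is illustrative, not from this patch):

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t mdss_ubwc_static(unsigned int swizzle, bool bank_spread,
                                     unsigned int highest_bank_bit,
                                     unsigned int min_acc_len, bool amsbc,
                                     bool macrotile_mode)
    {
            uint32_t val = 0;

            val |= (swizzle & 0x7u) << 0;             /* UBWC_SWIZZLE, 2:0 */
            val |= (uint32_t)bank_spread << 3;        /* UBWC_BANK_SPREAD */
            val |= (highest_bank_bit & 0x7u) << 4;    /* 6:4 (only 5:4 before UBWC 4.0) */
            val |= (min_acc_len & 0x3u) << 8;         /* UBWC_MIN_ACC_LEN, 9:8 */
            val |= (uint32_t)amsbc << 10;             /* UBWC_AMSBC */
            val |= (uint32_t)macrotile_mode << 12;    /* MACROTILE_MODE */
            return val;
    }
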
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 518b53345354..264e74f45554 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -9,6 +9,7 @@ config DRM_MXSFB
depends on DRM && OF
depends on COMMON_CLK
depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST
+ select DRM_CLIENT_SELECTION
select DRM_MXS
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
@@ -26,6 +27,7 @@ config DRM_IMX_LCDIF
depends on DRM && OF
depends on COMMON_CLK
depends on ARCH_MXC || COMPILE_TEST
+ select DRM_CLIENT_SELECTION
select DRM_MXS
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index 0f895b8a99d6..8ee00f59ca82 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
@@ -243,10 +244,10 @@ DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver lcdif_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &fops,
.name = "imx-lcdif",
.desc = "i.MX LCDIF Controller DRM",
- .date = "20220417",
.major = 1,
.minor = 0,
};
@@ -275,7 +276,7 @@ static int lcdif_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_dma_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return 0;
@@ -366,7 +367,7 @@ static const struct dev_pm_ops lcdif_pm_ops = {
static struct platform_driver lcdif_platform_driver = {
.probe = lcdif_probe,
- .remove_new = lcdif_remove,
+ .remove = lcdif_remove,
.shutdown = lcdif_shutdown,
.driver = {
.name = "imx-lcdif",
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index cb5ce4e81fc7..59020862cf65 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -17,6 +17,7 @@
#include <linux/property.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
@@ -331,10 +332,10 @@ DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver mxsfb_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &fops,
.name = "mxsfb-drm",
.desc = "MXSFB Controller DRM",
- .date = "20160824",
.major = 1,
.minor = 0,
};
@@ -364,7 +365,7 @@ static int mxsfb_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_dma_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return 0;
@@ -415,7 +416,7 @@ static const struct dev_pm_ops mxsfb_pm_ops = {
static struct platform_driver mxsfb_platform_driver = {
.probe = mxsfb_probe,
- .remove_new = mxsfb_remove,
+ .remove = mxsfb_remove,
.shutdown = mxsfb_shutdown,
.driver = {
.name = "mxsfb",
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ceef470c9fbf..ce840300578d 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -4,6 +4,7 @@ config DRM_NOUVEAU
depends on DRM && PCI && MMU
select IOMMU_API
select FW_LOADER
+ select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index eed579a6c858..62d72b7a8d04 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -992,8 +992,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
if (!mst_state->pbn_div.full) {
struct nouveau_encoder *outp = mstc->mstm->outp;
- mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
- outp->dp.link_bw, outp->dp.link_nr);
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(outp->dp.link_bw, outp->dp.link_nr);
}
slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
@@ -1265,8 +1264,8 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
mstc->mstm = mstm;
mstc->port = port;
- ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
- DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_dynamic_init(dev, &mstc->connector, &nv50_mstc,
+ DRM_MODE_CONNECTOR_DisplayPort, NULL);
if (ret) {
kfree(*pmstc);
*pmstc = NULL;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/tile.h b/drivers/gpu/drm/nouveau/dispnv50/tile.h
new file mode 100644
index 000000000000..e2be82830cf7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/tile.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NV50_TILE_H__
+#define __NV50_TILE_H__
+
+#include <linux/types.h>
+#include <linux/math.h>
+
+/*
+ * Tiling parameters for NV50+.
+ * GOB = Group of bytes, the main unit for tiling blocks.
+ * Tiling blocks are a power-of-2 number of GOBs.
+ * All GOBs and blocks have the same width: 64 bytes (so 16 pixels at 32bpp).
+ * tile_mode is the log2 of the number of GOBs per block.
+ */
+
+#define NV_TILE_GOB_HEIGHT_TESLA 4 /* 4 x 64 bytes = 256 bytes for a GOB on Tesla */
+#define NV_TILE_GOB_HEIGHT 8 /* 8 x 64 bytes = 512 bytes for a GOB on Fermi and later */
+#define NV_TILE_GOB_WIDTH_BYTES 64
+
+/* Number of blocks to cover the width of the framebuffer */
+static inline u32 nouveau_get_width_in_blocks(u32 stride)
+{
+ return DIV_ROUND_UP(stride, NV_TILE_GOB_WIDTH_BYTES);
+}
+
+/* Return the height in pixels of one GOB */
+static inline u32 nouveau_get_gob_height(u16 family)
+{
+ if (family == NV_DEVICE_INFO_V0_TESLA)
+ return NV_TILE_GOB_HEIGHT_TESLA;
+ else
+ return NV_TILE_GOB_HEIGHT;
+}
+
+/* Number of blocks to cover the height of the framebuffer */
+static inline u32 nouveau_get_height_in_blocks(u32 height, u32 gobs_in_block, u16 family)
+{
+ return DIV_ROUND_UP(height, nouveau_get_gob_height(family) * gobs_in_block);
+}
+
+/* Return the GOB size in bytes */
+static inline u32 nouveau_get_gob_size(u16 family)
+{
+ return nouveau_get_gob_height(family) * NV_TILE_GOB_WIDTH_BYTES;
+}
+
+/* Return the number of GOB in a block */
+static inline int nouveau_get_gobs_in_block(u32 tile_mode, u16 chipset)
+{
+ if (chipset >= 0xc0)
+ return 1 << (tile_mode >> 4);
+ return 1 << tile_mode;
+}
+
+/* Return true if tile_mode is invalid */
+static inline bool nouveau_check_tile_mode(u32 tile_mode, u16 chipset)
+{
+ if (chipset >= 0xc0)
+ return (tile_mode & 0xfffff0f);
+ return (tile_mode & 0xfffffff0);
+}
+
+#endif
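
Taken together, these helpers compute the size of a block-linear layout. A sketch of how they compose (this mirrors the bl_size computation in the nouveau_display.c hunk further below, assuming this header is included):

    static inline u64
    nv50_bl_size(u32 stride, u32 height, u32 tile_mode, u16 chipset, u16 family)
    {
            u32 gobs_in_block = nouveau_get_gobs_in_block(tile_mode, chipset);
            u32 bw = nouveau_get_width_in_blocks(stride);
            u32 bh = nouveau_get_height_in_blocks(height, gobs_in_block, family);

            return (u64)bw * bh * gobs_in_block * nouveau_get_gob_size(family);
    }
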
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 7a2cceaee6e9..f6be426dd525 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -30,14 +30,20 @@
#include <nvhw/class/cl507e.h>
#include <nvhw/class/clc37e.h>
+#include <linux/iosys-map.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/ttm/ttm_bo.h>
#include "nouveau_bo.h"
#include "nouveau_gem.h"
+#include "tile.h"
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
@@ -577,6 +583,114 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
return 0;
}
+/* Only used by drm_panic get_scanout_buffer() and set_pixel(), so it is
+ * protected by the drm panic spinlock
+ */
+static u32 nv50_panic_blk_h;
+
+/* Return the framebuffer offset of the start of the block where pixel(x,y) is */
+static u32
+nv50_get_block_off(unsigned int x, unsigned int y, unsigned int pitch)
+{
+ u32 blk_x, blk_y, blk_columns;
+
+ blk_columns = nouveau_get_width_in_blocks(pitch);
+ blk_x = (x * 4) / NV_TILE_GOB_WIDTH_BYTES;
+ blk_y = y / nv50_panic_blk_h;
+
+ return ((blk_y * blk_columns) + blk_x) * NV_TILE_GOB_WIDTH_BYTES * nv50_panic_blk_h;
+}
+
+/* Turing and later have two levels of tiles inside the block */
+static void
+nv50_set_pixel_swizzle(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ u32 blk_off, off, swizzle;
+
+ blk_off = nv50_get_block_off(x, y, sb->pitch[0]);
+
+ y = y % nv50_panic_blk_h;
+
+	/* Inside the block, use the fast address swizzle to compute the offset.
+	 * For NVIDIA blocklinear, the bit order is yn..y3 x3 y2 x2 y1 y0 x1 x0.
+	 */
+ swizzle = (x & 3) | (y & 3) << 2 | (x & 4) << 2 | (y & 4) << 3;
+ swizzle |= (x & 8) << 3 | (y >> 3) << 7;
+ off = blk_off + swizzle * 4;
+
+ iosys_map_wr(&sb->map[0], off, u32, color);
+}
+
+static void
+nv50_set_pixel(struct drm_scanout_buffer *sb, unsigned int x, unsigned int y,
+ u32 color)
+{
+ u32 blk_off, off;
+
+ blk_off = nv50_get_block_off(x, y, sb->width);
+
+ x = x % (NV_TILE_GOB_WIDTH_BYTES / 4);
+ y = y % nv50_panic_blk_h;
+ off = blk_off + x * 4 + y * NV_TILE_GOB_WIDTH_BYTES;
+
+ iosys_map_wr(&sb->map[0], off, u32, color);
+}
+
+static int
+nv50_wndw_get_scanout_buffer(struct drm_plane *plane, struct drm_scanout_buffer *sb)
+{
+ struct drm_framebuffer *fb;
+ struct nouveau_bo *nvbo;
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ u16 chipset = drm->client.device.info.chipset;
+ u8 family = drm->client.device.info.family;
+ u32 tile_mode;
+ u8 kind;
+
+ if (!plane->state || !plane->state->fb)
+ return -EINVAL;
+
+ fb = plane->state->fb;
+ nvbo = nouveau_gem_object(fb->obj[0]);
+
+	/* Compressed formats and multi-plane framebuffers aren't supported yet. */
+ if (nvbo->comp || fb->format->num_planes != 1)
+ return -EOPNOTSUPP;
+
+ if (nouveau_bo_map(nvbo)) {
+ drm_warn(plane->dev, "nouveau bo map failed, panic won't be displayed\n");
+ return -ENOMEM;
+ }
+
+ if (nvbo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK)
+ iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)nvbo->kmap.virtual);
+ else
+ iosys_map_set_vaddr(&sb->map[0], nvbo->kmap.virtual);
+
+ sb->height = fb->height;
+ sb->width = fb->width;
+ sb->pitch[0] = fb->pitches[0];
+ sb->format = fb->format;
+
+ nouveau_framebuffer_get_layout(fb, &tile_mode, &kind);
+ if (kind) {
+ /* If tiling is enabled, use set_pixel() to display correctly.
+		 * Only 32-bit formats are handled for now.
+ */
+ if (fb->format->cpp[0] != 4)
+ return -EOPNOTSUPP;
+ nv50_panic_blk_h = nouveau_get_gob_height(family) *
+ nouveau_get_gobs_in_block(tile_mode, chipset);
+
+ if (chipset >= 0x160)
+ sb->set_pixel = nv50_set_pixel_swizzle;
+ else
+ sb->set_pixel = nv50_set_pixel;
+ }
+ return 0;
+}
+
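
A worked example of the swizzle computed above, checking one coordinate pair against the documented bit order (standalone C, kernel types replaced with stdint):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t swizzle(uint32_t x, uint32_t y)
    {
            uint32_t s = (x & 3) | (y & 3) << 2 | (x & 4) << 2 | (y & 4) << 3;

            s |= (x & 8) << 3 | (y >> 3) << 7;
            return s;
    }

    int main(void)
    {
            /* x=13 (x3..x0 = 1101), y=9 (y3..y0 = 1001); interleaving as
             * y3 x3 y2 x2 y1 y0 x1 x0 gives 11010101 = 0xd5. */
            assert(swizzle(13, 9) == 0xd5);
            return 0;
    }
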
static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
.prepare_fb = nv50_wndw_prepare_fb,
@@ -584,6 +698,14 @@ nv50_wndw_helper = {
.atomic_check = nv50_wndw_atomic_check,
};
+static const struct drm_plane_helper_funcs
+nv50_wndw_primary_helper = {
+ .prepare_fb = nv50_wndw_prepare_fb,
+ .cleanup_fb = nv50_wndw_cleanup_fb,
+ .atomic_check = nv50_wndw_atomic_check,
+ .get_scanout_buffer = nv50_wndw_get_scanout_buffer,
+};
+
static void
nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
@@ -732,7 +854,10 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
return ret;
}
- drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(&wndw->plane, &nv50_wndw_primary_helper);
+ else
+ drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
if (wndw->func->ilut) {
ret = nv50_lut_init(disp, mmu, &wndw->ilut);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/log.h b/drivers/gpu/drm/nouveau/include/nvif/log.h
new file mode 100644
index 000000000000..64f6f8fc6141
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/log.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/* SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __NVIF_LOG_H__
+#define __NVIF_LOG_H__
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * nvif_log - structure for tracking logging buffers
+ * @entry: an entry in a list of struct nvif_logs
+ * @shutdown: pointer to function to call to clean up
+ *
+ * Structure used to track logging buffers so that they can be cleaned up
+ * when the module exits.
+ *
+ * The @shutdown function is called when the module exits. It should free all
+ * backing resources, such as logging buffers.
+ */
+struct nvif_log {
+ struct list_head entry;
+ void (*shutdown)(struct nvif_log *log);
+};
+
+/**
+ * nvif_logs - linked list of nvif_log objects
+ */
+struct nvif_logs {
+ struct list_head head;
+};
+
+#define NVIF_LOGS_DECLARE(logs) \
+ struct nvif_logs logs = { LIST_HEAD_INIT(logs.head) }
+
+static inline void nvif_log_shutdown(struct nvif_logs *logs)
+{
+ if (!list_empty(&logs->head)) {
+ struct nvif_log *log, *n;
+
+ list_for_each_entry_safe(log, n, &logs->head, entry) {
+ /* shutdown() should also delete the log entry */
+ log->shutdown(log);
+ }
+ }
+}
+
+extern struct nvif_logs gsp_logs;
+
+#endif
+
+#endif
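
A hedged sketch of how a GSP subsystem is expected to use this interface: allocate a wrapper, hook it into gsp_logs, and free everything from the shutdown callback (names such as my_log are illustrative, not from this patch):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <nvif/log.h>

    struct my_log {
            struct nvif_log log;
            void *buf;
    };

    static void my_log_shutdown(struct nvif_log *l)
    {
            struct my_log *mine = container_of(l, struct my_log, log);

            list_del(&mine->log.entry);     /* shutdown() deletes the entry */
            kfree(mine->buf);
            kfree(mine);
    }

    static void my_log_register(struct my_log *mine)
    {
            mine->log.shutdown = my_log_shutdown;
            list_add(&mine->log.entry, &gsp_logs.head);
    }
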
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 9e6f39912368..5c5f4607fcc9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -5,10 +5,13 @@
#include <core/falcon.h>
#include <core/firmware.h>
+#include <linux/debugfs.h>
+
#define GSP_PAGE_SHIFT 12
#define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT)
struct nvkm_gsp_mem {
+ struct device *dev;
size_t size;
void *data;
dma_addr_t addr;
@@ -210,7 +213,7 @@ struct nvkm_gsp {
} *rm;
struct {
- struct mutex mutex;;
+ struct mutex mutex;
struct idr idr;
} client_id;
@@ -219,6 +222,24 @@ struct nvkm_gsp {
/* The size of the registry RPC */
size_t registry_rpc_size;
+
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * Logging buffers in debugfs. The wrapper objects need to remain
+ * in memory until the dentry is deleted.
+ */
+ struct {
+ struct dentry *parent;
+ struct dentry *init;
+ struct dentry *rm;
+ struct dentry *intr;
+ struct dentry *pmu;
+ } debugfs;
+ struct debugfs_blob_wrapper blob_init;
+ struct debugfs_blob_wrapper blob_intr;
+ struct debugfs_blob_wrapper blob_rm;
+ struct debugfs_blob_wrapper blob_pmu;
+#endif
};
static inline bool
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8f0c69aad248..21b56cc7605c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -384,7 +384,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
if (ret < 0)
return NULL;
- return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ return edid;
}
bool nouveau_acpi_video_backlight_use_native(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 2cb2e5675807..cd659b9fd1d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -279,7 +279,6 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
const u64 plength = 0x10000;
const u64 ioffset = plength;
const u64 ilength = 0x02000;
- char name[TASK_COMM_LEN];
int cid, ret;
u64 size;
@@ -338,8 +337,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
chan->userd = &chan->user;
}
- get_task_comm(name, current);
- snprintf(args.name, sizeof(args.name), "%s[%d]", name, task_pid_nr(current));
+ snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current));
ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
&args, sizeof(args), &chan->user);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index b06aa473102b..8d5c9c74cbb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -477,14 +477,14 @@ nouveau_connector_of_detect(struct drm_connector *connector)
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct device_node *cn, *dn = pci_device_to_OF_node(pdev);
+ struct device_node *dn = pci_device_to_OF_node(pdev);
if (!dn ||
!((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) ||
(nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG))))
return NULL;
- for_each_child_of_node(dn, cn) {
+ for_each_child_of_node_scoped(dn, cn) {
const char *name = of_get_property(cn, "name", NULL);
const void *edid = of_get_property(cn, "EDID", NULL);
int idx = name ? name[strlen(name) - 1] - 'A' : 0;
@@ -492,7 +492,6 @@ nouveau_connector_of_detect(struct drm_connector *connector)
if (nv_encoder->dcb->i2c_index == idx && edid) {
nv_connector->edid =
kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
- of_node_put(cn);
return nv_encoder;
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index e83db051e851..200e65a7cefc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -313,3 +313,19 @@ nouveau_debugfs_fini(struct nouveau_drm *drm)
kfree(drm->debugfs);
drm->debugfs = NULL;
}
+
+int
+nouveau_module_debugfs_init(void)
+{
+ nouveau_debugfs_root = debugfs_create_dir("nouveau", NULL);
+ if (IS_ERR(nouveau_debugfs_root))
+ return PTR_ERR(nouveau_debugfs_root);
+
+ return 0;
+}
+
+void
+nouveau_module_debugfs_fini(void)
+{
+ debugfs_remove(nouveau_debugfs_root);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index 77f0323b38ba..b7617b344ee2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -21,6 +21,11 @@ nouveau_debugfs(struct drm_device *dev)
extern void nouveau_drm_debugfs_init(struct drm_minor *);
extern int nouveau_debugfs_init(struct nouveau_drm *);
extern void nouveau_debugfs_fini(struct nouveau_drm *);
+
+extern struct dentry *nouveau_debugfs_root;
+
+int nouveau_module_debugfs_init(void);
+void nouveau_module_debugfs_fini(void);
#else
static inline void
nouveau_drm_debugfs_init(struct drm_minor *minor)
@@ -37,6 +42,17 @@ nouveau_debugfs_fini(struct nouveau_drm *drm)
{
}
+static inline int
+nouveau_module_debugfs_init(void)
+{
+ return 0;
+}
+
+static inline void
+nouveau_module_debugfs_fini(void)
+{
+}
+
#endif
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e2fd561cd23f..add006fc8d81 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -28,8 +28,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
@@ -44,6 +44,7 @@
#include <nvif/if0011.h>
#include <nvif/if0013.h>
#include <dispnv50/crc.h>
+#include <dispnv50/tile.h>
int
nouveau_display_vblank_enable(struct drm_crtc *crtc)
@@ -220,69 +221,29 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm,
return 0;
}
-static inline uint32_t
-nouveau_get_width_in_blocks(uint32_t stride)
-{
- /* GOBs per block in the x direction is always one, and GOBs are
- * 64 bytes wide
- */
- static const uint32_t log_block_width = 6;
-
- return (stride + (1 << log_block_width) - 1) >> log_block_width;
-}
-
-static inline uint32_t
-nouveau_get_height_in_blocks(struct nouveau_drm *drm,
- uint32_t height,
- uint32_t log_block_height_in_gobs)
-{
- uint32_t log_gob_height;
- uint32_t log_block_height;
-
- BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
-
- if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
- log_gob_height = 2;
- else
- log_gob_height = 3;
-
- log_block_height = log_block_height_in_gobs + log_gob_height;
-
- return (height + (1 << log_block_height) - 1) >> log_block_height;
-}
-
static int
nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
uint32_t offset, uint32_t stride, uint32_t h,
uint32_t tile_mode)
{
- uint32_t gob_size, bw, bh;
+ uint32_t gob_size, bw, bh, gobs_in_block;
uint64_t bl_size;
BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
- if (drm->client.device.info.chipset >= 0xc0) {
- if (tile_mode & 0xF)
- return -EINVAL;
- tile_mode >>= 4;
- }
-
- if (tile_mode & 0xFFFFFFF0)
+ if (nouveau_check_tile_mode(tile_mode, drm->client.device.info.chipset))
return -EINVAL;
- if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
- gob_size = 256;
- else
- gob_size = 512;
-
+ gobs_in_block = nouveau_get_gobs_in_block(tile_mode, drm->client.device.info.chipset);
bw = nouveau_get_width_in_blocks(stride);
- bh = nouveau_get_height_in_blocks(drm, h, tile_mode);
+ bh = nouveau_get_height_in_blocks(h, gobs_in_block, drm->client.device.info.family);
+ gob_size = nouveau_get_gob_size(drm->client.device.info.family);
- bl_size = bw * bh * (1 << tile_mode) * gob_size;
+ bl_size = bw * bh * gobs_in_block * gob_size;
- DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
- offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
- nvbo->bo.base.size);
+ DRM_DEBUG_KMS("offset=%u stride=%u h=%u gobs_in_block=%u bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
+ offset, stride, h, gobs_in_block, bw, bh, gob_size,
+ bl_size, nvbo->bo.base.size);
if (bl_size + offset > nvbo->bo.base.size)
return -ERANGE;
@@ -804,8 +765,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
- /* Disable console. */
- drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true);
+ drm_client_dev_suspend(dev, false);
if (drm_drv_uses_atomic_modeset(dev)) {
if (!runtime) {
@@ -836,8 +796,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
}
}
- /* Enable console. */
- drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false);
+ drm_client_dev_resume(dev, false);
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 1f2d649f4b96..1a072568cef6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
if (!spage || !(src & MIGRATE_PFN_MIGRATE))
goto done;
- dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+ dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
if (!dpage)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index f6e78dba594f..5664c4c71faf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs
*/
+#include <linux/aperture.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -29,8 +30,9 @@
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
#include <linux/dynamic_debug.h>
+#include <linux/debugfs.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_ttm_helper.h>
@@ -46,6 +48,7 @@
#include <nvif/fifo.h>
#include <nvif/push006c.h>
#include <nvif/user.h>
+#include <nvif/log.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
@@ -112,6 +115,20 @@ static struct drm_driver driver_stub;
static struct drm_driver driver_pci;
static struct drm_driver driver_platform;
+#ifdef CONFIG_DEBUG_FS
+struct dentry *nouveau_debugfs_root;
+
+/**
+ * gsp_logs - list of nvif_log GSP-RM logging buffers
+ *
+ * Head pointer to a list of nvif_log buffers that is created for each GPU
+ * upon GSP shutdown if the "keep_gsp_logging" command-line parameter is
+ * specified. This is used to track the alternative debugfs entries for the
+ * GSP-RM logs.
+ */
+NVIF_LOGS_DECLARE(gsp_logs);
+#endif
+
static u64
nouveau_pci_name(struct pci_dev *pdev)
{
@@ -331,7 +348,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
return;
}
- ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
+ ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
}
@@ -836,6 +853,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
{
struct nvkm_device *device;
struct nouveau_drm *drm;
+ const struct drm_format_info *format;
int ret;
if (vga_switcheroo_client_probe_defer(pdev))
@@ -849,7 +867,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
return ret;
/* Remove conflicting drivers (vesafb, efifb etc). */
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci);
+ ret = aperture_remove_conflicting_pci_devices(pdev, driver_pci.name);
if (ret)
return ret;
@@ -873,9 +891,11 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
goto fail_pci;
if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
- drm_fbdev_ttm_setup(drm->dev, 8);
+ format = drm_format_info(DRM_FORMAT_C8);
else
- drm_fbdev_ttm_setup(drm->dev, 32);
+ format = NULL;
+
+ drm_client_setup(drm->dev, format);
quirk_broken_nv_runpm(pdev);
return 0;
@@ -1155,7 +1175,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
- char name[32], tmpname[TASK_COMM_LEN];
+ char name[32];
int ret;
/* need to bring up power immediately if opening device */
@@ -1165,10 +1185,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
return ret;
}
- get_task_comm(tmpname, current);
rcu_read_lock();
snprintf(name, sizeof(name), "%s[%d]",
- tmpname, pid_nr(rcu_dereference(fpriv->pid)));
+ current->comm, pid_nr(rcu_dereference(fpriv->pid)));
rcu_read_unlock();
if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
@@ -1318,13 +1337,10 @@ driver_stub = {
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
+ DRM_FBDEV_TTM_DRIVER_OPS,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
-#ifdef GIT_REVISION
- .date = GIT_REVISION,
-#else
- .date = DRIVER_DATE,
-#endif
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -1417,6 +1433,8 @@ err_free:
static int __init
nouveau_drm_init(void)
{
+ int ret;
+
driver_pci = driver_stub;
driver_platform = driver_stub;
@@ -1430,6 +1448,10 @@ nouveau_drm_init(void)
if (!nouveau_modeset)
return 0;
+ ret = nouveau_module_debugfs_init();
+ if (ret)
+ return ret;
+
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
platform_driver_register(&nouveau_platform_driver);
#endif
@@ -1438,10 +1460,14 @@ nouveau_drm_init(void)
nouveau_backlight_ctor();
#ifdef CONFIG_PCI
- return pci_register_driver(&nouveau_drm_pci_driver);
-#else
- return 0;
+ ret = pci_register_driver(&nouveau_drm_pci_driver);
+ if (ret) {
+ nouveau_module_debugfs_fini();
+ return ret;
+ }
#endif
+
+ return 0;
}
static void __exit
@@ -1461,6 +1487,12 @@ nouveau_drm_exit(void)
#endif
if (IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM))
mmu_notifier_synchronize();
+
+#ifdef CONFIG_DEBUG_FS
+ nvif_log_shutdown(&gsp_logs);
+#endif
+
+ nouveau_module_debugfs_fini();
}
module_init(nouveau_drm_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 685d6ca3d8aa..55abc510067b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -7,7 +7,6 @@
#define DRIVER_NAME "nouveau"
#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla/Tegra K1+"
-#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 4
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 09686d038d60..7cc84472cece 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -387,11 +387,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
+ bool local;
rcu_read_lock();
prev = rcu_dereference(f->channel);
- if (prev && (prev == chan ||
- fctx->sync(f, prev, chan) == 0))
+ local = prev && prev->cli->drm == chan->cli->drm;
+ if (local && (prev == chan ||
+ fctx->sync(f, prev, chan) == 0))
must_wait = false;
rcu_read_unlock();
if (!must_wait)
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 829fdc6e4031..a5ce8eb4a3be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -86,5 +86,5 @@ struct platform_driver nouveau_platform_driver = {
.of_match_table = of_match_ptr(nouveau_platform_match),
},
.probe = nouveau_platform_probe,
- .remove_new = nouveau_platform_remove,
+ .remove = nouveau_platform_remove,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index eb6c3f9a01f5..4412f2711fb5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -379,7 +379,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
else
NV_PRINTK(warn, job->cli, "Generic job timeout.\n");
- drm_sched_start(sched);
+ drm_sched_start(sched, 0);
return stat;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index ab4e11dc0b8a..a6c375a24154 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -2,7 +2,7 @@
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_client_event.h>
#include "nouveau_drv.h"
#include "nouveau_acpi.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index d1c294f00665..78a83f904bbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -120,8 +120,8 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
mutex_init(&tdev->iommu.mutex);
if (device_iommu_mapped(dev)) {
- tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
- if (!tdev->iommu.domain)
+ tdev->iommu.domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(tdev->iommu.domain))
goto error;
/*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index 841e3b69fcaf..5a0c9b8a79f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -31,6 +31,7 @@ mcp77_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
+ .bl = &nv50_sor_bl,
.hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
index 027867c2a8c5..99110ab2f44d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
@@ -992,7 +992,7 @@ r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8
ctrl->data = data;
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret == -EAGAIN && ctrl->retryTimeMs) {
+ if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
/*
* Device (likely an eDP panel) isn't ready yet, wait for the time specified
* by GSP before retrying again
@@ -1060,33 +1060,44 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
u8 size = *psize;
int ret;
+ int retries;
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ for (retries = 0; retries < 3; ++retries) {
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
- ctrl->subDeviceInstance = 0;
- ctrl->displayId = BIT(outp->index);
- ctrl->bAddrOnly = !size;
- ctrl->cmd = type;
- if (ctrl->bAddrOnly) {
- ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
- ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
- }
- ctrl->addr = addr;
- ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
- memcpy(ctrl->data, data, size);
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->bAddrOnly = !size;
+ ctrl->cmd = type;
+ if (ctrl->bAddrOnly) {
+ ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
+ ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, I2C_MOT, FALSE);
+ }
+ ctrl->addr = addr;
+ ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
+ memcpy(ctrl->data, data, size);
- ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
- if (ret) {
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return ret;
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if ((ret == -EAGAIN || ret == -EBUSY) && ctrl->retryTimeMs) {
+ /*
+ * Device (likely an eDP panel) isn't ready yet, wait for the time specified
+ * by GSP before retrying again
+ */
+ nvkm_debug(&disp->engine.subdev,
+ "Waiting %dms for GSP LT panel delay before retrying in AUX\n",
+ ctrl->retryTimeMs);
+ msleep(ctrl->retryTimeMs);
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ } else {
+ memcpy(data, ctrl->data, size);
+ *psize = ctrl->size;
+ ret = ctrl->replyType;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ break;
+ }
}
-
- memcpy(data, ctrl->data, size);
- *psize = ctrl->size;
- ret = ctrl->replyType;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 060c74a80eb1..3ea447f6a45b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -443,6 +443,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
ret = gf100_grctx_generate(gr, chan, fifoch->inst);
if (ret) {
nvkm_error(&base->engine.subdev, "failed to construct context\n");
+ mutex_unlock(&gr->fecs.mutex);
return ret;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
index a1c8545f1249..cac6d64ab67d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
@@ -89,11 +89,6 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
nvkm_falcon_fw_dtor_sigs(fw);
}
- /* after last write to the img, sync dma mappings */
- dma_sync_single_for_device(fw->fw.device->dev,
- fw->fw.phys,
- sg_dma_len(&fw->fw.mem.sgl),
- DMA_TO_DEVICE);
FLCNFW_DBG(fw, "resetting");
fw->func->reset(fw);
@@ -105,6 +100,12 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
goto done;
}
+ /* after last write to the img, sync dma mappings */
+ dma_sync_single_for_device(fw->fw.device->dev,
+ fw->fw.phys,
+ sg_dma_len(&fw->fw.mem.sgl),
+ DMA_TO_DEVICE);
+
ret = fw->func->load(fw);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index cf58f9da9139..58502102926b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -26,6 +26,7 @@
#include <subdev/vfn.h>
#include <engine/fifo/chan.h>
#include <engine/sec2.h>
+#include <nvif/log.h>
#include <nvfw/fw.h>
@@ -57,6 +58,8 @@
#include <linux/ctype.h>
#include <linux/parser.h>
+extern struct dentry *nouveau_debugfs_root;
+
#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
#define GSP_MSG_MAX_SIZE GSP_PAGE_MIN_SIZE * 16
@@ -78,7 +81,7 @@ r535_rpc_status_to_errno(uint32_t rpc_status)
switch (rpc_status) {
case 0x55: /* NV_ERR_NOT_READY */
case 0x66: /* NV_ERR_TIMEOUT_RETRY */
- return -EAGAIN;
+ return -EBUSY;
case 0x51: /* NV_ERR_NO_MEMORY */
return -ENOMEM;
default:
@@ -121,6 +124,8 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
return mqe->data;
}
+ size = ALIGN(repc + GSP_MSG_HDR_SIZE, GSP_PAGE_SIZE);
+
msg = kvmalloc(repc, GFP_KERNEL);
if (!msg)
return ERR_PTR(-ENOMEM);
@@ -129,19 +134,15 @@ r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
len = min_t(u32, repc, len);
memcpy(msg, mqe->data, len);
- rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE);
- if (rptr == gsp->msgq.cnt)
- rptr = 0;
-
repc -= len;
if (repc) {
mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
memcpy(msg + len, mqe, repc);
-
- rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE);
}
+ rptr = (rptr + DIV_ROUND_UP(size, GSP_PAGE_SIZE)) % gsp->msgq.cnt;
+
mb();
(*gsp->msgq.rptr) = rptr;
return msg;
@@ -163,7 +164,7 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
u64 *end;
u64 csum = 0;
int free, time = 1000000;
- u32 wptr, size;
+ u32 wptr, size, step;
u32 off = 0;
argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);
@@ -197,7 +198,9 @@ r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
}
cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
- size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE);
+ step = min_t(u32, free, (gsp->cmdq.cnt - wptr));
+ size = min_t(u32, argc, step * GSP_PAGE_SIZE);
+
memcpy(cqe, (u8 *)cmd + off, size);
wptr += DIV_ROUND_UP(size, 0x1000);
@@ -601,7 +604,7 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
if (rpc->status) {
ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
- if (PTR_ERR(ret) != -EAGAIN)
+ if (PTR_ERR(ret) != -EAGAIN && PTR_ERR(ret) != -EBUSY)
nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
} else {
ret = repc ? rpc->params : NULL;
@@ -660,7 +663,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
if (rpc->status) {
ret = r535_rpc_status_to_errno(rpc->status);
- if (ret != -EAGAIN)
+ if (ret != -EAGAIN && ret != -EBUSY)
nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
object->client->object.handle, object->handle, rpc->cmd, rpc->status);
}
@@ -1000,7 +1003,7 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
}
static void
-nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
+nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
{
if (mem->data) {
/*
@@ -1009,19 +1012,35 @@ nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
*/
memset(mem->data, 0xFF, mem->size);
- dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
+ dma_free_coherent(mem->dev, mem->size, mem->data, mem->addr);
+ put_device(mem->dev);
+
memset(mem, 0, sizeof(*mem));
}
}
+/**
+ * nvkm_gsp_mem_ctor - constructor for nvkm_gsp_mem objects
+ * @gsp: gsp pointer
+ * @size: number of bytes to allocate
+ * @mem: nvkm_gsp_mem object to initialize
+ *
+ * Allocates a block of memory for use with GSP.
+ *
+ * This memory block can potentially out-live the driver's remove() callback,
+ * so we take a device reference to ensure its lifetime. The reference is
+ * dropped in the destructor.
+ */
static int
nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
{
- mem->size = size;
mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
if (WARN_ON(!mem->data))
return -ENOMEM;
+ mem->size = size;
+ mem->dev = get_device(gsp->subdev.device->dev);
+
return 0;
}
@@ -1054,8 +1073,8 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
nvkm_wr32(device, 0x110004, 0x00000040);
/* Release the DMA buffers that were needed only for boot and init */
- nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
- nvkm_gsp_mem_dtor(gsp, &gsp->libos);
+ nvkm_gsp_mem_dtor(&gsp->boot.fw);
+ nvkm_gsp_mem_dtor(&gsp->libos);
return ret;
}
@@ -2060,6 +2079,215 @@ r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * If GSP-RM load fails, then the GSP nvkm object will be deleted, the logging
+ * debugfs entries will be deleted, and it will not be possible to debug the
+ * load failure. The keep_gsp_logging parameter tells Nouveau to copy the
+ * logging buffers to new debugfs entries, and these entries are retained
+ * until the driver unloads.
+ */
+static bool keep_gsp_logging;
+module_param(keep_gsp_logging, bool, 0444);
+MODULE_PARM_DESC(keep_gsp_logging,
+ "Migrate the GSP-RM logging debugfs entries upon exit");
+
+/*
+ * GSP-RM uses a pseudo-class mechanism to define a variety of per-"engine"
+ * data structures, and each engine has a "class ID" generated by a
+ * pre-processor. This is the class ID for the PMU.
+ */
+#define NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU 0xf3d722
+
+/**
+ * rpc_ucode_libos_print_v1e_08 - RPC payload for libos print buffers
+ * @ucode_eng_desc: the engine descriptor
+ * @libos_print_buf_size: the size of the libos_print_buf[]
+ * @libos_print_buf: the actual buffer
+ *
+ * The engine descriptor is divided into 31:8 "class ID" and 7:0 "instance
+ * ID". We only care about messages from PMU.
+ */
+struct rpc_ucode_libos_print_v1e_08 {
+ u32 ucode_eng_desc;
+ u32 libos_print_buf_size;
+ u8 libos_print_buf[];
+};
+
+/**
+ * r535_gsp_msg_libos_print - capture log message from the PMU
+ * @priv: gsp pointer
+ * @fn: function number (ignored)
+ * @repv: pointer to libos print RPC
+ * @repc: message size
+ *
+ * Called when we receive a UCODE_LIBOS_PRINT event RPC from GSP-RM. This RPC
+ * contains the contents of the libos print buffer from PMU. It is typically
+ * only written to when PMU encounters an error.
+ *
+ * Technically this RPC can be used to pass print buffers from any number of
+ * GSP-RM engines, but we only expect to receive them for the PMU.
+ *
+ * For the PMU, the buffer is 4K in size and the RPC always contains the full
+ * contents.
+ */
+static int
+r535_gsp_msg_libos_print(void *priv, u32 fn, void *repv, u32 repc)
+{
+ struct nvkm_gsp *gsp = priv;
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ struct rpc_ucode_libos_print_v1e_08 *rpc = repv;
+ unsigned int class = rpc->ucode_eng_desc >> 8;
+
+ nvkm_debug(subdev, "received libos print from class 0x%x for %u bytes\n",
+ class, rpc->libos_print_buf_size);
+
+ if (class != NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU) {
+ nvkm_warn(subdev,
+ "received libos print from unknown class 0x%x\n",
+ class);
+ return -ENOMSG;
+ }
+
+ if (rpc->libos_print_buf_size > GSP_PAGE_SIZE) {
+ nvkm_error(subdev, "libos print is too large (%u bytes)\n",
+ rpc->libos_print_buf_size);
+ return -E2BIG;
+ }
+
+ memcpy(gsp->blob_pmu.data, rpc->libos_print_buf, rpc->libos_print_buf_size);
+
+ return 0;
+}
+
+/**
+ * create_debugfs - create a blob debugfs entry
+ * @gsp: gsp pointer
+ * @name: name of this dentry
+ * @blob: blob wrapper
+ *
+ * Creates a debugfs entry for a logging buffer with the name 'name'.
+ */
+static struct dentry *create_debugfs(struct nvkm_gsp *gsp, const char *name,
+ struct debugfs_blob_wrapper *blob)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_blob(name, 0444, gsp->debugfs.parent, blob);
+ if (IS_ERR(dent)) {
+ nvkm_error(&gsp->subdev,
+ "failed to create %s debugfs entry\n", name);
+ return NULL;
+ }
+
+ /*
+ * For some reason, debugfs_create_blob doesn't set the size of the
+ * dentry, so do that here. See [1]
+ *
+ * [1] https://lore.kernel.org/r/linux-fsdevel/20240207200619.3354549-1-ttabi@nvidia.com/
+ */
+ i_size_write(d_inode(dent), blob->size);
+
+ return dent;
+}
+
+/**
+ * r535_gsp_libos_debugfs_init - create logging debugfs entries
+ * @gsp: gsp pointer
+ *
+ * Create the debugfs entries. This exposes the log buffers to userspace so
+ * that an external tool can parse it.
+ *
+ * The 'logpmu' contains exception dumps from the PMU. It is written via an
+ * RPC sent from GSP-RM and must be only 4KB. We create it here because it's
+ * only useful if there is a debugfs entry to expose it. If we get the PMU
+ * logging RPC and there is no debugfs entry, the RPC is just ignored.
+ *
+ * The blob_init, blob_rm, and blob_pmu objects can't be transient
+ * because debugfs_create_blob doesn't copy them.
+ *
+ * NOTE: OpenRM loads the logging elf image and prints the log messages
+ * in real-time. We may add that capability in the future, but that
+ * requires loading ELF images that are not distributed with the driver and
+ * adding the parsing code to Nouveau.
+ *
+ * Ideally, this should be part of nouveau_debugfs_init(), but that function
+ * is called too late. We really want to create these debugfs entries before
+ * r535_gsp_booter_load() is called, so that if GSP-RM fails to initialize,
+ * there could still be a log to capture.
+ */
+static void
+r535_gsp_libos_debugfs_init(struct nvkm_gsp *gsp)
+{
+ struct device *dev = gsp->subdev.device->dev;
+
+ /* Create a new debugfs directory with a name unique to this GPU. */
+ gsp->debugfs.parent = debugfs_create_dir(dev_name(dev), nouveau_debugfs_root);
+ if (IS_ERR(gsp->debugfs.parent)) {
+ nvkm_error(&gsp->subdev,
+ "failed to create %s debugfs root\n", dev_name(dev));
+ return;
+ }
+
+ gsp->blob_init.data = gsp->loginit.data;
+ gsp->blob_init.size = gsp->loginit.size;
+ gsp->blob_intr.data = gsp->logintr.data;
+ gsp->blob_intr.size = gsp->logintr.size;
+ gsp->blob_rm.data = gsp->logrm.data;
+ gsp->blob_rm.size = gsp->logrm.size;
+
+ gsp->debugfs.init = create_debugfs(gsp, "loginit", &gsp->blob_init);
+ if (!gsp->debugfs.init)
+ goto error;
+
+ gsp->debugfs.intr = create_debugfs(gsp, "logintr", &gsp->blob_intr);
+ if (!gsp->debugfs.intr)
+ goto error;
+
+ gsp->debugfs.rm = create_debugfs(gsp, "logrm", &gsp->blob_rm);
+ if (!gsp->debugfs.rm)
+ goto error;
+
+ /*
+ * Since the PMU buffer is copied from an RPC, it doesn't need to be
+ * a DMA buffer.
+ */
+ gsp->blob_pmu.size = GSP_PAGE_SIZE;
+ gsp->blob_pmu.data = kzalloc(gsp->blob_pmu.size, GFP_KERNEL);
+ if (!gsp->blob_pmu.data)
+ goto error;
+
+ gsp->debugfs.pmu = create_debugfs(gsp, "logpmu", &gsp->blob_pmu);
+ if (!gsp->debugfs.pmu) {
+ kfree(gsp->blob_pmu.data);
+ goto error;
+ }
+
+ i_size_write(d_inode(gsp->debugfs.init), gsp->blob_init.size);
+ i_size_write(d_inode(gsp->debugfs.intr), gsp->blob_intr.size);
+ i_size_write(d_inode(gsp->debugfs.rm), gsp->blob_rm.size);
+ i_size_write(d_inode(gsp->debugfs.pmu), gsp->blob_pmu.size);
+
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
+ r535_gsp_msg_libos_print, gsp);
+
+ nvkm_debug(&gsp->subdev, "created debugfs GSP-RM logging entries\n");
+
+ if (keep_gsp_logging) {
+ nvkm_info(&gsp->subdev,
+ "logging buffers will be retained on failure\n");
+ }
+
+ return;
+
+error:
+ debugfs_remove(gsp->debugfs.parent);
+ gsp->debugfs.parent = NULL;
+}
+
+#endif
+
static inline u64
r535_gsp_libos_id8(const char *name)
{
@@ -2110,7 +2338,11 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
* written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
*
* The physical address map for the log buffer is stored in the buffer
- * itself, starting with offset 1. Offset 0 contains the "put" pointer.
+ * itself, starting with offset 1. Offset 0 contains the "put" pointer (pp).
+ * Initially, pp is equal to 0. If the buffer has valid logging data in it,
+ * then pp points to index into the buffer where the next logging entry will
+ * be written. Therefore, the logging data is valid if:
+ * 1 <= pp < sizeof(buffer)/sizeof(u64)
*
* The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
* configured for a larger page size (e.g. 64K pages), we need to give
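
The validity rule above reduces to a one-line check; a sketch, with the buffer viewed as an array of u64 slots (the helper name is illustrative):

    #include <linux/types.h>

    /* Data is valid when 1 <= pp < number of u64 slots in the buffer. */
    static bool logbuf_has_valid_data(const u64 *buf, size_t size_in_bytes)
    {
            u64 pp = buf[0];

            return pp >= 1 && pp < size_in_bytes / sizeof(u64);
    }
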
@@ -2181,6 +2413,11 @@ r535_gsp_libos_init(struct nvkm_gsp *gsp)
args[3].size = gsp->rmargs.size;
args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
args[3].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+
+#ifdef CONFIG_DEBUG_FS
+ r535_gsp_libos_debugfs_init(gsp);
+#endif
+
return 0;
}
@@ -2234,8 +2471,8 @@ static void
nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
{
nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2);
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
+ nvkm_gsp_mem_dtor(&rx3->lvl1);
+ nvkm_gsp_mem_dtor(&rx3->lvl0);
}
/**
@@ -2323,9 +2560,9 @@ nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
if (ret) {
lvl2_fail:
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+ nvkm_gsp_mem_dtor(&rx3->lvl1);
lvl1_fail:
- nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
+ nvkm_gsp_mem_dtor(&rx3->lvl0);
}
return ret;
@@ -2417,7 +2654,7 @@ r535_gsp_init(struct nvkm_gsp *gsp)
done:
if (gsp->sr.meta.data) {
- nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta);
+ nvkm_gsp_mem_dtor(&gsp->sr.meta);
nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
return ret;
@@ -2491,6 +2728,222 @@ r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
gsp->fws.rm = NULL;
}
+#ifdef CONFIG_DEBUG_FS
+
+struct r535_gsp_log {
+ struct nvif_log log;
+
+ /*
+ * Logging buffers in debugfs. The wrapper objects need to remain
+ * in memory until the dentry is deleted.
+ */
+ struct dentry *debugfs_logging_dir;
+ struct debugfs_blob_wrapper blob_init;
+ struct debugfs_blob_wrapper blob_intr;
+ struct debugfs_blob_wrapper blob_rm;
+ struct debugfs_blob_wrapper blob_pmu;
+};
+
+/**
+ * r535_debugfs_shutdown - delete GSP-RM logging buffers for one GPU
+ * @_log: nvif_log struct for this GPU
+ *
+ * Called when the driver is shutting down, to clean up the retained GSP-RM
+ * logging buffers.
+ */
+static void r535_debugfs_shutdown(struct nvif_log *_log)
+{
+ struct r535_gsp_log *log = container_of(_log, struct r535_gsp_log, log);
+
+ debugfs_remove(log->debugfs_logging_dir);
+
+ kfree(log->blob_init.data);
+ kfree(log->blob_intr.data);
+ kfree(log->blob_rm.data);
+ kfree(log->blob_pmu.data);
+
+ /* We also need to delete the list object */
+ kfree(log);
+}
+
+/**
+ * is_empty - return true if the logging buffer was never written to
+ * @b: blob wrapper with ->data field pointing to logging buffer
+ *
+ * The first 64-bit field of loginit, logintr, and logrm is the 'put'
+ * pointer, and it is initialized to 0. It's a dword-based index into the
+ * circular buffer, indicating where the next printf write will be made.
+ *
+ * If the pointer is still 0 when GSP-RM is shut down, that means that the
+ * buffer was never written to, so it can be ignored.
+ *
+ * This test also works for logpmu, even though it doesn't have a put pointer.
+ */
+static bool is_empty(const struct debugfs_blob_wrapper *b)
+{
+ u64 *put = b->data;
+
+ return put ? (*put == 0) : true;
+}
+
+/**
+ * r535_gsp_copy_log - preserve the logging buffers in a blob
+ *
+ * When GSP shuts down, the nvkm_gsp object and all its memory is deleted.
+ * To preserve the logging buffers, the buffers need to be copied, but only
+ * if they actually have data.
+ */
+static int r535_gsp_copy_log(struct dentry *parent,
+ const char *name,
+ const struct debugfs_blob_wrapper *s,
+ struct debugfs_blob_wrapper *t)
+{
+ struct dentry *dent;
+ void *p;
+
+ if (is_empty(s))
+ return 0;
+
+ /* The original buffers will be deleted */
+ p = kmemdup(s->data, s->size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ t->data = p;
+ t->size = s->size;
+
+ dent = debugfs_create_blob(name, 0444, parent, t);
+ if (IS_ERR(dent)) {
+ kfree(p);
+ memset(t, 0, sizeof(*t));
+ return PTR_ERR(dent);
+ }
+
+ i_size_write(d_inode(dent), t->size);
+
+ return 0;
+}
+
+/**
+ * r535_gsp_retain_logging - copy logging buffers to new debugfs root
+ * @gsp: gsp pointer
+ *
+ * If keep_gsp_logging is enabled, then we want to preserve the GSP-RM logging
+ * buffers and their debugfs entries, but all those objects would normally
+ * deleted if GSP-RM fails to load.
+ *
+ * To preserve the logging buffers, we need to:
+ *
+ * 1) Allocate new buffers and copy the logs into them, so that the original
+ * DMA buffers can be released.
+ *
+ * 2) Preserve the directories. We don't need to save single dentries because
+ * we're going to delete the parent when the driver unloads.
+ *
+ * If anything fails in this process, then all the dentries need to be
+ * deleted. We don't need to deallocate the original logging buffers because
+ * the caller will do that regardless.
+ */
+static void r535_gsp_retain_logging(struct nvkm_gsp *gsp)
+{
+ struct device *dev = gsp->subdev.device->dev;
+ struct r535_gsp_log *log = NULL;
+ int ret;
+
+ if (!keep_gsp_logging || !gsp->debugfs.parent) {
+ /* Nothing to do */
+ goto exit;
+ }
+
+ /* Check to make sure at least one buffer has data. */
+ if (is_empty(&gsp->blob_init) && is_empty(&gsp->blob_intr) &&
+	    is_empty(&gsp->blob_rm) && is_empty(&gsp->blob_pmu)) {
+ nvkm_warn(&gsp->subdev, "all logging buffers are empty\n");
+ goto exit;
+ }
+
+ log = kzalloc(sizeof(*log), GFP_KERNEL);
+ if (!log)
+ goto error;
+
+ /*
+ * Since the nvkm_gsp object is going away, the debugfs_blob_wrapper
+ * objects are also being deleted, which means the dentries will no
+ * longer be valid. Delete the existing entries so that we can create
+ * new ones with the same name.
+ */
+ debugfs_remove(gsp->debugfs.init);
+ debugfs_remove(gsp->debugfs.intr);
+ debugfs_remove(gsp->debugfs.rm);
+ debugfs_remove(gsp->debugfs.pmu);
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "loginit", &gsp->blob_init, &log->blob_init);
+ if (ret)
+ goto error;
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "logintr", &gsp->blob_intr, &log->blob_intr);
+ if (ret)
+ goto error;
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "logrm", &gsp->blob_rm, &log->blob_rm);
+ if (ret)
+ goto error;
+
+ ret = r535_gsp_copy_log(gsp->debugfs.parent, "logpmu", &gsp->blob_pmu, &log->blob_pmu);
+ if (ret)
+ goto error;
+
+ /* The nvkm_gsp object is going away, so save the dentry */
+ log->debugfs_logging_dir = gsp->debugfs.parent;
+
+ log->log.shutdown = r535_debugfs_shutdown;
+ list_add(&log->log.entry, &gsp_logs.head);
+
+ nvkm_warn(&gsp->subdev,
+ "logging buffers migrated to /sys/kernel/debug/nouveau/%s\n",
+ dev_name(dev));
+
+ return;
+
+error:
+ nvkm_warn(&gsp->subdev, "failed to migrate logging buffers\n");
+
+exit:
+ debugfs_remove(gsp->debugfs.parent);
+
+ if (log) {
+ kfree(log->blob_init.data);
+ kfree(log->blob_intr.data);
+ kfree(log->blob_rm.data);
+ kfree(log->blob_pmu.data);
+ kfree(log);
+ }
+}
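+
+/*
+ * Teardown implied by the list handling above (sketch; iteration variable
+ * names are illustrative): at module unload, each retained log's shutdown
+ * hook -- r535_debugfs_shutdown here -- frees the copied blobs and removes
+ * the preserved debugfs directory.
+ *
+ * list_for_each_entry_safe(log, tmp, &gsp_logs.head, entry)
+ * log->shutdown(log);
+ */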
+
+#endif
+
+/**
+ * r535_gsp_libos_debugfs_fini - cleanup/retain log buffers on shutdown
+ * @gsp: gsp pointer
+ *
+ * If the log buffers are exposed via debugfs, the data for those entries
+ * needs to be cleaned up when the GSP device shuts down.
+ */
+static void
+r535_gsp_libos_debugfs_fini(struct nvkm_gsp __maybe_unused *gsp)
+{
+#ifdef CONFIG_DEBUG_FS
+ r535_gsp_retain_logging(gsp);
+
+ /*
+ * Unlike the other buffers, the PMU blob is a kmalloc'd buffer that
+ * exists only if the debugfs entries were created.
+ */
+ kfree(gsp->blob_pmu.data);
+ gsp->blob_pmu.data = NULL;
+#endif
+}
+
void
r535_gsp_dtor(struct nvkm_gsp *gsp)
{
@@ -2498,7 +2951,7 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
mutex_destroy(&gsp->client_id.mutex);
nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
- nvkm_gsp_mem_dtor(gsp, &gsp->sig);
+ nvkm_gsp_mem_dtor(&gsp->sig);
nvkm_firmware_dtor(&gsp->fw);
nvkm_falcon_fw_dtor(&gsp->booter.unload);
@@ -2509,12 +2962,15 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
r535_gsp_dtor_fws(gsp);
- nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
- nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
- nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
- nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
- nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
- nvkm_gsp_mem_dtor(gsp, &gsp->logrm);
+ nvkm_gsp_mem_dtor(&gsp->rmargs);
+ nvkm_gsp_mem_dtor(&gsp->wpr_meta);
+ nvkm_gsp_mem_dtor(&gsp->shm.mem);
+
+ r535_gsp_libos_debugfs_fini(gsp);
+
+ nvkm_gsp_mem_dtor(&gsp->loginit);
+ nvkm_gsp_mem_dtor(&gsp->logintr);
+ nvkm_gsp_mem_dtor(&gsp->logrm);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 819703913a00..2c551bdc9bc9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -25,7 +25,7 @@ nvkm-y += nvkm/subdev/i2c/busnv50.o
nvkm-y += nvkm/subdev/i2c/busgf119.o
nvkm-y += nvkm/subdev/i2c/bit.o
-nvkm-y += nvkm/subdev/i2c/aux.o
+nvkm-y += nvkm/subdev/i2c/auxch.o
nvkm-y += nvkm/subdev/i2c/auxg94.o
nvkm-y += nvkm/subdev/i2c/auxgf119.o
nvkm-y += nvkm/subdev/i2c/auxgm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
index dd391809fef7..6c76e5e14b75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
@@ -24,7 +24,7 @@
#define anx9805_pad(p) container_of((p), struct anx9805_pad, base)
#define anx9805_bus(p) container_of((p), struct anx9805_bus, base)
#define anx9805_aux(p) container_of((p), struct anx9805_aux, base)
-#include "aux.h"
+#include "auxch.h"
#include "bus.h"
struct anx9805_pad {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.c
index d063d0dc13c5..fafc634acbf6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.c
@@ -24,7 +24,7 @@
#include <linux/string_helpers.h>
-#include "aux.h"
+#include "auxch.h"
#include "pad.h"
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.h
index f920eabf8628..f920eabf8628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxch.h
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index 47068f6f9c55..854bb4b5fdb4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#define g94_i2c_aux(p) container_of((p), struct g94_i2c_aux, base)
-#include "aux.h"
+#include "auxch.h"
struct g94_i2c_aux {
struct nvkm_i2c_aux base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
index dab40cd8fe3a..c17d5647cb99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
@@ -19,7 +19,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "aux.h"
+#include "auxch.h"
static const struct nvkm_i2c_aux_func
gf119_i2c_aux = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index 8bd1d442e465..3c5005e3b330 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#define gm200_i2c_aux(p) container_of((p), struct gm200_i2c_aux, base)
-#include "aux.h"
+#include "auxch.h"
struct gm200_i2c_aux {
struct nvkm_i2c_aux base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 731b2f68d3db..7ec17e8435a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
#include "priv.h"
-#include "aux.h"
+#include "auxch.h"
#include "bus.h"
#include "pad.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
index 5904bc5f2d2a..cc26cd677917 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
#include "pad.h"
-#include "aux.h"
+#include "auxch.h"
#include "bus.h"
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
index 3bc4d0310076..1797c6c65979 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
#include "pad.h"
-#include "aux.h"
+#include "auxch.h"
#include "bus.h"
static const struct nvkm_i2c_pad_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c
index 7d417f6a816e..5afc1bf8e798 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm200.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
#include "pad.h"
-#include "aux.h"
+#include "auxch.h"
#include "bus.h"
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index a17a6dd8d3de..803b98df4858 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -142,7 +142,7 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp)
return -ENODEV;
}
- result = min(max(result, (s64)info.min), (s64)info.max);
+ result = clamp(result, (s64)info.min, (s64)info.max);
if (info.link != 0xff) {
int ret = nvkm_volt_map(volt, info.link, temp);
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index fbd9af758581..9d4016bd0f44 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -4,6 +4,7 @@ config DRM_OMAP
depends on MMU
depends on DRM && OF
depends on ARCH_OMAP2PLUS || (COMPILE_TEST && PAGE_SIZE_LESS_THAN_64KB)
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
select DRM_BRIDGE_CONNECTOR
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 5f8002f6bb7a..a4ac113e1690 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -139,21 +139,13 @@ static bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
}
int omapdss_device_connect(struct dss_device *dss,
- struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n",
- src ? dev_name(src->dev) : "NULL",
+ dev_dbg(&dss->pdev->dev, "connect(%s)\n",
dst ? dev_name(dst->dev) : "NULL");
- if (!dst) {
- /*
- * The destination is NULL when the source is connected to a
- * bridge instead of a DSS device. Stop here, we will attach
- * the bridge later when we will have a DRM encoder.
- */
- return src && src->bridge ? 0 : -EINVAL;
- }
+ if (!dst)
+ return -EINVAL;
if (omapdss_device_is_connected(dst))
return -EBUSY;
@@ -163,19 +155,14 @@ int omapdss_device_connect(struct dss_device *dss,
return 0;
}
-void omapdss_device_disconnect(struct omap_dss_device *src,
+void omapdss_device_disconnect(struct dss_device *dss,
struct omap_dss_device *dst)
{
- struct dss_device *dss = src ? src->dss : dst->dss;
-
- dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n",
- src ? dev_name(src->dev) : "NULL",
+ dev_dbg(&dss->pdev->dev, "disconnect(%s)\n",
dst ? dev_name(dst->dev) : "NULL");
- if (!dst) {
- WARN_ON(!src->bridge);
+ if (WARN_ON(!dst))
return;
- }
if (!dst->id && !omapdss_device_is_connected(dst)) {
WARN_ON(1);
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 993691b3cc7e..533f70e8a4a6 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -691,11 +691,6 @@ u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
return mgr_desc[channel].sync_lost_irq;
}
-u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
-{
- return DISPC_IRQ_FRAMEDONEWB;
-}
-
void dispc_mgr_enable(struct dispc_device *dispc,
enum omap_channel channel, bool enable)
{
@@ -726,30 +721,6 @@ void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1);
}
-bool dispc_wb_go_busy(struct dispc_device *dispc)
-{
- return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
-}
-
-void dispc_wb_go(struct dispc_device *dispc)
-{
- enum omap_plane_id plane = OMAP_DSS_WB;
- bool enable, go;
-
- enable = REG_GET(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
-
- if (!enable)
- return;
-
- go = REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
- if (go) {
- DSSERR("GO bit not down for WB\n");
- return;
- }
-
- REG_FLD_MOD(dispc, DISPC_CONTROL2, 1, 6, 6);
-}
-
static void dispc_ovl_write_firh_reg(struct dispc_device *dispc,
enum omap_plane_id plane, int reg,
u32 value)
@@ -1498,17 +1469,6 @@ void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc,
min(high, 0xfffu));
}
-void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable)
-{
- if (!dispc_has_feature(dispc, FEAT_FIFO_MERGE)) {
- WARN_ON(enable);
- return;
- }
-
- DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
- REG_FLD_MOD(dispc, DISPC_CONFIG, enable ? 1 : 0, 14, 14);
-}
-
void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc,
enum omap_plane_id plane,
u32 *fifo_low, u32 *fifo_high,
@@ -2814,95 +2774,6 @@ int dispc_ovl_setup(struct dispc_device *dispc,
return r;
}
-int dispc_wb_setup(struct dispc_device *dispc,
- const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct videomode *vm,
- enum dss_writeback_channel channel_in)
-{
- int r;
- u32 l;
- enum omap_plane_id plane = OMAP_DSS_WB;
- const int pos_x = 0, pos_y = 0;
- const u8 zorder = 0, global_alpha = 0;
- const bool replication = true;
- bool truncation;
- int in_width = vm->hactive;
- int in_height = vm->vactive;
- enum omap_overlay_caps caps =
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
-
- if (vm->flags & DISPLAY_FLAGS_INTERLACED)
- in_height /= 2;
-
- DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, "
- "rot %d\n", wi->paddr, wi->p_uv_addr, in_width,
- in_height, wi->width, wi->height, wi->fourcc, wi->rotation);
-
- r = dispc_ovl_setup_common(dispc, plane, caps, wi->paddr, wi->p_uv_addr,
- wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
- wi->height, wi->fourcc, wi->rotation, zorder,
- wi->pre_mult_alpha, global_alpha, wi->rotation_type,
- replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601,
- DRM_COLOR_YCBCR_LIMITED_RANGE);
- if (r)
- return r;
-
- switch (wi->fourcc) {
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_RGBA4444:
- case DRM_FORMAT_RGBX4444:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XRGB4444:
- truncation = true;
- break;
- default:
- truncation = false;
- break;
- }
-
- /* setup extra DISPC_WB_ATTRIBUTES */
- l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
- l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */
- l = FLD_MOD(l, channel_in, 18, 16); /* CHANNELIN */
- l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */
- if (mem_to_mem)
- l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */
- else
- l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */
- dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l);
-
- if (mem_to_mem) {
- /* WBDELAYCOUNT */
- REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0);
- } else {
- u32 wbdelay;
-
- if (channel_in == DSS_WB_TV_MGR)
- wbdelay = vm->vsync_len + vm->vback_porch;
- else
- wbdelay = vm->vfront_porch + vm->vsync_len +
- vm->vback_porch;
-
- if (vm->flags & DISPLAY_FLAGS_INTERLACED)
- wbdelay /= 2;
-
- wbdelay = min(wbdelay, 255u);
-
- /* WBDELAYCOUNT */
- REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
- }
-
- return 0;
-}
-
-bool dispc_has_writeback(struct dispc_device *dispc)
-{
- return dispc->feat->has_writeback;
-}
-
int dispc_ovl_enable(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable)
{
@@ -3742,23 +3613,6 @@ void dispc_mgr_set_clock_div(struct dispc_device *dispc,
cinfo->pck_div);
}
-int dispc_mgr_get_clock_div(struct dispc_device *dispc,
- enum omap_channel channel,
- struct dispc_clock_info *cinfo)
-{
- unsigned long fck;
-
- fck = dispc_fclk_rate(dispc);
-
- cinfo->lck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16);
- cinfo->pck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 7, 0);
-
- cinfo->lck = fck / cinfo->lck_div;
- cinfo->pck = cinfo->lck / cinfo->pck_div;
-
- return 0;
-}
-
u32 dispc_read_irqstatus(struct dispc_device *dispc)
{
return dispc_read_reg(dispc, DISPC_IRQSTATUS);
@@ -4912,7 +4766,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
struct platform_driver omap_dispchw_driver = {
.probe = dispc_probe,
- .remove_new = dispc_remove,
+ .remove = dispc_remove,
.driver = {
.name = "omapdss_dispc",
.pm = &dispc_pm_ops,
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 030f997eccd0..b17e77f700dd 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
@@ -709,7 +710,7 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
if (!dpi)
return -ENOMEM;
- ep = of_get_next_child(port, NULL);
+ ep = of_graph_get_next_port_endpoint(port, NULL);
if (!ep)
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index ea63c64d3a1a..59d20eb8a7e0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5093,7 +5093,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
struct platform_driver omap_dsihw_driver = {
.probe = dsi_probe,
- .remove_new = dsi_remove,
+ .remove = dsi_remove,
.driver = {
.name = "omapdss_dsi",
.pm = &dsi_pm_ops,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 988888e164d7..7b2df3185de4 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1606,7 +1606,7 @@ static const struct dev_pm_ops dss_pm_ops = {
struct platform_driver omap_dsshw_driver = {
.probe = dss_probe,
- .remove_new = dss_remove,
+ .remove = dss_remove,
.shutdown = dss_shutdown,
.driver = {
.name = "omapdss_dss",
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 4ff02fbc0e71..a8b231ed4f4b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -416,7 +416,6 @@ u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
enum omap_channel channel);
u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
enum omap_channel channel);
-u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc);
u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc);
@@ -458,20 +457,11 @@ int dispc_ovl_setup(struct dispc_device *dispc,
int dispc_ovl_enable(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable);
-bool dispc_has_writeback(struct dispc_device *dispc);
-int dispc_wb_setup(struct dispc_device *dispc,
- const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct videomode *vm,
- enum dss_writeback_channel channel_in);
-bool dispc_wb_go_busy(struct dispc_device *dispc);
-void dispc_wb_go(struct dispc_device *dispc);
-
void dispc_enable_sidle(struct dispc_device *dispc);
void dispc_disable_sidle(struct dispc_device *dispc);
void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable);
void dispc_pck_free_enable(struct dispc_device *dispc, bool enable);
-void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable);
typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data);
@@ -494,9 +484,6 @@ void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc,
void dispc_mgr_set_clock_div(struct dispc_device *dispc,
enum omap_channel channel,
const struct dispc_clock_info *cinfo);
-int dispc_mgr_get_clock_div(struct dispc_device *dispc,
- enum omap_channel channel,
- struct dispc_clock_info *cinfo);
void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 9b8747d83ee8..4435f0027c78 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -852,7 +852,7 @@ static const struct of_device_id hdmi_of_match[] = {
struct platform_driver omapdss_hdmi4hw_driver = {
.probe = hdmi4_probe,
- .remove_new = hdmi4_remove,
+ .remove = hdmi4_remove,
.driver = {
.name = "omapdss_hdmi",
.of_match_table = hdmi_of_match,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index c7ae2235ae99..a8c740df3146 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -819,7 +819,7 @@ static const struct of_device_id hdmi_of_match[] = {
struct platform_driver omapdss_hdmi5hw_driver = {
.probe = hdmi5_probe,
- .remove_new = hdmi5_remove,
+ .remove = hdmi5_remove,
.driver = {
.name = "omapdss_hdmi5",
.of_match_table = hdmi_of_match,
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 040d5a3e33d6..4c22c09c93d5 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -242,9 +242,8 @@ struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
void omapdss_device_put(struct omap_dss_device *dssdev);
struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node);
int omapdss_device_connect(struct dss_device *dss,
- struct omap_dss_device *src,
struct omap_dss_device *dst);
-void omapdss_device_disconnect(struct omap_dss_device *src,
+void omapdss_device_disconnect(struct dss_device *dss,
struct omap_dss_device *dst);
int omap_dss_get_num_overlay_managers(void);
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 91eaae3b9481..f9ae358e8e52 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
@@ -346,7 +347,7 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
if (!sdi)
return -ENOMEM;
- ep = of_get_next_child(port, NULL);
+ ep = of_graph_get_next_port_endpoint(port, NULL);
if (!ep) {
r = 0;
goto err_free;
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index f163d52a7c7d..aaeef603682c 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -912,7 +912,7 @@ static const struct of_device_id venc_of_match[] = {
struct platform_driver omap_venchw_driver = {
.probe = venc_probe,
- .remove_new = venc_remove,
+ .remove = venc_remove,
.driver = {
.name = "omapdss_venc",
.pm = &venc_pm_ops,
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 1aca3060333e..3fff32c000a6 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -119,7 +119,7 @@ static u32 dmm_read_wa(struct dmm *dmm, u32 reg)
* earlier than the DMA finished writing the value to memory.
*/
rmb();
- return readl(dmm->wa_dma_data);
+ return readl((void __iomem *)dmm->wa_dma_data);
}
static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
@@ -127,7 +127,7 @@ static void dmm_write_wa(struct dmm *dmm, u32 val, u32 reg)
dma_addr_t src, dst;
int r;
- writel(val, dmm->wa_dma_data);
+ writel(val, (void __iomem *)dmm->wa_dma_data);
/*
* As per i878 workaround, the DMA is used to access the DMM registers.
* Make sure that the writel is not moved by the compiler or the CPU, so
@@ -411,7 +411,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
*/
/* read back to ensure the data is in RAM */
- readl(&txn->last_pat->next_pa);
+ readl((void __iomem *)&txn->last_pat->next_pa);
/* write to PAT_DESCR to clear out any pending transaction */
dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
@@ -1210,7 +1210,7 @@ static const struct of_device_id dmm_of_match[] = {
struct platform_driver omap_dmm_driver = {
.probe = omap_dmm_probe,
- .remove_new = omap_dmm_remove,
+ .remove = omap_dmm_remove,
.driver = {
.name = DMM_DRIVER_NAME,
.of_match_table = of_match_ptr(dmm_of_match),
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d3eac4817d76..054b71dba6a7 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -28,7 +28,6 @@
#define DRIVER_NAME MODULE_NAME
#define DRIVER_DESC "OMAP DRM"
-#define DRIVER_DATE "20110917"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -307,7 +306,7 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
- omapdss_device_disconnect(NULL, pipe->output);
+ omapdss_device_disconnect(priv->dss, pipe->output);
omapdss_device_put(pipe->output);
pipe->output = NULL;
@@ -325,7 +324,7 @@ static int omap_connect_pipelines(struct drm_device *ddev)
int r;
for_each_dss_output(output) {
- r = omapdss_device_connect(priv->dss, NULL, output);
+ r = omapdss_device_connect(priv->dss, output);
if (r == -EPROBE_DEFER) {
omapdss_device_put(output);
return r;
@@ -647,12 +646,12 @@ static const struct drm_driver omap_drm_driver = {
.gem_prime_import = omap_gem_prime_import,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
+ OMAP_FBDEV_DRIVER_OPS,
.ioctls = ioctls,
.num_ioctls = DRM_OMAP_NUM_IOCTLS,
.fops = &omapdriver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -856,7 +855,7 @@ static struct platform_driver pdev = {
.pm = &omapdrm_pm_ops,
},
.probe = pdev_probe,
- .remove_new = pdev_remove,
+ .remove = pdev_remove,
.shutdown = pdev_shutdown,
};
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 4c7217b35f6b..d903568fd8cc 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -32,6 +32,7 @@
#define MODULE_NAME "omapdrm"
struct omap_drm_usergart;
+struct omap_fbdev;
struct omap_drm_pipeline {
struct drm_crtc *crtc;
@@ -97,6 +98,8 @@ struct omap_drm_private {
/* memory bandwidth limit if it is needed on the platform */
unsigned int max_bandwidth;
+
+ struct omap_fbdev *fbdev;
};
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 523be34682ca..7b6396890681 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -6,6 +6,7 @@
#include <linux/fb.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
@@ -13,6 +14,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_util.h>
#include "omap_drv.h"
@@ -26,10 +28,8 @@ module_param_named(ywrap, ywrap_enabled, bool, 0644);
* fbdev funcs, to implement legacy fbdev interface on top of drm driver
*/
-#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
-
struct omap_fbdev {
- struct drm_fb_helper base;
+ struct drm_device *dev;
bool ywrap_enabled;
/* for deferred dmm roll when getting called in atomic ctx */
@@ -41,7 +41,7 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi);
static void pan_worker(struct work_struct *work)
{
struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
- struct drm_fb_helper *helper = &fbdev->base;
+ struct drm_fb_helper *helper = fbdev->dev->fb_helper;
struct fb_info *fbi = helper->info;
struct drm_gem_object *bo = drm_gem_fb_get_obj(helper->fb, 0);
int npages;
@@ -55,24 +55,25 @@ FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area)
-static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *fbi)
+static int omap_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi)
{
struct drm_fb_helper *helper = get_fb(fbi);
- struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+ struct omap_drm_private *priv;
+ struct omap_fbdev *fbdev;
if (!helper)
goto fallback;
+ priv = helper->dev->dev_private;
+ fbdev = priv->fbdev;
+
if (!fbdev->ywrap_enabled)
goto fallback;
- if (drm_can_sleep()) {
+ if (drm_can_sleep())
pan_worker(&fbdev->work);
- } else {
- struct omap_drm_private *priv = helper->dev->dev_private;
+ else
queue_work(priv->wq, &fbdev->work);
- }
return 0;
@@ -92,7 +93,6 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
struct drm_fb_helper *helper = info->par;
struct drm_framebuffer *fb = helper->fb;
struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
- struct omap_fbdev *fbdev = to_omap_fbdev(helper);
DBG();
@@ -104,7 +104,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
drm_client_release(&helper->client);
drm_fb_helper_unprepare(helper);
- kfree(fbdev);
+ kfree(helper);
}
/*
@@ -125,12 +125,36 @@ static const struct fb_ops omap_fb_ops = {
.fb_destroy = omap_fbdev_fb_destroy,
};
-static int omap_fbdev_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
+static const struct drm_fb_helper_funcs omap_fbdev_helper_funcs = {
+ .fb_dirty = omap_fbdev_dirty,
+};
+
+static struct drm_fb_helper *get_fb(struct fb_info *fbi)
+{
+ if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
+ /* these are not the fb's you're looking for */
+ return NULL;
+ }
+ return fbi->par;
+}
+
+int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
- struct omap_fbdev *fbdev = to_omap_fbdev(helper);
struct drm_device *dev = helper->dev;
struct omap_drm_private *priv = dev->dev_private;
+ struct omap_fbdev *fbdev = priv->fbdev;
struct drm_framebuffer *fb = NULL;
union omap_gem_size gsize;
struct fb_info *fbi = NULL;
@@ -208,6 +232,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
DBG("fbi=%p, dev=%p", fbi, dev);
+ helper->funcs = &omap_fbdev_helper_funcs;
helper->fb = fb;
fbi->fbops = &omap_fb_ops;
@@ -254,115 +279,21 @@ fail:
return ret;
}
-static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
-{
- if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
- return 0;
-
- if (helper->fb->funcs->dirty)
- return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
- .fb_probe = omap_fbdev_create,
- .fb_dirty = omap_fbdev_dirty,
-};
-
-static struct drm_fb_helper *get_fb(struct fb_info *fbi)
-{
- if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
- /* these are not the fb's you're looking for */
- return NULL;
- }
- return fbi->par;
-}
-
-/*
- * struct drm_client
- */
-
-static void omap_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
- if (fb_helper->info) {
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- }
-}
-
-static int omap_fbdev_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
-
-static int omap_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- int ret;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs omap_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = omap_fbdev_client_unregister,
- .restore = omap_fbdev_client_restore,
- .hotplug = omap_fbdev_client_hotplug,
-};
-
void omap_fbdev_setup(struct drm_device *dev)
{
+ struct omap_drm_private *priv = dev->dev_private;
struct omap_fbdev *fbdev;
- struct drm_fb_helper *helper;
- int ret;
drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
- fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ fbdev = drmm_kzalloc(dev, sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
return;
- helper = &fbdev->base;
-
- drm_fb_helper_prepare(dev, helper, 32, &omap_fb_helper_funcs);
-
- ret = drm_client_init(dev, &helper->client, "fbdev", &omap_fbdev_client_funcs);
- if (ret)
- goto err_drm_client_init;
-
+ fbdev->dev = dev;
INIT_WORK(&fbdev->work, pan_worker);
- drm_client_register(&helper->client);
+ priv->fbdev = fbdev;
- return;
-
-err_drm_client_init:
- drm_fb_helper_unprepare(helper);
- kfree(fbdev);
+ drm_client_setup(dev, NULL);
}
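
/*
 * Sketch (illustrative only) of how the pieces above fit together: the
 * driver publishes .fbdev_probe via OMAP_FBDEV_DRIVER_OPS in its
 * struct drm_driver, and omap_fbdev_setup() hands the fbdev client
 * lifecycle to the DRM core through drm_client_setup().
 *
 * static const struct drm_driver example_driver = {
 * ...
 * OMAP_FBDEV_DRIVER_OPS, // sets .fbdev_probe
 * };
 *
 * // after drm_dev_register():
 * omap_fbdev_setup(dev);
 */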
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.h b/drivers/gpu/drm/omapdrm/omap_fbdev.h
index 74c691a8d45f..283e35b42ada 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.h
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.h
@@ -10,10 +10,18 @@
#define __OMAPDRM_FBDEV_H__
struct drm_device;
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
#ifdef CONFIG_DRM_FBDEV_EMULATION
+int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+#define OMAP_FBDEV_DRIVER_OPS \
+ .fbdev_probe = omap_fbdev_driver_fbdev_probe
void omap_fbdev_setup(struct drm_device *dev);
#else
+#define OMAP_FBDEV_DRIVER_OPS \
+ .fbdev_probe = NULL
static inline void omap_fbdev_setup(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index fdae677558f3..b9c67e4ca360 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1402,8 +1402,6 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
omap_obj = to_omap_bo(obj);
- mutex_lock(&omap_obj->lock);
-
omap_obj->sgt = sgt;
if (omap_gem_sgt_is_contiguous(sgt, size)) {
@@ -1418,21 +1416,17 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
if (!pages) {
omap_gem_free_object(obj);
- obj = ERR_PTR(-ENOMEM);
- goto done;
+ return ERR_PTR(-ENOMEM);
}
omap_obj->pages = pages;
ret = drm_prime_sg_to_page_array(sgt, pages, npages);
if (ret) {
omap_gem_free_object(obj);
- obj = ERR_PTR(-ENOMEM);
- goto done;
+ return ERR_PTR(-ENOMEM);
}
}
-done:
- mutex_unlock(&omap_obj->lock);
return obj;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 36f9ee4baad3..30cf1cdc1aa3 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -11,7 +11,7 @@
#include "omap_drv.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
/* -----------------------------------------------------------------------------
* DMABUF Export
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d3a9a9fafe4e..d7469c565d1d 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -378,7 +378,7 @@ config DRM_PANEL_LG_SW43408
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
- select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_DSC_HELPER
select DRM_DISPLAY_HELPER
help
Say Y here if you want to enable support for LG sw43408 panel.
@@ -587,7 +587,7 @@ config DRM_PANEL_RAYDIUM_RM692E5
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
- select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_DSC_HELPER
select DRM_DISPLAY_HELPER
help
Say Y here if you want to enable support for Raydium RM692E5-based
@@ -614,6 +614,34 @@ config DRM_PANEL_RONBO_RB070D30
Say Y here if you want to enable support for Ronbo Electronics
RB070D30 1024x600 DSI panel.
+config DRM_PANEL_SAMSUNG_AMS581VF01
+ tristate "Samsung AMS581VF01 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y or M here if you want to enable support for the
+ Samsung AMS581VF01 FHD Plus (2340x1080@60Hz) CMD mode panel.
+
+config DRM_PANEL_SAMSUNG_AMS639RQ08
+ tristate "Samsung AMS639RQ08 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y or M here if you want to enable support for the
+ Samsung AMS639RQ08 FHD Plus (2340x1080@60Hz) CMD mode panel.
+
+config DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24
+ tristate "Samsung AMS427AP24 panel with S6E88A0 controller"
+ depends on GPIOLIB && OF && REGULATOR
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Samsung AMS427AP24 panel
+ with S6E88A0 controller (found in Samsung Galaxy S4 Mini Value Edition
+ GT-I9195I). To compile this driver as a module, choose M here.
+
config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
depends on OF
@@ -689,6 +717,14 @@ config DRM_PANEL_SAMSUNG_S6E3HA2
depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6E3HA8
+ tristate "Samsung S6E3HA8 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DSC_HELPER
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_S6E63J0X03
tristate "Samsung S6E63J0X03 DSI command mode panel"
depends on OF
@@ -946,7 +982,7 @@ config DRM_PANEL_VISIONOX_R66451
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
- select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_DSC_HELPER
select DRM_DISPLAY_HELPER
help
Say Y here if you want to enable support for Visionox
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 987a08702410..7dcf72646cac 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -62,6 +62,8 @@ obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o
obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS581VF01) += panel-samsung-ams581vf01.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS639RQ08) += panel-samsung-ams639rq08.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
@@ -70,10 +72,12 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0) += panel-samsung-s6d7aa0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FA7) += panel-samsung-s6e3fa7.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA8) += panel-samsung-s6e3ha8.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_SPI) += panel-samsung-s6e63m0-spi.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24) += panel-samsung-s6e88a0-ams427ap24.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 767e47a2b0c1..f8511fe5fb0d 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1802,6 +1802,12 @@ static const struct panel_delay delay_200_500_e50_po2e200 = {
.powered_on_to_enable = 200,
};
+static const struct panel_delay delay_200_150_e50 = {
+ .hpd_absent = 200,
+ .unprepare = 150,
+ .enable = 50,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.ident = { \
@@ -1913,6 +1919,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b66, &delay_200_500_e80, "NE140WUM-N6G"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
@@ -1963,6 +1970,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1118, &delay_200_500_e50, "KD116N29-30NK-A005"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1212, &delay_200_500_e50, "KD116N0930A16"),
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x1707, &delay_200_150_e50, "KD116N2130B12"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
@@ -1977,11 +1985,13 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('L', 'G', 'D', 0x0567, &delay_200_500_e200_d200, "Unknown"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x05af, &delay_200_500_e200_d200, "Unknown"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x05f1, &delay_200_500_e200_d200, "Unknown"),
+ EDP_PANEL_ENTRY('L', 'G', 'D', 0x0778, &delay_200_500_e200_d200, "134WT1"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+ EDP_PANEL_ENTRY('S', 'H', 'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
@@ -2047,7 +2057,7 @@ static struct platform_driver panel_edp_platform_driver = {
.pm = &panel_edp_pm_ops,
},
.probe = panel_edp_platform_probe,
- .remove_new = panel_edp_platform_remove,
+ .remove = panel_edp_platform_remove,
.shutdown = panel_edp_platform_shutdown,
};
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index 00791ea81e90..b904d5437444 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -50,55 +50,44 @@ static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
return container_of(panel, struct kd35t133, panel);
}
-static int kd35t133_init_sequence(struct kd35t133 *ctx)
+static void kd35t133_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- struct device *dev = ctx->dev;
-
/*
* Init sequence was supplied by the panel vendor with minimal
* documentation.
*/
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
- 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
- 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
- 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
- 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
- 0x20, 0x02);
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
- mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
- 0xa9, 0x51, 0x2c, 0x82);
- mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0);
-
- dev_dbg(dev, "Panel init sequence done\n");
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_POSITIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
+ 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_NEGATIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
+ 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_POWERCONTROL2, 0x41);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_FRAMERATECTRL, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
+ 0x20, 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, KD35T133_CMD_ADJUSTCONTROL3,
+ 0xa9, 0x51, 0x2c, 0x82);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_ENTER_INVERT_MODE);
}
static int kd35t133_unprepare(struct drm_panel *panel)
{
struct kd35t133 *ctx = panel_to_kd35t133(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0)
- dev_err(ctx->dev, "failed to set display off: %d\n", ret);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
@@ -112,18 +101,20 @@ static int kd35t133_prepare(struct drm_panel *panel)
{
struct kd35t133 *ctx = panel_to_kd35t133(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dev_dbg(ctx->dev, "Resetting the panel\n");
- ret = regulator_enable(ctx->vdd);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to enable vdd supply: %d\n", ret);
- return ret;
+ dsi_ctx.accum_err = regulator_enable(ctx->vdd);
+ if (dsi_ctx.accum_err) {
+ dev_err(ctx->dev, "Failed to enable vdd supply: %d\n",
+ dsi_ctx.accum_err);
+ return dsi_ctx.accum_err;
}
- ret = regulator_enable(ctx->iovcc);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ dsi_ctx.accum_err = regulator_enable(ctx->iovcc);
+ if (dsi_ctx.accum_err) {
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n",
+ dsi_ctx.accum_err);
goto disable_vdd;
}
@@ -135,27 +126,18 @@ static int kd35t133_prepare(struct drm_panel *panel)
msleep(20);
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
- goto disable_iovcc;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 250);
- msleep(250);
+ kd35t133_init_sequence(&dsi_ctx);
+ if (!dsi_ctx.accum_err)
+ dev_dbg(ctx->dev, "Panel init sequence done\n");
- ret = kd35t133_init_sequence(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- goto disable_iovcc;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
+ if (dsi_ctx.accum_err)
goto disable_iovcc;
- }
-
- msleep(50);
return 0;
@@ -163,7 +145,7 @@ disable_iovcc:
regulator_disable(ctx->iovcc);
disable_vdd:
regulator_disable(ctx->vdd);
- return ret;
+ return dsi_ctx.accum_err;
}
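
/*
 * A minimal sketch (illustrative only, never compiled) of the
 * mipi_dsi_multi_context pattern adopted above: accum_err latches the
 * first failure and later *_multi calls become no-ops once it is set,
 * so a whole command sequence needs only one error check at the end.
 */
#if 0
static int example_panel_on(struct mipi_dsi_device *dsi)
{
 struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

 mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
 mipi_dsi_msleep(&dsi_ctx, 120);
 mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

 return dsi_ctx.accum_err; /* first error, or 0 on success */
}
#endif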
static const struct drm_display_mode default_mode = {
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
index 6e4b7e4644ce..3644a7544b93 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
@@ -298,7 +298,7 @@ static int ivo_t109nw41_init(struct hx83102 *ctx)
msleep(60);
hx83102_enable_extended_cmds(&dsi_ctx, true);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x0f, 0xcf, 0x42,
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x27, 0xe7, 0x52,
0xf5, 0x39, 0x36, 0x36, 0x36, 0x36, 0x32, 0x8b, 0x11, 0x65, 0x00, 0x88,
0xfa, 0xff, 0xff, 0x8f, 0xff, 0x08, 0xd6, 0x33);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x12,
@@ -343,11 +343,11 @@ static int ivo_t109nw41_init(struct hx83102 *ctx)
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05,
- 0x12, 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78,
- 0x7a, 0x41, 0x50, 0x68, 0x73, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05, 0x12,
- 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78, 0x7a,
- 0x41, 0x50, 0x68, 0x73);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33,
+ 0x48, 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b,
+ 0x9c, 0x4d, 0x56, 0x5d, 0x73, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33, 0x48,
+ 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b, 0x9c,
+ 0x4d, 0x56, 0x5d, 0x73);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x07, 0x10, 0x10, 0x1a, 0x26, 0x9e,
0x00, 0x4f, 0xa0, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12, 0x0a, 0x02,
0x02, 0x00, 0x33, 0x02, 0x04, 0x18, 0x01);
@@ -565,6 +565,8 @@ static int hx83102_get_modes(struct drm_panel *panel,
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode)
+ return -ENOMEM;
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112a.c b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
index 466c27012abf..47bce087e339 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83112a.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
@@ -56,198 +56,173 @@ static void hx83112a_reset(struct hx83112a_panel *ctx)
msleep(50);
}
-static int hx83112a_on(struct hx83112a_panel *ctx)
+static int hx83112a_on(struct mipi_dsi_device *dsi)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETEXTC, 0x83, 0x11, 0x2a);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER1,
- 0x08, 0x28, 0x28, 0x83, 0x83, 0x4c, 0x4f, 0x33);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDISP,
- 0x00, 0x02, 0x00, 0x90, 0x24, 0x00, 0x08, 0x19,
- 0xea, 0x11, 0x11, 0x00, 0x11, 0xa3);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV,
- 0x58, 0x68, 0x58, 0x68, 0x0f, 0xef, 0x0b, 0xc0,
- 0x0b, 0xc0, 0x0b, 0xc0, 0x00, 0xff, 0x00, 0xff,
- 0x00, 0x00, 0x14, 0x15, 0x00, 0x29, 0x11, 0x07,
- 0x12, 0x00, 0x29);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDRV,
- 0x00, 0x12, 0x12, 0x11, 0x88, 0x12, 0x12, 0x00,
- 0x53);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT,
- 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
- 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
- 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
- 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
- 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
- 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
- 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
- 0x40);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT,
- 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
- 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
- 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
- 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
- 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
- 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
- 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
- 0x40);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT,
- 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
- 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
- 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
- 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
- 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
- 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
- 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
- 0x40);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETDGCLUT, 0x01);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTCON,
- 0x70, 0x00, 0x04, 0xe0, 0x33, 0x00);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPANEL, 0x08);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPOWER2, 0x2b, 0x2b);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0,
- 0x80, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x08,
- 0x08, 0x03, 0x03, 0x22, 0x18, 0x07, 0x07, 0x07,
- 0x07, 0x32, 0x10, 0x06, 0x00, 0x06, 0x32, 0x10,
- 0x07, 0x00, 0x07, 0x32, 0x19, 0x31, 0x09, 0x31,
- 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x08,
- 0x09, 0x30, 0x00, 0x00, 0x00, 0x06, 0x0d, 0x00,
- 0x0f);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP0,
- 0x00, 0x00, 0x19, 0x10, 0x00, 0x0a, 0x00, 0x81);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP1,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0xc0, 0xc0, 0x18, 0x18, 0x19, 0x19, 0x18, 0x18,
- 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f,
- 0x28, 0x28, 0x24, 0x24, 0x02, 0x03, 0x02, 0x03,
- 0x00, 0x01, 0x00, 0x01, 0x31, 0x31, 0x31, 0x31,
- 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP2,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19,
- 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f,
- 0x24, 0x24, 0x28, 0x28, 0x01, 0x00, 0x01, 0x00,
- 0x03, 0x02, 0x03, 0x02, 0x31, 0x31, 0x31, 0x31,
- 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
- 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea,
- 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa,
- 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, 0xaa, 0xaa);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
- 0xaa, 0x2e, 0x28, 0x00, 0x00, 0x00, 0xaa, 0x2e,
- 0x28, 0x00, 0x00, 0x00, 0xaa, 0xee, 0xaa, 0xaa,
- 0xaa, 0xaa, 0xaa, 0xee, 0xaa, 0xaa, 0xaa, 0xaa);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
- 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0xff,
- 0xff, 0xff, 0xff, 0xff);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x03);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETGIP3,
- 0xaa, 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
- 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1,
- 0x0e, 0x0e, 0x1e, 0x65, 0x1c, 0x65, 0x00, 0x50,
- 0x20, 0x20, 0x00, 0x00, 0x02, 0x02, 0x02, 0x05,
- 0x14, 0x14, 0x32, 0xb9, 0x23, 0xb9, 0x08);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x01);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1,
- 0x02, 0x00, 0xa8, 0x01, 0xa8, 0x0d, 0xa4, 0x0e);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x02);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETTP1,
- 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x02, 0x00);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETBANK, 0x00);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc3);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETCLOCK, 0xd1, 0xd6);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0xc6);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_SETPTBA, 0x37);
- mipi_dsi_dcs_write_seq(dsi, HX83112A_UNKNOWN1, 0x3f);
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(150);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(50);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETEXTC, 0x83, 0x11, 0x2a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPOWER1,
+ 0x08, 0x28, 0x28, 0x83, 0x83, 0x4c, 0x4f, 0x33);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDISP,
+ 0x00, 0x02, 0x00, 0x90, 0x24, 0x00, 0x08, 0x19,
+ 0xea, 0x11, 0x11, 0x00, 0x11, 0xa3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDRV,
+ 0x58, 0x68, 0x58, 0x68, 0x0f, 0xef, 0x0b, 0xc0,
+ 0x0b, 0xc0, 0x0b, 0xc0, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0x00, 0x14, 0x15, 0x00, 0x29, 0x11, 0x07,
+ 0x12, 0x00, 0x29);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDRV,
+ 0x00, 0x12, 0x12, 0x11, 0x88, 0x12, 0x12, 0x00,
+ 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT,
+ 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
+ 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
+ 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
+ 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
+ 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
+ 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
+ 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
+ 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT,
+ 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
+ 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
+ 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
+ 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
+ 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
+ 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
+ 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
+ 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT,
+ 0xff, 0xfe, 0xfb, 0xf8, 0xf4, 0xf1, 0xed, 0xe6,
+ 0xe2, 0xde, 0xdb, 0xd6, 0xd3, 0xcf, 0xca, 0xc6,
+ 0xc2, 0xbe, 0xb9, 0xb0, 0xa7, 0x9e, 0x96, 0x8d,
+ 0x84, 0x7c, 0x74, 0x6b, 0x62, 0x5a, 0x51, 0x49,
+ 0x41, 0x39, 0x31, 0x29, 0x21, 0x19, 0x12, 0x0a,
+ 0x06, 0x05, 0x02, 0x01, 0x00, 0x00, 0xc9, 0xb3,
+ 0x08, 0x0e, 0xf2, 0xe1, 0x59, 0xf4, 0x22, 0xad,
+ 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETDGCLUT, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTCON,
+ 0x70, 0x00, 0x04, 0xe0, 0x33, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPANEL, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPOWER2, 0x2b, 0x2b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP0,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x08,
+ 0x08, 0x03, 0x03, 0x22, 0x18, 0x07, 0x07, 0x07,
+ 0x07, 0x32, 0x10, 0x06, 0x00, 0x06, 0x32, 0x10,
+ 0x07, 0x00, 0x07, 0x32, 0x19, 0x31, 0x09, 0x31,
+ 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x09, 0x30, 0x00, 0x00, 0x00, 0x06, 0x0d, 0x00,
+ 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP0,
+ 0x00, 0x00, 0x19, 0x10, 0x00, 0x0a, 0x00, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP1,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0xc0, 0xc0, 0x18, 0x18, 0x19, 0x19, 0x18, 0x18,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f,
+ 0x28, 0x28, 0x24, 0x24, 0x02, 0x03, 0x02, 0x03,
+ 0x00, 0x01, 0x00, 0x01, 0x31, 0x31, 0x31, 0x31,
+ 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP2,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x19, 0x19,
+ 0x40, 0x40, 0x18, 0x18, 0x18, 0x18, 0x3f, 0x3f,
+ 0x24, 0x24, 0x28, 0x28, 0x01, 0x00, 0x01, 0x00,
+ 0x03, 0x02, 0x03, 0x02, 0x31, 0x31, 0x31, 0x31,
+ 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3,
+ 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xea, 0xab, 0xaa, 0xaa, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3,
+ 0xaa, 0x2e, 0x28, 0x00, 0x00, 0x00, 0xaa, 0x2e,
+ 0x28, 0x00, 0x00, 0x00, 0xaa, 0xee, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xee, 0xaa, 0xaa, 0xaa, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3,
+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0xff,
+ 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETGIP3,
+ 0xaa, 0xaa, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xea, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTP1,
+ 0x0e, 0x0e, 0x1e, 0x65, 0x1c, 0x65, 0x00, 0x50,
+ 0x20, 0x20, 0x00, 0x00, 0x02, 0x02, 0x02, 0x05,
+ 0x14, 0x14, 0x32, 0xb9, 0x23, 0xb9, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTP1,
+ 0x02, 0x00, 0xa8, 0x01, 0xa8, 0x0d, 0xa4, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETTP1,
+ 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETBANK, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0xc3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETCLOCK, 0xd1, 0xd6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0xc6);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_SETPTBA, 0x37);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112A_UNKNOWN1, 0x3f);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 150);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+
+ return dsi_ctx.accum_err;
}
static int hx83112a_disable(struct drm_panel *panel)
{
struct hx83112a_panel *ctx = to_hx83112a_panel(panel);
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(20);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
+ return dsi_ctx.accum_err;
}
static int hx83112a_prepare(struct drm_panel *panel)
{
struct hx83112a_panel *ctx = to_hx83112a_panel(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
hx83112a_reset(ctx);
- ret = hx83112a_on(ctx);
+ ret = hx83112a_on(ctx->dsi);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- return ret;
}
- return 0;
+ return ret;
}
static int hx83112a_unprepare(struct drm_panel *panel)
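The hx83112a changes above are the template for the rest of this series: a struct mipi_dsi_multi_context wraps the DSI device, every *_multi helper becomes a no-op once a previous call has failed, the first error is latched in dsi_ctx.accum_err, and mipi_dsi_msleep() skips its delay when an error is already pending. The helpers also log the failure themselves, which is why the per-call dev_err() messages disappear throughout these conversions. A minimal sketch of the pattern (function name and register value are illustrative, not taken from this driver):

	static int panel_on(struct mipi_dsi_device *dsi)
	{
		struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };

		/* no-ops once dsi_ctx.accum_err is set; failures are logged internally */
		mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x11);	/* hypothetical register */
		mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
		mipi_dsi_msleep(&dsi_ctx, 120);		/* skipped if an error is pending */
		mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

		return dsi_ctx.accum_err;	/* 0 on success, first error code otherwise */
	}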
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 4a6dcfd781e8..94b7dfef3b5e 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -318,7 +318,7 @@ static int ili9322_regmap_spi_read(void *context, const void *reg,
return spi_write_then_read(spi, buf, 1, val, 1);
}
-static struct regmap_bus ili9322_regmap_bus = {
+static const struct regmap_bus ili9322_regmap_bus = {
.write = ili9322_regmap_spi_write,
.read = ili9322_regmap_spi_read,
.reg_format_endian_default = REGMAP_ENDIAN_BIG,
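The ili9322 hunk is a straightforward constification: the regmap core only ever takes a const struct regmap_bus *, so the ops table can live in read-only data. A sketch of the idiom, with hypothetical names:

	static const struct regmap_bus example_bus = {	/* now eligible for .rodata */
		.write = example_spi_write,	/* int (*)(void *ctx, const void *data, size_t count) */
		.read = example_spi_read,
		.reg_format_endian_default = REGMAP_ENDIAN_BIG,
		.val_format_endian_default = REGMAP_ENDIAN_BIG,
	};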
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 1fbc5d433d75..ff39f5dd4097 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -13,9 +13,6 @@
* Derived from drivers/drm/gpu/panel/panel-ilitek-ili9322.c
* the reuse of DBI abstraction part referred from Linus's patch
* "drm/panel: s6e63m0: Switch to DBI abstraction for SPI"
- *
- * For only-dbi part, copy from David's code (drm/tiny/ili9341.c)
- * Copyright 2018 David Lechner <david@lechnology.com>
*/
#include <linux/backlight.h>
@@ -486,176 +483,6 @@ static const struct drm_panel_funcs ili9341_dpi_funcs = {
.get_modes = ili9341_dpi_get_modes,
};
-static void ili9341_dbi_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
-{
- struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
- struct mipi_dbi *dbi = &dbidev->dbi;
- u8 addr_mode;
- int ret, idx;
-
- if (!drm_dev_enter(pipe->crtc.dev, &idx))
- return;
-
- ret = mipi_dbi_poweron_conditional_reset(dbidev);
- if (ret < 0)
- goto out_exit;
- if (ret == 1)
- goto out_enable;
-
- mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
-
- mipi_dbi_command(dbi, ILI9341_POWERB, 0x00, 0xc1, 0x30);
- mipi_dbi_command(dbi, ILI9341_POWER_SEQ, 0x64, 0x03, 0x12, 0x81);
- mipi_dbi_command(dbi, ILI9341_DTCA, 0x85, 0x00, 0x78);
- mipi_dbi_command(dbi, ILI9341_POWERA, 0x39, 0x2c, 0x00, 0x34, 0x02);
- mipi_dbi_command(dbi, ILI9341_PRC, ILI9341_DBI_PRC_NORMAL);
- mipi_dbi_command(dbi, ILI9341_DTCB, 0x00, 0x00);
-
- /* Power Control */
- mipi_dbi_command(dbi, ILI9341_POWER1, ILI9341_DBI_VCOMH_4P6V);
- mipi_dbi_command(dbi, ILI9341_POWER2, ILI9341_DBI_PWR_2_DEFAULT);
- /* VCOM */
- mipi_dbi_command(dbi, ILI9341_VCOM1, ILI9341_DBI_VCOM_1_VMH_4P25V,
- ILI9341_DBI_VCOM_1_VML_1P5V);
- mipi_dbi_command(dbi, ILI9341_VCOM2, ILI9341_DBI_VCOM_2_DEC_58);
-
- /* Memory Access Control */
- mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
- MIPI_DCS_PIXEL_FMT_16BIT);
-
- /* Frame Rate */
- mipi_dbi_command(dbi, ILI9341_FRC, ILI9341_DBI_FRC_DIVA & 0x03,
- ILI9341_DBI_FRC_RTNA & 0x1f);
-
- /* Gamma */
- mipi_dbi_command(dbi, ILI9341_3GAMMA_EN, 0x00);
- mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, ILI9341_GAMMA_CURVE_1);
- mipi_dbi_command(dbi, ILI9341_PGAMMA,
- 0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1,
- 0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00);
- mipi_dbi_command(dbi, ILI9341_NGAMMA,
- 0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1,
- 0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f);
-
- /* DDRAM */
- mipi_dbi_command(dbi, ILI9341_ETMOD, ILI9341_DBI_EMS_GAS |
- ILI9341_DBI_EMS_DTS |
- ILI9341_DBI_EMS_GON);
-
- /* Display */
- mipi_dbi_command(dbi, ILI9341_DFC, 0x08, 0x82, 0x27, 0x00);
- mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
- msleep(100);
-
- mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
- msleep(100);
-
-out_enable:
- switch (dbidev->rotation) {
- default:
- addr_mode = ILI9341_MADCTL_MX;
- break;
- case 90:
- addr_mode = ILI9341_MADCTL_MV;
- break;
- case 180:
- addr_mode = ILI9341_MADCTL_MY;
- break;
- case 270:
- addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
- ILI9341_MADCTL_MX;
- break;
- }
-
- addr_mode |= ILI9341_MADCTL_BGR;
- mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
- drm_info(&dbidev->drm, "Initialized display serial interface\n");
-out_exit:
- drm_dev_exit(idx);
-}
-
-static const struct drm_simple_display_pipe_funcs ili9341_dbi_funcs = {
- DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(ili9341_dbi_enable),
-};
-
-static const struct drm_display_mode ili9341_dbi_mode = {
- DRM_SIMPLE_MODE(240, 320, 37, 49),
-};
-
-DEFINE_DRM_GEM_DMA_FOPS(ili9341_dbi_fops);
-
-static struct drm_driver ili9341_dbi_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .fops = &ili9341_dbi_fops,
- DRM_GEM_DMA_DRIVER_OPS_VMAP,
- .debugfs_init = mipi_dbi_debugfs_init,
- .name = "ili9341",
- .desc = "Ilitek ILI9341",
- .date = "20210716",
- .major = 1,
- .minor = 0,
-};
-
-static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc,
- struct gpio_desc *reset)
-{
- struct device *dev = &spi->dev;
- struct mipi_dbi_dev *dbidev;
- struct mipi_dbi *dbi;
- struct drm_device *drm;
- struct regulator *vcc;
- u32 rotation = 0;
- int ret;
-
- vcc = devm_regulator_get_optional(dev, "vcc");
- if (IS_ERR(vcc)) {
- dev_err(dev, "get optional vcc failed\n");
- vcc = NULL;
- }
-
- dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver,
- struct mipi_dbi_dev, drm);
- if (IS_ERR(dbidev))
- return PTR_ERR(dbidev);
-
- dbi = &dbidev->dbi;
- drm = &dbidev->drm;
- dbi->reset = reset;
- dbidev->regulator = vcc;
-
- drm_mode_config_init(drm);
-
- dbidev->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(dbidev->backlight))
- return PTR_ERR(dbidev->backlight);
-
- device_property_read_u32(dev, "rotation", &rotation);
-
- ret = mipi_dbi_spi_init(spi, dbi, dc);
- if (ret)
- return ret;
-
- ret = mipi_dbi_dev_init(dbidev, &ili9341_dbi_funcs,
- &ili9341_dbi_mode, rotation);
- if (ret)
- return ret;
-
- drm_mode_config_reset(drm);
-
- ret = drm_dev_register(drm, 0);
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, drm);
-
- drm_fbdev_dma_setup(drm, 0);
-
- return 0;
-}
-
static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
struct gpio_desc *reset)
{
@@ -711,7 +538,6 @@ static int ili9341_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
struct gpio_desc *dc;
struct gpio_desc *reset;
- const struct spi_device_id *id = spi_get_device_id(spi);
reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(reset))
@@ -721,36 +547,15 @@ static int ili9341_probe(struct spi_device *spi)
if (IS_ERR(dc))
return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n");
- if (!strcmp(id->name, "sf-tc240t-9370-t"))
- return ili9341_dpi_probe(spi, dc, reset);
-
- if (!strcmp(id->name, "yx240qv29"))
- return ili9341_dbi_probe(spi, dc, reset);
-
- return -ENODEV;
+ return ili9341_dpi_probe(spi, dc, reset);
}
static void ili9341_remove(struct spi_device *spi)
{
- const struct spi_device_id *id = spi_get_device_id(spi);
struct ili9341 *ili = spi_get_drvdata(spi);
- struct drm_device *drm = spi_get_drvdata(spi);
-
- if (!strcmp(id->name, "sf-tc240t-9370-t")) {
- ili9341_dpi_power_off(ili);
- drm_panel_remove(&ili->panel);
- } else if (!strcmp(id->name, "yx240qv29")) {
- drm_dev_unplug(drm);
- drm_atomic_helper_shutdown(drm);
- }
-}
-static void ili9341_shutdown(struct spi_device *spi)
-{
- const struct spi_device_id *id = spi_get_device_id(spi);
-
- if (!strcmp(id->name, "yx240qv29"))
- drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+ ili9341_dpi_power_off(ili);
+ drm_panel_remove(&ili->panel);
}
static const struct of_device_id ili9341_of_match[] = {
@@ -758,19 +563,11 @@ static const struct of_device_id ili9341_of_match[] = {
.compatible = "st,sf-tc240t-9370-t",
.data = &ili9341_stm32f429_disco_data,
},
- {
- /* porting from tiny/ili9341.c
- * for original mipi dbi compitable
- */
- .compatible = "adafruit,yx240qv29",
- .data = NULL,
- },
{ }
};
MODULE_DEVICE_TABLE(of, ili9341_of_match);
static const struct spi_device_id ili9341_id[] = {
- { "yx240qv29", 0 },
{ "sf-tc240t-9370-t", 0 },
{ }
};
@@ -779,7 +576,6 @@ MODULE_DEVICE_TABLE(spi, ili9341_id);
static struct spi_driver ili9341_driver = {
.probe = ili9341_probe,
.remove = ili9341_remove,
- .shutdown = ili9341_shutdown,
.id_table = ili9341_id,
.driver = {
.name = "panel-ilitek-ili9341",
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 084c37fa7348..28cd7560e5db 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -42,6 +42,7 @@ struct ili9881c_desc {
const size_t init_length;
const struct drm_display_mode *mode;
const unsigned long mode_flags;
+ u8 default_address_mode;
};
struct ili9881c {
@@ -53,6 +54,7 @@ struct ili9881c {
struct gpio_desc *reset;
enum drm_panel_orientation orientation;
+ u8 address_mode;
};
#define ILI9881C_SWITCH_PAGE_INSTR(_page) \
@@ -815,8 +817,6 @@ static const struct ili9881c_instr tl050hdv35_init[] = {
ILI9881C_COMMAND_INSTR(0xd1, 0x4b),
ILI9881C_COMMAND_INSTR(0xd2, 0x60),
ILI9881C_COMMAND_INSTR(0xd3, 0x39),
- ILI9881C_SWITCH_PAGE_INSTR(0),
- ILI9881C_COMMAND_INSTR(0x36, 0x03),
};
static const struct ili9881c_instr w552946ab_init[] = {
@@ -1299,6 +1299,14 @@ static int ili9881c_prepare(struct drm_panel *panel)
if (ret)
return ret;
+ if (ctx->address_mode) {
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_ADDRESS_MODE,
+ &ctx->address_mode,
+ sizeof(ctx->address_mode));
+ if (ret < 0)
+ return ret;
+ }
+
ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret)
return ret;
@@ -1463,6 +1471,10 @@ static int ili9881c_get_modes(struct drm_panel *panel,
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
+ if (ctx->address_mode == 0x3)
+ connector->display_info.subpixel_order = SubPixelHorizontalBGR;
+ else
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
/*
* TODO: Remove once all drm drivers call
@@ -1521,6 +1533,12 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
return ret;
}
+ ctx->address_mode = ctx->desc->default_address_mode;
+ if (ctx->orientation == DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP) {
+ ctx->address_mode ^= 0x03;
+ ctx->orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+ }
+
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
@@ -1572,6 +1590,7 @@ static const struct ili9881c_desc tl050hdv35_desc = {
.mode = &tl050hdv35_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
+ .default_address_mode = 0x03,
};
static const struct ili9881c_desc w552946aba_desc = {
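The ili9881c changes teach the driver to manage MIPI_DCS_SET_ADDRESS_MODE itself instead of burying a 0x36 write in the tl050hdv35 init table. A per-panel default_address_mode is sent during prepare, and a bottom-up orientation from the device tree is absorbed at probe time: toggling the two low flip bits inverts both scan directions, which amounts to a 180-degree rotation, so the panel can afterwards be reported as normally oriented. Because the 0x03 value also mirrors the horizontal subpixel layout, get_modes() reports BGR subpixel order for it. The effective logic, condensed:

	u8 mode = desc->default_address_mode;		/* 0x03 for tl050hdv35 */

	if (orientation == DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP) {
		mode ^= 0x03;		/* flip both axes == rotate 180 degrees */
		orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
	}
	/* later, in prepare(), if mode is non-zero: */
	mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &mode, sizeof(mode));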
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 44897e5218a6..45d09e6fa667 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -26,7 +26,6 @@ struct jadard_panel_desc {
unsigned int lanes;
enum mipi_dsi_pixel_format format;
int (*init)(struct jadard *jadard);
- u32 num_init_cmds;
bool lp11_before_reset;
bool reset_before_power_off_vcioo;
unsigned int vcioo_to_lp11_delay_ms;
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
index 14932cb3defc..0e5e8e57bd1e 100644
--- a/drivers/gpu/drm/panel/panel-khadas-ts050.c
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -617,12 +617,12 @@ static const struct khadas_ts050_panel_cmd ts050_init_code[] = {
{0xd4, {0x04}, 0x01}, /* RGBMIPICTRL: VSYNC front porch = 4 */
};
-struct khadas_ts050_panel_data ts050_panel_data = {
+static struct khadas_ts050_panel_data ts050_panel_data = {
.init_code = (struct khadas_ts050_panel_cmd *)ts050_init_code,
.len = ARRAY_SIZE(ts050_init_code)
};
-struct khadas_ts050_panel_data ts050v2_panel_data = {
+static struct khadas_ts050_panel_data ts050v2_panel_data = {
.init_code = (struct khadas_ts050_panel_cmd *)ts050v2_init_code,
.len = ARRAY_SIZE(ts050v2_init_code)
};
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 292aa26a456d..77f74e6c467e 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -26,7 +26,7 @@ struct ltk050h3146w;
struct ltk050h3146w_desc {
const unsigned long mode_flags;
const struct drm_display_mode *mode;
- int (*init)(struct ltk050h3146w *ctx);
+ void (*init)(struct mipi_dsi_multi_context *dsi_ctx);
};
struct ltk050h3146w {
@@ -243,67 +243,57 @@ struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel)
return container_of(panel, struct ltk050h3146w, panel);
}
-static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx)
+static void ltk050h3148w_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
-
/*
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0xff, 0x83, 0x94);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44,
- 0x71, 0x31, 0x55, 0x2f);
- mipi_dsi_dcs_write_seq(dsi, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
- mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x88);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70,
- 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74,
- 0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86);
- mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e,
- 0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54,
- 0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05,
- 0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07,
- 0x17, 0x11, 0x40);
- mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b,
- 0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01,
- 0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
- mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b,
- 0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06,
- 0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
- 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
- mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14,
- 0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c,
- 0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58,
- 0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00,
- 0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e,
- 0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88,
- 0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b,
- 0x5d, 0x61, 0x65, 0x7f);
- mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x0b);
- mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x1f, 0x31);
- mipi_dsi_dcs_write_seq(dsi, 0xb6, 0xc4, 0xc4);
- mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef);
- mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02);
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
- if (ret < 0) {
- dev_err(ctx->dev, "failed to set tear on: %d\n", ret);
- return ret;
- }
-
- msleep(60);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb9, 0xff, 0x83, 0x94);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44,
+ 0x71, 0x31, 0x55, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd2, 0x88);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70,
+ 0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74,
+ 0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e,
+ 0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54,
+ 0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05,
+ 0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07,
+ 0x17, 0x11, 0x40);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b,
+ 0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01,
+ 0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b,
+ 0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06,
+ 0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14,
+ 0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c,
+ 0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58,
+ 0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00,
+ 0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e,
+ 0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88,
+ 0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b,
+ 0x5d, 0x61, 0x65, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcc, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x1f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0xc4, 0xc4);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbd, 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb1, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbd, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc6, 0xef);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd4, 0x02);
+
+ mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 1);
+ mipi_dsi_msleep(dsi_ctx, 60);
}
static const struct drm_display_mode ltk050h3148w_mode = {
@@ -327,74 +317,64 @@ static const struct ltk050h3146w_desc ltk050h3148w_data = {
MIPI_DSI_MODE_VIDEO_BURST,
};
-static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
+static void ltk050h3146w_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
-
/*
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
- mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
- mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
- 0x01);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
- mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
-
- mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
- 0x28, 0x04, 0xcc, 0xcc, 0xcc);
- mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
- mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
- mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
- 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
- 0x16, 0x00, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
- 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
- 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
- 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
- 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
- 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
- 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
- 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
- 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
- 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
- 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
- mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
- 0x21, 0x00, 0x60);
- mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
- mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
- mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
- mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
- mipi_dsi_dcs_write_seq(dsi, 0xde, 0x00);
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
- if (ret < 0) {
- dev_err(ctx->dev, "failed to set tear on: %d\n", ret);
- return ret;
- }
-
- msleep(60);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xdf, 0x93, 0x65, 0xf8);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
+ 0x01);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x00, 0xb5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x00, 0xb5);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
+
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb9, 0x00, 0xc4, 0x23, 0x07);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
+ 0x28, 0x04, 0xcc, 0xcc, 0xcc);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x0f, 0x04);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbe, 0x1e, 0xf2);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x26, 0x03);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x00, 0x12);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
+ 0x80);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
+ 0x16, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
+ 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
+ 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
+ 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
+ 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
+ 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
+ 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
+ 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
+ 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
+ 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
+ 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
+ 0x21, 0x00, 0x60);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xdd, 0x2c, 0xa3, 0x00);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xde, 0x02);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x32, 0x1c);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb7, 0x3b, 0x70, 0x00, 0x04);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x11);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc2, 0x20, 0x38, 0x1e, 0x84);
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xde, 0x00);
+
+ mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 1);
+ mipi_dsi_msleep(dsi_ctx, 60);
}
static const struct drm_display_mode ltk050h3146w_mode = {
@@ -418,79 +398,42 @@ static const struct ltk050h3146w_desc ltk050h3146w_data = {
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
};
-static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page)
+static void ltk050h3146w_a2_select_page(struct mipi_dsi_multi_context *dsi_ctx, int page)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- u8 d[3] = { 0x98, 0x81, page };
+ u8 d[4] = { 0xff, 0x98, 0x81, page };
- return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d));
+ mipi_dsi_dcs_write_buffer_multi(dsi_ctx, d, ARRAY_SIZE(d));
}
-static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page,
+static void ltk050h3146w_a2_write_page(struct mipi_dsi_multi_context *dsi_ctx, int page,
const struct ltk050h3146w_cmd *cmds,
int num)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int i, ret;
+ ltk050h3146w_a2_select_page(dsi_ctx, page);
- ret = ltk050h3146w_a2_select_page(ctx, page);
- if (ret < 0) {
- dev_err(ctx->dev, "failed to select page %d: %d\n", page, ret);
- return ret;
- }
-
- for (i = 0; i < num; i++) {
- ret = mipi_dsi_generic_write(dsi, &cmds[i],
+ for (int i = 0; i < num; i++)
+ mipi_dsi_generic_write_multi(dsi_ctx, &cmds[i],
sizeof(struct ltk050h3146w_cmd));
- if (ret < 0) {
- dev_err(ctx->dev, "failed to write page %d init cmds: %d\n", page, ret);
- return ret;
- }
- }
-
- return 0;
}
-static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx)
+static void ltk050h3146w_a2_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
-
/*
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
- ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds,
+ ltk050h3146w_a2_write_page(dsi_ctx, 3, page3_cmds,
ARRAY_SIZE(page3_cmds));
- if (ret < 0)
- return ret;
-
- ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds,
+ ltk050h3146w_a2_write_page(dsi_ctx, 4, page4_cmds,
ARRAY_SIZE(page4_cmds));
- if (ret < 0)
- return ret;
-
- ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds,
+ ltk050h3146w_a2_write_page(dsi_ctx, 1, page1_cmds,
ARRAY_SIZE(page1_cmds));
- if (ret < 0)
- return ret;
-
- ret = ltk050h3146w_a2_select_page(ctx, 0);
- if (ret < 0) {
- dev_err(ctx->dev, "failed to select page 0: %d\n", ret);
- return ret;
- }
+ ltk050h3146w_a2_select_page(dsi_ctx, 0);
/* vendor code called this without param, where there should be one */
- ret = mipi_dsi_dcs_set_tear_on(dsi, 0);
- if (ret < 0) {
- dev_err(ctx->dev, "failed to set tear on: %d\n", ret);
- return ret;
- }
-
- msleep(60);
+ mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 0);
- return 0;
+ mipi_dsi_msleep(dsi_ctx, 60);
}
static const struct drm_display_mode ltk050h3146w_a2_mode = {
@@ -518,19 +461,12 @@ static int ltk050h3146w_unprepare(struct drm_panel *panel)
{
struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "failed to set display off: %d\n", ret);
- return ret;
- }
-
- mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ if (dsi_ctx.accum_err)
+ return dsi_ctx.accum_err;
regulator_disable(ctx->iovcc);
regulator_disable(ctx->vci);
@@ -542,17 +478,17 @@ static int ltk050h3146w_prepare(struct drm_panel *panel)
{
struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dev_dbg(ctx->dev, "Resetting the panel\n");
- ret = regulator_enable(ctx->vci);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret);
- return ret;
+ dsi_ctx.accum_err = regulator_enable(ctx->vci);
+ if (dsi_ctx.accum_err) {
+ dev_err(ctx->dev, "Failed to enable vci supply: %d\n", dsi_ctx.accum_err);
+ return dsi_ctx.accum_err;
}
- ret = regulator_enable(ctx->iovcc);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ dsi_ctx.accum_err = regulator_enable(ctx->iovcc);
+ if (dsi_ctx.accum_err) {
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", dsi_ctx.accum_err);
goto disable_vci;
}
@@ -561,28 +497,15 @@ static int ltk050h3146w_prepare(struct drm_panel *panel)
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
msleep(20);
- ret = ctx->panel_desc->init(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
- goto disable_iovcc;
- }
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
- goto disable_iovcc;
- }
-
+ ctx->panel_desc->init(&dsi_ctx);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
/* T9: 120ms */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
+ if (dsi_ctx.accum_err)
goto disable_iovcc;
- }
-
- msleep(50);
return 0;
@@ -590,7 +513,7 @@ disable_iovcc:
regulator_disable(ctx->iovcc);
disable_vci:
regulator_disable(ctx->vci);
- return ret;
+ return dsi_ctx.accum_err;
}
static int ltk050h3146w_get_modes(struct drm_panel *panel,
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 1b8e3156914c..ba6c015aabba 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -246,7 +246,7 @@ MODULE_DEVICE_TABLE(of, panel_lvds_of_table);
static struct platform_driver panel_lvds_driver = {
.probe = panel_lvds_probe,
- .remove_new = panel_lvds_remove,
+ .remove = panel_lvds_remove,
.driver = {
.name = "panel-lvds",
.of_match_table = panel_lvds_of_table,
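The panel-lvds hunk finishes a tree-wide rename: .remove_new was the transitional name used while struct platform_driver's .remove callback was converted from returning int to returning void; with that conversion complete, the void-returning callback is plain .remove again. The resulting shape:

	static void panel_lvds_remove(struct platform_device *pdev)
	{
		/* teardown only; a return value on remove could not be acted on anyway */
	}

	static struct platform_driver panel_lvds_driver = {
		.probe = panel_lvds_probe,
		.remove = panel_lvds_remove,	/* formerly .remove_new */
		.driver = { .name = "panel-lvds" },
	};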
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index d3baccfe6286..06e16a7c14a7 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -917,7 +917,7 @@ static const struct nv3052c_panel_info wl_355608_a8_panel_info = {
static const struct spi_device_id nv3052c_ids[] = {
{ "ltk035c5444t", },
{ "fs035vg158", },
- { "wl-355608-a8", },
+ { "rg35xx-plus-panel", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, nv3052c_ids);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 57686340de49..549b86f2cc28 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -38,6 +38,7 @@
#define NT35510_CMD_CORRECT_GAMMA BIT(0)
#define NT35510_CMD_CONTROL_DISPLAY BIT(1)
+#define NT35510_CMD_SETVCMOFF BIT(2)
#define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */
#define MCS_CMD_READ_ID1 0xDA
@@ -721,11 +722,13 @@ static int nt35510_setup_power(struct nt35510 *nt)
if (ret)
return ret;
- ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCMOFF,
- NT35510_P1_VCMOFF_LEN,
- nt->conf->vcmoff);
- if (ret)
- return ret;
+ if (nt->conf->cmds & NT35510_CMD_SETVCMOFF) {
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVCMOFF,
+ NT35510_P1_VCMOFF_LEN,
+ nt->conf->vcmoff);
+ if (ret)
+ return ret;
+ }
/* Typically 10 ms */
usleep_range(10000, 20000);
@@ -1319,7 +1322,7 @@ static const struct nt35510_config nt35510_frida_frd400b25025 = {
},
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM,
- .cmds = NT35510_CMD_CONTROL_DISPLAY,
+ .cmds = NT35510_CMD_CONTROL_DISPLAY | NT35510_CMD_SETVCMOFF,
/* 0x03: AVDD = 6.2V */
.avdd = { 0x03, 0x03, 0x03 },
/* 0x46: PCK = 2 x Hsync, BTP = 2.5 x VDDB */
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index b036208f9356..08b22b592ab0 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -481,9 +481,9 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
- if (!nt->dsi[1]) {
+ if (IS_ERR(nt->dsi[1])) {
dev_err(dev, "Cannot get secondary DSI node\n");
- return -ENODEV;
+ return PTR_ERR(nt->dsi[1]);
}
num_dsis++;
}
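The nt35950 fix corrects an error-checking mismatch: mipi_dsi_device_register_full() reports failure through an ERR_PTR()-encoded pointer, never NULL, so the old !nt->dsi[1] test could never trigger and the real errno was lost. The convention:

	#include <linux/err.h>

	struct mipi_dsi_device *dev = mipi_dsi_device_register_full(host, info);
	if (IS_ERR(dev))		/* failure is ERR_PTR(-errno), not NULL */
		return PTR_ERR(dev);	/* propagate the encoded error code */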
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 18bd2ee71201..04f1d2676c78 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -1095,18 +1095,6 @@ static int nt36523_unprepare(struct drm_panel *panel)
static void nt36523_remove(struct mipi_dsi_device *dsi)
{
struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
- int ret;
-
- ret = mipi_dsi_detach(pinfo->dsi[0]);
- if (ret < 0)
- dev_err(&dsi->dev, "failed to detach from DSI0 host: %d\n", ret);
-
- if (pinfo->desc->is_dual_dsi) {
- ret = mipi_dsi_detach(pinfo->dsi[1]);
- if (ret < 0)
- dev_err(&pinfo->dsi[1]->dev, "failed to detach from DSI1 host: %d\n", ret);
- mipi_dsi_device_unregister(pinfo->dsi[1]);
- }
drm_panel_remove(&pinfo->panel);
}
@@ -1251,7 +1239,7 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
if (!dsi1_host)
return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
- pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
+ pinfo->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi1_host, info);
if (IS_ERR(pinfo->dsi[1])) {
dev_err(dev, "cannot get secondary DSI device\n");
return PTR_ERR(pinfo->dsi[1]);
@@ -1288,7 +1276,7 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
pinfo->dsi[i]->format = pinfo->desc->format;
pinfo->dsi[i]->mode_flags = pinfo->desc->mode_flags;
- ret = mipi_dsi_attach(pinfo->dsi[i]);
+ ret = devm_mipi_dsi_attach(dev, pinfo->dsi[i]);
if (ret < 0)
return dev_err_probe(dev, ret, "cannot attach to DSI%d host.\n", i);
}
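The nt36523 conversion to devm_mipi_dsi_device_register_full() and devm_mipi_dsi_attach() ties the secondary DSI device's detach and unregister to the probing device's lifetime, so the hand-rolled cleanup deleted from nt36523_remove() is no longer needed and cannot be forgotten on error paths. After the patch, remove reduces to:

	static void nt36523_remove(struct mipi_dsi_device *dsi)
	{
		struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);

		drm_panel_remove(&pinfo->panel);
		/* DSI detach/unregister now happen automatically via devres */
	}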
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index a9b5dad70bc1..87bbb25d119a 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -9,6 +9,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 4618c892cdd6..e10e469aa7a6 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -400,7 +400,7 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c)
rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
/* Look up the DSI host. It needs to probe before we do. */
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
if (!endpoint)
return -ENODEV;
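The Raspberry Pi touchscreen change swaps an order-dependent lookup for an explicit one: of_graph_get_next_endpoint(node, NULL) returns whichever endpoint happens to come first in the device tree, while of_graph_get_endpoint_by_regs(node, 0, -1) asks for port 0 with any endpoint number (-1 is a wildcard), matching what the binding actually defines. Usage:

	#include <linux/of_graph.h>

	/* port reg = 0, endpoint reg = -1 (any): immune to DT node ordering */
	endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
	if (!endpoint)
		return -ENODEV;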
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm69380.c b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
index 4dca6802faef..d3071c01aaea 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm69380.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
@@ -46,108 +46,73 @@ static void rm69380_reset(struct rm69380_panel *ctx)
static int rm69380_on(struct rm69380_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi[0];
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
if (ctx->dsi[1])
ctx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0xd4);
- mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0xd0);
- mipi_dsi_dcs_write_seq(dsi, 0x48, 0x00);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x26);
- mipi_dsi_dcs_write_seq(dsi, 0x75, 0x3f);
- mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x1a);
- mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x28);
- mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x08);
-
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
-
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(20);
-
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
- msleep(36);
-
- return 0;
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0xd0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x26);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0x3f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfe, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc2, 0x08);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 36);
+
+ return dsi_ctx.accum_err;
}
-static int rm69380_off(struct rm69380_panel *ctx)
+static void rm69380_off(struct rm69380_panel *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi[0];
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
if (ctx->dsi[1])
ctx->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
- msleep(35);
-
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(20);
-
- return 0;
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 35);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
}
static int rm69380_prepare(struct drm_panel *panel)
{
struct rm69380_panel *ctx = to_rm69380_panel(panel);
- struct device *dev = &ctx->dsi[0]->dev;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ if (ret < 0)
return ret;
- }
rm69380_reset(ctx);
ret = rm69380_on(ctx);
if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- return ret;
}
- return 0;
+ return ret;
}
static int rm69380_unprepare(struct drm_panel *panel)
{
struct rm69380_panel *ctx = to_rm69380_panel(panel);
- struct device *dev = &ctx->dsi[0]->dev;
- int ret;
- ret = rm69380_off(ctx);
- if (ret < 0)
- dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+ rm69380_off(ctx);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
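rm69380_off() turning void is deliberate, and the new Samsung drivers below follow the same policy: on the power-down path a failed DCS write has already been logged by the *_multi helpers, and the reset and regulator teardown must run regardless, so unprepare has nothing useful to do with the error. The general shape (names illustrative):

	static int panel_unprepare(struct drm_panel *panel)
	{
		panel_off(ctx);		/* best effort; DSI errors only logged */

		/* always finish the power sequence */
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);

		return 0;
	}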
diff --git a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
new file mode 100644
index 000000000000..cf6186312252
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer Command Set */
+#define MCS_ACCESS_PROT_OFF 0xb0
+#define MCS_PASSWD 0xf0
+
+struct ams581vf01 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data ams581vf01_supplies[] = {
+ { .supply = "vdd3p3" },
+ { .supply = "vddio" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline struct ams581vf01 *to_ams581vf01(struct drm_panel *panel)
+{
+ return container_of(panel, struct ams581vf01, panel);
+}
+
+static void ams581vf01_reset(struct ams581vf01 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int ams581vf01_on(struct ams581vf01 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ /* Sleep Out, Wait 10ms */
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ usleep_range(10000, 11000);
+
+ /* TE On */
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ /* MIC Setting */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xeb, 0x17,
+ 0x41, 0x92,
+ 0x0e, 0x10,
+ 0x82, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */
+
+ /* Column & Page Address Setting */
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x0437);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0923);
+
+ /* Brightness Setting */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+
+ /* Horizontal & Vertical sync Setting */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe8, 0x11, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */
+ mipi_dsi_msleep(&dsi_ctx, 110);
+
+ /* Display On */
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static void ams581vf01_off(struct ams581vf01 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ /* Display Off & Sleep In */
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+
+ /* VCI operating mode change */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0x5a, 0x5a); /* Unlock */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD, 0xa5, 0xa5); /* Lock */
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+}
+
+static int ams581vf01_prepare(struct drm_panel *panel)
+{
+ struct ams581vf01 *ctx = to_ams581vf01(panel);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ams581vf01_supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ams581vf01_reset(ctx);
+
+ ret = ams581vf01_on(ctx);
+ if (ret < 0) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ams581vf01_supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ams581vf01_unprepare(struct drm_panel *panel)
+{
+ struct ams581vf01 *ctx = to_ams581vf01(panel);
+
+ ams581vf01_off(ctx);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ams581vf01_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode ams581vf01_mode = {
+ .clock = (1080 + 32 + 73 + 98) * (2340 + 8 + 1 + 8) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 32,
+ .hsync_end = 1080 + 32 + 73,
+ .htotal = 1080 + 32 + 73 + 98,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 8,
+ .vsync_end = 2340 + 8 + 1,
+ .vtotal = 2340 + 8 + 1 + 8,
+ .width_mm = 62,
+ .height_mm = 134,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int ams581vf01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &ams581vf01_mode);
+}
+
+static const struct drm_panel_funcs ams581vf01_panel_funcs = {
+ .prepare = ams581vf01_prepare,
+ .unprepare = ams581vf01_unprepare,
+ .get_modes = ams581vf01_get_modes,
+};
+
+static int ams581vf01_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops ams581vf01_bl_ops = {
+ .update_status = ams581vf01_bl_update_status,
+};
+
+static struct backlight_device *
+ams581vf01_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 511,
+ .max_brightness = 1023,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &ams581vf01_bl_ops, &props);
+}
+
+static int ams581vf01_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct ams581vf01 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(&dsi->dev,
+ ARRAY_SIZE(ams581vf01_supplies),
+ ams581vf01_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &ams581vf01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = ams581vf01_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void ams581vf01_remove(struct mipi_dsi_device *dsi)
+{
+ struct ams581vf01 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id ams581vf01_of_match[] = {
+ { .compatible = "samsung,ams581vf01" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ams581vf01_of_match);
+
+static struct mipi_dsi_driver ams581vf01_driver = {
+ .probe = ams581vf01_probe,
+ .remove = ams581vf01_remove,
+ .driver = {
+ .name = "panel-samsung-ams581vf01",
+ .of_match_table = ams581vf01_of_match,
+ },
+};
+module_mipi_dsi_driver(ams581vf01_driver);
+
+MODULE_AUTHOR("Danila Tikhonov <danila@jiaxyga.com>");
+MODULE_DESCRIPTION("DRM driver for SAMSUNG AMS581VF01 cmd mode dsi panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
new file mode 100644
index 000000000000..817365cb5e46
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer Command Set */
+#define MCS_ACCESS_PROT_OFF 0xb0
+#define MCS_UNKNOWN_B7 0xb7
+#define MCS_BIAS_CURRENT_CTRL 0xd1
+#define MCS_PASSWD1 0xf0
+#define MCS_PASSWD2 0xfc
+#define MCS_UNKNOWN_FF 0xff
+
+struct ams639rq08 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data ams639rq08_supplies[] = {
+ { .supply = "vdd3p3" },
+ { .supply = "vddio" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline struct ams639rq08 *to_ams639rq08(struct drm_panel *panel)
+{
+ return container_of(panel, struct ams639rq08, panel);
+}
+
+static void ams639rq08_reset(struct ams639rq08 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int ams639rq08_on(struct ams639rq08 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ /* Delay 2ms for VCI1 power */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_UNKNOWN_FF, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_BIAS_CURRENT_CTRL, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5);
+
+ /* Sleep Out */
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ usleep_range(10000, 11000);
+
+ /* TE OUT (Vsync On) */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a);
+
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ /* DBV Smooth Transition */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_UNKNOWN_B7, 0x01, 0x4b);
+
+ /* Edge Dimming Speed Setting */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_UNKNOWN_B7, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5);
+
+ /* Page Address Set */
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0923);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a);
+
+ /* Set DDIC internal HFP */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x23);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_BIAS_CURRENT_CTRL, 0x11);
+
+ /* OFC Setting 84.1 MHz */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe9, 0x11, 0x55,
+ 0xa6, 0x75, 0xa3,
+ 0xb9, 0xa1, 0x4a,
+ 0x00, 0x1a, 0xb8);
+
+ /* Err_FG Setting */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1,
+ 0x00, 0x00, 0x02,
+ 0x02, 0x42, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2,
+ 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ACCESS_PROT_OFF, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+
+ /* Brightness Control */
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000);
+
+ /* Display On */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_msleep(&dsi_ctx, 67);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
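The init sequence above funnels every transfer through one struct mipi_dsi_multi_context: each *_multi() helper becomes a no-op once dsi_ctx.accum_err is set, so per-command error handling collapses into the single return at the end. A minimal sketch of the pattern, assuming only the public mipi_dsi_dcs_write_buffer() call; the struct and helper names below are illustrative, not the kernel's actual definitions:

	/* Illustrative shape of the accumulated-error helpers used above */
	struct multi_ctx_sketch {
		struct mipi_dsi_device *dsi;
		int accum_err;			/* first failure wins */
	};

	static void sketch_write_multi(struct multi_ctx_sketch *ctx,
				       const void *payload, size_t len)
	{
		ssize_t ret;

		if (ctx->accum_err)
			return;			/* an earlier step failed: skip */

		ret = mipi_dsi_dcs_write_buffer(ctx->dsi, payload, len);
		if (ret < 0)
			ctx->accum_err = (int)ret;
	}

This is why ams639rq08_on() contains no per-call if (ret) branches: a failed write simply turns the rest of the sequence into no-ops.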
+
+static void ams639rq08_off(struct ams639rq08 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+}
+
+static int ams639rq08_prepare(struct drm_panel *panel)
+{
+ struct ams639rq08 *ctx = to_ams639rq08(panel);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ams639rq08_supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ams639rq08_reset(ctx);
+
+ ret = ams639rq08_on(ctx);
+ if (ret < 0) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ams639rq08_supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ams639rq08_unprepare(struct drm_panel *panel)
+{
+ struct ams639rq08 *ctx = to_ams639rq08(panel);
+
+ ams639rq08_off(ctx);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ams639rq08_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode ams639rq08_mode = {
+ .clock = (1080 + 64 + 20 + 64) * (2340 + 64 + 20 + 64) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 64,
+ .hsync_end = 1080 + 64 + 20,
+ .htotal = 1080 + 64 + 20 + 64,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 64,
+ .vsync_end = 2340 + 64 + 20,
+ .vtotal = 2340 + 64 + 20 + 64,
+ .width_mm = 68,
+ .height_mm = 147,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
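The .clock field is the pixel clock in kHz, pinned down by the totals and the 60 Hz refresh target; worked out for this mode:

	htotal = 1080 + 64 + 20 + 64 = 1228
	vtotal = 2340 + 64 + 20 + 64 = 2488
	clock  = 1228 * 2488 * 60 / 1000 = 183315 kHz (about 183.3 MHz)

Leaving the expression in the initializer, rather than the precomputed constant, keeps each porch and sync contribution auditable against the panel datasheet.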
+
+static int ams639rq08_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &ams639rq08_mode);
+}
+
+static const struct drm_panel_funcs ams639rq08_panel_funcs = {
+ .prepare = ams639rq08_prepare,
+ .unprepare = ams639rq08_unprepare,
+ .get_modes = ams639rq08_get_modes,
+};
+
+static int ams639rq08_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int ams639rq08_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness;
+}
+
+static const struct backlight_ops ams639rq08_bl_ops = {
+ .update_status = ams639rq08_bl_update_status,
+ .get_brightness = ams639rq08_bl_get_brightness,
+};
+
+static struct backlight_device *
+ams639rq08_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 1023,
+ .max_brightness = 2047,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &ams639rq08_bl_ops, &props);
+}
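Both ops use the "_large" DCS brightness accessors, which carry a 16-bit value; that is why max_brightness is 2047 (11 bits) rather than the 255 of the plain 8-bit variants. As a sketch of what the setter puts on the wire, assuming the big-endian byte split performed by mipi_dsi_dcs_set_display_brightness_large() (the helper name below is hypothetical):

	/* Hypothetical helper mirroring the "_large" brightness write */
	static int sketch_set_brightness_16(struct mipi_dsi_device *dsi, u16 val)
	{
		u8 payload[2] = { val >> 8, val & 0xff };	/* big-endian */
		ssize_t err;

		err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
					 payload, sizeof(payload));
		return err < 0 ? err : 0;
	}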
+
+static int ams639rq08_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct ams639rq08 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(&dsi->dev,
+ ARRAY_SIZE(ams639rq08_supplies),
+ ams639rq08_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &ams639rq08_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = ams639rq08_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void ams639rq08_remove(struct mipi_dsi_device *dsi)
+{
+ struct ams639rq08 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id ams639rq08_of_match[] = {
+ { .compatible = "samsung,ams639rq08" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ams639rq08_of_match);
+
+static struct mipi_dsi_driver ams639rq08_driver = {
+ .probe = ams639rq08_probe,
+ .remove = ams639rq08_remove,
+ .driver = {
+ .name = "panel-samsung-ams639rq08",
+ .of_match_table = ams639rq08_of_match,
+ },
+};
+module_mipi_dsi_driver(ams639rq08_driver);
+
+MODULE_AUTHOR("Danila Tikhonov <danila@jiaxyga.com>");
+MODULE_DESCRIPTION("DRM driver for SAMSUNG AMS639RQ08 cmd mode dsi panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
index 10bc8fb5f1f9..27a059b55ae5 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
@@ -38,57 +38,38 @@ static void s6e3fa7_panel_reset(struct s6e3fa7_panel *ctx)
usleep_range(10000, 11000);
}
-static int s6e3fa7_panel_on(struct s6e3fa7_panel *ctx)
+static int s6e3fa7_panel_on(struct mipi_dsi_device *dsi)
{
- struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret < 0) {
- dev_err(dev, "Failed to set tear on: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4,
+ 0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0,
+ 0x00, 0xb4, 0x37, 0x70, 0x79, 0x69);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
- mipi_dsi_dcs_write_seq(dsi, 0xf4,
- 0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0,
- 0x00, 0xb4, 0x37, 0x70, 0x79, 0x69);
- mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display on: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return dsi_ctx.accum_err;
}
static int s6e3fa7_panel_prepare(struct drm_panel *panel)
{
struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
s6e3fa7_panel_reset(ctx);
- ret = s6e3fa7_panel_on(ctx);
- if (ret < 0) {
- dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ ret = s6e3fa7_panel_on(ctx->dsi);
+ if (ret < 0)
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- return ret;
- }
- return 0;
+ return ret;
}
static int s6e3fa7_panel_unprepare(struct drm_panel *panel)
@@ -104,23 +85,13 @@ static int s6e3fa7_panel_disable(struct drm_panel *panel)
{
struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel);
struct mipi_dsi_device *dsi = ctx->dsi;
- struct device *dev = &dsi->dev;
- int ret;
-
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to set display off: %d\n", ret);
- return ret;
- }
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret < 0) {
- dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
- return ret;
- }
- msleep(120);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
+ return dsi_ctx.accum_err;
}
static const struct drm_display_mode s6e3fa7_panel_mode = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
new file mode 100644
index 000000000000..64c6f7d45bed
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+// Copyright (c) 2013, The Linux Foundation. All rights reserved.
+// Copyright (c) 2024 Dzmitry Sankouski <dsankouski@gmail.com>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/display/drm_dsc.h>
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_panel.h>
+
+struct s6e3ha8 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_config dsc;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data s6e3ha8_supplies[] = {
+ { .supply = "vdd3" },
+ { .supply = "vci" },
+ { .supply = "vddr" },
+};
+
+static inline
+struct s6e3ha8 *to_s6e3ha8_amb577px01_wqhd(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e3ha8, panel);
+}
+
+#define s6e3ha8_test_key_on_lvl2(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x5a, 0x5a)
+#define s6e3ha8_test_key_off_lvl2(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0xa5, 0xa5)
+#define s6e3ha8_test_key_on_lvl3(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xfc, 0x5a, 0x5a)
+#define s6e3ha8_test_key_off_lvl3(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xfc, 0xa5, 0xa5)
+#define s6e3ha8_test_key_on_lvl1(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x9f, 0xa5, 0xa5)
+#define s6e3ha8_test_key_off_lvl1(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x9f, 0x5a, 0x5a)
+#define s6e3ha8_afc_off(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xe2, 0x00, 0x00)
+
+static void s6e3ha8_amb577px01_wqhd_reset(struct s6e3ha8 *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+}
+
+static int s6e3ha8_amb577px01_wqhd_on(struct s6e3ha8 *priv)
+{
+ struct mipi_dsi_device *dsi = priv->dsi;
+ struct mipi_dsi_multi_context ctx = { .dsi = dsi };
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ s6e3ha8_test_key_on_lvl1(&ctx);
+
+ s6e3ha8_test_key_on_lvl2(&ctx);
+ mipi_dsi_compression_mode_multi(&ctx, true);
+ s6e3ha8_test_key_off_lvl2(&ctx);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ usleep_range(5000, 6000);
+
+ s6e3ha8_test_key_on_lvl2(&ctx);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x13);
+ s6e3ha8_test_key_off_lvl2(&ctx);
+ usleep_range(10000, 11000);
+
+ s6e3ha8_test_key_on_lvl2(&ctx);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x13);
+ s6e3ha8_test_key_off_lvl2(&ctx);
+
+ /* OMOK setting 1 (Initial setting) - Scaler Latch Setting Guide */
+ s6e3ha8_test_key_on_lvl2(&ctx);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x07);
+ /* latch setting 1: Scaler on/off & address setting & PPS setting -> Image update latch */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x3c, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x0b);
+ /* latch setting 2: Ratio change mode -> Image update latch */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x30);
+ /* OMOK setting 2 - Seamless setting guide: WQHD */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0x2a, 0x00, 0x00, 0x05, 0x9f); /* CASET */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0x2b, 0x00, 0x00, 0x0b, 0x8f); /* PASET */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xba, 0x01); /* scaler setup : scaler off */
+ s6e3ha8_test_key_off_lvl2(&ctx);
+
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0x35, 0x00); /* TE Vsync ON */
+
+ s6e3ha8_test_key_on_lvl2(&ctx);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xed, 0x4c); /* ERR_FG */
+ s6e3ha8_test_key_off_lvl2(&ctx);
+
+ s6e3ha8_test_key_on_lvl3(&ctx);
+ /* FFC Setting 897.6 Mbps */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xc5, 0x0d, 0x10, 0xb4, 0x3e, 0x01);
+ s6e3ha8_test_key_off_lvl3(&ctx);
+
+ s6e3ha8_test_key_on_lvl2(&ctx);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xb9,
+ 0x00, 0xb0, 0x81, 0x09, 0x00, 0x00, 0x00,
+ 0x11, 0x03); /* TSP HSYNC Setting */
+ s6e3ha8_test_key_off_lvl2(&ctx);
+
+ s6e3ha8_test_key_on_lvl2(&ctx);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xf6, 0x43);
+ s6e3ha8_test_key_off_lvl2(&ctx);
+
+ s6e3ha8_test_key_on_lvl2(&ctx);
+ /* Brightness condition set */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xca,
+ 0x07, 0x00, 0x00, 0x00, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x00, 0x0c); /* AID Set : 0% */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xb5,
+ 0x19, 0xdc, 0x16, 0x01, 0x34, 0x67, 0x9a,
+ 0xcd, 0x01, 0x22, 0x33, 0x44, 0x00, 0x00,
+ 0x05, 0x55, 0xcc, 0x0c, 0x01, 0x11, 0x11,
+ 0x10); /* MPS/ELVSS Setting */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xf4, 0xeb, 0x28); /* VINT */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xf7, 0x03); /* Gamma, LTPS(AID) update */
+ s6e3ha8_test_key_off_lvl2(&ctx);
+
+ s6e3ha8_test_key_off_lvl1(&ctx);
+
+ return ctx.accum_err;
+}
+
+static int s6e3ha8_enable(struct drm_panel *panel)
+{
+ struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel);
+ struct mipi_dsi_device *dsi = priv->dsi;
+ struct mipi_dsi_multi_context ctx = { .dsi = dsi };
+
+ s6e3ha8_test_key_on_lvl1(&ctx);
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ s6e3ha8_test_key_off_lvl1(&ctx);
+
+ return ctx.accum_err;
+}
+
+static int s6e3ha8_disable(struct drm_panel *panel)
+{
+ struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel);
+ struct mipi_dsi_device *dsi = priv->dsi;
+ struct mipi_dsi_multi_context ctx = { .dsi = dsi };
+
+ s6e3ha8_test_key_on_lvl1(&ctx);
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ s6e3ha8_test_key_off_lvl1(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ s6e3ha8_test_key_on_lvl2(&ctx);
+ s6e3ha8_afc_off(&ctx);
+ s6e3ha8_test_key_off_lvl2(&ctx);
+
+ mipi_dsi_msleep(&ctx, 160);
+
+ return ctx.accum_err;
+}
+
+static int s6e3ha8_amb577px01_wqhd_prepare(struct drm_panel *panel)
+{
+ struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel);
+ struct mipi_dsi_device *dsi = priv->dsi;
+ struct mipi_dsi_multi_context ctx = { .dsi = dsi };
+ struct drm_dsc_picture_parameter_set pps;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(s6e3ha8_supplies), priv->supplies);
+ if (ret < 0)
+ return ret;
+ mipi_dsi_msleep(&ctx, 120);
+ s6e3ha8_amb577px01_wqhd_reset(priv);
+
+ ret = s6e3ha8_amb577px01_wqhd_on(priv);
+ if (ret < 0) {
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ goto err;
+ }
+
+ drm_dsc_pps_payload_pack(&pps, &priv->dsc);
+
+ s6e3ha8_test_key_on_lvl1(&ctx);
+ mipi_dsi_picture_parameter_set_multi(&ctx, &pps);
+ s6e3ha8_test_key_off_lvl1(&ctx);
+
+ mipi_dsi_msleep(&ctx, 28);
+
+ return ctx.accum_err;
+err:
+ regulator_bulk_disable(ARRAY_SIZE(s6e3ha8_supplies), priv->supplies);
+ return ret;
+}
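prepare() pins down the DSC bring-up order this panel expects: compression mode is enabled (inside the on() sequence, before sleep-out), and only after the init sequence does the driver pack struct drm_dsc_config into a picture parameter set and push it, bracketed by the level-1 test key. Reduced to the three essential calls, assuming priv->dsc was filled in at probe time:

	struct drm_dsc_picture_parameter_set pps;

	mipi_dsi_compression_mode_multi(&ctx, true);	  /* tell the DDIC to expect DSC */
	drm_dsc_pps_payload_pack(&pps, &priv->dsc);	  /* config -> 128-byte PPS */
	mipi_dsi_picture_parameter_set_multi(&ctx, &pps); /* ship the PPS to the panel */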
+
+static int s6e3ha8_amb577px01_wqhd_unprepare(struct drm_panel *panel)
+{
+ struct s6e3ha8 *priv = to_s6e3ha8_amb577px01_wqhd(panel);
+
+ return regulator_bulk_disable(ARRAY_SIZE(s6e3ha8_supplies), priv->supplies);
+}
+
+static const struct drm_display_mode s6e3ha8_amb577px01_wqhd_mode = {
+ .clock = (1440 + 116 + 44 + 120) * (2960 + 120 + 80 + 124) * 60 / 1000,
+ .hdisplay = 1440,
+ .hsync_start = 1440 + 116,
+ .hsync_end = 1440 + 116 + 44,
+ .htotal = 1440 + 116 + 44 + 120,
+ .vdisplay = 2960,
+ .vsync_start = 2960 + 120,
+ .vsync_end = 2960 + 120 + 80,
+ .vtotal = 2960 + 120 + 80 + 124,
+ .width_mm = 64,
+ .height_mm = 132,
+};
+
+static int s6e3ha8_amb577px01_wqhd_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &s6e3ha8_amb577px01_wqhd_mode);
+}
+
+static const struct drm_panel_funcs s6e3ha8_amb577px01_wqhd_panel_funcs = {
+ .prepare = s6e3ha8_amb577px01_wqhd_prepare,
+ .unprepare = s6e3ha8_amb577px01_wqhd_unprepare,
+ .get_modes = s6e3ha8_amb577px01_wqhd_get_modes,
+ .enable = s6e3ha8_enable,
+ .disable = s6e3ha8_disable,
+};
+
+static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e3ha8 *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(s6e3ha8_supplies),
+ s6e3ha8_supplies,
+ &priv->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
+ MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;
+
+ drm_panel_init(&priv->panel, dev, &s6e3ha8_amb577px01_wqhd_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ priv->panel.prepare_prev_first = true;
+
+ drm_panel_add(&priv->panel);
+
+ /* This panel only supports DSC; unconditionally enable it */
+ dsi->dsc = &priv->dsc;
+
+ priv->dsc.dsc_version_major = 1;
+ priv->dsc.dsc_version_minor = 1;
+
+ priv->dsc.slice_height = 40;
+ priv->dsc.slice_width = 720;
+ WARN_ON(1440 % priv->dsc.slice_width);
+ priv->dsc.slice_count = 1440 / priv->dsc.slice_width;
+ priv->dsc.bits_per_component = 8;
+ priv->dsc.bits_per_pixel = 8 << 4; /* 4 fractional bits */
+ priv->dsc.block_pred_enable = true;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&priv->panel);
+ return ret;
+ }
+
+ return 0;
+}
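Two of the DSC parameters above are easy to misread, so worked out explicitly: bits_per_pixel is stored in a fixed-point format with 4 fractional bits, and slice_count falls out of the line width:

	bits_per_pixel = 8 << 4 = 128		/* 128 / 16 = 8.0 bpp */
	slice_count    = 1440 / 720 = 2		/* two slices per line */

The WARN_ON above guards the divisibility assumption behind the second line.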
+
+static void s6e3ha8_amb577px01_wqhd_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e3ha8 *priv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id s6e3ha8_amb577px01_wqhd_of_match[] = {
+ { .compatible = "samsung,s6e3ha8" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s6e3ha8_amb577px01_wqhd_of_match);
+
+static struct mipi_dsi_driver s6e3ha8_amb577px01_wqhd_driver = {
+ .probe = s6e3ha8_amb577px01_wqhd_probe,
+ .remove = s6e3ha8_amb577px01_wqhd_remove,
+ .driver = {
+ .name = "panel-s6e3ha8",
+ .of_match_table = s6e3ha8_amb577px01_wqhd_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e3ha8_amb577px01_wqhd_driver);
+
+MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for S6E3HA8 panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index ed53787d1dea..364f1c9a16d9 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -11,6 +11,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index a0e5698275a5..6917ffda5b2b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/media-bus-format.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
new file mode 100644
index 000000000000..e92e95158d1f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
@@ -0,0 +1,766 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung AMS427AP24 panel with S6E88A0 controller
+ * Copyright (c) 2024 Jakob Hauser <jahau@rocketmail.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+#define NUM_STEPS_CANDELA 54
+#define NUM_STEPS_AID 39
+#define NUM_STEPS_ELVSS 17
+
+/* length of the payload data, split into fixed and variable parts */
+#define FIX_LEN_AID 4
+#define FIX_LEN_ELVSS 2
+#define FIX_LEN_GAMMA 1
+#define VAR_LEN_AID 2
+#define VAR_LEN_ELVSS 1
+#define VAR_LEN_GAMMA 33
+#define LEN_AID (FIX_LEN_AID + VAR_LEN_AID)
+#define LEN_ELVSS (FIX_LEN_ELVSS + VAR_LEN_ELVSS)
+#define LEN_GAMMA (FIX_LEN_GAMMA + VAR_LEN_GAMMA)
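Spelled out: the fixed part of each payload is the DCS command byte plus the leading parameters that never change, and the variable tail is what the brightness code patches in at offset FIX_LEN_*:

	LEN_AID   = 4 + 2  = 6	-> b2[6] : 0xb2 + 3 fixed bytes + 2 AID bytes
	LEN_ELVSS = 2 + 1  = 3	-> b6[3] : 0xb6 + 1 fixed byte  + 1 ELVSS byte
	LEN_GAMMA = 1 + 33 = 34	-> ca[34]: 0xca                 + 33 gamma bytes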
+
+struct s6e88a0_ams427ap24 {
+ struct drm_panel panel;
+ struct backlight_device *bl_dev;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+ struct gpio_desc *reset_gpio;
+ bool flip_horizontal;
+};
+
+static const struct regulator_bulk_data s6e88a0_ams427ap24_supplies[] = {
+ { .supply = "vdd3" },
+ { .supply = "vci" },
+};
+
+static inline
+struct s6e88a0_ams427ap24 *to_s6e88a0_ams427ap24(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e88a0_ams427ap24, panel);
+}
+
+enum candela {
+ CANDELA_10CD, /* 0 */
+ CANDELA_11CD,
+ CANDELA_12CD,
+ CANDELA_13CD,
+ CANDELA_14CD,
+ CANDELA_15CD,
+ CANDELA_16CD,
+ CANDELA_17CD,
+ CANDELA_19CD,
+ CANDELA_20CD,
+ CANDELA_21CD,
+ CANDELA_22CD,
+ CANDELA_24CD,
+ CANDELA_25CD,
+ CANDELA_27CD,
+ CANDELA_29CD,
+ CANDELA_30CD,
+ CANDELA_32CD,
+ CANDELA_34CD,
+ CANDELA_37CD,
+ CANDELA_39CD,
+ CANDELA_41CD,
+ CANDELA_44CD,
+ CANDELA_47CD,
+ CANDELA_50CD,
+ CANDELA_53CD,
+ CANDELA_56CD,
+ CANDELA_60CD,
+ CANDELA_64CD,
+ CANDELA_68CD,
+ CANDELA_72CD,
+ CANDELA_77CD,
+ CANDELA_82CD,
+ CANDELA_87CD,
+ CANDELA_93CD,
+ CANDELA_98CD,
+ CANDELA_105CD,
+ CANDELA_111CD,
+ CANDELA_119CD,
+ CANDELA_126CD,
+ CANDELA_134CD,
+ CANDELA_143CD,
+ CANDELA_152CD,
+ CANDELA_162CD,
+ CANDELA_172CD,
+ CANDELA_183CD,
+ CANDELA_195CD,
+ CANDELA_207CD,
+ CANDELA_220CD,
+ CANDELA_234CD,
+ CANDELA_249CD,
+ CANDELA_265CD,
+ CANDELA_282CD,
+ CANDELA_300CD, /* 53 */
+};
+
+static const int s6e88a0_ams427ap24_br_to_cd[NUM_STEPS_CANDELA] = {
+ /* columns: brightness from, brightness till, candela */
+ /* 0 */ 10, /* 10CD */
+ /* 11 */ 11, /* 11CD */
+ /* 12 */ 12, /* 12CD */
+ /* 13 */ 13, /* 13CD */
+ /* 14 */ 14, /* 14CD */
+ /* 15 */ 15, /* 15CD */
+ /* 16 */ 16, /* 16CD */
+ /* 17 */ 17, /* 17CD */
+ /* 18 */ 18, /* 19CD */
+ /* 19 */ 19, /* 20CD */
+ /* 20 */ 20, /* 21CD */
+ /* 21 */ 21, /* 22CD */
+ /* 22 */ 22, /* 24CD */
+ /* 23 */ 23, /* 25CD */
+ /* 24 */ 24, /* 27CD */
+ /* 25 */ 25, /* 29CD */
+ /* 26 */ 26, /* 30CD */
+ /* 27 */ 27, /* 32CD */
+ /* 28 */ 28, /* 34CD */
+ /* 29 */ 29, /* 37CD */
+ /* 30 */ 30, /* 39CD */
+ /* 31 */ 32, /* 41CD */
+ /* 33 */ 34, /* 44CD */
+ /* 35 */ 36, /* 47CD */
+ /* 37 */ 38, /* 50CD */
+ /* 39 */ 40, /* 53CD */
+ /* 41 */ 43, /* 56CD */
+ /* 44 */ 46, /* 60CD */
+ /* 47 */ 49, /* 64CD */
+ /* 50 */ 52, /* 68CD */
+ /* 53 */ 56, /* 72CD */
+ /* 57 */ 59, /* 77CD */
+ /* 60 */ 63, /* 82CD */
+ /* 64 */ 67, /* 87CD */
+ /* 68 */ 71, /* 93CD */
+ /* 72 */ 76, /* 98CD */
+ /* 77 */ 80, /* 105CD */
+ /* 81 */ 86, /* 111CD */
+ /* 87 */ 91, /* 119CD */
+ /* 92 */ 97, /* 126CD */
+ /* 98 */ 104, /* 134CD */
+ /* 105 */ 110, /* 143CD */
+ /* 111 */ 118, /* 152CD */
+ /* 119 */ 125, /* 162CD */
+ /* 126 */ 133, /* 172CD */
+ /* 134 */ 142, /* 183CD */
+ /* 143 */ 150, /* 195CD */
+ /* 151 */ 160, /* 207CD */
+ /* 161 */ 170, /* 220CD */
+ /* 171 */ 181, /* 234CD */
+ /* 182 */ 205, /* 249CD */
+ /* 206 */ 234, /* 265CD */
+ /* 235 */ 254, /* 282CD */
+ /* 255 */ 255, /* 300CD */
+};
+
+static const u8 s6e88a0_ams427ap24_aid[NUM_STEPS_AID][VAR_LEN_AID] = {
+ { 0x03, 0x77 }, /* AOR 90.9%, 10CD */
+ { 0x03, 0x73 }, /* AOR 90.5%, 11CD */
+ { 0x03, 0x69 }, /* AOR 89.4%, 12CD */
+ { 0x03, 0x65 }, /* AOR 89.0%, 13CD */
+ { 0x03, 0x61 }, /* AOR 88.6%, 14CD */
+ { 0x03, 0x55 }, /* AOR 87.4%, 15CD */
+ { 0x03, 0x50 }, /* AOR 86.9%, 16CD */
+ { 0x03, 0x45 }, /* AOR 85.8%, 17CD */
+ { 0x03, 0x35 }, /* AOR 84.1%, 19CD */
+ { 0x03, 0x27 }, /* AOR 82.7%, 20CD */
+ { 0x03, 0x23 }, /* AOR 82.3%, 21CD */
+ { 0x03, 0x17 }, /* AOR 81.0%, 22CD */
+ { 0x03, 0x11 }, /* AOR 80.4%, 24CD */
+ { 0x03, 0x04 }, /* AOR 79.1%, 25CD */
+ { 0x02, 0xf4 }, /* AOR 77.5%, 27CD */
+ { 0x02, 0xe3 }, /* AOR 75.7%, 29CD */
+ { 0x02, 0xd7 }, /* AOR 74.5%, 30CD */
+ { 0x02, 0xc6 }, /* AOR 72.7%, 32CD */
+ { 0x02, 0xb7 }, /* AOR 71.2%, 34CD */
+ { 0x02, 0xa1 }, /* AOR 69.0%, 37CD */
+ { 0x02, 0x91 }, /* AOR 67.3%, 39CD */
+ { 0x02, 0x78 }, /* AOR 64.8%, 41CD */
+ { 0x02, 0x62 }, /* AOR 62.5%, 44CD */
+ { 0x02, 0x45 }, /* AOR 59.5%, 47CD */
+ { 0x02, 0x30 }, /* AOR 57.4%, 50CD */
+ { 0x02, 0x13 }, /* AOR 54.4%, 53CD */
+ { 0x01, 0xf5 }, /* AOR 51.3%, 56CD */
+ { 0x01, 0xd3 }, /* AOR 47.8%, 60CD */
+ { 0x01, 0xb1 }, /* AOR 44.4%, 64CD */
+ { 0x01, 0x87 }, /* AOR 40.1%, 68CD */
+ { 0x01, 0x63 }, /* AOR 36.6%, 72CD */
+ { 0x01, 0x35 }, /* AOR 31.7%, 77CD */
+ { 0x01, 0x05 }, /* AOR 26.9%, 82CD */
+ { 0x00, 0xd5 }, /* AOR 21.8%, 87CD */
+ { 0x00, 0xa1 }, /* AOR 16.5%, 93CD */
+ { 0x00, 0x6f }, /* AOR 11.4%, 98CD */
+ { 0x00, 0x31 }, /* AOR 5.0%, 105CD */
+ { 0x01, 0x86 }, /* AOR 40.0%, 111CD ~ 172CD */
+ { 0x00, 0x08 }, /* AOR 0.6%, 183CD ~ 300CD */
+};
+
+static const u8 s6e88a0_ams427ap24_elvss[NUM_STEPS_ELVSS][VAR_LEN_ELVSS] = {
+ { 0x14 }, /* 10CD ~ 111CD */
+ { 0x13 }, /* 119CD */
+ { 0x12 }, /* 126CD */
+ { 0x12 }, /* 134CD */
+ { 0x11 }, /* 143CD */
+ { 0x10 }, /* 152CD */
+ { 0x0f }, /* 162CD */
+ { 0x0e }, /* 172CD */
+ { 0x11 }, /* 183CD */
+ { 0x11 }, /* 195CD */
+ { 0x10 }, /* 207CD */
+ { 0x0f }, /* 220CD */
+ { 0x0f }, /* 234CD */
+ { 0x0e }, /* 249CD */
+ { 0x0d }, /* 265CD */
+ { 0x0c }, /* 282CD */
+ { 0x0b }, /* 300CD */
+};
+
+static const u8 s6e88a0_ams427ap24_gamma[NUM_STEPS_CANDELA][VAR_LEN_GAMMA] = {
+ /* 10CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8b,
+ 0x8c, 0x87, 0x89, 0x89, 0x88, 0x87, 0x8c, 0x80, 0x82, 0x88, 0x7b,
+ 0x72, 0x8c, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ /* 11CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8b,
+ 0x8c, 0x87, 0x89, 0x89, 0x88, 0x87, 0x8c, 0x80, 0x82, 0x88, 0x7b,
+ 0x72, 0x8c, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ /* 12CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8b, 0x8b,
+ 0x8c, 0x88, 0x89, 0x8a, 0x88, 0x87, 0x8c, 0x81, 0x82, 0x87, 0x7a,
+ 0x72, 0x8b, 0x60, 0x68, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ /* 13CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8b, 0x8b,
+ 0x8c, 0x88, 0x89, 0x8a, 0x88, 0x87, 0x8c, 0x81, 0x82, 0x87, 0x7a,
+ 0x72, 0x8b, 0x61, 0x69, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ /* 14CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b,
+ 0x8c, 0x88, 0x89, 0x8a, 0x87, 0x86, 0x8a, 0x82, 0x82, 0x87, 0x79,
+ 0x71, 0x89, 0x63, 0x6c, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ /* 15CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x8a, 0x8c, 0x8c,
+ 0x8c, 0x86, 0x87, 0x88, 0x85, 0x85, 0x8a, 0x83, 0x83, 0x88, 0x78,
+ 0x72, 0x89, 0x64, 0x6c, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ /* 16CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b,
+ 0x8c, 0x86, 0x88, 0x88, 0x86, 0x86, 0x8a, 0x84, 0x84, 0x88, 0x78,
+ 0x72, 0x89, 0x5d, 0x67, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ /* 17CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x87, 0x89, 0x89, 0x86, 0x86, 0x8a, 0x84, 0x83, 0x87, 0x78,
+ 0x73, 0x89, 0x64, 0x6e, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 },
+ /* 19CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x87, 0x89, 0x89, 0x86, 0x86, 0x89, 0x84, 0x84, 0x87, 0x77,
+ 0x72, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 },
+ /* 20CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x88, 0x89, 0x89, 0x85, 0x85, 0x88, 0x82, 0x83, 0x85, 0x79,
+ 0x73, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 },
+ /* 21CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x88, 0x89, 0x89, 0x85, 0x85, 0x88, 0x82, 0x83, 0x85, 0x79,
+ 0x74, 0x88, 0x65, 0x6f, 0x8e, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 },
+ /* 22CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b,
+ 0x8c, 0x86, 0x88, 0x87, 0x86, 0x86, 0x89, 0x82, 0x83, 0x85, 0x7c,
+ 0x75, 0x87, 0x65, 0x6f, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ /* 24CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8c, 0x8b,
+ 0x8c, 0x86, 0x88, 0x87, 0x86, 0x86, 0x89, 0x82, 0x83, 0x85, 0x7c,
+ 0x76, 0x86, 0x66, 0x6f, 0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ /* 25CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x89, 0x88, 0x87, 0x87, 0x89, 0x82, 0x82, 0x84, 0x7f,
+ 0x7a, 0x89, 0x6b, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 27CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x89, 0x88, 0x87, 0x87, 0x89, 0x82, 0x82, 0x84, 0x7f,
+ 0x7a, 0x89, 0x6b, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 29CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80,
+ 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 30CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80,
+ 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 32CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x84, 0x85, 0x86, 0x80,
+ 0x7b, 0x88, 0x6a, 0x73, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 34CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8c, 0x8a, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x89, 0x88, 0x85, 0x84, 0x87, 0x83, 0x84, 0x84, 0x7f,
+ 0x79, 0x86, 0x6c, 0x76, 0x91, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 37CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x88, 0x88, 0x87, 0x86, 0x87, 0x83, 0x84, 0x84, 0x7f,
+ 0x79, 0x86, 0x6c, 0x76, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 39CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x83, 0x85, 0x85, 0x80,
+ 0x79, 0x85, 0x6c, 0x76, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 41CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x81, 0x84, 0x83, 0x7f,
+ 0x79, 0x84, 0x6e, 0x79, 0x93, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 44CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x88, 0x87, 0x84, 0x84, 0x86, 0x81, 0x84, 0x83, 0x7f,
+ 0x79, 0x84, 0x6e, 0x79, 0x92, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 47CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x88, 0x87, 0x84, 0x85, 0x86, 0x81, 0x84, 0x83, 0x7f,
+ 0x79, 0x83, 0x6f, 0x79, 0x91, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 50CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x88, 0x87, 0x84, 0x85, 0x86, 0x82, 0x84, 0x83, 0x7f,
+ 0x79, 0x83, 0x6f, 0x79, 0x90, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 53CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8b,
+ 0x8b, 0x86, 0x88, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x85, 0x7f,
+ 0x79, 0x83, 0x70, 0x79, 0x8f, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 56CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a,
+ 0x8a, 0x87, 0x89, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x84, 0x7f,
+ 0x79, 0x82, 0x70, 0x7a, 0x8e, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 60CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a,
+ 0x8a, 0x87, 0x89, 0x87, 0x83, 0x83, 0x85, 0x84, 0x85, 0x84, 0x7e,
+ 0x79, 0x82, 0x71, 0x7a, 0x8d, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 64CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8b, 0x89, 0x89, 0x8b, 0x8a,
+ 0x8a, 0x86, 0x88, 0x86, 0x84, 0x84, 0x86, 0x82, 0x83, 0x82, 0x80,
+ 0x7a, 0x84, 0x71, 0x7a, 0x8c, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 68CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a,
+ 0x8a, 0x86, 0x88, 0x86, 0x84, 0x84, 0x86, 0x82, 0x84, 0x82, 0x81,
+ 0x7b, 0x83, 0x72, 0x7b, 0x8b, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 72CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a,
+ 0x8a, 0x86, 0x88, 0x86, 0x85, 0x85, 0x86, 0x82, 0x84, 0x82, 0x81,
+ 0x7b, 0x83, 0x72, 0x7c, 0x8a, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 77CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a,
+ 0x8a, 0x85, 0x87, 0x85, 0x85, 0x87, 0x87, 0x82, 0x84, 0x82, 0x81,
+ 0x7c, 0x82, 0x72, 0x7c, 0x89, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 82CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a,
+ 0x8a, 0x85, 0x87, 0x85, 0x85, 0x87, 0x87, 0x82, 0x84, 0x82, 0x81,
+ 0x7c, 0x82, 0x73, 0x7c, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 87CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8c, 0x8a,
+ 0x8a, 0x85, 0x87, 0x85, 0x84, 0x84, 0x86, 0x80, 0x84, 0x81, 0x80,
+ 0x7a, 0x82, 0x76, 0x7f, 0x89, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 93CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8b, 0x8a,
+ 0x8a, 0x86, 0x87, 0x85, 0x84, 0x85, 0x86, 0x80, 0x84, 0x80, 0x80,
+ 0x7a, 0x82, 0x76, 0x80, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 98CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x8a, 0x89, 0x89, 0x8b, 0x8a,
+ 0x8a, 0x86, 0x87, 0x85, 0x85, 0x85, 0x86, 0x80, 0x84, 0x80, 0x80,
+ 0x7a, 0x82, 0x76, 0x80, 0x88, 0x33, 0x2f, 0x22, 0x00, 0x00, 0x00 },
+ /* 105CD */
+ { 0x00, 0xc8, 0x00, 0xc4, 0x00, 0xc5, 0x89, 0x88, 0x88, 0x8b, 0x8a,
+ 0x8a, 0x84, 0x87, 0x85, 0x85, 0x85, 0x85, 0x80, 0x84, 0x80, 0x7f,
+ 0x79, 0x81, 0x71, 0x7d, 0x87, 0x38, 0x32, 0x24, 0x00, 0x00, 0x00 },
+ /* 111CD */
+ { 0x00, 0xdf, 0x00, 0xde, 0x00, 0xde, 0x85, 0x85, 0x84, 0x87, 0x86,
+ 0x87, 0x85, 0x86, 0x85, 0x83, 0x83, 0x83, 0x81, 0x82, 0x82, 0x80,
+ 0x7d, 0x82, 0x75, 0x7f, 0x86, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 119CD */
+ { 0x00, 0xe3, 0x00, 0xe1, 0x00, 0xe2, 0x85, 0x85, 0x84, 0x86, 0x85,
+ 0x85, 0x84, 0x85, 0x84, 0x83, 0x83, 0x83, 0x82, 0x82, 0x82, 0x7e,
+ 0x7b, 0x81, 0x75, 0x7f, 0x86, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 126CD */
+ { 0x00, 0xe6, 0x00, 0xe5, 0x00, 0xe5, 0x85, 0x84, 0x84, 0x85, 0x85,
+ 0x85, 0x84, 0x84, 0x84, 0x82, 0x83, 0x83, 0x80, 0x81, 0x81, 0x80,
+ 0x7f, 0x83, 0x73, 0x7c, 0x84, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 134CD */
+ { 0x00, 0xe9, 0x00, 0xe8, 0x00, 0xe8, 0x84, 0x84, 0x83, 0x85, 0x85,
+ 0x85, 0x84, 0x84, 0x83, 0x81, 0x82, 0x82, 0x81, 0x81, 0x81, 0x7f,
+ 0x7d, 0x81, 0x73, 0x7c, 0x83, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 143CD */
+ { 0x00, 0xed, 0x00, 0xec, 0x00, 0xec, 0x84, 0x83, 0x83, 0x84, 0x84,
+ 0x84, 0x84, 0x84, 0x83, 0x82, 0x83, 0x83, 0x81, 0x80, 0x81, 0x7f,
+ 0x7e, 0x81, 0x70, 0x79, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 152CD */
+ { 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x83, 0x83, 0x83, 0x83, 0x83,
+ 0x83, 0x84, 0x84, 0x83, 0x81, 0x81, 0x81, 0x80, 0x80, 0x81, 0x80,
+ 0x80, 0x82, 0x6f, 0x78, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 162CD */
+ { 0x00, 0xf4, 0x00, 0xf3, 0x00, 0xf4, 0x83, 0x83, 0x83, 0x83, 0x83,
+ 0x83, 0x82, 0x81, 0x81, 0x81, 0x81, 0x81, 0x80, 0x80, 0x81, 0x80,
+ 0x7f, 0x82, 0x6f, 0x78, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 172CD */
+ { 0x00, 0xf8, 0x00, 0xf8, 0x00, 0xf8, 0x82, 0x82, 0x82, 0x82, 0x82,
+ 0x82, 0x82, 0x81, 0x81, 0x80, 0x81, 0x80, 0x80, 0x80, 0x81, 0x81,
+ 0x80, 0x83, 0x6d, 0x76, 0x7d, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 183CD */
+ { 0x00, 0xe0, 0x00, 0xdf, 0x00, 0xdf, 0x84, 0x84, 0x83, 0x86, 0x86,
+ 0x86, 0x83, 0x84, 0x83, 0x82, 0x82, 0x82, 0x81, 0x83, 0x81, 0x81,
+ 0x7e, 0x81, 0x80, 0x82, 0x84, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 195CD */
+ { 0x00, 0xe4, 0x00, 0xe3, 0x00, 0xe3, 0x84, 0x83, 0x83, 0x85, 0x85,
+ 0x85, 0x83, 0x84, 0x83, 0x81, 0x82, 0x82, 0x82, 0x83, 0x81, 0x81,
+ 0x80, 0x82, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 207CD */
+ { 0x00, 0xe7, 0x00, 0xe6, 0x00, 0xe6, 0x83, 0x82, 0x82, 0x85, 0x85,
+ 0x85, 0x82, 0x83, 0x83, 0x82, 0x82, 0x82, 0x80, 0x81, 0x80, 0x81,
+ 0x80, 0x82, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 220CD */
+ { 0x00, 0xeb, 0x00, 0xea, 0x00, 0xea, 0x83, 0x83, 0x82, 0x84, 0x84,
+ 0x84, 0x82, 0x83, 0x82, 0x81, 0x81, 0x82, 0x81, 0x82, 0x81, 0x80,
+ 0x7e, 0x80, 0x7d, 0x7f, 0x81, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 234CD */
+ { 0x00, 0xef, 0x00, 0xee, 0x00, 0xee, 0x83, 0x82, 0x82, 0x83, 0x83,
+ 0x83, 0x82, 0x82, 0x82, 0x81, 0x81, 0x81, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x81, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 249CD */
+ { 0x00, 0xf3, 0x00, 0xf2, 0x00, 0xf2, 0x82, 0x81, 0x81, 0x83, 0x83,
+ 0x83, 0x82, 0x82, 0x82, 0x81, 0x81, 0x81, 0x80, 0x81, 0x80, 0x7f,
+ 0x7e, 0x7f, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 265CD */
+ { 0x00, 0xf7, 0x00, 0xf7, 0x00, 0xf7, 0x81, 0x81, 0x80, 0x82, 0x82,
+ 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x80, 0x7f,
+ 0x7e, 0x7f, 0x7b, 0x7c, 0x7f, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 282CD */
+ { 0x00, 0xfb, 0x00, 0xfb, 0x00, 0xfb, 0x80, 0x80, 0x80, 0x81, 0x81,
+ 0x81, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x78, 0x79, 0x7d, 0x85, 0x85, 0x82, 0x00, 0x00, 0x00 },
+ /* 300CD */
+ { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00 },
+};
+
+static int s6e88a0_ams427ap24_set_brightness(struct backlight_device *bd)
+{
+ struct s6e88a0_ams427ap24 *ctx = bl_get_data(bd);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ struct device *dev = &dsi->dev;
+ int brightness = bd->props.brightness;
+ int candela_enum;
+ u8 b2[LEN_AID] = { 0xb2, 0x40, 0x08, 0x20, 0x00, 0x00 };
+ u8 b6[LEN_ELVSS] = { 0xb6, 0x28, 0x00 };
+ u8 ca[LEN_GAMMA];
+
+ /* get candela enum from brightness */
+ for (candela_enum = 0; candela_enum < NUM_STEPS_CANDELA; candela_enum++)
+ if (brightness <= s6e88a0_ams427ap24_br_to_cd[candela_enum])
+ break;
+
+ /* get aid */
+ switch (candela_enum) {
+ case CANDELA_10CD ... CANDELA_105CD:
+ memcpy(&b2[FIX_LEN_AID],
+ s6e88a0_ams427ap24_aid[candela_enum],
+ VAR_LEN_AID);
+ break;
+ case CANDELA_111CD ... CANDELA_172CD:
+ memcpy(&b2[FIX_LEN_AID],
+ s6e88a0_ams427ap24_aid[CANDELA_111CD],
+ VAR_LEN_AID);
+ break;
+ case CANDELA_183CD ... CANDELA_300CD:
+ memcpy(&b2[FIX_LEN_AID],
+ s6e88a0_ams427ap24_aid[CANDELA_111CD + 1],
+ VAR_LEN_AID);
+ break;
+ default:
+ dev_err(dev, "Failed to get aid data\n");
+ return -EINVAL;
+ }
+
+ /* get elvss */
+ if (candela_enum <= CANDELA_111CD) {
+ memcpy(&b6[FIX_LEN_ELVSS],
+ s6e88a0_ams427ap24_elvss[0],
+ VAR_LEN_ELVSS);
+ } else {
+ memcpy(&b6[FIX_LEN_ELVSS],
+ s6e88a0_ams427ap24_elvss[candela_enum - CANDELA_111CD],
+ VAR_LEN_ELVSS);
+ }
+
+ /* get gamma */
+ ca[0] = 0xca;
+ memcpy(&ca[FIX_LEN_GAMMA],
+ s6e88a0_ams427ap24_gamma[candela_enum],
+ VAR_LEN_GAMMA);
+
+ /* write data */
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); // level 1 key on
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, b2, ARRAY_SIZE(b2)); // set aid
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x00); // acl off
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, b6, ARRAY_SIZE(b6)); // set elvss
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, ca, ARRAY_SIZE(ca)); // set gamma
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf7, 0x03); // gamma update
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); // level 1 key off
+
+ return dsi_ctx.accum_err;
+}
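Tracing the lookups for one concrete request makes the table plumbing clearer. For bd->props.brightness = 100, the scan stops at the first table entry >= 100, which is 104 at index 40 (the 134CD row); that index then selects:

	candela_enum = 40				/* CANDELA_134CD */
	aid   -> s6e88a0_ams427ap24_aid[37]		/* 111CD..172CD bucket, AOR 40.0% */
	elvss -> s6e88a0_ams427ap24_elvss[40 - 37]	/* = elvss[3] = 0x12, the 134CD value */
	gamma -> s6e88a0_ams427ap24_gamma[40]		/* the 134CD gamma row */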
+
+static void s6e88a0_ams427ap24_reset(struct s6e88a0_ams427ap24 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(18000, 19000);
+}
+
+static int s6e88a0_ams427ap24_on(struct s6e88a0_ams427ap24 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); // level 1 key on
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0x5a, 0x5a); // level 2 key on
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x11); // src latch set global 1
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfd, 0x11); // src latch set 1
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x13); // src latch set global 2
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfd, 0x18); // src latch set 2
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x02); // avdd set 1
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x30); // avdd set 2
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x5a, 0x5a); // level 3 key on
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcc, 0x4c); // pixel clock divider pol.
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf2, 0x03, 0x0d); // unknown
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0xa5, 0xa5); // level 3 key off
+
+ if (ctx->flip_horizontal)
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x0e); // flip display
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); // level 1 key off
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfc, 0xa5, 0xa5); // level 2 key off
+
+ ret = s6e88a0_ams427ap24_set_brightness(ctx->bl_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set brightness: %d\n", ret);
+ return ret;
+ }
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int s6e88a0_ams427ap24_off(struct s6e88a0_ams427ap24 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ return dsi_ctx.accum_err;
+}
+
+static int s6e88a0_ams427ap24_prepare(struct drm_panel *panel)
+{
+ struct s6e88a0_ams427ap24 *ctx = to_s6e88a0_ams427ap24(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ s6e88a0_ams427ap24_reset(ctx);
+
+ ret = s6e88a0_ams427ap24_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e88a0_ams427ap24_unprepare(struct drm_panel *panel)
+{
+ struct s6e88a0_ams427ap24 *ctx = to_s6e88a0_ams427ap24(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = s6e88a0_ams427ap24_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(s6e88a0_ams427ap24_supplies),
+ ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode s6e88a0_ams427ap24_mode = {
+ .clock = (540 + 94 + 4 + 18) * (960 + 12 + 1 + 3) * 60 / 1000,
+ .hdisplay = 540,
+ .hsync_start = 540 + 94,
+ .hsync_end = 540 + 94 + 4,
+ .htotal = 540 + 94 + 4 + 18,
+ .vdisplay = 960,
+ .vsync_start = 960 + 12,
+ .vsync_end = 960 + 12 + 1,
+ .vtotal = 960 + 12 + 1 + 3,
+ .width_mm = 55,
+ .height_mm = 95,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int s6e88a0_ams427ap24_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector,
+ &s6e88a0_ams427ap24_mode);
+}
+
+static const struct drm_panel_funcs s6e88a0_ams427ap24_panel_funcs = {
+ .prepare = s6e88a0_ams427ap24_prepare,
+ .unprepare = s6e88a0_ams427ap24_unprepare,
+ .get_modes = s6e88a0_ams427ap24_get_modes,
+};
+
+static const struct backlight_ops s6e88a0_ams427ap24_bl_ops = {
+ .update_status = s6e88a0_ams427ap24_set_brightness,
+};
+
+static int s6e88a0_ams427ap24_register_backlight(struct s6e88a0_ams427ap24 *ctx)
+{
+ struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 180,
+ .max_brightness = 255,
+ };
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret = 0;
+
+ ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev), dev, ctx,
+ &s6e88a0_ams427ap24_bl_ops,
+ &props);
+ if (IS_ERR(ctx->bl_dev)) {
+ ret = PTR_ERR(ctx->bl_dev);
+ dev_err(dev, "error registering backlight device (%d)\n", ret);
+ }
+
+ return ret;
+}
+
+static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e88a0_ams427ap24 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(s6e88a0_ams427ap24_supplies),
+ s6e88a0_ams427ap24_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_NO_HFP;
+
+ drm_panel_init(&ctx->panel, dev, &s6e88a0_ams427ap24_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->flip_horizontal = device_property_read_bool(dev, "flip-horizontal");
+
+ ret = s6e88a0_ams427ap24_register_backlight(ctx);
+ if (ret < 0)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void s6e88a0_ams427ap24_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e88a0_ams427ap24 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id s6e88a0_ams427ap24_of_match[] = {
+ { .compatible = "samsung,s6e88a0-ams427ap24" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s6e88a0_ams427ap24_of_match);
+
+static struct mipi_dsi_driver s6e88a0_ams427ap24_driver = {
+ .probe = s6e88a0_ams427ap24_probe,
+ .remove = s6e88a0_ams427ap24_remove,
+ .driver = {
+ .name = "panel-s6e88a0-ams427ap24",
+ .of_match_table = s6e88a0_ams427ap24_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e88a0_ams427ap24_driver);
+
+MODULE_AUTHOR("Jakob Hauser <jahau@rocketmail.com>");
+MODULE_DESCRIPTION("Samsung AMS427AP24 panel with S6E88A0 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 8a3fe531c641..7d1b421ea9dd 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -297,7 +297,7 @@ static struct platform_driver seiko_panel_platform_driver = {
.of_match_table = platform_of_match,
},
.probe = seiko_panel_platform_probe,
- .remove_new = seiko_panel_remove,
+ .remove = seiko_panel_remove,
};
module_platform_driver(seiko_panel_platform_driver);
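These .remove_new -> .remove hunks are part of the tree-wide rename after struct platform_driver's .remove callback was switched to return void; .remove_new existed only as the transition name. The resulting driver shape (example_probe and example_remove are hypothetical names):

	#include <linux/platform_device.h>

	static void example_remove(struct platform_device *pdev)
	{
		/* teardown only; nothing can be reported back to the core */
	}

	static struct platform_driver example_driver = {
		.probe = example_probe,
		.remove = example_remove,	/* void-returning callback */
		.driver = { .name = "example" },
	};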
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index 76bd9e810827..a9673a52b861 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -207,7 +207,7 @@ MODULE_DEVICE_TABLE(of, ls037v7dw01_of_match);
static struct platform_driver ls037v7dw01_driver = {
.probe = ls037v7dw01_probe,
- .remove_new = ls037v7dw01_remove,
+ .remove = ls037v7dw01_remove,
.driver = {
.name = "panel-sharp-ls037v7dw01",
.of_match_table = ls037v7dw01_of_match,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 86735430462f..9b2f128fd309 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -3222,6 +3222,33 @@ static const struct panel_desc mitsubishi_aa084xe01 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
+static const struct display_timing multi_inno_mi0700a2t_30_timing = {
+ .pixelclock = { 26400000, 33000000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 204, 354 },
+ .hback_porch = { 46, 46, 46 },
+ .hsync_len = { 1, 6, 40 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 3, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc multi_inno_mi0700a2t_30 = {
+ .timings = &multi_inno_mi0700a2t_30_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 153,
+ .height = 92,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
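Each struct display_timing field in panel-simple is a {min, typical, max} triplet, and the core advertises a mode built from the typical column. A sketch of that conversion, mirroring what panel-simple does internally for .timings entries:

	#include <video/display_timing.h>
	#include <video/videomode.h>
	#include <drm/drm_modes.h>

	/* Sketch: {min, typ, max} timing -> drm_display_mode (typical values) */
	static void sketch_timing_to_mode(const struct display_timing *dt,
					  struct drm_display_mode *mode)
	{
		struct videomode vm;

		videomode_from_timing(dt, &vm);
		drm_display_mode_from_videomode(&vm, mode);
	}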
+
static const struct display_timing multi_inno_mi0700s4t_6_timing = {
.pixelclock = { 29000000, 33000000, 38000000 },
.hactive = { 800, 800, 800 },
@@ -3313,6 +3340,33 @@ static const struct panel_desc multi_inno_mi1010ait_1cp = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing multi_inno_mi1010z1t_1cp11_timing = {
+ .pixelclock = { 40800000, 51200000, 67200000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 30, 110, 130 },
+ .hback_porch = { 30, 110, 130 },
+ .hsync_len = { 30, 100, 116 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 4, 13, 80 },
+ .vback_porch = { 4, 13, 80 },
+ .vsync_len = { 2, 9, 40 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc multi_inno_mi1010z1t_1cp11 = {
+ .timings = &multi_inno_mi1010z1t_1cp11_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 260,
+ .height = 162,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing nec_nl12880bc20_05_timing = {
.pixelclock = { 67000000, 71000000, 75000000 },
.hactive = { 1280, 1280, 1280 },
@@ -4280,6 +4334,45 @@ static const struct panel_desc tianma_tm070jvhg33 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
+/*
+ * The datasheet computes total blanking as back porch + front porch, not
+ * including sync pulse width. This is for both H and V. To make the total
+ * blanking and period correct, subtract the pulse width from the front
+ * porch.
+ *
+ * This works well for the Min and Typ values, but for Max values the sync
+ * pulse width is higher than back porch + front porch, so work around that
+ * by reducing the Max sync length value to 1 and then treating the Max
+ * porches as in the Min and Typ cases.
+ *
+ * Exact datasheet values are added as a comment where they differ from the
+ * ones implemented for the above reason.
+ */
+static const struct display_timing tianma_tm070jdhg34_00_timing = {
+ .pixelclock = { 68400000, 71900000, 78100000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 130, 138, 158 }, /* 131, 139, 159 */
+ .hback_porch = { 5, 5, 5 },
+ .hsync_len = { 1, 1, 1 }, /* 1, 1, 256 */
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 2, 39, 98 }, /* 3, 40, 99 */
+ .vback_porch = { 2, 2, 2 },
+ .vsync_len = { 1, 1, 1 }, /* 1, 1, 128 */
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
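Checking the comment's workaround against the typical column: the datasheet gives H blanking as 139 + 5 = 144 and V blanking as 40 + 2 = 42, and carving the sync pulse out of the front porch preserves both totals:

	H: 138 (front) + 1 (sync) + 5 (back) = 144
	V:  39 (front) + 1 (sync) + 2 (back) =  42

For the Max column the datasheet sync pulses (256 and 128) exceed total blanking, so the lengths are clamped to 1 and the Max front porches shrink by the same 1 (159 -> 158, 99 -> 98).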
+
+static const struct panel_desc tianma_tm070jdhg34_00 = {
+ .timings = &tianma_tm070jdhg34_00_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 150, /* 149.76 */
+ .height = 94, /* 93.60 */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing tianma_tm070rvhg71_timing = {
.pixelclock = { 27700000, 29200000, 39600000 },
.hactive = { 800, 800, 800 },
@@ -4361,6 +4454,37 @@ static const struct panel_desc ti_nspire_classic_lcd_panel = {
.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
+static const struct display_timing topland_tian_g07017_01_timing = {
+ .pixelclock = { 44900000, 51200000, 63000000 },
+ .hactive = { 1024, 1024, 1024 },
+ .hfront_porch = { 16, 160, 216 },
+ .hback_porch = { 160, 160, 160 },
+ .hsync_len = { 1, 1, 140 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 1, 12, 127 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 1, 20 },
+};
+
+static const struct panel_desc topland_tian_g07017_01 = {
+ .timings = &topland_tian_g07017_01_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .delay = {
+ .prepare = 1, /* 6.5 - 150µs PLL wake-up time */
+ .enable = 100, /* 6.4 - Power on: 6 VSyncs */
+ .disable = 84, /* 6.4 - Power off: 5 VSyncs */
+ .unprepare = 50, /* 6.4 - Power off: 3 VSyncs */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+};
+
static const struct drm_display_mode toshiba_lt089ac29000_mode = {
.clock = 79500,
.hdisplay = 1280,
@@ -4565,6 +4689,31 @@ static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct drm_display_mode mchp_ac69t88a_mode = {
+ .clock = 25000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 88,
+ .hsync_end = 800 + 88 + 5,
+ .htotal = 800 + 88 + 5 + 40,
+ .vdisplay = 480,
+ .vsync_start = 480 + 23,
+ .vsync_end = 480 + 23 + 5,
+ .vtotal = 480 + 23 + 5 + 1,
+};
+
+static const struct panel_desc mchp_ac69t88a = {
+ .modes = &mchp_ac69t88a_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode arm_rtsm_mode[] = {
{
.clock = 65000,
@@ -4881,6 +5030,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "mitsubishi,aa084xe01",
.data = &mitsubishi_aa084xe01,
}, {
+ .compatible = "multi-inno,mi0700a2t-30",
+ .data = &multi_inno_mi0700a2t_30,
+ }, {
.compatible = "multi-inno,mi0700s4t-6",
.data = &multi_inno_mi0700s4t_6,
}, {
@@ -4890,6 +5042,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "multi-inno,mi1010ait-1cp",
.data = &multi_inno_mi1010ait_1cp,
}, {
+ .compatible = "multi-inno,mi1010z1t-1cp11",
+ .data = &multi_inno_mi1010z1t_1cp11,
+ }, {
.compatible = "nec,nl12880bc20-05",
.data = &nec_nl12880bc20_05,
}, {
@@ -4998,6 +5153,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
+ .compatible = "tianma,tm070jdhg34-00",
+ .data = &tianma_tm070jdhg34_00,
+ }, {
.compatible = "tianma,tm070jvhg33",
.data = &tianma_tm070jvhg33,
}, {
@@ -5013,6 +5171,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "toshiba,lt089ac29000",
.data = &toshiba_lt089ac29000,
}, {
+ .compatible = "topland,tian-g07017-01",
+ .data = &topland_tian_g07017_01,
+ }, {
.compatible = "tpk,f07a-0102",
.data = &tpk_f07a_0102,
}, {
@@ -5049,6 +5210,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "yes-optoelectronics,ytc700tlag-05-201c",
.data = &yes_optoelectronics_ytc700tlag_05_201c,
}, {
+ .compatible = "microchip,ac69t88a",
+ .data = &mchp_ac69t88a,
+ }, {
/* Must be the last entry */
.compatible = "panel-dpi",
.data = &panel_dpi,
@@ -5092,7 +5256,7 @@ static struct platform_driver panel_simple_platform_driver = {
.pm = &panel_simple_pm_ops,
},
.probe = panel_simple_platform_probe,
- .remove_new = panel_simple_platform_remove,
+ .remove = panel_simple_platform_remove,
.shutdown = panel_simple_platform_shutdown,
};
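The .remove_new to .remove renames in this diff complete the platform-driver conversion to a void-returning remove callback; .remove_new was the transitional member name used while both signatures coexisted. A minimal sketch of the resulting shape (all names hypothetical):

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* bind resources here */
}

static void example_remove(struct platform_device *pdev)
{
	/* teardown only: a void remove cannot report failure */
}

static struct platform_driver example_driver = {
	.probe  = example_probe,
	.remove = example_remove,	/* formerly .remove_new */
	.driver = { .name = "example" },
};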
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index eef03d04e0cd..1f72ef7ca74c 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -1177,6 +1177,7 @@ static int st7701_probe(struct device *dev, int connector_type)
return dev_err_probe(dev, ret, "Failed to get orientation\n");
drm_panel_init(&st7701->panel, dev, &st7701_funcs, connector_type);
+ st7701->panel.prepare_prev_first = true;
/**
* Once sleep out has been issued, ST7701 IC required to wait 120ms
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 217f03569494..d437f5c84f5f 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -562,8 +562,7 @@ static int acx565akm_detect(struct acx565akm_panel *lcd)
lcd->enabled ? "enabled" : "disabled ", status);
acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_ID, lcd->display_id, 3);
- dev_dbg(&lcd->spi->dev, "MIPI display ID: %02x%02x%02x\n",
- lcd->display_id[0], lcd->display_id[1], lcd->display_id[2]);
+ dev_dbg(&lcd->spi->dev, "MIPI display ID: %3phN\n", lcd->display_id);
switch (lcd->display_id[0]) {
case 0x10:
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
index 169c629746c7..17349825543f 100644
--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -325,7 +325,7 @@ static void r63353_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct r63353_panel *rpanel = mipi_dsi_get_drvdata(dsi);
- r63353_panel_unprepare(&rpanel->base);
+ drm_panel_unprepare(&rpanel->base);
}
static const struct r63353_desc sharp_ls068b3sx02_data = {
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 272490b9565b..be3a9797fbce 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -193,7 +193,6 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
- ctx->panel.dev = dev;
ctx->dsi = dsi;
ctx->supplies[0].supply = "vdda";
@@ -201,13 +200,11 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
ctx->supplies[1].supply = "vdd3p3";
ctx->supplies[1].init_load_uA = 13200;
- ret = devm_regulator_bulk_get(ctx->panel.dev, ARRAY_SIZE(ctx->supplies),
- ctx->supplies);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret < 0)
return ret;
- ctx->reset_gpio = devm_gpiod_get(ctx->panel.dev,
- "reset", GPIOD_OUT_LOW);
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
dev_err(dev, "cannot get reset gpio %ld\n", PTR_ERR(ctx->reset_gpio));
return PTR_ERR(ctx->reset_gpio);
@@ -215,8 +212,6 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &visionox_rm69299_drm_funcs;
drm_panel_add(&ctx->panel);
dsi->lanes = 4;
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 2d30da38c2c3..3385fd3ef41a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -38,7 +38,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
return PTR_ERR(opp);
dev_pm_opp_put(opp);
- err = dev_pm_opp_set_rate(dev, *freq);
+ err = dev_pm_opp_set_rate(dev, *freq);
if (!err)
ptdev->pfdevfreq.current_frequency = *freq;
@@ -182,6 +182,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
* if any and will avoid a switch off by regulator_late_cleanup()
*/
ret = dev_pm_opp_set_opp(dev, opp);
+ dev_pm_opp_put(opp);
if (ret) {
DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
return ret;
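The dev_pm_opp_put() added above fixes a reference leak: devfreq_recommended_opp() returns an OPP with an elevated refcount that the caller owns whether or not the subsequent set succeeds. A condensed sketch of the pattern this hunk enforces:

	opp = devfreq_recommended_opp(dev, &freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);	/* drop our reference before acting on ret */
	if (ret)
		return ret;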
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 671eed4ad890..0f3935556ac7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -3,6 +3,10 @@
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
+#ifdef CONFIG_ARM_ARCH_TIMER
+#include <asm/arch_timer.h>
+#endif
+
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pagemap.h>
@@ -21,13 +25,33 @@
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
+#define JOB_REQUIREMENTS (PANFROST_JD_REQ_FS | PANFROST_JD_REQ_CYCLE_COUNT)
+
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);
+static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev,
+ u64 *arg)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(pfdev->dev);
+ if (ret)
+ return ret;
+
+ panfrost_cycle_counter_get(pfdev);
+ *arg = panfrost_timestamp_read(pfdev);
+ panfrost_cycle_counter_put(pfdev);
+
+ pm_runtime_put(pfdev->dev);
+ return 0;
+}
+
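From userspace, the new parameter is reached through the existing GET_PARAM ioctl. A hypothetical sketch, assuming fd is an open panfrost render node; the DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP constant comes from the uapi half of this series, which is not shown in this diff:

	struct drm_panfrost_get_param get = {
		.param = DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP,
	};

	if (drmIoctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get) == 0)
		printf("GPU timestamp: %llu\n", (unsigned long long)get.value);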
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
struct drm_panfrost_get_param *param = data;
struct panfrost_device *pfdev = ddev->dev_private;
+ int ret;
if (param->pad != 0)
return -EINVAL;
@@ -69,6 +93,21 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
+
+ case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP:
+ ret = panfrost_ioctl_query_timestamp(pfdev, &param->value);
+ if (ret)
+ return ret;
+ break;
+
+ case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY:
+#ifdef CONFIG_ARM_ARCH_TIMER
+ param->value = arch_timer_get_cntfrq();
+#else
+ param->value = 0;
+#endif
+ break;
+
default:
return -EINVAL;
}
@@ -245,7 +284,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
if (!args->jc)
return -EINVAL;
- if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
+ if (args->requirements & ~JOB_REQUIREMENTS)
return -EINVAL;
if (args->out_sync > 0) {
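The rewritten requirements check scales to multiple flags: the old equality test could only ever admit PANFROST_JD_REQ_FS alone, whereas masking off the known bits rejects anything unknown while allowing any combination of supported ones. In isolation:

	/* Accept any subset of the known bits, reject everything else. */
	if (args->requirements & ~JOB_REQUIREMENTS)
		return -EINVAL;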
@@ -584,6 +623,8 @@ static const struct file_operations panfrost_drm_driver_fops = {
* - 1.0 - initial interface
* - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
* - 1.2 - adds AFBC_FEATURES query
+ * - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT
+ * - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -595,9 +636,8 @@ static const struct drm_driver panfrost_drm_driver = {
.fops = &panfrost_drm_driver_fops,
.name = "panfrost",
.desc = "panfrost DRM",
- .date = "20180908",
.major = 1,
- .minor = 2,
+ .minor = 3,
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
@@ -825,7 +865,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver panfrost_driver = {
.probe = panfrost_probe,
- .remove_new = panfrost_remove,
+ .remove = panfrost_remove,
.driver = {
.name = "panfrost",
.pm = pm_ptr(&panfrost_pm_ops),
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index fd8e44992184..174e190ba40f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -177,7 +177,6 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
struct panfrost_model {
const char *name;
u32 id;
- u32 id_mask;
u64 features;
u64 issues;
struct {
@@ -237,6 +236,10 @@ static const struct panfrost_model gpu_models[] = {
*/
GPU_MODEL(g57, 0x9003,
GPU_REV(g57, 0, 0)),
+
+ /* MediaTek MT8188 Mali-G57 MC3 */
+ GPU_MODEL(g57, 0x9093,
+ GPU_REV(g57, 0, 0)),
};
static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
@@ -380,6 +383,18 @@ unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev)
return ((u64)hi << 32) | lo;
}
+unsigned long long panfrost_timestamp_read(struct panfrost_device *pfdev)
+{
+ u32 hi, lo;
+
+ do {
+ hi = gpu_read(pfdev, GPU_TIMESTAMP_HI);
+ lo = gpu_read(pfdev, GPU_TIMESTAMP_LO);
+ } while (hi != gpu_read(pfdev, GPU_TIMESTAMP_HI));
+
+ return ((u64)hi << 32) | lo;
+}
+
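panfrost_timestamp_read() uses the usual hi/lo/hi dance for a 64-bit counter exposed as two 32-bit registers: if the high word changed between reads, a carry occurred mid-sample and the read is retried. A generic sketch of the same idiom, with hypothetical accessors:

static u64 read_split_counter(u32 (*rd_hi)(void), u32 (*rd_lo)(void))
{
	u32 hi, lo;

	do {
		hi = rd_hi();
		lo = rd_lo();
	} while (hi != rd_hi());	/* retry if the high word rolled over */

	return ((u64)hi << 32) | lo;
}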
static u64 panfrost_get_core_mask(struct panfrost_device *pfdev)
{
u64 core_mask;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
index d841b86504ea..b4fef11211d5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
@@ -20,6 +20,7 @@ void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev);
void panfrost_cycle_counter_get(struct panfrost_device *pfdev);
void panfrost_cycle_counter_put(struct panfrost_device *pfdev);
unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev);
+unsigned long long panfrost_timestamp_read(struct panfrost_device *pfdev);
void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index df49d37d0e7e..9b8e82fb8bc4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -159,16 +159,17 @@ panfrost_dequeue_job(struct panfrost_device *pfdev, int slot)
struct panfrost_job *job = pfdev->jobs[slot][0];
WARN_ON(!job);
- if (job->is_profiled) {
- if (job->engine_usage) {
- job->engine_usage->elapsed_ns[slot] +=
- ktime_to_ns(ktime_sub(ktime_get(), job->start_time));
- job->engine_usage->cycles[slot] +=
- panfrost_cycle_counter_read(pfdev) - job->start_cycles;
- }
- panfrost_cycle_counter_put(job->pfdev);
+
+ if (job->is_profiled && job->engine_usage) {
+ job->engine_usage->elapsed_ns[slot] +=
+ ktime_to_ns(ktime_sub(ktime_get(), job->start_time));
+ job->engine_usage->cycles[slot] +=
+ panfrost_cycle_counter_read(pfdev) - job->start_cycles;
}
+ if (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT || job->is_profiled)
+ panfrost_cycle_counter_put(pfdev);
+
pfdev->jobs[slot][0] = pfdev->jobs[slot][1];
pfdev->jobs[slot][1] = NULL;
@@ -243,9 +244,13 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
subslot = panfrost_enqueue_job(pfdev, js, job);
/* Don't queue the job if a reset is in progress */
if (!atomic_read(&pfdev->reset.pending)) {
- if (pfdev->profile_mode) {
+ job->is_profiled = pfdev->profile_mode;
+
+ if (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT ||
+ job->is_profiled)
panfrost_cycle_counter_get(pfdev);
- job->is_profiled = true;
+
+ if (job->is_profiled) {
job->start_time = ktime_get();
job->start_cycles = panfrost_cycle_counter_read(pfdev);
}
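After this change the cycle counter reference is taken and released under the same condition in three places: hw_submit, dequeue and the reset path below. A hypothetical helper would keep those sites from drifting apart; this is a suggestion, not part of the patch:

static bool panfrost_job_uses_cycle_counter(const struct panfrost_job *job)
{
	return job->is_profiled ||
	       (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT);
}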
@@ -693,7 +698,8 @@ panfrost_reset(struct panfrost_device *pfdev,
spin_lock(&pfdev->js->job_lock);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) {
- if (pfdev->jobs[i][j]->is_profiled)
+ if (pfdev->jobs[i][j]->requirements & PANFROST_JD_REQ_CYCLE_COUNT ||
+ pfdev->jobs[i][j]->is_profiled)
panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev);
pm_runtime_put_noidle(pfdev->dev);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
@@ -727,7 +733,7 @@ panfrost_reset(struct panfrost_device *pfdev,
/* Restart the schedulers */
for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_start(&pfdev->js->queue[i].sched);
+ drm_sched_start(&pfdev->js->queue[i].sched, 0);
/* Re-enable job interrupts now that everything has been restarted. */
job_write(pfdev, JOB_INT_MASK,
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index c25743b05c55..c7bba476ab3f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -78,6 +78,8 @@
#define GPU_CYCLE_COUNT_LO 0x90
#define GPU_CYCLE_COUNT_HI 0x94
+#define GPU_TIMESTAMP_LO 0x98
+#define GPU_TIMESTAMP_HI 0x9C
#define GPU_THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */
#define GPU_THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c
index c6d3c327cc24..3686515d368d 100644
--- a/drivers/gpu/drm/panthor/panthor_devfreq.c
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.c
@@ -62,14 +62,20 @@ static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq)
static int panthor_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
+ struct panthor_device *ptdev = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
+ int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
dev_pm_opp_put(opp);
- return dev_pm_opp_set_rate(dev, *freq);
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (!err)
+ ptdev->current_frequency = *freq;
+
+ return err;
}
static void panthor_devfreq_reset(struct panthor_devfreq *pdevfreq)
@@ -130,6 +136,7 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
struct panthor_devfreq *pdevfreq;
struct dev_pm_opp *opp;
unsigned long cur_freq;
+ unsigned long freq = ULONG_MAX;
int ret;
pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL);
@@ -156,12 +163,6 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
cur_freq = clk_get_rate(ptdev->clks.core);
- opp = devfreq_recommended_opp(dev, &cur_freq, 0);
- if (IS_ERR(opp))
- return PTR_ERR(opp);
-
- panthor_devfreq_profile.initial_freq = cur_freq;
-
/* Regulator coupling only takes care of synchronizing/balancing voltage
* updates, but the coupled regulator needs to be enabled manually.
*
@@ -192,16 +193,30 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
return ret;
}
+ opp = devfreq_recommended_opp(dev, &cur_freq, 0);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ panthor_devfreq_profile.initial_freq = cur_freq;
+ ptdev->current_frequency = cur_freq;
+
/*
* Set the recommend OPP this will enable and configure the regulator
* if any and will avoid a switch off by regulator_late_cleanup()
*/
ret = dev_pm_opp_set_opp(dev, opp);
+ dev_pm_opp_put(opp);
if (ret) {
DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
return ret;
}
+ /* Find the fastest defined rate */
+ opp = dev_pm_opp_find_freq_floor(dev, &freq);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ ptdev->fast_rate = freq;
+
dev_pm_opp_put(opp);
/*
@@ -228,26 +243,26 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
return 0;
}
-int panthor_devfreq_resume(struct panthor_device *ptdev)
+void panthor_devfreq_resume(struct panthor_device *ptdev)
{
struct panthor_devfreq *pdevfreq = ptdev->devfreq;
if (!pdevfreq->devfreq)
- return 0;
+ return;
panthor_devfreq_reset(pdevfreq);
- return devfreq_resume_device(pdevfreq->devfreq);
+ drm_WARN_ON(&ptdev->base, devfreq_resume_device(pdevfreq->devfreq));
}
-int panthor_devfreq_suspend(struct panthor_device *ptdev)
+void panthor_devfreq_suspend(struct panthor_device *ptdev)
{
struct panthor_devfreq *pdevfreq = ptdev->devfreq;
if (!pdevfreq->devfreq)
- return 0;
+ return;
- return devfreq_suspend_device(pdevfreq->devfreq);
+ drm_WARN_ON(&ptdev->base, devfreq_suspend_device(pdevfreq->devfreq));
}
void panthor_devfreq_record_busy(struct panthor_device *ptdev)
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.h b/drivers/gpu/drm/panthor/panthor_devfreq.h
index 83a5c9522493..b7631de695f7 100644
--- a/drivers/gpu/drm/panthor/panthor_devfreq.h
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.h
@@ -12,8 +12,8 @@ struct panthor_devfreq;
int panthor_devfreq_init(struct panthor_device *ptdev);
-int panthor_devfreq_resume(struct panthor_device *ptdev);
-int panthor_devfreq_suspend(struct panthor_device *ptdev);
+void panthor_devfreq_resume(struct panthor_device *ptdev);
+void panthor_devfreq_suspend(struct panthor_device *ptdev);
void panthor_devfreq_record_busy(struct panthor_device *ptdev);
void panthor_devfreq_record_idle(struct panthor_device *ptdev);
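The int-to-void conversion of these helpers reflects that no caller has a meaningful recovery path; instead of propagating an error nobody can act on, the failure is made loud at the point of occurrence. The general shape, with hypothetical names:

void example_devfreq_resume(struct example_device *edev)
{
	if (!edev->devfreq)
		return;

	/* warn loudly instead of returning an unactionable error */
	drm_WARN_ON(&edev->base, devfreq_resume_device(edev->devfreq));
}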
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index 4082c8f2951d..0a37cfeeb181 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -22,6 +22,24 @@
#include "panthor_regs.h"
#include "panthor_sched.h"
+static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
+{
+ ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
+
+ if (!ptdev->coherent)
+ return 0;
+
+ /* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
+ * ACE protocol has never been supported for command stream frontend GPUs.
+ */
+ if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
+ GPU_COHERENCY_PROT_BIT(ACE_LITE)))
+ return 0;
+
+ drm_err(&ptdev->base, "Coherency not supported by the device");
+ return -ENOTSUPP;
+}
+
static int panthor_clk_init(struct panthor_device *ptdev)
{
ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
@@ -156,7 +174,9 @@ int panthor_device_init(struct panthor_device *ptdev)
struct page *p;
int ret;
- ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
+ ret = panthor_gpu_coherency_init(ptdev);
+ if (ret)
+ return ret;
init_completion(&ptdev->unplug.done);
ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
@@ -390,11 +410,15 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *
{
u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
switch (offset) {
case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
(vma->vm_flags & (VM_WRITE | VM_EXEC)))
return -EINVAL;
+ vm_flags_clear(vma, VM_MAYWRITE);
break;
@@ -411,6 +435,22 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *
return 0;
}
+static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
+{
+ int ret;
+
+ panthor_gpu_resume(ptdev);
+ panthor_mmu_resume(ptdev);
+
+ ret = panthor_fw_resume(ptdev);
+ if (!ret)
+ return 0;
+
+ panthor_mmu_suspend(ptdev);
+ panthor_gpu_suspend(ptdev);
+ return ret;
+}
+
int panthor_device_resume(struct device *dev)
{
struct panthor_device *ptdev = dev_get_drvdata(dev);
@@ -433,22 +473,20 @@ int panthor_device_resume(struct device *dev)
if (ret)
goto err_disable_stacks_clk;
- ret = panthor_devfreq_resume(ptdev);
- if (ret)
- goto err_disable_coregroup_clk;
+ panthor_devfreq_resume(ptdev);
if (panthor_device_is_initialized(ptdev) &&
drm_dev_enter(&ptdev->base, &cookie)) {
- panthor_gpu_resume(ptdev);
- panthor_mmu_resume(ptdev);
- ret = drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
- if (!ret) {
- panthor_sched_resume(ptdev);
- } else {
- panthor_mmu_suspend(ptdev);
- panthor_gpu_suspend(ptdev);
+ ret = panthor_device_resume_hw_components(ptdev);
+ if (ret && ptdev->reset.fast) {
+ drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
+ ptdev->reset.fast = false;
+ ret = panthor_device_resume_hw_components(ptdev);
}
+ if (!ret)
+ panthor_sched_resume(ptdev);
+
drm_dev_exit(cookie);
if (ret)
@@ -472,8 +510,6 @@ int panthor_device_resume(struct device *dev)
err_suspend_devfreq:
panthor_devfreq_suspend(ptdev);
-
-err_disable_coregroup_clk:
clk_disable_unprepare(ptdev->clks.coregroup);
err_disable_stacks_clk:
@@ -484,13 +520,14 @@ err_disable_core_clk:
err_set_suspended:
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
+ atomic_set(&ptdev->pm.recovery_needed, 1);
return ret;
}
int panthor_device_suspend(struct device *dev)
{
struct panthor_device *ptdev = dev_get_drvdata(dev);
- int ret, cookie;
+ int cookie;
if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
return -EINVAL;
@@ -522,36 +559,11 @@ int panthor_device_suspend(struct device *dev)
drm_dev_exit(cookie);
}
- ret = panthor_devfreq_suspend(ptdev);
- if (ret) {
- if (panthor_device_is_initialized(ptdev) &&
- drm_dev_enter(&ptdev->base, &cookie)) {
- panthor_gpu_resume(ptdev);
- panthor_mmu_resume(ptdev);
- drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
- panthor_sched_resume(ptdev);
- drm_dev_exit(cookie);
- }
-
- goto err_set_active;
- }
+ panthor_devfreq_suspend(ptdev);
clk_disable_unprepare(ptdev->clks.coregroup);
clk_disable_unprepare(ptdev->clks.stacks);
clk_disable_unprepare(ptdev->clks.core);
atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
return 0;
-
-err_set_active:
- /* If something failed and we have to revert back to an
- * active state, we also need to clear the MMIO userspace
- * mappings, so any dumb pages that were mapped while we
- * were trying to suspend gets invalidated.
- */
- mutex_lock(&ptdev->pm.mmio_lock);
- atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
- unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
- DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
- mutex_unlock(&ptdev->pm.mmio_lock);
- return ret;
}
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index e388c0472ba7..da6574021664 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -9,6 +9,7 @@
#include <linux/atomic.h>
#include <linux/io-pgtable.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
@@ -67,6 +68,25 @@ struct panthor_irq {
};
/**
+ * enum panthor_device_profiling_mode - Profiling state
+ */
+enum panthor_device_profiling_flags {
+ /** @PANTHOR_DEVICE_PROFILING_DISABLED: Profiling is disabled. */
+ PANTHOR_DEVICE_PROFILING_DISABLED = 0,
+
+ /** @PANTHOR_DEVICE_PROFILING_CYCLES: Sampling job cycles. */
+ PANTHOR_DEVICE_PROFILING_CYCLES = BIT(0),
+
+ /** @PANTHOR_DEVICE_PROFILING_TIMESTAMP: Sampling job timestamp. */
+ PANTHOR_DEVICE_PROFILING_TIMESTAMP = BIT(1),
+
+ /** @PANTHOR_DEVICE_PROFILING_ALL: Sampling everything. */
+ PANTHOR_DEVICE_PROFILING_ALL =
+ PANTHOR_DEVICE_PROFILING_CYCLES |
+ PANTHOR_DEVICE_PROFILING_TIMESTAMP,
+};
+
+/**
* struct panthor_device - Panthor device
*/
struct panthor_device {
@@ -137,6 +157,17 @@ struct panthor_device {
/** @pending: Set to true if a reset is pending. */
atomic_t pending;
+
+ /**
+ * @fast: True if the post_reset logic can proceed with a fast reset.
+ *
+ * A fast reset is just a reset where the driver doesn't reload the FW sections.
+ *
+ * Any time the firmware is properly suspended, a fast reset can take place.
+ * On the other hand, if the halt operation failed, the driver will reload
+ * all FW sections to make sure we start from a fresh state.
+ */
+ bool fast;
} reset;
/** @pm: Power management related data. */
@@ -161,7 +192,24 @@ struct panthor_device {
* is suspended.
*/
struct page *dummy_latest_flush;
+
+ /** @recovery_needed: True when a resume attempt failed. */
+ atomic_t recovery_needed;
} pm;
+
+ /** @profile_mask: User-set profiling flags for job accounting. */
+ u32 profile_mask;
+
+ /** @current_frequency: Device clock frequency at present. Set by DVFS. */
+ unsigned long current_frequency;
+
+ /** @fast_rate: Maximum device clock frequency. Set by DVFS */
+ unsigned long fast_rate;
+};
+
+struct panthor_gpu_usage {
+ u64 time;
+ u64 cycles;
};
/**
@@ -176,6 +224,9 @@ struct panthor_file {
/** @groups: Scheduling group pool attached to this file. */
struct panthor_group_pool *groups;
+
+ /** @stats: cycle and timestamp measures for job execution. */
+ struct panthor_gpu_usage stats;
};
int panthor_device_init(struct panthor_device *ptdev);
@@ -207,6 +258,28 @@ int panthor_device_mmap_io(struct panthor_device *ptdev,
int panthor_device_resume(struct device *dev);
int panthor_device_suspend(struct device *dev);
+static inline int panthor_device_resume_and_get(struct panthor_device *ptdev)
+{
+ int ret = pm_runtime_resume_and_get(ptdev->base.dev);
+
+ /* If the resume failed, we need to clear the runtime_error, which
+ * can be done by forcing the RPM state to suspended. If multiple
+ * threads called panthor_device_resume_and_get(), we only want
+ * one of them to update the state, hence the cmpxchg. Note that a
+ * thread might enter panthor_device_resume_and_get() and call
+ * pm_runtime_resume_and_get() after another thread had attempted
+ * to resume and failed. This means we will end up with an error
+ * without even attempting a resume ourselves. The only risk here
+ * is to report an error when the second resume attempt might have
+ * succeeded. Given resume errors are not expected, this is probably
+ * something we can live with.
+ */
+ if (ret && atomic_cmpxchg(&ptdev->pm.recovery_needed, 1, 0) == 1)
+ pm_runtime_set_suspended(ptdev->base.dev);
+
+ return ret;
+}
+
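The cmpxchg in panthor_device_resume_and_get() implements a one-shot recovery: many threads may observe the failed resume, but exactly one wins the 1 -> 0 transition and resets the RPM state. Reduced to its essentials, with hypothetical names:

static atomic_t recovery_needed = ATOMIC_INIT(0);

static void maybe_recover(struct device *dev)
{
	/* atomic_cmpxchg() returns the old value: only the caller that
	 * actually flipped 1 -> 0 performs the recovery. */
	if (atomic_cmpxchg(&recovery_needed, 1, 0) == 1)
		pm_runtime_set_suspended(dev);
}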
enum drm_panthor_exception_type {
DRM_PANTHOR_EXCEPTION_OK = 0x00,
DRM_PANTHOR_EXCEPTION_TERMINATED = 0x04,
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index c520f156e2d7..d5dcd3d1b33a 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -3,12 +3,17 @@
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
+#ifdef CONFIG_ARM_ARCH_TIMER
+#include <asm/arch_timer.h>
+#endif
+
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/time64.h>
#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
@@ -165,6 +170,8 @@ panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
_Generic(_obj_name, \
PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \
PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_timestamp_info, current_timestamp), \
+ PANTHOR_UOBJ_DECL(struct drm_panthor_group_priorities_info, pad), \
PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
@@ -751,10 +758,63 @@ static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx,
kvfree(ctx->jobs);
}
+static int panthor_query_timestamp_info(struct panthor_device *ptdev,
+ struct drm_panthor_timestamp_info *arg)
+{
+ int ret;
+
+ ret = panthor_device_resume_and_get(ptdev);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+ arg->timestamp_frequency = arch_timer_get_cntfrq();
+#else
+ arg->timestamp_frequency = 0;
+#endif
+ arg->current_timestamp = panthor_gpu_read_timestamp(ptdev);
+ arg->timestamp_offset = panthor_gpu_read_timestamp_offset(ptdev);
+
+ pm_runtime_put(ptdev->base.dev);
+ return 0;
+}
+
+static int group_priority_permit(struct drm_file *file,
+ u8 priority)
+{
+ /* Ensure that priority is valid */
+ if (priority > PANTHOR_GROUP_PRIORITY_REALTIME)
+ return -EINVAL;
+
+ /* Medium priority and below are always allowed */
+ if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM)
+ return 0;
+
+ /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
+ if (capable(CAP_SYS_NICE) || drm_is_current_master(file))
+ return 0;
+
+ return -EACCES;
+}
+
+static void panthor_query_group_priorities_info(struct drm_file *file,
+ struct drm_panthor_group_priorities_info *arg)
+{
+ int prio;
+
+ for (prio = PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) {
+ if (!group_priority_permit(file, prio))
+ arg->allowed_mask |= BIT(prio);
+ }
+}
+
static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file)
{
struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
struct drm_panthor_dev_query *args = data;
+ struct drm_panthor_timestamp_info timestamp_info;
+ struct drm_panthor_group_priorities_info priorities_info;
+ int ret;
if (!args->pointer) {
switch (args->type) {
@@ -766,6 +826,14 @@ static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct d
args->size = sizeof(ptdev->csif_info);
return 0;
+ case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO:
+ args->size = sizeof(timestamp_info);
+ return 0;
+
+ case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO:
+ args->size = sizeof(priorities_info);
+ return 0;
+
default:
return -EINVAL;
}
@@ -778,6 +846,18 @@ static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct d
case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info);
+ case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO:
+ ret = panthor_query_timestamp_info(ptdev, &timestamp_info);
+
+ if (ret)
+ return ret;
+
+ return PANTHOR_UOBJ_SET(args->pointer, args->size, timestamp_info);
+
+ case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO:
+ panthor_query_group_priorities_info(file, &priorities_info);
+ return PANTHOR_UOBJ_SET(args->pointer, args->size, priorities_info);
+
default:
return -EINVAL;
}
@@ -997,24 +1077,6 @@ static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
return panthor_group_destroy(pfile, args->group_handle);
}
-static int group_priority_permit(struct drm_file *file,
- u8 priority)
-{
- /* Ensure that priority is valid */
- if (priority > PANTHOR_GROUP_PRIORITY_HIGH)
- return -EINVAL;
-
- /* Medium priority and below are always allowed */
- if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM)
- return 0;
-
- /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
- if (capable(CAP_SYS_NICE) || drm_is_current_master(file))
- return 0;
-
- return -EACCES;
-}
-
static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
struct drm_file *file)
{
@@ -1374,6 +1436,37 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
+static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
+ struct panthor_file *pfile,
+ struct drm_printer *p)
+{
+ if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_ALL)
+ panthor_fdinfo_gather_group_samples(pfile);
+
+ if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) {
+#ifdef CONFIG_ARM_ARCH_TIMER
+ drm_printf(p, "drm-engine-panthor:\t%llu ns\n",
+ DIV_ROUND_UP_ULL((pfile->stats.time * NSEC_PER_SEC),
+ arch_timer_get_cntfrq()));
+#endif
+ }
+ if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
+ drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles);
+
+ drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate);
+ drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
+}
+
+static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct drm_device *dev = file->minor->dev;
+ struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);
+
+ panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
+
+ drm_show_memory_stats(p, file);
+}
+
static const struct file_operations panthor_drm_driver_fops = {
.open = drm_open,
.release = drm_release,
@@ -1383,6 +1476,7 @@ static const struct file_operations panthor_drm_driver_fops = {
.read = drm_read,
.llseek = noop_llseek,
.mmap = panthor_mmap,
+ .show_fdinfo = drm_show_fdinfo,
.fop_flags = FOP_UNSIGNED_OFFSET,
};
@@ -1396,20 +1490,24 @@ static void panthor_debugfs_init(struct drm_minor *minor)
/*
* PanCSF driver version:
* - 1.0 - initial interface
+ * - 1.1 - adds DEV_QUERY_TIMESTAMP_INFO query
+ * - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query
+ * - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
+ * - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
*/
static const struct drm_driver panthor_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
.open = panthor_open,
.postclose = panthor_postclose,
+ .show_fdinfo = panthor_show_fdinfo,
.ioctls = panthor_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
.fops = &panthor_drm_driver_fops,
.name = "panthor",
.desc = "Panthor DRM driver",
- .date = "20230801",
.major = 1,
- .minor = 0,
+ .minor = 3,
.gem_create_object = panthor_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
@@ -1439,6 +1537,44 @@ static void panthor_remove(struct platform_device *pdev)
panthor_device_unplug(ptdev);
}
+static ssize_t profiling_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct panthor_device *ptdev = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", ptdev->profile_mask);
+}
+
+static ssize_t profiling_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct panthor_device *ptdev = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ err = kstrtou32(buf, 0, &value);
+ if (err)
+ return err;
+
+ if ((value & ~PANTHOR_DEVICE_PROFILING_ALL) != 0)
+ return -EINVAL;
+
+ ptdev->profile_mask = value;
+
+ return len;
+}
+
+static DEVICE_ATTR_RW(profiling);
+
+static struct attribute *panthor_attrs[] = {
+ &dev_attr_profiling.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(panthor);
+
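With dev_groups wired into the platform_driver further down, the driver core creates this attribute automatically at probe time. The mask accepts 0 (off), 1 (cycles), 2 (timestamps) or 3 (both), matching the PANTHOR_DEVICE_PROFILING_* flags introduced in panthor_device.h; any other value is rejected with -EINVAL.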
static const struct of_device_id dt_match[] = {
{ .compatible = "rockchip,rk3588-mali" },
{ .compatible = "arm,mali-valhall-csf" },
@@ -1453,11 +1589,12 @@ static DEFINE_RUNTIME_DEV_PM_OPS(panthor_pm_ops,
static struct platform_driver panthor_driver = {
.probe = panthor_probe,
- .remove_new = panthor_remove,
+ .remove = panthor_remove,
.driver = {
.name = "panthor",
.pm = pm_ptr(&panthor_pm_ops),
.of_match_table = dt_match,
+ .dev_groups = panthor_groups,
},
};
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index ef232c0c2049..68eb4fb4d3a8 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -12,6 +12,7 @@
#include <linux/iosys-map.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
@@ -78,6 +79,12 @@ enum panthor_fw_binary_entry_type {
/** @CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA: Timeline metadata interface. */
CSF_FW_BINARY_ENTRY_TYPE_TIMELINE_METADATA = 4,
+
+ /**
+ * @CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA: Metadata about how
+ * the FW binary was built.
+ */
+ CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA = 6
};
#define CSF_FW_BINARY_ENTRY_TYPE(ehdr) ((ehdr) & 0xff)
@@ -85,26 +92,26 @@ enum panthor_fw_binary_entry_type {
#define CSF_FW_BINARY_ENTRY_UPDATE BIT(30)
#define CSF_FW_BINARY_ENTRY_OPTIONAL BIT(31)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_RD BIT(0)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_WR BIT(1)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_EX BIT(2)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_NONE (0 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED (1 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_UNCACHED_COHERENT (2 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED_COHERENT (3 << 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK GENMASK(4, 3)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_PROT BIT(5)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED BIT(30)
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO BIT(31)
-
-#define CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS \
- (CSF_FW_BINARY_IFACE_ENTRY_RD_RD | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_WR | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_EX | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_PROT | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED | \
- CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO)
+#define CSF_FW_BINARY_IFACE_ENTRY_RD BIT(0)
+#define CSF_FW_BINARY_IFACE_ENTRY_WR BIT(1)
+#define CSF_FW_BINARY_IFACE_ENTRY_EX BIT(2)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_NONE (0 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED (1 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_UNCACHED_COHERENT (2 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED_COHERENT (3 << 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK GENMASK(4, 3)
+#define CSF_FW_BINARY_IFACE_ENTRY_PROT BIT(5)
+#define CSF_FW_BINARY_IFACE_ENTRY_SHARED BIT(30)
+#define CSF_FW_BINARY_IFACE_ENTRY_ZERO BIT(31)
+
+#define CSF_FW_BINARY_IFACE_ENTRY_SUPPORTED_FLAGS \
+ (CSF_FW_BINARY_IFACE_ENTRY_RD | \
+ CSF_FW_BINARY_IFACE_ENTRY_WR | \
+ CSF_FW_BINARY_IFACE_ENTRY_EX | \
+ CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK | \
+ CSF_FW_BINARY_IFACE_ENTRY_PROT | \
+ CSF_FW_BINARY_IFACE_ENTRY_SHARED | \
+ CSF_FW_BINARY_IFACE_ENTRY_ZERO)
/**
* struct panthor_fw_binary_section_entry_hdr - Describes a section of FW binary
@@ -132,6 +139,13 @@ struct panthor_fw_binary_section_entry_hdr {
} data;
};
+struct panthor_fw_build_info_hdr {
+ /** @meta_start: Offset of the build info data in the FW binary */
+ u32 meta_start;
+ /** @meta_size: Size of the build info data in the FW binary */
+ u32 meta_size;
+};
+
/**
* struct panthor_fw_binary_iter - Firmware binary iterator
*
@@ -249,17 +263,6 @@ struct panthor_fw {
/** @booted: True is the FW is booted */
bool booted;
- /**
- * @fast_reset: True if the post_reset logic can proceed with a fast reset.
- *
- * A fast reset is just a reset where the driver doesn't reload the FW sections.
- *
- * Any time the firmware is properly suspended, a fast reset can take place.
- * On the other hand, if the halt operation failed, the driver will reload
- * all sections to make sure we start from a fresh state.
- */
- bool fast_reset;
-
/** @irq: Job irq data. */
struct panthor_irq irq;
};
@@ -400,7 +403,7 @@ static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
int ret;
if (!section->data.size &&
- !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO))
+ !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_ZERO))
return;
ret = panthor_kernel_bo_vmap(section->mem);
@@ -408,7 +411,7 @@ static void panthor_fw_init_section_mem(struct panthor_device *ptdev,
return;
memcpy(section->mem->kmap, section->data.buf, section->data.size);
- if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_ZERO) {
+ if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_ZERO) {
memset(section->mem->kmap + section->data.size, 0,
panthor_kernel_bo_size(section->mem) - section->data.size);
}
@@ -487,6 +490,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
struct panthor_fw_binary_iter *iter,
u32 ehdr)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm);
struct panthor_fw_binary_section_entry_hdr hdr;
struct panthor_fw_section *section;
u32 section_size;
@@ -515,27 +519,26 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
return -EINVAL;
}
- if ((hdr.va.start & ~PAGE_MASK) != 0 ||
- (hdr.va.end & ~PAGE_MASK) != 0) {
+ if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) {
drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
hdr.va.start, hdr.va.end);
return -EINVAL;
}
- if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS) {
+ if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_SUPPORTED_FLAGS) {
drm_err(&ptdev->base, "Firmware contains interface with unsupported flags (0x%x)\n",
hdr.flags);
return -EINVAL;
}
- if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_PROT) {
+ if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_PROT) {
drm_warn(&ptdev->base,
"Firmware protected mode entry not be supported, ignoring");
return 0;
}
if (hdr.va.start == CSF_MCU_SHARED_REGION_START &&
- !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED)) {
+ !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_SHARED)) {
drm_err(&ptdev->base,
"Interface at 0x%llx must be shared", CSF_MCU_SHARED_REGION_START);
return -EINVAL;
@@ -574,26 +577,26 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
section_size = hdr.va.end - hdr.va.start;
if (section_size) {
- u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK;
+ u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_MASK;
struct panthor_gem_object *bo;
u32 vm_map_flags = 0;
struct sg_table *sgt;
u64 va = hdr.va.start;
- if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+ if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_READONLY;
- if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_EX))
+ if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_EX))
vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC;
- /* TODO: CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_*_COHERENT are mapped to
+ /* TODO: CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_*_COHERENT are mapped to
* non-cacheable for now. We might want to introduce a new
* IOMMU_xxx flag (or abuse IOMMU_MMIO, which maps to device
* memory and is currently not used by our driver) for
* AS_MEMATTR_AARCH64_SHARED memory, so we can take benefit
* of IO-coherent systems.
*/
- if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED)
+ if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_CACHE_MODE_CACHED)
vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED;
section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
@@ -606,7 +609,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
if (drm_WARN_ON(&ptdev->base, section->mem->va_node.start != hdr.va.start))
return -EINVAL;
- if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED) {
+ if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_SHARED) {
ret = panthor_kernel_bo_vmap(section->mem);
if (ret)
return ret;
@@ -628,6 +631,46 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
return 0;
}
+static int panthor_fw_read_build_info(struct panthor_device *ptdev,
+ const struct firmware *fw,
+ struct panthor_fw_binary_iter *iter,
+ u32 ehdr)
+{
+ struct panthor_fw_build_info_hdr hdr;
+ char header[9];
+ const char git_sha_header[sizeof(header)] = "git_sha: ";
+ int ret;
+
+ ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr));
+ if (ret)
+ return ret;
+
+ if (hdr.meta_start > fw->size ||
+ hdr.meta_start + hdr.meta_size > fw->size) {
+ drm_err(&ptdev->base, "Firmware build info corrupt\n");
+ /* We don't need the build info, so continue */
+ return 0;
+ }
+
+ if (memcmp(git_sha_header, fw->data + hdr.meta_start,
+ sizeof(git_sha_header))) {
+ /* Not the expected header; this isn't metadata we understand */
+ return 0;
+ }
+
+ /* Check that the git SHA is NULL terminated as expected */
+ if (fw->data[hdr.meta_start + hdr.meta_size - 1] != '\0') {
+ drm_warn(&ptdev->base, "Firmware's git sha is not NULL terminated\n");
+ /* Don't treat as fatal */
+ return 0;
+ }
+
+ drm_info(&ptdev->base, "Firmware git sha: %s\n",
+ fw->data + hdr.meta_start + sizeof(git_sha_header));
+
+ return 0;
+}
+
static void
panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload)
{
@@ -636,7 +679,7 @@ panthor_reload_fw_sections(struct panthor_device *ptdev, bool full_reload)
list_for_each_entry(section, &ptdev->fw->sections, node) {
struct sg_table *sgt;
- if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
+ if (!full_reload && !(section->flags & CSF_FW_BINARY_IFACE_ENTRY_WR))
continue;
panthor_fw_init_section_mem(ptdev, section);
@@ -672,6 +715,8 @@ static int panthor_fw_load_entry(struct panthor_device *ptdev,
switch (CSF_FW_BINARY_ENTRY_TYPE(ehdr)) {
case CSF_FW_BINARY_ENTRY_TYPE_IFACE:
return panthor_fw_load_section_entry(ptdev, fw, &eiter, ehdr);
+ case CSF_FW_BINARY_ENTRY_TYPE_BUILD_INFO_METADATA:
+ return panthor_fw_read_build_info(ptdev, fw, &eiter, ehdr);
/* FIXME: handle those entry types? */
case CSF_FW_BINARY_ENTRY_TYPE_CONFIG:
@@ -921,7 +966,7 @@ static int panthor_fw_init_ifaces(struct panthor_device *ptdev)
return ret;
}
- drm_info(&ptdev->base, "CSF FW v%d.%d.%d, Features %#x Instrumentation features %#x",
+ drm_info(&ptdev->base, "CSF FW using interface v%d.%d.%d, Features %#x Instrumentation features %#x",
CSF_IFACE_VERSION_MAJOR(glb_iface->control->version),
CSF_IFACE_VERSION_MINOR(glb_iface->control->version),
CSF_IFACE_VERSION_PATCH(glb_iface->control->version),
@@ -1034,7 +1079,7 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
/* Make sure we won't be woken up by a ping. */
cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
- ptdev->fw->fast_reset = false;
+ ptdev->reset.fast = false;
if (!on_hang) {
struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
@@ -1043,17 +1088,11 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
if (!readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_HALT, 10, 100000) &&
- glb_iface->output->halt_status == PANTHOR_FW_HALT_OK) {
- ptdev->fw->fast_reset = true;
+ status == MCU_STATUS_HALT, 10, 100000)) {
+ ptdev->reset.fast = true;
} else {
drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
}
-
- /* The FW detects 0 -> 1 transitions. Make sure we reset
- * the HALT bit before the FW is rebooted.
- */
- panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
}
panthor_job_irq_suspend(&ptdev->fw->irq);
@@ -1075,41 +1114,30 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
if (ret)
return ret;
- /* If this is a fast reset, try to start the MCU without reloading
- * the FW sections. If it fails, go for a full reset.
- */
- if (ptdev->fw->fast_reset) {
- ret = panthor_fw_start(ptdev);
- if (!ret)
- goto out;
-
- /* Forcibly reset the MCU and force a slow reset, so we get a
- * fresh boot on the next panthor_fw_start() call.
+ if (!ptdev->reset.fast) {
+ /* On a slow reset, reload all sections, including RO ones.
+ * We're not supposed to end up here anyway, let's just assume
+ * the overhead of reloading everything is acceptable.
*/
- panthor_fw_stop(ptdev);
- ptdev->fw->fast_reset = false;
- drm_err(&ptdev->base, "FW fast reset failed, trying a slow reset");
+ panthor_reload_fw_sections(ptdev, true);
+ } else {
+ /* The FW detects 0 -> 1 transitions. Make sure we reset
+ * the HALT bit before the FW is rebooted.
+ * This is not needed on a slow reset because FW sections are
+ * re-initialized.
+ */
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
- ret = panthor_vm_flush_all(ptdev->fw->vm);
- if (ret) {
- drm_err(&ptdev->base, "FW slow reset failed (couldn't flush FW's AS l2cache)");
- return ret;
- }
+ panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
}
- /* Reload all sections, including RO ones. We're not supposed
- * to end up here anyway, let's just assume the overhead of
- * reloading everything is acceptable.
- */
- panthor_reload_fw_sections(ptdev, true);
-
ret = panthor_fw_start(ptdev);
if (ret) {
- drm_err(&ptdev->base, "FW slow reset failed (couldn't start the FW )");
+ drm_err(&ptdev->base, "FW %s reset failed",
+ ptdev->reset.fast ? "fast" : "slow");
return ret;
}
-out:
/* We must re-initialize the global interface even on fast-reset. */
panthor_fw_init_global_iface(ptdev);
return 0;
@@ -1133,11 +1161,13 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
- /* Make sure the IRQ handler can be called after that point. */
- if (ptdev->fw->irq.irq)
- panthor_job_irq_suspend(&ptdev->fw->irq);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) {
+ /* Make sure the IRQ handler cannot be called after that point. */
+ if (ptdev->fw->irq.irq)
+ panthor_job_irq_suspend(&ptdev->fw->irq);
- panthor_fw_stop(ptdev);
+ panthor_fw_stop(ptdev);
+ }
list_for_each_entry(section, &ptdev->fw->sections, node)
panthor_kernel_bo_destroy(section->mem);
@@ -1150,7 +1180,8 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
panthor_vm_put(ptdev->fw->vm);
ptdev->fw->vm = NULL;
- panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
+ panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
}
/**
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 38f560864879..8244a4e6c2a2 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -44,8 +44,7 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
goto out_free_bo;
- ret = panthor_vm_unmap_range(vm, bo->va_node.start,
- panthor_kernel_bo_size(bo));
+ ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
if (ret)
goto out_free_bo;
@@ -95,10 +94,16 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
}
bo = to_panthor_bo(&obj->base);
- size = obj->base.size;
kbo->obj = &obj->base;
bo->flags = bo_flags;
+ /* The system and GPU MMU page size might differ, which becomes a
+ * problem for FW sections that need to be mapped at an explicit address
+ * since our PAGE_SIZE alignment might cover a VA range that's
+ * expected to be used for another section.
+ * Make sure we never map more than we need.
+ */
+ size = ALIGN(size, panthor_vm_page_size(vm));
ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
if (ret)
goto err_put_obj;
@@ -145,6 +150,17 @@ panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
return drm_gem_prime_export(obj, flags);
}
+static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
+{
+ struct panthor_gem_object *bo = to_panthor_bo(obj);
+ enum drm_gem_object_status res = 0;
+
+ if (bo->base.base.import_attach || bo->base.pages)
+ res |= DRM_GEM_OBJECT_RESIDENT;
+
+ return res;
+}
+
static const struct drm_gem_object_funcs panthor_gem_funcs = {
.free = panthor_gem_free_object,
.print_info = drm_gem_shmem_object_print_info,
@@ -154,6 +170,7 @@ static const struct drm_gem_object_funcs panthor_gem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = panthor_gem_mmap,
+ .status = panthor_gem_status,
.export = panthor_gem_prime_export,
.vm_ops = &drm_gem_shmem_vm_ops,
};
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index 5251d8764e7d..671049020afa 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -77,6 +77,12 @@ static const struct panthor_model gpu_models[] = {
GPU_IRQ_RESET_COMPLETED | \
GPU_IRQ_CLEAN_CACHES_COMPLETED)
+static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
+{
+ gpu_write(ptdev, GPU_COHERENCY_PROTOCOL,
+ ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
+}
+
static void panthor_gpu_init_info(struct panthor_device *ptdev)
{
const struct panthor_model *model;
@@ -174,7 +180,8 @@ void panthor_gpu_unplug(struct panthor_device *ptdev)
unsigned long flags;
/* Make sure the IRQ handler is not running after that point. */
- panthor_gpu_irq_suspend(&ptdev->gpu->irq);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
+ panthor_gpu_irq_suspend(&ptdev->gpu->irq);
/* Wake-up all waiters. */
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
@@ -365,6 +372,9 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
hweight64(ptdev->gpu_info.shader_present));
}
+ /* Set the desired coherency mode before the power up of L2 */
+ panthor_gpu_coherency_set(ptdev);
+
return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}
@@ -460,11 +470,12 @@ int panthor_gpu_soft_reset(struct panthor_device *ptdev)
*/
void panthor_gpu_suspend(struct panthor_device *ptdev)
{
- /*
- * It may be preferable to simply power down the L2, but for now just
- * soft-reset which will leave the L2 powered down.
- */
- panthor_gpu_soft_reset(ptdev);
+ /* On a fast reset, simply power down the L2. */
+ if (!ptdev->reset.fast)
+ panthor_gpu_soft_reset(ptdev);
+ else
+ panthor_gpu_power_off(ptdev, L2, 1, 20000);
+
panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}
@@ -480,3 +491,50 @@ void panthor_gpu_resume(struct panthor_device *ptdev)
panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
panthor_gpu_l2_power_on(ptdev);
}
+
+/**
+ * panthor_gpu_read_64bit_counter() - Read a 64-bit counter at a given offset.
+ * @ptdev: Device.
+ * @reg: The offset of the register to read.
+ *
+ * Return: The counter value.
+ */
+static u64
+panthor_gpu_read_64bit_counter(struct panthor_device *ptdev, u32 reg)
+{
+ u32 hi, lo;
+
+ do {
+ hi = gpu_read(ptdev, reg + 0x4);
+ lo = gpu_read(ptdev, reg);
+ } while (hi != gpu_read(ptdev, reg + 0x4));
+
+ return ((u64)hi << 32) | lo;
+}
+
+/**
+ * panthor_gpu_read_timestamp() - Read the timestamp register.
+ * @ptdev: Device.
+ *
+ * Return: The GPU timestamp value.
+ */
+u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev)
+{
+ return panthor_gpu_read_64bit_counter(ptdev, GPU_TIMESTAMP_LO);
+}
+
+/**
+ * panthor_gpu_read_timestamp_offset() - Read the timestamp offset register.
+ * @ptdev: Device.
+ *
+ * Return: The GPU timestamp offset value.
+ */
+u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev)
+{
+ u32 hi, lo;
+
+ hi = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_HI);
+ lo = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_LO);
+
+ return ((u64)hi << 32) | lo;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h
index bba7555dd3c6..7f6133a66127 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.h
+++ b/drivers/gpu/drm/panthor/panthor_gpu.h
@@ -5,6 +5,8 @@
#ifndef __PANTHOR_GPU_H__
#define __PANTHOR_GPU_H__
+#include <linux/types.h>
+
struct panthor_device;
int panthor_gpu_init(struct panthor_device *ptdev);
@@ -48,5 +50,7 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev);
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
u32 l2, u32 lsc, u32 other);
int panthor_gpu_soft_reset(struct panthor_device *ptdev);
+u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev);
+u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev);
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 3cd2bce59edc..c39e3eb1c15d 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm)
mutex_unlock(&ptdev->mmu->as.slots_lock);
}
+u32 panthor_vm_page_size(struct panthor_vm *vm)
+{
+ const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
+ u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
+
+ return 1u << pg_shift;
+}
+
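panthor_vm_page_size() derives the GPU page size from the io-pgtable configuration rather than assuming the CPU's PAGE_SIZE: ffs() returns the 1-based index of the lowest set bit, so the helper yields the smallest granule the page-table code was configured with. For example:

	u32 pgsize_bitmap = SZ_4K | SZ_2M;		/* granules supported by the pgtable */
	u32 page_size = 1u << (ffs(pgsize_bitmap) - 1);	/* -> 4096 */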
static void panthor_vm_stop(struct panthor_vm *vm)
{
drm_sched_stop(&vm->sched, NULL);
@@ -833,7 +841,7 @@ static void panthor_vm_stop(struct panthor_vm *vm)
static void panthor_vm_start(struct panthor_vm *vm)
{
- drm_sched_start(&vm->sched);
+ drm_sched_start(&vm->sched, 0);
}
/**
@@ -982,6 +990,8 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
if (!size)
break;
+
+ offset = 0;
}
return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
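The added "offset = 0" fixes a mapping bug: the caller-supplied offset only applies inside the first scatterlist segment, so leaving it set would skip the head of every later segment. The shape of the corrected loop, reduced to a sketch (map_range is a hypothetical stand-in for the real mapping call):

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		dma_addr_t paddr = sg_dma_address(sgl) + offset;
		size_t len = sg_dma_len(sgl) - offset;

		map_range(iova, paddr, len);
		iova += len;
		offset = 0;	/* later segments start at their beginning */
	}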
@@ -1025,12 +1035,13 @@ int
panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
struct drm_mm_node *va_node)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(vm);
int ret;
- if (!size || (size & ~PAGE_MASK))
+ if (!size || !IS_ALIGNED(size, vm_pgsz))
return -EINVAL;
- if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
+ if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
return -EINVAL;
mutex_lock(&vm->mm_lock);
@@ -1571,7 +1582,9 @@ panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
{
struct panthor_vm *vm;
+ xa_lock(&pool->xa);
vm = panthor_vm_get(xa_load(&pool->xa, handle));
+ xa_unlock(&pool->xa);
return vm;
}
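Taking the xarray lock around the lookup closes a use-after-free window: without it, the VM returned by xa_load() could be removed from the pool and freed before panthor_vm_get() elevates its refcount. The rule of thumb, in generic form using a kref:

	xa_lock(&pool->xa);
	obj = xa_load(&pool->xa, handle);
	if (obj)
		kref_get(&obj->refcount);	/* reference taken before the lock drops */
	xa_unlock(&pool->xa);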
@@ -1928,7 +1941,7 @@ struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool c
return pool;
}
-static u64 mair_to_memattr(u64 mair)
+static u64 mair_to_memattr(u64 mair, bool coherent)
{
u64 memattr = 0;
u32 i;
@@ -1947,14 +1960,21 @@ static u64 mair_to_memattr(u64 mair)
AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
} else {
- /* Use SH_CPU_INNER mode so SH_IS, which is used when
- * IOMMU_CACHE is set, actually maps to the standard
- * definition of inner-shareable and not Mali's
- * internal-shareable mode.
- */
out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
- AS_MEMATTR_AARCH64_SH_CPU_INNER |
AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
+ /* Use SH_MIDGARD_INNER mode when the device isn't coherent,
+ * so SH_IS, which is used when IOMMU_CACHE is set, maps
+ * to Mali's internal-shareable mode. As per the Mali
+ * spec, inner- and outer-shareable modes aren't allowed
+ * for WB memory when coherency is disabled.
+ * Use SH_CPU_INNER mode when coherency is enabled, so
+ * that SH_IS actually maps to the standard definition of
+ * inner-shareable.
+ */
+ if (!coherent)
+ out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
+ else
+ out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
}
memattr |= (u64)out_attr << (8 * i);
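
MAIR and MEMATTR both pack eight 8-bit attribute entries into a single 64-bit register, which is what the memattr |= (u64)out_attr << (8 * i) step above expresses. A minimal sketch of that extract/translate/repack loop, with translate() as a placeholder for the per-entry conversion the real function performs:

#include <stdint.h>

static uint8_t translate(uint8_t mair_entry)
{
	return mair_entry;	/* placeholder for the MAIR -> MEMATTR mapping */
}

uint64_t pack_memattr(uint64_t mair)
{
	uint64_t memattr = 0;

	for (unsigned int i = 0; i < 8; i++) {
		uint8_t entry = (mair >> (8 * i)) & 0xff;	/* extract entry i */

		memattr |= (uint64_t)translate(entry) << (8 * i);	/* repack */
	}
	return memattr;
}

The cast to a 64-bit type before the shift is load-bearing: shifting a narrower value by up to 56 bits would otherwise be truncated or undefined.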
@@ -2326,7 +2346,7 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
goto err_sched_fini;
mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
- vm->memattr = mair_to_memattr(mair);
+ vm->memattr = mair_to_memattr(mair, ptdev->coherent);
mutex_lock(&ptdev->mmu->vm.lock);
list_add_tail(&vm->node, &ptdev->mmu->vm.list);
@@ -2366,11 +2386,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
const struct drm_panthor_vm_bind_op *op,
struct panthor_vm_op_ctx *op_ctx)
{
+ ssize_t vm_pgsz = panthor_vm_page_size(vm);
struct drm_gem_object *gem;
int ret;
/* Aligned on page size. */
- if ((op->va | op->size) & ~PAGE_MASK)
+ if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
return -EINVAL;
switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
@@ -2651,7 +2672,8 @@ int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm
*/
void panthor_mmu_unplug(struct panthor_device *ptdev)
{
- panthor_mmu_irq_suspend(&ptdev->mmu->irq);
+ if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
+ panthor_mmu_irq_suspend(&ptdev->mmu->irq);
mutex_lock(&ptdev->mmu->as.slots_lock);
for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
@@ -2716,9 +2738,9 @@ int panthor_mmu_init(struct panthor_device *ptdev)
* which passes iova as an unsigned long. Patch the mmu_features to reflect this
* limitation.
*/
- if (sizeof(unsigned long) * 8 < va_bits) {
+ if (va_bits > BITS_PER_LONG) {
ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0);
- ptdev->gpu_info.mmu_features |= sizeof(unsigned long) * 8;
+ ptdev->gpu_info.mmu_features |= BITS_PER_LONG;
}
return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index 6788771071e3..8d21e83d8aba 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -30,6 +30,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
int panthor_vm_active(struct panthor_vm *vm);
void panthor_vm_idle(struct panthor_vm *vm);
+u32 panthor_vm_page_size(struct panthor_vm *vm);
int panthor_vm_as(struct panthor_vm *vm);
int panthor_vm_flush_all(struct panthor_vm *vm);
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index aee362abb710..77b184c3fb0c 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -93,6 +93,9 @@
#define MIN_CSGS 3
#define MAX_CSG_PRIO 0xf
+#define NUM_INSTRS_PER_CACHE_LINE (64 / sizeof(u64))
+#define MAX_INSTRS_PER_JOB 24
+
struct panthor_group;
/**
@@ -137,8 +140,6 @@ enum panthor_csg_priority {
* non-real-time groups. When such a group becomes executable,
* it will evict the group with the lowest non-rt priority if
* there's no free group slot available.
- *
- * Currently not exposed to userspace.
*/
PANTHOR_CSG_PRIORITY_RT,
@@ -476,6 +477,18 @@ struct panthor_queue {
*/
struct list_head in_flight_jobs;
} fence_ctx;
+
+ /** @profiling: Job profiling data slots and access information. */
+ struct {
+ /** @slots: Kernel BO holding the slots. */
+ struct panthor_kernel_bo *slots;
+
+ /** @slot_count: Number of jobs the ringbuffer can hold at once. */
+ u32 slot_count;
+
+ /** @seqno: Index of the next available profiling information slot. */
+ u32 seqno;
+ } profiling;
};
/**
@@ -589,14 +602,25 @@ struct panthor_group {
* @timedout: True when a timeout occurred on any of the queues owned by
* this group.
*
- * Timeouts can be reported by drm_sched or by the FW. In any case, any
- * timeout situation is unrecoverable, and the group becomes useless.
- * We simply wait for all references to be dropped so we can release the
- * group object.
+ * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
+ * and the group can't be suspended, this also leads to a timeout. In any case,
+ * any timeout situation is unrecoverable, and the group becomes useless. We
+ * simply wait for all references to be dropped so we can release the group
+ * object.
*/
bool timedout;
/**
+ * @innocent: True when the group becomes unusable because the group suspension
+ * failed during a reset.
+ *
+ * Sometimes the FW is put in a bad state by other groups, causing the group
+ * suspension performed in the reset path to fail. In that case, we consider
+ * the group innocent.
+ */
+ bool innocent;
+
+ /**
* @syncobjs: Pool of per-queue synchronization objects.
*
* One sync object per queue. The position of the sync object is
@@ -604,6 +628,18 @@ struct panthor_group {
*/
struct panthor_kernel_bo *syncobjs;
+ /** @fdinfo: Per-file total cycle and timestamp values reference. */
+ struct {
+ /** @data: Total sampled values for jobs in queues from this group. */
+ struct panthor_gpu_usage data;
+
+ /**
+ * @lock: Mutex governing concurrent access from the DRM file's fdinfo
+ * callback and the job post-completion processing function.
+ */
+ struct mutex lock;
+ } fdinfo;
+
/** @state: Group state. */
enum panthor_group_state state;
@@ -661,6 +697,18 @@ struct panthor_group {
struct list_head wait_node;
};
+struct panthor_job_profiling_data {
+ struct {
+ u64 before;
+ u64 after;
+ } cycles;
+
+ struct {
+ u64 before;
+ u64 after;
+ } time;
+};
+
/**
* group_queue_work() - Queue a group work
* @group: Group to queue the work for.
@@ -774,6 +822,15 @@ struct panthor_job {
/** @done_fence: Fence signaled when the job is finished or cancelled. */
struct dma_fence *done_fence;
+
+ /** @profiling: Job profiling information. */
+ struct {
+ /** @mask: Current device job profiling enablement bitmask. */
+ u32 mask;
+
+ /** @slot: Job index in the profiling slots BO. */
+ u32 slot;
+ } profiling;
};
static void
@@ -838,6 +895,7 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
panthor_kernel_bo_destroy(queue->ringbuf);
panthor_kernel_bo_destroy(queue->iface.mem);
+ panthor_kernel_bo_destroy(queue->profiling.slots);
/* Release the last_fence we were holding, if any. */
dma_fence_put(queue->fence_ctx.last_fence);
@@ -852,6 +910,8 @@ static void group_release_work(struct work_struct *work)
release_work);
u32 i;
+ mutex_destroy(&group->fdinfo.lock);
+
for (i = 0; i < group->queue_count; i++)
group_free_queue(group, group->queues[i]);
@@ -1988,8 +2048,6 @@ tick_ctx_init(struct panthor_scheduler *sched,
}
}
-#define NUM_INSTRS_PER_SLOT 16
-
static void
group_term_post_processing(struct panthor_group *group)
{
@@ -2306,7 +2364,7 @@ static void tick_work(struct work_struct *work)
if (!drm_dev_enter(&ptdev->base, &cookie))
return;
- ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ ret = panthor_device_resume_and_get(ptdev);
if (drm_WARN_ON(&ptdev->base, ret))
goto out_dev_exit;
@@ -2545,7 +2603,7 @@ static void queue_start(struct panthor_queue *queue)
list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
job->base.s_fence->parent = dma_fence_get(job->done_fence);
- drm_sched_start(&queue->scheduler);
+ drm_sched_start(&queue->scheduler, 0);
}
static void panthor_group_stop(struct panthor_group *group)
@@ -2640,6 +2698,18 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
csgs_upd_ctx_init(&upd_ctx);
while (slot_mask) {
u32 csg_id = ffs(slot_mask) - 1;
+ struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+
+ /* If the group was still usable before that point, we consider
+ * it innocent.
+ */
+ if (group_can_run(csg_slot->group))
+ csg_slot->group->innocent = true;
+
+ /* We consider group suspension failures fatal and flag the
+ * group as unusable by setting timedout=true.
+ */
+ csg_slot->group->timedout = true;
csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
CSG_STATE_TERMINATE,
@@ -2783,6 +2853,41 @@ void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
}
}
+static void update_fdinfo_stats(struct panthor_job *job)
+{
+ struct panthor_group *group = job->group;
+ struct panthor_queue *queue = group->queues[job->queue_idx];
+ struct panthor_gpu_usage *fdinfo = &group->fdinfo.data;
+ struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
+ struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
+
+ mutex_lock(&group->fdinfo.lock);
+ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
+ fdinfo->cycles += data->cycles.after - data->cycles.before;
+ if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
+ fdinfo->time += data->time.after - data->time.before;
+ mutex_unlock(&group->fdinfo.lock);
+}
+
+void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
+{
+ struct panthor_group_pool *gpool = pfile->groups;
+ struct panthor_group *group;
+ unsigned long i;
+
+ if (IS_ERR_OR_NULL(gpool))
+ return;
+
+ xa_for_each(&gpool->xa, i, group) {
+ mutex_lock(&group->fdinfo.lock);
+ pfile->stats.cycles += group->fdinfo.data.cycles;
+ pfile->stats.time += group->fdinfo.data.time;
+ group->fdinfo.data.cycles = 0;
+ group->fdinfo.data.time = 0;
+ mutex_unlock(&group->fdinfo.lock);
+ }
+}
+
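
The two functions above form a drain-and-reset accumulator: update_fdinfo_stats() folds each finished job's before/after samples into per-group totals, and panthor_fdinfo_gather_group_samples() moves those totals into the per-file stats and zeroes them, so every sample is counted exactly once. A self-contained sketch of the pattern (a pthread mutex standing in for the kernel mutex; the names are illustrative):

#include <pthread.h>
#include <stdint.h>

struct group_stats {
	pthread_mutex_t lock;
	uint64_t cycles, time;
};

struct file_stats {
	uint64_t cycles, time;
};

/* Completion side: fold one job's sampled delta into the group total. */
static void group_add_sample(struct group_stats *g,
			     uint64_t before, uint64_t after)
{
	pthread_mutex_lock(&g->lock);
	g->cycles += after - before;
	pthread_mutex_unlock(&g->lock);
}

/* fdinfo side: drain the group total into the file total and reset it. */
static void file_gather(struct file_stats *f, struct group_stats *g)
{
	pthread_mutex_lock(&g->lock);
	f->cycles += g->cycles;
	f->time += g->time;
	g->cycles = 0;
	g->time = 0;
	pthread_mutex_unlock(&g->lock);
}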
static void group_sync_upd_work(struct work_struct *work)
{
struct panthor_group *group =
@@ -2815,6 +2920,8 @@ static void group_sync_upd_work(struct work_struct *work)
dma_fence_end_signalling(cookie);
list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
+ if (job->profiling.mask)
+ update_fdinfo_stats(job);
list_del_init(&job->node);
panthor_job_put(&job->base);
}
@@ -2822,65 +2929,198 @@ static void group_sync_upd_work(struct work_struct *work)
group_put(group);
}
-static struct dma_fence *
-queue_run_job(struct drm_sched_job *sched_job)
+struct panthor_job_ringbuf_instrs {
+ u64 buffer[MAX_INSTRS_PER_JOB];
+ u32 count;
+};
+
+struct panthor_job_instr {
+ u32 profile_mask;
+ u64 instr;
+};
+
+#define JOB_INSTR(__prof, __instr) \
+ { \
+ .profile_mask = __prof, \
+ .instr = __instr, \
+ }
+
+static void
+copy_instrs_to_ringbuf(struct panthor_queue *queue,
+ struct panthor_job *job,
+ struct panthor_job_ringbuf_instrs *instrs)
+{
+ u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
+ u64 start = job->ringbuf.start & (ringbuf_size - 1);
+ u64 size, written;
+
+ /*
+ * We need to write a whole slot, including any trailing zeroes
+ * that may come at the end of it. Also, because instrs.buffer has
+ * been zero-initialised, there's no need to pad it with zeroes.
+ */
+ instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
+ size = instrs->count * sizeof(u64);
+ WARN_ON(size > ringbuf_size);
+ written = min(ringbuf_size - start, size);
+
+ memcpy(queue->ringbuf->kmap + start, instrs->buffer, written);
+
+ if (written < size)
+ memcpy(queue->ringbuf->kmap,
+ &instrs->buffer[written / sizeof(u64)],
+ size - written);
+}
+
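
copy_instrs_to_ringbuf() above is the standard two-part write into a power-of-two ring buffer: mask the insert pointer, copy up to the end of the buffer, then wrap the remainder to offset zero. A standalone sketch of the same technique:

#include <stdint.h>
#include <string.h>

/* ring_size must be a power of two, and size <= ring_size. */
static void ringbuf_write(uint8_t *ring, uint64_t ring_size, uint64_t insert,
			  const void *src, uint64_t size)
{
	uint64_t start = insert & (ring_size - 1);	/* wrap insert pointer */
	uint64_t first = size < ring_size - start ? size : ring_size - start;

	memcpy(ring + start, src, first);	/* up to the end of the ring */
	if (first < size)			/* wrapped remainder, if any */
		memcpy(ring, (const uint8_t *)src + first, size - first);
}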
+struct panthor_job_cs_params {
+ u32 profile_mask;
+ u64 addr_reg; u64 val_reg;
+ u64 cycle_reg; u64 time_reg;
+ u64 sync_addr; u64 times_addr;
+ u64 cs_start; u64 cs_size;
+ u32 last_flush; u32 waitall_mask;
+};
+
+static void
+get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params)
{
- struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
struct panthor_group *group = job->group;
struct panthor_queue *queue = group->queues[job->queue_idx];
struct panthor_device *ptdev = group->ptdev;
struct panthor_scheduler *sched = ptdev->scheduler;
- u32 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
- u32 ringbuf_insert = queue->iface.input->insert & (ringbuf_size - 1);
- u64 addr_reg = ptdev->csif_info.cs_reg_count -
- ptdev->csif_info.unpreserved_cs_reg_count;
- u64 val_reg = addr_reg + 2;
- u64 sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
- job->queue_idx * sizeof(struct panthor_syncobj_64b);
- u32 waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
- struct dma_fence *done_fence;
- int ret;
- u64 call_instrs[NUM_INSTRS_PER_SLOT] = {
- /* MOV32 rX+2, cs.latest_flush */
- (2ull << 56) | (val_reg << 48) | job->call_info.latest_flush,
+ params->addr_reg = ptdev->csif_info.cs_reg_count -
+ ptdev->csif_info.unpreserved_cs_reg_count;
+ params->val_reg = params->addr_reg + 2;
+ params->cycle_reg = params->addr_reg;
+ params->time_reg = params->val_reg;
- /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
- (36ull << 56) | (0ull << 48) | (val_reg << 40) | (0 << 16) | 0x233,
+ params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
+ job->queue_idx * sizeof(struct panthor_syncobj_64b);
+ params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) +
+ (job->profiling.slot * sizeof(struct panthor_job_profiling_data));
+ params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
- /* MOV48 rX:rX+1, cs.start */
- (1ull << 56) | (addr_reg << 48) | job->call_info.start,
+ params->cs_start = job->call_info.start;
+ params->cs_size = job->call_info.size;
+ params->last_flush = job->call_info.latest_flush;
- /* MOV32 rX+2, cs.size */
- (2ull << 56) | (val_reg << 48) | job->call_info.size,
+ params->profile_mask = job->profiling.mask;
+}
- /* WAIT(0) => waits for FLUSH_CACHE2 instruction */
- (3ull << 56) | (1 << 16),
+#define JOB_INSTR_ALWAYS(instr) \
+ JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr))
+#define JOB_INSTR_TIMESTAMP(instr) \
+ JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr))
+#define JOB_INSTR_CYCLES(instr) \
+ JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr))
+static void
+prepare_job_instrs(const struct panthor_job_cs_params *params,
+ struct panthor_job_ringbuf_instrs *instrs)
+{
+ const struct panthor_job_instr instr_seq[] = {
+ /* MOV32 rX+2, cs.latest_flush */
+ JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush),
+ /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
+ JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) |
+ (0 << 16) | 0x233),
+ /* MOV48 rX:rX+1, cycles_offset */
+ JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
+ (params->times_addr +
+ offsetof(struct panthor_job_profiling_data, cycles.before))),
+ /* STORE_STATE cycles */
+ JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
+ /* MOV48 rX:rX+1, time_offset */
+ JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
+ (params->times_addr +
+ offsetof(struct panthor_job_profiling_data, time.before))),
+ /* STORE_STATE timer */
+ JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
+ /* MOV48 rX:rX+1, cs.start */
+ JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start),
+ /* MOV32 rX+2, cs.size */
+ JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size),
+ /* WAIT(0) => waits for FLUSH_CACHE2 instruction */
+ JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)),
/* CALL rX:rX+1, rX+2 */
- (32ull << 56) | (addr_reg << 40) | (val_reg << 32),
-
+ JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) |
+ (params->val_reg << 32)),
+ /* MOV48 rX:rX+1, cycles_offset */
+ JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
+ (params->times_addr +
+ offsetof(struct panthor_job_profiling_data, cycles.after))),
+ /* STORE_STATE cycles */
+ JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
+ /* MOV48 rX:rX+1, time_offset */
+ JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
+ (params->times_addr +
+ offsetof(struct panthor_job_profiling_data, time.after))),
+ /* STORE_STATE timer */
+ JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
/* MOV48 rX:rX+1, sync_addr */
- (1ull << 56) | (addr_reg << 48) | sync_addr,
-
+ JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr),
/* MOV48 rX+2, #1 */
- (1ull << 56) | (val_reg << 48) | 1,
-
+ JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1),
/* WAIT(all) */
- (3ull << 56) | (waitall_mask << 16),
-
+ JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)),
/* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
- (51ull << 56) | (0ull << 48) | (addr_reg << 40) | (val_reg << 32) | (0 << 16) | 1,
+ JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) |
+ (params->val_reg << 32) | (0 << 16) | 1),
+ /* ERROR_BARRIER, so we can recover from faults at job boundaries. */
+ JOB_INSTR_ALWAYS((47ull << 56)),
+ };
+ u32 pad;
- /* ERROR_BARRIER, so we can recover from faults at job
- * boundaries.
- */
- (47ull << 56),
+ instrs->count = 0;
+
+ /* Needs to be cacheline-aligned to please the prefetcher. */
+ static_assert(sizeof(instrs->buffer) % 64 == 0,
+ "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline");
+
+ /* Make sure we have enough storage to store the whole sequence. */
+ static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) ==
+ ARRAY_SIZE(instrs->buffer),
+ "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch");
+
+ for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) {
+ /* If the profile mask of this instruction is not enabled, skip it. */
+ if (instr_seq[i].profile_mask &&
+ !(instr_seq[i].profile_mask & params->profile_mask))
+ continue;
+
+ instrs->buffer[instrs->count++] = instr_seq[i].instr;
+ }
+
+ pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
+ memset(&instrs->buffer[instrs->count], 0,
+ (pad - instrs->count) * sizeof(instrs->buffer[0]));
+ instrs->count = pad;
+}
+
+static u32 calc_job_credits(u32 profile_mask)
+{
+ struct panthor_job_ringbuf_instrs instrs;
+ struct panthor_job_cs_params params = {
+ .profile_mask = profile_mask,
};
- /* Need to be cacheline aligned to please the prefetcher. */
- static_assert(sizeof(call_instrs) % 64 == 0,
- "call_instrs is not aligned on a cacheline");
+ prepare_job_instrs(&params, &instrs);
+ return instrs.count;
+}
+
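
calc_job_credits() runs prepare_job_instrs() against a dummy parameter block, so a job's scheduler credit count is exactly the cache-line-padded number of ring-buffer instructions it will emit. Tallying instr_seq above gives 11 JOB_INSTR_ALWAYS entries plus 4 each for the cycles and timestamp groups; a small sketch of the resulting credit values:

#include <stdio.h>

#define NUM_INSTRS_PER_CACHE_LINE 8	/* 64 bytes / sizeof(u64) */
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	/* Instruction counts tallied from instr_seq, padded per cache line. */
	printf("disabled: %d\n", ALIGN_UP(11, NUM_INSTRS_PER_CACHE_LINE));	/* 16 */
	printf("one flag: %d\n", ALIGN_UP(11 + 4, NUM_INSTRS_PER_CACHE_LINE));	/* 16 */
	printf("both:     %d\n", ALIGN_UP(11 + 4 + 4, NUM_INSTRS_PER_CACHE_LINE));	/* 24 */
	return 0;
}

The "both" case lands on 24, matching MAX_INSTRS_PER_JOB defined earlier.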
+static struct dma_fence *
+queue_run_job(struct drm_sched_job *sched_job)
+{
+ struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
+ struct panthor_group *group = job->group;
+ struct panthor_queue *queue = group->queues[job->queue_idx];
+ struct panthor_device *ptdev = group->ptdev;
+ struct panthor_scheduler *sched = ptdev->scheduler;
+ struct panthor_job_ringbuf_instrs instrs;
+ struct panthor_job_cs_params cs_params;
+ struct dma_fence *done_fence;
+ int ret;
/* Stream size is zero, nothing to do except making sure all previously
* submitted jobs are done before we signal the
@@ -2891,7 +3131,7 @@ queue_run_job(struct drm_sched_job *sched_job)
return dma_fence_get(job->done_fence);
}
- ret = pm_runtime_resume_and_get(ptdev->base.dev);
+ ret = panthor_device_resume_and_get(ptdev);
if (drm_WARN_ON(&ptdev->base, ret))
return ERR_PTR(ret);
@@ -2907,17 +3147,23 @@ queue_run_job(struct drm_sched_job *sched_job)
queue->fence_ctx.id,
atomic64_inc_return(&queue->fence_ctx.seqno));
- memcpy(queue->ringbuf->kmap + ringbuf_insert,
- call_instrs, sizeof(call_instrs));
+ job->profiling.slot = queue->profiling.seqno++;
+ if (queue->profiling.seqno == queue->profiling.slot_count)
+ queue->profiling.seqno = 0;
+
+ job->ringbuf.start = queue->iface.input->insert;
+
+ get_job_cs_params(job, &cs_params);
+ prepare_job_instrs(&cs_params, &instrs);
+ copy_instrs_to_ringbuf(queue, job, &instrs);
+
+ job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64));
panthor_job_get(&job->base);
spin_lock(&queue->fence_ctx.lock);
list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
spin_unlock(&queue->fence_ctx.lock);
- job->ringbuf.start = queue->iface.input->insert;
- job->ringbuf.end = job->ringbuf.start + sizeof(call_instrs);
-
/* Make sure the ring buffer is updated before the INSERT
* register.
*/
@@ -3010,6 +3256,33 @@ static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
.free_job = queue_free_job,
};
+static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
+ u32 cs_ringbuf_size)
+{
+ u32 min_profiled_job_instrs = U32_MAX;
+ u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL);
+
+ /*
+ * We want the minimum size of a profiled job's CS: because profiled
+ * jobs need additional instructions to sample performance metrics,
+ * they occupy more room in the queue's ring buffer, so fewer of them
+ * can be in flight at once. The number of profiling slots to allocate
+ * is therefore the maximum number of profiled jobs that can sit in
+ * the ring buffer simultaneously, i.e. the ring buffer size divided
+ * by the smallest profiled job size.
+ * That size has to be computed separately for every job profiling
+ * flag; the profiling-disabled case is skipped, since unprofiled jobs
+ * don't need a profiling slot at all.
+ */
+ for (u32 i = 0; i < last_flag; i++) {
+ min_profiled_job_instrs =
+ min(min_profiled_job_instrs, calc_job_credits(BIT(i)));
+ }
+
+ return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
+}
+
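
Continuing the credit example above, the smallest single-flag profiled job is 16 instructions (128 bytes), so the slot count is just the ring size divided by that, rounded up. A worked sketch with an illustrative 64 KiB ring buffer:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t ringbuf_size = 64 * 1024;	/* example ring size */
	uint32_t min_instrs = 16;		/* smallest profiled job, from above */
	uint32_t slot_bytes = min_instrs * (uint32_t)sizeof(uint64_t);

	printf("%u profiling slots\n", DIV_ROUND_UP(ringbuf_size, slot_bytes));	/* 512 */
	return 0;
}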
static struct panthor_queue *
group_create_queue(struct panthor_group *group,
const struct drm_panthor_queue_create *args)
@@ -3063,9 +3336,35 @@ group_create_queue(struct panthor_group *group,
goto err_free_queue;
}
+ queue->profiling.slot_count =
+ calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size);
+
+ queue->profiling.slots =
+ panthor_kernel_bo_create(group->ptdev, group->vm,
+ queue->profiling.slot_count *
+ sizeof(struct panthor_job_profiling_data),
+ DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
+ DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
+ PANTHOR_VM_KERNEL_AUTO_VA);
+
+ if (IS_ERR(queue->profiling.slots)) {
+ ret = PTR_ERR(queue->profiling.slots);
+ goto err_free_queue;
+ }
+
+ ret = panthor_kernel_bo_vmap(queue->profiling.slots);
+ if (ret)
+ goto err_free_queue;
+
+ /*
+ * The credit limit argument is the total number of instructions the
+ * ring buffer can hold across all CS slots; some jobs require twice
+ * as many instructions as others, depending on their profiling status.
+ */
ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
group->ptdev->scheduler->wq, 1,
- args->ringbuf_size / (NUM_INSTRS_PER_SLOT * sizeof(u64)),
+ args->ringbuf_size / sizeof(u64),
0, msecs_to_jiffies(JOB_TIMEOUT_MS),
group->ptdev->reset.wq,
NULL, "panthor-queue", group->ptdev->base.dev);
@@ -3206,6 +3505,8 @@ int panthor_group_create(struct panthor_file *pfile,
}
mutex_unlock(&sched->reset.lock);
+ mutex_init(&group->fdinfo.lock);
+
return gid;
err_put_group:
@@ -3285,6 +3586,8 @@ int panthor_group_get_state(struct panthor_file *pfile,
get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
get_state->fatal_queues = group->fatal_queues;
}
+ if (group->innocent)
+ get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT;
mutex_unlock(&sched->lock);
group_put(group);
@@ -3373,6 +3676,7 @@ panthor_job_create(struct panthor_file *pfile,
{
struct panthor_group_pool *gpool = pfile->groups;
struct panthor_job *job;
+ u32 credits;
int ret;
if (qsubmit->pad)
@@ -3409,6 +3713,11 @@ panthor_job_create(struct panthor_file *pfile,
goto err_put_job;
}
+ if (!group_can_run(job->group)) {
+ ret = -EINVAL;
+ goto err_put_job;
+ }
+
if (job->queue_idx >= job->group->queue_count ||
!job->group->queues[job->queue_idx]) {
ret = -EINVAL;
@@ -3426,9 +3735,16 @@ panthor_job_create(struct panthor_file *pfile,
}
}
+ job->profiling.mask = pfile->ptdev->profile_mask;
+ credits = calc_job_credits(job->profiling.mask);
+ if (credits == 0) {
+ ret = -EINVAL;
+ goto err_put_job;
+ }
+
ret = drm_sched_job_init(&job->base,
&job->group->queues[job->queue_idx]->entity,
- 1, job->group);
+ credits, job->group);
if (ret)
goto err_put_job;
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index 3a30d2328b30..5ae6b4bde7c5 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -47,4 +47,6 @@ void panthor_sched_resume(struct panthor_device *ptdev);
void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
+void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile);
+
#endif
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index 20fe1d2c0aaf..82e918820950 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -5,6 +5,7 @@ config DRM_PL111
depends on ARM || ARM64 || COMPILE_TEST
depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
depends on COMMON_CLK
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_BRIDGE
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 02e6b74d5016..56ff6a3fb483 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -45,6 +45,7 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
@@ -219,12 +220,12 @@ static const struct drm_driver pl111_drm_driver = {
.fops = &drm_fops,
.name = "pl111",
.desc = DRIVER_DESC,
- .date = "20170317",
.major = 1,
.minor = 0,
.patchlevel = 0,
.dumb_create = drm_gem_dma_dumb_create,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
+ DRM_FBDEV_DMA_DRIVER_OPS,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = pl111_debugfs_init,
@@ -305,7 +306,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
if (ret < 0)
goto dev_put;
- drm_fbdev_dma_setup(drm, priv->variant->fb_depth);
+ drm_client_setup_with_color_mode(drm, priv->variant->fb_depth);
return 0;
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index ca3f51c2a8fe..69427eb8bed2 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -1,10 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_QXL
tristate "QXL virtual GPU"
- depends on DRM && PCI && MMU
+ depends on DRM && PCI && MMU && HAS_IOPORT
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select DRM_EXEC
select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 5eb3f5719fdf..417061ae59eb 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -29,12 +29,13 @@
#include "qxl_drv.h"
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
@@ -91,7 +92,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &qxl_driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, qxl_driver.name);
if (ret)
goto disable_pci;
@@ -118,7 +119,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto modeset_cleanup;
- drm_fbdev_ttm_setup(&qdev->ddev, 32);
+ drm_client_setup(&qdev->ddev, NULL);
return 0;
modeset_cleanup:
@@ -293,12 +294,12 @@ static struct drm_driver qxl_driver = {
.debugfs_init = qxl_debugfs_init,
#endif
.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
+ DRM_FBDEV_TTM_DRIVER_OPS,
.fops = &qxl_fops,
.ioctls = qxl_ioctls,
.num_ioctls = ARRAY_SIZE(qxl_ioctls),
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = 0,
.minor = 1,
.patchlevel = 0,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 32069acd93f8..cc02b5f10ad9 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,12 +38,12 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_exec.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
#include "qxl_dev.h"
@@ -54,7 +54,6 @@ struct iosys_map;
#define DRIVER_NAME "qxl"
#define DRIVER_DESC "RH QXL"
-#define DRIVER_DATE "20120117"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
@@ -101,7 +100,8 @@ struct qxl_gem {
};
struct qxl_bo_list {
- struct ttm_validate_buffer tv;
+ struct qxl_bo *bo;
+ struct list_head list;
};
struct qxl_crtc {
@@ -150,7 +150,7 @@ struct qxl_release {
struct qxl_bo *release_bo;
uint32_t release_offset;
uint32_t surface_release_id;
- struct ww_acquire_ctx ticket;
+ struct drm_exec exec;
struct list_head bos;
};
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 368d26da0d6a..05204a6a3fa8 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -121,13 +121,11 @@ qxl_release_free_list(struct qxl_release *release)
{
while (!list_empty(&release->bos)) {
struct qxl_bo_list *entry;
- struct qxl_bo *bo;
entry = container_of(release->bos.next,
- struct qxl_bo_list, tv.head);
- bo = to_qxl_bo(entry->tv.bo);
- qxl_bo_unref(&bo);
- list_del(&entry->tv.head);
+ struct qxl_bo_list, list);
+ qxl_bo_unref(&entry->bo);
+ list_del(&entry->list);
kfree(entry);
}
release->release_bo = NULL;
@@ -172,8 +170,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
struct qxl_bo_list *entry;
- list_for_each_entry(entry, &release->bos, tv.head) {
- if (entry->tv.bo == &bo->tbo)
+ list_for_each_entry(entry, &release->bos, list) {
+ if (entry->bo == bo)
return 0;
}
@@ -182,9 +180,8 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
return -ENOMEM;
qxl_bo_ref(bo);
- entry->tv.bo = &bo->tbo;
- entry->tv.num_shared = 0;
- list_add_tail(&entry->tv.head, &release->bos);
+ entry->bo = bo;
+ list_add_tail(&entry->list, &release->bos);
return 0;
}
@@ -221,21 +218,28 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
if (list_is_singular(&release->bos))
return 0;
- ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL);
- if (ret)
- return ret;
-
- list_for_each_entry(entry, &release->bos, tv.head) {
- struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
-
- ret = qxl_release_validate_bo(bo);
- if (ret) {
- ttm_eu_backoff_reservation(&release->ticket, &release->bos);
- return ret;
+ drm_exec_init(&release->exec, no_intr ? 0 :
+ DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&release->exec) {
+ list_for_each_entry(entry, &release->bos, list) {
+ ret = drm_exec_prepare_obj(&release->exec,
+ &entry->bo->tbo.base,
+ 1);
+ drm_exec_retry_on_contention(&release->exec);
+ if (ret)
+ goto error;
}
}
+
+ list_for_each_entry(entry, &release->bos, list) {
+ ret = qxl_release_validate_bo(entry->bo);
+ if (ret)
+ goto error;
+ }
return 0;
+error:
+ drm_exec_fini(&release->exec);
+ return ret;
}
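
The conversion above swaps the ttm_eu_* reservation API for the drm_exec helper: drm_exec_until_all_locked() re-runs its block until every object is locked, drm_exec_retry_on_contention() drops all locks already taken and restarts the block on ww-mutex contention, and drm_exec_fini() releases everything at the end. The shape of the pattern, condensed from the hunk above (kernel-only API, so this is a shape sketch rather than a standalone program):

#include <drm/drm_exec.h>

/* Condensed from qxl_release_reserve_list() above. */
drm_exec_init(&release->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&release->exec) {
	list_for_each_entry(entry, &release->bos, list) {
		/* Lock the GEM object and reserve one fence slot. */
		ret = drm_exec_prepare_obj(&release->exec,
					   &entry->bo->tbo.base, 1);
		drm_exec_retry_on_contention(&release->exec);	/* backoff + restart */
		if (ret)
			goto error;
	}
}
/* ... validate/use the locked objects, add fences ... */
drm_exec_fini(&release->exec);	/* drops every lock taken above */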
void qxl_release_backoff_reserve_list(struct qxl_release *release)
@@ -245,7 +249,7 @@ void qxl_release_backoff_reserve_list(struct qxl_release *release)
if (list_is_singular(&release->bos))
return;
- ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+ drm_exec_fini(&release->exec);
}
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
@@ -404,18 +408,18 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
- struct ttm_buffer_object *bo;
struct ttm_device *bdev;
- struct ttm_validate_buffer *entry;
+ struct qxl_bo_list *entry;
struct qxl_device *qdev;
+ struct qxl_bo *bo;
/* If there is only one object on the release, it is the release itself;
since these objects are pinned, there is no need to reserve. */
if (list_is_singular(&release->bos) || list_empty(&release->bos))
return;
- bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
- bdev = bo->bdev;
+ bo = list_first_entry(&release->bos, struct qxl_bo_list, list)->bo;
+ bdev = bo->tbo.bdev;
qdev = container_of(bdev, struct qxl_device, mman.bdev);
/*
@@ -426,14 +430,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- list_for_each_entry(entry, &release->bos, head) {
+ list_for_each_entry(entry, &release->bos, list) {
bo = entry->bo;
- dma_resv_add_fence(bo->base.resv, &release->base,
+ dma_resv_add_fence(bo->tbo.base.resv, &release->base,
DMA_RESV_USAGE_READ);
- ttm_bo_move_to_lru_tail_unlocked(bo);
- dma_resv_unlock(bo->base.resv);
+ ttm_bo_move_to_lru_tail_unlocked(&bo->tbo);
}
- ww_acquire_fini(&release->ticket);
+ drm_exec_fini(&release->exec);
}
-
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index f98356be0af2..f51bace9555d 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -5,6 +5,7 @@ config DRM_RADEON
depends on DRM && PCI && MMU
depends on AGP || !AGP
select FW_LOADER
+ select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
@@ -12,6 +13,7 @@ config DRM_RADEON
select DRM_TTM
select DRM_TTM_HELPER
select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
+ select DRM_EXEC
select SND_HDA_COMPONENT if SND_HDA_CORE
select POWER_SUPPLY
select HWMON
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index fca8b08535a5..6328627b7c34 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -228,10 +228,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
{
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
- int ret;
radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
- radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
radeon_connector->ddc_bus->aux.drm_dev = radeon_connector->base.dev;
if (ASIC_IS_DCE5(rdev)) {
if (radeon_auxch)
@@ -242,11 +240,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom;
}
- ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
- if (!ret)
- radeon_connector->ddc_bus->has_aux = true;
-
- WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
+ drm_dp_aux_init(&radeon_connector->ddc_bus->aux);
+ radeon_connector->ddc_bus->has_aux = true;
}
/***** general DP utility functions *****/
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 1b2d31c4d77c..ac77d1246b94 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2104,7 +2104,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- offset = radeon_get_ib_value(p, idx+1) << 8;
+ offset = (u64)radeon_get_ib_value(p, idx+1) << 8;
if (offset != track->vgt_strmout_bo_offset[idx_value]) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
offset, track->vgt_strmout_bo_offset[idx_value]);
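
The r600_cs fix above casts the 32-bit IB value to u64 before the shift: radeon_get_ib_value() returns a u32, and a 32-bit left shift by 8 silently drops the top 8 bits, truncating any buffer offset of 4 GiB or more. A minimal demonstration of the difference:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ib_value = 0x04000000;	/* register holds offset >> 8 */
	uint64_t bad = ib_value << 8;	/* 32-bit shift wraps: 0 */
	uint64_t good = (uint64_t)ib_value << 8;	/* 0x400000000 (16 GiB) */

	printf("bad=%#llx good=%#llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}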
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index fd8a4513025f..8605c074d9f7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -75,8 +75,8 @@
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_suballoc.h>
@@ -457,7 +457,8 @@ struct radeon_mman {
struct radeon_bo_list {
struct radeon_bo *robj;
- struct ttm_validate_buffer tv;
+ struct list_head list;
+ bool shared;
uint64_t gpu_offset;
unsigned preferred_domains;
unsigned allowed_domains;
@@ -1030,6 +1031,7 @@ struct radeon_cs_parser {
struct radeon_bo_list *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
+ struct drm_exec exec;
/* indices of various chunks */
struct radeon_cs_chunk *chunk_ib;
struct radeon_cs_chunk *chunk_relocs;
@@ -1043,7 +1045,6 @@ struct radeon_cs_parser {
u32 cs_flags;
u32 ring;
s32 priority;
- struct ww_acquire_ctx ticket;
};
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 47aa06a9a942..8d64ba18572e 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -760,19 +760,25 @@ static int radeon_audio_component_get_eld(struct device *kdev, int port,
if (!rdev->audio.enabled || !rdev->mode_info.mode_config_initialized)
return 0;
- list_for_each_entry(encoder, &rdev_to_drm(rdev)->mode_config.encoder_list, head) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ const struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+ encoder = connector_funcs->best_encoder(connector);
+
+ if (!encoder)
+ continue;
+
if (!radeon_encoder_is_digital(encoder))
continue;
radeon_encoder = to_radeon_encoder(encoder);
dig = radeon_encoder->enc_priv;
if (!dig->pin || dig->pin->id != port)
continue;
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- continue;
*enabled = true;
+ mutex_lock(&connector->eld_mutex);
ret = drm_eld_size(connector->eld);
memcpy(buf, connector->eld, min(max_bytes, ret));
+ mutex_unlock(&connector->eld_mutex);
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 528a8f3677c2..f9996304d943 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1255,16 +1255,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
goto exit;
}
}
-
- if (dret && radeon_connector->hpd.hpd != RADEON_HPD_NONE &&
- !radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) {
- DRM_DEBUG_KMS("EDID is readable when HPD disconnected\n");
- schedule_delayed_work(&rdev->hotplug_work, msecs_to_jiffies(1000));
- ret = connector_status_disconnected;
- goto exit;
- }
-
if (dret) {
radeon_connector->detected_by_load = false;
radeon_connector_free_edid(connector);
@@ -1786,6 +1776,20 @@ static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector
return MODE_OK;
}
+static int
+radeon_connector_late_register(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int r = 0;
+
+ if (radeon_connector->ddc_bus->has_aux) {
+ radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
+ r = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
+ }
+
+ return r;
+}
+
static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
.get_modes = radeon_dp_get_modes,
.mode_valid = radeon_dp_mode_valid,
@@ -1800,6 +1804,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
+ .late_register = radeon_connector_late_register,
};
static const struct drm_connector_funcs radeon_edp_connector_funcs = {
@@ -1810,6 +1815,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
+ .late_register = radeon_connector_late_register,
};
static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
@@ -1820,6 +1826,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
+ .late_register = radeon_connector_late_register,
};
void
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a6700d7278bf..64b26bfeafc9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -182,11 +182,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
}
- p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].tv.num_shared = !r->write_domain;
-
- radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
- priority);
+ p->relocs[i].shared = !r->write_domain;
+ radeon_cs_buckets_add(&buckets, &p->relocs[i].list, priority);
}
radeon_cs_buckets_get_list(&buckets, &p->validated);
@@ -197,7 +194,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
if (need_mmap_lock)
mmap_read_lock(current->mm);
- r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+ r = radeon_bo_list_validate(p->rdev, &p->exec, &p->validated, p->ring);
if (need_mmap_lock)
mmap_read_unlock(current->mm);
@@ -253,12 +250,11 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
struct radeon_bo_list *reloc;
int r;
- list_for_each_entry(reloc, &p->validated, tv.head) {
+ list_for_each_entry(reloc, &p->validated, list) {
struct dma_resv *resv;
resv = reloc->robj->tbo.base.resv;
- r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
- reloc->tv.num_shared);
+ r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, reloc->shared);
if (r)
return r;
}
@@ -276,6 +272,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
s32 priority = 0;
INIT_LIST_HEAD(&p->validated);
+ drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
if (!cs->num_chunks) {
return 0;
@@ -397,8 +394,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
static int cmp_size_smaller_first(void *priv, const struct list_head *a,
const struct list_head *b)
{
- struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
- struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
+ struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, list);
+ struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, list);
/* Sort A before B if A is smaller. */
if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
@@ -417,11 +414,13 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
* If error is set then unvalidate buffer, otherwise just free memory
* used by parsing context.
**/
-static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
if (!error) {
+ struct radeon_bo_list *reloc;
+
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
* This assures that the smallest buffers are added first
@@ -433,15 +432,17 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
* per frame under memory pressure.
*/
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-
- ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- &parser->ib.fence->base);
- } else if (backoff) {
- ttm_eu_backoff_reservation(&parser->ticket,
- &parser->validated);
+ list_for_each_entry(reloc, &parser->validated, list) {
+ dma_resv_add_fence(reloc->robj->tbo.base.resv,
+ &parser->ib.fence->base,
+ reloc->shared ?
+ DMA_RESV_USAGE_READ :
+ DMA_RESV_USAGE_WRITE);
+ }
}
+ drm_exec_fini(&parser->exec);
+
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
struct radeon_bo *bo = parser->relocs[i].robj;
@@ -693,7 +694,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- radeon_cs_parser_fini(&parser, r, false);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
@@ -707,7 +708,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
if (r) {
- radeon_cs_parser_fini(&parser, r, false);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
@@ -724,7 +725,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
}
out:
- radeon_cs_parser_fini(&parser, r, true);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 554b236c2328..6f071e61f764 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -35,6 +35,7 @@
#include <linux/vgaarb.h>
#include <drm/drm_cache.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
@@ -1542,7 +1543,7 @@ void radeon_device_fini(struct radeon_device *rdev)
* Called at driver suspend.
*/
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
- bool fbcon, bool freeze)
+ bool notify_clients, bool freeze)
{
struct radeon_device *rdev;
struct pci_dev *pdev;
@@ -1634,9 +1635,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
pci_set_power_state(pdev, PCI_D3hot);
}
- if (fbcon) {
+ if (notify_clients) {
console_lock();
- radeon_fbdev_set_suspend(rdev, 1);
+ drm_client_dev_suspend(dev, true);
console_unlock();
}
return 0;
@@ -1649,7 +1650,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
@@ -1660,14 +1661,14 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (fbcon) {
+ if (notify_clients) {
console_lock();
}
if (resume) {
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (pci_enable_device(pdev)) {
- if (fbcon)
+ if (notify_clients)
console_unlock();
return -1;
}
@@ -1730,7 +1731,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
/* reset hpd state */
radeon_hpd_init(rdev);
/* blat the mode back in */
- if (fbcon) {
+ if (notify_clients) {
drm_helper_resume_force_mode(dev);
/* turn on display hw */
drm_modeset_lock_all(dev);
@@ -1746,8 +1747,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
radeon_pm_compute_clocks(rdev);
- if (fbcon) {
- radeon_fbdev_set_suspend(rdev, 0);
+ if (notify_clients) {
+ drm_client_dev_resume(dev, true);
console_unlock();
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e5a6f3e7c75b..267f082bc430 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -29,7 +29,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
+#include <linux/aperture.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -37,9 +37,10 @@
#include <linux/mmu_notifier.h>
#include <linux/pci.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pciids.h>
@@ -247,10 +248,9 @@ int radeon_cik_support = 1;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
module_param_named(cik_support, radeon_cik_support, int, 0444);
-static struct pci_device_id pciidlist[] = {
+static const struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
-
MODULE_DEVICE_TABLE(pci, pciidlist);
static const struct drm_driver kms_driver;
@@ -261,6 +261,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
unsigned long flags = 0;
struct drm_device *ddev;
struct radeon_device *rdev;
+ const struct drm_format_info *format;
int ret;
if (!ent)
@@ -297,7 +298,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return -EPROBE_DEFER;
/* Get rid of things like offb */
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &kms_driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, kms_driver.name);
if (ret)
return ret;
@@ -324,7 +325,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_agp;
- radeon_fbdev_setup(ddev->dev_private);
+ if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))
+ format = drm_format_info(DRM_FORMAT_C8);
+ else if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32 * 1024 * 1024))
+ format = drm_format_info(DRM_FORMAT_RGB565);
+ else
+ format = NULL;
+
+ drm_client_setup(ddev, format);
return 0;
@@ -591,9 +599,10 @@ static const struct drm_driver kms_driver = {
.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
+ RADEON_FBDEV_DRIVER_OPS,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 02a65971d140..0f3dbffc492d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -43,7 +43,6 @@
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
-#define DRIVER_DATE "20080528"
/* Interface history:
*
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 0f723292409e..fafed331e0a0 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -43,7 +43,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *clone_encoder;
- uint32_t index_mask = 0;
+ uint32_t index_mask = drm_encoder_mask(encoder);
int count;
/* DIG routing gets problematic */
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index fb70de29545c..d4a58bd679db 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -198,12 +198,11 @@ static const struct fb_ops radeon_fbdev_fb_ops = {
.fb_destroy = radeon_fbdev_fb_destroy,
};
-/*
- * Fbdev helpers and struct drm_fb_helper_funcs
- */
+static const struct drm_fb_helper_funcs radeon_fbdev_fb_helper_funcs = {
+};
-static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
+int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct radeon_device *rdev = fb_helper->dev->dev_private;
struct drm_mode_fb_cmd2 mode_cmd = { };
@@ -243,6 +242,7 @@ static int radeon_fbdev_fb_helper_fb_probe(struct drm_fb_helper *fb_helper,
}
/* setup helper */
+ fb_helper->funcs = &radeon_fbdev_fb_helper_funcs;
fb_helper->fb = fb;
/* okay we have an object now allocate the framebuffer */
@@ -288,116 +288,6 @@ err_radeon_fbdev_destroy_pinned_object:
return ret;
}
-static const struct drm_fb_helper_funcs radeon_fbdev_fb_helper_funcs = {
- .fb_probe = radeon_fbdev_fb_helper_fb_probe,
-};
-
-/*
- * Fbdev client and struct drm_client_funcs
- */
-
-static void radeon_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = fb_helper->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- if (fb_helper->info) {
- vga_switcheroo_client_fb_set(rdev->pdev, NULL);
- drm_helper_force_disable_all(dev);
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- }
-}
-
-static int radeon_fbdev_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
- vga_switcheroo_process_delayed_switch();
-
- return 0;
-}
-
-static int radeon_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- struct radeon_device *rdev = dev->dev_private;
- int ret;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- vga_switcheroo_client_fb_set(rdev->pdev, fb_helper->info);
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "Failed to setup radeon fbdev emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs radeon_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = radeon_fbdev_client_unregister,
- .restore = radeon_fbdev_client_restore,
- .hotplug = radeon_fbdev_client_hotplug,
-};
-
-void radeon_fbdev_setup(struct radeon_device *rdev)
-{
- struct drm_fb_helper *fb_helper;
- int bpp_sel = 32;
- int ret;
-
- if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))
- bpp_sel = 8;
- else if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32 * 1024 * 1024))
- bpp_sel = 16;
-
- fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper)
- return;
- drm_fb_helper_prepare(rdev_to_drm(rdev), fb_helper, bpp_sel, &radeon_fbdev_fb_helper_funcs);
-
- ret = drm_client_init(rdev_to_drm(rdev), &fb_helper->client, "radeon-fbdev",
- &radeon_fbdev_client_funcs);
- if (ret) {
- drm_err(rdev_to_drm(rdev), "Failed to register client: %d\n", ret);
- goto err_drm_client_init;
- }
-
- drm_client_register(&fb_helper->client);
-
- return;
-
-err_drm_client_init:
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
-}
-
-void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
-{
- if (rdev_to_drm(rdev)->fb_helper)
- drm_fb_helper_set_suspend(rdev_to_drm(rdev)->fb_helper, state);
-}
-
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
struct drm_fb_helper *fb_helper = rdev_to_drm(rdev)->fb_helper;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 9735f4968b86..f86773f3db20 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -44,8 +44,6 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);
-const struct drm_gem_object_funcs radeon_gem_object_funcs;
-
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
@@ -132,7 +130,6 @@ retry:
return r;
}
*obj = &robj->tbo.base;
- (*obj)->funcs = &radeon_gem_object_funcs;
robj->pid = task_pid_nr(current);
mutex_lock(&rdev->gem.mutex);
@@ -608,33 +605,40 @@ out:
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
{
- struct ttm_validate_buffer tv, *entry;
- struct radeon_bo_list *vm_bos;
- struct ww_acquire_ctx ticket;
+ struct radeon_bo_list *vm_bos, *entry;
struct list_head list;
+ struct drm_exec exec;
unsigned domain;
int r;
INIT_LIST_HEAD(&list);
- tv.bo = &bo_va->bo->tbo;
- tv.num_shared = 1;
- list_add(&tv.head, &list);
-
vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r)
- goto error_free;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ list_for_each_entry(entry, &list, list) {
+ r = drm_exec_prepare_obj(&exec, &entry->robj->tbo.base,
+ 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error_cleanup;
+ }
- list_for_each_entry(entry, &list, head) {
- domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
+ r = drm_exec_prepare_obj(&exec, &bo_va->bo->tbo.base, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error_cleanup;
+ }
+
+ list_for_each_entry(entry, &list, list) {
+ domain = radeon_mem_type_to_domain(entry->robj->tbo.resource->mem_type);
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (domain == RADEON_GEM_DOMAIN_CPU)
- goto error_unreserve;
+ goto error_cleanup;
}
mutex_lock(&bo_va->vm->mutex);
@@ -648,10 +652,8 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
error_unlock:
mutex_unlock(&bo_va->vm->mutex);
-error_unreserve:
- ttm_eu_backoff_reservation(&ticket, &list);
-
-error_free:
+error_cleanup:
+ drm_exec_fini(&exec);
kvfree(vm_bos);
if (r && r != -ERESTARTSYS)
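The hunk above swaps the old ttm_eu reservation ticket for the drm_exec helper. A minimal sketch of the drm_exec locking pattern, built only from calls that appear in this patch (bo stands in for whichever buffer is being prepared):

	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		/* lock the GEM object and reserve one fence slot on it */
		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
		/* on ww-mutex contention: drop all locks, restart the loop */
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto cleanup;
	}
	/* ... all objects are now locked; do the actual work ... */
cleanup:
	drm_exec_fini(&exec);	/* unlocks every prepared object */

Unlike ttm_eu_reserve_buffers(), there is no separate backoff call: drm_exec_fini() releases whatever was locked, on both the success and the error path.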
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 421c83fc70dc..4063d3801e81 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -38,6 +38,9 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
struct edid;
struct drm_edid;
struct radeon_bo;
@@ -935,14 +938,14 @@ void dce8_program_fmt(struct drm_encoder *encoder);
/* fbdev layer */
#if defined(CONFIG_DRM_FBDEV_EMULATION)
-void radeon_fbdev_setup(struct radeon_device *rdev);
-void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+#define RADEON_FBDEV_DRIVER_OPS \
+ .fbdev_probe = radeon_fbdev_driver_fbdev_probe
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
#else
-static inline void radeon_fbdev_setup(struct radeon_device *rdev)
-{ }
-static inline void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
-{ }
+#define RADEON_FBDEV_DRIVER_OPS \
+ .fbdev_probe = NULL
static inline bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
return false;
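RADEON_FBDEV_DRIVER_OPS is meant to be expanded inside the driver's struct drm_driver initializer, so the fbdev probe hook is wired up at compile time and degrades to .fbdev_probe = NULL when fbdev emulation is disabled. A hedged sketch of the intended use (the initializer itself is not part of this excerpt):

	static const struct drm_driver kms_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET,
		/* expands to .fbdev_probe = radeon_fbdev_driver_fbdev_probe,
		 * or to .fbdev_probe = NULL without CONFIG_DRM_FBDEV_EMULATION */
		RADEON_FBDEV_DRIVER_OPS,
		/* ... */
	};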
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d0e4b43d155c..a0fc0801abb0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -151,6 +151,7 @@ int radeon_bo_create(struct radeon_device *rdev,
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);
+ bo->tbo.base.funcs = &radeon_gem_object_funcs;
bo->rdev = rdev;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
@@ -463,23 +464,26 @@ static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
}
int radeon_bo_list_validate(struct radeon_device *rdev,
- struct ww_acquire_ctx *ticket,
+ struct drm_exec *exec,
struct list_head *head, int ring)
{
struct ttm_operation_ctx ctx = { true, false };
struct radeon_bo_list *lobj;
- struct list_head duplicates;
- int r;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
+ int r;
- INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
- if (unlikely(r != 0)) {
- return r;
+ drm_exec_until_all_locked(exec) {
+ list_for_each_entry(lobj, head, list) {
+ r = drm_exec_prepare_obj(exec, &lobj->robj->tbo.base,
+ 1);
+ drm_exec_retry_on_contention(exec);
+ if (unlikely(r && r != -EALREADY))
+ return r;
+ }
}
- list_for_each_entry(lobj, head, tv.head) {
+ list_for_each_entry(lobj, head, list) {
struct radeon_bo *bo = lobj->robj;
if (!bo->tbo.pin_count) {
u32 domain = lobj->preferred_domains;
@@ -518,7 +522,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
domain = lobj->allowed_domains;
goto retry;
}
- ttm_eu_backoff_reservation(ticket, head);
return r;
}
}
@@ -526,11 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
lobj->tiling_flags = bo->tiling_flags;
}
- list_for_each_entry(lobj, &duplicates, tv.head) {
- lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
- lobj->tiling_flags = lobj->robj->tiling_flags;
- }
-
return 0;
}
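With drm_exec there is no longer a separate duplicates list: drm_exec_prepare_obj() returns -EALREADY for an object that is already locked in this exec context, and the loop above deliberately tolerates that, so duplicate entries simply fall through to the common post-validation pass. The idiom in isolation:

	r = drm_exec_prepare_obj(exec, &lobj->robj->tbo.base, 1);
	drm_exec_retry_on_contention(exec);
	if (unlikely(r && r != -EALREADY))
		return r;	/* real failure; -EALREADY just means duplicate */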
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 39cc87a59a9a..d7bbb52db546 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -152,7 +152,7 @@ extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern int radeon_bo_list_validate(struct radeon_device *rdev,
- struct ww_acquire_ctx *ticket,
+ struct drm_exec *exec,
struct list_head *head, int ring);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
u32 tiling_flags, u32 pitch);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 69d0c12fa419..616d25c8c2de 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -219,8 +219,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (old_mem->mem_type == TTM_PL_TT &&
new_mem->mem_type == TTM_PL_SYSTEM) {
radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->resource);
- ttm_bo_assign_mem(bo, new_mem);
+ ttm_bo_move_null(bo, new_mem);
goto out;
}
if (rdev->ring[radeon_copy_ring_index(rdev)].ready &&
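ttm_bo_move_null() is TTM's helper for moves where the data does not actually change placement; it folds the removed free-and-assign pair into one call. Roughly, as a sketch of its effect rather than the exact TTM implementation:

	static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
					    struct ttm_resource *new_mem)
	{
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_mem);
	}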
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index c38b4d5d6a14..21a5340aefdf 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -142,10 +142,9 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[0].robj = vm->page_directory;
list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
- list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.num_shared = 1;
+ list[0].shared = true;
list[0].tiling_flags = 0;
- list_add(&list[0].tv.head, head);
+ list_add(&list[0].list, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
@@ -154,10 +153,9 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].robj = vm->page_tables[i].bo;
list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
- list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.num_shared = 1;
+ list[idx].shared = true;
list[idx].tiling_flags = 0;
- list_add(&list[idx++].tv.head, head);
+ list_add(&list[idx++].list, head);
}
return list;
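For orientation, the bookkeeping struct these hunks operate on, reconstructed from the fields the diff touches (the real definition lives in radeon.h and may carry additional members):

	struct radeon_bo_list {
		struct list_head	list;		/* replaces tv.head */
		struct radeon_bo	*robj;
		uint64_t		gpu_offset;
		unsigned		preferred_domains;
		unsigned		allowed_domains;
		uint32_t		tiling_flags;
		bool			shared;		/* replaces tv.num_shared */
	};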
diff --git a/drivers/gpu/drm/renesas/rcar-du/Kconfig b/drivers/gpu/drm/renesas/rcar-du/Kconfig
index e1f41468a9a6..840305fdeb49 100644
--- a/drivers/gpu/drm/renesas/rcar-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rcar-du/Kconfig
@@ -4,6 +4,7 @@ config DRM_RCAR_DU
depends on DRM && OF
depends on ARM || ARM64 || COMPILE_TEST
depends on ARCH_RENESAS || COMPILE_TEST
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
select DRM_BRIDGE_CONNECTOR
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
index 26a2f5ad8ee5..79b67c406bd6 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_cmm.c
@@ -201,7 +201,7 @@ MODULE_DEVICE_TABLE(of, rcar_cmm_of_table);
static struct platform_driver rcar_cmm_platform_driver = {
.probe = rcar_cmm_probe,
- .remove_new = rcar_cmm_remove,
+ .remove = rcar_cmm_remove,
.driver = {
.name = "rcar-cmm",
.of_match_table = rcar_cmm_of_table,
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
index fb719d9aff10..d948ff3594c4 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -545,6 +546,23 @@ static const struct rcar_du_device_info rcar_du_r8a779g0_info = {
.dsi_clk_mask = BIT(1) | BIT(0),
};
+static const struct rcar_du_device_info rcar_du_r8a779h0_info = {
+ .gen = 4,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_NO_BLENDING,
+ .channels_mask = BIT(0),
+ .routes = {
+ /* R8A779H0 has one MIPI DSI output. */
+ [RCAR_DU_OUTPUT_DSI0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ },
+ .num_rpf = 5,
+ .dsi_clk_mask = BIT(0),
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
@@ -571,6 +589,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a779a0", .data = &rcar_du_r8a779a0_info },
{ .compatible = "renesas,du-r8a779g0", .data = &rcar_du_r8a779g0_info },
+ { .compatible = "renesas,du-r8a779h0", .data = &rcar_du_r8a779h0_info },
{ }
};
@@ -606,10 +625,10 @@ static const struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.dumb_create = rcar_du_dumb_create,
.gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
- .date = "20130110",
.major = 1,
.minor = 0,
};
@@ -716,7 +735,7 @@ static int rcar_du_probe(struct platform_device *pdev)
drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev));
- drm_fbdev_dma_setup(&rcdu->ddev, 32);
+ drm_client_setup(&rcdu->ddev, NULL);
return 0;
@@ -727,7 +746,7 @@ error:
static struct platform_driver rcar_du_platform_driver = {
.probe = rcar_du_probe,
- .remove_new = rcar_du_remove,
+ .remove = rcar_du_remove,
.shutdown = rcar_du_shutdown,
.driver = {
.name = "rcar-du",
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c
index 2ccd2581f544..068c106e586c 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_group.c
@@ -107,10 +107,12 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp)
*/
rcrtc = rcdu->crtcs;
num_crtcs = rcdu->num_crtcs;
- } else if (rcdu->info->gen >= 3 && rgrp->num_crtcs > 1) {
+ } else if ((rcdu->info->gen == 3 && rgrp->num_crtcs > 1) ||
+ rcdu->info->gen == 4) {
/*
* On Gen3 dot clocks are setup through per-group registers,
* only available when the group has two channels.
+ * On Gen4 the registers are there for single channel too.
*/
rcrtc = &rcdu->crtcs[rgrp->index * 2];
num_crtcs = rgrp->num_crtcs;
@@ -185,11 +187,21 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1;
rcar_du_group_write(rgrp, DORCR, dorcr);
- /* Apply planes to CRTCs association. */
- mutex_lock(&rgrp->lock);
- rcar_du_group_write(rgrp, DPTSR, (rgrp->dptsr_planes << 16) |
- rgrp->dptsr_planes);
- mutex_unlock(&rgrp->lock);
+ /*
+ * DPTSR is used to select the source for the planes of a group. The
+ * first source is chosen by writing 0 to the respective bits, and this
+ * is always the default value of the register. In other words, writing
+ * DPTSR is only needed if the SoC supports choosing the second source.
+ *
+	 * The SoC documentation seems to confirm this, as the DPTSR register
+	 * is not documented when only the first source exists on that SoC.
+ */
+ if (rgrp->channels_mask & BIT(1)) {
+ mutex_lock(&rgrp->lock);
+ rcar_du_group_write(rgrp, DPTSR, (rgrp->dptsr_planes << 16) |
+ rgrp->dptsr_planes);
+ mutex_unlock(&rgrp->lock);
+ }
}
/*
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c
index e445fac8e0b4..c546ab0805d6 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.c
@@ -680,6 +680,12 @@ static const struct drm_plane_helper_funcs rcar_du_plane_helper_funcs = {
.atomic_update = rcar_du_plane_atomic_update,
};
+static const struct drm_plane_helper_funcs rcar_du_primary_plane_helper_funcs = {
+ .atomic_check = rcar_du_plane_atomic_check,
+ .atomic_update = rcar_du_plane_atomic_update,
+ .get_scanout_buffer = drm_fb_dma_get_scanout_buffer,
+};
+
static struct drm_plane_state *
rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane)
{
@@ -812,8 +818,12 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
if (ret < 0)
return ret;
- drm_plane_helper_add(&plane->plane,
- &rcar_du_plane_helper_funcs);
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(&plane->plane,
+ &rcar_du_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(&plane->plane,
+ &rcar_du_plane_helper_funcs);
drm_plane_create_alpha_property(&plane->plane);
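.get_scanout_buffer is the hook the DRM panic handler uses to locate a CPU-accessible framebuffer to draw the panic screen on, which is why it is only added to the primary-plane helpers here. For DMA-backed framebuffers the generic helper fills the descriptor roughly along these lines (a sketch of the idea, not drm_fb_dma_get_scanout_buffer()'s exact code):

	static int sketch_get_scanout_buffer(struct drm_plane *plane,
					     struct drm_scanout_buffer *sb)
	{
		struct drm_framebuffer *fb = plane->state->fb;
		struct drm_gem_dma_object *dma_obj;

		if (!fb || !fb->obj[0])
			return -ENODEV;

		dma_obj = to_drm_gem_dma_obj(fb->obj[0]);
		if (!dma_obj->vaddr)	/* need a kernel mapping to draw into */
			return -ENODEV;

		sb->format = fb->format;
		sb->width = fb->width;
		sb->height = fb->height;
		sb->pitch[0] = fb->pitches[0];
		iosys_map_set_vaddr(&sb->map[0], dma_obj->vaddr);

		return 0;
	}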
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c
index 119d69d20b23..c0176e5de9a8 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_dw_hdmi.c
@@ -108,7 +108,7 @@ MODULE_DEVICE_TABLE(of, rcar_dw_hdmi_of_table);
static struct platform_driver rcar_dw_hdmi_platform_driver = {
.probe = rcar_dw_hdmi_probe,
- .remove_new = rcar_dw_hdmi_remove,
+ .remove = rcar_dw_hdmi_remove,
.driver = {
.name = "rcar-dw-hdmi",
.of_match_table = rcar_dw_hdmi_of_table,
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index 92ba43a6fe38..e8d64583e3bd 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -1018,7 +1018,7 @@ static const struct dev_pm_ops rcar_lvds_pm_ops = {
static struct platform_driver rcar_lvds_platform_driver = {
.probe = rcar_lvds_probe,
- .remove_new = rcar_lvds_remove,
+ .remove = rcar_lvds_remove,
.driver = {
.name = "rcar-lvds",
.pm = &rcar_lvds_pm_ops,
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 2dba7c5ffd2c..3c0c18d5249a 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -587,7 +587,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
for (timeout = 10; timeout > 0; --timeout) {
if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) &&
(rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) &&
- (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK))
+ (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK_PHY))
break;
usleep_range(1000, 2000);
@@ -1081,6 +1081,8 @@ static const struct rcar_mipi_dsi_device_info v4h_data = {
static const struct of_device_id rcar_mipi_dsi_of_table[] = {
{ .compatible = "renesas,r8a779a0-dsi-csi2-tx", .data = &v3u_data },
{ .compatible = "renesas,r8a779g0-dsi-csi2-tx", .data = &v4h_data },
+ /* DSI in r8a779h0 is identical to r8a779g0 */
+ { .compatible = "renesas,r8a779h0-dsi-csi2-tx", .data = &v4h_data },
{ }
};
@@ -1088,7 +1090,7 @@ MODULE_DEVICE_TABLE(of, rcar_mipi_dsi_of_table);
static struct platform_driver rcar_mipi_dsi_platform_driver = {
.probe = rcar_mipi_dsi_probe,
- .remove_new = rcar_mipi_dsi_remove,
+ .remove = rcar_mipi_dsi_remove,
.driver = {
.name = "rcar-mipi-dsi",
.of_match_table = rcar_mipi_dsi_of_table,
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
index f8114d11f2d1..a6b276f1d6ee 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
@@ -142,7 +142,6 @@
#define CLOCKSET1 0x101c
#define CLOCKSET1_LOCK_PHY (1 << 17)
-#define CLOCKSET1_LOCK (1 << 16)
#define CLOCKSET1_CLKSEL (1 << 8)
#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2)
#define CLOCKSET1_CLKINSEL_DIG (1 << 2)
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index 89bdb598e0ae..7c1817240846 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -4,6 +4,7 @@ config DRM_RZG2L_DU
depends on ARCH_RZG2L || COMPILE_TEST
depends on DRM && OF
depends on VIDEO_RENESAS_VSP1
+ select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
index c4c1474d487e..6e7aac6219be 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_crtc.c
@@ -28,7 +28,6 @@
#include "rzg2l_du_vsp.h"
#define DU_MCR0 0x00
-#define DU_MCR0_DPI_OE BIT(0)
#define DU_MCR0_DI_EN BIT(8)
#define DU_DITR0 0x10
@@ -217,14 +216,9 @@ static void rzg2l_du_crtc_put(struct rzg2l_du_crtc *rcrtc)
static void rzg2l_du_start_stop(struct rzg2l_du_crtc *rcrtc, bool start)
{
- struct rzg2l_du_crtc_state *rstate = to_rzg2l_crtc_state(rcrtc->crtc.state);
struct rzg2l_du_device *rcdu = rcrtc->dev;
- u32 val = DU_MCR0_DI_EN;
- if (rstate->outputs & BIT(RZG2L_DU_OUTPUT_DPAD0))
- val |= DU_MCR0_DPI_OE;
-
- writel(start ? val : 0, rcdu->mmio + DU_MCR0);
+ writel(start ? DU_MCR0_DI_EN : 0, rcdu->mmio + DU_MCR0);
}
static void rzg2l_du_crtc_start(struct rzg2l_du_crtc *rcrtc)
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index bc7c381f92ac..cbd9b9841267 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -79,10 +80,10 @@ DEFINE_DRM_GEM_DMA_FOPS(rzg2l_du_fops);
static const struct drm_driver rzg2l_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.dumb_create = rzg2l_du_dumb_create,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &rzg2l_du_fops,
.name = "rzg2l-du",
.desc = "Renesas RZ/G2L Display Unit",
- .date = "20230410",
.major = 1,
.minor = 0,
};
@@ -160,7 +161,7 @@ static int rzg2l_du_probe(struct platform_device *pdev)
drm_info(&rcdu->ddev, "Device %s probed\n", dev_name(&pdev->dev));
- drm_fbdev_dma_setup(&rcdu->ddev, 32);
+ drm_client_setup(&rcdu->ddev, NULL);
return 0;
@@ -171,7 +172,7 @@ error:
static struct platform_driver rzg2l_du_platform_driver = {
.probe = rzg2l_du_probe,
- .remove_new = rzg2l_du_remove,
+ .remove = rzg2l_du_remove,
.shutdown = rzg2l_du_shutdown,
.driver = {
.name = "rzg2l-du",
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
index 339cbaaea0b5..564ab4cb3d37 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/of.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_panel.h>
@@ -24,6 +25,22 @@
static const struct drm_encoder_funcs rzg2l_du_encoder_funcs = {
};
+static enum drm_mode_status
+rzg2l_du_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct rzg2l_du_encoder *renc = to_rzg2l_encoder(encoder);
+
+ if (renc->output == RZG2L_DU_OUTPUT_DPAD0 && mode->clock > 83500)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static const struct drm_encoder_helper_funcs rzg2l_du_encoder_helper_funcs = {
+ .mode_valid = rzg2l_du_encoder_mode_valid,
+};
+
int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
enum rzg2l_du_output output,
struct device_node *enc_node)
@@ -48,6 +65,7 @@ int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
return PTR_ERR(renc);
renc->output = output;
+ drm_encoder_helper_add(&renc->base, &rzg2l_du_encoder_helper_funcs);
/* Attach the bridge to the encoder. */
ret = drm_bridge_attach(&renc->base, bridge, NULL,
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
index b99217b4e05d..90c6269ccd29 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c
@@ -311,11 +311,11 @@ int rzg2l_du_modeset_init(struct rzg2l_du_device *rcdu)
dev->mode_config.helper_private = &rzg2l_du_mode_config_helper;
/*
- * The RZ DU uses the VSP1 for memory access, and is limited
- * to frame sizes of 1920x1080.
+ * The RZ DU was designed to support a frame size of 1920x1200 (landscape)
+ * or 1200x1920 (portrait).
*/
dev->mode_config.max_width = 1920;
- dev->mode_config.max_height = 1080;
+ dev->mode_config.max_height = 1920;
rcdu->num_crtcs = hweight8(rcdu->info->channels_mask);
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index 10febea473cd..fa7a1ae22aa3 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -798,7 +798,7 @@ MODULE_DEVICE_TABLE(of, rzg2l_mipi_dsi_of_table);
static struct platform_driver rzg2l_mipi_dsi_platform_driver = {
.probe = rzg2l_mipi_dsi_probe,
- .remove_new = rzg2l_mipi_dsi_remove,
+ .remove = rzg2l_mipi_dsi_remove,
.driver = {
.name = "rzg2l-mipi-dsi",
.pm = &rzg2l_mipi_pm_ops,
diff --git a/drivers/gpu/drm/renesas/shmobile/Kconfig b/drivers/gpu/drm/renesas/shmobile/Kconfig
index c329ab8a7a8b..52e160464001 100644
--- a/drivers/gpu/drm/renesas/shmobile/Kconfig
+++ b/drivers/gpu/drm/renesas/shmobile/Kconfig
@@ -4,6 +4,7 @@ config DRM_SHMOBILE
depends on DRM && PM
depends on ARCH_RENESAS || ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
select DRM_BRIDGE_CONNECTOR
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
index ff2883c7fd46..2f31822b2245 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
@@ -17,9 +17,11 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
@@ -101,10 +103,10 @@ DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops);
static const struct drm_driver shmob_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
- .date = "20120424",
.major = 1,
.minor = 0,
};
@@ -257,7 +259,7 @@ static int shmob_drm_probe(struct platform_device *pdev)
if (ret < 0)
goto err_modeset_cleanup;
- drm_fbdev_dma_setup(ddev, 16);
+ drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB565);
return 0;
@@ -279,7 +281,7 @@ static const struct of_device_id shmob_drm_of_table[] __maybe_unused = {
static struct platform_driver shmob_drm_platform_driver = {
.probe = shmob_drm_probe,
- .remove_new = shmob_drm_remove,
+ .remove = shmob_drm_remove,
.shutdown = shmob_drm_shutdown,
.driver = {
.name = "shmob-drm",
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
index 07ad17d24294..9d166ab2af8b 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
@@ -273,6 +273,13 @@ static const struct drm_plane_helper_funcs shmob_drm_plane_helper_funcs = {
.atomic_disable = shmob_drm_plane_atomic_disable,
};
+static const struct drm_plane_helper_funcs shmob_drm_primary_plane_helper_funcs = {
+ .atomic_check = shmob_drm_plane_atomic_check,
+ .atomic_update = shmob_drm_plane_atomic_update,
+ .atomic_disable = shmob_drm_plane_atomic_disable,
+ .get_scanout_buffer = drm_fb_dma_get_scanout_buffer,
+};
+
static const struct drm_plane_funcs shmob_drm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -310,7 +317,12 @@ struct drm_plane *shmob_drm_plane_create(struct shmob_drm_device *sdev,
splane->index = index;
- drm_plane_helper_add(&splane->base, &shmob_drm_plane_helper_funcs);
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_helper_add(&splane->base,
+ &shmob_drm_primary_plane_helper_funcs);
+ else
+ drm_plane_helper_add(&splane->base,
+ &shmob_drm_plane_helper_funcs);
return &splane->base;
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 23c49e91f1cc..26c4410b2407 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,13 +2,16 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
+ select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
+ select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select DRM_DW_MIPI_DSI2 if ROCKCHIP_DW_MIPI_DSI2
select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
@@ -63,6 +66,14 @@ config ROCKCHIP_DW_HDMI
enable HDMI on RK3288 or RK3399 based SoC, you should select
this option.
+config ROCKCHIP_DW_HDMI_QP
+ bool "Rockchip specific extensions for Synopsys DW HDMI QP"
+ select DRM_BRIDGE_CONNECTOR
+ help
+ This selects support for Rockchip SoC specific extensions
+ for the Synopsys DesignWare HDMI QP driver. If you want to
+ enable HDMI on RK3588 based SoC, you should select this option.
+
config ROCKCHIP_DW_MIPI_DSI
bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
select GENERIC_PHY_MIPI_DPHY
@@ -72,6 +83,15 @@ config ROCKCHIP_DW_MIPI_DSI
enable MIPI DSI on RK3288 or RK3399 based SoC, you should
select this option.
+config ROCKCHIP_DW_MIPI_DSI2
+ bool "Rockchip specific extensions for Synopsys DW MIPI DSI2"
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ This selects support for Rockchip SoC specific extensions
+ for the Synopsys DesignWare DSI2 driver. If you want to
+ enable MIPI DSI on RK3576 or RK3588 based SoC, you should
+ select this option.
+
config ROCKCHIP_INNO_HDMI
bool "Rockchip specific extensions for Innosilicon HDMI"
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 3ff7b21c0414..2b867cebbc12 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -11,7 +11,9 @@ rockchipdrm-$(CONFIG_ROCKCHIP_VOP) += rockchip_drm_vop.o rockchip_vop_reg.o
rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
+rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
+rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI2) += dw-mipi-dsi2-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index d3341edfe4f4..0844175c37c5 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -2,7 +2,7 @@
/*
* Rockchip SoC DP (Display Port) interface driver.
*
- * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Andy Yan <andy.yan@rock-chips.com>
* Yakir Yang <ykk@rock-chips.com>
* Jeff Chen <jeff.chen@rock-chips.com>
@@ -386,7 +386,7 @@ static int rockchip_dp_probe(struct platform_device *pdev)
return -ENODEV;
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
- if (ret < 0)
+ if (ret < 0 && ret != -ENODEV)
return ret;
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
@@ -470,7 +470,7 @@ MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
struct platform_driver rockchip_dp_driver = {
.probe = rockchip_dp_probe,
- .remove_new = rockchip_dp_remove,
+ .remove = rockchip_dp_remove,
.driver = {
.name = "rockchip-dp",
.pm = pm_ptr(&rockchip_dp_pm_ops),
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index b04538907f95..b17de83b988b 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
*/
@@ -885,7 +885,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.mute_stream = cdn_dp_audio_mute_stream,
.get_eld = cdn_dp_audio_get_eld,
.hook_plugged_cb = cdn_dp_audio_hook_plugged_cb,
- .no_capture_mute = 1,
};
static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
@@ -896,6 +895,7 @@ static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp,
.spdif = 1,
.ops = &audio_codec_ops,
.max_i2s_channels = 8,
+ .no_capture_mute = 1,
};
dp->audio_pdev = platform_device_register_data(
@@ -947,9 +947,6 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
{
struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device,
event_work);
- struct drm_connector *connector = &dp->connector;
- enum drm_connector_status old_status;
-
int ret;
mutex_lock(&dp->lock);
@@ -1009,11 +1006,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
out:
mutex_unlock(&dp->lock);
-
- old_status = connector->status;
- connector->status = connector->funcs->detect(connector, false);
- if (old_status != connector->status)
- drm_kms_helper_hotplug_event(dp->drm_dev);
+ drm_connector_helper_hpd_irq_event(&dp->connector);
}
static int cdn_dp_pd_event(struct notifier_block *nb,
@@ -1254,7 +1247,7 @@ static const struct dev_pm_ops cdn_dp_pm_ops = {
struct platform_driver cdn_dp_driver = {
.probe = cdn_dp_probe,
- .remove_new = cdn_dp_remove,
+ .remove = cdn_dp_remove,
.shutdown = cdn_dp_shutdown,
.driver = {
.name = "cdn-dp",
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 8e6e95d269da..17498f576ce7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Chris Zhong <zyw@rock-chips.com>
- * Copyright (C) 2016 ROCKCHIP, Inc.
+ * Copyright (C) Rockchip Electronics Co., Ltd.
*/
#ifndef _CDN_DP_CORE_H
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 33fb4d05c506..924fb1d3ece2 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.h b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
index 441248b7a79e..13ed8cbdbafa 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Chris Zhong <zyw@rock-chips.com>
*/
@@ -77,7 +77,7 @@
#define SOURCE_PIF_PKT_ALLOC_WR_EN 0x30830
#define SOURCE_PIF_SW_RESET 0x30834
-/* bellow registers need access by mailbox */
+/* the registers below must be accessed via the mailbox */
/* source car addr */
#define SOURCE_HDTX_CAR 0x0900
#define SOURCE_DPTX_CAR 0x0904
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 58a44af0e9ad..3398160ad75e 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Chris Zhong <zyw@rock-chips.com>
* Nickey Yang <nickey.yang@rock-chips.com>
@@ -1709,7 +1709,7 @@ MODULE_DEVICE_TABLE(of, dw_mipi_dsi_rockchip_dt_ids);
struct platform_driver dw_mipi_dsi_rockchip_driver = {
.probe = dw_mipi_dsi_rockchip_probe,
- .remove_new = dw_mipi_dsi_rockchip_remove,
+ .remove = dw_mipi_dsi_rockchip_remove,
.driver = {
.of_match_table = dw_mipi_dsi_rockchip_dt_ids,
.pm = &dw_mipi_dsi_rockchip_pm_ops,
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
new file mode 100644
index 000000000000..cdd490778756
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2024 Rockchip Electronics Co., Ltd.
+ * Author:
+ * Guochun Huang <hero.huang@rock-chips.com>
+ * Heiko Stuebner <heiko.stuebner@cherry.de>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/media-bus-format.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+
+#include <drm/bridge/dw_mipi_dsi2.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include <uapi/linux/videodev2.h>
+
+#include "rockchip_drm_drv.h"
+
+#define PSEC_PER_SEC 1000000000000LL
+
+struct dsigrf_reg {
+ u16 offset;
+ u16 lsb;
+ u16 msb;
+};
+
+enum grf_reg_fields {
+ TXREQCLKHS_EN,
+ GATING_EN,
+ IPI_SHUTDN,
+ IPI_COLORM,
+ IPI_COLOR_DEPTH,
+ IPI_FORMAT,
+ MAX_FIELDS,
+};
+
+#define IPI_DEPTH_5_6_5_BITS 0x02
+#define IPI_DEPTH_6_BITS 0x03
+#define IPI_DEPTH_8_BITS 0x05
+#define IPI_DEPTH_10_BITS 0x06
+
+struct rockchip_dw_dsi2_chip_data {
+ u32 reg;
+ const struct dsigrf_reg *grf_regs;
+ unsigned long long max_bit_rate_per_lane;
+};
+
+struct dw_mipi_dsi2_rockchip {
+ struct device *dev;
+ struct rockchip_encoder encoder;
+ struct regmap *regmap;
+
+ unsigned int lane_mbps; /* per lane */
+ u32 format;
+
+ struct regmap *grf_regmap;
+ struct phy *phy;
+ union phy_configure_opts phy_opts;
+
+ struct dw_mipi_dsi2 *dmd;
+ struct dw_mipi_dsi2_plat_data pdata;
+ const struct rockchip_dw_dsi2_chip_data *cdata;
+};
+
+static inline struct dw_mipi_dsi2_rockchip *to_dsi2(struct drm_encoder *encoder)
+{
+ struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
+
+ return container_of(rkencoder, struct dw_mipi_dsi2_rockchip, encoder);
+}
+
+static void grf_field_write(struct dw_mipi_dsi2_rockchip *dsi2, enum grf_reg_fields index,
+ unsigned int val)
+{
+ const struct dsigrf_reg *field = &dsi2->cdata->grf_regs[index];
+
+ if (!field)
+ return;
+
+ regmap_write(dsi2->grf_regmap, field->offset,
+ (val << field->lsb) | (GENMASK(field->msb, field->lsb) << 16));
+}
+
+static int dw_mipi_dsi2_phy_init(void *priv_data)
+{
+ return 0;
+}
+
+static void dw_mipi_dsi2_phy_power_on(void *priv_data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ int ret;
+
+ ret = phy_set_mode(dsi2->phy, PHY_MODE_MIPI_DPHY);
+ if (ret) {
+ dev_err(dsi2->dev, "Failed to set phy mode: %d\n", ret);
+ return;
+ }
+
+ phy_configure(dsi2->phy, &dsi2->phy_opts);
+ phy_power_on(dsi2->phy);
+}
+
+static void dw_mipi_dsi2_phy_power_off(void *priv_data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+
+ phy_power_off(dsi2->phy);
+}
+
+static int
+dw_mipi_dsi2_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format,
+ unsigned int *lane_mbps)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ u64 max_lane_rate, target_phyclk;
+ unsigned int lane_rate_kbps;
+ int bpp;
+
+ max_lane_rate = dsi2->cdata->max_bit_rate_per_lane;
+
+ dsi2->format = format;
+ bpp = mipi_dsi_pixel_format_to_bpp(format);
+ if (bpp < 0) {
+ dev_err(dsi2->dev, "failed to get bpp for pixel format %d\n", format);
+ return bpp;
+ }
+
+ lane_rate_kbps = mode->clock * bpp / lanes;
+
+ /*
+	 * In video burst mode, pad the bandwidth slightly (take 1 / 0.9) to
+	 * account for protocol overhead and for switching from HS to BLLP
+	 * mode, since the per-lane rate in Mbps must exceed the RGB bandwidth.
+ */
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ lane_rate_kbps = (lane_rate_kbps * 10) / 9;
+
+ if (lane_rate_kbps > max_lane_rate) {
+ dev_err(dsi2->dev, "DPHY clock frequency is out of range\n");
+ return -ERANGE;
+ }
+
+ dsi2->lane_mbps = lane_rate_kbps / 1000;
+ *lane_mbps = dsi2->lane_mbps;
+
+ if (dsi2->phy) {
+ target_phyclk = DIV_ROUND_CLOSEST_ULL(lane_rate_kbps * lanes * 1000, bpp);
+ phy_mipi_dphy_get_default_config(target_phyclk, bpp, lanes,
+ &dsi2->phy_opts.mipi_dphy);
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_phy_get_iface(void *priv_data, struct dw_mipi_dsi2_phy_iface *iface)
+{
+ /* PPI width is fixed to 16 bits in DCPHY */
+ iface->ppi_width = 16;
+ iface->phy_type = DW_MIPI_DSI2_DPHY;
+}
+
+static int
+dw_mipi_dsi2_phy_get_timing(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi2_phy_timing *timing)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ struct phy_configure_opts_mipi_dphy *cfg = &dsi2->phy_opts.mipi_dphy;
+ unsigned long long tmp, ui;
+ unsigned long long hstx_clk;
+
+ hstx_clk = DIV_ROUND_CLOSEST_ULL(dsi2->lane_mbps * USEC_PER_SEC, 16);
+
+ ui = ALIGN(PSEC_PER_SEC, hstx_clk);
+ do_div(ui, hstx_clk);
+
+ /* PHY_LP2HS_TIME = (TLPX + THS-PREPARE + THS-ZERO) / Tphy_hstx_clk */
+ tmp = cfg->lpx + cfg->hs_prepare + cfg->hs_zero;
+ tmp = DIV_ROUND_CLOSEST_ULL(tmp << 16, ui);
+ timing->data_lp2hs = tmp;
+
+ /* PHY_HS2LP_TIME = (THS-TRAIL + THS-EXIT) / Tphy_hstx_clk */
+ tmp = cfg->hs_trail + cfg->hs_exit;
+ tmp = DIV_ROUND_CLOSEST_ULL(tmp << 16, ui);
+ timing->data_hs2lp = tmp;
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi2_phy_ops dw_mipi_dsi2_rockchip_phy_ops = {
+ .init = dw_mipi_dsi2_phy_init,
+ .power_on = dw_mipi_dsi2_phy_power_on,
+ .power_off = dw_mipi_dsi2_phy_power_off,
+ .get_interface = dw_mipi_dsi2_phy_get_iface,
+ .get_lane_mbps = dw_mipi_dsi2_get_lane_mbps,
+ .get_timing = dw_mipi_dsi2_phy_get_timing,
+};
+
+static void dw_mipi_dsi2_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = to_dsi2(encoder);
+ u32 color_depth;
+
+ switch (dsi2->format) {
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ color_depth = IPI_DEPTH_6_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ color_depth = IPI_DEPTH_5_6_5_BITS;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ color_depth = IPI_DEPTH_8_BITS;
+ break;
+ default:
+ /* Should've been caught by atomic_check */
+ WARN_ON(1);
+ return;
+ }
+
+ grf_field_write(dsi2, IPI_COLOR_DEPTH, color_depth);
+}
+
+static int
+dw_mipi_dsi2_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct dw_mipi_dsi2_rockchip *dsi2 = to_dsi2(encoder);
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_display_info *info = &connector->display_info;
+
+ switch (dsi2->format) {
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ s->output_mode = ROCKCHIP_OUT_MODE_P666;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ s->output_mode = ROCKCHIP_OUT_MODE_P565;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (info->num_bus_formats)
+ s->bus_format = info->bus_formats[0];
+ else
+ s->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ s->output_type = DRM_MODE_CONNECTOR_DSI;
+ s->bus_flags = info->bus_flags;
+ s->color_space = V4L2_COLORSPACE_DEFAULT;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs
+dw_mipi_dsi2_encoder_helper_funcs = {
+ .atomic_enable = dw_mipi_dsi2_encoder_atomic_enable,
+ .atomic_check = dw_mipi_dsi2_encoder_atomic_check,
+};
+
+static int rockchip_dsi2_drm_create_encoder(struct dw_mipi_dsi2_rockchip *dsi2,
+ struct drm_device *drm_dev)
+{
+ struct drm_encoder *encoder = &dsi2->encoder.encoder;
+ int ret;
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+ dsi2->dev->of_node);
+
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
+ if (ret) {
+ dev_err(dsi2->dev, "Failed to initialize encoder with drm\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &dw_mipi_dsi2_encoder_helper_funcs);
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_rockchip_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = rockchip_dsi2_drm_create_encoder(dsi2, drm_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to create drm encoder\n");
+
+ rockchip_drm_encoder_set_crtc_endpoint_id(&dsi2->encoder,
+ dev->of_node, 0, 0);
+
+ ret = dw_mipi_dsi2_bind(dsi2->dmd, &dsi2->encoder.encoder);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to bind\n");
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_rockchip_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = dev_get_drvdata(dev);
+
+ dw_mipi_dsi2_unbind(dsi2->dmd);
+}
+
+static const struct component_ops dw_mipi_dsi2_rockchip_ops = {
+ .bind = dw_mipi_dsi2_rockchip_bind,
+ .unbind = dw_mipi_dsi2_rockchip_unbind,
+};
+
+static int dw_mipi_dsi2_rockchip_host_attach(void *priv_data,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+ int ret;
+
+ ret = component_add(dsi2->dev, &dw_mipi_dsi2_rockchip_ops);
+ if (ret)
+ return dev_err_probe(dsi2->dev, ret, "Failed to register component\n");
+
+ return 0;
+}
+
+static int dw_mipi_dsi2_rockchip_host_detach(void *priv_data,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = priv_data;
+
+ component_del(dsi2->dev, &dw_mipi_dsi2_rockchip_ops);
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi2_host_ops dw_mipi_dsi2_rockchip_host_ops = {
+ .attach = dw_mipi_dsi2_rockchip_host_attach,
+ .detach = dw_mipi_dsi2_rockchip_host_detach,
+};
+
+static const struct regmap_config dw_mipi_dsi2_rockchip_regmap_config = {
+ .name = "dsi2-host",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static int dw_mipi_dsi2_rockchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct rockchip_dw_dsi2_chip_data *cdata =
+ of_device_get_match_data(dev);
+ struct dw_mipi_dsi2_rockchip *dsi2;
+ struct resource *res;
+ void __iomem *base;
+ int i;
+
+ dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
+ if (!dsi2)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "Unable to get dsi registers\n");
+
+ dsi2->regmap = devm_regmap_init_mmio(dev, base, &dw_mipi_dsi2_rockchip_regmap_config);
+ if (IS_ERR(dsi2->regmap))
+ return dev_err_probe(dev, PTR_ERR(dsi2->regmap), "failed to init register map\n");
+
+ i = 0;
+ while (cdata[i].reg) {
+ if (cdata[i].reg == res->start) {
+ dsi2->cdata = &cdata[i];
+ break;
+ }
+
+ i++;
+ }
+
+ if (!dsi2->cdata)
+ return dev_err_probe(dev, -EINVAL, "No dsi-config for %s node\n", np->name);
+
+ dsi2->grf_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(dsi2->grf_regmap))
+ return dev_err_probe(dsi2->dev, PTR_ERR(dsi2->grf_regmap), "Unable to get grf\n");
+
+ dsi2->phy = devm_phy_optional_get(dev, "dcphy");
+ if (IS_ERR(dsi2->phy))
+ return dev_err_probe(dev, PTR_ERR(dsi2->phy), "failed to get mipi phy\n");
+
+ dsi2->dev = dev;
+ dsi2->pdata.regmap = dsi2->regmap;
+ dsi2->pdata.max_data_lanes = 4;
+ dsi2->pdata.phy_ops = &dw_mipi_dsi2_rockchip_phy_ops;
+ dsi2->pdata.host_ops = &dw_mipi_dsi2_rockchip_host_ops;
+ dsi2->pdata.priv_data = dsi2;
+ platform_set_drvdata(pdev, dsi2);
+
+ dsi2->dmd = dw_mipi_dsi2_probe(pdev, &dsi2->pdata);
+ if (IS_ERR(dsi2->dmd))
+ return dev_err_probe(dev, PTR_ERR(dsi2->dmd), "Failed to probe dw_mipi_dsi2\n");
+
+ return 0;
+}
+
+static void dw_mipi_dsi2_rockchip_remove(struct platform_device *pdev)
+{
+ struct dw_mipi_dsi2_rockchip *dsi2 = platform_get_drvdata(pdev);
+
+ dw_mipi_dsi2_remove(dsi2->dmd);
+}
+
+static const struct dsigrf_reg rk3588_dsi0_grf_reg_fields[MAX_FIELDS] = {
+ [TXREQCLKHS_EN] = { 0x0000, 11, 11 },
+ [GATING_EN] = { 0x0000, 10, 10 },
+ [IPI_SHUTDN] = { 0x0000, 9, 9 },
+ [IPI_COLORM] = { 0x0000, 8, 8 },
+ [IPI_COLOR_DEPTH] = { 0x0000, 4, 7 },
+ [IPI_FORMAT] = { 0x0000, 0, 3 },
+};
+
+static const struct dsigrf_reg rk3588_dsi1_grf_reg_fields[MAX_FIELDS] = {
+ [TXREQCLKHS_EN] = { 0x0004, 11, 11 },
+ [GATING_EN] = { 0x0004, 10, 10 },
+ [IPI_SHUTDN] = { 0x0004, 9, 9 },
+ [IPI_COLORM] = { 0x0004, 8, 8 },
+ [IPI_COLOR_DEPTH] = { 0x0004, 4, 7 },
+ [IPI_FORMAT] = { 0x0004, 0, 3 },
+};
+
+static const struct rockchip_dw_dsi2_chip_data rk3588_chip_data[] = {
+ {
+ .reg = 0xfde20000,
+ .grf_regs = rk3588_dsi0_grf_reg_fields,
+ .max_bit_rate_per_lane = 4500000ULL,
+ },
+ {
+ .reg = 0xfde30000,
+ .grf_regs = rk3588_dsi1_grf_reg_fields,
+ .max_bit_rate_per_lane = 4500000ULL,
+ }
+};
+
+static const struct of_device_id dw_mipi_dsi2_rockchip_dt_ids[] = {
+ {
+ .compatible = "rockchip,rk3588-mipi-dsi2",
+ .data = &rk3588_chip_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_mipi_dsi2_rockchip_dt_ids);
+
+struct platform_driver dw_mipi_dsi2_rockchip_driver = {
+ .probe = dw_mipi_dsi2_rockchip_probe,
+ .remove = dw_mipi_dsi2_rockchip_remove,
+ .driver = {
+ .of_match_table = dw_mipi_dsi2_rockchip_dt_ids,
+ .name = "dw-mipi-dsi2",
+ },
+};
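grf_field_write() above relies on the Rockchip GRF write-enable convention: the upper 16 bits of the written word select which of the lower 16 bits take effect, so a field can be updated without a read-modify-write cycle. The same convention in a standalone sketch (helper name is illustrative):

	/* update bits [msb:lsb] of a GRF register, leaving the rest untouched */
	static void grf_update_field(struct regmap *grf, u32 offset,
				     unsigned int msb, unsigned int lsb, u32 val)
	{
		u32 mask = GENMASK(msb, lsb);

		/* high half: write-enable mask; low half: new field value */
		regmap_write(grf, offset, (mask << 16) | ((val << lsb) & mask));
	}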
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 240552eb517f..e7a6669c46b0 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2014, Rockchip Electronics Co., Ltd.
*/
#include <linux/clk.h>
@@ -76,6 +76,7 @@ struct rockchip_hdmi {
struct rockchip_encoder encoder;
const struct rockchip_hdmi_chip_data *chip_data;
const struct dw_hdmi_plat_data *plat_data;
+ struct clk *hdmiphy_clk;
struct clk *ref_clk;
struct clk *grf_clk;
struct dw_hdmi *hdmi;
@@ -91,74 +92,70 @@ static struct rockchip_hdmi *to_rockchip_hdmi(struct drm_encoder *encoder)
static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = {
{
- 27000000, {
- { 0x00b3, 0x0000},
- { 0x2153, 0x0000},
- { 0x40f3, 0x0000}
+ 30666000, {
+ { 0x00b3, 0x0000 },
+ { 0x2153, 0x0000 },
+ { 0x40f3, 0x0000 },
},
}, {
- 36000000, {
- { 0x00b3, 0x0000},
- { 0x2153, 0x0000},
- { 0x40f3, 0x0000}
+ 36800000, {
+ { 0x00b3, 0x0000 },
+ { 0x2153, 0x0000 },
+ { 0x40a2, 0x0001 },
},
}, {
- 40000000, {
- { 0x00b3, 0x0000},
- { 0x2153, 0x0000},
- { 0x40f3, 0x0000}
+ 46000000, {
+ { 0x00b3, 0x0000 },
+ { 0x2142, 0x0001 },
+ { 0x40a2, 0x0001 },
},
}, {
- 54000000, {
- { 0x0072, 0x0001},
- { 0x2142, 0x0001},
- { 0x40a2, 0x0001},
+ 61333000, {
+ { 0x0072, 0x0001 },
+ { 0x2142, 0x0001 },
+ { 0x40a2, 0x0001 },
},
}, {
- 65000000, {
- { 0x0072, 0x0001},
- { 0x2142, 0x0001},
- { 0x40a2, 0x0001},
+ 73600000, {
+ { 0x0072, 0x0001 },
+ { 0x2142, 0x0001 },
+ { 0x4061, 0x0002 },
},
}, {
- 66000000, {
- { 0x013e, 0x0003},
- { 0x217e, 0x0002},
- { 0x4061, 0x0002}
+ 92000000, {
+ { 0x0072, 0x0001 },
+ { 0x2145, 0x0002 },
+ { 0x4061, 0x0002 },
},
}, {
- 74250000, {
- { 0x0072, 0x0001},
- { 0x2145, 0x0002},
- { 0x4061, 0x0002}
+ 122666000, {
+ { 0x0051, 0x0002 },
+ { 0x2145, 0x0002 },
+ { 0x4061, 0x0002 },
},
}, {
- 83500000, {
- { 0x0072, 0x0001},
+ 147200000, {
+ { 0x0051, 0x0002 },
+ { 0x2145, 0x0002 },
+ { 0x4064, 0x0003 },
},
}, {
- 108000000, {
- { 0x0051, 0x0002},
- { 0x2145, 0x0002},
- { 0x4061, 0x0002}
+ 184000000, {
+ { 0x0051, 0x0002 },
+ { 0x214c, 0x0003 },
+ { 0x4064, 0x0003 },
},
}, {
- 106500000, {
- { 0x0051, 0x0002},
- { 0x2145, 0x0002},
- { 0x4061, 0x0002}
- },
- }, {
- 146250000, {
- { 0x0051, 0x0002},
- { 0x2145, 0x0002},
- { 0x4061, 0x0002}
+ 226666000, {
+ { 0x0040, 0x0003 },
+ { 0x214c, 0x0003 },
+ { 0x4064, 0x0003 },
},
}, {
- 148500000, {
- { 0x0051, 0x0003},
- { 0x214c, 0x0003},
- { 0x4064, 0x0003}
+ 272000000, {
+ { 0x0040, 0x0003 },
+ { 0x214c, 0x0003 },
+ { 0x5a64, 0x0003 },
},
}, {
340000000, {
@@ -167,10 +164,16 @@ static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = {
{ 0x5a64, 0x0003 },
},
}, {
+ 600000000, {
+ { 0x1a40, 0x0003 },
+ { 0x3b4c, 0x0003 },
+ { 0x5a64, 0x0003 },
+ },
+ }, {
~0UL, {
- { 0x00a0, 0x000a },
- { 0x2001, 0x000f },
- { 0x4002, 0x000f },
+ { 0x0000, 0x0000 },
+ { 0x0000, 0x0000 },
+ { 0x0000, 0x0000 },
},
}
};
@@ -178,31 +181,18 @@ static const struct dw_hdmi_mpll_config rockchip_mpll_cfg[] = {
static const struct dw_hdmi_curr_ctrl rockchip_cur_ctr[] = {
/* pixelclk bpp8 bpp10 bpp12 */
{
- 40000000, { 0x0018, 0x0018, 0x0018 },
- }, {
- 65000000, { 0x0028, 0x0028, 0x0028 },
- }, {
- 66000000, { 0x0038, 0x0038, 0x0038 },
- }, {
- 74250000, { 0x0028, 0x0038, 0x0038 },
- }, {
- 83500000, { 0x0028, 0x0038, 0x0038 },
- }, {
- 146250000, { 0x0038, 0x0038, 0x0038 },
- }, {
- 148500000, { 0x0000, 0x0038, 0x0038 },
- }, {
600000000, { 0x0000, 0x0000, 0x0000 },
}, {
- ~0UL, { 0x0000, 0x0000, 0x0000},
+ ~0UL, { 0x0000, 0x0000, 0x0000 },
}
};
static const struct dw_hdmi_phy_config rockchip_phy_config[] = {
/*pixelclk symbol term vlev*/
{ 74250000, 0x8009, 0x0004, 0x0272},
- { 148500000, 0x802b, 0x0004, 0x028d},
+ { 165000000, 0x802b, 0x0004, 0x0209},
{ 297000000, 0x8039, 0x0005, 0x028d},
+ { 594000000, 0x8039, 0x0000, 0x019d},
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
@@ -251,10 +241,7 @@ dw_hdmi_rockchip_mode_valid(struct dw_hdmi *dw_hdmi, void *data,
const struct drm_display_mode *mode)
{
struct rockchip_hdmi *hdmi = data;
- const struct dw_hdmi_mpll_config *mpll_cfg = rockchip_mpll_cfg;
int pclk = mode->clock * 1000;
- bool exact_match = hdmi->plat_data->phy_force_vendor;
- int i;
if (hdmi->chip_data->max_tmds_clock &&
mode->clock > hdmi->chip_data->max_tmds_clock)
@@ -263,26 +250,18 @@ dw_hdmi_rockchip_mode_valid(struct dw_hdmi *dw_hdmi, void *data,
if (hdmi->ref_clk) {
int rpclk = clk_round_rate(hdmi->ref_clk, pclk);
- if (abs(rpclk - pclk) > pclk / 1000)
+ if (rpclk < 0 || abs(rpclk - pclk) > pclk / 1000)
return MODE_NOCLOCK;
}
- for (i = 0; mpll_cfg[i].mpixelclock != (~0UL); i++) {
- /*
- * For vendor specific phys force an exact match of the pixelclock
- * to preserve the original behaviour of the driver.
- */
- if (exact_match && pclk == mpll_cfg[i].mpixelclock)
- return MODE_OK;
- /*
- * The Synopsys phy can work with pixelclocks up to the value given
- * in the corresponding mpll_cfg entry.
- */
- if (!exact_match && pclk <= mpll_cfg[i].mpixelclock)
- return MODE_OK;
+ if (hdmi->hdmiphy_clk) {
+ int rpclk = clk_round_rate(hdmi->hdmiphy_clk, pclk);
+
+ if (rpclk < 0 || abs(rpclk - pclk) > pclk / 1000)
+ return MODE_NOCLOCK;
}
- return MODE_BAD;
+ return MODE_OK;
}
static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
@@ -502,7 +481,7 @@ static struct rockchip_hdmi_chip_data rk3399_chip_data = {
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
.lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
.lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL),
- .max_tmds_clock = 340000,
+ .max_tmds_clock = 594000,
};
static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
@@ -516,7 +495,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
static struct rockchip_hdmi_chip_data rk3568_chip_data = {
.lcdsel_grf_reg = -1,
- .max_tmds_clock = 340000,
+ .max_tmds_clock = 594000,
};
static const struct dw_hdmi_plat_data rk3568_hdmi_drv_data = {
@@ -607,6 +586,15 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
+ if (hdmi->phy) {
+ struct of_phandle_args clkspec;
+
+ clkspec.np = hdmi->phy->dev.of_node;
+ hdmi->hdmiphy_clk = of_clk_get_from_provider(&clkspec);
+ if (IS_ERR(hdmi->hdmiphy_clk))
+ hdmi->hdmiphy_clk = NULL;
+ }
+
if (hdmi->chip_data == &rk3568_chip_data) {
regmap_write(hdmi->regmap, RK3568_GRF_VO_CON1,
HIWORD_UPDATE(RK3568_HDMI_SDAIN_MSK |
@@ -678,7 +666,7 @@ static const struct dev_pm_ops dw_hdmi_rockchip_pm = {
struct platform_driver dw_hdmi_rockchip_pltfm_driver = {
.probe = dw_hdmi_rockchip_probe,
- .remove_new = dw_hdmi_rockchip_remove,
+ .remove = dw_hdmi_rockchip_remove,
.driver = {
.name = "dwhdmi-rockchip",
.pm = &dw_hdmi_rockchip_pm,
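The reworked mode_valid above drops the MPLL table walk and instead asks the clock framework whether the pixel clock is actually reachable, for the reference clock and, when one is provided, the external HDMI PHY clock. Both checks reduce to the same 0.1% tolerance test (a sketch using the same calls as the patch):

	/* true if clk can hit pclk within 0.1%, the tolerance used above */
	static bool pclk_achievable(struct clk *clk, int pclk)
	{
		long rounded = clk_round_rate(clk, pclk);

		return rounded >= 0 && abs(rounded - pclk) <= pclk / 1000;
	}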
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
new file mode 100644
index 000000000000..e498767a0a66
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Algea Cao <algea.cao@rock-chips.com>
+ * Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/workqueue.h>
+
+#include <drm/bridge/dw_hdmi_qp.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "rockchip_drm_drv.h"
+
+#define RK3588_GRF_SOC_CON2 0x0308
+#define RK3588_HDMI0_HPD_INT_MSK BIT(13)
+#define RK3588_HDMI0_HPD_INT_CLR BIT(12)
+#define RK3588_HDMI1_HPD_INT_MSK BIT(15)
+#define RK3588_HDMI1_HPD_INT_CLR BIT(14)
+#define RK3588_GRF_SOC_CON7 0x031c
+#define RK3588_SET_HPD_PATH_MASK GENMASK(13, 12)
+#define RK3588_GRF_SOC_STATUS1 0x0384
+#define RK3588_HDMI0_LEVEL_INT BIT(16)
+#define RK3588_HDMI1_LEVEL_INT BIT(24)
+#define RK3588_GRF_VO1_CON3 0x000c
+#define RK3588_GRF_VO1_CON6 0x0018
+#define RK3588_SCLIN_MASK BIT(9)
+#define RK3588_SDAIN_MASK BIT(10)
+#define RK3588_MODE_MASK BIT(11)
+#define RK3588_I2S_SEL_MASK BIT(13)
+#define RK3588_GRF_VO1_CON9 0x0024
+#define RK3588_HDMI0_GRANT_SEL BIT(10)
+#define RK3588_HDMI1_GRANT_SEL BIT(12)
+
+#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
+#define HOTPLUG_DEBOUNCE_MS 150
+#define MAX_HDMI_PORT_NUM 2
+
+struct rockchip_hdmi_qp {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap *vo_regmap;
+ struct rockchip_encoder encoder;
+ struct clk *ref_clk;
+ struct dw_hdmi_qp *hdmi;
+ struct phy *phy;
+ struct gpio_desc *enable_gpio;
+ struct delayed_work hpd_work;
+ int port_id;
+};
+
+static struct rockchip_hdmi_qp *to_rockchip_hdmi_qp(struct drm_encoder *encoder)
+{
+ struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
+
+ return container_of(rkencoder, struct rockchip_hdmi_qp, encoder);
+}
+
+static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder)
+{
+ struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder);
+ struct drm_crtc *crtc = encoder->crtc;
+ unsigned long long rate;
+
+ /* Unconditionally switch to TMDS as FRL is not yet supported */
+ gpiod_set_value(hdmi->enable_gpio, 1);
+
+ if (crtc && crtc->state) {
+ rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode,
+ 8, HDMI_COLORSPACE_RGB);
+ clk_set_rate(hdmi->ref_clk, rate);
+ /*
+ * FIXME: Temporary workaround to pass pixel clock rate
+ * to the PHY driver until phy_configure_opts_hdmi
+ * becomes available in the PHY API. See also the related
+ * comment in rk_hdptx_phy_power_on() from
+ * drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+ */
+ phy_set_bus_width(hdmi->phy, div_u64(rate, 100));
+ }
+}
+
+static int
+dw_hdmi_qp_rockchip_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+ s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ return 0;
+}
+
+static const struct
+drm_encoder_helper_funcs dw_hdmi_qp_rockchip_encoder_helper_funcs = {
+ .enable = dw_hdmi_qp_rockchip_encoder_enable,
+ .atomic_check = dw_hdmi_qp_rockchip_encoder_atomic_check,
+};
+
+static int dw_hdmi_qp_rk3588_phy_init(struct dw_hdmi_qp *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
+
+ return phy_power_on(hdmi->phy);
+}
+
+static void dw_hdmi_qp_rk3588_phy_disable(struct dw_hdmi_qp *dw_hdmi,
+ void *data)
+{
+ struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
+
+ phy_power_off(hdmi->phy);
+}
+
+static enum drm_connector_status
+dw_hdmi_qp_rk3588_read_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
+ u32 val;
+
+ regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &val);
+ val &= hdmi->port_id ? RK3588_HDMI1_LEVEL_INT : RK3588_HDMI0_LEVEL_INT;
+
+ return val ? connector_status_connected : connector_status_disconnected;
+}
+
+static void dw_hdmi_qp_rk3588_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
+ u32 val;
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
+ RK3588_HDMI1_HPD_INT_CLR | RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
+ RK3588_HDMI0_HPD_INT_CLR | RK3588_HDMI0_HPD_INT_MSK);
+
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
+}
+
+static const struct dw_hdmi_qp_phy_ops rk3588_hdmi_phy_ops = {
+ .init = dw_hdmi_qp_rk3588_phy_init,
+ .disable = dw_hdmi_qp_rk3588_phy_disable,
+ .read_hpd = dw_hdmi_qp_rk3588_read_hpd,
+ .setup_hpd = dw_hdmi_qp_rk3588_setup_hpd,
+};
+
+static void dw_hdmi_qp_rk3588_hpd_work(struct work_struct *work)
+{
+ struct rockchip_hdmi_qp *hdmi = container_of(work,
+ struct rockchip_hdmi_qp,
+ hpd_work.work);
+ struct drm_device *drm = hdmi->encoder.encoder.dev;
+ bool changed;
+
+ if (drm) {
+ changed = drm_helper_hpd_irq_event(drm);
+ if (changed)
+ drm_dbg(hdmi, "connector status changed\n");
+ }
+}
+
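+/*
+ * The hard IRQ handler only masks the HPD interrupt; the threaded
+ * handler below acks the status, arms the debounced hotplug work and
+ * unmasks the interrupt again.
+ */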
+static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_id;
+ u32 intr_stat, val;
+
+ regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat);
+
+ if (intr_stat) {
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK,
+ RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK,
+ RK3588_HDMI0_HPD_INT_MSK);
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_id;
+ u32 intr_stat, val;
+
+ regmap_read(hdmi->regmap, RK3588_GRF_SOC_STATUS1, &intr_stat);
+ if (!intr_stat)
+ return IRQ_NONE;
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
+ RK3588_HDMI1_HPD_INT_CLR);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
+ RK3588_HDMI0_HPD_INT_CLR);
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
+
+ mod_delayed_work(system_wq, &hdmi->hpd_work,
+ msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
+
+ if (hdmi->port_id)
+ val |= HIWORD_UPDATE(0, RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK);
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
+
+ return IRQ_HANDLED;
+}
+
+struct rockchip_hdmi_qp_cfg {
+ unsigned int num_ports;
+ unsigned int port_ids[MAX_HDMI_PORT_NUM];
+ const struct dw_hdmi_qp_phy_ops *phy_ops;
+};
+
+static const struct rockchip_hdmi_qp_cfg rk3588_hdmi_cfg = {
+ .num_ports = 2,
+ .port_ids = {
+ 0xfde80000,
+ 0xfdea0000,
+ },
+ .phy_ops = &rk3588_hdmi_phy_ops,
+};
+
+static const struct of_device_id dw_hdmi_qp_rockchip_dt_ids[] = {
+ { .compatible = "rockchip,rk3588-dw-hdmi-qp",
+ .data = &rk3588_hdmi_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_hdmi_qp_rockchip_dt_ids);
+
+static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct rockchip_hdmi_qp_cfg *cfg;
+ struct dw_hdmi_qp_plat_data plat_data;
+ struct drm_device *drm = data;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct rockchip_hdmi_qp *hdmi;
+ struct resource *res;
+ struct clk_bulk_data *clks;
+ int ret, irq, i;
+ u32 val;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ cfg = of_device_get_match_data(dev);
+ if (!cfg)
+ return -ENODEV;
+
+ hdmi->dev = &pdev->dev;
+ hdmi->port_id = -ENODEV;
+
+ /* Identify port ID by matching base IO address */
+ for (i = 0; i < cfg->num_ports; i++) {
+ if (res->start == cfg->port_ids[i]) {
+ hdmi->port_id = i;
+ break;
+ }
+ }
+ if (hdmi->port_id < 0) {
+ drm_err(hdmi, "Failed to match HDMI port ID\n");
+ return hdmi->port_id;
+ }
+
+ plat_data.phy_ops = cfg->phy_ops;
+ plat_data.phy_data = hdmi;
+
+ encoder = &hdmi->encoder.encoder;
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ rockchip_drm_encoder_set_crtc_endpoint_id(&hdmi->encoder,
+ dev->of_node, 0, 0);
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(hdmi->regmap)) {
+ drm_err(hdmi, "Unable to get rockchip,grf\n");
+ return PTR_ERR(hdmi->regmap);
+ }
+
+ hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,vo-grf");
+ if (IS_ERR(hdmi->vo_regmap)) {
+ drm_err(hdmi, "Unable to get rockchip,vo-grf\n");
+ return PTR_ERR(hdmi->vo_regmap);
+ }
+
+ ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks);
+ if (ret < 0) {
+ drm_err(hdmi, "Failed to get clocks: %d\n", ret);
+ return ret;
+ }
+
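+	/* Pick the "ref" clock out of the bulk-enabled clocks */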
+ for (i = 0; i < ret; i++) {
+ if (!strcmp(clks[i].id, "ref")) {
+			hdmi->ref_clk = clks[i].clk;
+ break;
+ }
+ }
+ if (!hdmi->ref_clk) {
+ drm_err(hdmi, "Missing ref clock\n");
+ return -EINVAL;
+ }
+
+ hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(hdmi->enable_gpio)) {
+ ret = PTR_ERR(hdmi->enable_gpio);
+ drm_err(hdmi, "Failed to request enable GPIO: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0);
+ if (IS_ERR(hdmi->phy)) {
+ ret = PTR_ERR(hdmi->phy);
+ if (ret != -EPROBE_DEFER)
+ drm_err(hdmi, "failed to get phy: %d\n", ret);
+ return ret;
+ }
+
+ val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
+ HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
+ HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
+ HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
+ regmap_write(hdmi->vo_regmap,
+ hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
+ val);
+
+ val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
+ RK3588_SET_HPD_PATH_MASK);
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
+ RK3588_HDMI1_GRANT_SEL);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
+ RK3588_HDMI0_GRANT_SEL);
+ regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
+
+ INIT_DELAYED_WORK(&hdmi->hpd_work, dw_hdmi_qp_rk3588_hpd_work);
+
+ plat_data.main_irq = platform_get_irq_byname(pdev, "main");
+ if (plat_data.main_irq < 0)
+ return plat_data.main_irq;
+
+ irq = platform_get_irq_byname(pdev, "hpd");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(hdmi->dev, irq,
+ dw_hdmi_qp_rk3588_hardirq,
+ dw_hdmi_qp_rk3588_irq,
+ IRQF_SHARED, "dw-hdmi-qp-hpd",
+ hdmi);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &dw_hdmi_qp_rockchip_encoder_helper_funcs);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+
+ platform_set_drvdata(pdev, hdmi);
+
+ hdmi->hdmi = dw_hdmi_qp_bind(pdev, encoder, &plat_data);
+ if (IS_ERR(hdmi->hdmi)) {
+ ret = PTR_ERR(hdmi->hdmi);
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ drm_err(hdmi, "failed to init bridge connector: %d\n", ret);
+ return ret;
+ }
+
+ return drm_connector_attach_encoder(connector, encoder);
+}
+
+static void dw_hdmi_qp_rockchip_unbind(struct device *dev,
+ struct device *master,
+ void *data)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&hdmi->hpd_work);
+
+ drm_encoder_cleanup(&hdmi->encoder.encoder);
+}
+
+static const struct component_ops dw_hdmi_qp_rockchip_ops = {
+ .bind = dw_hdmi_qp_rockchip_bind,
+ .unbind = dw_hdmi_qp_rockchip_unbind,
+};
+
+static int dw_hdmi_qp_rockchip_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dw_hdmi_qp_rockchip_ops);
+}
+
+static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dw_hdmi_qp_rockchip_ops);
+}
+
+static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
+{
+ struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
+ u32 val;
+
+ val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
+ HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
+ HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
+ HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
+ regmap_write(hdmi->vo_regmap,
+ hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
+ val);
+
+ val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK,
+ RK3588_SET_HPD_PATH_MASK);
+ regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
+
+ if (hdmi->port_id)
+ val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL,
+ RK3588_HDMI1_GRANT_SEL);
+ else
+ val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL,
+ RK3588_HDMI0_GRANT_SEL);
+ regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
+
+ dw_hdmi_qp_resume(dev, hdmi->hdmi);
+
+ if (hdmi->encoder.encoder.dev)
+ drm_helper_hpd_irq_event(hdmi->encoder.encoder.dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_hdmi_qp_rockchip_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_qp_rockchip_resume)
+};
+
+struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver = {
+ .probe = dw_hdmi_qp_rockchip_probe,
+ .remove = dw_hdmi_qp_rockchip_remove,
+ .driver = {
+ .name = "dwhdmiqp-rockchip",
+ .pm = &dw_hdmi_qp_rockchip_pm,
+ .of_match_table = dw_hdmi_qp_rockchip_dt_ids,
+ },
+};
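
The HPD handling in this new driver follows a common mask/ack/debounce/unmask pattern. For illustration only, a minimal hedged sketch of that structure (all names below are hypothetical, not part of the driver):

/* Sketch of the mask -> ack -> debounce -> unmask HPD flow; hypothetical names. */
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct hpd_ctx {
	struct delayed_work work;	/* runs the debounced hotplug handler */
	void (*mask_irq)(struct hpd_ctx *ctx, bool mask);
	void (*ack_irq)(struct hpd_ctx *ctx);
};

static irqreturn_t hpd_hardirq(int irq, void *dev_id)
{
	struct hpd_ctx *ctx = dev_id;

	/* Quiet the line immediately, defer the real work to the thread */
	ctx->mask_irq(ctx, true);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t hpd_thread(int irq, void *dev_id)
{
	struct hpd_ctx *ctx = dev_id;

	ctx->ack_irq(ctx);
	/* Debounce: (re)arm the delayed work instead of reacting at once */
	mod_delayed_work(system_wq, &ctx->work, msecs_to_jiffies(150));
	ctx->mask_irq(ctx, false);
	return IRQ_HANDLED;
}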
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 42ef62aa0a1e..898d90155057 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
* Yakir Yang <ykk@rock-chips.com>
*/
@@ -1017,7 +1017,7 @@ MODULE_DEVICE_TABLE(of, inno_hdmi_dt_ids);
struct platform_driver inno_hdmi_driver = {
.probe = inno_hdmi_probe,
- .remove_new = inno_hdmi_remove,
+ .remove = inno_hdmi_remove,
.driver = {
.name = "innohdmi-rockchip",
.of_match_table = inno_hdmi_dt_ids,
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h
index a7edf3559e60..8b7ef3fac485 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.h
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
* Yakir Yang <ykk@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 784de990da1b..403336397214 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
*/
@@ -858,7 +858,7 @@ MODULE_DEVICE_TABLE(of, rk3066_hdmi_dt_ids);
struct platform_driver rk3066_hdmi_driver = {
.probe = rk3066_hdmi_probe,
- .remove_new = rk3066_hdmi_remove,
+ .remove = rk3066_hdmi_remove,
.driver = {
.name = "rockchip-rk3066-hdmi",
.of_match_table = rk3066_hdmi_dt_ids,
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.h b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
index 39a31c62a428..c3598ba7428c 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.h
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Zheng Yang <zhengyang@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 11e5d10de4d7..439edc165ff6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*
* based on exynos_drm_drv.c
*/
+#include <linux/aperture.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -16,7 +17,7 @@
#include <linux/console.h>
#include <linux/iommu.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
@@ -38,7 +39,6 @@
#define DRIVER_NAME "rockchip"
#define DRIVER_DESC "RockChip Soc DRM"
-#define DRIVER_DATE "20140818"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -145,7 +145,7 @@ static int rockchip_drm_bind(struct device *dev)
int ret;
/* Remove existing drivers that may own the framebuffer memory. */
- ret = drm_aperture_remove_framebuffers(&rockchip_drm_driver);
+ ret = aperture_remove_all_conflicting_devices(rockchip_drm_driver.name);
if (ret) {
DRM_DEV_ERROR(dev,
"Failed to remove existing framebuffers - %d.\n",
@@ -195,7 +195,7 @@ static int rockchip_drm_bind(struct device *dev)
if (ret)
goto err_kms_helper_poll_fini;
- drm_fbdev_dma_setup(drm_dev, 0);
+ drm_client_setup(drm_dev, NULL);
return 0;
err_kms_helper_poll_fini:
@@ -230,10 +230,10 @@ static const struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.dumb_create = rockchip_gem_dumb_create,
.gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &rockchip_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -358,11 +358,34 @@ static void rockchip_drm_match_remove(struct device *dev)
device_link_del(link);
}
+/* list of preferred vop devices */
+static const char *const rockchip_drm_match_preferred[] = {
+ "rockchip,rk3399-vop-big",
+ NULL,
+};
+
static struct component_match *rockchip_drm_match_add(struct device *dev)
{
struct component_match *match = NULL;
+ struct device_node *port;
int i;
+ /* add preferred vop device match before adding driver device matches */
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(dev->of_node, "ports", i);
+ if (!port)
+ break;
+
+ if (of_device_is_available(port->parent) &&
+ of_device_compatible_match(port->parent,
+ rockchip_drm_match_preferred))
+ drm_of_component_match_add(dev, &match,
+ component_compare_of,
+ port->parent);
+
+ of_node_put(port);
+ }
+
for (i = 0; i < num_rockchip_sub_drivers; i++) {
struct platform_driver *drv = rockchip_sub_drivers[i];
struct device *p = NULL, *d;
@@ -463,8 +486,7 @@ static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
- if (drm)
- drm_atomic_helper_shutdown(drm);
+ drm_atomic_helper_shutdown(drm);
}
static const struct of_device_id rockchip_drm_dt_ids[] = {
@@ -475,7 +497,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
- .remove_new = rockchip_drm_platform_remove,
+ .remove = rockchip_drm_platform_remove,
.shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
@@ -507,8 +529,12 @@ static int __init rockchip_drm_init(void)
ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver,
CONFIG_ROCKCHIP_DW_HDMI);
+ ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_qp_rockchip_pltfm_driver,
+ CONFIG_ROCKCHIP_DW_HDMI_QP);
ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
CONFIG_ROCKCHIP_DW_MIPI_DSI);
+ ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi2_rockchip_driver,
+ CONFIG_ROCKCHIP_DW_MIPI_DSI2);
ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver,
CONFIG_ROCKCHIP_RK3066_HDMI);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 8d566fcd80a2..c183e82a42a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*
* based on exynos_drm_drv.h
@@ -88,7 +88,9 @@ int rockchip_drm_encoder_set_crtc_endpoint_id(struct rockchip_encoder *rencoder,
int rockchip_drm_endpoint_is_subdriver(struct device_node *ep);
extern struct platform_driver cdn_dp_driver;
extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
+extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver;
extern struct platform_driver dw_mipi_dsi_rockchip_driver;
+extern struct platform_driver dw_mipi_dsi2_rockchip_driver;
extern struct platform_driver inno_hdmi_driver;
extern struct platform_driver rockchip_dp_driver;
extern struct platform_driver rockchip_lvds_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index cfe8b793d344..dcc1f07632c3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
index bae4e079dfb1..5179026b12d6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 93ed841f5dce..6330b883efc3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index 72f59ac6d258..cdeae36b91a1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index f161f40d8ce4..57747f1cff26 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
@@ -1093,10 +1093,10 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
- if (state)
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
- else /* Special case for asynchronous cursor updates. */
+ crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
+
+ /* Special case for asynchronous cursor updates. */
+ if (!crtc_state)
crtc_state = plane->crtc->state;
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 0cf512cc1614..f04c9731ae7b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 9873172e3fd3..17a98845fd31 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -24,16 +24,17 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <uapi/linux/videodev2.h>
#include <dt-bindings/soc/rockchip,vop2.h>
-#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_vop2.h"
#include "rockchip_rgb.h"
@@ -186,6 +187,7 @@ struct vop2 {
*/
u32 registered_num_wins;
+ struct resource *res;
void __iomem *regs;
struct regmap *map;
@@ -237,6 +239,37 @@ struct vop2 {
#define vop2_output_if_is_dpi(x) ((x) == ROCKCHIP_VOP2_EP_RGB0)
+/*
+ * bus-format types.
+ */
+struct drm_bus_format_enum_list {
+ int type;
+ const char *name;
+};
+
+static const struct drm_bus_format_enum_list drm_bus_format_enum_list[] = {
+ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+ { MEDIA_BUS_FMT_RGB565_1X16, "RGB565_1X16" },
+ { MEDIA_BUS_FMT_RGB666_1X18, "RGB666_1X18" },
+ { MEDIA_BUS_FMT_RGB666_1X24_CPADHI, "RGB666_1X24_CPADHI" },
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, "RGB666_1X7X3_SPWG" },
+ { MEDIA_BUS_FMT_YUV8_1X24, "YUV8_1X24" },
+ { MEDIA_BUS_FMT_UYYVYY8_0_5X24, "UYYVYY8_0_5X24" },
+ { MEDIA_BUS_FMT_YUV10_1X30, "YUV10_1X30" },
+ { MEDIA_BUS_FMT_UYYVYY10_0_5X30, "UYYVYY10_0_5X30" },
+ { MEDIA_BUS_FMT_RGB888_3X8, "RGB888_3X8" },
+ { MEDIA_BUS_FMT_RGB888_1X24, "RGB888_1X24" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, "RGB888_1X7X4_SPWG" },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, "RGB888_1X7X4_JEIDA" },
+ { MEDIA_BUS_FMT_UYVY8_2X8, "UYVY8_2X8" },
+ { MEDIA_BUS_FMT_YUYV8_1X16, "YUYV8_1X16" },
+ { MEDIA_BUS_FMT_UYVY8_1X16, "UYVY8_1X16" },
+ { MEDIA_BUS_FMT_RGB101010_1X30, "RGB101010_1X30" },
+ { MEDIA_BUS_FMT_YUYV10_1X20, "YUYV10_1X20" },
+};
+
+static DRM_ENUM_NAME_FN(drm_get_bus_format_name, drm_bus_format_enum_list)
+
static const struct regmap_config vop2_regmap_config;
static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc)
@@ -278,6 +311,15 @@ static u32 vop2_readl(struct vop2 *vop2, u32 offset)
return val;
}
+static u32 vop2_vp_read(struct vop2_video_port *vp, u32 offset)
+{
+ u32 val;
+
+ regmap_read(vp->vop2->map, vp->data->offset + offset, &val);
+
+ return val;
+}
+
static void vop2_win_write(const struct vop2_win *win, unsigned int reg, u32 v)
{
regmap_field_write(win->reg[reg], v);
@@ -550,6 +592,25 @@ static bool rockchip_vop2_mod_supported(struct drm_plane *plane, u32 format,
if (modifier == DRM_FORMAT_MOD_INVALID)
return false;
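+	/* On RK3566/RK3568, cluster windows can only scan out AFBC buffers */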
+ if (vop2->data->soc_id == 3568 || vop2->data->soc_id == 3566) {
+ if (vop2_cluster_window(win)) {
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ drm_dbg_kms(vop2->drm,
+ "Cluster window only supports format with afbc\n");
+ return false;
+ }
+ }
+ }
+
+ if (format == DRM_FORMAT_XRGB2101010 || format == DRM_FORMAT_XBGR2101010) {
+ if (vop2->data->soc_id == 3588) {
+ if (!rockchip_afbc(plane, modifier)) {
+ drm_dbg_kms(vop2->drm, "Only support 32 bpp format with afbc\n");
+ return false;
+ }
+ }
+ }
+
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
@@ -998,6 +1059,67 @@ static void vop2_disable(struct vop2 *vop2)
clk_disable_unprepare(vop2->hclk);
}
+static bool vop2_vp_dsp_lut_is_enabled(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ return dsp_ctrl & RK3568_VP_DSP_CTRL__DSP_LUT_EN;
+}
+
+static void vop2_vp_dsp_lut_disable(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ dsp_ctrl &= ~RK3568_VP_DSP_CTRL__DSP_LUT_EN;
+ vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+}
+
+static bool vop2_vp_dsp_lut_poll_disabled(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl;
+ int ret = readx_poll_timeout(vop2_vp_dsp_lut_is_enabled, vp, dsp_ctrl,
+ !dsp_ctrl, 5, 30 * 1000);
+ if (ret) {
+ drm_err(vp->vop2->drm, "display LUT RAM enable timeout!\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void vop2_vp_dsp_lut_enable(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ dsp_ctrl |= RK3568_VP_DSP_CTRL__DSP_LUT_EN;
+ vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+}
+
+static void vop2_vp_dsp_lut_update_enable(struct vop2_video_port *vp)
+{
+ u32 dsp_ctrl = vop2_vp_read(vp, RK3568_VP_DSP_CTRL);
+
+ dsp_ctrl |= RK3588_VP_DSP_CTRL__GAMMA_UPDATE_EN;
+ vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+}
+
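+/*
+ * RK3566/RK3568 share a single LUT that cannot be rewritten while in
+ * use; newer SoCs can update the LUT seamlessly via GAMMA_UPDATE_EN.
+ */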
+static inline bool vop2_supports_seamless_gamma_lut_update(struct vop2 *vop2)
+{
+ return (vop2->data->soc_id != 3566 && vop2->data->soc_id != 3568);
+}
+
+static bool vop2_gamma_lut_in_use(struct vop2 *vop2, struct vop2_video_port *vp)
+{
+ const int nr_vps = vop2->data->nr_vps;
+ int gamma_en_vp_id;
+
+ for (gamma_en_vp_id = 0; gamma_en_vp_id < nr_vps; gamma_en_vp_id++)
+ if (vop2_vp_dsp_lut_is_enabled(&vop2->vps[gamma_en_vp_id]))
+ break;
+
+ return gamma_en_vp_id != nr_vps && gamma_en_vp_id != vp->id;
+}
+
static void vop2_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -1271,8 +1393,9 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_w = drm_rect_width(dest);
if (dest->x1 + dsp_w > adjusted_mode->hdisplay) {
- drm_err(vop2->drm, "vp%d %s dest->x1[%d] + dsp_w[%d] exceed mode hdisplay[%d]\n",
- vp->id, win->data->name, dest->x1, dsp_w, adjusted_mode->hdisplay);
+ drm_dbg_kms(vop2->drm,
+ "vp%d %s dest->x1[%d] + dsp_w[%d] exceed mode hdisplay[%d]\n",
+ vp->id, win->data->name, dest->x1, dsp_w, adjusted_mode->hdisplay);
dsp_w = adjusted_mode->hdisplay - dest->x1;
if (dsp_w < 4)
dsp_w = 4;
@@ -1282,8 +1405,9 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_h = drm_rect_height(dest);
if (dest->y1 + dsp_h > adjusted_mode->vdisplay) {
- drm_err(vop2->drm, "vp%d %s dest->y1[%d] + dsp_h[%d] exceed mode vdisplay[%d]\n",
- vp->id, win->data->name, dest->y1, dsp_h, adjusted_mode->vdisplay);
+ drm_dbg_kms(vop2->drm,
+ "vp%d %s dest->y1[%d] + dsp_h[%d] exceed mode vdisplay[%d]\n",
+ vp->id, win->data->name, dest->y1, dsp_h, adjusted_mode->vdisplay);
dsp_h = adjusted_mode->vdisplay - dest->y1;
if (dsp_h < 4)
dsp_h = 4;
@@ -1296,15 +1420,15 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
*/
if (!(win->data->feature & WIN_FEATURE_AFBDC)) {
if (actual_w > dsp_w && (actual_w & 0xf) == 1) {
- drm_err(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n",
- vp->id, win->data->name, actual_w);
+ drm_dbg_kms(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n",
+ vp->id, win->data->name, actual_w);
actual_w -= 1;
}
}
if (afbc_en && actual_w % 4) {
- drm_err(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n",
- vp->id, win->data->name, actual_w);
+ drm_dbg_kms(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n",
+ vp->id, win->data->name, actual_w);
actual_w = ALIGN_DOWN(actual_w, 4);
}
@@ -1320,20 +1444,28 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
&fb->format->format,
afbc_en ? "AFBC" : "", &yrgb_mst);
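+	/* RK3588 only: program per-window AXI bus and read IDs (reserved on RK3568) */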
+ if (vop2->data->soc_id > 3568) {
+ vop2_win_write(win, VOP2_WIN_AXI_BUS_ID, win->data->axi_bus_id);
+ vop2_win_write(win, VOP2_WIN_AXI_YRGB_R_ID, win->data->axi_yrgb_r_id);
+ vop2_win_write(win, VOP2_WIN_AXI_UV_R_ID, win->data->axi_uv_r_id);
+ }
+
if (vop2_cluster_window(win))
vop2_win_write(win, VOP2_WIN_AFBC_HALF_BLOCK_EN, half_block_en);
if (afbc_en) {
- u32 stride;
+ u32 stride, block_w;
+
+ /* the afbc superblock is 16 x 16 or 32 x 8 */
+ block_w = fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 ? 32 : 16;
- /* the afbc superblock is 16 x 16 */
afbc_format = vop2_convert_afbc_format(fb->format->format);
/* Enable color transform for YTR */
if (fb->modifier & AFBC_FORMAT_MOD_YTR)
afbc_format |= (1 << 4);
- afbc_tile_num = ALIGN(actual_w, 16) >> 4;
+ afbc_tile_num = ALIGN(actual_w, block_w) / block_w;
/*
* AFBC pic_vir_width is count by pixel, this is different
@@ -1341,8 +1473,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
*/
stride = (fb->pitches[0] << 3) / bpp;
if ((stride & 0x3f) && (xmirror || rotate_90 || rotate_270))
- drm_err(vop2->drm, "vp%d %s stride[%d] not 64 pixel aligned\n",
- vp->id, win->data->name, stride);
+ drm_dbg_kms(vop2->drm, "vp%d %s stride[%d] not 64 pixel aligned\n",
+ vp->id, win->data->name, stride);
+
+		/* Compute the AFBC header stride; each header block is 16 bytes */
+ stride = ALIGN(stride, block_w) / block_w * 16;
uv_swap = vop2_afbc_uv_swap(fb->format->format);
/*
@@ -1374,7 +1509,11 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
else
vop2_win_write(win, VOP2_WIN_AFBC_AUTO_GATING_EN, 1);
- vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
+ if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
+ vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 1);
+ else
+ vop2_win_write(win, VOP2_WIN_AFBC_BLOCK_SPLIT_EN, 0);
+
transform_offset = vop2_afbc_transform_offset(pstate, half_block_en);
vop2_win_write(win, VOP2_WIN_AFBC_HDR_PTR, yrgb_mst);
vop2_win_write(win, VOP2_WIN_AFBC_PIC_SIZE, act_info);
@@ -1482,6 +1621,77 @@ static bool vop2_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
+static void vop2_crtc_write_gamma_lut(struct vop2 *vop2, struct drm_crtc *crtc)
+{
+ const struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ const struct vop2_video_port_data *vp_data = &vop2->data->vp[vp->id];
+ struct drm_color_lut *lut = crtc->state->gamma_lut->data;
+ unsigned int i, bpc = ilog2(vp_data->gamma_lut_len);
+ u32 word;
+
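+	/* Each LUT word packs blue, green and red fields of bpc bits each */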
+ for (i = 0; i < crtc->gamma_size; i++) {
+ word = (drm_color_lut_extract(lut[i].blue, bpc) << (2 * bpc)) |
+ (drm_color_lut_extract(lut[i].green, bpc) << bpc) |
+ drm_color_lut_extract(lut[i].red, bpc);
+
+ writel(word, vop2->lut_regs + i * 4);
+ }
+}
+
+static void vop2_crtc_atomic_set_gamma_seamless(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc)
+{
+ vop2_writel(vop2, RK3568_LUT_PORT_SEL,
+ FIELD_PREP(RK3588_LUT_PORT_SEL__GAMMA_AHB_WRITE_SEL, vp->id));
+ vop2_vp_dsp_lut_enable(vp);
+ vop2_crtc_write_gamma_lut(vop2, crtc);
+ vop2_vp_dsp_lut_update_enable(vp);
+}
+
+static void vop2_crtc_atomic_set_gamma_rk356x(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc)
+{
+ vop2_vp_dsp_lut_disable(vp);
+ vop2_cfg_done(vp);
+ if (!vop2_vp_dsp_lut_poll_disabled(vp))
+ return;
+
+ vop2_writel(vop2, RK3568_LUT_PORT_SEL, vp->id);
+ vop2_crtc_write_gamma_lut(vop2, crtc);
+ vop2_vp_dsp_lut_enable(vp);
+}
+
+static void vop2_crtc_atomic_try_set_gamma(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ if (!vop2->lut_regs)
+ return;
+
+ if (!crtc_state->gamma_lut) {
+ vop2_vp_dsp_lut_disable(vp);
+ return;
+ }
+
+ if (vop2_supports_seamless_gamma_lut_update(vop2))
+ vop2_crtc_atomic_set_gamma_seamless(vop2, vp, crtc);
+ else
+ vop2_crtc_atomic_set_gamma_rk356x(vop2, vp, crtc);
+}
+
+static inline void vop2_crtc_atomic_try_set_gamma_locked(struct vop2 *vop2,
+ struct vop2_video_port *vp,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ vop2_lock(vop2);
+ vop2_crtc_atomic_try_set_gamma(vop2, vp, crtc, crtc_state);
+ vop2_unlock(vop2);
+}
+
static void vop2_dither_setup(struct drm_crtc *crtc, u32 *dsp_ctrl)
{
struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(crtc->state);
@@ -1721,9 +1931,9 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
else
dclk_out_rate = v_pixclk >> 2;
- dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
if (!dclk_rate) {
- drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld KHZ\n",
+ drm_err(vop2->drm, "DP dclk_out_rate out of range, dclk_out_rate: %ld Hz\n",
dclk_out_rate);
return 0;
}
@@ -1738,9 +1948,9 @@ static unsigned long rk3588_calc_cru_cfg(struct vop2_video_port *vp, int id,
* dclk_rate = N * dclk_core_rate N = (1,2,4 ),
* we get a little factor here
*/
- dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000);
+ dclk_rate = rk3588_calc_dclk(dclk_out_rate, 600000000);
if (!dclk_rate) {
- drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld KHZ\n",
+ drm_err(vop2->drm, "MIPI dclk out of range, dclk_out_rate: %ld Hz\n",
dclk_out_rate);
return 0;
}
@@ -2057,11 +2267,40 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
vop2_vp_write(vp, RK3568_VP_DSP_CTRL, dsp_ctrl);
+ vop2_crtc_atomic_try_set_gamma(vop2, vp, crtc, crtc_state);
+
drm_crtc_vblank_on(crtc);
vop2_unlock(vop2);
}
+static int vop2_crtc_atomic_check_gamma(struct vop2_video_port *vp,
+ struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct vop2 *vop2 = vp->vop2;
+ unsigned int len;
+
+ if (!vp->vop2->lut_regs || !crtc_state->color_mgmt_changed ||
+ !crtc_state->gamma_lut)
+ return 0;
+
+ len = drm_color_lut_size(crtc_state->gamma_lut);
+ if (len != crtc->gamma_size) {
+ drm_dbg(vop2->drm, "Invalid LUT size; got %d, expected %d\n",
+ len, crtc->gamma_size);
+ return -EINVAL;
+ }
+
+ if (!vop2_supports_seamless_gamma_lut_update(vop2) && vop2_gamma_lut_in_use(vop2, vp)) {
+ drm_info(vop2->drm, "Gamma LUT can be enabled for only one CRTC at a time\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int vop2_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -2069,6 +2308,11 @@ static int vop2_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane *plane;
int nplanes = 0;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ int ret;
+
+ ret = vop2_crtc_atomic_check_gamma(vp, crtc, state, crtc_state);
+ if (ret)
+ return ret;
drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
nplanes++;
@@ -2159,7 +2403,6 @@ static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_win)
{
- u32 offset = (main_win->data->phys_id * 0x10);
struct vop2_alpha_config alpha_config;
struct vop2_alpha alpha;
struct drm_plane_state *bottom_win_pstate;
@@ -2167,6 +2410,7 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi
u16 src_glb_alpha_val, dst_glb_alpha_val;
bool premulti_en = false;
bool swap = false;
+ u32 offset = 0;
/* At one win mode, win0 is dst/bottom win, and win1 is a all zero src/top win */
bottom_win_pstate = main_win->base.state;
@@ -2185,6 +2429,22 @@ static void vop2_setup_cluster_alpha(struct vop2 *vop2, struct vop2_win *main_wi
vop2_parse_alpha(&alpha_config, &alpha);
alpha.src_color_ctrl.bits.src_dst_swap = swap;
+
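+	/* The per-cluster mix register blocks are laid out 0x10 apart */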
+ switch (main_win->data->phys_id) {
+ case ROCKCHIP_VOP2_CLUSTER0:
+ offset = 0x0;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER1:
+ offset = 0x10;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER2:
+ offset = 0x20;
+ break;
+ case ROCKCHIP_VOP2_CLUSTER3:
+ offset = 0x30;
+ break;
+ }
+
vop2_writel(vop2, RK3568_CLUSTER0_MIX_SRC_COLOR_CTRL + offset,
alpha.src_color_ctrl.val);
vop2_writel(vop2, RK3568_CLUSTER0_MIX_DST_COLOR_CTRL + offset,
@@ -2232,6 +2492,12 @@ static void vop2_setup_alpha(struct vop2_video_port *vp)
struct vop2_win *win = to_vop2_win(plane);
int zpos = plane->state->normalized_zpos;
+ /*
+		 * Alpha blending only needs to be configured from the second layer onwards.
+ */
+ if (zpos == 0)
+ continue;
+
if (plane->state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
premulti_en = 1;
else
@@ -2308,7 +2574,10 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
struct drm_plane *plane;
u32 layer_sel = 0;
u32 port_sel;
- unsigned int nlayer, ofs;
+ u8 layer_id;
+ u8 old_layer_id;
+ u8 layer_sel_id;
+ unsigned int ofs;
u32 ovl_ctrl;
int i;
struct vop2_video_port *vp0 = &vop2->vps[0];
@@ -2352,9 +2621,30 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
for (i = 0; i < vp->id; i++)
ofs += vop2->vps[i].nlayers;
- nlayer = 0;
drm_atomic_crtc_for_each_plane(plane, &vp->crtc) {
struct vop2_win *win = to_vop2_win(plane);
+ struct vop2_win *old_win;
+
+ layer_id = (u8)(plane->state->normalized_zpos + ofs);
+
+ /*
+		 * Find the layer this window was bound to in the old state.
+ */
+ for (old_layer_id = 0; old_layer_id < vop2->data->win_size; old_layer_id++) {
+ layer_sel_id = (layer_sel >> (4 * old_layer_id)) & 0xf;
+ if (layer_sel_id == win->data->layer_sel_id)
+ break;
+ }
+
+ /*
+		 * Find the window that was bound to this layer in the old state.
+ */
+ for (i = 0; i < vop2->data->win_size; i++) {
+ old_win = &vop2->win[i];
+ layer_sel_id = (layer_sel >> (4 * layer_id)) & 0xf;
+ if (layer_sel_id == old_win->data->layer_sel_id)
+ break;
+ }
switch (win->data->phys_id) {
case ROCKCHIP_VOP2_CLUSTER0:
@@ -2399,17 +2689,14 @@ static void vop2_setup_layer_mixer(struct vop2_video_port *vp)
break;
}
- layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
- 0x7);
- layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(plane->state->normalized_zpos + ofs,
- win->data->layer_sel_id);
- nlayer++;
- }
-
- /* configure unused layers to 0x5 (reserved) */
- for (; nlayer < vp->nlayers; nlayer++) {
- layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 0x7);
- layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(nlayer + ofs, 5);
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(layer_id, 0x7);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(layer_id, win->data->layer_sel_id);
+ /*
+		 * When we bind a window from layer M to layer N, we must also move the
+		 * old window on layer N to layer M so that no window is selected by two
+		 * or more layers at once.
+ */
+ layer_sel &= ~RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, 0x7);
+ layer_sel |= RK3568_OVL_LAYER_SEL__LAYER(old_layer_id, old_win->data->layer_sel_id);
}
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
@@ -2444,9 +2731,11 @@ static void vop2_setup_dly_for_windows(struct vop2 *vop2)
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__ESMART1, dly);
break;
case ROCKCHIP_VOP2_SMART0:
+ case ROCKCHIP_VOP2_ESMART2:
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART0, dly);
break;
case ROCKCHIP_VOP2_SMART1:
+ case ROCKCHIP_VOP2_ESMART3:
sdly |= FIELD_PREP(RK3568_SMART_DLY_NUM__SMART1, dly);
break;
}
@@ -2487,7 +2776,13 @@ static void vop2_crtc_atomic_begin(struct drm_crtc *crtc,
static void vop2_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct vop2 *vop2 = vp->vop2;
+
+ /* In case of modeset, gamma lut update already happened in atomic enable */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state) && crtc_state->color_mgmt_changed)
+ vop2_crtc_atomic_try_set_gamma_locked(vop2, vp, crtc, crtc_state);
vop2_post_config(crtc);
@@ -2513,6 +2808,228 @@ static const struct drm_crtc_helper_funcs vop2_crtc_helper_funcs = {
.atomic_disable = vop2_crtc_atomic_disable,
};
+static void vop2_dump_connector_on_crtc(struct drm_crtc *crtc, struct seq_file *s)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ drm_connector_list_iter_begin(crtc->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (crtc->state->connector_mask & drm_connector_mask(connector))
+ seq_printf(s, " Connector: %s\n", connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static int vop2_plane_state_dump(struct seq_file *s, struct drm_plane *plane)
+{
+ struct vop2_win *win = to_vop2_win(plane);
+ struct drm_plane_state *pstate = plane->state;
+ struct drm_rect *src, *dst;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *obj;
+ struct rockchip_gem_object *rk_obj;
+ bool xmirror;
+ bool ymirror;
+ bool rotate_270;
+ bool rotate_90;
+ dma_addr_t fb_addr;
+ int i;
+
+ seq_printf(s, " %s: %s\n", win->data->name, !pstate ?
+ "DISABLED" : pstate->crtc ? "ACTIVE" : "DISABLED");
+
+ if (!pstate || !pstate->fb)
+ return 0;
+
+ fb = pstate->fb;
+ src = &pstate->src;
+ dst = &pstate->dst;
+ xmirror = pstate->rotation & DRM_MODE_REFLECT_X ? true : false;
+ ymirror = pstate->rotation & DRM_MODE_REFLECT_Y ? true : false;
+ rotate_270 = pstate->rotation & DRM_MODE_ROTATE_270;
+ rotate_90 = pstate->rotation & DRM_MODE_ROTATE_90;
+
+ seq_printf(s, "\twin_id: %d\n", win->win_id);
+
+ seq_printf(s, "\tformat: %p4cc%s glb_alpha[0x%x]\n",
+ &fb->format->format,
+ drm_is_afbc(fb->modifier) ? "[AFBC]" : "",
+ pstate->alpha >> 8);
+ seq_printf(s, "\trotate: xmirror: %d ymirror: %d rotate_90: %d rotate_270: %d\n",
+ xmirror, ymirror, rotate_90, rotate_270);
+ seq_printf(s, "\tzpos: %d\n", pstate->normalized_zpos);
+ seq_printf(s, "\tsrc: pos[%d, %d] rect[%d x %d]\n", src->x1 >> 16,
+ src->y1 >> 16, drm_rect_width(src) >> 16,
+ drm_rect_height(src) >> 16);
+ seq_printf(s, "\tdst: pos[%d, %d] rect[%d x %d]\n", dst->x1, dst->y1,
+ drm_rect_width(dst), drm_rect_height(dst));
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ obj = fb->obj[i];
+ rk_obj = to_rockchip_obj(obj);
+ fb_addr = rk_obj->dma_addr + fb->offsets[i];
+
+ seq_printf(s, "\tbuf[%d]: addr: %pad pitch: %d offset: %d\n",
+ i, &fb_addr, fb->pitches[i], fb->offsets[i]);
+ }
+
+ return 0;
+}
+
+static int vop2_crtc_state_dump(struct drm_crtc *crtc, struct seq_file *s)
+{
+ struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct drm_crtc_state *cstate = crtc->state;
+ struct rockchip_crtc_state *vcstate;
+ struct drm_display_mode *mode;
+ struct drm_plane *plane;
+ bool interlaced;
+
+ seq_printf(s, "Video Port%d: %s\n", vp->id, !cstate ?
+ "DISABLED" : cstate->active ? "ACTIVE" : "DISABLED");
+
+ if (!cstate || !cstate->active)
+ return 0;
+
+ mode = &crtc->state->adjusted_mode;
+ vcstate = to_rockchip_crtc_state(cstate);
+ interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+ vop2_dump_connector_on_crtc(crtc, s);
+ seq_printf(s, "\tbus_format[%x]: %s\n", vcstate->bus_format,
+ drm_get_bus_format_name(vcstate->bus_format));
+ seq_printf(s, "\toutput_mode[%x]", vcstate->output_mode);
+ seq_printf(s, " color_space[%d]\n", vcstate->color_space);
+ seq_printf(s, " Display mode: %dx%d%s%d\n",
+ mode->hdisplay, mode->vdisplay, interlaced ? "i" : "p",
+ drm_mode_vrefresh(mode));
+ seq_printf(s, "\tclk[%d] real_clk[%d] type[%x] flag[%x]\n",
+ mode->clock, mode->crtc_clock, mode->type, mode->flags);
+ seq_printf(s, "\tH: %d %d %d %d\n", mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal);
+ seq_printf(s, "\tV: %d %d %d %d\n", mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ vop2_plane_state_dump(s, plane);
+ }
+
+ return 0;
+}
+
+static int vop2_summary_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *drm_dev = minor->dev;
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock_all(drm_dev);
+ drm_for_each_crtc(crtc, drm_dev) {
+ vop2_crtc_state_dump(crtc, s);
+ }
+ drm_modeset_unlock_all(drm_dev);
+
+ return 0;
+}
+
+static void vop2_regs_print(struct vop2 *vop2, struct seq_file *s,
+ const struct vop2_regs_dump *dump, bool active_only)
+{
+ resource_size_t start;
+ u32 val;
+ int i;
+
+ if (dump->en_mask && active_only) {
+ val = vop2_readl(vop2, dump->base + dump->en_reg);
+ if ((val & dump->en_mask) != dump->en_val)
+ return;
+ }
+
+ seq_printf(s, "\n%s:\n", dump->name);
+
+ start = vop2->res->start + dump->base;
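+	/* Dump four 32-bit registers per line, prefixed by their bus address */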
+ for (i = 0; i < dump->size >> 2; i += 4) {
+ seq_printf(s, "%08x: %08x %08x %08x %08x\n", (u32)start + i * 4,
+ vop2_readl(vop2, dump->base + (4 * i)),
+ vop2_readl(vop2, dump->base + (4 * (i + 1))),
+ vop2_readl(vop2, dump->base + (4 * (i + 2))),
+ vop2_readl(vop2, dump->base + (4 * (i + 3))));
+ }
+}
+
+static void __vop2_regs_dump(struct seq_file *s, bool active_only)
+{
+ struct drm_info_node *node = s->private;
+ struct vop2 *vop2 = node->info_ent->data;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *drm_dev = minor->dev;
+ const struct vop2_regs_dump *dump;
+ unsigned int i;
+
+ drm_modeset_lock_all(drm_dev);
+
+ regcache_drop_region(vop2->map, 0, vop2_regmap_config.max_register);
+
+ if (vop2->enable_count) {
+ for (i = 0; i < vop2->data->regs_dump_size; i++) {
+ dump = &vop2->data->regs_dump[i];
+ vop2_regs_print(vop2, s, dump, active_only);
+ }
+ } else {
+ seq_puts(s, "VOP disabled\n");
+ }
+ drm_modeset_unlock_all(drm_dev);
+}
+
+static int vop2_regs_show(struct seq_file *s, void *arg)
+{
+ __vop2_regs_dump(s, false);
+
+ return 0;
+}
+
+static int vop2_active_regs_show(struct seq_file *s, void *data)
+{
+ __vop2_regs_dump(s, true);
+
+ return 0;
+}
+
+static struct drm_info_list vop2_debugfs_list[] = {
+ { "summary", vop2_summary_show, 0, NULL },
+ { "active_regs", vop2_active_regs_show, 0, NULL },
+ { "regs", vop2_regs_show, 0, NULL },
+};
+
+static void vop2_debugfs_init(struct vop2 *vop2, struct drm_minor *minor)
+{
+ struct dentry *root;
+ unsigned int i;
+
+ root = debugfs_create_dir("vop2", minor->debugfs_root);
+ if (!IS_ERR(root)) {
+ for (i = 0; i < ARRAY_SIZE(vop2_debugfs_list); i++)
+ vop2_debugfs_list[i].data = vop2;
+
+ drm_debugfs_create_files(vop2_debugfs_list,
+ ARRAY_SIZE(vop2_debugfs_list),
+ root, minor);
+ }
+}
+
+static int vop2_crtc_late_register(struct drm_crtc *crtc)
+{
+ struct vop2_video_port *vp = to_vop2_video_port(crtc);
+ struct vop2 *vop2 = vp->vop2;
+
+ if (drm_crtc_index(crtc) == 0)
+ vop2_debugfs_init(vop2, crtc->dev->primary);
+
+ return 0;
+}
+
static struct drm_crtc_state *vop2_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct rockchip_crtc_state *vcstate;
@@ -2562,6 +3079,7 @@ static const struct drm_crtc_funcs vop2_crtc_funcs = {
.atomic_destroy_state = vop2_crtc_destroy_state,
.enable_vblank = vop2_crtc_enable_vblank,
.disable_vblank = vop2_crtc_disable_vblank,
+ .late_register = vop2_crtc_late_register,
};
static irqreturn_t vop2_isr(int irq, void *data)
@@ -2790,7 +3308,12 @@ static int vop2_create_crtcs(struct vop2 *vop2)
}
drm_crtc_helper_add(&vp->crtc, &vop2_crtc_helper_funcs);
+ if (vop2->lut_regs) {
+ const struct vop2_video_port_data *vp_data = &vop2_data->vp[vp->id];
+ drm_mode_crtc_set_gamma_size(&vp->crtc, vp_data->gamma_lut_len);
+ drm_crtc_enable_color_mgmt(&vp->crtc, 0, false, vp_data->gamma_lut_len);
+ }
init_completion(&vp->dsp_hold_completion);
}
@@ -2865,6 +3388,10 @@ static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
[VOP2_WIN_Y2R_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 8, 8),
[VOP2_WIN_R2Y_EN] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 9, 9),
[VOP2_WIN_CSC_MODE] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL0, 10, 11),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 0, 3),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_CLUSTER_WIN_CTRL2, 5, 8),
+	/* RK3588 only, reserved bit on rk3568 */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3568_CLUSTER_CTRL, 13, 13),
/* Scale */
[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_CLUSTER_WIN_SCL_FACTOR_YRGB, 0, 15),
@@ -2957,6 +3484,10 @@ static struct reg_field vop2_esmart_regs[VOP2_WIN_MAX_REG] = {
[VOP2_WIN_YMIRROR] = REG_FIELD(RK3568_SMART_CTRL1, 31, 31),
[VOP2_WIN_COLOR_KEY] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 0, 29),
[VOP2_WIN_COLOR_KEY_EN] = REG_FIELD(RK3568_SMART_COLOR_KEY_CTRL, 31, 31),
+ [VOP2_WIN_AXI_YRGB_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 4, 8),
+ [VOP2_WIN_AXI_UV_R_ID] = REG_FIELD(RK3568_SMART_CTRL1, 12, 16),
+ /* RK3588 only, reserved register on rk3568 */
+ [VOP2_WIN_AXI_BUS_ID] = REG_FIELD(RK3588_SMART_AXI_CTRL, 1, 1),
/* Scale */
[VOP2_WIN_SCALE_YRGB_X] = REG_FIELD(RK3568_SMART_REGION0_SCL_FACTOR_YRGB, 0, 15),
@@ -3106,6 +3637,7 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return -EINVAL;
}
+ vop2->res = res;
vop2->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(vop2->regs))
return PTR_ERR(vop2->regs);
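As a worked example of the LUT packing in vop2_crtc_write_gamma_lut() above: with a 1024-entry LUT, bpc = ilog2(1024) = 10, so each channel lands in a 10-bit field. A hedged sketch follows; the helper is hypothetical, not driver code, and assumes the channel values were already reduced to 10 bits (the driver uses drm_color_lut_extract() for that step):

/* Hypothetical helper mirroring the gamma LUT word packing for bpc = 10. */
#include <linux/bits.h>
#include <linux/types.h>

static inline u32 pack_lut_word_10bit(u16 red, u16 green, u16 blue)
{
	const unsigned int bpc = 10;

	/* blue occupies bits 29:20, green bits 19:10, red bits 9:0 */
	return ((u32)(blue & GENMASK(bpc - 1, 0)) << (2 * bpc)) |
	       ((u32)(green & GENMASK(bpc - 1, 0)) << bpc) |
	       (u32)(red & GENMASK(bpc - 1, 0));
}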
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index 615a16196aff..29cc7fb8f6d8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
@@ -9,6 +9,7 @@
#include <linux/regmap.h>
#include <drm/drm_modes.h>
+#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
#define VOP2_VP_FEATURE_OUTPUT_10BIT BIT(0)
@@ -78,6 +79,9 @@ enum vop2_win_regs {
VOP2_WIN_COLOR_KEY,
VOP2_WIN_COLOR_KEY_EN,
VOP2_WIN_DITHER_UP,
+ VOP2_WIN_AXI_BUS_ID,
+ VOP2_WIN_AXI_YRGB_R_ID,
+ VOP2_WIN_AXI_UV_R_ID,
/* scale regs */
VOP2_WIN_SCALE_YRGB_X,
@@ -122,6 +126,15 @@ enum vop2_win_regs {
VOP2_WIN_MAX_REG,
};
+struct vop2_regs_dump {
+ const char *name;
+ u32 base;
+ u32 size;
+ u32 en_reg;
+ u32 en_val;
+ u32 en_mask;
+};
+
struct vop2_win_data {
const char *name;
unsigned int phys_id;
@@ -140,6 +153,10 @@ struct vop2_win_data {
unsigned int layer_sel_id;
uint64_t feature;
+ uint8_t axi_bus_id;
+ uint8_t axi_yrgb_r_id;
+ uint8_t axi_uv_r_id;
+
unsigned int max_upscale_factor;
unsigned int max_downscale_factor;
const u8 dly[VOP2_DLY_MODE_MAX];
@@ -160,10 +177,12 @@ struct vop2_data {
u64 feature;
const struct vop2_win_data *win;
const struct vop2_video_port_data *vp;
+ const struct vop2_regs_dump *regs_dump;
struct vop_rect max_input;
struct vop_rect max_output;
unsigned int win_size;
+ unsigned int regs_dump_size;
unsigned int soc_id;
};
@@ -308,6 +327,7 @@ enum dst_factor_mode {
#define RK3568_CLUSTER_WIN_CTRL0 0x00
#define RK3568_CLUSTER_WIN_CTRL1 0x04
+#define RK3568_CLUSTER_WIN_CTRL2 0x08
#define RK3568_CLUSTER_WIN_YRGB_MST 0x10
#define RK3568_CLUSTER_WIN_CBR_MST 0x14
#define RK3568_CLUSTER_WIN_VIR 0x18
@@ -330,6 +350,7 @@ enum dst_factor_mode {
/* (E)smart register definition, offset relative to window base */
#define RK3568_SMART_CTRL0 0x00
#define RK3568_SMART_CTRL1 0x04
+#define RK3588_SMART_AXI_CTRL 0x08
#define RK3568_SMART_REGION0_CTRL 0x10
#define RK3568_SMART_REGION0_YRGB_MST 0x14
#define RK3568_SMART_REGION0_CBR_MST 0x18
@@ -394,6 +415,7 @@ enum dst_factor_mode {
#define RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN BIT(15)
#define RK3568_VP_DSP_CTRL__STANDBY BIT(31)
+#define RK3568_VP_DSP_CTRL__DSP_LUT_EN BIT(28)
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_MODE BIT(20)
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_SEL GENMASK(19, 18)
#define RK3568_VP_DSP_CTRL__DITHER_DOWN_EN BIT(17)
@@ -408,6 +430,8 @@ enum dst_factor_mode {
#define RK3568_VP_DSP_CTRL__CORE_DCLK_DIV BIT(4)
#define RK3568_VP_DSP_CTRL__OUT_MODE GENMASK(3, 0)
+#define RK3588_VP_DSP_CTRL__GAMMA_UPDATE_EN BIT(22)
+
#define RK3588_VP_CLK_CTRL__DCLK_OUT_DIV GENMASK(3, 2)
#define RK3588_VP_CLK_CTRL__DCLK_CORE_DIV GENMASK(1, 0)
@@ -460,6 +484,8 @@ enum dst_factor_mode {
#define RK3588_DSP_IF_POL__DP1_PIN_POL GENMASK(14, 12)
#define RK3588_DSP_IF_POL__DP0_PIN_POL GENMASK(10, 8)
+#define RK3588_LUT_PORT_SEL__GAMMA_AHB_WRITE_SEL GENMASK(13, 12)
+
#define RK3568_VP0_MIPI_CTRL__DCLK_DIV2_PHASE_LOCK BIT(5)
#define RK3568_VP0_MIPI_CTRL__DCLK_DIV2 BIT(4)
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 9a01aa450741..385cf6881504 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Mark Yao <mark.yao@rock-chips.com>
* Sandy Huang <hjc@rock-chips.com>
@@ -746,7 +746,7 @@ static void rockchip_lvds_remove(struct platform_device *pdev)
struct platform_driver rockchip_lvds_driver = {
.probe = rockchip_lvds_probe,
- .remove_new = rockchip_lvds_remove,
+ .remove = rockchip_lvds_remove,
.driver = {
.name = "rockchip-lvds",
.of_match_table = rockchip_lvds_dt_ids,
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
index 4ce967d23813..ca83d7b6bea7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.h
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Sandy Huang <hjc@rock-chips.com>
* Mark Yao <mark.yao@rock-chips.com>
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index dbfbde24698e..811020665120 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Sandy Huang <hjc@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
index 1bd4e20e91eb..116f958b894d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Sandy Huang <hjc@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 18efb3fe1c00..65a88f489693 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author: Andy Yan <andy.yan@rock-chips.com>
*/
@@ -258,6 +258,88 @@ static const struct vop2_win_data rk3568_vop_win_data[] = {
},
};
+static const struct vop2_regs_dump rk3568_regs_dump[] = {
+ {
+ .name = "SYS",
+ .base = RK3568_REG_CFG_DONE,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0
+ }, {
+ .name = "OVL",
+ .base = RK3568_OVL_CTRL,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "VP0",
+ .base = RK3568_VP0_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP1",
+ .base = RK3568_VP1_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP2",
+ .base = RK3568_VP2_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+
+ }, {
+ .name = "Cluster0",
+ .base = RK3568_CLUSTER0_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster1",
+ .base = RK3568_CLUSTER1_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Esmart0",
+ .base = RK3568_ESMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart1",
+ .base = RK3568_ESMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Smart0",
+ .base = RK3568_SMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Smart1",
+ .base = RK3568_SMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ },
+};
+
static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
{
.id = 0,
@@ -313,7 +395,7 @@ static const struct vop2_video_port_data rk3588_vop_video_ports[] = {
* AXI1 is a read only bus.
*
 * Every window on an AXI bus must be assigned two unique
- * read id(yrgb_id/uv_id, valid id are 0x1~0xe).
+ * read id(yrgb_r_id/uv_r_id, valid id are 0x1~0xe).
*
* AXI0:
* Cluster0/1, Esmart0/1, WriteBack
@@ -333,6 +415,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 0,
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 2,
+ .axi_uv_r_id = 3,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -349,6 +434,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 6,
+ .axi_uv_r_id = 7,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -364,6 +452,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 2,
+ .axi_uv_r_id = 3,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -379,6 +470,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.supported_rotations = DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_PRIMARY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 6,
+ .axi_uv_r_id = 7,
.max_upscale_factor = 4,
.max_downscale_factor = 4,
.dly = { 4, 26, 29 },
@@ -393,6 +487,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 2,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0x0a,
+ .axi_uv_r_id = 0x0b,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
@@ -406,6 +503,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 3,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 0,
+ .axi_yrgb_r_id = 0x0c,
+ .axi_uv_r_id = 0x0d,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
@@ -419,6 +519,9 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 6,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 0x0a,
+ .axi_uv_r_id = 0x0b,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
@@ -432,12 +535,118 @@ static const struct vop2_win_data rk3588_vop_win_data[] = {
.layer_sel_id = 7,
.supported_rotations = DRM_MODE_REFLECT_Y,
.type = DRM_PLANE_TYPE_OVERLAY,
+ .axi_bus_id = 1,
+ .axi_yrgb_r_id = 0x0c,
+ .axi_uv_r_id = 0x0d,
.max_upscale_factor = 8,
.max_downscale_factor = 8,
.dly = { 23, 45, 48 },
},
};
+static const struct vop2_regs_dump rk3588_regs_dump[] = {
+ {
+ .name = "SYS",
+ .base = RK3568_REG_CFG_DONE,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0
+ }, {
+ .name = "OVL",
+ .base = RK3568_OVL_CTRL,
+ .size = 0x100,
+ .en_reg = 0,
+ .en_val = 0,
+ .en_mask = 0,
+ }, {
+ .name = "VP0",
+ .base = RK3568_VP0_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP1",
+ .base = RK3568_VP1_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP2",
+ .base = RK3568_VP2_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "VP3",
+ .base = RK3588_VP3_CTRL_BASE,
+ .size = 0x100,
+ .en_reg = RK3568_VP_DSP_CTRL,
+ .en_val = 0,
+ .en_mask = RK3568_VP_DSP_CTRL__STANDBY,
+ }, {
+ .name = "Cluster0",
+ .base = RK3568_CLUSTER0_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster1",
+ .base = RK3568_CLUSTER1_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster2",
+ .base = RK3588_CLUSTER2_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Cluster3",
+ .base = RK3588_CLUSTER3_CTRL_BASE,
+ .size = 0x110,
+ .en_reg = RK3568_CLUSTER_WIN_CTRL0,
+ .en_val = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ .en_mask = RK3568_CLUSTER_WIN_CTRL0__WIN0_EN,
+ }, {
+ .name = "Esmart0",
+ .base = RK3568_ESMART0_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart1",
+ .base = RK3568_ESMART1_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart2",
+ .base = RK3588_ESMART2_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ }, {
+ .name = "Esmart3",
+ .base = RK3588_ESMART3_CTRL_BASE,
+ .size = 0xf0,
+ .en_reg = RK3568_SMART_REGION0_CTRL,
+ .en_val = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ .en_mask = RK3568_SMART_REGION0_CTRL__WIN0_EN,
+ },
+};
+
static const struct vop2_data rk3566_vop = {
.feature = VOP2_FEATURE_HAS_SYS_GRF,
.nr_vps = 3,
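The AXI comment above imposes two invariants on these win_data tables: every read id lies in the range 0x1~0xe, and no id repeats on the same bus. A hypothetical self-check, not part of the driver, that could assert this at init time:

static bool vop2_axi_ids_valid(const struct vop2_win_data *win, unsigned int n)
{
	unsigned long used[2] = { 0, 0 };	/* one id bitmap per AXI bus */
	unsigned int i, j;

	for (i = 0; i < n; i++) {
		unsigned int ids[2] = { win[i].axi_yrgb_r_id, win[i].axi_uv_r_id };

		for (j = 0; j < 2; j++) {
			if (ids[j] < 0x1 || ids[j] > 0xe)
				return false;	/* outside the valid id range */
			if (__test_and_set_bit(ids[j], &used[win[i].axi_bus_id]))
				return false;	/* id reused on this bus */
		}
	}

	return true;
}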
@@ -446,6 +655,8 @@ static const struct vop2_data rk3566_vop = {
.vp = rk3568_vop_video_ports,
.win = rk3568_vop_win_data,
.win_size = ARRAY_SIZE(rk3568_vop_win_data),
+ .regs_dump = rk3568_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3568_regs_dump),
.soc_id = 3566,
};
@@ -457,6 +668,8 @@ static const struct vop2_data rk3568_vop = {
.vp = rk3568_vop_video_ports,
.win = rk3568_vop_win_data,
.win_size = ARRAY_SIZE(rk3568_vop_win_data),
+ .regs_dump = rk3568_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3568_regs_dump),
.soc_id = 3568,
};
@@ -469,6 +682,8 @@ static const struct vop2_data rk3588_vop = {
.vp = rk3588_vop_video_ports,
.win = rk3588_vop_win_data,
.win_size = ARRAY_SIZE(rk3588_vop_win_data),
+ .regs_dump = rk3588_regs_dump,
+ .regs_dump_size = ARRAY_SIZE(rk3588_regs_dump),
.soc_id = 3588,
};
@@ -501,7 +716,7 @@ static void vop2_remove(struct platform_device *pdev)
struct platform_driver vop2_platform_driver = {
.probe = vop2_probe,
- .remove_new = vop2_remove,
+ .remove = vop2_remove,
.driver = {
.name = "rockchip-vop2",
.of_match_table = vop2_dt_match,
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index e2c6ba26f437..4e2099d86517 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
@@ -1284,7 +1284,7 @@ static void vop_remove(struct platform_device *pdev)
struct platform_driver vop_platform_driver = {
.probe = vop_probe,
- .remove_new = vop_remove,
+ .remove = vop_remove,
.driver = {
.name = "rockchip-vop",
.of_match_table = vop_driver_dt_match,
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index fbf1bcc68625..addf8ca085f6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Copyright (C) Rockchip Electronics Co., Ltd.
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index a75eede8bf8d..69bcf0e99d57 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -51,7 +51,7 @@
* drm_sched_entity_set_priority(). For changing the set of schedulers
* @sched_list at runtime see drm_sched_entity_modify_sched().
*
- * An entity is cleaned up by callind drm_sched_entity_fini(). See also
+ * An entity is cleaned up by calling drm_sched_entity_fini(). See also
* drm_sched_entity_destroy().
*
* Returns 0 on success or a negative error code on failure.
@@ -105,7 +105,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
/* We start in an idle state. */
complete_all(&entity->entity_idle);
- spin_lock_init(&entity->rq_lock);
+ spin_lock_init(&entity->lock);
spsc_queue_init(&entity->job_queue);
atomic_set(&entity->fence_seq, 0);
@@ -133,10 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
{
WARN_ON(!num_sched_list || !sched_list);
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->sched_list = sched_list;
entity->num_sched_list = num_sched_list;
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
@@ -244,10 +244,10 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
if (!entity->rq)
return;
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->stopped = true;
drm_sched_rq_remove_entity(entity->rq, entity);
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
/* Make sure this entity is not used by the scheduler at the moment */
wait_for_completion(&entity->entity_idle);
@@ -372,8 +372,8 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f,
}
/*
- * drm_sched_entity_clear_dep - callback to clear the entities dependency and
- * wake up scheduler
+ * drm_sched_entity_wakeup - callback to clear the entity's dependency and
+ * wake up the scheduler
*/
static void drm_sched_entity_wakeup(struct dma_fence *f,
struct dma_fence_cb *cb)
@@ -391,14 +391,14 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
* @entity: scheduler entity
* @priority: scheduler priority
*
- * Update the priority of runqueus used for the entity.
+ * Update the priority of runqueues used for the entity.
*/
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority)
{
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
entity->priority = priority;
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -514,8 +514,17 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
struct drm_sched_job *next;
next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
- if (next)
- drm_sched_rq_update_fifo(entity, next->submit_ts);
+ if (next) {
+ struct drm_sched_rq *rq;
+
+ spin_lock(&entity->lock);
+ rq = entity->rq;
+ spin_lock(&rq->lock);
+ drm_sched_rq_update_fifo_locked(entity, rq,
+ next->submit_ts);
+ spin_unlock(&rq->lock);
+ spin_unlock(&entity->lock);
+ }
}
/* Jobs and entities might have different lifecycles. Since we're
@@ -555,14 +564,14 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
if (fence && !dma_fence_is_signaled(fence))
return;
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
rq = sched ? sched->sched_rq[entity->priority] : NULL;
if (rq != entity->rq) {
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
}
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
if (entity->num_sched_list == 1)
entity->sched_list = NULL;
@@ -576,8 +585,6 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
* fence sequence number this function should be called with drm_sched_job_arm()
* under common lock for the struct drm_sched_entity that was set up for
* @sched_job in drm_sched_job_init().
- *
- * Returns 0 for success, negative error code otherwise.
*/
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
@@ -603,9 +610,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
struct drm_sched_rq *rq;
/* Add the entity to the run queue */
- spin_lock(&entity->rq_lock);
+ spin_lock(&entity->lock);
if (entity->stopped) {
- spin_unlock(&entity->rq_lock);
+ spin_unlock(&entity->lock);
DRM_ERROR("Trying to push to a killed entity\n");
return;
@@ -614,11 +621,14 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
rq = entity->rq;
sched = rq->sched;
+ spin_lock(&rq->lock);
drm_sched_rq_add_entity(rq, entity);
- spin_unlock(&entity->rq_lock);
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
- drm_sched_rq_update_fifo(entity, submit_ts);
+ drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);
+
+ spin_unlock(&rq->lock);
+ spin_unlock(&entity->lock);
drm_sched_wakeup(sched);
}
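Taken together, the sched_entity.c hunks establish a fixed nesting order for the two scheduler locks: entity->lock (formerly rq_lock) is always taken first, rq->lock second, and the FIFO update now runs with both already held instead of taking them itself. The push path condenses to:

	spin_lock(&entity->lock);		/* stabilizes entity->rq */
	rq = entity->rq;
	spin_lock(&rq->lock);			/* protects rq->entities and the rb tree */
	drm_sched_rq_add_entity(rq, entity);
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);
	spin_unlock(&rq->lock);
	spin_unlock(&entity->lock);

The lockdep_assert_held() calls added to sched_main.c below enforce the same order on every other caller.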
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 6f27cab0b76d..57da84908752 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -41,7 +41,7 @@
* 4. Entities themselves maintain a queue of jobs that will be scheduled on
* the hardware.
*
- * The jobs in a entity are always scheduled in the order that they were pushed.
+ * The jobs in an entity are always scheduled in the order in which they were pushed.
*
* Note that once a job was taken from the entities queue and pushed to the
* hardware, i.e. the pending queue, the entity must not be referenced anymore
@@ -87,6 +87,12 @@
#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map drm_sched_lockdep_map = {
+ .name = "drm_sched_lockdep_map"
+};
+#endif
+
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
@@ -153,35 +159,33 @@ static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}
-static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
+static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq)
{
- struct drm_sched_rq *rq = entity->rq;
-
if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
RB_CLEAR_NODE(&entity->rb_tree_node);
}
}
-void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
+void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
+ ktime_t ts)
{
/*
* Both locks need to be held: one protects against a concurrent
* entity->rq change from within drm_sched_entity_select_rq(), the
* other protects the rb tree structure.
*/
- spin_lock(&entity->rq_lock);
- spin_lock(&entity->rq->lock);
+ lockdep_assert_held(&entity->lock);
+ lockdep_assert_held(&rq->lock);
- drm_sched_rq_remove_fifo_locked(entity);
+ drm_sched_rq_remove_fifo_locked(entity, rq);
entity->oldest_job_waiting = ts;
- rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
+ rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
drm_sched_entity_compare_before);
-
- spin_unlock(&entity->rq->lock);
- spin_unlock(&entity->rq_lock);
}
/**
@@ -213,15 +217,14 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
+ lockdep_assert_held(&entity->lock);
+ lockdep_assert_held(&rq->lock);
+
if (!list_empty(&entity->list))
return;
- spin_lock(&rq->lock);
-
atomic_inc(rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
-
- spin_unlock(&rq->lock);
}
/**
@@ -235,6 +238,8 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
+ lockdep_assert_held(&entity->lock);
+
if (list_empty(&entity->list))
return;
@@ -247,7 +252,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
rq->current_entity = NULL;
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
- drm_sched_rq_remove_fifo_locked(entity);
+ drm_sched_rq_remove_fifo_locked(entity, rq);
spin_unlock(&rq->lock);
}
@@ -349,7 +354,6 @@ drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
return ERR_PTR(-ENOSPC);
}
- rq->current_entity = entity;
reinit_completion(&entity->entity_idle);
break;
}
@@ -595,6 +599,9 @@ static void drm_sched_job_timedout(struct work_struct *work)
* callers responsibility to release it manually if it's not part of the
* pending list any more.
*
+ * This function is typically used for reset recovery (see the documentation of
+ * drm_sched_backend_ops.timedout_job() for details). Do not call it for
+ * scheduler teardown, i.e., before calling drm_sched_fini().
*/
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
@@ -667,16 +674,20 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
*/
cancel_delayed_work(&sched->work_tdr);
}
-
EXPORT_SYMBOL(drm_sched_stop);
/**
* drm_sched_start - recover jobs after a reset
*
* @sched: scheduler instance
+ * @errno: error to set on the pending fences
*
+ * This function is typically used for reset recovery (see the documentation of
+ * drm_sched_backend_ops.timedout_job() for details). Do not call it for
+ * scheduler startup. The scheduler itself is fully operational after
+ * drm_sched_init() succeeded.
*/
-void drm_sched_start(struct drm_gpu_scheduler *sched)
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
{
struct drm_sched_job *s_job, *tmp;
@@ -691,13 +702,13 @@ void drm_sched_start(struct drm_gpu_scheduler *sched)
atomic_add(s_job->credits, &sched->credit_count);
if (!fence) {
- drm_sched_job_done(s_job, -ECANCELED);
+ drm_sched_job_done(s_job, errno ?: -ECANCELED);
continue;
}
if (dma_fence_add_callback(fence, &s_job->cb,
drm_sched_job_done_cb))
- drm_sched_job_done(s_job, fence->error);
+ drm_sched_job_done(s_job, fence->error ?: errno);
}
drm_sched_start_timeout_unlocked(sched);
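With the new errno parameter a driver can propagate a reset-specific error to the fences of jobs that were in flight. A hedged sketch of a timeout handler using the stop/start pair as the documentation above prescribes; my_reset_hw() is hypothetical:

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched = job->sched;

	drm_sched_stop(sched, job);	/* park submission, single out the bad job */
	my_reset_hw(sched);		/* hypothetical hardware reset */
	drm_sched_start(sched, -ETIME);	/* unfinished fences signal -ETIME, not -ECANCELED */

	return DRM_GPU_SCHED_STAT_NOMINAL;
}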
@@ -772,6 +783,10 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
* Drivers must make sure drm_sched_job_cleanup() is called if this function
* returns successfully, even when @job is aborted before drm_sched_job_arm()
* is called.
*
+ * Note that this function does not assign a valid value to each struct member
+ * of struct drm_sched_job. Take a look at that struct's documentation to see
+ * who sets which struct member with what lifetime.
+ *
* WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
* has died, which can mean that there's no valid runqueue for an @entity.
* This function returns -ENOENT in this case (which probably should be -EIO as
@@ -797,6 +812,14 @@ int drm_sched_job_init(struct drm_sched_job *job,
return -EINVAL;
}
+ /*
+ * We don't know how the user allocated the job structure. Thus, zero
+ * the struct so that any disallowed (i.e., too early) use of pointers
+ * this function does not set is guaranteed to lead to a NULL pointer
+ * dereference instead of undefined behavior.
+ */
+ memset(job, 0, sizeof(*job));
+
job->entity = entity;
job->credits = credits;
job->s_fence = drm_sched_fence_alloc(entity, owner);
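Drivers embed struct drm_sched_job at the start of their own job structure, and the allocation may come from kmalloc() rather than kzalloc(); the memset() above makes the early-use guarantee independent of that choice. An illustrative, hypothetical driver-side shape:

struct my_job {
	struct drm_sched_job base;	/* zeroed and set up by drm_sched_job_init() */
	u32 *cmds;			/* driver-private members, filled in afterwards */
};

	struct my_job *job = kmalloc(sizeof(*job), GFP_KERNEL);

	if (!job)
		return -ENOMEM;
	/* base need not be pre-zeroed; drm_sched_job_init() now does that */
	ret = drm_sched_job_init(&job->base, entity, 1, owner);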
@@ -1269,7 +1292,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->submit_wq = submit_wq;
sched->own_submit_wq = false;
} else {
- sched->submit_wq = alloc_ordered_workqueue(name, 0);
+#ifdef CONFIG_LOCKDEP
+ sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
+ WQ_MEM_RECLAIM,
+ &drm_sched_lockdep_map);
+#else
+ sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
+#endif
if (!sched->submit_wq)
return -ENOMEM;
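Two things change here besides the annotation itself: the per-scheduler ordered submit workqueue is now allocated with WQ_MEM_RECLAIM (it sits in the dma-fence signalling path), and under CONFIG_LOCKDEP every such workqueue shares the single static drm_sched_lockdep_map above, so lockdep treats them all as one lock class and can flag cross-scheduler deadlocks that per-queue classes would hide. The same idiom, with hypothetical names, for any driver-owned queue:

#ifdef CONFIG_LOCKDEP
static struct lockdep_map my_wq_lockdep_map = {
	.name = "my_wq_lockdep_map"
};
#endif

#ifdef CONFIG_LOCKDEP
	/* every queue created here shares my_wq_lockdep_map's lock class */
	wq = alloc_ordered_workqueue_lockdep_map("my-wq", WQ_MEM_RECLAIM,
						 &my_wq_lockdep_map);
#else
	wq = alloc_ordered_workqueue("my-wq", WQ_MEM_RECLAIM);
#endif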
@@ -1321,6 +1350,20 @@ EXPORT_SYMBOL(drm_sched_init);
* @sched: scheduler instance
*
* Tears down and cleans up the scheduler.
+ *
+ * This stops submission of new jobs to the hardware through
+ * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
+ * will no longer be called for the jobs still in drm_gpu_scheduler.pending_list.
+ * There is no solution for this currently. Thus, it is up to the driver to make
+ * sure that:
+ *
+ * a) drm_sched_fini() is only called after drm_sched_backend_ops.free_job()
+ * has been called for all submitted jobs, or
+ * b) the jobs for which drm_sched_backend_ops.free_job() has not been called
+ * after drm_sched_fini() ran are freed manually.
+ *
+ * FIXME: Take care of the above problem and prevent this function from leaking
+ * the jobs in drm_gpu_scheduler.pending_list under any circumstances.
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
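Until the FIXME above is resolved, the ordering has to come from the driver. A hedged sketch of option a), with hypothetical driver state: job_count is incremented at submission and decremented from the driver's free_job() callback, which also wakes job_done_wq.

static void my_driver_teardown(struct my_device *mdev)
{
	/* wait until free_job() has run for every job ever submitted */
	wait_event(mdev->job_done_wq, atomic_read(&mdev->job_count) == 0);
	drm_sched_fini(&mdev->sched);
}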
@@ -1336,7 +1379,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
list_for_each_entry(s_entity, &rq->entities, list)
/*
* Prevents reinsertion and marks job_queue as idle,
- * it will removed from rq in drm_sched_entity_fini
+ * it will be removed from the rq in drm_sched_entity_fini()
* eventually
*/
s_entity->stopped = true;
@@ -1416,8 +1459,10 @@ EXPORT_SYMBOL(drm_sched_wqueue_ready);
/**
* drm_sched_wqueue_stop - stop scheduler submission
- *
* @sched: scheduler instance
+ *
+ * Stops the scheduler from pulling new jobs from entities. It also stops
+ * freeing jobs automatically through drm_sched_backend_ops.free_job().
*/
void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
{
@@ -1429,8 +1474,12 @@ EXPORT_SYMBOL(drm_sched_wqueue_stop);
/**
* drm_sched_wqueue_start - start scheduler submission
- *
* @sched: scheduler instance
+ *
+ * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it.
+ *
+ * This function is not necessary for 'conventional' startup. The scheduler is
+ * fully operational after drm_sched_init() succeeded.
*/
void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
{
diff --git a/drivers/gpu/drm/solomon/Kconfig b/drivers/gpu/drm/solomon/Kconfig
index c3ee956c2bb9..400a6cab3a67 100644
--- a/drivers/gpu/drm/solomon/Kconfig
+++ b/drivers/gpu/drm/solomon/Kconfig
@@ -2,6 +2,7 @@ config DRM_SSD130X
tristate "DRM support for Solomon SSD13xx OLED displays"
depends on DRM && MMU
select BACKLIGHT_CLASS_DEVICE
+ select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c
index f2ccab9c06d9..941a2eb44c57 100644
--- a/drivers/gpu/drm/solomon/ssd130x-i2c.c
+++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c
@@ -123,4 +123,4 @@ module_i2c_driver(ssd130x_i2c_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Javier Martinez Canillas <javierm@redhat.com>");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(DRM_SSD130X);
+MODULE_IMPORT_NS("DRM_SSD130X");
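These one-line hunks track the tree-wide conversion of the symbol-namespace macros from token arguments to string literals. Exporter and importer must agree on the quoted name; schematically, with a made-up namespace:

/* providing module: export into a quoted namespace */
EXPORT_SYMBOL_NS_GPL(my_helper, "MY_NS");

/* consuming module: declare the import, or modpost rejects the reference */
MODULE_IMPORT_NS("MY_NS");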
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 84bfde31d172..08334be38694 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -192,4 +192,4 @@ module_spi_driver(ssd130x_spi_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Javier Martinez Canillas <javierm@redhat.com>");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(DRM_SSD130X);
+MODULE_IMPORT_NS("DRM_SSD130X");
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 6f51bcf774e2..b777690fd660 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -18,6 +18,7 @@
#include <linux/pwm.h>
#include <linux/regulator/consumer.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
@@ -38,7 +39,6 @@
#define DRIVER_NAME "ssd130x"
#define DRIVER_DESC "DRM driver for Solomon SSD13xx OLED displays"
-#define DRIVER_DATE "20220131"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -208,7 +208,7 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
.family_id = SSD133X_FAMILY,
}
};
-EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);
+EXPORT_SYMBOL_NS_GPL(ssd130x_variants, "DRM_SSD130X");
struct ssd130x_crtc_state {
struct drm_crtc_state base;
@@ -1780,9 +1780,9 @@ DEFINE_DRM_GEM_FOPS(ssd130x_fops);
static const struct drm_driver ssd130x_drm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
@@ -2029,7 +2029,7 @@ struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap)
if (ret)
return ERR_PTR(dev_err_probe(dev, ret, "DRM device register failed\n"));
- drm_fbdev_shmem_setup(drm, 32);
+ drm_client_setup(drm, NULL);
return ssd130x;
}
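This is the recurring conversion in the rest of the series: the driver selects DRM_CLIENT_SELECTION, adds a DRM_FBDEV_*_DRIVER_OPS initializer to its struct drm_driver (which wires up .fbdev_probe), and replaces the helper-specific drm_fbdev_*_setup(drm, bpp) call with the generic client setup. Condensed, as done for ssd130x above:

static const struct drm_driver my_drm_driver = {
	DRM_GEM_SHMEM_DRIVER_OPS,
	DRM_FBDEV_SHMEM_DRIVER_OPS,	/* fbdev emulation hooks for the generic client */
	/* ... */
};

	/* was: drm_fbdev_shmem_setup(drm, 32); */
	drm_client_setup(drm, NULL);	/* NULL: use the device's preferred format */

Drivers with a fixed scanout format pass it explicitly instead, as the stm conversion below does with drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB565).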
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index deb3bb96e2a8..cb2816985305 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -866,7 +866,7 @@ static void sprd_dpu_remove(struct platform_device *pdev)
struct platform_driver sprd_dpu_driver = {
.probe = sprd_dpu_probe,
- .remove_new = sprd_dpu_remove,
+ .remove = sprd_dpu_remove,
.driver = {
.name = "sprd-dpu-drv",
.of_match_table = dpu_match_table,
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
index a74cd0caf645..ceacdcb7c566 100644
--- a/drivers/gpu/drm/sprd/sprd_drm.c
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -23,7 +23,6 @@
#define DRIVER_NAME "sprd"
#define DRIVER_DESC "Spreadtrum SoCs' DRM Driver"
-#define DRIVER_DATE "20200201"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -59,7 +58,6 @@ static struct drm_driver sprd_drm_drv = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -163,7 +161,7 @@ MODULE_DEVICE_TABLE(of, drm_match_table);
static struct platform_driver sprd_drm_driver = {
.probe = sprd_drm_probe,
- .remove_new = sprd_drm_remove,
+ .remove = sprd_drm_remove,
.shutdown = sprd_drm_shutdown,
.driver = {
.name = "sprd-drm-drv",
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index 0b69c140eab3..8fc26479bb6b 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -209,7 +209,7 @@ static int regmap_tst_io_read(void *context, u32 reg, u32 *val)
return 0;
}
-static struct regmap_bus regmap_tst_io = {
+static const struct regmap_bus regmap_tst_io = {
.reg_write = regmap_tst_io_write,
.reg_read = regmap_tst_io_read,
};
@@ -1060,7 +1060,7 @@ static void sprd_dsi_remove(struct platform_device *pdev)
struct platform_driver sprd_dsi_driver = {
.probe = sprd_dsi_probe,
- .remove_new = sprd_dsi_remove,
+ .remove = sprd_dsi_remove,
.driver = {
.name = "sprd-dsi-drv",
.of_match_table = dsi_match_table,
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 75c301aadcbc..ec341a4720d4 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -3,6 +3,7 @@ config DRM_STI
tristate "DRM Support for STMicroelectronics SoC stiH4xx Series"
depends on OF && DRM && (ARCH_STI || COMPILE_TEST)
select RESET_CONTROLLER
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 33487a1fed8f..063f82d23d80 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -269,7 +269,7 @@ struct platform_driver sti_compositor_driver = {
.of_match_table = compositor_of_match,
},
.probe = sti_compositor_probe,
- .remove_new = sti_compositor_remove,
+ .remove = sti_compositor_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index db0a1eb53532..c59fcb4dca32 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -200,6 +200,9 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
return 0;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
mode = &crtc_state->mode;
dst_x = new_plane_state->crtc_x;
dst_y = new_plane_state->crtc_y;
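drm_atomic_get_crtc_state() can fail, most commonly with -EDEADLK when the atomic core has to back off and retry its locking, so dereferencing the result unchecked is a bug. The guard added here (and to sti_gdp and sti_hqvdp below) is the standard pattern:

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);	/* e.g. -EDEADLK: the core retries */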
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 1799c12babf5..5e9332df21df 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -13,6 +13,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
@@ -28,7 +29,6 @@
#define DRIVER_NAME "sti"
#define DRIVER_DESC "STMicroelectronics SoC DRM"
-#define DRIVER_DATE "20140601"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -136,12 +136,12 @@ static const struct drm_driver sti_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &sti_driver_fops,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.debugfs_init = sti_drm_dbg_init,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -203,7 +203,7 @@ static int sti_bind(struct device *dev)
drm_mode_config_reset(ddev);
- drm_fbdev_dma_setup(ddev, 32);
+ drm_client_setup(ddev, NULL);
return 0;
@@ -268,7 +268,7 @@ MODULE_DEVICE_TABLE(of, sti_dt_ids);
static struct platform_driver sti_platform_driver = {
.probe = sti_platform_probe,
- .remove_new = sti_platform_remove,
+ .remove = sti_platform_remove,
.shutdown = sti_platform_shutdown,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 68b8197b3dd1..c6c2abaa1891 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -585,7 +585,7 @@ struct platform_driver sti_dvo_driver = {
.of_match_table = dvo_of_match,
},
.probe = sti_dvo_probe,
- .remove_new = sti_dvo_remove,
+ .remove = sti_dvo_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 43c72c2604a0..f046f5f7ad25 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -638,6 +638,9 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
mixer = to_sti_mixer(crtc);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
mode = &crtc_state->mode;
dst_x = new_plane_state->crtc_x;
dst_y = new_plane_state->crtc_y;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index f18faad974aa..b12863bea955 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -810,7 +810,7 @@ struct platform_driver sti_hda_driver = {
.of_match_table = hda_of_match,
},
.probe = sti_hda_probe,
- .remove_new = sti_hda_remove,
+ .remove = sti_hda_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 847470f747c0..ca2fe17de4a5 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1225,7 +1225,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size
struct drm_connector *connector = hdmi->drm_connector;
DRM_DEBUG_DRIVER("\n");
+ mutex_lock(&connector->eld_mutex);
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+ mutex_unlock(&connector->eld_mutex);
return 0;
}
@@ -1235,7 +1237,6 @@ static const struct hdmi_codec_ops audio_codec_ops = {
.audio_shutdown = hdmi_audio_shutdown,
.mute_stream = hdmi_audio_mute,
.get_eld = hdmi_audio_get_eld,
- .no_capture_mute = 1,
};
static int sti_hdmi_register_audio_driver(struct device *dev,
@@ -1245,6 +1246,7 @@ static int sti_hdmi_register_audio_driver(struct device *dev,
.ops = &audio_codec_ops,
.max_i2s_channels = 8,
.i2s = 1,
+ .no_capture_mute = 1,
};
DRM_DEBUG_DRIVER("\n");
@@ -1492,7 +1494,7 @@ struct platform_driver sti_hdmi_driver = {
.of_match_table = hdmi_of_match,
},
.probe = sti_hdmi_probe,
- .remove_new = sti_hdmi_remove,
+ .remove = sti_hdmi_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index acbf70b95aeb..0f658709c9d0 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1037,6 +1037,9 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
return 0;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
mode = &crtc_state->mode;
dst_x = new_plane_state->crtc_x;
dst_y = new_plane_state->crtc_y;
@@ -1417,7 +1420,7 @@ struct platform_driver sti_hqvdp_driver = {
.of_match_table = hqvdp_of_match,
},
.probe = sti_hqvdp_probe,
- .remove_new = sti_hqvdp_remove,
+ .remove = sti_hqvdp_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 7e5f14646625..06c1b81912f7 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -137,7 +137,7 @@ static void mixer_dbg_crb(struct seq_file *s, int val)
}
}
-static void mixer_dbg_mxn(struct seq_file *s, void *addr)
+static void mixer_dbg_mxn(struct seq_file *s, void __iomem *addr)
{
int i;
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index e714c232026c..af6c06f448c4 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -889,7 +889,7 @@ struct platform_driver sti_tvout_driver = {
.of_match_table = tvout_of_match,
},
.probe = sti_tvout_probe,
- .remove_new = sti_tvout_remove,
+ .remove = sti_tvout_remove,
};
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index d7f41a87808e..635be0ac00af 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -3,6 +3,7 @@ config DRM_STM
tristate "DRM Support for STMicroelectronics SoC Series"
depends on DRM && (ARCH_STM32 || COMPILE_TEST)
depends on COMMON_CLK
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index e1232f74dfa5..8ebcaf953782 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -8,6 +8,7 @@
* Mickael Reulier <mickael.reulier@st.com>
*/
+#include <linux/aperture.h>
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
@@ -15,11 +16,12 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -60,12 +62,12 @@ static const struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "stm",
.desc = "STMicroelectronics SoC DRM",
- .date = "20170330",
.major = 1,
.minor = 0,
.patchlevel = 0,
.fops = &drv_driver_fops,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_dma_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
};
static int drv_load(struct drm_device *ddev)
@@ -188,7 +190,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
DRM_DEBUG("%s\n", __func__);
- ret = drm_aperture_remove_framebuffers(&drv_driver);
+ ret = aperture_remove_all_conflicting_devices(drv_driver.name);
if (ret)
return ret;
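The DRM-specific drm_aperture_remove_framebuffers() wrapper is retired in favor of the generic helper from <linux/aperture.h>, which takes the requesting driver's name (used for conflict logging) rather than the drm_driver pointer:

	/* was: ret = drm_aperture_remove_framebuffers(&drv_driver); */
	ret = aperture_remove_all_conflicting_devices(drv_driver.name);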
@@ -206,7 +208,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_dma_setup(ddev, 16);
+ drm_client_setup_with_fourcc(ddev, DRM_FORMAT_RGB565);
return 0;
@@ -242,7 +244,7 @@ MODULE_DEVICE_TABLE(of, drv_dt_ids);
static struct platform_driver stm_drm_platform_driver = {
.probe = stm_drm_platform_probe,
- .remove_new = stm_drm_platform_remove,
+ .remove = stm_drm_platform_remove,
.shutdown = stm_drm_platform_shutdown,
.driver = {
.name = "stm32-display",
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index b20123854c4a..2c7bc064bc66 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -783,7 +783,7 @@ static const struct dev_pm_ops dw_mipi_dsi_stm_pm_ops = {
static struct platform_driver dw_mipi_dsi_stm_driver = {
.probe = dw_mipi_dsi_stm_probe,
- .remove_new = dw_mipi_dsi_stm_remove,
+ .remove = dw_mipi_dsi_stm_remove,
.driver = {
.of_match_table = dw_mipi_dsi_stm_dt_ids,
.name = "stm32-display-dsi",
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 4037e085430e..b56ba00aabca 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -3,6 +3,7 @@ config DRM_SUN4I
tristate "DRM Support for Allwinner A10 Display Engine"
depends on DRM && COMMON_CLK
depends on ARCH_SUNXI || COMPILE_TEST
+ select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index e89eb96d3131..2dded3b828df 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -1028,7 +1028,7 @@ MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
static struct platform_driver sun4i_backend_platform_driver = {
.probe = sun4i_backend_probe,
- .remove_new = sun4i_backend_remove,
+ .remove = sun4i_backend_remove,
.driver = {
.name = "sun4i-backend",
.of_match_table = sun4i_backend_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 35d7a7ffd208..c11dfb2739fa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -6,6 +6,7 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
+#include <linux/aperture.h>
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/kfifo.h>
@@ -14,7 +15,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -49,12 +50,12 @@ static const struct drm_driver sun4i_drv_driver = {
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
- .date = "20150629",
.major = 1,
.minor = 0,
/* GEM Operations */
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
};
static int sun4i_drv_bind(struct device *dev)
@@ -98,7 +99,7 @@ static int sun4i_drv_bind(struct device *dev)
goto unbind_all;
/* Remove early framebuffers (ie. simplefb) */
- ret = drm_aperture_remove_framebuffers(&sun4i_drv_driver);
+ ret = aperture_remove_all_conflicting_devices(sun4i_drv_driver.name);
if (ret)
goto unbind_all;
@@ -111,7 +112,7 @@ static int sun4i_drv_bind(struct device *dev)
if (ret)
goto finish_poll;
- drm_fbdev_dma_setup(drm, 32);
+ drm_client_setup(drm, NULL);
dev_set_drvdata(dev, drm);
@@ -441,7 +442,7 @@ MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
static struct platform_driver sun4i_drv_platform_driver = {
.probe = sun4i_drv_probe,
- .remove_new = sun4i_drv_remove,
+ .remove = sun4i_drv_remove,
.shutdown = sun4i_drv_shutdown,
.driver = {
.name = "sun4i-drm",
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index 280d444dbb66..5ab1604f12dd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -717,7 +717,7 @@ MODULE_DEVICE_TABLE(of, sun4i_frontend_of_table);
static struct platform_driver sun4i_frontend_driver = {
.probe = sun4i_frontend_probe,
- .remove_new = sun4i_frontend_remove,
+ .remove = sun4i_frontend_remove,
.driver = {
.name = "sun4i-frontend",
.of_match_table = sun4i_frontend_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index b3649449de30..ab0938ba61f7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -187,34 +187,6 @@ sun4i_hdmi_connector_clock_valid(const struct drm_connector *connector,
return MODE_NOCLOCK;
}
-static int sun4i_hdmi_connector_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *state)
-{
- struct drm_connector_state *conn_state =
- drm_atomic_get_new_connector_state(state, connector);
- struct drm_crtc *crtc = conn_state->crtc;
- struct drm_crtc_state *crtc_state = crtc->state;
- struct drm_display_mode *mode = &crtc_state->adjusted_mode;
- enum drm_mode_status status;
-
- status = sun4i_hdmi_connector_clock_valid(connector, mode,
- conn_state->hdmi.tmds_char_rate);
- if (status != MODE_OK)
- return -EINVAL;
-
- return 0;
-}
-
-static enum drm_mode_status
-sun4i_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- unsigned long long rate = drm_hdmi_compute_mode_clock(mode, 8,
- HDMI_COLORSPACE_RGB);
-
- return sun4i_hdmi_connector_clock_valid(connector, mode, rate);
-}
-
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
@@ -268,8 +240,8 @@ static const struct drm_connector_hdmi_funcs sun4i_hdmi_hdmi_connector_funcs = {
};
static const struct drm_connector_helper_funcs sun4i_hdmi_connector_helper_funcs = {
- .atomic_check = sun4i_hdmi_connector_atomic_check,
- .mode_valid = sun4i_hdmi_connector_mode_valid,
+ .atomic_check = drm_atomic_helper_connector_hdmi_check,
+ .mode_valid = drm_hdmi_connector_mode_valid,
.get_modes = sun4i_hdmi_get_modes,
};
@@ -741,7 +713,7 @@ MODULE_DEVICE_TABLE(of, sun4i_hdmi_of_table);
static struct platform_driver sun4i_hdmi_driver = {
.probe = sun4i_hdmi_probe,
- .remove_new = sun4i_hdmi_remove,
+ .remove = sun4i_hdmi_remove,
.driver = {
.name = "sun4i-hdmi",
.of_match_table = sun4i_hdmi_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index a1a2c845ade0..960e83c8291d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -1568,7 +1568,7 @@ EXPORT_SYMBOL(sun4i_tcon_of_table);
static struct platform_driver sun4i_tcon_platform_driver = {
.probe = sun4i_tcon_probe,
- .remove_new = sun4i_tcon_remove,
+ .remove = sun4i_tcon_remove,
.driver = {
.name = "sun4i-tcon",
.of_match_table = sun4i_tcon_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index ec65d9d59de7..cce4e38789b9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -559,7 +559,7 @@ MODULE_DEVICE_TABLE(of, sun4i_tv_of_table);
static struct platform_driver sun4i_tv_platform_driver = {
.probe = sun4i_tv_probe,
- .remove_new = sun4i_tv_remove,
+ .remove = sun4i_tv_remove,
.driver = {
.name = "sun4i-tve",
.of_match_table = sun4i_tv_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index 0d342f43fa93..310c7e0daede 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -112,7 +112,7 @@ MODULE_DEVICE_TABLE(of, sun6i_drc_of_table);
static struct platform_driver sun6i_drc_platform_driver = {
.probe = sun6i_drc_probe,
- .remove_new = sun6i_drc_remove,
+ .remove = sun6i_drc_remove,
.driver = {
.name = "sun6i-drc",
.of_match_table = sun6i_drc_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 4abf4f102007..c35b70d83e53 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -1244,7 +1244,7 @@ MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
static struct platform_driver sun6i_dsi_platform_driver = {
.probe = sun6i_dsi_probe,
- .remove_new = sun6i_dsi_remove,
+ .remove = sun6i_dsi_remove,
.driver = {
.name = "sun6i-mipi-dsi",
.of_match_table = sun6i_dsi_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 4727dfaa8fb9..96532709c2a7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
.probe = sun8i_dw_hdmi_probe,
- .remove_new = sun8i_dw_hdmi_remove,
+ .remove = sun8i_dw_hdmi_remove,
.driver = {
.name = "sun8i-dw-hdmi",
.of_match_table = sun8i_dw_hdmi_dt_ids,
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index bd0fe2c6624e..8b41d33baa30 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -775,7 +775,7 @@ MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
static struct platform_driver sun8i_mixer_platform_driver = {
.probe = sun8i_mixer_probe,
- .remove_new = sun8i_mixer_remove,
+ .remove = sun8i_mixer_remove,
.driver = {
.name = "sun8i-mixer",
.of_match_table = sun8i_mixer_of_table,
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index a1ca3916f42b..8adda578c51b 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -299,7 +299,7 @@ EXPORT_SYMBOL(sun8i_tcon_top_of_table);
static struct platform_driver sun8i_tcon_top_platform_driver = {
.probe = sun8i_tcon_top_probe,
- .remove_new = sun8i_tcon_top_remove,
+ .remove = sun8i_tcon_top_remove,
.driver = {
.name = "sun8i-tcon-top",
.of_match_table = sun8i_tcon_top_of_table,
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index e688d8104652..8a3b16aac5d6 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -5,6 +5,7 @@ config DRM_TEGRA
depends on COMMON_CLK
depends on DRM
depends on OF
+ select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index be61c9d1a4f0..430b2eededb2 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -3286,5 +3286,5 @@ struct platform_driver tegra_dc_driver = {
.of_match_table = tegra_dc_of_match,
},
.probe = tegra_dc_probe,
- .remove_new = tegra_dc_remove,
+ .remove = tegra_dc_remove,
};
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index ae12d001a04b..2cd8dcb959c0 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -697,7 +697,7 @@ struct platform_driver tegra_dpaux_driver = {
.pm = pm_ptr(&tegra_dpaux_pm_ops),
},
.probe = tegra_dpaux_probe,
- .remove_new = tegra_dpaux_remove,
+ .remove = tegra_dpaux_remove,
};
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index c9eb329665ec..4596073fe28f 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -4,6 +4,7 @@
* Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/aperture.h>
#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
@@ -12,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
@@ -34,7 +35,6 @@
#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
-#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -892,13 +892,14 @@ static const struct drm_driver tegra_drm_driver = {
.dumb_create = tegra_bo_dumb_create,
+ TEGRA_FBDEV_DRIVER_OPS,
+
.ioctls = tegra_drm_ioctls,
.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
.fops = &tegra_drm_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -1153,8 +1154,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) {
tegra->domain = iommu_paging_domain_alloc(dma_dev);
- if (!tegra->domain) {
- err = -ENOMEM;
+ if (IS_ERR(tegra->domain)) {
+ err = PTR_ERR(tegra->domain);
goto free;
}
@@ -1255,7 +1256,7 @@ static int host1x_drm_probe(struct host1x_device *dev)
* will not expose any modesetting features.
*/
if (drm->mode_config.num_crtc > 0) {
- err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+ err = aperture_remove_all_conflicting_devices(tegra_drm_driver.name);
if (err < 0)
goto hub;
} else {
@@ -1270,7 +1271,7 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (err < 0)
goto hub;
- tegra_fbdev_setup(drm);
+ drm_client_setup(drm, NULL);
return 0;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 2f3781e04b0a..0b65e69f3a8a 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -25,6 +25,9 @@
/* XXX move to include/uapi/drm/drm_fourcc.h? */
#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
struct edid;
struct reset_control;
@@ -190,10 +193,13 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
const struct drm_mode_fb_cmd2 *cmd);
#ifdef CONFIG_DRM_FBDEV_EMULATION
-void tegra_fbdev_setup(struct drm_device *drm);
+int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+#define TEGRA_FBDEV_DRIVER_OPS \
+ .fbdev_probe = tegra_fbdev_driver_fbdev_probe
#else
-static inline void tegra_fbdev_setup(struct drm_device *drm)
-{ }
+#define TEGRA_FBDEV_DRIVER_OPS \
+ .fbdev_probe = NULL
#endif
extern struct platform_driver tegra_display_hub_driver;
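With this, the driver no longer instantiates its own fbdev client (that boilerplate is deleted from fbdev.c below); it only exposes a probe callback, and the shared client code does the rest. Roughly, with intermediate steps abridged:

/*
 *   drm_client_setup(drm, NULL)
 *     -> common fbdev client registers itself
 *          -> on hotplug: drm_fb_helper_initial_config()
 *               -> driver->fbdev_probe() == tegra_fbdev_driver_fbdev_probe()
 */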
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index db606e151afc..4a8cd9ed0a94 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1713,5 +1713,5 @@ struct platform_driver tegra_dsi_driver = {
.of_match_table = tegra_dsi_of_match,
},
.probe = tegra_dsi_probe,
- .remove_new = tegra_dsi_remove,
+ .remove = tegra_dsi_remove,
};
diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c
index db6eaac3d30e..cd9d798f8870 100644
--- a/drivers/gpu/drm/tegra/fbdev.c
+++ b/drivers/gpu/drm/tegra/fbdev.c
@@ -66,8 +66,11 @@ static const struct fb_ops tegra_fb_ops = {
.fb_destroy = tegra_fbdev_fb_destroy,
};
-static int tegra_fbdev_probe(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+static const struct drm_fb_helper_funcs tegra_fbdev_helper_funcs = {
+};
+
+int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
{
struct tegra_drm *tegra = helper->dev->dev_private;
struct drm_device *drm = helper->dev;
@@ -112,6 +115,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
return PTR_ERR(fb);
}
+ helper->funcs = &tegra_fbdev_helper_funcs;
helper->fb = fb;
helper->info = info;
@@ -144,93 +148,3 @@ destroy:
drm_framebuffer_remove(fb);
return err;
}
-
-static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
- .fb_probe = tegra_fbdev_probe,
-};
-
-/*
- * struct drm_client
- */
-
-static void tegra_fbdev_client_unregister(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
-
- if (fb_helper->info) {
- drm_fb_helper_unregister_info(fb_helper);
- } else {
- drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
- }
-}
-
-static int tegra_fbdev_client_restore(struct drm_client_dev *client)
-{
- drm_fb_helper_lastclose(client->dev);
-
- return 0;
-}
-
-static int tegra_fbdev_client_hotplug(struct drm_client_dev *client)
-{
- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- struct drm_device *dev = client->dev;
- int ret;
-
- if (dev->fb_helper)
- return drm_fb_helper_hotplug_event(dev->fb_helper);
-
- ret = drm_fb_helper_init(dev, fb_helper);
- if (ret)
- goto err_drm_err;
-
- if (!drm_drv_uses_atomic_modeset(dev))
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(fb_helper);
- if (ret)
- goto err_drm_fb_helper_fini;
-
- return 0;
-
-err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
-err_drm_err:
- drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
- return ret;
-}
-
-static const struct drm_client_funcs tegra_fbdev_client_funcs = {
- .owner = THIS_MODULE,
- .unregister = tegra_fbdev_client_unregister,
- .restore = tegra_fbdev_client_restore,
- .hotplug = tegra_fbdev_client_hotplug,
-};
-
-void tegra_fbdev_setup(struct drm_device *dev)
-{
- struct drm_fb_helper *helper;
- int ret;
-
- drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
- drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
-
- helper = kzalloc(sizeof(*helper), GFP_KERNEL);
- if (!helper)
- return;
- drm_fb_helper_prepare(dev, helper, 32, &tegra_fb_helper_funcs);
-
- ret = drm_client_init(dev, &helper->client, "fbdev", &tegra_fbdev_client_funcs);
- if (ret)
- goto err_drm_client_init;
-
- drm_client_register(&helper->client);
-
- return;
-
-err_drm_client_init:
- drm_fb_helper_unprepare(helper);
- kfree(helper);
-}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index b4eb030ea961..ace3e5a805cf 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -22,7 +22,7 @@
#include "drm.h"
#include "gem.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
@@ -76,8 +76,8 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
/*
* Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
*/
- if (gem->import_attach) {
- struct dma_buf *buf = gem->import_attach->dmabuf;
+ if (obj->dma_buf) {
+ struct dma_buf *buf = obj->dma_buf;
map->attach = dma_buf_attach(buf, dev);
if (IS_ERR(map->attach)) {
@@ -184,8 +184,8 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
if (obj->vaddr)
return obj->vaddr;
- if (obj->gem.import_attach) {
- ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
+ if (obj->dma_buf) {
+ ret = dma_buf_vmap_unlocked(obj->dma_buf, &map);
if (ret < 0)
return ERR_PTR(ret);
@@ -208,8 +208,8 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
if (obj->vaddr)
return;
- if (obj->gem.import_attach)
- return dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
+ if (obj->dma_buf)
+ return dma_buf_vunmap_unlocked(obj->dma_buf, &map);
vunmap(addr);
}
@@ -465,27 +465,32 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
if (IS_ERR(bo))
return bo;
- attach = dma_buf_attach(buf, drm->dev);
- if (IS_ERR(attach)) {
- err = PTR_ERR(attach);
- goto free;
- }
-
- get_dma_buf(buf);
+ /*
+ * If we need to use the IOMMU API to map the dma-buf into the internally
+ * managed domain, first map it to the DRM device to get an sgt.
+ */
+ if (tegra->domain) {
+ attach = dma_buf_attach(buf, drm->dev);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto free;
+ }
- bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
- if (IS_ERR(bo->sgt)) {
- err = PTR_ERR(bo->sgt);
- goto detach;
- }
+ bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
+ if (IS_ERR(bo->sgt)) {
+ err = PTR_ERR(bo->sgt);
+ goto detach;
+ }
- if (tegra->domain) {
err = tegra_bo_iommu_map(tegra, bo);
if (err < 0)
goto detach;
+
+ bo->gem.import_attach = attach;
}
- bo->gem.import_attach = attach;
+ get_dma_buf(buf);
+ bo->dma_buf = buf;
return bo;
@@ -516,17 +521,21 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
dev_name(mapping->dev));
}
- if (tegra->domain)
+ if (tegra->domain) {
tegra_bo_iommu_unmap(tegra, bo);
- if (gem->import_attach) {
- dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
- DMA_TO_DEVICE);
- drm_prime_gem_destroy(gem, NULL);
- } else {
- tegra_bo_free(gem->dev, bo);
+ if (gem->import_attach) {
+ dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
+ DMA_TO_DEVICE);
+ dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
+ }
}
+ tegra_bo_free(gem->dev, bo);
+
+ if (bo->dma_buf)
+ dma_buf_put(bo->dma_buf);
+
drm_gem_object_release(gem);
kfree(bo);
}
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index cb5146a67668..bf2cbd48eb3f 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -32,6 +32,26 @@ struct tegra_bo_tiling {
enum tegra_bo_sector_layout sector_layout;
};
+/*
+ * How memory is referenced within a tegra_bo:
+ *
+ * Buffer source | Mapping API(*) | Fields
+ * ---------------+-----------------+---------------
+ * Allocated here | DMA API | iova (IOVA mapped to drm->dev), vaddr (CPU VA)
+ *
+ * Allocated here | IOMMU API | pages/num_pages (Phys. memory), sgt (Mapped to drm->dev),
+ * | iova/size (Mapped to domain)
+ *
+ * Imported | DMA API | dma_buf (Imported dma_buf)
+ *
+ * Imported | IOMMU API | dma_buf (Imported dma_buf),
+ * | gem->import_attach (Attachment on drm->dev),
+ * | sgt (Mapped to drm->dev)
+ * | iova/size (Mapped to domain)
+ *
+ * (*) If tegra->domain is set, i.e. the Tegra DRM IOMMU domain is managed directly through the
+ * IOMMU API, the mapping API is the IOMMU API; otherwise it is the DMA API.
+ */
struct tegra_bo {
struct drm_gem_object gem;
struct host1x_bo base;
@@ -39,6 +59,7 @@ struct tegra_bo {
struct sg_table *sgt;
dma_addr_t iova;
void *vaddr;
+ struct dma_buf *dma_buf;
struct drm_mm_node *mm;
unsigned long num_pages;
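The import-path change in gem.c above boils down to: always take and keep a reference on the imported dma-buf, but only create an attachment and sgt up front when the IOMMU-API path (tegra->domain) is in use; on the DMA-API path, tegra_bo_pin() attaches per device on demand. Condensed, with error handling dropped:

	/* always: keep the dma-buf alive for the lifetime of the BO */
	get_dma_buf(buf);
	bo->dma_buf = buf;

	if (tegra->domain) {
		/* IOMMU-API path: attach and map to drm->dev, then into the domain */
		attach = dma_buf_attach(buf, drm->dev);
		bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
		tegra_bo_iommu_map(tegra, bo);
		bo->gem.import_attach = attach;
	}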
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index a160d01f26e1..21f4dd0fa6af 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -394,5 +394,5 @@ struct platform_driver tegra_gr2d_driver = {
.pm = &tegra_gr2d_pm,
},
.probe = gr2d_probe,
- .remove_new = gr2d_remove,
+ .remove = gr2d_remove,
};
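
The .remove_new to .remove renames throughout this series are mechanical: after the platform bus converted the remove callback to return void, the transitional .remove_new member was folded back into .remove with the same prototype. The pattern, sketched for a hypothetical driver foo:

	static void foo_remove(struct platform_device *pdev)
	{
		/* returns void: cleanup has no error code to propagate */
	}

	static struct platform_driver foo_driver = {
		.probe  = foo_probe,
		.remove = foo_remove,	/* formerly .remove_new */
	};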
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 4de1ea0fc7c0..42e9656ab80c 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -375,6 +375,7 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
struct dev_pm_domain_attach_data pd_data = {
.pd_names = (const char *[]) { "3d0", "3d1" },
.num_pd_names = 2,
+ .pd_flags = PD_FLAG_REQUIRED_OPP,
};
int err;
@@ -409,7 +410,7 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d)
if (dev->pm_domain)
return 0;
- err = dev_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list);
+ err = devm_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list);
if (err < 0)
return err;
@@ -503,13 +504,13 @@ static int gr3d_probe(struct platform_device *pdev)
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
- goto err;
+ return err;
err = host1x_client_register(&gr3d->client.base);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- goto err;
+ return err;
}
/* initialize address register map */
@@ -517,9 +518,6 @@ static int gr3d_probe(struct platform_device *pdev)
set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
return 0;
-err:
- dev_pm_domain_detach_list(gr3d->pd_list);
- return err;
}
static void gr3d_remove(struct platform_device *pdev)
@@ -528,7 +526,6 @@ static void gr3d_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
host1x_client_unregister(&gr3d->client.base);
- dev_pm_domain_detach_list(gr3d->pd_list);
}
static int __maybe_unused gr3d_runtime_suspend(struct device *dev)
@@ -608,5 +605,5 @@ struct platform_driver tegra_gr3d_driver = {
.pm = &tegra_gr3d_pm,
},
.probe = gr3d_probe,
- .remove_new = gr3d_remove,
+ .remove = gr3d_remove,
};
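
Switching to the device-managed devm_pm_domain_attach_list() is what allows the err label in gr3d_probe() and the detach call in gr3d_remove() to go away: the detach now happens automatically on unbind. The idiom in isolation, reusing pd_data from the hunk above:

	err = devm_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list);
	if (err < 0)
		return err;	/* nothing to unwind by hand on later failures */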
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 09987e372e3e..e705f8590c13 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -434,7 +434,7 @@ tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock,
static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
{
- const unsigned int freqs[] = {
+ static const unsigned int freqs[] = {
32000, 44100, 48000, 88200, 96000, 176400, 192000
};
unsigned int i;
@@ -1919,5 +1919,5 @@ struct platform_driver tegra_hdmi_driver = {
.of_match_table = tegra_hdmi_of_match,
},
.probe = tegra_hdmi_probe,
- .remove_new = tegra_hdmi_remove,
+ .remove = tegra_hdmi_remove,
};
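
Making the table static const moves it out of the function's stack frame into read-only data, so it is no longer rebuilt on every call. The same idiom in a self-contained sketch:

	static unsigned int max_supported_freq_example(void)
	{
		/* emitted once at compile time, shared across all calls */
		static const unsigned int freqs[] = {
			32000, 44100, 48000, 88200, 96000, 176400, 192000
		};

		return freqs[ARRAY_SIZE(freqs) - 1];
	}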
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index e0c2019a591b..fa6140fc37fb 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -1218,5 +1218,5 @@ struct platform_driver tegra_display_hub_driver = {
.of_match_table = tegra_display_hub_of_match,
},
.probe = tegra_display_hub_probe,
- .remove_new = tegra_display_hub_remove,
+ .remove = tegra_display_hub_remove,
};
diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c
index 4860790666af..2d9a0a3f6c38 100644
--- a/drivers/gpu/drm/tegra/nvdec.c
+++ b/drivers/gpu/drm/tegra/nvdec.c
@@ -566,7 +566,7 @@ struct platform_driver tegra_nvdec_driver = {
.pm = &nvdec_pm_ops
},
.probe = nvdec_probe,
- .remove_new = nvdec_remove,
+ .remove = nvdec_remove,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index bad3b8fcc726..802d2db7007a 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -4040,5 +4040,5 @@ struct platform_driver tegra_sor_driver = {
.pm = &tegra_sor_pm_ops,
},
.probe = tegra_sor_probe,
- .remove_new = tegra_sor_remove,
+ .remove = tegra_sor_remove,
};
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 73c356f1c901..332c9b563d3f 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -553,7 +553,7 @@ struct platform_driver tegra_vic_driver = {
.pm = &vic_pm_ops
},
.probe = vic_probe,
- .remove_new = vic_remove,
+ .remove = vic_remove,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
index 15e36a8db685..22e2d959eb31 100644
--- a/drivers/gpu/drm/tests/drm_connector_test.c
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -9,6 +9,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_file.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
@@ -181,6 +182,465 @@ static struct kunit_suite drmm_connector_init_test_suite = {
.test_cases = drmm_connector_init_tests,
};
+static const struct drm_connector_funcs dummy_dynamic_init_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+ .destroy = drm_connector_cleanup,
+};
+
+/*
+ * Test that the initialization of a bog standard dynamic connector works
+ * as expected and doesn't report any error.
+ */
+static void drm_test_drm_connector_dynamic_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
+static void drm_test_connector_dynamic_init_cleanup(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+
+ drm_connector_cleanup(connector);
+}
+
+/*
+ * Test that the initialization of a dynamic connector without a DDC adapter
+ * doesn't report any error.
+ */
+static void drm_test_drm_connector_dynamic_init_null_ddc(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
+/*
+ * Test that the initialization of a dynamic connector doesn't add the
+ * connector to the connector list.
+ */
+static void drm_test_drm_connector_dynamic_init_not_added(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_PTR_EQ(test, connector->head.next, &connector->head);
+}
+
+static void test_connector_property(struct kunit *test,
+ struct drm_connector *connector,
+ const struct drm_property *expected_prop)
+{
+ struct drm_property *prop;
+ uint64_t val;
+ int ret;
+
+ KUNIT_ASSERT_NOT_NULL(test, expected_prop);
+ prop = drm_mode_obj_find_prop_id(&connector->base, expected_prop->base.id);
+ KUNIT_ASSERT_PTR_EQ_MSG(test, prop, expected_prop,
+ "Can't find property %s", expected_prop->name);
+
+ ret = drm_object_property_get_default_value(&connector->base, prop, &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val, 0);
+
+ /* TODO: Check property value in the connector state. */
+}
+
+/*
+ * Test that the initialization of a dynamic connector adds all the expected
+ * properties to it.
+ */
+static void drm_test_drm_connector_dynamic_init_properties(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_mode_config *config = &priv->drm.mode_config;
+ const struct drm_property *props[] = {
+ config->edid_property,
+ config->dpms_property,
+ config->link_status_property,
+ config->non_desktop_property,
+ config->tile_property,
+ config->prop_crtc_id,
+ };
+ int ret;
+ int i;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ for (i = 0; i < ARRAY_SIZE(props); i++)
+ test_connector_property(test, connector, props[i]);
+}
+
+/*
+ * Test that the initialization of a dynamic connector succeeds for all
+ * possible connector types.
+ */
+static void drm_test_drm_connector_dynamic_init_type_valid(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ unsigned int connector_type = *(unsigned int *)test->param_value;
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ connector_type,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
+/*
+ * Test that the initialization of a dynamic connector sets the expected name
+ * for it for all possible connector types.
+ */
+static void drm_test_drm_connector_dynamic_init_name(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ unsigned int connector_type = *(unsigned int *)test->param_value;
+ char expected_name[128];
+ int ret;
+
+ ret = drm_connector_dynamic_init(&priv->drm, connector,
+ &dummy_dynamic_init_funcs,
+ connector_type,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ snprintf(expected_name, sizeof(expected_name), "%s-%d",
+ drm_get_connector_type_name(connector_type), connector->connector_type_id);
+ KUNIT_ASSERT_STREQ(test, connector->name, expected_name);
+}
+
+static struct kunit_case drm_connector_dynamic_init_tests[] = {
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init_null_ddc),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init_not_added),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_init_properties),
+ KUNIT_CASE_PARAM(drm_test_drm_connector_dynamic_init_type_valid,
+ drm_connector_init_type_valid_gen_params),
+ KUNIT_CASE_PARAM(drm_test_drm_connector_dynamic_init_name,
+ drm_connector_init_type_valid_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_connector_dynamic_init_test_suite = {
+ .name = "drm_connector_dynamic_init",
+ .init = drm_test_connector_init,
+ .exit = drm_test_connector_dynamic_init_cleanup,
+ .test_cases = drm_connector_dynamic_init_tests,
+};
+
+static int drm_test_connector_dynamic_register_early_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv;
+ int ret;
+
+ ret = drm_test_connector_init(test);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv = test->priv;
+
+ ret = drm_connector_dynamic_init(&priv->drm, &priv->connector,
+ &dummy_dynamic_init_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ &priv->ddc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return 0;
+}
+
+static void drm_test_connector_dynamic_register_early_cleanup(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+
+ drm_connector_unregister(connector);
+ drm_connector_put(connector);
+}
+
+/*
+ * Test that registration of a dynamic connector adds it to the connector list.
+ */
+static void drm_test_drm_connector_dynamic_register_early_on_list(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&connector->head));
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_PTR_EQ(test, connector->head.next, &priv->drm.mode_config.connector_list);
+}
+
+/*
+ * Test that the registration of a dynamic connector before the drm device is
+ * registered results in deferring the connector's user interface registration.
+ */
+static void drm_test_drm_connector_dynamic_register_early_defer(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_EQ(test, connector->registration_state, DRM_CONNECTOR_INITIALIZING);
+}
+
+/*
+ * Test that the registration of a dynamic connector fails if it is done before
+ * the connector is initialized.
+ */
+static void drm_test_drm_connector_dynamic_register_early_no_init(struct kunit *test)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ connector = kunit_kzalloc(test, sizeof(*connector), GFP_KERNEL); /* auto freed */
+ KUNIT_ASSERT_NOT_NULL(test, connector);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, -EINVAL);
+}
+
+/*
+ * Test that the registration of a dynamic connector before the drm device is
+ * registered results in deferring adding a mode object for the connector.
+ */
+static void drm_test_drm_connector_dynamic_register_early_no_mode_object(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_connector *tmp_connector;
+ int ret;
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ tmp_connector = drm_connector_lookup(connector->dev, NULL, connector->base.id);
+ KUNIT_ASSERT_NULL(test, tmp_connector);
+}
+
+static struct kunit_case drm_connector_dynamic_register_early_tests[] = {
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_on_list),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_defer),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_no_init),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_early_no_mode_object),
+ { }
+};
+
+static struct kunit_suite drm_connector_dynamic_register_early_test_suite = {
+ .name = "drm_connector_dynamic_register_early",
+ .init = drm_test_connector_dynamic_register_early_init,
+ .exit = drm_test_connector_dynamic_register_early_cleanup,
+ .test_cases = drm_connector_dynamic_register_early_tests,
+};
+
+static int drm_test_connector_dynamic_register_init(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv;
+ int ret;
+
+ ret = drm_test_connector_dynamic_register_early_init(test);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv = test->priv;
+
+ ret = drm_dev_register(priv->connector.dev, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return 0;
+}
+
+static void drm_test_connector_dynamic_register_cleanup(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_device *dev = priv->connector.dev;
+
+ drm_connector_unregister(&priv->connector);
+ drm_connector_put(&priv->connector);
+
+ drm_dev_unregister(dev);
+
+ drm_test_connector_dynamic_register_early_cleanup(test);
+}
+
+static void drm_test_drm_connector_dynamic_register_on_list(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ KUNIT_ASSERT_TRUE(test, list_empty(&priv->connector.head));
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_PTR_EQ(test, priv->connector.head.next, &priv->drm.mode_config.connector_list);
+}
+
+/*
+ * Test that the registration of a dynamic connector doesn't get deferred if
+ * this is done after the drm device is registered.
+ */
+static void drm_test_drm_connector_dynamic_register_no_defer(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ KUNIT_ASSERT_EQ(test, priv->connector.registration_state, DRM_CONNECTOR_INITIALIZING);
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_EQ(test, priv->connector.registration_state, DRM_CONNECTOR_REGISTERED);
+}
+
+/*
+ * Test that the registration of a dynamic connector fails if it is done after
+ * the drm device is registered but before the connector is initialized.
+ */
+static void drm_test_drm_connector_dynamic_register_no_init(struct kunit *test)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ connector = kunit_kzalloc(test, sizeof(*connector), GFP_KERNEL); /* auto freed */
+ KUNIT_ASSERT_NOT_NULL(test, connector);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, -EINVAL);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the mode object for the connector.
+ */
+static void drm_test_drm_connector_dynamic_register_mode_object(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_connector *tmp_connector;
+ int ret;
+
+ tmp_connector = drm_connector_lookup(connector->dev, NULL, connector->base.id);
+ KUNIT_ASSERT_NULL(test, tmp_connector);
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ tmp_connector = drm_connector_lookup(connector->dev, NULL, connector->base.id);
+ KUNIT_ASSERT_PTR_EQ(test, tmp_connector, connector);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the connector to sysfs.
+ */
+static void drm_test_drm_connector_dynamic_register_sysfs(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ KUNIT_ASSERT_NULL(test, connector->kdev);
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_ASSERT_NOT_NULL(test, connector->kdev);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered sets the connector's sysfs name as expected.
+ */
+static void drm_test_drm_connector_dynamic_register_sysfs_name(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ char expected_name[128];
+ int ret;
+
+ ret = drm_connector_dynamic_register(connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ snprintf(expected_name, sizeof(expected_name), "card%d-%s",
+ connector->dev->primary->index, connector->name);
+
+ KUNIT_ASSERT_STREQ(test, dev_name(connector->kdev), expected_name);
+}
+
+/*
+ * Test that the registration of a dynamic connector after the drm device is
+ * registered adds the connector to debugfs.
+ */
+static void drm_test_drm_connector_dynamic_register_debugfs(struct kunit *test)
+{
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ KUNIT_ASSERT_NULL(test, priv->connector.debugfs_entry);
+
+ ret = drm_connector_dynamic_register(&priv->connector);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ KUNIT_ASSERT_NOT_NULL(test, priv->connector.debugfs_entry);
+ else
+ KUNIT_ASSERT_NULL(test, priv->connector.debugfs_entry);
+}
+
+static struct kunit_case drm_connector_dynamic_register_tests[] = {
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_on_list),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_no_defer),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_no_init),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_mode_object),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_sysfs),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_sysfs_name),
+ KUNIT_CASE(drm_test_drm_connector_dynamic_register_debugfs),
+ { }
+};
+
+static struct kunit_suite drm_connector_dynamic_register_test_suite = {
+ .name = "drm_connector_dynamic_register",
+ .init = drm_test_connector_dynamic_register_init,
+ .exit = drm_test_connector_dynamic_register_cleanup,
+ .test_cases = drm_connector_dynamic_register_tests,
+};
+
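
The three new suites cover connectors created dynamically at runtime (DP MST being the typical case). For orientation, a hedged sketch of how a driver is expected to use the API pair under test; error handling is abbreviated:

	err = drm_connector_dynamic_init(drm, connector, &funcs,
					 DRM_MODE_CONNECTOR_DisplayPort, ddc);
	if (err)
		return err;

	/*
	 * May be called before or after drm_dev_register(); when called
	 * earlier, the user-space interfaces are deferred, which is what
	 * the "register_early" suite verifies.
	 */
	err = drm_connector_dynamic_register(connector);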
/*
* Test that the registration of a bog standard connector works as
* expected and doesn't report any error.
@@ -635,6 +1095,64 @@ static void drm_test_connector_hdmi_init_formats_no_rgb(struct kunit *test)
KUNIT_EXPECT_LT(test, ret, 0);
}
+struct drm_connector_hdmi_init_formats_yuv420_allowed_test {
+ unsigned long supported_formats;
+ bool yuv420_allowed;
+ int expected_result;
+};
+
+#define YUV420_ALLOWED_TEST(_formats, _allowed, _result) \
+ { \
+ .supported_formats = BIT(HDMI_COLORSPACE_RGB) | (_formats), \
+ .yuv420_allowed = _allowed, \
+ .expected_result = _result, \
+ }
+
+static const struct drm_connector_hdmi_init_formats_yuv420_allowed_test
+drm_connector_hdmi_init_formats_yuv420_allowed_tests[] = {
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), true, 0),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), false, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), true, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), false, 0),
+};
+
+static void
+drm_connector_hdmi_init_formats_yuv420_allowed_desc(const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *t,
+ char *desc)
+{
+ sprintf(desc, "supported_formats=0x%lx yuv420_allowed=%d",
+ t->supported_formats, t->yuv420_allowed);
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_tests,
+ drm_connector_hdmi_init_formats_yuv420_allowed_desc);
+
+/*
+ * Test that the registration of an HDMI connector succeeds only when
+ * the presence of YUV420 in the supported formats matches the value
+ * of the ycbcr_420_allowed flag.
+ */
+static void drm_test_connector_hdmi_init_formats_yuv420_allowed(struct kunit *test)
+{
+ const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *params;
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ params = test->param_value;
+ priv->connector.ycbcr_420_allowed = params->yuv420_allowed;
+
+ ret = drmm_connector_hdmi_init(&priv->drm, &priv->connector,
+ "Vendor", "Product",
+ &dummy_funcs,
+ &dummy_hdmi_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ &priv->ddc,
+ params->supported_formats,
+ 8);
+ KUNIT_EXPECT_EQ(test, ret, params->expected_result);
+}
+
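
The parameter table encodes the invariant enforced by drmm_connector_hdmi_init(): BIT(HDMI_COLORSPACE_YUV420) in supported_formats and ycbcr_420_allowed must agree. A call that satisfies it, sketched from the test above:

	/* consistent: YUV420 advertised and the flag set, so init succeeds */
	connector->ycbcr_420_allowed = true;
	ret = drmm_connector_hdmi_init(drm, connector, "Vendor", "Product",
				       &funcs, &hdmi_funcs,
				       DRM_MODE_CONNECTOR_HDMIA, ddc,
				       BIT(HDMI_COLORSPACE_RGB) |
				       BIT(HDMI_COLORSPACE_YUV420),
				       8);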
/*
* Test that the registration of an HDMI connector with an HDMI
* connector type succeeds.
@@ -726,6 +1244,8 @@ static struct kunit_case drmm_connector_hdmi_init_tests[] = {
KUNIT_CASE(drm_test_connector_hdmi_init_bpc_null),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_empty),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_no_rgb),
+ KUNIT_CASE_PARAM(drm_test_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_gen_params),
KUNIT_CASE(drm_test_connector_hdmi_init_null_ddc),
KUNIT_CASE(drm_test_connector_hdmi_init_null_product),
KUNIT_CASE(drm_test_connector_hdmi_init_null_vendor),
@@ -996,7 +1516,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1017,7 +1537,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1038,7 +1558,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_10bpc_vic_1(struct kunit *t
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
rate = drm_hdmi_compute_mode_clock(mode, 10, HDMI_COLORSPACE_RGB);
@@ -1056,7 +1576,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1077,7 +1597,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_12bpc_vic_1(struct kunit *t
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
rate = drm_hdmi_compute_mode_clock(mode, 12, HDMI_COLORSPACE_RGB);
@@ -1095,7 +1615,7 @@ static void drm_test_drm_hdmi_compute_mode_clock_rgb_double(struct kunit *test)
unsigned long long rate;
struct drm_device *drm = &priv->drm;
- mode = drm_display_mode_from_cea_vic(drm, 6);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 6);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_TRUE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1118,7 +1638,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_valid(struct kunit
unsigned long long rate;
unsigned int vic = *(unsigned int *)test->param_value;
- mode = drm_display_mode_from_cea_vic(drm, vic);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1155,7 +1675,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_10_bpc(struct kuni
drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0];
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, vic);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1180,7 +1700,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv420_12_bpc(struct kuni
drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0];
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, vic);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1203,7 +1723,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_8_bpc(struct kunit
struct drm_device *drm = &priv->drm;
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1225,7 +1745,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_10_bpc(struct kuni
struct drm_device *drm = &priv->drm;
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1247,7 +1767,7 @@ static void drm_test_connector_hdmi_compute_mode_clock_yuv422_12_bpc(struct kuni
struct drm_device *drm = &priv->drm;
unsigned long long rate;
- mode = drm_display_mode_from_cea_vic(drm, 16);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
KUNIT_ASSERT_NOT_NULL(test, mode);
KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1283,6 +1803,9 @@ static struct kunit_suite drm_hdmi_compute_mode_clock_test_suite = {
kunit_test_suites(
&drmm_connector_hdmi_init_test_suite,
&drmm_connector_init_test_suite,
+ &drm_connector_dynamic_init_test_suite,
+ &drm_connector_dynamic_register_early_test_suite,
+ &drm_connector_dynamic_register_test_suite,
&drm_connector_attach_broadcast_rgb_property_test_suite,
&drm_get_tv_mode_from_name_test_suite,
&drm_hdmi_compute_mode_clock_test_suite,
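
The drm_kunit_display_mode_from_cea_vic() conversions above make the returned mode a test-managed resource, so the suites no longer need to free it explicitly; KUnit releases it when the test exits:

	/* mode lifetime is tied to the test, no drm_mode_destroy() needed */
	mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
	KUNIT_ASSERT_NOT_NULL(test, mode);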
diff --git a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
index 89cd9e4f4d32..9e0e2fb65944 100644
--- a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
@@ -199,10 +199,8 @@ static const struct drm_dp_mst_calc_pbn_div_test drm_dp_mst_calc_pbn_div_dp1_4_c
static void drm_test_dp_mst_calc_pbn_div(struct kunit *test)
{
const struct drm_dp_mst_calc_pbn_div_test *params = test->param_value;
- /* mgr->dev is only needed by drm_dbg_kms(), but it's not called for the test cases. */
- struct drm_dp_mst_topology_mgr *mgr = test->priv;
- KUNIT_EXPECT_EQ(test, drm_dp_get_vc_payload_bw(mgr, params->link_rate, params->lane_count).full,
+ KUNIT_EXPECT_EQ(test, drm_dp_get_vc_payload_bw(params->link_rate, params->lane_count).full,
params->expected.full);
}
@@ -568,21 +566,8 @@ static struct kunit_case drm_dp_mst_helper_tests[] = {
{ }
};
-static int drm_dp_mst_helper_tests_init(struct kunit *test)
-{
- struct drm_dp_mst_topology_mgr *mgr;
-
- mgr = kunit_kzalloc(test, sizeof(*mgr), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mgr);
-
- test->priv = mgr;
-
- return 0;
-}
-
static struct kunit_suite drm_dp_mst_helper_test_suite = {
.name = "drm_dp_mst_helper",
- .init = drm_dp_mst_helper_tests_init,
.test_cases = drm_dp_mst_helper_tests,
};
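
With the mgr argument dropped, computing the VC payload bandwidth no longer requires a topology manager at all, which is why the suite's init hook that allocated one could be deleted. The updated call site, as used in the test above:

	/* returns a fixed20_12; compare via the .full member */
	struct fixed20_12 bw = drm_dp_get_vc_payload_bw(link_rate, lane_count);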
diff --git a/drivers/gpu/drm/tests/drm_framebuffer_test.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c
index 06f03b78c9c4..6ea04cc8f324 100644
--- a/drivers/gpu/drm/tests/drm_framebuffer_test.c
+++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c
@@ -5,11 +5,15 @@
* Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
*/
+#include <kunit/device.h>
#include <kunit/test.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_mode.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_kunit_helpers.h>
#include <drm/drm_print.h>
#include "../drm_crtc_internal.h"
@@ -19,6 +23,8 @@
#define MIN_HEIGHT 4
#define MAX_HEIGHT 4096
+#define DRM_MODE_FB_INVALID BIT(2)
+
struct drm_framebuffer_test {
int buffer_created;
struct drm_mode_fb_cmd2 cmd;
@@ -83,6 +89,24 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = {
.pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
+
+/*
+ * All entries in members that represent per-plane values (@modifier, @handles,
+ * @pitches and @offsets) must be zero when unused.
+ */
+{ .buffer_created = 0, .name = "ABGR8888 Buffer offset for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, UINT_MAX / 2, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ }
+},
+
+{ .buffer_created = 0, .name = "ABGR8888 Invalid flag",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_INVALID,
+ }
+},
{ .buffer_created = 1, .name = "ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
@@ -262,6 +286,13 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = {
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
+{ .buffer_created = 0, .name = "YUV420_10BIT Invalid modifier(DRM_FORMAT_MOD_LINEAR)",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YUV420_10BIT,
+ .handles = { 1, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_LINEAR, 0, 0 },
+ .pitches = { MAX_WIDTH, 0, 0 },
+ }
+},
{ .buffer_created = 1, .name = "X0L2 Normal sizes",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 0, 0 }, .pitches = { 1200, 0, 0 }
@@ -317,12 +348,26 @@ static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = {
},
};
+/*
+ * This struct provides a way for mocked functions to communicate with the
+ * outer test when their return value alone is not enough. Functions that
+ * receive the mocked drm_device, for example, can use container_of() to
+ * reach this struct and record results to be checked by a later
+ * expectation.
+ */
+struct drm_framebuffer_test_priv {
+ struct drm_device dev;
+ bool buffer_created;
+ bool buffer_freed;
+};
+
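
The mocks recover this context from the embedded drm_device via container_of(), removing the need to smuggle a pointer through dev->dev_private as before. The pattern in isolation:

	/* dev points at priv->dev, so container_of() walks back to priv */
	struct drm_framebuffer_test_priv *priv =
		container_of(dev, struct drm_framebuffer_test_priv, dev);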
static struct drm_framebuffer *fb_create_mock(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- int *buffer_created = dev->dev_private;
- *buffer_created = 1;
+ struct drm_framebuffer_test_priv *priv = container_of(dev, typeof(*priv), dev);
+
+ priv->buffer_created = true;
return ERR_PTR(-EINVAL);
}
@@ -332,42 +377,338 @@ static struct drm_mode_config_funcs mock_config_funcs = {
static int drm_framebuffer_test_init(struct kunit *test)
{
- struct drm_device *mock;
+ struct device *parent;
+ struct drm_framebuffer_test_priv *priv;
+ struct drm_device *dev;
+
+ parent = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
- mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock);
+ priv = drm_kunit_helper_alloc_drm_device(test, parent, typeof(*priv),
+ dev, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ dev = &priv->dev;
- mock->mode_config.min_width = MIN_WIDTH;
- mock->mode_config.max_width = MAX_WIDTH;
- mock->mode_config.min_height = MIN_HEIGHT;
- mock->mode_config.max_height = MAX_HEIGHT;
- mock->mode_config.funcs = &mock_config_funcs;
+ dev->mode_config.min_width = MIN_WIDTH;
+ dev->mode_config.max_width = MAX_WIDTH;
+ dev->mode_config.min_height = MIN_HEIGHT;
+ dev->mode_config.max_height = MAX_HEIGHT;
+ dev->mode_config.funcs = &mock_config_funcs;
- test->priv = mock;
+ test->priv = priv;
return 0;
}
static void drm_test_framebuffer_create(struct kunit *test)
{
const struct drm_framebuffer_test *params = test->param_value;
- struct drm_device *mock = test->priv;
- int buffer_created = 0;
+ struct drm_framebuffer_test_priv *priv = test->priv;
+ struct drm_device *dev = &priv->dev;
- mock->dev_private = &buffer_created;
- drm_internal_framebuffer_create(mock, &params->cmd, NULL);
- KUNIT_EXPECT_EQ(test, params->buffer_created, buffer_created);
+ priv->buffer_created = false;
+ drm_internal_framebuffer_create(dev, &params->cmd, NULL);
+ KUNIT_EXPECT_EQ(test, params->buffer_created, priv->buffer_created);
}
static void drm_framebuffer_test_to_desc(const struct drm_framebuffer_test *t, char *desc)
{
- strcpy(desc, t->name);
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
KUNIT_ARRAY_PARAM(drm_framebuffer_create, drm_framebuffer_create_cases,
drm_framebuffer_test_to_desc);
+/* Try to create a framebuffer with modifiers on a drm_device that does not support them */
+static void drm_test_framebuffer_modifiers_not_supported(struct kunit *test)
+{
+ struct drm_framebuffer_test_priv *priv = test->priv;
+ struct drm_device *dev = &priv->dev;
+ struct drm_framebuffer *fb;
+
+ /* A valid cmd with modifier */
+ struct drm_mode_fb_cmd2 cmd = {
+ .width = MAX_WIDTH, .height = MAX_HEIGHT,
+ .pixel_format = DRM_FORMAT_ABGR8888, .handles = { 1, 0, 0 },
+ .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ .flags = DRM_MODE_FB_MODIFIERS,
+ };
+
+ priv->buffer_created = false;
+ dev->mode_config.fb_modifiers_not_supported = 1;
+
+ fb = drm_internal_framebuffer_create(dev, &cmd, NULL);
+ KUNIT_EXPECT_EQ(test, priv->buffer_created, false);
+ KUNIT_EXPECT_EQ(test, PTR_ERR(fb), -EINVAL);
+}
+
+/* Parameters for testing drm_framebuffer_check_src_coords function */
+struct drm_framebuffer_check_src_coords_case {
+ const char *name;
+ const int expect;
+ const unsigned int fb_size;
+ const uint32_t src_x;
+ const uint32_t src_y;
+
+	/* Deltas to be applied to the source size */
+ const uint32_t dsrc_w;
+ const uint32_t dsrc_h;
+};
+
+static const struct drm_framebuffer_check_src_coords_case
+drm_framebuffer_check_src_coords_cases[] = {
+ { .name = "Success: source fits into fb",
+ .expect = 0,
+ },
+ { .name = "Fail: overflowing fb with x-axis coordinate",
+ .expect = -ENOSPC, .src_x = 1, .fb_size = UINT_MAX,
+ },
+ { .name = "Fail: overflowing fb with y-axis coordinate",
+ .expect = -ENOSPC, .src_y = 1, .fb_size = UINT_MAX,
+ },
+ { .name = "Fail: overflowing fb with source width",
+ .expect = -ENOSPC, .dsrc_w = 1, .fb_size = UINT_MAX - 1,
+ },
+ { .name = "Fail: overflowing fb with source height",
+ .expect = -ENOSPC, .dsrc_h = 1, .fb_size = UINT_MAX - 1,
+ },
+};
+
+static void drm_test_framebuffer_check_src_coords(struct kunit *test)
+{
+ const struct drm_framebuffer_check_src_coords_case *params = test->param_value;
+ const uint32_t src_x = params->src_x;
+ const uint32_t src_y = params->src_y;
+ const uint32_t src_w = (params->fb_size << 16) + params->dsrc_w;
+ const uint32_t src_h = (params->fb_size << 16) + params->dsrc_h;
+ const struct drm_framebuffer fb = {
+ .width = params->fb_size,
+ .height = params->fb_size
+ };
+ int ret;
+
+ ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, &fb);
+ KUNIT_EXPECT_EQ(test, ret, params->expect);
+}
+
+static void
+check_src_coords_test_to_desc(const struct drm_framebuffer_check_src_coords_case *t,
+ char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(check_src_coords, drm_framebuffer_check_src_coords_cases,
+ check_src_coords_test_to_desc);
+
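
The << 16 shifts in the test reflect that drm_framebuffer_check_src_coords() takes the source rectangle in 16.16 fixed point: integer pixels in the high 16 bits, fraction in the low 16. So the dsrc_w = 1 case overshoots the framebuffer by 1/65536 of a pixel, which is enough for -ENOSPC:

	/* 16.16 fixed point: integer pixels in the high 16 bits */
	uint32_t exact = fb_size << 16;		/* exactly fb_size pixels wide */
	uint32_t over  = (fb_size << 16) + 1;	/* 1/65536 px too wide: -ENOSPC */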
+/*
+ * Test that drm_framebuffer_cleanup() removes the framebuffer object from
+ * the device's fb_list and decrements the number of framebuffers for that
+ * device, which is all it does.
+ */
+static void drm_test_framebuffer_cleanup(struct kunit *test)
+{
+ struct drm_framebuffer_test_priv *priv = test->priv;
+ struct drm_device *dev = &priv->dev;
+ struct list_head *fb_list = &dev->mode_config.fb_list;
+ struct drm_format_info format = { };
+ struct drm_framebuffer fb1 = { .dev = dev, .format = &format };
+ struct drm_framebuffer fb2 = { .dev = dev, .format = &format };
+
+	/* This will result in [fb_list] -> fb2 -> fb1 */
+ drm_framebuffer_init(dev, &fb1, NULL);
+ drm_framebuffer_init(dev, &fb2, NULL);
+
+ drm_framebuffer_cleanup(&fb1);
+
+	/* Now fb2 is the only element on fb_list */
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&fb2.head));
+ KUNIT_ASSERT_EQ(test, dev->mode_config.num_fb, 1);
+
+ drm_framebuffer_cleanup(&fb2);
+
+ /* Now fb_list is empty */
+ KUNIT_ASSERT_TRUE(test, list_empty(fb_list));
+ KUNIT_ASSERT_EQ(test, dev->mode_config.num_fb, 0);
+}
+
+/*
+ * Initialize a framebuffer, look up its id and test that the returned reference
+ * matches.
+ */
+static void drm_test_framebuffer_lookup(struct kunit *test)
+{
+ struct drm_framebuffer_test_priv *priv = test->priv;
+ struct drm_device *dev = &priv->dev;
+ struct drm_format_info format = { };
+ struct drm_framebuffer expected_fb = { .dev = dev, .format = &format };
+ struct drm_framebuffer *returned_fb;
+ uint32_t id = 0;
+ int ret;
+
+ ret = drm_framebuffer_init(dev, &expected_fb, NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ id = expected_fb.base.id;
+
+ /* Looking for expected_fb */
+ returned_fb = drm_framebuffer_lookup(dev, NULL, id);
+ KUNIT_EXPECT_PTR_EQ(test, returned_fb, &expected_fb);
+ drm_framebuffer_put(returned_fb);
+
+ drm_framebuffer_cleanup(&expected_fb);
+}
+
+/* Try to look up an id that is not linked to a framebuffer */
+static void drm_test_framebuffer_lookup_inexistent(struct kunit *test)
+{
+ struct drm_framebuffer_test_priv *priv = test->priv;
+ struct drm_device *dev = &priv->dev;
+ struct drm_framebuffer *fb;
+ uint32_t id = 0;
+
+ /* Looking for an inexistent framebuffer */
+ fb = drm_framebuffer_lookup(dev, NULL, id);
+ KUNIT_EXPECT_NULL(test, fb);
+}
+
+/*
+ * Test if drm_framebuffer_init initializes the framebuffer successfully,
+ * asserting that its modeset object struct and its refcount are correctly
+ * set and that exactly one framebuffer is initialized.
+ */
+static void drm_test_framebuffer_init(struct kunit *test)
+{
+ struct drm_framebuffer_test_priv *priv = test->priv;
+ struct drm_device *dev = &priv->dev;
+ struct drm_format_info format = { };
+ struct drm_framebuffer fb1 = { .dev = dev, .format = &format };
+ struct drm_framebuffer_funcs funcs = { };
+ int ret;
+
+ ret = drm_framebuffer_init(dev, &fb1, &funcs);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* Check if fb->funcs is actually set to the drm_framebuffer_funcs passed on */
+ KUNIT_EXPECT_PTR_EQ(test, fb1.funcs, &funcs);
+
+ /* The fb->comm must be set to the current running process */
+ KUNIT_EXPECT_STREQ(test, fb1.comm, current->comm);
+
+ /* The fb->base must be successfully initialized */
+ KUNIT_EXPECT_NE(test, fb1.base.id, 0);
+ KUNIT_EXPECT_EQ(test, fb1.base.type, DRM_MODE_OBJECT_FB);
+ KUNIT_EXPECT_EQ(test, kref_read(&fb1.base.refcount), 1);
+ KUNIT_EXPECT_PTR_EQ(test, fb1.base.free_cb, &drm_framebuffer_free);
+
+ /* There must be just that one fb initialized */
+ KUNIT_EXPECT_EQ(test, dev->mode_config.num_fb, 1);
+ KUNIT_EXPECT_PTR_EQ(test, dev->mode_config.fb_list.prev, &fb1.head);
+ KUNIT_EXPECT_PTR_EQ(test, dev->mode_config.fb_list.next, &fb1.head);
+
+ drm_framebuffer_cleanup(&fb1);
+}
+
+/* Try to init a framebuffer without setting its format */
+static void drm_test_framebuffer_init_bad_format(struct kunit *test)
+{
+ struct drm_framebuffer_test_priv *priv = test->priv;
+ struct drm_device *dev = &priv->dev;
+ struct drm_framebuffer fb1 = { .dev = dev, .format = NULL };
+ struct drm_framebuffer_funcs funcs = { };
+ int ret;
+
+ /* Fails if fb.format isn't set */
+ ret = drm_framebuffer_init(dev, &fb1, &funcs);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+/*
+ * Test that calling drm_framebuffer_init() with a framebuffer whose parent
+ * drm_device differs from the one passed as the first argument fails.
+ */
+static void drm_test_framebuffer_init_dev_mismatch(struct kunit *test)
+{
+ struct drm_framebuffer_test_priv *priv = test->priv;
+ struct drm_device *right_dev = &priv->dev;
+ struct drm_device *wrong_dev;
+ struct device *wrong_dev_parent;
+ struct drm_format_info format = { };
+ struct drm_framebuffer fb1 = { .dev = right_dev, .format = &format };
+ struct drm_framebuffer_funcs funcs = { };
+ int ret;
+
+ wrong_dev_parent = kunit_device_register(test, "drm-kunit-wrong-device-mock");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, wrong_dev_parent);
+
+ wrong_dev = __drm_kunit_helper_alloc_drm_device(test, wrong_dev_parent,
+ sizeof(struct drm_device),
+ 0, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, wrong_dev);
+
+ /* Fails if fb->dev doesn't point to the drm_device passed on first arg */
+ ret = drm_framebuffer_init(wrong_dev, &fb1, &funcs);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+}
+
+static void destroy_free_mock(struct drm_framebuffer *fb)
+{
+ struct drm_framebuffer_test_priv *priv = container_of(fb->dev, typeof(*priv), dev);
+
+ priv->buffer_freed = true;
+}
+
+static struct drm_framebuffer_funcs framebuffer_funcs_free_mock = {
+ .destroy = destroy_free_mock,
+};
+
+/*
+ * drm_framebuffer_free() must implicitly call fb->funcs->destroy() and
+ * guarantee that the framebuffer object is unregistered from the drm_device
+ * idr pool.
+ */
+static void drm_test_framebuffer_free(struct kunit *test)
+{
+ struct drm_framebuffer_test_priv *priv = test->priv;
+ struct drm_device *dev = &priv->dev;
+ struct drm_mode_object *obj;
+ struct drm_framebuffer fb = {
+ .dev = dev,
+ .funcs = &framebuffer_funcs_free_mock,
+ };
+ int id, ret;
+
+ priv->buffer_freed = false;
+
+ /*
+ * Mock a framebuffer that was not unregistered at the moment of the
+ * drm_framebuffer_free() call.
+ */
+ ret = drm_mode_object_add(dev, &fb.base, DRM_MODE_OBJECT_FB);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ id = fb.base.id;
+
+ drm_framebuffer_free(&fb.base.refcount);
+
+ /* The framebuffer object must be unregistered */
+ obj = drm_mode_object_find(dev, NULL, id, DRM_MODE_OBJECT_FB);
+ KUNIT_EXPECT_PTR_EQ(test, obj, NULL);
+ KUNIT_EXPECT_EQ(test, fb.base.id, 0);
+
+ /* Test if fb->funcs->destroy() was called */
+ KUNIT_EXPECT_EQ(test, priv->buffer_freed, true);
+}
+
static struct kunit_case drm_framebuffer_tests[] = {
+ KUNIT_CASE_PARAM(drm_test_framebuffer_check_src_coords, check_src_coords_gen_params),
+ KUNIT_CASE(drm_test_framebuffer_cleanup),
KUNIT_CASE_PARAM(drm_test_framebuffer_create, drm_framebuffer_create_gen_params),
+ KUNIT_CASE(drm_test_framebuffer_free),
+ KUNIT_CASE(drm_test_framebuffer_init),
+ KUNIT_CASE(drm_test_framebuffer_init_bad_format),
+ KUNIT_CASE(drm_test_framebuffer_init_dev_mismatch),
+ KUNIT_CASE(drm_test_framebuffer_lookup),
+ KUNIT_CASE(drm_test_framebuffer_lookup_inexistent),
+ KUNIT_CASE(drm_test_framebuffer_modifiers_not_supported),
{ }
};
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 34ee95d41f29..b976a5e9aef5 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -46,7 +46,7 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
struct drm_display_mode *mode, *preferred;
mutex_lock(&drm->mode_config.mutex);
- preferred = list_first_entry(&connector->modes, struct drm_display_mode, head);
+ preferred = list_first_entry_or_null(&connector->modes, struct drm_display_mode, head);
list_for_each_entry(mode, &connector->modes, head)
if (mode->type & DRM_MODE_TYPE_PREFERRED)
preferred = mode;
@@ -105,9 +105,8 @@ static int set_connector_edid(struct kunit *test, struct drm_connector *connecto
mutex_lock(&drm->mode_config.mutex);
ret = connector->funcs->fill_modes(connector, 4096, 4096);
mutex_unlock(&drm->mode_config.mutex);
- KUNIT_ASSERT_GT(test, ret, 0);
- return 0;
+ return ret;
}
static const struct drm_connector_hdmi_funcs dummy_connector_hdmi_funcs = {
@@ -125,6 +124,18 @@ static const struct drm_connector_hdmi_funcs reject_connector_hdmi_funcs = {
.tmds_char_rate_valid = reject_connector_tmds_char_rate_valid,
};
+static enum drm_mode_status
+reject_100MHz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
+{
+ return (tmds_rate > 100ULL * 1000 * 1000) ? MODE_BAD : MODE_OK;
+}
+
+static const struct drm_connector_hdmi_funcs reject_100_MHz_connector_hdmi_funcs = {
+ .tmds_char_rate_valid = reject_100MHz_connector_tmds_char_rate_valid,
+};
+
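
Splitting the helper into an _init_funcs() variant lets individual tests inject their own drm_connector_hdmi_funcs, such as the 100 MHz rejecting hook above, while the plain wrapper keeps the default funcs and EDID setup. Sketched usage with the custom hook:

	priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
							  BIT(HDMI_COLORSPACE_RGB), 8,
							  &reject_100_MHz_connector_hdmi_funcs);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);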
static int dummy_connector_get_modes(struct drm_connector *connector)
{
struct drm_atomic_helper_connector_hdmi_priv *priv =
@@ -147,6 +158,7 @@ static int dummy_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs dummy_connector_helper_funcs = {
.atomic_check = drm_atomic_helper_connector_hdmi_check,
.get_modes = dummy_connector_get_modes,
+ .mode_valid = drm_hdmi_connector_mode_valid,
};
static void dummy_hdmi_connector_reset(struct drm_connector *connector)
@@ -164,9 +176,10 @@ static const struct drm_connector_funcs dummy_connector_funcs = {
static
struct drm_atomic_helper_connector_hdmi_priv *
-drm_atomic_helper_connector_hdmi_init(struct kunit *test,
- unsigned int formats,
- unsigned int max_bpc)
+drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
+ unsigned int formats,
+ unsigned int max_bpc,
+ const struct drm_connector_hdmi_funcs *hdmi_funcs)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
@@ -208,7 +221,7 @@ drm_atomic_helper_connector_hdmi_init(struct kunit *test,
ret = drmm_connector_hdmi_init(drm, conn,
"Vendor", "Product",
&dummy_connector_funcs,
- &dummy_connector_hdmi_funcs,
+ hdmi_funcs,
DRM_MODE_CONNECTOR_HDMIA,
NULL,
formats,
@@ -220,10 +233,27 @@ drm_atomic_helper_connector_hdmi_init(struct kunit *test,
drm_mode_config_reset(drm);
- ret = set_connector_edid(test, conn,
+ return priv;
+}
+
+static
+struct drm_atomic_helper_connector_hdmi_priv *
+drm_kunit_helper_connector_hdmi_init(struct kunit *test,
+ unsigned int formats,
+ unsigned int max_bpc)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
+ formats, max_bpc,
+ &dummy_connector_hdmi_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ ret = set_connector_edid(test, &priv->connector,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
return priv;
}
@@ -247,9 +277,9 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
@@ -310,9 +340,9 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
@@ -373,9 +403,9 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -429,9 +459,9 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -441,7 +471,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@@ -485,9 +515,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -543,9 +573,9 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -555,7 +585,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@@ -601,9 +631,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -659,9 +689,9 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -671,7 +701,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
drm = &priv->drm;
@@ -719,16 +749,16 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -793,16 +823,16 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -862,18 +892,18 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_dvi_1080p,
ARRAY_SIZE(test_edid_dvi_1080p));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_FALSE(test, info->is_hdmi);
@@ -911,16 +941,16 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -958,16 +988,16 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -1005,16 +1035,16 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
@@ -1056,9 +1086,9 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
@@ -1112,16 +1142,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1179,18 +1209,18 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1242,11 +1272,11 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
@@ -1254,7 +1284,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1263,7 +1293,7 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
ctx = drm_kunit_helper_acquire_ctx_alloc(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
- mode = drm_display_mode_from_cea_vic(drm, 1);
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
/*
@@ -1305,16 +1335,16 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1370,18 +1400,18 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1438,16 +1468,16 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1496,18 +1526,18 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
struct drm_crtc *crtc;
int ret;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_340mhz));
- KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_ASSERT_GT(test, ret, 0);
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
@@ -1538,6 +1568,57 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
}
+/* Test that atomic check succeeds when disabling a connector. */
+static void drm_test_check_disable_connector(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx *ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ struct drm_display_mode *preferred;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ conn = &priv->connector;
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ crtc_state->active = false;
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
KUNIT_CASE(drm_test_check_broadcast_rgb_auto_cea_mode),
KUNIT_CASE(drm_test_check_broadcast_rgb_auto_cea_mode_vic_1),
@@ -1552,6 +1633,7 @@ static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
*/
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_changed),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_not_changed),
+ KUNIT_CASE(drm_test_check_disable_connector),
KUNIT_CASE(drm_test_check_hdmi_funcs_reject_rate),
KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback),
KUNIT_CASE(drm_test_check_max_tmds_rate_format_fallback),
@@ -1593,9 +1675,9 @@ static void drm_test_check_broadcast_rgb_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1615,9 +1697,9 @@ static void drm_test_check_bpc_8_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1639,9 +1721,9 @@ static void drm_test_check_bpc_10_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1663,9 +1745,9 @@ static void drm_test_check_bpc_12_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1685,11 +1767,11 @@ static void drm_test_check_format_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 8);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1707,11 +1789,11 @@ static void drm_test_check_tmds_char_value(struct kunit *test)
struct drm_connector_state *conn_state;
struct drm_connector *conn;
- priv = drm_atomic_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1734,9 +1816,148 @@ static struct kunit_suite drm_atomic_helper_connector_hdmi_reset_test_suite = {
.test_cases = drm_atomic_helper_connector_hdmi_reset_tests,
};
+/*
+ * Test that the default behaviour of drm_hdmi_connector_mode_valid() is not
+ * to reject any modes. Pass a correct EDID and verify that the preferred
+ * mode matches the expectation (1080p).
+ */
+static void drm_test_check_mode_valid(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+
+ KUNIT_EXPECT_EQ(test, preferred->hdisplay, 1920);
+ KUNIT_EXPECT_EQ(test, preferred->vdisplay, 1080);
+ KUNIT_EXPECT_EQ(test, preferred->clock, 148500);
+}
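For reference, the expected clock value follows directly from the DTD in the test EDID quoted later in this patch (mode->clock is in kHz):

    htotal = 1920 + 88 + 44 + 148 = 2200 pixels
    vtotal = 1080 +  4 +  5 +  36 = 1125 lines
    clock  = 2200 * 1125 * 60 Hz  = 148,500,000 Hz = 148500 kHz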
+
+/*
+ * Test that drm_hdmi_connector_mode_valid() rejects modes according to the
+ * .tmds_char_rate_valid() behaviour.
+ * Pass a correct EDID and verify that high-rate modes are filtered out.
+ */
+static void drm_test_check_mode_valid_reject_rate(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_100_MHz_connector_hdmi_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+
+ ret = set_connector_edid(test, conn,
+ test_edid_hdmi_1080p_rgb_max_200mhz,
+ ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ /*
+ * Unlike in drm_test_check_mode_valid(), 1080p is rejected here, but
+ * 480p is allowed.
+ */
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_EXPECT_EQ(test, preferred->hdisplay, 640);
+ KUNIT_EXPECT_EQ(test, preferred->vdisplay, 480);
+ KUNIT_EXPECT_EQ(test, preferred->clock, 25200);
+}
+
+/*
+ * Test that drm_hdmi_connector_mode_valid() does not mark any mode as valid
+ * if .tmds_char_rate_valid() rejects all of them. Pass a correct EDID and
+ * verify that there is no preferred mode and that no modes were set on the
+ * connector.
+ */
+static void drm_test_check_mode_valid_reject(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_connector_hdmi_funcs);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+
+ /* should reject all modes */
+ ret = set_connector_edid(test, conn,
+ test_edid_hdmi_1080p_rgb_max_200mhz,
+ ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NULL(test, preferred);
+}
+
+/*
+ * Test that drm_hdmi_connector_mode_valid() rejects modes that don't pass
+ * the info.max_tmds_clock filter. Pass a crafted EDID and verify that
+ * high-rate modes are filtered out.
+ */
+static void drm_test_check_mode_valid_reject_max_clock(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_connector *conn;
+ struct drm_display_mode *preferred;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ conn = &priv->connector;
+
+ ret = set_connector_edid(test, conn,
+ test_edid_hdmi_1080p_rgb_max_100mhz,
+ ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_100mhz));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ KUNIT_ASSERT_EQ(test, conn->display_info.max_tmds_clock, 100 * 1000);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_EXPECT_EQ(test, preferred->hdisplay, 640);
+ KUNIT_EXPECT_EQ(test, preferred->vdisplay, 480);
+ KUNIT_EXPECT_EQ(test, preferred->clock, 25200);
+}
+
+static struct kunit_case drm_atomic_helper_connector_hdmi_mode_valid_tests[] = {
+ KUNIT_CASE(drm_test_check_mode_valid),
+ KUNIT_CASE(drm_test_check_mode_valid_reject),
+ KUNIT_CASE(drm_test_check_mode_valid_reject_rate),
+ KUNIT_CASE(drm_test_check_mode_valid_reject_max_clock),
+ { }
+};
+
+static struct kunit_suite drm_atomic_helper_connector_hdmi_mode_valid_test_suite = {
+ .name = "drm_atomic_helper_connector_hdmi_mode_valid",
+ .test_cases = drm_atomic_helper_connector_hdmi_mode_valid_tests,
+};
+
kunit_test_suites(
&drm_atomic_helper_connector_hdmi_check_test_suite,
&drm_atomic_helper_connector_hdmi_reset_test_suite,
+ &drm_atomic_helper_connector_hdmi_mode_valid_test_suite,
);
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
diff --git a/drivers/gpu/drm/tests/drm_kunit_edid.h b/drivers/gpu/drm/tests/drm_kunit_edid.h
index 107559900e97..6358397a5d7a 100644
--- a/drivers/gpu/drm/tests/drm_kunit_edid.h
+++ b/drivers/gpu/drm/tests/drm_kunit_edid.h
@@ -74,6 +74,108 @@ static const unsigned char test_edid_dvi_1080p[] = {
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
*
* 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
+ * 00 12 34 00 14 20 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e4
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 42
+ * Made in: 2023
+ * Basic Display Parameters & Features:
+ * Digital display
+ * DFP 1.x compatible TMDS
+ * Maximum image size: 160 cm x 90 cm
+ * Gamma: 2.20
+ * Monochrome or grayscale display
+ * First detailed timing is the preferred timing
+ * Color Characteristics:
+ * Red : 0.0000, 0.0000
+ * Green: 0.0000, 0.0000
+ * Blue : 0.0000, 0.0000
+ * White: 0.0000, 0.0000
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (1600 mm x 900 mm)
+ * Hfront 88 Hsync 44 Hback 148 Hpol P
+ * Vfront 4 Vsync 5 Vback 36 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 50-70 Hz V, 30-70 kHz H, max dotclock 150 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0x92
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Underscans IT Video Formats by default
+ * Native detailed modes: 1
+ * Colorimetry Data Block:
+ * sRGB
+ * Video Data Block:
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz
+ * Video Capability Data Block:
+ * YCbCr quantization: No Data
+ * RGB quantization: Selectable (via AVI Q)
+ * PT scan behavior: No Data
+ * IT scan behavior: Always Underscanned
+ * CE scan behavior: Always Underscanned
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.2.3.4
+ * Maximum TMDS clock: 100 MHz
+ * Extended HDMI video details:
+ * Checksum: 0xe4 Unused space in Extension Block: 100 bytes
+ */
+static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x01, 0x03, 0x81, 0xa0, 0x5a, 0x78,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38,
+ 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
+ 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81,
+ 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c,
+ 0x00, 0x12, 0x34, 0x00, 0x14, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xe4
+};
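The 100 MHz limit is carried by the Max_TMDS_Clock byte of the HDMI Vendor-Specific Data Block, which encodes the limit in units of 5 MHz:

    HDMI VSDB: 6d 03 0c 00 12 34 00 14 20 ...
                                    ^^
    Max_TMDS_Clock = 0x14 = 20  ->  20 * 5 MHz = 100 MHz
    (the 200 MHz EDID below uses 0x28 = 40  ->  40 * 5 MHz = 200 MHz)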
+
+/*
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 2a 00 00 00 00 00
+ * 00 21 01 03 81 a0 5a 78 02 00 00 00 00 00 00 00
+ * 00 00 00 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 02 3a 80 18 71 38 2d 40 58 2c
+ * 45 00 40 84 63 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 32
+ * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92
+ *
+ * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c
* 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
* 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index aa62719dab0e..3c0b7824c0be 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -3,6 +3,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
@@ -311,6 +312,46 @@ drm_kunit_helper_create_crtc(struct kunit *test,
}
EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc);
+static void kunit_action_drm_mode_destroy(void *ptr)
+{
+ struct drm_display_mode *mode = ptr;
+
+ drm_mode_destroy(NULL, mode);
+}
+
+/**
+ * drm_kunit_display_mode_from_cea_vic() - return a mode for a given CEA VIC for a KUnit test
+ * @test: The test context object
+ * @dev: DRM device
+ * @video_code: CEA VIC of the mode
+ *
+ * Creates a new mode matching the specified CEA VIC for a KUnit test.
+ *
+ * Resources will be cleaned up automatically.
+ *
+ * Returns: A new drm_display_mode on success or NULL on failure
+ */
+struct drm_display_mode *
+drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
+ u8 video_code)
+{
+ struct drm_display_mode *mode;
+ int ret;
+
+ mode = drm_display_mode_from_cea_vic(dev, video_code);
+ if (!mode)
+ return NULL;
+
+ ret = kunit_add_action_or_reset(test,
+ kunit_action_drm_mode_destroy,
+ mode);
+ if (ret)
+ return NULL;
+
+ return mode;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_display_mode_from_cea_vic);
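A minimal sketch of how a test might consume the new helper. The test body and the way `drm` is obtained are hypothetical; the assertions mirror the ones used elsewhere in this patch.

/* Hypothetical example; assumes the suite's init stashed a drm_device in priv. */
static void drm_test_example_vic_16(struct kunit *test)
{
	struct drm_device *drm = test->priv;
	struct drm_display_mode *mode;

	mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16);
	KUNIT_ASSERT_NOT_NULL(test, mode);

	/* VIC 16 is 1920x1080@60; no drm_mode_destroy() call is needed,
	 * the helper registered a cleanup action against the test. */
	KUNIT_EXPECT_EQ(test, mode->hdisplay, 1920);
	KUNIT_EXPECT_EQ(test, mode->vdisplay, 1080);
}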
+
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_DESCRIPTION("KUnit test suite helper functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
index 2385c56493b9..31ad582b7602 100644
--- a/drivers/gpu/drm/tidss/Kconfig
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -2,6 +2,7 @@ config DRM_TIDSS
tristate "DRM Support for TI Keystone"
depends on DRM && OF
depends on ARM || ARM64 || COMPILE_TEST
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_DISPLAY_HELPER
select DRM_BRIDGE_CONNECTOR
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 1ad711f8d2a8..cacb5f3d8085 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -700,7 +700,7 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
{
dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
- /* clear the irqstatus for newly enabled irqs */
+ /* clear the irqstatus for irqs that will be enabled */
dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
dispc_k2g_vp_set_irqenable(dispc, 0, mask);
@@ -708,6 +708,9 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
+ /* clear the irqstatus for irqs that were disabled */
+ dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & old_mask);
+
/* flush posted write */
dispc_k2g_read_irqenable(dispc);
}
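Both the k2g and k3 variants rely on the same XOR/AND bit arithmetic: `(mask ^ old_mask) & mask` selects the bits about to turn on, while `(mask ^ old_mask) & old_mask` selects the bits that just turned off. A standalone sketch of the arithmetic, with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t old_mask = 0x0033;	/* currently enabled irqs */
	uint32_t mask     = 0x00f0;	/* requested enable mask */

	/* bits going 0 -> 1: clear stale status before enabling */
	printf("newly enabled: 0x%04x\n", (mask ^ old_mask) & mask);     /* 0x00c0 */
	/* bits going 1 -> 0: clear status left behind after disabling */
	printf("just disabled: 0x%04x\n", (mask ^ old_mask) & old_mask); /* 0x0003 */
	return 0;
}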
@@ -780,24 +783,18 @@ static
void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
{
unsigned int i;
- u32 top_clear = 0;
for (i = 0; i < dispc->feat->num_vps; ++i) {
- if (clearmask & DSS_IRQ_VP_MASK(i)) {
+ if (clearmask & DSS_IRQ_VP_MASK(i))
dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
- top_clear |= BIT(i);
- }
}
for (i = 0; i < dispc->feat->num_planes; ++i) {
- if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
+ if (clearmask & DSS_IRQ_PLANE_MASK(i))
dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
- top_clear |= BIT(4 + i);
- }
}
- if (dispc->feat->subrev == DISPC_K2G)
- return;
- dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
+ /* always clear the top level irqstatus */
+ dispc_write(dispc, DISPC_IRQSTATUS, dispc_read(dispc, DISPC_IRQSTATUS));
/* Flush posted writes */
dispc_read(dispc, DISPC_IRQSTATUS);
@@ -843,7 +840,7 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
old_mask = dispc_k3_read_irqenable(dispc);
- /* clear the irqstatus for newly enabled irqs */
+ /* clear the irqstatus for irqs that will be enabled */
dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
for (i = 0; i < dispc->feat->num_vps; ++i) {
@@ -868,6 +865,9 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
if (main_disable)
dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+ /* clear the irqstatus for irqs that were disabled */
+ dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & old_mask);
+
/* Flush posted writes */
dispc_read(dispc, DISPC_IRQENABLE_SET);
}
@@ -2767,8 +2767,12 @@ static void dispc_init_errata(struct dispc_device *dispc)
*/
static void dispc_softreset_k2g(struct dispc_device *dispc)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dispc->tidss->irq_lock, flags);
dispc_set_irqenable(dispc, 0);
dispc_read_and_clear_irqstatus(dispc);
+ spin_unlock_irqrestore(&dispc->tidss->irq_lock, flags);
for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index d15f836dca95..d4652e8cc28c 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@@ -109,9 +110,9 @@ static const struct drm_driver tidss_driver = {
.fops = &tidss_fops,
.release = tidss_release,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.name = "tidss",
.desc = "TI Keystone DSS",
- .date = "20180215",
.major = 1,
.minor = 0,
};
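The same fbdev conversion recurs across the drivers touched below (tilcdc, arcpgu, bochs). A minimal sketch of the pattern for a DMA-GEM driver — the names here are illustrative, not taken from the patch:

#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
	DRM_FBDEV_DMA_DRIVER_OPS,	/* fbdev client hooks, new */
	.name	= "example",
	.desc	= "illustrative driver",	/* note: no .date field anymore */
	.major	= 1,
	.minor	= 0,
};

static void example_setup_fbdev(struct drm_device *ddev)
{
	/* replaces drm_fbdev_dma_setup(ddev, bpp); passing NULL lets the
	 * client helper pick a suitable format for the device */
	drm_client_setup(ddev, NULL);
}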
@@ -138,7 +139,7 @@ static int tidss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tidss);
- spin_lock_init(&tidss->wait_lock);
+ spin_lock_init(&tidss->irq_lock);
ret = dispc_init(tidss);
if (ret) {
@@ -186,7 +187,7 @@ static int tidss_probe(struct platform_device *pdev)
goto err_irq_uninstall;
}
- drm_fbdev_dma_setup(ddev, 32);
+ drm_client_setup(ddev, NULL);
dev_dbg(dev, "%s done\n", __func__);
@@ -250,7 +251,7 @@ MODULE_DEVICE_TABLE(of, tidss_of_table);
static struct platform_driver tidss_platform_driver = {
.probe = tidss_probe,
- .remove_new = tidss_remove,
+ .remove = tidss_remove,
.shutdown = tidss_shutdown,
.driver = {
.name = "tidss",
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index d7f27b0b0315..7f4f4282bc04 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -29,8 +29,9 @@ struct tidss_device {
unsigned int irq;
- spinlock_t wait_lock; /* protects the irq masks */
- dispc_irq_t irq_mask; /* enabled irqs in addition to wait_list */
+ /* protects the irq masks field and irqenable/irqstatus registers */
+ spinlock_t irq_lock;
+ dispc_irq_t irq_mask; /* enabled irqs */
};
#define to_tidss(__dev) container_of(__dev, struct tidss_device, ddev)
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 604334ef526a..5abc788781f4 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -15,10 +15,9 @@
#include "tidss_irq.h"
#include "tidss_plane.h"
-/* call with wait_lock and dispc runtime held */
static void tidss_irq_update(struct tidss_device *tidss)
{
- assert_spin_locked(&tidss->wait_lock);
+ assert_spin_locked(&tidss->irq_lock);
dispc_set_irqenable(tidss->dispc, tidss->irq_mask);
}
@@ -31,11 +30,11 @@ void tidss_irq_enable_vblank(struct drm_crtc *crtc)
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ spin_lock_irqsave(&tidss->irq_lock, flags);
tidss->irq_mask |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
tidss_irq_update(tidss);
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
+ spin_unlock_irqrestore(&tidss->irq_lock, flags);
}
void tidss_irq_disable_vblank(struct drm_crtc *crtc)
@@ -46,11 +45,11 @@ void tidss_irq_disable_vblank(struct drm_crtc *crtc)
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ spin_lock_irqsave(&tidss->irq_lock, flags);
tidss->irq_mask &= ~(DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
DSS_IRQ_VP_VSYNC_ODD(hw_videoport));
tidss_irq_update(tidss);
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
+ spin_unlock_irqrestore(&tidss->irq_lock, flags);
}
static irqreturn_t tidss_irq_handler(int irq, void *arg)
@@ -60,7 +59,9 @@ static irqreturn_t tidss_irq_handler(int irq, void *arg)
unsigned int id;
dispc_irq_t irqstatus;
+ spin_lock(&tidss->irq_lock);
irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
+ spin_unlock(&tidss->irq_lock);
for (id = 0; id < tidss->num_crtcs; id++) {
struct drm_crtc *crtc = tidss->crtcs[id];
@@ -78,8 +79,13 @@ static irqreturn_t tidss_irq_handler(int irq, void *arg)
tidss_crtc_error_irq(crtc, irqstatus);
}
- if (irqstatus & DSS_IRQ_DEVICE_OCP_ERR)
- dev_err_ratelimited(tidss->dev, "OCP error\n");
+ for (unsigned int i = 0; i < tidss->num_planes; ++i) {
+ struct drm_plane *plane = tidss->planes[i];
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ if (irqstatus & DSS_IRQ_PLANE_FIFO_UNDERFLOW(tplane->hw_plane_id))
+ tidss_plane_error_irq(plane, irqstatus);
+ }
return IRQ_HANDLED;
}
@@ -88,9 +94,9 @@ void tidss_irq_resume(struct tidss_device *tidss)
{
unsigned long flags;
- spin_lock_irqsave(&tidss->wait_lock, flags);
+ spin_lock_irqsave(&tidss->irq_lock, flags);
tidss_irq_update(tidss);
- spin_unlock_irqrestore(&tidss->wait_lock, flags);
+ spin_unlock_irqrestore(&tidss->irq_lock, flags);
}
int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
@@ -105,7 +111,7 @@ int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
if (ret)
return ret;
- tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
+ tidss->irq_mask = 0;
for (unsigned int i = 0; i < tidss->num_crtcs; ++i) {
struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
@@ -115,6 +121,12 @@ int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
}
+ for (unsigned int i = 0; i < tidss->num_planes; ++i) {
+ struct tidss_plane *tplane = to_tidss_plane(tidss->planes[i]);
+
+ tidss->irq_mask |= DSS_IRQ_PLANE_FIFO_UNDERFLOW(tplane->hw_plane_id);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/tidss/tidss_irq.h b/drivers/gpu/drm/tidss/tidss_irq.h
index b512614d5863..dd61f645f662 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.h
+++ b/drivers/gpu/drm/tidss/tidss_irq.h
@@ -19,15 +19,13 @@
* bit use |D |fou|FEOL|FEOL|FEOL|FEOL| UUUU | |
* bit number|0 |1-3|4-7 |8-11| 12-19 | 20-23 | 24-31 |
*
- * device bits: D = OCP error
+ * device bits: D = Unused
* WB bits: f = frame done wb, o = wb buffer overflow,
* u = wb buffer uncomplete
* vp bits: F = frame done, E = vsync even, O = vsync odd, L = sync lost
* plane bits: U = fifo underflow
*/
-#define DSS_IRQ_DEVICE_OCP_ERR BIT(0)
-
#define DSS_IRQ_DEVICE_FRAMEDONEWB BIT(1)
#define DSS_IRQ_DEVICE_WBBUFFEROVERFLOW BIT(2)
#define DSS_IRQ_DEVICE_WBUNCOMPLETEERROR BIT(3)
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index a5d86822c9e3..116de124bddb 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -18,6 +18,14 @@
#include "tidss_drv.h"
#include "tidss_plane.h"
+void tidss_plane_error_irq(struct drm_plane *plane, u64 irqstatus)
+{
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dev_err_ratelimited(plane->dev->dev, "Plane%u underflow (irq %llx)\n",
+ tplane->hw_plane_id, irqstatus);
+}
+
/* drm_plane_helper_funcs */
static int tidss_plane_atomic_check(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
index e933e158b617..aecaf2728406 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.h
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -22,4 +22,6 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
u32 crtc_mask, const u32 *formats,
u32 num_formats);
+void tidss_plane_error_irq(struct drm_plane *plane, u64 irqstatus);
+
#endif
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index d3bd2d7a181e..24f9a245ba59 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,6 +2,7 @@
config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_BRIDGE
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index cd5eefa06060..7caec4d38ddf 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
@@ -374,7 +375,8 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
goto init_failed;
priv->is_registered = true;
- drm_fbdev_dma_setup(ddev, bpp);
+ drm_client_setup_with_color_mode(ddev, bpp);
+
return 0;
init_failed:
@@ -472,13 +474,13 @@ DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver tilcdc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
#endif
.fops = &fops,
.name = "tilcdc",
.desc = "TI LCD Controller DRM",
- .date = "20121205",
.major = 1,
.minor = 0,
};
@@ -587,7 +589,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match);
static struct platform_driver tilcdc_platform_driver = {
.probe = tilcdc_pdev_probe,
- .remove_new = tilcdc_pdev_remove,
+ .remove = tilcdc_pdev_remove,
.shutdown = tilcdc_pdev_shutdown,
.driver = {
.name = "tilcdc",
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 5f2d1b6f9ee9..262f290d85d9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -390,7 +390,7 @@ static const struct of_device_id panel_of_match[] = {
static struct platform_driver panel_driver = {
.probe = panel_probe,
- .remove_new = panel_remove,
+ .remove = panel_remove,
.driver = {
.name = "tilcdc-panel",
.of_match_table = panel_of_match,
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index f6889f649bc1..94cbdb1337c0 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -3,6 +3,7 @@
config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
+ select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
help
@@ -13,10 +14,9 @@ config DRM_ARCPGU
config DRM_BOCHS
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
depends on DRM && PCI && MMU
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
- select DRM_VRAM_HELPER
- select DRM_TTM
- select DRM_TTM_HELPER
help
This is a KMS driver for qemu's stdvga output. Choose this option
for qemu.
@@ -26,6 +26,7 @@ config DRM_BOCHS
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
depends on DRM && PCI && MMU
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
help
@@ -45,6 +46,7 @@ config DRM_CIRRUS_QEMU
config DRM_GM12U320
tristate "GM12U320 driver for USB projectors"
depends on DRM && USB && MMU
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
help
@@ -55,6 +57,7 @@ config DRM_OFDRM
tristate "Open Firmware display driver"
depends on DRM && MMU && OF && (PPC || COMPILE_TEST)
select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
@@ -67,6 +70,7 @@ config DRM_OFDRM
config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
@@ -83,6 +87,7 @@ config DRM_SIMPLEDRM
tristate "Simple framebuffer driver"
depends on DRM && MMU
select APERTURE_HELPERS
+ select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
@@ -99,6 +104,7 @@ config DRM_SIMPLEDRM
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
@@ -113,6 +119,7 @@ config TINYDRM_ILI9163
tristate "DRM support for ILI9163 display panels"
depends on DRM && SPI
select BACKLIGHT_CLASS_DEVICE
+ select DRM_CLIENT_SELECTION
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DBI
@@ -125,6 +132,7 @@ config TINYDRM_ILI9163
config TINYDRM_ILI9225
tristate "DRM support for ILI9225 display panels"
depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
@@ -137,6 +145,7 @@ config TINYDRM_ILI9225
config TINYDRM_ILI9341
tristate "DRM support for ILI9341 display panels"
depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
@@ -150,6 +159,7 @@ config TINYDRM_ILI9341
config TINYDRM_ILI9486
tristate "DRM support for ILI9486 display panels"
depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
@@ -164,6 +174,7 @@ config TINYDRM_ILI9486
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
@@ -175,6 +186,7 @@ config TINYDRM_MI0283QT
config TINYDRM_REPAPER
tristate "DRM support for Pervasive Displays RePaper panels (V231)"
depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
help
@@ -186,9 +198,31 @@ config TINYDRM_REPAPER
If M is selected the module will be called repaper.
+config TINYDRM_SHARP_MEMORY
+ tristate "DRM support for Sharp Memory LCD panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_DMA_HELPER
+ select DRM_KMS_HELPER
+ help
+ DRM Driver for the following Sharp Memory Panels:
+ * 1.00" Sharp Memory LCD (LS010B7DH04)
+ * 1.10" Sharp Memory LCD (LS011B7DH03)
+ * 1.20" Sharp Memory LCD (LS012B7DD01)
+ * 1.28" Sharp Memory LCD (LS013B7DH03)
+ * 1.26" Sharp Memory LCD (LS013B7DH05)
+ * 1.80" Sharp Memory LCD (LS018B7DH02)
+ * 2.70" Sharp Memory LCD (LS027B7DH01)
+ * 2.70" Sharp Memory LCD (LS027B7DH01A)
+ * 3.20" Sharp Memory LCD (LS032B7DD02)
+ * 4.40" Sharp Memory LCD (LS044Q7DH01)
+
+ If M is selected the module will be called sharp_memory.
+
config TINYDRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels"
depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
@@ -201,6 +235,7 @@ config TINYDRM_ST7586
config TINYDRM_ST7735R
tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 76dde89a044b..60816d2eb4ff 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
@@ -14,5 +14,6 @@ obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
+obj-$(CONFIG_TINYDRM_SHARP_MEMORY) += sharp-memory.o
obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
obj-$(CONFIG_TINYDRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index 4f8f3172379e..2748d1f21d86 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -6,6 +6,8 @@
*/
#include <linux/clk.h>
+
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
@@ -288,7 +290,7 @@ static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
* There is only one output port inside each device. It is linked with
* encoder endpoint.
*/
- endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ endpoint_node = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 0, -1);
if (endpoint_node) {
encoder_node = of_graph_get_remote_port_parent(endpoint_node);
of_node_put(endpoint_node);
@@ -365,12 +367,12 @@ static const struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "arcpgu",
.desc = "ARC PGU Controller",
- .date = "20160219",
.major = 1,
.minor = 0,
.patchlevel = 0,
.fops = &arcpgu_drm_ops,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = arcpgu_debugfs_init,
#endif
@@ -394,7 +396,7 @@ static int arcpgu_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- drm_fbdev_dma_setup(&arcpgu->drm, 16);
+ drm_client_setup_with_fourcc(&arcpgu->drm, DRM_FORMAT_RGB565);
return 0;
@@ -421,7 +423,7 @@ MODULE_DEVICE_TABLE(of, arcpgu_of_table);
static struct platform_driver arcpgu_platform_driver = {
.probe = arcpgu_probe,
- .remove_new = arcpgu_remove,
+ .remove = arcpgu_remove,
.driver = {
.name = "arcpgu",
.of_match_table = arcpgu_of_table,
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 31fc5d839e10..c67e1f906785 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -1,21 +1,26 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/bug.h>
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fbdev_ttm.h>
+#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <video/vga.h>
@@ -71,6 +76,8 @@ enum bochs_types {
};
struct bochs_device {
+ struct drm_device dev;
+
/* hw */
void __iomem *mmio;
int ioports;
@@ -85,22 +92,32 @@ struct bochs_device {
u16 yres_virtual;
u32 stride;
u32 bpp;
- const struct drm_edid *drm_edid;
/* drm */
- struct drm_device *dev;
- struct drm_simple_display_pipe pipe;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector connector;
};
+static struct bochs_device *to_bochs_device(const struct drm_device *dev)
+{
+ return container_of(dev, struct bochs_device, dev);
+}
+
/* ---------------------------------------------------------------------- */
+static __always_inline bool bochs_uses_mmio(struct bochs_device *bochs)
+{
+ return !IS_ENABLED(CONFIG_HAS_IOPORT) || bochs->mmio;
+}
+
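In effect the helper encodes this decision table (a reading of the expression above, not new behaviour):

    CONFIG_HAS_IOPORT=n              -> MMIO always (port accessors compiled out)
    CONFIG_HAS_IOPORT=y, mmio != 0   -> MMIO window at BAR 2
    CONFIG_HAS_IOPORT=y, mmio == 0   -> legacy VGA/dispi I/O ports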
static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val)
{
if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
return;
- if (bochs->mmio) {
+ if (bochs_uses_mmio(bochs)) {
int offset = ioport - 0x3c0 + 0x400;
writeb(val, bochs->mmio + offset);
@@ -114,7 +131,7 @@ static u8 bochs_vga_readb(struct bochs_device *bochs, u16 ioport)
if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
return 0xff;
- if (bochs->mmio) {
+ if (bochs_uses_mmio(bochs)) {
int offset = ioport - 0x3c0 + 0x400;
return readb(bochs->mmio + offset);
@@ -127,7 +144,7 @@ static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg)
{
u16 ret = 0;
- if (bochs->mmio) {
+ if (bochs_uses_mmio(bochs)) {
int offset = 0x500 + (reg << 1);
ret = readw(bochs->mmio + offset);
@@ -140,7 +157,7 @@ static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg)
static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
{
- if (bochs->mmio) {
+ if (bochs_uses_mmio(bochs)) {
int offset = 0x500 + (reg << 1);
writew(val, bochs->mmio + offset);
@@ -172,12 +189,14 @@ static void bochs_hw_set_little_endian(struct bochs_device *bochs)
#define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b)
#endif
-static int bochs_get_edid_block(void *data, u8 *buf,
- unsigned int block, size_t len)
+static int bochs_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
struct bochs_device *bochs = data;
size_t i, start = block * EDID_LENGTH;
+ if (!bochs->mmio)
+ return -1;
+
if (start + len > 0x400 /* vga register offset */)
return -1;
@@ -187,55 +206,53 @@ static int bochs_get_edid_block(void *data, u8 *buf,
return 0;
}
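Since EDID blocks are EDID_LENGTH (128) bytes, the 0x400-byte window reserved ahead of the VGA registers bounds how much EDID the stub can expose:

    max blocks = 0x400 / 128 = 8   (base block plus up to 7 extensions)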
-static int bochs_hw_load_edid(struct bochs_device *bochs)
+static const struct drm_edid *bochs_hw_read_edid(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+ struct bochs_device *bochs = to_bochs_device(dev);
u8 header[8];
- if (!bochs->mmio)
- return -1;
-
/* check the header to detect whether EDID support is enabled in qemu */
bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header));
if (drm_edid_header_is_valid(header) != 8)
- return -1;
+ return NULL;
- drm_edid_free(bochs->drm_edid);
- bochs->drm_edid = drm_edid_read_custom(&bochs->connector,
- bochs_get_edid_block, bochs);
- if (!bochs->drm_edid)
- return -1;
+ drm_dbg(dev, "Found EDID data blob.\n");
- return 0;
+ return drm_edid_read_custom(connector, bochs_get_edid_block, bochs);
}
-static int bochs_hw_init(struct drm_device *dev)
+static int bochs_hw_init(struct bochs_device *bochs)
{
- struct bochs_device *bochs = dev->dev_private;
+ struct drm_device *dev = &bochs->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long addr, size, mem, ioaddr, iosize;
u16 id;
if (pdev->resource[2].flags & IORESOURCE_MEM) {
+ ioaddr = pci_resource_start(pdev, 2);
+ iosize = pci_resource_len(pdev, 2);
/* mmio bar with vga and bochs registers present */
- if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
+ if (!devm_request_mem_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) {
DRM_ERROR("Cannot request mmio region\n");
return -EBUSY;
}
- ioaddr = pci_resource_start(pdev, 2);
- iosize = pci_resource_len(pdev, 2);
- bochs->mmio = ioremap(ioaddr, iosize);
+ bochs->mmio = devm_ioremap(&pdev->dev, ioaddr, iosize);
if (bochs->mmio == NULL) {
DRM_ERROR("Cannot map mmio region\n");
return -ENOMEM;
}
- } else {
+ } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) {
ioaddr = VBE_DISPI_IOPORT_INDEX;
iosize = 2;
- if (!request_region(ioaddr, iosize, "bochs-drm")) {
+ if (!devm_request_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) {
DRM_ERROR("Cannot request ioports\n");
return -EBUSY;
}
bochs->ioports = 1;
+ } else {
+ dev_err(dev->dev, "I/O ports are not supported\n");
+ return -EIO;
}
id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID);
@@ -258,10 +275,10 @@ static int bochs_hw_init(struct drm_device *dev)
size = min(size, mem);
}
- if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+ if (!devm_request_mem_region(&pdev->dev, addr, size, "bochs-drm"))
DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
- bochs->fb_map = ioremap(addr, size);
+ bochs->fb_map = devm_ioremap_wc(&pdev->dev, addr, size);
if (bochs->fb_map == NULL) {
DRM_ERROR("Cannot map framebuffer\n");
return -ENOMEM;
@@ -290,22 +307,6 @@ noext:
return 0;
}
-static void bochs_hw_fini(struct drm_device *dev)
-{
- struct bochs_device *bochs = dev->dev_private;
-
- /* TODO: shot down existing vram mappings */
-
- if (bochs->mmio)
- iounmap(bochs->mmio);
- if (bochs->ioports)
- release_region(VBE_DISPI_IOPORT_INDEX, 2);
- if (bochs->fb_map)
- iounmap(bochs->fb_map);
- pci_release_regions(to_pci_dev(dev->dev));
- drm_edid_free(bochs->drm_edid);
-}
-
static void bochs_hw_blank(struct bochs_device *bochs, bool blank)
{
DRM_DEBUG_DRIVER("hw_blank %d\n", blank);
@@ -321,7 +322,7 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode
{
int idx;
- if (!drm_dev_enter(bochs->dev, &idx))
+ if (!drm_dev_enter(&bochs->dev, &idx))
return;
bochs->xres = mode->hdisplay;
@@ -357,7 +358,7 @@ static void bochs_hw_setformat(struct bochs_device *bochs, const struct drm_form
{
int idx;
- if (!drm_dev_enter(bochs->dev, &idx))
+ if (!drm_dev_enter(&bochs->dev, &idx))
return;
DRM_DEBUG_DRIVER("format %c%c%c%c\n",
@@ -388,7 +389,7 @@ static void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, int strid
unsigned long offset;
unsigned int vx, vy, vwidth, idx;
- if (!drm_dev_enter(bochs->dev, &idx))
+ if (!drm_dev_enter(&bochs->dev, &idx))
return;
bochs->stride = stride;
@@ -410,83 +411,156 @@ static void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, int strid
/* ---------------------------------------------------------------------- */
-static const uint32_t bochs_formats[] = {
+static const uint32_t bochs_primary_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_BGRX8888,
};
-static void bochs_plane_update(struct bochs_device *bochs, struct drm_plane_state *state)
+static int bochs_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct drm_gem_vram_object *gbo;
- s64 gpu_addr;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
- if (!state->fb || !bochs->stride)
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ return 0;
+}
+
+static void bochs_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct bochs_device *bochs = to_bochs_device(dev);
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+
+ if (!fb || !bochs->stride)
return;
- gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
- gpu_addr = drm_gem_vram_offset(gbo);
- if (WARN_ON_ONCE(gpu_addr < 0))
- return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(bochs->fb_map);
+
+ iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, &damage));
+ drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage);
+ }
+ /* Always scanout image at VRAM offset 0 */
bochs_hw_setbase(bochs,
- state->crtc_x,
- state->crtc_y,
- state->fb->pitches[0],
- state->fb->offsets[0] + gpu_addr);
- bochs_hw_setformat(bochs, state->fb->format);
+ plane_state->crtc_x,
+ plane_state->crtc_y,
+ fb->pitches[0],
+ 0);
+ bochs_hw_setformat(bochs, fb->format);
}
-static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static const struct drm_plane_helper_funcs bochs_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = bochs_primary_plane_helper_atomic_check,
+ .atomic_update = bochs_primary_plane_helper_atomic_update,
+};
+
+static const struct drm_plane_funcs bochs_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS
+};
+
+static void bochs_crtc_helper_mode_set_nofb(struct drm_crtc *crtc)
{
- struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+ struct bochs_device *bochs = to_bochs_device(crtc->dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
bochs_hw_setmode(bochs, &crtc_state->mode);
- bochs_plane_update(bochs, plane_state);
}
-static void bochs_pipe_disable(struct drm_simple_display_pipe *pipe)
+static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- bochs_hw_blank(bochs, true);
+ if (!crtc_state->enable)
+ return 0;
+
+ return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
+}
+
+static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
}
-static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *crtc_state)
{
- struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+ struct bochs_device *bochs = to_bochs_device(crtc->dev);
- bochs_plane_update(bochs, pipe->plane.state);
+ bochs_hw_blank(bochs, true);
}
-static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
- .enable = bochs_pipe_enable,
- .disable = bochs_pipe_disable,
- .update = bochs_pipe_update,
- .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
- .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
+static const struct drm_crtc_helper_funcs bochs_crtc_helper_funcs = {
+ .mode_set_nofb = bochs_crtc_helper_mode_set_nofb,
+ .atomic_check = bochs_crtc_helper_atomic_check,
+ .atomic_enable = bochs_crtc_helper_atomic_enable,
+ .atomic_disable = bochs_crtc_helper_atomic_disable,
};
-static int bochs_connector_get_modes(struct drm_connector *connector)
+static const struct drm_crtc_funcs bochs_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_encoder_funcs bochs_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int bochs_connector_helper_get_modes(struct drm_connector *connector)
{
+ const struct drm_edid *edid;
int count;
- count = drm_edid_connector_add_modes(connector);
+ edid = bochs_hw_read_edid(connector);
- if (!count) {
+ if (edid) {
+ drm_edid_connector_update(connector, edid);
+ count = drm_edid_connector_add_modes(connector);
+ drm_edid_free(edid);
+ } else {
+ drm_edid_connector_update(connector, NULL);
count = drm_add_modes_noedid(connector, 8192, 8192);
drm_set_preferred_mode(connector, defx, defy);
}
+
return count;
}
-static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
- .get_modes = bochs_connector_get_modes,
+static const struct drm_connector_helper_funcs bochs_connector_helper_funcs = {
+ .get_modes = bochs_connector_helper_get_modes,
};
-static const struct drm_connector_funcs bochs_connector_connector_funcs = {
+static const struct drm_connector_funcs bochs_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -494,68 +568,89 @@ static const struct drm_connector_funcs bochs_connector_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static void bochs_connector_init(struct drm_device *dev)
+static enum drm_mode_status bochs_mode_config_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
{
- struct bochs_device *bochs = dev->dev_private;
- struct drm_connector *connector = &bochs->connector;
-
- drm_connector_init(dev, connector, &bochs_connector_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
- drm_connector_helper_add(connector, &bochs_connector_connector_helper_funcs);
-
- bochs_hw_load_edid(bochs);
- if (bochs->drm_edid) {
- DRM_INFO("Found EDID data blob.\n");
- drm_connector_attach_edid_property(connector);
- drm_edid_connector_update(&bochs->connector, bochs->drm_edid);
- }
-}
+ struct bochs_device *bochs = to_bochs_device(dev);
+ const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888);
+ u64 pitch;
-static struct drm_framebuffer *
-bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 &&
- mode_cmd->pixel_format != DRM_FORMAT_BGRX8888)
- return ERR_PTR(-EINVAL);
+ if (drm_WARN_ON(dev, !format))
+ return MODE_ERROR;
- return drm_gem_fb_create(dev, file, mode_cmd);
+ pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
+ if (!pitch)
+ return MODE_BAD_WIDTH;
+ if (mode->vdisplay > DIV_ROUND_DOWN_ULL(bochs->fb_size, pitch))
+ return MODE_MEM;
+
+ return MODE_OK;
}
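Plugging in numbers makes the check concrete. qemu's stdvga defaults to 16 MiB of VRAM; the figures below are illustrative, not from the patch:

    pitch(1920, XRGB8888) = 1920 * 4       = 7680 bytes
    max vdisplay          = 16 MiB / 7680  = 2184 lines  -> 1920x1080 is MODE_OK
    pitch(8192, XRGB8888) = 8192 * 4       = 32768 bytes
    max vdisplay          = 16 MiB / 32768 = 512 lines   -> 8192x8192 is MODE_MEM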
-static const struct drm_mode_config_funcs bochs_mode_funcs = {
- .fb_create = bochs_gem_fb_create,
- .mode_valid = drm_vram_helper_mode_valid,
+static const struct drm_mode_config_funcs bochs_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = bochs_mode_config_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int bochs_kms_init(struct bochs_device *bochs)
{
+ struct drm_device *dev = &bochs->dev;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
int ret;
- ret = drmm_mode_config_init(bochs->dev);
+ ret = drmm_mode_config_init(dev);
if (ret)
return ret;
- bochs->dev->mode_config.max_width = 8192;
- bochs->dev->mode_config.max_height = 8192;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
- bochs->dev->mode_config.preferred_depth = 24;
- bochs->dev->mode_config.prefer_shadow = 0;
- bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
- bochs->dev->mode_config.funcs = &bochs_mode_funcs;
+ dev->mode_config.funcs = &bochs_mode_config_funcs;
- bochs_connector_init(bochs->dev);
- drm_simple_display_pipe_init(bochs->dev,
- &bochs->pipe,
- &bochs_pipe_funcs,
- bochs_formats,
- ARRAY_SIZE(bochs_formats),
- NULL,
- &bochs->connector);
+ primary_plane = &bochs->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &bochs_primary_plane_funcs,
+ bochs_primary_plane_formats,
+ ARRAY_SIZE(bochs_primary_plane_formats),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+ drm_plane_helper_add(primary_plane, &bochs_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
- drm_mode_config_reset(bochs->dev);
+ crtc = &bochs->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &bochs_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(crtc, &bochs_crtc_helper_funcs);
+
+ encoder = &bochs->encoder;
+ ret = drm_encoder_init(dev, encoder, &bochs_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ return ret;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ connector = &bochs->connector;
+ ret = drm_connector_init(dev, connector, &bochs_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret)
+ return ret;
+ drm_connector_helper_add(connector, &bochs_connector_helper_funcs);
+ drm_connector_attach_edid_property(connector);
+ drm_connector_attach_encoder(connector, encoder);
+
+ drm_mode_config_reset(dev);
return 0;
}
@@ -563,34 +658,19 @@ static int bochs_kms_init(struct bochs_device *bochs)
/* ---------------------------------------------------------------------- */
/* drm interface */
-static int bochs_load(struct drm_device *dev)
+static int bochs_load(struct bochs_device *bochs)
{
- struct bochs_device *bochs;
int ret;
- bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL);
- if (bochs == NULL)
- return -ENOMEM;
- dev->dev_private = bochs;
- bochs->dev = dev;
-
- ret = bochs_hw_init(dev);
+ ret = bochs_hw_init(bochs);
if (ret)
return ret;
- ret = drmm_vram_helper_init(dev, bochs->fb_base, bochs->fb_size);
- if (ret)
- goto err_hw_fini;
-
ret = bochs_kms_init(bochs);
if (ret)
- goto err_hw_fini;
+ return ret;
return 0;
-
-err_hw_fini:
- bochs_hw_fini(dev);
- return ret;
}
DEFINE_DRM_GEM_FOPS(bochs_fops);
@@ -600,10 +680,10 @@ static const struct drm_driver bochs_driver = {
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
- .date = "20130925",
.major = 1,
.minor = 0,
- DRM_GEM_VRAM_DRIVER,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
};
/* ---------------------------------------------------------------------- */
@@ -635,23 +715,18 @@ static const struct dev_pm_ops bochs_pm_ops = {
static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct bochs_device *bochs;
struct drm_device *dev;
- unsigned long fbsize;
int ret;
- fbsize = pci_resource_len(pdev, 0);
- if (fbsize < 4 * 1024 * 1024) {
- DRM_ERROR("less than 4 MB video memory, ignoring device\n");
- return -ENOMEM;
- }
-
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &bochs_driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, bochs_driver.name);
if (ret)
return ret;
- dev = drm_dev_alloc(&bochs_driver, &pdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ bochs = devm_drm_dev_alloc(&pdev->dev, &bochs_driver, struct bochs_device, dev);
+ if (IS_ERR(bochs))
+ return PTR_ERR(bochs);
+ dev = &bochs->dev;
ret = pcim_enable_device(pdev);
if (ret)
@@ -659,19 +734,18 @@ static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
pci_set_drvdata(pdev, dev);
- ret = bochs_load(dev);
+ ret = bochs_load(bochs);
if (ret)
goto err_free_dev;
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_hw_fini;
+ goto err_free_dev;
+
+ drm_client_setup(dev, NULL);
- drm_fbdev_ttm_setup(dev, 32);
return ret;
-err_hw_fini:
- bochs_hw_fini(dev);
err_free_dev:
drm_dev_put(dev);
return ret;
@@ -683,8 +757,6 @@ static void bochs_pci_remove(struct pci_dev *pdev)
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- bochs_hw_fini(dev);
- drm_dev_put(dev);
}
static void bochs_pci_shutdown(struct pci_dev *pdev)
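
The same substitution recurs throughout this series: drivers drop the backend-specific fbdev setup call and instead declare fbdev ops (DRM_FBDEV_SHMEM_DRIVER_OPS or DRM_FBDEV_DMA_DRIVER_OPS) in struct drm_driver, letting the generic client layer instantiate the emulation. A before/after sketch, where the NULL format argument appears to request the driver's preferred default:

	/* before: hard-coded fbdev backend and color depth */
	drm_fbdev_ttm_setup(dev, 32);

	/* after: generic client setup; the driver's DRM_FBDEV_*_DRIVER_OPS
	 * entry in struct drm_driver supplies the fbdev implementation */
	drm_client_setup(dev, NULL);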
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus-qemu.c
index 751326e3d9c3..52ec1e4ea9e5 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus-qemu.c
@@ -16,6 +16,7 @@
* Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
*/
+#include <linux/aperture.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -23,7 +24,7 @@
#include <video/cirrus.h>
#include <video/vga.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
@@ -45,9 +46,8 @@
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
-#define DRIVER_NAME "cirrus"
+#define DRIVER_NAME "cirrus-qemu"
#define DRIVER_DESC "qemu cirrus vga"
-#define DRIVER_DATE "2019"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
@@ -509,8 +509,10 @@ static void cirrus_crtc_helper_atomic_enable(struct drm_crtc *crtc,
cirrus_mode_set(cirrus, &crtc_state->mode);
+#ifdef CONFIG_HAS_IOPORT
/* Unblank (needed on S3 resume, vgabios doesn't do it then) */
outb(VGA_AR_ENABLE_DISPLAY, VGA_ATT_W);
+#endif
drm_dev_exit(idx);
}
@@ -586,14 +588,14 @@ static int cirrus_pipe_init(struct cirrus_device *cirrus)
encoder = &cirrus->encoder;
ret = drm_encoder_init(dev, encoder, &cirrus_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
connector = &cirrus->connector;
ret = drm_connector_init(dev, connector, &cirrus_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_VIRTUAL);
if (ret)
return ret;
drm_connector_helper_add(connector, &cirrus_connector_helper_funcs);
@@ -656,12 +658,12 @@ static const struct drm_driver cirrus_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.fops = &cirrus_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
};
static int cirrus_pci_probe(struct pci_dev *pdev,
@@ -671,7 +673,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
struct cirrus_device *cirrus;
int ret;
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &cirrus_driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, cirrus_driver.name);
if (ret)
return ret;
@@ -716,7 +718,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- drm_fbdev_shmem_setup(dev, 16);
+ drm_client_setup(dev, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 0bd7707c053e..41e9bfb2e2ff 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -7,6 +7,7 @@
#include <linux/pm.h>
#include <linux/usb.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
@@ -33,7 +34,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
#define DRIVER_NAME "gm12u320"
#define DRIVER_DESC "Grain Media GM12U320 USB projector display"
-#define DRIVER_DATE "2019"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -625,13 +625,13 @@ static const struct drm_driver gm12u320_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_prime_import = gm12u320_gem_prime_import,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
};
static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = {
@@ -706,7 +706,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
if (ret)
goto err_put_device;
- drm_fbdev_shmem_setup(dev, 0);
+ drm_client_setup(dev, NULL);
return 0;
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 2e631282edeb..df263818f45f 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -16,6 +16,7 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -194,10 +195,10 @@ static const struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
.desc = "HX8357D",
- .date = "20181023",
.major = 1,
.minor = 0,
};
@@ -256,7 +257,7 @@ static int hx8357d_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index 86f9d8834901..62cadf5e033d 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -7,6 +7,7 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -113,10 +114,10 @@ static struct drm_driver ili9163_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9163_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9163",
.desc = "Ilitek ILI9163",
- .date = "20210208",
.major = 1,
.minor = 0,
};
@@ -185,7 +186,7 @@ static int ili9163_probe(struct spi_device *spi)
if (ret)
return ret;
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index b6b7a49147bf..6de44ff69b51 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -16,6 +16,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
@@ -360,9 +361,9 @@ static const struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9225_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
- .date = "20171106",
.major = 1,
.minor = 0,
};
@@ -426,7 +427,7 @@ static int ili9225_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 8bcada30af71..e55029433509 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -15,6 +15,7 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -150,10 +151,10 @@ static const struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
- .date = "20180514",
.major = 1,
.minor = 0,
};
@@ -218,7 +219,7 @@ static int ili9341_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 70d366260041..093661c771a0 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -14,6 +14,7 @@
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -172,10 +173,10 @@ static const struct drm_driver ili9486_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9486_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9486",
.desc = "Ilitek ILI9486",
- .date = "20200118",
.major = 1,
.minor = 0,
};
@@ -247,7 +248,7 @@ static int ili9486_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index cdc5423990ca..b6b4664908ae 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -13,6 +13,7 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -154,10 +155,10 @@ static const struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
- .date = "20160614",
.major = 1,
.minor = 0,
};
@@ -226,7 +227,7 @@ static int mi0283qt_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index 35996f7eedac..13491c0e704a 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/aperture.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
@@ -24,7 +25,6 @@
#define DRIVER_NAME "ofdrm"
#define DRIVER_DESC "DRM driver for OF platform devices"
-#define DRIVER_DATE "20220501"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -1219,7 +1219,7 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
fb_pgbase = round_down(fb_base, PAGE_SIZE);
fb_pgsize = fb_base - fb_pgbase + round_up(fb_size, PAGE_SIZE);
- ret = devm_aperture_acquire_from_firmware(dev, fb_pgbase, fb_pgsize);
+ ret = devm_aperture_acquire_for_platform_device(pdev, fb_pgbase, fb_pgsize);
if (ret) {
drm_err(dev, "could not acquire memory range %pr: error %d\n", &res, ret);
return ERR_PTR(ret);
@@ -1344,9 +1344,9 @@ DEFINE_DRM_GEM_FOPS(ofdrm_fops);
static struct drm_driver ofdrm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
@@ -1361,7 +1361,6 @@ static int ofdrm_probe(struct platform_device *pdev)
{
struct ofdrm_device *odev;
struct drm_device *dev;
- unsigned int color_mode;
int ret;
odev = ofdrm_device_create(&ofdrm_driver, pdev);
@@ -1373,11 +1372,7 @@ static int ofdrm_probe(struct platform_device *pdev)
if (ret)
return ret;
- color_mode = drm_format_info_bpp(odev->format, 0);
- if (color_mode == 16)
- color_mode = odev->format->depth; // can be 15 or 16
-
- drm_fbdev_shmem_setup(dev, color_mode);
+ drm_client_setup(dev, odev->format);
return 0;
}
@@ -1401,7 +1396,7 @@ static struct platform_driver ofdrm_platform_driver = {
.of_match_table = ofdrm_of_match_display,
},
.probe = ofdrm_probe,
- .remove_new = ofdrm_remove,
+ .remove = ofdrm_remove,
};
module_platform_driver(ofdrm_platform_driver);
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index f753cdffe6f8..0460ecaef4bd 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -10,10 +10,12 @@
#include <linux/firmware.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -264,10 +266,10 @@ static const struct drm_driver panel_mipi_dbi_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &panel_mipi_dbi_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "panel-mipi-dbi",
.desc = "MIPI DBI compatible display panel",
- .date = "20220103",
.major = 1,
.minor = 0,
};
@@ -388,7 +390,7 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 1f78aa3d26bb..52ba6c699bc8 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -21,6 +21,7 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
@@ -913,9 +914,9 @@ static const struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &repaper_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
- .date = "20170405",
.major = 1,
.minor = 0,
};
@@ -1118,7 +1119,7 @@ static int repaper_probe(struct spi_device *spi)
DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c
new file mode 100644
index 000000000000..03d2850310c4
--- /dev/null
+++ b/drivers/gpu/drm/tiny/sharp-memory.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_rect.h>
+#include <linux/bitrev.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kthread.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pwm.h>
+#include <linux/spi/spi.h>
+
+#define SHARP_MODE_PERIOD 8
+#define SHARP_ADDR_PERIOD 8
+#define SHARP_DUMMY_PERIOD 8
+
+#define SHARP_MEMORY_DISPLAY_MAINTAIN_MODE 0
+#define SHARP_MEMORY_DISPLAY_UPDATE_MODE 1
+#define SHARP_MEMORY_DISPLAY_CLEAR_MODE 4
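
Each SPI frame is an 8-bit mode period, then per scanline an 8-bit address period, the pixel bits, and an 8-bit dummy period. A worked example (editorial) for the 400x240 LS027B7DH01 mode defined later in this file, matching the pitch and tx_buffer_size computations in sharp_memory_probe():

	/* pitch          = (8 + 400 + 8) / 8              =    52 bytes per line  */
	/* tx_buffer_size = (8 + (8 + 400 + 8) * 240) / 8  = 12481 bytes per frame */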
+
+enum sharp_memory_model {
+ LS010B7DH04,
+ LS011B7DH03,
+ LS012B7DD01,
+ LS013B7DH03,
+ LS013B7DH05,
+ LS018B7DH02,
+ LS027B7DH01,
+ LS027B7DH01A,
+ LS032B7DD02,
+ LS044Q7DH01,
+};
+
+enum sharp_memory_vcom_mode {
+ SHARP_MEMORY_SOFTWARE_VCOM,
+ SHARP_MEMORY_EXTERNAL_VCOM,
+ SHARP_MEMORY_PWM_VCOM
+};
+
+struct sharp_memory_device {
+ struct drm_device drm;
+ struct spi_device *spi;
+
+ const struct drm_display_mode *mode;
+
+ struct drm_crtc crtc;
+ struct drm_plane plane;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct gpio_desc *enable_gpio;
+
+ struct task_struct *sw_vcom_signal;
+ struct pwm_device *pwm_vcom_signal;
+
+ enum sharp_memory_vcom_mode vcom_mode;
+ u8 vcom;
+
+ u32 pitch;
+ u32 tx_buffer_size;
+ u8 *tx_buffer;
+
+ /* When vcom_mode == "software" a kthread is used to periodically send a
+ * 'maintain display' message over spi. This mutex ensures that tx_buffer
+ * access and spi bus usage are synchronized in this case.
+ */
+ struct mutex tx_mutex;
+};
+
+static inline int sharp_memory_spi_write(struct spi_device *spi, void *buf, size_t len)
+{
+ /* Reverse the bit order */
+ for (u8 *b = buf; b < ((u8 *)buf) + len; ++b)
+ *b = bitrev8(*b);
+
+ return spi_write(spi, buf, len);
+}
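
Sharp memory LCDs clock data in LSB-first order while spi_write() transmits bytes MSB-first, so every byte is bit-reversed in place before the transfer; note that this mutates the caller's buffer. For example:

	u8 b = 0x01;    /* 0b00000001 */
	b = bitrev8(b); /* 0b10000000 == 0x80 */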
+
+static inline struct sharp_memory_device *drm_to_sharp_memory_device(struct drm_device *drm)
+{
+ return container_of(drm, struct sharp_memory_device, drm);
+}
+
+DEFINE_DRM_GEM_DMA_FOPS(sharp_memory_fops);
+
+static const struct drm_driver sharp_memory_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &sharp_memory_fops,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
+ .name = "sharp_memory_display",
+ .desc = "Sharp Display Memory LCD",
+ .major = 1,
+ .minor = 0,
+};
+
+static inline void sharp_memory_set_tx_buffer_mode(u8 *buffer, u8 mode, u8 vcom)
+{
+ *buffer = mode | (vcom << 1);
+}
+
+static inline void sharp_memory_set_tx_buffer_addresses(u8 *buffer,
+ struct drm_rect clip,
+ u32 pitch)
+{
+ for (u32 line = 0; line < clip.y2; ++line)
+ buffer[line * pitch] = line + 1;
+}
+
+static void sharp_memory_set_tx_buffer_data(u8 *buffer,
+ struct drm_framebuffer *fb,
+ struct drm_rect clip,
+ u32 pitch,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ int ret;
+ struct iosys_map dst, vmap;
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ return;
+
+ iosys_map_set_vaddr(&dst, buffer);
+ iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
+
+ drm_fb_xrgb8888_to_mono(&dst, &pitch, &vmap, fb, &clip, fmtcnv_state);
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+}
+
+static int sharp_memory_update_display(struct sharp_memory_device *smd,
+ struct drm_framebuffer *fb,
+ struct drm_rect clip,
+ struct drm_format_conv_state *fmtcnv_state)
+{
+ int ret;
+ u32 pitch = smd->pitch;
+ u8 vcom = smd->vcom;
+ u8 *tx_buffer = smd->tx_buffer;
+ u32 tx_buffer_size = smd->tx_buffer_size;
+
+ mutex_lock(&smd->tx_mutex);
+
+ /* Populate the transmit buffer with frame data */
+ sharp_memory_set_tx_buffer_mode(&tx_buffer[0],
+ SHARP_MEMORY_DISPLAY_UPDATE_MODE, vcom);
+ sharp_memory_set_tx_buffer_addresses(&tx_buffer[1], clip, pitch);
+ sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, clip, pitch, fmtcnv_state);
+
+ ret = sharp_memory_spi_write(smd->spi, tx_buffer, tx_buffer_size);
+
+ mutex_unlock(&smd->tx_mutex);
+
+ return ret;
+}
+
+static int sharp_memory_maintain_display(struct sharp_memory_device *smd)
+{
+ int ret;
+ u8 vcom = smd->vcom;
+ u8 *tx_buffer = smd->tx_buffer;
+
+ mutex_lock(&smd->tx_mutex);
+
+ sharp_memory_set_tx_buffer_mode(&tx_buffer[0], SHARP_MEMORY_DISPLAY_MAINTAIN_MODE, vcom);
+ tx_buffer[1] = 0; /* Write dummy data */
+ ret = sharp_memory_spi_write(smd->spi, tx_buffer, 2);
+
+ mutex_unlock(&smd->tx_mutex);
+
+ return ret;
+}
+
+static int sharp_memory_clear_display(struct sharp_memory_device *smd)
+{
+ int ret;
+ u8 vcom = smd->vcom;
+ u8 *tx_buffer = smd->tx_buffer;
+
+ mutex_lock(&smd->tx_mutex);
+
+ sharp_memory_set_tx_buffer_mode(&tx_buffer[0], SHARP_MEMORY_DISPLAY_CLEAR_MODE, vcom);
+ tx_buffer[1] = 0; /* Write dummy data */
+ ret = sharp_memory_spi_write(smd->spi, tx_buffer, 2);
+
+ mutex_unlock(&smd->tx_mutex);
+
+ return ret;
+}
+
+static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect,
+ struct drm_format_conv_state *fmtconv_state)
+{
+ struct drm_rect clip;
+ struct sharp_memory_device *smd = drm_to_sharp_memory_device(fb->dev);
+
+ /* Always update a full line regardless of what is dirty */
+ clip.x1 = 0;
+ clip.x2 = fb->width;
+ clip.y1 = rect->y1;
+ clip.y2 = rect->y2;
+
+ sharp_memory_update_display(smd, fb, clip, fmtconv_state);
+}
+
+static int sharp_memory_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct sharp_memory_device *smd;
+ struct drm_crtc_state *crtc_state;
+
+ smd = container_of(plane, struct sharp_memory_device, plane);
+ crtc_state = drm_atomic_get_new_crtc_state(state, &smd->crtc);
+
+ return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+
+static void sharp_memory_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
+ struct sharp_memory_device *smd;
+ struct drm_rect rect;
+
+ smd = container_of(plane, struct sharp_memory_device, plane);
+ if (!smd->crtc.state->active)
+ return;
+
+ if (drm_atomic_helper_damage_merged(old_state, plane_state, &rect))
+ sharp_memory_fb_dirty(plane_state->fb, &rect, &fmtcnv_state);
+
+ drm_format_conv_state_release(&fmtcnv_state);
+}
+
+static const struct drm_plane_helper_funcs sharp_memory_plane_helper_funcs = {
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
+ .atomic_check = sharp_memory_plane_atomic_check,
+ .atomic_update = sharp_memory_plane_atomic_update,
+};
+
+static bool sharp_memory_format_mod_supported(struct drm_plane *plane,
+ u32 format,
+ u64 modifier)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
+static const struct drm_plane_funcs sharp_memory_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .format_mod_supported = sharp_memory_format_mod_supported,
+};
+
+static enum drm_mode_status sharp_memory_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, smd->mode);
+}
+
+static int sharp_memory_crtc_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ int ret;
+
+ if (!crtc_state->enable)
+ goto out;
+
+ ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state);
+ if (ret)
+ return ret;
+
+out:
+ return drm_atomic_add_affected_planes(state, crtc);
+}
+
+static int sharp_memory_sw_vcom_signal_thread(void *data)
+{
+ struct sharp_memory_device *smd = data;
+
+ while (!kthread_should_stop()) {
+ smd->vcom ^= 1; /* Toggle vcom */
+ sharp_memory_maintain_display(smd);
+ msleep(1000);
+ }
+
+ return 0;
+}
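
In software mode the panel thus receives a two-byte 'maintain display' frame about once a second with the VCOM bit alternating. Given the mode | (vcom << 1) encoding in sharp_memory_set_tx_buffer_mode(), the mode bytes work out as follows (values before the bitrev8() pass):

	/* maintain, vcom=0: 0x00 | (0 << 1) = 0x00 */
	/* maintain, vcom=1: 0x00 | (1 << 1) = 0x02 */
	/* update,   vcom=1: 0x01 | (1 << 1) = 0x03 */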
+
+static void sharp_memory_crtc_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev);
+
+ sharp_memory_clear_display(smd);
+
+ if (smd->enable_gpio)
+ gpiod_set_value(smd->enable_gpio, 1);
+}
+
+static void sharp_memory_crtc_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct sharp_memory_device *smd = drm_to_sharp_memory_device(crtc->dev);
+
+ sharp_memory_clear_display(smd);
+
+ if (smd->enable_gpio)
+ gpiod_set_value(smd->enable_gpio, 0);
+}
+
+static const struct drm_crtc_helper_funcs sharp_memory_crtc_helper_funcs = {
+ .mode_valid = sharp_memory_crtc_mode_valid,
+ .atomic_check = sharp_memory_crtc_check,
+ .atomic_enable = sharp_memory_crtc_enable,
+ .atomic_disable = sharp_memory_crtc_disable,
+};
+
+static const struct drm_crtc_funcs sharp_memory_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_encoder_funcs sharp_memory_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int sharp_memory_connector_get_modes(struct drm_connector *connector)
+{
+ struct sharp_memory_device *smd = drm_to_sharp_memory_device(connector->dev);
+
+ return drm_connector_helper_get_modes_fixed(connector, smd->mode);
+}
+
+static const struct drm_connector_helper_funcs sharp_memory_connector_hfuncs = {
+ .get_modes = sharp_memory_connector_get_modes,
+};
+
+static const struct drm_connector_funcs sharp_memory_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_mode_config_funcs sharp_memory_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_display_mode sharp_memory_ls010b7dh04_mode = {
+ DRM_SIMPLE_MODE(128, 128, 18, 18),
+};
+
+static const struct drm_display_mode sharp_memory_ls011b7dh03_mode = {
+ DRM_SIMPLE_MODE(160, 68, 25, 10),
+};
+
+static const struct drm_display_mode sharp_memory_ls012b7dd01_mode = {
+ DRM_SIMPLE_MODE(184, 38, 29, 6),
+};
+
+static const struct drm_display_mode sharp_memory_ls013b7dh03_mode = {
+ DRM_SIMPLE_MODE(128, 128, 23, 23),
+};
+
+static const struct drm_display_mode sharp_memory_ls013b7dh05_mode = {
+ DRM_SIMPLE_MODE(144, 168, 20, 24),
+};
+
+static const struct drm_display_mode sharp_memory_ls018b7dh02_mode = {
+ DRM_SIMPLE_MODE(230, 303, 27, 36),
+};
+
+static const struct drm_display_mode sharp_memory_ls027b7dh01_mode = {
+ DRM_SIMPLE_MODE(400, 240, 58, 35),
+};
+
+static const struct drm_display_mode sharp_memory_ls032b7dd02_mode = {
+ DRM_SIMPLE_MODE(336, 536, 42, 68),
+};
+
+static const struct drm_display_mode sharp_memory_ls044q7dh01_mode = {
+ DRM_SIMPLE_MODE(320, 240, 89, 67),
+};
+
+static const struct spi_device_id sharp_memory_ids[] = {
+ {"ls010b7dh04", (kernel_ulong_t)&sharp_memory_ls010b7dh04_mode},
+ {"ls011b7dh03", (kernel_ulong_t)&sharp_memory_ls011b7dh03_mode},
+ {"ls012b7dd01", (kernel_ulong_t)&sharp_memory_ls012b7dd01_mode},
+ {"ls013b7dh03", (kernel_ulong_t)&sharp_memory_ls013b7dh03_mode},
+ {"ls013b7dh05", (kernel_ulong_t)&sharp_memory_ls013b7dh05_mode},
+ {"ls018b7dh02", (kernel_ulong_t)&sharp_memory_ls018b7dh02_mode},
+ {"ls027b7dh01", (kernel_ulong_t)&sharp_memory_ls027b7dh01_mode},
+ {"ls027b7dh01a", (kernel_ulong_t)&sharp_memory_ls027b7dh01_mode},
+ {"ls032b7dd02", (kernel_ulong_t)&sharp_memory_ls032b7dd02_mode},
+ {"ls044q7dh01", (kernel_ulong_t)&sharp_memory_ls044q7dh01_mode},
+ {},
+};
+MODULE_DEVICE_TABLE(spi, sharp_memory_ids);
+
+static const struct of_device_id sharp_memory_of_match[] = {
+ {.compatible = "sharp,ls010b7dh04", &sharp_memory_ls010b7dh04_mode},
+ {.compatible = "sharp,ls011b7dh03", &sharp_memory_ls011b7dh03_mode},
+ {.compatible = "sharp,ls012b7dd01", &sharp_memory_ls012b7dd01_mode},
+ {.compatible = "sharp,ls013b7dh03", &sharp_memory_ls013b7dh03_mode},
+ {.compatible = "sharp,ls013b7dh05", &sharp_memory_ls013b7dh05_mode},
+ {.compatible = "sharp,ls018b7dh02", &sharp_memory_ls018b7dh02_mode},
+ {.compatible = "sharp,ls027b7dh01", &sharp_memory_ls027b7dh01_mode},
+ {.compatible = "sharp,ls027b7dh01a", &sharp_memory_ls027b7dh01_mode},
+ {.compatible = "sharp,ls032b7dd02", &sharp_memory_ls032b7dd02_mode},
+ {.compatible = "sharp,ls044q7dh01", &sharp_memory_ls044q7dh01_mode},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sharp_memory_of_match);
+
+static const u32 sharp_memory_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static int sharp_memory_pipe_init(struct drm_device *dev,
+ struct sharp_memory_device *smd,
+ const u32 *formats, unsigned int format_count,
+ const u64 *format_modifiers)
+{
+ int ret;
+ struct drm_encoder *encoder = &smd->encoder;
+ struct drm_plane *plane = &smd->plane;
+ struct drm_crtc *crtc = &smd->crtc;
+ struct drm_connector *connector = &smd->connector;
+
+ drm_plane_helper_add(plane, &sharp_memory_plane_helper_funcs);
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &sharp_memory_plane_funcs,
+ formats, format_count,
+ format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &sharp_memory_crtc_helper_funcs);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &sharp_memory_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &sharp_memory_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ ret = drm_connector_init(&smd->drm, &smd->connector,
+ &sharp_memory_connector_funcs,
+ DRM_MODE_CONNECTOR_SPI);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(&smd->connector,
+ &sharp_memory_connector_hfuncs);
+
+ return drm_connector_attach_encoder(connector, encoder);
+}
+
+static int sharp_memory_init_pwm_vcom_signal(struct sharp_memory_device *smd)
+{
+ int ret;
+ struct device *dev = &smd->spi->dev;
+ struct pwm_state pwm_state;
+
+ smd->pwm_vcom_signal = devm_pwm_get(dev, NULL);
+ if (IS_ERR(smd->pwm_vcom_signal))
+ return dev_err_probe(dev, PTR_ERR(smd->pwm_vcom_signal),
+ "Could not get pwm device\n");
+
+ pwm_init_state(smd->pwm_vcom_signal, &pwm_state);
+ pwm_set_relative_duty_cycle(&pwm_state, 1, 10);
+ pwm_state.enabled = true;
+ ret = pwm_apply_might_sleep(smd->pwm_vcom_signal, &pwm_state);
+ if (ret)
+ return dev_err_probe(dev, ret, "Could not apply pwm state\n");
+
+ return 0;
+}
+
+static int sharp_memory_probe(struct spi_device *spi)
+{
+ int ret;
+ struct device *dev;
+ struct sharp_memory_device *smd;
+ struct drm_device *drm;
+ const char *vcom_mode_str;
+
+ dev = &spi->dev;
+
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to setup spi device\n");
+
+ if (!dev->coherent_dma_mask) {
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set dma mask\n");
+ }
+
+ smd = devm_drm_dev_alloc(dev, &sharp_memory_drm_driver,
+ struct sharp_memory_device, drm);
+ if (IS_ERR(smd))
+ return PTR_ERR(smd);
+
+ spi_set_drvdata(spi, smd);
+
+ smd->spi = spi;
+ drm = &smd->drm;
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize drm config\n");
+
+ smd->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
+ if (!smd->enable_gpio)
+ dev_warn(dev, "Enable gpio not defined\n");
+
+ drm->mode_config.funcs = &sharp_memory_mode_config_funcs;
+ smd->mode = spi_get_device_match_data(spi);
+
+ smd->pitch = (SHARP_ADDR_PERIOD + smd->mode->hdisplay + SHARP_DUMMY_PERIOD) / 8;
+ smd->tx_buffer_size = (SHARP_MODE_PERIOD +
+ (SHARP_ADDR_PERIOD + (smd->mode->hdisplay) + SHARP_DUMMY_PERIOD) *
+ smd->mode->vdisplay) / 8;
+
+ smd->tx_buffer = devm_kzalloc(dev, smd->tx_buffer_size, GFP_KERNEL);
+ if (!smd->tx_buffer)
+ return -ENOMEM;
+
+ mutex_init(&smd->tx_mutex);
+
+ /*
+ * VCOM is a signal that prevents DC bias from being built up in
+ * the panel resulting in pixels being forever stuck in one state.
+ *
+ * This driver supports three different methods to generate this
+ * signal depending on EXTMODE pin:
+ *
+ * software (EXTMODE = L) - This mode uses a kthread to
+ * periodically send a "maintain display" message to the display,
+ * toggling the vcom bit on and off with each message
+ *
+ * external (EXTMODE = H) - This mode relies on an external
+ * clock to generate the signal on the EXTCOMM pin
+ *
+ * pwm (EXTMODE = H) - This mode uses a pwm device to generate
+ * the signal on the EXTCOMM pin
+ */
+ if (device_property_read_string(dev, "sharp,vcom-mode", &vcom_mode_str))
+ return dev_err_probe(dev, -EINVAL,
+ "Unable to find sharp,vcom-mode node in device tree\n");
+
+ if (!strcmp("software", vcom_mode_str)) {
+ smd->vcom_mode = SHARP_MEMORY_SOFTWARE_VCOM;
+ smd->sw_vcom_signal = kthread_run(sharp_memory_sw_vcom_signal_thread,
+ smd, "sw_vcom_signal");
+
+ } else if (!strcmp("external", vcom_mode_str)) {
+ smd->vcom_mode = SHARP_MEMORY_EXTERNAL_VCOM;
+
+ } else if (!strcmp("pwm", vcom_mode_str)) {
+ smd->vcom_mode = SHARP_MEMORY_PWM_VCOM;
+ ret = sharp_memory_init_pwm_vcom_signal(smd);
+ if (ret)
+ return ret;
+ } else {
+ return dev_err_probe(dev, -EINVAL, "Invalid value set for vcom-mode\n");
+ }
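
For orientation, a device-tree node selecting the software VCOM mode might look like the following; the property names ("sharp,vcom-mode", "enable") come from this probe function, but the node itself is a hypothetical illustration, not part of the patch:

	display@0 {
		compatible = "sharp,ls027b7dh01";
		reg = <0>;
		spi-max-frequency = <2000000>;
		sharp,vcom-mode = "software";
		enable-gpios = <&gpio0 23 GPIO_ACTIVE_HIGH>;
	};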
+
+ drm->mode_config.min_width = smd->mode->hdisplay;
+ drm->mode_config.max_width = smd->mode->hdisplay;
+ drm->mode_config.min_height = smd->mode->vdisplay;
+ drm->mode_config.max_height = smd->mode->vdisplay;
+
+ ret = sharp_memory_pipe_init(drm, smd, sharp_memory_formats,
+ ARRAY_SIZE(sharp_memory_formats),
+ NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize display pipeline.\n");
+
+ drm_plane_enable_fb_damage_clips(&smd->plane);
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register drm device.\n");
+
+ drm_client_setup(drm, NULL);
+
+ return 0;
+}
+
+static void sharp_memory_remove(struct spi_device *spi)
+{
+ struct sharp_memory_device *smd = spi_get_drvdata(spi);
+
+ drm_dev_unplug(&smd->drm);
+ drm_atomic_helper_shutdown(&smd->drm);
+
+ switch (smd->vcom_mode) {
+ case SHARP_MEMORY_SOFTWARE_VCOM:
+ kthread_stop(smd->sw_vcom_signal);
+ break;
+
+ case SHARP_MEMORY_EXTERNAL_VCOM:
+ break;
+
+ case SHARP_MEMORY_PWM_VCOM:
+ pwm_disable(smd->pwm_vcom_signal);
+ break;
+ }
+}
+
+static struct spi_driver sharp_memory_spi_driver = {
+ .driver = {
+ .name = "sharp_memory",
+ .of_match_table = sharp_memory_of_match,
+ },
+ .probe = sharp_memory_probe,
+ .remove = sharp_memory_remove,
+ .id_table = sharp_memory_ids,
+};
+module_spi_driver(sharp_memory_spi_driver);
+
+MODULE_AUTHOR("Alex Lanzano <lanzano.alex@gmail.com>");
+MODULE_DESCRIPTION("SPI Protocol driver for the sharp_memory display");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index d19e10289428..5d9ab8adf800 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/aperture.h>
#include <linux/clk.h>
#include <linux/of_clk.h>
#include <linux/minmax.h>
@@ -9,7 +10,7 @@
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
@@ -30,7 +31,6 @@
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
-#define DRIVER_DATE "20200625"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -882,7 +882,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (mem) {
void *screen_base;
- ret = devm_aperture_acquire_from_firmware(dev, mem->start, resource_size(mem));
+ ret = devm_aperture_acquire_for_platform_device(pdev, mem->start,
+ resource_size(mem));
if (ret) {
drm_err(dev, "could not acquire memory range %pr: %d\n", mem, ret);
return ERR_PTR(ret);
@@ -902,7 +903,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
if (!res)
return ERR_PTR(-EINVAL);
- ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
+ ret = devm_aperture_acquire_for_platform_device(pdev, res->start,
+ resource_size(res));
if (ret) {
drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
return ERR_PTR(ret);
@@ -1009,9 +1011,9 @@ DEFINE_DRM_GEM_FOPS(simpledrm_fops);
static struct drm_driver simpledrm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
@@ -1026,7 +1028,6 @@ static int simpledrm_probe(struct platform_device *pdev)
{
struct simpledrm_device *sdev;
struct drm_device *dev;
- unsigned int color_mode;
int ret;
sdev = simpledrm_device_create(&simpledrm_driver, pdev);
@@ -1038,11 +1039,7 @@ static int simpledrm_probe(struct platform_device *pdev)
if (ret)
return ret;
- color_mode = drm_format_info_bpp(sdev->format, 0);
- if (color_mode == 16)
- color_mode = sdev->format->depth; // can be 15 or 16
-
- drm_fbdev_shmem_setup(dev, color_mode);
+ drm_client_setup(dev, sdev->format);
return 0;
}
@@ -1067,7 +1064,7 @@ static struct platform_driver simpledrm_platform_driver = {
.of_match_table = simpledrm_of_match_table,
},
.probe = simpledrm_probe,
- .remove_new = simpledrm_remove,
+ .remove = simpledrm_remove,
};
module_platform_driver(simpledrm_platform_driver);
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index b9c6ed352182..a29672d84ede 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -12,6 +12,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
@@ -290,10 +291,10 @@ static const struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7586_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
- .date = "20170801",
.major = 1,
.minor = 0,
};
@@ -371,7 +372,7 @@ static int st7586_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 1676da00883d..1d60f6e5b3bc 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -16,6 +16,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -155,10 +156,10 @@ static const struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7735r_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
+ DRM_FBDEV_DMA_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
- .date = "20171128",
.major = 1,
.minor = 0,
};
@@ -241,7 +242,7 @@ static int st7735r_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_fbdev_dma_setup(drm, 0);
+ drm_client_setup(drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index f0a7eb62116c..f8f20d2f6174 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -258,13 +258,13 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->priority = bo_prio;
- err = ttm_resource_alloc(bo, place, &res1);
+ err = ttm_resource_alloc(bo, place, &res1, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res1;
/* Add a dummy resource to populate LRU */
- ttm_resource_alloc(bo, place, &res2);
+ ttm_resource_alloc(bo, place, &res2, NULL);
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_unreserve(bo);
@@ -300,19 +300,19 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_pin(bo);
- err = ttm_resource_alloc(bo, place, &res1);
+ err = ttm_resource_alloc(bo, place, &res1, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res1;
/* Add a dummy resource to the pinned list */
- err = ttm_resource_alloc(bo, place, &res2);
+ err = ttm_resource_alloc(bo, place, &res2, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res2->lru.link, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res2->lru.link, &priv->ttm_dev->unevictable), 1);
ttm_bo_unreserve(bo);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru.link, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res1->lru.link, &priv->ttm_dev->unevictable), 1);
ttm_resource_free(bo, &res1);
ttm_resource_free(bo, &res2);
@@ -355,7 +355,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
dma_resv_unlock(bo1->base.resv);
- err = ttm_resource_alloc(bo1, place, &res1);
+ err = ttm_resource_alloc(bo1, place, &res1, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo1->resource = res1;
@@ -363,7 +363,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
ttm_bo_set_bulk_move(bo2, &lru_bulk_move);
dma_resv_unlock(bo2->base.resv);
- err = ttm_resource_alloc(bo2, place, &res2);
+ err = ttm_resource_alloc(bo2, place, &res2, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo2->resource = res2;
@@ -401,7 +401,7 @@ static void ttm_bo_put_basic(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->type = ttm_bo_type_device;
- err = ttm_resource_alloc(bo, place, &res);
+ err = ttm_resource_alloc(bo, place, &res, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res;
@@ -518,7 +518,7 @@ static void ttm_bo_pin_unpin_resource(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
- err = ttm_resource_alloc(bo, place, &res);
+ err = ttm_resource_alloc(bo, place, &res, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res;
@@ -569,7 +569,7 @@ static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
- err = ttm_resource_alloc(bo, place, &res);
+ err = ttm_resource_alloc(bo, place, &res, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
bo->resource = res;
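
Mechanically, every ttm_resource_alloc() call gains a fourth argument: an optional out-pointer for the dmem cgroup pool state that constrained the allocation. Passing NULL, as the tests do, opts out of cgroup accounting feedback. A sketch of the signature these call sites assume:

	int ttm_resource_alloc(struct ttm_buffer_object *bo,
			       const struct ttm_place *place,
			       struct ttm_resource **res,
			       struct dmem_cgroup_pool_state **ret_limit_pool);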
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
index 1adf18481ea0..3148f5d3dbd6 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -542,7 +542,7 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
bo->ttm = old_tt;
}
- err = ttm_resource_alloc(bo, place, &bo->resource);
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test, man->usage, size);
@@ -603,7 +603,7 @@ static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
bo->type = params->bo_type;
- err = ttm_resource_alloc(bo, place, &bo->resource);
+ err = ttm_resource_alloc(bo, place, &bo->resource, NULL);
KUNIT_EXPECT_EQ(test, err, 0);
placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
index 22260e7aea58..e6ea2bd01f07 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -164,18 +164,18 @@ static void ttm_resource_init_pinned(struct kunit *test)
res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, res);
- KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned));
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable));
dma_resv_lock(bo->base.resv, NULL);
ttm_bo_pin(bo);
ttm_resource_init(bo, place, res);
- KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->pinned));
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&bo->bdev->unevictable));
ttm_bo_unpin(bo);
ttm_resource_fini(man, res);
dma_resv_unlock(bo->base.resv);
- KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->pinned));
+ KUNIT_ASSERT_TRUE(test, list_empty(&bo->bdev->unevictable));
}
static void ttm_resource_fini_basic(struct kunit *test)
@@ -302,7 +302,7 @@ static void ttm_sys_man_free_basic(struct kunit *test)
res = kunit_kzalloc(test, sizeof(*res), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, res);
- ttm_resource_alloc(bo, place, &res);
+ ttm_resource_alloc(bo, place, &res, NULL);
man = ttm_manager_type(priv->devs->ttm_dev, mem_type);
man->func->free(man, res);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 320592435252..ea5e49858857 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -42,6 +42,7 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
+#include <linux/cgroup_dmem.h>
#include <linux/dma-resv.h>
#include "ttm_module.h"
@@ -139,7 +140,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
goto out_err;
if (mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
goto out_err;
}
@@ -499,6 +500,13 @@ struct ttm_bo_evict_walk {
struct ttm_resource **res;
/** @evicted: Number of successful evictions. */
unsigned long evicted;
+
+ /** @limit_pool: Which pool limit we should test against */
+ struct dmem_cgroup_pool_state *limit_pool;
+ /** @try_low: Whether we should attempt to evict BOs below the low watermark threshold */
+ bool try_low;
+ /** @hit_low: Set when a BO could not be evicted because @try_low was false (first pass) */
+ bool hit_low;
};
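
These fields implement a two-pass eviction walk: the first pass skips buffers whose dmem cgroup pool is below its low watermark, only noting in hit_low that such a buffer was seen; if that pass evicts nothing, a second pass with try_low set makes those buffers eligible. Schematically (editorial sketch, not patch code):

	lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1); /* try_low == false; may set hit_low */
	if (!lret && evict_walk.hit_low) {
		evict_walk.try_low = true; /* low-watermark BOs now eligible */
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	}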
static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
@@ -507,6 +515,10 @@ static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *
container_of(walk, typeof(*evict_walk), walk);
s64 lret;
+ if (!dmem_cgroup_state_evict_valuable(evict_walk->limit_pool, bo->resource->css,
+ evict_walk->try_low, &evict_walk->hit_low))
+ return 0;
+
if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
return 0;
@@ -524,7 +536,7 @@ static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *
evict_walk->evicted++;
if (evict_walk->res)
lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
- evict_walk->res);
+ evict_walk->res, NULL);
if (lret == 0)
return 1;
out:
@@ -545,7 +557,8 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
struct ttm_buffer_object *evictor,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket,
- struct ttm_resource **res)
+ struct ttm_resource **res,
+ struct dmem_cgroup_pool_state *limit_pool)
{
struct ttm_bo_evict_walk evict_walk = {
.walk = {
@@ -556,22 +569,39 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev,
.place = place,
.evictor = evictor,
.res = res,
+ .limit_pool = limit_pool,
};
s64 lret;
evict_walk.walk.trylock_only = true;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+
+ /* One more attempt if we hit low limit? */
+ if (!lret && evict_walk.hit_low) {
+ evict_walk.try_low = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ }
if (lret || !ticket)
goto out;
+ /* Reset low limit */
+ evict_walk.try_low = evict_walk.hit_low = false;
/* If ticket-locking, repeat while making progress. */
evict_walk.walk.trylock_only = false;
+
+retry:
do {
/* The walk may clear the evict_walk.walk.ticket field */
evict_walk.walk.ticket = ticket;
evict_walk.evicted = 0;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
} while (!lret && evict_walk.evicted);
+
+ /* We hit the low limit? Try once more */
+ if (!lret && evict_walk.hit_low && !evict_walk.try_low) {
+ evict_walk.try_low = true;
+ goto retry;
+ }
out:
if (lret < 0)
return lret;
@@ -594,7 +624,8 @@ void ttm_bo_pin(struct ttm_buffer_object *bo)
spin_lock(&bo->bdev->lru_lock);
if (bo->resource)
ttm_resource_del_bulk_move(bo->resource, bo);
- ++bo->pin_count;
+ if (!bo->pin_count++ && bo->resource)
+ ttm_resource_move_to_lru_tail(bo->resource);
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);
@@ -613,9 +644,10 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
return;
spin_lock(&bo->bdev->lru_lock);
- --bo->pin_count;
- if (bo->resource)
+ if (!--bo->pin_count && bo->resource) {
ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ }
spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
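
With this change only the 0->1 and 1->0 pin-count transitions move the resource; nested pins just adjust the counter. Roughly (editorial sketch):

	ttm_bo_pin(bo);    /* 0 -> 1: resource leaves its bulk move, goes to the unevictable list */
	ttm_bo_pin(bo);    /* 1 -> 2: counter only */
	ttm_bo_unpin(bo);  /* 2 -> 1: counter only */
	ttm_bo_unpin(bo);  /* 1 -> 0: resource rejoins its bulk move and LRU */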
@@ -687,6 +719,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
+ struct dmem_cgroup_pool_state *limit_pool = NULL;
struct ttm_resource_manager *man;
bool may_evict;
@@ -699,15 +732,20 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
continue;
may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
- ret = ttm_resource_alloc(bo, place, res);
+ ret = ttm_resource_alloc(bo, place, res, force_space ? &limit_pool : NULL);
if (ret) {
- if (ret != -ENOSPC)
+ if (ret != -ENOSPC && ret != -EAGAIN) {
+ dmem_cgroup_pool_state_put(limit_pool);
return ret;
- if (!may_evict)
+ }
+ if (!may_evict) {
+ dmem_cgroup_pool_state_put(limit_pool);
continue;
+ }
ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
- ticket, res);
+ ticket, res, limit_pool);
+ dmem_cgroup_pool_state_put(limit_pool);
if (ret == -EBUSY)
continue;
if (ret)
@@ -1054,6 +1092,8 @@ struct ttm_bo_swapout_walk {
struct ttm_lru_walk walk;
/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
gfp_t gfp_flags;
+
+ bool hit_low, evict_low;
};
static s64
@@ -1104,7 +1144,7 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
memset(&hop, 0, sizeof(hop));
place.mem_type = TTM_PL_SYSTEM;
- ret = ttm_resource_alloc(bo, &place, &evict_mem);
+ ret = ttm_resource_alloc(bo, &place, &evict_mem, NULL);
if (ret)
goto out;
@@ -1128,9 +1168,20 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
- if (ttm_tt_is_populated(bo->ttm))
+ if (ttm_tt_is_populated(bo->ttm)) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_del_bulk_move(bo->resource, bo);
+ spin_unlock(&bo->bdev->lru_lock);
+
ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
+ spin_lock(&bo->bdev->lru_lock);
+ if (ret)
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
out:
/* Consider -ENOMEM and -ENOSPC non-fatal. */
if (ret == -ENOMEM || ret == -ENOSPC)
@@ -1180,3 +1231,47 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}
+
+/**
+ * ttm_bo_populate() - Ensure that a buffer object has backing pages
+ * @bo: The buffer object
+ * @ctx: The ttm_operation_ctx governing the operation.
+ *
+ * For buffer objects in a memory type whose manager uses
+ * struct ttm_tt for backing pages, ensure those backing pages
+ * are present and with valid content. The bo's resource is also
+ * placed on the correct LRU list if it was previously swapped
+ * out.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ * Note: May return -EINTR or -ERESTARTSYS if @ctx::interruptible
+ * is set to true.
+ */
+int ttm_bo_populate(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
+{
+ struct ttm_tt *tt = bo->ttm;
+ bool swapped;
+ int ret;
+
+ dma_resv_assert_held(bo->base.resv);
+
+ if (!tt)
+ return 0;
+
+ swapped = ttm_tt_is_swapped(tt);
+ ret = ttm_tt_populate(bo->bdev, tt, ctx);
+ if (ret)
+ return ret;
+
+ if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count &&
+ bo->resource) {
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_resource_add_bulk_move(bo->resource, bo);
+ ttm_resource_move_to_lru_tail(bo->resource);
+ spin_unlock(&bo->bdev->lru_lock);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_populate);
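
Driver paths that previously called ttm_tt_populate() directly now funnel through this helper while holding the reservation, so a swapped-out ttm_tt transparently regains its LRU placement on swapin. Usage sketch mirroring the ttm_bo_kmap_ttm() conversion below (editorial, not patch code):

	struct ttm_operation_ctx ctx = { .interruptible = false, .no_wait_gpu = false };
	int ret;

	dma_resv_lock(bo->base.resv, NULL);
	ret = ttm_bo_populate(bo, &ctx); /* was: ttm_tt_populate(bo->bdev, bo->ttm, &ctx) */
	dma_resv_unlock(bo->base.resv);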
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3c07f4712d5c..d939925efa81 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -163,7 +163,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
src_man = ttm_manager_type(bdev, src_mem->mem_type);
if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
dst_man->use_tt)) {
- ret = ttm_tt_populate(bdev, ttm, ctx);
+ ret = ttm_bo_populate(bo, ctx);
if (ret)
return ret;
}
@@ -350,7 +350,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
BUG_ON(!ttm);
- ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
@@ -507,7 +507,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
pgprot_t prot;
void *vaddr;
- ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
+ ret = ttm_bo_populate(bo, &ctx);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 4212b8c91dd4..a194db83421d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -58,13 +58,13 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
return VM_FAULT_RETRY;
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
mmap_read_unlock(vmf->vma->vm_mm);
(void)dma_resv_wait_timeout(bo->base.resv,
DMA_RESV_USAGE_KERNEL, true,
MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
return VM_FAULT_RETRY;
}
@@ -130,12 +130,12 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
mmap_read_unlock(vmf->vma->vm_mm);
if (!dma_resv_lock_interruptible(bo->base.resv,
NULL))
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
}
return VM_FAULT_RETRY;
@@ -224,7 +224,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
};
ttm = bo->ttm;
- err = ttm_tt_populate(bdev, bo->ttm, &ctx);
+ err = ttm_bo_populate(bo, &ctx);
if (err) {
if (err == -EINTR || err == -ERESTARTSYS ||
err == -EAGAIN)
@@ -353,7 +353,7 @@ void ttm_bo_vm_open(struct vm_area_struct *vma)
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
}
EXPORT_SYMBOL(ttm_bo_vm_open);
@@ -361,7 +361,7 @@ void ttm_bo_vm_close(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = vma->vm_private_data;
- ttm_bo_put(bo);
+ drm_gem_object_put(&bo->base);
vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);
@@ -405,13 +405,25 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
return len;
}
-int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
- void *buf, int len, int write)
+/**
+ * ttm_bo_access - Helper to access a buffer object
+ *
+ * @bo: ttm buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Utility function to access a buffer object. Useful when a buffer object
+ * cannot be easily mapped (non-contiguous, non-visible, etc...). Should not
+ * directly be exported to user space via a peek / poke interface.
+ *
+ * Returns:
+ * @len if successful, negative error code on failure.
+ */
+int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write)
{
- struct ttm_buffer_object *bo = vma->vm_private_data;
- unsigned long offset = (addr) - vma->vm_start +
- ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
- << PAGE_SHIFT);
int ret;
if (len < 1 || (offset + len) > bo->base.size)
@@ -429,8 +441,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
break;
default:
if (bo->bdev->funcs->access_memory)
- ret = bo->bdev->funcs->access_memory(
- bo, offset, buf, len, write);
+ ret = bo->bdev->funcs->access_memory
+ (bo, offset, buf, len, write);
else
ret = -EIO;
}
@@ -439,6 +451,18 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
+EXPORT_SYMBOL(ttm_bo_access);
+
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ unsigned long offset = (addr) - vma->vm_start +
+ ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
+ << PAGE_SHIFT);
+
+ return ttm_bo_access(bo, offset, buf, len, write);
+}
EXPORT_SYMBOL(ttm_bo_vm_access);
static const struct vm_operations_struct ttm_bo_vm_ops = {
@@ -462,7 +486,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
if (is_cow_mapping(vma->vm_flags))
return -EINVAL;
- ttm_bo_get(bo);
+ drm_gem_object_get(&bo->base);
/*
* Drivers may want to override the vm_ops field. Otherwise we
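For illustration, a hedged sketch of how a driver might use the newly exported ttm_bo_access() on a BO that cannot be mapped; the function name and the debug print are invented.

/* Sketch: read one u32 out of a non-mappable BO at a byte offset. */
static int driver_peek_bo(struct ttm_buffer_object *bo, unsigned long offset)
{
	u32 value;
	int ret;

	ret = ttm_bo_access(bo, offset, &value, sizeof(value), false);
	if (ret < 0)
		return ret;		/* negative error code */
	if (ret != sizeof(value))
		return -EIO;		/* short access */

	pr_debug("bo[0x%lx] = 0x%08x\n", offset, value);
	return 0;
}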
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index e7cc4954c1bc..02e797fd1891 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -216,7 +216,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);
- INIT_LIST_HEAD(&bdev->pinned);
+ INIT_LIST_HEAD(&bdev->unevictable);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -283,7 +283,7 @@ void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
struct ttm_resource_manager *man;
unsigned int i, j;
- ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);
+ ttm_device_clear_lru_dma_mappings(bdev, &bdev->unevictable);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 6d764ba88aab..cc29bbf3eabb 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -26,10 +26,12 @@
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
+#include <linux/cgroup_dmem.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_tt.h>
#include <drm/drm_util.h>
@@ -235,11 +237,26 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
}
}
+static bool ttm_resource_is_swapped(struct ttm_resource *res, struct ttm_buffer_object *bo)
+{
+	/*
+	 * Take care when creating a new resource for a bo that it is not
+	 * considered swapped if it is not the bo's current resource, since
+	 * only the current resource is logically associated with the ttm_tt.
+	 * Think of a VRAM resource created to move a swapped-out bo to VRAM.
+	 */
+ if (bo->resource != res || !bo->ttm)
+ return false;
+
+ dma_resv_assert_held(bo->base.resv);
+ return ttm_tt_is_swapped(bo->ttm);
+}
+
/* Add the resource to a bulk move if the BO is configured for it */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count)
+ if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
ttm_lru_bulk_move_add(bo->bulk_move, res);
}
@@ -247,7 +264,7 @@ void ttm_resource_add_bulk_move(struct ttm_resource *res,
void ttm_resource_del_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo)
{
- if (bo->bulk_move && !bo->pin_count)
+ if (bo->bulk_move && !bo->pin_count && !ttm_resource_is_swapped(res, bo))
ttm_lru_bulk_move_del(bo->bulk_move, res);
}
@@ -259,8 +276,8 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
- if (bo->pin_count) {
- list_move_tail(&res->lru.link, &bdev->pinned);
+ if (bo->pin_count || ttm_resource_is_swapped(res, bo)) {
+ list_move_tail(&res->lru.link, &bdev->unevictable);
} else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
@@ -301,8 +318,8 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
- if (bo->pin_count)
- list_add_tail(&res->lru.link, &bo->bdev->pinned);
+ if (bo->pin_count || ttm_resource_is_swapped(res, bo))
+ list_add_tail(&res->lru.link, &bo->bdev->unevictable);
else
list_add_tail(&res->lru.link, &man->lru[bo->priority]);
man->usage += res->size;
@@ -334,15 +351,28 @@ EXPORT_SYMBOL(ttm_resource_fini);
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource **res_ptr)
+ struct ttm_resource **res_ptr,
+ struct dmem_cgroup_pool_state **ret_limit_pool)
{
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, place->mem_type);
+ struct dmem_cgroup_pool_state *pool = NULL;
int ret;
+ if (man->cg) {
+ ret = dmem_cgroup_try_charge(man->cg, bo->base.size, &pool, ret_limit_pool);
+ if (ret)
+ return ret;
+ }
+
ret = man->func->alloc(man, bo, place, res_ptr);
- if (ret)
+ if (ret) {
+ if (pool)
+ dmem_cgroup_uncharge(pool, bo->base.size);
return ret;
+ }
+
+ (*res_ptr)->css = pool;
spin_lock(&bo->bdev->lru_lock);
ttm_resource_add_bulk_move(*res_ptr, bo);
@@ -354,6 +384,7 @@ EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
struct ttm_resource_manager *man;
+ struct dmem_cgroup_pool_state *pool;
if (!*res)
return;
@@ -361,9 +392,13 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
spin_lock(&bo->bdev->lru_lock);
ttm_resource_del_bulk_move(*res, bo);
spin_unlock(&bo->bdev->lru_lock);
+
+ pool = (*res)->css;
man = ttm_manager_type(bo->bdev, (*res)->mem_type);
man->func->free(man, *res);
*res = NULL;
+ if (man->cg)
+ dmem_cgroup_uncharge(pool, bo->base.size);
}
EXPORT_SYMBOL(ttm_resource_free);
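The hunks above implement a charge-before-allocate pattern; here it is reduced to its core as a hedged sketch. The helper name is invented, and the logic mirrors ttm_resource_alloc()/ttm_resource_free() above.

/* Sketch of the accounting pattern: charge first, roll back on failure,
 * and remember the charged pool on the resource so the eventual free
 * knows what to uncharge.
 */
static int charged_alloc(struct ttm_resource_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_resource **res,
			 struct dmem_cgroup_pool_state **limit_pool)
{
	struct dmem_cgroup_pool_state *pool = NULL;
	int ret;

	if (man->cg) {
		ret = dmem_cgroup_try_charge(man->cg, bo->base.size,
					     &pool, limit_pool);
		if (ret)	/* over the cgroup limit: nothing allocated */
			return ret;
	}

	ret = man->func->alloc(man, bo, place, res);
	if (ret) {
		if (pool)
			dmem_cgroup_uncharge(pool, bo->base.size);
		return ret;
	}

	(*res)->css = pool;
	return 0;
}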
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 4b51b9023126..3baf215eca23 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -367,7 +367,10 @@ error:
}
return ret;
}
+
+#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
EXPORT_SYMBOL(ttm_tt_populate);
+#endif
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
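The conditional export above keeps ttm_tt_populate() out of the export table in production kernels while letting the TTM KUnit tests link against it. The same pattern with a hypothetical symbol:

/* Test-only export: invisible to other modules unless the KUnit tests
 * that exercise this helper are built. 'my_internal_helper' is a
 * placeholder name.
 */
#if IS_ENABLED(CONFIG_DRM_TTM_KUNIT_TEST)
EXPORT_SYMBOL(my_internal_helper);
#endif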
diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig
index 5121fed571a5..a9d6fe535d88 100644
--- a/drivers/gpu/drm/tve200/Kconfig
+++ b/drivers/gpu/drm/tve200/Kconfig
@@ -6,6 +6,7 @@ config DRM_TVE200
depends on ARM || COMPILE_TEST
depends on OF
select DRM_BRIDGE
+ select DRM_CLIENT_SELECTION
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index acce210e2554..a048e37f1c2c 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -37,10 +37,12 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
@@ -144,11 +146,11 @@ static const struct drm_driver tve200_drm_driver = {
.fops = &drm_fops,
.name = "tve200",
.desc = DRIVER_DESC,
- .date = "20170703",
.major = 1,
.minor = 0,
.patchlevel = 0,
DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
};
static int tve200_probe(struct platform_device *pdev)
@@ -221,11 +223,7 @@ static int tve200_probe(struct platform_device *pdev)
if (ret < 0)
goto clk_disable;
- /*
- * Passing in 16 here will make the RGB565 mode the default
- * Passing in 32 will use XRGB8888 mode
- */
- drm_fbdev_dma_setup(drm, 16);
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565);
return 0;
@@ -268,7 +266,7 @@ static struct platform_driver tve200_driver = {
.of_match_table = tve200_of_match,
},
.probe = tve200_probe,
- .remove_new = tve200_remove,
+ .remove = tve200_remove,
.shutdown = tve200_shutdown,
};
drm_module_platform_driver(tve200_driver);
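The conversion above replaces a bpp-based fbdev call with an explicit format: instead of drm_fbdev_dma_setup(drm, 16), the driver names DRM_FORMAT_RGB565 and lets the generic client machinery, selected via DRM_FBDEV_DMA_DRIVER_OPS, create the fbdev client. A hedged sketch of the choice a converted driver makes (function name invented):

/* Sketch: pick the fbdev format explicitly rather than by bpp.
 * Passing NULL instead requests the driver's preferred format, as the
 * udl and vboxvideo conversions below do.
 */
static void example_client_setup(struct drm_device *drm, bool want_rgb565)
{
	if (want_rgb565)
		drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565);
	else
		drm_client_setup(drm, NULL);
}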
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index c744175c6992..d7a6abef7d78 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -5,6 +5,7 @@ config DRM_UDL
depends on USB
depends on USB_ARCH_HAS_HCD
depends on MMU
+ select DRM_CLIENT_SELECTION
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 280a09a6e2ad..05b3a152cc33 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -5,6 +5,7 @@
#include <linux/module.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
@@ -73,10 +74,10 @@ static const struct drm_driver driver = {
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_prime_import = udl_driver_gem_prime_import,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -117,7 +118,7 @@ static int udl_usb_probe(struct usb_interface *interface,
DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
- drm_fbdev_shmem_setup(&udl->drm, 0);
+ drm_client_setup(&udl->drm, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 1eb716d9dad5..be00dc1d87a1 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -26,7 +26,6 @@ struct drm_mode_create_dumb;
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
-#define DRIVER_DATE "20120220"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile
index b7d673f1153b..fcf710926057 100644
--- a/drivers/gpu/drm/v3d/Makefile
+++ b/drivers/gpu/drm/v3d/Makefile
@@ -13,7 +13,8 @@ v3d-y := \
v3d_trace_points.o \
v3d_sched.o \
v3d_sysfs.o \
- v3d_submit.o
+ v3d_submit.o \
+ v3d_gemfs.o
v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index ebe52bef4ffb..bb7815599435 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -13,10 +13,6 @@
* Display engines requiring physically contiguous allocations should
* look into Mesa's "renderonly" support (as used by the Mesa pl111
* driver) for an example of how to integrate with V3D.
- *
- * Long term, we should support evicting pages from the MMU when under
- * memory pressure (thus the v3d_bo_get_pages() refcounting), but
- * that's not a high priority since our systems tend to not have swap.
*/
#include <linux/dma-buf.h>
@@ -107,6 +103,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
struct v3d_dev *v3d = to_v3d_dev(obj->dev);
struct v3d_bo *bo = to_v3d_bo(obj);
struct sg_table *sgt;
+ u64 align;
int ret;
/* So far we pin the BO in the MMU for its lifetime, so use
@@ -116,6 +113,15 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
if (IS_ERR(sgt))
return PTR_ERR(sgt);
+ if (!v3d->gemfs)
+ align = SZ_4K;
+ else if (obj->size >= SZ_1M)
+ align = SZ_1M;
+ else if (obj->size >= SZ_64K)
+ align = SZ_64K;
+ else
+ align = SZ_4K;
+
spin_lock(&v3d->mm_lock);
/* Allocate the object's space in the GPU's page tables.
* Inserting PTEs will happen later, but the offset is for the
@@ -123,7 +129,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj)
*/
ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
obj->size >> V3D_MMU_PAGE_SHIFT,
- GMP_GRANULARITY >> V3D_MMU_PAGE_SHIFT, 0, 0);
+ align >> V3D_MMU_PAGE_SHIFT, 0, 0);
spin_unlock(&v3d->mm_lock);
if (ret)
return ret;
@@ -143,10 +149,12 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
size_t unaligned_size)
{
struct drm_gem_shmem_object *shmem_obj;
+ struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_bo *bo;
int ret;
- shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
+ shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size,
+ v3d->gemfs);
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
bo = to_v3d_bo(&shmem_obj->base);
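A worked restatement of the alignment ladder above (sketch; helper name invented): without the driver's own gemfs mount everything stays at 4 KiB, otherwise the GPU VA alignment steps up with object size so that huge shmem pages can later be mapped as big/super pages.

static u64 example_va_alignment(bool have_gemfs, size_t size)
{
	if (!have_gemfs)
		return SZ_4K;		/* no THP-backed shmem available */
	if (size >= SZ_1M)
		return SZ_1M;		/* eligible for superpage PTEs */
	if (size >= SZ_64K)
		return SZ_64K;		/* eligible for bigpage PTEs */
	return SZ_4K;
}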
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 19e3ee7ac897..76816f2551c1 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -237,8 +237,8 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
if (v3d->ver >= 40) {
int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
- V3D_SET_FIELD(cycle_count_reg,
- V3D_PCTR_S0));
+ V3D_SET_FIELD_VER(cycle_count_reg,
+ V3D_PCTR_S0, v3d->ver));
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
} else {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index d7ff1f5fa481..930737a9347b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -31,11 +31,17 @@
#define DRIVER_NAME "v3d"
#define DRIVER_DESC "Broadcom V3D graphics"
-#define DRIVER_DATE "20180419"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
+/* Only expose the `super_pages` modparam if THP is enabled. */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+bool super_pages = true;
+module_param_named(super_pages, super_pages, bool, 0400);
+MODULE_PARM_DESC(super_pages, "Enable/Disable Super Pages support.");
+#endif
+
static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -97,6 +103,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_MAX_PERF_COUNTERS:
args->value = v3d->perfmon_info.max_counters;
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES:
+ args->value = !!v3d->gemfs;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -214,6 +223,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_COUNTER, v3d_perfmon_get_counter_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(V3D_PERFMON_SET_GLOBAL, v3d_perfmon_set_global_ioctl, DRM_RENDER_ALLOW),
};
static const struct drm_driver v3d_drm_driver = {
@@ -238,7 +248,6 @@ static const struct drm_driver v3d_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -381,7 +390,7 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
static struct platform_driver v3d_platform_driver = {
.probe = v3d_platform_drm_probe,
- .remove_new = v3d_platform_drm_remove,
+ .remove = v3d_platform_drm_remove,
.driver = {
.name = "v3d",
.of_match_table = v3d_of_match,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index cf4b23369dc4..dc1cfe2e14be 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -19,9 +19,8 @@ struct clk;
struct platform_device;
struct reset_control;
-#define GMP_GRANULARITY (128 * 1024)
-
#define V3D_MMU_PAGE_SHIFT 12
+#define V3D_PAGE_FACTOR (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT)
#define V3D_MAX_QUEUES (V3D_CPU + 1)
@@ -137,6 +136,11 @@ struct v3d_dev {
struct drm_mm mm;
spinlock_t mm_lock;
+ /*
+ * tmpfs instance used for shmem backed objects
+ */
+ struct vfsmount *gemfs;
+
struct work_struct overflow_mem_work;
struct v3d_bin_job *bin_job;
@@ -179,6 +183,12 @@ struct v3d_dev {
u32 num_allocated;
u32 pages_allocated;
} bo_stats;
+
+ /* To support a performance analysis tool in user space, we require
+ * a single, globally configured performance monitor (perfmon) for
+ * all jobs.
+ */
+ struct v3d_perfmon *global_perfmon;
};
static inline struct v3d_dev *
@@ -534,6 +544,11 @@ void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);
+/* v3d_gemfs.c */
+extern bool super_pages;
+void v3d_gemfs_init(struct v3d_dev *v3d);
+void v3d_gemfs_fini(struct v3d_dev *v3d);
+
/* v3d_submit.c */
void v3d_job_cleanup(struct v3d_job *job);
void v3d_job_put(struct v3d_job *job);
@@ -553,6 +568,7 @@ void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);
/* v3d_mmu.c */
+int v3d_mmu_flush_all(struct v3d_dev *v3d);
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);
@@ -584,6 +600,8 @@ int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int v3d_perfmon_set_global_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* v3d_sysfs.c */
int v3d_sysfs_init(struct device *dev);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index da8faf3b9011..b1e681630ded 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -288,11 +288,14 @@ v3d_gem_init(struct drm_device *dev)
v3d_init_hw_state(v3d);
v3d_mmu_set_page_table(v3d);
+ v3d_gemfs_init(v3d);
+
ret = v3d_sched_init(v3d);
if (ret) {
drm_mm_takedown(&v3d->mm);
- dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
+ dma_free_coherent(v3d->drm.dev, pt_size, (void *)v3d->pt,
v3d->pt_paddr);
+ return ret;
}
return 0;
@@ -304,6 +307,7 @@ v3d_gem_destroy(struct drm_device *dev)
struct v3d_dev *v3d = to_v3d_dev(dev);
v3d_sched_fini(v3d);
+ v3d_gemfs_fini(v3d);
/* Waiting for jobs to finish would need to be done before
* unregistering V3D.
diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c
new file mode 100644
index 000000000000..4c5e18590a5c
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_gemfs.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2024 Raspberry Pi */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+
+#include "v3d_drv.h"
+
+void v3d_gemfs_init(struct v3d_dev *v3d)
+{
+ char huge_opt[] = "huge=within_size";
+ struct file_system_type *type;
+ struct vfsmount *gemfs;
+
+	/*
+	 * By creating our own shmemfs mountpoint, we can pass in
+	 * mount flags that better match our use case. However, we
+	 * only do so on platforms that benefit from it.
+	 */
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ goto err;
+
+ /* The user doesn't want to enable Super Pages */
+ if (!super_pages)
+ goto err;
+
+ type = get_fs_type("tmpfs");
+ if (!type)
+ goto err;
+
+ gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt);
+ if (IS_ERR(gemfs))
+ goto err;
+
+ v3d->gemfs = gemfs;
+ drm_info(&v3d->drm, "Using Transparent Hugepages\n");
+
+ return;
+
+err:
+ v3d->gemfs = NULL;
+ drm_notice(&v3d->drm,
+ "Transparent Hugepage support is recommended for optimal performance on this platform!\n");
+}
+
+void v3d_gemfs_fini(struct v3d_dev *v3d)
+{
+ if (v3d->gemfs)
+ kern_unmount(v3d->gemfs);
+}
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index d469bda52c1a..72b6a119412f 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -70,6 +70,8 @@ v3d_overflow_mem_work(struct work_struct *work)
list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ v3d_mmu_flush_all(v3d);
+
V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << V3D_MMU_PAGE_SHIFT);
V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
@@ -105,7 +107,10 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+
+ v3d->bin_job = NULL;
dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
@@ -115,7 +120,10 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+
+ v3d->render_job = NULL;
dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
@@ -125,7 +133,10 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
+
+ v3d->csd_job = NULL;
dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
@@ -162,7 +173,10 @@ v3d_hub_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+
+ v3d->tfu_job = NULL;
dma_fence_signal(&fence->base);
+
status = IRQ_HANDLED;
}
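The ordering that matters in the interrupt-handler hunks above, in miniature (function name invented): the in-flight job pointer is cleared before the fence is signalled, so work triggered from fence callbacks never observes a stale v3d->bin_job, ->render_job, ->csd_job, or ->tfu_job.

/* Sketch, using the bin queue as the example. */
static void example_complete_bin_job(struct v3d_dev *v3d,
				     struct dma_fence *fence)
{
	v3d->bin_job = NULL;		/* publish "queue idle" first */
	dma_fence_signal(fence);	/* then wake waiters and callbacks */
}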
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index 14f3af40d6f6..a25d25a8ae61 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -4,7 +4,7 @@
/**
* DOC: Broadcom V3D MMU
*
- * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has
+ * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has
* a single level of page tables for the V3D's 4GB address space to
* map to AXI bus addresses, thus it could need up to 4MB of
* physically contiguous memory to store the PTEs.
@@ -15,49 +15,47 @@
*
* To protect clients from each other, we should use the GMP to
* quickly mask out (at 128kb granularity) what pages are available to
- * each client. This is not yet implemented.
+ * each client. This is not yet implemented.
*/
#include "v3d_drv.h"
#include "v3d_regs.h"
-/* Note: All PTEs for the 1MB superpage must be filled with the
- * superpage bit set.
+/* Note: All PTEs for the 64KB bigpage or 1MB superpage must be filled
+ * with the bigpage/superpage bit set.
*/
#define V3D_PTE_SUPERPAGE BIT(31)
+#define V3D_PTE_BIGPAGE BIT(30)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)
-static int v3d_mmu_flush_all(struct v3d_dev *v3d)
+static bool v3d_mmu_is_aligned(u32 page, u32 page_address, size_t alignment)
{
- int ret;
-
- /* Make sure that another flush isn't already running when we
- * start this one.
- */
- ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
- V3D_MMU_CTL_TLB_CLEARING), 100);
- if (ret)
- dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");
+ return IS_ALIGNED(page, alignment >> V3D_MMU_PAGE_SHIFT) &&
+ IS_ALIGNED(page_address, alignment >> V3D_MMU_PAGE_SHIFT);
+}
- V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
- V3D_MMU_CTL_TLB_CLEAR);
+int v3d_mmu_flush_all(struct v3d_dev *v3d)
+{
+ int ret;
- V3D_WRITE(V3D_MMUC_CONTROL,
- V3D_MMUC_CONTROL_FLUSH |
+ V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_FLUSH |
V3D_MMUC_CONTROL_ENABLE);
- ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
- V3D_MMU_CTL_TLB_CLEARING), 100);
+ ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
+ V3D_MMUC_CONTROL_FLUSHING), 100);
if (ret) {
- dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
+ dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
return ret;
}
- ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
- V3D_MMUC_CONTROL_FLUSHING), 100);
+ V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
+ V3D_MMU_CTL_TLB_CLEAR);
+
+ ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
+ V3D_MMU_CTL_TLB_CLEARING), 100);
if (ret)
- dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
+ dev_err(v3d->drm.dev, "MMU TLB clear wait idle failed\n");
return ret;
}
@@ -87,19 +85,40 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
struct drm_gem_shmem_object *shmem_obj = &bo->base;
struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
u32 page = bo->node.start;
- u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
- struct sg_dma_page_iter dma_iter;
-
- for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) {
- dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
- u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
- u32 pte = page_prot | page_address;
- u32 i;
-
- BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=
- BIT(24));
- for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++)
- v3d->pt[page++] = pte + i;
+ struct scatterlist *sgl;
+ unsigned int count;
+
+ for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, count) {
+ dma_addr_t dma_addr = sg_dma_address(sgl);
+ u32 pfn = dma_addr >> V3D_MMU_PAGE_SHIFT;
+ unsigned int len = sg_dma_len(sgl);
+
+ while (len > 0) {
+ u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
+ u32 page_address = page_prot | pfn;
+ unsigned int i, page_size;
+
+ BUG_ON(pfn + V3D_PAGE_FACTOR >= BIT(24));
+
+ if (len >= SZ_1M &&
+ v3d_mmu_is_aligned(page, page_address, SZ_1M)) {
+ page_size = SZ_1M;
+ page_address |= V3D_PTE_SUPERPAGE;
+ } else if (len >= SZ_64K &&
+ v3d_mmu_is_aligned(page, page_address, SZ_64K)) {
+ page_size = SZ_64K;
+ page_address |= V3D_PTE_BIGPAGE;
+ } else {
+ page_size = SZ_4K;
+ }
+
+ for (i = 0; i < page_size >> V3D_MMU_PAGE_SHIFT; i++) {
+ v3d->pt[page++] = page_address + i;
+ pfn++;
+ }
+
+ len -= page_size;
+ }
}
WARN_ON_ONCE(page - bo->node.start !=
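A reduced sketch of the dual alignment test above (function name invented): a 1 MiB superpage run may only start when at least 1 MiB of the scatterlist segment remains and both the GPU VA, counted in 4 KiB MMU pages, and the DMA page-frame number are 256-page aligned.

static bool example_can_use_superpage(u32 page, u32 pfn, unsigned int len)
{
	const u32 pages_per_1m = SZ_1M >> V3D_MMU_PAGE_SHIFT;	/* 256 */

	return len >= SZ_1M &&
	       IS_ALIGNED(page, pages_per_1m) &&	/* GPU VA side */
	       IS_ALIGNED(pfn, pages_per_1m);		/* DMA side */
}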
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index cd7f1eedf17f..3ebda2fa46fc 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -240,23 +240,24 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon)
for (i = 0; i < ncounters; i++) {
u32 source = i / 4;
- u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0);
+ u32 channel = V3D_SET_FIELD_VER(perfmon->counters[i], V3D_PCTR_S0,
+ v3d->ver);
i++;
- channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
- V3D_PCTR_S1);
+ channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
+ V3D_PCTR_S1, v3d->ver);
i++;
- channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
- V3D_PCTR_S2);
+ channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
+ V3D_PCTR_S2, v3d->ver);
i++;
- channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0,
- V3D_PCTR_S3);
+ channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0,
+ V3D_PCTR_S3, v3d->ver);
V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel);
}
+ V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);
V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask);
V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask);
- V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);
v3d->active_perfmon = perfmon;
}
@@ -306,6 +307,14 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
static int v3d_perfmon_idr_del(int id, void *elem, void *data)
{
struct v3d_perfmon *perfmon = elem;
+ struct v3d_dev *v3d = (struct v3d_dev *)data;
+
+ /* If the active perfmon is being destroyed, stop it first */
+ if (perfmon == v3d->active_perfmon)
+ v3d_perfmon_stop(v3d, perfmon, false);
+
+ /* If the global perfmon is being destroyed, set it to NULL */
+ cmpxchg(&v3d->global_perfmon, perfmon, NULL);
v3d_perfmon_put(perfmon);
@@ -314,8 +323,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv)
{
+ struct v3d_dev *v3d = v3d_priv->v3d;
+
mutex_lock(&v3d_priv->perfmon.lock);
- idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL);
+ idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d);
idr_destroy(&v3d_priv->perfmon.idr);
mutex_unlock(&v3d_priv->perfmon.lock);
mutex_destroy(&v3d_priv->perfmon.lock);
@@ -376,6 +387,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
{
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_perfmon_destroy *req = data;
+ struct v3d_dev *v3d = v3d_priv->v3d;
struct v3d_perfmon *perfmon;
mutex_lock(&v3d_priv->perfmon.lock);
@@ -385,6 +397,13 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
if (!perfmon)
return -EINVAL;
+ /* If the active perfmon is being destroyed, stop it first */
+ if (perfmon == v3d->active_perfmon)
+ v3d_perfmon_stop(v3d, perfmon, false);
+
+ /* If the global perfmon is being destroyed, set it to NULL */
+ cmpxchg(&v3d->global_perfmon, perfmon, NULL);
+
v3d_perfmon_put(perfmon);
return 0;
@@ -402,11 +421,7 @@ int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
if (req->pad != 0)
return -EINVAL;
- mutex_lock(&v3d_priv->perfmon.lock);
- perfmon = idr_find(&v3d_priv->perfmon.idr, req->id);
- v3d_perfmon_get(perfmon);
- mutex_unlock(&v3d_priv->perfmon.lock);
-
+ perfmon = v3d_perfmon_find(v3d_priv, req->id);
if (!perfmon)
return -EINVAL;
@@ -448,3 +463,34 @@ int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
return 0;
}
+
+int v3d_perfmon_set_global_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_perfmon_set_global *req = data;
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_perfmon *perfmon;
+
+ if (req->flags & ~DRM_V3D_PERFMON_CLEAR_GLOBAL)
+ return -EINVAL;
+
+ perfmon = v3d_perfmon_find(v3d_priv, req->id);
+ if (!perfmon)
+ return -EINVAL;
+
+ /* If the request is to clear the global performance monitor */
+ if (req->flags & DRM_V3D_PERFMON_CLEAR_GLOBAL) {
+ if (!v3d->global_perfmon)
+ return -EINVAL;
+
+ xchg(&v3d->global_perfmon, NULL);
+
+ return 0;
+ }
+
+ if (cmpxchg(&v3d->global_perfmon, NULL, perfmon))
+ return -EBUSY;
+
+ return 0;
+}
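From userspace the new ioctl is used bracket-style around a profiling session. A hypothetical flow, error handling elided; the struct and flag come from the matching uapi change in this series:

/* Sketch: make one perfmon global for a profiling session. */
static void profile_globally(int fd, __u32 perfmon_id)
{
	struct drm_v3d_perfmon_set_global arg = { .id = perfmon_id };

	/* Fails with -EBUSY if another global perfmon is installed. */
	ioctl(fd, DRM_IOCTL_V3D_PERFMON_SET_GLOBAL, &arg);

	/* ... submit work; CL/CSD submissions that carry their own
	 * perfmon_id now fail with -EAGAIN, see v3d_submit.c below ... */

	arg.flags = DRM_V3D_PERFMON_CLEAR_GLOBAL;
	ioctl(fd, DRM_IOCTL_V3D_PERFMON_SET_GLOBAL, &arg);
}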
diff --git a/drivers/gpu/drm/v3d/v3d_performance_counters.h b/drivers/gpu/drm/v3d/v3d_performance_counters.h
index d919a2fc9449..2bc4cce0744a 100644
--- a/drivers/gpu/drm/v3d/v3d_performance_counters.h
+++ b/drivers/gpu/drm/v3d/v3d_performance_counters.h
@@ -2,11 +2,12 @@
/*
* Copyright (C) 2024 Raspberry Pi
*/
+
#ifndef V3D_PERFORMANCE_COUNTERS_H
#define V3D_PERFORMANCE_COUNTERS_H
-/* Holds a description of a given performance counter. The index of performance
- * counter is given by the array on v3d_performance_counter.h
+/* Holds a description of a given performance counter. The index of a
+ * performance counter is given by the array in `v3d_performance_counter.c`.
*/
struct v3d_perf_counter_desc {
/* Category of the counter */
@@ -20,15 +21,12 @@ struct v3d_perf_counter_desc {
};
struct v3d_perfmon_info {
- /*
- * Different revisions of V3D have different total number of
+ /* Different revisions of V3D have different total number of
* performance counters.
*/
unsigned int max_counters;
- /*
- * Array of counters valid for the platform.
- */
+ /* Array of counters valid for the platform. */
const struct v3d_perf_counter_desc *counters;
};
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 1b1a62ad9585..6da3c69082bd 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -15,6 +15,14 @@
fieldval & field##_MASK; \
})
+#define V3D_SET_FIELD_VER(value, field, ver) \
+ ({ \
+ typeof(ver) _ver = (ver); \
+ u32 fieldval = (value) << field##_SHIFT(_ver); \
+ WARN_ON((fieldval & ~field##_MASK(_ver)) != 0); \
+ fieldval & field##_MASK(_ver); \
+ })
+
#define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >> \
field##_SHIFT)
@@ -354,18 +362,15 @@
#define V3D_V4_PCTR_0_SRC_28_31 0x0067c
#define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \
4 * (x))
-# define V3D_PCTR_S0_MASK V3D_MASK(6, 0)
-# define V3D_V7_PCTR_S0_MASK V3D_MASK(7, 0)
-# define V3D_PCTR_S0_SHIFT 0
-# define V3D_PCTR_S1_MASK V3D_MASK(14, 8)
-# define V3D_V7_PCTR_S1_MASK V3D_MASK(15, 8)
-# define V3D_PCTR_S1_SHIFT 8
-# define V3D_PCTR_S2_MASK V3D_MASK(22, 16)
-# define V3D_V7_PCTR_S2_MASK V3D_MASK(23, 16)
-# define V3D_PCTR_S2_SHIFT 16
-# define V3D_PCTR_S3_MASK V3D_MASK(30, 24)
-# define V3D_V7_PCTR_S3_MASK V3D_MASK(31, 24)
-# define V3D_PCTR_S3_SHIFT 24
+# define V3D_PCTR_S0_MASK(ver) (((ver) >= 71) ? V3D_MASK(7, 0) : V3D_MASK(6, 0))
+# define V3D_PCTR_S0_SHIFT(ver) 0
+# define V3D_PCTR_S1_MASK(ver) (((ver) >= 71) ? V3D_MASK(15, 8) : V3D_MASK(14, 8))
+# define V3D_PCTR_S1_SHIFT(ver) 8
+# define V3D_PCTR_S2_MASK(ver) (((ver) >= 71) ? V3D_MASK(23, 16) : V3D_MASK(22, 16))
+# define V3D_PCTR_S2_SHIFT(ver) 16
+# define V3D_PCTR_S3_MASK(ver) (((ver) >= 71) ? V3D_MASK(31, 24) : V3D_MASK(30, 24))
+# define V3D_PCTR_S3_SHIFT(ver) 24
+
#define V3D_PCTR_CYCLE_COUNT(ver) ((ver >= 71) ? 0 : 32)
/* Output values of the counters. */
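The versioned macro above lets one call site serve both field layouts; a small sketch of how it resolves, with values taken from the masks above (sources are 7 bits wide before V3D 7.1 and 8 bits from 7.1 on):

/* Sketch: pack two counter sources for a V3D 7.1 part. On ver >= 71
 * the S1 field is V3D_MASK(15, 8), so the full u8 range fits; on an
 * older part an out-of-range value trips the WARN_ON in the macro.
 */
static u32 example_pack_sources_v71(u8 s0, u8 s1)
{
	return V3D_SET_FIELD_VER(s0, V3D_PCTR_S0, 71) |
	       V3D_SET_FIELD_VER(s1, V3D_PCTR_S1, 71);
}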
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 08d2a2739582..da08ddb01d21 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -5,16 +5,16 @@
* DOC: Broadcom V3D scheduling
*
* The shared DRM GPU scheduler is used to coordinate submitting jobs
- * to the hardware. Each DRM fd (roughly a client process) gets its
- * own scheduler entity, which will process jobs in order. The GPU
- * scheduler will round-robin between clients to submit the next job.
+ * to the hardware. Each DRM fd (roughly a client process) gets its
+ * own scheduler entity, which will process jobs in order. The GPU
+ * scheduler will select the next client to run using a FIFO algorithm.
*
* For simplicity, and in order to keep latency low for interactive
* jobs when bulk background jobs are queued up, we submit a new job
* to the HW only when it has completed the last one, instead of
- * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
- * drm_sched_job_add_dependency() to manage the dependency between bin and
- * render, instead of having the clients submit jobs using the HW's
+ * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
+ * `drm_sched_job_add_dependency()` to manage the dependency between bin
+ * and render, instead of having the clients submit jobs using the HW's
* semaphores to interlock between them.
*/
@@ -120,11 +120,19 @@ v3d_cpu_job_free(struct drm_sched_job *sched_job)
static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
- if (job->perfmon != v3d->active_perfmon)
+ struct v3d_perfmon *perfmon = v3d->global_perfmon;
+
+ if (!perfmon)
+ perfmon = job->perfmon;
+
+ if (perfmon == v3d->active_perfmon)
+ return;
+
+ if (perfmon != v3d->active_perfmon)
v3d_perfmon_stop(v3d, v3d->active_perfmon, true);
- if (job->perfmon && v3d->active_perfmon != job->perfmon)
- v3d_perfmon_start(v3d, job->perfmon);
+ if (perfmon && v3d->active_perfmon != perfmon)
+ v3d_perfmon_start(v3d, perfmon);
}
static void
@@ -135,8 +143,31 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
struct v3d_stats *global_stats = &v3d->queue[queue].stats;
struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
-
- preempt_disable();
+ unsigned long flags;
+
+ /*
+	 * We only need to disable local interrupts to appease lockdep, which
+	 * would otherwise think v3d_job_start_stats vs v3d_stats_update has an
+	 * unsafe in-irq vs no-irq-off usage problem. This is a false positive
+	 * because all the locks are per queue and stats type, and all jobs are
+	 * serialised strictly one at a time. More specifically:
+ *
+ * 1. Locks for GPU queues are updated from interrupt handlers under a
+ * spin lock and started here with preemption disabled.
+ *
+ * 2. Locks for CPU queues are updated from the worker with preemption
+ * disabled and equally started here with preemption disabled.
+ *
+ * Therefore both are consistent.
+ *
+	 * 3. Because the next job can only be queued after the previous one
+	 * has been signaled, and locks are per queue, there is also no scope
+	 * for the start part to race with the update part.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ local_irq_save(flags);
+ else
+ preempt_disable();
write_seqcount_begin(&local_stats->lock);
local_stats->start_ns = now;
@@ -146,7 +177,10 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
global_stats->start_ns = now;
write_seqcount_end(&global_stats->lock);
- preempt_enable();
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ local_irq_restore(flags);
+ else
+ preempt_enable();
}
static void
@@ -167,11 +201,21 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
struct v3d_stats *global_stats = &v3d->queue[queue].stats;
struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
+ unsigned long flags;
+
+ /* See comment in v3d_job_start_stats() */
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ local_irq_save(flags);
+ else
+ preempt_disable();
- preempt_disable();
v3d_stats_update(local_stats, now);
v3d_stats_update(global_stats, now);
- preempt_enable();
+
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ local_irq_restore(flags);
+ else
+ preempt_enable();
}
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
@@ -667,7 +711,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_start(&v3d->queue[q].sched);
+ drm_sched_start(&v3d->queue[q].sched, 0);
}
mutex_unlock(&v3d->reset_lock);
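The lockdep-only irq-disable dance in the stats hunks above reduces to this pattern (sketch; the seqcount and timestamp field names follow the driver):

/* Sketch: guard a seqcount write section, widening preempt-off to
 * irq-off only on LOCKDEP builds to silence the false positive the
 * comment above describes.
 */
static void example_stats_begin(struct v3d_stats *stats, u64 now)
{
	unsigned long flags = 0;

	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_save(flags);
	else
		preempt_disable();

	write_seqcount_begin(&stats->lock);
	stats->start_ns = now;
	write_seqcount_end(&stats->lock);

	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_restore(flags);
	else
		preempt_enable();
}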
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index d607aa9c4ec2..4ff5de46fb22 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -11,10 +11,11 @@
#include "v3d_trace.h"
/* Takes the reservation lock on all the BOs being referenced, so that
- * at queue submit time we can update the reservations.
+ * we can attach fences and update the reservations after pushing the job
+ * to the queue.
*
* We don't lock the RCL the tile alloc/state BOs, or overflow memory
- * (all of which are on exec->unref_list). They're entirely private
+ * (all of which are on render->unref_list). They're entirely private
* to v3d, so we don't attach dma-buf fences to them.
*/
static int
@@ -55,11 +56,11 @@ fail:
* @bo_count: Number of GEM handles passed in
*
* The command validator needs to reference BOs by their index within
- * the submitted job's BO list. This does the validation of the job's
+ * the submitted job's BO list. This does the validation of the job's
* BO list and reference counting for the lifetime of the job.
*
* Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at v3d_exec_cleanup() time.
+ * failure, because that will happen at `v3d_job_free()`.
*/
static int
v3d_lookup_bos(struct drm_device *dev,
@@ -981,6 +982,11 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
goto fail;
if (args->perfmon_id) {
+ if (v3d->global_perfmon) {
+ ret = -EAGAIN;
+ goto fail_perfmon;
+ }
+
render->base.perfmon = v3d_perfmon_find(v3d_priv,
args->perfmon_id);
@@ -1196,6 +1202,11 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
goto fail;
if (args->perfmon_id) {
+ if (v3d->global_perfmon) {
+ ret = -EAGAIN;
+ goto fail_perfmon;
+ }
+
job->base.perfmon = v3d_perfmon_find(v3d_priv,
args->perfmon_id);
if (!job->base.perfmon) {
diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig
index 45fe135d6e43..180e30b82ab9 100644
--- a/drivers/gpu/drm/vboxvideo/Kconfig
+++ b/drivers/gpu/drm/vboxvideo/Kconfig
@@ -2,6 +2,7 @@
config DRM_VBOXVIDEO
tristate "Virtual Box Graphics Card"
depends on DRM && X86 && PCI
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index ef36834c8673..bb861f0a0a31 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -7,11 +7,13 @@
* Michael Thayer <michael.thayer@oracle.com,
* Hans de Goede <hdegoede@redhat.com>
*/
+
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vt_kern.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
@@ -44,7 +46,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
return -ENODEV;
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);
if (ret)
return ret;
@@ -80,7 +82,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_irq_fini;
- drm_fbdev_ttm_setup(&vbox->ddev, 32);
+ drm_client_setup(&vbox->ddev, NULL);
return 0;
@@ -187,12 +189,12 @@ static const struct drm_driver driver = {
.fops = &vbox_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
DRM_GEM_VRAM_DRIVER,
+ DRM_FBDEV_TTM_DRIVER_OPS,
};
drm_module_pci_driver_if_modeset(vbox_pci_driver, vbox_modeset);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index e77bd6512eb1..dfa935f381a6 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -25,7 +25,6 @@
#define DRIVER_NAME "vboxvideo"
#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
-#define DRIVER_DATE "20130823"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 269b5f26b2ea..6cc7b7e6294a 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -9,6 +9,8 @@ config DRM_VC4
depends on SND && SND_SOC
depends on COMMON_CLK
depends on PM
+ select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_HDMI_AUDIO_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
select DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c
index 0731a7d85d7a..e276a957b01c 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_mock.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c
@@ -51,8 +51,8 @@ struct vc4_mock_desc {
static const struct vc4_mock_desc vc4_mock =
VC4_MOCK_DESC(
- VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
- VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
+ VC4_MOCK_CRTC_DESC(&bcm2835_txp_data.base,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP0,
DRM_MODE_ENCODER_VIRTUAL,
DRM_MODE_CONNECTOR_WRITEBACK)),
VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv0_data,
@@ -77,8 +77,8 @@ static const struct vc4_mock_desc vc4_mock =
static const struct vc4_mock_desc vc5_mock =
VC4_MOCK_DESC(
- VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
- VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
+ VC4_MOCK_CRTC_DESC(&bcm2835_txp_data.base,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP0,
DRM_MODE_ENCODER_VIRTUAL,
DRM_MODE_CONNECTOR_WRITEBACK)),
VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv0_data,
@@ -155,11 +155,11 @@ KUNIT_DEFINE_ACTION_WRAPPER(kunit_action_drm_dev_unregister,
drm_dev_unregister,
struct drm_device *);
-static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
+static struct vc4_dev *__mock_device(struct kunit *test, enum vc4_gen gen)
{
struct drm_device *drm;
- const struct drm_driver *drv = is_vc5 ? &vc5_drm_driver : &vc4_drm_driver;
- const struct vc4_mock_desc *desc = is_vc5 ? &vc5_mock : &vc4_mock;
+ const struct drm_driver *drv = (gen == VC4_GEN_5) ? &vc5_drm_driver : &vc4_drm_driver;
+ const struct vc4_mock_desc *desc = (gen == VC4_GEN_5) ? &vc5_mock : &vc4_mock;
struct vc4_dev *vc4;
struct device *dev;
int ret;
@@ -173,9 +173,9 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
vc4->dev = dev;
- vc4->is_vc5 = is_vc5;
+ vc4->gen = gen;
- vc4->hvs = __vc4_hvs_alloc(vc4, NULL);
+ vc4->hvs = __vc4_hvs_alloc(vc4, NULL, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4->hvs);
drm = &vc4->base;
@@ -198,10 +198,10 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
struct vc4_dev *vc4_mock_device(struct kunit *test)
{
- return __mock_device(test, false);
+ return __mock_device(test, VC4_GEN_4);
}
struct vc4_dev *vc5_mock_device(struct kunit *test)
{
- return __mock_device(test, true);
+ return __mock_device(test, VC4_GEN_5);
}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 61622e951031..40a05869a50e 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -90,7 +90,7 @@ static const struct encoder_constraint vc4_encoder_constraints[] = {
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 1),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1),
- ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP0, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 2),
};
@@ -98,7 +98,7 @@ static const struct encoder_constraint vc5_encoder_constraints[] = {
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DPI, 0),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1),
- ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 0, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP0, 0, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 0, 1, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 0, 1, 2),
ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI1, 0, 1, 2),
@@ -207,7 +207,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("1 output: DSI1",
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("1 output: TXP",
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: DSI0, HDMI0",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0),
@@ -219,7 +219,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: DSI0, TXP",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: DPI, HDMI0",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0),
@@ -231,19 +231,19 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: DPI, TXP",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: HDMI0, DSI1",
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: HDMI0, TXP",
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("2 outputs: VEC, DSI1",
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("2 outputs: VEC, TXP",
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0,
@@ -251,7 +251,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
@@ -259,7 +259,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0,
@@ -267,7 +267,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
@@ -275,7 +275,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
};
KUNIT_ARRAY_PARAM(vc4_test_pv_muxing,
@@ -287,7 +287,7 @@ static const struct pv_muxing_param vc4_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_DSI0),
VC4_PV_MUXING_TEST("TXP/DSI1 Conflict",
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC4_PV_MUXING_TEST("HDMI0/VEC Conflict",
VC4_ENCODER_TYPE_HDMI0,
@@ -296,22 +296,22 @@ static const struct pv_muxing_param vc4_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("More than 3 outputs: DPI, HDMI0, DSI1, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC4_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
};
KUNIT_ARRAY_PARAM(vc4_test_pv_muxing_invalid,
@@ -342,7 +342,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("2 outputs: DPI, TXP",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: DPI, VEC",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC),
@@ -360,7 +360,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("2 outputs: DSI0, TXP",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: DSI0, VEC",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC),
@@ -372,7 +372,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("2 outputs: DSI1, TXP",
VC4_ENCODER_TYPE_DSI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
@@ -384,7 +384,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("2 outputs: HDMI0, TXP",
VC4_ENCODER_TYPE_HDMI0,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: HDMI0, HDMI1",
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
@@ -393,14 +393,14 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("2 outputs: HDMI1, TXP",
VC4_ENCODER_TYPE_HDMI1,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("2 outputs: TXP, VEC",
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_VEC),
VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
@@ -415,15 +415,15 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, DSI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI0",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DPI, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
@@ -440,7 +440,7 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP),
+ VC4_ENCODER_TYPE_TXP0),
VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
@@ -455,15 +455,15 @@ static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, DSI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI0",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("3 outputs: DSI0, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
@@ -490,17 +490,17 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
@@ -519,17 +519,17 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, DSI1, HDMI0, HDMI1",
@@ -540,19 +540,19 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0, HDMI1",
@@ -563,24 +563,24 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
@@ -599,17 +599,17 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, DSI1, HDMI0, HDMI1",
@@ -620,19 +620,19 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0, HDMI1",
@@ -643,27 +643,27 @@ static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: VEC, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DPI,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0, HDMI1",
VC4_ENCODER_TYPE_DSI0,
VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_HDMI0,
VC4_ENCODER_TYPE_HDMI1),
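The TXP -> TXP0 rename above is purely mechanical: it frees the name so the second writeback engine on BCM2712, TXP1, can join the encoder enum (see the vc4_drv.h hunk further down). A hypothetical test entry exercising both writeback encoders could then look like the sketch below; the VC6_PV_MUXING_TEST macro is illustrative only and is not part of this patch:

	/* Hypothetical: assumes a VC6 variant of the muxing-test macro. */
	VC6_PV_MUXING_TEST("2 outputs: TXP0, TXP1",
			   VC4_ENCODER_TYPE_TXP0,
			   VC4_ENCODER_TYPE_TXP1),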
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 3f72be7490d5..fb450b6a4d44 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -251,7 +251,7 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
mutex_lock(&vc4->purgeable.lock);
@@ -265,7 +265,7 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
/* list_del_init() is used here because the caller might release
@@ -396,7 +396,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return ERR_PTR(-ENODEV);
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
@@ -427,7 +427,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
struct drm_gem_dma_object *dma_obj;
struct vc4_bo *bo;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return ERR_PTR(-ENODEV);
if (size == 0)
@@ -496,7 +496,7 @@ int vc4_bo_dumb_create(struct drm_file *file_priv,
struct vc4_bo *bo = NULL;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
ret = vc4_dumb_fixup_args(args);
@@ -622,7 +622,7 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
/* Fast path: if the BO is already retained by someone, no need to
@@ -661,7 +661,7 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
/* Fast path: if the BO is still retained by someone, no need to test
@@ -783,7 +783,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct vc4_bo *bo = NULL;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
ret = vc4_grab_bin_bo(vc4, vc4file);
@@ -813,7 +813,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_vc4_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
@@ -839,7 +839,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
struct vc4_bo *bo = NULL;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (args->size == 0)
@@ -918,7 +918,7 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
struct vc4_bo *bo;
bool t_format;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (args->flags != 0)
@@ -964,7 +964,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (args->flags != 0 || args->modifier != 0)
@@ -1007,7 +1007,7 @@ int vc4_bo_cache_init(struct drm_device *dev)
int ret;
int i;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
/* Create the initial set of BO labels that the kernel will
@@ -1071,7 +1071,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gem_obj;
int ret = 0, label;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (!args->len)
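All of the vc4_bo.c guards above follow one shape: the old is_vc5 boolean cannot describe four hardware generations, so the checks become ordered comparisons against an enum (added in the vc4_drv.h hunk below). A minimal sketch of the pattern, with an illustrative helper that is not itself in the driver:

	enum vc4_gen {
		VC4_GEN_4,
		VC4_GEN_5,
		VC4_GEN_6_C,
		VC4_GEN_6_D,
	};

	/*
	 * The GEM/BO paths only exist on the original VC4; later parts
	 * hand 3D off to the separate v3d driver.
	 */
	static bool vc4_has_legacy_gem(const struct vc4_dev *vc4)
	{
		return vc4->gen == VC4_GEN_4;
	}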
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 8b5a7e5eb146..cf40a53ad42e 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -83,13 +83,22 @@ static unsigned int
vc4_crtc_get_cob_allocation(struct vc4_dev *vc4, unsigned int channel)
{
struct vc4_hvs *hvs = vc4->hvs;
- u32 dispbase = HVS_READ(SCALER_DISPBASEX(channel));
+ u32 dispbase, top, base;
+
/* Top/base are supposed to be 4-pixel aligned, but the
* Raspberry Pi firmware fills the low bits (which are
* presumably ignored).
*/
- u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
- u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+
+ if (vc4->gen >= VC4_GEN_6_C) {
+ dispbase = HVS_READ(SCALER6_DISPX_COB(channel));
+ top = VC4_GET_FIELD(dispbase, SCALER6_DISPX_COB_TOP) & ~3;
+ base = VC4_GET_FIELD(dispbase, SCALER6_DISPX_COB_BASE) & ~3;
+ } else {
+ dispbase = HVS_READ(SCALER_DISPBASEX(channel));
+ top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
+ base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+ }
return top - base + 4;
}
@@ -105,6 +114,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
struct vc4_hvs *hvs = vc4->hvs;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
+ unsigned int channel = vc4_crtc_state->assigned_channel;
unsigned int cob_size;
u32 val;
int fifo_lines;
@@ -121,7 +131,10 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
* Read vertical scanline which is currently composed for our
* pixelvalve by the HVS, and also the scaler status.
*/
- val = HVS_READ(SCALER_DISPSTATX(vc4_crtc_state->assigned_channel));
+ if (vc4->gen >= VC4_GEN_6_C)
+ val = HVS_READ(SCALER6_DISPX_STATUS(channel));
+ else
+ val = HVS_READ(SCALER_DISPSTATX(channel));
/* Get optional system timestamp after query. */
if (etime)
@@ -130,18 +143,23 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
/* Vertical position of hvs composed scanline. */
- *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ *vpos = VC4_GET_FIELD(val, SCALER6_DISPX_STATUS_YLINE);
+ else
+ *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
*hpos = 0;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
*vpos /= 2;
/* Use hpos to correct for field offset in interlaced mode. */
- if (vc4_hvs_get_fifo_frame_count(hvs, vc4_crtc_state->assigned_channel) % 2)
+ if (vc4_hvs_get_fifo_frame_count(hvs, channel) % 2)
*hpos += mode->crtc_htotal / 2;
}
- cob_size = vc4_crtc_get_cob_allocation(vc4, vc4_crtc_state->assigned_channel);
+ cob_size = vc4_crtc_get_cob_allocation(vc4, channel);
/* This is the offset we need for translating hvs -> pv scanout pos. */
fifo_lines = cob_size / mode->crtc_hdisplay;
@@ -222,6 +240,11 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
+
+ /*
+ * NOTE: Could we use register 0x68 (PV_HW_CFG1) to get the FIFO
+ * size?
+ */
u32 fifo_len_bytes = pv_data->fifo_depth;
/*
@@ -263,7 +286,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
* Removing 1 from the FIFO full level however
* seems to completely remove that issue.
*/
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
@@ -403,6 +426,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
*/
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
+ (vc4->gen >= VC4_GEN_6_C ? PV_VCONTROL_ODD_TIMING : 0) |
(is_dsi ? PV_VCONTROL_DSI : 0) |
PV_VCONTROL_INTERLACE |
(odd_field_first
@@ -414,6 +438,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
} else {
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
+ (vc4->gen >= VC4_GEN_6_C ? PV_VCONTROL_ODD_TIMING : 0) |
(is_dsi ? PV_VCONTROL_DSI : 0));
CRTC_WRITE(PV_VSYNCD_EVEN, 0);
}
@@ -428,11 +453,17 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
if (is_dsi)
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
- if (vc4->is_vc5)
+ if (vc4->gen >= VC4_GEN_5)
CRTC_WRITE(PV_MUX_CFG,
VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
+ if (vc4->gen >= VC4_GEN_6_C)
+ CRTC_WRITE(PV_PIPE_INIT_CTRL,
+ VC4_SET_FIELD(1, PV_PIPE_INIT_CTRL_PV_INIT_WIDTH) |
+ VC4_SET_FIELD(1, PV_PIPE_INIT_CTRL_PV_INIT_IDLE) |
+ PV_PIPE_INIT_CTRL_PV_INIT_EN);
+
CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR |
vc4_crtc_get_fifo_full_level_bits(vc4_crtc, format) |
VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
@@ -458,8 +489,10 @@ static void require_hvs_enabled(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
- WARN_ON_ONCE((HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE) !=
- SCALER_DISPCTRL_ENABLE);
+ if (vc4->gen >= VC4_GEN_6_C)
+ WARN_ON_ONCE(!(HVS_READ(SCALER6_CONTROL) & SCALER6_CONTROL_HVS_EN));
+ else
+ WARN_ON_ONCE(!(HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE));
}
static int vc4_crtc_disable(struct drm_crtc *crtc,
@@ -529,7 +562,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
"brcm,bcm2711-pixelvalve2") ||
of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
- "brcm,bcm2711-pixelvalve4")))
+ "brcm,bcm2711-pixelvalve4") ||
+ of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
+ "brcm,bcm2712-pixelvalve0") ||
+ of_device_is_compatible(vc4_crtc->pdev->dev.of_node,
+ "brcm,bcm2712-pixelvalve1")))
return 0;
if (!(CRTC_READ(PV_CONTROL) & PV_CONTROL_EN))
@@ -735,10 +772,17 @@ int vc4_crtc_atomic_check(struct drm_crtc *crtc,
if (conn_state->crtc != crtc)
continue;
- vc4_state->margins.left = conn_state->tv.margins.left;
- vc4_state->margins.right = conn_state->tv.margins.right;
- vc4_state->margins.top = conn_state->tv.margins.top;
- vc4_state->margins.bottom = conn_state->tv.margins.bottom;
+ if (memcmp(&vc4_state->margins, &conn_state->tv.margins,
+ sizeof(vc4_state->margins))) {
+ memcpy(&vc4_state->margins, &conn_state->tv.margins,
+ sizeof(vc4_state->margins));
+
+ /*
+ * Need to force the dlist entries for all planes to be
+ * updated so that the dest rectangles are changed.
+ */
+ crtc_state->zpos_changed = true;
+ }
break;
}
@@ -781,14 +825,21 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
+ unsigned int current_dlist;
u32 chan = vc4_crtc->current_hvs_channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
spin_lock(&vc4_crtc->irq_lock);
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ current_dlist = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_DL(chan)),
+ SCALER6_DISPX_DL_LACT);
+ else
+ current_dlist = HVS_READ(SCALER_DISPLACTX(chan));
+
if (vc4_crtc->event &&
- (vc4_crtc->current_dlist == HVS_READ(SCALER_DISPLACTX(chan)) ||
- vc4_crtc->feeds_txp)) {
+ (vc4_crtc->current_dlist == current_dlist || vc4_crtc->feeds_txp)) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
@@ -799,7 +850,8 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
* the CRTC and encoder already reconfigured, leading to
* underruns. This can be seen when reconfiguring the CRTC.
*/
- vc4_hvs_unmask_underrun(hvs, chan);
+ if (vc4->gen < VC4_GEN_6_C)
+ vc4_hvs_unmask_underrun(hvs, chan);
}
spin_unlock(&vc4_crtc->irq_lock);
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -913,7 +965,7 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
struct dma_fence *fence;
int ret;
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
@@ -1000,7 +1052,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
/*
@@ -1043,7 +1095,7 @@ int vc4_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (vc4->is_vc5)
+ if (vc4->gen > VC4_GEN_4)
return vc5_async_page_flip(crtc, fb, event, flags);
else
return vc4_async_page_flip(crtc, fb, event, flags);
@@ -1257,6 +1309,32 @@ const struct vc4_pv_data bcm2711_pv4_data = {
},
};
+const struct vc4_pv_data bcm2712_pv0_data = {
+ .base = {
+ .debugfs_name = "crtc0_regs",
+ .hvs_available_channels = BIT(0),
+ .hvs_output = 0,
+ },
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_HDMI0,
+ },
+};
+
+const struct vc4_pv_data bcm2712_pv1_data = {
+ .base = {
+ .debugfs_name = "crtc1_regs",
+ .hvs_available_channels = BIT(1),
+ .hvs_output = 1,
+ },
+ .fifo_depth = 64,
+ .pixels_per_clock = 1,
+ .encoder_types = {
+ [0] = VC4_ENCODER_TYPE_HDMI1,
+ },
+};
+
static const struct of_device_id vc4_crtc_dt_match[] = {
{ .compatible = "brcm,bcm2835-pixelvalve0", .data = &bcm2835_pv0_data },
{ .compatible = "brcm,bcm2835-pixelvalve1", .data = &bcm2835_pv1_data },
@@ -1266,6 +1344,8 @@ static const struct of_device_id vc4_crtc_dt_match[] = {
{ .compatible = "brcm,bcm2711-pixelvalve2", .data = &bcm2711_pv2_data },
{ .compatible = "brcm,bcm2711-pixelvalve3", .data = &bcm2711_pv3_data },
{ .compatible = "brcm,bcm2711-pixelvalve4", .data = &bcm2711_pv4_data },
+ { .compatible = "brcm,bcm2712-pixelvalve0", .data = &bcm2712_pv0_data },
+ { .compatible = "brcm,bcm2712-pixelvalve1", .data = &bcm2712_pv1_data },
{}
};
@@ -1338,9 +1418,8 @@ int __vc4_crtc_init(struct drm_device *drm,
drm_crtc_helper_add(crtc, crtc_helper_funcs);
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
-
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
/* We support CTM, but only for one CRTC at a time. It's therefore
@@ -1458,7 +1537,7 @@ static void vc4_crtc_dev_remove(struct platform_device *pdev)
struct platform_driver vc4_crtc_driver = {
.probe = vc4_crtc_dev_probe,
- .remove_new = vc4_crtc_dev_remove,
+ .remove = vc4_crtc_dev_remove,
.driver = {
.name = "vc4_crtc",
.of_match_table = vc4_crtc_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index a382dc4654bd..960550c166d9 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -395,7 +395,7 @@ static void vc4_dpi_dev_remove(struct platform_device *pdev)
struct platform_driver vc4_dpi_driver = {
.probe = vc4_dpi_dev_probe,
- .remove_new = vc4_dpi_dev_remove,
+ .remove = vc4_dpi_dev_remove,
.driver = {
.name = "vc4_dpi",
.of_match_table = vc4_dpi_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index c133e96b8aca..c7cb1e3a6434 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -20,6 +20,7 @@
* driver.
*/
+#include <linux/aperture.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
@@ -30,10 +31,11 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
@@ -45,7 +47,6 @@
#define DRIVER_NAME "vc4"
#define DRIVER_DESC "Broadcom VC4 graphics"
-#define DRIVER_DATE "20140616"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@@ -98,7 +99,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (!vc4->v3d)
@@ -147,7 +148,7 @@ static int vc4_open(struct drm_device *dev, struct drm_file *file)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
@@ -165,7 +166,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file->driver_priv;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
if (vc4file->bin_bo_used)
@@ -212,6 +213,7 @@ const struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
@@ -219,7 +221,6 @@ const struct drm_driver vc4_drm_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -235,12 +236,12 @@ const struct drm_driver vc5_drm_driver = {
#endif
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &vc4_drm_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -275,6 +276,7 @@ static void vc4_component_unbind_all(void *ptr)
static const struct of_device_id vc4_dma_range_matches[] = {
{ .compatible = "brcm,bcm2711-hvs" },
+ { .compatible = "brcm,bcm2712-hvs" },
{ .compatible = "brcm,bcm2835-hvs" },
{ .compatible = "brcm,bcm2835-v3d" },
{ .compatible = "brcm,cygnus-v3d" },
@@ -291,17 +293,23 @@ static int vc4_drm_bind(struct device *dev)
struct vc4_dev *vc4;
struct device_node *node;
struct drm_crtc *crtc;
- bool is_vc5;
+ enum vc4_gen gen;
int ret = 0;
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5");
- if (is_vc5)
+ gen = (enum vc4_gen)of_device_get_match_data(dev);
+
+ if (gen > VC4_GEN_4)
driver = &vc5_drm_driver;
else
driver = &vc4_drm_driver;
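+ /* BCM2712 can emit 36-bit DMA addresses; older parts are limited to 32 bits. */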
+ if (gen >= VC4_GEN_6_C)
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+ else
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
node = of_find_matching_node_and_match(NULL, vc4_dma_range_matches,
NULL);
if (node) {
@@ -315,13 +323,13 @@ static int vc4_drm_bind(struct device *dev)
vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base);
if (IS_ERR(vc4))
return PTR_ERR(vc4);
- vc4->is_vc5 = is_vc5;
+ vc4->gen = gen;
vc4->dev = dev;
drm = &vc4->base;
platform_set_drvdata(pdev, drm);
- if (!is_vc5) {
+ if (gen == VC4_GEN_4) {
ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
if (ret)
goto err;
@@ -335,7 +343,7 @@ static int vc4_drm_bind(struct device *dev)
if (ret)
goto err;
- if (!is_vc5) {
+ if (gen == VC4_GEN_4) {
ret = vc4_gem_init(drm);
if (ret)
goto err;
@@ -352,7 +360,7 @@ static int vc4_drm_bind(struct device *dev)
}
}
- ret = drm_aperture_remove_framebuffers(driver);
+ ret = aperture_remove_all_conflicting_devices(driver->name);
if (ret)
goto err;
@@ -389,7 +397,7 @@ static int vc4_drm_bind(struct device *dev)
if (ret < 0)
goto err;
- drm_fbdev_dma_setup(drm, 16);
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565);
return 0;
@@ -454,16 +462,18 @@ static void vc4_platform_drm_shutdown(struct platform_device *pdev)
}
static const struct of_device_id vc4_of_match[] = {
- { .compatible = "brcm,bcm2711-vc5", },
- { .compatible = "brcm,bcm2835-vc4", },
- { .compatible = "brcm,cygnus-vc4", },
+ { .compatible = "brcm,bcm2711-vc5", .data = (void *)VC4_GEN_5 },
+ /* NB: on D0 hw, GEN_6_C is corrected to GEN_6_D by vc4_hvs_bind */
+ { .compatible = "brcm,bcm2712-vc6", .data = (void *)VC4_GEN_6_C },
+ { .compatible = "brcm,bcm2835-vc4", .data = (void *)VC4_GEN_4 },
+ { .compatible = "brcm,cygnus-vc4", .data = (void *)VC4_GEN_4 },
{},
};
MODULE_DEVICE_TABLE(of, vc4_of_match);
static struct platform_driver vc4_platform_driver = {
.probe = vc4_platform_drm_probe,
- .remove_new = vc4_platform_drm_remove,
+ .remove = vc4_platform_drm_remove,
.shutdown = vc4_platform_drm_shutdown,
.driver = {
.name = "vc4-drm",
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 08e29fa82563..4a078ffd9f82 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -15,6 +15,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
@@ -80,11 +81,18 @@ struct vc4_perfmon {
u64 counters[] __counted_by(ncounters);
};
+enum vc4_gen {
+ VC4_GEN_4,
+ VC4_GEN_5,
+ VC4_GEN_6_C,
+ VC4_GEN_6_D,
+};
+
struct vc4_dev {
struct drm_device base;
struct device *dev;
- bool is_vc5;
+ enum vc4_gen gen;
unsigned int irq;
@@ -310,13 +318,30 @@ struct vc4_v3d {
struct debugfs_regset32 regset;
};
+#define VC4_NUM_UPM_HANDLES 32
+struct vc4_upm_refcounts {
+ refcount_t refcount;
+
+ /* Allocation size */
+ size_t size;
+ /* Our allocation in UPM for prefetching. */
+ struct drm_mm_node upm;
+
+ /* Pointer back to the HVS structure */
+ struct vc4_hvs *hvs;
+};
+
+#define HVS_NUM_CHANNELS 3
+
struct vc4_hvs {
struct vc4_dev *vc4;
struct platform_device *pdev;
void __iomem *regs;
u32 __iomem *dlist;
+ unsigned int dlist_mem_size;
struct clk *core_clk;
+ struct clk *disp_clk;
unsigned long max_core_rate;
@@ -324,8 +349,15 @@ struct vc4_hvs {
* list. Units are dwords.
*/
struct drm_mm dlist_mm;
+
/* Memory manager for the LBM memory used by HVS scaling. */
struct drm_mm lbm_mm;
+
+ /* Memory manager for the UPM memory used for prefetching. */
+ struct drm_mm upm_mm;
+ struct ida upm_handles;
+ struct vc4_upm_refcounts upm_refcounts[VC4_NUM_UPM_HANDLES + 1];
+
spinlock_t mm_lock;
struct drm_mm_node mitchell_netravali_filter;
@@ -348,6 +380,7 @@ struct vc4_hvs {
};
#define HVS_NUM_CHANNELS 3
+#define HVS_UBM_WORD_SIZE 256
struct vc4_hvs_state {
struct drm_private_state base;
@@ -394,7 +427,7 @@ struct vc4_plane_state {
*/
u32 pos0_offset;
u32 pos2_offset;
- u32 ptr0_offset;
+ u32 ptr0_offset[DRM_FORMAT_MAX_PLANES];
u32 lbm_offset;
/* Offset where the plane's dlist was last stored in the
@@ -404,7 +437,7 @@ struct vc4_plane_state {
/* Clipped coordinates of the plane on the display. */
int crtc_x, crtc_y, crtc_w, crtc_h;
- /* Clipped area being scanned from in the FB. */
+ /* Clipped area being scanned from in the FB, in u16.16 format. */
u32 src_x, src_y;
u32 src_w[2], src_h[2];
@@ -414,14 +447,15 @@ struct vc4_plane_state {
bool is_unity;
bool is_yuv;
- /* Offset to start scanning out from the start of the plane's
- * BO.
- */
- u32 offsets[3];
-
/* Our allocation in LBM for temporary storage during scaling. */
struct drm_mm_node lbm;
+ /* The Unified Pre-Fetcher Handle */
+ unsigned int upm_handle[DRM_FORMAT_MAX_PLANES];
+
+ /* Number of lines to pre-fetch */
+ unsigned int upm_buffer_lines;
+
/* Set when the plane has per-pixel alpha content or does not cover
* the entire screen. This is a hint to the CRTC that it might need
* to enable background color fill.
@@ -456,7 +490,8 @@ enum vc4_encoder_type {
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_SMI,
VC4_ENCODER_TYPE_DPI,
- VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_TXP0,
+ VC4_ENCODER_TYPE_TXP1,
};
struct vc4_encoder {
@@ -503,7 +538,16 @@ struct vc4_crtc_data {
int hvs_output;
};
-extern const struct vc4_crtc_data vc4_txp_crtc_data;
+struct vc4_txp_data {
+ struct vc4_crtc_data base;
+ enum vc4_encoder_type encoder_type;
+ unsigned int high_addr_ptr_reg;
+ unsigned int has_byte_enable:1;
+ unsigned int size_minus_one:1;
+ unsigned int supports_40bit_addresses:1;
+};
+
+extern const struct vc4_txp_data bcm2835_txp_data;
struct vc4_pv_data {
struct vc4_crtc_data base;
@@ -525,6 +569,8 @@ extern const struct vc4_pv_data bcm2711_pv1_data;
extern const struct vc4_pv_data bcm2711_pv2_data;
extern const struct vc4_pv_data bcm2711_pv3_data;
extern const struct vc4_pv_data bcm2711_pv4_data;
+extern const struct vc4_pv_data bcm2712_pv0_data;
+extern const struct vc4_pv_data bcm2712_pv1_data;
struct vc4_crtc {
struct drm_crtc base;
@@ -598,12 +644,7 @@ struct vc4_crtc_state {
bool txp_armed;
unsigned int assigned_channel;
- struct {
- unsigned int left;
- unsigned int right;
- unsigned int top;
- unsigned int bottom;
- } margins;
+ struct drm_connector_tv_margins margins;
unsigned long hvs_load;
@@ -640,6 +681,12 @@ struct vc4_crtc_state {
writel(val, hvs->regs + (offset)); \
} while (0)
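+/*
+ * The C0 and D0 steppings of BCM2712 lay out the SCALER6 block
+ * differently; these wrappers pick the SCALER6_ or SCALER6D_ variant
+ * of a register name based on the probed generation.
+ */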
+#define HVS_READ6(offset) \
+ HVS_READ(hvs->vc4->gen == VC4_GEN_6_C ? SCALER6_ ## offset : SCALER6D_ ## offset)
+
+#define HVS_WRITE6(offset, val) \
+ HVS_WRITE(hvs->vc4->gen == VC4_GEN_6_C ? SCALER6_ ## offset : SCALER6D_ ## offset, val)
+
#define VC4_REG32(reg) { .name = #reg, .offset = reg }
struct vc4_exec_info {
@@ -1002,7 +1049,9 @@ void vc4_irq_reset(struct drm_device *dev);
/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
-struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev);
+struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4,
+ void __iomem *regs,
+ struct platform_device *pdev);
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output);
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index f5ccc1bf7a63..5eb293bdb363 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1841,7 +1841,7 @@ static void vc4_dsi_dev_remove(struct platform_device *pdev)
struct platform_driver vc4_dsi_driver = {
.probe = vc4_dsi_dev_probe,
- .remove_new = vc4_dsi_dev_remove,
+ .remove = vc4_dsi_dev_remove,
.driver = {
.name = "vc4_dsi",
.of_match_table = vc4_dsi_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 24fb1b57e1dd..22bccd69eb62 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -76,7 +76,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
u32 i;
int ret = 0;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (!vc4->v3d) {
@@ -389,7 +389,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
unsigned long timeout_expire;
DEFINE_WAIT(wait);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (vc4->finished_seqno >= seqno)
@@ -474,7 +474,7 @@ vc4_submit_next_bin_job(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *exec;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
again:
@@ -522,7 +522,7 @@ vc4_submit_next_render_job(struct drm_device *dev)
if (!exec)
return;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
/* A previous RCL may have written to one of our textures, and
@@ -543,7 +543,7 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_dev *vc4 = to_vc4_dev(dev);
bool was_empty = list_empty(&vc4->render_job_list);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
list_move_tail(&exec->head, &vc4->render_job_list);
@@ -970,7 +970,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
unsigned long irqflags;
struct vc4_seqno_cb *cb, *cb_temp;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
spin_lock_irqsave(&vc4->job_lock, irqflags);
@@ -1009,7 +1009,7 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
cb->func = func;
@@ -1065,7 +1065,7 @@ vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_wait_seqno *args = data;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
@@ -1082,7 +1082,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (args->pad != 0)
@@ -1131,7 +1131,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
args->shader_rec_size,
args->bo_handle_count);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (!vc4->v3d) {
@@ -1267,7 +1267,7 @@ int vc4_gem_init(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
vc4->dma_fence_context = dma_fence_context_alloc(1);
@@ -1326,7 +1326,7 @@ int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct vc4_bo *bo;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
switch (args->madv) {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 6611ab7c26a6..47d9ada98430 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -31,6 +31,7 @@
* encoder block has CEC support.
*/
+#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/display/drm_scdc_helper.h>
@@ -147,6 +148,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
if (!drm_dev_enter(drm, &idx))
return -ENODEV;
+ WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
+
drm_print_regset32(&p, &vc4_hdmi->hdmi_regset);
drm_print_regset32(&p, &vc4_hdmi->hd_regset);
drm_print_regset32(&p, &vc4_hdmi->cec_regset);
@@ -156,6 +159,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
drm_print_regset32(&p, &vc4_hdmi->ram_regset);
drm_print_regset32(&p, &vc4_hdmi->rm_regset);
+ pm_runtime_put(&vc4_hdmi->pdev->dev);
+
drm_dev_exit(idx);
return 0;
@@ -379,7 +384,6 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
enum drm_connector_status status)
{
struct drm_connector *connector = &vc4_hdmi->connector;
- const struct drm_edid *drm_edid;
int ret;
/*
@@ -401,17 +405,14 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
return;
}
- drm_edid = drm_edid_read_ddc(connector, vc4_hdmi->ddc);
+ drm_atomic_helper_connector_hdmi_hotplug(connector, status);
- drm_edid_connector_update(connector, drm_edid);
cec_s_phys_addr(vc4_hdmi->cec_adap,
connector->display_info.source_physical_address, false);
- if (!drm_edid)
+ if (status != connector_status_connected)
return;
- drm_edid_free(drm_edid);
-
for (;;) {
ret = vc4_hdmi_reset_link(connector, ctx);
if (ret == -EDEADLK) {
@@ -466,31 +467,10 @@ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct vc4_dev *vc4 = to_vc4_dev(connector->dev);
- const struct drm_edid *drm_edid;
int ret = 0;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in reentrancy issues since cec_s_phys_addr() might call
- * .adap_enable, which leads to that funtion being called with our mutex
- * held.
- *
- * Concurrency isn't an issue at the moment since we don't share
- * any state with any of the other frameworks so we can ignore
- * the lock for now.
- */
-
- drm_edid = drm_edid_read_ddc(connector, vc4_hdmi->ddc);
- drm_edid_connector_update(connector, drm_edid);
- cec_s_phys_addr(vc4_hdmi->cec_adap,
- connector->display_info.source_physical_address, false);
- if (!drm_edid)
- return 0;
-
ret = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
if (!vc4->hvs->vc5_hdmi_enable_hdmi_20) {
struct drm_device *drm = connector->dev;
@@ -566,6 +546,7 @@ static void vc4_hdmi_connector_reset(struct drm_connector *connector)
}
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
+ .force = drm_atomic_helper_connector_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = vc4_hdmi_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -576,9 +557,11 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs =
.detect_ctx = vc4_hdmi_connector_detect_ctx,
.get_modes = vc4_hdmi_connector_get_modes,
.atomic_check = vc4_hdmi_connector_atomic_check,
+ .mode_valid = drm_hdmi_connector_mode_valid,
};
static const struct drm_connector_hdmi_funcs vc4_hdmi_hdmi_connector_funcs;
+static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs;
static int vc4_hdmi_connector_init(struct drm_device *dev,
struct vc4_hdmi *vc4_hdmi)
@@ -604,6 +587,12 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
if (ret)
return ret;
+ ret = drm_connector_hdmi_audio_init(connector, dev->dev,
+ &vc4_hdmi_audio_funcs,
+ 8, false, -1);
+ if (ret)
+ return ret;
+
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
@@ -841,6 +830,7 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_device *drm = vc4_hdmi->connector.dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
unsigned long flags;
int idx;
@@ -857,14 +847,25 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_CLRRGB);
+ if (vc4->gen >= VC4_GEN_6_C)
+ HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) |
+ VC4_HD_VID_CTL_BLANKPIX);
+
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
mdelay(1);
- spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- HDMI_WRITE(HDMI_VID_CTL,
- HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
- spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ /*
+ * TODO: This should work on BCM2712, but for some reason it
+ * doesn't and results in a system lockup.
+ */
+ if (vc4->gen < VC4_GEN_6_C) {
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+ HDMI_WRITE(HDMI_VID_CTL,
+ HDMI_READ(HDMI_VID_CTL) &
+ ~VC4_HD_VID_CTL_ENABLE);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ }
vc4_hdmi_disable_scrambling(encoder);
@@ -1484,7 +1485,6 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
goto err_put_runtime_pm;
}
-
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (tmds_char_rate > 297000000)
@@ -1590,10 +1590,13 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
+ (HDMI_READ(HDMI_VID_CTL) &
+ ~(VC4_HD_VID_CTL_VSYNC_LOW | VC4_HD_VID_CTL_HSYNC_LOW)) |
VC4_HD_VID_CTL_ENABLE |
VC4_HD_VID_CTL_CLRRGB |
VC4_HD_VID_CTL_UNDERFLOW_ENABLE |
VC4_HD_VID_CTL_FRAME_COUNTER_RESET |
+ VC4_HD_VID_CTL_BLANK_INSERT_EN |
(vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) |
(hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW));
@@ -1747,7 +1750,6 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- unsigned long long rate;
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
!(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
@@ -1755,8 +1757,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
(mode->hsync_end % 2) || (mode->htotal % 2)))
return MODE_H_ILLEGAL;
- rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
- return vc4_hdmi_connector_clock_valid(&vc4_hdmi->connector, mode, rate);
+ return MODE_OK;
}
static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
@@ -1904,9 +1905,9 @@ static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi)
return true;
}
-static int vc4_hdmi_audio_startup(struct device *dev, void *data)
+static int vc4_hdmi_audio_startup(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int ret = 0;
@@ -1920,7 +1921,7 @@ static int vc4_hdmi_audio_startup(struct device *dev, void *data)
}
if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
- ret = -ENODEV;
+ ret = -ENOTSUPP;
goto out_dev_exit;
}
@@ -1968,9 +1969,9 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
-static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
+static void vc4_hdmi_audio_shutdown(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
@@ -2040,13 +2041,13 @@ static int sample_rate_to_mai_fmt(int samplerate)
}
/* HDMI audio codec callbacks */
-static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
+static int vc4_hdmi_audio_prepare(struct drm_connector *connector,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
- struct drm_connector *connector = &vc4_hdmi->connector;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
unsigned int sample_rate = params->sample_rate;
unsigned int channels = params->channels;
unsigned long flags;
@@ -2057,7 +2058,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
int ret = 0;
int idx;
- dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
+ dev_dbg(&vc4_hdmi->pdev->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
sample_rate, params->sample_width, channels);
mutex_lock(&vc4_hdmi->mutex);
@@ -2104,11 +2105,33 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
VC4_HDMI_AUDIO_PACKET_CEA_MASK);
/* Set the MAI threshold */
- HDMI_WRITE(HDMI_MAI_THR,
- VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICHIGH) |
- VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICLOW) |
- VC4_SET_FIELD(0x06, VC4_HD_MAI_THR_DREQHIGH) |
- VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_DREQLOW));
+ switch (vc4->gen) {
+ case VC4_GEN_6_D:
+ HDMI_WRITE(HDMI_MAI_THR,
+ VC4_SET_FIELD(0x10, VC6_D_HD_MAI_THR_PANICHIGH) |
+ VC4_SET_FIELD(0x10, VC6_D_HD_MAI_THR_PANICLOW) |
+ VC4_SET_FIELD(0x1c, VC6_D_HD_MAI_THR_DREQHIGH) |
+ VC4_SET_FIELD(0x1c, VC6_D_HD_MAI_THR_DREQLOW));
+ break;
+ case VC4_GEN_6_C:
+ case VC4_GEN_5:
+ HDMI_WRITE(HDMI_MAI_THR,
+ VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) |
+ VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) |
+ VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQHIGH) |
+ VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQLOW));
+ break;
+ case VC4_GEN_4:
+ HDMI_WRITE(HDMI_MAI_THR,
+ VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICHIGH) |
+ VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICLOW) |
+ VC4_SET_FIELD(0x6, VC4_HD_MAI_THR_DREQHIGH) |
+ VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_DREQLOW));
+ break;
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
+ break;
+ }
HDMI_WRITE(HDMI_MAI_CONFIG,
VC4_HDMI_MAI_CONFIG_BIT_REVERSE |
@@ -2174,40 +2197,12 @@ static const struct snd_dmaengine_pcm_config pcm_conf = {
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
-static int vc4_hdmi_audio_get_eld(struct device *dev, void *data,
- uint8_t *buf, size_t len)
-{
- struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- struct drm_connector *connector = &vc4_hdmi->connector;
-
- mutex_lock(&vc4_hdmi->mutex);
- memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
- mutex_unlock(&vc4_hdmi->mutex);
-
- return 0;
-}
-
-static const struct hdmi_codec_ops vc4_hdmi_codec_ops = {
- .get_eld = vc4_hdmi_audio_get_eld,
+static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = {
+ .startup = vc4_hdmi_audio_startup,
.prepare = vc4_hdmi_audio_prepare,
- .audio_shutdown = vc4_hdmi_audio_shutdown,
- .audio_startup = vc4_hdmi_audio_startup,
-};
-
-static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
- .ops = &vc4_hdmi_codec_ops,
- .max_i2s_channels = 8,
- .i2s = 1,
+ .shutdown = vc4_hdmi_audio_shutdown,
};
-static void vc4_hdmi_audio_codec_release(void *ptr)
-{
- struct vc4_hdmi *vc4_hdmi = ptr;
-
- platform_device_unregister(vc4_hdmi->audio.codec_pdev);
- vc4_hdmi->audio.codec_pdev = NULL;
-}
-
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2215,7 +2210,6 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
struct snd_soc_dai_link *dai_link = &vc4_hdmi->audio.link;
struct snd_soc_card *card = &vc4_hdmi->audio.card;
struct device *dev = &vc4_hdmi->pdev->dev;
- struct platform_device *codec_pdev;
const __be32 *addr;
int index, len;
int ret;
@@ -2308,20 +2302,6 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
return ret;
}
- codec_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &vc4_hdmi_codec_pdata,
- sizeof(vc4_hdmi_codec_pdata));
- if (IS_ERR(codec_pdev)) {
- dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev));
- return PTR_ERR(codec_pdev);
- }
- vc4_hdmi->audio.codec_pdev = codec_pdev;
-
- ret = devm_add_action_or_reset(dev, vc4_hdmi_audio_codec_release, vc4_hdmi);
- if (ret)
- return ret;
-
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
@@ -2334,7 +2314,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dai_link->stream_name = "MAI PCM";
dai_link->codecs->dai_name = "i2s-hifi";
dai_link->cpus->dai_name = dev_name(dev);
- dai_link->codecs->name = dev_name(&codec_pdev->dev);
+ dai_link->codecs->name = dev_name(&vc4_hdmi->connector.hdmi_audio.codec_pdev->dev);
dai_link->platforms->name = dev_name(dev);
card->dai_link = dai_link;
@@ -3108,6 +3088,7 @@ static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ clk_disable_unprepare(vc4_hdmi->audio_clock);
clk_disable_unprepare(vc4_hdmi->hsm_clock);
return 0;
@@ -3140,6 +3121,10 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
goto err_disable_clk;
}
+ ret = clk_prepare_enable(vc4_hdmi->audio_clock);
+ if (ret)
+ goto err_disable_clk;
+
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
@@ -3260,7 +3245,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return ret;
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
- of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
+ of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1") ||
+ of_device_is_compatible(dev->of_node, "brcm,bcm2712-hdmi0") ||
+ of_device_is_compatible(dev->of_node, "brcm,bcm2712-hdmi1")) &&
HDMI_READ(HDMI_VID_CTL) & VC4_HD_VID_CTL_ENABLE) {
clk_prepare_enable(vc4_hdmi->pixel_clock);
clk_prepare_enable(vc4_hdmi->hsm_clock);
@@ -3394,10 +3381,66 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.hp_detect = vc5_hdmi_hp_detect,
};
+static const struct vc4_hdmi_variant bcm2712_hdmi0_variant = {
+ .encoder_type = VC4_ENCODER_TYPE_HDMI0,
+ .debugfs_name = "hdmi0_regs",
+ .card_name = "vc4-hdmi-0",
+ .max_pixel_clock = 600000000,
+ .registers = vc6_hdmi_hdmi0_fields,
+ .num_registers = ARRAY_SIZE(vc6_hdmi_hdmi0_fields),
+ .phy_lane_mapping = {
+ PHY_LANE_0,
+ PHY_LANE_1,
+ PHY_LANE_2,
+ PHY_LANE_CK,
+ },
+ .unsupported_odd_h_timings = false,
+ .external_irq_controller = true,
+
+ .init_resources = vc5_hdmi_init_resources,
+ .csc_setup = vc5_hdmi_csc_setup,
+ .reset = vc5_hdmi_reset,
+ .set_timings = vc5_hdmi_set_timings,
+ .phy_init = vc6_hdmi_phy_init,
+ .phy_disable = vc6_hdmi_phy_disable,
+ .channel_map = vc5_hdmi_channel_map,
+ .supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
+};
+
+static const struct vc4_hdmi_variant bcm2712_hdmi1_variant = {
+ .encoder_type = VC4_ENCODER_TYPE_HDMI1,
+ .debugfs_name = "hdmi1_regs",
+ .card_name = "vc4-hdmi-1",
+ .max_pixel_clock = 600000000,
+ .registers = vc6_hdmi_hdmi1_fields,
+ .num_registers = ARRAY_SIZE(vc6_hdmi_hdmi1_fields),
+ .phy_lane_mapping = {
+ PHY_LANE_0,
+ PHY_LANE_1,
+ PHY_LANE_2,
+ PHY_LANE_CK,
+ },
+ .unsupported_odd_h_timings = false,
+ .external_irq_controller = true,
+
+ .init_resources = vc5_hdmi_init_resources,
+ .csc_setup = vc5_hdmi_csc_setup,
+ .reset = vc5_hdmi_reset,
+ .set_timings = vc5_hdmi_set_timings,
+ .phy_init = vc6_hdmi_phy_init,
+ .phy_disable = vc6_hdmi_phy_disable,
+ .channel_map = vc5_hdmi_channel_map,
+ .supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
+};
+
static const struct of_device_id vc4_hdmi_dt_match[] = {
{ .compatible = "brcm,bcm2835-hdmi", .data = &bcm2835_variant },
{ .compatible = "brcm,bcm2711-hdmi0", .data = &bcm2711_hdmi0_variant },
{ .compatible = "brcm,bcm2711-hdmi1", .data = &bcm2711_hdmi1_variant },
+ { .compatible = "brcm,bcm2712-hdmi0", .data = &bcm2712_hdmi0_variant },
+ { .compatible = "brcm,bcm2712-hdmi1", .data = &bcm2712_hdmi1_variant },
{}
};
@@ -3409,7 +3452,7 @@ static const struct dev_pm_ops vc4_hdmi_pm_ops = {
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
- .remove_new = vc4_hdmi_dev_remove,
+ .remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index b37f1d2c3fe5..e3d989ca302b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -104,8 +104,6 @@ struct vc4_hdmi_audio {
struct snd_soc_dai_link_component codec;
struct snd_soc_dai_link_component platform;
struct snd_dmaengine_dai_dma_data dma_data;
- struct hdmi_audio_infoframe infoframe;
- struct platform_device *codec_pdev;
bool streaming;
};
@@ -237,4 +235,8 @@ void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi);
void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi);
+void vc6_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *conn_state);
+void vc6_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi);
+
#endif /* _VC4_HDMI_H_ */
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
index 1f5507fc7a03..56e6a35da357 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c
@@ -125,6 +125,48 @@
#define VC4_HDMI_RM_FORMAT_SHIFT_SHIFT 24
#define VC4_HDMI_RM_FORMAT_SHIFT_MASK VC4_MASK(25, 24)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BG_PWRUP BIT(8)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_LDO_PWRUP BIT(7)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BIAS_PWRUP BIT(6)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_RNDGEN_PWRUP BIT(4)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_CK_PWRUP BIT(3)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_2_PWRUP BIT(2)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_1_PWRUP BIT(1)
+#define VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_0_PWRUP BIT(0)
+
+#define VC6_HDMI_TX_PHY_PLL_REFCLK_REFCLK_SEL_CMOS BIT(13)
+#define VC6_HDMI_TX_PHY_PLL_REFCLK_REFFRQ_MASK VC4_MASK(9, 0)
+
+#define VC6_HDMI_TX_PHY_PLL_POST_KDIV_CLK0_SEL_MASK VC4_MASK(3, 2)
+#define VC6_HDMI_TX_PHY_PLL_POST_KDIV_KDIV_MASK VC4_MASK(1, 0)
+
+#define VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV_EN BIT(10)
+#define VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV_MASK VC4_MASK(9, 0)
+
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL_MASK VC4_MASK(31, 28)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE_MASK VC4_MASK(27, 27)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL_MASK VC4_MASK(26, 26)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN_MASK VC4_MASK(25, 25)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL_MASK VC4_MASK(24, 23)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN_MASK VC4_MASK(22, 22)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL_MASK VC4_MASK(21, 21)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN_MASK VC4_MASK(20, 20)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL_MASK VC4_MASK(19, 18)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN_MASK VC4_MASK(17, 17)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN_MASK VC4_MASK(16, 16)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL_MASK VC4_MASK(15, 12)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN_MASK VC4_MASK(11, 11)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT_MASK VC4_MASK(10, 8)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT_MASK VC4_MASK(7, 5)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING_MASK VC4_MASK(4, 3)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING_MASK VC4_MASK(2, 1)
+#define VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN_MASK VC4_MASK(0, 0)
+
+#define VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_PLLPOST_RESETB BIT(1)
+#define VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_RESETB BIT(0)
+
+#define VC6_HDMI_TX_PHY_PLL_POWERUP_CTL_PLL_PWRUP BIT(0)
+
#define OSCILLATOR_FREQUENCY 54000000
void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
@@ -558,3 +600,601 @@ void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi)
VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
}
+
+#define VC6_VCO_MIN_FREQ (8ULL * 1000 * 1000 * 1000)
+#define VC6_VCO_MAX_FREQ (12ULL * 1000 * 1000 * 1000)
+
+static unsigned long long
+vc6_phy_get_vco_freq(unsigned long long tmds_rate, unsigned int *vco_div)
+{
+ unsigned int min_div;
+ unsigned int max_div;
+ unsigned int div;
+
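+ /*
+ * The VCO runs at tmds_rate * div * 10: a TMDS character is ten bits,
+ * so the bit clock is ten times the character rate. Walk the divider
+ * until the VCO falls inside its 8-12 GHz window, then pick the middle
+ * of the valid range. E.g. a 148.5 MHz character rate allows div 6..8
+ * and settles on 7, i.e. a 10.395 GHz VCO.
+ */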
+ div = 0;
+ while (tmds_rate * div * 10 < VC6_VCO_MIN_FREQ)
+ div++;
+ min_div = div;
+
+ while (tmds_rate * (div + 1) * 10 < VC6_VCO_MAX_FREQ)
+ div++;
+ max_div = div;
+
+ div = min_div + (max_div - min_div) / 2;
+
+ *vco_div = div;
+ return tmds_rate * div * 10;
+}
+
+struct vc6_phy_lane_settings {
+ unsigned int ext_current_ctl:4;
+ unsigned int ffe_enable:1;
+ unsigned int slew_rate_ctl:1;
+ unsigned int ffe_post_tap_en:1;
+ unsigned int ldmos_bias_ctl:2;
+ unsigned int com_mode_ldmos_en:1;
+ unsigned int edge_sel:1;
+ unsigned int ext_current_src_hs_en:1;
+ unsigned int term_ctl:2;
+ unsigned int ext_current_src_en:1;
+ unsigned int int_current_src_en:1;
+ unsigned int int_current_ctl:4;
+ unsigned int int_current_src_hs_en:1;
+ unsigned int main_tap_current_select:3;
+ unsigned int post_tap_current_select:3;
+ unsigned int slew_ctl_slow_loading:2;
+ unsigned int slew_ctl_slow_driving:2;
+ unsigned int ffe_pre_tap_en:1;
+};
+
+struct vc6_phy_settings {
+ unsigned long long min_rate;
+ unsigned long long max_rate;
+ struct vc6_phy_lane_settings channel[3];
+ struct vc6_phy_lane_settings clock;
+};
+
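+/*
+ * Drive strength, bias and termination presets per TMDS character
+ * rate band: up to 222 MHz, 222-297 MHz (4k30 tops out here) and
+ * 297-597 MHz (4k60's 594 MHz lands in this band).
+ */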
+static const struct vc6_phy_settings vc6_hdmi_phy_settings[] = {
+ {
+ 0, 222000000,
+ {
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+ {
+ 222000001, 297000000,
+ {
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+ },
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+ },
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+ },
+ },
+ {
+ /* 200mA and 180mA ?! */
+ .ext_current_ctl = 12,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 100 Ohm */
+ .term_ctl = 1,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* Internal Current Source Half Swing Enable */
+ .int_current_src_hs_en = 1,
+ },
+ },
+ {
+ 297000001, 597000044,
+ {
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+ {
+ /* 200mA */
+ .ext_current_ctl = 8,
+
+ /* Normal Slew Rate Control */
+ .slew_rate_ctl = 1,
+
+ /* 0.85V */
+ .ldmos_bias_ctl = 1,
+
+ /* External Current Source Half Swing Enable */
+ .ext_current_src_hs_en = 1,
+
+ /* 50 Ohms */
+ .term_ctl = 3,
+
+ /* Enable External Current Source */
+ .ext_current_src_en = 1,
+
+ /* Enable Internal Current Source */
+ .int_current_src_en = 1,
+
+ /* 200mA */
+ .int_current_ctl = 8,
+
+ /* Internal Current Source Half Swing Enable */
+ .int_current_src_hs_en = 1,
+
+ /* 17.6 mA */
+ .main_tap_current_select = 7,
+ },
+ },
+};
+
+static const struct vc6_phy_settings *
+vc6_phy_get_settings(unsigned long long tmds_rate)
+{
+ unsigned int count = ARRAY_SIZE(vc6_hdmi_phy_settings);
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ const struct vc6_phy_settings *s = &vc6_hdmi_phy_settings[i];
+
+ if (tmds_rate >= s->min_rate && tmds_rate <= s->max_rate)
+ return s;
+ }
+
+ /*
+ * If the TMDS rate exceeds our highest supported range, fall
+ * back to the settings for that highest range anyway.
+ */
+ return &vc6_hdmi_phy_settings[count - 1];
+}
+
+static const struct vc6_phy_lane_settings *
+vc6_phy_get_channel_settings(enum vc4_hdmi_phy_channel chan,
+ unsigned long long tmds_rate)
+{
+ const struct vc6_phy_settings *settings = vc6_phy_get_settings(tmds_rate);
+
+ if (chan == PHY_LANE_CK)
+ return &settings->clock;
+
+ return &settings->channel[chan];
+}
+
+static void vc6_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi)
+{
+ lockdep_assert_held(&vc4_hdmi->hw_lock);
+
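+ /* Writing zero presumably asserts all resets and powers the PHY down. */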
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0);
+ HDMI_WRITE(HDMI_TX_PHY_POWERUP_CTL, 0);
+}
+
+void vc6_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi,
+ struct drm_connector_state *conn_state)
+{
+ const struct vc6_phy_lane_settings *chan0_settings;
+ const struct vc6_phy_lane_settings *chan1_settings;
+ const struct vc6_phy_lane_settings *chan2_settings;
+ const struct vc6_phy_lane_settings *clock_settings;
+ const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
+ unsigned long long pixel_freq = conn_state->hdmi.tmds_char_rate;
+ unsigned long long vco_freq;
+ unsigned char word_sel;
+ unsigned long flags;
+ unsigned int vco_div;
+
+ vco_freq = vc6_phy_get_vco_freq(pixel_freq, &vco_div);
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+
+ vc6_hdmi_reset_phy(vc4_hdmi);
+
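+ /* Opaque PLL tuning constants; their meaning is undocumented. */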
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_0, 0x810c6000);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_1, 0x00b8c451);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_2, 0x46402e31);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_3, 0x00b8c005);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_4, 0x42410261);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_5, 0xcc021001);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_6, 0xc8301c80);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_7, 0xb0804444);
+ HDMI_WRITE(HDMI_TX_PHY_PLL_MISC_8, 0xf80f8000);
+
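+ /* REFFRQ is presumably the reference frequency in MHz (54 MHz). */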
+ HDMI_WRITE(HDMI_TX_PHY_PLL_REFCLK,
+ VC6_HDMI_TX_PHY_PLL_REFCLK_REFCLK_SEL_CMOS |
+ VC4_SET_FIELD(54, VC6_HDMI_TX_PHY_PLL_REFCLK_REFFRQ));
+
+ HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x7f);
+
+ HDMI_WRITE(HDMI_RM_OFFSET,
+ VC4_HDMI_RM_OFFSET_ONLY |
+ VC4_SET_FIELD(phy_get_rm_offset(vco_freq),
+ VC4_HDMI_RM_OFFSET_OFFSET));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_VCOCLK_DIV,
+ VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV_EN |
+ VC4_SET_FIELD(vco_div,
+ VC6_HDMI_TX_PHY_PLL_VCOCLK_DIV_VCODIV));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_CFG,
+ VC4_SET_FIELD(0, VC4_HDMI_TX_PHY_PLL_CFG_PDIV));
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_POST_KDIV,
+ VC4_SET_FIELD(2, VC6_HDMI_TX_PHY_PLL_POST_KDIV_CLK0_SEL) |
+ VC4_SET_FIELD(1, VC6_HDMI_TX_PHY_PLL_POST_KDIV_KDIV));
+
+ chan0_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_0],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_0,
+ VC4_SET_FIELD(chan0_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan0_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(chan0_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(chan0_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(chan0_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(chan0_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(chan0_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(chan0_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan0_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(chan0_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan0_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan0_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan0_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan0_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan0_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan0_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(chan0_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(chan0_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
+ chan1_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_1],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_1,
+ VC4_SET_FIELD(chan1_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan1_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(chan1_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(chan1_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(chan1_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(chan1_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(chan1_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(chan1_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan1_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(chan1_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan1_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan1_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan1_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan1_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan1_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan1_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(chan1_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(chan1_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
+ chan2_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_2],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_2,
+ VC4_SET_FIELD(chan2_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan2_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(chan2_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(chan2_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(chan2_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(chan2_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(chan2_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(chan2_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan2_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(chan2_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan2_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(chan2_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(chan2_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(chan2_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan2_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(chan2_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(chan2_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(chan2_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
+ clock_settings =
+ vc6_phy_get_channel_settings(variant->phy_lane_mapping[PHY_LANE_CK],
+ pixel_freq);
+ HDMI_WRITE(HDMI_TX_PHY_CTL_CK,
+ VC4_SET_FIELD(clock_settings->ext_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_CTL) |
+ VC4_SET_FIELD(clock_settings->ffe_enable,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_ENABLE) |
+ VC4_SET_FIELD(clock_settings->slew_rate_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_RATE_CTL) |
+ VC4_SET_FIELD(clock_settings->ffe_post_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_POST_TAP_EN) |
+ VC4_SET_FIELD(clock_settings->ldmos_bias_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_LDMOS_BIAS_CTL) |
+ VC4_SET_FIELD(clock_settings->com_mode_ldmos_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_COM_MODE_LDMOS_EN) |
+ VC4_SET_FIELD(clock_settings->edge_sel,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EDGE_SEL) |
+ VC4_SET_FIELD(clock_settings->ext_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(clock_settings->term_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_TERM_CTL) |
+ VC4_SET_FIELD(clock_settings->ext_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_EXT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(clock_settings->int_current_src_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_EN) |
+ VC4_SET_FIELD(clock_settings->int_current_ctl,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_CTL) |
+ VC4_SET_FIELD(clock_settings->int_current_src_hs_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_INT_CURRENT_SRC_HS_EN) |
+ VC4_SET_FIELD(clock_settings->main_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_MAIN_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(clock_settings->post_tap_current_select,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_POST_TAP_CURRENT_SELECT) |
+ VC4_SET_FIELD(clock_settings->slew_ctl_slow_loading,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_LOADING) |
+ VC4_SET_FIELD(clock_settings->slew_ctl_slow_driving,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_SLEW_CTL_SLOW_DRIVING) |
+ VC4_SET_FIELD(clock_settings->ffe_pre_tap_en,
+ VC6_HDMI_TX_PHY_HDMI_CTRL_CHX_FFE_PRE_TAP_EN));
+
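+ /*
+ * Rates above 340 MHz use the HDMI 2.0 1/40 TMDS clock ratio,
+ * which presumably needs a different clock word selection; the
+ * meaning of the value 3 is undocumented.
+ */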
+ if (pixel_freq >= 340000000)
+ word_sel = 3;
+ else
+ word_sel = 0;
+ HDMI_WRITE(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, word_sel);
+
+ HDMI_WRITE(HDMI_TX_PHY_POWERUP_CTL,
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BG_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_LDO_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_BIAS_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_CK_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_2_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_1_PWRUP |
+ VC6_HDMI_TX_PHY_HDMI_POWERUP_CTL_TX_0_PWRUP);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_POWERUP_CTL,
+ VC6_HDMI_TX_PHY_PLL_POWERUP_CTL_PLL_PWRUP);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_RESET_CTL,
+ HDMI_READ(HDMI_TX_PHY_PLL_RESET_CTL) &
+ ~VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_RESETB);
+
+ HDMI_WRITE(HDMI_TX_PHY_PLL_RESET_CTL,
+ HDMI_READ(HDMI_TX_PHY_PLL_RESET_CTL) |
+ VC6_HDMI_TX_PHY_PLL_RESET_CTL_PLL_RESETB);
+
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+}
+
+void vc6_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi)
+{
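+ /* Nothing to do: vc6_hdmi_phy_init() resets the PHY on the next enable. */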
+}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index b04b2fc8d831..59bfd69f54d9 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -111,13 +111,30 @@ enum vc4_hdmi_field {
HDMI_TX_PHY_CTL_1,
HDMI_TX_PHY_CTL_2,
HDMI_TX_PHY_CTL_3,
+ HDMI_TX_PHY_CTL_CK,
HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_1,
HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_2,
HDMI_TX_PHY_PLL_CALIBRATION_CONFIG_4,
HDMI_TX_PHY_PLL_CFG,
+ HDMI_TX_PHY_PLL_CFG_PDIV,
HDMI_TX_PHY_PLL_CTL_0,
HDMI_TX_PHY_PLL_CTL_1,
+ HDMI_TX_PHY_PLL_MISC_0,
+ HDMI_TX_PHY_PLL_MISC_1,
+ HDMI_TX_PHY_PLL_MISC_2,
+ HDMI_TX_PHY_PLL_MISC_3,
+ HDMI_TX_PHY_PLL_MISC_4,
+ HDMI_TX_PHY_PLL_MISC_5,
+ HDMI_TX_PHY_PLL_MISC_6,
+ HDMI_TX_PHY_PLL_MISC_7,
+ HDMI_TX_PHY_PLL_MISC_8,
+ HDMI_TX_PHY_PLL_POST_KDIV,
+ HDMI_TX_PHY_PLL_POWERUP_CTL,
+ HDMI_TX_PHY_PLL_REFCLK,
+ HDMI_TX_PHY_PLL_RESET_CTL,
+ HDMI_TX_PHY_PLL_VCOCLK_DIV,
HDMI_TX_PHY_POWERDOWN_CTL,
+ HDMI_TX_PHY_POWERUP_CTL,
HDMI_TX_PHY_RESET_CTL,
HDMI_TX_PHY_TMDS_CLK_WORD_SEL,
HDMI_VEC_INTERFACE_CFG,
@@ -411,6 +428,206 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = {
VC5_CSC_REG(HDMI_CSC_CHANNEL_CTL, 0x02c),
};
+static const struct vc4_hdmi_register __maybe_unused vc6_hdmi_hdmi0_fields[] = {
+ VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
+ VC4_HD_REG(HDMI_MAI_CTL, 0x0010),
+ VC4_HD_REG(HDMI_MAI_THR, 0x0014),
+ VC4_HD_REG(HDMI_MAI_FMT, 0x0018),
+ VC4_HD_REG(HDMI_MAI_DATA, 0x001c),
+ VC4_HD_REG(HDMI_MAI_SMP, 0x0020),
+ VC4_HD_REG(HDMI_VID_CTL, 0x0044),
+ VC4_HD_REG(HDMI_FRAME_COUNT, 0x0060),
+
+ VC4_HDMI_REG(HDMI_FIFO_CTL, 0x07c),
+ VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0c0),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0c4),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0cc),
+ VC4_HDMI_REG(HDMI_CRP_CFG, 0x0d0),
+ VC4_HDMI_REG(HDMI_CTS_0, 0x0d4),
+ VC4_HDMI_REG(HDMI_CTS_1, 0x0d8),
+ VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e8),
+ VC4_HDMI_REG(HDMI_HORZA, 0x0ec),
+ VC4_HDMI_REG(HDMI_HORZB, 0x0f0),
+ VC4_HDMI_REG(HDMI_VERTA0, 0x0f4),
+ VC4_HDMI_REG(HDMI_VERTB0, 0x0f8),
+ VC4_HDMI_REG(HDMI_VERTA1, 0x100),
+ VC4_HDMI_REG(HDMI_VERTB1, 0x104),
+ VC4_HDMI_REG(HDMI_MISC_CONTROL, 0x114),
+ VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x0a4),
+ VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a8),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_1, 0x148),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_2, 0x14c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_3, 0x150),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_4, 0x158),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_5, 0x15c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_6, 0x160),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_7, 0x164),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_8, 0x168),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_9, 0x16c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_10, 0x170),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x18c),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x194),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x198),
+ VC4_HDMI_REG(HDMI_HOTPLUG, 0x1c8),
+ VC4_HDMI_REG(HDMI_SCRAMBLER_CTL, 0x1e4),
+
+ VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_CFG, 0x0f0),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f4),
+
+ VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000),
+ VC5_PHY_REG(HDMI_TX_PHY_POWERUP_CTL, 0x004),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_CK, 0x014),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_REFCLK, 0x01c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POST_KDIV, 0x028),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_VCOCLK_DIV, 0x02c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x044),
+ VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x054),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_0, 0x060),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_1, 0x064),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_2, 0x068),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_3, 0x06c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_4, 0x070),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_5, 0x074),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_6, 0x078),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_7, 0x07c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_8, 0x080),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_RESET_CTL, 0x190),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POWERUP_CTL, 0x194),
+
+ VC5_RM_REG(HDMI_RM_CONTROL, 0x000),
+ VC5_RM_REG(HDMI_RM_OFFSET, 0x018),
+ VC5_RM_REG(HDMI_RM_FORMAT, 0x01c),
+
+ VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000),
+
+ VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044),
+
+ VC5_CSC_REG(HDMI_CSC_CTL, 0x000),
+ VC5_CSC_REG(HDMI_CSC_12_11, 0x004),
+ VC5_CSC_REG(HDMI_CSC_14_13, 0x008),
+ VC5_CSC_REG(HDMI_CSC_22_21, 0x00c),
+ VC5_CSC_REG(HDMI_CSC_24_23, 0x010),
+ VC5_CSC_REG(HDMI_CSC_32_31, 0x014),
+ VC5_CSC_REG(HDMI_CSC_34_33, 0x018),
+ VC5_CSC_REG(HDMI_CSC_CHANNEL_CTL, 0x02c),
+};
+
+static const struct vc4_hdmi_register __maybe_unused vc6_hdmi_hdmi1_fields[] = {
+ VC4_HD_REG(HDMI_DVP_CTL, 0x0000),
+ VC4_HD_REG(HDMI_MAI_CTL, 0x0030),
+ VC4_HD_REG(HDMI_MAI_THR, 0x0034),
+ VC4_HD_REG(HDMI_MAI_FMT, 0x0038),
+ VC4_HD_REG(HDMI_MAI_DATA, 0x003c),
+ VC4_HD_REG(HDMI_MAI_SMP, 0x0040),
+ VC4_HD_REG(HDMI_VID_CTL, 0x0048),
+ VC4_HD_REG(HDMI_FRAME_COUNT, 0x0064),
+
+ VC4_HDMI_REG(HDMI_FIFO_CTL, 0x07c),
+ VC4_HDMI_REG(HDMI_AUDIO_PACKET_CONFIG, 0x0c0),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_CONFIG, 0x0c4),
+ VC4_HDMI_REG(HDMI_RAM_PACKET_STATUS, 0x0cc),
+ VC4_HDMI_REG(HDMI_CRP_CFG, 0x0d0),
+ VC4_HDMI_REG(HDMI_CTS_0, 0x0d4),
+ VC4_HDMI_REG(HDMI_CTS_1, 0x0d8),
+ VC4_HDMI_REG(HDMI_SCHEDULER_CONTROL, 0x0e8),
+ VC4_HDMI_REG(HDMI_HORZA, 0x0ec),
+ VC4_HDMI_REG(HDMI_HORZB, 0x0f0),
+ VC4_HDMI_REG(HDMI_VERTA0, 0x0f4),
+ VC4_HDMI_REG(HDMI_VERTB0, 0x0f8),
+ VC4_HDMI_REG(HDMI_VERTA1, 0x100),
+ VC4_HDMI_REG(HDMI_VERTB1, 0x104),
+ VC4_HDMI_REG(HDMI_MISC_CONTROL, 0x114),
+ VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x0a4),
+ VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a8),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_1, 0x148),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_2, 0x14c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_3, 0x150),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_4, 0x158),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_5, 0x15c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_6, 0x160),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_7, 0x164),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_8, 0x168),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_9, 0x16c),
+ VC4_HDMI_REG(HDMI_FORMAT_DET_10, 0x170),
+ VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x18c),
+ VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x194),
+ VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x198),
+ VC4_HDMI_REG(HDMI_HOTPLUG, 0x1c8),
+ VC4_HDMI_REG(HDMI_SCRAMBLER_CTL, 0x1e4),
+
+ VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_CFG, 0x0f0),
+ VC5_DVP_REG(HDMI_VEC_INTERFACE_XBAR, 0x0f4),
+
+ VC5_PHY_REG(HDMI_TX_PHY_RESET_CTL, 0x000),
+ VC5_PHY_REG(HDMI_TX_PHY_POWERUP_CTL, 0x004),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_0, 0x008),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_1, 0x00c),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_2, 0x010),
+ VC5_PHY_REG(HDMI_TX_PHY_CTL_CK, 0x014),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_REFCLK, 0x01c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POST_KDIV, 0x028),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_VCOCLK_DIV, 0x02c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_CFG, 0x044),
+ VC5_PHY_REG(HDMI_TX_PHY_TMDS_CLK_WORD_SEL, 0x054),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_0, 0x060),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_1, 0x064),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_2, 0x068),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_3, 0x06c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_4, 0x070),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_5, 0x074),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_6, 0x078),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_7, 0x07c),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_MISC_8, 0x080),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_RESET_CTL, 0x190),
+ VC5_PHY_REG(HDMI_TX_PHY_PLL_POWERUP_CTL, 0x194),
+
+ VC5_RM_REG(HDMI_RM_CONTROL, 0x000),
+ VC5_RM_REG(HDMI_RM_OFFSET, 0x018),
+ VC5_RM_REG(HDMI_RM_FORMAT, 0x01c),
+
+ VC5_RAM_REG(HDMI_RAM_PACKET_START, 0x000),
+
+ VC5_CEC_REG(HDMI_CEC_CNTRL_1, 0x010),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_2, 0x014),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_3, 0x018),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_4, 0x01c),
+ VC5_CEC_REG(HDMI_CEC_CNTRL_5, 0x020),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_1, 0x028),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_2, 0x02c),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_3, 0x030),
+ VC5_CEC_REG(HDMI_CEC_TX_DATA_4, 0x034),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_1, 0x038),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_2, 0x03c),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_3, 0x040),
+ VC5_CEC_REG(HDMI_CEC_RX_DATA_4, 0x044),
+
+ VC5_CSC_REG(HDMI_CSC_CTL, 0x000),
+ VC5_CSC_REG(HDMI_CSC_12_11, 0x004),
+ VC5_CSC_REG(HDMI_CSC_14_13, 0x008),
+ VC5_CSC_REG(HDMI_CSC_22_21, 0x00c),
+ VC5_CSC_REG(HDMI_CSC_24_23, 0x010),
+ VC5_CSC_REG(HDMI_CSC_32_31, 0x014),
+ VC5_CSC_REG(HDMI_CSC_34_33, 0x018),
+ VC5_CSC_REG(HDMI_CSC_CHANNEL_CTL, 0x02c),
+};
+
static inline
void __iomem *__vc4_hdmi_get_field_base(struct vc4_hdmi *hdmi,
enum vc4_hdmi_regs reg)
@@ -498,8 +715,11 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi,
field = &variant->registers[reg];
base = __vc4_hdmi_get_field_base(hdmi, field->reg);
- if (!base)
+ if (!base) {
+ dev_warn(&hdmi->pdev->dev,
+ "Unknown register ID %u\n", reg);
return;
+ }
writel(value, base + field->offset);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 2a835a5cff9d..4811d794001f 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -33,7 +33,7 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-static const struct debugfs_reg32 hvs_regs[] = {
+static const struct debugfs_reg32 vc4_hvs_regs[] = {
VC4_REG32(SCALER_DISPCTRL),
VC4_REG32(SCALER_DISPSTAT),
VC4_REG32(SCALER_DISPID),
@@ -67,6 +67,140 @@ static const struct debugfs_reg32 hvs_regs[] = {
VC4_REG32(SCALER_OLEDCOEF2),
};
+static const struct debugfs_reg32 vc6_hvs_regs[] = {
+ VC4_REG32(SCALER6_VERSION),
+ VC4_REG32(SCALER6_CXM_SIZE),
+ VC4_REG32(SCALER6_LBM_SIZE),
+ VC4_REG32(SCALER6_UBM_SIZE),
+ VC4_REG32(SCALER6_COBA_SIZE),
+ VC4_REG32(SCALER6_COB_SIZE),
+ VC4_REG32(SCALER6_CONTROL),
+ VC4_REG32(SCALER6_FETCHER_STATUS),
+ VC4_REG32(SCALER6_FETCH_STATUS),
+ VC4_REG32(SCALER6_HANDLE_ERROR),
+ VC4_REG32(SCALER6_DISP0_CTRL0),
+ VC4_REG32(SCALER6_DISP0_CTRL1),
+ VC4_REG32(SCALER6_DISP0_BGND),
+ VC4_REG32(SCALER6_DISP0_LPTRS),
+ VC4_REG32(SCALER6_DISP0_COB),
+ VC4_REG32(SCALER6_DISP0_STATUS),
+ VC4_REG32(SCALER6_DISP0_DL),
+ VC4_REG32(SCALER6_DISP0_RUN),
+ VC4_REG32(SCALER6_DISP1_CTRL0),
+ VC4_REG32(SCALER6_DISP1_CTRL1),
+ VC4_REG32(SCALER6_DISP1_BGND),
+ VC4_REG32(SCALER6_DISP1_LPTRS),
+ VC4_REG32(SCALER6_DISP1_COB),
+ VC4_REG32(SCALER6_DISP1_STATUS),
+ VC4_REG32(SCALER6_DISP1_DL),
+ VC4_REG32(SCALER6_DISP1_RUN),
+ VC4_REG32(SCALER6_DISP2_CTRL0),
+ VC4_REG32(SCALER6_DISP2_CTRL1),
+ VC4_REG32(SCALER6_DISP2_BGND),
+ VC4_REG32(SCALER6_DISP2_LPTRS),
+ VC4_REG32(SCALER6_DISP2_COB),
+ VC4_REG32(SCALER6_DISP2_STATUS),
+ VC4_REG32(SCALER6_DISP2_DL),
+ VC4_REG32(SCALER6_DISP2_RUN),
+ VC4_REG32(SCALER6_EOLN),
+ VC4_REG32(SCALER6_DL_STATUS),
+ VC4_REG32(SCALER6_BFG_MISC),
+ VC4_REG32(SCALER6_QOS0),
+ VC4_REG32(SCALER6_PROF0),
+ VC4_REG32(SCALER6_QOS1),
+ VC4_REG32(SCALER6_PROF1),
+ VC4_REG32(SCALER6_QOS2),
+ VC4_REG32(SCALER6_PROF2),
+ VC4_REG32(SCALER6_PRI_MAP0),
+ VC4_REG32(SCALER6_PRI_MAP1),
+ VC4_REG32(SCALER6_HISTCTRL),
+ VC4_REG32(SCALER6_HISTBIN0),
+ VC4_REG32(SCALER6_HISTBIN1),
+ VC4_REG32(SCALER6_HISTBIN2),
+ VC4_REG32(SCALER6_HISTBIN3),
+ VC4_REG32(SCALER6_HISTBIN4),
+ VC4_REG32(SCALER6_HISTBIN5),
+ VC4_REG32(SCALER6_HISTBIN6),
+ VC4_REG32(SCALER6_HISTBIN7),
+ VC4_REG32(SCALER6_HDR_CFG_REMAP),
+ VC4_REG32(SCALER6_COL_SPACE),
+ VC4_REG32(SCALER6_HVS_ID),
+ VC4_REG32(SCALER6_CFC1),
+ VC4_REG32(SCALER6_DISP_UPM_ISO0),
+ VC4_REG32(SCALER6_DISP_UPM_ISO1),
+ VC4_REG32(SCALER6_DISP_UPM_ISO2),
+ VC4_REG32(SCALER6_DISP_LBM_ISO0),
+ VC4_REG32(SCALER6_DISP_LBM_ISO1),
+ VC4_REG32(SCALER6_DISP_LBM_ISO2),
+ VC4_REG32(SCALER6_DISP_COB_ISO0),
+ VC4_REG32(SCALER6_DISP_COB_ISO1),
+ VC4_REG32(SCALER6_DISP_COB_ISO2),
+ VC4_REG32(SCALER6_BAD_COB),
+ VC4_REG32(SCALER6_BAD_LBM),
+ VC4_REG32(SCALER6_BAD_UPM),
+ VC4_REG32(SCALER6_BAD_AXI),
+};
+
+static const struct debugfs_reg32 vc6_d_hvs_regs[] = {
+ VC4_REG32(SCALER6D_VERSION),
+ VC4_REG32(SCALER6D_CXM_SIZE),
+ VC4_REG32(SCALER6D_LBM_SIZE),
+ VC4_REG32(SCALER6D_UBM_SIZE),
+ VC4_REG32(SCALER6D_COBA_SIZE),
+ VC4_REG32(SCALER6D_COB_SIZE),
+ VC4_REG32(SCALER6D_CONTROL),
+ VC4_REG32(SCALER6D_FETCHER_STATUS),
+ VC4_REG32(SCALER6D_FETCH_STATUS),
+ VC4_REG32(SCALER6D_HANDLE_ERROR),
+ VC4_REG32(SCALER6D_DISP0_CTRL0),
+ VC4_REG32(SCALER6D_DISP0_CTRL1),
+ VC4_REG32(SCALER6D_DISP0_BGND0),
+ VC4_REG32(SCALER6D_DISP0_BGND1),
+ VC4_REG32(SCALER6D_DISP0_LPTRS),
+ VC4_REG32(SCALER6D_DISP0_COB),
+ VC4_REG32(SCALER6D_DISP0_STATUS),
+ VC4_REG32(SCALER6D_DISP0_DL),
+ VC4_REG32(SCALER6D_DISP0_RUN),
+ VC4_REG32(SCALER6D_DISP1_CTRL0),
+ VC4_REG32(SCALER6D_DISP1_CTRL1),
+ VC4_REG32(SCALER6D_DISP1_BGND0),
+ VC4_REG32(SCALER6D_DISP1_BGND1),
+ VC4_REG32(SCALER6D_DISP1_LPTRS),
+ VC4_REG32(SCALER6D_DISP1_COB),
+ VC4_REG32(SCALER6D_DISP1_STATUS),
+ VC4_REG32(SCALER6D_DISP1_DL),
+ VC4_REG32(SCALER6D_DISP1_RUN),
+ VC4_REG32(SCALER6D_DISP2_CTRL0),
+ VC4_REG32(SCALER6D_DISP2_CTRL1),
+ VC4_REG32(SCALER6D_DISP2_BGND0),
+ VC4_REG32(SCALER6D_DISP2_BGND1),
+ VC4_REG32(SCALER6D_DISP2_LPTRS),
+ VC4_REG32(SCALER6D_DISP2_COB),
+ VC4_REG32(SCALER6D_DISP2_STATUS),
+ VC4_REG32(SCALER6D_DISP2_DL),
+ VC4_REG32(SCALER6D_DISP2_RUN),
+ VC4_REG32(SCALER6D_EOLN),
+ VC4_REG32(SCALER6D_DL_STATUS),
+ VC4_REG32(SCALER6D_QOS0),
+ VC4_REG32(SCALER6D_PROF0),
+ VC4_REG32(SCALER6D_QOS1),
+ VC4_REG32(SCALER6D_PROF1),
+ VC4_REG32(SCALER6D_QOS2),
+ VC4_REG32(SCALER6D_PROF2),
+ VC4_REG32(SCALER6D_PRI_MAP0),
+ VC4_REG32(SCALER6D_PRI_MAP1),
+ VC4_REG32(SCALER6D_HISTCTRL),
+ VC4_REG32(SCALER6D_HISTBIN0),
+ VC4_REG32(SCALER6D_HISTBIN1),
+ VC4_REG32(SCALER6D_HISTBIN2),
+ VC4_REG32(SCALER6D_HISTBIN3),
+ VC4_REG32(SCALER6D_HISTBIN4),
+ VC4_REG32(SCALER6D_HISTBIN5),
+ VC4_REG32(SCALER6D_HISTBIN6),
+ VC4_REG32(SCALER6D_HISTBIN7),
+ VC4_REG32(SCALER6D_HVS_ID),
+};
+
void vc4_hvs_dump_state(struct vc4_hvs *hvs)
{
struct drm_device *drm = &hvs->vc4->base;
@@ -110,7 +244,8 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
struct drm_printer p = drm_seq_file_printer(m);
- unsigned int next_entry_start = 0;
+ unsigned int dlist_mem_size = hvs->dlist_mem_size;
+ unsigned int next_entry_start;
unsigned int i, j;
u32 dlist_word, dispstat;
@@ -124,8 +259,58 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
}
drm_printf(&p, "HVS chan %u:\n", i);
+ next_entry_start = 0;
+
+ for (j = HVS_READ(SCALER_DISPLISTX(i)); j < dlist_mem_size; j++) {
+ dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j);
+ drm_printf(&p, "dlist: %02d: 0x%08x\n", j,
+ dlist_word);
+ if (!next_entry_start ||
+ next_entry_start == j) {
+ if (dlist_word & SCALER_CTL0_END)
+ break;
+ next_entry_start = j +
+ VC4_GET_FIELD(dlist_word,
+ SCALER_CTL0_SIZE);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int vc6_hvs_debugfs_dlist(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_printer p = drm_seq_file_printer(m);
+ unsigned int dlist_mem_size = hvs->dlist_mem_size;
+ unsigned int next_entry_start;
+ unsigned int i;
+
+ for (i = 0; i < SCALER_CHANNELS_COUNT; i++) {
+ unsigned int active_dlist, dispstat;
+ unsigned int j;
+
+ dispstat = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_STATUS(i)),
+ SCALER6_DISPX_STATUS_MODE);
+ if (dispstat == SCALER6_DISPX_STATUS_MODE_DISABLED ||
+ dispstat == SCALER6_DISPX_STATUS_MODE_EOF) {
+ drm_printf(&p, "HVS chan %u disabled\n", i);
+ continue;
+ }
+
+ drm_printf(&p, "HVS chan %u:\n", i);
+
+ active_dlist = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_DL(i)),
+ SCALER6_DISPX_DL_LACT);
+ next_entry_start = 0;
+
+ for (j = active_dlist; j < dlist_mem_size; j++) {
+ u32 dlist_word;
- for (j = HVS_READ(SCALER_DISPLISTX(i)); j < 256; j++) {
dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j);
drm_printf(&p, "dlist: %02d: 0x%08x\n", j,
dlist_word);
@@ -143,6 +328,27 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
return 0;
}
+static int vc6_hvs_debugfs_upm_allocs(struct seq_file *m, void *data)
+{
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct vc4_upm_refcounts *refcount;
+ unsigned int i;
+
+ drm_printf(&p, "UPM Handles:\n");
+ for (i = 1; i <= VC4_NUM_UPM_HANDLES; i++) {
+ refcount = &hvs->upm_refcounts[i];
+ drm_printf(&p, "handle %u: refcount %u, size %zu [%08llx + %08llx]\n",
+ i, refcount_read(&refcount->refcount), refcount->size,
+ refcount->upm.start, refcount->upm.size);
+ }
+
+ return 0;
+}
+
/* The filter kernel is composed of dwords each containing 3 9-bit
* signed integers packed next to each other.
*/
@@ -213,15 +419,21 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
struct vc4_crtc *vc4_crtc)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
struct drm_crtc *crtc = &vc4_crtc->base;
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
int idx;
u32 i;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
+ if (hvs->vc4->gen != VC4_GEN_4)
+ goto exit;
+
/* The LUT memory is laid out with each HVS channel in order,
* each of which takes 256 writes for R, 256 for G, then 256
* for B.
@@ -237,6 +449,7 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
for (i = 0; i < crtc->gamma_size; i++)
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
+exit:
drm_dev_exit(idx);
}
@@ -259,25 +472,56 @@ static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
u8 field = 0;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
if (!drm_dev_enter(drm, &idx))
return 0;
- switch (fifo) {
- case 0:
- field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
- SCALER_DISPSTAT1_FRCNT0);
+ switch (vc4->gen) {
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ field = VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_STATUS(fifo)),
+ SCALER6_DISPX_STATUS_FRCNT);
break;
- case 1:
- field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
- SCALER_DISPSTAT1_FRCNT1);
+ case VC4_GEN_5:
+ switch (fifo) {
+ case 0:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER5_DISPSTAT1_FRCNT0);
+ break;
+ case 1:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER5_DISPSTAT1_FRCNT1);
+ break;
+ case 2:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
+ SCALER5_DISPSTAT2_FRCNT2);
+ break;
+ }
break;
- case 2:
- field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
- SCALER_DISPSTAT2_FRCNT2);
+ case VC4_GEN_4:
+ switch (fifo) {
+ case 0:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER_DISPSTAT1_FRCNT0);
+ break;
+ case 1:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
+ SCALER_DISPSTAT1_FRCNT1);
+ break;
+ case 2:
+ field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
+ SCALER_DISPSTAT2_FRCNT2);
+ break;
+ }
+ break;
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
break;
}
@@ -291,53 +535,80 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
u32 reg;
int ret;
- if (!vc4->is_vc5)
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
+ switch (vc4->gen) {
+ case VC4_GEN_4:
return output;
- /*
- * NOTE: We should probably use drm_dev_enter()/drm_dev_exit()
- * here, but this function is only used during the DRM device
- * initialization, so we should be fine.
- */
+ case VC4_GEN_5:
+ /*
+ * NOTE: We should probably use
+ * drm_dev_enter()/drm_dev_exit() here, but this
+ * function is only used during the DRM device
+ * initialization, so we should be fine.
+ */
- switch (output) {
- case 0:
- return 0;
+ switch (output) {
+ case 0:
+ return 0;
- case 1:
- return 1;
+ case 1:
+ return 1;
- case 2:
- reg = HVS_READ(SCALER_DISPECTRL);
- ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg);
- if (ret == 0)
- return 2;
+ case 2:
+ reg = HVS_READ(SCALER_DISPECTRL);
+ ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg);
+ if (ret == 0)
+ return 2;
- return 0;
+ return 0;
- case 3:
- reg = HVS_READ(SCALER_DISPCTRL);
- ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg);
- if (ret == 3)
- return -EPIPE;
+ case 3:
+ reg = HVS_READ(SCALER_DISPCTRL);
+ ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg);
+ if (ret == 3)
+ return -EPIPE;
- return ret;
+ return ret;
- case 4:
- reg = HVS_READ(SCALER_DISPEOLN);
- ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg);
- if (ret == 3)
- return -EPIPE;
+ case 4:
+ reg = HVS_READ(SCALER_DISPEOLN);
+ ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg);
+ if (ret == 3)
+ return -EPIPE;
- return ret;
+ return ret;
+
+ case 5:
+ reg = HVS_READ(SCALER_DISPDITHER);
+ ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg);
+ if (ret == 3)
+ return -EPIPE;
+
+ return ret;
- case 5:
- reg = HVS_READ(SCALER_DISPDITHER);
- ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg);
- if (ret == 3)
+ default:
return -EPIPE;
+ }
- return ret;
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ switch (output) {
+ case 0:
+ return 0;
+
+ case 2:
+ return 2;
+
+ case 1:
+ case 3:
+ case 4:
+ return 1;
+
+ default:
+ return -EPIPE;
+ }
default:
return -EPIPE;
@@ -357,6 +628,8 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
u32 dispctrl;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return -ENODEV;
@@ -372,7 +645,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
dispctrl = SCALER_DISPCTRLX_ENABLE;
dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
dispctrl |= VC4_SET_FIELD(mode->hdisplay,
SCALER_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay,
@@ -394,7 +667,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;
HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
- ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
+ ((vc4->gen == VC4_GEN_4) ? SCALER_DISPBKGND_GAMMA : 0) |
(interlace ? SCALER_DISPBKGND_INTERLACE : 0));
/* Reload the LUT, since the SRAMs would have been disabled if
@@ -407,21 +680,58 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
return 0;
}
-void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+static int vc6_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
+ struct drm_display_mode *mode, bool oneshot)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
+ struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
+ unsigned int chan = vc4_crtc_state->assigned_channel;
+ bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ u32 disp_ctrl1;
+ int idx;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
+
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan), SCALER6_DISPX_CTRL0_RESET);
+
+ disp_ctrl1 = HVS_READ(SCALER6_DISPX_CTRL1(chan));
+ disp_ctrl1 &= ~SCALER6_DISPX_CTRL1_INTLACE;
+ HVS_WRITE(SCALER6_DISPX_CTRL1(chan),
+ disp_ctrl1 | (interlace ? SCALER6_DISPX_CTRL1_INTLACE : 0));
+
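+ /* FWIDTH and LINES are programmed as the active size minus one. */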
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan),
+ SCALER6_DISPX_CTRL0_ENB |
+ VC4_SET_FIELD(mode->hdisplay - 1,
+ SCALER6_DISPX_CTRL0_FWIDTH) |
+ (oneshot ? SCALER6_DISPX_CTRL0_ONESHOT : 0) |
+ VC4_SET_FIELD(mode->vdisplay - 1,
+ SCALER6_DISPX_CTRL0_LINES));
+
+ drm_dev_exit(idx);
+
+ return 0;
+}
+
+static void __vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
- if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)
+ if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE))
goto out;
- HVS_WRITE(SCALER_DISPCTRLX(chan),
- HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET);
- HVS_WRITE(SCALER_DISPCTRLX(chan),
- HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE);
+ HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
+ HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
/* Once we leave, the scaler should be disabled and its fifo empty. */
WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET);
@@ -438,6 +748,44 @@ out:
drm_dev_exit(idx);
}
+static void __vc6_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
+ int idx;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ if (!(HVS_READ(SCALER6_DISPX_CTRL0(chan)) & SCALER6_DISPX_CTRL0_ENB))
+ goto out;
+
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan),
+ HVS_READ(SCALER6_DISPX_CTRL0(chan)) | SCALER6_DISPX_CTRL0_RESET);
+
+ HVS_WRITE(SCALER6_DISPX_CTRL0(chan),
+ HVS_READ(SCALER6_DISPX_CTRL0(chan)) & ~SCALER6_DISPX_CTRL0_ENB);
+
+ WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER6_DISPX_STATUS(chan)),
+ SCALER6_DISPX_STATUS_MODE) !=
+ SCALER6_DISPX_STATUS_MODE_DISABLED);
+
+out:
+ drm_dev_exit(idx);
+}
+
+void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ __vc6_hvs_stop_channel(hvs, chan);
+ else
+ __vc4_hvs_stop_channel(hvs, chan);
+}
+
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
@@ -456,17 +804,29 @@ int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
if (hweight32(crtc_state->connector_mask) > 1)
return -EINVAL;
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)
- dlist_count += vc4_plane_dlist_size(plane_state);
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ u32 plane_dlist_count = vc4_plane_dlist_size(plane_state);
+
+ drm_dbg_driver(dev, "[CRTC:%d:%s] Found [PLANE:%d:%s] with DLIST size: %u\n",
+ crtc->base.id, crtc->name,
+ plane->base.id, plane->name,
+ plane_dlist_count);
+
+ dlist_count += plane_dlist_count;
+ }
dlist_count++; /* Account for SCALER_CTL0_END. */
+ drm_dbg_driver(dev, "[CRTC:%d:%s] Allocating DLIST block with size: %u\n",
+ crtc->base.id, crtc->name, dlist_count);
spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
dlist_count);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
- if (ret)
+ if (ret) {
+ drm_err(dev, "Failed to allocate DLIST entry: %d\n", ret);
return ret;
+ }
return 0;
}
@@ -482,8 +842,13 @@ static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
if (!drm_dev_enter(dev, &idx))
return;
- HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
- vc4_state->mm.start);
+ if (vc4->gen >= VC4_GEN_6_C)
+ HVS_WRITE(SCALER6_DISPX_LPTRS(vc4_state->assigned_channel),
+ VC4_SET_FIELD(vc4_state->mm.start,
+ SCALER6_DISPX_LPTRS_HEADE));
+ else
+ HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
+ vc4_state->mm.start);
drm_dev_exit(idx);
}
@@ -538,7 +903,11 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
vc4_hvs_install_dlist(crtc);
vc4_hvs_update_dlist(crtc);
- vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ vc6_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
+ else
+ vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
}
void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
@@ -567,20 +936,22 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
struct drm_plane *plane;
struct vc4_plane_state *vc4_plane_state;
bool debug_dump_regs = false;
- bool enable_bg_fill = false;
+ bool enable_bg_fill = true;
u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
u32 __iomem *dlist_next = dlist_start;
unsigned int zpos = 0;
bool found = false;
int idx;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
if (!drm_dev_enter(dev, &idx)) {
vc4_crtc_send_vblank(crtc);
return;
}
if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
- return;
+ goto exit;
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
@@ -622,13 +993,26 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- if (enable_bg_fill)
+ if (vc4->gen >= VC4_GEN_6_C) {
/* This sets a black background color fill, as is the case
* with other DRM drivers.
*/
+ if (enable_bg_fill)
+ HVS_WRITE(SCALER6_DISPX_CTRL1(channel),
+ HVS_READ(SCALER6_DISPX_CTRL1(channel)) |
+ SCALER6_DISPX_CTRL1_BGENB);
+ else
+ HVS_WRITE(SCALER6_DISPX_CTRL1(channel),
+ HVS_READ(SCALER6_DISPX_CTRL1(channel)) &
+ ~SCALER6_DISPX_CTRL1_BGENB);
+ } else {
+ /* We can actually run with a lower core clock when background
+ * fill is enabled on VC4_GEN_5, so leave it enabled always.
+ */
HVS_WRITE(SCALER_DISPBKGNDX(channel),
HVS_READ(SCALER_DISPBKGNDX(channel)) |
SCALER_DISPBKGND_FILL);
+ }
/* Only update DISPLIST if the CRTC was already running and is not
* being disabled.
@@ -645,6 +1029,8 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
if (crtc->state->color_mgmt_changed) {
u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(channel));
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (crtc->state->gamma_lut) {
vc4_hvs_update_gamma_lut(hvs, vc4_crtc);
dispbkgndx |= SCALER_DISPBKGND_GAMMA;
@@ -663,21 +1049,26 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
vc4_hvs_dump_state(hvs);
}
+exit:
drm_dev_exit(idx);
}
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
u32 dispctrl;
int idx;
+ WARN_ON(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
dispctrl = HVS_READ(SCALER_DISPCTRL);
- dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
- SCALER_DISPCTRL_DSPEISLUR(channel));
+ dispctrl &= ~((vc4->gen == VC4_GEN_5) ?
+ SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel));
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
@@ -686,16 +1077,20 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
u32 dispctrl;
int idx;
+ WARN_ON(vc4->gen > VC4_GEN_5);
+
if (!drm_dev_enter(drm, &idx))
return;
dispctrl = HVS_READ(SCALER_DISPCTRL);
- dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
- SCALER_DISPCTRL_DSPEISLUR(channel));
+ dispctrl |= ((vc4->gen == VC4_GEN_5) ?
+ SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel));
HVS_WRITE(SCALER_DISPSTAT,
SCALER_DISPSTAT_EUFLOW(channel));
@@ -723,6 +1118,8 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
u32 status;
u32 dspeislur;
+ WARN_ON(vc4->gen > VC4_GEN_5);
+
/*
* NOTE: We don't need to protect the register access using
* drm_dev_enter() there because the interrupt handler lifetime
@@ -738,8 +1135,10 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
control = HVS_READ(SCALER_DISPCTRL);
for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
- dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
- SCALER_DISPCTRL_DSPEISLUR(channel);
+ dspeislur = (vc4->gen == VC4_GEN_5) ?
+ SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel);
+
/* Interrupt masking is not always honored, so check it here. */
if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
control & dspeislur) {
@@ -767,12 +1166,17 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor)
if (!vc4->hvs)
return -ENODEV;
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
minor->debugfs_root,
&vc4->load_tracker_enabled);
- drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL);
+ if (vc4->gen >= VC4_GEN_6_C) {
+ drm_debugfs_add_file(drm, "hvs_dlists", vc6_hvs_debugfs_dlist, NULL);
+ drm_debugfs_add_file(drm, "hvs_upm", vc6_hvs_debugfs_upm_allocs, NULL);
+ } else {
+ drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL);
+ }
drm_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun, NULL);
@@ -781,119 +1185,129 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor)
return 0;
}
-struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev)
+struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4,
+ void __iomem *regs,
+ struct platform_device *pdev)
{
struct drm_device *drm = &vc4->base;
struct vc4_hvs *hvs;
+ unsigned int dlist_start;
+ size_t dlist_size;
+ size_t lbm_size;
+ unsigned int i;
hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
return ERR_PTR(-ENOMEM);
hvs->vc4 = vc4;
+ hvs->regs = regs;
hvs->pdev = pdev;
spin_lock_init(&hvs->mm_lock);
- /* Set up the HVS display list memory manager. We never
- * overwrite the setup from the bootloader (just 128b out of
- * our 16K), since we don't want to scramble the screen when
- * transitioning from the firmware's boot setup to runtime.
- */
- drm_mm_init(&hvs->dlist_mm,
- HVS_BOOTLOADER_DLIST_END,
- (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
+ switch (vc4->gen) {
+ case VC4_GEN_4:
+ case VC4_GEN_5:
+ /* Set up the HVS display list memory manager. We never
+ * overwrite the setup from the bootloader (just 128b
+ * out of our 16K), since we don't want to scramble the
+ * screen when transitioning from the firmware's boot
+ * setup to runtime.
+ */
+ dlist_start = HVS_BOOTLOADER_DLIST_END;
+ dlist_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END;
+ break;
+
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ dlist_start = HVS_BOOTLOADER_DLIST_END;
+
+ /*
+ * Registers can't be accessed when running under kunit, so
+ * fall back to a plausible fixed size instead of reading
+ * SCALER6_CXM_SIZE.
+ */
+ if (!kunit_get_current_test())
+ dlist_size = HVS_READ(SCALER6_CXM_SIZE);
+ else
+ dlist_size = 4096;
+
+ for (i = 0; i < VC4_NUM_UPM_HANDLES; i++) {
+ refcount_set(&hvs->upm_refcounts[i].refcount, 0);
+ hvs->upm_refcounts[i].hvs = hvs;
+ }
+
+ break;
+
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
+ return ERR_PTR(-ENODEV);
+ }
+
+ drm_mm_init(&hvs->dlist_mm, dlist_start, dlist_size);
+
+ hvs->dlist_mem_size = dlist_size;
/* Set up the HVS LBM memory manager. We could have some more
* complicated data structure that allowed reuse of LBM areas
* between planes when they don't overlap on the screen, but
* for now we just allocate globally.
*/
- if (!vc4->is_vc5)
- /* 48k words of 2x12-bit pixels */
- drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
- else
- /* 60k words of 4x12-bit pixels */
- drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
- vc4->hvs = hvs;
-
- return hvs;
-}
-
-static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- struct vc4_hvs *hvs = NULL;
- int ret;
- u32 dispctrl;
- u32 reg, top;
-
- hvs = __vc4_hvs_alloc(vc4, NULL);
- if (IS_ERR(hvs))
- return PTR_ERR(hvs);
-
- hvs->regs = vc4_ioremap_regs(pdev, 0);
- if (IS_ERR(hvs->regs))
- return PTR_ERR(hvs->regs);
+ switch (vc4->gen) {
+ case VC4_GEN_4:
+ /* 48k words of 2x12-bit pixels */
+ lbm_size = 48 * SZ_1K;
+ break;
- hvs->regset.base = hvs->regs;
- hvs->regset.regs = hvs_regs;
- hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
+ case VC4_GEN_5:
+ /* 60k words of 4x12-bit pixels */
+ lbm_size = 60 * SZ_1K;
+ break;
- if (vc4->is_vc5) {
- struct rpi_firmware *firmware;
- struct device_node *node;
- unsigned int max_rate;
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ /*
+ * The actual size can't be read back here (this path also
+ * runs under kunit, where registers aren't accessible), so
+ * use a plausible fixed size.
+ */
+ lbm_size = 1024;
+ break;
- node = rpi_firmware_find_node();
- if (!node)
- return -EINVAL;
+ default:
+ drm_err(drm, "Unknown VC4 generation: %d", vc4->gen);
+ return ERR_PTR(-ENODEV);
+ }
- firmware = rpi_firmware_get(node);
- of_node_put(node);
- if (!firmware)
- return -EPROBE_DEFER;
+ drm_mm_init(&hvs->lbm_mm, 0, lbm_size);
- hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(hvs->core_clk)) {
- dev_err(&pdev->dev, "Couldn't get core clock\n");
- return PTR_ERR(hvs->core_clk);
- }
+ if (vc4->gen >= VC4_GEN_6_C) {
+ ida_init(&hvs->upm_handles);
- max_rate = rpi_firmware_clk_get_max_rate(firmware,
- RPI_FIRMWARE_CORE_CLK_ID);
- rpi_firmware_put(firmware);
- if (max_rate >= 550000000)
- hvs->vc5_hdmi_enable_hdmi_20 = true;
+ /*
+ * NOTE: On BCM2712 the size could also be read from the
+ * SCALER_UBM_SIZE register, but that would require a
+ * register access, which isn't possible under kunit, which
+ * also uses this function to create its mock device.
+ */
+ drm_mm_init(&hvs->upm_mm, 0, 1024 * HVS_UBM_WORD_SIZE);
+ }
- if (max_rate >= 600000000)
- hvs->vc5_hdmi_enable_4096by2160 = true;
- hvs->max_core_rate = max_rate;
+ vc4->hvs = hvs;
- ret = clk_prepare_enable(hvs->core_clk);
- if (ret) {
- dev_err(&pdev->dev, "Couldn't enable the core clock\n");
- return ret;
- }
- }
+ return hvs;
+}
- if (!vc4->is_vc5)
- hvs->dlist = hvs->regs + SCALER_DLIST_START;
- else
- hvs->dlist = hvs->regs + SCALER5_DLIST_START;
+static int vc4_hvs_hw_init(struct vc4_hvs *hvs)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+ u32 dispctrl, reg;
- /* Upload filter kernels. We only have the one for now, so we
- * keep it around for the lifetime of the driver.
- */
- ret = vc4_hvs_upload_linear_kernel(hvs,
- &hvs->mitchell_netravali_filter,
- mitchell_netravali_1_3_1_3_kernel);
- if (ret)
- return ret;
+ dispctrl = HVS_READ(SCALER_DISPCTRL);
+ dispctrl |= SCALER_DISPCTRL_ENABLE;
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl);
reg = HVS_READ(SCALER_DISPECTRL);
reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
@@ -916,13 +1330,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX));
dispctrl = HVS_READ(SCALER_DISPCTRL);
-
- dispctrl |= SCALER_DISPCTRL_ENABLE;
dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
SCALER_DISPCTRL_DISPEIRQ(1) |
SCALER_DISPCTRL_DISPEIRQ(2);
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
SCALER_DISPCTRL_SLVWREIRQ |
SCALER_DISPCTRL_SLVRDEIRQ |
@@ -962,11 +1374,173 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
- /* Recompute Composite Output Buffer (COB) allocations for the displays
+ return 0;
+}
+
+#define CFC1_N_NL_CSC_CTRL(x) (0xa000 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C00(x) (0xa008 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C01(x) (0xa00c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C02(x) (0xa010 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C03(x) (0xa014 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C04(x) (0xa018 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C10(x) (0xa01c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C11(x) (0xa020 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C12(x) (0xa024 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C13(x) (0xa028 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C14(x) (0xa02c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C20(x) (0xa030 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C21(x) (0xa034 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C22(x) (0xa038 + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C23(x) (0xa03c + ((x) * 0x3000))
+#define CFC1_N_MA_CSC_COEFF_C24(x) (0xa040 + ((x) * 0x3000))
+
+#define SCALER_PI_CMP_CSC_RED0(x) (0x200 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_RED1(x) (0x204 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_RED_CLAMP(x) (0x208 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_CFG(x) (0x20c + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_GREEN0(x) (0x210 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_GREEN1(x) (0x214 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_GREEN_CLAMP(x) (0x218 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_BLUE0(x) (0x220 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_BLUE1(x) (0x224 + ((x) * 0x40))
+#define SCALER_PI_CMP_CSC_BLUE_CLAMP(x) (0x228 + ((x) * 0x40))
+
+/* 4 S2.22 multiplication factors, and 1 S9.15 additive element for each of 3
+ * output components
+ */
+struct vc6_csc_coeff_entry {
+ u32 csc[3][5];
+};
+
+static const struct vc6_csc_coeff_entry csc_coeffs[2][3] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ .csc = {
+ { 0x004A8542, 0x0, 0x0066254A, 0x0, 0xFF908A0D },
+ { 0x004A8542, 0xFFE6ED5D, 0xFFCBF856, 0x0, 0x0043C9A3 },
+ { 0x004A8542, 0x00811A54, 0x0, 0x0, 0xFF759502 }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ .csc = {
+ { 0x004A8542, 0x0, 0x0072BC44, 0x0, 0xFF83F312 },
+ { 0x004A8542, 0xFFF25A22, 0xFFDDE4D0, 0x0, 0x00267064 },
+ { 0x004A8542, 0x00873197, 0x0, 0x0, 0xFF6F7DC0 }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT2020] = {
+ .csc = {
+ { 0x004A8542, 0x0, 0x006B4A17, 0x0, 0xFF8B653F },
+ { 0x004A8542, 0xFFF402D9, 0xFFDDE4D0, 0x0, 0x0024C7AE },
+ { 0x004A8542, 0x008912CC, 0x0, 0x0, 0xFF6D9C8B }
+ }
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ .csc = {
+ { 0x00400000, 0x0, 0x0059BA5E, 0x0, 0xFFA645A1 },
+ { 0x00400000, 0xFFE9F9AC, 0xFFD24B97, 0x0, 0x0043BABB },
+ { 0x00400000, 0x00716872, 0x0, 0x0, 0xFF8E978D }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ .csc = {
+ { 0x00400000, 0x0, 0x0064C985, 0x0, 0xFF9B367A },
+ { 0x00400000, 0xFFF402E1, 0xFFE20A40, 0x0, 0x0029F2DE },
+ { 0x00400000, 0x0076C226, 0x0, 0x0, 0xFF893DD9 }
+ }
+ },
+ [DRM_COLOR_YCBCR_BT2020] = {
+ .csc = {
+ { 0x00400000, 0x0, 0x005E3F14, 0x0, 0xFFA1C0EB },
+ { 0x00400000, 0xFFF577F6, 0xFFDB580F, 0x0, 0x002F2FFA },
+ { 0x00400000, 0x007868DB, 0x0, 0x0, 0xFF879724 }
+ }
+ }
+ }
+};
+
+static int vc6_hvs_hw_init(struct vc4_hvs *hvs)
+{
+ const struct vc6_csc_coeff_entry *coeffs;
+ unsigned int i;
+
+ HVS_WRITE(SCALER6_CONTROL,
+ SCALER6_CONTROL_HVS_EN |
+ VC4_SET_FIELD(8, SCALER6_CONTROL_PF_LINES) |
+ VC4_SET_FIELD(15, SCALER6_CONTROL_MAX_REQS));
+
+ /* Set HVS arbiter priority to max */
+ HVS_WRITE(SCALER6(PRI_MAP0), 0xffffffff);
+ HVS_WRITE(SCALER6(PRI_MAP1), 0xffffffff);
+
+ if (hvs->vc4->gen == VC4_GEN_6_C) {
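+ /* One CSC context per (range, encoding) pair: 2 ranges x 3 encodings. */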
+ for (i = 0; i < 6; i++) {
+ coeffs = &csc_coeffs[i / 3][i % 3];
+
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C00(i), coeffs->csc[0][0]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C01(i), coeffs->csc[0][1]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C02(i), coeffs->csc[0][2]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C03(i), coeffs->csc[0][3]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C04(i), coeffs->csc[0][4]);
+
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C10(i), coeffs->csc[1][0]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C11(i), coeffs->csc[1][1]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C12(i), coeffs->csc[1][2]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C13(i), coeffs->csc[1][3]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C14(i), coeffs->csc[1][4]);
+
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C20(i), coeffs->csc[2][0]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C21(i), coeffs->csc[2][1]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C22(i), coeffs->csc[2][2]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C23(i), coeffs->csc[2][3]);
+ HVS_WRITE(CFC1_N_MA_CSC_COEFF_C24(i), coeffs->csc[2][4]);
+
+ HVS_WRITE(CFC1_N_NL_CSC_CTRL(i), BIT(15));
+ }
+ } else {
+ for (i = 0; i < 8; i++) {
+ HVS_WRITE(SCALER_PI_CMP_CSC_RED0(i), 0x1f002566);
+ HVS_WRITE(SCALER_PI_CMP_CSC_RED1(i), 0x3994);
+ HVS_WRITE(SCALER_PI_CMP_CSC_RED_CLAMP(i), 0xfff00000);
+ HVS_WRITE(SCALER_PI_CMP_CSC_CFG(i), 0x1);
+ HVS_WRITE(SCALER_PI_CMP_CSC_GREEN0(i), 0x18002566);
+ HVS_WRITE(SCALER_PI_CMP_CSC_GREEN1(i), 0xf927eee2);
+ HVS_WRITE(SCALER_PI_CMP_CSC_GREEN_CLAMP(i), 0xfff00000);
+ HVS_WRITE(SCALER_PI_CMP_CSC_BLUE0(i), 0x18002566);
+ HVS_WRITE(SCALER_PI_CMP_CSC_BLUE1(i), 0x43d80000);
+ HVS_WRITE(SCALER_PI_CMP_CSC_BLUE_CLAMP(i), 0xfff00000);
+ }
+ }
+
+ return 0;
+}
+
+static int vc4_hvs_cob_init(struct vc4_hvs *hvs)
+{
+ struct vc4_dev *vc4 = hvs->vc4;
+ u32 reg, top, base;
+
+ /*
+ * Recompute Composite Output Buffer (COB) allocations for the
+ * displays
*/
- if (!vc4->is_vc5) {
+ switch (vc4->gen) {
+ case VC4_GEN_4:
/* The COB is 20736 pixels, or just over 10 lines at 2048 wide.
* The bottom 2048 pixels are full 32bpp RGBA (intended for the
* TXP composing RGBA to memory), whilst the remainder are only
@@ -990,7 +1564,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
top = VC4_COB_SIZE;
reg |= (top - 1) << 16;
HVS_WRITE(SCALER_DISPBASE0, reg);
- } else {
+ break;
+
+ case VC4_GEN_5:
/* The COB is 44416 pixels, or 10.8 lines at 4096 wide.
* The bottom 4096 pixels are full RGBA (intended for the TXP
* composing RGBA to memory), whilst the remainder are only
@@ -1016,13 +1592,159 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
top = VC5_COB_SIZE;
reg |= top << 16;
HVS_WRITE(SCALER_DISPBASE0, reg);
+ break;
+
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ #define VC6_COB_LINE_WIDTH 3840
+ #define VC6_COB_NUM_LINES 4
+ base = 0;
+ top = 3840;
+
+ HVS_WRITE(SCALER6_DISPX_COB(2),
+ VC4_SET_FIELD(top, SCALER6_DISPX_COB_TOP) |
+ VC4_SET_FIELD(base, SCALER6_DISPX_COB_BASE));
+
+ base = top + 16;
+ top += VC6_COB_LINE_WIDTH * VC6_COB_NUM_LINES;
+
+ HVS_WRITE(SCALER6_DISPX_COB(1),
+ VC4_SET_FIELD(top, SCALER6_DISPX_COB_TOP) |
+ VC4_SET_FIELD(base, SCALER6_DISPX_COB_BASE));
+
+ base = top + 16;
+ top += VC6_COB_LINE_WIDTH * VC6_COB_NUM_LINES;
+
+ HVS_WRITE(SCALER6_DISPX_COB(0),
+ VC4_SET_FIELD(top, SCALER6_DISPX_COB_TOP) |
+ VC4_SET_FIELD(base, SCALER6_DISPX_COB_BASE));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
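
The GEN_6 branch above carves the COB up highest-display-first, with a 16-pixel guard between regions. A standalone sketch of the same walk (constants copied from the inline defines) prints the base/top pair written for each display:

#include <stdio.h>

#define VC6_COB_LINE_WIDTH	3840
#define VC6_COB_NUM_LINES	4

int main(void)
{
	unsigned int base = 0, top = 3840;
	int i;

	/* Same order as the GEN_6 branch: display 2 first, then 1, then 0. */
	for (i = 2; i >= 0; i--) {
		printf("display %d: base=%u top=%u\n", i, base, top);
		base = top + 16;
		top += VC6_COB_LINE_WIDTH * VC6_COB_NUM_LINES;
	}
	return 0;
}

This prints base=0/top=3840 for display 2, base=3856/top=19200 for display 1, and base=19216/top=34560 for display 0, matching the three SCALER6_DISPX_COB writes above.
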
+
+static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = NULL;
+ void __iomem *regs;
+ int ret;
+
+ regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ hvs = __vc4_hvs_alloc(vc4, regs, pdev);
+ if (IS_ERR(hvs))
+ return PTR_ERR(hvs);
+
+ hvs->regset.base = hvs->regs;
+
+ if (vc4->gen == VC4_GEN_6_C) {
+ hvs->regset.regs = vc6_hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(vc6_hvs_regs);
+
+ if (VC4_GET_FIELD(HVS_READ(SCALER6_VERSION), SCALER6_VERSION) ==
+ SCALER6_VERSION_D0) {
+ vc4->gen = VC4_GEN_6_D;
+ hvs->regset.regs = vc6_d_hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(vc6_d_hvs_regs);
+ }
+ } else {
+ hvs->regset.regs = vc4_hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(vc4_hvs_regs);
}
- ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
- vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
+ if (vc4->gen >= VC4_GEN_5) {
+ struct rpi_firmware *firmware;
+ struct device_node *node;
+ unsigned int max_rate;
+
+ node = rpi_firmware_find_node();
+ if (!node)
+ return -EINVAL;
+
+ firmware = rpi_firmware_get(node);
+ of_node_put(node);
+ if (!firmware)
+ return -EPROBE_DEFER;
+
+ hvs->core_clk = devm_clk_get(&pdev->dev,
+ (vc4->gen >= VC4_GEN_6_C) ? "core" : NULL);
+ if (IS_ERR(hvs->core_clk)) {
+ dev_err(&pdev->dev, "Couldn't get core clock\n");
+ return PTR_ERR(hvs->core_clk);
+ }
+
+ hvs->disp_clk = devm_clk_get(&pdev->dev,
+ (vc4->gen >= VC4_GEN_6_C) ? "disp" : NULL);
+ if (IS_ERR(hvs->disp_clk)) {
+ dev_err(&pdev->dev, "Couldn't get disp clock\n");
+ return PTR_ERR(hvs->disp_clk);
+ }
+
+ max_rate = rpi_firmware_clk_get_max_rate(firmware,
+ RPI_FIRMWARE_CORE_CLK_ID);
+ rpi_firmware_put(firmware);
+ if (max_rate >= 550000000)
+ hvs->vc5_hdmi_enable_hdmi_20 = true;
+
+ if (max_rate >= 600000000)
+ hvs->vc5_hdmi_enable_4096by2160 = true;
+
+ hvs->max_core_rate = max_rate;
+
+ ret = clk_prepare_enable(hvs->core_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(hvs->disp_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the disp clock\n");
+ return ret;
+ }
+ }
+
+ if (vc4->gen >= VC4_GEN_5)
+ hvs->dlist = hvs->regs + SCALER5_DLIST_START;
+ else
+ hvs->dlist = hvs->regs + SCALER_DLIST_START;
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ ret = vc6_hvs_hw_init(hvs);
+ else
+ ret = vc4_hvs_hw_init(hvs);
+ if (ret)
+ return ret;
+
+ /* Upload filter kernels. We only have the one for now, so we
+ * keep it around for the lifetime of the driver.
+ */
+ ret = vc4_hvs_upload_linear_kernel(hvs,
+ &hvs->mitchell_netravali_filter,
+ mitchell_netravali_1_3_1_3_kernel);
if (ret)
return ret;
+ ret = vc4_hvs_cob_init(hvs);
+ if (ret)
+ return ret;
+
+ if (vc4->gen < VC4_GEN_6_C) {
+ ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1046,6 +1768,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
drm_mm_remove_node(node);
drm_mm_takedown(&vc4->hvs->lbm_mm);
+ clk_disable_unprepare(hvs->disp_clk);
clk_disable_unprepare(hvs->core_clk);
vc4->hvs = NULL;
@@ -1068,13 +1791,14 @@ static void vc4_hvs_dev_remove(struct platform_device *pdev)
static const struct of_device_id vc4_hvs_dt_match[] = {
{ .compatible = "brcm,bcm2711-hvs" },
+ { .compatible = "brcm,bcm2712-hvs" },
{ .compatible = "brcm,bcm2835-hvs" },
{}
};
struct platform_driver vc4_hvs_driver = {
.probe = vc4_hvs_dev_probe,
- .remove_new = vc4_hvs_dev_remove,
+ .remove = vc4_hvs_dev_remove,
.driver = {
.name = "vc4_hvs",
.of_match_table = vc4_hvs_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index ef93d8e22a35..69b399f3b802 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -263,7 +263,7 @@ vc4_irq_enable(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
if (!vc4->v3d)
@@ -280,7 +280,7 @@ vc4_irq_disable(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
if (!vc4->v3d)
@@ -303,7 +303,7 @@ int vc4_irq_install(struct drm_device *dev, int irq)
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (irq == IRQ_NOTCONNECTED)
@@ -324,7 +324,7 @@ void vc4_irq_uninstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
vc4_irq_disable(dev);
@@ -337,7 +337,7 @@ void vc4_irq_reset(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
/* Acknowledge any stale IRQs. */
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 5495f2a94fa9..f5b167417428 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -138,6 +138,8 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
struct drm_color_ctm *ctm = ctm_state->ctm;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
+
if (ctm_state->fifo) {
HVS_WRITE(SCALER_OLEDCOEF2,
VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
@@ -213,6 +215,8 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
struct drm_crtc *crtc;
unsigned int i;
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_4);
+
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
@@ -256,6 +260,8 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
unsigned int i;
u32 reg;
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_5);
+
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
@@ -320,17 +326,62 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
}
}
+static void vc6_hvs_pv_muxing_commit(struct vc4_dev *vc4,
+ struct drm_atomic_state *state)
+{
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_6_C && vc4->gen != VC4_GEN_6_D);
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
+ struct vc4_encoder *vc4_encoder;
+ struct drm_encoder *encoder;
+ unsigned char mux;
+ u32 reg;
+
+ if (!vc4_state->update_muxing)
+ continue;
+
+ if (vc4_state->assigned_channel != 1)
+ continue;
+
+ encoder = vc4_get_crtc_encoder(crtc, crtc_state);
+ vc4_encoder = to_vc4_encoder(encoder);
+ switch (vc4_encoder->type) {
+ case VC4_ENCODER_TYPE_HDMI1:
+ mux = 0;
+ break;
+
+ case VC4_ENCODER_TYPE_TXP1:
+ mux = 2;
+ break;
+
+ default:
+ drm_err(&vc4->base, "Unhandled encoder type for PV muxing %d",
+ vc4_encoder->type);
+ mux = 0;
+ break;
+ }
+
+ reg = HVS_READ(SCALER6_CONTROL);
+ HVS_WRITE(SCALER6_CONTROL,
+ (reg & ~SCALER6_CONTROL_DSP1_TARGET_MASK) |
+ VC4_SET_FIELD(mux, SCALER6_CONTROL_DSP1_TARGET));
+ }
+}
+
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
- struct drm_crtc_state *new_crtc_state;
struct vc4_hvs_state *new_hvs_state;
- struct drm_crtc *crtc;
struct vc4_hvs_state *old_hvs_state;
unsigned int channel;
- int i;
old_hvs_state = vc4_hvs_get_old_global_state(state);
if (WARN_ON(IS_ERR(old_hvs_state)))
@@ -340,14 +391,20 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
if (WARN_ON(IS_ERR(new_hvs_state)))
return;
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- struct vc4_crtc_state *vc4_crtc_state;
+ if (vc4->gen < VC4_GEN_6_C) {
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc *crtc;
+ int i;
- if (!new_crtc_state->commit)
- continue;
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct vc4_crtc_state *vc4_crtc_state;
+
+ if (!new_crtc_state->commit)
+ continue;
- vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
- vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
+ vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
+ vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
+ }
}
for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
@@ -369,7 +426,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
old_hvs_state->fifo_state[channel].pending_commit = NULL;
}
- if (vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_5) {
unsigned long state_rate = max(old_hvs_state->core_clock_rate,
new_hvs_state->core_clock_rate);
unsigned long core_rate = clamp_t(unsigned long, state_rate,
@@ -382,16 +439,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
* modeset.
*/
WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
+ WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));
}
drm_atomic_helper_commit_modeset_disables(dev, state);
- vc4_ctm_commit(vc4, state);
+ if (vc4->gen <= VC4_GEN_5)
+ vc4_ctm_commit(vc4, state);
- if (vc4->is_vc5)
- vc5_hvs_pv_muxing_commit(vc4, state);
- else
+ switch (vc4->gen) {
+ case VC4_GEN_4:
vc4_hvs_pv_muxing_commit(vc4, state);
+ break;
+
+ case VC4_GEN_5:
+ vc5_hvs_pv_muxing_commit(vc4, state);
+ break;
+
+ case VC4_GEN_6_C:
+ case VC4_GEN_6_D:
+ vc6_hvs_pv_muxing_commit(vc4, state);
+ break;
+
+ default:
+ drm_err(dev, "Unknown VC4 generation: %d", vc4->gen);
+ break;
+ }
drm_atomic_helper_commit_planes(dev, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
@@ -406,7 +479,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_cleanup_planes(dev, state);
- if (vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_5) {
unsigned long core_rate = min_t(unsigned long,
hvs->max_core_rate,
new_hvs_state->core_clock_rate);
@@ -418,6 +491,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
* requirements.
*/
WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
+ WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));
drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
clk_get_rate(hvs->core_clk));
@@ -461,7 +535,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_mode_fb_cmd2 mode_cmd_local;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return ERR_PTR(-ENODEV);
/* If the user didn't specify a modifier, use the
@@ -1040,7 +1114,7 @@ int vc4_kms_load(struct drm_device *dev)
* the BCM2711, but the load tracker computations are used for
* the core clock rate calculation.
*/
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
/* Start with the load tracker enabled. Can be
* disabled through the debugfs load_tracker file.
*/
@@ -1056,7 +1130,10 @@ int vc4_kms_load(struct drm_device *dev)
return ret;
}
- if (vc4->is_vc5) {
+ if (vc4->gen >= VC4_GEN_6_C) {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ } else if (vc4->gen >= VC4_GEN_5) {
dev->mode_config.max_width = 7680;
dev->mode_config.max_height = 7680;
} else {
@@ -1064,7 +1141,7 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.max_height = 2048;
}
- dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
+ dev->mode_config.funcs = (vc4->gen > VC4_GEN_4) ? &vc5_mode_funcs : &vc4_mode_funcs;
dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index c4ac2c946238..f1342f917cf7 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -23,7 +23,7 @@ void vc4_perfmon_get(struct vc4_perfmon *perfmon)
return;
vc4 = perfmon->dev;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
refcount_inc(&perfmon->refcnt);
@@ -37,7 +37,7 @@ void vc4_perfmon_put(struct vc4_perfmon *perfmon)
return;
vc4 = perfmon->dev;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
if (refcount_dec_and_test(&perfmon->refcnt))
@@ -49,7 +49,7 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon)
unsigned int i;
u32 mask;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
@@ -69,7 +69,7 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
{
unsigned int i;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
if (WARN_ON_ONCE(!vc4->active_perfmon ||
@@ -90,7 +90,7 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id)
struct vc4_dev *vc4 = vc4file->dev;
struct vc4_perfmon *perfmon;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return NULL;
mutex_lock(&vc4file->perfmon.lock);
@@ -105,7 +105,7 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
{
struct vc4_dev *vc4 = vc4file->dev;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
mutex_init(&vc4file->perfmon.lock);
@@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
static int vc4_perfmon_idr_del(int id, void *elem, void *data)
{
struct vc4_perfmon *perfmon = elem;
+ struct vc4_dev *vc4 = (struct vc4_dev *)data;
+
+ /* If the active perfmon is being destroyed, stop it first */
+ if (perfmon == vc4->active_perfmon)
+ vc4_perfmon_stop(vc4, perfmon, false);
vc4_perfmon_put(perfmon);
@@ -126,11 +131,11 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
{
struct vc4_dev *vc4 = vc4file->dev;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
mutex_lock(&vc4file->perfmon.lock);
- idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
+ idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4);
idr_destroy(&vc4file->perfmon.idr);
mutex_unlock(&vc4file->perfmon.lock);
mutex_destroy(&vc4file->perfmon.lock);
@@ -146,7 +151,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
unsigned int i;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (!vc4->v3d) {
@@ -200,7 +205,7 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_vc4_perfmon_destroy *req = data;
struct vc4_perfmon *perfmon;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (!vc4->v3d) {
@@ -228,7 +233,7 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct vc4_perfmon *perfmon;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (!vc4->v3d) {
@@ -236,11 +241,7 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
}
- mutex_lock(&vc4file->perfmon.lock);
- perfmon = idr_find(&vc4file->perfmon.idr, req->id);
- vc4_perfmon_get(perfmon);
- mutex_unlock(&vc4file->perfmon.lock);
-
+ perfmon = vc4_perfmon_find(vc4file, req->id);
if (!perfmon)
return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 07caf2a47c6c..d608860d525f 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -110,6 +110,18 @@ static const struct hvs_format {
.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
+ .drm = DRM_FORMAT_YUV444,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
+ .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
+ },
+ {
+ .drm = DRM_FORMAT_YVU444,
+ .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
+ .pixel_order = HVS_PIXEL_ORDER_XYCRCB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
+ },
+ {
.drm = DRM_FORMAT_YUV420,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
@@ -251,9 +263,9 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
- if (dst == src)
+ if (dst == src >> 16)
return VC4_SCALING_NONE;
- if (3 * dst >= 2 * src)
+ if (3 * dst >= 2 * (src >> 16))
return VC4_SCALING_PPF;
else
return VC4_SCALING_TPZ;
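
With src coordinates now kept in 16.16 fixed point, the scaling-mode check has to shift the fractional part away before comparing pixel counts. A minimal sketch of the updated decision (mirroring the hunk above):

#include <stdio.h>

enum scaling { NONE, PPF, TPZ };

/* src is 16.16 fixed point, dst is integer pixels. */
static enum scaling get_scaling_mode(unsigned int src, unsigned int dst)
{
	if (dst == src >> 16)
		return NONE;
	if (3 * dst >= 2 * (src >> 16))
		return PPF;	/* mild scaling: polyphase filter */
	return TPZ;		/* heavy downscale: trapezoidal */
}

int main(void)
{
	/* 1920 source pixels onto 1280 destination pixels:
	 * 3 * 1280 == 3840 >= 2 * 1920 == 3840, so PPF is chosen.
	 */
	printf("%d\n", get_scaling_mode(1920u << 16, 1280));
	return 0;
}
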
@@ -266,7 +278,10 @@ static bool plane_enabled(struct drm_plane_state *state)
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
+ struct vc4_hvs *hvs = vc4->hvs;
struct vc4_plane_state *vc4_state;
+ unsigned int i;
if (WARN_ON(!plane->state))
return NULL;
@@ -276,6 +291,12 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
return NULL;
memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
+
+ for (i = 0; i < DRM_FORMAT_MAX_PLANES; i++) {
+ if (vc4_state->upm_handle[i])
+ refcount_inc(&hvs->upm_refcounts[vc4_state->upm_handle[i]].refcount);
+ }
+
vc4_state->dlist_initialized = 0;
__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
@@ -294,18 +315,47 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
return &vc4_state->base;
}
+static void vc4_plane_release_upm_ida(struct vc4_hvs *hvs, unsigned int upm_handle)
+{
+ struct vc4_upm_refcounts *refcount = &hvs->upm_refcounts[upm_handle];
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&hvs->mm_lock, irqflags);
+ drm_mm_remove_node(&refcount->upm);
+ spin_unlock_irqrestore(&hvs->mm_lock, irqflags);
+ refcount->upm.start = 0;
+ refcount->upm.size = 0;
+ refcount->size = 0;
+
+ ida_free(&hvs->upm_handles, upm_handle);
+}
+
static void vc4_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
+ struct vc4_hvs *hvs = vc4->hvs;
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int i;
if (drm_mm_node_allocated(&vc4_state->lbm)) {
unsigned long irqflags;
- spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+ spin_lock_irqsave(&hvs->mm_lock, irqflags);
drm_mm_remove_node(&vc4_state->lbm);
- spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+ spin_unlock_irqrestore(&hvs->mm_lock, irqflags);
+ }
+
+ for (i = 0; i < DRM_FORMAT_MAX_PLANES; i++) {
+ struct vc4_upm_refcounts *refcount;
+
+ if (!vc4_state->upm_handle[i])
+ continue;
+
+ refcount = &hvs->upm_refcounts[vc4_state->upm_handle[i]];
+
+ if (refcount_dec_and_test(&refcount->refcount))
+ vc4_plane_release_upm_ida(hvs, vc4_state->upm_handle[i]);
}
kfree(vc4_state->dlist);
@@ -318,7 +368,10 @@ static void vc4_plane_reset(struct drm_plane *plane)
{
struct vc4_plane_state *vc4_state;
- WARN_ON(plane->state);
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(plane->state);
vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
if (!vc4_state)
@@ -438,12 +491,11 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_dma_object *bo;
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
u32 v_subsample = fb->format->vsub;
- int i, ret;
+ int ret;
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
@@ -457,20 +509,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
if (ret)
return ret;
- for (i = 0; i < num_planes; i++) {
- bo = drm_fb_dma_get_gem_obj(fb, i);
- vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
- }
-
- /*
- * We don't support subpixel source positioning for scaling,
- * but fractional coordinates can be generated by clipping
- * so just round for now
- */
- vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16);
- vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16);
- vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x;
- vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y;
+ vc4_state->src_x = state->src.x1;
+ vc4_state->src_y = state->src.y1;
+ vc4_state->src_w[0] = state->src.x2 - vc4_state->src_x;
+ vc4_state->src_h[0] = state->src.y2 - vc4_state->src_y;
vc4_state->crtc_x = state->dst.x1;
vc4_state->crtc_y = state->dst.y1;
@@ -510,6 +552,12 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
*/
if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
vc4_state->x_scaling[1] = VC4_SCALING_PPF;
+
+ /* Similarly, UV needs vertical scaling to be enabled.
+ * Without it, a 1:1 scaled YUV422 plane isn't rendered.
+ */
+ if (vc4_state->y_scaling[1] == VC4_SCALING_NONE)
+ vc4_state->y_scaling[1] = VC4_SCALING_PPF;
} else {
vc4_state->is_yuv = false;
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
@@ -521,9 +569,12 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
+ struct vc4_dev *vc4 = to_vc4_dev(vc4_state->base.plane->dev);
u32 scale, recip;
- scale = (1 << 16) * src / dst;
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
+ scale = src / dst;
/* The specs note that while the reciprocal would be defined
* as (1<<32)/scale, ~0 is close enough.
@@ -531,23 +582,83 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
recip = ~0 / scale;
vc4_dlist_write(vc4_state,
+ /*
+ * The BCM2712 is lacking BIT(31) compared to
+ * the previous generations, but we don't use
+ * it.
+ */
VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
}
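
Since src is already 16.16 fixed point, the TPZ scale is now a plain division, and the reciprocal approximates (1 << 32) / scale with ~0 / scale as the comment notes. A standalone sketch with a 2x downscale:

#include <stdio.h>

int main(void)
{
	/* 2x downscale: 3840 source pixels (16.16) onto 1920 destination. */
	unsigned int src = 3840u << 16, dst = 1920;
	unsigned int scale = src / dst;	/* 0x20000 == 2.0 in 16.16 */
	unsigned int recip = ~0u / scale;	/* ~= (1 << 32) / scale */

	printf("scale=0x%x recip=0x%x\n", scale, recip); /* 0x20000, 0x7fff */
	return 0;
}
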
-static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
+/* phase magnitude bits */
+#define PHASE_BITS 6
+
+static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst,
+ u32 xy, int channel)
{
- u32 scale = (1 << 16) * src / dst;
+ struct vc4_dev *vc4 = to_vc4_dev(vc4_state->base.plane->dev);
+ u32 scale = src / dst;
+ s32 offset, offset2;
+ s32 phase;
+
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
+ /*
+ * Start the phase at 1/2 pixel from the 1st pixel at src_x.
+ * 1/4 pixel for YUV.
+ */
+ if (channel) {
+ /*
+ * The phase is relative to scale_src->x, so shift it for
+ * display list's x value
+ */
+ offset = (xy & 0x1ffff) >> (16 - PHASE_BITS) >> 1;
+ offset += -(1 << PHASE_BITS >> 2);
+ } else {
+ /*
+ * The phase is relative to scale_src->x, so shift it for
+ * display list's x value
+ */
+ offset = (xy & 0xffff) >> (16 - PHASE_BITS);
+ offset += -(1 << PHASE_BITS >> 1);
+
+ /*
+ * This is a kludge to make sure the scaling factors are
+ * consistent with YUV's luma scaling. We lose 1-bit precision
+ * because of this.
+ */
+ scale &= ~1;
+ }
+
+ /*
+ * There may also be a small error introduced by the precision of scale.
+ * Add half of that as a compromise.
+ */
+ offset2 = src - dst * scale;
+ offset2 >>= 16 - PHASE_BITS;
+ phase = offset + (offset2 >> 1);
+
+ /* Ensure +ve values don't touch the sign bit, then truncate negative values */
+ if (phase >= 1 << PHASE_BITS)
+ phase = (1 << PHASE_BITS) - 1;
+
+ phase &= SCALER_PPF_IPHASE_MASK;
vc4_dlist_write(vc4_state,
SCALER_PPF_AGC |
VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
- VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
+ /*
+ * The register layout documentation is slightly
+ * different to setup the phase in the BCM2712,
+ * but they seem equivalent.
+ */
+ VC4_SET_FIELD(phase, SCALER_PPF_IPHASE));
}
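
A worked pass through the phase math above, for the luma (channel == 0) branch with an integer src_x and a 1.5x downscale. This is a standalone sketch; the final SCALER_PPF_IPHASE_MASK step is left as a comment since it only truncates negative values to the register field:

#include <stdio.h>

#define PHASE_BITS 6

int main(void)
{
	/* src and xy are 16.16 fixed point, as in the driver. */
	unsigned int src = 1920u << 16, dst = 1280, xy = 100u << 16;
	unsigned int scale = src / dst;			/* 0x18000 == 1.5 */
	int offset, offset2, phase;

	offset = (xy & 0xffff) >> (16 - PHASE_BITS);	/* 0: integer start */
	offset += -(1 << PHASE_BITS >> 1);		/* 1/2 pixel: -32 */
	scale &= ~1;					/* match chroma scale */

	offset2 = (int)(src - dst * scale);		/* precision residue */
	offset2 >>= 16 - PHASE_BITS;
	phase = offset + (offset2 >> 1);

	if (phase >= 1 << PHASE_BITS)			/* clamp positives */
		phase = (1 << PHASE_BITS) - 1;

	/* The driver then masks with SCALER_PPF_IPHASE_MASK, which
	 * truncates negative values before the dlist write.
	 */
	printf("phase=%d\n", phase);			/* -32 */
	return 0;
}
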
-static u32 vc4_lbm_size(struct drm_plane_state *state)
+static u32 __vc4_lbm_size(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
@@ -569,7 +680,7 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ)
pix_per_line = vc4_state->crtc_w;
else
- pix_per_line = vc4_state->src_w[0];
+ pix_per_line = vc4_state->src_w[0] >> 16;
if (!vc4_state->is_yuv) {
if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
@@ -587,42 +698,170 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
}
/* Align it to 64 or 128 (hvs5) bytes */
- lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
+ lbm = roundup(lbm, vc4->gen == VC4_GEN_5 ? 128 : 64);
/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
- lbm /= vc4->is_vc5 ? 4 : 2;
+ lbm /= vc4->gen == VC4_GEN_5 ? 4 : 2;
return lbm;
}
+static unsigned int vc4_lbm_words_per_component(const struct drm_plane_state *state,
+ unsigned int channel)
+{
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ switch (vc4_state->y_scaling[channel]) {
+ case VC4_SCALING_PPF:
+ return 4;
+
+ case VC4_SCALING_TPZ:
+ return 2;
+
+ default:
+ return 0;
+ }
+}
+
+static unsigned int vc4_lbm_components(const struct drm_plane_state *state,
+ unsigned int channel)
+{
+ const struct drm_format_info *info = state->fb->format;
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+
+ if (vc4_state->y_scaling[channel] == VC4_SCALING_NONE)
+ return 0;
+
+ if (info->is_yuv)
+ return channel ? 2 : 1;
+
+ if (info->has_alpha)
+ return 4;
+
+ return 3;
+}
+
+static unsigned int vc4_lbm_channel_size(const struct drm_plane_state *state,
+ unsigned int channel)
+{
+ const struct drm_format_info *info = state->fb->format;
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int channels_scaled = 0;
+ unsigned int components, words, wpc;
+ unsigned int width, lines;
+ unsigned int i;
+
+ /* LBM is meant to use the smaller of source or dest width, but there
+ * is an issue with UV scaling where the size required for the second
+ * channel is based on the source width only.
+ */
+ if (info->hsub > 1 && channel == 1)
+ width = state->src_w >> 16;
+ else
+ width = min(state->src_w >> 16, state->crtc_w);
+ width = round_up(width / info->hsub, 4);
+
+ wpc = vc4_lbm_words_per_component(state, channel);
+ if (!wpc)
+ return 0;
+
+ components = vc4_lbm_components(state, channel);
+ if (!components)
+ return 0;
+
+ if (state->alpha != DRM_BLEND_ALPHA_OPAQUE && info->has_alpha)
+ components -= 1;
+
+ words = width * wpc * components;
+
+ lines = DIV_ROUND_UP(words, 128 / info->hsub);
+
+ for (i = 0; i < 2; i++)
+ if (vc4_state->y_scaling[channel] != VC4_SCALING_NONE)
+ channels_scaled++;
+
+ if (channels_scaled == 1)
+ lines = lines / 2;
+
+ return lines;
+}
+
+static unsigned int __vc6_lbm_size(const struct drm_plane_state *state)
+{
+ const struct drm_format_info *info = state->fb->format;
+
+ if (info->hsub > 1)
+ return max(vc4_lbm_channel_size(state, 0),
+ vc4_lbm_channel_size(state, 1));
+ else
+ return vc4_lbm_channel_size(state, 0);
+}
+
+static u32 vc4_lbm_size(struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+
+ /* LBM is not needed when there's no vertical scaling. */
+ if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[1] == VC4_SCALING_NONE)
+ return 0;
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ return __vc6_lbm_size(state);
+ else
+ return __vc4_lbm_size(state);
+}
+
+static size_t vc6_upm_size(const struct drm_plane_state *state,
+ unsigned int plane)
+{
+ const struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int stride = state->fb->pitches[plane];
+
+ /*
+ * TODO: This only works for raster formats, and is sub-optimal
+ * for buffers with a stride aligned on 32 bytes.
+ */
+ unsigned int words_per_line = (stride + 62) / 32;
+ unsigned int fetch_region_size = words_per_line * 32;
+ unsigned int buffer_lines = 2 << vc4_state->upm_buffer_lines;
+ unsigned int buffer_size = fetch_region_size * buffer_lines;
+
+ return ALIGN(buffer_size, HVS_UBM_WORD_SIZE);
+}
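
Plugging numbers into vc6_upm_size() above: a 3840-wide XRGB8888 plane with the default two-line buffer. In this sketch the 2-line setting encoding of 0 and the 32-byte UPM word size are assumptions, mirroring the `2 << upm_buffer_lines` and ALIGN() above:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int stride = 3840 * 4;				/* 15360 */
	unsigned int words_per_line = (stride + 62) / 32;	/* 481 */
	unsigned int fetch_region_size = words_per_line * 32;	/* 15392 */
	unsigned int buffer_lines = 2 << 0;	/* assumed 2-line encoding */
	unsigned int buffer_size = fetch_region_size * buffer_lines;

	printf("%u\n", ALIGN(buffer_size, 32));			/* 30784 */
	return 0;
}
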
+
static void vc4_write_scaling_parameters(struct drm_plane_state *state,
int channel)
{
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ WARN_ON_ONCE(vc4->gen > VC4_GEN_6_D);
+
/* Ch0 H-PPF Word 0: Scaling Parameters */
if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
- vc4_write_ppf(vc4_state,
- vc4_state->src_w[channel], vc4_state->crtc_w);
+ vc4_write_ppf(vc4_state, vc4_state->src_w[channel],
+ vc4_state->crtc_w, vc4_state->src_x, channel);
}
/* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
- vc4_write_ppf(vc4_state,
- vc4_state->src_h[channel], vc4_state->crtc_h);
+ vc4_write_ppf(vc4_state, vc4_state->src_h[channel],
+ vc4_state->crtc_h, vc4_state->src_y, channel);
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
}
/* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
- vc4_write_tpz(vc4_state,
- vc4_state->src_w[channel], vc4_state->crtc_w);
+ vc4_write_tpz(vc4_state, vc4_state->src_w[channel],
+ vc4_state->crtc_w);
}
/* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
- vc4_write_tpz(vc4_state,
- vc4_state->src_h[channel], vc4_state->crtc_h);
+ vc4_write_tpz(vc4_state, vc4_state->src_h[channel],
+ vc4_state->crtc_h);
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
}
}
@@ -660,7 +899,8 @@ static void vc4_plane_calc_load(struct drm_plane_state *state)
for (i = 0; i < fb->format->num_planes; i++) {
/* Even if the bandwidth/plane required for a single frame is
*
- * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh
+ * (vc4_state->src_w[i] >> 16) * (vc4_state->src_h[i] >> 16) *
+ * cpp * vrefresh
*
* when downscaling, we have to read more pixels per line in
* the time frame reserved for a single line, so the bandwidth
@@ -669,11 +909,11 @@ static void vc4_plane_calc_load(struct drm_plane_state *state)
* load by this number. We're likely over-estimating the read
* demand, but that's better than under-estimating it.
*/
- vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i],
+ vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i] >> 16,
vc4_state->crtc_h);
- vc4_state->membus_load += vc4_state->src_w[i] *
- vc4_state->src_h[i] * vscale_factor *
- fb->format->cpp[i];
+ vc4_state->membus_load += (vc4_state->src_w[i] >> 16) *
+ (vc4_state->src_h[i] >> 16) *
+ vscale_factor * fb->format->cpp[i];
vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w;
}
@@ -684,7 +924,9 @@ static void vc4_plane_calc_load(struct drm_plane_state *state)
static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
{
- struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+ struct drm_device *drm = state->plane->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct drm_plane *plane = state->plane;
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
unsigned long irqflags;
u32 lbm_size;
@@ -693,6 +935,18 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
if (!lbm_size)
return 0;
+ /*
+ * NOTE: BCM2712 doesn't need to be aligned, since the size
+ * returned by vc4_lbm_size() is in words already.
+ */
+ if (vc4->gen == VC4_GEN_5)
+ lbm_size = ALIGN(lbm_size, 64);
+ else if (vc4->gen == VC4_GEN_4)
+ lbm_size = ALIGN(lbm_size, 32);
+
+ drm_dbg_driver(drm, "[PLANE:%d:%s] LBM Allocation Size: %u\n",
+ plane->base.id, plane->name, lbm_size);
+
if (WARN_ON(!vc4_state->lbm_offset))
return -EINVAL;
@@ -705,13 +959,14 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
&vc4_state->lbm,
- lbm_size,
- vc4->is_vc5 ? 64 : 32,
+ lbm_size, 1,
0, 0);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
- if (ret)
+ if (ret) {
+ drm_err(drm, "Failed to allocate LBM entry: %d\n", ret);
return ret;
+ }
} else {
WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
}
@@ -721,6 +976,108 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
return 0;
}
+static int vc6_plane_allocate_upm(struct drm_plane_state *state)
+{
+ const struct drm_format_info *info = state->fb->format;
+ struct drm_device *drm = state->plane->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = vc4->hvs;
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned int i;
+ int ret;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ vc4_state->upm_buffer_lines = SCALER6_PTR0_UPM_BUFF_SIZE_2_LINES;
+
+ for (i = 0; i < info->num_planes; i++) {
+ struct vc4_upm_refcounts *refcount;
+ int upm_handle;
+ unsigned long irqflags;
+ size_t upm_size;
+
+ upm_size = vc6_upm_size(state, i);
+ if (!upm_size)
+ return -EINVAL;
+ upm_handle = vc4_state->upm_handle[i];
+
+ if (upm_handle &&
+ hvs->upm_refcounts[upm_handle].size == upm_size) {
+ /* Allocation is the same size as the previous user of
+ * the plane. Keep the allocation.
+ */
+ vc4_state->upm_handle[i] = upm_handle;
+ } else {
+ if (upm_handle &&
+ refcount_dec_and_test(&hvs->upm_refcounts[upm_handle].refcount)) {
+ vc4_plane_release_upm_ida(hvs, upm_handle);
+ vc4_state->upm_handle[i] = 0;
+ }
+
+ upm_handle = ida_alloc_range(&hvs->upm_handles, 1,
+ VC4_NUM_UPM_HANDLES,
+ GFP_KERNEL);
+ if (upm_handle < 0) {
+ drm_dbg(drm, "Out of upm_handles\n");
+ return upm_handle;
+ }
+ vc4_state->upm_handle[i] = upm_handle;
+
+ refcount = &hvs->upm_refcounts[upm_handle];
+ refcount_set(&refcount->refcount, 1);
+ refcount->size = upm_size;
+
+ spin_lock_irqsave(&hvs->mm_lock, irqflags);
+ ret = drm_mm_insert_node_generic(&hvs->upm_mm,
+ &refcount->upm,
+ upm_size, HVS_UBM_WORD_SIZE,
+ 0, 0);
+ spin_unlock_irqrestore(&hvs->mm_lock, irqflags);
+ if (ret) {
+ drm_err(drm, "Failed to allocate UPM entry: %d\n", ret);
+ refcount_set(&refcount->refcount, 0);
+ ida_free(&hvs->upm_handles, upm_handle);
+ vc4_state->upm_handle[i] = 0;
+ return ret;
+ }
+ }
+
+ refcount = &hvs->upm_refcounts[upm_handle];
+ vc4_state->dlist[vc4_state->ptr0_offset[i]] |=
+ VC4_SET_FIELD(refcount->upm.start / HVS_UBM_WORD_SIZE,
+ SCALER6_PTR0_UPM_BASE) |
+ VC4_SET_FIELD(vc4_state->upm_handle[i] - 1,
+ SCALER6_PTR0_UPM_HANDLE) |
+ VC4_SET_FIELD(vc4_state->upm_buffer_lines,
+ SCALER6_PTR0_UPM_BUFF_SIZE);
+ }
+
+ return 0;
+}
+
+static void vc6_plane_free_upm(struct drm_plane_state *state)
+{
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct drm_device *drm = state->plane->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = vc4->hvs;
+ unsigned int i;
+
+ WARN_ON_ONCE(vc4->gen < VC4_GEN_6_C);
+
+ for (i = 0; i < DRM_FORMAT_MAX_PLANES; i++) {
+ unsigned int upm_handle;
+
+ upm_handle = vc4_state->upm_handle[i];
+ if (!upm_handle)
+ continue;
+
+ if (refcount_dec_and_test(&hvs->upm_refcounts[upm_handle].refcount))
+ vc4_plane_release_upm_ida(hvs, upm_handle);
+ vc4_state->upm_handle[i] = 0;
+ }
+}
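
The UPM handles are shared across duplicated plane states by refcount alone: vc4_plane_duplicate_state() takes a reference, the destroy/free paths drop it, and the IDA slot plus drm_mm node are released on the last put. A toy model of that lifecycle (hypothetical names, no locking or IDA):

#include <stdio.h>

struct upm_slot { unsigned int refcount; unsigned int size; };

static struct upm_slot slots[8];

static int upm_get(unsigned int size)
{
	for (int i = 1; i < 8; i++) {	/* handle 0 means "none" */
		if (!slots[i].refcount) {
			slots[i].refcount = 1;
			slots[i].size = size;
			return i;
		}
	}
	return -1;
}

static void upm_put(int handle)
{
	if (handle && !--slots[handle].refcount)
		slots[handle].size = 0;	/* release the allocation */
}

int main(void)
{
	int h = upm_get(30784);

	slots[h].refcount++;	/* duplicate_state takes a reference */
	upm_put(h);		/* destroying the old state */
	upm_put(h);		/* destroying the duplicate frees the slot */
	printf("size after last put: %u\n", slots[h].size);	/* 0 */
	return 0;
}
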
+
/*
* The colorspace conversion matrices are held in 3 entries in the dlist.
* Create an array of them, with entries for each full and limited mode, and
@@ -768,6 +1125,11 @@ static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = {
static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
{
+ struct drm_device *dev = state->state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_4);
+
if (!state->fb->format->has_alpha)
return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
SCALER_POS2_ALPHA_MODE);
@@ -789,25 +1151,56 @@ static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
static u32 vc4_hvs5_get_alpha_blend_mode(struct drm_plane_state *state)
{
- if (!state->fb->format->has_alpha)
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
- SCALER5_CTL2_ALPHA_MODE);
+ struct drm_device *dev = state->state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
- switch (state->pixel_blend_mode) {
- case DRM_MODE_BLEND_PIXEL_NONE:
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
- SCALER5_CTL2_ALPHA_MODE);
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_5 && vc4->gen != VC4_GEN_6_C &&
+ vc4->gen != VC4_GEN_6_D);
+
+ switch (vc4->gen) {
default:
- case DRM_MODE_BLEND_PREMULTI:
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
- SCALER5_CTL2_ALPHA_MODE) |
- SCALER5_CTL2_ALPHA_PREMULT;
- case DRM_MODE_BLEND_COVERAGE:
- return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
- SCALER5_CTL2_ALPHA_MODE);
+ case VC4_GEN_5:
+ case VC4_GEN_6_C:
+ if (!state->fb->format->has_alpha)
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
+ SCALER5_CTL2_ALPHA_MODE);
+
+ switch (state->pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
+ SCALER5_CTL2_ALPHA_MODE);
+ default:
+ case DRM_MODE_BLEND_PREMULTI:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
+ SCALER5_CTL2_ALPHA_MODE) |
+ SCALER5_CTL2_ALPHA_PREMULT;
+ case DRM_MODE_BLEND_COVERAGE:
+ return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
+ SCALER5_CTL2_ALPHA_MODE);
+ }
+ case VC4_GEN_6_D:
+ /* 2712-D configures fixed alpha mode in CTL0 */
+ return state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ?
+ SCALER5_CTL2_ALPHA_PREMULT : 0;
}
}
+static u32 vc4_hvs6_get_alpha_mask_mode(struct drm_plane_state *state)
+{
+ struct drm_device *dev = state->state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ WARN_ON_ONCE(vc4->gen != VC4_GEN_6_C && vc4->gen != VC4_GEN_6_D);
+
+ if (vc4->gen == VC4_GEN_6_D &&
+ (!state->fb->format->has_alpha ||
+ state->pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE))
+ return VC4_SET_FIELD(SCALER6D_CTL0_ALPHA_MASK_FIXED,
+ SCALER6_CTL0_ALPHA_MASK);
+
+ return VC4_SET_FIELD(SCALER6_CTL0_ALPHA_MASK_NONE, SCALER6_CTL0_ALPHA_MASK);
+}
+
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
@@ -826,9 +1219,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
- u32 tiling, src_y;
+ u32 tiling, src_x, src_y;
+ u32 width, height;
u32 hvs_format = format->hvs;
unsigned int rotation;
+ u32 offsets[3] = { 0 };
int ret, i;
if (vc4_state->dlist_initialized)
@@ -838,6 +1233,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
if (ret)
return ret;
+ if (!vc4_state->src_w[0] || !vc4_state->src_h[0] ||
+ !vc4_state->crtc_w || !vc4_state->crtc_h) {
+ /* 0 source size probably means the plane is offscreen */
+ vc4_state->dlist_initialized = 1;
+ return 0;
+ }
+
+ width = vc4_state->src_w[0] >> 16;
+ height = vc4_state->src_h[0] >> 16;
+
/* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
* and 4:4:4, scl1 should be set to scl0 so both channels of
* the scaler do the same thing. For YUV, the Y plane needs
@@ -858,9 +1263,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
DRM_MODE_REFLECT_Y);
/* We must point to the last line when Y reflection is enabled. */
- src_y = vc4_state->src_y;
+ src_y = vc4_state->src_y >> 16;
if (rotation & DRM_MODE_REFLECT_Y)
- src_y += vc4_state->src_h[0] - 1;
+ src_y += height - 1;
+
+ src_x = vc4_state->src_x >> 16;
switch (base_format_mod) {
case DRM_FORMAT_MOD_LINEAR:
@@ -871,13 +1278,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* out.
*/
for (i = 0; i < num_planes; i++) {
- vc4_state->offsets[i] += src_y /
- (i ? v_subsample : 1) *
- fb->pitches[i];
-
- vc4_state->offsets[i] += vc4_state->src_x /
- (i ? h_subsample : 1) *
- fb->format->cpp[i];
+ offsets[i] += src_y / (i ? v_subsample : 1) * fb->pitches[i];
+ offsets[i] += src_x / (i ? h_subsample : 1) * fb->format->cpp[i];
}
break;
@@ -898,7 +1300,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* pitch * tile_h == tile_size * tiles_per_row
*/
u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
- u32 tiles_l = vc4_state->src_x >> tile_w_shift;
+ u32 tiles_l = src_x >> tile_w_shift;
u32 tiles_r = tiles_w - tiles_l;
u32 tiles_t = src_y >> tile_h_shift;
/* Intra-tile offsets, which modify the base address (the
@@ -908,7 +1310,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
u32 tile_y = (src_y >> 4) & 1;
u32 subtile_y = (src_y >> 2) & 3;
u32 utile_y = src_y & 3;
- u32 x_off = vc4_state->src_x & tile_w_mask;
+ u32 x_off = src_x & tile_w_mask;
u32 y_off = src_y & tile_h_mask;
/* When Y reflection is requested we must set the
@@ -932,19 +1334,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) |
VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) |
VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R));
- vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift);
- vc4_state->offsets[0] += subtile_y << 8;
- vc4_state->offsets[0] += utile_y << 4;
+ offsets[0] += tiles_t * (tiles_w << tile_size_shift);
+ offsets[0] += subtile_y << 8;
+ offsets[0] += utile_y << 4;
/* Rows of tiles alternate left-to-right and right-to-left. */
if (tiles_t & 1) {
pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR;
- vc4_state->offsets[0] += (tiles_w - tiles_l) <<
- tile_size_shift;
- vc4_state->offsets[0] -= (1 + !tile_y) << 10;
+ offsets[0] += (tiles_w - tiles_l) << tile_size_shift;
+ offsets[0] -= (1 + !tile_y) << 10;
} else {
- vc4_state->offsets[0] += tiles_l << tile_size_shift;
- vc4_state->offsets[0] += tile_y << 10;
+ offsets[0] += tiles_l << tile_size_shift;
+ offsets[0] += tile_y << 10;
}
break;
@@ -1004,7 +1405,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* of the 12-pixels in that 128-bit word is the
* first pixel to be used
*/
- u32 remaining_pixels = vc4_state->src_x % 96;
+ u32 remaining_pixels = src_x % 96;
u32 aligned = remaining_pixels / 12;
u32 last_bits = remaining_pixels % 12;
@@ -1026,18 +1427,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
return -EINVAL;
}
pix_per_tile = tile_w / fb->format->cpp[0];
- x_off = (vc4_state->src_x % pix_per_tile) /
+ x_off = (src_x % pix_per_tile) /
(i ? h_subsample : 1) *
fb->format->cpp[i];
}
- tile = vc4_state->src_x / pix_per_tile;
+ tile = src_x / pix_per_tile;
- vc4_state->offsets[i] += param * tile_w * tile;
- vc4_state->offsets[i] += src_y /
- (i ? v_subsample : 1) *
- tile_w;
- vc4_state->offsets[i] += x_off & ~(i ? 1 : 0);
+ offsets[i] += param * tile_w * tile;
+ offsets[i] += src_y / (i ? v_subsample : 1) * tile_w;
+ offsets[i] += x_off & ~(i ? 1 : 0);
}
pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
@@ -1050,6 +1449,30 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
return -EINVAL;
}
+ /* fetch an extra pixel if we don't actually line up with the left edge. */
+ if ((vc4_state->src_x & 0xffff) && vc4_state->src_x < (state->fb->width << 16))
+ width++;
+
+ /* same for the right side */
+ if (((vc4_state->src_x + vc4_state->src_w[0]) & 0xffff) &&
+ vc4_state->src_x + vc4_state->src_w[0] < (state->fb->width << 16))
+ width++;
+
+ /* now for the top */
+ if ((vc4_state->src_y & 0xffff) && vc4_state->src_y < (state->fb->height << 16))
+ height++;
+
+ /* and the bottom */
+ if (((vc4_state->src_y + vc4_state->src_h[0]) & 0xffff) &&
+ vc4_state->src_y + vc4_state->src_h[0] < (state->fb->height << 16))
+ height++;
+
+ /* For YUV444 the hardware wants double the width, otherwise it doesn't
+ * fetch the full width of the chroma plane.
+ */
+ if (format->drm == DRM_FORMAT_YUV444 || format->drm == DRM_FORMAT_YVU444)
+ width <<= 1;
+
/* Don't waste cycles mixing with plane alpha if the set alpha
* is opaque or there is no per-pixel alpha information.
* In any case we use the alpha property value as the fixed alpha.
@@ -1057,7 +1480,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
fb->format->has_alpha;
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
@@ -1092,10 +1515,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_dlist_write(vc4_state,
(mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
vc4_hvs4_get_alpha_blend_mode(state) |
- VC4_SET_FIELD(vc4_state->src_w[0],
- SCALER_POS2_WIDTH) |
- VC4_SET_FIELD(vc4_state->src_h[0],
- SCALER_POS2_HEIGHT));
+ VC4_SET_FIELD(width, SCALER_POS2_WIDTH) |
+ VC4_SET_FIELD(height, SCALER_POS2_HEIGHT));
/* Position Word 3: Context. Written by the HVS. */
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
@@ -1148,10 +1569,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
/* Position Word 2: Source Image Size */
vc4_state->pos2_offset = vc4_state->dlist_count;
vc4_dlist_write(vc4_state,
- VC4_SET_FIELD(vc4_state->src_w[0],
- SCALER5_POS2_WIDTH) |
- VC4_SET_FIELD(vc4_state->src_h[0],
- SCALER5_POS2_HEIGHT));
+ VC4_SET_FIELD(width, SCALER5_POS2_WIDTH) |
+ VC4_SET_FIELD(height, SCALER5_POS2_HEIGHT));
/* Position Word 3: Context. Written by the HVS. */
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
@@ -1162,9 +1581,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
*
* The pointers may be any byte address.
*/
- vc4_state->ptr0_offset = vc4_state->dlist_count;
- for (i = 0; i < num_planes; i++)
- vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
+ vc4_state->ptr0_offset[0] = vc4_state->dlist_count;
+
+ for (i = 0; i < num_planes; i++) {
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, i);
+
+ vc4_dlist_write(vc4_state, bo->dma_addr + fb->offsets[i] + offsets[i]);
+ }
/* Pointer Context Word 0/1/2: Written by the HVS */
for (i = 0; i < num_planes; i++)
@@ -1274,6 +1697,427 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
return 0;
}
+static u32 vc6_plane_get_csc_mode(struct vc4_plane_state *vc4_state)
+{
+ struct drm_plane_state *state = &vc4_state->base;
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+ u32 ret = 0;
+
+ if (vc4_state->is_yuv) {
+ enum drm_color_encoding color_encoding = state->color_encoding;
+ enum drm_color_range color_range = state->color_range;
+
+ /* CSC pre-loaded with:
+ * 0 = BT601 limited range
+ * 1 = BT709 limited range
+ * 2 = BT2020 limited range
+ * 3 = BT601 full range
+ * 4 = BT709 full range
+ * 5 = BT2020 full range
+ */
+ if (color_encoding > DRM_COLOR_YCBCR_BT2020)
+ color_encoding = DRM_COLOR_YCBCR_BT601;
+ if (color_range > DRM_COLOR_YCBCR_FULL_RANGE)
+ color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+
+ if (vc4->gen == VC4_GEN_6_C) {
+ ret |= SCALER6C_CTL2_CSC_ENABLE;
+ ret |= VC4_SET_FIELD(color_encoding + (color_range * 3),
+ SCALER6C_CTL2_BRCM_CFC_CONTROL);
+ } else {
+ ret |= SCALER6D_CTL2_CSC_ENABLE;
+ ret |= VC4_SET_FIELD(color_encoding + (color_range * 3),
+ SCALER6D_CTL2_BRCM_CFC_CONTROL);
+ }
+ }
+
+ return ret;
+}
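
The CFC control value indexes the table that vc6_hvs_hw_init() pre-loaded: index = encoding + range * 3, which is exactly the csc_coeffs[i / 3][i % 3] layout used there. For instance, BT709 full range selects slot 4 (a minimal sketch; the enum values match drm_color_encoding / drm_color_range):

#include <stdio.h>

enum { BT601, BT709, BT2020 };		/* drm_color_encoding order */
enum { LIMITED, FULL };			/* drm_color_range order */

int main(void)
{
	int encoding = BT709, range = FULL;
	int index = encoding + range * 3;

	/* 4 == "BT709 full range": the slot vc6_hvs_hw_init() filled
	 * from csc_coeffs[index / 3][index % 3].
	 */
	printf("CFC control index: %d\n", index);
	return 0;
}
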
+
+static int vc6_plane_mode_set(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_device *drm = plane->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
+ const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
+ u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
+ int num_planes = fb->format->num_planes;
+ u32 h_subsample = fb->format->hsub;
+ u32 v_subsample = fb->format->vsub;
+ bool mix_plane_alpha;
+ bool covers_screen;
+ u32 scl0, scl1, pitch0;
+ u32 tiling, src_x, src_y;
+ u32 width, height;
+ u32 hvs_format = format->hvs;
+ u32 offsets[3] = { 0 };
+ unsigned int rotation;
+ int ret, i;
+
+ if (vc4_state->dlist_initialized)
+ return 0;
+
+ ret = vc4_plane_setup_clipping_and_scaling(state);
+ if (ret)
+ return ret;
+
+ if (!vc4_state->src_w[0] || !vc4_state->src_h[0] ||
+ !vc4_state->crtc_w || !vc4_state->crtc_h) {
+ /* 0 source size probably means the plane is offscreen.
+ * 0 destination size is a redundant plane.
+ */
+ vc4_state->dlist_initialized = 1;
+ return 0;
+ }
+
+ width = vc4_state->src_w[0] >> 16;
+ height = vc4_state->src_h[0] >> 16;
+
+ /* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
+ * and 4:4:4, scl1 should be set to scl0 so both channels of
+ * the scaler do the same thing. For YUV, the Y plane needs
+ * to be put in channel 1 and Cb/Cr in channel 0, so we swap
+ * the scl fields here.
+ */
+ if (num_planes == 1) {
+ scl0 = vc4_get_scl_field(state, 0);
+ scl1 = scl0;
+ } else {
+ scl0 = vc4_get_scl_field(state, 1);
+ scl1 = vc4_get_scl_field(state, 0);
+ }
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ /* We must point to the last line when Y reflection is enabled. */
+ src_y = vc4_state->src_y >> 16;
+ if (rotation & DRM_MODE_REFLECT_Y)
+ src_y += height - 1;
+
+ src_x = vc4_state->src_x >> 16;
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_LINEAR:
+ tiling = SCALER6_CTL0_ADDR_MODE_LINEAR;
+
+ /* Adjust the base pointer to the first pixel to be scanned
+ * out.
+ */
+ for (i = 0; i < num_planes; i++) {
+ offsets[i] += src_y / (i ? v_subsample : 1) * fb->pitches[i];
+ offsets[i] += src_x / (i ? h_subsample : 1) * fb->format->cpp[i];
+ }
+
+ break;
+
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ case DRM_FORMAT_MOD_BROADCOM_SAND256: {
+ uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+ u32 components_per_word;
+ u32 starting_offset;
+ u32 fetch_count;
+
+ if (param > SCALER_TILE_HEIGHT_MASK) {
+ DRM_DEBUG_KMS("SAND height too large (%d)\n",
+ param);
+ return -EINVAL;
+ }
+
+ if (fb->format->format == DRM_FORMAT_P030) {
+ hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT;
+ tiling = SCALER6_CTL0_ADDR_MODE_128B;
+ } else {
+ hvs_format = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE;
+
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tiling = SCALER6_CTL0_ADDR_MODE_128B;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tiling = SCALER6_CTL0_ADDR_MODE_256B;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Adjust the base pointer to the first pixel to be scanned
+ * out.
+ *
+ * For P030, y_ptr [31:4] is the 128bit word for the start pixel
+ * y_ptr [3:0] is the pixel (0-11) contained within that 128bit
+ * word that should be taken as the first pixel.
+ * Ditto uv_ptr [31:4] vs [3:0], however [3:0] contains the
+ * element within the 128bit word, eg for pixel 3 the value
+ * should be 6.
+ */
+ for (i = 0; i < num_planes; i++) {
+ u32 tile_w, tile, x_off, pix_per_tile;
+
+ if (fb->format->format == DRM_FORMAT_P030) {
+ /*
+ * Spec says: bits [31:4] of the given address
+ * should point to the 128-bit word containing
+ * the desired starting pixel, and bits[3:0]
+ * should be between 0 and 11, indicating which
+ * of the 12-pixels in that 128-bit word is the
+ * first pixel to be used
+ */
+ u32 remaining_pixels = src_x % 96;
+ u32 aligned = remaining_pixels / 12;
+ u32 last_bits = remaining_pixels % 12;
+
+ x_off = aligned * 16 + last_bits;
+ tile_w = 128;
+ pix_per_tile = 96;
+ } else {
+ switch (base_format_mod) {
+ case DRM_FORMAT_MOD_BROADCOM_SAND128:
+ tile_w = 128;
+ break;
+ case DRM_FORMAT_MOD_BROADCOM_SAND256:
+ tile_w = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ pix_per_tile = tile_w / fb->format->cpp[0];
+ x_off = (src_x % pix_per_tile) /
+ (i ? h_subsample : 1) *
+ fb->format->cpp[i];
+ }
+
+ tile = src_x / pix_per_tile;
+
+ offsets[i] += param * tile_w * tile;
+ offsets[i] += src_y / (i ? v_subsample : 1) * tile_w;
+ offsets[i] += x_off & ~(i ? 1 : 0);
+ }
+
+ components_per_word = fb->format->format == DRM_FORMAT_P030 ? 24 : 32;
+ starting_offset = src_x % components_per_word;
+ fetch_count = (width + starting_offset + components_per_word - 1) /
+ components_per_word;
+
+ pitch0 = VC4_SET_FIELD(param, SCALER6_PTR2_PITCH) |
+ VC4_SET_FIELD(fetch_count - 1, SCALER6_PTR2_FETCH_COUNT);
+ break;
+ }
+
+ default:
+ DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
+ (long long)fb->modifier);
+ return -EINVAL;
+ }
+
+ /* fetch an extra pixel if we don't actually line up with the left edge. */
+ if ((vc4_state->src_x & 0xffff) && vc4_state->src_x < (state->fb->width << 16))
+ width++;
+
+ /* same for the right side */
+ if (((vc4_state->src_x + vc4_state->src_w[0]) & 0xffff) &&
+ vc4_state->src_x + vc4_state->src_w[0] < (state->fb->width << 16))
+ width++;
+
+ /* now for the top */
+ if ((vc4_state->src_y & 0xffff) && vc4_state->src_y < (state->fb->height << 16))
+ height++;
+
+ /* and the bottom */
+ if (((vc4_state->src_y + vc4_state->src_h[0]) & 0xffff) &&
+ vc4_state->src_y + vc4_state->src_h[0] < (state->fb->height << 16))
+ height++;
+
+ /* For YUV444 the hardware wants double the width, otherwise it doesn't
+ * fetch the full width of the chroma plane.
+ */
+ if (format->drm == DRM_FORMAT_YUV444 || format->drm == DRM_FORMAT_YVU444)
+ width <<= 1;
+
+ /* Don't waste cycles mixing with plane alpha if the set alpha
+ * is opaque or there is no per-pixel alpha information.
+ * In any case we use the alpha property value as the fixed alpha.
+ */
+ mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
+ fb->format->has_alpha;
+
+ /* Control Word 0: Scaling Configuration & Element Validity */
+ vc4_dlist_write(vc4_state,
+ SCALER6_CTL0_VALID |
+ VC4_SET_FIELD(tiling, SCALER6_CTL0_ADDR_MODE) |
+ vc4_hvs6_get_alpha_mask_mode(state) |
+ (vc4_state->is_unity ? SCALER6_CTL0_UNITY : 0) |
+ VC4_SET_FIELD(format->pixel_order_hvs5, SCALER6_CTL0_ORDERRGBA) |
+ VC4_SET_FIELD(scl1, SCALER6_CTL0_SCL1_MODE) |
+ VC4_SET_FIELD(scl0, SCALER6_CTL0_SCL0_MODE) |
+ VC4_SET_FIELD(hvs_format, SCALER6_CTL0_PIXEL_FORMAT));
+
+ /* Position Word 0: Image Position */
+ vc4_state->pos0_offset = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(vc4_state->crtc_y, SCALER6_POS0_START_Y) |
+ (rotation & DRM_MODE_REFLECT_X ? SCALER6_POS0_HFLIP : 0) |
+ VC4_SET_FIELD(vc4_state->crtc_x, SCALER6_POS0_START_X));
+
+ /* Control Word 2: Alpha Value & CSC */
+ vc4_dlist_write(vc4_state,
+ vc6_plane_get_csc_mode(vc4_state) |
+ vc4_hvs5_get_alpha_blend_mode(state) |
+ (mix_plane_alpha ? SCALER6_CTL2_ALPHA_MIX : 0) |
+ VC4_SET_FIELD(state->alpha >> 4, SCALER5_CTL2_ALPHA));
+
+ /* Position Word 1: Scaled Image Dimensions */
+ if (!vc4_state->is_unity)
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(vc4_state->crtc_h - 1,
+ SCALER6_POS1_SCL_LINES) |
+ VC4_SET_FIELD(vc4_state->crtc_w - 1,
+ SCALER6_POS1_SCL_WIDTH));
+
+ /* Position Word 2: Source Image Size */
+ vc4_state->pos2_offset = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(height - 1,
+ SCALER6_POS2_SRC_LINES) |
+ VC4_SET_FIELD(width - 1,
+ SCALER6_POS2_SRC_WIDTH));
+
+ /* Position Word 3: Context */
+ vc4_dlist_write(vc4_state, 0xc0c0c0c0);
+
+ /*
+ * TODO: This only covers Raster Scan Order planes
+ */
+ for (i = 0; i < num_planes; i++) {
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, i);
+ dma_addr_t paddr = bo->dma_addr + fb->offsets[i] + offsets[i];
+
+ /* Pointer Word 0 */
+ vc4_state->ptr0_offset[i] = vc4_state->dlist_count;
+ vc4_dlist_write(vc4_state,
+ (rotation & DRM_MODE_REFLECT_Y ? SCALER6_PTR0_VFLIP : 0) |
+ /*
+ * The UPM buffer will be allocated in
+ * vc6_plane_allocate_upm().
+ */
+ VC4_SET_FIELD(upper_32_bits(paddr) & 0xff,
+ SCALER6_PTR0_UPPER_ADDR));
+
+ /* Pointer Word 1 */
+ vc4_dlist_write(vc4_state, lower_32_bits(paddr));
+
+ /* Pointer Word 2 */
+ if (base_format_mod != DRM_FORMAT_MOD_BROADCOM_SAND128 &&
+ base_format_mod != DRM_FORMAT_MOD_BROADCOM_SAND256) {
+ vc4_dlist_write(vc4_state,
+ VC4_SET_FIELD(fb->pitches[i],
+ SCALER6_PTR2_PITCH));
+ } else {
+ vc4_dlist_write(vc4_state, pitch0);
+ }
+ }
+
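Pointer Words 0 and 1 above split a 40-bit bus address: bits 39:32 land in the PTR0 UPPER_ADDR field, bits 31:0 in PTR1. A quick stand-alone illustration with a sample address (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t paddr = 0x0123456789ULL;		/* sample 40-bit address */
	uint32_t hi = (uint32_t)(paddr >> 32) & 0xff;	/* -> PTR0 UPPER_ADDR */
	uint32_t lo = (uint32_t)paddr;			/* -> PTR1 */

	printf("hi=0x%02x lo=0x%08x\n", hi, lo);	/* hi=0x01 lo=0x23456789 */
	return 0;
}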
+ /*
+ * Palette Word 0
+ * TODO: We're not using the palette mode
+ */
+
+ /*
+ * Trans Word 0
+ * TODO: It's only relevant if we set the trans_rgb bit in the
+ * control word 0, and we don't at the moment.
+ */
+
+ vc4_state->lbm_offset = 0;
+
+ if (!vc4_state->is_unity || fb->format->is_yuv) {
+ /*
+ * Reserve a slot for the LBM Base Address. The real value will
+ * be set when calling vc4_plane_allocate_lbm().
+ */
+ if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ vc4_state->lbm_offset = vc4_state->dlist_count;
+ vc4_dlist_counter_increment(vc4_state);
+ }
+
+ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ if (num_planes > 1)
+ /*
+ * Emit Cb/Cr as channel 0 and Y as channel
+ * 1. This matches how we set up scl0/scl1
+ * above.
+ */
+ vc4_write_scaling_parameters(state, 1);
+
+ vc4_write_scaling_parameters(state, 0);
+ }
+
+ /*
+ * If any PPF setup was done, then all the kernel
+ * pointers get uploaded.
+ */
+ if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
+ vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
+ vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
+ vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
+ u32 kernel =
+ VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
+ SCALER_PPF_KERNEL_OFFSET);
+
+ /* HPPF plane 0 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* VPPF plane 0 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* HPPF plane 1 */
+ vc4_dlist_write(vc4_state, kernel);
+ /* VPPF plane 1 */
+ vc4_dlist_write(vc4_state, kernel);
+ }
+ }
+
+ vc4_dlist_write(vc4_state, SCALER6_CTL0_END);
+
+ vc4_state->dlist[0] |=
+ VC4_SET_FIELD(vc4_state->dlist_count, SCALER6_CTL0_NEXT);
+
+ /* crtc_* are already clipped coordinates. */
+ covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
+ vc4_state->crtc_w == state->crtc->mode.hdisplay &&
+ vc4_state->crtc_h == state->crtc->mode.vdisplay;
+
+ /*
+ * Background fill might be necessary when the plane could blend with
+ * the background, i.e. it has per-pixel alpha content or a non-opaque
+ * plane alpha, or when it does not cover the entire screen.
+ */
+ vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
+ state->alpha != DRM_BLEND_ALPHA_OPAQUE;
+
+ /*
+ * Flag the dlist as initialized to avoid checking it twice in case
+ * the async update check already called vc4_plane_mode_set() and
+ * decided to fall back to a sync update because async update was not
+ * possible.
+ */
+ vc4_state->dlist_initialized = 1;
+
+ vc4_plane_calc_load(state);
+
+ drm_dbg_driver(drm, "[PLANE:%d:%s] Computed DLIST size: %u\n",
+ plane->base.id, plane->name, vc4_state->dlist_count);
+
+ return 0;
+}
+
/* If a modeset involves changing the setup of a plane, the atomic
* infrastructure will call this to validate a proposed plane setup.
* However, if a plane isn't getting updated, this (and the
@@ -1284,6 +2128,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
static int vc4_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(new_plane_state);
@@ -1291,14 +2136,39 @@ static int vc4_plane_atomic_check(struct drm_plane *plane,
vc4_state->dlist_count = 0;
- if (!plane_enabled(new_plane_state))
+ if (!plane_enabled(new_plane_state)) {
+ struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(state, plane);
+
+ if (vc4->gen >= VC4_GEN_6_C && old_plane_state &&
+ plane_enabled(old_plane_state)) {
+ vc6_plane_free_upm(new_plane_state);
+ }
+ return 0;
+ }
+
+ if (vc4->gen >= VC4_GEN_6_C)
+ ret = vc6_plane_mode_set(plane, new_plane_state);
+ else
+ ret = vc4_plane_mode_set(plane, new_plane_state);
+ if (ret)
+ return ret;
+
+ if (!vc4_state->src_w[0] || !vc4_state->src_h[0] ||
+ !vc4_state->crtc_w || !vc4_state->crtc_h)
return 0;
- ret = vc4_plane_mode_set(plane, new_plane_state);
+ ret = vc4_plane_allocate_lbm(new_plane_state);
if (ret)
return ret;
- return vc4_plane_allocate_lbm(new_plane_state);
+ if (vc4->gen >= VC4_GEN_6_C) {
+ ret = vc6_plane_allocate_upm(new_plane_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void vc4_plane_atomic_update(struct drm_plane *plane,
@@ -1346,7 +2216,8 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
- uint32_t addr;
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
+ dma_addr_t dma_addr = bo->dma_addr + fb->offsets[0];
int idx;
if (!drm_dev_enter(plane->dev, &idx))
@@ -1356,19 +2227,38 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
* because this is only called on the primary plane.
*/
WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
- addr = bo->dma_addr + fb->offsets[0];
- /* Write the new address into the hardware immediately. The
- * scanout will start from this address as soon as the FIFO
- * needs to refill with pixels.
- */
- writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
+ if (vc4->gen == VC4_GEN_6_C) {
+ u32 value;
- /* Also update the CPU-side dlist copy, so that any later
- * atomic updates that don't do a new modeset on our plane
- * also use our updated address.
- */
- vc4_state->dlist[vc4_state->ptr0_offset] = addr;
+ value = vc4_state->dlist[vc4_state->ptr0_offset[0]] &
+ ~SCALER6_PTR0_UPPER_ADDR_MASK;
+ value |= VC4_SET_FIELD(upper_32_bits(dma_addr) & 0xff,
+ SCALER6_PTR0_UPPER_ADDR);
+
+ writel(value, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]);
+ vc4_state->dlist[vc4_state->ptr0_offset[0]] = value;
+
+ value = lower_32_bits(dma_addr);
+ writel(value, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0] + 1]);
+ vc4_state->dlist[vc4_state->ptr0_offset[0] + 1] = value;
+ } else {
+ u32 addr;
+
+ addr = (u32)dma_addr;
+
+ /* Write the new address into the hardware immediately. The
+ * scanout will start from this address as soon as the FIFO
+ * needs to refill with pixels.
+ */
+ writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]);
+
+ /* Also update the CPU-side dlist copy, so that any later
+ * atomic updates that don't do a new modeset on our plane
+ * also use our updated address.
+ */
+ vc4_state->dlist[vc4_state->ptr0_offset[0]] = addr;
+ }
drm_dev_exit(idx);
}
@@ -1423,8 +2313,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
sizeof(vc4_state->y_scaling));
vc4_state->is_unity = new_vc4_state->is_unity;
vc4_state->is_yuv = new_vc4_state->is_yuv;
- memcpy(vc4_state->offsets, new_vc4_state->offsets,
- sizeof(vc4_state->offsets));
vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;
/* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
@@ -1432,8 +2320,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
new_vc4_state->dlist[vc4_state->pos0_offset];
vc4_state->dlist[vc4_state->pos2_offset] =
new_vc4_state->dlist[vc4_state->pos2_offset];
- vc4_state->dlist[vc4_state->ptr0_offset] =
- new_vc4_state->dlist[vc4_state->ptr0_offset];
+ vc4_state->dlist[vc4_state->ptr0_offset[0]] =
+ new_vc4_state->dlist[vc4_state->ptr0_offset[0]];
/* Note that we can't just call vc4_plane_write_dlist()
* because that would smash the context data that the HVS is
@@ -1443,8 +2331,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
&vc4_state->hw_dlist[vc4_state->pos0_offset]);
writel(vc4_state->dlist[vc4_state->pos2_offset],
&vc4_state->hw_dlist[vc4_state->pos2_offset]);
- writel(vc4_state->dlist[vc4_state->ptr0_offset],
- &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
+ writel(vc4_state->dlist[vc4_state->ptr0_offset[0]],
+ &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]);
drm_dev_exit(idx);
}
@@ -1452,13 +2340,17 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *old_vc4_state, *new_vc4_state;
int ret;
u32 i;
- ret = vc4_plane_mode_set(plane, new_plane_state);
+ if (vc4->gen <= VC4_GEN_5)
+ ret = vc4_plane_mode_set(plane, new_plane_state);
+ else
+ ret = vc6_plane_mode_set(plane, new_plane_state);
if (ret)
return ret;
@@ -1471,7 +2363,7 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane,
if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
- old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
+ old_vc4_state->ptr0_offset[0] != new_vc4_state->ptr0_offset[0] ||
vc4_lbm_size(plane->state) != vc4_lbm_size(new_plane_state))
return -EINVAL;
@@ -1481,7 +2373,7 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane,
for (i = 0; i < new_vc4_state->dlist_count; i++) {
if (i == new_vc4_state->pos0_offset ||
i == new_vc4_state->pos2_offset ||
- i == new_vc4_state->ptr0_offset ||
+ i == new_vc4_state->ptr0_offset[0] ||
(new_vc4_state->lbm_offset &&
i == new_vc4_state->lbm_offset))
continue;
@@ -1632,7 +2524,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
};
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
- if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
+ if (!hvs_formats[i].hvs5_only || vc4->gen >= VC4_GEN_5) {
formats[num_formats] = hvs_formats[i].drm;
num_formats++;
}
@@ -1647,7 +2539,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
return ERR_CAST(vc4_plane);
plane = &vc4_plane->base;
- if (vc4->is_vc5)
+ if (vc4->gen >= VC4_GEN_5)
drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
else
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 8ac9515554f8..27158be19952 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -19,6 +19,20 @@
#define VC4_GET_FIELD(word, field) FIELD_GET(field##_MASK, word)
+#define VC6_SET_FIELD(value, field) \
+ ({ \
+ WARN_ON(!FIELD_FIT(hvs->vc4->gen == VC4_GEN_6_C ? \
+ SCALER6_ ## field ## _MASK : \
+ SCALER6D_ ## field ## _MASK, value));\
+ FIELD_PREP(hvs->vc4->gen == VC4_GEN_6_C ? \
+ SCALER6_ ## field ## _MASK : \
+ SCALER6D_ ## field ## _MASK, value); \
+ })
+
+#define VC6_GET_FIELD(word, field) FIELD_GET(hvs->vc4->gen == VC4_GEN_6_C ? \
+ SCALER6_ ## field ## _MASK : \
+ SCALER6D_ ## field ## _MASK, word)
+
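The macro picks the C0 or D0 mask at run time through the local `hvs` pointer, so callers name the field once. A hypothetical use (it assumes the field has both a SCALER6_ and a SCALER6D_ mask definition, which the macro requires):

/* Hypothetical: needs both SCALER6_DISPX_CTRL0_FWIDTH_MASK and
 * SCALER6D_DISPX_CTRL0_FWIDTH_MASK to be defined in scope. */
u32 ctrl0 = VC6_SET_FIELD(mode->hdisplay, DISPX_CTRL0_FWIDTH);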
#define V3D_IDENT0 0x00000
# define V3D_EXPECTED_IDENT0 \
((2 << 24) | \
@@ -155,6 +169,7 @@
# define PV_CONTROL_EN BIT(0)
#define PV_V_CONTROL 0x04
+# define PV_VCONTROL_ODD_TIMING BIT(29)
# define PV_VCONTROL_ODD_DELAY_MASK VC4_MASK(22, 6)
# define PV_VCONTROL_ODD_DELAY_SHIFT 6
# define PV_VCONTROL_ODD_FIRST BIT(5)
@@ -215,6 +230,11 @@
# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_SHIFT 2
# define PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP 8
+#define PV_PIPE_INIT_CTRL 0x94
+# define PV_PIPE_INIT_CTRL_PV_INIT_WIDTH_MASK VC4_MASK(11, 8)
+# define PV_PIPE_INIT_CTRL_PV_INIT_IDLE_MASK VC4_MASK(7, 4)
+# define PV_PIPE_INIT_CTRL_PV_INIT_EN BIT(0)
+
#define SCALER_CHANNELS_COUNT 3
#define SCALER_DISPCTRL 0x00000000
@@ -418,6 +438,10 @@
# define SCALER_DISPSTAT1_FRCNT0_SHIFT 18
# define SCALER_DISPSTAT1_FRCNT1_MASK VC4_MASK(17, 12)
# define SCALER_DISPSTAT1_FRCNT1_SHIFT 12
+# define SCALER5_DISPSTAT1_FRCNT0_MASK VC4_MASK(25, 20)
+# define SCALER5_DISPSTAT1_FRCNT0_SHIFT 20
+# define SCALER5_DISPSTAT1_FRCNT1_MASK VC4_MASK(19, 14)
+# define SCALER5_DISPSTAT1_FRCNT1_SHIFT 14
#define SCALER_DISPSTATX(x) (SCALER_DISPSTAT0 + \
(x) * (SCALER_DISPSTAT1 - \
@@ -436,6 +460,8 @@
#define SCALER_DISPSTAT2 0x00000068
# define SCALER_DISPSTAT2_FRCNT2_MASK VC4_MASK(17, 12)
# define SCALER_DISPSTAT2_FRCNT2_SHIFT 12
+# define SCALER5_DISPSTAT2_FRCNT2_MASK VC4_MASK(19, 14)
+# define SCALER5_DISPSTAT2_FRCNT2_SHIFT 14
#define SCALER_DISPBASE2 0x0000006c
#define SCALER_DISPALPHA2 0x00000070
@@ -514,6 +540,206 @@
#define SCALER5_DLIST_START 0x00004000
+#define SCALER6_VERSION 0x00000000
+# define SCALER6_VERSION_MASK VC4_MASK(7, 0)
+# define SCALER6_VERSION_C0 0x00000053
+# define SCALER6_VERSION_D0 0x00000054
+#define SCALER6_CXM_SIZE 0x00000004
+#define SCALER6_LBM_SIZE 0x00000008
+#define SCALER6_UBM_SIZE 0x0000000c
+#define SCALER6_COBA_SIZE 0x00000010
+#define SCALER6_COB_SIZE 0x00000014
+
+#define SCALER6_CONTROL 0x00000020
+# define SCALER6_CONTROL_HVS_EN BIT(31)
+# define SCALER6_CONTROL_PF_LINES_MASK VC4_MASK(22, 18)
+# define SCALER6_CONTROL_ABORT_ON_EMPTY BIT(16)
+# define SCALER6_CONTROL_DSP1_TARGET_MASK VC4_MASK(13, 12)
+# define SCALER6_CONTROL_MAX_REQS_MASK VC4_MASK(7, 4)
+
+#define SCALER6_FETCHER_STATUS 0x00000024
+#define SCALER6_FETCH_STATUS 0x00000028
+#define SCALER6_HANDLE_ERROR 0x0000002c
+
+#define SCALER6_DISP0_CTRL0 0x00000030
+#define SCALER6_DISPX_CTRL0(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_CTRL0 + ((x) * (SCALER6_DISP1_CTRL0 - SCALER6_DISP0_CTRL0))) : \
+ (SCALER6D_DISP0_CTRL0 + ((x) * (SCALER6D_DISP1_CTRL0 - SCALER6D_DISP0_CTRL0))))
+# define SCALER6_DISPX_CTRL0_ENB BIT(31)
+# define SCALER6_DISPX_CTRL0_RESET BIT(30)
+# define SCALER6_DISPX_CTRL0_FWIDTH_MASK VC4_MASK(28, 16)
+# define SCALER6_DISPX_CTRL0_ONESHOT BIT(15)
+# define SCALER6_DISPX_CTRL0_ONECTX_MASK VC4_MASK(14, 13)
+# define SCALER6_DISPX_CTRL0_LINES_MASK VC4_MASK(12, 0)
+
+#define SCALER6_DISP0_CTRL1 0x00000034
+#define SCALER6_DISPX_CTRL1(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_CTRL1 + ((x) * (SCALER6_DISP1_CTRL1 - SCALER6_DISP0_CTRL1))) : \
+ (SCALER6D_DISP0_CTRL1 + ((x) * (SCALER6D_DISP1_CTRL1 - SCALER6D_DISP0_CTRL1))))
+# define SCALER6_DISPX_CTRL1_BGENB BIT(8)
+# define SCALER6_DISPX_CTRL1_INTLACE BIT(0)
+
+#define SCALER6_DISP0_BGND 0x00000038
+#define SCALER6_DISPX_BGND(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_BGND + ((x) * (SCALER6_DISP1_BGND - SCALER6_DISP0_BGND))) : \
+ (SCALER6D_DISP0_BGND + ((x) * (SCALER6D_DISP1_BGND - SCALER6D_DISP0_BGND))))
+
+#define SCALER6_DISP0_LPTRS 0x0000003c
+#define SCALER6_DISPX_LPTRS(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_LPTRS + ((x) * (SCALER6_DISP1_LPTRS - SCALER6_DISP0_LPTRS))) : \
+ (SCALER6D_DISP0_LPTRS + ((x) * (SCALER6D_DISP1_LPTRS - SCALER6D_DISP0_LPTRS))))
+# define SCALER6_DISPX_LPTRS_HEADE_MASK VC4_MASK(11, 0)
+
+#define SCALER6_DISP0_COB 0x00000040
+#define SCALER6_DISPX_COB(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_COB + ((x) * (SCALER6_DISP1_COB - SCALER6_DISP0_COB))) : \
+ (SCALER6D_DISP0_COB + ((x) * (SCALER6D_DISP1_COB - SCALER6D_DISP0_COB))))
+# define SCALER6_DISPX_COB_TOP_MASK VC4_MASK(31, 16)
+# define SCALER6_DISPX_COB_BASE_MASK VC4_MASK(15, 0)
+
+#define SCALER6_DISP0_STATUS 0x00000044
+#define SCALER6_DISPX_STATUS(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_STATUS + ((x) * (SCALER6_DISP1_STATUS - SCALER6_DISP0_STATUS))) : \
+ (SCALER6D_DISP0_STATUS + ((x) * (SCALER6D_DISP1_STATUS - SCALER6D_DISP0_STATUS))))
+# define SCALER6_DISPX_STATUS_EMPTY BIT(22)
+# define SCALER6_DISPX_STATUS_FRCNT_MASK VC4_MASK(21, 16)
+# define SCALER6_DISPX_STATUS_OFIELD BIT(15)
+# define SCALER6_DISPX_STATUS_MODE_MASK VC4_MASK(14, 13)
+# define SCALER6_DISPX_STATUS_MODE_DISABLED 0
+# define SCALER6_DISPX_STATUS_MODE_INIT 1
+# define SCALER6_DISPX_STATUS_MODE_RUN 2
+# define SCALER6_DISPX_STATUS_MODE_EOF 3
+# define SCALER6_DISPX_STATUS_YLINE_MASK VC4_MASK(12, 0)
+
+#define SCALER6_DISP0_DL 0x00000048
+
+#define SCALER6_DISPX_DL(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? \
+ (SCALER6_DISP0_DL + ((x) * (SCALER6_DISP1_DL - SCALER6_DISP0_DL))) : \
+ (SCALER6D_DISP0_DL + ((x) * (SCALER6D_DISP1_DL - SCALER6D_DISP0_DL))))
+# define SCALER6_DISPX_DL_LACT_MASK VC4_MASK(11, 0)
+
+#define SCALER6_DISP0_RUN 0x0000004c
+#define SCALER6_DISP1_CTRL0 0x00000050
+#define SCALER6_DISP1_CTRL1 0x00000054
+#define SCALER6_DISP1_BGND 0x00000058
+#define SCALER6_DISP1_LPTRS 0x0000005c
+#define SCALER6_DISP1_COB 0x00000060
+#define SCALER6_DISP1_STATUS 0x00000064
+#define SCALER6_DISP1_DL 0x00000068
+#define SCALER6_DISP1_RUN 0x0000006c
+#define SCALER6_DISP2_CTRL0 0x00000070
+#define SCALER6_DISP2_CTRL1 0x00000074
+#define SCALER6_DISP2_BGND 0x00000078
+#define SCALER6_DISP2_LPTRS 0x0000007c
+#define SCALER6_DISP2_COB 0x00000080
+#define SCALER6_DISP2_STATUS 0x00000084
+#define SCALER6_DISP2_DL 0x00000088
+#define SCALER6_DISP2_RUN 0x0000008c
+#define SCALER6_EOLN 0x00000090
+#define SCALER6_DL_STATUS 0x00000094
+#define SCALER6_BFG_MISC 0x0000009c
+#define SCALER6_QOS0 0x000000a0
+#define SCALER6_PROF0 0x000000a4
+#define SCALER6_QOS1 0x000000a8
+#define SCALER6_PROF1 0x000000ac
+#define SCALER6_QOS2 0x000000b0
+#define SCALER6_PROF2 0x000000b4
+#define SCALER6_PRI_MAP0 0x000000b8
+#define SCALER6_PRI_MAP1 0x000000bc
+#define SCALER6_HISTCTRL 0x000000c0
+#define SCALER6_HISTBIN0 0x000000c4
+#define SCALER6_HISTBIN1 0x000000c8
+#define SCALER6_HISTBIN2 0x000000cc
+#define SCALER6_HISTBIN3 0x000000d0
+#define SCALER6_HISTBIN4 0x000000d4
+#define SCALER6_HISTBIN5 0x000000d8
+#define SCALER6_HISTBIN6 0x000000dc
+#define SCALER6_HISTBIN7 0x000000e0
+#define SCALER6_HDR_CFG_REMAP 0x000000f4
+#define SCALER6_COL_SPACE 0x000000f8
+#define SCALER6_HVS_ID 0x000000fc
+#define SCALER6_CFC1 0x00000100
+#define SCALER6_DISP_UPM_ISO0 0x00000200
+#define SCALER6_DISP_UPM_ISO1 0x00000204
+#define SCALER6_DISP_UPM_ISO2 0x00000208
+#define SCALER6_DISP_LBM_ISO0 0x0000020c
+#define SCALER6_DISP_LBM_ISO1 0x00000210
+#define SCALER6_DISP_LBM_ISO2 0x00000214
+#define SCALER6_DISP_COB_ISO0 0x00000218
+#define SCALER6_DISP_COB_ISO1 0x0000021c
+#define SCALER6_DISP_COB_ISO2 0x00000220
+#define SCALER6_BAD_COB 0x00000224
+#define SCALER6_BAD_LBM 0x00000228
+#define SCALER6_BAD_UPM 0x0000022c
+#define SCALER6_BAD_AXI 0x00000230
+
+#define SCALER6D_VERSION 0x00000000
+#define SCALER6D_CXM_SIZE 0x00000004
+#define SCALER6D_LBM_SIZE 0x00000008
+#define SCALER6D_UBM_SIZE 0x0000000c
+#define SCALER6D_COBA_SIZE 0x00000010
+#define SCALER6D_COB_SIZE 0x00000014
+#define SCALER6D_CONTROL 0x00000020
+#define SCALER6D_FETCHER_STATUS 0x00000024
+#define SCALER6D_FETCH_STATUS 0x00000028
+#define SCALER6D_HANDLE_ERROR 0x0000002c
+#define SCALER6D_EOLN 0x00000030
+#define SCALER6D_DL_STATUS 0x00000034
+#define SCALER6D_PRI_MAP0 0x00000038
+#define SCALER6D_PRI_MAP1 0x0000003c
+#define SCALER6D_HISTCTRL 0x000000d0
+#define SCALER6D_HISTBIN0 0x000000d4
+#define SCALER6D_HISTBIN1 0x000000d8
+#define SCALER6D_HISTBIN2 0x000000dc
+#define SCALER6D_HISTBIN3 0x000000e0
+#define SCALER6D_HISTBIN4 0x000000e4
+#define SCALER6D_HISTBIN5 0x000000e8
+#define SCALER6D_HISTBIN6 0x000000ec
+#define SCALER6D_HISTBIN7 0x000000f0
+#define SCALER6D_HVS_ID 0x000000fc
+
+#define SCALER6D_DISP0_CTRL0 0x00000100
+#define SCALER6D_DISP0_CTRL1 0x00000104
+#define SCALER6D_DISP0_BGND 0x00000108
+#define SCALER6D_DISP0_BGND0 0x00000108
+#define SCALER6D_DISP0_BGND1 0x0000010c
+#define SCALER6D_DISP0_LPTRS 0x00000110
+#define SCALER6D_DISP0_COB 0x00000114
+#define SCALER6D_DISP0_STATUS 0x00000118
+#define SCALER6D_DISP0_DL 0x0000011c
+#define SCALER6D_DISP0_RUN 0x00000120
+#define SCALER6D_QOS0 0x00000124
+#define SCALER6D_PROF0 0x00000128
+#define SCALER6D_DISP1_CTRL0 0x00000140
+#define SCALER6D_DISP1_CTRL1 0x00000144
+#define SCALER6D_DISP1_BGND0 0x00000148
+#define SCALER6D_DISP1_BGND1 0x0000014c
+#define SCALER6D_DISP1_LPTRS 0x00000150
+#define SCALER6D_DISP1_COB 0x00000154
+#define SCALER6D_DISP1_STATUS 0x00000158
+#define SCALER6D_DISP1_DL 0x0000015c
+#define SCALER6D_DISP1_RUN 0x00000160
+#define SCALER6D_QOS1 0x00000164
+#define SCALER6D_PROF1 0x00000168
+#define SCALER6D_DISP2_CTRL0 0x00000180
+#define SCALER6D_DISP2_CTRL1 0x00000184
+#define SCALER6D_DISP2_BGND0 0x00000188
+#define SCALER6D_DISP2_BGND1 0x0000018c
+#define SCALER6D_DISP2_LPTRS 0x00000190
+#define SCALER6D_DISP2_COB 0x00000194
+#define SCALER6D_DISP2_STATUS 0x00000198
+#define SCALER6D_DISP2_DL 0x0000019c
+#define SCALER6D_DISP2_RUN 0x000001a0
+#define SCALER6D_QOS2 0x000001a4
+#define SCALER6D_PROF2 0x000001a8
+
+#define SCALER6(x) ((hvs->vc4->gen == VC4_GEN_6_C) ? SCALER6_ ## x : SCALER6D_ ## x)
+
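With this, generation-agnostic code names a register once and the stepping-specific offset is resolved through `hvs`. An illustrative read, assuming the driver's usual HVS_READ accessor is in scope:

/* Resolves to SCALER6_CONTROL on C0 silicon, SCALER6D_CONTROL on D0 */
u32 control = HVS_READ(SCALER6(CONTROL));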
# define VC4_HDMI_SW_RESET_FORMAT_DETECT BIT(1)
# define VC4_HDMI_SW_RESET_HDMI BIT(0)
@@ -761,6 +987,15 @@ enum {
# define VC4_HD_MAI_THR_DREQLOW_MASK VC4_MASK(5, 0)
# define VC4_HD_MAI_THR_DREQLOW_SHIFT 0
+# define VC6_D_HD_MAI_THR_PANICHIGH_MASK VC4_MASK(29, 23)
+# define VC6_D_HD_MAI_THR_PANICHIGH_SHIFT 23
+# define VC6_D_HD_MAI_THR_PANICLOW_MASK VC4_MASK(21, 15)
+# define VC6_D_HD_MAI_THR_PANICLOW_SHIFT 15
+# define VC6_D_HD_MAI_THR_DREQHIGH_MASK VC4_MASK(13, 7)
+# define VC6_D_HD_MAI_THR_DREQHIGH_SHIFT 7
+# define VC6_D_HD_MAI_THR_DREQLOW_MASK VC4_MASK(6, 0)
+# define VC6_D_HD_MAI_THR_DREQLOW_SHIFT 0
+
/* Divider from HDMI HSM clock to MAI serial clock. Sampling period
* converges to N / (M + 1) cycles.
*/
@@ -777,6 +1012,7 @@ enum {
# define VC4_HD_VID_CTL_CLRSYNC BIT(24)
# define VC4_HD_VID_CTL_CLRRGB BIT(23)
# define VC4_HD_VID_CTL_BLANKPIX BIT(18)
+# define VC4_HD_VID_CTL_BLANK_INSERT_EN BIT(16)
# define VC4_HD_CSC_CTL_ORDER_MASK VC4_MASK(7, 5)
# define VC4_HD_CSC_CTL_ORDER_SHIFT 5
@@ -967,6 +1203,9 @@ enum hvs_pixel_format {
#define SCALER5_CTL2_ALPHA_MASK VC4_MASK(15, 4)
#define SCALER5_CTL2_ALPHA_SHIFT 4
+#define SCALER6D_CTL2_CSC_ENABLE BIT(19)
+#define SCALER6D_CTL2_BRCM_CFC_CONTROL_MASK VC4_MASK(22, 20)
+
#define SCALER_POS1_SCL_HEIGHT_MASK VC4_MASK(27, 16)
#define SCALER_POS1_SCL_HEIGHT_SHIFT 16
@@ -1108,4 +1347,63 @@ enum hvs_pixel_format {
#define SCALER_PITCH0_TILE_WIDTH_R_MASK VC4_MASK(6, 0)
#define SCALER_PITCH0_TILE_WIDTH_R_SHIFT 0
+#define SCALER6_CTL0_END BIT(31)
+#define SCALER6_CTL0_VALID BIT(30)
+#define SCALER6_CTL0_NEXT_MASK VC4_MASK(29, 24)
+#define SCALER6_CTL0_RGB_TRANS BIT(23)
+#define SCALER6_CTL0_ADDR_MODE_MASK VC4_MASK(22, 20)
+#define SCALER6_CTL0_ADDR_MODE_LINEAR 0
+#define SCALER6_CTL0_ADDR_MODE_128B 1
+#define SCALER6_CTL0_ADDR_MODE_256B 2
+#define SCALER6_CTL0_ADDR_MODE_MAP8 3
+#define SCALER6_CTL0_ADDR_MODE_UIF 4
+
+#define SCALER6_CTL0_ALPHA_MASK_MASK VC4_MASK(19, 18)
+#define SCALER6_CTL0_ALPHA_MASK_NONE 0
+#define SCALER6D_CTL0_ALPHA_MASK_FIXED 3
+#define SCALER6_CTL0_UNITY BIT(15)
+#define SCALER6_CTL0_ORDERRGBA_MASK VC4_MASK(14, 13)
+#define SCALER6_CTL0_SCL1_MODE_MASK VC4_MASK(10, 8)
+#define SCALER6_CTL0_SCL0_MODE_MASK VC4_MASK(7, 5)
+#define SCALER6_CTL0_PIXEL_FORMAT_MASK VC4_MASK(4, 0)
+
+#define SCALER6_POS0_START_Y_MASK VC4_MASK(28, 16)
+#define SCALER6_POS0_HFLIP BIT(15)
+#define SCALER6_POS0_START_X_MASK VC4_MASK(12, 0)
+
+#define SCALER6_CTL2_ALPHA_MODE_MASK VC4_MASK(31, 30)
+#define SCALER6_CTL2_ALPHA_PREMULT BIT(29)
+#define SCALER6_CTL2_ALPHA_MIX BIT(28)
+#define SCALER6_CTL2_BFG BIT(26)
+#define SCALER6C_CTL2_CSC_ENABLE BIT(25)
+#define SCALER6C_CTL2_BRCM_CFC_CONTROL_MASK VC4_MASK(18, 16)
+#define SCALER6_CTL2_ALPHA_MASK VC4_MASK(15, 4)
+
+#define SCALER6_POS1_SCL_LINES_MASK VC4_MASK(28, 16)
+#define SCALER6_POS1_SCL_WIDTH_MASK VC4_MASK(12, 0)
+
+#define SCALER6_POS2_SRC_LINES_MASK VC4_MASK(28, 16)
+#define SCALER6_POS2_SRC_WIDTH_MASK VC4_MASK(12, 0)
+
+#define SCALER6_PTR0_VFLIP BIT(31)
+#define SCALER6_PTR0_UPM_BASE_MASK VC4_MASK(28, 16)
+#define SCALER6_PTR0_UPM_HANDLE_MASK VC4_MASK(14, 10)
+#define SCALER6_PTR0_UPM_BUFF_SIZE_MASK VC4_MASK(9, 8)
+#define SCALER6_PTR0_UPM_BUFF_SIZE_16_LINES 3
+#define SCALER6_PTR0_UPM_BUFF_SIZE_8_LINES 2
+#define SCALER6_PTR0_UPM_BUFF_SIZE_4_LINES 1
+#define SCALER6_PTR0_UPM_BUFF_SIZE_2_LINES 0
+#define SCALER6_PTR0_UPPER_ADDR_MASK VC4_MASK(7, 0)
+
+#define SCALER6_PTR2_ALPHA_BPP_MASK VC4_MASK(31, 31)
+#define SCALER6_PTR2_ALPHA_BPP_1BPP 1
+#define SCALER6_PTR2_ALPHA_BPP_8BPP 0
+#define SCALER6_PTR2_ALPHA_ORDER_MASK VC4_MASK(30, 30)
+#define SCALER6_PTR2_ALPHA_ORDER_MSB_TO_LSB 1
+#define SCALER6_PTR2_ALPHA_ORDER_LSB_TO_MSB 0
+#define SCALER6_PTR2_ALPHA_OFFS_MASK VC4_MASK(29, 27)
+#define SCALER6_PTR2_LSKIP_MASK VC4_MASK(26, 24)
+#define SCALER6_PTR2_PITCH_MASK VC4_MASK(16, 0)
+#define SCALER6_PTR2_FETCH_COUNT_MASK VC4_MASK(26, 16)
+
#endif /* VC4_REGS_H */
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 1bda5010f15a..14079853338e 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -599,7 +599,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
bool has_bin = args->bin_cl_size != 0;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
if (args->min_x_tile > args->max_x_tile ||
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index ffe1f7d1b911..4eab069cda75 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -145,6 +145,9 @@
/* Number of lines received and committed to memory. */
#define TXP_PROGRESS 0x10
+#define TXP_DST_PTR_HIGH_MOPLET 0x1c
+#define TXP_DST_PTR_HIGH_MOP 0x24
+
#define TXP_READ(offset) \
({ \
kunit_fail_current_test("Accessing a register in a unit test!\n"); \
@@ -159,6 +162,7 @@
struct vc4_txp {
struct vc4_crtc base;
+ const struct vc4_txp_data *data;
struct platform_device *pdev;
@@ -286,9 +290,13 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
+ const struct vc4_txp_data *txp_data = txp->data;
struct drm_gem_dma_object *gem;
struct drm_display_mode *mode;
struct drm_framebuffer *fb;
+ unsigned int hdisplay;
+ unsigned int vdisplay;
+ dma_addr_t addr;
u32 ctrl;
int idx;
int i;
@@ -308,9 +316,11 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
return;
ctrl = TXP_GO | TXP_EI |
- VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
+ if (txp_data->has_byte_enable)
+ ctrl |= VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE);
+
if (fb->format->has_alpha)
ctrl |= TXP_ALPHA_ENABLE;
else
@@ -324,11 +334,25 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
return;
gem = drm_fb_dma_get_gem_obj(fb, 0);
- TXP_WRITE(TXP_DST_PTR, gem->dma_addr + fb->offsets[0]);
+ addr = gem->dma_addr + fb->offsets[0];
+
+ TXP_WRITE(TXP_DST_PTR, lower_32_bits(addr));
+
+ if (txp_data->supports_40bit_addresses)
+ TXP_WRITE(txp_data->high_addr_ptr_reg, upper_32_bits(addr) & 0xff);
+
TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
+
+ hdisplay = mode->hdisplay ?: 1;
+ vdisplay = mode->vdisplay ?: 1;
+ if (txp_data->size_minus_one) {
+ hdisplay -= 1;
+ vdisplay -= 1;
+ }
+
TXP_WRITE(TXP_DIM,
- VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
- VC4_SET_FIELD(mode->vdisplay, TXP_HEIGHT));
+ VC4_SET_FIELD(hdisplay, TXP_WIDTH) |
+ VC4_SET_FIELD(vdisplay, TXP_HEIGHT));
TXP_WRITE(TXP_DST_CTRL, ctrl);
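The BCM2712 MOP/MOPLET variants program dimensions as size-minus-one and split a 40-bit destination address across the low pointer and a high-address register, as the hunk above shows. A compact restatement of just that arithmetic (illustrative values, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int hdisplay = 1920, vdisplay = 1080;
	uint64_t addr = 0x0155aa0000ULL;	/* sample 40-bit DMA address */
	int size_minus_one = 1;			/* bcm2712 MOP/MOPLET */

	if (size_minus_one) {
		hdisplay -= 1;
		vdisplay -= 1;
	}

	printf("TXP_DIM: %ux%u, PTR: lo=0x%08x hi=0x%02x\n",
	       hdisplay, vdisplay,
	       (uint32_t)addr, (uint32_t)(addr >> 32) & 0xff);
	return 0;
}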
@@ -362,6 +386,7 @@ static const struct drm_connector_funcs vc4_txp_connector_funcs = {
static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
int idx;
@@ -380,7 +405,8 @@ static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
WARN_ON(TXP_READ(TXP_DST_CTRL) & TXP_BUSY);
}
- TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
+ if (vc4->gen < VC4_GEN_6_C)
+ TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
drm_dev_exit(idx);
}
@@ -484,17 +510,49 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-const struct vc4_crtc_data vc4_txp_crtc_data = {
- .name = "txp",
- .debugfs_name = "txp_regs",
- .hvs_available_channels = BIT(2),
- .hvs_output = 2,
+static const struct vc4_txp_data bcm2712_mop_data = {
+ .base = {
+ .name = "mop",
+ .debugfs_name = "mop_regs",
+ .hvs_available_channels = BIT(2),
+ .hvs_output = 2,
+ },
+ .encoder_type = VC4_ENCODER_TYPE_TXP0,
+ .high_addr_ptr_reg = TXP_DST_PTR_HIGH_MOP,
+ .has_byte_enable = true,
+ .size_minus_one = true,
+ .supports_40bit_addresses = true,
+};
+
+static const struct vc4_txp_data bcm2712_moplet_data = {
+ .base = {
+ .name = "moplet",
+ .debugfs_name = "moplet_regs",
+ .hvs_available_channels = BIT(1),
+ .hvs_output = 4,
+ },
+ .encoder_type = VC4_ENCODER_TYPE_TXP1,
+ .high_addr_ptr_reg = TXP_DST_PTR_HIGH_MOPLET,
+ .size_minus_one = true,
+ .supports_40bit_addresses = true,
+};
+
+const struct vc4_txp_data bcm2835_txp_data = {
+ .base = {
+ .name = "txp",
+ .debugfs_name = "txp_regs",
+ .hvs_available_channels = BIT(2),
+ .hvs_output = 2,
+ },
+ .encoder_type = VC4_ENCODER_TYPE_TXP0,
+ .has_byte_enable = true,
};
static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
+ const struct vc4_txp_data *txp_data;
struct vc4_encoder *vc4_encoder;
struct drm_encoder *encoder;
struct vc4_crtc *vc4_crtc;
@@ -509,6 +567,11 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
if (!txp)
return -ENOMEM;
+ txp_data = of_device_get_match_data(dev);
+ if (!txp_data)
+ return -ENODEV;
+
+ txp->data = txp_data;
txp->pdev = pdev;
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
@@ -519,13 +582,13 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
vc4_crtc->regset.regs = txp_regs;
vc4_crtc->regset.nregs = ARRAY_SIZE(txp_regs);
- ret = vc4_crtc_init(drm, pdev, vc4_crtc, &vc4_txp_crtc_data,
+ ret = vc4_crtc_init(drm, pdev, vc4_crtc, &txp_data->base,
&vc4_txp_crtc_funcs, &vc4_txp_crtc_helper_funcs, true);
if (ret)
return ret;
vc4_encoder = &txp->encoder;
- txp->encoder.type = VC4_ENCODER_TYPE_TXP;
+ txp->encoder.type = txp_data->encoder_type;
encoder = &vc4_encoder->base;
encoder->possible_crtcs = drm_crtc_mask(&vc4_crtc->base);
@@ -579,13 +642,15 @@ static void vc4_txp_remove(struct platform_device *pdev)
}
static const struct of_device_id vc4_txp_dt_match[] = {
- { .compatible = "brcm,bcm2835-txp" },
+ { .compatible = "brcm,bcm2712-mop", .data = &bcm2712_mop_data },
+ { .compatible = "brcm,bcm2712-moplet", .data = &bcm2712_moplet_data },
+ { .compatible = "brcm,bcm2835-txp", .data = &bcm2835_txp_data },
{ /* sentinel */ },
};
struct platform_driver vc4_txp_driver = {
.probe = vc4_txp_probe,
- .remove_new = vc4_txp_remove,
+ .remove = vc4_txp_remove,
.driver = {
.name = "vc4_txp",
.of_match_table = vc4_txp_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index bf5c4e36c94e..bb09df5000bd 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -127,7 +127,7 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
int
vc4_v3d_pm_get(struct vc4_dev *vc4)
{
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
mutex_lock(&vc4->power_lock);
@@ -148,7 +148,7 @@ vc4_v3d_pm_get(struct vc4_dev *vc4)
void
vc4_v3d_pm_put(struct vc4_dev *vc4)
{
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
mutex_lock(&vc4->power_lock);
@@ -178,7 +178,7 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
uint64_t seqno = 0;
struct vc4_exec_info *exec;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
try_again:
@@ -325,7 +325,7 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
{
int ret = 0;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
mutex_lock(&vc4->bin_bo_lock);
@@ -360,7 +360,7 @@ static void bin_bo_release(struct kref *ref)
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
{
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return;
mutex_lock(&vc4->bin_bo_lock);
@@ -534,7 +534,7 @@ const struct of_device_id vc4_v3d_dt_match[] = {
struct platform_driver vc4_v3d_driver = {
.probe = vc4_v3d_dev_probe,
- .remove_new = vc4_v3d_dev_remove,
+ .remove = vc4_v3d_dev_remove,
.driver = {
.name = "vc4_v3d",
.of_match_table = vc4_v3d_dt_match,
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 0c17284bf6f5..5bf134968ade 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
struct drm_gem_dma_object *obj;
struct vc4_bo *bo;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return NULL;
if (hindex >= exec->bo_count) {
@@ -169,7 +169,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo,
uint32_t utile_w = utile_width(cpp);
uint32_t utile_h = utile_height(cpp);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return false;
/* The shaded vertex format stores signed 12.4 fixed point
@@ -495,7 +495,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
uint32_t dst_offset = 0;
uint32_t src_offset = 0;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
while (src_offset < len) {
@@ -942,7 +942,7 @@ vc4_validate_shader_recs(struct drm_device *dev,
uint32_t i;
int ret = 0;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return -ENODEV;
for (i = 0; i < exec->shader_state_count; i++) {
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 9745f8810eca..2d74e786914c 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -786,7 +786,7 @@ vc4_validate_shader(struct drm_gem_dma_object *shader_obj)
struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
return NULL;
memset(&validation_state, 0, sizeof(validation_state));
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index eb64e881051e..06d702e879b0 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -848,7 +848,7 @@ static void vc4_vec_dev_remove(struct platform_device *pdev)
struct platform_driver vc4_vec_driver = {
.probe = vc4_vec_dev_probe,
- .remove_new = vc4_vec_dev_remove,
+ .remove = vc4_vec_dev_remove,
.driver = {
.name = "vc4_vec",
.of_match_table = vc4_vec_dt_match,
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index c5e3e5457737..2752ab4f1c97 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -47,7 +47,6 @@
#define DRIVER_NAME "vgem"
#define DRIVER_DESC "Virtual GEM provider"
-#define DRIVER_DATE "20120112"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -121,7 +120,6 @@ static const struct drm_driver vgem_driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index ea06ff2aa4b4..fc884fb57b7e 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -3,6 +3,7 @@ config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO_MENU && MMU
select VIRTIO
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select VIRTIO_DMA_SHARED_BUFFER
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index e5a2665e50ea..6a67c6297d58 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -26,13 +26,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/wait.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
@@ -58,7 +59,7 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev)
vga ? "virtio-vga" : "virtio-gpu-pci",
pname);
if (vga) {
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);
if (ret)
return ret;
}
@@ -103,7 +104,8 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
if (ret)
goto err_deinit;
- drm_fbdev_shmem_setup(vdev->priv, 32);
+ drm_client_setup(vdev->priv, NULL);
+
return 0;
err_deinit:
@@ -182,7 +184,8 @@ static const struct drm_driver driver = {
.postclose = virtio_gpu_driver_postclose,
.dumb_create = virtio_gpu_mode_dumb_create,
- .dumb_map_offset = virtio_gpu_mode_dumb_mmap,
+
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
@@ -198,7 +201,6 @@ static const struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 64c236169db8..f42ca9d8ed10 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -45,7 +45,6 @@
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
-#define DRIVER_DATE "0"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
@@ -89,9 +88,11 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
struct drm_gem_shmem_object base;
+ struct sg_table *sgt;
uint32_t hw_res_handle;
bool dumb;
bool created;
+ bool attached;
bool host3d_blob, guest_blob;
uint32_t blob_mem, blob_flags;
@@ -194,6 +195,13 @@ struct virtio_gpu_framebuffer {
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
+struct virtio_gpu_plane_state {
+ struct drm_plane_state base;
+ struct virtio_gpu_fence *fence;
+};
+#define to_virtio_gpu_plane_state(x) \
+ container_of(x, struct virtio_gpu_plane_state, base)
+
struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
@@ -301,9 +309,6 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p);
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
struct virtio_gpu_object_array*
@@ -349,6 +354,10 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
struct virtio_gpu_mem_entry *ents,
unsigned int nents);
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence);
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
@@ -468,6 +477,10 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach);
/* virtgpu_debugfs.c */
void virtio_gpu_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 7db48d17ee3a..5aab588fc400 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -99,21 +99,6 @@ fail:
return ret;
}
-int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
-{
- struct drm_gem_object *gobj;
-
- BUG_ON(!offset_p);
- gobj = drm_gem_object_lookup(file_priv, handle);
- if (gobj == NULL)
- return -ENOENT;
- *offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
- drm_gem_object_put(gobj);
- return 0;
-}
-
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file)
{
@@ -127,15 +112,17 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
/* the context might still be missing when the first ioctl is
* DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
*/
- virtio_gpu_create_context(obj->dev, file);
+ if (!vgdev->has_context_init)
+ virtio_gpu_create_context(obj->dev, file);
objs = virtio_gpu_array_alloc(1);
if (!objs)
return -ENOMEM;
virtio_gpu_array_add_obj(objs, obj);
- virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
- objs);
+ if (vfpriv->ctx_id)
+ virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
+
out_notify:
virtio_gpu_notify(vgdev);
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index e4f76f315550..c33c057365f8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -80,9 +80,9 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_map *virtio_gpu_map = data;
- return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
- virtio_gpu_map->handle,
- &virtio_gpu_map->offset);
+ return drm_gem_dumb_map_offset(file, vgdev->ddev,
+ virtio_gpu_map->handle,
+ &virtio_gpu_map->offset);
}
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index c7e74cf13022..5517cff8715c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -80,6 +80,9 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
drm_gem_free_mmap_offset(&vram->base.base.base);
drm_gem_object_release(&vram->base.base.base);
kfree(vram);
+ } else {
+ drm_gem_object_release(&bo->base.base);
+ kfree(bo);
}
}
@@ -97,6 +100,27 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
virtio_gpu_cleanup_object(bo);
}
+int virtio_gpu_detach_object_fenced(struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_fence *fence;
+
+ if (!bo->attached)
+ return 0;
+
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
+ if (!fence)
+ return -ENOMEM;
+
+ virtio_gpu_object_detach(vgdev, bo, fence);
+ virtio_gpu_notify(vgdev);
+
+ dma_fence_wait(&fence->f, false);
+ dma_fence_put(&fence->f);
+
+ return 0;
+}
+
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a72a2dbda031..42aa554eca9f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,6 +26,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <linux/virtio_dma_buf.h>
#include "virtgpu_drv.h"
@@ -66,11 +68,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
}
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct virtio_gpu_plane_state *new;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+ return &new->base;
+}
+
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
@@ -138,11 +157,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
- if (vgfb->fence) {
+ if (vgplane_st->fence) {
struct virtio_gpu_object_array *objs;
objs = virtio_gpu_array_alloc(1);
@@ -151,13 +172,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane,
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
- width, height, objs, vgfb->fence);
+ width, height, objs,
+ vgplane_st->fence);
virtio_gpu_notify(vgdev);
-
- dma_fence_wait_timeout(&vgfb->fence->f, true,
+ dma_fence_wait_timeout(&vgplane_st->fence->f, true,
msecs_to_jiffies(50));
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
} else {
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
width, height, NULL, NULL);
@@ -241,45 +260,113 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
rect.y2 - rect.y1);
}
+static int virtio_gpu_prepare_imported_obj(struct drm_plane *plane,
+ struct drm_plane_state *new_state,
+ struct drm_gem_object *obj)
+{
+ struct virtio_gpu_device *vgdev = plane->dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret) {
+ dma_resv_unlock(resv);
+ return ret;
+ }
+
+ if (!bo->sgt) {
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents,
+ bo, attach);
+ if (ret)
+ goto err;
+
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ }
+
+ dma_resv_unlock(resv);
+ return 0;
+
+err:
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+ return ret;
+}
+
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo;
+ struct drm_gem_object *obj;
+ int ret;
if (!new_state->fb)
return 0;
vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(new_state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
return 0;
- if (bo->dumb && (plane->state->fb != new_state->fb)) {
- vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ obj = new_state->fb->obj[0];
+ if (obj->import_attach) {
+ ret = virtio_gpu_prepare_imported_obj(plane, new_state, obj);
+ if (ret)
+ return ret;
+ }
+
+ if (bo->dumb || obj->import_attach) {
+ vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
+ vgdev->fence_drv.context,
0);
- if (!vgfb->fence)
+ if (!vgplane_st->fence)
return -ENOMEM;
}
return 0;
}
+static void virtio_gpu_cleanup_imported_obj(struct drm_gem_object *obj)
+{
+ struct dma_buf_attachment *attach = obj->import_attach;
+ struct dma_resv *resv = attach->dmabuf->resv;
+
+ dma_resv_lock(resv, NULL);
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+}
+
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
+ struct drm_gem_object *obj;
if (!state->fb)
return;
- vgfb = to_virtio_gpu_framebuffer(state->fb);
- if (vgfb->fence) {
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ vgplane_st = to_virtio_gpu_plane_state(state);
+ if (vgplane_st->fence) {
+ dma_fence_put(&vgplane_st->fence->f);
+ vgplane_st->fence = NULL;
}
+
+ obj = state->fb->obj[0];
+ if (obj->import_attach)
+ virtio_gpu_cleanup_imported_obj(obj);
}
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
@@ -291,6 +378,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_plane_state *vgplane_st;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
@@ -303,6 +391,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgplane_st = to_virtio_gpu_plane_state(plane->state);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
} else {
@@ -322,11 +411,9 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
(vgdev, 0,
plane->state->crtc_w,
plane->state->crtc_h,
- 0, 0, objs, vgfb->fence);
+ 0, 0, objs, vgplane_st->fence);
virtio_gpu_notify(vgdev);
- dma_fence_wait(&vgfb->fence->f, true);
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
+ dma_fence_wait(&vgplane_st->fence->f, true);
}
if (plane->state->fb != old_state->fb) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 44425f20d91a..f92133a01195 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -27,6 +27,8 @@
#include "virtgpu_drv.h"
+MODULE_IMPORT_NS("DMA_BUF");
+
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
uuid_t *uuid)
{
@@ -142,10 +144,160 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
return buf;
}
+int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct scatterlist *sl;
+ struct sg_table *sgt;
+ long i, ret;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0)
+ return ret < 0 ? ret : -ETIMEDOUT;
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ *ents = kvmalloc_array(sgt->nents,
+ sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!(*ents)) {
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+
+ *nents = sgt->nents;
+ for_each_sgtable_dma_sg(sgt, sl, i) {
+ (*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
+ (*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
+ (*ents)[i].padding = 0;
+ }
+
+ bo->sgt = sgt;
+ return 0;
+}
+
+static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct dma_buf_attachment *attach = obj->import_attach;
+
+ if (attach) {
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ dma_resv_unlock(dmabuf->resv);
+
+ dma_buf_detach(dmabuf, attach);
+ dma_buf_put(dmabuf);
+ }
+
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
+}
+
+static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
+ struct virtio_gpu_object *bo,
+ struct dma_buf_attachment *attach)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_object_params params = { 0 };
+ struct dma_resv *resv = attach->dmabuf->resv;
+ struct virtio_gpu_mem_entry *ents = NULL;
+ unsigned int nents;
+ int ret;
+
+ ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+ if (ret) {
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+ }
+
+ dma_resv_lock(resv, NULL);
+
+ ret = dma_buf_pin(attach);
+ if (ret)
+ goto err_pin;
+
+ ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
+ if (ret)
+ goto err_import;
+
+ params.blob = true;
+ params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+ params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ params.size = attach->dmabuf->size;
+
+ virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
+ ents, nents);
+ bo->guest_blob = true;
+ bo->attached = true;
+
+ dma_buf_unpin(attach);
+ dma_resv_unlock(resv);
+
+ return 0;
+
+err_import:
+ dma_buf_unpin(attach);
+err_pin:
+ dma_resv_unlock(resv);
+ virtgpu_dma_buf_free_obj(&bo->base.base);
+ return ret;
+}
+
+static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
+ .free = virtgpu_dma_buf_free_obj,
+};
+
+static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (bo->created && kref_read(&obj->refcount)) {
+ virtio_gpu_detach_object_fenced(bo);
+
+ if (bo->sgt)
+ dma_buf_unmap_attachment(attach, bo->sgt,
+ DMA_BIDIRECTIONAL);
+
+ bo->sgt = NULL;
+ }
+}
+
+static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
+ .allow_peer2peer = true,
+ .move_notify = virtgpu_dma_buf_move_notify
+};
+
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *buf)
{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct dma_buf_attachment *attach;
+ struct virtio_gpu_object *bo;
struct drm_gem_object *obj;
+ int ret;
if (buf->ops == &virtgpu_dmabuf_ops.ops) {
obj = buf->priv;
@@ -159,7 +311,32 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
}
}
- return drm_gem_prime_import(dev, buf);
+ if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
+ return drm_gem_prime_import(dev, buf);
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &bo->base.base;
+ obj->funcs = &virtgpu_gem_dma_buf_funcs;
+ drm_gem_private_object_init(dev, obj, buf->size);
+
+ attach = dma_buf_dynamic_attach(buf, dev->dev,
+ &virtgpu_dma_buf_attach_ops, obj);
+ if (IS_ERR(attach)) {
+ kfree(bo);
+ return ERR_CAST(attach);
+ }
+
+ obj->import_attach = attach;
+ get_dma_buf(buf);
+
+ ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return obj;
}
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 0d3d0d09f39b..ad91624df42d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -645,6 +645,23 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
+static void
+virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ struct virtio_gpu_fence *fence)
+{
+ struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
+ cmd_p->resource_id = cpu_to_le32(resource_id);
+
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+}
+
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -1103,8 +1120,26 @@ void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_mem_entry *ents,
unsigned int nents)
{
+ if (obj->attached)
+ return;
+
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents, NULL);
+
+ obj->attached = true;
+}
+
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_fence *fence)
+{
+ if (!obj->attached)
+ return;
+
+ virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
+ fence);
+
+ obj->attached = false;
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig
index b9ecdebecb0b..9def079f685b 100644
--- a/drivers/gpu/drm/vkms/Kconfig
+++ b/drivers/gpu/drm/vkms/Kconfig
@@ -3,6 +3,7 @@
config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM && MMU
+ select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select CRC32
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index e7441b227b3c..b20ac1705726 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -24,64 +24,33 @@ static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
/**
* pre_mul_alpha_blend - alpha blending equation
- * @frame_info: Source framebuffer's metadata
* @stage_buffer: The line with the pixels from src_plane
* @output_buffer: A line buffer that receives all the blends output
+ * @x_start: The start offset
+ * @pixel_count: The number of pixels to blend
*
- * Using the information from the `frame_info`, this blends only the
- * necessary pixels from the `stage_buffer` to the `output_buffer`
- * using premultiplied blend formula.
+ * The pixels [@x_start; @x_start + @pixel_count) in stage_buffer are blended into
+ * [@x_start; @x_start + @pixel_count) in output_buffer.
*
* The current DRM assumption is that pixel color values have been already
* pre-multiplied with the alpha channel values. See more
* drm_plane_create_blend_mode_property(). Also, this formula assumes a
* completely opaque background.
*/
-static void pre_mul_alpha_blend(struct vkms_frame_info *frame_info,
- struct line_buffer *stage_buffer,
- struct line_buffer *output_buffer)
+static void pre_mul_alpha_blend(const struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer, int x_start, int pixel_count)
{
- int x_dst = frame_info->dst.x1;
- struct pixel_argb_u16 *out = output_buffer->pixels + x_dst;
- struct pixel_argb_u16 *in = stage_buffer->pixels;
- int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
- stage_buffer->n_pixels);
-
- for (int x = 0; x < x_limit; x++) {
- out[x].a = (u16)0xffff;
- out[x].r = pre_mul_blend_channel(in[x].r, out[x].r, in[x].a);
- out[x].g = pre_mul_blend_channel(in[x].g, out[x].g, in[x].a);
- out[x].b = pre_mul_blend_channel(in[x].b, out[x].b, in[x].a);
+ struct pixel_argb_u16 *out = &output_buffer->pixels[x_start];
+ const struct pixel_argb_u16 *in = &stage_buffer->pixels[x_start];
+
+ for (int i = 0; i < pixel_count; i++) {
+ out[i].a = (u16)0xffff;
+ out[i].r = pre_mul_blend_channel(in[i].r, out[i].r, in[i].a);
+ out[i].g = pre_mul_blend_channel(in[i].g, out[i].g, in[i].a);
+ out[i].b = pre_mul_blend_channel(in[i].b, out[i].b, in[i].a);
}
}
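
A standalone illustration (not kernel code) of the premultiplied blend done per channel above;
blend_channel() is a hypothetical stand-in for the driver's pre_mul_blend_channel() helper, assuming
the usual out = src + dst * (1 - alpha) formula on 16-bit channels.

#include <stdint.h>
#include <stdio.h>

static uint16_t blend_channel(uint16_t src, uint16_t dst, uint16_t alpha)
{
        /* src is already premultiplied by alpha */
        return src + (uint16_t)(((uint32_t)dst * (0xffff - alpha)) / 0xffff);
}

int main(void)
{
        /* 50% opaque premultiplied red over an opaque green background */
        uint16_t r = blend_channel(0x7fff, 0x0000, 0x7fff); /* 0x7fff */
        uint16_t g = blend_channel(0x0000, 0xffff, 0x7fff); /* 0x8000 */

        printf("r=0x%04x g=0x%04x\n", r, g);
        return 0;
}
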
-static int get_y_pos(struct vkms_frame_info *frame_info, int y)
-{
- if (frame_info->rotation & DRM_MODE_REFLECT_Y)
- return drm_rect_height(&frame_info->rotated) - y - 1;
-
- switch (frame_info->rotation & DRM_MODE_ROTATE_MASK) {
- case DRM_MODE_ROTATE_90:
- return frame_info->rotated.x2 - y - 1;
- case DRM_MODE_ROTATE_270:
- return y + frame_info->rotated.x1;
- default:
- return y;
- }
-}
-
-static bool check_limit(struct vkms_frame_info *frame_info, int pos)
-{
- if (drm_rotation_90_or_270(frame_info->rotation)) {
- if (pos >= 0 && pos < drm_rect_width(&frame_info->rotated))
- return true;
- } else {
- if (pos >= frame_info->rotated.y1 && pos < frame_info->rotated.y2)
- return true;
- }
-
- return false;
-}
static void fill_background(const struct pixel_argb_u16 *background_color,
struct line_buffer *output_buffer)
@@ -96,7 +65,7 @@ static u16 lerp_u16(u16 a, u16 b, s64 t)
s64 a_fp = drm_int2fixp(a);
s64 b_fp = drm_int2fixp(b);
- s64 delta = drm_fixp_mul(b_fp - a_fp, t);
+ s64 delta = drm_fixp_mul(b_fp - a_fp, t);
return drm_fixp2int(a_fp + delta);
}
@@ -164,6 +133,226 @@ static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buff
}
/**
+ * direction_for_rotation() - Get the correct reading direction for a given rotation
+ *
+ * @rotation: Rotation to analyze. It corresponds to the field @frame_info.rotation.
+ *
+ * This function uses the @rotation setting of a source plane to compute the reading
+ * direction in this plane which corresponds to a "left to right writing" in the CRTC.
+ * For example, if the buffer is reflected on the X axis, the pixels must be read from
+ * right to left to be written from left to right on the CRTC.
+ */
+static enum pixel_read_direction direction_for_rotation(unsigned int rotation)
+{
+ struct drm_rect tmp_a, tmp_b;
+ int x, y;
+
+ /*
+ * Points A and B are depicted as zero-size rectangles on the CRTC.
+ * The CRTC writing direction is from A to B. To find the plane reading
+ * direction, the vector AB (top-left to top-right of a 1x1 square) is
+ * inverse-transformed into the plane; its X/Y variation gives the
+ * reading direction.
+ */
+
+ tmp_a = DRM_RECT_INIT(0, 0, 0, 0);
+ tmp_b = DRM_RECT_INIT(1, 0, 0, 0);
+ drm_rect_rotate_inv(&tmp_a, 1, 1, rotation);
+ drm_rect_rotate_inv(&tmp_b, 1, 1, rotation);
+
+ x = tmp_b.x1 - tmp_a.x1;
+ y = tmp_b.y1 - tmp_a.y1;
+
+ if (x == 1 && y == 0)
+ return READ_LEFT_TO_RIGHT;
+ else if (x == -1 && y == 0)
+ return READ_RIGHT_TO_LEFT;
+ else if (y == 1 && x == 0)
+ return READ_TOP_TO_BOTTOM;
+ else if (y == -1 && x == 0)
+ return READ_BOTTOM_TO_TOP;
+
+ WARN_ONCE(true, "The inverse of the rotation gives an incorrect direction.");
+ return READ_LEFT_TO_RIGHT;
+}
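
A short usage sketch, hypothetical and not part of the patch, confirming the example given in the
kerneldoc above: a plane reflected on the X axis is read from right to left.

static void example_reflect_x(void)
{
        enum pixel_read_direction dir =
                direction_for_rotation(DRM_MODE_REFLECT_X);

        /* Matches the kerneldoc example: reflect-X reads right to left. */
        WARN_ON(dir != READ_RIGHT_TO_LEFT);
}
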
+
+/**
+ * clamp_line_coordinates() - Compute and clamp the coordinates to read and write during the
+ * blend process.
+ *
+ * @direction: direction of the reading
+ * @current_plane: current plane blended
+ * @src_line: source line of the reading. Only the top-left coordinate is used. This rectangle
+ * must be rotated and have a shape of 1*pixel_count if @direction is vertical and a shape of
+ * pixel_count*1 if @direction is horizontal.
+ * @src_x_start: x start coordinate for the line reading
+ * @src_y_start: y start coordinate for the line reading
+ * @dst_x_start: x coordinate to blend the read line
+ * @pixel_count: number of pixels to blend
+ *
+ * This function is mainly a safety net to avoid reading outside the source buffer. As
+ * userspace should never ask to read outside the source plane, all the cases covered here
+ * should be dead code.
+ */
+static void clamp_line_coordinates(enum pixel_read_direction direction,
+ const struct vkms_plane_state *current_plane,
+ const struct drm_rect *src_line, int *src_x_start,
+ int *src_y_start, int *dst_x_start, int *pixel_count)
+{
+ /* By default the start points are correct */
+ *src_x_start = src_line->x1;
+ *src_y_start = src_line->y1;
+ *dst_x_start = current_plane->frame_info->dst.x1;
+
+ /* Get the correct number of pixels to blend; it depends on the direction */
+ switch (direction) {
+ case READ_LEFT_TO_RIGHT:
+ case READ_RIGHT_TO_LEFT:
+ *pixel_count = drm_rect_width(src_line);
+ break;
+ case READ_BOTTOM_TO_TOP:
+ case READ_TOP_TO_BOTTOM:
+ *pixel_count = drm_rect_height(src_line);
+ break;
+ }
+
+ /*
+ * Clamp the coordinates to avoid reading outside the buffer
+ *
+ * This is mainly a safety check: userspace should never request a read outside
+ * the source buffer.
+ */
+ switch (direction) {
+ case READ_LEFT_TO_RIGHT:
+ case READ_RIGHT_TO_LEFT:
+ if (*src_x_start < 0) {
+ *pixel_count += *src_x_start;
+ *dst_x_start -= *src_x_start;
+ *src_x_start = 0;
+ }
+ if (*src_x_start + *pixel_count > current_plane->frame_info->fb->width)
+ *pixel_count = max(0, (int)current_plane->frame_info->fb->width -
+ *src_x_start);
+ break;
+ case READ_BOTTOM_TO_TOP:
+ case READ_TOP_TO_BOTTOM:
+ if (*src_y_start < 0) {
+ *pixel_count += *src_y_start;
+ *dst_x_start -= *src_y_start;
+ *src_y_start = 0;
+ }
+ if (*src_y_start + *pixel_count > current_plane->frame_info->fb->height)
+ *pixel_count = max(0, (int)current_plane->frame_info->fb->height -
+ *src_y_start);
+ break;
+ }
+}
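
To make the clamping concrete, a worked example with hypothetical numbers: a left-to-right read of
10 pixels starting at src_x_start = -3 in a 100-pixel-wide framebuffer ends up as

        pixel_count += -3;      /* 7: the 3 off-screen pixels are dropped */
        dst_x_start -= -3;      /* the write position shifts right by 3 */
        src_x_start = 0;        /* reading starts at the buffer edge */

so the blended span stays inside both the source and destination buffers.
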
+
+/**
+ * blend_line() - Blend a line from a plane to the output buffer
+ *
+ * @current_plane: current plane to work on
+ * @y: line to write in the output buffer
+ * @crtc_x_limit: width of the output buffer
+ * @stage_buffer: temporary buffer to convert the pixel line from the source buffer
+ * @output_buffer: buffer to blend the read line into.
+ */
+static void blend_line(struct vkms_plane_state *current_plane, int y,
+ int crtc_x_limit, struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer)
+{
+ int src_x_start, src_y_start, dst_x_start, pixel_count;
+ struct drm_rect dst_line, tmp_src, src_line;
+
+ /* Avoid rendering useless lines */
+ if (y < current_plane->frame_info->dst.y1 ||
+ y >= current_plane->frame_info->dst.y2)
+ return;
+
+ /*
+ * dst_line is the line to copy. The initial coordinates are inside the
+ * destination framebuffer, and then drm_rect_* helpers are used to
+ * compute the correct position into the source framebuffer.
+ */
+ dst_line = DRM_RECT_INIT(current_plane->frame_info->dst.x1, y,
+ drm_rect_width(&current_plane->frame_info->dst),
+ 1);
+
+ drm_rect_fp_to_int(&tmp_src, &current_plane->frame_info->src);
+
+ /*
+ * [1]: Clamp dst_line to crtc_x_limit to avoid writing outside of
+ * the destination buffer
+ */
+ dst_line.x1 = max_t(int, dst_line.x1, 0);
+ dst_line.x2 = min_t(int, dst_line.x2, crtc_x_limit);
+ /* The destination is completely outside of the crtc. */
+ if (dst_line.x2 <= dst_line.x1)
+ return;
+
+ src_line = dst_line;
+
+ /*
+ * Transform the x/y coordinates from the crtc into coordinates for
+ * the src buffer.
+ *
+ * - Cancel the offset of the dst buffer.
+ * - Invert the rotation. This assumes that
+ * dst = drm_rect_rotate(src, rotation) (dst and src have the
+ * same size, but can be rotated).
+ * - Apply the offset of the source rectangle to the coordinate.
+ */
+ drm_rect_translate(&src_line, -current_plane->frame_info->dst.x1,
+ -current_plane->frame_info->dst.y1);
+ drm_rect_rotate_inv(&src_line, drm_rect_width(&tmp_src),
+ drm_rect_height(&tmp_src),
+ current_plane->frame_info->rotation);
+ drm_rect_translate(&src_line, tmp_src.x1, tmp_src.y1);
+
+ /* Get the correct reading direction in the source buffer. */
+
+ enum pixel_read_direction direction =
+ direction_for_rotation(current_plane->frame_info->rotation);
+
+ /* [2]: Compute and clamp the number of pixels to read */
+ clamp_line_coordinates(direction, current_plane, &src_line, &src_x_start, &src_y_start,
+ &dst_x_start, &pixel_count);
+
+ if (pixel_count <= 0) {
+ /* Nothing to read, so avoid useless function calls */
+ return;
+ }
+
+ /*
+ * Modify the starting point to take the rotation into account
+ *
+ * src_line is the top-left corner, so when reading READ_RIGHT_TO_LEFT or
+ * READ_BOTTOM_TO_TOP, it must be changed to the top-right/bottom-left
+ * corner.
+ */
+ if (direction == READ_RIGHT_TO_LEFT) {
+ // src_x_start is now the right point
+ src_x_start += pixel_count - 1;
+ } else if (direction == READ_BOTTOM_TO_TOP) {
+ // src_y_start is now the bottom point
+ src_y_start += pixel_count - 1;
+ }
+
+ /*
+ * Perform the conversion and the blending
+ *
+ * Here we know that the read line (x_start, y_start, pixel_count) is
+ * inside the source buffer [2] and we don't write outside the stage
+ * buffer [1].
+ */
+ current_plane->pixel_read_line(current_plane, src_x_start, src_y_start, direction,
+ pixel_count, &stage_buffer->pixels[dst_x_start]);
+
+ pre_mul_alpha_blend(stage_buffer, output_buffer,
+ dst_x_start, pixel_count);
+}
+
+/**
* blend - blend the pixels from all planes and compute crc
* @wb: The writeback frame buffer metadata
* @crtc_state: The crtc state
@@ -183,25 +372,25 @@ static void blend(struct vkms_writeback_job *wb,
{
struct vkms_plane_state **plane = crtc_state->active_planes;
u32 n_active_planes = crtc_state->num_active_planes;
- int y_pos;
const struct pixel_argb_u16 background_color = { .a = 0xffff };
- size_t crtc_y_limit = crtc_state->base.crtc->mode.vdisplay;
+ int crtc_y_limit = crtc_state->base.mode.vdisplay;
+ int crtc_x_limit = crtc_state->base.mode.hdisplay;
- for (size_t y = 0; y < crtc_y_limit; y++) {
+ /*
+ * The planes are composed line-by-line to avoid heavy memory usage. It is a necessary
+ * complexity to avoid poor blending performance.
+ *
+ * The pixel_read_line callback is used to read a line, using an efficient
+ * algorithm for a specific format, into the staging buffer.
+ */
+ for (int y = 0; y < crtc_y_limit; y++) {
fill_background(&background_color, output_buffer);
/* The active planes are composed associatively in z-order. */
for (size_t i = 0; i < n_active_planes; i++) {
- y_pos = get_y_pos(plane[i]->frame_info, y);
-
- if (!check_limit(plane[i]->frame_info, y_pos))
- continue;
-
- vkms_compose_row(stage_buffer, plane[i], y_pos);
- pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
- output_buffer);
+ blend_line(plane[i], y, crtc_x_limit, stage_buffer, output_buffer);
}
apply_lut(crtc_state, output_buffer);
@@ -209,7 +398,7 @@ static void blend(struct vkms_writeback_job *wb,
*crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);
if (wb)
- vkms_writeback_row(wb, output_buffer, y_pos);
+ vkms_writeback_row(wb, output_buffer, y);
}
}
@@ -220,7 +409,7 @@ static int check_format_funcs(struct vkms_crtc_state *crtc_state,
u32 n_active_planes = crtc_state->num_active_planes;
for (size_t i = 0; i < n_active_planes; i++)
- if (!planes[i]->pixel_read)
+ if (!planes[i]->pixel_read_line)
return -1;
if (active_wb && !active_wb->pixel_write)
@@ -263,7 +452,7 @@ static int compose_active_planes(struct vkms_writeback_job *active_wb,
if (WARN_ON(check_format_funcs(crtc_state, active_wb)))
return -EINVAL;
- line_width = crtc_state->base.crtc->mode.hdisplay;
+ line_width = crtc_state->base.mode.hdisplay;
stage_buffer.n_pixels = line_width;
output_buffer.n_pixels = line_width;
@@ -302,8 +491,8 @@ free_stage_buffer:
void vkms_composer_worker(struct work_struct *work)
{
struct vkms_crtc_state *crtc_state = container_of(work,
- struct vkms_crtc_state,
- composer_work);
+ struct vkms_crtc_state,
+ composer_work);
struct drm_crtc *crtc = crtc_state->base.crtc;
struct vkms_writeback_job *active_wb = crtc_state->active_writeback;
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
@@ -328,7 +517,7 @@ void vkms_composer_worker(struct work_struct *work)
crtc_state->gamma_lut.base = (struct drm_color_lut *)crtc->state->gamma_lut->data;
crtc_state->gamma_lut.lut_length =
crtc->state->gamma_lut->length / sizeof(struct drm_color_lut);
- max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length - 1);
+ max_lut_index_fp = drm_int2fixp(crtc_state->gamma_lut.lut_length - 1);
crtc_state->gamma_lut.channel_value2index_ratio = drm_fixp_div(max_lut_index_fp,
u16_max_fp);
@@ -367,7 +556,7 @@ void vkms_composer_worker(struct work_struct *work)
drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}
-static const char * const pipe_crc_sources[] = {"auto"};
+static const char *const pipe_crc_sources[] = { "auto" };
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
size_t *count)
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 40b4d084e3ce..28a57ae109fc 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -64,8 +64,6 @@ static int vkms_enable_vblank(struct drm_crtc *crtc)
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
- drm_calc_timestamping_constants(crtc, &crtc->mode);
-
hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
out->vblank_hrtimer.function = &vkms_vblank_simulate;
out->period_ns = ktime_set(0, vblank->framedur_ns);
@@ -188,8 +186,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
- plane);
+ plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
WARN_ON(!plane_state);
if (!plane_state->visible)
@@ -205,8 +202,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
i = 0;
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
- plane);
+ plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
if (!plane_state->visible)
continue;
@@ -232,6 +228,7 @@ static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
+ __acquires(&vkms_output->lock)
{
struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
@@ -243,6 +240,7 @@ static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
+ __releases(&vkms_output->lock)
{
struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
@@ -287,7 +285,12 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
- drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
+ ret = drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
+ if (ret) {
+ DRM_ERROR("Failed to set gamma size\n");
+ return ret;
+ }
+
drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);
spin_lock_init(&vkms_out->lock);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 0c1a713b7b7b..e0409aba9349 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -33,7 +34,6 @@
#define DRIVER_NAME "vkms"
#define DRIVER_DESC "Virtual Kernel Mode Setting"
-#define DRIVER_DATE "20180514"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
@@ -81,8 +81,7 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_wait_for_flip_done(dev, old_state);
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- struct vkms_crtc_state *vkms_state =
- to_vkms_crtc_state(old_crtc_state);
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(old_crtc_state);
flush_work(&vkms_state->composer_work);
}
@@ -112,10 +111,10 @@ static const struct drm_driver vkms_driver = {
.release = vkms_release,
.fops = &vkms_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
@@ -172,7 +171,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.preferred_depth = 0;
dev->mode_config.helper_private = &vkms_mode_config_helpers;
- return vkms_output_init(vkmsdev, 0);
+ return vkms_output_init(vkmsdev);
}
static int vkms_create(struct vkms_config *config)
@@ -225,7 +224,7 @@ static int vkms_create(struct vkms_config *config)
if (ret)
goto out_devres;
- drm_fbdev_shmem_setup(&vkms_device->drm, 0);
+ drm_client_setup(&vkms_device->drm, NULL);
return 0;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 5e46ea5b96dc..00541eff3d1b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -25,15 +25,22 @@
#define VKMS_LUT_SIZE 256
+/**
+ * struct vkms_frame_info - Structure to store the state of a frame
+ *
+ * @fb: backing drm framebuffer
+ * @src: source rectangle of this frame in the source framebuffer, stored in 16.16 fixed-point form
+ * @dst: destination rectangle in the crtc buffer, stored in whole pixel units
+ * @map: see @drm_shadow_plane_state.data
+ * @rotation: rotation applied to the source.
+ *
+ * @src and @dst should have the same size modulo the rotation.
+ */
struct vkms_frame_info {
struct drm_framebuffer *fb;
struct drm_rect src, dst;
- struct drm_rect rotated;
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
unsigned int rotation;
- unsigned int offset;
- unsigned int pitch;
- unsigned int cpp;
};
struct pixel_argb_u16 {
@@ -45,21 +52,65 @@ struct line_buffer {
struct pixel_argb_u16 *pixels;
};
+/**
+ * typedef pixel_write_t - These functions are used to read a pixel from a
+ * &struct pixel_argb_u16, convert it to a specific format and write it to the @out_pixel
+ * buffer.
+ *
+ * @out_pixel: destination address to write the pixel
+ * @in_pixel: pixel to write
+ */
+typedef void (*pixel_write_t)(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel);
+
struct vkms_writeback_job {
struct iosys_map data[DRM_FORMAT_MAX_PLANES];
struct vkms_frame_info wb_frame_info;
- void (*pixel_write)(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel);
+ pixel_write_t pixel_write;
+};
+
+/**
+ * enum pixel_read_direction - Enum used internally by VKMS to represent a reading direction in a
+ * plane.
+ */
+enum pixel_read_direction {
+ READ_BOTTOM_TO_TOP,
+ READ_TOP_TO_BOTTOM,
+ READ_RIGHT_TO_LEFT,
+ READ_LEFT_TO_RIGHT
};
+struct vkms_plane_state;
+
/**
- * vkms_plane_state - Driver specific plane state
+ * typedef pixel_read_line_t - These functions are used to read a pixel line in the source frame,
+ * convert it to `struct pixel_argb_u16` and write it to @out_pixel.
+ *
+ * @plane: plane used as source for the pixel value
+ * @x_start: X (width) coordinate of the first pixel to copy. The caller must ensure that x_start
+ * is non-negative and smaller than @plane->frame_info->fb->width.
+ * @y_start: Y (height) coordinate of the first pixel to copy. The caller must ensure that y_start
+ * is non-negative and smaller than @plane->frame_info->fb->height.
+ * @direction: direction to use for the copy, starting at @x_start/@y_start
+ * @count: number of pixels to copy
+ * @out_pixel: pointer where to write the pixel values. They will be written from @out_pixel[0]
+ * (included) to @out_pixel[@count] (excluded). The caller must ensure that out_pixel has a
+ * length of at least @count.
+ */
+typedef void (*pixel_read_line_t)(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[]);
+
+/**
+ * struct vkms_plane_state - Driver specific plane state
* @base: base plane state
* @frame_info: data required for composing computation
+ * @pixel_read_line: function to read a pixel line in this plane. The creator of a
+ * struct vkms_plane_state must ensure that this pointer is valid
*/
struct vkms_plane_state {
struct drm_shadow_plane_state base;
struct vkms_frame_info *frame_info;
- void (*pixel_read)(u8 *src_buffer, struct pixel_argb_u16 *out_pixel);
+ pixel_read_line_t pixel_read_line;
};
struct vkms_plane {
@@ -73,29 +124,56 @@ struct vkms_color_lut {
};
/**
- * vkms_crtc_state - Driver specific CRTC state
+ * struct vkms_crtc_state - Driver specific CRTC state
+ *
* @base: base CRTC state
* @composer_work: work struct to compose and add CRC entries
- * @n_frame_start: start frame number for computed CRC
- * @n_frame_end: end frame number for computed CRC
+ *
+ * @num_active_planes: Number of active planes
+ * @active_planes: List containing all the active planes (counted by
+ * @num_active_planes). They should be stored in z-order.
+ * @active_writeback: Current active writeback job
+ * @gamma_lut: Look up table for gamma used in this CRTC
+ * @crc_pending: Protected by @vkms_output.composer_lock, true when the frame CRC is not computed
+ * yet. Used by vblank to detect if the composer is too slow.
+ * @wb_pending: Protected by @vkms_output.composer_lock, true when a writeback frame is requested.
+ * @frame_start: Protected by @vkms_output.composer_lock, saves the frame number before the start
+ * of the composition process.
+ * @frame_end: Protected by @vkms_output.composer_lock, saves the last requested frame number.
+ * This is used to generate enough CRC entries when the composition worker is too slow.
*/
struct vkms_crtc_state {
struct drm_crtc_state base;
struct work_struct composer_work;
int num_active_planes;
- /* stack of active planes for crc computation, should be in z order */
struct vkms_plane_state **active_planes;
struct vkms_writeback_job *active_writeback;
struct vkms_color_lut gamma_lut;
- /* below four are protected by vkms_output.composer_lock */
bool crc_pending;
bool wb_pending;
u64 frame_start;
u64 frame_end;
};
+/**
+ * struct vkms_output - Internal representation of all output components in VKMS
+ *
+ * @crtc: Base CRTC in DRM
+ * @encoder: DRM encoder used for this output
+ * @connector: DRM connector used for this output
+ * @wb_connector: DRM writeback connector used for this output
+ * @vblank_hrtimer: Timer used to trigger the vblank
+ * @period_ns: vblank period, in nanoseconds, used to configure @vblank_hrtimer and to compute
+ * vblank timestamps
+ * @composer_workq: Ordered workqueue for @composer_state.composer_work.
+ * @lock: Lock used to protect concurrent access to the composer
+ * @composer_enabled: Protected by @lock, true when the VKMS composer is active (crc needed or
+ * writeback)
+ * @composer_state: Protected by @lock, current state of this VKMS output
+ * @composer_lock: Lock used internally to protect @composer_state members
+ */
struct vkms_output {
struct drm_crtc crtc;
struct drm_encoder encoder;
@@ -103,28 +181,38 @@ struct vkms_output {
struct drm_writeback_connector wb_connector;
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
- /* ordered wq for composer_work */
struct workqueue_struct *composer_workq;
- /* protects concurrent access to composer */
spinlock_t lock;
- /* protected by @lock */
bool composer_enabled;
struct vkms_crtc_state *composer_state;
spinlock_t composer_lock;
};
-struct vkms_device;
-
+/**
+ * struct vkms_config - General configuration for VKMS driver
+ *
+ * @writeback: If true, a writeback buffer can be attached to the CRTC
+ * @cursor: If true, a cursor plane is created in the VKMS device
+ * @overlay: If true, NUM_OVERLAY_PLANES overlay planes are created for the VKMS device
+ * @dev: Used to store the current VKMS device. Only set when the device is instantiated.
+ */
struct vkms_config {
bool writeback;
bool cursor;
bool overlay;
- /* only set when instantiated */
struct vkms_device *dev;
};
+/**
+ * struct vkms_device - Description of a VKMS device
+ *
+ * @drm: Base device in DRM
+ * @platform: Associated platform device
+ * @output: Sub-components of the VKMS device
+ * @config: Configuration used in this VKMS device
+ */
struct vkms_device {
struct drm_device drm;
struct platform_device *platform;
@@ -132,6 +220,10 @@ struct vkms_device {
const struct vkms_config *config;
};
+/*
+ * The following helpers convert a pointer to a struct member into a pointer to its
+ * parent structure.
+ */
+
#define drm_crtc_to_vkms_output(target) \
container_of(target, struct vkms_output, crtc)
@@ -144,14 +236,31 @@ struct vkms_device {
#define to_vkms_plane_state(target)\
container_of(target, struct vkms_plane_state, base.base)
-/* CRTC */
+/**
+ * vkms_crtc_init() - Initialize a CRTC for VKMS
+ * @dev: DRM device associated with the VKMS buffer
+ * @crtc: uninitialized CRTC device
+ * @primary: primary plane to attach to the CRTC
+ * @cursor: cursor plane to attach to the CRTC
+ */
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor);
-int vkms_output_init(struct vkms_device *vkmsdev, int index);
+/**
+ * vkms_output_init() - Initialize all sub-components needed for a VKMS device.
+ *
+ * @vkmsdev: VKMS device to initialize
+ */
+int vkms_output_init(struct vkms_device *vkmsdev);
+/**
+ * vkms_plane_init() - Initialize a plane
+ *
+ * @vkmsdev: VKMS device containing the plane
+ * @type: type of plane to initialize
+ */
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type, int index);
+ enum drm_plane_type type);
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
@@ -163,7 +272,6 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
/* Composer Support */
void vkms_composer_worker(struct work_struct *work);
void vkms_set_composer(struct vkms_output *out, bool enabled);
-void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y);
void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y);
/* Writeback */
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 040b7f113a3b..39b1d7c97d45 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -9,148 +9,316 @@
#include "vkms_formats.h"
-static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int y)
+/**
+ * packed_pixels_offset() - Get the offset of the block containing the pixel at coordinates x/y
+ *
+ * @frame_info: Buffer metadata
+ * @x: The x coordinate of the wanted pixel in the buffer
+ * @y: The y coordinate of the wanted pixel in the buffer
+ * @plane_index: The index of the plane to use
+ * @offset: The returned offset inside the buffer of the block
+ * @rem_x: The returned X coordinate of the requested pixel in the block
+ * @rem_y: The returned Y coordinate of the requested pixel in the block
+ *
+ * As some pixel formats store multiple pixels in a block (DRM_FORMAT_R* for example), some
+ * pixels are not individually addressable. This function returns 3 values: the offset of the
+ * whole block, and the coordinates of the requested pixel inside this block.
+ * For example, if the format is DRM_FORMAT_R1 and the requested coordinate is 13,5, the offset
+ * will point to the byte 5*pitches + 13/8 (second byte of the 5th line), and the rem_x/rem_y
+ * coordinates will be (13 % 8, 5 % 1) = (5, 0)
+ *
+ * With this function, the caller just has to extract the correct pixel from the block.
+ */
+static void packed_pixels_offset(const struct vkms_frame_info *frame_info, int x, int y,
+ int plane_index, int *offset, int *rem_x, int *rem_y)
{
- return frame_info->offset + (y * frame_info->pitch)
- + (x * frame_info->cpp);
+ struct drm_framebuffer *fb = frame_info->fb;
+ const struct drm_format_info *format = frame_info->fb->format;
+ /*
+ * Directly using x and y to multiply pitches and format->cpp is not sufficient because
+ * in some formats a block can represent multiple pixels.
+ *
+ * Dividing x and y by the block size makes it possible to extract the correct offset of
+ * the block containing the pixel.
+ */
+
+ int block_x = x / drm_format_info_block_width(format, plane_index);
+ int block_y = y / drm_format_info_block_height(format, plane_index);
+ int block_pitch = fb->pitches[plane_index] * drm_format_info_block_height(format,
+ plane_index);
+ *rem_x = x % drm_format_info_block_width(format, plane_index);
+ *rem_y = y % drm_format_info_block_height(format, plane_index);
+ *offset = fb->offsets[plane_index] +
+ block_y * block_pitch +
+ block_x * format->char_per_block[plane_index];
}
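
A standalone arithmetic check (not kernel code) of the block computation above, using the
DRM_FORMAT_R1 example from the kerneldoc (block_w = 8, block_h = 1, char_per_block = 1) with an
assumed pitch of 100 bytes and offsets[0] = 0:

#include <stdio.h>

int main(void)
{
        int x = 13, y = 5, pitch = 100;
        int block_w = 8, block_h = 1, char_per_block = 1;

        int block_x = x / block_w;              /* 1 */
        int block_y = y / block_h;              /* 5 */
        int block_pitch = pitch * block_h;      /* 100 */
        int offset = block_y * block_pitch + block_x * char_per_block;

        /* offset = 501: second byte of the 5th line; rem = (5, 0) */
        printf("offset=%d rem_x=%d rem_y=%d\n", offset, x % block_w, y % block_h);
        return 0;
}
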
-/*
- * packed_pixels_addr - Get the pointer to pixel of a given pair of coordinates
+/**
+ * packed_pixels_addr() - Get the pointer to the block containing the pixel at the given
+ * coordinates
*
* @frame_info: Buffer metadata
- * @x: The x(width) coordinate of the 2D buffer
- * @y: The y(Heigth) coordinate of the 2D buffer
+ * @x: The x (width) coordinate inside the plane
+ * @y: The y (height) coordinate inside the plane
+ * @plane_index: The index of the plane
+ * @addr: The returned pointer
+ * @rem_x: The returned X coordinate of the requested pixel in the block
+ * @rem_y: The returned Y coordinate of the requested pixel in the block
+ *
+ * Takes the information stored in the frame_info, a pair of coordinates, and returns the address
+ * of the block containing this pixel and the pixel position inside this block.
*
- * Takes the information stored in the frame_info, a pair of coordinates, and
- * returns the address of the first color channel.
- * This function assumes the channels are packed together, i.e. a color channel
- * comes immediately after another in the memory. And therefore, this function
- * doesn't work for YUV with chroma subsampling (e.g. YUV420 and NV21).
+ * See packed_pixels_offset() for details about rem_x/rem_y behavior.
*/
-static void *packed_pixels_addr(const struct vkms_frame_info *frame_info,
- int x, int y)
+static void packed_pixels_addr(const struct vkms_frame_info *frame_info,
+ int x, int y, int plane_index, u8 **addr, int *rem_x,
+ int *rem_y)
{
- size_t offset = pixel_offset(frame_info, x, y);
+ int offset;
- return (u8 *)frame_info->map[0].vaddr + offset;
+ packed_pixels_offset(frame_info, x, y, plane_index, &offset, rem_x, rem_y);
+ *addr = (u8 *)frame_info->map[0].vaddr + offset;
}
-static void *get_packed_src_addr(const struct vkms_frame_info *frame_info, int y)
+/**
+ * get_block_step_bytes() - Common helper to compute the correct step value between each pixel block
+ * to read in a certain direction.
+ *
+ * @fb: Framebuffer to iterate on
+ * @direction: Direction of the reading
+ * @plane_index: Plane to get the step from
+ *
+ * As the returned count is the number of bytes between two consecutive blocks in a direction,
+ * the caller may have to read multiple pixels before applying the next step (for example, to
+ * read from left to right in a DRM_FORMAT_R1 plane, each block contains 8 pixels, so the step
+ * must be applied only every 8 pixels).
+ */
+static int get_block_step_bytes(struct drm_framebuffer *fb, enum pixel_read_direction direction,
+ int plane_index)
{
- int x_src = frame_info->src.x1 >> 16;
- int y_src = y - frame_info->rotated.y1 + (frame_info->src.y1 >> 16);
+ switch (direction) {
+ case READ_LEFT_TO_RIGHT:
+ return fb->format->char_per_block[plane_index];
+ case READ_RIGHT_TO_LEFT:
+ return -fb->format->char_per_block[plane_index];
+ case READ_TOP_TO_BOTTOM:
+ return (int)fb->pitches[plane_index] * drm_format_info_block_width(fb->format,
+ plane_index);
+ case READ_BOTTOM_TO_TOP:
+ return -(int)fb->pitches[plane_index] * drm_format_info_block_width(fb->format,
+ plane_index);
+ }
- return packed_pixels_addr(frame_info, x_src, y_src);
+ return 0;
}
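
To make the step values concrete, for an XRGB8888 framebuffer (char_per_block = 4,
block_w = block_h = 1) with an assumed pitch of 4096 bytes, the helper above returns:

        READ_LEFT_TO_RIGHT  ->     4    /* next pixel in the row */
        READ_RIGHT_TO_LEFT  ->    -4
        READ_TOP_TO_BOTTOM  ->  4096    /* same column, next row */
        READ_BOTTOM_TO_TOP  -> -4096
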
-static int get_x_position(const struct vkms_frame_info *frame_info, int limit, int x)
+/**
+ * packed_pixels_addr_1x1() - Get the pointer to the block containing the pixel at the given
+ * coordinates
+ *
+ * @frame_info: Buffer metadata
+ * @x: The x (width) coordinate inside the plane
+ * @y: The y (height) coordinate inside the plane
+ * @plane_index: The index of the plane
+ * @addr: The returned pointer
+ *
+ * This function can only be used with formats where block_h == block_w == 1.
+ */
+static void packed_pixels_addr_1x1(const struct vkms_frame_info *frame_info,
+ int x, int y, int plane_index, u8 **addr)
{
- if (frame_info->rotation & (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_270))
- return limit - x - 1;
- return x;
+ int offset, rem_x, rem_y;
+
+ WARN_ONCE(drm_format_info_block_width(frame_info->fb->format,
+ plane_index) != 1,
+ "%s() only supports formats with block_w == 1", __func__);
+ WARN_ONCE(drm_format_info_block_height(frame_info->fb->format,
+ plane_index) != 1,
+ "%s() only supports formats with block_h == 1", __func__);
+
+ packed_pixels_offset(frame_info, x, y, plane_index, &offset, &rem_x,
+ &rem_y);
+ *addr = (u8 *)frame_info->map[0].vaddr + offset;
}
-static void ARGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+/*
+ * The following functions take pixel data (a, r, g, b, pixel, ...) and convert them to
+ * &struct pixel_argb_u16
+ *
+ * They are used in the `read_line` functions to avoid duplicate work for some pixel formats.
+ */
+
+static struct pixel_argb_u16 argb_u16_from_u8888(u8 a, u8 r, u8 g, u8 b)
{
+ struct pixel_argb_u16 out_pixel;
/*
* The 257 is the "conversion ratio". This number is obtained by the
* (2^16 - 1) / (2^8 - 1) division. Which, in this case, tries to get
* the best color value in a pixel format with more possibilities.
* A similar idea applies to other RGB color conversions.
*/
- out_pixel->a = (u16)src_pixels[3] * 257;
- out_pixel->r = (u16)src_pixels[2] * 257;
- out_pixel->g = (u16)src_pixels[1] * 257;
- out_pixel->b = (u16)src_pixels[0] * 257;
-}
+ out_pixel.a = (u16)a * 257;
+ out_pixel.r = (u16)r * 257;
+ out_pixel.g = (u16)g * 257;
+ out_pixel.b = (u16)b * 257;
-static void XRGB8888_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
-{
- out_pixel->a = (u16)0xffff;
- out_pixel->r = (u16)src_pixels[2] * 257;
- out_pixel->g = (u16)src_pixels[1] * 257;
- out_pixel->b = (u16)src_pixels[0] * 257;
+ return out_pixel;
}
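
A standalone check (not kernel code) of the 257 conversion ratio used above:
257 = 0xffff / 0xff, so the conversion amounts to replicating the 8-bit value into
both bytes of the 16-bit result.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        assert((uint16_t)(0x00 * 257) == 0x0000);
        assert((uint16_t)(0xff * 257) == 0xffff);
        assert((uint16_t)(0xab * 257) == 0xabab); /* byte replication */
        return 0;
}
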
-static void ARGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_u16161616(u16 a, u16 r, u16 g, u16 b)
{
- __le16 *pixels = (__force __le16 *)src_pixels;
+ struct pixel_argb_u16 out_pixel;
+
+ out_pixel.a = a;
+ out_pixel.r = r;
+ out_pixel.g = g;
+ out_pixel.b = b;
- out_pixel->a = le16_to_cpu(pixels[3]);
- out_pixel->r = le16_to_cpu(pixels[2]);
- out_pixel->g = le16_to_cpu(pixels[1]);
- out_pixel->b = le16_to_cpu(pixels[0]);
+ return out_pixel;
}
-static void XRGB16161616_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_le16161616(__le16 a, __le16 r, __le16 g, __le16 b)
{
- __le16 *pixels = (__force __le16 *)src_pixels;
-
- out_pixel->a = (u16)0xffff;
- out_pixel->r = le16_to_cpu(pixels[2]);
- out_pixel->g = le16_to_cpu(pixels[1]);
- out_pixel->b = le16_to_cpu(pixels[0]);
+ return argb_u16_from_u16161616(le16_to_cpu(a), le16_to_cpu(r), le16_to_cpu(g),
+ le16_to_cpu(b));
}
-static void RGB565_to_argb_u16(u8 *src_pixels, struct pixel_argb_u16 *out_pixel)
+static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
{
- __le16 *pixels = (__force __le16 *)src_pixels;
+ struct pixel_argb_u16 out_pixel;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
- u16 rgb_565 = le16_to_cpu(*pixels);
+ u16 rgb_565 = le16_to_cpu(*pixel);
s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
- out_pixel->a = (u16)0xffff;
- out_pixel->r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio));
- out_pixel->g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio));
- out_pixel->b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
+ out_pixel.a = (u16)0xffff;
+ out_pixel.r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio));
+ out_pixel.g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio));
+ out_pixel.b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio));
+
+ return out_pixel;
}
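
A standalone sketch (not kernel code) approximating the fixed-point RGB565 expansion above
with rounded integer arithmetic (scale factor 65535/31 for the 5-bit channels):

#include <stdio.h>

int main(void)
{
        /* Rounded integer version of drm_fixp_mul(value, 65535/31) */
        unsigned int max5 = (31u * 65535 + 15) / 31;    /* 65535 */
        unsigned int mid5 = (16u * 65535 + 15) / 31;    /* 33825 */

        printf("31 -> %u, 16 -> %u\n", max5, mid5);
        return 0;
}
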
-/**
- * vkms_compose_row - compose a single row of a plane
- * @stage_buffer: output line with the composed pixels
- * @plane: state of the plane that is being composed
- * @y: y coordinate of the row
+/*
+ * The following functions are the read_line functions for each pixel format supported by
+ * VKMS.
+ *
+ * They read a line starting at the point @x_start,@y_start, following the @direction. The
+ * result is stored in @out_pixel in ARGB16161616 format.
*
- * This function composes a single row of a plane. It gets the source pixels
- * through the y coordinate (see get_packed_src_addr()) and goes linearly
- * through the source pixel, reading the pixels and converting it to
- * ARGB16161616 (see the pixel_read() callback). For rotate-90 and rotate-270,
- * the source pixels are not traversed linearly. The source pixels are queried
- * on each iteration in order to traverse the pixels vertically.
+ * These functions are very repetitive, but the innermost pixel loops must be kept inside these
+ * functions for performance reasons. Some benchmarking was done in [1] where having the innermost
+ * loop factored out of these functions showed a slowdown by a factor of three.
+ *
+ * [1]: https://lore.kernel.org/dri-devel/d258c8dc-78e9-4509-9037-a98f7f33b3a3@riseup.net/
*/
-void vkms_compose_row(struct line_buffer *stage_buffer, struct vkms_plane_state *plane, int y)
+
+static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
+ enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
{
- struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
- struct vkms_frame_info *frame_info = plane->frame_info;
- u8 *src_pixels = get_packed_src_addr(frame_info, y);
- int limit = min_t(size_t, drm_rect_width(&frame_info->dst), stage_buffer->n_pixels);
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
- for (size_t x = 0; x < limit; x++, src_pixels += frame_info->cpp) {
- int x_pos = get_x_position(frame_info, limit, x);
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
- if (drm_rotation_90_or_270(frame_info->rotation))
- src_pixels = get_packed_src_addr(frame_info, x + frame_info->rotated.y1)
- + frame_info->cpp * y;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
- plane->pixel_read(src_pixels, &out_pixels[x_pos]);
+ while (out_pixel < end) {
+ u8 *px = (u8 *)src_pixels;
+ *out_pixel = argb_u16_from_u8888(px[3], px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
+ enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ u8 *px = (u8 *)src_pixels;
+ *out_pixel = argb_u16_from_u8888(255, px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void ARGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ u16 *px = (u16 *)src_pixels;
+ *out_pixel = argb_u16_from_u16161616(px[3], px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void XRGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ __le16 *px = (__le16 *)src_pixels;
+ *out_pixel = argb_u16_from_le16161616(cpu_to_le16(0xFFFF), px[2], px[1], px[0]);
+ out_pixel += 1;
+ src_pixels += step;
+ }
+}
+
+static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ while (out_pixel < end) {
+ __le16 *px = (__le16 *)src_pixels;
+
+ *out_pixel = argb_u16_from_RGB565(px);
+ out_pixel += 1;
+ src_pixels += step;
}
}
/*
- * The following functions take an line of argb_u16 pixels from the
- * src_buffer, convert them to a specific format, and store them in the
- * destination.
+ * The following functions take one &struct pixel_argb_u16 and convert it to a specific format.
+ * The result is stored in @out_pixel.
*
- * They are used in the `compose_active_planes` to convert and store a line
- * from the src_buffer to the writeback buffer.
+ * They are used in vkms_writeback_row() to convert and store a pixel from the src_buffer to
+ * the writeback buffer.
*/
-static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_ARGB8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
/*
* This sequence below is important because the format's byte order is
@@ -162,43 +330,43 @@ static void argb_u16_to_ARGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel
* | Addr + 2 | = Red channel
* | Addr + 3 | = Alpha channel
*/
- dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
- dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
- dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
- dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
+ out_pixel[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257);
+ out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+ out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
}
-static void argb_u16_to_XRGB8888(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_XRGB8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- dst_pixels[3] = 0xff;
- dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
- dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
- dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
+ out_pixel[3] = 0xff;
+ out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257);
+ out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257);
+ out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257);
}
-static void argb_u16_to_ARGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_ARGB16161616(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- __le16 *pixels = (__force __le16 *)dst_pixels;
+ __le16 *pixel = (__le16 *)out_pixel;
- pixels[3] = cpu_to_le16(in_pixel->a);
- pixels[2] = cpu_to_le16(in_pixel->r);
- pixels[1] = cpu_to_le16(in_pixel->g);
- pixels[0] = cpu_to_le16(in_pixel->b);
+ pixel[3] = cpu_to_le16(in_pixel->a);
+ pixel[2] = cpu_to_le16(in_pixel->r);
+ pixel[1] = cpu_to_le16(in_pixel->g);
+ pixel[0] = cpu_to_le16(in_pixel->b);
}
-static void argb_u16_to_XRGB16161616(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_XRGB16161616(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- __le16 *pixels = (__force __le16 *)dst_pixels;
+ __le16 *pixel = (__le16 *)out_pixel;
- pixels[3] = cpu_to_le16(0xffff);
- pixels[2] = cpu_to_le16(in_pixel->r);
- pixels[1] = cpu_to_le16(in_pixel->g);
- pixels[0] = cpu_to_le16(in_pixel->b);
+ pixel[3] = cpu_to_le16(0xffff);
+ pixel[2] = cpu_to_le16(in_pixel->r);
+ pixel[1] = cpu_to_le16(in_pixel->g);
+ pixel[0] = cpu_to_le16(in_pixel->b);
}
-static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
+static void argb_u16_to_RGB565(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel)
{
- __le16 *pixels = (__force __le16 *)dst_pixels;
+ __le16 *pixel = (__le16 *)out_pixel;
s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
@@ -211,41 +379,74 @@ static void argb_u16_to_RGB565(u8 *dst_pixels, struct pixel_argb_u16 *in_pixel)
u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
- *pixels = cpu_to_le16(r << 11 | g << 5 | b);
+ *pixel = cpu_to_le16(r << 11 | g << 5 | b);
}
+/**
+ * vkms_writeback_row() - Generic loop for all supported writeback formats. It is executed just
+ * after the blending to write a line in the writeback buffer.
+ *
+ * @wb: Job where to insert the final image
+ * @src_buffer: Line to write
+ * @y: Row to write in the writeback buffer
+ */
void vkms_writeback_row(struct vkms_writeback_job *wb,
const struct line_buffer *src_buffer, int y)
{
struct vkms_frame_info *frame_info = &wb->wb_frame_info;
int x_dst = frame_info->dst.x1;
- u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ u8 *dst_pixels;
+ int rem_x, rem_y;
+
+ packed_pixels_addr(frame_info, x_dst, y, 0, &dst_pixels, &rem_x, &rem_y);
struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst), src_buffer->n_pixels);
- for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->cpp)
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->fb->format->cpp[0])
wb->pixel_write(dst_pixels, &in_pixels[x]);
}
-void *get_pixel_conversion_function(u32 format)
+/**
+ * get_pixel_read_line_function() - Retrieve the correct read_line function for a specific
+ * format. For unsupported pixel formats, an error is logged and BUG() is raised, so the
+ * returned pointer is always valid.
+ *
+ * @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h])
+ */
+pixel_read_line_t get_pixel_read_line_function(u32 format)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
- return &ARGB8888_to_argb_u16;
+ return &ARGB8888_read_line;
case DRM_FORMAT_XRGB8888:
- return &XRGB8888_to_argb_u16;
+ return &XRGB8888_read_line;
case DRM_FORMAT_ARGB16161616:
- return &ARGB16161616_to_argb_u16;
+ return &ARGB16161616_read_line;
case DRM_FORMAT_XRGB16161616:
- return &XRGB16161616_to_argb_u16;
+ return &XRGB16161616_read_line;
case DRM_FORMAT_RGB565:
- return &RGB565_to_argb_u16;
+ return &RGB565_read_line;
default:
- return NULL;
+ /*
+ * This is a bug in vkms_plane_atomic_check(). All the supported
+ * formats must:
+ * - Be listed in vkms_formats in vkms_plane.c
+ * - Have a pixel_read_line callback defined here
+ */
+ pr_err("Pixel format %p4cc is not supported by VKMS planes. This is a kernel bug, atomic check must forbid this configuration.\n",
+ &format);
+ BUG();
}
}
-void *get_pixel_write_function(u32 format)
+/**
+ * get_pixel_write_function() - Retrieve the correct write_pixel function for a specific format.
+ * For unsupported pixel formats, an error is logged and BUG() is raised, so the returned
+ * pointer is always valid.
+ *
+ * @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h])
+ */
+pixel_write_t get_pixel_write_function(u32 format)
{
switch (format) {
case DRM_FORMAT_ARGB8888:
@@ -259,6 +460,14 @@ void *get_pixel_write_function(u32 format)
case DRM_FORMAT_RGB565:
return &argb_u16_to_RGB565;
default:
- return NULL;
+ /*
+ * This is a bug in vkms_writeback_atomic_check. All the supported
+ * formats must:
+ * - Be listed in vkms_wb_formats in vkms_writeback.c
+ * - Have a pixel_write callback defined here
+ */
+ pr_err("Pixel format %p4cc is not supported by VKMS writeback. This is a kernel bug, atomic check must forbid this configuration.\n",
+ &format);
+ BUG();
}
}
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index cf59c2ed8e9a..8d2bef95ff79 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -5,8 +5,8 @@
#include "vkms_drv.h"
-void *get_pixel_conversion_function(u32 format);
+pixel_read_line_t get_pixel_read_line_function(u32 format);
-void *get_pixel_write_function(u32 format);
+pixel_write_t get_pixel_write_function(u32 format);
#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 5ce70dd946aa..8f4bd5aef087 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -21,6 +21,7 @@ static int vkms_conn_get_modes(struct drm_connector *connector)
{
int count;
+ /* Use the default modes list from DRM */
count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
@@ -31,47 +32,30 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
.get_modes = vkms_conn_get_modes,
};
-static int vkms_add_overlay_plane(struct vkms_device *vkmsdev, int index,
- struct drm_crtc *crtc)
-{
- struct vkms_plane *overlay;
-
- overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY, index);
- if (IS_ERR(overlay))
- return PTR_ERR(overlay);
-
- if (!overlay->base.possible_crtcs)
- overlay->base.possible_crtcs = drm_crtc_mask(crtc);
-
- return 0;
-}
-
-int vkms_output_init(struct vkms_device *vkmsdev, int index)
+int vkms_output_init(struct vkms_device *vkmsdev)
{
struct vkms_output *output = &vkmsdev->output;
struct drm_device *dev = &vkmsdev->drm;
struct drm_connector *connector = &output->connector;
struct drm_encoder *encoder = &output->encoder;
struct drm_crtc *crtc = &output->crtc;
- struct vkms_plane *primary, *cursor = NULL;
+ struct vkms_plane *primary, *overlay, *cursor = NULL;
int ret;
int writeback;
unsigned int n;
- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
+ /*
+ * Initialize the planes. One primary plane is required to perform the composition.
+ *
+ * The overlay and cursor planes are not mandatory, but can be used to perform complex
+ * composition.
+ */
+ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(primary))
return PTR_ERR(primary);
- if (vkmsdev->config->overlay) {
- for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
- ret = vkms_add_overlay_plane(vkmsdev, index, crtc);
- if (ret)
- return ret;
- }
- }
-
if (vkmsdev->config->cursor) {
- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
+ cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
if (IS_ERR(cursor))
return PTR_ERR(cursor);
}
@@ -80,11 +64,22 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
if (ret)
return ret;
+ if (vkmsdev->config->overlay) {
+ for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
+ overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY);
+ if (IS_ERR(overlay)) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
+ return PTR_ERR(overlay);
+ }
+ overlay->base.possible_crtcs = drm_crtc_mask(crtc);
+ }
+ }
+
ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
DRM_ERROR("Failed to init connector\n");
- goto err_connector;
+ return ret;
}
drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
@@ -95,7 +90,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
DRM_ERROR("Failed to init encoder\n");
goto err_encoder;
}
- encoder->possible_crtcs = 1;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
@@ -119,8 +114,5 @@ err_attach:
err_encoder:
drm_connector_cleanup(connector);
-err_connector:
- drm_crtc_cleanup(crtc);
-
return ret;
}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e5c625ab8e3e..e2fce471870f 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -112,23 +112,12 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
frame_info = vkms_plane_state->frame_info;
memcpy(&frame_info->src, &new_state->src, sizeof(struct drm_rect));
memcpy(&frame_info->dst, &new_state->dst, sizeof(struct drm_rect));
- memcpy(&frame_info->rotated, &new_state->dst, sizeof(struct drm_rect));
frame_info->fb = fb;
memcpy(&frame_info->map, &shadow_plane_state->data, sizeof(frame_info->map));
drm_framebuffer_get(frame_info->fb);
- frame_info->rotation = drm_rotation_simplify(new_state->rotation, DRM_MODE_ROTATE_0 |
- DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_270 |
- DRM_MODE_REFLECT_X |
- DRM_MODE_REFLECT_Y);
-
- drm_rect_rotate(&frame_info->rotated, drm_rect_width(&frame_info->rotated),
- drm_rect_height(&frame_info->rotated), frame_info->rotation);
-
- frame_info->offset = fb->offsets[0];
- frame_info->pitch = fb->pitches[0];
- frame_info->cpp = fb->format->cpp[0];
- vkms_plane_state->pixel_read = get_pixel_conversion_function(fmt);
+ frame_info->rotation = new_state->rotation;
+
+ vkms_plane_state->pixel_read_line = get_pixel_read_line_function(fmt);
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -198,12 +187,12 @@ static const struct drm_plane_helper_funcs vkms_plane_helper_funcs = {
};
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type, int index)
+ enum drm_plane_type type)
{
struct drm_device *dev = &vkmsdev->drm;
struct vkms_plane *plane;
- plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 1 << index,
+ plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 0,
&vkms_plane_funcs,
vkms_formats, ARRAY_SIZE(vkms_formats),
NULL, type, NULL);
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index bc724cbd5e3a..79918b44fedd 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -131,8 +131,8 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
struct drm_connector_state *conn_state = wb_conn->base.state;
struct vkms_crtc_state *crtc_state = output->composer_state;
struct drm_framebuffer *fb = connector_state->writeback_job->fb;
- u16 crtc_height = crtc_state->base.crtc->mode.vdisplay;
- u16 crtc_width = crtc_state->base.crtc->mode.hdisplay;
+ u16 crtc_height = crtc_state->base.mode.vdisplay;
+ u16 crtc_width = crtc_state->base.mode.hdisplay;
struct vkms_writeback_job *active_wb;
struct vkms_frame_info *wb_frame_info;
u32 wb_format = fb->format->format;
@@ -149,11 +149,6 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
crtc_state->active_writeback = active_wb;
crtc_state->wb_pending = true;
spin_unlock_irq(&output->composer_lock);
-
- wb_frame_info->offset = fb->offsets[0];
- wb_frame_info->pitch = fb->pitches[0];
- wb_frame_info->cpp = fb->format->cpp[0];
-
drm_writeback_queue_job(wb_conn, connector_state);
active_wb->pixel_write = get_pixel_write_function(wb_format);
drm_rect_init(&wb_frame_info->src, 0, 0, crtc_width, crtc_height);
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 6f1ac940cbae..6c3c2922ae8b 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -3,6 +3,7 @@ config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
depends on DRM && PCI && MMU
depends on (X86 && HYPERVISOR_GUEST) || ARM64
+ select DRM_CLIENT_SELECTION
select DRM_TTM
select DRM_TTM_HELPER
select MAPPING_DIRTY_HELPERS
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 3353e97687d1..36d46b79562a 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -54,7 +54,7 @@
#include <linux/module.h>
#include <linux/hashtable.h>
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
#define VMW_TTM_OBJECT_REF_HT_ORDER 10
@@ -471,7 +471,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*/
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
- return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+ return file_ref_get(&dmabuf->file->f_ref);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 890a66a2361f..64bd7d74854e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -635,10 +635,8 @@ out:
kunmap_atomic(d.src_addr);
if (d.dst_addr)
kunmap_atomic(d.dst_addr);
- if (src_pages)
- kvfree(src_pages);
- if (dst_pages)
- kvfree(dst_pages);
+ kvfree(src_pages);
+ kvfree(dst_pages);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index a0e433fbcba6..9b5b8c1f063b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -228,7 +228,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
- buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
@@ -443,7 +442,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
if (params->pin)
ttm_bo_pin(&vmw_bo->tbo);
- ttm_bo_unreserve(&vmw_bo->tbo);
+ if (!params->keep_resv)
+ ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 43b5439ec9f7..11e330c7c7f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -56,8 +56,9 @@ struct vmw_bo_params {
u32 domain;
u32 busy_domain;
enum ttm_bo_type bo_type;
- size_t size;
bool pin;
+ bool keep_resv;
+ size_t size;
struct dma_resv *resv;
struct sg_table *sg;
};
@@ -83,7 +84,6 @@ struct vmw_bo {
struct ttm_placement placement;
struct ttm_place places[5];
- struct ttm_place busy_places[5];
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
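The new keep_resv flag in vmw_bo_params lets a caller receive the buffer still reserved from vmw_bo_create(), replacing the separate create-then-ttm_bo_reserve() dance (and its BUG_ON) that several callers in this series used to do. A sketch of the resulting caller pattern, per the hunks here:

        struct vmw_bo_params params = {
                .domain = VMW_BO_DOMAIN_SYS,
                .busy_domain = VMW_BO_DOMAIN_SYS,
                .bo_type = ttm_bo_type_kernel,
                .size = PAGE_SIZE,
                .pin = true,
                .keep_resv = true,      /* buffer comes back still reserved */
        };
        struct vmw_bo *vbo;
        int ret;

        ret = vmw_bo_create(dev_priv, &params, &vbo);
        if (ret)
                return ret;

        /* ... set the buffer up under the existing reservation ... */

        ttm_bo_unreserve(&vbo->tbo);    /* caller drops the reservation itself */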
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2825dd3149ed..0f32471c8533 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -35,7 +35,7 @@
#include "vmwgfx_vkms.h"
#include "ttm_object.h"
-#include <drm/drm_aperture.h>
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_ttm_helper.h>
@@ -49,6 +49,8 @@
#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif
+
+#include <linux/aperture.h>
#include <linux/cc_platform.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -401,7 +403,8 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
/*
@@ -413,10 +416,6 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
- vmw_bo_pin_reserved(vbo, true);
-
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
@@ -859,8 +858,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
bool refuse_dma = false;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- dev_priv->drm.dev_private = dev_priv;
-
vmw_sw_context_init(dev_priv);
mutex_init(&dev_priv->cmdbuf_mutex);
@@ -1629,10 +1626,11 @@ static const struct drm_driver driver = {
.prime_handle_to_fd = vmw_prime_handle_to_fd,
.gem_prime_import_sg_table = vmw_prime_import_sg_table,
+ DRM_FBDEV_TTM_DRIVER_OPS,
+
.fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
- .date = VMWGFX_DRIVER_DATE,
.major = VMWGFX_DRIVER_MAJOR,
.minor = VMWGFX_DRIVER_MINOR,
.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
@@ -1653,7 +1651,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct vmw_private *vmw;
int ret;
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+ ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);
if (ret)
goto out_error;
@@ -1680,7 +1678,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
vmw_fifo_resource_inc(vmw);
vmw_svga_enable(vmw);
- drm_fbdev_ttm_setup(&vmw->drm, 0);
+ drm_client_setup(&vmw->drm, NULL);
vmw_debugfs_gem_init(vmw);
vmw_debugfs_resource_managers_init(vmw);
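Taken together, the vmwgfx_drv.c hunks move the driver to the generic helpers: the plain aperture API replaces the DRM-specific framebuffer-removal call, and fbdev setup goes through the client framework selected by DRM_FBDEV_TTM_DRIVER_OPS. The probe-time sequence then reads roughly:

        ret = aperture_remove_conflicting_pci_devices(pdev, driver.name);
        if (ret)
                goto out_error;

        /* ... device and modeset initialization ... */

        drm_client_setup(&vmw->drm, NULL);      /* fbdev provided via DRM_FBDEV_TTM_DRIVER_OPS */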
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3f4719b3c268..5275ef632d4b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -57,12 +57,11 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20211206"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 20
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
-#define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_NUM_DISPLAY_UNITS 8
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_MIN_INITIAL_WIDTH 1280
@@ -82,7 +81,7 @@
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
-#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
@@ -639,7 +638,7 @@ static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
- return (struct vmw_private *)dev->dev_private;
+ return container_of(dev, struct vmw_private, drm);
}
static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index b9857f37ca1a..ed5015ced392 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -206,6 +206,7 @@ struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
.bo_type = ttm_bo_type_sg,
.size = attach->dmabuf->size,
.pin = false,
+ .keep_resv = true,
.resv = attach->dmabuf->resv,
.sg = table,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 288ed0bb75cb..800a79e035ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -280,7 +280,7 @@ static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
struct vmw_plane_state *vps)
{
- struct vmw_private *dev_priv = vcp->base.dev->dev_private;
+ struct vmw_private *dev_priv = vmw_priv(vcp->base.dev);
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
u32 i;
u32 cursor_max_dim, mob_max_size;
@@ -519,7 +519,7 @@ void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
u32 i;
- vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
+ vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0);
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
@@ -750,6 +750,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
struct vmw_bo *old_bo = NULL;
struct vmw_bo *new_bo = NULL;
+ struct ww_acquire_ctx ctx;
s32 hotspot_x, hotspot_y;
int ret;
@@ -769,9 +770,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (du->cursor_surface)
du->cursor_age = du->cursor_surface->snooper.age;
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
if (!vmw_user_object_is_null(&old_vps->uo)) {
old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
if (ret != 0)
return;
}
@@ -779,9 +782,14 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (!vmw_user_object_is_null(&vps->uo)) {
new_bo = vmw_user_object_buffer(&vps->uo);
if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
- if (ret != 0)
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ if (old_bo) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ }
return;
+ }
} else {
new_bo = NULL;
}
@@ -803,10 +811,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
if (new_bo)
ttm_bo_unreserve(&new_bo->tbo);
+ if (old_bo)
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
@@ -1265,6 +1275,8 @@ static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+ if (WARN_ON(!bo))
+ return -EINVAL;
return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
}
@@ -1283,7 +1295,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
{
struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_surface *vfbs;
- enum SVGA3dSurfaceFormat format;
struct vmw_surface *surface;
int ret;
@@ -1320,34 +1331,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
return -EINVAL;
}
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_ARGB8888:
- format = SVGA3D_A8R8G8B8;
- break;
- case DRM_FORMAT_XRGB8888:
- format = SVGA3D_X8R8G8B8;
- break;
- case DRM_FORMAT_RGB565:
- format = SVGA3D_R5G6B5;
- break;
- case DRM_FORMAT_XRGB1555:
- format = SVGA3D_A1R5G5B5;
- break;
- default:
- DRM_ERROR("Invalid pixel format: %p4cc\n",
- &mode_cmd->pixel_format);
- return -EINVAL;
- }
-
- /*
- * For DX, surface format validation is done when surface->scanout
- * is set.
- */
- if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
- DRM_ERROR("Invalid surface format for requested mode.\n");
- return -EINVAL;
- }
-
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
if (!vfbs) {
ret = -ENOMEM;
@@ -1539,6 +1522,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
DRM_ERROR("Surface size cannot exceed %dx%d\n",
dev_priv->texture_max_width,
dev_priv->texture_max_height);
+ ret = -EINVAL;
goto err_out;
}
@@ -2225,7 +2209,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
- void __user *user_rects;
+ const void __user *user_rects;
struct drm_vmw_rect *rects;
struct drm_rect *drm_rects;
unsigned rects_size;
@@ -2237,6 +2221,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
VMWGFX_MIN_INITIAL_HEIGHT};
vmw_du_update_layout(dev_priv, 1, &def_rect);
return 0;
+ } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
+ return -E2BIG;
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
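The cursor-plane hunks above move both buffer reservations under a single ww_acquire_ctx, so the two ttm_bo_reserve() calls participate in one wound/wait acquire context instead of two independent ones, and the second reservation can be backed out cleanly if it fails. A reduced sketch of the locking shape (error handling trimmed):

        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &reservation_ww_class);

        ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx); /* first BO */
        if (!ret && new_bo != old_bo)
                ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx); /* second BO, same ctx */

        /* ... update the cursor while both are reserved ... */

        if (new_bo != old_bo)
                ttm_bo_unreserve(&new_bo->tbo);
        ttm_bo_unreserve(&old_bo->tbo);
        ww_acquire_fini(&ctx);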
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6141fadf81ef..2a6c6d6581e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -199,9 +199,6 @@ struct vmw_kms_dirty {
s32 unit_y2;
};
-#define VMWGFX_NUM_DISPLAY_UNITS 8
-
-
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
#define vmw_framebuffer_to_vfbs(x) \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 39949e0a493f..f0b429525467 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -479,7 +479,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, true);
ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 0f4bfd98480a..32029d80b72b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -868,7 +868,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, true);
ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index a01ca3226d0a..7fb1c88bcc47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
if (!vmw_shader_id_ok(user_key, shader_type))
@@ -906,10 +907,6 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out;
- ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
- if (unlikely(ret != 0))
- goto no_reserve;
-
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index fab155a68054..114a75069e1c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -886,6 +886,10 @@ static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
struct drm_crtc_state *new_crtc_state;
conn_state = drm_atomic_get_connector_state(state, conn);
+
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
du = vmw_connector_to_stdu(conn);
if (!conn_state->crtc)
@@ -1589,7 +1593,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs);
- connector->status = vmw_du_connector_detect(connector, false);
ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 1625b30d9970..5721c74da3e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -2276,9 +2276,12 @@ int vmw_dumb_create(struct drm_file *file_priv,
const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
SVGA3D_SURFACE_HINT_RENDERTARGET |
- SVGA3D_SURFACE_SCREENTARGET |
- SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
- SVGA3D_SURFACE_BIND_RENDER_TARGET;
+ SVGA3D_SURFACE_SCREENTARGET;
+
+ if (vmw_surface_is_dx_screen_target_format(format)) {
+ flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
+ SVGA3D_SURFACE_BIND_RENDER_TARGET;
+ }
/*
* Without mob support we're just going to use raw memory buffer
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 621d98b376bb..5553892d7c3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -572,15 +572,14 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
.busy_domain = domain,
.bo_type = ttm_bo_type_kernel,
.size = bo_size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 7bbe46a98ff1..b51a2bde73e2 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -8,12 +8,14 @@ config DRM_XE
select SHMEM
select TMPFS
select DRM_BUDDY
+ select DRM_CLIENT_SELECTION
select DRM_EXEC
select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n
select DRM_PANEL
select DRM_SUBALLOC_HELPER
select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_DSC_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
@@ -49,7 +51,7 @@ config DRM_XE
config DRM_XE_DISPLAY
bool "Enable display support"
- depends on DRM_XE && DRM_XE=m
+ depends on DRM_XE && DRM_XE=m && HAS_IOPORT
select FB_IOMEM_HELPERS
select I2C
select I2C_ALGOBIT
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index bc177368af6c..0d749ed44878 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -40,9 +40,21 @@ config DRM_XE_DEBUG_VM
If in doubt, say "N".
+config DRM_XE_DEBUG_MEMIRQ
+ bool "Enable extra memirq debugging"
+ default n
+ help
+ Choose this option to enable additional debugging info for
+ memory-based interrupts.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_XE_DEBUG_SRIOV
bool "Enable extra SR-IOV debugging"
default n
+ select DRM_XE_DEBUG_MEMIRQ
help
Enable extra SR-IOV debugging info.
@@ -54,7 +66,7 @@ config DRM_XE_DEBUG_MEM
bool "Enable passing SYS/VRAM addresses to user space"
default n
help
- Pass object location trough uapi. Intended for extended
+ Pass object location through uapi. Intended for extended
testing and development only.
Recommended for driver developers only.
@@ -92,5 +104,5 @@ config DRM_XE_USERPTR_INVAL_INJECT
Choose this option when debugging error paths that
are hit during checks for userptr invalidations.
- Recomended for driver developers only.
+ Recommended for driver developers only.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index edfd812e0f41..5c97ad6ed738 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -56,6 +56,7 @@ xe-y += xe_bb.o \
xe_gt_topology.o \
xe_guc.o \
xe_guc_ads.o \
+ xe_guc_capture.o \
xe_guc_ct.o \
xe_guc_db_mgr.o \
xe_guc_hwconfig.o \
@@ -100,6 +101,7 @@ xe-y += xe_bb.o \
xe_trace.o \
xe_trace_bo.o \
xe_trace_guc.o \
+ xe_trace_lrc.o \
xe_ttm_sys_mgr.o \
xe_ttm_stolen_mgr.o \
xe_ttm_vram_mgr.o \
@@ -109,6 +111,7 @@ xe-y += xe_bb.o \
xe_vm.o \
xe_vram.o \
xe_vram_freq.o \
+ xe_vsec.o \
xe_wait_user_fence.o \
xe_wa.o \
xe_wopcm.o
@@ -123,12 +126,14 @@ xe-y += \
xe_gt_sriov_vf.o \
xe_guc_relay.o \
xe_memirq.o \
- xe_sriov.o
+ xe_sriov.o \
+ xe_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
xe_gt_sriov_pf_config.o \
xe_gt_sriov_pf_control.o \
+ xe_gt_sriov_pf_migration.o \
xe_gt_sriov_pf_monitor.o \
xe_gt_sriov_pf_policy.o \
xe_gt_sriov_pf_service.o \
@@ -148,7 +153,6 @@ subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
-I$(src)/display/ext \
-I$(src)/compat-i915-headers \
-I$(srctree)/drivers/gpu/drm/i915/display/ \
- -Ddrm_i915_gem_object=xe_bo \
-Ddrm_i915_private=xe_device
# Rule to build SOC code shared with i915
@@ -165,6 +169,7 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/ext/i915_irq.o \
display/ext/i915_utils.o \
+ display/intel_bo.o \
display/intel_fb_bo.o \
display/intel_fbdev_fb.o \
display/xe_display.o \
@@ -180,7 +185,8 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
# SOC code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-soc/intel_dram.o \
- i915-soc/intel_pch.o
+ i915-soc/intel_pch.o \
+ i915-soc/intel_rom.o
# Display code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
@@ -203,6 +209,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_display.o \
+ i915-display/intel_display_conversion.o \
i915-display/intel_display_device.o \
i915-display/intel_display_driver.o \
i915-display/intel_display_irq.o \
@@ -220,6 +227,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_dp_hdcp.o \
i915-display/intel_dp_link_training.o \
i915-display/intel_dp_mst.o \
+ i915-display/intel_dp_test.o \
i915-display/intel_dpll.o \
i915-display/intel_dpll_mgr.o \
i915-display/intel_dpt_common.o \
@@ -248,6 +256,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_modeset_setup.o \
i915-display/intel_modeset_verify.o \
i915-display/intel_panel.o \
+ i915-display/intel_pfit.o \
i915-display/intel_pmdemand.o \
i915-display/intel_pps.o \
i915-display/intel_psr.o \
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 43ad4652c2b2..fee385532fb0 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -134,6 +134,8 @@ enum xe_guc_action {
XE_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503,
XE_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
XE_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
+ XE_GUC_ACTION_REGISTER_G2G = 0x4507,
+ XE_GUC_ACTION_DEREGISTER_G2G = 0x4508,
XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
@@ -176,6 +178,14 @@ enum xe_guc_sleep_state_status {
#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)
+enum xe_guc_state_capture_event_status {
+ XE_GUC_STATE_CAPTURE_EVENT_STATUS_SUCCESS = 0x0,
+ XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE = 0x1,
+};
+
+#define XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
+#define XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN 1
+
#define XE_GUC_TLB_INVAL_TYPE_SHIFT 0
#define XE_GUC_TLB_INVAL_MODE_SHIFT 8
/* Flush PPC or SMRO caches along with TLB invalidation request */
@@ -210,4 +220,22 @@ enum xe_guc_tlb_inval_mode {
XE_GUC_TLB_INVAL_MODE_LITE = 0x1,
};
+/*
+ * GuC to GuC communication (de-)registration fields:
+ */
+enum xe_guc_g2g_type {
+ XE_G2G_TYPE_IN = 0x0,
+ XE_G2G_TYPE_OUT,
+ XE_G2G_TYPE_LIMIT,
+};
+
+#define XE_G2G_REGISTER_DEVICE REG_GENMASK(16, 16)
+#define XE_G2G_REGISTER_TILE REG_GENMASK(15, 12)
+#define XE_G2G_REGISTER_TYPE REG_GENMASK(11, 8)
+#define XE_G2G_REGISTER_SIZE REG_GENMASK(7, 0)
+
+#define XE_G2G_DEREGISTER_DEVICE REG_GENMASK(16, 16)
+#define XE_G2G_DEREGISTER_TILE REG_GENMASK(15, 12)
+#define XE_G2G_DEREGISTER_TYPE REG_GENMASK(11, 8)
+
#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
index 181180f5945c..0b28659d94e9 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
@@ -502,6 +502,44 @@
#define VF2GUC_VF_RESET_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
/**
+ * DOC: VF2GUC_NOTIFY_RESFIX_DONE
+ *
+ * This action is used by VF to notify the GuC that the VF KMD has completed
+ * post-migration recovery steps.
+ *
+ * This message must be sent as `MMIO HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE` = 0x5508 |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE 0x5508u
+
+#define VF2GUC_NOTIFY_RESFIX_DONE_REQUEST_MSG_LEN GUC_HXG_REQUEST_MSG_MIN_LEN
+#define VF2GUC_NOTIFY_RESFIX_DONE_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+
+#define VF2GUC_NOTIFY_RESFIX_DONE_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define VF2GUC_NOTIFY_RESFIX_DONE_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+
+/**
* DOC: VF2GUC_QUERY_SINGLE_KLV
*
* This action is used by VF to query value of the single KLV data.
@@ -557,4 +595,65 @@
#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96 GUC_HXG_REQUEST_MSG_n_DATAn
+/**
+ * DOC: PF2GUC_SAVE_RESTORE_VF
+ *
+ * This message is used by the PF to migrate VF info state maintained by the GuC.
+ *
+ * This message must be sent as `CTB HXG Message`_.
+ *
+ * Available since GuC version 70.25.0
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = **OPCODE** - operation to take: |
+ * | | | |
+ * | | | - _`GUC_PF_OPCODE_VF_SAVE` = 0 |
+ * | | | - _`GUC_PF_OPCODE_VF_RESTORE` = 1 |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_SAVE_RESTORE_VF` = 0x550B |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VFID** - VF identifier |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **ADDR_LO** - lower 32-bits of GGTT offset to the buffer |
+ * | | | where the VF info will be saved to or restored from. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 3 | 31:0 | **ADDR_HI** - upper 32-bits of GGTT offset to the buffer |
+ * | | | where the VF info will be saved to or restored from. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 4 | 27:0 | **SIZE** - size of the buffer (in dwords) |
+ * | +-------+--------------------------------------------------------------+
+ * | | 31:28 | MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = **USED** - size of used buffer space (in dwords) |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_PF2GUC_SAVE_RESTORE_VF 0x550Bu
+
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 4u)
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE GUC_HXG_EVENT_MSG_0_DATA0
+#define GUC_PF_OPCODE_VF_SAVE 0u
+#define GUC_PF_OPCODE_VF_RESTORE 1u
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_ADDR_LO GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_ADDR_HI GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE (0xfffffffu << 0)
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_MBZ (0xfu << 28)
+
+#define PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_0_USED GUC_HXG_RESPONSE_MSG_0_DATA0
+
#endif
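The PF2GUC_SAVE_RESTORE_VF layout documented above maps onto a five-dword CTB request. A hedged sketch of packing it with the masks defined here — the GUC_HXG_* origin/type/action names are assumed from the shared HXG message ABI header, and FIELD_PREP comes from <linux/bitfield.h>:

        u32 request[PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN] = {
                FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
                FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
                FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE,
                           GUC_PF_OPCODE_VF_SAVE) |
                FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
                           GUC_ACTION_PF2GUC_SAVE_RESTORE_VF),
                vfid,                           /* MSG_1_VFID */
                lower_32_bits(ggtt_addr),       /* MSG_2_ADDR_LO */
                upper_32_bits(ggtt_addr),       /* MSG_3_ADDR_HI */
                FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE, size_dw),
        };

On success the response carries the number of buffer dwords actually used in DATA0, extractable with PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_0_USED.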
diff --git a/drivers/gpu/drm/xe/abi/guc_capture_abi.h b/drivers/gpu/drm/xe/abi/guc_capture_abi.h
new file mode 100644
index 000000000000..dd4117553739
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_capture_abi.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_CAPTURE_ABI_H
+#define _ABI_GUC_CAPTURE_ABI_H
+
+#include <linux/types.h>
+
+/* Capture List Index */
+enum guc_capture_list_index_type {
+ GUC_CAPTURE_LIST_INDEX_PF = 0,
+ GUC_CAPTURE_LIST_INDEX_VF = 1,
+};
+
+#define GUC_CAPTURE_LIST_INDEX_MAX (GUC_CAPTURE_LIST_INDEX_VF + 1)
+
+/* Register-types of GuC capture register lists */
+enum guc_state_capture_type {
+ GUC_STATE_CAPTURE_TYPE_GLOBAL = 0,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE
+};
+
+#define GUC_STATE_CAPTURE_TYPE_MAX (GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE + 1)
+
+/* Class indices for capture_class and capture_instance arrays */
+enum guc_capture_list_class_type {
+ GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
+ GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
+ GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE = 2,
+ GUC_CAPTURE_LIST_CLASS_BLITTER = 3,
+ GUC_CAPTURE_LIST_CLASS_GSC_OTHER = 4,
+};
+
+#define GUC_CAPTURE_LIST_CLASS_MAX (GUC_CAPTURE_LIST_CLASS_GSC_OTHER + 1)
+
+/**
+ * struct guc_mmio_reg - GuC MMIO reg state struct
+ *
+ * GuC MMIO reg state struct
+ */
+struct guc_mmio_reg {
+ /** @offset: MMIO Offset - filled in by Host */
+ u32 offset;
+ /** @value: MMIO Value - Used by Firmware to store value */
+ u32 value;
+ /** @flags: Flags for accessing the MMIO */
+ u32 flags;
+ /** @mask: Value of a mask to apply if mask with value is set */
+ u32 mask;
+#define GUC_REGSET_MASKED BIT(0)
+#define GUC_REGSET_STEERING_NEEDED BIT(1)
+#define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
+#define GUC_REGSET_RESTORE_ONLY BIT(3)
+#define GUC_REGSET_STEERING_GROUP GENMASK(16, 12)
+#define GUC_REGSET_STEERING_INSTANCE GENMASK(23, 20)
+} __packed;
+
+/**
+ * struct guc_mmio_reg_set - GuC register sets
+ *
+ * GuC register sets
+ */
+struct guc_mmio_reg_set {
+ /** @address: register address */
+ u32 address;
+ /** @count: register count */
+ u16 count;
+ /** @reserved: reserved */
+ u16 reserved;
+} __packed;
+
+/**
+ * struct guc_debug_capture_list_header - Debug capture list header.
+ *
+ * Debug capture list header.
+ */
+struct guc_debug_capture_list_header {
+ /** @info: contains number of MMIO descriptors in the capture list. */
+ u32 info;
+#define GUC_CAPTURELISTHDR_NUMDESCR GENMASK(15, 0)
+} __packed;
+
+/**
+ * struct guc_debug_capture_list - Debug capture list
+ *
+ * As part of ADS registration, these header structures (followed by
+ * an array of 'struct guc_mmio_reg' entries) are used to register with
+ * the GuC microkernel the list of registers we want it to dump out prior
+ * to an engine reset.
+ */
+struct guc_debug_capture_list {
+ /** @header: Debug capture list header. */
+ struct guc_debug_capture_list_header header;
+ /** @regs: MMIO descriptors in the capture list. */
+ struct guc_mmio_reg regs[];
+} __packed;
+
+/**
+ * struct guc_state_capture_header_t - State capture header.
+ *
+ * Prior to resetting engines that have hung or faulted, GuC microkernel
+ * reports the engine error state (the register values that were read) by
+ * logging them into the shared GuC log buffer using this hierarchy
+ * of structures.
+ */
+struct guc_state_capture_header_t {
+ /**
+ * @owner: VFID
+ * BR[ 7: 0] MBZ when SRIOV is disabled. When SRIOV is enabled,
+ * VFID is an integer in the range [0, 63], where 0 means the state capture
+ * corresponds to the PF and an integer N in the range [1, 63] means
+ * the state capture is for VF N.
+ */
+ u32 owner;
+#define GUC_STATE_CAPTURE_HEADER_VFID GENMASK(7, 0)
+ /** @info: Engine class/instance and capture type info */
+ u32 info;
+#define GUC_STATE_CAPTURE_HEADER_CAPTURE_TYPE GENMASK(3, 0) /* see guc_state_capture_type */
+#define GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS GENMASK(7, 4) /* see guc_capture_list_class_type */
+#define GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE GENMASK(11, 8)
+ /**
+ * @lrca: logical ring context address.
+ * if type-instance, LRCA (address) that hung, else set to ~0
+ */
+ u32 lrca;
+ /**
+ * @guc_id: context_index.
+ * if type-instance, context index of hung context, else set to ~0
+ */
+ u32 guc_id;
+ /** @num_mmio_entries: Number of captured MMIO entries. */
+ u32 num_mmio_entries;
+#define GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES GENMASK(9, 0)
+} __packed;
+
+/**
+ * struct guc_state_capture_t - State capture.
+ *
+ * State capture
+ */
+struct guc_state_capture_t {
+ /** @header: State capture header. */
+ struct guc_state_capture_header_t header;
+ /** @mmio_entries: Array of captured guc_mmio_reg entries. */
+ struct guc_mmio_reg mmio_entries[];
+} __packed;
+
+/* State Capture Group Type */
+enum guc_state_capture_group_type {
+ GUC_STATE_CAPTURE_GROUP_TYPE_FULL = 0,
+ GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL
+};
+
+#define GUC_STATE_CAPTURE_GROUP_TYPE_MAX (GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL + 1)
+
+/**
+ * struct guc_state_capture_group_header_t - State capture group header
+ *
+ * State capture group header.
+ */
+struct guc_state_capture_group_header_t {
+ /** @owner: VFID */
+ u32 owner;
+#define GUC_STATE_CAPTURE_GROUP_HEADER_VFID GENMASK(7, 0)
+ /** @info: Engine class/instance and capture type info */
+ u32 info;
+#define GUC_STATE_CAPTURE_GROUP_HEADER_NUM_CAPTURES GENMASK(7, 0)
+#define GUC_STATE_CAPTURE_GROUP_HEADER_CAPTURE_GROUP_TYPE GENMASK(15, 8)
+} __packed;
+
+/**
+ * struct guc_state_capture_group_t - State capture group.
+ *
+ * This is the top-level structure where an error-capture dump starts.
+ */
+struct guc_state_capture_group_t {
+ /** @grp_header: State capture group header. */
+ struct guc_state_capture_group_header_t grp_header;
+ /** @capture_entries: Array of state captures */
+ struct guc_state_capture_t capture_entries[];
+} __packed;
+
+#endif
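Since both guc_state_capture_group_t and guc_state_capture_t end in flexible arrays, a capture group in the log buffer has to be walked byte-wise rather than indexed. A minimal sketch under the struct definitions above (blob is assumed to point at one complete group in the capture buffer; FIELD_GET is from <linux/bitfield.h>):

        const u8 *p = blob;
        const struct guc_state_capture_group_header_t *gh = (const void *)p;
        u32 captures = FIELD_GET(GUC_STATE_CAPTURE_GROUP_HEADER_NUM_CAPTURES,
                                 gh->info);

        p += sizeof(*gh);
        while (captures--) {
                const struct guc_state_capture_t *cap = (const void *)p;
                u32 nregs = FIELD_GET(GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES,
                                      cap->header.num_mmio_entries);

                /* cap->mmio_entries[0 .. nregs - 1] hold the captured registers */

                p += sizeof(cap->header) + nregs * sizeof(struct guc_mmio_reg);
        }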
diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
index 8f86a16dc577..f58198cf2cf6 100644
--- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
@@ -52,6 +52,7 @@ struct guc_ct_buffer_desc {
#define GUC_CTB_STATUS_OVERFLOW (1 << 0)
#define GUC_CTB_STATUS_UNDERFLOW (1 << 1)
#define GUC_CTB_STATUS_MISMATCH (1 << 2)
+#define GUC_CTB_STATUS_DISABLED (1 << 3)
u32 reserved[13];
} __packed;
static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 6b30743a2f6c..d633f1c739e4 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -132,7 +132,7 @@ enum {
* _`GUC_KLV_VGT_POLICY_SCHED_IF_IDLE` : 0x8001
* This config sets whether strict scheduling is enabled whereby any VF
* that doesn’t have work to submit is still allocated a fixed execution
- * time-slice to ensure active VFs execution is always consitent even
+ * time-slice to ensure active VFs execution is always consistent even
* during other VF reprovisiong / rebooting events. Changing this KLV
* impacts all VFs and takes effect on the next VF-Switch event.
*
@@ -207,7 +207,7 @@ enum {
* of and this will never be perfectly-exact (accumulated nano-second
* granularity) since the GPUs clock time runs off a different crystal
* from the CPUs clock. Changing this KLV on a VF that is currently
- * running a context wont take effect until a new context is scheduled in.
+ * running a context won't take effect until a new context is scheduled in.
* That said, when the PF is changing this value from 0x0 to
* a non-zero value, it might never take effect if the VF is running an
* infinitely long compute or shader kernel. In such a scenario, the
@@ -227,7 +227,7 @@ enum {
* HW is capable and this will never be perfectly-exact (accumulated
* nano-second granularity) since the GPUs clock time runs off a
* different crystal from the CPUs clock. Changing this KLV on a VF
- * that is currently running a context wont take effect until a new
+ * that is currently running a context won't take effect until a new
* context is scheduled in.
* That said, when the PF is changing this value from 0x0 to
* a non-zero value, it might never take effect if the VF is running an
@@ -291,6 +291,14 @@ enum {
*
* :0: (default)
* :1-65535: number of contexts (Gen12)
+ *
+ * _`GUC_KLV_VF_CFG_SCHED_PRIORITY` : 0x8A0C
+ * This config controls VF’s scheduling priority.
+ *
+ * :0: LOW = schedule VF only if it has active work (default)
+ * :1: NORMAL = schedule VF always, irrespective of whether it has work or not
+ * :2: HIGH = schedule VF in the next time-slice after current active
+ * time-slice completes if it has active work
*/
#define GUC_KLV_VF_CFG_GGTT_START_KEY 0x0001
@@ -343,6 +351,12 @@ enum {
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY 0x8a0b
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_LEN 1u
+#define GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY 0x8a0c
+#define GUC_KLV_VF_CFG_SCHED_PRIORITY_LEN 1u
+#define GUC_SCHED_PRIORITY_LOW 0u
+#define GUC_SCHED_PRIORITY_NORMAL 1u
+#define GUC_SCHED_PRIORITY_HIGH 2u
+
/*
* Workaround keys:
*/
@@ -352,6 +366,7 @@ enum xe_guc_klv_ids {
GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007,
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
+ GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a,
};
#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_log_abi.h b/drivers/gpu/drm/xe/abi/guc_log_abi.h
new file mode 100644
index 000000000000..554630b7ccd9
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_log_abi.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_LOG_ABI_H
+#define _ABI_GUC_LOG_ABI_H
+
+#include <linux/types.h>
+
+/* GuC logging buffer types */
+enum guc_log_buffer_type {
+ GUC_LOG_BUFFER_CRASH_DUMP,
+ GUC_LOG_BUFFER_DEBUG,
+ GUC_LOG_BUFFER_CAPTURE,
+};
+
+#define GUC_LOG_BUFFER_TYPE_MAX 3
+
+/**
+ * struct guc_log_buffer_state - GuC log buffer state
+ *
+ * This state structure is used to coordinate the retrieval of GuC firmware
+ * logs. Separate state is maintained for each log buffer type.
+ * read_ptr points to the location in the log buffer that Xe last read and
+ * is read-only for the GuC firmware. write_ptr is incremented by the GuC by
+ * the number of bytes written for each log entry and is read-only for Xe.
+ * When any type of log buffer becomes half full, the GuC sends a flush
+ * interrupt. The GuC firmware expects that while it is writing to the second
+ * half of the buffer, the first half is consumed by the Host, which then
+ * sends a flush-completed acknowledgment, so that the GuC never overwrites
+ * unread data and loses logs. So when the buffer is half full and Xe has
+ * requested the interrupt, the GuC sets the flush_to_file field, sets
+ * sampled_write_ptr to the value of write_ptr and raises the interrupt.
+ * On receiving the interrupt, Xe should read the buffer, clear the
+ * flush_to_file field and update read_ptr with the value of
+ * sampled_write_ptr, before sending an acknowledgment to the GuC. The marker
+ * and version fields are for internal use by the GuC and opaque to Xe. The
+ * buffer_full_cnt field is incremented every time the GuC detects a log
+ * buffer overflow.
+ */
+struct guc_log_buffer_state {
+ /** @marker: buffer state start marker */
+ u32 marker[2];
+ /** @read_ptr: the last byte offset that was read by KMD previously */
+ u32 read_ptr;
+ /**
+ * @write_ptr: the next byte offset location that will be written by
+ * GuC
+ */
+ u32 write_ptr;
+ /** @size: Log buffer size */
+ u32 size;
+ /**
+ * @sampled_write_ptr: Log buffer write pointer
+ * This is written by GuC to the byte offset of the next free entry in
+ * the buffer on log buffer half full or state capture notification
+ */
+ u32 sampled_write_ptr;
+ /**
+ * @wrap_offset: wraparound offset
+ * This is the byte offset of the location one byte past the last valid
+ * GuC log event entry written by the GuC firmware before the wraparound.
+ * This field is updated by the GuC firmware and should be used by the
+ * Host when copying buffer contents to a file.
+ */
+ u32 wrap_offset;
+ /** @flags: Flush to file flag and buffer full count */
+ u32 flags;
+#define GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE GENMASK(0, 0)
+#define GUC_LOG_BUFFER_STATE_BUFFER_FULL_CNT GENMASK(4, 1)
+ /** @version: The GuC log entry format version */
+ u32 version;
+} __packed;
+
+#endif
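The flush protocol documented above amounts to a small piece of host-side bookkeeping per interrupt. A sketch under those rules — consume_log() is a hypothetical helper standing in for whatever copies the bytes out, and the read may need to wrap at wrap_offset:

        /* On a log-flush interrupt, for the affected buffer's state page: */
        u32 write = state->sampled_write_ptr;

        consume_log(log, state->read_ptr, write);       /* hypothetical: copy out [read_ptr, write) */
        state->read_ptr = write;
        state->flags &= ~GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE;
        /* ... then send the flush acknowledgment back to the GuC. */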
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h
deleted file mode 100644
index 710cecca972d..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h
+++ /dev/null
@@ -1 +0,0 @@
-/* Empty */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h
deleted file mode 100644
index 650ea2803a97..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _I915_GEM_MMAN_H_
-#define _I915_GEM_MMAN_H_
-
-#include "xe_bo_types.h"
-#include <drm/drm_prime.h>
-
-static inline int i915_gem_fb_mmap(struct xe_bo *bo, struct vm_area_struct *vma)
-{
- return drm_gem_prime_mmap(&bo->ttm.base, vma);
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
deleted file mode 100644
index 777c20ceabab..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef _I915_GEM_OBJECT_H_
-#define _I915_GEM_OBJECT_H_
-
-#include <linux/types.h>
-
-#include "xe_bo.h"
-
-#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */
-
-static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n)
-{
- /* Should never be called */
- WARN_ON(1);
- return n;
-}
-
-static inline bool i915_gem_object_is_tiled(const struct xe_bo *bo)
-{
- /* legacy tiling is unused */
- return false;
-}
-
-static inline bool i915_gem_object_is_userptr(const struct xe_bo *bo)
-{
- /* legacy tiling is unused */
- return false;
-}
-
-static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
- u32 ofs, u64 *ptr, u32 size)
-{
- struct ttm_bo_kmap_obj map;
- void *src;
- bool is_iomem;
- int ret;
-
- ret = xe_bo_lock(bo, true);
- if (ret)
- return ret;
-
- ret = ttm_bo_kmap(&bo->ttm, ofs >> PAGE_SHIFT, 1, &map);
- if (ret)
- goto out_unlock;
-
- ofs &= ~PAGE_MASK;
- src = ttm_kmap_obj_virtual(&map, &is_iomem);
- src += ofs;
- if (is_iomem)
- memcpy_fromio(ptr, (void __iomem *)src, size);
- else
- memcpy(ptr, src, size);
-
- ttm_bo_kunmap(&map);
-out_unlock:
- xe_bo_unlock(bo);
- return ret;
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h
deleted file mode 100644
index 2a3f12d2978c..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef _I915_GEM_OBJECT_FRONTBUFFER_H_
-#define _I915_GEM_OBJECT_FRONTBUFFER_H_
-
-#define i915_gem_object_get_frontbuffer(obj) NULL
-#define i915_gem_object_set_frontbuffer(obj, front) (front)
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h
deleted file mode 100644
index 7d6bb1abab73..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/* Copyright © 2024 Intel Corporation */
-
-#ifndef __I915_GEM_OBJECT_TYPES_H__
-#define __I915_GEM_OBJECT_TYPES_H__
-
-#include "xe_bo.h"
-
-#define to_intel_bo(x) gem_to_xe_bo((x))
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index cb6c7598824b..9c4cf050059a 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
@@ -29,7 +29,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
NULL, size, start, end,
- ttm_bo_type_kernel, flags);
+ ttm_bo_type_kernel, flags, 0);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
bo = NULL;
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h
deleted file mode 100644
index b4c47617b64b..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __I915_DEBUGFS_H__
-#define __I915_DEBUGFS_H__
-
-struct drm_i915_gem_object;
-struct seq_file;
-
-static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
-
-#endif /* __I915_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index f27a2c75b56d..84b0991b35b3 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -14,6 +14,7 @@
#include "i915_utils.h"
#include "intel_runtime_pm.h"
+#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
@@ -66,19 +67,14 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)
#define IS_BATTLEMAGE(dev_priv) IS_PLATFORM(dev_priv, XE_BATTLEMAGE)
+#define IS_PANTHERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_PANTHERLAKE)
#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0)
-#define IP_VER(ver, rel) ((ver) << 8 | (rel))
-
#define IS_MOBILE(xe) (xe && 0)
-#define IS_LP(xe) ((xe) && 0)
-#define IS_GEN9_LP(xe) ((xe) && 0)
-#define IS_GEN9_BC(xe) ((xe) && 0)
-
#define IS_TIGERLAKE_UY(xe) (xe && 0)
#define IS_COMETLAKE_ULX(xe) (xe && 0)
#define IS_COFFEELAKE_ULX(xe) (xe && 0)
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h
deleted file mode 100644
index 98e9dd78f670..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _I915_GPU_ERROR_H_
-#define _I915_GPU_ERROR_H_
-
-struct drm_i915_error_state_buf;
-
-__printf(2, 3)
-static inline void
-i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
-{
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
index 8c7b315aa8ac..274042bff1be 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
@@ -20,18 +20,26 @@ static inline void enable_rpm_wakeref_asserts(void *rpm)
{
}
+static inline bool
+intel_runtime_pm_suspended(struct xe_runtime_pm *pm)
+{
+ struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
+
+ return pm_runtime_suspended(xe->drm.dev);
+}
+
static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
- return xe_pm_runtime_resume_and_get(xe);
+ return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL;
}
static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
{
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
- return xe_pm_runtime_get_if_in_use(xe);
+ return xe_pm_runtime_get_if_in_use(xe) ? INTEL_WAKEREF_DEF : NULL;
}
static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm)
@@ -39,7 +47,8 @@ static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm
struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
xe_pm_runtime_get_noresume(xe);
- return true;
+
+ return INTEL_WAKEREF_DEF;
}
static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
@@ -62,6 +71,6 @@ static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_
#define with_intel_runtime_pm(rpm, wf) \
for ((wf) = intel_runtime_pm_get(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+ intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
#endif
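With intel_wakeref_t now a struct ref_tracker pointer (see the intel_wakeref.h hunk below), success is signalled by the non-NULL INTEL_WAKEREF_DEF sentinel rather than a boolean, so callers can keep testing the wakeref for truth. Usage stays shaped like this sketch:

        intel_wakeref_t wf;

        wf = intel_runtime_pm_get_if_in_use(rpm);
        if (wf) {
                /* device is guaranteed awake here */
                intel_runtime_pm_put(rpm, wf);
        }

        /* or, scoped: */
        with_intel_runtime_pm(rpm, wf) {
                /* device is awake for the body; put + NULL reset on exit */
        }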
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index eb5b5f0e4bd9..4fc3e535de91 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -10,11 +10,16 @@
#include "xe_device_types.h"
#include "xe_mmio.h"
-static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
+static inline struct intel_uncore *to_intel_uncore(struct drm_device *drm)
+{
+ return &to_xe_device(drm)->uncore;
+}
+
+static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
- return xe_root_mmio_gt(xe);
+ return xe_root_tile_mmio(xe);
}
static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
@@ -29,7 +34,7 @@ static inline u32 intel_uncore_read(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
@@ -37,7 +42,7 @@ static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
}
static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
@@ -45,7 +50,7 @@ static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg);
}
static inline u64
@@ -57,11 +62,11 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
u32 upper, lower, old_upper;
int loop = 0;
- upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
+ upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
do {
old_upper = upper;
- lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
- upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
+ lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg);
+ upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
} while (upper != old_upper && loop++ < 2);
return (u64)upper << 32 | lower;
@@ -72,7 +77,7 @@ static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write(struct intel_uncore *uncore,
@@ -80,7 +85,7 @@ static inline void intel_uncore_write(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
@@ -88,7 +93,7 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
+ return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
}
static inline int intel_wait_for_register(struct intel_uncore *uncore,
@@ -97,7 +102,7 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
timeout * USEC_PER_MSEC, NULL, false);
}
@@ -107,7 +112,7 @@ static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
timeout * USEC_PER_MSEC, NULL, false);
}
@@ -117,10 +122,19 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
unsigned int slow_timeout_ms, u32 *out_value)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+ bool atomic;
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ /*
+ * Replicate the behavior from i915 here, in which sleep is not
+ * performed if slow_timeout_ms == 0. This is necessary because
+ * of some paths in display code where waits are done in atomic
+ * context.
+ */
+ atomic = !slow_timeout_ms && fast_timeout_us > 0;
+
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
fast_timeout_us + 1000 * slow_timeout_ms,
- out_value, false);
+ out_value, atomic);
}
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
@@ -128,7 +142,7 @@ static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
@@ -136,7 +150,7 @@ static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
@@ -144,7 +158,7 @@ static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
@@ -152,33 +166,9 @@ static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
-}
-
-static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
-{
- struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
-
- return xe_device_get_root_tile(xe)->mmio.regs;
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
-/*
- * The raw_reg_{read,write} macros are intended as a micro-optimization for
- * interrupt handlers so that the pointer indirection on uncore->regs can
- * be computed once (and presumably cached in a register) instead of generating
- * extra load instructions for each MMIO access.
- *
- * Given that these macros are only intended for non-GSI interrupt registers
- * (and the goal is to avoid extra instructions generated by the compiler),
- * these macros do not account for uncore->gsi_offset. Any caller that needs
- * to use these macros on a GSI register is responsible for adding the
- * appropriate GSI offset to the 'base' parameter.
- */
-#define raw_reg_read(base, reg) \
- readl(base + i915_mmio_reg_offset(reg))
-#define raw_reg_write(base, reg, value) \
- writel(value, base + i915_mmio_reg_offset(reg))
-
#define intel_uncore_forcewake_get(x, y) do { } while (0)
#define intel_uncore_forcewake_put(x, y) do { } while (0)
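A note on the __intel_wait_for_register() hunk above: it adopts i915's convention that slow_timeout_ms == 0 with a non-zero fast_timeout_us signals a caller that may be in atomic context, so the wait must busy-poll instead of sleeping. A minimal caller sketch under that assumption (register, mask, and the mask/value argument positions are taken from the usual i915 signature and are illustrative):

static void example_waits(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val;

	/* slow_timeout_ms == 0: pure busy-wait, safe in atomic context */
	__intel_wait_for_register(uncore, reg, BIT(0), BIT(0),
				  100 /* fast us */, 0 /* slow ms */, &val);

	/* slow_timeout_ms > 0: xe_mmio_wait32() is allowed to sleep */
	__intel_wait_for_register(uncore, reg, BIT(0), BIT(0),
				  10 /* fast us */, 10 /* slow ms */, &val);
}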
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h
index d429d421ac70..d429d421ac70 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h
index ecb1c0707706..2a32faea9db5 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h
@@ -5,4 +5,6 @@
#include <linux/types.h>
-typedef unsigned long intel_wakeref_t;
+typedef struct ref_tracker *intel_wakeref_t;
+
+#define INTEL_WAKEREF_DEF ERR_PTR(-ENOENT)
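With intel_wakeref_t becoming a struct ref_tracker pointer, "held" is now a pointer-validity question rather than a non-zero integer check. A hedged sketch of the convention this typedef implies (the helper is hypothetical, not part of the patch):

static inline bool example_wakeref_held(intel_wakeref_t wakeref)
{
	/*
	 * NULL: not held. Real tracker pointer: held and tracked.
	 * INTEL_WAKEREF_DEF (an ERR_PTR, hence non-NULL): held, untracked.
	 */
	return wakeref != NULL;
}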
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
index c2c30ece8f77..5dfc587c8237 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
@@ -9,20 +9,14 @@
#include <linux/errno.h>
#include <linux/types.h>
-struct drm_i915_gem_object;
+struct drm_gem_object;
struct intel_pxp;
static inline int intel_pxp_key_check(struct intel_pxp *pxp,
- struct drm_i915_gem_object *obj,
+ struct drm_gem_object *obj,
bool assign)
{
return -ENODEV;
}
-static inline bool
-i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
-{
- return false;
-}
-
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h
new file mode 100644
index 000000000000..05cbfb697b2b
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "../../../i915/soc/intel_rom.h"
diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c
index eb40f1cb44f6..ac4cda2d81c7 100644
--- a/drivers/gpu/drm/xe/display/ext/i915_irq.c
+++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c
@@ -7,25 +7,24 @@
#include "i915_reg.h"
#include "intel_uncore.h"
-void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
- i915_reg_t iir, i915_reg_t ier)
+void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
- intel_uncore_write(uncore, imr, 0xffffffff);
- intel_uncore_posting_read(uncore, imr);
+ intel_uncore_write(uncore, regs.imr, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.imr);
- intel_uncore_write(uncore, ier, 0);
+ intel_uncore_write(uncore, regs.ier, 0);
/* IIR can theoretically queue up two events. Be paranoid. */
- intel_uncore_write(uncore, iir, 0xffffffff);
- intel_uncore_posting_read(uncore, iir);
- intel_uncore_write(uncore, iir, 0xffffffff);
- intel_uncore_posting_read(uncore, iir);
+ intel_uncore_write(uncore, regs.iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.iir);
+ intel_uncore_write(uncore, regs.iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.iir);
}
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
-void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
+void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
u32 val = intel_uncore_read(uncore, reg);
@@ -42,32 +41,19 @@ void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
intel_uncore_posting_read(uncore, reg);
}
-void gen3_irq_init(struct intel_uncore *uncore,
- i915_reg_t imr, u32 imr_val,
- i915_reg_t ier, u32 ier_val,
- i915_reg_t iir)
+void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
+ u32 imr_val, u32 ier_val)
{
- gen3_assert_iir_is_zero(uncore, iir);
+ gen2_assert_iir_is_zero(uncore, regs.iir);
- intel_uncore_write(uncore, ier, ier_val);
- intel_uncore_write(uncore, imr, imr_val);
- intel_uncore_posting_read(uncore, imr);
+ intel_uncore_write(uncore, regs.ier, ier_val);
+ intel_uncore_write(uncore, regs.imr, imr_val);
+ intel_uncore_posting_read(uncore, regs.imr);
}
bool intel_irqs_enabled(struct xe_device *xe)
{
- /*
- * XXX: i915 has a racy handling of the irq.enabled, since it doesn't
- * lock its transitions. Because of that, the irq.enabled sometimes
- * is not read with the irq.lock in place.
- * However, the most critical cases like vblank and page flips are
- * properly using the locks.
- * We cannot take the lock in here or run any kind of assert because
- * of i915 inconsistency.
- * But at this point the xe irq is better protected against races,
- * although the full solution would be protecting the i915 side.
- */
- return xe->irq.enabled;
+ return atomic_read(&xe->irq.enabled);
}
void intel_synchronize_irq(struct xe_device *xe)
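gen2_irq_reset() and gen2_irq_init() now take the IMR/IER/IIR triplet as a single struct i915_irq_regs descriptor instead of three separate arguments. A minimal caller sketch with hypothetical register names (the field names follow the usage in the hunks above):

static void example_irq_bringup(struct intel_uncore *uncore)
{
	struct i915_irq_regs regs = {
		.imr = EXAMPLE_IMR,	/* hypothetical registers */
		.ier = EXAMPLE_IER,
		.iir = EXAMPLE_IIR,
	};

	gen2_irq_reset(uncore, regs);		/* mask all, clear stale IIR */
	gen2_irq_init(uncore, regs, ~0u, 0);	/* imr_val, ier_val */
}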
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
new file mode 100644
index 000000000000..b463f5bd4eed
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include <drm/drm_gem.h>
+
+#include "xe_bo.h"
+#include "intel_bo.h"
+
+bool intel_bo_is_tiled(struct drm_gem_object *obj)
+{
+ /* legacy tiling is unused */
+ return false;
+}
+
+bool intel_bo_is_userptr(struct drm_gem_object *obj)
+{
+ /* xe does not have userptr bos */
+ return false;
+}
+
+bool intel_bo_is_shmem(struct drm_gem_object *obj)
+{
+ return false;
+}
+
+bool intel_bo_is_protected(struct drm_gem_object *obj)
+{
+ return false;
+}
+
+void intel_bo_flush_if_display(struct drm_gem_object *obj)
+{
+}
+
+int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ return drm_gem_prime_mmap(obj, vma);
+}
+
+int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
+{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+
+ return xe_bo_read(bo, offset, dst, size);
+}
+
+struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
+{
+ return NULL;
+}
+
+struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
+ struct intel_frontbuffer *front)
+{
+ return front;
+}
+
+void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+{
+ /* FIXME */
+}
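The new intel_bo.c implements the driver-neutral intel_bo_*() interface for xe, letting shared display code operate on bare drm_gem_object pointers. A minimal consumer sketch (hypothetical helper):

static int example_peek_fb_word(struct drm_gem_object *obj, u32 *out)
{
	/* resolves to gem_to_xe_bo() + xe_bo_read() per the stub above */
	return intel_bo_read_from_page(obj, 0 /* offset */, out, sizeof(*out));
}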
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index 63ce97cc4cfe..4d209ebc26c2 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -11,8 +11,10 @@
#include "intel_fb_bo.h"
#include "xe_bo.h"
-void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
+void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+
if (bo->flags & XE_BO_FLAG_PINNED) {
/* Unpin our kernel fb first */
xe_bo_lock(bo, false);
@@ -23,9 +25,10 @@ void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
}
int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
- struct xe_bo *bo,
+ struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = to_xe_device(bo->ttm.base.dev);
int ret;
@@ -65,11 +68,11 @@ err:
return ret;
}
-struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_i915_gem_object *bo;
+ struct xe_bo *bo;
struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
if (!gem)
@@ -78,11 +81,11 @@ struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
bo = gem_to_xe_bo(gem);
/* Require vram placement or dma-buf import */
if (IS_DGFX(i915) &&
- !xe_bo_can_migrate(gem_to_xe_bo(gem), XE_PL_VRAM0) &&
+ !xe_bo_can_migrate(bo, XE_PL_VRAM0) &&
bo->ttm.type != ttm_bo_type_sg) {
drm_gem_object_put(gem);
return ERR_PTR(-EREMOTE);
}
- return bo;
+ return gem;
}
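Since the lookup helper now returns the drm_gem_object itself, a framebuffer-creation path threads the GEM object through init and drops the lookup reference on failure. A hedged sketch of that flow (error handling is illustrative; the real caller lives in the shared display code):

static int example_fb_init(struct drm_i915_private *i915, struct drm_file *file,
			   struct intel_framebuffer *fb,
			   struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	int ret;

	obj = intel_fb_bo_lookup_valid_bo(i915, file, mode_cmd);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
	if (ret)
		drm_gem_object_put(obj);	/* drop the lookup reference */

	return ret;
}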
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.h b/drivers/gpu/drm/xe/display/intel_fb_bo.h
deleted file mode 100644
index 5d365b925b7a..000000000000
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#ifndef __INTEL_FB_BO_H__
-#define __INTEL_FB_BO_H__
-
-struct drm_file;
-struct drm_mode_fb_cmd2;
-struct drm_i915_private;
-struct intel_framebuffer;
-struct xe_bo;
-
-void intel_fb_bo_framebuffer_fini(struct xe_bo *bo);
-int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
- struct xe_bo *bo,
- struct drm_mode_fb_cmd2 *mode_cmd);
-
-struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd);
-
-#endif
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 99499d6c0256..ca95fcd098ec 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -6,6 +6,7 @@
#include <drm/drm_fb_helper.h>
#include "intel_display_types.h"
+#include "intel_fb.h"
#include "intel_fbdev_fb.h"
#include "xe_bo.h"
#include "xe_ttm_stolen_mgr.h"
@@ -20,7 +21,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct xe_device *xe = to_xe_device(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
- struct drm_i915_gem_object *obj;
+ struct xe_bo *obj;
int size;
/* we don't do packed 24bpp */
@@ -64,13 +65,13 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
goto err;
}
- fb = intel_framebuffer_create(obj, &mode_cmd);
+ fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd);
if (IS_ERR(fb)) {
xe_bo_unpin_map_no_vm(obj);
goto err;
}
- drm_gem_object_put(intel_bo_to_drm_bo(obj));
+ drm_gem_object_put(&obj->ttm.base);
return to_intel_framebuffer(fb);
@@ -79,8 +80,9 @@ err:
}
int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
- struct drm_i915_gem_object *obj, struct i915_vma *vma)
+ struct drm_gem_object *_obj, struct i915_vma *vma)
{
+ struct xe_bo *obj = gem_to_xe_bo(_obj);
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
@@ -100,7 +102,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
XE_WARN_ON(iosys_map_is_null(&obj->vmap));
info->screen_base = obj->vmap.vaddr_iomem;
- info->screen_size = intel_bo_to_drm_bo(obj)->size;
+ info->screen_size = obj->ttm.base.size;
return 0;
}
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 75736faf2a80..b3921dbc52ff 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -4,16 +4,16 @@
*/
#include "xe_display.h"
-#include "regs/xe_regs.h"
+#include "regs/xe_irq_regs.h"
#include <linux/fb.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
#include <uapi/drm/xe_drm.h>
#include "soc/intel_dram.h"
-#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
#include "intel_acpi.h"
#include "intel_audio.h"
#include "intel_bw.h"
@@ -22,6 +22,7 @@
#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
+#include "intel_dmc_wl.h"
#include "intel_dp.h"
#include "intel_encoder.h"
#include "intel_fbdev.h"
@@ -34,7 +35,7 @@
static bool has_display(struct xe_device *xe)
{
- return HAS_DISPLAY(xe);
+ return HAS_DISPLAY(&xe->display);
}
/**
@@ -103,11 +104,12 @@ int xe_display_create(struct xe_device *xe)
static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
+ struct intel_display *display = &xe->display;
if (!xe->info.probe_display)
return;
- intel_power_domains_cleanup(xe);
+ intel_power_domains_cleanup(display);
}
int xe_display_init_nommio(struct xe_device *xe)
@@ -132,7 +134,7 @@ static void xe_display_fini_noirq(void *arg)
if (!xe->info.probe_display)
return;
- intel_display_driver_remove_noirq(xe);
+ intel_display_driver_remove_noirq(display);
intel_opregion_cleanup(display);
}
@@ -144,7 +146,7 @@ int xe_display_init_noirq(struct xe_device *xe)
if (!xe->info.probe_display)
return 0;
- intel_display_driver_early_probe(xe);
+ intel_display_driver_early_probe(display);
/* Early display init.. */
intel_opregion_setup(display);
@@ -157,9 +159,9 @@ int xe_display_init_noirq(struct xe_device *xe)
intel_bw_init_hw(xe);
- intel_display_device_info_runtime_init(xe);
+ intel_display_device_info_runtime_init(display);
- err = intel_display_driver_probe_noirq(xe);
+ err = intel_display_driver_probe_noirq(display);
if (err) {
intel_opregion_cleanup(display);
return err;
@@ -171,21 +173,23 @@ int xe_display_init_noirq(struct xe_device *xe)
static void xe_display_fini_noaccel(void *arg)
{
struct xe_device *xe = arg;
+ struct intel_display *display = &xe->display;
if (!xe->info.probe_display)
return;
- intel_display_driver_remove_nogem(xe);
+ intel_display_driver_remove_nogem(display);
}
int xe_display_init_noaccel(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
int err;
if (!xe->info.probe_display)
return 0;
- err = intel_display_driver_probe_nogem(xe);
+ err = intel_display_driver_probe_nogem(display);
if (err)
return err;
@@ -194,49 +198,59 @@ int xe_display_init_noaccel(struct xe_device *xe)
int xe_display_init(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return 0;
- return intel_display_driver_probe(xe);
+ return intel_display_driver_probe(display);
}
void xe_display_fini(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
intel_hpd_poll_fini(xe);
- intel_hdcp_component_fini(xe);
+ intel_hdcp_component_fini(display);
intel_audio_deinit(xe);
}
void xe_display_register(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_driver_register(xe);
+ intel_display_driver_register(display);
+ intel_power_domains_enable(display);
intel_register_dsm_handler();
- intel_power_domains_enable(xe);
}
void xe_display_unregister(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
intel_unregister_dsm_handler();
- intel_power_domains_disable(xe);
- intel_display_driver_unregister(xe);
+ intel_power_domains_disable(display);
+ intel_display_driver_unregister(display);
}
void xe_display_driver_remove(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_driver_remove(xe);
+ intel_display_driver_remove(display);
}
/* IRQ-related functions */
@@ -309,18 +323,7 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
}
/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
-void xe_display_pm_runtime_suspend(struct xe_device *xe)
-{
- if (!xe->info.probe_display)
- return;
-
- if (xe->d3cold.allowed)
- xe_display_pm_suspend(xe, true);
-
- intel_hpd_poll_enable(xe);
-}
-
-void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
{
struct intel_display *display = &xe->display;
bool s2idle = suspend_to_idle();
@@ -331,113 +334,210 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
* We do a lot of poking in a lot of registers, make sure they work
* properly.
*/
- intel_power_domains_disable(xe);
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+ intel_power_domains_disable(display);
+ if (!runtime)
+ intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+
if (!runtime && has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
- intel_display_driver_disable_user_access(xe);
- intel_display_driver_suspend(xe);
+ intel_display_driver_disable_user_access(display);
+ intel_display_driver_suspend(display);
}
xe_display_flush_cleanup_work(xe);
- intel_dp_mst_suspend(xe);
-
intel_hpd_cancel_work(xe);
if (!runtime && has_display(xe)) {
- intel_display_driver_suspend_access(xe);
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&xe->display);
}
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
- intel_dmc_suspend(xe);
+ intel_dmc_suspend(display);
+
+ if (runtime && has_display(xe))
+ intel_hpd_poll_enable(xe);
}
-void xe_display_pm_suspend_late(struct xe_device *xe)
+void xe_display_pm_suspend(struct xe_device *xe)
{
- bool s2idle = suspend_to_idle();
+ __xe_display_pm_suspend(xe, false);
+}
+
+void xe_display_pm_shutdown(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_power_domains_suspend(xe, s2idle);
+ intel_power_domains_disable(display);
+ intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+ if (has_display(xe)) {
+ drm_kms_helper_poll_disable(&xe->drm);
+ intel_display_driver_disable_user_access(display);
+ intel_display_driver_suspend(display);
+ }
- intel_display_power_suspend_late(xe);
+ xe_display_flush_cleanup_work(xe);
+ intel_dp_mst_suspend(display);
+ intel_hpd_cancel_work(xe);
+
+ if (has_display(xe))
+ intel_display_driver_suspend_access(display);
+
+ intel_encoder_suspend_all(display);
+ intel_encoder_shutdown_all(display);
+
+ intel_opregion_suspend(display, PCI_D3cold);
+
+ intel_dmc_suspend(display);
}
-void xe_display_pm_runtime_resume(struct xe_device *xe)
+void xe_display_pm_runtime_suspend(struct xe_device *xe)
{
if (!xe->info.probe_display)
return;
- intel_hpd_poll_disable(xe);
+ if (xe->d3cold.allowed) {
+ __xe_display_pm_suspend(xe, true);
+ return;
+ }
+
+ intel_hpd_poll_enable(xe);
+}
+
+void xe_display_pm_suspend_late(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+ bool s2idle = suspend_to_idle();
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_display_power_suspend_late(display, s2idle);
+}
+
+void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
if (xe->d3cold.allowed)
- xe_display_pm_resume(xe, true);
+ xe_display_pm_suspend_late(xe);
+
+ /*
+ * If xe_display_pm_suspend_late() is not called, it is likely
+ * that we will be on dynamic DC states with DMC wakelock enabled. We
+ * need to flush the release work in that case.
+ */
+ intel_dmc_wl_flush_release_work(display);
}
-void xe_display_pm_resume_early(struct xe_device *xe)
+void xe_display_pm_shutdown_late(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_power_resume_early(xe);
+ /*
+ * The only requirement is to reboot with display DC states disabled,
+ * for now leaving all display power wells in the INIT power domain
+ * enabled.
+ */
+ intel_power_domains_driver_remove(display);
+}
+
+void xe_display_pm_resume_early(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
- intel_power_domains_resume(xe);
+ intel_display_power_resume_early(display);
}
-void xe_display_pm_resume(struct xe_device *xe, bool runtime)
+static void __xe_display_pm_resume(struct xe_device *xe, bool runtime)
{
struct intel_display *display = &xe->display;
if (!xe->info.probe_display)
return;
- intel_dmc_resume(xe);
+ intel_dmc_resume(display);
if (has_display(xe))
drm_mode_config_reset(&xe->drm);
- intel_display_driver_init_hw(xe);
- intel_hpd_init(xe);
+ intel_display_driver_init_hw(display);
if (!runtime && has_display(xe))
- intel_display_driver_resume_access(xe);
+ intel_display_driver_resume_access(display);
+
+ intel_hpd_init(xe);
- /* MST sideband requires HPD interrupts enabled */
- intel_dp_mst_resume(xe);
if (!runtime && has_display(xe)) {
- intel_display_driver_resume(xe);
+ intel_display_driver_resume(display);
drm_kms_helper_poll_enable(&xe->drm);
- intel_display_driver_enable_user_access(xe);
- intel_hpd_poll_disable(xe);
+ intel_display_driver_enable_user_access(display);
}
+ if (has_display(xe))
+ intel_hpd_poll_disable(xe);
+
intel_opregion_resume(display);
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
+ if (!runtime)
+ intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
- intel_power_domains_enable(xe);
+ intel_power_domains_enable(display);
}
+void xe_display_pm_resume(struct xe_device *xe)
+{
+ __xe_display_pm_resume(xe, false);
+}
+
+void xe_display_pm_runtime_resume(struct xe_device *xe)
+{
+ if (!xe->info.probe_display)
+ return;
+
+ if (xe->d3cold.allowed) {
+ __xe_display_pm_resume(xe, true);
+ return;
+ }
+
+ intel_hpd_init(xe);
+ intel_hpd_poll_disable(xe);
+}
+
static void display_device_remove(struct drm_device *dev, void *arg)
{
- struct xe_device *xe = arg;
+ struct intel_display *display = arg;
- intel_display_device_remove(xe);
+ intel_display_device_remove(display);
}
int xe_display_probe(struct xe_device *xe)
{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct intel_display *display;
int err;
if (!xe->info.probe_display)
goto no_display;
- intel_display_device_probe(xe);
+ display = intel_display_device_probe(pdev);
- err = drmm_add_action_or_reset(&xe->drm, display_device_remove, xe);
+ err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
if (err)
return err;
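One detail worth making explicit in the reworked runtime PM entry points: the full display suspend/resume sequence runs only when d3cold is allowed; otherwise just HPD polling is toggled. Summarized as a sketch (this restates the hunks above rather than adding new behavior):

static void sketch_runtime_pm(struct xe_device *xe, bool suspend)
{
	if (xe->d3cold.allowed) {
		/* full path: power domains, HPD, opregion, DMC, ... */
		if (suspend)
			__xe_display_pm_suspend(xe, true);
		else
			__xe_display_pm_resume(xe, true);
		return;
	}

	/* lightweight path: display stays up, only HPD polling changes */
	if (suspend) {
		intel_hpd_poll_enable(xe);
	} else {
		intel_hpd_init(xe);
		intel_hpd_poll_disable(xe);
	}
}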
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 53d727fd792b..233f81a26c25 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -34,11 +34,14 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
void xe_display_irq_reset(struct xe_device *xe);
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
-void xe_display_pm_suspend(struct xe_device *xe, bool runtime);
+void xe_display_pm_suspend(struct xe_device *xe);
+void xe_display_pm_shutdown(struct xe_device *xe);
void xe_display_pm_suspend_late(struct xe_device *xe);
+void xe_display_pm_shutdown_late(struct xe_device *xe);
void xe_display_pm_resume_early(struct xe_device *xe);
-void xe_display_pm_resume(struct xe_device *xe, bool runtime);
+void xe_display_pm_resume(struct xe_device *xe);
void xe_display_pm_runtime_suspend(struct xe_device *xe);
+void xe_display_pm_runtime_suspend_late(struct xe_device *xe);
void xe_display_pm_runtime_resume(struct xe_device *xe);
#else
@@ -65,11 +68,14 @@ static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
static inline void xe_display_irq_reset(struct xe_device *xe) {}
static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
-static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_shutdown(struct xe_device *xe) {}
static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
+static inline void xe_display_pm_shutdown_late(struct xe_device *xe) {}
static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
-static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_resume(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_runtime_suspend_late(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}
#endif /* CONFIG_DRM_XE_DISPLAY */
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index f99d901a3214..f95375451e2f 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -48,11 +48,12 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
if (!vma)
return false;
+ /* Set scanout flag for WC mapping */
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
NULL, PAGE_ALIGN(size),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_GGTT);
+ XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT);
if (IS_ERR(obj)) {
kfree(vma);
return false;
@@ -73,5 +74,9 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
- /* TODO: add xe specific flush_map() for dsb buffer object. */
+ /*
+ * The memory barrier here is to ensure coherency of DSB vs MMIO,
+ * both for weakly ordered architectures and discrete cards.
+ */
+ xe_device_wmb(dsb_buf->vma->bo->tile->xe);
}
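The barrier replaces the old TODO because the display engine fetches DSB commands from memory only after an MMIO kick, so buffer writes must be globally visible first. A hedged sketch of the ordering the flush provides (the doorbell write is left abstract):

static void example_dsb_kick(struct intel_dsb_buffer *dsb_buf)
{
	/* ... CPU writes DSB commands into the mapped buffer ... */

	intel_dsb_buffer_flush_map(dsb_buf);	/* xe_device_wmb(): buffer before MMIO */

	/* ... MMIO write that starts DSB execution would follow here ... */
}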
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index b58fc4ba2aac..9fa51b84737c 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -79,12 +79,14 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
- struct i915_vma *vma)
+ struct i915_vma *vma,
+ u64 physical_alignment)
{
struct xe_device *xe = to_xe_device(fb->base.dev);
struct xe_tile *tile0 = xe_device_get_root_tile(xe);
struct xe_ggtt *ggtt = tile0->mem.ggtt;
- struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt;
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+ struct xe_bo *bo = gem_to_xe_bo(obj), *dpt;
u32 dpt_size, size = bo->ttm.base.size;
if (view->type == I915_GTT_VIEW_NORMAL)
@@ -98,23 +100,29 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_PAGE_SIZE);
if (IS_DGFX(xe))
- dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM0 |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE);
+ dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM0 |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ physical_alignment);
else
- dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
- ttm_bo_type_kernel,
- XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE);
+ dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ physical_alignment);
if (IS_ERR(dpt))
- dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE);
+ dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ physical_alignment);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -153,7 +161,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
}
vma->dpt = dpt;
- vma->node = dpt->ggtt_node;
+ vma->node = dpt->ggtt_node[tile0->id];
return 0;
}
@@ -183,9 +191,11 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
- struct i915_vma *vma)
+ struct i915_vma *vma,
+ u64 physical_alignment)
{
- struct xe_bo *bo = intel_fb_obj(&fb->base);
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+ struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = to_xe_device(fb->base.dev);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
u32 align;
@@ -203,8 +213,8 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
- if (bo->ggtt_node && view->type == I915_GTT_VIEW_NORMAL) {
- vma->node = bo->ggtt_node;
+ if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
+ vma->node = bo->ggtt_node[ggtt->tile->id];
} else if (view->type == I915_GTT_VIEW_NORMAL) {
u32 x, size = bo->ttm.base.size;
@@ -264,12 +274,14 @@ out:
}
static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
- const struct i915_gtt_view *view)
+ const struct i915_gtt_view *view,
+ u64 physical_alignment)
{
struct drm_device *dev = fb->base.dev;
struct xe_device *xe = to_xe_device(dev);
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- struct xe_bo *bo = intel_fb_obj(&fb->base);
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+ struct xe_bo *bo = gem_to_xe_bo(obj);
int ret;
if (!vma)
@@ -312,9 +324,9 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
vma->bo = bo;
if (intel_fb_uses_dpt(&fb->base))
- ret = __xe_pin_fb_vma_dpt(fb, view, vma);
+ ret = __xe_pin_fb_vma_dpt(fb, view, vma, physical_alignment);
else
- ret = __xe_pin_fb_vma_ggtt(fb, view, vma);
+ ret = __xe_pin_fb_vma_ggtt(fb, view, vma, physical_alignment);
if (ret)
goto err_unpin;
@@ -333,10 +345,12 @@ err:
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
+ u8 tile_id = vma->node->ggtt->tile->id;
+
if (vma->dpt)
xe_bo_unpin_map_no_vm(vma->dpt);
- else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node) ||
- vma->bo->ggtt_node->base.start != vma->node->base.start)
+ else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
+ vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start)
xe_ggtt_node_remove(vma->node, false);
ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
@@ -355,7 +369,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
{
*out_flags = 0;
- return __xe_pin_fb_vma(to_intel_framebuffer(fb), view);
+ return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, phys_alignment);
}
void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
@@ -366,13 +380,18 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
struct drm_framebuffer *fb = plane_state->hw.fb;
- struct xe_bo *bo = intel_fb_obj(fb);
+ struct drm_gem_object *obj = intel_fb_bo(fb);
+ struct xe_bo *bo = gem_to_xe_bo(obj);
struct i915_vma *vma;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ u64 phys_alignment = plane->min_alignment(plane, fb, 0);
/* We reject creating !SCANOUT fb's, so this is weird.. */
drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
- vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
+ vma = __xe_pin_fb_vma(intel_fb, &plane_state->view.gtt, phys_alignment);
+
if (IS_ERR(vma))
return PTR_ERR(vma);
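The GGTT hunks above reflect xe_bo now keeping one GGTT node per tile, so any code that inspects or reuses a binding must index by the tile owning the GGTT. A minimal sketch (hypothetical helper; the field accesses match the patch):

static bool example_bo_bound_in_ggtt(struct xe_bo *bo, struct xe_ggtt *ggtt)
{
	u8 tile_id = ggtt->tile->id;

	/* ggtt_node is a per-tile array, indexed by tile id */
	return xe_ggtt_node_allocated(bo->ggtt_node[tile_id]);
}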
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 6619a40aed15..7c02323e9531 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -30,26 +30,29 @@ struct intel_hdcp_gsc_message {
#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
-bool intel_hdcp_gsc_cs_required(struct xe_device *xe)
+bool intel_hdcp_gsc_cs_required(struct intel_display *display)
{
- return DISPLAY_VER(xe) >= 14;
+ return DISPLAY_VER(display) >= 14;
}
-bool intel_hdcp_gsc_check_status(struct xe_device *xe)
+bool intel_hdcp_gsc_check_status(struct intel_display *display)
{
+ struct xe_device *xe = to_xe_device(display->drm);
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *gt = tile->media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
bool ret = true;
+ unsigned int fw_ref;
- if (!gsc && !xe_uc_fw_is_enabled(&gsc->fw)) {
+ if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) {
drm_dbg_kms(&xe->drm,
"GSC Components not ready for HDCP2.x\n");
return false;
}
xe_pm_runtime_get(xe);
- if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (!fw_ref) {
drm_dbg_kms(&xe->drm,
"failed to get forcewake to check proxy status\n");
ret = false;
@@ -59,16 +62,17 @@ bool intel_hdcp_gsc_check_status(struct xe_device *xe)
if (!xe_gsc_proxy_init_done(gsc))
ret = false;
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
out:
xe_pm_runtime_put(xe);
return ret;
}
/* This function helps allocate memory for the command that we will send to the GSC CS */
-static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
+static int intel_hdcp_gsc_initialize_message(struct intel_display *display,
struct intel_hdcp_gsc_message *hdcp_message)
{
+ struct xe_device *xe = to_xe_device(display->drm);
struct xe_bo *bo = NULL;
u64 cmd_in, cmd_out;
int ret = 0;
@@ -80,7 +84,7 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
- drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
+ drm_err(display->drm, "Failed to allocate bo for HDCP streaming command!\n");
ret = PTR_ERR(bo);
goto out;
}
@@ -96,7 +100,7 @@ out:
return ret;
}
-static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe)
+static int intel_hdcp_gsc_hdcp2_init(struct intel_display *display)
{
struct intel_hdcp_gsc_message *hdcp_message;
int ret;
@@ -110,14 +114,14 @@ static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe)
* NOTE: No need to lock the comp mutex here as it is already
* going to be taken before this function called
*/
- ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message);
+ ret = intel_hdcp_gsc_initialize_message(display, hdcp_message);
if (ret) {
- drm_err(&xe->drm, "Could not initialize hdcp_message\n");
+ drm_err(display->drm, "Could not initialize hdcp_message\n");
kfree(hdcp_message);
return ret;
}
- xe->display.hdcp.hdcp_message = hdcp_message;
+ display->hdcp.hdcp_message = hdcp_message;
return ret;
}
@@ -137,7 +141,7 @@ static const struct i915_hdcp_ops gsc_hdcp_ops = {
.close_hdcp_session = intel_hdcp_gsc_close_session,
};
-int intel_hdcp_gsc_init(struct xe_device *xe)
+int intel_hdcp_gsc_init(struct intel_display *display)
{
struct i915_hdcp_arbiter *data;
int ret;
@@ -146,33 +150,33 @@ int intel_hdcp_gsc_init(struct xe_device *xe)
if (!data)
return -ENOMEM;
- mutex_lock(&xe->display.hdcp.hdcp_mutex);
- xe->display.hdcp.arbiter = data;
- xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev;
- xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(xe);
+ mutex_lock(&display->hdcp.hdcp_mutex);
+ display->hdcp.arbiter = data;
+ display->hdcp.arbiter->hdcp_dev = display->drm->dev;
+ display->hdcp.arbiter->ops = &gsc_hdcp_ops;
+ ret = intel_hdcp_gsc_hdcp2_init(display);
if (ret)
kfree(data);
- mutex_unlock(&xe->display.hdcp.hdcp_mutex);
+ mutex_unlock(&display->hdcp.hdcp_mutex);
return ret;
}
-void intel_hdcp_gsc_fini(struct xe_device *xe)
+void intel_hdcp_gsc_fini(struct intel_display *display)
{
struct intel_hdcp_gsc_message *hdcp_message =
- xe->display.hdcp.hdcp_message;
- struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter;
+ display->hdcp.hdcp_message;
+ struct i915_hdcp_arbiter *arb = display->hdcp.arbiter;
if (hdcp_message) {
xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
kfree(hdcp_message);
- xe->display.hdcp.hdcp_message = NULL;
+ display->hdcp.hdcp_message = NULL;
}
kfree(arb);
- xe->display.hdcp.arbiter = NULL;
+ display->hdcp.arbiter = NULL;
}
static int xe_gsc_send_sync(struct xe_device *xe,
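The forcewake change above follows xe's newer convention: xe_force_wake_get() returns an opaque non-zero reference on success (0 on failure) that must be handed back to xe_force_wake_put(), replacing the old 0/-errno return. A minimal sketch of the pattern (hypothetical helper; error code illustrative):

static int example_with_gsc_forcewake(struct xe_gt *gt)
{
	unsigned int fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);

	if (!fw_ref)
		return -ETIMEDOUT;

	/* ... access GSC registers under forcewake ... */

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}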
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index a50ab9eae40a..2eb9633f163a 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -170,7 +170,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
- bo, &mode_cmd)) {
+ &bo->ttm.base, &mode_cmd)) {
drm_dbg_kms(&xe->drm, "intel fb init failed\n");
goto err_bo;
}
@@ -248,7 +248,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
* the lookup of sysmem scratch pages.
*/
plane->check_plane(crtc_state, plane_state);
- plane->async_flip(plane, crtc_state, plane_state, true);
+ plane->async_flip(NULL, plane, crtc_state, plane_state, true);
return;
nofb:
@@ -275,12 +275,12 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
}
}
-void intel_initial_plane_config(struct drm_i915_private *i915)
+void intel_initial_plane_config(struct intel_display *display)
{
struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
@@ -294,7 +294,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
+ display->funcs.display->get_initial_plane_config(crtc, plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -302,7 +302,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
*/
intel_find_initial_plane_obj(crtc, plane_configs);
- if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
intel_crtc_wait_for_next_vblank(crtc);
plane_config_fini(plane_config);
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 81b71903675e..d86219dedde2 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -83,6 +83,8 @@
#define RING_IMR(base) XE_REG((base) + 0xa8)
#define RING_INT_STATUS_RPT_PTR(base) XE_REG((base) + 0xac)
+#define CS_INT_VEC(base) XE_REG((base) + 0x1b8)
+
#define RING_EIR(base) XE_REG((base) + 0xb0)
#define RING_EMR(base) XE_REG((base) + 0xb4)
#define RING_ESR(base) XE_REG((base) + 0xb8)
@@ -138,6 +140,7 @@
#define RING_MODE(base) XE_REG((base) + 0x29c)
#define GFX_DISABLE_LEGACY_MODE REG_BIT(3)
+#define GFX_MSIX_INTERRUPT_ENABLE REG_BIT(13)
#define RING_TIMESTAMP(base) XE_REG((base) + 0x358)
@@ -186,6 +189,7 @@
#define VDBOX_CGCTL3F10(base) XE_REG((base) + 0x3f10)
#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
+#define RAMDFTUNIT_CLKGATE_DIS REG_BIT(9)
#define VDBOX_CGCTL3F18(base) XE_REG((base) + 0x3f18)
#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index ac9c437e103d..162f18e975da 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -286,6 +286,9 @@
#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
#define LTCDD_CLKGATE_DIS REG_BIT(10)
+#define UNSLCGCTL9454 XE_REG(0x9454)
+#define LSCFE_CLKGATE_DIS REG_BIT(4)
+
#define XEHP_SLICE_UNIT_LEVEL_CLKGATE XE_REG_MCR(0x94d4)
#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
#define L3_CLKGATE_DIS REG_BIT(16)
@@ -344,6 +347,14 @@
#define CTC_SOURCE_DIVIDE_LOGIC REG_BIT(0)
#define FORCEWAKE_RENDER XE_REG(0xa278)
+
+#define POWERGATE_DOMAIN_STATUS XE_REG(0xa2a0)
+#define MEDIA_SLICE3_AWAKE_STATUS REG_BIT(4)
+#define MEDIA_SLICE2_AWAKE_STATUS REG_BIT(3)
+#define MEDIA_SLICE1_AWAKE_STATUS REG_BIT(2)
+#define RENDER_AWAKE_STATUS REG_BIT(1)
+#define MEDIA_SLICE0_AWAKE_STATUS REG_BIT(0)
+
#define FORCEWAKE_MEDIA_VDBOX(n) XE_REG(0xa540 + (n) * 4)
#define FORCEWAKE_MEDIA_VEBOX(n) XE_REG(0xa560 + (n) * 4)
#define FORCEWAKE_GSC XE_REG(0xa618)
@@ -393,9 +404,6 @@
#define XE2_GLOBAL_INVAL XE_REG(0xb404)
-#define SCRATCH1LPFC XE_REG(0xb474)
-#define EN_L3_RW_CCS_CACHE_FLUSH REG_BIT(0)
-
#define XE2LPM_L3SQCREG2 XE_REG_MCR(0xb604)
#define XE2LPM_L3SQCREG3 XE_REG_MCR(0xb608)
@@ -437,6 +445,8 @@
#define SAMPLER_MODE XE_REG_MCR(0xe18c, XE_REG_OPTION_MASKED)
#define ENABLE_SMALLPL REG_BIT(15)
+#define SMP_WAIT_FETCH_MERGING_COUNTER REG_GENMASK(11, 10)
+#define SMP_FORCE_128B_OVERFETCH REG_FIELD_PREP(SMP_WAIT_FETCH_MERGING_COUNTER, 1)
#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
#define SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
#define INDIRECT_STATE_BASE_ADDR_OVERRIDE REG_BIT(0)
@@ -520,7 +530,7 @@
* [4-6] RSVD
* [7] Disabled
*/
-#define CCS_MODE XE_REG(0x14804)
+#define CCS_MODE XE_REG(0x14804, XE_REG_OPTION_MASKED)
#define CCS_MODE_CSLICE_0_3_MASK REG_GENMASK(11, 0) /* 3 bits per cslice */
#define CCS_MODE_CSLICE_MASK 0x7 /* CCS0-3 + rsvd */
#define CCS_MODE_CSLICE_WIDTH ilog2(CCS_MODE_CSLICE_MASK + 1)
@@ -559,62 +569,6 @@
#define GT_PERF_STATUS XE_REG(0x1381b4)
#define VOLTAGE_MASK REG_GENMASK(10, 0)
-/*
- * Note: Interrupt registers 1900xx are VF accessible only until version 12.50.
- * On newer platforms, VFs are using memory-based interrupts instead.
- * However, for simplicity we keep this XE_REG_OPTION_VF tag intact.
- */
-
-#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF)
-#define INTR_GSC REG_BIT(31)
-#define INTR_GUC REG_BIT(25)
-#define INTR_MGUC REG_BIT(24)
-#define INTR_BCS8 REG_BIT(23)
-#define INTR_BCS(x) REG_BIT(15 - (x))
-#define INTR_CCS(x) REG_BIT(4 + (x))
-#define INTR_RCS0 REG_BIT(0)
-#define INTR_VECS(x) REG_BIT(31 - (x))
-#define INTR_VCS(x) REG_BIT(x)
-
-#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF)
-#define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF)
-#define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF)
-#define ENGINE1_MASK REG_GENMASK(31, 16)
-#define ENGINE0_MASK REG_GENMASK(15, 0)
-#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF)
-#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF)
-#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF)
-
-#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF)
-#define INTR_DATA_VALID REG_BIT(31)
-#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x)
-#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
-#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
-#define OTHER_GUC_INSTANCE 0
-#define OTHER_GSC_HECI2_INSTANCE 3
-#define OTHER_GSC_INSTANCE 6
-
-#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF)
-#define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF)
-#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF)
-#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF)
-#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF)
-#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF)
-#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
-#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF)
-#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF)
-#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF)
-#define CCS0_CCS1_INTR_MASK XE_REG(0x190100)
-#define CCS2_CCS3_INTR_MASK XE_REG(0x190104)
-#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110)
-#define XEHPC_BCS3_BCS4_INTR_MASK XE_REG(0x190114)
-#define XEHPC_BCS5_BCS6_INTR_MASK XE_REG(0x190118)
-#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c)
-#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11)
-#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8)
-#define GSC_ER_COMPLETE REG_BIT(5)
-#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4)
-#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
-#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
+#define SFC_DONE(n) XE_REG(0x1cc000 + (n) * 0x1000)
#endif
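Tagging CCS_MODE with XE_REG_OPTION_MASKED matters because Intel GPU "masked" registers carry per-bit write enables in their upper 16 bits, allowing field updates without a read-modify-write cycle. A generic sketch of the encoding convention (illustrative helper, not an xe API):

static u32 example_masked_value(u32 field_mask, u32 field_value)
{
	/* upper half: which bits to write; lower half: the data bits */
	return (field_mask << 16) | field_value;
}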
diff --git a/drivers/gpu/drm/xe/regs/xe_guc_regs.h b/drivers/gpu/drm/xe/regs/xe_guc_regs.h
index a5fd14307f94..2118f7dec287 100644
--- a/drivers/gpu/drm/xe/regs/xe_guc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_guc_regs.h
@@ -84,6 +84,8 @@
#define HUC_LOADING_AGENT_GUC REG_BIT(1)
#define GUC_WOPCM_OFFSET_VALID REG_BIT(0)
#define GUC_MAX_IDLE_COUNT XE_REG(0xc3e4)
+#define GUC_PMTIMESTAMP_LO XE_REG(0xc3e8)
+#define GUC_PMTIMESTAMP_HI XE_REG(0xc3ec)
#define GUC_SEND_INTERRUPT XE_REG(0xc4c8)
#define GUC_SEND_TRIGGER REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
new file mode 100644
index 000000000000..1776b3f78ccb
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#ifndef _XE_IRQ_REGS_H_
+#define _XE_IRQ_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+#define PCU_IRQ_OFFSET 0x444e0
+#define GU_MISC_IRQ_OFFSET 0x444f0
+#define GU_MISC_GSE REG_BIT(27)
+
+#define DG1_MSTR_TILE_INTR XE_REG(0x190008)
+#define DG1_MSTR_IRQ REG_BIT(31)
+#define DG1_MSTR_TILE(t) REG_BIT(t)
+
+#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF)
+#define MASTER_IRQ REG_BIT(31)
+#define GU_MISC_IRQ REG_BIT(29)
+#define DISPLAY_IRQ REG_BIT(16)
+#define GT_DW_IRQ(x) REG_BIT(x)
+
+/*
+ * Note: Interrupt registers 1900xx are VF accessible only until version 12.50.
+ * On newer platforms, VFs are using memory-based interrupts instead.
+ * However, for simplicity we keep this XE_REG_OPTION_VF tag intact.
+ */
+
+#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF)
+#define INTR_GSC REG_BIT(31)
+#define INTR_GUC REG_BIT(25)
+#define INTR_MGUC REG_BIT(24)
+#define INTR_BCS8 REG_BIT(23)
+#define INTR_BCS(x) REG_BIT(15 - (x))
+#define INTR_CCS(x) REG_BIT(4 + (x))
+#define INTR_RCS0 REG_BIT(0)
+#define INTR_VECS(x) REG_BIT(31 - (x))
+#define INTR_VCS(x) REG_BIT(x)
+
+#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF)
+#define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF)
+#define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF)
+#define ENGINE1_MASK REG_GENMASK(31, 16)
+#define ENGINE0_MASK REG_GENMASK(15, 0)
+#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF)
+#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF)
+#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF)
+
+#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF)
+#define INTR_DATA_VALID REG_BIT(31)
+#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x)
+#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
+#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
+#define OTHER_GUC_INSTANCE 0
+#define OTHER_GSC_HECI2_INSTANCE 3
+#define OTHER_GSC_INSTANCE 6
+
+#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF)
+#define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF)
+#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF)
+#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF)
+#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF)
+#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF)
+#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
+#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF)
+#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF)
+#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF)
+#define CCS0_CCS1_INTR_MASK XE_REG(0x190100)
+#define CCS2_CCS3_INTR_MASK XE_REG(0x190104)
+#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110)
+#define XEHPC_BCS3_BCS4_INTR_MASK XE_REG(0x190114)
+#define XEHPC_BCS5_BCS6_INTR_MASK XE_REG(0x190118)
+#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c)
+#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11)
+#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8)
+#define GSC_ER_COMPLETE REG_BIT(5)
+#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4)
+#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
+#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
+
+#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 045dfd09db99..57944f90bbf6 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -25,6 +25,9 @@
#define CTX_INT_SRC_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 3)
#define CTX_INT_SRC_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 4)
+#define CTX_CS_INT_VEC_REG 0x5a
+#define CTX_CS_INT_VEC_DATA (CTX_CS_INT_VEC_REG + 1)
+
#define INDIRECT_CTX_RING_HEAD (0x02 + 1)
#define INDIRECT_CTX_RING_TAIL (0x04 + 1)
#define INDIRECT_CTX_RING_START (0x06 + 1)
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a9b0091cb7ee..a79ad2da070c 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -41,14 +41,6 @@
#define OAG_OABUFFER XE_REG(0xdb08)
#define OABUFFER_SIZE_MASK REG_GENMASK(5, 3)
-#define OABUFFER_SIZE_128K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 0)
-#define OABUFFER_SIZE_256K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 1)
-#define OABUFFER_SIZE_512K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 2)
-#define OABUFFER_SIZE_1M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 3)
-#define OABUFFER_SIZE_2M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 4)
-#define OABUFFER_SIZE_4M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 5)
-#define OABUFFER_SIZE_8M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 6)
-#define OABUFFER_SIZE_16M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 7)
#define OAG_OABUFFER_MEMORY_SELECT REG_BIT(0) /* 0: PPGTT, 1: GGTT */
#define OAG_OACONTROL XE_REG(0xdaf4)
@@ -59,10 +51,15 @@
/* Common to all OA units */
#define OA_OACONTROL_REPORT_BC_MASK REG_GENMASK(9, 9)
#define OA_OACONTROL_COUNTER_SIZE_MASK REG_GENMASK(8, 8)
+#define OAG_OACONTROL_USED_BITS \
+ (OAG_OACONTROL_OA_PES_DISAG_EN | OAG_OACONTROL_OA_CCS_SELECT_MASK | \
+ OAG_OACONTROL_OA_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE | \
+ OA_OACONTROL_REPORT_BC_MASK | OA_OACONTROL_COUNTER_SIZE_MASK)
#define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
#define OAG_OA_DEBUG_DISABLE_MMIO_TRG REG_BIT(14)
#define OAG_OA_DEBUG_START_TRIGGER_SCOPE_CONTROL REG_BIT(13)
+#define OAG_OA_DEBUG_BUF_SIZE_SELECT REG_BIT(12)
#define OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL REG_BIT(8)
#define OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL REG_BIT(7)
#define OAG_OA_DEBUG_INCLUDE_CLK_RATIO REG_BIT(6)
@@ -85,6 +82,8 @@
#define OAM_CONTEXT_CONTROL_OFFSET (0x1bc)
#define OAM_CONTROL_OFFSET (0x194)
#define OAM_CONTROL_COUNTER_SEL_MASK REG_GENMASK(3, 1)
+#define OAM_OACONTROL_USED_BITS \
+ (OAM_CONTROL_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE)
#define OAM_DEBUG_OFFSET (0x198)
#define OAM_STATUS_OFFSET (0x19c)
#define OAM_MMIO_TRG_OFFSET (0x1d0)
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
new file mode 100644
index 000000000000..f45abcd96ba8
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#ifndef _XE_PMT_H_
+#define _XE_PMT_H_
+
+#define SOC_BASE 0x280000
+
+#define BMG_PMT_BASE_OFFSET 0xDB000
+#define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE_OFFSET)
+
+#define BMG_TELEMETRY_BASE_OFFSET 0xE0000
+#define BMG_TELEMETRY_OFFSET (SOC_BASE + BMG_TELEMETRY_BASE_OFFSET)
+
+#define SG_REMAP_INDEX1 XE_REG(SOC_BASE + 0x08)
+#define SG_REMAP_BITS REG_GENMASK(31, 24)
+
+#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
index 23f7dc5bbe99..0eedd6c26b1b 100644
--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -13,7 +13,7 @@
/**
* struct xe_reg - Register definition
*
- * Register defintion to be used by the individual register. Although the same
+ * Register definition to be used by the individual register. Although the same
* definition is used for xe_reg and xe_reg_mcr, they use different internal
* APIs for accesses.
*/
@@ -128,7 +128,7 @@ struct xe_reg_mcr {
* options.
*/
#define XE_REG_MCR(r_, ...) ((const struct xe_reg_mcr){ \
- .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \
+ .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \
})
static inline bool xe_reg_is_valid(struct xe_reg r)
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index dfa869f0dddd..6cf282618836 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -11,10 +11,6 @@
#define TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK REG_GENMASK(15, 12)
#define TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK REG_GENMASK(9, 0)
-#define PCU_IRQ_OFFSET 0x444e0
-#define GU_MISC_IRQ_OFFSET 0x444f0
-#define GU_MISC_GSE REG_BIT(27)
-
#define GU_CNTL_PROTECTED XE_REG(0x10100C)
#define DRIVERINT_FLR_DIS REG_BIT(31)
@@ -48,25 +44,19 @@
#define MTL_RP_STATE_CAP XE_REG(0x138000)
+#define MTL_GT_RPA_FREQUENCY XE_REG(0x138008)
#define MTL_GT_RPE_FREQUENCY XE_REG(0x13800c)
#define MTL_MEDIAP_STATE_CAP XE_REG(0x138020)
#define MTL_RPN_CAP_MASK REG_GENMASK(24, 16)
#define MTL_RP0_CAP_MASK REG_GENMASK(8, 0)
+#define MTL_MPA_FREQUENCY XE_REG(0x138028)
+#define MTL_RPA_MASK REG_GENMASK(8, 0)
+
#define MTL_MPE_FREQUENCY XE_REG(0x13802c)
#define MTL_RPE_MASK REG_GENMASK(8, 0)
-#define DG1_MSTR_TILE_INTR XE_REG(0x190008)
-#define DG1_MSTR_IRQ REG_BIT(31)
-#define DG1_MSTR_TILE(t) REG_BIT(t)
-
-#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF)
-#define MASTER_IRQ REG_BIT(31)
-#define GU_MISC_IRQ REG_BIT(29)
-#define DISPLAY_IRQ REG_BIT(16)
-#define GT_DW_IRQ(x) REG_BIT(x)
-
#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
#define VF_CAP REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 8dac069483e8..6795d1d916e4 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -6,6 +6,13 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
+#include <linux/iosys-map.h>
+#include <linux/math64.h>
+#include <linux/prandom.h>
+#include <linux/swap.h>
+
+#include <uapi/linux/sysinfo.h>
+
#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
@@ -42,6 +49,13 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
KUNIT_FAIL(test, "Failed to submit bo clear.\n");
return PTR_ERR(fence);
}
+
+ if (dma_fence_wait_timeout(fence, false, 5 * HZ) <= 0) {
+ dma_fence_put(fence);
+ KUNIT_FAIL(test, "Timeout while clearing bo.\n");
+ return -ETIME;
+ }
+
dma_fence_put(fence);
}
@@ -250,10 +264,9 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
* however seems quite fragile not to also restart the GT. Try
* to do that here by triggering a GT reset.
*/
- for_each_gt(__gt, xe, id) {
- xe_gt_reset_async(__gt);
- flush_work(&__gt->reset.worker);
- }
+ for_each_gt(__gt, xe, id)
+ xe_gt_reset(__gt);
+
if (err) {
KUNIT_FAIL(test, "restore kernel err=%pe\n",
ERR_PTR(err));
@@ -358,6 +371,237 @@ static void xe_bo_evict_kunit(struct kunit *test)
evict_test_run_device(xe);
}
+struct xe_bo_link {
+ struct list_head link;
+ struct xe_bo *bo;
+ u32 val;
+};
+
+#define XE_BO_SHRINK_SIZE ((unsigned long)SZ_64M)
+
+static int shrink_test_fill_random(struct xe_bo *bo, struct rnd_state *state,
+ struct xe_bo_link *link)
+{
+ struct iosys_map map;
+ int ret = ttm_bo_vmap(&bo->ttm, &map);
+ size_t __maybe_unused i;
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
+ u32 val = prandom_u32_state(state);
+
+ iosys_map_wr(&map, i, u32, val);
+ if (i == 0)
+ link->val = val;
+ }
+
+ ttm_bo_vunmap(&bo->ttm, &map);
+ return 0;
+}
+
+static bool shrink_test_verify(struct kunit *test, struct xe_bo *bo,
+ unsigned int bo_nr, struct rnd_state *state,
+ struct xe_bo_link *link)
+{
+ struct iosys_map map;
+ int ret = ttm_bo_vmap(&bo->ttm, &map);
+ size_t i;
+ bool failed = false;
+
+ if (ret) {
+ KUNIT_FAIL(test, "Error mapping bo %u for content check.\n", bo_nr);
+ return true;
+ }
+
+ for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
+ u32 val = prandom_u32_state(state);
+
+ if (iosys_map_rd(&map, i, u32) != val) {
+ KUNIT_FAIL(test, "Content not preserved, bo %u offset 0x%016llx",
+ bo_nr, (unsigned long long)i);
+ kunit_info(test, "Failed value is 0x%08x, recorded 0x%08x\n",
+ (unsigned int)iosys_map_rd(&map, i, u32), val);
+ if (i == 0 && val != link->val)
+ kunit_info(test, "Looks like PRNG is out of sync.\n");
+ failed = true;
+ break;
+ }
+ }
+
+ ttm_bo_vunmap(&bo->ttm, &map);
+
+ return failed;
+}
+
+/*
+ * Try to create system bos corresponding to twice the amount
+ * of available system memory to test shrinker functionality.
+ * If no swap space is available to accommodate the
+ * memory overcommit, mark bos purgeable.
+ */
+static int shrink_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = kunit_get_current_test();
+ LIST_HEAD(bos);
+ struct xe_bo_link *link, *next;
+ struct sysinfo si;
+ u64 ram, ram_and_swap, purgeable = 0, alloced, to_alloc, limit;
+ unsigned int interrupted = 0, successful = 0, count = 0;
+ struct rnd_state prng;
+ u64 rand_seed;
+ bool failed = false;
+
+ rand_seed = get_random_u64();
+ prandom_seed_state(&prng, rand_seed);
+ kunit_info(test, "Random seed is 0x%016llx.\n",
+ (unsigned long long)rand_seed);
+
+ /* Skip if execution time is expected to be too long. */
+
+ limit = SZ_32G;
+ /* IGFX with flat CCS needs to copy when swapping / shrinking */
+ if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
+ limit = SZ_16G;
+
+ si_meminfo(&si);
+ ram = (size_t)si.freeram * si.mem_unit;
+ if (ram > limit) {
+ kunit_skip(test, "Too long expected execution time.\n");
+ return 0;
+ }
+ to_alloc = ram * 2;
+
+ ram_and_swap = ram + get_nr_swap_pages() * PAGE_SIZE;
+ if (to_alloc > ram_and_swap)
+ purgeable = to_alloc - ram_and_swap;
+ purgeable += div64_u64(purgeable, 5);
+
+ kunit_info(test, "Free ram is %lu bytes. Will allocate twice of that.\n",
+ (unsigned long)ram);
+ for (alloced = 0; alloced < to_alloc; alloced += XE_BO_SHRINK_SIZE) {
+ struct xe_bo *bo;
+ unsigned int mem_type;
+ struct xe_ttm_tt *xe_tt;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ KUNIT_FAIL(test, "Unexpected link allocation failure\n");
+ failed = true;
+ break;
+ }
+
+ INIT_LIST_HEAD(&link->link);
+
+ /* We can create bos using WC caching here. But it is slower. */
+ bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE,
+ DRM_XE_GEM_CPU_CACHING_WB,
+ XE_BO_FLAG_SYSTEM);
+ if (IS_ERR(bo)) {
+ if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) &&
+ bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS))
+ KUNIT_FAIL(test, "Error creating bo: %pe\n", bo);
+ kfree(link);
+ failed = true;
+ break;
+ }
+ xe_bo_lock(bo, false);
+ xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
+
+ /*
+ * Allocate purgeable bos first, because if we do it the
+ * other way around, they may not be subject to swapping...
+ */
+ if (alloced < purgeable) {
+ xe_tt->purgeable = true;
+ bo->ttm.priority = 0;
+ } else {
+ int ret = shrink_test_fill_random(bo, &prng, link);
+
+ if (ret) {
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ KUNIT_FAIL(test, "Error filling bo with random data: %pe\n",
+ ERR_PTR(ret));
+ kfree(link);
+ failed = true;
+ break;
+ }
+ }
+
+ mem_type = bo->ttm.resource->mem_type;
+ xe_bo_unlock(bo);
+ link->bo = bo;
+ list_add_tail(&link->link, &bos);
+
+ if (mem_type != XE_PL_TT) {
+ KUNIT_FAIL(test, "Bo in incorrect memory type: %u\n",
+ bo->ttm.resource->mem_type);
+ failed = true;
+ }
+ cond_resched();
+ if (signal_pending(current))
+ break;
+ }
+
+ /*
+ * Read back and destroy bos. Reset the pseudo-random seed to get an
+ * identical pseudo-random number sequence for readback.
+ */
+ prandom_seed_state(&prng, rand_seed);
+ list_for_each_entry_safe(link, next, &bos, link) {
+ static struct ttm_operation_ctx ctx = {.interruptible = true};
+ struct xe_bo *bo = link->bo;
+ struct xe_ttm_tt *xe_tt;
+ int ret;
+
+ count++;
+ if (!signal_pending(current) && !failed) {
+ bool purgeable, intr = false;
+
+ xe_bo_lock(bo, false);
+
+ /* xe_tt->purgeable is cleared on validate. */
+ xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
+ purgeable = xe_tt->purgeable;
+ do {
+ ret = ttm_bo_validate(&bo->ttm, &tt_placement, &ctx);
+ if (ret == -EINTR)
+ intr = true;
+ } while (ret == -EINTR && !signal_pending(current));
+
+ if (!ret && !purgeable)
+ failed = shrink_test_verify(test, bo, count, &prng, link);
+
+ xe_bo_unlock(bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Validation failed: %pe\n",
+ ERR_PTR(ret));
+ failed = true;
+ } else if (intr) {
+ interrupted++;
+ } else {
+ successful++;
+ }
+ }
+ xe_bo_put(link->bo);
+ list_del(&link->link);
+ kfree(link);
+ }
+ kunit_info(test, "Readbacks interrupted: %u successful: %u\n",
+ interrupted, successful);
+
+ return 0;
+}
+
+static void xe_bo_shrink_kunit(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ shrink_test_run_device(xe);
+}
+
static struct kunit_case xe_bo_tests[] = {
KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param),
KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param),
@@ -371,3 +615,17 @@ struct kunit_suite xe_bo_test_suite = {
.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite);
+
+static struct kunit_case xe_bo_shrink_test[] = {
+ KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param,
+ {.speed = KUNIT_SPEED_SLOW}),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_bo_shrink_test_suite = {
+ .name = "xe_bo_shrink",
+ .test_cases = xe_bo_shrink_test,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_bo_shrink_test_suite);
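The shrink test's write and readback passes both draw from prandom_u32_state(), relying on re-seeding with the recorded seed to replay the identical sequence. A stripped-down sketch of that pattern on a plain buffer (hypothetical, not the iosys_map-backed path used above):

    struct rnd_state prng;
    u64 seed = get_random_u64();
    u32 buf[64];
    size_t i;

    /* Write pass: fill from a freshly seeded PRNG. */
    prandom_seed_state(&prng, seed);
    for (i = 0; i < ARRAY_SIZE(buf); i++)
            buf[i] = prandom_u32_state(&prng);

    /* Readback pass: the same seed replays the same sequence. */
    prandom_seed_state(&prng, seed);
    for (i = 0; i < ARRAY_SIZE(buf); i++)
            WARN_ON(buf[i] != prandom_u32_state(&prng));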
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
index 5f14737c8210..81277c77016d 100644
--- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -6,11 +6,13 @@
#include <kunit/test.h>
extern struct kunit_suite xe_bo_test_suite;
+extern struct kunit_suite xe_bo_shrink_test_suite;
extern struct kunit_suite xe_dma_buf_test_suite;
extern struct kunit_suite xe_migrate_test_suite;
extern struct kunit_suite xe_mocs_test_suite;
kunit_test_suite(xe_bo_test_suite);
+kunit_test_suite(xe_bo_shrink_test_suite);
kunit_test_suite(xe_dma_buf_test_suite);
kunit_test_suite(xe_migrate_test_suite);
kunit_test_suite(xe_mocs_test_suite);
@@ -18,4 +20,4 @@ kunit_test_suite(xe_mocs_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xe live kunit tests");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
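The MODULE_IMPORT_NS() change follows the kernel-wide conversion of symbol namespaces from preprocessor tokens to string literals; the export side uses the same string form. An illustrative pair (example_symbol is made up):

    /* Exporting module (what EXPORT_SYMBOL_IF_KUNIT expands to after the conversion): */
    EXPORT_SYMBOL_NS(example_symbol, "EXPORTED_FOR_KUNIT_TESTING");

    /* Consuming module: */
    MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");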
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 1a192a2a941b..d5fe0ea889ad 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -83,7 +83,8 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
bo->size,
ttm_bo_type_kernel,
region |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
@@ -224,8 +225,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_PINNED);
if (IS_ERR(tiny)) {
- KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
- PTR_ERR(pt));
+ KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
+ PTR_ERR(tiny));
goto free_pt;
}
@@ -642,7 +643,9 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(sys_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -666,7 +669,8 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(ccs_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -690,7 +694,8 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(vram_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(vram_bo));
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 79be73b4a02b..ef1e5256c56a 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -43,23 +43,22 @@ static void read_l3cc_table(struct xe_gt *gt,
{
struct kunit *test = kunit_get_current_test();
u32 l3cc, l3cc_expected;
- unsigned int i;
+ unsigned int fw_ref, i;
u32 reg_val;
- u32 ret;
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
for (i = 0; i < info->num_mocs_regs; i++) {
if (!(i & 1)) {
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i >> 1));
else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i >> 1));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i >> 1));
mocs_dbg(gt, "reg_val=0x%x\n", reg_val);
} else {
- /* Just re-use value read on previous iteration */
+ /* Just reuse value read on previous iteration */
reg_val >>= 16;
}
@@ -72,7 +71,7 @@ static void read_l3cc_table(struct xe_gt *gt,
KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc,
"l3cc idx=%u has incorrect val.\n", i);
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static void read_mocs_table(struct xe_gt *gt,
@@ -80,21 +79,20 @@ static void read_mocs_table(struct xe_gt *gt,
{
struct kunit *test = kunit_get_current_test();
u32 mocs, mocs_expected;
- unsigned int i;
+ unsigned int fw_ref, i;
u32 reg_val;
- u32 ret;
KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index,
"Unused entries index should have been defined\n");
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
for (i = 0; i < info->num_mocs_regs; i++) {
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i));
mocs_expected = get_entry_control(info, i);
mocs = reg_val;
@@ -106,7 +104,7 @@ static void read_mocs_table(struct xe_gt *gt,
"mocs reg 0x%x has incorrect val.\n", i);
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static int mocs_kernel_test_run_device(struct xe_device *xe)
@@ -164,8 +162,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
- xe_gt_reset_async(gt);
- flush_work(&gt->reset.worker);
+ xe_gt_reset(gt);
kunit_info(test, "mocs_reset_test after reset\n");
if (flags & HAS_GLOBAL_MOCS)
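The mocs changes reflect xe_force_wake_get() now returning a domain reference mask rather than an error code: zero means nothing was acquired, and the same value is handed back to xe_force_wake_put(). The resulting idiom, sketched outside the test context (gt assumed to be in scope):

    unsigned int fw_ref;

    fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
    if (!fw_ref)
            return -ETIMEDOUT;	/* nothing was acquired */

    /* ... MMIO accesses that need the GT domain awake ... */

    xe_force_wake_put(gt_to_fw(gt), fw_ref);	/* release exactly what was taken */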
diff --git a/drivers/gpu/drm/xe/tests/xe_test_mod.c b/drivers/gpu/drm/xe/tests/xe_test_mod.c
index 875f3e6f965e..93081bcf2ab0 100644
--- a/drivers/gpu/drm/xe/tests/xe_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_test_mod.c
@@ -7,4 +7,4 @@
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xe kunit tests");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
index e22bbf57fca7..68fe70ce2be3 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -10,11 +10,11 @@
#include <drm/drm_print.h>
-#include "xe_device_types.h"
+#include "xe_gt_types.h"
#include "xe_step.h"
/**
- * DOC: Xe ASSERTs
+ * DOC: Xe Asserts
*
* While Xe driver aims to be simpler than legacy i915 driver it is still
* complex enough that some changes introduced while adding new functionality
@@ -103,7 +103,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_assert(xe, condition) xe_assert_msg((xe), condition, "")
#define xe_assert_msg(xe, condition, msg, arg...) ({ \
@@ -138,7 +138,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_tile_assert(tile, condition) xe_tile_assert_msg((tile), condition, "")
#define xe_tile_assert_msg(tile, condition, msg, arg...) ({ \
@@ -162,7 +162,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_gt_assert(gt, condition) xe_gt_assert_msg((gt), condition, "")
#define xe_gt_assert_msg(gt, condition, msg, arg...) ({ \
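As the renamed "Xe Asserts" doc section stresses, these macros are debug-build-only sanity checks, never runtime error handling. Illustrative usage (the names are assumed to be in scope):

    /* Compiled away unless CONFIG_DRM_XE_DEBUG is enabled. */
    xe_assert(xe, bo->size >= SZ_4K);
    xe_gt_assert_msg(gt, id < xe->info.tile_count,
                     "tile id %u out of range", id);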
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index ef777dbdf4ec..9570672fce33 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -41,7 +41,7 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
/*
* We need to allocate space for the requested number of dwords,
* one additional MI_BATCH_BUFFER_END dword, and additional buffer
- * space to accomodate the platform-specific hardware prefetch
+ * space to accommodate the platform-specific hardware prefetch
* requirements.
*/
bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index e5f51fd23c65..3f5391d416d4 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -162,6 +162,15 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
}
}
+static bool force_contiguous(u32 bo_flags)
+{
+ /*
+ * For eviction / restore on suspend / resume, objects pinned in VRAM
+ * must be contiguous. Also, only contiguous BOs support xe_bo_vmap.
+ */
+ return bo_flags & (XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+}
+
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
@@ -175,12 +184,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
xe_assert(xe, vram && vram->usable_size);
io_size = vram->io_size;
- /*
- * For eviction / restore on suspend / resume objects
- * pinned in VRAM must be contiguous
- */
- if (bo_flags & (XE_BO_FLAG_PINNED |
- XE_BO_FLAG_GGTT))
+ if (force_contiguous(bo_flags))
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
if (io_size < vram->usable_size) {
@@ -212,8 +216,7 @@ static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_STOLEN,
- .flags = bo_flags & (XE_BO_FLAG_PINNED |
- XE_BO_FLAG_GGTT) ?
+ .flags = force_contiguous(bo_flags) ?
TTM_PL_FLAG_CONTIGUOUS : 0,
};
*c += 1;
@@ -283,6 +286,8 @@ struct xe_ttm_tt {
struct device *dev;
struct sg_table sgt;
struct sg_table *sg;
+ /** @purgeable: Whether the content of the pages of @ttm is purgeable. */
+ bool purgeable;
};
static int xe_tt_map_sg(struct ttm_tt *tt)
@@ -440,6 +445,14 @@ static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
kfree(tt);
}
+static bool xe_ttm_resource_visible(struct ttm_resource *mem)
+{
+ struct xe_ttm_vram_mgr_resource *vres =
+ to_xe_ttm_vram_mgr_resource(mem);
+
+ return vres->used_visible_size == mem->size;
+}
+
static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
@@ -451,11 +464,9 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
return 0;
case XE_PL_VRAM0:
case XE_PL_VRAM1: {
- struct xe_ttm_vram_mgr_resource *vres =
- to_xe_ttm_vram_mgr_resource(mem);
struct xe_mem_region *vram = res_to_mem_region(mem);
- if (vres->used_visible_size < mem->size)
+ if (!xe_ttm_resource_visible(mem))
return -EINVAL;
mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -468,7 +479,7 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.offset += vram->io_start;
mem->bus.is_iomem = true;
-#if !defined(CONFIG_X86)
+#if !IS_ENABLED(CONFIG_X86)
mem->bus.caching = ttm_write_combined;
#endif
return 0;
@@ -722,7 +733,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
new_mem->mem_type == XE_PL_SYSTEM) {
long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP,
- true,
+ false,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
ret = timeout;
@@ -761,7 +772,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
if (xe_rpm_reclaim_safe(xe)) {
/*
* We might be called through swapout in the validation path of
- * another TTM device, so unconditionally acquire rpm here.
+ * another TTM device, so acquire rpm here.
*/
xe_pm_runtime_get(xe);
} else {
@@ -775,7 +786,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
* / resume, some of the pinned memory is required for the
* device to resume / use the GPU to move other evicted memory
* (user memory) around. This likely could be optimized a bit
- * futher where we find the minimum set of pinned memory
+ * further where we find the minimum set of pinned memory
* required for resume but for simplicity doing a memcpy for all
* pinned memory.
*/
@@ -846,8 +857,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
out:
if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
- ttm_bo->ttm)
+ ttm_bo->ttm) {
+ long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ ret = timeout;
+
xe_tt_unmap_sg(ttm_bo->ttm);
+ }
return ret;
}
@@ -856,7 +875,7 @@ out:
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
* @bo: The buffer object to move.
*
- * On successful completion, the object memory will be moved to sytem memory.
+ * On successful completion, the object memory will be moved to system memory.
*
* This is needed for special handling of pinned VRAM objects during
* suspend-resume.
@@ -874,6 +893,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
};
struct ttm_operation_ctx ctx = {
.interruptible = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_resource *new_mem;
int ret;
@@ -886,8 +906,8 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (WARN_ON(!xe_bo_is_pinned(bo)))
return -EINVAL;
- if (WARN_ON(!xe_bo_is_vram(bo)))
- return -EINVAL;
+ if (!xe_bo_is_vram(bo))
+ return 0;
ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
if (ret)
@@ -901,7 +921,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
}
}
- ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
+ ret = ttm_bo_populate(&bo->ttm, &ctx);
if (ret)
goto err_res_free;
@@ -935,8 +955,10 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
+ .gfp_retry_mayfail = false,
};
struct ttm_resource *new_mem;
+ struct ttm_place *place = &bo->placements[0];
int ret;
xe_bo_assert_held(bo);
@@ -947,14 +969,20 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
if (WARN_ON(!xe_bo_is_pinned(bo)))
return -EINVAL;
- if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
+ if (WARN_ON(xe_bo_is_vram(bo)))
+ return -EINVAL;
+
+ if (WARN_ON(!bo->ttm.ttm && !xe_bo_is_stolen(bo)))
return -EINVAL;
+ if (!mem_type_is_vram(place->mem_type))
+ return 0;
+
ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
if (ret)
return ret;
- ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
+ ret = ttm_bo_populate(&bo->ttm, &ctx);
if (ret)
goto err_res_free;
@@ -1082,6 +1110,80 @@ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
}
}
+static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operation_ctx *ctx)
+{
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+
+ if (ttm_bo->ttm) {
+ struct ttm_placement place = {};
+ int ret = ttm_bo_validate(ttm_bo, &place, ctx);
+
+ drm_WARN_ON(&xe->drm, ret);
+ }
+}
+
+static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .gfp_retry_mayfail = false,
+ };
+
+ if (ttm_bo->ttm) {
+ struct xe_ttm_tt *xe_tt =
+ container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm);
+
+ if (xe_tt->purgeable)
+ xe_ttm_bo_purge(ttm_bo, &ctx);
+ }
+}
+
+static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
+ unsigned long offset, void *buf, int len,
+ int write)
+{
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct iosys_map vmap;
+ struct xe_res_cursor cursor;
+ struct xe_mem_region *vram;
+ int bytes_left = len;
+
+ xe_bo_assert_held(bo);
+ xe_device_assert_mem_access(xe);
+
+ if (!mem_type_is_vram(ttm_bo->resource->mem_type))
+ return -EIO;
+
+ /* FIXME: Use GPU for non-visible VRAM */
+ if (!xe_ttm_resource_visible(ttm_bo->resource))
+ return -EIO;
+
+ vram = res_to_mem_region(ttm_bo->resource);
+ xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
+ bo->size - (offset & PAGE_MASK), &cursor);
+
+ do {
+ unsigned long page_offset = (offset & ~PAGE_MASK);
+ int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left);
+
+ iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping +
+ cursor.start);
+ if (write)
+ xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count);
+ else
+ xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count);
+
+ buf += byte_count;
+ offset += byte_count;
+ bytes_left -= byte_count;
+ if (bytes_left)
+ xe_res_next(&cursor, PAGE_SIZE);
+ } while (bytes_left);
+
+ return len;
+}
+
const struct ttm_device_funcs xe_ttm_funcs = {
.ttm_tt_create = xe_ttm_tt_create,
.ttm_tt_populate = xe_ttm_tt_populate,
@@ -1091,15 +1193,19 @@ const struct ttm_device_funcs xe_ttm_funcs = {
.move = xe_bo_move,
.io_mem_reserve = xe_ttm_io_mem_reserve,
.io_mem_pfn = xe_ttm_io_mem_pfn,
+ .access_memory = xe_ttm_access_memory,
.release_notify = xe_ttm_bo_release_notify,
.eviction_valuable = ttm_bo_eviction_valuable,
.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
+ .swap_notify = xe_ttm_bo_swap_notify,
};
static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
{
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct xe_tile *tile;
+ u8 id;
if (bo->ttm.base.import_attach)
drm_prime_gem_destroy(&bo->ttm.base, NULL);
@@ -1107,8 +1213,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
- if (bo->ggtt_node && bo->ggtt_node->base.size)
- xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
+ for_each_tile(tile, xe, id)
+ if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size)
+ xe_ggtt_remove_bo(tile->mem.ggtt, bo);
#ifdef CONFIG_PROC_FS
if (bo->client)
@@ -1206,11 +1313,50 @@ out:
return ret;
}
+static int xe_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct ttm_buffer_object *ttm_bo = vma->vm_private_data;
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_device *xe = xe_bo_device(bo);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = ttm_bo_vm_access(vma, addr, buf, len, write);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+/**
+ * xe_bo_read() - Read from an xe_bo
+ * @bo: The buffer object to read from.
+ * @offset: The byte offset to start reading from.
+ * @dst: Location to store the read.
+ * @size: Size in bytes for the read.
+ *
+ * Read @size bytes from the @bo, starting from @offset, storing into @dst.
+ *
+ * Return: Zero on success, or negative error.
+ */
+int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
+{
+ int ret;
+
+ ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0);
+ if (ret >= 0 && ret != size)
+ ret = -EIO;
+ else if (ret == size)
+ ret = 0;
+
+ return ret;
+}
+
static const struct vm_operations_struct xe_gem_vm_ops = {
.fault = xe_gem_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
+ .access = xe_bo_vm_access,
};
static const struct drm_gem_object_funcs xe_gem_object_funcs = {
@@ -1224,7 +1370,7 @@ static const struct drm_gem_object_funcs xe_gem_object_funcs = {
/**
* xe_bo_alloc - Allocate storage for a struct xe_bo
*
- * This funcition is intended to allocate storage to be used for input
+ * This function is intended to allocate storage to be used for input
* to __xe_bo_create_locked(), in the case a pointer to the bo to be
* created is needed before the call to __xe_bo_create_locked().
* If __xe_bo_create_locked ends up never to be called, then the
@@ -1264,6 +1410,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement *placement;
uint32_t alignment;
@@ -1278,6 +1425,10 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
return ERR_PTR(-EINVAL);
}
+ /* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */
+ if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT))
+ return ERR_PTR(-EINVAL);
+
if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
!(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
@@ -1424,7 +1575,8 @@ static struct xe_bo *
__xe_bo_create_locked(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
- u16 cpu_caching, enum ttm_bo_type type, u32 flags)
+ u16 cpu_caching, enum ttm_bo_type type, u32 flags,
+ u64 alignment)
{
struct xe_bo *bo = NULL;
int err;
@@ -1453,6 +1605,8 @@ __xe_bo_create_locked(struct xe_device *xe,
if (IS_ERR(bo))
return bo;
+ bo->min_align = alignment;
+
/*
* Note that instead of taking a reference to the drm_gpuvm_resv_bo(),
* to ensure the shared resv doesn't disappear under the bo, the bo
@@ -1465,19 +1619,29 @@ __xe_bo_create_locked(struct xe_device *xe,
bo->vm = vm;
if (bo->flags & XE_BO_FLAG_GGTT) {
- if (!tile && flags & XE_BO_FLAG_STOLEN)
- tile = xe_device_get_root_tile(xe);
+ struct xe_tile *t;
+ u8 id;
- xe_assert(xe, tile);
+ if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) {
+ if (!tile && flags & XE_BO_FLAG_STOLEN)
+ tile = xe_device_get_root_tile(xe);
- if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
- err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
- start + bo->size, U64_MAX);
- } else {
- err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
+ xe_assert(xe, tile);
+ }
+
+ for_each_tile(t, xe, id) {
+ if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t)))
+ continue;
+
+ if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
+ err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
+ start + bo->size, U64_MAX);
+ } else {
+ err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
+ }
+ if (err)
+ goto err_unlock_put_bo;
}
- if (err)
- goto err_unlock_put_bo;
}
return bo;
@@ -1493,16 +1657,18 @@ struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags)
+ enum ttm_bo_type type, u32 flags, u64 alignment)
{
- return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
+ return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
+ flags, alignment);
}
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags)
{
- return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
+ return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
+ flags, 0);
}
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
@@ -1512,7 +1678,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
{
struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
cpu_caching, ttm_bo_type_device,
- flags | XE_BO_FLAG_USER);
+ flags | XE_BO_FLAG_USER, 0);
if (!IS_ERR(bo))
xe_bo_unlock_vm_held(bo);
@@ -1536,6 +1702,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
size_t size, u64 offset,
enum ttm_bo_type type, u32 flags)
{
+ return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
+ type, flags, 0);
+}
+
+struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+ struct xe_tile *tile,
+ struct xe_vm *vm,
+ size_t size, u64 offset,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment)
+{
struct xe_bo *bo;
int err;
u64 start = offset == ~0ull ? 0 : offset;
@@ -1546,7 +1723,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
flags |= XE_BO_FLAG_GGTT;
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS,
+ alignment);
if (IS_ERR(bo))
return bo;
@@ -1719,6 +1897,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
int xe_bo_pin(struct xe_bo *bo)
{
+ struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
int err;
@@ -1749,21 +1928,21 @@ int xe_bo_pin(struct xe_bo *bo)
*/
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- struct ttm_place *place = &(bo->placements[0]);
-
if (mem_type_is_vram(place->mem_type)) {
xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
-
- spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
- spin_unlock(&xe->pinned.lock);
}
}
+ if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
+ spin_unlock(&xe->pinned.lock);
+ }
+
ttm_bo_pin(&bo->ttm);
/*
@@ -1809,23 +1988,18 @@ void xe_bo_unpin_external(struct xe_bo *bo)
void xe_bo_unpin(struct xe_bo *bo)
{
+ struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
xe_assert(xe, !bo->ttm.base.import_attach);
xe_assert(xe, xe_bo_is_pinned(bo));
- if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- struct ttm_place *place = &(bo->placements[0]);
-
- if (mem_type_is_vram(place->mem_type)) {
- spin_lock(&xe->pinned.lock);
- xe_assert(xe, !list_empty(&bo->pinned_link));
- list_del_init(&bo->pinned_link);
- spin_unlock(&xe->pinned.lock);
- }
+ if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+ spin_lock(&xe->pinned.lock);
+ xe_assert(xe, !list_empty(&bo->pinned_link));
+ list_del_init(&bo->pinned_link);
+ spin_unlock(&xe->pinned.lock);
}
-
ttm_bo_unpin(&bo->ttm);
}
@@ -1850,6 +2024,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
if (vm) {
@@ -1860,6 +2035,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
ctx.resv = xe_vm_resv(vm);
}
+ trace_xe_bo_validate(bo);
return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
}
@@ -1911,13 +2087,15 @@ dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
int xe_bo_vmap(struct xe_bo *bo)
{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
void *virtual;
bool is_iomem;
int ret;
xe_bo_assert_held(bo);
- if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
+ if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) ||
+ !force_contiguous(bo->flags)))
return -EINVAL;
if (!iosys_map_is_null(&bo->vmap))
@@ -2193,6 +2371,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement placement;
struct ttm_place requested;
@@ -2233,7 +2412,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
* @force_alloc: Set force_alloc in ttm_operation_ctx
*
* On successful completion, the object memory will be moved to evict
- * placement. Ths function blocks until the object has been fully moved.
+ * placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
@@ -2243,6 +2422,7 @@ int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
.interruptible = false,
.no_wait_gpu = false,
.force_alloc = force_alloc,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement placement;
int ret;
@@ -2322,14 +2502,18 @@ void xe_bo_put_commit(struct llist_head *deferred)
void xe_bo_put(struct xe_bo *bo)
{
+ struct xe_tile *tile;
+ u8 id;
+
might_sleep();
if (bo) {
#ifdef CONFIG_PROC_FS
if (bo->client)
might_lock(&bo->client->bos_lock);
#endif
- if (bo->ggtt_node && bo->ggtt_node->ggtt)
- might_lock(&bo->ggtt_node->ggtt->lock);
+ for_each_tile(tile, xe_bo_device(bo), id)
+ if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
+ might_lock(&bo->ggtt_node[id]->ggtt->lock);
drm_gem_object_put(&bo->ttm.base);
}
}
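xe_bo_read() above normalizes ttm_bo_access()'s byte-count return into the usual zero-or-negative kernel convention, mapping a short access to -EIO. A hedged usage sketch:

    u32 header;
    int err;

    err = xe_bo_read(bo, 0, &header, sizeof(header));
    if (err)
            return err;	/* -EIO on a short access, other negatives from ttm_bo_access() */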
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 6e4be52306df..d9386ab03140 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -39,10 +39,22 @@
#define XE_BO_FLAG_NEEDS_64K BIT(15)
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
+#define XE_BO_FLAG_GGTT0 BIT(18)
+#define XE_BO_FLAG_GGTT1 BIT(19)
+#define XE_BO_FLAG_GGTT2 BIT(20)
+#define XE_BO_FLAG_GGTT3 BIT(21)
+#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
+ XE_BO_FLAG_GGTT1 | \
+ XE_BO_FLAG_GGTT2 | \
+ XE_BO_FLAG_GGTT3)
+
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
+#define XE_BO_FLAG_GGTTx(tile) \
+ (XE_BO_FLAG_GGTT0 << (tile)->id)
+
#define XE_PTE_SHIFT 12
#define XE_PAGE_SIZE (1 << XE_PTE_SHIFT)
#define XE_PTE_MASK (XE_PAGE_SIZE - 1)
@@ -77,7 +89,7 @@ struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags);
+ enum ttm_bo_type type, u32 flags, u64 alignment);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags);
@@ -94,6 +106,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size, u64 offset,
enum ttm_bo_type type, u32 flags);
+struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+ struct xe_tile *tile,
+ struct xe_vm *vm,
+ size_t size, u64 offset,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment);
struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size,
enum ttm_bo_type type, u32 flags);
@@ -188,18 +206,29 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
}
static inline u32
-xe_bo_ggtt_addr(struct xe_bo *bo)
+__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
- if (XE_WARN_ON(!bo->ggtt_node))
+ struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id];
+
+ if (XE_WARN_ON(!ggtt_node))
return 0;
- XE_WARN_ON(bo->ggtt_node->base.size > bo->size);
- XE_WARN_ON(bo->ggtt_node->base.start + bo->ggtt_node->base.size > (1ull << 32));
- return bo->ggtt_node->base.start;
+ XE_WARN_ON(ggtt_node->base.size > bo->size);
+ XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32));
+ return ggtt_node->base.start;
+}
+
+static inline u32
+xe_bo_ggtt_addr(struct xe_bo *bo)
+{
+ xe_assert(xe_bo_device(bo), bo->tile);
+
+ return __xe_bo_ggtt_addr(bo, bo->tile->id);
}
int xe_bo_vmap(struct xe_bo *bo);
void xe_bo_vunmap(struct xe_bo *bo);
+int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size);
bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
@@ -312,8 +341,6 @@ static inline unsigned int xe_sg_segment_size(struct device *dev)
return round_down(max / 2, PAGE_SIZE);
}
-#define i915_gem_object_flush_if_display(obj) ((void)(obj))
-
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
/**
* xe_bo_is_mem_type - Whether the bo currently resides in the given
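With the per-tile flags above, a BO destined for a specific extra GGTT combines XE_BO_FLAG_GGTT with the matching XE_BO_FLAG_GGTTx() bit (the creation path rejects GGTTx bits without XE_BO_FLAG_GGTT). Illustrative sketch; everything other than the flag combination is a placeholder:

    /* Map a kernel BO into its own tile's GGTT and also into other_tile's GGTT. */
    bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
                              XE_BO_FLAG_SYSTEM |
                              XE_BO_FLAG_GGTT |
                              XE_BO_FLAG_GGTTx(other_tile));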
diff --git a/drivers/gpu/drm/xe/xe_bo_doc.h b/drivers/gpu/drm/xe/xe_bo_doc.h
index f57d440cc95a..25a884c64bf1 100644
--- a/drivers/gpu/drm/xe/xe_bo_doc.h
+++ b/drivers/gpu/drm/xe/xe_bo_doc.h
@@ -41,7 +41,7 @@
* created the BO can be mmap'd (via DRM_IOCTL_XE_GEM_MMAP_OFFSET) for user
* access and it can be bound for GPU access (via DRM_IOCTL_XE_VM_BIND). All
* user BOs are evictable and user BOs are never pinned by XE. The allocation of
- * the backing store can be defered from creation time until first use which is
+ * the backing store can be deferred from creation time until first use which is
* either mmap, bind, or pagefault.
*
* Private BOs
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 541b49007d73..6a40eedd9db1 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -34,14 +34,22 @@ int xe_bo_evict_all(struct xe_device *xe)
u8 id;
int ret;
- if (!IS_DGFX(xe))
- return 0;
-
/* User memory */
- for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
+ for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
struct ttm_resource_manager *man =
ttm_manager_type(bdev, mem_type);
+ /*
+ * On igpu platforms with flat CCS we need to ensure we save and restore any CCS
+ * state since this state lives inside graphics stolen memory which doesn't survive
+ * hibernation.
+ *
+ * This can be further improved by only evicting objects that we know have actually
+ * used a compression enabled PAT index.
+ */
+ if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
+ continue;
+
if (man) {
ret = ttm_resource_manager_evict_all(bdev, man);
if (ret)
@@ -125,9 +133,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
struct xe_bo *bo;
int ret;
- if (!IS_DGFX(xe))
- return 0;
-
spin_lock(&xe->pinned.lock);
for (;;) {
bo = list_first_entry_or_null(&xe->pinned.evicted,
@@ -147,11 +152,17 @@ int xe_bo_restore_kernel(struct xe_device *xe)
}
if (bo->flags & XE_BO_FLAG_GGTT) {
- struct xe_tile *tile = bo->tile;
+ struct xe_tile *tile;
+ u8 id;
+
+ for_each_tile(tile, xe, id) {
+ if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
+ continue;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo(tile->mem.ggtt, bo);
+ mutex_unlock(&tile->mem.ggtt->lock);
+ }
}
/*
@@ -159,7 +170,6 @@ int xe_bo_restore_kernel(struct xe_device *xe)
* should setup the iosys map.
*/
xe_assert(xe, !iosys_map_is_null(&bo->vmap));
- xe_assert(xe, xe_bo_is_vram(bo));
xe_bo_put(bo);
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 2ed558ac2264..46dc9e4e3e46 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -10,9 +10,9 @@
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
+#include "xe_device_types.h"
#include "xe_ggtt_types.h"
struct xe_device;
@@ -39,8 +39,8 @@ struct xe_bo {
struct ttm_place placements[XE_BO_MAX_PLACEMENTS];
/** @placement: current placement for this BO */
struct ttm_placement placement;
- /** @ggtt_node: GGTT node if this BO is mapped in the GGTT */
- struct xe_ggtt_node *ggtt_node;
+ /** @ggtt_node: Array of GGTT nodes if this BO is mapped in the GGTTs */
+ struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE];
/** @vmap: iosys map of this buffer */
struct iosys_map vmap;
/** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
@@ -76,9 +76,11 @@ struct xe_bo {
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
struct list_head vram_userfault_link;
-};
-#define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base)
-#define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev)
+ /** @min_align: minimum alignment needed for this BO if different
+ * from default
+ */
+ u64 min_align;
+};
#endif
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 668615c6b172..492b4877433f 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -90,13 +90,32 @@ static int forcewake_open(struct inode *inode, struct file *file)
{
struct xe_device *xe = inode->i_private;
struct xe_gt *gt;
- u8 id;
+ u8 id, last_gt;
+ unsigned int fw_ref;
xe_pm_runtime_get(xe);
- for_each_gt(gt, xe, id)
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ for_each_gt(gt, xe, id) {
+ last_gt = id;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ goto err_fw_get;
+ }
return 0;
+
+err_fw_get:
+ for_each_gt(gt, xe, id) {
+ if (id < last_gt)
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ else if (id == last_gt)
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ else
+ break;
+ }
+
+ xe_pm_runtime_put(xe);
+ return -ETIMEDOUT;
}
static int forcewake_release(struct inode *inode, struct file *file)
@@ -106,7 +125,7 @@ static int forcewake_release(struct inode *inode, struct file *file)
u8 id;
for_each_gt(gt, xe, id)
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_pm_runtime_put(xe);
return 0;
@@ -187,7 +206,7 @@ void xe_debugfs_register(struct xe_device *xe)
debugfs_create_file("forcewake_all", 0400, root, xe,
&forcewake_all_fops);
- debugfs_create_file("wedged_mode", 0400, root, xe,
+ debugfs_create_file("wedged_mode", 0600, root, xe,
&wedged_mode_fops);
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index bdb76e834e4c..39fe485d2085 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -6,6 +6,7 @@
#include "xe_devcoredump.h"
#include "xe_devcoredump_types.h"
+#include <linux/ascii85.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
@@ -16,39 +17,52 @@
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
+#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
+#include "xe_guc_log.h"
#include "xe_guc_submit.h"
#include "xe_hw_engine.h"
+#include "xe_module.h"
+#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_vm.h"
/**
* DOC: Xe device coredump
*
- * Devices overview:
* Xe uses dev_coredump infrastructure for exposing the crash errors in a
- * standardized way.
- * devcoredump exposes a temporary device under /sys/class/devcoredump/
- * which is linked with our card device directly.
- * The core dump can be accessed either from
- * /sys/class/drm/card<n>/device/devcoredump/ or from
- * /sys/class/devcoredump/devcd<m> where
- * /sys/class/devcoredump/devcd<m>/failing_device is a link to
- * /sys/class/drm/card<n>/device/.
+ * standardized way. Once a crash occurs, devcoredump exposes a temporary
+ * node under ``/sys/class/devcoredump/devcd<m>/``. The same node is also
+ * accessible in ``/sys/class/drm/card<n>/device/devcoredump/``. The
+ * ``failing_device`` symlink points to the device that crashed and created the
+ * coredump.
*
- * Snapshot at hang:
- * The 'data' file is printed with a drm_printer pointer at devcoredump read
- * time. For this reason, we need to take snapshots from when the hang has
- * happened, and not only when the user is reading the file. Otherwise the
- * information is outdated since the resets might have happened in between.
+ * The following characteristics are observed by xe when creating a device
+ * coredump:
*
- * 'First' failure snapshot:
- * In general, the first hang is the most critical one since the following hangs
- * can be a consequence of the initial hang. For this reason we only take the
- * snapshot of the 'first' failure and ignore subsequent calls of this function,
- * at least while the coredump device is alive. Dev_coredump has a delayed work
- * queue that will eventually delete the device and free all the dump
- * information.
+ * **Snapshot at hang**:
+ * The 'data' file contains a snapshot of the HW and driver states at the time
+ * the hang happened. Due to the driver recovering from resets/crashes, it may
+ * not correspond to the state of the system when the file is read by
+ * userspace.
+ *
+ * **Coredump release**:
+ * After a coredump is generated, it stays in kernel memory until released by
+ * userspace by writing anything to it, or after an internal timer expires. The
+ * exact timeout may vary and should not be relied upon. Example to release
+ * a coredump:
+ *
+ * .. code-block:: shell
+ *
+ * $ > /sys/class/drm/card0/device/devcoredump/data
+ *
+ * **First failure only**:
+ * In general, the first hang is the most critical one since the following
+ * hangs can be a consequence of the initial hang. For this reason a snapshot
+ * is taken only for the first failure. Until the devcoredump is released by
+ * userspace or kernel, all subsequent hangs do not override the snapshot nor
+ * create new ones. Devcoredump has a delayed work queue that will eventually
+ * delete the file node and free all the dump information.
*/
#ifdef CONFIG_DEV_COREDUMP
@@ -85,31 +99,39 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
p = drm_coredump_printer(&iter);
- drm_printf(&p, "**** Xe Device Coredump ****\n");
- drm_printf(&p, "kernel: " UTS_RELEASE "\n");
- drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_puts(&p, "**** Xe Device Coredump ****\n");
+ drm_printf(&p, "Reason: %s\n", ss->reason);
+ drm_puts(&p, "kernel: " UTS_RELEASE "\n");
+ drm_puts(&p, "module: " KBUILD_MODNAME "\n");
ts = ktime_to_timespec64(ss->snapshot_time);
drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
ts = ktime_to_timespec64(ss->boot_time);
drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
- drm_printf(&p, "Process: %s\n", ss->process_name);
+ drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid);
xe_device_snapshot_print(xe, &p);
- drm_printf(&p, "\n**** GuC CT ****\n");
- xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
- xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
+ drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id);
+ drm_printf(&p, "\tTile: %d\n", ss->gt->tile->id);
- drm_printf(&p, "\n**** Job ****\n");
- xe_sched_job_snapshot_print(coredump->snapshot.job, &p);
+ drm_puts(&p, "\n**** GuC Log ****\n");
+ xe_guc_log_snapshot_print(ss->guc.log, &p);
+ drm_puts(&p, "\n**** GuC CT ****\n");
+ xe_guc_ct_snapshot_print(ss->guc.ct, &p);
- drm_printf(&p, "\n**** HW Engines ****\n");
+ drm_puts(&p, "\n**** Contexts ****\n");
+ xe_guc_exec_queue_snapshot_print(ss->ge, &p);
+
+ drm_puts(&p, "\n**** Job ****\n");
+ xe_sched_job_snapshot_print(ss->job, &p);
+
+ drm_puts(&p, "\n**** HW Engines ****\n");
for (i = 0; i < XE_NUM_HW_ENGINES; i++)
- if (coredump->snapshot.hwe[i])
- xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i],
- &p);
- drm_printf(&p, "\n**** VM state ****\n");
- xe_vm_snapshot_print(coredump->snapshot.vm, &p);
+ if (ss->hwe[i])
+ xe_engine_snapshot_print(ss->hwe[i], &p);
+
+ drm_puts(&p, "\n**** VM state ****\n");
+ xe_vm_snapshot_print(ss->vm, &p);
return count - iter.remain;
}
@@ -118,8 +140,17 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
{
int i;
- xe_guc_ct_snapshot_free(ss->ct);
- ss->ct = NULL;
+ kfree(ss->reason);
+ ss->reason = NULL;
+
+ xe_guc_log_snapshot_free(ss->guc.log);
+ ss->guc.log = NULL;
+
+ xe_guc_ct_snapshot_free(ss->guc.ct);
+ ss->guc.ct = NULL;
+
+ xe_guc_capture_put_matched_nodes(&ss->gt->uc.guc);
+ ss->matched_node = NULL;
xe_guc_exec_queue_snapshot_free(ss->ge);
ss->ge = NULL;
@@ -137,29 +168,6 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
ss->vm = NULL;
}
-static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
-{
- struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
- struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
-
- /* keep going if fw fails as we still want to save the memory and SW data */
- if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
- xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
- xe_vm_snapshot_capture_delayed(ss->vm);
- xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
- xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
-
- /* Calculate devcoredump size */
- ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
-
- ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
- if (!ss->read.buffer)
- return;
-
- __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
- xe_devcoredump_snapshot_free(ss);
-}
-
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
@@ -175,16 +183,24 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
- if (!ss->read.buffer)
+ mutex_lock(&coredump->lock);
+
+ if (!ss->read.buffer) {
+ mutex_unlock(&coredump->lock);
return -ENODEV;
+ }
- if (offset >= ss->read.size)
+ if (offset >= ss->read.size) {
+ mutex_unlock(&coredump->lock);
return 0;
+ }
byte_copied = count < ss->read.size - offset ? count :
ss->read.size - offset;
memcpy(buffer, ss->read.buffer + offset, byte_copied);
+ mutex_unlock(&coredump->lock);
+
return byte_copied;
}
@@ -198,6 +214,8 @@ static void xe_devcoredump_free(void *data)
cancel_work_sync(&coredump->snapshot.work);
+ mutex_lock(&coredump->lock);
+
xe_devcoredump_snapshot_free(&coredump->snapshot);
kvfree(coredump->snapshot.read.buffer);
@@ -206,28 +224,71 @@ static void xe_devcoredump_free(void *data)
coredump->captured = false;
drm_info(&coredump_to_xe(coredump)->drm,
"Xe device coredump has been deleted.\n");
+
+ mutex_unlock(&coredump->lock);
+}
+
+static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+{
+ struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
+ struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
+ struct xe_device *xe = coredump_to_xe(coredump);
+ unsigned int fw_ref;
+
+ /*
+ * NB: Despite passing a GFP_ flags parameter here, more allocations are done
+ * internally using GFP_KERNEL explicitly. Hence this call must be in the worker
+ * thread and not in the initial capture call.
+ */
+ dev_coredumpm_timeout(gt_to_xe(ss->gt)->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
+ xe_devcoredump_read, xe_devcoredump_free,
+ XE_COREDUMP_TIMEOUT_JIFFIES);
+
+ xe_pm_runtime_get(xe);
+
+ /* keep going if fw fails as we still want to save the memory and SW data */
+ fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ xe_vm_snapshot_capture_delayed(ss->vm);
+ xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
+ xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
+
+ xe_pm_runtime_put(xe);
+
+ /* Calculate devcoredump size */
+ ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
+
+ ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
+ if (!ss->read.buffer)
+ return;
+
+ __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
+ xe_devcoredump_snapshot_free(ss);
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ struct xe_exec_queue *q,
struct xe_sched_job *job)
{
struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
- struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_hw_engine *hwe;
- enum xe_hw_engine_id id;
u32 adj_logical_mask = q->logical_mask;
u32 width_mask = (0x1 << q->width) - 1;
const char *process_name = "no process";
- int i;
+ unsigned int fw_ref;
bool cookie;
+ int i;
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
- if (q->vm && q->vm->xef)
+ if (q->vm && q->vm->xef) {
process_name = q->vm->xef->process_name;
+ ss->pid = q->vm->xef->pid;
+ }
+
strscpy(ss->process_name, process_name);
ss->gt = q->gt;
@@ -244,57 +305,61 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
}
/* keep going if fw fails as we still want to save the memory and SW data */
- if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL))
- xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
- coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
- coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
- coredump->snapshot.job = xe_sched_job_snapshot_capture(job);
- coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm);
+ ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true);
+ ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct);
+ ss->ge = xe_guc_exec_queue_snapshot_capture(q);
+ if (job)
+ ss->job = xe_sched_job_snapshot_capture(job);
+ ss->vm = xe_vm_snapshot_capture(q->vm);
- for_each_hw_engine(hwe, q->gt, id) {
- if (hwe->class != q->hwe->class ||
- !(BIT(hwe->logical_instance) & adj_logical_mask)) {
- coredump->snapshot.hwe[id] = NULL;
- continue;
- }
- coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
- }
+ xe_engine_snapshot_capture_for_queue(q);
queue_work(system_unbound_wq, &ss->work);
- xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
dma_fence_end_signalling(cookie);
}
/**
* xe_devcoredump - Take the required snapshots and initialize coredump device.
+ * @q: The faulty xe_exec_queue, where the issue was detected.
* @job: The faulty xe_sched_job, where the issue was detected.
+ * @fmt: Printf format + args to describe the reason for the core dump
*
* This function should be called at the crash time within the serialized
* gt_reset. It is skipped if we still have the core dump device available
* with the information of the 'first' snapshot.
*/
-void xe_devcoredump(struct xe_sched_job *job)
+__printf(3, 4)
+void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...)
{
- struct xe_device *xe = gt_to_xe(job->q->gt);
+ struct xe_device *xe = gt_to_xe(q->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
+ va_list varg;
+
+ mutex_lock(&coredump->lock);
if (coredump->captured) {
drm_dbg(&xe->drm, "Multiple hangs are occurring, but only the first snapshot was taken\n");
+ mutex_unlock(&coredump->lock);
return;
}
coredump->captured = true;
- devcoredump_snapshot(coredump, job);
+
+ va_start(varg, fmt);
+ coredump->snapshot.reason = kvasprintf(GFP_ATOMIC, fmt, varg);
+ va_end(varg);
+
+ devcoredump_snapshot(coredump, q, job);
drm_info(&xe->drm, "Xe device coredump has been created\n");
drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
xe->drm.primary->index);
- dev_coredumpm_timeout(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
- xe_devcoredump_read, xe_devcoredump_free,
- XE_COREDUMP_TIMEOUT_JIFFIES);
+ mutex_unlock(&coredump->lock);
}
static void xe_driver_devcoredump_fini(void *arg)
@@ -306,7 +371,106 @@ static void xe_driver_devcoredump_fini(void *arg)
int xe_devcoredump_init(struct xe_device *xe)
{
+ int err;
+
+ err = drmm_mutex_init(&xe->drm, &xe->devcoredump.lock);
+ if (err)
+ return err;
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&xe->devcoredump.lock);
+ fs_reclaim_release(GFP_KERNEL);
+ }
+
return devm_add_action_or_reset(xe->drm.dev, xe_driver_devcoredump_fini, &xe->drm);
}
#endif
+
+/**
+ * xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
+ * @p: the printer object to output to
+ * @prefix: optional prefix to add to output string
+ * @suffix: optional suffix to add at the end. 0 disables it and is
+ * not added to the output, which is useful when using multiple calls
+ * to dump data to @p
+ * @blob: the Binary Large OBject to dump out
+ * @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
+ * @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
+ *
+ * The output is split into multiple calls to drm_puts() because some print
+ * targets, e.g. dmesg, cannot handle arbitrarily long lines. These targets may
+ * add newlines, as is the case with dmesg: each drm_puts() call creates a
+ * separate line.
+ *
+ * There is also a scheduler yield call to prevent the 'task has been stuck for
+ * 120s' kernel hang check feature from firing when printing to a slow target
+ * such as dmesg over a serial port.
+ */
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
+ const void *blob, size_t offset, size_t size)
+{
+ const u32 *blob32 = (const u32 *)blob;
+ char buff[ASCII85_BUFSZ], *line_buff;
+ size_t line_pos = 0;
+
+#define DMESG_MAX_LINE_LEN 800
+ /* Always leave space for the suffix char and the \0 */
+#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "<suffix>\0" */
+
+ if (size & 3)
+ drm_printf(p, "Size not word aligned: %zu", size);
+ if (offset & 3)
+ drm_printf(p, "Offset not word aligned: %zu", size);
+
+ line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(line_buff)) {
+ drm_printf(p, "Failed to allocate line buffer: %pe", line_buff);
+ return;
+ }
+
+ blob32 += offset / sizeof(*blob32);
+ size /= sizeof(*blob32);
+
+ if (prefix) {
+ strscpy(line_buff, prefix, DMESG_MAX_LINE_LEN - MIN_SPACE - 2);
+ line_pos = strlen(line_buff);
+
+ line_buff[line_pos++] = ':';
+ line_buff[line_pos++] = ' ';
+ }
+
+ while (size--) {
+ u32 val = *(blob32++);
+
+ strscpy(line_buff + line_pos, ascii85_encode(val, buff),
+ DMESG_MAX_LINE_LEN - line_pos);
+ line_pos += strlen(line_buff + line_pos);
+
+ if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
+ line_buff[line_pos++] = 0;
+
+ drm_puts(p, line_buff);
+
+ line_pos = 0;
+
+ /* Prevent 'stuck thread' time out errors */
+ cond_resched();
+ }
+ }
+
+ if (suffix)
+ line_buff[line_pos++] = suffix;
+
+ if (line_pos) {
+ line_buff[line_pos++] = 0;
+ drm_puts(p, line_buff);
+ }
+
+ kfree(line_buff);
+
+#undef MIN_SPACE
+#undef DMESG_MAX_LINE_LEN
+}
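As an illustration of the suffix convention documented above, a hypothetical caller can stream one blob in two calls, terminating the output only on the last one (len is assumed to be a multiple of 8 so both halves stay u32-aligned):

	/* Sketch: two-part dump sharing one output stream (names hypothetical) */
	xe_print_blob_ascii85(p, "[LOG]", 0, data, 0, len / 2);
	xe_print_blob_ascii85(p, NULL, '\n', data, len / 2, len / 2);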
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index e2fa65ce0932..5391a80a4d1b 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -6,14 +6,20 @@
#ifndef _XE_DEVCOREDUMP_H_
#define _XE_DEVCOREDUMP_H_
+#include <linux/types.h>
+
+struct drm_printer;
struct xe_device;
+struct xe_exec_queue;
struct xe_sched_job;
#ifdef CONFIG_DEV_COREDUMP
-void xe_devcoredump(struct xe_sched_job *job);
+void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...);
int xe_devcoredump_init(struct xe_device *xe);
#else
-static inline void xe_devcoredump(struct xe_sched_job *job)
+static inline void xe_devcoredump(struct xe_exec_queue *q,
+ struct xe_sched_job *job,
+ const char *fmt, ...)
{
}
@@ -23,4 +29,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
}
#endif
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
+ const void *blob, size_t offset, size_t size);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 440d05d77a5a..1a1d16a96b2d 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -28,22 +28,37 @@ struct xe_devcoredump_snapshot {
ktime_t boot_time;
/** @process_name: Name of process that triggered this gpu hang */
char process_name[TASK_COMM_LEN];
+ /** @pid: Process id of process that triggered this gpu hang */
+ pid_t pid;
+ /** @reason: The reason the coredump was triggered */
+ char *reason;
/** @gt: Affected GT, used by forcewake for delayed capture */
struct xe_gt *gt;
/** @work: Workqueue for deferred capture outside of signaling context */
struct work_struct work;
- /* GuC snapshots */
- /** @ct: GuC CT snapshot */
- struct xe_guc_ct_snapshot *ct;
- /** @ge: Guc Engine snapshot */
+ /** @guc: GuC snapshots */
+ struct {
+ /** @guc.ct: GuC CT snapshot */
+ struct xe_guc_ct_snapshot *ct;
+ /** @guc.log: GuC log snapshot */
+ struct xe_guc_log_snapshot *log;
+ } guc;
+
+ /** @ge: GuC Submission Engine snapshot */
struct xe_guc_submit_exec_queue_snapshot *ge;
/** @hwe: HW Engine snapshot array */
struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES];
/** @job: Snapshot of job state */
struct xe_sched_job_snapshot *job;
+ /**
+ * @matched_node: The matched capture node for the timed-out job.
+ * This single-node tracker works because devcoredump only ever
+ * produces one HW-engine capture per devcoredump event.
+ */
+ struct __guc_capture_parsed_output *matched_node;
/** @vm: Snapshot of VM state */
struct xe_vm_snapshot *vm;
@@ -65,7 +80,9 @@ struct xe_devcoredump_snapshot {
* for reading the information.
*/
struct xe_devcoredump {
- /** @captured: The snapshot of the first hang has already been taken. */
+ /** @lock: protects access to entire structure */
+ struct mutex lock;
+ /** @captured: The snapshot of the first hang has already been taken */
bool captured;
/** @snapshot: Snapshot is captured at time of the first crash */
struct xe_devcoredump_snapshot snapshot;
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 5a63d135ba96..4e1839b483a0 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -5,10 +5,11 @@
#include "xe_device.h"
+#include <linux/aperture.h>
#include <linux/delay.h>
+#include <linux/fault-inject.h>
#include <linux/units.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
@@ -43,6 +44,7 @@
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
+#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pat.h"
#include "xe_pcode.h"
@@ -54,6 +56,7 @@
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_vram.h"
+#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"
@@ -87,10 +90,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
mutex_init(&xef->exec_queue.lock);
xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
- spin_lock(&xe->clients.lock);
- xe->clients.count++;
- spin_unlock(&xe->clients.lock);
-
file->driver_priv = xef;
kref_init(&xef->refcount);
@@ -107,17 +106,12 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
static void xe_file_destroy(struct kref *ref)
{
struct xe_file *xef = container_of(ref, struct xe_file, refcount);
- struct xe_device *xe = xef->xe;
xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock);
xa_destroy(&xef->vm.xa);
mutex_destroy(&xef->vm.lock);
- spin_lock(&xe->clients.lock);
- xe->clients.count--;
- spin_unlock(&xe->clients.lock);
-
xe_drm_client_put(xef->client);
kfree(xef->process_name);
kfree(xef);
@@ -277,7 +271,6 @@ static struct drm_driver driver = {
.fops = &xe_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -310,7 +303,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xe_display_driver_set_hooks(&driver);
- err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+ err = aperture_remove_conflicting_pci_devices(pdev, driver.name);
if (err)
return ERR_PTR(err);
@@ -332,8 +325,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
- spin_lock_init(&xe->irq.lock);
- spin_lock_init(&xe->clients.lock);
+ err = xe_irq_init(xe);
+ if (err)
+ goto err;
init_waitqueue_head(&xe->ufence_wq);
@@ -359,7 +353,8 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
INIT_LIST_HEAD(&xe->pinned.external_vram);
INIT_LIST_HEAD(&xe->pinned.evicted);
- xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
+ xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
+ WQ_MEM_RECLAIM);
xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0);
@@ -374,6 +369,10 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
goto err;
}
+ err = drmm_mutex_init(&xe->drm, &xe->pmt.lock);
+ if (err)
+ goto err;
+
err = xe_display_create(xe);
if (WARN_ON(err))
goto err;
@@ -383,6 +382,12 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
err:
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */
+
+static bool xe_driver_flr_disabled(struct xe_device *xe)
+{
+ return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS;
+}
/*
* The driver-initiated FLR is the highest level of reset that we can trigger
@@ -397,17 +402,12 @@ err:
* if/when a new instance of i915 is bound to the device it will do a full
* re-init anyway.
*/
-static void xe_driver_flr(struct xe_device *xe)
+static void __xe_driver_flr(struct xe_device *xe)
{
const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
int ret;
- if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
- drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
- return;
- }
-
drm_dbg(&xe->drm, "Triggering Driver-FLR\n");
/*
@@ -419,25 +419,25 @@ static void xe_driver_flr(struct xe_device *xe)
* is still pending (unless the HW is totally dead), but better to be
* safe in case something unexpected happens
*/
- ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
+ ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
return;
}
- xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+ xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
/* Trigger the actual Driver-FLR */
- xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);
+ xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);
/* Wait for hardware teardown to complete */
- ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
+ ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
return;
}
/* Wait for hardware/firmware re-init to complete */
- ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
+ ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
@@ -445,7 +445,17 @@ static void xe_driver_flr(struct xe_device *xe)
}
/* Clear sticky completion status */
- xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+ xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
+}
+
+static void xe_driver_flr(struct xe_device *xe)
+{
+ if (xe_driver_flr_disabled(xe)) {
+ drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
+ return;
+ }
+
+ __xe_driver_flr(xe);
}
static void xe_driver_flr_fini(void *arg)
@@ -488,16 +498,15 @@ mask_err:
return err;
}
-static bool verify_lmem_ready(struct xe_gt *gt)
+static bool verify_lmem_ready(struct xe_device *xe)
{
- u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;
+ u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT;
return !!val;
}
static int wait_for_lmem_ready(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
unsigned long timeout, start;
if (!IS_DGFX(xe))
@@ -506,13 +515,13 @@ static int wait_for_lmem_ready(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return 0;
- if (verify_lmem_ready(gt))
+ if (verify_lmem_ready(xe))
return 0;
drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
start = jiffies;
- timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */
+ timeout = start + secs_to_jiffies(60); /* 60 sec! */
do {
if (signal_pending(current))
@@ -535,13 +544,14 @@ static int wait_for_lmem_ready(struct xe_device *xe)
msleep(20);
- } while (!verify_lmem_ready(gt));
+ } while (!verify_lmem_ready(xe));
drm_dbg(&xe->drm, "lmem ready after %ums",
jiffies_to_msecs(jiffies - start));
return 0;
}
+ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */
static void update_device_info(struct xe_device *xe)
{
@@ -589,19 +599,21 @@ int xe_device_probe_early(struct xe_device *xe)
return 0;
}
-static int xe_device_set_has_flat_ccs(struct xe_device *xe)
+static int probe_has_flat_ccs(struct xe_device *xe)
{
+ struct xe_gt *gt;
+ unsigned int fw_ref;
u32 reg;
- int err;
- if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
+ /* Always enabled/disabled, no runtime check to do */
+ if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
return 0;
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ gt = xe_root_mmio_gt(xe);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- return err;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);
@@ -610,7 +622,8 @@ static int xe_device_set_has_flat_ccs(struct xe_device *xe)
drm_dbg(&xe->drm,
"Flat CCS has been disabled in bios, May lead to performance impact");
- return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
int xe_device_probe(struct xe_device *xe)
@@ -646,6 +659,13 @@ int xe_device_probe(struct xe_device *xe)
err = xe_gt_init_early(gt);
if (err)
return err;
+
+ /*
+ * Only after this point can GT-specific MMIO operations
+ * (including things like communication with the GuC)
+ * be performed.
+ */
+ xe_gt_mmio_init(gt);
}
for_each_tile(tile, xe, id) {
@@ -661,11 +681,9 @@ int xe_device_probe(struct xe_device *xe)
err = xe_ggtt_init_early(tile->mem.ggtt);
if (err)
return err;
- if (IS_SRIOV_VF(xe)) {
- err = xe_memirq_init(&tile->sriov.vf.memirq);
- if (err)
- return err;
- }
+ err = xe_memirq_init(&tile->memirq);
+ if (err)
+ return err;
}
for_each_gt(gt, xe, id) {
@@ -689,7 +707,7 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err;
- err = xe_device_set_has_flat_ccs(xe);
+ err = probe_has_flat_ccs(xe);
if (err)
goto err;
@@ -749,6 +767,8 @@ int xe_device_probe(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_sanitize_freq(gt);
+ xe_vsec_init(xe);
+
return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
err_fini_display:
@@ -799,6 +819,24 @@ void xe_device_remove(struct xe_device *xe)
void xe_device_shutdown(struct xe_device *xe)
{
+ struct xe_gt *gt;
+ u8 id;
+
+ drm_dbg(&xe->drm, "Shutting down device\n");
+
+ if (xe_driver_flr_disabled(xe)) {
+ xe_display_pm_shutdown(xe);
+
+ xe_irq_suspend(xe);
+
+ for_each_gt(gt, xe, id)
+ xe_gt_shutdown(gt);
+
+ xe_display_pm_shutdown_late(xe);
+ } else {
+ /* BOOM! */
+ __xe_driver_flr(xe);
+ }
}
/**
@@ -812,11 +850,9 @@ void xe_device_shutdown(struct xe_device *xe)
*/
void xe_device_wmb(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
-
wmb();
if (IS_DGFX(xe))
- xe_mmio_write32(gt, VF_CAP_REG, 0);
+ xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
}
/**
@@ -840,6 +876,7 @@ void xe_device_wmb(struct xe_device *xe)
void xe_device_td_flush(struct xe_device *xe)
{
struct xe_gt *gt;
+ unsigned int fw_ref;
u8 id;
if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
@@ -854,10 +891,11 @@ void xe_device_td_flush(struct xe_device *xe)
if (xe_gt_is_media_type(gt))
continue;
- if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
- xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
+ xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
/*
* FIXME: We can likely do better here with our choice of
* timeout. Currently we just assume the worst case, i.e. 150us,
@@ -865,36 +903,36 @@ void xe_device_td_flush(struct xe_device *xe)
* scenario on current platforms if all cache entries are
* transient and need to be flushed..
*/
- if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
+ if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
150, NULL, false))
xe_gt_err_once(gt, "TD flush timeout\n");
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
}
void xe_device_l2_flush(struct xe_device *xe)
{
struct xe_gt *gt;
- int err;
+ unsigned int fw_ref;
gt = xe_root_mmio_gt(xe);
if (!XE_WA(gt, 16023588340))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
spin_lock(&gt->global_invl_lock);
- xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
+ xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
- if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+ if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
xe_gt_err_once(gt, "Global invalidation timeout\n");
spin_unlock(&gt->global_invl_lock);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
@@ -929,6 +967,7 @@ void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
for_each_gt(gt, xe, id) {
drm_printf(p, "GT id: %u\n", id);
+ drm_printf(p, "\tTile: %u\n", gt->tile->id);
drm_printf(p, "\tType: %s\n",
gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
drm_printf(p, "\tIP ver: %u.%u.%u\n",
@@ -960,7 +999,7 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
* xe_device_declare_wedged - Declare device wedged
* @xe: xe device instance
*
- * This is a final state that can only be cleared with a mudule
+ * This is a final state that can only be cleared with a module
* re-probe (unbind + bind).
* In this state every IOCTL will be blocked so the GT cannot be used.
* In general it will be called upon any critical error such as gt reset
@@ -980,13 +1019,13 @@ void xe_device_declare_wedged(struct xe_device *xe)
return;
}
+ xe_pm_runtime_get_noresume(xe);
+
if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
return;
}
- xe_pm_runtime_get_noresume(xe);
-
if (!atomic_xchg(&xe->wedged.flag, 1)) {
xe->needs_flr_on_fini = true;
drm_err(&xe->drm,
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 894f04770454..fc3c2af3fb7f 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -9,6 +9,8 @@
#include <drm/drm_util.h>
#include "xe_device_types.h"
+#include "xe_gt_types.h"
+#include "xe_sriov.h"
static inline struct xe_device *to_xe_device(const struct drm_device *dev)
{
@@ -138,7 +140,7 @@ static inline bool xe_device_uc_enabled(struct xe_device *xe)
static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
{
- return &gt->mmio.fw;
+ return &gt->pm.fw;
}
void xe_device_assert_mem_access(struct xe_device *xe);
@@ -153,11 +155,21 @@ static inline bool xe_device_has_sriov(struct xe_device *xe)
return xe->info.has_sriov;
}
+static inline bool xe_device_has_msix(struct xe_device *xe)
+{
+ return xe->irq.msix.nvec > 0;
+}
+
static inline bool xe_device_has_memirq(struct xe_device *xe)
{
return GRAPHICS_VERx100(xe) >= 1250;
}
+static inline bool xe_device_uses_memirq(struct xe_device *xe)
+{
+ return xe_device_has_memirq(xe) && (IS_SRIOV_VF(xe) || xe_device_has_msix(xe));
+}
+
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);
void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);
@@ -178,4 +190,18 @@ void xe_device_declare_wedged(struct xe_device *xe);
struct xe_file *xe_file_get(struct xe_file *xef);
void xe_file_put(struct xe_file *xef);
+/*
+ * Occasionally the G2H worker starts running more than a second after being
+ * queued and activated by the Linux workqueue subsystem, which leads to G2H
+ * timeout errors. The root cause lies in the scheduling latency of the
+ * Lunarlake hybrid CPU; the issue disappears if the Lunarlake atom cores are
+ * disabled in the BIOS, which is beyond the xe KMD's control.
+ *
+ * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
+ */
+#define LNL_FLUSH_WORKQUEUE(wq__) \
+ flush_workqueue(wq__)
+#define LNL_FLUSH_WORK(wrk__) \
+ flush_work(wrk__)
+
#endif
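A hedged sketch of the intended use at a G2H wait site; the wait condition and member names below are illustrative:

	/* Sketch: flush the worker before concluding the G2H reply timed out */
	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
	if (!ret) {
		LNL_FLUSH_WORK(&ct->g2h_worker);
		if (g2h_fence.done)	/* it was only scheduling latency */
			ret = 1;
	}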
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 09d731a9125c..8a7b15972413 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -14,10 +14,9 @@
#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
-#include "xe_gt_types.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
-#include "xe_oa.h"
+#include "xe_oa_types.h"
#include "xe_platform_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
@@ -43,8 +42,6 @@ struct xe_pat_ops;
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
-#define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi)
-#define HAS_HECI_CSCFI(xe) ((xe)->info.has_heci_cscfi)
#define XE_VRAM_FLAGS_NEED64K BIT(0)
@@ -108,6 +105,45 @@ struct xe_mem_region {
};
/**
+ * struct xe_mmio - register mmio structure
+ *
+ * Represents an MMIO region that the CPU may use to access registers. A
+ * region may share its IO map with other regions (e.g., all GTs within a
+ * tile share the same map with their parent tile, but represent different
+ * subregions of the overall IO space).
+ */
+struct xe_mmio {
+ /** @tile: Backpointer to tile, used for tracing */
+ struct xe_tile *tile;
+
+ /** @regs: Map used to access registers. */
+ void __iomem *regs;
+
+ /**
+ * @sriov_vf_gt: Backpointer to GT.
+ *
+ * This pointer is only set for GT MMIO regions and only when running
+ * as an SRIOV VF structure
+ */
+ struct xe_gt *sriov_vf_gt;
+
+ /**
+ * @regs_size: Length of the register region within the map.
+ *
+ * The size of the iomap set in *regs is generally larger than the
+ * register mmio space since it includes unused regions and/or
+ * non-register regions such as the GGTT PTEs.
+ */
+ size_t regs_size;
+
+ /** @adj_limit: adjust MMIO address if address is below this value */
+ u32 adj_limit;
+
+ /** @adj_offset: offset to add to MMIO address when adjusting */
+ u32 adj_offset;
+};
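To make the adjustment fields concrete, a hypothetical helper showing how @adj_limit and @adj_offset are meant to combine:

	/* Sketch (hypothetical helper): adjust a register address for this region */
	static inline u32 example_mmio_adjusted_addr(const struct xe_mmio *mmio, u32 addr)
	{
		if (addr < mmio->adj_limit)
			addr += mmio->adj_offset;
		return addr;
	}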
+
+/**
* struct xe_tile - hardware tile structure
*
* From a driver perspective, a "tile" is effectively a complete GPU, containing
@@ -148,26 +184,14 @@ struct xe_tile {
* * 4MB-8MB: reserved
* * 8MB-16MB: global GTT
*/
- struct {
- /** @mmio.size: size of tile's MMIO space */
- size_t size;
-
- /** @mmio.regs: pointer to tile's MMIO space (starting with registers) */
- void __iomem *regs;
- } mmio;
+ struct xe_mmio mmio;
/**
* @mmio_ext: MMIO-extension info for a tile.
*
* Each tile has its own additional 256MB (28-bit) MMIO-extension space.
*/
- struct {
- /** @mmio_ext.size: size of tile's additional MMIO-extension space */
- size_t size;
-
- /** @mmio_ext.regs: pointer to tile's additional MMIO-extension space */
- void __iomem *regs;
- } mmio_ext;
+ struct xe_mmio mmio_ext;
/** @mem: memory management info for tile */
struct {
@@ -200,14 +224,14 @@ struct xe_tile {
struct xe_lmtt lmtt;
} pf;
struct {
- /** @sriov.vf.memirq: Memory Based Interrupts. */
- struct xe_memirq memirq;
-
/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
struct xe_ggtt_node *ggtt_balloon[2];
} vf;
} sriov;
+ /** @memirq: Memory Based Interrupts. */
+ struct xe_memirq memirq;
+
/** @pcode: tile's PCODE */
struct {
/** @pcode.lock: protecting tile's PCODE mailbox data */
@@ -270,14 +294,24 @@ struct xe_device {
/** @info.va_bits: Maximum bits of a virtual address */
u8 va_bits;
- /** @info.is_dgfx: is discrete device */
- u8 is_dgfx:1;
- /** @info.has_asid: Has address space ID */
- u8 has_asid:1;
+ /*
+ * Keep all flags below alphabetically sorted
+ */
+
/** @info.force_execlist: Forced execlist submission */
u8 force_execlist:1;
+ /** @info.has_asid: Has address space ID */
+ u8 has_asid:1;
+ /** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
+ u8 has_atomic_enable_pte_bit:1;
+ /** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
+ u8 has_device_atomics_on_smem:1;
/** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
+ /** @info.has_heci_cscfi: device has heci cscfi */
+ u8 has_heci_cscfi:1;
+ /** @info.has_heci_gscfi: device has heci gscfi */
+ u8 has_heci_gscfi:1;
/** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
/** @info.has_mmio_ext: Device has extra MMIO address range */
@@ -288,6 +322,8 @@ struct xe_device {
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
+ /** @info.is_dgfx: is discrete device */
+ u8 is_dgfx:1;
/**
* @info.probe_display: Probe display hardware. If set to
* false, the driver will behave as if there is no display
@@ -297,20 +333,12 @@ struct xe_device {
* state the firmware or bootloader left it in.
*/
u8 probe_display:1;
+ /** @info.skip_guc_pc: Skip GuC based PM feature init */
+ u8 skip_guc_pc:1;
/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
u8 skip_mtcfg:1;
/** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
- /** @info.has_heci_gscfi: device has heci gscfi */
- u8 has_heci_gscfi:1;
- /** @info.has_heci_cscfi: device has heci cscfi */
- u8 has_heci_cscfi:1;
- /** @info.skip_guc_pc: Skip GuC based PM feature init */
- u8 skip_guc_pc:1;
- /** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
- u8 has_atomic_enable_pte_bit:1;
- /** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
- u8 has_device_atomics_on_smem:1;
} info;
/** @irq: device interrupt state */
@@ -319,7 +347,15 @@ struct xe_device {
spinlock_t lock;
/** @irq.enabled: interrupts enabled on this device */
- bool enabled;
+ atomic_t enabled;
+
+ /** @irq.msix: irq info for platforms that support MSI-X */
+ struct {
+ /** @irq.msix.nvec: number of MSI-X interrupts */
+ u16 nvec;
+ /** @irq.msix.indexes: used to allocate MSI-X indexes */
+ struct xarray indexes;
+ } msix;
} irq;
/** @ttm: ttm device */
@@ -348,20 +384,13 @@ struct xe_device {
/** @sriov.pf: PF specific data */
struct xe_device_pf pf;
+ /** @sriov.vf: VF specific data */
+ struct xe_device_vf vf;
/** @sriov.wq: workqueue used by the virtualization workers */
struct workqueue_struct *wq;
} sriov;
- /** @clients: drm clients info */
- struct {
- /** @clients.lock: Protects drm clients info */
- spinlock_t lock;
-
- /** @clients.count: number of drm clients */
- u64 count;
- } clients;
-
/** @usm: unified memory state */
struct {
/** @usm.asid: convert a ASID to VM */
@@ -464,6 +493,12 @@ struct xe_device {
struct mutex lock;
} d3cold;
+ /** @pmt: Support the PMT driver callback interface */
+ struct {
+ /** @pmt.lock: protect access for telemetry data */
+ struct mutex lock;
+ } pmt;
+
/**
* @pm_callback_task: Track the active task that is running in either
* the runtime_suspend or runtime_resume callbacks.
@@ -571,7 +606,7 @@ struct xe_file {
/** @vm.xe: xarray to store VMs */
struct xarray xa;
/**
- * @vm.lock: Protects VM lookup + reference and removal a from
+ * @vm.lock: Protects VM lookup + reference and removal from
* file xarray. Not an intended to be an outer lock which does
* thing while being held.
*/
@@ -584,10 +619,15 @@ struct xe_file {
struct xarray xa;
/**
* @exec_queue.lock: Protects exec queue lookup + reference and
- * removal a frommfile xarray. Not an intended to be an outer
- * lock which does thing while being held.
+ * removal from file xarray. Not intended to be an outer lock
+ * which does things while being held.
*/
struct mutex lock;
+ /**
+ * @exec_queue.pending_removal: count of exec queues pending removal,
+ * used to synchronize GPU state updates with an ongoing query.
+ */
+ atomic_t pending_removal;
} exec_queue;
/** @run_ticks: hw engine class run time in ticks for this drm client */
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 68f309f5e981..c5b95470fa32 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -20,7 +20,7 @@
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
static int xe_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach)
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index fb52a23e28f8..63f30b6df70b 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -261,6 +261,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
if (man) {
drm_print_memory_stats(p,
&stats[mem_type],
+ DRM_GEM_OBJECT_ACTIVE |
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
@@ -269,6 +270,49 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
}
}
+static struct xe_hw_engine *any_engine(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned long gt_id;
+
+ for_each_gt(gt, xe, gt_id) {
+ struct xe_hw_engine *hwe = xe_gt_any_hw_engine(gt);
+
+ if (hwe)
+ return hwe;
+ }
+
+ return NULL;
+}
+
+static bool force_wake_get_any_engine(struct xe_device *xe,
+ struct xe_hw_engine **phwe,
+ unsigned int *pfw_ref)
+{
+ enum xe_force_wake_domains domain;
+ unsigned int fw_ref;
+ struct xe_hw_engine *hwe;
+ struct xe_force_wake *fw;
+
+ hwe = any_engine(xe);
+ if (!hwe)
+ return false;
+
+ domain = xe_hw_engine_to_fw_domain(hwe);
+ fw = gt_to_fw(hwe->gt);
+
+ fw_ref = xe_force_wake_get(fw, domain);
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain)) {
+ xe_force_wake_put(fw, fw_ref);
+ return false;
+ }
+
+ *phwe = hwe;
+ *pfw_ref = fw_ref;
+
+ return true;
+}
+
static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
{
unsigned long class, i, gt_id, capacity[XE_ENGINE_CLASS_MAX] = { };
@@ -278,8 +322,20 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
struct xe_hw_engine *hwe;
struct xe_exec_queue *q;
u64 gpu_timestamp;
+ unsigned int fw_ref;
+
+ /*
+ * Wait for any exec queue going away: their cycles will get updated on
+ * context switch out, so wait for that to happen
+ */
+ wait_var_event(&xef->exec_queue.pending_removal,
+ !atomic_read(&xef->exec_queue.pending_removal));
xe_pm_runtime_get(xe);
+ if (!force_wake_get_any_engine(xe, &hwe, &fw_ref)) {
+ xe_pm_runtime_put(xe);
+ return;
+ }
/* Accumulate all the exec queues from this client */
mutex_lock(&xef->exec_queue.lock);
@@ -294,30 +350,11 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
}
mutex_unlock(&xef->exec_queue.lock);
- /* Get the total GPU cycles */
- for_each_gt(gt, xe, gt_id) {
- enum xe_force_wake_domains fw;
-
- hwe = xe_gt_any_hw_engine(gt);
- if (!hwe)
- continue;
-
- fw = xe_hw_engine_to_fw_domain(hwe);
- if (xe_force_wake_get(gt_to_fw(gt), fw)) {
- hwe = NULL;
- break;
- }
-
- gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw));
- break;
- }
+ gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
+ xe_force_wake_put(gt_to_fw(hwe->gt), fw_ref);
xe_pm_runtime_put(xe);
- if (unlikely(!hwe))
- return;
-
for (class = 0; class < XE_ENGINE_CLASS_MAX; class++) {
const char *class_name;
@@ -348,7 +385,7 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
* @p: The drm_printer ptr
* @file: The drm_file ptr
*
- * This is callabck for drm fdinfo interface. Register this callback
+ * This is callback for drm fdinfo interface. Register this callback
* in drm driver ops for show_fdinfo.
*
* Return: void
diff --git a/drivers/gpu/drm/xe/xe_drv.h b/drivers/gpu/drm/xe/xe_drv.h
index d45b71426cc8..d61650d4aa0b 100644
--- a/drivers/gpu/drm/xe/xe_drv.h
+++ b/drivers/gpu/drm/xe/xe_drv.h
@@ -10,7 +10,6 @@
#define DRIVER_NAME "xe"
#define DRIVER_DESC "Intel Xe Graphics"
-#define DRIVER_DATE "20201103"
/* Interface history:
*
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 7b38485817dc..df8ce550deb4 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -33,7 +33,7 @@
*
* In XE we avoid all of this complication by not allowing a BO list to be
* passed into an exec, using the dma-buf implicit sync uAPI, have binds as
- * seperate operations, and using the DRM scheduler to flow control the ring.
+ * separate operations, and using the DRM scheduler to flow control the ring.
* Let's deep dive on each of these.
*
* We can get away from a BO list by forcing the user to use in / out fences on
@@ -41,11 +41,6 @@
* user knows an exec writes to a BO and reads from the BO in the next exec, it
* is the user's responsibility to pass in / out fence between the two execs).
*
- * Implicit dependencies for external BOs are handled by using the dma-buf
- * implicit dependency uAPI (TODO: add link). To make this works each exec must
- * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external
- * BO mapped in the VM.
- *
* We do not allow a user to trigger a bind at exec time rather we have a VM
* bind IOCTL which uses the same in / out fence interface as exec. In that
* sense, a VM bind is basically the same operation as an exec from the user
@@ -59,8 +54,8 @@
* behind any pending kernel operations on any external BOs in VM or any BOs
* private to the VM. This is accomplished by the rebinds waiting on BOs
* DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BOs
- * slots (inflight execs are in the DMA_RESV_USAGE_BOOKING for private BOs and
- * in DMA_RESV_USAGE_WRITE for external BOs).
+ * slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP for private BOs and
+ * for external BOs).
*
* Rebinds / dma-resv usage applies to non-compute mode VMs only as for compute
* mode VMs we use preempt fences and a rebind worker (TODO: add link).
@@ -137,12 +132,16 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
- if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
- return -EINVAL;
+ if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
+ err = -EINVAL;
+ goto err_exec_queue;
+ }
if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
- q->width != args->num_batch_buffer))
- return -EINVAL;
+ q->width != args->num_batch_buffer)) {
+ err = -EINVAL;
+ goto err_exec_queue;
+ }
if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) {
err = -ECANCELED;
@@ -204,14 +203,14 @@ retry:
write_locked = false;
}
if (err)
- goto err_syncs;
+ goto err_hw_exec_mode;
if (write_locked) {
err = xe_vm_userptr_pin(vm);
downgrade_write(&vm->lock);
write_locked = false;
if (err)
- goto err_hw_exec_mode;
+ goto err_unlock_list;
}
if (!args->num_batch_buffer) {
@@ -225,6 +224,7 @@ retry:
fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
+ xe_vm_unlock(vm);
goto err_unlock_list;
}
for (i = 0; i < num_syncs; i++)
@@ -304,7 +304,8 @@ retry:
xe_sched_job_arm(job);
if (!xe_vm_in_lr_mode(vm))
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
- DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
+ DMA_RESV_USAGE_BOOKKEEP,
+ DMA_RESV_USAGE_BOOKKEEP);
for (i = 0; i < num_syncs; i++) {
xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index d098d2dd1b2d..7e1abbbfba12 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -8,6 +8,7 @@
#include <linux/nospec.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>
@@ -16,6 +17,7 @@
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
@@ -68,6 +70,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
q->gt = gt;
q->class = hwe->class;
q->width = width;
+ q->msix_vec = XE_IRQ_DEFAULT_MSIX;
q->logical_mask = logical_mask;
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
@@ -117,7 +120,7 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
}
for (i = 0; i < q->width; ++i) {
- q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K);
+ q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec);
if (IS_ERR(q->lrc[i])) {
err = PTR_ERR(q->lrc[i]);
goto err_unlock;
@@ -240,6 +243,7 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
return q;
}
+ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
void xe_exec_queue_destroy(struct kref *ref)
{
@@ -260,8 +264,17 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
{
int i;
+ /*
+ * Before releasing our ref to lrc and xef, accumulate our run ticks
+ * and wake up any waiters.
+ */
+ xe_exec_queue_update_run_ticks(q);
+ if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
+ wake_up_var(&q->xef->exec_queue.pending_removal);
+
for (i = 0; i < q->width; ++i)
xe_lrc_put(q->lrc[i]);
+
__xe_exec_queue_free(q);
}
@@ -756,20 +769,21 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
*/
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
- struct xe_file *xef;
+ struct xe_device *xe = gt_to_xe(q->gt);
struct xe_lrc *lrc;
u32 old_ts, new_ts;
+ int idx;
/*
- * Jobs that are run during driver load may use an exec_queue, but are
- * not associated with a user xe file, so avoid accumulating busyness
- * for kernel specific work.
+ * Jobs that are executed by the kernel don't have a corresponding xe_file
+ * and thus are not accounted.
*/
- if (!q->vm || !q->vm->xef)
+ if (!q->xef)
return;
- xef = q->vm->xef;
-
+ /* Synchronize with unbind while holding the xe file open */
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return;
/*
* Only sample the first LRC. For parallel submission, all of them are
* scheduled together and we compensate that below by multiplying by
@@ -780,7 +794,9 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
*/
lrc = q->lrc[0];
new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
- xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+ q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+
+ drm_dev_exit(idx);
}
/**
@@ -820,7 +836,10 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
mutex_lock(&xef->exec_queue.lock);
q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
+ if (q)
+ atomic_inc(&xef->exec_queue.pending_removal);
mutex_unlock(&xef->exec_queue.lock);
+
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 7deb480e26af..5af5419cec7a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -41,7 +41,7 @@ struct xe_exec_queue {
/** @xef: Back pointer to xe file if this is user created exec queue */
struct xe_file *xef;
- /** @gt: graphics tile this exec queue can submit to */
+ /** @gt: GT structure this exec queue can submit to */
struct xe_gt *gt;
/**
* @hwe: A hardware of the same class. May (physical engine) or may not
@@ -63,6 +63,8 @@ struct xe_exec_queue {
char name[MAX_FENCE_NAME_LEN];
/** @width: width (number BB submitted per exec) of this exec queue */
u16 width;
+ /** @msix_vec: MSI-X vector (for platforms that support it) */
+ u16 msix_vec;
/** @fence_irq: fence IRQ used to signal job completion */
struct xe_hw_fence_irq *fence_irq;
@@ -143,7 +145,7 @@ struct xe_exec_queue {
/** @hw_engine_group_link: link into exec queues in the same hw engine group */
struct list_head hw_engine_group_link;
/** @lrc: logical ring context for this exec queue */
- struct xe_lrc *lrc[];
+ struct xe_lrc *lrc[] __counted_by(width);
};
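The __counted_by(width) annotation lets the compiler and fortified accessors bounds-check lrc[] against @width; a sketch of the matching allocation idiom, assuming the usual struct_size() pattern:

	/* Sketch: allocate the flexible array and set the counter before use */
	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);
	q->width = width;	/* must be valid before q->lrc[] is indexed */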
/**
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 6a59165b9569..5ef96deaa881 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -17,6 +17,7 @@
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
@@ -44,8 +45,10 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
u32 ctx_id)
{
struct xe_gt *gt = hwe->gt;
+ struct xe_mmio *mmio = &gt->mmio;
struct xe_device *xe = gt_to_xe(gt);
u64 lrc_desc;
+ u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
lrc_desc = xe_lrc_descriptor(lrc);
@@ -58,7 +61,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
}
if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
- xe_mmio_write32(hwe->gt, RCU_MODE,
+ xe_mmio_write32(mmio, RCU_MODE,
_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
@@ -76,17 +79,19 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
*/
wmb();
- xe_mmio_write32(gt, RING_HWS_PGA(hwe->mmio_base),
+ xe_mmio_write32(mmio, RING_HWS_PGA(hwe->mmio_base),
xe_bo_ggtt_addr(hwe->hwsp));
- xe_mmio_read32(gt, RING_HWS_PGA(hwe->mmio_base));
- xe_mmio_write32(gt, RING_MODE(hwe->mmio_base),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+ xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base));
- xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
+ if (xe_device_has_msix(gt_to_xe(hwe->gt)))
+ ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), ring_mode);
+
+ xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
lower_32_bits(lrc_desc));
- xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base),
+ xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base),
upper_32_bits(lrc_desc));
- xe_mmio_write32(gt, RING_EXECLIST_CONTROL(hwe->mmio_base),
+ xe_mmio_write32(mmio, RING_EXECLIST_CONTROL(hwe->mmio_base),
EL_CTRL_LOAD);
}
@@ -168,8 +173,8 @@ static u64 read_execlist_status(struct xe_hw_engine *hwe)
struct xe_gt *gt = hwe->gt;
u32 hi, lo;
- lo = xe_mmio_read32(gt, RING_EXECLIST_STATUS_LO(hwe->mmio_base));
- hi = xe_mmio_read32(gt, RING_EXECLIST_STATUS_HI(hwe->mmio_base));
+ lo = xe_mmio_read32(&gt->mmio, RING_EXECLIST_STATUS_LO(hwe->mmio_base));
+ hi = xe_mmio_read32(&gt->mmio, RING_EXECLIST_STATUS_HI(hwe->mmio_base));
return lo | (u64)hi << 32;
}
@@ -264,7 +269,7 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
port->hwe = hwe;
- port->lrc = xe_lrc_create(hwe, NULL, SZ_16K);
+ port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX);
if (IS_ERR(port->lrc)) {
err = PTR_ERR(port->lrc);
goto err;
@@ -312,7 +317,7 @@ execlist_run_job(struct drm_sched_job *drm_job)
q->ring_ops->emit_job(job);
xe_execlist_make_active(exl);
- return dma_fence_get(job->fence);
+ return job->fence;
}
static void execlist_job_free(struct drm_sched_job *drm_job)
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index b263fff15273..4f6784e5abf8 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -21,15 +21,25 @@ static const char *str_wake_sleep(bool wake)
return wake ? "wake" : "sleep";
}
-static void domain_init(struct xe_force_wake_domain *domain,
+static void mark_domain_initialized(struct xe_force_wake *fw,
+ enum xe_force_wake_domain_id id)
+{
+ fw->initialized_domains |= BIT(id);
+}
+
+static void init_domain(struct xe_force_wake *fw,
enum xe_force_wake_domain_id id,
struct xe_reg reg, struct xe_reg ack)
{
+ struct xe_force_wake_domain *domain = &fw->domains[id];
+
domain->id = id;
domain->reg_ctl = reg;
domain->reg_ack = ack;
domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL);
domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL);
+
+ mark_domain_initialized(fw, id);
}
void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
@@ -43,13 +53,11 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
if (xe->info.graphics_verx100 >= 1270) {
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
- XE_FW_DOMAIN_ID_GT,
+ init_domain(fw, XE_FW_DOMAIN_ID_GT,
FORCEWAKE_GT,
FORCEWAKE_ACK_GT_MTL);
} else {
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
- XE_FW_DOMAIN_ID_GT,
+ init_domain(fw, XE_FW_DOMAIN_ID_GT,
FORCEWAKE_GT,
FORCEWAKE_ACK_GT);
}
@@ -63,8 +71,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
if (!xe_gt_is_media_type(gt))
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
- XE_FW_DOMAIN_ID_RENDER,
+ init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER,
FORCEWAKE_ACK_RENDER);
@@ -72,8 +79,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
if (!(gt->info.engine_mask & BIT(i)))
continue;
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j],
- XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
+ init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
FORCEWAKE_MEDIA_VDBOX(j),
FORCEWAKE_ACK_MEDIA_VDBOX(j));
}
@@ -82,15 +88,13 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
if (!(gt->info.engine_mask & BIT(i)))
continue;
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j],
- XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
+ init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
FORCEWAKE_MEDIA_VEBOX(j),
FORCEWAKE_ACK_MEDIA_VEBOX(j));
}
if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_GSC],
- XE_FW_DOMAIN_ID_GSC,
+ init_domain(fw, XE_FW_DOMAIN_ID_GSC,
FORCEWAKE_GSC,
FORCEWAKE_ACK_GSC);
}
@@ -100,7 +104,7 @@ static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain,
if (IS_SRIOV_VF(gt_to_xe(gt)))
return;
- xe_mmio_write32(gt, domain->reg_ctl, domain->mask | (wake ? domain->val : 0));
+ xe_mmio_write32(&gt->mmio, domain->reg_ctl, domain->mask | (wake ? domain->val : 0));
}
static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
@@ -111,13 +115,19 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
if (IS_SRIOV_VF(gt_to_xe(gt)))
return 0;
- ret = xe_mmio_wait32(gt, domain->reg_ack, domain->val, wake ? domain->val : 0,
+ ret = xe_mmio_wait32(&gt->mmio, domain->reg_ack, domain->val, wake ? domain->val : 0,
XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
&value, true);
if (ret)
- xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
- domain->id, str_wake_sleep(wake), ERR_PTR(ret),
- domain->reg_ack.addr, value);
+ xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
+ domain->id, str_wake_sleep(wake), ERR_PTR(ret),
+ domain->reg_ack.addr, value);
+ if (value == ~0) {
+ xe_gt_err(gt,
+ "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
+ domain->id, str_wake_sleep(wake));
+ ret = -EIO;
+ }
return ret;
}
@@ -150,52 +160,108 @@ static int domain_sleep_wait(struct xe_gt *gt,
(ffs(tmp__) - 1))) && \
domain__->reg_ctl.addr)
-int xe_force_wake_get(struct xe_force_wake *fw,
- enum xe_force_wake_domains domains)
+/**
+ * xe_force_wake_get() : Increase the domain refcount
+ * @fw: struct xe_force_wake
+ * @domains: forcewake domains to get refcount on
+ *
+ * This function wakes up @domains if they are asleep and takes references on
+ * them. If the requested domain is XE_FORCEWAKE_ALL, only the applicable
+ * (initialized) domains are refcounted, and it is the caller's responsibility
+ * to check whether the returned ref includes a specific domain using
+ * xe_force_wake_ref_has_domain(). The caller must call xe_force_wake_put()
+ * to drop the references taken here.
+ *
+ * Return: opaque reference to woken domains or zero if none of requested
+ * domains were awake.
+ */
+unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw,
+ enum xe_force_wake_domains domains)
{
struct xe_gt *gt = fw->gt;
struct xe_force_wake_domain *domain;
- enum xe_force_wake_domains tmp, woken = 0;
+ unsigned int ref_incr = 0, awake_rqst = 0, awake_failed = 0;
+ unsigned int tmp, ref_rqst;
unsigned long flags;
- int ret = 0;
+ xe_gt_assert(gt, is_power_of_2(domains));
+ xe_gt_assert(gt, domains <= XE_FORCEWAKE_ALL);
+ xe_gt_assert(gt, domains == XE_FORCEWAKE_ALL || fw->initialized_domains & domains);
+
+ ref_rqst = (domains == XE_FORCEWAKE_ALL) ? fw->initialized_domains : domains;
spin_lock_irqsave(&fw->lock, flags);
- for_each_fw_domain_masked(domain, domains, fw, tmp) {
+ for_each_fw_domain_masked(domain, ref_rqst, fw, tmp) {
if (!domain->ref++) {
- woken |= BIT(domain->id);
+ awake_rqst |= BIT(domain->id);
domain_wake(gt, domain);
}
+ ref_incr |= BIT(domain->id);
}
- for_each_fw_domain_masked(domain, woken, fw, tmp) {
- ret |= domain_wake_wait(gt, domain);
+ for_each_fw_domain_masked(domain, awake_rqst, fw, tmp) {
+ if (domain_wake_wait(gt, domain) == 0) {
+ fw->awake_domains |= BIT(domain->id);
+ } else {
+ awake_failed |= BIT(domain->id);
+ --domain->ref;
+ }
}
- fw->awake_domains |= woken;
+ ref_incr &= ~awake_failed;
spin_unlock_irqrestore(&fw->lock, flags);
- return ret;
+ xe_gt_WARN(gt, awake_failed, "Forcewake domain%s %#x failed to acknowledge awake request\n",
+ str_plural(hweight_long(awake_failed)), awake_failed);
+
+ if (domains == XE_FORCEWAKE_ALL && ref_incr == fw->initialized_domains)
+ ref_incr |= XE_FORCEWAKE_ALL;
+
+ return ref_incr;
}
-int xe_force_wake_put(struct xe_force_wake *fw,
- enum xe_force_wake_domains domains)
+/**
+ * xe_force_wake_put - Decrement the refcount and put domain to sleep if refcount becomes 0
+ * @fw: Pointer to the force wake structure
+ * @fw_ref: return of xe_force_wake_get()
+ *
+ * This function drops the references for the domains in @fw_ref. If the
+ * refcount of any of those domains reaches 0, the domain is put to sleep and
+ * its sleep acknowledgment is awaited with a 50 ms timeout. A warning is
+ * issued if a domain fails to acknowledge within that timeout.
+ */
+void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref)
{
struct xe_gt *gt = fw->gt;
struct xe_force_wake_domain *domain;
- enum xe_force_wake_domains tmp, sleep = 0;
+ unsigned int tmp, sleep = 0;
unsigned long flags;
- int ret = 0;
+ int ack_fail = 0;
+
+ /*
+ * Avoid unnecessary lock and unlock when the function is called
+ * in error path of individual domains.
+ */
+ if (!fw_ref)
+ return;
+
+ if (xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ fw_ref = fw->initialized_domains;
spin_lock_irqsave(&fw->lock, flags);
- for_each_fw_domain_masked(domain, domains, fw, tmp) {
+ for_each_fw_domain_masked(domain, fw_ref, fw, tmp) {
+ xe_gt_assert(gt, domain->ref);
+
if (!--domain->ref) {
sleep |= BIT(domain->id);
domain_sleep(gt, domain);
}
}
for_each_fw_domain_masked(domain, sleep, fw, tmp) {
- ret |= domain_sleep_wait(gt, domain);
+ if (domain_sleep_wait(gt, domain) == 0)
+ fw->awake_domains &= ~BIT(domain->id);
+ else
+ ack_fail |= BIT(domain->id);
}
- fw->awake_domains &= ~sleep;
spin_unlock_irqrestore(&fw->lock, flags);
- return ret;
+ xe_gt_WARN(gt, ack_fail, "Forcewake domain%s %#x failed to acknowledge sleep request\n",
+ str_plural(hweight_long(ack_fail)), ack_fail);
}
diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h
index a2577672f4e3..0e3e84bfa51c 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.h
+++ b/drivers/gpu/drm/xe/xe_force_wake.h
@@ -15,10 +15,9 @@ void xe_force_wake_init_gt(struct xe_gt *gt,
struct xe_force_wake *fw);
void xe_force_wake_init_engines(struct xe_gt *gt,
struct xe_force_wake *fw);
-int xe_force_wake_get(struct xe_force_wake *fw,
- enum xe_force_wake_domains domains);
-int xe_force_wake_put(struct xe_force_wake *fw,
- enum xe_force_wake_domains domains);
+unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw,
+ enum xe_force_wake_domains domains);
+void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref);
static inline int
xe_force_wake_ref(struct xe_force_wake *fw,
@@ -46,4 +45,20 @@ xe_force_wake_assert_held(struct xe_force_wake *fw,
xe_gt_assert(fw->gt, fw->awake_domains & domain);
}
+/**
+ * xe_force_wake_ref_has_domain - verify whether a domain is part of fw_ref
+ * @fw_ref: the force_wake reference
+ * @domain: forcewake domain to verify
+ *
+ * This function confirms whether the @fw_ref includes a reference to the
+ * specified @domain.
+ *
+ * Return: true if domain is refcounted.
+ */
+static inline bool
+xe_force_wake_ref_has_domain(unsigned int fw_ref, enum xe_force_wake_domains domain)
+{
+ return fw_ref & domain;
+}
+
#endif
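Putting the reworked interface together, the expected caller pattern keys everything off the opaque reference, mirroring the call-site conversions elsewhere in this series:

	/* Sketch: reference-based forcewake get/put */
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		xe_force_wake_put(gt_to_fw(gt), fw_ref);	/* drop any partial refs */
		return -ETIMEDOUT;
	}
	/* ... register access that needs all domains awake ... */
	xe_force_wake_put(gt_to_fw(gt), fw_ref);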
diff --git a/drivers/gpu/drm/xe/xe_force_wake_types.h b/drivers/gpu/drm/xe/xe_force_wake_types.h
index ed0edc2cdf9f..899fbbcb3ea9 100644
--- a/drivers/gpu/drm/xe/xe_force_wake_types.h
+++ b/drivers/gpu/drm/xe/xe_force_wake_types.h
@@ -48,7 +48,7 @@ enum xe_force_wake_domains {
XE_FW_MEDIA_VEBOX2 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX2),
XE_FW_MEDIA_VEBOX3 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX3),
XE_FW_GSC = BIT(XE_FW_DOMAIN_ID_GSC),
- XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT) - 1
+ XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT)
};
/**
@@ -78,7 +78,9 @@ struct xe_force_wake {
/** @lock: protects everything force wake struct */
spinlock_t lock;
/** @awake_domains: mask of all domains awake */
- enum xe_force_wake_domains awake_domains;
+ unsigned int awake_domains;
+ /** @initialized_domains: mask of all initialized domains */
+ unsigned int initialized_domains;
/** @domains: force wake domains */
struct xe_force_wake_domain domains[XE_FW_DOMAIN_ID_COUNT];
};
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 2895f154654c..5fcb2b4c2c13 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -5,6 +5,7 @@
#include "xe_ggtt.h"
+#include <linux/fault-inject.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>
@@ -107,8 +108,10 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev)
static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
- struct xe_gt *gt = XE_WA(ggtt->tile->primary_gt, 22019338487) ? ggtt->tile->primary_gt :
- ggtt->tile->media_gt;
+ struct xe_tile *tile = ggtt->tile;
+ struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ?
+ tile->primary_gt : tile->media_gt;
+ struct xe_mmio *mmio = &affected_gt->mmio;
u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
/*
* Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
@@ -118,7 +121,7 @@ static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
lockdep_assert_held(&ggtt->lock);
if ((++ggtt->access_count % max_gtt_writes) == 0) {
- xe_mmio_write32(gt, GMD_ID, 0x0);
+ xe_mmio_write32(mmio, GMD_ID, 0x0);
ggtt->access_count = 0;
}
}
@@ -243,7 +246,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
else
ggtt->pt_ops = &xelp_pt_ops;
- ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, 0);
+ ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
ggtt->size - xe_wopcm_size(xe));
@@ -262,6 +265,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_ggtt_init_early, ERRNO); /* See xe_pci_probe() */
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt);
@@ -358,7 +362,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
/*
* So we don't need to worry about 64K GGTT layout when dealing with
- * scratch entires, rather keep the scratch page in system memory on
+ * scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
flags = XE_BO_FLAG_PINNED;
@@ -397,6 +401,16 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
+ struct xe_device *xe = tile_to_xe(ggtt->tile);
+
+ /*
+ * XXX: Barrier for GGTT pages. Unsure exactly why this required but
+ * without this LNL is having issues with the GuC reading scratch page
+ * vs. correct GGTT page. Not particularly a hot code path so blindly
+ * do a mmio read here which results in GuC reading correct GGTT page.
+ */
+ xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG);
+
/* Each GT in a tile has its own TLB to cache GGTT lookups */
ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
@@ -584,10 +598,10 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
u64 start;
u64 offset, pte;
- if (XE_WARN_ON(!bo->ggtt_node))
+ if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id]))
return;
- start = bo->ggtt_node->base.start;
+ start = bo->ggtt_node[ggtt->tile->id]->base.start;
for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
@@ -598,15 +612,16 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
+ u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
+ u8 tile_id = ggtt->tile->id;
int err;
- u64 alignment = XE_PAGE_SIZE;
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
alignment = SZ_64K;
- if (XE_WARN_ON(bo->ggtt_node)) {
+ if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
/* Someone's already inserted this BO in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
return 0;
}
@@ -616,19 +631,19 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
- bo->ggtt_node = xe_ggtt_node_init(ggtt);
- if (IS_ERR(bo->ggtt_node)) {
- err = PTR_ERR(bo->ggtt_node);
- bo->ggtt_node = NULL;
+ bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(bo->ggtt_node[tile_id])) {
+ err = PTR_ERR(bo->ggtt_node[tile_id]);
+ bo->ggtt_node[tile_id] = NULL;
goto out;
}
mutex_lock(&ggtt->lock);
- err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size,
- alignment, 0, start, end, 0);
+ err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
+ bo->size, alignment, 0, start, end, 0);
if (err) {
- xe_ggtt_node_fini(bo->ggtt_node);
- bo->ggtt_node = NULL;
+ xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
+ bo->ggtt_node[tile_id] = NULL;
} else {
xe_ggtt_map_bo(ggtt, bo);
}
@@ -677,13 +692,15 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
*/
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
- if (XE_WARN_ON(!bo->ggtt_node))
+ u8 tile_id = ggtt->tile->id;
+
+ if (XE_WARN_ON(!bo->ggtt_node[tile_id]))
return;
/* This BO is not currently in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
- xe_ggtt_node_remove(bo->ggtt_node,
+ xe_ggtt_node_remove(bo->ggtt_node[tile_id],
bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}
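
Aside: a minimal user-space sketch of the access-counter pattern in ggtt_update_access_counter() above. Every Nth GGTT update issues one dummy MMIO write so gunit flushes buffered GTT writes; N is 1100 with Wa_22019338487 active, 63 otherwise. All names here (mmio_write32, the GMD_ID offset) are stand-ins, not the driver's real API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver's MMIO accessor and register (assumed names). */
#define GMD_ID 0xd8c0u /* hypothetical offset, for the sketch only */

static void mmio_write32(unsigned int reg, unsigned int val)
{
	printf("dummy write: reg=0x%x val=0x%x\n", reg, val);
}

struct ggtt {
	unsigned int access_count;
	bool has_wa_22019338487; /* selects the 1100 vs. 63 threshold */
};

/* Same shape as ggtt_update_access_counter() in the hunk above. */
static void ggtt_update_access_counter(struct ggtt *ggtt)
{
	unsigned int max_gtt_writes = ggtt->has_wa_22019338487 ? 1100 : 63;

	if ((++ggtt->access_count % max_gtt_writes) == 0) {
		mmio_write32(GMD_ID, 0x0); /* dummy write to a RO register */
		ggtt->access_count = 0;
	}
}

int main(void)
{
	struct ggtt ggtt = { 0, false };
	int i;

	for (i = 0; i < 200; i++) /* triggers the dummy write three times */
		ggtt_update_access_counter(&ggtt);
	return 0;
}
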
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 5ad5629a6c60..c250ea773491 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -63,14 +63,22 @@ xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
struct xe_sched_job *job)
{
+ spin_lock(&sched->base.job_list_lock);
list_add(&job->drm.list, &sched->base.pending_list);
+ spin_unlock(&sched->base.job_list_lock);
}
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
- return list_first_entry_or_null(&sched->base.pending_list,
- struct xe_sched_job, drm.list);
+ struct xe_sched_job *job;
+
+ spin_lock(&sched->base.job_list_lock);
+ job = list_first_entry_or_null(&sched->base.pending_list,
+ struct xe_sched_job, drm.list);
+ spin_unlock(&sched->base.job_list_lock);
+
+ return job;
}
static inline int
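
Aside: the two helpers above now take job_list_lock around both the list add and the first-entry peek, since an unlocked peek can race with a concurrent unlink. A self-contained pthread sketch of the same discipline, with invented names:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct job {
	struct job *next;
	int id;
};

struct scheduler {
	pthread_mutex_t job_list_lock;
	struct job *pending_list; /* head of the pending-job list */
};

/* Shape of xe_sched_add_pending_job(): link in under the lock. */
static void sched_add_pending_job(struct scheduler *sched, struct job *job)
{
	pthread_mutex_lock(&sched->job_list_lock);
	job->next = sched->pending_list;
	sched->pending_list = job;
	pthread_mutex_unlock(&sched->job_list_lock);
}

/*
 * Shape of xe_sched_first_pending_job(): the peek takes the same lock,
 * otherwise a concurrent add/unlink could hand back a half-updated head.
 */
static struct job *sched_first_pending_job(struct scheduler *sched)
{
	struct job *job;

	pthread_mutex_lock(&sched->job_list_lock);
	job = sched->pending_list; /* may be NULL */
	pthread_mutex_unlock(&sched->job_list_lock);
	return job;
}

int main(void)
{
	struct scheduler sched = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct job j = { NULL, 1 };

	sched_add_pending_job(&sched, &j);
	printf("first pending job: %d\n", sched_first_pending_job(&sched)->id);
	return 0;
}
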
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 6fbea70d3d36..1eb791ddc375 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -34,6 +34,7 @@
#include "instructions/xe_gsc_commands.h"
#include "regs/xe_gsc_regs.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_irq_regs.h"
static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
@@ -179,7 +180,7 @@ out_bo:
static int gsc_fw_is_loaded(struct xe_gt *gt)
{
- return xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) &
+ return xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) &
HECI1_FWSTS1_INIT_COMPLETE;
}
@@ -190,7 +191,7 @@ static int gsc_fw_wait(struct xe_gt *gt)
* executed by the GSCCS. To account for possible submission delays or
* other issues, we use a 500ms timeout in the wait here.
*/
- return xe_mmio_wait32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
+ return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
HECI1_FWSTS1_INIT_COMPLETE,
HECI1_FWSTS1_INIT_COMPLETE,
500 * USEC_PER_MSEC, NULL, false);
@@ -260,19 +261,17 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
struct xe_tile *tile = gt_to_tile(gt);
+ unsigned int fw_ref;
int ret;
if (XE_WA(tile->primary_gt, 14018094691)) {
- ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
+ fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
/*
* If the forcewake fails we want to keep going, because the worst
* case outcome in failing to apply the WA is that PXP won't work,
- * which is not fatal. We still throw a warning so the issue is
- * seen if it happens.
+ * which is not fatal. The forcewake get itself warns implicitly on failure.
*/
- xe_gt_WARN_ON(tile->primary_gt, ret);
-
xe_gt_mcr_multicast_write(tile->primary_gt,
EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK,
EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT);
@@ -281,7 +280,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
ret = gsc_upload(gsc);
if (XE_WA(tile->primary_gt, 14018094691))
- xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref);
if (ret)
return ret;
@@ -330,7 +329,7 @@ static int gsc_er_complete(struct xe_gt *gt)
* so in that scenario we're always guaranteed to find the correct
* value.
*/
- er_status = xe_mmio_read32(gt, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE;
+ er_status = xe_mmio_read32(&gt->mmio, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE;
if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
/*
@@ -351,6 +350,7 @@ static void gsc_work(struct work_struct *work)
struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
struct xe_gt *gt = gsc_to_gt(gsc);
struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref;
u32 actions;
int ret;
@@ -360,7 +360,7 @@ static void gsc_work(struct work_struct *work)
spin_unlock_irq(&gsc->lock);
xe_pm_runtime_get(xe);
- xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
if (actions & GSC_ACTION_ER_COMPLETE) {
ret = gsc_er_complete(gt);
@@ -380,7 +380,7 @@ static void gsc_work(struct work_struct *work)
xe_gsc_proxy_request_handler(gsc);
out:
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
}
@@ -581,11 +581,11 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
return;
- xe_mmio_rmw32(gt, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);
+ xe_mmio_rmw32(&gt->mmio, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);
if (prep) {
/* make sure the reset bit is clear when writing the CSR reg */
- xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE),
+ xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_RST, HECI_H_CSR_IG);
msleep(200);
}
@@ -599,7 +599,8 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
{
struct xe_gt *gt = gsc_to_gt(gsc);
- int err;
+ struct xe_mmio *mmio = &gt->mmio;
+ unsigned int fw_ref;
xe_uc_fw_print(&gsc->fw, p);
@@ -608,17 +609,17 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
if (!xe_uc_fw_is_enabled(&gsc->fw))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (!fw_ref)
return;
drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
- xe_mmio_read32(gt, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
- xe_mmio_read32(gt, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
- xe_mmio_read32(gt, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
- xe_mmio_read32(gt, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
- xe_mmio_read32(gt, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));
-
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_mmio_read32(mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(mmio, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(mmio, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(mmio, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(mmio, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(mmio, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
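
Aside: a pattern repeated throughout this series is that xe_force_wake_get() returns an opaque reference (in effect a mask of the domains actually woken) rather than an errno; xe_force_wake_put() releases exactly that reference, and a zero return means nothing was acquired. A minimal model of that contract, assuming a two-domain bitmask, to show why callers now test !fw_ref (or a has-domain check) instead of an error code:

#include <stdio.h>

#define FW_DOMAIN_GT     (1u << 0)
#define FW_DOMAIN_GSC    (1u << 1)
#define FW_FORCEWAKE_ALL (FW_DOMAIN_GT | FW_DOMAIN_GSC)

struct force_wake {
	unsigned int refcount[2];    /* per-domain reference counts */
	unsigned int broken_domains; /* domains that fail to wake (test knob) */
};

/* Returns the mask of domains actually acquired; 0 if none woke up. */
static unsigned int force_wake_get(struct force_wake *fw, unsigned int domains)
{
	unsigned int got = domains & ~fw->broken_domains;
	unsigned int d;

	for (d = 0; d < 2; d++)
		if (got & (1u << d))
			fw->refcount[d]++;
	return got;
}

/* Releases exactly the reference force_wake_get() handed out. */
static void force_wake_put(struct force_wake *fw, unsigned int fw_ref)
{
	unsigned int d;

	for (d = 0; d < 2; d++)
		if (fw_ref & (1u << d))
			fw->refcount[d]--;
}

static int ref_has_domain(unsigned int fw_ref, unsigned int domains)
{
	return (fw_ref & domains) == domains;
}

int main(void)
{
	struct force_wake fw = { { 0, 0 }, FW_DOMAIN_GSC };
	unsigned int fw_ref = force_wake_get(&fw, FW_FORCEWAKE_ALL);

	if (!ref_has_domain(fw_ref, FW_FORCEWAKE_ALL))
		printf("partial wake 0x%x: bail with -ETIMEDOUT\n", fw_ref);

	force_wake_put(&fw, fw_ref); /* safe even for a partial reference */
	return 0;
}
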
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index 2d6ea8c01445..24cc6a4f9a96 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -65,7 +65,7 @@ gsc_to_gt(struct xe_gsc *gsc)
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
- u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE));
+ u32 fwsts1 = xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE));
return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) ==
HECI1_FWSTS1_PROXY_STATE_NORMAL;
@@ -78,7 +78,7 @@ static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
/* make sure we never accidentally write the RST bit */
clr |= HECI_H_CSR_RST;
- xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
+ xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
}
static void gsc_proxy_irq_clear(struct xe_gsc *gsc)
@@ -139,17 +139,29 @@ static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
return 0;
}
-static int validate_proxy_header(struct xe_gsc_proxy_header *header,
+static int validate_proxy_header(struct xe_gt *gt,
+ struct xe_gsc_proxy_header *header,
u32 source, u32 dest, u32 max_size)
{
u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
+ int ret = 0;
- if (header->destination != dest || header->source != source)
- return -ENOEXEC;
+ if (header->destination != dest || header->source != source) {
+ ret = -ENOEXEC;
+ goto out;
+ }
+
+ if (length + PROXY_HDR_SIZE > max_size) {
+ ret = -E2BIG;
+ goto out;
+ }
- if (length + PROXY_HDR_SIZE > max_size)
- return -E2BIG;
+ /* We only care about the status if this is a message for the driver */
+ if (dest == GSC_PROXY_ADDRESSING_KMD && header->status != 0) {
+ ret = -EIO;
+ goto out;
+ }
switch (type) {
case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
@@ -157,12 +169,20 @@ static int validate_proxy_header(struct xe_gsc_proxy_header *header,
break;
fallthrough;
case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
- return -EIO;
+ ret = -EIO;
+ break;
default:
break;
}
- return 0;
+out:
+ if (ret)
+ xe_gt_err(gt,
+ "GSC proxy error: s=0x%x[0x%x], d=0x%x[0x%x], t=%u, l=0x%x, st=0x%x\n",
+ header->source, source, header->destination, dest,
+ type, length, header->status);
+
+ return ret;
}
#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
@@ -228,12 +248,17 @@ static int proxy_query(struct xe_gsc *gsc)
xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
reply_offset, PROXY_HDR_SIZE);
- /* stop if this was the last message */
- if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END)
+ /* Check the status and stop if this was the last message */
+ if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END) {
+ ret = validate_proxy_header(gt, to_csme_hdr,
+ GSC_PROXY_ADDRESSING_GSC,
+ GSC_PROXY_ADDRESSING_KMD,
+ GSC_PROXY_BUFFER_SIZE - reply_offset);
break;
+ }
/* make sure the GSC-to-CSME proxy header is sane */
- ret = validate_proxy_header(to_csme_hdr,
+ ret = validate_proxy_header(gt, to_csme_hdr,
GSC_PROXY_ADDRESSING_GSC,
GSC_PROXY_ADDRESSING_CSME,
GSC_PROXY_BUFFER_SIZE - reply_offset);
@@ -262,7 +287,7 @@ static int proxy_query(struct xe_gsc *gsc)
}
/* make sure the CSME-to-GSC proxy header is sane */
- ret = validate_proxy_header(gsc->proxy.from_csme,
+ ret = validate_proxy_header(gt, gsc->proxy.from_csme,
GSC_PROXY_ADDRESSING_CSME,
GSC_PROXY_ADDRESSING_GSC,
GSC_PROXY_BUFFER_SIZE - reply_offset);
@@ -450,22 +475,21 @@ void xe_gsc_proxy_remove(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
struct xe_device *xe = gt_to_xe(gt);
- int err = 0;
+ unsigned int fw_ref = 0;
if (!gsc->proxy.component_added)
return;
/* disable HECI2 IRQs */
xe_pm_runtime_get(xe);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (!fw_ref)
xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");
/* try to disable the irq even if forcewake failed */
gsc_proxy_irq_toggle(gsc, false);
- if (!err)
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
xe_gsc_wait_for_worker_completion(gsc);
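
Aside: validate_proxy_header() now funnels every failure through one exit that logs the header fields, and additionally rejects driver-bound messages with a non-zero status. A compact stand-alone model of that flow; the field layout and error values here are placeholders, not the real GSC_PROXY_* definitions:

#include <stdio.h>

/* Field layout assumed for this sketch; the real GSC_PROXY_* fields differ. */
#define PROXY_LENGTH(hdr) (((hdr) >> 8) & 0xffffu)
#define PROXY_HDR_SIZE 16u

struct proxy_header {
	unsigned int hdr;
	unsigned int source;
	unsigned int destination;
	unsigned int status;
};

enum { ADDR_KMD = 1, ADDR_GSC = 2, ADDR_CSME = 3 };

/* Mirrors the reworked validate_proxy_header(): gather the error, log once. */
static int validate_proxy_header(const struct proxy_header *h,
				 unsigned int source, unsigned int dest,
				 unsigned int max_size)
{
	unsigned int length = PROXY_LENGTH(h->hdr);
	int ret = 0;

	if (h->destination != dest || h->source != source)
		ret = -1; /* -ENOEXEC in the driver */
	else if (length + PROXY_HDR_SIZE > max_size)
		ret = -2; /* -E2BIG */
	else if (dest == ADDR_KMD && h->status != 0)
		ret = -3; /* -EIO: only driver-bound messages carry a status */

	if (ret)
		fprintf(stderr, "proxy error: s=0x%x d=0x%x l=0x%x st=0x%x\n",
			h->source, h->destination, length, h->status);
	return ret;
}

int main(void)
{
	struct proxy_header bad = { 0, ADDR_GSC, ADDR_KMD, 5 };

	return validate_proxy_header(&bad, ADDR_GSC, ADDR_KMD, 4096) ? 0 : 1;
}
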
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index f0dc2bf24c7b..5d6fb79957b6 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -77,7 +77,8 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
return ERR_PTR(-ENOMEM);
gt->tile = tile;
- gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
+ gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq",
+ WQ_MEM_RECLAIM);
err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
if (err)
@@ -97,31 +98,30 @@ void xe_gt_sanitize(struct xe_gt *gt)
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
+ unsigned int fw_ref;
u32 reg;
- int err;
if (!XE_WA(gt, 16023588340))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (WARN_ON(err))
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
if (!xe_gt_is_media_type(gt)) {
- xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH);
reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg |= CG_DIS_CNTLBUS;
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}
xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
+ unsigned int fw_ref;
u32 reg;
- int err;
if (!XE_WA(gt, 16023588340))
return;
@@ -129,15 +129,15 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
if (xe_gt_is_media_type(gt))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (WARN_ON(err))
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg &= ~CG_DIS_CNTLBUS;
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
/**
@@ -245,7 +245,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
else if (entry->clr_bits + 1)
val = (reg.mcr ?
xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
- xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
+ xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
else
val = 0;
@@ -387,6 +387,10 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
+ err = xe_gt_tlb_invalidation_init_early(gt);
+ if (err)
+ return err;
+
return 0;
}
@@ -403,11 +407,14 @@ static void dump_pat_on_error(struct xe_gt *gt)
static int gt_fw_domain_init(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err, i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref) {
+ err = -ETIMEDOUT;
goto err_hw_fence_irq;
+ }
if (!xe_gt_is_media_type(gt)) {
err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
@@ -440,16 +447,14 @@ static int gt_fw_domain_init(struct xe_gt *gt)
* Stash hardware-reported version. Since this register does not exist
* on pre-MTL platforms, reading it there will (correctly) return 0.
*/
- gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);
-
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
- XE_WARN_ON(err);
+ gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
err_force_wake:
dump_pat_on_error(gt);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
err_hw_fence_irq:
for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
xe_hw_fence_irq_finish(&gt->fence_irq[i]);
@@ -459,11 +464,14 @@ err_hw_fence_irq:
static int all_fw_domain_init(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err, i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (err)
- goto err_hw_fence_irq;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ err = -ETIMEDOUT;
+ goto err_force_wake;
+ }
xe_gt_mcr_set_implicit_defaults(gt);
xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
@@ -524,17 +532,17 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
- if (IS_SRIOV_PF(gt_to_xe(gt)))
+ if (IS_SRIOV_PF(gt_to_xe(gt))) {
+ xe_gt_sriov_pf_init(gt);
xe_gt_sriov_pf_init_hw(gt);
+ }
- err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- XE_WARN_ON(err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
err_force_wake:
- xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
-err_hw_fence_irq:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
xe_hw_fence_irq_finish(&gt->fence_irq[i]);
@@ -547,11 +555,12 @@ err_hw_fence_irq:
*/
int xe_gt_init_hwconfig(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto out;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
xe_gt_mcr_init_early(gt);
xe_pat_init(gt);
@@ -569,8 +578,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
xe_gt_enable_host_l2_vram(gt);
out_fw:
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-out:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return err;
}
@@ -586,10 +594,6 @@ int xe_gt_init(struct xe_gt *gt)
xe_hw_fence_irq_init(&gt->fence_irq[i]);
}
- err = xe_gt_tlb_invalidation_init(gt);
- if (err)
- return err;
-
err = xe_gt_pagefault_init(gt);
if (err)
return err;
@@ -623,6 +627,30 @@ int xe_gt_init(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_mmio_init() - Initialize GT's MMIO access
+ * @gt: the GT object
+ *
+ * Initialize GT's MMIO accessor, which will be used to access registers inside
+ * this GT.
+ */
+void xe_gt_mmio_init(struct xe_gt *gt)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+
+ gt->mmio.regs = tile->mmio.regs;
+ gt->mmio.regs_size = tile->mmio.regs_size;
+ gt->mmio.tile = tile;
+
+ if (gt->info.type == XE_GT_TYPE_MEDIA) {
+ gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
+ gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
+ }
+
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ gt->mmio.sriov_vf_gt = gt;
+}
+
void xe_gt_record_user_engines(struct xe_gt *gt)
{
struct xe_hw_engine *hwe;
@@ -650,8 +678,8 @@ static int do_gt_reset(struct xe_gt *gt)
xe_gsc_wa_14015076503(gt, true);
- xe_mmio_write32(gt, GDRST, GRDOM_FULL);
- err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
+ xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
+ err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
if (err)
xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
ERR_PTR(err));
@@ -722,10 +750,8 @@ static int do_gt_restart(struct xe_gt *gt)
if (err)
return err;
- for_each_hw_engine(hwe, gt, id) {
+ for_each_hw_engine(hwe, gt, id)
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
- xe_reg_sr_apply_whitelist(hwe);
- }
/* Get CCS mode in sync between sw/hw */
xe_gt_apply_ccs_mode(gt);
@@ -741,6 +767,7 @@ static int do_gt_restart(struct xe_gt *gt)
static int gt_reset(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
if (xe_device_wedged(gt_to_xe(gt)))
@@ -761,9 +788,11 @@ static int gt_reset(struct xe_gt *gt)
xe_gt_sanitize(gt);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (err)
- goto err_msg;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ err = -ETIMEDOUT;
+ goto err_out;
+ }
xe_uc_gucrc_disable(&gt->uc);
xe_uc_stop_prepare(&gt->uc);
@@ -781,8 +810,7 @@ static int gt_reset(struct xe_gt *gt)
if (err)
goto err_out;
- err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- XE_WARN_ON(err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(gt_to_xe(gt));
xe_gt_info(gt, "reset done\n");
@@ -790,8 +818,7 @@ static int gt_reset(struct xe_gt *gt)
return 0;
err_out:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-err_msg:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
XE_WARN_ON(xe_uc_start(&gt->uc));
err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
@@ -811,7 +838,7 @@ static void gt_reset_worker(struct work_struct *w)
void xe_gt_reset_async(struct xe_gt *gt)
{
- xe_gt_info(gt, "trying reset\n");
+ xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));
/* Don't do a reset while one is already in flight */
if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
@@ -823,22 +850,25 @@ void xe_gt_reset_async(struct xe_gt *gt)
void xe_gt_suspend_prepare(struct xe_gt *gt)
{
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ unsigned int fw_ref;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_uc_stop_prepare(&gt->uc);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
int xe_gt_suspend(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
xe_gt_dbg(gt, "suspending\n");
xe_gt_sanitize(gt);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
goto err_msg;
err = xe_uc_suspend(&gt->uc);
@@ -849,19 +879,29 @@ int xe_gt_suspend(struct xe_gt *gt)
xe_gt_disable_host_l2_vram(gt);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_gt_dbg(gt, "suspended\n");
return 0;
-err_force_wake:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
+ err = -ETIMEDOUT;
+err_force_wake:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
return err;
}
+void xe_gt_shutdown(struct xe_gt *gt)
+{
+ unsigned int fw_ref;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ do_gt_reset(gt);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+}
+
/**
* xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
* @gt: the GT object
@@ -874,7 +914,9 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
int ret = 0;
if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
- xe_uc_fw_is_loaded(&gt->uc.gsc.fw)) && XE_WA(gt, 22019338487))
+ xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
+ xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
+ XE_WA(gt, 22019338487))
ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
return ret;
@@ -882,11 +924,12 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
int xe_gt_resume(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
xe_gt_dbg(gt, "resuming\n");
- err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
goto err_msg;
err = do_gt_restart(gt);
@@ -895,14 +938,15 @@ int xe_gt_resume(struct xe_gt *gt)
xe_gt_idle_enable_pg(gt);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_gt_dbg(gt, "resumed\n");
return 0;
-err_force_wake:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
+ err = -ETIMEDOUT;
+err_force_wake:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
return err;
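
Aside: the suspend/resume error paths above were reordered so the forcewake-timeout label sets err and falls through into the shared release-and-report path. A condensed model of that goto discipline; the stubs simulate a forcewake failure and are not the driver's API:

#include <stdio.h>

#define ETIMEDOUT 110

/* Stubs: force_wake_get() returning 0 simulates a wake timeout. */
static unsigned int force_wake_get(void) { return 0; }
static void force_wake_put(unsigned int fw_ref) { (void)fw_ref; }
static int uc_suspend(void) { return 0; }

static int gt_suspend(void)
{
	unsigned int fw_ref;
	int err;

	fw_ref = force_wake_get();
	if (!fw_ref)
		goto err_msg;

	err = uc_suspend();
	if (err)
		goto err_force_wake;

	force_wake_put(fw_ref);
	return 0;

err_msg:
	err = -ETIMEDOUT;	/* timeout path sets err, then falls... */
err_force_wake:
	force_wake_put(fw_ref);	/* ...into the shared release + report */
	fprintf(stderr, "suspend failed (%d)\n", err);
	return err;
}

int main(void)
{
	return gt_suspend() ? 1 : 0;
}
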
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index ee138e9768a2..e504cc33ade4 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -31,12 +31,13 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int xe_gt_init_hwconfig(struct xe_gt *gt);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
+void xe_gt_mmio_init(struct xe_gt *gt);
void xe_gt_declare_wedged(struct xe_gt *gt);
int xe_gt_record_default_lrcs(struct xe_gt *gt);
/**
* xe_gt_record_user_engines - save data related to engines available to
- * usersapce
+ * userspace
* @gt: GT structure
*
* Walk the available HW engines from gt->info.engine_mask and calculate data
@@ -48,6 +49,7 @@ void xe_gt_record_user_engines(struct xe_gt *gt);
void xe_gt_suspend_prepare(struct xe_gt *gt);
int xe_gt_suspend(struct xe_gt *gt);
+void xe_gt_shutdown(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
@@ -55,6 +57,31 @@ int xe_gt_sanitize_freq(struct xe_gt *gt);
void xe_gt_remove(struct xe_gt *gt);
/**
+ * xe_gt_wait_for_reset - wait for the GT's async reset to complete.
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the work to finish execution,
+ * %false if there was no scheduled reset or it was done.
+ */
+static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
+{
+ return flush_work(&gt->reset.worker);
+}
+
+/**
+ * xe_gt_reset - perform synchronous reset
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the reset to finish,
+ * %false if there was no scheduled reset.
+ */
+static inline bool xe_gt_reset(struct xe_gt *gt)
+{
+ xe_gt_reset_async(gt);
+ return xe_gt_wait_for_reset(gt);
+}
+
+/**
* xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
* first that matches the same reset domain as @class
* @gt: GT structure
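
Aside: the new xe_gt_reset() above is just the async trigger followed by a flush of the reset worker, which is what makes it synchronous. A single-threaded model of those flush_work() semantics, with invented names:

#include <stdbool.h>
#include <stdio.h>

/* Single-threaded stand-in for a kernel workqueue item. */
struct work {
	bool queued;
	void (*fn)(void);
};

static void queue_work(struct work *w)
{
	w->queued = true;
}

/* flush_work() model: run outstanding work, return true if we waited. */
static bool flush_work(struct work *w)
{
	if (!w->queued)
		return false;
	w->fn();
	w->queued = false;
	return true;
}

static void reset_worker(void)
{
	printf("reset done\n");
}

static struct work reset_work = { false, reset_worker };

static void gt_reset_async(void)
{
	queue_work(&reset_work);
}

/* Shape of the new xe_gt_reset(): async trigger + wait = synchronous. */
static bool gt_reset(void)
{
	gt_reset_async();
	return flush_work(&reset_work);
}

int main(void)
{
	return gt_reset() ? 0 : 1;
}
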
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index d2e4dc3aaf61..50fffc9ebf62 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -68,7 +68,13 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
}
}
- xe_mmio_write32(gt, CCS_MODE, mode);
+ /*
+ * The mask bits need to be set for the register. Though only Xe2+
+ * platforms require the mask bits, setting them does no harm on older
+ * platforms, where these bits are unused.
+ */
+ mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
+ xe_mmio_write32(&gt->mmio, CCS_MODE, mode);
xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
mode, config, num_engines, num_slices);
@@ -133,9 +139,10 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
}
/* CCS mode can only be updated when there are no drm clients */
- spin_lock(&xe->clients.lock);
- if (xe->clients.count) {
- spin_unlock(&xe->clients.lock);
+ mutex_lock(&xe->drm.filelist_mutex);
+ if (!list_empty(&xe->drm.filelist)) {
+ mutex_unlock(&xe->drm.filelist_mutex);
+ xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n");
return -EBUSY;
}
@@ -143,10 +150,10 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
gt->ccs_mode = num_engines;
xe_gt_record_user_engines(gt);
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
}
- spin_unlock(&xe->clients.lock);
+ mutex_unlock(&xe->drm.filelist_mutex);
return count;
}
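
Aside: CCS_MODE on Xe2+ behaves as a masked register: the upper 16 bits of the written value select which low bits take effect, so the driver ORs the mask in before writing. A stand-alone model of how hardware applies such a write; the mask width below is an assumption of this sketch:

#include <stdio.h>

#define CCS_MODE_CSLICE_0_3_MASK 0xfffu /* width assumed for this sketch */

/*
 * Masked-register model: bits [31:16] of the written value choose which
 * bits of [15:0] actually change, i.e. new = (old & ~mask) | (val & mask).
 */
static unsigned int masked_reg_write(unsigned int old, unsigned int wr)
{
	unsigned int mask = (wr >> 16) & 0xffffu;
	unsigned int val = wr & 0xffffu;

	return (old & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg = 0xabcd;
	unsigned int mode = 0x421; /* example CCS engine/slice assignment */

	/* As in __xe_gt_apply_ccs_mode(): put the mask in the high half. */
	mode |= CCS_MODE_CSLICE_0_3_MASK << 16;

	reg = masked_reg_write(reg, mode);
	printf("CCS_MODE now 0x%04x\n", reg);
	return 0;
}
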
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c
index 86c2d62b4bdc..cc2ae159298e 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.c
+++ b/drivers/gpu/drm/xe/xe_gt_clock.c
@@ -17,7 +17,7 @@
static u32 read_reference_ts_freq(struct xe_gt *gt)
{
- u32 ts_override = xe_mmio_read32(gt, TIMESTAMP_OVERRIDE);
+ u32 ts_override = xe_mmio_read32(&gt->mmio, TIMESTAMP_OVERRIDE);
u32 base_freq, frac_freq;
base_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK,
@@ -57,7 +57,7 @@ static u32 get_crystal_clock_freq(u32 rpm_config_reg)
int xe_gt_clock_init(struct xe_gt *gt)
{
- u32 ctc_reg = xe_mmio_read32(gt, CTC_MODE);
+ u32 ctc_reg = xe_mmio_read32(&gt->mmio, CTC_MODE);
u32 freq = 0;
/* Assuming gen11+ so assert this assumption is correct */
@@ -66,7 +66,7 @@ int xe_gt_clock_init(struct xe_gt *gt)
if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(gt);
} else {
- u32 c0 = xe_mmio_read32(gt, RPM_CONFIG0);
+ u32 c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
freq = get_crystal_clock_freq(c0);
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 8f95d3a5949b..e7792858b1e4 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -15,6 +15,7 @@
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
+#include "xe_gt_idle.h"
#include "xe_gt_sriov_pf_debugfs.h"
#include "xe_gt_sriov_vf_debugfs.h"
#include "xe_gt_stats.h"
@@ -89,26 +90,36 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
struct xe_device *xe = gt_to_xe(gt);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
- int err;
+ unsigned int fw_ref;
xe_pm_runtime_get(xe);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (err) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
xe_pm_runtime_put(xe);
- return err;
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return -ETIMEDOUT;
}
for_each_hw_engine(hwe, gt, id)
xe_hw_engine_print(hwe, p);
- err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
- if (err)
- return err;
return 0;
}
+static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
+{
+ int ret;
+
+ xe_pm_runtime_get(gt_to_xe(gt));
+ ret = xe_gt_idle_pg_print(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return ret;
+}
+
static int force_reset(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
@@ -121,11 +132,9 @@ static int force_reset(struct xe_gt *gt, struct drm_printer *p)
static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
xe_pm_runtime_put(gt_to_xe(gt));
- flush_work(&gt->reset.worker);
-
return 0;
}
@@ -288,6 +297,7 @@ static const struct drm_info_list debugfs_list[] = {
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
{"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
+ {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
{"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
{"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index ab76973f3e1e..604bdc7c8173 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -11,9 +11,9 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include "xe_device_types.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle.h"
+#include "xe_gt_types.h"
#include "xe_guc_pc.h"
#include "xe_pm.h"
@@ -115,6 +115,20 @@ static ssize_t rpe_freq_show(struct device *dev,
}
static DEVICE_ATTR_RO(rpe_freq);
+static ssize_t rpa_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_rpa_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return sysfs_emit(buf, "%d\n", freq);
+}
+static DEVICE_ATTR_RO(rpa_freq);
+
static ssize_t rpn_freq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -202,6 +216,7 @@ static const struct attribute *freq_attrs[] = {
&dev_attr_act_freq.attr,
&dev_attr_cur_freq.attr,
&dev_attr_rp0_freq.attr,
+ &dev_attr_rpa_freq.attr,
&dev_attr_rpe_freq.attr,
&dev_attr_rpn_freq.attr,
&dev_attr_min_freq.attr,
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 67aba4140510..ffd3ba7f6656 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -98,7 +98,10 @@ static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency)
void xe_gt_idle_enable_pg(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- u32 pg_enable;
+ struct xe_gt_idle *gtidle = &gt->gtidle;
+ struct xe_mmio *mmio = &gt->mmio;
+ u32 vcs_mask, vecs_mask;
+ unsigned int fw_ref;
int i, j;
if (IS_SRIOV_VF(xe))
@@ -110,39 +113,138 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
xe_device_assert_mem_access(gt_to_xe(gt));
- pg_enable = RENDER_POWERGATE_ENABLE | MEDIA_POWERGATE_ENABLE;
+ vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
+ vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
- for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
- if ((gt->info.engine_mask & BIT(i)))
- pg_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
- VDN_MFXVDENC_POWERGATE_ENABLE(j));
+ if (vcs_mask || vecs_mask)
+ gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE;
+
+ if (!xe_gt_is_media_type(gt))
+ gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;
+
+ if (xe->info.platform != XE_DG1) {
+ for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
+ if ((gt->info.engine_mask & BIT(i)))
+ gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
+ VDN_MFXVDENC_POWERGATE_ENABLE(j));
+ }
}
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (xe->info.skip_guc_pc) {
/*
* GuC sets the hysteresis value when GuC PC is enabled;
* otherwise set it to 25 (25 * 1.28 us)
*/
- xe_mmio_write32(gt, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25);
- xe_mmio_write32(gt, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
+ xe_mmio_write32(mmio, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25);
+ xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
}
- xe_mmio_write32(gt, POWERGATE_ENABLE, pg_enable);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT));
+ xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
void xe_gt_idle_disable_pg(struct xe_gt *gt)
{
+ struct xe_gt_idle *gtidle = &gt->gtidle;
+ unsigned int fw_ref;
+
if (IS_SRIOV_VF(gt_to_xe(gt)))
return;
xe_device_assert_mem_access(gt_to_xe(gt));
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ gtidle->powergate_enable = 0;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ xe_mmio_write32(&gt->mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+}
+
+/**
+ * xe_gt_idle_pg_print - Xe powergating info
+ * @gt: GT object
+ * @p: drm_printer.
+ *
+ * This function prints the powergating information
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_gt_idle *gtidle = &gt->gtidle;
+ struct xe_device *xe = gt_to_xe(gt);
+ enum xe_gt_idle_state state;
+ u32 pg_enabled, pg_status = 0;
+ u32 vcs_mask, vecs_mask;
+ unsigned int fw_ref;
+ int n;
+ /*
+ * Media Slices
+ *
+ * Slice 0: VCS0, VCS1, VECS0
+ * Slice 1: VCS2, VCS3, VECS1
+ * Slice 2: VCS4, VCS5, VECS2
+ * Slice 3: VCS6, VCS7, VECS3
+ */
+ static const struct {
+ u64 engines;
+ u32 status_bit;
+ } media_slices[] = {
+ {(BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS1) |
+ BIT(XE_HW_ENGINE_VECS0)), MEDIA_SLICE0_AWAKE_STATUS},
+
+ {(BIT(XE_HW_ENGINE_VCS2) | BIT(XE_HW_ENGINE_VCS3) |
+ BIT(XE_HW_ENGINE_VECS1)), MEDIA_SLICE1_AWAKE_STATUS},
- xe_mmio_write32(gt, POWERGATE_ENABLE, 0);
+ {(BIT(XE_HW_ENGINE_VCS4) | BIT(XE_HW_ENGINE_VCS5) |
+ BIT(XE_HW_ENGINE_VECS2)), MEDIA_SLICE2_AWAKE_STATUS},
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT));
+ {(BIT(XE_HW_ENGINE_VCS6) | BIT(XE_HW_ENGINE_VCS7) |
+ BIT(XE_HW_ENGINE_VECS3)), MEDIA_SLICE3_AWAKE_STATUS},
+ };
+
+ if (xe->info.platform == XE_PVC) {
+ drm_printf(p, "Power Gating not supported\n");
+ return 0;
+ }
+
+ state = gtidle->idle_status(gtidle_to_pc(gtidle));
+ pg_enabled = gtidle->powergate_enable;
+
+ /* Do not wake the GT to read powergating status */
+ if (state != GT_IDLE_C6) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
+
+ pg_enabled = xe_mmio_read32(&gt->mmio, POWERGATE_ENABLE);
+ pg_status = xe_mmio_read32(&gt->mmio, POWERGATE_DOMAIN_STATUS);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+
+ if (gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK) {
+ drm_printf(p, "Render Power Gating Enabled: %s\n",
+ str_yes_no(pg_enabled & RENDER_POWERGATE_ENABLE));
+
+ drm_printf(p, "Render Power Gate Status: %s\n",
+ str_up_down(pg_status & RENDER_AWAKE_STATUS));
+ }
+
+ vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
+ vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
+
+ /* Print media CPG status only if media is present */
+ if (vcs_mask || vecs_mask) {
+ drm_printf(p, "Media Power Gating Enabled: %s\n",
+ str_yes_no(pg_enabled & MEDIA_POWERGATE_ENABLE));
+
+ for (n = 0; n < ARRAY_SIZE(media_slices); n++)
+ if (gt->info.engine_mask & media_slices[n].engines)
+ drm_printf(p, "Media Slice%d Power Gate Status: %s\n", n,
+ str_up_down(pg_status & media_slices[n].status_bit));
+ }
+ return 0;
}
static ssize_t name_show(struct device *dev,
@@ -201,13 +303,14 @@ static void gt_idle_fini(void *arg)
{
struct kobject *kobj = arg;
struct xe_gt *gt = kobj_to_gt(kobj->parent);
+ unsigned int fw_ref;
xe_gt_idle_disable_pg(gt);
if (gt_to_xe(gt)->info.skip_guc_pc) {
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
xe_gt_idle_disable_c6(gt);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
sysfs_remove_files(kobj, gt_idle_attrs);
@@ -260,9 +363,9 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
return;
/* Units of 1280 ns for a total of 5s */
- xe_mmio_write32(gt, RC_IDLE_HYSTERSIS, 0x3B9ACA);
+ xe_mmio_write32(&gt->mmio, RC_IDLE_HYSTERSIS, 0x3B9ACA);
/* Enable RC6 */
- xe_mmio_write32(gt, RC_CONTROL,
+ xe_mmio_write32(&gt->mmio, RC_CONTROL,
RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE);
}
@@ -274,6 +377,6 @@ void xe_gt_idle_disable_c6(struct xe_gt *gt)
if (IS_SRIOV_VF(gt_to_xe(gt)))
return;
- xe_mmio_write32(gt, RC_CONTROL, 0);
- xe_mmio_write32(gt, RC_STATE, 0);
+ xe_mmio_write32(&gt->mmio, RC_CONTROL, 0);
+ xe_mmio_write32(&gt->mmio, RC_STATE, 0);
}
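
Aside: xe_gt_idle_pg_print() decodes per-slice status with a table mapping each media slice's engines to its awake bit, and only reports slices whose engines are fused in. A reduced two-slice model of that decode, with invented engine and status bit positions:

#include <stdio.h>

/* Engine bit positions and status bits invented for this sketch. */
enum { VCS0, VCS1, VCS2, VCS3, VECS0, VECS1 };

static const struct {
	unsigned long long engines; /* engines living in this media slice */
	unsigned int status_bit;    /* that slice's awake bit */
} media_slices[] = {
	{ (1ull << VCS0) | (1ull << VCS1) | (1ull << VECS0), 1u << 0 },
	{ (1ull << VCS2) | (1ull << VCS3) | (1ull << VECS1), 1u << 1 },
};

int main(void)
{
	unsigned long long engine_mask = (1ull << VCS0) | (1ull << VECS0);
	unsigned int pg_status = 1u << 0; /* slice 0 reports awake */
	unsigned int n;

	/* Same loop shape as xe_gt_idle_pg_print(): skip fused-off slices. */
	for (n = 0; n < 2; n++)
		if (engine_mask & media_slices[n].engines)
			printf("Media Slice%u Power Gate Status: %s\n", n,
			       (pg_status & media_slices[n].status_bit) ?
			       "up" : "down");
	return 0;
}
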
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h
index 554447b5d46d..4455a6501cb0 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle.h
@@ -8,6 +8,7 @@
#include "xe_gt_idle_types.h"
+struct drm_printer;
struct xe_gt;
int xe_gt_idle_init(struct xe_gt_idle *gtidle);
@@ -15,5 +16,6 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt);
void xe_gt_idle_disable_c6(struct xe_gt *gt);
void xe_gt_idle_enable_pg(struct xe_gt *gt);
void xe_gt_idle_disable_pg(struct xe_gt *gt);
+int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p);
#endif /* _XE_GT_IDLE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_idle_types.h b/drivers/gpu/drm/xe/xe_gt_idle_types.h
index f99b447534f3..b8b297a3f884 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle_types.h
@@ -23,6 +23,8 @@ enum xe_gt_idle_state {
struct xe_gt_idle {
/** @name: name */
char name[16];
+ /** @powergate_enable: copy of powergate enable bits */
+ u32 powergate_enable;
/** @residency_multiplier: residency multiplier in ns */
u32 residency_multiplier;
/** @cur_residency: raw driver copy of idle residency */
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index c834f64b0178..a1676b787fdc 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -237,13 +237,26 @@ static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = {
{},
};
+static const struct xe_mmio_range xe3lpm_instance0_steering_table[] = {
+ { 0x384000, 0x3847DF }, /* GAM, rsvd, GAM */
+ { 0x384900, 0x384AFF }, /* GAM */
+ { 0x389560, 0x3895FF }, /* MEDIAINF */
+ { 0x38B600, 0x38B8FF }, /* L3BANK */
+ { 0x38C800, 0x38D07F }, /* GAM, MEDIAINF */
+ { 0x38D0D0, 0x38F0FF }, /* MEDIAINF, GAM */
+ { 0x393C00, 0x393C7F }, /* MEDIAINF */
+ {},
+};
+
static void init_steering_l3bank(struct xe_gt *gt)
{
+ struct xe_mmio *mmio = &gt->mmio;
+
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
- xe_mmio_read32(gt, MIRROR_FUSE3));
+ xe_mmio_read32(mmio, MIRROR_FUSE3));
u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK,
- xe_mmio_read32(gt, XEHP_FUSE4));
+ xe_mmio_read32(mmio, XEHP_FUSE4));
/*
* Group selects mslice, instance selects bank within mslice.
@@ -254,7 +267,7 @@ static void init_steering_l3bank(struct xe_gt *gt)
bank_mask & BIT(0) ? 0 : 2;
} else if (gt_to_xe(gt)->info.platform == XE_DG2) {
u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
- xe_mmio_read32(gt, MIRROR_FUSE3));
+ xe_mmio_read32(mmio, MIRROR_FUSE3));
u32 bank = __ffs(mslice_mask) * 8;
/*
@@ -266,7 +279,7 @@ static void init_steering_l3bank(struct xe_gt *gt)
gt->steering[L3BANK].instance_target = bank & 0x3;
} else {
u32 fuse = REG_FIELD_GET(L3BANK_MASK,
- ~xe_mmio_read32(gt, MIRROR_FUSE3));
+ ~xe_mmio_read32(mmio, MIRROR_FUSE3));
gt->steering[L3BANK].group_target = 0; /* unused */
gt->steering[L3BANK].instance_target = __ffs(fuse);
@@ -276,7 +289,7 @@ static void init_steering_l3bank(struct xe_gt *gt)
static void init_steering_mslice(struct xe_gt *gt)
{
u32 mask = REG_FIELD_GET(MEML3_EN_MASK,
- xe_mmio_read32(gt, MIRROR_FUSE3));
+ xe_mmio_read32(&gt->mmio, MIRROR_FUSE3));
/*
* mslice registers are valid (not terminated) if either the meml3
@@ -352,6 +365,19 @@ void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group,
*instance = dss % gt->steering_dss_per_grp;
}
+/**
+ * xe_gt_mcr_steering_info_to_dss_id - Get DSS ID from group/instance steering
+ * @gt: GT structure
+ * @group: steering group ID
+ * @instance: steering instance ID
+ *
+ * Return: the converted DSS id.
+ */
+u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance)
+{
+ return group * dss_per_group(gt) + instance;
+}
+
static void init_steering_dss(struct xe_gt *gt)
{
gt->steering_dss_per_grp = dss_per_group(gt);
@@ -380,7 +406,7 @@ static void init_steering_oaddrm(struct xe_gt *gt)
static void init_steering_sqidi_psmi(struct xe_gt *gt)
{
u32 mask = REG_FIELD_GET(XE2_NODE_ENABLE_MASK,
- xe_mmio_read32(gt, MIRROR_FUSE3));
+ xe_mmio_read32(&gt->mmio, MIRROR_FUSE3));
u32 select = __ffs(mask);
gt->steering[SQIDI_PSMI].group_target = select >> 1;
@@ -439,7 +465,10 @@ void xe_gt_mcr_init(struct xe_gt *gt)
if (gt->info.type == XE_GT_TYPE_MEDIA) {
drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13);
- if (MEDIA_VERx100(xe) >= 1301) {
+ if (MEDIA_VER(xe) >= 30) {
+ gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
+ gt->steering[INSTANCE0].ranges = xe3lpm_instance0_steering_table;
+ } else if (MEDIA_VERx100(xe) >= 1301) {
gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table;
} else {
@@ -494,8 +523,8 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);
- xe_mmio_write32(gt, MCFG_MCR_SELECTOR, steer_val);
- xe_mmio_write32(gt, SF_MCR_SELECTOR, steer_val);
+ xe_mmio_write32(&gt->mmio, MCFG_MCR_SELECTOR, steer_val);
+ xe_mmio_write32(&gt->mmio, SF_MCR_SELECTOR, steer_val);
/*
* For GAM registers, all reads should be directed to instance 1
* (unicast reads against other instances are not allowed),
@@ -521,9 +550,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
* Returns true if the caller should steer to the @group/@instance values
* returned. Returns false if the caller need not perform any steering
*/
-static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
- struct xe_reg_mcr reg_mcr,
- u8 *group, u8 *instance)
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance)
{
const struct xe_reg reg = to_xe_reg(reg_mcr);
const struct xe_mmio_range *implicit_ranges;
@@ -533,7 +562,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
continue;
for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) {
- if (xe_mmio_in_range(gt, &gt->steering[type].ranges[i], reg)) {
+ if (xe_mmio_in_range(&gt->mmio, &gt->steering[type].ranges[i], reg)) {
*group = gt->steering[type].group_target;
*instance = gt->steering[type].instance_target;
return true;
@@ -544,7 +573,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
implicit_ranges = gt->steering[IMPLICIT_STEERING].ranges;
if (implicit_ranges)
for (int i = 0; implicit_ranges[i].end > 0; i++)
- if (xe_mmio_in_range(gt, &implicit_ranges[i], reg))
+ if (xe_mmio_in_range(&gt->mmio, &implicit_ranges[i], reg))
return false;
/*
@@ -579,7 +608,7 @@ static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock)
* when a read to the relevant register returns 1.
*/
if (GRAPHICS_VERx100(xe) >= 1270)
- ret = xe_mmio_wait32(gt, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL,
+ ret = xe_mmio_wait32(&gt->mmio, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL,
true);
drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
@@ -589,7 +618,7 @@ static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock)
{
/* Release hardware semaphore - this is done by writing 1 to the register */
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
- xe_mmio_write32(gt, STEER_SEMAPHORE, 0x1);
+ xe_mmio_write32(&gt->mmio, STEER_SEMAPHORE, 0x1);
spin_unlock(&gt->mcr_lock);
}
@@ -603,6 +632,7 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
u8 rw_flag, int group, int instance, u32 value)
{
const struct xe_reg reg = to_xe_reg(reg_mcr);
+ struct xe_mmio *mmio = &gt->mmio;
struct xe_reg steer_reg;
u32 steer_val, val = 0;
@@ -635,12 +665,12 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
if (rw_flag == MCR_OP_READ)
steer_val |= MCR_MULTICAST;
- xe_mmio_write32(gt, steer_reg, steer_val);
+ xe_mmio_write32(mmio, steer_reg, steer_val);
if (rw_flag == MCR_OP_READ)
- val = xe_mmio_read32(gt, reg);
+ val = xe_mmio_read32(mmio, reg);
else
- xe_mmio_write32(gt, reg, value);
+ xe_mmio_write32(mmio, reg, value);
/*
* If we turned off the multicast bit (during a write) we're required
@@ -649,7 +679,7 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
* operation.
*/
if (rw_flag == MCR_OP_WRITE)
- xe_mmio_write32(gt, steer_reg, MCR_MULTICAST);
+ xe_mmio_write32(mmio, steer_reg, MCR_MULTICAST);
return val;
}
@@ -684,7 +714,7 @@ u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr)
group, instance, 0);
mcr_unlock(gt);
} else {
- val = xe_mmio_read32(gt, reg);
+ val = xe_mmio_read32(&gt->mmio, reg);
}
return val;
@@ -757,7 +787,7 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
* to touch the steering register.
*/
mcr_lock(gt);
- xe_mmio_write32(gt, reg, value);
+ xe_mmio_write32(&gt->mmio, reg, value);
mcr_unlock(gt);
}
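
Aside: the new xe_gt_mcr_steering_info_to_dss_id() is the inverse of xe_gt_mcr_get_dss_steering(): group = dss / dss_per_group and instance = dss % dss_per_group, so dss = group * dss_per_group + instance. A tiny round-trip check, with dss_per_group assumed to be 4 (the driver derives it from fuse data):

#include <assert.h>
#include <stdio.h>

/* Value assumed for this sketch; the driver derives it from fuses. */
static const unsigned int dss_per_group = 4;

/* Shape of xe_gt_mcr_get_dss_steering(). */
static void dss_to_steering(unsigned int dss,
			    unsigned int *group, unsigned int *instance)
{
	*group = dss / dss_per_group;
	*instance = dss % dss_per_group;
}

/* Shape of xe_gt_mcr_steering_info_to_dss_id(): the inverse map. */
static unsigned int steering_to_dss(unsigned int group, unsigned int instance)
{
	return group * dss_per_group + instance;
}

int main(void)
{
	unsigned int dss, group, instance;

	for (dss = 0; dss < 16; dss++) {
		dss_to_steering(dss, &group, &instance);
		assert(steering_to_dss(group, instance) == dss);
	}
	printf("group/instance <-> DSS id round-trips\n");
	return 0;
}
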
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index 8d119a0d5493..bc06520befab 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -26,8 +26,13 @@ void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
u32 value);
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance);
+
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
+u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);
/*
* Loop over each DSS and determine the group and instance IDs that
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 79c426dc2505..2606cd396df5 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -10,7 +10,6 @@
#include <drm/drm_exec.h>
#include <drm/drm_managed.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h
index d6228baaff1e..11da0228cea7 100644
--- a/drivers/gpu/drm/xe/xe_gt_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_printk.h
@@ -8,7 +8,7 @@
#include <drm/drm_print.h>
-#include "xe_device_types.h"
+#include "xe_gt_types.h"
#define xe_gt_printk(_gt, _level, _fmt, ...) \
drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
@@ -60,6 +60,21 @@ static inline void __xe_gt_printfn_info(struct drm_printer *p, struct va_format
xe_gt_info(gt, "%pV", vaf);
}
+static inline void __xe_gt_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_gt *gt = p->arg;
+ struct drm_printer dbg;
+
+ /*
+ * The original xe_gt_dbg() callsite annotations are useless here;
+ * redirect to the tweaked drm_dbg_printer() instead.
+ */
+ dbg = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, NULL);
+ dbg.origin = p->origin;
+
+ drm_printf(&dbg, "GT%u: %pV", gt->info.id, vaf);
+}
+
/**
* xe_gt_err_printer - Construct a &drm_printer that outputs to xe_gt_err()
* @gt: the &xe_gt pointer to use in xe_gt_err()
@@ -90,4 +105,20 @@ static inline struct drm_printer xe_gt_info_printer(struct xe_gt *gt)
return p;
}
+/**
+ * xe_gt_dbg_printer - Construct a &drm_printer that outputs like xe_gt_dbg()
+ * @gt: the &xe_gt pointer to use in xe_gt_dbg()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_gt_dbg_printer(struct xe_gt *gt)
+{
+ struct drm_printer p = {
+ .printfn = __xe_gt_printfn_dbg,
+ .arg = gt,
+ .origin = (const void *)_THIS_IP_,
+ };
+ return p;
+}
+
#endif
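
Aside: __xe_gt_printfn_dbg() forwards into a freshly built debug printer while copying the caller's .origin across, so the report points at the real callsite rather than the wrapper. A minimal model of that printer-redirection shape, using a simplified printer struct rather than the real drm_printer:

#include <stdio.h>

/* Reduced model of drm_printer: an output hook plus a recorded callsite. */
struct printer {
	void (*printfn)(struct printer *p, const char *msg);
	void *arg;
	const void *origin;
};

static void dbg_printfn(struct printer *p, const char *msg)
{
	/* A real drm_dbg_printer would resolve p->origin to a symbol. */
	fprintf(stderr, "[dbg %p] %s", p->origin, msg);
}

/*
 * Shape of __xe_gt_printfn_dbg(): build a fresh debug printer, copy the
 * caller's origin across so the report names the real callsite, and
 * prepend the GT prefix.
 */
static void gt_dbg_printfn(struct printer *p, const char *msg)
{
	struct printer dbg = { dbg_printfn, NULL, p->origin };
	char buf[128];

	snprintf(buf, sizeof(buf), "GT0: %s", msg);
	dbg.printfn(&dbg, buf);
}

int main(void)
{
	static const int callsite; /* stand-in for _THIS_IP_ */
	struct printer p = { gt_dbg_printfn, NULL, &callsite };

	p.printfn(&p, "hello\n");
	return 0;
}
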
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 905f409db74b..6f906c8e8108 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -5,12 +5,15 @@
#include <drm/drm_managed.h>
+#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"
+#include "xe_gt.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_mmio.h"
@@ -65,6 +68,19 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
+ * @gt: the &xe_gt to initialize
+ *
+ * Late one-time initialization of the PF data.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return xe_gt_sriov_pf_migration_init(gt);
+}
+
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
return GRAPHICS_VERx100(xe) == 1200;
@@ -72,7 +88,7 @@ static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
static void pf_enable_ggtt_guest_update(struct xe_gt *gt)
{
- xe_mmio_write32(gt, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
+ xe_mmio_write32(&gt->mmio, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
}
/**
@@ -89,6 +105,56 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
xe_gt_sriov_pf_service_update(gt);
}
+static u32 pf_get_vf_regs_stride(struct xe_device *xe)
+{
+ return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
+}
+
+static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
+{
+ struct xe_reg pf_reg = vf_reg;
+
+ pf_reg.vf = 0;
+ pf_reg.addr += stride * vfid;
+
+ return pf_reg;
+}
+
+static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
+ struct xe_reg scratch;
+ int n, count;
+
+ if (xe_gt_is_media_type(gt)) {
+ count = MED_VF_SW_FLAG_COUNT;
+ for (n = 0; n < count; n++) {
+ scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
+ xe_mmio_write32(&gt->mmio, scratch, 0);
+ }
+ } else {
+ count = VF_SW_FLAG_COUNT;
+ for (n = 0; n < count; n++) {
+ scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
+ xe_mmio_write32(&gt->mmio, scratch, 0);
+ }
+ }
+}
+
+/**
+ * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ */
+void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ pf_clear_vf_scratch_regs(gt, vfid);
+}
+
/**
* xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
* @gt: the &xe_gt
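
Aside: in the scratch-register sanitize path above, the PF reaches a VF's copy of a register at base + stride * vfid, where the stride is 0x400 on post-1200 platforms and 0x1000 before. A worked example of that address arithmetic; the base offset below is a placeholder, not the real VF_SW_FLAG definition:

#include <stdio.h>

/* Base offset is a placeholder; only the arithmetic mirrors the diff. */
#define VF_SW_FLAG_BASE 0x190240u
#define VF_SW_FLAG(n) (VF_SW_FLAG_BASE + 4u * (n))

/* pf_get_vf_regs_stride(): newer platforms pack the VF copies closer. */
static unsigned int vf_regs_stride(unsigned int verx100)
{
	return verx100 > 1200 ? 0x400 : 0x1000;
}

/* xe_reg_vf_to_pf(): the PF reaches VF n's copy at base + stride * n. */
static unsigned int vf_reg_to_pf(unsigned int vf_addr, unsigned int vfid,
				 unsigned int stride)
{
	return vf_addr + stride * vfid;
}

int main(void)
{
	unsigned int stride = vf_regs_stride(1270);
	unsigned int vfid;

	for (vfid = 1; vfid <= 3; vfid++)
		printf("VF%u scratch0 as seen by the PF: 0x%x\n", vfid,
		       vf_reg_to_pf(VF_SW_FLAG(0), vfid, stride));
	return 0;
}
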
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index f0cb726a6919..f474509411c0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -10,7 +10,9 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_pf_init(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
+void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);
#else
static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
@@ -18,6 +20,11 @@ static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+static inline int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return 0;
+}
+
static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 8250ef71e685..878e96281c03 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -34,6 +34,8 @@
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"
+#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
+
/*
* Return: number of KLVs that were successfully parsed and saved,
* negative error code on failure.
@@ -205,6 +207,11 @@ static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u
return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}
+static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
+}
+
static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
@@ -229,14 +236,16 @@ static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned i
}
/* Return: number of configuration dwords written */
-static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
+static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
u32 n = 0;
if (xe_ggtt_node_allocated(config->ggtt_region)) {
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
+ cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
+ cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
+ }
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
@@ -247,20 +256,24 @@ static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
}
/* Return: number of configuration dwords written */
-static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
+static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
u32 n = 0;
- n += encode_config_ggtt(cfg, config);
+ n += encode_config_ggtt(cfg, config, details);
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
- cfg[n++] = config->begin_ctx;
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
+ cfg[n++] = config->begin_ctx;
+ }
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
cfg[n++] = config->num_ctxs;
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
- cfg[n++] = config->begin_db;
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
+ cfg[n++] = config->begin_db;
+ }
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
cfg[n++] = config->num_dbs;
@@ -301,7 +314,7 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
if (!cfg)
return -ENOMEM;
- num_dwords = encode_config(cfg, config);
+ num_dwords = encode_config(cfg, config, true);
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
if (xe_gt_is_media_type(gt)) {
@@ -309,10 +322,10 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
/* media-GT will never include a GGTT config */
- xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));
+ xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
/* the GGTT config must be taken from the primary-GT instead */
- num_dwords += encode_config_ggtt(cfg + num_dwords, other);
+ num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
}
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
@@ -387,6 +400,8 @@ static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
* the xe_ggtt_clear() called by below xe_ggtt_remove_node().
*/
xe_ggtt_node_remove(node, false);
+ } else {
+ xe_ggtt_node_fini(node);
}
}
@@ -442,7 +457,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
config->ggtt_region = node;
return 0;
err:
- xe_ggtt_node_fini(node);
+ pf_release_ggtt(tile, node);
return err;
}
@@ -1530,8 +1545,6 @@ static u64 pf_query_max_lmem(struct xe_gt *gt)
#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM SZ_128M /* XXX: make it small for the driver bringup */
-#else
-#define MAX_FAIR_LMEM SZ_2G /* XXX: known issue with allocating BO over 2GiB */
#endif
static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
@@ -1757,6 +1770,77 @@ u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfi
return preempt_timeout;
}
+static const char *sched_priority_unit(u32 priority)
+{
+ return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
+ priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
+ priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
+ "(?)";
+}
+
+static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int err;
+
+ err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
+ if (unlikely(err))
+ return err;
+
+ config->sched_priority = priority;
+ return 0;
+}
+
+static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->sched_priority;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @priority: requested scheduling priority
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_sched_priority(gt, vfid, priority);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, priority,
+ xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
+ "scheduling priority", sched_priority_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_sched_priority() - Get VF's scheduling priority.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: VF's (or PF's) scheduling priority.
+ */
+u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 priority;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ priority = pf_get_sched_priority(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return priority;
+}
+
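A hedged usage sketch of the new setter/getter pair (error handling trimmed; GUC_SCHED_PRIORITY_HIGH is one of the values decoded by sched_priority_unit() above):

    u32 prio;
    int err;

    err = xe_gt_sriov_pf_config_set_sched_priority(gt, vfid, GUC_SCHED_PRIORITY_HIGH);
    if (!err)
        prio = xe_gt_sriov_pf_config_get_sched_priority(gt, vfid);	/* GUC_SCHED_PRIORITY_HIGH */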
static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -2036,13 +2120,13 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_any = valid_any || (valid_ggtt && is_primary);
if (IS_DGFX(xe)) {
- bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
+ bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
valid_any = valid_any || (valid_lmem && is_primary);
valid_all = valid_all && valid_lmem;
}
- return valid_all ? 1 : valid_any ? -ENOKEY : -ENODATA;
+ return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
}
/**
@@ -2069,6 +2153,174 @@ bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
}
/**
+ * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @buf: the buffer to save the config to (or NULL to query the buffer size)
+ * @size: the size of the buffer (or 0 to query the buffer size)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: minimum size of the buffer or the number of bytes saved,
+ * or a negative error code on failure.
+ */
+ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
+{
+ struct xe_gt_sriov_config *config;
+ ssize_t ret;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !(!buf ^ !size));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ ret = pf_validate_vf_config(gt, vfid);
+ if (!size) {
+ ret = ret ? 0 : SZ_4K;
+ } else if (!ret) {
+ if (size < SZ_4K) {
+ ret = -ENOBUFS;
+ } else {
+ config = pf_pick_vf_config(gt, vfid);
+ ret = encode_config(buf, config, false) * sizeof(u32);
+ }
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return ret;
+}
+
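The intended call pattern is query-then-save, exactly as the debugfs config_blob reader added later in this patch does; a trimmed sketch:

    ssize_t need = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);	/* query size */
    void *blob;

    if (need > 0) {
        blob = kzalloc(need, GFP_KERNEL);
        if (blob)
            need = xe_gt_sriov_pf_config_save(gt, vfid, blob, need);	/* bytes written */
    }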
+static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
+ u32 key, u32 len, const u32 *value)
+{
+ switch (key) {
+ case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
+ if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_ctxs(gt, vfid, value[0]);
+
+ case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
+ if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_dbs(gt, vfid, value[0]);
+
+ case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
+ if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
+ return -EBADMSG;
+ return pf_provision_exec_quantum(gt, vfid, value[0]);
+
+ case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
+ if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
+ return -EBADMSG;
+ return pf_provision_preempt_timeout(gt, vfid, value[0]);
+
+ /* auto-generate case statements */
+#define define_threshold_key_to_provision_case(TAG, ...) \
+ case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG): \
+ BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u); \
+ if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG)) \
+ return -EBADMSG; \
+ return pf_provision_threshold(gt, vfid, \
+ MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG), \
+ value[0]);
+
+ MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
+#undef define_threshold_key_to_provision_case
+ }
+
+ if (xe_gt_is_media_type(gt))
+ return -EKEYREJECTED;
+
+ switch (key) {
+ case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
+ if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
+
+ case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
+ if (!IS_DGFX(gt_to_xe(gt)))
+ return -EKEYREJECTED;
+ if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
+ }
+
+ return -EKEYREJECTED;
+}
+
+static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
+ const u32 *klvs, size_t num_dwords)
+{
+ int err;
+
+ while (num_dwords >= GUC_KLV_LEN_MIN) {
+ u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
+ u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
+
+ klvs += GUC_KLV_LEN_MIN;
+ num_dwords -= GUC_KLV_LEN_MIN;
+
+ if (num_dwords < len)
+ err = -EBADMSG;
+ else
+ err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
+
+ if (err) {
+ xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
+ return err;
+ }
+
+ klvs += len;
+ num_dwords -= len;
+ }
+
+ return pf_validate_vf_config(gt, vfid);
+}
+
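The blob parsed above is a plain GuC KLV stream: one header dword carrying key and length fields, followed by len value dwords. A sketch of building a single entry with the same field macros the parser uses (the value 32 is an arbitrary example):

    u32 klv[2];

    klv[0] = FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY) |
             FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN);	/* len in dwords */
    klv[1] = 32;	/* value: number of contexts */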
+/**
+ * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from binary blob.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @buf: the buffer with config data
+ * @size: the size of the config data
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
+ const void *buf, size_t size)
+{
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid);
+
+ if (!size)
+ return -ENODATA;
+
+ if (size % sizeof(u32))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
+ struct drm_printer p = xe_gt_info_printer(gt);
+
+ drm_printf(&p, "restoring VF%u config:\n", vfid);
+ xe_guc_klv_print(buf, size / sizeof(u32), &p);
+ }
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err) {
+ pf_release_vf_config(gt, vfid);
+ err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return err;
+}
+
+/**
* xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
* @gt: the &xe_gt
*
@@ -2201,6 +2453,41 @@ int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
}
/**
+ * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print LMEM allocations across all VFs.
+ * VFs without LMEM allocation are skipped.
+ *
+ * This function can only be called on PF.
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+ const struct xe_gt_sriov_config *config;
+ char buf[10];
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 1; n <= total_vfs; n++) {
+ config = &gt->sriov.pf.vfs[n].config;
+ if (!config->lmem_obj)
+ continue;
+
+ string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
+ buf, sizeof(buf));
+ drm_printf(p, "VF%u:\t%zu\t(%s)\n",
+ n, config->lmem_obj->size, buf);
+ }
+
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ return 0;
+}
+
+/**
* xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
* @gt: the &xe_gt
* @p: the &drm_printer
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
index 42e64769f666..f894e9d4abba 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -44,6 +44,9 @@ u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfi
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
u32 preempt_timeout);
+u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority);
+
u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
enum xe_guc_klv_threshold_index index);
int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
@@ -54,6 +57,10 @@ int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long tim
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force);
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh);
+ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size);
+int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
+ const void *buf, size_t size);
+
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_config_restart(struct xe_gt *gt);
@@ -61,6 +68,7 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt);
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p);
int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p);
int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p);
+int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p);
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
index 2d3b73d78f14..686c7b3b6d7a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
@@ -33,6 +33,8 @@ struct xe_gt_sriov_config {
u32 exec_quantum;
/** @preempt_timeout: preemption timeout in microseconds. */
u32 preempt_timeout;
+ /** @sched_priority: scheduling priority. */
+ u32 sched_priority;
/** @thresholds: GuC thresholds for adverse events notifications. */
u32 thresholds[XE_GUC_KLV_NUM_THRESHOLDS];
};
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index 02f7328bd6ce..1f50aec3a059 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -9,9 +9,11 @@
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
@@ -176,6 +178,7 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
CASE2STR(PAUSE_SEND_PAUSE);
CASE2STR(PAUSE_WAIT_GUC);
CASE2STR(PAUSE_GUC_DONE);
+ CASE2STR(PAUSE_SAVE_GUC);
CASE2STR(PAUSE_FAILED);
CASE2STR(PAUSED);
CASE2STR(RESUME_WIP);
@@ -415,6 +418,10 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid)
* : | : /
* : v : /
* : PAUSE_GUC_DONE o-----restart
+ * : | :
+ * : | o---<--busy :
+ * : v / / :
+ * : PAUSE_SAVE_GUC :
* : / :
* : / :
* :....o..............o...............o...........:
@@ -434,6 +441,7 @@ static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid)
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE);
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC);
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC);
}
}
@@ -464,12 +472,41 @@ static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid)
pf_enter_vf_pause_failed(gt, vfid);
}
+static void pf_enter_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+}
+
+static bool pf_exit_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC))
+ return false;
+
+ err = xe_gt_sriov_pf_migration_save_guc_state(gt, vfid);
+ if (err) {
+ /* retry if busy */
+ if (err == -EBUSY) {
+ pf_enter_vf_pause_save_guc(gt, vfid);
+ return true;
+ }
+ /* give up on error */
+ if (err == -EIO)
+ pf_enter_vf_mismatch(gt, vfid);
+ }
+
+ pf_enter_vf_pause_completed(gt, vfid);
+ return true;
+}
+
static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid)
{
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE))
return false;
- pf_enter_vf_pause_completed(gt, vfid);
+ pf_enter_vf_pause_save_guc(gt, vfid);
return true;
}
@@ -1008,7 +1045,7 @@ static bool pf_exit_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid)
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO))
return false;
- /* XXX: placeholder */
+ xe_gt_sriov_pf_sanitize_hw(gt, vfid);
pf_enter_vf_flr_send_finish(gt, vfid);
return true;
@@ -1338,6 +1375,9 @@ static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid)
if (pf_exit_vf_pause_guc_done(gt, vfid))
return true;
+ if (pf_exit_vf_pause_save_guc(gt, vfid))
+ return true;
+
if (pf_exit_vf_resume_send_resume(gt, vfid))
return true;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
index 11830aafea45..f02f941b4ad2 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
@@ -27,6 +27,7 @@
* @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command.
* @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits for a response from the GuC.
* @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC.
+ * @XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC: indicates that the PF needs to save the VF GuC state.
* @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed.
* @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused.
 * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates that a VF resume operation is in progress.
@@ -56,6 +57,7 @@ enum xe_gt_sriov_control_bits {
XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE,
XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC,
XE_GT_SRIOV_STATE_PAUSE_GUC_DONE,
+ XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC,
XE_GT_SRIOV_STATE_PAUSE_FAILED,
XE_GT_SRIOV_STATE_PAUSED,
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index 2290ddaf9594..b2521dd6ec42 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -17,6 +17,7 @@
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_debugfs.h"
#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_pf_service.h"
@@ -81,6 +82,11 @@ static const struct drm_info_list pf_info[] = {
.data = xe_gt_sriov_pf_config_print_dbs,
},
{
+ "lmem_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_lmem,
+ },
+ {
"runtime_registers",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_service_print_runtime,
@@ -158,6 +164,7 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
* │   │   ├── contexts_spare
* │   │   ├── exec_quantum_ms
* │   │   ├── preempt_timeout_us
+ * │   │   ├── sched_priority
* │   ├── vf1
* │   │   ├── ggtt_quota
* │   │   ├── lmem_quota
@@ -165,6 +172,7 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
* │   │   ├── contexts_quota
* │   │   ├── exec_quantum_ms
* │   │   ├── preempt_timeout_us
+ * │   │   ├── sched_priority
*/
#define DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(CONFIG, TYPE, FORMAT) \
@@ -203,6 +211,7 @@ DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ctxs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(dbs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(exec_quantum, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(preempt_timeout, u32, "%llu\n");
+DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(sched_priority, u32, "%llu\n");
/*
* /sys/kernel/debug/dri/0/
@@ -289,6 +298,8 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
&exec_quantum_fops);
debugfs_create_file_unsafe("preempt_timeout_us", 0644, parent, parent,
&preempt_timeout_fops);
+ debugfs_create_file_unsafe("sched_priority", 0644, parent, parent,
+ &sched_priority_fops);
/* register all threshold attributes */
#define register_threshold_attribute(TAG, NAME, ...) \
@@ -312,6 +323,9 @@ static const struct {
{ "stop", xe_gt_sriov_pf_control_stop_vf },
{ "pause", xe_gt_sriov_pf_control_pause_vf },
{ "resume", xe_gt_sriov_pf_control_resume_vf },
+#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
+ { "restore!", xe_gt_sriov_pf_migration_restore_guc_state },
+#endif
};
static ssize_t control_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
@@ -375,6 +389,119 @@ static const struct file_operations control_ops = {
.llseek = default_llseek,
};
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── vf1
+ * │   │   ├── guc_state
+ */
+static ssize_t guc_state_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct dentry *dent = file_dentry(file);
+ struct dentry *parent = dent->d_parent;
+ struct xe_gt *gt = extract_gt(parent);
+ unsigned int vfid = extract_vfid(parent);
+
+ return xe_gt_sriov_pf_migration_read_guc_state(gt, vfid, buf, count, pos);
+}
+
+static ssize_t guc_state_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct dentry *dent = file_dentry(file);
+ struct dentry *parent = dent->d_parent;
+ struct xe_gt *gt = extract_gt(parent);
+ unsigned int vfid = extract_vfid(parent);
+
+ if (*pos)
+ return -EINVAL;
+
+ return xe_gt_sriov_pf_migration_write_guc_state(gt, vfid, buf, count);
+}
+
+static const struct file_operations guc_state_ops = {
+ .owner = THIS_MODULE,
+ .read = guc_state_read,
+ .write = guc_state_write,
+ .llseek = default_llseek,
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── vf1
+ * │   │   ├── config_blob
+ */
+static ssize_t config_blob_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct dentry *dent = file_dentry(file);
+ struct dentry *parent = dent->d_parent;
+ struct xe_gt *gt = extract_gt(parent);
+ unsigned int vfid = extract_vfid(parent);
+ ssize_t ret;
+ void *tmp;
+
+ ret = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
+ if (!ret)
+ return -ENODATA;
+ if (ret < 0)
+ return ret;
+
+ tmp = kzalloc(ret, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = xe_gt_sriov_pf_config_save(gt, vfid, tmp, ret);
+ if (ret > 0)
+ ret = simple_read_from_buffer(buf, count, pos, tmp, ret);
+
+ kfree(tmp);
+ return ret;
+}
+
+static ssize_t config_blob_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct dentry *dent = file_dentry(file);
+ struct dentry *parent = dent->d_parent;
+ struct xe_gt *gt = extract_gt(parent);
+ unsigned int vfid = extract_vfid(parent);
+ ssize_t ret;
+ void *tmp;
+
+ if (*pos)
+ return -EINVAL;
+
+ if (!count)
+ return -ENODATA;
+
+ if (count > SZ_4K)
+ return -EINVAL;
+
+ tmp = kzalloc(count, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (copy_from_user(tmp, buf, count)) {
+ ret = -EFAULT;
+ } else {
+ ret = xe_gt_sriov_pf_config_restore(gt, vfid, tmp, count);
+ if (!ret)
+ ret = count;
+ }
+ kfree(tmp);
+ return ret;
+}
+
+static const struct file_operations config_blob_ops = {
+ .owner = THIS_MODULE,
+ .read = config_blob_read,
+ .write = config_blob_write,
+ .llseek = default_llseek,
+};
+
/**
* xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs.
* @gt: the &xe_gt to register
@@ -423,5 +550,15 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
pf_add_config_attrs(gt, vfdentry, VFID(n));
debugfs_create_file("control", 0600, vfdentry, NULL, &control_ops);
+
+ /* for testing/debugging purposes only! */
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ debugfs_create_file("guc_state",
+ IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
+ vfdentry, NULL, &guc_state_ops);
+ debugfs_create_file("config_blob",
+ IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
+ vfdentry, NULL, &config_blob_ops);
+ }
}
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
index 0bf12d89ceb2..6af219d93c3b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
@@ -18,7 +18,7 @@
* is within a range of supported VF numbers (up to maximum number of VFs that
* driver can support, including VF0 that represents the PF itself).
*
- * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ * Note: Effective only on debug builds. See `Xe Asserts`_ for more information.
*/
#define xe_gt_sriov_pf_assert_vfid(gt, vfid) xe_sriov_pf_assert_vfid(gt_to_xe(gt), (vfid))
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
new file mode 100644
index 000000000000..c712111aa30d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_sriov_abi.h"
+#include "xe_bo.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_migration.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_sriov.h"
+
+/* Return: number of dwords saved/restored/required or a negative error code on failure */
+static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
+ u64 addr, u32 ndwords)
+{
+ u32 request[PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_SAVE_RESTORE_VF) |
+ FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE, opcode),
+ FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID, vfid),
+ FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_ADDR_LO, lower_32_bits(addr)),
+ FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_ADDR_HI, upper_32_bits(addr)),
+ FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE, ndwords),
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
+}
+
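One H2G action covers three operations, selected purely by its arguments; the helpers below use it as sketched here (guc, vfid, addr and ndwords assumed in scope):

    /* query the required size: SAVE opcode with no buffer */
    ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE, 0, 0);

    /* save up to ndwords into a GGTT-mapped buffer at addr */
    ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE, addr, ndwords);

    /* restore ndwords from the buffer at addr */
    ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE, addr, ndwords);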
+/* Return: size of the state in dwords or a negative error code on failure */
+static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
+{
+ int ret;
+
+ ret = guc_action_vf_save_restore(&gt->uc.guc, vfid, GUC_PF_OPCODE_VF_SAVE, 0, 0);
+ return ret ?: -ENODATA;
+}
+
+/* Return: number of state dwords saved or a negative error code on failure */
+static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
+ void *buff, size_t size)
+{
+ const int ndwords = size / sizeof(u32);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_bo *bo;
+ int ret;
+
+ xe_gt_assert(gt, size % sizeof(u32) == 0);
+ xe_gt_assert(gt, size == ndwords * sizeof(u32));
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE,
+ xe_bo_ggtt_addr(bo), ndwords);
+ if (!ret)
+ ret = -ENODATA;
+ else if (ret > ndwords)
+ ret = -EPROTO;
+ else if (ret > 0)
+ xe_map_memcpy_from(xe, buff, &bo->vmap, 0, ret * sizeof(u32));
+
+ xe_bo_unpin_map_no_vm(bo);
+ return ret;
+}
+
+/* Return: number of state dwords restored or a negative error code on failure */
+static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
+ const void *buff, size_t size)
+{
+ const int ndwords = size / sizeof(u32);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_bo *bo;
+ int ret;
+
+ xe_gt_assert(gt, size % sizeof(u32) == 0);
+ xe_gt_assert(gt, size == ndwords * sizeof(u32));
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size);
+
+ ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE,
+ xe_bo_ggtt_addr(bo), ndwords);
+ if (!ret)
+ ret = -ENODATA;
+ else if (ret > ndwords)
+ ret = -EPROTO;
+
+ xe_bo_unpin_map_no_vm(bo);
+ return ret;
+}
+
+static bool pf_migration_supported(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ return gt->sriov.pf.migration.supported;
+}
+
+static struct mutex *pf_migration_mutex(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ return &gt->sriov.pf.migration.snapshot_lock;
+}
+
+static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt,
+ unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+ lockdep_assert_held(pf_migration_mutex(gt));
+
+ return &gt->sriov.pf.vfs[vfid].snapshot;
+}
+
+static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
+{
+ return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs;
+}
+
+static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ drmm_kfree(&xe->drm, snapshot->guc.buff);
+ snapshot->guc.buff = NULL;
+ snapshot->guc.size = 0;
+}
+
+static int pf_alloc_guc_state(struct xe_gt *gt,
+ struct xe_gt_sriov_state_snapshot *snapshot,
+ size_t size)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ void *p;
+
+ pf_free_guc_state(gt, snapshot);
+
+ if (!size)
+ return -ENODATA;
+
+ if (size % sizeof(u32))
+ return -EINVAL;
+
+ if (size > SZ_2M)
+ return -EFBIG;
+
+ p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ snapshot->guc.buff = p;
+ snapshot->guc.size = size;
+ return 0;
+}
+
+static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
+{
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
+ unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot);
+
+ xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n",
+ vfid, snapshot->guc.size / sizeof(u32));
+ print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET,
+ snapshot->guc.buff, min(SZ_64, snapshot->guc.size));
+ }
+}
+
+static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
+ size_t size;
+ int ret;
+
+ ret = pf_send_guc_query_vf_state_size(gt, vfid);
+ if (ret < 0)
+ goto fail;
+ size = ret * sizeof(u32);
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size);
+
+ ret = pf_alloc_guc_state(gt, snapshot, size);
+ if (ret < 0)
+ goto fail;
+
+ ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size);
+ if (ret < 0)
+ goto fail;
+ size = ret * sizeof(u32);
+ xe_gt_assert(gt, size);
+ xe_gt_assert(gt, size <= snapshot->guc.size);
+ snapshot->guc.size = size;
+
+ pf_dump_guc_state(gt, snapshot);
+ return 0;
+
+fail:
+ xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret));
+ pf_free_guc_state(gt, snapshot);
+ return ret;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ mutex_lock(pf_migration_mutex(gt));
+ err = pf_save_vf_guc_state(gt, vfid);
+ mutex_unlock(pf_migration_mutex(gt));
+
+ return err;
+}
+
+static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
+ int ret;
+
+ if (!snapshot->guc.size)
+ return -ENODATA;
+
+ xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n",
+ snapshot->guc.size / sizeof(u32), vfid);
+ ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size);
+ if (ret < 0)
+ goto fail;
+
+ xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid);
+ return 0;
+
+fail:
+ xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret));
+ return ret;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
+{
+ int ret;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ mutex_lock(pf_migration_mutex(gt));
+ ret = pf_restore_vf_guc_state(gt, vfid);
+ mutex_unlock(pf_migration_mutex(gt));
+
+ return ret;
+}
+
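Together with the control-state changes earlier in this patch (PAUSE_SAVE_GUC), the expected flow is save-on-pause followed by an explicit restore; a hedged sketch of the pairing:

    int err;

    /* snapshot is normally taken by the PAUSE_SAVE_GUC step while the VF is paused */
    err = xe_gt_sriov_pf_migration_save_guc_state(gt, vfid);
    if (!err)
        /* e.g. after the VF config blob was restored on the target */
        err = xe_gt_sriov_pf_migration_restore_guc_state(gt, vfid);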
+#ifdef CONFIG_DEBUG_FS
+/**
+ * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @buf: the user space buffer to read to
+ * @count: the maximum number of bytes to read
+ * @pos: the current position in the buffer
+ *
+ * This function is for PF only.
+ *
+ * This function reads up to @count bytes from the saved VF GuC state buffer
+ * at offset @pos into the user space address starting at @buf.
+ *
+ * Return: the number of bytes read or a negative error code on failure.
+ */
+ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
+ char __user *buf, size_t count, loff_t *pos)
+{
+ struct xe_gt_sriov_state_snapshot *snapshot;
+ ssize_t ret;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ mutex_lock(pf_migration_mutex(gt));
+ snapshot = pf_pick_vf_snapshot(gt, vfid);
+ if (snapshot->guc.size)
+ ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
+ snapshot->guc.size);
+ else
+ ret = -ENODATA;
+ mutex_unlock(pf_migration_mutex(gt));
+
+ return ret;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @buf: the user space buffer with GuC VF state
+ * @size: the size of GuC VF state (in bytes)
+ *
+ * This function is for PF only.
+ *
+ * This function reads @size bytes of the VF GuC state stored at user space
+ * address @buf and writes it into an internal VF state buffer.
+ *
+ * Return: the number of bytes used or a negative error code on failure.
+ */
+ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
+ const char __user *buf, size_t size)
+{
+ struct xe_gt_sriov_state_snapshot *snapshot;
+ loff_t pos = 0;
+ ssize_t ret;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ mutex_lock(pf_migration_mutex(gt));
+ snapshot = pf_pick_vf_snapshot(gt, vfid);
+ ret = pf_alloc_guc_state(gt, snapshot, size);
+ if (!ret) {
+ ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size);
+ if (ret < 0)
+ pf_free_guc_state(gt, snapshot);
+ else
+ pf_dump_guc_state(gt, snapshot);
+ }
+ mutex_unlock(pf_migration_mutex(gt));
+
+ return ret;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static bool pf_check_migration_support(struct xe_gt *gt)
+{
+ /* GuC 70.25 with save/restore v2 is required */
+ xe_gt_assert(gt, GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 25, 0));
+
+ /* XXX: for now this is for feature enabling only */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+}
+
+/**
+ * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
+ * @gt: the &xe_gt
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ gt->sriov.pf.migration.supported = pf_check_migration_support(gt);
+
+ if (!pf_migration_supported(gt))
+ return 0;
+
+ err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock);
+ if (err)
+ return err;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
new file mode 100644
index 000000000000..09faeae00ddb
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_MIGRATION_H_
+#define _XE_GT_SRIOV_PF_MIGRATION_H_
+
+#include <linux/types.h>
+
+struct xe_gt;
+
+int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
+int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
+
+#ifdef CONFIG_DEBUG_FS
+ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
+ char __user *buf, size_t count, loff_t *pos);
+ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
+ const char __user *buf, size_t count);
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
new file mode 100644
index 000000000000..1f3110b6d44f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
+#define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/**
+ * struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data.
+ *
+ * Used by the PF driver to maintain per-VF migration data.
+ */
+struct xe_gt_sriov_state_snapshot {
+ /** @guc: GuC VF state snapshot */
+ struct {
+ /** @guc.buff: buffer with the VF state */
+ u32 *buff;
+ /** @guc.size: size of the buffer (must be dwords aligned) */
+ u32 size;
+ } guc;
+};
+
+/**
+ * struct xe_gt_sriov_pf_migration - GT-level VF migration data.
+ *
+ * Used by the PF driver to maintain non-VF specific per-GT data.
+ */
+struct xe_gt_sriov_pf_migration {
+ /** @supported: indicates whether the feature is supported */
+ bool supported;
+
+ /** @snapshot_lock: protects all VFs snapshots */
+ struct mutex snapshot_lock;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
index fae5be5a2a11..c00fb354705f 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
@@ -135,14 +135,33 @@ static int pf_update_policy_u32(struct xe_gt *gt, u16 key, u32 *policy, u32 valu
return 0;
}
+static void pf_bulk_reset_sched_priority(struct xe_gt *gt, u32 priority)
+{
+ unsigned int total_vfs = 1 + xe_gt_sriov_pf_get_totalvfs(gt);
+ unsigned int n;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 0; n < total_vfs; n++)
+ gt->sriov.pf.vfs[n].config.sched_priority = priority;
+}
+
static int pf_provision_sched_if_idle(struct xe_gt *gt, bool enable)
{
+ int err;
+
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
- return pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
- &gt->sriov.pf.policy.guc.sched_if_idle,
- enable);
+ err = pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
+ &gt->sriov.pf.policy.guc.sched_if_idle,
+ enable);
+
+ if (!err)
+ pf_bulk_reset_sched_priority(gt, enable ? GUC_SCHED_PRIORITY_NORMAL :
+ GUC_SCHED_PRIORITY_LOW);
+ return err;
}
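The policy toggle now keeps the cached per-VF priorities coherent with the side effect the KLV update has on the GuC; a sketch of the resulting invariant (caller holds the PF master mutex):

    pf_provision_sched_if_idle(gt, true);	/* all cached configs -> GUC_SCHED_PRIORITY_NORMAL */
    pf_provision_sched_if_idle(gt, false);	/* all cached configs -> GUC_SCHED_PRIORITY_LOW */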
static int pf_reprovision_sched_if_idle(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 0e23b7ea4f3e..924e75b94aec 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -237,7 +237,7 @@ static void read_many(struct xe_gt *gt, unsigned int count,
const struct xe_reg *regs, u32 *values)
{
while (count--)
- *values++ = xe_mmio_read32(gt, *regs++);
+ *values++ = xe_mmio_read32(&gt->mmio, *regs++);
}
static void pf_prepare_runtime_info(struct xe_gt *gt)
@@ -402,7 +402,7 @@ static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit,
for (i = 0; i < count; ++i, ++data) {
addr = runtime->regs[start + i].addr;
- data->offset = xe_mmio_adjusted_addr(gt, addr);
+ data->offset = xe_mmio_adjusted_addr(&gt->mmio, addr);
data->value = runtime->values[start + i];
}
@@ -513,7 +513,7 @@ int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p
for (; size--; regs++, values++) {
drm_printf(p, "reg[%#x] = %#x\n",
- xe_mmio_adjusted_addr(gt, regs->addr), *values);
+ xe_mmio_adjusted_addr(&gt->mmio, regs->addr), *values);
}
return 0;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index 28e1b130bf87..0426b1a77069 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -10,6 +10,7 @@
#include "xe_gt_sriov_pf_config_types.h"
#include "xe_gt_sriov_pf_control_types.h"
+#include "xe_gt_sriov_pf_migration_types.h"
#include "xe_gt_sriov_pf_monitor_types.h"
#include "xe_gt_sriov_pf_policy_types.h"
#include "xe_gt_sriov_pf_service_types.h"
@@ -29,6 +30,9 @@ struct xe_gt_sriov_metadata {
/** @version: negotiated VF/PF ABI version */
struct xe_gt_sriov_pf_service_version version;
+
+ /** @snapshot: snapshot of the VF state data */
+ struct xe_gt_sriov_state_snapshot snapshot;
};
/**
@@ -36,6 +40,7 @@ struct xe_gt_sriov_metadata {
* @service: service data.
* @control: control data.
* @policy: policy data.
+ * @migration: migration data.
* @spare: PF-only provisioning configuration.
* @vfs: metadata for all VFs.
*/
@@ -43,6 +48,7 @@ struct xe_gt_sriov_pf {
struct xe_gt_sriov_pf_service service;
struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
+ struct xe_gt_sriov_pf_migration migration;
struct xe_gt_sriov_spare_config spare;
struct xe_gt_sriov_metadata *vfs;
};
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 4ebc82e607af..cca5d5732802 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -27,6 +27,7 @@
#include "xe_guc_relay.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
+#include "xe_sriov_vf.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"
@@ -223,6 +224,44 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
return 0;
}
+static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
+{
+ u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE),
+ };
+ int ret;
+
+ ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+/**
+ * xe_gt_sriov_vf_notify_resfix_done - Notify GuC that resource fixups have been applied.
+ * @gt: the &xe_gt struct instance linked to target GuC
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt)
+{
+ struct xe_guc *guc = &gt->uc.guc;
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ err = guc_action_vf_notify_resfix_done(guc);
+ if (unlikely(err))
+ xe_gt_sriov_err(gt, "Failed to notify GuC about resource fixup done (%pe)\n",
+ ERR_PTR(err));
+ else
+ xe_gt_sriov_dbg_verbose(gt, "sent GuC resource fixup done\n");
+
+ return err;
+}
+
static int guc_action_query_single_klv(struct xe_guc *guc, u32 key,
u32 *value, u32 value_len)
{
@@ -692,6 +731,30 @@ failed:
return err;
}
+/**
+ * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
+ * or just mark that a GuC is ready for it.
+ * @gt: the &xe_gt struct instance linked to target GuC
+ *
+ * This function shall be called only by VF.
+ */
+void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(xe));
+
+ set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
+ /*
+ * We need to be certain that if all flags were set, at least one
+ * thread will notice that and schedule the recovery.
+ */
+ smp_mb__after_atomic();
+
+ xe_gt_sriov_info(gt, "ready for recovery after migration\n");
+ xe_sriov_vf_start_migration_recovery(xe);
+}
+
static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
@@ -881,7 +944,7 @@ static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
*/
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
{
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);
struct vf_runtime_reg *rr;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
@@ -917,7 +980,7 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
*/
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
xe_gt_assert(gt, !reg.vf);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index e541ce57bec2..912d20814261 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -17,6 +17,8 @@ int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt);
+int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
+void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c
index f3ddcbefc6bc..2ed5b6780d30 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c
@@ -33,7 +33,7 @@ static const struct drm_info_list vf_info[] = {
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_vf_print_version,
},
-#if defined(CONFIG_DRM_XE_DEBUG) || defined(CONFIG_DRM_XE_DEBUG_SRIOV)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) || IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)
{
"runtime_regs",
.show = xe_gt_debugfs_simple_show,
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index c7364a5aef8f..7a6c1d808e41 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -12,7 +12,7 @@
/**
* xe_gt_stats_incr - Increments the specified stats counter
- * @gt: graphics tile
+ * @gt: GT structure
* @id: xe_gt_stats_id type id that needs to be incremented
* @incr: value to be incremented with
*
@@ -32,7 +32,7 @@ static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
/**
* xe_gt_stats_print_info - Print the GT stats
- * @gt: graphics tile
+ * @gt: GT structure
* @p: drm_printer where it will be printed out.
*
* This prints out all the available GT stats.
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h
index 91d944f6c4e4..38325ef53617 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats.h
@@ -6,15 +6,11 @@
#ifndef _XE_GT_STATS_H_
#define _XE_GT_STATS_H_
+#include "xe_gt_stats_types.h"
+
struct xe_gt;
struct drm_printer;
-enum xe_gt_stats_id {
- XE_GT_STATS_ID_TLB_INVAL,
- /* must be the last entry */
- __XE_GT_STATS_NUM_IDS,
-};
-
#ifdef CONFIG_DEBUG_FS
int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
new file mode 100644
index 000000000000..2fc055e39f27
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_STATS_TYPES_H_
+#define _XE_GT_STATS_TYPES_H_
+
+enum xe_gt_stats_id {
+ XE_GT_STATS_ID_TLB_INVAL,
+ /* must be the last entry */
+ __XE_GT_STATS_NUM_IDS,
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 25963e33a383..8db78d616b6f 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -8,6 +8,7 @@
#include <regs/xe_gt_regs.h>
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle.h"
#include "xe_mmio.h"
@@ -41,9 +42,9 @@ u32 xe_gt_throttle_get_limit_reasons(struct xe_gt *gt)
xe_pm_runtime_get(gt_to_xe(gt));
if (xe_gt_is_media_type(gt))
- reg = xe_mmio_read32(gt, MTL_MEDIA_PERF_LIMIT_REASONS);
+ reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_PERF_LIMIT_REASONS);
else
- reg = xe_mmio_read32(gt, GT0_PERF_LIMIT_REASONS);
+ reg = xe_mmio_read32(&gt->mmio, GT0_PERF_LIMIT_REASONS);
xe_pm_runtime_put(gt_to_xe(gt));
return reg;
@@ -53,6 +54,7 @@ static u32 read_status(struct xe_gt *gt)
{
u32 status = xe_gt_throttle_get_limit_reasons(gt) & GT0_PERF_LIMIT_REASONS_MASK;
+ xe_gt_dbg(gt, "throttle reasons: 0x%08x\n", status);
return status;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index cca9cf536f76..0a93831c0a02 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -37,6 +37,15 @@ static long tlb_timeout_jiffies(struct xe_gt *gt)
return hw_tlb_timeout + 2 * delay;
}
+static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ if (WARN_ON_ONCE(!fence->gt))
+ return;
+
+ xe_pm_runtime_put(gt_to_xe(fence->gt));
+ fence->gt = NULL; /* fini() should be called once */
+}
+
static void
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
@@ -56,6 +65,14 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
__invalidation_fence_signal(xe, fence);
}
+void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ if (WARN_ON_ONCE(!fence->gt))
+ return;
+
+ __invalidation_fence_signal(gt_to_xe(fence->gt), fence);
+}
+
static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
struct xe_gt *gt = container_of(work, struct xe_gt,
@@ -63,6 +80,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
struct xe_device *xe = gt_to_xe(gt);
struct xe_gt_tlb_invalidation_fence *fence, *next;
+ LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);
+
spin_lock_irq(&gt->tlb_invalidation.pending_lock);
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link) {
@@ -87,15 +106,15 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
}
/**
- * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
- * @gt: graphics tile
+ * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
+ * @gt: GT structure
*
* Initialize GT TLB invalidation state, purely software initialization, should
* be called once during driver load.
*
* Return: 0 on success, negative error code on error.
*/
-int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
+int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
{
gt->tlb_invalidation.seqno = 1;
INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
@@ -109,7 +128,7 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
/**
* xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
- * @gt: graphics tile
+ * @gt: GT structure
*
* Signal any pending invalidation fences, should be called during a GT reset
*/
@@ -204,7 +223,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
tlb_timeout_jiffies(gt));
}
spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- } else if (ret < 0) {
+ } else {
__invalidation_fence_signal(xe, fence);
}
if (!ret) {
@@ -225,7 +244,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
/**
* xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion
*
@@ -242,14 +261,23 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
0, /* seqno, replaced in send_tlb_invalidation */
MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
};
+ int ret;
+
+ ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
+ ARRAY_SIZE(action));
+ /*
+ * -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
+ * should be nuked on a GT reset so this error can be ignored.
+ */
+ if (ret == -ECANCELED)
+ return 0;
- return send_tlb_invalidation(&gt->uc.guc, fence, action,
- ARRAY_SIZE(action));
+ return ret;
}
/**
* xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
- * @gt: graphics tile
+ * @gt: GT structure
*
* Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
* synchronous.
@@ -259,6 +287,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref;
if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
gt->uc.guc.submission_state.enabled) {
@@ -267,27 +296,27 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
ret = xe_gt_tlb_invalidation_guc(gt, &fence);
- if (ret < 0) {
- xe_gt_tlb_invalidation_fence_fini(&fence);
+ if (ret)
return ret;
- }
xe_gt_tlb_invalidation_fence_wait(&fence);
} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
+ struct xe_mmio *mmio = &gt->mmio;
+
if (IS_SRIOV_VF(xe))
return 0;
- xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
- xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
PVC_GUC_TLB_INV_DESC1_INVALIDATE);
- xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
PVC_GUC_TLB_INV_DESC0_VALID);
} else {
- xe_mmio_write32(gt, GUC_TLB_INV_CR,
+ xe_mmio_write32(mmio, GUC_TLB_INV_CR,
GUC_TLB_INV_CR_INVALIDATE);
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
return 0;
@@ -297,7 +326,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
* xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
* address range
*
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion
* @start: start address
@@ -383,7 +412,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
/**
* xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion, can be NULL
* @vma: VMA to invalidate
@@ -496,7 +525,8 @@ static const struct dma_fence_ops invalidation_fence_ops = {
* @stack: fence is stack variable
*
* Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
- * must be called if fence is not signaled.
+ * will be called automatically when the fence is signaled (all fences must
+ * signal), even on error.
*/
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
@@ -516,14 +546,3 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
dma_fence_get(&fence->base);
fence->gt = gt;
}
-
-/**
- * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
- * @fence: TLB invalidation fence to finalize
- *
- * Drop PM ref which fence took durinig init.
- */
-void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
-{
- xe_pm_runtime_put(gt_to_xe(fence->gt));
-}
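With the explicit fini dropped, callers only init, issue and wait; the PM reference taken at init time is released when the fence signals, even on the error path. A trimmed sketch of the resulting pattern, mirroring xe_gt_tlb_invalidation_ggtt() above:

    struct xe_gt_tlb_invalidation_fence fence;
    int ret;

    xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
    ret = xe_gt_tlb_invalidation_guc(gt, &fence);
    if (ret)
        return ret;	/* fence already signaled (and finalized) on failure */
    xe_gt_tlb_invalidation_fence_wait(&fence);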
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index a84065fa324c..672acfcdf0d7 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -14,7 +14,8 @@ struct xe_gt;
struct xe_guc;
struct xe_vma;
-int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
+
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
@@ -28,7 +29,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
bool stack);
-void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence);
+void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence);
static inline void
xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 0662f71c6ede..df2042db7ee6 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -5,6 +5,7 @@
#include "xe_gt_topology.h"
+#include <generated/xe_wa_oob.h>
#include <linux/bitmap.h>
#include <linux/compiler.h>
@@ -12,6 +13,7 @@
#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_mmio.h"
+#include "xe_wa.h"
static void
load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
@@ -25,7 +27,7 @@ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
va_start(argp, numregs);
for (i = 0; i < numregs; i++)
- fuse_val[i] = xe_mmio_read32(gt, va_arg(argp, struct xe_reg));
+ fuse_val[i] = xe_mmio_read32(&gt->mmio, va_arg(argp, struct xe_reg));
va_end(argp);
bitmap_from_arr32(mask, fuse_val, numregs * 32);
@@ -35,7 +37,7 @@ static void
load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask, enum xe_gt_eu_type *eu_type)
{
struct xe_device *xe = gt_to_xe(gt);
- u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE);
+ u32 reg_val = xe_mmio_read32(&gt->mmio, XELP_EU_ENABLE);
u32 val = 0;
int i;
@@ -127,7 +129,19 @@ static void
load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
{
struct xe_device *xe = gt_to_xe(gt);
- u32 fuse3 = xe_mmio_read32(gt, MIRROR_FUSE3);
+ u32 fuse3 = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
+
+ /*
+ * PTL platforms with media version 30.00 do not provide proper values
+ * for the media GT's L3 bank registers. Skip the readout since we
+ * don't have any way to obtain real values.
+ *
+ * This may get re-described as an official workaround in the future,
+ * but there's no tracking number assigned yet so we use a custom
+ * OOB workaround descriptor.
+ */
+ if (XE_WA(gt, no_media_l3))
+ return;
if (GRAPHICS_VER(xe) >= 20) {
xe_l3_bank_mask_t per_node = {};
@@ -141,7 +155,7 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
xe_l3_bank_mask_t per_node = {};
xe_l3_bank_mask_t per_mask_bit = {};
u32 meml3_en = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
- u32 fuse4 = xe_mmio_read32(gt, XEHP_FUSE4);
+ u32 fuse4 = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
u32 bank_val = REG_FIELD_GET(GT_L3_EXC_MASK, fuse4);
bitmap_set_value8(per_mask_bit, 0x3, 0);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 3d1c51de0268..6e66bf0e8b3f 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -6,14 +6,15 @@
#ifndef _XE_GT_TYPES_H_
#define _XE_GT_TYPES_H_
+#include "xe_device_types.h"
#include "xe_force_wake_types.h"
#include "xe_gt_idle_types.h"
#include "xe_gt_sriov_pf_types.h"
#include "xe_gt_sriov_vf_types.h"
-#include "xe_gt_stats.h"
+#include "xe_gt_stats_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
-#include "xe_oa.h"
+#include "xe_oa_types.h"
#include "xe_reg_sr_types.h"
#include "xe_sa_types.h"
#include "xe_uc_types.h"
@@ -145,19 +146,20 @@ struct xe_gt {
/**
* @mmio: mmio info for GT. All GTs within a tile share the same
* register space, but have their own copy of GSI registers at a
- * specific offset, as well as their own forcewake handling.
+ * specific offset.
+ */
+ struct xe_mmio mmio;
+
+ /**
+ * @pm: power management info for GT. The driver uses the GT's
+ * "force wake" interface to wake up specific parts of the GT hardware
+ * from C6 sleep states and ensure the hardware remains awake while it
+ * is being actively used.
*/
struct {
- /** @mmio.fw: force wake for GT */
+ /** @pm.fw: force wake for GT */
struct xe_force_wake fw;
- /**
- * @mmio.adj_limit: adjust MMIO address if address is below this
- * value
- */
- u32 adj_limit;
- /** @mmio.adj_offset: offect to add to MMIO address when adjusting */
- u32 adj_offset;
- } mmio;
+ } pm;
/** @sriov: virtualization data related to GT */
union {
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 52df28032a6f..408365dfe4ee 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -14,6 +14,7 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
+#include "regs/xe_irq_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
@@ -22,6 +23,7 @@
#include "xe_gt_sriov_vf.h"
#include "xe_gt_throttle.h"
#include "xe_guc_ads.h"
+#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_hwconfig.h"
@@ -42,7 +44,15 @@ static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
struct xe_bo *bo)
{
struct xe_device *xe = guc_to_xe(guc);
- u32 addr = xe_bo_ggtt_addr(bo);
+ u32 addr;
+
+ /*
+ * For most BOs, the address on the allocating tile is fine. However for
+ * some, e.g. G2G CTB, the address on a specific tile is required as it
+ * might be different for each tile. So, just always ask for the address
+ * on the target GuC.
+ */
+ addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id);
/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
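(Worth noting for context: the G2G object allocated later in this patch is created with XE_BO_FLAG_GGTT_ALL, i.e. presumably mapped into every tile's GGTT, which is what makes asking for a specific tile's address meaningful here.)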
@@ -68,7 +78,7 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc)
static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
- u32 flags = 0;
+ u32 flags = GUC_CTL_ENABLE_LITE_RESTORE;
if (!guc_to_xe(guc)->info.skip_guc_pc)
flags |= GUC_CTL_ENABLE_SLPC;
@@ -137,6 +147,34 @@ static u32 guc_ctl_ads_flags(struct xe_guc *guc)
return flags;
}
+static bool needs_wa_dual_queue(struct xe_gt *gt)
+{
+ /*
+ * The DUAL_QUEUE_WA tells the GuC to not allow concurrent submissions
+ * on RCS and CCSes with different address spaces, which on DG2 is
+ * required as a WA for an HW bug.
+ */
+ if (XE_WA(gt, 22011391025))
+ return true;
+
+ /*
+ * On newer platforms, the HW has been updated to not allow parallel
+ * execution of different address spaces, so the RCS/CCS will stall the
+ * context switch if one of the other RCS/CCSes is busy with a different
+ * address space. While functionally correct, having a submission
+ * stalled on the HW limits the GuC ability to shuffle things around and
+ * can cause complications if the non-stalled submission runs for a long
+ * time, because the GuC doesn't know that the stalled submission isn't
+ * actually running and might declare it as hung. Therefore, we enable
+ * the DUAL_QUEUE_WA on all newer platforms on GTs that have CCS engines
+ * to move management back to the GuC.
+ */
+ if (CCS_MASK(gt) && GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
+ return true;
+
+ return false;
+}
+
static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
@@ -149,7 +187,7 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (XE_WA(gt, 14014475959))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
- if (XE_WA(gt, 22011391025))
+ if (needs_wa_dual_queue(gt))
flags |= GUC_WA_DUAL_QUEUE;
/*
@@ -236,20 +274,310 @@ static void guc_write_params(struct xe_guc *guc)
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- xe_mmio_write32(gt, SOFT_SCRATCH(0), 0);
+ xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
+ xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]);
+}
+
+static int guc_action_register_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev,
+ u32 desc_addr, u32 buff_addr, u32 size)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 action[] = {
+ XE_GUC_ACTION_REGISTER_G2G,
+ FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K - 1) |
+ FIELD_PREP(XE_G2G_REGISTER_TYPE, type) |
+ FIELD_PREP(XE_G2G_REGISTER_TILE, dst_tile) |
+ FIELD_PREP(XE_G2G_REGISTER_DEVICE, dst_dev),
+ desc_addr,
+ buff_addr,
+ };
+
+ xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
+ xe_assert(xe, !(size % SZ_4K));
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static int guc_action_deregister_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 action[] = {
+ XE_GUC_ACTION_DEREGISTER_G2G,
+ FIELD_PREP(XE_G2G_DEREGISTER_TYPE, type) |
+ FIELD_PREP(XE_G2G_DEREGISTER_TILE, dst_tile) |
+ FIELD_PREP(XE_G2G_DEREGISTER_DEVICE, dst_dev),
+ };
+
+ xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+#define G2G_DEV(gt) (((gt)->info.type == XE_GT_TYPE_MAIN) ? 0 : 1)
+
+#define G2G_BUFFER_SIZE (SZ_4K)
+#define G2G_DESC_SIZE (64)
+#define G2G_DESC_AREA_SIZE (SZ_4K)
+
+/*
+ * Generate a unique id for each bi-directional CTB for each pair of
+ * near and far tiles/devices. The id can then be used as an index into
+ * a single allocation that is sub-divided into multiple CTBs.
+ *
+ * For example, with two devices per tile and two tiles, the table should
+ * look like:
+ * Far <tile>.<dev>
+ * 0.0 0.1 1.0 1.1
+ * N 0.0 --/-- 00/01 02/03 04/05
+ * e 0.1 01/00 --/-- 06/07 08/09
+ * a 1.0 03/02 07/06 --/-- 10/11
+ * r 1.1 05/04 09/08 11/10 --/--
+ *
+ * Where each entry is Rx/Tx channel id.
+ *
+ * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
+ * be reading from channel #11 and writing to channel #10. Whereas,
+ * GuC #2 talking to GuC #3 would be read on #10 and write to #11.
+ */
+static unsigned int g2g_slot(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
+ u32 type, u32 max_inst, bool have_dev)
+{
+ u32 near = near_tile, far = far_tile;
+ u32 idx = 0, x, y, direction;
+ int i;
+
+ if (have_dev) {
+ near = (near << 1) | near_dev;
+ far = (far << 1) | far_dev;
+ }
+
+ /* No need to send to oneself */
+ if (far == near)
+ return -1;
+
+ if (far > near) {
+ /* Top right table half */
+ x = far;
+ y = near;
+
+ /* T/R is 'forwards' direction */
+ direction = type;
+ } else {
+ /* Bottom left table half */
+ x = near;
+ y = far;
+
+ /* B/L is 'backwards' direction */
+ direction = (1 - type);
+ }
+
+ /* Count the rows prior to the target */
+ for (i = y; i > 0; i--)
+ idx += max_inst - i;
+
+ /* Count this row up to the target */
+ idx += (x - 1 - y);
+
+ /* Slots are in Rx/Tx pairs */
+ idx *= 2;
+
+ /* Pick Rx/Tx direction */
+ idx += direction;
+
+ return idx;
+}
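To make the triangular channel-indexing above concrete, here is a self-contained user-space sketch (a hedged illustration, not driver code) that replays the same walk and checks it against the 2-tile/2-device table in the comment; it assumes XE_G2G_TYPE_IN == 0 and XE_G2G_TYPE_OUT == 1, matching the Rx/Tx pairing:

#include <assert.h>
#include <stdio.h>

enum { TYPE_IN = 0, TYPE_OUT = 1 }; /* assumed XE_G2G_TYPE_* values */

static int slot(unsigned int near_tile, unsigned int near_dev,
		unsigned int far_tile, unsigned int far_dev,
		unsigned int type, unsigned int max_inst)
{
	/* have_dev == true: fold tile and device into one index */
	unsigned int near = (near_tile << 1) | near_dev;
	unsigned int far = (far_tile << 1) | far_dev;
	unsigned int x, y, direction, idx = 0;
	int i;

	if (far == near)
		return -1;

	if (far > near) {
		x = far;		/* top-right half: 'forwards' */
		y = near;
		direction = type;
	} else {
		x = near;		/* bottom-left half: 'backwards' */
		y = far;
		direction = 1 - type;
	}

	for (i = y; i > 0; i--)		/* rows above the target row */
		idx += max_inst - i;
	idx += x - 1 - y;		/* columns within the row */
	idx *= 2;			/* slots come in Rx/Tx pairs */

	return idx + direction;
}

int main(void)
{
	/* GuC #3 (1.1) <-> GuC #2 (1.0): the "11/10" entry in the table */
	assert(slot(1, 1, 1, 0, TYPE_IN, 4) == 11);
	assert(slot(1, 1, 1, 0, TYPE_OUT, 4) == 10);
	assert(slot(1, 0, 1, 1, TYPE_IN, 4) == 10);
	assert(slot(1, 0, 1, 1, TYPE_OUT, 4) == 11);
	printf("channel table matches the comment above\n");
	return 0;
}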
+
+static int guc_g2g_register(struct xe_guc *near_guc, struct xe_gt *far_gt, u32 type, bool have_dev)
+{
+ struct xe_gt *near_gt = guc_to_gt(near_guc);
+ struct xe_device *xe = gt_to_xe(near_gt);
+ struct xe_bo *g2g_bo;
+ u32 near_tile = gt_to_tile(near_gt)->id;
+ u32 near_dev = G2G_DEV(near_gt);
+ u32 far_tile = gt_to_tile(far_gt)->id;
+ u32 far_dev = G2G_DEV(far_gt);
+ u32 max = xe->info.gt_count;
+ u32 base, desc, buf;
+ int slot;
+
+ /* G2G is not allowed between different cards */
+ xe_assert(xe, xe == gt_to_xe(far_gt));
+
+ g2g_bo = near_guc->g2g.bo;
+ xe_assert(xe, g2g_bo);
+
+ slot = g2g_slot(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
+ xe_assert(xe, slot >= 0);
+
+ base = guc_bo_ggtt_addr(near_guc, g2g_bo);
+ desc = base + slot * G2G_DESC_SIZE;
+ buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE;
+
+ xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
+ xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= g2g_bo->size);
+
+ return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev,
+ desc, buf, G2G_BUFFER_SIZE);
+}
+
+static void guc_g2g_deregister(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type)
+{
+ guc_action_deregister_g2g_buffer(guc, type, far_tile, far_dev);
+}
+
+static u32 guc_g2g_size(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int count = xe->info.gt_count;
+ u32 num_channels = (count * (count - 1)) / 2;
+
+ xe_assert(xe, num_channels * XE_G2G_TYPE_LIMIT * G2G_DESC_SIZE <= G2G_DESC_AREA_SIZE);
+
+ return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+}
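A hedged worked example of this sizing: with xe->info.gt_count == 4 there are 4 * 3 / 2 = 6 GuC pairs; assuming XE_G2G_TYPE_LIMIT is 2 (IN and OUT), that is 12 CTBs, so the allocation is 12 * G2G_BUFFER_SIZE (4K) + G2G_DESC_AREA_SIZE (4K) = 52K, and the 12 * 64-byte descriptors fit comfortably inside the 4K descriptor area asserted above.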
+
+static bool xe_guc_g2g_wanted(struct xe_device *xe)
+{
+ /* Can't do GuC to GuC communication if there is only one GuC */
+ if (xe->info.gt_count <= 1)
+ return false;
+
+ /* No current user */
+ return false;
+}
+
+static int guc_g2g_alloc(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+ u32 g2g_size;
+
+ if (guc->g2g.bo)
+ return 0;
+
+ if (gt->info.id != 0) {
+ struct xe_gt *root_gt = xe_device_get_gt(xe, 0);
+ struct xe_guc *root_guc = &root_gt->uc.guc;
+ struct xe_bo *bo;
+
+ bo = xe_bo_get(root_guc->g2g.bo);
+ if (!bo)
+ return -ENODEV;
+
+ guc->g2g.bo = bo;
+ guc->g2g.owned = false;
+ return 0;
+ }
+
+ g2g_size = guc_g2g_size(guc);
+ bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_ALL |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+ guc->g2g.bo = bo;
+ guc->g2g.owned = true;
+
+ return 0;
+}
+
+static void guc_g2g_fini(struct xe_guc *guc)
+{
+ if (!guc->g2g.bo)
+ return;
+
+ /* Unpinning the owned object is handled by generic shutdown */
+ if (!guc->g2g.owned)
+ xe_bo_put(guc->g2g.bo);
+
+ guc->g2g.bo = NULL;
+}
+
+static int guc_g2g_start(struct xe_guc *guc)
+{
+ struct xe_gt *far_gt, *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int i, j;
+ int t, err;
+ bool have_dev;
+
+ if (!guc->g2g.bo) {
+ int ret;
+
+ ret = guc_g2g_alloc(guc);
+ if (ret)
+ return ret;
+ }
+
+ /* GuC interface will need extending if more GT device types are ever created. */
+ xe_gt_assert(gt, (gt->info.type == XE_GT_TYPE_MAIN) || (gt->info.type == XE_GT_TYPE_MEDIA));
+
+ /* Channel numbering depends on whether there are multiple GTs per tile */
+ have_dev = xe->info.gt_count > xe->info.tile_count;
+
+ for_each_gt(far_gt, xe, i) {
+ u32 far_tile, far_dev;
+
+ if (far_gt->info.id == gt->info.id)
+ continue;
+
+ far_tile = gt_to_tile(far_gt)->id;
+ far_dev = G2G_DEV(far_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
+ err = guc_g2g_register(guc, far_gt, t, have_dev);
+ if (err) {
+ while (--t >= 0)
+ guc_g2g_deregister(guc, far_tile, far_dev, t);
+ goto err_deregister;
+ }
+ }
+ }
+
+ return 0;
+
+err_deregister:
+ for_each_gt(far_gt, xe, j) {
+ u32 tile, dev;
+
+ if (far_gt->info.id == gt->info.id)
+ continue;
+
+ if (j >= i)
+ break;
+
+ tile = gt_to_tile(far_gt)->id;
+ dev = G2G_DEV(far_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
+ guc_g2g_deregister(guc, tile, dev, t);
+ }
+
+ return err;
}
static void guc_fini_hw(void *arg)
{
struct xe_guc *guc = arg;
struct xe_gt *gt = guc_to_gt(guc);
+ unsigned int fw_ref;
- xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_uc_fini_hw(&guc_to_gt(guc)->uc);
- xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ guc_g2g_fini(guc);
}
/**
@@ -338,6 +666,10 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
+ ret = xe_guc_capture_init(guc);
+ if (ret)
+ goto out;
+
ret = xe_guc_ads_init(&guc->ads);
if (ret)
goto out;
@@ -416,7 +748,16 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
int xe_guc_post_load_init(struct xe_guc *guc)
{
+ int ret;
+
xe_guc_ads_populate_post_load(&guc->ads);
+
+ if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
+ ret = guc_g2g_start(guc);
+ if (ret)
+ return ret;
+ }
+
guc->submission_state.enabled = true;
return 0;
@@ -425,6 +766,7 @@ int xe_guc_post_load_init(struct xe_guc *guc)
int xe_guc_reset(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_mmio *mmio = &gt->mmio;
u32 guc_status, gdrst;
int ret;
@@ -433,15 +775,15 @@ int xe_guc_reset(struct xe_guc *guc)
if (IS_SRIOV_VF(gt_to_xe(gt)))
return xe_gt_sriov_vf_bootstrap(gt);
- xe_mmio_write32(gt, GDRST, GRDOM_GUC);
+ xe_mmio_write32(mmio, GDRST, GRDOM_GUC);
- ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
+ ret = xe_mmio_wait32(mmio, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
if (ret) {
xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst);
goto err_out;
}
- guc_status = xe_mmio_read32(gt, GUC_STATUS);
+ guc_status = xe_mmio_read32(mmio, GUC_STATUS);
if (!(guc_status & GS_MIA_IN_RESET)) {
xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n",
guc_status);
@@ -459,6 +801,7 @@ err_out:
static void guc_prepare_xfer(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_mmio *mmio = &gt->mmio;
struct xe_device *xe = guc_to_xe(guc);
u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
@@ -473,12 +816,12 @@ static void guc_prepare_xfer(struct xe_guc *guc)
shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);
/* Must program this register before loading the ucode with DMA */
- xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);
+ xe_mmio_write32(mmio, GUC_SHIM_CONTROL, shim_flags);
- xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ xe_mmio_write32(mmio, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
/* Make sure GuC receives ARAT interrupts */
- xe_mmio_rmw32(gt, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
+ xe_mmio_rmw32(mmio, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
}
/*
@@ -494,7 +837,7 @@ static int guc_xfer_rsa(struct xe_guc *guc)
if (guc->fw.rsa_size > 256) {
u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
xe_uc_fw_rsa_offset(&guc->fw);
- xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
+ xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
return 0;
}
@@ -503,7 +846,7 @@ static int guc_xfer_rsa(struct xe_guc *guc)
return -ENOMEM;
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
- xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]);
+ xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]);
return 0;
}
@@ -583,7 +926,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
* extreme thermal throttling. And a system that is that hot during boot is probably
* dead anyway!
*/
-#if defined(CONFIG_DRM_XE_DEBUG)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define GUC_LOAD_RETRY_LIMIT 20
#else
#define GUC_LOAD_RETRY_LIMIT 3
@@ -593,6 +936,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
static void guc_wait_ucode(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_mmio *mmio = &gt->mmio;
struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
ktime_t before, after, delta;
int load_done;
@@ -619,7 +963,7 @@ static void guc_wait_ucode(struct xe_guc *guc)
* timeouts rather than allowing a huge timeout each time. So basically, need
* to treat a timeout no different to a value change.
*/
- ret = xe_mmio_wait32_not(gt, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK,
+ ret = xe_mmio_wait32_not(mmio, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK,
last_status, 1000 * 1000, &status, false);
if (ret < 0)
count++;
@@ -657,7 +1001,7 @@ static void guc_wait_ucode(struct xe_guc *guc)
switch (bootrom) {
case XE_BOOTROM_STATUS_NO_KEY_FOUND:
xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
- xe_mmio_read32(gt, GUC_HEADER_INFO));
+ xe_mmio_read32(mmio, GUC_HEADER_INFO));
break;
case XE_BOOTROM_STATUS_RSA_FAILED:
@@ -672,7 +1016,7 @@ static void guc_wait_ucode(struct xe_guc *guc)
switch (ukernel) {
case XE_GUC_LOAD_STATUS_EXCEPTION:
xe_gt_err(gt, "firmware exception. EIP: %#x\n",
- xe_mmio_read32(gt, SOFT_SCRATCH(13)));
+ xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
break;
case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
@@ -824,10 +1168,10 @@ static void guc_handle_mmio_msg(struct xe_guc *guc)
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
+ msg = xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(15));
msg &= XE_GUC_RECV_MSG_EXCEPTION |
XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
- xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);
+ xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(15), 0);
if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
xe_gt_err(gt, "Received early GuC crash dump notification!\n");
@@ -844,14 +1188,14 @@ static void guc_enable_irq(struct xe_guc *guc)
REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
/* Primary GuC and media GuC share a single enable bit */
- xe_mmio_write32(gt, GUC_SG_INTR_ENABLE,
+ xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE,
REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));
/*
* There are separate mask bits for primary and media GuCs, so use
* a RMW operation to avoid clobbering the other GuC's setting.
*/
- xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0);
+ xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0);
}
int xe_guc_enable_communication(struct xe_guc *guc)
@@ -863,7 +1207,7 @@ int xe_guc_enable_communication(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
- err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
+ err = xe_memirq_init_guc(&tile->memirq, guc);
if (err)
return err;
} else {
@@ -907,7 +1251,7 @@ void xe_guc_notify(struct xe_guc *guc)
* additional payload data to the GuC but this capability is not
* used by the firmware yet. Use default value in the meantime.
*/
- xe_mmio_write32(gt, guc->notify_reg, default_notify_data);
+ xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data);
}
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
@@ -925,6 +1269,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_mmio *mmio = &gt->mmio;
u32 header, reply;
struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
@@ -934,7 +1279,6 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);
- xe_assert(xe, !xe_guc_ct_enabled(&guc->ct));
xe_assert(xe, len);
xe_assert(xe, len <= VF_SW_FLAG_COUNT);
xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
@@ -947,19 +1291,19 @@ retry:
/* Not in critical data-path, just do if else for GT type */
if (xe_gt_is_media_type(gt)) {
for (i = 0; i < len; ++i)
- xe_mmio_write32(gt, MED_VF_SW_FLAG(i),
+ xe_mmio_write32(mmio, MED_VF_SW_FLAG(i),
request[i]);
- xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX));
+ xe_mmio_read32(mmio, MED_VF_SW_FLAG(LAST_INDEX));
} else {
for (i = 0; i < len; ++i)
- xe_mmio_write32(gt, VF_SW_FLAG(i),
+ xe_mmio_write32(mmio, VF_SW_FLAG(i),
request[i]);
- xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX));
+ xe_mmio_read32(mmio, VF_SW_FLAG(LAST_INDEX));
}
xe_guc_notify(guc);
- ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
+ ret = xe_mmio_wait32(mmio, reply_reg, GUC_HXG_MSG_0_ORIGIN,
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
50000, &reply, false);
if (ret) {
@@ -969,7 +1313,7 @@ timeout:
return ret;
}
- header = xe_mmio_read32(gt, reply_reg);
+ header = xe_mmio_read32(mmio, reply_reg);
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
/*
@@ -985,7 +1329,7 @@ timeout:
BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);
- ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask,
+ ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask,
1000000, &header, false);
if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
@@ -1032,7 +1376,7 @@ proto:
for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
reply_reg.addr += sizeof(u32);
- response_buf[i] = xe_mmio_read32(gt, reply_reg);
+ response_buf[i] = xe_mmio_read32(mmio, reply_reg);
}
}
@@ -1088,10 +1432,21 @@ int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
return guc_self_cfg(guc, key, 2, val);
}
+static void xe_guc_sw_0_irq_handler(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ xe_gt_sriov_vf_migrated_event_handler(gt);
+}
+
void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
if (iir & GUC_INTR_GUC2HOST)
xe_guc_ct_irq_handler(&guc->ct);
+
+ if (iir & GUC_INTR_SW_INT_0)
+ xe_guc_sw_0_irq_handler(guc);
}
void xe_guc_sanitize(struct xe_guc *guc)
@@ -1145,17 +1500,17 @@ int xe_guc_start(struct xe_guc *guc)
void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
struct xe_gt *gt = guc_to_gt(guc);
+ unsigned int fw_ref;
u32 status;
- int err;
int i;
xe_uc_fw_print(&guc->fw, p);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
- status = xe_mmio_read32(gt, GUC_STATUS);
+ status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
drm_printf(p, "\nGuC status 0x%08x:\n", status);
drm_printf(p, "\tBootrom status = 0x%x\n",
@@ -1170,12 +1525,15 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
drm_puts(p, "\nScratch registers:\n");
for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
drm_printf(p, "\t%2d: \t0x%x\n",
- i, xe_mmio_read32(gt, SOFT_SCRATCH(i)));
+ i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ drm_puts(p, "\n");
xe_guc_ct_print(&guc->ct, p, false);
+
+ drm_puts(p, "\n");
xe_guc_submit_print(guc, p);
}
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 42116b167c98..58338be44558 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -82,4 +82,9 @@ static inline struct xe_device *guc_to_xe(struct xe_guc *guc)
return gt_to_xe(guc_to_gt(guc));
}
+static inline struct drm_device *guc_to_drm(struct xe_guc *guc)
+{
+ return &guc_to_xe(guc)->drm;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index d1902a8581ca..fab259adc380 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -5,6 +5,8 @@
#include "xe_guc_ads.h"
+#include <linux/fault-inject.h>
+
#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>
@@ -18,6 +20,7 @@
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
+#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
@@ -26,6 +29,7 @@
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
+#include "xe_gt_mcr.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
@@ -149,8 +153,7 @@ static u32 guc_ads_waklv_size(struct xe_guc_ads *ads)
static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
{
- /* FIXME: Allocate a proper capture list */
- return PAGE_ALIGN(PAGE_SIZE);
+ return PAGE_ALIGN(ads->capture_size);
}
static size_t guc_ads_um_queues_size(struct xe_guc_ads *ads)
@@ -229,11 +232,6 @@ static size_t guc_ads_size(struct xe_guc_ads *ads)
guc_ads_private_data_size(ads);
}
-static bool needs_wa_1607983814(struct xe_device *xe)
-{
- return GRAPHICS_VERx100(xe) < 1250;
-}
-
static size_t calculate_regset_size(struct xe_gt *gt)
{
struct xe_reg_sr_entry *sr_entry;
@@ -248,7 +246,7 @@ static size_t calculate_regset_size(struct xe_gt *gt)
count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;
- if (needs_wa_1607983814(gt_to_xe(gt)))
+ if (XE_WA(gt, 1607983814))
count += LNCFCMOCS_REG_COUNT;
return count * sizeof(struct guc_mmio_reg);
@@ -357,6 +355,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
&offset, &remain);
+ if (XE_WA(gt, 14022866841))
+ guc_waklv_enable_simple(ads,
+ GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO,
+ &offset, &remain);
+
/*
* On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
* the default value for this register is determined to be 0xC40. This could change in the
@@ -404,6 +407,7 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
struct xe_bo *bo;
ads->golden_lrc_size = calculate_golden_lrc_size(ads);
+ ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
ads->regset_size = calculate_regset_size(gt);
ads->ads_waklv_size = calculate_waklv_size(ads);
@@ -418,14 +422,15 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_guc_ads_init, ERRNO); /* See xe_pci_probe() */
/**
* xe_guc_ads_init_post_hwconfig - initialize ADS post hwconfig load
* @ads: Additional data structures object
*
- * Recalcuate golden_lrc_size & regset_size as the number hardware engines may
- * have changed after the hwconfig was loaded. Also verify the new sizes fit in
- * the already allocated ADS buffer object.
+ * Recalculate golden_lrc_size, capture_size and regset_size as the number of
+ * hardware engines may have changed after the hwconfig was loaded. Also verify
+ * the new sizes fit in the already allocated ADS buffer object.
*
* Return: 0 on success, negative error code on error.
*/
@@ -437,6 +442,8 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads)
xe_gt_assert(gt, ads->bo);
ads->golden_lrc_size = calculate_golden_lrc_size(ads);
+ /* Recalculate capture size using the worst-case size */
+ ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
ads->regset_size = calculate_regset_size(gt);
xe_gt_assert(gt, ads->golden_lrc_size +
@@ -536,20 +543,148 @@ static void guc_mapping_table_init(struct xe_gt *gt,
}
}
-static void guc_capture_list_init(struct xe_guc_ads *ads)
+static u32 guc_get_capture_engine_mask(struct xe_gt *gt, struct iosys_map *info_map,
+ enum guc_capture_list_class_type capture_class)
{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 mask;
+
+ switch (capture_class) {
+ case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
+ mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS]);
+ mask |= info_map_read(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS]);
+ break;
+ case GUC_CAPTURE_LIST_CLASS_VIDEO:
+ mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS]);
+ break;
+ case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
+ mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS]);
+ break;
+ case GUC_CAPTURE_LIST_CLASS_BLITTER:
+ mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS]);
+ break;
+ case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
+ mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS]);
+ break;
+ default:
+ mask = 0;
+ }
+
+ return mask;
+}
+
+static inline bool get_capture_list(struct xe_guc_ads *ads, struct xe_guc *guc, struct xe_gt *gt,
+ int owner, int type, int class, u32 *total_size, size_t *size,
+ void **pptr)
+{
+ *size = 0;
+
+ if (!xe_guc_capture_getlistsize(guc, owner, type, class, size)) {
+ if (*total_size + *size > ads->capture_size)
+ xe_gt_dbg(gt, "Capture size overflow: %zu vs %d\n",
+ *total_size + *size, ads->capture_size);
+ else if (!xe_guc_capture_getlist(guc, owner, type, class, pptr))
+ return false;
+ }
+
+ return true;
+}
+
+static int guc_capture_prep_lists(struct xe_guc_ads *ads)
+{
+ struct xe_guc *guc = ads_to_guc(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
+ u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
+ struct iosys_map info_map;
+ size_t size = 0;
+ void *ptr;
int i, j;
- u32 addr = xe_bo_ggtt_addr(ads->bo) + guc_ads_capture_offset(ads);
- /* FIXME: Populate a proper capture list */
+ /*
+ * The GuC capture steered reg-list needs to be allocated and initialized
+ * after the GuC hwconfig is available, which is guaranteed by this point.
+ */
+ xe_guc_capture_steered_list_init(ads_to_guc(ads));
+
+ capture_offset = guc_ads_capture_offset(ads);
+ ads_ggtt = xe_bo_ggtt_addr(ads->bo);
+ info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
+ offsetof(struct __guc_ads_blob, system_info));
+
+ /* first, set aside the first page for a capture_list with zero descriptors */
+ total_size = PAGE_SIZE;
+ if (!xe_guc_capture_getnullheader(guc, &ptr, &size))
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr, size);
+
+ null_ggtt = ads_ggtt + capture_offset;
+ capture_offset += PAGE_SIZE;
+
+ /*
+ * Populate the capture list: at this point the ADS is already allocated
+ * and mapped to its worst-case size
+ */
for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
- for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
- ads_blob_write(ads, ads.capture_instance[i][j], addr);
- ads_blob_write(ads, ads.capture_class[i][j], addr);
+ bool write_empty_list;
+
+ for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) {
+ u32 engine_mask = guc_get_capture_engine_mask(gt, &info_map, j);
+ /* null list if we dont have said engine or list */
+ if (!engine_mask) {
+ ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
+ ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
+ continue;
+ }
+
+ /* engine exists: start with engine-class registers */
+ write_empty_list = get_capture_list(ads, guc, gt, i,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ j, &total_size, &size, &ptr);
+ if (!write_empty_list) {
+ ads_blob_write(ads, ads.capture_class[i][j],
+ ads_ggtt + capture_offset);
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
+ ptr, size);
+ total_size += size;
+ capture_offset += size;
+ } else {
+ ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
+ }
+
+ /* engine exists: next, engine-instance registers */
+ write_empty_list = get_capture_list(ads, guc, gt, i,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
+ j, &total_size, &size, &ptr);
+ if (!write_empty_list) {
+ ads_blob_write(ads, ads.capture_instance[i][j],
+ ads_ggtt + capture_offset);
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
+ ptr, size);
+ total_size += size;
+ capture_offset += size;
+ } else {
+ ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
+ }
}
- ads_blob_write(ads, ads.capture_global[i], addr);
+ /* global registers are last in our PF/VF loops */
+ write_empty_list = get_capture_list(ads, guc, gt, i,
+ GUC_STATE_CAPTURE_TYPE_GLOBAL,
+ 0, &total_size, &size, &ptr);
+ if (!write_empty_list) {
+ ads_blob_write(ads, ads.capture_global[i], ads_ggtt + capture_offset);
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr,
+ size);
+ total_size += size;
+ capture_offset += size;
+ } else {
+ ads_blob_write(ads, ads.capture_global[i], null_ggtt);
+ }
}
+
+ if (ads->capture_size != PAGE_ALIGN(total_size))
+ xe_gt_dbg(gt, "ADS capture alloc size changed from %d to %d\n",
+ ads->capture_size, PAGE_ALIGN(total_size));
+ return PAGE_ALIGN(total_size);
}
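Under the assumptions of this patch, the capture region produced above lays out roughly as follows (a sketch, not authoritative):

/*
 * ads_ggtt + capture_offset (before the loop bumps it):
 *   +0x0000  null list, one page, shared by every absent class/instance/
 *            global slot (those ads.capture_* entries point at null_ggtt)
 *   +0x1000  first non-empty list, sized by xe_guc_capture_getlistsize()
 *   ...      further non-empty lists appended back to back, each one
 *            referenced from ads.capture_class[i][j],
 *            ads.capture_instance[i][j] or ads.capture_global[i]
 */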
static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
@@ -562,6 +697,20 @@ static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
.flags = reg.masked ? GUC_REGSET_MASKED : 0,
};
+ if (reg.mcr) {
+ struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
+ u8 group, instance;
+
+ bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
+ &group, &instance);
+
+ if (steer) {
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
+ entry.flags |= GUC_REGSET_STEERING_NEEDED;
+ }
+ }
+
xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
&entry, sizeof(entry));
}
@@ -570,7 +719,6 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
struct iosys_map *regset_map,
struct xe_hw_engine *hwe)
{
- struct xe_device *xe = ads_to_xe(ads);
struct xe_hw_engine *hwe_rcs_reset_domain =
xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
struct xe_reg_sr_entry *entry;
@@ -601,8 +749,7 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
}
- /* Wa_1607983814 */
- if (needs_wa_1607983814(xe) && hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
guc_mmio_regset_write_one(ads, regset_map,
XELP_LNCFCMOCS(i), count++);
@@ -684,7 +831,7 @@ static void guc_doorbell_init(struct xe_guc_ads *ads)
if (GRAPHICS_VER(xe) >= 12 && !IS_DGFX(xe)) {
u32 distdbreg =
- xe_mmio_read32(gt, DIST_DBS_POPULATED);
+ xe_mmio_read32(&gt->mmio, DIST_DBS_POPULATED);
ads_blob_write(ads,
system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
@@ -738,7 +885,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
guc_mmio_reg_state_init(ads);
guc_prep_golden_lrc_null(ads);
guc_mapping_table_init(gt, &info_map);
- guc_capture_list_init(ads);
+ guc_capture_prep_lists(ads);
guc_doorbell_init(ads);
guc_waklv_init(ads);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads_types.h b/drivers/gpu/drm/xe/xe_guc_ads_types.h
index 2de5decfe0fd..70c132458ac3 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ads_types.h
@@ -22,6 +22,8 @@ struct xe_guc_ads {
u32 regset_size;
/** @ads_waklv_size: total waklv size supported by platform */
u32 ads_waklv_size;
+ /** @capture_size: size of register set passed to GuC for capture */
+ u32 capture_size;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
new file mode 100644
index 000000000000..f6d523e4c5fe
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -0,0 +1,2017 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021-2024 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "abi/guc_actions_abi.h"
+#include "abi/guc_capture_abi.h"
+#include "abi/guc_log_abi.h"
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "regs/xe_regs.h"
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue_types.h"
+#include "xe_gt.h"
+#include "xe_gt_mcr.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_ads.h"
+#include "xe_guc_capture.h"
+#include "xe_guc_capture_types.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_exec_queue_types.h"
+#include "xe_guc_log.h"
+#include "xe_guc_submit_types.h"
+#include "xe_guc_submit.h"
+#include "xe_hw_engine_types.h"
+#include "xe_hw_engine.h"
+#include "xe_lrc.h"
+#include "xe_macros.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_sched_job.h"
+
+/*
+ * struct __guc_capture_bufstate
+ *
+ * Book-keeping structure used to track read and write pointers
+ * as we extract error capture data from the GuC-log-buffer's
+ * error-capture region as a stream of dwords.
+ */
+struct __guc_capture_bufstate {
+ u32 size;
+ u32 data_offset;
+ u32 rd;
+ u32 wr;
+};
+
+/*
+ * struct __guc_capture_parsed_output - extracted error capture node
+ *
+ * A single unit of extracted error-capture output data grouped together
+ * at an engine-instance level. We keep these nodes in a linked list.
+ * See cachelist and outlist below.
+ */
+struct __guc_capture_parsed_output {
+ /*
+ * A single set of 3 capture lists: a global list,
+ * an engine-class list and an engine-instance list.
+ * outlist in __guc_capture_parsed_output will keep
+ * a linked list of these nodes that will eventually
+ * be detached from outlist and attached to
+ * xe_codedump in response to a context reset
+ */
+ struct list_head link;
+ bool is_partial;
+ u32 eng_class;
+ u32 eng_inst;
+ u32 guc_id;
+ u32 lrca;
+ u32 type;
+ bool locked;
+ enum xe_hw_engine_snapshot_source_id source;
+ struct gcap_reg_list_info {
+ u32 vfid;
+ u32 num_regs;
+ struct guc_mmio_reg *regs;
+ } reginfo[GUC_STATE_CAPTURE_TYPE_MAX];
+#define GCAP_PARSED_REGLIST_INDEX_GLOBAL BIT(GUC_STATE_CAPTURE_TYPE_GLOBAL)
+#define GCAP_PARSED_REGLIST_INDEX_ENGCLASS BIT(GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS)
+};
+
+/*
+ * Define all device tables of GuC error capture register lists
+ * NOTE:
+ * For engine-registers, GuC only needs the register offsets
+ * from the engine-mmio-base
+ *
+ * 64 bit registers need 2 entries for low 32 bit register and high 32 bit
+ * register, for example:
+ * Register data_type flags mask Register name
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ * { XXX_REG_HI(0), REG_64BIT_HI_DW,, 0, 0, "XXX_REG"},
+ * 1. data_type: Indicate is hi/low 32 bit for a 64 bit register
+ * A 64 bit register define requires 2 consecutive entries,
+ * with low dword first and hi dword the second.
+ * 2. Register name: null for incompleted define
+ * 3. Incorrect order will trigger XE_WARN.
+ */
+#define COMMON_XELP_BASE_GLOBAL \
+ { FORCEWAKE_GT, REG_32BIT, 0, 0, "FORCEWAKE_GT"}
+
+#define COMMON_BASE_ENGINE_INSTANCE \
+ { RING_HWSTAM(0), REG_32BIT, 0, 0, "HWSTAM"}, \
+ { RING_HWS_PGA(0), REG_32BIT, 0, 0, "RING_HWS_PGA"}, \
+ { RING_HEAD(0), REG_32BIT, 0, 0, "RING_HEAD"}, \
+ { RING_TAIL(0), REG_32BIT, 0, 0, "RING_TAIL"}, \
+ { RING_CTL(0), REG_32BIT, 0, 0, "RING_CTL"}, \
+ { RING_MI_MODE(0), REG_32BIT, 0, 0, "RING_MI_MODE"}, \
+ { RING_MODE(0), REG_32BIT, 0, 0, "RING_MODE"}, \
+ { RING_ESR(0), REG_32BIT, 0, 0, "RING_ESR"}, \
+ { RING_EMR(0), REG_32BIT, 0, 0, "RING_EMR"}, \
+ { RING_EIR(0), REG_32BIT, 0, 0, "RING_EIR"}, \
+ { RING_IMR(0), REG_32BIT, 0, 0, "RING_IMR"}, \
+ { RING_IPEHR(0), REG_32BIT, 0, 0, "IPEHR"}, \
+ { RING_INSTDONE(0), REG_32BIT, 0, 0, "RING_INSTDONE"}, \
+ { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, "INDIRECT_RING_STATE"}, \
+ { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
+ { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, "ACTHD"}, \
+ { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
+ { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_BBADDR"}, \
+ { RING_START(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
+ { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_START"}, \
+ { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
+ { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, "RING_DMA_FADD"}, \
+ { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
+ { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_STATUS"}, \
+ { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL}, \
+ { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, "RING_EXECLIST_SQ_CONTENTS"}
+
+#define COMMON_XELP_RC_CLASS \
+ { RCU_MODE, REG_32BIT, 0, 0, "RCU_MODE"}
+
+#define COMMON_XELP_RC_CLASS_INSTDONE \
+ { SC_INSTDONE, REG_32BIT, 0, 0, "SC_INSTDONE"}, \
+ { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA"}, \
+ { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, "SC_INSTDONE_EXTRA2"}
+
+#define XELP_VEC_CLASS_REGS \
+ { SFC_DONE(0), 0, 0, 0, "SFC_DONE[0]"}, \
+ { SFC_DONE(1), 0, 0, 0, "SFC_DONE[1]"}, \
+ { SFC_DONE(2), 0, 0, 0, "SFC_DONE[2]"}, \
+ { SFC_DONE(3), 0, 0, 0, "SFC_DONE[3]"}
+
+/* XE_LP Global */
+static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
+ COMMON_XELP_BASE_GLOBAL,
+};
+
+/* Render / Compute Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_rc_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* Render / Compute Engine-Class */
+static const struct __guc_mmio_reg_descr xe_rc_class_regs[] = {
+ COMMON_XELP_RC_CLASS,
+ COMMON_XELP_RC_CLASS_INSTDONE,
+};
+
+/* Render / Compute Engine-Class for xehpg */
+static const struct __guc_mmio_reg_descr xe_hpg_rc_class_regs[] = {
+ COMMON_XELP_RC_CLASS,
+};
+
+/* Media Decode/Encode Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_vd_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* Video Enhancement Engine-Class */
+static const struct __guc_mmio_reg_descr xe_vec_class_regs[] = {
+ XELP_VEC_CLASS_REGS,
+};
+
+/* Video Enhancement Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_vec_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* Blitter Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_blt_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* XE_LP - GSC Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/*
+ * Empty list to prevent warnings about unknown class/instance types
+ * as not all class/instance types have entries on all platforms.
+ */
+static const struct __guc_mmio_reg_descr empty_regs_list[] = {
+};
+
+#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x)
+#define TO_GCAP_DEF_TYPE(x) (GUC_STATE_CAPTURE_TYPE_##x)
+#define MAKE_REGLIST(regslist, regsowner, regstype, class) \
+ { \
+ regslist, \
+ ARRAY_SIZE(regslist), \
+ TO_GCAP_DEF_OWNER(regsowner), \
+ TO_GCAP_DEF_TYPE(regstype), \
+ class \
+ }
+
+/* List of lists for legacy graphics product version < 1255 */
+static const struct __guc_mmio_reg_descr_group xe_lp_lists[] = {
+ MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(xe_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(xe_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ {}
+};
+
+/* List of lists for graphics product version >= 1255 */
+static const struct __guc_mmio_reg_descr_group xe_hpg_lists[] = {
+ MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(xe_hpg_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(xe_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ {}
+};
+
+static const char * const capture_list_type_names[] = {
+ "Global",
+ "Class",
+ "Instance",
+};
+
+static const char * const capture_engine_class_names[] = {
+ "Render/Compute",
+ "Video",
+ "VideoEnhance",
+ "Blitter",
+ "GSC-Other",
+};
+
+struct __guc_capture_ads_cache {
+ bool is_valid;
+ void *ptr;
+ size_t size;
+ int status;
+};
+
+struct xe_guc_state_capture {
+ const struct __guc_mmio_reg_descr_group *reglists;
+ /**
+ * NOTE: steered registers have multiple instances depending on the HW configuration
+ * (slices or dual-sub-slices) and thus depend on the HW fuses discovered
+ */
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_capture_ads_cache ads_cache[GUC_CAPTURE_LIST_INDEX_MAX]
+ [GUC_STATE_CAPTURE_TYPE_MAX]
+ [GUC_CAPTURE_LIST_CLASS_MAX];
+ void *ads_null_cache;
+ struct list_head cachelist;
+#define PREALLOC_NODES_MAX_COUNT (3 * GUC_MAX_ENGINE_CLASSES * GUC_MAX_INSTANCES_PER_CLASS)
+#define PREALLOC_NODES_DEFAULT_NUMREGS 64
+
+ int max_mmio_per_node;
+ struct list_head outlist;
+};
+
+static void
+guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node);
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_device_reglist(struct xe_device *xe)
+{
+ if (GRAPHICS_VERx100(xe) >= 1255)
+ return xe_hpg_lists;
+ else
+ return xe_lp_lists;
+}
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
+ u32 owner, u32 type, enum guc_capture_list_class_type capture_class)
+{
+ int i;
+
+ if (!reglists)
+ return NULL;
+
+ for (i = 0; reglists[i].list; ++i) {
+ if (reglists[i].owner == owner && reglists[i].type == type &&
+ (reglists[i].engine == capture_class ||
+ reglists[i].type == GUC_STATE_CAPTURE_TYPE_GLOBAL))
+ return &reglists[i];
+ }
+
+ return NULL;
+}
+
+const struct __guc_mmio_reg_descr_group *
+xe_guc_capture_get_reg_desc_list(struct xe_gt *gt, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, bool is_ext)
+{
+ const struct __guc_mmio_reg_descr_group *reglists;
+
+ if (is_ext) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ reglists = guc->capture->extlists;
+ } else {
+ reglists = guc_capture_get_device_reglist(gt_to_xe(gt));
+ }
+ return guc_capture_get_one_list(reglists, owner, type, capture_class);
+}
+
+struct __ext_steer_reg {
+ const char *name;
+ struct xe_reg_mcr reg;
+};
+
+static const struct __ext_steer_reg xe_extregs[] = {
+ {"SAMPLER_INSTDONE", SAMPLER_INSTDONE},
+ {"ROW_INSTDONE", ROW_INSTDONE}
+};
+
+static const struct __ext_steer_reg xehpg_extregs[] = {
+ {"SC_INSTDONE", XEHPG_SC_INSTDONE},
+ {"SC_INSTDONE_EXTRA", XEHPG_SC_INSTDONE_EXTRA},
+ {"SC_INSTDONE_EXTRA2", XEHPG_SC_INSTDONE_EXTRA2},
+ {"INSTDONE_GEOM_SVGUNIT", XEHPG_INSTDONE_GEOM_SVGUNIT}
+};
+
+static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
+ const struct __ext_steer_reg *extlist,
+ int slice_id, int subslice_id)
+{
+ if (!ext || !extlist)
+ return;
+
+ ext->reg = XE_REG(extlist->reg.__reg.addr);
+ ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+ ext->regname = extlist->name;
+}
+
+static int
+__alloc_ext_regs(struct drm_device *drm, struct __guc_mmio_reg_descr_group *newlist,
+ const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
+{
+ struct __guc_mmio_reg_descr *list;
+
+ list = drmm_kzalloc(drm, num_regs * sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ newlist->list = list;
+ newlist->num_regs = num_regs;
+ newlist->owner = rootlist->owner;
+ newlist->engine = rootlist->engine;
+ newlist->type = rootlist->type;
+
+ return 0;
+}
+
+static int guc_capture_get_steer_reg_num(struct xe_device *xe)
+{
+ int num = ARRAY_SIZE(xe_extregs);
+
+ if (GRAPHICS_VERx100(xe) >= 1255)
+ num += ARRAY_SIZE(xehpg_extregs);
+
+ return num;
+}
+
+static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u16 slice, subslice;
+ int iter, i, total = 0;
+ const struct __guc_mmio_reg_descr_group *lists = guc->capture->reglists;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_mmio_reg_descr *extarray;
+ bool has_xehpg_extregs = GRAPHICS_VERx100(gt_to_xe(gt)) >= 1255;
+ struct drm_device *drm = &gt_to_xe(gt)->drm;
+ bool has_rcs_ccs = false;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ /*
+ * If the GT has no RCS/CCS engines, there is no need to allocate a
+ * steered list. Currently only RCS/CCS have steered registers; if
+ * other engine types gain steered registers in the future, this
+ * check will need to be extended.
+ */
+ for_each_hw_engine(hwe, gt, id) {
+ if (xe_engine_class_to_guc_capture_class(hwe->class) ==
+ GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) {
+ has_rcs_ccs = true;
+ break;
+ }
+ }
+
+ if (!has_rcs_ccs)
+ return;
+
+ /* steered registers currently only exist for the render-class */
+ list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE);
+ /*
+ * Skip if this platform has no engine class registers or if extlists
+ * was previously allocated
+ */
+ if (!list || guc->capture->extlists)
+ return;
+
+ total = bitmap_weight(gt->fuse_topo.g_dss_mask, sizeof(gt->fuse_topo.g_dss_mask) * 8) *
+ guc_capture_get_steer_reg_num(guc_to_xe(guc));
+
+ if (!total)
+ return;
+
+ /* allocate an extra for an end marker */
+ extlists = drmm_kzalloc(drm, 2 * sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
+ if (!extlists)
+ return;
+
+ if (__alloc_ext_regs(drm, &extlists[0], list, total)) {
+ drmm_kfree(drm, extlists);
+ return;
+ }
+
+ /* For steering registers, the list is generated at run-time */
+ extarray = (struct __guc_mmio_reg_descr *)extlists[0].list;
+ for_each_dss_steering(iter, gt, slice, subslice) {
+ for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
+ __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ ++extarray;
+ }
+
+ if (has_xehpg_extregs)
+ for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
+ __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ }
+
+ extlists[0].num_regs = total;
+
+ xe_gt_dbg(guc_to_gt(guc), "capture found %d ext-regs.\n", total);
+ guc->capture->extlists = extlists;
+}
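A rough worked example of the sizing here (hedged, since the enabled-DSS count is fuse dependent): on a graphics version >= 1255 part with 32 enabled DSS, guc_capture_get_steer_reg_num() returns ARRAY_SIZE(xe_extregs) + ARRAY_SIZE(xehpg_extregs) = 2 + 4 = 6, so the run-time extlist holds 32 * 6 = 192 descriptors.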
+
+static int
+guc_capture_list_init(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, struct guc_mmio_reg *ptr,
+ u16 num_entries)
+{
+ u32 ptr_idx = 0, list_idx = 0;
+ const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
+ struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
+ const struct __guc_mmio_reg_descr_group *match;
+ u32 list_num;
+
+ if (!reglists)
+ return -ENODEV;
+
+ match = guc_capture_get_one_list(reglists, owner, type, capture_class);
+ if (!match)
+ return -ENODATA;
+
+ list_num = match->num_regs;
+ for (list_idx = 0; ptr_idx < num_entries && list_idx < list_num; ++list_idx, ++ptr_idx) {
+ ptr[ptr_idx].offset = match->list[list_idx].reg.addr;
+ ptr[ptr_idx].value = 0xDEADF00D;
+ ptr[ptr_idx].flags = match->list[list_idx].flags;
+ ptr[ptr_idx].mask = match->list[list_idx].mask;
+ }
+
+ match = guc_capture_get_one_list(extlists, owner, type, capture_class);
+ if (match)
+ for (ptr_idx = list_num, list_idx = 0;
+ ptr_idx < num_entries && list_idx < match->num_regs;
+ ++ptr_idx, ++list_idx) {
+ ptr[ptr_idx].offset = match->list[list_idx].reg.addr;
+ ptr[ptr_idx].value = 0xDEADF00D;
+ ptr[ptr_idx].flags = match->list[list_idx].flags;
+ ptr[ptr_idx].mask = match->list[list_idx].mask;
+ }
+
+ if (ptr_idx < num_entries)
+ xe_gt_dbg(guc_to_gt(guc), "Got short capture reglist init: %d out of %d.\n",
+ ptr_idx, num_entries);
+
+ return 0;
+}
+
+static int
+guc_cap_list_num_regs(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class)
+{
+ const struct __guc_mmio_reg_descr_group *match;
+ int num_regs = 0;
+
+ match = guc_capture_get_one_list(guc->capture->reglists, owner, type, capture_class);
+ if (match)
+ num_regs = match->num_regs;
+
+ match = guc_capture_get_one_list(guc->capture->extlists, owner, type, capture_class);
+ if (match)
+ num_regs += match->num_regs;
+ else
+ /*
+ * If a caller wants the full register dump size but we have
+ * not yet got the hw-config, which is before max_mmio_per_node
+ * is initialized, then provide a worst-case number for
+ * extlists based on max dss fuse bits, but only ever for
+ * render/compute
+ */
+ if (owner == GUC_CAPTURE_LIST_INDEX_PF &&
+ type == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS &&
+ capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE &&
+ !guc->capture->max_mmio_per_node)
+ num_regs += guc_capture_get_steer_reg_num(guc_to_xe(guc)) *
+ XE_MAX_DSS_FUSE_BITS;
+
+ return num_regs;
+}
+
+static int
+guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class,
+ size_t *size, bool is_purpose_est)
+{
+ struct xe_guc_state_capture *gc = guc->capture;
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct __guc_capture_ads_cache *cache;
+ int num_regs;
+
+ xe_gt_assert(gt, type < GUC_STATE_CAPTURE_TYPE_MAX);
+ xe_gt_assert(gt, capture_class < GUC_CAPTURE_LIST_CLASS_MAX);
+
+ cache = &gc->ads_cache[owner][type][capture_class];
+ if (!gc->reglists) {
+ xe_gt_warn(gt, "No capture reglist for this device\n");
+ return -ENODEV;
+ }
+
+ if (cache->is_valid) {
+ *size = cache->size;
+ return cache->status;
+ }
+
+ if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
+ !guc_capture_get_one_list(gc->reglists, owner, type, capture_class)) {
+ if (type == GUC_STATE_CAPTURE_TYPE_GLOBAL)
+ xe_gt_warn(gt, "Missing capture reglist: global!\n");
+ else
+ xe_gt_warn(gt, "Missing capture reglist: %s(%u):%s(%u)!\n",
+ capture_list_type_names[type], type,
+ capture_engine_class_names[capture_class], capture_class);
+ return -ENODEV;
+ }
+
+ num_regs = guc_cap_list_num_regs(guc, owner, type, capture_class);
+ /* intentional empty lists can exist depending on hw config */
+ if (!num_regs)
+ return -ENODATA;
+
+ if (size)
+ *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
+ (num_regs * sizeof(struct guc_mmio_reg)));
+
+ return 0;
+}
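For a sense of scale, assuming struct guc_mmio_reg consists of the four u32 fields populated in guc_capture_list_init() above (16 bytes): a 30-register list needs the guc_debug_capture_list header plus 30 * 16 = 480 bytes, which PAGE_ALIGN() rounds up to a single 4K page.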
+
+/**
+ * xe_guc_capture_getlistsize - Get list size for owner/type/class combination
+ * @guc: The GuC object
+ * @owner: PF/VF owner
+ * @type: GuC capture register type
+ * @capture_class: GuC capture engine class id
+ * @size: Point to the size
+ *
+ * This function will get the list for the owner/type/class combination, and
+ * return the page aligned list size.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int
+xe_guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, size_t *size)
+{
+ return guc_capture_getlistsize(guc, owner, type, capture_class, size, false);
+}
+
+/**
+ * xe_guc_capture_getlist - Get register capture list for owner/type/class
+ * combination
+ * @guc: The GuC object
+ * @owner: PF/VF owner
+ * @type: GuC capture register type
+ * @capture_class: GuC capture engine class id
+ * @outptr: Point to cached register capture list
+ *
+ * This function will get the register capture list for the owner/type/class
+ * combination.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int
+xe_guc_capture_getlist(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, void **outptr)
+{
+ struct xe_guc_state_capture *gc = guc->capture;
+ struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][capture_class];
+ struct guc_debug_capture_list *listnode;
+ int ret, num_regs;
+ u8 *caplist, *tmp;
+ size_t size = 0;
+
+ if (!gc->reglists)
+ return -ENODEV;
+
+ if (cache->is_valid) {
+ *outptr = cache->ptr;
+ return cache->status;
+ }
+
+ ret = xe_guc_capture_getlistsize(guc, owner, type, capture_class, &size);
+ if (ret) {
+ cache->is_valid = true;
+ cache->ptr = NULL;
+ cache->size = 0;
+ cache->status = ret;
+ return ret;
+ }
+
+ caplist = drmm_kzalloc(guc_to_drm(guc), size, GFP_KERNEL);
+ if (!caplist)
+ return -ENOMEM;
+
+ /* populate capture list header */
+ tmp = caplist;
+ num_regs = guc_cap_list_num_regs(guc, owner, type, capture_class);
+ listnode = (struct guc_debug_capture_list *)tmp;
+ listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs);
+
+	/* populate the list of register descriptors */
+ tmp += sizeof(struct guc_debug_capture_list);
+ guc_capture_list_init(guc, owner, type, capture_class,
+ (struct guc_mmio_reg *)tmp, num_regs);
+
+ /* cache this list */
+ cache->is_valid = true;
+ cache->ptr = caplist;
+ cache->size = size;
+ cache->status = 0;
+
+ *outptr = caplist;
+
+ return 0;
+}
+
+/**
+ * xe_guc_capture_getnullheader - Get a null list for register capture
+ * @guc: The GuC object
+ * @outptr: Pointer to the cached null capture list
+ * @size: Pointer to the returned size
+ *
+ * This function allocates a null (empty) list header for register capture.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int
+xe_guc_capture_getnullheader(struct xe_guc *guc, void **outptr, size_t *size)
+{
+ struct xe_guc_state_capture *gc = guc->capture;
+ int tmp = sizeof(u32) * 4;
+ void *null_header;
+
+ if (gc->ads_null_cache) {
+ *outptr = gc->ads_null_cache;
+ *size = tmp;
+ return 0;
+ }
+
+ null_header = drmm_kzalloc(guc_to_drm(guc), tmp, GFP_KERNEL);
+ if (!null_header)
+ return -ENOMEM;
+
+ gc->ads_null_cache = null_header;
+ *outptr = null_header;
+ *size = tmp;
+
+ return 0;
+}
+
+/**
+ * xe_guc_capture_ads_input_worst_size - Calculate the worst-case size for GuC register capture
+ * @guc: Pointer to the xe_guc structure
+ *
+ * Calculate the worst-case size for GuC register capture by including all possible engine classes.
+ *
+ * Returns: Calculated size
+ */
+size_t xe_guc_capture_ads_input_worst_size(struct xe_guc *guc)
+{
+ size_t total_size, class_size, instance_size, global_size;
+ int i, j;
+
+ /*
+	 * This function calculates the worst-case register list size by
+	 * including all possible engine classes. It is called during the
+ * first of a two-phase GuC (and ADS-population) initialization
+ * sequence, that is, during the pre-hwconfig phase before we have
+ * the exact engine fusing info.
+ */
+ total_size = PAGE_SIZE; /* Pad a page in front for empty lists */
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
+ for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) {
+ if (xe_guc_capture_getlistsize(guc, i,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ j, &class_size) < 0)
+ class_size = 0;
+ if (xe_guc_capture_getlistsize(guc, i,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
+ j, &instance_size) < 0)
+ instance_size = 0;
+ total_size += class_size + instance_size;
+ }
+ if (xe_guc_capture_getlistsize(guc, i,
+ GUC_STATE_CAPTURE_TYPE_GLOBAL,
+ 0, &global_size) < 0)
+ global_size = 0;
+ total_size += global_size;
+ }
+
+ return PAGE_ALIGN(total_size);
+}
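+
+/*
+ * Worked example (illustrative; assumes GUC_CAPTURE_LIST_INDEX_MAX == 2,
+ * i.e. PF and VF): if every getlistsize() call above returned one 4K page,
+ * the total would be the leading PAGE_SIZE pad plus
+ * 2 * (2 * GUC_CAPTURE_LIST_CLASS_MAX + 1) pages before the final
+ * PAGE_ALIGN(). The estimate is deliberately pessimistic because engine
+ * fusing is not yet known at this point of init.
+ */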
+
+static int guc_capture_output_size_est(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ int capture_size = 0;
+ size_t tmp = 0;
+
+ if (!guc->capture)
+ return -ENODEV;
+
+ /*
+	 * If every single engine-instance suffered a failure in quick succession but
+	 * the failures were all unrelated, then a burst of multiple error-capture
+	 * events would dump registers for every engine instance, one at a time. In
+	 * this case, GuC would even dump the global registers repeatedly.
+ *
+ * For each engine instance, there would be 1 x guc_state_capture_group_t output
+ * followed by 3 x guc_state_capture_t lists. The latter is how the register
+ * dumps are split across different register types (where the '3' are global vs class
+ * vs instance).
+ */
+ for_each_hw_engine(hwe, gt, id) {
+ enum guc_capture_list_class_type capture_class;
+
+ capture_class = xe_engine_class_to_guc_capture_class(hwe->class);
+ capture_size += sizeof(struct guc_state_capture_group_header_t) +
+ (3 * sizeof(struct guc_state_capture_header_t));
+
+ if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_GLOBAL,
+ 0, &tmp, true))
+ capture_size += tmp;
+ if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ capture_class, &tmp, true))
+ capture_size += tmp;
+ if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
+ capture_class, &tmp, true))
+ capture_size += tmp;
+ }
+
+ return capture_size;
+}
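+
+/*
+ * Worked example for the estimate above (illustrative; the list sizes are
+ * assumptions, not real platform numbers): for a GT with 8 engine instances
+ * where the global, class and instance lists each fit in 1K, the loop
+ * accumulates 8 * (group header + 3 capture headers + 3K), i.e. a little
+ * over 24K of worst-case capture output.
+ */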
+
+/*
+ * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
+ * before Xe can read the data out and process it
+ */
+#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
+
+static void check_guc_capture_size(struct xe_guc *guc)
+{
+ int capture_size = guc_capture_output_size_est(guc);
+ int spare_size = capture_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
+ u32 buffer_size = xe_guc_log_section_size_capture(&guc->log);
+
+ /*
+ * NOTE: capture_size is much smaller than the capture region
+ * allocation (DG2: <80K vs 1MB).
+	 * Additionally, it's based on the space needed to fit all engines getting
+	 * reset at once within the same G2H handler task slot. This is very
+	 * unlikely. However, if GuC really does run out of space for whatever
+	 * reason, we will see a separate warning message when processing the
+	 * G2H event capture-notification; search for:
+	 * xe_guc_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
+ */
+ if (capture_size < 0)
+ xe_gt_dbg(guc_to_gt(guc),
+ "Failed to calculate error state capture buffer minimum size: %d!\n",
+ capture_size);
+ if (capture_size > buffer_size)
+ xe_gt_dbg(guc_to_gt(guc), "Error state capture buffer maybe small: %d < %d\n",
+ buffer_size, capture_size);
+ else if (spare_size > buffer_size)
+ xe_gt_dbg(guc_to_gt(guc),
+ "Error state capture buffer lacks spare size: %d < %d (min = %d)\n",
+ buffer_size, spare_size, capture_size);
+}
+
+static void
+guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
+ struct list_head *list)
+{
+ list_add(&node->link, list);
+}
+
+static void
+guc_capture_add_node_to_outlist(struct xe_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ guc_capture_remove_stale_matches_from_list(gc, node);
+ guc_capture_add_node_to_list(node, &gc->outlist);
+}
+
+static void
+guc_capture_add_node_to_cachelist(struct xe_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ guc_capture_add_node_to_list(node, &gc->cachelist);
+}
+
+static void
+guc_capture_free_outlist_node(struct xe_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *n)
+{
+ if (n) {
+ n->locked = 0;
+ list_del(&n->link);
+ /* put node back to cache list */
+ guc_capture_add_node_to_cachelist(gc, n);
+ }
+}
+
+static void
+guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ struct __guc_capture_parsed_output *n, *ntmp;
+ int guc_id = node->guc_id;
+
+ list_for_each_entry_safe(n, ntmp, &gc->outlist, link) {
+ if (n != node && !n->locked && n->guc_id == guc_id)
+ guc_capture_free_outlist_node(gc, n);
+ }
+}
+
+static void
+guc_capture_init_node(struct xe_guc *guc, struct __guc_capture_parsed_output *node)
+{
+ struct guc_mmio_reg *tmp[GUC_STATE_CAPTURE_TYPE_MAX];
+ int i;
+
+ for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) {
+ tmp[i] = node->reginfo[i].regs;
+ memset(tmp[i], 0, sizeof(struct guc_mmio_reg) *
+ guc->capture->max_mmio_per_node);
+ }
+ memset(node, 0, sizeof(*node));
+ for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i)
+ node->reginfo[i].regs = tmp[i];
+
+ INIT_LIST_HEAD(&node->link);
+}
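+
+/*
+ * Note on guc_capture_init_node above: the per-type register arrays are
+ * allocated once (see guc_capture_alloc_one_node) and must survive the
+ * memset of the node, so their pointers are parked in tmp[], the node is
+ * wiped, and the pointers are then restored before the list head is
+ * re-initialized.
+ */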
+
+/**
+ * DOC: Init, G2H-event and reporting flows for GuC-error-capture
+ *
+ * KMD Init time flows:
+ * --------------------
+ * --> alloc A: GuC input capture regs lists (registered to GuC via ADS).
+ * xe_guc_ads acquires the register lists by calling
+ * xe_guc_capture_getlistsize and xe_guc_capture_getlist 'n' times,
+ * where n = 1 for global-reg-list +
+ * num_engine_classes for class-reg-list +
+ * num_engine_classes for instance-reg-list
+ * (since all instances of the same engine-class type
+ * have an identical engine-instance register-list).
+ * ADS module also calls separately for PF vs VF.
+ *
+ * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param))
+ *     Size = #define CAPTURE_BUFFER_SIZE (warns if too small)
+ *     Note: sized 'x 3' to hold multiple capture groups
+ *
+ * GUC Runtime notify capture:
+ * --------------------------
+ * --> G2H STATE_CAPTURE_NOTIFICATION
+ * L--> xe_guc_capture_process
+ * L--> Loop through B (head..tail) and for each engine instance's
+ * err-state-captured register-list we find, we alloc 'C':
+ * --> alloc C: A capture-output-node structure that includes misc capture info along
+ * with 3 register list dumps (global, engine-class and engine-instance)
+ * This node is created from a pre-allocated list of blank nodes in
+ * guc->capture->cachelist and populated with the error-capture
+ * data from GuC and then it's added into guc->capture->outlist linked
+ * list. This list is used for matchup and printout by xe_devcoredump_read
+ * and xe_engine_snapshot_print, (when user invokes the devcoredump sysfs).
+ *
+ * GUC --> notify context reset:
+ * -----------------------------
+ * --> guc_exec_queue_timedout_job
+ * L--> xe_devcoredump
+ * L--> devcoredump_snapshot
+ * --> xe_hw_engine_snapshot_capture
+ *                                 --> xe_engine_manual_capture (for manual capture)
+ *
+ * User Sysfs / Debugfs
+ * --------------------
+ * --> xe_devcoredump_read->
+ * L--> xxx_snapshot_print
+ * L--> xe_engine_snapshot_print
+ * Print register lists values saved at
+ * guc->capture->outlist
+ *
+ */
+
+static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf)
+{
+ if (buf->wr >= buf->rd)
+ return (buf->wr - buf->rd);
+ return (buf->size - buf->rd) + buf->wr;
+}
+
+static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf)
+{
+ if (buf->rd > buf->wr)
+ return (buf->size - buf->rd);
+ return (buf->wr - buf->rd);
+}
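+
+/*
+ * Worked example of the two counters above (illustrative): with
+ * buf->size = 16, buf->rd = 12 and buf->wr = 4, the stream has wrapped,
+ * so guc_capture_buf_cnt() returns (16 - 12) + 4 = 8 pending bytes,
+ * while guc_capture_buf_cnt_to_end() returns 16 - 12 = 4, the bytes
+ * readable before the reader must wrap back to offset 0.
+ */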
+
+/*
+ * GuC's error-capture output is a ring buffer populated in a byte-stream fashion:
+ *
+ * The GuC Log buffer region for error-capture is managed like a ring buffer.
+ * The GuC firmware dumps error capture logs into this ring in a byte-stream flow.
+ * Additionally, both now and for the foreseeable future, all packed error-
+ * capture output structures are dword aligned.
+ *
+ * That said, if the GuC firmware is in the midst of writing a structure that is larger
+ * than one dword but the tail end of the err-capture buffer-region has less space left,
+ * we would need to extract that structure one dword at a time, straddling the end of
+ * the ring and wrapping onto its start.
+ *
+ * The function below, guc_capture_log_remove_bytes, is a helper for that. Callers
+ * typically do a straight-up memcpy from the ring contents and only need this helper
+ * when their structure extraction straddles the end of the ring. GuC firmware does
+ * not add any padding; the no-padding choice eases scalability for future expansion
+ * of output data types without requiring a redesign of the flow controls.
+ */
+static int
+guc_capture_log_remove_bytes(struct xe_guc *guc, struct __guc_capture_bufstate *buf,
+ void *out, int bytes_needed)
+{
+#define GUC_CAPTURE_LOG_BUF_COPY_RETRY_MAX 3
+
+ int fill_size = 0, tries = GUC_CAPTURE_LOG_BUF_COPY_RETRY_MAX;
+ int copy_size, avail;
+
+ xe_assert(guc_to_xe(guc), bytes_needed % sizeof(u32) == 0);
+
+ if (bytes_needed > guc_capture_buf_cnt(buf))
+ return -1;
+
+ while (bytes_needed > 0 && tries--) {
+ int misaligned;
+
+ avail = guc_capture_buf_cnt_to_end(buf);
+ misaligned = avail % sizeof(u32);
+ /* wrap if at end */
+ if (!avail) {
+ /* output stream clipped */
+ if (!buf->rd)
+ return fill_size;
+ buf->rd = 0;
+ continue;
+ }
+
+		/* Only copy u32-aligned data */
+ copy_size = avail < bytes_needed ? avail - misaligned : bytes_needed;
+ xe_map_memcpy_from(guc_to_xe(guc), out + fill_size, &guc->log.bo->vmap,
+ buf->data_offset + buf->rd, copy_size);
+ buf->rd += copy_size;
+ fill_size += copy_size;
+ bytes_needed -= copy_size;
+
+ if (misaligned)
+ xe_gt_warn(guc_to_gt(guc),
+ "Bytes extraction not dword aligned, clipping.\n");
+ }
+
+ return fill_size;
+}
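+
+/*
+ * Worked example (illustrative): extracting an 8-byte structure with
+ * buf->size = 16, buf->rd = 12 and buf->wr = 4. First pass: avail = 4,
+ * so 4 bytes are copied and rd reaches the end of the ring. Second pass:
+ * avail = 0 with rd != 0, so rd wraps to 0. Third pass: the remaining
+ * 4 bytes are copied from the start of the ring, giving fill_size = 8
+ * within the three permitted tries (the wrap itself consumes one try).
+ */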
+
+static int
+guc_capture_log_get_group_hdr(struct xe_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_state_capture_group_header_t *ghdr)
+{
+ int fullsize = sizeof(struct guc_state_capture_group_header_t);
+
+ if (guc_capture_log_remove_bytes(guc, buf, ghdr, fullsize) != fullsize)
+ return -1;
+ return 0;
+}
+
+static int
+guc_capture_log_get_data_hdr(struct xe_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_state_capture_header_t *hdr)
+{
+ int fullsize = sizeof(struct guc_state_capture_header_t);
+
+ if (guc_capture_log_remove_bytes(guc, buf, hdr, fullsize) != fullsize)
+ return -1;
+ return 0;
+}
+
+static int
+guc_capture_log_get_register(struct xe_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_mmio_reg *reg)
+{
+ int fullsize = sizeof(struct guc_mmio_reg);
+
+ if (guc_capture_log_remove_bytes(guc, buf, reg, fullsize) != fullsize)
+ return -1;
+ return 0;
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_get_prealloc_node(struct xe_guc *guc)
+{
+ struct __guc_capture_parsed_output *found = NULL;
+
+ if (!list_empty(&guc->capture->cachelist)) {
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /* get first avail node from the cache list */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) {
+ found = n;
+ break;
+ }
+ } else {
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /*
+		 * traverse in reverse and steal back the oldest node already
+		 * allocated
+ */
+ list_for_each_entry_safe_reverse(n, ntmp, &guc->capture->outlist, link) {
+ if (!n->locked)
+ found = n;
+ }
+ }
+ if (found) {
+ list_del(&found->link);
+ guc_capture_init_node(guc, found);
+ }
+
+ return found;
+}
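+
+/*
+ * Allocation policy of the helper above: blank nodes come from cachelist
+ * first; only when the cache is exhausted is an unlocked node (i.e. one
+ * not yet matched to a devcoredump) stolen back from outlist and recycled,
+ * so locked nodes that a coredump still references are never reused.
+ */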
+
+static struct __guc_capture_parsed_output *
+guc_capture_clone_node(struct xe_guc *guc, struct __guc_capture_parsed_output *original,
+ u32 keep_reglist_mask)
+{
+ struct __guc_capture_parsed_output *new;
+ int i;
+
+ new = guc_capture_get_prealloc_node(guc);
+ if (!new)
+ return NULL;
+ if (!original)
+ return new;
+
+ new->is_partial = original->is_partial;
+
+ /* copy reg-lists that we want to clone */
+ for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) {
+ if (keep_reglist_mask & BIT(i)) {
+ XE_WARN_ON(original->reginfo[i].num_regs >
+ guc->capture->max_mmio_per_node);
+
+ memcpy(new->reginfo[i].regs, original->reginfo[i].regs,
+ original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg));
+
+ new->reginfo[i].num_regs = original->reginfo[i].num_regs;
+ new->reginfo[i].vfid = original->reginfo[i].vfid;
+
+ if (i == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS) {
+ new->eng_class = original->eng_class;
+ } else if (i == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) {
+ new->eng_inst = original->eng_inst;
+ new->guc_id = original->guc_id;
+ new->lrca = original->lrca;
+ }
+ }
+ }
+
+ return new;
+}
+
+static int
+guc_capture_extract_reglists(struct xe_guc *guc, struct __guc_capture_bufstate *buf)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct guc_state_capture_group_header_t ghdr = {0};
+ struct guc_state_capture_header_t hdr = {0};
+ struct __guc_capture_parsed_output *node = NULL;
+ struct guc_mmio_reg *regs = NULL;
+ int i, numlists, numregs, ret = 0;
+ enum guc_state_capture_type datatype;
+ struct guc_mmio_reg tmp;
+ bool is_partial = false;
+
+ i = guc_capture_buf_cnt(buf);
+ if (!i)
+ return -ENODATA;
+
+ if (i % sizeof(u32)) {
+ xe_gt_warn(gt, "Got mis-aligned register capture entries\n");
+ ret = -EIO;
+ goto bailout;
+ }
+
+ /* first get the capture group header */
+ if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) {
+ ret = -EIO;
+ goto bailout;
+ }
+ /*
+	 * We would typically expect a layout as below, where the number of
+	 * captures is anywhere from 3 upwards (more than 3 when multiple
+	 * dependent engine instances are reset together).
+ * ____________________________________________
+ * | Capture Group |
+ * | ________________________________________ |
+ * | | Capture Group Header: | |
+ * | | - num_captures = 5 | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture1: | |
+ * | | Hdr: GLOBAL, numregs=a | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... rega | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture2: | |
+ * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regb | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture3: | |
+ * | | Hdr: INSTANCE=RCS, numregs=c | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regc | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture4: | |
+ * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regd | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture5: | |
+ * | | Hdr: INSTANCE=CCS0, numregs=e | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... rege | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * |__________________________________________|
+ */
+ is_partial = FIELD_GET(GUC_STATE_CAPTURE_GROUP_HEADER_CAPTURE_GROUP_TYPE, ghdr.info);
+ numlists = FIELD_GET(GUC_STATE_CAPTURE_GROUP_HEADER_NUM_CAPTURES, ghdr.info);
+
+ while (numlists--) {
+ if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) {
+ ret = -EIO;
+ break;
+ }
+
+ datatype = FIELD_GET(GUC_STATE_CAPTURE_HEADER_CAPTURE_TYPE, hdr.info);
+ if (datatype > GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) {
+ /* unknown capture type - skip over to next capture set */
+ numregs = FIELD_GET(GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES,
+ hdr.num_mmio_entries);
+ while (numregs--) {
+ if (guc_capture_log_get_register(guc, buf, &tmp)) {
+ ret = -EIO;
+ break;
+ }
+ }
+ continue;
+ } else if (node) {
+ /*
+ * Based on the current capture type and what we have so far,
+ * decide if we should add the current node into the internal
+ * linked list for match-up when xe_devcoredump calls later
+ * (and alloc a blank node for the next set of reglists)
+ * or continue with the same node or clone the current node
+ * but only retain the global or class registers (such as the
+ * case of dependent engine resets).
+ */
+ if (datatype == GUC_STATE_CAPTURE_TYPE_GLOBAL) {
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = NULL;
+ } else if (datatype == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS &&
+ node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS].num_regs) {
+ /* Add to list, clone node and duplicate global list */
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = guc_capture_clone_node(guc, node,
+ GCAP_PARSED_REGLIST_INDEX_GLOBAL);
+ } else if (datatype == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE &&
+ node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE].num_regs) {
+ /* Add to list, clone node and duplicate global + class lists */
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = guc_capture_clone_node(guc, node,
+ (GCAP_PARSED_REGLIST_INDEX_GLOBAL |
+ GCAP_PARSED_REGLIST_INDEX_ENGCLASS));
+ }
+ }
+
+ if (!node) {
+ node = guc_capture_get_prealloc_node(guc);
+ if (!node) {
+ ret = -ENOMEM;
+ break;
+ }
+ if (datatype != GUC_STATE_CAPTURE_TYPE_GLOBAL)
+ xe_gt_dbg(gt, "Register capture missing global dump: %08x!\n",
+ datatype);
+ }
+ node->is_partial = is_partial;
+ node->reginfo[datatype].vfid = FIELD_GET(GUC_STATE_CAPTURE_HEADER_VFID, hdr.owner);
+ node->source = XE_ENGINE_CAPTURE_SOURCE_GUC;
+ node->type = datatype;
+
+ switch (datatype) {
+ case GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE:
+ node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS,
+ hdr.info);
+ node->eng_inst = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE,
+ hdr.info);
+ node->lrca = hdr.lrca;
+ node->guc_id = hdr.guc_id;
+ break;
+ case GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS:
+ node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS,
+ hdr.info);
+ break;
+ default:
+ break;
+ }
+
+ numregs = FIELD_GET(GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES,
+ hdr.num_mmio_entries);
+ if (numregs > guc->capture->max_mmio_per_node) {
+ xe_gt_dbg(gt, "Register capture list extraction clipped by prealloc!\n");
+ numregs = guc->capture->max_mmio_per_node;
+ }
+ node->reginfo[datatype].num_regs = numregs;
+ regs = node->reginfo[datatype].regs;
+ i = 0;
+ while (numregs--) {
+ if (guc_capture_log_get_register(guc, buf, &regs[i++])) {
+ ret = -EIO;
+ break;
+ }
+ }
+ }
+
+bailout:
+ if (node) {
+ /* If we have data, add to linked list for match-up when xe_devcoredump calls */
+ for (i = GUC_STATE_CAPTURE_TYPE_GLOBAL; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) {
+ if (node->reginfo[i].regs) {
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = NULL;
+ break;
+ }
+ }
+ if (node) /* else return it back to cache list */
+ guc_capture_add_node_to_cachelist(guc->capture, node);
+ }
+ return ret;
+}
+
+static int __guc_capture_flushlog_complete(struct xe_guc *guc)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
+ GUC_LOG_BUFFER_CAPTURE
+ };
+
+ return xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static void __guc_capture_process_output(struct xe_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, full_count;
+ struct xe_uc *uc = container_of(guc, typeof(*uc), guc);
+ struct guc_log_buffer_state log_buf_state_local;
+ struct __guc_capture_bufstate buf;
+ bool new_overflow;
+ int ret, tmp;
+ u32 log_buf_state_offset;
+ u32 src_data_offset;
+
+ log_buf_state_offset = sizeof(struct guc_log_buffer_state) * GUC_LOG_BUFFER_CAPTURE;
+ src_data_offset = xe_guc_get_log_buffer_offset(&guc->log, GUC_LOG_BUFFER_CAPTURE);
+
+ /*
+	 * Make a copy of the state structure, inside the GuC log buffer
+	 * (which is mapped uncached), on the stack to avoid reading
+ * from it multiple times.
+ */
+ xe_map_memcpy_from(guc_to_xe(guc), &log_buf_state_local, &guc->log.bo->vmap,
+ log_buf_state_offset, sizeof(struct guc_log_buffer_state));
+
+ buffer_size = xe_guc_get_log_buffer_size(&guc->log, GUC_LOG_BUFFER_CAPTURE);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_count = FIELD_GET(GUC_LOG_BUFFER_STATE_BUFFER_FULL_CNT, log_buf_state_local.flags);
+
+ /* Bookkeeping stuff */
+ tmp = FIELD_GET(GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE, log_buf_state_local.flags);
+ guc->log.stats[GUC_LOG_BUFFER_CAPTURE].flush += tmp;
+ new_overflow = xe_guc_check_log_buf_overflow(&guc->log, GUC_LOG_BUFFER_CAPTURE,
+ full_count);
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ xe_gt_err(guc_to_gt(guc),
+ "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n",
+ read_offset, buffer_size);
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ buf.size = buffer_size;
+ buf.rd = read_offset;
+ buf.wr = write_offset;
+ buf.data_offset = src_data_offset;
+
+ if (!xe_guc_read_stopped(guc)) {
+ do {
+ ret = guc_capture_extract_reglists(guc, &buf);
+ if (ret && ret != -ENODATA)
+ xe_gt_dbg(guc_to_gt(guc), "Capture extraction failed:%d\n", ret);
+ } while (ret >= 0);
+ }
+
+ /* Update the state of log buffer err-cap state */
+ xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap,
+ log_buf_state_offset + offsetof(struct guc_log_buffer_state, read_ptr), u32,
+ write_offset);
+
+ /*
+	 * Clear flush_to_file in the local copy first (the local copy was loaded
+	 * by the xe_map_memcpy_from() above), then write the updated local flags
+	 * back out through xe_map_wr().
+ */
+ log_buf_state_local.flags &= ~GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE;
+ xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap,
+ log_buf_state_offset + offsetof(struct guc_log_buffer_state, flags), u32,
+ log_buf_state_local.flags);
+ __guc_capture_flushlog_complete(guc);
+}
+
+/**
+ * xe_guc_capture_process - Process GuC register captured data
+ * @guc: The GuC object
+ *
+ * When GuC captured data is ready, GuC will send the message
+ * XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION to the host, and this function
+ * will be called to process the data that comes with the message.
+ *
+ * Returns: None
+ */
+void xe_guc_capture_process(struct xe_guc *guc)
+{
+ if (guc->capture)
+ __guc_capture_process_output(guc);
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_alloc_one_node(struct xe_guc *guc)
+{
+ struct drm_device *drm = guc_to_drm(guc);
+ struct __guc_capture_parsed_output *new;
+ int i;
+
+ new = drmm_kzalloc(drm, sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) {
+ new->reginfo[i].regs = drmm_kzalloc(drm, guc->capture->max_mmio_per_node *
+ sizeof(struct guc_mmio_reg), GFP_KERNEL);
+ if (!new->reginfo[i].regs) {
+ while (i)
+ drmm_kfree(drm, new->reginfo[--i].regs);
+ drmm_kfree(drm, new);
+ return NULL;
+ }
+ }
+ guc_capture_init_node(guc, new);
+
+ return new;
+}
+
+static void
+__guc_capture_create_prealloc_nodes(struct xe_guc *guc)
+{
+ struct __guc_capture_parsed_output *node = NULL;
+ int i;
+
+ for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
+ node = guc_capture_alloc_one_node(guc);
+ if (!node) {
+ xe_gt_warn(guc_to_gt(guc), "Register capture pre-alloc-cache failure\n");
+			/* don't free the priors; use what we got and clean up at shutdown */
+ return;
+ }
+ guc_capture_add_node_to_cachelist(guc->capture, node);
+ }
+}
+
+static int
+guc_get_max_reglist_count(struct xe_guc *guc)
+{
+ int i, j, k, tmp, maxregcount = 0;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
+ for (j = 0; j < GUC_STATE_CAPTURE_TYPE_MAX; ++j) {
+ for (k = 0; k < GUC_CAPTURE_LIST_CLASS_MAX; ++k) {
+ const struct __guc_mmio_reg_descr_group *match;
+
+ if (j == GUC_STATE_CAPTURE_TYPE_GLOBAL && k > 0)
+ continue;
+
+ tmp = 0;
+ match = guc_capture_get_one_list(guc->capture->reglists, i, j, k);
+ if (match)
+ tmp = match->num_regs;
+
+ match = guc_capture_get_one_list(guc->capture->extlists, i, j, k);
+ if (match)
+ tmp += match->num_regs;
+
+ if (tmp > maxregcount)
+ maxregcount = tmp;
+ }
+ }
+ }
+ if (!maxregcount)
+ maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS;
+
+ return maxregcount;
+}
+
+static void
+guc_capture_create_prealloc_nodes(struct xe_guc *guc)
+{
+ /* skip if we've already done the pre-alloc */
+ if (guc->capture->max_mmio_per_node)
+ return;
+
+ guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc);
+ __guc_capture_create_prealloc_nodes(guc);
+}
+
+static void
+read_reg_to_node(struct xe_hw_engine *hwe, const struct __guc_mmio_reg_descr_group *list,
+ struct guc_mmio_reg *regs)
+{
+ int i;
+
+ if (!list || !list->list || list->num_regs == 0)
+ return;
+
+ if (!regs)
+ return;
+
+ for (i = 0; i < list->num_regs; i++) {
+ struct __guc_mmio_reg_descr desc = list->list[i];
+ u32 value;
+
+ if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) {
+ value = xe_hw_engine_mmio_read32(hwe, desc.reg);
+ } else {
+ if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS &&
+ FIELD_GET(GUC_REGSET_STEERING_NEEDED, desc.flags)) {
+ int group, instance;
+
+ group = FIELD_GET(GUC_REGSET_STEERING_GROUP, desc.flags);
+ instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, desc.flags);
+ value = xe_gt_mcr_unicast_read(hwe->gt, XE_REG_MCR(desc.reg.addr),
+ group, instance);
+ } else {
+ value = xe_mmio_read32(&hwe->gt->mmio, desc.reg);
+ }
+ }
+
+ regs[i].value = value;
+ regs[i].offset = desc.reg.addr;
+ regs[i].flags = desc.flags;
+ regs[i].mask = desc.mask;
+ }
+}
+
+/**
+ * xe_engine_manual_capture - Take a manual engine snapshot from the engine
+ * @hwe: Xe HW Engine.
+ * @snapshot: The engine snapshot
+ *
+ * Take an engine snapshot by reading the engine registers directly.
+ *
+ * Returns: None
+ */
+void
+xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot *snapshot)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_devcoredump *devcoredump = &xe->devcoredump;
+ enum guc_capture_list_class_type capture_class;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_capture_parsed_output *new;
+ enum guc_state_capture_type type;
+ u16 guc_id = 0;
+ u32 lrca = 0;
+
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ new = guc_capture_get_prealloc_node(guc);
+ if (!new)
+ return;
+
+ capture_class = xe_engine_class_to_guc_capture_class(hwe->class);
+ for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
+ struct gcap_reg_list_info *reginfo = &new->reginfo[type];
+ /*
+		 * reginfo->regs is allocated based on guc->capture->max_mmio_per_node,
+		 * which is derived from the descriptor list driving the population, so
+		 * it should not overflow.
+ */
+
+ /* Get register list for the type/class */
+ list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
+ capture_class, false);
+ if (!list) {
+ xe_gt_dbg(gt, "Empty GuC capture register descriptor for %s",
+ hwe->name);
+ continue;
+ }
+
+ read_reg_to_node(hwe, list, reginfo->regs);
+ reginfo->num_regs = list->num_regs;
+
+ /* Capture steering registers for rcs/ccs */
+ if (capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) {
+ list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF,
+ type, capture_class, true);
+ if (list) {
+ read_reg_to_node(hwe, list, &reginfo->regs[reginfo->num_regs]);
+ reginfo->num_regs += list->num_regs;
+ }
+ }
+ }
+
+ if (devcoredump && devcoredump->captured) {
+ struct xe_guc_submit_exec_queue_snapshot *ge = devcoredump->snapshot.ge;
+
+ if (ge) {
+ guc_id = ge->guc.id;
+ if (ge->lrc[0])
+ lrca = ge->lrc[0]->context_desc;
+ }
+ }
+
+ new->eng_class = xe_engine_class_to_guc_class(hwe->class);
+ new->eng_inst = hwe->instance;
+ new->guc_id = guc_id;
+ new->lrca = lrca;
+ new->is_partial = 0;
+ new->locked = 1;
+ new->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL;
+
+ guc_capture_add_node_to_outlist(guc->capture, new);
+ devcoredump->snapshot.matched_node = new;
+}
+
+static struct guc_mmio_reg *
+guc_capture_find_reg(struct gcap_reg_list_info *reginfo, u32 addr, u32 flags)
+{
+ int i;
+
+ if (reginfo && reginfo->num_regs > 0) {
+ struct guc_mmio_reg *regs = reginfo->regs;
+
+ if (regs)
+ for (i = 0; i < reginfo->num_regs; i++)
+ if (regs[i].offset == addr && regs[i].flags == flags)
+ return &regs[i];
+ }
+
+ return NULL;
+}
+
+static void
+snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p,
+ u32 type, const struct __guc_mmio_reg_descr_group *list)
+{
+ struct xe_gt *gt = snapshot->hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_devcoredump *devcoredump = &xe->devcoredump;
+ struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot;
+ struct gcap_reg_list_info *reginfo = NULL;
+ u32 i, last_value = 0;
+ bool is_ext, low32_ready = false;
+
+ if (!list || !list->list || list->num_regs == 0)
+ return;
+ XE_WARN_ON(!devcore_snapshot->matched_node);
+
+ is_ext = list == guc->capture->extlists;
+ reginfo = &devcore_snapshot->matched_node->reginfo[type];
+
+ /*
+	 * Loop through the descriptor list first and look each register up in
+	 * the node. This is more maintainable for developers as it ensures the
+	 * printout matches the ordering of the static descriptor
+	 * table-of-lists.
+ */
+ for (i = 0; i < list->num_regs; i++) {
+ const struct __guc_mmio_reg_descr *reg_desc = &list->list[i];
+ struct guc_mmio_reg *reg;
+ u32 value;
+
+ reg = guc_capture_find_reg(reginfo, reg_desc->reg.addr, reg_desc->flags);
+ if (!reg)
+ continue;
+
+ value = reg->value;
+ switch (reg_desc->data_type) {
+ case REG_64BIT_LOW_DW:
+ last_value = value;
+
+ /*
+			 * A 64-bit register definition requires two consecutive
+			 * entries in the register list, with the low dword first
+			 * and the high dword second, like:
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ * { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, "XXX_REG"},
+ *
+ * Incorrect order will trigger XE_WARN.
+ *
+ * Possible double low here, for example:
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ */
+ XE_WARN_ON(low32_ready);
+ low32_ready = true;
+ /* Low 32 bit dword saved, continue for high 32 bit */
+ break;
+
+ case REG_64BIT_HI_DW: {
+ u64 value_qw = ((u64)value << 32) | last_value;
+
+ /*
+ * Incorrect 64bit register order. Possible missing low.
+ * for example:
+ * { XXX_REG(0), REG_32BIT, 0, 0, NULL},
+ * { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, NULL},
+ */
+ XE_WARN_ON(!low32_ready);
+ low32_ready = false;
+
+ drm_printf(p, "\t%s: 0x%016llx\n", reg_desc->regname, value_qw);
+ break;
+ }
+
+ case REG_32BIT:
+ /*
+ * Incorrect 64bit register order. Possible missing high.
+ * for example:
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ * { XXX_REG(0), REG_32BIT, 0, 0, "XXX_REG"},
+ */
+ XE_WARN_ON(low32_ready);
+
+ if (is_ext) {
+ int dss, group, instance;
+
+ group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags);
+ instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags);
+ dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance);
+
+ drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value);
+ } else {
+ drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
+ }
+ break;
+ }
+ }
+
+ /*
+ * Incorrect 64bit register order. Possible missing high.
+ * for example:
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ * } // <- Register list end
+ */
+ XE_WARN_ON(low32_ready);
+}
+
+/**
+ * xe_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
+ * @snapshot: Xe HW Engine snapshot object.
+ * @p: drm_printer where it will be printed out.
+ *
+ * This function prints out a given Xe HW Engine snapshot object.
+ */
+void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p)
+{
+ const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = {
+ "full-capture",
+ "partial-capture"
+ };
+ int type;
+ const struct __guc_mmio_reg_descr_group *list;
+ enum guc_capture_list_class_type capture_class;
+
+ struct xe_gt *gt;
+ struct xe_device *xe;
+ struct xe_devcoredump *devcoredump;
+ struct xe_devcoredump_snapshot *devcore_snapshot;
+
+ if (!snapshot)
+ return;
+
+ gt = snapshot->hwe->gt;
+ xe = gt_to_xe(gt);
+ devcoredump = &xe->devcoredump;
+ devcore_snapshot = &devcoredump->snapshot;
+
+ if (!devcore_snapshot->matched_node)
+ return;
+
+ xe_gt_assert(gt, snapshot->hwe);
+
+ capture_class = xe_engine_class_to_guc_capture_class(snapshot->hwe->class);
+
+ drm_printf(p, "%s (physical), logical instance=%d\n",
+ snapshot->name ? snapshot->name : "",
+ snapshot->logical_instance);
+ drm_printf(p, "\tCapture_source: %s\n",
+ devcore_snapshot->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC ?
+ "GuC" : "Manual");
+ drm_printf(p, "\tCoverage: %s\n", grptype[devcore_snapshot->matched_node->is_partial]);
+ drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
+ snapshot->forcewake.domain, snapshot->forcewake.ref);
+ drm_printf(p, "\tReserved: %s\n",
+ str_yes_no(snapshot->kernel_reserved));
+
+ for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
+ list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
+ capture_class, false);
+ snapshot_print_by_list_order(snapshot, p, type, list);
+ }
+
+ if (capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) {
+ list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ capture_class, true);
+ snapshot_print_by_list_order(snapshot, p, GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ list);
+ }
+
+ drm_puts(p, "\n");
+}
+
+/**
+ * xe_guc_capture_get_matching_and_lock - Find and lock the matching GuC capture for the queue
+ * @q: The exec queue object
+ *
+ * Search the capture outlist for a node matching the queue; this can also be
+ * used to check whether a GuC capture is ready for the queue.
+ * If found, the node's locked flag is set.
+ *
+ * Returns: the matching guc-capture node pointer, else NULL
+ */
+struct __guc_capture_parsed_output *
+xe_guc_capture_get_matching_and_lock(struct xe_exec_queue *q)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct xe_device *xe;
+ u16 guc_class = GUC_LAST_ENGINE_CLASS + 1;
+ struct xe_devcoredump_snapshot *ss;
+
+ if (!q || !q->gt)
+ return NULL;
+
+ xe = gt_to_xe(q->gt);
+ if (xe->wedged.mode >= 2 || !xe_device_uc_enabled(xe) || IS_SRIOV_VF(xe))
+ return NULL;
+
+ ss = &xe->devcoredump.snapshot;
+ if (ss->matched_node && ss->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC)
+ return ss->matched_node;
+
+ /* Find hwe for the queue */
+ for_each_hw_engine(hwe, q->gt, id) {
+ if (hwe != q->hwe)
+ continue;
+ guc_class = xe_engine_class_to_guc_class(hwe->class);
+ break;
+ }
+
+ if (guc_class <= GUC_LAST_ENGINE_CLASS) {
+ struct __guc_capture_parsed_output *n, *ntmp;
+ struct xe_guc *guc = &q->gt->uc.guc;
+ u16 guc_id = q->guc->id;
+ u32 lrca = xe_lrc_ggtt_addr(q->lrc[0]);
+
+ /*
+ * Look for a matching GuC reported error capture node from
+ * the internal output link-list based on engine, guc id and
+ * lrca info.
+ */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
+ if (n->eng_class == guc_class && n->eng_inst == hwe->instance &&
+ n->guc_id == guc_id && n->lrca == lrca &&
+ n->source == XE_ENGINE_CAPTURE_SOURCE_GUC) {
+ n->locked = 1;
+ return n;
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * xe_engine_snapshot_capture_for_queue - Take snapshot of associated engine
+ * @q: The exec queue object
+ *
+ * Take a snapshot of the associated HW engine.
+ *
+ * Returns: None.
+ */
+void
+xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q)
+{
+ struct xe_device *xe = gt_to_xe(q->gt);
+ struct xe_devcoredump *coredump = &xe->devcoredump;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ u32 adj_logical_mask = q->logical_mask;
+
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ for_each_hw_engine(hwe, q->gt, id) {
+ if (hwe->class != q->hwe->class ||
+ !(BIT(hwe->logical_instance) & adj_logical_mask)) {
+ coredump->snapshot.hwe[id] = NULL;
+ continue;
+ }
+
+ if (!coredump->snapshot.hwe[id]) {
+ coredump->snapshot.hwe[id] =
+ xe_hw_engine_snapshot_capture(hwe, q);
+ } else {
+ struct __guc_capture_parsed_output *new;
+
+ new = xe_guc_capture_get_matching_and_lock(q);
+ if (new) {
+ struct xe_guc *guc = &q->gt->uc.guc;
+
+ /*
+ * If we are in here, it means we found a fresh
+ * GuC-err-capture node for this engine after
+ * previously failing to find a match in the
+ * early part of guc_exec_queue_timedout_job.
+ * Thus we must free the manually captured node
+ */
+ guc_capture_free_outlist_node(guc->capture,
+ coredump->snapshot.matched_node);
+ coredump->snapshot.matched_node = new;
+ }
+ }
+
+ break;
+ }
+}
+
+/**
+ * xe_guc_capture_put_matched_nodes - Clean up matched nodes
+ * @guc: The GuC object
+ *
+ * Free the matched node and all nodes with the same guc_id from
+ * the GuC capture outlist.
+ */
+void xe_guc_capture_put_matched_nodes(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_devcoredump *devcoredump = &xe->devcoredump;
+ struct __guc_capture_parsed_output *n = devcoredump->snapshot.matched_node;
+
+ if (n) {
+ guc_capture_remove_stale_matches_from_list(guc->capture, n);
+ guc_capture_free_outlist_node(guc->capture, n);
+ devcoredump->snapshot.matched_node = NULL;
+ }
+}
+
+/**
+ * xe_guc_capture_steered_list_init - Init steering register list
+ * @guc: The GuC object
+ *
+ * Init the steering register list for GuC register capture and create the
+ * pre-alloc nodes.
+ */
+void xe_guc_capture_steered_list_init(struct xe_guc *guc)
+{
+ /*
+ * For certain engine classes, there are slice and subslice
+ * level registers requiring steering. We allocate and populate
+ * these based on hw config and add it as an extension list at
+ * the end of the pre-populated render list.
+ */
+ guc_capture_alloc_steered_lists(guc);
+ check_guc_capture_size(guc);
+ guc_capture_create_prealloc_nodes(guc);
+}
+
+/**
+ * xe_guc_capture_init - Init for GuC register capture
+ * @guc: The GuC object
+ *
+ * Init for GuC register capture: allocate memory for the capture data
+ * structure.
+ *
+ * Returns: 0 on success, or -ENOMEM if out of memory.
+ */
+int xe_guc_capture_init(struct xe_guc *guc)
+{
+ guc->capture = drmm_kzalloc(guc_to_drm(guc), sizeof(*guc->capture), GFP_KERNEL);
+ if (!guc->capture)
+ return -ENOMEM;
+
+ guc->capture->reglists = guc_capture_get_device_reglist(guc_to_xe(guc));
+
+ INIT_LIST_HEAD(&guc->capture->outlist);
+ INIT_LIST_HEAD(&guc->capture->cachelist);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.h b/drivers/gpu/drm/xe/xe_guc_capture.h
new file mode 100644
index 000000000000..20a078dc4b85
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_capture.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021-2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_CAPTURE_H
+#define _XE_GUC_CAPTURE_H
+
+#include <linux/types.h>
+#include "abi/guc_capture_abi.h"
+#include "xe_guc.h"
+#include "xe_guc_fwif.h"
+
+struct xe_exec_queue;
+struct xe_guc;
+struct xe_hw_engine;
+struct xe_hw_engine_snapshot;
+
+static inline enum guc_capture_list_class_type xe_guc_class_to_capture_class(u16 class)
+{
+ switch (class) {
+ case GUC_RENDER_CLASS:
+ case GUC_COMPUTE_CLASS:
+ return GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE;
+ case GUC_GSC_OTHER_CLASS:
+ return GUC_CAPTURE_LIST_CLASS_GSC_OTHER;
+ case GUC_VIDEO_CLASS:
+ case GUC_VIDEOENHANCE_CLASS:
+ case GUC_BLITTER_CLASS:
+ return class;
+ default:
+ XE_WARN_ON(class);
+ return GUC_CAPTURE_LIST_CLASS_MAX;
+ }
+}
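+
+/*
+ * Usage note (illustrative): render and compute share a single capture
+ * class, so xe_guc_class_to_capture_class(GUC_RENDER_CLASS) and
+ * xe_guc_class_to_capture_class(GUC_COMPUTE_CLASS) both return
+ * GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE and therefore share one register
+ * list. Video, video-enhance and blitter map 1:1, relying on the numeric
+ * values of the two enums lining up for those classes.
+ */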
+
+static inline enum guc_capture_list_class_type
+xe_engine_class_to_guc_capture_class(enum xe_engine_class class)
+{
+ return xe_guc_class_to_capture_class(xe_engine_class_to_guc_class(class));
+}
+
+void xe_guc_capture_process(struct xe_guc *guc);
+int xe_guc_capture_getlist(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, void **outptr);
+int xe_guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, size_t *size);
+int xe_guc_capture_getnullheader(struct xe_guc *guc, void **outptr, size_t *size);
+size_t xe_guc_capture_ads_input_worst_size(struct xe_guc *guc);
+const struct __guc_mmio_reg_descr_group *
+xe_guc_capture_get_reg_desc_list(struct xe_gt *gt, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, bool is_ext);
+struct __guc_capture_parsed_output *xe_guc_capture_get_matching_and_lock(struct xe_exec_queue *q);
+void xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot *snapshot);
+void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p);
+void xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q);
+void xe_guc_capture_steered_list_init(struct xe_guc *guc);
+void xe_guc_capture_put_matched_nodes(struct xe_guc *guc);
+int xe_guc_capture_init(struct xe_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h
new file mode 100644
index 000000000000..ca2d390ccbee
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021-2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_CAPTURE_TYPES_H
+#define _XE_GUC_CAPTURE_TYPES_H
+
+#include <linux/types.h>
+#include "regs/xe_reg_defs.h"
+
+struct xe_guc;
+
+/* data type of the register in register list */
+enum capture_register_data_type {
+ REG_32BIT = 0,
+ REG_64BIT_LOW_DW,
+ REG_64BIT_HI_DW,
+};
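+
+/*
+ * A 64-bit register is described by two consecutive 32-bit entries in a
+ * register list: the low dword first with a NULL name, then the high dword
+ * carrying the printable name. For example (illustrative placeholder, not
+ * a real table entry):
+ *   { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL },
+ *   { XXX_REG_HI(0), REG_64BIT_HI_DW,  0, 0, "XXX_REG" },
+ * The printer in xe_guc_capture.c warns if this pairing order is broken.
+ */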
+
+/**
+ * struct __guc_mmio_reg_descr - GuC mmio register descriptor
+ *
+ * xe_guc_capture module uses these structures to define registers
+ * (offsets, names, flags,...) that are used at ADS registration
+ * time as well as during runtime processing and reporting of error-
+ * capture states generated by GuC just prior to engine reset events.
+ */
+struct __guc_mmio_reg_descr {
+ /** @reg: the register */
+ struct xe_reg reg;
+ /**
+ * @data_type: data type of the register
+ * Could be 32 bit, low or hi dword of a 64 bit, see enum
+ * register_data_type
+ */
+ enum capture_register_data_type data_type;
+ /** @flags: Flags for the register */
+ u32 flags;
+ /** @mask: The mask to apply */
+ u32 mask;
+ /** @regname: Name of the register */
+ const char *regname;
+};
+
+/**
+ * struct __guc_mmio_reg_descr_group - A group of register descriptors
+ *
+ * xe_guc_capture module uses these structures to maintain static
+ * tables (per unique platform) that consist of lists of registers
+ * (offsets, names, flags,...) that are used at the ADS registration
+ * time as well as during runtime processing and reporting of error-
+ * capture states generated by GuC just prior to engine reset events.
+ */
+struct __guc_mmio_reg_descr_group {
+ /** @list: The register list */
+ const struct __guc_mmio_reg_descr *list;
+ /** @num_regs: Count of registers in the list */
+ u32 num_regs;
+ /** @owner: PF/VF owner, see enum guc_capture_list_index_type */
+ u32 owner;
+ /** @type: Capture register type, see enum guc_state_capture_type */
+ u32 type;
+ /** @engine: The engine class, see enum guc_capture_list_class_type */
+ u32 engine;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index f24dd5223926..50c8076b5158 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
+#include <linux/fault-inject.h>
#include <kunit/static_stub.h>
@@ -17,6 +18,7 @@
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
+#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
@@ -25,12 +27,49 @@
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
+#include "xe_guc_log.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace_guc.h"
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+enum {
+ /* Internal states, not error conditions */
+ CT_DEAD_STATE_REARM, /* 0x0001 */
+ CT_DEAD_STATE_CAPTURE, /* 0x0002 */
+
+ /* Error conditions */
+ CT_DEAD_SETUP, /* 0x0004 */
+ CT_DEAD_H2G_WRITE, /* 0x0008 */
+ CT_DEAD_H2G_HAS_ROOM, /* 0x0010 */
+ CT_DEAD_G2H_READ, /* 0x0020 */
+ CT_DEAD_G2H_RECV, /* 0x0040 */
+ CT_DEAD_G2H_RELEASE, /* 0x0080 */
+ CT_DEAD_DEADLOCK, /* 0x0100 */
+ CT_DEAD_PROCESS_FAILED, /* 0x0200 */
+ CT_DEAD_FAST_G2H, /* 0x0400 */
+ CT_DEAD_PARSE_G2H_RESPONSE, /* 0x0800 */
+ CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */
+ CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */
+ CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */
+ CT_DEAD_CRASH, /* 0x8000 */
+};
+
+static void ct_dead_worker_func(struct work_struct *w);
+static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
+
+#define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
+#else
+#define CT_DEAD(ct, ctb, reason) \
+ do { \
+ struct guc_ctb *_ctb = (ctb); \
+ if (_ctb) \
+ _ctb->info.broken = true; \
+ } while (0)
+#endif
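+
+/*
+ * The reason codes above are bit positions, so a recorded ct->dead.reason
+ * value is a mask: for example (illustrative) a value of 0x0102 would mean
+ * CT_DEAD_DEADLOCK plus the internal CT_DEAD_STATE_CAPTURE state bit. In
+ * non-debug builds CT_DEAD() degrades to simply marking the given channel
+ * broken.
+ */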
+
/* Used when a CT send wants to block and / or receive data */
struct g2h_fence {
u32 *response_buffer;
@@ -175,14 +214,18 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
- ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0);
+ ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
if (!ct->g2h_wq)
return -ENOMEM;
spin_lock_init(&ct->fast_lock);
xa_init(&ct->fence_lookup);
INIT_WORK(&ct->g2h_worker, g2h_worker_func);
- INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
+ INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ spin_lock_init(&ct->dead.lock);
+ INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
+#endif
init_waitqueue_head(&ct->wq);
init_waitqueue_head(&ct->g2h_fence_wq);
@@ -209,6 +252,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
ct->state = XE_GUC_CT_STATE_DISABLED;
return 0;
}
+ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
#define desc_read(xe_, guc_ctb__, field_) \
xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
@@ -395,6 +439,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
+ xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
@@ -419,10 +464,24 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
if (ct_needs_safe_mode(ct))
ct_enter_safe_mode(ct);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ /*
+ * The CT has now been reset so the dumper can be re-armed
+ * after any existing dead state has been dumped.
+ */
+ spin_lock_irq(&ct->dead.lock);
+ if (ct->dead.reason) {
+ ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
+ queue_work(system_unbound_wq, &ct->dead.worker);
+ }
+ spin_unlock_irq(&ct->dead.lock);
+#endif
+
return 0;
err_out:
xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
+ CT_DEAD(ct, NULL, SETUP);
return err;
}
@@ -466,6 +525,19 @@ static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
if (cmd_len > h2g->info.space) {
h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+
+ if (h2g->info.head > h2g->info.size) {
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 desc_status = desc_read(xe, h2g, status);
+
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+
+ xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u)\n",
+ h2g->info.head, h2g->info.size);
+ CT_DEAD(ct, h2g, H2G_HAS_ROOM);
+ return false;
+ }
+
h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
h2g->info.size) -
h2g->info.resv_space;
@@ -521,10 +593,24 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
+ bool bad = false;
+
lockdep_assert_held(&ct->fast_lock);
- xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
- ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
- xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding);
+
+ bad = ct->ctbs.g2h.info.space + g2h_len >
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
+ bad |= !ct->g2h_outstanding;
+
+ if (bad) {
+ xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
+ ct->ctbs.g2h.info.space, g2h_len,
+ ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
+ ct->ctbs.g2h.info.space + g2h_len,
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
+ ct->g2h_outstanding);
+ CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
+ return;
+ }
ct->ctbs.g2h.info.space += g2h_len;
if (!--ct->g2h_outstanding)
@@ -551,12 +637,43 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 full_len;
struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
tail * sizeof(u32));
+ u32 desc_status;
full_len = len + GUC_CTB_HDR_LEN;
lockdep_assert_held(&ct->lock);
xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
- xe_gt_assert(gt, tail <= h2g->info.size);
+
+ desc_status = desc_read(xe, h2g, status);
+ if (desc_status) {
+ xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
+ goto corrupted;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ u32 desc_tail = desc_read(xe, h2g, tail);
+ u32 desc_head = desc_read(xe, h2g, head);
+
+ if (tail != desc_tail) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+ xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
+ goto corrupted;
+ }
+
+ if (tail > h2g->info.size) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
+ tail, h2g->info.size);
+ goto corrupted;
+ }
+
+ if (desc_head >= h2g->info.size) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT write: invalid head offset %u >= %u)\n",
+ desc_head, h2g->info.size);
+ goto corrupted;
+ }
+ }
/* Command will wrap, zero fill (NOPs), return and check credits again */
if (tail + full_len > h2g->info.size) {
@@ -593,7 +710,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
--len;
++action;
- /* Write H2G ensuring visable before descriptor update */
+ /* Write H2G ensuring visible before descriptor update */
xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
xe_device_wmb(xe);
@@ -609,6 +726,10 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
desc_read(xe, h2g, head), h2g->info.tail);
return 0;
+
+corrupted:
+ CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
+ return -EPIPE;
}
/*
@@ -667,16 +788,12 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
num_g2h = 1;
if (g2h_fence_needs_alloc(g2h_fence)) {
- void *ptr;
-
g2h_fence->seqno = next_ct_seqno(ct, true);
- ptr = xa_store(&ct->fence_lookup,
- g2h_fence->seqno,
- g2h_fence, GFP_ATOMIC);
- if (IS_ERR(ptr)) {
- ret = PTR_ERR(ptr);
+ ret = xa_err(xa_store(&ct->fence_lookup,
+ g2h_fence->seqno, g2h_fence,
+ GFP_ATOMIC));
+ if (ret)
goto out;
- }
}
seqno = g2h_fence->seqno;
@@ -720,7 +837,6 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
- struct drm_printer p = xe_gt_info_printer(gt);
unsigned int sleep_period_ms = 1;
int ret;
@@ -773,8 +889,13 @@ try_again:
goto broken;
#undef g2h_avail
- if (dequeue_one_g2h(ct) < 0)
+ ret = dequeue_one_g2h(ct);
+ if (ret < 0) {
+ if (ret != -ECANCELED)
+ xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
+ ERR_PTR(ret));
goto broken;
+ }
goto try_again;
}
@@ -783,8 +904,7 @@ try_again:
broken:
xe_gt_err(gt, "No forward process on H2G, reset required\n");
- xe_guc_ct_print(ct, &p, true);
- ct->ctbs.h2g.info.broken = true;
+ CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
return -EDEADLK;
}
@@ -852,7 +972,7 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
#define ct_alive(ct) \
(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
!ct->ctbs.g2h.info.broken)
- if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
+ if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
return false;
#undef ct_alive
@@ -879,14 +999,11 @@ retry:
retry_same_fence:
ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
if (unlikely(ret == -ENOMEM)) {
- void *ptr;
-
/* Retry allocation /w GFP_KERNEL */
- ptr = xa_store(&ct->fence_lookup,
- g2h_fence.seqno,
- &g2h_fence, GFP_KERNEL);
- if (IS_ERR(ptr))
- return PTR_ERR(ptr);
+ ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
+ &g2h_fence, GFP_KERNEL));
+ if (ret)
+ return ret;
goto retry_same_fence;
} else if (unlikely(ret)) {
@@ -897,22 +1014,40 @@ retry_same_fence:
goto retry_same_fence;
if (!g2h_fence_needs_alloc(&g2h_fence))
- xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+ xa_erase(&ct->fence_lookup, g2h_fence.seqno);
return ret;
}
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
if (!ret) {
- xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x",
- g2h_fence.seqno, action[0]);
- xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+ LNL_FLUSH_WORK(&ct->g2h_worker);
+ if (g2h_fence.done) {
+ xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+ g2h_fence.seqno, action[0]);
+ ret = 1;
+ }
+ }
+
+ /*
+ * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
+ * the stack, since we have no clue if it will fire after the timeout before we can erase
+ * from the xa. Also we have some dependent loads and stores below for which we need the
+ * correct ordering, and we lack the needed barriers.
+ */
+ mutex_lock(&ct->lock);
+ if (!ret) {
+ xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
+ g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
+ xa_erase(&ct->fence_lookup, g2h_fence.seqno);
+ mutex_unlock(&ct->lock);
return -ETIME;
}
if (g2h_fence.retry) {
xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
action[0], g2h_fence.reason);
+ mutex_unlock(&ct->lock);
goto retry;
}
if (g2h_fence.fail) {
@@ -921,7 +1056,12 @@ retry_same_fence:
ret = -EIO;
}
- return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
+ if (ret > 0)
+ ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
+
+ mutex_unlock(&ct->lock);
+
+ return ret;
}
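
The locking added above exists because g2h_fence lives on this function's stack while ct->fence_lookup holds a pointer to it: once wait_event_timeout() gives up, the completion path could still find and write to the fence unless both sides synchronize on ct->lock. A condensed sketch of the shape, using hypothetical names rather than the driver's actual types:

/* Sketch: tear down a stack-resident fence safely on timeout. */
struct my_fence {
	u32 seqno;
	bool done;
};

static int my_wait_fence(struct mutex *lock, struct xarray *lookup,
			 struct my_fence *fence, bool timed_out)
{
	mutex_lock(lock);
	if (timed_out && !fence->done) {
		/*
		 * Erase under the lock: the completion handler takes the
		 * same lock before its lookup, so it can never dereference
		 * the stack object after this function returns.
		 */
		xa_erase(lookup, fence->seqno);
		mutex_unlock(lock);
		return -ETIME;
	}
	/* Reads of fence state below are ordered by the same lock. */
	mutex_unlock(lock);
	return 0;
}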
/**
@@ -983,6 +1123,24 @@ static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
return 0;
}
+static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+
+ if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
+ xe_gt_err(gt, "GuC Crash dump notification\n");
+ else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
+ xe_gt_err(gt, "GuC Exception notification\n");
+ else
+ xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
+
+ CT_DEAD(ct, NULL, CRASH);
+
+ kick_reset(ct);
+
+ return 0;
+}
+
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_gt *gt = ct_to_gt(ct);
@@ -1011,6 +1169,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
else
xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
type, fence);
+ CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
return -EPROTO;
}
@@ -1018,6 +1177,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
g2h_fence = xa_erase(&ct->fence_lookup, fence);
if (unlikely(!g2h_fence)) {
/* Don't tear down channel, as send could've timed out */
+ /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
return 0;
@@ -1062,7 +1222,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
origin);
- ct->ctbs.g2h.info.broken = true;
+ CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
return -EPROTO;
}
@@ -1080,7 +1240,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
default:
xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
type);
- ct->ctbs.g2h.info.broken = true;
+ CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
ret = -EOPNOTSUPP;
}
@@ -1123,6 +1283,8 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
/* Selftest only at the moment */
break;
case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+ ret = xe_guc_error_capture_handler(guc, payload, adj_len);
+ break;
case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
/* FIXME: Handle this */
break;
@@ -1153,13 +1315,19 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
break;
+ case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
+ case XE_GUC_ACTION_NOTIFY_EXCEPTION:
+ ret = guc_crash_process_msg(ct, action);
+ break;
default:
xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
}
- if (ret)
- xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
- action, ERR_PTR(ret));
+ if (ret) {
+ xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
+ action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
+ CT_DEAD(ct, NULL, PROCESS_FAILED);
+ }
return 0;
}
@@ -1169,7 +1337,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
struct guc_ctb *g2h = &ct->ctbs.g2h;
- u32 tail, head, len;
+ u32 tail, head, len, desc_status;
s32 avail;
u32 action;
u32 *hxg;
@@ -1188,6 +1356,63 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
xe_gt_assert(gt, xe_guc_ct_enabled(ct));
+ desc_status = desc_read(xe, g2h, status);
+ if (desc_status) {
+ if (desc_status & GUC_CTB_STATUS_DISABLED) {
+ /*
+ * Potentially valid if a CLIENT_RESET request resulted in
+ * contexts/engines being reset. But this should never happen,
+ * as no contexts should be active when CLIENT_RESET is sent.
+ */
+ xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
+ desc_status &= ~GUC_CTB_STATUS_DISABLED;
+ }
+
+ if (desc_status) {
+ xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
+ goto corrupted;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ u32 desc_tail = desc_read(xe, g2h, tail);
+ /*
+ * info.head and desc_head are updated back-to-back at the end of
+ * this function and nowhere else. Hence, they cannot be different
+ * unless two g2h_read calls are running concurrently. Which is not
+ * possible because it is guarded by ct->fast_lock. And yet, some
+ * discrete platforms are regularly hitting this error :(.
+ *
+ * desc_head rolling backwards shouldn't cause any noticeable
+ * problems - just a delay in GuC being allowed to proceed past that
+ * point in the queue. So for now, just disable the error until it
+ * can be root caused.
+ *
+ u32 desc_head = desc_read(xe, g2h, head);
+
+ if (g2h->info.head != desc_head) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+ xe_gt_err(gt, "CT read: head was modified %u != %u\n",
+ desc_head, g2h->info.head);
+ goto corrupted;
+ }
+ */
+
+ if (g2h->info.head > g2h->info.size) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
+ g2h->info.head, g2h->info.size);
+ goto corrupted;
+ }
+
+ if (desc_tail >= g2h->info.size) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
+ desc_tail, g2h->info.size);
+ goto corrupted;
+ }
+ }
+
/* Calculate DW available to read */
tail = desc_read(xe, g2h, tail);
avail = tail - g2h->info.head;
@@ -1204,9 +1429,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
if (len > avail) {
xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
avail, len);
- g2h->info.broken = true;
-
- return -EPROTO;
+ goto corrupted;
}
head = (g2h->info.head + 1) % g2h->info.size;
@@ -1252,6 +1475,10 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
action, len, g2h->info.head, tail);
return len;
+
+corrupted:
+ CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
+ return -EPROTO;
}
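
All of the failure paths in the rewritten g2h_read() now converge on one `corrupted` label, so the channel is marked dead through a single CT_DEAD() call rather than scattered `info.broken` writes. The bounds checks themselves are the standard circular-buffer invariants; roughly, with hypothetical helper names:

/* Sketch: both ring indices must lie inside the buffer... */
static bool my_ring_indices_valid(u32 head, u32 tail, u32 size)
{
	return head < size && tail < size;
}

/* ...and the readable span is tail - head, adjusted for wrap-around. */
static s32 my_ring_avail(u32 head, u32 tail, u32 size)
{
	s32 avail = (s32)tail - (s32)head;

	if (avail < 0)
		avail += size;
	return avail;
}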
static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
@@ -1278,9 +1505,11 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
xe_gt_warn(gt, "NOT_POSSIBLE");
}
- if (ret)
+ if (ret) {
xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
action, ERR_PTR(ret));
+ CT_DEAD(ct, NULL, FAST_G2H);
+ }
}
/**
@@ -1340,7 +1569,6 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct)
static void receive_g2h(struct xe_guc_ct *ct)
{
- struct xe_gt *gt = ct_to_gt(ct);
bool ongoing;
int ret;
@@ -1377,9 +1605,8 @@ static void receive_g2h(struct xe_guc_ct *ct)
mutex_unlock(&ct->lock);
if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
- struct drm_printer p = xe_gt_info_printer(gt);
-
- xe_guc_ct_print(ct, &p, false);
+ xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d\n", ret);
+ CT_DEAD(ct, NULL, G2H_RECV);
kick_reset(ct);
}
} while (ret == 1);
@@ -1395,49 +1622,34 @@ static void g2h_worker_func(struct work_struct *w)
receive_g2h(ct);
}
-static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
- struct guc_ctb_snapshot *snapshot,
- bool atomic)
+static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
+ bool want_ctb)
{
- u32 head, tail;
-
- xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
- sizeof(struct guc_ct_buffer_desc));
- memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
+ struct xe_guc_ct_snapshot *snapshot;
- snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
- atomic ? GFP_ATOMIC : GFP_KERNEL);
+ snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!snapshot)
+ return NULL;
- if (!snapshot->cmds) {
- drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
- return;
+ if (ct->bo && want_ctb) {
+ snapshot->ctb_size = ct->bo->size;
+ snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
}
- head = snapshot->desc.head;
- tail = snapshot->desc.tail;
-
- if (head != tail) {
- struct iosys_map map =
- IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));
-
- while (head != tail) {
- snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32);
- ++head;
- if (head == ctb->info.size) {
- head = 0;
- map = ctb->cmds;
- } else {
- iosys_map_incr(&map, sizeof(u32));
- }
- }
- }
+ return snapshot;
+}
+
+static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
+ struct guc_ctb_snapshot *snapshot)
+{
+ xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
+ sizeof(struct guc_ct_buffer_desc));
+ memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
}
static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
struct drm_printer *p)
{
- u32 head, tail;
-
drm_printf(p, "\tsize: %d\n", snapshot->info.size);
drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
drm_printf(p, "\thead: %d\n", snapshot->info.head);
@@ -1447,63 +1659,46 @@ static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
+}
- if (!snapshot->cmds)
- return;
+static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
+ bool want_ctb)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_guc_ct_snapshot *snapshot;
- head = snapshot->desc.head;
- tail = snapshot->desc.tail;
+ snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
+ if (!snapshot) {
+ xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
+ return NULL;
+ }
- while (head != tail) {
- drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
- snapshot->cmds[head]);
- ++head;
- if (head == snapshot->info.size)
- head = 0;
+ if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
+ snapshot->ct_enabled = true;
+ snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
+ guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
+ guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
}
-}
-static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
-{
- kfree(snapshot->cmds);
+ if (ct->bo && snapshot->ctb)
+ xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
+
+ return snapshot;
}
/**
* xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
* @ct: GuC CT object.
- * @atomic: Boolean to indicate if this is called from atomic context like
- * reset or CTB handler or from some regular path like debugfs.
*
* This can be printed out in a later stage like during dev_coredump
- * analysis.
+ * analysis. This is safe to be called during atomic context.
*
* Returns: a GuC CT snapshot object that must be freed by the caller
* by using `xe_guc_ct_snapshot_free`.
*/
-struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
- bool atomic)
+struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
{
- struct xe_device *xe = ct_to_xe(ct);
- struct xe_guc_ct_snapshot *snapshot;
-
- snapshot = kzalloc(sizeof(*snapshot),
- atomic ? GFP_ATOMIC : GFP_KERNEL);
-
- if (!snapshot) {
- drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
- return NULL;
- }
-
- if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
- snapshot->ct_enabled = true;
- snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
- guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
- &snapshot->h2g, atomic);
- guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
- &snapshot->g2h, atomic);
- }
-
- return snapshot;
+ return guc_ct_snapshot_capture(ct, true, true);
}
/**
@@ -1523,11 +1718,14 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
drm_puts(p, "H2G CTB (all sizes in DW):\n");
guc_ctb_snapshot_print(&snapshot->h2g, p);
- drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
+ drm_puts(p, "G2H CTB (all sizes in DW):\n");
guc_ctb_snapshot_print(&snapshot->g2h, p);
-
drm_printf(p, "\tg2h outstanding: %d\n",
snapshot->g2h_outstanding);
+
+ if (snapshot->ctb)
+ xe_print_blob_ascii85(p, "CTB data", '\n',
+ snapshot->ctb, 0, snapshot->ctb_size);
} else {
drm_puts(p, "CT disabled\n");
}
@@ -1545,8 +1743,7 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
if (!snapshot)
return;
- guc_ctb_snapshot_free(&snapshot->h2g);
- guc_ctb_snapshot_free(&snapshot->g2h);
+ kfree(snapshot->ctb);
kfree(snapshot);
}
@@ -1554,16 +1751,121 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
* xe_guc_ct_print - GuC CT Print.
* @ct: GuC CT.
* @p: drm_printer where it will be printed out.
- * @atomic: Boolean to indicate if this is called from atomic context like
- * reset or CTB handler or from some regular path like debugfs.
+ * @want_ctb: Should the full CTB content be dumped (vs just the headers)
*
- * This function quickly capture a snapshot and immediately print it out.
+ * This function will quickly capture a snapshot of the CT state
+ * and immediately print it out.
*/
-void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
+void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
{
struct xe_guc_ct_snapshot *snapshot;
- snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
+ snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
xe_guc_ct_snapshot_print(snapshot, p);
xe_guc_ct_snapshot_free(snapshot);
}
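
With the `atomic` flag dropped from the exported capture call (it is now always atomic-safe), the snapshot API settles into a fixed capture/print/free shape. A usage sketch built only from the functions declared in xe_guc_ct.h:

/* Sketch: dump CT state to a drm_printer, e.g. from error handling. */
static void my_dump_ct(struct xe_guc_ct *ct, struct drm_printer *p)
{
	struct xe_guc_ct_snapshot *snapshot;

	snapshot = xe_guc_ct_snapshot_capture(ct);	/* GFP_ATOMIC, full CTB */
	if (!snapshot)
		return;

	xe_guc_ct_snapshot_print(snapshot, p);
	xe_guc_ct_snapshot_free(snapshot);
}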
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
+{
+ struct xe_guc_log_snapshot *snapshot_log;
+ struct xe_guc_ct_snapshot *snapshot_ct;
+ struct xe_guc *guc = ct_to_guc(ct);
+ unsigned long flags;
+ bool have_capture;
+
+ if (ctb)
+ ctb->info.broken = true;
+
+ /* Ignore further errors after the first dump until a reset */
+ if (ct->dead.reported)
+ return;
+
+ spin_lock_irqsave(&ct->dead.lock, flags);
+
+ /* And only capture one dump at a time */
+ have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
+ ct->dead.reason |= (1 << reason_code) |
+ (1 << CT_DEAD_STATE_CAPTURE);
+
+ spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+ if (have_capture)
+ return;
+
+ snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
+ snapshot_ct = xe_guc_ct_snapshot_capture(ct);
+
+ spin_lock_irqsave(&ct->dead.lock, flags);
+
+ if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
+ xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
+ xe_guc_log_snapshot_free(snapshot_log);
+ xe_guc_ct_snapshot_free(snapshot_ct);
+ } else {
+ ct->dead.snapshot_log = snapshot_log;
+ ct->dead.snapshot_ct = snapshot_ct;
+ }
+
+ spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+ queue_work(system_unbound_wq, &ct->dead.worker);
+}
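
ct_dead_capture() makes the reason bitmask do double duty: it records every failure code seen, and its CT_DEAD_STATE_CAPTURE bit acts as a test-and-set claim so only the first reporter pays for the expensive log and CT snapshots. The gating logic in isolation (names are illustrative, not the driver's):

#include <linux/spinlock.h>

/* Sketch: returns true only for the caller that should do the capture. */
static bool my_claim_capture(spinlock_t *lock, unsigned int *reason,
			     unsigned int reason_code, unsigned int claim_bit)
{
	unsigned long flags;
	bool already_claimed;

	spin_lock_irqsave(lock, flags);
	already_claimed = *reason & BIT(claim_bit);
	/* Always record why we died, even when someone else captures. */
	*reason |= BIT(reason_code) | BIT(claim_bit);
	spin_unlock_irqrestore(lock, flags);

	return !already_claimed;
}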
+
+static void ct_dead_print(struct xe_dead_ct *dead)
+{
+ struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ static int g_count;
+ struct drm_printer ip = xe_gt_info_printer(gt);
+ struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
+
+ if (!dead->reason) {
+ xe_gt_err(gt, "CTB is dead for no reason!?\n");
+ return;
+ }
+
+ drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason);
+
+ /* Can't generate a genuine core dump at this point, so just do the good bits */
+ drm_puts(&lp, "**** Xe Device Coredump ****\n");
+ xe_device_snapshot_print(xe, &lp);
+
+ drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
+ drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
+
+ drm_puts(&lp, "**** GuC Log ****\n");
+ xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
+
+ drm_puts(&lp, "**** GuC CT ****\n");
+ xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
+
+ drm_puts(&lp, "Done.\n");
+}
+
+static void ct_dead_worker_func(struct work_struct *w)
+{
+ struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
+
+ if (!ct->dead.reported) {
+ ct->dead.reported = true;
+ ct_dead_print(&ct->dead);
+ }
+
+ spin_lock_irq(&ct->dead.lock);
+
+ xe_guc_log_snapshot_free(ct->dead.snapshot_log);
+ ct->dead.snapshot_log = NULL;
+ xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
+ ct->dead.snapshot_ct = NULL;
+
+ if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
+ /* A reset has occurred so re-arm the error reporting */
+ ct->dead.reason = 0;
+ ct->dead.reported = false;
+ }
+
+ spin_unlock_irq(&ct->dead.lock);
+}
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 190202fce2d0..82c4ae458dda 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -9,6 +9,7 @@
#include "xe_guc_ct_types.h"
struct drm_printer;
+struct xe_device;
int xe_guc_ct_init(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
@@ -16,12 +17,10 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct);
void xe_guc_ct_stop(struct xe_guc_ct *ct);
void xe_guc_ct_fast_path(struct xe_guc_ct *ct);
-struct xe_guc_ct_snapshot *
-xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic);
-void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
- struct drm_printer *p);
+struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct);
+void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_printer *p);
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
-void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic);
+void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index 761cb9031298..8e1b9d981d61 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -52,8 +52,6 @@ struct guc_ctb {
struct guc_ctb_snapshot {
/** @desc: snapshot of the CTB descriptor */
struct guc_ct_buffer_desc desc;
- /** @cmds: snapshot of the CTB commands */
- u32 *cmds;
/** @info: snapshot of the CTB info */
struct guc_ctb_info info;
};
@@ -70,6 +68,10 @@ struct xe_guc_ct_snapshot {
struct guc_ctb_snapshot g2h;
/** @h2g: H2G CTB snapshot */
struct guc_ctb_snapshot h2g;
+ /** @ctb_size: size of the snapshot of the CTB */
+ size_t ctb_size;
+ /** @ctb: snapshot of the entire CTB */
+ u32 *ctb;
};
/**
@@ -86,6 +88,24 @@ enum xe_guc_ct_state {
XE_GUC_CT_STATE_ENABLED,
};
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+/**
+ * struct xe_dead_ct - Information for debugging a dead CT
+ */
+struct xe_dead_ct {
+ /** @lock: protects memory allocation/free operations, and @reason updates */
+ spinlock_t lock;
+ /** @reason: bit mask of CT_DEAD_* reason codes */
+ unsigned int reason;
+ /** @reported: for preventing multiple dumps per error sequence */
+ bool reported;
+ /** @worker: worker thread to get out of interrupt context before dumping */
+ struct work_struct worker;
+ /** @snapshot_ct: copy of CT state and CTB content at point of error */
+ struct xe_guc_ct_snapshot *snapshot_ct;
+ /** @snapshot_log: copy of GuC log at point of error */
+ struct xe_guc_log_snapshot *snapshot_log;
+};
+#endif
+
/**
* struct xe_guc_ct - GuC command transport (CT) layer
*
@@ -128,6 +148,11 @@ struct xe_guc_ct {
u32 msg[GUC_CTB_MSG_MAX_LEN];
/** @fast_msg: Message buffer */
u32 fast_msg[GUC_CTB_MSG_MAX_LEN];
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ /** @dead: information for debugging dead CTs */
+ struct xe_dead_ct dead;
+#endif
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
index d3822cbea273..995b306aced7 100644
--- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -47,9 +47,23 @@ static int guc_log(struct seq_file *m, void *data)
return 0;
}
+static int guc_ctb(struct seq_file *m, void *data)
+{
+ struct xe_guc *guc = node_to_guc(m->private);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_pm_runtime_get(xe);
+ xe_guc_ct_print(&guc->ct, &p, true);
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
static const struct drm_info_list debugfs_list[] = {
{"guc_info", guc_info, 0},
{"guc_log", guc_log, 0},
+ {"guc_ctb", guc_ctb, 0},
};
void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent)
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 19ee71aeaf17..057153f89b30 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -8,13 +8,16 @@
#include <linux/bits.h>
+#include "abi/guc_capture_abi.h"
#include "abi/guc_klvs_abi.h"
+#include "xe_hw_engine_types.h"
#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 4
#define G2H_LEN_DW_DEREGISTER_CONTEXT 3
#define G2H_LEN_DW_TLB_INVALIDATE 3
#define GUC_ID_MAX 65535
+#define GUC_ID_UNKNOWN 0xffffffff
#define GUC_CONTEXT_DISABLE 0
#define GUC_CONTEXT_ENABLE 1
@@ -103,6 +106,7 @@ struct guc_update_exec_queue_policy {
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
+#define GUC_CTL_ENABLE_LITE_RESTORE BIT(4)
#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
#define GUC_CTL_DEBUG 3
@@ -157,24 +161,6 @@ struct guc_policies {
u32 reserved[4];
} __packed;
-/* GuC MMIO reg state struct */
-struct guc_mmio_reg {
- u32 offset;
- u32 value;
- u32 flags;
- u32 mask;
-#define GUC_REGSET_MASKED BIT(0)
-#define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
-#define GUC_REGSET_RESTORE_ONLY BIT(3)
-} __packed;
-
-/* GuC register sets */
-struct guc_mmio_reg_set {
- u32 address;
- u16 count;
- u16 reserved;
-} __packed;
-
/* Generic GT SysInfo data types */
#define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED 0
#define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK 1
@@ -188,12 +174,6 @@ struct guc_gt_system_info {
u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
} __packed;
-enum {
- GUC_CAPTURE_LIST_INDEX_PF = 0,
- GUC_CAPTURE_LIST_INDEX_VF = 1,
- GUC_CAPTURE_LIST_INDEX_MAX = 2,
-};
-
/* GuC Additional Data Struct */
struct guc_ads {
struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
diff --git a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
index 9d99fe266d97..146a6eda9e06 100644
--- a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
+++ b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
@@ -49,6 +49,8 @@ const char *xe_guc_klv_key_to_string(u16 key)
return "begin_db_id";
case GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY:
return "begin_ctx_id";
+ case GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY:
+ return "sched_priority";
/* VF CFG threshold keys */
#define define_threshold_key_to_string_case(TAG, NAME, ...) \
diff --git a/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h b/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h
index da0fedbbdbaf..da10cf0389cb 100644
--- a/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h
+++ b/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h
@@ -18,6 +18,13 @@
MAKE_GUC_KLV_KEY(CONCATENATE(VF_CFG_THRESHOLD_, TAG))
/**
+ * MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN - Prepare the name of the KLV length constant.
+ * @TAG: unique tag of the GuC threshold KLV key.
+ */
+#define MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) \
+ MAKE_GUC_KLV_LEN(CONCATENATE(VF_CFG_THRESHOLD_, TAG))
+
+/**
* xe_guc_klv_threshold_key_to_index - Find index of the tracked GuC threshold.
* @key: GuC threshold KLV key.
*
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index a37ee3419428..2baa4d95571f 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -5,13 +5,26 @@
#include "xe_guc_log.h"
+#include <linux/fault-inject.h>
+
#include <drm/drm_managed.h>
+#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
+#include "xe_devcoredump.h"
+#include "xe_force_wake.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_map.h"
+#include "xe_mmio.h"
#include "xe_module.h"
+static struct xe_guc *
+log_to_guc(struct xe_guc_log *log)
+{
+ return container_of(log, struct xe_guc, log);
+}
+
static struct xe_gt *
log_to_gt(struct xe_guc_log *log)
{
@@ -49,32 +62,196 @@ static size_t guc_log_size(void)
CAPTURE_BUFFER_SIZE;
}
-void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p)
+#define GUC_LOG_CHUNK_SIZE SZ_2M
+
+static struct xe_guc_log_snapshot *xe_guc_log_snapshot_alloc(struct xe_guc_log *log, bool atomic)
+{
+ struct xe_guc_log_snapshot *snapshot;
+ size_t remain;
+ int i;
+
+ snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!snapshot)
+ return NULL;
+
+ /*
+ * NB: kmalloc has a hard limit well below the maximum GuC log buffer size,
+ * and vmalloc cannot be used because this may be called from atomic
+ * context. So the buffer is broken up into smaller chunks that can each
+ * be allocated individually.
+ */
+ snapshot->size = log->bo->size;
+ snapshot->num_chunks = DIV_ROUND_UP(snapshot->size, GUC_LOG_CHUNK_SIZE);
+
+ snapshot->copy = kcalloc(snapshot->num_chunks, sizeof(*snapshot->copy),
+ atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!snapshot->copy)
+ goto fail_snap;
+
+ remain = snapshot->size;
+ for (i = 0; i < snapshot->num_chunks; i++) {
+ size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+
+ snapshot->copy[i] = kmalloc(size, atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!snapshot->copy[i])
+ goto fail_copy;
+ remain -= size;
+ }
+
+ return snapshot;
+
+fail_copy:
+ for (i = 0; i < snapshot->num_chunks; i++)
+ kfree(snapshot->copy[i]);
+ kfree(snapshot->copy);
+fail_snap:
+ kfree(snapshot);
+ return NULL;
+}
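
The chunked layout is forced by allocator limits: kmalloc() cannot serve the multi-megabyte log in one piece and vmalloc() is unusable from atomic context, so the copy is a kcalloc()'d array of buffers of at most GUC_LOG_CHUNK_SIZE each. The same recipe in miniature (a sketch under those assumptions, not the driver function):

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#define MY_CHUNK_SIZE	SZ_2M

/* Sketch: allocate `size` bytes as an array of chunk buffers. */
static void **my_alloc_chunked(size_t size, int *num_chunks, gfp_t gfp)
{
	int n = DIV_ROUND_UP(size, MY_CHUNK_SIZE);
	size_t remain = size;
	void **chunks;
	int i;

	chunks = kcalloc(n, sizeof(*chunks), gfp);
	if (!chunks)
		return NULL;

	for (i = 0; i < n; i++) {
		size_t chunk = min_t(size_t, MY_CHUNK_SIZE, remain);

		chunks[i] = kmalloc(chunk, gfp);
		if (!chunks[i])
			goto fail;
		remain -= chunk;
	}

	*num_chunks = n;
	return chunks;

fail:
	while (i--)
		kfree(chunks[i]);
	kfree(chunks);
	return NULL;
}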
+
+/**
+ * xe_guc_log_snapshot_free - free a previously captured GuC log snapshot
+ * @snapshot: GuC log snapshot structure
+ *
+ * Free a snapshot previously created by xe_guc_log_snapshot_capture(),
+ * including all of its chunked copy buffers. Passing a NULL snapshot is safe.
+ */
+void xe_guc_log_snapshot_free(struct xe_guc_log_snapshot *snapshot)
+{
+ int i;
+
+ if (!snapshot)
+ return;
+
+ if (snapshot->copy) {
+ for (i = 0; i < snapshot->num_chunks; i++)
+ kfree(snapshot->copy[i]);
+ kfree(snapshot->copy);
+ }
+
+ kfree(snapshot);
+}
+
+/**
+ * xe_guc_log_snapshot_capture - create a new snapshot copy the GuC log for later dumping
+ * @log: GuC log structure
+ * @atomic: is the call inside an atomic section of some kind?
+ *
+ * Return: pointer to a newly allocated snapshot object or null if out of memory. Caller is
+ * responsible for calling xe_guc_log_snapshot_free when done with the snapshot.
+ */
+struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log, bool atomic)
{
+ struct xe_guc_log_snapshot *snapshot;
struct xe_device *xe = log_to_xe(log);
- size_t size;
- int i, j;
+ struct xe_guc *guc = log_to_guc(log);
+ struct xe_gt *gt = log_to_gt(log);
+ unsigned int fw_ref;
+ size_t remain;
+ int i;
- xe_assert(xe, log->bo);
+ if (!log->bo) {
+ xe_gt_err(gt, "GuC log buffer not allocated\n");
+ return NULL;
+ }
- size = log->bo->size;
+ snapshot = xe_guc_log_snapshot_alloc(log, atomic);
+ if (!snapshot) {
+ xe_gt_err(gt, "GuC log snapshot not allocated\n");
+ return NULL;
+ }
-#define DW_PER_READ 128
- xe_assert(xe, !(size % (DW_PER_READ * sizeof(u32))));
- for (i = 0; i < size / sizeof(u32); i += DW_PER_READ) {
- u32 read[DW_PER_READ];
+ remain = snapshot->size;
+ for (i = 0; i < snapshot->num_chunks; i++) {
+ size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
- xe_map_memcpy_from(xe, read, &log->bo->vmap, i * sizeof(u32),
- DW_PER_READ * sizeof(u32));
-#define DW_PER_PRINT 4
- for (j = 0; j < DW_PER_READ / DW_PER_PRINT; ++j) {
- u32 *print = read + j * DW_PER_PRINT;
+ xe_map_memcpy_from(xe, snapshot->copy[i], &log->bo->vmap,
+ i * GUC_LOG_CHUNK_SIZE, size);
+ remain -= size;
+ }
- drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- *(print + 0), *(print + 1),
- *(print + 2), *(print + 3));
- }
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref) {
+ snapshot->stamp = ~0ULL;
+ } else {
+ snapshot->stamp = xe_mmio_read64_2x32(&gt->mmio, GUC_PMTIMESTAMP_LO);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
+ snapshot->ktime = ktime_get_boottime_ns();
+ snapshot->level = log->level;
+ snapshot->ver_found = guc->fw.versions.found[XE_UC_FW_VER_RELEASE];
+ snapshot->ver_want = guc->fw.versions.wanted;
+ snapshot->path = guc->fw.path;
+
+ return snapshot;
+}
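
The GuC timestamp recorded here is a 64-bit counter exposed as two 32-bit registers and read via xe_mmio_read64_2x32(). The standard technique behind such helpers is to re-read the high word until it is stable across the low-word read, so a carry between the halves cannot be observed torn. A generic sketch of that technique (read32() is a hypothetical MMIO accessor, and this is not necessarily the helper's exact implementation):

/* Sketch: tear-free 64-bit read from a split lo/hi register pair. */
static u64 my_read64_2x32(u32 lo_reg, u32 hi_reg)
{
	u32 old_hi, hi, lo;

	hi = read32(hi_reg);
	do {
		old_hi = hi;
		lo = read32(lo_reg);
		hi = read32(hi_reg);	/* re-read: did the low word carry? */
	} while (hi != old_hi);

	return ((u64)hi << 32) | lo;
}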
+
+/**
+ * xe_guc_log_snapshot_print - dump a previously saved copy of the GuC log to some useful location
+ * @snapshot: a snapshot of the GuC log
+ * @p: the printer object to output to
+ */
+void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_printer *p)
+{
+ size_t remain;
+ int i;
+
+ if (!snapshot) {
+ drm_printf(p, "GuC log snapshot not allocated!\n");
+ return;
+ }
+
+ drm_printf(p, "GuC firmware: %s\n", snapshot->path);
+ drm_printf(p, "GuC version: %u.%u.%u (wanted %u.%u.%u)\n",
+ snapshot->ver_found.major, snapshot->ver_found.minor, snapshot->ver_found.patch,
+ snapshot->ver_want.major, snapshot->ver_want.minor, snapshot->ver_want.patch);
+ drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", snapshot->ktime, snapshot->ktime);
+ drm_printf(p, "GuC timestamp: 0x%08llX [%llu]\n", snapshot->stamp, snapshot->stamp);
+ drm_printf(p, "Log level: %u\n", snapshot->level);
+
+ remain = snapshot->size;
+ for (i = 0; i < snapshot->num_chunks; i++) {
+ size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+ const char *prefix = i ? NULL : "Log data";
+ char suffix = i == snapshot->num_chunks - 1 ? '\n' : 0;
+
+ xe_print_blob_ascii85(p, prefix, suffix, snapshot->copy[i], 0, size);
+ remain -= size;
+ }
+}
+
+/**
+ * xe_guc_log_print_dmesg - dump a copy of the GuC log to dmesg
+ * @log: GuC log structure
+ */
+void xe_guc_log_print_dmesg(struct xe_guc_log *log)
+{
+ struct xe_gt *gt = log_to_gt(log);
+ static int g_count;
+ struct drm_printer ip = xe_gt_info_printer(gt);
+ struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
+
+ drm_printf(&lp, "Dumping GuC log for %ps...\n", __builtin_return_address(0));
+
+ xe_guc_log_print(log, &lp);
+
+ drm_printf(&lp, "Done.\n");
+}
+
+/**
+ * xe_guc_log_print - dump a copy of the GuC log to some useful location
+ * @log: GuC log structure
+ * @p: the printer object to output to
+ */
+void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p)
+{
+ struct xe_guc_log_snapshot *snapshot;
+
+ drm_printf(p, "**** GuC Log ****\n");
+
+ snapshot = xe_guc_log_snapshot_capture(log, false);
+ drm_printf(p, "CS reference clock: %u\n", log_to_gt(log)->info.reference_clock);
+ xe_guc_log_snapshot_print(snapshot, p);
+ xe_guc_log_snapshot_free(snapshot);
}
int xe_guc_log_init(struct xe_guc_log *log)
@@ -96,3 +273,105 @@ int xe_guc_log_init(struct xe_guc_log *log)
return 0;
}
+
+ALLOW_ERROR_INJECTION(xe_guc_log_init, ERRNO); /* See xe_pci_probe() */
+
+static u32 xe_guc_log_section_size_crash(struct xe_guc_log *log)
+{
+ return CRASH_BUFFER_SIZE;
+}
+
+static u32 xe_guc_log_section_size_debug(struct xe_guc_log *log)
+{
+ return DEBUG_BUFFER_SIZE;
+}
+
+/**
+ * xe_guc_log_section_size_capture - Get capture buffer size within log sections.
+ * @log: The log object.
+ *
+ * This function will return the capture buffer size within log sections.
+ *
+ * Return: capture buffer size.
+ */
+u32 xe_guc_log_section_size_capture(struct xe_guc_log *log)
+{
+ return CAPTURE_BUFFER_SIZE;
+}
+
+/**
+ * xe_guc_get_log_buffer_size - Get log buffer size for a type.
+ * @log: The log object.
+ * @type: The log buffer type
+ *
+ * Return: buffer size.
+ */
+u32 xe_guc_get_log_buffer_size(struct xe_guc_log *log, enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_LOG_BUFFER_CRASH_DUMP:
+ return xe_guc_log_section_size_crash(log);
+ case GUC_LOG_BUFFER_DEBUG:
+ return xe_guc_log_section_size_debug(log);
+ case GUC_LOG_BUFFER_CAPTURE:
+ return xe_guc_log_section_size_capture(log);
+ }
+ return 0;
+}
+
+/**
+ * xe_guc_get_log_buffer_offset - Get offset in log buffer for a type.
+ * @log: The log object.
+ * @type: The log buffer type
+ *
+ * This function will return the offset in the log buffer for a type.
+ * Return: buffer offset.
+ */
+u32 xe_guc_get_log_buffer_offset(struct xe_guc_log *log, enum guc_log_buffer_type type)
+{
+ enum guc_log_buffer_type i;
+ u32 offset = PAGE_SIZE; /* for the log_buffer_states */
+
+ for (i = GUC_LOG_BUFFER_CRASH_DUMP; i < GUC_LOG_BUFFER_TYPE_MAX; ++i) {
+ if (i == type)
+ break;
+ offset += xe_guc_get_log_buffer_size(log, i);
+ }
+
+ return offset;
+}
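
The walk makes the log buffer layout explicit: one page of log_buffer_states, then the crash, debug and capture sections packed back to back. With the non-LARGE sizes from xe_guc_log.h after this patch, and assuming 4 KiB pages, the offsets work out as follows (illustrative arithmetic only):

/*
 * states:  offset 0       size PAGE_SIZE             (4 KiB assumed)
 * crash:   offset 4 KiB   size CRASH_BUFFER_SIZE     (8 KiB)
 * debug:   offset 12 KiB  size DEBUG_BUFFER_SIZE     (64 KiB)
 * capture: offset 76 KiB  size CAPTURE_BUFFER_SIZE   (1 MiB)
 */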
+
+/**
+ * xe_guc_check_log_buf_overflow - Check if log buffer overflowed
+ * @log: The log object.
+ * @type: The log buffer type
+ * @full_cnt: The count of buffer full
+ *
+ * This function checks the current buffer-full count against the previous
+ * one; a mismatch indicates an overflow. It updates the sampled_overflow
+ * counter and, because the hardware count is only a 4-bit counter, adds 16
+ * whenever that counter has wrapped.
+ *
+ * Return: True if overflowed.
+ */
+bool xe_guc_check_log_buf_overflow(struct xe_guc_log *log, enum guc_log_buffer_type type,
+ unsigned int full_cnt)
+{
+ unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
+ bool overflow = false;
+
+ if (full_cnt != prev_full_cnt) {
+ overflow = true;
+
+ log->stats[type].overflow = full_cnt;
+ log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;
+
+ if (full_cnt < prev_full_cnt) {
+ /* buffer_full_cnt is a 4 bit counter */
+ log->stats[type].sampled_overflow += 16;
+ }
+ xe_gt_notice(log_to_gt(log), "log buffer overflow\n");
+ }
+
+ return overflow;
+}
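
The count reported by the GuC is only 4 bits wide, so it wraps every 16 buffer-full events; the helper reconstructs a monotonic total by adding the raw delta and then compensating with +16 when the raw value went backwards. Because the arithmetic is unsigned, the underflowing delta plus 16 lands on exactly the right total. The arithmetic in isolation:

/* Sketch: fold a wrapping 4-bit sample into a monotonic total. */
static unsigned int my_accumulate(unsigned int total, unsigned int prev,
				  unsigned int cur)
{
	if (cur == prev)
		return total;		/* no new overflows */

	total += cur - prev;		/* wraps modulo 2^32 when cur < prev */
	if (cur < prev)
		total += 16;		/* ...and +16 corrects the 4-bit wrap */

	return total;
}

For example, prev = 15 and cur = 2 yields total += 3, the true number of overflows since the last sample.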
diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h
index 2d25ab28b4b3..5b896f5fafaf 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.h
+++ b/drivers/gpu/drm/xe/xe_guc_log.h
@@ -7,8 +7,10 @@
#define _XE_GUC_LOG_H_
#include "xe_guc_log_types.h"
+#include "abi/guc_log_abi.h"
struct drm_printer;
+struct xe_device;
#if IS_ENABLED(CONFIG_DRM_XE_LARGE_GUC_BUFFER)
#define CRASH_BUFFER_SIZE SZ_1M
@@ -17,7 +19,7 @@ struct drm_printer;
#else
#define CRASH_BUFFER_SIZE SZ_8K
#define DEBUG_BUFFER_SIZE SZ_64K
-#define CAPTURE_BUFFER_SIZE SZ_16K
+#define CAPTURE_BUFFER_SIZE SZ_1M
#endif
/*
* While we're using plain log level in i915, GuC controls are much more...
@@ -38,6 +40,10 @@ struct drm_printer;
int xe_guc_log_init(struct xe_guc_log *log);
void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p);
+void xe_guc_log_print_dmesg(struct xe_guc_log *log);
+struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log, bool atomic);
+void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_printer *p);
+void xe_guc_log_snapshot_free(struct xe_guc_log_snapshot *snapshot);
static inline u32
xe_guc_log_get_level(struct xe_guc_log *log)
@@ -45,4 +51,11 @@ xe_guc_log_get_level(struct xe_guc_log *log)
return log->level;
}
+u32 xe_guc_log_section_size_capture(struct xe_guc_log *log);
+u32 xe_guc_get_log_buffer_size(struct xe_guc_log *log, enum guc_log_buffer_type type);
+u32 xe_guc_get_log_buffer_offset(struct xe_guc_log *log, enum guc_log_buffer_type type);
+bool xe_guc_check_log_buf_overflow(struct xe_guc_log *log,
+ enum guc_log_buffer_type type,
+ unsigned int full_cnt);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_log_types.h b/drivers/gpu/drm/xe/xe_guc_log_types.h
index 125080d138a7..b3d5c72ac752 100644
--- a/drivers/gpu/drm/xe/xe_guc_log_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_log_types.h
@@ -7,10 +7,38 @@
#define _XE_GUC_LOG_TYPES_H_
#include <linux/types.h>
+#include "abi/guc_log_abi.h"
+
+#include "xe_uc_fw_types.h"
struct xe_bo;
/**
+ * struct xe_guc_log_snapshot - GuC log capture plus state useful for decoding it
+ */
+struct xe_guc_log_snapshot {
+ /** @size: Size in bytes of the @copy allocation */
+ size_t size;
+ /** @copy: Host memory copy of the log buffer for later dumping, split into chunks */
+ void **copy;
+ /** @num_chunks: Number of chunks within @copy */
+ int num_chunks;
+ /** @ktime: Kernel time the snapshot was taken */
+ u64 ktime;
+ /** @stamp: GuC timestamp at which the snapshot was taken */
+ u64 stamp;
+ /** @level: GuC log verbosity level */
+ u32 level;
+ /** @ver_found: GuC firmware version */
+ struct xe_uc_fw_version ver_found;
+ /** @ver_want: GuC firmware version that driver expected */
+ struct xe_uc_fw_version ver_want;
+ /** @path: Path of GuC firmware blob */
+ const char *path;
+};
+
+/**
* struct xe_guc_log - GuC log
*/
struct xe_guc_log {
@@ -18,6 +46,12 @@ struct xe_guc_log {
u32 level;
/** @bo: XE BO for GuC log */
struct xe_bo *bo;
+ /** @stats: logging related stats */
+ struct {
+ u32 sampled_overflow;
+ u32 overflow;
+ u32 flush;
+ } stats[GUC_LOG_BUFFER_TYPE_MAX];
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 034b29984d5e..df7f130fb663 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -38,6 +38,7 @@
#define FREQ_INFO_REC XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define RPE_MASK REG_GENMASK(15, 8)
+#define RPA_MASK REG_GENMASK(31, 16)
#define GT_PERF_STATUS XE_REG(0x1381b4)
#define CAGF_MASK REG_GENMASK(19, 11)
@@ -262,7 +263,7 @@ static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;
/* Allow/Disallow punit to process software freq requests */
- xe_mmio_write32(gt, RP_CONTROL, state);
+ xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}
static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
@@ -274,7 +275,7 @@ static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
/* Req freq is in units of 16.66 Mhz */
rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
- xe_mmio_write32(gt, RPNSWREQ, rpnswreq);
+ xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);
/* Sleep for a small time to allow pcode to respond */
usleep_range(100, 300);
@@ -328,19 +329,51 @@ static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
freq);
}
+static void mtl_update_rpa_value(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 reg;
+
+ if (xe_gt_is_media_type(gt))
+ reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
+ else
+ reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);
+
+ pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
+}
+
static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
u32 reg;
if (xe_gt_is_media_type(gt))
- reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
+ reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
else
- reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);
+ reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);
pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}
+static void tgl_update_rpa_value(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 reg;
+
+ /*
+ * For PVC we still need to use fused RP1 as the approximation for RPe
+ * For other platforms than PVC we get the resolved RPe directly from
+ * PCODE at a different register
+ */
+ if (xe->info.platform == XE_PVC)
+ reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
+ else
+ reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
+
+ pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+}
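
On these pre-MTL paths the RPa value is a ratio field (bits 31:16 of FREQ_INFO_REC, per RPA_MASK above) scaled by GT_FREQUENCY_MULTIPLIER, 50 MHz per step on these platforms, to get MHz. A worked decode with an assumed raw register value:

/*
 * Illustrative only - the raw value is made up:
 *
 *   reg  = 0x001e0000;                        // RPA field = 0x1e = 30
 *   rpa  = REG_FIELD_GET(RPA_MASK, reg);      // -> 30
 *   freq = rpa * GT_FREQUENCY_MULTIPLIER;     // 30 * 50 = 1500 MHz
 */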
+
static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
@@ -353,9 +386,9 @@ static void tgl_update_rpe_value(struct xe_guc_pc *pc)
* PCODE at a different register
*/
if (xe->info.platform == XE_PVC)
- reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
+ reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
else
- reg = xe_mmio_read32(gt, FREQ_INFO_REC);
+ reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
@@ -365,10 +398,13 @@ static void pc_update_rp_values(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
struct xe_device *xe = gt_to_xe(gt);
- if (GRAPHICS_VERx100(xe) >= 1270)
+ if (GRAPHICS_VERx100(xe) >= 1270) {
+ mtl_update_rpa_value(pc);
mtl_update_rpe_value(pc);
- else
+ } else {
+ tgl_update_rpa_value(pc);
tgl_update_rpe_value(pc);
+ }
/*
* RPe is decided at runtime by PCODE. In the rare case where that's
@@ -392,10 +428,10 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
/* When in RC6, actual frequency reported will be 0. */
if (GRAPHICS_VERx100(xe) >= 1270) {
- freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
+ freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
} else {
- freq = xe_mmio_read32(gt, GT_PERF_STATUS);
+ freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
freq = REG_FIELD_GET(CAGF_MASK, freq);
}
@@ -415,22 +451,24 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
struct xe_gt *gt = pc_to_gt(pc);
- int ret;
+ unsigned int fw_ref;
/*
* GuC SLPC plays with cur freq request when GuCRC is enabled
* Block RC6 for a more reliable read.
*/
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (ret)
- return ret;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return -ETIMEDOUT;
+ }
- *freq = xe_mmio_read32(gt, RPNSWREQ);
+ *freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
*freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
*freq = decode_freq(*freq);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
}
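
These hunks are part of a tree-wide conversion: xe_force_wake_get() now returns a domain reference mask instead of an errno, success is tested with xe_force_wake_ref_has_domain(), and xe_force_wake_put(fw, fw_ref) is called even on failure so any partially woken domains are released. The resulting call shape, distilled from the hunks in this file:

/* Sketch of the new force-wake idiom used throughout this series. */
static int my_read_with_gt_awake(struct xe_gt *gt, u32 *out)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
		/* Drop whatever partially woke before bailing out. */
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	*out = xe_mmio_read32(&gt->mmio, RPNSWREQ);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}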
@@ -446,6 +484,19 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
}
/**
+ * xe_guc_pc_get_rpa_freq - Get the RPa freq
+ * @pc: The GuC PC
+ *
+ * Returns: RPa freq.
+ */
+u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
+{
+ pc_update_rp_values(pc);
+
+ return pc->rpa_freq;
+}
+
+/**
* xe_guc_pc_get_rpe_freq - Get the RPe freq
* @pc: The GuC PC
*
@@ -479,9 +530,10 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
*/
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
- struct xe_gt *gt = pc_to_gt(pc);
int ret;
+ xe_device_assert_mem_access(pc_to_xe(pc));
+
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -489,22 +541,12 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
goto out;
}
- /*
- * GuC SLPC plays with min freq request when GuCRC is enabled
- * Block RC6 for a more reliable read.
- */
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (ret)
- goto out;
-
ret = pc_action_query_task_state(pc);
if (ret)
- goto fw;
+ goto out;
*freq = pc_get_min_freq(pc);
-fw:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
mutex_unlock(&pc->freq_lock);
return ret;
@@ -612,10 +654,10 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
u32 reg, gt_c_state;
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
- reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
+ reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
} else {
- reg = xe_mmio_read32(gt, GT_CORE_STATUS);
+ reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
}
@@ -638,7 +680,7 @@ u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 reg;
- reg = xe_mmio_read32(gt, GT_GFX_RC6);
+ reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);
return reg;
}
@@ -652,7 +694,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u64 reg;
- reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
+ reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);
return reg;
}
@@ -665,9 +707,9 @@ static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
xe_device_assert_mem_access(pc_to_xe(pc));
if (xe_gt_is_media_type(gt))
- reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
+ reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
else
- reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);
+ reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);
pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));
@@ -683,9 +725,9 @@ static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
xe_device_assert_mem_access(pc_to_xe(pc));
if (xe->info.platform == XE_PVC)
- reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
+ reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
else
- reg = xe_mmio_read32(gt, RP_STATE_CAP);
+ reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
@@ -855,6 +897,7 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
struct xe_device *xe = pc_to_xe(pc);
struct xe_gt *gt = pc_to_gt(pc);
+ unsigned int fw_ref;
int ret = 0;
if (xe->info.skip_guc_pc)
@@ -864,13 +907,15 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
if (ret)
return ret;
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (ret)
- return ret;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return -ETIMEDOUT;
+ }
xe_gt_idle_disable_c6(gt);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
}
@@ -956,13 +1001,16 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
struct xe_device *xe = pc_to_xe(pc);
struct xe_gt *gt = pc_to_gt(pc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+ unsigned int fw_ref;
int ret;
xe_gt_assert(gt, xe_device_uc_enabled(xe));
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (ret)
- return ret;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return -ETIMEDOUT;
+ }
if (xe->info.skip_guc_pc) {
if (xe->info.platform != XE_PVC)
@@ -1005,7 +1053,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
out:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return ret;
}
@@ -1037,18 +1085,19 @@ static void xe_guc_pc_fini_hw(void *arg)
{
struct xe_guc_pc *pc = arg;
struct xe_device *xe = pc_to_xe(pc);
+ unsigned int fw_ref;
if (xe_device_wedged(xe))
return;
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
+ fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
xe_guc_pc_gucrc_disable(pc);
XE_WARN_ON(xe_guc_pc_stop(pc));
/* Bind requested freq to mert_freq_cap before unload */
pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));
- xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index efda432fadfc..619f59cd633c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -21,6 +21,7 @@ int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc);
+u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
index 13810be015db..2978ac9a249b 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h
@@ -17,6 +17,8 @@ struct xe_guc_pc {
struct xe_bo *bo;
/** @rp0_freq: HW RP0 frequency - The Maximum one */
u32 rp0_freq;
+ /** @rpa_freq: HW RPa frequency - The Achievable one */
+ u32 rpa_freq;
/** @rpe_freq: HW RPe frequency - The Efficient one */
u32 rpe_freq;
/** @rpn_freq: HW RPN frequency - The Minimum one */
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c
index ade6162dc259..8f62de026724 100644
--- a/drivers/gpu/drm/xe/xe_guc_relay.c
+++ b/drivers/gpu/drm/xe/xe_guc_relay.c
@@ -5,6 +5,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/fault-inject.h>
#include <drm/drm_managed.h>
@@ -355,6 +356,7 @@ int xe_guc_relay_init(struct xe_guc_relay *relay)
return drmm_add_action_or_reset(&xe->drm, __fini_relay, relay);
}
+ALLOW_ERROR_INJECTION(xe_guc_relay_init, ERRNO); /* See xe_pci_probe() */
static u32 to_relay_error(int err)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 80062e1d3f66..913c74d6e2ae 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -27,6 +27,7 @@
#include "xe_gt_clock.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
+#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
@@ -224,80 +225,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
EXEC_QUEUE_STATE_BANNED));
}
-#ifdef CONFIG_PROVE_LOCKING
-static int alloc_submit_wq(struct xe_guc *guc)
-{
- int i;
-
- for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
- guc->submission_state.submit_wq_pool[i] =
- alloc_ordered_workqueue("submit_wq", 0);
- if (!guc->submission_state.submit_wq_pool[i])
- goto err_free;
- }
-
- return 0;
-
-err_free:
- while (i)
- destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
-
- return -ENOMEM;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
- int i;
-
- for (i = 0; i < NUM_SUBMIT_WQ; ++i)
- destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
- int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
-
- return guc->submission_state.submit_wq_pool[idx];
-}
-#else
-static int alloc_submit_wq(struct xe_guc *guc)
-{
- return 0;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
- return NULL;
-}
-#endif
-
-static void xe_guc_submit_fini(struct xe_guc *guc)
-{
- struct xe_device *xe = guc_to_xe(guc);
- struct xe_gt *gt = guc_to_gt(guc);
- int ret;
-
- ret = wait_event_timeout(guc->submission_state.fini_wq,
- xa_empty(&guc->submission_state.exec_queue_lookup),
- HZ * 5);
-
- drain_workqueue(xe->destroy_wq);
-
- xe_gt_assert(gt, ret);
-}
-
static void guc_submit_fini(struct drm_device *drm, void *arg)
{
struct xe_guc *guc = arg;
- xe_guc_submit_fini(guc);
xa_destroy(&guc->submission_state.exec_queue_lookup);
- free_submit_wq(guc);
}
static void guc_submit_wedged_fini(void *arg)
@@ -359,10 +291,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
if (err)
return err;
- err = alloc_submit_wq(guc);
- if (err)
- return err;
-
gt->exec_queue_ops = &guc_exec_queue_ops;
xa_init(&guc->submission_state.exec_queue_lookup);
@@ -393,7 +321,6 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{
int ret;
- void *ptr;
int i;
/*
@@ -413,12 +340,10 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
q->guc->id = ret;
for (i = 0; i < q->width; ++i) {
- ptr = xa_store(&guc->submission_state.exec_queue_lookup,
- q->guc->id + i, q, GFP_NOWAIT);
- if (IS_ERR(ptr)) {
- ret = PTR_ERR(ptr);
+ ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup,
+ q->guc->id + i, q, GFP_NOWAIT));
+ if (ret)
goto err_release;
- }
}
return 0;
@@ -487,12 +412,11 @@ static const int xe_exec_queue_prio_to_guc[] = {
static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{
struct exec_queue_policy policy;
- struct xe_device *xe = guc_to_xe(guc);
enum xe_exec_queue_priority prio = q->sched_props.priority;
u32 timeslice_us = q->sched_props.timeslice_us;
u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
- xe_assert(xe, exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
__guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
@@ -526,12 +450,11 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
struct guc_ctxt_registration_info *info)
{
#define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
- struct xe_device *xe = guc_to_xe(guc);
u32 action[MAX_MLRC_REG_SIZE];
int len = 0;
int i;
- xe_assert(xe, xe_exec_queue_is_parallel(q));
+ xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q));
action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
action[len++] = info->flags;
@@ -554,7 +477,7 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
}
- xe_assert(xe, len <= MAX_MLRC_REG_SIZE);
+ xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE
xe_guc_ct_send(&guc->ct, action, len, 0, 0);
@@ -588,7 +511,7 @@ static void register_exec_queue(struct xe_exec_queue *q)
struct xe_lrc *lrc = q->lrc[0];
struct guc_ctxt_registration_info info;
- xe_assert(xe, !exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q));
memset(&info, 0, sizeof(info));
info.context_idx = q->guc->id;
@@ -678,7 +601,7 @@ static int wq_noop_append(struct xe_exec_queue *q)
if (wq_wait_for_space(q, wq_space_until_wrap(q)))
return -ENODEV;
- xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw));
+ xe_gt_assert(guc_to_gt(guc), FIELD_FIT(WQ_LEN_MASK, len_dw));
parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
@@ -718,13 +641,13 @@ static void wq_item_append(struct xe_exec_queue *q)
wqi[i++] = lrc->ring.tail / sizeof(u64);
}
- xe_assert(xe, i == wqi_size / sizeof(u32));
+ xe_gt_assert(guc_to_gt(guc), i == wqi_size / sizeof(u32));
iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
wq[q->guc->wqi_tail / sizeof(u32)]));
xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
q->guc->wqi_tail += wqi_size;
- xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);
+ xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE);
xe_device_wmb(xe);
@@ -736,7 +659,6 @@ static void wq_item_append(struct xe_exec_queue *q)
static void submit_exec_queue(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_lrc *lrc = q->lrc[0];
u32 action[3];
u32 g2h_len = 0;
@@ -744,7 +666,7 @@ static void submit_exec_queue(struct xe_exec_queue *q)
int len = 0;
bool extra_submit = false;
- xe_assert(xe, exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
if (xe_exec_queue_is_parallel(q))
wq_item_append(q);
@@ -791,11 +713,11 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
struct xe_sched_job *job = to_xe_sched_job(drm_job);
struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
+ struct dma_fence *fence = NULL;
bool lr = xe_exec_queue_is_lr(q);
- xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
- exec_queue_banned(q) || exec_queue_suspended(q));
+ xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
+ exec_queue_banned(q) || exec_queue_suspended(q));
trace_xe_sched_job_run(job);
@@ -809,25 +731,23 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
if (lr) {
xe_sched_job_set_error(job, -EOPNOTSUPP);
- return NULL;
- } else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) {
- return job->fence;
+ dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */
} else {
- return dma_fence_get(job->fence);
+ fence = job->fence;
}
+
+ return fence;
}
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
{
struct xe_sched_job *job = to_xe_sched_job(drm_job);
- xe_exec_queue_update_run_ticks(job->q);
-
trace_xe_sched_job_free(job);
xe_sched_job_put(job);
}
-static int guc_read_stopped(struct xe_guc *guc)
+int xe_guc_read_stopped(struct xe_guc *guc)
{
return atomic_read(&guc->submission_state.stopped);
}
@@ -843,17 +763,19 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
struct xe_exec_queue *q)
{
MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
- struct xe_device *xe = guc_to_xe(guc);
int ret;
set_min_preemption_timeout(guc, q);
smp_rmb();
- ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) ||
- guc_read_stopped(guc), HZ * 5);
+ ret = wait_event_timeout(guc->ct.wq,
+ (!exec_queue_pending_enable(q) &&
+ !exec_queue_pending_disable(q)) ||
+ xe_guc_read_stopped(guc),
+ HZ * 5);
if (!ret) {
struct xe_gpu_scheduler *sched = &q->guc->sched;
- drm_warn(&xe->drm, "Pending enable failed to respond");
+ xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n");
xe_sched_submission_start(sched);
xe_gt_reset_async(q->gt);
xe_sched_tdr_queue_imm(sched);
@@ -897,7 +819,7 @@ static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
*/
void xe_guc_submit_wedge(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
unsigned long index;
int err;
@@ -907,7 +829,8 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
guc_submit_wedged_fini, guc);
if (err) {
- drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n");
+ xe_gt_err(gt, "Failed to register clean-up on wedged.mode=2; "
+ "the device is wedged regardless\n");
return;
}
@@ -939,11 +862,10 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
container_of(w, struct xe_guc_exec_queue, lr_tdr);
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_gpu_scheduler *sched = &ge->sched;
bool wedged;
- xe_assert(xe, xe_exec_queue_is_lr(q));
+ xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
trace_xe_exec_queue_lr_cleanup(q);
wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
@@ -975,15 +897,21 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
*/
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc), HZ * 5);
if (!ret) {
- drm_warn(&xe->drm, "Schedule disable failed to respond");
+ xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
+ q->guc->id);
+ xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n",
+ q->guc->id);
xe_sched_submission_start(sched);
xe_gt_reset_async(q->gt);
return;
}
}
+ if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
+ xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
+
xe_sched_submission_start(sched);
}
@@ -992,12 +920,22 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
{
struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
- u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
- u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
+ u32 ctx_timestamp, ctx_job_timestamp;
u32 timeout_ms = q->sched_props.job_timeout_ms;
u32 diff;
u64 running_time_ms;
+ if (!xe_sched_job_started(job)) {
+ xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id);
+
+ return xe_sched_invalidate_job(job, 2);
+ }
+
+ ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
+ ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
+
/*
* Counter wraps at ~223s at the usual 19.2MHz; be paranoid and catch
* possible overflows with a high timeout.
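As a sanity check on the ~223 s figure in the comment, assuming a 32-bit timestamp counter ticking at 19.2 MHz:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 2^32 ticks at 19.2 MHz before the 32-bit counter wraps */
	double wrap_s = ((double)UINT32_MAX + 1) / 19.2e6;

	printf("wrap after ~%.1f s\n", wrap_s);  /* ~223.7 s */
	return 0;
}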
@@ -1043,8 +981,8 @@ static void enable_scheduling(struct xe_exec_queue *q)
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_enable(q) ||
- guc_read_stopped(guc), HZ * 5);
- if (!ret || guc_read_stopped(guc)) {
+ xe_guc_read_stopped(guc), HZ * 5);
+ if (!ret || xe_guc_read_stopped(guc)) {
xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
set_exec_queue_banned(q);
xe_gt_reset_async(q->gt);
@@ -1099,6 +1037,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_guc *guc = exec_queue_to_guc(q);
const char *process_name = "no process";
+ struct xe_device *xe = guc_to_xe(guc);
+ unsigned int fw_ref;
int err = -ETIME;
pid_t pid = -1;
int i = 0;
@@ -1106,10 +1046,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
/*
* TDR has fired before free job worker. Common if exec queue
- * immediately closed after last fence signaled.
+ * immediately closed after last fence signaled. Add the job back to the
+ * pending list so it can be freed, and kick the scheduler to ensure the
+ * free-job work is not lost.
*/
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
- guc_exec_queue_free_job(drm_job);
+ xe_sched_add_pending_job(sched, job);
+ xe_sched_submission_start(sched);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -1122,9 +1065,21 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
exec_queue_killed_or_banned_or_wedged(q) ||
exec_queue_destroyed(q);
- /* Job hasn't started, can't be timed out */
- if (!skip_timeout_check && !xe_sched_job_started(job))
- goto rearm;
+ /*
+ * If a devcoredump has not been captured and the GuC capture for the
+ * job is not ready, do a manual capture first and decide later whether
+ * we need to use it.
+ */
+ if (!exec_queue_killed(q) && !xe->devcoredump.captured &&
+ !xe_guc_capture_get_matching_and_lock(q)) {
+ /* take forcewake before manually capturing engine registers */
+ fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n");
+
+ xe_engine_snapshot_capture_for_queue(q);
+
+ xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
+ }
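A toy model of the fw_ref pattern used here, since it differs from the older error-code style: xe_force_wake_get() returns a mask of the domains actually acquired, capture proceeds even on partial acquisition, and the same mask is handed back to the put side. Standalone C; the constants and failure behavior are illustrative only:

#include <stdio.h>

#define FW_GT     0x1u
#define FW_RENDER 0x2u
#define FW_ALL    (FW_GT | FW_RENDER)

/* pretend the render domain failed to power up */
static unsigned int fw_get(unsigned int want)
{
	return want & FW_GT;
}

static void fw_put(unsigned int ref)
{
	(void)ref;  /* release exactly the domains that were acquired */
}

int main(void)
{
	unsigned int fw_ref = fw_get(FW_ALL);

	if ((fw_ref & FW_ALL) != FW_ALL)
		printf("partial forcewake; captured registers may be stale\n");
	/* ... read engine registers here ... */
	fw_put(fw_ref);
	return 0;
}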
/*
* XXX: Sampling timeout doesn't work in wedged mode as we have to
@@ -1148,9 +1103,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* modifying state
*/
ret = wait_event_timeout(guc->ct.wq,
- !exec_queue_pending_enable(q) ||
- guc_read_stopped(guc), HZ * 5);
- if (!ret || guc_read_stopped(guc))
+ (!exec_queue_pending_enable(q) &&
+ !exec_queue_pending_disable(q)) ||
+ xe_guc_read_stopped(guc), HZ * 5);
+ if (!ret || xe_guc_read_stopped(guc))
goto trigger_reset;
/*
@@ -1174,11 +1130,16 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
smp_rmb();
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- guc_read_stopped(guc), HZ * 5);
- if (!ret || guc_read_stopped(guc)) {
+ xe_guc_read_stopped(guc), HZ * 5);
+ if (!ret || xe_guc_read_stopped(guc)) {
trigger_reset:
if (!ret)
- xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond");
+ xe_gt_warn(guc_to_gt(guc),
+ "Schedule disable failed to respond, guc_id=%d",
+ q->guc->id);
+ xe_devcoredump(q, job,
+ "Schedule disable failed to respond, guc_id=%d, ret=%d, guc_read=%d",
+ q->guc->id, ret, xe_guc_read_stopped(guc));
set_exec_queue_extra_ref(q);
xe_exec_queue_get(q); /* GT reset owns this */
set_exec_queue_banned(q);
@@ -1208,7 +1169,10 @@ trigger_reset:
trace_xe_sched_job_timedout(job);
if (!exec_queue_killed(q))
- xe_devcoredump(job);
+ xe_devcoredump(q, job,
+ "Timedout job - seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id, q->flags);
/*
* Kernel jobs should never fail, nor should VM jobs if they do
@@ -1262,7 +1226,7 @@ sched_enable:
enable_scheduling(q);
rearm:
/*
- * XXX: Ideally want to adjust timeout based on current exection time
+ * XXX: Ideally want to adjust timeout based on current execution time
* but there is not currently an easy way to do that in the DRM scheduler. With
* some thought, do this in a follow up.
*/
@@ -1323,9 +1287,8 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
{
struct xe_exec_queue *q = msg->private_data;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
+ xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
trace_xe_exec_queue_cleanup_entity(q);
if (exec_queue_registered(q))
@@ -1361,11 +1324,10 @@ static void __suspend_fence_signal(struct xe_exec_queue *q)
static void suspend_fence_signal(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
- guc_read_stopped(guc));
- xe_assert(xe, q->guc->suspend_pending);
+ xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) ||
+ xe_guc_read_stopped(guc));
+ xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending);
__suspend_fence_signal(q);
}
@@ -1377,10 +1339,10 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
exec_queue_enabled(q)) {
- wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING ||
- guc_read_stopped(guc));
+ wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING ||
+ xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q));
- if (!guc_read_stopped(guc)) {
+ if (!xe_guc_read_stopped(guc)) {
s64 since_resume_ms =
ktime_ms_delta(ktime_get(),
q->guc->resume_time);
@@ -1461,12 +1423,11 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_guc_exec_queue *ge;
long timeout;
int err, i;
- xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));
+ xe_gt_assert(guc_to_gt(guc), xe_device_uc_enabled(guc_to_xe(guc)));
ge = kzalloc(sizeof(*ge), GFP_KERNEL);
if (!ge)
@@ -1482,8 +1443,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
- get_submit_wq(guc),
- q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+ NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
timeout, guc_to_gt(guc)->ordered_wq, NULL,
q->name, gt_to_xe(q->gt)->drm.dev);
if (err)
@@ -1505,7 +1465,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
q->entity = &ge->entity;
- if (guc_read_stopped(guc))
+ if (xe_guc_read_stopped(guc))
xe_sched_stop(sched);
mutex_unlock(&guc->submission_state.lock);
@@ -1661,7 +1621,7 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
!READ_ONCE(q->guc->suspend_pending) ||
exec_queue_killed(q) ||
- guc_read_stopped(guc),
+ xe_guc_read_stopped(guc),
HZ * 5);
if (!ret) {
@@ -1680,9 +1640,8 @@ static void guc_exec_queue_resume(struct xe_exec_queue *q)
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, !q->guc->suspend_pending);
+ xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending);
xe_sched_msg_lock(sched);
guc_exec_queue_try_add_msg(q, msg, RESUME);
@@ -1755,7 +1714,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
ban = true;
}
} else if (xe_exec_queue_is_lr(q) &&
- (xe_lrc_ring_head(q->lrc[0]) != xe_lrc_ring_tail(q->lrc[0]))) {
+ !xe_lrc_ring_is_idle(q->lrc[0])) {
ban = true;
}
@@ -1787,21 +1746,25 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
void xe_guc_submit_reset_wait(struct xe_guc *guc)
{
wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) ||
- !guc_read_stopped(guc));
+ !xe_guc_read_stopped(guc));
}
void xe_guc_submit_stop(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, guc_read_stopped(guc) == 1);
+ xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock);
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
guc_exec_queue_stop(guc, q);
+ }
mutex_unlock(&guc->submission_state.lock);
@@ -1833,14 +1796,18 @@ int xe_guc_submit_start(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, guc_read_stopped(guc) == 1);
+ xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped);
- xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to start parallel queues */
+ if (q->guc->id != index)
+ continue;
+
guc_exec_queue_start(q);
+ }
mutex_unlock(&guc->submission_state.lock);
wake_up_all(&guc->ct.wq);
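The `q->guc->id != index` skip in both loops assumes a parallel queue of width N is registered under N consecutive guc_ids, all resolving to the same queue object whose base id equals the first index. A toy model with a plain array standing in for the xarray (sizes and names invented):

#include <stdio.h>

struct q { int id; int width; };

int main(void)
{
	struct q parallel = { .id = 4, .width = 3 };
	struct q *lookup[8] = { 0 };

	for (int i = 0; i < parallel.width; i++)
		lookup[parallel.id + i] = &parallel;

	for (int index = 0; index < 8; index++) {
		struct q *qp = lookup[index];

		if (!qp || qp->id != index)  /* visit each queue only once */
			continue;
		printf("stopping queue with base id %d\n", qp->id);
	}
	return 0;
}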
@@ -1851,22 +1818,22 @@ int xe_guc_submit_start(struct xe_guc *guc)
static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
if (unlikely(guc_id >= GUC_ID_MAX)) {
- drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
+ xe_gt_err(gt, "Invalid guc_id %u\n", guc_id);
return NULL;
}
q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
if (unlikely(!q)) {
- drm_err(&xe->drm, "Not engine present for guc_id %u", guc_id);
+ xe_gt_err(gt, "Not engine present for guc_id %u\n", guc_id);
return NULL;
}
- xe_assert(xe, guc_id >= q->guc->id);
- xe_assert(xe, guc_id < (q->guc->id + q->width));
+ xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id);
+ xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width));
return q;
}
@@ -1906,31 +1873,43 @@ static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
xe_gt_assert(guc_to_gt(guc), runnable_state == 0);
xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q));
- clear_exec_queue_pending_disable(q);
if (q->guc->suspend_pending) {
suspend_fence_signal(q);
+ clear_exec_queue_pending_disable(q);
} else {
if (exec_queue_banned(q) || check_timeout) {
smp_wmb();
wake_up_all(&guc->ct.wq);
}
- if (!check_timeout)
+ if (!check_timeout && exec_queue_destroyed(q)) {
+ /*
+ * Make sure to clear pending_disable only after
+ * sampling the destroyed state. We want to avoid
+ * triggering the unregister too early for a
+ * request that only intends to disable
+ * scheduling. The caller doing the destroy must
+ * wait for an ongoing pending_disable before
+ * marking the queue as destroyed.
+ */
+ clear_exec_queue_pending_disable(q);
deregister_exec_queue(guc, q);
+ } else {
+ clear_exec_queue_pending_disable(q);
+ }
}
}
}
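The comment above encodes an ordering contract between this G2H handler and a destroyer: the destroyer only marks the queue destroyed once no disable is pending, so the handler must sample the destroyed state before clearing pending_disable. A single-threaded sketch of the required order (the real code relies on GuC CT serialization and barriers; names are hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool pending_disable = true;
static bool destroyed;  /* set by the destroyer only while no disable pends */

int main(void)
{
	/* handler side, in the order the patch enforces: */
	bool do_unregister = destroyed;  /* 1) sample destroyed state    */
	pending_disable = false;         /* 2) only then release waiters */

	printf(do_unregister ? "deregister\n" : "disable only\n");
	return 0;
}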
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
- u32 runnable_state = msg[1];
+ u32 guc_id, runnable_state;
- if (unlikely(len < 2)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 2))
return -EPROTO;
- }
+
+ guc_id = msg[0];
+ runnable_state = msg[1];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -1964,14 +1943,13 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
- }
+
+ guc_id = msg[0];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -1993,14 +1971,13 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
- }
+
+ guc_id = msg[0];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -2009,8 +1986,6 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
- /* FIXME: Do error capture, most likely async */
-
trace_xe_exec_queue_reset(q);
/*
@@ -2026,17 +2001,53 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
return 0;
}
+/*
+ * xe_guc_error_capture_handler - Handler for the GuC capture-notification message
+ * @guc: The GuC object
+ * @msg: Pointer to the message
+ * @len: The message length
+ *
+ * When GuC capture data is ready, GuC sends the
+ * XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION message to the host. This function
+ * is called first to check the status before processing the data that comes
+ * with the message.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ u32 status;
+
+ if (unlikely(len != XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN))
+ return -EPROTO;
+
+ status = msg[0] & XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
+ if (status == XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
+ xe_gt_warn(guc_to_gt(guc), "G2H-Error capture no space");
+
+ xe_guc_capture_process(guc);
+
+ return 0;
+}
+
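Several handlers in this file gain the same hardening: `guc_id = msg[0]` moves below the length check, so no message payload is read before `len` is validated (the old code read msg[0] into a local at declaration time, i.e. before the check). A minimal standalone version of the pattern, with a hypothetical handler:

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

static int handler(const uint32_t *msg, uint32_t len)
{
	uint32_t guc_id;

	if (len < 1)
		return -EPROTO;  /* malformed message */

	guc_id = msg[0];         /* safe: length validated first */
	printf("guc_id=%u\n", guc_id);
	return 0;
}

int main(void)
{
	uint32_t msg[] = { 7 };

	return handler(msg, 1) ? 1 : 0;
}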
int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
+
+ guc_id = msg[0];
+
+ if (guc_id == GUC_ID_UNKNOWN) {
+ /*
+ * GuC uses GUC_ID_UNKNOWN if it cannot map the CAT fault to any PF/VF
+ * context. In that case only the PF is notified about the fault.
+ */
+ xe_gt_err_ratelimited(gt, "Memory CAT error reported by GuC!\n");
+ return 0;
}
q = g2h_exec_queue_lookup(guc, guc_id);
@@ -2058,24 +2069,22 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
u8 guc_class, instance;
u32 reason;
- if (unlikely(len != 3)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len != 3))
return -EPROTO;
- }
guc_class = msg[0];
instance = msg[1];
reason = msg[2];
/* Unexpected failure of a hardware feature, log an actual error */
- drm_err(&xe->drm, "GuC engine reset request failed on %d:%d because 0x%08X",
- guc_class, instance, reason);
+ xe_gt_err(gt, "GuC engine reset request failed on %d:%d because 0x%08X",
+ guc_class, instance, reason);
- xe_gt_reset_async(guc_to_gt(guc));
+ xe_gt_reset_async(gt);
return 0;
}
@@ -2240,7 +2249,7 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
if (!snapshot)
return;
- drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id);
+ drm_printf(p, "GuC ID: %d\n", snapshot->guc.id);
drm_printf(p, "\tName: %s\n", snapshot->name);
drm_printf(p, "\tClass: %d\n", snapshot->class);
drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index bdf8c9f3d24a..9b71a986c6ca 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -20,12 +20,14 @@ void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
void xe_guc_submit_wedge(struct xe_guc *guc);
+int xe_guc_read_stopped(struct xe_guc *guc);
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
u32 len);
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
+int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len);
struct xe_guc_submit_exec_queue_snapshot *
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 69046f698271..83a41ebcdc91 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -58,10 +58,21 @@ struct xe_guc {
struct xe_guc_ads ads;
/** @ct: GuC ct */
struct xe_guc_ct ct;
+ /** @capture: the error-state-capture module's data and objects */
+ struct xe_guc_state_capture *capture;
/** @pc: GuC Power Conservation */
struct xe_guc_pc pc;
/** @dbm: GuC Doorbell Manager */
struct xe_guc_db_mgr dbm;
+
+ /** @g2g: GuC to GuC communication state */
+ struct {
+ /** @g2g.bo: Storage for GuC to GuC communication channels */
+ struct xe_bo *bo;
+ /** @g2g.owned: Is the BO owned by this GT or just mapped in */
+ bool owned;
+ } g2g;
+
/** @submission_state: GuC submission state */
struct {
/** @submission_state.idm: GuC context ID Manager */
@@ -72,18 +83,12 @@ struct xe_guc {
atomic_t stopped;
/** @submission_state.lock: protects submission state */
struct mutex lock;
-#ifdef CONFIG_PROVE_LOCKING
-#define NUM_SUBMIT_WQ 256
- /** @submission_state.submit_wq_pool: submission ordered workqueues pool */
- struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
- /** @submission_state.submit_wq_idx: submission ordered workqueue index */
- int submit_wq_idx;
-#endif
/** @submission_state.enabled: submission is enabled */
bool enabled;
/** @submission_state.fini_wq: submit fini wait queue */
wait_queue_head_t fini_wq;
} submission_state;
+
/** @hwconfig: Hardware config state */
struct {
/** @hwconfig.bo: buffer object of the hardware config */
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 65b2e147c4b9..d765bfd3636b 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -92,7 +92,7 @@ void xe_heci_gsc_fini(struct xe_device *xe)
{
struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
- if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
+ if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
return;
if (heci_gsc->adev) {
@@ -177,7 +177,7 @@ void xe_heci_gsc_init(struct xe_device *xe)
const struct heci_gsc_def *def;
int ret;
- if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
+ if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
return;
heci_gsc->irq = -1;
@@ -222,7 +222,7 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
if ((iir & GSC_IRQ_INTF(1)) == 0)
return;
- if (!HAS_HECI_GSCFI(xe)) {
+ if (!xe->info.has_heci_gscfi) {
drm_warn_once(&xe->drm, "GSC irq: not supported");
return;
}
@@ -242,7 +242,7 @@ void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
if ((iir & CSC_IRQ_INTF(1)) == 0)
return;
- if (!HAS_HECI_CSCFI(xe)) {
+ if (!xe->info.has_heci_cscfi) {
drm_warn_once(&xe->drm, "CSC irq: not supported");
return;
}
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 2c32dc46f7d4..089834467880 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -159,7 +159,7 @@ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
* This function allocates the storage of the userptr sg table.
* It is caller's responsibility to free it calling sg_free_table.
*
- * returns: 0 for succuss; negative error no on failure
+ * returns: 0 for success; negative errno on failure
*/
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
bool is_mm_mmap_locked)
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index f5459f97af23..6a846e4cb221 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -229,7 +229,7 @@ bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
{
struct xe_gt *gt = huc_to_gt(huc);
- return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
+ return xe_mmio_read32(&gt->mmio, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
}
int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
@@ -268,7 +268,7 @@ int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
goto fail;
}
- ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val,
+ ret = xe_mmio_wait32(&gt->mmio, huc_auth_modes[type].reg, huc_auth_modes[type].val,
huc_auth_modes[type].val, 100000, NULL, false);
if (ret) {
xe_gt_err(gt, "HuC: firmware not verified: %pe\n", ERR_PTR(ret));
@@ -296,19 +296,19 @@ void xe_huc_sanitize(struct xe_huc *huc)
void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
{
struct xe_gt *gt = huc_to_gt(huc);
- int err;
+ unsigned int fw_ref;
xe_uc_fw_print(&huc->fw, p);
if (!xe_uc_fw_is_enabled(&huc->fw))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
drm_printf(p, "\nHuC status: 0x%08x\n",
- xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO));
+ xe_mmio_read32(&gt->mmio, HUC_KERNEL_LOAD_INFO));
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index c9c3beb3ce8d..fc447751fe78 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -12,6 +12,7 @@
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
@@ -23,6 +24,7 @@
#include "xe_gt_printk.h"
#include "xe_gt_mcr.h"
#include "xe_gt_topology.h"
+#include "xe_guc_capture.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
@@ -295,7 +297,7 @@ void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
reg.addr += hwe->mmio_base;
- xe_mmio_write32(hwe->gt, reg, val);
+ xe_mmio_write32(&hwe->gt->mmio, reg, val);
}
/**
@@ -315,23 +317,26 @@ u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
reg.addr += hwe->mmio_base;
- return xe_mmio_read32(hwe->gt, reg);
+ return xe_mmio_read32(&hwe->gt->mmio, reg);
}
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
u32 ccs_mask =
xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
+ u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
- xe_mmio_write32(hwe->gt, RCU_MODE,
+ xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
xe_bo_ggtt_addr(hwe->hwsp));
- xe_hw_engine_mmio_write32(hwe, RING_MODE(0),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+
+ if (xe_device_has_msix(gt_to_xe(hwe->gt)))
+ ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
_MASKED_BIT_DISABLE(STOP_RING));
xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
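For readers unfamiliar with `_MASKED_BIT_ENABLE()`: RING_MODE-style registers carry a write mask in their upper 16 bits, so independent enables can be OR-ed together as done above, with each write affecting only the bits whose mask half is set. A re-derivation in standalone C; the bit positions are invented, only the mask convention is the point:

#include <stdio.h>
#include <stdint.h>

#define MASKED_BIT_ENABLE(b)  ((uint32_t)(((b) << 16) | (b)))
#define MASKED_BIT_DISABLE(b) ((uint32_t)((b) << 16))

int main(void)
{
	uint32_t bit_a = 1u << 3;   /* hypothetical "disable legacy mode" */
	uint32_t bit_b = 1u << 13;  /* hypothetical "MSI-X interrupt enable" */
	uint32_t ring_mode = MASKED_BIT_ENABLE(bit_a);

	ring_mode |= MASKED_BIT_ENABLE(bit_b);  /* combine before one write */
	printf("RING_MODE write: 0x%08x\n", ring_mode);
	return 0;
}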
@@ -354,7 +359,7 @@ static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
hwe->class != XE_ENGINE_CLASS_RENDER)
return false;
- return xe_mmio_read32(hwe->gt, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
+ return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
}
void
@@ -417,7 +422,7 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
* Bspec: 72161
*/
const u8 mocs_write_idx = gt->mocs.uc_index;
- const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
+ const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
(GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
gt->mocs.wb_index : gt->mocs.uc_index;
u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
@@ -460,6 +465,30 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
}
+static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
+{
+ const struct engine_info *info;
+ enum xe_hw_engine_id id;
+
+ for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
+ info = &engine_infos[id];
+ if (info->class == class && info->instance == instance)
+ return info;
+ }
+
+ return NULL;
+}
+
+static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class)
+{
+ /* For MSI-X, hw engines report to the offset of engine instance zero */
+ const struct engine_info *info = find_engine_info(class, 0);
+
+ xe_gt_assert(gt, info);
+
+ return info ? info->irq_offset : 0;
+}
+
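A toy version of the two helpers just added, showing the invariant they implement: under MSI-X, every engine of a class takes the irq_offset of that class's instance 0. The table contents below are invented:

#include <stdio.h>

struct engine_info { int class; int instance; int irq_offset; };

static const struct engine_info infos[] = {
	{ .class = 0, .instance = 0, .irq_offset = 0x10 },
	{ .class = 0, .instance = 1, .irq_offset = 0x14 },
};

static int msix_irq_offset(int class)
{
	for (unsigned int i = 0; i < sizeof(infos) / sizeof(infos[0]); i++)
		if (infos[i].class == class && infos[i].instance == 0)
			return infos[i].irq_offset;
	return 0;  /* mirrors the asserted fallback above */
}

int main(void)
{
	/* instance 1 still reports at instance 0's offset under MSI-X */
	printf("0x%x\n", msix_irq_offset(0));
	return 0;
}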
static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
enum xe_hw_engine_id id)
{
@@ -479,7 +508,9 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
hwe->class = info->class;
hwe->instance = info->instance;
hwe->mmio_base = info->mmio_base;
- hwe->irq_offset = info->irq_offset;
+ hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
+ get_msix_irq_offset(gt, info->class) :
+ info->irq_offset;
hwe->domain = info->domain;
hwe->name = info->name;
hwe->fence_irq = &gt->fence_irq[info->class];
@@ -546,7 +577,6 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
xe_gt_assert(gt, gt->info.engine_mask & BIT(id));
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
- xe_reg_sr_apply_whitelist(hwe);
hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
@@ -612,7 +642,7 @@ static void read_media_fuses(struct xe_gt *gt)
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE);
+ media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);
/*
* Pre-Xe_HP platforms had register bits representing absent engines,
@@ -657,7 +687,7 @@ static void read_copy_fuses(struct xe_gt *gt)
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3);
+ bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);
/* BCS0 is always present; only BCS1-BCS8 may be fused off */
@@ -704,7 +734,7 @@ static void read_compute_fuses_from_reg(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
u32 ccs_mask;
- ccs_mask = xe_mmio_read32(gt, XEHP_FUSE4);
+ ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);
for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
@@ -742,10 +772,10 @@ static void check_gsc_availability(struct xe_gt *gt)
gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);
/* interrupts were previously enabled, so turn them off */
- xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, 0);
- xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~0);
+ xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
+ xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
- drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
+ drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n");
}
}
@@ -798,60 +828,10 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
xe_hw_fence_irq_run(hwe->fence_irq);
}
-static bool
-is_slice_common_per_gslice(struct xe_device *xe)
-{
- return GRAPHICS_VERx100(xe) >= 1255;
-}
-
-static void
-xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe,
- struct xe_hw_engine_snapshot *snapshot)
-{
- struct xe_gt *gt = hwe->gt;
- struct xe_device *xe = gt_to_xe(gt);
- unsigned int dss;
- u16 group, instance;
-
- snapshot->reg.instdone.ring = xe_hw_engine_mmio_read32(hwe, RING_INSTDONE(0));
-
- if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER)
- return;
-
- if (is_slice_common_per_gslice(xe) == false) {
- snapshot->reg.instdone.slice_common[0] =
- xe_mmio_read32(gt, SC_INSTDONE);
- snapshot->reg.instdone.slice_common_extra[0] =
- xe_mmio_read32(gt, SC_INSTDONE_EXTRA);
- snapshot->reg.instdone.slice_common_extra2[0] =
- xe_mmio_read32(gt, SC_INSTDONE_EXTRA2);
- } else {
- for_each_geometry_dss(dss, gt, group, instance) {
- snapshot->reg.instdone.slice_common[dss] =
- xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE, group, instance);
- snapshot->reg.instdone.slice_common_extra[dss] =
- xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA, group, instance);
- snapshot->reg.instdone.slice_common_extra2[dss] =
- xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA2, group, instance);
- }
- }
-
- for_each_geometry_dss(dss, gt, group, instance) {
- snapshot->reg.instdone.sampler[dss] =
- xe_gt_mcr_unicast_read(gt, SAMPLER_INSTDONE, group, instance);
- snapshot->reg.instdone.row[dss] =
- xe_gt_mcr_unicast_read(gt, ROW_INSTDONE, group, instance);
-
- if (GRAPHICS_VERx100(xe) >= 1255)
- snapshot->reg.instdone.geom_svg[dss] =
- xe_gt_mcr_unicast_read(gt, XEHPG_INSTDONE_GEOM_SVGUNIT,
- group, instance);
- }
-}
-
/**
* xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
* @hwe: Xe HW Engine.
+ * @q: The exec queue object.
*
* This can be printed out in a later stage like during dev_coredump
* analysis.
@@ -860,11 +840,10 @@ xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe,
* caller, using `xe_hw_engine_snapshot_free`.
*/
struct xe_hw_engine_snapshot *
-xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
{
struct xe_hw_engine_snapshot *snapshot;
- size_t len;
- u64 val;
+ struct __guc_capture_parsed_output *node;
if (!xe_hw_engine_is_valid(hwe))
return NULL;
@@ -874,28 +853,6 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
if (!snapshot)
return NULL;
- /* Because XE_MAX_DSS_FUSE_BITS is defined in xe_gt_types.h and it
- * includes xe_hw_engine_types.h the length of this 3 registers can't be
- * set in struct xe_hw_engine_snapshot, so here doing additional
- * allocations.
- */
- len = (XE_MAX_DSS_FUSE_BITS * sizeof(u32));
- snapshot->reg.instdone.slice_common = kzalloc(len, GFP_ATOMIC);
- snapshot->reg.instdone.slice_common_extra = kzalloc(len, GFP_ATOMIC);
- snapshot->reg.instdone.slice_common_extra2 = kzalloc(len, GFP_ATOMIC);
- snapshot->reg.instdone.sampler = kzalloc(len, GFP_ATOMIC);
- snapshot->reg.instdone.row = kzalloc(len, GFP_ATOMIC);
- snapshot->reg.instdone.geom_svg = kzalloc(len, GFP_ATOMIC);
- if (!snapshot->reg.instdone.slice_common ||
- !snapshot->reg.instdone.slice_common_extra ||
- !snapshot->reg.instdone.slice_common_extra2 ||
- !snapshot->reg.instdone.sampler ||
- !snapshot->reg.instdone.row ||
- !snapshot->reg.instdone.geom_svg) {
- xe_hw_engine_snapshot_free(snapshot);
- return NULL;
- }
-
snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
snapshot->hwe = hwe;
snapshot->logical_instance = hwe->logical_instance;
@@ -903,157 +860,30 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
hwe->domain);
snapshot->mmio_base = hwe->mmio_base;
+ snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);
/* no more VF accessible data below this point */
if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
return snapshot;
- snapshot->reg.ring_execlist_status =
- xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
- val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
- snapshot->reg.ring_execlist_status |= val << 32;
-
- snapshot->reg.ring_execlist_sq_contents =
- xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0));
- val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0));
- snapshot->reg.ring_execlist_sq_contents |= val << 32;
-
- snapshot->reg.ring_acthd = xe_hw_engine_mmio_read32(hwe, RING_ACTHD(0));
- val = xe_hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
- snapshot->reg.ring_acthd |= val << 32;
-
- snapshot->reg.ring_bbaddr = xe_hw_engine_mmio_read32(hwe, RING_BBADDR(0));
- val = xe_hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
- snapshot->reg.ring_bbaddr |= val << 32;
-
- snapshot->reg.ring_dma_fadd =
- xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
- val = xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
- snapshot->reg.ring_dma_fadd |= val << 32;
-
- snapshot->reg.ring_hwstam = xe_hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
- snapshot->reg.ring_hws_pga = xe_hw_engine_mmio_read32(hwe, RING_HWS_PGA(0));
- snapshot->reg.ring_start = xe_hw_engine_mmio_read32(hwe, RING_START(0));
- if (GRAPHICS_VERx100(hwe->gt->tile->xe) >= 2000) {
- val = xe_hw_engine_mmio_read32(hwe, RING_START_UDW(0));
- snapshot->reg.ring_start |= val << 32;
- }
- if (xe_gt_has_indirect_ring_state(hwe->gt)) {
- snapshot->reg.indirect_ring_state =
- xe_hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0));
- }
-
- snapshot->reg.ring_head =
- xe_hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
- snapshot->reg.ring_tail =
- xe_hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
- snapshot->reg.ring_ctl = xe_hw_engine_mmio_read32(hwe, RING_CTL(0));
- snapshot->reg.ring_mi_mode =
- xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
- snapshot->reg.ring_mode = xe_hw_engine_mmio_read32(hwe, RING_MODE(0));
- snapshot->reg.ring_imr = xe_hw_engine_mmio_read32(hwe, RING_IMR(0));
- snapshot->reg.ring_esr = xe_hw_engine_mmio_read32(hwe, RING_ESR(0));
- snapshot->reg.ring_emr = xe_hw_engine_mmio_read32(hwe, RING_EMR(0));
- snapshot->reg.ring_eir = xe_hw_engine_mmio_read32(hwe, RING_EIR(0));
- snapshot->reg.ipehr = xe_hw_engine_mmio_read32(hwe, RING_IPEHR(0));
- xe_hw_engine_snapshot_instdone_capture(hwe, snapshot);
-
- if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
- snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE);
-
- return snapshot;
-}
-
-static void
-xe_hw_engine_snapshot_instdone_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p)
-{
- struct xe_gt *gt = snapshot->hwe->gt;
- struct xe_device *xe = gt_to_xe(gt);
- u16 group, instance;
- unsigned int dss;
-
- drm_printf(p, "\tRING_INSTDONE: 0x%08x\n", snapshot->reg.instdone.ring);
-
- if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER)
- return;
+ if (q) {
+ /* If a GuC capture is available, use GuC as the source */
+ node = xe_guc_capture_get_matching_and_lock(q);
+ if (node) {
+ struct xe_device *xe = gt_to_xe(hwe->gt);
+ struct xe_devcoredump *coredump = &xe->devcoredump;
- if (is_slice_common_per_gslice(xe) == false) {
- drm_printf(p, "\tSC_INSTDONE[0]: 0x%08x\n",
- snapshot->reg.instdone.slice_common[0]);
- drm_printf(p, "\tSC_INSTDONE_EXTRA[0]: 0x%08x\n",
- snapshot->reg.instdone.slice_common_extra[0]);
- drm_printf(p, "\tSC_INSTDONE_EXTRA2[0]: 0x%08x\n",
- snapshot->reg.instdone.slice_common_extra2[0]);
- } else {
- for_each_geometry_dss(dss, gt, group, instance) {
- drm_printf(p, "\tSC_INSTDONE[%u]: 0x%08x\n", dss,
- snapshot->reg.instdone.slice_common[dss]);
- drm_printf(p, "\tSC_INSTDONE_EXTRA[%u]: 0x%08x\n", dss,
- snapshot->reg.instdone.slice_common_extra[dss]);
- drm_printf(p, "\tSC_INSTDONE_EXTRA2[%u]: 0x%08x\n", dss,
- snapshot->reg.instdone.slice_common_extra2[dss]);
+ coredump->snapshot.matched_node = node;
+ xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
+ return snapshot;
}
}
- for_each_geometry_dss(dss, gt, group, instance) {
- drm_printf(p, "\tSAMPLER_INSTDONE[%u]: 0x%08x\n", dss,
- snapshot->reg.instdone.sampler[dss]);
- drm_printf(p, "\tROW_INSTDONE[%u]: 0x%08x\n", dss,
- snapshot->reg.instdone.row[dss]);
-
- if (GRAPHICS_VERx100(xe) >= 1255)
- drm_printf(p, "\tINSTDONE_GEOM_SVGUNIT[%u]: 0x%08x\n",
- dss, snapshot->reg.instdone.geom_svg[dss]);
- }
-}
+ /* otherwise, do manual capture */
+ xe_engine_manual_capture(hwe, snapshot);
+ xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");
-/**
- * xe_hw_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
- * @snapshot: Xe HW Engine snapshot object.
- * @p: drm_printer where it will be printed out.
- *
- * This function prints out a given Xe HW Engine snapshot object.
- */
-void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
- struct drm_printer *p)
-{
- if (!snapshot)
- return;
-
- drm_printf(p, "%s (physical), logical instance=%d\n",
- snapshot->name ? snapshot->name : "",
- snapshot->logical_instance);
- drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
- snapshot->forcewake.domain, snapshot->forcewake.ref);
- drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam);
- drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga);
- drm_printf(p, "\tRING_EXECLIST_STATUS: 0x%016llx\n",
- snapshot->reg.ring_execlist_status);
- drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS: 0x%016llx\n",
- snapshot->reg.ring_execlist_sq_contents);
- drm_printf(p, "\tRING_START: 0x%016llx\n", snapshot->reg.ring_start);
- drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head);
- drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail);
- drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl);
- drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode);
- drm_printf(p, "\tRING_MODE: 0x%08x\n",
- snapshot->reg.ring_mode);
- drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr);
- drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr);
- drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr);
- drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir);
- drm_printf(p, "\tACTHD: 0x%016llx\n", snapshot->reg.ring_acthd);
- drm_printf(p, "\tBBADDR: 0x%016llx\n", snapshot->reg.ring_bbaddr);
- drm_printf(p, "\tDMA_FADDR: 0x%016llx\n", snapshot->reg.ring_dma_fadd);
- drm_printf(p, "\tINDIRECT_RING_STATE: 0x%08x\n",
- snapshot->reg.indirect_ring_state);
- drm_printf(p, "\tIPEHR: 0x%08x\n", snapshot->reg.ipehr);
- xe_hw_engine_snapshot_instdone_print(snapshot, p);
-
- if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
- drm_printf(p, "\tRCU_MODE: 0x%08x\n",
- snapshot->reg.rcu_mode);
- drm_puts(p, "\n");
+ return snapshot;
}
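The rewritten capture path prefers a GuC-produced error-capture node matched to the queue and only falls back to manual MMIO reads when none exists. Schematically, in standalone C with everything hypothetical:

#include <stdio.h>

struct capture_node { const char *source; };

static struct capture_node *match_guc_node(int guc_has_data)
{
	static struct capture_node node = { .source = "GuC" };

	return guc_has_data ? &node : NULL;
}

int main(void)
{
	struct capture_node *node = match_guc_node(0);

	if (node)
		printf("snapshot source: %s\n", node->source);
	else
		printf("snapshot source: manual MMIO capture\n");
	return 0;
}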
/**
@@ -1065,15 +895,18 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
*/
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
{
+ struct xe_gt *gt;
if (!snapshot)
return;
- kfree(snapshot->reg.instdone.slice_common);
- kfree(snapshot->reg.instdone.slice_common_extra);
- kfree(snapshot->reg.instdone.slice_common_extra2);
- kfree(snapshot->reg.instdone.sampler);
- kfree(snapshot->reg.instdone.row);
- kfree(snapshot->reg.instdone.geom_svg);
+ gt = snapshot->hwe->gt;
+ /*
+ * xe_guc_capture_put_matched_nodes is called here and from
+ * xe_devcoredump_snapshot_free, to cover the two calling paths
+ * of hw_engines: debugfs and devcoredump free.
+ */
+ xe_guc_capture_put_matched_nodes(&gt->uc.guc);
+
kfree(snapshot->name);
kfree(snapshot);
}
@@ -1089,8 +922,8 @@ void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
{
struct xe_hw_engine_snapshot *snapshot;
- snapshot = xe_hw_engine_snapshot_capture(hwe);
- xe_hw_engine_snapshot_print(snapshot, p);
+ snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
+ xe_engine_snapshot_print(snapshot, p);
xe_hw_engine_snapshot_free(snapshot);
}
@@ -1150,7 +983,7 @@ const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
{
- return xe_mmio_read64_2x32(hwe->gt, RING_TIMESTAMP(hwe->mmio_base));
+ return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
}
enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h
index 022819a4a8eb..6b5f9fa2a594 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine.h
@@ -11,6 +11,7 @@
struct drm_printer;
struct drm_xe_engine_class_instance;
struct xe_device;
+struct xe_exec_queue;
#ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN
#define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN
@@ -54,12 +55,9 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec);
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe);
u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
enum xe_engine_class engine_class);
-
struct xe_hw_engine_snapshot *
-xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe);
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q);
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot);
-void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
- struct drm_printer *p);
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p);
void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index 8be6d420ece4..e4191a7a2c31 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -106,7 +106,7 @@ struct xe_hw_engine_class_intf {
* Contains all the hardware engine state for physical instances.
*/
struct xe_hw_engine {
- /** @gt: graphics tile this hw engine belongs to */
+ /** @gt: GT structure this hw engine belongs to */
struct xe_gt *gt;
/** @name: name of this hw engine */
const char *name;
@@ -152,6 +152,11 @@ struct xe_hw_engine {
struct xe_hw_engine_group *hw_engine_group;
};
+enum xe_hw_engine_snapshot_source_id {
+ XE_ENGINE_CAPTURE_SOURCE_MANUAL,
+ XE_ENGINE_CAPTURE_SOURCE_GUC
+};
+
/**
* struct xe_hw_engine_snapshot - Hardware engine snapshot
*
@@ -173,65 +178,8 @@ struct xe_hw_engine_snapshot {
} forcewake;
/** @mmio_base: MMIO base address of this hw engine */
u32 mmio_base;
- /** @reg: Useful MMIO register snapshot */
- struct {
- /** @reg.ring_execlist_status: RING_EXECLIST_STATUS */
- u64 ring_execlist_status;
- /** @reg.ring_execlist_sq_contents: RING_EXECLIST_SQ_CONTENTS */
- u64 ring_execlist_sq_contents;
- /** @reg.ring_acthd: RING_ACTHD */
- u64 ring_acthd;
- /** @reg.ring_bbaddr: RING_BBADDR */
- u64 ring_bbaddr;
- /** @reg.ring_dma_fadd: RING_DMA_FADD */
- u64 ring_dma_fadd;
- /** @reg.ring_hwstam: RING_HWSTAM */
- u32 ring_hwstam;
- /** @reg.ring_hws_pga: RING_HWS_PGA */
- u32 ring_hws_pga;
- /** @reg.ring_start: RING_START */
- u64 ring_start;
- /** @reg.ring_head: RING_HEAD */
- u32 ring_head;
- /** @reg.ring_tail: RING_TAIL */
- u32 ring_tail;
- /** @reg.ring_ctl: RING_CTL */
- u32 ring_ctl;
- /** @reg.ring_mi_mode: RING_MI_MODE */
- u32 ring_mi_mode;
- /** @reg.ring_mode: RING_MODE */
- u32 ring_mode;
- /** @reg.ring_imr: RING_IMR */
- u32 ring_imr;
- /** @reg.ring_esr: RING_ESR */
- u32 ring_esr;
- /** @reg.ring_emr: RING_EMR */
- u32 ring_emr;
- /** @reg.ring_eir: RING_EIR */
- u32 ring_eir;
- /** @reg.indirect_ring_state: INDIRECT_RING_STATE */
- u32 indirect_ring_state;
- /** @reg.ipehr: IPEHR */
- u32 ipehr;
- /** @reg.rcu_mode: RCU_MODE */
- u32 rcu_mode;
- struct {
- /** @reg.instdone.ring: RING_INSTDONE */
- u32 ring;
- /** @reg.instdone.slice_common: SC_INSTDONE */
- u32 *slice_common;
- /** @reg.instdone.slice_common_extra: SC_INSTDONE_EXTRA */
- u32 *slice_common_extra;
- /** @reg.instdone.slice_common_extra2: SC_INSTDONE_EXTRA2 */
- u32 *slice_common_extra2;
- /** @reg.instdone.sampler: SAMPLER_INSTDONE */
- u32 *sampler;
- /** @reg.instdone.row: ROW_INSTDONE */
- u32 *row;
- /** @reg.instdone.geom_svg: INSTDONE_GEOM_SVGUNIT */
- u32 *geom_svg;
- } instdone;
- } reg;
+ /** @kernel_reserved: Engine reserved, can't be used by userspace */
+ bool kernel_reserved;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_fence_types.h b/drivers/gpu/drm/xe/xe_hw_fence_types.h
index 364a61f4bfda..58a8d09afe5c 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_fence_types.h
@@ -41,7 +41,7 @@ struct xe_hw_fence_irq {
* to a xe_hw_fence_irq, maintains serial seqno.
*/
struct xe_hw_fence_ctx {
- /** @gt: graphics tile of hardware fence context */
+ /** @gt: GT structure of hardware fence context */
struct xe_gt *gt;
/** @irq: fence irq handler */
struct xe_hw_fence_irq *irq;
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index aa11728e7e79..fde56dad3ab7 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -149,7 +149,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v
u64 reg_val, min, max;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
@@ -190,7 +190,7 @@ unlock:
static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
int ret = 0;
u64 reg_val;
struct xe_reg rapl_limit;
@@ -222,7 +222,7 @@ unlock:
static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
u64 reg_val;
@@ -259,7 +259,7 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
u64 reg_val;
@@ -282,7 +282,7 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
char *buf)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
int sensor_index = to_sensor_dev_attr(attr)->index;
@@ -323,7 +323,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
const char *buf, size_t count)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
@@ -498,7 +498,7 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u64 reg_val;
reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
@@ -781,7 +781,7 @@ static const struct hwmon_chip_info hwmon_chip_info = {
static void
xe_hwmon_get_preregistration_info(struct xe_device *xe)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct xe_hwmon *hwmon = xe->hwmon;
long energy;
u64 val_sku_unit = 0;
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 5f2c368c35ad..32f5a67a917b 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -10,8 +10,8 @@
#include <drm/drm_managed.h>
#include "display/xe_display.h"
-#include "regs/xe_gt_regs.h"
-#include "regs/xe_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "regs/xe_irq_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gsc_proxy.h"
@@ -30,14 +30,19 @@
#define IIR(offset) XE_REG(offset + 0x8)
#define IER(offset) XE_REG(offset + 0xc)
-static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg)
+static int xe_irq_msix_init(struct xe_device *xe);
+static void xe_irq_msix_free(struct xe_device *xe);
+static int xe_irq_msix_request_irqs(struct xe_device *xe);
+static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
+
+static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
u32 val = xe_mmio_read32(mmio, reg);
if (val == 0)
return;
- drm_WARN(&gt_to_xe(mmio)->drm, 1,
+ drm_WARN(&mmio->tile->xe->drm, 1,
"Interrupt register 0x%x is not zero: 0x%08x\n",
reg.addr, val);
xe_mmio_write32(mmio, reg, 0xffffffff);
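The `0xffffffff` write relies on these identity registers being write-1-to-clear: after warning about stale bits, the function acknowledges everything so interrupt setup starts from a clean slate. A toy model of W1C semantics in standalone C:

#include <stdio.h>
#include <stdint.h>

static void w1c_write(uint32_t *reg, uint32_t val)
{
	*reg &= ~val;  /* writing 1 clears a bit; writing 0 leaves it */
}

int main(void)
{
	uint32_t iir = 0x00000022;  /* stale pending bits */

	if (iir != 0)
		printf("IIR not zero: 0x%08x\n", iir);
	w1c_write(&iir, 0xffffffff);
	printf("IIR after ack: 0x%08x\n", iir);
	return 0;
}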
@@ -52,7 +57,7 @@ static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg)
*/
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
/*
* If we're just enabling an interrupt now, it shouldn't already
@@ -70,7 +75,7 @@ static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
/* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
xe_mmio_write32(mmio, IMR(irqregs), ~0);
/* Posting read */
@@ -87,7 +92,7 @@ static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
static u32 xelp_intr_disable(struct xe_device *xe)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);
@@ -103,7 +108,7 @@ static u32 xelp_intr_disable(struct xe_device *xe)
static u32
gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
u32 iir;
if (!(master_ctl & GU_MISC_IRQ))
@@ -118,7 +123,7 @@ gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
if (stall)
@@ -129,12 +134,13 @@ static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
void xe_irq_enable_hwe(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ struct xe_mmio *mmio = &gt->mmio;
u32 ccs_mask, bcs_mask;
u32 irqs, dmask, smask;
u32 gsc_mask = 0;
u32 heci_mask = 0;
- if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
+ if (xe_device_uses_memirq(xe))
return;
if (xe_device_uc_enabled(xe)) {
@@ -155,35 +161,35 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
if (!xe_gt_is_media_type(gt)) {
/* Enable interrupts for each engine class */
- xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, dmask);
+ xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
if (ccs_mask)
- xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, smask);
+ xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);
/* Unmask interrupts for each engine instance */
- xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~smask);
- xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~smask);
+ xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
+ xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
if (bcs_mask & (BIT(1)|BIT(2)))
- xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
if (bcs_mask & (BIT(3)|BIT(4)))
- xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
if (bcs_mask & (BIT(5)|BIT(6)))
- xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
if (bcs_mask & (BIT(7)|BIT(8)))
- xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
if (ccs_mask & (BIT(0)|BIT(1)))
- xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
if (ccs_mask & (BIT(2)|BIT(3)))
- xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
}
if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
/* Enable interrupts for each engine class */
- xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, dmask);
+ xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);
/* Unmask interrupts for each engine instance */
- xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~dmask);
- xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask);
- xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);
/*
* the heci2 interrupt is enabled via the same register as the
@@ -192,22 +198,22 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
gsc_mask = irqs | GSC_ER_COMPLETE;
heci_mask = GSC_IRQ_INTF(1);
- } else if (HAS_HECI_GSCFI(xe)) {
+ } else if (xe->info.has_heci_gscfi) {
gsc_mask = GSC_IRQ_INTF(1);
}
if (gsc_mask) {
- xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
- xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask);
+ xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
+ xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
}
if (heci_mask)
- xe_mmio_write32(gt, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
+ xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
}
}
static u32
gt_engine_identity(struct xe_device *xe,
- struct xe_gt *mmio,
+ struct xe_mmio *mmio,
const unsigned int bank,
const unsigned int bit)
{
@@ -279,7 +285,7 @@ static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
return tile->media_gt;
default:
break;
- };
+ }
fallthrough;
default:
return tile->primary_gt;
@@ -291,7 +297,7 @@ static void gt_irq_handler(struct xe_tile *tile,
u32 *identity)
{
struct xe_device *xe = tile_to_xe(tile);
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
unsigned int bank, bit;
u16 instance, intr_vec;
enum xe_engine_class class;
@@ -325,7 +331,7 @@ static void gt_irq_handler(struct xe_tile *tile,
if (class == XE_ENGINE_CLASS_OTHER) {
/* HECI GSCFI interrupts come from outside of GT */
- if (HAS_HECI_GSCFI(xe) && instance == OTHER_GSC_INSTANCE)
+ if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
xe_heci_gsc_irq_handler(xe, intr_vec);
else
gt_other_irq_handler(engine_gt, instance, intr_vec);
@@ -348,12 +354,8 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
unsigned long intr_dw[2];
u32 identity[32];
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
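The conversion from a spinlock-guarded bool to `atomic_read()` assumes the flag is only ever tested at the top of the hard-irq handlers, so no ordering against other state is needed at that point. A reduced model in standalone C11:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int irq_enabled;

static bool irq_handler(void)
{
	if (!atomic_load(&irq_enabled))
		return false;  /* IRQ_NONE: device quiesced or not ready */
	/* ... ack master control and dispatch ... */
	return true;
}

int main(void)
{
	atomic_store(&irq_enabled, 1);
	printf("handled: %d\n", irq_handler());
	return 0;
}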
master_ctl = xelp_intr_disable(xe);
if (!master_ctl) {
@@ -376,7 +378,7 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
static u32 dg1_intr_disable(struct xe_device *xe)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
u32 val;
/* First disable interrupts */
@@ -394,7 +396,7 @@ static u32 dg1_intr_disable(struct xe_device *xe)
static void dg1_intr_enable(struct xe_device *xe, bool stall)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
if (stall)
@@ -417,12 +419,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
/* TODO: This really shouldn't be copied+pasted */
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
master_tile_ctl = dg1_intr_disable(xe);
if (!master_tile_ctl) {
@@ -431,7 +429,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
}
for_each_tile(tile, xe, id) {
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
continue;
@@ -459,7 +457,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
* the primary tile.
*/
if (id == 0) {
- if (HAS_HECI_CSCFI(xe))
+ if (xe->info.has_heci_cscfi)
xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
@@ -474,7 +472,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
static void gt_irq_reset(struct xe_tile *tile)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
XE_ENGINE_CLASS_COMPUTE);
@@ -504,11 +502,11 @@ static void gt_irq_reset(struct xe_tile *tile)
if (ccs_mask & (BIT(0)|BIT(1)))
xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
if (ccs_mask & (BIT(2)|BIT(3)))
- xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);
if ((tile->media_gt &&
xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
- HAS_HECI_GSCFI(tile_to_xe(tile))) {
+ tile_to_xe(tile)->info.has_heci_gscfi) {
xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
@@ -547,7 +545,7 @@ static void dg1_irq_reset(struct xe_tile *tile)
static void dg1_irq_reset_mstr(struct xe_tile *tile)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}
@@ -566,7 +564,7 @@ static void vf_irq_reset(struct xe_device *xe)
for_each_tile(tile, xe, id) {
if (xe_device_has_memirq(xe))
- xe_memirq_reset(&tile->sriov.vf.memirq);
+ xe_memirq_reset(&tile->memirq);
else
gt_irq_reset(tile);
}
@@ -580,6 +578,11 @@ static void xe_irq_reset(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return vf_irq_reset(xe);
+ if (xe_device_uses_memirq(xe)) {
+ for_each_tile(tile, xe, id)
+ xe_memirq_reset(&tile->memirq);
+ }
+
for_each_tile(tile, xe, id) {
if (GRAPHICS_VERx100(xe) >= 1210)
dg1_irq_reset(tile);
@@ -609,7 +612,7 @@ static void vf_irq_postinstall(struct xe_device *xe)
for_each_tile(tile, xe, id)
if (xe_device_has_memirq(xe))
- xe_memirq_postinstall(&tile->sriov.vf.memirq);
+ xe_memirq_postinstall(&tile->memirq);
if (GRAPHICS_VERx100(xe) < 1210)
xelp_intr_enable(xe, true);
@@ -622,6 +625,14 @@ static void xe_irq_postinstall(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return vf_irq_postinstall(xe);
+ if (xe_device_uses_memirq(xe)) {
+ struct xe_tile *tile;
+ unsigned int id;
+
+ for_each_tile(tile, xe, id)
+ xe_memirq_postinstall(&tile->memirq);
+ }
+
xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
/*
@@ -644,15 +655,11 @@ static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
struct xe_tile *tile;
unsigned int id;
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
for_each_tile(tile, xe, id)
- xe_memirq_handler(&tile->sriov.vf.memirq);
+ xe_memirq_handler(&tile->memirq);
return IRQ_HANDLED;
}
@@ -668,63 +675,85 @@ static irq_handler_t xe_irq_handler(struct xe_device *xe)
return xelp_irq_handler;
}
-static void irq_uninstall(void *arg)
+static int xe_irq_msi_request_irqs(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ irq_handler_t irq_handler;
+ int irq, err;
+
+ irq_handler = xe_irq_handler(xe);
+ if (!irq_handler) {
+ drm_err(&xe->drm, "No supported interrupt handler");
+ return -EINVAL;
+ }
+
+ irq = pci_irq_vector(pdev, 0);
+ err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
+ if (err < 0) {
+ drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void xe_irq_msi_free(struct xe_device *xe)
{
- struct xe_device *xe = arg;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
int irq;
- if (!xe->irq.enabled)
+ irq = pci_irq_vector(pdev, 0);
+ free_irq(irq, xe);
+}
+
+static void irq_uninstall(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ if (!atomic_xchg(&xe->irq.enabled, 0))
return;
- xe->irq.enabled = false;
xe_irq_reset(xe);
- irq = pci_irq_vector(pdev, 0);
- free_irq(irq, xe);
+ if (xe_device_has_msix(xe))
+ xe_irq_msix_free(xe);
+ else
+ xe_irq_msi_free(xe);
+}
+
+int xe_irq_init(struct xe_device *xe)
+{
+ spin_lock_init(&xe->irq.lock);
+
+ return xe_irq_msix_init(xe);
}
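Editor's sketch (not part of the patch): xe_irq_init() is meant to run early in probe, before any vectors are allocated, so the MSI-X table can be sized up front; xe_irq_install() then allocates the vectors and requests the handlers. A hedged ordering sketch, with the surrounding probe code and error handling elided:

	/* assumed probe ordering; function names as introduced by this patch */
	err = xe_irq_init(xe);		/* size MSI-X table, init index xarray */
	if (err)
		return err;
	/* ... device/tile initialization ... */
	err = xe_irq_install(xe);	/* alloc vectors, request IRQ handlers */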
int xe_irq_install(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- unsigned int irq_flags = PCI_IRQ_MSIX;
- irq_handler_t irq_handler;
- int err, irq, nvec;
-
- irq_handler = xe_irq_handler(xe);
- if (!irq_handler) {
- drm_err(&xe->drm, "No supported interrupt handler");
- return -EINVAL;
- }
+ unsigned int irq_flags = PCI_IRQ_MSI;
+ int nvec = 1;
+ int err;
xe_irq_reset(xe);
- nvec = pci_msix_vec_count(pdev);
- if (nvec <= 0) {
- if (nvec == -EINVAL) {
- /* MSIX capability is not supported in the device, using MSI */
- irq_flags = PCI_IRQ_MSI;
- nvec = 1;
- } else {
- drm_err(&xe->drm, "MSIX: Failed getting count\n");
- return nvec;
- }
+ if (xe_device_has_msix(xe)) {
+ nvec = xe->irq.msix.nvec;
+ irq_flags = PCI_IRQ_MSIX;
}
err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
if (err < 0) {
- drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err);
+ drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
return err;
}
- irq = pci_irq_vector(pdev, 0);
- err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
- if (err < 0) {
- drm_err(&xe->drm, "Failed to request MSI/MSIX IRQ %d\n", err);
+ err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
+ xe_irq_msi_request_irqs(xe);
+ if (err)
return err;
- }
- xe->irq.enabled = true;
+ atomic_set(&xe->irq.enabled, 1);
xe_irq_postinstall(xe);
@@ -735,20 +764,28 @@ int xe_irq_install(struct xe_device *xe)
return 0;
free_irq_handler:
- free_irq(irq, xe);
+ if (xe_device_has_msix(xe))
+ xe_irq_msix_free(xe);
+ else
+ xe_irq_msi_free(xe);
return err;
}
-void xe_irq_suspend(struct xe_device *xe)
+static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
{
- int irq = to_pci_dev(xe->drm.dev)->irq;
+ synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
+}
- spin_lock_irq(&xe->irq.lock);
- xe->irq.enabled = false; /* no new irqs */
- spin_unlock_irq(&xe->irq.lock);
+void xe_irq_suspend(struct xe_device *xe)
+{
+ atomic_set(&xe->irq.enabled, 0); /* no new irqs */
- synchronize_irq(irq); /* flush irqs */
+ /* flush irqs */
+ if (xe_device_has_msix(xe))
+ xe_irq_msix_synchronize_irq(xe);
+ else
+ xe_irq_msi_synchronize_irq(xe);
xe_irq_reset(xe); /* turn irqs off */
}
@@ -762,10 +799,205 @@ void xe_irq_resume(struct xe_device *xe)
* 1. no irq will arrive before the postinstall
* 2. display is not yet resumed
*/
- xe->irq.enabled = true;
+ atomic_set(&xe->irq.enabled, 1);
xe_irq_reset(xe);
xe_irq_postinstall(xe); /* turn irqs on */
for_each_gt(gt, xe, id)
xe_irq_enable_hwe(gt);
}
+
+/* MSI-X related definitions and functions below. */
+
+enum xe_irq_msix_static {
+ GUC2HOST_MSIX = 0,
+ DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
+ /* Must be last */
+ NUM_OF_STATIC_MSIX,
+};
+
+static int xe_irq_msix_init(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int nvec = pci_msix_vec_count(pdev);
+
+ if (nvec == -EINVAL)
+ return 0; /* MSI */
+
+ if (nvec < 0) {
+ drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
+ return nvec;
+ }
+
+ xe->irq.msix.nvec = nvec;
+ xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
+ return 0;
+}
+
+static irqreturn_t guc2host_irq_handler(int irq, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_tile *tile;
+ u8 id;
+
+ if (!atomic_read(&xe->irq.enabled))
+ return IRQ_NONE;
+
+ for_each_tile(tile, xe, id)
+ xe_guc_irq_handler(&tile->primary_gt->uc.guc,
+ GUC_INTR_GUC2HOST);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
+{
+ unsigned int tile_id, gt_id;
+ struct xe_device *xe = arg;
+ struct xe_memirq *memirq;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+
+ if (!atomic_read(&xe->irq.enabled))
+ return IRQ_NONE;
+
+ for_each_tile(tile, xe, tile_id) {
+ memirq = &tile->memirq;
+ if (!memirq->bo)
+ continue;
+
+ for_each_gt(gt, xe, gt_id) {
+ if (gt->tile != tile)
+ continue;
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_memirq_hwe_handler(memirq, hwe);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
+ bool dynamic_msix, u16 *msix)
+{
+ struct xa_limit limit;
+ int ret;
+ u32 id;
+
+ limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
+ XA_LIMIT(*msix, *msix);
+ ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ if (dynamic_msix)
+ *msix = id;
+
+ return 0;
+}
+
+static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
+{
+ xa_erase(&xe->irq.msix.indexes, msix);
+}
+
+static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
+ void *irq_buf, const char *name, u16 msix)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int ret, irq;
+
+ irq = pci_irq_vector(pdev, msix);
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
+ const char *name, bool dynamic_msix, u16 *msix)
+{
+ int ret;
+
+ ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
+ if (ret)
+ return ret;
+
+ ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
+ if (ret) {
+ drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
+ xe_irq_msix_release_vector(xe, *msix);
+ return ret;
+ }
+
+ return 0;
+}
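For illustration, a hypothetical caller requesting a dynamically allocated vector would look like the sketch below; my_handler and my_data are placeholders, not names from this patch. With dynamic_msix set, *msix is an output and on success holds an index at or above NUM_OF_STATIC_MSIX:

	u16 msix;	/* output when dynamic_msix is true */
	int err;

	err = xe_irq_msix_request_irq(xe, my_handler, my_data,
				      DRIVER_NAME "-my-irq", true, &msix);
	if (err)
		return err;
	/* ... use the vector ... */
	xe_irq_msix_free_irq(xe, msix);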
+
+void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int irq;
+ void *irq_buf;
+
+ irq_buf = xa_load(&xe->irq.msix.indexes, msix);
+ if (!irq_buf)
+ return;
+
+ irq = pci_irq_vector(pdev, msix);
+ if (irq < 0) {
+ drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
+ return;
+ }
+
+ free_irq(irq, irq_buf);
+ xe_irq_msix_release_vector(xe, msix);
+}
+
+int xe_irq_msix_request_irqs(struct xe_device *xe)
+{
+ int err;
+ u16 msix;
+
+ msix = GUC2HOST_MSIX;
+ err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
+ DRIVER_NAME "-guc2host", false, &msix);
+ if (err)
+ return err;
+
+ msix = DEFAULT_MSIX;
+ err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
+ DRIVER_NAME "-default-msix", false, &msix);
+ if (err) {
+ xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
+ return err;
+ }
+
+ return 0;
+}
+
+void xe_irq_msix_free(struct xe_device *xe)
+{
+ unsigned long msix;
+ u32 *dummy;
+
+ xa_for_each(&xe->irq.msix.indexes, msix, dummy)
+ xe_irq_msix_free_irq(xe, msix);
+ xa_destroy(&xe->irq.msix.indexes);
+}
+
+void xe_irq_msix_synchronize_irq(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ unsigned long msix;
+ u32 *dummy;
+
+ xa_for_each(&xe->irq.msix.indexes, msix, dummy)
+ synchronize_irq(pci_irq_vector(pdev, msix));
+}
diff --git a/drivers/gpu/drm/xe/xe_irq.h b/drivers/gpu/drm/xe/xe_irq.h
index 067514e13675..a28bd577ba52 100644
--- a/drivers/gpu/drm/xe/xe_irq.h
+++ b/drivers/gpu/drm/xe/xe_irq.h
@@ -6,13 +6,21 @@
#ifndef _XE_IRQ_H_
#define _XE_IRQ_H_
+#include <linux/interrupt.h>
+
+#define XE_IRQ_DEFAULT_MSIX 1
+
struct xe_device;
struct xe_tile;
struct xe_gt;
+int xe_irq_init(struct xe_device *xe);
int xe_irq_install(struct xe_device *xe);
void xe_irq_suspend(struct xe_device *xe);
void xe_irq_resume(struct xe_device *xe);
void xe_irq_enable_hwe(struct xe_gt *gt);
+int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
+ const char *name, bool dynamic_msix, u16 *msix);
+void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix);
#endif
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 8999ac511555..a60ceae4c6dd 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -193,7 +193,7 @@ static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt)
lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo));
lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K));
- xe_mmio_write32(tile->primary_gt,
+ xe_mmio_write32(&tile->mmio,
GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K));
}
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index aec7db39c061..bbb9ffbf6367 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -25,6 +25,7 @@
#include "xe_map.h"
#include "xe_memirq.h"
#include "xe_sriov.h"
+#include "xe_trace_lrc.h"
#include "xe_vm.h"
#include "xe_wa.h"
@@ -38,24 +39,6 @@
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
-struct xe_lrc_snapshot {
- struct xe_bo *lrc_bo;
- void *lrc_snapshot;
- unsigned long lrc_size, lrc_offset;
-
- u32 context_desc;
- u32 indirect_context_desc;
- u32 head;
- struct {
- u32 internal;
- u32 memory;
- } tail;
- u32 start_seqno;
- u32 seqno;
- u32 ctx_timestamp;
- u32 ctx_job_timestamp;
-};
-
static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
{
@@ -599,10 +582,11 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
{
- struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->sriov.vf.memirq;
+ struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->memirq;
struct xe_device *xe = gt_to_xe(hwe->gt);
+ u8 num_regs;
- if (!IS_SRIOV_VF(xe) || !xe_device_has_memirq(xe))
+ if (!xe_device_uses_memirq(xe))
return;
regs[CTX_LRM_INT_MASK_ENABLE] = MI_LOAD_REGISTER_MEM |
@@ -610,12 +594,18 @@ static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
regs[CTX_INT_MASK_ENABLE_REG] = RING_IMR(0).addr;
regs[CTX_INT_MASK_ENABLE_PTR] = xe_memirq_enable_ptr(memirq);
- regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ num_regs = xe_device_has_msix(xe) ? 3 : 2;
+ regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(num_regs) |
MI_LRI_LRM_CS_MMIO | MI_LRI_FORCE_POSTED;
regs[CTX_INT_STATUS_REPORT_REG] = RING_INT_STATUS_RPT_PTR(0).addr;
- regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq);
+ regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq, hwe);
regs[CTX_INT_SRC_REPORT_REG] = RING_INT_SRC_RPT_PTR(0).addr;
- regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq);
+ regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq, hwe);
+
+ if (xe_device_has_msix(xe)) {
+ regs[CTX_CS_INT_VEC_REG] = CS_INT_VEC(0).addr;
+ /* CTX_CS_INT_VEC_DATA will be set in xe_lrc_init */
+ }
}
static int lrc_ring_mi_mode(struct xe_hw_engine *hwe)
@@ -893,7 +883,7 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
- struct xe_vm *vm, u32 ring_size)
+ struct xe_vm *vm, u32 ring_size, u16 msix_vec)
{
struct xe_gt *gt = hwe->gt;
struct xe_tile *tile = gt_to_tile(gt);
@@ -962,6 +952,14 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_drm_client_add_bo(vm->xef->client, lrc->bo);
}
+ if (xe_device_has_msix(xe)) {
+ xe_lrc_write_ctx_reg(lrc, CTX_INT_STATUS_REPORT_PTR,
+ xe_memirq_status_ptr(&tile->memirq, hwe));
+ xe_lrc_write_ctx_reg(lrc, CTX_INT_SRC_REPORT_PTR,
+ xe_memirq_source_ptr(&tile->memirq, hwe));
+ xe_lrc_write_ctx_reg(lrc, CTX_CS_INT_VEC_DATA, msix_vec << 16 | msix_vec);
+ }
+
if (xe_gt_has_indirect_ring_state(gt)) {
xe_lrc_write_ctx_reg(lrc, CTX_INDIRECT_RING_STATE,
__xe_lrc_indirect_ring_ggtt_addr(lrc));
@@ -1022,6 +1020,7 @@ err_lrc_finish:
* @hwe: Hardware Engine
* @vm: The VM (address space)
* @ring_size: LRC ring size
+ * @msix_vec: MSI-X interrupt vector (for platforms that support it)
*
* Allocate and initialize the Logical Ring Context (LRC).
*
@@ -1029,7 +1028,7 @@ err_lrc_finish:
* upon failure.
*/
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size)
+ u32 ring_size, u16 msix_vec)
{
struct xe_lrc *lrc;
int err;
@@ -1038,7 +1037,7 @@ struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
if (!lrc)
return ERR_PTR(-ENOMEM);
- err = xe_lrc_init(lrc, hwe, vm, ring_size);
+ err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec);
if (err) {
kfree(lrc);
return ERR_PTR(err);
@@ -1078,6 +1077,14 @@ u32 xe_lrc_ring_tail(struct xe_lrc *lrc)
return xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL) & TAIL_ADDR;
}
+static u32 xe_lrc_ring_start(struct xe_lrc *lrc)
+{
+ if (xe_lrc_has_indirect_ring_state(lrc))
+ return xe_lrc_read_indirect_ctx_reg(lrc, INDIRECT_CTX_RING_START);
+ else
+ return xe_lrc_read_ctx_reg(lrc, CTX_RING_START);
+}
+
void xe_lrc_set_ring_head(struct xe_lrc *lrc, u32 head)
{
if (xe_lrc_has_indirect_ring_state(lrc))
@@ -1653,10 +1660,12 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
xe_vm_get(lrc->bo->vm);
snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
+ snapshot->ring_addr = __xe_lrc_ring_ggtt_addr(lrc);
snapshot->indirect_context_desc = xe_lrc_indirect_ring_ggtt_addr(lrc);
snapshot->head = xe_lrc_ring_head(lrc);
snapshot->tail.internal = lrc->ring.tail;
snapshot->tail.memory = xe_lrc_ring_tail(lrc);
+ snapshot->start = xe_lrc_ring_start(lrc);
snapshot->start_seqno = xe_lrc_start_seqno(lrc);
snapshot->seqno = xe_lrc_seqno(lrc);
snapshot->lrc_bo = xe_bo_get(lrc->bo);
@@ -1710,11 +1719,14 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
return;
drm_printf(p, "\tHW Context Desc: 0x%08x\n", snapshot->context_desc);
+ drm_printf(p, "\tHW Ring address: 0x%08x\n",
+ snapshot->ring_addr);
drm_printf(p, "\tHW Indirect Ring State: 0x%08x\n",
snapshot->indirect_context_desc);
drm_printf(p, "\tLRC Head: (memory) %u\n", snapshot->head);
drm_printf(p, "\tLRC Tail: (internal) %u, (memory) %u\n",
snapshot->tail.internal, snapshot->tail.memory);
+ drm_printf(p, "\tRing start: (memory) 0x%08x\n", snapshot->start);
drm_printf(p, "\tStart seqno: (memory) %d\n", snapshot->start_seqno);
drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->seqno);
drm_printf(p, "\tTimestamp: 0x%08x\n", snapshot->ctx_timestamp);
@@ -1776,5 +1788,20 @@ u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ trace_xe_lrc_update_timestamp(lrc, *old_ts);
+
return lrc->ctx_timestamp;
}
+
+/**
+ * xe_lrc_ring_is_idle() - Check whether the LRC ring is idle
+ * @lrc: Pointer to the lrc.
+ *
+ * Compare the LRC ring head and tail to determine whether the ring is idle.
+ *
+ * Return: true if the ring is idle, false otherwise
+ */
+bool xe_lrc_ring_is_idle(struct xe_lrc *lrc)
+{
+ return xe_lrc_ring_head(lrc) == xe_lrc_ring_tail(lrc);
+}
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index c24542e89318..4206e6a8b50a 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -17,13 +17,32 @@ enum xe_engine_class;
struct xe_gt;
struct xe_hw_engine;
struct xe_lrc;
-struct xe_lrc_snapshot;
struct xe_vm;
+struct xe_lrc_snapshot {
+ struct xe_bo *lrc_bo;
+ void *lrc_snapshot;
+ unsigned long lrc_size, lrc_offset;
+
+ u32 context_desc;
+ u32 ring_addr;
+ u32 indirect_context_desc;
+ u32 head;
+ u32 start;
+ struct {
+ u32 internal;
+ u32 memory;
+ } tail;
+ u32 start_seqno;
+ u32 seqno;
+ u32 ctx_timestamp;
+ u32 ctx_job_timestamp;
+};
+
#define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4)
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size);
+ u32 ring_size, u16 msix_vec);
void xe_lrc_destroy(struct kref *ref);
/**
@@ -61,6 +80,8 @@ u32 xe_lrc_ring_head(struct xe_lrc *lrc);
u32 xe_lrc_ring_space(struct xe_lrc *lrc);
void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size);
+bool xe_lrc_ring_is_idle(struct xe_lrc *lrc);
+
u32 xe_lrc_indirect_ring_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc);
u32 *xe_lrc_regs(struct xe_lrc *lrc);
diff --git a/drivers/gpu/drm/xe/xe_macros.h b/drivers/gpu/drm/xe/xe_macros.h
index daf56c846d03..8a77c2423555 100644
--- a/drivers/gpu/drm/xe/xe_macros.h
+++ b/drivers/gpu/drm/xe/xe_macros.h
@@ -10,9 +10,13 @@
#define XE_WARN_ON WARN_ON
-#define XE_IOCTL_DBG(xe, cond) \
- ((cond) && (drm_dbg(&(xe)->drm, \
- "Ioctl argument check failed at %s:%d: %s", \
- __FILE__, __LINE__, #cond), 1))
+#define XE_IOCTL_DBG(xe, cond) ({ \
+ int cond__ = !!(cond); \
+ if (cond__) \
+ drm_dbg(&(xe)->drm, \
+ "Ioctl argument check failed at %s:%d: %s", \
+ __FILE__, __LINE__, #cond); \
+ cond__; \
+})
#endif
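The statement-expression form above evaluates cond exactly once and yields its truth value, so callers can use the macro directly as an if condition. A hypothetical usage sketch (VALID_FLAGS is a placeholder, not from this patch):

	if (XE_IOCTL_DBG(xe, args->flags & ~VALID_FLAGS))
		return -EINVAL;	/* the failed check was already logged once */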
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 95b6e9d7b7db..404fa2a456d5 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -5,8 +5,8 @@
#include <drm/drm_managed.h>
-#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
+#include "regs/xe_irq_regs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
@@ -19,15 +19,25 @@
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_memirq.h"
-#include "xe_sriov.h"
-#include "xe_sriov_printk.h"
#define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition)
-#define memirq_debug(m, msg...) xe_sriov_dbg_verbose(memirq_to_xe(m), "MEMIRQ: " msg)
+#define memirq_printk(m, _level, _fmt, ...) \
+ drm_##_level(&memirq_to_xe(m)->drm, "MEMIRQ%u: " _fmt, \
+ memirq_to_tile(m)->id, ##__VA_ARGS__)
+
+#ifdef CONFIG_DRM_XE_DEBUG_MEMIRQ
+#define memirq_debug(m, _fmt, ...) memirq_printk(m, dbg, _fmt, ##__VA_ARGS__)
+#else
+#define memirq_debug(...)
+#endif
+
+#define memirq_err(m, _fmt, ...) memirq_printk(m, err, _fmt, ##__VA_ARGS__)
+#define memirq_err_ratelimited(m, _fmt, ...) \
+ memirq_printk(m, err_ratelimited, _fmt, ##__VA_ARGS__)
static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq)
{
- return container_of(memirq, struct xe_tile, sriov.vf.memirq);
+ return container_of(memirq, struct xe_tile, memirq);
}
static struct xe_device *memirq_to_xe(struct xe_memirq *memirq)
@@ -105,33 +115,74 @@ static const char *guc_name(struct xe_guc *guc)
* | |
* | |
* +-----------+
+ *
+ *
+ * MSI-X use case
+ *
+ * When using MSI-X, hw engines report interrupt status and source to engine
+ * instance 0. For this scenario, in order to differentiate between the
+ * engines, we need to pass different status/source pointers in the LRC.
+ *
+ * The requirements on those pointers are:
+ * - Interrupt status should be 4KiB aligned
+ * - Interrupt source should be 64 bytes aligned
+ *
+ * To accommodate this, we duplicate the memirq page layout above -
+ * allocating a page for each engine instance and pass this page in the LRC.
+ * Note that the same page can be reused for different engine types.
+ * For example, an LRC executing on CCS #x will have pointers to page #x,
+ * and an LRC executing on BCS #x will have the same pointers.
+ *
+ * ::
+ *
+ * 0x0000 +==============================+ <== page for instance 0 (BCS0, CCS0, etc.)
+ * | Interrupt Status Report Page |
+ * 0x0400 +==============================+
+ * | Interrupt Source Report Page |
+ * 0x0440 +==============================+
+ * | Interrupt Enable Mask |
+ * +==============================+
+ * | Not used |
+ * 0x1000 +==============================+ <== page for instance 1 (BCS1, CCS1, etc.)
+ * | Interrupt Status Report Page |
+ * 0x1400 +==============================+
+ * | Interrupt Source Report Page |
+ * 0x1440 +==============================+
+ * | Not used |
+ * 0x2000 +==============================+ <== page for instance 2 (BCS2, CCS2, etc.)
+ * | ... |
+ * +==============================+
+ *
*/
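A quick arithmetic check of the layout above (editor's sketch, matching the XE_MEMIRQ_*_OFFSET macros reworked later in this patch): each instance owns one 4KiB page, with the source report at +0x400 inside it, so instance 2 lands exactly on the 0x2000 boundary shown in the diagram and both alignment requirements hold:

	/* sketch only: compile-time check of the offsets the diagram shows */
	BUILD_BUG_ON(XE_MEMIRQ_STATUS_OFFSET(2) != 0x2000);	/* 4KiB aligned */
	BUILD_BUG_ON(XE_MEMIRQ_SOURCE_OFFSET(2) != 0x2400);	/* 64B aligned */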
-static void __release_xe_bo(struct drm_device *drm, void *arg)
+static inline bool hw_reports_to_instance_zero(struct xe_memirq *memirq)
{
- struct xe_bo *bo = arg;
-
- xe_bo_unpin_map_no_vm(bo);
+ /*
+ * When the HW engines are configured to use MSI-X,
+ * they report interrupt status and source to the offset of
+ * engine instance 0.
+ */
+ return xe_device_has_msix(memirq_to_xe(memirq));
}
static int memirq_alloc_pages(struct xe_memirq *memirq)
{
struct xe_device *xe = memirq_to_xe(memirq);
struct xe_tile *tile = memirq_to_tile(memirq);
+ size_t bo_size = hw_reports_to_instance_zero(memirq) ?
+ XE_HW_ENGINE_MAX_INSTANCE * SZ_4K : SZ_4K;
struct xe_bo *bo;
int err;
- BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET, SZ_64));
- BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET, SZ_4K));
-
- /* XXX: convert to managed bo */
- bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE |
- XE_BO_FLAG_NEEDS_UC |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET(0), SZ_64));
+ BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET(0), SZ_4K));
+
+ bo = xe_managed_bo_create_pin_map(xe, tile, bo_size,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_NEEDS_UC |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out;
@@ -140,25 +191,25 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
memirq_assert(memirq, !xe_bo_is_vram(bo));
memirq_assert(memirq, !memirq->bo);
- iosys_map_memset(&bo->vmap, 0, 0, SZ_4K);
+ iosys_map_memset(&bo->vmap, 0, 0, bo_size);
memirq->bo = bo;
- memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET);
- memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET);
+ memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET(0));
+ memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET(0));
memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET);
memirq_assert(memirq, !memirq->source.is_iomem);
memirq_assert(memirq, !memirq->status.is_iomem);
memirq_assert(memirq, !memirq->mask.is_iomem);
- memirq_debug(memirq, "page offsets: source %#x status %#x\n",
- xe_memirq_source_ptr(memirq), xe_memirq_status_ptr(memirq));
+ memirq_debug(memirq, "page offsets: bo %#x bo_size %zu source %#x status %#x\n",
+ xe_bo_ggtt_addr(bo), bo_size, XE_MEMIRQ_SOURCE_OFFSET(0),
+ XE_MEMIRQ_STATUS_OFFSET(0));
- return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo);
+ return 0;
out:
- xe_sriov_err(memirq_to_xe(memirq),
- "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
+ memirq_err(memirq, "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
return err;
}
@@ -178,9 +229,7 @@ static void memirq_set_enable(struct xe_memirq *memirq, bool enable)
*
* These allocations are managed and will be implicitly released on unload.
*
- * Note: This function shall be called only by the VF driver.
- *
- * If this function fails then VF driver won't be able to operate correctly.
+ * If this function fails then the driver won't be able to operate correctly.
* If `Memory Based Interrupts`_ are not used this function will return 0.
*
* Return: 0 on success or a negative error code on failure.
@@ -190,9 +239,7 @@ int xe_memirq_init(struct xe_memirq *memirq)
struct xe_device *xe = memirq_to_xe(memirq);
int err;
- memirq_assert(memirq, IS_SRIOV_VF(xe));
-
- if (!xe_device_has_memirq(xe))
+ if (!xe_device_uses_memirq(xe))
return 0;
err = memirq_alloc_pages(memirq);
@@ -205,55 +252,70 @@ int xe_memirq_init(struct xe_memirq *memirq)
return 0;
}
+static u32 __memirq_source_page(struct xe_memirq *memirq, u16 instance)
+{
+ memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE);
+ memirq_assert(memirq, memirq->bo);
+
+ instance = hw_reports_to_instance_zero(memirq) ? instance : 0;
+ return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET(instance);
+}
+
/**
* xe_memirq_source_ptr - Get GGTT's offset of the `Interrupt Source Report Page`_.
* @memirq: the &xe_memirq to query
+ * @hwe: the hw engine for which we want the report page
*
- * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * Shall be called when `Memory Based Interrupts`_ are used
* and xe_memirq_init() didn't fail.
*
* Return: GGTT's offset of the `Interrupt Source Report Page`_.
*/
-u32 xe_memirq_source_ptr(struct xe_memirq *memirq)
+u32 xe_memirq_source_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
{
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
+
+ return __memirq_source_page(memirq, hwe->instance);
+}
+
+static u32 __memirq_status_page(struct xe_memirq *memirq, u16 instance)
+{
+ memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE);
memirq_assert(memirq, memirq->bo);
- return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET;
+ instance = hw_reports_to_instance_zero(memirq) ? instance : 0;
+ return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET(instance);
}
/**
* xe_memirq_status_ptr - Get GGTT's offset of the `Interrupt Status Report Page`_.
* @memirq: the &xe_memirq to query
+ * @hwe: the hw engine for which we want the report page
*
- * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * Shall be called when `Memory Based Interrupts`_ are used
* and xe_memirq_init() didn't fail.
*
* Return: GGTT's offset of the `Interrupt Status Report Page`_.
*/
-u32 xe_memirq_status_ptr(struct xe_memirq *memirq)
+u32 xe_memirq_status_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
{
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
- memirq_assert(memirq, memirq->bo);
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
- return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET;
+ return __memirq_status_page(memirq, hwe->instance);
}
/**
* xe_memirq_enable_ptr - Get GGTT's offset of the Interrupt Enable Mask.
* @memirq: the &xe_memirq to query
*
- * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * Shall be called when `Memory Based Interrupts`_ are used
* and xe_memirq_init() didn't fail.
*
* Return: GGTT's offset of the Interrupt Enable Mask.
*/
u32 xe_memirq_enable_ptr(struct xe_memirq *memirq)
{
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
memirq_assert(memirq, memirq->bo);
return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET;
@@ -267,7 +329,7 @@ u32 xe_memirq_enable_ptr(struct xe_memirq *memirq)
* Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
* to be used by the GuC when `Memory Based Interrupts`_ are required.
*
- * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * Shall be called when `Memory Based Interrupts`_ are used
* and xe_memirq_init() didn't fail.
*
* Return: 0 on success or a negative error code on failure.
@@ -279,12 +341,10 @@ int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc)
u32 source, status;
int err;
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
- memirq_assert(memirq, memirq->bo);
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
- source = xe_memirq_source_ptr(memirq) + offset;
- status = xe_memirq_status_ptr(memirq) + offset * SZ_16;
+ source = __memirq_source_page(memirq, 0) + offset;
+ status = __memirq_status_page(memirq, 0) + offset * SZ_16;
err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY,
source);
@@ -299,9 +359,8 @@ int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc)
return 0;
failed:
- xe_sriov_err(memirq_to_xe(memirq),
- "Failed to setup report pages in %s (%pe)\n",
- guc_name(guc), ERR_PTR(err));
+ memirq_err(memirq, "Failed to setup report pages in %s (%pe)\n",
+ guc_name(guc), ERR_PTR(err));
return err;
}
@@ -311,13 +370,12 @@ failed:
*
* This is part of the driver IRQ setup flow.
*
- * This function shall only be used by the VF driver on platforms that use
+ * This function shall only be used on platforms that use
* `Memory Based Interrupts`_.
*/
void xe_memirq_reset(struct xe_memirq *memirq)
{
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
if (memirq->bo)
memirq_set_enable(memirq, false);
@@ -329,13 +387,12 @@ void xe_memirq_reset(struct xe_memirq *memirq)
*
* This is part of the driver IRQ setup flow.
*
- * This function shall only be used by the VF driver on platforms that use
+ * This function shall only be used on platforms that use
* `Memory Based Interrupts`_.
*/
void xe_memirq_postinstall(struct xe_memirq *memirq)
{
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
if (memirq->bo)
memirq_set_enable(memirq, true);
@@ -349,9 +406,9 @@ static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
value = iosys_map_rd(vector, offset, u8);
if (value) {
if (value != 0xff)
- xe_sriov_err_ratelimited(memirq_to_xe(memirq),
- "Unexpected memirq value %#x from %s at %u\n",
- value, name, offset);
+ memirq_err_ratelimited(memirq,
+ "Unexpected memirq value %#x from %s at %u\n",
+ value, name, offset);
iosys_map_wr(vector, offset, u8, 0x00);
}
@@ -376,6 +433,31 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat
if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
+
+ if (memirq_received(memirq, status, ilog2(GUC_INTR_SW_INT_0), name))
+ xe_guc_irq_handler(guc, GUC_INTR_SW_INT_0);
+}
+
+/**
+ * xe_memirq_hwe_handler - Check and process interrupts for a specific HW engine.
+ * @memirq: the &xe_memirq
+ * @hwe: the hw engine to process
+ *
+ * This function reads and dispatches `Memory Based Interrupts`_ for the provided HW engine.
+ */
+void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
+{
+ u16 offset = hwe->irq_offset;
+ u16 instance = hw_reports_to_instance_zero(memirq) ? hwe->instance : 0;
+ struct iosys_map src_offset = IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap,
+ XE_MEMIRQ_SOURCE_OFFSET(instance));
+
+ if (memirq_received(memirq, &src_offset, offset, "SRC")) {
+ struct iosys_map status_offset =
+ IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap,
+ XE_MEMIRQ_STATUS_OFFSET(instance) + offset * SZ_16);
+ memirq_dispatch_engine(memirq, &status_offset, hwe);
+ }
}
/**
@@ -405,13 +487,8 @@ void xe_memirq_handler(struct xe_memirq *memirq)
if (gt->tile != tile)
continue;
- for_each_hw_engine(hwe, gt, id) {
- if (memirq_received(memirq, &memirq->source, hwe->irq_offset, "SRC")) {
- map = IOSYS_MAP_INIT_OFFSET(&memirq->status,
- hwe->irq_offset * SZ_16);
- memirq_dispatch_engine(memirq, &map, hwe);
- }
- }
+ for_each_hw_engine(hwe, gt, id)
+ xe_memirq_hwe_handler(memirq, hwe);
}
/* GuC and media GuC (if present) must be checked separately */
diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h
index 2d40d03c3095..06130650e9d6 100644
--- a/drivers/gpu/drm/xe/xe_memirq.h
+++ b/drivers/gpu/drm/xe/xe_memirq.h
@@ -9,16 +9,18 @@
#include <linux/types.h>
struct xe_guc;
+struct xe_hw_engine;
struct xe_memirq;
int xe_memirq_init(struct xe_memirq *memirq);
-u32 xe_memirq_source_ptr(struct xe_memirq *memirq);
-u32 xe_memirq_status_ptr(struct xe_memirq *memirq);
+u32 xe_memirq_source_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe);
+u32 xe_memirq_status_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe);
u32 xe_memirq_enable_ptr(struct xe_memirq *memirq);
void xe_memirq_reset(struct xe_memirq *memirq);
void xe_memirq_postinstall(struct xe_memirq *memirq);
+void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe);
void xe_memirq_handler(struct xe_memirq *memirq);
int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_memirq_types.h b/drivers/gpu/drm/xe/xe_memirq_types.h
index 625b6b8736cc..9d0f6c1cdb9d 100644
--- a/drivers/gpu/drm/xe/xe_memirq_types.h
+++ b/drivers/gpu/drm/xe/xe_memirq_types.h
@@ -11,9 +11,9 @@
struct xe_bo;
/* ISR */
-#define XE_MEMIRQ_STATUS_OFFSET 0x0
+#define XE_MEMIRQ_STATUS_OFFSET(inst) ((inst) * SZ_4K + 0x0)
/* IIR */
-#define XE_MEMIRQ_SOURCE_OFFSET 0x400
+#define XE_MEMIRQ_SOURCE_OFFSET(inst) ((inst) * SZ_4K + 0x400)
/* IMR */
#define XE_MEMIRQ_ENABLE_OFFSET 0x440
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index cfd31ae49cc1..278bc96cf593 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -209,7 +209,8 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PAGETABLE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1350,6 +1351,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m,
/* For sysmem PTE's, need to map them in our hole.. */
if (!IS_DGFX(xe)) {
+ u16 pat_index = xe->pat.idx[XE_CACHE_WB];
u32 ptes, ofs;
ppgtt_ofs = NUM_KERNEL_PDE - 1;
@@ -1409,7 +1411,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m,
pt_bo->update_index = current_update;
addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
- XE_CACHE_WB, 0);
+ pat_index, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
}
@@ -1504,7 +1506,7 @@ err_bb:
* using the default engine for the updates, they will be performed in the
* order they grab the job_mutex. If different engines are used, external
* synchronization is needed for overlapping updates to maintain page-table
- * consistency. Note that the meaing of "overlapping" is that the updates
+ * consistency. Note that the meaning of "overlapping" is that the updates
* touch the same page-table, which might be a higher-level page-directory.
* If no pipelining is needed, then updates may be performed by the cpu.
*
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 3fd462fda625..a48f239cad1c 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -36,13 +36,19 @@ static void tiles_fini(void *arg)
/*
* On multi-tile devices, partition the BAR space for MMIO on each tile,
* possibly accounting for register override on the number of tiles available.
+ * tile_mmio_size contains both the tile's 4MB register space and
+ * additional space for the GGTT and other (possibly unused) regions.
* Resulting memory layout is like below:
*
* .----------------------. <- tile_count * tile_mmio_size
* | .... |
* |----------------------| <- 2 * tile_mmio_size
+ * | tile1 GTT + other |
+ * |----------------------| <- 1 * tile_mmio_size + 4MB
* | tile1->mmio.regs |
* |----------------------| <- 1 * tile_mmio_size
+ * | tile0 GTT + other |
+ * |----------------------| <- 4MB
* | tile0->mmio.regs |
* '----------------------' <- 0MB
*/
@@ -61,16 +67,16 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
/* Possibly override number of tile based on configuration register */
if (!xe->info.skip_mtcfg) {
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
u8 tile_count;
u32 mtcfg;
/*
* Although the per-tile mmio regs are not yet initialized, this
- * is fine as it's going to the root gt, that's guaranteed to be
- * initialized earlier in xe_mmio_init()
+ * is fine as it's going to the root tile's mmio, that's
+ * guaranteed to be initialized earlier in xe_mmio_init()
*/
- mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
+ mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
if (tile_count < xe->info.tile_count) {
@@ -90,8 +96,9 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
regs = xe->mmio.regs;
for_each_tile(tile, xe, id) {
- tile->mmio.size = tile_mmio_size;
+ tile->mmio.regs_size = SZ_4M;
tile->mmio.regs = regs;
+ tile->mmio.tile = tile;
regs += tile_mmio_size;
}
}
@@ -126,8 +133,9 @@ static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size,
regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count;
for_each_tile(tile, xe, id) {
- tile->mmio_ext.size = tile_mmio_ext_size;
+ tile->mmio_ext.regs_size = tile_mmio_ext_size;
tile->mmio_ext.regs = regs;
+ tile->mmio_ext.tile = tile;
regs += tile_mmio_ext_size;
}
}
@@ -157,137 +165,132 @@ int xe_mmio_init(struct xe_device *xe)
{
struct xe_tile *root_tile = xe_device_get_root_tile(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- const int mmio_bar = 0;
/*
* Map the entire BAR.
* The first 16MB of the BAR belong to the root tile and include:
* registers (0-4MB), reserved space (4MB-8MB) and GGTT (8MB-16MB).
*/
- xe->mmio.size = pci_resource_len(pdev, mmio_bar);
- xe->mmio.regs = pci_iomap(pdev, mmio_bar, GTTMMADR_BAR);
+ xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
+ xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
if (xe->mmio.regs == NULL) {
drm_err(&xe->drm, "failed to map registers\n");
return -EIO;
}
/* Setup first tile; other tiles (if present) will be setup later. */
- root_tile->mmio.size = SZ_16M;
+ root_tile->mmio.regs_size = SZ_4M;
root_tile->mmio.regs = xe->mmio.regs;
+ root_tile->mmio.tile = root_tile;
return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
-static void mmio_flush_pending_writes(struct xe_gt *gt)
+static void mmio_flush_pending_writes(struct xe_mmio *mmio)
{
#define DUMMY_REG_OFFSET 0x130030
- struct xe_tile *tile = gt_to_tile(gt);
int i;
- if (tile->xe->info.platform != XE_LUNARLAKE)
+ if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
return;
/* 4 dummy writes */
for (i = 0; i < 4; i++)
- writel(0, tile->mmio.regs + DUMMY_REG_OFFSET);
+ writel(0, mmio->regs + DUMMY_REG_OFFSET);
}
-u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
+u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
{
- struct xe_tile *tile = gt_to_tile(gt);
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u8 val;
/* Wa_15015404425 */
- mmio_flush_pending_writes(gt);
+ mmio_flush_pending_writes(mmio);
- val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
- trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+ val = readb(mmio->regs + addr);
+ trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
return val;
}
-u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
+u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
- struct xe_tile *tile = gt_to_tile(gt);
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u16 val;
/* Wa_15015404425 */
- mmio_flush_pending_writes(gt);
+ mmio_flush_pending_writes(mmio);
- val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
- trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+ val = readw(mmio->regs + addr);
+ trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
return val;
}
-void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
+void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
{
- struct xe_tile *tile = gt_to_tile(gt);
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
- trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
+ trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
- if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
- xe_gt_sriov_vf_write32(gt, reg, val);
+ if (!reg.vf && mmio->sriov_vf_gt)
+ xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
else
- writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+ writel(val, mmio->regs + addr);
}
-u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
{
- struct xe_tile *tile = gt_to_tile(gt);
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u32 val;
/* Wa_15015404425 */
- mmio_flush_pending_writes(gt);
+ mmio_flush_pending_writes(mmio);
- if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
- val = xe_gt_sriov_vf_read32(gt, reg);
+ if (!reg.vf && mmio->sriov_vf_gt)
+ val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
else
- val = readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+ val = readl(mmio->regs + addr);
- trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+ trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
return val;
}
-u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
+u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
{
u32 old, reg_val;
- old = xe_mmio_read32(gt, reg);
+ old = xe_mmio_read32(mmio, reg);
reg_val = (old & ~clr) | set;
- xe_mmio_write32(gt, reg, reg_val);
+ xe_mmio_write32(mmio, reg, reg_val);
return old;
}
-int xe_mmio_write32_and_verify(struct xe_gt *gt,
+int xe_mmio_write32_and_verify(struct xe_mmio *mmio,
struct xe_reg reg, u32 val, u32 mask, u32 eval)
{
u32 reg_val;
- xe_mmio_write32(gt, reg, val);
- reg_val = xe_mmio_read32(gt, reg);
+ xe_mmio_write32(mmio, reg, val);
+ reg_val = xe_mmio_read32(mmio, reg);
return (reg_val & mask) != eval ? -EINVAL : 0;
}
-bool xe_mmio_in_range(const struct xe_gt *gt,
+bool xe_mmio_in_range(const struct xe_mmio *mmio,
const struct xe_mmio_range *range,
struct xe_reg reg)
{
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
return range && addr >= range->start && addr <= range->end;
}
/**
* xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
- * @gt: MMIO target GT
+ * @mmio: MMIO target
* @reg: register to read value from
*
* Although Intel GPUs have some 64-bit registers, the hardware officially
@@ -307,21 +310,21 @@ bool xe_mmio_in_range(const struct xe_gt *gt,
*
* Returns the value of the 64-bit register.
*/
-u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
+u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
{
struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
u32 ldw, udw, oldudw, retries;
- reg.addr = xe_mmio_adjusted_addr(gt, reg.addr);
- reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr);
+ reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
+ reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);
/* we shouldn't adjust just one register address */
- xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4);
+ xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);
- oldudw = xe_mmio_read32(gt, reg_udw);
+ oldudw = xe_mmio_read32(mmio, reg_udw);
for (retries = 5; retries; --retries) {
- ldw = xe_mmio_read32(gt, reg);
- udw = xe_mmio_read32(gt, reg_udw);
+ ldw = xe_mmio_read32(mmio, reg);
+ udw = xe_mmio_read32(mmio, reg_udw);
if (udw == oldudw)
break;
@@ -329,13 +332,13 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
oldudw = udw;
}
- xe_gt_WARN(gt, retries == 0,
- "64-bit read of %#x did not stabilize\n", reg.addr);
+ drm_WARN(&mmio->tile->xe->drm, retries == 0,
+ "64-bit read of %#x did not stabilize\n", reg.addr);
return (u64)udw << 32 | ldw;
}
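Usage sketch (editor's addition; the register name is a placeholder): because only the upper dword is re-checked, a carry from the low to the high half between the two reads shows up as an upper-dword change and the pair is simply read again:

	/* SOME_TS_LO is hypothetical; its upper half must sit at reg.addr + 0x4 */
	u64 ts = xe_mmio_read64_2x32(&gt->mmio, SOME_TS_LO);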
-static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic, bool expect_match)
{
ktime_t cur = ktime_get_raw();
@@ -346,7 +349,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
bool check;
for (;;) {
- read = xe_mmio_read32(gt, reg);
+ read = xe_mmio_read32(mmio, reg);
check = (read & mask) == val;
if (!expect_match)
@@ -372,7 +375,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
}
if (ret != 0) {
- read = xe_mmio_read32(gt, reg);
+ read = xe_mmio_read32(mmio, reg);
check = (read & mask) == val;
if (!expect_match)
@@ -390,7 +393,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
/**
* xe_mmio_wait32() - Wait for a register to match the desired masked value
- * @gt: MMIO target GT
+ * @mmio: MMIO target
* @reg: register to read value from
* @mask: mask to be applied to the value read from the register
* @val: desired value after applying the mask
@@ -407,15 +410,15 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
* @timeout_us for different reasons, especially in non-atomic contexts. Thus,
* it is possible that this function succeeds even after @timeout_us has passed.
*/
-int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic)
{
- return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
+ return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
}
/**
* xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
- * @gt: MMIO target GT
+ * @mmio: MMIO target
* @reg: register to read value from
* @mask: mask to be applied to the value read from the register
* @val: value not to be matched after applying the mask
@@ -426,8 +429,8 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
* This function works exactly like xe_mmio_wait32() with the exception that
* @val is expected not to be matched.
*/
-int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic)
{
- return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
+ return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
}
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 26551410ecc8..8a46f4006a84 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -14,25 +14,30 @@ struct xe_reg;
int xe_mmio_init(struct xe_device *xe);
int xe_mmio_probe_tiles(struct xe_device *xe);
-u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
-u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg);
-void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
-u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
-u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
-int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
-bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
-
-u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
-int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic);
-int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic);
-
-static inline u32 xe_mmio_adjusted_addr(const struct xe_gt *gt, u32 addr)
+u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
+u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
+void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
+u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg);
+u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set);
+int xe_mmio_write32_and_verify(struct xe_mmio *mmio, struct xe_reg reg, u32 val, u32 mask, u32 eval);
+bool xe_mmio_in_range(const struct xe_mmio *mmio, const struct xe_mmio_range *range, struct xe_reg reg);
+
+u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg);
+int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+ u32 timeout_us, u32 *out_val, bool atomic);
+int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+ u32 val, u32 timeout_us, u32 *out_val, bool atomic);
+
+static inline u32 xe_mmio_adjusted_addr(const struct xe_mmio *mmio, u32 addr)
{
- if (addr < gt->mmio.adj_limit)
- addr += gt->mmio.adj_offset;
+ if (addr < mmio->adj_limit)
+ addr += mmio->adj_offset;
return addr;
}
+static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe)
+{
+ return &xe->tiles[0].mmio;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 7ff0ac5b799a..54d199b5cfb2 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -278,7 +278,7 @@ static void xelp_lncf_dump(struct xe_mocs_info *info, struct xe_gt *gt, struct d
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i));
drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n",
j++,
@@ -310,7 +310,7 @@ static void xelp_mocs_dump(struct xe_mocs_info *info, unsigned int flags,
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i));
drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u, %u, %u, %u, %u, %u, %u, %u, %u ] (%#8x)\n",
i,
@@ -383,7 +383,7 @@ static void xehp_lncf_dump(struct xe_mocs_info *info, unsigned int flags,
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i));
drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n",
j++,
@@ -428,7 +428,7 @@ static void pvc_mocs_dump(struct xe_mocs_info *info, unsigned int flags, struct
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i));
drm_printf(p, "LNCFCMOCS[%2d] = [ %u ] (%#8x)\n",
j++,
@@ -510,7 +510,7 @@ static void mtl_mocs_dump(struct xe_mocs_info *info, unsigned int flags,
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i));
drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u] (%#8x)\n",
i,
@@ -553,7 +553,7 @@ static void xe2_mocs_dump(struct xe_mocs_info *info, unsigned int flags,
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i));
drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u, %u] (%#8x)\n",
i,
@@ -576,6 +576,7 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
memset(info, 0, sizeof(struct xe_mocs_info));
switch (xe->info.platform) {
+ case XE_PANTHERLAKE:
case XE_LUNARLAKE:
case XE_BATTLEMAGE:
info->ops = &xe2_mocs_ops;
@@ -690,7 +691,7 @@ static void __init_mocs_table(struct xe_gt *gt,
if (regs_are_mcr(gt))
xe_gt_mcr_multicast_write(gt, XEHP_GLOBAL_MOCS(i), mocs);
else
- xe_mmio_write32(gt, XELP_GLOBAL_MOCS(i), mocs);
+ xe_mmio_write32(&gt->mmio, XELP_GLOBAL_MOCS(i), mocs);
}
}
@@ -730,7 +731,7 @@ static void init_l3cc_table(struct xe_gt *gt,
if (regs_are_mcr(gt))
xe_gt_mcr_multicast_write(gt, XEHP_LNCFCMOCS(i), l3cc);
else
- xe_mmio_write32(gt, XELP_LNCFCMOCS(i), l3cc);
+ xe_mmio_write32(&gt->mmio, XELP_LNCFCMOCS(i), l3cc);
}
}
@@ -773,25 +774,21 @@ void xe_mocs_init(struct xe_gt *gt)
void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_mocs_info table;
- unsigned int flags;
- u32 ret;
struct xe_device *xe = gt_to_xe(gt);
+ struct xe_mocs_info table;
+ unsigned int fw_ref, flags;
flags = get_mocs_settings(xe, &table);
xe_pm_runtime_get_noresume(xe);
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-
- if (ret)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
goto err_fw;
table.ops->dump(&table, flags, gt, p);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
err_fw:
- xe_assert(xe, !ret);
xe_pm_runtime_put(xe);
}
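Editor's note on the pattern introduced here: xe_force_wake_get() now returns an opaque non-zero reference on success (0 on failure) instead of an error code, and xe_force_wake_put() releases exactly the domains that were actually woken. A hedged sketch of the new calling convention:

	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;			/* wake failed; nothing to put */
	/* ... MMIO access ... */
	xe_force_wake_put(gt_to_fw(gt), fw_ref);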
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index bfc3deebdaa2..07b27114be9a 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -19,7 +19,7 @@
struct xe_modparam xe_modparam = {
.probe_display = true,
- .guc_log_level = 5,
+ .guc_log_level = 3,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
.wedged_mode = 1,
/* the rest are 0 by default */
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 2804f14f8f29..fa873f3d0a9d 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -16,7 +16,6 @@
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
-#include "regs/xe_lrc_layout.h"
#include "regs/xe_oa_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -28,7 +27,6 @@
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc_pc.h"
-#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
@@ -36,11 +34,22 @@
#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
+#include "xe_sync.h"
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
#define XE_OA_UNIT_INVALID U32_MAX
+enum xe_oa_submit_deps {
+ XE_OA_SUBMIT_NO_DEPS,
+ XE_OA_SUBMIT_ADD_DEPS,
+};
+
+enum xe_oa_user_extn_from {
+ XE_OA_USER_EXTN_FROM_OPEN,
+ XE_OA_USER_EXTN_FROM_CONFIG,
+};
+
struct xe_oa_reg {
struct xe_reg addr;
u32 value;
@@ -63,13 +72,8 @@ struct xe_oa_config {
struct rcu_head rcu;
};
-struct flex {
- struct xe_reg reg;
- u32 offset;
- u32 value;
-};
-
struct xe_oa_open_param {
+ struct xe_file *xef;
u32 oa_unit_id;
bool sample;
u32 metric_set;
@@ -81,6 +85,11 @@ struct xe_oa_open_param {
struct xe_exec_queue *exec_q;
struct xe_hw_engine *hwe;
bool no_preempt;
+ struct drm_xe_sync __user *syncs_user;
+ int num_syncs;
+ struct xe_sync_entry *syncs;
+ size_t oa_buffer_size;
+ int wait_num_reports;
};
struct xe_oa_config_bo {
@@ -90,6 +99,17 @@ struct xe_oa_config_bo {
struct xe_bb *bb;
};
+struct xe_oa_fence {
+ /* @base: dma fence base */
+ struct dma_fence base;
+ /* @lock: lock for the fence */
+ spinlock_t lock;
+ /* @work: work to signal @base */
+ struct delayed_work work;
+ /* @cb: callback to schedule @work */
+ struct dma_fence_cb cb;
+};
+
#define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x
static const struct xe_oa_format oa_formats[] = {
@@ -162,10 +182,10 @@ static struct xe_oa_config *xe_oa_get_oa_config(struct xe_oa *oa, int metrics_se
return oa_config;
}
-static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo)
+static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *last_fence)
{
xe_oa_config_put(oa_bo->oa_config);
- xe_bb_free(oa_bo->bb, NULL);
+ xe_bb_free(oa_bo->bb, last_fence);
kfree(oa_bo);
}
@@ -176,7 +196,7 @@ static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream)
static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream)
{
- return xe_mmio_read32(stream->gt, __oa_regs(stream)->oa_tail_ptr) &
+ return xe_mmio_read32(&stream->gt->mmio, __oa_regs(stream)->oa_tail_ptr) &
OAG_OATAILPTR_MASK;
}
@@ -214,11 +234,9 @@ static void oa_timestamp_clear(struct xe_oa_stream *stream, u32 *report)
static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
{
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
+ u32 tail, hw_tail, partial_report_size, available;
int report_size = stream->oa_buffer.format->size;
- u32 tail, hw_tail;
unsigned long flags;
- bool pollin;
- u32 partial_report_size;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -262,12 +280,12 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
stream->oa_buffer.tail = tail;
- pollin = xe_oa_circ_diff(stream, stream->oa_buffer.tail,
- stream->oa_buffer.head) >= report_size;
+ available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head);
+ stream->pollin = available >= stream->wait_num_reports * report_size;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- return pollin;
+ return stream->pollin;
}
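As a worked example, with a 256-byte report format and wait_num_reports = 4 (illustrative values; the open path defaults wait_num_reports to 1), pollin stays false until at least 4 * 256 = 1024 bytes of unread reports sit between head and tail, so pollers are woken once per batch instead of once per report.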
static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
@@ -275,10 +293,8 @@ static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
struct xe_oa_stream *stream =
container_of(hrtimer, typeof(*stream), poll_check_timer);
- if (xe_oa_buffer_check_unlocked(stream)) {
- stream->pollin = true;
+ if (xe_oa_buffer_check_unlocked(stream))
wake_up(&stream->poll_wq);
- }
hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns));
@@ -366,7 +382,7 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
struct xe_reg oaheadptr = __oa_regs(stream)->oa_head_ptr;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- xe_mmio_write32(stream->gt, oaheadptr,
+ xe_mmio_write32(&stream->gt->mmio, oaheadptr,
(head + gtt_offset) & OAG_OAHEADPTR_MASK);
stream->oa_buffer.head = head;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -378,21 +394,30 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
{
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
- u32 oa_buf = gtt_offset | OABUFFER_SIZE_16M | OAG_OABUFFER_MEMORY_SELECT;
+ int size_exponent = __ffs(stream->oa_buffer.bo->size);
+ u32 oa_buf = gtt_offset | OAG_OABUFFER_MEMORY_SELECT;
+ struct xe_mmio *mmio = &stream->gt->mmio;
unsigned long flags;
+ /*
+ * If the OA buffer is larger than 16MB (exponent greater than 24), the
+ * size decoded from this field is multiplied by 8 by the buffer size
+ * select programmed in xe_oa_enable_metric_set, hence the smaller base.
+ */
+ oa_buf |= REG_FIELD_PREP(OABUFFER_SIZE_MASK,
+ size_exponent > 24 ? size_exponent - 20 : size_exponent - 17);
+
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_status, 0);
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_head_ptr,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_status, 0);
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_head_ptr,
gtt_offset & OAG_OAHEADPTR_MASK);
stream->oa_buffer.head = 0;
/*
* PRM says: "This MMIO must be set before the OATAILPTR register and after the
* OAHEADPTR register. This is to enable proper functionality of the overflow bit".
*/
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_buffer, oa_buf);
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_tail_ptr,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_buffer, oa_buf);
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_tail_ptr,
gtt_offset & OAG_OATAILPTR_MASK);
/* Mark that we need updated tail pointer to read from */
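The size-field computation at the top of this hunk is easiest to check with concrete values. Assuming the field decodes as 2^(field + 17) bytes, consistent with the SZ_128K minimum enforced in xe_oa_set_prop_oa_buffer_size() below, a worked example:

	SZ_128K (2^17): field = 17 - 17 = 0  ->  128K
	SZ_16M  (2^24): field = 24 - 17 = 7  ->  16M
	SZ_32M  (2^25): field = 25 - 20 = 5  ->  4M  * 8 = 32M
	SZ_128M (2^27): field = 27 - 20 = 7  ->  16M * 8 = 128M

The x8 multiplier in the >16M rows is OAG_OA_DEBUG_BUF_SIZE_SELECT, programmed by oag_buf_size_select() in xe_oa_enable_metric_set().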
@@ -424,6 +449,12 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
return val;
}
+static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
+{
+ return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+ OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
+}
+
static void xe_oa_enable(struct xe_oa_stream *stream)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
@@ -444,21 +475,23 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
- xe_mmio_write32(stream->gt, regs->oa_ctrl, val);
+ xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
}
static void xe_oa_disable(struct xe_oa_stream *stream)
{
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, 0);
- if (xe_mmio_wait32(stream->gt, __oa_regs(stream)->oa_ctrl,
+ struct xe_mmio *mmio = &stream->gt->mmio;
+
+ xe_mmio_rmw32(mmio, __oa_regs(stream)->oa_ctrl, __oactrl_used_bits(stream), 0);
+ if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl,
OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,
"wait for OA to be disabled timed out\n");
if (GRAPHICS_VERx100(stream->oa->xe) <= 1270 && GRAPHICS_VERx100(stream->oa->xe) != 1260) {
/* <= XE_METEORLAKE except XE_PVC */
- xe_mmio_write32(stream->gt, OA_TLB_INV_CR, 1);
- if (xe_mmio_wait32(stream->gt, OA_TLB_INV_CR, 1, 0, 50000, NULL, false))
+ xe_mmio_write32(mmio, OA_TLB_INV_CR, 1);
+ if (xe_mmio_wait32(mmio, OA_TLB_INV_CR, 1, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,
"wait for OA tlb invalidate timed out\n");
}
@@ -481,7 +514,7 @@ static int __xe_oa_read(struct xe_oa_stream *stream, char __user *buf,
size_t count, size_t *offset)
{
/* Only clear our bits to avoid side-effects */
- stream->oa_status = xe_mmio_rmw32(stream->gt, __oa_regs(stream)->oa_status,
+ stream->oa_status = xe_mmio_rmw32(&stream->gt->mmio, __oa_regs(stream)->oa_status,
OASTATUS_RELEVANT_BITS, 0);
/*
* Signal to userspace that there is non-zero OA status to read via
@@ -567,32 +600,60 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
return ret;
}
-static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
+static void xe_oa_lock_vma(struct xe_exec_queue *q)
{
+ if (q->vm) {
+ down_read(&q->vm->lock);
+ xe_vm_lock(q->vm, false);
+ }
+}
+
+static void xe_oa_unlock_vma(struct xe_exec_queue *q)
+{
+ if (q->vm) {
+ xe_vm_unlock(q->vm);
+ up_read(&q->vm->lock);
+ }
+}
+
+static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps,
+ struct xe_bb *bb)
+{
+ struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q;
struct xe_sched_job *job;
struct dma_fence *fence;
- long timeout;
int err = 0;
- /* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
- job = xe_bb_create_job(stream->k_exec_q, bb);
+ xe_oa_lock_vma(q);
+
+ job = xe_bb_create_job(q, bb);
if (IS_ERR(job)) {
err = PTR_ERR(job);
goto exit;
}
+ job->ggtt = true;
+
+ if (deps == XE_OA_SUBMIT_ADD_DEPS) {
+ for (int i = 0; i < stream->num_syncs && !err; i++)
+ err = xe_sync_entry_add_deps(&stream->syncs[i], job);
+ if (err) {
+ drm_dbg(&stream->oa->xe->drm, "xe_sync_entry_add_deps err %d\n", err);
+ goto err_put_job;
+ }
+ }
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
- timeout = dma_fence_wait_timeout(fence, false, HZ);
- dma_fence_put(fence);
- if (timeout < 0)
- err = timeout;
- else if (!timeout)
- err = -ETIME;
+ xe_oa_unlock_vma(q);
+
+ return fence;
+err_put_job:
+ xe_sched_job_put(job);
exit:
- return err;
+ xe_oa_unlock_vma(q);
+ return ERR_PTR(err);
}
static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs)
@@ -636,57 +697,34 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
xe_oa_config_put(stream->oa_config);
llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
- free_oa_config_bo(oa_bo);
-}
-
-static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
- struct xe_bb *bb, const struct flex *flex, u32 count)
-{
- u32 offset = xe_bo_ggtt_addr(lrc->bo);
-
- do {
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
- bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
- bb->cs[bb->len++] = 0;
- bb->cs[bb->len++] = flex->value;
-
- } while (flex++, --count);
+ free_oa_config_bo(oa_bo, stream->last_fence);
+ dma_fence_put(stream->last_fence);
}
-static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
- const struct flex *flex, u32 count)
+static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
{
+ struct dma_fence *fence;
struct xe_bb *bb;
int err;
- bb = xe_bb_new(stream->gt, 4 * count, false);
+ bb = xe_bb_new(stream->gt, 2 * count + 1, false);
if (IS_ERR(bb)) {
err = PTR_ERR(bb);
goto exit;
}
- xe_oa_store_flex(stream, lrc, bb, flex, count);
+ write_cs_mi_lri(bb, reg_lri, count);
- err = xe_oa_submit_bb(stream, bb);
- xe_bb_free(bb, NULL);
-exit:
- return err;
-}
-
-static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
-{
- struct xe_bb *bb;
- int err;
-
- bb = xe_bb_new(stream->gt, 3, false);
- if (IS_ERR(bb)) {
- err = PTR_ERR(bb);
- goto exit;
+ fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto free_bb;
}
+ xe_bb_free(bb, fence);
+ dma_fence_put(fence);
- write_cs_mi_lri(bb, reg_lri, 1);
-
- err = xe_oa_submit_bb(stream, bb);
+ return 0;
+free_bb:
xe_bb_free(bb, NULL);
exit:
return err;
@@ -695,70 +733,55 @@ exit:
static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
- struct flex regs_context[] = {
+ struct xe_oa_reg reg_lri[] = {
{
OACTXCONTROL(stream->hwe->mmio_base),
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
enable ? OA_COUNTER_RESUME : 0,
},
{
+ OAR_OACONTROL,
+ oacontrol,
+ },
+ {
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- regs_offset + CTX_CONTEXT_CONTROL,
- _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
+ _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+ enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
},
};
- struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
- int err;
-
- /* Modify stream hwe context image with regs_context */
- err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
- regs_context, ARRAY_SIZE(regs_context));
- if (err)
- return err;
- /* Apply reg_lri using LRI */
- return xe_oa_load_with_lri(stream, &reg_lri);
+ return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
}
static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
- struct flex regs_context[] = {
+ struct xe_oa_reg reg_lri[] = {
{
OACTXCONTROL(stream->hwe->mmio_base),
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
enable ? OA_COUNTER_RESUME : 0,
},
{
+ OAC_OACONTROL,
+ oacontrol
+ },
+ {
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- regs_offset + CTX_CONTEXT_CONTROL,
- _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
+ _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+ enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
},
};
- struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
- int err;
/* Set ccs select to enable programming of OAC_OACONTROL */
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, __oa_ccs_select(stream));
-
- /* Modify stream hwe context image with regs_context */
- err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
- regs_context, ARRAY_SIZE(regs_context));
- if (err)
- return err;
+ xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl,
+ __oa_ccs_select(stream));
- /* Apply reg_lri using LRI */
- return xe_oa_load_with_lri(stream, &reg_lri);
+ return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
}
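Both the OAR and OAC paths now program everything with a single LRI batch instead of patching the saved context image. The batch sizing in xe_oa_load_with_lri() above (2 * count + 1 dwords) matches the MI_LOAD_REGISTER_IMM layout: one header dword followed by an (offset, value) dword pair per register. A sketch of what write_cs_mi_lri() is expected to emit (macro names assumed from the MI command definitions):

	/* header dword: LRI opcode plus the dword length for n_regs pairs */
	bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(n_regs);
	for (i = 0; i < n_regs; i++) {
		bb->cs[bb->len++] = reg_data[i].addr.addr;	/* register offset */
		bb->cs[bb->len++] = reg_data[i].value;		/* value to load */
	}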
static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
@@ -785,6 +808,7 @@ static u32 oag_configure_mmio_trigger(const struct xe_oa_stream *stream, bool en
static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
{
+ struct xe_mmio *mmio = &stream->gt->mmio;
u32 sqcnt1;
/*
@@ -798,7 +822,7 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
_MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
}
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
oag_configure_mmio_trigger(stream, false));
/* disable the context save/restore or OAR counters */
@@ -806,13 +830,13 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
xe_oa_configure_oa_context(stream, false);
/* Make sure we disable noa to save power. */
- xe_mmio_rmw32(stream->gt, RPM_CONFIG1, GT_NOA_ENABLE, 0);
+ xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0);
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
/* Reset PMON Enable to save power. */
- xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, sqcnt1, 0);
+ xe_mmio_rmw32(mmio, XELPMP_SQCNT1, sqcnt1, 0);
}
static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
@@ -832,7 +856,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
xe_oa_free_oa_buffer(stream);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_pm_runtime_put(stream->oa->xe);
/* Wa_1509372804:pvc: Unset the override of GUCRC mode to enable rc6 */
@@ -840,17 +864,15 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));
xe_oa_free_configs(stream);
+ xe_file_put(stream->xef);
}
-static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
+static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size)
{
struct xe_bo *bo;
- BUILD_BUG_ON_NOT_POWER_OF_2(XE_OA_BUFFER_SIZE);
- BUILD_BUG_ON(XE_OA_BUFFER_SIZE < SZ_128K || XE_OA_BUFFER_SIZE > SZ_16M);
-
bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL,
- XE_OA_BUFFER_SIZE, ttm_bo_type_kernel,
+ size, ttm_bo_type_kernel,
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -910,11 +932,62 @@ out:
return oa_bo;
}
+static void xe_oa_update_last_fence(struct xe_oa_stream *stream, struct dma_fence *fence)
+{
+ dma_fence_put(stream->last_fence);
+ stream->last_fence = dma_fence_get(fence);
+}
+
+static void xe_oa_fence_work_fn(struct work_struct *w)
+{
+ struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work);
+
+ /* Signal fence to indicate new OA configuration is active */
+ dma_fence_signal(&ofence->base);
+ dma_fence_put(&ofence->base);
+}
+
+static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ /* Additional empirical delay needed for NOA programming after registers are written */
+#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
+
+ struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
+
+ INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
+ queue_delayed_work(system_unbound_wq, &ofence->work,
+ usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
+ dma_fence_put(fence);
+}
+
+static const char *xe_oa_get_driver_name(struct dma_fence *fence)
+{
+ return "xe_oa";
+}
+
+static const char *xe_oa_get_timeline_name(struct dma_fence *fence)
+{
+ return "unbound";
+}
+
+static const struct dma_fence_ops xe_oa_fence_ops = {
+ .get_driver_name = xe_oa_get_driver_name,
+ .get_timeline_name = xe_oa_get_timeline_name,
+};
+
static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config)
{
-#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
struct xe_oa_config_bo *oa_bo;
- int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
+ struct xe_oa_fence *ofence;
+ int i, err, num_signal = 0;
+ struct dma_fence *fence;
+
+ ofence = kzalloc(sizeof(*ofence), GFP_KERNEL);
+ if (!ofence) {
+ err = -ENOMEM;
+ goto exit;
+ }
oa_bo = xe_oa_alloc_config_buffer(stream, config);
if (IS_ERR(oa_bo)) {
@@ -922,11 +995,50 @@ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config
goto exit;
}
- err = xe_oa_submit_bb(stream, oa_bo->bb);
+ /* Emit OA configuration batch */
+ fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto exit;
+ }
- /* Additional empirical delay needed for NOA programming after registers are written */
- usleep_range(us, 2 * us);
+ /* Point of no return: initialize and set fence to signal */
+ spin_lock_init(&ofence->lock);
+ dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0);
+
+ for (i = 0; i < stream->num_syncs; i++) {
+ if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL)
+ num_signal++;
+ xe_sync_entry_signal(&stream->syncs[i], &ofence->base);
+ }
+
+ /* Additional dma_fence_get in case we dma_fence_wait */
+ if (!num_signal)
+ dma_fence_get(&ofence->base);
+
+ /* Update last fence too before adding callback */
+ xe_oa_update_last_fence(stream, fence);
+
+ /* Add job fence callback to schedule work to signal ofence->base */
+ err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb);
+ xe_gt_assert(stream->gt, !err || err == -ENOENT);
+ if (err == -ENOENT)
+ xe_oa_config_cb(fence, &ofence->cb);
+
+ /* If nothing needs to be signaled we wait synchronously */
+ if (!num_signal) {
+ dma_fence_wait(&ofence->base, false);
+ dma_fence_put(&ofence->base);
+ }
+
+ /* Done with syncs */
+ for (i = 0; i < stream->num_syncs; i++)
+ xe_sync_entry_cleanup(&stream->syncs[i]);
+ kfree(stream->syncs);
+
+ return 0;
exit:
+ kfree(ofence);
return err;
}
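The reference counting on ofence->base above is worth tracing, since the output syncs and the optional synchronous wait both pull on the same fence:

	/*
	 * dma_fence_init()        ref = 1, dropped by xe_oa_fence_work_fn()
	 *                         once the fence has been signaled
	 * xe_sync_entry_signal()  output syncobjs take their own references
	 * !num_signal             one extra dma_fence_get() keeps the fence
	 *                         alive across the synchronous dma_fence_wait(),
	 *                         dropped immediately after the wait
	 */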
@@ -938,8 +1050,16 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
0 : OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
+static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
+{
+ return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
+ stream->oa_buffer.bo->size > SZ_16M ?
+ OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
+}
+
static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
{
+ struct xe_mmio *mmio = &stream->gt->mmio;
u32 oa_debug, sqcnt1;
int ret;
@@ -966,12 +1086,13 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL |
OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL;
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
_MASKED_BIT_ENABLE(oa_debug) |
oag_report_ctx_switches(stream) |
+ oag_buf_size_select(stream) |
oag_configure_mmio_trigger(stream, true));
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
(OAG_OAGLBCTXCTRL_COUNTER_RESUME |
OAG_OAGLBCTXCTRL_TIMER_ENABLE |
REG_FIELD_PREP(OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK,
@@ -985,7 +1106,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
- xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, 0, sqcnt1);
+ xe_mmio_rmw32(mmio, XELPMP_SQCNT1, 0, sqcnt1);
/* Configure OAR/OAC */
if (stream->exec_q) {
@@ -997,6 +1118,288 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
return xe_oa_emit_oa_config(stream, stream->oa_config);
}
+static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
+{
+ u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
+ u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
+ u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
+ u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
+ int idx;
+
+ for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
+ const struct xe_oa_format *f = &oa->oa_formats[idx];
+
+ if (counter_size == f->counter_size && bc_report == f->bc_report &&
+ type == f->type && counter_sel == f->counter_select) {
+ *name = idx;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ if (value >= oa->oa_unit_ids) {
+ drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
+ return -EINVAL;
+ }
+ param->oa_unit_id = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->sample = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->metric_set = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ int ret = decode_oa_format(oa, value, &param->oa_format);
+
+ if (ret) {
+ drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
+ return ret;
+ }
+ return 0;
+}
+
+static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+#define OA_EXPONENT_MAX 31
+
+ if (value > OA_EXPONENT_MAX) {
+ drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
+ return -EINVAL;
+ }
+ param->period_exponent = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->disabled = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->exec_queue_id = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->engine_instance = value;
+ return 0;
+}
+
+static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->no_preempt = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->num_syncs = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->syncs_user = u64_to_user_ptr(value);
+ return 0;
+}
+
+static int xe_oa_set_prop_oa_buffer_size(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ if (!is_power_of_2(value) || value < SZ_128K || value > SZ_128M) {
+ drm_dbg(&oa->xe->drm, "OA buffer size invalid %llu\n", value);
+ return -EINVAL;
+ }
+ param->oa_buffer_size = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_wait_num_reports(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ if (!value) {
+ drm_dbg(&oa->xe->drm, "wait_num_reports %llu\n", value);
+ return -EINVAL;
+ }
+ param->wait_num_reports = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ return -EINVAL;
+}
+
+typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param);
+static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = {
+ [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
+ [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
+ [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
+ [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
+ [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
+ [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
+ [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
+ [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
+ [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
+ [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
+ [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_oa_buffer_size,
+ [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_wait_num_reports,
+};
+
+static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
+ [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
+ [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
+ [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_ret_inval,
+};
+
+static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from,
+ u64 extension, struct xe_oa_open_param *param)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_ext_set_property ext;
+ int err;
+ u32 idx;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return -EFAULT;
+
+ BUILD_BUG_ON(ARRAY_SIZE(xe_oa_set_property_funcs_open) !=
+ ARRAY_SIZE(xe_oa_set_property_funcs_config));
+
+ if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) ||
+ XE_IOCTL_DBG(oa->xe, ext.pad))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open));
+
+ if (from == XE_OA_USER_EXTN_FROM_CONFIG)
+ return xe_oa_set_property_funcs_config[idx](oa, ext.value, param);
+ else
+ return xe_oa_set_property_funcs_open[idx](oa, ext.value, param);
+}
+
+typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, enum xe_oa_user_extn_from from,
+ u64 extension, struct xe_oa_open_param *param);
+static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
+ [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
+};
+
+#define MAX_USER_EXTENSIONS 16
+static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from from, u64 extension,
+ int ext_number, struct xe_oa_open_param *param)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_user_extension ext;
+ int err;
+ u32 idx;
+
+ if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
+ return -E2BIG;
+
+ err = __copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
+ XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
+ err = xe_oa_user_extension_funcs[idx](oa, from, extension, param);
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return err;
+
+ if (ext.next_extension)
+ return xe_oa_user_extensions(oa, from, ext.next_extension, ++ext_number, param);
+
+ return 0;
+}
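Extensions arrive from userspace as a chain of drm_xe_user_extension headers, walked recursively up to MAX_USER_EXTENSIONS deep, with the from argument selecting which property table applies. A hypothetical userspace sketch chaining two properties for the stream-config path (struct layouts as consumed by the copies above):

	struct drm_xe_ext_set_property prop[2] = {};

	prop[0].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
	prop[0].base.next_extension = (uintptr_t)&prop[1];
	prop[0].property = DRM_XE_OA_PROPERTY_OA_METRIC_SET;
	prop[0].value = metric_set_id;		/* hypothetical config id */

	prop[1].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
	prop[1].property = DRM_XE_OA_PROPERTY_NUM_SYNCS;
	prop[1].value = 0;			/* none: wait synchronously */

A property outside the allowed table, e.g. DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE on the config path, dispatches to xe_oa_set_prop_ret_inval() and fails with -EINVAL.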
+
+static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
+{
+ int ret, num_syncs, num_ufence = 0;
+
+ if (param->num_syncs && !param->syncs_user) {
+ drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (param->num_syncs) {
+ param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL);
+ if (!param->syncs) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ }
+
+ for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
+ ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
+ &param->syncs_user[num_syncs], 0);
+ if (ret)
+ goto err_syncs;
+
+ if (xe_sync_is_ufence(&param->syncs[num_syncs]))
+ num_ufence++;
+ }
+
+ if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) {
+ ret = -EINVAL;
+ goto err_syncs;
+ }
+
+ return 0;
+
+err_syncs:
+ while (num_syncs--)
+ xe_sync_entry_cleanup(&param->syncs[num_syncs]);
+ kfree(param->syncs);
+exit:
+ return ret;
+}
+
static void xe_oa_stream_enable(struct xe_oa_stream *stream)
{
stream->pollin = false;
@@ -1090,36 +1493,38 @@ static int xe_oa_disable_locked(struct xe_oa_stream *stream)
static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg)
{
- struct drm_xe_ext_set_property ext;
+ struct xe_oa_open_param param = {};
long ret = stream->oa_config->id;
struct xe_oa_config *config;
int err;
- err = __copy_from_user(&ext, u64_to_user_ptr(arg), sizeof(ext));
- if (XE_IOCTL_DBG(stream->oa->xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) ||
- XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) ||
- XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) ||
- XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET))
- return -EINVAL;
+ err = xe_oa_user_extensions(stream->oa, XE_OA_USER_EXTN_FROM_CONFIG, arg, 0, &param);
+ if (err)
+ return err;
- config = xe_oa_get_oa_config(stream->oa, ext.value);
+ config = xe_oa_get_oa_config(stream->oa, param.metric_set);
if (!config)
return -ENODEV;
- if (config != stream->oa_config) {
- err = xe_oa_emit_oa_config(stream, config);
- if (!err)
- config = xchg(&stream->oa_config, config);
- else
- ret = err;
+ param.xef = stream->xef;
+ err = xe_oa_parse_syncs(stream->oa, &param);
+ if (err)
+ goto err_config_put;
+
+ stream->num_syncs = param.num_syncs;
+ stream->syncs = param.syncs;
+
+ err = xe_oa_emit_oa_config(stream, config);
+ if (!err) {
+ config = xchg(&stream->oa_config, config);
+ drm_dbg(&stream->oa->xe->drm, "changed to oa config uuid=%s\n",
+ stream->oa_config->uuid);
}
+err_config_put:
xe_oa_config_put(config);
- return ret;
+ return err ?: ret;
}
static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
@@ -1145,7 +1550,7 @@ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg)
{
- struct drm_xe_oa_stream_info info = { .oa_buf_size = XE_OA_BUFFER_SIZE, };
+ struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, };
void __user *uaddr = (void __user *)arg;
if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -1206,9 +1611,11 @@ static int xe_oa_release(struct inode *inode, struct file *file)
struct xe_oa_stream *stream = file->private_data;
struct xe_gt *gt = stream->gt;
+ xe_pm_runtime_get(gt_to_xe(gt));
mutex_lock(&gt->oa.gt_lock);
xe_oa_destroy_locked(stream);
mutex_unlock(&gt->oa.gt_lock);
+ xe_pm_runtime_put(gt_to_xe(gt));
/* Release the reference the OA stream kept on the driver */
drm_dev_put(&gt_to_xe(gt)->drm);
@@ -1229,7 +1636,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
}
/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
- if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
+ if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) {
drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
return -EINVAL;
}
@@ -1267,86 +1674,12 @@ static const struct file_operations xe_oa_fops = {
.mmap = xe_oa_mmap,
};
-static bool engine_supports_mi_query(struct xe_hw_engine *hwe)
-{
- return hwe->class == XE_ENGINE_CLASS_RENDER ||
- hwe->class == XE_ENGINE_CLASS_COMPUTE;
-}
-
-static bool xe_oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
-{
- u32 idx = *offset;
- u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
- bool found = false;
-
- idx++;
- for (; idx < len; idx += 2) {
- if (state[idx] == reg) {
- found = true;
- break;
- }
- }
-
- *offset = idx;
- return found;
-}
-
-#define IS_MI_LRI_CMD(x) (REG_FIELD_GET(MI_OPCODE, (x)) == \
- REG_FIELD_GET(MI_OPCODE, MI_LOAD_REGISTER_IMM))
-
-static u32 xe_oa_context_image_offset(struct xe_oa_stream *stream, u32 reg)
-{
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 len = (xe_gt_lrc_size(stream->gt, stream->hwe->class) +
- lrc->ring.size) / sizeof(u32);
- u32 offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
- u32 *state = (u32 *)lrc->bo->vmap.vaddr;
-
- if (drm_WARN_ON(&stream->oa->xe->drm, !state))
- return U32_MAX;
-
- for (; offset < len; ) {
- if (IS_MI_LRI_CMD(state[offset])) {
- /*
- * We expect reg-value pairs in MI_LRI command, so
- * MI_LRI_LEN() should be even
- */
- drm_WARN_ON(&stream->oa->xe->drm,
- MI_LRI_LEN(state[offset]) & 0x1);
-
- if (xe_oa_find_reg_in_lri(state, reg, &offset, len))
- break;
- } else {
- offset++;
- }
- }
-
- return offset < len ? offset : U32_MAX;
-}
-
-static int xe_oa_set_ctx_ctrl_offset(struct xe_oa_stream *stream)
-{
- struct xe_reg reg = OACTXCONTROL(stream->hwe->mmio_base);
- u32 offset = stream->oa->ctx_oactxctrl_offset[stream->hwe->class];
-
- /* Do this only once. Failure is stored as offset of U32_MAX */
- if (offset)
- goto exit;
-
- offset = xe_oa_context_image_offset(stream, reg.addr);
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] = offset;
-
- drm_dbg(&stream->oa->xe->drm, "%s oa ctx control at 0x%08x dword offset\n",
- stream->hwe->name, offset);
-exit:
- return offset && offset != U32_MAX ? 0 : -ENODEV;
-}
-
static int xe_oa_stream_init(struct xe_oa_stream *stream,
struct xe_oa_open_param *param)
{
struct xe_oa_unit *u = param->hwe->oa_unit;
struct xe_gt *gt = param->hwe->gt;
+ unsigned int fw_ref;
int ret;
stream->exec_q = param->exec_q;
@@ -1359,6 +1692,11 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
stream->periodic = param->period_exponent > 0;
stream->period_exponent = param->period_exponent;
stream->no_preempt = param->no_preempt;
+ stream->wait_num_reports = param->wait_num_reports;
+
+ stream->xef = xe_file_get(param->xef);
+ stream->num_syncs = param->num_syncs;
+ stream->syncs = param->syncs;
/*
* For Xe2+, when overrun mode is enabled, there are no partial reports at the end
@@ -1368,20 +1706,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
stream->oa_buffer.circ_size =
- XE_OA_BUFFER_SIZE - XE_OA_BUFFER_SIZE % stream->oa_buffer.format->size;
+ param->oa_buffer_size -
+ param->oa_buffer_size % stream->oa_buffer.format->size;
else
- stream->oa_buffer.circ_size = XE_OA_BUFFER_SIZE;
-
- if (stream->exec_q && engine_supports_mi_query(stream->hwe)) {
- /* If we don't find the context offset, just return error */
- ret = xe_oa_set_ctx_ctrl_offset(stream);
- if (ret) {
- drm_err(&stream->oa->xe->drm,
- "xe_oa_set_ctx_ctrl_offset failed for %s\n",
- stream->hwe->name);
- goto exit;
- }
- }
+ stream->oa_buffer.circ_size = param->oa_buffer_size;
stream->oa_config = xe_oa_get_oa_config(stream->oa, param->metric_set);
if (!stream->oa_config) {
@@ -1407,9 +1735,13 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
/* Take runtime pm ref and forcewake to disable RC6 */
xe_pm_runtime_get(stream->oa->xe);
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ ret = -ETIMEDOUT;
+ goto err_fw_put;
+ }
- ret = xe_oa_alloc_oa_buffer(stream);
+ ret = xe_oa_alloc_oa_buffer(stream, param->oa_buffer_size);
if (ret)
goto err_fw_put;
@@ -1449,13 +1781,14 @@ err_put_k_exec_q:
err_free_oa_buf:
xe_oa_free_oa_buffer(stream);
err_fw_put:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(stream->oa->xe);
if (stream->override_gucrc)
xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));
err_free_configs:
xe_oa_free_configs(stream);
exit:
+ xe_file_put(stream->xef);
return ret;
}
@@ -1533,7 +1866,7 @@ u32 xe_oa_timestamp_frequency(struct xe_gt *gt)
case XE_PVC:
case XE_METEORLAKE:
xe_pm_runtime_get(gt_to_xe(gt));
- reg = xe_mmio_read32(gt, RPM_CONFIG0);
+ reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
xe_pm_runtime_put(gt_to_xe(gt));
shift = REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
@@ -1565,27 +1898,6 @@ static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type)
}
}
-static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
-{
- u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
- u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
- u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
- u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
- int idx;
-
- for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
- const struct xe_oa_format *f = &oa->oa_formats[idx];
-
- if (counter_size == f->counter_size && bc_report == f->bc_report &&
- type == f->type && counter_sel == f->counter_select) {
- *name = idx;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
/**
* xe_oa_unit_id - Return OA unit ID for a hardware engine
* @hwe: @xe_hw_engine
@@ -1632,155 +1944,6 @@ out:
return ret;
}
-static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- if (value >= oa->oa_unit_ids) {
- drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
- return -EINVAL;
- }
- param->oa_unit_id = value;
- return 0;
-}
-
-static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->sample = value;
- return 0;
-}
-
-static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->metric_set = value;
- return 0;
-}
-
-static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- int ret = decode_oa_format(oa, value, &param->oa_format);
-
- if (ret) {
- drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
- return ret;
- }
- return 0;
-}
-
-static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
-#define OA_EXPONENT_MAX 31
-
- if (value > OA_EXPONENT_MAX) {
- drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
- return -EINVAL;
- }
- param->period_exponent = value;
- return 0;
-}
-
-static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->disabled = value;
- return 0;
-}
-
-static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->exec_queue_id = value;
- return 0;
-}
-
-static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->engine_instance = value;
- return 0;
-}
-
-static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->no_preempt = value;
- return 0;
-}
-
-typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param);
-static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
- [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
- [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
- [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
- [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
- [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
- [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
- [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
- [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
- [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
-};
-
-static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension,
- struct xe_oa_open_param *param)
-{
- u64 __user *address = u64_to_user_ptr(extension);
- struct drm_xe_ext_set_property ext;
- int err;
- u32 idx;
-
- err = __copy_from_user(&ext, address, sizeof(ext));
- if (XE_IOCTL_DBG(oa->xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) ||
- XE_IOCTL_DBG(oa->xe, ext.pad))
- return -EINVAL;
-
- idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs));
- return xe_oa_set_property_funcs[idx](oa, ext.value, param);
-}
-
-typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension,
- struct xe_oa_open_param *param);
-static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
- [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
-};
-
-#define MAX_USER_EXTENSIONS 16
-static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number,
- struct xe_oa_open_param *param)
-{
- u64 __user *address = u64_to_user_ptr(extension);
- struct drm_xe_user_extension ext;
- int err;
- u32 idx;
-
- if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
- return -E2BIG;
-
- err = __copy_from_user(&ext, address, sizeof(ext));
- if (XE_IOCTL_DBG(oa->xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
- XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
- return -EINVAL;
-
- idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
- err = xe_oa_user_extension_funcs[idx](oa, extension, param);
- if (XE_IOCTL_DBG(oa->xe, err))
- return err;
-
- if (ext.next_extension)
- return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param);
-
- return 0;
-}
-
/**
* xe_oa_stream_open_ioctl - Opens an OA stream
* @dev: @drm_device
@@ -1806,7 +1969,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
return -ENODEV;
}
- ret = xe_oa_user_extensions(oa, data, 0, &param);
+ param.xef = xef;
+ ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
if (ret)
return ret;
@@ -1815,8 +1979,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
return -ENOENT;
- if (param.exec_q->width > 1)
- drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n");
+ if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1))
+ return -EOPNOTSUPP;
}
/*
@@ -1874,11 +2038,35 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz);
}
+ if (!param.oa_buffer_size)
+ param.oa_buffer_size = DEFAULT_XE_OA_BUFFER_SIZE;
+
+ if (!param.wait_num_reports)
+ param.wait_num_reports = 1;
+ if (param.wait_num_reports > param.oa_buffer_size / f->size) {
+ drm_dbg(&oa->xe->drm, "wait_num_reports %d\n", param.wait_num_reports);
+ ret = -EINVAL;
+ goto err_exec_q;
+ }
+
+ ret = xe_oa_parse_syncs(oa, &param);
+ if (ret)
+ goto err_exec_q;
+
mutex_lock(&param.hwe->gt->oa.gt_lock);
ret = xe_oa_stream_open_ioctl_locked(oa, &param);
mutex_unlock(&param.hwe->gt->oa.gt_lock);
+ if (ret < 0)
+ goto err_sync_cleanup;
+
+ return ret;
+
+err_sync_cleanup:
+ while (param.num_syncs--)
+ xe_sync_entry_cleanup(&param.syncs[param.num_syncs]);
+ kfree(param.syncs);
err_exec_q:
- if (ret < 0 && param.exec_q)
+ if (param.exec_q)
xe_exec_queue_put(param.exec_q);
return ret;
}
@@ -1978,6 +2166,7 @@ static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
+ { .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
{ .start = 0xE590, .end = 0xE590 }, /* TDL_LSC_LAT_MEASURE_TDL_GFX */
{ .start = 0x13000, .end = 0x137FC }, /* PES_0_PESL0 - PES_63_UPPER_PESL3 */
@@ -2348,8 +2537,10 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt)
u->type = DRM_XE_OA_UNIT_TYPE_OAM;
}
+ xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
+
/* Ensure MMIO trigger remains disabled till there is a stream */
- xe_mmio_write32(gt, u->regs.oa_debug,
+ xe_mmio_write32(&gt->mmio, u->regs.oa_debug,
oag_configure_mmio_trigger(NULL, false));
/* Set oa_unit_ids now to ensure ids remain contiguous */
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 8862eca73fbe..52e33c37d5ee 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -15,7 +15,7 @@
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"
-#define XE_OA_BUFFER_SIZE SZ_16M
+#define DEFAULT_XE_OA_BUFFER_SIZE SZ_16M
enum xe_oa_report_header {
HDR_32_BIT = 0,
@@ -138,9 +138,6 @@ struct xe_oa {
/** @metrics_idr: List of dynamic configurations (struct xe_oa_config) */
struct idr metrics_idr;
- /** @ctx_oactxctrl_offset: offset of OACTXCONTROL register in context image */
- u32 ctx_oactxctrl_offset[XE_ENGINE_CLASS_MAX];
-
/** @oa_formats: tracks all OA formats across platforms */
const struct xe_oa_format *oa_formats;
@@ -218,6 +215,9 @@ struct xe_oa_stream {
/** @pollin: Whether there is data available to read */
bool pollin;
+ /** @wait_num_reports: Number of reports to wait for before signalling pollin */
+ int wait_num_reports;
+
/** @periodic: Whether periodic sampling is currently enabled */
bool periodic;
@@ -238,5 +238,17 @@ struct xe_oa_stream {
/** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */
u32 no_preempt;
+
+ /** @xef: xe_file with which the stream was opened */
+ struct xe_file *xef;
+
+ /** @last_fence: fence to use in stream destroy when needed */
+ struct dma_fence *last_fence;
+
+ /** @num_syncs: size of @syncs array */
+ u32 num_syncs;
+
+ /** @syncs: syncs to wait on and to signal */
+ struct xe_sync_entry *syncs;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c
index 8ec1b84cbb9e..57cf01efc07f 100644
--- a/drivers/gpu/drm/xe/xe_observation.c
+++ b/drivers/gpu/drm/xe/xe_observation.c
@@ -56,7 +56,7 @@ int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
}
}
-static struct ctl_table observation_ctl_table[] = {
+static const struct ctl_table observation_ctl_table[] = {
{
.procname = "observation_paranoid",
.data = &xe_observation_paranoid,
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index f291a1730024..30fdbdb9341e 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -100,6 +100,10 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = {
* Reserved entries should be programmed with the maximum caching, minimum
* coherency (which matches an all-0's encoding), so we can just omit them
* in the table.
+ *
+ * Note: There is an implicit assumption in the driver that compression and
+ * coh_1way+ are mutually exclusive. If this ever stops being true, userptr
+ * and dma-buf imported from an external device will have uncleared ccs state.
*/
#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
{ \
@@ -109,7 +113,8 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = {
REG_FIELD_PREP(XE2_L3_POLICY, l3_policy) | \
REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \
REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \
- .coh_mode = __coh_mode ? XE_COH_AT_LEAST_1WAY : XE_COH_NONE \
+ .coh_mode = (BUILD_BUG_ON_ZERO(__coh_mode && comp_en) || __coh_mode) ? \
+ XE_COH_AT_LEAST_1WAY : XE_COH_NONE \
}
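The BUILD_BUG_ON_ZERO() in the .coh_mode initializer turns the note above into a compile-time guarantee: the macro expands inside a constant initializer, so a table entry that sets both comp_en and a non-zero __coh_mode refuses to build instead of silently creating the uncleared-CCS case. For instance, a deliberately invalid, hypothetical entry:

	/* fails to compile: compression together with 1-way coherency */
	XE2_PAT(0, 1, 0, 0, 0, 2),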
static const struct xe_pat_table_entry xe2_pat_table[] = {
@@ -160,7 +165,7 @@ static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[
for (int i = 0; i < n_entries; i++) {
struct xe_reg reg = XE_REG(_PAT_INDEX(i));
- xe_mmio_write32(gt, reg, table[i].value);
+ xe_mmio_write32(&gt->mmio, reg, table[i].value);
}
}
@@ -177,25 +182,24 @@ static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry ta
static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- int i, err;
+ unsigned int fw_ref;
+ int i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_fw;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
drm_printf(p, "PAT table:\n");
for (i = 0; i < xe->pat.n_entries; i++) {
- u32 pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
+ u32 pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
u8 mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);
drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i,
XELP_MEM_TYPE_STR_MAP[mem_type], pat);
}
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_fw:
- xe_assert(xe, !err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static const struct xe_pat_ops xelp_pat_ops = {
@@ -206,11 +210,12 @@ static const struct xe_pat_ops xelp_pat_ops = {
static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- int i, err;
+ unsigned int fw_ref;
+ int i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_fw;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
drm_printf(p, "PAT table:\n");
@@ -224,9 +229,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
XELP_MEM_TYPE_STR_MAP[mem_type], pat);
}
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_fw:
- xe_assert(xe, !err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static const struct xe_pat_ops xehp_pat_ops = {
@@ -237,11 +240,12 @@ static const struct xe_pat_ops xehp_pat_ops = {
static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- int i, err;
+ unsigned int fw_ref;
+ int i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_fw;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
drm_printf(p, "PAT table:\n");
@@ -253,9 +257,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat);
}
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_fw:
- xe_assert(xe, !err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static const struct xe_pat_ops xehpc_pat_ops = {
@@ -266,11 +268,12 @@ static const struct xe_pat_ops xehpc_pat_ops = {
static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- int i, err;
+ unsigned int fw_ref;
+ int i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_fw;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
drm_printf(p, "PAT table:\n");
@@ -278,7 +281,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
u32 pat;
if (xe_gt_is_media_type(gt))
- pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
+ pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
else
pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
@@ -287,9 +290,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), pat);
}
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_fw:
- xe_assert(xe, !err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
/*
@@ -316,27 +317,28 @@ static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry
int n_entries)
{
program_pat(gt, table, n_entries);
- xe_mmio_write32(gt, XE_REG(_PAT_ATS), xe2_pat_ats.value);
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe2_pat_ats.value);
if (IS_DGFX(gt_to_xe(gt)))
- xe_mmio_write32(gt, XE_REG(_PAT_PTA), xe2_pat_pta.value);
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe2_pat_pta.value);
}
static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- int i, err;
+ unsigned int fw_ref;
u32 pat;
+ int i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_fw;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
drm_printf(p, "PAT table:\n");
for (i = 0; i < xe->pat.n_entries; i++) {
if (xe_gt_is_media_type(gt))
- pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
+ pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
else
pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
@@ -355,7 +357,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
* PPGTT entries.
*/
if (xe_gt_is_media_type(gt))
- pat = xe_mmio_read32(gt, XE_REG(_PAT_PTA));
+ pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_PTA));
else
pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));
@@ -369,9 +371,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
REG_FIELD_GET(XE2_COH_MODE, pat),
pat);
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_fw:
- xe_assert(xe, !err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static const struct xe_pat_ops xe2_pat_ops = {
@@ -382,7 +382,7 @@ static const struct xe_pat_ops xe2_pat_ops = {
void xe_pat_init_early(struct xe_device *xe)
{
- if (GRAPHICS_VER(xe) == 20) {
+ if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
xe->pat.ops = &xe2_pat_ops;
xe->pat.table = xe2_pat_table;
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 5e962e72c97e..39be74848e44 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -13,7 +13,7 @@
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
-#include <drm/intel/xe_pciids.h>
+#include <drm/intel/pciids.h>
#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
@@ -103,7 +103,6 @@ static const struct xe_graphics_desc graphics_xelpp = {
#define XE_HP_FEATURES \
.has_range_tlb_invalidation = true, \
- .has_flat_ccs = true, \
.dma_mask_size = 46, \
.va_bits = 48, \
.vm_max_level = 3
@@ -120,6 +119,8 @@ static const struct xe_graphics_desc graphics_xehpg = {
XE_HP_FEATURES,
.vram_flags = XE_VRAM_FLAGS_NEED64K,
+
+ .has_flat_ccs = 1,
};
static const struct xe_graphics_desc graphics_xehpc = {
@@ -145,7 +146,6 @@ static const struct xe_graphics_desc graphics_xehpc = {
.has_asid = 1,
.has_atomic_enable_pte_bit = 1,
- .has_flat_ccs = 0,
.has_usm = 1,
};
@@ -156,7 +156,6 @@ static const struct xe_graphics_desc graphics_xelpg = {
BIT(XE_HW_ENGINE_CCS0),
XE_HP_FEATURES,
- .has_flat_ccs = 0,
};
#define XE2_GFX_FEATURES \
@@ -175,7 +174,7 @@ static const struct xe_graphics_desc graphics_xelpg = {
GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
static const struct xe_graphics_desc graphics_xe2 = {
- .name = "Xe2_LPG / Xe2_HPG",
+ .name = "Xe2_LPG / Xe2_HPG / Xe3_LPG",
XE2_GFX_FEATURES,
};
@@ -209,7 +208,7 @@ static const struct xe_media_desc media_xelpmp = {
};
static const struct xe_media_desc media_xe2 = {
- .name = "Xe2_LPM / Xe2_HPM",
+ .name = "Xe2_LPM / Xe2_HPM / Xe3_LPM",
.hw_engine_mask =
GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
@@ -234,7 +233,7 @@ static const struct xe_device_desc rkl_desc = {
.require_force_probe = true,
};
-static const u16 adls_rpls_ids[] = { XE_RPLS_IDS(NOP), 0 };
+static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
static const struct xe_device_desc adl_s_desc = {
.graphics = &graphics_xelp,
@@ -249,7 +248,7 @@ static const struct xe_device_desc adl_s_desc = {
},
};
-static const u16 adlp_rplu_ids[] = { XE_RPLU_IDS(NOP), 0 };
+static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
static const struct xe_device_desc adl_p_desc = {
.graphics = &graphics_xelp,
@@ -286,9 +285,9 @@ static const struct xe_device_desc dg1_desc = {
.require_force_probe = true,
};
-static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 };
-static const u16 dg2_g11_ids[] = { XE_DG2_G11_IDS(NOP), XE_ATS_M75_IDS(NOP), 0 };
-static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 };
+static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
+static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
+static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
#define DG2_FEATURES \
DGFX_FEATURES, \
@@ -347,6 +346,12 @@ static const struct xe_device_desc bmg_desc = {
.has_heci_cscfi = 1,
};
+static const struct xe_device_desc ptl_desc = {
+ PLATFORM(PANTHERLAKE),
+ .has_display = true,
+ .require_force_probe = true,
+};
+
#undef PLATFORM
__diag_pop();
@@ -357,6 +362,8 @@ static const struct gmdid_map graphics_ip_map[] = {
{ 1274, &graphics_xelpg }, /* Xe_LPG+ */
{ 2001, &graphics_xe2 },
{ 2004, &graphics_xe2 },
+ { 3000, &graphics_xe2 },
+ { 3001, &graphics_xe2 },
};
/* Map of GMD_ID values to media IP */
@@ -364,13 +371,9 @@ static const struct gmdid_map media_ip_map[] = {
{ 1300, &media_xelpmp },
{ 1301, &media_xe2 },
{ 2000, &media_xe2 },
+ { 3000, &media_xe2 },
};
-#define INTEL_VGA_DEVICE(id, info) { \
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, id), \
- PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16, \
- (unsigned long) info }
-
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -378,25 +381,26 @@ static const struct gmdid_map media_ip_map[] = {
* PCI ID matches, otherwise we'll use the wrong info struct above.
*/
static const struct pci_device_id pciidlist[] = {
- XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
- XE_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
- XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
- XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
- XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
- XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
- XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
- XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
- XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
- XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
- XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
- XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
- XE_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
+ INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
+ INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
+ INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
+ INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
+ INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
+ INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
+ INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
+ INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
+ INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
+ INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
+ INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
+ INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
+ INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-#undef INTEL_VGA_DEVICE
-
/* is device_id present in comma separated list of ids */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
@@ -467,13 +471,15 @@ enum xe_gmdid_type {
static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct xe_reg gmdid_reg = GMD_ID;
u32 val;
KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);
if (IS_SRIOV_VF(xe)) {
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+
/*
* To get the value of the GMDID register, VFs must obtain it
* from the GuC using MMIO communication.
@@ -484,7 +490,7 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
* least basic xe_gt and xe_guc initialization.
*
* Since to obtain the value of GMDID_MEDIA we need to use the
- * media GuC, temporarly tweak the gt type.
+ * media GuC, temporarily tweak the gt type.
*/
xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);
@@ -509,14 +515,17 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
gt->info.type = XE_GT_TYPE_UNINITIALIZED;
} else {
/*
- * We need to apply the GSI offset explicitly here as at this
- * point the xe_gt is not fully uninitialized and only basic
- * access to MMIO registers is possible.
+ * GMD_ID is a GT register, but at this point in the driver
+ * init we haven't fully initialized the GT yet so we need to
+ * read the register with the tile's MMIO accessor. That means
+ * we need to apply the GSI offset manually since it won't get
+ * automatically added as it would if we were using a GT mmio
+ * accessor.
*/
if (type == GMDID_MEDIA)
gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;
- val = xe_mmio_read32(gt, gmdid_reg);
+ val = xe_mmio_read32(mmio, gmdid_reg);
}
*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
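
The rewritten comment spells out why the GSI offset must be added by hand here: GMD_ID is read through the tile's MMIO accessor, which, unlike a GT accessor, applies no automatic adjustment for media registers. A toy model of the two accessor styles; the register address, offset value, and names are all illustrative.

#include <stdint.h>
#include <stdio.h>

#define FAKE_MEDIA_GSI_OFFSET 0x380000u	/* illustrative value only */

struct fake_reg { uint32_t addr; };

/* A "GT" accessor that transparently adjusts media registers... */
static uint32_t gt_read32(struct fake_reg reg, uint32_t adj_offset)
{
	return reg.addr + adj_offset;	/* stand-in for an MMIO read */
}

/* ...versus a raw "tile" accessor that does not. */
static uint32_t tile_read32(struct fake_reg reg)
{
	return reg.addr;		/* stand-in for an MMIO read */
}

int main(void)
{
	struct fake_reg gmdid = { .addr = 0xd8c };

	/* Early in probe only the tile accessor exists, so the caller
	 * must add the media offset itself to land on the same register
	 * a GT accessor would have hit automatically. */
	struct fake_reg media_gmdid = {
		.addr = gmdid.addr + FAKE_MEDIA_GSI_OFFSET,
	};

	printf("gt view:   0x%x\n", gt_read32(gmdid, FAKE_MEDIA_GSI_OFFSET));
	printf("tile view: 0x%x\n", tile_read32(media_gmdid));
	return 0;
}
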
@@ -678,7 +687,10 @@ static int xe_info_init(struct xe_device *xe,
xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
if (xe->info.platform != XE_PVC)
xe->info.has_device_atomics_on_smem = 1;
+
+ /* Runtime detection may change this later */
xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
+
xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
xe->info.has_usm = graphics_desc->has_usm;
@@ -707,6 +719,7 @@ static int xe_info_init(struct xe_device *xe,
gt->info.type = XE_GT_TYPE_MAIN;
gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
gt->info.engine_mask = graphics_desc->hw_engine_mask;
+
if (MEDIA_VER(xe) < 13 && media_desc)
gt->info.engine_mask |= media_desc->hw_engine_mask;
@@ -725,8 +738,6 @@ static int xe_info_init(struct xe_device *xe,
gt->info.type = XE_GT_TYPE_MEDIA;
gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
gt->info.engine_mask = media_desc->hw_engine_mask;
- gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
- gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
/*
* FIXME: At the moment multi-tile and standalone media are
@@ -757,6 +768,25 @@ static void xe_pci_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
+/*
+ * Probe the PCI device, initialize various parts of the driver.
+ *
+ * Fault injection is used to test the error paths of some initialization
+ * functions called either directly from xe_pci_probe() or indirectly for
+ * example through xe_device_probe(). Those functions use the kernel fault
+ * injection capabilities infrastructure, see
+ * Documentation/fault-injection/fault-injection.rst for details. The macro
+ * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
+ * at runtime and use a provided return value. The first requirement for
+ * error injectable functions is proper handling of the error code by the
+ * caller for recovery, which is always the case here. The second
+ * requirement is that no state is changed before the first error return.
+ * It is not strictly fulfilled for all initialization functions using the
+ * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
+ * error cases at probe time, the error code is simply propagated up by the
+ * caller. Therefore there is no consequence on those specific callers when
+ * function error injection skips the whole function.
+ */
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct xe_device_desc *desc = (const void *)ent->driver_data;
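
The comment above names two requirements for ALLOW_ERROR_INJECTION() targets: the caller must handle the returned error, and ideally no state changes before the first possible error return. A compact sketch of an init function that satisfies both, using invented names rather than driver code:

#include <errno.h>
#include <stdlib.h>

struct widget { int *buf; };

/* Meets both conditions from the comment above: the only failure
 * happens before any state is touched, and the caller is expected
 * to propagate the error. Purely illustrative. */
static int widget_init(struct widget *w, size_t n)
{
	int *buf = malloc(n * sizeof(*buf));

	if (!buf)
		return -ENOMEM;	/* no state changed yet */

	w->buf = buf;		/* state changes only after the error gate */
	return 0;
}

static int probe(struct widget *w)
{
	int err = widget_init(w, 64);

	if (err)
		return err;	/* error simply propagated up */
	return 0;
}

int main(void)
{
	struct widget w;
	int err = probe(&w);

	if (!err)
		free(w.buf);
	return err ? 1 : 0;
}
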
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 7397d556996a..9333ce776a6e 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -44,7 +44,7 @@ static int pcode_mailbox_status(struct xe_tile *tile)
[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
};
- err = xe_mmio_read32(tile->primary_gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
+ err = xe_mmio_read32(&tile->mmio, PCODE_MAILBOX) & PCODE_ERROR_MASK;
if (err) {
drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err,
err_decode[err].str ?: "Unknown");
@@ -58,7 +58,7 @@ static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *d
unsigned int timeout_ms, bool return_data,
bool atomic)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
int err;
if (tile_to_xe(tile)->info.skip_pcode)
@@ -217,7 +217,7 @@ out:
*
* It returns 0 on success, and -ERROR number on failure, -EINVAL if max
 * frequency is higher than the minimal, and other errors directly translated
- * from the PCODE Error returs:
+ * from the PCODE Error returns:
* - -ENXIO: "Illegal Command"
* - -ETIMEDOUT: "Timed out"
* - -EINVAL: "Illegal Data"
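
pcode_mailbox_status() masks the mailbox register and translates the result through a small {errno, name} table, which is where the error list in this kernel-doc comes from. A self-contained model of that decode step; the mask and the table indices below are illustrative, only the errno/name pairing mirrors the list above.

#include <errno.h>
#include <stdio.h>

struct pcode_err_decode { int err; const char *str; };

static const struct pcode_err_decode err_decode[] = {
	{ 0,          "None"            },
	{ -ENXIO,     "Illegal Command" },
	{ -ETIMEDOUT, "Timed out"       },
	{ -EINVAL,    "Illegal Data"    },
};

#define ERROR_MASK 0x3	/* illustrative mask */

static int decode(unsigned int mailbox)
{
	unsigned int code = mailbox & ERROR_MASK;
	const struct pcode_err_decode *e = &err_decode[code];

	if (e->err)
		fprintf(stderr, "PCODE Mailbox failed: %d %s\n",
			e->err, e->str);
	return e->err;
}

int main(void)
{
	/* A status of 0x2 decodes to the "Timed out" mapping. */
	return decode(0x2) == -ETIMEDOUT ? 0 : 1;
}
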
diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h
index 79b7042c4534..d08574c4cdb8 100644
--- a/drivers/gpu/drm/xe/xe_platform_types.h
+++ b/drivers/gpu/drm/xe/xe_platform_types.h
@@ -23,6 +23,7 @@ enum xe_platform {
XE_METEORLAKE,
XE_LUNARLAKE,
XE_BATTLEMAGE,
+ XE_PANTHERLAKE,
};
enum xe_subplatform {
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 7cf2160fe040..c9cc0c091dfd 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -5,7 +5,9 @@
#include "xe_pm.h"
+#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>
@@ -123,7 +125,7 @@ int xe_pm_suspend(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
- xe_display_pm_suspend(xe, false);
+ xe_display_pm_suspend(xe);
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
@@ -133,7 +135,7 @@ int xe_pm_suspend(struct xe_device *xe)
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
if (err) {
- xe_display_pm_resume(xe, false);
+ xe_display_pm_resume(xe);
goto err;
}
}
@@ -187,7 +189,7 @@ int xe_pm_resume(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_resume(gt);
- xe_display_pm_resume(xe, false);
+ xe_display_pm_resume(xe);
err = xe_bo_restore_user(xe);
if (err)
@@ -263,6 +265,7 @@ int xe_pm_init_early(struct xe_device *xe)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
/**
* xe_pm_init - Initialize Xe Power Management
@@ -388,7 +391,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
/*
* Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
- * also checks and delets bo entry from user fault list.
+ * also checks and deletes bo entry from user fault list.
*/
mutex_lock(&xe->mem_access.vram_userfault.lock);
list_for_each_entry_safe(bo, on,
@@ -412,8 +415,8 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_irq_suspend(xe);
- if (xe->d3cold.allowed)
- xe_display_pm_suspend_late(xe);
+ xe_display_pm_runtime_suspend_late(xe);
+
out:
if (err)
xe_display_pm_runtime_resume(xe);
@@ -605,7 +608,8 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
struct device *dev = xe->drm.dev;
return dev->power.runtime_status == RPM_SUSPENDING ||
- dev->power.runtime_status == RPM_RESUMING;
+ dev->power.runtime_status == RPM_RESUMING ||
+ pm_suspend_target_state != PM_SUSPEND_ON;
#else
return false;
#endif
@@ -736,9 +740,6 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
xe->d3cold.allowed = false;
mutex_unlock(&xe->d3cold.lock);
-
- drm_dbg(&xe->drm,
- "d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
/**
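
The widened predicate in xe_pm_suspending_or_resuming() now also reports true while a system-wide sleep transition is in flight (pm_suspend_target_state != PM_SUSPEND_ON), not only during runtime-PM transitions. A boolean model of the combined check, with stand-in enums:

#include <stdbool.h>
#include <stdio.h>

enum rpm_status { RPM_ACTIVE, RPM_SUSPENDING, RPM_RESUMING, RPM_SUSPENDED };
enum pm_state   { PM_SUSPEND_ON, PM_SUSPEND_TO_IDLE, PM_SUSPEND_MEM };

/* Models the widened predicate: a runtime-PM transition OR any
 * system-wide transition counts. Illustrative only. */
static bool suspending_or_resuming(enum rpm_status rt, enum pm_state sys)
{
	return rt == RPM_SUSPENDING || rt == RPM_RESUMING ||
	       sys != PM_SUSPEND_ON;
}

int main(void)
{
	printf("%d\n", suspending_or_resuming(RPM_ACTIVE, PM_SUSPEND_MEM)); /* 1 */
	printf("%d\n", suspending_or_resuming(RPM_ACTIVE, PM_SUSPEND_ON));  /* 0 */
	return 0;
}
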
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index f27f579f4d85..1ddcc7e79a93 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -136,6 +136,7 @@ err_kfree:
xe_pt_free(pt);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_pt_create, ERRNO);
/**
* xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
@@ -275,7 +276,7 @@ struct xe_pt_stage_bind_walk {
/* Also input, but is updated during the walk*/
/** @curs: The DMA address cursor. */
struct xe_res_cursor *curs;
- /** @va_curs_start: The Virtual address coresponding to @curs->start */
+ /** @va_curs_start: The Virtual address corresponding to @curs->start */
u64 va_curs_start;
/* Output */
@@ -1333,8 +1334,7 @@ static void invalidation_fence_cb(struct dma_fence *fence,
queue_work(system_wq, &ifence->work);
} else {
ifence->base.base.error = ifence->fence->error;
- dma_fence_signal(&ifence->base.base);
- dma_fence_put(&ifence->base.base);
+ xe_gt_tlb_invalidation_fence_signal(&ifence->base);
}
dma_fence_put(ifence->fence);
}
@@ -1851,6 +1851,7 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -2131,6 +2132,7 @@ kill_vm_tile1:
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_pt_update_ops_run, ERRNO);
/**
* xe_pt_update_ops_fini() - Finish PT update operations
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 28d9bb3b825d..c059639613f7 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -9,6 +9,7 @@
#include <linux/sched/clock.h>
#include <drm/ttm/ttm_placement.h>
+#include <generated/xe_wa_oob.h>
#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
@@ -22,7 +23,9 @@
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
+#include "xe_oa.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_wa.h"
static const u16 xe_to_user_engine_class[] = {
[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
@@ -83,24 +86,22 @@ static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
}
static void
-__read_timestamps(struct xe_gt *gt,
- struct xe_reg lower_reg,
- struct xe_reg upper_reg,
- u64 *engine_ts,
- u64 *cpu_ts,
- u64 *cpu_delta,
- __ktime_func_t cpu_clock)
+hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts,
+ u64 *cpu_delta, __ktime_func_t cpu_clock)
{
+ struct xe_mmio *mmio = &hwe->gt->mmio;
u32 upper, lower, old_upper, loop = 0;
+ struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base),
+ lower_reg = RING_TIMESTAMP(hwe->mmio_base);
- upper = xe_mmio_read32(gt, upper_reg);
+ upper = xe_mmio_read32(mmio, upper_reg);
do {
*cpu_delta = local_clock();
*cpu_ts = cpu_clock();
- lower = xe_mmio_read32(gt, lower_reg);
+ lower = xe_mmio_read32(mmio, lower_reg);
*cpu_delta = local_clock() - *cpu_delta;
old_upper = upper;
- upper = xe_mmio_read32(gt, upper_reg);
+ upper = xe_mmio_read32(mmio, upper_reg);
} while (upper != old_upper && loop++ < 2);
*engine_ts = (u64)upper << 32 | lower;
@@ -117,6 +118,7 @@ query_engine_cycles(struct xe_device *xe,
__ktime_func_t cpu_clock;
struct xe_hw_engine *hwe;
struct xe_gt *gt;
+ unsigned int fw_ref;
if (query->size == 0) {
query->size = size;
@@ -149,31 +151,27 @@ query_engine_cycles(struct xe_device *xe,
if (!hwe)
return -EINVAL;
- if (xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL))
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return -EIO;
+ }
- __read_timestamps(gt,
- RING_TIMESTAMP(hwe->mmio_base),
- RING_TIMESTAMP_UDW(hwe->mmio_base),
- &resp.engine_cycles,
- &resp.cpu_timestamp,
- &resp.cpu_delta,
- cpu_clock);
-
- xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- resp.width = 36;
-
- /* Only write to the output fields of user query */
- if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp))
- return -EFAULT;
+ hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp,
+ &resp.cpu_delta, cpu_clock);
- if (put_user(resp.cpu_delta, &query_ptr->cpu_delta))
- return -EFAULT;
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
- if (put_user(resp.engine_cycles, &query_ptr->engine_cycles))
- return -EFAULT;
+ if (GRAPHICS_VER(xe) >= 20)
+ resp.width = 64;
+ else
+ resp.width = 36;
- if (put_user(resp.width, &query_ptr->width))
+ /* Only write to the output fields of user query */
+ if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) ||
+ put_user(resp.cpu_delta, &query_ptr->cpu_delta) ||
+ put_user(resp.engine_cycles, &query_ptr->engine_cycles) ||
+ put_user(resp.width, &query_ptr->width))
return -EFAULT;
return 0;
@@ -454,12 +452,23 @@ static int query_hwconfig(struct xe_device *xe,
static size_t calc_topo_query_size(struct xe_device *xe)
{
- return xe->info.gt_count *
- (4 * sizeof(struct drm_xe_query_topology_mask) +
- sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
- sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
- sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask) +
- sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
+ struct xe_gt *gt;
+ size_t query_size = 0;
+ int id;
+
+ for_each_gt(gt, xe, id) {
+ query_size += 3 * sizeof(struct drm_xe_query_topology_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);
+
+ /* L3bank mask may not be available for some GTs */
+ if (!XE_WA(gt, no_media_l3))
+ query_size += sizeof(struct drm_xe_query_topology_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
+ }
+
+ return query_size;
}
static int copy_mask(void __user **ptr,
@@ -512,11 +521,18 @@ static int query_gt_topology(struct xe_device *xe,
if (err)
return err;
- topo.type = DRM_XE_TOPO_L3_BANK;
- err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
- sizeof(gt->fuse_topo.l3_bank_mask));
- if (err)
- return err;
+ /*
+ * If the kernel doesn't have a way to obtain a correct L3bank
+ * mask, then it's better to omit L3 from the query rather than
+ * reporting bogus or zeroed information to userspace.
+ */
+ if (!XE_WA(gt, no_media_l3)) {
+ topo.type = DRM_XE_TOPO_L3_BANK;
+ err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
+ sizeof(gt->fuse_topo.l3_bank_mask));
+ if (err)
+ return err;
+ }
topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
DRM_XE_TOPO_SIMD16_EU_PER_DSS :
@@ -655,7 +671,9 @@ static int query_oa_units(struct xe_device *xe,
du->oa_unit_id = u->oa_unit_id;
du->oa_unit_type = u->type;
du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
- du->capabilities = DRM_XE_OA_CAPS_BASE;
+ du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
+ DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
+ DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
j = 0;
for_each_hw_engine(hwe, gt, hwe_id) {
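
The reworked calc_topo_query_size() sizes the topology query per GT and skips the L3 bank mask under the same XE_WA(gt, no_media_l3) condition that query_gt_topology() uses when writing, so reservation and output stay in lockstep. A small model of that invariant, with invented sizes and a pretend workaround predicate:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define NUM_GT 2
#define HDR    16	/* stand-in for the per-mask header struct */
#define MASK   8	/* stand-in for one fuse mask */

static bool gt_lacks_l3_mask(int gt) { return gt == 1; /* pretend */ }

static size_t calc_size(void)
{
	size_t sz = 0;

	for (int gt = 0; gt < NUM_GT; gt++) {
		sz += 3 * (HDR + MASK);		/* DSS geom, DSS compute, EU */
		if (!gt_lacks_l3_mask(gt))
			sz += HDR + MASK;	/* L3 bank mask, when valid */
	}
	return sz;
}

static size_t write_query(void)
{
	size_t written = 0;

	for (int gt = 0; gt < NUM_GT; gt++) {
		written += 3 * (HDR + MASK);
		if (!gt_lacks_l3_mask(gt))	/* same predicate as sizing */
			written += HDR + MASK;
	}
	return written;
}

int main(void)
{
	/* Bytes reserved must equal bytes written. */
	assert(calc_size() == write_query());
	return 0;
}
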
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index 440ac572f6e5..9475e3f74958 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -15,6 +15,7 @@
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
+#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
@@ -23,49 +24,29 @@
#include "xe_hw_engine_types.h"
#include "xe_macros.h"
#include "xe_mmio.h"
-#include "xe_reg_whitelist.h"
#include "xe_rtp_types.h"
-#define XE_REG_SR_GROW_STEP_DEFAULT 16
-
static void reg_sr_fini(struct drm_device *drm, void *arg)
{
struct xe_reg_sr *sr = arg;
+ struct xe_reg_sr_entry *entry;
+ unsigned long reg;
+
+ xa_for_each(&sr->xa, reg, entry)
+ kfree(entry);
xa_destroy(&sr->xa);
- kfree(sr->pool.arr);
- memset(&sr->pool, 0, sizeof(sr->pool));
}
int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
{
xa_init(&sr->xa);
- memset(&sr->pool, 0, sizeof(sr->pool));
- sr->pool.grow_step = XE_REG_SR_GROW_STEP_DEFAULT;
sr->name = name;
return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init);
-static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr)
-{
- if (sr->pool.used == sr->pool.allocated) {
- struct xe_reg_sr_entry *arr;
-
- arr = krealloc_array(sr->pool.arr,
- ALIGN(sr->pool.allocated + 1, sr->pool.grow_step),
- sizeof(*arr), GFP_KERNEL);
- if (!arr)
- return NULL;
-
- sr->pool.arr = arr;
- sr->pool.allocated += sr->pool.grow_step;
- }
-
- return &sr->pool.arr[sr->pool.used++];
-}
-
static bool compatible_entries(const struct xe_reg_sr_entry *e1,
const struct xe_reg_sr_entry *e2)
{
@@ -111,7 +92,7 @@ int xe_reg_sr_add(struct xe_reg_sr *sr,
return 0;
}
- pentry = alloc_entry(sr);
+ pentry = kmalloc(sizeof(*pentry), GFP_KERNEL);
if (!pentry) {
ret = -ENOMEM;
goto fail;
@@ -164,7 +145,7 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry)
else if (entry->clr_bits + 1)
val = (reg.mcr ?
xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
- xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
+ xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
else
val = 0;
@@ -180,86 +161,34 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry)
if (entry->reg.mcr)
xe_gt_mcr_multicast_write(gt, reg_mcr, val);
else
- xe_mmio_write32(gt, reg, val);
+ xe_mmio_write32(&gt->mmio, reg, val);
}
void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
{
struct xe_reg_sr_entry *entry;
unsigned long reg;
- int err;
+ unsigned int fw_ref;
if (xa_empty(&sr->xa))
return;
xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name);
- err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
goto err_force_wake;
xa_for_each(&sr->xa, reg, entry)
apply_one_mmio(gt, entry);
- err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
- XE_WARN_ON(err);
-
- return;
-
-err_force_wake:
- xe_gt_err(gt, "Failed to apply, err=%d\n", err);
-}
-
-void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe)
-{
- struct xe_reg_sr *sr = &hwe->reg_whitelist;
- struct xe_gt *gt = hwe->gt;
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_reg_sr_entry *entry;
- struct drm_printer p;
- u32 mmio_base = hwe->mmio_base;
- unsigned long reg;
- unsigned int slot = 0;
- int err;
-
- if (xa_empty(&sr->xa))
- return;
-
- drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name);
-
- err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL);
- if (err)
- goto err_force_wake;
-
- p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL);
- xa_for_each(&sr->xa, reg, entry) {
- if (slot == RING_MAX_NONPRIV_SLOTS) {
- xe_gt_err(gt,
- "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
- hwe->name, RING_MAX_NONPRIV_SLOTS);
- break;
- }
-
- xe_reg_whitelist_print_entry(&p, 0, reg, entry);
- xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot),
- reg | entry->set_bits);
- slot++;
- }
-
- /* And clear the rest just in case of garbage */
- for (; slot < RING_MAX_NONPRIV_SLOTS; slot++) {
- u32 addr = RING_NOPID(mmio_base).addr;
-
- xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr);
- }
-
- err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
- XE_WARN_ON(err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return;
err_force_wake:
- drm_err(&xe->drm, "Failed to apply, err=%d\n", err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ xe_gt_err(gt, "Failed to apply, err=-ETIMEDOUT\n");
}
/**
diff --git a/drivers/gpu/drm/xe/xe_reg_sr_types.h b/drivers/gpu/drm/xe/xe_reg_sr_types.h
index ad48a52b824a..ebe11f237fa2 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr_types.h
+++ b/drivers/gpu/drm/xe/xe_reg_sr_types.h
@@ -20,12 +20,6 @@ struct xe_reg_sr_entry {
};
struct xe_reg_sr {
- struct {
- struct xe_reg_sr_entry *arr;
- unsigned int used;
- unsigned int allocated;
- unsigned int grow_step;
- } pool;
struct xarray xa;
const char *name;
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index 3996934974fa..edab5d4e3ba5 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -10,7 +10,9 @@
#include "regs/xe_oa_regs.h"
#include "regs/xe_regs.h"
#include "xe_gt_types.h"
+#include "xe_gt_printk.h"
#include "xe_platform_types.h"
+#include "xe_reg_sr.h"
#include "xe_rtp.h"
#include "xe_step.h"
@@ -89,6 +91,40 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
{}
};
+static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe)
+{
+ struct xe_reg_sr *sr = &hwe->reg_whitelist;
+ struct xe_reg_sr_entry *entry;
+ struct drm_printer p;
+ unsigned long reg;
+ unsigned int slot;
+
+ xe_gt_dbg(hwe->gt, "Add %s whitelist to engine\n", sr->name);
+ p = xe_gt_dbg_printer(hwe->gt);
+
+ slot = 0;
+ xa_for_each(&sr->xa, reg, entry) {
+ struct xe_reg_sr_entry hwe_entry = {
+ .reg = RING_FORCE_TO_NONPRIV(hwe->mmio_base, slot),
+ .set_bits = entry->reg.addr | entry->set_bits,
+ .clr_bits = ~0u,
+ .read_mask = entry->read_mask,
+ };
+
+ if (slot == RING_MAX_NONPRIV_SLOTS) {
+ xe_gt_err(hwe->gt,
+ "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
+ hwe->name, RING_MAX_NONPRIV_SLOTS);
+ break;
+ }
+
+ xe_reg_whitelist_print_entry(&p, 0, reg, entry);
+ xe_reg_sr_add(&hwe->reg_sr, &hwe_entry, hwe->gt);
+
+ slot++;
+ }
+}
+
/**
* xe_reg_whitelist_process_engine - process table of registers to whitelist
* @hwe: engine instance to process whitelist for
@@ -102,6 +138,7 @@ void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
xe_rtp_process_to_sr(&ctx, register_whitelist, &hwe->reg_whitelist);
+ whitelist_apply_to_hwe(hwe);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 0be4f489d3e1..9f327f27c072 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -221,7 +221,10 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
static u32 get_ppgtt_flag(struct xe_sched_job *job)
{
- return job->q->vm ? BIT(8) : 0;
+ if (job->q->vm && !job->ggtt)
+ return BIT(8);
+
+ return 0;
}
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 86c705d18c0d..7a1c78fdfc92 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -196,7 +196,7 @@ static void rtp_get_context(struct xe_rtp_process_ctx *ctx,
*gt = (*hwe)->gt;
*xe = gt_to_xe(*gt);
break;
- };
+ }
}
/**
@@ -340,3 +340,8 @@ bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
return dss >= dss_per_gslice;
}
+bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return !IS_SRIOV_VF(gt_to_xe(gt));
+}
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 827d932b6908..38b9f13bba5e 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -131,7 +131,7 @@ struct xe_reg_sr;
* @ver_end__: Last graphics IP version to match
*
* Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
- * inclusive on boths sides
+ * inclusive on both sides
*
* Refer to XE_RTP_RULES() for expected usage.
*/
@@ -169,7 +169,7 @@ struct xe_reg_sr;
* @ver_end__: Last media IP version to match
*
* Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
- * inclusive on boths sides
+ * inclusive on both sides
*
* Refer to XE_RTP_RULES() for expected usage.
*/
@@ -476,4 +476,15 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
+/*
+ * xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device
+ *
+ * @gt: GT structure
+ * @hwe: Engine instance
+ *
+ * Returns: true if device is not VF, false otherwise.
+ */
+bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index fe2cb2a96f78..e055bed7ae55 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -53,7 +53,7 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
if (IS_ERR(bo)) {
drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
PTR_ERR(bo));
- return (struct xe_sa_manager *)bo;
+ return ERR_CAST(bo);
}
sa_manager->bo = bo;
sa_manager->is_iomem = bo->vmap.is_iomem;
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index eeccc1c318ae..1905ca590965 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -280,7 +280,7 @@ void xe_sched_job_arm(struct xe_sched_job *job)
fence = &chain->base;
}
- job->fence = fence;
+ job->fence = dma_fence_get(fence); /* Pairs with put in scheduler */
drm_sched_job_arm(&job->drm);
}
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index 0d3f76fb05ce..d942b20a9f29 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -40,7 +40,6 @@ struct xe_sched_job {
* @fence: dma fence to indicate completion. 1 way relationship - job
* can safely reference fence, fence cannot safely reference job.
*/
-#define JOB_FLAG_SUBMIT DMA_FENCE_FLAG_USER_BITS
struct dma_fence *fence;
/** @user_fence: write back value when BB is complete */
struct {
@@ -57,13 +56,15 @@ struct xe_sched_job {
u32 migrate_flush_flags;
/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
bool ring_ops_flush_tlb;
+ /** @ggtt: mapped in ggtt. */
+ bool ggtt;
/** @ptrs: per instance pointers. */
struct xe_job_ptrs ptrs[];
};
struct xe_sched_job_snapshot {
u16 batch_addr_len;
- u64 batch_addr[];
+ u64 batch_addr[] __counted_by(batch_addr_len);
};
#endif
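
The __counted_by(batch_addr_len) annotation ties the flexible array's bounds to the length member, so fortified array accesses can be checked at runtime on compilers that support the attribute. A minimal, self-contained usage pattern; the struct and field names here are invented:

#include <stdlib.h>

#ifndef __counted_by
# if defined(__has_attribute)
#  if __has_attribute(counted_by)
#   define __counted_by(m) __attribute__((counted_by(m)))
#  endif
# endif
# ifndef __counted_by
#  define __counted_by(m)	/* no-op on compilers without support */
# endif
#endif

struct snapshot {
	unsigned short len;
	unsigned long long addr[] __counted_by(len);
};

static struct snapshot *snapshot_alloc(unsigned short n)
{
	struct snapshot *s = malloc(sizeof(*s) + n * sizeof(s->addr[0]));

	if (s)
		s->len = n;	/* set the count before addr[] is indexed */
	return s;
}

int main(void)
{
	struct snapshot *s = snapshot_alloc(4);

	if (!s)
		return 1;
	s->addr[0] = 0xdeadbeefull;	/* in-bounds per s->len */
	free(s);
	return 0;
}
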
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index 5a1d65e4f19f..04e2f539ccd9 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/fault-inject.h>
+
#include <drm/drm_managed.h>
#include "regs/xe_regs.h"
@@ -12,6 +14,7 @@
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_vf.h"
/**
* xe_sriov_mode_to_string - Convert enum value to string.
@@ -35,7 +38,7 @@ const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode)
static bool test_is_vf(struct xe_device *xe)
{
- u32 value = xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG);
+ u32 value = xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG);
return value & VF_CAP;
}
@@ -112,6 +115,9 @@ int xe_sriov_init(struct xe_device *xe)
return err;
}
+ if (IS_SRIOV_VF(xe))
+ xe_sriov_vf_init_early(xe);
+
xe_assert(xe, !xe->sriov.wq);
xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
if (!xe->sriov.wq)
@@ -119,6 +125,7 @@ int xe_sriov_init(struct xe_device *xe)
return drmm_add_action_or_reset(&xe->drm, fini_sriov, xe);
}
+ALLOW_ERROR_INJECTION(xe_sriov_init, ERRNO); /* See xe_pci_probe() */
/**
* xe_sriov_print_info - Print basic SR-IOV information.
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
index 7d156ba82479..dd1df950b021 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
@@ -20,7 +20,7 @@
* is within a range of supported VF numbers (up to maximum number of VFs that
* driver can support, including VF0 that represents the PF itself).
*
- * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ * Note: Effective only on debug builds. See `Xe Asserts`_ for more information.
*/
#define xe_sriov_pf_assert_vfid(xe, vfid) \
xe_assert((xe), (vfid) <= xe_sriov_pf_get_totalvfs(xe))
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index c7b7ad4af5c8..ca94382a721e 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -9,6 +9,7 @@
#include <linux/build_bug.h>
#include <linux/mutex.h>
#include <linux/types.h>
+#include <linux/workqueue_types.h>
/**
* VFID - Virtual Function Identifier
@@ -56,4 +57,20 @@ struct xe_device_pf {
struct mutex master_lock;
};
+/**
+ * struct xe_device_vf - Xe Virtual Function related data
+ *
+ * The data in this structure is valid only if driver is running in the
+ * @XE_SRIOV_MODE_VF mode.
+ */
+struct xe_device_vf {
+ /** @migration: VF Migration state data */
+ struct {
+ /** @migration.worker: VF migration recovery worker */
+ struct work_struct worker;
+ /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
+ unsigned long gt_flags;
+ } migration;
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
new file mode 100644
index 000000000000..c1275e64aa9c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_pm.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+#include "xe_sriov_vf.h"
+
+/**
+ * DOC: VF restore procedure in PF KMD and VF KMD
+ *
+ * Restoring previously saved state of a VF is one of core features of
+ * SR-IOV. All major VM Management applications allow saving and restoring
+ * the VM state, and doing that to a VM which uses SRIOV VF as one of
+ * the accessible devices requires support from KMD on both PF and VF side.
+ * VMM initiates all required operations through VFIO module, which then
+ * translates them into PF KMD calls. This description will focus on these
+ * calls, leaving out the module which initiates these steps (VFIO).
+ *
+ * In order to start the restore procedure, GuC needs to keep the VF in
+ * proper state. The PF driver can ensure GuC set it to VF_READY state
+ * by provisioning the VF, which in turn can be done after Function Level
+ * Reset of said VF (or after it was freshly created - in that case FLR
+ * is not needed). The FLR procedure ends with GuC sending message
+ * `GUC_PF_NOTIFY_VF_FLR_DONE`, and then provisioning data is sent to GuC.
+ * After the provisioning is completed, the VF needs to be paused, and
+ * at that point the actual restore can begin.
+ *
+ * During VF Restore, state of several resources is restored. These may
+ * include local memory content (system memory is restored by VMM itself),
+ * values of MMIO registers, stateless compression metadata and others.
+ * The final resource which also needs restoring is state of the VF
+ * submission maintained within GuC. For that, `GUC_PF_OPCODE_VF_RESTORE`
+ * message is used, with reference to the state blob to be consumed by
+ * GuC.
+ *
+ * Next, when VFIO is asked to set the VM into running state, the PF driver
+ * sends `GUC_PF_TRIGGER_VF_RESUME` to GuC. When sent after restore, this
+ * changes VF state within GuC to `VF_RESFIX_BLOCKED` rather than the
+ * usual `VF_RUNNING`. At this point GuC triggers an interrupt to inform
+ * the VF KMD within the VM that it was migrated.
+ *
+ * As soon as Virtual GPU of the VM starts, the VF driver within receives
+ * the MIGRATED interrupt and schedules post-migration recovery worker.
+ * That worker queries GuC for new provisioning (using MMIO communication),
+ * and applies fixups to any non-virtualized resources used by the VF.
+ *
+ * When the VF driver is ready to continue operation on the newly connected
+ * hardware, it sends `VF2GUC_NOTIFY_RESFIX_DONE` which causes it to
+ * enter the long awaited `VF_RUNNING` state, and therefore start handling
+ * CTB messages and scheduling workloads from the VF::
+ *
+ * PF GuC VF
+ * [ ] | |
+ * [ ] PF2GUC_VF_CONTROL(pause) | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_READY_PAUSED |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] success [ ] |
+ * [ ] <---------------------------[ ] |
+ * [ ] | |
+ * [ ] PF loads resources from the | |
+ * [ ]------- saved image supplied | |
+ * [ ] | | |
+ * [ ] <----- | |
+ * [ ] | |
+ * [ ] GUC_PF_OPCODE_VF_RESTORE | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC loads contexts and CTB |
+ * [ ] [ ]------- state from image |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_RESFIX_PAUSED |
+ * [ ] [ ] | |
+ * [ ] success [ ] <----- |
+ * [ ] <---------------------------[ ] |
+ * [ ] | |
+ * [ ] GUC_PF_TRIGGER_VF_RESUME | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_RESFIX_BLOCKED |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] [ ] |
+ * [ ] [ ] GUC_INTR_SW_INT_0 |
+ * [ ] success [ ]---------------------------> [ ]
+ * [ ] <---------------------------[ ] [ ]
+ * | | VF2GUC_QUERY_SINGLE_KLV [ ]
+ * | [ ] <---------------------------[ ]
+ * | [ ] [ ]
+ * | [ ] new VF provisioning [ ]
+ * | [ ]---------------------------> [ ]
+ * | | [ ]
+ * | | VF driver applies post [ ]
+ * | | migration fixups -------[ ]
+ * | | | [ ]
+ * | | -----> [ ]
+ * | | [ ]
+ * | | VF2GUC_NOTIFY_RESFIX_DONE [ ]
+ * | [ ] <---------------------------[ ]
+ * | [ ] [ ]
+ * | [ ] GuC sets new VF state to [ ]
+ * | [ ]------- VF_RUNNING [ ]
+ * | [ ] | [ ]
+ * | [ ] <----- [ ]
+ * | [ ] success [ ]
+ * | [ ]---------------------------> [ ]
+ * | | |
+ * | | |
+ */
+
+static void migration_worker_func(struct work_struct *w);
+
+/**
+ * xe_sriov_vf_init_early - Initialize SR-IOV VF specific data.
+ * @xe: the &xe_device to initialize
+ */
+void xe_sriov_vf_init_early(struct xe_device *xe)
+{
+ INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+}
+
+/**
+ * vf_post_migration_requery_guc - Re-query GuC for current VF provisioning.
+ * @xe: the &xe_device struct instance
+ *
+ * After migration, we need to re-query all VF configuration to make sure
+ * they match previous provisioning. Note that most of VF provisioning
+ * shall be the same, except GGTT range, since GGTT is not virtualized per-VF.
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+static int vf_post_migration_requery_guc(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int err, ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_vf_query_config(gt);
+ ret = ret ?: err;
+ }
+
+ return ret;
+}
+
+/*
+ * vf_post_migration_imminent - Check if post-restore recovery is coming.
+ * @xe: the &xe_device struct instance
+ *
+ * Return: True if migration recovery worker will soon be running. Any worker currently
+ * executing does not affect the result.
+ */
+static bool vf_post_migration_imminent(struct xe_device *xe)
+{
+ return xe->sriov.vf.migration.gt_flags != 0 ||
+ work_pending(&xe->sriov.vf.migration.worker);
+}
+
+/*
+ * Notify all GuCs about resource fixups apply finished.
+ */
+static void vf_post_migration_notify_resfix_done(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, xe, id) {
+ if (vf_post_migration_imminent(xe))
+ goto skip;
+ xe_gt_sriov_vf_notify_resfix_done(gt);
+ }
+ return;
+
+skip:
+ drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n");
+}
+
+static void vf_post_migration_recovery(struct xe_device *xe)
+{
+ int err;
+
+ drm_dbg(&xe->drm, "migration recovery in progress\n");
+ xe_pm_runtime_get(xe);
+ err = vf_post_migration_requery_guc(xe);
+ if (vf_post_migration_imminent(xe))
+ goto defer;
+ if (unlikely(err))
+ goto fail;
+
+ /* FIXME: add the recovery steps */
+ vf_post_migration_notify_resfix_done(xe);
+ xe_pm_runtime_put(xe);
+ drm_notice(&xe->drm, "migration recovery ended\n");
+ return;
+defer:
+ xe_pm_runtime_put(xe);
+ drm_dbg(&xe->drm, "migration recovery deferred\n");
+ return;
+fail:
+ xe_pm_runtime_put(xe);
+ drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
+ xe_device_declare_wedged(xe);
+}
+
+static void migration_worker_func(struct work_struct *w)
+{
+ struct xe_device *xe = container_of(w, struct xe_device,
+ sriov.vf.migration.worker);
+
+ vf_post_migration_recovery(xe);
+}
+
+static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, xe, id) {
+ if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) {
+ xe_gt_sriov_dbg_verbose(gt, "still not ready to recover\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * xe_sriov_vf_start_migration_recovery - Start VF migration recovery.
+ * @xe: the &xe_device to start recovery on
+ *
+ * This function shall be called only by VF.
+ */
+void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
+{
+ bool started;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ if (!vf_ready_to_recovery_on_all_gts(xe))
+ return;
+
+ WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0);
+ /* Ensure other threads see that no flags are set now. */
+ smp_mb();
+
+ started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
+ drm_info(&xe->drm, "VF migration recovery %s\n", started ?
+ "scheduled" : "already in progress");
+}
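
The new worker plumbing hinges on migration.gt_flags: each GT's MIGRATED interrupt sets its bit, recovery is queued only once every GT has reported, and the flags are cleared before the worker runs so a fresh event re-arms it (which vf_post_migration_imminent() then observes). A userspace model of that handshake, with C11 atomics standing in for the kernel's bitops and barriers; all names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_GT 2

static atomic_ulong gt_flags;	/* one bit per GT, like migration.gt_flags */

static bool all_gts_ready(void)
{
	return atomic_load(&gt_flags) == (1ul << NUM_GT) - 1;
}

static void start_recovery(void)
{
	if (!all_gts_ready())
		return;

	/* Clear the flags before running so a new MIGRATED event seen
	 * mid-recovery re-arms the worker. */
	atomic_store(&gt_flags, 0);
	printf("recovery scheduled\n");
}

static void gt_migrated_irq(int gt_id)
{
	atomic_fetch_or(&gt_flags, 1ul << gt_id);
	start_recovery();
}

int main(void)
{
	gt_migrated_irq(0);	/* not all GTs ready yet: no-op */
	gt_migrated_irq(1);	/* last GT reports: recovery runs */
	return 0;
}
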
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h
new file mode 100644
index 000000000000..7b8622cff2b7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_H_
+#define _XE_SRIOV_VF_H_
+
+struct xe_device;
+
+void xe_sriov_vf_init_early(struct xe_device *xe);
+void xe_sriov_vf_start_migration_recovery(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index bb3c2a830362..42f5bebd09e5 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -54,11 +54,12 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
{
struct xe_user_fence *ufence;
u64 __user *ptr = u64_to_user_ptr(addr);
+ u64 __maybe_unused prefetch_val;
- if (!access_ok(ptr, sizeof(*ptr)))
+ if (get_user(prefetch_val, ptr))
return ERR_PTR(-EFAULT);
- ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
+ ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
if (!ufence)
return ERR_PTR(-ENOMEM);
@@ -82,10 +83,16 @@ static void user_fence_worker(struct work_struct *w)
XE_WARN_ON("Copy to user failed");
kthread_unuse_mm(ufence->mm);
mmput(ufence->mm);
+ } else {
+ drm_dbg(&ufence->xe->drm, "mmget_not_zero() failed, ufence wasn't signaled\n");
}
- wake_up_all(&ufence->xe->ufence_wq);
+ /*
+ * Wake up waiters only after updating the ufence state, allowing the UMD
+ * to safely reuse the same ufence without encountering -EBUSY errors.
+ */
WRITE_ONCE(ufence->signalled, 1);
+ wake_up_all(&ufence->xe->ufence_wq);
user_fence_put(ufence);
}
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index dda5268507d8..07cf7cfe4abd 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/fault-inject.h>
+
#include <drm/drm_managed.h>
#include "xe_device.h"
@@ -129,6 +131,7 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_tile_init_early, ERRNO); /* See xe_pci_probe() */
static int tile_ttm_mgr_init(struct xe_tile *tile)
{
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 8573d7a87d84..d5281de04d54 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -21,6 +21,7 @@
#include "xe_vm.h"
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
+#define __dev_name_tile(tile) __dev_name_xe(tile_to_xe((tile)))
#define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
#define __dev_name_eq(q) __dev_name_gt((q)->gt)
@@ -210,6 +211,7 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__string(dev, __dev_name_eq(job->q))
__field(u32, seqno)
__field(u32, lrc_seqno)
+ __field(u8, gt_id)
__field(u16, guc_id)
__field(u32, guc_state)
__field(u32, flags)
@@ -222,6 +224,7 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__assign_str(dev);
__entry->seqno = xe_sched_job_seqno(job);
__entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
+ __entry->gt_id = job->q->gt->info.id;
__entry->guc_id = job->q->guc->id;
__entry->guc_state =
atomic_read(&job->q->guc->state);
@@ -231,9 +234,9 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__entry->batch_addr = (u64)job->ptrs[0].batch_addr;
),
- TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
+ TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, gt=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
__get_str(dev), __entry->fence, __entry->seqno,
- __entry->lrc_seqno, __entry->guc_id,
+ __entry->lrc_seqno, __entry->gt_id, __entry->guc_id,
__entry->batch_addr, __entry->guc_state,
__entry->flags, __entry->error)
);
@@ -281,6 +284,7 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
__string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
__field(u32, opcode)
__field(u16, guc_id)
+ __field(u8, gt_id)
),
TP_fast_assign(
@@ -288,9 +292,11 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
__entry->opcode = msg->opcode;
__entry->guc_id =
((struct xe_exec_queue *)msg->private_data)->guc->id;
+ __entry->gt_id =
+ ((struct xe_exec_queue *)msg->private_data)->gt->info.id;
),
- TP_printk("dev=%s, guc_id=%d, opcode=%u", __get_str(dev), __entry->guc_id,
+ TP_printk("dev=%s, gt=%u guc_id=%d, opcode=%u", __get_str(dev), __entry->gt_id, __entry->guc_id,
__entry->opcode)
);
@@ -342,12 +348,12 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
);
TRACE_EVENT(xe_reg_rw,
- TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len),
+ TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),
- TP_ARGS(gt, write, reg, val, len),
+ TP_ARGS(mmio, write, reg, val, len),
TP_STRUCT__entry(
- __string(dev, __dev_name_gt(gt))
+ __string(dev, __dev_name_tile(mmio->tile))
__field(u64, val)
__field(u32, reg)
__field(u16, write)
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index 9b1a1d4304ae..ea50fee50c7d 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -48,6 +48,11 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
TP_ARGS(bo)
);
+DEFINE_EVENT(xe_bo, xe_bo_validate,
+ TP_PROTO(struct xe_bo *bo),
+ TP_ARGS(bo)
+);
+
TRACE_EVENT(xe_bo_move,
TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
bool move_lacks_source),
@@ -55,8 +60,8 @@ TRACE_EVENT(xe_bo_move,
TP_STRUCT__entry(
__field(struct xe_bo *, bo)
__field(size_t, size)
- __field(u32, new_placement)
- __field(u32, old_placement)
+ __string(new_placement_name, xe_mem_type_to_name[new_placement])
+ __string(old_placement_name, xe_mem_type_to_name[old_placement])
__string(device_id, __dev_name_bo(bo))
__field(bool, move_lacks_source)
),
@@ -64,15 +69,15 @@ TRACE_EVENT(xe_bo_move,
TP_fast_assign(
__entry->bo = bo;
__entry->size = bo->size;
- __entry->new_placement = new_placement;
- __entry->old_placement = old_placement;
+ __assign_str(new_placement_name);
+ __assign_str(old_placement_name);
__assign_str(device_id);
__entry->move_lacks_source = move_lacks_source;
),
TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
__entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
- xe_mem_type_to_name[__entry->old_placement],
- xe_mem_type_to_name[__entry->new_placement], __get_str(device_id))
+ __get_str(old_placement_name),
+ __get_str(new_placement_name), __get_str(device_id))
);
DECLARE_EVENT_CLASS(xe_vma,
@@ -189,7 +194,7 @@ DECLARE_EVENT_CLASS(xe_vm,
),
TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev),
- __entry->vm, __entry->asid)
+ __entry->vm, __entry->asid)
);
DEFINE_EVENT(xe_vm, xe_vm_kill,
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.c b/drivers/gpu/drm/xe/xe_trace_lrc.c
new file mode 100644
index 000000000000..ab9b7e2970bc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "xe_trace_lrc.h"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.h b/drivers/gpu/drm/xe/xe_trace_lrc.h
new file mode 100644
index 000000000000..5c669a0b2180
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xe
+
+#if !defined(_XE_TRACE_LRC_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _XE_TRACE_LRC_H_
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+#include "xe_gt_types.h"
+#include "xe_lrc.h"
+#include "xe_lrc_types.h"
+
+#define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev)
+
+TRACE_EVENT(xe_lrc_update_timestamp,
+ TP_PROTO(struct xe_lrc *lrc, uint32_t old),
+ TP_ARGS(lrc, old),
+ TP_STRUCT__entry(
+ __field(struct xe_lrc *, lrc)
+ __field(u32, old)
+ __field(u32, new)
+ __string(name, lrc->fence_ctx.name)
+ __string(device_id, __dev_name_lrc(lrc))
+ ),
+
+ TP_fast_assign(
+ __entry->lrc = lrc;
+ __entry->old = old;
+ __entry->new = lrc->ctx_timestamp;
+ __assign_str(name);
+ __assign_str(device_id);
+ ),
+ TP_printk("lrc=:%p lrc->name=%s old=%u new=%u device_id:%s",
+ __entry->lrc, __get_str(name),
+ __entry->old, __entry->new,
+ __get_str(device_id))
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
+#define TRACE_INCLUDE_FILE xe_trace_lrc
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index f7113cf6109d..423856cc18d4 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -60,7 +60,7 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct xe_tile *tile = xe_device_get_root_tile(xe);
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u64 stolen_size;
u64 tile_offset;
@@ -94,7 +94,7 @@ static u32 get_wopcm_size(struct xe_device *xe)
u32 wopcm_size;
u64 val;
- val = xe_mmio_read64_2x32(xe_root_mmio_gt(xe), STOLEN_RESERVED);
+ val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
switch (val) {
@@ -119,7 +119,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr
u32 stolen_size, wopcm_size;
u32 ggc, gms;
- ggc = xe_mmio_read32(xe_root_mmio_gt(xe), GGC);
+ ggc = xe_mmio_read32(xe_root_tile_mmio(xe), GGC);
/*
* Check GGMS: it should be fixed 0x3 (8MB), which corresponds to the
@@ -159,7 +159,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr
stolen_size -= wopcm_size;
if (media_gt && XE_WA(media_gt, 14019821291)) {
- u64 gscpsmi_base = xe_mmio_read64_2x32(media_gt, GSCPSMI_BASE)
+ u64 gscpsmi_base = xe_mmio_read64_2x32(&media_gt->mmio, GSCPSMI_BASE)
& ~GENMASK_ULL(5, 0);
/*
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index 423b261ea743..f4a16e5fa770 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_managed.h>
+#include <drm/drm_drv.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
@@ -52,7 +53,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
struct xe_ttm_vram_mgr_resource *vres;
struct drm_buddy *mm = &mgr->mm;
- u64 size, remaining_size, min_page_size;
+ u64 size, min_page_size;
unsigned long lpfn;
int err;
@@ -98,17 +99,6 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
goto error_fini;
}
- if (WARN_ON(min_page_size > SZ_2G)) { /* FIXME: sg limit */
- err = -EINVAL;
- goto error_fini;
- }
-
- if (WARN_ON((size > SZ_2G &&
- (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS)))) {
- err = -EINVAL;
- goto error_fini;
- }
-
if (WARN_ON(!IS_ALIGNED(size, min_page_size))) {
err = -EINVAL;
goto error_fini;
@@ -116,12 +106,11 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
mutex_lock(&mgr->lock);
if (lpfn <= mgr->visible_size >> PAGE_SHIFT && size > mgr->visible_avail) {
- mutex_unlock(&mgr->lock);
err = -ENOSPC;
- goto error_fini;
+ goto error_unlock;
}
- if (place->fpfn + (size >> PAGE_SHIFT) != place->lpfn &&
+ if (place->fpfn + (size >> PAGE_SHIFT) != lpfn &&
place->flags & TTM_PL_FLAG_CONTIGUOUS) {
size = roundup_pow_of_two(size);
min_page_size = size;
@@ -129,25 +118,11 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
lpfn = max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn);
}
- remaining_size = size;
- do {
- /*
- * Limit maximum size to 2GiB due to SG table limitations.
- * FIXME: Should maybe be handled as part of sg construction.
- */
- u64 alloc_size = min_t(u64, remaining_size, SZ_2G);
-
- err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
- (u64)lpfn << PAGE_SHIFT,
- alloc_size,
- min_page_size,
- &vres->blocks,
- vres->flags);
- if (err)
- goto error_free_blocks;
-
- remaining_size -= alloc_size;
- } while (remaining_size);
+ err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
+ (u64)lpfn << PAGE_SHIFT, size,
+ min_page_size, &vres->blocks, vres->flags);
+ if (err)
+ goto error_unlock;
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks))
@@ -194,9 +169,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
*res = &vres->base;
return 0;
-
-error_free_blocks:
- drm_buddy_free_list(mm, &vres->blocks, 0);
+error_unlock:
mutex_unlock(&mgr->lock);
error_fini:
ttm_resource_fini(man, &vres->base);
@@ -339,6 +312,13 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
struct ttm_resource_manager *man = &mgr->manager;
int err;
+ if (mem_type != XE_PL_STOLEN) {
+ const char *name = mem_type == XE_PL_VRAM0 ? "vram0" : "vram1";
+ man->cg = drmm_cgroup_register_region(&xe->drm, name, size);
+ if (IS_ERR(man->cg))
+ return PTR_ERR(man->cg);
+ }
+
man->func = &xe_ttm_vram_mgr_func;
mgr->mem_type = mem_type;
mutex_init(&mgr->lock);
@@ -393,7 +373,8 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
xe_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
- xe_res_next(&cursor, cursor.size);
+ /* Limit maximum size to 2GiB due to SG table limitations. */
+ xe_res_next(&cursor, min_t(u64, cursor.size, SZ_2G));
}
r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
@@ -413,7 +394,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
xe_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
- size_t size = cursor.size;
+ size_t size = min_t(u64, cursor.size, SZ_2G);
dma_addr_t addr;
addr = dma_map_resource(dev, phys, size, dir,
@@ -426,7 +407,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
- xe_res_next(&cursor, cursor.size);
+ xe_res_next(&cursor, size);
}
return 0;
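
With the 2GiB splitting moved out of the buddy allocator, xe_ttm_vram_mgr_alloc_sgt() now walks a single large block in steps capped at SZ_2G, because an SG entry's DMA length is a 32-bit field. A standalone model of that chunking walk:

#include <stdint.h>
#include <stdio.h>

#define SZ_2G 0x80000000ull

/* Models the cursor walk above: one large physically-contiguous span
 * becomes several <=2GiB SG entries. Illustrative only. */
static unsigned int count_sg_entries(uint64_t remaining)
{
	unsigned int entries = 0;

	while (remaining) {
		uint64_t chunk = remaining < SZ_2G ? remaining : SZ_2G;

		remaining -= chunk;
		entries++;
	}
	return entries;
}

int main(void)
{
	printf("5 GiB -> %u entries\n", count_sg_entries(5ull << 30)); /* 3 */
	printf("2 GiB -> %u entries\n", count_sg_entries(2ull << 30)); /* 1 */
	return 0;
}
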
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 0d5e04158917..d449de0fb6ec 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -33,7 +33,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
{ XE_RTP_NAME("Tuning: L3 cache - media"),
- XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(FIELD_SET(XE2LPM_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
@@ -43,7 +43,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
SET(CCCHKNREG1, L3CMPCTRL))
},
{ XE_RTP_NAME("Tuning: Compression Overfetch - media"),
- XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(CLR(XE2LPM_CCCHKNREG1, ENCOMPPERFFIX),
SET(XE2LPM_CCCHKNREG1, L3CMPCTRL))
},
@@ -52,7 +52,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
},
{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3 - media"),
- XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN))
},
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"),
@@ -61,7 +61,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
COMPMEMRD256BOVRFETCHEN))
},
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"),
- XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2,
COMPMEMRD256BOVRFETCHEN))
},
@@ -71,7 +71,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
},
{ XE_RTP_NAME("Tuning: Stateless compression control - media"),
- XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 2000)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT,
REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
},
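
The xe_tuning.c hunks all make the same change: exact MEDIA_VERSION(2000) matches become open-ended ranges, so the Xe2 media tunings carry forward to newer media IP. A small sketch of the range semantics, assuming XE_RTP_END_VERSION_UNDEFINED simply means "no upper bound" (the sentinel value below is local to this model, not the xe_rtp definition):

#include <assert.h>
#include <stdbool.h>

/* Local sentinel for the model only. */
#define END_VERSION_UNDEFINED 0

static bool version_in_range(unsigned int ver, unsigned int first,
			     unsigned int last)
{
	if (last == END_VERSION_UNDEFINED)
		return ver >= first;	/* open-ended range */
	return ver >= first && ver <= last;
}

int main(void)
{
	/* MEDIA_VERSION_RANGE(2000, UNDEFINED) now also covers 3000. */
	assert(version_in_range(3000, 2000, END_VERSION_UNDEFINED));
	assert(!version_in_range(1300, 2000, END_VERSION_UNDEFINED));
	return 0;
}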
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index d431d0031185..fb0eda3d5682 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/fault-inject.h>
#include <linux/firmware.h>
#include <drm/drm_managed.h>
@@ -796,6 +797,7 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
return err;
}
+ALLOW_ERROR_INJECTION(xe_uc_fw_init, ERRNO); /* See xe_pci_probe() */
static u32 uc_fw_ggtt_offset(struct xe_uc_fw *uc_fw)
{
@@ -806,6 +808,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ struct xe_mmio *mmio = &gt->mmio;
u64 src_offset;
u32 dma_ctrl;
int ret;
@@ -814,34 +817,34 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
/* Set the source address for the uCode */
src_offset = uc_fw_ggtt_offset(uc_fw) + uc_fw->css_offset;
- xe_mmio_write32(gt, DMA_ADDR_0_LOW, lower_32_bits(src_offset));
- xe_mmio_write32(gt, DMA_ADDR_0_HIGH,
+ xe_mmio_write32(mmio, DMA_ADDR_0_LOW, lower_32_bits(src_offset));
+ xe_mmio_write32(mmio, DMA_ADDR_0_HIGH,
upper_32_bits(src_offset) | DMA_ADDRESS_SPACE_GGTT);
/* Set the DMA destination */
- xe_mmio_write32(gt, DMA_ADDR_1_LOW, offset);
- xe_mmio_write32(gt, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+ xe_mmio_write32(mmio, DMA_ADDR_1_LOW, offset);
+ xe_mmio_write32(mmio, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
/*
* Set the transfer size. The header plus uCode will be copied to WOPCM
* via DMA, excluding any other components
*/
- xe_mmio_write32(gt, DMA_COPY_SIZE,
+ xe_mmio_write32(mmio, DMA_COPY_SIZE,
sizeof(struct uc_css_header) + uc_fw->ucode_size);
/* Start the DMA */
- xe_mmio_write32(gt, DMA_CTRL,
+ xe_mmio_write32(mmio, DMA_CTRL,
_MASKED_BIT_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
- ret = xe_mmio_wait32(gt, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
+ ret = xe_mmio_wait32(mmio, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
false);
if (ret)
drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
xe_uc_fw_type_repr(uc_fw->type), dma_ctrl);
/* Disable the bits once DMA is over */
- xe_mmio_write32(gt, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+ xe_mmio_write32(mmio, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
return ret;
}
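
The ALLOW_ERROR_INJECTION() annotations added here (and in xe_vm.c, xe_wa.c, and xe_wopcm.c below) expose these init functions to the fail_function fault-injection framework so driver probe can be failure-tested. A hedged sketch of arming one injection point from userspace, assuming the debugfs layout described in Documentation/fault-injection/fault-injection.rst and a kernel built with CONFIG_FUNCTION_ERROR_INJECTION and CONFIG_FAULT_INJECTION_DEBUG_FS:

#include <stdio.h>
#include <stdlib.h>

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fprintf(f, "%s", val);
	fclose(f);
}

int main(void)
{
	const char *base = "/sys/kernel/debug/fail_function";
	char path[256];

	/* Arm the injection point added by ALLOW_ERROR_INJECTION(). */
	snprintf(path, sizeof(path), "%s/inject", base);
	write_str(path, "xe_uc_fw_init");

	/* Fail every call with -ENOMEM (-12, written as a 32-bit value). */
	snprintf(path, sizeof(path), "%s/xe_uc_fw_init/retval", base);
	write_str(path, "0xfffffff4");
	snprintf(path, sizeof(path), "%s/probability", base);
	write_str(path, "100");
	snprintf(path, sizeof(path), "%s/times", base);
	write_str(path, "-1");

	return 0;
}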
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index 0d8caa0e7354..ad3b35a0e6eb 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -92,7 +92,7 @@ struct xe_uc_fw {
const enum xe_uc_fw_status status;
/**
* @__status: private firmware load status - only to be used
- * by firmware laoding code
+ * by firmware loading code
*/
enum xe_uc_fw_status __status;
};
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index ce9dca4d4e87..690330352d4c 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -10,7 +10,6 @@
#include <drm/drm_exec.h>
#include <drm/drm_print.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>
#include <linux/ascii85.h>
@@ -733,13 +732,14 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
vops->pt_update_ops[i].ops =
kmalloc_array(vops->pt_update_ops[i].num_ops,
sizeof(*vops->pt_update_ops[i].ops),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!vops->pt_update_ops[i].ops)
return array_of_binds ? -ENOBUFS : -ENOMEM;
}
return 0;
}
+ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
static void xe_vma_ops_fini(struct xe_vma_ops *vops)
{
@@ -1024,7 +1024,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
/*
* Since userptr pages are not pinned, we can't remove
- * the notifer until we're sure the GPU is not accessing
+ * the notifier until we're sure the GPU is not accessing
* them anymore
*/
mmu_interval_notifier_remove(&userptr->notifier);
@@ -1352,6 +1352,7 @@ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
return 0;
}
+ALLOW_ERROR_INJECTION(xe_vm_create_scratch, ERRNO);
static void xe_vm_free_scratch(struct xe_vm *vm)
{
@@ -1978,6 +1979,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
return ops;
}
+ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
u16 pat_index, unsigned int flags)
@@ -2105,7 +2107,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
}
}
- /* Adjust for partial unbind after removin VMA from VM */
+ /* Adjust for partial unbind after removing VMA from VM */
if (!err) {
op->base.remap.unmap->va->va.addr = op->remap.start;
op->base.remap.unmap->va->va.range = op->remap.range;
@@ -2357,13 +2359,15 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
bool validate)
{
struct xe_bo *bo = xe_vma_bo(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
int err = 0;
if (bo) {
if (!bo->vm)
err = drm_exec_lock_obj(exec, &bo->ttm.base);
if (!err && validate)
- err = xe_bo_validate(bo, xe_vma_vm(vma), true);
+ err = xe_bo_validate(bo, vm,
+ !xe_vm_in_preempt_fence_mode(vm));
}
return err;
@@ -2697,6 +2701,7 @@ unlock:
drm_exec_fini(&exec);
return err;
}
+ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
#define SUPPORTED_FLAGS_STUB \
(DRM_XE_VM_BIND_FLAG_READONLY | \
@@ -2733,7 +2738,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
*bind_ops = kvmalloc_array(args->num_binds,
sizeof(struct drm_xe_vm_bind_op),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!*bind_ops)
return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
@@ -2973,14 +2979,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (args->num_binds) {
bos = kvcalloc(args->num_binds, sizeof(*bos),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!bos) {
err = -ENOMEM;
goto release_vm_lock;
}
ops = kvcalloc(args->num_binds, sizeof(*ops),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!ops) {
err = -ENOMEM;
goto release_vm_lock;
@@ -3199,10 +3207,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
&fence[fence_id], vma);
- if (ret < 0) {
- xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
+ if (ret)
goto wait;
- }
++fence_id;
if (!tile->media_gt)
@@ -3214,10 +3220,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
&fence[fence_id], vma);
- if (ret < 0) {
- xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
+ if (ret)
goto wait;
- }
++fence_id;
}
}
@@ -3307,7 +3311,6 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
for (int i = 0; i < snap->num_snaps; i++) {
struct xe_bo *bo = snap->snap[i].bo;
- struct iosys_map src;
int err;
if (IS_ERR(snap->snap[i].data))
@@ -3320,16 +3323,8 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
}
if (bo) {
- xe_bo_lock(bo, false);
- err = ttm_bo_vmap(&bo->ttm, &src);
- if (!err) {
- xe_map_memcpy_from(xe_bo_device(bo),
- snap->snap[i].data,
- &src, snap->snap[i].bo_ofs,
- snap->snap[i].len);
- ttm_bo_vunmap(&bo->ttm, &src);
- }
- xe_bo_unlock(bo);
+ err = xe_bo_read(bo, snap->snap[i].bo_ofs,
+ snap->snap[i].data, snap->snap[i].len);
} else {
void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
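
The GFP changes in this file share one policy: array allocations whose size is controlled by userspace (num_binds) use __GFP_RETRY_MAYFAIL so they fail instead of invoking the OOM killer, __GFP_NOWARN to suppress the allocation-failure splat, and report -ENOBUFS rather than -ENOMEM when more than one bind was requested, signalling userspace to retry with smaller arrays. The shape of that policy as a sketch, with an illustrative helper name (the driver open-codes this at each call site):

#include <linux/slab.h>

static void *alloc_bind_array(size_t n, size_t elem_size, int *err)
{
	void *p = kvcalloc(n, elem_size, GFP_KERNEL | __GFP_ACCOUNT |
			   __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

	if (!p)
		*err = n > 1 ? -ENOBUFS : -ENOMEM; /* -ENOBUFS: retry smaller */
	return p;
}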
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index c864dba35e1d..23adb7442881 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -17,7 +17,6 @@ struct drm_printer;
struct drm_file;
struct ttm_buffer_object;
-struct ttm_validate_buffer;
struct xe_exec_queue;
struct xe_file;
diff --git a/drivers/gpu/drm/xe/xe_vm_doc.h b/drivers/gpu/drm/xe/xe_vm_doc.h
index 4d33f310b653..078786958403 100644
--- a/drivers/gpu/drm/xe/xe_vm_doc.h
+++ b/drivers/gpu/drm/xe/xe_vm_doc.h
@@ -64,8 +64,8 @@
* update page level 2 PDE[1] to page level 3b phys address (GPU)
*
* bind BO2 0x1ff000-0x201000
- * update page level 3a PTE[511] to BO2 phys addres (GPU)
- * update page level 3b PTE[0] to BO2 phys addres + 0x1000 (GPU)
+ * update page level 3a PTE[511] to BO2 phys address (GPU)
+ * update page level 3b PTE[0] to BO2 phys address + 0x1000 (GPU)
*
* GPU bypass
* ~~~~~~~~~~
@@ -192,7 +192,7 @@
*
* If a VM is in fault mode (TODO: link to fault mode), new bind operations that
* create mappings are by default deferred to the page fault handler (first
- * use). This behavior can be overriden by setting the flag
+ * use). This behavior can be overridden by setting the flag
* DRM_XE_VM_BIND_FLAG_IMMEDIATE, which tells the driver to create the mapping
* immediately.
*
@@ -209,7 +209,7 @@
*
* Since this a core kernel managed memory the kernel can move this memory
* whenever it wants. We register an invalidation MMU notifier to alert XE when
- * a user poiter is about to move. The invalidation notifier needs to block
+ * a user pointer is about to move. The invalidation notifier needs to block
* until all pending users (jobs or compute mode engines) of the userptr are
* idle to ensure no faults. This is done by waiting on all of the VM's dma-resv slots.
*
@@ -252,7 +252,7 @@
* Rebind worker
* -------------
*
- * The rebind worker is very similar to an exec. It is resposible for rebinding
+ * The rebind worker is very similar to an exec. It is responsible for rebinding
* evicted BOs or userptrs, waiting on those operations, installing new preempt
* fences, and finally resuming execution of engines in the VM.
*
@@ -317,11 +317,11 @@
* are not allowed; only long-running workloads and ULLS are enabled on a faulting
* VM.
*
- * Defered VM binds
+ * Deferred VM binds
* ----------------
*
* By default, on a faulting VM binds just allocate the VMA and the actual
- * updating of the page tables is defered to the page fault handler. This
+ * updating of the page tables is deferred to the page fault handler. This
* behavior can be overridden by setting the flag DRM_XE_VM_BIND_FLAG_IMMEDIATE in
* the VM bind which will then do the bind immediately.
*
@@ -500,18 +500,18 @@
* Slot waiting
* ------------
*
- * 1. The exection of all jobs from kernel ops shall wait on all slots
+ * 1. The execution of all jobs from kernel ops shall wait on all slots
* (DMA_RESV_USAGE_PREEMPT_FENCE) of either an external BO or VM (depends on if
* kernel op is operating on external or private BO)
*
- * 2. In non-compute mode, the exection of all jobs from rebinds in execs shall
+ * 2. In non-compute mode, the execution of all jobs from rebinds in execs shall
* wait on the DMA_RESV_USAGE_KERNEL slot of either an external BO or VM
* (depends on if the rebind is operating on an external or private BO)
*
- * 3. In non-compute mode, the exection of all jobs from execs shall wait on the
+ * 3. In non-compute mode, the execution of all jobs from execs shall wait on the
* last rebind job
*
- * 4. In compute mode, the exection of all jobs from rebinds in the rebind
+ * 4. In compute mode, the execution of all jobs from rebinds in the rebind
* worker shall wait on the DMA_RESV_USAGE_KERNEL slot of either an external BO
* or VM (depends on if rebind is operating on external or private BO)
*
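
Rule 2 above maps fairly directly onto the dma-resv API. A rough, illustrative translation, assuming the standard dma_resv_wait_timeout() helper (this is a sketch of the documented rule, not code from the driver):

#include <linux/dma-resv.h>
#include <linux/sched.h>

/* Rebind jobs wait on the KERNEL slot of the external BO or the VM resv. */
static long wait_kernel_slot(struct dma_resv *resv)
{
	return dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
				     false, MAX_SCHEDULE_TIMEOUT);
}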
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index 80ba2fc78837..b1f81dca610d 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -169,7 +169,7 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
u64 offset_hi, offset_lo;
u32 nodes, num_enabled;
- reg = xe_mmio_read32(gt, MIRROR_FUSE3);
+ reg = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
nodes = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, reg);
num_enabled = hweight32(nodes); /* Number of enabled l3 nodes */
@@ -185,7 +185,8 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
offset = round_up(offset, SZ_128K); /* SW must round up to nearest 128K */
/* We don't expect any holes */
- xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(gt, GSMBASE) - ccs_size),
+ xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(&gt_to_tile(gt)->mmio, GSMBASE) -
+ ccs_size),
"Hole between CCS and GSM.\n");
} else {
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
@@ -219,8 +220,8 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_gt *gt = tile->primary_gt;
+ unsigned int fw_ref;
u64 offset;
- int err;
u32 reg;
if (IS_SRIOV_VF(xe)) {
@@ -239,9 +240,9 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
return 0;
}
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- return err;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
/* actual size */
if (unlikely(xe->info.platform == XE_DG1)) {
@@ -257,13 +258,15 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
if (xe->info.has_flat_ccs) {
offset = get_flat_ccs_offset(gt, *tile_size);
} else {
- offset = xe_mmio_read64_2x32(gt, GSMBASE);
+ offset = xe_mmio_read64_2x32(&tile->mmio, GSMBASE);
}
/* remove the tile offset so we have just the available size */
*vram_size = offset - *tile_offset;
- return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ return 0;
}
static void vram_fini(void *arg)
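
This file is converted to the reworked forcewake API: xe_force_wake_get() now returns a reference mask (zero when the domains failed to wake in time) instead of an errno, and xe_force_wake_put() takes that mask back. The resulting pattern, sketched with an illustrative function name:

static int with_gt_forcewake(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT; /* nothing was acquired, nothing to put */

	/* ... registers requiring the GT domain can be touched here ... */

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}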
diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c
new file mode 100644
index 000000000000..b378848d3b7b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vsec.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright © 2024 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/intel_vsec.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_drv.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+#include "xe_pm.h"
+#include "xe_vsec.h"
+
+#include "regs/xe_pmt.h"
+
+/* PMT GUID value for BMG devices. NOTE: this is NOT a PCI id */
+#define BMG_DEVICE_ID 0xE2F8
+
+static struct intel_vsec_header bmg_telemetry = {
+ .length = 0x10,
+ .id = VSEC_ID_TELEMETRY,
+ .num_entries = 2,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET,
+};
+
+static struct intel_vsec_header bmg_punit_crashlog = {
+ .length = 0x10,
+ .id = VSEC_ID_CRASHLOG,
+ .num_entries = 1,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET + 0x60,
+};
+
+static struct intel_vsec_header bmg_oobmsm_crashlog = {
+ .length = 0x10,
+ .id = VSEC_ID_CRASHLOG,
+ .num_entries = 1,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET + 0x78,
+};
+
+static struct intel_vsec_header *bmg_capabilities[] = {
+ &bmg_telemetry,
+ &bmg_punit_crashlog,
+ &bmg_oobmsm_crashlog,
+ NULL
+};
+
+enum xe_vsec {
+ XE_VSEC_UNKNOWN = 0,
+ XE_VSEC_BMG,
+};
+
+static struct intel_vsec_platform_info xe_vsec_info[] = {
+ [XE_VSEC_BMG] = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_CRASHLOG,
+ .headers = bmg_capabilities,
+ },
+ { }
+};
+
+/*
+ * The GUID will have the following bits to decode:
+ * [0:3] - Telemetry space iteration number (0, 1, ...)
+ * [4:7] - Segment (SEGMENT_INDEPENDENT-0, Client-1, Server-2)
+ * [8:11] - SOC_SKU
+ * [12:27] - Device ID - changes for each down-binned SKU
+ * [28:29] - Capability Type (Crashlog-0, Telemetry Aggregator-1, Watcher-2)
+ * [30:31] - Record-ID (0-PUNIT, 1-OOBMSM_0, 2-OOBMSM_1)
+ */
+#define GUID_TELEM_ITERATION GENMASK(3, 0)
+#define GUID_SEGMENT GENMASK(7, 4)
+#define GUID_SOC_SKU GENMASK(11, 8)
+#define GUID_DEVICE_ID GENMASK(27, 12)
+#define GUID_CAP_TYPE GENMASK(29, 28)
+#define GUID_RECORD_ID GENMASK(31, 30)
+
+#define PUNIT_TELEMETRY_OFFSET 0x0200
+#define PUNIT_WATCHER_OFFSET 0x14A0
+#define OOBMSM_0_WATCHER_OFFSET 0x18D8
+#define OOBMSM_1_TELEMETRY_OFFSET 0x1000
+
+enum record_id {
+ PUNIT,
+ OOBMSM_0,
+ OOBMSM_1,
+};
+
+enum capability {
+ CRASHLOG,
+ TELEMETRY,
+ WATCHER,
+};
+
+static int xe_guid_decode(u32 guid, int *index, u32 *offset)
+{
+ u32 record_id = FIELD_GET(GUID_RECORD_ID, guid);
+ u32 cap_type = FIELD_GET(GUID_CAP_TYPE, guid);
+ u32 device_id = FIELD_GET(GUID_DEVICE_ID, guid);
+
+ if (device_id != BMG_DEVICE_ID)
+ return -ENODEV;
+
+ if (cap_type > WATCHER)
+ return -EINVAL;
+
+ *offset = 0;
+
+ if (cap_type == CRASHLOG) {
+ *index = record_id == PUNIT ? 2 : 4;
+ return 0;
+ }
+
+ switch (record_id) {
+ case PUNIT:
+ *index = 0;
+ if (cap_type == TELEMETRY)
+ *offset = PUNIT_TELEMETRY_OFFSET;
+ else
+ *offset = PUNIT_WATCHER_OFFSET;
+ break;
+
+ case OOBMSM_0:
+ *index = 1;
+ if (cap_type == WATCHER)
+ *offset = OOBMSM_0_WATCHER_OFFSET;
+ break;
+
+ case OOBMSM_1:
+ *index = 1;
+ if (cap_type == TELEMETRY)
+ *offset = OOBMSM_1_TELEMETRY_OFFSET;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
+ u32 count)
+{
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
+ u32 mem_region;
+ u32 offset;
+ int ret;
+
+ ret = xe_guid_decode(guid, &mem_region, &offset);
+ if (ret)
+ return ret;
+
+ telem_addr += offset + user_offset;
+
+ guard(mutex)(&xe->pmt.lock);
+
+ /* indicate that we are not at an appropriate power level */
+ if (!xe_pm_runtime_get_if_active(xe))
+ return -ENODATA;
+
+ /* set SoC re-mapper index register based on GUID memory region */
+ xe_mmio_rmw32(xe_root_tile_mmio(xe), SG_REMAP_INDEX1, SG_REMAP_BITS,
+ REG_FIELD_PREP(SG_REMAP_BITS, mem_region));
+
+ memcpy_fromio(data, telem_addr, count);
+ xe_pm_runtime_put(xe);
+
+ return count;
+}
+
+static struct pmt_callbacks xe_pmt_cb = {
+ .read_telem = xe_pmt_telem_read,
+};
+
+static const int vsec_platforms[] = {
+ [XE_BATTLEMAGE] = XE_VSEC_BMG,
+};
+
+static enum xe_vsec get_platform_info(struct xe_device *xe)
+{
+ if (xe->info.platform > XE_BATTLEMAGE)
+ return XE_VSEC_UNKNOWN;
+
+ return vsec_platforms[xe->info.platform];
+}
+
+/**
+ * xe_vsec_init - Initialize resources and add intel_vsec auxiliary
+ * interface
+ * @xe: valid xe instance
+ */
+void xe_vsec_init(struct xe_device *xe)
+{
+ struct intel_vsec_platform_info *info;
+ struct device *dev = xe->drm.dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ enum xe_vsec platform;
+
+ platform = get_platform_info(xe);
+ if (platform == XE_VSEC_UNKNOWN)
+ return;
+
+ info = &xe_vsec_info[platform];
+ if (!info->headers)
+ return;
+
+ switch (platform) {
+ case XE_VSEC_BMG:
+ info->priv_data = &xe_pmt_cb;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Register a VSEC. Cleanup is handled using device managed
+ * resources.
+ */
+ intel_vsec_register(pdev, info);
+}
+MODULE_IMPORT_NS("INTEL_VSEC");
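
The GUID layout documented in xe_vsec.c can be sanity-checked outside the kernel. A small userspace program that rebuilds the masks by hand (FIELD_GET and GENMASK are kernel macros, so local equivalents are used) and decodes a hypothetical PUNIT-telemetry GUID carrying the BMG device ID:

#include <assert.h>
#include <stdint.h>

#define GUID_DEVICE_ID_SHIFT 12
#define GUID_DEVICE_ID_MASK  (0xffffu << GUID_DEVICE_ID_SHIFT)	/* bits 12:27 */
#define GUID_CAP_TYPE_SHIFT  28
#define GUID_CAP_TYPE_MASK   (0x3u << GUID_CAP_TYPE_SHIFT)	/* bits 28:29 */
#define GUID_RECORD_ID_SHIFT 30
#define GUID_RECORD_ID_MASK  (0x3u << GUID_RECORD_ID_SHIFT)	/* bits 30:31 */

int main(void)
{
	/* Hypothetical GUID: record 0 (PUNIT), cap 1 (telemetry), BMG id. */
	uint32_t guid = (0u << GUID_RECORD_ID_SHIFT) |
			(1u << GUID_CAP_TYPE_SHIFT) |
			(0xE2F8u << GUID_DEVICE_ID_SHIFT);

	assert(((guid & GUID_DEVICE_ID_MASK) >> GUID_DEVICE_ID_SHIFT) == 0xE2F8);
	assert(((guid & GUID_CAP_TYPE_MASK) >> GUID_CAP_TYPE_SHIFT) == 1);
	assert(((guid & GUID_RECORD_ID_MASK) >> GUID_RECORD_ID_SHIFT) == 0);
	return 0;
}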
diff --git a/drivers/gpu/drm/xe/xe_vsec.h b/drivers/gpu/drm/xe/xe_vsec.h
new file mode 100644
index 000000000000..5777c53faec2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vsec.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef _XE_VSEC_H_
+#define _XE_VSEC_H_
+
+struct xe_device;
+
+void xe_vsec_init(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index d424992514a4..570fe0376402 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -8,6 +8,7 @@
#include <drm/drm_managed.h>
#include <kunit/visibility.h>
#include <linux/compiler_types.h>
+#include <linux/fault-inject.h>
#include <generated/xe_wa_oob.h>
@@ -251,6 +252,34 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ /* Xe3_LPG */
+
+ { XE_RTP_NAME("14021871409"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(UNSLCGCTL9454, LSCFE_CLKGATE_DIS))
+ },
+
+ /* Xe3_LPM */
+
+ { XE_RTP_NAME("16021867713"),
+ XE_RTP_RULES(MEDIA_VERSION(3000),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("16021865536"),
+ XE_RTP_RULES(MEDIA_VERSION(3000),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("14021486841"),
+ XE_RTP_RULES(MEDIA_VERSION(3000), MEDIA_STEP(A0, B0),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+
{}
};
@@ -567,6 +596,24 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
+ /* Xe3_LPG */
+
+ { XE_RTP_NAME("14021402888"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
+ },
+ { XE_RTP_NAME("18034896535"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH))
+ },
+ { XE_RTP_NAME("16024792527"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(FIELD_SET(SAMPLER_MODE, SMP_WAIT_FETCH_MERGING_COUNTER,
+ SMP_FORCE_128B_OVERFETCH))
+ },
+
{}
};
@@ -710,6 +757,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
+ { XE_RTP_NAME("15016589081"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+ },
/* Xe2_HPG */
{ XE_RTP_NAME("15010599737"),
@@ -738,6 +789,18 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
+ /* Xe3_LPG */
+ { XE_RTP_NAME("14021490052"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(FF_MODE,
+ DIS_MESH_PARTIAL_AUTOSTRIP |
+ DIS_MESH_AUTOSTRIP),
+ SET(VFLSKPD,
+ DIS_PARTIAL_AUTOSTRIP |
+ DIS_AUTOSTRIP))
+ },
+
{}
};
@@ -850,6 +913,7 @@ int xe_wa_init(struct xe_gt *gt)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
{
@@ -887,11 +951,11 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
*/
void xe_wa_apply_tile_workarounds(struct xe_tile *tile)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
if (IS_SRIOV_VF(tile->xe))
return;
- if (XE_WA(mmio, 22010954014))
+ if (XE_WA(tile->primary_gt, 22010954014))
xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
}
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 920ca5060146..40438c3d9b72 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -1,3 +1,4 @@
+1607983814 GRAPHICS_VERSION_RANGE(1200, 1210)
22012773006 GRAPHICS_VERSION_RANGE(1200, 1250)
14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
PLATFORM(DG2)
@@ -33,7 +34,11 @@
GRAPHICS_VERSION(2004)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
+ MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
22019338487_display PLATFORM(LUNARLAKE)
16023588340 GRAPHICS_VERSION(2001)
14019789679 GRAPHICS_VERSION(1255)
GRAPHICS_VERSION_RANGE(1270, 2004)
+no_media_l3 MEDIA_VERSION(3000)
+14022866841 GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)
+ MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index d46fa8374980..5b4264ea38bd 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -155,6 +155,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
}
if (!timeout) {
+ LNL_FLUSH_WORKQUEUE(xe->ordered_wq);
+ err = do_compare(addr, args->value, args->mask,
+ args->op);
+ if (err <= 0) {
+ drm_dbg(&xe->drm, "LNL_FLUSH_WORKQUEUE resolved ufence timeout\n");
+ break;
+ }
err = -ETIME;
break;
}
@@ -169,9 +176,6 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
args->timeout = 0;
}
- if (!timeout && !(err < 0))
- err = -ETIME;
-
if (q)
xe_exec_queue_put(q);
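
The wait-user-fence fix above flushes xe->ordered_wq once when the timeout expires and re-samples the fence value before reporting -ETIME, since the work that signals the fence may simply not have run yet. The same logic factored into a standalone sketch (recheck_after_flush is an illustrative name; do_compare and LNL_FLUSH_WORKQUEUE are the names used in the diff):

static int recheck_after_flush(struct xe_device *xe, u64 addr, u64 value,
			       u64 mask, u16 op)
{
	int err;

	/* The work signalling the fence may simply not have run yet. */
	LNL_FLUSH_WORKQUEUE(xe->ordered_wq);

	err = do_compare(addr, value, mask, op);
	if (err <= 0)
		return err;	/* matched (0) or a hard error (<0) */

	return -ETIME;		/* a genuine timeout */
}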
diff --git a/drivers/gpu/drm/xe/xe_wopcm.c b/drivers/gpu/drm/xe/xe_wopcm.c
index d3a99157e523..ada0d0aa6b74 100644
--- a/drivers/gpu/drm/xe/xe_wopcm.c
+++ b/drivers/gpu/drm/xe/xe_wopcm.c
@@ -5,6 +5,8 @@
#include "xe_wopcm.h"
+#include <linux/fault-inject.h>
+
#include "regs/xe_guc_regs.h"
#include "xe_device.h"
#include "xe_force_wake.h"
@@ -123,8 +125,8 @@ static bool __check_layout(struct xe_device *xe, u32 wopcm_size,
static bool __wopcm_regs_locked(struct xe_gt *gt,
u32 *guc_wopcm_base, u32 *guc_wopcm_size)
{
- u32 reg_base = xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET);
- u32 reg_size = xe_mmio_read32(gt, GUC_WOPCM_SIZE);
+ u32 reg_base = xe_mmio_read32(&gt->mmio, DMA_GUC_WOPCM_OFFSET);
+ u32 reg_size = xe_mmio_read32(&gt->mmio, GUC_WOPCM_SIZE);
if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) ||
!(reg_base & GUC_WOPCM_OFFSET_VALID))
@@ -150,13 +152,13 @@ static int __wopcm_init_regs(struct xe_device *xe, struct xe_gt *gt,
XE_WARN_ON(size & ~GUC_WOPCM_SIZE_MASK);
mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
- err = xe_mmio_write32_and_verify(gt, GUC_WOPCM_SIZE, size, mask,
+ err = xe_mmio_write32_and_verify(&gt->mmio, GUC_WOPCM_SIZE, size, mask,
size | GUC_WOPCM_SIZE_LOCKED);
if (err)
goto err_out;
mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
- err = xe_mmio_write32_and_verify(gt, DMA_GUC_WOPCM_OFFSET,
+ err = xe_mmio_write32_and_verify(&gt->mmio, DMA_GUC_WOPCM_OFFSET,
base | huc_agent, mask,
base | huc_agent |
GUC_WOPCM_OFFSET_VALID);
@@ -169,10 +171,10 @@ err_out:
drm_notice(&xe->drm, "Failed to init uC WOPCM registers!\n");
drm_notice(&xe->drm, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
DMA_GUC_WOPCM_OFFSET.addr,
- xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET));
+ xe_mmio_read32(&gt->mmio, DMA_GUC_WOPCM_OFFSET));
drm_notice(&xe->drm, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
GUC_WOPCM_SIZE.addr,
- xe_mmio_read32(gt, GUC_WOPCM_SIZE));
+ xe_mmio_read32(&gt->mmio, GUC_WOPCM_SIZE));
return err;
}
@@ -268,3 +270,4 @@ check:
return ret;
}
+ALLOW_ERROR_INJECTION(xe_wopcm_init, ERRNO); /* See xe_pci_probe() */
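
xe_mmio_write32_and_verify(), used above for the WOPCM registers, writes a value and then checks that the masked read-back matches an expected pattern, catching registers that did not take the write. A userspace model of that contract, with a plain variable standing in for the MMIO register:

#include <assert.h>
#include <errno.h>
#include <stdint.h>

static uint32_t fake_reg;	/* stands in for the MMIO register */

static int write32_and_verify(uint32_t val, uint32_t mask, uint32_t expect)
{
	fake_reg = val;					/* the write */
	return (fake_reg & mask) == expect ? 0 : -EIO;	/* read-back check */
}

int main(void)
{
	/* A LOCKED-style bit that must stick alongside the written value. */
	assert(write32_and_verify(0x4000 | 1u, 0xffffu, 0x4000 | 1u) == 0);
	return 0;
}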
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index aab79c5e34c2..1bda7ef606cc 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -478,7 +478,6 @@ static const struct drm_driver xen_drm_driver = {
.fops = &xen_drm_dev_fops,
.name = "xendrm-du",
.desc = "Xen PV DRM Display Unit",
- .date = "20180221",
.major = 1,
.minor = 0,
@@ -525,11 +524,6 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
if (ret)
goto fail_register;
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
- xen_drm_driver.name, xen_drm_driver.major,
- xen_drm_driver.minor, xen_drm_driver.patchlevel,
- xen_drm_driver.date, drm_dev->primary->index);
-
return 0;
fail_register:
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index 626e5ac4c33d..dbecca9bdd54 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -6,6 +6,7 @@ config DRM_ZYNQMP_DPSUB
depends on PHY_XILINX_ZYNQMP
depends on XILINX_ZYNQMP_DPDMA
select DMA_ENGINE
+ select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_BRIDGE_CONNECTOR
@@ -16,3 +17,12 @@ config DRM_ZYNQMP_DPSUB
This is a DRM/KMS driver for ZynqMP DisplayPort controller. Choose
this option if you have a Xilinx ZynqMP SoC with DisplayPort
subsystem.
+
+config DRM_ZYNQMP_DPSUB_AUDIO
+ bool "ZynqMP DisplayPort Audio Support"
+ depends on DRM_ZYNQMP_DPSUB
+ depends on SND && SND_SOC
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+ Choose this option to enable DisplayPort audio support in the ZynqMP
+ DisplayPort driver.
diff --git a/drivers/gpu/drm/xlnx/Makefile b/drivers/gpu/drm/xlnx/Makefile
index ea1422a39502..ab6e2ffd7e8d 100644
--- a/drivers/gpu/drm/xlnx/Makefile
+++ b/drivers/gpu/drm/xlnx/Makefile
@@ -1,2 +1,3 @@
zynqmp-dpsub-y := zynqmp_disp.o zynqmp_dpsub.o zynqmp_dp.o zynqmp_kms.o
+zynqmp-dpsub-$(CONFIG_DRM_ZYNQMP_DPSUB_AUDIO) += zynqmp_dp_audio.o
obj-$(CONFIG_DRM_ZYNQMP_DPSUB) += zynqmp-dpsub.o
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 9368acf56eaf..80d1e499a18d 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -143,7 +143,6 @@ struct zynqmp_disp_layer {
* @dpsub: Display subsystem
* @blend: Register I/O base address for the blender
* @avbuf: Register I/O base address for the audio/video buffer manager
- * @audio: Registers I/O base address for the audio mixer
* @layers: Layers (planes)
*/
struct zynqmp_disp {
@@ -152,7 +151,6 @@ struct zynqmp_disp {
void __iomem *blend;
void __iomem *avbuf;
- void __iomem *audio;
struct zynqmp_disp_layer layers[ZYNQMP_DPSUB_NUM_LAYERS];
};
@@ -866,42 +864,6 @@ static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp *disp,
}
/* -----------------------------------------------------------------------------
- * Audio Mixer
- */
-
-static void zynqmp_disp_audio_write(struct zynqmp_disp *disp, int reg, u32 val)
-{
- writel(val, disp->audio + reg);
-}
-
-/**
- * zynqmp_disp_audio_enable - Enable the audio mixer
- * @disp: Display controller
- *
- * Enable the audio mixer by de-asserting the soft reset. The audio state is set to
- * default values by the reset, set the default mixer volume explicitly.
- */
-static void zynqmp_disp_audio_enable(struct zynqmp_disp *disp)
-{
- /* Clear the audio soft reset register as it's an non-reset flop. */
- zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
- zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_MIXER_VOLUME,
- ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE);
-}
-
-/**
- * zynqmp_disp_audio_disable - Disable the audio mixer
- * @disp: Display controller
- *
- * Disable the audio mixer by asserting its soft reset.
- */
-static void zynqmp_disp_audio_disable(struct zynqmp_disp *disp)
-{
- zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_SOFT_RESET,
- ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
-}
-
-/* -----------------------------------------------------------------------------
* ZynqMP Display Layer & DRM Plane
*/
@@ -1200,6 +1162,9 @@ static void zynqmp_disp_layer_release_dma(struct zynqmp_disp *disp,
{
unsigned int i;
+ if (!layer->info)
+ return;
+
for (i = 0; i < layer->info->num_channels; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
@@ -1338,8 +1303,6 @@ void zynqmp_disp_enable(struct zynqmp_disp *disp)
disp->dpsub->vid_clk_from_ps);
zynqmp_disp_avbuf_enable_channels(disp);
zynqmp_disp_avbuf_enable_audio(disp);
-
- zynqmp_disp_audio_enable(disp);
}
/**
@@ -1348,8 +1311,6 @@ void zynqmp_disp_enable(struct zynqmp_disp *disp)
*/
void zynqmp_disp_disable(struct zynqmp_disp *disp)
{
- zynqmp_disp_audio_disable(disp);
-
zynqmp_disp_avbuf_disable_audio(disp);
zynqmp_disp_avbuf_disable_channels(disp);
zynqmp_disp_avbuf_disable(disp);
@@ -1418,12 +1379,6 @@ int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub)
goto error;
}
- disp->audio = devm_platform_ioremap_resource_byname(pdev, "aud");
- if (IS_ERR(disp->audio)) {
- ret = PTR_ERR(disp->audio);
- goto error;
- }
-
ret = zynqmp_disp_create_layers(disp);
if (ret)
goto error;
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h b/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
index fa3935384834..9a4ff094e276 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp_regs.h
@@ -177,12 +177,7 @@
#define ZYNQMP_DISP_AUD_MIXER_VOLUME 0x0
#define ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE 0x20002000
#define ZYNQMP_DISP_AUD_MIXER_META_DATA 0x4
-#define ZYNQMP_DISP_AUD_CH_STATUS0 0x8
-#define ZYNQMP_DISP_AUD_CH_STATUS1 0xc
-#define ZYNQMP_DISP_AUD_CH_STATUS2 0x10
-#define ZYNQMP_DISP_AUD_CH_STATUS3 0x14
-#define ZYNQMP_DISP_AUD_CH_STATUS4 0x18
-#define ZYNQMP_DISP_AUD_CH_STATUS5 0x1c
+#define ZYNQMP_DISP_AUD_CH_STATUS(x) (0x8 + ((x) * 4))
#define ZYNQMP_DISP_AUD_CH_A_DATA0 0x20
#define ZYNQMP_DISP_AUD_CH_A_DATA1 0x24
#define ZYNQMP_DISP_AUD_CH_A_DATA2 0x28
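
The six fixed ZYNQMP_DISP_AUD_CH_STATUS* defines collapse into one indexed macro. A compile-time check that the macro reproduces the old offsets (0x8 through 0x1c):

#define ZYNQMP_DISP_AUD_CH_STATUS(x) (0x8 + ((x) * 4))

_Static_assert(ZYNQMP_DISP_AUD_CH_STATUS(0) == 0x08, "was CH_STATUS0");
_Static_assert(ZYNQMP_DISP_AUD_CH_STATUS(2) == 0x10, "was CH_STATUS2");
_Static_assert(ZYNQMP_DISP_AUD_CH_STATUS(5) == 0x1c, "was CH_STATUS5");

int main(void) { return 0; }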
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 129beac4c073..979f6d3239ba 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -18,7 +18,9 @@
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -51,6 +53,7 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)");
#define ZYNQMP_DP_LANE_COUNT_SET 0x4
#define ZYNQMP_DP_ENHANCED_FRAME_EN 0x8
#define ZYNQMP_DP_TRAINING_PATTERN_SET 0xc
+#define ZYNQMP_DP_LINK_QUAL_PATTERN_SET 0x10
#define ZYNQMP_DP_SCRAMBLING_DISABLE 0x14
#define ZYNQMP_DP_DOWNSPREAD_CTL 0x18
#define ZYNQMP_DP_SOFTWARE_RESET 0x1c
@@ -64,6 +67,9 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)");
ZYNQMP_DP_SOFTWARE_RESET_STREAM3 | \
ZYNQMP_DP_SOFTWARE_RESET_STREAM4 | \
ZYNQMP_DP_SOFTWARE_RESET_AUX)
+#define ZYNQMP_DP_COMP_PATTERN_80BIT_1 0x20
+#define ZYNQMP_DP_COMP_PATTERN_80BIT_2 0x24
+#define ZYNQMP_DP_COMP_PATTERN_80BIT_3 0x28
/* Core enable registers */
#define ZYNQMP_DP_TRANSMITTER_ENABLE 0x80
@@ -207,6 +213,7 @@ MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)");
#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_2 BIT(2)
#define ZYNQMP_DP_TX_PHY_POWER_DOWN_LANE_3 BIT(3)
#define ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL 0xf
+#define ZYNQMP_DP_TRANSMIT_PRBS7 0x230
#define ZYNQMP_DP_PHY_PRECURSOR_LANE_0 0x23c
#define ZYNQMP_DP_PHY_PRECURSOR_LANE_1 0x240
#define ZYNQMP_DP_PHY_PRECURSOR_LANE_2 0x244
@@ -275,30 +282,108 @@ struct zynqmp_dp_config {
};
/**
+ * enum test_pattern - Test patterns for link testing
+ * @TEST_VIDEO: Use regular video input
+ * @TEST_TPS1: Link training symbol pattern TPS1 (/D10.2/)
+ * @TEST_TPS2: Link training symbol pattern TPS2
+ * @TEST_TPS3: Link training symbol pattern TPS3 (for HBR2)
+ * @TEST_SYMBOL_ERROR: Symbol error measurement pattern
+ * @TEST_PRBS7: Output of the PRBS7 (x^7 + x^6 + 1) polynomial
+ * @TEST_80BIT_CUSTOM: A custom 80-bit pattern
+ * @TEST_CP2520: HBR2 compliance eye pattern
+ */
+enum test_pattern {
+ TEST_VIDEO,
+ TEST_TPS1,
+ TEST_TPS2,
+ TEST_TPS3,
+ TEST_SYMBOL_ERROR,
+ TEST_PRBS7,
+ TEST_80BIT_CUSTOM,
+ TEST_CP2520,
+};
+
+static const char *const test_pattern_str[] = {
+ [TEST_VIDEO] = "video",
+ [TEST_TPS1] = "tps1",
+ [TEST_TPS2] = "tps2",
+ [TEST_TPS3] = "tps3",
+ [TEST_SYMBOL_ERROR] = "symbol-error",
+ [TEST_PRBS7] = "prbs7",
+ [TEST_80BIT_CUSTOM] = "80bit-custom",
+ [TEST_CP2520] = "cp2520",
+};
+
+/**
+ * struct zynqmp_dp_test - Configuration for test mode
+ * @pattern: The test pattern
+ * @enhanced: Use enhanced framing
+ * @downspread: Use SSC
+ * @active: Whether test mode is active
+ * @custom: Custom pattern for %TEST_80BIT_CUSTOM
+ * @train_set: Voltage/preemphasis settings
+ * @bw_code: Bandwidth code for the link
+ * @link_cnt: Number of lanes
+ */
+struct zynqmp_dp_test {
+ enum test_pattern pattern;
+ bool enhanced, downspread, active;
+ u8 custom[10];
+ u8 train_set[ZYNQMP_DP_MAX_LANES];
+ u8 bw_code;
+ u8 link_cnt;
+};
+
+/**
+ * struct zynqmp_dp_train_set_priv - Private data for train_set debugfs files
+ * @dp: DisplayPort IP core structure
+ * @lane: The lane for this file
+ */
+struct zynqmp_dp_train_set_priv {
+ struct zynqmp_dp *dp;
+ int lane;
+};
+
+/**
* struct zynqmp_dp - Xilinx DisplayPort core
* @dev: device structure
* @dpsub: Display subsystem
* @iomem: device I/O memory for register access
* @reset: reset controller
+ * @lock: Mutex protecting this struct and register access (but not AUX)
* @irq: irq
* @bridge: DRM bridge for the DP encoder
* @next_bridge: The downstream bridge
+ * @test: Configuration for test mode
* @config: IP core configuration from DTS
* @aux: aux channel
+ * @aux_done: Completed when we get an AUX reply or timeout
+ * @ignore_aux_errors: If set, AUX errors are suppressed
* @phy: PHY handles for DP lanes
* @num_lanes: number of enabled phy lanes
* @hpd_work: hot plug detection worker
+ * @hpd_irq_work: hot plug detection IRQ worker
+ * @ignore_hpd: If set, HPD events and IRQs are ignored
* @status: connection status
* @enabled: flag to indicate if the device is enabled
* @dpcd: DP configuration data from currently connected sink device
* @link_config: common link configuration between IP core and sink device
* @mode: current mode between IP core and sink device
* @train_set: set of training data
+ * @debugfs_train_set: Debugfs private data for @train_set
+ *
+ * @lock covers the link configuration in this struct and the device's
+ * registers. It does not cover @aux or @ignore_aux_errors. It is not strictly
+ * required for any of the members which are only modified at probe/remove time
+ * (e.g. @dev).
*/
struct zynqmp_dp {
struct drm_dp_aux aux;
struct drm_bridge bridge;
struct work_struct hpd_work;
+ struct work_struct hpd_irq_work;
+ struct completion aux_done;
+ struct mutex lock;
struct drm_bridge *next_bridge;
struct device *dev;
@@ -310,9 +395,13 @@ struct zynqmp_dp {
enum drm_connector_status status;
int irq;
bool enabled;
+ bool ignore_aux_errors;
+ bool ignore_hpd;
+ struct zynqmp_dp_train_set_priv debugfs_train_set[ZYNQMP_DP_MAX_LANES];
struct zynqmp_dp_mode mode;
struct zynqmp_dp_link_config link_config;
+ struct zynqmp_dp_test test;
struct zynqmp_dp_config config;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 train_set[ZYNQMP_DP_MAX_LANES];
@@ -626,6 +715,7 @@ static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp,
/**
* zynqmp_dp_update_vs_emph - Update the training values
* @dp: DisplayPort IP core structure
+ * @train_set: A set of training values
*
* Update the training values based on the request from the sink. The mapped
* values are predefined, and the values (vs, pe, pc) are from the device manual.
@@ -633,12 +723,12 @@ static void zynqmp_dp_adjust_train(struct zynqmp_dp *dp,
* Return: 0 if vs and emph are updated successfully, or the error code returned
* by drm_dp_dpcd_write().
*/
-static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp)
+static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp, u8 *train_set)
{
unsigned int i;
int ret;
- ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->train_set,
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, train_set,
dp->mode.lane_cnt);
if (ret < 0)
return ret;
@@ -646,7 +736,7 @@ static int zynqmp_dp_update_vs_emph(struct zynqmp_dp *dp)
for (i = 0; i < dp->mode.lane_cnt; i++) {
u32 reg = ZYNQMP_DP_SUB_TX_PHY_PRECURSOR_LANE_0 + i * 4;
union phy_configure_opts opts = { 0 };
- u8 train = dp->train_set[i];
+ u8 train = train_set[i];
opts.dp.voltage[0] = (train & DP_TRAIN_VOLTAGE_SWING_MASK)
>> DP_TRAIN_VOLTAGE_SWING_SHIFT;
@@ -690,7 +780,7 @@ static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp)
* So, this loop should exit before 512 iterations
*/
for (max_tries = 0; max_tries < 512; max_tries++) {
- ret = zynqmp_dp_update_vs_emph(dp);
+ ret = zynqmp_dp_update_vs_emph(dp, dp->train_set);
if (ret)
return ret;
@@ -755,7 +845,7 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
return ret;
for (tries = 0; tries < DP_MAX_TRAINING_TRIES; tries++) {
- ret = zynqmp_dp_update_vs_emph(dp);
+ ret = zynqmp_dp_update_vs_emph(dp, dp->train_set);
if (ret)
return ret;
@@ -778,28 +868,29 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
}
/**
- * zynqmp_dp_train - Train the link
+ * zynqmp_dp_setup() - Set up major link parameters
* @dp: DisplayPort IP core structure
+ * @bw_code: The link bandwidth as a multiple of 270 MHz
+ * @lane_cnt: The number of lanes to use
+ * @enhanced: Use enhanced framing
+ * @downspread: Enable spread-spectrum clocking
*
- * Return: 0 if all trains are done successfully, or corresponding error code.
+ * Return: 0 on success, or -errno on failure
*/
-static int zynqmp_dp_train(struct zynqmp_dp *dp)
+static int zynqmp_dp_setup(struct zynqmp_dp *dp, u8 bw_code, u8 lane_cnt,
+ bool enhanced, bool downspread)
{
u32 reg;
- u8 bw_code = dp->mode.bw_code;
- u8 lane_cnt = dp->mode.lane_cnt;
u8 aux_lane_cnt = lane_cnt;
- bool enhanced;
int ret;
zynqmp_dp_write(dp, ZYNQMP_DP_LANE_COUNT_SET, lane_cnt);
- enhanced = drm_dp_enhanced_frame_cap(dp->dpcd);
if (enhanced) {
zynqmp_dp_write(dp, ZYNQMP_DP_ENHANCED_FRAME_EN, 1);
aux_lane_cnt |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
}
- if (dp->dpcd[3] & 0x1) {
+ if (downspread) {
zynqmp_dp_write(dp, ZYNQMP_DP_DOWNSPREAD_CTL, 1);
drm_dp_dpcd_writeb(&dp->aux, DP_DOWNSPREAD_CTRL,
DP_SPREAD_AMP_0_5);
@@ -842,8 +933,24 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp)
}
zynqmp_dp_write(dp, ZYNQMP_DP_PHY_CLOCK_SELECT, reg);
- ret = zynqmp_dp_phy_ready(dp);
- if (ret < 0)
+ return zynqmp_dp_phy_ready(dp);
+}
+
+/**
+ * zynqmp_dp_train - Train the link
+ * @dp: DisplayPort IP core structure
+ *
+ * Return: 0 if all trains are done successfully, or corresponding error code.
+ */
+static int zynqmp_dp_train(struct zynqmp_dp *dp)
+{
+ int ret;
+
+ ret = zynqmp_dp_setup(dp, dp->mode.bw_code, dp->mode.lane_cnt,
+ drm_dp_enhanced_frame_cap(dp->dpcd),
+ dp->dpcd[DP_MAX_DOWNSPREAD] &
+ DP_MAX_DOWNSPREAD_0_5);
+ if (ret)
return ret;
zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1);
@@ -934,12 +1041,15 @@ static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr,
u8 *buf, u8 bytes, u8 *reply)
{
bool is_read = (cmd & AUX_READ_BIT) ? true : false;
+ unsigned long time_left;
u32 reg, i;
reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE);
if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REQUEST)
return -EBUSY;
+ reinit_completion(&dp->aux_done);
+
zynqmp_dp_write(dp, ZYNQMP_DP_AUX_ADDRESS, addr);
if (!is_read)
for (i = 0; i < bytes; i++)
@@ -954,17 +1064,14 @@ static int zynqmp_dp_aux_cmd_submit(struct zynqmp_dp *dp, u32 cmd, u16 addr,
zynqmp_dp_write(dp, ZYNQMP_DP_AUX_COMMAND, reg);
/* Wait for reply to be delivered up to 2ms */
- for (i = 0; ; i++) {
- reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE);
- if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY)
- break;
+ time_left = wait_for_completion_timeout(&dp->aux_done,
+ msecs_to_jiffies(2));
+ if (!time_left)
+ return -ETIMEDOUT;
- if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT ||
- i == 2)
- return -ETIMEDOUT;
-
- usleep_range(1000, 1100);
- }
+ reg = zynqmp_dp_read(dp, ZYNQMP_DP_INTERRUPT_SIGNAL_STATE);
+ if (reg & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT)
+ return -ETIMEDOUT;
reg = zynqmp_dp_read(dp, ZYNQMP_DP_AUX_REPLY_CODE);
if (reply)
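
The AUX rework above replaces a 1 ms polling loop with a completion signalled from the interrupt handler, keeping the same 2 ms reply budget. The core of that pattern as a sketch (wait_aux_reply is an illustrative name; the reinit_completion() before starting the transaction prevents a stale complete() from satisfying the wait):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_aux_reply(struct completion *aux_done)
{
	unsigned long time_left;

	reinit_completion(aux_done);
	/* ... write ZYNQMP_DP_AUX_COMMAND to start the transaction ... */
	time_left = wait_for_completion_timeout(aux_done,
						msecs_to_jiffies(2));
	return time_left ? 0 : -ETIMEDOUT;
}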
@@ -1006,6 +1113,8 @@ zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
if (dp->status == connector_status_disconnected) {
dev_dbg(dp->dev, "no connected aux device\n");
+ if (dp->ignore_aux_errors)
+ goto fake_response;
return -ENODEV;
}
@@ -1014,7 +1123,13 @@ zynqmp_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
dev_dbg(dp->dev, "failed to do aux transfer (%d)\n", ret);
- return ret;
+ if (!dp->ignore_aux_errors)
+ return ret;
+
+fake_response:
+ msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+ memset(msg->buffer, 0, msg->size);
+ return msg->size;
}
/**
@@ -1048,6 +1163,9 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp)
(w << ZYNQMP_DP_AUX_CLK_DIVIDER_AUX_FILTER_SHIFT) |
(rate / (1000 * 1000)));
+ zynqmp_dp_write(dp, ZYNQMP_DP_INT_EN, ZYNQMP_DP_INT_REPLY_RECEIVED |
+ ZYNQMP_DP_INT_REPLY_TIMEOUT);
+
dp->aux.name = "ZynqMP DP AUX";
dp->aux.dev = dp->dev;
dp->aux.drm_dev = dp->bridge.dev;
@@ -1065,6 +1183,9 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp)
static void zynqmp_dp_aux_cleanup(struct zynqmp_dp *dp)
{
drm_dp_aux_unregister(&dp->aux);
+
+ zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_REPLY_RECEIVED |
+ ZYNQMP_DP_INT_REPLY_TIMEOUT);
}
/* -----------------------------------------------------------------------------
@@ -1221,7 +1342,6 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
{
u8 lane_cnt = dp->mode.lane_cnt;
u32 reg, wpl;
- unsigned int rate;
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_HTOTAL, mode->htotal);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_VTOTAL, mode->vtotal);
@@ -1246,18 +1366,8 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
reg = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_N_VID, reg);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_M_VID, mode->clock);
- rate = zynqmp_dpsub_get_audio_clk_rate(dp->dpsub);
- if (rate) {
- dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_N_AUD, reg);
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_M_AUD, rate / 1000);
- }
}
- /* Only 2 channel audio is supported now */
- if (zynqmp_dpsub_audio_enabled(dp->dpsub))
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CHANNELS, 1);
-
zynqmp_dp_write(dp, ZYNQMP_DP_USER_PIX_WIDTH, 1);
/* Translate to the native 16 bit datapath based on IP core spec */
@@ -1267,6 +1377,44 @@ static void zynqmp_dp_encoder_mode_set_stream(struct zynqmp_dp *dp,
}
/* -----------------------------------------------------------------------------
+ * Audio
+ */
+
+void zynqmp_dp_audio_set_channels(struct zynqmp_dp *dp,
+ unsigned int num_channels)
+{
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CHANNELS, num_channels - 1);
+}
+
+void zynqmp_dp_audio_enable(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
+}
+
+void zynqmp_dp_audio_disable(struct zynqmp_dp *dp)
+{
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
+}
+
+void zynqmp_dp_audio_write_n_m(struct zynqmp_dp *dp)
+{
+ unsigned int rate;
+ u32 link_rate;
+
+ if (!(dp->config.misc0 & ZYNQMP_DP_MAIN_STREAM_MISC0_SYNC_LOCK))
+ return;
+
+ link_rate = drm_dp_bw_code_to_link_rate(dp->mode.bw_code);
+
+ rate = clk_get_rate(dp->dpsub->aud_clk);
+
+ dev_dbg(dp->dev, "Audio rate: %d\n", rate / 512);
+
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_N_AUD, link_rate);
+ zynqmp_dp_write(dp, ZYNQMP_DP_TX_M_AUD, rate / 1000);
+}
+
+/* -----------------------------------------------------------------------------
* DISP Configuration
*/
@@ -1386,8 +1534,10 @@ zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge,
}
/* Check with link rate and lane count */
+ mutex_lock(&dp->lock);
rate = zynqmp_dp_max_rate(dp->link_config.max_rate,
dp->link_config.max_lanes, dp->config.bpp);
+ mutex_unlock(&dp->lock);
if (mode->clock > rate) {
dev_dbg(dp->dev, "filtered mode %s for high pixel rate\n",
mode->name);
@@ -1414,6 +1564,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
pm_runtime_get_sync(dp->dev);
+ guard(mutex)(&dp->lock);
zynqmp_dp_disp_enable(dp, old_bridge_state);
/*
@@ -1453,8 +1604,7 @@ static void zynqmp_dp_bridge_atomic_enable(struct drm_bridge *bridge,
/* Enable the encoder */
dp->enabled = true;
zynqmp_dp_update_misc(dp);
- if (zynqmp_dpsub_audio_enabled(dp->dpsub))
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 1);
+
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN, 0);
if (dp->status == connector_status_connected) {
for (i = 0; i < 3; i++) {
@@ -1481,16 +1631,16 @@ static void zynqmp_dp_bridge_atomic_disable(struct drm_bridge *bridge,
{
struct zynqmp_dp *dp = bridge_to_dp(bridge);
+ mutex_lock(&dp->lock);
dp->enabled = false;
cancel_work(&dp->hpd_work);
zynqmp_dp_write(dp, ZYNQMP_DP_MAIN_STREAM_ENABLE, 0);
drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
ZYNQMP_DP_TX_PHY_POWER_DOWN_ALL);
- if (zynqmp_dpsub_audio_enabled(dp->dpsub))
- zynqmp_dp_write(dp, ZYNQMP_DP_TX_AUDIO_CONTROL, 0);
zynqmp_dp_disp_disable(dp, old_bridge_state);
+ mutex_unlock(&dp->lock);
pm_runtime_put_sync(dp->dev);
}
@@ -1526,13 +1676,14 @@ static int zynqmp_dp_bridge_atomic_check(struct drm_bridge *bridge,
return 0;
}
-static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status __zynqmp_dp_bridge_detect(struct zynqmp_dp *dp)
{
- struct zynqmp_dp *dp = bridge_to_dp(bridge);
struct zynqmp_dp_link_config *link_config = &dp->link_config;
u32 state, i;
int ret;
+ lockdep_assert_held(&dp->lock);
+
/*
* This is a heuristic. It can take some delay (e.g., 100 ~ 500 msec) to
* get the HPD signal with some monitors.
@@ -1568,6 +1719,18 @@ disconnected:
return connector_status_disconnected;
}
+static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
+{
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
+ enum drm_connector_status ret;
+
+ mutex_lock(&dp->lock);
+ ret = __zynqmp_dp_bridge_detect(dp);
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
static const struct drm_edid *zynqmp_dp_bridge_edid_read(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -1605,6 +1768,582 @@ zynqmp_dp_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
return zynqmp_dp_bridge_default_bus_fmts(num_input_fmts);
}
+/* -----------------------------------------------------------------------------
+ * debugfs
+ */
+
+/**
+ * zynqmp_dp_set_test_pattern() - Configure the link for a test pattern
+ * @dp: DisplayPort IP core structure
+ * @pattern: The test pattern to configure
+ * @custom: The custom pattern to use if @pattern is %TEST_80BIT_CUSTOM
+ *
+ * Return: 0 on success, or negative errno on (DPCD) failure
+ */
+static int zynqmp_dp_set_test_pattern(struct zynqmp_dp *dp,
+ enum test_pattern pattern,
+ u8 *const custom)
+{
+ bool scramble = false;
+ u32 train_pattern = 0;
+ u32 link_pattern = 0;
+ u8 dpcd_train = 0;
+ u8 dpcd_link = 0;
+ int ret;
+
+ switch (pattern) {
+ case TEST_TPS1:
+ train_pattern = 1;
+ break;
+ case TEST_TPS2:
+ train_pattern = 2;
+ break;
+ case TEST_TPS3:
+ train_pattern = 3;
+ break;
+ case TEST_SYMBOL_ERROR:
+ scramble = true;
+ link_pattern = DP_PHY_TEST_PATTERN_ERROR_COUNT;
+ break;
+ case TEST_PRBS7:
+ /* We use a dedicated register to enable PRBS7 */
+ dpcd_link = DP_LINK_QUAL_PATTERN_ERROR_RATE;
+ break;
+ case TEST_80BIT_CUSTOM: {
+ const u8 *p = custom;
+
+ link_pattern = DP_LINK_QUAL_PATTERN_80BIT_CUSTOM;
+
+ zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_1,
+ (p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]);
+ zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_2,
+ (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]);
+ zynqmp_dp_write(dp, ZYNQMP_DP_COMP_PATTERN_80BIT_3,
+ (p[9] << 8) | p[8]);
+ break;
+ }
+ case TEST_CP2520:
+ link_pattern = DP_LINK_QUAL_PATTERN_CP2520_PAT_1;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ fallthrough;
+ case TEST_VIDEO:
+ scramble = true;
+ }
+
+ zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, !scramble);
+ zynqmp_dp_write(dp, ZYNQMP_DP_TRAINING_PATTERN_SET, train_pattern);
+ zynqmp_dp_write(dp, ZYNQMP_DP_LINK_QUAL_PATTERN_SET, link_pattern);
+ zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMIT_PRBS7, pattern == TEST_PRBS7);
+
+ dpcd_link = dpcd_link ?: link_pattern;
+ dpcd_train = train_pattern;
+ if (!scramble)
+ dpcd_train |= DP_LINK_SCRAMBLING_DISABLE;
+
+ if (dp->dpcd[DP_DPCD_REV] < 0x12) {
+ if (pattern == TEST_CP2520)
+ dev_warn(dp->dev,
+ "can't set sink link quality pattern to CP2520 for DPCD < r1.2; error counters will be invalid\n");
+ else
+ dpcd_train |= FIELD_PREP(DP_LINK_QUAL_PATTERN_11_MASK,
+ dpcd_link);
+ } else {
+ u8 dpcd_link_lane[ZYNQMP_DP_MAX_LANES];
+
+ memset(dpcd_link_lane, dpcd_link, ZYNQMP_DP_MAX_LANES);
+ ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_QUAL_LANE0_SET,
+ dpcd_link_lane, ZYNQMP_DP_MAX_LANES);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, dpcd_train);
+ return ret < 0 ? ret : 0;
+}
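
The TEST_80BIT_CUSTOM case above packs ten pattern bytes little-endian into two 32-bit registers plus a 16-bit tail. A userspace check of exactly that packing:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint8_t p[10] = { 0x00, 0x11, 0x22, 0x33, 0x44,
				0x55, 0x66, 0x77, 0x88, 0x99 };
	uint32_t w1 = (p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0];
	uint32_t w2 = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
	uint32_t w3 = (p[9] << 8) | p[8];

	assert(w1 == 0x33221100);	/* bytes 0-3, little-endian */
	assert(w2 == 0x77665544);	/* bytes 4-7 */
	assert(w3 == 0x9988);		/* bytes 8-9 in the low half-word */
	return 0;
}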
+
+static int zynqmp_dp_test_setup(struct zynqmp_dp *dp)
+{
+ return zynqmp_dp_setup(dp, dp->test.bw_code, dp->test.link_cnt,
+ dp->test.enhanced, dp->test.downspread);
+}
+
+static ssize_t zynqmp_dp_pattern_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ struct zynqmp_dp *dp = file->private_data;
+ char buf[16];
+ ssize_t ret;
+
+ ret = debugfs_file_get(dentry);
+ if (unlikely(ret))
+ return ret;
+
+ mutex_lock(&dp->lock);
+ ret = snprintf(buf, sizeof(buf), "%s\n",
+ test_pattern_str[dp->test.pattern]);
+ mutex_unlock(&dp->lock);
+
+ debugfs_file_put(dentry);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+}
+
+static ssize_t zynqmp_dp_pattern_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ struct zynqmp_dp *dp = file->private_data;
+ char buf[16];
+ ssize_t ret;
+ int pattern;
+
+ ret = debugfs_file_get(dentry);
+ if (unlikely(ret))
+ return ret;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf,
+ count);
+ if (ret < 0)
+ goto out;
+ buf[ret] = '\0';
+
+ pattern = sysfs_match_string(test_pattern_str, buf);
+ if (pattern < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&dp->lock);
+ dp->test.pattern = pattern;
+ if (dp->test.active)
+ ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern,
+ dp->test.custom) ?: ret;
+ mutex_unlock(&dp->lock);
+
+out:
+ debugfs_file_put(dentry);
+ return ret;
+}
+
+static const struct file_operations fops_zynqmp_dp_pattern = {
+ .read = zynqmp_dp_pattern_read,
+ .write = zynqmp_dp_pattern_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static int zynqmp_dp_enhanced_get(void *data, u64 *val)
+{
+ struct zynqmp_dp *dp = data;
+
+ mutex_lock(&dp->lock);
+ *val = dp->test.enhanced;
+ mutex_unlock(&dp->lock);
+ return 0;
+}
+
+static int zynqmp_dp_enhanced_set(void *data, u64 val)
+{
+ struct zynqmp_dp *dp = data;
+ int ret = 0;
+
+ mutex_lock(&dp->lock);
+ dp->test.enhanced = val;
+ if (dp->test.active)
+ ret = zynqmp_dp_test_setup(dp);
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_enhanced, zynqmp_dp_enhanced_get,
+ zynqmp_dp_enhanced_set, "%llu\n");
+
+static int zynqmp_dp_downspread_get(void *data, u64 *val)
+{
+ struct zynqmp_dp *dp = data;
+
+ mutex_lock(&dp->lock);
+ *val = dp->test.downspread;
+ mutex_unlock(&dp->lock);
+ return 0;
+}
+
+static int zynqmp_dp_downspread_set(void *data, u64 val)
+{
+ struct zynqmp_dp *dp = data;
+ int ret = 0;
+
+ mutex_lock(&dp->lock);
+ dp->test.downspread = val;
+ if (dp->test.active)
+ ret = zynqmp_dp_test_setup(dp);
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_downspread, zynqmp_dp_downspread_get,
+ zynqmp_dp_downspread_set, "%llu\n");
+
+static int zynqmp_dp_active_get(void *data, u64 *val)
+{
+ struct zynqmp_dp *dp = data;
+
+ mutex_lock(&dp->lock);
+ *val = dp->test.active;
+ mutex_unlock(&dp->lock);
+ return 0;
+}
+
+static int zynqmp_dp_active_set(void *data, u64 val)
+{
+ struct zynqmp_dp *dp = data;
+ int ret = 0;
+
+ mutex_lock(&dp->lock);
+ if (val) {
+ if (val < 2) {
+ ret = zynqmp_dp_test_setup(dp);
+ if (ret)
+ goto out;
+ }
+
+ ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern,
+ dp->test.custom);
+ if (ret)
+ goto out;
+
+ ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
+ if (ret)
+ goto out;
+
+ dp->test.active = true;
+ } else {
+ int err;
+
+ dp->test.active = false;
+ err = zynqmp_dp_set_test_pattern(dp, TEST_VIDEO, NULL);
+ if (err)
+ dev_warn(dp->dev, "could not clear test pattern: %d\n",
+ err);
+ zynqmp_dp_train_loop(dp);
+ }
+out:
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_active, zynqmp_dp_active_get,
+ zynqmp_dp_active_set, "%llu\n");
+
+static ssize_t zynqmp_dp_custom_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ struct zynqmp_dp *dp = file->private_data;
+ ssize_t ret;
+
+ ret = debugfs_file_get(dentry);
+ if (unlikely(ret))
+ return ret;
+
+ mutex_lock(&dp->lock);
+ ret = simple_read_from_buffer(user_buf, count, ppos, &dp->test.custom,
+ sizeof(dp->test.custom));
+ mutex_unlock(&dp->lock);
+
+ debugfs_file_put(dentry);
+ return ret;
+}
+
+static ssize_t zynqmp_dp_custom_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ struct zynqmp_dp *dp = file->private_data;
+ ssize_t ret;
+ char buf[sizeof(dp->test.custom)];
+
+ ret = debugfs_file_get(dentry);
+ if (unlikely(ret))
+ return ret;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
+ if (ret < 0)
+ goto out;
+
+ mutex_lock(&dp->lock);
+ memcpy(dp->test.custom, buf, ret);
+ if (dp->test.active)
+ ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern,
+ dp->test.custom) ?: ret;
+ mutex_unlock(&dp->lock);
+
+out:
+ debugfs_file_put(dentry);
+ return ret;
+}
+
+static const struct file_operations fops_zynqmp_dp_custom = {
+ .read = zynqmp_dp_custom_read,
+ .write = zynqmp_dp_custom_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static int zynqmp_dp_swing_get(void *data, u64 *val)
+{
+ struct zynqmp_dp_train_set_priv *priv = data;
+ struct zynqmp_dp *dp = priv->dp;
+
+ mutex_lock(&dp->lock);
+ *val = dp->test.train_set[priv->lane] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ mutex_unlock(&dp->lock);
+ return 0;
+}
+
+static int zynqmp_dp_swing_set(void *data, u64 val)
+{
+ struct zynqmp_dp_train_set_priv *priv = data;
+ struct zynqmp_dp *dp = priv->dp;
+ u8 *train_set = &dp->test.train_set[priv->lane];
+ int ret = 0;
+
+ if (val > 3)
+ return -EINVAL;
+
+ mutex_lock(&dp->lock);
+ *train_set &= ~(DP_TRAIN_MAX_SWING_REACHED |
+ DP_TRAIN_VOLTAGE_SWING_MASK);
+ *train_set |= val;
+ if (val == 3)
+ *train_set |= DP_TRAIN_MAX_SWING_REACHED;
+
+ if (dp->test.active)
+ ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_swing, zynqmp_dp_swing_get,
+ zynqmp_dp_swing_set, "%llu\n");
+
+static int zynqmp_dp_preemphasis_get(void *data, u64 *val)
+{
+ struct zynqmp_dp_train_set_priv *priv = data;
+ struct zynqmp_dp *dp = priv->dp;
+
+ mutex_lock(&dp->lock);
+ *val = FIELD_GET(DP_TRAIN_PRE_EMPHASIS_MASK,
+ dp->test.train_set[priv->lane]);
+ mutex_unlock(&dp->lock);
+ return 0;
+}
+
+static int zynqmp_dp_preemphasis_set(void *data, u64 val)
+{
+ struct zynqmp_dp_train_set_priv *priv = data;
+ struct zynqmp_dp *dp = priv->dp;
+ u8 *train_set = &dp->test.train_set[priv->lane];
+ int ret = 0;
+
+ if (val > 2)
+ return -EINVAL;
+
+ mutex_lock(&dp->lock);
+ *train_set &= ~(DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ *train_set |= val;
+ if (val == 2)
+ *train_set |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ if (dp->test.active)
+ ret = zynqmp_dp_update_vs_emph(dp, dp->test.train_set);
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_preemphasis, zynqmp_dp_preemphasis_get,
+ zynqmp_dp_preemphasis_set, "%llu\n");
+
+static int zynqmp_dp_lanes_get(void *data, u64 *val)
+{
+ struct zynqmp_dp *dp = data;
+
+ mutex_lock(&dp->lock);
+ *val = dp->test.link_cnt;
+ mutex_unlock(&dp->lock);
+ return 0;
+}
+
+static int zynqmp_dp_lanes_set(void *data, u64 val)
+{
+ struct zynqmp_dp *dp = data;
+ int ret = 0;
+
+ if (val > ZYNQMP_DP_MAX_LANES)
+ return -EINVAL;
+
+ mutex_lock(&dp->lock);
+ if (val > dp->num_lanes) {
+ ret = -EINVAL;
+ } else {
+ dp->test.link_cnt = val;
+ if (dp->test.active)
+ ret = zynqmp_dp_test_setup(dp);
+ }
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_lanes, zynqmp_dp_lanes_get,
+ zynqmp_dp_lanes_set, "%llu\n");
+
+static int zynqmp_dp_rate_get(void *data, u64 *val)
+{
+ struct zynqmp_dp *dp = data;
+
+ mutex_lock(&dp->lock);
+ *val = drm_dp_bw_code_to_link_rate(dp->test.bw_code) * 10000ULL;
+ mutex_unlock(&dp->lock);
+ return 0;
+}
+
+static int zynqmp_dp_rate_set(void *data, u64 val)
+{
+ struct zynqmp_dp *dp = data;
+ int link_rate;
+ int ret = 0;
+ u8 bw_code;
+
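+ /*
+ * do_div() divides val in place and returns the remainder, so this
+ * rejects any rate that is not a multiple of 10 kHz.
+ */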
+ if (do_div(val, 10000))
+ return -EINVAL;
+
+ bw_code = drm_dp_link_rate_to_bw_code(val);
+ link_rate = drm_dp_bw_code_to_link_rate(bw_code);
+ if (val != link_rate)
+ return -EINVAL;
+
+ if (bw_code != DP_LINK_BW_1_62 && bw_code != DP_LINK_BW_2_7 &&
+ bw_code != DP_LINK_BW_5_4)
+ return -EINVAL;
+
+ mutex_lock(&dp->lock);
+ dp->test.bw_code = bw_code;
+ if (dp->test.active)
+ ret = zynqmp_dp_test_setup(dp);
+ mutex_unlock(&dp->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_rate, zynqmp_dp_rate_get,
+ zynqmp_dp_rate_set, "%llu\n");
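+
+ /*
+ * For reference: the "rate" file works in bits per second per lane.
+ * drm_dp_bw_code_to_link_rate() returns kHz, hence the 10000 scale
+ * factor; writing 1620000000, 2700000000 or 5400000000 selects
+ * DP_LINK_BW_1_62, DP_LINK_BW_2_7 or DP_LINK_BW_5_4 respectively, and
+ * any other value is rejected with -EINVAL.
+ */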
+
+static int zynqmp_dp_ignore_aux_errors_get(void *data, u64 *val)
+{
+ struct zynqmp_dp *dp = data;
+
+ mutex_lock(&dp->aux.hw_mutex);
+ *val = dp->ignore_aux_errors;
+ mutex_unlock(&dp->aux.hw_mutex);
+ return 0;
+}
+
+static int zynqmp_dp_ignore_aux_errors_set(void *data, u64 val)
+{
+ struct zynqmp_dp *dp = data;
+
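+ /* Only 0 and 1 are meaningful here; reject anything else */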
+ if (val != !!val)
+ return -EINVAL;
+
+ mutex_lock(&dp->aux.hw_mutex);
+ dp->ignore_aux_errors = val;
+ mutex_unlock(&dp->aux.hw_mutex);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_ignore_aux_errors,
+ zynqmp_dp_ignore_aux_errors_get,
+ zynqmp_dp_ignore_aux_errors_set, "%llu\n");
+
+static int zynqmp_dp_ignore_hpd_get(void *data, u64 *val)
+{
+ struct zynqmp_dp *dp = data;
+
+ mutex_lock(&dp->lock);
+ *val = dp->ignore_hpd;
+ mutex_unlock(&dp->lock);
+ return 0;
+}
+
+static int zynqmp_dp_ignore_hpd_set(void *data, u64 val)
+{
+ struct zynqmp_dp *dp = data;
+
+ if (val != !!val)
+ return -EINVAL;
+
+ mutex_lock(&dp->lock);
+ dp->ignore_hpd = val;
+ mutex_unlock(&dp->lock);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_ignore_hpd, zynqmp_dp_ignore_hpd_get,
+ zynqmp_dp_ignore_hpd_set, "%llu\n");
+
+static void zynqmp_dp_bridge_debugfs_init(struct drm_bridge *bridge,
+ struct dentry *root)
+{
+ struct zynqmp_dp *dp = bridge_to_dp(bridge);
+ struct dentry *test;
+ int i;
+
+ dp->test.bw_code = DP_LINK_BW_5_4;
+ dp->test.link_cnt = dp->num_lanes;
+
+ test = debugfs_create_dir("test", root);
+#define CREATE_FILE(name) \
+ debugfs_create_file(#name, 0600, test, dp, &fops_zynqmp_dp_##name)
+ CREATE_FILE(pattern);
+ CREATE_FILE(enhanced);
+ CREATE_FILE(downspread);
+ CREATE_FILE(active);
+ CREATE_FILE(custom);
+ CREATE_FILE(rate);
+ CREATE_FILE(lanes);
+ CREATE_FILE(ignore_aux_errors);
+ CREATE_FILE(ignore_hpd);
+
+ for (i = 0; i < dp->num_lanes; i++) {
+ static const char fmt[] = "lane%d_preemphasis";
+ char name[sizeof(fmt)];
+
+ dp->debugfs_train_set[i].dp = dp;
+ dp->debugfs_train_set[i].lane = i;
+
+ snprintf(name, sizeof(name), fmt, i);
+ debugfs_create_file(name, 0600, test,
+ &dp->debugfs_train_set[i],
+ &fops_zynqmp_dp_preemphasis);
+
+ snprintf(name, sizeof(name), "lane%d_swing", i);
+ debugfs_create_file(name, 0600, test,
+ &dp->debugfs_train_set[i],
+ &fops_zynqmp_dp_swing);
+ }
+}
+
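+ /*
+ * Usage sketch (hedged: the exact debugfs location depends on where the
+ * DRM core roots the bridge directory, typically somewhere under
+ * /sys/kernel/debug/dri/):
+ *
+ *   echo 2 > .../test/lanes          # limit the link to two lanes
+ *   echo 2700000000 > .../test/rate  # 2.7 Gb/s per lane
+ *   echo <name> > .../test/pattern   # one of the test_pattern_str[] names
+ *   echo 1 > .../test/active         # train the link, start the pattern
+ *   echo 0 > .../test/active         # stop the test and retrain normally
+ */
+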
static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
.attach = zynqmp_dp_bridge_attach,
.detach = zynqmp_dp_bridge_detach,
@@ -1618,6 +2357,7 @@ static const struct drm_bridge_funcs zynqmp_dp_bridge_funcs = {
.detect = zynqmp_dp_bridge_detect,
.edid_read = zynqmp_dp_bridge_edid_read,
.atomic_get_input_bus_fmts = zynqmp_dp_bridge_get_input_bus_fmts,
+ .debugfs_init = zynqmp_dp_bridge_debugfs_init,
};
/* -----------------------------------------------------------------------------
@@ -1651,10 +2391,46 @@ static void zynqmp_dp_hpd_work_func(struct work_struct *work)
struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp, hpd_work);
enum drm_connector_status status;
- status = zynqmp_dp_bridge_detect(&dp->bridge);
+ mutex_lock(&dp->lock);
+ if (dp->ignore_hpd) {
+ mutex_unlock(&dp->lock);
+ return;
+ }
+
+ status = __zynqmp_dp_bridge_detect(dp);
+ mutex_unlock(&dp->lock);
+
drm_bridge_hpd_notify(&dp->bridge, status);
}
+static void zynqmp_dp_hpd_irq_work_func(struct work_struct *work)
+{
+ struct zynqmp_dp *dp = container_of(work, struct zynqmp_dp,
+ hpd_irq_work);
+ u8 status[DP_LINK_STATUS_SIZE + 2];
+ int err;
+
+ mutex_lock(&dp->lock);
+ if (dp->ignore_hpd) {
+ mutex_unlock(&dp->lock);
+ return;
+ }
+
+ err = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
+ DP_LINK_STATUS_SIZE + 2);
+ if (err < 0) {
+ dev_dbg_ratelimited(dp->dev,
+ "could not read sink status: %d\n", err);
+ } else {
+ if (status[4] & DP_LINK_STATUS_UPDATED ||
+ !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
+ !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) {
+ zynqmp_dp_train_loop(dp);
+ }
+ }
+ mutex_unlock(&dp->lock);
+}
+
static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
{
struct zynqmp_dp *dp = (struct zynqmp_dp *)data;
@@ -1686,23 +2462,15 @@ static irqreturn_t zynqmp_dp_irq_handler(int irq, void *data)
if (status & ZYNQMP_DP_INT_HPD_EVENT)
schedule_work(&dp->hpd_work);
- if (status & ZYNQMP_DP_INT_HPD_IRQ) {
- int ret;
- u8 status[DP_LINK_STATUS_SIZE + 2];
+ if (status & ZYNQMP_DP_INT_HPD_IRQ)
+ schedule_work(&dp->hpd_irq_work);
- ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, status,
- DP_LINK_STATUS_SIZE + 2);
- if (ret < 0)
- goto handled;
+ if (status & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY)
+ complete(&dp->aux_done);
- if (status[4] & DP_LINK_STATUS_UPDATED ||
- !drm_dp_clock_recovery_ok(&status[2], dp->mode.lane_cnt) ||
- !drm_dp_channel_eq_ok(&status[2], dp->mode.lane_cnt)) {
- zynqmp_dp_train_loop(dp);
- }
- }
+ if (status & ZYNQMP_DP_INTERRUPT_SIGNAL_STATE_REPLY_TIMEOUT)
+ complete(&dp->aux_done);
-handled:
return IRQ_HANDLED;
}
@@ -1725,8 +2493,11 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
dp->dev = &pdev->dev;
dp->dpsub = dpsub;
dp->status = connector_status_disconnected;
+ mutex_init(&dp->lock);
+ init_completion(&dp->aux_done);
INIT_WORK(&dp->hpd_work, zynqmp_dp_hpd_work_func);
+ INIT_WORK(&dp->hpd_irq_work, zynqmp_dp_hpd_irq_work_func);
/* Acquire all resources (IOMEM, IRQ and PHYs). */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dp");
@@ -1802,9 +2573,8 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
* Now that the hardware is initialized and won't generate spurious
* interrupts, request the IRQ.
*/
- ret = devm_request_threaded_irq(dp->dev, dp->irq, NULL,
- zynqmp_dp_irq_handler, IRQF_ONESHOT,
- dev_name(dp->dev), dp);
+ ret = devm_request_irq(dp->dev, dp->irq, zynqmp_dp_irq_handler,
+ IRQF_SHARED, dev_name(dp->dev), dp);
if (ret < 0)
goto err_phy_exit;
@@ -1829,8 +2599,9 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub)
struct zynqmp_dp *dp = dpsub->dp;
zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, ZYNQMP_DP_INT_ALL);
- disable_irq(dp->irq);
+ devm_free_irq(dp->dev, dp->irq, dp);
+ cancel_work_sync(&dp->hpd_irq_work);
cancel_work_sync(&dp->hpd_work);
zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 0);
@@ -1838,4 +2609,5 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub)
zynqmp_dp_phy_exit(dp);
zynqmp_dp_reset(dp, true);
+ mutex_destroy(&dp->lock);
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.h b/drivers/gpu/drm/xlnx/zynqmp_dp.h
index f077d7fbd0ad..a3257793e23a 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.h
@@ -22,4 +22,11 @@ void zynqmp_dp_disable_vblank(struct zynqmp_dp *dp);
int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub);
void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub);
+void zynqmp_dp_audio_set_channels(struct zynqmp_dp *dp,
+ unsigned int num_channels);
+void zynqmp_dp_audio_enable(struct zynqmp_dp *dp);
+void zynqmp_dp_audio_disable(struct zynqmp_dp *dp);
+
+void zynqmp_dp_audio_write_n_m(struct zynqmp_dp *dp);
+
#endif /* _ZYNQMP_DP_H_ */
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
new file mode 100644
index 000000000000..fa5f0ace6084
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp_audio.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP DisplayPort Subsystem Driver - Audio support
+ *
+ * Copyright (C) 2015 - 2024 Xilinx, Inc.
+ *
+ * Authors:
+ * - Hyun Woo Kwon <hyun.kwon@xilinx.com>
+ * - Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+#include <sound/asoundef.h>
+#include <sound/core.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "zynqmp_disp_regs.h"
+#include "zynqmp_dp.h"
+#include "zynqmp_dpsub.h"
+
+#define ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK 512
+#define ZYNQMP_NUM_PCMS 2
+
+struct zynqmp_dpsub_audio {
+ void __iomem *base;
+
+ struct snd_soc_card card;
+
+ const char *dai_name;
+ const char *link_names[ZYNQMP_NUM_PCMS];
+ const char *pcm_names[ZYNQMP_NUM_PCMS];
+
+ struct snd_soc_dai_driver dai_driver;
+ struct snd_dmaengine_pcm_config pcm_configs[ZYNQMP_NUM_PCMS];
+
+ struct snd_soc_dai_link links[ZYNQMP_NUM_PCMS];
+
+ struct {
+ struct snd_soc_dai_link_component cpu;
+ struct snd_soc_dai_link_component codec;
+ struct snd_soc_dai_link_component platform;
+ } components[ZYNQMP_NUM_PCMS];
+
+ /*
+ * Protects:
+ * - enabled_streams
+ * - volumes
+ * - current_rate
+ */
+ struct mutex enable_lock;
+
+ u32 enabled_streams;
+ u32 current_rate;
+
+ u16 volumes[2];
+};
+
+static const struct snd_pcm_hardware zynqmp_dp_pcm_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+
+ .buffer_bytes_max = 128 * 1024,
+ .period_bytes_min = 256,
+ .period_bytes_max = 1024 * 1024,
+ .periods_min = 2,
+ .periods_max = 256,
+};
+
+static int zynqmp_dp_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ 256);
+
+ return 0;
+}
+
+static const struct snd_soc_ops zynqmp_dp_ops = {
+ .startup = zynqmp_dp_startup,
+};
+
+static void zynqmp_dp_audio_write(struct zynqmp_dpsub_audio *audio, int reg,
+ u32 val)
+{
+ writel(val, audio->base + reg);
+}
+
+static int dp_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *socdai)
+{
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct zynqmp_dpsub *dpsub =
+ snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0));
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+ int ret;
+ u32 sample_rate;
+ struct snd_aes_iec958 iec = { 0 };
+ unsigned long rate;
+
+ sample_rate = params_rate(params);
+
+ if (sample_rate != 48000 && sample_rate != 44100)
+ return -EINVAL;
+
+ guard(mutex)(&audio->enable_lock);
+
+ if (audio->enabled_streams && audio->current_rate != sample_rate) {
+ dev_err(dpsub->dev,
+ "Can't change rate while playback enabled\n");
+ return -EINVAL;
+ }
+
+ if (audio->enabled_streams > 0) {
+ /* Nothing to do */
+ audio->enabled_streams++;
+ return 0;
+ }
+
+ audio->current_rate = sample_rate;
+
+ /* Note: clock rate can only be changed if the clock is disabled */
+ ret = clk_set_rate(dpsub->aud_clk,
+ sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK);
+ if (ret) {
+ dev_err(dpsub->dev, "can't set aud_clk to %u err:%d\n",
+ sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK, ret);
+ return ret;
+ }
+
+ clk_prepare_enable(dpsub->aud_clk);
+
+ rate = clk_get_rate(dpsub->aud_clk);
+
+ /* Tolerate a deviation of up to +-10 Hz from the requested rate */
+ if (abs(sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK - rate) > 10) {
+ dev_err(dpsub->dev, "aud_clk rate is off by %ld Hz\n",
+ sample_rate * ZYNQMP_DISP_AUD_SMPL_RATE_TO_CLK - rate);
+ clk_disable_unprepare(dpsub->aud_clk);
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(dpsub->dev);
+
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+ audio->volumes[0] | (audio->volumes[1] << 16));
+
+ /* Clear the audio soft reset register as it's a non-reset flop. */
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
+
+ /* Only 2-channel audio is supported for now */
+ zynqmp_dp_audio_set_channels(dpsub->dp, 2);
+
+ zynqmp_dp_audio_write_n_m(dpsub->dp);
+
+ /* Channel status */
+
+ if (sample_rate == 48000)
+ iec.status[3] = IEC958_AES3_CON_FS_48000;
+ else
+ iec.status[3] = IEC958_AES3_CON_FS_44100;
+
+ for (unsigned int i = 0; i < AES_IEC958_STATUS_SIZE / 4; ++i) {
+ u32 v;
+
+ v = (iec.status[(i * 4) + 0] << 0) |
+ (iec.status[(i * 4) + 1] << 8) |
+ (iec.status[(i * 4) + 2] << 16) |
+ (iec.status[(i * 4) + 3] << 24);
+
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_CH_STATUS(i), v);
+ }
+
+ zynqmp_dp_audio_enable(dpsub->dp);
+
+ audio->enabled_streams++;
+
+ return 0;
+}
+
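+ /*
+ * Worked example of the clock relation above: aud_clk must run at
+ * 512 * fs, i.e. 48000 Hz * 512 = 24.576 MHz or 44100 Hz * 512 =
+ * 22.5792 MHz; a measured rate more than 10 Hz away from the target is
+ * rejected as a clock configuration problem.
+ */
+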
+static int dp_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *socdai)
+{
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct zynqmp_dpsub *dpsub =
+ snd_soc_dai_get_drvdata(snd_soc_rtd_to_cpu(rtd, 0));
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ guard(mutex)(&audio->enable_lock);
+
+ /* Nothing to do */
+ if (audio->enabled_streams > 1) {
+ audio->enabled_streams--;
+ return 0;
+ }
+
+ pm_runtime_put(dpsub->dev);
+
+ zynqmp_dp_audio_disable(dpsub->dp);
+
+ /*
+ * Reset doesn't work. If we assert reset between audio stop and start,
+ * the audio won't start anymore. Probably we are missing writing
+ * some audio related registers. A/B buf?
+ */
+ /*
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_SOFT_RESET,
+ ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
+ */
+
+ clk_disable_unprepare(dpsub->aud_clk);
+
+ audio->current_rate = 0;
+ audio->enabled_streams--;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops zynqmp_dp_dai_ops = {
+ .hw_params = dp_dai_hw_params,
+ .hw_free = dp_dai_hw_free,
+};
+
+/*
+ * Min = 10 * log10(0x1 / 0x2000) = -39.13
+ * Max = 10 * log10(0xffff / 0x2000) = 9.03
+ */
+static const DECLARE_TLV_DB_RANGE(zynqmp_dp_tlv,
+ 0x0, 0x0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, -3913, 1),
+ 0x1, 0x2000, TLV_DB_LINEAR_ITEM(-3913, 0),
+ 0x2000, 0xffff, TLV_DB_LINEAR_ITEM(0, 903),
+);
+
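+ /*
+ * Derivation of the endpoints above, with 0x2000 as the unity-gain
+ * point: 10 * log10(0x1 / 0x2000) = 10 * log10(1 / 8192) ~= -39.13 dB
+ * and 10 * log10(0xffff / 0x2000) ~= 10 * log10(8) ~= 9.03 dB, i.e. the
+ * -3913 and 903 centi-dB values in the TLV items.
+ */
+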
+static const struct snd_kcontrol_new zynqmp_dp_snd_controls[] = {
+ SOC_SINGLE_TLV("Input0 Playback Volume", 0,
+ 0, 0xffff, 0, zynqmp_dp_tlv),
+ SOC_SINGLE_TLV("Input1 Playback Volume", 1,
+ 0, 0xffff, 0, zynqmp_dp_tlv),
+};
+
+/*
+ * Note: these read & write functions only support two "registers", 0 and 1,
+ * for volume 0 and 1. In other words, these are not real register read/write
+ * functions.
+ *
+ * This is done to support caching the volume value for the case where the
+ * hardware is not enabled, and also to support locking as volumes 0 and 1
+ * are in the same register.
+ */
+static unsigned int zynqmp_dp_dai_read(struct snd_soc_component *component,
+ unsigned int reg)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(component->dev);
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ return audio->volumes[reg];
+}
+
+static int zynqmp_dp_dai_write(struct snd_soc_component *component,
+ unsigned int reg, unsigned int val)
+{
+ struct zynqmp_dpsub *dpsub = dev_get_drvdata(component->dev);
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ guard(mutex)(&audio->enable_lock);
+
+ audio->volumes[reg] = val;
+
+ if (audio->enabled_streams)
+ zynqmp_dp_audio_write(audio, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+ audio->volumes[0] |
+ (audio->volumes[1] << 16));
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver zynqmp_dp_component_driver = {
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .controls = zynqmp_dp_snd_controls,
+ .num_controls = ARRAY_SIZE(zynqmp_dp_snd_controls),
+ .read = zynqmp_dp_dai_read,
+ .write = zynqmp_dp_dai_write,
+};
+
+int zynqmp_audio_init(struct zynqmp_dpsub *dpsub)
+{
+ struct platform_device *pdev = to_platform_device(dpsub->dev);
+ struct device *dev = dpsub->dev;
+ struct zynqmp_dpsub_audio *audio;
+ struct snd_soc_card *card;
+ void *dev_data;
+ int ret;
+
+ if (!dpsub->aud_clk)
+ return 0;
+
+ audio = devm_kzalloc(dev, sizeof(*audio), GFP_KERNEL);
+ if (!audio)
+ return -ENOMEM;
+
+ dpsub->audio = audio;
+
+ mutex_init(&audio->enable_lock);
+
+ /* 0x2000 is unity gain (0 dB), i.e. no level change */
+ audio->volumes[0] = 0x2000;
+ audio->volumes[1] = 0x2000;
+
+ audio->dai_name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-dai", dev_name(dev));
+
+ for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
+ audio->link_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-dp-%u", dev_name(dev), i);
+ audio->pcm_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-pcm-%u", dev_name(dev), i);
+ }
+
+ audio->base = devm_platform_ioremap_resource_byname(pdev, "aud");
+ if (IS_ERR(audio->base))
+ return PTR_ERR(audio->base);
+
+ /* Create CPU DAI */
+
+ audio->dai_driver = (struct snd_soc_dai_driver) {
+ .name = audio->dai_name,
+ .ops = &zynqmp_dp_dai_ops,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ };
+
+ ret = devm_snd_soc_register_component(dev, &zynqmp_dp_component_driver,
+ &audio->dai_driver, 1);
+ if (ret) {
+ dev_err(dev, "Failed to register CPU DAI\n");
+ return ret;
+ }
+
+ /* Create PCMs */
+
+ for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
+ struct snd_dmaengine_pcm_config *pcm_config =
+ &audio->pcm_configs[i];
+
+ *pcm_config = (struct snd_dmaengine_pcm_config){
+ .name = audio->pcm_names[i],
+ .pcm_hardware = &zynqmp_dp_pcm_hw,
+ .prealloc_buffer_size = 64 * 1024,
+ .chan_names[SNDRV_PCM_STREAM_PLAYBACK] =
+ i == 0 ? "aud0" : "aud1",
+ };
+
+ ret = devm_snd_dmaengine_pcm_register(dev, pcm_config, 0);
+ if (ret) {
+ dev_err(dev, "Failed to register PCM %u\n", i);
+ return ret;
+ }
+ }
+
+ /* Create card */
+
+ card = &audio->card;
+ card->name = "DisplayPort";
+ card->long_name = "DisplayPort Monitor";
+ card->driver_name = "zynqmp_dpsub";
+ card->dev = dev;
+ card->owner = THIS_MODULE;
+ card->num_links = ZYNQMP_NUM_PCMS;
+ card->dai_link = audio->links;
+
+ for (unsigned int i = 0; i < ZYNQMP_NUM_PCMS; ++i) {
+ struct snd_soc_dai_link *link = &card->dai_link[i];
+
+ link->ops = &zynqmp_dp_ops;
+
+ link->name = audio->link_names[i];
+ link->stream_name = audio->link_names[i];
+
+ link->cpus = &audio->components[i].cpu;
+ link->num_cpus = 1;
+ link->cpus[0].dai_name = audio->dai_name;
+
+ link->codecs = &audio->components[i].codec;
+ link->num_codecs = 1;
+ link->codecs[0].name = "snd-soc-dummy";
+ link->codecs[0].dai_name = "snd-soc-dummy-dai";
+
+ link->platforms = &audio->components[i].platform;
+ link->num_platforms = 1;
+ link->platforms[0].name = audio->pcm_names[i];
+ }
+
+ /*
+ * HACK: devm_snd_soc_register_card() overwrites current drvdata
+ * so we need to hack it back.
+ */
+ dev_data = dev_get_drvdata(dev);
+ ret = devm_snd_soc_register_card(dev, card);
+ dev_set_drvdata(dev, dev_data);
+ if (ret) {
+ /*
+ * As older dtbs may not have the audio channel dmas defined,
+ * instead of returning an error here we'll continue and just
+ * mark the audio as disabled.
+ */
+ dev_err(dev, "Failed to register sound card, disabling audio support\n");
+
+ devm_kfree(dev, audio);
+ dpsub->audio = NULL;
+
+ return 0;
+ }
+
+ return 0;
+}
+
+void zynqmp_audio_uninit(struct zynqmp_dpsub *dpsub)
+{
+ struct zynqmp_dpsub_audio *audio = dpsub->audio;
+
+ if (!audio)
+ return;
+
+ if (!dpsub->aud_clk)
+ return;
+
+ mutex_destroy(&audio->enable_lock);
+}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index f5781939de9c..f953ca48a930 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -57,36 +57,6 @@ static const struct dev_pm_ops zynqmp_dpsub_pm_ops = {
};
/* -----------------------------------------------------------------------------
- * DPSUB Configuration
- */
-
-/**
- * zynqmp_dpsub_audio_enabled - If the audio is enabled
- * @dpsub: DisplayPort subsystem
- *
- * Return if the audio is enabled depending on the audio clock.
- *
- * Return: true if audio is enabled, or false.
- */
-bool zynqmp_dpsub_audio_enabled(struct zynqmp_dpsub *dpsub)
-{
- return !!dpsub->aud_clk;
-}
-
-/**
- * zynqmp_dpsub_get_audio_clk_rate - Get the current audio clock rate
- * @dpsub: DisplayPort subsystem
- *
- * Return: the current audio clock rate.
- */
-unsigned int zynqmp_dpsub_get_audio_clk_rate(struct zynqmp_dpsub *dpsub)
-{
- if (zynqmp_dpsub_audio_enabled(dpsub))
- return 0;
- return clk_get_rate(dpsub->aud_clk);
-}
-
-/* -----------------------------------------------------------------------------
* Probe & Remove
*/
@@ -264,10 +234,17 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
goto err_disp;
}
+ ret = zynqmp_audio_init(dpsub);
+ if (ret)
+ goto err_drm_cleanup;
+
dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
return 0;
+err_drm_cleanup:
+ if (dpsub->drm)
+ zynqmp_dpsub_drm_cleanup(dpsub);
err_disp:
drm_bridge_remove(dpsub->bridge);
zynqmp_disp_remove(dpsub);
@@ -287,6 +264,8 @@ static void zynqmp_dpsub_remove(struct platform_device *pdev)
{
struct zynqmp_dpsub *dpsub = platform_get_drvdata(pdev);
+ zynqmp_audio_uninit(dpsub);
+
if (dpsub->drm)
zynqmp_dpsub_drm_cleanup(dpsub);
@@ -320,7 +299,7 @@ MODULE_DEVICE_TABLE(of, zynqmp_dpsub_of_match);
static struct platform_driver zynqmp_dpsub_driver = {
.probe = zynqmp_dpsub_probe,
- .remove_new = zynqmp_dpsub_remove,
+ .remove = zynqmp_dpsub_remove,
.shutdown = zynqmp_dpsub_shutdown,
.driver = {
.name = "zynqmp-dpsub",
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
index b18554467e9c..d771b8b199e0 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.h
@@ -12,6 +12,8 @@
#ifndef _ZYNQMP_DPSUB_H_
#define _ZYNQMP_DPSUB_H_
+#include <linux/types.h>
+
struct clk;
struct device;
struct drm_bridge;
@@ -39,6 +41,8 @@ enum zynqmp_dpsub_format {
ZYNQMP_DPSUB_FORMAT_YONLY,
};
+struct zynqmp_dpsub_audio;
+
/**
* struct zynqmp_dpsub - ZynqMP DisplayPort Subsystem
* @dev: The physical device
@@ -56,6 +60,7 @@ enum zynqmp_dpsub_format {
* @layers: Video and graphics layers
* @dp: The DisplayPort controller
* @dma_align: DMA alignment constraint (must be a power of 2)
+ * @audio: DP audio data
*/
struct zynqmp_dpsub {
struct device *dev;
@@ -77,10 +82,17 @@ struct zynqmp_dpsub {
struct zynqmp_dp *dp;
unsigned int dma_align;
+
+ struct zynqmp_dpsub_audio *audio;
};
-bool zynqmp_dpsub_audio_enabled(struct zynqmp_dpsub *dpsub);
-unsigned int zynqmp_dpsub_get_audio_clk_rate(struct zynqmp_dpsub *dpsub);
+#ifdef CONFIG_DRM_ZYNQMP_DPSUB_AUDIO
+int zynqmp_audio_init(struct zynqmp_dpsub *dpsub);
+void zynqmp_audio_uninit(struct zynqmp_dpsub *dpsub);
+#else
+static inline int zynqmp_audio_init(struct zynqmp_dpsub *dpsub) { return 0; }
+static inline void zynqmp_audio_uninit(struct zynqmp_dpsub *dpsub) { }
+#endif
void zynqmp_dpsub_release(struct zynqmp_dpsub *dpsub);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index bd1368df7870..b47463473472 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -9,6 +9,7 @@
* - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
@@ -402,12 +403,12 @@ static const struct drm_driver zynqmp_dpsub_drm_driver = {
DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
+ DRM_FBDEV_DMA_DRIVER_OPS,
.fops = &zynqmp_dpsub_drm_fops,
.name = "zynqmp-dpsub",
.desc = "Xilinx DisplayPort Subsystem Driver",
- .date = "20130509",
.major = 1,
.minor = 0,
};
@@ -509,12 +510,12 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
if (ret)
return ret;
- drm_kms_helper_poll_init(drm);
-
ret = zynqmp_dpsub_kms_init(dpsub);
if (ret < 0)
goto err_poll_fini;
+ drm_kms_helper_poll_init(drm);
+
/* Reset all components and register the DRM device. */
drm_mode_config_reset(drm);
@@ -523,7 +524,7 @@ int zynqmp_dpsub_drm_init(struct zynqmp_dpsub *dpsub)
goto err_poll_fini;
/* Initialize fbdev generic emulation. */
- drm_fbdev_dma_setup(drm, 24);
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888);
return 0;
@@ -536,7 +537,7 @@ void zynqmp_dpsub_drm_cleanup(struct zynqmp_dpsub *dpsub)
{
struct drm_device *drm = &dpsub->drm->dev;
- drm_dev_unregister(drm);
+ drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
drm_encoder_cleanup(&dpsub->drm->encoder);
drm_kms_helper_poll_fini(drm);
diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
index 955c971c528d..a6f6779662a3 100644
--- a/drivers/gpu/host1x/context.c
+++ b/drivers/gpu/host1x/context.c
@@ -58,6 +58,7 @@ int host1x_memory_context_list_init(struct host1x *host1x)
ctx->dev.parent = host1x->dev;
ctx->dev.release = host1x_memory_context_release;
+ ctx->dev.dma_parms = &ctx->dma_parms;
dma_set_max_seg_size(&ctx->dev, UINT_MAX);
err = device_add(&ctx->dev);
diff --git a/drivers/gpu/host1x/context_bus.c b/drivers/gpu/host1x/context_bus.c
index d9421179d7b4..7cd0e1a5edd1 100644
--- a/drivers/gpu/host1x/context_bus.c
+++ b/drivers/gpu/host1x/context_bus.c
@@ -6,7 +6,7 @@
#include <linux/device.h>
#include <linux/of.h>
-struct bus_type host1x_context_device_bus_type = {
+const struct bus_type host1x_context_device_bus_type = {
.name = "host1x-context",
};
EXPORT_SYMBOL_GPL(host1x_context_device_bus_type);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index b62e4f0e8130..7b1d091f3c09 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -142,18 +142,29 @@ static const struct host1x_info host1x05_info = {
};
static const struct host1x_sid_entry tegra186_sid_table[] = {
- {
- /* VIC */
- .base = 0x1af0,
- .offset = 0x30,
- .limit = 0x34
- },
- {
- /* NVDEC */
- .base = 0x1b00,
- .offset = 0x30,
- .limit = 0x34
- },
+ { /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 },
+ { /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 },
+ { /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 },
+ { /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 },
+ { /* ISP */ .base = 0x1ae8, .offset = 0x50, .limit = 0x50 },
+ { /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 },
+ { /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 },
+ { /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 },
+ { /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 },
+ { /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 },
+ { /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 },
+ { /* VI 0 */ .base = 0x1b80, .offset = 0x10000, .limit = 0x10000 },
+ { /* VI 1 */ .base = 0x1b88, .offset = 0x20000, .limit = 0x20000 },
+ { /* VI 2 */ .base = 0x1b90, .offset = 0x30000, .limit = 0x30000 },
+ { /* VI 3 */ .base = 0x1b98, .offset = 0x40000, .limit = 0x40000 },
+ { /* VI 4 */ .base = 0x1ba0, .offset = 0x50000, .limit = 0x50000 },
+ { /* VI 5 */ .base = 0x1ba8, .offset = 0x60000, .limit = 0x60000 },
+ { /* VI 6 */ .base = 0x1bb0, .offset = 0x70000, .limit = 0x70000 },
+ { /* VI 7 */ .base = 0x1bb8, .offset = 0x80000, .limit = 0x80000 },
+ { /* VI 8 */ .base = 0x1bc0, .offset = 0x90000, .limit = 0x90000 },
+ { /* VI 9 */ .base = 0x1bc8, .offset = 0xa0000, .limit = 0xa0000 },
+ { /* VI 10 */ .base = 0x1bd0, .offset = 0xb0000, .limit = 0xb0000 },
+ { /* VI 11 */ .base = 0x1bd8, .offset = 0xc0000, .limit = 0xc0000 },
};
static const struct host1x_info host1x06_info = {
@@ -173,24 +184,26 @@ static const struct host1x_info host1x06_info = {
};
static const struct host1x_sid_entry tegra194_sid_table[] = {
- {
- /* VIC */
- .base = 0x1af0,
- .offset = 0x30,
- .limit = 0x34
- },
- {
- /* NVDEC */
- .base = 0x1b00,
- .offset = 0x30,
- .limit = 0x34
- },
- {
- /* NVDEC1 */
- .base = 0x1bc0,
- .offset = 0x30,
- .limit = 0x34
- },
+ { /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 },
+ { /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 },
+ { /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 },
+ { /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 },
+ { /* ISP */ .base = 0x1ae8, .offset = 0x800, .limit = 0x800 },
+ { /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 },
+ { /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 },
+ { /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 },
+ { /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 },
+ { /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 },
+ { /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 },
+ { /* VI */ .base = 0x1b80, .offset = 0x800, .limit = 0x800 },
+ { /* VI_THI */ .base = 0x1b88, .offset = 0x30, .limit = 0x34 },
+ { /* ISP_THI */ .base = 0x1b90, .offset = 0x30, .limit = 0x34 },
+ { /* PVA0_CLUSTER */ .base = 0x1b98, .offset = 0x0, .limit = 0x0 },
+ { /* PVA0_CLUSTER */ .base = 0x1ba0, .offset = 0x0, .limit = 0x0 },
+ { /* NVDLA0 */ .base = 0x1ba8, .offset = 0x30, .limit = 0x34 },
+ { /* NVDLA1 */ .base = 0x1bb0, .offset = 0x30, .limit = 0x34 },
+ { /* NVENC1 */ .base = 0x1bb8, .offset = 0x30, .limit = 0x34 },
+ { /* NVDEC1 */ .base = 0x1bc0, .offset = 0x30, .limit = 0x34 },
};
static const struct host1x_info host1x07_info = {
@@ -215,54 +228,35 @@ static const struct host1x_info host1x07_info = {
* and firmware stream ID in the MMIO path table.
*/
static const struct host1x_sid_entry tegra234_sid_table[] = {
- {
- /* SE2 MMIO */
- .base = 0x1658,
- .offset = 0x90,
- .limit = 0x90
- },
- {
- /* SE4 MMIO */
- .base = 0x1660,
- .offset = 0x90,
- .limit = 0x90
- },
- {
- /* SE2 channel */
- .base = 0x1738,
- .offset = 0x90,
- .limit = 0x90
- },
- {
- /* SE4 channel */
- .base = 0x1740,
- .offset = 0x90,
- .limit = 0x90
- },
- {
- /* VIC channel */
- .base = 0x17b8,
- .offset = 0x30,
- .limit = 0x30
- },
- {
- /* VIC MMIO */
- .base = 0x1688,
- .offset = 0x34,
- .limit = 0x34
- },
- {
- /* NVDEC channel */
- .base = 0x17c8,
- .offset = 0x30,
- .limit = 0x30,
- },
- {
- /* NVDEC MMIO */
- .base = 0x1698,
- .offset = 0x34,
- .limit = 0x34,
- },
+ { /* SE1 MMIO */ .base = 0x1650, .offset = 0x90, .limit = 0x90 },
+ { /* SE1 ch */ .base = 0x1730, .offset = 0x90, .limit = 0x90 },
+ { /* SE2 MMIO */ .base = 0x1658, .offset = 0x90, .limit = 0x90 },
+ { /* SE2 ch */ .base = 0x1738, .offset = 0x90, .limit = 0x90 },
+ { /* SE4 MMIO */ .base = 0x1660, .offset = 0x90, .limit = 0x90 },
+ { /* SE4 ch */ .base = 0x1740, .offset = 0x90, .limit = 0x90 },
+ { /* ISP MMIO */ .base = 0x1680, .offset = 0x800, .limit = 0x800 },
+ { /* VIC MMIO */ .base = 0x1688, .offset = 0x34, .limit = 0x34 },
+ { /* VIC ch */ .base = 0x17b8, .offset = 0x30, .limit = 0x30 },
+ { /* NVENC MMIO */ .base = 0x1690, .offset = 0x34, .limit = 0x34 },
+ { /* NVENC ch */ .base = 0x17c0, .offset = 0x30, .limit = 0x30 },
+ { /* NVDEC MMIO */ .base = 0x1698, .offset = 0x34, .limit = 0x34 },
+ { /* NVDEC ch */ .base = 0x17c8, .offset = 0x30, .limit = 0x30 },
+ { /* NVJPG MMIO */ .base = 0x16a0, .offset = 0x34, .limit = 0x34 },
+ { /* NVJPG ch */ .base = 0x17d0, .offset = 0x30, .limit = 0x30 },
+ { /* TSEC MMIO */ .base = 0x16a8, .offset = 0x30, .limit = 0x34 },
+ { /* NVJPG1 MMIO */ .base = 0x16b0, .offset = 0x34, .limit = 0x34 },
+ { /* NVJPG1 ch */ .base = 0x17a8, .offset = 0x30, .limit = 0x30 },
+ { /* VI MMIO */ .base = 0x16b8, .offset = 0x800, .limit = 0x800 },
+ { /* VI_THI MMIO */ .base = 0x16c0, .offset = 0x30, .limit = 0x34 },
+ { /* ISP_THI MMIO */ .base = 0x16c8, .offset = 0x30, .limit = 0x34 },
+ { /* NVDLA MMIO */ .base = 0x16d8, .offset = 0x30, .limit = 0x34 },
+ { /* NVDLA ch */ .base = 0x17e0, .offset = 0x30, .limit = 0x34 },
+ { /* NVDLA1 MMIO */ .base = 0x16e0, .offset = 0x30, .limit = 0x34 },
+ { /* NVDLA1 ch */ .base = 0x17e8, .offset = 0x30, .limit = 0x34 },
+ { /* OFA MMIO */ .base = 0x16e8, .offset = 0x34, .limit = 0x34 },
+ { /* OFA ch */ .base = 0x1768, .offset = 0x30, .limit = 0x30 },
+ { /* VI2 MMIO */ .base = 0x16f0, .offset = 0x800, .limit = 0x800 },
+ { /* VI2_THI MMIO */ .base = 0x16f8, .offset = 0x30, .limit = 0x34 },
};
static const struct host1x_info host1x08_info = {
@@ -625,12 +619,6 @@ static int host1x_probe(struct platform_device *pdev)
goto free_contexts;
}
- err = host1x_intr_init(host);
- if (err) {
- dev_err(&pdev->dev, "failed to initialize interrupts\n");
- goto deinit_syncpt;
- }
-
pm_runtime_enable(&pdev->dev);
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
@@ -642,6 +630,12 @@ static int host1x_probe(struct platform_device *pdev)
if (err)
goto pm_disable;
+ err = host1x_intr_init(host);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize interrupts\n");
+ goto pm_put;
+ }
+
host1x_debug_init(host);
err = host1x_register(host);
@@ -658,13 +652,11 @@ unregister:
host1x_unregister(host);
deinit_debugfs:
host1x_debug_deinit(host);
-
+ host1x_intr_deinit(host);
+pm_put:
pm_runtime_put_sync_suspend(&pdev->dev);
pm_disable:
pm_runtime_disable(&pdev->dev);
-
- host1x_intr_deinit(host);
-deinit_syncpt:
host1x_syncpt_deinit(host);
free_contexts:
host1x_memory_context_list_free(&host->context_list);
@@ -777,7 +769,7 @@ static struct platform_driver tegra_host1x_driver = {
.pm = &host1x_pm_ops,
},
.probe = host1x_probe,
- .remove_new = host1x_remove,
+ .remove = host1x_remove,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 92031b240a17..d3855a1c6b47 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -175,11 +175,11 @@ struct host1x {
};
void host1x_common_writel(struct host1x *host1x, u32 v, u32 r);
-void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
+void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r);
u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r);
-void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
+void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r);
u32 host1x_sync_readl(struct host1x *host1x, u32 r);
-void host1x_ch_writel(struct host1x_channel *ch, u32 r, u32 v);
+void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r);
u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);
static inline void host1x_hw_syncpt_restore(struct host1x *host,
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 1b65a10b9dfc..3f3f0018eee0 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -254,12 +254,24 @@ static void timeout_release_mlock(struct host1x_cdma *cdma)
u32 offset;
switch (ch->client->class) {
+ case HOST1X_CLASS_NVJPG1:
+ offset = HOST1X_COMMON_NVJPG1_MLOCK;
+ break;
+ case HOST1X_CLASS_NVENC:
+ offset = HOST1X_COMMON_NVENC_MLOCK;
+ break;
case HOST1X_CLASS_VIC:
offset = HOST1X_COMMON_VIC_MLOCK;
break;
+ case HOST1X_CLASS_NVJPG:
+ offset = HOST1X_COMMON_NVJPG_MLOCK;
+ break;
case HOST1X_CLASS_NVDEC:
offset = HOST1X_COMMON_NVDEC_MLOCK;
break;
+ case HOST1X_CLASS_OFA:
+ offset = HOST1X_COMMON_OFA_MLOCK;
+ break;
default:
WARN(1, "%s was not updated for class %u", __func__, ch->client->class);
return;
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 54e31d81517b..4c32aa1b95e8 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -177,7 +177,16 @@ static void show_gather(struct output *o, dma_addr_t phys_addr,
for (i = 0; i < words; i++) {
dma_addr_t addr = phys_addr + i * 4;
- u32 val = *(map_addr + offset / 4 + i);
+ u32 voffset = offset + i * 4;
+ u32 val;
+
+ /* If we reach the RESTART opcode, continue at the beginning of the push buffer */
+ if (cdma && voffset >= cdma->push_buffer.size) {
+ addr -= cdma->push_buffer.size;
+ voffset -= cdma->push_buffer.size;
+ }
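+ /* e.g. with a 4 KiB push buffer, offset 0x1008 wraps to offset 0x8 */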
+
+ val = *(map_addr + voffset / 4);
if (!data_count) {
host1x_debug_output(o, " %pad: %08x: ", &addr, val);
@@ -203,7 +212,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
job->num_slots, job->num_unpins);
show_gather(o, pb->dma + job->first_get, job->num_slots * 2, cdma,
- pb->dma + job->first_get, pb->mapped + job->first_get);
+ pb->dma, pb->mapped);
for (i = 0; i < job->num_cmds; i++) {
struct host1x_job_gather *g;
@@ -227,7 +236,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
host1x_debug_output(o, " GATHER at %pad+%#x, %d words\n",
&g->base, g->offset, g->words);
- show_gather(o, g->base + g->offset, g->words, cdma,
+ show_gather(o, g->base + g->offset, g->words, NULL,
g->base, mapped);
if (!job->gather_copy_mapped)
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 3535be9daa1f..947323f4a234 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1467,7 +1467,7 @@ static struct platform_driver imx_ipu_driver = {
.of_match_table = imx_ipu_dt_ids,
},
.probe = ipu_probe,
- .remove_new = ipu_remove,
+ .remove = ipu_remove,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 41bd5dbd7356..7aac70368b00 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -374,7 +374,7 @@ static const struct of_device_id ipu_pre_dt_ids[] = {
struct platform_driver ipu_pre_drv = {
.probe = ipu_pre_probe,
- .remove_new = ipu_pre_remove,
+ .remove = ipu_pre_remove,
.driver = {
.name = "imx-ipu-pre",
.of_match_table = ipu_pre_dt_ids,
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index afb2d72e9175..d38d3ba54d72 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -469,7 +469,7 @@ static const struct of_device_id ipu_prg_dt_ids[] = {
struct platform_driver ipu_prg_drv = {
.probe = ipu_prg_probe,
- .remove_new = ipu_prg_remove,
+ .remove = ipu_prg_remove,
.driver = {
.name = "imx-ipu-prg",
.pm = &prg_pm_ops,
diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
index d022bfb5e95d..a0f3e9422721 100644
--- a/drivers/greybus/interface.c
+++ b/drivers/greybus/interface.c
@@ -780,7 +780,7 @@ const struct device_type greybus_interface_type = {
* The position of interface within the Endo is encoded in "interface_id"
* argument.
*
- * Returns a pointer to the new interfce or a null pointer if a
+ * Returns a pointer to the new interface or a null pointer if a
* failure occurs due to memory exhaustion.
*/
struct gb_interface *gb_interface_create(struct gb_module *module,
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index f8a56d631242..b53eb569bd49 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -213,13 +213,16 @@ config HID_CHICONY
config HID_CORSAIR
tristate "Corsair devices"
depends on USB_HID && LEDS_CLASS
+ select POWER_SUPPLY
help
Support for Corsair devices that are not fully compliant with the
HID standard.
+ Support for Corsair Void headsets.
Supported devices:
- Vengeance K90
- Scimitar PRO RGB
+ - Corsair Void headsets
config HID_COUGAR
tristate "Cougar devices"
@@ -465,6 +468,15 @@ config HID_KYE
- MousePen i608X tablet
- EasyPen M610X tablet
+config HID_KYSONA
+ tristate "Kysona devices"
+ depends on USB_HID
+ help
+ Support for Kysona mice.
+
+ Say Y here if you have a Kysona M600 mouse
+ and want to be able to read its battery capacity.
+
config HID_UCLOGIC
tristate "UC-Logic"
depends on USB_HID
@@ -775,7 +787,7 @@ config HID_NINTENDO
Adds support for the Nintendo Switch Joy-Cons, NSO, Pro Controller.
All controllers support bluetooth, and the Pro Controller also supports
its USB mode. This also includes support for the Nintendo Switch Online
- Controllers which include the Genesis, SNES, and N64 controllers.
+ Controllers which include the NES, Genesis, SNES, and N64 controllers.
To compile this driver as a module, choose M here: the
module will be called hid-nintendo.
@@ -1096,6 +1108,7 @@ config HID_RMI
select RMI4_F11
select RMI4_F12
select RMI4_F30
+ select RMI4_F3A
help
Support for Synaptics RMI4 touchpads.
Say Y here if you have a Synaptics RMI4 touchpads over i2c-hid or usbhid
@@ -1373,4 +1386,6 @@ source "drivers/hid/amd-sfh-hid/Kconfig"
source "drivers/hid/surface-hid/Kconfig"
+source "drivers/hid/intel-thc-hid/Kconfig"
+
endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 496dab54c73a..482b096eea28 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -38,7 +38,7 @@ obj-$(CONFIG_HID_BIGBEN_FF) += hid-bigbenff.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CMEDIA) += hid-cmedia.o
-obj-$(CONFIG_HID_CORSAIR) += hid-corsair.o
+obj-$(CONFIG_HID_CORSAIR) += hid-corsair.o hid-corsair-void.o
obj-$(CONFIG_HID_COUGAR) += hid-cougar.o
obj-$(CONFIG_HID_CP2112) += hid-cp2112.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
@@ -70,6 +70,7 @@ obj-$(CONFIG_HID_JABRA) += hid-jabra.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
+obj-$(CONFIG_HID_KYSONA) += hid-kysona.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
obj-$(CONFIG_HID_LETSKETCH) += hid-letsketch.o
@@ -170,3 +171,5 @@ obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
+
+obj-$(CONFIG_INTEL_THC_HID) += intel-thc-hid/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 4b59687ff5d8..3438d392920f 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -236,9 +236,9 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->in_data = in_data;
for (i = 0; i < cl_data->num_hid_devices; i++) {
- in_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
- &cl_data->sensor_dma_addr[i],
- GFP_KERNEL);
+ in_data->sensor_virt_addr[i] = dmam_alloc_coherent(dev, sizeof(int) * 8,
+ &cl_data->sensor_dma_addr[i],
+ GFP_KERNEL);
if (!in_data->sensor_virt_addr[i]) {
rc = -ENOMEM;
goto cleanup;
@@ -331,7 +331,6 @@ cleanup:
int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
{
struct amdtp_cl_data *cl_data = privdata->cl_data;
- struct amd_input_data *in_data = cl_data->in_data;
int i, status;
for (i = 0; i < cl_data->num_hid_devices; i++) {
@@ -351,12 +350,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
cancel_delayed_work_sync(&cl_data->work_buffer);
amdtp_hid_remove(cl_data);
- for (i = 0; i < cl_data->num_hid_devices; i++) {
- if (in_data->sensor_virt_addr[i]) {
- dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
- in_data->sensor_virt_addr[i],
- cl_data->sensor_dma_addr[i]);
- }
- }
return 0;
}
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index e5620d7db569..799b8686a88a 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -43,6 +43,7 @@ struct amd_mp2_sensor_info {
struct sfh_dev_status {
bool is_hpd_present;
bool is_als_present;
+ bool is_sra_present;
};
struct amd_mp2_dev {
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 0c28ca349bcd..48cfd0c58241 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -122,7 +122,7 @@ int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
{
int rc;
- pci_intx(privdata->pdev, true);
+ pcim_intx(privdata->pdev, true);
rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq,
amd_sfh_irq_handler, 0, DRIVER_NAME, privdata);
@@ -248,7 +248,7 @@ static void amd_mp2_pci_remove(void *privdata)
struct amd_mp2_dev *mp2 = privdata;
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
- pci_intx(mp2->pdev, false);
+ pcim_intx(mp2->pdev, false);
amd_sfh_clear_intr(mp2);
}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index db36d87d5634..e9929c4aa72e 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -30,6 +30,7 @@ static int amd_sfh_get_sensor_num(struct amd_mp2_dev *mp2, u8 *sensor_id)
case ACCEL_IDX:
case GYRO_IDX:
case MAG_IDX:
+ case SRA_IDX:
case ALS_IDX:
case HPD_IDX:
if (BIT(i) & slist->sl.sensors)
@@ -58,6 +59,8 @@ static const char *get_sensor_name(int idx)
return "gyroscope";
case MAG_IDX:
return "magnetometer";
+ case SRA_IDX:
+ return "SRA";
case ALS_IDX:
return "ALS";
case HPD_IDX:
@@ -130,6 +133,23 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_sts[i] = SENSOR_DISABLED;
+
+ if (cl_data->num_hid_devices == 1 && cl_data->sensor_idx[0] == SRA_IDX)
+ break;
+
+ if (cl_data->sensor_idx[i] == SRA_IDX) {
+ info.sensor_idx = cl_data->sensor_idx[i];
+ writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
+ mp2_ops->start(privdata, info);
+ status = amd_sfh_wait_for_response
+ (privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
+
+ cl_data->sensor_sts[i] = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ privdata->dev_en.is_sra_present = true;
+ continue;
+ }
+
cl_data->sensor_requested_cnt[i] = 0;
cl_data->cur_hid_dev = i;
cl_idx = cl_data->sensor_idx[i];
@@ -181,6 +201,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
}
for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] == SRA_IDX)
+ continue;
cl_data->cur_hid_dev = i;
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
cl_data->is_any_sensor_enabled = true;
@@ -289,7 +311,7 @@ static void amd_mp2_pci_remove(void *privdata)
sfh_deinit_emp2();
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
- pci_intx(mp2->pdev, false);
+ pcim_intx(mp2->pdev, false);
amd_sfh_clear_intr(mp2);
}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
index 4676f060da26..ffb98b4c36cb 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -87,6 +87,41 @@ void sfh_interface_init(struct amd_mp2_dev *mp2)
emp2 = mp2;
}
+static int amd_sfh_mode_info(u32 *platform_type, u32 *laptop_placement)
+{
+ struct sfh_op_mode mode;
+
+ if (!platform_type || !laptop_placement)
+ return -EINVAL;
+
+ if (!emp2 || !emp2->dev_en.is_sra_present)
+ return -ENODEV;
+
+ mode.val = readl(emp2->mmio + amd_get_c2p_val(emp2, 3));
+
+ *platform_type = mode.op_mode.devicemode;
+
+ if (mode.op_mode.ontablestate == 1) {
+ *laptop_placement = ON_TABLE;
+ } else if (mode.op_mode.ontablestate == 2) {
+ *laptop_placement = ON_LAP_MOTION;
+ } else if (mode.op_mode.inbagstate == 1) {
+ *laptop_placement = IN_BAG;
+ } else if (mode.op_mode.outbagstate == 1) {
+ *laptop_placement = OUT_OF_BAG;
+ } else if (mode.op_mode.ontablestate == 0 || mode.op_mode.inbagstate == 0 ||
+ mode.op_mode.outbagstate == 0) {
+ *laptop_placement = LP_UNKNOWN;
+ pr_warn_once("Unknown laptop placement\n");
+ } else if (mode.op_mode.ontablestate == 3 || mode.op_mode.inbagstate == 3 ||
+ mode.op_mode.outbagstate == 3) {
+ *laptop_placement = LP_UNDEFINED;
+ pr_warn_once("Undefined laptop placement\n");
+ }
+
+ return 0;
+}
+
static int amd_sfh_hpd_info(u8 *user_present)
{
struct hpd_status hpdstatus;
@@ -131,6 +166,9 @@ int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op)
return amd_sfh_hpd_info(&sfh_info->user_present);
case MT_ALS:
return amd_sfh_als_info(&sfh_info->ambient_light);
+ case MT_SRA:
+ return amd_sfh_mode_info(&sfh_info->platform_type,
+ &sfh_info->laptop_placement);
}
}
return -EINVAL;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
index 2c211d28764d..665c99ad779f 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
@@ -22,8 +22,9 @@ enum sensor_index {
ACCEL_IDX,
GYRO_IDX,
MAG_IDX,
- ALS_IDX = 4,
- HPD_IDX = 5,
+ SRA_IDX,
+ ALS_IDX,
+ HPD_IDX,
MAX_IDX = 15,
};
@@ -164,6 +165,25 @@ struct hpd_status {
};
};
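+
+/* Note: the bit-field widths below sum to exactly 32, filling the raw u32 view. */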
+struct sfh_op_mode {
+ union {
+ u32 val;
+ struct {
+ u32 mode : 3;
+ u32 lidstatus : 1;
+ u32 angle : 10;
+ u32 inbagstatedbg : 2;
+ u32 ontablestate : 2;
+ u32 inbagstate : 2;
+ u32 outbagstate : 2;
+ u32 inbagmlcstate : 1;
+ u32 powerstate : 2;
+ u32 data : 3;
+ u32 devicemode : 4;
+ } op_mode;
+ };
+};
+
void sfh_interface_init(struct amd_mp2_dev *mp2);
void sfh_deinit_emp2(void);
void amd_sfh1_1_set_desc_ops(struct amd_mp2_ops *mp2_ops);
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 8420c227e21b..2e96ec6a3073 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -19,7 +19,7 @@
#include <linux/module.h>
#include "hid_bpf_dispatch.h"
-struct hid_ops *hid_ops;
+const struct hid_ops *hid_ops;
EXPORT_SYMBOL(hid_ops);
u8 *
@@ -148,7 +148,7 @@ out:
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_output_report);
-u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size)
+const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size)
{
int ret;
struct hid_bpf_ctx_kern ctx_kern = {
@@ -183,7 +183,7 @@ u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned
ignore_bpf:
kfree(ctx_kern.data);
- return kmemdup(rdesc, *size, GFP_KERNEL);
+ return rdesc;
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
@@ -260,8 +260,11 @@ int hid_bpf_allocate_event_data(struct hid_device *hdev)
int hid_bpf_reconnect(struct hid_device *hdev)
{
- if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
+ if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) {
+ /* trigger call to call_hid_bpf_rdesc_fixup() during the next probe */
+ hdev->bpf_rsize = 0;
return device_reprobe(&hdev->dev);
+ }
return 0;
}
@@ -349,7 +352,6 @@ __hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
{
struct hid_report_enum *report_enum;
struct hid_report *report;
- struct hid_device *hdev;
u32 report_len;
/* check arguments */
@@ -368,9 +370,7 @@ __hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
if (*buf__sz < 1)
return -EINVAL;
- hdev = (struct hid_device *)ctx->hid; /* discard const */
-
- report_enum = hdev->report_enum + rtype;
+ report_enum = ctx->hid->report_enum + rtype;
report = hid_ops->hid_get_report(report_enum, buf);
if (!report)
return -EINVAL;
@@ -399,7 +399,6 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
enum hid_report_type rtype, enum hid_class_request reqtype)
{
struct hid_bpf_ctx_kern *ctx_kern;
- struct hid_device *hdev;
size_t size = buf__sz;
u8 *dma_data;
int ret;
@@ -426,13 +425,11 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
return -EINVAL;
}
- hdev = (struct hid_device *)ctx->hid; /* discard const */
-
dma_data = kmemdup(buf, size, GFP_KERNEL);
if (!dma_data)
return -ENOMEM;
- ret = hid_ops->hid_hw_raw_request(hdev,
+ ret = hid_ops->hid_hw_raw_request(ctx->hid,
dma_data[0],
dma_data,
size,
@@ -461,7 +458,6 @@ __bpf_kfunc int
hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
{
struct hid_bpf_ctx_kern *ctx_kern;
- struct hid_device *hdev;
size_t size = buf__sz;
u8 *dma_data;
int ret;
@@ -475,13 +471,11 @@ hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
if (ret)
return ret;
- hdev = (struct hid_device *)ctx->hid; /* discard const */
-
dma_data = kmemdup(buf, size, GFP_KERNEL);
if (!dma_data)
return -ENOMEM;
- ret = hid_ops->hid_hw_output_report(hdev, dma_data, size, (u64)(long)ctx, true);
+ ret = hid_ops->hid_hw_output_report(ctx->hid, dma_data, size, (u64)(long)ctx, true);
kfree(dma_data);
return ret;
diff --git a/drivers/hid/bpf/progs/Huion__Dial-2.bpf.c b/drivers/hid/bpf/progs/Huion__Dial-2.bpf.c
index 2411dec6db08..9670e5ef8d54 100644
--- a/drivers/hid/bpf/progs/Huion__Dial-2.bpf.c
+++ b/drivers/hid/bpf/progs/Huion__Dial-2.bpf.c
@@ -214,7 +214,8 @@ static const __u8 fixed_rdesc_pad[] = {
CollectionApplication(
// -- Byte 0 in report
ReportId(PAD_REPORT_ID)
- LogicalRange_i8(0, 1)
+ LogicalMaximum_i8(0)
+ LogicalMaximum_i8(1)
UsagePage_Digitizers
Usage_Dig_TabletFunctionKeys
CollectionPhysical(
@@ -234,14 +235,17 @@ static const __u8 fixed_rdesc_pad[] = {
Input(Var|Abs)
// Byte 4 in report is the dial
Usage_GD_Wheel
- LogicalRange_i8(-1, 1)
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
ReportCount(1)
ReportSize(8)
Input(Var|Rel)
// Byte 5 is the button state
UsagePage_Button
- UsageRange_i8(0x01, 0x8)
- LogicalRange_i8(0x0, 0x1)
+ UsageMinimum_i8(0x01)
+ UsageMaximum_i8(0x08)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
ReportCount(7)
ReportSize(1)
Input(Var|Abs)
@@ -265,7 +269,8 @@ static const __u8 fixed_rdesc_pen[] = {
Usage_Dig_TipSwitch
Usage_Dig_BarrelSwitch
Usage_Dig_SecondaryBarrelSwitch // maps eraser to BTN_STYLUS2
- LogicalRange_i8(0, 1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
ReportSize(1)
ReportCount(3)
Input(Var|Abs)
@@ -280,22 +285,28 @@ static const __u8 fixed_rdesc_pen[] = {
UsagePage_GenericDesktop
Unit(cm)
UnitExponent(-1)
- PhysicalRange_i16(0, 266)
- LogicalRange_i16(0, 32767)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(266)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32767)
Usage_GD_X
Input(Var|Abs) // Bytes 2+3
- PhysicalRange_i16(0, 166)
- LogicalRange_i16(0, 32767)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(166)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32767)
Usage_GD_Y
Input(Var|Abs) // Bytes 4+5
)
UsagePage_Digitizers
Usage_Dig_TipPressure
- LogicalRange_i16(0, 8191)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(8191)
Input(Var|Abs) // Byte 6+7
ReportSize(8)
ReportCount(2)
- LogicalRange_i8(-60, 60)
+ LogicalMinimum_i8(-60)
+ LogicalMaximum_i8(60)
Usage_Dig_XTilt
Usage_Dig_YTilt
Input(Var|Abs) // Byte 8+9
@@ -313,7 +324,8 @@ static const __u8 fixed_rdesc_vendor[] = {
Usage_Dig_Pen
CollectionPhysical(
// Byte 1 are the buttons
- LogicalRange_i8(0, 1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
ReportSize(1)
Usage_Dig_TipSwitch
Usage_Dig_BarrelSwitch
@@ -333,25 +345,31 @@ static const __u8 fixed_rdesc_vendor[] = {
UnitExponent(-1)
// Note: reported logical range differs
// from the pen report ID for x and y
- LogicalRange_i16(0, 53340)
- PhysicalRange_i16(0, 266)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(53340)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(266)
// Bytes 2/3 in report
Usage_GD_X
Input(Var|Abs)
- LogicalRange_i16(0, 33340)
- PhysicalRange_i16(0, 166)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(33340)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(166)
// Bytes 4/5 in report
Usage_GD_Y
Input(Var|Abs)
)
// Bytes 6/7 in report
- LogicalRange_i16(0, 8191)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(8191)
Usage_Dig_TipPressure
Input(Var|Abs)
// Bytes 8/9 in report
ReportCount(1) // Padding
Input(Const)
- LogicalRange_i8(-60, 60)
+ LogicalMinimum_i8(-60)
+ LogicalMaximum_i8(60)
// Byte 10 in report
Usage_Dig_XTilt
// Byte 11 in report
@@ -366,7 +384,8 @@ static const __u8 fixed_rdesc_vendor[] = {
CollectionApplication(
// Byte 0
ReportId(PAD_REPORT_ID)
- LogicalRange_i8(0, 1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
UsagePage_Digitizers
Usage_Dig_TabletFunctionKeys
CollectionPhysical(
@@ -386,15 +405,18 @@ static const __u8 fixed_rdesc_vendor[] = {
Input(Var|Abs)
// Byte 4 is the button state
UsagePage_Button
- UsageRange_i8(0x01, 0x8)
- LogicalRange_i8(0x0, 0x1)
+ UsageMinimum_i8(0x1)
+ UsageMaximum_i8(0x8)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
ReportCount(8)
ReportSize(1)
Input(Var|Abs)
// Byte 5 is the top dial
UsagePage_GenericDesktop
Usage_GD_Wheel
- LogicalRange_i8(-1, 1)
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
ReportCount(1)
ReportSize(8)
Input(Var|Rel)
diff --git a/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c b/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
index b09b80132368..13f64fb49800 100644
--- a/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
+++ b/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
@@ -170,7 +170,8 @@ static const __u8 fixed_rdesc_pad[] = {
CollectionApplication(
// -- Byte 0 in report
ReportId(PAD_REPORT_ID)
- LogicalRange_i8(0, 1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
UsagePage_Digitizers
Usage_Dig_TabletFunctionKeys
CollectionPhysical(
@@ -190,14 +191,17 @@ static const __u8 fixed_rdesc_pad[] = {
Input(Var|Abs)
// Byte 4 in report is the wheel
Usage_GD_Wheel
- LogicalRange_i8(-1, 1)
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
ReportCount(1)
ReportSize(8)
Input(Var|Rel)
// Byte 5 is the button state
UsagePage_Button
- UsageRange_i8(0x01, 0x6)
- LogicalRange_i8(0x01, 0x6)
+ UsageMinimum_i8(0x1)
+ UsageMaximum_i8(0x6)
+ LogicalMinimum_i8(0x1)
+ LogicalMaximum_i8(0x6)
ReportCount(1)
ReportSize(8)
Input(Arr|Abs)
@@ -219,7 +223,8 @@ static const __u8 fixed_rdesc_pen[] = {
Usage_Dig_TipSwitch
Usage_Dig_BarrelSwitch
Usage_Dig_SecondaryBarrelSwitch // maps eraser to BTN_STYLUS2
- LogicalRange_i8(0, 1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
ReportSize(1)
ReportCount(3)
Input(Var|Abs)
@@ -234,18 +239,23 @@ static const __u8 fixed_rdesc_pen[] = {
UsagePage_GenericDesktop
Unit(cm)
UnitExponent(-1)
- PhysicalRange_i16(0, 160)
- LogicalRange_i16(0, 32767)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(160)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32767)
Usage_GD_X
Input(Var|Abs) // Bytes 2+3
- PhysicalRange_i16(0, 100)
- LogicalRange_i16(0, 32767)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(100)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32767)
Usage_GD_Y
Input(Var|Abs) // Bytes 4+5
)
UsagePage_Digitizers
Usage_Dig_TipPressure
- LogicalRange_i16(0, 8191)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(8191)
Input(Var|Abs) // Byte 6+7
// Two bytes padding so we don't need to change the report at all
ReportSize(8)
@@ -265,7 +275,8 @@ static const __u8 fixed_rdesc_vendor[] = {
Usage_Dig_Pen
CollectionPhysical(
// Byte 1 are the buttons
- LogicalRange_i8(0, 1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
ReportSize(1)
Usage_Dig_TipSwitch
Usage_Dig_BarrelSwitch
@@ -285,19 +296,24 @@ static const __u8 fixed_rdesc_vendor[] = {
UnitExponent(-1)
// Note: reported logical range differs
// from the pen report ID for x and y
- LogicalRange_i16(0, 32000)
- PhysicalRange_i16(0, 160)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32000)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(160)
// Bytes 2/3 in report
Usage_GD_X
Input(Var|Abs)
- LogicalRange_i16(0, 20000)
- PhysicalRange_i16(0, 100)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(20000)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(100)
// Bytes 4/5 in report
Usage_GD_Y
Input(Var|Abs)
)
// Bytes 6/7 in report
- LogicalRange_i16(0, 8192)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(8192)
Usage_Dig_TipPressure
Input(Var|Abs)
)
@@ -307,7 +323,8 @@ static const __u8 fixed_rdesc_vendor[] = {
CollectionApplication(
// Byte 0
ReportId(PAD_REPORT_ID)
- LogicalRange_i8(0, 1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
UsagePage_Digitizers
Usage_Dig_TabletFunctionKeys
CollectionPhysical(
@@ -327,8 +344,10 @@ static const __u8 fixed_rdesc_vendor[] = {
Input(Var|Abs)
// Byte 4 is the button state
UsagePage_Button
- UsageRange_i8(0x01, 0x6)
- LogicalRange_i8(0x0, 0x1)
+ UsageMinimum_i8(0x1)
+ UsageMaximum_i8(0x6)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
ReportCount(6)
ReportSize(1)
Input(Var|Abs)
@@ -337,7 +356,8 @@ static const __u8 fixed_rdesc_vendor[] = {
// Byte 5 is the wheel
UsagePage_GenericDesktop
Usage_GD_Wheel
- LogicalRange_i8(-1, 1)
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
ReportCount(1)
ReportSize(8)
Input(Var|Rel)
diff --git a/drivers/hid/bpf/progs/Mistel__MD770.bpf.c b/drivers/hid/bpf/progs/Mistel__MD770.bpf.c
new file mode 100644
index 000000000000..fb8b5a6968b1
--- /dev/null
+++ b/drivers/hid/bpf/progs/Mistel__MD770.bpf.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Tatsuyuki Ishi
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HOLTEK 0x04D9
+#define PID_MD770 0x0339
+#define RDESC_SIZE 203
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_HOLTEK, PID_MD770)
+);
+
+/*
+ * The Mistel MD770 keyboard reports the first 6 simultaneous key presses
+ * through the first interface, and anything beyond that through a second
+ * interface. Unfortunately, the second interface's report descriptor has an
+ * error, causing events to be malformed and ignored. This HID-BPF driver
+ * fixes the descriptor to allow NKRO to work again.
+ *
+ * For reference, this is the original report descriptor:
+ *
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * 0x09, 0x80, // Usage (System Control) 2
+ * 0xa1, 0x01, // Collection (Application) 4
+ * 0x85, 0x01, // Report ID (1) 6
+ * 0x19, 0x81, // Usage Minimum (129) 8
+ * 0x29, 0x83, // Usage Maximum (131) 10
+ * 0x15, 0x00, // Logical Minimum (0) 12
+ * 0x25, 0x01, // Logical Maximum (1) 14
+ * 0x95, 0x03, // Report Count (3) 16
+ * 0x75, 0x01, // Report Size (1) 18
+ * 0x81, 0x02, // Input (Data,Var,Abs) 20
+ * 0x95, 0x01, // Report Count (1) 22
+ * 0x75, 0x05, // Report Size (5) 24
+ * 0x81, 0x01, // Input (Cnst,Arr,Abs) 26
+ * 0xc0, // End Collection 28
+ * 0x05, 0x0c, // Usage Page (Consumer Devices) 29
+ * 0x09, 0x01, // Usage (Consumer Control) 31
+ * 0xa1, 0x01, // Collection (Application) 33
+ * 0x85, 0x02, // Report ID (2) 35
+ * 0x15, 0x00, // Logical Minimum (0) 37
+ * 0x25, 0x01, // Logical Maximum (1) 39
+ * 0x95, 0x12, // Report Count (18) 41
+ * 0x75, 0x01, // Report Size (1) 43
+ * 0x0a, 0x83, 0x01, // Usage (AL Consumer Control Config) 45
+ * 0x0a, 0x8a, 0x01, // Usage (AL Email Reader) 48
+ * 0x0a, 0x92, 0x01, // Usage (AL Calculator) 51
+ * 0x0a, 0x94, 0x01, // Usage (AL Local Machine Browser) 54
+ * 0x09, 0xcd, // Usage (Play/Pause) 57
+ * 0x09, 0xb7, // Usage (Stop) 59
+ * 0x09, 0xb6, // Usage (Scan Previous Track) 61
+ * 0x09, 0xb5, // Usage (Scan Next Track) 63
+ * 0x09, 0xe2, // Usage (Mute) 65
+ * 0x09, 0xea, // Usage (Volume Down) 67
+ * 0x09, 0xe9, // Usage (Volume Up) 69
+ * 0x0a, 0x21, 0x02, // Usage (AC Search) 71
+ * 0x0a, 0x23, 0x02, // Usage (AC Home) 74
+ * 0x0a, 0x24, 0x02, // Usage (AC Back) 77
+ * 0x0a, 0x25, 0x02, // Usage (AC Forward) 80
+ * 0x0a, 0x26, 0x02, // Usage (AC Stop) 83
+ * 0x0a, 0x27, 0x02, // Usage (AC Refresh) 86
+ * 0x0a, 0x2a, 0x02, // Usage (AC Bookmarks) 89
+ * 0x81, 0x02, // Input (Data,Var,Abs) 92
+ * 0x95, 0x01, // Report Count (1) 94
+ * 0x75, 0x0e, // Report Size (14) 96
+ * 0x81, 0x01, // Input (Cnst,Arr,Abs) 98
+ * 0xc0, // End Collection 100
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 101
+ * 0x09, 0x02, // Usage (Mouse) 103
+ * 0xa1, 0x01, // Collection (Application) 105
+ * 0x09, 0x01, // Usage (Pointer) 107
+ * 0xa1, 0x00, // Collection (Physical) 109
+ * 0x85, 0x03, // Report ID (3) 111
+ * 0x05, 0x09, // Usage Page (Button) 113
+ * 0x19, 0x01, // Usage Minimum (1) 115
+ * 0x29, 0x08, // Usage Maximum (8) 117
+ * 0x15, 0x00, // Logical Minimum (0) 119
+ * 0x25, 0x01, // Logical Maximum (1) 121
+ * 0x75, 0x01, // Report Size (1) 123
+ * 0x95, 0x08, // Report Count (8) 125
+ * 0x81, 0x02, // Input (Data,Var,Abs) 127
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 129
+ * 0x09, 0x30, // Usage (X) 131
+ * 0x09, 0x31, // Usage (Y) 133
+ * 0x16, 0x01, 0x80, // Logical Minimum (-32767) 135
+ * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 138
+ * 0x75, 0x10, // Report Size (16) 141
+ * 0x95, 0x02, // Report Count (2) 143
+ * 0x81, 0x06, // Input (Data,Var,Rel) 145
+ * 0x09, 0x38, // Usage (Wheel) 147
+ * 0x15, 0x81, // Logical Minimum (-127) 149
+ * 0x25, 0x7f, // Logical Maximum (127) 151
+ * 0x75, 0x08, // Report Size (8) 153
+ * 0x95, 0x01, // Report Count (1) 155
+ * 0x81, 0x06, // Input (Data,Var,Rel) 157
+ * 0x05, 0x0c, // Usage Page (Consumer Devices) 159
+ * 0x0a, 0x38, 0x02, // Usage (AC Pan) 161
+ * 0x95, 0x01, // Report Count (1) 164
+ * 0x81, 0x06, // Input (Data,Var,Rel) 166
+ * 0xc0, // End Collection 168
+ * 0xc0, // End Collection 169
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 170
+ * 0x09, 0x06, // Usage (Keyboard) 172
+ * 0xa1, 0x01, // Collection (Application) 174
+ * 0x85, 0x04, // Report ID (4) 176
+ * 0x05, 0x07, // Usage Page (Keyboard) 178
+ * 0x95, 0x01, // Report Count (1) 180
+ * 0x75, 0x08, // Report Size (8) 182
+ * 0x81, 0x03, // Input (Cnst,Var,Abs) 184
+ * 0x95, 0xe8, // Report Count (232) 186
+ * 0x75, 0x01, // Report Size (1) 188
+ * 0x15, 0x00, // Logical Minimum (0) 190
+ * 0x25, 0x01, // Logical Maximum (1) 192
+ * 0x05, 0x07, // Usage Page (Keyboard) 194
+ * 0x19, 0x00, // Usage Minimum (0) 196
+ * 0x29, 0xe7, // Usage Maximum (231) 198
+ * 0x81, 0x00, // Input (Data,Arr,Abs) 200 <- change to 0x81, 0x02 (Data,Var,Abs)
+ * 0xc0, // End Collection 202
+ */
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_rdesc_fixup_mistel_md770, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[201] == 0x00)
+ data[201] = 0x02;
+
+ return 0;
+}
+
+HID_BPF_OPS(mistel_md770) = {
+ .hid_rdesc_fixup = (void *)hid_rdesc_fixup_mistel_md770,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ ctx->retval = ctx->rdesc_size != RDESC_SIZE;
+ if (ctx->retval)
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
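For illustration, a minimal userspace sketch of the same one-byte patch the
BPF program above applies. The offsets come straight from the annotated
descriptor (the final Input item occupies bytes 200-201); the array below
models only the descriptor tail, not the full 203 bytes.

#include <stdio.h>

int main(void)
{
	/* Descriptor bytes 200..202: Input (Data,Arr,Abs) + End Collection */
	unsigned char rdesc_tail[] = { 0x81, 0x00, 0xc0 };

	/* Byte 201 (index 1 here): 0x00 = Data,Arr,Abs -> 0x02 = Data,Var,Abs */
	if (rdesc_tail[1] == 0x00)
		rdesc_tail[1] = 0x02;

	printf("patched Input item: 0x%02x 0x%02x\n",
	       rdesc_tail[0], rdesc_tail[1]);
	return 0;
}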
diff --git a/drivers/hid/bpf/progs/Rapoo__M50-Plus-Silent.bpf.c b/drivers/hid/bpf/progs/Rapoo__M50-Plus-Silent.bpf.c
new file mode 100644
index 000000000000..6b379e45f531
--- /dev/null
+++ b/drivers/hid/bpf/progs/Rapoo__M50-Plus-Silent.bpf.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 José Expósito
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_RAPOO 0x24AE
+#define PID_M50 0x2015
+#define RDESC_SIZE 186
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_RAPOO, PID_M50)
+);
+
+/*
+ * The Rapoo M50 Plus Silent mouse has 2 side buttons in addition to the left,
+ * right and middle buttons. However, its original HID descriptor has a Usage
+ * Maximum of 3, preventing the side buttons from working. This HID-BPF driver
+ * changes that usage to 5.
+ *
+ * For reference, this is the original report descriptor:
+ *
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * 0x09, 0x02, // Usage (Mouse) 2
+ * 0xa1, 0x01, // Collection (Application) 4
+ * 0x85, 0x01, // Report ID (1) 6
+ * 0x09, 0x01, // Usage (Pointer) 8
+ * 0xa1, 0x00, // Collection (Physical) 10
+ * 0x05, 0x09, // Usage Page (Button) 12
+ * 0x19, 0x01, // Usage Minimum (1) 14
+ * 0x29, 0x03, // Usage Maximum (3) 16 <- change to 0x05
+ * 0x15, 0x00, // Logical Minimum (0) 18
+ * 0x25, 0x01, // Logical Maximum (1) 20
+ * 0x75, 0x01, // Report Size (1) 22
+ * 0x95, 0x05, // Report Count (5) 24
+ * 0x81, 0x02, // Input (Data,Var,Abs) 26
+ * 0x75, 0x03, // Report Size (3) 28
+ * 0x95, 0x01, // Report Count (1) 30
+ * 0x81, 0x01, // Input (Cnst,Arr,Abs) 32
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 34
+ * 0x09, 0x30, // Usage (X) 36
+ * 0x09, 0x31, // Usage (Y) 38
+ * 0x16, 0x01, 0x80, // Logical Minimum (-32767) 40
+ * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 43
+ * 0x75, 0x10, // Report Size (16) 46
+ * 0x95, 0x02, // Report Count (2) 48
+ * 0x81, 0x06, // Input (Data,Var,Rel) 50
+ * 0x09, 0x38, // Usage (Wheel) 52
+ * 0x15, 0x81, // Logical Minimum (-127) 54
+ * 0x25, 0x7f, // Logical Maximum (127) 56
+ * 0x75, 0x08, // Report Size (8) 58
+ * 0x95, 0x01, // Report Count (1) 60
+ * 0x81, 0x06, // Input (Data,Var,Rel) 62
+ * 0xc0, // End Collection 64
+ * 0xc0, // End Collection 65
+ * 0x05, 0x0c, // Usage Page (Consumer Devices) 66
+ * 0x09, 0x01, // Usage (Consumer Control) 68
+ * 0xa1, 0x01, // Collection (Application) 70
+ * 0x85, 0x02, // Report ID (2) 72
+ * 0x75, 0x10, // Report Size (16) 74
+ * 0x95, 0x01, // Report Count (1) 76
+ * 0x15, 0x01, // Logical Minimum (1) 78
+ * 0x26, 0x8c, 0x02, // Logical Maximum (652) 80
+ * 0x19, 0x01, // Usage Minimum (1) 83
+ * 0x2a, 0x8c, 0x02, // Usage Maximum (652) 85
+ * 0x81, 0x00, // Input (Data,Arr,Abs) 88
+ * 0xc0, // End Collection 90
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 91
+ * 0x09, 0x80, // Usage (System Control) 93
+ * 0xa1, 0x01, // Collection (Application) 95
+ * 0x85, 0x03, // Report ID (3) 97
+ * 0x09, 0x82, // Usage (System Sleep) 99
+ * 0x09, 0x81, // Usage (System Power Down) 101
+ * 0x09, 0x83, // Usage (System Wake Up) 103
+ * 0x15, 0x00, // Logical Minimum (0) 105
+ * 0x25, 0x01, // Logical Maximum (1) 107
+ * 0x19, 0x01, // Usage Minimum (1) 109
+ * 0x29, 0x03, // Usage Maximum (3) 111
+ * 0x75, 0x01, // Report Size (1) 113
+ * 0x95, 0x03, // Report Count (3) 115
+ * 0x81, 0x02, // Input (Data,Var,Abs) 117
+ * 0x95, 0x05, // Report Count (5) 119
+ * 0x81, 0x01, // Input (Cnst,Arr,Abs) 121
+ * 0xc0, // End Collection 123
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 124
+ * 0x09, 0x00, // Usage (Undefined) 126
+ * 0xa1, 0x01, // Collection (Application) 128
+ * 0x85, 0x05, // Report ID (5) 130
+ * 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 132
+ * 0x09, 0x01, // Usage (Vendor Usage 1) 135
+ * 0x15, 0x81, // Logical Minimum (-127) 137
+ * 0x25, 0x7f, // Logical Maximum (127) 139
+ * 0x75, 0x08, // Report Size (8) 141
+ * 0x95, 0x07, // Report Count (7) 143
+ * 0xb1, 0x02, // Feature (Data,Var,Abs) 145
+ * 0xc0, // End Collection 147
+ * 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 148
+ * 0x09, 0x0e, // Usage (Vendor Usage 0x0e) 151
+ * 0xa1, 0x01, // Collection (Application) 153
+ * 0x85, 0xba, // Report ID (186) 155
+ * 0x95, 0x1f, // Report Count (31) 157
+ * 0x75, 0x08, // Report Size (8) 159
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 161
+ * 0x15, 0x00, // Logical Minimum (0) 164
+ * 0x09, 0x01, // Usage (Vendor Usage 1) 166
+ * 0x91, 0x02, // Output (Data,Var,Abs) 168
+ * 0x85, 0xba, // Report ID (186) 170
+ * 0x95, 0x1f, // Report Count (31) 172
+ * 0x75, 0x08, // Report Size (8) 174
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 176
+ * 0x15, 0x00, // Logical Minimum (0) 179
+ * 0x09, 0x01, // Usage (Vendor Usage 1) 181
+ * 0x81, 0x02, // Input (Data,Var,Abs) 183
+ * 0xc0, // End Collection 185
+ */
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_rdesc_fixup_rapoo_m50, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[17] == 0x03)
+ data[17] = 0x05;
+
+ return 0;
+}
+
+HID_BPF_OPS(rapoo_m50) = {
+ .hid_rdesc_fixup = (void *)hid_rdesc_fixup_rapoo_m50,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ ctx->retval = ctx->rdesc_size != RDESC_SIZE;
+ if (ctx->retval)
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
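One hedged way to verify the fixup from userspace is to read the
report_descriptor attribute that hid-core exposes (see the sysfs rework in
hid-core.c further down) and check the Usage Maximum data byte at offset 17.
The sysfs path below is a placeholder; the real one depends on the bus and
device instance ids.

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/hid/devices/0003:24AE:2015.0001/report_descriptor";
	unsigned char desc[256];
	size_t n;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror("fopen");
		return 1;
	}
	n = fread(desc, 1, sizeof(desc), f);
	fclose(f);

	if (n > 17)
		printf("Usage Maximum data byte: 0x%02x (%s)\n", desc[17],
		       desc[17] == 0x05 ? "fixed" : "original");
	return 0;
}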
diff --git a/drivers/hid/bpf/progs/hid_report_helpers.h b/drivers/hid/bpf/progs/hid_report_helpers.h
index 0aa1df438eeb..9b2a48e4a311 100644
--- a/drivers/hid/bpf/progs/hid_report_helpers.h
+++ b/drivers/hid/bpf/progs/hid_report_helpers.h
@@ -52,7 +52,8 @@
* Usage_GD_Keyboard
* CollectionApplication( ← Open the collection
* ReportId(3)
- * LogicalRange_i8(0, 1)
+ * LogicalMinimum_i8(0)
+ * LogicalMaximum_i8(1)
* // other fields
* ) ← End EndCollection
*
@@ -74,26 +75,43 @@
#define Arr 0x0
#define Abs 0x0
#define Rel 0x4
+#define Null 0x40
+#define Buff 0x0100
/* Use like this: Input(Var|Abs) */
#define Input(i_) 0x081, i8(i_),
#define Output(i_) 0x091, i8(i_),
#define Feature(i_) 0x0b1, i8(i_),
+#define Input_i16(i_) 0x082, LE16(i_),
+#define Output_i16(i_) 0x092, LE16(i_),
+#define Feature_i16(i_) 0x0b2, LE16(i_),
+
#define ReportId(id_) 0x85, i8(id_),
#define ReportSize(sz_) 0x75, i8(sz_),
#define ReportCount(cnt_) 0x95, i8(cnt_),
-#define LogicalRange_i8(min_, max_) 0x15, i8(min_), 0x25, i8(max_),
-#define LogicalRange_i16(min_, max_) 0x16, LE16(min_), 0x26, LE16(max_),
-#define LogicalRange_i32(min_, max_) 0x17, LE32(min_), 0x27, LE32(max_),
+#define LogicalMinimum_i8(min_) 0x15, i8(min_),
+#define LogicalMinimum_i16(min_) 0x16, LE16(min_),
+#define LogicalMinimum_i32(min_) 0x17, LE32(min_),
+
+#define LogicalMaximum_i8(max_) 0x25, i8(max_),
+#define LogicalMaximum_i16(max_) 0x26, LE16(max_),
+#define LogicalMaximum_i32(max_) 0x27, LE32(max_),
+
+#define PhysicalMinimum_i8(min_) 0x35, i8(min_),
+#define PhysicalMinimum_i16(min_) 0x36, LE16(min_),
+#define PhysicalMinimum_i32(min_) 0x37, LE32(min_),
+
+#define PhysicalMaximum_i8(max_) 0x45, i8(max_),
+#define PhysicalMaximum_i16(max_) 0x46, LE16(max_),
+#define PhysicalMaximum_i32(max_) 0x47, LE32(max_),
-#define PhysicalRange_i8(min_, max_) 0x35, i8(min_), 0x45, i8(max_),
-#define PhysicalRange_i16(min_, max_) 0x36, LE16(min_), 0x46, LE16(max_),
-#define PhysicalRange_i32(min_, max_) 0x37, LE32(min_), 0x47, LE32(max_),
+#define UsageMinimum_i8(min_) 0x19, i8(min_),
+#define UsageMinimum_i16(min_) 0x1a, LE16(min_),
-#define UsageRange_i8(min_, max_) 0x19, i8(min_), 0x29, i8(max_),
-#define UsageRange_i16(min_, max_) 0x1a, LE16(min_), 0x2a, LE16(max_),
+#define UsageMaximum_i8(max_) 0x29, i8(max_),
+#define UsageMaximum_i16(max_) 0x2a, LE16(max_),
#define UsagePage_i8(p_) 0x05, i8(p_),
#define UsagePage_i16(p_) 0x06, LE16(p_),
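To see what the split macros emit, this sketch expands two of them into raw
descriptor bytes. i8() and LE16() are stand-ins for the header's own helpers
(defined outside this hunk), assumed here to be a plain 8-bit cast and a
little-endian byte pair.

#include <stdio.h>

#define i8(v_)   ((unsigned char)(v_))
#define LE16(v_) ((v_) & 0xff), (((v_) >> 8) & 0xff)

#define LogicalMinimum_i8(min_)  0x15, i8(min_),
#define LogicalMaximum_i16(max_) 0x26, LE16(max_),

static const unsigned char rdesc[] = {
	LogicalMinimum_i8(0)      /* expands to 0x15, 0x00, */
	LogicalMaximum_i16(8191)  /* expands to 0x26, 0xff, 0x1f, */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(rdesc); i++)
		printf("0x%02x ", rdesc[i]);
	printf("\n"); /* prints: 0x15 0x00 0x26 0xff 0x1f */
	return 0;
}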
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index a4b47319ad8e..46e3e42f9eb5 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -432,6 +432,26 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
return ret;
}
+static int asus_kbd_disable_oobe(struct hid_device *hdev)
+{
+ const u8 init[][6] = {
+ { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 },
+ { FEATURE_KBD_REPORT_ID, 0xBA, 0xC5, 0xC4 },
+ { FEATURE_KBD_REPORT_ID, 0xD0, 0x8F, 0x01 },
+ { FEATURE_KBD_REPORT_ID, 0xD0, 0x85, 0xFF }
+ };
+ int ret;
+
+ for (size_t i = 0; i < ARRAY_SIZE(init); i++) {
+ ret = asus_kbd_set_report(hdev, init[i], sizeof(init[i]));
+ if (ret < 0)
+ return ret;
+ }
+
+ hid_info(hdev, "Disabled OOBE for keyboard\n");
+ return 0;
+}
+
static void asus_schedule_work(struct asus_kbd_leds *led)
{
unsigned long flags;
@@ -534,6 +554,12 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
ret = asus_kbd_init(hdev, FEATURE_KBD_LED_REPORT_ID2);
if (ret < 0)
return ret;
+
+ if (dmi_match(DMI_PRODUCT_FAMILY, "ProArt P16")) {
+ ret = asus_kbd_disable_oobe(hdev);
+ if (ret < 0)
+ return ret;
+ }
} else {
/* Initialize keyboard */
ret = asus_kbd_init(hdev, FEATURE_KBD_REPORT_ID);
@@ -1183,7 +1209,7 @@ static const __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (drvdata->quirks & QUIRK_G752_KEYBOARD &&
*rsize == 75 && rdesc[61] == 0x15 && rdesc[62] == 0x00) {
- /* report is missing usage mninum and maximum */
+ /* report is missing usage minimum and maximum */
__u8 *new_rdesc;
size_t new_size = *rsize + sizeof(asus_g752_fixed_rdesc);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 612ee6ddfc8d..4497b50799db 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -46,6 +46,34 @@ module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600
MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
/*
+ * Convert a signed n-bit integer to signed 32-bit integer.
+ */
+
+static s32 snto32(__u32 value, unsigned int n)
+{
+ if (!value || !n)
+ return 0;
+
+ if (n > 32)
+ n = 32;
+
+ return sign_extend32(value, n - 1);
+}
+
+/*
+ * Convert a signed 32-bit integer to a signed n-bit integer.
+ */
+
+static u32 s32ton(__s32 value, unsigned int n)
+{
+ s32 a = value >> (n - 1);
+
+ if (a && a != -1)
+ return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
+ return value & ((1 << n) - 1);
+}
+
+/*
* Register a new report for a device.
*/
@@ -425,7 +453,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
* both this and the standard encoding. */
raw_value = item_sdata(item);
if (!(raw_value & 0xfffffff0))
- parser->global.unit_exponent = hid_snto32(raw_value, 4);
+ parser->global.unit_exponent = snto32(raw_value, 4);
else
parser->global.unit_exponent = raw_value;
return 0;
@@ -685,7 +713,14 @@ static void hid_close_report(struct hid_device *device)
INIT_LIST_HEAD(&report_enum->report_list);
}
- kfree(device->rdesc);
+ /*
+ * If the HID driver had a rdesc_fixup() callback, dev->rdesc
+ * will be allocated by hid-core and needs to be freed.
+ * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in
+ * which cases it'll be freed later on device removal or destroy.
+ */
+ if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc)
+ kfree(device->rdesc);
device->rdesc = NULL;
device->rsize = 0;
@@ -698,6 +733,14 @@ static void hid_close_report(struct hid_device *device)
device->status &= ~HID_STAT_PARSED;
}
+static inline void hid_free_bpf_rdesc(struct hid_device *hdev)
+{
+ /* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */
+ if (hdev->bpf_rdesc != hdev->dev_rdesc)
+ kfree(hdev->bpf_rdesc);
+ hdev->bpf_rdesc = NULL;
+}
+
/*
* Free a device structure, all reports, and all fields.
*/
@@ -707,6 +750,7 @@ void hiddev_free(struct kref *ref)
struct hid_device *hid = container_of(ref, struct hid_device, ref);
hid_close_report(hid);
+ hid_free_bpf_rdesc(hid);
kfree(hid->dev_rdesc);
kfree(hid);
}
@@ -754,35 +798,29 @@ static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item
}
item->format = HID_ITEM_FORMAT_SHORT;
- item->size = b & 3;
+ item->size = BIT(b & 3) >> 1; /* 0, 1, 2, 3 -> 0, 1, 2, 4 */
+
+ if (end - start < item->size)
+ return NULL;
switch (item->size) {
case 0:
- return start;
+ break;
case 1:
- if ((end - start) < 1)
- return NULL;
- item->data.u8 = *start++;
- return start;
+ item->data.u8 = *start;
+ break;
case 2:
- if ((end - start) < 2)
- return NULL;
item->data.u16 = get_unaligned_le16(start);
- start = (__u8 *)((__le16 *)start + 1);
- return start;
+ break;
- case 3:
- item->size++;
- if ((end - start) < 4)
- return NULL;
+ case 4:
item->data.u32 = get_unaligned_le32(start);
- start = (__u8 *)((__le32 *)start + 1);
- return start;
+ break;
}
- return NULL;
+ return start + item->size;
}
static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
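The new size decoding replaces the old switch-plus-increment dance: the low
two bits of a short item's prefix byte encode payload sizes 0, 1, 2 and 4,
and BIT(b & 3) >> 1 maps them without a branch. A two-line check:

#include <stdio.h>

int main(void)
{
	for (unsigned int b = 0; b < 4; b++)
		printf("size bits %u -> %u bytes\n", b, (1u << b) >> 1);
	/* prints 0 -> 0, 1 -> 1, 2 -> 2, 3 -> 4 */
	return 0;
}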
@@ -1125,6 +1163,8 @@ static void hid_apply_multiplier(struct hid_device *hid,
while (multiplier_collection->parent_idx != -1 &&
multiplier_collection->type != HID_COLLECTION_LOGICAL)
multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
+ if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
+ multiplier_collection = NULL;
effective_multiplier = hid_calculate_multiplier(hid, multiplier);
@@ -1205,7 +1245,6 @@ int hid_open_report(struct hid_device *device)
struct hid_item item;
unsigned int size;
const __u8 *start;
- __u8 *buf;
const __u8 *end;
const __u8 *next;
int ret;
@@ -1221,25 +1260,34 @@ int hid_open_report(struct hid_device *device)
if (WARN_ON(device->status & HID_STAT_PARSED))
return -EBUSY;
- start = device->dev_rdesc;
+ start = device->bpf_rdesc;
if (WARN_ON(!start))
return -ENODEV;
- size = device->dev_rsize;
+ size = device->bpf_rsize;
- /* call_hid_bpf_rdesc_fixup() ensures we work on a copy of rdesc */
- buf = call_hid_bpf_rdesc_fixup(device, start, &size);
- if (buf == NULL)
- return -ENOMEM;
+ if (device->driver->report_fixup) {
+ /*
+ * device->driver->report_fixup() needs to work
+ * on a copy of our report descriptor so it can
+ * change it.
+ */
+ __u8 *buf = kmemdup(start, size, GFP_KERNEL);
+
+ if (buf == NULL)
+ return -ENOMEM;
- if (device->driver->report_fixup)
start = device->driver->report_fixup(device, buf, &size);
- else
- start = buf;
- start = kmemdup(start, size, GFP_KERNEL);
- kfree(buf);
- if (start == NULL)
- return -ENOMEM;
+ /*
+ * The second kmemdup is required in case report_fixup() returns
+ * static read-only memory, but we have no idea if that memory
+ * needs to be cleaned up or not at the end.
+ */
+ start = kmemdup(start, size, GFP_KERNEL);
+ kfree(buf);
+ if (start == NULL)
+ return -ENOMEM;
+ }
device->rdesc = start;
device->rsize = size;
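Why the second kmemdup? A driver's report_fixup() may return its scratch
buffer or a pointer to static, read-only data, and the caller cannot tell
which; unconditionally copying the result makes ownership unambiguous. A
minimal sketch of the hazard (names are illustrative, not the kernel API):

#include <stdlib.h>
#include <string.h>

static const unsigned char canned_rdesc[] = { 0x05, 0x01, 0x09, 0x02 };

/* A fixup that ignores its writable copy and returns static memory. */
static const unsigned char *fixup(unsigned char *buf, size_t *size)
{
	(void)buf;
	*size = sizeof(canned_rdesc);
	return canned_rdesc;
}

int main(void)
{
	size_t size = 4;
	unsigned char *buf = malloc(size);
	const unsigned char *start;
	unsigned char *owned;

	if (!buf)
		return 1;
	start = fixup(buf, &size);

	/*
	 * Second copy: 'owned' is now always safe to free later, no matter
	 * whether the fixup returned 'buf' or static memory.
	 */
	owned = malloc(size);
	if (owned)
		memcpy(owned, start, size);
	free(buf);
	free(owned);
	return 0;
}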
@@ -1316,46 +1364,6 @@ alloc_err:
EXPORT_SYMBOL_GPL(hid_open_report);
/*
- * Convert a signed n-bit integer to signed 32-bit integer. Common
- * cases are done through the compiler, the screwed things has to be
- * done by hand.
- */
-
-static s32 snto32(__u32 value, unsigned n)
-{
- if (!value || !n)
- return 0;
-
- if (n > 32)
- n = 32;
-
- switch (n) {
- case 8: return ((__s8)value);
- case 16: return ((__s16)value);
- case 32: return ((__s32)value);
- }
- return value & (1 << (n - 1)) ? value | (~0U << n) : value;
-}
-
-s32 hid_snto32(__u32 value, unsigned n)
-{
- return snto32(value, n);
-}
-EXPORT_SYMBOL_GPL(hid_snto32);
-
-/*
- * Convert a signed 32-bit integer to a signed n-bit integer.
- */
-
-static u32 s32ton(__s32 value, unsigned n)
-{
- s32 a = value >> (n - 1);
- if (a && a != -1)
- return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
- return value & ((1 << n) - 1);
-}
-
-/*
* Extract/implement a data field from/to a little endian report (bit array).
*
* Code sort-of follows HID spec:
@@ -1875,7 +1883,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
u32 len = hid_report_len(report) + 7;
- return kmalloc(len, flags);
+ return kzalloc(len, flags);
}
EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
@@ -2168,9 +2176,9 @@ static bool hid_hiddev(struct hid_device *hdev)
static ssize_t
-read_report_descriptor(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+report_descriptor_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct hid_device *hdev = to_hid_device(dev);
@@ -2187,24 +2195,17 @@ read_report_descriptor(struct file *filp, struct kobject *kobj,
}
static ssize_t
-show_country(struct device *dev, struct device_attribute *attr,
- char *buf)
+country_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct hid_device *hdev = to_hid_device(dev);
return sprintf(buf, "%02x\n", hdev->country & 0xff);
}
-static struct bin_attribute dev_bin_attr_report_desc = {
- .attr = { .name = "report_descriptor", .mode = 0444 },
- .read = read_report_descriptor,
- .size = HID_MAX_DESCRIPTOR_SIZE,
-};
+static const BIN_ATTR_RO(report_descriptor, HID_MAX_DESCRIPTOR_SIZE);
-static const struct device_attribute dev_attr_country = {
- .attr = { .name = "country", .mode = 0444 },
- .show = show_country,
-};
+static const DEVICE_ATTR_RO(country);
int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
{
@@ -2674,9 +2675,10 @@ static bool hid_check_device_match(struct hid_device *hdev,
/*
* hid-generic implements .match(), so we must be dealing with a
* different HID driver here, and can simply check if
- * hid_ignore_special_drivers is set or not.
+ * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
+ * are set or not.
*/
- return !hid_ignore_special_drivers;
+ return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
}
static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
@@ -2684,6 +2686,18 @@ static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
const struct hid_device_id *id;
int ret;
+ if (!hdev->bpf_rsize) {
+ /* in case a bpf program gets detached, we need to free the old one */
+ hid_free_bpf_rdesc(hdev);
+
+ /* keep this around so we know we called it once */
+ hdev->bpf_rsize = hdev->dev_rsize;
+
+ /* call_hid_bpf_rdesc_fixup will always return a valid pointer */
+ hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
+ &hdev->bpf_rsize);
+ }
+
if (!hid_check_device_match(hdev, hdrv, &id))
return -ENODEV;
@@ -2781,13 +2795,13 @@ static struct attribute *hid_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
-static struct bin_attribute *hid_dev_bin_attrs[] = {
- &dev_bin_attr_report_desc,
+static const struct bin_attribute *hid_dev_bin_attrs[] = {
+ &bin_attr_report_descriptor,
NULL
};
static const struct attribute_group hid_dev_group = {
.attrs = hid_dev_attrs,
- .bin_attrs = hid_dev_bin_attrs,
+ .bin_attrs_new = hid_dev_bin_attrs,
};
__ATTRIBUTE_GROUPS(hid_dev);
@@ -2940,9 +2954,11 @@ static void hid_remove_device(struct hid_device *hdev)
hid_debug_unregister(hdev);
hdev->status &= ~HID_STAT_ADDED;
}
+ hid_free_bpf_rdesc(hdev);
kfree(hdev->dev_rdesc);
hdev->dev_rdesc = NULL;
hdev->dev_rsize = 0;
+ hdev->bpf_rsize = 0;
}
/**
@@ -3043,7 +3059,7 @@ int hid_check_keys_pressed(struct hid_device *hid)
EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
#ifdef CONFIG_HID_BPF
-static struct hid_ops __hid_ops = {
+static const struct hid_ops __hid_ops = {
.hid_get_report = hid_get_report,
.hid_hw_raw_request = __hid_hw_raw_request,
.hid_hw_output_report = __hid_hw_output_report,
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
new file mode 100644
index 000000000000..6ece56b850fc
--- /dev/null
+++ b/drivers/hid/hid-corsair-void.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID driver for Corsair Void headsets
+ *
+ * Copyright (C) 2023-2024 Stuart Hayhurst
+ */
+
+/* -------------------------------------------------------------------------- */
+/* Receiver report information: (ID 100) */
+/* -------------------------------------------------------------------------- */
+/*
+ * When queried, the receiver responds with 5 bytes to describe the battery
+ * The power button, mute button and moving the mic also trigger this report
+ * This includes power button + mic + connection + battery status and capacity
+ * The information below may not be perfect, it's been gathered through guesses
+ *
+ * 0: REPORT ID
+ * 100 for the battery packet
+ *
+ * 1: POWER BUTTON + (?)
+ * Largest bit is 1 when power button pressed
+ *
+ * 2: BATTERY CAPACITY + MIC STATUS
+ * Battery capacity:
+ * Seems to report ~54 higher than reality when charging
+ * Capped at 100, charging or not
+ * Microphone status:
+ * Largest bit is set to 1 when the mic is physically up
+ * No bits change when the mic is muted, only when physically moved
+ * This report is sent every time the mic is moved, no polling required
+ *
+ * 3: CONNECTION STATUS
+ * 16: Wired headset
+ * 38: Initialising
+ * 49: Lost connection
+ * 51: Disconnected, searching
+ * 52: Disconnected, not searching
+ * 177: Normal
+ *
+ * 4: BATTERY STATUS
+ * 0: Disconnected
+ * 1: Normal
+ * 2: Low
+ * 3: Critical - sent during shutdown
+ * 4: Fully charged
+ * 5: Charging
+ */
+/* -------------------------------------------------------------------------- */
+
+/* -------------------------------------------------------------------------- */
+/* Receiver report information: (ID 102) */
+/* -------------------------------------------------------------------------- */
+/*
+ * When queried, the receiver responds with 4 bytes to describe the firmware
+ * The first 2 bytes are for the receiver, the second 2 are the headset
+ * The headset firmware version will be 0 if no headset is connected
+ *
+ * 0: Receiver firmware major version
+ * Major version of the receiver's firmware
+ *
+ * 1: Receiver firmware minor version
+ * Minor version of the receiver's firmware
+ *
+ * 2: Headset firmware major version
+ * Major version of the headset's firmware
+ *
+ * 3: Headset firmware minor version
+ * Minor version of the headset's firmware
+ */
+/* -------------------------------------------------------------------------- */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+#include <asm/byteorder.h>
+
+#include "hid-ids.h"
+
+#define CORSAIR_VOID_DEVICE(id, type) { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, (id)), \
+ .driver_data = (type) }
+#define CORSAIR_VOID_WIRELESS_DEVICE(id) CORSAIR_VOID_DEVICE((id), CORSAIR_VOID_WIRELESS)
+#define CORSAIR_VOID_WIRED_DEVICE(id) CORSAIR_VOID_DEVICE((id), CORSAIR_VOID_WIRED)
+
+#define CORSAIR_VOID_STATUS_REQUEST_ID 0xC9
+#define CORSAIR_VOID_NOTIF_REQUEST_ID 0xCA
+#define CORSAIR_VOID_SIDETONE_REQUEST_ID 0xFF
+#define CORSAIR_VOID_STATUS_REPORT_ID 0x64
+#define CORSAIR_VOID_FIRMWARE_REPORT_ID 0x66
+
+#define CORSAIR_VOID_USB_SIDETONE_REQUEST 0x1
+#define CORSAIR_VOID_USB_SIDETONE_REQUEST_TYPE 0x21
+#define CORSAIR_VOID_USB_SIDETONE_VALUE 0x200
+#define CORSAIR_VOID_USB_SIDETONE_INDEX 0xB00
+
+#define CORSAIR_VOID_MIC_MASK GENMASK(7, 7)
+#define CORSAIR_VOID_CAPACITY_MASK GENMASK(6, 0)
+
+#define CORSAIR_VOID_WIRELESS_CONNECTED 177
+
+#define CORSAIR_VOID_SIDETONE_MAX_WIRELESS 55
+#define CORSAIR_VOID_SIDETONE_MAX_WIRED 4096
+
+enum {
+ CORSAIR_VOID_WIRELESS,
+ CORSAIR_VOID_WIRED,
+};
+
+enum {
+ CORSAIR_VOID_BATTERY_NORMAL = 1,
+ CORSAIR_VOID_BATTERY_LOW = 2,
+ CORSAIR_VOID_BATTERY_CRITICAL = 3,
+ CORSAIR_VOID_BATTERY_CHARGED = 4,
+ CORSAIR_VOID_BATTERY_CHARGING = 5,
+};
+
+static enum power_supply_property corsair_void_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+struct corsair_void_battery_data {
+ int status;
+ bool present;
+ int capacity;
+ int capacity_level;
+};
+
+struct corsair_void_drvdata {
+ struct hid_device *hid_dev;
+ struct device *dev;
+
+ char *name;
+ bool is_wired;
+ unsigned int sidetone_max;
+
+ struct corsair_void_battery_data battery_data;
+ bool mic_up;
+ bool connected;
+ int fw_receiver_major;
+ int fw_receiver_minor;
+ int fw_headset_major;
+ int fw_headset_minor;
+
+ struct power_supply *battery;
+ struct power_supply_desc battery_desc;
+ struct mutex battery_mutex;
+
+ struct delayed_work delayed_status_work;
+ struct delayed_work delayed_firmware_work;
+ struct work_struct battery_remove_work;
+ struct work_struct battery_add_work;
+};
+
+/*
+ * Functions to process receiver data
+ */
+
+static void corsair_void_set_wireless_status(struct corsair_void_drvdata *drvdata)
+{
+ struct usb_interface *usb_if = to_usb_interface(drvdata->dev->parent);
+
+ if (drvdata->is_wired)
+ return;
+
+ usb_set_wireless_status(usb_if, drvdata->connected ?
+ USB_WIRELESS_STATUS_CONNECTED :
+ USB_WIRELESS_STATUS_DISCONNECTED);
+}
+
+static void corsair_void_set_unknown_batt(struct corsair_void_drvdata *drvdata)
+{
+ struct corsair_void_battery_data *battery_data = &drvdata->battery_data;
+
+ battery_data->status = POWER_SUPPLY_STATUS_UNKNOWN;
+ battery_data->present = false;
+ battery_data->capacity = 0;
+ battery_data->capacity_level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+}
+
+/* Reset data that may change between wireless connections */
+static void corsair_void_set_unknown_wireless_data(struct corsair_void_drvdata *drvdata)
+{
+ /* Only zero out the headset version; the receiver is always known if relevant */
+ drvdata->fw_headset_major = 0;
+ drvdata->fw_headset_minor = 0;
+
+ drvdata->connected = false;
+ drvdata->mic_up = false;
+
+ corsair_void_set_wireless_status(drvdata);
+}
+
+static void corsair_void_process_receiver(struct corsair_void_drvdata *drvdata,
+ int raw_battery_capacity,
+ int raw_connection_status,
+ int raw_battery_status)
+{
+ struct corsair_void_battery_data *battery_data = &drvdata->battery_data;
+ struct corsair_void_battery_data orig_battery_data;
+
+ /* Save initial battery data, to compare later */
+ orig_battery_data = *battery_data;
+
+ /* Headset not connected, or it's wired */
+ if (raw_connection_status != CORSAIR_VOID_WIRELESS_CONNECTED)
+ goto unknown_battery;
+
+ /* Battery information unavailable */
+ if (raw_battery_status == 0)
+ goto unknown_battery;
+
+ /* Battery must be connected then */
+ battery_data->present = true;
+ battery_data->capacity_level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+
+ /* Set battery status */
+ switch (raw_battery_status) {
+ case CORSAIR_VOID_BATTERY_NORMAL:
+ case CORSAIR_VOID_BATTERY_LOW:
+ case CORSAIR_VOID_BATTERY_CRITICAL:
+ battery_data->status = POWER_SUPPLY_STATUS_DISCHARGING;
+ if (raw_battery_status == CORSAIR_VOID_BATTERY_LOW)
+ battery_data->capacity_level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (raw_battery_status == CORSAIR_VOID_BATTERY_CRITICAL)
+ battery_data->capacity_level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+
+ break;
+ case CORSAIR_VOID_BATTERY_CHARGED:
+ battery_data->status = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case CORSAIR_VOID_BATTERY_CHARGING:
+ battery_data->status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ default:
+ hid_warn(drvdata->hid_dev, "unknown battery status '%d'",
+ raw_battery_status);
+ goto unknown_battery;
+ break;
+ }
+
+ battery_data->capacity = raw_battery_capacity;
+ corsair_void_set_wireless_status(drvdata);
+
+ goto success;
+unknown_battery:
+ corsair_void_set_unknown_batt(drvdata);
+success:
+
+ /* Inform power supply if battery values changed */
+ if (memcmp(&orig_battery_data, battery_data, sizeof(*battery_data))) {
+ scoped_guard(mutex, &drvdata->battery_mutex) {
+ if (drvdata->battery) {
+ power_supply_changed(drvdata->battery);
+ }
+ }
+ }
+}
+
+/*
+ * Functions to report stored data
+ */
+
+static int corsair_void_battery_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct corsair_void_drvdata *drvdata = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ if (!strncmp(drvdata->hid_dev->name, "Corsair ", 8))
+ val->strval = drvdata->hid_dev->name + 8;
+ else
+ val->strval = drvdata->hid_dev->name;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "Corsair";
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = drvdata->battery_data.status;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = drvdata->battery_data.present;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = drvdata->battery_data.capacity;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ val->intval = drvdata->battery_data.capacity_level;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t microphone_up_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (!drvdata->connected)
+ return -ENODEV;
+
+ return sysfs_emit(buf, "%d\n", drvdata->mic_up);
+}
+
+static ssize_t fw_version_receiver_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata->fw_receiver_major == 0 && drvdata->fw_receiver_minor == 0)
+ return -ENODATA;
+
+ return sysfs_emit(buf, "%d.%02d\n", drvdata->fw_receiver_major,
+ drvdata->fw_receiver_minor);
+}
+
+
+static ssize_t fw_version_headset_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata->fw_headset_major == 0 && drvdata->fw_headset_minor == 0)
+ return -ENODATA;
+
+ return sysfs_emit(buf, "%d.%02d\n", drvdata->fw_headset_major,
+ drvdata->fw_headset_minor);
+}
+
+static ssize_t sidetone_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", drvdata->sidetone_max);
+}
+
+/*
+ * Functions to send data to headset
+ */
+
+static ssize_t send_alert_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev);
+ struct hid_device *hid_dev = drvdata->hid_dev;
+ unsigned char alert_id;
+ unsigned char *send_buf __free(kfree) = NULL;
+ int ret;
+
+ if (!drvdata->connected || drvdata->is_wired)
+ return -ENODEV;
+
+ /* Only accept 0 or 1 for alert ID */
+ if (kstrtou8(buf, 10, &alert_id) || alert_id >= 2)
+ return -EINVAL;
+
+ send_buf = kmalloc(3, GFP_KERNEL);
+ if (!send_buf)
+ return -ENOMEM;
+
+ /* Packet format to send alert with ID alert_id */
+ send_buf[0] = CORSAIR_VOID_NOTIF_REQUEST_ID;
+ send_buf[1] = 0x02;
+ send_buf[2] = alert_id;
+
+ ret = hid_hw_raw_request(hid_dev, CORSAIR_VOID_NOTIF_REQUEST_ID,
+ send_buf, 3, HID_OUTPUT_REPORT,
+ HID_REQ_SET_REPORT);
+ if (ret < 0)
+ hid_warn(hid_dev, "failed to send alert request (reason: %d)",
+ ret);
+ else
+ ret = count;
+
+ return ret;
+}
+
+static int corsair_void_set_sidetone_wired(struct device *dev, const char *buf,
+ unsigned int sidetone)
+{
+ struct usb_interface *usb_if = to_usb_interface(dev->parent);
+ struct usb_device *usb_dev = interface_to_usbdev(usb_if);
+
+ /* Packet format to set sidetone for wired headsets */
+ __le16 sidetone_le = cpu_to_le16(sidetone);
+
+ return usb_control_msg_send(usb_dev, 0,
+ CORSAIR_VOID_USB_SIDETONE_REQUEST,
+ CORSAIR_VOID_USB_SIDETONE_REQUEST_TYPE,
+ CORSAIR_VOID_USB_SIDETONE_VALUE,
+ CORSAIR_VOID_USB_SIDETONE_INDEX,
+ &sidetone_le, 2, USB_CTRL_SET_TIMEOUT,
+ GFP_KERNEL);
+}
+
+static int corsair_void_set_sidetone_wireless(struct device *dev,
+ const char *buf,
+ unsigned char sidetone)
+{
+ struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev);
+ struct hid_device *hid_dev = drvdata->hid_dev;
+ unsigned char *send_buf __free(kfree) = NULL;
+
+ send_buf = kmalloc(12, GFP_KERNEL);
+ if (!send_buf)
+ return -ENOMEM;
+
+ /* Packet format to set sidetone for wireless headsets */
+ send_buf[0] = CORSAIR_VOID_SIDETONE_REQUEST_ID;
+ send_buf[1] = 0x0B;
+ send_buf[2] = 0x00;
+ send_buf[3] = 0xFF;
+ send_buf[4] = 0x04;
+ send_buf[5] = 0x0E;
+ send_buf[6] = 0xFF;
+ send_buf[7] = 0x05;
+ send_buf[8] = 0x01;
+ send_buf[9] = 0x04;
+ send_buf[10] = 0x00;
+ send_buf[11] = sidetone + 200;
+
+ return hid_hw_raw_request(hid_dev, CORSAIR_VOID_SIDETONE_REQUEST_ID,
+ send_buf, 12, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
+}
+
+static ssize_t set_sidetone_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct corsair_void_drvdata *drvdata = dev_get_drvdata(dev);
+ struct hid_device *hid_dev = drvdata->hid_dev;
+ unsigned int sidetone;
+ int ret;
+
+ if (!drvdata->connected)
+ return -ENODEV;
+
+ /* sidetone must be between 0 and drvdata->sidetone_max inclusive */
+ if (kstrtouint(buf, 10, &sidetone) || sidetone > drvdata->sidetone_max)
+ return -EINVAL;
+
+ if (drvdata->is_wired)
+ ret = corsair_void_set_sidetone_wired(dev, buf, sidetone);
+ else
+ ret = corsair_void_set_sidetone_wireless(dev, buf, sidetone);
+
+ if (ret < 0)
+ hid_warn(hid_dev, "failed to send sidetone (reason: %d)", ret);
+ else
+ ret = count;
+
+ return ret;
+}
+
+static int corsair_void_request_status(struct hid_device *hid_dev, int id)
+{
+ unsigned char *send_buf __free(kfree) = NULL;
+
+ send_buf = kmalloc(2, GFP_KERNEL);
+ if (!send_buf)
+ return -ENOMEM;
+
+ /* Packet format to request data item (status / firmware) refresh */
+ send_buf[0] = CORSAIR_VOID_STATUS_REQUEST_ID;
+ send_buf[1] = id;
+
+ /* Send request for data refresh */
+ return hid_hw_raw_request(hid_dev, CORSAIR_VOID_STATUS_REQUEST_ID,
+ send_buf, 2, HID_OUTPUT_REPORT,
+ HID_REQ_SET_REPORT);
+}
+
+/*
+ * Headset connect / disconnect handlers and work handlers
+ */
+
+static void corsair_void_status_work_handler(struct work_struct *work)
+{
+ struct corsair_void_drvdata *drvdata;
+ struct delayed_work *delayed_work;
+ int battery_ret;
+
+ delayed_work = container_of(work, struct delayed_work, work);
+ drvdata = container_of(delayed_work, struct corsair_void_drvdata,
+ delayed_status_work);
+
+ battery_ret = corsair_void_request_status(drvdata->hid_dev,
+ CORSAIR_VOID_STATUS_REPORT_ID);
+ if (battery_ret < 0) {
+ hid_warn(drvdata->hid_dev,
+ "failed to request battery (reason: %d)", battery_ret);
+ }
+}
+
+static void corsair_void_firmware_work_handler(struct work_struct *work)
+{
+ struct corsair_void_drvdata *drvdata;
+ struct delayed_work *delayed_work;
+ int firmware_ret;
+
+ delayed_work = container_of(work, struct delayed_work, work);
+ drvdata = container_of(delayed_work, struct corsair_void_drvdata,
+ delayed_firmware_work);
+
+ firmware_ret = corsair_void_request_status(drvdata->hid_dev,
+ CORSAIR_VOID_FIRMWARE_REPORT_ID);
+ if (firmware_ret < 0) {
+ hid_warn(drvdata->hid_dev,
+ "failed to request firmware (reason: %d)", firmware_ret);
+ }
+}
+
+static void corsair_void_battery_remove_work_handler(struct work_struct *work)
+{
+ struct corsair_void_drvdata *drvdata;
+
+ drvdata = container_of(work, struct corsair_void_drvdata,
+ battery_remove_work);
+ scoped_guard(mutex, &drvdata->battery_mutex) {
+ if (drvdata->battery) {
+ power_supply_unregister(drvdata->battery);
+ drvdata->battery = NULL;
+ }
+ }
+}
+
+static void corsair_void_battery_add_work_handler(struct work_struct *work)
+{
+ struct corsair_void_drvdata *drvdata;
+ struct power_supply_config psy_cfg;
+ struct power_supply *new_supply;
+
+ drvdata = container_of(work, struct corsair_void_drvdata,
+ battery_add_work);
+ guard(mutex)(&drvdata->battery_mutex);
+ if (drvdata->battery)
+ return;
+
+ psy_cfg.drv_data = drvdata;
+ new_supply = power_supply_register(drvdata->dev,
+ &drvdata->battery_desc,
+ &psy_cfg);
+
+ if (IS_ERR(new_supply)) {
+ hid_err(drvdata->hid_dev,
+ "failed to register battery '%s' (reason: %ld)\n",
+ drvdata->battery_desc.name,
+ PTR_ERR(new_supply));
+ return;
+ }
+
+ if (power_supply_powers(new_supply, drvdata->dev)) {
+ power_supply_unregister(new_supply);
+ return;
+ }
+
+ drvdata->battery = new_supply;
+}
+
+static void corsair_void_headset_connected(struct corsair_void_drvdata *drvdata)
+{
+ schedule_work(&drvdata->battery_add_work);
+ schedule_delayed_work(&drvdata->delayed_firmware_work,
+ msecs_to_jiffies(100));
+}
+
+static void corsair_void_headset_disconnected(struct corsair_void_drvdata *drvdata)
+{
+ schedule_work(&drvdata->battery_remove_work);
+
+ corsair_void_set_unknown_wireless_data(drvdata);
+ corsair_void_set_unknown_batt(drvdata);
+}
+
+/*
+ * Driver setup, probing and HID event handling
+ */
+
+static DEVICE_ATTR_RO(fw_version_receiver);
+static DEVICE_ATTR_RO(fw_version_headset);
+static DEVICE_ATTR_RO(microphone_up);
+static DEVICE_ATTR_RO(sidetone_max);
+
+static DEVICE_ATTR_WO(send_alert);
+static DEVICE_ATTR_WO(set_sidetone);
+
+static struct attribute *corsair_void_attrs[] = {
+ &dev_attr_fw_version_receiver.attr,
+ &dev_attr_fw_version_headset.attr,
+ &dev_attr_microphone_up.attr,
+ &dev_attr_send_alert.attr,
+ &dev_attr_set_sidetone.attr,
+ &dev_attr_sidetone_max.attr,
+ NULL,
+};
+
+static const struct attribute_group corsair_void_attr_group = {
+ .attrs = corsair_void_attrs,
+};
+
+static int corsair_void_probe(struct hid_device *hid_dev,
+ const struct hid_device_id *hid_id)
+{
+ int ret;
+ struct corsair_void_drvdata *drvdata;
+ char *name;
+
+ if (!hid_is_usb(hid_dev))
+ return -EINVAL;
+
+ drvdata = devm_kzalloc(&hid_dev->dev, sizeof(*drvdata),
+ GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ hid_set_drvdata(hid_dev, drvdata);
+ dev_set_drvdata(&hid_dev->dev, drvdata);
+
+ drvdata->dev = &hid_dev->dev;
+ drvdata->hid_dev = hid_dev;
+ drvdata->is_wired = hid_id->driver_data == CORSAIR_VOID_WIRED;
+
+ drvdata->sidetone_max = CORSAIR_VOID_SIDETONE_MAX_WIRELESS;
+ if (drvdata->is_wired)
+ drvdata->sidetone_max = CORSAIR_VOID_SIDETONE_MAX_WIRED;
+
+ /* Set initial values for no wireless headset attached */
+ /* If a headset is attached, it'll be queried later */
+ corsair_void_set_unknown_wireless_data(drvdata);
+ corsair_void_set_unknown_batt(drvdata);
+
+ /* Receiver version won't be reset after init */
+ /* Headset version already set via set_unknown_wireless_data */
+ drvdata->fw_receiver_major = 0;
+ drvdata->fw_receiver_minor = 0;
+
+ ret = hid_parse(hid_dev);
+ if (ret) {
+ hid_err(hid_dev, "parse failed (reason: %d)\n", ret);
+ return ret;
+ }
+
+ name = devm_kasprintf(drvdata->dev, GFP_KERNEL,
+ "corsair-void-%d-battery", hid_dev->id);
+ if (!name)
+ return -ENOMEM;
+
+ drvdata->battery_desc.name = name;
+ drvdata->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY;
+ drvdata->battery_desc.properties = corsair_void_battery_props;
+ drvdata->battery_desc.num_properties = ARRAY_SIZE(corsair_void_battery_props);
+ drvdata->battery_desc.get_property = corsair_void_battery_get_property;
+
+ drvdata->battery = NULL;
+ INIT_WORK(&drvdata->battery_remove_work,
+ corsair_void_battery_remove_work_handler);
+ INIT_WORK(&drvdata->battery_add_work,
+ corsair_void_battery_add_work_handler);
+ ret = devm_mutex_init(drvdata->dev, &drvdata->battery_mutex);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
+ if (ret)
+ return ret;
+
+ /* Any failures after here will need to call hid_hw_stop */
+ ret = hid_hw_start(hid_dev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hid_dev, "hid_hw_start failed (reason: %d)\n", ret);
+ goto failed_after_sysfs;
+ }
+
+ /* Refresh battery data, in case wireless headset is already connected */
+ INIT_DELAYED_WORK(&drvdata->delayed_status_work,
+ corsair_void_status_work_handler);
+ schedule_delayed_work(&drvdata->delayed_status_work,
+ msecs_to_jiffies(100));
+
+ /* Refresh firmware versions */
+ INIT_DELAYED_WORK(&drvdata->delayed_firmware_work,
+ corsair_void_firmware_work_handler);
+ schedule_delayed_work(&drvdata->delayed_firmware_work,
+ msecs_to_jiffies(100));
+
+ return 0;
+
+failed_after_sysfs:
+ sysfs_remove_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
+ return ret;
+}
+
+static void corsair_void_remove(struct hid_device *hid_dev)
+{
+ struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev);
+
+ hid_hw_stop(hid_dev);
+ cancel_work_sync(&drvdata->battery_remove_work);
+ cancel_work_sync(&drvdata->battery_add_work);
+ if (drvdata->battery)
+ power_supply_unregister(drvdata->battery);
+
+ cancel_delayed_work_sync(&drvdata->delayed_firmware_work);
+ sysfs_remove_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
+}
+
+static int corsair_void_raw_event(struct hid_device *hid_dev,
+ struct hid_report *hid_report,
+ u8 *data, int size)
+{
+ struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev);
+ bool was_connected = drvdata->connected;
+
+ /* The packet formats are documented at the top of this file */
+ if (hid_report->id == CORSAIR_VOID_STATUS_REPORT_ID) {
+ drvdata->mic_up = FIELD_GET(CORSAIR_VOID_MIC_MASK, data[2]);
+ drvdata->connected = (data[3] == CORSAIR_VOID_WIRELESS_CONNECTED) ||
+ drvdata->is_wired;
+
+ corsair_void_process_receiver(drvdata,
+ FIELD_GET(CORSAIR_VOID_CAPACITY_MASK, data[2]),
+ data[3], data[4]);
+ } else if (hid_report->id == CORSAIR_VOID_FIRMWARE_REPORT_ID) {
+ drvdata->fw_receiver_major = data[1];
+ drvdata->fw_receiver_minor = data[2];
+ drvdata->fw_headset_major = data[3];
+ drvdata->fw_headset_minor = data[4];
+ }
+
+ /* Handle wireless headset connect / disconnect */
+ if ((was_connected != drvdata->connected) && !drvdata->is_wired) {
+ if (drvdata->connected)
+ corsair_void_headset_connected(drvdata);
+ else
+ corsair_void_headset_disconnected(drvdata);
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id corsair_void_devices[] = {
+ /* Corsair Void Wireless */
+ CORSAIR_VOID_WIRELESS_DEVICE(0x0a0c),
+ CORSAIR_VOID_WIRELESS_DEVICE(0x0a2b),
+ CORSAIR_VOID_WIRELESS_DEVICE(0x1b23),
+ CORSAIR_VOID_WIRELESS_DEVICE(0x1b25),
+ CORSAIR_VOID_WIRELESS_DEVICE(0x1b27),
+
+ /* Corsair Void USB */
+ CORSAIR_VOID_WIRED_DEVICE(0x0a0f),
+ CORSAIR_VOID_WIRED_DEVICE(0x1b1c),
+ CORSAIR_VOID_WIRED_DEVICE(0x1b29),
+ CORSAIR_VOID_WIRED_DEVICE(0x1b2a),
+
+ /* Corsair Void Surround */
+ CORSAIR_VOID_WIRED_DEVICE(0x0a30),
+ CORSAIR_VOID_WIRED_DEVICE(0x0a31),
+
+ /* Corsair Void Pro Wireless */
+ CORSAIR_VOID_WIRELESS_DEVICE(0x0a14),
+ CORSAIR_VOID_WIRELESS_DEVICE(0x0a16),
+ CORSAIR_VOID_WIRELESS_DEVICE(0x0a1a),
+
+ /* Corsair Void Pro USB */
+ CORSAIR_VOID_WIRED_DEVICE(0x0a17),
+ CORSAIR_VOID_WIRED_DEVICE(0x0a1d),
+
+ /* Corsair Void Pro Surround */
+ CORSAIR_VOID_WIRED_DEVICE(0x0a18),
+ CORSAIR_VOID_WIRED_DEVICE(0x0a1e),
+ CORSAIR_VOID_WIRED_DEVICE(0x0a1f),
+
+ /* Corsair Void Elite Wireless */
+ CORSAIR_VOID_WIRELESS_DEVICE(0x0a51),
+ CORSAIR_VOID_WIRELESS_DEVICE(0x0a55),
+ CORSAIR_VOID_WIRELESS_DEVICE(0x0a75),
+
+ /* Corsair Void Elite USB */
+ CORSAIR_VOID_WIRED_DEVICE(0x0a52),
+ CORSAIR_VOID_WIRED_DEVICE(0x0a56),
+
+ /* Corsair Void Elite Surround */
+ CORSAIR_VOID_WIRED_DEVICE(0x0a53),
+ CORSAIR_VOID_WIRED_DEVICE(0x0a57),
+
+ {}
+};
+
+MODULE_DEVICE_TABLE(hid, corsair_void_devices);
+
+static struct hid_driver corsair_void_driver = {
+ .name = "hid-corsair-void",
+ .id_table = corsair_void_devices,
+ .probe = corsair_void_probe,
+ .remove = corsair_void_remove,
+ .raw_event = corsair_void_raw_event,
+};
+
+module_hid_driver(corsair_void_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stuart Hayhurst <stuart.a.hayhurst@gmail.com>");
+MODULE_DESCRIPTION("HID driver for Corsair Void headsets");
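To make the ID 100 parsing in corsair_void_raw_event() concrete, here is a
sketch that decodes a sample status packet with the same bit masks the
driver uses (mic flag in bit 7 of byte 2, capacity in bits 6-0). The sample
bytes are invented for illustration.

#include <stdio.h>

#define MIC_MASK           0x80 /* GENMASK(7, 7) */
#define CAPACITY_MASK      0x7f /* GENMASK(6, 0) */
#define WIRELESS_CONNECTED 177

int main(void)
{
	/* report id, power button, mic|capacity, connection, battery status */
	unsigned char data[] = { 100, 0x00, 0x80 | 76, WIRELESS_CONNECTED, 1 };

	printf("mic up:    %d\n", !!(data[2] & MIC_MASK));
	printf("capacity:  %d%%\n", data[2] & CAPACITY_MASK);
	printf("connected: %d\n", data[3] == WIRELESS_CONNECTED);
	printf("status:    %d (1 = normal, discharging)\n", data[4]);
	return 0;
}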
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index dae2b84a1490..f4c8d981aa0a 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -852,7 +852,8 @@ static int cp2112_set_usb_config(struct hid_device *hdev,
{
int ret;
- BUG_ON(cfg->report != CP2112_USB_CONFIG);
+ if (WARN_ON(cfg->report != CP2112_USB_CONFIG))
+ return -EINVAL;
ret = cp2112_hid_output(hdev, (u8 *)cfg, sizeof(*cfg),
HID_FEATURE_REPORT);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index d5abfe652fb5..541d682af15a 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3309,9 +3309,9 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_EPG] = "EPG", [KEY_PVR] = "PVR",
[KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language",
[KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle",
- [KEY_ANGLE] = "Angle", [KEY_ZOOM] = "Zoom",
+ [KEY_ANGLE] = "Angle",
[KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard",
- [KEY_SCREEN] = "Screen", [KEY_PC] = "PC",
+ [KEY_PC] = "PC",
[KEY_TV] = "TV", [KEY_TV2] = "TV2",
[KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2",
[KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2",
@@ -3409,8 +3409,7 @@ static const char *keys[KEY_MAX + 1] = {
[BTN_TRIGGER_HAPPY35] = "TriggerHappy35", [BTN_TRIGGER_HAPPY36] = "TriggerHappy36",
[BTN_TRIGGER_HAPPY37] = "TriggerHappy37", [BTN_TRIGGER_HAPPY38] = "TriggerHappy38",
[BTN_TRIGGER_HAPPY39] = "TriggerHappy39", [BTN_TRIGGER_HAPPY40] = "TriggerHappy40",
- [BTN_DIGI] = "Digi", [BTN_STYLUS3] = "Stylus3",
- [BTN_TOOL_QUINTTAP] = "ToolQuintTap", [BTN_WHEEL] = "Wheel",
+ [BTN_STYLUS3] = "Stylus3", [BTN_TOOL_QUINTTAP] = "ToolQuintTap",
[KEY_10CHANNELSDOWN] = "10ChannelsDown",
[KEY_10CHANNELSUP] = "10ChannelsUp",
[KEY_3D_MODE] = "3DMode", [KEY_ADDRESSBOOK] = "Addressbook",
@@ -3440,7 +3439,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_FN_RIGHT_SHIFT] = "FnRightShift", [KEY_FRAMEBACK] = "FrameBack",
[KEY_FRAMEFORWARD] = "FrameForward", [KEY_FULL_SCREEN] = "FullScreen",
[KEY_GAMES] = "Games", [KEY_GRAPHICSEDITOR] = "GraphicsEditor",
- [KEY_HANGEUL] = "HanGeul", [KEY_HANGUP_PHONE] = "HangUpPhone",
+ [KEY_HANGUP_PHONE] = "HangUpPhone",
[KEY_IMAGES] = "Images", [KEY_KBD_LCD_MENU1] = "KbdLcdMenu1",
[KEY_KBD_LCD_MENU2] = "KbdLcdMenu2", [KEY_KBD_LCD_MENU3] = "KbdLcdMenu3",
[KEY_KBD_LCD_MENU4] = "KbdLcdMenu4", [KEY_KBD_LCD_MENU5] = "KbdLcdMenu5",
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index d2439399fb35..9e04c6d0fcc8 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -40,6 +40,9 @@ static bool hid_generic_match(struct hid_device *hdev,
if (ignore_special_driver)
return true;
+ if (hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER)
+ return true;
+
if (hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)
return false;
diff --git a/drivers/hid/hid-goodix-spi.c b/drivers/hid/hid-goodix-spi.c
index 0f87bf9c67cf..80c0288a3a38 100644
--- a/drivers/hid/hid-goodix-spi.c
+++ b/drivers/hid/hid-goodix-spi.c
@@ -19,6 +19,8 @@
#define GOODIX_HID_DESC_ADDR 0x1058C
#define GOODIX_HID_REPORT_DESC_ADDR 0x105AA
#define GOODIX_HID_SIGN_ADDR 0x10D32
+#define GOODIX_HID_CMD_ADDR 0x10364
+#define GOODIX_HID_REPORT_ADDR 0x22C8C
#define GOODIX_HID_GET_REPORT_CMD 0x02
#define GOODIX_HID_SET_REPORT_CMD 0x03
@@ -348,7 +350,7 @@ static int goodix_hid_check_ack_status(struct goodix_ts_data *ts, u32 *resp_len)
* - byte 0: Ack flag, value of 1 for data ready
* - bytes 1-2: Response data length
*/
- error = goodix_spi_read(ts, ts->hid_report_addr,
+ error = goodix_spi_read(ts, GOODIX_HID_CMD_ADDR,
&hdr, sizeof(hdr));
if (!error && (hdr.flag & GOODIX_HID_ACK_READY_FLAG)) {
len = le16_to_cpu(hdr.size);
@@ -356,7 +358,7 @@ static int goodix_hid_check_ack_status(struct goodix_ts_data *ts, u32 *resp_len)
dev_err(ts->dev, "hrd.size too short: %d", len);
return -EINVAL;
}
- *resp_len = len;
+ *resp_len = len - GOODIX_HID_PKG_LEN_SIZE;
return 0;
}
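Editorial note: the resp_len fix above strips the two length bytes that the ack header's size field counts against itself. A minimal sketch of the header parse, treating the flag value and layout as assumptions mirrored from the driver, on a little-endian host (the driver uses le16_to_cpu()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GOODIX_HID_ACK_READY_FLAG 0x01 /* assumed: bit 0 = data ready */
#define GOODIX_HID_PKG_LEN_SIZE   2

struct goodix_ack_hdr {
    uint8_t  flag;  /* bit 0: data ready */
    uint16_t size;  /* little-endian, includes the 2 length bytes */
} __attribute__((packed));

/* Returns 0 and the payload length (length field excluded), -1 if not ready */
static int parse_ack(const uint8_t *raw, uint32_t *resp_len)
{
    struct goodix_ack_hdr hdr;

    memcpy(&hdr, raw, sizeof(hdr));
    if (!(hdr.flag & GOODIX_HID_ACK_READY_FLAG))
        return -1;
    /* hdr.size counts its own two length bytes, so strip them */
    *resp_len = hdr.size - GOODIX_HID_PKG_LEN_SIZE;
    return 0;
}

int main(void)
{
    const uint8_t raw[] = { 0x01, 0x0a, 0x00 }; /* ready, size = 10 */
    uint32_t len;

    if (!parse_ack(raw, &len))
        printf("payload length: %u\n", len); /* -> 8 */
    return 0;
}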
@@ -431,7 +433,7 @@ static int goodix_hid_get_raw_report(struct hid_device *hid,
tx_len += args_len;
/* Step1: write report request info */
- error = goodix_spi_write(ts, ts->hid_report_addr, tmp_buf, tx_len);
+ error = goodix_spi_write(ts, GOODIX_HID_CMD_ADDR, tmp_buf, tx_len);
if (error) {
dev_err(ts->dev, "failed send read feature cmd, %d", error);
return error;
@@ -446,9 +448,12 @@ static int goodix_hid_get_raw_report(struct hid_device *hid,
if (error)
return error;
- len = min(len, response_data_len - GOODIX_HID_PKG_LEN_SIZE);
+ /* Empty report response */
+ if (!response_data_len)
+ return 0;
+ len = min(len, response_data_len);
/* Step3: read response data(skip 2bytes of hid pkg length) */
- error = goodix_spi_read(ts, ts->hid_report_addr +
+ error = goodix_spi_read(ts, GOODIX_HID_CMD_ADDR +
GOODIX_HID_ACK_HEADER_SIZE +
GOODIX_HID_PKG_LEN_SIZE, buf, len);
if (error) {
@@ -518,7 +523,7 @@ static int goodix_hid_set_raw_report(struct hid_device *hid,
memcpy(tmp_buf + tx_len, buf, len);
tx_len += len;
- error = goodix_spi_write(ts, ts->hid_report_addr, tmp_buf, tx_len);
+ error = goodix_spi_write(ts, GOODIX_HID_CMD_ADDR, tmp_buf, tx_len);
if (error) {
dev_err(ts->dev, "failed send report: %*ph", tx_len, tmp_buf);
return error;
@@ -697,12 +702,7 @@ static int goodix_spi_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(ts->reset_gpio),
"failed to request reset gpio\n");
- error = device_property_read_u32(dev, "goodix,hid-report-addr",
- &ts->hid_report_addr);
- if (error)
- return dev_err_probe(dev, error,
- "failed get hid report addr\n");
-
+ ts->hid_report_addr = GOODIX_HID_REPORT_ADDR;
error = goodix_dev_confirm(ts);
if (error)
return error;
@@ -749,7 +749,7 @@ static int goodix_spi_set_power(struct goodix_ts_data *ts, int power_state)
power_control_cmd[5] = power_state;
guard(mutex)(&ts->hid_request_lock);
- error = goodix_spi_write(ts, ts->hid_report_addr, power_control_cmd,
+ error = goodix_spi_write(ts, GOODIX_HID_CMD_ADDR, power_control_cmd,
sizeof(power_control_cmd));
if (error) {
dev_err(ts->dev, "failed set power mode: %s",
@@ -786,6 +786,14 @@ static const struct acpi_device_id goodix_spi_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, goodix_spi_acpi_match);
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id goodix_spi_of_match[] = {
+ { .compatible = "goodix,gt7986u-spifw", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, goodix_spi_of_match);
+#endif
+
static const struct spi_device_id goodix_spi_ids[] = {
{ "gt7986u" },
{ },
@@ -796,6 +804,7 @@ static struct spi_driver goodix_spi_driver = {
.driver = {
.name = "goodix-spi-hid",
.acpi_match_table = ACPI_PTR(goodix_spi_acpi_match),
+ .of_match_table = of_match_ptr(goodix_spi_of_match),
.pm = pm_sleep_ptr(&goodix_spi_pm_ops),
},
.probe = goodix_spi_probe,
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 22683ec819aa..0f292b5d3e26 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -284,7 +284,7 @@ MODULE_DEVICE_TABLE(of, cbas_ec_of_match);
static struct platform_driver cbas_ec_driver = {
.probe = cbas_ec_probe,
- .remove_new = cbas_ec_remove,
+ .remove = cbas_ec_remove,
.driver = {
.name = "cbas_ec",
.acpi_match_table = ACPI_PTR(cbas_ec_acpi_ids),
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index f33485d83d24..0fb210e40a41 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -422,6 +422,25 @@ static int mousevsc_hid_raw_request(struct hid_device *hid,
return 0;
}
+static int mousevsc_hid_probe(struct hid_device *hid_dev, const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hid_dev);
+ if (ret) {
+ hid_err(hid_dev, "parse failed\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hid_dev, HID_CONNECT_HIDINPUT | HID_CONNECT_HIDDEV);
+ if (ret) {
+ hid_err(hid_dev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct hid_ll_driver mousevsc_ll_driver = {
.parse = mousevsc_hid_parse,
.open = mousevsc_hid_open,
@@ -431,7 +450,16 @@ static const struct hid_ll_driver mousevsc_ll_driver = {
.raw_request = mousevsc_hid_raw_request,
};
-static struct hid_driver mousevsc_hid_driver;
+static const struct hid_device_id mousevsc_devices[] = {
+ { HID_DEVICE(BUS_VIRTUAL, HID_GROUP_ANY, 0x045E, 0x0621) },
+ { }
+};
+
+static struct hid_driver mousevsc_hid_driver = {
+ .name = "hid-hyperv",
+ .id_table = mousevsc_devices,
+ .probe = mousevsc_hid_probe,
+};
static int mousevsc_probe(struct hv_device *device,
const struct hv_vmbus_device_id *dev_id)
@@ -473,7 +501,6 @@ static int mousevsc_probe(struct hv_device *device,
}
hid_dev->ll_driver = &mousevsc_ll_driver;
- hid_dev->driver = &mousevsc_hid_driver;
hid_dev->bus = BUS_VIRTUAL;
hid_dev->vendor = input_dev->hid_dev_info.vendor;
hid_dev->product = input_dev->hid_dev_info.product;
@@ -488,20 +515,6 @@ static int mousevsc_probe(struct hv_device *device,
if (ret)
goto probe_err2;
-
- ret = hid_parse(hid_dev);
- if (ret) {
- hid_err(hid_dev, "parse failed\n");
- goto probe_err2;
- }
-
- ret = hid_hw_start(hid_dev, HID_CONNECT_HIDINPUT | HID_CONNECT_HIDDEV);
-
- if (ret) {
- hid_err(hid_dev, "hw start failed\n");
- goto probe_err2;
- }
-
device_init_wakeup(&device->device, true);
input_dev->connected = true;
@@ -579,12 +592,23 @@ static struct hv_driver mousevsc_drv = {
static int __init mousevsc_init(void)
{
- return vmbus_driver_register(&mousevsc_drv);
+ int ret;
+
+ ret = hid_register_driver(&mousevsc_hid_driver);
+ if (ret)
+ return ret;
+
+ ret = vmbus_driver_register(&mousevsc_drv);
+ if (ret)
+ hid_unregister_driver(&mousevsc_hid_driver);
+
+ return ret;
}
static void __exit mousevsc_exit(void)
{
vmbus_driver_unregister(&mousevsc_drv);
+ hid_unregister_driver(&mousevsc_hid_driver);
}
MODULE_LICENSE("GPL");
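Editorial note: mousevsc_init() now performs two registrations and unwinds the first if the second fails, while the exit path tears down in reverse order. A userspace sketch of the ordering, with the four stub functions standing in for the hid/vmbus registration calls:

#include <stdio.h>

static int hid_register(void)     { puts("hid registered");     return 0; }
static void hid_unregister(void)  { puts("hid unregistered"); }
static int vmbus_register(void)   { puts("vmbus registered");   return -1; /* simulate failure */ }
static void vmbus_unregister(void){ puts("vmbus unregistered"); }

/* The pattern from the hunk: undo the first registration if the second
 * fails, so a failed init leaves nothing behind. */
static int init(void)
{
    int ret = hid_register();

    if (ret)
        return ret;
    ret = vmbus_register();
    if (ret)
        hid_unregister();
    return ret;
}

static void module_exit_path(void)
{
    /* reverse order of registration, as in mousevsc_exit() */
    vmbus_unregister();
    hid_unregister();
}

int main(void)
{
    if (!init())
        module_exit_path();
    return 0;
}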
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 86820a3d9766..c448de53bf91 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -94,6 +94,7 @@
#define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
+#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC 0x0324
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f
#define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214
@@ -505,10 +506,10 @@
#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
#define I2C_VENDOR_ID_GOODIX 0x27c6
-#define I2C_DEVICE_ID_GOODIX_01E0 0x01e0
#define I2C_DEVICE_ID_GOODIX_01E8 0x01e8
#define I2C_DEVICE_ID_GOODIX_01E9 0x01e9
#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
+#define I2C_DEVICE_ID_GOODIX_0D42 0x0d42
#define USB_VENDOR_ID_GOODTOUCH 0x1aad
#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f
@@ -734,6 +735,10 @@
#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2 0x501A
#define USB_DEVICE_ID_KYE_PENSKETCH_T609A 0x501B
+#define USB_VENDOR_ID_KYSONA 0x3554
+#define USB_DEVICE_ID_KYSONA_M600_DONGLE 0xF57C
+#define USB_DEVICE_ID_KYSONA_M600_WIRED 0xF57D
+
#define USB_VENDOR_ID_LABTEC 0x1020
#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
#define USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE 0x8888
@@ -868,6 +873,7 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
+#define USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER 0xc548
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
@@ -1036,6 +1042,8 @@
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES 0xc057
#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES 0xc058
+#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES 0x430c
+#define USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES 0x431e
#define USB_VENDOR_ID_PANASONIC 0x04da
#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
@@ -1080,6 +1088,8 @@
#define USB_VENDOR_ID_PRODIGE 0x05af
#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
+#define I2C_VENDOR_ID_QTEC 0x6243
+
#define USB_VENDOR_ID_QUANTA 0x0408
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fda9dce3da99..9d80635a91eb 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -810,10 +810,23 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
- if ((usage->hid & 0xf0) == 0x90) { /* SystemControl*/
- switch (usage->hid & 0xf) {
- case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break;
- default: goto ignore;
+ if ((usage->hid & 0xf0) == 0x90) { /* SystemControl & D-pad */
+ switch (usage->hid) {
+ case HID_GD_UP: usage->hat_dir = 1; break;
+ case HID_GD_DOWN: usage->hat_dir = 5; break;
+ case HID_GD_RIGHT: usage->hat_dir = 3; break;
+ case HID_GD_LEFT: usage->hat_dir = 7; break;
+ case HID_GD_DO_NOT_DISTURB:
+ map_key_clear(KEY_DO_NOT_DISTURB); break;
+ default: goto unknown;
+ }
+
+ if (usage->hid <= HID_GD_LEFT) {
+ if (field->dpad) {
+ map_abs(field->dpad);
+ goto ignore;
+ }
+ map_abs(ABS_HAT0X);
}
break;
}
@@ -844,22 +857,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
if (field->application == HID_GD_SYSTEM_CONTROL)
goto ignore;
- if ((usage->hid & 0xf0) == 0x90) { /* D-pad */
- switch (usage->hid) {
- case HID_GD_UP: usage->hat_dir = 1; break;
- case HID_GD_DOWN: usage->hat_dir = 5; break;
- case HID_GD_RIGHT: usage->hat_dir = 3; break;
- case HID_GD_LEFT: usage->hat_dir = 7; break;
- default: goto unknown;
- }
- if (field->dpad) {
- map_abs(field->dpad);
- goto ignore;
- }
- map_abs(ABS_HAT0X);
- break;
- }
-
switch (usage->hid) {
/* These usage IDs map directly to the usage codes. */
case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
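Editorial note: the consolidated hunk above stores hat directions (1=up, 3=right, 5=down, 7=left) before mapping the usage to ABS_HAT0X. A simplified sketch of how such a direction decodes into the two hat axes; the real conversion lives in the HID input core, so this is illustrative only:

#include <stdio.h>

static void hat_to_axes(int hat_dir, int *x, int *y)
{
    static const int map[8][2] = {
        /* dir 1..8: N, NE, E, SE, S, SW, W, NW */
        { 0, -1 }, { 1, -1 }, { 1, 0 },  { 1, 1 },
        { 0, 1 },  { -1, 1 }, { -1, 0 }, { -1, -1 },
    };

    if (hat_dir < 1 || hat_dir > 8) { /* 0 or out of range: centered */
        *x = *y = 0;
        return;
    }
    *x = map[hat_dir - 1][0];
    *y = map[hat_dir - 1][1];
}

int main(void)
{
    int x, y;

    hat_to_axes(3, &x, &y); /* right */
    printf("ABS_HAT0X=%d ABS_HAT0Y=%d\n", x, y); /* -> 1 0 */
    return 0;
}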
diff --git a/drivers/hid/hid-kysona.c b/drivers/hid/hid-kysona.c
new file mode 100644
index 000000000000..d4c0406b3323
--- /dev/null
+++ b/drivers/hid/hid-kysona.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * USB HID driver for the Kysona M600 mice.
+ *
+ * Copyright (c) 2024 Lode Willems <me@lodewillems.com>
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+
+#define BATTERY_TIMEOUT_MS 5000
+
+#define BATTERY_REPORT_ID 4
+
+struct kysona_drvdata {
+ struct hid_device *hdev;
+ bool online;
+
+ struct power_supply_desc battery_desc;
+ struct power_supply *battery;
+ u8 battery_capacity;
+ bool battery_charging;
+ u16 battery_voltage;
+ struct delayed_work battery_work;
+};
+
+static enum power_supply_property kysona_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_ONLINE
+};
+
+static int kysona_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct kysona_drvdata *drv_data = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = drv_data->online;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (drv_data->online)
+ val->intval = drv_data->battery_charging ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = drv_data->battery_capacity;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ /* hardware reports voltage in mV. sysfs expects uV */
+ val->intval = drv_data->battery_voltage * 1000;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = drv_data->hdev->name;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static const char kysona_battery_request[] = {
+ 0x08, BATTERY_REPORT_ID, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49
+};
+
+static int kysona_m600_fetch_battery(struct hid_device *hdev)
+{
+ u8 *write_buf;
+ int ret;
+
+ /* Request battery information */
+ write_buf = kmemdup(kysona_battery_request, sizeof(kysona_battery_request), GFP_KERNEL);
+ if (!write_buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, kysona_battery_request[0],
+ write_buf, sizeof(kysona_battery_request),
+ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ if (ret < (int)sizeof(kysona_battery_request)) {
+ hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret);
+ ret = -ENODATA;
+ }
+ kfree(write_buf);
+ return ret;
+}
+
+static void kysona_fetch_battery(struct hid_device *hdev)
+{
+ int ret = kysona_m600_fetch_battery(hdev);
+
+ if (ret < 0)
+ hid_dbg(hdev,
+ "Battery query failed (err: %d)\n", ret);
+}
+
+static void kysona_battery_timer_tick(struct work_struct *work)
+{
+ struct kysona_drvdata *drv_data = container_of(work,
+ struct kysona_drvdata, battery_work.work);
+ struct hid_device *hdev = drv_data->hdev;
+
+ kysona_fetch_battery(hdev);
+ schedule_delayed_work(&drv_data->battery_work,
+ msecs_to_jiffies(BATTERY_TIMEOUT_MS));
+}
+
+static int kysona_battery_probe(struct hid_device *hdev)
+{
+ struct kysona_drvdata *drv_data = hid_get_drvdata(hdev);
+ struct power_supply_config pscfg = { .drv_data = drv_data };
+ int ret = 0;
+
+ drv_data->online = false;
+ drv_data->battery_capacity = 100;
+ drv_data->battery_voltage = 4200;
+
+ drv_data->battery_desc.properties = kysona_battery_props;
+ drv_data->battery_desc.num_properties = ARRAY_SIZE(kysona_battery_props);
+ drv_data->battery_desc.get_property = kysona_battery_get_property;
+ drv_data->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY;
+ drv_data->battery_desc.use_for_apm = 0;
+ drv_data->battery_desc.name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "kysona-%s-battery",
+ strlen(hdev->uniq) ?
+ hdev->uniq : dev_name(&hdev->dev));
+ if (!drv_data->battery_desc.name)
+ return -ENOMEM;
+
+ drv_data->battery = devm_power_supply_register(&hdev->dev,
+ &drv_data->battery_desc, &pscfg);
+ if (IS_ERR(drv_data->battery)) {
+ ret = PTR_ERR(drv_data->battery);
+ drv_data->battery = NULL;
+ hid_err(hdev, "Unable to register battery device\n");
+ return ret;
+ }
+
+ power_supply_powers(drv_data->battery, &hdev->dev);
+
+ INIT_DELAYED_WORK(&drv_data->battery_work, kysona_battery_timer_tick);
+ kysona_fetch_battery(hdev);
+ schedule_delayed_work(&drv_data->battery_work,
+ msecs_to_jiffies(BATTERY_TIMEOUT_MS));
+
+ return ret;
+}
+
+static int kysona_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct kysona_drvdata *drv_data;
+ struct usb_interface *usbif;
+
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
+ usbif = to_usb_interface(hdev->dev.parent);
+
+ drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
+ if (!drv_data)
+ return -ENOMEM;
+
+ hid_set_drvdata(hdev, drv_data);
+ drv_data->hdev = hdev;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret)
+ return ret;
+
+ if (usbif->cur_altsetting->desc.bInterfaceNumber == 1) {
+ ret = kysona_battery_probe(hdev);
+ if (ret < 0)
+ hid_err(hdev, "Kysona hid battery_probe failed: %d\n", ret);
+ }
+
+ return 0;
+}
+
+static int kysona_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct kysona_drvdata *drv_data = hid_get_drvdata(hdev);
+
+ if (drv_data->battery && size == sizeof(kysona_battery_request) &&
+ data[0] == 8 && data[1] == BATTERY_REPORT_ID) {
+ drv_data->battery_capacity = data[6];
+ drv_data->battery_charging = data[7];
+ drv_data->battery_voltage = (data[8] << 8) | data[9];
+ drv_data->online = true;
+ }
+
+ return 0;
+}
+
+static void kysona_remove(struct hid_device *hdev)
+{
+ struct kysona_drvdata *drv_data = hid_get_drvdata(hdev);
+
+ if (drv_data->battery)
+ cancel_delayed_work_sync(&drv_data->battery_work);
+
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id kysona_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYSONA, USB_DEVICE_ID_KYSONA_M600_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYSONA, USB_DEVICE_ID_KYSONA_M600_WIRED) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, kysona_devices);
+
+static struct hid_driver kysona_driver = {
+ .name = "kysona",
+ .id_table = kysona_devices,
+ .probe = kysona_probe,
+ .raw_event = kysona_raw_event,
+ .remove = kysona_remove
+};
+module_hid_driver(kysona_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HID driver for Kysona devices");
+MODULE_AUTHOR("Lode Willems <me@lodewillems.com>");
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 3b0c779ce8f7..4d00bc4d656e 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -32,11 +32,22 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
+#if IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE)
+#include <linux/platform_profile.h>
+#endif /* CONFIG_ACPI_PLATFORM_PROFILE */
+
#include "hid-ids.h"
/* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */
#define LENOVO_KEY_MICMUTE KEY_F20
+/* HID raw events for ThinkPad X12 Tabs */
+#define TP_X12_RAW_HOTKEY_FN_F4 0x00020003
+#define TP_X12_RAW_HOTKEY_FN_F8 0x38001003
+#define TP_X12_RAW_HOTKEY_FN_F10 0x00000803
+#define TP_X12_RAW_HOTKEY_FN_F12 0x00000403
+#define TP_X12_RAW_HOTKEY_FN_SPACE 0x18001003
+
struct lenovo_drvdata {
u8 led_report[3]; /* Must be first for proper alignment */
int led_state;
@@ -71,6 +82,14 @@ struct lenovo_drvdata {
#define TP10UBKBD_LED_OFF 1
#define TP10UBKBD_LED_ON 2
+/* Helper to report a HID raw event as a key press and release */
+static inline void report_key_event(struct input_dev *input, int keycode)
+{
+ input_report_key(input, keycode, 1);
+ input_report_key(input, keycode, 0);
+ input_sync(input);
+}
+
static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
enum led_brightness value)
{
@@ -472,7 +491,10 @@ static int lenovo_input_mapping(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field,
usage, bit, max);
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB3:
return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max);
default:
return 0;
@@ -581,8 +603,11 @@ static ssize_t attr_fn_lock_store(struct device *dev,
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
lenovo_features_set_cptkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
if (ret)
return ret;
@@ -678,6 +703,62 @@ static const struct attribute_group lenovo_attr_group_cptkbd = {
.attrs = lenovo_attributes_cptkbd,
};
+/* Handle the ThinkPad X12 Tab's HID raw events for Fn hotkeys */
+static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
+{
+ struct hid_input *hidinput;
+ struct input_dev *input = NULL;
+
+ /* Iterate through all associated input devices */
+ list_for_each_entry(hidinput, &hdev->inputs, list) {
+ input = hidinput->input;
+ if (!input)
+ continue;
+
+ switch (raw_data) {
+ /* Fn-F4: userspace expects F20 for mic mute */
+ case TP_X12_RAW_HOTKEY_FN_F4:
+ report_key_event(input, LENOVO_KEY_MICMUTE);
+ return 1;
+ /* Fn-F8 toggles airplane mode or cycles power modes, depending on the device */
+ case TP_X12_RAW_HOTKEY_FN_F8:
+ /*
+ * On the TP X12 TAB, Fn-F8 toggles airplane mode,
+ * whereas on the TP X12 TAB2 it cycles through
+ * platform power modes.
+ */
+ if (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) {
+ report_key_event(input, KEY_RFKILL);
+ return 1;
+ }
+#if IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE)
+ else {
+ platform_profile_cycle();
+ return 1;
+ }
+#endif /* CONFIG_ACPI_PLATFORM_PROFILE */
+ return 0;
+ case TP_X12_RAW_HOTKEY_FN_F10:
+ /* The TAB uses pick-up-phone, the TAB2 the snipping tool */
+ (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) ?
+ report_key_event(input, KEY_PICKUP_PHONE) :
+ report_key_event(input, KEY_SELECTIVE_SCREENSHOT);
+ return 1;
+ case TP_X12_RAW_HOTKEY_FN_F12:
+ /* Bookmarks/star key */
+ report_key_event(input, KEY_BOOKMARKS);
+ return 1;
+ case TP_X12_RAW_HOTKEY_FN_SPACE:
+ /* Keyboard LED backlight toggle */
+ report_key_event(input, KEY_KBDILLUMTOGGLE);
+ return 1;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
static int lenovo_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
@@ -695,6 +776,15 @@ static int lenovo_raw_event(struct hid_device *hdev,
data[2] = 0x01;
}
+ /*
+ * On Lenovo TP X12 Tab keyboards, the Fn+XX hotkeys arrive as HID raw
+ * data with report ID 0x03, e.g. the raw data for mic mute is 0x00020003.
+ */
+ if (unlikely((hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB
+ || hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB2)
+ && size >= 3 && report->id == 0x03))
+ return lenovo_raw_event_TP_X12_tab(hdev, le32_to_cpu(*(u32 *)data));
+
return 0;
}
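Editorial note: the hunk above assembles the hotkey code with le32_to_cpu(*(u32 *)data), which assumes the buffer is 4-byte aligned. A sketch of the byte-wise equivalent (what the kernel's get_unaligned_le32() helper does), showing how the mic-mute bytes produce the 0x00020003 code from the comment:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

#define TP_X12_RAW_HOTKEY_FN_F4 0x00020003 /* value from the patch */

int main(void)
{
    /* byte 0 is the 0x03 report ID, which forms the low byte of the code */
    const uint8_t report[] = { 0x03, 0x00, 0x02, 0x00 };

    if (get_le32(report) == TP_X12_RAW_HOTKEY_FN_F4)
        puts("Fn-F4: mic mute");
    return 0;
}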
@@ -774,8 +864,11 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
case USB_DEVICE_ID_LENOVO_TPIIUSBKBD:
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
return lenovo_event_cptkbd(hdev, field, usage, value);
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB3:
return lenovo_event_tp10ubkbd(hdev, field, usage, value);
default:
return 0;
@@ -1054,8 +1147,11 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
case USB_DEVICE_ID_LENOVO_TPKBD:
lenovo_led_set_tpkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
break;
}
@@ -1239,8 +1335,15 @@ static int lenovo_probe_tp10ubkbd(struct hid_device *hdev)
* We cannot read the state, only set it, so we force it to on here
* (which should be a no-op) to make sure that our state matches the
* keyboard's FN-lock state. This is the same as what Windows does.
+ *
+ * For the X12 TAB and TAB2, the default Windows behaviour is Fn-lock
+ * off, so check for these models and match that default.
*/
- data->fn_lock = true;
+
+ data->fn_lock = !(hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB ||
+ hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB2);
+
lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, data->fn_lock);
ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd);
@@ -1284,8 +1387,11 @@ static int lenovo_probe(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
ret = lenovo_probe_cptkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_probe_tp10ubkbd(hdev);
break;
default:
@@ -1370,8 +1476,11 @@ static void lenovo_remove(struct hid_device *hdev)
case USB_DEVICE_ID_LENOVO_TPIIBTKBD:
lenovo_remove_cptkbd(hdev);
break;
+ case USB_DEVICE_ID_LENOVO_X12_TAB:
+ case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB3:
lenovo_remove_tp10ubkbd(hdev);
break;
}
@@ -1421,6 +1530,12 @@ static const struct hid_device_id lenovo_devices[] = {
*/
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB2) },
{ }
};
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index e3fcf1353fb3..c0a138f21ca4 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -1350,7 +1350,8 @@ int lg4ff_init(struct hid_device *hid)
/* Initialize device properties */
if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) {
- BUG_ON(mmode_idx == -1);
+ if (WARN_ON(mmode_idx == -1))
+ return -EINVAL;
mmode_wheel = &lg4ff_multimode_wheels[mmode_idx];
}
lg4ff_init_wheel_data(&entry->wdata, &lg4ff_devices[i], mmode_wheel, real_product_id);
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index cf7a6986cf20..10a3bc5f931b 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -928,7 +928,7 @@ static int hidpp_unifying_init(struct hidpp_device *hidpp)
#define CMD_ROOT_GET_PROTOCOL_VERSION 0x10
static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
- u8 *feature_index, u8 *feature_type)
+ u8 *feature_index)
{
struct hidpp_report response;
int ret;
@@ -945,7 +945,6 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
return -ENOENT;
*feature_index = response.fap.params[0];
- *feature_type = response.fap.params[1];
return ret;
}
@@ -1012,13 +1011,11 @@ print_version:
static int hidpp_get_serial(struct hidpp_device *hidpp, u32 *serial)
{
struct hidpp_report response;
- u8 feature_type;
u8 feature_index;
int ret;
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_DEVICE_INFORMATION,
- &feature_index,
- &feature_type);
+ &feature_index);
if (ret)
return ret;
@@ -1125,7 +1122,6 @@ static int hidpp_devicenametype_get_device_name(struct hidpp_device *hidpp,
static char *hidpp_get_device_name(struct hidpp_device *hidpp)
{
- u8 feature_type;
u8 feature_index;
u8 __name_length;
char *name;
@@ -1133,7 +1129,7 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp)
int ret;
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_GET_DEVICE_NAME_TYPE,
- &feature_index, &feature_type);
+ &feature_index);
if (ret)
return NULL;
@@ -1300,15 +1296,13 @@ static int hidpp20_batterylevel_get_battery_info(struct hidpp_device *hidpp,
static int hidpp20_query_battery_info_1000(struct hidpp_device *hidpp)
{
- u8 feature_type;
int ret;
int status, capacity, next_capacity, level;
if (hidpp->battery.feature_index == 0xff) {
ret = hidpp_root_get_feature(hidpp,
HIDPP_PAGE_BATTERY_LEVEL_STATUS,
- &hidpp->battery.feature_index,
- &feature_type);
+ &hidpp->battery.feature_index);
if (ret)
return ret;
}
@@ -1489,14 +1483,12 @@ static int hidpp20_map_battery_capacity(struct hid_device *hid_dev, int voltage)
static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp)
{
- u8 feature_type;
int ret;
int status, voltage, level, charge_type;
if (hidpp->battery.voltage_feature_index == 0xff) {
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE,
- &hidpp->battery.voltage_feature_index,
- &feature_type);
+ &hidpp->battery.voltage_feature_index);
if (ret)
return ret;
}
@@ -1692,7 +1684,6 @@ static int hidpp20_unifiedbattery_get_status(struct hidpp_device *hidpp,
static int hidpp20_query_battery_info_1004(struct hidpp_device *hidpp)
{
- u8 feature_type;
int ret;
u8 state_of_charge;
int status, level;
@@ -1700,8 +1691,7 @@ static int hidpp20_query_battery_info_1004(struct hidpp_device *hidpp)
if (hidpp->battery.feature_index == 0xff) {
ret = hidpp_root_get_feature(hidpp,
HIDPP_PAGE_UNIFIED_BATTERY,
- &hidpp->battery.feature_index,
- &feature_type);
+ &hidpp->battery.feature_index);
if (ret)
return ret;
}
@@ -1834,14 +1824,9 @@ static int hidpp_battery_get_property(struct power_supply *psy,
static int hidpp_get_wireless_feature_index(struct hidpp_device *hidpp, u8 *feature_index)
{
- u8 feature_type;
- int ret;
-
- ret = hidpp_root_get_feature(hidpp,
- HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
- feature_index, &feature_type);
-
- return ret;
+ return hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_WIRELESS_DEVICE_STATUS,
+ feature_index);
}
/* -------------------------------------------------------------------------- */
@@ -1952,14 +1937,11 @@ static bool hidpp20_get_adc_measurement_1f20(struct hidpp_device *hidpp,
static int hidpp20_query_adc_measurement_info_1f20(struct hidpp_device *hidpp)
{
- u8 feature_type;
-
if (hidpp->battery.adc_measurement_feature_index == 0xff) {
int ret;
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_ADC_MEASUREMENT,
- &hidpp->battery.adc_measurement_feature_index,
- &feature_type);
+ &hidpp->battery.adc_measurement_feature_index);
if (ret)
return ret;
@@ -2014,15 +1996,13 @@ static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp,
bool enabled, u8 *multiplier)
{
u8 feature_index;
- u8 feature_type;
int ret;
u8 params[1];
struct hidpp_report response;
ret = hidpp_root_get_feature(hidpp,
HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
- &feature_index,
- &feature_type);
+ &feature_index);
if (ret)
return ret;
@@ -2049,12 +2029,11 @@ static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp,
u8 *multiplier)
{
u8 feature_index;
- u8 feature_type;
int ret;
struct hidpp_report response;
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
- &feature_index, &feature_type);
+ &feature_index);
if (ret)
goto return_default;
@@ -2076,13 +2055,12 @@ static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert,
bool high_resolution, bool use_hidpp)
{
u8 feature_index;
- u8 feature_type;
int ret;
u8 params[1];
struct hidpp_report response;
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
- &feature_index, &feature_type);
+ &feature_index);
if (ret)
return ret;
@@ -2111,14 +2089,12 @@ static int hidpp_solar_request_battery_event(struct hidpp_device *hidpp)
{
struct hidpp_report response;
u8 params[2] = { 1, 1 };
- u8 feature_type;
int ret;
if (hidpp->battery.feature_index == 0xff) {
ret = hidpp_root_get_feature(hidpp,
HIDPP_PAGE_SOLAR_KEYBOARD,
- &hidpp->battery.solar_feature_index,
- &feature_type);
+ &hidpp->battery.solar_feature_index);
if (ret)
return ret;
}
@@ -2522,7 +2498,7 @@ static void hidpp_ff_work_handler(struct work_struct *w)
/* regular effect destroyed */
data->effect_ids[wd->params[0]-1] = -1;
else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
- /* autocenter spring destoyed */
+ /* autocenter spring destroyed */
data->slot_autocenter = 0;
break;
case HIDPP_FF_SET_GLOBAL_GAINS:
@@ -3098,11 +3074,10 @@ static int wtp_get_config(struct hidpp_device *hidpp)
{
struct wtp_data *wd = hidpp->private_data;
struct hidpp_touchpad_raw_info raw_info = {0};
- u8 feature_type;
int ret;
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_TOUCHPAD_RAW_XY,
- &wd->mt_feature_index, &feature_type);
+ &wd->mt_feature_index);
if (ret)
/* means that the device is not powered up */
return ret;
@@ -3296,13 +3271,13 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
120);
}
- v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12);
+ v = sign_extend32(hid_field_extract(hdev, data + 3, 0, 12), 11);
input_report_rel(hidpp->input, REL_X, v);
- v = hid_snto32(hid_field_extract(hdev, data+3, 12, 12), 12);
+ v = sign_extend32(hid_field_extract(hdev, data + 3, 12, 12), 11);
input_report_rel(hidpp->input, REL_Y, v);
- v = hid_snto32(data[6], 8);
+ v = sign_extend32(data[6], 7);
if (v != 0)
hidpp_scroll_counter_handle_scroll(hidpp->input,
&hidpp->vertical_wheel_counter, v);
@@ -3362,12 +3337,11 @@ static int k400_disable_tap_to_click(struct hidpp_device *hidpp)
struct k400_private_data *k400 = hidpp->private_data;
struct hidpp_touchpad_fw_items items = {};
int ret;
- u8 feature_type;
if (!k400->feature_index) {
ret = hidpp_root_get_feature(hidpp,
HIDPP_PAGE_TOUCHPAD_FW_ITEMS,
- &k400->feature_index, &feature_type);
+ &k400->feature_index);
if (ret)
/* means that the device is not powered up */
return ret;
@@ -3439,14 +3413,13 @@ static int g920_get_config(struct hidpp_device *hidpp,
struct hidpp_ff_private_data *data)
{
struct hidpp_report response;
- u8 feature_type;
int ret;
memset(data, 0, sizeof(*data));
/* Find feature and store for later use */
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
- &data->feature_index, &feature_type);
+ &data->feature_index);
if (ret)
return ret;
@@ -3735,17 +3708,16 @@ static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp)
if (hidpp->protocol_major >= 2) {
u8 feature_index;
- u8 feature_type;
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
- &feature_index, &feature_type);
+ &feature_index);
if (!ret) {
hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL;
hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scroll wheel\n");
return 0;
}
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
- &feature_index, &feature_type);
+ &feature_index);
if (!ret) {
hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL;
hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scrolling\n");
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 8a73b59e0827..a76f17158539 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -52,6 +52,7 @@ module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
#define TRACKPAD2_2021_BT_VERSION 0x110
+#define TRACKPAD_2024_BT_VERSION 0x314
#define TRACKPAD_REPORT_ID 0x28
#define TRACKPAD2_USB_REPORT_ID 0x02
@@ -227,7 +228,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
touch_minor = tdata[4];
state = tdata[7] & TOUCH_STATE_MASK;
down = state != TOUCH_STATE_NONE;
- } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
id = tdata[8] & 0xf;
x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19);
@@ -259,8 +262,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
/* If requested, emulate a scroll wheel by detecting small
* vertical touch motions.
*/
- if (emulate_scroll_wheel && (input->id.product !=
- USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)) {
+ if (emulate_scroll_wheel &&
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
unsigned long now = jiffies;
int step_x = msc->touches[id].scroll_x - x;
int step_y = msc->touches[id].scroll_y - y;
@@ -359,7 +363,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)
input_report_abs(input, ABS_MT_PRESSURE, pressure);
if (report_undeciphered) {
@@ -367,7 +373,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
else if (input->id.product !=
- USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ input->id.product !=
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)
input_event(input, EV_MSC, MSC_RAW, tdata[8]);
}
}
@@ -493,7 +501,9 @@ static int magicmouse_raw_event(struct hid_device *hdev,
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
- } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
input_mt_sync_frame(input);
input_report_key(input, BTN_MOUSE, clicks & 1);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
@@ -545,7 +555,9 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(REL_WHEEL_HI_RES, input->relbit);
__set_bit(REL_HWHEEL_HI_RES, input->relbit);
}
- } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
/* If the trackpad has been connected to a Mac, the name is
* automatically personalized, e.g., "José Expósito's Trackpad".
* When connected through Bluetooth, the personalized name is
@@ -556,9 +568,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
*/
if (hdev->vendor == BT_VENDOR_ID_APPLE) {
if (input->id.version == TRACKPAD2_2021_BT_VERSION)
+ input->name = "Apple Inc. Magic Trackpad 2021";
+ else if (input->id.version == TRACKPAD_2024_BT_VERSION) {
+ input->name = "Apple Inc. Magic Trackpad USB-C";
+ } else {
input->name = "Apple Inc. Magic Trackpad";
- else
- input->name = "Apple Inc. Magic Trackpad 2";
+ }
} else { /* USB_VENDOR_ID_APPLE */
input->name = hdev->name;
}
@@ -621,7 +636,9 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
MOUSE_RES_X);
input_abs_set_res(input, ABS_MT_POSITION_Y,
MOUSE_RES_Y);
- } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
input_set_abs_params(input, ABS_MT_PRESSURE, 0, 253, 0, 0);
input_set_abs_params(input, ABS_PRESSURE, 0, 253, 0, 0);
input_set_abs_params(input, ABS_MT_ORIENTATION, -3, 4, 0, 0);
@@ -660,7 +677,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
input_set_events_per_packet(input, 60);
if (report_undeciphered &&
- input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
__set_bit(EV_MSC, input->evbit);
__set_bit(MSC_RAW, input->mscbit);
}
@@ -685,7 +703,9 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
/* Magic Trackpad does not give relative data after switching to MT */
if ((hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD ||
- hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
+ hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ hi->input->id.product ==
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
field->flags & HID_MAIN_ITEM_RELATIVE)
return -1;
@@ -721,7 +741,8 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev)
int ret;
int feature_size;
- if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
if (hdev->vendor == BT_VENDOR_ID_APPLE) {
feature_size = sizeof(feature_mt_trackpad2_bt);
feature = feature_mt_trackpad2_bt;
@@ -766,7 +787,8 @@ static int magicmouse_fetch_battery(struct hid_device *hdev)
if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
(hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
- hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2))
+ hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC))
return -1;
report_enum = &hdev->report_enum[hdev->battery_report_type];
@@ -835,7 +857,9 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->vendor == USB_VENDOR_ID_APPLE &&
(id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
- (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && hdev->type != HID_TYPE_USBMOUSE)))
+ ((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
+ hdev->type != HID_TYPE_USBMOUSE)))
return 0;
if (!msc->input) {
@@ -850,7 +874,8 @@ static int magicmouse_probe(struct hid_device *hdev,
else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
report = hid_register_report(hdev, HID_INPUT_REPORT,
MOUSE2_REPORT_ID, 0);
- else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
if (id->vendor == BT_VENDOR_ID_APPLE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD2_BT_REPORT_ID, 0);
@@ -920,7 +945,8 @@ static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
if (hdev->vendor == USB_VENDOR_ID_APPLE &&
(hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
- hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
+ hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
*rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
hid_info(hdev,
"fixing up magicmouse battery report descriptor\n");
@@ -951,6 +977,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 },
{ }
};
MODULE_DEVICE_TABLE(hid, magic_mice);
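Editorial note: the magicmouse hunks add the USB-C trackpad ID to roughly a dozen separate two-product conditions. A hypothetical helper, not part of the patch, that would collapse those call sites to a single check (IDs taken from the hid-ids.h hunk earlier in this series):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2      0x0265
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC 0x0324

static bool is_trackpad2(uint32_t product)
{
    return product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
           product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC;
}

int main(void)
{
    printf("%d %d\n", is_trackpad2(0x0324), is_trackpad2(0x1234)); /* 1 0 */
    return 0;
}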
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 638e36c6d0f1..82900857bfd8 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -31,6 +31,7 @@
* [1] https://gitlab.freedesktop.org/libevdev/hid-tools
*/
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
@@ -83,6 +84,13 @@ enum latency_mode {
HID_LATENCY_HIGH = 1,
};
+enum report_mode {
+ TOUCHPAD_REPORT_NONE = 0,
+ TOUCHPAD_REPORT_BUTTONS = BIT(0),
+ TOUCHPAD_REPORT_CONTACTS = BIT(1),
+ TOUCHPAD_REPORT_ALL = TOUCHPAD_REPORT_BUTTONS | TOUCHPAD_REPORT_CONTACTS,
+};
+
#define MT_IO_FLAGS_RUNNING 0
#define MT_IO_FLAGS_ACTIVE_SLOTS 1
#define MT_IO_FLAGS_PENDING_SLOTS 2
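Editorial note: the new report_mode enum replaces the two booleans with bit flags, and the feature code tests them with !! to collapse a masked bit to 0/1. A minimal sketch of that usage:

#include <stdio.h>

enum report_mode {
    TOUCHPAD_REPORT_NONE     = 0,
    TOUCHPAD_REPORT_BUTTONS  = 1 << 0,
    TOUCHPAD_REPORT_CONTACTS = 1 << 1,
    TOUCHPAD_REPORT_ALL      = TOUCHPAD_REPORT_BUTTONS | TOUCHPAD_REPORT_CONTACTS,
};

int main(void)
{
    enum report_mode mode = TOUCHPAD_REPORT_BUTTONS;

    /* !! collapses the masked bit to 0/1, as in mt_need_to_apply_feature() */
    printf("surface=%d button=%d\n",
           !!(mode & TOUCHPAD_REPORT_CONTACTS),  /* -> 0 */
           !!(mode & TOUCHPAD_REPORT_BUTTONS));  /* -> 1 */
    return 0;
}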
@@ -1452,8 +1460,7 @@ static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
{
if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
(hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
- hdev->product == I2C_DEVICE_ID_GOODIX_01E9 ||
- hdev->product == I2C_DEVICE_ID_GOODIX_01E0)) {
+ hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
if (rdesc[607] == 0x15) {
rdesc[607] = 0x25;
dev_info(
@@ -1493,8 +1500,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage,
enum latency_mode latency,
- bool surface_switch,
- bool button_switch,
+ enum report_mode report_mode,
bool *inputmode_found)
{
struct mt_device *td = hid_get_drvdata(hdev);
@@ -1549,11 +1555,11 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
return true;
case HID_DG_SURFACESWITCH:
- field->value[index] = surface_switch;
+ field->value[index] = !!(report_mode & TOUCHPAD_REPORT_CONTACTS);
return true;
case HID_DG_BUTTONSWITCH:
- field->value[index] = button_switch;
+ field->value[index] = !!(report_mode & TOUCHPAD_REPORT_BUTTONS);
return true;
}
@@ -1561,7 +1567,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
}
static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
- bool surface_switch, bool button_switch)
+ enum report_mode report_mode)
{
struct hid_report_enum *rep_enum;
struct hid_report *rep;
@@ -1586,8 +1592,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
rep->field[i],
usage,
latency,
- surface_switch,
- button_switch,
+ report_mode,
&inputmode_found))
update_report = true;
}
@@ -1830,7 +1835,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev_warn(&hdev->dev, "Cannot allocate sysfs group for %s\n",
hdev->name);
- mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
return 0;
}
@@ -1842,9 +1847,9 @@ static int mt_suspend(struct hid_device *hdev, pm_message_t state)
/* High latency is desirable for power savings during S3/S0ix */
if ((td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP) ||
!hid_hw_may_wakeup(hdev))
- mt_set_modes(hdev, HID_LATENCY_HIGH, false, false);
+ mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_NONE);
else
- mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+ mt_set_modes(hdev, HID_LATENCY_HIGH, TOUCHPAD_REPORT_ALL);
return 0;
}
@@ -1852,7 +1857,7 @@ static int mt_suspend(struct hid_device *hdev, pm_message_t state)
static int mt_reset_resume(struct hid_device *hdev)
{
mt_release_contacts(hdev);
- mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
return 0;
}
@@ -1864,7 +1869,7 @@ static int mt_resume(struct hid_device *hdev)
hid_hw_idle(hdev, 0, 0, HID_REQ_SET_IDLE);
- mt_set_modes(hdev, HID_LATENCY_NORMAL, true, true);
+ mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
return 0;
}
@@ -2026,6 +2031,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_ELAN, 0x3148) },
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ELAN, 0x32ae) },
+
/* Elitegroup panel */
{ .driver_data = MT_CLS_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
@@ -2076,9 +2085,6 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
I2C_DEVICE_ID_GOODIX_01E9) },
- { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
- HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
- I2C_DEVICE_ID_GOODIX_01E0) },
/* GoodTouch panels */
{ .driver_data = MT_CLS_NSMU,
@@ -2095,6 +2101,11 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
0x347d, 0x7853) },
+ /* HONOR MagicBook Art 14 touchpad */
+ { .driver_data = MT_CLS_VTL,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ 0x35cc, 0x0104) },
+
/* Ilitek dual touch panel */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_ILITEK,
@@ -2137,6 +2148,10 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) },
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER) },
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -2299,6 +2314,11 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_SIS_TOUCH,
HID_ANY_ID) },
+ /* Hantick */
+ { .driver_data = MT_CLS_NSMU,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288) },
+
/* Generic MT device */
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 55153a2f7988..11ac246176ae 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -456,14 +456,13 @@ static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = {
{ /* sentinel */ },
};
-/*
- * "A", "B", and "C" are mapped positionally, rather than by label (e.g., "A"
- * gets assigned to BTN_EAST instead of BTN_A).
- */
static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = {
- { BTN_SOUTH, JC_BTN_A, },
- { BTN_EAST, JC_BTN_B, },
- { BTN_WEST, JC_BTN_R, },
+ { BTN_A, JC_BTN_A, },
+ { BTN_B, JC_BTN_B, },
+ { BTN_C, JC_BTN_R, },
+ { BTN_X, JC_BTN_X, }, /* MD/GEN 6B Only */
+ { BTN_Y, JC_BTN_Y, }, /* MD/GEN 6B Only */
+ { BTN_Z, JC_BTN_L, }, /* MD/GEN 6B Only */
{ BTN_SELECT, JC_BTN_ZR, },
{ BTN_START, JC_BTN_PLUS, },
{ BTN_MODE, JC_BTN_HOME, },
@@ -471,9 +470,6 @@ static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = {
{ /* sentinel */ },
};
-/*
- * N64's C buttons get assigned to d-pad directions and registered as buttons.
- */
static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = {
{ BTN_A, JC_BTN_A, },
{ BTN_B, JC_BTN_B, },
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index 83e3409d170c..8c28e982e09d 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -296,7 +296,7 @@ static void picolcd_fb_destroy(struct fb_info *info)
/* make sure no work is deferred */
fb_deferred_io_cleanup(info);
- /* No thridparty should ever unregister our framebuffer! */
+ /* No thirdparty should ever unregister our framebuffer! */
WARN_ON(fbdata->picolcd != NULL);
vfree((u8 *)info->fix.smem_start);
@@ -497,6 +497,10 @@ int picolcd_init_framebuffer(struct picolcd_data *data)
#endif
#endif
+#ifdef CONFIG_HID_PICOLCD_LCD
+ info->lcd_dev = data->lcd;
+#endif
+
fbdata = info->par;
spin_lock_init(&fbdata->lock);
fbdata->picolcd = data;
diff --git a/drivers/hid/hid-picolcd_lcd.c b/drivers/hid/hid-picolcd_lcd.c
index 061a33ba7b1d..318f19eac0e7 100644
--- a/drivers/hid/hid-picolcd_lcd.c
+++ b/drivers/hid/hid-picolcd_lcd.c
@@ -41,15 +41,9 @@ static int picolcd_set_contrast(struct lcd_device *ldev, int contrast)
return 0;
}
-static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb)
-{
- return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev));
-}
-
static const struct lcd_ops picolcd_lcdops = {
.get_contrast = picolcd_get_contrast,
.set_contrast = picolcd_set_contrast,
- .check_fb = picolcd_check_lcd_fb,
};
int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report)
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index 3d414ae194ac..25cfd964dc25 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -38,8 +38,10 @@
(usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
+#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1)
#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
+#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */
struct plt_drv_data {
unsigned long device_type;
@@ -137,6 +139,21 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
drv_data->last_volume_key_ts = cur_ts;
}
+ if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) {
+ unsigned long prev_ts, cur_ts;
+
+ /* Usages are filtered in plantronics_usages. */
+
+ if (!value) /* Handle key presses only. */
+ return 0;
+
+ prev_ts = drv_data->last_volume_key_ts;
+ cur_ts = jiffies;
+ if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT)
+ return 1; /* Ignore the followed opposite volume key. */
+
+ drv_data->last_volume_key_ts = cur_ts;
+ }
return 0;
}
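Editorial note: the new quirk drops a key press that arrives within 220 ms of the previous one, and only refreshes the timestamp when a press is accepted, exactly as the hunk does with jiffies. A userspace sketch of that debounce, with now_ms standing in for jiffies_to_msecs(jiffies):

#include <stdbool.h>
#include <stdio.h>

#define TIMEOUT_MS 220 /* PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT */

static unsigned long last_ms;

static bool should_ignore(unsigned long now_ms)
{
    if (now_ms - last_ms <= TIMEOUT_MS)
        return true; /* followed opposite volume key: drop it */
    last_ms = now_ms; /* only accepted presses refresh the timestamp */
    return false;
}

int main(void)
{
    printf("%d\n", should_ignore(1000)); /* first press:   0 */
    printf("%d\n", should_ignore(1100)); /* 100 ms later:  1 (dropped) */
    printf("%d\n", should_ignore(1400)); /* 300 ms later:  0 */
    return 0;
}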
@@ -210,6 +227,12 @@ static const struct hid_device_id plantronics_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
.driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES),
+ .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
+ USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES),
+ .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ }
};
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index d55aaabab1ed..3048297569c5 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -224,24 +224,24 @@ static ssize_t arvo_sysfs_read(struct file *fp,
}
static ssize_t arvo_sysfs_write_button(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
return arvo_sysfs_write(fp, kobj, buf, off, count,
sizeof(struct arvo_button), ARVO_COMMAND_BUTTON);
}
-static BIN_ATTR(button, 0220, NULL, arvo_sysfs_write_button,
- sizeof(struct arvo_button));
+static const BIN_ATTR(button, 0220, NULL, arvo_sysfs_write_button,
+ sizeof(struct arvo_button));
static ssize_t arvo_sysfs_read_info(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
return arvo_sysfs_read(fp, kobj, buf, off, count,
sizeof(struct arvo_info), ARVO_COMMAND_INFO);
}
-static BIN_ATTR(info, 0440, arvo_sysfs_read_info, NULL,
- sizeof(struct arvo_info));
+static const BIN_ATTR(info, 0440, arvo_sysfs_read_info, NULL,
+ sizeof(struct arvo_info));
static struct attribute *arvo_attrs[] = {
&dev_attr_mode_key.attr,
@@ -250,7 +250,7 @@ static struct attribute *arvo_attrs[] = {
NULL,
};
-static struct bin_attribute *arvo_bin_attributes[] = {
+static const struct bin_attribute *const arvo_bin_attributes[] = {
&bin_attr_button,
&bin_attr_info,
NULL,
@@ -258,7 +258,7 @@ static struct bin_attribute *arvo_bin_attributes[] = {
static const struct attribute_group arvo_group = {
.attrs = arvo_attrs,
- .bin_attrs = arvo_bin_attributes,
+ .bin_attrs_new = arvo_bin_attributes,
};
static const struct attribute_group *arvo_groups[] = {
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index 839ddfd931f0..0f9a2db04df9 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -46,8 +46,8 @@ ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj,
#define ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE) \
static ssize_t roccat_common2_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return roccat_common2_sysfs_write(fp, kobj, buf, off, count, \
SIZE, COMMAND); \
@@ -55,8 +55,8 @@ static ssize_t roccat_common2_sysfs_write_ ## thingy(struct file *fp, \
#define ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE) \
static ssize_t roccat_common2_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return roccat_common2_sysfs_read(fp, kobj, buf, off, count, \
SIZE, COMMAND); \
@@ -68,27 +68,27 @@ ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE)
#define ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(thingy, COMMAND, SIZE) \
ROCCAT_COMMON2_SYSFS_RW(thingy, COMMAND, SIZE); \
-static struct bin_attribute bin_attr_ ## thingy = { \
+static const struct bin_attribute bin_attr_ ## thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = SIZE, \
- .read = roccat_common2_sysfs_read_ ## thingy, \
- .write = roccat_common2_sysfs_write_ ## thingy \
+ .read_new = roccat_common2_sysfs_read_ ## thingy, \
+ .write_new = roccat_common2_sysfs_write_ ## thingy \
}
#define ROCCAT_COMMON2_BIN_ATTRIBUTE_R(thingy, COMMAND, SIZE) \
ROCCAT_COMMON2_SYSFS_R(thingy, COMMAND, SIZE); \
-static struct bin_attribute bin_attr_ ## thingy = { \
+static const struct bin_attribute bin_attr_ ## thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = SIZE, \
- .read = roccat_common2_sysfs_read_ ## thingy, \
+ .read_new = roccat_common2_sysfs_read_ ## thingy, \
}
#define ROCCAT_COMMON2_BIN_ATTRIBUTE_W(thingy, COMMAND, SIZE) \
ROCCAT_COMMON2_SYSFS_W(thingy, COMMAND, SIZE); \
-static struct bin_attribute bin_attr_ ## thingy = { \
+static const struct bin_attribute bin_attr_ ## thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = SIZE, \
- .write = roccat_common2_sysfs_write_ ## thingy \
+ .write_new = roccat_common2_sysfs_write_ ## thingy \
}
#endif
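The _new-suffixed callbacks and .bin_attrs_new used throughout this series come from the sysfs migration to const struct bin_attribute. A rough sketch of the transitional layout, assuming the union-based compatibility approach (field order and neighbouring members are illustrative, not the verbatim header):

	struct bin_attribute {
		struct attribute attr;
		size_t size;
		void *private;
		union {
			ssize_t (*read)(struct file *, struct kobject *,
					struct bin_attribute *, char *, loff_t, size_t);
			ssize_t (*read_new)(struct file *, struct kobject *,
					    const struct bin_attribute *, char *, loff_t, size_t);
		};
		union {
			ssize_t (*write)(struct file *, struct kobject *,
					 struct bin_attribute *, char *, loff_t, size_t);
			ssize_t (*write_new)(struct file *, struct kobject *,
					     const struct bin_attribute *, char *, loff_t, size_t);
		};
	};

Because the old and new callback types share storage, drivers can switch to the const-qualified signatures one by one; once the migration completes, the _new names can be renamed back without another treewide sweep.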
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 0cd6208fb371..65a84bfcc2f8 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -156,7 +156,7 @@ static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
#define ISKU_SYSFS_W(thingy, THINGY) \
static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
return isku_sysfs_write(fp, kobj, buf, off, count, \
@@ -165,7 +165,7 @@ static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj
#define ISKU_SYSFS_R(thingy, THINGY) \
static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj, \
- struct bin_attribute *attr, char *buf, \
+ const struct bin_attribute *attr, char *buf, \
loff_t off, size_t count) \
{ \
return isku_sysfs_read(fp, kobj, buf, off, count, \
@@ -178,27 +178,27 @@ ISKU_SYSFS_W(thingy, THINGY)
#define ISKU_BIN_ATTR_RW(thingy, THINGY) \
ISKU_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = ISKU_SIZE_ ## THINGY, \
- .read = isku_sysfs_read_ ## thingy, \
- .write = isku_sysfs_write_ ## thingy \
+ .read_new = isku_sysfs_read_ ## thingy, \
+ .write_new = isku_sysfs_write_ ## thingy \
}
#define ISKU_BIN_ATTR_R(thingy, THINGY) \
ISKU_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = ISKU_SIZE_ ## THINGY, \
- .read = isku_sysfs_read_ ## thingy, \
+ .read_new = isku_sysfs_read_ ## thingy, \
}
#define ISKU_BIN_ATTR_W(thingy, THINGY) \
ISKU_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = ISKU_SIZE_ ## THINGY, \
- .write = isku_sysfs_write_ ## thingy \
+ .write_new = isku_sysfs_write_ ## thingy \
}
ISKU_BIN_ATTR_RW(macro, MACRO);
@@ -217,7 +217,7 @@ ISKU_BIN_ATTR_W(control, CONTROL);
ISKU_BIN_ATTR_W(reset, RESET);
ISKU_BIN_ATTR_R(info, INFO);
-static struct bin_attribute *isku_bin_attributes[] = {
+static const struct bin_attribute *const isku_bin_attributes[] = {
&bin_attr_macro,
&bin_attr_keys_function,
&bin_attr_keys_easyzone,
@@ -238,7 +238,7 @@ static struct bin_attribute *isku_bin_attributes[] = {
static const struct attribute_group isku_group = {
.attrs = isku_attrs,
- .bin_attrs = isku_bin_attributes,
+ .bin_attrs_new = isku_bin_attributes,
};
static const struct attribute_group *isku_groups[] = {
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 3f8f459edcf3..b3c0242e5a37 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -261,7 +261,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
}
static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -285,7 +285,7 @@ static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
* case of error the old data is still valid
*/
static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -327,11 +327,11 @@ unlock:
return sizeof(struct kone_settings);
}
-static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
- kone_sysfs_write_settings, sizeof(struct kone_settings));
+static const BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
+ kone_sysfs_write_settings, sizeof(struct kone_settings));
static ssize_t kone_sysfs_read_profilex(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -351,7 +351,7 @@ static ssize_t kone_sysfs_read_profilex(struct file *fp,
/* Writes data only if different to stored data */
static ssize_t kone_sysfs_write_profilex(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr,
+ struct kobject *kobj, const struct bin_attribute *attr,
char *buf, loff_t off, size_t count) {
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
@@ -382,11 +382,11 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp,
return sizeof(struct kone_profile);
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number = { \
+static const struct bin_attribute bin_attr_profile##number = { \
.attr = { .name = "profile" #number, .mode = 0660 }, \
.size = sizeof(struct kone_profile), \
- .read = kone_sysfs_read_profilex, \
- .write = kone_sysfs_write_profilex, \
+ .read_new = kone_sysfs_read_profilex, \
+ .write_new = kone_sysfs_write_profilex, \
.private = &profile_numbers[number-1], \
}
PROFILE_ATTR(1);
@@ -634,7 +634,7 @@ static struct attribute *kone_attrs[] = {
NULL,
};
-static struct bin_attribute *kone_bin_attributes[] = {
+static const struct bin_attribute *const kone_bin_attributes[] = {
&bin_attr_settings,
&bin_attr_profile1,
&bin_attr_profile2,
@@ -646,7 +646,7 @@ static struct bin_attribute *kone_bin_attributes[] = {
static const struct attribute_group kone_group = {
.attrs = kone_attrs,
- .bin_attrs = kone_bin_attributes,
+ .bin_attrs_new = kone_bin_attributes,
};
static const struct attribute_group *kone_groups[] = {
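For readers unwinding the macro, PROFILE_ATTR(1) above now expands to roughly the following (whitespace aside):

	static const struct bin_attribute bin_attr_profile1 = {
		.attr = { .name = "profile1", .mode = 0660 },
		.size = sizeof(struct kone_profile),
		.read_new = kone_sysfs_read_profilex,
		.write_new = kone_sysfs_write_profilex,
		.private = &profile_numbers[0],	/* number-1 with number == 1 */
	};

The .private pointer is how the shared read/write helpers recover which profile slot a given sysfs file refers to.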
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 8ccb3b14a1a9..5d8a5ce88b4c 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -128,8 +128,8 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
#define KONEPLUS_SYSFS_W(thingy, THINGY) \
static ssize_t koneplus_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return koneplus_sysfs_write(fp, kobj, buf, off, count, \
KONEPLUS_SIZE_ ## THINGY, KONEPLUS_COMMAND_ ## THINGY); \
@@ -137,8 +137,8 @@ static ssize_t koneplus_sysfs_write_ ## thingy(struct file *fp, \
#define KONEPLUS_SYSFS_R(thingy, THINGY) \
static ssize_t koneplus_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return koneplus_sysfs_read(fp, kobj, buf, off, count, \
KONEPLUS_SIZE_ ## THINGY, KONEPLUS_COMMAND_ ## THINGY); \
@@ -150,27 +150,27 @@ KONEPLUS_SYSFS_R(thingy, THINGY)
#define KONEPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
KONEPLUS_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
- .read = koneplus_sysfs_read_ ## thingy, \
- .write = koneplus_sysfs_write_ ## thingy \
+ .read_new = koneplus_sysfs_read_ ## thingy, \
+ .write_new = koneplus_sysfs_write_ ## thingy \
}
#define KONEPLUS_BIN_ATTRIBUTE_R(thingy, THINGY) \
KONEPLUS_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
- .read = koneplus_sysfs_read_ ## thingy, \
+ .read_new = koneplus_sysfs_read_ ## thingy, \
}
#define KONEPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
KONEPLUS_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
- .write = koneplus_sysfs_write_ ## thingy \
+ .write_new = koneplus_sysfs_write_ ## thingy \
}
KONEPLUS_BIN_ATTRIBUTE_W(control, CONTROL);
KONEPLUS_BIN_ATTRIBUTE_W(talk, TALK);
@@ -183,8 +183,8 @@ KONEPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
KONEPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -201,8 +201,8 @@ static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
}
static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -219,16 +219,16 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number##_settings = { \
+static const struct bin_attribute bin_attr_profile##number##_settings = { \
.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
.size = KONEPLUS_SIZE_PROFILE_SETTINGS, \
- .read = koneplus_sysfs_read_profilex_settings, \
+ .read_new = koneplus_sysfs_read_profilex_settings, \
.private = &profile_numbers[number-1], \
}; \
-static struct bin_attribute bin_attr_profile##number##_buttons = { \
+static const struct bin_attribute bin_attr_profile##number##_buttons = { \
.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
.size = KONEPLUS_SIZE_PROFILE_BUTTONS, \
- .read = koneplus_sysfs_read_profilex_buttons, \
+ .read_new = koneplus_sysfs_read_profilex_buttons, \
.private = &profile_numbers[number-1], \
};
PROFILE_ATTR(1);
@@ -321,7 +321,7 @@ static struct attribute *koneplus_attrs[] = {
NULL,
};
-static struct bin_attribute *koneplus_bin_attributes[] = {
+static const struct bin_attribute *const koneplus_bin_attributes[] = {
&bin_attr_control,
&bin_attr_talk,
&bin_attr_macro,
@@ -346,7 +346,7 @@ static struct bin_attribute *koneplus_bin_attributes[] = {
static const struct attribute_group koneplus_group = {
.attrs = koneplus_attrs,
- .bin_attrs = koneplus_bin_attributes,
+ .bin_attrs_new = koneplus_bin_attributes,
};
static const struct attribute_group *koneplus_groups[] = {
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index beca8aef8bbb..7fb705789d4e 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -47,7 +47,7 @@ ROCCAT_COMMON2_BIN_ATTRIBUTE_R(tcu_image, 0x0c, 0x0404);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0x0f, 0x06);
ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x10, 0x10);
-static struct bin_attribute *konepure_bin_attrs[] = {
+static const struct bin_attribute *const konepure_bin_attrs[] = {
&bin_attr_actual_profile,
&bin_attr_control,
&bin_attr_info,
@@ -62,7 +62,7 @@ static struct bin_attribute *konepure_bin_attrs[] = {
};
static const struct attribute_group konepure_group = {
- .bin_attrs = konepure_bin_attrs,
+ .bin_attrs_new = konepure_bin_attrs,
};
static const struct attribute_group *konepure_groups[] = {
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 748d4d7cb2fc..e31e4a2e62d5 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -171,8 +171,8 @@ static ssize_t kovaplus_sysfs_write(struct file *fp, struct kobject *kobj,
#define KOVAPLUS_SYSFS_W(thingy, THINGY) \
static ssize_t kovaplus_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return kovaplus_sysfs_write(fp, kobj, buf, off, count, \
KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \
@@ -180,8 +180,8 @@ static ssize_t kovaplus_sysfs_write_ ## thingy(struct file *fp, \
#define KOVAPLUS_SYSFS_R(thingy, THINGY) \
static ssize_t kovaplus_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return kovaplus_sysfs_read(fp, kobj, buf, off, count, \
KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \
@@ -193,19 +193,19 @@ KOVAPLUS_SYSFS_R(thingy, THINGY)
#define KOVAPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
KOVAPLUS_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = KOVAPLUS_SIZE_ ## THINGY, \
- .read = kovaplus_sysfs_read_ ## thingy, \
- .write = kovaplus_sysfs_write_ ## thingy \
+ .read_new = kovaplus_sysfs_read_ ## thingy, \
+ .write_new = kovaplus_sysfs_write_ ## thingy \
}
#define KOVAPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
KOVAPLUS_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = KOVAPLUS_SIZE_ ## THINGY, \
- .write = kovaplus_sysfs_write_ ## thingy \
+ .write_new = kovaplus_sysfs_write_ ## thingy \
}
KOVAPLUS_BIN_ATTRIBUTE_W(control, CONTROL);
KOVAPLUS_BIN_ATTRIBUTE_RW(info, INFO);
@@ -213,8 +213,8 @@ KOVAPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
KOVAPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -231,8 +231,8 @@ static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
}
static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -249,16 +249,16 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number##_settings = { \
+static const struct bin_attribute bin_attr_profile##number##_settings = { \
.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
.size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \
- .read = kovaplus_sysfs_read_profilex_settings, \
+ .read_new = kovaplus_sysfs_read_profilex_settings, \
.private = &profile_numbers[number-1], \
}; \
-static struct bin_attribute bin_attr_profile##number##_buttons = { \
+static const struct bin_attribute bin_attr_profile##number##_buttons = { \
.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
.size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \
- .read = kovaplus_sysfs_read_profilex_buttons, \
+ .read_new = kovaplus_sysfs_read_profilex_buttons, \
.private = &profile_numbers[number-1], \
};
PROFILE_ATTR(1);
@@ -379,7 +379,7 @@ static struct attribute *kovaplus_attrs[] = {
NULL,
};
-static struct bin_attribute *kovaplus_bin_attributes[] = {
+static const struct bin_attribute *const kovaplus_bin_attributes[] = {
&bin_attr_control,
&bin_attr_info,
&bin_attr_profile_settings,
@@ -399,7 +399,7 @@ static struct bin_attribute *kovaplus_bin_attributes[] = {
static const struct attribute_group kovaplus_group = {
.attrs = kovaplus_attrs,
- .bin_attrs = kovaplus_bin_attributes,
+ .bin_attrs_new = kovaplus_bin_attributes,
};
static const struct attribute_group *kovaplus_groups[] = {
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
index d5ddf0d68346..023ec64b4b0e 100644
--- a/drivers/hid/hid-roccat-lua.c
+++ b/drivers/hid/hid-roccat-lua.c
@@ -66,7 +66,7 @@ static ssize_t lua_sysfs_write(struct file *fp, struct kobject *kobj,
#define LUA_SYSFS_W(thingy, THINGY) \
static ssize_t lua_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, \
+ struct kobject *kobj, const struct bin_attribute *attr, \
char *buf, loff_t off, size_t count) \
{ \
return lua_sysfs_write(fp, kobj, buf, off, count, \
@@ -75,7 +75,7 @@ static ssize_t lua_sysfs_write_ ## thingy(struct file *fp, \
#define LUA_SYSFS_R(thingy, THINGY) \
static ssize_t lua_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, \
+ struct kobject *kobj, const struct bin_attribute *attr, \
char *buf, loff_t off, size_t count) \
{ \
return lua_sysfs_read(fp, kobj, buf, off, count, \
@@ -85,11 +85,11 @@ static ssize_t lua_sysfs_read_ ## thingy(struct file *fp, \
#define LUA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
LUA_SYSFS_W(thingy, THINGY) \
LUA_SYSFS_R(thingy, THINGY) \
-static struct bin_attribute lua_ ## thingy ## _attr = { \
+static const struct bin_attribute lua_ ## thingy ## _attr = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = LUA_SIZE_ ## THINGY, \
- .read = lua_sysfs_read_ ## thingy, \
- .write = lua_sysfs_write_ ## thingy \
+ .read_new = lua_sysfs_read_ ## thingy, \
+ .write_new = lua_sysfs_write_ ## thingy \
};
LUA_BIN_ATTRIBUTE_RW(control, CONTROL)
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index eeb3d38cd805..2b53fbfbb897 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -129,8 +129,8 @@ static ssize_t pyra_sysfs_write(struct file *fp, struct kobject *kobj,
#define PYRA_SYSFS_W(thingy, THINGY) \
static ssize_t pyra_sysfs_write_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return pyra_sysfs_write(fp, kobj, buf, off, count, \
PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \
@@ -138,8 +138,8 @@ static ssize_t pyra_sysfs_write_ ## thingy(struct file *fp, \
#define PYRA_SYSFS_R(thingy, THINGY) \
static ssize_t pyra_sysfs_read_ ## thingy(struct file *fp, \
- struct kobject *kobj, struct bin_attribute *attr, char *buf, \
- loff_t off, size_t count) \
+ struct kobject *kobj, const struct bin_attribute *attr, \
+ char *buf, loff_t off, size_t count) \
{ \
return pyra_sysfs_read(fp, kobj, buf, off, count, \
PYRA_SIZE_ ## THINGY, PYRA_COMMAND_ ## THINGY); \
@@ -151,27 +151,27 @@ PYRA_SYSFS_R(thingy, THINGY)
#define PYRA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
PYRA_SYSFS_RW(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = PYRA_SIZE_ ## THINGY, \
- .read = pyra_sysfs_read_ ## thingy, \
- .write = pyra_sysfs_write_ ## thingy \
+ .read_new = pyra_sysfs_read_ ## thingy, \
+ .write_new = pyra_sysfs_write_ ## thingy \
}
#define PYRA_BIN_ATTRIBUTE_R(thingy, THINGY) \
PYRA_SYSFS_R(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = PYRA_SIZE_ ## THINGY, \
- .read = pyra_sysfs_read_ ## thingy, \
+ .read_new = pyra_sysfs_read_ ## thingy, \
}
#define PYRA_BIN_ATTRIBUTE_W(thingy, THINGY) \
PYRA_SYSFS_W(thingy, THINGY); \
-static struct bin_attribute bin_attr_##thingy = { \
+static const struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = PYRA_SIZE_ ## THINGY, \
- .write = pyra_sysfs_write_ ## thingy \
+ .write_new = pyra_sysfs_write_ ## thingy \
}
PYRA_BIN_ATTRIBUTE_W(control, CONTROL);
@@ -180,8 +180,8 @@ PYRA_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
PYRA_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -198,8 +198,8 @@ static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
}
static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
@@ -216,16 +216,16 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
}
#define PROFILE_ATTR(number) \
-static struct bin_attribute bin_attr_profile##number##_settings = { \
+static const struct bin_attribute bin_attr_profile##number##_settings = { \
.attr = { .name = "profile" #number "_settings", .mode = 0440 }, \
.size = PYRA_SIZE_PROFILE_SETTINGS, \
- .read = pyra_sysfs_read_profilex_settings, \
+ .read_new = pyra_sysfs_read_profilex_settings, \
.private = &profile_numbers[number-1], \
}; \
-static struct bin_attribute bin_attr_profile##number##_buttons = { \
+static const struct bin_attribute bin_attr_profile##number##_buttons = { \
.attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
.size = PYRA_SIZE_PROFILE_BUTTONS, \
- .read = pyra_sysfs_read_profilex_buttons, \
+ .read_new = pyra_sysfs_read_profilex_buttons, \
.private = &profile_numbers[number-1], \
};
PROFILE_ATTR(1);
@@ -235,8 +235,8 @@ PROFILE_ATTR(4);
PROFILE_ATTR(5);
static ssize_t pyra_sysfs_write_settings(struct file *fp,
- struct kobject *kobj, struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj)->parent->parent;
struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
@@ -273,7 +273,7 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
}
PYRA_SYSFS_R(settings, SETTINGS);
-static struct bin_attribute bin_attr_settings =
+static const struct bin_attribute bin_attr_settings =
__BIN_ATTR(settings, (S_IWUSR | S_IRUGO),
pyra_sysfs_read_settings, pyra_sysfs_write_settings,
PYRA_SIZE_SETTINGS);
@@ -334,7 +334,7 @@ static struct attribute *pyra_attrs[] = {
NULL,
};
-static struct bin_attribute *pyra_bin_attributes[] = {
+static const struct bin_attribute *const pyra_bin_attributes[] = {
&bin_attr_control,
&bin_attr_info,
&bin_attr_profile_settings,
@@ -355,7 +355,7 @@ static struct bin_attribute *pyra_bin_attributes[] = {
static const struct attribute_group pyra_group = {
.attrs = pyra_attrs,
- .bin_attrs = pyra_bin_attributes,
+ .bin_attrs_new = pyra_bin_attributes,
};
static const struct attribute_group *pyra_groups[] = {
diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
index 57714a4525e2..902dac1e714e 100644
--- a/drivers/hid/hid-roccat-ryos.c
+++ b/drivers/hid/hid-roccat-ryos.c
@@ -47,7 +47,7 @@ ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(stored_lights, 0x17, 0x0566);
ROCCAT_COMMON2_BIN_ATTRIBUTE_W(custom_lights, 0x18, 0x14);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(light_macro, 0x19, 0x07d2);
-static struct bin_attribute *ryos_bin_attrs[] = {
+static const struct bin_attribute *const ryos_bin_attrs[] = {
&bin_attr_control,
&bin_attr_profile,
&bin_attr_keys_primary,
@@ -70,7 +70,7 @@ static struct bin_attribute *ryos_bin_attrs[] = {
};
static const struct attribute_group ryos_group = {
- .bin_attrs = ryos_bin_attrs,
+ .bin_attrs_new = ryos_bin_attrs,
};
static const struct attribute_group *ryos_groups[] = {
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 2baa47a0efc5..7399b8ffb5c7 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -30,7 +30,7 @@ ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x8, 0x0823);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x9, 0x08);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0xc, 0x04);
-static struct bin_attribute *savu_bin_attrs[] = {
+static const struct bin_attribute *const savu_bin_attrs[] = {
&bin_attr_control,
&bin_attr_profile,
&bin_attr_general,
@@ -42,7 +42,7 @@ static struct bin_attribute *savu_bin_attrs[] = {
};
static const struct attribute_group savu_group = {
- .bin_attrs = savu_bin_attrs,
+ .bin_attrs_new = savu_bin_attrs,
};
static const struct attribute_group *savu_groups[] = {
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 66f0675df24b..761760668f6d 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -946,7 +946,7 @@ hid_sensor_register_platform_device(struct platform_device *pdev,
memcpy(real_usage, match->luid, 4);
- /* usage id are all lowcase */
+ /* usage id are all lowercase */
for (c = real_usage; *c != '\0'; c++)
*c = tolower(*c);
@@ -1065,7 +1065,7 @@ static struct platform_driver hid_sensor_custom_platform_driver = {
.name = KBUILD_MODNAME,
},
.probe = hid_sensor_custom_probe,
- .remove_new = hid_sensor_custom_remove,
+ .remove = hid_sensor_custom_remove,
};
module_platform_driver(hid_sensor_custom_platform_driver);
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 7bd86eef6ec7..4c94c03cb573 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -730,23 +730,30 @@ err_stop_hw:
return ret;
}
+static int sensor_hub_finalize_pending_fn(struct device *dev, void *data)
+{
+ struct hid_sensor_hub_device *hsdev = dev->platform_data;
+
+ if (hsdev->pending.status)
+ complete(&hsdev->pending.ready);
+
+ return 0;
+}
+
static void sensor_hub_remove(struct hid_device *hdev)
{
struct sensor_hub_data *data = hid_get_drvdata(hdev);
unsigned long flags;
- int i;
hid_dbg(hdev, " hardware removed\n");
hid_hw_close(hdev);
hid_hw_stop(hdev);
+
spin_lock_irqsave(&data->lock, flags);
- for (i = 0; i < data->hid_sensor_client_cnt; ++i) {
- struct hid_sensor_hub_device *hsdev =
- data->hid_sensor_hub_client_devs[i].platform_data;
- if (hsdev->pending.status)
- complete(&hsdev->pending.ready);
- }
+ device_for_each_child(&hdev->dev, NULL,
+ sensor_hub_finalize_pending_fn);
spin_unlock_irqrestore(&data->lock, flags);
+
mfd_remove_devices(&hdev->dev);
mutex_destroy(&data->mutex);
}
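The refactor relies on the driver core's child iterator, whose shape is:

	int device_for_each_child(struct device *parent, void *data,
				  int (*fn)(struct device *dev, void *data));

fn is called for every child device until it returns non-zero; sensor_hub_finalize_pending_fn always returns 0, so every MFD child gets its pending completion kicked, matching the old open-coded loop over hid_sensor_hub_client_devs.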
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index d2486f3734f0..5258b45684e8 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1379,7 +1379,8 @@ static int sony_leds_init(struct sony_sc *sc)
u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
u8 use_hw_blink[MAX_LEDS] = { 0 };
- BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
+ if (WARN_ON(!(sc->quirks & SONY_LED_SUPPORT)))
+ return -EINVAL;
if (sc->quirks & BUZZ_CONTROLLER) {
sc->led_count = 4;
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index bf8b633114be..af38fc8eb34f 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -253,7 +253,7 @@ enum
ID_CONTROLLER_DECK_STATE = 9
};
-/* String attribute idenitifiers */
+/* String attribute identifiers */
enum {
ATTRIB_STR_BOARD_SERIAL,
ATTRIB_STR_UNIT_SERIAL,
@@ -1306,6 +1306,7 @@ static void steam_remove(struct hid_device *hdev)
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->work_connect);
+ cancel_work_sync(&steam->rumble_work);
hid_destroy_device(steam->client_hdev);
steam->client_hdev = NULL;
steam->client_opened = 0;
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index 7e83fee1ffa0..d4bd7848b8c6 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -19,6 +19,7 @@
#define STEELSERIES_SRWS1 BIT(0)
#define STEELSERIES_ARCTIS_1 BIT(1)
+#define STEELSERIES_ARCTIS_9 BIT(2)
struct steelseries_device {
struct hid_device *hdev;
@@ -32,6 +33,7 @@ struct steelseries_device {
struct power_supply *battery;
uint8_t battery_capacity;
bool headset_connected;
+ bool battery_charging;
};
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
@@ -368,32 +370,35 @@ static void steelseries_srws1_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
kfree(drv_data);
- return;
}
#endif
#define STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS 3000
#define ARCTIS_1_BATTERY_RESPONSE_LEN 8
+#define ARCTIS_9_BATTERY_RESPONSE_LEN 64
static const char arctis_1_battery_request[] = { 0x06, 0x12 };
+static const char arctis_9_battery_request[] = { 0x00, 0x20 };
-static int steelseries_headset_arctis_1_fetch_battery(struct hid_device *hdev)
+static int steelseries_headset_request_battery(struct hid_device *hdev,
+ const char *request, size_t len)
{
u8 *write_buf;
int ret;
/* Request battery information */
- write_buf = kmemdup(arctis_1_battery_request, sizeof(arctis_1_battery_request), GFP_KERNEL);
+ write_buf = kmemdup(request, len, GFP_KERNEL);
if (!write_buf)
return -ENOMEM;
- ret = hid_hw_raw_request(hdev, arctis_1_battery_request[0],
- write_buf, sizeof(arctis_1_battery_request),
+ hid_dbg(hdev, "Sending battery request report");
+ ret = hid_hw_raw_request(hdev, request[0], write_buf, len,
HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
- if (ret < (int)sizeof(arctis_1_battery_request)) {
+ if (ret < (int)len) {
hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret);
ret = -ENODATA;
}
+
kfree(write_buf);
return ret;
}
@@ -404,13 +409,26 @@ static void steelseries_headset_fetch_battery(struct hid_device *hdev)
int ret = 0;
if (sd->quirks & STEELSERIES_ARCTIS_1)
- ret = steelseries_headset_arctis_1_fetch_battery(hdev);
+ ret = steelseries_headset_request_battery(hdev,
+ arctis_1_battery_request, sizeof(arctis_1_battery_request));
+ else if (sd->quirks & STEELSERIES_ARCTIS_9)
+ ret = steelseries_headset_request_battery(hdev,
+ arctis_9_battery_request, sizeof(arctis_9_battery_request));
if (ret < 0)
hid_dbg(hdev,
"Battery query failed (err: %d)\n", ret);
}
+static int battery_capacity_to_level(int capacity)
+{
+ if (capacity >= 50)
+ return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ if (capacity >= 20)
+ return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+}
+
static void steelseries_headset_battery_timer_tick(struct work_struct *work)
{
struct steelseries_device *sd = container_of(work,
@@ -420,6 +438,9 @@ static void steelseries_headset_battery_timer_tick(struct work_struct *work)
steelseries_headset_fetch_battery(hdev);
}
+#define STEELSERIES_PREFIX "SteelSeries "
+#define STEELSERIES_PREFIX_LEN strlen(STEELSERIES_PREFIX)
+
static int steelseries_headset_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -428,13 +449,24 @@ static int steelseries_headset_battery_get_property(struct power_supply *psy,
int ret = 0;
switch (psp) {
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = sd->hdev->name;
+ while (!strncmp(val->strval, STEELSERIES_PREFIX, STEELSERIES_PREFIX_LEN))
+ val->strval += STEELSERIES_PREFIX_LEN;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "SteelSeries";
+ break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = 1;
break;
case POWER_SUPPLY_PROP_STATUS:
- val->intval = sd->headset_connected ?
- POWER_SUPPLY_STATUS_DISCHARGING :
- POWER_SUPPLY_STATUS_UNKNOWN;
+ if (sd->headset_connected) {
+ val->intval = sd->battery_charging ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_DISCHARGING;
+ } else
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
break;
case POWER_SUPPLY_PROP_SCOPE:
val->intval = POWER_SUPPLY_SCOPE_DEVICE;
@@ -442,6 +474,9 @@ static int steelseries_headset_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = sd->battery_capacity;
break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ val->intval = battery_capacity_to_level(sd->battery_capacity);
+ break;
default:
ret = -EINVAL;
break;
@@ -465,10 +500,13 @@ steelseries_headset_set_wireless_status(struct hid_device *hdev,
}
static enum power_supply_property steelseries_headset_battery_props[] = {
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
};
static int steelseries_headset_battery_register(struct steelseries_device *sd)
@@ -492,6 +530,7 @@ static int steelseries_headset_battery_register(struct steelseries_device *sd)
/* avoid the warning of 0% battery while waiting for the first info */
steelseries_headset_set_wireless_status(sd->hdev, false);
sd->battery_capacity = 100;
+ sd->battery_charging = false;
sd->battery = devm_power_supply_register(&sd->hdev->dev,
&sd->battery_desc, &battery_cfg);
@@ -507,9 +546,22 @@ static int steelseries_headset_battery_register(struct steelseries_device *sd)
INIT_DELAYED_WORK(&sd->battery_work, steelseries_headset_battery_timer_tick);
steelseries_headset_fetch_battery(sd->hdev);
+ if (sd->quirks & STEELSERIES_ARCTIS_9) {
+ /* The first fetch_battery request can remain unanswered in some cases */
+ schedule_delayed_work(&sd->battery_work,
+ msecs_to_jiffies(STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS));
+ }
+
return 0;
}
+static bool steelseries_is_vendor_usage_page(struct hid_device *hdev, uint8_t usage_page)
+{
+ return hdev->rdesc[0] == 0x06 &&
+ hdev->rdesc[1] == usage_page &&
+ hdev->rdesc[2] == 0xff;
+}
+
static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct steelseries_device *sd;
@@ -535,12 +587,20 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
if (ret)
return ret;
+ if (sd->quirks & STEELSERIES_ARCTIS_9 &&
+ !steelseries_is_vendor_usage_page(hdev, 0xc0))
+ return -ENODEV;
+
spin_lock_init(&sd->lock);
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
return ret;
+ ret = hid_hw_open(hdev);
+ if (ret)
+ return ret;
+
if (steelseries_headset_battery_register(sd) < 0)
hid_err(sd->hdev,
"Failed to register battery for headset\n");
@@ -567,6 +627,7 @@ static void steelseries_remove(struct hid_device *hdev)
cancel_delayed_work_sync(&sd->battery_work);
+ hid_hw_close(hdev);
hid_hw_stop(hdev);
}
@@ -586,6 +647,15 @@ static const __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev,
return rdesc;
}
+static uint8_t steelseries_headset_map_capacity(uint8_t capacity, uint8_t min_in, uint8_t max_in)
+{
+ if (capacity >= max_in)
+ return 100;
+ if (capacity <= min_in)
+ return 0;
+ return (capacity - min_in) * 100 / (max_in - min_in);
+}
+
static int steelseries_headset_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *read_buf,
int size)
@@ -593,6 +663,7 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
struct steelseries_device *sd = hid_get_drvdata(hdev);
int capacity = sd->battery_capacity;
bool connected = sd->headset_connected;
+ bool charging = sd->battery_charging;
unsigned long flags;
/* Not a headset */
@@ -603,8 +674,11 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
hid_dbg(sd->hdev,
"Parsing raw event for Arctis 1 headset (%*ph)\n", size, read_buf);
if (size < ARCTIS_1_BATTERY_RESPONSE_LEN ||
- memcmp (read_buf, arctis_1_battery_request, sizeof(arctis_1_battery_request)))
+ memcmp(read_buf, arctis_1_battery_request, sizeof(arctis_1_battery_request))) {
+ if (!delayed_work_pending(&sd->battery_work))
+ goto request_battery;
return 0;
+ }
if (read_buf[2] == 0x01) {
connected = false;
capacity = 100;
@@ -614,6 +688,34 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
}
}
+ if (sd->quirks & STEELSERIES_ARCTIS_9) {
+ hid_dbg(sd->hdev,
+ "Parsing raw event for Arctis 9 headset (%*ph)\n", size, read_buf);
+ if (size < ARCTIS_9_BATTERY_RESPONSE_LEN) {
+ if (!delayed_work_pending(&sd->battery_work))
+ goto request_battery;
+ return 0;
+ }
+
+ if (read_buf[0] == 0xaa && read_buf[1] == 0x01) {
+ connected = true;
+ charging = read_buf[4] == 0x01;
+
+ /*
+ * Found no official documentation about min and max.
+ * Values defined by testing.
+ */
+ capacity = steelseries_headset_map_capacity(read_buf[3], 0x68, 0x9d);
+ } else {
+ /*
+ * Device is off and sends the last known status read_buf[1] == 0x03 or
+ * there is no known status of the device read_buf[0] == 0x55
+ */
+ connected = false;
+ charging = false;
+ }
+ }
+
if (connected != sd->headset_connected) {
hid_dbg(sd->hdev,
"Connected status changed from %sconnected to %sconnected\n",
@@ -631,6 +733,16 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
power_supply_changed(sd->battery);
}
+ if (charging != sd->battery_charging) {
+ hid_dbg(sd->hdev,
+ "Battery charging status changed from %scharging to %scharging\n",
+ sd->battery_charging ? "" : "not ",
+ charging ? "" : "not ");
+ sd->battery_charging = charging;
+ power_supply_changed(sd->battery);
+ }
+
+request_battery:
spin_lock_irqsave(&sd->lock, flags);
if (!sd->removed)
schedule_delayed_work(&sd->battery_work,
@@ -648,6 +760,10 @@ static const struct hid_device_id steelseries_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12b6),
.driver_data = STEELSERIES_ARCTIS_1 },
+ { /* SteelSeries Arctis 9 Wireless for XBox */
+ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12c2),
+ .driver_data = STEELSERIES_ARCTIS_9 },
+
{ }
};
MODULE_DEVICE_TABLE(hid, steelseries_devices);
@@ -666,3 +782,4 @@ MODULE_DESCRIPTION("HID driver for Steelseries devices");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
MODULE_AUTHOR("Simon Wood <simon@mungewell.org>");
+MODULE_AUTHOR("Christian Mayer <git@mayer-bgk.de>");
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index cf1679b0d4fb..6c3e758bbb09 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -170,6 +170,14 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
ep = &usbif->cur_altsetting->endpoint[1];
b_ep = ep->desc.bEndpointAddress;
+ /* Are the expected endpoints present? */
+ u8 ep_addr[2] = {b_ep, 0};
+
+ if (!usb_check_int_endpoints(usbif, ep_addr)) {
+ hid_err(hdev, "Unexpected non-int endpoint\n");
+ return;
+ }
+
for (i = 0; i < ARRAY_SIZE(setup_arr); ++i) {
memcpy(send_buf, setup_arr[i], setup_arr_sizes[i]);
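usb_check_int_endpoints() takes a zero-terminated array of endpoint addresses and returns true only if every listed endpoint exists on the interface and is of interrupt type, which is why the array above carries a trailing 0. Checking several endpoints at once looks like this (ep_in/ep_out are hypothetical):

	u8 ep_addrs[3] = { ep_in, ep_out, 0 };	/* 0 terminates the list */

	if (!usb_check_int_endpoints(usbif, ep_addrs))
		return -ENODEV;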
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index ef26c7defcf6..a6044996abf2 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -842,7 +842,7 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
__u8 *params_ptr = NULL;
size_t params_len = 0;
/* Parameters string descriptor of a model with touch ring (HS610) */
- const __u8 touch_ring_model_params_buf[] = {
+ static const __u8 touch_ring_model_params_buf[] = {
0x13, 0x03, 0x70, 0xC6, 0x00, 0x06, 0x7C, 0x00,
0xFF, 0x1F, 0xD8, 0x13, 0x03, 0x0D, 0x10, 0x01,
0x04, 0x3C, 0x3E
diff --git a/drivers/hid/hid-uclogic-rdesc-test.c b/drivers/hid/hid-uclogic-rdesc-test.c
index d6b18213f45f..066df622b6f2 100644
--- a/drivers/hid/hid-uclogic-rdesc-test.c
+++ b/drivers/hid/hid-uclogic-rdesc-test.c
@@ -9,7 +9,7 @@
#include <kunit/test.h>
#include "./hid-uclogic-rdesc.h"
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
struct uclogic_template_case {
const char *name;
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index be5d342d5d13..75544448c239 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -50,6 +50,8 @@
#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(3)
#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(4)
#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND BIT(5)
+#define I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME BIT(6)
+#define I2C_HID_QUIRK_RE_POWER_ON BIT(7)
/* Command opcodes */
#define I2C_HID_OPCODE_RESET 0x01
@@ -135,11 +137,18 @@ static const struct i2c_hid_quirks {
{ I2C_VENDOR_ID_CIRQUE, I2C_PRODUCT_ID_CIRQUE_1063,
I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND },
/*
+ * Without additional power on command, at least some QTEC devices send garbage
+ */
+ { I2C_VENDOR_ID_QTEC, HID_ANY_ID,
+ I2C_HID_QUIRK_RE_POWER_ON },
+ /*
* Sending the wakeup after reset actually break ELAN touchscreen controller
*/
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET |
I2C_HID_QUIRK_BOGUS_IRQ },
+ { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_0D42,
+ I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME },
{ 0, 0 }
};
@@ -411,7 +420,19 @@ static int i2c_hid_set_power(struct i2c_hid *ihid, int power_state)
i2c_hid_dbg(ihid, "%s\n", __func__);
+ /*
+ * Some STM-based devices need 400µs after a rising clock edge to wake
+ * from deep sleep, in which case the first request will fail due to
+ * the address not being acknowledged. Try after a short sleep to see
+ * if the device came alive on the bus. Certain Weida Tech devices also
+ * need this.
+ */
ret = i2c_hid_set_power_command(ihid, power_state);
+ if (ret && power_state == I2C_HID_PWR_ON) {
+ usleep_range(400, 500);
+ ret = i2c_hid_set_power_command(ihid, I2C_HID_PWR_ON);
+ }
+
if (ret)
dev_err(&ihid->client->dev,
"failed to change power setting.\n");
@@ -973,13 +994,12 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid)
enable_irq(client->irq);
- /* Make sure the device is awake on the bus */
- ret = i2c_hid_probe_address(ihid);
- if (ret < 0) {
- dev_err(&client->dev, "nothing at address after resume: %d\n",
- ret);
- return -ENXIO;
- }
+ /* On Goodix 27c6:0d42 wait extra time before device wakeup.
+ * It's not clear why but if we send wakeup too early, the device will
+ * never trigger input interrupts.
+ */
+ if (ihid->quirks & I2C_HID_QUIRK_DELAY_WAKEUP_AFTER_RESUME)
+ msleep(1500);
/* Instead of resetting device, simply powers the device on. This
* solves "incomplete reports" on Raydium devices 2386:3118 and
@@ -1059,7 +1079,11 @@ static int i2c_hid_core_register_hid(struct i2c_hid *ihid)
return ret;
}
- return 0;
+ /* At least some QTEC devices need this after initialization */
+ if (ihid->quirks & I2C_HID_QUIRK_RE_POWER_ON)
+ ret = i2c_hid_set_power(ihid, I2C_HID_PWR_ON);
+
+ return ret;
}
static int i2c_hid_core_probe_panel_follower(struct i2c_hid *ihid)
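The two delays added in this file follow the usual kernel timing guidance (Documentation/timers/timers-howto.rst): usleep_range() for short, non-atomic waits where some slack lets the scheduler coalesce timers, msleep() for long delays where jitter is irrelevant.

	usleep_range(400, 500);	/* microsecond-scale wake window, process context */
	msleep(1500);		/* second-scale resume delay, precision does not matter */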
diff --git a/drivers/hid/i2c-hid/i2c-hid-of.c b/drivers/hid/i2c-hid/i2c-hid-of.c
index 8be4d576da77..57379b77e977 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of.c
@@ -144,9 +144,9 @@ MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
#endif
static const struct i2c_device_id i2c_hid_of_id_table[] = {
- { "hid", 0 },
- { "hid-over-i2c", 0 },
- { },
+ { "hid" },
+ { "hid-over-i2c" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, i2c_hid_of_id_table);
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index aae0d965b47b..9e2401291a2f 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -381,6 +381,50 @@ static int __maybe_unused ish_resume(struct device *device)
static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
+static ssize_t base_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ishtp_device *dev = dev_get_drvdata(cdev);
+
+ return sysfs_emit(buf, "%u.%u.%u.%u\n", dev->base_ver.major,
+ dev->base_ver.minor, dev->base_ver.hotfix,
+ dev->base_ver.build);
+}
+static DEVICE_ATTR_RO(base_version);
+
+static ssize_t project_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ishtp_device *dev = dev_get_drvdata(cdev);
+
+ return sysfs_emit(buf, "%u.%u.%u.%u\n", dev->prj_ver.major,
+ dev->prj_ver.minor, dev->prj_ver.hotfix,
+ dev->prj_ver.build);
+}
+static DEVICE_ATTR_RO(project_version);
+
+static struct attribute *ish_firmware_attrs[] = {
+ &dev_attr_base_version.attr,
+ &dev_attr_project_version.attr,
+ NULL
+};
+
+static umode_t firmware_is_visible(struct kobject *kobj, struct attribute *attr,
+ int i)
+{
+ struct ishtp_device *dev = dev_get_drvdata(kobj_to_dev(kobj));
+
+ return dev->driver_data->fw_generation ? attr->mode : 0;
+}
+
+static const struct attribute_group ish_firmware_group = {
+ .name = "firmware",
+ .attrs = ish_firmware_attrs,
+ .is_visible = firmware_is_visible,
+};
+
+__ATTRIBUTE_GROUPS(ish_firmware);
+
static struct pci_driver ish_driver = {
.name = KBUILD_MODNAME,
.id_table = ish_pci_tbl,
@@ -388,6 +432,7 @@ static struct pci_driver ish_driver = {
.remove = ish_remove,
.shutdown = ish_shutdown,
.driver.pm = &ish_pm_ops,
+ .dev_groups = ish_firmware_groups,
};
module_pci_driver(ish_driver);
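The ish_firmware_groups array referenced from .dev_groups is generated by the __ATTRIBUTE_GROUPS() invocation above; expanded, it is simply:

	static const struct attribute_group *ish_firmware_groups[] = {
		&ish_firmware_group,
		NULL,
	};

The driver core then creates the firmware/ sysfs directory for every bound device, with firmware_is_visible() hiding both attributes when the driver data carries no fw_generation.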
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index e157863a8b25..f4a671d6386c 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -635,7 +635,7 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
const struct firmware *fw,
const struct shim_fw_info fw_info)
{
- int rv;
+ int rv = 0;
void *dma_buf;
dma_addr_t dma_buf_phy;
u32 fragment_offset, fragment_size, payload_max_size;
@@ -793,7 +793,7 @@ static int load_fw_from_host(struct ishtp_cl_data *client_data)
if (rv < 0)
goto end_err_fw_release;
- /* Step 3: Start ISH main firmware exeuction */
+ /* Step 3: Start ISH main firmware execution */
rv = ish_fw_start(client_data);
if (rv < 0)
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index fbd4f8ea1951..cb04cd1d980b 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -70,10 +70,10 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
unsigned char *payload;
struct device_info *dev_info;
int i, j;
- size_t payload_len, total_len, cur_pos, raw_len;
+ size_t payload_len, total_len, cur_pos, raw_len, msg_len;
int report_type;
struct report_list *reports_list;
- char *reports;
+ struct report *report;
size_t report_len;
struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int curr_hid_dev = client_data->cur_hid_dev;
@@ -280,14 +280,13 @@ do_get_report:
case HOSTIF_PUBLISH_INPUT_REPORT_LIST:
report_type = HID_INPUT_REPORT;
reports_list = (struct report_list *)payload;
- reports = (char *)reports_list->reports;
+ report = reports_list->reports;
for (j = 0; j < reports_list->num_of_reports; j++) {
- recv_msg = (struct hostif_msg *)(reports +
- sizeof(uint16_t));
- report_len = *(uint16_t *)reports;
- payload = reports + sizeof(uint16_t) +
- sizeof(struct hostif_msg_hdr);
+ recv_msg = container_of(&report->msg,
+ struct hostif_msg, hdr);
+ report_len = report->size;
+ payload = recv_msg->payload;
payload_len = report_len -
sizeof(struct hostif_msg_hdr);
@@ -304,7 +303,7 @@ do_get_report:
0);
}
- reports += sizeof(uint16_t) + report_len;
+ report = (void *)report + sizeof(*report) + payload_len;
}
break;
default:
@@ -316,12 +315,12 @@ do_get_report:
}
- if (!cur_pos && cur_pos + payload_len +
- sizeof(struct hostif_msg) < total_len)
+ msg_len = payload_len + sizeof(struct hostif_msg);
+ if (!cur_pos && cur_pos + msg_len < total_len)
++client_data->multi_packet_cnt;
- cur_pos += payload_len + sizeof(struct hostif_msg);
- payload += payload_len + sizeof(struct hostif_msg);
+ cur_pos += msg_len;
+ payload += msg_len;
} while (cur_pos < total_len);
}
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 35dddc5015b3..2bc19e8ba13e 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -31,6 +31,7 @@ struct hostif_msg_hdr {
struct hostif_msg {
struct hostif_msg_hdr hdr;
+ uint8_t payload[];
} __packed;
struct hostif_msg_to_sensor {
@@ -52,15 +53,17 @@ struct ishtp_version {
uint16_t build;
} __packed;
+struct report {
+ uint16_t size;
+ struct hostif_msg_hdr msg;
+} __packed;
+
/* struct for ISHTP aggregated input data */
struct report_list {
uint16_t total_size;
uint8_t num_of_reports;
uint8_t flags;
- struct {
- uint16_t size_of_report;
- uint8_t report[1];
- } __packed reports[1];
+ struct report reports[];
} __packed;
/* HOSTIF commands */
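With the flexible-array layout, each list entry is its 2-byte size field, a hostif_msg_hdr, then payload bytes, so the iterator has to advance byte-wise rather than in units of struct report. A sketch of the walk, assuming the structures above:

	static void for_each_report(struct report_list *list)
	{
		struct report *r = list->reports;
		uint8_t i;

		for (i = 0; i < list->num_of_reports; i++) {
			size_t payload_len = r->size - sizeof(struct hostif_msg_hdr);

			/* consume the payload_len bytes that follow r->msg here */
			r = (struct report *)((uint8_t *)r + sizeof(*r) + payload_len);
		}
	}

This matches the old char-pointer arithmetic (sizeof(uint16_t) + report_len) exactly, since sizeof(*r) is the size field plus the header.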
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
index 513d7a4a1b8a..97f4026b1627 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -252,27 +252,6 @@ int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
/**
- * ishtp_cl_tx_empty() -test whether client device tx buffer is empty
- * @cl: Pointer to client device instance
- *
- * Look client device tx buffer list, and check whether this list is empty
- *
- * Return: true if client tx buffer list is empty else false
- */
-bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
-{
- int tx_list_empty;
- unsigned long tx_flags;
-
- spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
- tx_list_empty = list_empty(&cl->tx_list.list);
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
-
- return !!tx_list_empty;
-}
-EXPORT_SYMBOL(ishtp_cl_tx_empty);
-
-/**
* ishtp_cl_rx_get_rb() -Get a rb from client device rx buffer list
* @cl: Pointer to client device instance
*
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 8a7f2f6a4f86..21a2c0773cc2 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -14,25 +14,6 @@
#include "hbm.h"
#include "client.h"
-int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
-{
- unsigned long tx_free_flags;
- int size;
-
- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
- size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
- spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
-
- return size;
-}
-EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
-
-int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
-{
- return cl->tx_ring_free_size;
-}
-EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
-
/**
* ishtp_read_list_flush() - Flush read queue
* @cl: ishtp client instance
@@ -863,7 +844,7 @@ static void ipc_tx_send(void *prm)
/* Send ipc fragment */
ishtp_hdr.length = dev->mtu;
ishtp_hdr.msg_complete = 0;
- /* All fregments submitted to IPC queue with no callback */
+ /* All fragments submitted to IPC queue with no callback */
ishtp_write_message(dev, &ishtp_hdr, pmsg);
cl->tx_offs += dev->mtu;
rem = cl_msg->send_buf.size - cl->tx_offs;
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
index d9d398fadcf7..0efd49dd2530 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.h
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -120,8 +120,6 @@ int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
-int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl);
-int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl);
/* DMA I/F functions */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
index 07fdd52e4c5e..26bf9045a8de 100644
--- a/drivers/hid/intel-ish-hid/ishtp/init.c
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -15,36 +15,6 @@
#include "loader.h"
/**
- * ishtp_dev_state_str() -Convert to string format
- * @state: state to convert
- *
- * Convert state to string for prints
- *
- * Return: character pointer to converted string
- */
-const char *ishtp_dev_state_str(int state)
-{
- switch (state) {
- case ISHTP_DEV_INITIALIZING:
- return "INITIALIZING";
- case ISHTP_DEV_INIT_CLIENTS:
- return "INIT_CLIENTS";
- case ISHTP_DEV_ENABLED:
- return "ENABLED";
- case ISHTP_DEV_RESETTING:
- return "RESETTING";
- case ISHTP_DEV_DISABLED:
- return "DISABLED";
- case ISHTP_DEV_POWER_DOWN:
- return "POWER_DOWN";
- case ISHTP_DEV_POWER_UP:
- return "POWER_UP";
- default:
- return "unknown";
- }
-}
-
-/**
* ishtp_device_init() - ishtp device init
* @dev: ISHTP device instance
*
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index cdacce0a4c9d..44eddc411e97 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -57,7 +57,6 @@ enum ishtp_dev_state {
ISHTP_DEV_POWER_DOWN,
ISHTP_DEV_POWER_UP
};
-const char *ishtp_dev_state_str(int state);
struct ishtp_cl;
@@ -140,6 +139,13 @@ struct ishtp_driver_data {
char *fw_generation;
};
+struct ish_version {
+ u16 major;
+ u16 minor;
+ u16 hotfix;
+ u16 build;
+};
+
/**
* struct ishtp_device - ISHTP private device struct
*/
@@ -236,6 +242,11 @@ struct ishtp_device {
/* Dump to trace buffers if enabled*/
ishtp_print_log print_log;
+ /* Base version of Intel's released firmware */
+ struct ish_version base_ver;
+ /* Vendor-customized project version */
+ struct ish_version prj_ver;
+
/* Debug stats */
unsigned int ipc_rx_cnt;
unsigned long long ipc_rx_bytes_cnt;
diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.c b/drivers/hid/intel-ish-hid/ishtp/loader.c
index f76c4437a1f5..f34086b29cf0 100644
--- a/drivers/hid/intel-ish-hid/ishtp/loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp/loader.c
@@ -308,6 +308,28 @@ static int request_ish_firmware(const struct firmware **firmware_p,
return _request_ish_firmware(firmware_p, filename, dev);
}
+static int copy_manifest(const struct firmware *fw, struct ish_global_manifest *manifest)
+{
+ u32 offset;
+
+ for (offset = 0; offset + sizeof(*manifest) < fw->size; offset += ISH_MANIFEST_ALIGNMENT) {
+ memcpy(manifest, fw->data + offset, sizeof(*manifest));
+
+ if (le32_to_cpu(manifest->sig_fourcc) == ISH_GLOBAL_SIG)
+ return 0;
+ }
+
+ return -1;
+}
+
+static void copy_ish_version(struct version_in_manifest *src, struct ish_version *dst)
+{
+ dst->major = le16_to_cpu(src->major);
+ dst->minor = le16_to_cpu(src->minor);
+ dst->hotfix = le16_to_cpu(src->hotfix);
+ dst->build = le16_to_cpu(src->build);
+}
+
/**
* ishtp_loader_work() - Load the ISHTP firmware
* @work: The work structure
@@ -336,6 +358,7 @@ void ishtp_loader_work(struct work_struct *work)
struct loader_xfer_query query = { .header = cpu_to_le32(query_hdr.val32), };
struct loader_start start = { .header = cpu_to_le32(start_hdr.val32), };
union loader_recv_message recv_msg;
+ struct ish_global_manifest manifest;
const struct firmware *ish_fw;
void *dma_bufs[FRAGMENT_MAX_NUM] = {};
u32 fragment_size;
@@ -372,7 +395,7 @@ void ishtp_loader_work(struct work_struct *work)
if (rv)
continue; /* try again if failed */
- dev_dbg(dev->devc, "ISH Version %u.%u.%u.%u\n",
+ dev_dbg(dev->devc, "ISH Bootloader Version %u.%u.%u.%u\n",
recv_msg.query_ack.version_major,
recv_msg.query_ack.version_minor,
recv_msg.query_ack.version_hotfix,
@@ -390,6 +413,16 @@ void ishtp_loader_work(struct work_struct *work)
continue; /* try again if failed */
dev_info(dev->devc, "firmware loaded. size:%zu\n", ish_fw->size);
+ if (!copy_manifest(ish_fw, &manifest)) {
+ copy_ish_version(&manifest.base_ver, &dev->base_ver);
+ copy_ish_version(&manifest.prj_ver, &dev->prj_ver);
+ dev_info(dev->devc, "FW base version: %u.%u.%u.%u\n",
+ dev->base_ver.major, dev->base_ver.minor,
+ dev->base_ver.hotfix, dev->base_ver.build);
+ dev_info(dev->devc, "FW project version: %u.%u.%u.%u\n",
+ dev->prj_ver.major, dev->prj_ver.minor,
+ dev->prj_ver.hotfix, dev->prj_ver.build);
+ }
break;
} while (--retry);
diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.h b/drivers/hid/intel-ish-hid/ishtp/loader.h
index 308b96085a4d..4dda038b4947 100644
--- a/drivers/hid/intel-ish-hid/ishtp/loader.h
+++ b/drivers/hid/intel-ish-hid/ishtp/loader.h
@@ -10,6 +10,7 @@
#include <linux/bits.h>
#include <linux/jiffies.h>
+#include <linux/sizes.h>
#include <linux/types.h>
#include "ishtp-dev.h"
@@ -228,4 +229,37 @@ struct ish_firmware_variant {
*/
void ishtp_loader_work(struct work_struct *work);
+/* The ISH global manifest is 4KB-aligned inside the firmware binary */
+#define ISH_MANIFEST_ALIGNMENT SZ_4K
+
+/* Signature for ISH global manifest */
+#define ISH_GLOBAL_SIG 0x47485349 /* FourCC 'I', 'S', 'H', 'G' */
+
+struct version_in_manifest {
+ __le16 major;
+ __le16 minor;
+ __le16 hotfix;
+ __le16 build;
+};
+
+/**
+ * struct ish_global_manifest - global manifest for ISH
+ * @sig_fourcc: Signature FourCC, should be 'I', 'S', 'H', 'G'.
+ * @len: Length of the manifest.
+ * @header_version: Version of the manifest header.
+ * @flags: Flags for additional information.
+ * @base_ver: Base version of Intel's released firmware.
+ * @reserved: Reserved space for future use.
+ * @prj_ver: Vendor-customized project version.
+ */
+struct ish_global_manifest {
+ __le32 sig_fourcc;
+ __le32 len;
+ __le32 header_version;
+ __le32 flags;
+ struct version_in_manifest base_ver;
+ __le32 reserved[13];
+ struct version_in_manifest prj_ver;
+};
+
#endif /* _ISHTP_LOADER_H_ */
diff --git a/drivers/hid/intel-thc-hid/Kconfig b/drivers/hid/intel-thc-hid/Kconfig
new file mode 100644
index 000000000000..91ec84902db8
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/Kconfig
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2024, Intel Corporation.
+
+menu "Intel THC HID Support"
+ depends on X86_64 && PCI
+
+config INTEL_THC_HID
+ tristate "Intel Touch Host Controller"
+ depends on ACPI
+ select HID
+ help
+	  THC (Touch Host Controller) is the name of the IP block in the PCH
+	  that interfaces with touch devices (e.g. touchscreen, touchpad).
+	  It is comprised of 3 key functional blocks: a natively half-duplex
+	  Quad I/O capable SPI master; a low latency I2C interface to support
+	  HIDI2C compliant devices; and a hardware sequencer with read/write
+	  DMA capability to system memory.
+
+ Say Y/M here if you want to support Intel THC. If unsure, say N.
+
+config INTEL_QUICKSPI
+ tristate "Intel QuickSPI driver based on Intel Touch Host Controller"
+ depends on INTEL_THC_HID
+ help
+ Intel QuickSPI, based on Touch Host Controller (THC), implements
+	  the HIDSPI (HID over SPI) protocol. It configures THC to work in
+	  SPI mode, and controls the THC hardware sequencer to accelerate
+	  the HIDSPI transaction flow.
+
+ Say Y/M here if you want to support Intel QuickSPI. If unsure, say N.
+
+config INTEL_QUICKI2C
+ tristate "Intel QuickI2C driver based on Intel Touch Host Controller"
+ depends on INTEL_THC_HID
+ help
+	  Intel QuickI2C, based on Touch Host Controller (THC) hardware,
+	  implements the HIDI2C (HID over I2C) protocol. It configures THC
+	  to work in I2C mode, and controls the THC hardware sequencer to
+	  accelerate the HIDI2C transaction flow.
+
+ Say Y/M here if you want to support Intel QuickI2C. If unsure, say N.
+
+endmenu
diff --git a/drivers/hid/intel-thc-hid/Makefile b/drivers/hid/intel-thc-hid/Makefile
new file mode 100644
index 000000000000..6f762d87af07
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile - Intel Touch Host Controller (THC) drivers
+# Copyright (c) 2024, Intel Corporation.
+#
+#
+
+obj-$(CONFIG_INTEL_THC_HID) += intel-thc.o
+intel-thc-objs += intel-thc/intel-thc-dev.o
+intel-thc-objs += intel-thc/intel-thc-dma.o
+
+obj-$(CONFIG_INTEL_QUICKSPI) += intel-quickspi.o
+intel-quickspi-objs += intel-quickspi/pci-quickspi.o
+intel-quickspi-objs += intel-quickspi/quickspi-hid.o
+intel-quickspi-objs += intel-quickspi/quickspi-protocol.o
+
+obj-$(CONFIG_INTEL_QUICKI2C) += intel-quicki2c.o
+intel-quicki2c-objs += intel-quicki2c/pci-quicki2c.o
+intel-quicki2c-objs += intel-quicki2c/quicki2c-hid.o
+intel-quicki2c-objs += intel-quicki2c/quicki2c-protocol.o
+
+ccflags-y += -I $(src)/intel-thc
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
new file mode 100644
index 000000000000..2de93f4a25ca
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -0,0 +1,969 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/pm_runtime.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-hw.h"
+
+#include "quicki2c-dev.h"
+#include "quicki2c-hid.h"
+#include "quicki2c-protocol.h"
+
+/* THC QuickI2C ACPI method to get device properties */
+/* HIDI2C device method */
+static guid_t i2c_hid_guid =
+ GUID_INIT(0x3cdff6f7, 0x4267, 0x4555, 0xad, 0x05, 0xb3, 0x0a, 0x3d, 0x89, 0x38, 0xde);
+
+/* platform method */
+static guid_t thc_platform_guid =
+ GUID_INIT(0x84005682, 0x5b71, 0x41a4, 0x8d, 0x66, 0x81, 0x30, 0xf7, 0x87, 0xa1, 0x38);
+
+/**
+ * quicki2c_acpi_get_dsm_property - Query device ACPI DSM parameter
+ *
+ * @adev: pointer to the ACPI device
+ * @guid: ACPI method's GUID
+ * @rev: ACPI method's revision
+ * @func: ACPI method's function number
+ * @type: ACPI parameter's data type
+ * @prop_buf: pointer to the return buffer
+ *
+ * This is a helper function for the device to query its ACPI DSM parameters.
+ *
+ * Return: 0 on success or -ENODEV on failure.
+ */
+static int quicki2c_acpi_get_dsm_property(struct acpi_device *adev, const guid_t *guid,
+ u64 rev, u64 func, acpi_object_type type, void *prop_buf)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm_typed(handle, guid, rev, func, NULL, type);
+ if (!obj) {
+ acpi_handle_err(handle,
+ "Error _DSM call failed, rev: %d, func: %d, type: %d\n",
+ (int)rev, (int)func, (int)type);
+ return -ENODEV;
+ }
+
+ if (type == ACPI_TYPE_INTEGER)
+ *(u32 *)prop_buf = (u32)obj->integer.value;
+ else if (type == ACPI_TYPE_BUFFER)
+ memcpy(prop_buf, obj->buffer.pointer, obj->buffer.length);
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+/**
+ * quicki2c_acpi_get_dsd_property - Query device ACPI DSD parameter
+ *
+ * @adev: pointer to the ACPI device
+ * @dsd_method_name: ACPI method's property name
+ * @type: ACPI parameter's data type
+ * @prop_buf: pointer to the return buffer
+ *
+ * This is a helper function for the device to query its ACPI DSD parameters.
+ *
+ * Return: 0 on success or -ENODEV on failure.
+ */
+static int quicki2c_acpi_get_dsd_property(struct acpi_device *adev, acpi_string dsd_method_name,
+ acpi_object_type type, void *prop_buf)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object obj = { .type = type };
+ struct acpi_object_list arg_list = {
+ .count = 1,
+ .pointer = &obj,
+ };
+ union acpi_object *ret_obj;
+ acpi_status status;
+
+ status = acpi_evaluate_object(handle, dsd_method_name, &arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle,
+ "Can't evaluate %s method: %d\n", dsd_method_name, status);
+ return -ENODEV;
+ }
+
+ ret_obj = buffer.pointer;
+
+ memcpy(prop_buf, ret_obj->buffer.pointer, ret_obj->buffer.length);
+
+ return 0;
+}
+
+/**
+ * quicki2c_get_acpi_resources - Query all quicki2c devices' ACPI parameters
+ *
+ * @qcdev: pointer to the quicki2c device
+ *
+ * This function gets all of the quicki2c device's ACPI resources.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quicki2c_get_acpi_resources(struct quicki2c_device *qcdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(qcdev->dev);
+ struct quicki2c_subip_acpi_parameter i2c_param;
+ struct quicki2c_subip_acpi_config i2c_config;
+ u32 hid_desc_addr;
+ int ret = -EINVAL;
+
+ if (!adev) {
+ dev_err(qcdev->dev, "Invalid acpi device pointer\n");
+ return ret;
+ }
+
+ qcdev->acpi_dev = adev;
+
+ ret = quicki2c_acpi_get_dsm_property(adev, &i2c_hid_guid,
+ QUICKI2C_ACPI_REVISION_NUM,
+ QUICKI2C_ACPI_FUNC_NUM_HID_DESC_ADDR,
+ ACPI_TYPE_INTEGER,
+ &hid_desc_addr);
+ if (ret)
+ return ret;
+
+ qcdev->hid_desc_addr = (u16)hid_desc_addr;
+
+ ret = quicki2c_acpi_get_dsm_property(adev, &thc_platform_guid,
+ QUICKI2C_ACPI_REVISION_NUM,
+ QUICKI2C_ACPI_FUNC_NUM_ACTIVE_LTR_VAL,
+ ACPI_TYPE_INTEGER,
+ &qcdev->active_ltr_val);
+ if (ret)
+ return ret;
+
+ ret = quicki2c_acpi_get_dsm_property(adev, &thc_platform_guid,
+ QUICKI2C_ACPI_REVISION_NUM,
+ QUICKI2C_ACPI_FUNC_NUM_LP_LTR_VAL,
+ ACPI_TYPE_INTEGER,
+ &qcdev->low_power_ltr_val);
+ if (ret)
+ return ret;
+
+ ret = quicki2c_acpi_get_dsd_property(adev, QUICKI2C_ACPI_METHOD_NAME_ICRS,
+ ACPI_TYPE_BUFFER, &i2c_param);
+ if (ret)
+ return ret;
+
+ if (i2c_param.addressing_mode != HIDI2C_ADDRESSING_MODE_7BIT)
+ return -EOPNOTSUPP;
+
+ qcdev->i2c_slave_addr = i2c_param.device_address;
+
+ ret = quicki2c_acpi_get_dsd_property(adev, QUICKI2C_ACPI_METHOD_NAME_ISUB,
+ ACPI_TYPE_BUFFER, &i2c_config);
+ if (ret)
+ return ret;
+
+ if (i2c_param.connection_speed > 0 &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_STANDARD_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_STANDARD;
+ qcdev->i2c_clock_hcnt = i2c_config.SMHX;
+ qcdev->i2c_clock_lcnt = i2c_config.SMLX;
+ } else if (i2c_param.connection_speed > QUICKI2C_SUBIP_STANDARD_MODE_MAX_SPEED &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_FAST_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_FAST_AND_PLUS;
+ qcdev->i2c_clock_hcnt = i2c_config.FMHX;
+ qcdev->i2c_clock_lcnt = i2c_config.FMLX;
+ } else if (i2c_param.connection_speed > QUICKI2C_SUBIP_FAST_MODE_MAX_SPEED &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_FASTPLUS_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_FAST_AND_PLUS;
+ qcdev->i2c_clock_hcnt = i2c_config.FPHX;
+ qcdev->i2c_clock_lcnt = i2c_config.FPLX;
+ } else if (i2c_param.connection_speed > QUICKI2C_SUBIP_FASTPLUS_MODE_MAX_SPEED &&
+ i2c_param.connection_speed <= QUICKI2C_SUBIP_HIGH_SPEED_MODE_MAX_SPEED) {
+ qcdev->i2c_speed_mode = THC_I2C_HIGH_SPEED;
+ qcdev->i2c_clock_hcnt = i2c_config.HMHX;
+ qcdev->i2c_clock_lcnt = i2c_config.HMLX;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * quicki2c_irq_quick_handler - The ISR of the quicki2c driver
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * Return: IRQ_WAKE_THREAD if further processing is needed.
+ */
+static irqreturn_t quicki2c_irq_quick_handler(int irq, void *dev_id)
+{
+ struct quicki2c_device *qcdev = dev_id;
+
+ if (qcdev->state == QUICKI2C_DISABLED)
+ return IRQ_HANDLED;
+
+	/* Disable the THC interrupt before the current interrupt is handled */
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * try_recover - Try to recover THC and the touch device
+ * @qcdev: pointer to the quicki2c device
+ *
+ * This function is an error handler, called when a fatal error happens.
+ * It re-configures THC DMA to recover data transfer between the device
+ * and THC.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int try_recover(struct quicki2c_device *qcdev)
+{
+ int ret;
+
+ thc_dma_unconfigure(qcdev->thc_hw);
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret) {
+ dev_err(qcdev->dev, "Reconfig DMA failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int handle_input_report(struct quicki2c_device *qcdev)
+{
+ struct hidi2c_report_packet *pkt = (struct hidi2c_report_packet *)qcdev->input_buf;
+ int rx_dma_finished = 0;
+ size_t report_len;
+ int ret;
+
+ while (!rx_dma_finished) {
+ ret = thc_rxdma_read(qcdev->thc_hw, THC_RXDMA2,
+ (u8 *)pkt, &report_len,
+ &rx_dma_finished);
+ if (ret)
+ return ret;
+
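+		/* A zero-length input report is the device's reset notification */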
+ if (!pkt->len) {
+ if (qcdev->state == QUICKI2C_RESETING) {
+ qcdev->reset_ack = true;
+ wake_up(&qcdev->reset_ack_wq);
+
+ qcdev->state = QUICKI2C_RESETED;
+ } else {
+				dev_warn(qcdev->dev, "unexpected device initiated reset (DIR) received\n");
+ }
+
+ continue;
+ }
+
+		/* Discard input reports that arrive before driver probe completes */
+ if (qcdev->state != QUICKI2C_ENABLED)
+ continue;
+
+ quicki2c_hid_send_report(qcdev, pkt->data,
+ HIDI2C_DATA_LEN(le16_to_cpu(pkt->len)));
+ }
+
+ return 0;
+}
+
+/**
+ * quicki2c_irq_thread_handler - IRQ thread handler of quicki2c driver
+ *
+ * @irq: The IRQ number
+ * @dev_id: pointer to the quicki2c device structure
+ *
+ * Return: IRQ_HANDLED to finish this handler.
+ */
+static irqreturn_t quicki2c_irq_thread_handler(int irq, void *dev_id)
+{
+ struct quicki2c_device *qcdev = dev_id;
+ int err_recover = 0;
+ int int_mask;
+ int ret;
+
+ if (qcdev->state == QUICKI2C_DISABLED)
+ return IRQ_HANDLED;
+
+ ret = pm_runtime_resume_and_get(qcdev->dev);
+ if (ret)
+ return IRQ_HANDLED;
+
+ int_mask = thc_interrupt_handler(qcdev->thc_hw);
+
+ if (int_mask & BIT(THC_FATAL_ERR_INT) || int_mask & BIT(THC_TXN_ERR_INT) ||
+ int_mask & BIT(THC_UNKNOWN_INT)) {
+ err_recover = 1;
+ goto exit;
+ }
+
+ if (int_mask & BIT(THC_RXDMA2_INT)) {
+ err_recover = handle_input_report(qcdev);
+ if (err_recover)
+ goto exit;
+ }
+
+exit:
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ if (err_recover)
+ if (try_recover(qcdev))
+ qcdev->state = QUICKI2C_DISABLED;
+
+ pm_runtime_mark_last_busy(qcdev->dev);
+ pm_runtime_put_autosuspend(qcdev->dev);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * quicki2c_dev_init - Initialize quicki2c device
+ *
+ * @pdev: pointer to the THC PCI device
+ * @mem_addr: pointer to the MMIO memory address
+ *
+ * Allocate the quicki2c device structure, initialize the THC device,
+ * then configure THC to HIDI2C mode.
+ *
+ * On success, enable the THC hardware interrupt.
+ *
+ * Return: pointer to the quicki2c device structure on success,
+ * or an ERR_PTR() on failure.
+ */
+static struct quicki2c_device *quicki2c_dev_init(struct pci_dev *pdev, void __iomem *mem_addr)
+{
+ struct device *dev = &pdev->dev;
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = devm_kzalloc(dev, sizeof(struct quicki2c_device), GFP_KERNEL);
+ if (!qcdev)
+ return ERR_PTR(-ENOMEM);
+
+ qcdev->pdev = pdev;
+ qcdev->dev = dev;
+ qcdev->mem_addr = mem_addr;
+ qcdev->state = QUICKI2C_DISABLED;
+
+ init_waitqueue_head(&qcdev->reset_ack_wq);
+
+ /* thc hw init */
+ qcdev->thc_hw = thc_dev_init(qcdev->dev, qcdev->mem_addr);
+ if (IS_ERR(qcdev->thc_hw)) {
+ ret = PTR_ERR(qcdev->thc_hw);
+ dev_err_once(dev, "Failed to initialize THC device context, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = quicki2c_get_acpi_resources(qcdev);
+ if (ret) {
+ dev_err_once(dev, "Get ACPI resources failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = thc_port_select(qcdev->thc_hw, THC_PORT_TYPE_I2C);
+ if (ret) {
+ dev_err_once(dev, "Failed to select THC port, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = thc_i2c_subip_init(qcdev->thc_hw, qcdev->i2c_slave_addr,
+ qcdev->i2c_speed_mode,
+ qcdev->i2c_clock_hcnt,
+ qcdev->i2c_clock_lcnt);
+ if (ret)
+ return ERR_PTR(ret);
+
+ thc_int_trigger_type_select(qcdev->thc_hw, false);
+
+ thc_interrupt_config(qcdev->thc_hw);
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ qcdev->state = QUICKI2C_INITED;
+
+ return qcdev;
+}
+
+/**
+ * quicki2c_dev_deinit - De-initialize quicki2c device
+ *
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * Disable the THC interrupt and de-initialize THC.
+ */
+static void quicki2c_dev_deinit(struct quicki2c_device *qcdev)
+{
+ thc_interrupt_enable(qcdev->thc_hw, false);
+ thc_ltr_unconfig(qcdev->thc_hw);
+
+ qcdev->state = QUICKI2C_DISABLED;
+}
+
+/**
+ * quicki2c_dma_init - Configure THC DMA for quicki2c device
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * This function uses the TIC's parameters (such as max input length and max
+ * output length) to allocate THC DMA buffers and configure THC DMA engines.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quicki2c_dma_init(struct quicki2c_device *qcdev)
+{
+ size_t swdma_max_len;
+ int ret;
+
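+	/*
+	 * The SWDMA buffer must hold the largest one-shot read: either the
+	 * report descriptor or a full input report.
+	 */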
+ swdma_max_len = max(le16_to_cpu(qcdev->dev_desc.max_input_len),
+ le16_to_cpu(qcdev->dev_desc.report_desc_len));
+
+ ret = thc_dma_set_max_packet_sizes(qcdev->thc_hw, 0,
+ le16_to_cpu(qcdev->dev_desc.max_input_len),
+ le16_to_cpu(qcdev->dev_desc.max_output_len),
+ swdma_max_len);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_allocate(qcdev->thc_hw);
+ if (ret) {
+ dev_err(qcdev->dev, "Allocate THC DMA buffer failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* Enable RxDMA */
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret) {
+ dev_err(qcdev->dev, "Configure THC DMA failed, ret = %d\n", ret);
+ thc_dma_unconfigure(qcdev->thc_hw);
+ thc_dma_release(qcdev->thc_hw);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * quicki2c_dma_deinit - Release THC DMA for quicki2c device
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * Stop THC DMA engines and release all DMA buffers.
+ *
+ */
+static void quicki2c_dma_deinit(struct quicki2c_device *qcdev)
+{
+ thc_dma_unconfigure(qcdev->thc_hw);
+ thc_dma_release(qcdev->thc_hw);
+}
+
+/**
+ * quicki2c_alloc_report_buf - Alloc report buffers
+ * @qcdev: pointer to the quicki2c device structure
+ *
+ * Allocate a report descriptor buffer; it will be used to store the TIC's
+ * HID report descriptor.
+ *
+ * Allocate an input report buffer; it will be used to receive HID input
+ * report data from the TIC.
+ *
+ * Allocate an output report buffer; it will be used to store HID output
+ * reports, such as set feature requests.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quicki2c_alloc_report_buf(struct quicki2c_device *qcdev)
+{
+ size_t max_report_len;
+
+ qcdev->report_descriptor = devm_kzalloc(qcdev->dev,
+ le16_to_cpu(qcdev->dev_desc.report_desc_len),
+ GFP_KERNEL);
+ if (!qcdev->report_descriptor)
+ return -ENOMEM;
+
+ /*
+	 * Some HIDI2C devices don't declare their input/output max lengths
+	 * correctly; use a default 4K buffer to avoid DMA buffer overruns.
+ */
+ max_report_len = max(le16_to_cpu(qcdev->dev_desc.max_input_len), SZ_4K);
+
+ qcdev->input_buf = devm_kzalloc(qcdev->dev, max_report_len, GFP_KERNEL);
+ if (!qcdev->input_buf)
+ return -ENOMEM;
+
+ if (!le16_to_cpu(qcdev->dev_desc.max_output_len))
+ qcdev->dev_desc.max_output_len = cpu_to_le16(SZ_4K);
+
+ max_report_len = max(le16_to_cpu(qcdev->dev_desc.max_output_len),
+ max_report_len);
+
+ qcdev->report_buf = devm_kzalloc(qcdev->dev, max_report_len, GFP_KERNEL);
+ if (!qcdev->report_buf)
+ return -ENOMEM;
+
+ qcdev->report_len = max_report_len;
+
+ return 0;
+}
+
+/**
+ * quicki2c_probe - QuickI2C driver probe function
+ *
+ * @pdev: pointer to the PCI device
+ * @id: pointer to the pci_device_id structure
+ *
+ * This function initializes the THC and HIDI2C device; the flow is:
+ * - do THC PCI device initialization
+ * - query HIDI2C ACPI parameters
+ * - configure THC to HIDI2C mode
+ * - go through HIDI2C enumeration flow
+ *   |- read device descriptor
+ *   |- reset HIDI2C device
+ * - enable THC interrupt and DMA
+ * - read report descriptor
+ * - register HID device
+ * - enable runtime power management
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quicki2c_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct quicki2c_device *qcdev;
+ void __iomem *mem_addr;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err_once(&pdev->dev, "Failed to enable PCI device, ret = %d.\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ if (ret) {
+ dev_err_once(&pdev->dev, "Failed to get PCI regions, ret = %d.\n", ret);
+ goto disable_pci_device;
+ }
+
+ mem_addr = pcim_iomap_table(pdev)[0];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err_once(&pdev->dev, "No usable DMA configuration %d\n", ret);
+ goto unmap_io_region;
+ }
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err_once(&pdev->dev,
+ "Failed to allocate IRQ vectors. ret = %d\n", ret);
+ goto unmap_io_region;
+ }
+
+ pdev->irq = pci_irq_vector(pdev, 0);
+
+ qcdev = quicki2c_dev_init(pdev, mem_addr);
+ if (IS_ERR(qcdev)) {
+ dev_err_once(&pdev->dev, "QuickI2C device init failed\n");
+ ret = PTR_ERR(qcdev);
+ goto unmap_io_region;
+ }
+
+ pci_set_drvdata(pdev, qcdev);
+
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ quicki2c_irq_quick_handler,
+ quicki2c_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME,
+ qcdev);
+ if (ret) {
+ dev_err_once(&pdev->dev,
+ "Failed to request threaded IRQ, irq = %d.\n", pdev->irq);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_get_device_descriptor(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Get device descriptor failed, ret = %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_alloc_report_buf(qcdev);
+ if (ret) {
+		dev_err(&pdev->dev, "Alloc report buffers failed, ret = %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_dma_init(qcdev);
+ if (ret) {
+		dev_err(&pdev->dev, "Setup THC DMA failed, ret = %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ goto dev_deinit;
+
+ ret = quicki2c_set_power(qcdev, HIDI2C_ON);
+ if (ret) {
+		dev_err(&pdev->dev, "Set Power On command failed, ret = %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_reset(qcdev);
+ if (ret) {
+		dev_err(&pdev->dev, "Reset HIDI2C device failed, ret = %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quicki2c_get_report_descriptor(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Get report descriptor failed, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ ret = quicki2c_hid_probe(qcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register HID device, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ qcdev->state = QUICKI2C_ENABLED;
+
+ /* Enable runtime power management */
+ pm_runtime_use_autosuspend(qcdev->dev);
+ pm_runtime_set_autosuspend_delay(qcdev->dev, DEFAULT_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_mark_last_busy(qcdev->dev);
+ pm_runtime_put_noidle(qcdev->dev);
+ pm_runtime_put_autosuspend(qcdev->dev);
+
+ dev_dbg(&pdev->dev, "QuickI2C probe success\n");
+
+ return 0;
+
+dma_deinit:
+ quicki2c_dma_deinit(qcdev);
+dev_deinit:
+ quicki2c_dev_deinit(qcdev);
+unmap_io_region:
+ pcim_iounmap_regions(pdev, BIT(0));
+disable_pci_device:
+ pci_clear_master(pdev);
+
+ return ret;
+}
+
+/**
+ * quicki2c_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void quicki2c_remove(struct pci_dev *pdev)
+{
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return;
+
+ quicki2c_hid_remove(qcdev);
+ quicki2c_dma_deinit(qcdev);
+
+ pm_runtime_get_noresume(qcdev->dev);
+
+ quicki2c_dev_deinit(qcdev);
+
+ pcim_iounmap_regions(pdev, BIT(0));
+ pci_clear_master(pdev);
+}
+
+/**
+ * quicki2c_shutdown - Device Shutdown Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called from the reboot notifier. It's a simplified version
+ * of remove, so we go down faster.
+ */
+static void quicki2c_shutdown(struct pci_dev *pdev)
+{
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return;
+
+	/* Must stop DMA before reboot to avoid DMA entering an unknown state */
+ quicki2c_dma_deinit(qcdev);
+
+ quicki2c_dev_deinit(qcdev);
+}
+
+static int quicki2c_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ /*
+	 * As the I2C sub-IP of THC has no register auto save/restore support,
+	 * the driver needs to do that explicitly for every D3 case.
+ */
+ ret = thc_i2c_subip_regs_save(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ thc_dma_unconfigure(qcdev->thc_hw);
+
+ return 0;
+}
+
+static int quicki2c_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_port_select(qcdev->thc_hw, THC_PORT_TYPE_I2C);
+ if (ret)
+ return ret;
+
+ ret = thc_i2c_subip_regs_restore(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qcdev->thc_hw);
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quicki2c_freeze(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ thc_dma_unconfigure(qcdev->thc_hw);
+
+ return 0;
+}
+
+static int quicki2c_thaw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quicki2c_poweroff(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qcdev->thc_hw, false);
+
+ thc_ltr_unconfig(qcdev->thc_hw);
+
+ quicki2c_dma_deinit(qcdev);
+
+ return 0;
+}
+
+static int quicki2c_restore(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+ int ret;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+	/* Re-configure THC HW when coming back from hibernation */
+ ret = thc_port_select(qcdev->thc_hw, THC_PORT_TYPE_I2C);
+ if (ret)
+ return ret;
+
+ ret = thc_i2c_subip_init(qcdev->thc_hw, qcdev->i2c_slave_addr,
+ qcdev->i2c_speed_mode,
+ qcdev->i2c_clock_hcnt,
+ qcdev->i2c_clock_lcnt);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qcdev->thc_hw);
+
+ thc_interrupt_enable(qcdev->thc_hw, true);
+
+ ret = thc_interrupt_quiesce(qcdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_configure(qcdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_ltr_config(qcdev->thc_hw,
+ qcdev->active_ltr_val,
+ qcdev->low_power_ltr_val);
+
+ thc_change_ltr_mode(qcdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static int quicki2c_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ thc_change_ltr_mode(qcdev->thc_hw, THC_LTR_MODE_LP);
+
+ pci_save_state(pdev);
+
+ return 0;
+}
+
+static int quicki2c_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quicki2c_device *qcdev;
+
+ qcdev = pci_get_drvdata(pdev);
+ if (!qcdev)
+ return -ENODEV;
+
+ thc_change_ltr_mode(qcdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static const struct dev_pm_ops quicki2c_pm_ops = {
+ .suspend = quicki2c_suspend,
+ .resume = quicki2c_resume,
+ .freeze = quicki2c_freeze,
+ .thaw = quicki2c_thaw,
+ .poweroff = quicki2c_poweroff,
+ .restore = quicki2c_restore,
+ .runtime_suspend = quicki2c_runtime_suspend,
+ .runtime_resume = quicki2c_runtime_resume,
+ .runtime_idle = NULL,
+};
+
+static const struct pci_device_id quicki2c_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, THC_LNL_DEVICE_ID_I2C_PORT1), },
+ {PCI_VDEVICE(INTEL, THC_LNL_DEVICE_ID_I2C_PORT2), },
+ {PCI_VDEVICE(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT1), },
+ {PCI_VDEVICE(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2), },
+ {PCI_VDEVICE(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1), },
+ {PCI_VDEVICE(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2), },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl);
+
+static struct pci_driver quicki2c_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = quicki2c_pci_tbl,
+ .probe = quicki2c_probe,
+ .remove = quicki2c_remove,
+ .shutdown = quicki2c_shutdown,
+ .driver.pm = &quicki2c_pm_ops,
+ .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+};
+
+module_pci_driver(quicki2c_driver);
+
+MODULE_AUTHOR("Xinpeng Sun <xinpeng.sun@intel.com>");
+MODULE_AUTHOR("Even Xu <even.xu@intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) QuickI2C Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("INTEL_THC");
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
new file mode 100644
index 000000000000..6ddb584bd611
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKI2C_DEV_H_
+#define _QUICKI2C_DEV_H_
+
+#include <linux/hid-over-i2c.h>
+#include <linux/workqueue.h>
+
+#define THC_LNL_DEVICE_ID_I2C_PORT1 0xA848
+#define THC_LNL_DEVICE_ID_I2C_PORT2 0xA84A
+#define THC_PTL_H_DEVICE_ID_I2C_PORT1 0xE348
+#define THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A
+#define THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448
+#define THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A
+
+/* Packet size value, in units of 16 bytes */
+#define MAX_PACKET_SIZE_VALUE_LNL 256
+
+/* HIDI2C special ACPI parameters DSD name */
+#define QUICKI2C_ACPI_METHOD_NAME_ICRS "ICRS"
+#define QUICKI2C_ACPI_METHOD_NAME_ISUB "ISUB"
+
+/* HIDI2C special ACPI parameters DSM methods */
+#define QUICKI2C_ACPI_REVISION_NUM 1
+#define QUICKI2C_ACPI_FUNC_NUM_HID_DESC_ADDR 1
+#define QUICKI2C_ACPI_FUNC_NUM_ACTIVE_LTR_VAL 1
+#define QUICKI2C_ACPI_FUNC_NUM_LP_LTR_VAL 2
+
+#define QUICKI2C_SUBIP_STANDARD_MODE_MAX_SPEED 100000
+#define QUICKI2C_SUBIP_FAST_MODE_MAX_SPEED 400000
+#define QUICKI2C_SUBIP_FASTPLUS_MODE_MAX_SPEED 1000000
+#define QUICKI2C_SUBIP_HIGH_SPEED_MODE_MAX_SPEED 3400000
+
+#define QUICKI2C_DEFAULT_ACTIVE_LTR_VALUE 5
+#define QUICKI2C_DEFAULT_LP_LTR_VALUE 500
+#define QUICKI2C_RPM_TIMEOUT_MS 500
+
+/*
+ * THC uses runtime auto suspend to dynamically switch between THC active LTR
+ * and low power LTR to save CPU power.
+ * The default value is 5000 ms: if there is no touch event within this time,
+ * THC will switch to low power LTR mode.
+ */
+#define DEFAULT_AUTO_SUSPEND_DELAY_MS 5000
+
+enum quicki2c_dev_state {
+ QUICKI2C_NONE,
+ QUICKI2C_RESETING,
+ QUICKI2C_RESETED,
+ QUICKI2C_INITED,
+ QUICKI2C_ENABLED,
+ QUICKI2C_DISABLED,
+};
+
+enum {
+ HIDI2C_ADDRESSING_MODE_7BIT,
+ HIDI2C_ADDRESSING_MODE_10BIT,
+};
+
+/**
+ * struct quicki2c_subip_acpi_parameter - QuickI2C ACPI DSD parameters
+ * @device_address: I2C device slave address
+ * @connection_speed: I2C device expected connection speed
+ * @addressing_mode: I2C device slave address mode, 7-bit or 10-bit
+ *
+ * These properties are retrieved from the QUICKI2C_ACPI_METHOD_NAME_ICRS
+ * method and are used as bus parameters.
+ */
+struct quicki2c_subip_acpi_parameter {
+ u16 device_address;
+ u64 connection_speed;
+ u8 addressing_mode;
+} __packed;
+
+/**
+ * struct quicki2c_subip_acpi_config - QuickI2C ACPI DSD parameters
+ * @SMHX: Standard Mode (100 kbit/s) Serial Clock Line HIGH Period
+ * @SMLX: Standard Mode (100 kbit/s) Serial Clock Line LOW Period
+ * @SMTD: Standard Mode (100 kbit/s) Serial Data Line Transmit Hold Period
+ * @SMRD: Standard Mode (100 kbit/s) Serial Data Receive Hold Period
+ * @FMHX: Fast Mode (400 kbit/s) Serial Clock Line HIGH Period
+ * @FMLX: Fast Mode (400 kbit/s) Serial Clock Line LOW Period
+ * @FMTD: Fast Mode (400 kbit/s) Serial Data Line Transmit Hold Period
+ * @FMRD: Fast Mode (400 kbit/s) Serial Data Line Receive Hold Period
+ * @FMSL: Maximum length (in ic_clk_cycles) of suppressed spikes
+ * in Standard Mode, Fast Mode and Fast Mode Plus
+ * @FPHX: Fast Mode Plus (1Mbit/sec) Serial Clock Line HIGH Period
+ * @FPLX: Fast Mode Plus (1Mbit/sec) Serial Clock Line LOW Period
+ * @FPTD: Fast Mode Plus (1Mbit/sec) Serial Data Line Transmit HOLD Period
+ * @FPRD: Fast Mode Plus (1Mbit/sec) Serial Data Line Receive HOLD Period
+ * @HMHX: High Speed Mode Plus (3.4Mbits/sec) Serial Clock Line HIGH Period
+ * @HMLX: High Speed Mode Plus (3.4Mbits/sec) Serial Clock Line LOW Period
+ * @HMTD: High Speed Mode Plus (3.4Mbits/sec) Serial Data Line Transmit HOLD Period
+ * @HMRD: High Speed Mode Plus (3.4Mbits/sec) Serial Data Line Receive HOLD Period
+ * @HMSL: Maximum length (in ic_clk_cycles) of suppressed spikes in High Speed Mode
+ *
+ * These properties are retrieved from the QUICKI2C_ACPI_METHOD_NAME_ISUB
+ * method and are used for I2C timing configuration.
+ */
+struct quicki2c_subip_acpi_config {
+ u64 SMHX;
+ u64 SMLX;
+ u64 SMTD;
+ u64 SMRD;
+
+ u64 FMHX;
+ u64 FMLX;
+ u64 FMTD;
+ u64 FMRD;
+ u64 FMSL;
+
+ u64 FPHX;
+ u64 FPLX;
+ u64 FPTD;
+ u64 FPRD;
+
+ u64 HMHX;
+ u64 HMLX;
+ u64 HMTD;
+ u64 HMRD;
+ u64 HMSL;
+};
+
+struct device;
+struct pci_dev;
+struct thc_device;
+struct hid_device;
+struct acpi_device;
+
+/**
+ * struct quicki2c_device - THC QuickI2C device struct
+ * @dev: pointer to the kernel device
+ * @pdev: pointer to the PCI device
+ * @thc_hw: pointer to the THC device
+ * @hid_dev: pointer to the HID device
+ * @acpi_dev: pointer to the ACPI device
+ * @state: THC I2C device state
+ * @mem_addr: MMIO memory address
+ * @dev_desc: device descriptor for HIDI2C protocol
+ * @i2c_slave_addr: HIDI2C device slave address
+ * @hid_desc_addr: register address to retrieve the HID device descriptor
+ * @active_ltr_val: THC active LTR value
+ * @low_power_ltr_val: THC low power LTR value
+ * @i2c_speed_mode: 0 - standard mode, 1 - fast mode, 2 - fast mode plus
+ * @i2c_clock_hcnt: I2C CLK high period time (unit in cycle count)
+ * @i2c_clock_lcnt: I2C CLK low period time (unit in cycle count)
+ * @report_descriptor: store a copy of device report descriptor
+ * @input_buf: store a copy of latest input report data
+ * @report_buf: store a copy of latest input/output report packet from set/get feature
+ * @report_len: the length of input/output report packet
+ * @reset_ack_wq: wait queue for the reset response from the device
+ * @reset_ack: indicates whether the reset response has been received
+ */
+struct quicki2c_device {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct thc_device *thc_hw;
+ struct hid_device *hid_dev;
+ struct acpi_device *acpi_dev;
+ enum quicki2c_dev_state state;
+
+ void __iomem *mem_addr;
+
+ struct hidi2c_dev_descriptor dev_desc;
+ u8 i2c_slave_addr;
+ u16 hid_desc_addr;
+
+ u32 active_ltr_val;
+ u32 low_power_ltr_val;
+
+ u32 i2c_speed_mode;
+ u32 i2c_clock_hcnt;
+ u32 i2c_clock_lcnt;
+
+ u8 *report_descriptor;
+ u8 *input_buf;
+ u8 *report_buf;
+ u32 report_len;
+
+ wait_queue_head_t reset_ack_wq;
+ bool reset_ack;
+};
+
+#endif /* _QUICKI2C_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
new file mode 100644
index 000000000000..5c3ec95bb3fd
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/pm_runtime.h>
+
+#include "quicki2c-dev.h"
+#include "quicki2c-hid.h"
+#include "quicki2c-protocol.h"
+
+/**
+ * quicki2c_hid_parse() - HID core parse() callback
+ *
+ * @hid: HID device instance
+ *
+ * This function gets called during the call to hid_add_device().
+ *
+ * Return: 0 on success or non-zero on error.
+ */
+static int quicki2c_hid_parse(struct hid_device *hid)
+{
+ struct quicki2c_device *qcdev = hid->driver_data;
+
+ if (qcdev->report_descriptor)
+ return hid_parse_report(hid, qcdev->report_descriptor,
+ le16_to_cpu(qcdev->dev_desc.report_desc_len));
+
+ dev_err_once(qcdev->dev, "invalid report descriptor\n");
+ return -EINVAL;
+}
+
+static int quicki2c_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quicki2c_hid_stop(struct hid_device *hid)
+{
+}
+
+static int quicki2c_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quicki2c_hid_close(struct hid_device *hid)
+{
+}
+
+static int quicki2c_hid_raw_request(struct hid_device *hid,
+ unsigned char reportnum,
+ __u8 *buf, size_t len,
+ unsigned char rtype, int reqtype)
+{
+ struct quicki2c_device *qcdev = hid->driver_data;
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get(qcdev->dev);
+ if (ret)
+ return ret;
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ ret = quicki2c_get_report(qcdev, rtype, reportnum, buf, len);
+ break;
+ case HID_REQ_SET_REPORT:
+ ret = quicki2c_set_report(qcdev, rtype, reportnum, buf, len);
+ break;
+ default:
+		dev_err(qcdev->dev, "Unsupported request type %d\n", reqtype);
+ break;
+ }
+
+ pm_runtime_mark_last_busy(qcdev->dev);
+ pm_runtime_put_autosuspend(qcdev->dev);
+
+ return ret;
+}
+
+static int quicki2c_hid_power(struct hid_device *hid, int lvl)
+{
+ return 0;
+}
+
+static struct hid_ll_driver quicki2c_hid_ll_driver = {
+ .parse = quicki2c_hid_parse,
+ .start = quicki2c_hid_start,
+ .stop = quicki2c_hid_stop,
+ .open = quicki2c_hid_open,
+ .close = quicki2c_hid_close,
+ .power = quicki2c_hid_power,
+ .raw_request = quicki2c_hid_raw_request,
+};
+
+/**
+ * quicki2c_hid_probe() - Register HID low level driver
+ *
+ * @qcdev: pointer to the quicki2c device
+ *
+ * This function is used to allocate and add a HID device.
+ *
+ * Return: 0 on success or non-zero on error.
+ */
+int quicki2c_hid_probe(struct quicki2c_device *qcdev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid->ll_driver = &quicki2c_hid_ll_driver;
+ hid->bus = BUS_PCI;
+ hid->dev.parent = qcdev->dev;
+ hid->driver_data = qcdev;
+ hid->version = le16_to_cpu(qcdev->dev_desc.version_id);
+ hid->vendor = le16_to_cpu(qcdev->dev_desc.vendor_id);
+ hid->product = le16_to_cpu(qcdev->dev_desc.product_id);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quicki2c-hid",
+ hid->vendor, hid->product);
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_destroy_device(hid);
+ return ret;
+ }
+
+ qcdev->hid_dev = hid;
+
+ return 0;
+}
+
+/**
+ * quicki2c_hid_remove() - Destroy HID device
+ *
+ * @qcdev: pointer to the quicki2c device
+ */
+void quicki2c_hid_remove(struct quicki2c_device *qcdev)
+{
+ hid_destroy_device(qcdev->hid_dev);
+}
+
+/**
+ * quicki2c_hid_send_report() - Send HID input report data to HID core
+ *
+ * @qcdev: pointer to the quicki2c device
+ * @data: pointer to the input report data buffer
+ * @data_len: the length of the input report data
+ *
+ * Return: 0 on success or non-zero on error.
+ */
+int quicki2c_hid_send_report(struct quicki2c_device *qcdev,
+ void *data, size_t data_len)
+{
+ int ret;
+
+ ret = hid_input_report(qcdev->hid_dev, HID_INPUT_REPORT, data, data_len, 1);
+ if (ret)
+ dev_err(qcdev->dev, "Failed to send HID input report, ret = %d.\n", ret);
+
+ return ret;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h
new file mode 100644
index 000000000000..e80df5f339fe
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKI2C_HID_H_
+#define _QUICKI2C_HID_H_
+
+struct quicki2c_device;
+
+int quicki2c_hid_send_report(struct quicki2c_device *qcdev,
+ void *data, size_t data_size);
+int quicki2c_hid_probe(struct quicki2c_device *qcdev);
+void quicki2c_hid_remove(struct quicki2c_device *qcdev);
+
+#endif /* _QUICKI2C_HID_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c
new file mode 100644
index 000000000000..f493df0d5dc4
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/bitfield.h>
+#include <linux/hid.h>
+#include <linux/hid-over-i2c.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-dma.h"
+
+#include "quicki2c-dev.h"
+#include "quicki2c-hid.h"
+#include "quicki2c-protocol.h"
+
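+/*
+ * Assemble a HIDI2C command packet: the command register address, the
+ * encoded command itself, optionally the data register address, then a
+ * 16-bit length field followed by the payload.
+ */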
+static int quicki2c_init_write_buf(struct quicki2c_device *qcdev, u32 cmd, int cmd_len,
+ bool append_data_reg, u8 *data, int data_len,
+ u8 *write_buf, int write_buf_len)
+{
+ int buf_len, offset = 0;
+
+ buf_len = HIDI2C_REG_LEN + cmd_len;
+
+ if (append_data_reg)
+ buf_len += HIDI2C_REG_LEN;
+
+ if (data && data_len)
+ buf_len += data_len + HIDI2C_LENGTH_LEN;
+
+ if (buf_len > write_buf_len)
+ return -EINVAL;
+
+ memcpy(write_buf, &qcdev->dev_desc.cmd_reg, HIDI2C_REG_LEN);
+ offset += HIDI2C_REG_LEN;
+ memcpy(write_buf + offset, &cmd, cmd_len);
+ offset += cmd_len;
+
+ if (append_data_reg) {
+ memcpy(write_buf + offset, &qcdev->dev_desc.data_reg, HIDI2C_REG_LEN);
+ offset += HIDI2C_REG_LEN;
+ }
+
+ if (data && data_len) {
+ __le16 len = cpu_to_le16(data_len + HIDI2C_LENGTH_LEN);
+
+ memcpy(write_buf + offset, &len, HIDI2C_LENGTH_LEN);
+ offset += HIDI2C_LENGTH_LEN;
+ memcpy(write_buf + offset, data, data_len);
+ }
+
+ return buf_len;
+}
+
+static int quicki2c_encode_cmd(struct quicki2c_device *qcdev, u32 *cmd_buf,
+ u8 opcode, u8 report_type, u8 report_id)
+{
+ int cmd_len;
+
+ *cmd_buf = FIELD_PREP(HIDI2C_CMD_OPCODE, opcode) |
+ FIELD_PREP(HIDI2C_CMD_REPORT_TYPE, report_type);
+
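+	/*
+	 * Report IDs that don't fit in the command's report ID field are
+	 * carried in an extra byte.
+	 */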
+ if (report_id < HIDI2C_CMD_MAX_RI) {
+ *cmd_buf |= FIELD_PREP(HIDI2C_CMD_REPORT_ID, report_id);
+ cmd_len = HIDI2C_CMD_LEN;
+ } else {
+ *cmd_buf |= FIELD_PREP(HIDI2C_CMD_REPORT_ID, HIDI2C_CMD_MAX_RI) |
+ FIELD_PREP(HIDI2C_CMD_3RD_BYTE, report_id);
+ cmd_len = HIDI2C_CMD_LEN_OPT;
+ }
+
+ return cmd_len;
+}
+
+static int write_cmd_to_txdma(struct quicki2c_device *qcdev, int opcode,
+ int report_type, int report_id, u8 *buf, int buf_len)
+{
+ size_t write_buf_len;
+ int cmd_len, ret;
+ u32 cmd;
+
+ cmd_len = quicki2c_encode_cmd(qcdev, &cmd, opcode, report_type, report_id);
+
+ ret = quicki2c_init_write_buf(qcdev, cmd, cmd_len, buf ? true : false, buf,
+ buf_len, qcdev->report_buf, qcdev->report_len);
+ if (ret < 0)
+ return ret;
+
+ write_buf_len = ret;
+
+ return thc_dma_write(qcdev->thc_hw, qcdev->report_buf, write_buf_len);
+}
+
+int quicki2c_set_power(struct quicki2c_device *qcdev, enum hidi2c_power_state power_state)
+{
+ return write_cmd_to_txdma(qcdev, HIDI2C_SET_POWER, HIDI2C_RESERVED, power_state, NULL, 0);
+}
+
+int quicki2c_get_device_descriptor(struct quicki2c_device *qcdev)
+{
+ u32 read_len = 0;
+ int ret;
+
+ ret = thc_tic_pio_write_and_read(qcdev->thc_hw, qcdev->hid_desc_addr,
+ HIDI2C_REG_LEN, NULL, HIDI2C_DEV_DESC_LEN,
+ &read_len, (u32 *)&qcdev->dev_desc);
+ if (ret || HIDI2C_DEV_DESC_LEN != read_len) {
+ dev_err_once(qcdev->dev, "Get device descriptor failed, ret %d, read len %u\n",
+ ret, read_len);
+ return -EIO;
+ }
+
+ if (le16_to_cpu(qcdev->dev_desc.bcd_ver) != HIDI2C_HID_DESC_BCDVERSION)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+int quicki2c_get_report_descriptor(struct quicki2c_device *qcdev)
+{
+ u16 desc_reg = le16_to_cpu(qcdev->dev_desc.report_desc_reg);
+ size_t read_len = le16_to_cpu(qcdev->dev_desc.report_desc_len);
+ u32 prd_len = read_len;
+
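+	/* A single SWDMA transaction writes the register and reads the descriptor */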
+ return thc_swdma_read(qcdev->thc_hw, (u8 *)&desc_reg, HIDI2C_REG_LEN,
+ &prd_len, qcdev->report_descriptor, &read_len);
+}
+
+int quicki2c_get_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len)
+{
+ struct hidi2c_report_packet *rpt;
+ size_t write_buf_len, read_len = 0;
+ int cmd_len, rep_type;
+ u32 cmd;
+ int ret;
+
+ if (report_type == HID_INPUT_REPORT) {
+ rep_type = HIDI2C_INPUT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = HIDI2C_FEATURE;
+ } else {
+ dev_err(qcdev->dev, "Unsupported report type for GET REPORT: %d\n", report_type);
+ return -EINVAL;
+ }
+
+ cmd_len = quicki2c_encode_cmd(qcdev, &cmd, HIDI2C_GET_REPORT, rep_type, reportnum);
+
+ ret = quicki2c_init_write_buf(qcdev, cmd, cmd_len, true, NULL, 0,
+ qcdev->report_buf, qcdev->report_len);
+ if (ret < 0)
+ return ret;
+
+ write_buf_len = ret;
+
+ rpt = (struct hidi2c_report_packet *)qcdev->input_buf;
+
+ ret = thc_swdma_read(qcdev->thc_hw, qcdev->report_buf, write_buf_len,
+ NULL, rpt, &read_len);
+ if (ret) {
+ dev_err_once(qcdev->dev, "Get report failed, ret %d, read len (%zu vs %d)\n",
+ ret, read_len, buf_len);
+ return ret;
+ }
+
+ if (HIDI2C_DATA_LEN(le16_to_cpu(rpt->len)) != buf_len || rpt->data[0] != reportnum) {
+ dev_err_once(qcdev->dev, "Invalid packet, len (%d vs %d) report id (%d vs %d)\n",
+ le16_to_cpu(rpt->len), buf_len, rpt->data[0], reportnum);
+ return -EINVAL;
+ }
+
+ memcpy(buf, rpt->data, buf_len);
+
+ return buf_len;
+}
+
+int quicki2c_set_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len)
+{
+ int rep_type;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT) {
+ rep_type = HIDI2C_OUTPUT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = HIDI2C_FEATURE;
+ } else {
+ dev_err(qcdev->dev, "Unsupported report type for SET REPORT: %d\n", report_type);
+ return -EINVAL;
+ }
+
+ ret = write_cmd_to_txdma(qcdev, HIDI2C_SET_REPORT, rep_type, reportnum, buf, buf_len);
+ if (ret) {
+ dev_err_once(qcdev->dev, "Set Report failed, ret %d\n", ret);
+ return ret;
+ }
+
+ return buf_len;
+}
+
+#define HIDI2C_RESET_TIMEOUT 5
+
+int quicki2c_reset(struct quicki2c_device *qcdev)
+{
+ int ret;
+
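+	/*
+	 * Send the RESET command, then wait for the zero-length input report
+	 * that the IRQ thread turns into a reset_ack wake-up.
+	 */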
+ qcdev->reset_ack = false;
+ qcdev->state = QUICKI2C_RESETING;
+
+ ret = write_cmd_to_txdma(qcdev, HIDI2C_RESET, HIDI2C_RESERVED, 0, NULL, 0);
+ if (ret) {
+ dev_err_once(qcdev->dev, "Send reset command failed, ret %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qcdev->reset_ack_wq, qcdev->reset_ack,
+ HIDI2C_RESET_TIMEOUT * HZ);
+ if (ret <= 0 || !qcdev->reset_ack) {
+ dev_err_once(qcdev->dev,
+			     "Waiting for reset response timed out, ret: %d, timeout: %ds\n",
+ ret, HIDI2C_RESET_TIMEOUT);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h
new file mode 100644
index 000000000000..bf4908cce59c
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKI2C_PROTOCOL_H_
+#define _QUICKI2C_PROTOCOL_H_
+
+#include <linux/hid-over-i2c.h>
+
+struct quicki2c_device;
+
+int quicki2c_set_power(struct quicki2c_device *qcdev, enum hidi2c_power_state power_state);
+int quicki2c_get_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len);
+int quicki2c_set_report(struct quicki2c_device *qcdev, u8 report_type,
+ unsigned int reportnum, void *buf, u32 buf_len);
+int quicki2c_get_device_descriptor(struct quicki2c_device *qcdev);
+int quicki2c_get_report_descriptor(struct quicki2c_device *qcdev);
+int quicki2c_reset(struct quicki2c_device *qcdev);
+
+#endif /* _QUICKI2C_PROTOCOL_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
new file mode 100644
index 000000000000..4641e818dfa4
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -0,0 +1,987 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-hw.h"
+
+#include "quickspi-dev.h"
+#include "quickspi-hid.h"
+#include "quickspi-protocol.h"
+
+struct quickspi_driver_data mtl = {
+ .max_packet_size_value = MAX_PACKET_SIZE_VALUE_MTL,
+};
+
+struct quickspi_driver_data lnl = {
+ .max_packet_size_value = MAX_PACKET_SIZE_VALUE_LNL,
+};
+
+struct quickspi_driver_data ptl = {
+ .max_packet_size_value = MAX_PACKET_SIZE_VALUE_LNL,
+};
+
+/* THC QuickSPI ACPI method to get device properties */
+/* HIDSPI Method: {6e2ac436-0fcf-41af-a265-b32a220dcfab} */
+static guid_t hidspi_guid =
+ GUID_INIT(0x6e2ac436, 0x0fcf, 0x41af, 0xa2, 0x65, 0xb3, 0x2a,
+ 0x22, 0x0d, 0xcf, 0xab);
+
+/* QuickSpi Method: {300D35b7-ac20-413e-8e9c-92e4dafd0afe} */
+static guid_t thc_quickspi_guid =
+ GUID_INIT(0x300d35b7, 0xac20, 0x413e, 0x8e, 0x9c, 0x92, 0xe4,
+ 0xda, 0xfd, 0x0a, 0xfe);
+
+/* Platform Method: {84005682-5b71-41a4-8d66-8130f787a138} */
+static guid_t thc_platform_guid =
+ GUID_INIT(0x84005682, 0x5b71, 0x41a4, 0x8d, 0x66, 0x81, 0x30,
+ 0xf7, 0x87, 0xa1, 0x38);
+
+/**
+ * thc_acpi_get_property - Query device ACPI parameter
+ *
+ * @adev: pointer to the ACPI device
+ * @guid: ACPI method's GUID
+ * @rev: ACPI method's revision
+ * @func: ACPI method's function number
+ * @type: ACPI parameter's data type
+ * @prop_buf: pointer to the return buffer
+ *
+ * This is a helper function for the device to query its ACPI parameters.
+ *
+ * Return: 0 on success or -ENODEV on failure.
+ */
+static int thc_acpi_get_property(struct acpi_device *adev, const guid_t *guid,
+ u64 rev, u64 func, acpi_object_type type, void *prop_buf)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm_typed(handle, guid, rev, func, NULL, type);
+ if (!obj) {
+ acpi_handle_err(handle,
+ "Error _DSM call failed, rev: %llu, func: %llu, type: %u\n",
+ rev, func, type);
+ return -ENODEV;
+ }
+
+ if (type == ACPI_TYPE_INTEGER)
+ *(u32 *)prop_buf = (u32)obj->integer.value;
+ else if (type == ACPI_TYPE_BUFFER)
+ memcpy(prop_buf, obj->buffer.pointer, obj->buffer.length);
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+/**
+ * quickspi_get_acpi_resources - Query all quickspi devices' ACPI parameters
+ *
+ * @qsdev: pointer to the quickspi device
+ *
+ * This function gets all of the quickspi device's ACPI resources.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_get_acpi_resources(struct quickspi_device *qsdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(qsdev->dev);
+ int ret = -EINVAL;
+
+ if (!adev) {
+ dev_err(qsdev->dev, "no valid ACPI companion\n");
+ return ret;
+ }
+
+ qsdev->acpi_dev = adev;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_HDR_ADDR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->input_report_hdr_addr);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_BDY_ADDR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->input_report_bdy_addr);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_OUTPUT_REP_ADDR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->output_report_addr);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_READ_OPCODE,
+ ACPI_TYPE_BUFFER,
+ &qsdev->spi_read_opcode);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_WRITE_OPCODE,
+ ACPI_TYPE_BUFFER,
+ &qsdev->spi_write_opcode);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &hidspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_IO_MODE,
+ ACPI_TYPE_INTEGER,
+ &qsdev->spi_read_io_mode);
+ if (ret)
+ return ret;
+
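+	/* The ACPI IO-mode value packs both read and write IO modes; split them */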
+ if (qsdev->spi_read_io_mode & SPI_WRITE_IO_MODE)
+ qsdev->spi_write_io_mode = FIELD_GET(SPI_IO_MODE_OPCODE, qsdev->spi_read_io_mode);
+ else
+ qsdev->spi_write_io_mode = THC_SINGLE_IO;
+
+ qsdev->spi_read_io_mode = FIELD_GET(SPI_IO_MODE_OPCODE, qsdev->spi_read_io_mode);
+
+ ret = thc_acpi_get_property(adev, &thc_quickspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_CONNECTION_SPEED,
+ ACPI_TYPE_INTEGER,
+ &qsdev->spi_freq_val);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &thc_quickspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_LIMIT_PACKET_SIZE,
+ ACPI_TYPE_INTEGER,
+ &qsdev->limit_packet_size);
+ if (ret)
+ return ret;
+
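+	/* Fall back to the minimum packet size when limited or lacking per-SoC data */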
+ if (qsdev->limit_packet_size || !qsdev->driver_data)
+ qsdev->spi_packet_size = DEFAULT_MIN_PACKET_SIZE_VALUE;
+ else
+ qsdev->spi_packet_size = qsdev->driver_data->max_packet_size_value;
+
+ ret = thc_acpi_get_property(adev, &thc_quickspi_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_PERFORMANCE_LIMIT,
+ ACPI_TYPE_INTEGER,
+ &qsdev->performance_limit);
+ if (ret)
+ return ret;
+
+ qsdev->performance_limit = FIELD_GET(PERFORMANCE_LIMITATION, qsdev->performance_limit);
+
+ ret = thc_acpi_get_property(adev, &thc_platform_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_ACTIVE_LTR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->active_ltr_val);
+ if (ret)
+ return ret;
+
+ ret = thc_acpi_get_property(adev, &thc_platform_guid,
+ ACPI_QUICKSPI_REVISION_NUM,
+ ACPI_QUICKSPI_FUNC_NUM_LP_LTR,
+ ACPI_TYPE_INTEGER,
+ &qsdev->low_power_ltr_val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * quickspi_irq_quick_handler - The ISR of the quickspi driver
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * Return: IRQ_WAKE_THREAD if further processing is needed.
+ */
+static irqreturn_t quickspi_irq_quick_handler(int irq, void *dev_id)
+{
+ struct quickspi_device *qsdev = dev_id;
+
+ if (qsdev->state == QUICKSPI_DISABLED)
+ return IRQ_HANDLED;
+
+	/* Disable the THC interrupt before the current interrupt is handled */
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * try_recover - Try to recover THC and the touch device
+ * @qsdev: pointer to the quickspi device
+ *
+ * This function is an error handler, called when a fatal error happens.
+ * It tries to reset the touch device and re-configure THC to recover
+ * data transfer between the device and THC.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int try_recover(struct quickspi_device *qsdev)
+{
+ int ret;
+
+ ret = reset_tic(qsdev);
+ if (ret) {
+ dev_err(qsdev->dev, "Reset touch device failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ thc_dma_unconfigure(qsdev->thc_hw);
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret) {
+ dev_err(qsdev->dev, "Re-configure THC DMA failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * quickspi_irq_thread_handler - IRQ thread handler of quickspi driver
+ *
+ * @irq: The IRQ number
+ * @dev_id: pointer to the quickspi device structure
+ *
+ * Return: IRQ_HANDLED to finish this handler.
+ */
+static irqreturn_t quickspi_irq_thread_handler(int irq, void *dev_id)
+{
+ struct quickspi_device *qsdev = dev_id;
+ size_t input_len;
+ int read_finished = 0;
+ int err_recover = 0;
+ int int_mask;
+ int ret;
+
+ if (qsdev->state == QUICKSPI_DISABLED)
+ return IRQ_HANDLED;
+
+ ret = pm_runtime_resume_and_get(qsdev->dev);
+ if (ret)
+ return IRQ_HANDLED;
+
+ int_mask = thc_interrupt_handler(qsdev->thc_hw);
+
+ if (int_mask & BIT(THC_FATAL_ERR_INT) || int_mask & BIT(THC_TXN_ERR_INT)) {
+ err_recover = 1;
+ goto end;
+ }
+
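+	/* A non-DMA interrupt signals either a reset ack or a command completion */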
+ if (int_mask & BIT(THC_NONDMA_INT)) {
+ if (qsdev->state == QUICKSPI_RESETING) {
+ qsdev->reset_ack = true;
+ wake_up_interruptible(&qsdev->reset_ack_wq);
+ } else {
+ qsdev->nondma_int_received = true;
+ wake_up_interruptible(&qsdev->nondma_int_received_wq);
+ }
+ }
+
+ if (int_mask & BIT(THC_RXDMA2_INT)) {
+ while (!read_finished) {
+ ret = thc_rxdma_read(qsdev->thc_hw, THC_RXDMA2, qsdev->input_buf,
+ &input_len, &read_finished);
+ if (ret) {
+ err_recover = 1;
+ goto end;
+ }
+
+ quickspi_handle_input_data(qsdev, input_len);
+ }
+ }
+
+end:
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ if (err_recover)
+ if (try_recover(qsdev))
+ qsdev->state = QUICKSPI_DISABLED;
+
+ pm_runtime_mark_last_busy(qsdev->dev);
+ pm_runtime_put_autosuspend(qsdev->dev);
+
+ return IRQ_HANDLED;
+}
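+
+/*
+ * Summary of the interrupt flow implemented by the two handlers above: the
+ * hard IRQ handler only masks the THC interrupt and returns IRQ_WAKE_THREAD;
+ * the threaded handler reads the interrupt cause, wakes the waiters for
+ * non-DMA events, drains input reports from RxDMA2, re-enables the THC
+ * interrupt, and falls back to try_recover() on fatal/transaction errors.
+ */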
+
+/**
+ * quickspi_dev_init - Initialize quickspi device
+ *
+ * @pdev: pointer to the THC PCI device
+ * @mem_addr: pointer to the MMIO memory address
+ * @id: pointer to the pci_device_id structure
+ *
+ * Allocate the quickspi device structure and initialize the THC device,
+ * then configure THC to HIDSPI mode.
+ *
+ * On success, enable the THC hardware interrupt.
+ *
+ * Return: pointer to the quickspi device structure on success,
+ * or an ERR_PTR-encoded error on failure.
+ */
+static struct quickspi_device *quickspi_dev_init(struct pci_dev *pdev, void __iomem *mem_addr,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = devm_kzalloc(dev, sizeof(struct quickspi_device), GFP_KERNEL);
+ if (!qsdev)
+ return ERR_PTR(-ENOMEM);
+
+ qsdev->pdev = pdev;
+ qsdev->dev = dev;
+ qsdev->mem_addr = mem_addr;
+ qsdev->state = QUICKSPI_DISABLED;
+ qsdev->driver_data = (struct quickspi_driver_data *)id->driver_data;
+
+ init_waitqueue_head(&qsdev->reset_ack_wq);
+ init_waitqueue_head(&qsdev->nondma_int_received_wq);
+ init_waitqueue_head(&qsdev->report_desc_got_wq);
+ init_waitqueue_head(&qsdev->get_report_cmpl_wq);
+ init_waitqueue_head(&qsdev->set_report_cmpl_wq);
+
+ /* thc hw init */
+ qsdev->thc_hw = thc_dev_init(qsdev->dev, qsdev->mem_addr);
+ if (IS_ERR(qsdev->thc_hw)) {
+ ret = PTR_ERR(qsdev->thc_hw);
+ dev_err(dev, "Failed to initialize THC device context, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = thc_port_select(qsdev->thc_hw, THC_PORT_TYPE_SPI);
+ if (ret) {
+ dev_err(dev, "Failed to select THC port, ret = %d.\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = quickspi_get_acpi_resources(qsdev);
+ if (ret) {
+ dev_err(dev, "Get ACPI resources failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* THC config for input/output address */
+ thc_spi_input_output_address_config(qsdev->thc_hw,
+ qsdev->input_report_hdr_addr,
+ qsdev->input_report_bdy_addr,
+ qsdev->output_report_addr);
+
+ /* THC config for spi read operation */
+ ret = thc_spi_read_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_read_io_mode,
+ qsdev->spi_read_opcode,
+ qsdev->spi_packet_size);
+ if (ret) {
+ dev_err(dev, "thc_spi_read_config failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* THC config for spi write operation */
+ ret = thc_spi_write_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_write_io_mode,
+ qsdev->spi_write_opcode,
+ qsdev->spi_packet_size,
+ qsdev->performance_limit);
+ if (ret) {
+ dev_err(dev, "thc_spi_write_config failed, ret = %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ thc_ltr_config(qsdev->thc_hw,
+ qsdev->active_ltr_val,
+ qsdev->low_power_ltr_val);
+
+ thc_interrupt_config(qsdev->thc_hw);
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ qsdev->state = QUICKSPI_INITED;
+
+ return qsdev;
+}
+
+/**
+ * quickspi_dev_deinit - De-initialize quickspi device
+ *
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * Disable the THC interrupt and deinitialize THC.
+ */
+static void quickspi_dev_deinit(struct quickspi_device *qsdev)
+{
+ thc_interrupt_enable(qsdev->thc_hw, false);
+ thc_ltr_unconfig(qsdev->thc_hw);
+
+ qsdev->state = QUICKSPI_DISABLED;
+}
+
+/**
+ * quickspi_dma_init - Configure THC DMA for quickspi device
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * This function uses the TIC's parameters (such as max input length and max
+ * output length) to allocate THC DMA buffers and configure the THC DMA engines.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_dma_init(struct quickspi_device *qsdev)
+{
+ int ret;
+
+ ret = thc_dma_set_max_packet_sizes(qsdev->thc_hw, 0,
+ le16_to_cpu(qsdev->dev_desc.max_input_len),
+ le16_to_cpu(qsdev->dev_desc.max_output_len),
+ 0);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_allocate(qsdev->thc_hw);
+ if (ret) {
+ dev_err(qsdev->dev, "Allocate THC DMA buffer failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* Enable RxDMA */
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret) {
+ dev_err(qsdev->dev, "Configure THC DMA failed, ret = %d\n", ret);
+ thc_dma_unconfigure(qsdev->thc_hw);
+ thc_dma_release(qsdev->thc_hw);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * quickspi_dma_deinit - Release THC DMA for quickspi device
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * Stop THC DMA engines and release all DMA buffers.
+ */
+static void quickspi_dma_deinit(struct quickspi_device *qsdev)
+{
+ thc_dma_unconfigure(qsdev->thc_hw);
+ thc_dma_release(qsdev->thc_hw);
+}
+
+/**
+ * quickspi_alloc_report_buf - Allocate report buffers
+ * @qsdev: pointer to the quickspi device structure
+ *
+ * Allocate the report descriptor buffer; it is used to store the TIC HID
+ * report descriptor.
+ *
+ * Allocate the input report buffer; it is used to receive HID input report
+ * data from the TIC.
+ *
+ * Allocate the output report buffer; it is used to store HID output reports,
+ * such as set feature.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_alloc_report_buf(struct quickspi_device *qsdev)
+{
+ size_t max_report_len;
+ size_t max_input_len;
+
+ qsdev->report_descriptor = devm_kzalloc(qsdev->dev,
+ le16_to_cpu(qsdev->dev_desc.rep_desc_len),
+ GFP_KERNEL);
+ if (!qsdev->report_descriptor)
+ return -ENOMEM;
+
+ max_input_len = max(le16_to_cpu(qsdev->dev_desc.rep_desc_len),
+ le16_to_cpu(qsdev->dev_desc.max_input_len));
+
+ qsdev->input_buf = devm_kzalloc(qsdev->dev, max_input_len, GFP_KERNEL);
+ if (!qsdev->input_buf)
+ return -ENOMEM;
+
+ max_report_len = max(le16_to_cpu(qsdev->dev_desc.max_output_len),
+ le16_to_cpu(qsdev->dev_desc.max_input_len));
+
+ qsdev->report_buf = devm_kzalloc(qsdev->dev, max_report_len, GFP_KERNEL);
+ if (!qsdev->report_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * quickspi_probe - QuickSPI driver probe function
+ *
+ * @pdev: pointer to the PCI device
+ * @id: pointer to the pci_device_id structure
+ *
+ * This function initializes the THC and HIDSPI device; the flow is:
+ * - do THC PCI device initialization
+ * - query HIDSPI ACPI parameters
+ * - configure THC to HIDSPI mode
+ * - go through the HIDSPI enumeration flow
+ * |- reset HIDSPI device
+ * |- read device descriptor
+ * - enable THC interrupt and DMA
+ * - read report descriptor
+ * - register HID device
+ * - enable runtime power management
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int quickspi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct quickspi_device *qsdev;
+ void __iomem *mem_addr;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable PCI device, ret = %d.\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get PCI regions, ret = %d.\n", ret);
+ goto disable_pci_device;
+ }
+
+ mem_addr = pcim_iomap_table(pdev)[0];
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration %d\n", ret);
+ goto unmap_io_region;
+ }
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Failed to allocate IRQ vectors. ret = %d\n", ret);
+ goto unmap_io_region;
+ }
+
+ pdev->irq = pci_irq_vector(pdev, 0);
+
+ qsdev = quickspi_dev_init(pdev, mem_addr, id);
+ if (IS_ERR(qsdev)) {
+ dev_err(&pdev->dev, "QuickSPI device init failed\n");
+ ret = PTR_ERR(qsdev);
+ goto unmap_io_region;
+ }
+
+ pci_set_drvdata(pdev, qsdev);
+
+ ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ quickspi_irq_quick_handler,
+ quickspi_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME,
+ qsdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to request threaded IRQ, irq = %d.\n", pdev->irq);
+ goto dev_deinit;
+ }
+
+ ret = reset_tic(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Reset Touch Device failed, ret = %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quickspi_alloc_report_buf(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Alloc report buffers failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quickspi_dma_init(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Setup THC DMA failed, ret= %d\n", ret);
+ goto dev_deinit;
+ }
+
+ ret = quickspi_get_report_descriptor(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Get report descriptor failed, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ ret = quickspi_hid_probe(qsdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register HID device, ret = %d\n", ret);
+ goto dma_deinit;
+ }
+
+ qsdev->state = QUICKSPI_ENABLED;
+
+ /* Enable runtime power management */
+ pm_runtime_use_autosuspend(qsdev->dev);
+ pm_runtime_set_autosuspend_delay(qsdev->dev, DEFAULT_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_mark_last_busy(qsdev->dev);
+ pm_runtime_put_noidle(qsdev->dev);
+ pm_runtime_put_autosuspend(qsdev->dev);
+
+ dev_dbg(&pdev->dev, "QuickSPI probe success\n");
+
+ return 0;
+
+dma_deinit:
+ quickspi_dma_deinit(qsdev);
+dev_deinit:
+ quickspi_dev_deinit(qsdev);
+unmap_io_region:
+ pcim_iounmap_regions(pdev, BIT(0));
+disable_pci_device:
+ pci_clear_master(pdev);
+
+ return ret;
+}
+
+/**
+ * quickspi_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void quickspi_remove(struct pci_dev *pdev)
+{
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return;
+
+ quickspi_hid_remove(qsdev);
+ quickspi_dma_deinit(qsdev);
+
+ pm_runtime_get_noresume(qsdev->dev);
+
+ quickspi_dev_deinit(qsdev);
+
+ pcim_iounmap_regions(pdev, BIT(0));
+ pci_clear_master(pdev);
+}
+
+/**
+ * quickspi_shutdown - Device Shutdown Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * This is called from the reboot notifier; it's a simplified
+ * version of remove, so we go down faster.
+ */
+static void quickspi_shutdown(struct pci_dev *pdev)
+{
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return;
+
+ /* Must stop DMA before reboot to avoid DMA entering an unknown state */
+ quickspi_dma_deinit(qsdev);
+
+ quickspi_dev_deinit(qsdev);
+}
+
+static int quickspi_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = quickspi_set_power(qsdev, HIDSPI_SLEEP);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ thc_dma_unconfigure(qsdev->thc_hw);
+
+ return 0;
+}
+
+static int quickspi_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_port_select(qsdev->thc_hw, THC_PORT_TYPE_SPI);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qsdev->thc_hw);
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ ret = quickspi_set_power(qsdev, HIDSPI_ON);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quickspi_freeze(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ thc_dma_unconfigure(qsdev->thc_hw);
+
+ return 0;
+}
+
+static int quickspi_thaw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int quickspi_poweroff(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ thc_interrupt_enable(qsdev->thc_hw, false);
+
+ thc_ltr_unconfig(qsdev->thc_hw);
+
+ quickspi_dma_deinit(qsdev);
+
+ return 0;
+}
+
+static int quickspi_restore(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+ int ret;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, true);
+ if (ret)
+ return ret;
+
+ /* Reconfigure THC HW when coming back from hibernation */
+ ret = thc_port_select(qsdev->thc_hw, THC_PORT_TYPE_SPI);
+ if (ret)
+ return ret;
+
+ thc_spi_input_output_address_config(qsdev->thc_hw,
+ qsdev->input_report_hdr_addr,
+ qsdev->input_report_bdy_addr,
+ qsdev->output_report_addr);
+
+ ret = thc_spi_read_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_read_io_mode,
+ qsdev->spi_read_opcode,
+ qsdev->spi_packet_size);
+ if (ret)
+ return ret;
+
+ ret = thc_spi_write_config(qsdev->thc_hw, qsdev->spi_freq_val,
+ qsdev->spi_write_io_mode,
+ qsdev->spi_write_opcode,
+ qsdev->spi_packet_size,
+ qsdev->performance_limit);
+ if (ret)
+ return ret;
+
+ thc_interrupt_config(qsdev->thc_hw);
+
+ thc_interrupt_enable(qsdev->thc_hw, true);
+
+ /* TIC may have lost power, so it needs to go through the reset flow */
+ ret = reset_tic(qsdev);
+ if (ret)
+ return ret;
+
+ ret = thc_dma_configure(qsdev->thc_hw);
+ if (ret)
+ return ret;
+
+ thc_ltr_config(qsdev->thc_hw,
+ qsdev->active_ltr_val,
+ qsdev->low_power_ltr_val);
+
+ thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static int quickspi_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_LP);
+
+ pci_save_state(pdev);
+
+ return 0;
+}
+
+static int quickspi_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct quickspi_device *qsdev;
+
+ qsdev = pci_get_drvdata(pdev);
+ if (!qsdev)
+ return -ENODEV;
+
+ thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE);
+
+ return 0;
+}
+
+static const struct dev_pm_ops quickspi_pm_ops = {
+ .suspend = quickspi_suspend,
+ .resume = quickspi_resume,
+ .freeze = quickspi_freeze,
+ .thaw = quickspi_thaw,
+ .poweroff = quickspi_poweroff,
+ .restore = quickspi_restore,
+ .runtime_suspend = quickspi_runtime_suspend,
+ .runtime_resume = quickspi_runtime_resume,
+ .runtime_idle = NULL,
+};
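+
+/*
+ * Note that the system-wide callbacks above fully quiesce the device (set
+ * the HIDSPI sleep state, mask interrupts, unconfigure DMA), while the
+ * runtime callbacks only toggle the THC LTR mode, which keeps runtime
+ * suspend/resume cheap enough for the 5 s autosuspend delay set in
+ * quickspi_probe().
+ */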
+
+static const struct pci_device_id quickspi_pci_tbl[] = {
+ {PCI_DEVICE_DATA(INTEL, THC_MTL_DEVICE_ID_SPI_PORT1, &mtl), },
+ {PCI_DEVICE_DATA(INTEL, THC_MTL_DEVICE_ID_SPI_PORT2, &mtl), },
+ {PCI_DEVICE_DATA(INTEL, THC_LNL_DEVICE_ID_SPI_PORT1, &lnl), },
+ {PCI_DEVICE_DATA(INTEL, THC_LNL_DEVICE_ID_SPI_PORT2, &lnl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT1, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT2, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT1, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT2, &ptl), },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl);
+
+static struct pci_driver quickspi_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = quickspi_pci_tbl,
+ .probe = quickspi_probe,
+ .remove = quickspi_remove,
+ .shutdown = quickspi_shutdown,
+ .driver.pm = &quickspi_pm_ops,
+ .driver.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+};
+
+module_pci_driver(quickspi_driver);
+
+MODULE_AUTHOR("Xinpeng Sun <xinpeng.sun@intel.com>");
+MODULE_AUTHOR("Even Xu <even.xu@intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) QuickSPI Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("INTEL_THC");
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
new file mode 100644
index 000000000000..75179bb26767
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKSPI_DEV_H_
+#define _QUICKSPI_DEV_H_
+
+#include <linux/bits.h>
+#include <linux/hid-over-spi.h>
+#include <linux/sizes.h>
+#include <linux/wait.h>
+
+#include "quickspi-protocol.h"
+
+#define PCI_DEVICE_ID_INTEL_THC_MTL_DEVICE_ID_SPI_PORT1 0x7E49
+#define PCI_DEVICE_ID_INTEL_THC_MTL_DEVICE_ID_SPI_PORT2 0x7E4B
+#define PCI_DEVICE_ID_INTEL_THC_LNL_DEVICE_ID_SPI_PORT1 0xA849
+#define PCI_DEVICE_ID_INTEL_THC_LNL_DEVICE_ID_SPI_PORT2 0xA84B
+#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT1 0xE349
+#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT2 0xE34B
+#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT1 0xE449
+#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT2 0xE44B
+
+/* HIDSPI special ACPI parameters DSM methods */
+#define ACPI_QUICKSPI_REVISION_NUM 2
+#define ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_HDR_ADDR 1
+#define ACPI_QUICKSPI_FUNC_NUM_INPUT_REP_BDY_ADDR 2
+#define ACPI_QUICKSPI_FUNC_NUM_OUTPUT_REP_ADDR 3
+#define ACPI_QUICKSPI_FUNC_NUM_READ_OPCODE 4
+#define ACPI_QUICKSPI_FUNC_NUM_WRITE_OPCODE 5
+#define ACPI_QUICKSPI_FUNC_NUM_IO_MODE 6
+
+/* QuickSPI device special ACPI parameters DSM methods */
+#define ACPI_QUICKSPI_FUNC_NUM_CONNECTION_SPEED 1
+#define ACPI_QUICKSPI_FUNC_NUM_LIMIT_PACKET_SIZE 2
+#define ACPI_QUICKSPI_FUNC_NUM_PERFORMANCE_LIMIT 3
+
+/* Platform special ACPI parameters DSM methods */
+#define ACPI_QUICKSPI_FUNC_NUM_ACTIVE_LTR 1
+#define ACPI_QUICKSPI_FUNC_NUM_LP_LTR 2
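+
+/*
+ * The function numbers above are passed as the _DSM function index; for
+ * example, the active LTR value is queried in this patch as:
+ *
+ *   thc_acpi_get_property(adev, &thc_platform_guid,
+ *                         ACPI_QUICKSPI_REVISION_NUM,
+ *                         ACPI_QUICKSPI_FUNC_NUM_ACTIVE_LTR,
+ *                         ACPI_TYPE_INTEGER, &qsdev->active_ltr_val);
+ */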
+
+#define SPI_WRITE_IO_MODE BIT(13)
+#define SPI_IO_MODE_OPCODE GENMASK(15, 14)
+#define PERFORMANCE_LIMITATION GENMASK(15, 0)
+
+/* Packet size value, the unit is 16 bytes */
+#define DEFAULT_MIN_PACKET_SIZE_VALUE 4
+#define MAX_PACKET_SIZE_VALUE_MTL 128
+#define MAX_PACKET_SIZE_VALUE_LNL 256
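+
+/*
+ * Worked example: since the unit is 16 bytes, the values above correspond
+ * to 4 * 16 = 64 bytes (default minimum), 128 * 16 = 2048 bytes (MTL) and
+ * 256 * 16 = 4096 bytes (LNL) per SPI packet.
+ */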
+
+/*
+ * THC uses runtime auto suspend to dynamically switch between THC active LTR
+ * and low power LTR to save CPU power.
+ * The default is 5000 ms: if no touch event occurs within this time, THC
+ * switches to low power LTR mode.
+ */
+#define DEFAULT_AUTO_SUSPEND_DELAY_MS 5000
+
+enum quickspi_dev_state {
+ QUICKSPI_NONE,
+ QUICKSPI_RESETING,
+ QUICKSPI_RESETED,
+ QUICKSPI_INITED,
+ QUICKSPI_ENABLED,
+ QUICKSPI_DISABLED,
+};
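+
+/*
+ * Typical state flow as implemented by this driver (a sketch derived from
+ * the code in this patch, not mandated by the HIDSPI spec):
+ *
+ *   QUICKSPI_DISABLED -> QUICKSPI_INITED   (quickspi_dev_init())
+ *   QUICKSPI_INITED -> QUICKSPI_RESETING   (reset_tic() starts)
+ *   QUICKSPI_RESETING -> QUICKSPI_RESETED  (RESET_RESPONSE received)
+ *   QUICKSPI_RESETED -> QUICKSPI_ENABLED   (quickspi_probe() completes)
+ *   any state -> QUICKSPI_DISABLED         (teardown or fatal error)
+ */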
+
+/**
+ * struct quickspi_driver_data - Driver specific data for quickspi device
+ * @max_packet_size_value: identify max packet size, unit is 16 bytes
+ */
+struct quickspi_driver_data {
+ u32 max_packet_size_value;
+};
+
+struct device;
+struct pci_dev;
+struct thc_device;
+struct hid_device;
+struct acpi_device;
+
+/**
+ * struct quickspi_device - THC QuickSPI device struct
+ * @dev: pointer to kernel device
+ * @pdev: pointer to PCI device
+ * @thc_hw: pointer to THC device
+ * @hid_dev: pointer to HID device
+ * @acpi_dev: pointer to ACPI device
+ * @driver_data: pointer to quickspi specific driver data
+ * @state: THC SPI device state
+ * @mem_addr: MMIO memory address
+ * @dev_desc: device descriptor for HIDSPI protocol
+ * @input_report_hdr_addr: device input report header address
+ * @input_report_bdy_addr: device input report body address
+ * @output_report_addr: device output report address
+ * @spi_freq_val: device supported max SPI frequency, in Hz
+ * @spi_read_io_mode: device supported SPI read IO mode
+ * @spi_write_io_mode: device supported SPI write IO mode
+ * @spi_read_opcode: device read opcode
+ * @spi_write_opcode: device write opcode
+ * @limit_packet_size: 1 - limit read/write packets to 64 bytes
+ *                     0 - device has no packet size limitation for read/write
+ * @spi_packet_size: packet size used for SPI read/write, unit is 16 bytes
+ * @performance_limit: delay time, in ms;
+ *                     if the device has a performance limitation, a delay
+ *                     must be inserted before a write operation that follows
+ *                     a read operation
+ * @active_ltr_val: THC active LTR value
+ * @low_power_ltr_val: THC low power LTR value
+ * @report_descriptor: stores a copy of the device report descriptor
+ * @input_buf: stores a copy of the latest input report data
+ * @report_buf: stores a copy of the latest input/output report packet from set/get feature
+ * @report_len: the length of the input/output report packet
+ * @reset_ack_wq: wait queue for the reset response from the device
+ * @reset_ack: indicates whether the reset response was received
+ * @nondma_int_received_wq: wait queue for the THC non-DMA interrupt
+ * @nondma_int_received: indicates whether a THC non-DMA interrupt was received
+ * @report_desc_got_wq: wait queue for the device report descriptor
+ * @report_desc_got: indicates whether the device report descriptor was received
+ * @get_report_cmpl_wq: wait queue for the get report response from the device
+ * @get_report_cmpl: indicates whether the get report response was received
+ * @set_report_cmpl_wq: wait queue for set report completion
+ * @set_report_cmpl: indicates whether the set report sequence completed
+ */
+struct quickspi_device {
+ struct device *dev;
+ struct pci_dev *pdev;
+ struct thc_device *thc_hw;
+ struct hid_device *hid_dev;
+ struct acpi_device *acpi_dev;
+ struct quickspi_driver_data *driver_data;
+ enum quickspi_dev_state state;
+
+ void __iomem *mem_addr;
+
+ struct hidspi_dev_descriptor dev_desc;
+ u32 input_report_hdr_addr;
+ u32 input_report_bdy_addr;
+ u32 output_report_addr;
+ u32 spi_freq_val;
+ u32 spi_read_io_mode;
+ u32 spi_write_io_mode;
+ u32 spi_read_opcode;
+ u32 spi_write_opcode;
+ u32 limit_packet_size;
+ u32 spi_packet_size;
+ u32 performance_limit;
+
+ u32 active_ltr_val;
+ u32 low_power_ltr_val;
+
+ u8 *report_descriptor;
+ u8 *input_buf;
+ u8 *report_buf;
+ u32 report_len;
+
+ wait_queue_head_t reset_ack_wq;
+ bool reset_ack;
+
+ wait_queue_head_t nondma_int_received_wq;
+ bool nondma_int_received;
+
+ wait_queue_head_t report_desc_got_wq;
+ bool report_desc_got;
+
+ wait_queue_head_t get_report_cmpl_wq;
+ bool get_report_cmpl;
+
+ wait_queue_head_t set_report_cmpl_wq;
+ bool set_report_cmpl;
+};
+
+#endif /* _QUICKSPI_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
new file mode 100644
index 000000000000..ad52e402c28a
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/pm_runtime.h>
+
+#include "quickspi-dev.h"
+#include "quickspi-hid.h"
+
+/**
+ * quickspi_hid_parse() - HID core parse() callback
+ *
+ * @hid: HID device instance
+ *
+ * This function is called during hid_add_device().
+ *
+ * Return: 0 on success and non-zero on error.
+ */
+static int quickspi_hid_parse(struct hid_device *hid)
+{
+ struct quickspi_device *qsdev = hid->driver_data;
+
+ if (qsdev->report_descriptor)
+ return hid_parse_report(hid, qsdev->report_descriptor,
+ le16_to_cpu(qsdev->dev_desc.rep_desc_len));
+
+ dev_err(qsdev->dev, "invalid report descriptor\n");
+ return -EINVAL;
+}
+
+static int quickspi_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quickspi_hid_stop(struct hid_device *hid)
+{
+}
+
+static int quickspi_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void quickspi_hid_close(struct hid_device *hid)
+{
+}
+
+static int quickspi_hid_raw_request(struct hid_device *hid,
+ unsigned char reportnum,
+ __u8 *buf, size_t len,
+ unsigned char rtype, int reqtype)
+{
+ struct quickspi_device *qsdev = hid->driver_data;
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get(qsdev->dev);
+ if (ret)
+ return ret;
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ ret = quickspi_get_report(qsdev, rtype, reportnum, buf);
+ break;
+ case HID_REQ_SET_REPORT:
+ ret = quickspi_set_report(qsdev, rtype, reportnum, buf, len);
+ break;
+ default:
+ dev_err_once(qsdev->dev, "Not supported request type %d\n", reqtype);
+ break;
+ }
+
+ pm_runtime_mark_last_busy(qsdev->dev);
+ pm_runtime_put_autosuspend(qsdev->dev);
+
+ return ret;
+}
+
+static int quickspi_hid_power(struct hid_device *hid, int lvl)
+{
+ return 0;
+}
+
+static struct hid_ll_driver quickspi_hid_ll_driver = {
+ .parse = quickspi_hid_parse,
+ .start = quickspi_hid_start,
+ .stop = quickspi_hid_stop,
+ .open = quickspi_hid_open,
+ .close = quickspi_hid_close,
+ .power = quickspi_hid_power,
+ .raw_request = quickspi_hid_raw_request,
+};
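+
+/*
+ * HID core calls these callbacks on behalf of HID clients; for instance, a
+ * feature report request such as
+ *
+ *   hid_hw_raw_request(hid, reportnum, buf, len,
+ *                      HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ *
+ * ends up in quickspi_hid_raw_request(), which forwards it to the touch
+ * device over the THC DMA path.
+ */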
+
+/**
+ * quickspi_hid_probe() - Register HID low level driver
+ *
+ * @qsdev: pointer to quickspi device
+ *
+ * This function allocates and adds the HID device.
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int quickspi_hid_probe(struct quickspi_device *qsdev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid))
+ return PTR_ERR(hid);
+
+ hid->ll_driver = &quickspi_hid_ll_driver;
+ hid->bus = BUS_PCI;
+ hid->dev.parent = qsdev->dev;
+ hid->driver_data = qsdev;
+ hid->version = le16_to_cpu(qsdev->dev_desc.version_id);
+ hid->vendor = le16_to_cpu(qsdev->dev_desc.vendor_id);
+ hid->product = le16_to_cpu(qsdev->dev_desc.product_id);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quickspi-hid",
+ hid->vendor, hid->product);
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_destroy_device(hid);
+ return ret;
+ }
+
+ qsdev->hid_dev = hid;
+
+ return 0;
+}
+
+/**
+ * quickspi_hid_remove() - Destroy HID device
+ *
+ * @qsdev: pointer to quickspi device
+ */
+void quickspi_hid_remove(struct quickspi_device *qsdev)
+{
+ hid_destroy_device(qsdev->hid_dev);
+}
+
+/**
+ * quickspi_hid_send_report() - Send HID input report data to HID core
+ *
+ * @qsdev: pointer to quickspi device
+ * @data: pointer to the input report data buffer
+ * @data_len: the length of the input report data
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int quickspi_hid_send_report(struct quickspi_device *qsdev,
+ void *data, size_t data_len)
+{
+ int ret;
+
+ ret = hid_input_report(qsdev->hid_dev, HID_INPUT_REPORT, data, data_len, 1);
+ if (ret)
+ dev_err(qsdev->dev, "Failed to send HID input report, ret = %d.\n", ret);
+
+ return ret;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h
new file mode 100644
index 000000000000..f640fa876a40
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKSPI_HID_H_
+#define _QUICKSPI_HID_H_
+
+struct quickspi_device;
+
+int quickspi_hid_send_report(struct quickspi_device *qsdev,
+ void *data, size_t data_size);
+int quickspi_hid_probe(struct quickspi_device *qsdev);
+void quickspi_hid_remove(struct quickspi_device *qsdev);
+
+#endif /* _QUICKSPI_HID_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
new file mode 100644
index 000000000000..7373238ceb18
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/hid.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-dma.h"
+
+#include "quickspi-dev.h"
+#include "quickspi-hid.h"
+#include "quickspi-protocol.h"
+
+/*
+ * THC uses HW to accelerate the HID over SPI protocol. The
+ * THC_M_PRT_DEV_INT_CAUSE register is used to store the message header and
+ * body header; the definition below lets the driver retrieve the needed data
+ * field from THC_M_PRT_DEV_INT_CAUSE more easily.
+ */
+#define HIDSPI_IN_REP_BDY_HDR_REP_TYPE GENMASK(7, 0)
+
+static int write_cmd_to_txdma(struct quickspi_device *qsdev,
+ int report_type, int report_id,
+ u8 *report_buf, const int report_buf_len)
+{
+ struct output_report *write_buf;
+ int write_buf_len;
+ int ret;
+
+ write_buf = (struct output_report *)qsdev->report_buf;
+
+ write_buf->output_hdr.report_type = report_type;
+ write_buf->output_hdr.content_len = cpu_to_le16(report_buf_len);
+ write_buf->output_hdr.content_id = report_id;
+
+ if (report_buf && report_buf_len > 0)
+ memcpy(write_buf->content, report_buf, report_buf_len);
+
+ write_buf_len = HIDSPI_OUTPUT_REPORT_SIZE(report_buf_len);
+
+ ret = thc_dma_write(qsdev->thc_hw, write_buf, write_buf_len);
+ if (ret)
+ dev_err_once(qsdev->dev, "DMA write failed, ret = %d\n", ret);
+
+ return ret;
+}
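+
+/*
+ * All host-to-device commands in this file go through write_cmd_to_txdma():
+ * the output header (report type, content length, content id) is filled in
+ * first, the optional payload is copied behind it, and the packet is handed
+ * to thc_dma_write(). HIDSPI_OUTPUT_REPORT_SIZE() is assumed here to map a
+ * payload length to the total packet size; its definition lives in
+ * <linux/hid-over-spi.h>, added elsewhere in this series.
+ */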
+
+static int quickspi_get_device_descriptor(struct quickspi_device *qsdev)
+{
+ u8 read_buf[HIDSPI_INPUT_DEVICE_DESCRIPTOR_SIZE];
+ struct output_report output_rep;
+ u32 input_len, read_len = 0;
+ u32 int_cause_val;
+ u8 input_rep_type;
+ int ret;
+
+ output_rep.output_hdr.report_type = DEVICE_DESCRIPTOR;
+ output_rep.output_hdr.content_len = 0;
+ output_rep.output_hdr.content_id = 0;
+
+ qsdev->nondma_int_received = false;
+
+ ret = thc_tic_pio_write(qsdev->thc_hw, qsdev->output_report_addr,
+ HIDSPI_OUTPUT_REPORT_SIZE(0), (u32 *)&output_rep);
+ if (ret) {
+ dev_err_once(qsdev->dev,
+ "Write DEVICE_DESCRIPTOR command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->nondma_int_received_wq,
+ qsdev->nondma_int_received,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->nondma_int_received) {
+ dev_err_once(qsdev->dev, "Wait DEVICE_DESCRIPTOR timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->nondma_int_received = false;
+
+ int_cause_val = thc_int_cause_read(qsdev->thc_hw);
+ input_len = FIELD_GET(HIDSPI_INPUT_HEADER_REPORT_LEN, int_cause_val);
+
+ input_len = input_len * sizeof(u32);
+ if (input_len != HIDSPI_INPUT_DEVICE_DESCRIPTOR_SIZE) {
+ dev_err_once(qsdev->dev, "Receive wrong DEVICE_DESCRIPTOR length, len = %u\n",
+ input_len);
+ return -EINVAL;
+ }
+
+ ret = thc_tic_pio_read(qsdev->thc_hw, qsdev->input_report_bdy_addr,
+ input_len, &read_len, (u32 *)read_buf);
+ if (ret || read_len != input_len) {
+ dev_err_once(qsdev->dev, "Read DEVICE_DESCRIPTOR failed, ret = %d\n", ret);
+ dev_err_once(qsdev->dev, "DEVICE_DESCRIPTOR expected len = %u, actual read = %u\n",
+ input_len, read_len);
+ return ret;
+ }
+
+ input_rep_type = ((struct input_report_body_header *)read_buf)->input_report_type;
+
+ if (input_rep_type == DEVICE_DESCRIPTOR_RESPONSE) {
+ memcpy(&qsdev->dev_desc,
+ read_buf + HIDSPI_INPUT_BODY_HEADER_SIZE,
+ HIDSPI_DEVICE_DESCRIPTOR_SIZE);
+
+ return 0;
+ }
+
+ dev_err_once(qsdev->dev, "Unexpected intput report type: %d\n", input_rep_type);
+ return -EINVAL;
+}
+
+int quickspi_get_report_descriptor(struct quickspi_device *qsdev)
+{
+ int ret;
+
+ ret = write_cmd_to_txdma(qsdev, REPORT_DESCRIPTOR, 0, NULL, 0);
+ if (ret) {
+ dev_err_once(qsdev->dev,
+ "Write REPORT_DESCRIPTOR command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->report_desc_got_wq,
+ qsdev->report_desc_got,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->report_desc_got) {
+ dev_err_once(qsdev->dev, "Wait Report Descriptor timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->report_desc_got = false;
+
+ return 0;
+}
+
+int quickspi_set_power(struct quickspi_device *qsdev,
+ enum hidspi_power_state power_state)
+{
+ u8 cmd_content = power_state;
+ int ret;
+
+ ret = write_cmd_to_txdma(qsdev, COMMAND_CONTENT,
+ HIDSPI_SET_POWER_CMD_ID,
+ &cmd_content,
+ sizeof(cmd_content));
+ if (ret) {
+ dev_err_once(qsdev->dev, "Write SET_POWER command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
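+
+/*
+ * quickspi_set_power() is used by the system PM callbacks added elsewhere in
+ * this patch: quickspi_suspend() sends HIDSPI_SLEEP before quiescing THC,
+ * and quickspi_resume() sends HIDSPI_ON after re-configuring it.
+ */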
+
+void quickspi_handle_input_data(struct quickspi_device *qsdev, u32 buf_len)
+{
+ struct input_report_body_header *body_hdr;
+ struct input_report_body *input_body;
+ u8 *input_report;
+ u32 input_len;
+ int ret = 0;
+
+ input_body = (struct input_report_body *)qsdev->input_buf;
+ body_hdr = &input_body->body_hdr;
+ input_len = le16_to_cpu(body_hdr->content_len);
+
+ if (HIDSPI_INPUT_BODY_SIZE(input_len) > buf_len) {
+ dev_err_once(qsdev->dev, "Wrong input report length: %u",
+ input_len);
+ return;
+ }
+
+ switch (body_hdr->input_report_type) {
+ case REPORT_DESCRIPTOR_RESPONSE:
+ if (input_len != le16_to_cpu(qsdev->dev_desc.rep_desc_len)) {
+ dev_err_once(qsdev->dev, "Unexpected report descriptor length: %u\n",
+ input_len);
+ return;
+ }
+
+ memcpy(qsdev->report_descriptor, input_body->content, input_len);
+
+ qsdev->report_desc_got = true;
+ wake_up_interruptible(&qsdev->report_desc_got_wq);
+
+ break;
+
+ case COMMAND_RESPONSE:
+ if (body_hdr->content_id == HIDSPI_SET_POWER_CMD_ID) {
+ dev_dbg(qsdev->dev, "Receive set power on response\n");
+ } else {
+ dev_err_once(qsdev->dev, "Unknown command response type: %u\n",
+ body_hdr->content_id);
+ }
+
+ break;
+
+ case RESET_RESPONSE:
+ if (qsdev->state == QUICKSPI_RESETING) {
+ qsdev->reset_ack = true;
+ wake_up_interruptible(&qsdev->reset_ack_wq);
+ dev_dbg(qsdev->dev, "Receive HIR reset response\n");
+ } else {
+ dev_info(qsdev->dev, "Receive DIR\n");
+ }
+ break;
+
+ case GET_FEATURE_RESPONSE:
+ case GET_INPUT_REPORT_RESPONSE:
+ qsdev->report_len = sizeof(body_hdr->content_id) + input_len;
+ input_report = input_body->content - sizeof(body_hdr->content_id);
+
+ memcpy(qsdev->report_buf, input_report, qsdev->report_len);
+
+ qsdev->get_report_cmpl = true;
+ wake_up_interruptible(&qsdev->get_report_cmpl_wq);
+
+ break;
+
+ case SET_FEATURE_RESPONSE:
+ case OUTPUT_REPORT_RESPONSE:
+ qsdev->set_report_cmpl = true;
+ wake_up_interruptible(&qsdev->set_report_cmpl_wq);
+
+ break;
+
+ case DATA:
+ if (qsdev->state != QUICKSPI_ENABLED)
+ return;
+
+ if (input_len > le16_to_cpu(qsdev->dev_desc.max_input_len)) {
+ dev_err_once(qsdev->dev, "Unexpected too large input report length: %u\n",
+ input_len);
+ return;
+ }
+
+ input_len = sizeof(body_hdr->content_id) + input_len;
+ input_report = input_body->content - sizeof(body_hdr->content_id);
+
+ ret = quickspi_hid_send_report(qsdev, input_report, input_len);
+ if (ret)
+ dev_err_once(qsdev->dev, "Failed to send HID input report: %d\n", ret);
+
+ break;
+
+ default:
+ dev_err_once(qsdev->dev, "Unsupported input report type: %u\n",
+ body_hdr->input_report_type);
+ break;
+ }
+}
+
+static int acpi_tic_reset(struct quickspi_device *qsdev)
+{
+ acpi_status status = 0;
+ acpi_handle handle;
+
+ if (!qsdev->acpi_dev)
+ return -ENODEV;
+
+ handle = acpi_device_handle(qsdev->acpi_dev);
+ status = acpi_execute_simple_method(handle, "_RST", 0);
+ if (ACPI_FAILURE(status)) {
+ dev_err_once(qsdev->dev,
+ "Failed to reset device through ACPI method, ret = %d\n", status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int reset_tic(struct quickspi_device *qsdev)
+{
+ u32 actual_read_len, read_len = 0;
+ u32 input_report_len, reset_response, int_cause_val;
+ u8 input_rep_type;
+ int ret;
+
+ qsdev->state = QUICKSPI_RESETING;
+
+ qsdev->reset_ack = false;
+
+ /* The first interrupt uses level trigger to avoid missing an interrupt */
+ thc_int_trigger_type_select(qsdev->thc_hw, false);
+
+ ret = acpi_tic_reset(qsdev);
+ if (ret)
+ return ret;
+
+ ret = thc_interrupt_quiesce(qsdev->thc_hw, false);
+ if (ret)
+ return ret;
+
+ ret = wait_event_interruptible_timeout(qsdev->reset_ack_wq,
+ qsdev->reset_ack,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->reset_ack) {
+ dev_err_once(qsdev->dev, "Wait RESET_RESPONSE timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+
+ int_cause_val = thc_int_cause_read(qsdev->thc_hw);
+ input_report_len = FIELD_GET(HIDSPI_INPUT_HEADER_REPORT_LEN, int_cause_val);
+
+ read_len = input_report_len * sizeof(u32);
+ if (read_len != HIDSPI_INPUT_BODY_SIZE(0)) {
+ dev_err_once(qsdev->dev, "Receive wrong RESET_RESPONSE, len = %u\n",
+ read_len);
+ return -EINVAL;
+ }
+
+ /* Switch to edge trigger, matching the HIDSPI protocol definition */
+ thc_int_trigger_type_select(qsdev->thc_hw, true);
+
+ ret = thc_tic_pio_read(qsdev->thc_hw, qsdev->input_report_bdy_addr,
+ read_len, &actual_read_len,
+ (u32 *)&reset_response);
+ if (ret || actual_read_len != read_len) {
+ dev_err_once(qsdev->dev, "Read RESET_RESPONSE body failed, ret = %d\n", ret);
+ dev_err_once(qsdev->dev, "RESET_RESPONSE body expected len = %u, actual = %u\n",
+ read_len, actual_read_len);
+ return ret;
+ }
+
+ input_rep_type = FIELD_GET(HIDSPI_IN_REP_BDY_HDR_REP_TYPE, reset_response);
+
+ if (input_rep_type == RESET_RESPONSE) {
+ dev_dbg(qsdev->dev, "RESET_RESPONSE received\n");
+ } else {
+ dev_err_once(qsdev->dev,
+ "Unexpected input report type: %d, expect RESET_RESPONSE\n",
+ input_rep_type);
+ return -EINVAL;
+ }
+
+ qsdev->state = QUICKSPI_RESETED;
+
+ ret = quickspi_get_device_descriptor(qsdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int quickspi_get_report(struct quickspi_device *qsdev,
+ u8 report_type, unsigned int report_id, void *buf)
+{
+ int rep_type;
+ int ret;
+
+ if (report_type == HID_INPUT_REPORT) {
+ rep_type = GET_INPUT_REPORT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = GET_FEATURE;
+ } else {
+ dev_err_once(qsdev->dev, "Unsupported report type for GET REPORT: %d\n",
+ report_type);
+ return -EINVAL;
+ }
+
+ ret = write_cmd_to_txdma(qsdev, rep_type, report_id, NULL, 0);
+ if (ret) {
+ dev_err_once(qsdev->dev, "Write GET_REPORT command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->get_report_cmpl_wq,
+ qsdev->get_report_cmpl,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->get_report_cmpl) {
+ dev_err_once(qsdev->dev, "Wait Get Report Response timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->get_report_cmpl = false;
+
+ memcpy(buf, qsdev->report_buf, qsdev->report_len);
+
+ return qsdev->report_len;
+}
+
+int quickspi_set_report(struct quickspi_device *qsdev,
+ u8 report_type, unsigned int report_id,
+ void *buf, u32 buf_len)
+{
+ int rep_type;
+ int ret;
+
+ if (report_type == HID_OUTPUT_REPORT) {
+ rep_type = OUTPUT_REPORT;
+ } else if (report_type == HID_FEATURE_REPORT) {
+ rep_type = SET_FEATURE;
+ } else {
+ dev_err_once(qsdev->dev, "Unsupported report type for SET REPORT: %d\n",
+ report_type);
+ return -EINVAL;
+ }
+
+ ret = write_cmd_to_txdma(qsdev, rep_type, report_id, buf + 1, buf_len - 1);
+ if (ret) {
+ dev_err_once(qsdev->dev, "Write SET_REPORT command failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(qsdev->set_report_cmpl_wq,
+ qsdev->set_report_cmpl,
+ QUICKSPI_ACK_WAIT_TIMEOUT * HZ);
+ if (ret <= 0 || !qsdev->set_report_cmpl) {
+ dev_err_once(qsdev->dev, "Wait Set Report Response timeout, ret:%d\n", ret);
+ return -ETIMEDOUT;
+ }
+ qsdev->set_report_cmpl = false;
+
+ return buf_len;
+}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h
new file mode 100644
index 000000000000..775e29c1ed13
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _QUICKSPI_PROTOCOL_H_
+#define _QUICKSPI_PROTOCOL_H_
+
+#include <linux/hid-over-spi.h>
+
+#define QUICKSPI_ACK_WAIT_TIMEOUT 5
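+
+/* The timeout unit is seconds; the wait sites multiply it by HZ for jiffies */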
+
+struct quickspi_device;
+
+void quickspi_handle_input_data(struct quickspi_device *qsdev, u32 buf_len);
+int quickspi_get_report(struct quickspi_device *qsdev, u8 report_type,
+ unsigned int report_id, void *buf);
+int quickspi_set_report(struct quickspi_device *qsdev, u8 report_type,
+ unsigned int report_id, void *buf, u32 buf_len);
+int quickspi_get_report_descriptor(struct quickspi_device *qsdev);
+
+int quickspi_set_power(struct quickspi_device *qsdev,
+ enum hidspi_power_state power_state);
+
+int reset_tic(struct quickspi_device *qsdev);
+
+#endif /* _QUICKSPI_PROTOCOL_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
new file mode 100644
index 000000000000..4fc78b5a04b5
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -0,0 +1,1578 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-hw.h"
+
+static int thc_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct thc_device *thc_ctx = context;
+ void __iomem *base = thc_ctx->mmio_addr;
+
+ *val = ioread32(base + reg);
+ return 0;
+}
+
+static int thc_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct thc_device *thc_ctx = context;
+ void __iomem *base = thc_ctx->mmio_addr;
+
+ iowrite32(val, base + reg);
+ return 0;
+}
+
+static const struct regmap_range thc_rw_ranges[] = {
+ regmap_reg_range(0x10, 0x14),
+ regmap_reg_range(0x1000, 0x1320),
+};
+
+static const struct regmap_access_table thc_rw_table = {
+ .yes_ranges = thc_rw_ranges,
+ .n_yes_ranges = ARRAY_SIZE(thc_rw_ranges),
+};
+
+static const struct regmap_config thc_regmap_cfg = {
+ .name = "thc_regmap_common",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1320,
+ .reg_read = thc_regmap_read,
+ .reg_write = thc_regmap_write,
+ .cache_type = REGCACHE_NONE,
+ .fast_io = true,
+ .rd_table = &thc_rw_table,
+ .wr_table = &thc_rw_table,
+ .volatile_table = &thc_rw_table,
+};
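+
+/*
+ * With the MMIO-backed reg_read/reg_write callbacks above, an access such as
+ *
+ *   regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, &sts);
+ *
+ * resolves to ioread32(mmio_addr + THC_M_PRT_SW_SEQ_STS_OFFSET), with regmap
+ * providing locking and enforcing the access table declared above.
+ */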
+
+/**
+ * thc_clear_state - Clear THC hardware state
+ *
+ * @dev: The pointer of THC device structure
+ */
+static void thc_clear_state(const struct thc_device *dev)
+{
+ u32 val;
+
+ /* Clear interrupt cause register */
+ val = THC_M_PRT_ERR_CAUSE_INVLD_DEV_ENTRY |
+ THC_M_PRT_ERR_CAUSE_FRAME_BABBLE_ERR |
+ THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR |
+ THC_M_PRT_ERR_CAUSE_PRD_ENTRY_ERR;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, val, val);
+
+ /* Clear interrupt error state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL,
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS,
+ THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS,
+ THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS);
+
+ val = THC_M_PRT_INT_EN_TXN_ERR_INT_EN |
+ THC_M_PRT_INT_EN_FATAL_ERR_INT_EN |
+ THC_M_PRT_INT_EN_BUF_OVRRUN_ERR_INT_EN;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_EN_OFFSET, val, val);
+
+ val = THC_M_PRT_SW_SEQ_STS_THC_SS_ERR |
+ THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, val, val);
+
+ /* Clear RxDMA state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_EOF, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_IE_EOF, 0);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_2_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS);
+
+ /* Clear TxDMA state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL);
+
+ val = THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ERROR_STS |
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_IOC_STS |
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET, val, val);
+
+ /* Reset all DMAs count */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DB_CNT_1_OFFSET,
+ THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT_RST,
+ THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DEVINT_CNT_OFFSET,
+ THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT_RST,
+ THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+
+ /* Reset THC hardware sequence state */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRAME_DROP_CNT_1_OFFSET,
+ THC_M_PRT_FRAME_DROP_CNT_1_RFDC,
+ THC_M_PRT_FRAME_DROP_CNT_1_RFDC);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRAME_DROP_CNT_2_OFFSET,
+ THC_M_PRT_FRAME_DROP_CNT_2_RFDC,
+ THC_M_PRT_FRAME_DROP_CNT_2_RFDC);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRM_CNT_1_OFFSET,
+ THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT_RST,
+ THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_FRM_CNT_2_OFFSET,
+ THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT_RST,
+ THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_RXDMA_PKT_CNT_1_OFFSET,
+ THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT_RST,
+ THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_RXDMA_PKT_CNT_2_OFFSET,
+ THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT_RST,
+ THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SWINT_CNT_1_OFFSET,
+ THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST,
+ THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TX_FRM_CNT_OFFSET,
+ THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT_RST,
+ THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TXDMA_PKT_CNT_OFFSET,
+ THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT_RST,
+ THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_UFRM_CNT_1_OFFSET,
+ THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT_RST,
+ THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT_RST);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_UFRM_CNT_2_OFFSET,
+ THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT_RST,
+ THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT_RST);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_PRD_EMPTY_CNT_1_OFFSET,
+ THC_M_PRT_PRD_EMPTY_CNT_1_RPTEC,
+ THC_M_PRT_PRD_EMPTY_CNT_1_RPTEC);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_PRD_EMPTY_CNT_2_OFFSET,
+ THC_M_PRT_PRD_EMPTY_CNT_2_RPTEC,
+ THC_M_PRT_PRD_EMPTY_CNT_2_RPTEC);
+}
+
+/**
+ * thc_dev_init - Allocate and initialize the THC device structure
+ *
+ * @device: pointer to the device structure
+ * @mem_addr: pointer to the MMIO memory address
+ *
+ * Return: the thc_device pointer on success, or an ERR_PTR-encoded error on failure.
+ */
+struct thc_device *thc_dev_init(struct device *device, void __iomem *mem_addr)
+{
+ struct thc_device *thc_dev;
+ int ret;
+
+ thc_dev = devm_kzalloc(device, sizeof(*thc_dev), GFP_KERNEL);
+ if (!thc_dev)
+ return ERR_PTR(-ENOMEM);
+
+ thc_dev->dev = device;
+ thc_dev->mmio_addr = mem_addr;
+ thc_dev->thc_regmap = devm_regmap_init(device, NULL, thc_dev, &thc_regmap_cfg);
+ if (IS_ERR(thc_dev->thc_regmap)) {
+ ret = PTR_ERR(thc_dev->thc_regmap);
+ dev_err_once(device, "Failed to init thc_regmap: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ thc_clear_state(thc_dev);
+
+ mutex_init(&thc_dev->thc_bus_lock);
+ init_waitqueue_head(&thc_dev->write_complete_wait);
+ init_waitqueue_head(&thc_dev->swdma_complete_wait);
+
+ thc_dev->dma_ctx = thc_dma_init(thc_dev);
+ if (!thc_dev->dma_ctx) {
+ dev_err_once(device, "DMA context init failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return thc_dev;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dev_init, "INTEL_THC");
+
+static int prepare_pio(const struct thc_device *dev, const u8 pio_op,
+ const u32 address, const u32 size)
+{
+ u32 sts, ctrl, addr, mask;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, &sts);
+
+ /* Check if THC previous PIO still in progress */
+ if (sts & THC_M_PRT_SW_SEQ_STS_THC_SS_CIP) {
+ dev_err_once(dev->dev, "THC PIO is still busy!\n");
+ return -EBUSY;
+ }
+
+ /* Clear error bit and complete bit in state register */
+ sts |= THC_M_PRT_SW_SEQ_STS_THC_SS_ERR |
+ THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, sts);
+
+ /* Set PIO data size, opcode and interrupt capability */
+ ctrl = FIELD_PREP(THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC, size) |
+ FIELD_PREP(THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CMD, pio_op);
+ if (dev->pio_int_supported)
+ ctrl |= THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE;
+
+ mask = THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC |
+ THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CMD |
+ THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_CNTRL_OFFSET, mask, ctrl);
+
+ /* Set PIO target address */
+ addr = FIELD_PREP(THC_M_PRT_SW_SEQ_DATA0_ADDR_THC_SW_SEQ_DATA0_ADDR, address);
+ mask = THC_M_PRT_SW_SEQ_DATA0_ADDR_THC_SW_SEQ_DATA0_ADDR;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, mask, addr);
+ return 0;
+}
+
+static void pio_start(const struct thc_device *dev,
+ u32 size_in_bytes, const u32 *buffer)
+{
+ if (size_in_bytes && buffer)
+ regmap_bulk_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET,
+ buffer, size_in_bytes / sizeof(u32));
+
+ /* Enable Start bit */
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_CNTRL_OFFSET,
+ THC_M_PRT_SW_SEQ_CNTRL_TSSGO,
+ THC_M_PRT_SW_SEQ_CNTRL_TSSGO);
+}
+
+static int pio_complete(const struct thc_device *dev,
+ u32 *buffer, u32 *size)
+{
+ u32 sts, ctrl;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, &sts);
+ if (sts & THC_M_PRT_SW_SEQ_STS_THC_SS_ERR) {
+ dev_err_once(dev->dev, "PIO operation error\n");
+ return -EBUSY;
+ }
+
+ if (buffer && size) {
+ regmap_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_CNTRL_OFFSET, &ctrl);
+ *size = FIELD_GET(THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC, ctrl);
+
+ regmap_bulk_read(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET,
+ buffer, *size / sizeof(u32));
+ }
+
+ sts |= THC_M_PRT_SW_SEQ_STS_THC_SS_ERR | THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, sts);
+ return 0;
+}
+
+static int pio_wait(const struct thc_device *dev)
+{
+ u32 sts = 0;
+ int ret;
+
+ ret = regmap_read_poll_timeout(dev->thc_regmap, THC_M_PRT_SW_SEQ_STS_OFFSET, sts,
+ !(sts & THC_M_PRT_SW_SEQ_STS_THC_SS_CIP ||
+ !(sts & THC_M_PRT_SW_SEQ_STS_TSSDONE)),
+ THC_REGMAP_POLLING_INTERVAL_US, THC_PIO_DONE_TIMEOUT_US);
+ if (ret)
+ dev_err_once(dev->dev, "Timeout while polling PIO operation done\n");
+
+ return ret;
+}
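+
+/*
+ * The four helpers above make up one PIO transaction; callers below hold
+ * thc_bus_lock and run, in order (sketch):
+ *
+ *   ret = prepare_pio(dev, opcode, address, size);  // opcode/size/address
+ *   pio_start(dev, write_size, write_buf);          // optional data + GO bit
+ *   ret = pio_wait(dev);                            // CIP clear and TSSDONE set
+ *   ret = pio_complete(dev, read_buf, &read_size);  // fetch data, clear status
+ */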
+
+/**
+ * thc_tic_pio_read - Read data from touch device by PIO
+ *
+ * @dev: The pointer of THC private device context
+ * @address: Slave address for the PIO operation
+ * @size: Expected read data size
+ * @actual_size: The pointer of the actual data size read from touch device
+ * @buffer: The pointer of data buffer to store the data read from touch device
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int thc_tic_pio_read(struct thc_device *dev, const u32 address,
+ const u32 size, u32 *actual_size, u32 *buffer)
+{
+ u8 opcode;
+ int ret;
+
+ if (!size || !actual_size || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %u, actual_size %p, buffer %p\n",
+ size, actual_size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ opcode = (dev->port_type == THC_PORT_TYPE_SPI) ?
+ THC_PIO_OP_SPI_TIC_READ : THC_PIO_OP_I2C_TIC_READ;
+
+ ret = prepare_pio(dev, opcode, address, size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, 0, NULL);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, buffer, actual_size);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_tic_pio_read, "INTEL_THC");
+
+/**
+ * thc_tic_pio_write - Write data to touch device by PIO
+ *
+ * @dev: The pointer of THC private device context
+ * @address: Slave address for the PIO operation
+ * @size: PIO write data size
+ * @buffer: The pointer of the write data buffer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int thc_tic_pio_write(struct thc_device *dev, const u32 address,
+ const u32 size, const u32 *buffer)
+{
+ u8 opcode;
+ int ret;
+
+ if (!size || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %u, buffer %p\n",
+ size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ opcode = (dev->port_type == THC_PORT_TYPE_SPI) ?
+ THC_PIO_OP_SPI_TIC_WRITE : THC_PIO_OP_I2C_TIC_WRITE;
+
+ ret = prepare_pio(dev, opcode, address, size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, size, buffer);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, NULL, NULL);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_tic_pio_write, "INTEL_THC");
+
+/**
+ * thc_tic_pio_write_and_read - Write data followed by read data by PIO
+ *
+ * @dev: The pointer of THC private device context
+ * @address: Slave address for the PIO operation
+ * @write_size: PIO write data size
+ * @write_buffer: The pointer of the write data buffer
+ * @read_size: Expected PIO read data size
+ * @actual_size: The pointer of the actual read data size
+ * @read_buffer: The pointer of PIO read data buffer
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int thc_tic_pio_write_and_read(struct thc_device *dev, const u32 address,
+ const u32 write_size, const u32 *write_buffer,
+ const u32 read_size, u32 *actual_size, u32 *read_buffer)
+{
+ u32 i2c_ctrl, mask;
+ int ret;
+
+ if (dev->port_type == THC_PORT_TYPE_SPI) {
+ dev_err(dev->dev, "SPI port type doesn't support pio write and read!");
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ /* Config i2c PIO write and read sequence */
+ i2c_ctrl = FIELD_PREP(THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_PIO_I2C_WBC, write_size);
+ mask = THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_PIO_I2C_WBC;
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_OFFSET,
+ mask, i2c_ctrl);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_OFFSET,
+ THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_I2C_RW_PIO_EN,
+ THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_I2C_RW_PIO_EN);
+
+ ret = prepare_pio(dev, THC_PIO_OP_I2C_TIC_WRITE_AND_READ, address, read_size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, write_size, write_buffer);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, read_buffer, actual_size);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_tic_pio_write_and_read, "INTEL_THC");
+
+/**
+ * thc_interrupt_config - Configure THC interrupts
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_interrupt_config(struct thc_device *dev)
+{
+ u32 mbits, mask, r_dma_ctrl_1;
+
+ /* Clear Error reporting interrupt status bits */
+ mbits = THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS |
+ THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_INT_STATUS_OFFSET,
+ mbits, mbits);
+
+ /* Enable Error Reporting Interrupts */
+ mbits = THC_M_PRT_INT_EN_TXN_ERR_INT_EN |
+ THC_M_PRT_INT_EN_FATAL_ERR_INT_EN |
+ THC_M_PRT_INT_EN_BUF_OVRRUN_ERR_INT_EN;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_INT_EN_OFFSET,
+ mbits, mbits);
+
+ /* Clear PIO Interrupt status bits */
+ mbits = THC_M_PRT_SW_SEQ_STS_THC_SS_ERR |
+ THC_M_PRT_SW_SEQ_STS_TSSDONE;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_STS_OFFSET,
+ mbits, mbits);
+
+ /* Read Interrupts */
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ &r_dma_ctrl_1);
+ /* Disable RxDMA1 */
+ r_dma_ctrl_1 &= ~THC_M_PRT_READ_DMA_CNTRL_IE_EOF;
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ r_dma_ctrl_1);
+
+ /* Ack EOF Interrupt RxDMA1 */
+ mbits = THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS;
+ /* Ack NonDMA Interrupt */
+ mbits |= THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ mbits, mbits);
+
+ /* Ack EOF Interrupt RxDMA2 */
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_2_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS,
+ THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS);
+
+ /* Write Interrupts */
+	/* Disable TxDMA completion interrupt */
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL,
+ 0);
+
+ /* Clear TxDMA interrupt status bits */
+ mbits = THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ERROR_STS;
+ mbits |= THC_M_PRT_WRITE_INT_STS_THC_WRDMA_IOC_STS;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_WRITE_INT_STS_OFFSET,
+ mbits, mbits);
+
+ /* Enable Non-DMA device inband interrupt */
+ r_dma_ctrl_1 |= THC_M_PRT_READ_DMA_CNTRL_IE_NDDI;
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ r_dma_ctrl_1);
+
+ if (dev->port_type == THC_PORT_TYPE_SPI) {
+ /* Edge triggered interrupt */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TSEQ_CNTRL_1_OFFSET,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN);
+ } else {
+ /* Level triggered interrupt */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TSEQ_CNTRL_1_OFFSET,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN, 0);
+
+ mbits = THC_M_PRT_INT_EN_THC_I2C_IC_MST_ON_HOLD_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_SCL_STUCK_AT_LOW_DET_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_TX_ABRT_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_TX_OVER_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_RX_FULL_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_RX_OVER_INT_EN |
+ THC_M_PRT_INT_EN_THC_I2C_IC_RX_UNDER_INT_EN;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_EN_OFFSET,
+ mbits, mbits);
+ }
+
+ thc_set_pio_interrupt_support(dev, false);
+
+ /* HIDSPI specific settings */
+ if (dev->port_type == THC_PORT_TYPE_SPI) {
+ mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_OFFSET,
+ THC_BIT_OFFSET_INTERRUPT_TYPE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_LEN,
+ THC_BIT_LENGTH_INTERRUPT_TYPE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_EOF_OFFSET,
+ THC_BIT_OFFSET_LAST_FRAGMENT_FLAG) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL,
+ THC_BITMASK_INVALID_TYPE_DATA);
+ mask = THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_OFFSET |
+ THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_LEN |
+ THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_EOF_OFFSET |
+ THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DEVINT_CFG_1_OFFSET,
+ mask, mbits);
+
+ mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_OFFSET,
+ THC_BIT_OFFSET_MICROFRAME_SIZE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_LEN,
+ THC_BIT_LENGTH_MICROFRAME_SIZE) |
+ FIELD_PREP(THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_UNIT,
+ THC_UNIT_MICROFRAME_SIZE) |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_IGNORE |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_VAL;
+ mask = THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_OFFSET |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_LEN |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_UNIT |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_IGNORE |
+ THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_VAL;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_DEVINT_CFG_2_OFFSET,
+ mask, mbits);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_config, "INTEL_THC");
+
+/**
+ * thc_int_trigger_type_select - Select THC interrupt trigger type
+ *
+ * @dev: the pointer of THC private device context
+ * @edge_trigger: true for edge triggered, false for level triggered interrupt
+ */
+void thc_int_trigger_type_select(struct thc_device *dev, bool edge_trigger)
+{
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_TSEQ_CNTRL_1_OFFSET,
+ THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN,
+ edge_trigger ? THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN : 0);
+}
+EXPORT_SYMBOL_NS_GPL(thc_int_trigger_type_select, "INTEL_THC");
+
+/**
+ * thc_interrupt_enable - Enable or disable THC interrupt
+ *
+ * @dev: the pointer of THC private device context
+ * @int_enable: true to enable, false to disable the THC interrupt
+ */
+void thc_interrupt_enable(struct thc_device *dev, bool int_enable)
+{
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_INT_EN_OFFSET,
+ THC_M_PRT_INT_EN_GBL_INT_EN,
+ int_enable ? THC_M_PRT_INT_EN_GBL_INT_EN : 0);
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_enable, "INTEL_THC");
+
+/**
+ * thc_interrupt_quiesce - Quiesce or unquiesce external touch device interrupt
+ *
+ * @dev: the pointer of THC private device context
+ * @int_quiesce: true to quiesce, false to unquiesce the device interrupt
+ *
+ * Return: 0 on success, other error codes on failure
+ */
+int thc_interrupt_quiesce(const struct thc_device *dev, bool int_quiesce)
+{
+ u32 ctrl;
+ int ret;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, &ctrl);
+ if (!(ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN) && !int_quiesce) {
+		dev_warn(dev->dev, "THC interrupt already unquiesced\n");
+ return 0;
+ }
+
+ if ((ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN) && int_quiesce) {
+		dev_warn(dev->dev, "THC interrupt already quiesced\n");
+ return 0;
+ }
+
+	/* Quiesce device interrupt - set the quiesce bit and wait for THC HW to ACK */
+ if (int_quiesce)
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET,
+ THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN,
+ THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN);
+
+ ret = regmap_read_poll_timeout(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, ctrl,
+ ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS,
+ THC_REGMAP_POLLING_INTERVAL_US, THC_QUIESCE_EN_TIMEOUT_US);
+ if (ret) {
+ dev_err_once(dev->dev,
+			"Timeout while waiting for THC idle, target quiesce state = %s\n",
+ int_quiesce ? "true" : "false");
+ return ret;
+ }
+
+ /* Unquiesce device interrupt - Clear the quiesce bit */
+ if (!int_quiesce)
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET,
+ THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_quiesce, "INTEL_THC");
+
+/**
+ * thc_set_pio_interrupt_support - Set whether PIO interrupt is supported
+ *
+ * @dev: The pointer of THC private device context
+ * @supported: The flag indicating whether PIO interrupt is supported
+ */
+void thc_set_pio_interrupt_support(struct thc_device *dev, bool supported)
+{
+ dev->pio_int_supported = supported;
+}
+EXPORT_SYMBOL_NS_GPL(thc_set_pio_interrupt_support, "INTEL_THC");
+
+/**
+ * thc_ltr_config - Configure THC Latency Tolerance Reporting (LTR) settings
+ *
+ * @dev: The pointer of THC private device context
+ * @active_ltr_us: active LTR value, unit is us
+ * @lp_ltr_us: low power LTR value, unit is us
+ */
+void thc_ltr_config(struct thc_device *dev, u32 active_ltr_us, u32 lp_ltr_us)
+{
+ u32 active_ltr_scale, lp_ltr_scale, ltr_ctrl, ltr_mask, orig, tmp;
+
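+	/*
+	 * Map the microsecond LTR values onto the hardware value/scale
+	 * encoding: scales 3, 4 and 5 use units of roughly 32us, 1ms and
+	 * 33.5ms, approximated here by right shifts of 5, 10 and 15 bits.
+	 */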
+ if (active_ltr_us >= THC_LTR_MIN_VAL_SCALE_3 &&
+ active_ltr_us < THC_LTR_MAX_VAL_SCALE_3) {
+ active_ltr_scale = THC_LTR_SCALE_3;
+ active_ltr_us = active_ltr_us >> 5;
+ } else if (active_ltr_us >= THC_LTR_MIN_VAL_SCALE_4 &&
+ active_ltr_us < THC_LTR_MAX_VAL_SCALE_4) {
+ active_ltr_scale = THC_LTR_SCALE_4;
+ active_ltr_us = active_ltr_us >> 10;
+ } else if (active_ltr_us >= THC_LTR_MIN_VAL_SCALE_5 &&
+ active_ltr_us < THC_LTR_MAX_VAL_SCALE_5) {
+ active_ltr_scale = THC_LTR_SCALE_5;
+ active_ltr_us = active_ltr_us >> 15;
+ } else {
+ active_ltr_scale = THC_LTR_SCALE_2;
+ }
+
+ if (lp_ltr_us >= THC_LTR_MIN_VAL_SCALE_3 &&
+ lp_ltr_us < THC_LTR_MAX_VAL_SCALE_3) {
+ lp_ltr_scale = THC_LTR_SCALE_3;
+ lp_ltr_us = lp_ltr_us >> 5;
+ } else if (lp_ltr_us >= THC_LTR_MIN_VAL_SCALE_4 &&
+ lp_ltr_us < THC_LTR_MAX_VAL_SCALE_4) {
+ lp_ltr_scale = THC_LTR_SCALE_4;
+ lp_ltr_us = lp_ltr_us >> 10;
+ } else if (lp_ltr_us >= THC_LTR_MIN_VAL_SCALE_5 &&
+ lp_ltr_us < THC_LTR_MAX_VAL_SCALE_5) {
+ lp_ltr_scale = THC_LTR_SCALE_5;
+ lp_ltr_us = lp_ltr_us >> 15;
+ } else {
+ lp_ltr_scale = THC_LTR_SCALE_2;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, &orig);
+ ltr_ctrl = FIELD_PREP(THC_M_CMN_LTR_CTRL_ACT_LTR_VAL, active_ltr_us) |
+ FIELD_PREP(THC_M_CMN_LTR_CTRL_ACT_LTR_SCALE, active_ltr_scale) |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN |
+ FIELD_PREP(THC_M_CMN_LTR_CTRL_LP_LTR_VAL, lp_ltr_us) |
+ FIELD_PREP(THC_M_CMN_LTR_CTRL_LP_LTR_SCALE, lp_ltr_scale) |
+ THC_M_CMN_LTR_CTRL_LP_LTR_REQ;
+
+ ltr_mask = THC_M_CMN_LTR_CTRL_ACT_LTR_VAL |
+ THC_M_CMN_LTR_CTRL_ACT_LTR_SCALE |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN |
+ THC_M_CMN_LTR_CTRL_LP_LTR_VAL |
+ THC_M_CMN_LTR_CTRL_LP_LTR_SCALE |
+ THC_M_CMN_LTR_CTRL_LP_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN;
+
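+	/* Read-modify-write so that bits outside the LTR mask are preserved */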
+ tmp = orig & ~ltr_mask;
+ tmp |= ltr_ctrl & ltr_mask;
+
+ regmap_write(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, tmp);
+}
+EXPORT_SYMBOL_NS_GPL(thc_ltr_config, "INTEL_THC");
+
+/**
+ * thc_change_ltr_mode - Change THC LTR mode
+ *
+ * @dev: The pointer of THC private device context
+ * @ltr_mode: LTR mode (active or low power)
+ */
+void thc_change_ltr_mode(struct thc_device *dev, u32 ltr_mode)
+{
+ if (ltr_mode == THC_LTR_MODE_ACTIVE) {
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN,
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN);
+ return;
+ }
+
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET,
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN,
+ THC_M_CMN_LTR_CTRL_LP_LTR_EN);
+}
+EXPORT_SYMBOL_NS_GPL(thc_change_ltr_mode, "INTEL_THC");
+
+/**
+ * thc_ltr_unconfig - Unconfigure THC Latency Tolerance Reporting (LTR) settings
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_ltr_unconfig(struct thc_device *dev)
+{
+ u32 ltr_ctrl, bits_clear;
+
+ regmap_read(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, &ltr_ctrl);
+ bits_clear = THC_M_CMN_LTR_CTRL_LP_LTR_EN |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN |
+ THC_M_CMN_LTR_CTRL_LP_LTR_REQ |
+ THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ;
+
+ ltr_ctrl &= ~bits_clear;
+
+ regmap_write(dev->thc_regmap, THC_M_CMN_LTR_CTRL_OFFSET, ltr_ctrl);
+}
+EXPORT_SYMBOL_NS_GPL(thc_ltr_unconfig, "INTEL_THC");
+
+/**
+ * thc_int_cause_read - Read interrupt cause register value
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: The interrupt cause register value
+ */
+u32 thc_int_cause_read(struct thc_device *dev)
+{
+ u32 int_cause;
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_DEV_INT_CAUSE_REG_VAL_OFFSET, &int_cause);
+
+ return int_cause;
+}
+EXPORT_SYMBOL_NS_GPL(thc_int_cause_read, "INTEL_THC");
+
+static void thc_print_txn_error_cause(const struct thc_device *dev)
+{
+ bool known_error = false;
+ u32 cause = 0;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, &cause);
+
+ if (cause & THC_M_PRT_ERR_CAUSE_PRD_ENTRY_ERR) {
+ dev_err(dev->dev, "TXN Error: Invalid PRD Entry\n");
+ known_error = true;
+ }
+ if (cause & THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR) {
+ dev_err(dev->dev, "TXN Error: THC Buffer Overrun\n");
+ known_error = true;
+ }
+ if (cause & THC_M_PRT_ERR_CAUSE_FRAME_BABBLE_ERR) {
+ dev_err(dev->dev, "TXN Error: Frame Babble\n");
+ known_error = true;
+ }
+ if (cause & THC_M_PRT_ERR_CAUSE_INVLD_DEV_ENTRY) {
+ dev_err(dev->dev, "TXN Error: Invalid Device Register Setting\n");
+ known_error = true;
+ }
+
+ /* Clear interrupt status bits */
+ regmap_write(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, cause);
+
+ if (!known_error)
+ dev_err(dev->dev, "TXN Error does not match any known value: 0x%X\n",
+ cause);
+}
+
+/**
+ * thc_interrupt_handler - Handle THC interrupts
+ *
+ * THC interrupts include several types: external touch device (TIC) non-DMA
+ * interrupts, PIO completion interrupts, DMA interrupts, I2C subIP raw
+ * interrupts and error interrupts.
+ *
+ * This is a helper function for interrupt processing: it detects the interrupt
+ * type, clears the interrupt status bits and returns the interrupt type to the
+ * caller for further processing.
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: The combined flags of all triggered interrupt types
+ */
+int thc_interrupt_handler(struct thc_device *dev)
+{
+ u32 read_sts_1, read_sts_2, read_sts_sw, write_sts;
+ u32 int_sts, err_cause, seq_cntrl, seq_sts;
+ int interrupt_type = 0;
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_1_OFFSET, &read_sts_1);
+
+ if (read_sts_1 & THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS) {
+ dev_dbg(dev->dev, "THC non-DMA device interrupt\n");
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ NONDMA_INT_STS_BIT);
+
+ interrupt_type |= BIT(THC_NONDMA_INT);
+
+ return interrupt_type;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET, &int_sts);
+
+ if (int_sts & THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS) {
+ dev_err(dev->dev, "THC transaction error, int_sts: 0x%08X\n", int_sts);
+ thc_print_txn_error_cause(dev);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ TXN_ERR_INT_STS_BIT);
+
+ interrupt_type |= BIT(THC_TXN_ERR_INT);
+
+ return interrupt_type;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET, &err_cause);
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_2_OFFSET, &read_sts_2);
+
+ if (err_cause & THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR ||
+ read_sts_1 & THC_M_PRT_READ_DMA_INT_STS_STALL_STS ||
+ read_sts_2 & THC_M_PRT_READ_DMA_INT_STS_STALL_STS) {
+ dev_err(dev->dev, "Buffer overrun or RxDMA engine stalled!\n");
+ thc_print_txn_error_cause(dev);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_2_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_STALL_STS);
+ regmap_write(dev->thc_regmap, THC_M_PRT_READ_DMA_INT_STS_1_OFFSET,
+ THC_M_PRT_READ_DMA_INT_STS_STALL_STS);
+ regmap_write(dev->thc_regmap, THC_M_PRT_ERR_CAUSE_OFFSET,
+ THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR);
+
+ interrupt_type |= BIT(THC_TXN_ERR_INT);
+
+ return interrupt_type;
+ }
+
+ if (int_sts & THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS) {
+ dev_err_once(dev->dev, "THC FATAL error, int_sts: 0x%08X\n", int_sts);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ TXN_FATAL_INT_STS_BIT);
+
+ interrupt_type |= BIT(THC_FATAL_ERR_INT);
+
+ return interrupt_type;
+ }
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_CNTRL_OFFSET, &seq_cntrl);
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_SW_SEQ_STS_OFFSET, &seq_sts);
+
+ if (seq_cntrl & THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE &&
+ seq_sts & THC_M_PRT_SW_SEQ_STS_TSSDONE) {
+ dev_dbg(dev->dev, "THC_SS_CD_IE and TSSDONE are set\n");
+ interrupt_type |= BIT(THC_PIO_DONE_INT);
+ }
+
+ if (read_sts_1 & THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS) {
+ dev_dbg(dev->dev, "Got RxDMA1 Read Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_1_OFFSET, read_sts_1);
+
+ interrupt_type |= BIT(THC_RXDMA1_INT);
+ }
+
+ if (read_sts_2 & THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS) {
+ dev_dbg(dev->dev, "Got RxDMA2 Read Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_2_OFFSET, read_sts_2);
+
+ interrupt_type |= BIT(THC_RXDMA2_INT);
+ }
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET, &read_sts_sw);
+
+ if (read_sts_sw & THC_M_PRT_READ_DMA_INT_STS_DMACPL_STS) {
+ dev_dbg(dev->dev, "Got SwDMA Read Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET, read_sts_sw);
+
+ dev->swdma_done = true;
+ wake_up_interruptible(&dev->swdma_complete_wait);
+
+ interrupt_type |= BIT(THC_SWDMA_INT);
+ }
+
+ regmap_read(dev->thc_regmap,
+ THC_M_PRT_WRITE_INT_STS_OFFSET, &write_sts);
+
+ if (write_sts & THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS) {
+ dev_dbg(dev->dev, "Got TxDMA Write complete Interrupt\n");
+
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_WRITE_INT_STS_OFFSET, write_sts);
+
+ dev->write_done = true;
+ wake_up_interruptible(&dev->write_complete_wait);
+
+ interrupt_type |= BIT(THC_TXDMA_INT);
+ }
+
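+	/* Ack each asserted I2C subIP raw interrupt by writing its status bit back */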
+ if (int_sts & THC_M_PRT_INT_STATUS_DEV_RAW_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_DEV_RAW_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_UNDER_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_UNDER_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_OVER_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_OVER_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_FULL_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_FULL_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_OVER_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_OVER_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_EMPTY_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_EMPTY_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_ABRT_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_ABRT_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_ACTIVITY_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_ACTIVITY_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_SCL_STUCK_AT_LOW_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_SCL_STUCK_AT_LOW_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_STOP_DET_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_STOP_DET_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_START_DET_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_START_DET_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+ if (int_sts & THC_M_PRT_INT_STATUS_THC_I2C_IC_MST_ON_HOLD_INT_STS) {
+ regmap_write(dev->thc_regmap, THC_M_PRT_INT_STATUS_OFFSET,
+ THC_M_PRT_INT_STATUS_THC_I2C_IC_MST_ON_HOLD_INT_STS);
+ interrupt_type |= BIT(THC_I2CSUBIP_INT);
+ }
+
+ if (!interrupt_type)
+ interrupt_type |= BIT(THC_UNKNOWN_INT);
+
+ return interrupt_type;
+}
+EXPORT_SYMBOL_NS_GPL(thc_interrupt_handler, "INTEL_THC");
+
+/**
+ * thc_port_select - Set THC port type
+ *
+ * @dev: The pointer of THC private device context
+ * @port_type: THC port type to use for current device
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_port_select(struct thc_device *dev, enum thc_port_type port_type)
+{
+ u32 ctrl, mask;
+
+ if (port_type == THC_PORT_TYPE_SPI) {
+ dev_dbg(dev->dev, "Set THC port type to SPI\n");
+ dev->port_type = THC_PORT_TYPE_SPI;
+
+ /* Enable delay of CS assertion and set to default value */
+ ctrl = THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_EN |
+ FIELD_PREP(THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_VAL,
+ THC_CSA_CK_DELAY_VAL_DEFAULT);
+ mask = THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_EN |
+ THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_VAL;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SPI_DUTYC_CFG_OFFSET,
+ mask, ctrl);
+ } else if (port_type == THC_PORT_TYPE_I2C) {
+ dev_dbg(dev->dev, "Set THC port type to I2C\n");
+ dev->port_type = THC_PORT_TYPE_I2C;
+
+ /* Set THC transition arbitration policy to frame boundary for I2C */
+ ctrl = FIELD_PREP(THC_M_PRT_CONTROL_THC_ARB_POLICY,
+ THC_ARB_POLICY_FRAME_BOUNDARY);
+ mask = THC_M_PRT_CONTROL_THC_ARB_POLICY;
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, mask, ctrl);
+ } else {
+ dev_err(dev->dev, "unsupported THC port type: %d\n", port_type);
+ return -EINVAL;
+ }
+
+ ctrl = FIELD_PREP(THC_M_PRT_CONTROL_PORT_TYPE, port_type);
+ mask = THC_M_PRT_CONTROL_PORT_TYPE;
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, mask, ctrl);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_port_select, "INTEL_THC");
+
+#define THC_SPI_FREQUENCY_7M 7812500
+#define THC_SPI_FREQUENCY_15M 15625000
+#define THC_SPI_FREQUENCY_17M 17857100
+#define THC_SPI_FREQUENCY_20M 20833000
+#define THC_SPI_FREQUENCY_25M 25000000
+#define THC_SPI_FREQUENCY_31M 31250000
+#define THC_SPI_FREQUENCY_41M 41666700
+
+#define THC_SPI_LOW_FREQUENCY THC_SPI_FREQUENCY_17M
+
+static u8 thc_get_spi_freq_div_val(struct thc_device *dev, u32 spi_freq_val)
+{
+ int frequency[] = {
+ THC_SPI_FREQUENCY_7M,
+ THC_SPI_FREQUENCY_15M,
+ THC_SPI_FREQUENCY_17M,
+ THC_SPI_FREQUENCY_20M,
+ THC_SPI_FREQUENCY_25M,
+ THC_SPI_FREQUENCY_31M,
+ THC_SPI_FREQUENCY_41M,
+ };
+ u8 frequency_div[] = {
+ THC_SPI_FRQ_DIV_2,
+ THC_SPI_FRQ_DIV_1,
+ THC_SPI_FRQ_DIV_7,
+ THC_SPI_FRQ_DIV_6,
+ THC_SPI_FRQ_DIV_5,
+ THC_SPI_FRQ_DIV_4,
+ THC_SPI_FRQ_DIV_3,
+ };
+ int size = ARRAY_SIZE(frequency);
+ u32 closest_freq;
+ u8 freq_div;
+ int i;
+
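+	/*
+	 * Scan from the highest frequency downwards and pick the largest
+	 * supported frequency that does not exceed the requested one.
+	 */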
+ for (i = size - 1; i >= 0; i--)
+ if ((int)spi_freq_val - frequency[i] >= 0)
+ break;
+
+ if (i < 0) {
+		dev_err_once(dev->dev, "Unsupported SPI frequency %d\n", spi_freq_val);
+ return THC_SPI_FRQ_RESERVED;
+ }
+
+ closest_freq = frequency[i];
+ freq_div = frequency_div[i];
+
+ dev_dbg(dev->dev,
+ "Setting SPI frequency: spi_freq_val = %u, Closest freq = %u\n",
+ spi_freq_val, closest_freq);
+
+ return freq_div;
+}
+
+/**
+ * thc_spi_read_config - Configure SPI bus read attributes
+ *
+ * @dev: The pointer of THC private device context
+ * @spi_freq_val: SPI read frequency value
+ * @io_mode: SPI read IO mode
+ * @opcode: Read opcode
+ * @spi_rd_mps: SPI read max packet size
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_spi_read_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_rd_mps)
+{
+ bool is_low_freq = false;
+ u32 cfg, mask;
+ u8 freq_div;
+
+ freq_div = thc_get_spi_freq_div_val(dev, spi_freq_val);
+ if (freq_div == THC_SPI_FRQ_RESERVED)
+ return -EINVAL;
+
+ if (spi_freq_val < THC_SPI_LOW_FREQUENCY)
+ is_low_freq = true;
+
+ cfg = FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TCRF, freq_div) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TRMODE, io_mode) |
+ (is_low_freq ? THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN : 0) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_RD_MPS, spi_rd_mps);
+ mask = THC_M_PRT_SPI_CFG_SPI_TCRF |
+ THC_M_PRT_SPI_CFG_SPI_TRMODE |
+ THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN |
+ THC_M_PRT_SPI_CFG_SPI_RD_MPS;
+
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SPI_CFG_OFFSET, mask, cfg);
+
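+	/* Place the opcode into the field matching the selected IO mode */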
+ if (io_mode == THC_QUAD_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_QIO, opcode);
+ else if (io_mode == THC_DUAL_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_DIO, opcode);
+ else
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_SIO, opcode);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET, opcode);
+ regmap_write(dev->thc_regmap, THC_M_PRT_SPI_DMARD_OPCODE_OFFSET, opcode);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_spi_read_config, "INTEL_THC");
+
+/**
+ * thc_spi_write_config - Configure SPI bus write attributes
+ *
+ * @dev: The pointer of THC private device context
+ * @spi_freq_val: SPI write frequency value
+ * @io_mode: SPI write IO mode
+ * @opcode: Write opcode
+ * @spi_wr_mps: SPI write max packet size
+ * @perf_limit: Performance limitation in unit of 10us
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_spi_write_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_wr_mps,
+ u32 perf_limit)
+{
+ bool is_low_freq = false;
+ u32 cfg, mask;
+ u8 freq_div;
+
+ freq_div = thc_get_spi_freq_div_val(dev, spi_freq_val);
+ if (freq_div == THC_SPI_FRQ_RESERVED)
+ return -EINVAL;
+
+ if (spi_freq_val < THC_SPI_LOW_FREQUENCY)
+ is_low_freq = true;
+
+ cfg = FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TCWF, freq_div) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_TWMODE, io_mode) |
+ (is_low_freq ? THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN : 0) |
+ FIELD_PREP(THC_M_PRT_SPI_CFG_SPI_WR_MPS, spi_wr_mps);
+ mask = THC_M_PRT_SPI_CFG_SPI_TCWF |
+ THC_M_PRT_SPI_CFG_SPI_TWMODE |
+ THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN |
+ THC_M_PRT_SPI_CFG_SPI_WR_MPS;
+
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_SPI_CFG_OFFSET, mask, cfg);
+
+ if (io_mode == THC_QUAD_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_QIO, opcode);
+ else if (io_mode == THC_DUAL_IO)
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_DIO, opcode);
+ else
+ opcode = FIELD_PREP(THC_M_PRT_SPI_ICRRD_OPCODE_SPI_SIO, opcode);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SPI_WR_OPCODE_OFFSET, opcode);
+
+ dev->perf_limit = perf_limit;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_spi_write_config, "INTEL_THC");
+
+/**
+ * thc_spi_input_output_address_config - Configure SPI input and output addresses
+ *
+ * @dev: the pointer of THC private device context
+ * @input_hdr_addr: input report header address
+ * @input_bdy_addr: input report body address
+ * @output_addr: output report address
+ */
+void thc_spi_input_output_address_config(struct thc_device *dev, u32 input_hdr_addr,
+ u32 input_bdy_addr, u32 output_addr)
+{
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_DEV_INT_CAUSE_ADDR_OFFSET, input_hdr_addr);
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_RD_BULK_ADDR_1_OFFSET, input_bdy_addr);
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_RD_BULK_ADDR_2_OFFSET, input_bdy_addr);
+ regmap_write(dev->thc_regmap,
+ THC_M_PRT_WR_BULK_ADDR_OFFSET, output_addr);
+}
+EXPORT_SYMBOL_NS_GPL(thc_spi_input_output_address_config, "INTEL_THC");
+
+static int thc_i2c_subip_pio_read(struct thc_device *dev, const u32 address,
+ u32 *size, u32 *buffer)
+{
+ int ret;
+
+ if (!size || *size == 0 || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %p, buffer %p\n",
+ size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ ret = prepare_pio(dev, THC_PIO_OP_I2C_SUBSYSTEM_READ, address, *size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, 0, NULL);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, buffer, size);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+
+ if (ret)
+ dev_err_once(dev->dev, "Read THC I2C SubIP register failed %d, offset %u\n",
+ ret, address);
+
+ return ret;
+}
+
+static int thc_i2c_subip_pio_write(struct thc_device *dev, const u32 address,
+ const u32 size, const u32 *buffer)
+{
+ int ret;
+
+ if (size == 0 || !buffer) {
+ dev_err(dev->dev, "Invalid input parameters, size %u, buffer %p\n",
+ size, buffer);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ ret = prepare_pio(dev, THC_PIO_OP_I2C_SUBSYSTEM_WRITE, address, size);
+ if (ret < 0)
+ goto end;
+
+ pio_start(dev, size, buffer);
+
+ ret = pio_wait(dev);
+ if (ret < 0)
+ goto end;
+
+ ret = pio_complete(dev, NULL, NULL);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+
+ if (ret)
+ dev_err_once(dev->dev, "Write THC I2C SubIP register failed %d, offset %u\n",
+ ret, address);
+
+ return ret;
+}
+
+#define I2C_SUBIP_CON_DEFAULT 0x663
+#define I2C_SUBIP_INT_MASK_DEFAULT 0x7FFF
+#define I2C_SUBIP_RX_TL_DEFAULT 62
+#define I2C_SUBIP_TX_TL_DEFAULT 0
+#define I2C_SUBIP_DMA_TDLR_DEFAULT 7
+#define I2C_SUBIP_DMA_RDLR_DEFAULT 7
+
+static int thc_i2c_subip_set_speed(struct thc_device *dev, const u32 speed,
+ const u32 hcnt, const u32 lcnt)
+{
+ u32 hcnt_offset, lcnt_offset;
+ u32 val;
+ int ret;
+
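+	/* Each I2C speed mode has its own pair of SCL high/low count registers */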
+ switch (speed) {
+ case THC_I2C_STANDARD:
+ hcnt_offset = THC_I2C_IC_SS_SCL_HCNT_OFFSET;
+ lcnt_offset = THC_I2C_IC_SS_SCL_LCNT_OFFSET;
+ break;
+
+ case THC_I2C_FAST_AND_PLUS:
+ hcnt_offset = THC_I2C_IC_FS_SCL_HCNT_OFFSET;
+ lcnt_offset = THC_I2C_IC_FS_SCL_LCNT_OFFSET;
+ break;
+
+ case THC_I2C_HIGH_SPEED:
+ hcnt_offset = THC_I2C_IC_HS_SCL_HCNT_OFFSET;
+ lcnt_offset = THC_I2C_IC_HS_SCL_LCNT_OFFSET;
+ break;
+
+ default:
+ dev_err_once(dev->dev, "Unsupported i2c speed %d\n", speed);
+		return -EINVAL;
+ }
+
+ ret = thc_i2c_subip_pio_write(dev, hcnt_offset, sizeof(u32), &hcnt);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_pio_write(dev, lcnt_offset, sizeof(u32), &lcnt);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_CON_DEFAULT & ~THC_I2C_IC_CON_SPEED;
+ val |= FIELD_PREP(THC_I2C_IC_CON_SPEED, speed);
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_CON_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static u32 i2c_subip_regs[] = {
+ THC_I2C_IC_CON_OFFSET,
+ THC_I2C_IC_TAR_OFFSET,
+ THC_I2C_IC_INTR_MASK_OFFSET,
+ THC_I2C_IC_RX_TL_OFFSET,
+ THC_I2C_IC_TX_TL_OFFSET,
+ THC_I2C_IC_DMA_CR_OFFSET,
+ THC_I2C_IC_DMA_TDLR_OFFSET,
+ THC_I2C_IC_DMA_RDLR_OFFSET,
+ THC_I2C_IC_SS_SCL_HCNT_OFFSET,
+ THC_I2C_IC_SS_SCL_LCNT_OFFSET,
+ THC_I2C_IC_FS_SCL_HCNT_OFFSET,
+ THC_I2C_IC_FS_SCL_LCNT_OFFSET,
+ THC_I2C_IC_HS_SCL_HCNT_OFFSET,
+ THC_I2C_IC_HS_SCL_LCNT_OFFSET,
+ THC_I2C_IC_ENABLE_OFFSET,
+};
+
+/**
+ * thc_i2c_subip_init - Initialize and configure THC I2C subsystem
+ *
+ * @dev: The pointer of THC private device context
+ * @target_address: Slave address of touch device (TIC)
+ * @speed: I2C bus frequency speed mode
+ * @hcnt: I2C clock SCL high count
+ * @lcnt: I2C clock SCL low count
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_subip_init(struct thc_device *dev, const u32 target_address,
+ const u32 speed, const u32 hcnt, const u32 lcnt)
+{
+ u32 read_size = sizeof(u32);
+ u32 val;
+ int ret;
+
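+	/*
+	 * The I2C subIP must be disabled while the target address, bus speed
+	 * and FIFO/DMA thresholds are reprogrammed, then enabled again.
+	 */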
+ ret = thc_i2c_subip_pio_read(dev, THC_I2C_IC_ENABLE_OFFSET, &read_size, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= ~THC_I2C_IC_ENABLE_ENABLE;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_ENABLE_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_pio_read(dev, THC_I2C_IC_TAR_OFFSET, &read_size, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= ~THC_I2C_IC_TAR_IC_TAR;
+ val |= FIELD_PREP(THC_I2C_IC_TAR_IC_TAR, target_address);
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_TAR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_set_speed(dev, speed, hcnt, lcnt);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_INT_MASK_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_INTR_MASK_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_RX_TL_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_RX_TL_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_TX_TL_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_TX_TL_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = THC_I2C_IC_DMA_CR_RDMAE | THC_I2C_IC_DMA_CR_TDMAE;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_DMA_CR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_DMA_TDLR_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_DMA_TDLR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ val = I2C_SUBIP_DMA_RDLR_DEFAULT;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_DMA_RDLR_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ ret = thc_i2c_subip_pio_read(dev, THC_I2C_IC_ENABLE_OFFSET, &read_size, &val);
+ if (ret < 0)
+ return ret;
+
+ val |= THC_I2C_IC_ENABLE_ENABLE;
+ ret = thc_i2c_subip_pio_write(dev, THC_I2C_IC_ENABLE_OFFSET, sizeof(u32), &val);
+ if (ret < 0)
+ return ret;
+
+ dev->i2c_subip_regs = devm_kzalloc(dev->dev, sizeof(i2c_subip_regs), GFP_KERNEL);
+ if (!dev->i2c_subip_regs)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_subip_init, "INTEL_THC");
+
+/**
+ * thc_i2c_subip_regs_save - Save THC I2C subsystem register values to THC device context
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_subip_regs_save(struct thc_device *dev)
+{
+ int ret;
+ u32 read_size = sizeof(u32);
+
+ for (int i = 0; i < ARRAY_SIZE(i2c_subip_regs); i++) {
+ ret = thc_i2c_subip_pio_read(dev, i2c_subip_regs[i],
+					     &read_size, &dev->i2c_subip_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_subip_regs_save, "INTEL_THC");
+
+/**
+ * thc_i2c_subip_regs_restore - Restore THC I2C subsystem registers from THC device context
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_i2c_subip_regs_restore(struct thc_device *dev)
+{
+ int ret;
+ u32 write_size = sizeof(u32);
+
+ for (int i = 0; i < ARRAY_SIZE(i2c_subip_regs); i++) {
+ ret = thc_i2c_subip_pio_write(dev, i2c_subip_regs[i],
+					      write_size, &dev->i2c_subip_regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_i2c_subip_regs_restore, "INTEL_THC");
+
+MODULE_AUTHOR("Xinpeng Sun <xinpeng.sun@intel.com>");
+MODULE_AUTHOR("Even Xu <even.xu@intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) THC Hardware Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h
new file mode 100644
index 000000000000..0517fee2c668
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _INTEL_THC_DEV_H_
+#define _INTEL_THC_DEV_H_
+
+#include <linux/cdev.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#include "intel-thc-dma.h"
+
+#define THC_REGMAP_COMMON_OFFSET 0x10
+#define THC_REGMAP_MMIO_OFFSET 0x1000
+
+/**
+ * enum thc_port_type - THC port type
+ * @THC_PORT_TYPE_SPI: This port is used for HIDSPI
+ * @THC_PORT_TYPE_I2C: This port is used for HIDI2C
+ */
+enum thc_port_type {
+ THC_PORT_TYPE_SPI = 0,
+ THC_PORT_TYPE_I2C = 1,
+};
+
+/**
+ * enum thc_int_type - THC interrupt flag
+ * @THC_NONDMA_INT: THC non-DMA interrupt
+ * @THC_RXDMA1_INT: THC RxDMA1 interrupt
+ * @THC_RXDMA2_INT: THC RxDMA2 interrupt
+ * @THC_SWDMA_INT: THC SWDMA interrupt
+ * @THC_TXDMA_INT: THC TXDMA interrupt
+ * @THC_PIO_DONE_INT: THC PIO complete interrupt
+ * @THC_I2CSUBIP_INT: THC I2C subsystem interrupt
+ * @THC_TXN_ERR_INT: THC transaction error interrupt
+ * @THC_FATAL_ERR_INT: THC fatal error interrupt
+ * @THC_UNKNOWN_INT: THC unknown or unhandled interrupt
+ */
+enum thc_int_type {
+ THC_NONDMA_INT = 0,
+ THC_RXDMA1_INT = 1,
+ THC_RXDMA2_INT = 2,
+ THC_SWDMA_INT = 3,
+ THC_TXDMA_INT = 4,
+ THC_PIO_DONE_INT = 5,
+ THC_I2CSUBIP_INT = 6,
+ THC_TXN_ERR_INT = 7,
+ THC_FATAL_ERR_INT = 8,
+ THC_UNKNOWN_INT
+};
+
+/**
+ * struct thc_device - THC private device struct
+ * @dev: The underlying device pointer
+ * @thc_regmap: MMIO regmap structure for accessing THC registers
+ * @mmio_addr: MMIO registers address
+ * @thc_bus_lock: mutex lock serializing THC bus (PIO) accesses
+ * @port_type: port type of THC port instance
+ * @pio_int_supported: PIO interrupt supported flag
+ * @dma_ctx: DMA specific data
+ * @write_complete_wait: signal event for DMA write complete
+ * @swdma_complete_wait: signal event for SWDMA sequence complete
+ * @write_done: bool value that indicates if DMA write is done
+ * @swdma_done: bool value that indicates if SWDMA sequence is done
+ * @perf_limit: the delay between read operation and write operation
+ * @i2c_subip_regs: the copy of THC I2C sub-system registers for resuming restore
+ */
+struct thc_device {
+ struct device *dev;
+ struct regmap *thc_regmap;
+ void __iomem *mmio_addr;
+ struct mutex thc_bus_lock;
+ enum thc_port_type port_type;
+ bool pio_int_supported;
+
+ struct thc_dma_context *dma_ctx;
+
+ wait_queue_head_t write_complete_wait;
+ wait_queue_head_t swdma_complete_wait;
+ bool write_done;
+ bool swdma_done;
+
+ u32 perf_limit;
+
+ u32 *i2c_subip_regs;
+};
+
+struct thc_device *thc_dev_init(struct device *device, void __iomem *mem_addr);
+int thc_tic_pio_read(struct thc_device *dev, const u32 address,
+ const u32 size, u32 *actual_size, u32 *buffer);
+int thc_tic_pio_write(struct thc_device *dev, const u32 address,
+ const u32 size, const u32 *buffer);
+int thc_tic_pio_write_and_read(struct thc_device *dev, const u32 address,
+ const u32 write_size, const u32 *write_buffer,
+ const u32 read_size, u32 *actual_size, u32 *read_buffer);
+void thc_interrupt_config(struct thc_device *dev);
+void thc_int_trigger_type_select(struct thc_device *dev, bool edge_trigger);
+void thc_interrupt_enable(struct thc_device *dev, bool int_enable);
+void thc_set_pio_interrupt_support(struct thc_device *dev, bool supported);
+int thc_interrupt_quiesce(const struct thc_device *dev, bool int_quiesce);
+void thc_ltr_config(struct thc_device *dev, u32 active_ltr_us, u32 lp_ltr_us);
+void thc_change_ltr_mode(struct thc_device *dev, u32 ltr_mode);
+void thc_ltr_unconfig(struct thc_device *dev);
+u32 thc_int_cause_read(struct thc_device *dev);
+int thc_interrupt_handler(struct thc_device *dev);
+int thc_port_select(struct thc_device *dev, enum thc_port_type port_type);
+int thc_spi_read_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_rd_mps);
+int thc_spi_write_config(struct thc_device *dev, u32 spi_freq_val,
+ u32 io_mode, u32 opcode, u32 spi_wr_mps, u32 perf_limit);
+void thc_spi_input_output_address_config(struct thc_device *dev, u32 input_hdr_addr,
+ u32 input_bdy_addr, u32 output_addr);
+int thc_i2c_subip_init(struct thc_device *dev, const u32 target_address,
+ const u32 speed, const u32 hcnt, const u32 lcnt);
+int thc_i2c_subip_regs_save(struct thc_device *dev);
+int thc_i2c_subip_regs_restore(struct thc_device *dev);
+
+#endif /* _INTEL_THC_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
new file mode 100644
index 000000000000..eb23bea77686
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Intel Corporation
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/overflow.h>
+#include <linux/regmap.h>
+#include <linux/scatterlist.h>
+
+#include "intel-thc-dev.h"
+#include "intel-thc-dma.h"
+#include "intel-thc-hw.h"
+
+static void dma_set_prd_base_addr(struct thc_device *dev, u64 physical_addr,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 addr_high, addr_low;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ addr_high = upper_32_bits(physical_addr);
+ addr_low = lower_32_bits(physical_addr);
+
+ regmap_write(dev->thc_regmap, dma_config->prd_base_addr_high, addr_high);
+ regmap_write(dev->thc_regmap, dma_config->prd_base_addr_low, addr_low);
+}
+
+static void dma_set_start_bit(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, mask, mbits, data, offset;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ switch (dma_config->dma_channel) {
+ case THC_RXDMA1:
+ case THC_RXDMA2:
+ if (dma_config->dma_channel == THC_RXDMA2) {
+ mbits = FIELD_PREP(THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL,
+ THC_BITMASK_INTERRUPT_TYPE_DATA);
+ mask = THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL;
+ regmap_write_bits(dev->thc_regmap,
+ THC_M_PRT_DEVINT_CFG_1_OFFSET, mask, mbits);
+ }
+
+ mbits = THC_M_PRT_READ_DMA_CNTRL_IE_EOF |
+ THC_M_PRT_READ_DMA_CNTRL_SOO |
+ THC_M_PRT_READ_DMA_CNTRL_IE_STALL |
+ THC_M_PRT_READ_DMA_CNTRL_IE_ERROR |
+ THC_M_PRT_READ_DMA_CNTRL_START;
+
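+		/* Initialize the SW write pointer to the wraparound value before starting */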
+ mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
+ mask |= THC_M_PRT_READ_DMA_CNTRL_INT_SW_DMA_EN;
+ ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
+ offset = dma_config->dma_channel == THC_RXDMA1 ?
+ THC_M_PRT_READ_DMA_CNTRL_1_OFFSET : THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
+ regmap_write_bits(dev->thc_regmap, offset, mask, ctrl);
+ break;
+
+ case THC_SWDMA:
+ mbits = THC_M_PRT_READ_DMA_CNTRL_IE_DMACPL |
+ THC_M_PRT_READ_DMA_CNTRL_IE_IOC |
+ THC_M_PRT_READ_DMA_CNTRL_SOO |
+ THC_M_PRT_READ_DMA_CNTRL_START;
+
+ mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP | mbits;
+ ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, THC_POINTER_WRAPAROUND) | mbits;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
+ mask, ctrl);
+ break;
+
+ case THC_TXDMA:
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET,
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS,
+ THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS);
+
+ /* Select interrupt or polling method upon Write completion */
+ if (dev->dma_ctx->use_write_interrupts)
+ data = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL;
+ else
+ data = 0;
+
+ data |= THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
+ mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL |
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START;
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ mask, data);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void dma_set_prd_control(struct thc_device *dev, u8 entry_count, u8 cb_depth,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, mask;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ if (dma_config->dma_channel == THC_TXDMA) {
+ mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
+ ctrl = FIELD_PREP(THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC, entry_count);
+ } else {
+ mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;
+ ctrl = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PTEC, entry_count) |
+ FIELD_PREP(THC_M_PRT_RPRD_CNTRL_PCD, cb_depth);
+ }
+
+ regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, ctrl);
+}
+
+static void dma_clear_prd_control(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 mask;
+
+ if (!dma_config->is_enabled)
+ return;
+
+ if (dma_config->dma_channel == THC_TXDMA)
+ mask = THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC;
+ else
+ mask = THC_M_PRT_RPRD_CNTRL_PTEC | THC_M_PRT_RPRD_CNTRL_PCD;
+
+ regmap_write_bits(dev->thc_regmap, dma_config->prd_cntrl, mask, 0);
+}
+
+static u8 dma_get_read_pointer(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, read_pointer;
+
+ regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
+ read_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCRP, ctrl);
+
+ dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCRP 0x%x\n",
+ ctrl, dma_config->dma_cntrl, read_pointer);
+
+ return read_pointer;
+}
+
+static u8 dma_get_write_pointer(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, write_pointer;
+
+ regmap_read(dev->thc_regmap, dma_config->dma_cntrl, &ctrl);
+ write_pointer = FIELD_GET(THC_M_PRT_READ_DMA_CNTRL_TPCWP, ctrl);
+
+ dev_dbg(dev->dev, "THC_M_PRT_READ_DMA_CNTRL 0x%x offset 0x%x TPCWP 0x%x\n",
+ ctrl, dma_config->dma_cntrl, write_pointer);
+
+ return write_pointer;
+}
+
+static void dma_set_write_pointer(struct thc_device *dev, u8 value,
+ struct thc_dma_configuration *dma_config)
+{
+ u32 ctrl, mask;
+
+ mask = THC_M_PRT_READ_DMA_CNTRL_TPCWP;
+ ctrl = FIELD_PREP(THC_M_PRT_READ_DMA_CNTRL_TPCWP, value);
+ regmap_write_bits(dev->thc_regmap, dma_config->dma_cntrl, mask, ctrl);
+}
+
+static size_t dma_get_max_packet_size(struct thc_device *dev,
+ struct thc_dma_configuration *dma_config)
+{
+ return dma_config->max_packet_size;
+}
+
+static void dma_set_max_packet_size(struct thc_device *dev, size_t size,
+ struct thc_dma_configuration *dma_config)
+{
+ if (size) {
+ dma_config->max_packet_size = ALIGN(size, SZ_4K);
+ dma_config->is_enabled = true;
+ }
+}
+
+static void thc_copy_one_sgl_to_prd(struct thc_device *dev,
+ struct thc_dma_configuration *config,
+ unsigned int ind)
+{
+ struct thc_prd_table *prd_tbl;
+ struct scatterlist *sg;
+ int j;
+
+ prd_tbl = &config->prd_tbls[ind];
+
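+	/*
+	 * Translate each mapped scatterlist segment into one PRD entry; the
+	 * hardware expects destination addresses shifted right by
+	 * THC_ADDRESS_SHIFT.
+	 */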
+ for_each_sg(config->sgls[ind], sg, config->sgls_nent[ind], j) {
+ prd_tbl->entries[j].dest_addr =
+ sg_dma_address(sg) >> THC_ADDRESS_SHIFT;
+ prd_tbl->entries[j].len = sg_dma_len(sg);
+ prd_tbl->entries[j].hw_status = 0;
+ prd_tbl->entries[j].end_of_prd = 0;
+ }
+
+ /* Set the end_of_prd flag in the last filled entry */
+ if (j > 0)
+ prd_tbl->entries[j - 1].end_of_prd = 1;
+}
+
+static void thc_copy_sgls_to_prd(struct thc_device *dev,
+ struct thc_dma_configuration *config)
+{
+ unsigned int i;
+
+ memset(config->prd_tbls, 0, array_size(PRD_TABLE_SIZE, config->prd_tbl_num));
+
+ for (i = 0; i < config->prd_tbl_num; i++)
+ thc_copy_one_sgl_to_prd(dev, config, i);
+}
+
+static int setup_dma_buffers(struct thc_device *dev,
+ struct thc_dma_configuration *config,
+ enum dma_data_direction dir)
+{
+ size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
+ unsigned int i, nent = PRD_ENTRIES_NUM;
+ dma_addr_t dma_handle;
+ void *cpu_addr;
+ size_t buf_sz;
+ int count;
+
+ if (!config->is_enabled)
+ return 0;
+
+ memset(config->sgls, 0, sizeof(config->sgls));
+ memset(config->sgls_nent, 0, sizeof(config->sgls_nent));
+
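+	/* PRD tables are fetched by the THC HW, so keep them in coherent DMA memory */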
+ cpu_addr = dma_alloc_coherent(dev->dev, prd_tbls_size,
+ &dma_handle, GFP_KERNEL);
+ if (!cpu_addr)
+ return -ENOMEM;
+
+ config->prd_tbls = cpu_addr;
+ config->prd_tbls_dma_handle = dma_handle;
+
+ buf_sz = dma_get_max_packet_size(dev, config);
+
+ /* Allocate and map the scatter-gather lists, one for each PRD table */
+ for (i = 0; i < config->prd_tbl_num; i++) {
+ config->sgls[i] = sgl_alloc(buf_sz, GFP_KERNEL, &nent);
+ if (!config->sgls[i] || nent > PRD_ENTRIES_NUM) {
+ dev_err_once(dev->dev, "sgl_alloc (%uth) failed, nent %u\n",
+ i, nent);
+ return -ENOMEM;
+ }
+		count = dma_map_sg(dev->dev, config->sgls[i], nent, dir);
+		if (!count) {
+			dev_err_once(dev->dev, "dma_map_sg failed for PRD table %u\n", i);
+			return -ENOMEM;
+		}
+
+		config->sgls_nent[i] = count;
+ }
+
+ thc_copy_sgls_to_prd(dev, config);
+
+ return 0;
+}
+
+static void thc_reset_dma_settings(struct thc_device *dev)
+{
+ /* Stop all DMA channels and reset DMA read pointers */
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_WRITE_DMA_CNTRL_OFFSET,
+ THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START, 0);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR,
+ THC_M_PRT_READ_DMA_CNTRL_TPCPR);
+}
+
+static void release_dma_buffers(struct thc_device *dev,
+ struct thc_dma_configuration *config)
+{
+ size_t prd_tbls_size = array_size(PRD_TABLE_SIZE, config->prd_tbl_num);
+ unsigned int i;
+
+ if (!config->is_enabled)
+ return;
+
+ for (i = 0; i < config->prd_tbl_num; i++) {
+		if (!config->sgls[i] || !config->sgls_nent[i])
+ continue;
+
+ dma_unmap_sg(dev->dev, config->sgls[i],
+ config->sgls_nent[i],
+ config->dir);
+
+ sgl_free(config->sgls[i]);
+ config->sgls[i] = NULL;
+ }
+
+	if (config->prd_tbls) {
+		memset(config->prd_tbls, 0, prd_tbls_size);
+ dma_free_coherent(dev->dev, prd_tbls_size, config->prd_tbls,
+ config->prd_tbls_dma_handle);
+ config->prd_tbls = NULL;
+ config->prd_tbls_dma_handle = 0;
+ }
+}
+
+struct thc_dma_context *thc_dma_init(struct thc_device *dev)
+{
+ struct thc_dma_context *dma_ctx;
+
+ dma_ctx = devm_kzalloc(dev->dev, sizeof(*dma_ctx), GFP_KERNEL);
+ if (!dma_ctx)
+ return NULL;
+
+ dev->dma_ctx = dma_ctx;
+
+ dma_ctx->dma_config[THC_RXDMA1].dma_channel = THC_RXDMA1;
+ dma_ctx->dma_config[THC_RXDMA2].dma_channel = THC_RXDMA2;
+ dma_ctx->dma_config[THC_TXDMA].dma_channel = THC_TXDMA;
+ dma_ctx->dma_config[THC_SWDMA].dma_channel = THC_SWDMA;
+
+ dma_ctx->dma_config[THC_RXDMA1].dir = DMA_FROM_DEVICE;
+ dma_ctx->dma_config[THC_RXDMA2].dir = DMA_FROM_DEVICE;
+ dma_ctx->dma_config[THC_TXDMA].dir = DMA_TO_DEVICE;
+ dma_ctx->dma_config[THC_SWDMA].dir = DMA_FROM_DEVICE;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_tbl_num = PRD_TABLES_NUM;
+ dma_ctx->dma_config[THC_RXDMA2].prd_tbl_num = PRD_TABLES_NUM;
+ dma_ctx->dma_config[THC_TXDMA].prd_tbl_num = 1;
+ dma_ctx->dma_config[THC_SWDMA].prd_tbl_num = 1;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].prd_base_addr_high = THC_M_PRT_WPRD_BA_HI_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].prd_base_addr_high = THC_M_PRT_RPRD_BA_HI_SW_OFFSET;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].prd_base_addr_low = THC_M_PRT_WPRD_BA_LOW_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].prd_base_addr_low = THC_M_PRT_RPRD_BA_LOW_SW_OFFSET;
+
+ dma_ctx->dma_config[THC_RXDMA1].prd_cntrl = THC_M_PRT_RPRD_CNTRL_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].prd_cntrl = THC_M_PRT_RPRD_CNTRL_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].prd_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].prd_cntrl = THC_M_PRT_RPRD_CNTRL_SW_OFFSET;
+
+ dma_ctx->dma_config[THC_RXDMA1].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_1_OFFSET;
+ dma_ctx->dma_config[THC_RXDMA2].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_2_OFFSET;
+ dma_ctx->dma_config[THC_TXDMA].dma_cntrl = THC_M_PRT_WRITE_DMA_CNTRL_OFFSET;
+ dma_ctx->dma_config[THC_SWDMA].dma_cntrl = THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET;
+
+ /* Enable write DMA completion interrupt by default */
+ dma_ctx->use_write_interrupts = 1;
+
+ return dma_ctx;
+}
+
+/**
+ * thc_dma_set_max_packet_sizes - Set max packet sizes for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ * @mps_read1: RxDMA1 max packet size
+ * @mps_read2: RxDMA2 max packet size
+ * @mps_write: TxDMA max packet size
+ * @mps_swdma: Software DMA max packet size
+ *
+ * If an mps value is not 0, the corresponding DMA channel is used; setting
+ * its max packet size also flags the channel as enabled.
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_set_max_packet_sizes(struct thc_device *dev, size_t mps_read1,
+ size_t mps_read2, size_t mps_write,
+ size_t mps_swdma)
+{
+ if (!dev->dma_ctx) {
+ dev_err_once(dev->dev,
+ "Cannot set max packet sizes because DMA context is NULL!\n");
+ return -EINVAL;
+ }
+
+ dma_set_max_packet_size(dev, mps_read1, &dev->dma_ctx->dma_config[THC_RXDMA1]);
+ dma_set_max_packet_size(dev, mps_read2, &dev->dma_ctx->dma_config[THC_RXDMA2]);
+ dma_set_max_packet_size(dev, mps_write, &dev->dma_ctx->dma_config[THC_TXDMA]);
+ dma_set_max_packet_size(dev, mps_swdma, &dev->dma_ctx->dma_config[THC_SWDMA]);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_set_max_packet_sizes, "INTEL_THC");
+
+/**
+ * thc_dma_allocate - Allocate DMA buffers for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_allocate(struct thc_device *dev)
+{
+ int ret, chan;
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
+ ret = setup_dma_buffers(dev, &dev->dma_ctx->dma_config[chan],
+ dev->dma_ctx->dma_config[chan].dir);
+ if (ret < 0) {
+ dev_err_once(dev->dev, "DMA setup failed for DMA channel %d\n", chan);
+ goto release_bufs;
+ }
+ }
+
+ return 0;
+
+release_bufs:
+ while (chan--)
+ release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan]);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_allocate, "INTEL_THC");
+
+/**
+ * thc_dma_release - Release DMA buffers for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_dma_release(struct thc_device *dev)
+{
+ int chan;
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++)
+ release_dma_buffers(dev, &dev->dma_ctx->dma_config[chan]);
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_release, "INTEL_THC");
+
+static int calc_prd_entries_num(struct thc_prd_table *prd_tbl,
+ size_t mes_len, u8 *nent)
+{
+ *nent = DIV_ROUND_UP(mes_len, THC_MIN_BYTES_PER_SG_LIST_ENTRY);
+ if (*nent > PRD_ENTRIES_NUM)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static size_t calc_message_len(struct thc_prd_table *prd_tbl, u8 *nent)
+{
+ size_t mes_len = 0;
+ unsigned int j;
+
+ for (j = 0; j < PRD_ENTRIES_NUM; j++) {
+ mes_len += prd_tbl->entries[j].len;
+ if (prd_tbl->entries[j].end_of_prd)
+ break;
+ }
+
+ *nent = j + 1;
+
+ return mes_len;
+}
+
+/**
+ * thc_dma_configure - Configure DMA settings for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_configure(struct thc_device *dev)
+{
+ struct thc_dma_context *dma_ctx = dev->dma_ctx;
+ int chan;
+
+ thc_reset_dma_settings(dev);
+
+ if (!dma_ctx) {
+ dev_err_once(dev->dev, "Cannot do DMA configure because DMA context is NULL\n");
+ return -EINVAL;
+ }
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
+ dma_set_prd_base_addr(dev,
+ dma_ctx->dma_config[chan].prd_tbls_dma_handle,
+ &dma_ctx->dma_config[chan]);
+
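+		/* PTEC and PCD are programmed as (count - 1) encodings */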
+ dma_set_prd_control(dev, PRD_ENTRIES_NUM - 1,
+ dma_ctx->dma_config[chan].prd_tbl_num - 1,
+ &dma_ctx->dma_config[chan]);
+ }
+
+ /* Start read2 DMA engine */
+ dma_set_start_bit(dev, &dma_ctx->dma_config[THC_RXDMA2]);
+
+ dev_dbg(dev->dev, "DMA configured successfully!\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_configure, "INTEL_THC");
+
+/**
+ * thc_dma_unconfigure - Unconfigure DMA settings for all DMA engines
+ *
+ * @dev: The pointer of THC private device context
+ */
+void thc_dma_unconfigure(struct thc_device *dev)
+{
+ int chan;
+
+ for (chan = 0; chan < MAX_THC_DMA_CHANNEL; chan++) {
+ dma_set_prd_base_addr(dev, 0, &dev->dma_ctx->dma_config[chan]);
+ dma_clear_prd_control(dev, &dev->dma_ctx->dma_config[chan]);
+ }
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_1_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_READ_DMA_CNTRL_2_OFFSET,
+ THC_M_PRT_READ_DMA_CNTRL_START, 0);
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_unconfigure, "INTEL_THC");
+
+static int thc_wait_for_dma_pause(struct thc_device *dev, enum thc_dma_channel channel)
+{
+ u32 ctrl_reg, sts_reg, sts;
+ int ret;
+
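+	/* Clear the START bit, then poll until the engine's ACTIVE status deasserts */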
+ ctrl_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_CNTRL_1_OFFSET :
+ ((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_CNTRL_2_OFFSET :
+ THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET);
+
+ regmap_write_bits(dev->thc_regmap, ctrl_reg, THC_M_PRT_READ_DMA_CNTRL_START, 0);
+
+ sts_reg = (channel == THC_RXDMA1) ? THC_M_PRT_READ_DMA_INT_STS_1_OFFSET :
+ ((channel == THC_RXDMA2) ? THC_M_PRT_READ_DMA_INT_STS_2_OFFSET :
+ THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET);
+
+ ret = regmap_read_poll_timeout(dev->thc_regmap, sts_reg, sts,
+ !(sts & THC_M_PRT_READ_DMA_INT_STS_ACTIVE),
+ THC_DEFAULT_RXDMA_POLLING_US_INTERVAL,
+ THC_DEFAULT_RXDMA_POLLING_US_TIMEOUT);
+
+ if (ret) {
+ dev_err_once(dev->dev,
+ "Timeout while waiting for DMA %d stop\n", channel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int read_dma_buffer(struct thc_device *dev,
+ struct thc_dma_configuration *read_config,
+ u8 prd_table_index, void *read_buff)
+{
+ struct thc_prd_table *prd_tbl;
+ struct scatterlist *sg;
+ size_t mes_len, ret;
+ u8 nent;
+
+ if (prd_table_index >= read_config->prd_tbl_num) {
+ dev_err_once(dev->dev, "PRD table index %d too big\n", prd_table_index);
+ return -EINVAL;
+ }
+
+ prd_tbl = &read_config->prd_tbls[prd_table_index];
+ mes_len = calc_message_len(prd_tbl, &nent);
+ if (mes_len > read_config->max_packet_size) {
+ dev_err(dev->dev,
+			"Message length %zu is bigger than buffer length %zu\n",
+ mes_len, read_config->max_packet_size);
+ return -EMSGSIZE;
+ }
+
+ sg = read_config->sgls[prd_table_index];
+ ret = sg_copy_to_buffer(sg, nent, read_buff, mes_len);
+ if (ret != mes_len) {
+ dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
+ ret, mes_len);
+ return -EIO;
+ }
+
+ return mes_len;
+}
+
+static void update_write_pointer(struct thc_device *dev,
+ struct thc_dma_configuration *read_config)
+{
+ u8 write_ptr = dma_get_write_pointer(dev, read_config);
+
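+	/*
+	 * The pointer carries a wraparound bit on top of the PRD table index:
+	 * when the increment reaches a wraparound boundary, restart the index
+	 * at 0 with the wraparound bit toggled.
+	 */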
+ if (write_ptr + 1 == THC_WRAPAROUND_VALUE_ODD)
+ dma_set_write_pointer(dev, THC_POINTER_WRAPAROUND, read_config);
+ else if (write_ptr + 1 == THC_WRAPAROUND_VALUE_EVEN)
+ dma_set_write_pointer(dev, 0, read_config);
+ else
+ dma_set_write_pointer(dev, write_ptr + 1, read_config);
+}
+
+static int is_dma_buf_empty(struct thc_device *dev,
+ struct thc_dma_configuration *read_config,
+ u8 *read_ptr, u8 *write_ptr)
+{
+ *read_ptr = dma_get_read_pointer(dev, read_config);
+ *write_ptr = dma_get_write_pointer(dev, read_config);
+
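+	/*
+	 * The buffer is empty when both pointers reference the same PRD table
+	 * entry but their wraparound bits differ.
+	 */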
+ if ((*read_ptr & THC_POINTER_MASK) == (*write_ptr & THC_POINTER_MASK))
+ if (*read_ptr != *write_ptr)
+ return true;
+
+ return false;
+}
+
+static int thc_dma_read(struct thc_device *dev,
+ struct thc_dma_configuration *read_config,
+ void *read_buff, size_t *read_len, int *read_finished)
+{
+ u8 read_ptr, write_ptr, prd_table_index;
+ int status;
+
+ if (!is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr)) {
+ prd_table_index = write_ptr & THC_POINTER_MASK;
+
+ status = read_dma_buffer(dev, read_config, prd_table_index, read_buff);
+ if (status <= 0) {
+ dev_err_once(dev->dev, "read DMA buffer failed %d\n", status);
+ return -EIO;
+ }
+
+ *read_len = status;
+
+ /* Clear the relevant PRD table */
+ thc_copy_one_sgl_to_prd(dev, read_config, prd_table_index);
+
+ /* Increment the write pointer to let the HW know we have processed this PRD */
+ update_write_pointer(dev, read_config);
+ }
+
+ /*
+ * This function only reads one frame from the PRD table per call, so we need
+ * to check whether all DMA'd data has been read out and return the flag to
+ * the caller. The caller should repeatedly call thc_dma_read() until all
+ * DMA'd data is handled.
+ */
+ if (read_finished)
+ *read_finished = is_dma_buf_empty(dev, read_config, &read_ptr, &write_ptr) ? 1 : 0;
+
+ return 0;
+}
+
+/**
+ * thc_rxdma_read - Read data from RXDMA buffer
+ *
+ * @dev: The pointer of THC private device context
+ * @dma_channel: The RXDMA engine to read data from
+ * @read_buff: The pointer of the read data buffer
+ * @read_len: The pointer of the read data length
+ * @read_finished: The pointer of the flag indicating if all pending data has been read out
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_rxdma_read(struct thc_device *dev, enum thc_dma_channel dma_channel,
+ void *read_buff, size_t *read_len, int *read_finished)
+{
+ struct thc_dma_configuration *dma_config;
+ int ret;
+
+ dma_config = &dev->dma_ctx->dma_config[dma_channel];
+
+ if (!dma_config->is_enabled) {
+ dev_err_once(dev->dev, "The DMA channel %d is not enabled", dma_channel);
+ return -EINVAL;
+ }
+
+ if (!read_buff || !read_len) {
+ dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
+ read_buff, read_len);
+ return -EINVAL;
+ }
+
+ if (dma_channel >= THC_TXDMA) {
+ dev_err(dev->dev, "Unsupported DMA channel for RxDMA read, %d\n", dma_channel);
+ return -EINVAL;
+ }
+
+ ret = thc_dma_read(dev, dma_config, read_buff, read_len, read_finished);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_rxdma_read, "INTEL_THC");
+
+static int thc_swdma_read_start(struct thc_device *dev, void *write_buff,
+ size_t write_len, u32 *prd_tbl_len)
+{
+ u32 mask, val, data0 = 0, data1 = 0;
+ int ret;
+
+ ret = thc_interrupt_quiesce(dev, true);
+ if (ret)
+ return ret;
+
+ if (thc_wait_for_dma_pause(dev, THC_RXDMA1) || thc_wait_for_dma_pause(dev, THC_RXDMA2))
+ return -EIO;
+
+ thc_reset_dma_settings(dev);
+
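+	/*
+	 * Program the SWDMA write byte count; when the caller doesn't supply
+	 * a PRD table length, set RX_DLEN_EN to let HW determine the receive
+	 * length itself.
+	 */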
+ mask = THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC |
+ THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN;
+ val = FIELD_PREP(THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC, write_len) |
+ ((!prd_tbl_len) ? THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN : 0);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_RPRD_CNTRL_SW_OFFSET,
+ mask, val);
+
+ if (prd_tbl_len) {
+ mask = THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN;
+ val = FIELD_PREP(THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN,
+ *prd_tbl_len);
+ regmap_write_bits(dev->thc_regmap, THC_M_PRT_SW_DMA_PRD_TABLE_LEN_OFFSET,
+ mask, val);
+ }
+
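+	/*
+	 * Pack up to 8 bytes of the write payload into the SW sequencer
+	 * DATA0/DATA1 registers, least significant byte first.
+	 */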
+ if (write_len <= sizeof(u32)) {
+ for (int i = 0; i < write_len; i++)
+ data0 |= *(((u8 *)write_buff) + i) << (i * 8);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);
+ } else if (write_len <= 2 * sizeof(u32)) {
+ data0 = *(u32 *)write_buff;
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET, data0);
+
+ for (int i = 0; i < write_len - sizeof(u32); i++)
+ data1 |= *(((u8 *)write_buff) + sizeof(u32) + i) << (i * 8);
+
+ regmap_write(dev->thc_regmap, THC_M_PRT_SW_SEQ_DATA1_OFFSET, data1);
+ }
+ dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_SWDMA]);
+
+ return 0;
+}
+
+static int thc_swdma_read_completion(struct thc_device *dev)
+{
+ int ret;
+
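+	/*
+	 * Wait for the SWDMA engine to pause, restore the default DMA
+	 * settings, restart the read2 DMA engine, then release the
+	 * interrupt quiesce.
+	 */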
+ ret = thc_wait_for_dma_pause(dev, THC_SWDMA);
+ if (ret)
+ return ret;
+
+ thc_reset_dma_settings(dev);
+
+ dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_RXDMA2]);
+
+ ret = thc_interrupt_quiesce(dev, false);
+
+ return ret;
+}
+
+/**
+ * thc_swdma_read - Use software DMA to read data from touch device
+ *
+ * @dev: The pointer of THC private device context
+ * @write_buff: The pointer of write buffer for SWDMA sequence
+ * @write_len: The write data length for SWDMA sequence
+ * @prd_tbl_len: The PRD table length for the SWDMA engine; can be set to NULL
+ * @read_buff: The pointer of the read data buffer
+ * @read_len: The pointer of the read data length
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_swdma_read(struct thc_device *dev, void *write_buff, size_t write_len,
+ u32 *prd_tbl_len, void *read_buff, size_t *read_len)
+{
+ int ret;
+
+ if (!(&dev->dma_ctx->dma_config[THC_SWDMA])->is_enabled) {
+ dev_err_once(dev->dev, "The SWDMA channel is not enabled");
+ return -EINVAL;
+ }
+
+ if (!read_buff || !read_len) {
+ dev_err(dev->dev, "Invalid input parameters, read_buff %p, read_len %p\n",
+ read_buff, read_len);
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ dev->swdma_done = false;
+
+ ret = thc_swdma_read_start(dev, write_buff, write_len, prd_tbl_len);
+ if (ret)
+ goto end;
+
+ ret = wait_event_interruptible_timeout(dev->swdma_complete_wait, dev->swdma_done, 1 * HZ);
+ if (ret <= 0 || !dev->swdma_done) {
+ dev_err_once(dev->dev, "timeout for waiting SWDMA completion\n");
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ ret = thc_dma_read(dev, &dev->dma_ctx->dma_config[THC_SWDMA], read_buff, read_len, NULL);
+ if (ret)
+ goto end;
+
+ ret = thc_swdma_read_completion(dev);
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_swdma_read, "INTEL_THC");
+
+static int write_dma_buffer(struct thc_device *dev,
+ void *buffer, size_t buf_len)
+{
+ struct thc_dma_configuration *write_config = &dev->dma_ctx->dma_config[THC_TXDMA];
+ struct thc_prd_table *prd_tbl;
+ struct scatterlist *sg;
+ unsigned long len_left;
+ size_t ret;
+ u8 nent;
+ int i;
+
+ /* There is only one PRD table for write */
+ prd_tbl = &write_config->prd_tbls[0];
+
+ if (calc_prd_entries_num(prd_tbl, buf_len, &nent) < 0) {
+ dev_err(dev->dev, "Tx message length too big (%zu)\n", buf_len);
+ return -EOVERFLOW;
+ }
+
+ sg = write_config->sgls[0];
+ ret = sg_copy_from_buffer(sg, nent, buffer, buf_len);
+ if (ret != buf_len) {
+ dev_err_once(dev->dev, "Copied %zu bytes instead of requested %zu\n",
+ ret, buf_len);
+ return -EIO;
+ }
+
+ prd_tbl = &write_config->prd_tbls[0];
+ len_left = buf_len;
+
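+	/*
+	 * Mirror each mapped SG segment into a PRD entry; the entry covering
+	 * the tail of the buffer is truncated and flagged end_of_prd.
+	 */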
+ for_each_sg(write_config->sgls[0], sg, write_config->sgls_nent[0], i) {
+ if (sg_dma_address(sg) == 0 || sg_dma_len(sg) == 0) {
+ dev_err_once(dev->dev, "SGList: zero address or length\n");
+ return -EINVAL;
+ }
+
+ prd_tbl->entries[i].dest_addr =
+ sg_dma_address(sg) >> THC_ADDRESS_SHIFT;
+
+ if (len_left < sg_dma_len(sg)) {
+ prd_tbl->entries[i].len = len_left;
+ prd_tbl->entries[i].end_of_prd = 1;
+ break;
+ }
+
+ prd_tbl->entries[i].len = sg_dma_len(sg);
+ prd_tbl->entries[i].end_of_prd = 0;
+
+ len_left -= sg_dma_len(sg);
+ }
+
+ dma_set_prd_control(dev, i, 0, write_config);
+
+ return 0;
+}
+
+static void thc_ensure_performance_limitations(struct thc_device *dev)
+{
+ unsigned long delay_usec = 0;
+ /*
+ * Minimum amount of delay the THC / QUICKSPI driver must wait
+ * between the end of a write operation and the beginning of a read operation.
+ * This value shall be in 10us multiples.
+ */
+ if (dev->perf_limit > 0) {
+ delay_usec = dev->perf_limit * 10;
+ udelay(delay_usec);
+ }
+}
+
+static void thc_dma_write_completion(struct thc_device *dev)
+{
+ thc_ensure_performance_limitations(dev);
+}
+
+/**
+ * thc_dma_write - Use TXDMA to write data to touch device
+ *
+ * @dev: The pointer of THC private device context
+ * @buffer: The pointer of write data buffer
+ * @buf_len: The write data length
+ *
+ * Return: 0 on success, other error codes on failure.
+ */
+int thc_dma_write(struct thc_device *dev, void *buffer, size_t buf_len)
+{
+ bool restore_interrupts = false;
+ u32 sts, ctrl;
+ int ret;
+
+ if (!(&dev->dma_ctx->dma_config[THC_TXDMA])->is_enabled) {
+ dev_err_once(dev->dev, "The TxDMA channel is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!buffer || buf_len <= 0) {
+ dev_err(dev->dev, "Invalid input parameters, buffer %p\n, buf_len %zu\n",
+ buffer, buf_len);
+ return -EINVAL;
+ }
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_WRITE_INT_STS_OFFSET, &sts);
+ if (sts & THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ACTIVE) {
+ dev_err_once(dev->dev, "THC TxDMA is till active and can't start again\n");
+ return -EBUSY;
+ }
+
+ if (mutex_lock_interruptible(&dev->thc_bus_lock))
+ return -EINTR;
+
+ regmap_read(dev->thc_regmap, THC_M_PRT_CONTROL_OFFSET, &ctrl);
+
+ ret = write_dma_buffer(dev, buffer, buf_len);
+ if (ret)
+ goto end;
+
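+	/*
+	 * When a performance limit is set, quiesce device interrupts for the
+	 * duration of the transfer unless HW already reports them quiesced.
+	 */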
+ if (dev->perf_limit && !(ctrl & THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS)) {
+ ret = thc_interrupt_quiesce(dev, true);
+ if (ret)
+ goto end;
+
+ restore_interrupts = true;
+ }
+
+ dev->write_done = false;
+
+ dma_set_start_bit(dev, &dev->dma_ctx->dma_config[THC_TXDMA]);
+
+ ret = wait_event_interruptible_timeout(dev->write_complete_wait, dev->write_done, 1 * HZ);
+ if (ret <= 0 || !dev->write_done) {
+ dev_err_once(dev->dev, "timeout for waiting TxDMA completion\n");
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ thc_dma_write_completion(dev);
+ mutex_unlock(&dev->thc_bus_lock);
+ return 0;
+
+end:
+ mutex_unlock(&dev->thc_bus_lock);
+
+ if (restore_interrupts)
+ ret = thc_interrupt_quiesce(dev, false);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(thc_dma_write, "INTEL_THC");
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
new file mode 100644
index 000000000000..ca923ff2bef9
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _INTEL_THC_DMA_H_
+#define _INTEL_THC_DMA_H_
+
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/sizes.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+#define THC_POINTER_MASK GENMASK(6, 0)
+#define THC_POINTER_WRAPAROUND 0x80
+#define THC_WRAPAROUND_VALUE_ODD 0x10
+#define THC_WRAPAROUND_VALUE_EVEN 0x90
+#define THC_MIN_BYTES_PER_SG_LIST_ENTRY SZ_4K
+
+#define THC_DEFAULT_RXDMA_POLLING_US_INTERVAL 100
+#define THC_DEFAULT_RXDMA_POLLING_US_TIMEOUT (10 * USEC_PER_MSEC)
+
+/*
+ * THC needs a 1KB-aligned address; dest_addr is 54 bits, not 64,
+ * so the lower 10 bits of the address don't need to be stored.
+ */
+#define THC_ADDRESS_SHIFT 10
+
+/**
+ * enum thc_dma_channel - THC DMA channels
+ * @THC_RXDMA1: legacy channel, reserved for raw data reading
+ * @THC_RXDMA2: DMA to read HID data from touch device
+ * @THC_TXDMA: DMA to write to touch device
+ * @THC_SWDMA: SW triggered DMA to write and read from touch device
+ */
+enum thc_dma_channel {
+ THC_RXDMA1 = 0,
+ THC_RXDMA2 = 1,
+ THC_TXDMA = 2,
+ THC_SWDMA = 3,
+ MAX_THC_DMA_CHANNEL
+};
+
+/**
+ * struct thc_prd_entry - THC DMA Physical Memory Descriptor (PRD) entry
+ * @dest_addr: bit[53:0], destination address in system memory
+ * @int_on_completion: bit[63], if set, THC triggers an interrupt to the driver
+ * @len: bit[87:64], length of this entry
+ * @end_of_prd: bit[88], if set, this entry is the last one of the current PRD table
+ * @hw_status: bit[90:89], HW status bits
+ */
+struct thc_prd_entry {
+ u64 dest_addr : 54;
+ u64 reserved1 : 9;
+ u64 int_on_completion : 1;
+ u64 len : 24;
+ u64 end_of_prd : 1;
+ u64 hw_status : 2;
+ u64 reserved2 : 37;
+};
+
+/*
+ * Max OS memory fragmentation will be at a 4KB boundary, thus to address 1MB
+ * of virtually contiguous memory 256 PRD entries are required for a single
+ * PRD Table. SW writes the number of PRD Entries for each PRD table in the
+ * THC_M_PRT_RPRD_CNTRL.PTEC register field. The PRD entry's length must be
+ * a multiple of 4KB except for the last entry in a PRD table.
+ * This is the max possible number of entries supported by HW; in practice
+ * there will be fewer entries in each PRD table (the actual number is
+ * given by the scatter-gather list allocation).
+ */
+#define PRD_ENTRIES_NUM 16
+
+/*
+ * Number of PRD tables equals to number of data buffers.
+ * The max number of PRD tables supported by the HW is 128,
+ * but we allocate only 16.
+ */
+#define PRD_TABLES_NUM 16
+
+/* THC DMA Physical Memory Descriptor Table */
+struct thc_prd_table {
+ struct thc_prd_entry entries[PRD_ENTRIES_NUM];
+};
+
+#define PRD_TABLE_SIZE sizeof(struct thc_prd_table)
+
+/**
+ * struct thc_dma_configuration - THC DMA configuration
+ * @dma_channel: DMA channel for current DMA configuration
+ * @prd_tbls_dma_handle: DMA buffer handle
+ * @dir: direction of DMA for this config
+ * @is_enabled: flag indicating whether the DMA channel is enabled
+ * @prd_tbls: PRD tables for current DMA
+ * @sgls: array of pointers to scatter-gather lists
+ * @sgls_nent: actual number of entries per scatter-gather list
+ * @prd_tbl_num: actual number of PRD tables
+ * @max_packet_size: size of the buffer needed for 1 DMA message (1 PRD table)
+ * @prd_base_addr_high: high 32 bits of the memory address where the PRD tables are stored
+ * @prd_base_addr_low: low 32 bits of the memory address where the PRD tables are stored
+ * @prd_cntrl: PRD control register value
+ * @dma_cntrl: DMA control register value
+ */
+struct thc_dma_configuration {
+ enum thc_dma_channel dma_channel;
+ dma_addr_t prd_tbls_dma_handle;
+ enum dma_data_direction dir;
+ bool is_enabled;
+
+ struct thc_prd_table *prd_tbls;
+ struct scatterlist *sgls[PRD_TABLES_NUM];
+ u8 sgls_nent[PRD_TABLES_NUM];
+ u8 prd_tbl_num;
+
+ size_t max_packet_size;
+ u32 prd_base_addr_high;
+ u32 prd_base_addr_low;
+ u32 prd_cntrl;
+ u32 dma_cntrl;
+};
+
+/*
+ * THC DMA context
+ * Stores all THC channel configurations
+ */
+struct thc_dma_context {
+ struct thc_dma_configuration dma_config[MAX_THC_DMA_CHANNEL];
+ u8 use_write_interrupts;
+};
+
+struct thc_device;
+
+int thc_dma_set_max_packet_sizes(struct thc_device *dev,
+ size_t mps_read1, size_t mps_read2,
+ size_t mps_write, size_t mps_swdma);
+int thc_dma_allocate(struct thc_device *dev);
+int thc_dma_configure(struct thc_device *dev);
+void thc_dma_unconfigure(struct thc_device *dev);
+void thc_dma_release(struct thc_device *dev);
+int thc_rxdma_read(struct thc_device *dev, enum thc_dma_channel dma_channel,
+ void *read_buff, size_t *read_len, int *read_finished);
+int thc_swdma_read(struct thc_device *dev, void *write_buff, size_t write_len,
+ u32 *prd_tbl_len, void *read_buff, size_t *read_len);
+int thc_dma_write(struct thc_device *dev, void *buffer, size_t buf_len);
+
+struct thc_dma_context *thc_dma_init(struct thc_device *dev);
+
+#endif /* _INTEL_THC_DMA_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h
new file mode 100644
index 000000000000..6729c4c25dab
--- /dev/null
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-hw.h
@@ -0,0 +1,881 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Intel Corporation */
+
+#ifndef _INTEL_THC_HW_H_
+#define _INTEL_THC_HW_H_
+
+#include <linux/bits.h>
+
+/* THC registers offset */
+/* Touch Host Controller Control Register */
+#define THC_M_PRT_CONTROL_OFFSET 0x1008
+/* THC SPI Bus Configuration Register */
+#define THC_M_PRT_SPI_CFG_OFFSET 0x1010
+/* THC SPI Bus Read Opcode Register (ICR read) */
+#define THC_M_PRT_SPI_ICRRD_OPCODE_OFFSET 0x1014
+/* THC SPI Bus Read Opcode Register (DMA read) */
+#define THC_M_PRT_SPI_DMARD_OPCODE_OFFSET 0x1018
+/* THC SPI Bus Write Opcode Register */
+#define THC_M_PRT_SPI_WR_OPCODE_OFFSET 0x101C
+/* THC Interrupt Enable Register */
+#define THC_M_PRT_INT_EN_OFFSET 0x1020
+/* THC Interrupt Status Register */
+#define THC_M_PRT_INT_STATUS_OFFSET 0x1024
+/* THC Error Cause Register */
+#define THC_M_PRT_ERR_CAUSE_OFFSET 0x1028
+/* THC SW sequencing Control */
+#define THC_M_PRT_SW_SEQ_CNTRL_OFFSET 0x1040
+/* THC SW sequencing Status */
+#define THC_M_PRT_SW_SEQ_STS_OFFSET 0x1044
+/* THC SW Sequencing Data DW0 or SPI Address Register */
+#define THC_M_PRT_SW_SEQ_DATA0_ADDR_OFFSET 0x1048
+/* THC SW sequencing Data DW1 */
+#define THC_M_PRT_SW_SEQ_DATA1_OFFSET 0x104C
+/* THC SW sequencing Data DW2 */
+#define THC_M_PRT_SW_SEQ_DATA2_OFFSET 0x1050
+/* THC SW sequencing Data DW3 */
+#define THC_M_PRT_SW_SEQ_DATA3_OFFSET 0x1054
+/* THC SW sequencing Data DW4 */
+#define THC_M_PRT_SW_SEQ_DATA4_OFFSET 0x1058
+/* THC SW sequencing Data DW5 */
+#define THC_M_PRT_SW_SEQ_DATA5_OFFSET 0x105C
+/* THC SW sequencing Data DW6 */
+#define THC_M_PRT_SW_SEQ_DATA6_OFFSET 0x1060
+/* THC SW sequencing Data DW7 */
+#define THC_M_PRT_SW_SEQ_DATA7_OFFSET 0x1064
+/* THC SW sequencing Data DW8 */
+#define THC_M_PRT_SW_SEQ_DATA8_OFFSET 0x1068
+/* THC SW sequencing Data DW9 */
+#define THC_M_PRT_SW_SEQ_DATA9_OFFSET 0x106C
+/* THC SW sequencing Data DW10 */
+#define THC_M_PRT_SW_SEQ_DATA10_OFFSET 0x1070
+/* THC SW sequencing Data DW11 */
+#define THC_M_PRT_SW_SEQ_DATA11_OFFSET 0x1074
+/* THC SW sequencing Data DW12 */
+#define THC_M_PRT_SW_SEQ_DATA12_OFFSET 0x1078
+/* THC SW sequencing Data DW13 */
+#define THC_M_PRT_SW_SEQ_DATA13_OFFSET 0x107C
+/* THC SW sequencing Data DW14 */
+#define THC_M_PRT_SW_SEQ_DATA14_OFFSET 0x1080
+/* THC SW sequencing Data DW15 */
+#define THC_M_PRT_SW_SEQ_DATA15_OFFSET 0x1084
+/* THC SW sequencing Data DW16 */
+#define THC_M_PRT_SW_SEQ_DATA16_OFFSET 0x1088
+/* THC Write PRD Base Address Register Low */
+#define THC_M_PRT_WPRD_BA_LOW_OFFSET 0x1090
+/* THC Write PRD Base Address Register High */
+#define THC_M_PRT_WPRD_BA_HI_OFFSET 0x1094
+/* THC Write DMA Control */
+#define THC_M_PRT_WRITE_DMA_CNTRL_OFFSET 0x1098
+/* THC Write Interrupt Status */
+#define THC_M_PRT_WRITE_INT_STS_OFFSET 0x109C
+/* THC Write DMA Error Register */
+#define THC_M_PRT_WRITE_DMA_ERR_OFFSET 0x10A0
+/* THC device address for the bulk write */
+#define THC_M_PRT_WR_BULK_ADDR_OFFSET 0x10B4
+/* THC Device Interrupt Cause Register Address */
+#define THC_M_PRT_DEV_INT_CAUSE_ADDR_OFFSET 0x10B8
+/* THC Device Interrupt Cause Register Value */
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_OFFSET 0x10BC
+/* THC TXDMA Frame Count */
+#define THC_M_PRT_TX_FRM_CNT_OFFSET 0x10E0
+/* THC TXDMA Packet Count */
+#define THC_M_PRT_TXDMA_PKT_CNT_OFFSET 0x10E4
+/* THC Device Interrupt Count on this port */
+#define THC_M_PRT_DEVINT_CNT_OFFSET 0x10E8
+/* Touch Device Interrupt Cause register Format Configuration Register 1 */
+#define THC_M_PRT_DEVINT_CFG_1_OFFSET 0x10EC
+/* Touch Device Interrupt Cause register Format Configuration Register 2 */
+#define THC_M_PRT_DEVINT_CFG_2_OFFSET 0x10F0
+/* THC Read PRD Base Address Low for the 1st RXDMA */
+#define THC_M_PRT_RPRD_BA_LOW_1_OFFSET 0x1100
+/* THC Read PRD Base Address High for the 1st RXDMA */
+#define THC_M_PRT_RPRD_BA_HI_1_OFFSET 0x1104
+/* THC Read PRD Control for the 1st RXDMA */
+#define THC_M_PRT_RPRD_CNTRL_1_OFFSET 0x1108
+/* THC Read DMA Control for the 1st RXDMA */
+#define THC_M_PRT_READ_DMA_CNTRL_1_OFFSET 0x110C
+/* THC Read Interrupt Status for the 1st RXDMA */
+#define THC_M_PRT_READ_DMA_INT_STS_1_OFFSET 0x1110
+/* THC Read DMA Error Register for the 1st RXDMA */
+#define THC_M_PRT_READ_DMA_ERR_1_OFFSET 0x1114
+/* Touch Sequencer GuC Tail Offset Address Low for the 1st RXDMA */
+#define THC_M_PRT_GUC_OFFSET_LOW_1_OFFSET 0x1118
+/* Touch Sequencer GuC Tail Offset Address High for the 1st RXDMA */
+#define THC_M_PRT_GUC_OFFSET_HI_1_OFFSET 0x111C
+/* Touch Host Controller GuC Work Queue Item Size for the 1st RXDMA */
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_1_OFFSET 0x1120
+/* Touch Host Controller GuC Control register for the 1st RXDMA */
+#define THC_M_PRT_GUC_WORKQ_SZ_1_OFFSET 0x1124
+/* Touch Sequencer Control for the 1st DMA */
+#define THC_M_PRT_TSEQ_CNTRL_1_OFFSET 0x1128
+/* Touch Sequencer GuC Doorbell Address Low for the 1st RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_LOW_1_OFFSET 0x1130
+/* Touch Sequencer GuC Doorbell Address High for the 1st RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_HI_1_OFFSET 0x1134
+/* Touch Sequencer GuC Doorbell Data */
+#define THC_M_PRT_GUC_DB_DATA_1_OFFSET 0x1138
+/* Touch Sequencer GuC Tail Offset Initial Value for the 1st RXDMA */
+#define THC_M_PRT_GUC_OFFSET_INITVAL_1_OFFSET 0x1140
+/* THC Device Address for the bulk/touch data read for the 1st RXDMA */
+#define THC_M_PRT_RD_BULK_ADDR_1_OFFSET 0x1170
+/* THC Gfx/SW Doorbell Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_DB_CNT_1_OFFSET 0x11A0
+/* THC Frame Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_FRM_CNT_1_OFFSET 0x11A4
+/* THC Micro Frame Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_UFRM_CNT_1_OFFSET 0x11A8
+/* THC Packet Count from the 1st Stream RXDMA on this port */
+#define THC_M_PRT_RXDMA_PKT_CNT_1_OFFSET 0x11AC
+/*
+ * THC Software Interrupt Count from the 1st Stream RXDMA
+ * on this port
+ */
+#define THC_M_PRT_SWINT_CNT_1_OFFSET 0x11B0
+/* Touch Sequencer Frame Drop Counter for the 1st RXDMA */
+#define THC_M_PRT_FRAME_DROP_CNT_1_OFFSET 0x11B4
+/* THC Coalescing 1 */
+#define THC_M_PRT_COALESCE_1_OFFSET 0x11B8
+/* THC Read PRD Base Address Low for the 2nd RXDMA */
+#define THC_M_PRT_RPRD_BA_LOW_2_OFFSET 0x1200
+/* THC Read PRD Base Address High for the 2nd RXDMA */
+#define THC_M_PRT_RPRD_BA_HI_2_OFFSET 0x1204
+/* THC Read PRD Control for the 2nd RXDMA */
+#define THC_M_PRT_RPRD_CNTRL_2_OFFSET 0x1208
+/* THC Read DMA Control for the 2nd RXDMA */
+#define THC_M_PRT_READ_DMA_CNTRL_2_OFFSET 0x120C
+/* THC Read Interrupt Status for the 2nd RXDMA */
+#define THC_M_PRT_READ_DMA_INT_STS_2_OFFSET 0x1210
+/* THC Read DMA Error Register for the 2nd RXDMA */
+#define THC_M_PRT_READ_DMA_ERR_2_OFFSET 0x1214
+/* Touch Sequencer GuC Tail Offset Address Low for the 2nd RXDMA */
+#define THC_M_PRT_GUC_OFFSET_LOW_2_OFFSET 0x1218
+/* Touch Sequencer GuC Tail Offset Address High for the 2nd RXDMA */
+#define THC_M_PRT_GUC_OFFSET_HI_2_OFFSET 0x121C
+/* Touch Host Controller GuC Work Queue Item Size for the 2nd RXDMA */
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_2_OFFSET 0x1220
+/* Touch Host Controller GuC Control register for the 2nd RXDMA */
+#define THC_M_PRT_GUC_WORKQ_SZ_2_OFFSET 0x1224
+/* Touch Sequencer Control for the 2nd DMA */
+#define THC_M_PRT_TSEQ_CNTRL_2_OFFSET 0x1228
+/* Touch Sequencer GuC Doorbell Address Low for the 2nd RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_LOW_2_OFFSET 0x1230
+/* Touch Sequencer GuC Doorbell Address High for the 2nd RXDMA */
+#define THC_M_PRT_GUC_DB_ADDR_HI_2_OFFSET 0x1234
+/* Touch Sequencer GuC Doorbell Data for PRD2 */
+#define THC_M_PRT_GUC_DB_DATA_2_OFFSET 0x1238
+/* Touch Sequencer GuC Tail Offset Initial Value for the 2nd RXDMA */
+#define THC_M_PRT_GUC_OFFSET_INITVAL_2_OFFSET 0x1240
+/* THC Device Address for the bulk/touch data read for the 2nd RXDMA */
+#define THC_M_PRT_RD_BULK_ADDR_2_OFFSET 0x1270
+/* THC Gfx/SW Doorbell Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_DB_CNT_2_OFFSET 0x12A0
+/* THC Frame Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_FRM_CNT_2_OFFSET 0x12A4
+/* THC Micro Frame Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_UFRM_CNT_2_OFFSET 0x12A8
+/* THC Packet Count from the 2nd Stream RXDMA on this port */
+#define THC_M_PRT_RXDMA_PKT_CNT_2_OFFSET 0x12AC
+/*
+ * THC Software Interrupt Count from the 2nd Stream RXDMA
+ * on this port
+ */
+#define THC_M_PRT_SWINT_CNT_2_OFFSET 0x12B0
+/* Touch Sequencer Frame Drop Counter for the 2nd RXDMA */
+#define THC_M_PRT_FRAME_DROP_CNT_2_OFFSET 0x12B4
+/* THC Coalescing 2 */
+#define THC_M_PRT_COALESCE_2_OFFSET 0x12B8
+/* THC SPARE REGISTER */
+#define THC_M_PRT_SPARE_REG_OFFSET 0x12BC
+/* THC Read PRD Base Address Low for the SW RXDMA */
+#define THC_M_PRT_RPRD_BA_LOW_SW_OFFSET 0x12C0
+/* THC Read PRD Base Address High for the SW RXDMA */
+#define THC_M_PRT_RPRD_BA_HI_SW_OFFSET 0x12C4
+/* THC Read PRD Control for the SW RXDMA */
+#define THC_M_PRT_RPRD_CNTRL_SW_OFFSET 0x12C8
+/* THC Read DMA Control for the SW RXDMA */
+#define THC_M_PRT_READ_DMA_CNTRL_SW_OFFSET 0x12CC
+/* THC Read Interrupt Status for the SW RXDMA */
+#define THC_M_PRT_READ_DMA_INT_STS_SW_OFFSET 0x12D0
+/* Touch Sequencer Control for the SW DMA */
+#define THC_M_PRT_TSEQ_CNTRL_SW_OFFSET 0x12D4
+/* Address for the bulk read for SW DMA engine */
+#define THC_M_PRT_RD_BULK_ADDR_SW_OFFSET 0x12D8
+/* THC Frame Count from the SW RXDMA on this port */
+#define THC_M_PRT_FRM_CNT_SW_OFFSET 0x12DC
+/* THC Packet Count from the SW RXDMA on this port */
+#define THC_M_PRT_RXDMA_PKT_CNT_SW_OFFSET 0x12E0
+/* SW DMA PRD Table Length */
+#define THC_M_PRT_SW_DMA_PRD_TABLE_LEN_OFFSET 0x12E4
+/* THC timing-based Frame/Interrupt coalescing control register for 1st RXDMA */
+#define THC_M_PRT_COALESCE_CNTRL_1_OFFSET 0x12E8
+/* THC timing-based Frame/Interrupt coalescing control register for 2nd RXDMA */
+#define THC_M_PRT_COALESCE_CNTRL_2_OFFSET 0x12EC
+/* Touch Sequencer PRD Table Empty Counter for the 1st RXDMA */
+#define THC_M_PRT_PRD_EMPTY_CNT_1_OFFSET 0x12F0
+/* Touch Sequencer PRD Table Empty Counter for the 2nd RXDMA */
+#define THC_M_PRT_PRD_EMPTY_CNT_2_OFFSET 0x12F4
+/* THC coalescing status to reflect the current coalescing FSM state for 1st RXDMA */
+#define THC_M_PRT_COALESCE_STS_1_OFFSET 0x12F8
+/* THC coalescing status to reflect the current coalescing FSM state for 2nd RXDMA */
+#define THC_M_PRT_COALESCE_STS_2_OFFSET 0x12FC
+/* THC Register for the SPI Port Duty Cycle Configuration */
+#define THC_M_PRT_SPI_DUTYC_CFG_OFFSET 0x1300
+/* THC Register for SW I2C Write Sequencing control */
+#define THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_OFFSET 0x1304
+/* THC current Timestamp Register for RXDMA1 */
+#define THC_M_PRT_TIMESTAMP_1_OFFSET 0x1308
+/* THC current Timestamp Register for RXDMA2 */
+#define THC_M_PRT_TIMESTAMP_2_OFFSET 0x130C
+/* Current SYNC Event Timestamp Register */
+#define THC_M_PRT_SYNC_TIMESTAMP_OFFSET 0x1310
+/* THC Display Sync Register */
+#define THC_M_PRT_DISP_SYNC_OFFSET 0x1314
+/* THC Display Sync Register 2 */
+#define THC_M_PRT_DISP_SYNC_2_OFFSET 0x1318
+/* THC I2C Configuration Register */
+#define THC_M_PRT_I2C_CFG_OFFSET 0x131C
+
+/* THC register bits definition */
+#define TXN_ERR_INT_STS_BIT BIT(28)
+#define TXN_FATAL_INT_STS_BIT BIT(30)
+
+#define NONDMA_INT_STS_BIT BIT(4)
+#define EOF_INT_STS_BIT BIT(5)
+
+#define THC_CFG_DID_VID_VID GENMASK(15, 0)
+#define THC_CFG_DID_VID_DID GENMASK(31, 16)
+
+#define THC_CFG_STS_CMD_IOSE BIT(0)
+#define THC_CFG_STS_CMD_MSE BIT(1)
+#define THC_CFG_STS_CMD_BME BIT(2)
+#define THC_CFG_STS_CMD_SPCYC BIT(3)
+#define THC_CFG_STS_CMD_MWRIEN BIT(4)
+#define THC_CFG_STS_CMD_VGAPS BIT(5)
+#define THC_CFG_STS_CMD_PERRR BIT(6)
+#define THC_CFG_STS_CMD_SERREN BIT(8)
+#define THC_CFG_STS_CMD_FBTBEN BIT(9)
+#define THC_CFG_STS_CMD_INTD BIT(10)
+#define THC_CFG_STS_CMD_INTS BIT(19)
+#define THC_CFG_STS_CMD_CAPL BIT(20)
+#define THC_CFG_STS_CMD_MCAP BIT(21)
+#define THC_CFG_STS_CMD_FBTBC BIT(23)
+#define THC_CFG_STS_CMD_MDPE BIT(24)
+#define THC_CFG_STS_CMD_DEVT GENMASK(26, 25)
+#define THC_CFG_STS_CMD_STA BIT(27)
+#define THC_CFG_STS_CMD_RTA BIT(28)
+#define THC_CFG_STS_CMD_RMA BIT(29)
+#define THC_CFG_STS_CMD_SSE BIT(30)
+#define THC_CFG_STS_CMD_DPE BIT(31)
+
+#define THC_CFG_CC_RID_RID GENMASK(7, 0)
+#define THC_CFG_CC_RID_PI GENMASK(15, 8)
+#define THC_CFG_CC_RID_SCC GENMASK(23, 16)
+#define THC_CFG_CC_RID_BCC GENMASK(31, 24)
+
+#define THC_CFG_BIST_HTYPE_LT_CLS_CLSZ GENMASK(7, 0)
+#define THC_CFG_BIST_HTYPE_LT_CLS_LT GENMASK(15, 8)
+#define THC_CFG_BIST_HTYPE_LT_CLS_HTYPE GENMASK(22, 16)
+#define THC_CFG_BIST_HTYPE_LT_CLS_MFD BIT(23)
+
+#define THC_CFG_BAR0_LOW_MEMSPACE BIT(0)
+#define THC_CFG_BAR0_LOW_TYP GENMASK(2, 1)
+#define THC_CFG_BAR0_LOW_PREFETCH BIT(3)
+#define THC_CFG_BAR0_LOW_MEMSIZE GENMASK(14, 4)
+#define THC_CFG_BAR0_LOW_MEMBAR GENMASK(31, 15)
+#define THC_CFG_BAR0_HI_MEMBAR GENMASK(31, 0)
+
+#define THC_CFG_SID_SVID_SSVID GENMASK(15, 0)
+#define THC_CFG_SID_SVID_SSID GENMASK(31, 16)
+
+#define THC_CFG_CAPP_CP GENMASK(7, 0)
+
+#define THC_CFG_INT_ILINE GENMASK(7, 0)
+#define THC_CFG_INT_IPIN GENMASK(15, 8)
+
+#define THC_CFG_UR_STS_CTL_URRE BIT(0)
+#define THC_CFG_UR_STS_CTL_URD BIT(1)
+#define THC_CFG_UR_STS_CTL_FD BIT(2)
+
+#define THC_CFG_MSIMC_MSINP_MSICID_CAPID GENMASK(7, 0)
+#define THC_CFG_MSIMC_MSINP_MSICID_NXTP GENMASK(15, 8)
+#define THC_CFG_MSIMC_MSINP_MSICID_MSIE BIT(16)
+#define THC_CFG_MSIMC_MSINP_MSICID_MMC GENMASK(19, 17)
+#define THC_CFG_MSIMC_MSINP_MSICID_MMEN GENMASK(22, 20)
+#define THC_CFG_MSIMC_MSINP_MSICID_XAC BIT(23)
+#define THC_CFG_MSIMC_MSINP_MSICID_PVMC BIT(24)
+#define THC_CFG_MSIMA_MADDR GENMASK(31, 2)
+#define THC_CFG_MSIMUA_MAUDDR GENMASK(31, 0)
+#define THC_CFG_MSIMD_MDAT GENMASK(15, 0)
+
+#define THC_CFG_PMCAP_PMNP_PMCID_CAPP GENMASK(7, 0)
+#define THC_CFG_PMCAP_PMNP_PMCID_NXTP GENMASK(15, 8)
+#define THC_CFG_PMCAP_PMNP_PMCID_VER GENMASK(18, 16)
+#define THC_CFG_PMCAP_PMNP_PMCID_PMECLK BIT(19)
+#define THC_CFG_PMCAP_PMNP_PMCID_DSI BIT(21)
+#define THC_CFG_PMCAP_PMNP_PMCID_AUXC GENMASK(24, 22)
+#define THC_CFG_PMCAP_PMNP_PMCID_D1S BIT(25)
+#define THC_CFG_PMCAP_PMNP_PMCID_D2S BIT(26)
+#define THC_CFG_PMCAP_PMNP_PMCID_PMES GENMASK(31, 27)
+
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_PWRST GENMASK(1, 0)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_NSR BIT(3)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_PMEEN BIT(8)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_DSEL GENMASK(12, 9)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_DS GENMASK(14, 13)
+#define THC_CFG_PMD_PMCSRBSE_PMCSR_PMESTS BIT(15)
+
+#define THC_CFG_DEVIDLE_CAPPID GENMASK(7, 0)
+#define THC_CFG_DEVIDLE_NCAPPP GENMASK(15, 8)
+#define THC_CFG_DEVIDLE_LENGTH GENMASK(23, 16)
+#define THC_CFG_DEVIDLE_REV GENMASK(27, 24)
+#define THC_CFG_DEVIDLE_VID GENMASK(31, 28)
+
+#define THC_CFG_VSHDR_VSECID GENMASK(15, 0)
+#define THC_CFG_VSHDR_VSECR GENMASK(19, 16)
+#define THC_CFG_VSHDR_VSECL GENMASK(31, 20)
+
+#define THC_CFG_SWLTRPTR_VALID BIT(0)
+#define THC_CFG_SWLTRPTR_BARNUM GENMASK(3, 1)
+#define THC_CFG_SWLTRPTR_SWLTRLOC GENMASK(31, 4)
+
+#define THC_CFG_DEVIDLEPTR_VALID BIT(0)
+#define THC_CFG_DEVIDLEPTR_BARNUM GENMASK(3, 1)
+#define THC_CFG_DEVIDLEPTR_DEVIDLELOC GENMASK(31, 4)
+#define THC_CFG_DEVIDLEPOL_POLV GENMASK(9, 0)
+#define THC_CFG_DEVIDLEPOL_POLS GENMASK(12, 10)
+
+#define THC_CFG_PCE_SPE BIT(0)
+#define THC_CFG_PCE_I3E BIT(1)
+#define THC_CFG_PCE_D3HE BIT(2)
+#define THC_CFG_PCE_SE BIT(3)
+#define THC_CFG_PCE_HAE BIT(5)
+
+#define THC_CFG_MANID_PROC GENMASK(7, 0)
+#define THC_CFG_MANID_MID GENMASK(15, 8)
+#define THC_CFG_MANID_MSID GENMASK(23, 16)
+#define THC_CFG_MANID_DOT GENMASK(27, 24)
+
+#define THC_M_CMN_DEVIDLECTRL_CIP BIT(0)
+#define THC_M_CMN_DEVIDLECTRL_IR BIT(1)
+#define THC_M_CMN_DEVIDLECTRL_DEVIDLE BIT(2)
+#define THC_M_CMN_DEVIDLECTRL_RR BIT(3)
+#define THC_M_CMN_DEVIDLECTRL_IRC BIT(4)
+
+#define THC_M_CMN_LTR_CTRL_OFFSET 0x14
+#define THC_M_CMN_LTR_CTRL_ACTIVE_LTR_REQ BIT(0)
+#define THC_M_CMN_LTR_CTRL_ACTIVE_LTR_EN BIT(1)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_REQ BIT(2)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_EN BIT(3)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_SCALE GENMASK(6, 4)
+#define THC_M_CMN_LTR_CTRL_LP_LTR_VAL GENMASK(16, 7)
+#define THC_M_CMN_LTR_CTRL_ACT_LTR_SCALE GENMASK(19, 17)
+#define THC_M_CMN_LTR_CTRL_ACT_LTR_VAL GENMASK(29, 20)
+#define THC_M_CMN_LTR_CTRL_LAST_LTR_SENT GENMASK(31, 30)
+
+#define THC_M_PRT_CONTROL_TSFTRST BIT(0)
+#define THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_EN BIT(1)
+#define THC_M_PRT_CONTROL_THC_DEVINT_QUIESCE_HW_STS BIT(2)
+#define THC_M_PRT_CONTROL_DEVRST BIT(3)
+#define THC_M_PRT_CONTROL_THC_DRV_LOCK_EN BIT(13)
+#define THC_M_PRT_CONTROL_THC_INSTANCE_INDEX GENMASK(18, 16)
+#define THC_M_PRT_CONTROL_PORT_INDEX GENMASK(22, 20)
+#define THC_M_PRT_CONTROL_THC_ARB_POLICY GENMASK(25, 24)
+#define THC_M_PRT_CONTROL_THC_BIOS_LOCK_EN BIT(27)
+#define THC_M_PRT_CONTROL_PORT_SUPPORTED BIT(28)
+#define THC_M_PRT_CONTROL_SPI_IO_RDY BIT(29)
+#define THC_M_PRT_CONTROL_PORT_TYPE GENMASK(31, 30)
+
+#define THC_M_PRT_SPI_CFG_SPI_TRDC GENMASK(1, 0)
+#define THC_M_PRT_SPI_CFG_SPI_TRMODE GENMASK(3, 2)
+#define THC_M_PRT_SPI_CFG_SPI_TCRF GENMASK(6, 4)
+#define THC_M_PRT_SPI_CFG_SPI_RD_MPS GENMASK(15, 7)
+#define THC_M_PRT_SPI_CFG_SPI_TWMODE GENMASK(19, 18)
+#define THC_M_PRT_SPI_CFG_SPI_TCWF GENMASK(22, 20)
+#define THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN BIT(23)
+#define THC_M_PRT_SPI_CFG_SPI_WR_MPS GENMASK(31, 24)
+
+#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_SIO GENMASK(31, 24)
+#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_DIO GENMASK(23, 16)
+#define THC_M_PRT_SPI_ICRRD_OPCODE_SPI_QIO GENMASK(15, 8)
+
+#define THC_M_PRT_INT_EN_SIPE BIT(0)
+#define THC_M_PRT_INT_EN_SBO BIT(1)
+#define THC_M_PRT_INT_EN_SIDR BIT(2)
+#define THC_M_PRT_INT_EN_SOFB BIT(3)
+#define THC_M_PRT_INT_EN_INVLD_DEV_ENTRY_INT_EN BIT(9)
+#define THC_M_PRT_INT_EN_FRAME_BABBLE_ERR_INT_EN BIT(10)
+#define THC_M_PRT_INT_EN_BUF_OVRRUN_ERR_INT_EN BIT(12)
+#define THC_M_PRT_INT_EN_PRD_ENTRY_ERR_INT_EN BIT(13)
+#define THC_M_PRT_INT_EN_DISP_SYNC_EVT_INT_EN BIT(14)
+#define THC_M_PRT_INT_EN_DEV_RAW_INT_EN BIT(15)
+#define THC_M_PRT_INT_EN_FATAL_ERR_INT_EN BIT(16)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_RX_UNDER_INT_EN BIT(17)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_RX_OVER_INT_EN BIT(18)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_RX_FULL_INT_EN BIT(19)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_TX_OVER_INT_EN BIT(20)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_TX_EMPTY_INT_EN BIT(21)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_TX_ABRT_INT_EN BIT(22)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_SCL_STUCK_AT_LOW_DET_INT_EN BIT(24)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_STOP_DET_INT_EN BIT(25)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_START_DET_INT_EN BIT(26)
+#define THC_M_PRT_INT_EN_THC_I2C_IC_MST_ON_HOLD_INT_EN BIT(27)
+#define THC_M_PRT_INT_EN_TXN_ERR_INT_EN BIT(29)
+#define THC_M_PRT_INT_EN_GBL_INT_EN BIT(31)
+
+#define THC_M_PRT_INT_STATUS_DISP_SYNC_EVT_INT_STS BIT(14)
+#define THC_M_PRT_INT_STATUS_DEV_RAW_INT_STS BIT(15)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_UNDER_INT_STS BIT(17)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_OVER_INT_STS BIT(18)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_RX_FULL_INT_STS BIT(19)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_OVER_INT_STS BIT(20)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_EMPTY_INT_STS BIT(21)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_TX_ABRT_INT_STS BIT(22)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_ACTIVITY_INT_STS BIT(23)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_SCL_STUCK_AT_LOW_INT_STS BIT(24)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_STOP_DET_INT_STS BIT(25)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_START_DET_INT_STS BIT(26)
+#define THC_M_PRT_INT_STATUS_THC_I2C_IC_MST_ON_HOLD_INT_STS BIT(27)
+#define THC_M_PRT_INT_STATUS_TXN_ERR_INT_STS BIT(28)
+#define THC_M_PRT_INT_STATUS_FATAL_ERR_INT_STS BIT(30)
+
+#define THC_M_PRT_ERR_CAUSE_INVLD_DEV_ENTRY BIT(9)
+#define THC_M_PRT_ERR_CAUSE_FRAME_BABBLE_ERR BIT(10)
+#define THC_M_PRT_ERR_CAUSE_BUF_OVRRUN_ERR BIT(12)
+#define THC_M_PRT_ERR_CAUSE_PRD_ENTRY_ERR BIT(13)
+#define THC_M_PRT_ERR_CAUSE_FATAL_ERR_CAUSE GENMASK(23, 16)
+
+#define THC_M_PRT_SW_SEQ_CNTRL_TSSGO BIT(0)
+#define THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CD_IE BIT(1)
+#define THC_M_PRT_SW_SEQ_CNTRL_THC_SS_CMD GENMASK(15, 8)
+#define THC_M_PRT_SW_SEQ_CNTRL_THC_SS_BC GENMASK(31, 16)
+#define THC_M_PRT_SW_SEQ_STS_TSSDONE BIT(0)
+#define THC_M_PRT_SW_SEQ_STS_THC_SS_ERR BIT(1)
+#define THC_M_PRT_SW_SEQ_STS_THC_SS_CIP BIT(3)
+#define THC_M_PRT_SW_SEQ_DATA0_ADDR_THC_SW_SEQ_DATA0_ADDR GENMASK(31, 0)
+#define THC_M_PRT_SW_SEQ_DATA1_THC_SW_SEQ_DATA1 GENMASK(31, 0)
+
+#define THC_M_PRT_WPRD_BA_LOW_THC_M_PRT_WPRD_BA_LOW GENMASK(31, 12)
+#define THC_M_PRT_WPRD_BA_HI_THC_M_PRT_WPRD_BA_HI GENMASK(31, 0)
+
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_START BIT(0)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_ERROR BIT(1)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC BIT(2)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_IE_IOC_DMACPL BIT(3)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_UHS BIT(23)
+#define THC_M_PRT_WRITE_DMA_CNTRL_THC_WRDMA_PTEC GENMASK(31, 24)
+
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_CMPL_STATUS BIT(0)
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ERROR_STS BIT(1)
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_IOC_STS BIT(2)
+#define THC_M_PRT_WRITE_INT_STS_THC_WRDMA_ACTIVE BIT(3)
+
+#define THC_M_PRT_WR_BULK_ADDR_THC_M_PRT_WR_BULK_ADDR GENMASK(31, 0)
+
+#define THC_M_PRT_DEV_INT_CAUSE_ADDR_THC_M_PRT_DEV_INT_CAUSE_ADDR GENMASK(31, 0)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_INTERRUPT_TYPE GENMASK(3, 0)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_MICRO_FRAME_SIZE GENMASK(23, 4)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_BEGINNING_OF_FRAME BIT(29)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_END_OF_FRAME BIT(30)
+#define THC_M_PRT_DEV_INT_CAUSE_REG_VAL_FRAME_TYPE BIT(31)
+
+#define THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_TX_FRM_CNT_THC_M_PRT_TX_FRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT GENMASK(30, 0)
+#define THC_M_PRT_TXDMA_PKT_CNT_THC_M_PRT_TXDMA_PKT_CNT_RST BIT(31)
+
+#define THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT GENMASK(30, 0)
+#define THC_M_PRT_DEVINT_CNT_THC_M_PRT_DEVINT_CNT_RST BIT(31)
+
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_OFFSET GENMASK(4, 0)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_LEN GENMASK(9, 5)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_EOF_OFFSET GENMASK(14, 10)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_SEND_ICR_US_EN BIT(15)
+#define THC_M_PRT_DEVINT_CFG_1_THC_M_PRT_INTTYP_DATA_VAL GENMASK(31, 16)
+
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_OFFSET GENMASK(4, 0)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_LEN GENMASK(9, 5)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_UFSIZE_UNIT GENMASK(15, 12)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_IGNORE BIT(16)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_FTYPE_VAL BIT(17)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_RXDMA_ADDRINC_DIS BIT(24)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_TXDMA_ADDRINC_DIS BIT(25)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_RXDMA_PKT_STRM_EN BIT(26)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_TXDMA_PKT_STRM_EN BIT(27)
+#define THC_M_PRT_DEVINT_CFG_2_THC_M_PRT_DEVINT_POL BIT(28)
+
+#define THC_M_PRT_RPRD_BA_LOW_1_THC_M_PRT_RPRD_BA_LOW GENMASK(31, 12)
+#define THC_M_PRT_RPRD_BA_HI_1_THC_M_PRT_RPRD_BA_HI GENMASK(31, 0)
+
+#define THC_M_PRT_RPRD_CNTRL_PCD GENMASK(6, 0)
+#define THC_M_PRT_RPRD_CNTRL_PTEC GENMASK(15, 8)
+#define THC_M_PRT_RPRD_CNTRL_PREFETCH_WM GENMASK(19, 16)
+
+#define THC_M_PRT_READ_DMA_CNTRL_START BIT(0)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_ERROR BIT(1)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_IOC BIT(2)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_STALL BIT(3)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_NDDI BIT(4)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_EOF BIT(5)
+#define THC_M_PRT_READ_DMA_CNTRL_IE_DMACPL BIT(7)
+#define THC_M_PRT_READ_DMA_CNTRL_TPCRP GENMASK(15, 8)
+#define THC_M_PRT_READ_DMA_CNTRL_TPCWP GENMASK(23, 16)
+#define THC_M_PRT_READ_DMA_CNTRL_INT_SW_DMA_EN BIT(28)
+#define THC_M_PRT_READ_DMA_CNTRL_SOO BIT(29)
+#define THC_M_PRT_READ_DMA_CNTRL_UHS BIT(30)
+#define THC_M_PRT_READ_DMA_CNTRL_TPCPR BIT(31)
+
+#define THC_M_PRT_READ_DMA_INT_STS_DMACPL_STS BIT(0)
+#define THC_M_PRT_READ_DMA_INT_STS_ERROR_STS BIT(1)
+#define THC_M_PRT_READ_DMA_INT_STS_IOC_STS BIT(2)
+#define THC_M_PRT_READ_DMA_INT_STS_STALL_STS BIT(3)
+#define THC_M_PRT_READ_DMA_INT_STS_NONDMA_INT_STS BIT(4)
+#define THC_M_PRT_READ_DMA_INT_STS_EOF_INT_STS BIT(5)
+#define THC_M_PRT_READ_DMA_INT_STS_ACTIVE BIT(8)
+
+#define THC_M_PRT_READ_DMA_ERR_1_DLERR BIT(0)
+
+#define THC_M_PRT_GUC_OFFSET_LOW_1_THC_M_PRT_GUC_OFFSET_LOW GENMASK(31, 3)
+#define THC_M_PRT_GUC_OFFSET_HI_1_THC_M_PRT_GUC_OFFSET_HI GENMASK(31, 0)
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_1_WORKQ_ITEM_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_1_WORKQ_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_1_FCD GENMASK(27, 24)
+#define THC_M_PRT_GUC_WORKQ_SZ_1_GIC GENMASK(31, 28)
+
+#define THC_M_PRT_TSEQ_CNTRL_1_RGD BIT(2)
+#define THC_M_PRT_TSEQ_CNTRL_1_EGP BIT(3)
+#define THC_M_PRT_TSEQ_CNTRL_1_RTO BIT(4)
+#define THC_M_PRT_TSEQ_CNTRL_1_EWOG BIT(5)
+#define THC_M_PRT_TSEQ_CNTRL_1_RWOGC BIT(6)
+#define THC_M_PRT_TSEQ_CNTRL_1_RX_DATA_FIFO_WR_WM GENMASK(25, 16)
+#define THC_M_PRT_TSEQ_CNTRL_1_RESET_PREP_CHICKEN BIT(30)
+#define THC_M_PRT_TSEQ_CNTRL_1_INT_EDG_DET_EN BIT(31)
+
+#define THC_M_PRT_GUC_DB_ADDR_LOW_1_GUC_DB_ADDR_LOW GENMASK(31, 2)
+#define THC_M_PRT_GUC_DB_ADDR_HI_1_GUC_DB_ADDR_HI GENMASK(31, 0)
+#define THC_M_PRT_GUC_DB_DATA_1_GUC_DB_DATA GENMASK(31, 0)
+#define THC_M_PRT_GUC_OFFSET_INITVAL_1_THC_M_PRT_GUC_OFFSET_INITVAL GENMASK(31, 0)
+
+#define THC_M_PRT_RD_BULK_ADDR_1_THC_M_PRT_RD_BULK_ADDR GENMASK(31, 0)
+
+#define THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT GENMASK(30, 0)
+#define THC_M_PRT_DB_CNT_1_THC_M_PRT_DB_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_FRM_CNT_1_THC_M_PRT_FRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_UFRM_CNT_1_THC_M_PRT_UFRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT GENMASK(30, 0)
+#define THC_M_PRT_RXDMA_PKT_CNT_1_THC_M_PRT_RXDMA_PKT_CNT_RST BIT(31)
+
+#define THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT GENMASK(30, 0)
+#define THC_M_PRT_SWINT_CNT_1_THC_M_PRT_SWINT_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRAME_DROP_CNT_1_NOFD GENMASK(30, 0)
+#define THC_M_PRT_FRAME_DROP_CNT_1_RFDC BIT(31)
+
+#define THC_M_PRT_COALESCE_1_COALESCE_TIMEOUT GENMASK(6, 0)
+
+#define THC_M_PRT_RPRD_BA_LOW_2_THC_M_PRT_RPRD_BA_LOW GENMASK(31, 12)
+#define THC_M_PRT_RPRD_BA_HI_2_THC_M_PRT_RPRD_BA_HI GENMASK(31, 0)
+
+#define THC_M_PRT_READ_DMA_ERR_2_DLERR BIT(0)
+
+#define THC_M_PRT_GUC_OFFSET_LOW_2_THC_M_PRT_GUC_OFFSET_LOW GENMASK(31, 3)
+#define THC_M_PRT_GUC_OFFSET_HI_2_THC_M_PRT_GUC_OFFSET_HI GENMASK(31, 0)
+
+#define THC_M_PRT_GUC_WORKQ_ITEM_SZ_2_WORKQ_ITEM_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_2_WORKQ_SZ GENMASK(23, 0)
+#define THC_M_PRT_GUC_WORKQ_SZ_2_FCD GENMASK(27, 24)
+#define THC_M_PRT_GUC_WORKQ_SZ_2_GIC GENMASK(31, 28)
+
+#define THC_M_PRT_TSEQ_CNTRL_2_RGD BIT(2)
+#define THC_M_PRT_TSEQ_CNTRL_2_EGP BIT(3)
+#define THC_M_PRT_TSEQ_CNTRL_2_RTO BIT(4)
+
+#define THC_M_PRT_GUC_DB_ADDR_LOW_2_GUC_DB_ADDR_LOW GENMASK(31, 2)
+#define THC_M_PRT_GUC_DB_ADDR_HI_2_GUC_DB_ADDR_HI GENMASK(31, 0)
+
+#define THC_M_PRT_GUC_DB_DATA_2_GUC_DB_DATA GENMASK(31, 0)
+
+#define THC_M_PRT_GUC_OFFSET_INITVAL_2_THC_M_PRT_GUC_OFFSET_INITVAL GENMASK(31, 0)
+
+#define THC_M_PRT_RD_BULK_ADDR_2_THC_M_PRT_RD_BULK_ADDR GENMASK(31, 0)
+
+#define THC_M_PRT_DB_CNT_2_THC_M_PRT_DB_CNT GENMASK(30, 0)
+#define THC_M_PRT_DB_CNT_2_THC_M_PRT_DB_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_FRM_CNT_2_THC_M_PRT_FRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT GENMASK(30, 0)
+#define THC_M_PRT_UFRM_CNT_2_THC_M_PRT_UFRM_CNT_RST BIT(31)
+
+#define THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT GENMASK(30, 0)
+#define THC_M_PRT_RXDMA_PKT_CNT_2_THC_M_PRT_RXDMA_PKT_CNT_RST BIT(31)
+
+#define THC_M_PRT_SWINT_CNT_2_THC_M_PRT_SWINT_CNT GENMASK(30, 0)
+#define THC_M_PRT_SWINT_CNT_2_THC_M_PRT_SWINT_CNT_RST BIT(31)
+
+#define THC_M_PRT_FRAME_DROP_CNT_2_NOFD GENMASK(30, 0)
+#define THC_M_PRT_FRAME_DROP_CNT_2_RFDC BIT(31)
+
+#define THC_M_PRT_COALESCE_2_COALESCE_TIMEOUT GENMASK(6, 0)
+
+#define THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_I2C_RW_PIO_EN BIT(23)
+#define THC_M_PRT_SW_SEQ_I2C_WR_CNTRL_THC_PIO_I2C_WBC GENMASK(31, 26)
+
+#define THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_RX_DLEN_EN BIT(23)
+#define THC_M_PRT_RPRD_CNTRL_SW_THC_SWDMA_I2C_WBC GENMASK(31, 26)
+
+#define THC_M_PRT_PRD_EMPTY_CNT_1_RPTEC BIT(31)
+#define THC_M_PRT_PRD_EMPTY_CNT_2_RPTEC BIT(31)
+
+#define THC_M_PRT_SW_DMA_PRD_TABLE_LEN_THC_M_PRT_SW_DMA_PRD_TABLE_LEN GENMASK(23, 0)
+
+#define THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_VAL GENMASK(3, 0)
+#define THC_M_PRT_SPI_DUTYC_CFG_SPI_CSA_CK_DELAY_EN BIT(25)
+
+/* CS Assertion delay default value */
+#define THC_CSA_CK_DELAY_VAL_DEFAULT 4
+
+/* ARB policy definition */
+/* Arbiter switches on packet boundary */
+#define THC_ARB_POLICY_PACKET_BOUNDARY 0
+/* Arbiter switches on Micro Frame boundary */
+#define THC_ARB_POLICY_UFRAME_BOUNDARY 1
+/* Arbiter switches on Frame boundary */
+#define THC_ARB_POLICY_FRAME_BOUNDARY 2
+
+#define THC_REGMAP_POLLING_INTERVAL_US 10 /* 10us */
+#define THC_PIO_DONE_TIMEOUT_US USEC_PER_SEC /* 1s */
+
+/* Default configures for HIDSPI */
+#define THC_BIT_OFFSET_INTERRUPT_TYPE 4
+/* input_report_type is 4 bits for HIDSPI */
+#define THC_BIT_LENGTH_INTERRUPT_TYPE 4
+/* Last fragment indicator is bit 15 for HIDSPI */
+#define THC_BIT_OFFSET_LAST_FRAGMENT_FLAG 22
+#define THC_BIT_OFFSET_MICROFRAME_SIZE 8
+/* input_report_length is 14 bits for HIDSPI */
+#define THC_BIT_LENGTH_MICROFRAME_SIZE 14
+/* MFS unit in power of 2 */
+#define THC_UNIT_MICROFRAME_SIZE 2
+#define THC_BITMASK_INTERRUPT_TYPE_DATA 1
+#define THC_BITMASK_INVALID_TYPE_DATA 2
+
+/* Interrupt Quiesce default timeout value */
+#define THC_QUIESCE_EN_TIMEOUT_US USEC_PER_SEC /* 1s */
+
+/* LTR definition */
+/*
+ * THC uses a scale to calculate the final LTR value.
+ * The scale is a geometric progression with step 2^5, starting from 2^0.
+ * For example, THC_LTR_SCALE_2 (2) means 2^(5 * 2) = 1024; the unit is ns.
+ */
+#define THC_LTR_SCALE_0 0
+#define THC_LTR_SCALE_1 1
+#define THC_LTR_SCALE_2 2
+#define THC_LTR_SCALE_3 3
+#define THC_LTR_SCALE_4 4
+#define THC_LTR_SCALE_5 5
+#define THC_LTR_MODE_ACTIVE 0
+#define THC_LTR_MODE_LP 1
+#define THC_LTR_MIN_VAL_SCALE_3 BIT(10)
+#define THC_LTR_MAX_VAL_SCALE_3 BIT(15)
+#define THC_LTR_MIN_VAL_SCALE_4 BIT(15)
+#define THC_LTR_MAX_VAL_SCALE_4 BIT(20)
+#define THC_LTR_MIN_VAL_SCALE_5 BIT(20)
+#define THC_LTR_MAX_VAL_SCALE_5 BIT(25)
+
+/*
+ * THC PIO default opcode values
+ * @THC_PIO_OP_SPI_TIC_READ: THC opcode for SPI PIO read
+ * @THC_PIO_OP_SPI_TIC_WRITE: THC opcode for SPI PIO write
+ * @THC_PIO_OP_I2C_SUBSYSTEM_READ: THC opcode for reading I2C subsystem registers
+ * @THC_PIO_OP_I2C_SUBSYSTEM_WRITE: THC opcode for writing I2C subsystem registers
+ * @THC_PIO_OP_I2C_TIC_READ: THC opcode for reading from an I2C device
+ * @THC_PIO_OP_I2C_TIC_WRITE: THC opcode for writing to an I2C device
+ * @THC_PIO_OP_I2C_TIC_WRITE_AND_READ: THC opcode for a write followed by a read on an I2C device
+ */
+enum thc_pio_opcode {
+ THC_PIO_OP_SPI_TIC_READ = 0x4,
+ THC_PIO_OP_SPI_TIC_WRITE = 0x6,
+ THC_PIO_OP_I2C_SUBSYSTEM_READ = 0x12,
+ THC_PIO_OP_I2C_SUBSYSTEM_WRITE = 0x13,
+ THC_PIO_OP_I2C_TIC_READ = 0x14,
+ THC_PIO_OP_I2C_TIC_WRITE = 0x18,
+ THC_PIO_OP_I2C_TIC_WRITE_AND_READ = 0x1C,
+};
+
+/**
+ * enum thc_spi_iomode - THC SPI IO modes
+ * @THC_SINGLE_IO: single IO mode, 1(opcode) - 1(address) - 1(data)
+ * @THC_DUAL_IO: dual IO mode, 1(opcode) - 2(address) - 2(data)
+ * @THC_QUAD_IO: quad IO mode, 1(opcode) - 4(address) - 4(data)
+ * @THC_QUAD_PARALLEL_IO: parallel quad IO mode, 4(opcode) - 4(address) - 4(data)
+ */
+enum thc_spi_iomode {
+ THC_SINGLE_IO = 0,
+ THC_DUAL_IO = 1,
+ THC_QUAD_IO = 2,
+ THC_QUAD_PARALLEL_IO = 3,
+};
+
+/**
+ * enum thc_spi_frq_div - THC SPI frequency divider
+ *
+ * The final DIV value is determined by the THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN bit.
+ * If THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN isn't set, THC takes the DIV value directly;
+ * if THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN is set, THC multiplies the DIV value by 8.
+ *
+ * For example, if the THC input clock is 125MHz:
+ * when THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN isn't set, THC_SPI_FRQ_DIV_3 means DIV is 3,
+ * so the final THC clock is 125 / 3 = 41.667MHz;
+ * when THC_M_PRT_SPI_CFG_SPI_LOW_FREQ_EN is set, THC_SPI_FRQ_DIV_3 means DIV is 3 * 8,
+ * so the final THC clock is 125 / (3 * 8) = 5.208MHz.
+ */
+enum thc_spi_frq_div {
+ THC_SPI_FRQ_RESERVED = 0,
+ THC_SPI_FRQ_DIV_1 = 1,
+ THC_SPI_FRQ_DIV_2 = 2,
+ THC_SPI_FRQ_DIV_3 = 3,
+ THC_SPI_FRQ_DIV_4 = 4,
+ THC_SPI_FRQ_DIV_5 = 5,
+ THC_SPI_FRQ_DIV_6 = 6,
+ THC_SPI_FRQ_DIV_7 = 7,
+};
+
+/* THC I2C sub-system registers */
+#define THC_I2C_IC_CON_OFFSET 0x0
+#define THC_I2C_IC_TAR_OFFSET 0x4
+#define THC_I2C_IC_SAR_OFFSET 0x8
+#define THC_I2C_IC_HS_MADDR_OFFSET 0xC
+#define THC_I2C_IC_DATA_CMD_OFFSET 0x10
+#define THC_I2C_IC_SS_SCL_HCNT_OFFSET 0x14
+#define THC_I2C_IC_UFM_SCL_HCNT_OFFSET 0x14
+#define THC_I2C_IC_SS_SCL_LCNT_OFFSET 0x18
+#define THC_I2C_IC_UFM_SCL_LCNT_OFFSET 0x18
+#define THC_I2C_IC_FS_SCL_HCNT_OFFSET 0x1C
+#define THC_I2C_IC_UFM_TBUF_CNT_OFFSET 0x1C
+#define THC_I2C_IC_FS_SCL_LCNT_OFFSET 0x20
+#define THC_I2C_IC_HS_SCL_HCNT_OFFSET 0x24
+#define THC_I2C_IC_HS_SCL_LCNT_OFFSET 0x28
+#define THC_I2C_IC_INTR_STAT_OFFSET 0x2C
+#define THC_I2C_IC_INTR_MASK_OFFSET 0x30
+#define THC_I2C_IC_RAW_INTR_STAT_OFFSET 0x34
+#define THC_I2C_IC_RX_TL_OFFSET 0x38
+#define THC_I2C_IC_TX_TL_OFFSET 0x3C
+#define THC_I2C_IC_CLR_INTR_OFFSET 0x40
+#define THC_I2C_IC_CLR_RX_UNDER_OFFSET 0x44
+#define THC_I2C_IC_CLR_RX_OVER_OFFSET 0x48
+#define THC_I2C_IC_CLR_TX_OVER_OFFSET 0x4C
+#define THC_I2C_IC_CLR_RD_REQ_OFFSET 0x50
+#define THC_I2C_IC_CLR_TX_ABRT_OFFSET 0x54
+#define THC_I2C_IC_CLR_RX_DONE_OFFSET 0x58
+#define THC_I2C_IC_CLR_ACTIVITY_OFFSET 0x5C
+#define THC_I2C_IC_CLR_STOP_DET_OFFSET 0x60
+#define THC_I2C_IC_CLR_START_DET_OFFSET 0x64
+#define THC_I2C_IC_CLR_GEN_CALL_OFFSET 0x68
+#define THC_I2C_IC_ENABLE_OFFSET 0x6C
+#define THC_I2C_IC_STATUS_OFFSET 0x70
+#define THC_I2C_IC_TXFLR_OFFSET 0x74
+#define THC_I2C_IC_RXFLR_OFFSET 0x78
+#define THC_I2C_IC_SDA_HOLD_OFFSET 0x7C
+#define THC_I2C_IC_TX_ABRT_SOURCE_OFFSET 0x80
+#define THC_I2C_IC_SLV_DATA_NACK_ONLY_OFFSET 0x84
+#define THC_I2C_IC_DMA_CR_OFFSET 0x88
+#define THC_I2C_IC_DMA_TDLR_OFFSET 0x8C
+#define THC_I2C_IC_DMA_RDLR_OFFSET 0x90
+#define THC_I2C_IC_SDA_SETUP_OFFSET 0x94
+#define THC_I2C_IC_ACK_GENERAL_CALL_OFFSET 0x98
+#define THC_I2C_IC_ENABLE_STATUS_OFFSET 0x9C
+#define THC_I2C_IC_FS_SPKLEN_OFFSET 0xA0
+#define THC_I2C_IC_UFM_SPKLEN_OFFSET 0xA0
+#define THC_I2C_IC_HS_SPKLEN_OFFSET 0xA4
+#define THC_I2C_IC_CLR_RESTART_DET_OFFSET 0xA8
+#define THC_I2C_IC_SCL_STUCK_AT_LOW_TIMEOUT_OFFSET 0xAC
+#define THC_I2C_IC_SDA_STUCK_AT_LOW_TIMEOUT_OFFSET 0xB0
+#define THC_I2C_IC_CLR_SCL_STUCK_DET_OFFSET 0xB4
+#define THC_I2C_IC_DEVICE_ID_OFFSET 0xB8
+#define THC_I2C_IC_SMBUS_CLK_LOW_SEXT_OFFSET 0xBC
+#define THC_I2C_IC_SMBUS_CLK_LOW_MEXT_OFFSET 0xC0
+#define THC_I2C_IC_SMBUS_THIGH_MAX_IDLE_COUNT_OFFSET 0xC4
+#define THC_I2C_IC_SMBUS_INTR_STAT_OFFSET 0xC8
+#define THC_I2C_IC_SMBUS_INTR_MASK_OFFSET 0xCC
+#define THC_I2C_IC_SMBUS_RAW_INTR_STAT_OFFSET 0xD0
+#define THC_I2C_IC_CLR_SMBUS_INTR_OFFSET 0xD4
+#define THC_I2C_IC_OPTIONAL_SAR_OFFSET 0xD8
+#define THC_I2C_IC_SMBUS_UDID_LSB_OFFSET 0xDC
+#define THC_I2C_IC_SMBUS_UDID_WORD0_OFFSET 0xDC
+#define THC_I2C_IC_SMBUS_UDID_WORD1_OFFSET 0xE0
+#define THC_I2C_IC_SMBUS_UDID_WORD2_OFFSET 0xE4
+#define THC_I2C_IC_SMBUS_UDID_WORD3_OFFSET 0xE8
+#define THC_I2C_IC_COMP_PARAM_1_OFFSET 0xF4
+#define THC_I2C_IC_COMP_VERSION_OFFSET 0xF8
+#define THC_I2C_IC_COMP_TYPE_OFFSET 0xFC
+
+/**
+ * enum THC_I2C_SPEED_MODE - THC I2C sub-system supported speed modes
+ */
+enum THC_I2C_SPEED_MODE {
+ THC_I2C_STANDARD = 1,
+ THC_I2C_FAST_AND_PLUS = 2,
+ THC_I2C_HIGH_SPEED = 3,
+};
+
+/* THC I2C sub-system register bits definition */
+#define THC_I2C_IC_ENABLE_ENABLE BIT(0)
+#define THC_I2C_IC_ENABLE_ABORT BIT(1)
+#define THC_I2C_IC_ENABLE_TX_CMD_BLOCK BIT(2)
+#define THC_I2C_IC_ENABLE_SDA_STUCK_RECOVERY_ENABLE BIT(3)
+#define THC_I2C_IC_ENABLE_SMBUS_CLK_RESET BIT(16)
+#define THC_I2C_IC_ENABLE_SMBUS_SUSPEND_EN BIT(17)
+#define THC_I2C_IC_ENABLE_SMBUS_ALERT_EN BIT(18)
+
+#define THC_I2C_IC_CON_MASTER_MODE BIT(0)
+#define THC_I2C_IC_CON_SPEED GENMASK(2, 1)
+#define THC_I2C_IC_CON_IC_10BITADDR_SLAVE BIT(3)
+#define THC_I2C_IC_CON_IC_10BITADDR_MASTER BIT(4)
+#define THC_I2C_IC_CON_IC_RESTART_EN BIT(5)
+#define THC_I2C_IC_CON_IC_SLAVE_DISABLE BIT(6)
+#define THC_I2C_IC_CON_STOP_DET_IFADDRESSED BIT(7)
+#define THC_I2C_IC_CON_TX_EMPTY_CTRL BIT(8)
+#define THC_I2C_IC_CON_RX_FIFO_FULL_HLD_CTRL BIT(9)
+#define THC_I2C_IC_CON_STOP_DET_IF_MASTER_ACTIVE BIT(10)
+#define THC_I2C_IC_CON_BUS_CLEAR_FEATURE_CTRL BIT(11)
+#define THC_I2C_IC_CON_OPTIONAL_SAR_CTRL BIT(16)
+#define THC_I2C_IC_CON_SMBUS_SLAVE_QUICK_EN BIT(17)
+#define THC_I2C_IC_CON_SMBUS_ARP_EN BIT(18)
+#define THC_I2C_IC_CON_SMBUS_PERSISTENT_SLV_ADDR_EN BIT(19)
+
+#define THC_I2C_IC_TAR_IC_TAR GENMASK(9, 0)
+#define THC_I2C_IC_TAR_GC_OR_START BIT(10)
+#define THC_I2C_IC_TAR_SPECIAL BIT(11)
+#define THC_I2C_IC_TAR_IC_10BITADDR_MASTER BIT(12)
+#define THC_I2C_IC_TAR_DEVICE_ID BIT(13)
+#define THC_I2C_IC_TAR_SMBUS_QUICK_CMD BIT(16)
+
+#define THC_I2C_IC_INTR_MASK_M_RX_UNDER BIT(0)
+#define THC_I2C_IC_INTR_MASK_M_RX_OVER BIT(1)
+#define THC_I2C_IC_INTR_MASK_M_RX_FULL BIT(2)
+#define THC_I2C_IC_INTR_MASK_M_TX_OVER BIT(3)
+#define THC_I2C_IC_INTR_MASK_M_TX_EMPTY BIT(4)
+#define THC_I2C_IC_INTR_MASK_M_RD_REQ BIT(5)
+#define THC_I2C_IC_INTR_MASK_M_TX_ABRT BIT(6)
+#define THC_I2C_IC_INTR_MASK_M_RX_DONE BIT(7)
+#define THC_I2C_IC_INTR_MASK_M_ACTIVITY BIT(8)
+#define THC_I2C_IC_INTR_MASK_M_STOP_DET BIT(9)
+#define THC_I2C_IC_INTR_MASK_M_START_DET BIT(10)
+#define THC_I2C_IC_INTR_MASK_M_GEN_CALL BIT(11)
+#define THC_I2C_IC_INTR_MASK_M_RESTART_DET BIT(12)
+#define THC_I2C_IC_INTR_MASK_M_MASTER_ON_HOLD BIT(13)
+#define THC_I2C_IC_INTR_MASK_M_SCL_STUCK_AT_LOW BIT(14)
+
+#define THC_I2C_IC_DMA_CR_RDMAE BIT(0)
+#define THC_I2C_IC_DMA_CR_TDMAE BIT(1)
+
+#endif /* _INTEL_THC_HW_H_ */
diff --git a/drivers/hid/surface-hid/surface_kbd.c b/drivers/hid/surface-hid/surface_kbd.c
index 383200d9121a..0be01b5e7425 100644
--- a/drivers/hid/surface-hid/surface_kbd.c
+++ b/drivers/hid/surface-hid/surface_kbd.c
@@ -284,7 +284,7 @@ MODULE_DEVICE_TABLE(acpi, surface_kbd_match);
static struct platform_driver surface_kbd_driver = {
.probe = surface_kbd_probe,
- .remove_new = surface_kbd_remove,
+ .remove = surface_kbd_remove,
.driver = {
.name = "surface_keyboard",
.acpi_match_table = surface_kbd_match,
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index a9e85bdd4cc6..a6eb6fe6130d 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1100,7 +1100,7 @@ static int usbhid_start(struct hid_device *hid)
interval = endpoint->bInterval;
- /* Some vendors give fullspeed interval on highspeed devides */
+ /* Some vendors give fullspeed interval on highspeed devices */
if (hid->quirks & HID_QUIRK_FULLSPEED_INTERVAL &&
dev->speed == USB_SPEED_HIGH) {
interval = fls(endpoint->bInterval*8);
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 6f1443999d1d..1deacb4568cb 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -218,6 +218,14 @@ static inline __u32 wacom_s32tou(s32 value, __u8 n)
return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value;
}
+static inline u32 wacom_rescale(u32 value, u32 in_max, u32 out_max)
+{
+ if (in_max == 0 || out_max == 0)
+ return 0;
+ value = clamp(value, 0u, in_max);
+ return DIV_ROUND_CLOSEST(value * out_max, in_max);
+}
+
extern const struct hid_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 2bc45b24075c..8125383932ec 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1084,6 +1084,17 @@ static ssize_t wacom_luminance_store(struct wacom *wacom, u8 *dest,
mutex_lock(&wacom->lock);
*dest = value & 0x7f;
+ for (unsigned int i = 0; i < wacom->led.count; i++) {
+ struct wacom_group_leds *group = &wacom->led.groups[i];
+
+ for (unsigned int j = 0; j < group->count; j++) {
+ if (dest == &wacom->led.llv)
+ group->leds[j].llv = *dest;
+ else if (dest == &wacom->led.hlv)
+ group->leds[j].hlv = *dest;
+ }
+ }
+
err = wacom_led_control(wacom);
mutex_unlock(&wacom->lock);
@@ -1302,10 +1313,10 @@ enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
struct wacom *wacom = led->wacom;
if (wacom->led.max_hlv)
- return led->hlv * LED_FULL / wacom->led.max_hlv;
+ return wacom_rescale(led->hlv, wacom->led.max_hlv, LED_FULL);
if (wacom->led.max_llv)
- return led->llv * LED_FULL / wacom->led.max_llv;
+ return wacom_rescale(led->llv, wacom->led.max_llv, LED_FULL);
/* device doesn't support brightness tuning */
return LED_FULL;
@@ -1337,8 +1348,8 @@ static int wacom_led_brightness_set(struct led_classdev *cdev,
goto out;
}
- led->llv = wacom->led.llv = wacom->led.max_llv * brightness / LED_FULL;
- led->hlv = wacom->led.hlv = wacom->led.max_hlv * brightness / LED_FULL;
+ led->llv = wacom->led.llv = wacom_rescale(brightness, LED_FULL, wacom->led.max_llv);
+ led->hlv = wacom->led.hlv = wacom_rescale(brightness, LED_FULL, wacom->led.max_hlv);
wacom->led.groups[led->group].select = led->id;
@@ -1370,17 +1381,6 @@ static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
if (!name)
return -ENOMEM;
- if (!read_only) {
- led->trigger.name = name;
- error = devm_led_trigger_register(dev, &led->trigger);
- if (error) {
- hid_err(wacom->hdev,
- "failed to register LED trigger %s: %d\n",
- led->cdev.name, error);
- return error;
- }
- }
-
led->group = group;
led->id = id;
led->wacom = wacom;
@@ -1397,6 +1397,19 @@ static int wacom_led_register_one(struct device *dev, struct wacom *wacom,
led->cdev.brightness_set = wacom_led_readonly_brightness_set;
}
+ if (!read_only) {
+ led->trigger.name = name;
+ if (id == wacom->led.groups[group].select)
+ led->trigger.brightness = wacom_leds_brightness_get(led);
+ error = devm_led_trigger_register(dev, &led->trigger);
+ if (error) {
+ hid_err(wacom->hdev,
+ "failed to register LED trigger %s: %d\n",
+ led->cdev.name, error);
+ return error;
+ }
+ }
+
error = devm_led_classdev_register(dev, &led->cdev);
if (error) {
hid_err(wacom->hdev,
@@ -2241,7 +2254,8 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
if (hid_is_usb(wacom->hdev)) {
struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
struct usb_device *dev = interface_to_usbdev(intf);
- product_name = dev->product;
+ if (dev->product != NULL)
+ product_name = dev->product;
}
if (wacom->hdev->bus == BUS_I2C) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 59a13ad9371c..b60bfafc6a8f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1353,9 +1353,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
rotation -= 1800;
input_report_abs(pen_input, ABS_TILT_X,
- (char)frame[7]);
+ (signed char)frame[7]);
input_report_abs(pen_input, ABS_TILT_Y,
- (char)frame[8]);
+ (signed char)frame[8]);
input_report_abs(pen_input, ABS_Z, rotation);
input_report_abs(pen_input, ABS_WHEEL,
get_unaligned_le16(&frame[11]));
@@ -2422,9 +2422,11 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
wacom_wac->hid_data.sense_state = value;
return;
case HID_DG_INVERT:
- wacom_wac->hid_data.invert_state = value;
+ wacom_wac->hid_data.eraser |= value;
return;
case HID_DG_ERASER:
+ wacom_wac->hid_data.eraser |= value;
+ fallthrough;
case HID_DG_TIPSWITCH:
wacom_wac->hid_data.tipswitch |= value;
return;
@@ -2565,8 +2567,10 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
if (entering_range) { /* first in range */
/* Going into range select tool */
- if (wacom_wac->hid_data.invert_state)
+ if (wacom_wac->hid_data.eraser)
wacom_wac->tool[0] = BTN_TOOL_RUBBER;
+ else if (wacom_wac->features.quirks & WACOM_QUIRK_AESPEN)
+ wacom_wac->tool[0] = BTN_TOOL_PEN;
else if (wacom_wac->id[0])
wacom_wac->tool[0] = wacom_intuos_get_tool_type(wacom_wac->id[0]);
else
@@ -2617,6 +2621,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
}
wacom_wac->hid_data.tipswitch = false;
+ wacom_wac->hid_data.eraser = false;
input_sync(input);
}
@@ -4941,6 +4946,10 @@ static const struct wacom_features wacom_features_0x94 =
HID_DEVICE(BUS_I2C, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
.driver_data = (kernel_ulong_t)&wacom_features_##prod
+#define PCI_DEVICE_WACOM(prod) \
+ HID_DEVICE(BUS_PCI, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
+ .driver_data = (kernel_ulong_t)&wacom_features_##prod
+
#define USB_DEVICE_LENOVO(prod) \
HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \
.driver_data = (kernel_ulong_t)&wacom_features_##prod
@@ -5110,6 +5119,7 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(HID_ANY_ID) },
{ I2C_DEVICE_WACOM(HID_ANY_ID) },
+ { PCI_DEVICE_WACOM(HID_ANY_ID) },
{ BT_DEVICE_WACOM(HID_ANY_ID) },
{ }
};
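An aside on the (char) -> (signed char) change in the tilt hunk above: the signedness of plain char is implementation-defined in C, and it is unsigned on several architectures Linux supports (arm, arm64, s390, powerpc), so the old cast silently turned negative tilt values into large positive ones there. A standalone illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned char raw = 0xF6;	/* device reports a tilt of -10 */

		/* plain char is unsigned on some ABIs: prints 246 there, -10 on x86 */
		printf("as char:        %d\n", (char)raw);
		/* signed char is -10 on every architecture */
		printf("as signed char: %d\n", (signed char)raw);
		return 0;
	}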
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index c8803d5c6a71..0c3c6a6aaae9 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -300,7 +300,7 @@ struct hid_data {
__s16 inputmode_index; /* InputMode HID feature index in the report */
bool sense_state;
bool inrange_state;
- bool invert_state;
+ bool eraser;
bool tipswitch;
bool barrelswitch;
bool barrelswitch2;
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 15cb759151e6..eeacc427fd65 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -607,7 +607,7 @@ MODULE_DEVICE_TABLE(of, omap_ssi_of_match);
static struct platform_driver ssi_pdriver = {
.probe = ssi_probe,
- .remove_new = ssi_remove,
+ .remove = ssi_remove,
.driver = {
.name = "omap_ssi",
.pm = DEV_PM_OPS,
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index f0b3eca7376e..aeb92b803a17 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -1385,7 +1385,7 @@ MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
struct platform_driver ssi_port_pdriver = {
.probe = ssi_port_probe,
- .remove_new = ssi_port_remove,
+ .remove = ssi_port_remove,
.driver = {
.name = "omap_ssi_port",
.of_match_table = omap_ssi_port_of_match,
diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
index df631b5100d2..f890b79723af 100644
--- a/drivers/hte/hte-tegra194-test.c
+++ b/drivers/hte/hte-tegra194-test.c
@@ -226,7 +226,7 @@ static void tegra_hte_test_remove(struct platform_device *pdev)
static struct platform_driver tegra_hte_test_driver = {
.probe = tegra_hte_test_probe,
- .remove_new = tegra_hte_test_remove,
+ .remove = tegra_hte_test_remove,
.driver = {
.name = "tegra_hte_test",
.of_match_table = tegra_hte_test_of_match,
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 3c6011a48dab..6e084c207414 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -944,16 +944,6 @@ void vmbus_initiate_unload(bool crash)
vmbus_wait_for_unload();
}
-static void check_ready_for_resume_event(void)
-{
- /*
- * If all the old primary channels have been fixed up, then it's safe
- * to resume.
- */
- if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
- complete(&vmbus_connection.ready_for_resume_event);
-}
-
static void vmbus_setup_channel_state(struct vmbus_channel *channel,
struct vmbus_channel_offer_channel *offer)
{
@@ -1109,8 +1099,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
/* Add the channel back to the array of channels. */
vmbus_channel_map_relid(oldchannel);
- check_ready_for_resume_event();
-
mutex_unlock(&vmbus_connection.channel_mutex);
return;
}
@@ -1296,13 +1284,28 @@ EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
/*
* vmbus_onoffers_delivered -
- * This is invoked when all offers have been delivered.
+ * The CHANNELMSG_ALLOFFERS_DELIVERED message arrives after all
+ * boot-time offers are delivered. A boot-time offer is for the primary
+ * channel for any virtual hardware configured in the VM at the time it boots.
+ * Boot-time offers include offers for physical devices assigned to the VM
+ * via Hyper-V's Discrete Device Assignment (DDA) functionality that are
+ * handled as virtual PCI devices in Linux (e.g., NVMe devices and GPUs).
+ * Boot-time offers do not include offers for VMBus sub-channels. Because
+ * devices can be hot-added to the VM after it is booted, additional channel
+ * offers that aren't boot-time offers can be received at any time after the
+ * all-offers-delivered message.
*
- * Nothing to do here.
+ * SR-IOV NIC Virtual Functions (VFs) assigned to a VM are not considered
+ * to be assigned to the VM at boot-time, and offers for VFs may occur after
+ * the all-offers-delivered message. VFs are optional accelerators to the
+ * synthetic VMBus NIC and are effectively hot-added only after the VMBus
+ * NIC channel is opened (once it knows the guest can support it, via the
+ * sriov bit in the netvsc protocol).
*/
static void vmbus_onoffers_delivered(
struct vmbus_channel_message_header *hdr)
{
+ complete(&vmbus_connection.all_offers_delivered_event);
}
/*
@@ -1578,7 +1581,8 @@ void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
}
/*
- * vmbus_request_offers - Send a request to get all our pending offers.
+ * vmbus_request_offers - Send a request to get all our pending offers
+ * and wait for all boot-time offers to arrive.
*/
int vmbus_request_offers(void)
{
@@ -1596,6 +1600,10 @@ int vmbus_request_offers(void)
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
+ /*
+ * This REQUESTOFFERS message will result in the host sending an all
+ * offers delivered message after all the boot-time offers are sent.
+ */
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
true);
@@ -1607,6 +1615,29 @@ int vmbus_request_offers(void)
goto cleanup;
}
+ /*
+ * Wait for the host to send all boot-time offers.
+	 * This is a best-effort mechanism: if the wait times out, a warning
+	 * is printed and execution resumes.
+ */
+ if (!wait_for_completion_timeout(&vmbus_connection.all_offers_delivered_event,
+ secs_to_jiffies(60))) {
+ pr_warn("timed out waiting for all boot-time offers to be delivered.\n");
+ }
+
+ /*
+ * Flush handling of offer messages (which may initiate work on
+ * other work queues).
+ */
+ flush_workqueue(vmbus_connection.work_queue);
+
+ /*
+ * Flush workqueue for processing the incoming offers. Subchannel
+ * offers and their processing can happen later, so there is no need to
+ * flush that workqueue here.
+ */
+ flush_workqueue(vmbus_connection.handle_primary_chan_wq);
+
cleanup:
kfree(msginfo);
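Condensing the synchronization this patch adds (the identifiers are the ones introduced above): vmbus_request_offers() posts CHANNELMSG_REQUESTOFFERS, blocks on a completion that the ALLOFFERS_DELIVERED handler signals, and then flushes the offer workqueues so every boot-time channel exists before the caller proceeds. Schematically:

	/* signaler: handler for CHANNELMSG_ALLOFFERS_DELIVERED */
	complete(&vmbus_connection.all_offers_delivered_event);

	/* waiter: vmbus_request_offers(), best effort with a 60s cap */
	if (!wait_for_completion_timeout(&vmbus_connection.all_offers_delivered_event,
					 secs_to_jiffies(60)))
		pr_warn("timed out waiting for boot-time offers\n");
	flush_workqueue(vmbus_connection.work_queue);
	flush_workqueue(vmbus_connection.handle_primary_chan_wq);

Since struct completion counts signals, an all-offers-delivered message that arrives only after the timeout simply leaves the completion signaled; nothing is lost or wedged.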
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index f001ae880e1d..8351360bba16 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -34,8 +34,8 @@ struct vmbus_connection vmbus_connection = {
.ready_for_suspend_event = COMPLETION_INITIALIZER(
vmbus_connection.ready_for_suspend_event),
- .ready_for_resume_event = COMPLETION_INITIALIZER(
- vmbus_connection.ready_for_resume_event),
+ .all_offers_delivered_event = COMPLETION_INITIALIZER(
+ vmbus_connection.all_offers_delivered_event),
};
EXPORT_SYMBOL_GPL(vmbus_connection);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index c38dcdfcb914..fec2f18679e3 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -28,7 +28,7 @@
#include <linux/sizes.h>
#include <linux/hyperv.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
@@ -756,7 +756,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
* adding succeeded, it is ok to proceed even if the memory was
* not onlined in time.
*/
- wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
+ wait_for_completion_timeout(&dm_device.ol_waitevent, secs_to_jiffies(5));
post_status(&dm_device);
}
}
@@ -766,16 +766,18 @@ static void hv_online_page(struct page *pg, unsigned int order)
struct hv_hotadd_state *has;
unsigned long pfn = page_to_pfn(pg);
- guard(spinlock_irqsave)(&dm_device.ha_lock);
- list_for_each_entry(has, &dm_device.ha_region_list, list) {
- /* The page belongs to a different HAS. */
- if (pfn < has->start_pfn ||
- (pfn + (1UL << order) > has->end_pfn))
- continue;
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
+ /* The page belongs to a different HAS. */
+ if (pfn < has->start_pfn ||
+ (pfn + (1UL << order) > has->end_pfn))
+ continue;
- hv_bring_pgs_online(has, pfn, 1UL << order);
- break;
+ hv_bring_pgs_online(has, pfn, 1UL << order);
+ return;
+ }
}
+ generic_online_page(pg, order);
}
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
@@ -1373,7 +1375,8 @@ static int dm_thread_func(void *dm_dev)
struct hv_dynmem_device *dm = dm_dev;
while (!kthread_should_stop()) {
- wait_for_completion_interruptible_timeout(&dm_device.config_event, 1 * HZ);
+ wait_for_completion_interruptible_timeout(&dm_device.config_event,
+ secs_to_jiffies(1));
/*
* The host expects us to post information on the memory
* pressure every second.
@@ -1585,7 +1588,7 @@ static int hv_free_page_report(struct page_reporting_dev_info *pr_dev_info,
return -ENOSPC;
}
- hint->type = HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD;
+ hint->heat_type = HV_EXTMEM_HEAT_HINT_COLD_DISCARD;
hint->reserved = 0;
for_each_sg(sgl, sg, nents, i) {
union hv_gpa_page_range *range;
@@ -1748,7 +1751,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
if (ret)
goto out;
- t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
+ t = wait_for_completion_timeout(&dm_device.host_event, secs_to_jiffies(5));
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
@@ -1806,7 +1809,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
if (ret)
goto out;
- t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
+ t = wait_for_completion_timeout(&dm_device.host_event, secs_to_jiffies(5));
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 7a35c82976e0..f2e6f55d6ca6 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -28,7 +28,7 @@
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
/*
@@ -141,7 +141,7 @@ static int sysctl_record_panic_msg = 1;
* sysctl option to allow the user to control whether kmsg data should be
* reported to Hyper-V on panic.
*/
-static struct ctl_table hv_ctl_table[] = {
+static const struct ctl_table hv_ctl_table[] = {
{
.procname = "hyperv_record_panic_msg",
.data = &sysctl_record_panic_msg,
@@ -278,6 +278,11 @@ static void hv_kmsg_dump_register(void)
}
}
+static inline bool hv_output_page_exists(void)
+{
+ return hv_root_partition || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+}
+
int __init hv_common_init(void)
{
int i;
@@ -340,19 +345,19 @@ int __init hv_common_init(void)
BUG_ON(!hyperv_pcpu_input_arg);
/* Allocate the per-CPU state for output arg for root */
- if (hv_root_partition) {
+ if (hv_output_page_exists()) {
hyperv_pcpu_output_arg = alloc_percpu(void *);
BUG_ON(!hyperv_pcpu_output_arg);
}
- hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
+ hv_vp_index = kmalloc_array(nr_cpu_ids, sizeof(*hv_vp_index),
GFP_KERNEL);
if (!hv_vp_index) {
hv_common_free();
return -ENOMEM;
}
- for (i = 0; i < num_possible_cpus(); i++)
+ for (i = 0; i < nr_cpu_ids; i++)
hv_vp_index[i] = VP_INVAL;
return 0;
@@ -435,7 +440,7 @@ int hv_common_cpu_init(unsigned int cpu)
void **inputarg, **outputarg;
u64 msr_vp_index;
gfp_t flags;
- int pgcount = hv_root_partition ? 2 : 1;
+ const int pgcount = hv_output_page_exists() ? 2 : 1;
void *mem;
int ret;
@@ -453,7 +458,7 @@ int hv_common_cpu_init(unsigned int cpu)
if (!mem)
return -ENOMEM;
- if (hv_root_partition) {
+ if (hv_output_page_exists()) {
outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
*outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index d35b60c06114..62795f6cbb00 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -27,7 +27,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@@ -655,7 +655,7 @@ void hv_kvp_onchannelcallback(void *context)
if (host_negotiatied == NEGO_NOT_STARTED) {
host_negotiatied = NEGO_IN_PROGRESS;
schedule_delayed_work(&kvp_host_handshake_work,
- HV_UTIL_NEGO_TIMEOUT * HZ);
+ secs_to_jiffies(HV_UTIL_NEGO_TIMEOUT));
}
return;
}
@@ -724,7 +724,7 @@ void hv_kvp_onchannelcallback(void *context)
*/
schedule_work(&kvp_sendkey_work);
schedule_delayed_work(&kvp_timeout_work,
- HV_UTIL_TIMEOUT * HZ);
+ secs_to_jiffies(HV_UTIL_TIMEOUT));
return;
@@ -767,6 +767,12 @@ hv_kvp_init(struct hv_util_service *srv)
*/
kvp_transaction.state = HVUTIL_DEVICE_INIT;
+ return 0;
+}
+
+int
+hv_kvp_init_transport(void)
+{
hvt = hvutil_transport_init(kvp_devname, CN_KVP_IDX, CN_KVP_VAL,
kvp_on_msg, kvp_on_reset);
if (!hvt)
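Several hunks in this file, hv_balloon.c and hv_snapshot.c convert open-coded N * HZ timeouts to secs_to_jiffies(N). For these constants the generated code is identical; the helper only makes the unit explicit. Simplified, it is essentially:

	/* simplified from include/linux/jiffies.h */
	#define secs_to_jiffies(_secs)	((_secs) * HZ)

	/* before: the reader must know the constant is in seconds */
	schedule_delayed_work(&kvp_timeout_work, HV_UTIL_TIMEOUT * HZ);
	/* after: the unit is part of the call */
	schedule_delayed_work(&kvp_timeout_work, secs_to_jiffies(HV_UTIL_TIMEOUT));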
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 0d2184be1691..2e7f537d53cf 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -12,7 +12,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@@ -193,7 +193,8 @@ static void vss_send_op(void)
vss_transaction.state = HVUTIL_USERSPACE_REQ;
schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
- VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
+ secs_to_jiffies(VSS_FREEZE_TIMEOUT) :
+ secs_to_jiffies(HV_UTIL_TIMEOUT));
rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
if (rc) {
@@ -388,6 +389,12 @@ hv_vss_init(struct hv_util_service *srv)
*/
vss_transaction.state = HVUTIL_DEVICE_INIT;
+ return 0;
+}
+
+int
+hv_vss_init_transport(void)
+{
hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
vss_on_msg, vss_on_reset);
if (!hvt) {
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index c4f525325790..36ee89c0358b 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -141,6 +141,7 @@ static struct hv_util_service util_heartbeat = {
static struct hv_util_service util_kvp = {
.util_cb = hv_kvp_onchannelcallback,
.util_init = hv_kvp_init,
+ .util_init_transport = hv_kvp_init_transport,
.util_pre_suspend = hv_kvp_pre_suspend,
.util_pre_resume = hv_kvp_pre_resume,
.util_deinit = hv_kvp_deinit,
@@ -149,6 +150,7 @@ static struct hv_util_service util_kvp = {
static struct hv_util_service util_vss = {
.util_cb = hv_vss_onchannelcallback,
.util_init = hv_vss_init,
+ .util_init_transport = hv_vss_init_transport,
.util_pre_suspend = hv_vss_pre_suspend,
.util_pre_resume = hv_vss_pre_resume,
.util_deinit = hv_vss_deinit,
@@ -590,10 +592,8 @@ static int util_probe(struct hv_device *dev,
srv->channel = dev->channel;
if (srv->util_init) {
ret = srv->util_init(srv);
- if (ret) {
- ret = -ENODEV;
+ if (ret)
goto error1;
- }
}
/*
@@ -613,6 +613,13 @@ static int util_probe(struct hv_device *dev,
if (ret)
goto error;
+ if (srv->util_init_transport) {
+ ret = srv->util_init_transport();
+ if (ret) {
+ vmbus_close(dev->channel);
+ goto error;
+ }
+ }
return 0;
error:
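The util_init/util_init_transport split visible in the hv_util.c hunks enforces an ordering: the userspace-facing transport (a char device that may start delivering messages immediately) is registered only after the VMBus channel is open. Condensed from util_probe() above, with error handling elided:

	ret = srv->util_init(srv);		/* 1: set up internal state only */
	/* ... */
	ret = vmbus_open(dev->channel, ...);	/* 2: channel ready to talk to the host */
	/* ... */
	if (srv->util_init_transport) {
		ret = srv->util_init_transport();	/* 3: finally expose the device */
		if (ret)
			vmbus_close(dev->channel);	/* unwind the open on failure */
	}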
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index d2856023d53c..29780f3a7478 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -15,10 +15,10 @@
#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
-#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>
+#include <hyperv/hvhdk.h>
#include "hv_trace.h"
@@ -287,18 +287,10 @@ struct vmbus_connection {
struct completion ready_for_suspend_event;
/*
- * The number of primary channels that should be "fixed up"
- * upon resume: these channels are re-offered upon resume, and some
- * fields of the channel offers (i.e. child_relid and connection_id)
- * can change, so the old offermsg must be fixed up, before the resume
- * callbacks of the VSC drivers start to further touch the channels.
+ * Completed once the host has offered all boot-time channels.
+ * Note that some channels may still be under process on a workqueue.
*/
- atomic_t nr_chan_fixup_on_resume;
- /*
- * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
- * drop to zero.
- */
- struct completion ready_for_resume_event;
+ struct completion all_offers_delivered_event;
};
@@ -370,12 +362,14 @@ void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);
int hv_kvp_init(struct hv_util_service *srv);
+int hv_kvp_init_transport(void);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);
int hv_vss_init(struct hv_util_service *srv);
+int hv_vss_init_transport(void);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 9b15f7daf505..0f6cd44fff29 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2427,11 +2427,6 @@ static int vmbus_bus_suspend(struct device *dev)
if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
wait_for_completion(&vmbus_connection.ready_for_suspend_event);
- if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
- pr_err("Can not suspend due to a previous failed resuming\n");
- return -EBUSY;
- }
-
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
@@ -2456,22 +2451,18 @@ static int vmbus_bus_suspend(struct device *dev)
pr_err("Sub-channel not deleted!\n");
WARN_ON_ONCE(1);
}
-
- atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
}
mutex_unlock(&vmbus_connection.channel_mutex);
vmbus_initiate_unload(false);
- /* Reset the event for the next resume. */
- reinit_completion(&vmbus_connection.ready_for_resume_event);
-
return 0;
}
static int vmbus_bus_resume(struct device *dev)
{
+ struct vmbus_channel *channel;
struct vmbus_channel_msginfo *msginfo;
size_t msgsize;
int ret;
@@ -2502,13 +2493,23 @@ static int vmbus_bus_resume(struct device *dev)
if (ret != 0)
return ret;
- WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
-
vmbus_request_offers();
- if (wait_for_completion_timeout(
- &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
- pr_err("Some vmbus device is missing after suspending?\n");
+ mutex_lock(&vmbus_connection.channel_mutex);
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (channel->offermsg.child_relid != INVALID_RELID)
+ continue;
+
+ /* hvsock channels are not expected to be present. */
+ if (is_hvsock_channel(channel))
+ continue;
+
+ pr_err("channel %pUl/%pUl not present after resume.\n",
+ &channel->offermsg.offer.if_type,
+ &channel->offermsg.offer.if_instance);
+ /* ToDo: Cleanup these channels here */
+ }
+ mutex_unlock(&vmbus_connection.channel_mutex);
/* Reset the event for the next suspend. */
reinit_completion(&vmbus_connection.ready_for_suspend_event);
@@ -2560,7 +2561,7 @@ static const struct dev_pm_ops vmbus_bus_pm = {
static struct platform_driver vmbus_platform_driver = {
.probe = vmbus_platform_driver_probe,
- .remove_new = vmbus_platform_driver_remove,
+ .remove = vmbus_platform_driver_remove,
.driver = {
.name = "vmbus",
.acpi_match_table = ACPI_PTR(vmbus_acpi_device_ids),
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 65ea92529406..4cbaba15d86e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -162,6 +162,7 @@ config SENSORS_ADM9240
tristate "Analog Devices ADM9240 and compatibles"
depends on I2C
select HWMON_VID
+ select REGMAP_I2C
help
If you say yes here you get support for Analog Devices ADM9240,
Dallas DS1780, National Semiconductor LM81 sensor chips.
@@ -223,6 +224,7 @@ config SENSORS_ADT7462
config SENSORS_ADT7470
tristate "Analog Devices ADT7470"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for the Analog Devices
ADT7470 temperature monitoring chips.
@@ -322,7 +324,7 @@ config SENSORS_K8TEMP
config SENSORS_K10TEMP
tristate "AMD Family 10h+ temperature sensor"
- depends on X86 && PCI && AMD_NB
+ depends on X86 && PCI && AMD_NODE
help
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
@@ -411,7 +413,7 @@ config SENSORS_ASPEED
will be called aspeed_pwm_tacho.
config SENSORS_ASPEED_G6
- tristate "ASPEED g6 PWM and Fan tach driver"
+ tristate "ASPEED G6 PWM and Fan tach driver"
depends on ARCH_ASPEED || COMPILE_TEST
depends on PWM
help
@@ -419,7 +421,7 @@ config SENSORS_ASPEED_G6
controllers.
This driver can also be built as a module. If so, the module
- will be called aspeed_pwm_tacho.
+ will be called aspeed_g6_pwm_tach.
config SENSORS_ATXP1
tristate "Attansic ATXP1 VID controller"
@@ -853,6 +855,17 @@ config SENSORS_CORETEMP
sensor inside your CPU. Most of the family 6 CPUs
are supported. Check Documentation/hwmon/coretemp.rst for details.
+config SENSORS_ISL28022
+ tristate "Renesas ISL28022"
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  If you say yes here you get support for the ISL28022 power monitor.
+ Check Documentation/hwmon/isl28022.rst for details.
+
+ This driver can also be built as a module. If so, the module
+ will be called isl28022.
+
config SENSORS_IT87
tristate "ITE IT87xx and compatibles"
depends on HAS_IOPORT
@@ -999,6 +1012,7 @@ config SENSORS_LTC2990
config SENSORS_LTC2991
tristate "Analog Devices LTC2991"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for Analog Devices LTC2991
Octal I2C Voltage, Current, and Temperature Monitor. The LTC2991
@@ -1146,6 +1160,7 @@ config SENSORS_MAX1619
config SENSORS_MAX1668
tristate "Maxim MAX1668 and compatibles"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for MAX1668, MAX1989 and
MAX1805 chips.
@@ -1275,6 +1290,7 @@ config SENSORS_MAX31790
config SENSORS_MC34VR500
tristate "NXP MC34VR500 hardware monitoring driver"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for the temperature and input
voltage sensors of the NXP MC34VR500.
@@ -1396,7 +1412,9 @@ config SENSORS_LM73
config SENSORS_LM75
tristate "National Semiconductor LM75 and compatibles"
depends on I2C
+ depends on I3C || !I3C
select REGMAP_I2C
+ select REGMAP_I3C if I3C
help
If you say yes here you get support for one common type of
temperature sensor chip, with models including:
@@ -1665,6 +1683,17 @@ config SENSORS_NCT6775_I2C
This driver can also be built as a module. If so, the module
will be called nct6775-i2c.
+config SENSORS_NCT7363
+ tristate "Nuvoton NCT7363Y"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Nuvoton NCT7363Y
+ hardware monitoring chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called nct7363.
+
config SENSORS_NCT7802
tristate "Nuvoton NCT7802Y"
depends on I2C
@@ -1747,7 +1776,7 @@ source "drivers/hwmon/occ/Kconfig"
config SENSORS_OXP
tristate "OneXPlayer EC fan control"
- depends on ACPI
+ depends on ACPI_EC
depends on X86
help
If you say yes here you get support for fan readings and control over
@@ -1795,6 +1824,18 @@ config SENSORS_PWM_FAN
This driver can also be built as a module. If so, the module
will be called pwm-fan.
+config SENSORS_QNAP_MCU_HWMON
+ tristate "QNAP MCU hardware monitoring"
+ depends on MFD_QNAP_MCU
+ depends on THERMAL || THERMAL=n
+ help
+	  Say yes here to enable support for the fan and temperature sensors
+	  connected to a QNAP MCU, as found in a number of QNAP network
+	  attached storage devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called qnap-mcu-hwmon.
+
config SENSORS_RASPBERRYPI_HWMON
tristate "Raspberry Pi voltage monitor"
depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
@@ -2162,11 +2203,12 @@ config SENSORS_INA2XX
select REGMAP_I2C
help
If you say yes here you get support for INA219, INA220, INA226,
- INA230, and INA231 power monitor chips.
+ INA230, INA231, INA260, and SY24655 power monitor chips.
The INA2xx driver is configured for the default configuration of
the part as described in the datasheet.
- Default value for Rshunt is 10 mOhms.
+	  Default value for Rshunt is 10 mOhms, except for the INA260, which
+	  has an internal 2 mOhm shunt resistor.
This driver can also be built as a module. If so, the module
will be called ina2xx.
@@ -2269,10 +2311,12 @@ config SENSORS_TMP103
config SENSORS_TMP108
tristate "Texas Instruments TMP108"
depends on I2C
+ depends on I3C || !I3C
select REGMAP_I2C
+ select REGMAP_I3C if I3C
help
If you say yes here you get support for Texas Instruments TMP108
- sensor chips.
+ sensor chips and NXP P3T1085.
This driver can also be built as a module. If so, the module
will be called tmp108.
@@ -2312,6 +2356,7 @@ config SENSORS_TMP464
config SENSORS_TMP513
tristate "Texas Instruments TMP513 and compatibles"
depends on I2C
+ select REGMAP_I2C
help
If you say yes here you get support for Texas Instruments TMP512,
and TMP513 temperature and power supply sensor chips.
@@ -2586,6 +2631,7 @@ config SENSORS_ASUS_WMI
config SENSORS_ASUS_EC
tristate "ASUS EC Sensors"
depends on X86
+ depends on ACPI_EC
help
If you say yes here you get support for the ACPI embedded controller
hardware monitoring interface found in ASUS motherboards. The driver
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 9554d2fdcf7b..b7ef0f0562d3 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
obj-$(CONFIG_SENSORS_INA238) += ina238.o
obj-$(CONFIG_SENSORS_INA3221) += ina3221.o
obj-$(CONFIG_SENSORS_INTEL_M10_BMC_HWMON) += intel-m10-bmc-hwmon.o
+obj-$(CONFIG_SENSORS_ISL28022) += isl28022.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
@@ -171,6 +172,7 @@ obj-$(CONFIG_SENSORS_NCT6775_CORE) += nct6775-core.o
nct6775-objs := nct6775-platform.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
obj-$(CONFIG_SENSORS_NCT6775_I2C) += nct6775-i2c.o
+obj-$(CONFIG_SENSORS_NCT7363) += nct7363.o
obj-$(CONFIG_SENSORS_NCT7802) += nct7802.o
obj-$(CONFIG_SENSORS_NCT7904) += nct7904.o
obj-$(CONFIG_SENSORS_NPCM7XX) += npcm750-pwm-fan.o
@@ -187,6 +189,7 @@ obj-$(CONFIG_SENSORS_POWERZ) += powerz.o
obj-$(CONFIG_SENSORS_POWR1220) += powr1220.o
obj-$(CONFIG_SENSORS_PT5161L) += pt5161l.o
obj-$(CONFIG_SENSORS_PWM_FAN) += pwm-fan.o
+obj-$(CONFIG_SENSORS_QNAP_MCU_HWMON) += qnap-mcu-hwmon.o
obj-$(CONFIG_SENSORS_RASPBERRYPI_HWMON) += raspberrypi-hwmon.o
obj-$(CONFIG_SENSORS_SBTSI) += sbtsi_temp.o
obj-$(CONFIG_SENSORS_SBRMI) += sbrmi.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 93653ea05430..ba8c68ae4595 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1531,7 +1531,7 @@ static struct platform_driver abituguru_driver = {
.pm = pm_sleep_ptr(&abituguru_pm),
},
.probe = abituguru_probe,
- .remove_new = abituguru_remove,
+ .remove = abituguru_remove,
};
static int __init abituguru_detect(void)
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 4501f0e49efb..b70330dc2198 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1147,12 +1147,12 @@ static int abituguru3_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(abituguru3_pm, abituguru3_suspend, abituguru3_resume);
static struct platform_driver abituguru3_driver = {
- .driver = {
+ .driver = {
.name = ABIT_UGURU3_NAME,
.pm = pm_sleep_ptr(&abituguru3_pm),
},
.probe = abituguru3_probe,
- .remove_new = abituguru3_remove,
+ .remove = abituguru3_remove,
};
static int __init abituguru3_dmi_detect(void)
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 6c8a9c863528..44afb07409a4 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -84,6 +84,7 @@ struct acpi_power_meter_resource {
u64 power;
u64 cap;
u64 avg_interval;
+ bool power_alarm;
int sensors_valid;
unsigned long sensors_last_updated;
struct sensor_device_attribute sensors[NUM_SENSORS];
@@ -292,8 +293,8 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
+ unsigned long temp, trip_bk;
int res;
- unsigned long temp;
res = kstrtoul(buf, 10, &temp);
if (res)
@@ -301,13 +302,15 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
temp = DIV_ROUND_CLOSEST(temp, 1000);
- mutex_lock(&resource->lock);
+ guard(mutex)(&resource->lock);
+
+ trip_bk = resource->trip[attr->index - 7];
resource->trip[attr->index - 7] = temp;
res = set_acpi_trip(resource);
- mutex_unlock(&resource->lock);
-
- if (res)
+ if (res) {
+ resource->trip[attr->index - 7] = trip_bk;
return res;
+ }
return count;
}
@@ -396,6 +399,9 @@ static ssize_t show_val(struct device *dev,
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
u64 val = 0;
+ int ret;
+
+ guard(mutex)(&resource->lock);
switch (attr->index) {
case 0:
@@ -423,10 +429,17 @@ static ssize_t show_val(struct device *dev,
val = 0;
break;
case 6:
- if (resource->power > resource->cap)
- val = 1;
- else
- val = 0;
+ ret = update_meter(resource);
+ if (ret)
+ return ret;
+		/* Also update the cap here if cap-change notifications are not supported. */
+ if (!(resource->caps.flags & POWER_METER_CAN_NOTIFY)) {
+ ret = update_cap(resource);
+ if (ret)
+ return ret;
+ }
+ val = resource->power_alarm || resource->power > resource->cap;
+ resource->power_alarm = resource->power > resource->cap;
break;
case 7:
case 8:
@@ -680,8 +693,9 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
{
int res = 0;
+ /* _PMD method is optional. */
res = read_domain_devices(resource);
- if (res)
+ if (res && res != -ENODEV)
return res;
if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
@@ -846,12 +860,20 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
break;
case METER_NOTIFY_CAP:
+ mutex_lock(&resource->lock);
+ res = update_cap(resource);
+ if (res)
+ dev_err_once(&device->dev, "update cap failed when capping value is changed.\n");
+ mutex_unlock(&resource->lock);
sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME);
break;
case METER_NOTIFY_INTERVAL:
sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME);
break;
case METER_NOTIFY_CAPPING:
+ mutex_lock(&resource->lock);
+ resource->power_alarm = true;
+ mutex_unlock(&resource->lock);
sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME);
dev_info(&device->dev, "Capping in progress.\n");
break;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index ca466d12475a..5f2541c11fe9 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1735,11 +1735,10 @@ static int adt7475_pwm_properties_parse_args(struct fwnode_handle *fwnode,
static int adt7475_fan_pwm_config(struct i2c_client *client)
{
struct adt7475_data *data = i2c_get_clientdata(client);
- struct fwnode_handle *child;
struct adt7475_pwm_config cfg = {};
int ret;
- device_for_each_child_node(&client->dev, child) {
+ device_for_each_child_node_scoped(&client->dev, child) {
if (!fwnode_property_present(child, "pwms"))
continue;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index ac64b407ed0e..1e3c6acd8974 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -22,6 +22,7 @@
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -893,7 +894,6 @@ static bool amc6821_volatile_reg(struct device *dev, unsigned int reg)
static const struct regmap_config amc6821_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = AMC6821_REG_CONF3,
.volatile_reg = amc6821_volatile_reg,
.cache_type = REGCACHE_MAPLE,
};
@@ -920,6 +920,13 @@ static int amc6821_probe(struct i2c_client *client)
if (err)
return err;
+ if (of_device_is_compatible(dev->of_node, "tsd,mule")) {
+ err = devm_of_platform_populate(dev);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Failed to create sub-devices\n");
+ }
+
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &amc6821_chip_info,
amc6821_groups);
@@ -927,7 +934,7 @@ static int amc6821_probe(struct i2c_client *client)
}
static const struct i2c_device_id amc6821_id[] = {
- { "amc6821", 0 },
+ { "amc6821" },
{ }
};
@@ -937,6 +944,9 @@ static const struct of_device_id __maybe_unused amc6821_of_match[] = {
{
.compatible = "ti,amc6821",
},
+ {
+ .compatible = "tsd,mule",
+ },
{ }
};
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index 34cac27e4dde..0dcb8a3a691d 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -597,7 +597,7 @@ struct aqc_data {
/* Sensor values */
s32 temp_input[20]; /* Max 4 physical and 16 virtual or 8 physical and 12 virtual */
- s32 speed_input[8];
+ s32 speed_input[9];
u32 speed_input_min[1];
u32 speed_input_target[1];
u32 speed_input_max[1];
diff --git a/drivers/hwmon/aspeed-g6-pwm-tach.c b/drivers/hwmon/aspeed-g6-pwm-tach.c
index 75eadda738ab..4174b129d1fc 100644
--- a/drivers/hwmon/aspeed-g6-pwm-tach.c
+++ b/drivers/hwmon/aspeed-g6-pwm-tach.c
@@ -534,7 +534,7 @@ MODULE_DEVICE_TABLE(of, aspeed_pwm_tach_match);
static struct platform_driver aspeed_pwm_tach_driver = {
.probe = aspeed_pwm_tach_probe,
- .remove_new = aspeed_pwm_tach_remove,
+ .remove = aspeed_pwm_tach_remove,
.driver = {
.name = "aspeed-g6-pwm-tach",
.of_match_table = aspeed_pwm_tach_match,
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 9555366aeaf0..43e54dc513da 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -250,6 +250,8 @@ static const struct ec_sensor_info sensors_family_amd_600[] = {
EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
[ec_sensor_temp_water_out] =
EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
};
static const struct ec_sensor_info sensors_family_intel_300[] = {
@@ -477,6 +479,15 @@ static const struct ec_board_info board_info_zenith_ii_extreme = {
.family = family_amd_500_series,
};
+static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \
{ \
.matches = { \
@@ -538,6 +549,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_zenith_ii_extreme),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME ALPHA",
&board_info_zenith_ii_extreme),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("TUF GAMING X670E-PLUS",
+ &board_info_tuf_gaming_x670e_plus),
{},
};
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 1dc7e24fe4c5..c80350e499e9 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -17,6 +17,7 @@
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/acpi.h>
+#include <linux/string_choices.h>
#define ATK_HID "ATK0110"
@@ -441,7 +442,7 @@ static void atk_print_sensor(struct atk_data *data, union acpi_object *obj)
flags->integer.value,
name->string.pointer,
limit1->integer.value, limit2->integer.value,
- enable->integer.value ? "enabled" : "disabled");
+ str_enabled_disabled(enable->integer.value));
#endif
}
@@ -1074,8 +1075,7 @@ static int atk_ec_enabled(struct atk_data *data)
err = -EIO;
} else {
err = (buf->value != 0);
- dev_dbg(dev, "EC is %sabled\n",
- err ? "en" : "dis");
+ dev_dbg(dev, "EC is %s\n", str_enabled_disabled(err));
}
ACPI_FREE(obj);
@@ -1096,18 +1096,15 @@ static int atk_ec_ctl(struct atk_data *data, int enable)
obj = atk_sitm(data, &sitm);
if (IS_ERR(obj)) {
- dev_err(dev, "Failed to %sable the EC\n",
- enable ? "en" : "dis");
+ dev_err(dev, "Failed to %s the EC\n", str_enable_disable(enable));
return PTR_ERR(obj);
}
ec_ret = (struct atk_acpi_ret_buffer *)obj->buffer.pointer;
if (ec_ret->flags == 0) {
- dev_err(dev, "Failed to %sable the EC\n",
- enable ? "en" : "dis");
+ dev_err(dev, "Failed to %s the EC\n", str_enable_disable(enable));
err = -EIO;
} else {
- dev_info(dev, "EC %sabled\n",
- enable ? "en" : "dis");
+ dev_info(dev, "EC %s\n", str_enabled_disabled(enable));
}
ACPI_FREE(obj);
diff --git a/drivers/hwmon/chipcap2.c b/drivers/hwmon/chipcap2.c
index edf454474f11..9d071f7ca9d2 100644
--- a/drivers/hwmon/chipcap2.c
+++ b/drivers/hwmon/chipcap2.c
@@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
@@ -556,55 +557,40 @@ static int cc2_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
struct cc2_data *data = dev_get_drvdata(dev);
- int ret = 0;
- mutex_lock(&data->dev_access_lock);
+ guard(mutex)(&data->dev_access_lock);
switch (type) {
case hwmon_temp:
- ret = cc2_measurement(data, type, val);
- break;
+ return cc2_measurement(data, type, val);
case hwmon_humidity:
switch (attr) {
case hwmon_humidity_input:
- ret = cc2_measurement(data, type, val);
- break;
+ return cc2_measurement(data, type, val);
case hwmon_humidity_min:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_L_ON, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_L_ON, val);
case hwmon_humidity_min_hyst:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_L_OFF, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_L_OFF, val);
case hwmon_humidity_max:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_H_ON, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_H_ON, val);
case hwmon_humidity_max_hyst:
- ret = cc2_get_reg_val(data, CC2_R_ALARM_H_OFF, val);
- break;
+ return cc2_get_reg_val(data, CC2_R_ALARM_H_OFF, val);
case hwmon_humidity_min_alarm:
- ret = cc2_humidity_min_alarm_status(data, val);
- break;
+ return cc2_humidity_min_alarm_status(data, val);
case hwmon_humidity_max_alarm:
- ret = cc2_humidity_max_alarm_status(data, val);
- break;
+ return cc2_humidity_max_alarm_status(data, val);
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- break;
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
-
- mutex_unlock(&data->dev_access_lock);
-
- return ret;
}
static int cc2_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long val)
{
struct cc2_data *data = dev_get_drvdata(dev);
- int ret;
u16 arg;
u8 cmd;
@@ -614,41 +600,28 @@ static int cc2_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
if (val < 0 || val > CC2_RH_MAX)
return -EINVAL;
- mutex_lock(&data->dev_access_lock);
+ guard(mutex)(&data->dev_access_lock);
switch (attr) {
case hwmon_humidity_min:
cmd = CC2_W_ALARM_L_ON;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
case hwmon_humidity_min_hyst:
cmd = CC2_W_ALARM_L_OFF;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
case hwmon_humidity_max:
cmd = CC2_W_ALARM_H_ON;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
case hwmon_humidity_max_hyst:
cmd = CC2_W_ALARM_H_OFF;
arg = cc2_rh_to_reg(val);
- ret = cc2_write_reg(data, cmd, arg);
- break;
-
+ return cc2_write_reg(data, cmd, arg);
default:
- ret = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
-
- mutex_unlock(&data->dev_access_lock);
-
- return ret;
}
static int cc2_request_ready_irq(struct cc2_data *data, struct device *dev)
diff --git a/drivers/hwmon/cros_ec_hwmon.c b/drivers/hwmon/cros_ec_hwmon.c
index 5514cf780b8b..9991c3fa020a 100644
--- a/drivers/hwmon/cros_ec_hwmon.c
+++ b/drivers/hwmon/cros_ec_hwmon.c
@@ -141,6 +141,7 @@ static umode_t cros_ec_hwmon_is_visible(const void *data, enum hwmon_sensor_type
}
static const struct hwmon_channel_info * const cros_ec_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 7fb0c57dfef5..588e96790850 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -473,7 +473,7 @@ static void da9052_hwmon_remove(struct platform_device *pdev)
static struct platform_driver da9052_hwmon_driver = {
.probe = da9052_hwmon_probe,
- .remove_new = da9052_hwmon_remove,
+ .remove = da9052_hwmon_remove,
.driver = {
.name = "da9052-hwmon",
},
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index f5bdf842040e..cd00adaad1b4 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1545,6 +1545,14 @@ static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
.driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
},
{
+ .ident = "Dell XPS 13 9370",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 13 9370"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_30A3_31A3],
+ },
+ {
.ident = "Dell Optiplex 7000",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 1a9b28dc91e6..3d4057309950 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -2721,7 +2721,7 @@ static struct platform_driver dme1737_isa_driver = {
.name = "dme1737",
},
.probe = dme1737_isa_probe,
- .remove_new = dme1737_isa_remove,
+ .remove = dme1737_isa_remove,
};
/* ---------------------------------------------------------------------
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 6bdd21aa005a..291d91f68646 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -165,6 +165,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
{
u8 scsi_cmd[MAX_COMMAND_SIZE];
enum req_op op;
+ int err;
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
@@ -192,8 +193,11 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
scsi_cmd[12] = lba_high;
scsi_cmd[14] = ata_command;
- return scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
- ATA_SECT_SIZE, HZ, 5, NULL);
+ err = scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
+ ATA_SECT_SIZE, 10 * HZ, 5, NULL);
+ if (err > 0)
+ err = -EIO;
+ return err;
}
static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
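Context for the drivetemp return-value fix above: scsi_execute_cmd() returns a negative errno if the command could not be issued, but a positive SCSI result word if the device completed it with an error status. Callers of drivetemp_scsi_command() only understand errnos, hence the mapping:

	err = scsi_execute_cmd(...);	/* < 0: errno; > 0: SCSI status */
	if (err > 0)
		err = -EIO;		/* normalize to an errno for callers */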
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 243c570dee4c..820f894d9ffd 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1497,7 +1497,7 @@ static struct platform_driver f71805f_driver = {
.name = DRVNAME,
},
.probe = f71805f_probe,
- .remove_new = f71805f_remove,
+ .remove = f71805f_remove,
};
static int __init f71805f_device_add(unsigned short address,
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 7c941d320a18..df83f9866fbc 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -2658,7 +2658,7 @@ static struct platform_driver f71882fg_driver = {
.name = DRVNAME,
},
.probe = f71882fg_probe,
- .remove_new = f71882fg_remove,
+ .remove = f71882fg_remove,
};
static int __init f71882fg_init(void)
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index 4514f3ed90cc..14a6385cd7cc 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -231,15 +231,8 @@ gsc_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type,
return 0;
}
-static umode_t
-gsc_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr,
- int ch)
-{
- return 0444;
-}
-
static const struct hwmon_ops gsc_hwmon_ops = {
- .is_visible = gsc_hwmon_is_visible,
+ .visible = 0444,
.read = gsc_hwmon_read,
.read_string = gsc_hwmon_read_string,
};
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 9c35c4d0369d..9703d60e9bbf 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -145,13 +145,19 @@ static const struct class hwmon_class = {
static DEFINE_IDA(hwmon_ida);
+static umode_t hwmon_is_visible(const struct hwmon_ops *ops,
+ const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (ops->visible)
+ return ops->visible;
+
+ return ops->is_visible(drvdata, type, attr, channel);
+}
+
/* Thermal zone handling */
-/*
- * The complex conditional is necessary to avoid a cyclic dependency
- * between hwmon and thermal_sys modules.
- */
-#ifdef CONFIG_THERMAL_OF
static int hwmon_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct hwmon_thermal_data *tdata = thermal_zone_device_priv(tz);
@@ -257,6 +263,9 @@ static int hwmon_thermal_register_sensors(struct device *dev)
void *drvdata = dev_get_drvdata(dev);
int i;
+ if (!IS_ENABLED(CONFIG_THERMAL_OF))
+ return 0;
+
for (i = 1; info[i]; i++) {
int j;
@@ -267,8 +276,8 @@ static int hwmon_thermal_register_sensors(struct device *dev)
int err;
if (!(info[i]->config[j] & HWMON_T_INPUT) ||
- !chip->ops->is_visible(drvdata, hwmon_temp,
- hwmon_temp_input, j))
+ !hwmon_is_visible(chip->ops, drvdata, hwmon_temp,
+ hwmon_temp_input, j))
continue;
err = hwmon_thermal_add_sensor(dev, j);
@@ -285,6 +294,9 @@ static void hwmon_thermal_notify(struct device *dev, int index)
struct hwmon_device *hwdev = to_hwmon_device(dev);
struct hwmon_thermal_data *tzdata;
+ if (!IS_ENABLED(CONFIG_THERMAL_OF))
+ return;
+
list_for_each_entry(tzdata, &hwdev->tzdata, node) {
if (tzdata->index == index) {
thermal_zone_device_update(tzdata->tzd,
@@ -293,16 +305,6 @@ static void hwmon_thermal_notify(struct device *dev, int index)
}
}
-#else
-static int hwmon_thermal_register_sensors(struct device *dev)
-{
- return 0;
-}
-
-static void hwmon_thermal_notify(struct device *dev, int index) { }
-
-#endif /* IS_REACHABLE(CONFIG_THERMAL) && ... */
-
static int hwmon_attr_base(enum hwmon_sensor_types type)
{
if (type == hwmon_in || type == hwmon_intrusion)
@@ -330,7 +332,7 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
static DEFINE_MUTEX(hwmon_pec_mutex);
-static int hwmon_match_device(struct device *dev, void *data)
+static int hwmon_match_device(struct device *dev, const void *data)
{
return dev->class == &hwmon_class;
}
@@ -506,7 +508,7 @@ static struct attribute *hwmon_genattr(const void *drvdata,
const char *name;
bool is_string = is_string_attr(type, attr);
- mode = ops->is_visible(drvdata, type, attr, index);
+ mode = hwmon_is_visible(ops, drvdata, type, attr, index);
if (!mode)
return ERR_PTR(-ENOENT);
@@ -1033,7 +1035,7 @@ hwmon_device_register_with_info(struct device *dev, const char *name,
if (!dev || !name || !chip)
return ERR_PTR(-EINVAL);
- if (!chip->ops || !chip->ops->is_visible || !chip->info)
+ if (!chip->ops || !(chip->ops->visible || chip->ops->is_visible) || !chip->info)
return ERR_PTR(-EINVAL);
return __hwmon_device_register(dev, name, drvdata, chip, extra_groups);
@@ -1063,7 +1065,7 @@ hwmon_device_register_for_thermal(struct device *dev, const char *name,
return __hwmon_device_register(dev, name, drvdata, NULL, NULL);
}
-EXPORT_SYMBOL_NS_GPL(hwmon_device_register_for_thermal, HWMON_THERMAL);
+EXPORT_SYMBOL_NS_GPL(hwmon_device_register_for_thermal, "HWMON_THERMAL");
/**
* hwmon_device_register - register w/ hwmon
@@ -1168,6 +1170,12 @@ devm_hwmon_device_register_with_info(struct device *dev, const char *name,
if (!dev)
return ERR_PTR(-EINVAL);
+ if (!name) {
+ name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(name))
+ return ERR_CAST(name);
+ }
+
ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
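With the new hwmon_ops.visible field from the hwmon.c hunks above, a driver whose attributes all share one fixed mode no longer needs a boilerplate callback; the i5500_temp and gsc-hwmon conversions that follow in this diff are real instances. Schematically (foo_* are placeholders):

	/* before: a callback that ignores all of its arguments */
	static umode_t foo_is_visible(const void *drvdata, enum hwmon_sensor_types type,
				      u32 attr, int channel)
	{
		return 0444;
	}
	static const struct hwmon_ops foo_ops = {
		.is_visible = foo_is_visible,
		.read = foo_read,
	};

	/* after: one fixed mode for every attribute */
	static const struct hwmon_ops foo_ops = {
		.visible = 0444,
		.read = foo_read,
	};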
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
index 7b00b38c7f7b..2a530da21949 100644
--- a/drivers/hwmon/i5500_temp.c
+++ b/drivers/hwmon/i5500_temp.c
@@ -29,12 +29,6 @@
#define REG_CTCTRL 0xF7
#define REG_TSTIMER 0xF8
-static umode_t i5500_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
- int channel)
-{
- return 0444;
-}
-
static int i5500_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
long *val)
{
@@ -84,7 +78,7 @@ static int i5500_read(struct device *dev, enum hwmon_sensor_types type, u32 attr
}
static const struct hwmon_ops i5500_ops = {
- .is_visible = i5500_is_visible,
+ .visible = 0444,
.read = i5500_read,
};
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index 02f5d35dd319..b22e0423e324 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -568,7 +568,7 @@ static struct platform_driver i5k_amb_driver = {
.name = DRVNAME,
},
.probe = i5k_amb_probe,
- .remove_new = i5k_amb_remove,
+ .remove = i5k_amb_remove,
};
static int __init i5k_amb_init(void)
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index f0fa6d073627..345fe7db9de9 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -51,17 +51,26 @@
#define INA226_ALERT_LIMIT 0x07
#define INA226_DIE_ID 0xFF
-#define INA2XX_MAX_REGISTERS 8
+/* SY24655 register definitions */
+#define SY24655_EIN 0x0A
+#define SY24655_ACCUM_CONFIG 0x0D
+#define INA2XX_MAX_REGISTERS 0x0D
/* settings - depend on use case */
#define INA219_CONFIG_DEFAULT 0x399F /* PGA=8 */
#define INA226_CONFIG_DEFAULT 0x4527 /* averages=16 */
+#define INA260_CONFIG_DEFAULT 0x6527 /* averages=16 */
+#define SY24655_CONFIG_DEFAULT 0x4527 /* averages=16 */
+
+/* (only for sy24655) */
+#define SY24655_ACCUM_CONFIG_DEFAULT	0x044C	/* continuous mode, clear after read */
/* worst case is 68.10 ms (~14.6Hz, ina219) */
#define INA2XX_CONVERSION_RATE 15
#define INA2XX_MAX_DELAY 69 /* worst case delay in ms */
#define INA2XX_RSHUNT_DEFAULT 10000
+#define INA260_RSHUNT 2000
/* bit mask for reading the averaging setting in the configuration register */
#define INA226_AVG_RD_MASK GENMASK(11, 9)
@@ -95,6 +104,7 @@ static bool ina2xx_writeable_reg(struct device *dev, unsigned int reg)
case INA2XX_CALIBRATION:
case INA226_MASK_ENABLE:
case INA226_ALERT_LIMIT:
+ case SY24655_ACCUM_CONFIG:
return true;
default:
return false;
@@ -125,10 +135,13 @@ static const struct regmap_config ina2xx_regmap_config = {
.writeable_reg = ina2xx_writeable_reg,
};
-enum ina2xx_ids { ina219, ina226 };
+enum ina2xx_ids { ina219, ina226, ina260, sy24655 };
struct ina2xx_config {
u16 config_default;
+ bool has_alerts; /* chip supports alerts and limits */
+ bool has_ishunt; /* chip has internal shunt resistor */
+	bool has_power_average;		/* chip supports average power readings */
int calibration_value;
int shunt_div;
int bus_voltage_shift;
@@ -145,6 +158,7 @@ struct ina2xx_data {
long power_lsb_uW;
struct mutex config_lock;
struct regmap *regmap;
+ struct i2c_client *client;
};
static const struct ina2xx_config ina2xx_config[] = {
@@ -155,6 +169,9 @@ static const struct ina2xx_config ina2xx_config[] = {
.bus_voltage_shift = 3,
.bus_voltage_lsb = 4000,
.power_lsb_factor = 20,
+ .has_alerts = false,
+ .has_ishunt = false,
+ .has_power_average = false,
},
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
@@ -163,6 +180,30 @@ static const struct ina2xx_config ina2xx_config[] = {
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
.power_lsb_factor = 25,
+ .has_alerts = true,
+ .has_ishunt = false,
+ .has_power_average = false,
+ },
+ [ina260] = {
+ .config_default = INA260_CONFIG_DEFAULT,
+ .shunt_div = 400,
+ .bus_voltage_shift = 0,
+ .bus_voltage_lsb = 1250,
+ .power_lsb_factor = 8,
+ .has_alerts = true,
+ .has_ishunt = true,
+ .has_power_average = false,
+ },
+ [sy24655] = {
+ .config_default = SY24655_CONFIG_DEFAULT,
+ .calibration_value = 4096,
+ .shunt_div = 400,
+ .bus_voltage_shift = 0,
+ .bus_voltage_lsb = 1250,
+ .power_lsb_factor = 25,
+ .has_alerts = true,
+ .has_ishunt = false,
+ .has_power_average = true,
},
};
@@ -254,6 +295,15 @@ static int ina2xx_read_init(struct device *dev, int reg, long *val)
unsigned int regval;
int ret, retry;
+ if (data->config->has_ishunt) {
+ /* No calibration needed */
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+ *val = ina2xx_get_value(data, reg, regval);
+ return 0;
+ }
+
for (retry = 5; retry; retry--) {
ret = regmap_read(regmap, reg, &regval);
if (ret < 0)
@@ -459,6 +509,41 @@ static int ina2xx_in_read(struct device *dev, u32 attr, int channel, long *val)
return 0;
}
+/*
+ * Configuring the READ_EIN (bit 10) of the ACCUM_CONFIG register to 1
+ * can clear accumulator and sample_count after reading the EIN register.
+ * This way, the average power between the last read and the current
+ * read can be obtained. By combining with accurate time data from
+ * outside, the energy consumption during that period can be calculated.
+ */
+static int sy24655_average_power_read(struct ina2xx_data *data, u8 reg, long *val)
+{
+ u8 template[6];
+ int ret;
+ long accumulator_24, sample_count;
+
+ /* 48-bit register read */
+ ret = i2c_smbus_read_i2c_block_data(data->client, reg, 6, template);
+ if (ret < 0)
+ return ret;
+ if (ret != 6)
+ return -EIO;
+ accumulator_24 = ((template[3] << 16) |
+ (template[4] << 8) |
+ template[5]);
+ sample_count = ((template[0] << 16) |
+ (template[1] << 8) |
+ template[2]);
+ if (sample_count <= 0) {
+ *val = 0;
+ return 0;
+ }
+
+ *val = DIV_ROUND_CLOSEST(accumulator_24, sample_count) * data->power_lsb_uW;
+
+ return 0;
+}
+
static int ina2xx_power_read(struct device *dev, u32 attr, long *val)
{
struct ina2xx_data *data = dev_get_drvdata(dev);
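For the 6-byte SY24655_EIN read added above: bytes 0-2 hold a big-endian 24-bit sample count and bytes 3-5 a 24-bit power accumulator. A worked decode with made-up register contents:

	/* hypothetical raw read: template[] = { 0x00, 0x00, 0x10, 0x00, 0x08, 0x00 } */
	sample_count   = (0x00 << 16) | (0x00 << 8) | 0x10;	/* 16 samples */
	accumulator_24 = (0x00 << 16) | (0x08 << 8) | 0x00;	/* 2048 */
	/* *val = DIV_ROUND_CLOSEST(2048, 16) * power_lsb_uW = 128 * power_lsb_uW */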
@@ -466,6 +551,8 @@ static int ina2xx_power_read(struct device *dev, u32 attr, long *val)
switch (attr) {
case hwmon_power_input:
return ina2xx_read_init(dev, INA2XX_POWER, val);
+ case hwmon_power_average:
+ return sy24655_average_power_read(data, SY24655_EIN, val);
case hwmon_power_crit:
return ina226_alert_limit_read(data, INA226_POWER_OVER_LIMIT_MASK,
INA2XX_POWER, val);
@@ -624,6 +711,8 @@ static umode_t ina2xx_is_visible(const void *_data, enum hwmon_sensor_types type
u32 attr, int channel)
{
const struct ina2xx_data *data = _data;
+ bool has_alerts = data->config->has_alerts;
+ bool has_power_average = data->config->has_power_average;
enum ina2xx_ids chip = data->chip;
switch (type) {
@@ -633,12 +722,12 @@ static umode_t ina2xx_is_visible(const void *_data, enum hwmon_sensor_types type
return 0444;
case hwmon_in_lcrit:
case hwmon_in_crit:
- if (chip == ina226)
+ if (has_alerts)
return 0644;
break;
case hwmon_in_lcrit_alarm:
case hwmon_in_crit_alarm:
- if (chip == ina226)
+ if (has_alerts)
return 0444;
break;
default:
@@ -651,12 +740,12 @@ static umode_t ina2xx_is_visible(const void *_data, enum hwmon_sensor_types type
return 0444;
case hwmon_curr_lcrit:
case hwmon_curr_crit:
- if (chip == ina226)
+ if (has_alerts)
return 0644;
break;
case hwmon_curr_lcrit_alarm:
case hwmon_curr_crit_alarm:
- if (chip == ina226)
+ if (has_alerts)
return 0444;
break;
default:
@@ -668,11 +757,15 @@ static umode_t ina2xx_is_visible(const void *_data, enum hwmon_sensor_types type
case hwmon_power_input:
return 0444;
case hwmon_power_crit:
- if (chip == ina226)
+ if (has_alerts)
return 0644;
break;
case hwmon_power_crit_alarm:
- if (chip == ina226)
+ if (has_alerts)
+ return 0444;
+ break;
+ case hwmon_power_average:
+ if (has_power_average)
return 0444;
break;
default:
@@ -682,7 +775,7 @@ static umode_t ina2xx_is_visible(const void *_data, enum hwmon_sensor_types type
case hwmon_chip:
switch (attr) {
case hwmon_chip_update_interval:
- if (chip == ina226)
+ if (chip == ina226 || chip == ina260)
return 0644;
break;
default:
@@ -707,7 +800,8 @@ static const struct hwmon_channel_info * const ina2xx_info[] = {
HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_CRIT | HWMON_C_CRIT_ALARM |
HWMON_C_LCRIT | HWMON_C_LCRIT_ALARM),
HWMON_CHANNEL_INFO(power,
- HWMON_P_INPUT | HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ HWMON_P_INPUT | HWMON_P_CRIT | HWMON_P_CRIT_ALARM |
+ HWMON_P_AVERAGE),
NULL
};
@@ -791,7 +885,9 @@ static int ina2xx_init(struct device *dev, struct ina2xx_data *data)
u32 shunt;
int ret;
- if (device_property_read_u32(dev, "shunt-resistor", &shunt) < 0)
+ if (data->config->has_ishunt)
+ shunt = INA260_RSHUNT;
+ else if (device_property_read_u32(dev, "shunt-resistor", &shunt) < 0)
shunt = INA2XX_RSHUNT_DEFAULT;
ret = ina2xx_set_shunt(data, shunt);
@@ -802,7 +898,7 @@ static int ina2xx_init(struct device *dev, struct ina2xx_data *data)
if (ret < 0)
return ret;
- if (data->chip == ina226) {
+ if (data->config->has_alerts) {
bool active_high = device_property_read_bool(dev, "ti,alert-polarity-active-high");
regmap_update_bits(regmap, INA226_MASK_ENABLE,
@@ -810,6 +906,22 @@ static int ina2xx_init(struct device *dev, struct ina2xx_data *data)
INA226_ALERT_LATCH_ENABLE |
FIELD_PREP(INA226_ALERT_POLARITY, active_high));
}
+ if (data->config->has_power_average) {
+ if (data->chip == sy24655) {
+ /*
+ * Select continuous power accumulation and have the chip
+ * clear the accumulator after each read of the EIN register.
+ */
+ ret = regmap_write(regmap, SY24655_ACCUM_CONFIG,
+ SY24655_ACCUM_CONFIG_DEFAULT);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (data->config->has_ishunt)
+ return 0;
/*
* Calibration register is set to the best value, which eliminates
@@ -836,6 +948,7 @@ static int ina2xx_probe(struct i2c_client *client)
return -ENOMEM;
/* set the device type */
+ data->client = client;
data->config = &ina2xx_config[chip];
data->chip = chip;
mutex_init(&data->config_lock);
@@ -856,7 +969,8 @@ static int ina2xx_probe(struct i2c_client *client)
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &ina2xx_chip_info,
- ina2xx_groups);
+ data->config->has_ishunt ?
+ NULL : ina2xx_groups);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
@@ -872,12 +986,18 @@ static const struct i2c_device_id ina2xx_id[] = {
{ "ina226", ina226 },
{ "ina230", ina226 },
{ "ina231", ina226 },
+ { "ina260", ina260 },
+ { "sy24655", sy24655 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ina2xx_id);
static const struct of_device_id __maybe_unused ina2xx_of_match[] = {
{
+ .compatible = "silergy,sy24655",
+ .data = (void *)sy24655
+ },
+ {
.compatible = "ti,ina219",
.data = (void *)ina219
},
@@ -897,7 +1017,11 @@ static const struct of_device_id __maybe_unused ina2xx_of_match[] = {
.compatible = "ti,ina231",
.data = (void *)ina226
},
- { },
+ {
+ .compatible = "ti,ina260",
+ .data = (void *)ina260
+ },
+ { }
};
MODULE_DEVICE_TABLE(of, ina2xx_of_match);
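/*
 * Editor's illustration (not part of the patch): sy24655_average_power_read()
 * above splits one 48-bit EIN read into a 24-bit sample counter (bytes 0..2)
 * and a 24-bit power accumulator (bytes 3..5). A minimal standalone sketch of
 * the same decode, assuming that byte layout and the kernel helpers shown:
 */
static long sy24655_decode_ein(const u8 buf[6], long power_lsb_uW)
{
	long samples = (buf[0] << 16) | (buf[1] << 8) | buf[2];
	long accum = (buf[3] << 16) | (buf[4] << 8) | buf[5];

	if (samples <= 0)
		return 0;
	/* average power in uW since the accumulator was last cleared */
	return DIV_ROUND_CLOSEST(accum, samples) * power_lsb_uW;
}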
diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c
index ca2dff158925..aa01a4bedc21 100644
--- a/drivers/hwmon/intel-m10-bmc-hwmon.c
+++ b/drivers/hwmon/intel-m10-bmc-hwmon.c
@@ -358,7 +358,7 @@ static const struct m10bmc_sdata n6000bmc_temp_tbl[] = {
{ 0x4f0, 0x4f4, 0x4f8, 0x52c, 0x0, 500, "Board Top Near FPGA Temperature" },
{ 0x4fc, 0x500, 0x504, 0x52c, 0x0, 500, "Board Bottom Near CVL Temperature" },
{ 0x508, 0x50c, 0x510, 0x52c, 0x0, 500, "Board Top East Near VRs Temperature" },
- { 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "Columbiaville Die Temperature" },
+ { 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "CVL Die Temperature" },
{ 0x520, 0x524, 0x528, 0x52c, 0x0, 500, "Board Rear Side Temperature" },
{ 0x530, 0x534, 0x538, 0x52c, 0x0, 500, "Board Front Side Temperature" },
{ 0x53c, 0x540, 0x544, 0x0, 0x0, 500, "QSFP1 Case Temperature" },
@@ -565,13 +565,6 @@ static const struct m10bmc_hwmon_board_data n6000bmc_hwmon_bdata = {
.hinfo = n6000bmc_hinfo,
};
-static umode_t
-m10bmc_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
static const struct m10bmc_sdata *
find_sensor_data(struct m10bmc_hwmon *hw, enum hwmon_sensor_types type,
int channel)
@@ -729,7 +722,7 @@ static int m10bmc_hwmon_read_string(struct device *dev,
}
static const struct hwmon_ops m10bmc_hwmon_ops = {
- .is_visible = m10bmc_hwmon_is_visible,
+ .visible = 0444,
.read = m10bmc_hwmon_read,
.read_string = m10bmc_hwmon_read_string,
};
@@ -794,4 +787,4 @@ MODULE_DEVICE_TABLE(platform, intel_m10bmc_hwmon_ids);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel MAX 10 BMC hardware monitor");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(INTEL_M10_BMC_CORE);
+MODULE_IMPORT_NS("INTEL_M10_BMC_CORE");
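/*
 * Editor's note (illustrative sketch): the hunk above drops a trivial
 * is_visible() callback in favour of the constant-mode .visible field of
 * struct hwmon_ops, which recent kernels provide for drivers whose
 * attributes all share one access mode. example_read() is hypothetical:
 */
static const struct hwmon_ops example_hwmon_ops = {
	.visible = 0444,	/* every attribute is world-readable */
	.read = example_read,	/* hypothetical read handler */
};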
diff --git a/drivers/hwmon/isl28022.c b/drivers/hwmon/isl28022.c
new file mode 100644
index 000000000000..3f9b4520b53e
--- /dev/null
+++ b/drivers/hwmon/isl28022.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * isl28022.c - driver for the Renesas ISL28022 power monitor chip
+ *
+ * Copyright (c) 2023 Carsten Spieß <mail@carsten-spiess.de>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+/* ISL28022 registers */
+#define ISL28022_REG_CONFIG 0x00
+#define ISL28022_REG_SHUNT 0x01
+#define ISL28022_REG_BUS 0x02
+#define ISL28022_REG_POWER 0x03
+#define ISL28022_REG_CURRENT 0x04
+#define ISL28022_REG_CALIB 0x05
+#define ISL28022_REG_SHUNT_THR 0x06
+#define ISL28022_REG_BUS_THR 0x07
+#define ISL28022_REG_INT 0x08
+#define ISL28022_REG_AUX 0x09
+#define ISL28022_REG_MAX ISL28022_REG_AUX
+
+/* ISL28022 config flags */
+/* mode flags */
+#define ISL28022_MODE_SHIFT 0
+#define ISL28022_MODE_MASK 0x0007
+
+#define ISL28022_MODE_PWR_DOWN 0x0
+#define ISL28022_MODE_TRG_S 0x1
+#define ISL28022_MODE_TRG_B 0x2
+#define ISL28022_MODE_TRG_SB 0x3
+#define ISL28022_MODE_ADC_OFF 0x4
+#define ISL28022_MODE_CONT_S 0x5
+#define ISL28022_MODE_CONT_B 0x6
+#define ISL28022_MODE_CONT_SB 0x7
+
+/* shunt ADC settings */
+#define ISL28022_SADC_SHIFT 3
+#define ISL28022_SADC_MASK 0x0078
+
+#define ISL28022_BADC_SHIFT 7
+#define ISL28022_BADC_MASK 0x0780
+
+#define ISL28022_ADC_12 0x0 /* 12 bit ADC */
+#define ISL28022_ADC_13 0x1 /* 13 bit ADC */
+#define ISL28022_ADC_14 0x2 /* 14 bit ADC */
+#define ISL28022_ADC_15 0x3 /* 15 bit ADC */
+#define ISL28022_ADC_15_1 0x8 /* 15 bit ADC, 1 sample */
+#define ISL28022_ADC_15_2 0x9 /* 15 bit ADC, 2 samples */
+#define ISL28022_ADC_15_4 0xA /* 15 bit ADC, 4 samples */
+#define ISL28022_ADC_15_8 0xB /* 15 bit ADC, 8 samples */
+#define ISL28022_ADC_15_16 0xC /* 15 bit ADC, 16 samples */
+#define ISL28022_ADC_15_32 0xD /* 15 bit ADC, 32 samples */
+#define ISL28022_ADC_15_64 0xE /* 15 bit ADC, 64 samples */
+#define ISL28022_ADC_15_128 0xF /* 15 bit ADC, 128 samples */
+
+/* shunt voltage range */
+#define ISL28022_PG_SHIFT 11
+#define ISL28022_PG_MASK 0x1800
+
+#define ISL28022_PG_40 0x0 /* +/-40 mV */
+#define ISL28022_PG_80 0x1 /* +/-80 mV */
+#define ISL28022_PG_160 0x2 /* +/-160 mV */
+#define ISL28022_PG_320 0x3 /* +/-320 mV */
+
+/* bus voltage range */
+#define ISL28022_BRNG_SHIFT 13
+#define ISL28022_BRNG_MASK 0x6000
+
+#define ISL28022_BRNG_16 0x0 /* 16 V */
+#define ISL28022_BRNG_32 0x1 /* 32 V */
+#define ISL28022_BRNG_60 0x3 /* 60 V */
+
+/* reset */
+#define ISL28022_RESET 0x8000
+
+struct isl28022_data {
+ struct regmap *regmap;
+ u32 shunt;
+ u32 gain;
+ u32 average;
+};
+
+static int isl28022_read_in(struct device *dev, u32 attr, int channel, long *val)
+{
+ struct isl28022_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
+ int err;
+ u16 sign_bit;
+
+ switch (channel) {
+ case 0:
+ switch (attr) {
+ case hwmon_in_input:
+ err = regmap_read(data->regmap,
+ ISL28022_REG_BUS, &regval);
+ if (err < 0)
+ return err;
+ /* driver supports only the 60V bus range (BRNG = 0b11) */
+ *val = (long)(((u16)regval) & 0xFFFC);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case 1:
+ switch (attr) {
+ case hwmon_in_input:
+ err = regmap_read(data->regmap,
+ ISL28022_REG_SHUNT, &regval);
+ if (err < 0)
+ return err;
+ switch (data->gain) {
+ case 8:
+ sign_bit = (regval >> 15) & 0x01;
+ *val = (long)((((u16)regval) & 0x7FFF) -
+ (sign_bit * 32768)) / 100;
+ break;
+ case 4:
+ sign_bit = (regval >> 14) & 0x01;
+ *val = (long)((((u16)regval) & 0x3FFF) -
+ (sign_bit * 16384)) / 100;
+ break;
+ case 2:
+ sign_bit = (regval >> 13) & 0x01;
+ *val = (long)((((u16)regval) & 0x1FFF) -
+ (sign_bit * 8192)) / 100;
+ break;
+ case 1:
+ sign_bit = (regval >> 12) & 0x01;
+ *val = (long)((((u16)regval) & 0x0FFF) -
+ (sign_bit * 4096)) / 100;
+ break;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int isl28022_read_current(struct device *dev, u32 attr, long *val)
+{
+ struct isl28022_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
+ int err;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ err = regmap_read(data->regmap,
+ ISL28022_REG_CURRENT, &regval);
+ if (err < 0)
+ return err;
+ *val = ((long)regval * 1250L * (long)data->gain) /
+ (long)data->shunt;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int isl28022_read_power(struct device *dev, u32 attr, long *val)
+{
+ struct isl28022_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
+ int err;
+
+ switch (attr) {
+ case hwmon_power_input:
+ err = regmap_read(data->regmap,
+ ISL28022_REG_POWER, &regval);
+ if (err < 0)
+ return err;
+ *val = ((51200000L * ((long)data->gain)) /
+ (long)data->shunt) * (long)regval;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int isl28022_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ return isl28022_read_in(dev, attr, channel, val);
+ case hwmon_curr:
+ return isl28022_read_current(dev, attr, val);
+ case hwmon_power:
+ return isl28022_read_power(dev, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static umode_t isl28022_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *isl28022_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT, /* channel 0: bus voltage (mV) */
+ HWMON_I_INPUT), /* channel 1: shunt voltage (mV) */
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT), /* channel 1: current (mA) */
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT), /* channel 1: power (µW) */
+ NULL
+};
+
+static const struct hwmon_ops isl28022_hwmon_ops = {
+ .is_visible = isl28022_is_visible,
+ .read = isl28022_read,
+};
+
+static const struct hwmon_chip_info isl28022_chip_info = {
+ .ops = &isl28022_hwmon_ops,
+ .info = isl28022_info,
+};
+
+static bool isl28022_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ISL28022_REG_CONFIG:
+ case ISL28022_REG_CALIB:
+ case ISL28022_REG_SHUNT_THR:
+ case ISL28022_REG_BUS_THR:
+ case ISL28022_REG_INT:
+ case ISL28022_REG_AUX:
+ return true;
+ }
+
+ return false;
+}
+
+static bool isl28022_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ISL28022_REG_CONFIG:
+ case ISL28022_REG_SHUNT:
+ case ISL28022_REG_BUS:
+ case ISL28022_REG_POWER:
+ case ISL28022_REG_CURRENT:
+ case ISL28022_REG_INT:
+ case ISL28022_REG_AUX:
+ return true;
+ }
+ return false;
+}
+
+static const struct regmap_config isl28022_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = ISL28022_REG_MAX,
+ .writeable_reg = isl28022_is_writeable_reg,
+ .volatile_reg = isl28022_is_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int shunt_voltage_show(struct seq_file *seqf, void *unused)
+{
+ struct isl28022_data *data = seqf->private;
+ unsigned int regval;
+ int err;
+
+ err = regmap_read(data->regmap,
+ ISL28022_REG_SHUNT, &regval);
+ if (err)
+ return err;
+
+ /* print shunt voltage in microvolts */
+ seq_printf(seqf, "%d\n", regval * 10);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(shunt_voltage);
+
+static struct dentry *isl28022_debugfs_root;
+
+static void isl28022_debugfs_remove(void *res)
+{
+ debugfs_remove_recursive(res);
+}
+
+static void isl28022_debugfs_init(struct i2c_client *client, struct isl28022_data *data)
+{
+ char name[16];
+ struct dentry *debugfs;
+
+ scnprintf(name, sizeof(name), "%d-%04hx", client->adapter->nr, client->addr);
+
+ debugfs = debugfs_create_dir(name, isl28022_debugfs_root);
+ debugfs_create_file("shunt_voltage", 0444, debugfs, data, &shunt_voltage_fops);
+
+ devm_add_action_or_reset(&client->dev, isl28022_debugfs_remove, debugfs);
+}
+
+/*
+ * Read property values and perform consistency checks.
+ *
+ * The following shunt range / resistor combinations are allowed:
+ * 40 mV -> gain 1, shunt min. 800 micro ohms
+ * 80 mV -> gain 2, shunt min. 1600 micro ohms
+ * 160 mV -> gain 4, shunt min. 3200 micro ohms
+ * 320 mV -> gain 8, shunt min. 6400 micro ohms
+ */
+static int isl28022_read_properties(struct device *dev, struct isl28022_data *data)
+{
+ u32 val;
+ int err;
+
+ err = device_property_read_u32(dev, "shunt-resistor-micro-ohms", &val);
+ if (err == -EINVAL)
+ val = 10000;
+ else if (err < 0)
+ return err;
+ data->shunt = val;
+
+ err = device_property_read_u32(dev, "renesas,shunt-range-microvolt", &val);
+ if (err == -EINVAL)
+ val = 320000;
+ else if (err < 0)
+ return err;
+
+ switch (val) {
+ case 40000:
+ data->gain = 1;
+ if (data->shunt < 800)
+ goto shunt_invalid;
+ break;
+ case 80000:
+ data->gain = 2;
+ if (data->shunt < 1600)
+ goto shunt_invalid;
+ break;
+ case 160000:
+ data->gain = 4;
+ if (data->shunt < 3200)
+ goto shunt_invalid;
+ break;
+ case 320000:
+ data->gain = 8;
+ if (data->shunt < 6400)
+ goto shunt_invalid;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL,
+ "renesas,shunt-range-microvolt invalid value %d\n",
+ val);
+ }
+
+ err = device_property_read_u32(dev, "renesas,average-samples", &val);
+ if (err == -EINVAL)
+ val = 1;
+ else if (err < 0)
+ return err;
+ if (val > 128 || hweight32(val) != 1)
+ return dev_err_probe(dev, -EINVAL,
+ "renesas,average-samples invalid value %d\n",
+ val);
+
+ data->average = val;
+
+ return 0;
+
+shunt_invalid:
+ return dev_err_probe(dev, -EINVAL,
+ "shunt-resistor-micro-ohms invalid value %d\n",
+ data->shunt);
+}
+
+/*
+ * Write the configuration and calibration registers.
+ *
+ * The driver supports only continuous shunt and bus ADC mode at 15-bit
+ * resolution, with averaging over 1 to 128 samples (powers of two) on
+ * both channels. Shunt voltage gains of 1, 2, 4 or 8 are allowed.
+ * The bus voltage range is fixed at 60V.
+ */
+static int isl28022_config(struct isl28022_data *data)
+{
+ int err;
+ u16 config;
+ u16 calib;
+
+ config = (ISL28022_MODE_CONT_SB << ISL28022_MODE_SHIFT) |
+ (ISL28022_BRNG_60 << ISL28022_BRNG_SHIFT) |
+ (__ffs(data->gain) << ISL28022_PG_SHIFT) |
+ ((ISL28022_ADC_15_1 + __ffs(data->average)) << ISL28022_SADC_SHIFT) |
+ ((ISL28022_ADC_15_1 + __ffs(data->average)) << ISL28022_BADC_SHIFT);
+
+ calib = data->shunt ? 0x8000 / data->gain : 0;
+
+ err = regmap_write(data->regmap, ISL28022_REG_CONFIG, config);
+ if (err < 0)
+ return err;
+
+ return regmap_write(data->regmap, ISL28022_REG_CALIB, calib);
+}
+
+static int isl28022_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct isl28022_data *data;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(struct isl28022_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = isl28022_read_properties(dev, data);
+ if (err)
+ return err;
+
+ data->regmap = devm_regmap_init_i2c(client, &isl28022_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ err = isl28022_config(data);
+ if (err)
+ return err;
+
+ isl28022_debugfs_init(client, data);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &isl28022_chip_info, NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id isl28022_ids[] = {
+ { "isl28022" },
+ { /* LIST END */ }
+};
+MODULE_DEVICE_TABLE(i2c, isl28022_ids);
+
+static const struct of_device_id __maybe_unused isl28022_of_match[] = {
+ { .compatible = "renesas,isl28022"},
+ { /* LIST END */ }
+};
+MODULE_DEVICE_TABLE(of, isl28022_of_match);
+
+static struct i2c_driver isl28022_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "isl28022",
+ },
+ .probe = isl28022_probe,
+ .id_table = isl28022_ids,
+};
+
+static int __init isl28022_init(void)
+{
+ int err;
+
+ isl28022_debugfs_root = debugfs_create_dir("isl28022", NULL);
+ err = i2c_add_driver(&isl28022_driver);
+ if (!err)
+ return 0;
+
+ debugfs_remove_recursive(isl28022_debugfs_root);
+ return err;
+}
+module_init(isl28022_init);
+
+static void __exit isl28022_exit(void)
+{
+ i2c_del_driver(&isl28022_driver);
+ debugfs_remove_recursive(isl28022_debugfs_root);
+}
+module_exit(isl28022_exit);
+
+MODULE_AUTHOR("Carsten Spieß <mail@carsten-spiess.de>");
+MODULE_DESCRIPTION("ISL28022 driver");
+MODULE_LICENSE("GPL");
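/*
 * Editor's note (illustrative, not part of the patch): the per-gain sign
 * extension in isl28022_read_in() places the sign bit at bit 12/13/14/15
 * for gain 1/2/4/8, with one LSB equal to 10 uV. The same decode can be
 * written compactly with sign_extend32() from <linux/bitops.h>:
 */
static long isl28022_shunt_mv(u16 regval, u32 gain)
{
	/* gain 1/2/4/8 -> sign bit 12/13/14/15; 10 uV/LSB, so /100 -> mV */
	return sign_extend32(regval, 12 + __ffs(gain)) / 100;
}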
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index a260cff750a5..06f0ab2f52fa 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -11,6 +11,7 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -19,7 +20,6 @@
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/regmap.h>
/* Addresses to scan */
@@ -417,7 +417,7 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
if ((devid & TSE2004_DEVID_MASK) == TSE2004_DEVID &&
- (cap & 0x00e7) != 0x00e7)
+ (cap & 0x0062) != 0x0062)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(jc42_chips); i++) {
@@ -595,20 +595,18 @@ static const struct i2c_device_id jc42_id[] = {
};
MODULE_DEVICE_TABLE(i2c, jc42_id);
-#ifdef CONFIG_OF
static const struct of_device_id jc42_of_ids[] = {
{ .compatible = "jedec,jc-42.4-temp", },
{ }
};
MODULE_DEVICE_TABLE(of, jc42_of_ids);
-#endif
static struct i2c_driver jc42_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "jc42",
.pm = JC42_DEV_PM_OPS,
- .of_match_table = of_match_ptr(jc42_of_ids),
+ .of_match_table = jc42_of_ids,
},
.probe = jc42_probe,
.remove = jc42_remove,
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 7dc19c5d62ac..d0b4cc9a5011 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
-#include <asm/amd_nb.h>
+#include <asm/amd_node.h>
#include <asm/processor.h>
MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
@@ -150,6 +150,11 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}
+static u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
+{
+ return PCI_SLOT(pdev->devfn) - AMD_NODE0_PCI_SLOT;
+}
+
static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
{
if (amd_smn_read(amd_pci_dev_to_node_id(pdev),
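/*
 * Editor's note (worked example, assuming AMD_NODE0_PCI_SLOT is 0x18 as in
 * <asm/amd_node.h>): AMD northbridge functions sit at PCI slot 0x18 + N for
 * node N, so a device at 00:19.0 has devfn 0x19 << 3 = 0xc8 and maps to
 * node PCI_SLOT(0xc8) - AMD_NODE0_PCI_SLOT = 0x19 - 0x18 = 1.
 */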
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 2c2205aec7d4..d95a3c6c245c 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
+#include <linux/i3c/device.h>
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/of.h>
@@ -38,6 +39,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
max6626,
max31725,
mcp980x,
+ p3t1755,
pct2075,
stds75,
stlm75,
@@ -104,17 +106,15 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
#define LM75_REG_MAX 0x03
#define PCT2075_REG_IDLE 0x04
-/* Each client has this additional data */
struct lm75_data {
- struct i2c_client *client;
struct regmap *regmap;
- struct regulator *vs;
u16 orig_conf;
- u16 current_conf;
u8 resolution; /* In bits, 9 to 16 */
unsigned int sample_time; /* In ms */
enum lm75_type kind;
const struct lm75_params *params;
+ u8 reg_buf[1];
+ u8 val_buf[3];
};
/*-----------------------------------------------------------------------*/
@@ -222,6 +222,13 @@ static const struct lm75_params device_params[] = {
.default_resolution = 9,
.default_sample_time = MSEC_PER_SEC / 18,
},
+ [p3t1755] = {
+ .clr_mask = 1 << 1 | 1 << 7, /* disable SMBAlert and one-shot */
+ .default_resolution = 12,
+ .default_sample_time = 55,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ },
[pct2075] = {
.default_resolution = 11,
.default_sample_time = MSEC_PER_SEC / 10,
@@ -276,6 +283,7 @@ static const struct lm75_params device_params[] = {
.default_sample_time = 125,
.num_sample_times = 4,
.sample_times = (unsigned int []){ 125, 250, 1000, 4000 },
+ .alarm = true,
},
[tmp175] = {
.set_mask = 3 << 5, /* 12-bit mode */
@@ -332,41 +340,11 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
return ((temp >> (16 - resolution)) * 1000) >> (resolution - 8);
}
-static int lm75_write_config(struct lm75_data *data, u16 set_mask,
- u16 clr_mask)
-{
- unsigned int value;
-
- clr_mask |= LM75_SHUTDOWN << (8 * data->params->config_reg_16bits);
- value = data->current_conf & ~clr_mask;
- value |= set_mask;
-
- if (data->current_conf != value) {
- s32 err;
- if (data->params->config_reg_16bits)
- err = regmap_write(data->regmap, LM75_REG_CONF, value);
- else
- err = i2c_smbus_write_byte_data(data->client,
- LM75_REG_CONF,
- value);
- if (err)
- return err;
- data->current_conf = value;
- }
- return 0;
-}
-
-static int lm75_read_config(struct lm75_data *data)
+static inline int lm75_write_config(struct lm75_data *data, u16 set_mask,
+ u16 clr_mask)
{
- int ret;
- unsigned int status;
-
- if (data->params->config_reg_16bits) {
- ret = regmap_read(data->regmap, LM75_REG_CONF, &status);
- return ret ? ret : status;
- }
-
- return i2c_smbus_read_byte_data(data->client, LM75_REG_CONF);
+ return regmap_update_bits(data->regmap, LM75_REG_CONF,
+ clr_mask | LM75_SHUTDOWN, set_mask);
}
static irqreturn_t lm75_alarm_handler(int irq, void *private)
@@ -418,7 +396,8 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
if (attr == hwmon_temp_alarm) {
switch (data->kind) {
case as6200:
- *val = (regval >> 5) & 0x1;
+ case tmp112:
+ *val = (regval >> 13) & 0x1;
break;
default:
return -EINVAL;
@@ -469,7 +448,6 @@ static int lm75_write_temp(struct device *dev, u32 attr, long temp)
static int lm75_update_interval(struct device *dev, long val)
{
struct lm75_data *data = dev_get_drvdata(dev);
- unsigned int reg;
u8 index;
s32 err;
@@ -489,19 +467,14 @@ static int lm75_update_interval(struct device *dev, long val)
break;
case tmp112:
case as6200:
- err = regmap_read(data->regmap, LM75_REG_CONF, &reg);
- if (err < 0)
- return err;
- reg &= ~0x00c0;
- reg |= (3 - index) << 6;
- err = regmap_write(data->regmap, LM75_REG_CONF, reg);
+ err = regmap_update_bits(data->regmap, LM75_REG_CONF,
+ 0xc000, (3 - index) << 14);
if (err < 0)
return err;
data->sample_time = data->params->sample_times[index];
break;
case pct2075:
- err = i2c_smbus_write_byte_data(data->client, PCT2075_REG_IDLE,
- index + 1);
+ err = regmap_write(data->regmap, PCT2075_REG_IDLE, index + 1);
if (err)
return err;
data->sample_time = data->params->sample_times[index];
@@ -598,6 +571,115 @@ static bool lm75_is_volatile_reg(struct device *dev, unsigned int reg)
return reg == LM75_REG_TEMP || reg == LM75_REG_CONF;
}
+static int lm75_i2c_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct i2c_client *client = context;
+ struct lm75_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ if (reg == LM75_REG_CONF) {
+ if (!data->params->config_reg_16bits)
+ ret = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
+ else
+ ret = i2c_smbus_read_word_data(client, LM75_REG_CONF);
+ } else {
+ ret = i2c_smbus_read_word_swapped(client, reg);
+ }
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+}
+
+static int lm75_i2c_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i2c_client *client = context;
+ struct lm75_data *data = i2c_get_clientdata(client);
+
+ if (reg == PCT2075_REG_IDLE ||
+ (reg == LM75_REG_CONF && !data->params->config_reg_16bits))
+ return i2c_smbus_write_byte_data(client, reg, val);
+ else if (reg == LM75_REG_CONF)
+ return i2c_smbus_write_word_data(client, reg, val);
+ return i2c_smbus_write_word_swapped(client, reg, val);
+}
+
+static const struct regmap_bus lm75_i2c_regmap_bus = {
+ .reg_read = lm75_i2c_reg_read,
+ .reg_write = lm75_i2c_reg_write,
+};
+
+static int lm75_i3c_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct i3c_device *i3cdev = context;
+ struct lm75_data *data = i3cdev_get_drvdata(i3cdev);
+ struct i3c_priv_xfer xfers[] = {
+ {
+ .rnw = false,
+ .len = 1,
+ .data.out = data->reg_buf,
+ },
+ {
+ .rnw = true,
+ .len = 2,
+ .data.in = data->val_buf,
+ },
+ };
+ int ret;
+
+ data->reg_buf[0] = reg;
+
+ if (reg == LM75_REG_CONF && !data->params->config_reg_16bits)
+ xfers[1].len--;
+
+ ret = i3c_device_do_priv_xfers(i3cdev, xfers, 2);
+ if (ret < 0)
+ return ret;
+
+ if (reg == LM75_REG_CONF && !data->params->config_reg_16bits)
+ *val = data->val_buf[0];
+ else if (reg == LM75_REG_CONF)
+ *val = data->val_buf[0] | (data->val_buf[1] << 8);
+ else
+ *val = data->val_buf[1] | (data->val_buf[0] << 8);
+
+ return 0;
+}
+
+static int lm75_i3c_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct i3c_device *i3cdev = context;
+ struct lm75_data *data = i3cdev_get_drvdata(i3cdev);
+ struct i3c_priv_xfer xfers[] = {
+ {
+ .rnw = false,
+ .len = 3,
+ .data.out = data->val_buf,
+ },
+ };
+
+ data->val_buf[0] = reg;
+
+ if (reg == PCT2075_REG_IDLE ||
+ (reg == LM75_REG_CONF && !data->params->config_reg_16bits)) {
+ xfers[0].len--;
+ data->val_buf[1] = val & 0xff;
+ } else if (reg == LM75_REG_CONF) {
+ data->val_buf[1] = val & 0xff;
+ data->val_buf[2] = (val >> 8) & 0xff;
+ } else {
+ data->val_buf[1] = (val >> 8) & 0xff;
+ data->val_buf[2] = val & 0xff;
+ }
+
+ return i3c_device_do_priv_xfers(i3cdev, xfers, 1);
+}
+
+static const struct regmap_bus lm75_i3c_regmap_bus = {
+ .reg_read = lm75_i3c_reg_read,
+ .reg_write = lm75_i3c_reg_write,
+};
+
static const struct regmap_config lm75_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
@@ -610,46 +692,33 @@ static const struct regmap_config lm75_regmap_config = {
.use_single_write = true,
};
-static void lm75_disable_regulator(void *data)
-{
- struct lm75_data *lm75 = data;
-
- regulator_disable(lm75->vs);
-}
-
static void lm75_remove(void *data)
{
struct lm75_data *lm75 = data;
- struct i2c_client *client = lm75->client;
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, lm75->orig_conf);
+ regmap_write(lm75->regmap, LM75_REG_CONF, lm75->orig_conf);
}
-static int lm75_probe(struct i2c_client *client)
+static int lm75_generic_probe(struct device *dev, const char *name,
+ enum lm75_type kind, int irq, struct regmap *regmap)
{
- struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm75_data *data;
int status, err;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
- return -EIO;
-
data = devm_kzalloc(dev, sizeof(struct lm75_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->client = client;
- data->kind = (uintptr_t)i2c_get_match_data(client);
+ /* needed by custom regmap callbacks */
+ dev_set_drvdata(dev, data);
- data->vs = devm_regulator_get(dev, "vs");
- if (IS_ERR(data->vs))
- return PTR_ERR(data->vs);
+ data->kind = kind;
+ data->regmap = regmap;
- data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config);
- if (IS_ERR(data->regmap))
- return PTR_ERR(data->regmap);
+ err = devm_regulator_get_enable(dev, "vs");
+ if (err)
+ return err;
/* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
* Then tweak to be more precise when appropriate.
@@ -661,25 +730,11 @@ static int lm75_probe(struct i2c_client *client)
data->sample_time = data->params->default_sample_time;
data->resolution = data->params->default_resolution;
- /* Enable the power */
- err = regulator_enable(data->vs);
- if (err) {
- dev_err(dev, "failed to enable regulator: %d\n", err);
- return err;
- }
-
- err = devm_add_action_or_reset(dev, lm75_disable_regulator, data);
+ /* Cache original configuration */
+ err = regmap_read(data->regmap, LM75_REG_CONF, &status);
if (err)
return err;
-
- /* Cache original configuration */
- status = lm75_read_config(data);
- if (status < 0) {
- dev_dbg(dev, "Can't read config? %d\n", status);
- return status;
- }
data->orig_conf = status;
- data->current_conf = status;
err = lm75_write_config(data, data->params->set_mask,
data->params->clr_mask);
@@ -690,20 +745,19 @@ static int lm75_probe(struct i2c_client *client)
if (err)
return err;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
- data, &lm75_chip_info,
- NULL);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, name, data,
+ &lm75_chip_info, NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- if (client->irq) {
+ if (irq) {
if (data->params->alarm) {
err = devm_request_threaded_irq(dev,
- client->irq,
+ irq,
NULL,
&lm75_alarm_handler,
IRQF_ONESHOT,
- client->name,
+ name,
hwmon_dev);
if (err)
return err;
@@ -713,12 +767,29 @@ static int lm75_probe(struct i2c_client *client)
}
}
- dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), client->name);
+ dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), name);
return 0;
}
-static const struct i2c_device_id lm75_ids[] = {
+static int lm75_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+ return -EOPNOTSUPP;
+
+ regmap = devm_regmap_init(dev, &lm75_i2c_regmap_bus, client, &lm75_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return lm75_generic_probe(dev, client->name, (uintptr_t)i2c_get_match_data(client),
+ client->irq, regmap);
+}
+
+static const struct i2c_device_id lm75_i2c_ids[] = {
{ "adt75", adt75, },
{ "as6200", as6200, },
{ "at30ts74", at30ts74, },
@@ -734,6 +805,7 @@ static const struct i2c_device_id lm75_ids[] = {
{ "max31725", max31725, },
{ "max31726", max31725, },
{ "mcp980x", mcp980x, },
+ { "p3t1755", p3t1755, },
{ "pct2075", pct2075, },
{ "stds75", stds75, },
{ "stlm75", stlm75, },
@@ -750,7 +822,38 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tmp1075", tmp1075, },
{ /* LIST END */ }
};
-MODULE_DEVICE_TABLE(i2c, lm75_ids);
+MODULE_DEVICE_TABLE(i2c, lm75_i2c_ids);
+
+struct lm75_i3c_device {
+ enum lm75_type type;
+ const char *name;
+};
+
+static const struct lm75_i3c_device lm75_i3c_p3t1755 = {
+ .name = "p3t1755",
+ .type = p3t1755,
+};
+
+static const struct i3c_device_id lm75_i3c_ids[] = {
+ I3C_DEVICE(0x011b, 0x152a, &lm75_i3c_p3t1755),
+ { /* LIST END */ }
+};
+MODULE_DEVICE_TABLE(i3c, lm75_i3c_ids);
+
+static int lm75_i3c_probe(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+ const struct lm75_i3c_device *id_data;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init(dev, &lm75_i3c_regmap_bus, i3cdev, &lm75_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ id_data = i3c_device_match_id(i3cdev, lm75_i3c_ids)->data;
+
+ return lm75_generic_probe(dev, id_data->name, id_data->type, 0, regmap);
+}
static const struct of_device_id __maybe_unused lm75_of_match[] = {
{
@@ -814,6 +917,10 @@ static const struct of_device_id __maybe_unused lm75_of_match[] = {
.data = (void *)mcp980x
},
{
+ .compatible = "nxp,p3t1755",
+ .data = (void *)p3t1755
+ },
+ {
.compatible = "nxp,pct2075",
.data = (void *)pct2075
},
@@ -972,32 +1079,16 @@ static int lm75_detect(struct i2c_client *new_client,
#ifdef CONFIG_PM
static int lm75_suspend(struct device *dev)
{
- int status;
- struct i2c_client *client = to_i2c_client(dev);
+ struct lm75_data *data = dev_get_drvdata(dev);
- status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
- if (status < 0) {
- dev_dbg(&client->dev, "Can't read config? %d\n", status);
- return status;
- }
- status = status | LM75_SHUTDOWN;
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
- return 0;
+ return regmap_update_bits(data->regmap, LM75_REG_CONF, LM75_SHUTDOWN, LM75_SHUTDOWN);
}
static int lm75_resume(struct device *dev)
{
- int status;
- struct i2c_client *client = to_i2c_client(dev);
+ struct lm75_data *data = dev_get_drvdata(dev);
- status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
- if (status < 0) {
- dev_dbg(&client->dev, "Can't read config? %d\n", status);
- return status;
- }
- status = status & ~LM75_SHUTDOWN;
- i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
- return 0;
+ return regmap_update_bits(data->regmap, LM75_REG_CONF, LM75_SHUTDOWN, 0);
}
static const struct dev_pm_ops lm75_dev_pm_ops = {
@@ -1009,20 +1100,28 @@ static const struct dev_pm_ops lm75_dev_pm_ops = {
#define LM75_DEV_PM_OPS NULL
#endif /* CONFIG_PM */
-static struct i2c_driver lm75_driver = {
+static struct i2c_driver lm75_i2c_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm75",
.of_match_table = of_match_ptr(lm75_of_match),
.pm = LM75_DEV_PM_OPS,
},
- .probe = lm75_probe,
- .id_table = lm75_ids,
+ .probe = lm75_i2c_probe,
+ .id_table = lm75_i2c_ids,
.detect = lm75_detect,
.address_list = normal_i2c,
};
-module_i2c_driver(lm75_driver);
+static struct i3c_driver lm75_i3c_driver = {
+ .driver = {
+ .name = "lm75_i3c",
+ },
+ .probe = lm75_i3c_probe,
+ .id_table = lm75_i3c_ids,
+};
+
+module_i3c_i2c_driver(lm75_i3c_driver, &lm75_i2c_driver)
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
MODULE_DESCRIPTION("LM75 driver");
diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
index 7ca139e4b6af..6d5d4cb846da 100644
--- a/drivers/hwmon/ltc2991.c
+++ b/drivers/hwmon/ltc2991.c
@@ -125,7 +125,7 @@ static int ltc2991_get_curr(struct ltc2991_state *st, u32 reg, int channel,
/* Vx-Vy, 19.075uV/LSB */
*val = DIV_ROUND_CLOSEST(sign_extend32(reg_val, 14) * 19075,
- st->r_sense_uohm[channel]);
+ (s32)st->r_sense_uohm[channel]);
return 0;
}
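/*
 * Editor's note (worked example): without the (s32) cast the division above
 * is performed in unsigned arithmetic, because the u32 r_sense_uohm operand
 * promotes the signed dividend; a negative shunt reading such as -19075
 * would wrap to a huge positive value instead of dividing correctly.
 */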
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index bb30403f81ca..f0048ff37607 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -332,7 +332,7 @@ static struct platform_driver max197_driver = {
.name = "max197",
},
.probe = max197_probe,
- .remove_new = max197_remove,
+ .remove = max197_remove,
.id_table = max197_device_ids,
};
module_platform_driver(max197_driver);
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index c955b0f3a8d3..32b4d54b2076 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -19,7 +19,6 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
-#include <linux/platform_data/max6639.h>
#include <linux/regmap.h>
#include <linux/util_macros.h>
@@ -531,14 +530,49 @@ static int rpm_range_to_reg(int range)
return 1; /* default: 4000 RPM */
}
+static int max6639_probe_child_from_dt(struct i2c_client *client,
+ struct device_node *child,
+ struct max6639_data *data)
+{
+ struct device *dev = &client->dev;
+ u32 i;
+ int err, val;
+
+ err = of_property_read_u32(child, "reg", &i);
+ if (err) {
+ dev_err(dev, "missing reg property of %pOFn\n", child);
+ return err;
+ }
+
+ if (i > 1) {
+ dev_err(dev, "Invalid fan index reg %d\n", i);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(child, "pulses-per-revolution", &val);
+ if (!err) {
+ if (val < 1 || val > 5) {
+ dev_err(dev, "invalid pulses-per-revolution %d of %pOFn\n", val, child);
+ return -EINVAL;
+ }
+ data->ppr[i] = val;
+ }
+
+ err = of_property_read_u32(child, "max-rpm", &val);
+ if (!err)
+ data->rpm_range[i] = rpm_range_to_reg(val);
+
+ return 0;
+}
+
static int max6639_init_client(struct i2c_client *client,
struct max6639_data *data)
{
- struct max6639_platform_data *max6639_info =
- dev_get_platdata(&client->dev);
- int i;
- int rpm_range = 1; /* default: 4000 RPM */
- int err, ppr;
+ struct device *dev = &client->dev;
+ const struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int i, err;
/* Reset chip to default values, see below for GCONFIG setup */
err = regmap_write(data->regmap, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_POR);
@@ -546,21 +580,29 @@ static int max6639_init_client(struct i2c_client *client,
return err;
/* Fans pulse per revolution is 2 by default */
- if (max6639_info && max6639_info->ppr > 0 &&
- max6639_info->ppr < 5)
- ppr = max6639_info->ppr;
- else
- ppr = 2;
+ data->ppr[0] = 2;
+ data->ppr[1] = 2;
+
+ /* default: 4000 RPM */
+ data->rpm_range[0] = 1;
+ data->rpm_range[1] = 1;
- data->ppr[0] = ppr;
- data->ppr[1] = ppr;
+ for_each_child_of_node(np, child) {
+ if (strcmp(child->name, "fan"))
+ continue;
- if (max6639_info)
- rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
- data->rpm_range[0] = rpm_range;
- data->rpm_range[1] = rpm_range;
+ err = max6639_probe_child_from_dt(client, child, data);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
for (i = 0; i < MAX6639_NUM_CHANNELS; i++) {
+ err = regmap_set_bits(data->regmap, MAX6639_REG_OUTPUT_MASK, BIT(1 - i));
+ if (err)
+ return err;
+
/* Set Fan pulse per revolution */
err = max6639_set_ppr(data, i, data->ppr[i]);
if (err)
@@ -573,12 +615,7 @@ static int max6639_init_client(struct i2c_client *client,
return err;
/* Fans PWM polarity high by default */
- if (max6639_info) {
- if (max6639_info->pwm_polarity == 0)
- err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00);
- else
- err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x02);
- }
+ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00);
if (err)
return err;
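/*
 * Editor's note (illustrative): the devicetree shape parsed by
 * max6639_probe_child_from_dt() above; property names are taken from the
 * code, the values are examples only:
 *
 *	fan@0 {
 *		reg = <0>;
 *		pulses-per-revolution = <2>;
 *		max-rpm = <4000>;
 *	};
 */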
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index 67471c9cd4d4..66304d48d33a 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -315,7 +315,7 @@ static const struct platform_device_id mc13783_adc_idtable[] = {
MODULE_DEVICE_TABLE(platform, mc13783_adc_idtable);
static struct platform_driver mc13783_adc_driver = {
- .remove_new = mc13783_adc_remove,
+ .remove = mc13783_adc_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index f71615e06a8f..416ac02e9f74 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -175,9 +175,11 @@ superio_exit(int ioreg)
#define NCT6683_CUSTOMER_ID_MSI 0x201
#define NCT6683_CUSTOMER_ID_MSI2 0x200
#define NCT6683_CUSTOMER_ID_MSI3 0x207
+#define NCT6683_CUSTOMER_ID_MSI4 0x20d
#define NCT6683_CUSTOMER_ID_ASROCK 0xe2c
#define NCT6683_CUSTOMER_ID_ASROCK2 0xe1b
#define NCT6683_CUSTOMER_ID_ASROCK3 0x1631
+#define NCT6683_CUSTOMER_ID_ASROCK4 0x163e
#define NCT6683_REG_BUILD_YEAR 0x604
#define NCT6683_REG_BUILD_MONTH 0x605
@@ -1227,12 +1229,16 @@ static int nct6683_probe(struct platform_device *pdev)
break;
case NCT6683_CUSTOMER_ID_MSI3:
break;
+ case NCT6683_CUSTOMER_ID_MSI4:
+ break;
case NCT6683_CUSTOMER_ID_ASROCK:
break;
case NCT6683_CUSTOMER_ID_ASROCK2:
break;
case NCT6683_CUSTOMER_ID_ASROCK3:
break;
+ case NCT6683_CUSTOMER_ID_ASROCK4:
+ break;
default:
if (!force)
return -ENODEV;
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 934fed3dd586..fa3351351825 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -42,6 +42,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#undef DEFAULT_SYMBOL_NAMESPACE
+#define DEFAULT_SYMBOL_NAMESPACE "HWMON_NCT6775"
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -56,9 +59,6 @@
#include "lm75.h"
#include "nct6775.h"
-#undef DEFAULT_SYMBOL_NAMESPACE
-#define DEFAULT_SYMBOL_NAMESPACE HWMON_NCT6775
-
#define USE_ALTERNATE
/* used to set data->name = nct6775_device_names[data->sio_kind] */
@@ -2878,8 +2878,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0,
- data->target_temp_mask);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, data->target_temp_mask * 1000), 1000);
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
@@ -2959,7 +2958,7 @@ store_temp_tolerance(struct device *dev, struct device_attribute *attr,
return err;
/* Limit tolerance as needed */
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, data->tolerance_mask);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, data->tolerance_mask * 1000), 1000);
mutex_lock(&data->update_lock);
data->temp_tolerance[index][nr] = val;
@@ -3085,7 +3084,7 @@ store_weight_temp(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
+ val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
mutex_lock(&data->update_lock);
data->weight_temp[index][nr] = val;
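/*
 * Editor's note (worked example): the reordered hunks above clamp before
 * dividing because DIV_ROUND_CLOSEST() biases a negative dividend by
 * subtracting half the divisor, e.g. for val = LONG_MIN the old
 *
 *	clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255)
 *
 * evaluates (LONG_MIN - 500) / 1000 and underflows; clamping first bounds
 * the intermediate to [0, 255000] so the division can no longer overflow.
 */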
diff --git a/drivers/hwmon/nct6775-i2c.c b/drivers/hwmon/nct6775-i2c.c
index aff69fa50461..ba71d776a291 100644
--- a/drivers/hwmon/nct6775-i2c.c
+++ b/drivers/hwmon/nct6775-i2c.c
@@ -184,4 +184,4 @@ module_i2c_driver(nct6775_i2c_driver);
MODULE_AUTHOR("Zev Weiss <zev@bewilderbeest.net>");
MODULE_DESCRIPTION("I2C driver for NCT6775F and compatible chips");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(HWMON_NCT6775);
+MODULE_IMPORT_NS("HWMON_NCT6775");
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index 096f1daa8f2b..0a040364b512 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -1350,6 +1350,8 @@ static const char * const asus_msi_boards[] = {
"Pro H610M-CT D4",
"Pro H610T D4",
"Pro Q670M-C",
+ "Pro WS 600M-CL",
+ "Pro WS 665-ACE",
"Pro WS W680-ACE",
"Pro WS W680-ACE IPMI",
"Pro WS W790-ACE",
@@ -1620,7 +1622,7 @@ static void __exit sensors_nct6775_platform_exit(void)
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("Platform driver for NCT6775F and compatible chips");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(HWMON_NCT6775);
+MODULE_IMPORT_NS("HWMON_NCT6775");
module_init(sensors_nct6775_platform_init);
module_exit(sensors_nct6775_platform_exit);
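/*
 * Editor's note (illustrative sketch): the namespace hunks above track the
 * kernel-wide switch of symbol namespaces to string literals. An exporting
 * module now defines the namespace before its #include block, since the
 * export machinery pulled in there consumes it; a consumer imports it by
 * the same quoted name. "MY_NAMESPACE" is hypothetical:
 */
/* exporter: */
#undef DEFAULT_SYMBOL_NAMESPACE
#define DEFAULT_SYMBOL_NAMESPACE "MY_NAMESPACE"
/* consumer: */
MODULE_IMPORT_NS("MY_NAMESPACE");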
diff --git a/drivers/hwmon/nct7363.c b/drivers/hwmon/nct7363.c
new file mode 100644
index 000000000000..be7bf32f6e68
--- /dev/null
+++ b/drivers/hwmon/nct7363.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Nuvoton Technology corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define NCT7363_REG_FUNC_CFG_BASE(x) (0x20 + (x))
+#define NCT7363_REG_LSRS(x) (0x34 + ((x) / 8))
+#define NCT7363_REG_PWMEN_BASE(x) (0x38 + (x))
+#define NCT7363_REG_FANINEN_BASE(x) (0x41 + (x))
+#define NCT7363_REG_FANINX_HVAL(x) (0x48 + ((x) * 2))
+#define NCT7363_REG_FANINX_LVAL(x) (0x49 + ((x) * 2))
+#define NCT7363_REG_FANINX_HL(x) (0x6C + ((x) * 2))
+#define NCT7363_REG_FANINX_LL(x) (0x6D + ((x) * 2))
+#define NCT7363_REG_FSCPXDUTY(x) (0x90 + ((x) * 2))
+#define NCT7363_REG_FSCPXDIV(x) (0x91 + ((x) * 2))
+
+#define PWM_SEL(x) (BIT(0) << ((x) * 2))
+#define FANIN_SEL(_x) ({typeof(_x) (x) = (_x); \
+ BIT(1) << (((x) < 8) ? \
+ (((x) + 8) * 2) : \
+ (((x) % 8) * 2)); })
+#define ALARM_SEL(x, y) ((x) & (BIT((y) % 8)))
+#define VALUE_TO_REG(x, y) (((x) >> ((y) * 8)) & 0xFF)
+
+#define NCT7363_FANINX_LVAL_MASK GENMASK(4, 0)
+#define NCT7363_FANIN_MASK GENMASK(12, 0)
+
+#define NCT7363_PWM_COUNT 16
+
+static inline unsigned int fan_from_reg(u16 val)
+{
+ if (val == NCT7363_FANIN_MASK || val == 0)
+ return 0;
+
+ return (1350000UL / val);
+}
+
+static const struct of_device_id nct7363_of_match[] = {
+ { .compatible = "nuvoton,nct7363", },
+ { .compatible = "nuvoton,nct7362", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nct7363_of_match);
+
+struct nct7363_data {
+ struct regmap *regmap;
+
+ u16 fanin_mask;
+ u16 pwm_mask;
+};
+
+static int nct7363_read_fan(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct nct7363_data *data = dev_get_drvdata(dev);
+ unsigned int reg;
+ u8 regval[2];
+ int ret;
+ u16 cnt;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ /*
+ * The high-byte register must be read first; reading it
+ * latches a synchronized low-byte value
+ */
+ ret = regmap_bulk_read(data->regmap,
+ NCT7363_REG_FANINX_HVAL(channel),
+ &regval, 2);
+ if (ret)
+ return ret;
+
+ cnt = (regval[0] << 5) | (regval[1] & NCT7363_FANINX_LVAL_MASK);
+ *val = fan_from_reg(cnt);
+ return 0;
+ case hwmon_fan_min:
+ ret = regmap_bulk_read(data->regmap,
+ NCT7363_REG_FANINX_HL(channel),
+ &regval, 2);
+ if (ret)
+ return ret;
+
+ cnt = (regval[0] << 5) | (regval[1] & NCT7363_FANINX_LVAL_MASK);
+ *val = fan_from_reg(cnt);
+ return 0;
+ case hwmon_fan_alarm:
+ ret = regmap_read(data->regmap,
+ NCT7363_REG_LSRS(channel), &reg);
+ if (ret)
+ return ret;
+
+ *val = (long)ALARM_SEL(reg, channel) > 0 ? 1 : 0;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct7363_write_fan(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct7363_data *data = dev_get_drvdata(dev);
+ u8 regval[2];
+ int ret;
+
+ if (val <= 0)
+ return -EINVAL;
+
+ switch (attr) {
+ case hwmon_fan_min:
+ val = clamp_val(DIV_ROUND_CLOSEST(1350000, val),
+ 1, NCT7363_FANIN_MASK);
+ regval[0] = val >> 5;
+ regval[1] = val & NCT7363_FANINX_LVAL_MASK;
+
+ ret = regmap_bulk_write(data->regmap,
+ NCT7363_REG_FANINX_HL(channel),
+ regval, 2);
+ return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t nct7363_fan_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct nct7363_data *data = _data;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_alarm:
+ if (data->fanin_mask & BIT(channel))
+ return 0444;
+ break;
+ case hwmon_fan_min:
+ if (data->fanin_mask & BIT(channel))
+ return 0644;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int nct7363_read_pwm(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct nct7363_data *data = dev_get_drvdata(dev);
+ unsigned int regval;
+ int ret;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ ret = regmap_read(data->regmap,
+ NCT7363_REG_FSCPXDUTY(channel), &regval);
+ if (ret)
+ return ret;
+
+ *val = (long)regval;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct7363_write_pwm(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct7363_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ ret = regmap_write(data->regmap,
+ NCT7363_REG_FSCPXDUTY(channel), val);
+
+ return ret;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t nct7363_pwm_is_visible(const void *_data, u32 attr, int channel)
+{
+ const struct nct7363_data *data = _data;
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (data->pwm_mask & BIT(channel))
+ return 0644;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int nct7363_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_fan:
+ return nct7363_read_fan(dev, attr, channel, val);
+ case hwmon_pwm:
+ return nct7363_read_pwm(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct7363_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_fan:
+ return nct7363_write_fan(dev, attr, channel, val);
+ case hwmon_pwm:
+ return nct7363_write_pwm(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t nct7363_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ return nct7363_fan_is_visible(data, attr, channel);
+ case hwmon_pwm:
+ return nct7363_pwm_is_visible(data, attr, channel);
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_channel_info *nct7363_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_MIN | HWMON_F_ALARM),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops nct7363_hwmon_ops = {
+ .is_visible = nct7363_is_visible,
+ .read = nct7363_read,
+ .write = nct7363_write,
+};
+
+static const struct hwmon_chip_info nct7363_chip_info = {
+ .ops = &nct7363_hwmon_ops,
+ .info = nct7363_info,
+};
+
+static int nct7363_init_chip(struct nct7363_data *data)
+{
+ u32 func_config = 0;
+ int i, ret;
+
+ /* Pin Function Configuration */
+ for (i = 0; i < NCT7363_PWM_COUNT; i++) {
+ if (data->pwm_mask & BIT(i))
+ func_config |= PWM_SEL(i);
+ if (data->fanin_mask & BIT(i))
+ func_config |= FANIN_SEL(i);
+ }
+
+ for (i = 0; i < 4; i++) {
+ ret = regmap_write(data->regmap, NCT7363_REG_FUNC_CFG_BASE(i),
+ VALUE_TO_REG(func_config, i));
+ if (ret < 0)
+ return ret;
+ }
+
+ /* PWM and FANIN Monitoring Enable */
+ for (i = 0; i < 2; i++) {
+ ret = regmap_write(data->regmap, NCT7363_REG_PWMEN_BASE(i),
+ VALUE_TO_REG(data->pwm_mask, i));
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, NCT7363_REG_FANINEN_BASE(i),
+ VALUE_TO_REG(data->fanin_mask, i));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nct7363_present_pwm_fanin(struct device *dev,
+ struct device_node *child,
+ struct nct7363_data *data)
+{
+ u8 fanin_ch[NCT7363_PWM_COUNT];
+ struct of_phandle_args args;
+ int ret, fanin_cnt;
+ u8 ch, index;
+
+ ret = of_parse_phandle_with_args(child, "pwms", "#pwm-cells",
+ 0, &args);
+ if (ret)
+ return ret;
+
+ if (args.args[0] >= NCT7363_PWM_COUNT)
+ return -EINVAL;
+ data->pwm_mask |= BIT(args.args[0]);
+
+ fanin_cnt = of_property_count_u8_elems(child, "tach-ch");
+ if (fanin_cnt < 1 || fanin_cnt > NCT7363_PWM_COUNT)
+ return -EINVAL;
+
+ ret = of_property_read_u8_array(child, "tach-ch", fanin_ch, fanin_cnt);
+ if (ret)
+ return ret;
+
+ for (ch = 0; ch < fanin_cnt; ch++) {
+ index = fanin_ch[ch];
+ if (index >= NCT7363_PWM_COUNT)
+ return -EINVAL;
+ data->fanin_mask |= BIT(index);
+ }
+
+ return 0;
+}
+
+static bool nct7363_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NCT7363_REG_LSRS(0) ... NCT7363_REG_LSRS(15):
+ case NCT7363_REG_FANINX_HVAL(0) ... NCT7363_REG_FANINX_LVAL(15):
+ case NCT7363_REG_FANINX_HL(0) ... NCT7363_REG_FANINX_LL(15):
+ case NCT7363_REG_FSCPXDUTY(0) ... NCT7363_REG_FSCPXDIV(15):
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config nct7363_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_read = true,
+ .use_single_write = true,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = nct7363_regmap_is_volatile,
+};
+
+static int nct7363_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *child;
+ struct nct7363_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regmap = devm_regmap_init_i2c(client, &nct7363_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ for_each_child_of_node(dev->of_node, child) {
+ ret = nct7363_present_pwm_fanin(dev, child, data);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+ }
+
+ /* Initialize the chip */
+ ret = nct7363_init_chip(data);
+ if (ret)
+ return ret;
+
+ hwmon_dev =
+ devm_hwmon_device_register_with_info(dev, client->name, data,
+ &nct7363_chip_info, NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct i2c_driver nct7363_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "nct7363",
+ .of_match_table = nct7363_of_match,
+ },
+ .probe = nct7363_probe,
+};
+
+module_i2c_driver(nct7363_driver);
+
+MODULE_AUTHOR("CW Ho <cwho@nuvoton.com>");
+MODULE_AUTHOR("Ban Feng <kcfeng0@nuvoton.com>");
+MODULE_DESCRIPTION("NCT7363 Hardware Monitoring Driver");
+MODULE_LICENSE("GPL");
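/*
 * Editor's note (worked example): each NCT7363 pin owns a 2-bit field in
 * the 32-bit func_config word built in nct7363_init_chip(). PWM_SEL(x)
 * sets bit (2 * x); FANIN_SEL(x) sets bit (2 * (x + 8) + 1) for x < 8 and
 * bit (2 * (x % 8) + 1) for x >= 8. For instance pwm_mask = BIT(0) and
 * fanin_mask = BIT(3) yield func_config = BIT(0) | BIT(23), which
 * VALUE_TO_REG() then slices byte-wise into the four FUNC_CFG registers.
 */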
diff --git a/drivers/hwmon/nzxt-kraken2.c b/drivers/hwmon/nzxt-kraken2.c
index ed38645a1dc2..034698232758 100644
--- a/drivers/hwmon/nzxt-kraken2.c
+++ b/drivers/hwmon/nzxt-kraken2.c
@@ -35,13 +35,6 @@ struct kraken2_priv_data {
unsigned long updated; /* jiffies */
};
-static umode_t kraken2_is_visible(const void *data,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
static int kraken2_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
@@ -81,7 +74,7 @@ static int kraken2_read_string(struct device *dev, enum hwmon_sensor_types type,
}
static const struct hwmon_ops kraken2_hwmon_ops = {
- .is_visible = kraken2_is_visible,
+ .visible = 0444,
.read = kraken2_read,
.read_string = kraken2_read_string,
};
diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c
index b5993c79c09e..1e3749dfa598 100644
--- a/drivers/hwmon/occ/p9_sbe.c
+++ b/drivers/hwmon/occ/p9_sbe.c
@@ -30,7 +30,7 @@ struct p9_sbe_occ {
#define to_p9_sbe_occ(x) container_of((x), struct p9_sbe_occ, occ)
static ssize_t ffdc_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *battr, char *buf, loff_t pos,
+ const struct bin_attribute *battr, char *buf, loff_t pos,
size_t count)
{
ssize_t rc = 0;
@@ -48,7 +48,7 @@ static ssize_t ffdc_read(struct file *filp, struct kobject *kobj,
return rc;
}
-static BIN_ATTR_RO(ffdc, OCC_MAX_RESP_WORDS * 4);
+static const BIN_ATTR_RO(ffdc, OCC_MAX_RESP_WORDS * 4);
static bool p9_sbe_occ_save_ffdc(struct p9_sbe_occ *ctx, const void *resp,
size_t resp_len)
@@ -192,8 +192,8 @@ static struct platform_driver p9_sbe_occ_driver = {
.name = "occ-hwmon",
.of_match_table = p9_sbe_occ_of_match,
},
- .probe = p9_sbe_occ_probe,
- .remove_new = p9_sbe_occ_remove,
+ .probe = p9_sbe_occ_probe,
+ .remove = p9_sbe_occ_remove,
};
module_platform_driver(p9_sbe_occ_driver);
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 788b5d58f77e..0f8aa6b42164 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1606,7 +1606,7 @@ static struct platform_driver pc87360_driver = {
.name = DRIVER_NAME,
},
.probe = pc87360_probe,
- .remove_new = pc87360_remove,
+ .remove = pc87360_remove,
};
/*
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 7bca04eb4ee4..571402a89368 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -1129,7 +1129,7 @@ static struct platform_driver pc87427_driver = {
.name = DRVNAME,
},
.probe = pc87427_probe,
- .remove_new = pc87427_remove,
+ .remove = pc87427_remove,
};
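
This and the surrounding occ/pc87360/pc87427 hunks are part of the tree-wide rename of .remove_new back to .remove: the platform driver remove callback was re-typed to return void (errors returned from remove were ignored by the driver core anyway), and .remove_new served as the transitional name while both signatures coexisted. A minimal sketch of the final form, with hypothetical demo_* names:

static int demo_probe(struct platform_device *pdev)
{
	return 0;	/* acquire resources */
}

static void demo_remove(struct platform_device *pdev)
{
	/* release resources; no error can be returned */
}

static struct platform_driver demo_driver = {
	.driver	= { .name = "demo" },
	.probe	= demo_probe,
	.remove	= demo_remove,	/* void-returning since the conversion */
};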
static int __init pc87427_device_add(const struct pc87427_sio_data *sio_data)
diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
index 5a682195b98f..c7112dbf008b 100644
--- a/drivers/hwmon/peci/cputemp.c
+++ b/drivers/hwmon/peci/cputemp.c
@@ -607,4 +607,4 @@ MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
MODULE_DESCRIPTION("PECI cputemp driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PECI_CPU);
+MODULE_IMPORT_NS("PECI_CPU");
diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
index 4a72e9712408..d6762259dd69 100644
--- a/drivers/hwmon/peci/dimmtemp.c
+++ b/drivers/hwmon/peci/dimmtemp.c
@@ -666,4 +666,4 @@ MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
MODULE_DESCRIPTION("PECI dimmtemp driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PECI_CPU);
+MODULE_IMPORT_NS("PECI_CPU");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index a4f02cad92fd..419469f40ba0 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -51,7 +51,7 @@ config SENSORS_ADM1275
tristate "Analog Devices ADM1275 and compatibles"
help
If you say yes here you get hardware monitoring support for Analog
- Devices ADM1075, ADM1272, ADM1275, ADM1276, ADM1278, ADM1281,
+ Devices ADM1075, ADM1272, ADM1273, ADM1275, ADM1276, ADM1278, ADM1281,
ADM1293, and ADM1294 Hot-Swap Controller and Digital Power Monitors.
This driver can also be built as a module. If so, the module will
@@ -85,6 +85,15 @@ config SENSORS_BPA_RS600
This driver can also be built as a module. If so, the module will
be called bpa-rs600.
+config SENSORS_CRPS
+ tristate "Intel Common Redundant Power Supply"
+ help
+ If you say yes here you get hardware monitoring support for the Intel
+ Common Redundant Power Supply.
+
+ This driver can also be built as a module. If so, the module will
+ be called crps.
+
config SENSORS_DELTA_AHE50DC_FAN
tristate "Delta AHE-50DC fan control module"
help
@@ -224,9 +233,9 @@ config SENSORS_LTC2978_REGULATOR
depends on SENSORS_LTC2978 && REGULATOR
help
If you say yes here you get regulator support for Linear Technology
- LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889, LTC7880,
- LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680, LTM4686,
- and LTM4700.
+ LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889, LTC7841,
+ LTC7880, LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680,
+ LTM4686, and LTM4700.
config SENSORS_LTC3815
tristate "Linear Technologies LTC3815"
@@ -251,7 +260,7 @@ config SENSORS_MAX15301
tristate "Maxim MAX15301"
help
If you say yes here you get hardware monitoring support for Maxim
- MAX15301, as well as for Flex BMR461.
+ MAX15301, MAX15303, as well as for Flex BMR461.
This driver can also be built as a module. If so, the module will
be called max15301.
@@ -510,6 +519,23 @@ config SENSORS_TDA38640_REGULATOR
If you say yes here you get regulator support for Infineon
TDA38640 as regulator.
+config SENSORS_TPS25990
+ tristate "TI TPS25990"
+ help
+ If you say yes here you get hardware monitoring support for TI
+ TPS25990.
+
+ This driver can also be built as a module. If so, the module will
+ be called tps25990.
+
+config SENSORS_TPS25990_REGULATOR
+ bool "Regulator support for TPS25990 and compatibles"
+ depends on SENSORS_TPS25990 && REGULATOR
+ default SENSORS_TPS25990
+ help
+ If you say yes here you get regulator support for Texas Instruments
+ TPS25990.
+
config SENSORS_TPS40422
tristate "TI TPS40422"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index d00bcc758b97..c7eb7739b7f8 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_Q54SJ108A2) += q54sj108a2.o
obj-$(CONFIG_SENSORS_STPDDC60) += stpddc60.o
obj-$(CONFIG_SENSORS_TDA38640) += tda38640.o
+obj-$(CONFIG_SENSORS_TPS25990) += tps25990.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_TPS546D24) += tps546d24.o
@@ -61,3 +62,4 @@ obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
obj-$(CONFIG_SENSORS_XDPE152) += xdpe152c4.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
obj-$(CONFIG_SENSORS_PIM4328) += pim4328.o
+obj-$(CONFIG_SENSORS_CRPS) += crps.o
diff --git a/drivers/hwmon/pmbus/acbel-fsg032.c b/drivers/hwmon/pmbus/acbel-fsg032.c
index e0c55fd8f3a6..9f07fb4abaff 100644
--- a/drivers/hwmon/pmbus/acbel-fsg032.c
+++ b/drivers/hwmon/pmbus/acbel-fsg032.c
@@ -120,4 +120,4 @@ module_i2c_driver(acbel_fsg032_driver);
MODULE_AUTHOR("Lakshmi Yadlapati");
MODULE_DESCRIPTION("PMBus driver for AcBel Power System power supplies");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/adm1266.c b/drivers/hwmon/pmbus/adm1266.c
index 2c4d94cc8729..d90f8f80be8e 100644
--- a/drivers/hwmon/pmbus/adm1266.c
+++ b/drivers/hwmon/pmbus/adm1266.c
@@ -509,4 +509,4 @@ module_i2c_driver(adm1266_driver);
MODULE_AUTHOR("Alexandru Tachici <alexandru.tachici@analog.com>");
MODULE_DESCRIPTION("PMBus driver for Analog Devices ADM1266");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 59ffc08289bd..7d175baa5de2 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -18,7 +18,7 @@
#include <linux/log2.h>
#include "pmbus.h"
-enum chips { adm1075, adm1272, adm1275, adm1276, adm1278, adm1281, adm1293, adm1294 };
+enum chips { adm1075, adm1272, adm1273, adm1275, adm1276, adm1278, adm1281, adm1293, adm1294 };
#define ADM1275_MFR_STATUS_IOUT_WARN2 BIT(0)
#define ADM1293_MFR_STATUS_VAUX_UV_WARN BIT(5)
@@ -479,6 +479,7 @@ static int adm1275_read_byte_data(struct i2c_client *client, int page, int reg)
static const struct i2c_device_id adm1275_id[] = {
{ "adm1075", adm1075 },
{ "adm1272", adm1272 },
+ { "adm1273", adm1273 },
{ "adm1275", adm1275 },
{ "adm1276", adm1276 },
{ "adm1278", adm1278 },
@@ -555,9 +556,9 @@ static int adm1275_probe(struct i2c_client *client)
"Device mismatch: Configured %s, detected %s\n",
client->name, mid->name);
- if (mid->driver_data == adm1272 || mid->driver_data == adm1278 ||
- mid->driver_data == adm1281 || mid->driver_data == adm1293 ||
- mid->driver_data == adm1294)
+ if (mid->driver_data == adm1272 || mid->driver_data == adm1273 ||
+ mid->driver_data == adm1278 || mid->driver_data == adm1281 ||
+ mid->driver_data == adm1293 || mid->driver_data == adm1294)
config_read_fn = i2c_smbus_read_word_data;
else
config_read_fn = i2c_smbus_read_byte_data;
@@ -630,6 +631,7 @@ static int adm1275_probe(struct i2c_client *client)
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
case adm1272:
+ case adm1273:
data->have_vout = true;
data->have_pin_max = true;
data->have_temp_max = true;
@@ -866,4 +868,4 @@ module_i2c_driver(adm1275_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for Analog Devices ADM1275 and compatibles");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/adp1050.c b/drivers/hwmon/pmbus/adp1050.c
index 20f22730fc01..ef46c880b168 100644
--- a/drivers/hwmon/pmbus/adp1050.c
+++ b/drivers/hwmon/pmbus/adp1050.c
@@ -53,4 +53,4 @@ module_i2c_driver(adp1050_driver);
MODULE_AUTHOR("Radu Sabau <radu.sabau@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADP1050 HWMON PMBus Driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
index 7c5f4b10a7c1..ddf9d9a2958c 100644
--- a/drivers/hwmon/pmbus/bel-pfe.c
+++ b/drivers/hwmon/pmbus/bel-pfe.c
@@ -129,4 +129,4 @@ module_i2c_driver(pfe_pmbus_driver);
MODULE_AUTHOR("Tao Ren <rentao.bupt@gmail.com>");
MODULE_DESCRIPTION("PMBus driver for BEL PFE Family Power Supplies");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/bpa-rs600.c b/drivers/hwmon/pmbus/bpa-rs600.c
index 0dce26c35556..6c3875ba37a0 100644
--- a/drivers/hwmon/pmbus/bpa-rs600.c
+++ b/drivers/hwmon/pmbus/bpa-rs600.c
@@ -205,4 +205,4 @@ module_i2c_driver(bpa_rs600_driver);
MODULE_AUTHOR("Chris Packham");
MODULE_DESCRIPTION("PMBus driver for BluTek BPA-RS600");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/crps.c b/drivers/hwmon/pmbus/crps.c
new file mode 100644
index 000000000000..164b33fed312
--- /dev/null
+++ b/drivers/hwmon/pmbus/crps.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2024 IBM Corp.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/pmbus.h>
+
+#include "pmbus.h"
+
+static const struct i2c_device_id crps_id[] = {
+ { "intel_crps185" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, crps_id);
+
+static struct pmbus_driver_info crps_info = {
+ .pages = 1,
+ /* PSU uses default linear data format. */
+ .func[0] = PMBUS_HAVE_PIN | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
+};
+
+static int crps_probe(struct i2c_client *client)
+{
+ int rc;
+ struct device *dev = &client->dev;
+ char buf[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
+
+ rc = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+ if (rc < 0)
+ return dev_err_probe(dev, rc, "Failed to read PMBUS_MFR_MODEL\n");
+
+ if (rc != 7 || strncmp(buf, "03NK260", 7)) {
+ buf[rc] = '\0';
+ return dev_err_probe(dev, -ENODEV, "Model '%s' not supported\n", buf);
+ }
+
+ rc = pmbus_do_probe(client, &crps_info);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to probe\n");
+
+ return 0;
+}
+
+static const struct of_device_id crps_of_match[] = {
+ {
+ .compatible = "intel,crps185",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, crps_of_match);
+
+static struct i2c_driver crps_driver = {
+ .driver = {
+ .name = "crps",
+ .of_match_table = crps_of_match,
+ },
+ .probe = crps_probe,
+ .id_table = crps_id,
+};
+
+module_i2c_driver(crps_driver);
+
+MODULE_AUTHOR("Ninad Palsule");
+MODULE_DESCRIPTION("PMBus driver for Intel Common Redundant power supplies");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
index 4dd3b6686d6a..3850eaea75da 100644
--- a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
+++ b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
@@ -127,4 +127,4 @@ module_i2c_driver(ahe50dc_fan_driver);
MODULE_AUTHOR("Zev Weiss <zev@bewilderbeest.net>");
MODULE_DESCRIPTION("Driver for Delta AHE-50DC power shelf fan control module");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/dps920ab.c b/drivers/hwmon/pmbus/dps920ab.c
index 04e0d598a6e5..325111a955e6 100644
--- a/drivers/hwmon/pmbus/dps920ab.c
+++ b/drivers/hwmon/pmbus/dps920ab.c
@@ -190,12 +190,19 @@ static const struct of_device_id __maybe_unused dps920ab_of_match[] = {
MODULE_DEVICE_TABLE(of, dps920ab_of_match);
+static const struct i2c_device_id dps920ab_device_id[] = {
+ { "dps920ab" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, dps920ab_device_id);
+
static struct i2c_driver dps920ab_driver = {
.driver = {
.name = "dps920ab",
.of_match_table = of_match_ptr(dps920ab_of_match),
},
.probe = dps920ab_probe,
+ .id_table = dps920ab_device_id,
};
module_i2c_driver(dps920ab_driver);
@@ -203,4 +210,4 @@ module_i2c_driver(dps920ab_driver);
MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
MODULE_DESCRIPTION("PMBus driver for Delta DPS920AB PSU");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/fsp-3y.c b/drivers/hwmon/pmbus/fsp-3y.c
index 72a7c261ef06..a4dc09e2ef75 100644
--- a/drivers/hwmon/pmbus/fsp-3y.c
+++ b/drivers/hwmon/pmbus/fsp-3y.c
@@ -291,4 +291,4 @@ module_i2c_driver(fsp3y_driver);
MODULE_AUTHOR("Václav Kubernát");
MODULE_DESCRIPTION("PMBus driver for FSP/3Y-Power power supplies");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index 1ba4c5e95820..d05ef7a968a9 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -614,4 +614,4 @@ module_i2c_driver(ibm_cffps_driver);
MODULE_AUTHOR("Eddie James");
MODULE_DESCRIPTION("PMBus driver for IBM Common Form Factor power supplies");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/inspur-ipsps.c b/drivers/hwmon/pmbus/inspur-ipsps.c
index 3e3cc9a0f116..074e0f164ee1 100644
--- a/drivers/hwmon/pmbus/inspur-ipsps.c
+++ b/drivers/hwmon/pmbus/inspur-ipsps.c
@@ -224,4 +224,4 @@ module_i2c_driver(ipsps_driver);
MODULE_AUTHOR("John Wang");
MODULE_DESCRIPTION("PMBus driver for Inspur Power System power supplies");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
index 503be59c6c7f..46d8f334d49a 100644
--- a/drivers/hwmon/pmbus/ir35221.c
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -145,4 +145,4 @@ module_i2c_driver(ir35221_driver);
MODULE_AUTHOR("Samuel Mendoza-Jonas <sam@mendozajonas.com");
MODULE_DESCRIPTION("PMBus driver for IR35221");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/ir36021.c b/drivers/hwmon/pmbus/ir36021.c
index 5148c9187c9e..34ce15fc708b 100644
--- a/drivers/hwmon/pmbus/ir36021.c
+++ b/drivers/hwmon/pmbus/ir36021.c
@@ -76,4 +76,4 @@ module_i2c_driver(ir36021_driver);
MODULE_AUTHOR("Chris Packham <chris.packham@alliedtelesis.co.nz>");
MODULE_DESCRIPTION("PMBus driver for Infineon IR36021");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
index d4bcc9c39774..7b4188e8bf48 100644
--- a/drivers/hwmon/pmbus/ir38064.c
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -87,4 +87,4 @@ module_i2c_driver(ir38064_driver);
MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>");
MODULE_DESCRIPTION("PMBus driver for Infineon IR38064 and compatible chips");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/irps5401.c b/drivers/hwmon/pmbus/irps5401.c
index f0bdf55c95bf..43674c64841d 100644
--- a/drivers/hwmon/pmbus/irps5401.c
+++ b/drivers/hwmon/pmbus/irps5401.c
@@ -63,4 +63,4 @@ module_i2c_driver(irps5401_driver);
MODULE_AUTHOR("Robert Hancock");
MODULE_DESCRIPTION("PMBus driver for Infineon IRPS5401");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 7e53fb1d5ea3..2af921039309 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/string.h>
#include <linux/sysfs.h>
@@ -20,6 +21,7 @@
#define ISL68137_VOUT_AVS 0x30
#define RAA_DMPVR2_READ_VMON 0xc8
+#define MAX_CHANNELS 4
enum chips {
isl68137,
@@ -72,6 +74,17 @@ enum variants {
raa_dmpvr2_hv,
};
+struct isl68137_channel {
+ u32 vout_voltage_divider[2];
+};
+
+struct isl68137_data {
+ struct pmbus_driver_info info;
+ struct isl68137_channel channel[MAX_CHANNELS];
+};
+
+#define to_isl68137_data(x) container_of(x, struct isl68137_data, info)
+
static const struct i2c_device_id raa_dmpvr_id[];
static ssize_t isl68137_avs_enable_show_page(struct i2c_client *client,
@@ -163,13 +176,32 @@ static const struct attribute_group *isl68137_attribute_groups[] = {
static int raa_dmpvr2_read_word_data(struct i2c_client *client, int page,
int phase, int reg)
{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct isl68137_data *data = to_isl68137_data(info);
int ret;
+ u64 temp;
switch (reg) {
case PMBUS_VIRT_READ_VMON:
ret = pmbus_read_word_data(client, page, phase,
RAA_DMPVR2_READ_VMON);
break;
+ case PMBUS_READ_POUT:
+ case PMBUS_READ_VOUT:
+ /*
+ * In cases where a voltage divider is attached to the target
+ * rail between Vout and the Vsense pin, both Vout and Pout
+ * should be scaled by the voltage divider scaling factor.
+ * I.e. Vout = Vsense * Rtotal / Rout
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret > 0) {
+ temp = DIV_U64_ROUND_CLOSEST((u64)ret *
+ data->channel[page].vout_voltage_divider[1],
+ data->channel[page].vout_voltage_divider[0]);
+ ret = clamp_val(temp, 0, 0xffff);
+ }
+ break;
default:
ret = -ENODATA;
break;
@@ -178,6 +210,40 @@ static int raa_dmpvr2_read_word_data(struct i2c_client *client, int page,
return ret;
}
+static int raa_dmpvr2_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct isl68137_data *data = to_isl68137_data(info);
+ int ret;
+ u64 temp;
+
+ switch (reg) {
+ case PMBUS_VOUT_MAX:
+ case PMBUS_VOUT_MARGIN_HIGH:
+ case PMBUS_VOUT_MARGIN_LOW:
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ case PMBUS_VOUT_COMMAND:
+ /*
+ * In cases where a voltage divider is attached to the target
+ * rail between Vout and the Vsense pin, Vout related PMBus
+ * commands should be scaled based on the expected voltage
+ * at the Vsense pin.
+ * I.e. Vsense = Vout * Rout / Rtotal
+ */
+ temp = DIV_U64_ROUND_CLOSEST((u64)word *
+ data->channel[page].vout_voltage_divider[0],
+ data->channel[page].vout_voltage_divider[1]);
+ ret = pmbus_write_word_data(client, page, reg,
+ clamp_val(temp, 0, 0xffff));
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
static struct pmbus_driver_info raa_dmpvr_info = {
.pages = 3,
.format[PSC_VOLTAGE_IN] = direct,
@@ -220,14 +286,90 @@ static struct pmbus_driver_info raa_dmpvr_info = {
| PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
};
+static int isl68137_probe_child_from_dt(struct device *dev,
+ struct device_node *child,
+ struct isl68137_data *data)
+{
+ u32 channel, rout, rtotal;
+ int err;
+
+ err = of_property_read_u32(child, "reg", &channel);
+ if (err) {
+ dev_err(dev, "missing reg property of %pOFn\n", child);
+ return err;
+ }
+ if (channel >= data->info.pages) {
+ dev_err(dev, "invalid reg %d of %pOFn\n", channel, child);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32_array(child, "vout-voltage-divider",
+ data->channel[channel].vout_voltage_divider,
+ ARRAY_SIZE(data->channel[channel].vout_voltage_divider));
+ if (err && err != -EINVAL) {
+ dev_err(dev,
+ "malformed vout-voltage-divider value for channel %d\n",
+ channel);
+ return err;
+ }
+
+ rout = data->channel[channel].vout_voltage_divider[0];
+ rtotal = data->channel[channel].vout_voltage_divider[1];
+ if (rout == 0) {
+ dev_err(dev,
+ "Voltage divider output resistance must be greater than 0\n");
+ return -EINVAL;
+ }
+ if (rtotal < rout) {
+ dev_err(dev,
+ "Voltage divider total resistance is less than output resistance\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int isl68137_probe_from_dt(struct device *dev,
+ struct isl68137_data *data)
+{
+ const struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int err;
+
+ for_each_child_of_node(np, child) {
+ if (strcmp(child->name, "channel"))
+ continue;
+
+ err = isl68137_probe_child_from_dt(dev, child, data);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int isl68137_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct pmbus_driver_info *info;
+ struct isl68137_data *data;
+ int i, err;
- info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
- if (!info)
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
- memcpy(info, &raa_dmpvr_info, sizeof(*info));
+
+ /*
+ * Initialize all voltage dividers to a 1:1 Rout/Rtotal ratio to
+ * simplify the logic in the PMBus word read/write functions. Note
+ * that memset() fills each byte with 1, so both u32 entries become
+ * 0x01010101; only their ratio matters, and it is exactly 1.
+ */
+ for (i = 0; i < MAX_CHANNELS; i++)
+ memset(data->channel[i].vout_voltage_divider,
+ 1,
+ sizeof(data->channel[i].vout_voltage_divider));
+
+ memcpy(&data->info, &raa_dmpvr_info, sizeof(data->info));
+ info = &data->info;
switch (i2c_match_id(raa_dmpvr_id, client)->driver_data) {
case raa_dmpvr1_2rail:
@@ -237,11 +379,14 @@ static int isl68137_probe(struct i2c_client *client)
info->func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
| PMBUS_HAVE_POUT;
+ info->read_word_data = raa_dmpvr2_read_word_data;
+ info->write_word_data = raa_dmpvr2_write_word_data;
info->groups = isl68137_attribute_groups;
break;
case raa_dmpvr2_1rail:
info->pages = 1;
info->read_word_data = raa_dmpvr2_read_word_data;
+ info->write_word_data = raa_dmpvr2_write_word_data;
break;
case raa_dmpvr2_2rail_nontc:
info->func[0] &= ~PMBUS_HAVE_TEMP3;
@@ -250,9 +395,11 @@ static int isl68137_probe(struct i2c_client *client)
case raa_dmpvr2_2rail:
info->pages = 2;
info->read_word_data = raa_dmpvr2_read_word_data;
+ info->write_word_data = raa_dmpvr2_write_word_data;
break;
case raa_dmpvr2_3rail:
info->read_word_data = raa_dmpvr2_read_word_data;
+ info->write_word_data = raa_dmpvr2_write_word_data;
break;
case raa_dmpvr2_hv:
info->pages = 1;
@@ -263,11 +410,16 @@ static int isl68137_probe(struct i2c_client *client)
info->m[PSC_POWER] = 2;
info->R[PSC_POWER] = -1;
info->read_word_data = raa_dmpvr2_read_word_data;
+ info->write_word_data = raa_dmpvr2_write_word_data;
break;
default:
return -ENODEV;
}
+ err = isl68137_probe_from_dt(dev, data);
+ if (err)
+ return err;
+
return pmbus_do_probe(client, info);
}
@@ -318,11 +470,59 @@ static const struct i2c_device_id raa_dmpvr_id[] = {
MODULE_DEVICE_TABLE(i2c, raa_dmpvr_id);
+static const struct of_device_id isl68137_of_match[] = {
+ { .compatible = "isil,isl68137", .data = (void *)raa_dmpvr1_2rail },
+ { .compatible = "renesas,isl68220", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl68221", .data = (void *)raa_dmpvr2_3rail },
+ { .compatible = "renesas,isl68222", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl68223", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl68224", .data = (void *)raa_dmpvr2_3rail },
+ { .compatible = "renesas,isl68225", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl68226", .data = (void *)raa_dmpvr2_3rail },
+ { .compatible = "renesas,isl68227", .data = (void *)raa_dmpvr2_1rail },
+ { .compatible = "renesas,isl68229", .data = (void *)raa_dmpvr2_3rail },
+ { .compatible = "renesas,isl68233", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl68239", .data = (void *)raa_dmpvr2_3rail },
+
+ { .compatible = "renesas,isl69222", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69223", .data = (void *)raa_dmpvr2_3rail },
+ { .compatible = "renesas,isl69224", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69225", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69227", .data = (void *)raa_dmpvr2_3rail },
+ { .compatible = "renesas,isl69228", .data = (void *)raa_dmpvr2_3rail },
+ { .compatible = "renesas,isl69234", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69236", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69239", .data = (void *)raa_dmpvr2_3rail },
+ { .compatible = "renesas,isl69242", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69243", .data = (void *)raa_dmpvr2_1rail },
+ { .compatible = "renesas,isl69247", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69248", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69254", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69255", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69256", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69259", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "isil,isl69260", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,isl69268", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "isil,isl69269", .data = (void *)raa_dmpvr2_3rail },
+ { .compatible = "renesas,isl69298", .data = (void *)raa_dmpvr2_2rail },
+
+ { .compatible = "renesas,raa228000", .data = (void *)raa_dmpvr2_hv },
+ { .compatible = "renesas,raa228004", .data = (void *)raa_dmpvr2_hv },
+ { .compatible = "renesas,raa228006", .data = (void *)raa_dmpvr2_hv },
+ { .compatible = "renesas,raa228228", .data = (void *)raa_dmpvr2_2rail_nontc },
+ { .compatible = "renesas,raa229001", .data = (void *)raa_dmpvr2_2rail },
+ { .compatible = "renesas,raa229004", .data = (void *)raa_dmpvr2_2rail },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, isl68137_of_match);
+
/* This is the driver that will be inserted */
static struct i2c_driver isl68137_driver = {
.driver = {
- .name = "isl68137",
- },
+ .name = "isl68137",
+ .of_match_table = isl68137_of_match,
+ },
.probe = isl68137_probe,
.id_table = raa_dmpvr_id,
};
@@ -332,4 +532,4 @@ module_i2c_driver(isl68137_driver);
MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>");
MODULE_DESCRIPTION("PMBus driver for Renesas digital multiphase voltage regulators");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index c36c124d1a2d..40b0dda32ea6 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -569,4 +569,4 @@ module_i2c_driver(lm25066_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for LM25066 and compatible chips");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/lt7182s.c b/drivers/hwmon/pmbus/lt7182s.c
index aebd97af2741..9d6d50f39bd6 100644
--- a/drivers/hwmon/pmbus/lt7182s.c
+++ b/drivers/hwmon/pmbus/lt7182s.c
@@ -192,4 +192,4 @@ module_i2c_driver(lt7182s_driver);
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("PMBus driver for Analog Devices LT7182S");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 73a86f4d6472..4c306943383a 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -23,7 +23,8 @@ enum chips {
/* Managers */
ltc2972, ltc2974, ltc2975, ltc2977, ltc2978, ltc2979, ltc2980,
/* Controllers */
- ltc3880, ltc3882, ltc3883, ltc3884, ltc3886, ltc3887, ltc3889, ltc7132, ltc7880,
+ ltc3880, ltc3882, ltc3883, ltc3884, ltc3886, ltc3887, ltc3889, ltc7132,
+ ltc7841, ltc7880,
/* Modules */
ltm2987, ltm4664, ltm4675, ltm4676, ltm4677, ltm4678, ltm4680, ltm4686,
ltm4700,
@@ -50,7 +51,7 @@ enum chips {
#define LTC3880_MFR_CLEAR_PEAKS 0xe3
#define LTC3880_MFR_TEMPERATURE2_PEAK 0xf4
-/* LTC3883, LTC3884, LTC3886, LTC3889, LTC7132, LTC7880 */
+/* LTC3883, LTC3884, LTC3886, LTC3889, LTC7132, LTC7841 and LTC7880 only */
#define LTC3883_MFR_IIN_PEAK 0xe1
/* LTC2975 only */
@@ -80,6 +81,7 @@ enum chips {
#define LTC3887_ID 0x4700
#define LTC3889_ID 0x4900
#define LTC7132_ID 0x4CE0
+#define LTC7841_ID 0x40D0
#define LTC7880_ID 0x49E0
#define LTM2987_ID_A 0x8010 /* A/B for two die IDs */
#define LTM2987_ID_B 0x8020
@@ -548,6 +550,7 @@ static const struct i2c_device_id ltc2978_id[] = {
{"ltc3887", ltc3887},
{"ltc3889", ltc3889},
{"ltc7132", ltc7132},
+ {"ltc7841", ltc7841},
{"ltc7880", ltc7880},
{"ltm2987", ltm2987},
{"ltm4664", ltm4664},
@@ -654,6 +657,8 @@ static int ltc2978_get_id(struct i2c_client *client)
return ltc3889;
else if (chip_id == LTC7132_ID)
return ltc7132;
+ else if (chip_id == LTC7841_ID)
+ return ltc7841;
else if (chip_id == LTC7880_ID)
return ltc7880;
else if (chip_id == LTM2987_ID_A || chip_id == LTM2987_ID_B)
@@ -854,6 +859,16 @@ static int ltc2978_probe(struct i2c_client *client)
| PMBUS_HAVE_POUT
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
break;
+ case ltc7841:
+ data->features |= FEAT_CLEAR_PEAKS;
+ info->read_word_data = ltc3883_read_word_data;
+ info->pages = LTC3883_NUM_PAGES;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ break;
default:
return -ENODEV;
}
@@ -907,6 +922,7 @@ static const struct of_device_id ltc2978_of_match[] = {
{ .compatible = "lltc,ltc3887" },
{ .compatible = "lltc,ltc3889" },
{ .compatible = "lltc,ltc7132" },
+ { .compatible = "lltc,ltc7841" },
{ .compatible = "lltc,ltc7880" },
{ .compatible = "lltc,ltm2987" },
{ .compatible = "lltc,ltm4664" },
@@ -936,4 +952,4 @@ module_i2c_driver(ltc2978_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for LTC2978 and compatible chips");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/ltc3815.c b/drivers/hwmon/pmbus/ltc3815.c
index f58a8cedb0d7..824c16a75e2c 100644
--- a/drivers/hwmon/pmbus/ltc3815.c
+++ b/drivers/hwmon/pmbus/ltc3815.c
@@ -208,4 +208,4 @@ module_i2c_driver(ltc3815_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for LTC3815");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max15301.c b/drivers/hwmon/pmbus/max15301.c
index f5367a7bc0f5..d5810b88ea8d 100644
--- a/drivers/hwmon/pmbus/max15301.c
+++ b/drivers/hwmon/pmbus/max15301.c
@@ -25,6 +25,7 @@
static const struct i2c_device_id max15301_id[] = {
{ "bmr461" },
{ "max15301" },
+ { "max15303" },
{}
};
MODULE_DEVICE_TABLE(i2c, max15301_id);
@@ -97,4 +98,4 @@ module_i2c_driver(max15301_driver);
MODULE_AUTHOR("Erik Rosen <erik.rosen@metormote.com>");
MODULE_DESCRIPTION("PMBus driver for Maxim MAX15301");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max16064.c b/drivers/hwmon/pmbus/max16064.c
index 98e2b5dd5841..eb84915c2a83 100644
--- a/drivers/hwmon/pmbus/max16064.c
+++ b/drivers/hwmon/pmbus/max16064.c
@@ -111,4 +111,4 @@ module_i2c_driver(max16064_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for Maxim MAX16064");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max16601.c b/drivers/hwmon/pmbus/max16601.c
index 3ab219504600..d696e506aafb 100644
--- a/drivers/hwmon/pmbus/max16601.c
+++ b/drivers/hwmon/pmbus/max16601.c
@@ -366,4 +366,4 @@ module_i2c_driver(max16601_driver);
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("PMBus driver for Maxim MAX16601");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c
index d56ec24764fd..95869d198ecf 100644
--- a/drivers/hwmon/pmbus/max20730.c
+++ b/drivers/hwmon/pmbus/max20730.c
@@ -787,4 +787,4 @@ module_i2c_driver(max20730_driver);
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("PMBus driver for Maxim MAX20710 / MAX20730 / MAX20734 / MAX20743");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c
index 8f23c1eb559e..ac8c43122133 100644
--- a/drivers/hwmon/pmbus/max20751.c
+++ b/drivers/hwmon/pmbus/max20751.c
@@ -51,4 +51,4 @@ module_i2c_driver(max20751_driver);
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("PMBus driver for Maxim MAX20751");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index 09218dba8965..1f94d38a1637 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -549,4 +549,4 @@ module_i2c_driver(max31785_driver);
MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_DESCRIPTION("PMBus driver for the Maxim MAX31785");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index fe7f6b1b0985..c9dda33831ff 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -529,4 +529,4 @@ module_i2c_driver(max34440_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for Maxim MAX34440/MAX34441");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index 5d5b6aeefa80..b3a2a7492bbf 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -191,4 +191,4 @@ module_i2c_driver(max8688_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for Maxim MAX8688");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp2856.c b/drivers/hwmon/pmbus/mp2856.c
index 41bb86667091..e83c70a3583f 100644
--- a/drivers/hwmon/pmbus/mp2856.c
+++ b/drivers/hwmon/pmbus/mp2856.c
@@ -463,4 +463,4 @@ module_i2c_driver(mp2856_driver);
MODULE_AUTHOR("Peter Yin <peter.yin@quantatw.com>");
MODULE_DESCRIPTION("PMBus driver for MPS MP2856/MP2857 device");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp2888.c b/drivers/hwmon/pmbus/mp2888.c
index 3b45f126b611..772a623ca7d0 100644
--- a/drivers/hwmon/pmbus/mp2888.c
+++ b/drivers/hwmon/pmbus/mp2888.c
@@ -404,4 +404,4 @@ module_i2c_driver(mp2888_driver);
MODULE_AUTHOR("Vadim Pasternak <vadimp@nvidia.com>");
MODULE_DESCRIPTION("PMBus driver for MPS MP2888 device");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp2891.c b/drivers/hwmon/pmbus/mp2891.c
index bb28b15a9103..f8f4c91ec23c 100644
--- a/drivers/hwmon/pmbus/mp2891.c
+++ b/drivers/hwmon/pmbus/mp2891.c
@@ -572,8 +572,8 @@ static int mp2891_probe(struct i2c_client *client)
}
static const struct i2c_device_id mp2891_id[] = {
- {"mp2891", 0},
- {}
+ { "mp2891" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, mp2891_id);
@@ -597,4 +597,4 @@ module_i2c_driver(mp2891_driver);
MODULE_AUTHOR("Noah Wang <noahwang.wang@outlook.com>");
MODULE_DESCRIPTION("PMBus driver for MPS MP2891");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
index 280bb12f762c..c31982d85196 100644
--- a/drivers/hwmon/pmbus/mp2975.c
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -1101,4 +1101,4 @@ module_i2c_driver(mp2975_driver);
MODULE_AUTHOR("Vadim Pasternak <vadimp@nvidia.com>");
MODULE_DESCRIPTION("PMBus driver for MPS MP2975 device");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp2993.c b/drivers/hwmon/pmbus/mp2993.c
index 944593e13231..81c84fc8ed47 100644
--- a/drivers/hwmon/pmbus/mp2993.c
+++ b/drivers/hwmon/pmbus/mp2993.c
@@ -233,8 +233,8 @@ static int mp2993_probe(struct i2c_client *client)
}
static const struct i2c_device_id mp2993_id[] = {
- {"mp2993", 0},
- {}
+ { "mp2993" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, mp2993_id);
@@ -258,4 +258,4 @@ module_i2c_driver(mp2993_driver);
MODULE_AUTHOR("Noah Wang <noahwang.wang@outlook.com>");
MODULE_DESCRIPTION("PMBus driver for MPS MP2993");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp5023.c b/drivers/hwmon/pmbus/mp5023.c
index 21acb7fd9a1a..c466d67e9a8f 100644
--- a/drivers/hwmon/pmbus/mp5023.c
+++ b/drivers/hwmon/pmbus/mp5023.c
@@ -64,4 +64,4 @@ module_i2c_driver(mp5023_driver);
MODULE_AUTHOR("Howard Chiu <howard.chiu@quantatw.com>");
MODULE_DESCRIPTION("PMBus driver for MPS MP5023 HSC");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp5920.c b/drivers/hwmon/pmbus/mp5920.c
index f6d7527ade7d..319ae2721bcf 100644
--- a/drivers/hwmon/pmbus/mp5920.c
+++ b/drivers/hwmon/pmbus/mp5920.c
@@ -87,4 +87,4 @@ MODULE_AUTHOR("Tony Ao <tony_ao@wiwynn.com>");
MODULE_AUTHOR("Alex Vdovydchenko <xzeol@yahoo.com>");
MODULE_DESCRIPTION("PMBus driver for MP5920 HSC");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp5990.c b/drivers/hwmon/pmbus/mp5990.c
index 5d1d5eac89da..4ce381a39480 100644
--- a/drivers/hwmon/pmbus/mp5990.c
+++ b/drivers/hwmon/pmbus/mp5990.c
@@ -176,4 +176,4 @@ module_i2c_driver(mp5990_driver);
MODULE_AUTHOR("Peter Yin <peter.yin@quantatw.com>");
MODULE_DESCRIPTION("PMBus driver for MP5990 HSC");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp9941.c b/drivers/hwmon/pmbus/mp9941.c
index 543955cfce67..42ca6748777a 100644
--- a/drivers/hwmon/pmbus/mp9941.c
+++ b/drivers/hwmon/pmbus/mp9941.c
@@ -291,8 +291,8 @@ static int mp9941_probe(struct i2c_client *client)
}
static const struct i2c_device_id mp9941_id[] = {
- {"mp9941", 0},
- {}
+ { "mp9941" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, mp9941_id);
@@ -316,4 +316,4 @@ module_i2c_driver(mp9941_driver);
MODULE_AUTHOR("Noah Wang <noahwang.wang@outlook.com>");
MODULE_DESCRIPTION("PMBus driver for MPS MP9941");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mpq7932.c b/drivers/hwmon/pmbus/mpq7932.c
index 2dcb6da853bd..c1e2d0cb2fd0 100644
--- a/drivers/hwmon/pmbus/mpq7932.c
+++ b/drivers/hwmon/pmbus/mpq7932.c
@@ -164,4 +164,4 @@ module_i2c_driver(mpq7932_regulator_driver);
MODULE_AUTHOR("Saravanan Sekar <saravanan@linumiz.com>");
MODULE_DESCRIPTION("MPQ7932 PMIC regulator driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mpq8785.c b/drivers/hwmon/pmbus/mpq8785.c
index 7f87e117b49d..331c274ca892 100644
--- a/drivers/hwmon/pmbus/mpq8785.c
+++ b/drivers/hwmon/pmbus/mpq8785.c
@@ -22,7 +22,7 @@ static int mpq8785_identify(struct i2c_client *client,
break;
case 1:
case 2:
- info->format[PSC_VOLTAGE_OUT] = direct,
+ info->format[PSC_VOLTAGE_OUT] = direct;
info->m[PSC_VOLTAGE_OUT] = 64;
info->b[PSC_VOLTAGE_OUT] = 0;
info->R[PSC_VOLTAGE_OUT] = 1;
@@ -87,4 +87,4 @@ module_i2c_driver(mpq8785_driver);
MODULE_AUTHOR("Charles Hsu <ythsu0511@gmail.com>");
MODULE_DESCRIPTION("PMBus driver for MPS MPQ8785");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/pim4328.c b/drivers/hwmon/pmbus/pim4328.c
index 31d9ae06379a..aa98284bbdd8 100644
--- a/drivers/hwmon/pmbus/pim4328.c
+++ b/drivers/hwmon/pmbus/pim4328.c
@@ -230,4 +230,4 @@ module_i2c_driver(pim4328_driver);
MODULE_AUTHOR("Erik Rosen <erik.rosen@metormote.com>");
MODULE_DESCRIPTION("PMBus driver for PIM4006, PIM4328, PIM4820 power interface modules");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/pli1209bc.c b/drivers/hwmon/pmbus/pli1209bc.c
index 178e0cdb7887..569b61dc1a32 100644
--- a/drivers/hwmon/pmbus/pli1209bc.c
+++ b/drivers/hwmon/pmbus/pli1209bc.c
@@ -145,4 +145,4 @@ module_i2c_driver(pli1209bc_driver);
MODULE_AUTHOR("Marcello Sylvester Bauer <sylv@sylv.io>");
MODULE_DESCRIPTION("PMBus driver for Vicor PLI1209BC");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/pm6764tr.c b/drivers/hwmon/pmbus/pm6764tr.c
index 23f15b608dcf..c96c0aecb920 100644
--- a/drivers/hwmon/pmbus/pm6764tr.c
+++ b/drivers/hwmon/pmbus/pm6764tr.c
@@ -73,4 +73,4 @@ module_i2c_driver(pm6764tr_driver);
MODULE_AUTHOR("Charles Hsu");
MODULE_DESCRIPTION("PMBus driver for ST PM6764TR");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index ec40c5c59954..77cf268e7d2d 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -261,4 +261,4 @@ module_i2c_driver(pmbus_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("Generic PMBus driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index d605412a3173..ddb19c9726d6 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -487,6 +487,8 @@ struct pmbus_driver_info {
/* Regulator ops */
extern const struct regulator_ops pmbus_regulator_ops;
+int pmbus_regulator_init_cb(struct regulator_dev *rdev,
+ struct regulator_config *config);
/* Macros for filling in array of struct regulator_desc */
#define PMBUS_REGULATOR_STEP(_name, _id, _voltages, _step, _min_uV) \
@@ -501,6 +503,7 @@ extern const struct regulator_ops pmbus_regulator_ops;
.n_voltages = _voltages, \
.uV_step = _step, \
.min_uV = _min_uV, \
+ .init_cb = pmbus_regulator_init_cb, \
}
#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0, 0)
@@ -516,6 +519,7 @@ extern const struct regulator_ops pmbus_regulator_ops;
.n_voltages = _voltages, \
.uV_step = _step, \
.min_uV = _min_uV, \
+ .init_cb = pmbus_regulator_init_cb, \
}
#define PMBUS_REGULATOR_ONE(_name) PMBUS_REGULATOR_STEP_ONE(_name, 0, 0, 0)
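
Both regulator macros gain an init_cb, so every regulator_desc the PMBus drivers stamp out routes through pmbus_regulator_init_cb, letting the core trim the allowed operations once it knows the chip's write-protect state (see the pmbus_core.c hunks below). The hook's shape, as a sketch:

static int demo_init_cb(struct regulator_dev *rdev,
			struct regulator_config *config)
{
	/*
	 * Runs during regulator registration: inspect driver state via
	 * config->driver_data and clear bits such as
	 * REGULATOR_CHANGE_VOLTAGE from
	 * rdev->constraints->valid_ops_mask when the hardware is locked.
	 */
	return 0;
}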
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index ce7fd4ca9d89..787683e83db6 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -31,6 +31,9 @@
#define PMBUS_ATTR_ALLOC_SIZE 32
#define PMBUS_NAME_SIZE 24
+static int wp = -1;
+module_param(wp, int, 0444);
+
struct pmbus_sensor {
struct pmbus_sensor *next;
char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
@@ -150,7 +153,7 @@ void pmbus_clear_cache(struct i2c_client *client)
for (sensor = data->sensors; sensor; sensor = sensor->next)
sensor->data = -ENODATA;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_clear_cache, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_clear_cache, "PMBUS");
void pmbus_set_update(struct i2c_client *client, u8 reg, bool update)
{
@@ -161,7 +164,7 @@ void pmbus_set_update(struct i2c_client *client, u8 reg, bool update)
if (sensor->reg == reg)
sensor->update = update;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_set_update, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_set_update, "PMBUS");
/* Some chips need a delay between accesses. */
static void pmbus_wait(struct i2c_client *client)
@@ -236,7 +239,7 @@ int pmbus_set_page(struct i2c_client *client, int page, int phase)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_set_page, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_set_page, "PMBUS");
int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{
@@ -252,7 +255,7 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
return rv;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_write_byte, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_write_byte, "PMBUS");
/*
* _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if
@@ -287,7 +290,7 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
return rv;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_write_word_data, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_write_word_data, "PMBUS");
static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
@@ -393,7 +396,7 @@ int pmbus_update_fan(struct i2c_client *client, int page, int id,
return _pmbus_write_word_data(client, page,
pmbus_fan_command_registers[id], command);
}
-EXPORT_SYMBOL_NS_GPL(pmbus_update_fan, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_update_fan, "PMBUS");
int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg)
{
@@ -409,7 +412,7 @@ int pmbus_read_word_data(struct i2c_client *client, int page, int phase, u8 reg)
return rv;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_read_word_data, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_read_word_data, "PMBUS");
static int pmbus_read_virt_reg(struct i2c_client *client, int page, int reg)
{
@@ -472,7 +475,7 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
return rv;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_read_byte_data, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_read_byte_data, "PMBUS");
int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value)
{
@@ -488,7 +491,7 @@ int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, u8 value)
return rv;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_write_byte_data, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_write_byte_data, "PMBUS");
int pmbus_update_byte_data(struct i2c_client *client, int page, u8 reg,
u8 mask, u8 value)
@@ -507,7 +510,7 @@ int pmbus_update_byte_data(struct i2c_client *client, int page, u8 reg,
return rv;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_update_byte_data, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_update_byte_data, "PMBUS");
static int pmbus_read_block_data(struct i2c_client *client, int page, u8 reg,
char *data_buf)
@@ -578,14 +581,14 @@ int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
{
return pmbus_get_fan_rate(client, page, id, mode, false);
}
-EXPORT_SYMBOL_NS_GPL(pmbus_get_fan_rate_device, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_get_fan_rate_device, "PMBUS");
int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
enum pmbus_fan_mode mode)
{
return pmbus_get_fan_rate(client, page, id, mode, true);
}
-EXPORT_SYMBOL_NS_GPL(pmbus_get_fan_rate_cached, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_get_fan_rate_cached, "PMBUS");
static void pmbus_clear_fault_page(struct i2c_client *client, int page)
{
@@ -600,7 +603,7 @@ void pmbus_clear_faults(struct i2c_client *client)
for (i = 0; i < data->info->pages; i++)
pmbus_clear_fault_page(client, i);
}
-EXPORT_SYMBOL_NS_GPL(pmbus_clear_faults, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_clear_faults, "PMBUS");
static int pmbus_check_status_cml(struct i2c_client *client)
{
@@ -655,13 +658,13 @@ bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
{
return pmbus_check_register(client, _pmbus_read_byte_data, page, reg);
}
-EXPORT_SYMBOL_NS_GPL(pmbus_check_byte_register, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_check_byte_register, "PMBUS");
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
{
return pmbus_check_register(client, __pmbus_read_word_data, page, reg);
}
-EXPORT_SYMBOL_NS_GPL(pmbus_check_word_register, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_check_word_register, "PMBUS");
static bool __maybe_unused pmbus_check_block_register(struct i2c_client *client,
int page, int reg)
@@ -685,7 +688,7 @@ const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
return data->info;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_get_driver_info, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_get_driver_info, "PMBUS");
static int pmbus_get_status(struct i2c_client *client, int page, int reg)
{
@@ -2665,6 +2668,56 @@ static void pmbus_remove_pec(void *dev)
device_remove_file(dev, &dev_attr_pec);
}
+static void pmbus_init_wp(struct i2c_client *client, struct pmbus_data *data)
+{
+ int ret;
+
+ switch (wp) {
+ case 0:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, 0);
+ break;
+
+ case 1:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, PB_WP_VOUT);
+ break;
+
+ case 2:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, PB_WP_OP);
+ break;
+
+ case 3:
+ _pmbus_write_byte_data(client, -1,
+ PMBUS_WRITE_PROTECT, PB_WP_ALL);
+ break;
+
+ default:
+ /* Ignore the other values */
+ break;
+ }
+
+ ret = _pmbus_read_byte_data(client, -1, PMBUS_WRITE_PROTECT);
+ if (ret < 0)
+ return;
+
+ switch (ret & PB_WP_ANY) {
+ case PB_WP_ALL:
+ data->flags |= PMBUS_OP_PROTECTED;
+ fallthrough;
+ case PB_WP_OP:
+ data->flags |= PMBUS_VOUT_PROTECTED;
+ fallthrough;
+ case PB_WP_VOUT:
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ break;
+
+ default:
+ break;
+ }
+}
+
static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info)
{
@@ -2718,14 +2771,8 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
* faults, and we should not try it. Also, in that case, writes into
* limit registers need to be disabled.
*/
- if (!(data->flags & PMBUS_NO_WRITE_PROTECT)) {
- pmbus_wait(client);
- ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT);
- pmbus_update_ts(client, false);
-
- if (ret > 0 && (ret & PB_WP_ANY))
- data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
- }
+ if (!(data->flags & PMBUS_NO_WRITE_PROTECT))
+ pmbus_init_wp(client, data);
ret = i2c_smbus_read_byte_data(client, PMBUS_REVISION);
if (ret >= 0)
@@ -3185,8 +3232,12 @@ static int pmbus_regulator_list_voltage(struct regulator_dev *rdev,
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_data *data = i2c_get_clientdata(client);
int val, low, high;
+ if (data->flags & PMBUS_VOUT_PROTECTED)
+ return 0;
+
if (selector >= rdev->desc->n_voltages ||
selector < rdev->desc->linear_min_sel)
return -EINVAL;
@@ -3219,7 +3270,23 @@ const struct regulator_ops pmbus_regulator_ops = {
.set_voltage = pmbus_regulator_set_voltage,
.list_voltage = pmbus_regulator_list_voltage,
};
-EXPORT_SYMBOL_NS_GPL(pmbus_regulator_ops, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_regulator_ops, "PMBUS");
+
+int pmbus_regulator_init_cb(struct regulator_dev *rdev,
+ struct regulator_config *config)
+{
+ struct pmbus_data *data = config->driver_data;
+ struct regulation_constraints *constraints = rdev->constraints;
+
+ if (data->flags & PMBUS_OP_PROTECTED)
+ constraints->valid_ops_mask &= ~REGULATOR_CHANGE_STATUS;
+
+ if (data->flags & PMBUS_VOUT_PROTECTED)
+ constraints->valid_ops_mask &= ~REGULATOR_CHANGE_VOLTAGE;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(pmbus_regulator_init_cb, "PMBUS");
static int pmbus_regulator_register(struct pmbus_data *data)
{
@@ -3279,7 +3346,17 @@ static int pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
static int pmbus_write_smbalert_mask(struct i2c_client *client, u8 page, u8 reg, u8 val)
{
- return _pmbus_write_word_data(client, page, PMBUS_SMBALERT_MASK, reg | (val << 8));
+ int ret;
+
+ ret = _pmbus_write_word_data(client, page, PMBUS_SMBALERT_MASK, reg | (val << 8));
+
+ /*
+ * Systematically clear faults, in case writing PMBUS_SMBALERT_MASK
+ * is not supported by the chip.
+ */
+ pmbus_clear_fault_page(client, page);
+
+ return ret;
}
static irqreturn_t pmbus_fault_handler(int irq, void *pdata)
@@ -3457,11 +3534,11 @@ static int pmbus_init_debugfs(struct i2c_client *client,
/*
* Allocate the max possible entries we need.
- * 6 entries device-specific
+ * 7 entries device-specific
* 10 entries page-specific
*/
entries = devm_kcalloc(data->dev,
- 6 + data->info->pages * 10, sizeof(*entries),
+ 7 + data->info->pages * 10, sizeof(*entries),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -3474,6 +3551,15 @@ static int pmbus_init_debugfs(struct i2c_client *client,
* assume that values of the following registers are the same for all
* pages and report values only for page 0.
*/
+ if (pmbus_check_byte_register(client, 0, PMBUS_REVISION)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = PMBUS_REVISION;
+ debugfs_create_file("revision", 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
if (pmbus_check_block_register(client, 0, PMBUS_MFR_ID)) {
entries[idx].client = client;
entries[idx].page = 0;
@@ -3735,7 +3821,7 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_do_probe, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_do_probe, "PMBUS");
struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client)
{
@@ -3743,7 +3829,7 @@ struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client)
return data->debugfs;
}
-EXPORT_SYMBOL_NS_GPL(pmbus_get_debugfs_dir, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_get_debugfs_dir, "PMBUS");
int pmbus_lock_interruptible(struct i2c_client *client)
{
@@ -3751,7 +3837,7 @@ int pmbus_lock_interruptible(struct i2c_client *client)
return mutex_lock_interruptible(&data->update_lock);
}
-EXPORT_SYMBOL_NS_GPL(pmbus_lock_interruptible, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_lock_interruptible, "PMBUS");
void pmbus_unlock(struct i2c_client *client)
{
@@ -3759,7 +3845,7 @@ void pmbus_unlock(struct i2c_client *client)
mutex_unlock(&data->update_lock);
}
-EXPORT_SYMBOL_NS_GPL(pmbus_unlock, PMBUS);
+EXPORT_SYMBOL_NS_GPL(pmbus_unlock, "PMBUS");
static int __init pmbus_core_init(void)
{
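
Taken together, the pmbus_core.c write-protect changes work like this: the new "wp" module parameter (for example pmbus_core.wp=3 on the kernel command line; the 0444 permissions make it read-only at runtime) optionally forces a PMBUS_WRITE_PROTECT level at probe, and the value read back is folded into the data flags, with stronger protection implying every weaker flag through the fallthrough ladder. The mapping, per the standard PMBus WRITE_PROTECT encoding, as a sketch:

/*
 * wp=0 -> WRITE_PROTECT = 0:          all commands writable
 * wp=1 -> WRITE_PROTECT = PB_WP_VOUT: only WRITE_PROTECT, OPERATION,
 *                                     ON_OFF_CONFIG and VOUT_COMMAND writable
 * wp=2 -> WRITE_PROTECT = PB_WP_OP:   only WRITE_PROTECT and OPERATION
 * wp=3 -> WRITE_PROTECT = PB_WP_ALL:  only WRITE_PROTECT itself
 * (any other value leaves the chip's setting untouched)
 *
 * Read-back then accumulates flags: PB_WP_ALL adds PMBUS_OP_PROTECTED and
 * falls through to add PMBUS_VOUT_PROTECTED (as PB_WP_OP does), and both
 * fall through to PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK.
 */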
diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
index 5ac476d3cdd2..6a4a978eca7e 100644
--- a/drivers/hwmon/pmbus/pxe1610.c
+++ b/drivers/hwmon/pmbus/pxe1610.c
@@ -148,4 +148,4 @@ module_i2c_driver(pxe1610_driver);
MODULE_AUTHOR("Vijay Khemka <vijaykhemka@fb.com>");
MODULE_DESCRIPTION("PMBus driver for Infineon PXE1610, PXE1110 and PXM1310");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/q54sj108a2.c b/drivers/hwmon/pmbus/q54sj108a2.c
index a235c1cdf4fe..4d7086d83aa3 100644
--- a/drivers/hwmon/pmbus/q54sj108a2.c
+++ b/drivers/hwmon/pmbus/q54sj108a2.c
@@ -421,4 +421,4 @@ module_i2c_driver(q54sj108a2_driver);
MODULE_AUTHOR("Xiao.Ma <xiao.mx.ma@deltaww.com>");
MODULE_DESCRIPTION("PMBus driver for Delta Q54SJ108A2 series modules");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/stpddc60.c b/drivers/hwmon/pmbus/stpddc60.c
index 34d0f06f4845..5cb905ed8ae5 100644
--- a/drivers/hwmon/pmbus/stpddc60.c
+++ b/drivers/hwmon/pmbus/stpddc60.c
@@ -246,4 +246,4 @@ module_i2c_driver(stpddc60_driver);
MODULE_AUTHOR("Erik Rosen <erik.rosen@metormote.com>");
MODULE_DESCRIPTION("PMBus driver for ST STPDDC60");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/tda38640.c b/drivers/hwmon/pmbus/tda38640.c
index 044d5fbdf9eb..07fe58c24485 100644
--- a/drivers/hwmon/pmbus/tda38640.c
+++ b/drivers/hwmon/pmbus/tda38640.c
@@ -221,4 +221,4 @@ module_i2c_driver(tda38640_driver);
MODULE_AUTHOR("Patrick Rudolph <patrick.rudolph@9elements.com>");
MODULE_DESCRIPTION("PMBus driver for Infineon TDA38640");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/tps25990.c b/drivers/hwmon/pmbus/tps25990.c
new file mode 100644
index 000000000000..0d2655e69549
--- /dev/null
+++ b/drivers/hwmon/pmbus/tps25990.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2024 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+#define TPS25990_READ_VAUX 0xd0
+#define TPS25990_READ_VIN_MIN 0xd1
+#define TPS25990_READ_VIN_PEAK 0xd2
+#define TPS25990_READ_IIN_PEAK 0xd4
+#define TPS25990_READ_PIN_PEAK 0xd5
+#define TPS25990_READ_TEMP_AVG 0xd6
+#define TPS25990_READ_TEMP_PEAK 0xd7
+#define TPS25990_READ_VOUT_MIN 0xda
+#define TPS25990_READ_VIN_AVG 0xdc
+#define TPS25990_READ_VOUT_AVG 0xdd
+#define TPS25990_READ_IIN_AVG 0xde
+#define TPS25990_READ_PIN_AVG 0xdf
+#define TPS25990_VIREF 0xe0
+#define TPS25990_PK_MIN_AVG 0xea
+#define PK_MIN_AVG_RST_PEAK BIT(7)
+#define PK_MIN_AVG_RST_AVG BIT(6)
+#define PK_MIN_AVG_RST_MIN BIT(5)
+#define PK_MIN_AVG_AVG_CNT GENMASK(2, 0)
+#define TPS25990_MFR_WRITE_PROTECT 0xf8
+#define TPS25990_UNLOCKED BIT(7)
+
+#define TPS25990_8B_SHIFT 2
+#define TPS25990_VIN_OVF_NUM 525100
+#define TPS25990_VIN_OVF_DIV 10163
+#define TPS25990_VIN_OVF_OFF 155
+#define TPS25990_IIN_OCF_NUM 953800
+#define TPS25990_IIN_OCF_DIV 129278
+#define TPS25990_IIN_OCF_OFF 157
+
+#define PK_MIN_AVG_RST_MASK (PK_MIN_AVG_RST_PEAK | \
+ PK_MIN_AVG_RST_AVG | \
+ PK_MIN_AVG_RST_MIN)
+
+/*
+ * Arbitrary default Rimon value: 1kOhm
+ * This corresponds to an overcurrent limit of 55A, close to the
+ * specified limit of an un-stacked TPS25990, and makes further
+ * calculations easier to set up in sensor.conf, if necessary.
+ */
+#define TPS25990_DEFAULT_RIMON 1000000000
+
+static void tps25990_set_m(int *m, u32 rimon)
+{
+ u64 val = ((u64)*m) * rimon;
+
+ /* Make sure m fits the s32 type */
+ *m = DIV_ROUND_CLOSEST_ULL(val, 1000000);
+}
+
+static int tps25990_mfr_write_protect_set(struct i2c_client *client,
+ u8 protect)
+{
+ u8 val;
+
+ switch (protect) {
+ case 0:
+ val = 0xa2;
+ break;
+ case PB_WP_ALL:
+ val = 0x0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pmbus_write_byte_data(client, -1, TPS25990_MFR_WRITE_PROTECT,
+ val);
+}
+
+static int tps25990_mfr_write_protect_get(struct i2c_client *client)
+{
+ int ret = pmbus_read_byte_data(client, -1, TPS25990_MFR_WRITE_PROTECT);
+
+ if (ret < 0)
+ return ret;
+
+ return (ret & TPS25990_UNLOCKED) ? 0 : PB_WP_ALL;
+}
+
+static int tps25990_read_word_data(struct i2c_client *client,
+ int page, int phase, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VIN_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VIN_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_VIN_MIN:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VIN_MIN);
+ break;
+
+ case PMBUS_VIRT_READ_VIN_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VIN_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_VOUT_MIN:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VOUT_MIN);
+ break;
+
+ case PMBUS_VIRT_READ_VOUT_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VOUT_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_IIN_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_IIN_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_IIN_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_IIN_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_TEMP_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_TEMP_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_TEMP_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_PIN_AVG:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_PIN_AVG);
+ break;
+
+ case PMBUS_VIRT_READ_PIN_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_PIN_PEAK);
+ break;
+
+ case PMBUS_VIRT_READ_VMON:
+ ret = pmbus_read_word_data(client, page, phase,
+ TPS25990_READ_VAUX);
+ break;
+
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ /*
+ * These registers provide an 8-bit value instead of a
+ * 10-bit one. Shifting the register value left by two is
+ * enough to make the sensor type conversion work, even
+ * if the datasheet provides different m, b and R
+ * coefficients for them.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ break;
+ ret <<= TPS25990_8B_SHIFT;
+ break;
+
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ break;
+ ret = DIV_ROUND_CLOSEST(ret * TPS25990_VIN_OVF_NUM,
+ TPS25990_VIN_OVF_DIV);
+ ret += TPS25990_VIN_OVF_OFF;
+ break;
+
+ case PMBUS_IIN_OC_FAULT_LIMIT:
+ /*
+ * VIREF directly sets the over-current limit at which the eFuse
+ * will turn the FET off and trigger a fault. Expose it through
+ * this generic property instead of a manufacturer-specific one.
+ */
+ ret = pmbus_read_byte_data(client, page, TPS25990_VIREF);
+ if (ret < 0)
+ break;
+ ret = DIV_ROUND_CLOSEST(ret * TPS25990_IIN_OCF_NUM,
+ TPS25990_IIN_OCF_DIV);
+ ret += TPS25990_IIN_OCF_OFF;
+ break;
+
+ case PMBUS_VIRT_SAMPLES:
+ ret = pmbus_read_byte_data(client, page, TPS25990_PK_MIN_AVG);
+ if (ret < 0)
+ break;
+ ret = 1 << FIELD_GET(PK_MIN_AVG_AVG_CNT, ret);
+ break;
+
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ case PMBUS_VIRT_RESET_IIN_HISTORY:
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = 0;
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int tps25990_write_word_data(struct i2c_client *client,
+ int page, int reg, u16 value)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ value >>= TPS25990_8B_SHIFT;
+ value = clamp_val(value, 0, 0xff);
+ ret = pmbus_write_word_data(client, page, reg, value);
+ break;
+
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ value -= TPS25990_VIN_OVF_OFF;
+ value = DIV_ROUND_CLOSEST(((unsigned int)value) * TPS25990_VIN_OVF_DIV,
+ TPS25990_VIN_OVF_NUM);
+ value = clamp_val(value, 0, 0xf);
+ ret = pmbus_write_word_data(client, page, reg, value);
+ break;
+
+ case PMBUS_IIN_OC_FAULT_LIMIT:
+ value -= TPS25990_IIN_OCF_OFF;
+ value = DIV_ROUND_CLOSEST(((unsigned int)value) * TPS25990_IIN_OCF_DIV,
+ TPS25990_IIN_OCF_NUM);
+ value = clamp_val(value, 0, 0x3f);
+ ret = pmbus_write_byte_data(client, page, TPS25990_VIREF, value);
+ break;
+
+ case PMBUS_VIRT_SAMPLES:
+ value = clamp_val(value, 1, 1 << PK_MIN_AVG_AVG_CNT);
+ value = ilog2(value);
+ ret = pmbus_update_byte_data(client, page, TPS25990_PK_MIN_AVG,
+ PK_MIN_AVG_AVG_CNT,
+ FIELD_PREP(PK_MIN_AVG_AVG_CNT, value));
+ break;
+
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ case PMBUS_VIRT_RESET_IIN_HISTORY:
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ /*
+ * TPS25990 has history resets based on MIN/AVG/PEAK instead of per
+ * sensor type. Exposing this quirk in hwmon is not desirable so
+ * reset MIN, AVG and PEAK together. Even if there is effectively only
+ * one reset, which resets everything, expose the 5 entries so
+ * userspace is not required to map one sensor type to another to
+ * trigger a reset.
+ */
+ ret = pmbus_update_byte_data(client, 0, TPS25990_PK_MIN_AVG,
+ PK_MIN_AVG_RST_MASK,
+ PK_MIN_AVG_RST_MASK);
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int tps25990_read_byte_data(struct i2c_client *client,
+ int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_WRITE_PROTECT:
+ ret = tps25990_mfr_write_protect_get(client);
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int tps25990_write_byte_data(struct i2c_client *client,
+ int page, int reg, u8 byte)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_WRITE_PROTECT:
+ ret = tps25990_mfr_write_protect_set(client, byte);
+ break;
+
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS25990_REGULATOR)
+static const struct regulator_desc tps25990_reg_desc[] = {
+ PMBUS_REGULATOR_ONE("vout"),
+};
+#endif
+
+static const struct pmbus_driver_info tps25990_base_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 5251,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -2,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 5251,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -2,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 140,
+ .b[PSC_TEMPERATURE] = 32100,
+ .R[PSC_TEMPERATURE] = -2,
+ /*
+ * Current and power measurements depend on the ohm value
+ * of Rimon. m is multiplied by 1000 below to keep it an integer,
+ * and -3 is added to R to compensate.
+ */
+ .format[PSC_CURRENT_IN] = direct,
+ .m[PSC_CURRENT_IN] = 9538,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = -6,
+ .format[PSC_POWER] = direct,
+ .m[PSC_POWER] = 4901,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = -7,
+ .func[0] = (PMBUS_HAVE_VIN |
+ PMBUS_HAVE_VOUT |
+ PMBUS_HAVE_VMON |
+ PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN |
+ PMBUS_HAVE_TEMP |
+ PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_SAMPLES),
+ .read_word_data = tps25990_read_word_data,
+ .write_word_data = tps25990_write_word_data,
+ .read_byte_data = tps25990_read_byte_data,
+ .write_byte_data = tps25990_write_byte_data,
+
+#if IS_ENABLED(CONFIG_SENSORS_TPS25990_REGULATOR)
+ .reg_desc = tps25990_reg_desc,
+ .num_regulators = ARRAY_SIZE(tps25990_reg_desc),
+#endif
+};
+
+static const struct i2c_device_id tps25990_i2c_id[] = {
+ { "tps25990" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tps25990_i2c_id);
+
+static const struct of_device_id tps25990_of_match[] = {
+ { .compatible = "ti,tps25990" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tps25990_of_match);
+
+static int tps25990_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ u32 rimon = TPS25990_DEFAULT_RIMON;
+ int ret;
+
+ ret = device_property_read_u32(dev, "ti,rimon-micro-ohms", &rimon);
+ if (ret < 0 && ret != -EINVAL)
+ return dev_err_probe(dev, ret, "failed to get rimon\n");
+
+ info = devm_kmemdup(dev, &tps25990_base_info, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* Adapt the current and power scale for each instance */
+ tps25990_set_m(&info->m[PSC_CURRENT_IN], rimon);
+ tps25990_set_m(&info->m[PSC_POWER], rimon);
+
+ return pmbus_do_probe(client, info);
+}
+
+static struct i2c_driver tps25990_driver = {
+ .driver = {
+ .name = "tps25990",
+ .of_match_table = tps25990_of_match,
+ },
+ .probe = tps25990_probe,
+ .id_table = tps25990_i2c_id,
+};
+module_i2c_driver(tps25990_driver);
+
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_DESCRIPTION("PMBUS driver for TPS25990 eFuse");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/tps40422.c b/drivers/hwmon/pmbus/tps40422.c
index d99b9850ea36..7c9fedaa068c 100644
--- a/drivers/hwmon/pmbus/tps40422.c
+++ b/drivers/hwmon/pmbus/tps40422.c
@@ -51,4 +51,4 @@ module_i2c_driver(tps40422_driver);
MODULE_AUTHOR("Zhu Laiwen <richard.zhu@nsn.com>");
MODULE_DESCRIPTION("PMBus driver for TI TPS40422");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 5c9466244d70..63524dff5e75 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -308,4 +308,4 @@ module_i2c_driver(tps53679_driver);
MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("PMBus driver for Texas Instruments TPS53679");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/tps546d24.c b/drivers/hwmon/pmbus/tps546d24.c
index 520ca37269f7..44d7a6df1dbd 100644
--- a/drivers/hwmon/pmbus/tps546d24.c
+++ b/drivers/hwmon/pmbus/tps546d24.c
@@ -68,4 +68,4 @@ module_i2c_driver(tps546d24_driver);
MODULE_AUTHOR("Duke Du <dukedu83@gmail.com>");
MODULE_DESCRIPTION("PMBus driver for TI tps546d24");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 5d3d1773bf52..9b0eadc81a2e 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -642,4 +642,4 @@ module_i2c_driver(ucd9000_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for TI UCD90xxx");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index 7920d1c06df0..f68adaf4a110 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -209,4 +209,4 @@ module_i2c_driver(ucd9200_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for TI UCD922x, UCD924x");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/xdp710.c b/drivers/hwmon/pmbus/xdp710.c
index dd107e83f612..660bbfe16e1e 100644
--- a/drivers/hwmon/pmbus/xdp710.c
+++ b/drivers/hwmon/pmbus/xdp710.c
@@ -128,4 +128,4 @@ module_i2c_driver(xdp710_driver);
MODULE_AUTHOR("Peter Yin <peter.yin@quantatw.com>");
MODULE_DESCRIPTION("PMBus driver for XDP710 HSC");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
index facb1201aa43..f3aa6339d60d 100644
--- a/drivers/hwmon/pmbus/xdpe12284.c
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -194,4 +194,4 @@ module_i2c_driver(xdpe122_driver);
MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/xdpe152c4.c b/drivers/hwmon/pmbus/xdpe152c4.c
index 7f3b31d4f033..67a3d5fe1daf 100644
--- a/drivers/hwmon/pmbus/xdpe152c4.c
+++ b/drivers/hwmon/pmbus/xdpe152c4.c
@@ -72,4 +72,4 @@ module_i2c_driver(xdpe152_driver);
MODULE_AUTHOR("Greg Schwendimann <greg.schwendimann@infineon.com>");
MODULE_DESCRIPTION("PMBus driver for Infineon XDPE152 family");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 7920a16203e1..97be69630cfb 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -420,4 +420,4 @@ module_i2c_driver(zl6100_driver);
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus driver for ZL6100 and compatibles");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(PMBUS);
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/powerz.c b/drivers/hwmon/powerz.c
index cfb635f94d66..4e663d5b4e33 100644
--- a/drivers/hwmon/powerz.c
+++ b/drivers/hwmon/powerz.c
@@ -54,12 +54,6 @@ static const struct hwmon_channel_info *const powerz_info[] = {
NULL
};
-static umode_t powerz_is_visible(const void *data, enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
static int powerz_read_string(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, const char **str)
{
@@ -201,7 +195,7 @@ out:
}
static const struct hwmon_ops powerz_hwmon_ops = {
- .is_visible = powerz_is_visible,
+ .visible = 0444,
.read = powerz_read,
.read_string = powerz_read_string,
};
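
The powerz conversion above relies on the hwmon core's "visible" field, which
stands in for an is_visible() callback that returns the same mode for every
attribute. A minimal sketch of the resulting ops table, with hypothetical
handler names:

    static const struct hwmon_ops example_hwmon_ops = {
        .visible = 0444,          /* every attribute read-only */
        .read = example_read,     /* hypothetical read handler */
    };
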
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index c434db4656e7..579d31bb9ac7 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -7,6 +7,7 @@
* Author: Kamil Debski <k.debski@samsung.com>
*/
+#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
@@ -60,6 +61,9 @@ struct pwm_fan_ctx {
struct hwmon_chip_info info;
struct hwmon_channel_info fan_channel;
+
+ u64 pwm_duty_cycle_from_stopped;
+ u32 pwm_usec_from_stopped;
};
/* This handler assumes self resetting edge triggered interrupt. */
@@ -199,7 +203,9 @@ static int pwm_fan_power_off(struct pwm_fan_ctx *ctx, bool force_disable)
static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
{
struct pwm_state *state = &ctx->pwm_state;
+ unsigned long final_pwm = pwm;
unsigned long period;
+ bool update = false;
int ret = 0;
if (pwm > 0) {
@@ -208,11 +214,22 @@ static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
return 0;
period = state->period;
- state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
+ update = state->duty_cycle < ctx->pwm_duty_cycle_from_stopped;
+ if (update)
+ state->duty_cycle = ctx->pwm_duty_cycle_from_stopped;
+ else
+ state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
ret = pwm_apply_might_sleep(ctx->pwm, state);
if (ret)
return ret;
ret = pwm_fan_power_on(ctx);
+ if (!ret && update) {
+ pwm = final_pwm;
+ state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
+ usleep_range(ctx->pwm_usec_from_stopped,
+ ctx->pwm_usec_from_stopped * 2);
+ ret = pwm_apply_might_sleep(ctx->pwm, state);
+ }
} else {
ret = pwm_fan_power_off(ctx, false);
}
@@ -480,6 +497,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
struct device *hwmon;
int ret;
const struct hwmon_channel_info **channels;
+ u32 initial_pwm, pwm_min_from_stopped = 0;
u32 *fan_channel_config;
int channel_count = 1; /* We always have a PWM channel. */
int i;
@@ -527,11 +545,21 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->enable_mode = pwm_disable_reg_enable;
+ ret = pwm_fan_get_cooling_data(dev, ctx);
+ if (ret)
+ return ret;
+
+ /* use maximum cooling level if provided */
+ if (ctx->pwm_fan_cooling_levels)
+ initial_pwm = ctx->pwm_fan_cooling_levels[ctx->pwm_fan_max_state];
+ else
+ initial_pwm = MAX_PWM;
+
/*
* Set duty cycle to maximum allowed and enable PWM output as well as
* the regulator. In case of error nothing is changed
*/
- ret = set_pwm(ctx, MAX_PWM);
+ ret = set_pwm(ctx, initial_pwm);
if (ret) {
dev_err(dev, "Failed to configure PWM: %d\n", ret);
return ret;
@@ -620,6 +648,19 @@ static int pwm_fan_probe(struct platform_device *pdev)
channels[1] = &ctx->fan_channel;
}
+ ret = device_property_read_u32(dev, "fan-stop-to-start-percent",
+ &pwm_min_from_stopped);
+ if (!ret && pwm_min_from_stopped) {
+ ctx->pwm_duty_cycle_from_stopped =
+ DIV_ROUND_UP_ULL(pwm_min_from_stopped *
+ (ctx->pwm_state.period - 1),
+ 100);
+ }
+ ret = device_property_read_u32(dev, "fan-stop-to-start-us",
+ &ctx->pwm_usec_from_stopped);
+ if (ret)
+ ctx->pwm_usec_from_stopped = 250000;
+
ctx->info.ops = &pwm_fan_hwmon_ops;
ctx->info.info = channels;
@@ -630,10 +671,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
return PTR_ERR(hwmon);
}
- ret = pwm_fan_get_cooling_data(dev, ctx);
- if (ret)
- return ret;
-
ctx->pwm_fan_state = ctx->pwm_fan_max_state;
if (IS_ENABLED(CONFIG_THERMAL)) {
cdev = devm_thermal_of_cooling_device_register(dev,
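
The stopped-fan handling above first applies the kick-start duty cycle, waits
pwm_usec_from_stopped, then programs the requested duty cycle. A user-space
sketch of the percentage-to-duty-cycle arithmetic, assuming a 40000 ns period
and a 25% fan-stop-to-start-percent:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long period = 40000;   /* ns, example value */
        unsigned int percent = 25;           /* fan-stop-to-start-percent */
        /* DIV_ROUND_UP_ULL(percent * (period - 1), 100) */
        unsigned long long duty = (percent * (period - 1ULL) + 99) / 100;

        printf("kick-start duty cycle: %llu ns\n", duty);   /* 10000 */
        return 0;
    }
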
diff --git a/drivers/hwmon/qnap-mcu-hwmon.c b/drivers/hwmon/qnap-mcu-hwmon.c
new file mode 100644
index 000000000000..29057514739c
--- /dev/null
+++ b/drivers/hwmon/qnap-mcu-hwmon.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Driver for hwmon elements found on QNAP-MCU devices
+ *
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/fwnode.h>
+#include <linux/hwmon.h>
+#include <linux/mfd/qnap-mcu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/thermal.h>
+
+struct qnap_mcu_hwmon {
+ struct qnap_mcu *mcu;
+ struct device *dev;
+
+ unsigned int pwm_min;
+ unsigned int pwm_max;
+
+ struct fwnode_handle *fan_node;
+ unsigned int fan_state;
+ unsigned int fan_max_state;
+ unsigned int *fan_cooling_levels;
+
+ struct thermal_cooling_device *cdev;
+ struct hwmon_chip_info info;
+};
+
+static int qnap_mcu_hwmon_get_rpm(struct qnap_mcu_hwmon *hwm)
+{
+ static const u8 cmd[] = { '@', 'F', 'A' };
+ u8 reply[6];
+ int ret;
+
+ /* poll the fan rpm */
+ ret = qnap_mcu_exec(hwm->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return ret;
+
+ /* First 2 bytes must mirror the sent command */
+ if (memcmp(cmd, reply, 2))
+ return -EIO;
+
+ return reply[4] * 30;
+}
+
+static int qnap_mcu_hwmon_get_pwm(struct qnap_mcu_hwmon *hwm)
+{
+ static const u8 cmd[] = { '@', 'F', 'Z', '0' }; /* 0 = fan-id? */
+ u8 reply[4];
+ int ret;
+
+ /* poll the fan pwm */
+ ret = qnap_mcu_exec(hwm->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return ret;
+
+ /* First 3 bytes must mirror the sent command */
+ if (memcmp(cmd, reply, 3))
+ return -EIO;
+
+ return reply[3];
+}
+
+static int qnap_mcu_hwmon_set_pwm(struct qnap_mcu_hwmon *hwm, u8 pwm)
+{
+ const u8 cmd[] = { '@', 'F', 'W', '0', pwm }; /* 0 = fan-id?, pwm 0-255 */
+
+ /* set the fan pwm */
+ return qnap_mcu_exec_with_ack(hwm->mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_hwmon_get_temp(struct qnap_mcu_hwmon *hwm)
+{
+ static const u8 cmd[] = { '@', 'T', '3' };
+ u8 reply[4];
+ int ret;
+
+ /* poll the temperature */
+ ret = qnap_mcu_exec(hwm->mcu, cmd, sizeof(cmd), reply, sizeof(reply));
+ if (ret)
+ return ret;
+
+ /* First 3 bytes must mirror the sent command */
+ if (memcmp(cmd, reply, sizeof(cmd)))
+ return -EIO;
+
+ /*
+ * There is an unknown bit set in bit7.
+ * Bits [6:0] report the actual temperature as returned by the
+ * original qnap firmware-tools, so just drop bit7 for now.
+ */
+ return (reply[3] & 0x7f) * 1000;
+}
+
+static int qnap_mcu_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct qnap_mcu_hwmon *hwm = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ if (val != 0)
+ val = clamp_val(val, hwm->pwm_min, hwm->pwm_max);
+
+ return qnap_mcu_hwmon_set_pwm(hwm, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qnap_mcu_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct qnap_mcu_hwmon *hwm = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ ret = qnap_mcu_hwmon_get_pwm(hwm);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_fan:
+ ret = qnap_mcu_hwmon_get_rpm(hwm);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ case hwmon_temp:
+ ret = qnap_mcu_hwmon_get_temp(hwm);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t qnap_mcu_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ return 0444;
+
+ case hwmon_pwm:
+ return 0644;
+
+ case hwmon_fan:
+ return 0444;
+
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_ops qnap_mcu_hwmon_hwmon_ops = {
+ .is_visible = qnap_mcu_hwmon_is_visible,
+ .read = qnap_mcu_hwmon_read,
+ .write = qnap_mcu_hwmon_write,
+};
+
+/* thermal cooling device callbacks */
+static int qnap_mcu_hwmon_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qnap_mcu_hwmon *hwm = cdev->devdata;
+
+ if (!hwm)
+ return -EINVAL;
+
+ *state = hwm->fan_max_state;
+
+ return 0;
+}
+
+static int qnap_mcu_hwmon_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct qnap_mcu_hwmon *hwm = cdev->devdata;
+
+ if (!hwm)
+ return -EINVAL;
+
+ *state = hwm->fan_state;
+
+ return 0;
+}
+
+static int qnap_mcu_hwmon_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct qnap_mcu_hwmon *hwm = cdev->devdata;
+ int ret;
+
+ if (!hwm || state > hwm->fan_max_state)
+ return -EINVAL;
+
+ if (state == hwm->fan_state)
+ return 0;
+
+ ret = qnap_mcu_hwmon_set_pwm(hwm, hwm->fan_cooling_levels[state]);
+ if (ret)
+ return ret;
+
+ hwm->fan_state = state;
+
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops qnap_mcu_hwmon_cooling_ops = {
+ .get_max_state = qnap_mcu_hwmon_get_max_state,
+ .get_cur_state = qnap_mcu_hwmon_get_cur_state,
+ .set_cur_state = qnap_mcu_hwmon_set_cur_state,
+};
+
+static void devm_fan_node_release(void *data)
+{
+ struct qnap_mcu_hwmon *hwm = data;
+
+ fwnode_handle_put(hwm->fan_node);
+}
+
+static int qnap_mcu_hwmon_get_cooling_data(struct device *dev, struct qnap_mcu_hwmon *hwm)
+{
+ struct fwnode_handle *fwnode;
+ int num, i, ret;
+
+ fwnode = device_get_named_child_node(dev->parent, "fan-0");
+ if (!fwnode)
+ return 0;
+
+ /* if we found the fan-node, we're keeping it until device-unbind */
+ hwm->fan_node = fwnode;
+ ret = devm_add_action_or_reset(dev, devm_fan_node_release, hwm);
+ if (ret)
+ return ret;
+
+ num = fwnode_property_count_u32(fwnode, "cooling-levels");
+ if (num <= 0)
+ return dev_err_probe(dev, num ? : -EINVAL,
+ "Failed to count elements in 'cooling-levels'\n");
+
+ hwm->fan_cooling_levels = devm_kcalloc(dev, num, sizeof(u32),
+ GFP_KERNEL);
+ if (!hwm->fan_cooling_levels)
+ return -ENOMEM;
+
+ ret = fwnode_property_read_u32_array(fwnode, "cooling-levels",
+ hwm->fan_cooling_levels, num);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read 'cooling-levels'\n");
+
+ for (i = 0; i < num; i++) {
+ if (hwm->fan_cooling_levels[i] > hwm->pwm_max)
+ return dev_err_probe(dev, -EINVAL, "fan state[%d]:%d > %d\n", i,
+ hwm->fan_cooling_levels[i], hwm->pwm_max);
+ }
+
+ hwm->fan_max_state = num - 1;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info * const qnap_mcu_hwmon_channels[] = {
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static int qnap_mcu_hwmon_probe(struct platform_device *pdev)
+{
+ struct qnap_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
+ const struct qnap_mcu_variant *variant = pdev->dev.platform_data;
+ struct qnap_mcu_hwmon *hwm;
+ struct thermal_cooling_device *cdev;
+ struct device *dev = &pdev->dev;
+ struct device *hwmon;
+ int ret;
+
+ hwm = devm_kzalloc(dev, sizeof(*hwm), GFP_KERNEL);
+ if (!hwm)
+ return -ENOMEM;
+
+ hwm->mcu = mcu;
+ hwm->dev = &pdev->dev;
+ hwm->pwm_min = variant->fan_pwm_min;
+ hwm->pwm_max = variant->fan_pwm_max;
+
+ platform_set_drvdata(pdev, hwm);
+
+ /*
+ * Set duty cycle to maximum allowed.
+ */
+ ret = qnap_mcu_hwmon_set_pwm(hwm, hwm->pwm_max);
+ if (ret)
+ return ret;
+
+ hwm->info.ops = &qnap_mcu_hwmon_hwmon_ops;
+ hwm->info.info = qnap_mcu_hwmon_channels;
+
+ ret = qnap_mcu_hwmon_get_cooling_data(dev, hwm);
+ if (ret)
+ return ret;
+
+ hwm->fan_state = hwm->fan_max_state;
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "qnapmcu",
+ hwm, &hwm->info, NULL);
+ if (IS_ERR(hwmon))
+ return dev_err_probe(dev, PTR_ERR(hwmon), "Failed to register hwmon device\n");
+
+ /*
+ * Only register a cooling device when cooling-levels were found.
+ * qnap_mcu_hwmon_get_cooling_data() fails when it reads malformed
+ * levels and succeeds only with either no or correct cooling levels.
+ */
+ if (IS_ENABLED(CONFIG_THERMAL) && hwm->fan_cooling_levels) {
+ cdev = devm_thermal_of_cooling_device_register(dev,
+ to_of_node(hwm->fan_node), "qnap-mcu-hwmon",
+ hwm, &qnap_mcu_hwmon_cooling_ops);
+ if (IS_ERR(cdev))
+ return dev_err_probe(dev, PTR_ERR(cdev),
+ "Failed to register qnap-mcu-hwmon as cooling device\n");
+ hwm->cdev = cdev;
+ }
+
+ return 0;
+}
+
+static struct platform_driver qnap_mcu_hwmon_driver = {
+ .probe = qnap_mcu_hwmon_probe,
+ .driver = {
+ .name = "qnap-mcu-hwmon",
+ },
+};
+module_platform_driver(qnap_mcu_hwmon_driver);
+
+MODULE_ALIAS("platform:qnap-mcu-hwmon");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("QNAP MCU hwmon driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index 65cc52e47db0..a2938881ccd2 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -81,12 +81,6 @@ static int rpi_read(struct device *dev, enum hwmon_sensor_types type,
return 0;
}
-static umode_t rpi_is_visible(const void *_data, enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
static const struct hwmon_channel_info * const rpi_info[] = {
HWMON_CHANNEL_INFO(in,
HWMON_I_LCRIT_ALARM),
@@ -94,7 +88,7 @@ static const struct hwmon_channel_info * const rpi_info[] = {
};
static const struct hwmon_ops rpi_hwmon_ops = {
- .is_visible = rpi_is_visible,
+ .visible = 0444,
.read = rpi_read,
};
@@ -134,10 +128,32 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
return 0;
}
+static int rpi_hwmon_suspend(struct device *dev)
+{
+ struct rpi_hwmon_data *data = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&data->get_values_poll_work);
+
+ return 0;
+}
+
+static int rpi_hwmon_resume(struct device *dev)
+{
+ struct rpi_hwmon_data *data = dev_get_drvdata(dev);
+
+ get_values_poll(&data->get_values_poll_work.work);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(rpi_hwmon_pm_ops, rpi_hwmon_suspend,
+ rpi_hwmon_resume);
+
static struct platform_driver rpi_hwmon_driver = {
.probe = rpi_hwmon_probe,
.driver = {
.name = "raspberrypi-hwmon",
+ .pm = pm_ptr(&rpi_hwmon_pm_ops),
},
};
module_platform_driver(rpi_hwmon_driver);
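
The new suspend/resume hooks use DEFINE_SIMPLE_DEV_PM_OPS() together with
pm_ptr(), so the ops and the callbacks are dropped entirely when CONFIG_PM is
disabled. A minimal sketch of the pattern with hypothetical names:

    static int example_suspend(struct device *dev)
    {
        /* stop the polling work, as in rpi_hwmon_suspend() above */
        return 0;
    }

    static int example_resume(struct device *dev)
    {
        /* trigger one poll immediately */
        return 0;
    }

    static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend,
                                    example_resume);
    /* in the driver struct: .pm = pm_ptr(&example_pm_ops) */
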
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index a4b05ebb0546..d00bd5cc6b15 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -512,7 +512,7 @@ static struct platform_driver sch5636_driver = {
.name = DRVNAME,
},
.probe = sch5636_probe,
- .remove_new = sch5636_remove,
+ .remove = sch5636_remove,
.id_table = sch5636_device_id,
};
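
This and the remaining .remove_new conversions below finish the platform bus
transition to a void-returning remove callback: .remove_new was the temporary
name while .remove still returned int, and both members now share the same
prototype. A minimal sketch:

    static void example_remove(struct platform_device *pdev)
    {
        /* tear down; nothing to return */
    }

    static struct platform_driver example_driver = {
        .probe = example_probe,     /* hypothetical */
        .remove = example_remove,
        .driver = {
            .name = "example",
        },
    };
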
diff --git a/drivers/hwmon/sg2042-mcu.c b/drivers/hwmon/sg2042-mcu.c
index 141045769354..aa3fb773602c 100644
--- a/drivers/hwmon/sg2042-mcu.c
+++ b/drivers/hwmon/sg2042-mcu.c
@@ -346,8 +346,8 @@ static void sg2042_mcu_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id sg2042_mcu_id[] = {
- { "sg2042-hwmon-mcu", 0 },
- {},
+ { "sg2042-hwmon-mcu" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, sg2042_mcu_id);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 494f9655f44f..3d55047e9baf 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -1051,7 +1051,7 @@ static struct platform_driver sht15_driver = {
.of_match_table = of_match_ptr(sht15_dt_match),
},
.probe = sht15_probe,
- .remove_new = sht15_remove,
+ .remove = sht15_remove,
.id_table = sht15_device_ids,
};
module_platform_driver(sht15_driver);
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
index b8916d2735b5..6c9b776237c2 100644
--- a/drivers/hwmon/sht4x.c
+++ b/drivers/hwmon/sht4x.c
@@ -11,6 +11,7 @@
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/jiffies.h>
#include <linux/module.h>
@@ -31,6 +32,12 @@
*/
#define SHT4X_CMD_MEASURE_HPM 0b11111101
#define SHT4X_CMD_RESET 0b10010100
+#define SHT4X_CMD_HEATER_20_1 0b00011110
+#define SHT4X_CMD_HEATER_20_01 0b00010101
+#define SHT4X_CMD_HEATER_110_1 0b00101111
+#define SHT4X_CMD_HEATER_110_01 0b00100100
+#define SHT4X_CMD_HEATER_200_1 0b00111001
+#define SHT4X_CMD_HEATER_200_01 0b00110010
#define SHT4X_CMD_LEN 1
#define SHT4X_CRC8_LEN 1
@@ -49,6 +56,10 @@ DECLARE_CRC8_TABLE(sht4x_crc8_table);
* struct sht4x_data - All the data required to operate an SHT4X chip
* @client: the i2c client associated with the SHT4X
* @lock: a mutex that is used to prevent parallel access to the i2c client
+ * @heating_complete: the time that the last heating finished
+ * @data_pending: true if and only if there are measurements to retrieve after heating
+ * @heater_power: the power at which the heater will be started
+ * @heater_time: the time for which the heater will remain turned on
* @valid: validity of fields below
* @update_interval: the minimum poll interval
* @last_updated: the previous time that the SHT4X was polled
@@ -58,6 +69,10 @@ DECLARE_CRC8_TABLE(sht4x_crc8_table);
struct sht4x_data {
struct i2c_client *client;
struct mutex lock; /* atomic read data updates */
+ unsigned long heating_complete; /* in jiffies */
+ bool data_pending;
+ u32 heater_power; /* in milli-watts */
+ u32 heater_time; /* in milli-seconds */
bool valid; /* validity of fields below */
long update_interval; /* in milli-seconds */
long last_updated; /* in jiffies */
@@ -79,19 +94,30 @@ static int sht4x_read_values(struct sht4x_data *data)
u8 crc;
u8 cmd[SHT4X_CMD_LEN] = {SHT4X_CMD_MEASURE_HPM};
u8 raw_data[SHT4X_RESPONSE_LENGTH];
+ unsigned long curr_jiffies;
mutex_lock(&data->lock);
- next_update = data->last_updated +
- msecs_to_jiffies(data->update_interval);
- if (data->valid && time_before_eq(jiffies, next_update))
- goto unlock;
+ curr_jiffies = jiffies;
+ if (time_before(curr_jiffies, data->heating_complete))
+ msleep(jiffies_to_msecs(data->heating_complete - curr_jiffies));
- ret = i2c_master_send(client, cmd, SHT4X_CMD_LEN);
- if (ret < 0)
- goto unlock;
+ if (data->data_pending &&
+ time_before(jiffies, data->heating_complete + data->update_interval)) {
+ data->data_pending = false;
+ } else {
+ next_update = data->last_updated +
+ msecs_to_jiffies(data->update_interval);
- usleep_range(SHT4X_MEAS_DELAY_HPM, SHT4X_MEAS_DELAY_HPM + SHT4X_DELAY_EXTRA);
+ if (data->valid && time_before_eq(jiffies, next_update))
+ goto unlock;
+
+ ret = i2c_master_send(client, cmd, SHT4X_CMD_LEN);
+ if (ret < 0)
+ goto unlock;
+
+ usleep_range(SHT4X_MEAS_DELAY_HPM, SHT4X_MEAS_DELAY_HPM + SHT4X_DELAY_EXTRA);
+ }
ret = i2c_master_recv(client, raw_data, SHT4X_RESPONSE_LENGTH);
if (ret != SHT4X_RESPONSE_LENGTH) {
@@ -215,6 +241,143 @@ static int sht4x_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
}
}
+static ssize_t heater_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sht4x_data *data = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", time_before(jiffies, data->heating_complete));
+}
+
+static ssize_t heater_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct sht4x_data *data = dev_get_drvdata(dev);
+ bool status;
+ ssize_t ret;
+ u8 cmd;
+ u32 heating_time_bound;
+
+ ret = kstrtobool(buf, &status);
+ if (ret)
+ return ret;
+ if (!status)
+ return -EINVAL;
+
+ if (data->heater_time == 100) {
+ if (data->heater_power == 20)
+ cmd = SHT4X_CMD_HEATER_20_01;
+ else if (data->heater_power == 110)
+ cmd = SHT4X_CMD_HEATER_110_01;
+ else /* data->heater_power == 200 */
+ cmd = SHT4X_CMD_HEATER_200_01;
+
+ heating_time_bound = 110;
+ } else { /* data->heater_time == 1000 */
+ if (data->heater_power == 20)
+ cmd = SHT4X_CMD_HEATER_20_1;
+ else if (data->heater_power == 110)
+ cmd = SHT4X_CMD_HEATER_110_1;
+ else /* data->heater_power == 200 */
+ cmd = SHT4X_CMD_HEATER_200_1;
+
+ heating_time_bound = 1100;
+ }
+
+ mutex_lock(&data->lock);
+
+ if (time_before(jiffies, data->heating_complete)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = i2c_master_send(data->client, &cmd, SHT4X_CMD_LEN);
+ if (ret < 0)
+ goto unlock;
+
+ data->heating_complete = jiffies + msecs_to_jiffies(heating_time_bound);
+ data->data_pending = true;
+unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static ssize_t heater_power_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sht4x_data *data = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", data->heater_power);
+}
+
+static ssize_t heater_power_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct sht4x_data *data = dev_get_drvdata(dev);
+ u32 power;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 10, &power);
+ if (ret)
+ return ret;
+
+ if (power != 20 && power != 110 && power != 200)
+ return -EINVAL;
+
+ data->heater_power = power;
+
+ return count;
+}
+
+static ssize_t heater_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sht4x_data *data = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", data->heater_time);
+}
+
+static ssize_t heater_time_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct sht4x_data *data = dev_get_drvdata(dev);
+ u32 time;
+ ssize_t ret;
+
+ ret = kstrtou32(buf, 10, &time);
+ if (ret)
+ return ret;
+
+ if (time != 100 && time != 1000)
+ return -EINVAL;
+
+ data->heater_time = time;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(heater_enable);
+static DEVICE_ATTR_RW(heater_power);
+static DEVICE_ATTR_RW(heater_time);
+
+static struct attribute *sht4x_attrs[] = {
+ &dev_attr_heater_enable.attr,
+ &dev_attr_heater_power.attr,
+ &dev_attr_heater_time.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(sht4x);
+
static const struct hwmon_channel_info * const sht4x_info[] = {
HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL),
HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
@@ -255,6 +418,9 @@ static int sht4x_probe(struct i2c_client *client)
data->update_interval = SHT4X_MIN_POLL_INTERVAL;
data->client = client;
+ data->heater_power = 200;
+ data->heater_time = 1000;
+ data->heating_complete = jiffies;
mutex_init(&data->lock);
@@ -270,7 +436,7 @@ static int sht4x_probe(struct i2c_client *client)
client->name,
data,
&sht4x_chip_info,
- NULL);
+ sht4x_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
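
The six SHT4X_CMD_HEATER_* opcodes encode the heater power (20, 110 or
200 mW) and duration (1 s or 0.1 s) chosen through the new sysfs attributes.
A user-space sketch of the same (power, time) to command-byte mapping, with
the opcode values copied from the defines above:

    #include <stdio.h>

    static unsigned char sht4x_heater_cmd(unsigned int power_mw,
                                          unsigned int time_ms)
    {
        if (time_ms == 100) {
            if (power_mw == 20)
                return 0x15;     /* SHT4X_CMD_HEATER_20_01 */
            if (power_mw == 110)
                return 0x24;     /* SHT4X_CMD_HEATER_110_01 */
            return 0x32;         /* SHT4X_CMD_HEATER_200_01 */
        }
        if (power_mw == 20)
            return 0x1e;         /* SHT4X_CMD_HEATER_20_1 */
        if (power_mw == 110)
            return 0x2f;         /* SHT4X_CMD_HEATER_110_1 */
        return 0x39;             /* SHT4X_CMD_HEATER_200_1 */
    }

    int main(void)
    {
        printf("cmd = 0x%02x\n", sht4x_heater_cmd(200, 1000));  /* 0x39 */
        return 0;
    }
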
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index e73b1522f3ce..b7a7bcd6d3af 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -784,7 +784,7 @@ static struct platform_driver sis5595_driver = {
.name = DRIVER_NAME,
},
.probe = sis5595_probe,
- .remove_new = sis5595_remove,
+ .remove = sis5595_remove,
};
static int sis5595_pci_probe(struct pci_dev *dev,
diff --git a/drivers/hwmon/sl28cpld-hwmon.c b/drivers/hwmon/sl28cpld-hwmon.c
index e020f25c9300..454cc844fb9d 100644
--- a/drivers/hwmon/sl28cpld-hwmon.c
+++ b/drivers/hwmon/sl28cpld-hwmon.c
@@ -23,13 +23,6 @@ struct sl28cpld_hwmon {
u32 offset;
};
-static umode_t sl28cpld_hwmon_is_visible(const void *data,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
static int sl28cpld_hwmon_read(struct device *dev,
enum hwmon_sensor_types type, u32 attr,
int channel, long *input)
@@ -73,7 +66,7 @@ static const struct hwmon_channel_info * const sl28cpld_hwmon_info[] = {
};
static const struct hwmon_ops sl28cpld_hwmon_ops = {
- .is_visible = sl28cpld_hwmon_is_visible,
+ .visible = 0444,
.read = sl28cpld_hwmon_read,
};
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 0d46edbcb144..595bceb78d76 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -858,7 +858,7 @@ static struct platform_driver smsc47m1_driver __refdata = {
.driver = {
.name = DRVNAME,
},
- .remove_new = __exit_p(smsc47m1_remove),
+ .remove = __exit_p(smsc47m1_remove),
};
static int __init smsc47m1_device_add(unsigned short address,
diff --git a/drivers/hwmon/spd5118.c b/drivers/hwmon/spd5118.c
index fcbce5a01e55..358152868d96 100644
--- a/drivers/hwmon/spd5118.c
+++ b/drivers/hwmon/spd5118.c
@@ -291,12 +291,6 @@ static umode_t spd5118_is_visible(const void *_data, enum hwmon_sensor_types typ
}
}
-static inline bool spd5118_parity8(u8 w)
-{
- w ^= w >> 4;
- return (0x6996 >> (w & 0xf)) & 1;
-}
-
/*
* Bank and vendor id are 8-bit fields with seven data bits and odd parity.
* Vendor IDs 0 and 0x7f are invalid.
@@ -304,7 +298,7 @@ static inline bool spd5118_parity8(u8 w)
*/
static bool spd5118_vendor_valid(u8 bank, u8 id)
{
- if (!spd5118_parity8(bank) || !spd5118_parity8(id))
+ if (parity8(bank) == 0 || parity8(id) == 0)
return false;
id &= 0x7f;
@@ -671,7 +665,7 @@ static int spd5118_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(spd5118_pm_ops, spd5118_suspend, spd5118_resume);
static const struct i2c_device_id spd5118_id[] = {
- { "spd5118", 0 },
+ { "spd5118" },
{ }
};
MODULE_DEVICE_TABLE(i2c, spd5118_id);
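
The spd5118 hunk drops the driver-local parity helper in favour of the
generic parity8(). A user-space sketch of the fold the removed helper used,
which returns 1 for a byte with an odd number of set bits:

    #include <stdio.h>

    static int parity8_example(unsigned char w)
    {
        w ^= w >> 4;
        return (0x6996 >> (w & 0xf)) & 1;
    }

    int main(void)
    {
        /* valid bank/vendor bytes carry odd parity, so this must be 1 */
        printf("parity8(0x80) = %d\n", parity8_example(0x80));
        return 0;
    }
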
diff --git a/drivers/hwmon/surface_fan.c b/drivers/hwmon/surface_fan.c
index de3c5a2409c6..aafb4ac92e6c 100644
--- a/drivers/hwmon/surface_fan.c
+++ b/drivers/hwmon/surface_fan.c
@@ -18,14 +18,6 @@ SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_fan_rpm_get, __le16, {
.command_id = 0x01,
});
-// hwmon
-static umode_t surface_fan_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type, u32 attr,
- int channel)
-{
- return 0444;
-}
-
static int surface_fan_hwmon_read(struct device *dev,
enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
@@ -49,7 +41,7 @@ static const struct hwmon_channel_info *const surface_fan_info[] = {
};
static const struct hwmon_ops surface_fan_hwmon_ops = {
- .is_visible = surface_fan_hwmon_is_visible,
+ .visible = 0444,
.read = surface_fan_hwmon_read,
};
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index a82bbc959eb1..a971ff628435 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -8,14 +8,15 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/i2c.h>
+#include <linux/i3c/device.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#define DRIVER_NAME "tmp108"
@@ -323,33 +324,23 @@ static const struct regmap_config tmp108_regmap_config = {
.use_single_write = true,
};
-static int tmp108_probe(struct i2c_client *client)
+static int tmp108_common_probe(struct device *dev, struct regmap *regmap, char *name)
{
- struct device *dev = &client->dev;
struct device *hwmon_dev;
struct tmp108 *tmp108;
- int err;
u32 config;
+ int err;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_WORD_DATA)) {
- dev_err(dev,
- "adapter doesn't support SMBus word transactions\n");
- return -ENODEV;
- }
+ err = devm_regulator_get_enable(dev, "vcc");
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable regulator\n");
tmp108 = devm_kzalloc(dev, sizeof(*tmp108), GFP_KERNEL);
if (!tmp108)
return -ENOMEM;
dev_set_drvdata(dev, tmp108);
-
- tmp108->regmap = devm_regmap_init_i2c(client, &tmp108_regmap_config);
- if (IS_ERR(tmp108->regmap)) {
- err = PTR_ERR(tmp108->regmap);
- dev_err(dev, "regmap init failed: %d", err);
- return err;
- }
+ tmp108->regmap = regmap;
err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &config);
if (err < 0) {
@@ -383,13 +374,30 @@ static int tmp108_probe(struct i2c_client *client)
return err;
}
- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, name,
tmp108,
&tmp108_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
+static int tmp108_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return dev_err_probe(dev, -ENODEV,
+ "adapter doesn't support SMBus word transactions\n");
+
+ regmap = devm_regmap_init_i2c(client, &tmp108_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "regmap init failed");
+
+ return tmp108_common_probe(dev, regmap, client->name);
+}
+
static int tmp108_suspend(struct device *dev)
{
struct tmp108 *tmp108 = dev_get_drvdata(dev);
@@ -413,30 +421,57 @@ static int tmp108_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
static const struct i2c_device_id tmp108_i2c_ids[] = {
+ { "p3t1085" },
{ "tmp108" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp108_i2c_ids);
-#ifdef CONFIG_OF
static const struct of_device_id tmp108_of_ids[] = {
+ { .compatible = "nxp,p3t1085", },
{ .compatible = "ti,tmp108", },
{}
};
MODULE_DEVICE_TABLE(of, tmp108_of_ids);
-#endif
static struct i2c_driver tmp108_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = pm_sleep_ptr(&tmp108_dev_pm_ops),
- .of_match_table = of_match_ptr(tmp108_of_ids),
+ .of_match_table = tmp108_of_ids,
},
.probe = tmp108_probe,
.id_table = tmp108_i2c_ids,
};
-module_i2c_driver(tmp108_driver);
+static const struct i3c_device_id p3t1085_i3c_ids[] = {
+ I3C_DEVICE(0x011b, 0x1529, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(i3c, p3t1085_i3c_ids);
+
+static int p3t1085_i3c_probe(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i3c(i3cdev, &tmp108_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to register i3c regmap\n");
+
+ return tmp108_common_probe(dev, regmap, "p3t1085_i3c");
+}
+
+static struct i3c_driver p3t1085_driver = {
+ .driver = {
+ .name = "p3t1085_i3c",
+ },
+ .probe = p3t1085_i3c_probe,
+ .id_table = p3t1085_i3c_ids,
+};
+
+module_i3c_i2c_driver(p3t1085_driver, &tmp108_driver)
MODULE_AUTHOR("John Muir <john@jmuir.com>");
MODULE_DESCRIPTION("Texas Instruments TMP108 temperature sensor driver");
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 926d28cd3fab..5acbfd7d088d 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -182,7 +182,7 @@ struct tmp51x_data {
struct regmap *regmap;
};
-// Set the shift based on the gain 8=4, 4=3, 2=2, 1=1
+// Set the shift based on the gain: 8 -> 1, 4 -> 2, 2 -> 3, 1 -> 4
static inline u8 tmp51x_get_pga_shift(struct tmp51x_data *data)
{
return 5 - ffs(data->pga_gain);
@@ -204,8 +204,11 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
* 2's complement number shifted by one to four depending
* on the pga gain setting. 1lsb = 10uV
*/
- *val = sign_extend32(regval, 17 - tmp51x_get_pga_shift(data));
- *val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
+ *val = sign_extend32(regval,
+ reg == TMP51X_SHUNT_CURRENT_RESULT ?
+ 16 - tmp51x_get_pga_shift(data) : 15);
+ *val = DIV_ROUND_CLOSEST(*val * 10 * (long)MILLI, (long)data->shunt_uohms);
+
break;
case TMP51X_BUS_VOLTAGE_RESULT:
case TMP51X_BUS_VOLTAGE_H_LIMIT:
@@ -220,8 +223,8 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
break;
case TMP51X_BUS_CURRENT_RESULT:
// Current = (ShuntVoltage * CalibrationRegister) / 4096
- *val = sign_extend32(regval, 16) * data->curr_lsb_ua;
- *val = DIV_ROUND_CLOSEST(*val, MILLI);
+ *val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua;
+ *val = DIV_ROUND_CLOSEST(*val, (long)MILLI);
break;
case TMP51X_LOCAL_TEMP_RESULT:
case TMP51X_REMOTE_TEMP_RESULT_1:
@@ -232,7 +235,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
case TMP51X_REMOTE_TEMP_LIMIT_2:
case TMP513_REMOTE_TEMP_LIMIT_3:
// 1lsb = 0.0625 degrees centigrade
- *val = sign_extend32(regval, 16) >> TMP51X_TEMP_SHIFT;
+ *val = sign_extend32(regval, 15) >> TMP51X_TEMP_SHIFT;
*val = DIV_ROUND_CLOSEST(*val * 625, 10);
break;
case TMP51X_N_FACTOR_AND_HYST_1:
@@ -261,7 +264,7 @@ static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
* The user enter current value and we convert it to
* voltage. 1lsb = 10uV
*/
- val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10 * MILLI);
+ val = DIV_ROUND_CLOSEST(val * (long)data->shunt_uohms, 10 * (long)MILLI);
max_val = U16_MAX >> tmp51x_get_pga_shift(data);
regval = clamp_val(val, -max_val, max_val);
break;
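
The tmp513 fixes above pass the correct index to sign_extend32(), which takes
the bit position of the sign bit: for a 16-bit register that is bit 15, and
extending from bit 16 turned negative readings into large positive ones. A
user-space sketch of the difference:

    #include <stdio.h>
    #include <stdint.h>

    static int32_t sign_extend32_example(uint32_t value, int index)
    {
        int shift = 31 - index;
        return (int32_t)(value << shift) >> shift;
    }

    int main(void)
    {
        uint32_t regval = 0x8000;   /* most negative 16-bit reading */

        printf("from bit 16: %d\n", sign_extend32_example(regval, 16)); /* 32768 */
        printf("from bit 15: %d\n", sign_extend32_example(regval, 15)); /* -32768 */
        return 0;
    }
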
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index dfcfb09d9f3c..80fb03f30c30 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -132,7 +132,7 @@ static int tps23861_read_temp(struct tps23861_data *data, long *val)
if (err < 0)
return err;
- *val = (regval * TEMPERATURE_LSB) - 20000;
+ *val = ((long)regval * TEMPERATURE_LSB) - 20000;
return 0;
}
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 2765d5f1b7f0..e4f1bb538628 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -317,7 +317,7 @@ static struct platform_driver env_driver = {
.of_match_table = env_match,
},
.probe = env_probe,
- .remove_new = env_remove,
+ .remove = env_remove,
};
module_platform_driver(env_driver);
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 5abe95b683c0..823bff2871e1 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -197,7 +197,7 @@ static struct platform_driver via_cputemp_driver = {
.name = DRVNAME,
},
.probe = via_cputemp_probe,
- .remove_new = via_cputemp_remove,
+ .remove = via_cputemp_remove,
};
struct pdev_entry {
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 3a002ad3c005..bbaeb808cc15 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -799,7 +799,7 @@ static struct platform_driver via686a_driver = {
.name = DRIVER_NAME,
},
.probe = via686a_probe,
- .remove_new = via686a_remove,
+ .remove = via686a_remove,
};
static const struct pci_device_id via686a_pci_ids[] = {
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 2f3890463e18..386edea6b69e 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -1221,7 +1221,7 @@ static struct platform_driver vt1211_driver = {
.name = DRVNAME,
},
.probe = vt1211_probe,
- .remove_new = vt1211_remove,
+ .remove = vt1211_remove,
};
static int __init vt1211_device_add(unsigned short address)
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index dcdd14ccd115..3bf27c21845b 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -910,11 +910,11 @@ static void vt8231_remove(struct platform_device *pdev)
static struct platform_driver vt8231_driver = {
- .driver = {
+ .driver = {
.name = DRIVER_NAME,
},
.probe = vt8231_probe,
- .remove_new = vt8231_remove,
+ .remove = vt8231_remove,
};
static const struct pci_device_id vt8231_pci_ids[] = {
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 2fc9b718e2ab..95115d7b863e 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1844,7 +1844,7 @@ static struct platform_driver w83627hf_driver = {
.pm = W83627HF_DEV_PM_OPS,
},
.probe = w83627hf_probe,
- .remove_new = w83627hf_remove,
+ .remove = w83627hf_remove,
};
static int __init w83627hf_find(int sioaddr, unsigned short *addr,
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index b7957c84d235..076200ed2ec9 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1828,7 +1828,7 @@ static struct platform_driver w83781d_isa_driver = {
.name = "w83781d",
},
.probe = w83781d_isa_probe,
- .remove_new = w83781d_isa_remove,
+ .remove = w83781d_isa_remove,
};
/* return 1 if a supported chip is found, 0 otherwise */
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 5e0759a70f6d..1e3bd129a922 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -772,7 +772,7 @@ MODULE_DEVICE_TABLE(of, xgene_hwmon_of_match);
static struct platform_driver xgene_hwmon_driver = {
.probe = xgene_hwmon_probe,
- .remove_new = xgene_hwmon_remove,
+ .remove = xgene_hwmon_remove,
.driver = {
.name = "xgene-slimpro-hwmon",
.of_match_table = xgene_hwmon_of_match,
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 1edca1092f29..5a2d8c3e0d80 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -131,7 +131,7 @@ static void u8500_hsem_remove(struct platform_device *pdev)
static struct platform_driver u8500_hsem_driver = {
.probe = u8500_hsem_probe,
- .remove_new = u8500_hsem_remove,
+ .remove = u8500_hsem_remove,
.driver = {
.name = "u8500_hsem",
},
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index bfea880d6dfb..275cc0d9f505 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -689,7 +689,7 @@ MODULE_DEVICE_TABLE(acpi, catu_acpi_ids);
static struct platform_driver catu_platform_driver = {
.probe = catu_platform_probe,
- .remove_new = catu_platform_remove,
+ .remove = catu_platform_remove,
.driver = {
.name = "coresight-catu-platform",
.acpi_match_table = ACPI_PTR(catu_acpi_ids),
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index ea38ecf26fcb..0a9380350fb5 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -75,22 +75,54 @@ struct coresight_device *coresight_get_percpu_sink(int cpu)
}
EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
+static struct coresight_device *coresight_get_source(struct list_head *path)
+{
+ struct coresight_device *csdev;
+
+ if (!path)
+ return NULL;
+
+ csdev = list_first_entry(path, struct coresight_node, link)->csdev;
+ if (!coresight_is_device_source(csdev))
+ return NULL;
+
+ return csdev;
+}
+
+/**
+ * coresight_blocks_source - checks whether the connection matches the source
+ * of the path, if the connection is bound to a specific source.
+ * @src: The source device of the trace path
+ * @conn: The connection of one outport
+ *
+ * Return false if the connection doesn't have a source bound to it, or if the
+ * source of the path matches the source bound to the connection.
+ */
+static bool coresight_blocks_source(struct coresight_device *src,
+ struct coresight_connection *conn)
+{
+ return conn->filter_src_fwnode && (conn->filter_src_dev != src);
+}
+
static struct coresight_connection *
-coresight_find_out_connection(struct coresight_device *src_dev,
- struct coresight_device *dest_dev)
+coresight_find_out_connection(struct coresight_device *csdev,
+ struct coresight_device *out_dev,
+ struct coresight_device *trace_src)
{
int i;
struct coresight_connection *conn;
- for (i = 0; i < src_dev->pdata->nr_outconns; i++) {
- conn = src_dev->pdata->out_conns[i];
- if (conn->dest_dev == dest_dev)
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
+ conn = csdev->pdata->out_conns[i];
+ if (coresight_blocks_source(trace_src, conn))
+ continue;
+ if (conn->dest_dev == out_dev)
return conn;
}
- dev_err(&src_dev->dev,
- "couldn't find output connection, src_dev: %s, dest_dev: %s\n",
- dev_name(&src_dev->dev), dev_name(&dest_dev->dev));
+ dev_err(&csdev->dev,
+ "couldn't find output connection, csdev: %s, out_dev: %s\n",
+ dev_name(&csdev->dev), dev_name(&out_dev->dev));
return ERR_PTR(-ENODEV);
}
@@ -251,7 +283,8 @@ static void coresight_disable_sink(struct coresight_device *csdev)
static int coresight_enable_link(struct coresight_device *csdev,
struct coresight_device *parent,
- struct coresight_device *child)
+ struct coresight_device *child,
+ struct coresight_device *source)
{
int link_subtype;
struct coresight_connection *inconn, *outconn;
@@ -259,8 +292,8 @@ static int coresight_enable_link(struct coresight_device *csdev,
if (!parent || !child)
return -EINVAL;
- inconn = coresight_find_out_connection(parent, csdev);
- outconn = coresight_find_out_connection(csdev, child);
+ inconn = coresight_find_out_connection(parent, csdev, source);
+ outconn = coresight_find_out_connection(csdev, child, source);
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && IS_ERR(inconn))
@@ -273,15 +306,16 @@ static int coresight_enable_link(struct coresight_device *csdev,
static void coresight_disable_link(struct coresight_device *csdev,
struct coresight_device *parent,
- struct coresight_device *child)
+ struct coresight_device *child,
+ struct coresight_device *source)
{
struct coresight_connection *inconn, *outconn;
if (!parent || !child)
return;
- inconn = coresight_find_out_connection(parent, csdev);
- outconn = coresight_find_out_connection(csdev, child);
+ inconn = coresight_find_out_connection(parent, csdev, source);
+ outconn = coresight_find_out_connection(csdev, child, source);
link_ops(csdev)->disable(csdev, inconn, outconn);
}
@@ -375,7 +409,8 @@ static void coresight_disable_path_from(struct list_head *path,
case CORESIGHT_DEV_TYPE_LINK:
parent = list_prev_entry(nd, link)->csdev;
child = list_next_entry(nd, link)->csdev;
- coresight_disable_link(csdev, parent, child);
+ coresight_disable_link(csdev, parent, child,
+ coresight_get_source(path));
break;
default:
break;
@@ -418,7 +453,9 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
u32 type;
struct coresight_node *nd;
struct coresight_device *csdev, *parent, *child;
+ struct coresight_device *source;
+ source = coresight_get_source(path);
list_for_each_entry_reverse(nd, path, link) {
csdev = nd->csdev;
type = csdev->type;
@@ -456,7 +493,7 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
case CORESIGHT_DEV_TYPE_LINK:
parent = list_prev_entry(nd, link)->csdev;
child = list_next_entry(nd, link)->csdev;
- ret = coresight_enable_link(csdev, parent, child);
+ ret = coresight_enable_link(csdev, parent, child, source);
if (ret)
goto err;
break;
@@ -619,6 +656,7 @@ static void coresight_drop_device(struct coresight_device *csdev)
/**
* _coresight_build_path - recursively build a path from a @csdev to a sink.
* @csdev: The device to start from.
+ * @source: The trace source device of the path.
* @sink: The final sink we want in this path.
* @path: The list to add devices to.
*
@@ -628,6 +666,7 @@ static void coresight_drop_device(struct coresight_device *csdev)
* the source is the first device and the sink the last one.
*/
static int _coresight_build_path(struct coresight_device *csdev,
+ struct coresight_device *source,
struct coresight_device *sink,
struct list_head *path)
{
@@ -641,7 +680,7 @@ static int _coresight_build_path(struct coresight_device *csdev,
if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) &&
sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) {
- if (_coresight_build_path(sink, sink, path) == 0) {
+ if (_coresight_build_path(sink, source, sink, path) == 0) {
found = true;
goto out;
}
@@ -652,8 +691,12 @@ static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_device *child_dev;
child_dev = csdev->pdata->out_conns[i]->dest_dev;
+
+ if (coresight_blocks_source(source, csdev->pdata->out_conns[i]))
+ continue;
+
if (child_dev &&
- _coresight_build_path(child_dev, sink, path) == 0) {
+ _coresight_build_path(child_dev, source, sink, path) == 0) {
found = true;
break;
}
@@ -698,7 +741,7 @@ struct list_head *coresight_build_path(struct coresight_device *source,
INIT_LIST_HEAD(path);
- rc = _coresight_build_path(source, sink, path);
+ rc = _coresight_build_path(source, source, sink, path);
if (rc) {
kfree(path);
return ERR_PTR(rc);
@@ -927,6 +970,16 @@ static int coresight_orphan_match(struct device *dev, void *data)
for (i = 0; i < src_csdev->pdata->nr_outconns; i++) {
conn = src_csdev->pdata->out_conns[i];
+ /* Fix up the filter source device before skipping the port */
+ if (conn->filter_src_fwnode && !conn->filter_src_dev) {
+ if (dst_csdev &&
+ (conn->filter_src_fwnode == dst_csdev->dev.fwnode) &&
+ !WARN_ON_ONCE(!coresight_is_device_source(dst_csdev)))
+ conn->filter_src_dev = dst_csdev;
+ else
+ still_orphan = true;
+ }
+
/* Skip the port if it's already connected. */
if (conn->dest_dev)
continue;
@@ -977,18 +1030,40 @@ static int coresight_fixup_orphan_conns(struct coresight_device *csdev)
csdev, coresight_orphan_match);
}
+static int coresight_clear_filter_source(struct device *dev, void *data)
+{
+ int i;
+ struct coresight_device *source = data;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ for (i = 0; i < csdev->pdata->nr_outconns; ++i) {
+ if (csdev->pdata->out_conns[i]->filter_src_dev == source)
+ csdev->pdata->out_conns[i]->filter_src_dev = NULL;
+ }
+ return 0;
+}
+
/* coresight_remove_conns - Remove other device's references to this device */
static void coresight_remove_conns(struct coresight_device *csdev)
{
int i, j;
struct coresight_connection *conn;
+ if (coresight_is_device_source(csdev))
+ bus_for_each_dev(&coresight_bustype, NULL, csdev,
+ coresight_clear_filter_source);
+
/*
* Remove the input connection references from the destination device
* for each output connection.
*/
for (i = 0; i < csdev->pdata->nr_outconns; i++) {
conn = csdev->pdata->out_conns[i];
+ if (conn->filter_src_fwnode) {
+ conn->filter_src_dev = NULL;
+ fwnode_handle_put(conn->filter_src_fwnode);
+ }
+
if (!conn->dest_dev)
continue;
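For context on the new filter_src_dev handling above: the path-building hunk earlier skips any output connection for which coresight_blocks_source() returns true. That helper is not part of this excerpt, so the following is only a plausible sketch of its contract, built from the connection fields introduced here:

/*
 * Sketch (not the series' actual helper): a connection with a hardcoded
 * filter blocks every trace source except the one it names. An
 * unresolved filter (fwnode known, device not yet probed) is treated
 * as blocking, in line with the "still_orphan" handling above.
 */
static bool coresight_blocks_source(struct coresight_device *source,
				    struct coresight_connection *conn)
{
	return conn->filter_src_fwnode && conn->filter_src_dev != source;
}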
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 75962dae9aa1..342c3aaf414d 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -763,7 +763,7 @@ static const struct dev_pm_ops debug_dev_pm_ops = {
static struct platform_driver debug_platform_driver = {
.probe = debug_platform_probe,
- .remove_new = debug_platform_remove,
+ .remove = debug_platform_remove,
.driver = {
.name = "coresight-debug-platform",
.acpi_match_table = ACPI_PTR(debug_platform_ids),
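This and the many identical .remove_new -> .remove conversions below assume the updated struct platform_driver, where .remove itself now carries the void-returning signature; a minimal sketch of what these drivers converge on (the example_* names are illustrative):

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* Release resources; no error can be propagated to the core. */
}

static struct platform_driver example_driver = {
	.probe  = example_probe,
	.remove = example_remove,	/* previously spelled .remove_new */
	.driver = {
		.name = "example",
	},
};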
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
index bb85fa663ffc..9be53be8964b 100644
--- a/drivers/hwtracing/coresight/coresight-dummy.c
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -11,10 +11,12 @@
#include <linux/pm_runtime.h>
#include "coresight-priv.h"
+#include "coresight-trace-id.h"
struct dummy_drvdata {
struct device *dev;
struct coresight_device *csdev;
+ u8 traceid;
};
DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source");
@@ -72,6 +74,32 @@ static const struct coresight_ops dummy_sink_cs_ops = {
.sink_ops = &dummy_sink_ops,
};
+/* Users can read the trace ID of the dummy source from this node. */
+static ssize_t traceid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct dummy_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->traceid;
+ return sysfs_emit(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(traceid);
+
+static struct attribute *coresight_dummy_attrs[] = {
+ &dev_attr_traceid.attr,
+ NULL,
+};
+
+static const struct attribute_group coresight_dummy_group = {
+ .attrs = coresight_dummy_attrs,
+};
+
+static const struct attribute_group *coresight_dummy_groups[] = {
+ &coresight_dummy_group,
+ NULL,
+};
+
static int dummy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -79,6 +107,11 @@ static int dummy_probe(struct platform_device *pdev)
struct coresight_platform_data *pdata;
struct dummy_drvdata *drvdata;
struct coresight_desc desc = { 0 };
+ int ret = 0, trace_id = 0;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
if (of_device_is_compatible(node, "arm,coresight-dummy-source")) {
@@ -90,6 +123,26 @@ static int dummy_probe(struct platform_device *pdev)
desc.subtype.source_subtype =
CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS;
desc.ops = &dummy_source_cs_ops;
+ desc.groups = coresight_dummy_groups;
+
+ ret = coresight_get_static_trace_id(dev, &trace_id);
+ if (!ret) {
+ /* Get the static id if id is set in device tree. */
+ ret = coresight_trace_id_get_static_system_id(trace_id);
+ if (ret < 0) {
+ dev_err(dev, "Fail to get static id.\n");
+ return ret;
+ }
+ } else {
+ /* Get next available id if id is not set in device tree. */
+ trace_id = coresight_trace_id_get_system_id();
+ if (trace_id < 0) {
+ ret = trace_id;
+ return ret;
+ }
+ }
+ drvdata->traceid = (u8)trace_id;
+
} else if (of_device_is_compatible(node, "arm,coresight-dummy-sink")) {
desc.name = coresight_alloc_device_name(&sink_devs, dev);
if (!desc.name)
@@ -104,27 +157,35 @@ static int dummy_probe(struct platform_device *pdev)
}
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto free_id;
+ }
pdev->dev.platform_data = pdata;
- drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
desc.pdata = pdev->dev.platform_data;
desc.dev = &pdev->dev;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev))
- return PTR_ERR(drvdata->csdev);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto free_id;
+ }
pm_runtime_enable(dev);
dev_dbg(dev, "Dummy device initialized\n");
- return 0;
+ ret = 0;
+ goto out;
+
+free_id:
+ if (IS_VALID_CS_TRACE_ID(drvdata->traceid))
+ coresight_trace_id_put_system_id(drvdata->traceid);
+
+out:
+ return ret;
}
static void dummy_remove(struct platform_device *pdev)
@@ -132,6 +193,8 @@ static void dummy_remove(struct platform_device *pdev)
struct dummy_drvdata *drvdata = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ if (IS_VALID_CS_TRACE_ID(drvdata->traceid))
+ coresight_trace_id_put_system_id(drvdata->traceid);
pm_runtime_disable(dev);
coresight_unregister(drvdata->csdev);
}
@@ -144,7 +207,7 @@ static const struct of_device_id dummy_match[] = {
static struct platform_driver dummy_driver = {
.probe = dummy_probe,
- .remove_new = dummy_remove,
+ .remove = dummy_remove,
.driver = {
.name = "coresight-dummy",
.of_match_table = dummy_match,
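The probe flow above honors a firmware-provided static trace ID and only falls back to dynamic allocation when the property is absent. A condensed sketch of that decision, using the APIs added later in this series (the helper name is illustrative):

/* Sketch: resolve a trace ID for a system source such as the dummy. */
static int dummy_resolve_trace_id(struct device *dev)
{
	u32 trace_id = 0;

	/* "arm,static-trace-id" present: that exact ID must be used... */
	if (!coresight_get_static_trace_id(dev, &trace_id))
		return coresight_trace_id_get_static_system_id(trace_id);

	/* ...otherwise any free system ID will do. */
	return coresight_trace_id_get_system_id();
}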
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 66d44a404ad0..2c1a60577728 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -268,10 +269,28 @@ struct etm4_enable_arg {
*/
static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
{
+ u64 trfcr;
+
/* If the CPU doesn't support FEAT_TRF, nothing to do */
if (!drvdata->trfcr)
return;
- cpu_prohibit_trace();
+
+ trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+
+ write_trfcr(trfcr);
+ kvm_tracing_set_el1_configuration(trfcr);
+}
+
+static u64 etm4x_get_kern_user_filter(struct etmv4_drvdata *drvdata)
+{
+ u64 trfcr = drvdata->trfcr;
+
+ if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
+ trfcr &= ~TRFCR_EL1_ExTRE;
+ if (drvdata->config.mode & ETM_MODE_EXCL_USER)
+ trfcr &= ~TRFCR_EL1_E0TRE;
+
+ return trfcr;
}
/*
@@ -286,18 +305,28 @@ static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
*/
static void etm4x_allow_trace(struct etmv4_drvdata *drvdata)
{
- u64 trfcr = drvdata->trfcr;
+ u64 trfcr, guest_trfcr;
/* If the CPU doesn't support FEAT_TRF, nothing to do */
- if (!trfcr)
+ if (!drvdata->trfcr)
return;
- if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
- trfcr &= ~TRFCR_ELx_ExTRE;
- if (drvdata->config.mode & ETM_MODE_EXCL_USER)
- trfcr &= ~TRFCR_ELx_E0TRE;
+ if (drvdata->config.mode & ETM_MODE_EXCL_HOST)
+ trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+ else
+ trfcr = etm4x_get_kern_user_filter(drvdata);
write_trfcr(trfcr);
+
+ /* Set filters for guests and pass to KVM */
+ if (drvdata->config.mode & ETM_MODE_EXCL_GUEST)
+ guest_trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);
+ else
+ guest_trfcr = etm4x_get_kern_user_filter(drvdata);
+
+ /* TRFCR_EL1 doesn't have CX, so mask it out. */
+ guest_trfcr &= ~TRFCR_EL2_CX;
+ kvm_tracing_set_el1_configuration(guest_trfcr);
}
#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
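To make the host/guest filter split above concrete, here is a hedged sketch that computes both effective TRFCR values in one place (constants as referenced in this hunk; the real driver writes the host value locally and hands the guest value to KVM):

/* Sketch only: effective TRFCR for the host and for guests. */
static void etm4x_compute_trfcr(struct etmv4_drvdata *drvdata,
				u64 *host, u64 *guest)
{
	u64 off = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE);

	*host = (drvdata->config.mode & ETM_MODE_EXCL_HOST) ?
		off : etm4x_get_kern_user_filter(drvdata);

	*guest = (drvdata->config.mode & ETM_MODE_EXCL_GUEST) ?
		off : etm4x_get_kern_user_filter(drvdata);
	/* TRFCR_EL1 has no CX bit, so it must never reach the guest. */
	*guest &= ~TRFCR_EL2_CX;
}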
@@ -655,6 +684,12 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
if (attr->exclude_user)
config->mode = ETM_MODE_EXCL_USER;
+ if (attr->exclude_host)
+ config->mode |= ETM_MODE_EXCL_HOST;
+
+ if (attr->exclude_guest)
+ config->mode |= ETM_MODE_EXCL_GUEST;
+
/* Always start from the default config */
etm4_set_default_config(config);
@@ -1141,9 +1176,9 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
* tracing at the kernel EL and EL0, forcing to use the
* virtual time as the timestamp.
*/
- trfcr = (TRFCR_ELx_TS_VIRTUAL |
- TRFCR_ELx_ExTRE |
- TRFCR_ELx_E0TRE);
+ trfcr = (TRFCR_EL1_TS_VIRTUAL |
+ TRFCR_EL1_ExTRE |
+ TRFCR_EL1_E0TRE);
/* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */
if (is_kernel_in_hyp_mode())
@@ -2399,7 +2434,7 @@ MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids);
static struct platform_driver etm4_platform_driver = {
.probe = etm4_probe_platform_dev,
- .remove_new = etm4_remove_platform_dev,
+ .remove = etm4_remove_platform_dev,
.driver = {
.name = "coresight-etm4x",
.of_match_table = etm4_sysreg_match,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index a9f19629f3f8..c767f8ae4cf1 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2319,11 +2319,11 @@ static ssize_t ts_source_show(struct device *dev,
goto out;
}
- switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
- case TRFCR_ELx_TS_VIRTUAL:
- case TRFCR_ELx_TS_GUEST_PHYSICAL:
- case TRFCR_ELx_TS_PHYSICAL:
- val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
+ switch (drvdata->trfcr & TRFCR_EL1_TS_MASK) {
+ case TRFCR_EL1_TS_VIRTUAL:
+ case TRFCR_EL1_TS_GUEST_PHYSICAL:
+ case TRFCR_EL1_TS_PHYSICAL:
+ val = FIELD_GET(TRFCR_EL1_TS_MASK, drvdata->trfcr);
break;
default:
val = -1;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 9e9165f62e81..1119762b5cec 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -817,7 +817,7 @@ enum etm_impdef_type {
* @s_ex_level: Secure ELs where tracing is supported.
*/
struct etmv4_config {
- u32 mode;
+ u64 mode;
u32 pe_sel;
u32 cfg;
u32 eventctrl0;
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 5a819c8970fb..8faf51469bb8 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -86,14 +86,14 @@ static int funnel_enable(struct coresight_device *csdev,
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_read(&in->dest_refcnt) == 0) {
+ if (in->dest_refcnt == 0) {
if (drvdata->base)
rc = dynamic_funnel_enable_hw(drvdata, in->dest_port);
if (!rc)
first_enable = true;
}
if (!rc)
- atomic_inc(&in->dest_refcnt);
+ in->dest_refcnt++;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
@@ -130,7 +130,7 @@ static void funnel_disable(struct coresight_device *csdev,
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(&in->dest_refcnt) == 0) {
+ if (--in->dest_refcnt == 0) {
if (drvdata->base)
dynamic_funnel_disable_hw(drvdata, in->dest_port);
last_disable = true;
@@ -377,7 +377,7 @@ MODULE_DEVICE_TABLE(acpi, funnel_acpi_ids);
static struct platform_driver funnel_driver = {
.probe = funnel_platform_probe,
- .remove_new = funnel_platform_remove,
+ .remove = funnel_platform_remove,
.driver = {
.name = "coresight-funnel",
/* THIS_MODULE is taken care of by platform_driver_register() */
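The dest_refcnt change from atomic_t to a plain int above is correct because every access is now serialized by drvdata->spinlock; the same conversion is applied to the replicator and TPDA below. A reduced sketch of the pattern, with hypothetical names:

/* Sketch: a lock-guarded counter replaces the atomic. */
struct port_state {
	spinlock_t lock;
	int refcnt;		/* guarded by @lock */
};

static bool port_get(struct port_state *p)
{
	unsigned long flags;
	bool first;

	spin_lock_irqsave(&p->lock, flags);
	first = (p->refcnt++ == 0);	/* program hardware only once */
	spin_unlock_irqrestore(&p->lock, flags);

	return first;
}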
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 64e171eaad82..8192ba3279f0 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -243,6 +243,27 @@ static int of_coresight_parse_endpoint(struct device *dev,
conn.dest_fwnode = fwnode_handle_get(rdev_fwnode);
conn.dest_port = rendpoint.port;
+ /*
+ * Get the firmware node of the filter source through the
+ * reference. This can be used to filter out the source when
+ * building the path.
+ */
+ conn.filter_src_fwnode =
+ fwnode_find_reference(&ep->fwnode, "filter-source", 0);
+ if (IS_ERR(conn.filter_src_fwnode)) {
+ conn.filter_src_fwnode = NULL;
+ } else {
+ conn.filter_src_dev =
+ coresight_find_csdev_by_fwnode(conn.filter_src_fwnode);
+ if (conn.filter_src_dev &&
+ !coresight_is_device_source(conn.filter_src_dev)) {
+ dev_warn(dev, "port %d: Filter handle is not a trace source : %s\n",
+ conn.src_port, dev_name(&conn.filter_src_dev->dev));
+ conn.filter_src_dev = NULL;
+ conn.filter_src_fwnode = NULL;
+ }
+ }
+
new_conn = coresight_add_out_conn(dev, pdata, &conn);
if (IS_ERR_VALUE(new_conn)) {
fwnode_handle_put(conn.dest_fwnode);
@@ -796,6 +817,12 @@ int coresight_get_cpu(struct device *dev)
}
EXPORT_SYMBOL_GPL(coresight_get_cpu);
+int coresight_get_static_trace_id(struct device *dev, u32 *id)
+{
+ return fwnode_property_read_u32(dev_fwnode(dev), "arm,static-trace-id", id);
+}
+EXPORT_SYMBOL_GPL(coresight_get_static_trace_id);
+
struct coresight_platform_data *
coresight_get_platform_data(struct device *dev)
{
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 05f891ca6b5c..76403530f33e 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -42,6 +42,9 @@ extern const struct device_type coresight_dev_type[];
#define ETM_MODE_EXCL_KERN BIT(30)
#define ETM_MODE_EXCL_USER BIT(31)
+#define ETM_MODE_EXCL_HOST BIT(32)
+#define ETM_MODE_EXCL_GUEST BIT(33)
+
struct cs_pair_attribute {
struct device_attribute attr;
u32 lo_off;
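The two new mode bits sit above bit 31, which is also why etmv4_config::mode was widened from u32 to u64 in the coresight-etm4x.h hunk above. A two-line illustration of the truncation a u32 would cause (BIT_ULL() used here for portability; the header itself relies on a 64-bit unsigned long):

u64 mode64 = BIT_ULL(32) | BIT_ULL(33);	/* EXCL_HOST | EXCL_GUEST */
u32 mode32 = (u32)mode64;		/* truncates to 0: both flags lost */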
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 3e55be9c8418..a1181c9048c0 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -126,7 +126,7 @@ static int replicator_enable(struct coresight_device *csdev,
bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_read(&out->src_refcnt) == 0) {
+ if (out->src_refcnt == 0) {
if (drvdata->base)
rc = dynamic_replicator_enable(drvdata, in->dest_port,
out->src_port);
@@ -134,7 +134,7 @@ static int replicator_enable(struct coresight_device *csdev,
first_enable = true;
}
if (!rc)
- atomic_inc(&out->src_refcnt);
+ out->src_refcnt++;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
@@ -180,7 +180,7 @@ static void replicator_disable(struct coresight_device *csdev,
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(&out->src_refcnt) == 0) {
+ if (--out->src_refcnt == 0) {
if (drvdata->base)
dynamic_replicator_disable(drvdata, in->dest_port,
out->src_port);
@@ -389,7 +389,7 @@ MODULE_DEVICE_TABLE(acpi, replicator_acpi_ids);
static struct platform_driver replicator_driver = {
.probe = replicator_platform_probe,
- .remove_new = replicator_platform_remove,
+ .remove = replicator_platform_remove,
.driver = {
.name = "coresight-replicator",
/* THIS_MODULE is taken care of by platform_driver_register() */
diff --git a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
index 53840a2c41f2..303d71911870 100644
--- a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
+++ b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h
@@ -21,13 +21,4 @@ static inline void write_trfcr(u64 val)
isb();
}
-static inline u64 cpu_prohibit_trace(void)
-{
- u64 trfcr = read_trfcr();
-
- /* Prohibit tracing at EL0 & the kernel EL */
- write_trfcr(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE));
- /* Return the original value of the TRFCR */
- return trfcr;
-}
#endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index cb3e04755c99..b581a30a1cd9 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -1036,7 +1036,7 @@ MODULE_DEVICE_TABLE(acpi, stm_acpi_ids);
static struct platform_driver stm_platform_driver = {
.probe = stm_platform_probe,
- .remove_new = stm_platform_remove,
+ .remove = stm_platform_remove,
.driver = {
.name = "coresight-stm-platform",
.acpi_match_table = ACPI_PTR(stm_acpi_ids),
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 3a482fd2cb22..e9876252a789 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -730,7 +730,7 @@ MODULE_DEVICE_TABLE(acpi, tmc_acpi_ids);
static struct platform_driver tmc_platform_driver = {
.probe = tmc_platform_probe,
- .remove_new = tmc_platform_remove,
+ .remove = tmc_platform_remove,
.driver = {
.name = "coresight-tmc-platform",
.acpi_match_table = ACPI_PTR(tmc_acpi_ids),
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index bfca103f9f84..189a4abc2561 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -24,7 +24,7 @@ DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
static bool coresight_device_is_tpdm(struct coresight_device *csdev)
{
- return (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ return (coresight_is_device_source(csdev)) &&
(csdev->subtype.source_subtype ==
CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM);
}
@@ -110,6 +110,16 @@ static int tpda_get_element_size(struct tpda_drvdata *drvdata,
csdev->pdata->in_conns[i]->dest_port != inport)
continue;
+ /*
+ * If this port has a hardcoded filter, use the source
+ * device directly.
+ */
+ if (csdev->pdata->in_conns[i]->filter_src_fwnode) {
+ in = csdev->pdata->in_conns[i]->filter_src_dev;
+ if (!in)
+ continue;
+ }
+
if (coresight_device_is_tpdm(in)) {
if (drvdata->dsb_esize || drvdata->cmb_esize)
return -EEXIST;
@@ -124,7 +134,6 @@ static int tpda_get_element_size(struct tpda_drvdata *drvdata,
}
}
-
return rc;
}
@@ -190,10 +199,10 @@ static int tpda_enable(struct coresight_device *csdev,
int ret = 0;
spin_lock(&drvdata->spinlock);
- if (atomic_read(&in->dest_refcnt) == 0) {
+ if (in->dest_refcnt == 0) {
ret = __tpda_enable(drvdata, in->dest_port);
if (!ret) {
- atomic_inc(&in->dest_refcnt);
+ in->dest_refcnt++;
csdev->refcnt++;
dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
}
@@ -223,7 +232,7 @@ static void tpda_disable(struct coresight_device *csdev,
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
- if (atomic_dec_return(&in->dest_refcnt) == 0) {
+ if (--in->dest_refcnt == 0) {
__tpda_disable(drvdata, in->dest_port);
csdev->refcnt--;
}
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index b7d99e91ab84..c38f9701665e 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -640,8 +640,7 @@ static ssize_t dsb_mode_store(struct device *dev,
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
- if ((kstrtoul(buf, 0, &val)) || (val < 0) ||
- (val & ~TPDM_DSB_MODE_MASK))
+ if ((kstrtoul(buf, 0, &val)) || (val & ~TPDM_DSB_MODE_MASK))
return -EINVAL;
spin_lock(&drvdata->spinlock);
@@ -1308,8 +1307,8 @@ static void tpdm_remove(struct amba_device *adev)
*/
static struct amba_id tpdm_ids[] = {
{
- .id = 0x000f0e00,
- .mask = 0x000fff00,
+ .id = 0x001f0e00,
+ .mask = 0x00ffff00,
},
{ 0, 0, NULL },
};
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index b048e146fbb1..97ef36f03ec2 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -307,7 +307,7 @@ MODULE_DEVICE_TABLE(acpi, tpiu_acpi_ids);
static struct platform_driver tpiu_platform_driver = {
.probe = tpiu_platform_probe,
- .remove_new = tpiu_platform_remove,
+ .remove = tpiu_platform_remove,
.driver = {
.name = "coresight-tpiu-platform",
.acpi_match_table = ACPI_PTR(tpiu_acpi_ids),
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index d98e12cb30ec..378af743be45 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -12,6 +12,12 @@
#include "coresight-trace-id.h"
+enum trace_id_flags {
+ TRACE_ID_ANY = 0x0,
+ TRACE_ID_PREFER_ODD = 0x1,
+ TRACE_ID_REQ_STATIC = 0x2,
+};
+
/* Default trace ID map. Used in sysfs mode and for system sources */
static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
static struct coresight_trace_id_map id_map_default = {
@@ -74,21 +80,25 @@ static int coresight_trace_id_find_odd_id(struct coresight_trace_id_map *id_map)
* Otherwise allocate next available ID.
*/
static int coresight_trace_id_alloc_new_id(struct coresight_trace_id_map *id_map,
- int preferred_id, bool prefer_odd_id)
+ int preferred_id, unsigned int flags)
{
int id = 0;
/* for backwards compatibility, cpu IDs may use preferred value */
- if (IS_VALID_CS_TRACE_ID(preferred_id) &&
- !test_bit(preferred_id, id_map->used_ids)) {
- id = preferred_id;
- goto trace_id_allocated;
- } else if (prefer_odd_id) {
+ if (IS_VALID_CS_TRACE_ID(preferred_id)) {
+ if (!test_bit(preferred_id, id_map->used_ids)) {
+ id = preferred_id;
+ goto trace_id_allocated;
+ } else if (flags & TRACE_ID_REQ_STATIC)
+ return -EBUSY;
+ } else if (flags & TRACE_ID_PREFER_ODD) {
/* may use odd ids to avoid preferred legacy cpu IDs */
id = coresight_trace_id_find_odd_id(id_map);
if (id)
goto trace_id_allocated;
- }
+ } else if (!IS_VALID_CS_TRACE_ID(preferred_id) &&
+ (flags & TRACE_ID_REQ_STATIC))
+ return -EINVAL;
/*
* skip reserved bit 0, look at bitmap length of
@@ -153,7 +163,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
*/
id = coresight_trace_id_alloc_new_id(id_map,
CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
- false);
+ TRACE_ID_ANY);
if (!IS_VALID_CS_TRACE_ID(id))
goto get_cpu_id_out_unlock;
@@ -188,14 +198,14 @@ static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_ma
DUMP_ID_MAP(id_map);
}
-static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map)
+static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map,
+ int preferred_id, unsigned int traceid_flags)
{
unsigned long flags;
int id;
spin_lock_irqsave(&id_map->lock, flags);
- /* prefer odd IDs for system components to avoid legacy CPU IDS */
- id = coresight_trace_id_alloc_new_id(id_map, 0, true);
+ id = coresight_trace_id_alloc_new_id(id_map, preferred_id, traceid_flags);
spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
@@ -255,10 +265,19 @@ EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map);
int coresight_trace_id_get_system_id(void)
{
- return coresight_trace_id_map_get_system_id(&id_map_default);
+ /* prefer odd IDs for system components to avoid legacy CPU IDs */
+ return coresight_trace_id_map_get_system_id(&id_map_default, 0,
+ TRACE_ID_PREFER_ODD);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id);
+int coresight_trace_id_get_static_system_id(int trace_id)
+{
+ return coresight_trace_id_map_get_system_id(&id_map_default,
+ trace_id, TRACE_ID_REQ_STATIC);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_get_static_system_id);
+
void coresight_trace_id_put_system_id(int id)
{
coresight_trace_id_map_put_system_id(&id_map_default, id);
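Taken together, the allocator now serves both request styles through one flag-driven path; a short usage sketch, with 0x42 as an arbitrary example ID:

static void trace_id_usage_demo(void)
{
	int id, sid;

	/* Dynamic: prefers odd IDs to avoid the legacy per-CPU values. */
	id = coresight_trace_id_get_system_id();

	/* Static: -EBUSY if 0x42 is already in use, -EINVAL if it is
	 * not a valid CoreSight trace ID. */
	sid = coresight_trace_id_get_static_system_id(0x42);

	if (sid >= 0)
		coresight_trace_id_put_system_id(sid);
	if (id >= 0)
		coresight_trace_id_put_system_id(id);
}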
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.h b/drivers/hwtracing/coresight/coresight-trace-id.h
index 9aae50a553ca..db68e1ec56b6 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.h
+++ b/drivers/hwtracing/coresight/coresight-trace-id.h
@@ -117,6 +117,15 @@ int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *i
int coresight_trace_id_get_system_id(void);
/**
+ * Allocate a CoreSight static trace ID for a system component.
+ *
+ * Used to allocate static IDs for system trace sources such as the dummy source.
+ *
+ * return: Trace ID, or -EBUSY/-EINVAL if the requested static ID cannot be allocated.
+ */
+int coresight_trace_id_get_static_system_id(int id);
+
+/**
* Release an allocated system trace ID.
*
* Unconditionally release a trace ID allocated to a system component.
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 96a32b213669..fff67aac8418 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -17,6 +17,7 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include "coresight-self-hosted-trace.h"
@@ -221,6 +222,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
*/
trblimitr |= TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ kvm_enable_trbe();
/* Synchronize the TRBE enable event */
isb();
@@ -239,6 +241,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
*/
trblimitr &= ~TRBLIMITR_EL1_E;
write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ kvm_disable_trbe();
if (trbe_needs_drain_after_disable(cpudata))
trbe_drain_buffer();
@@ -253,8 +256,8 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
- trbe_drain_and_disable_local(cpudata);
write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ trbe_drain_buffer();
write_sysreg_s(0, SYS_TRBPTR_EL1);
write_sysreg_s(0, SYS_TRBBASER_EL1);
write_sysreg_s(0, SYS_TRBSR_EL1);
@@ -1110,6 +1113,16 @@ static bool is_perf_trbe(struct perf_output_handle *handle)
return true;
}
+static u64 cpu_prohibit_trace(void)
+{
+ u64 trfcr = read_trfcr();
+
+ /* Prohibit tracing at EL0 & the kernel EL */
+ write_trfcr(trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE));
+ /* Return the original value of the TRFCR */
+ return trfcr;
+}
+
static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
{
struct perf_output_handle **handle_ptr = dev;
@@ -1562,7 +1575,7 @@ static struct platform_driver arm_trbe_driver = {
.suppress_bind_attrs = true,
},
.probe = arm_trbe_device_probe,
- .remove_new = arm_trbe_device_remove,
+ .remove = arm_trbe_device_remove,
};
static int __init arm_trbe_init(void)
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
index ef7f560f0ffa..dc3c9504dd7c 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -600,7 +600,7 @@ static struct platform_driver smb_driver = {
.suppress_bind_attrs = true,
},
.probe = smb_probe,
- .remove_new = smb_remove,
+ .remove = smb_remove,
};
module_platform_driver(smb_driver);
diff --git a/drivers/hwtracing/intel_th/acpi.c b/drivers/hwtracing/intel_th/acpi.c
index 503620e9fd10..d229324978bd 100644
--- a/drivers/hwtracing/intel_th/acpi.c
+++ b/drivers/hwtracing/intel_th/acpi.c
@@ -69,7 +69,7 @@ static void intel_th_acpi_remove(struct platform_device *pdev)
static struct platform_driver intel_th_acpi_driver = {
.probe = intel_th_acpi_probe,
- .remove_new = intel_th_acpi_remove,
+ .remove = intel_th_acpi_remove,
.driver = {
.name = DRIVER_NAME,
.acpi_match_table = intel_th_acpi_ids,
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index d72993355473..47d9e6c3bac0 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -857,8 +857,9 @@ static irqreturn_t intel_th_irq(int irq, void *data)
/**
* intel_th_alloc() - allocate a new Intel TH device and its subdevices
* @dev: parent device
+ * @drvdata: data private to the driver
* @devres: resources indexed by th_mmio_idx
- * @irq: irq number
+ * @ndevres: number of entries in the @devres array
*/
struct intel_th *
intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 0d7b9839e5b6..e9d8d28e055f 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -23,7 +23,6 @@ enum {
TH_PCI_RTIT_BAR = 4,
};
-#define BAR_MASK (BIT(TH_PCI_CONFIG_BAR) | BIT(TH_PCI_STH_SW_BAR))
#define PCI_REG_NPKDSC 0x80
#define NPKDSC_TSACT BIT(5)
@@ -83,10 +82,16 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
if (err)
return err;
- err = pcim_iomap_regions_request_all(pdev, BAR_MASK, DRIVER_NAME);
+ err = pcim_request_all_regions(pdev, DRIVER_NAME);
if (err)
return err;
+ if (!pcim_iomap(pdev, TH_PCI_CONFIG_BAR, 0))
+ return -ENOMEM;
+
+ if (!pcim_iomap(pdev, TH_PCI_STH_SW_BAR, 0))
+ return -ENOMEM;
+
if (pdev->resource[TH_PCI_RTIT_BAR].start) {
resource[TH_MMIO_RTIT] = pdev->resource[TH_PCI_RTIT_BAR];
r++;
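The probe change swaps the removed pcim_iomap_regions_request_all() for an explicit request-then-map sequence; in isolation the resulting pattern looks like this (devres releases the regions automatically on failure or unbind):

err = pcim_request_all_regions(pdev, DRIVER_NAME);
if (err)
	return err;

/* Map only the BARs this driver actually touches. */
if (!pcim_iomap(pdev, TH_PCI_CONFIG_BAR, 0) ||
    !pcim_iomap(pdev, TH_PCI_STH_SW_BAR, 0))
	return -ENOMEM;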
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index 3f71ce4711e3..d27de18de46f 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -5,10 +5,11 @@
obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
obj-$(CONFIG_I2C) += i2c-core.o
-i2c-core-objs := i2c-core-base.o i2c-core-smbus.o
+i2c-core-y := i2c-core-base.o i2c-core-smbus.o
i2c-core-$(CONFIG_ACPI) += i2c-core-acpi.o
-i2c-core-$(CONFIG_I2C_SLAVE) += i2c-core-slave.o
-i2c-core-$(CONFIG_OF) += i2c-core-of.o
+i2c-core-$(CONFIG_I2C_SLAVE) += i2c-core-slave.o
+i2c-core-$(CONFIG_OF) += i2c-core-of.o
+i2c-core-$(CONFIG_OF_DYNAMIC) += i2c-core-of-prober.o
obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6b3ba7e5723a..fc438f445771 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -62,19 +62,6 @@ config I2C_AMD756
This driver can also be built as a module. If so, the module
will be called i2c-amd756.
-config I2C_AMD756_S4882
- tristate "SMBus multiplexing on the Tyan S4882"
- depends on I2C_AMD756 && X86
- help
- Enabling this option will add specific SMBus support for the Tyan
- S4882 motherboard. On this 4-CPU board, the SMBus is multiplexed
- over 8 different channels, where the various memory module EEPROMs
- and temperature sensors live. Saying yes here will give you access
- to these in addition to the trunk.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-amd756-s4882.
-
config I2C_AMD8111
tristate "AMD 8111"
depends on PCI && HAS_IOPORT
@@ -95,6 +82,23 @@ config I2C_AMD_MP2
This driver can also be built as modules. If so, the modules will
be called i2c-amd-mp2-pci and i2c-amd-mp2-plat.
+config I2C_AMD_ASF
+ tristate "AMD ASF I2C Controller Support"
+ depends on I2C_PIIX4
+ select I2C_SLAVE
+ help
+ This option enables support for the AMD ASF (Alert Standard Format)
+ I2C controller. The AMD ASF controller is an SMBus controller with
+ built-in ASF functionality, allowing it to issue generic SMBus
+ packets and communicate with the DASH controller using MCTP over
+ ASF.
+
+ If you have an AMD system with ASF support and want to enable this
+ functionality, say Y or M here. If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will
+ be called i2c_amd_asf_plat.
+
config I2C_HIX5HD2
tristate "Hix5hd2 high-speed I2C driver"
depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST
@@ -160,6 +164,7 @@ config I2C_I801
Meteor Lake (SOC and PCH)
Birch Stream (SOC)
Arrow Lake (SOC)
+ Panther Lake (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -250,19 +255,6 @@ config I2C_NFORCE2
This driver can also be built as a module. If so, the module
will be called i2c-nforce2.
-config I2C_NFORCE2_S4985
- tristate "SMBus multiplexing on the Tyan S4985"
- depends on I2C_NFORCE2 && X86
- help
- Enabling this option will add specific SMBus support for the Tyan
- S4985 motherboard. On this 4-CPU board, the SMBus is multiplexed
- over 4 different channels, where the various memory module EEPROMs
- live. Saying yes here will give you access to these in addition
- to the trunk.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-nforce2-s4985.
-
config I2C_NVIDIA_GPU
tristate "NVIDIA GPU I2C controller"
depends on PCI
@@ -439,7 +431,7 @@ config I2C_AT91
are facing this situation, use the i2c-gpio driver.
config I2C_AT91_SLAVE_EXPERIMENTAL
- tristate "Microchip AT91 I2C experimental slave mode"
+ bool "Microchip AT91 I2C experimental slave mode"
depends on I2C_AT91
select I2C_SLAVE
help
@@ -448,7 +440,7 @@ config I2C_AT91_SLAVE_EXPERIMENTAL
been tested in a heavy way, help wanted.
There are known bugs:
- It can hang, on a SAMA5D4, after several transfers.
- - There are some mismtaches with a SAMA5D4 as slave and a SAMA5D2 as
+ - There are some mismatches with a SAMA5D4 as slave and a SAMA5D2 as
master.
config I2C_AU1550
@@ -511,7 +503,7 @@ config I2C_BRCMSTB
tristate "BRCM Settop/DSL I2C controller"
depends on ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || \
BMIPS_GENERIC || COMPILE_TEST
- default y
+ default ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || BMIPS_GENERIC
help
If you say yes to this option, support will be included for the
I2C interface on the Broadcom Settop/DSL SoCs.
@@ -535,6 +527,16 @@ config I2C_CBUS_GPIO
This driver can also be built as a module. If so, the module
will be called i2c-cbus-gpio.
+config I2C_CGBC
+ tristate "Congatec I2C Controller"
+ depends on MFD_CGBC
+ help
+ This driver supports the 2 I2C interfaces on the Congatec Board
+ Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called i2c-cgbc.
+
config I2C_CPM
tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
depends on CPM1 || CPM2
@@ -741,18 +743,20 @@ config I2C_IMG
config I2C_IMX
tristate "IMX I2C interface"
- depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST
+ depends on ARCH_MXC || ARCH_LAYERSCAPE || ARCH_S32 || COLDFIRE \
+ || COMPILE_TEST
select I2C_SLAVE
help
Say Y here if you want to use the IIC bus controller on
- the Freescale i.MX/MXC, Layerscape or ColdFire processors.
+ the Freescale i.MX/MXC/S32G, Layerscape or ColdFire processors.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called i2c-imx.
config I2C_IMX_LPI2C
tristate "IMX Low Power I2C interface"
depends on ARCH_MXC || COMPILE_TEST
+ select I2C_SLAVE
help
Say Y here if you want to use the Low Power IIC bus controller
on the Freescale i.MX processors.
@@ -907,7 +911,7 @@ config I2C_MXS
config I2C_NOMADIK
tristate "ST-Ericsson Nomadik/Ux500 I2C Controller"
- depends on ARM_AMBA
+ depends on ARM_AMBA || COMPILE_TEST
help
If you say yes to this option, support will be included for the
I2C interface from ST-Ericsson's Nomadik and Ux500 architectures,
@@ -1060,6 +1064,16 @@ config I2C_RK3X
This driver can also be built as a module. If so, the module will
be called i2c-rk3x.
+config I2C_RTL9300
+ tristate "Realtek RTL9300 I2C controller"
+ depends on MACH_REALTEK_RTL || COMPILE_TEST
+ help
+ Say Y here to include support for the I2C controller in Realtek
+ RTL9300 SoCs.
+
+ This driver can also be built as a module. If so, the module will
+ be called i2c-rtl9300.
+
config I2C_RZV2M
tristate "Renesas RZ/V2M adapter"
depends on ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index ecc07c50f2a0..1c2a4510abe4 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -14,14 +14,12 @@ obj-$(CONFIG_I2C_ALI1535) += i2c-ali1535.o
obj-$(CONFIG_I2C_ALI1563) += i2c-ali1563.o
obj-$(CONFIG_I2C_ALI15X3) += i2c-ali15x3.o
obj-$(CONFIG_I2C_AMD756) += i2c-amd756.o
-obj-$(CONFIG_I2C_AMD756_S4882) += i2c-amd756-s4882.o
obj-$(CONFIG_I2C_AMD8111) += i2c-amd8111.o
obj-$(CONFIG_I2C_CHT_WC) += i2c-cht-wc.o
obj-$(CONFIG_I2C_I801) += i2c-i801.o
obj-$(CONFIG_I2C_ISCH) += i2c-isch.o
obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o
obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
-obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o
obj-$(CONFIG_I2C_NVIDIA_GPU) += i2c-nvidia-gpu.o
obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o
@@ -38,18 +36,18 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
# Embedded system I2C/SMBus host controller drivers
obj-$(CONFIG_I2C_ALTERA) += i2c-altera.o
obj-$(CONFIG_I2C_AMD_MP2) += i2c-amd-mp2-pci.o i2c-amd-mp2-plat.o
+obj-$(CONFIG_I2C_AMD_ASF) += i2c-amd-asf-plat.o
obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
-i2c-at91-objs := i2c-at91-core.o i2c-at91-master.o
-ifeq ($(CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL),y)
- i2c-at91-objs += i2c-at91-slave.o
-endif
+i2c-at91-y := i2c-at91-core.o i2c-at91-master.o
+i2c-at91-$(CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL) += i2c-at91-slave.o
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o
obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o
obj-$(CONFIG_I2C_BCM_IPROC) += i2c-bcm-iproc.o
obj-$(CONFIG_I2C_CADENCE) += i2c-cadence.o
obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o
+obj-$(CONFIG_I2C_CGBC) += i2c-cgbc.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o
@@ -103,6 +101,7 @@ obj-$(CONFIG_I2C_QCOM_GENI) += i2c-qcom-geni.o
obj-$(CONFIG_I2C_QUP) += i2c-qup.o
obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o
+obj-$(CONFIG_I2C_RTL9300) += i2c-rtl9300.o
obj-$(CONFIG_I2C_RZV2M) += i2c-rzv2m.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
@@ -111,8 +110,8 @@ obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
obj-$(CONFIG_I2C_SPRD) += i2c-sprd.o
obj-$(CONFIG_I2C_ST) += i2c-st.o
obj-$(CONFIG_I2C_STM32F4) += i2c-stm32f4.o
-i2c-stm32f7-drv-objs := i2c-stm32f7.o i2c-stm32.o
obj-$(CONFIG_I2C_STM32F7) += i2c-stm32f7-drv.o
+i2c-stm32f7-drv-y := i2c-stm32f7.o i2c-stm32.o
obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o
obj-$(CONFIG_I2C_SYNQUACER) += i2c-synquacer.o
obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
@@ -121,10 +120,10 @@ obj-$(CONFIG_I2C_UNIPHIER) += i2c-uniphier.o
obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-uniphier-f.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_WMT) += i2c-viai2c-wmt.o i2c-viai2c-common.o
-i2c-octeon-objs := i2c-octeon-core.o i2c-octeon-platdrv.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
-i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o
+i2c-octeon-y := i2c-octeon-core.o i2c-octeon-platdrv.o
obj-$(CONFIG_I2C_THUNDERX) += i2c-thunderx.o
+i2c-thunderx-y := i2c-octeon-core.o i2c-thunderx-pcidrv.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o
obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index f4dde08a3b92..2da73173ce24 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -482,7 +482,7 @@ MODULE_DEVICE_TABLE(of, altr_i2c_of_match);
static struct platform_driver altr_i2c_driver = {
.probe = altr_i2c_probe,
- .remove_new = altr_i2c_remove,
+ .remove = altr_i2c_remove,
.driver = {
.name = "altera-i2c",
.of_match_table = altr_i2c_of_match,
diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c
new file mode 100644
index 000000000000..7512614bf4b7
--- /dev/null
+++ b/drivers/i2c/busses/i2c-amd-asf-plat.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Alert Standard Format Platform Driver
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Sanket Goswami <Sanket.Goswami@amd.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/sprintf.h>
+
+#include "i2c-piix4.h"
+
+/* ASF register bits */
+#define ASF_SLV_LISTN 0
+#define ASF_SLV_INTR 1
+#define ASF_SLV_RST 4
+#define ASF_PEC_SP 5
+#define ASF_DATA_EN 7
+#define ASF_MSTR_EN 16
+#define ASF_CLK_EN 17
+
+/* ASF address offsets */
+#define ASFINDEX (0x07 + piix4_smba)
+#define ASFLISADDR (0x09 + piix4_smba)
+#define ASFSTA (0x0A + piix4_smba)
+#define ASFSLVSTA (0x0D + piix4_smba)
+#define ASFDATARWPTR (0x11 + piix4_smba)
+#define ASFSETDATARDPTR (0x12 + piix4_smba)
+#define ASFDATABNKSEL (0x13 + piix4_smba)
+#define ASFSLVEN (0x15 + piix4_smba)
+
+#define ASF_BLOCK_MAX_BYTES 72
+#define ASF_ERROR_STATUS GENMASK(3, 1)
+
+struct amd_asf_dev {
+ struct i2c_adapter adap;
+ void __iomem *eoi_base;
+ struct i2c_client *target;
+ struct delayed_work work_buf;
+ struct sb800_mmio_cfg mmio_cfg;
+ struct resource *port_addr;
+};
+
+static void amd_asf_process_target(struct work_struct *work)
+{
+ struct amd_asf_dev *dev = container_of(work, struct amd_asf_dev, work_buf.work);
+ unsigned short piix4_smba = dev->port_addr->start;
+ u8 data[ASF_BLOCK_MAX_BYTES];
+ u8 bank, reg, cmd;
+ u8 len = 0, idx, val;
+
+ /* Read target status register */
+ reg = inb_p(ASFSLVSTA);
+
+ /* Check for error bits in the target status register */
+ if (reg & ASF_ERROR_STATUS) {
+ /* Set bank as full */
+ cmd = 0;
+ reg |= GENMASK(3, 2);
+ outb_p(reg, ASFDATABNKSEL);
+ } else {
+ /* Read data bank */
+ reg = inb_p(ASFDATABNKSEL);
+ bank = (reg & BIT(3)) ? 1 : 0;
+
+ /* Set read data bank */
+ if (bank) {
+ reg |= BIT(4);
+ reg &= ~BIT(3);
+ } else {
+ reg &= ~BIT(4);
+ reg &= ~BIT(2);
+ }
+
+ /* Read command register */
+ outb_p(reg, ASFDATABNKSEL);
+ cmd = inb_p(ASFINDEX);
+ len = inb_p(ASFDATARWPTR);
+ for (idx = 0; idx < len; idx++)
+ data[idx] = inb_p(ASFINDEX);
+
+ /* Clear data bank status */
+ if (bank) {
+ reg |= BIT(3);
+ outb_p(reg, ASFDATABNKSEL);
+ } else {
+ reg |= BIT(2);
+ outb_p(reg, ASFDATABNKSEL);
+ }
+ }
+
+ outb_p(0, ASFSETDATARDPTR);
+ if (cmd & BIT(0))
+ return;
+
+ /*
+ * Although i2c_slave_event() returns an appropriate error code, we
+ * don't check it here because we're operating in the workqueue context.
+ */
+ i2c_slave_event(dev->target, I2C_SLAVE_WRITE_REQUESTED, &val);
+ for (idx = 0; idx < len; idx++) {
+ val = data[idx];
+ i2c_slave_event(dev->target, I2C_SLAVE_WRITE_RECEIVED, &val);
+ }
+ i2c_slave_event(dev->target, I2C_SLAVE_STOP, &val);
+}
+
+static void amd_asf_update_ioport_target(unsigned short piix4_smba, u8 bit,
+ unsigned long offset, bool set)
+{
+ unsigned long reg;
+
+ reg = inb_p(offset);
+ __assign_bit(bit, &reg, set);
+ outb_p(reg, offset);
+}
+
+static void amd_asf_update_mmio_target(struct amd_asf_dev *dev, u8 bit, bool set)
+{
+ unsigned long reg;
+
+ reg = ioread32(dev->mmio_cfg.addr);
+ __assign_bit(bit, &reg, set);
+ iowrite32(reg, dev->mmio_cfg.addr);
+}
+
+static void amd_asf_setup_target(struct amd_asf_dev *dev)
+{
+ unsigned short piix4_smba = dev->port_addr->start;
+
+ /* Reset both host and target before setting up */
+ outb_p(0, SMBHSTSTS);
+ outb_p(0, ASFSLVSTA);
+ outb_p(0, ASFSTA);
+
+ /* Update target address */
+ amd_asf_update_ioport_target(piix4_smba, ASF_SLV_LISTN, ASFLISADDR, true);
+ /* Enable target and set the clock */
+ amd_asf_update_mmio_target(dev, ASF_MSTR_EN, false);
+ amd_asf_update_mmio_target(dev, ASF_CLK_EN, true);
+ /* Enable target interrupt */
+ amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, ASFSLVEN, true);
+ amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, false);
+ /* Enable PEC and PEC append */
+ amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, SMBHSTCNT, true);
+ amd_asf_update_ioport_target(piix4_smba, ASF_PEC_SP, SMBHSTCNT, true);
+}
+
+static int amd_asf_access(struct i2c_adapter *adap, u16 addr, u8 command, u8 *data)
+{
+ struct amd_asf_dev *dev = i2c_get_adapdata(adap);
+ unsigned short piix4_smba = dev->port_addr->start;
+ u8 i, len;
+
+ outb_p((addr << 1), SMBHSTADD);
+ outb_p(command, SMBHSTCMD);
+ len = data[0];
+ if (len == 0 || len > ASF_BLOCK_MAX_BYTES)
+ return -EINVAL;
+
+ outb_p(len, SMBHSTDAT0);
+ /* Reset SMBBLKDAT */
+ inb_p(SMBHSTCNT);
+ for (i = 1; i <= len; i++)
+ outb_p(data[i], SMBBLKDAT);
+
+ outb_p(PIIX4_BLOCK_DATA, SMBHSTCNT);
+ /* Enable PEC and PEC append */
+ amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, SMBHSTCNT, true);
+ amd_asf_update_ioport_target(piix4_smba, ASF_PEC_SP, SMBHSTCNT, true);
+
+ return piix4_transaction(adap, piix4_smba);
+}
+
+static int amd_asf_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct amd_asf_dev *dev = i2c_get_adapdata(adap);
+ unsigned short piix4_smba = dev->port_addr->start;
+ u8 asf_data[ASF_BLOCK_MAX_BYTES];
+ struct i2c_msg *dev_msgs = msgs;
+ u8 prev_port;
+ int ret;
+
+ if (msgs->flags & I2C_M_RD) {
+ dev_err(&adap->dev, "ASF: Read not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Exclude the receive header and PEC */
+ if (msgs->len > ASF_BLOCK_MAX_BYTES - 3) {
+ dev_warn(&adap->dev, "ASF: max message length exceeded\n");
+ return -EOPNOTSUPP;
+ }
+
+ asf_data[0] = dev_msgs->len;
+ memcpy(asf_data + 1, dev_msgs[0].buf, dev_msgs->len);
+
+ ret = piix4_sb800_region_request(&adap->dev, &dev->mmio_cfg);
+ if (ret)
+ return ret;
+
+ amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, true);
+ amd_asf_update_ioport_target(piix4_smba, ASF_SLV_LISTN, ASFLISADDR, false);
+ /* Clear ASF target status */
+ outb_p(0, ASFSLVSTA);
+
+ /* Enable ASF SMBus controller function */
+ amd_asf_update_mmio_target(dev, ASF_MSTR_EN, true);
+ prev_port = piix4_sb800_port_sel(0, &dev->mmio_cfg);
+ ret = amd_asf_access(adap, msgs->addr, msgs[0].buf[0], asf_data);
+ piix4_sb800_port_sel(prev_port, &dev->mmio_cfg);
+ amd_asf_setup_target(dev);
+ piix4_sb800_region_release(&adap->dev, &dev->mmio_cfg);
+ return ret;
+}
+
+static int amd_asf_reg_target(struct i2c_client *target)
+{
+ struct amd_asf_dev *dev = i2c_get_adapdata(target->adapter);
+ unsigned short piix4_smba = dev->port_addr->start;
+ int ret;
+ u8 reg;
+
+ if (dev->target)
+ return -EBUSY;
+
+ ret = piix4_sb800_region_request(&target->dev, &dev->mmio_cfg);
+ if (ret)
+ return ret;
+
+ reg = (target->addr << 1) | I2C_M_RD;
+ outb_p(reg, ASFLISADDR);
+
+ amd_asf_setup_target(dev);
+ dev->target = target;
+ amd_asf_update_ioport_target(piix4_smba, ASF_DATA_EN, ASFDATABNKSEL, false);
+ piix4_sb800_region_release(&target->dev, &dev->mmio_cfg);
+
+ return 0;
+}
+
+static int amd_asf_unreg_target(struct i2c_client *target)
+{
+ struct amd_asf_dev *dev = i2c_get_adapdata(target->adapter);
+ unsigned short piix4_smba = dev->port_addr->start;
+
+ amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, ASFSLVEN, false);
+ amd_asf_update_ioport_target(piix4_smba, ASF_SLV_RST, ASFSLVEN, true);
+ dev->target = NULL;
+
+ return 0;
+}
+
+static u32 amd_asf_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_PEC | I2C_FUNC_SLAVE;
+}
+
+static const struct i2c_algorithm amd_asf_smbus_algorithm = {
+ .master_xfer = amd_asf_xfer,
+ .reg_slave = amd_asf_reg_target,
+ .unreg_slave = amd_asf_unreg_target,
+ .functionality = amd_asf_func,
+};
+
+static irqreturn_t amd_asf_irq_handler(int irq, void *ptr)
+{
+ struct amd_asf_dev *dev = ptr;
+ unsigned short piix4_smba = dev->port_addr->start;
+ u8 target_int = inb_p(ASFSTA);
+
+ if (target_int & BIT(6)) {
+ /* Target Interrupt */
+ outb_p(target_int | BIT(6), ASFSTA);
+ schedule_delayed_work(&dev->work_buf, HZ);
+ } else {
+ /* Controller Interrupt */
+ amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int amd_asf_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct amd_asf_dev *asf_dev;
+ struct resource *eoi_addr;
+ int ret, irq;
+
+ asf_dev = devm_kzalloc(dev, sizeof(*asf_dev), GFP_KERNEL);
+ if (!asf_dev)
+ return dev_err_probe(dev, -ENOMEM, "Failed to allocate memory\n");
+
+ asf_dev->mmio_cfg.use_mmio = true;
+ asf_dev->port_addr = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!asf_dev->port_addr)
+ return dev_err_probe(dev, -EINVAL, "missing IO resources\n");
+
+ /*
+ * The resource obtained via ACPI might not belong to the ASF device address space. Instead,
+ * it could be within other IP blocks of the ASIC, which are crucial for generating
+ * subsequent interrupts. Therefore, we avoid using devm_platform_ioremap_resource() and
+ * use platform_get_resource() and devm_ioremap() separately to prevent any address space
+ * conflicts.
+ */
+ eoi_addr = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!eoi_addr)
+ return dev_err_probe(dev, -EINVAL, "missing MEM resources\n");
+
+ asf_dev->eoi_base = devm_ioremap(dev, eoi_addr->start, resource_size(eoi_addr));
+ if (!asf_dev->eoi_base)
+ return dev_err_probe(dev, -EBUSY, "failed mapping IO region\n");
+
+ ret = devm_delayed_work_autocancel(dev, &asf_dev->work_buf, amd_asf_process_target);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to create work queue\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "missing IRQ resources\n");
+
+ ret = devm_request_irq(dev, irq, amd_asf_irq_handler, IRQF_SHARED, "amd_asf", asf_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Unable to request irq: %d for use\n", irq);
+
+ asf_dev->adap.owner = THIS_MODULE;
+ asf_dev->adap.algo = &amd_asf_smbus_algorithm;
+ asf_dev->adap.dev.parent = dev;
+
+ i2c_set_adapdata(&asf_dev->adap, asf_dev);
+ snprintf(asf_dev->adap.name, sizeof(asf_dev->adap.name), "AMD ASF adapter");
+
+ return devm_i2c_add_adapter(dev, &asf_dev->adap);
+}
+
+static const struct acpi_device_id amd_asf_acpi_ids[] = {
+ { "AMDI001A" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, amd_asf_acpi_ids);
+
+static struct platform_driver amd_asf_driver = {
+ .driver = {
+ .name = "i2c-amd-asf",
+ .acpi_match_table = amd_asf_acpi_ids,
+ },
+ .probe = amd_asf_probe,
+};
+module_platform_driver(amd_asf_driver);
+
+MODULE_IMPORT_NS("PIIX4_SMBUS");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD Alert Standard Format Driver");
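Since amd_asf_func() advertises I2C_FUNC_SLAVE, a target backend can attach through the generic i2c slave API; a minimal hedged sketch (demo_* names are illustrative; the callback chain mirrors amd_asf_process_target() above):

static int demo_slave_cb(struct i2c_client *client,
			 enum i2c_slave_event event, u8 *val)
{
	if (event == I2C_SLAVE_WRITE_RECEIVED)
		dev_info(&client->dev, "received byte %#x\n", *val);

	return 0;
}

/* Bind a client created against the ASF adapter as the target. */
static int demo_attach(struct i2c_client *client)
{
	return i2c_slave_register(client, demo_slave_cb);
}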
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index 6f0ef587e76d..d9dd0e475d1a 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -346,7 +346,7 @@ MODULE_DEVICE_TABLE(acpi, i2c_amd_acpi_match);
static struct platform_driver i2c_amd_plat_driver = {
.probe = i2c_amd_probe,
- .remove_new = i2c_amd_remove,
+ .remove = i2c_amd_remove,
.driver = {
.name = "i2c_amd_mp2",
.acpi_match_table = ACPI_PTR(i2c_amd_acpi_match),
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
deleted file mode 100644
index 063274388a75..000000000000
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
+++ /dev/null
@@ -1,245 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard
- *
- * Copyright (C) 2004, 2008 Jean Delvare <jdelvare@suse.de>
- */
-
-/*
- * We select the channels by sending commands to the Philips
- * PCA9556 chip at I2C address 0x18. The main adapter is used for
- * the non-multiplexed part of the bus, and 4 virtual adapters
- * are defined for the multiplexed addresses: 0x50-0x53 (memory
- * module EEPROM) located on channels 1-4, and 0x4c (LM63)
- * located on multiplexed channels 0 and 5-7. We define one
- * virtual adapter per CPU, which corresponds to two multiplexed
- * channels:
- * CPU0: virtual adapter 1, channels 1 and 0
- * CPU1: virtual adapter 2, channels 2 and 5
- * CPU2: virtual adapter 3, channels 3 and 6
- * CPU3: virtual adapter 4, channels 4 and 7
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-
-extern struct i2c_adapter amd756_smbus;
-
-static struct i2c_adapter *s4882_adapter;
-static struct i2c_algorithm *s4882_algo;
-
-/* Wrapper access functions for multiplexed SMBus */
-static DEFINE_MUTEX(amd756_lock);
-
-static s32 amd756_access_virt0(struct i2c_adapter * adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data * data)
-{
- int error;
-
- /* We exclude the multiplexed addresses */
- if (addr == 0x4c || (addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30
- || addr == 0x18)
- return -ENXIO;
-
- mutex_lock(&amd756_lock);
-
- error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write,
- command, size, data);
-
- mutex_unlock(&amd756_lock);
-
- return error;
-}
-
-/* We remember the last used channels combination so as to only switch
- channels when it is really needed. This greatly reduces the SMBus
- overhead, but also assumes that nobody will be writing to the PCA9556
- in our back. */
-static u8 last_channels;
-
-static inline s32 amd756_access_channel(struct i2c_adapter * adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data * data,
- u8 channels)
-{
- int error;
-
- /* We exclude the non-multiplexed addresses */
- if (addr != 0x4c && (addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30)
- return -ENXIO;
-
- mutex_lock(&amd756_lock);
-
- if (last_channels != channels) {
- union i2c_smbus_data mplxdata;
- mplxdata.byte = channels;
-
- error = amd756_smbus.algo->smbus_xfer(adap, 0x18, 0,
- I2C_SMBUS_WRITE, 0x01,
- I2C_SMBUS_BYTE_DATA,
- &mplxdata);
- if (error)
- goto UNLOCK;
- last_channels = channels;
- }
- error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write,
- command, size, data);
-
-UNLOCK:
- mutex_unlock(&amd756_lock);
- return error;
-}
-
-static s32 amd756_access_virt1(struct i2c_adapter * adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data * data)
-{
- /* CPU0: channels 1 and 0 enabled */
- return amd756_access_channel(adap, addr, flags, read_write, command,
- size, data, 0x03);
-}
-
-static s32 amd756_access_virt2(struct i2c_adapter * adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data * data)
-{
- /* CPU1: channels 2 and 5 enabled */
- return amd756_access_channel(adap, addr, flags, read_write, command,
- size, data, 0x24);
-}
-
-static s32 amd756_access_virt3(struct i2c_adapter * adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data * data)
-{
- /* CPU2: channels 3 and 6 enabled */
- return amd756_access_channel(adap, addr, flags, read_write, command,
- size, data, 0x48);
-}
-
-static s32 amd756_access_virt4(struct i2c_adapter * adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data * data)
-{
- /* CPU3: channels 4 and 7 enabled */
- return amd756_access_channel(adap, addr, flags, read_write, command,
- size, data, 0x90);
-}
-
-static int __init amd756_s4882_init(void)
-{
- int i, error;
- union i2c_smbus_data ioconfig;
-
- if (!amd756_smbus.dev.parent)
- return -ENODEV;
-
- /* Configure the PCA9556 multiplexer */
- ioconfig.byte = 0x00; /* All I/O to output mode */
- error = i2c_smbus_xfer(&amd756_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03,
- I2C_SMBUS_BYTE_DATA, &ioconfig);
- if (error) {
- dev_err(&amd756_smbus.dev, "PCA9556 configuration failed\n");
- error = -EIO;
- goto ERROR0;
- }
-
- /* Unregister physical bus */
- i2c_del_adapter(&amd756_smbus);
-
- printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4882\n");
- /* Define the 5 virtual adapters and algorithms structures */
- if (!(s4882_adapter = kcalloc(5, sizeof(struct i2c_adapter),
- GFP_KERNEL))) {
- error = -ENOMEM;
- goto ERROR1;
- }
- if (!(s4882_algo = kcalloc(5, sizeof(struct i2c_algorithm),
- GFP_KERNEL))) {
- error = -ENOMEM;
- goto ERROR2;
- }
-
- /* Fill in the new structures */
- s4882_algo[0] = *(amd756_smbus.algo);
- s4882_algo[0].smbus_xfer = amd756_access_virt0;
- s4882_adapter[0] = amd756_smbus;
- s4882_adapter[0].algo = s4882_algo;
- s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
- for (i = 1; i < 5; i++) {
- s4882_algo[i] = *(amd756_smbus.algo);
- s4882_adapter[i] = amd756_smbus;
- snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name),
- "SMBus 8111 adapter (CPU%d)", i-1);
- s4882_adapter[i].algo = s4882_algo+i;
- s4882_adapter[i].dev.parent = amd756_smbus.dev.parent;
- }
- s4882_algo[1].smbus_xfer = amd756_access_virt1;
- s4882_algo[2].smbus_xfer = amd756_access_virt2;
- s4882_algo[3].smbus_xfer = amd756_access_virt3;
- s4882_algo[4].smbus_xfer = amd756_access_virt4;
-
- /* Register virtual adapters */
- for (i = 0; i < 5; i++) {
- error = i2c_add_adapter(s4882_adapter+i);
- if (error) {
- printk(KERN_ERR "i2c-amd756-s4882: "
- "Virtual adapter %d registration "
- "failed, module not inserted\n", i);
- for (i--; i >= 0; i--)
- i2c_del_adapter(s4882_adapter+i);
- goto ERROR3;
- }
- }
-
- return 0;
-
-ERROR3:
- kfree(s4882_algo);
- s4882_algo = NULL;
-ERROR2:
- kfree(s4882_adapter);
- s4882_adapter = NULL;
-ERROR1:
- /* Restore physical bus */
- i2c_add_adapter(&amd756_smbus);
-ERROR0:
- return error;
-}
-
-static void __exit amd756_s4882_exit(void)
-{
- if (s4882_adapter) {
- int i;
-
- for (i = 0; i < 5; i++)
- i2c_del_adapter(s4882_adapter+i);
- kfree(s4882_adapter);
- s4882_adapter = NULL;
- }
- kfree(s4882_algo);
- s4882_algo = NULL;
-
- /* Restore physical bus */
- if (i2c_add_adapter(&amd756_smbus))
- printk(KERN_ERR "i2c-amd756-s4882: "
- "Physical bus restoration failed\n");
-}
-
-MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
-MODULE_DESCRIPTION("S4882 SMBus multiplexing");
-MODULE_LICENSE("GPL");
-
-module_init(amd756_s4882_init);
-module_exit(amd756_s4882_exit);
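
For reference, the scheme the deleted i2c-amd756-s4882.c implemented: four virtual
adapters layered over one physical bus, each selecting a pair of PCA9556 output
channels via a bitmask (0x03, 0x24, 0x48, 0x90; one bit per channel), with the last
mask cached so redundant mux writes are skipped. A minimal, self-contained sketch of
that cache-and-switch pattern (userspace stand-ins, names hypothetical):

    #include <stdio.h>

    static unsigned char last_channels;

    static void select_channels(unsigned char channels)
    {
            if (last_channels == channels)
                    return;         /* already routed, skip the mux write */
            printf("PCA9556 output register <- 0x%02x\n", channels);
            last_channels = channels;
    }

    int main(void)
    {
            select_channels(0x03);  /* CPU0: channels 0 and 1 */
            select_channels(0x03);  /* cached, no bus traffic */
            select_channels(0x24);  /* CPU1: channels 2 and 5 */
            return 0;
    }
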
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 208310db906d..3621c02f1cba 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -211,7 +211,7 @@ static s32 amd756_access(struct i2c_adapter * adap, u16 addr,
SMB_HOST_ADDRESS);
outb_p(command, SMB_HOST_COMMAND);
if (read_write == I2C_SMBUS_WRITE)
- outw_p(data->word, SMB_HOST_DATA); /* TODO: endian???? */
+ outw_p(data->word, SMB_HOST_DATA);
size = AMD756_WORD_DATA;
break;
case I2C_SMBUS_BLOCK_DATA:
@@ -256,7 +256,7 @@ static s32 amd756_access(struct i2c_adapter * adap, u16 addr,
data->byte = inw_p(SMB_HOST_DATA);
break;
case AMD756_WORD_DATA:
- data->word = inw_p(SMB_HOST_DATA); /* TODO: endian???? */
+ data->word = inw_p(SMB_HOST_DATA);
break;
case AMD756_BLOCK_DATA:
data->block[0] = inw_p(SMB_HOST_DATA) & 0x3f;
@@ -283,7 +283,7 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = amd756_func,
};
-struct i2c_adapter amd756_smbus = {
+static struct i2c_adapter amd756_smbus = {
.owner = THIS_MODULE,
.class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
@@ -398,5 +398,3 @@ module_pci_driver(amd756_driver);
MODULE_AUTHOR("Merlin Hughes <merlin@merlin.org>");
MODULE_DESCRIPTION("AMD756/766/768/8111 and nVidia nForce SMBus driver");
MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(amd756_smbus);
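
The two dropped "/* TODO: endian???? */" markers were likely stale: this driver only
runs on little-endian x86 chipsets, where the low byte of data->word lines up with
the low SMBus data byte, so no swap is needed. A tiny standalone check of that byte
order (illustration only, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t word = 0x1234;
            uint8_t *b = (uint8_t *)&word;

            /* On little-endian x86, b[0] is 0x34: the low byte sits first,
             * matching the SMBus rule that the low data byte travels first. */
            printf("first byte: 0x%02x\n", b[0]);
            return 0;
    }
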
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index cc5a26637fd5..1550d3d552ae 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -1102,7 +1102,7 @@ static void aspeed_i2c_remove_bus(struct platform_device *pdev)
static struct platform_driver aspeed_i2c_bus_driver = {
.probe = aspeed_i2c_probe_bus,
- .remove_new = aspeed_i2c_remove_bus,
+ .remove = aspeed_i2c_remove_bus,
.driver = {
.name = "aspeed-i2c-bus",
.of_match_table = aspeed_i2c_bus_of_table,
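
The ".remove_new" to ".remove" one-liners repeated across this series are mechanical:
platform_driver's remove callback was converted to return void, ".remove_new" served
as the transitional name, and the rename back to ".remove" completes the migration.
A minimal sketch of a converted driver (all names hypothetical):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static void foo_remove(struct platform_device *pdev)
    {
            /* void return type: remove cannot fail, it only cleans up */
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .remove = foo_remove,   /* spelled .remove_new during the transition */
            .driver = {
                    .name = "foo",
            },
    };
    module_platform_driver(foo_driver);

    MODULE_DESCRIPTION("Sketch of a platform driver with a void remove callback");
    MODULE_LICENSE("GPL");
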
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index dc52b3530725..edc047e3e535 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -330,7 +330,7 @@ static const struct dev_pm_ops __maybe_unused at91_twi_pm = {
static struct platform_driver at91_twi_driver = {
.probe = at91_twi_probe,
- .remove_new = at91_twi_remove,
+ .remove = at91_twi_remove,
.id_table = at91_twi_devtypes,
.driver = {
.name = "at91_i2c",
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 902e420e761e..b78b38ddac46 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -368,7 +368,7 @@ static struct platform_driver au1xpsc_smbus_driver = {
.pm = pm_sleep_ptr(&i2c_au1550_pmops),
},
.probe = i2c_au1550_probe,
- .remove_new = i2c_au1550_remove,
+ .remove = i2c_au1550_remove,
};
module_platform_driver(au1xpsc_smbus_driver);
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index a66f7f67b3b8..48916cf45ff7 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -824,7 +824,7 @@ MODULE_DEVICE_TABLE(of, axxia_i2c_of_match);
static struct platform_driver axxia_i2c_driver = {
.probe = axxia_i2c_probe,
- .remove_new = axxia_i2c_remove,
+ .remove = axxia_i2c_remove,
.driver = {
.name = "axxia-i2c",
.of_match_table = axxia_i2c_of_match,
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 133d02899c6b..15b632a146e1 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -1264,7 +1264,7 @@ static struct platform_driver bcm_iproc_i2c_driver = {
.pm = pm_sleep_ptr(&bcm_iproc_i2c_pm_ops),
},
.probe = bcm_iproc_i2c_probe,
- .remove_new = bcm_iproc_i2c_remove,
+ .remove = bcm_iproc_i2c_remove,
};
module_platform_driver(bcm_iproc_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index eb5c46a8f824..340fe1305dd9 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -877,7 +877,7 @@ static struct platform_driver bcm_kona_i2c_driver = {
.of_match_table = bcm_kona_i2c_of_match,
},
.probe = bcm_kona_i2c_probe,
- .remove_new = bcm_kona_i2c_remove,
+ .remove = bcm_kona_i2c_remove,
};
module_platform_driver(bcm_kona_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index ae42e37052a8..8554e790f8e3 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -520,7 +520,7 @@ MODULE_DEVICE_TABLE(of, bcm2835_i2c_of_match);
static struct platform_driver bcm2835_i2c_driver = {
.probe = bcm2835_i2c_probe,
- .remove_new = bcm2835_i2c_remove,
+ .remove = bcm2835_i2c_remove,
.driver = {
.name = "i2c-bcm2835",
.of_match_table = bcm2835_i2c_of_match,
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 83b85011e377..00f1a046e985 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -744,7 +744,7 @@ static struct platform_driver brcmstb_i2c_driver = {
.pm = pm_sleep_ptr(&brcmstb_i2c_pm),
},
.probe = brcmstb_i2c_probe,
- .remove_new = brcmstb_i2c_remove,
+ .remove = brcmstb_i2c_remove,
};
module_platform_driver(brcmstb_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 87b9ba95b2e1..b64026fbca66 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -129,6 +129,7 @@
#define CDNS_I2C_BROKEN_HOLD_BIT BIT(0)
#define CDNS_I2C_POLL_US 100000
+#define CDNS_I2C_POLL_US_ATOMIC 10
#define CDNS_I2C_TIMEOUT_US 500000
#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
@@ -189,6 +190,8 @@ enum cdns_i2c_slave_state {
* @slave_state: I2C Slave state(idle/read/write).
* @fifo_depth: The depth of the transfer FIFO
* @transfer_size: The maximum number of bytes in one transfer
+ * @atomic: true if the transfer runs in atomic (polled) mode
+ * @err_status_atomic: Error status in atomic mode
*/
struct cdns_i2c {
struct device *dev;
@@ -219,6 +222,8 @@ struct cdns_i2c {
#endif
u32 fifo_depth;
unsigned int transfer_size;
+ bool atomic;
+ int err_status_atomic;
};
struct cdns_platform_data {
@@ -229,6 +234,66 @@ struct cdns_platform_data {
clk_rate_change_nb)
/**
+ * cdns_i2c_init - Controller initialisation
+ * @id: Device private data structure
+ *
+ * Initialise the i2c controller.
+ *
+ */
+static void cdns_i2c_init(struct cdns_i2c *id)
+{
+ cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET);
+ /*
+ * Cadence I2C controller has a bug wherein it generates
+ * invalid read transaction after HW timeout in master receiver mode.
+ * HW timeout is not used by this driver and the interrupt is disabled.
+ * But the feature itself cannot be disabled. Hence maximum value
+ * is written to this register to reduce the chances of error.
+ */
+ cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+}
+
+/**
+ * cdns_i2c_runtime_suspend - Runtime suspend method for the driver
+ * @dev: Address of the platform_device structure
+ *
+ * Put the driver into low power mode.
+ *
+ * Return: 0 always
+ */
+static int cdns_i2c_runtime_suspend(struct device *dev)
+{
+ struct cdns_i2c *xi2c = dev_get_drvdata(dev);
+
+ clk_disable(xi2c->clk);
+
+ return 0;
+}
+
+/**
+ * cdns_i2c_runtime_resume - Runtime resume
+ * @dev: Address of the platform_device structure
+ *
+ * Runtime resume callback.
+ *
+ * Return: 0 on success, or negative errno otherwise.
+ */
+static int cdns_i2c_runtime_resume(struct device *dev)
+{
+ struct cdns_i2c *xi2c = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(xi2c->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock.\n");
+ return ret;
+ }
+ cdns_i2c_init(xi2c);
+
+ return 0;
+}
+
+/**
* cdns_i2c_clear_bus_hold - Clear bus hold bit
* @id: Pointer to driver data struct
*
@@ -561,6 +626,89 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
return cdns_i2c_master_isr(ptr);
}
+static bool cdns_i2c_error_check(struct cdns_i2c *id)
+{
+ unsigned int isr_status;
+
+ id->err_status = 0;
+
+ isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+ cdns_i2c_writereg(isr_status & CDNS_I2C_IXR_ERR_INTR_MASK, CDNS_I2C_ISR_OFFSET);
+
+ id->err_status = isr_status & CDNS_I2C_IXR_ERR_INTR_MASK;
+
+ return !!id->err_status;
+}
+
+static void cdns_i2c_mrecv_atomic(struct cdns_i2c *id)
+{
+ while (id->recv_count > 0) {
+ bool updatetx;
+
+ /*
+ * Check if transfer size register needs to be updated again for a
+ * large data receive operation.
+ */
+ updatetx = id->recv_count > id->curr_recv_count;
+
+ while (id->curr_recv_count > 0) {
+ if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_RXDV) {
+ *id->p_recv_buf = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+ id->p_recv_buf++;
+ id->recv_count--;
+ id->curr_recv_count--;
+
+ /*
+ * Clear the hold bit that was set for FIFO control,
+ * if the remaining RX data is less than or equal to
+ * the FIFO depth, unless a repeated start is selected.
+ */
+ if (id->recv_count <= id->fifo_depth && !id->bus_hold_flag)
+ cdns_i2c_clear_bus_hold(id);
+ }
+ if (cdns_i2c_error_check(id))
+ return;
+ if (cdns_is_holdquirk(id, updatetx))
+ break;
+ }
+
+ /*
+ * The controller NACKs the target once the transfer size
+ * register reaches zero, regardless of the HOLD bit. As a
+ * workaround, keep the transfer size register non-zero
+ * during large receive operations.
+ */
+ if (cdns_is_holdquirk(id, updatetx)) {
+ /* wait while fifo is full */
+ while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
+ (id->curr_recv_count - id->fifo_depth))
+ ;
+
+ /*
+ * Check number of bytes to be received against maximum
+ * transfer size and update register accordingly.
+ */
+ if ((id->recv_count - id->fifo_depth) >
+ id->transfer_size) {
+ cdns_i2c_writereg(id->transfer_size,
+ CDNS_I2C_XFER_SIZE_OFFSET);
+ id->curr_recv_count = id->transfer_size +
+ id->fifo_depth;
+ } else {
+ cdns_i2c_writereg(id->recv_count -
+ id->fifo_depth,
+ CDNS_I2C_XFER_SIZE_OFFSET);
+ id->curr_recv_count = id->recv_count;
+ }
+ }
+ }
+
+ /* Clear hold (if not repeated start) */
+ if (!id->recv_count && !id->bus_hold_flag)
+ cdns_i2c_clear_bus_hold(id);
+}
+
/**
* cdns_i2c_mrecv - Prepare and start a master receive operation
* @id: pointer to the i2c device structure
@@ -655,7 +803,34 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
cdns_i2c_writereg(addr, CDNS_I2C_ADDR_OFFSET);
}
- cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+ if (!id->atomic)
+ cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+ else
+ cdns_i2c_mrecv_atomic(id);
+}
+
+static void cdns_i2c_msend_rem_atomic(struct cdns_i2c *id)
+{
+ while (id->send_count) {
+ unsigned int avail_bytes;
+ unsigned int bytes_to_send;
+
+ avail_bytes = id->fifo_depth - cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+ if (id->send_count > avail_bytes)
+ bytes_to_send = avail_bytes;
+ else
+ bytes_to_send = id->send_count;
+
+ while (bytes_to_send--) {
+ cdns_i2c_writereg((*id->p_send_buf++), CDNS_I2C_DATA_OFFSET);
+ id->send_count--;
+ }
+ if (cdns_i2c_error_check(id))
+ return;
+ }
+
+ if (!id->send_count && !id->bus_hold_flag)
+ cdns_i2c_clear_bus_hold(id);
}
/**
@@ -718,7 +893,10 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
CDNS_I2C_ADDR_OFFSET);
- cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+ if (!id->atomic)
+ cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET);
+ else if (id->send_count > 0)
+ cdns_i2c_msend_rem_atomic(id);
}
/**
@@ -758,7 +936,8 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
id->p_msg = msg;
id->err_status = 0;
- reinit_completion(&id->xfer_done);
+ if (!id->atomic)
+ reinit_completion(&id->xfer_done);
/* Check for the TEN Bit mode on each msg */
reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
@@ -780,14 +959,31 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
/* Minimal time to execute this message */
msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk);
- /* Plus some wiggle room */
- msg_timeout += msecs_to_jiffies(500);
+
+ /*
+ * Plus some wiggle room.
+ * For non-atomic contexts, 500 ms is added to the timeout.
+ * For atomic contexts, 2000 ms is added because transfers happen in polled
+ * mode, requiring more time to account for the polling overhead.
+ */
+ if (!id->atomic)
+ msg_timeout += msecs_to_jiffies(500);
+ else
+ msg_timeout += msecs_to_jiffies(2000);
if (msg_timeout < adap->timeout)
msg_timeout = adap->timeout;
- /* Wait for the signal of completion */
- time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
+ if (!id->atomic) {
+ /* Wait for the signal of completion */
+ time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
+ } else {
+ /* 0 is success, -ETIMEDOUT is error */
+ time_left = !readl_poll_timeout_atomic(id->membase + CDNS_I2C_ISR_OFFSET,
+ reg, (reg & CDNS_I2C_IXR_COMP),
+ CDNS_I2C_POLL_US_ATOMIC, msg_timeout);
+ }
+
if (time_left == 0) {
cdns_i2c_master_reset(adap);
return -ETIMEDOUT;
@@ -806,58 +1002,31 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
return 0;
}
-/**
- * cdns_i2c_master_xfer - The main i2c transfer function
- * @adap: pointer to the i2c adapter driver instance
- * @msgs: pointer to the i2c message structure
- * @num: the number of messages to transfer
- *
- * Initiates the send/recv activity based on the transfer message received.
- *
- * Return: number of msgs processed on success, negative error otherwise
- */
-static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
+static int cdns_i2c_master_common_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs,
+ int num)
{
int ret, count;
u32 reg;
struct cdns_i2c *id = adap->algo_data;
bool hold_quirk;
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
- bool change_role = false;
-#endif
-
- ret = pm_runtime_resume_and_get(id->dev);
- if (ret < 0)
- return ret;
-
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
- /* Check i2c operating mode and switch if possible */
- if (id->dev_mode == CDNS_I2C_MODE_SLAVE) {
- if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) {
- ret = -EAGAIN;
- goto out;
- }
-
- /* Set mode to master */
- cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
-
- /* Mark flag to change role once xfer is completed */
- change_role = true;
- }
-#endif
/* Check if the bus is free */
-
- ret = readl_relaxed_poll_timeout(id->membase + CDNS_I2C_SR_OFFSET,
- reg,
- !(reg & CDNS_I2C_SR_BA),
- CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US);
+ if (!id->atomic)
+ ret = readl_relaxed_poll_timeout(id->membase + CDNS_I2C_SR_OFFSET,
+ reg,
+ !(reg & CDNS_I2C_SR_BA),
+ CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US);
+ else
+ ret = readl_poll_timeout_atomic(id->membase + CDNS_I2C_SR_OFFSET,
+ reg,
+ !(reg & CDNS_I2C_SR_BA),
+ CDNS_I2C_POLL_US_ATOMIC, CDNS_I2C_TIMEOUT_US);
if (ret) {
ret = -EAGAIN;
if (id->adap.bus_recovery_info)
i2c_recover_bus(adap);
- goto out;
+ return ret;
}
hold_quirk = !!(id->quirks & CDNS_I2C_BROKEN_HOLD_BIT);
@@ -877,8 +1046,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
if (msgs[count].flags & I2C_M_RD) {
dev_warn(adap->dev.parent,
"Can't do repeated start after a receive message\n");
- ret = -EOPNOTSUPP;
- goto out;
+ return -EOPNOTSUPP;
}
}
id->bus_hold_flag = 1;
@@ -896,26 +1064,65 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
ret = cdns_i2c_process_msg(id, msgs, adap);
if (ret)
- goto out;
+ return ret;
/* Report the other error interrupts to application */
- if (id->err_status) {
+ if (id->err_status || id->err_status_atomic) {
cdns_i2c_master_reset(adap);
- if (id->err_status & CDNS_I2C_IXR_NACK) {
- ret = -ENXIO;
- goto out;
- }
- ret = -EIO;
- goto out;
+ if (id->err_status & CDNS_I2C_IXR_NACK)
+ return -ENXIO;
+
+ return -EIO;
}
}
+ return 0;
+}
+
+/**
+ * cdns_i2c_master_xfer - The main i2c transfer function
+ * @adap: pointer to the i2c adapter driver instance
+ * @msgs: pointer to the i2c message structure
+ * @num: the number of messages to transfer
+ *
+ * Initiates the send/recv activity based on the transfer message received.
+ *
+ * Return: number of msgs processed on success, negative error otherwise
+ */
+static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ int ret;
+ struct cdns_i2c *id = adap->algo_data;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ bool change_role = false;
+#endif
- ret = num;
+ ret = pm_runtime_resume_and_get(id->dev);
+ if (ret < 0)
+ return ret;
-out:
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* Check i2c operating mode and switch if possible */
+ if (id->dev_mode == CDNS_I2C_MODE_SLAVE) {
+ if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /* Set mode to master */
+ cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
+
+ /* Mark flag to change role once xfer is completed */
+ change_role = true;
+ }
+#endif
+ ret = cdns_i2c_master_common_xfer(adap, msgs, num);
+ if (!ret)
+ ret = num;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
+out:
/* Switch i2c mode to slave */
if (change_role)
cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id);
@@ -927,6 +1134,41 @@ out:
}
/**
+ * cdns_i2c_master_xfer_atomic - The i2c transfer function in atomic mode
+ * @adap: pointer to the i2c adapter driver instance
+ * @msgs: pointer to the i2c message structure
+ * @num: the number of messages to transfer
+ *
+ * Return: number of msgs processed on success, negative error otherwise
+ */
+static int cdns_i2c_master_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ int ret;
+ struct cdns_i2c *id = adap->algo_data;
+
+ ret = cdns_i2c_runtime_resume(id->dev);
+ if (ret)
+ return ret;
+
+ if (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) {
+ dev_warn(id->adap.dev.parent,
+ "Atomic xfer not supported for version 1.0\n");
+ return 0;
+ }
+
+ id->atomic = true;
+ ret = cdns_i2c_master_common_xfer(adap, msgs, num);
+ if (!ret)
+ ret = num;
+
+ id->atomic = false;
+ cdns_i2c_runtime_suspend(id->dev);
+
+ return ret;
+}
+
+/**
* cdns_i2c_func - Returns the supported features of the I2C driver
* @adap: pointer to the i2c adapter structure
*
@@ -990,6 +1232,7 @@ static int cdns_unreg_slave(struct i2c_client *slave)
static const struct i2c_algorithm cdns_i2c_algo = {
.master_xfer = cdns_i2c_master_xfer,
+ .master_xfer_atomic = cdns_i2c_master_xfer_atomic,
.functionality = cdns_i2c_func,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
.reg_slave = cdns_reg_slave,
@@ -1158,23 +1401,6 @@ static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
}
}
-/**
- * cdns_i2c_runtime_suspend - Runtime suspend method for the driver
- * @dev: Address of the platform_device structure
- *
- * Put the driver into low power mode.
- *
- * Return: 0 always
- */
-static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev)
-{
- struct cdns_i2c *xi2c = dev_get_drvdata(dev);
-
- clk_disable(xi2c->clk);
-
- return 0;
-}
-
static int __maybe_unused cdns_i2c_suspend(struct device *dev)
{
struct cdns_i2c *xi2c = dev_get_drvdata(dev);
@@ -1187,49 +1413,6 @@ static int __maybe_unused cdns_i2c_suspend(struct device *dev)
return 0;
}
-/**
- * cdns_i2c_init - Controller initialisation
- * @id: Device private data structure
- *
- * Initialise the i2c controller.
- *
- */
-static void cdns_i2c_init(struct cdns_i2c *id)
-{
- cdns_i2c_writereg(id->ctrl_reg, CDNS_I2C_CR_OFFSET);
- /*
- * Cadence I2C controller has a bug wherein it generates
- * invalid read transaction after HW timeout in master receiver mode.
- * HW timeout is not used by this driver and the interrupt is disabled.
- * But the feature itself cannot be disabled. Hence maximum value
- * is written to this register to reduce the chances of error.
- */
- cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
-}
-
-/**
- * cdns_i2c_runtime_resume - Runtime resume
- * @dev: Address of the platform_device structure
- *
- * Runtime resume callback.
- *
- * Return: 0 on success and error value on error
- */
-static int __maybe_unused cdns_i2c_runtime_resume(struct device *dev)
-{
- struct cdns_i2c *xi2c = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_enable(xi2c->clk);
- if (ret) {
- dev_err(dev, "Cannot enable clock.\n");
- return ret;
- }
- cdns_i2c_init(xi2c);
-
- return 0;
-}
-
static int __maybe_unused cdns_i2c_resume(struct device *dev)
{
struct cdns_i2c *xi2c = dev_get_drvdata(dev);
@@ -1469,7 +1652,7 @@ static struct platform_driver cdns_i2c_drv = {
.pm = &cdns_i2c_dev_pm_ops,
},
.probe = cdns_i2c_probe,
- .remove_new = cdns_i2c_remove,
+ .remove = cdns_i2c_remove,
};
module_platform_driver(cdns_i2c_drv);
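
The atomic path added above exists because the I2C core calls master_xfer_atomic()
when it must transfer with interrupts disabled, for example writing a poweroff
command to a PMIC late in shutdown; completions are unusable there, so the driver
polls the ISR with readl_poll_timeout_atomic(). A self-contained sketch of the
poll-with-budget idea (stand-in names, not the driver's API):

    #include <stdio.h>

    #define IXR_COMP 0x1                    /* stand-in for CDNS_I2C_IXR_COMP */

    static unsigned int fake_isr;           /* stand-in for the ISR register */

    static unsigned int read_isr(void)
    {
            return fake_isr;
    }

    /* Poll for the completion bit until a time budget expires. */
    static int poll_for_completion(unsigned long budget_us, unsigned long step_us)
    {
            unsigned long waited;

            for (waited = 0; waited < budget_us; waited += step_us) {
                    if (read_isr() & IXR_COMP)
                            return 0;
                    /* in the kernel: a short delay between polls */
            }
            return -1;                      /* the driver maps this to -ETIMEDOUT */
    }

    int main(void)
    {
            fake_isr = IXR_COMP;
            printf("poll result: %d\n", poll_for_completion(2000000, 10));
            return 0;
    }
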
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index fdc1758a3275..8065c7e4462e 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -264,7 +264,7 @@ MODULE_DEVICE_TABLE(of, i2c_cbus_dt_ids);
static struct platform_driver cbus_i2c_driver = {
.probe = cbus_i2c_probe,
- .remove_new = cbus_i2c_remove,
+ .remove = cbus_i2c_remove,
.driver = {
.name = "i2c-cbus-gpio",
.of_match_table = of_match_ptr(i2c_cbus_dt_ids),
diff --git a/drivers/i2c/busses/i2c-cgbc.c b/drivers/i2c/busses/i2c-cgbc.c
new file mode 100644
index 000000000000..f054d167ac47
--- /dev/null
+++ b/drivers/i2c/busses/i2c-cgbc.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Congatec Board Controller I2C busses driver
+ *
+ * Copyright (C) 2024 Bootlin
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/cgbc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define CGBC_I2C_PRIMARY_BUS_ID 0
+#define CGBC_I2C_PM_BUS_ID 4
+
+#define CGBC_I2C_CMD_START 0x40
+#define CGBC_I2C_CMD_STAT 0x48
+#define CGBC_I2C_CMD_DATA 0x50
+#define CGBC_I2C_CMD_SPEED 0x58
+
+#define CGBC_I2C_STAT_IDL 0x00
+#define CGBC_I2C_STAT_DAT 0x01
+#define CGBC_I2C_STAT_BUSY 0x02
+
+#define CGBC_I2C_START 0x80
+#define CGBC_I2C_STOP 0x40
+
+#define CGBC_I2C_LAST_ACK 0x80 /* send ACK on last read byte */
+
+/*
+ * Reference code defines 1kHz as min freq and 6.1MHz as max freq.
+ * But in practice, the board controller limits the frequency to 1 MHz,
+ * and 1 kHz is not functional (the minimal working frequency is 50 kHz).
+ * So use these values as limits.
+ */
+#define CGBC_I2C_FREQ_MIN_HZ 50000 /* 50 kHz */
+#define CGBC_I2C_FREQ_MAX_HZ 1000000 /* 1 MHz */
+
+#define CGBC_I2C_FREQ_UNIT_1KHZ 0x40
+#define CGBC_I2C_FREQ_UNIT_10KHZ 0x80
+#define CGBC_I2C_FREQ_UNIT_100KHZ 0xC0
+
+#define CGBC_I2C_FREQ_UNIT_MASK 0xC0
+#define CGBC_I2C_FREQ_VALUE_MASK 0x3F
+
+#define CGBC_I2C_READ_MAX_LEN 31
+#define CGBC_I2C_WRITE_MAX_LEN 32
+
+#define CGBC_I2C_CMD_HEADER_SIZE 4
+#define CGBC_I2C_CMD_SIZE (CGBC_I2C_CMD_HEADER_SIZE + CGBC_I2C_WRITE_MAX_LEN)
+
+enum cgbc_i2c_state {
+ CGBC_I2C_STATE_DONE = 0,
+ CGBC_I2C_STATE_INIT,
+ CGBC_I2C_STATE_START,
+ CGBC_I2C_STATE_READ,
+ CGBC_I2C_STATE_WRITE,
+ CGBC_I2C_STATE_ERROR,
+};
+
+struct i2c_algo_cgbc_data {
+ u8 bus_id;
+ unsigned long read_maxtime_us;
+};
+
+struct cgbc_i2c_data {
+ struct device *dev;
+ struct cgbc_device_data *cgbc;
+ struct i2c_adapter adap;
+ struct i2c_msg *msg;
+ int nmsgs;
+ int pos;
+ enum cgbc_i2c_state state;
+};
+
+struct cgbc_i2c_transfer {
+ u8 bus_id;
+ bool start;
+ bool stop;
+ bool last_ack;
+ u8 read;
+ u8 write;
+ u8 addr;
+ u8 data[CGBC_I2C_WRITE_MAX_LEN];
+};
+
+static u8 cgbc_i2c_freq_to_reg(unsigned int bus_frequency)
+{
+ u8 reg;
+
+ if (bus_frequency <= 10000)
+ reg = CGBC_I2C_FREQ_UNIT_1KHZ | (bus_frequency / 1000);
+ else if (bus_frequency <= 100000)
+ reg = CGBC_I2C_FREQ_UNIT_10KHZ | (bus_frequency / 10000);
+ else
+ reg = CGBC_I2C_FREQ_UNIT_100KHZ | (bus_frequency / 100000);
+
+ return reg;
+}
+
+static unsigned int cgbc_i2c_reg_to_freq(u8 reg)
+{
+ unsigned int freq = reg & CGBC_I2C_FREQ_VALUE_MASK;
+ u8 unit = reg & CGBC_I2C_FREQ_UNIT_MASK;
+
+ if (unit == CGBC_I2C_FREQ_UNIT_100KHZ)
+ return freq * 100000;
+ else if (unit == CGBC_I2C_FREQ_UNIT_10KHZ)
+ return freq * 10000;
+ else
+ return freq * 1000;
+}
+
+static int cgbc_i2c_get_status(struct i2c_adapter *adap)
+{
+ struct i2c_algo_cgbc_data *algo_data = adap->algo_data;
+ struct cgbc_i2c_data *i2c = i2c_get_adapdata(adap);
+ struct cgbc_device_data *cgbc = i2c->cgbc;
+ u8 cmd = CGBC_I2C_CMD_STAT | algo_data->bus_id;
+ u8 status;
+ int ret;
+
+ ret = cgbc_command(cgbc, &cmd, sizeof(cmd), NULL, 0, &status);
+ if (ret)
+ return ret;
+
+ return status;
+}
+
+static int cgbc_i2c_set_frequency(struct i2c_adapter *adap,
+ unsigned int bus_frequency)
+{
+ struct i2c_algo_cgbc_data *algo_data = adap->algo_data;
+ struct cgbc_i2c_data *i2c = i2c_get_adapdata(adap);
+ struct cgbc_device_data *cgbc = i2c->cgbc;
+ u8 cmd[2], data;
+ int ret;
+
+ if (bus_frequency > CGBC_I2C_FREQ_MAX_HZ ||
+ bus_frequency < CGBC_I2C_FREQ_MIN_HZ) {
+ dev_info(i2c->dev, "invalid frequency %u, using default\n", bus_frequency);
+ bus_frequency = I2C_MAX_STANDARD_MODE_FREQ;
+ }
+
+ cmd[0] = CGBC_I2C_CMD_SPEED | algo_data->bus_id;
+ cmd[1] = cgbc_i2c_freq_to_reg(bus_frequency);
+
+ ret = cgbc_command(cgbc, &cmd, sizeof(cmd), &data, 1, NULL);
+ if (ret)
+ return dev_err_probe(i2c->dev, ret,
+ "Failed to initialize I2C bus %s",
+ adap->name);
+
+ cmd[1] = 0x00;
+
+ ret = cgbc_command(cgbc, &cmd, sizeof(cmd), &data, 1, NULL);
+ if (ret)
+ return dev_err_probe(i2c->dev, ret,
+ "Failed to get I2C bus frequency");
+
+ bus_frequency = cgbc_i2c_reg_to_freq(data);
+
+ dev_dbg(i2c->dev, "%s is running at %d Hz\n", adap->name, bus_frequency);
+
+ /*
+ * The read_maxtime_us variable represents the maximum time to wait
+ * for data during a read operation. The maximum amount of data that
+ * can be read by a command is CGBC_I2C_READ_MAX_LEN.
+ * Therefore, calculate the max time to properly size the timeout.
+ */
+ algo_data->read_maxtime_us = (BITS_PER_BYTE + 1) * CGBC_I2C_READ_MAX_LEN
+ * USEC_PER_SEC / bus_frequency;
+
+ return 0;
+}
+
+static unsigned int cgbc_i2c_xfer_to_cmd(struct cgbc_i2c_transfer xfer, u8 *cmd)
+{
+ int i = 0;
+
+ cmd[i++] = CGBC_I2C_CMD_START | xfer.bus_id;
+
+ cmd[i] = (xfer.start) ? CGBC_I2C_START : 0x00;
+ if (xfer.stop)
+ cmd[i] |= CGBC_I2C_STOP;
+ cmd[i++] |= (xfer.start) ? xfer.write + 1 : xfer.write;
+
+ cmd[i++] = (xfer.last_ack) ? (xfer.read | CGBC_I2C_LAST_ACK) : xfer.read;
+
+ if (xfer.start)
+ cmd[i++] = xfer.addr;
+
+ if (xfer.write > 0)
+ memcpy(&cmd[i], &xfer.data, xfer.write);
+
+ return i + xfer.write;
+}
+
+static int cgbc_i2c_xfer_msg(struct i2c_adapter *adap)
+{
+ struct i2c_algo_cgbc_data *algo_data = adap->algo_data;
+ struct cgbc_i2c_data *i2c = i2c_get_adapdata(adap);
+ struct cgbc_device_data *cgbc = i2c->cgbc;
+ struct i2c_msg *msg = i2c->msg;
+ u8 cmd[CGBC_I2C_CMD_SIZE];
+ int ret, max_len, len, i;
+ unsigned int cmd_len;
+ u8 cmd_data;
+
+ struct cgbc_i2c_transfer xfer = {
+ .bus_id = algo_data->bus_id,
+ .addr = i2c_8bit_addr_from_msg(msg),
+ };
+
+ if (i2c->state == CGBC_I2C_STATE_DONE)
+ return 0;
+
+ ret = cgbc_i2c_get_status(adap);
+
+ if (ret == CGBC_I2C_STAT_BUSY)
+ return -EBUSY;
+ else if (ret < 0)
+ goto err;
+
+ if (i2c->state == CGBC_I2C_STATE_INIT ||
+ (i2c->state == CGBC_I2C_STATE_WRITE && msg->flags & I2C_M_RD))
+ xfer.start = true;
+
+ i2c->state = (msg->flags & I2C_M_RD) ? CGBC_I2C_STATE_READ : CGBC_I2C_STATE_WRITE;
+
+ max_len = (i2c->state == CGBC_I2C_STATE_READ) ?
+ CGBC_I2C_READ_MAX_LEN : CGBC_I2C_WRITE_MAX_LEN;
+
+ if (msg->len - i2c->pos > max_len) {
+ len = max_len;
+ } else {
+ len = msg->len - i2c->pos;
+
+ if (i2c->nmsgs == 1)
+ xfer.stop = true;
+ }
+
+ if (i2c->state == CGBC_I2C_STATE_WRITE) {
+ xfer.write = len;
+ xfer.read = 0;
+
+ for (i = 0; i < len; i++)
+ xfer.data[i] = msg->buf[i2c->pos + i];
+
+ cmd_len = cgbc_i2c_xfer_to_cmd(xfer, &cmd[0]);
+
+ ret = cgbc_command(cgbc, &cmd, cmd_len, NULL, 0, NULL);
+ if (ret)
+ goto err;
+ } else if (i2c->state == CGBC_I2C_STATE_READ) {
+ xfer.write = 0;
+ xfer.read = len;
+
+ if (i2c->nmsgs > 1 || msg->len - i2c->pos > max_len)
+ xfer.read |= CGBC_I2C_LAST_ACK;
+
+ cmd_len = cgbc_i2c_xfer_to_cmd(xfer, &cmd[0]);
+ ret = cgbc_command(cgbc, &cmd, cmd_len, NULL, 0, NULL);
+ if (ret)
+ goto err;
+
+ ret = read_poll_timeout(cgbc_i2c_get_status, ret,
+ ret != CGBC_I2C_STAT_BUSY, 0,
+ 2 * algo_data->read_maxtime_us, false, adap);
+ if (ret < 0)
+ goto err;
+
+ cmd_data = CGBC_I2C_CMD_DATA | algo_data->bus_id;
+ ret = cgbc_command(cgbc, &cmd_data, sizeof(cmd_data),
+ msg->buf + i2c->pos, len, NULL);
+ if (ret)
+ goto err;
+ }
+
+ if (len == (msg->len - i2c->pos)) {
+ i2c->msg++;
+ i2c->nmsgs--;
+ i2c->pos = 0;
+ } else {
+ i2c->pos += len;
+ }
+
+ if (i2c->nmsgs == 0)
+ i2c->state = CGBC_I2C_STATE_DONE;
+
+ return 0;
+
+err:
+ i2c->state = CGBC_I2C_STATE_ERROR;
+ return ret;
+}
+
+static int cgbc_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct cgbc_i2c_data *i2c = i2c_get_adapdata(adap);
+ unsigned long timeout = jiffies + HZ;
+ int ret;
+
+ i2c->state = CGBC_I2C_STATE_INIT;
+ i2c->msg = msgs;
+ i2c->nmsgs = num;
+ i2c->pos = 0;
+
+ while (time_before(jiffies, timeout)) {
+ ret = cgbc_i2c_xfer_msg(adap);
+ if (i2c->state == CGBC_I2C_STATE_DONE)
+ return num;
+
+ if (i2c->state == CGBC_I2C_STATE_ERROR)
+ return ret;
+
+ if (ret == 0)
+ timeout = jiffies + HZ;
+ }
+
+ i2c->state = CGBC_I2C_STATE_ERROR;
+ return -ETIMEDOUT;
+}
+
+static u32 cgbc_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~(I2C_FUNC_SMBUS_QUICK));
+}
+
+static const struct i2c_algorithm cgbc_i2c_algorithm = {
+ .master_xfer = cgbc_i2c_xfer,
+ .functionality = cgbc_i2c_func,
+};
+
+static struct i2c_algo_cgbc_data cgbc_i2c_algo_data[] = {
+ { .bus_id = CGBC_I2C_PRIMARY_BUS_ID },
+ { .bus_id = CGBC_I2C_PM_BUS_ID },
+};
+
+static const struct i2c_adapter cgbc_i2c_adapter[] = {
+ {
+ .owner = THIS_MODULE,
+ .name = "Congatec General Purpose I2C adapter",
+ .class = I2C_CLASS_DEPRECATED,
+ .algo = &cgbc_i2c_algorithm,
+ .algo_data = &cgbc_i2c_algo_data[0],
+ .nr = -1,
+ },
+ {
+ .owner = THIS_MODULE,
+ .name = "Congatec Power Management I2C adapter",
+ .class = I2C_CLASS_DEPRECATED,
+ .algo = &cgbc_i2c_algorithm,
+ .algo_data = &cgbc_i2c_algo_data[1],
+ .nr = -1,
+ },
+};
+
+static int cgbc_i2c_probe(struct platform_device *pdev)
+{
+ struct cgbc_device_data *cgbc = dev_get_drvdata(pdev->dev.parent);
+ struct cgbc_i2c_data *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ i2c->cgbc = cgbc;
+ i2c->dev = &pdev->dev;
+ i2c->adap = cgbc_i2c_adapter[pdev->id];
+ i2c->adap.dev.parent = i2c->dev;
+ i2c_set_adapdata(&i2c->adap, i2c);
+ platform_set_drvdata(pdev, i2c);
+
+ ret = cgbc_i2c_set_frequency(&i2c->adap, I2C_MAX_STANDARD_MODE_FREQ);
+ if (ret)
+ return ret;
+
+ return i2c_add_numbered_adapter(&i2c->adap);
+}
+
+static void cgbc_i2c_remove(struct platform_device *pdev)
+{
+ struct cgbc_i2c_data *i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c->adap);
+}
+
+static struct platform_driver cgbc_i2c_driver = {
+ .driver = {
+ .name = "cgbc-i2c",
+ },
+ .probe = cgbc_i2c_probe,
+ .remove = cgbc_i2c_remove,
+};
+
+module_platform_driver(cgbc_i2c_driver);
+
+MODULE_DESCRIPTION("Congatec Board Controller I2C Driver");
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cgbc_i2c");
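
Two calculations above are worth working through: the SPEED command packs the bus
frequency into a 2-bit unit plus a 6-bit value, and read_maxtime_us sizes the
status-poll budget from the worst-case read (9 clocks per byte times 31 bytes). A
standalone sketch mirroring the helpers above (assumed example values):

    #include <stdio.h>

    #define UNIT_1KHZ   0x40
    #define UNIT_10KHZ  0x80
    #define UNIT_100KHZ 0xC0
    #define UNIT_MASK   0xC0
    #define VALUE_MASK  0x3F

    static unsigned char freq_to_reg(unsigned int hz)
    {
            if (hz <= 10000)
                    return UNIT_1KHZ | (hz / 1000);
            if (hz <= 100000)
                    return UNIT_10KHZ | (hz / 10000);
            return UNIT_100KHZ | (hz / 100000);
    }

    static unsigned int reg_to_freq(unsigned char reg)
    {
            unsigned int v = reg & VALUE_MASK;

            switch (reg & UNIT_MASK) {
            case UNIT_100KHZ: return v * 100000;
            case UNIT_10KHZ:  return v * 10000;
            default:          return v * 1000;
            }
    }

    int main(void)
    {
            unsigned char reg = freq_to_reg(400000);        /* fast mode */

            /* 0xc4: unit 100 kHz, value 4, round-trips to 400000 Hz */
            printf("0x%02x -> %u Hz\n", reg, reg_to_freq(reg));

            /* worst-case read: 9 clocks/byte * 31 bytes at 100 kHz */
            printf("read_maxtime_us ~= %d\n", 9 * 31 * 1000000 / 100000);
            return 0;
    }
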
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 52e3000626c5..26a36a65521e 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -546,7 +546,7 @@ MODULE_DEVICE_TABLE(platform, cht_wc_i2c_adap_id_table);
static struct platform_driver cht_wc_i2c_adap_driver = {
.probe = cht_wc_i2c_adap_i2c_probe,
- .remove_new = cht_wc_i2c_adap_i2c_remove,
+ .remove = cht_wc_i2c_adap_i2c_remove,
.driver = {
.name = "cht_wcove_ext_chgr",
},
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 4794ec066eb0..260e1643c2cc 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -701,7 +701,7 @@ MODULE_DEVICE_TABLE(of, cpm_i2c_match);
static struct platform_driver cpm_i2c_driver = {
.probe = cpm_i2c_probe,
- .remove_new = cpm_i2c_remove,
+ .remove = cpm_i2c_remove,
.driver = {
.name = "fsl-i2c-cpm",
.of_match_table = cpm_i2c_match,
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index ab2688bd4d33..43bf90d90eeb 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id);
static struct platform_driver ec_i2c_tunnel_driver = {
.probe = ec_i2c_probe,
- .remove_new = ec_i2c_remove,
+ .remove = ec_i2c_remove,
.driver = {
.name = "cros-ec-i2c-tunnel",
.acpi_match_table = ACPI_PTR(cros_ec_i2c_tunnel_acpi_id),
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index c4fb5e9ab506..6a909d339681 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -11,23 +11,23 @@
*
* ----------------------------------------------------------------------------
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
+
#include <linux/clk.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of.h>
-#include <linux/platform_data/i2c-davinci.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
/* ----- global defines ----------------------------------------------- */
@@ -117,6 +117,8 @@
/* timeout for pm runtime autosuspend */
#define DAVINCI_I2C_PM_TIMEOUT 1000 /* ms */
+#define DAVINCI_I2C_DEFAULT_BUS_FREQ 100
+
struct davinci_i2c_dev {
struct device *dev;
void __iomem *base;
@@ -132,13 +134,10 @@ struct davinci_i2c_dev {
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
#endif
- struct davinci_i2c_platform_data *pdata;
-};
-
-/* default platform data to use if not supplied in the platform_device */
-static struct davinci_i2c_platform_data davinci_i2c_platform_data_default = {
- .bus_freq = 100,
- .bus_delay = 0,
+ /* standard bus frequency (kHz) */
+ unsigned int bus_freq;
+ /* Chip has a ICPFUNC register */
+ bool has_pfunc;
};
static inline void davinci_i2c_write_reg(struct davinci_i2c_dev *i2c_dev,
@@ -168,14 +167,12 @@ static inline void davinci_i2c_reset_ctrl(struct davinci_i2c_dev *i2c_dev,
static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
{
- struct davinci_i2c_platform_data *pdata = dev->pdata;
u16 psc;
u32 clk;
u32 d;
u32 clkh;
u32 clkl;
u32 input_clock = clk_get_rate(dev->clk);
- struct device_node *of_node = dev->dev->of_node;
/* NOTE: I2C Clock divider programming info
* As per I2C specs the following formulas provide prescaler
@@ -209,19 +206,19 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
psc++; /* better to run under spec than over */
d = (psc >= 2) ? 5 : 7 - psc;
- if (of_node && of_device_is_compatible(of_node, "ti,keystone-i2c"))
+ if (device_is_compatible(dev->dev, "ti,keystone-i2c"))
d = 6;
- clk = ((input_clock / (psc + 1)) / (pdata->bus_freq * 1000));
+ clk = ((input_clock / (psc + 1)) / (dev->bus_freq * 1000));
/* Avoid driving the bus too fast because of rounding errors above */
- if (input_clock / (psc + 1) / clk > pdata->bus_freq * 1000)
+ if (input_clock / (psc + 1) / clk > dev->bus_freq * 1000)
clk++;
/*
* According to I2C-BUS Spec 2.1, in FAST-MODE LOW period should be at
* least 1.3uS, which is not the case with 50% duty cycle. Driving HIGH
* to LOW ratio as 1 to 2 is more safe.
*/
- if (pdata->bus_freq > 100)
+ if (dev->bus_freq > 100)
clkl = (clk << 1) / 3;
else
clkl = (clk >> 1);
@@ -255,8 +252,6 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
*/
static int i2c_davinci_init(struct davinci_i2c_dev *dev)
{
- struct davinci_i2c_platform_data *pdata = dev->pdata;
-
/* put I2C into reset */
davinci_i2c_reset_ctrl(dev, 0);
@@ -274,8 +269,7 @@ static int i2c_davinci_init(struct davinci_i2c_dev *dev)
davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKL_REG));
dev_dbg(dev->dev, "CLKH = %d\n",
davinci_i2c_read_reg(dev, DAVINCI_I2C_CLKH_REG));
- dev_dbg(dev->dev, "bus_freq = %dkHz, bus_delay = %d\n",
- pdata->bus_freq, pdata->bus_delay);
+ dev_dbg(dev->dev, "bus_freq = %dkHz\n", dev->bus_freq);
/* Take the I2C module out of reset: */
@@ -309,12 +303,6 @@ static void davinci_i2c_unprepare_recovery(struct i2c_adapter *adap)
i2c_davinci_init(dev);
}
-static struct i2c_bus_recovery_info davinci_i2c_gpio_recovery_info = {
- .recover_bus = i2c_generic_scl_recovery,
- .prepare_recovery = davinci_i2c_prepare_recovery,
- .unprepare_recovery = davinci_i2c_unprepare_recovery,
-};
-
static void davinci_i2c_set_scl(struct i2c_adapter *adap, int val)
{
struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
@@ -414,7 +402,6 @@ static int
i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
{
struct davinci_i2c_dev *dev = i2c_get_adapdata(adap);
- struct davinci_i2c_platform_data *pdata = dev->pdata;
u32 flag;
u16 w;
unsigned long time_left;
@@ -424,10 +411,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
return -EADDRNOTAVAIL;
}
- /* Introduce a delay, required for some boards (e.g Davinci EVM) */
- if (pdata->bus_delay)
- udelay(pdata->bus_delay);
-
/* set the target address */
davinci_i2c_write_reg(dev, DAVINCI_I2C_SAR_REG, msg->addr);
@@ -758,8 +741,8 @@ static int davinci_i2c_probe(struct platform_device *pdev)
{
struct davinci_i2c_dev *dev;
struct i2c_adapter *adap;
- struct i2c_bus_recovery_info *rinfo;
int r, irq;
+ u32 prop;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -773,29 +756,15 @@ static int davinci_i2c_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
dev->irq = irq;
- dev->pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, dev);
- if (!dev->pdata && pdev->dev.of_node) {
- u32 prop;
-
- dev->pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct davinci_i2c_platform_data), GFP_KERNEL);
- if (!dev->pdata)
- return -ENOMEM;
-
- memcpy(dev->pdata, &davinci_i2c_platform_data_default,
- sizeof(struct davinci_i2c_platform_data));
- if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &prop))
- dev->pdata->bus_freq = prop / 1000;
-
- dev->pdata->has_pfunc =
- of_property_read_bool(pdev->dev.of_node,
- "ti,has-pfunc");
- } else if (!dev->pdata) {
- dev->pdata = &davinci_i2c_platform_data_default;
- }
+ r = device_property_read_u32(&pdev->dev, "clock-frequency", &prop);
+ if (r)
+ prop = DAVINCI_I2C_DEFAULT_BUS_FREQ;
+
+ dev->bus_freq = prop / 1000;
+
+ dev->has_pfunc = device_property_present(&pdev->dev, "ti,has-pfunc");
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
@@ -841,25 +810,10 @@ static int davinci_i2c_probe(struct platform_device *pdev)
adap->algo = &i2c_davinci_algo;
adap->dev.parent = &pdev->dev;
adap->timeout = DAVINCI_I2C_TIMEOUT;
- adap->dev.of_node = pdev->dev.of_node;
+ adap->dev.of_node = dev_of_node(&pdev->dev);
- if (dev->pdata->has_pfunc)
+ if (dev->has_pfunc)
adap->bus_recovery_info = &davinci_i2c_scl_recovery_info;
- else if (dev->pdata->gpio_recovery) {
- rinfo = &davinci_i2c_gpio_recovery_info;
- adap->bus_recovery_info = rinfo;
- rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl",
- GPIOD_OUT_HIGH_OPEN_DRAIN);
- if (IS_ERR(rinfo->scl_gpiod)) {
- r = PTR_ERR(rinfo->scl_gpiod);
- goto err_unuse_clocks;
- }
- rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
- if (IS_ERR(rinfo->sda_gpiod)) {
- r = PTR_ERR(rinfo->sda_gpiod);
- goto err_unuse_clocks;
- }
- }
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
@@ -935,7 +889,7 @@ MODULE_DEVICE_TABLE(platform, davinci_i2c_driver_ids);
static struct platform_driver davinci_i2c_driver = {
.probe = davinci_i2c_probe,
- .remove_new = davinci_i2c_remove,
+ .remove = davinci_i2c_remove,
.id_table = davinci_i2c_driver_ids,
.driver = {
.name = "i2c_davinci",
diff --git a/drivers/i2c/busses/i2c-designware-amdpsp.c b/drivers/i2c/busses/i2c-designware-amdpsp.c
index 63454b06e5da..8fbd2a10c31a 100644
--- a/drivers/i2c/busses/i2c-designware-amdpsp.c
+++ b/drivers/i2c/busses/i2c-designware-amdpsp.c
@@ -155,7 +155,7 @@ static void psp_release_i2c_bus_deferred(struct work_struct *work)
/*
* If there is any pending transaction, cannot release the bus here.
- * psp_release_i2c_bus will take care of this later.
+ * psp_release_i2c_bus() will take care of this later.
*/
if (psp_i2c_access_count)
goto cleanup;
@@ -210,12 +210,12 @@ static void psp_release_i2c_bus(void)
{
mutex_lock(&psp_i2c_access_mutex);
- /* Return early if mailbox was malfunctional */
+ /* Return early if the mailbox malfunctioned */
if (psp_i2c_mbox_fail)
goto cleanup;
/*
- * If we are last owner of PSP semaphore, need to release aribtration
+ * If we are the last owner of the PSP semaphore, release arbitration
* via mailbox.
*/
psp_i2c_access_count--;
@@ -235,9 +235,9 @@ cleanup:
/*
* Locking methods are based on the default implementation from
- * drivers/i2c/i2c-core-base.c, but with psp acquire and release operations
+ * drivers/i2c/i2c-core-base.c, but with PSP acquire and release operations
* added. With this in place we can ensure that i2c clients on the bus shared
- * with psp are able to lock HW access to the bus for arbitrary number of
+ * with PSP are able to lock HW access to the bus for an arbitrary number of
* operations - that is e.g. write-wait-read.
*/
static void i2c_adapter_dw_psp_lock_bus(struct i2c_adapter *adapter,
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index f31d352d98b5..8eb7bd640f8d 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -8,6 +8,9 @@
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW_COMMON"
+
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -29,11 +32,9 @@
#include <linux/types.h>
#include <linux/units.h>
-#define DEFAULT_SYMBOL_NAMESPACE I2C_DW_COMMON
-
#include "i2c-designware-core.h"
-static char *abort_sources[] = {
+static const char *const abort_sources[] = {
[ABRT_7B_ADDR_NOACK] =
"slave address not acknowledged (7bit mode)",
[ABRT_10ADDR1_NOACK] =
@@ -127,6 +128,8 @@ static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val)
* Autodetects needed register access mode and creates the regmap with
* corresponding read/write callbacks. This must be called before doing any
* other register access.
+ *
+ * Return: 0 on success, or negative errno otherwise.
*/
int i2c_dw_init_regmap(struct dw_i2c_dev *dev)
{
@@ -174,7 +177,7 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev)
/*
* Note we'll check the return value of the regmap IO accessors only
* at the probe stage. The rest of the code won't do this because
- * basically we have MMIO-based regmap so non of the read/write methods
+ * basically we have MMIO-based regmap, so none of the read/write methods
* can fail.
*/
dev->map = devm_regmap_init(dev->dev, NULL, dev, &map_cfg);
@@ -336,7 +339,7 @@ static u32 i2c_dw_acpi_round_bus_speed(struct device *device)
acpi_speed = i2c_acpi_find_bus_speed(device);
/*
- * Some DSTDs use a non standard speed, round down to the lowest
+ * Some DSDTs use a non-standard speed; round down to the lowest
* standard speed.
*/
for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
@@ -380,6 +383,11 @@ int i2c_dw_fw_parse_and_configure(struct dw_i2c_dev *dev)
i2c_parse_fw_timings(device, t, false);
+ if (device_property_read_u32(device, "snps,bus-capacitance-pf", &dev->bus_capacitance_pF))
+ dev->bus_capacitance_pF = 100;
+
+ dev->clk_freq_optimized = device_property_read_bool(device, "snps,clk-freq-optimized");
+
i2c_dw_adjust_bus_speed(dev);
if (is_of_node(fwnode))
@@ -407,47 +415,26 @@ static u32 i2c_dw_read_scl_reg(struct dw_i2c_dev *dev, u32 reg)
}
u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
- u32 tSYMBOL, u32 tf, int cond, int offset)
+ u32 tSYMBOL, u32 tf, int offset)
{
if (!ic_clk)
return i2c_dw_read_scl_reg(dev, reg);
/*
- * DesignWare I2C core doesn't seem to have solid strategy to meet
- * the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec
- * will result in violation of the tHD;STA spec.
+ * Conditional expression:
+ *
+ * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf)
+ *
+ * This is just an experimental rule; the tHD;STA period turned
+ * out to be proportional to (_HCNT + 3). With this setting,
+ * we could meet both tHIGH and tHD;STA timing specs.
+ *
+ * If unsure, you'd better take this alternative.
+ *
+ * The reason we need to take "tf" into account here is the
+ * same as described in i2c_dw_scl_lcnt().
*/
- if (cond)
- /*
- * Conditional expression:
- *
- * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH
- *
- * This is based on the DW manuals, and represents an ideal
- * configuration. The resulting I2C bus speed will be
- * faster than any of the others.
- *
- * If your hardware is free from tHD;STA issue, try this one.
- */
- return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, MICRO) -
- 8 + offset;
- else
- /*
- * Conditional expression:
- *
- * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf)
- *
- * This is just experimental rule; the tHD;STA period turned
- * out to be proportinal to (_HCNT + 3). With this setting,
- * we could meet both tHIGH and tHD;STA timing specs.
- *
- * If unsure, you'd better to take this alternative.
- *
- * The reason why we need to take into account "tf" here,
- * is the same as described in i2c_dw_scl_lcnt().
- */
- return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) -
- 3 + offset;
+ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), MICRO) - 3 + offset;
}
u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
@@ -467,8 +454,7 @@ u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
* account the fall time of SCL signal (tf). Default tf value
* should be 0.3 us, for safety.
*/
- return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) -
- 1 + offset;
+ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), MICRO) - 1 + offset;
}
int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
@@ -524,7 +510,7 @@ err_release_lock:
void __i2c_dw_disable(struct dw_i2c_dev *dev)
{
struct i2c_timings *t = &dev->timings;
- unsigned int raw_intr_stats;
+ unsigned int raw_intr_stats, ic_stats;
unsigned int enable;
int timeout = 100;
bool abort_needed;
@@ -532,9 +518,11 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev)
int ret;
regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats);
+ regmap_read(dev->map, DW_IC_STATUS, &ic_stats);
regmap_read(dev->map, DW_IC_ENABLE, &enable);
- abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD;
+ abort_needed = (raw_intr_stats & DW_IC_INTR_MST_ON_HOLD) ||
+ (ic_stats & DW_IC_STATUS_MASTER_HOLD_TX_FIFO_EMPTY);
if (abort_needed) {
if (!(enable & DW_IC_ENABLE_ENABLE)) {
regmap_write(dev->map, DW_IC_ENABLE, DW_IC_ENABLE_ENABLE);
@@ -569,7 +557,7 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev)
/*
* Wait 10 times the signaling period of the highest I2C
- * transfer supported by the driver (for 400KHz this is
+ * transfer supported by the driver (for 400kHz this is
* 25us) as described in the DesignWare I2C databook.
*/
usleep_range(25, 250);
@@ -676,10 +664,10 @@ int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
if (abort_source & DW_IC_TX_ARB_LOST)
return -EAGAIN;
- else if (abort_source & DW_IC_TX_ABRT_GCALL_READ)
+ if (abort_source & DW_IC_TX_ABRT_GCALL_READ)
return -EINVAL; /* wrong msgs[] data */
- else
- return -EIO;
+
+ return -EIO;
}
int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev)
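
With the unused "ideal" branch removed, i2c_dw_scl_hcnt() reduces to a single
DIV_ROUND_CLOSEST expression. Assuming a 100 MHz IP clock (the driver passes kHz)
and the standard-mode timings i2c-designware-master.c uses (tHIGH 4.0 us, tLOW
4.7 us, 300 ns fall time), the counts work out as in this standalone sketch:

    #include <stdio.h>

    /* Mirrors the surviving formulas; times in ns, clock in kHz. */
    static unsigned long scl_hcnt(unsigned long ic_clk_khz, unsigned long tsymbol,
                                  unsigned long tf, long offset)
    {
            return (ic_clk_khz * (tsymbol + tf) + 500000) / 1000000 - 3 + offset;
    }

    static unsigned long scl_lcnt(unsigned long ic_clk_khz, unsigned long tlow,
                                  unsigned long tf, long offset)
    {
            return (ic_clk_khz * (tlow + tf) + 500000) / 1000000 - 1 + offset;
    }

    int main(void)
    {
            /* standard mode: tHD;STA = tHIGH = 4.0 us, tLOW = 4.7 us */
            printf("ss_hcnt = %lu\n", scl_hcnt(100000, 4000, 300, 0));  /* 427 */
            printf("ss_lcnt = %lu\n", scl_lcnt(100000, 4700, 300, 0));  /* 499 */
            return 0;
    }
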
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 8e8854ec9882..347843b4f5dd 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -116,6 +116,7 @@
#define DW_IC_STATUS_RFNE BIT(3)
#define DW_IC_STATUS_MASTER_ACTIVITY BIT(5)
#define DW_IC_STATUS_SLAVE_ACTIVITY BIT(6)
+#define DW_IC_STATUS_MASTER_HOLD_TX_FIFO_EMPTY BIT(7)
#define DW_IC_SDA_HOLD_RX_SHIFT 16
#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, 16)
@@ -142,10 +143,10 @@
#define DW_IC_SLAVE 1
/*
- * Hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
+ * Hardware abort codes from the DW_IC_TX_ABRT_SOURCE register.
*
- * Only expected abort codes are listed here
- * refer to the datasheet for the full list
+ * Only expected abort codes are listed here;
+ * refer to the datasheet for the full list.
*/
#define ABRT_7B_ADDR_NOACK 0
#define ABRT_10ADDR1_NOACK 1
@@ -200,7 +201,7 @@ struct reset_control;
* @rst: optional reset for the controller
* @slave: represent an I2C slave device
* @get_clk_rate_khz: callback to retrieve IP specific bus speed
- * @cmd_err: run time hadware error code
+ * @cmd_err: run time hardware error code
* @msgs: points to an array of messages currently being transferred
* @msgs_num: the number of elements in msgs
* @msg_write_idx: the element index of the current tx message in the msgs array
@@ -236,11 +237,15 @@ struct reset_control;
* @release_lock: function to release a hardware lock on the bus
* @semaphore_idx: Index of table with semaphore type attached to the bus. It's
* -1 if there is no semaphore.
- * @shared_with_punit: true if this bus is shared with the SoCs PUNIT
+ * @shared_with_punit: true if this bus is shared with the SoC's PUNIT
* @init: function to initialize the I2C hardware
* @set_sda_hold_time: callback to retrieve IP specific SDA hold timing
* @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE
* @rinfo: I²C GPIO recovery information
+ * @bus_capacitance_pF: bus capacitance in picofarads
+ * @clk_freq_optimized: true if the hardware reduces its internal clock
+ * frequency by reducing the internal latency required to generate
+ * the high and low periods of the SCL line.
*
* HCNT and LCNT parameters can be used if the platform knows more accurate
* values than the one computed based only on the input clock frequency.
@@ -298,6 +303,8 @@ struct dw_i2c_dev {
int (*set_sda_hold_time)(struct dw_i2c_dev *dev);
int mode;
struct i2c_bus_recovery_info rinfo;
+ u32 bus_capacitance_pF;
+ bool clk_freq_optimized;
};
#define ACCESS_INTR_MASK BIT(0)
@@ -328,7 +335,7 @@ struct i2c_dw_semaphore_callbacks {
int i2c_dw_init_regmap(struct dw_i2c_dev *dev);
u32 i2c_dw_scl_hcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
- u32 tSYMBOL, u32 tf, int cond, int offset);
+ u32 tSYMBOL, u32 tf, int offset);
u32 i2c_dw_scl_lcnt(struct dw_i2c_dev *dev, unsigned int reg, u32 ic_clk,
u32 tLOW, u32 tf, int offset);
int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index e8ac9a7bf0b3..2569bf1a72e0 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -8,6 +8,9 @@
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
+
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -22,8 +25,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-#define DEFAULT_SYMBOL_NAMESPACE I2C_DW
-
#include "i2c-designware-core.h"
#define AMD_TIMEOUT_MIN_US 25
@@ -71,7 +72,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
ic_clk,
4000, /* tHD;STA = tHIGH = 4.0 us */
sda_falling_time,
- 0, /* 0: DW default, 1: Ideal */
0); /* No offset */
dev->ss_lcnt =
i2c_dw_scl_lcnt(dev,
@@ -105,7 +105,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
ic_clk,
260, /* tHIGH = 260 ns */
sda_falling_time,
- 0, /* DW default */
0); /* No offset */
dev->fs_lcnt =
i2c_dw_scl_lcnt(dev,
@@ -129,7 +128,6 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
ic_clk,
600, /* tHD;STA = tHIGH = 0.6 us */
sda_falling_time,
- 0, /* 0: DW default, 1: Ideal */
0); /* No offset */
dev->fs_lcnt =
i2c_dw_scl_lcnt(dev,
@@ -154,20 +152,38 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
dev->hs_hcnt = 0;
dev->hs_lcnt = 0;
} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
+ u32 t_high, t_low;
+
+ /*
+ * The legal values stated in the databook for bus
+ * capacitance are only 100pF and 400pF.
+ * If dev->bus_capacitance_pF is greater than or equal
+ * to 400, t_high and t_low are assumed to be
+ * appropriate values for 400pF, otherwise 100pF.
+ */
+ if (dev->bus_capacitance_pF >= 400) {
+ /* assume bus capacitance is 400pF */
+ t_high = dev->clk_freq_optimized ? 160 : 120;
+ t_low = 320;
+ } else {
+ /* assume bus capacitance is 100pF */
+ t_high = 60;
+ t_low = dev->clk_freq_optimized ? 120 : 160;
+ }
+
ic_clk = i2c_dw_clk_rate(dev);
dev->hs_hcnt =
i2c_dw_scl_hcnt(dev,
DW_IC_HS_SCL_HCNT,
ic_clk,
- 160, /* tHIGH = 160 ns */
+ t_high,
sda_falling_time,
- 0, /* DW default */
0); /* No offset */
dev->hs_lcnt =
i2c_dw_scl_lcnt(dev,
DW_IC_HS_SCL_LCNT,
ic_clk,
- 320, /* tLOW = 320 ns */
+ t_low,
scl_falling_time,
0); /* No offset */
}
@@ -184,12 +200,14 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
}
/**
- * i2c_dw_init_master() - Initialize the designware I2C master hardware
+ * i2c_dw_init_master() - Initialize the DesignWare I2C master hardware
* @dev: device private data
*
* This function configures and enables the I2C master.
* This function is called during I2C init function, and in case of timeout at
* run time.
+ *
+ * Return: 0 on success, or negative errno otherwise.
*/
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
@@ -357,7 +375,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
/*
* Initiate the i2c read/write transaction of buffer length,
* and poll for bus busy status. For the last message transfer,
- * update the command with stopbit enable.
+ * update the command with stop bit enable.
*/
for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
@@ -402,7 +420,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
/*
* Initiate (and continue) low level master read/write transaction.
- * This function is only called from i2c_dw_isr, and pumping i2c_msg
+ * This function is only called from i2c_dw_isr(), and pumping i2c_msg
* messages into the tx buffer. Even if the size of i2c_msg data is
* longer than the size of the tx buffer, it handles everything.
*/
@@ -440,7 +458,8 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
buf = msgs[dev->msg_write_idx].buf;
buf_len = msgs[dev->msg_write_idx].len;
- /* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
+ /*
+ * If both IC_EMPTYFIFO_HOLD_MASTER_EN and
* IC_RESTART_EN are set, we must manually
* set restart bit between messages.
*/
@@ -971,7 +990,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
adap->bus_recovery_info = rinfo;
- dev_info(dev->dev, "running with gpio recovery mode! scl%s",
+ dev_info(dev->dev, "running with GPIO recovery mode! scl%s",
rinfo->sda_gpiod ? ",sda" : "");
return 0;
@@ -1076,4 +1095,4 @@ EXPORT_SYMBOL_GPL(i2c_dw_probe_master);
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(I2C_DW_COMMON);
+MODULE_IMPORT_NS("I2C_DW_COMMON");
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 7b2c5d71a7fc..8e0267c7cc29 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -51,7 +51,7 @@ struct dw_scl_sda_cfg {
u16 fs_hcnt;
u16 ss_lcnt;
u16 fs_lcnt;
- u32 sda_hold;
+ u32 sda_hold_time;
};
struct dw_pci_controller {
@@ -76,7 +76,7 @@ static struct dw_scl_sda_cfg byt_config = {
.fs_hcnt = 0x55,
.ss_lcnt = 0x200,
.fs_lcnt = 0x99,
- .sda_hold = 0x6,
+ .sda_hold_time = 0x6,
};
/* Haswell HCNT/LCNT/SDA hold time */
@@ -85,14 +85,14 @@ static struct dw_scl_sda_cfg hsw_config = {
.fs_hcnt = 0x48,
.ss_lcnt = 0x01fb,
.fs_lcnt = 0xa0,
- .sda_hold = 0x9,
+ .sda_hold_time = 0x9,
};
/* NAVI-AMD HCNT/LCNT/SDA hold time */
static struct dw_scl_sda_cfg navi_amd_config = {
.ss_hcnt = 0x1ae,
.ss_lcnt = 0x23a,
- .sda_hold = 0x9,
+ .sda_hold_time = 0x9,
};
static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev)
@@ -207,6 +207,7 @@ static const struct software_node dgpu_node = {
static int i2c_dw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ struct device *device = &pdev->dev;
struct dw_i2c_dev *dev;
struct i2c_adapter *adap;
int r;
@@ -214,25 +215,22 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
struct dw_scl_sda_cfg *cfg;
if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers))
- return dev_err_probe(&pdev->dev, -EINVAL,
- "Invalid driver data %ld\n",
+ return dev_err_probe(device, -EINVAL, "Invalid driver data %ld\n",
id->driver_data);
controller = &dw_pci_controllers[id->driver_data];
r = pcim_enable_device(pdev);
if (r)
- return dev_err_probe(&pdev->dev, r,
- "Failed to enable I2C PCI device\n");
+ return dev_err_probe(device, r, "Failed to enable I2C PCI device\n");
pci_set_master(pdev);
r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
if (r)
- return dev_err_probe(&pdev->dev, r,
- "I/O memory remapping failed\n");
+ return dev_err_probe(device, r, "I/O memory remapping failed\n");
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -242,7 +240,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
dev->get_clk_rate_khz = controller->get_clk_rate_khz;
dev->base = pcim_iomap_table(pdev)[0];
- dev->dev = &pdev->dev;
+ dev->dev = device;
dev->irq = pci_irq_vector(pdev, 0);
dev->flags |= controller->flags;
@@ -266,7 +264,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
dev->fs_hcnt = cfg->fs_hcnt;
dev->ss_lcnt = cfg->ss_lcnt;
dev->fs_lcnt = cfg->fs_lcnt;
- dev->sda_hold_time = cfg->sda_hold;
+ dev->sda_hold_time = cfg->sda_hold_time;
}
adap = &dev->adapter;
@@ -281,14 +279,14 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node);
if (IS_ERR(dev->slave))
- return dev_err_probe(dev->dev, PTR_ERR(dev->slave),
+ return dev_err_probe(device, PTR_ERR(dev->slave),
"register UCSI failed\n");
}
- pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(device, 1000);
+ pm_runtime_use_autosuspend(device);
+ pm_runtime_put_autosuspend(device);
+ pm_runtime_allow(device);
return 0;
}
@@ -296,11 +294,12 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
static void i2c_dw_pci_remove(struct pci_dev *pdev)
{
struct dw_i2c_dev *dev = pci_get_drvdata(pdev);
+ struct device *device = &pdev->dev;
i2c_dw_disable(dev);
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_forbid(device);
+ pm_runtime_get_noresume(device);
i2c_del_adapter(&dev->adapter);
}
@@ -369,5 +368,5 @@ module_pci_driver(dw_i2c_driver);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(I2C_DW);
-MODULE_IMPORT_NS(I2C_DW_COMMON);
+MODULE_IMPORT_NS("I2C_DW");
+MODULE_IMPORT_NS("I2C_DW_COMMON");
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2d0c7348e491..d6e1ee935399 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -72,7 +72,7 @@ static int bt1_i2c_write(void *context, unsigned int reg, unsigned int val)
return ret;
return regmap_write(dev->sysmap, BT1_I2C_CTL,
- BT1_I2C_CTL_GO | BT1_I2C_CTL_WR | (reg & BT1_I2C_CTL_ADDR_MASK));
+ BT1_I2C_CTL_GO | BT1_I2C_CTL_WR | (reg & BT1_I2C_CTL_ADDR_MASK));
}
static const struct regmap_config bt1_i2c_cfg = {
@@ -205,6 +205,7 @@ static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev)
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
+ struct device *device = &pdev->dev;
struct i2c_adapter *adap;
struct dw_i2c_dev *dev;
int irq, ret;
@@ -213,15 +214,15 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ dev = devm_kzalloc(device, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- dev->flags = (uintptr_t)device_get_match_data(&pdev->dev);
- if (device_property_present(&pdev->dev, "wx,i2c-snps-model"))
+ dev->flags = (uintptr_t)device_get_match_data(device);
+ if (device_property_present(device, "wx,i2c-snps-model"))
dev->flags = MODEL_WANGXUN_SP | ACCESS_POLLING;
- dev->dev = &pdev->dev;
+ dev->dev = device;
dev->irq = irq;
platform_set_drvdata(pdev, dev);
@@ -229,7 +230,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
if (ret)
return ret;
- dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ dev->rst = devm_reset_control_get_optional_exclusive(device, NULL);
if (IS_ERR(dev->rst))
return PTR_ERR(dev->rst);
@@ -246,13 +247,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
i2c_dw_configure(dev);
/* Optional interface clock */
- dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
+ dev->pclk = devm_clk_get_optional(device, "pclk");
if (IS_ERR(dev->pclk)) {
ret = PTR_ERR(dev->pclk);
goto exit_reset;
}
- dev->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ dev->clk = devm_clk_get_optional(device, NULL);
if (IS_ERR(dev->clk)) {
ret = PTR_ERR(dev->clk);
goto exit_reset;
@@ -277,31 +278,27 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
adap = &dev->adapter;
adap->owner = THIS_MODULE;
adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ?
- I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
+ I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
adap->nr = -1;
- if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
- dev_pm_set_driver_flags(&pdev->dev,
- DPM_FLAG_SMART_PREPARE);
- } else {
- dev_pm_set_driver_flags(&pdev->dev,
- DPM_FLAG_SMART_PREPARE |
- DPM_FLAG_SMART_SUSPEND);
- }
+ if (dev->flags & ACCESS_NO_IRQ_SUSPEND)
+ dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE);
+ else
+ dev_pm_set_driver_flags(device, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND);
- device_enable_async_suspend(&pdev->dev);
+ device_enable_async_suspend(device);
/* The code below assumes runtime PM to be disabled. */
- WARN_ON(pm_runtime_enabled(&pdev->dev));
+ WARN_ON(pm_runtime_enabled(device));
- pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(device, 1000);
+ pm_runtime_use_autosuspend(device);
+ pm_runtime_set_active(device);
if (dev->shared_with_punit)
- pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_get_noresume(device);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(device);
ret = i2c_dw_probe(dev);
if (ret)
@@ -319,15 +316,16 @@ exit_reset:
static void dw_i2c_plat_remove(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct device *device = &pdev->dev;
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_sync(device);
i2c_del_adapter(&dev->adapter);
i2c_dw_disable(dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(device);
+ pm_runtime_put_sync(device);
dw_i2c_plat_pm_cleanup(dev);
i2c_dw_remove_lock_support(dev);
@@ -351,9 +349,11 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "AMDI0019", ACCESS_INTR_MASK | ARBITRATION_SEMAPHORE },
{ "AMDI0510", 0 },
{ "APMC0D0F", 0 },
+ { "FUJI200B", 0 },
{ "HISI02A1", 0 },
{ "HISI02A2", 0 },
{ "HISI02A3", 0 },
+ { "HJMC3001", 0 },
{ "HYGO0010", ACCESS_INTR_MASK },
{ "INT33C2", 0 },
{ "INT33C3", 0 },
@@ -372,7 +372,7 @@ MODULE_DEVICE_TABLE(platform, dw_i2c_platform_ids);
static struct platform_driver dw_i2c_driver = {
.probe = dw_i2c_plat_probe,
- .remove_new = dw_i2c_plat_remove,
+ .remove = dw_i2c_plat_remove,
.driver = {
.name = "i2c_designware",
.of_match_table = dw_i2c_of_match,
@@ -397,5 +397,5 @@ module_exit(dw_i2c_exit_driver);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(I2C_DW);
-MODULE_IMPORT_NS(I2C_DW_COMMON);
+MODULE_IMPORT_NS("I2C_DW");
+MODULE_IMPORT_NS("I2C_DW_COMMON");
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 7035296aa24c..5cd4a5f7a472 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -6,6 +6,9 @@
*
* Copyright (C) 2016 Synopsys Inc.
*/
+
+#define DEFAULT_SYMBOL_NAMESPACE "I2C_DW"
+
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -16,8 +19,6 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#define DEFAULT_SYMBOL_NAMESPACE I2C_DW
-
#include "i2c-designware-core.h"
static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev)
@@ -32,12 +33,14 @@ static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev)
}
/**
- * i2c_dw_init_slave() - Initialize the designware i2c slave hardware
+ * i2c_dw_init_slave() - Initialize the DesignWare i2c slave hardware
* @dev: device private data
*
* This function configures and enables the I2C in slave mode.
* This function is called during I2C init function, and in case of timeout at
* run time.
+ *
+ * Return: 0 on success, or negative errno otherwise.
*/
static int i2c_dw_init_slave(struct dw_i2c_dev *dev)
{
@@ -264,7 +267,7 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr_slave,
IRQF_SHARED, dev_name(dev->dev), dev);
if (ret) {
- dev_err(dev->dev, "failure requesting irq %i: %d\n",
+ dev_err(dev->dev, "failure requesting IRQ %i: %d\n",
dev->irq, ret);
return ret;
}
@@ -280,4 +283,4 @@ EXPORT_SYMBOL_GPL(i2c_dw_probe_slave);
MODULE_AUTHOR("Luis Oliveira <lolivei@synopsys.com>");
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus slave adapter");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(I2C_DW_COMMON);
+MODULE_IMPORT_NS("I2C_DW_COMMON");
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 3dc5a46698fc..38d7f31aee79 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -363,7 +363,7 @@ MODULE_DEVICE_TABLE(of, dc_i2c_match);
static struct platform_driver dc_i2c_driver = {
.probe = dc_i2c_probe,
- .remove_new = dc_i2c_remove,
+ .remove = dc_i2c_remove,
.driver = {
.name = "digicolor-i2c",
.of_match_table = dc_i2c_match,
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index 11ed055143d3..bde2ef098862 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -245,7 +245,7 @@ static void dln2_i2c_remove(struct platform_device *pdev)
static struct platform_driver dln2_i2c_driver = {
.driver.name = "dln2-i2c",
.probe = dln2_i2c_probe,
- .remove_new = dln2_i2c_remove,
+ .remove = dln2_i2c_remove,
};
module_platform_driver(dln2_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index d08be3f3cede..2512cef8e2a2 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -425,7 +425,7 @@ static const struct of_device_id em_i2c_ids[] = {
static struct platform_driver em_i2c_driver = {
.probe = em_i2c_probe,
- .remove_new = em_i2c_remove,
+ .remove = em_i2c_remove,
.driver = {
.name = "em-i2c",
.of_match_table = em_i2c_ids,
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index d8baca9b610c..6cdd957ea7e4 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -168,6 +168,7 @@ enum i2c_type_exynos {
I2C_TYPE_EXYNOS5,
I2C_TYPE_EXYNOS7,
I2C_TYPE_EXYNOSAUTOV9,
+ I2C_TYPE_EXYNOS8895,
};
struct exynos5_i2c {
@@ -240,6 +241,11 @@ static const struct exynos_hsi2c_variant exynosautov9_hsi2c_data = {
.hw = I2C_TYPE_EXYNOSAUTOV9,
};
+static const struct exynos_hsi2c_variant exynos8895_hsi2c_data = {
+ .fifo_depth = 64,
+ .hw = I2C_TYPE_EXYNOS8895,
+};
+
static const struct of_device_id exynos5_i2c_match[] = {
{
.compatible = "samsung,exynos5-hsi2c",
@@ -256,6 +262,9 @@ static const struct of_device_id exynos5_i2c_match[] = {
}, {
.compatible = "samsung,exynosautov9-hsi2c",
.data = &exynosautov9_hsi2c_data
+ }, {
+ .compatible = "samsung,exynos8895-hsi2c",
+ .data = &exynos8895_hsi2c_data
}, {},
};
MODULE_DEVICE_TABLE(of, exynos5_i2c_match);
@@ -331,6 +340,14 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
* clk_cycle := TSCLK_L + TSCLK_H
* temp := (CLK_DIV + 1) * (clk_cycle + 2)
*
+ * In case of HSI2C controllers in Exynos8895
+ * FPCLK / FI2C =
+ * (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) +
+ * 2 * ((FLT_CYCLE + 3) - (FLT_CYCLE + 3) % (CLK_DIV + 1))
+ *
+ * clk_cycle := TSCLK_L + TSCLK_H
+ * temp := (FPCLK / FI2C) - (FLT_CYCLE + 3) * 2
+ *
* Constraints: 4 <= temp, 0 <= CLK_DIV < 256, 2 <= clk_cycle <= 510
*
* To split SCL clock into low, high periods appropriately, one
@@ -352,11 +369,19 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
*
*/
t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7;
- temp = clkin / op_clk - 8 - t_ftl_cycle;
- if (i2c->variant->hw != I2C_TYPE_EXYNOS7)
- temp -= t_ftl_cycle;
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS8895)
+ temp = clkin / op_clk - (t_ftl_cycle + 3) * 2;
+ else if (i2c->variant->hw == I2C_TYPE_EXYNOS7)
+ temp = clkin / op_clk - 8 - t_ftl_cycle;
+ else
+ temp = clkin / op_clk - 8 - (t_ftl_cycle * 2);
div = temp / 512;
- clk_cycle = temp / (div + 1) - 2;
+
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS8895)
+ clk_cycle = (temp + ((t_ftl_cycle + 3) % (div + 1)) * 2) /
+ (div + 1) - 2;
+ else
+ clk_cycle = temp / (div + 1) - 2;
if (temp < 4 || div >= 256 || clk_cycle < 2) {
dev_err(i2c->dev, "%s clock set-up failed\n",
hs_timings ? "HS" : "FS");
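To make the Exynos8895 arithmetic concrete, a worked pass with assumed figures (clkin = 200 MHz, op_clk = 400 kHz, FLT_CYCLE = 2; illustrative values, not datasheet ones):

	/* Assumed: clkin = 200000000, op_clk = 400000, t_ftl_cycle = 2 */
	temp = 200000000 / 400000 - (2 + 3) * 2;	/* 500 - 10 = 490 */
	div = 490 / 512;				/* 0 */
	clk_cycle = (490 + ((2 + 3) % (0 + 1)) * 2) / (0 + 1) - 2;	/* 488 */
	/* temp >= 4, div < 256, clk_cycle within [2, 510]: set-up succeeds */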
@@ -491,6 +516,8 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
switch (i2c->variant->hw) {
case I2C_TYPE_EXYNOSAUTOV9:
fallthrough;
+ case I2C_TYPE_EXYNOS8895:
+ fallthrough;
case I2C_TYPE_EXYNOS7:
if (int_status & HSI2C_INT_TRANS_DONE) {
i2c->trans_done = 1;
@@ -1009,7 +1036,7 @@ static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = {
static struct platform_driver exynos5_i2c_driver = {
.probe = exynos5_i2c_probe,
- .remove_new = exynos5_i2c_remove,
+ .remove = exynos5_i2c_remove,
.driver = {
.name = "exynos5-hsi2c",
.pm = pm_sleep_ptr(&exynos5_i2c_dev_pm_ops),
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index e0bd218e2f14..f4355b17bfbf 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -481,7 +481,7 @@ static struct platform_driver i2c_gpio_driver = {
.acpi_match_table = i2c_gpio_acpi_match,
},
.probe = i2c_gpio_probe,
- .remove_new = i2c_gpio_remove,
+ .remove = i2c_gpio_remove,
};
static int __init i2c_gpio_init(void)
diff --git a/drivers/i2c/busses/i2c-gxp.c b/drivers/i2c/busses/i2c-gxp.c
index efafc0528c44..0fc39caa6c87 100644
--- a/drivers/i2c/busses/i2c-gxp.c
+++ b/drivers/i2c/busses/i2c-gxp.c
@@ -595,7 +595,7 @@ MODULE_DEVICE_TABLE(of, gxp_i2c_of_match);
static struct platform_driver gxp_i2c_driver = {
.probe = gxp_i2c_probe,
- .remove_new = gxp_i2c_remove,
+ .remove = gxp_i2c_remove,
.driver = {
.name = "gxp-i2c",
.of_match_table = gxp_i2c_of_match,
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index ec1ebacb9aa8..78c5845e0877 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -454,7 +454,7 @@ static struct platform_driver highlander_i2c_driver = {
},
.probe = highlander_i2c_probe,
- .remove_new = highlander_i2c_remove,
+ .remove = highlander_i2c_remove,
};
module_platform_driver(highlander_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 64cade6ba923..370f32974763 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -508,7 +508,7 @@ MODULE_DEVICE_TABLE(of, hix5hd2_i2c_match);
static struct platform_driver hix5hd2_i2c_driver = {
.probe = hix5hd2_i2c_probe,
- .remove_new = hix5hd2_i2c_remove,
+ .remove = hix5hd2_i2c_remove,
.driver = {
.name = "hix5hd2-i2c",
.pm = pm_ptr(&hix5hd2_i2c_pm_ops),
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 299fe9d3afab..171d29d2770e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -81,6 +81,8 @@
* Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes
* Birch Stream (SOC) 0x5796 32 hard yes yes yes
* Arrow Lake-H (SOC) 0x7722 32 hard yes yes yes
+ * Panther Lake-H (SOC) 0xe322 32 hard yes yes yes
+ * Panther Lake-P (SOC) 0xe422 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -261,6 +263,8 @@
#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
#define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS 0xa3a3
#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_SOC_S_SMBUS 0xae22
+#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_H_SMBUS 0xe322
+#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_P_SMBUS 0xe422
struct i801_mux_config {
char *gpio_chip;
@@ -1055,6 +1059,8 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
@@ -1156,127 +1162,6 @@ static void dmi_check_onboard_devices(const struct dmi_header *dm, void *adap)
}
}
-/* NOTE: Keep this list in sync with drivers/platform/x86/dell-smo8800.c */
-static const char *const acpi_smo8800_ids[] = {
- "SMO8800",
- "SMO8801",
- "SMO8810",
- "SMO8811",
- "SMO8820",
- "SMO8821",
- "SMO8830",
- "SMO8831",
-};
-
-static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle,
- u32 nesting_level,
- void *context,
- void **return_value)
-{
- struct acpi_device_info *info;
- acpi_status status;
- char *hid;
- int i;
-
- status = acpi_get_object_info(obj_handle, &info);
- if (ACPI_FAILURE(status))
- return AE_OK;
-
- if (!(info->valid & ACPI_VALID_HID))
- goto smo88xx_not_found;
-
- hid = info->hardware_id.string;
- if (!hid)
- goto smo88xx_not_found;
-
- i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
- if (i < 0)
- goto smo88xx_not_found;
-
- kfree(info);
-
- *return_value = NULL;
- return AE_CTRL_TERMINATE;
-
-smo88xx_not_found:
- kfree(info);
- return AE_OK;
-}
-
-static bool is_dell_system_with_lis3lv02d(void)
-{
- void *err = ERR_PTR(-ENOENT);
-
- if (!dmi_match(DMI_SYS_VENDOR, "Dell Inc."))
- return false;
-
- /*
- * Check that ACPI device SMO88xx is present and is functioning.
- * Function acpi_get_devices() already filters all ACPI devices
- * which are not present or are not functioning.
- * ACPI device SMO88xx represents our ST microelectronics lis3lv02d
- * accelerometer but unfortunately ACPI does not provide any other
- * information (like I2C address).
- */
- acpi_get_devices(NULL, check_acpi_smo88xx_device, NULL, &err);
-
- return !IS_ERR(err);
-}
-
-/*
- * Accelerometer's I2C address is not specified in DMI nor ACPI,
- * so it is needed to define mapping table based on DMI product names.
- */
-static const struct {
- const char *dmi_product_name;
- unsigned short i2c_addr;
-} dell_lis3lv02d_devices[] = {
- /*
- * Dell platform team told us that these Latitude devices have
- * ST microelectronics accelerometer at I2C address 0x29.
- */
- { "Latitude E5250", 0x29 },
- { "Latitude E5450", 0x29 },
- { "Latitude E5550", 0x29 },
- { "Latitude E6440", 0x29 },
- { "Latitude E6440 ATG", 0x29 },
- { "Latitude E6540", 0x29 },
- /*
- * Additional individual entries were added after verification.
- */
- { "Latitude 5480", 0x29 },
- { "Precision 3540", 0x29 },
- { "Vostro V131", 0x1d },
- { "Vostro 5568", 0x29 },
- { "XPS 15 7590", 0x29 },
-};
-
-static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
-{
- struct i2c_board_info info;
- const char *dmi_product_name;
- int i;
-
- dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
- for (i = 0; i < ARRAY_SIZE(dell_lis3lv02d_devices); ++i) {
- if (strcmp(dmi_product_name,
- dell_lis3lv02d_devices[i].dmi_product_name) == 0)
- break;
- }
-
- if (i == ARRAY_SIZE(dell_lis3lv02d_devices)) {
- dev_warn(&priv->pci_dev->dev,
- "Accelerometer lis3lv02d is present on SMBus but its"
- " address is unknown, skipping registration\n");
- return;
- }
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = dell_lis3lv02d_devices[i].i2c_addr;
- strscpy(info.type, "lis3lv02d", I2C_NAME_SIZE);
- i2c_new_client_device(&priv->adapter, &info);
-}
-
/* Register optional targets */
static void i801_probe_optional_targets(struct i801_priv *priv)
{
@@ -1296,9 +1181,6 @@ static void i801_probe_optional_targets(struct i801_priv *priv)
if (dmi_name_in_vendors("FUJITSU"))
dmi_walk(dmi_check_onboard_devices, &priv->adapter);
- if (is_dell_system_with_lis3lv02d())
- register_dell_lis3lv02d_i2c_device(priv);
-
/* Instantiate SPD EEPROMs unless the SMBus is multiplexed */
#ifdef CONFIG_I2C_I801_MUX
if (!priv->mux_pdev)
@@ -1676,13 +1558,16 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!(priv->features & FEATURE_BLOCK_BUFFER))
priv->features &= ~FEATURE_BLOCK_PROC;
- err = pcim_enable_device(dev);
+ /*
+ * Do not call pcim_enable_device(), because the device has to remain
+ * enabled on driver detach. See i801_remove() for the reasoning.
+ */
+ err = pci_enable_device(dev);
if (err) {
dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
err);
return err;
}
- pcim_pin_device(dev);
/* Determine the address of the SMBus area */
priv->smba = pci_resource_start(dev, SMBBAR);
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 82dedb1bb5be..c76c4116ddc7 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -790,7 +790,7 @@ static struct platform_driver ibm_iic_driver = {
.of_match_table = ibm_iic_match,
},
.probe = iic_probe,
- .remove_new = iic_remove,
+ .remove = iic_remove,
};
module_platform_driver(ibm_iic_driver);
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index e0e87185f6bb..02f75cf310aa 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1497,7 +1497,7 @@ static struct platform_driver img_scb_i2c_driver = {
.pm = pm_ptr(&img_i2c_pm),
},
.probe = img_i2c_probe,
- .remove_new = img_i2c_remove,
+ .remove = img_i2c_remove,
};
module_platform_driver(img_scb_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 976d43f73f38..0d4b3935e687 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -8,6 +8,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
@@ -29,6 +31,7 @@
#define LPI2C_MCR 0x10 /* i2c control register */
#define LPI2C_MSR 0x14 /* i2c status register */
#define LPI2C_MIER 0x18 /* i2c interrupt enable */
+#define LPI2C_MDER 0x1C /* i2c DMA enable */
#define LPI2C_MCFGR0 0x20 /* i2c master configuration */
#define LPI2C_MCFGR1 0x24 /* i2c master configuration */
#define LPI2C_MCFGR2 0x28 /* i2c master configuration */
@@ -40,6 +43,20 @@
#define LPI2C_MTDR 0x60 /* i2c master TX data register */
#define LPI2C_MRDR 0x70 /* i2c master RX data register */
+#define LPI2C_SCR 0x110 /* i2c target control register */
+#define LPI2C_SSR 0x114 /* i2c target status register */
+#define LPI2C_SIER 0x118 /* i2c target interrupt enable */
+#define LPI2C_SDER 0x11C /* i2c target DMA enable */
+#define LPI2C_SCFGR0 0x120 /* i2c target configuration */
+#define LPI2C_SCFGR1 0x124 /* i2c target configuration */
+#define LPI2C_SCFGR2 0x128 /* i2c target configuration */
+#define LPI2C_SAMR 0x140 /* i2c target address match */
+#define LPI2C_SASR 0x150 /* i2c target address status */
+#define LPI2C_STAR 0x154 /* i2c target transmit ACK */
+#define LPI2C_STDR 0x160 /* i2c target transmit data */
+#define LPI2C_SRDR 0x170 /* i2c target receive data */
+#define LPI2C_SRDROR 0x178 /* i2c target receive data read only */
+
/* i2c command */
#define TRAN_DATA 0X00
#define RECV_DATA 0X01
@@ -70,11 +87,50 @@
#define MCFGR1_AUTOSTOP BIT(8)
#define MCFGR1_IGNACK BIT(9)
#define MRDR_RXEMPTY BIT(14)
+#define MDER_TDDE BIT(0)
+#define MDER_RDDE BIT(1)
+
+#define SCR_SEN BIT(0)
+#define SCR_RST BIT(1)
+#define SCR_FILTEN BIT(4)
+#define SCR_RTF BIT(8)
+#define SCR_RRF BIT(9)
+#define SSR_TDF BIT(0)
+#define SSR_RDF BIT(1)
+#define SSR_AVF BIT(2)
+#define SSR_TAF BIT(3)
+#define SSR_RSF BIT(8)
+#define SSR_SDF BIT(9)
+#define SSR_BEF BIT(10)
+#define SSR_FEF BIT(11)
+#define SSR_SBF BIT(24)
+#define SSR_BBF BIT(25)
+#define SSR_CLEAR_BITS (SSR_RSF | SSR_SDF | SSR_BEF | SSR_FEF)
+#define SIER_TDIE BIT(0)
+#define SIER_RDIE BIT(1)
+#define SIER_AVIE BIT(2)
+#define SIER_TAIE BIT(3)
+#define SIER_RSIE BIT(8)
+#define SIER_SDIE BIT(9)
+#define SIER_BEIE BIT(10)
+#define SIER_FEIE BIT(11)
+#define SIER_AM0F BIT(12)
+#define SCFGR1_RXSTALL BIT(1)
+#define SCFGR1_TXDSTALL BIT(2)
+#define SCFGR2_FILTSDA_SHIFT 24
+#define SCFGR2_FILTSCL_SHIFT 16
+#define SCFGR2_CLKHOLD(x) (x)
+#define SCFGR2_FILTSDA(x) ((x) << SCFGR2_FILTSDA_SHIFT)
+#define SCFGR2_FILTSCL(x) ((x) << SCFGR2_FILTSCL_SHIFT)
+#define SASR_READ_REQ 0x1
+#define SLAVE_INT_FLAG (SIER_TDIE | SIER_RDIE | SIER_AVIE | \
+ SIER_SDIE | SIER_BEIE)
#define I2C_CLK_RATIO 2
#define CHUNK_DATA 256
#define I2C_PM_TIMEOUT 10 /* ms */
+#define I2C_DMA_THRESHOLD 8 /* bytes */
enum lpi2c_imx_mode {
STANDARD, /* 100+Kbps */
@@ -91,6 +147,24 @@ enum lpi2c_imx_pincfg {
FOUR_PIN_PP,
};
+struct lpi2c_imx_dma {
+ bool using_pio_mode;
+ u8 rx_cmd_buf_len;
+ u8 *dma_buf;
+ u16 *rx_cmd_buf;
+ unsigned int dma_len;
+ unsigned int tx_burst_num;
+ unsigned int rx_burst_num;
+ unsigned long dma_msg_flag;
+ resource_size_t phy_addr;
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_data_dir;
+ enum dma_transfer_direction dma_transfer_dir;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+};
+
struct lpi2c_imx_struct {
struct i2c_adapter adapter;
int num_clks;
@@ -108,6 +182,9 @@ struct lpi2c_imx_struct {
unsigned int rxfifosize;
enum lpi2c_imx_mode mode;
struct i2c_bus_recovery_info rinfo;
+ bool can_use_dma;
+ struct lpi2c_imx_dma *dma;
+ struct i2c_client *target;
};
static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
@@ -305,7 +382,7 @@ static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
return 0;
}
-static int lpi2c_imx_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+static int lpi2c_imx_pio_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
{
unsigned long time_left;
@@ -451,6 +528,425 @@ static void lpi2c_imx_read(struct lpi2c_imx_struct *lpi2c_imx,
lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
}
+static bool is_use_dma(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msg)
+{
+ if (!lpi2c_imx->can_use_dma)
+ return false;
+
+ /*
+ * When the length of data is less than I2C_DMA_THRESHOLD, PIO (CPU)
+ * mode is used directly, since the DMA set-up overhead would hurt
+ * performance.
+ */
+ return msg->len >= I2C_DMA_THRESHOLD;
+}
+
+static int lpi2c_imx_pio_xfer(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msg)
+{
+ reinit_completion(&lpi2c_imx->complete);
+
+ if (msg->flags & I2C_M_RD)
+ lpi2c_imx_read(lpi2c_imx, msg);
+ else
+ lpi2c_imx_write(lpi2c_imx, msg);
+
+ return lpi2c_imx_pio_msg_complete(lpi2c_imx);
+}
+
+static int lpi2c_imx_dma_timeout_calculate(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long time;
+
+ time = 8 * lpi2c_imx->dma->dma_len * 1000 / lpi2c_imx->bitrate;
+
+ /* Add a millisecond of margin for scheduler-related activities */
+ time += 1;
+
+ /* Be generous: scale the millisecond figure up to seconds */
+ return msecs_to_jiffies(time * MSEC_PER_SEC);
+}
+
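A worked example of the resulting timeout, with assumed numbers (a 128-byte message at 400 kbit/s):

	/* Assumed: dma_len = 128, bitrate = 400000 */
	time = 8 * 128 * 1000 / 400000;	/* 2ms of raw wire time (truncated) */
	time += 1;			/* margin -> 3 */
	/* msecs_to_jiffies(3 * MSEC_PER_SEC): a deliberately generous 3s wait */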
+static int lpi2c_imx_alloc_rx_cmd_buf(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ u16 rx_remain = dma->dma_len;
+ int cmd_num;
+ u16 temp;
+
+ /*
+ * Calculate the number of RX command words that the DMA TX channel
+ * will write into the command register, based on the i2c msg len,
+ * and build the RX command word buffer.
+ */
+ cmd_num = DIV_ROUND_UP(rx_remain, CHUNK_DATA);
+ dma->rx_cmd_buf = kcalloc(cmd_num, sizeof(u16), GFP_KERNEL);
+ if (!dma->rx_cmd_buf) {
+ dev_err(&lpi2c_imx->adapter.dev, "Alloc RX cmd buffer failed\n");
+ return -ENOMEM;
+ }
+
+ dma->rx_cmd_buf_len = cmd_num * sizeof(u16);
+
+ for (int i = 0; i < cmd_num; i++) {
+ temp = rx_remain > CHUNK_DATA ? CHUNK_DATA - 1 : rx_remain - 1;
+ temp |= (RECV_DATA << 8);
+ rx_remain -= CHUNK_DATA;
+ dma->rx_cmd_buf[i] = temp;
+ }
+
+ return 0;
+}
+
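For instance (an assumed length), a 600-byte read produces cmd_num = 3 and the loop fills the buffer as follows; each word carries RECV_DATA in the command data field and receives DATA[7:0] + 1 bytes:

	/* dma_len = 600 -> DIV_ROUND_UP(600, 256) = 3 command words */
	dma->rx_cmd_buf[0] = (RECV_DATA << 8) | 0xff;	/* 256 bytes */
	dma->rx_cmd_buf[1] = (RECV_DATA << 8) | 0xff;	/* 256 bytes */
	dma->rx_cmd_buf[2] = (RECV_DATA << 8) | 0x57;	/* remaining 88 bytes */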
+static int lpi2c_imx_dma_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ unsigned long time_left, time;
+
+ time = lpi2c_imx_dma_timeout_calculate(lpi2c_imx);
+ time_left = wait_for_completion_timeout(&lpi2c_imx->complete, time);
+ if (time_left == 0) {
+ dev_err(&lpi2c_imx->adapter.dev, "I/O error in DMA data transfer\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void lpi2c_dma_unmap(struct lpi2c_imx_dma *dma)
+{
+ struct dma_chan *chan = dma->dma_data_dir == DMA_FROM_DEVICE
+ ? dma->chan_rx : dma->chan_tx;
+
+ dma_unmap_single(chan->device->dev, dma->dma_addr,
+ dma->dma_len, dma->dma_data_dir);
+
+ dma->dma_data_dir = DMA_NONE;
+}
+
+static void lpi2c_cleanup_rx_cmd_dma(struct lpi2c_imx_dma *dma)
+{
+ dmaengine_terminate_sync(dma->chan_tx);
+ dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+}
+
+static void lpi2c_cleanup_dma(struct lpi2c_imx_dma *dma)
+{
+ if (dma->dma_data_dir == DMA_FROM_DEVICE)
+ dmaengine_terminate_sync(dma->chan_rx);
+ else if (dma->dma_data_dir == DMA_TO_DEVICE)
+ dmaengine_terminate_sync(dma->chan_tx);
+
+ lpi2c_dma_unmap(dma);
+}
+
+static void lpi2c_dma_callback(void *data)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = data;
+
+ complete(&lpi2c_imx->complete);
+}
+
+static int lpi2c_dma_rx_cmd_submit(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct dma_async_tx_descriptor *rx_cmd_desc;
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ struct dma_chan *txchan = dma->chan_tx;
+ dma_cookie_t cookie;
+
+ dma->dma_tx_addr = dma_map_single(txchan->device->dev,
+ dma->rx_cmd_buf, dma->rx_cmd_buf_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(txchan->device->dev, dma->dma_tx_addr)) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
+ return -EINVAL;
+ }
+
+ rx_cmd_desc = dmaengine_prep_slave_single(txchan, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rx_cmd_desc) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
+ goto desc_prepare_err_exit;
+ }
+
+ cookie = dmaengine_submit(rx_cmd_desc);
+ if (dma_submit_error(cookie)) {
+ dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
+ goto submit_err_exit;
+ }
+
+ dma_async_issue_pending(txchan);
+
+ return 0;
+
+desc_prepare_err_exit:
+ dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+ return -EINVAL;
+
+submit_err_exit:
+ dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+ dmaengine_desc_free(rx_cmd_desc);
+ return -EINVAL;
+}
+
+static int lpi2c_dma_submit(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ chan = dma->chan_rx;
+ dma->dma_data_dir = DMA_FROM_DEVICE;
+ dma->dma_transfer_dir = DMA_DEV_TO_MEM;
+ } else {
+ chan = dma->chan_tx;
+ dma->dma_data_dir = DMA_TO_DEVICE;
+ dma->dma_transfer_dir = DMA_MEM_TO_DEV;
+ }
+
+ dma->dma_addr = dma_map_single(chan->device->dev,
+ dma->dma_buf, dma->dma_len, dma->dma_data_dir);
+ if (dma_mapping_error(chan->device->dev, dma->dma_addr)) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
+ return -EINVAL;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma->dma_addr,
+ dma->dma_len, dma->dma_transfer_dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
+ goto desc_prepare_err_exit;
+ }
+
+ reinit_completion(&lpi2c_imx->complete);
+ desc->callback = lpi2c_dma_callback;
+ desc->callback_param = lpi2c_imx;
+
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie)) {
+ dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
+ goto submit_err_exit;
+ }
+
+ /* Can't switch to PIO mode once the DMA transfer has started */
+ dma->using_pio_mode = false;
+
+ dma_async_issue_pending(chan);
+
+ return 0;
+
+desc_prepare_err_exit:
+ lpi2c_dma_unmap(dma);
+ return -EINVAL;
+
+submit_err_exit:
+ lpi2c_dma_unmap(dma);
+ dmaengine_desc_free(desc);
+ return -EINVAL;
+}
+
+static int lpi2c_imx_find_max_burst_num(unsigned int fifosize, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = fifosize / 2; i > 0; i--)
+ if (!(len % i))
+ break;
+
+ return i;
+}
+
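Concretely, with assumed sizes, the scan returns the largest divisor of len not exceeding fifosize / 2:

	lpi2c_imx_find_max_burst_num(16, 24);	/* 24 % 8 == 0 -> 8 */
	lpi2c_imx_find_max_burst_num(16, 26);	/* falls through to 2 */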
+/*
+ * For a highest DMA efficiency, tx/rx burst number should be calculated according
+ * to the FIFO depth.
+ */
+static void lpi2c_imx_dma_burst_num_calculate(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ unsigned int cmd_num;
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ /*
+ * One RX cmd word can trigger a DMA receive of no more than 256 bytes.
+ * The number of RX cmd words should be calculated based on the data
+ * length.
+ */
+ cmd_num = DIV_ROUND_UP(dma->dma_len, CHUNK_DATA);
+ dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
+ cmd_num);
+ dma->rx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->rxfifosize,
+ dma->dma_len);
+ } else {
+ dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
+ dma->dma_len);
+ }
+}
+
+static int lpi2c_dma_config(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ struct dma_slave_config rx = {}, tx = {};
+ int ret;
+
+ lpi2c_imx_dma_burst_num_calculate(lpi2c_imx);
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
+ tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ tx.dst_maxburst = dma->tx_burst_num;
+ tx.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &tx);
+ if (ret < 0)
+ return ret;
+
+ rx.src_addr = dma->phy_addr + LPI2C_MRDR;
+ rx.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ rx.src_maxburst = dma->rx_burst_num;
+ rx.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(dma->chan_rx, &rx);
+ if (ret < 0)
+ return ret;
+ } else {
+ tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
+ tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ tx.dst_maxburst = dma->tx_burst_num;
+ tx.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(dma->chan_tx, &tx);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lpi2c_dma_enable(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+
+ /*
+ * The TX interrupt is triggered when the number of words in
+ * the transmit FIFO is equal to or less than the TX watermark.
+ * The RX interrupt is triggered when the number of words in
+ * the receive FIFO is greater than the RX watermark.
+ * To trigger the DMA request, the TX watermark should be set
+ * equal to the DMA TX burst number, but the RX watermark should
+ * be set less than the DMA RX burst number.
+ */
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ /* Set I2C TX/RX watermark */
+ writel(dma->tx_burst_num | (dma->rx_burst_num - 1) << 16,
+ lpi2c_imx->base + LPI2C_MFCR);
+ /* Enable I2C DMA TX/RX function */
+ writel(MDER_TDDE | MDER_RDDE, lpi2c_imx->base + LPI2C_MDER);
+ } else {
+ /* Set I2C TX watermark */
+ writel(dma->tx_burst_num, lpi2c_imx->base + LPI2C_MFCR);
+ /* Enable I2C DMA TX function */
+ writel(MDER_TDDE, lpi2c_imx->base + LPI2C_MDER);
+ }
+
+ /* Enable NACK detection */
+ lpi2c_imx_intctrl(lpi2c_imx, MIER_NDIE);
+}
+
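With assumed burst numbers tx_burst_num = 4 and rx_burst_num = 8, the read-direction watermark write above resolves to:

	/* MFCR = TX watermark | (RX watermark << 16) */
	writel(4 | (8 - 1) << 16, lpi2c_imx->base + LPI2C_MFCR);	/* 0x00070004 */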
+/*
+ * When lpi2c is in TX DMA mode we can use one DMA TX channel to write
+ * data word into TXFIFO, but in RX DMA mode it is different.
+ *
+ * The LPI2C MTDR register is a command data and transmit data register.
+ * Bits 8-10 are the command data field and Bits 0-7 are the transmit
+ * data field. When the LPI2C master needs to read data, the number of
+ * bytes to read should be set in the command field and RECV_DATA should
+ * be set into the command data field to receive (DATA[7:0] + 1) bytes.
+ * The recv data command word is made of RECV_DATA in the command data
+ * field and the number of bytes to read in transmit data field. When the
+ * length of data to be read exceeds 256 bytes, recv data command word
+ * needs to be written to TXFIFO multiple times.
+ *
+ * So when in RX DMA mode, the TX channel must also be configured to
+ * send RX command words, and those command words must be set up in
+ * advance of the transfer.
+ */
+static int lpi2c_imx_dma_xfer(struct lpi2c_imx_struct *lpi2c_imx,
+ struct i2c_msg *msg)
+{
+ struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
+ int ret;
+
+ /* When DMA mode fails before transferring, CPU mode can be used. */
+ dma->using_pio_mode = true;
+
+ dma->dma_len = msg->len;
+ dma->dma_msg_flag = msg->flags;
+ dma->dma_buf = i2c_get_dma_safe_msg_buf(msg, I2C_DMA_THRESHOLD);
+ if (!dma->dma_buf)
+ return -ENOMEM;
+
+ ret = lpi2c_dma_config(lpi2c_imx);
+ if (ret) {
+ dev_err(&lpi2c_imx->adapter.dev, "Failed to configure DMA (%d)\n", ret);
+ goto disable_dma;
+ }
+
+ lpi2c_dma_enable(lpi2c_imx);
+
+ ret = lpi2c_dma_submit(lpi2c_imx);
+ if (ret) {
+ dev_err(&lpi2c_imx->adapter.dev, "DMA submission failed (%d)\n", ret);
+ goto disable_dma;
+ }
+
+ if (dma->dma_msg_flag & I2C_M_RD) {
+ ret = lpi2c_imx_alloc_rx_cmd_buf(lpi2c_imx);
+ if (ret)
+ goto disable_cleanup_data_dma;
+
+ ret = lpi2c_dma_rx_cmd_submit(lpi2c_imx);
+ if (ret)
+ goto disable_cleanup_data_dma;
+ }
+
+ ret = lpi2c_imx_dma_msg_complete(lpi2c_imx);
+ if (ret)
+ goto disable_cleanup_all_dma;
+
+ /* If a NACK was detected during the transfer, clean up all DMA transfers */
+ if (readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) {
+ ret = -EIO;
+ goto disable_cleanup_all_dma;
+ }
+
+ if (dma->dma_msg_flag & I2C_M_RD)
+ dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
+ dma->rx_cmd_buf_len, DMA_TO_DEVICE);
+ lpi2c_dma_unmap(dma);
+
+ goto disable_dma;
+
+disable_cleanup_all_dma:
+ if (dma->dma_msg_flag & I2C_M_RD)
+ lpi2c_cleanup_rx_cmd_dma(dma);
+disable_cleanup_data_dma:
+ lpi2c_cleanup_dma(dma);
+disable_dma:
+ /* Disable I2C DMA function */
+ writel(0, lpi2c_imx->base + LPI2C_MDER);
+
+ if (dma->dma_msg_flag & I2C_M_RD)
+ kfree(dma->rx_cmd_buf);
+
+ i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, !ret);
+
+ return ret;
+}
+
static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs, int num)
{
@@ -477,12 +973,14 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
lpi2c_imx->msglen = msgs[i].len;
init_completion(&lpi2c_imx->complete);
- if (msgs[i].flags & I2C_M_RD)
- lpi2c_imx_read(lpi2c_imx, &msgs[i]);
- else
- lpi2c_imx_write(lpi2c_imx, &msgs[i]);
+ if (is_use_dma(lpi2c_imx, &msgs[i])) {
+ result = lpi2c_imx_dma_xfer(lpi2c_imx, &msgs[i]);
+ if (result && lpi2c_imx->dma->using_pio_mode)
+ result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
+ } else {
+ result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
+ }
- result = lpi2c_imx_msg_complete(lpi2c_imx);
if (result)
goto stop;
@@ -510,9 +1008,56 @@ disable:
return (result < 0) ? result : num;
}
-static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+static irqreturn_t lpi2c_imx_target_isr(struct lpi2c_imx_struct *lpi2c_imx,
+ u32 ssr, u32 sier_filter)
+{
+ u8 value;
+ u32 sasr;
+
+ /* Arbitration lost */
+ if (sier_filter & SSR_BEF) {
+ writel(0, lpi2c_imx->base + LPI2C_SIER);
+ return IRQ_HANDLED;
+ }
+
+ /* Address detected */
+ if (sier_filter & SSR_AVF) {
+ sasr = readl(lpi2c_imx->base + LPI2C_SASR);
+ if (SASR_READ_REQ & sasr) {
+ /* Read request */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_REQUESTED, &value);
+ writel(value, lpi2c_imx->base + LPI2C_STDR);
+ goto ret;
+ } else {
+ /* Write request */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_REQUESTED, &value);
+ }
+ }
+
+ if (sier_filter & SSR_SDF)
+ /* STOP */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_STOP, &value);
+
+ if (sier_filter & SSR_TDF) {
+ /* Target send data */
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_PROCESSED, &value);
+ writel(value, lpi2c_imx->base + LPI2C_STDR);
+ }
+
+ if (sier_filter & SSR_RDF) {
+ /* Target receive data */
+ value = readl(lpi2c_imx->base + LPI2C_SRDR);
+ i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_RECEIVED, &value);
+ }
+
+ret:
+ /* Clear SSR */
+ writel(ssr & SSR_CLEAR_BITS, lpi2c_imx->base + LPI2C_SSR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lpi2c_imx_master_isr(struct lpi2c_imx_struct *lpi2c_imx)
{
- struct lpi2c_imx_struct *lpi2c_imx = dev_id;
unsigned int enabled;
unsigned int temp;
@@ -532,6 +1077,124 @@ static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+
+ if (lpi2c_imx->target) {
+ u32 scr = readl(lpi2c_imx->base + LPI2C_SCR);
+ u32 ssr = readl(lpi2c_imx->base + LPI2C_SSR);
+ u32 sier_filter = ssr & readl(lpi2c_imx->base + LPI2C_SIER);
+
+ /*
+ * The target is enabled and an interrupt has been triggered.
+ * Enter the target's irq handler.
+ */
+ if ((scr & SCR_SEN) && sier_filter)
+ return lpi2c_imx_target_isr(lpi2c_imx, ssr, sier_filter);
+ }
+
+ /*
+ * Otherwise the interrupt has been triggered by the master.
+ * Enter the master's irq handler.
+ */
+ return lpi2c_imx_master_isr(lpi2c_imx);
+}
+
+static void lpi2c_imx_target_init(struct lpi2c_imx_struct *lpi2c_imx)
+{
+ u32 temp;
+
+ /* Reset target module */
+ writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
+ writel(0, lpi2c_imx->base + LPI2C_SCR);
+
+ /* Set target address */
+ writel((lpi2c_imx->target->addr << 1), lpi2c_imx->base + LPI2C_SAMR);
+
+ writel(SCFGR1_RXSTALL | SCFGR1_TXDSTALL, lpi2c_imx->base + LPI2C_SCFGR1);
+
+ /*
+ * set SCFGR2: FILTSDA, FILTSCL and CLKHOLD
+ *
+ * FILTSCL/FILTSDA can eliminate signal skew. It should generally be
+ * set to the same value and should be set >= 50ns.
+ *
+ * CLKHOLD is only used when clock stretching is enabled, but it will
+ * extend the clock stretching to ensure there is an additional delay
+ * between the target driving SDA and the target releasing the SCL pin.
+ *
+ * The CLKHOLD setting is crucial for the lpi2c target. When the master
+ * reads data from the target, a delay between two bytes of one message
+ * (caused by CPU idle, excessive load, or similar) shortens the interval
+ * between the target driving SDA and releasing SCL. The lpi2c master
+ * then mistakes this for a stop condition, resulting in an arbitration
+ * failure. Setting CLKHOLD avoids this issue.
+ *
+ * To ensure the lpi2c functions normally at speeds as low as 100kHz,
+ * CLKHOLD should be set to 3; this value is also compatible with higher
+ * clock frequencies such as 400kHz and 1MHz.
+ */
+ temp = SCFGR2_FILTSDA(2) | SCFGR2_FILTSCL(2) | SCFGR2_CLKHOLD(3);
+ writel(temp, lpi2c_imx->base + LPI2C_SCFGR2);
+
+ /*
+ * Enable module:
+ * SCR_FILTEN enables the digital filter and output delay counter for
+ * LPI2C target mode, so it needs to be asserted when the SDA/SCL
+ * filters and CLKHOLD are enabled.
+ */
+ writel(SCR_SEN | SCR_FILTEN, lpi2c_imx->base + LPI2C_SCR);
+
+ /* Enable interrupt from i2c module */
+ writel(SLAVE_INT_FLAG, lpi2c_imx->base + LPI2C_SIER);
+}
+
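Spelled out with the shifts defined earlier, the SCFGR2 write above resolves to a single constant:

	/* SCFGR2_FILTSDA(2) | SCFGR2_FILTSCL(2) | SCFGR2_CLKHOLD(3)
	 * = (2 << 24) | (2 << 16) | 3 = 0x02020003
	 */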
+static int lpi2c_imx_register_target(struct i2c_client *client)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
+ int ret;
+
+ if (lpi2c_imx->target)
+ return -EBUSY;
+
+ lpi2c_imx->target = client;
+
+ ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
+ if (ret < 0) {
+ dev_err(&lpi2c_imx->adapter.dev, "failed to resume i2c controller\n");
+ lpi2c_imx->target = NULL;
+ return ret;
+ }
+
+ lpi2c_imx_target_init(lpi2c_imx);
+
+ return 0;
+}
+
+static int lpi2c_imx_unregister_target(struct i2c_client *client)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
+ int ret;
+
+ if (!lpi2c_imx->target)
+ return -EINVAL;
+
+ /* Reset target address. */
+ writel(0, lpi2c_imx->base + LPI2C_SAMR);
+
+ writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
+ writel(0, lpi2c_imx->base + LPI2C_SCR);
+
+ lpi2c_imx->target = NULL;
+
+ ret = pm_runtime_put_sync(lpi2c_imx->adapter.dev.parent);
+ if (ret < 0)
+ dev_err(&lpi2c_imx->adapter.dev, "failed to suspend i2c controller\n");
+
+ return ret;
+}
+
static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
struct platform_device *pdev)
{
@@ -546,6 +1209,58 @@ static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
return 0;
}
+static void dma_exit(struct device *dev, struct lpi2c_imx_dma *dma)
+{
+ if (dma->chan_rx)
+ dma_release_channel(dma->chan_rx);
+
+ if (dma->chan_tx)
+ dma_release_channel(dma->chan_tx);
+
+ devm_kfree(dev, dma);
+}
+
+static int lpi2c_dma_init(struct device *dev, dma_addr_t phy_addr)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
+ struct lpi2c_imx_dma *dma;
+ int ret;
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
+
+ dma->phy_addr = phy_addr;
+
+ /* Prepare for TX DMA: */
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
+ ret = PTR_ERR(dma->chan_tx);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(dev, "can't request DMA tx channel (%d)\n", ret);
+ dma->chan_tx = NULL;
+ goto dma_exit;
+ }
+
+ /* Prepare for RX DMA: */
+ dma->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
+ ret = PTR_ERR(dma->chan_rx);
+ if (ret != -ENODEV && ret != -EPROBE_DEFER)
+ dev_err(dev, "can't request DMA rx channel (%d)\n", ret);
+ dma->chan_rx = NULL;
+ goto dma_exit;
+ }
+
+ lpi2c_imx->can_use_dma = true;
+ lpi2c_imx->dma = dma;
+ return 0;
+
+dma_exit:
+ dma_exit(dev, dma);
+ return ret;
+}
+
static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
@@ -555,6 +1270,8 @@ static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
static const struct i2c_algorithm lpi2c_imx_algo = {
.master_xfer = lpi2c_imx_xfer,
.functionality = lpi2c_imx_func,
+ .reg_target = lpi2c_imx_register_target,
+ .unreg_target = lpi2c_imx_unregister_target,
};
static const struct of_device_id lpi2c_imx_of_match[] = {
@@ -566,6 +1283,8 @@ MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
static int lpi2c_imx_probe(struct platform_device *pdev)
{
struct lpi2c_imx_struct *lpi2c_imx;
+ struct resource *res;
+ dma_addr_t phy_addr;
unsigned int temp;
int irq, ret;
@@ -573,7 +1292,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (!lpi2c_imx)
return -ENOMEM;
- lpi2c_imx->base = devm_platform_ioremap_resource(pdev, 0);
+ lpi2c_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(lpi2c_imx->base))
return PTR_ERR(lpi2c_imx->base);
@@ -587,6 +1306,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
strscpy(lpi2c_imx->adapter.name, pdev->name,
sizeof(lpi2c_imx->adapter.name));
+ phy_addr = (dma_addr_t)res->start;
ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
if (ret < 0)
@@ -598,7 +1318,7 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (ret)
lpi2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
- ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, IRQF_NO_SUSPEND,
pdev->name, lpi2c_imx);
if (ret)
return dev_err_probe(&pdev->dev, ret, "can't claim irq %d\n", irq);
@@ -640,6 +1360,14 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (ret == -EPROBE_DEFER)
goto rpm_disable;
+ /* Init DMA */
+ ret = lpi2c_dma_init(&pdev->dev, phy_addr);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ goto rpm_disable;
+ dev_info(&pdev->dev, "DMA not available, using PIO mode\n");
+ }
+
ret = i2c_add_adapter(&lpi2c_imx->adapter);
if (ret)
goto rpm_disable;
@@ -694,16 +1422,75 @@ static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
return 0;
}
+static int __maybe_unused lpi2c_suspend_noirq(struct device *dev)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int __maybe_unused lpi2c_resume_noirq(struct device *dev)
+{
+ struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * If the I2C module powers down during system suspend,
+ * the register values will be lost. Therefore, reinitialize
+ * the target when the system resumes.
+ */
+ if (lpi2c_imx->target)
+ lpi2c_imx_target_init(lpi2c_imx);
+
+ return 0;
+}
+
+static int lpi2c_suspend(struct device *dev)
+{
+ /*
+ * Some I2C devices may need the I2C controller to remain active
+ * during resume_noirq() or suspend_noirq(). If the controller is
+ * autosuspended, there is no way to wake it up once runtime PM is
+ * disabled (in suspend_late()).
+ *
+ * During system resume, the I2C controller will be available only
+ * after runtime PM is re-enabled (in resume_early()). However, this
+ * may be too late for some devices.
+ *
+ * Wake up the controller in the suspend() callback while runtime PM
+ * is still enabled. The I2C controller will remain available until
+ * the suspend_noirq() callback (pm_runtime_force_suspend()) is
+ * called. During resume, the I2C controller can be restored by the
+ * resume_noirq() callback (pm_runtime_force_resume()).
+ *
+ * Finally, the resume() callback re-enables autosuspend, so the
+ * I2C controller remains available from resume_noirq() onward and
+ * until the system next enters suspend_noirq().
+ */
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int lpi2c_resume(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
static const struct dev_pm_ops lpi2c_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(lpi2c_suspend_noirq,
+ lpi2c_resume_noirq)
+ SYSTEM_SLEEP_PM_OPS(lpi2c_suspend, lpi2c_resume)
SET_RUNTIME_PM_OPS(lpi2c_runtime_suspend,
lpi2c_runtime_resume, NULL)
};
static struct platform_driver lpi2c_imx_driver = {
.probe = lpi2c_imx_probe,
- .remove_new = lpi2c_imx_remove,
+ .remove = lpi2c_imx_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = lpi2c_imx_of_match,
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 98539313cbc9..ee0d25b498cb 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -17,7 +17,7 @@
* Copyright (C) 2008 Darius Augulis <darius.augulis at teltonika.lt>
*
* Copyright 2013 Freescale Semiconductor, Inc.
- * Copyright 2020 NXP
+ * Copyright 2020, 2024 NXP
*
*/
@@ -84,6 +84,7 @@
#define IMX_I2C_REGSHIFT 2
#define VF610_I2C_REGSHIFT 0
+#define S32G_I2C_REGSHIFT 0
/* Bits of IMX I2C registers */
#define I2SR_RXAK 0x01
@@ -165,9 +166,34 @@ static struct imx_i2c_clk_pair vf610_i2c_clk_div[] = {
{ 3840, 0x3F }, { 4096, 0x7B }, { 5120, 0x7D }, { 6144, 0x7E },
};
+/* S32G2/S32G3 clock divider, register value pairs */
+static struct imx_i2c_clk_pair s32g2_i2c_clk_div[] = {
+ { 34, 0x00 }, { 36, 0x01 }, { 38, 0x02 }, { 40, 0x03 },
+ { 42, 0x04 }, { 44, 0x05 }, { 46, 0x06 }, { 48, 0x09 },
+ { 52, 0x0A }, { 54, 0x07 }, { 56, 0x0B }, { 60, 0x0C },
+ { 64, 0x0D }, { 68, 0x40 }, { 72, 0x0E }, { 76, 0x42 },
+ { 80, 0x12 }, { 84, 0x0F }, { 88, 0x13 }, { 96, 0x14 },
+ { 104, 0x15 }, { 108, 0x47 }, { 112, 0x19 }, { 120, 0x16 },
+ { 128, 0x1A }, { 136, 0x80 }, { 144, 0x17 }, { 152, 0x82 },
+ { 160, 0x1C }, { 168, 0x84 }, { 176, 0x1D }, { 192, 0x21 },
+ { 208, 0x1E }, { 216, 0x87 }, { 224, 0x22 }, { 240, 0x56 },
+ { 256, 0x1F }, { 288, 0x24 }, { 320, 0x25 }, { 336, 0x8F },
+ { 352, 0x93 }, { 356, 0x5D }, { 358, 0x98 }, { 384, 0x26 },
+ { 416, 0x56 }, { 448, 0x2A }, { 480, 0x27 }, { 512, 0x2B },
+ { 576, 0x2C }, { 640, 0x2D }, { 704, 0x9D }, { 768, 0x2E },
+ { 832, 0x9D }, { 896, 0x32 }, { 960, 0x2F }, { 1024, 0x33 },
+ { 1152, 0x34 }, { 1280, 0x35 }, { 1536, 0x36 }, { 1792, 0x3A },
+ { 1920, 0x37 }, { 2048, 0x3B }, { 2304, 0x74 }, { 2560, 0x3D },
+ { 3072, 0x3E }, { 3584, 0x7A }, { 3840, 0x3F }, { 4096, 0x7B },
+ { 4608, 0x7C }, { 5120, 0x7D }, { 6144, 0x7E }, { 7168, 0xBA },
+ { 7680, 0x7F }, { 8192, 0xBB }, { 9216, 0xBC }, { 10240, 0xBD },
+ { 12288, 0xBE }, { 15360, 0xBF },
+};
+
enum imx_i2c_type {
IMX1_I2C,
IMX21_I2C,
+ S32G_I2C,
VF610_I2C,
};
@@ -197,6 +223,17 @@ struct imx_i2c_dma {
enum dma_data_direction dma_data_dir;
};
+enum imx_i2c_state {
+ IMX_I2C_STATE_DONE,
+ IMX_I2C_STATE_FAILED,
+ IMX_I2C_STATE_WRITE,
+ IMX_I2C_STATE_DMA,
+ IMX_I2C_STATE_READ,
+ IMX_I2C_STATE_READ_CONTINUE,
+ IMX_I2C_STATE_READ_BLOCK_DATA,
+ IMX_I2C_STATE_READ_BLOCK_DATA_LEN,
+};
+
struct imx_i2c_struct {
struct i2c_adapter adapter;
struct clk *clk;
@@ -216,6 +253,14 @@ struct imx_i2c_struct {
struct i2c_client *slave;
enum i2c_slave_event last_slave_event;
+ struct i2c_msg *msg;
+ unsigned int msg_buf_idx;
+ int isr_result;
+ bool is_lastmsg;
+ enum imx_i2c_state state;
+
+ bool multi_master;
+
/* For checking slave events. */
spinlock_t slave_lock;
struct hrtimer slave_timer;
@@ -258,7 +303,15 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = {
.ndivs = ARRAY_SIZE(vf610_i2c_clk_div),
.i2sr_clr_opcode = I2SR_CLR_OPCODE_W1C,
.i2cr_ien_opcode = I2CR_IEN_OPCODE_0,
+};
+static const struct imx_i2c_hwdata s32g2_i2c_hwdata = {
+ .devtype = S32G_I2C,
+ .regshift = S32G_I2C_REGSHIFT,
+ .clk_div = s32g2_i2c_clk_div,
+ .ndivs = ARRAY_SIZE(s32g2_i2c_clk_div),
+ .i2sr_clr_opcode = I2SR_CLR_OPCODE_W1C,
+ .i2cr_ien_opcode = I2CR_IEN_OPCODE_0,
};
static const struct platform_device_id imx_i2c_devtype[] = {
@@ -282,12 +335,14 @@ static const struct of_device_id i2c_imx_dt_ids[] = {
{ .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx7d-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, },
+ { .compatible = "nxp,s32g2-i2c", .data = &s32g2_i2c_hwdata, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, i2c_imx_dt_ids);
@@ -342,17 +397,16 @@ static void i2c_imx_reset_regs(struct imx_i2c_struct *i2c_imx)
}
/* Functions for DMA support */
-static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
- dma_addr_t phy_addr)
+static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, dma_addr_t phy_addr)
{
struct imx_i2c_dma *dma;
struct dma_slave_config dma_sconfig;
- struct device *dev = &i2c_imx->adapter.dev;
+ struct device *dev = i2c_imx->adapter.dev.parent;
int ret;
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
- return;
+ return -ENOMEM;
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
@@ -397,7 +451,7 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
- return;
+ return 0;
fail_rx:
dma_release_channel(dma->chan_rx);
@@ -405,6 +459,8 @@ fail_tx:
dma_release_channel(dma->chan_tx);
fail_al:
devm_kfree(dev, dma);
+
+ return ret;
}
static void i2c_imx_dma_callback(void *arg)
@@ -478,6 +534,7 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
{
+ bool multi_master = i2c_imx->multi_master;
unsigned long orig_jiffies = jiffies;
unsigned int temp;
@@ -485,12 +542,12 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool a
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
/* check for arbitration lost */
- if (temp & I2SR_IAL) {
+ if (multi_master && (temp & I2SR_IAL)) {
i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
return -EAGAIN;
}
- if (for_busy && (temp & I2SR_IBB)) {
+ if (for_busy && (!multi_master || (temp & I2SR_IBB))) {
i2c_imx->stopped = 0;
break;
}
@@ -540,8 +597,8 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic)
return -ETIMEDOUT;
}
- /* check for arbitration lost */
- if (i2c_imx->i2csr & I2SR_IAL) {
+ /* In multi-master mode check for arbitration lost */
+ if (i2c_imx->multi_master && (i2c_imx->i2csr & I2SR_IAL)) {
dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__);
i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
@@ -565,8 +622,8 @@ static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx)
return 0;
}
-static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
- unsigned int i2c_clk_rate)
+static int i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
+ unsigned int i2c_clk_rate)
{
struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
unsigned int div;
@@ -581,7 +638,11 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
/* Divider value calculation */
if (i2c_imx->cur_clk == i2c_clk_rate)
- return;
+ return 0;
+
+	/* Keep the denominator of the divider calculation below from being zero. */
+ if (!(i2c_clk_rate / 2))
+ return -EINVAL;
i2c_imx->cur_clk = i2c_clk_rate;
@@ -612,6 +673,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
dev_dbg(&i2c_imx->adapter.dev, "IFDR[IC]=0x%x, REAL DIV=%d\n",
i2c_clk_div[i].val, i2c_clk_div[i].div);
#endif
+
+ return 0;
}
static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
@@ -621,11 +684,12 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
struct imx_i2c_struct *i2c_imx = container_of(nb,
struct imx_i2c_struct,
clk_change_nb);
+ int ret = 0;
if (action & POST_RATE_CHANGE)
- i2c_imx_set_clk(i2c_imx, ndata->new_rate);
+ ret = i2c_imx_set_clk(i2c_imx, ndata->new_rate);
- return NOTIFY_OK;
+ return notifier_from_errno(ret);
}
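Editor's note: notifier_from_errno() folds the error into the notifier return
convention; per include/linux/notifier.h it is roughly

	static inline int notifier_from_errno(int err)
	{
		if (err)
			return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);
		return NOTIFY_OK;
	}

so a failed i2c_imx_set_clk() now stops the rate-change notifier chain instead
of unconditionally reporting NOTIFY_OK.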
static int i2c_imx_start(struct imx_i2c_struct *i2c_imx, bool atomic)
@@ -903,11 +967,156 @@ static int i2c_imx_unreg_slave(struct i2c_client *client)
return ret;
}
+static inline int i2c_imx_isr_acked(struct imx_i2c_struct *i2c_imx)
+{
+ i2c_imx->isr_result = 0;
+
+ if (imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR) & I2SR_RXAK) {
+ i2c_imx->state = IMX_I2C_STATE_FAILED;
+ i2c_imx->isr_result = -ENXIO;
+ wake_up(&i2c_imx->queue);
+ }
+
+ return i2c_imx->isr_result;
+}
+
+static inline int i2c_imx_isr_write(struct imx_i2c_struct *i2c_imx)
+{
+ int result;
+
+ result = i2c_imx_isr_acked(i2c_imx);
+ if (result)
+ return result;
+
+ if (i2c_imx->msg->len == i2c_imx->msg_buf_idx)
+ return 0;
+
+ imx_i2c_write_reg(i2c_imx->msg->buf[i2c_imx->msg_buf_idx++], i2c_imx, IMX_I2C_I2DR);
+
+ return 1;
+}
+
+static inline int i2c_imx_isr_read(struct imx_i2c_struct *i2c_imx)
+{
+ int result;
+ unsigned int temp;
+
+ result = i2c_imx_isr_acked(i2c_imx);
+ if (result)
+ return result;
+
+ /* setup bus to read data */
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp &= ~I2CR_MTX;
+ if (i2c_imx->msg->len - 1)
+ temp &= ~I2CR_TXAK;
+
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
+
+ return 0;
+}
+
+static inline void i2c_imx_isr_read_continue(struct imx_i2c_struct *i2c_imx)
+{
+ unsigned int temp;
+
+ if ((i2c_imx->msg->len - 1) == i2c_imx->msg_buf_idx) {
+ if (i2c_imx->is_lastmsg) {
+ /*
+	 * A STOP must be generated before reading I2DR to prevent
+	 * the controller from generating another clock cycle
+ */
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ if (!(temp & I2CR_MSTA))
+ i2c_imx->stopped = 1;
+ temp &= ~(I2CR_MSTA | I2CR_MTX);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+ } else {
+ /*
+	 * For an I2C master-receiver repeated-restart sequence such as:
+	 * read -> repeated START (MSTA) -> read/write
+	 * the controller must set MTX before reading the last byte of
+	 * the first read operation, otherwise the first read costs
+	 * one extra clock cycle.
+ */
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp |= I2CR_MTX;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+ }
+ } else if (i2c_imx->msg_buf_idx == (i2c_imx->msg->len - 2)) {
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp |= I2CR_TXAK;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+ }
+
+ i2c_imx->msg->buf[i2c_imx->msg_buf_idx++] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+}
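Editor's note: the I2CR_TXAK handling above follows the usual i.MX receive
sequence - TXAK has to be raised while fetching the second-to-last byte so
that the controller NACKs the final byte and the slave stops driving the bus.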
+
+static inline void i2c_imx_isr_read_block_data_len(struct imx_i2c_struct *i2c_imx)
+{
+ u8 len = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+
+ if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) {
+ i2c_imx->isr_result = -EPROTO;
+ i2c_imx->state = IMX_I2C_STATE_FAILED;
+ wake_up(&i2c_imx->queue);
+ }
+ i2c_imx->msg->len += len;
+}
+
static irqreturn_t i2c_imx_master_isr(struct imx_i2c_struct *i2c_imx, unsigned int status)
{
- /* save status register */
- i2c_imx->i2csr = status;
- wake_up(&i2c_imx->queue);
+ /*
+ * This state machine handles I2C reception and transmission in non-DMA
+ * mode. We must process all the data in the ISR to reduce the delay
+ * between two consecutive messages. If the data is not processed in
+	 * the ISR, SMBus devices may time out, leading to a bus error.
+ */
+ switch (i2c_imx->state) {
+ case IMX_I2C_STATE_DMA:
+ i2c_imx->i2csr = status;
+ wake_up(&i2c_imx->queue);
+ break;
+
+ case IMX_I2C_STATE_READ:
+ if (i2c_imx_isr_read(i2c_imx))
+ break;
+ i2c_imx->state = IMX_I2C_STATE_READ_CONTINUE;
+ break;
+
+ case IMX_I2C_STATE_READ_CONTINUE:
+ i2c_imx_isr_read_continue(i2c_imx);
+ if (i2c_imx->msg_buf_idx == i2c_imx->msg->len) {
+ i2c_imx->state = IMX_I2C_STATE_DONE;
+ wake_up(&i2c_imx->queue);
+ }
+ break;
+
+ case IMX_I2C_STATE_READ_BLOCK_DATA:
+ if (i2c_imx_isr_read(i2c_imx))
+ break;
+ i2c_imx->state = IMX_I2C_STATE_READ_BLOCK_DATA_LEN;
+ break;
+
+ case IMX_I2C_STATE_READ_BLOCK_DATA_LEN:
+ i2c_imx_isr_read_block_data_len(i2c_imx);
+ i2c_imx->state = IMX_I2C_STATE_READ_CONTINUE;
+ break;
+
+ case IMX_I2C_STATE_WRITE:
+ if (i2c_imx_isr_write(i2c_imx))
+ break;
+ i2c_imx->state = IMX_I2C_STATE_DONE;
+ wake_up(&i2c_imx->queue);
+ break;
+
+ default:
+ i2c_imx->i2csr = status;
+ i2c_imx->state = IMX_I2C_STATE_FAILED;
+ i2c_imx->isr_result = -EINVAL;
+ wake_up(&i2c_imx->queue);
+ }
return IRQ_HANDLED;
}
@@ -954,6 +1163,8 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
struct imx_i2c_dma *dma = i2c_imx->dma;
struct device *dev = &i2c_imx->adapter.dev;
+ i2c_imx->state = IMX_I2C_STATE_DMA;
+
dma->chan_using = dma->chan_tx;
dma->dma_transfer_dir = DMA_MEM_TO_DEV;
dma->dma_data_dir = DMA_TO_DEVICE;
@@ -1006,6 +1217,42 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
return i2c_imx_acked(i2c_imx);
}
+static int i2c_imx_prepare_read(struct imx_i2c_struct *i2c_imx,
+ struct i2c_msg *msgs, bool use_dma)
+{
+ int result;
+ unsigned int temp = 0;
+
+ /* write slave address */
+ imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
+ result = i2c_imx_trx_complete(i2c_imx, !use_dma);
+ if (result)
+ return result;
+ result = i2c_imx_acked(i2c_imx);
+ if (result)
+ return result;
+
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__);
+
+ /* setup bus to read data */
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ temp &= ~I2CR_MTX;
+
+ /*
+ * Reset the I2CR_TXAK flag initially for SMBus block read since the
+ * length is unknown
+ */
+ if (msgs->len - 1)
+ temp &= ~I2CR_TXAK;
+ if (use_dma)
+ temp |= I2CR_DMAEN;
+
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
+
+ return 0;
+}
+
static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
struct i2c_msg *msgs, bool is_lastmsg)
{
@@ -1016,6 +1263,13 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
struct imx_i2c_dma *dma = i2c_imx->dma;
struct device *dev = &i2c_imx->adapter.dev;
+ i2c_imx->state = IMX_I2C_STATE_DMA;
+
+ result = i2c_imx_prepare_read(i2c_imx, msgs, true);
+ if (result)
+ return result;
+
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
dma->chan_using = dma->chan_rx;
dma->dma_transfer_dir = DMA_DEV_TO_MEM;
@@ -1092,8 +1346,8 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
return 0;
}
-static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
- bool atomic)
+static int i2c_imx_atomic_write(struct imx_i2c_struct *i2c_imx,
+ struct i2c_msg *msgs)
{
int i, result;
@@ -1102,7 +1356,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
/* write slave address */
imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx, atomic);
+ result = i2c_imx_trx_complete(i2c_imx, true);
if (result)
return result;
result = i2c_imx_acked(i2c_imx);
@@ -1116,7 +1370,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
"<%s> write byte: B%d=0x%X\n",
__func__, i, msgs->buf[i]);
imx_i2c_write_reg(msgs->buf[i], i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx, atomic);
+ result = i2c_imx_trx_complete(i2c_imx, true);
if (result)
return result;
result = i2c_imx_acked(i2c_imx);
@@ -1126,55 +1380,54 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
return 0;
}
-static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
- bool is_lastmsg, bool atomic)
+static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
+{
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n",
+ __func__, i2c_8bit_addr_from_msg(msgs));
+
+ i2c_imx->state = IMX_I2C_STATE_WRITE;
+ i2c_imx->msg = msgs;
+ i2c_imx->msg_buf_idx = 0;
+
+ /*
+ * By writing the device address we start the state machine in the ISR.
+ * The ISR will report when it is done or when it fails.
+ */
+ imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
+ wait_event_timeout(i2c_imx->queue,
+ i2c_imx->state == IMX_I2C_STATE_DONE ||
+ i2c_imx->state == IMX_I2C_STATE_FAILED,
+ (msgs->len + 1) * HZ / 10);
+ if (i2c_imx->state == IMX_I2C_STATE_FAILED) {
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> write failed with %d\n",
+ __func__, i2c_imx->isr_result);
+ return i2c_imx->isr_result;
+ }
+ if (i2c_imx->state != IMX_I2C_STATE_DONE) {
+		dev_err(&i2c_imx->adapter.dev, "<%s> write timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
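Editor's note on the timeout above: wait_event_timeout() takes jiffies, so
(msgs->len + 1) * HZ / 10 grants 100 ms per data byte plus one share for the
address; a 32-byte write, for example, may wait up to 33 * HZ / 10 jiffies,
i.e. 3.3 s, before -ETIMEDOUT is returned.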
+
+static int i2c_imx_atomic_read(struct imx_i2c_struct *i2c_imx,
+ struct i2c_msg *msgs, bool is_lastmsg)
{
int i, result;
unsigned int temp;
int block_data = msgs->flags & I2C_M_RECV_LEN;
- int use_dma = i2c_imx->dma && msgs->flags & I2C_M_DMA_SAFE &&
- msgs->len >= DMA_THRESHOLD && !block_data;
- dev_dbg(&i2c_imx->adapter.dev,
- "<%s> write slave address: addr=0x%x\n",
- __func__, i2c_8bit_addr_from_msg(msgs));
-
- /* write slave address */
- imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
- result = i2c_imx_trx_complete(i2c_imx, atomic);
- if (result)
- return result;
- result = i2c_imx_acked(i2c_imx);
+ result = i2c_imx_prepare_read(i2c_imx, msgs, false);
if (result)
return result;
- dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__);
-
- /* setup bus to read data */
- temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
- temp &= ~I2CR_MTX;
-
- /*
- * Reset the I2CR_TXAK flag initially for SMBus block read since the
- * length is unknown
- */
- if ((msgs->len - 1) || block_data)
- temp &= ~I2CR_TXAK;
- if (use_dma)
- temp |= I2CR_DMAEN;
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
-
dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
- if (use_dma)
- return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg);
-
/* read data */
for (i = 0; i < msgs->len; i++) {
u8 len = 0;
- result = i2c_imx_trx_complete(i2c_imx, atomic);
+ result = i2c_imx_trx_complete(i2c_imx, true);
if (result)
return result;
/*
@@ -1205,7 +1458,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
temp &= ~(I2CR_MSTA | I2CR_MTX);
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
if (!i2c_imx->stopped)
- i2c_imx_bus_busy(i2c_imx, 0, atomic);
+ i2c_imx_bus_busy(i2c_imx, 0, true);
} else {
/*
* For i2c master receiver repeat restart operation like:
@@ -1236,6 +1489,48 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
return 0;
}
+static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs,
+ bool is_lastmsg)
+{
+ int block_data = msgs->flags & I2C_M_RECV_LEN;
+
+ dev_dbg(&i2c_imx->adapter.dev,
+ "<%s> write slave address: addr=0x%x\n",
+ __func__, i2c_8bit_addr_from_msg(msgs));
+
+ i2c_imx->is_lastmsg = is_lastmsg;
+
+ if (block_data)
+ i2c_imx->state = IMX_I2C_STATE_READ_BLOCK_DATA;
+ else
+ i2c_imx->state = IMX_I2C_STATE_READ;
+ i2c_imx->msg = msgs;
+ i2c_imx->msg_buf_idx = 0;
+
+ /*
+ * By writing the device address we start the state machine in the ISR.
+ * The ISR will report when it is done or when it fails.
+ */
+ imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
+ wait_event_timeout(i2c_imx->queue,
+ i2c_imx->state == IMX_I2C_STATE_DONE ||
+ i2c_imx->state == IMX_I2C_STATE_FAILED,
+ (msgs->len + 1) * HZ / 10);
+ if (i2c_imx->state == IMX_I2C_STATE_FAILED) {
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> read failed with %d\n",
+ __func__, i2c_imx->isr_result);
+ return i2c_imx->isr_result;
+ }
+ if (i2c_imx->state != IMX_I2C_STATE_DONE) {
+		dev_err(&i2c_imx->adapter.dev, "<%s> read timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+ if (!i2c_imx->stopped)
+ return i2c_imx_bus_busy(i2c_imx, 0, false);
+
+ return 0;
+}
+
static int i2c_imx_xfer_common(struct i2c_adapter *adapter,
struct i2c_msg *msgs, int num, bool atomic)
{
@@ -1243,6 +1538,7 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter,
int result;
bool is_lastmsg = false;
struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
+ int use_dma = 0;
/* Start I2C transfer */
result = i2c_imx_start(i2c_imx, atomic);
@@ -1295,15 +1591,25 @@ static int i2c_imx_xfer_common(struct i2c_adapter *adapter,
(temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0),
(temp & I2SR_RXAK ? 1 : 0));
#endif
+
+ use_dma = i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD &&
+ msgs[i].flags & I2C_M_DMA_SAFE;
if (msgs[i].flags & I2C_M_RD) {
- result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg, atomic);
+ int block_data = msgs->flags & I2C_M_RECV_LEN;
+
+ if (atomic)
+ result = i2c_imx_atomic_read(i2c_imx, &msgs[i], is_lastmsg);
+ else if (use_dma && !block_data)
+ result = i2c_imx_dma_read(i2c_imx, &msgs[i], is_lastmsg);
+ else
+ result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg);
} else {
- if (!atomic &&
- i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD &&
- msgs[i].flags & I2C_M_DMA_SAFE)
+ if (atomic)
+ result = i2c_imx_atomic_write(i2c_imx, &msgs[i]);
+ else if (use_dma)
result = i2c_imx_dma_write(i2c_imx, &msgs[i]);
else
- result = i2c_imx_write(i2c_imx, &msgs[i], atomic);
+ result = i2c_imx_write(i2c_imx, &msgs[i]);
}
if (result)
goto fail0;
@@ -1462,12 +1768,19 @@ static int i2c_imx_probe(struct platform_device *pdev)
goto rpm_disable;
/* Request IRQ */
- ret = request_irq(irq, i2c_imx_isr, IRQF_SHARED, pdev->name, i2c_imx);
+ ret = request_irq(irq, i2c_imx_isr, IRQF_SHARED | IRQF_NO_SUSPEND,
+ pdev->name, i2c_imx);
if (ret) {
dev_err(&pdev->dev, "can't claim irq %d\n", irq);
goto rpm_disable;
}
+ /*
+ * We use the single-master property for backward compatibility.
+	 * By default, multi-master mode is enabled.
+ */
+ i2c_imx->multi_master = !of_property_read_bool(pdev->dev.of_node, "single-master");
+
/* Set up clock divider */
i2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;
ret = of_property_read_u32(pdev->dev.of_node,
@@ -1476,7 +1789,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx->bitrate = pdata->bitrate;
i2c_imx->clk_change_nb.notifier_call = i2c_imx_clk_notifier_call;
clk_notifier_register(i2c_imx->clk, &i2c_imx->clk_change_nb);
- i2c_imx_set_clk(i2c_imx, clk_get_rate(i2c_imx->clk));
+ ret = i2c_imx_set_clk(i2c_imx, clk_get_rate(i2c_imx->clk));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get I2C clock\n");
+ goto clk_notifier_unregister;
+ }
i2c_imx_reset_regs(i2c_imx);
@@ -1486,6 +1803,22 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (ret == -EPROBE_DEFER)
goto clk_notifier_unregister;
+ /*
+	 * DMA support is optional for this controller, so DMA setup errors
+	 * should not abort the probe. Only print a warning for the DMA error
+	 * and fall back to PIO mode to keep the I2C bus available whenever possible.
+ */
+ ret = i2c_imx_dma_request(i2c_imx, phy_addr);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ goto clk_notifier_unregister;
+ else if (ret == -ENODEV)
+ dev_dbg(&pdev->dev, "Only use PIO mode\n");
+ else
+ dev_warn(&pdev->dev, "Failed to setup DMA (%pe), only use PIO mode\n",
+ ERR_PTR(ret));
+ }
+
/* Add I2C adapter */
ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
if (ret < 0)
@@ -1500,9 +1833,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx->adapter.name);
dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
- /* Init DMA config if supported */
- i2c_imx_dma_request(i2c_imx, phy_addr);
-
return 0; /* Return OK */
clk_notifier_unregister:
@@ -1554,8 +1884,7 @@ static int i2c_imx_runtime_suspend(struct device *dev)
struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
clk_disable(i2c_imx->clk);
-
- return 0;
+ return pinctrl_pm_select_sleep_state(dev);
}
static int i2c_imx_runtime_resume(struct device *dev)
@@ -1563,6 +1892,10 @@ static int i2c_imx_runtime_resume(struct device *dev)
struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
int ret;
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
ret = clk_enable(i2c_imx->clk);
if (ret)
dev_err(dev, "can't enable I2C clock, ret=%d\n", ret);
@@ -1570,13 +1903,49 @@ static int i2c_imx_runtime_resume(struct device *dev)
return ret;
}
+static int i2c_imx_suspend(struct device *dev)
+{
+ /*
+ * Some I2C devices may need the I2C controller to remain active
+ * during resume_noirq() or suspend_noirq(). If the controller is
+ * autosuspended, there is no way to wake it up once runtime PM is
+ * disabled (in suspend_late()).
+ *
+ * During system resume, the I2C controller will be available only
+ * after runtime PM is re-enabled (in resume_early()). However, this
+ * may be too late for some devices.
+ *
+ * Wake up the controller in the suspend() callback while runtime PM
+ * is still enabled. The I2C controller will remain available until
+ * the suspend_noirq() callback (pm_runtime_force_suspend()) is
+ * called. During resume, the I2C controller can be restored by the
+ * resume_noirq() callback (pm_runtime_force_resume()).
+ *
+	 * Finally, the resume() callback re-enables autosuspend, so the
+	 * I2C controller remains available from resume_noirq() onward,
+	 * and until the next suspend_noirq().
+ */
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int i2c_imx_resume(struct device *dev)
+{
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
static const struct dev_pm_ops i2c_imx_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SYSTEM_SLEEP_PM_OPS(i2c_imx_suspend, i2c_imx_resume)
RUNTIME_PM_OPS(i2c_imx_runtime_suspend, i2c_imx_runtime_resume, NULL)
};
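Editor's sketch of the callback ordering these pm_ops produce, following the
comment in i2c_imx_suspend() above (phase names per the PM core, not this diff):

	/*
	 * Suspend:  i2c_imx_suspend()          - runtime PM still enabled; take a reference
	 *           suspend_late               - PM core disables runtime PM
	 *           pm_runtime_force_suspend() - noirq phase; controller clock gated
	 * Resume:   pm_runtime_force_resume()  - noirq phase; controller restored
	 *           resume_early               - PM core re-enables runtime PM
	 *           i2c_imx_resume()           - drop the reference; re-enable autosuspend
	 */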
static struct platform_driver i2c_imx_driver = {
.probe = i2c_imx_probe,
- .remove_new = i2c_imx_remove,
+ .remove = i2c_imx_remove,
.driver = {
.name = DRIVER_NAME,
.pm = pm_ptr(&i2c_imx_pm_ops),
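Editor's note on the .remove_new -> .remove renames that recur through the
rest of this series: platform_driver::remove was converted tree-wide to
return void, and the transitional .remove_new name was folded back into
.remove. The callback shape is now (sketch, driver name hypothetical):

	static void foo_remove(struct platform_device *pdev)
	{
		/* release resources; no error code can be returned anymore */
	}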
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 859c14e340e7..ce5ca5b90b39 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -524,7 +524,7 @@ MODULE_DEVICE_TABLE(of, i2c_iop3xx_match);
static struct platform_driver iop3xx_i2c_driver = {
.probe = iop3xx_i2c_probe,
- .remove_new = iop3xx_i2c_remove,
+ .remove = iop3xx_i2c_remove,
.driver = {
.name = "IOP3xx-I2C",
.of_match_table = i2c_iop3xx_match,
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index f59158489ad9..a2ac992f9cb0 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -1,41 +1,39 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- i2c-isch.c - Linux kernel driver for Intel SCH chipset SMBus
- - Based on i2c-piix4.c
- Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl> and
- Philip Edelbrock <phil@netroedge.com>
- - Intel SCH support
- Copyright (c) 2007 - 2008 Jacob Jun Pan <jacob.jun.pan@intel.com>
-
-*/
+ * Linux kernel driver for Intel SCH chipset SMBus
+ * - Based on i2c-piix4.c
+ * Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl> and
+ * Philip Edelbrock <phil@netroedge.com>
+ * - Intel SCH support
+ * Copyright (c) 2007 - 2008 Jacob Jun Pan <jacob.jun.pan@intel.com>
+ */
-/*
- Supports:
- Intel SCH chipsets (AF82US15W, AF82US15L, AF82UL11L)
- Note: we assume there can only be one device, with one SMBus interface.
-*/
+/* Supports: Intel SCH chipsets (AF82US15W, AF82US15L, AF82UL11L) */
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
+#include <linux/sprintf.h>
#include <linux/stddef.h>
-#include <linux/ioport.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
/* SCH SMBus address offsets */
-#define SMBHSTCNT (0 + sch_smba)
-#define SMBHSTSTS (1 + sch_smba)
-#define SMBHSTCLK (2 + sch_smba)
-#define SMBHSTADD (4 + sch_smba) /* TSA */
-#define SMBHSTCMD (5 + sch_smba)
-#define SMBHSTDAT0 (6 + sch_smba)
-#define SMBHSTDAT1 (7 + sch_smba)
-#define SMBBLKDAT (0x20 + sch_smba)
-
-/* Other settings */
-#define MAX_RETRIES 5000
+#define SMBHSTCNT 0x00
+#define SMBHSTSTS 0x01
+#define SMBHSTCLK 0x02
+#define SMBHSTADD 0x04 /* TSA */
+#define SMBHSTCMD 0x05
+#define SMBHSTDAT0 0x06
+#define SMBHSTDAT1 0x07
+#define SMBBLKDAT 0x20
/* I2C constants */
#define SCH_QUICK 0x00
@@ -44,109 +42,134 @@
#define SCH_WORD_DATA 0x03
#define SCH_BLOCK_DATA 0x05
-static unsigned short sch_smba;
-static struct i2c_adapter sch_adapter;
+struct sch_i2c {
+ struct i2c_adapter adapter;
+ void __iomem *smba;
+};
+
static int backbone_speed = 33000; /* backbone speed in kHz */
-module_param(backbone_speed, int, S_IRUSR | S_IWUSR);
+module_param(backbone_speed, int, 0600);
MODULE_PARM_DESC(backbone_speed, "Backbone speed in kHz, (default = 33000)");
-/*
- * Start the i2c transaction -- the i2c_access will prepare the transaction
- * and this function will execute it.
- * return 0 for success and others for failure.
+static inline u8 sch_io_rd8(struct sch_i2c *priv, unsigned int offset)
+{
+ return ioread8(priv->smba + offset);
+}
+
+static inline void sch_io_wr8(struct sch_i2c *priv, unsigned int offset, u8 value)
+{
+ iowrite8(value, priv->smba + offset);
+}
+
+static inline u16 sch_io_rd16(struct sch_i2c *priv, unsigned int offset)
+{
+ return ioread16(priv->smba + offset);
+}
+
+static inline void sch_io_wr16(struct sch_i2c *priv, unsigned int offset, u16 value)
+{
+ iowrite16(value, priv->smba + offset);
+}
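Editor's note: these accessors work on port I/O because the probe below maps
the region with devm_ioport_map(), whose __iomem cookie is understood by
ioread8()/iowrite16() and friends:

	priv->smba = devm_ioport_map(dev, res->start, resource_size(res));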
+
+/**
+ * sch_transaction - Start the i2c transaction
+ * @adap: the i2c adapter pointer
+ *
+ * The sch_access() will prepare the transaction and
+ * this function will execute it.
+ *
+ * Return: 0 for success and others for failure.
*/
-static int sch_transaction(void)
+static int sch_transaction(struct i2c_adapter *adap)
{
+ struct sch_i2c *priv = container_of(adap, struct sch_i2c, adapter);
int temp;
- int result = 0;
- int retries = 0;
+ int rc;
- dev_dbg(&sch_adapter.dev, "Transaction (pre): CNT=%02x, CMD=%02x, "
- "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT),
- inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0),
- inb(SMBHSTDAT1));
+ dev_dbg(&adap->dev,
+ "Transaction (pre): CNT=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
+ sch_io_rd8(priv, SMBHSTCNT), sch_io_rd8(priv, SMBHSTCMD),
+ sch_io_rd8(priv, SMBHSTADD),
+ sch_io_rd8(priv, SMBHSTDAT0), sch_io_rd8(priv, SMBHSTDAT1));
/* Make sure the SMBus host is ready to start transmitting */
- temp = inb(SMBHSTSTS) & 0x0f;
+ temp = sch_io_rd8(priv, SMBHSTSTS) & 0x0f;
if (temp) {
/* Can not be busy since we checked it in sch_access */
- if (temp & 0x01) {
- dev_dbg(&sch_adapter.dev, "Completion (%02x). "
- "Clear...\n", temp);
- }
- if (temp & 0x06) {
- dev_dbg(&sch_adapter.dev, "SMBus error (%02x). "
- "Resetting...\n", temp);
- }
- outb(temp, SMBHSTSTS);
- temp = inb(SMBHSTSTS) & 0x0f;
+ if (temp & 0x01)
+ dev_dbg(&adap->dev, "Completion (%02x). Clear...\n", temp);
+ if (temp & 0x06)
+ dev_dbg(&adap->dev, "SMBus error (%02x). Resetting...\n", temp);
+ sch_io_wr8(priv, SMBHSTSTS, temp);
+ temp = sch_io_rd8(priv, SMBHSTSTS) & 0x0f;
if (temp) {
- dev_err(&sch_adapter.dev,
- "SMBus is not ready: (%02x)\n", temp);
+ dev_err(&adap->dev, "SMBus is not ready: (%02x)\n", temp);
return -EAGAIN;
}
}
- /* start the transaction by setting bit 4 */
- outb(inb(SMBHSTCNT) | 0x10, SMBHSTCNT);
-
- do {
- usleep_range(100, 200);
- temp = inb(SMBHSTSTS) & 0x0f;
- } while ((temp & 0x08) && (retries++ < MAX_RETRIES));
+ /* Start the transaction by setting bit 4 */
+ temp = sch_io_rd8(priv, SMBHSTCNT);
+ temp |= 0x10;
+ sch_io_wr8(priv, SMBHSTCNT, temp);
+ rc = read_poll_timeout(sch_io_rd8, temp, !(temp & 0x08), 200, 500000, true, priv, SMBHSTSTS);
/* If the SMBus is still busy, we give up */
- if (retries > MAX_RETRIES) {
- dev_err(&sch_adapter.dev, "SMBus Timeout!\n");
- result = -ETIMEDOUT;
+ if (rc) {
+ dev_err(&adap->dev, "SMBus Timeout!\n");
} else if (temp & 0x04) {
- result = -EIO;
- dev_dbg(&sch_adapter.dev, "Bus collision! SMBus may be "
- "locked until next hard reset. (sorry!)\n");
+ rc = -EIO;
+ dev_dbg(&adap->dev, "Bus collision! SMBus may be locked until next hard reset. (sorry!)\n");
/* Clock stops and target is stuck in mid-transmission */
} else if (temp & 0x02) {
- result = -EIO;
- dev_err(&sch_adapter.dev, "Error: no response!\n");
+ rc = -EIO;
+ dev_err(&adap->dev, "Error: no response!\n");
} else if (temp & 0x01) {
- dev_dbg(&sch_adapter.dev, "Post complete!\n");
- outb(temp, SMBHSTSTS);
- temp = inb(SMBHSTSTS) & 0x07;
+ dev_dbg(&adap->dev, "Post complete!\n");
+ sch_io_wr8(priv, SMBHSTSTS, temp & 0x0f);
+ temp = sch_io_rd8(priv, SMBHSTSTS) & 0x07;
if (temp & 0x06) {
/* Completion clear failed */
- dev_dbg(&sch_adapter.dev, "Failed reset at end of "
- "transaction (%02x), Bus error!\n", temp);
+ dev_dbg(&adap->dev,
+ "Failed reset at end of transaction (%02x), Bus error!\n", temp);
}
} else {
- result = -ENXIO;
- dev_dbg(&sch_adapter.dev, "No such address.\n");
+ rc = -ENXIO;
+ dev_dbg(&adap->dev, "No such address.\n");
}
- dev_dbg(&sch_adapter.dev, "Transaction (post): CNT=%02x, CMD=%02x, "
- "ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb(SMBHSTCNT),
- inb(SMBHSTCMD), inb(SMBHSTADD), inb(SMBHSTDAT0),
- inb(SMBHSTDAT1));
- return result;
+ dev_dbg(&adap->dev, "Transaction (post): CNT=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
+ sch_io_rd8(priv, SMBHSTCNT), sch_io_rd8(priv, SMBHSTCMD),
+ sch_io_rd8(priv, SMBHSTADD),
+ sch_io_rd8(priv, SMBHSTDAT0), sch_io_rd8(priv, SMBHSTDAT1));
+ return rc;
}
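Editor's note on the read_poll_timeout() conversion above: per
include/linux/iopoll.h the helper is

	read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before_read, args...)

so the call polls SMBHSTSTS roughly every 200 us for up to 500 ms, sleeping
once before the first read - about the same budget as the old
usleep_range(100, 200) loop bounded by MAX_RETRIES = 5000.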
-/*
- * This is the main access entry for i2c-sch access
- * adap is i2c_adapter pointer, addr is the i2c device bus address, read_write
- * (0 for read and 1 for write), size is i2c transaction type and data is the
- * union of transaction for data to be transferred or data read from bus.
- * return 0 for success and others for failure.
+/**
+ * sch_access - the main access entry for i2c-sch access
+ * @adap: the i2c adapter pointer
+ * @addr: the i2c device bus address
+ * @flags: I2C_CLIENT_* flags (usually zero or I2C_CLIENT_PEC)
+ * @read_write: 0 for read and 1 for write
+ * @command: Byte interpreted by slave, for protocols which use such bytes
+ * @size: the i2c transaction type
+ * @data: the union of transaction for data to be transferred or data read from bus
+ *
+ * Return: 0 for success and others for failure.
*/
static s32 sch_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data)
{
+ struct sch_i2c *priv = container_of(adap, struct sch_i2c, adapter);
int i, len, temp, rc;
/* Make sure the SMBus host is not busy */
- temp = inb(SMBHSTSTS) & 0x0f;
+ temp = sch_io_rd8(priv, SMBHSTSTS) & 0x0f;
if (temp & 0x08) {
- dev_dbg(&sch_adapter.dev, "SMBus busy (%02x)\n", temp);
+ dev_dbg(&adap->dev, "SMBus busy (%02x)\n", temp);
return -EAGAIN;
}
- temp = inw(SMBHSTCLK);
+ temp = sch_io_rd16(priv, SMBHSTCLK);
if (!temp) {
/*
* We can't determine if we have 33 or 25 MHz clock for
@@ -154,50 +177,48 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
* 100 kHz. If we actually run at 25 MHz the bus will be
* run ~75 kHz instead which should do no harm.
*/
- dev_notice(&sch_adapter.dev,
- "Clock divider uninitialized. Setting defaults\n");
- outw(backbone_speed / (4 * 100), SMBHSTCLK);
+ dev_notice(&adap->dev, "Clock divider uninitialized. Setting defaults\n");
+ sch_io_wr16(priv, SMBHSTCLK, backbone_speed / (4 * 100));
}
- dev_dbg(&sch_adapter.dev, "access size: %d %s\n", size,
- (read_write)?"READ":"WRITE");
+ dev_dbg(&adap->dev, "access size: %d %s\n", size, str_read_write(read_write));
switch (size) {
case I2C_SMBUS_QUICK:
- outb((addr << 1) | read_write, SMBHSTADD);
+ sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write);
size = SCH_QUICK;
break;
case I2C_SMBUS_BYTE:
- outb((addr << 1) | read_write, SMBHSTADD);
+ sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write);
if (read_write == I2C_SMBUS_WRITE)
- outb(command, SMBHSTCMD);
+ sch_io_wr8(priv, SMBHSTCMD, command);
size = SCH_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
- outb((addr << 1) | read_write, SMBHSTADD);
- outb(command, SMBHSTCMD);
+ sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write);
+ sch_io_wr8(priv, SMBHSTCMD, command);
if (read_write == I2C_SMBUS_WRITE)
- outb(data->byte, SMBHSTDAT0);
+ sch_io_wr8(priv, SMBHSTDAT0, data->byte);
size = SCH_BYTE_DATA;
break;
case I2C_SMBUS_WORD_DATA:
- outb((addr << 1) | read_write, SMBHSTADD);
- outb(command, SMBHSTCMD);
+ sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write);
+ sch_io_wr8(priv, SMBHSTCMD, command);
if (read_write == I2C_SMBUS_WRITE) {
- outb(data->word & 0xff, SMBHSTDAT0);
- outb((data->word & 0xff00) >> 8, SMBHSTDAT1);
+ sch_io_wr8(priv, SMBHSTDAT0, data->word >> 0);
+ sch_io_wr8(priv, SMBHSTDAT1, data->word >> 8);
}
size = SCH_WORD_DATA;
break;
case I2C_SMBUS_BLOCK_DATA:
- outb((addr << 1) | read_write, SMBHSTADD);
- outb(command, SMBHSTCMD);
+ sch_io_wr8(priv, SMBHSTADD, (addr << 1) | read_write);
+ sch_io_wr8(priv, SMBHSTCMD, command);
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
if (len == 0 || len > I2C_SMBUS_BLOCK_MAX)
return -EINVAL;
- outb(len, SMBHSTDAT0);
+ sch_io_wr8(priv, SMBHSTDAT0, len);
for (i = 1; i <= len; i++)
- outb(data->block[i], SMBBLKDAT+i-1);
+ sch_io_wr8(priv, SMBBLKDAT + i - 1, data->block[i]);
}
size = SCH_BLOCK_DATA;
break;
@@ -205,10 +226,13 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
return -EOPNOTSUPP;
}
- dev_dbg(&sch_adapter.dev, "write size %d to 0x%04x\n", size, SMBHSTCNT);
- outb((inb(SMBHSTCNT) & 0xb0) | (size & 0x7), SMBHSTCNT);
+ dev_dbg(&adap->dev, "write size %d to 0x%04x\n", size, SMBHSTCNT);
+
+ temp = sch_io_rd8(priv, SMBHSTCNT);
+ temp = (temp & 0xb0) | (size & 0x7);
+ sch_io_wr8(priv, SMBHSTCNT, temp);
- rc = sch_transaction();
+ rc = sch_transaction(adap);
if (rc) /* Error in transaction */
return rc;
@@ -218,17 +242,18 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
switch (size) {
case SCH_BYTE:
case SCH_BYTE_DATA:
- data->byte = inb(SMBHSTDAT0);
+ data->byte = sch_io_rd8(priv, SMBHSTDAT0);
break;
case SCH_WORD_DATA:
- data->word = inb(SMBHSTDAT0) + (inb(SMBHSTDAT1) << 8);
+ data->word = (sch_io_rd8(priv, SMBHSTDAT0) << 0) +
+ (sch_io_rd8(priv, SMBHSTDAT1) << 8);
break;
case SCH_BLOCK_DATA:
- data->block[0] = inb(SMBHSTDAT0);
+ data->block[0] = sch_io_rd8(priv, SMBHSTDAT0);
if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
for (i = 1; i <= data->block[0]; i++)
- data->block[i] = inb(SMBBLKDAT+i-1);
+ data->block[i] = sch_io_rd8(priv, SMBBLKDAT + i - 1);
break;
}
return 0;
@@ -246,51 +271,34 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = sch_func,
};
-static struct i2c_adapter sch_adapter = {
- .owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON,
- .algo = &smbus_algorithm,
-};
-
-static int smbus_sch_probe(struct platform_device *dev)
+static int smbus_sch_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct sch_i2c *priv;
struct resource *res;
- int retval;
- res = platform_get_resource(dev, IORESOURCE_IO, 0);
- if (!res)
- return -EBUSY;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- if (!devm_request_region(&dev->dev, res->start, resource_size(res),
- dev->name)) {
- dev_err(&dev->dev, "SMBus region 0x%x already in use!\n",
- sch_smba);
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
return -EBUSY;
- }
-
- sch_smba = res->start;
- dev_dbg(&dev->dev, "SMBA = 0x%X\n", sch_smba);
+ priv->smba = devm_ioport_map(dev, res->start, resource_size(res));
+ if (!priv->smba)
+ return dev_err_probe(dev, -EBUSY, "SMBus region %pR already in use!\n", res);
- /* set up the sysfs linkage to our parent device */
- sch_adapter.dev.parent = &dev->dev;
+ /* Set up the sysfs linkage to our parent device */
+ priv->adapter.dev.parent = dev;
+ priv->adapter.owner = THIS_MODULE;
+ priv->adapter.class = I2C_CLASS_HWMON;
+ priv->adapter.algo = &smbus_algorithm;
- snprintf(sch_adapter.name, sizeof(sch_adapter.name),
- "SMBus SCH adapter at %04x", sch_smba);
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+ "SMBus SCH adapter at %04x", (unsigned short)res->start);
- retval = i2c_add_adapter(&sch_adapter);
- if (retval)
- sch_smba = 0;
-
- return retval;
-}
-
-static void smbus_sch_remove(struct platform_device *pdev)
-{
- if (sch_smba) {
- i2c_del_adapter(&sch_adapter);
- sch_smba = 0;
- }
+ return devm_i2c_add_adapter(dev, &priv->adapter);
}
static struct platform_driver smbus_sch_driver = {
@@ -298,7 +306,6 @@ static struct platform_driver smbus_sch_driver = {
.name = "isch_smbus",
},
.probe = smbus_sch_probe,
- .remove_new = smbus_sch_remove,
};
module_platform_driver(smbus_sch_driver);
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 92cc5b091137..664a5471d933 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -847,7 +847,7 @@ static void jz4780_i2c_remove(struct platform_device *pdev)
static struct platform_driver jz4780_i2c_driver = {
.probe = jz4780_i2c_probe,
- .remove_new = jz4780_i2c_remove,
+ .remove = jz4780_i2c_remove,
.driver = {
.name = "jz4780-i2c",
.of_match_table = jz4780_i2c_of_matches,
diff --git a/drivers/i2c/busses/i2c-keba.c b/drivers/i2c/busses/i2c-keba.c
index 759732a07ef0..7b9ed2592f5b 100644
--- a/drivers/i2c/busses/i2c-keba.c
+++ b/drivers/i2c/busses/i2c-keba.c
@@ -464,12 +464,8 @@ static void ki2c_unregister_devices(struct ki2c *ki2c)
{
int i;
- for (i = 0; i < ki2c->client_size; i++) {
- struct i2c_client *client = ki2c->client[i];
-
- if (client)
- i2c_unregister_device(client);
- }
+ for (i = 0; i < ki2c->client_size; i++)
+ i2c_unregister_device(ki2c->client[i]);
}
static int ki2c_register_devices(struct ki2c *ki2c)
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index eb66942e0b7d..212196af68ba 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -385,7 +385,7 @@ static struct platform_driver kempld_i2c_driver = {
.pm = pm_sleep_ptr(&kempld_i2c_pm_ops),
},
.probe = kempld_i2c_probe,
- .remove_new = kempld_i2c_remove,
+ .remove = kempld_i2c_remove,
};
module_platform_driver(kempld_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-ljca.c b/drivers/i2c/busses/i2c-ljca.c
index 1dc516ef0fdd..93274f0c2d72 100644
--- a/drivers/i2c/busses/i2c-ljca.c
+++ b/drivers/i2c/busses/i2c-ljca.c
@@ -340,4 +340,4 @@ MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_AUTHOR("Lixu Zhang <lixu.zhang@intel.com>");
MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-I2C driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(LJCA);
+MODULE_IMPORT_NS("LJCA");
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 9fb33cbf7419..6943a0de860a 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -462,7 +462,7 @@ MODULE_DEVICE_TABLE(of, lpc2k_i2c_match);
static struct platform_driver i2c_lpc2k_driver = {
.probe = i2c_lpc2k_probe,
- .remove_new = i2c_lpc2k_remove,
+ .remove = i2c_lpc2k_remove,
.driver = {
.name = "lpc2k-i2c",
.pm = pm_sleep_ptr(&i2c_lpc2k_dev_pm_ops),
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index c7b203cc4434..e1d69537353b 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -565,7 +565,7 @@ MODULE_DEVICE_TABLE(of, meson_i2c_match);
static struct platform_driver meson_i2c_driver = {
.probe = meson_i2c_probe,
- .remove_new = meson_i2c_remove,
+ .remove = meson_i2c_remove,
.driver = {
.name = "meson-i2c",
.of_match_table = meson_i2c_match,
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 0b0a1c4d17ca..5db73429125c 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -93,27 +93,35 @@
* @base: pointer to register struct
* @dev: device reference
* @i2c_clk: clock reference for i2c input clock
+ * @msg_queue: pointer to the queue of messages to be sent
* @buf: pointer to msg buffer for easier use
* @msg_complete: xfer completion object
* @adapter: core i2c abstraction
* @msg_err: error code for completed message
* @bus_clk_rate: current i2c bus clock rate
* @isr_status: cached copy of local ISR status
+ * @total_num: total number of messages to be sent/received
+ * @current_num: index of the current message being sent/received
* @msg_len: number of bytes transferred in msg
* @addr: address of the current slave
+ * @restart_needed: whether or not a repeated start is required after the current message
*/
struct mchp_corei2c_dev {
void __iomem *base;
struct device *dev;
struct clk *i2c_clk;
+ struct i2c_msg *msg_queue;
u8 *buf;
struct completion msg_complete;
struct i2c_adapter adapter;
int msg_err;
+ int total_num;
+ int current_num;
u32 bus_clk_rate;
u32 isr_status;
u16 msg_len;
u8 addr;
+ bool restart_needed;
};
static void mchp_corei2c_core_disable(struct mchp_corei2c_dev *idev)
@@ -222,6 +230,47 @@ static int mchp_corei2c_fill_tx(struct mchp_corei2c_dev *idev)
return 0;
}
+static void mchp_corei2c_next_msg(struct mchp_corei2c_dev *idev)
+{
+ struct i2c_msg *this_msg;
+ u8 ctrl;
+
+ if (idev->current_num >= idev->total_num) {
+ complete(&idev->msg_complete);
+ return;
+ }
+
+ /*
+	 * If there has been an error, the ISR needs to return control
+	 * to the "main" part of the driver, so that it does not keep
+	 * sending messages once it completes and clears the SI bit.
+ */
+ if (idev->msg_err) {
+ complete(&idev->msg_complete);
+ return;
+ }
+
+ this_msg = idev->msg_queue++;
+
+ if (idev->current_num < (idev->total_num - 1)) {
+ struct i2c_msg *next_msg = idev->msg_queue;
+
+ idev->restart_needed = next_msg->flags & I2C_M_RD;
+ } else {
+ idev->restart_needed = false;
+ }
+
+ idev->addr = i2c_8bit_addr_from_msg(this_msg);
+ idev->msg_len = this_msg->len;
+ idev->buf = this_msg->buf;
+
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ ctrl |= CTRL_STA;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+
+ idev->current_num++;
+}
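Editor's note: restart_needed records whether the *next* message is a read
(I2C_M_RD), since a write followed by a read must be joined by a repeated
START; the ISR uses it (the "finished" path below) to hand off to
mchp_corei2c_next_msg(), which raises CTRL_STA again.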
+
static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
{
u32 status = idev->isr_status;
@@ -238,8 +287,6 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
ctrl &= ~CTRL_STA;
writeb(idev->addr, idev->base + CORE_I2C_DATA);
writeb(ctrl, idev->base + CORE_I2C_CTRL);
- if (idev->msg_len == 0)
- finished = true;
break;
case STATUS_M_ARB_LOST:
idev->msg_err = -EAGAIN;
@@ -247,10 +294,14 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
break;
case STATUS_M_SLAW_ACK:
case STATUS_M_TX_DATA_ACK:
- if (idev->msg_len > 0)
+ if (idev->msg_len > 0) {
mchp_corei2c_fill_tx(idev);
- else
- last_byte = true;
+ } else {
+ if (idev->restart_needed)
+ finished = true;
+ else
+ last_byte = true;
+ }
break;
case STATUS_M_TX_DATA_NACK:
case STATUS_M_SLAR_NACK:
@@ -287,7 +338,7 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
mchp_corei2c_stop(idev);
if (last_byte || finished)
- complete(&idev->msg_complete);
+ mchp_corei2c_next_msg(idev);
return IRQ_HANDLED;
}
@@ -311,21 +362,48 @@ static irqreturn_t mchp_corei2c_isr(int irq, void *_dev)
return ret;
}
-static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
- struct i2c_msg *msg)
+static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
{
- u8 ctrl;
+ struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
+ struct i2c_msg *this_msg = msgs;
unsigned long time_left;
+ u8 ctrl;
+
+ mchp_corei2c_core_enable(idev);
+
+ /*
+	 * The ISR controls the flow of a transfer, so the queue information
+	 * needs to be saved somewhere the ISR can access it.
+ */
+ idev->restart_needed = false;
+ idev->msg_queue = msgs;
+ idev->total_num = num;
+ idev->current_num = 0;
- idev->addr = i2c_8bit_addr_from_msg(msg);
- idev->msg_len = msg->len;
- idev->buf = msg->buf;
+ /*
+	 * But the first entry into the ISR is triggered by the START issued in
+	 * this function, so the first message needs to be "dequeued" here.
+ */
+ idev->addr = i2c_8bit_addr_from_msg(this_msg);
+ idev->msg_len = this_msg->len;
+ idev->buf = this_msg->buf;
idev->msg_err = 0;
- reinit_completion(&idev->msg_complete);
+ if (idev->total_num > 1) {
+ struct i2c_msg *next_msg = msgs + 1;
- mchp_corei2c_core_enable(idev);
+ idev->restart_needed = next_msg->flags & I2C_M_RD;
+ }
+ idev->current_num++;
+ idev->msg_queue++;
+
+ reinit_completion(&idev->msg_complete);
+
+ /*
+	 * Send the first START to pass control to the ISR.
+ */
ctrl = readb(idev->base + CORE_I2C_CTRL);
ctrl |= CTRL_STA;
writeb(ctrl, idev->base + CORE_I2C_CTRL);
@@ -335,20 +413,8 @@ static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
if (!time_left)
return -ETIMEDOUT;
- return idev->msg_err;
-}
-
-static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
- int i, ret;
-
- for (i = 0; i < num; i++) {
- ret = mchp_corei2c_xfer_msg(idev, msgs++);
- if (ret)
- return ret;
- }
+ if (idev->msg_err)
+ return idev->msg_err;
return num;
}
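Editor's note: returning num on success is the i2c_algorithm master_xfer
convention (number of messages completed); any error the ISR recorded in
idev->msg_err is propagated instead as a negative errno.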
@@ -462,7 +528,7 @@ MODULE_DEVICE_TABLE(of, mchp_corei2c_of_match);
static struct platform_driver mchp_corei2c_driver = {
.probe = mchp_corei2c_probe,
- .remove_new = mchp_corei2c_remove,
+ .remove = mchp_corei2c_remove,
.driver = {
.name = "microchip-corei2c",
.of_match_table = mchp_corei2c_of_match,
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index b3a73921ab69..21f67f3b65b6 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -2456,7 +2456,7 @@ static void mlxbf_i2c_remove(struct platform_device *pdev)
static struct platform_driver mlxbf_i2c_driver = {
.probe = mlxbf_i2c_probe,
- .remove_new = mlxbf_i2c_remove,
+ .remove = mlxbf_i2c_remove,
.driver = {
.name = "i2c-mlxbf",
.acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids),
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 8223f6d29eb3..07d3cadbf510 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -591,7 +591,7 @@ static void mlxcpld_i2c_remove(struct platform_device *pdev)
static struct platform_driver mlxcpld_i2c_driver = {
.probe = mlxcpld_i2c_probe,
- .remove_new = mlxcpld_i2c_remove,
+ .remove = mlxcpld_i2c_remove,
.driver = {
.name = MLXCPLD_I2C_DEVICE_NAME,
},
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 236d6b8ba867..28c5c5c1fb7a 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -938,7 +938,7 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
/* Structure for a device driver */
static struct platform_driver mpc_i2c_driver = {
.probe = fsl_i2c_probe,
- .remove_new = fsl_i2c_remove,
+ .remove = fsl_i2c_remove,
.driver = {
.name = "mpc-i2c",
.of_match_table = mpc_i2c_of_match,
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index e0ba653dec2d..5bd342047d59 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1550,7 +1550,7 @@ static const struct dev_pm_ops mtk_i2c_pm = {
static struct platform_driver mtk_i2c_driver = {
.probe = mtk_i2c_probe,
- .remove_new = mtk_i2c_remove,
+ .remove = mtk_i2c_remove,
.driver = {
.name = I2C_DRV_NAME,
.pm = pm_sleep_ptr(&mtk_i2c_pm),
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 23d417ff5e71..2103f21f9ddd 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -331,7 +331,7 @@ static void mtk_i2c_remove(struct platform_device *pdev)
static struct platform_driver mtk_i2c_driver = {
.probe = mtk_i2c_probe,
- .remove_new = mtk_i2c_remove,
+ .remove = mtk_i2c_remove,
.driver = {
.name = "i2c-mt7621",
.of_match_table = i2c_mtk_dt_ids,
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 29f94efedf60..874309580c33 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -1104,7 +1104,7 @@ static const struct dev_pm_ops mv64xxx_i2c_pm_ops = {
static struct platform_driver mv64xxx_i2c_driver = {
.probe = mv64xxx_i2c_probe,
- .remove_new = mv64xxx_i2c_remove,
+ .remove = mv64xxx_i2c_remove,
.driver = {
.name = MV64XXX_I2C_CTLR_NAME,
.pm = &mv64xxx_i2c_pm_ops,
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 36def0a9c95c..ad62d56b2186 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -881,7 +881,7 @@ static struct platform_driver mxs_i2c_driver = {
.of_match_table = mxs_i2c_dt_ids,
},
.probe = mxs_i2c_probe,
- .remove_new = mxs_i2c_remove,
+ .remove = mxs_i2c_remove,
};
static int __init mxs_i2c_init(void)
diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
deleted file mode 100644
index 69a71bc9830d..000000000000
--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * i2c-nforce2-s4985.c - i2c-nforce2 extras for the Tyan S4985 motherboard
- *
- * Copyright (C) 2008 Jean Delvare <jdelvare@suse.de>
- */
-
-/*
- * We select the channels by sending commands to the Philips
- * PCA9556 chip at I2C address 0x18. The main adapter is used for
- * the non-multiplexed part of the bus, and 4 virtual adapters
- * are defined for the multiplexed addresses: 0x50-0x53 (memory
- * module EEPROM) located on channels 1-4. We define one virtual
- * adapter per CPU, which corresponds to one multiplexed channel:
- * CPU0: virtual adapter 1, channel 1
- * CPU1: virtual adapter 2, channel 2
- * CPU2: virtual adapter 3, channel 3
- * CPU3: virtual adapter 4, channel 4
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/mutex.h>
-
-extern struct i2c_adapter *nforce2_smbus;
-
-static struct i2c_adapter *s4985_adapter;
-static struct i2c_algorithm *s4985_algo;
-
-/* Wrapper access functions for multiplexed SMBus */
-static DEFINE_MUTEX(nforce2_lock);
-
-static s32 nforce2_access_virt0(struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data *data)
-{
- int error;
-
- /* We exclude the multiplexed addresses */
- if ((addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30
- || addr == 0x18)
- return -ENXIO;
-
- mutex_lock(&nforce2_lock);
- error = nforce2_smbus->algo->smbus_xfer(adap, addr, flags, read_write,
- command, size, data);
- mutex_unlock(&nforce2_lock);
-
- return error;
-}
-
-/* We remember the last used channels combination so as to only switch
- channels when it is really needed. This greatly reduces the SMBus
- overhead, but also assumes that nobody will be writing to the PCA9556
- in our back. */
-static u8 last_channels;
-
-static inline s32 nforce2_access_channel(struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data *data,
- u8 channels)
-{
- int error;
-
- /* We exclude the non-multiplexed addresses */
- if ((addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30)
- return -ENXIO;
-
- mutex_lock(&nforce2_lock);
- if (last_channels != channels) {
- union i2c_smbus_data mplxdata;
- mplxdata.byte = channels;
-
- error = nforce2_smbus->algo->smbus_xfer(adap, 0x18, 0,
- I2C_SMBUS_WRITE, 0x01,
- I2C_SMBUS_BYTE_DATA,
- &mplxdata);
- if (error)
- goto UNLOCK;
- last_channels = channels;
- }
- error = nforce2_smbus->algo->smbus_xfer(adap, addr, flags, read_write,
- command, size, data);
-
-UNLOCK:
- mutex_unlock(&nforce2_lock);
- return error;
-}
-
-static s32 nforce2_access_virt1(struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data *data)
-{
- /* CPU0: channel 1 enabled */
- return nforce2_access_channel(adap, addr, flags, read_write, command,
- size, data, 0x02);
-}
-
-static s32 nforce2_access_virt2(struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data *data)
-{
- /* CPU1: channel 2 enabled */
- return nforce2_access_channel(adap, addr, flags, read_write, command,
- size, data, 0x04);
-}
-
-static s32 nforce2_access_virt3(struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data *data)
-{
- /* CPU2: channel 3 enabled */
- return nforce2_access_channel(adap, addr, flags, read_write, command,
- size, data, 0x08);
-}
-
-static s32 nforce2_access_virt4(struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size,
- union i2c_smbus_data *data)
-{
- /* CPU3: channel 4 enabled */
- return nforce2_access_channel(adap, addr, flags, read_write, command,
- size, data, 0x10);
-}
-
-static int __init nforce2_s4985_init(void)
-{
- int i, error;
- union i2c_smbus_data ioconfig;
-
- if (!nforce2_smbus)
- return -ENODEV;
-
- /* Configure the PCA9556 multiplexer */
- ioconfig.byte = 0x00; /* All I/O to output mode */
- error = i2c_smbus_xfer(nforce2_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03,
- I2C_SMBUS_BYTE_DATA, &ioconfig);
- if (error) {
- dev_err(&nforce2_smbus->dev, "PCA9556 configuration failed\n");
- error = -EIO;
- goto ERROR0;
- }
-
- /* Unregister physical bus */
- i2c_del_adapter(nforce2_smbus);
-
- printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4985\n");
- /* Define the 5 virtual adapters and algorithms structures */
- s4985_adapter = kcalloc(5, sizeof(struct i2c_adapter), GFP_KERNEL);
- if (!s4985_adapter) {
- error = -ENOMEM;
- goto ERROR1;
- }
- s4985_algo = kcalloc(5, sizeof(struct i2c_algorithm), GFP_KERNEL);
- if (!s4985_algo) {
- error = -ENOMEM;
- goto ERROR2;
- }
-
- /* Fill in the new structures */
- s4985_algo[0] = *(nforce2_smbus->algo);
- s4985_algo[0].smbus_xfer = nforce2_access_virt0;
- s4985_adapter[0] = *nforce2_smbus;
- s4985_adapter[0].algo = s4985_algo;
- s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent;
- for (i = 1; i < 5; i++) {
- s4985_algo[i] = *(nforce2_smbus->algo);
- s4985_adapter[i] = *nforce2_smbus;
- snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name),
- "SMBus nForce2 adapter (CPU%d)", i - 1);
- s4985_adapter[i].algo = s4985_algo + i;
- s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent;
- }
- s4985_algo[1].smbus_xfer = nforce2_access_virt1;
- s4985_algo[2].smbus_xfer = nforce2_access_virt2;
- s4985_algo[3].smbus_xfer = nforce2_access_virt3;
- s4985_algo[4].smbus_xfer = nforce2_access_virt4;
-
- /* Register virtual adapters */
- for (i = 0; i < 5; i++) {
- error = i2c_add_adapter(s4985_adapter + i);
- if (error) {
- printk(KERN_ERR "i2c-nforce2-s4985: "
- "Virtual adapter %d registration "
- "failed, module not inserted\n", i);
- for (i--; i >= 0; i--)
- i2c_del_adapter(s4985_adapter + i);
- goto ERROR3;
- }
- }
-
- return 0;
-
-ERROR3:
- kfree(s4985_algo);
- s4985_algo = NULL;
-ERROR2:
- kfree(s4985_adapter);
- s4985_adapter = NULL;
-ERROR1:
- /* Restore physical bus */
- i2c_add_adapter(nforce2_smbus);
-ERROR0:
- return error;
-}
-
-static void __exit nforce2_s4985_exit(void)
-{
- if (s4985_adapter) {
- int i;
-
- for (i = 0; i < 5; i++)
- i2c_del_adapter(s4985_adapter+i);
- kfree(s4985_adapter);
- s4985_adapter = NULL;
- }
- kfree(s4985_algo);
- s4985_algo = NULL;
-
- /* Restore physical bus */
- if (i2c_add_adapter(nforce2_smbus))
- printk(KERN_ERR "i2c-nforce2-s4985: "
- "Physical bus restoration failed\n");
-}
-
-MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
-MODULE_DESCRIPTION("S4985 SMBus multiplexing");
-MODULE_LICENSE("GPL");
-
-module_init(nforce2_s4985_init);
-module_exit(nforce2_s4985_exit);
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index fab662e6bc08..d58a308582e4 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -117,20 +117,6 @@ static const struct dmi_system_id nforce2_dmi_blacklist2[] = {
static struct pci_driver nforce2_driver;
-/* For multiplexing support, we need a global reference to the 1st
- SMBus channel */
-#if IS_ENABLED(CONFIG_I2C_NFORCE2_S4985)
-struct i2c_adapter *nforce2_smbus;
-EXPORT_SYMBOL_GPL(nforce2_smbus);
-
-static void nforce2_set_reference(struct i2c_adapter *adap)
-{
- nforce2_smbus = adap;
-}
-#else
-static inline void nforce2_set_reference(struct i2c_adapter *adap) { }
-#endif
-
static void nforce2_abort(struct i2c_adapter *adap)
{
struct nforce2_smbus *smbus = adap->algo_data;
@@ -411,7 +397,6 @@ static int nforce2_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
- nforce2_set_reference(&smbuses[0].adapter);
return 0;
}
@@ -420,7 +405,6 @@ static void nforce2_remove(struct pci_dev *dev)
{
struct nforce2_smbus *smbuses = pci_get_drvdata(dev);
- nforce2_set_reference(NULL);
if (smbuses[0].base) {
i2c_del_adapter(&smbuses[0].adapter);
release_region(smbuses[0].base, smbuses[0].size);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index ad0f02acdb12..d2877e4cc28d 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -6,10 +6,10 @@
* I2C master mode controller driver, used in Nomadik 8815
* and Ux500 platforms.
*
- * The Mobileye EyeQ5 platform is also supported; it uses
+ * The Mobileye EyeQ5 and EyeQ6H platforms are also supported; they use
* the same Ux500/DB8500 IP block with two quirks:
* - The memory bus only supports 32-bit accesses.
- * - A register must be configured for the I2C speed mode;
+ * - (only EyeQ5) A register must be configured for the I2C speed mode;
* it is located in a shared register region called OLB.
*
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
@@ -26,6 +26,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -396,7 +397,7 @@ static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *priv, u16 flags)
*/
static void setup_i2c_controller(struct nmk_i2c_dev *priv)
{
- u32 brcr1, brcr2;
+ u32 brcr;
u32 i2c_clk, div;
u32 ns;
u16 slsu;
@@ -443,7 +444,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *priv)
/*
* The spec says, in case of std. mode the divider is
* 2 whereas it is 3 for fast and fastplus mode of
- * operation. TODO - high speed support.
+ * operation.
*/
div = (priv->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2;
@@ -451,30 +452,23 @@ static void setup_i2c_controller(struct nmk_i2c_dev *priv)
* generate the mask for baud rate counters. The controller
* has two baud rate counters. One is used for High speed
* operation, and the other is for std, fast mode, fast mode
- * plus operation. Currently we do not supprt high speed mode
- * so set brcr1 to 0.
+ * plus operation.
+ *
+ * BRCR is a clock divider. Pick the highest value that keeps the
+ * resulting rate at or below the target, e.g. when asking for
+ * 400kHz you want a bus rate <=400kHz (and never above it).
*/
- brcr1 = FIELD_PREP(I2C_BRCR_BRCNT1, 0);
- brcr2 = FIELD_PREP(I2C_BRCR_BRCNT2, i2c_clk / (priv->clk_freq * div));
+ brcr = DIV_ROUND_UP(i2c_clk, priv->clk_freq * div);
+
+ if (priv->sm == I2C_FREQ_MODE_HIGH_SPEED)
+ brcr = FIELD_PREP(I2C_BRCR_BRCNT1, brcr);
+ else
+ brcr = FIELD_PREP(I2C_BRCR_BRCNT2, brcr);
/* set the baud rate counter register */
- writel((brcr1 | brcr2), priv->virtbase + I2C_BRCR);
+ writel(brcr, priv->virtbase + I2C_BRCR);
- /*
- * set the speed mode. Currently we support
- * only standard and fast mode of operation
- * TODO - support for fast mode plus (up to 1Mb/s)
- * and high speed (up to 3.4 Mb/s)
- */
- if (priv->sm > I2C_FREQ_MODE_FAST) {
- dev_err(&priv->adev->dev,
- "do not support this mode defaulting to std. mode\n");
- brcr2 = FIELD_PREP(I2C_BRCR_BRCNT2,
- i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2));
- writel((brcr1 | brcr2), priv->virtbase + I2C_BRCR);
- writel(FIELD_PREP(I2C_CR_SM, I2C_FREQ_MODE_STANDARD),
- priv->virtbase + I2C_CR);
- }
+ /* set the speed mode */
writel(FIELD_PREP(I2C_CR_SM, priv->sm), priv->virtbase + I2C_CR);
/* set the Tx and Rx FIFO threshold */
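
The divider choice above can be read as follows: assuming the bus rate is
i2c_clk / (div * BRCR), rounding the quotient up picks the smallest BRCR whose
resulting rate does not exceed the request. A hedged sketch of the same math
(helper name is illustrative):

	#include <linux/math.h>		/* DIV_ROUND_UP() */

	/* e.g. i2c_clk = 48 MHz, target 400 kHz, div = 3 -> BRCR = 40,
	 * giving 48 MHz / (3 * 40) = 400 kHz exactly */
	static u32 demo_brcr(u32 i2c_clk, u32 target_hz, u32 div)
	{
		/* rounding up enlarges the divider on a non-exact quotient,
		 * so the resulting bus rate lands at or below target_hz */
		return DIV_ROUND_UP(i2c_clk, target_hz * div);
	}
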
@@ -1015,11 +1009,14 @@ static void nmk_i2c_of_probe(struct device_node *np,
if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
priv->clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
- /* This driver only supports 'standard' and 'fast' modes of operation. */
if (priv->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ)
priv->sm = I2C_FREQ_MODE_STANDARD;
- else
+ else if (priv->clk_freq <= I2C_MAX_FAST_MODE_FREQ)
priv->sm = I2C_FREQ_MODE_FAST;
+ else if (priv->clk_freq <= I2C_MAX_FAST_MODE_PLUS_FREQ)
+ priv->sm = I2C_FREQ_MODE_FAST_PLUS;
+ else
+ priv->sm = I2C_FREQ_MODE_HIGH_SPEED;
priv->tft = 1; /* Tx FIFO threshold */
priv->rft = 8; /* Rx FIFO threshold */
@@ -1046,8 +1043,6 @@ static int nmk_i2c_eyeq5_probe(struct nmk_i2c_dev *priv)
struct regmap *olb;
unsigned int id;
- priv->has_32b_bus = true;
-
olb = syscon_regmap_lookup_by_phandle_args(np, "mobileye,olb", 1, &id);
if (IS_ERR(olb))
return PTR_ERR(olb);
@@ -1068,15 +1063,40 @@ static int nmk_i2c_eyeq5_probe(struct nmk_i2c_dev *priv)
return 0;
}
+#define NMK_I2C_EYEQ_FLAG_32B_BUS BIT(0)
+#define NMK_I2C_EYEQ_FLAG_IS_EYEQ5 BIT(1)
+
+static const struct of_device_id nmk_i2c_eyeq_match_table[] = {
+ {
+ .compatible = "mobileye,eyeq5-i2c",
+ .data = (void *)(NMK_I2C_EYEQ_FLAG_32B_BUS | NMK_I2C_EYEQ_FLAG_IS_EYEQ5),
+ },
+ {
+ .compatible = "mobileye,eyeq6h-i2c",
+ .data = (void *)NMK_I2C_EYEQ_FLAG_32B_BUS,
+ },
+ { /* sentinel */ }
+};
+
static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
{
- int ret = 0;
- struct nmk_i2c_dev *priv;
+ struct i2c_vendor_data *vendor = id->data;
+ u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1;
struct device_node *np = adev->dev.of_node;
+ const struct of_device_id *match;
struct device *dev = &adev->dev;
+ unsigned long match_flags = 0;
+ struct nmk_i2c_dev *priv;
struct i2c_adapter *adap;
- struct i2c_vendor_data *vendor = id->data;
- u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1;
+ int ret = 0;
+
+ /*
+ * We do not want to attach a .of_match_table to our amba driver.
+ * Do not convert to device_get_match_data().
+ */
+ match = of_match_device(nmk_i2c_eyeq_match_table, dev);
+ if (match)
+ match_flags = (unsigned long)match->data;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1084,10 +1104,10 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
priv->vendor = vendor;
priv->adev = adev;
- priv->has_32b_bus = false;
+ priv->has_32b_bus = match_flags & NMK_I2C_EYEQ_FLAG_32B_BUS;
nmk_i2c_of_probe(np, priv);
- if (of_device_is_compatible(np, "mobileye,eyeq5-i2c")) {
+ if (match_flags & NMK_I2C_EYEQ_FLAG_IS_EYEQ5) {
ret = nmk_i2c_eyeq5_probe(priv);
if (ret)
return dev_err_probe(dev, ret, "failed OLB lookup\n");
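
The EyeQ handling above uses the common flags-in-.data idiom: the
per-compatible quirk bits travel through the void * match data and come back
with a cast. A minimal sketch of the consumer side (names are illustrative;
<linux/of_device.h> is the include the patch adds for of_match_device()):

	#include <linux/of_device.h>

	static unsigned long demo_match_flags(struct device *dev,
					      const struct of_device_id *table)
	{
		const struct of_device_id *match = of_match_device(table, dev);

		/* NULL when nothing matches: treat as "no quirk flags" */
		return match ? (unsigned long)match->data : 0;
	}
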
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index bbcb4d6668ce..3ca08b8ef8af 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -263,6 +263,265 @@ static const int npcm_i2caddr[I2C_NUM_OWN_ADDR] = {
#define I2C_FREQ_MIN_HZ 10000
#define I2C_FREQ_MAX_HZ I2C_MAX_FAST_MODE_PLUS_FREQ
+struct smb_timing_t {
+ u32 core_clk;
+ u8 hldt;
+ u8 dbcnt;
+ u16 sclfrq;
+ u8 scllt;
+ u8 sclht;
+ bool fast_mode;
+};
+
+static struct smb_timing_t smb_timing_100khz[] = {
+ {
+ .core_clk = 100000000, .hldt = 0x2A, .dbcnt = 0x4,
+ .sclfrq = 0xFB, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 62500000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x9D, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 50000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x7E, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 48000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x79, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 40000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x65, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 30000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x4C, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 29000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x49, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 26000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x42, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 25000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x3F, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 24000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x3D, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 20000000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x33, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 16180000, .hldt = 0x2A, .dbcnt = 0x1,
+ .sclfrq = 0x29, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 15000000, .hldt = 0x23, .dbcnt = 0x1,
+ .sclfrq = 0x26, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 13000000, .hldt = 0x1D, .dbcnt = 0x1,
+ .sclfrq = 0x21, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 12000000, .hldt = 0x1B, .dbcnt = 0x1,
+ .sclfrq = 0x1F, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 10000000, .hldt = 0x18, .dbcnt = 0x1,
+ .sclfrq = 0x1A, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 9000000, .hldt = 0x16, .dbcnt = 0x1,
+ .sclfrq = 0x17, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 8090000, .hldt = 0x14, .dbcnt = 0x1,
+ .sclfrq = 0x15, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 7500000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x13, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 6500000, .hldt = 0xE, .dbcnt = 0x1,
+ .sclfrq = 0x11, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+ {
+ .core_clk = 4000000, .hldt = 0x9, .dbcnt = 0x1,
+ .sclfrq = 0xB, .scllt = 0x0, .sclht = 0x0,
+ .fast_mode = false,
+ },
+};
+
+static struct smb_timing_t smb_timing_400khz[] = {
+ {
+ .core_clk = 100000000, .hldt = 0x2A, .dbcnt = 0x3,
+ .sclfrq = 0x0, .scllt = 0x47, .sclht = 0x35,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 62500000, .hldt = 0x2A, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0x2C, .sclht = 0x22,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 50000000, .hldt = 0x21, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x24, .sclht = 0x1B,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 48000000, .hldt = 0x1E, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x24, .sclht = 0x19,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 40000000, .hldt = 0x1B, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x1E, .sclht = 0x14,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 33000000, .hldt = 0x15, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x19, .sclht = 0x11,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 30000000, .hldt = 0x15, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x19, .sclht = 0xD,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 29000000, .hldt = 0x11, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x15, .sclht = 0x10,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 26000000, .hldt = 0x10, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x13, .sclht = 0xE,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 25000000, .hldt = 0xF, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x13, .sclht = 0xD,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 24000000, .hldt = 0xD, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x12, .sclht = 0xD,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 20000000, .hldt = 0xB, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xF, .sclht = 0xA,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 16180000, .hldt = 0xA, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xC, .sclht = 0x9,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 15000000, .hldt = 0x9, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xB, .sclht = 0x8,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 13000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xA, .sclht = 0x7,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 12000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xA, .sclht = 0x6,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 10000000, .hldt = 0x6, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x8, .sclht = 0x5,
+ .fast_mode = true,
+ },
+};
+
+static struct smb_timing_t smb_timing_1000khz[] = {
+ {
+ .core_clk = 100000000, .hldt = 0x15, .dbcnt = 0x4,
+ .sclfrq = 0x0, .scllt = 0x1C, .sclht = 0x15,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 62500000, .hldt = 0xF, .dbcnt = 0x3,
+ .sclfrq = 0x0, .scllt = 0x11, .sclht = 0xE,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 50000000, .hldt = 0xA, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xE, .sclht = 0xB,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 48000000, .hldt = 0x9, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xD, .sclht = 0xB,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 41000000, .hldt = 0x9, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xC, .sclht = 0x9,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 40000000, .hldt = 0x8, .dbcnt = 0x2,
+ .sclfrq = 0x0, .scllt = 0xB, .sclht = 0x9,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 33000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0xA, .sclht = 0x7,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 25000000, .hldt = 0x4, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x7, .sclht = 0x6,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 24000000, .hldt = 0x7, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x8, .sclht = 0x5,
+ .fast_mode = true,
+ },
+ {
+ .core_clk = 20000000, .hldt = 0x4, .dbcnt = 0x1,
+ .sclfrq = 0x0, .scllt = 0x6, .sclht = 0x4,
+ .fast_mode = true,
+ },
+};
+
struct npcm_i2c_data {
u8 fifo_size;
u32 segctl_init_val;
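
The timing tables above are ordered by descending core_clk; the init code
later in this patch walks the selected table and picks the first entry whose
core_clk is at or below the actual APB clock, failing with -EINVAL if none
fits. A sketch of that lookup (helper name is illustrative):

	static const struct smb_timing_t *
	demo_pick_timing(const struct smb_timing_t *t, size_t n, u32 apb_clk)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (apb_clk >= t[i].core_clk)
				return &t[i];	/* first satisfiable entry */

		return NULL;			/* caller maps this to -EINVAL */
	}
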
@@ -334,6 +593,7 @@ struct npcm_i2c {
u64 nack_cnt;
u64 timeout_cnt;
u64 tx_complete_cnt;
+	bool ber_state; /* Indicates the bus error state */
};
static inline void npcm_i2c_select_bank(struct npcm_i2c *bus,
@@ -1521,6 +1781,7 @@ static void npcm_i2c_irq_handle_ber(struct npcm_i2c *bus)
if (npcm_i2c_is_master(bus)) {
npcm_i2c_master_abort(bus);
} else {
+ bus->ber_state = true;
npcm_i2c_clear_master_status(bus);
/* Clear BB (BUS BUSY) bit */
@@ -1628,13 +1889,10 @@ static void npcm_i2c_irq_handle_sda(struct npcm_i2c *bus, u8 i2cst)
npcm_i2c_wr_byte(bus, bus->dest_addr | BIT(0));
/* SDA interrupt, after start\restart */
} else {
- if (NPCM_I2CST_XMIT & i2cst) {
- bus->operation = I2C_WRITE_OPER;
+ if (bus->operation == I2C_WRITE_OPER)
npcm_i2c_irq_master_handler_write(bus);
- } else {
- bus->operation = I2C_READ_OPER;
+ else if (bus->operation == I2C_READ_OPER)
npcm_i2c_irq_master_handler_read(bus);
- }
}
}
@@ -1667,6 +1925,12 @@ static int npcm_i2c_int_master_handler(struct npcm_i2c *bus)
(FIELD_GET(NPCM_I2CCST3_EO_BUSY,
ioread8(bus->reg + NPCM_I2CCST3)))) {
npcm_i2c_irq_handle_eob(bus);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ /* reenable slave if it was enabled */
+ if (bus->slave)
+ iowrite8(bus->slave->addr | NPCM_I2CADDR_SAEN,
+ bus->reg + NPCM_I2CADDR1);
+#endif
return 0;
}
@@ -1702,6 +1966,7 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap)
dev_dbg(bus->dev, "bus%d-0x%x recovery skipped, bus not stuck",
bus->num, bus->dest_addr);
npcm_i2c_reset(bus);
+ bus->ber_state = false;
return 0;
}
@@ -1766,6 +2031,7 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap)
if (bus->rec_succ_cnt < ULLONG_MAX)
bus->rec_succ_cnt++;
}
+ bus->ber_state = false;
return status;
}
@@ -1804,102 +2070,45 @@ static void npcm_i2c_recovery_init(struct i2c_adapter *_adap)
*/
static int npcm_i2c_init_clk(struct npcm_i2c *bus, u32 bus_freq_hz)
{
- u32 k1 = 0;
- u32 k2 = 0;
- u8 dbnct = 0;
- u32 sclfrq = 0;
- u8 hldt = 7;
+ struct smb_timing_t *smb_timing;
+ u8 scl_table_cnt = 0, table_size = 0;
u8 fast_mode = 0;
- u32 src_clk_khz;
- u32 bus_freq_khz;
- src_clk_khz = bus->apb_clk / 1000;
- bus_freq_khz = bus_freq_hz / 1000;
bus->bus_freq = bus_freq_hz;
- /* 100KHz and below: */
- if (bus_freq_hz <= I2C_MAX_STANDARD_MODE_FREQ) {
- sclfrq = src_clk_khz / (bus_freq_khz * 4);
-
- if (sclfrq < SCLFRQ_MIN || sclfrq > SCLFRQ_MAX)
- return -EDOM;
-
- if (src_clk_khz >= 40000)
- hldt = 17;
- else if (src_clk_khz >= 12500)
- hldt = 15;
- else
- hldt = 7;
- }
-
- /* 400KHz: */
- else if (bus_freq_hz <= I2C_MAX_FAST_MODE_FREQ) {
- sclfrq = 0;
+ switch (bus_freq_hz) {
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ smb_timing = smb_timing_100khz;
+ table_size = ARRAY_SIZE(smb_timing_100khz);
+ break;
+ case I2C_MAX_FAST_MODE_FREQ:
+ smb_timing = smb_timing_400khz;
+ table_size = ARRAY_SIZE(smb_timing_400khz);
fast_mode = I2CCTL3_400K_MODE;
-
- if (src_clk_khz < 7500)
- /* 400KHZ cannot be supported for core clock < 7.5MHz */
- return -EDOM;
-
- else if (src_clk_khz >= 50000) {
- k1 = 80;
- k2 = 48;
- hldt = 12;
- dbnct = 7;
- }
-
- /* Master or Slave with frequency > 25MHz */
- else if (src_clk_khz > 25000) {
- hldt = clk_coef(src_clk_khz, 300) + 7;
- k1 = clk_coef(src_clk_khz, 1600);
- k2 = clk_coef(src_clk_khz, 900);
- }
- }
-
- /* 1MHz: */
- else if (bus_freq_hz <= I2C_MAX_FAST_MODE_PLUS_FREQ) {
- sclfrq = 0;
+ break;
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
+ smb_timing = smb_timing_1000khz;
+ table_size = ARRAY_SIZE(smb_timing_1000khz);
fast_mode = I2CCTL3_400K_MODE;
-
- /* 1MHZ cannot be supported for core clock < 24 MHz */
- if (src_clk_khz < 24000)
- return -EDOM;
-
- k1 = clk_coef(src_clk_khz, 620);
- k2 = clk_coef(src_clk_khz, 380);
-
- /* Core clk > 40 MHz */
- if (src_clk_khz > 40000) {
- /*
- * Set HLDT:
- * SDA hold time: (HLDT-7) * T(CLK) >= 120
- * HLDT = 120/T(CLK) + 7 = 120 * FREQ(CLK) + 7
- */
- hldt = clk_coef(src_clk_khz, 120) + 7;
- } else {
- hldt = 7;
- dbnct = 2;
- }
+ break;
+ default:
+ return -EINVAL;
}
- /* Frequency larger than 1 MHz is not supported */
- else
- return -EINVAL;
+ for (scl_table_cnt = 0; scl_table_cnt < table_size; scl_table_cnt++)
+ if (bus->apb_clk >= smb_timing[scl_table_cnt].core_clk)
+ break;
- if (bus_freq_hz >= I2C_MAX_FAST_MODE_FREQ) {
- k1 = round_up(k1, 2);
- k2 = round_up(k2 + 1, 2);
- if (k1 < SCLFRQ_MIN || k1 > SCLFRQ_MAX ||
- k2 < SCLFRQ_MIN || k2 > SCLFRQ_MAX)
- return -EDOM;
- }
+ if (scl_table_cnt == table_size)
+ return -EINVAL;
/* write sclfrq value. bits [6:0] are in I2CCTL2 reg */
- iowrite8(FIELD_PREP(I2CCTL2_SCLFRQ6_0, sclfrq & 0x7F),
+ iowrite8(FIELD_PREP(I2CCTL2_SCLFRQ6_0, smb_timing[scl_table_cnt].sclfrq & 0x7F),
bus->reg + NPCM_I2CCTL2);
/* bits [8:7] are in I2CCTL3 reg */
- iowrite8(fast_mode | FIELD_PREP(I2CCTL3_SCLFRQ8_7, (sclfrq >> 7) & 0x3),
+ iowrite8(FIELD_PREP(I2CCTL3_SCLFRQ8_7, (smb_timing[scl_table_cnt].sclfrq >> 7) & 0x3) |
+ fast_mode,
bus->reg + NPCM_I2CCTL3);
/* Select Bank 0 to access NPCM_I2CCTL4/NPCM_I2CCTL5 */
@@ -1911,13 +2120,13 @@ static int npcm_i2c_init_clk(struct npcm_i2c *bus, u32 bus_freq_hz)
* k1 = 2 * SCLLT7-0 -> Low Time = k1 / 2
* k2 = 2 * SCLLT7-0 -> High Time = k2 / 2
*/
- iowrite8(k1 / 2, bus->reg + NPCM_I2CSCLLT);
- iowrite8(k2 / 2, bus->reg + NPCM_I2CSCLHT);
+ iowrite8(smb_timing[scl_table_cnt].scllt, bus->reg + NPCM_I2CSCLLT);
+ iowrite8(smb_timing[scl_table_cnt].sclht, bus->reg + NPCM_I2CSCLHT);
- iowrite8(dbnct, bus->reg + NPCM_I2CCTL5);
+ iowrite8(smb_timing[scl_table_cnt].dbcnt, bus->reg + NPCM_I2CCTL5);
}
- iowrite8(hldt, bus->reg + NPCM_I2CCTL4);
+ iowrite8(smb_timing[scl_table_cnt].hldt, bus->reg + NPCM_I2CCTL4);
/* Return to Bank 1, and stay there by default: */
npcm_i2c_select_bank(bus, I2C_BANK_1);
@@ -2034,7 +2243,7 @@ static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id)
}
static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
- u8 slave_addr, u16 nwrite, u16 nread,
+ u16 nwrite, u16 nread,
u8 *write_data, u8 *read_data,
bool use_PEC, bool use_read_block)
{
@@ -2042,7 +2251,6 @@ static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus,
bus->cmd_err = -EBUSY;
return false;
}
- bus->dest_addr = slave_addr << 1;
bus->wr_buf = write_data;
bus->wr_size = nwrite;
bus->wr_ind = 0;
@@ -2085,7 +2293,6 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
unsigned long time_left, flags;
u16 nwrite, nread;
u8 *write_data, *read_data;
- u8 slave_addr;
unsigned long timeout;
bool read_block = false;
bool read_PEC = false;
@@ -2098,7 +2305,6 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
msg0 = &msgs[0];
- slave_addr = msg0->addr;
if (msg0->flags & I2C_M_RD) { /* read */
nwrite = 0;
write_data = NULL;
@@ -2131,19 +2337,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
}
- /*
- * Adaptive TimeOut: estimated time in usec + 100% margin:
- * 2: double the timeout for clock stretching case
- * 9: bits per transaction (including the ack/nack)
- */
- timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
- timeout = max_t(unsigned long, bus->adap.timeout, usecs_to_jiffies(timeout_usec));
if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
return -EINVAL;
}
- time_left = jiffies + timeout + 1;
+ time_left = jiffies + bus->adap.timeout / bus->adap.retries + 1;
do {
/*
* we must clear slave address immediately when the bus is not
@@ -2161,7 +2360,31 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
} while (time_is_after_jiffies(time_left) && bus_busy);
- if (bus_busy) {
+	/*
+	 * Store the address early in a global position so that it is
+	 * accessible for a potential call to i2c_recover_bus().
+	 *
+	 * Since the transfer might be a read operation, clear the I2C_M_RD
+	 * flag from bus->dest_addr: i2c_recover_bus() uses the address in
+	 * the write direction when recovering the bus after an error, and
+	 * npcm_i2c_master_start_xmit() handles the read/write direction
+	 * internally anyway.
+	 */
+	bus->dest_addr = i2c_8bit_addr_from_msg(msg0) & ~I2C_M_RD;
+
+	/*
+	 * Check the BER (bus error) state. When ber_state is true, the module
+	 * has detected a bus error, e.g. one caused by electrical noise on
+	 * the bus; under that condition the module is reset and the bus
+	 * recovered.
+	 *
+	 * When ber_state is false, the same reset and recovery are performed
+	 * if the bus is busy.
+	 */
+ if (bus_busy || bus->ber_state) {
iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST);
npcm_i2c_reset(bus);
i2c_recover_bus(adap);
@@ -2169,7 +2392,6 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
npcm_i2c_init_params(bus);
- bus->dest_addr = slave_addr;
bus->msgs = msgs;
bus->msgs_num = num;
bus->cmd_err = 0;
@@ -2179,9 +2401,17 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
npcm_i2c_int_enable(bus, true);
- if (npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread,
+ if (npcm_i2c_master_start_xmit(bus, nwrite, nread,
write_data, read_data, read_PEC,
read_block)) {
+ /*
+ * Adaptive TimeOut: estimated time in usec + 100% margin:
+ * 2: double the timeout for clock stretching case
+ * 9: bits per transaction (including the ack/nack)
+ */
+ timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
+ timeout = max_t(unsigned long, bus->adap.timeout / bus->adap.retries,
+ usecs_to_jiffies(timeout_usec));
time_left = wait_for_completion_timeout(&bus->cmd_complete,
timeout);
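
To make the adaptive timeout concrete: at 100 kHz with a 2-byte write and no
read, timeout_usec = (2 * 9 * 1000000 / 100000) * (2 + 0 + 2) = 720 us, and
max_t() then raises that to the per-retry share of the adapter timeout. A
hedged restatement of the formula (helper name is illustrative; the "+ 2"
presumably covers addressing overhead, which the original comment leaves
implicit):

	#include <linux/time64.h>	/* USEC_PER_SEC */

	static unsigned long demo_xfer_timeout_usec(u32 bus_freq_hz,
						    u16 nwrite, u16 nread)
	{
		/* 9 bits per byte (data plus ack/nack), doubled as a 100%
		 * margin for clock stretching */
		return (2UL * 9 * USEC_PER_SEC / bus_freq_hz) *
		       (2 + nread + nwrite);
	}
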
@@ -2307,7 +2537,12 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
adap = &bus->adap;
adap->owner = THIS_MODULE;
adap->retries = 3;
- adap->timeout = msecs_to_jiffies(35);
+	/*
+	 * Users may connect many masters to the same bus. This timeout
+	 * bounds the time it takes to acquire bus ownership; transactions
+	 * can be very long, so waiting 35ms is not enough.
+	 */
+ adap->timeout = 2 * HZ;
adap->algo = &npcm_i2c_algo;
adap->quirks = &npcm_i2c_quirks;
adap->algo_data = bus;
@@ -2363,7 +2598,7 @@ MODULE_DEVICE_TABLE(of, npcm_i2c_bus_of_table);
static struct platform_driver npcm_i2c_bus_driver = {
.probe = npcm_i2c_probe_bus,
- .remove_new = npcm_i2c_remove_bus,
+ .remove = npcm_i2c_remove_bus,
.driver = {
.name = "nuvoton-i2c",
.of_match_table = npcm_i2c_bus_of_table,
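
This and the following .remove_new -> .remove conversions rely on struct
platform_driver's .remove having adopted the void-returning prototype that
.remove_new carried during the transition; the callback bodies themselves are
unchanged. A minimal sketch of the prototype involved (illustrative stub, not
from the patch):

	#include <linux/platform_device.h>

	/* .remove can no longer return an error; cleanup must not fail */
	static void demo_remove(struct platform_device *pdev)
	{
	}

	static struct platform_driver demo_driver = {
		.remove	= demo_remove,
		.driver	= { .name = "demo" },
	};
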
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 482b37c8a129..0f67e57cdeff 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -769,7 +769,7 @@ static DEFINE_NOIRQ_DEV_PM_OPS(ocores_i2c_pm,
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
- .remove_new = ocores_i2c_remove,
+ .remove = ocores_i2c_remove,
.driver = {
.name = "ocores-i2c",
.of_match_table = ocores_i2c_match,
diff --git a/drivers/i2c/busses/i2c-octeon-platdrv.c b/drivers/i2c/busses/i2c-octeon-platdrv.c
index dc6dff95c68c..edfca7b20f29 100644
--- a/drivers/i2c/busses/i2c-octeon-platdrv.c
+++ b/drivers/i2c/busses/i2c-octeon-platdrv.c
@@ -269,7 +269,7 @@ MODULE_DEVICE_TABLE(of, octeon_i2c_match);
static struct platform_driver octeon_i2c_driver = {
.probe = octeon_i2c_probe,
- .remove_new = octeon_i2c_remove,
+ .remove = octeon_i2c_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = octeon_i2c_match,
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 1d9ad25c89ae..92faf03d64cf 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1605,7 +1605,7 @@ static const struct dev_pm_ops omap_i2c_pm_ops = {
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
- .remove_new = omap_i2c_remove,
+ .remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.pm = pm_ptr(&omap_i2c_pm_ops),
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
index d9dd71cf37fd..c9b62892397a 100644
--- a/drivers/i2c/busses/i2c-opal.c
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -249,7 +249,7 @@ MODULE_DEVICE_TABLE(of, i2c_opal_of_match);
static struct platform_driver i2c_opal_driver = {
.probe = i2c_opal_probe,
- .remove_new = i2c_opal_remove,
+ .remove = i2c_opal_remove,
.driver = {
.name = "i2c-opal",
.of_match_table = i2c_opal_of_match,
diff --git a/drivers/i2c/busses/i2c-pasemi-platform.c b/drivers/i2c/busses/i2c-pasemi-platform.c
index 5fbfb9b41744..a486a37d3863 100644
--- a/drivers/i2c/busses/i2c-pasemi-platform.c
+++ b/drivers/i2c/busses/i2c-pasemi-platform.c
@@ -104,7 +104,7 @@ static struct platform_driver pasemi_platform_i2c_driver = {
.of_match_table = pasemi_platform_i2c_of_match,
},
.probe = pasemi_platform_i2c_probe,
- .remove_new = pasemi_platform_i2c_remove,
+ .remove = pasemi_platform_i2c_remove,
};
module_platform_driver(pasemi_platform_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index b8d5480c54f6..87da8241b927 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -238,7 +238,7 @@ MODULE_DEVICE_TABLE(of, i2c_pca_of_match_table);
static struct platform_driver i2c_pca_pf_driver = {
.probe = i2c_pca_pf_probe,
- .remove_new = i2c_pca_pf_remove,
+ .remove = i2c_pca_pf_remove,
.driver = {
.name = "i2c-pca-platform",
.of_match_table = of_match_ptr(i2c_pca_of_match_table),
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index febbd9950d8f..dd75916157f0 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -35,20 +35,7 @@
#include <linux/acpi.h>
#include <linux/io.h>
-
-/* PIIX4 SMBus address offsets */
-#define SMBHSTSTS (0 + piix4_smba)
-#define SMBHSLVSTS (1 + piix4_smba)
-#define SMBHSTCNT (2 + piix4_smba)
-#define SMBHSTCMD (3 + piix4_smba)
-#define SMBHSTADD (4 + piix4_smba)
-#define SMBHSTDAT0 (5 + piix4_smba)
-#define SMBHSTDAT1 (6 + piix4_smba)
-#define SMBBLKDAT (7 + piix4_smba)
-#define SMBSLVCNT (8 + piix4_smba)
-#define SMBSHDWCMD (9 + piix4_smba)
-#define SMBSLVEVT (0xA + piix4_smba)
-#define SMBSLVDAT (0xC + piix4_smba)
+#include "i2c-piix4.h"
/* count for request_region */
#define SMBIOSIZE 9
@@ -70,7 +57,6 @@
#define PIIX4_BYTE 0x04
#define PIIX4_BYTE_DATA 0x08
#define PIIX4_WORD_DATA 0x0C
-#define PIIX4_BLOCK_DATA 0x14
/* Multi-port constants */
#define PIIX4_MAX_ADAPTERS 4
@@ -101,6 +87,7 @@
#define SB800_PIIX4_FCH_PM_ADDR 0xFED80300
#define SB800_PIIX4_FCH_PM_SIZE 8
+#define SB800_ASF_ACPI_PATH "\\_SB.ASFC"
/* insmod parameters */
@@ -160,11 +147,6 @@ static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
};
static const char *piix4_aux_port_name_sb800 = " port 1";
-struct sb800_mmio_cfg {
- void __iomem *addr;
- bool use_mmio;
-};
-
struct i2c_piix4_adapdata {
unsigned short smba;
@@ -175,8 +157,7 @@ struct i2c_piix4_adapdata {
struct sb800_mmio_cfg mmio_cfg;
};
-static int piix4_sb800_region_request(struct device *dev,
- struct sb800_mmio_cfg *mmio_cfg)
+int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_cfg)
{
if (mmio_cfg->use_mmio) {
void __iomem *addr;
@@ -214,9 +195,9 @@ static int piix4_sb800_region_request(struct device *dev,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(piix4_sb800_region_request, "PIIX4_SMBUS");
-static void piix4_sb800_region_release(struct device *dev,
- struct sb800_mmio_cfg *mmio_cfg)
+void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_cfg)
{
if (mmio_cfg->use_mmio) {
iounmap(mmio_cfg->addr);
@@ -227,6 +208,7 @@ static void piix4_sb800_region_release(struct device *dev,
release_region(SB800_PIIX4_SMB_IDX, SB800_PIIX4_SMB_MAP_SIZE);
}
+EXPORT_SYMBOL_NS_GPL(piix4_sb800_region_release, "PIIX4_SMBUS");
static bool piix4_sb800_use_mmio(struct pci_dev *PIIX4_dev)
{
@@ -536,10 +518,8 @@ static int piix4_setup_aux(struct pci_dev *PIIX4_dev,
return piix4_smba;
}
-static int piix4_transaction(struct i2c_adapter *piix4_adapter)
+int piix4_transaction(struct i2c_adapter *piix4_adapter, unsigned short piix4_smba)
{
- struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(piix4_adapter);
- unsigned short piix4_smba = adapdata->smba;
int temp;
int result = 0;
int timeout = 0;
@@ -611,6 +591,7 @@ static int piix4_transaction(struct i2c_adapter *piix4_adapter)
inb_p(SMBHSTDAT1));
return result;
}
+EXPORT_SYMBOL_NS_GPL(piix4_transaction, "PIIX4_SMBUS");
/* Return negative errno on error. */
static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
@@ -675,7 +656,7 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
outb_p((size & 0x1C) + (ENABLE_INT9 & 1), SMBHSTCNT);
- status = piix4_transaction(adap);
+ status = piix4_transaction(adap, piix4_smba);
if (status)
return status;
@@ -764,7 +745,7 @@ static void piix4_imc_wakeup(void)
release_region(KERNCZ_IMC_IDX, 2);
}
-static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg)
+int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg)
{
u8 smba_en_lo, val;
@@ -786,6 +767,7 @@ static int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg)
return (smba_en_lo & piix4_port_mask_sb800);
}
+EXPORT_SYMBOL_NS_GPL(piix4_sb800_port_sel, "PIIX4_SMBUS");
/*
* Handles access to multiple SMBus ports on the SB800.
@@ -1043,6 +1025,9 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
bool is_sb800 = false;
+ bool is_asf = false;
+ acpi_status status;
+ acpi_handle handle;
if ((dev->vendor == PCI_VENDOR_ID_ATI &&
dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
@@ -1105,10 +1090,16 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
}
+ status = acpi_get_handle(NULL, (acpi_string)SB800_ASF_ACPI_PATH, &handle);
+ if (ACPI_SUCCESS(status))
+ is_asf = true;
+
if (dev->vendor == PCI_VENDOR_ID_AMD &&
(dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS ||
dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS)) {
- retval = piix4_setup_sb800(dev, id, 1);
+ /* Do not setup AUX port if ASF is enabled */
+ if (!is_asf)
+ retval = piix4_setup_sb800(dev, id, 1);
}
if (retval > 0) {
diff --git a/drivers/i2c/busses/i2c-piix4.h b/drivers/i2c/busses/i2c-piix4.h
new file mode 100644
index 000000000000..36bc6ce82a27
--- /dev/null
+++ b/drivers/i2c/busses/i2c-piix4.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PIIX4/SB800 SMBus Interfaces
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Sanket Goswami <Sanket.Goswami@amd.com>
+ */
+
+#ifndef I2C_PIIX4_H
+#define I2C_PIIX4_H
+
+#include <linux/types.h>
+
+/* PIIX4 SMBus address offsets */
+#define SMBHSTSTS (0x00 + piix4_smba)
+#define SMBHSLVSTS (0x01 + piix4_smba)
+#define SMBHSTCNT (0x02 + piix4_smba)
+#define SMBHSTCMD (0x03 + piix4_smba)
+#define SMBHSTADD (0x04 + piix4_smba)
+#define SMBHSTDAT0 (0x05 + piix4_smba)
+#define SMBHSTDAT1 (0x06 + piix4_smba)
+#define SMBBLKDAT (0x07 + piix4_smba)
+#define SMBSLVCNT (0x08 + piix4_smba)
+#define SMBSHDWCMD (0x09 + piix4_smba)
+#define SMBSLVEVT (0x0A + piix4_smba)
+#define SMBSLVDAT (0x0C + piix4_smba)
+
+/* PIIX4 constants */
+#define PIIX4_BLOCK_DATA 0x14
+
+struct sb800_mmio_cfg {
+ void __iomem *addr;
+ bool use_mmio;
+};
+
+int piix4_sb800_port_sel(u8 port, struct sb800_mmio_cfg *mmio_cfg);
+int piix4_transaction(struct i2c_adapter *piix4_adapter, unsigned short piix4_smba);
+int piix4_sb800_region_request(struct device *dev, struct sb800_mmio_cfg *mmio_cfg);
+void piix4_sb800_region_release(struct device *dev, struct sb800_mmio_cfg *mmio_cfg);
+
+#endif /* I2C_PIIX4_H */
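
The piix4 symbols exported above live in the "PIIX4_SMBUS" symbol namespace,
so any consumer module (such as the ASF driver this refactor appears to
prepare for) must import that namespace or modpost will reject the references.
A minimal consumer sketch (the string form of MODULE_IMPORT_NS matches the
string form of EXPORT_SYMBOL_NS_GPL used in this patch; older kernels used a
bare identifier instead):

	#include <linux/module.h>
	#include "i2c-piix4.h"

	/* without this, modpost complains that the module uses a symbol
	 * from namespace PIIX4_SMBUS but does not import it */
	MODULE_IMPORT_NS("PIIX4_SMBUS");
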
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 1dafadda73af..9a1af5bbd604 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -95,7 +95,7 @@ enum {
static inline int wait_timeout(struct i2c_pnx_algo_data *data)
{
- long timeout = data->timeout;
+ long timeout = jiffies_to_msecs(data->timeout);
while (timeout > 0 &&
(ioread32(I2C_REG_STS(data)) & mstatus_active)) {
mdelay(1);
@@ -106,7 +106,7 @@ static inline int wait_timeout(struct i2c_pnx_algo_data *data)
static inline int wait_reset(struct i2c_pnx_algo_data *data)
{
- long timeout = data->timeout;
+ long timeout = jiffies_to_msecs(data->timeout);
while (timeout > 0 &&
(ioread32(I2C_REG_CTL(data)) & mcntrl_reset)) {
mdelay(1);
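
The conversion above matters because the adapter timeout stored in
data->timeout is kept in jiffies, while these polling loops decrement once per
mdelay(1), i.e. once per millisecond; counting raw jiffies makes the wait
HZ-dependent and, with HZ=100, ten times too short. A sketch of the corrected
pattern (demo_done() is a hypothetical stand-in for the status poll):

	#include <linux/delay.h>	/* mdelay() */
	#include <linux/errno.h>
	#include <linux/jiffies.h>	/* jiffies_to_msecs() */

	static bool demo_done(void);	/* hypothetical status poll */

	static int demo_wait(unsigned long timeout_jiffies)
	{
		/* HZ=100: 1 s is 100 jiffies but needs 1000 mdelay(1) steps */
		long ms = jiffies_to_msecs(timeout_jiffies);

		while (ms > 0 && !demo_done()) {
			mdelay(1);
			ms--;
		}
		return ms <= 0 ? -ETIMEDOUT : 0;
	}
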
@@ -733,7 +733,7 @@ static struct platform_driver i2c_pnx_driver = {
.pm = pm_sleep_ptr(&i2c_pnx_pm),
},
.probe = i2c_pnx_probe,
- .remove_new = i2c_pnx_remove,
+ .remove = i2c_pnx_remove,
};
static int __init i2c_adap_pnx_init(void)
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index b6b03539f626..9a867c817db0 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -437,7 +437,7 @@ static int i2c_powermac_probe(struct platform_device *dev)
static struct platform_driver i2c_powermac_driver = {
.probe = i2c_powermac_probe,
- .remove_new = i2c_powermac_remove,
+ .remove = i2c_powermac_remove,
.driver = {
.name = "i2c-powermac",
.bus = &platform_bus_type,
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 4d76e71cdd4b..cb6988482673 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1574,7 +1574,7 @@ static const struct dev_pm_ops i2c_pxa_dev_pm_ops = {
static struct platform_driver i2c_pxa_driver = {
.probe = i2c_pxa_probe,
- .remove_new = i2c_pxa_remove,
+ .remove = i2c_pxa_remove,
.driver = {
.name = "pxa2xx-i2c",
.pm = pm_sleep_ptr(&i2c_pxa_dev_pm_ops),
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index 414882c57d7f..05b73326afd4 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -120,7 +120,6 @@ struct cci_data {
unsigned int num_masters;
struct i2c_adapter_quirks quirks;
u16 queue_size[NUM_QUEUES];
- unsigned long cci_clk_rate;
struct hw_params params[3];
};
@@ -523,7 +522,6 @@ static const struct dev_pm_ops qcom_cci_pm = {
static int cci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- unsigned long cci_clk_rate = 0;
struct device_node *child;
struct resource *r;
struct cci *cci;
@@ -594,22 +592,6 @@ static int cci_probe(struct platform_device *pdev)
return dev_err_probe(dev, -EINVAL, "not enough clocks in DT\n");
cci->nclocks = ret;
- /* Retrieve CCI clock rate */
- for (i = 0; i < cci->nclocks; i++) {
- if (!strcmp(cci->clocks[i].id, "cci")) {
- cci_clk_rate = clk_get_rate(cci->clocks[i].clk);
- break;
- }
- }
-
- if (cci_clk_rate != cci->data->cci_clk_rate) {
- /* cci clock set by the bootloader or via assigned clock rate
- * in DT.
- */
- dev_warn(dev, "Found %lu cci clk rate while %lu was expected\n",
- cci_clk_rate, cci->data->cci_clk_rate);
- }
-
ret = cci_enable_clocks(cci);
if (ret < 0)
return ret;
@@ -699,7 +681,6 @@ static const struct cci_data cci_v1_data = {
.max_write_len = 10,
.max_read_len = 12,
},
- .cci_clk_rate = 19200000,
.params[I2C_MODE_STANDARD] = {
.thigh = 78,
.tlow = 114,
@@ -733,7 +714,6 @@ static const struct cci_data cci_v1_5_data = {
.max_write_len = 10,
.max_read_len = 12,
},
- .cci_clk_rate = 19200000,
.params[I2C_MODE_STANDARD] = {
.thigh = 78,
.tlow = 114,
@@ -767,7 +747,6 @@ static const struct cci_data cci_v2_data = {
.max_write_len = 11,
.max_read_len = 12,
},
- .cci_clk_rate = 37500000,
.params[I2C_MODE_STANDARD] = {
.thigh = 201,